diff --git a/cmd/cloud.go b/cmd/cloud.go index 054c194bd40..13305096298 100644 --- a/cmd/cloud.go +++ b/cmd/cloud.go @@ -109,7 +109,7 @@ This will execute the test on the Load Impact cloud service. Use "k6 login cloud // Cloud config cloudConfig := cloud.NewConfig().Apply(derivedConf.Collectors.Cloud) - if err := envconfig.Process("k6", &cloudConfig); err != nil { + if err = envconfig.Process("", &cloudConfig); err != nil { return err } if !cloudConfig.Token.Valid { diff --git a/cmd/collectors.go b/cmd/collectors.go index 2da68ba4941..749dc2a99ab 100644 --- a/cmd/collectors.go +++ b/cmd/collectors.go @@ -72,7 +72,7 @@ func getCollector(collectorName, arg string, src *loader.SourceData, conf Config return jsonc.New(afero.NewOsFs(), arg) case collectorInfluxDB: config := influxdb.NewConfig().Apply(conf.Collectors.InfluxDB) - if err := envconfig.Process("k6", &config); err != nil { + if err := envconfig.Process("", &config); err != nil { return nil, err } urlConfig, err := influxdb.ParseURL(arg) @@ -83,7 +83,7 @@ func getCollector(collectorName, arg string, src *loader.SourceData, conf Config return influxdb.New(config) case collectorCloud: config := cloud.NewConfig().Apply(conf.Collectors.Cloud) - if err := envconfig.Process("k6", &config); err != nil { + if err := envconfig.Process("", &config); err != nil { return nil, err } if arg != "" { @@ -92,7 +92,7 @@ func getCollector(collectorName, arg string, src *loader.SourceData, conf Config return cloud.New(config, src, conf.Options, consts.Version) case collectorKafka: config := kafka.NewConfig().Apply(conf.Collectors.Kafka) - if err := envconfig.Process("k6", &config); err != nil { + if err := envconfig.Process("", &config); err != nil { return nil, err } if arg != "" { @@ -117,7 +117,7 @@ func getCollector(collectorName, arg string, src *loader.SourceData, conf Config return datadog.New(config) case collectorCSV: config := csv.NewConfig().Apply(conf.Collectors.CSV) - if err := envconfig.Process("k6", &config); err 
!= nil { + if err := envconfig.Process("", &config); err != nil { return nil, err } if arg != "" { diff --git a/cmd/config.go b/cmd/config.go index 27deb1e49f7..3d07b68e1a0 100644 --- a/cmd/config.go +++ b/cmd/config.go @@ -61,11 +61,11 @@ func configFlagSet() *pflag.FlagSet { type Config struct { lib.Options - Out []string `json:"out" envconfig:"out"` - Linger null.Bool `json:"linger" envconfig:"linger"` - NoUsageReport null.Bool `json:"noUsageReport" envconfig:"no_usage_report"` - NoThresholds null.Bool `json:"noThresholds" envconfig:"no_thresholds"` - NoSummary null.Bool `json:"noSummary" envconfig:"no_summary"` + Out []string `json:"out" envconfig:"K6_OUT"` + Linger null.Bool `json:"linger" envconfig:"K6_LINGER"` + NoUsageReport null.Bool `json:"noUsageReport" envconfig:"K6_NO_USAGE_REPORT"` + NoThresholds null.Bool `json:"noThresholds" envconfig:"K6_NO_THRESHOLDS"` + NoSummary null.Bool `json:"noSummary" envconfig:"K6_NO_SUMMARY"` Collectors struct { InfluxDB influxdb.Config `json:"influxdb"` @@ -174,10 +174,12 @@ func writeDiskConfig(fs afero.Fs, configPath string, conf Config) error { func readEnvConfig() (conf Config, err error) { // TODO: replace envconfig and refactor the whole configuration from the groun up :/ for _, err := range []error{ - envconfig.Process("k6", &conf), - envconfig.Process("k6", &conf.Collectors.Cloud), - envconfig.Process("k6", &conf.Collectors.InfluxDB), - envconfig.Process("k6", &conf.Collectors.Kafka), + envconfig.Process("", &conf), + envconfig.Process("", &conf.Collectors.Cloud), + envconfig.Process("", &conf.Collectors.InfluxDB), + envconfig.Process("", &conf.Collectors.Kafka), + envconfig.Process("k6_statsd", &conf.Collectors.StatsD), + envconfig.Process("k6_datadog", &conf.Collectors.Datadog), } { return conf, err } @@ -288,6 +290,8 @@ func getConsolidatedConfig(fs afero.Fs, cliConf Config, runner lib.Runner) (conf cliConf.Collectors.InfluxDB = influxdb.NewConfig().Apply(cliConf.Collectors.InfluxDB) cliConf.Collectors.Cloud = 
cloud.NewConfig().Apply(cliConf.Collectors.Cloud) cliConf.Collectors.Kafka = kafka.NewConfig().Apply(cliConf.Collectors.Kafka) + cliConf.Collectors.StatsD = common.NewConfig().Apply(cliConf.Collectors.StatsD) + cliConf.Collectors.Datadog = datadog.NewConfig().Apply(cliConf.Collectors.Datadog) fileConf, _, err := readDiskConfig(fs) if err != nil { diff --git a/cmd/config_test.go b/cmd/config_test.go index 270de7fa5a4..9962d685bf8 100644 --- a/cmd/config_test.go +++ b/cmd/config_test.go @@ -107,7 +107,7 @@ func TestConfigEnv(t *testing.T) { t.Run(`"`+value+`"`, func(t *testing.T) { assert.NoError(t, os.Setenv(field.Key, value)) var config Config - assert.NoError(t, envconfig.Process("k6", &config)) + assert.NoError(t, envconfig.Process("", &config)) fn(config) }) } diff --git a/cmd/options.go b/cmd/options.go index b643870acca..2485f4fda28 100644 --- a/cmd/options.go +++ b/cmd/options.go @@ -89,7 +89,7 @@ func getOptions(flags *pflag.FlagSet) (lib.Options, error) { Batch: getNullInt64(flags, "batch"), RPS: getNullInt64(flags, "rps"), UserAgent: getNullString(flags, "user-agent"), - HttpDebug: getNullString(flags, "http-debug"), + HTTPDebug: getNullString(flags, "http-debug"), InsecureSkipTLSVerify: getNullBool(flags, "insecure-skip-tls-verify"), NoConnectionReuse: getNullBool(flags, "no-connection-reuse"), NoVUConnectionReuse: getNullBool(flags, "no-vu-connection-reuse"), diff --git a/cmd/run.go b/cmd/run.go index 69162bca2b4..6245e1c1a0d 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -357,7 +357,7 @@ a commandline interface for interacting with it.`, updateFreq = 1 * time.Second } ticker := time.NewTicker(updateFreq) - if quiet || conf.HttpDebug.Valid && conf.HttpDebug.String != "" { + if quiet || conf.HTTPDebug.Valid && conf.HTTPDebug.String != "" { ticker.Stop() } mainLoop: diff --git a/js/modules/k6/http/request_test.go b/js/modules/k6/http/request_test.go index e03b827d3ac..a419285c1eb 100644 --- a/js/modules/k6/http/request_test.go +++ 
b/js/modules/k6/http/request_test.go @@ -123,7 +123,7 @@ func newRuntime( UserAgent: null.StringFrom("TestUserAgent"), Throw: null.BoolFrom(true), SystemTags: &stats.DefaultSystemTagSet, - //HttpDebug: null.StringFrom("full"), + //HTTPDebug: null.StringFrom("full"), } samples := make(chan stats.SampleContainer, 1000) @@ -1936,7 +1936,7 @@ func TestDigestAuthWithBody(t *testing.T) { defer tb.Cleanup() state.Options.Throw = null.BoolFrom(true) - state.Options.HttpDebug = null.StringFrom("full") + state.Options.HTTPDebug = null.StringFrom("full") tb.Mux.HandleFunc("/digest-auth-with-post/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { require.Equal(t, "POST", r.Method) diff --git a/lib/netext/httpext/request.go b/lib/netext/httpext/request.go index 0280c2bbf25..4f4440c0f55 100644 --- a/lib/netext/httpext/request.go +++ b/lib/netext/httpext/request.go @@ -267,10 +267,10 @@ func MakeRequest(ctx context.Context, preq *ParsedHTTPRequest) (*Response, error tracerTransport := newTransport(state, tags) var transport http.RoundTripper = tracerTransport - if state.Options.HttpDebug.String != "" { + if state.Options.HTTPDebug.String != "" { transport = httpDebugTransport{ originalTransport: transport, - httpDebugOption: state.Options.HttpDebug.String, + httpDebugOption: state.Options.HTTPDebug.String, } } diff --git a/lib/options.go b/lib/options.go index 3f4835863fe..0e3a1c659b7 100644 --- a/lib/options.go +++ b/lib/options.go @@ -185,102 +185,102 @@ func ParseCIDR(s string) (*IPNet, error) { type Options struct { // Should the test start in a paused state? - Paused null.Bool `json:"paused" envconfig:"paused"` + Paused null.Bool `json:"paused" envconfig:"K6_PAUSED"` // Initial values for VUs, max VUs, duration cap, iteration cap, and stages. // See the Runner or Executor interfaces for more information. - VUs null.Int `json:"vus" envconfig:"vus"` + VUs null.Int `json:"vus" envconfig:"K6_VUS"` //TODO: deprecate this? 
or reuse it in the manual control "scheduler"? - VUsMax null.Int `json:"vusMax" envconfig:"vus_max"` - Duration types.NullDuration `json:"duration" envconfig:"duration"` - Iterations null.Int `json:"iterations" envconfig:"iterations"` - Stages []Stage `json:"stages" envconfig:"stages"` + VUsMax null.Int `json:"vusMax" envconfig:"K6_VUS_MAX"` + Duration types.NullDuration `json:"duration" envconfig:"K6_DURATION"` + Iterations null.Int `json:"iterations" envconfig:"K6_ITERATIONS"` + Stages []Stage `json:"stages" envconfig:"K6_STAGES"` Execution scheduler.ConfigMap `json:"execution,omitempty" envconfig:"-"` // Timeouts for the setup() and teardown() functions - SetupTimeout types.NullDuration `json:"setupTimeout" envconfig:"setup_timeout"` - TeardownTimeout types.NullDuration `json:"teardownTimeout" envconfig:"teardown_timeout"` + SetupTimeout types.NullDuration `json:"setupTimeout" envconfig:"K6_SETUP_TIMEOUT"` + TeardownTimeout types.NullDuration `json:"teardownTimeout" envconfig:"K6_TEARDOWN_TIMEOUT"` // Limit HTTP requests per second. - RPS null.Int `json:"rps" envconfig:"rps"` + RPS null.Int `json:"rps" envconfig:"K6_RPS"` // How many HTTP redirects do we follow? - MaxRedirects null.Int `json:"maxRedirects" envconfig:"max_redirects"` + MaxRedirects null.Int `json:"maxRedirects" envconfig:"K6_MAX_REDIRECTS"` // Default User Agent string for HTTP requests. - UserAgent null.String `json:"userAgent" envconfig:"user_agent"` + UserAgent null.String `json:"userAgent" envconfig:"K6_USER_AGENT"` // How many batch requests are allowed in parallel, in total and per host? - Batch null.Int `json:"batch" envconfig:"batch"` - BatchPerHost null.Int `json:"batchPerHost" envconfig:"batch_per_host"` + Batch null.Int `json:"batch" envconfig:"K6_BATCH"` + BatchPerHost null.Int `json:"batchPerHost" envconfig:"K6_BATCH_PER_HOST"` // Should all HTTP requests and responses be logged (excluding body)? 
- HttpDebug null.String `json:"httpDebug" envconfig:"http_debug"` + HTTPDebug null.String `json:"httpDebug" envconfig:"K6_HTTP_DEBUG"` // Accept invalid or untrusted TLS certificates. - InsecureSkipTLSVerify null.Bool `json:"insecureSkipTLSVerify" envconfig:"insecure_skip_tls_verify"` + InsecureSkipTLSVerify null.Bool `json:"insecureSkipTLSVerify" envconfig:"K6_INSECURE_SKIP_TLS_VERIFY"` // Specify TLS versions and cipher suites, and present client certificates. - TLSCipherSuites *TLSCipherSuites `json:"tlsCipherSuites" envconfig:"tls_cipher_suites"` - TLSVersion *TLSVersions `json:"tlsVersion" envconfig:"tls_version"` - TLSAuth []*TLSAuth `json:"tlsAuth" envconfig:"tlsauth"` + TLSCipherSuites *TLSCipherSuites `json:"tlsCipherSuites" envconfig:"K6_TLS_CIPHER_SUITES"` + TLSVersion *TLSVersions `json:"tlsVersion" envconfig:"K6_TLS_VERSION"` + TLSAuth []*TLSAuth `json:"tlsAuth" envconfig:"K6_TLSAUTH"` // Throw warnings (eg. failed HTTP requests) as errors instead of simply logging them. - Throw null.Bool `json:"throw" envconfig:"throw"` + Throw null.Bool `json:"throw" envconfig:"K6_THROW"` // Define thresholds; these take the form of 'metric=["snippet1", "snippet2"]'. // To create a threshold on a derived metric based on tag queries ("submetrics"), create a // metric on a nonexistent metric named 'real_metric{tagA:valueA,tagB:valueB}'. - Thresholds map[string]stats.Thresholds `json:"thresholds" envconfig:"thresholds"` + Thresholds map[string]stats.Thresholds `json:"thresholds" envconfig:"K6_THRESHOLDS"` // Blacklist IP ranges that tests may not contact. Mainly useful in hosted setups. 
- BlacklistIPs []*IPNet `json:"blacklistIPs" envconfig:"blacklist_ips"` + BlacklistIPs []*IPNet `json:"blacklistIPs" envconfig:"K6_BLACKLIST_IPS"` // Hosts overrides dns entries for given hosts - Hosts map[string]net.IP `json:"hosts" envconfig:"hosts"` + Hosts map[string]net.IP `json:"hosts" envconfig:"K6_HOSTS"` // Disable keep-alive connections - NoConnectionReuse null.Bool `json:"noConnectionReuse" envconfig:"no_connection_reuse"` + NoConnectionReuse null.Bool `json:"noConnectionReuse" envconfig:"K6_NO_CONNECTION_REUSE"` // Do not reuse connections between VU iterations. This gives more realistic results (depending // on what you're looking for), but you need to raise various kernel limits or you'll get // errors about running out of file handles or sockets, or being unable to bind addresses. - NoVUConnectionReuse null.Bool `json:"noVUConnectionReuse" envconfig:"no_vu_connection_reuse"` + NoVUConnectionReuse null.Bool `json:"noVUConnectionReuse" envconfig:"K6_NO_VU_CONNECTION_REUSE"` // MinIterationDuration can be used to force VUs to pause between iterations if a specific // iteration is shorter than the specified value. - MinIterationDuration types.NullDuration `json:"minIterationDuration" envconfig:"min_iteration_duration"` + MinIterationDuration types.NullDuration `json:"minIterationDuration" envconfig:"K6_MIN_ITERATION_DURATION"` // These values are for third party collectors' benefit. // Can't be set through env vars. 
External map[string]json.RawMessage `json:"ext" ignored:"true"` // Summary trend stats for trend metrics (response times) in CLI output - SummaryTrendStats []string `json:"summaryTrendStats" envconfig:"summary_trend_stats"` + SummaryTrendStats []string `json:"summaryTrendStats" envconfig:"K6_SUMMARY_TREND_STATS"` // Summary time unit for summary metrics (response times) in CLI output - SummaryTimeUnit null.String `json:"summaryTimeUnit" envconfig:"summary_time_unit"` + SummaryTimeUnit null.String `json:"summaryTimeUnit" envconfig:"K6_SUMMARY_TIME_UNIT"` // Which system tags to include with metrics ("method", "vu" etc.) // Use pointer for identifying whether user provide any tag or not. - SystemTags *stats.SystemTagSet `json:"systemTags" envconfig:"system_tags"` + SystemTags *stats.SystemTagSet `json:"systemTags" envconfig:"K6_SYSTEM_TAGS"` // Tags to be applied to all samples for this running - RunTags *stats.SampleTags `json:"tags" envconfig:"tags"` + RunTags *stats.SampleTags `json:"tags" envconfig:"K6_TAGS"` // Buffer size of the channel for metric samples; 0 means unbuffered - MetricSamplesBufferSize null.Int `json:"metricSamplesBufferSize" envconfig:"metric_samples_buffer_size"` + MetricSamplesBufferSize null.Int `json:"metricSamplesBufferSize" envconfig:"K6_METRIC_SAMPLES_BUFFER_SIZE"` // Do not reset cookies after a VU iteration - NoCookiesReset null.Bool `json:"noCookiesReset" envconfig:"no_cookies_reset"` + NoCookiesReset null.Bool `json:"noCookiesReset" envconfig:"K6_NO_COOKIES_RESET"` // Discard Http Responses Body - DiscardResponseBodies null.Bool `json:"discardResponseBodies" envconfig:"discard_response_bodies"` + DiscardResponseBodies null.Bool `json:"discardResponseBodies" envconfig:"K6_DISCARD_RESPONSE_BODIES"` // Redirect console logging to a file - ConsoleOutput null.String `json:"-" envconfig:"console_output"` + ConsoleOutput null.String `json:"-" envconfig:"K6_CONSOLE_OUTPUT"` } // Returns the result of overwriting any fields with any that are 
set on the argument. @@ -358,8 +358,8 @@ func (o Options) Apply(opts Options) Options { if opts.BatchPerHost.Valid { o.BatchPerHost = opts.BatchPerHost } - if opts.HttpDebug.Valid { - o.HttpDebug = opts.HttpDebug + if opts.HTTPDebug.Valid { + o.HTTPDebug = opts.HTTPDebug } if opts.InsecureSkipTLSVerify.Valid { o.InsecureSkipTLSVerify = opts.InsecureSkipTLSVerify diff --git a/lib/options_test.go b/lib/options_test.go index ce8117de919..c7d86928636 100644 --- a/lib/options_test.go +++ b/lib/options_test.go @@ -124,10 +124,10 @@ func TestOptions(t *testing.T) { assert.True(t, opts.BatchPerHost.Valid) assert.Equal(t, int64(12345), opts.BatchPerHost.Int64) }) - t.Run("HttpDebug", func(t *testing.T) { - opts := Options{}.Apply(Options{HttpDebug: null.StringFrom("foo")}) - assert.True(t, opts.HttpDebug.Valid) - assert.Equal(t, "foo", opts.HttpDebug.String) + t.Run("HTTPDebug", func(t *testing.T) { + opts := Options{}.Apply(Options{HTTPDebug: null.StringFrom("foo")}) + assert.True(t, opts.HTTPDebug.Valid) + assert.Equal(t, "foo", opts.HTTPDebug.String) }) t.Run("InsecureSkipTLSVerify", func(t *testing.T) { opts := Options{}.Apply(Options{InsecureSkipTLSVerify: null.BoolFrom(true)}) diff --git a/lib/runtime_options.go b/lib/runtime_options.go index 4355f9dc0c0..124ba7274d6 100644 --- a/lib/runtime_options.go +++ b/lib/runtime_options.go @@ -25,10 +25,10 @@ import null "gopkg.in/guregu/null.v3" // RuntimeOptions are settings passed onto the goja JS runtime type RuntimeOptions struct { // Whether to pass the actual system environment variables to the JS runtime - IncludeSystemEnvVars null.Bool `json:"includeSystemEnvVars" envconfig:"include_system_env_vars"` + IncludeSystemEnvVars null.Bool `json:"includeSystemEnvVars" envconfig:"K6_INCLUDE_SYSTEM_ENV_VARS"` // Environment variables passed onto the runner - Env map[string]string `json:"env" envconfig:"env"` + Env map[string]string `json:"env" envconfig:"K6_ENV"` } // Apply overwrites the receiver RuntimeOptions' fields with 
any that are set diff --git a/stats/cloud/config.go b/stats/cloud/config.go index 847ff66ff1a..bcdeb5cbfe2 100644 --- a/stats/cloud/config.go +++ b/stats/cloud/config.go @@ -28,21 +28,22 @@ import ( ) // Config holds all the necessary data and options for sending metrics to the Load Impact cloud. +//nolint:lll type Config struct { // TODO: refactor common stuff between cloud execution and output - Token null.String `json:"token" envconfig:"CLOUD_TOKEN"` + Token null.String `json:"token" envconfig:"K6_CLOUD_TOKEN"` DeprecatedToken null.String `json:"-" envconfig:"K6CLOUD_TOKEN"` - ProjectID null.Int `json:"projectID" envconfig:"CLOUD_PROJECT_ID"` - Name null.String `json:"name" envconfig:"CLOUD_NAME"` + ProjectID null.Int `json:"projectID" envconfig:"K6_CLOUD_PROJECT_ID"` + Name null.String `json:"name" envconfig:"K6_CLOUD_NAME"` - Host null.String `json:"host" envconfig:"CLOUD_HOST"` - WebAppURL null.String `json:"webAppURL" envconfig:"CLOUD_WEB_APP_URL"` - NoCompress null.Bool `json:"noCompress" envconfig:"CLOUD_NO_COMPRESS"` + Host null.String `json:"host" envconfig:"K6_CLOUD_HOST"` + WebAppURL null.String `json:"webAppURL" envconfig:"K6_CLOUD_WEB_APP_URL"` + NoCompress null.Bool `json:"noCompress" envconfig:"K6_CLOUD_NO_COMPRESS"` - MaxMetricSamplesPerPackage null.Int `json:"maxMetricSamplesPerPackage" envconfig:"CLOUD_MAX_METRIC_SAMPLES_PER_PACKAGE"` + MaxMetricSamplesPerPackage null.Int `json:"maxMetricSamplesPerPackage" envconfig:"K6_CLOUD_MAX_METRIC_SAMPLES_PER_PACKAGE"` // The time interval between periodic API calls for sending samples to the cloud ingest service. - MetricPushInterval types.NullDuration `json:"metricPushInterval" envconfig:"CLOUD_METRIC_PUSH_INTERVAL"` + MetricPushInterval types.NullDuration `json:"metricPushInterval" envconfig:"K6_CLOUD_METRIC_PUSH_INTERVAL"` // Aggregation docs: // @@ -103,23 +104,23 @@ type Config struct { // on the next MetricPushInterval event.
// If specified and is greater than 0, sample aggregation with that period is enabled - AggregationPeriod types.NullDuration `json:"aggregationPeriod" envconfig:"CLOUD_AGGREGATION_PERIOD"` + AggregationPeriod types.NullDuration `json:"aggregationPeriod" envconfig:"K6_CLOUD_AGGREGATION_PERIOD"` // If aggregation is enabled, this is how often new HTTP trails will be sorted into buckets and sub-buckets and aggregated. - AggregationCalcInterval types.NullDuration `json:"aggregationCalcInterval" envconfig:"CLOUD_AGGREGATION_CALC_INTERVAL"` + AggregationCalcInterval types.NullDuration `json:"aggregationCalcInterval" envconfig:"K6_CLOUD_AGGREGATION_CALC_INTERVAL"` // If aggregation is enabled, this specifies how long we'll wait for period samples to accumulate before trying to aggregate them. - AggregationWaitPeriod types.NullDuration `json:"aggregationWaitPeriod" envconfig:"CLOUD_AGGREGATION_WAIT_PERIOD"` + AggregationWaitPeriod types.NullDuration `json:"aggregationWaitPeriod" envconfig:"K6_CLOUD_AGGREGATION_WAIT_PERIOD"` // If aggregation is enabled, but the collected samples for a certain AggregationPeriod after AggregationPushDelay has passed are less than this number, they won't be aggregated. - AggregationMinSamples null.Int `json:"aggregationMinSamples" envconfig:"CLOUD_AGGREGATION_MIN_SAMPLES"` + AggregationMinSamples null.Int `json:"aggregationMinSamples" envconfig:"K6_CLOUD_AGGREGATION_MIN_SAMPLES"` // If this is enabled and a sub-bucket has more than AggregationMinSamples HTTP trails in it, they would all be // aggregated without attempting to find and separate any outlier metrics first. // IMPORTANT: This is intended for testing purposes only or, in extreme cases, when the result precision // isn't very important and the improved aggregation percentage would be worth the potentially huge loss // of metric granularity and possible masking of any outlier samples. 
- AggregationSkipOutlierDetection null.Bool `json:"aggregationSkipOutlierDetection" envconfig:"CLOUD_AGGREGATION_SKIP_OUTLIER_DETECTION"` + AggregationSkipOutlierDetection null.Bool `json:"aggregationSkipOutlierDetection" envconfig:"K6_CLOUD_AGGREGATION_SKIP_OUTLIER_DETECTION"` // If aggregation and outlier detection are enabled, this option specifies the // number of HTTP trails in a sub-bucket that determine which quartile-calculating @@ -131,19 +132,19 @@ type Config struct { // QuickSelect-based (https://en.wikipedia.org/wiki/Quickselect) algorithm will // be used. It doesn't support interpolation, so there's a small loss of precision // in the outlier detection, but it's not as resource-heavy as the sorting algorithm. - AggregationOutlierAlgoThreshold null.Int `json:"aggregationOutlierAlgoThreshold" envconfig:"CLOUD_AGGREGATION_OUTLIER_ALGO_THRESHOLD"` + AggregationOutlierAlgoThreshold null.Int `json:"aggregationOutlierAlgoThreshold" envconfig:"K6_CLOUD_AGGREGATION_OUTLIER_ALGO_THRESHOLD"` // The radius (as a fraction) from the median at which to sample Q1 and Q3. // By default it's one quarter (0.25) and if set to something different, the Q in IQR // won't make much sense... But this would allow us to select tighter sample groups for // aggregation if we want. - AggregationOutlierIqrRadius null.Float `json:"aggregationOutlierIqrRadius" envconfig:"CLOUD_AGGREGATION_OUTLIER_IQR_RADIUS"` + AggregationOutlierIqrRadius null.Float `json:"aggregationOutlierIqrRadius" envconfig:"K6_CLOUD_AGGREGATION_OUTLIER_IQR_RADIUS"` // Connection or request times with how many IQRs below Q1 to consier as non-aggregatable outliers. 
- AggregationOutlierIqrCoefLower null.Float `json:"aggregationOutlierIqrCoefLower" envconfig:"CLOUD_AGGREGATION_OUTLIER_IQR_COEF_LOWER"` + AggregationOutlierIqrCoefLower null.Float `json:"aggregationOutlierIqrCoefLower" envconfig:"K6_CLOUD_AGGREGATION_OUTLIER_IQR_COEF_LOWER"` // Connection or request times with how many IQRs above Q3 to consier as non-aggregatable outliers. - AggregationOutlierIqrCoefUpper null.Float `json:"aggregationOutlierIqrCoefUpper" envconfig:"CLOUD_AGGREGATION_OUTLIER_IQR_COEF_UPPER"` + AggregationOutlierIqrCoefUpper null.Float `json:"aggregationOutlierIqrCoefUpper" envconfig:"K6_CLOUD_AGGREGATION_OUTLIER_IQR_COEF_UPPER"` } // NewConfig creates a new Config instance with default values for some fields. diff --git a/stats/csv/config.go b/stats/csv/config.go index 859bbee70cc..95a7a2b33be 100644 --- a/stats/csv/config.go +++ b/stats/csv/config.go @@ -33,15 +33,15 @@ import ( // Config is the config for the csv collector type Config struct { // Samples. - FileName null.String `json:"file_name" envconfig:"CSV_FILENAME"` - SaveInterval types.NullDuration `json:"save_interval" envconfig:"CSV_SAVE_INTERVAL"` + FileName null.String `json:"file_name" envconfig:"K6_CSV_FILENAME"` + SaveInterval types.NullDuration `json:"save_interval" envconfig:"K6_CSV_SAVE_INTERVAL"` } // config is a duplicate of ConfigFields as we can not mapstructure.Decode into // null types so we duplicate the struct with primitive types to Decode into type config struct { - FileName string `json:"file_name" mapstructure:"file_name" envconfig:"CSV_FILENAME"` - SaveInterval string `json:"save_interval" mapstructure:"save_interval" envconfig:"CSV_SAVE_INTERVAL"` + FileName string `json:"file_name" mapstructure:"file_name" envconfig:"K6_CSV_FILENAME"` + SaveInterval string `json:"save_interval" mapstructure:"save_interval" envconfig:"K6_CSV_SAVE_INTERVAL"` } // NewConfig creates a new Config instance with default values for some fields. 
diff --git a/stats/influxdb/config.go b/stats/influxdb/config.go index dc557165adb..794a4ae43a0 100644 --- a/stats/influxdb/config.go +++ b/stats/influxdb/config.go @@ -35,20 +35,20 @@ import ( type Config struct { // Connection. - Addr null.String `json:"addr" envconfig:"INFLUXDB_ADDR"` - Username null.String `json:"username,omitempty" envconfig:"INFLUXDB_USERNAME"` - Password null.String `json:"password,omitempty" envconfig:"INFLUXDB_PASSWORD"` - Insecure null.Bool `json:"insecure,omitempty" envconfig:"INFLUXDB_INSECURE"` - PayloadSize null.Int `json:"payloadSize,omitempty" envconfig:"INFLUXDB_PAYLOAD_SIZE"` - PushInterval types.NullDuration `json:"pushInterval,omitempty" envconfig:"INFLUXDB_PUSH_INTERVAL"` - ConcurrentWrites null.Int `json:"concurrentWrites,omitempty" envconfig:"INFLUXDB_CONCURRENT_WRITES"` + Addr null.String `json:"addr" envconfig:"K6_INFLUXDB_ADDR"` + Username null.String `json:"username,omitempty" envconfig:"K6_INFLUXDB_USERNAME"` + Password null.String `json:"password,omitempty" envconfig:"K6_INFLUXDB_PASSWORD"` + Insecure null.Bool `json:"insecure,omitempty" envconfig:"K6_INFLUXDB_INSECURE"` + PayloadSize null.Int `json:"payloadSize,omitempty" envconfig:"K6_INFLUXDB_PAYLOAD_SIZE"` + PushInterval types.NullDuration `json:"pushInterval,omitempty" envconfig:"K6_INFLUXDB_PUSH_INTERVAL"` + ConcurrentWrites null.Int `json:"concurrentWrites,omitempty" envconfig:"K6_INFLUXDB_CONCURRENT_WRITES"` // Samples. 
- DB null.String `json:"db" envconfig:"INFLUXDB_DB"` - Precision null.String `json:"precision,omitempty" envconfig:"INFLUXDB_PRECISION"` - Retention null.String `json:"retention,omitempty" envconfig:"INFLUXDB_RETENTION"` - Consistency null.String `json:"consistency,omitempty" envconfig:"INFLUXDB_CONSISTENCY"` - TagsAsFields []string `json:"tagsAsFields,omitempty" envconfig:"INFLUXDB_TAGS_AS_FIELDS"` + DB null.String `json:"db" envconfig:"K6_INFLUXDB_DB"` + Precision null.String `json:"precision,omitempty" envconfig:"K6_INFLUXDB_PRECISION"` + Retention null.String `json:"retention,omitempty" envconfig:"K6_INFLUXDB_RETENTION"` + Consistency null.String `json:"consistency,omitempty" envconfig:"K6_INFLUXDB_CONSISTENCY"` + TagsAsFields []string `json:"tagsAsFields,omitempty" envconfig:"K6_INFLUXDB_TAGS_AS_FIELDS"` } func NewConfig() *Config { diff --git a/stats/kafka/config.go b/stats/kafka/config.go index 96f68a83d8c..49941186d61 100644 --- a/stats/kafka/config.go +++ b/stats/kafka/config.go @@ -33,12 +33,12 @@ import ( // Config is the config for the kafka collector type Config struct { // Connection. - Brokers []string `json:"brokers" envconfig:"KAFKA_BROKERS"` + Brokers []string `json:"brokers" envconfig:"K6_KAFKA_BROKERS"` // Samples. 
- Topic null.String `json:"topic" envconfig:"KAFKA_TOPIC"` - Format null.String `json:"format" envconfig:"KAFKA_FORMAT"` - PushInterval types.NullDuration `json:"push_interval" envconfig:"KAFKA_PUSH_INTERVAL"` + Topic null.String `json:"topic" envconfig:"K6_KAFKA_TOPIC"` + Format null.String `json:"format" envconfig:"K6_KAFKA_FORMAT"` + PushInterval types.NullDuration `json:"push_interval" envconfig:"K6_KAFKA_PUSH_INTERVAL"` InfluxDBConfig influxdb.Config `json:"influxdb"` } @@ -46,10 +46,10 @@ type Config struct { // config is a duplicate of ConfigFields as we can not mapstructure.Decode into // null types so we duplicate the struct with primitive types to Decode into type config struct { - Brokers []string `json:"brokers" mapstructure:"brokers" envconfig:"KAFKA_BROKERS"` - Topic string `json:"topic" mapstructure:"topic" envconfig:"KAFKA_TOPIC"` - Format string `json:"format" mapstructure:"format" envconfig:"KAFKA_FORMAT"` - PushInterval string `json:"push_interval" mapstructure:"push_interval" envconfig:"KAFKA_PUSH_INTERVAL"` + Brokers []string `json:"brokers" mapstructure:"brokers" envconfig:"K6_KAFKA_BROKERS"` + Topic string `json:"topic" mapstructure:"topic" envconfig:"K6_KAFKA_TOPIC"` + Format string `json:"format" mapstructure:"format" envconfig:"K6_KAFKA_FORMAT"` + PushInterval string `json:"push_interval" mapstructure:"push_interval" envconfig:"K6_KAFKA_PUSH_INTERVAL"` InfluxDBConfig influxdb.Config `json:"influxdb" mapstructure:"influxdb"` }