From 41f487165dffa06e8c6d931a3dae903c530fb6f6 Mon Sep 17 00:00:00 2001
From: wilhelmguo
Date: Wed, 27 Mar 2019 17:27:30 +0800
Subject: [PATCH] glide init and install

---
 glide.lock | 12 +
 glide.yaml | 8 +
 .../influxdata/influxdb/.circleci/config.yml | 19 +
 .../influxdata/influxdb/.dockerignore | 1 +
 .../influxdb/.github/ISSUE_TEMPLATE.md | 56 +
 .../influxdb/.github/PULL_REQUEST_TEMPLATE.md | 10 +
 .../github.com/influxdata/influxdb/.gitignore | 83 +
 .../influxdata/influxdb/.hooks/pre-commit | 23 +
 .../influxdata/influxdb/.mention-bot | 6 +
 .../influxdata/influxdb/CHANGELOG.md | 3253 ++
 .../influxdata/influxdb/CODING_GUIDELINES.md | 82 +
 .../influxdata/influxdb/CONTRIBUTING.md | 282 +
 .../github.com/influxdata/influxdb/Dockerfile | 19 +
 .../influxdb/Dockerfile_build_ubuntu32 | 39 +
 .../influxdb/Dockerfile_build_ubuntu64 | 41 +
 .../influxdb/Dockerfile_build_ubuntu64_git | 44 +
 .../influxdb/Dockerfile_build_ubuntu64_go1.11 | 43 +
 .../influxdb/Dockerfile_jenkins_ubuntu32 | 18 +
 .../influxdb/Dockerfile_test_ubuntu32 | 12 +
 vendor/github.com/influxdata/influxdb/Godeps | 49 +
 .../github.com/influxdata/influxdb/Gopkg.lock | 366 +
 .../github.com/influxdata/influxdb/Gopkg.toml | 75 +
 .../influxdata/influxdb/Jenkinsfile | 96 +
 vendor/github.com/influxdata/influxdb/LICENSE | 20 +
 .../influxdb/LICENSE_OF_DEPENDENCIES.md | 63 +
 .../github.com/influxdata/influxdb/QUERIES.md | 190 +
 .../github.com/influxdata/influxdb/README.md | 71 +
 vendor/github.com/influxdata/influxdb/TODO.md | 9 +
 .../influxdata/influxdb/appveyor.yml | 37 +
 .../github.com/influxdata/influxdb/build.py | 991 ++
 .../github.com/influxdata/influxdb/build.sh | 22 +
 .../influxdata/influxdb/client/README.md | 312 +
 .../influxdb/client/example_test.go | 113 +
 .../influxdata/influxdb/client/influxdb.go | 870 +
 .../influxdb/client/influxdb_test.go | 1022 ++
 .../influxdata/influxdb/client/v2/client.go | 662 +
 .../influxdb/client/v2/client_test.go | 913 ++
 .../influxdb/client/v2/example_test.go | 265 +
 .../influxdata/influxdb/client/v2/udp.go | 112 +
 .../cmd/influx-tools/export/README.md | 76 +
 .../cmd/influx-tools/export/bucket.go | 48 +
 .../cmd/influx-tools/export/bucket_test.go | 116 +
 .../cmd/influx-tools/export/command.go | 217 +
 .../cmd/influx-tools/export/exporter.go | 234 +
 .../influxdb/cmd/influx-tools/help/help.go | 40 +
 .../cmd/influx-tools/importer/README.md | 12 +
 .../cmd/influx-tools/importer/command.go | 144 +
 .../cmd/influx-tools/importer/importer.go | 232 +
 .../influx-tools/importer/series_writer.go | 114 +
 .../cmd/influx-tools/importer/shard_writer.go | 96 +
 .../influx-tools/internal/errlist/errlist.go | 37 +
 .../internal/format/binary/binary.pb.go | 2231 +++
 .../internal/format/binary/binary.proto | 72 +
 .../internal/format/binary/common.go | 60 +
 .../format/binary/messagetype_string.go | 17 +
 .../internal/format/binary/reader.go | 281 +
 .../internal/format/binary/reader_test.go | 457 +
 .../internal/format/binary/writer.go | 375 +
 .../internal/format/binary/writer_test.go | 114 +
 .../internal/format/conflictwriter.go | 176 +
 .../internal/format/line/writer.go | 181 +
 .../internal/format/text/writer.go | 191 +
 .../influx-tools/internal/format/writer.go | 156 +
 .../internal/storage/resultset.go | 86 +
 .../internal/storage/series_cursor.go | 176 +
 .../influx-tools/internal/storage/store.go | 40 +
 .../cmd/influx-tools/internal/tlv/tlv.go | 96 +
 .../cmd/influx-tools/internal/tlv/tlv_test.go | 42 +
 .../influxdb/cmd/influx-tools/main.go | 143 +
 .../cmd/influx-tools/server/server.go | 29 +
 .../influxdata/influxdb/cmd/influx/cli/cli.go
| 1149 ++ .../cmd/influx/cli/cli_internal_test.go | 58 + .../influxdb/cmd/influx/cli/cli_test.go | 674 + .../influxdb/cmd/influx/cli/parser.go | 34 + .../cmd/influx/cli/parser_internal_test.go | 90 + .../influxdata/influxdb/cmd/influx/main.go | 128 + .../influxdb/cmd/influx_inspect/README.md | 107 + .../cmd/influx_inspect/buildtsi/buildtsi.go | 410 + .../cmd/influx_inspect/deletetsm/deletetsm.go | 156 + .../cmd/influx_inspect/dumptsi/dumptsi.go | 525 + .../cmd/influx_inspect/dumptsm/dumptsm.go | 335 + .../influx_inspect/dumptsm/dumptsm_test.go | 3 + .../influx_inspect/dumptsmwal/dumptsmwal.go | 162 + .../cmd/influx_inspect/export/export.go | 419 + .../cmd/influx_inspect/export/export_test.go | 358 + .../influxdb/cmd/influx_inspect/help/help.go | 47 + .../cmd/influx_inspect/help/help_test.go | 3 + .../influxdb/cmd/influx_inspect/main.go | 120 + .../cmd/influx_inspect/report/report.go | 326 + .../cmd/influx_inspect/report/report_test.go | 3 + .../cmd/influx_inspect/reporttsi/report.go | 487 + .../verify/seriesfile/command.go | 120 + .../verify/seriesfile/verify.go | 402 + .../verify/seriesfile/verify_test.go | 168 + .../cmd/influx_inspect/verify/tsm/verify.go | 120 + .../influx_inspect/verify/tsm/verify_test.go | 3 + .../influxdb/cmd/influx_stress/README.md | 43 + .../cmd/influx_stress/examples/template.toml | 92 + .../cmd/influx_stress/influx_stress.go | 71 + .../influxdb/cmd/influx_tsm/README.md | 152 + .../influxdb/cmd/influx_tsm/b1/reader.go | 270 + .../influxdb/cmd/influx_tsm/bz1/reader.go | 371 + .../influxdb/cmd/influx_tsm/converter.go | 118 + .../influxdb/cmd/influx_tsm/main.go | 419 + .../influxdb/cmd/influx_tsm/stats/stats.go | 55 + .../influxdb/cmd/influx_tsm/tracker.go | 130 + .../influxdb/cmd/influx_tsm/tsdb/codec.go | 119 + .../influxdb/cmd/influx_tsm/tsdb/database.go | 244 + .../cmd/influx_tsm/tsdb/internal/meta.pb.go | 121 + .../influxdb/cmd/influx_tsm/tsdb/types.go | 60 + .../influxdb/cmd/influxd/backup/backup.go | 620 + .../cmd/influxd/backup_util/backup_util.go | 225 + .../influxd/backup_util/internal/data.pb.go | 71 + .../influxd/backup_util/internal/data.proto | 12 + .../influxdb/cmd/influxd/help/help.go | 46 + .../influxdata/influxdb/cmd/influxd/main.go | 169 + .../influxdb/cmd/influxd/restore/restore.go | 600 + .../influxdb/cmd/influxd/run/command.go | 301 + .../influxdb/cmd/influxd/run/command_test.go | 64 + .../influxdb/cmd/influxd/run/config.go | 262 + .../cmd/influxd/run/config_command.go | 92 + .../influxdb/cmd/influxd/run/config_test.go | 522 + .../influxdb/cmd/influxd/run/server.go | 655 + .../influxdb/cmd/integration_config_test.go | 96 + .../influxdb/cmd/integration_test.go | 91 + .../influxdata/influxdb/cmd/parse.go | 29 + .../influxdb/cmd/store/help/help.go | 40 + .../influxdata/influxdb/cmd/store/main.go | 65 + .../influxdb/cmd/store/query/query.go | 570 + .../influxdata/influxdb/coordinator/config.go | 63 + .../influxdb/coordinator/config_test.go | 24 + .../influxdb/coordinator/meta_client.go | 36 + .../influxdb/coordinator/meta_client_test.go | 162 + .../influxdb/coordinator/points_writer.go | 402 + .../points_writer_internal_test.go | 46 + .../coordinator/points_writer_test.go | 683 + .../influxdb/coordinator/shard_mapper.go | 255 + .../influxdb/coordinator/shard_mapper_test.go | 105 + .../coordinator/statement_executor.go | 1402 ++ .../coordinator/statement_executor_test.go | 616 + .../influxdata/influxdb/docker/entrypoint.sh | 12 + .../influxdb/docker/init-influxdb.sh | 120 + .../github.com/influxdata/influxdb/errors.go | 42 + 
.../influxdata/influxdb/etc/burn-in/.rvmrc | 1 + .../influxdata/influxdb/etc/burn-in/Gemfile | 4 + .../influxdb/etc/burn-in/Gemfile.lock | 14 + .../influxdb/etc/burn-in/burn-in.rb | 79 + .../influxdata/influxdb/etc/burn-in/log.rb | 23 + .../influxdb/etc/burn-in/random_gaussian.rb | 31 + .../influxdb/etc/burn-in/random_points.rb | 29 + .../influxdb/etc/config.sample.toml | 556 + .../github.com/influxdata/influxdb/gobuild.sh | 18 + .../influxdata/influxdb/importer/README.md | 214 + .../influxdb/importer/v8/importer.go | 274 + .../influxdata/influxdb/influxdb.go | 6 + .../influxdb/internal/authorizer.go | 38 + .../influxdata/influxdb/internal/cursors.go | 132 + .../influxdb/internal/meta_client.go | 179 + .../influxdb/internal/storage_store.go | 78 + .../influxdb/internal/tsdb_store.go | 151 + .../influxdata/influxdb/logger/config.go | 18 + .../influxdata/influxdb/logger/context.go | 24 + .../influxdata/influxdb/logger/fields.go | 111 + .../influxdata/influxdb/logger/logger.go | 127 + .../influxdata/influxdb/logger/style_guide.md | 192 + .../influxdata/influxdb/man/Makefile | 41 + .../influxdata/influxdb/man/README.md | 38 + .../influxdata/influxdb/man/footer.txt | 13 + .../influxdata/influxdb/man/influx.txt | 97 + .../influxdb/man/influx_inspect.txt | 104 + .../influxdata/influxdb/man/influx_stress.txt | 52 + .../influxdata/influxdb/man/influx_tsm.txt | 58 + .../influxdb/man/influxd-backup.txt | 51 + .../influxdb/man/influxd-config.txt | 40 + .../influxdb/man/influxd-restore.txt | 58 + .../influxdata/influxdb/man/influxd-run.txt | 32 + .../influxdb/man/influxd-version.txt | 17 + .../influxdata/influxdb/man/influxd.txt | 40 + .../influxdata/influxdb/models/consistency.go | 48 + .../influxdata/influxdb/models/inline_fnv.go | 32 + .../influxdb/models/inline_fnv_test.go | 29 + .../influxdb/models/inline_strconv_parse.go | 44 + .../models/inline_strconv_parse_test.go | 103 + .../influxdata/influxdb/models/points.go | 2463 +++ .../influxdb/models/points_internal_test.go | 17 + .../influxdata/influxdb/models/points_test.go | 2551 +++ .../influxdata/influxdb/models/rows.go | 62 + .../influxdata/influxdb/models/statistic.go | 42 + .../influxdb/models/statistic_test.go | 55 + .../influxdata/influxdb/models/time.go | 74 + .../influxdb/models/uint_support.go | 7 + .../influxdata/influxdb/monitor/README.md | 46 + .../influxdata/influxdb/monitor/build_info.go | 22 + .../influxdb/monitor/build_info_test.go | 43 + .../influxdata/influxdb/monitor/config.go | 63 + .../influxdb/monitor/config_test.go | 52 + .../monitor/diagnostics/diagnostics.go | 64 + .../influxdata/influxdb/monitor/go_runtime.go | 21 + .../influxdb/monitor/go_runtime_test.go | 39 + .../influxdata/influxdb/monitor/network.go | 23 + .../influxdb/monitor/network_test.go | 44 + .../influxdata/influxdb/monitor/reporter.go | 10 + .../influxdata/influxdb/monitor/service.go | 501 + .../influxdb/monitor/service_test.go | 482 + .../influxdata/influxdb/monitor/system.go | 29 + .../influxdb/monitor/system_test.go | 55 + .../github.com/influxdata/influxdb/nightly.sh | 57 + vendor/github.com/influxdata/influxdb/node.go | 121 + .../influxdata/influxdb/pkg/README.md | 5 + .../influxdb/pkg/binaryutil/binaryutil.go | 22 + .../influxdata/influxdb/pkg/bloom/bloom.go | 136 + .../influxdb/pkg/bloom/bloom_test.go | 189 + .../influxdb/pkg/bytesutil/bytesutil.go | 195 + .../influxdb/pkg/bytesutil/bytesutil_test.go | 281 + .../influxdata/influxdb/pkg/deep/equal.go | 185 + .../influxdata/influxdb/pkg/escape/bytes.go | 115 + .../influxdb/pkg/escape/bytes_test.go | 139 
+ .../influxdata/influxdb/pkg/escape/strings.go | 21 + .../influxdb/pkg/escape/strings_test.go | 115 + .../influxdb/pkg/estimator/hll/compressed.go | 173 + .../influxdb/pkg/estimator/hll/hll.go | 495 + .../influxdb/pkg/estimator/hll/hll_test.go | 683 + .../influxdb/pkg/estimator/sketch.go | 24 + .../influxdata/influxdb/pkg/file/file_unix.go | 20 + .../influxdb/pkg/file/file_windows.go | 18 + .../influxdata/influxdb/pkg/limiter/fixed.go | 46 + .../influxdb/pkg/limiter/fixed_test.go | 26 + .../influxdb/pkg/limiter/write_test.go | 34 + .../influxdata/influxdb/pkg/limiter/writer.go | 83 + .../influxdb/pkg/metrics/context.go | 20 + .../influxdb/pkg/metrics/counter.go | 28 + .../influxdb/pkg/metrics/counter_test.go | 14 + .../influxdb/pkg/metrics/default_registry.go | 36 + .../influxdb/pkg/metrics/descriptors.go | 64 + .../influxdb/pkg/metrics/descriptors_test.go | 21 + .../influxdata/influxdb/pkg/metrics/doc.go | 6 + .../influxdata/influxdb/pkg/metrics/group.go | 37 + .../influxdb/pkg/metrics/group_registry.go | 79 + .../influxdb/pkg/metrics/registry.go | 87 + .../influxdb/pkg/metrics/registry_test.go | 63 + .../influxdata/influxdb/pkg/metrics/timer.go | 34 + .../influxdb/pkg/metrics/timer_test.go | 14 + .../influxdb/pkg/mmap/mmap_solaris.go | 45 + .../influxdata/influxdb/pkg/mmap/mmap_test.go | 22 + .../influxdata/influxdb/pkg/mmap/mmap_unix.go | 49 + .../influxdb/pkg/mmap/mmap_windows.go | 56 + .../influxdata/influxdb/pkg/pool/bytes.go | 99 + .../influxdb/pkg/pool/bytes_test.go | 16 + .../influxdata/influxdb/pkg/pool/generic.go | 40 + .../influxdb/pkg/pprofutil/pprofutil.go | 36 + .../influxdata/influxdb/pkg/radix/buffer.go | 31 + .../influxdb/pkg/radix/buffer_test.go | 55 + .../influxdata/influxdb/pkg/radix/sort.go | 92 + .../influxdb/pkg/radix/sort_test.go | 27 + .../influxdata/influxdb/pkg/radix/tree.go | 428 + .../influxdb/pkg/radix/tree_test.go | 174 + .../influxdata/influxdb/pkg/rhh/rhh.go | 286 + .../influxdata/influxdb/pkg/rhh/rhh_test.go | 78 + .../influxdata/influxdb/pkg/slices/bytes.go | 37 + .../influxdb/pkg/slices/bytes_test.go | 78 + .../influxdb/pkg/slices/merge.gen.go | 398 + .../influxdb/pkg/slices/merge.gen.go.tmpl | 104 + .../influxdb/pkg/slices/merge_test.go | 101 + .../influxdata/influxdb/pkg/slices/strings.go | 50 + .../influxdb/pkg/slices/strings_test.go | 83 + .../influxdata/influxdb/pkg/slices/tmpldata | 22 + .../influxdb/pkg/snowflake/README.md | 38 + .../influxdata/influxdb/pkg/snowflake/gen.go | 107 + .../influxdb/pkg/snowflake/gen_test.go | 68 + .../influxdata/influxdb/pkg/tar/file_unix.go | 20 + .../influxdb/pkg/tar/file_windows.go | 19 + .../influxdata/influxdb/pkg/tar/stream.go | 163 + .../influxdb/pkg/testing/assert/assertions.go | 116 + .../influxdb/pkg/testing/assert/doc.go | 4 + .../influxdb/pkg/testing/assert/helper.go | 55 + .../influxdb/pkg/tlsconfig/tls_config.go | 128 + .../influxdb/pkg/tracing/context.go | 32 + .../influxdata/influxdb/pkg/tracing/doc.go | 26 + .../influxdb/pkg/tracing/fields/field.go | 117 + .../influxdb/pkg/tracing/fields/fields.go | 61 + .../pkg/tracing/fields/fields_test.go | 101 + .../influxdb/pkg/tracing/labels/labels.go | 74 + .../pkg/tracing/labels/labels_test.go | 101 + .../influxdb/pkg/tracing/rawspan.go | 18 + .../influxdata/influxdb/pkg/tracing/span.go | 84 + .../influxdb/pkg/tracing/spancontext.go | 27 + .../influxdata/influxdb/pkg/tracing/trace.go | 138 + .../influxdb/pkg/tracing/trace_encoding.go | 136 + .../influxdata/influxdb/pkg/tracing/tree.go | 74 + .../influxdata/influxdb/pkg/tracing/util.go | 26 + 
.../influxdb/pkg/tracing/wire/binary.go | 7 + .../influxdb/pkg/tracing/wire/binary.pb.go | 1292 ++ .../influxdb/pkg/tracing/wire/binary.proto | 44 + .../influxdb/prometheus/converters.go | 275 + .../influxdb/prometheus/remote/generate.go | 3 + .../influxdb/prometheus/remote/remote.pb.go | 1759 ++ .../influxdb/prometheus/remote/remote.proto | 70 + .../influxdb/query/call_iterator.go | 1531 ++ .../influxdb/query/call_iterator_test.go | 1213 ++ .../influxdata/influxdb/query/cast.go | 88 + .../influxdata/influxdb/query/compile.go | 1195 ++ .../influxdata/influxdb/query/compile_test.go | 433 + .../influxdata/influxdb/query/cursor.go | 432 + .../influxdata/influxdb/query/emitter.go | 81 + .../influxdb/query/execution_context.go | 113 + .../influxdata/influxdb/query/executor.go | 474 + .../influxdb/query/executor_test.go | 535 + .../influxdata/influxdb/query/explain.go | 86 + .../influxdb/query/functions.gen.go | 2433 +++ .../influxdb/query/functions.gen.go.tmpl | 219 + .../influxdata/influxdb/query/functions.go | 2148 +++ .../influxdb/query/functions_test.go | 499 + .../influxdb/query/internal/gota/README.md | 3 + .../influxdb/query/internal/gota/cmo.go | 127 + .../influxdb/query/internal/gota/cmo_test.go | 41 + .../influxdb/query/internal/gota/ema.go | 188 + .../influxdb/query/internal/gota/ema_test.go | 114 + .../influxdb/query/internal/gota/kama.go | 113 + .../influxdb/query/internal/gota/kama_test.go | 70 + .../influxdb/query/internal/gota/rsi.go | 48 + .../influxdb/query/internal/gota/rsi_test.go | 23 + .../influxdb/query/internal/gota/trix.go | 53 + .../influxdb/query/internal/gota/trix_test.go | 23 + .../query/internal/gota/utils_test.go | 10 + .../influxdb/query/internal/internal.pb.go | 606 + .../influxdb/query/internal/internal.proto | 82 + .../influxdata/influxdb/query/iterator.gen.go | 13329 ++++++++++++++++ .../influxdb/query/iterator.gen.go.tmpl | 1580 ++ .../influxdata/influxdb/query/iterator.go | 1423 ++ .../influxdb/query/iterator_mapper.go | 67 + .../influxdb/query/iterator_mapper_test.go | 74 + .../influxdb/query/iterator_test.go | 1784 +++ .../influxdata/influxdb/query/linear.go | 31 + .../influxdata/influxdb/query/math.go | 243 + .../influxdata/influxdb/query/math_test.go | 212 + .../influxdata/influxdb/query/monitor.go | 48 + .../influxdata/influxdb/query/monitor_test.go | 61 + .../influxdb/query/neldermead/neldermead.go | 239 + .../query/neldermead/neldermead_test.go | 64 + .../influxdata/influxdb/query/point.gen.go | 1139 ++ .../influxdb/query/point.gen.go.tmpl | 250 + .../influxdata/influxdb/query/point.go | 382 + .../influxdata/influxdb/query/point_test.go | 187 + .../influxdata/influxdb/query/query.go | 7 + .../influxdata/influxdb/query/result.go | 141 + .../influxdata/influxdb/query/select.go | 974 ++ .../influxdata/influxdb/query/select_test.go | 4197 +++++ .../influxdb/query/statement_rewriter.go | 487 + .../influxdb/query/statement_rewriter_test.go | 308 + .../influxdata/influxdb/query/subquery.go | 126 + .../influxdb/query/subquery_test.go | 322 + .../influxdata/influxdb/query/task_manager.go | 319 + .../influxdata/influxdb/query/tmpldata | 37 + .../influxdata/influxdb/releng/README.md | 39 + .../influxdb/releng/_go_versions.sh | 5 + .../influxdb/releng/raw-binaries/Dockerfile | 10 + .../influxdb/releng/raw-binaries/build.bash | 59 + .../influxdb/releng/source-tarball/Dockerfile | 20 + .../influxdb/releng/source-tarball/build.bash | 60 + .../influxdb/releng/unit-tests/Dockerfile | 15 + .../influxdb/releng/unit-tests/run.bash | 48 + 
.../influxdb/scripts/influxdb.service | 19 + .../influxdata/influxdb/scripts/init.sh | 233 + .../influxdata/influxdb/scripts/logrotate | 8 + .../influxdb/scripts/post-install.sh | 71 + .../influxdb/scripts/post-uninstall.sh | 54 + .../influxdb/scripts/pre-install.sh | 16 + .../influxdb/services/collectd/README.md | 40 + .../services/collectd/collectd_test.conf | 209 + .../influxdb/services/collectd/config.go | 177 + .../influxdb/services/collectd/config_test.go | 32 + .../influxdb/services/collectd/service.go | 506 + .../services/collectd/service_test.go | 731 + .../services/collectd/test_client/README.md | 3 + .../services/collectd/test_client/client.go | 70 + .../services/continuous_querier/config.go | 74 + .../continuous_querier/config_test.go | 46 + .../continuous_querier/continuous_queries.md | 235 + .../services/continuous_querier/service.go | 579 + .../continuous_querier/service_test.go | 851 + .../influxdb/services/graphite/README.md | 192 + .../influxdb/services/graphite/config.go | 288 + .../influxdb/services/graphite/config_test.go | 170 + .../influxdb/services/graphite/errors.go | 14 + .../influxdb/services/graphite/parser.go | 422 + .../influxdb/services/graphite/parser_test.go | 724 + .../influxdb/services/graphite/service.go | 489 + .../services/graphite/service_test.go | 306 + .../influxdb/services/httpd/config.go | 93 + .../influxdb/services/httpd/config_test.go | 58 + .../influxdb/services/httpd/gzip.go | 104 + .../influxdb/services/httpd/handler.go | 1729 ++ .../influxdb/services/httpd/handler_test.go | 1264 ++ .../influxdata/influxdb/services/httpd/io.go | 44 + .../influxdb/services/httpd/io_test.go | 31 + .../influxdb/services/httpd/listen.go | 51 + .../influxdb/services/httpd/listen_test.go | 108 + .../influxdb/services/httpd/pprof.go | 338 + .../influxdb/services/httpd/requests.go | 140 + .../services/httpd/response_logger.go | 167 + .../services/httpd/response_writer.go | 331 + .../services/httpd/response_writer_test.go | 208 + .../influxdb/services/httpd/service.go | 253 + .../influxdb/services/meta/client.go | 1063 ++ .../influxdb/services/meta/client_test.go | 1170 ++ .../influxdb/services/meta/config.go | 47 + .../influxdb/services/meta/config_test.go | 26 + .../influxdata/influxdb/services/meta/data.go | 1745 ++ .../services/meta/data_internal_test.go | 64 + .../influxdb/services/meta/data_test.go | 394 + .../influxdb/services/meta/errors.go | 119 + .../influxdb/services/meta/file_unix.go | 10 + .../influxdb/services/meta/file_windows.go | 14 + .../services/meta/internal/meta.pb.go | 2011 +++ .../services/meta/internal/meta.proto | 393 + .../influxdb/services/meta/meta_test.go | 7 + .../services/meta/query_authorizer.go | 117 + .../services/meta/write_authorizer.go | 29 + .../influxdb/services/opentsdb/README.md | 10 + .../influxdb/services/opentsdb/config.go | 131 + .../influxdb/services/opentsdb/config_test.go | 41 + .../influxdb/services/opentsdb/handler.go | 197 + .../influxdb/services/opentsdb/service.go | 489 + .../services/opentsdb/service_test.go | 292 + .../influxdb/services/precreator/README.md | 13 + .../influxdb/services/precreator/config.go | 67 + .../services/precreator/config_test.go | 67 + .../influxdb/services/precreator/service.go | 92 + .../services/precreator/service_test.go | 55 + .../influxdb/services/retention/config.go | 49 + .../services/retention/config_test.go | 51 + .../influxdb/services/retention/service.go | 162 + .../services/retention/service_test.go | 397 + .../influxdb/services/snapshotter/client.go | 214 + 
.../services/snapshotter/client_test.go | 89 + .../influxdb/services/snapshotter/service.go | 464 + .../services/snapshotter/service_test.go | 449 + .../services/storage/batch_cursor.gen.go | 927 ++ .../services/storage/batch_cursor.gen.go.tmpl | 210 + .../storage/batch_cursor.gen.go.tmpldata | 40 + .../influxdb/services/storage/batch_cursor.go | 159 + .../influxdb/services/storage/config.go | 41 + .../influxdb/services/storage/eval.go | 284 + .../influxdb/services/storage/expr.go | 25 + .../services/storage/group_resultset.go | 436 + .../services/storage/group_resultset_test.go | 213 + .../influxdb/services/storage/predicate.go | 140 + .../influxdb/services/storage/predicate.pb.go | 1347 ++ .../influxdb/services/storage/predicate.proto | 60 + .../services/storage/predicate_influxql.go | 321 + .../services/storage/predicate_test.go | 237 + .../services/storage/response_writer.gen.go | 431 + .../storage/response_writer.gen.go.tmpl | 92 + .../services/storage/response_writer.go | 198 + .../influxdb/services/storage/resultset.go | 60 + .../influxdb/services/storage/rpc_service.go | 222 + .../services/storage/series_cursor.go | 328 + .../services/storage/series_cursor_test.go | 71 + .../influxdb/services/storage/service.go | 75 + .../influxdb/services/storage/storage.go | 55 + .../influxdb/services/storage/storage.pb.go | 4273 +++++ .../influxdb/services/storage/storage.proto | 215 + .../services/storage/storage.yarpc.go | 191 + .../influxdb/services/storage/store.go | 167 + .../influxdb/services/storage/string.go | 16 + .../influxdb/services/storage/tagsbuffer.go | 30 + .../influxdb/services/storage/yarpc_server.go | 39 + .../influxdb/services/subscriber/config.go | 107 + .../services/subscriber/config_test.go | 111 + .../influxdb/services/subscriber/http.go | 81 + .../influxdb/services/subscriber/service.go | 456 + .../services/subscriber/service_test.go | 445 + .../influxdb/services/subscriber/udp.go | 42 + .../influxdb/services/udp/README.md | 128 + .../influxdb/services/udp/config.go | 127 + .../influxdb/services/udp/config_test.go | 46 + .../influxdb/services/udp/service.go | 314 + .../influxdb/services/udp/service_test.go | 157 + .../influxdata/influxdb/stress/DESIGN.md | 47 + .../influxdata/influxdb/stress/README.md | 115 + .../influxdata/influxdb/stress/basic.go | 691 + .../influxdata/influxdb/stress/config.go | 145 + .../influxdata/influxdb/stress/run.go | 335 + .../influxdata/influxdb/stress/stress.toml | 54 + .../influxdata/influxdb/stress/stress_test.go | 594 + .../stress/stress_test_server/server.go | 74 + .../influxdata/influxdb/stress/template.go | 64 + .../influxdata/influxdb/stress/util.go | 132 + .../influxdata/influxdb/stress/v2/DESIGN.md | 164 + .../influxdata/influxdb/stress/v2/README.md | 177 + .../influxdb/stress/v2/influx_stress_v2.png | Bin 0 -> 93150 bytes .../influxdb/stress/v2/iql/default.iql | 13 + .../influxdb/stress/v2/iql/file.iql | 45 + .../influxdata/influxdb/stress/v2/main.go | 59 + .../influxdb/stress/v2/statement/exec.go | 32 + .../influxdb/stress/v2/statement/exec_test.go | 41 + .../influxdb/stress/v2/statement/function.go | 176 + .../stress/v2/statement/function_test.go | 139 + .../influxdb/stress/v2/statement/go.go | 40 + .../influxdb/stress/v2/statement/go_test.go | 41 + .../influxdb/stress/v2/statement/influxql.go | 69 + .../stress/v2/statement/influxql_test.go | 44 + .../influxdb/stress/v2/statement/insert.go | 214 + .../stress/v2/statement/insert_test.go | 50 + .../influxdb/stress/v2/statement/query.go | 161 + .../stress/v2/statement/query_test.go | 
42 + .../influxdb/stress/v2/statement/report.go | 237 + .../stress/v2/statement/report_test.go | 210 + .../stress/v2/statement/response_time.go | 40 + .../stress/v2/statement/response_time_test.go | 45 + .../influxdb/stress/v2/statement/set.go | 59 + .../influxdb/stress/v2/statement/set_test.go | 92 + .../influxdb/stress/v2/statement/statement.go | 32 + .../influxdb/stress/v2/statement/template.go | 47 + .../stress/v2/statement/template_test.go | 72 + .../influxdb/stress/v2/statement/timestamp.go | 51 + .../stress/v2/statement/timestamp_test.go | 31 + .../influxdb/stress/v2/statement/wait.go | 32 + .../influxdb/stress/v2/statement/wait_test.go | 41 + .../stress/v2/stress_client/commune.go | 58 + .../stress/v2/stress_client/commune_test.go | 57 + .../stress/v2/stress_client/directive.go | 19 + .../stress/v2/stress_client/directive_test.go | 20 + .../stress/v2/stress_client/package.go | 22 + .../stress/v2/stress_client/package_test.go | 16 + .../stress/v2/stress_client/reporting.go | 95 + .../stress/v2/stress_client/reporting_test.go | 100 + .../stress/v2/stress_client/response.go | 50 + .../stress/v2/stress_client/response_test.go | 20 + .../stress/v2/stress_client/stressTest.go | 175 + .../v2/stress_client/stressTest_test.go | 32 + .../stress/v2/stress_client/stress_client.go | 175 + .../v2/stress_client/stress_client_query.go | 69 + .../v2/stress_client/stress_client_write.go | 112 + .../stress/v2/stress_client/tracer.go | 19 + .../stress/v2/stress_client/tracer_test.go | 17 + .../influxdb/stress/v2/stress_client/util.go | 89 + .../influxdb/stress/v2/stressql/parser.go | 158 + .../stress/v2/stressql/parser_test.go | 16 + .../stress/v2/stressql/statement/parser.go | 687 + .../v2/stressql/statement/parser_test.go | 243 + .../github.com/influxdata/influxdb/tcp/mux.go | 309 + .../influxdata/influxdb/tcp/mux_test.go | 212 + vendor/github.com/influxdata/influxdb/test.sh | 164 + .../influxdata/influxdb/tests/README.md | 174 + .../influxdb/tests/backup_restore_test.go | 284 + .../influxdb/tests/server_bench_test.go | 136 + .../influxdb/tests/server_concurrent_test.go | 170 + .../influxdb/tests/server_delete_test.go | 625 + .../influxdb/tests/server_helpers.go | 743 + .../influxdata/influxdb/tests/server_suite.go | 598 + .../influxdata/influxdb/tests/server_test.go | 9707 +++++++++++ .../influxdata/influxdb/toml/toml.go | 279 + .../influxdata/influxdb/toml/toml_test.go | 247 + .../influxdata/influxdb/tsdb/README.md | 91 + .../influxdata/influxdb/tsdb/batcher.go | 157 + .../influxdata/influxdb/tsdb/batcher_test.go | 146 + .../influxdata/influxdb/tsdb/config.go | 191 + .../influxdata/influxdb/tsdb/config_test.go | 141 + .../influxdata/influxdb/tsdb/cursor.go | 71 + .../influxdata/influxdb/tsdb/doc.go | 5 + .../influxdata/influxdb/tsdb/engine.go | 221 + .../influxdata/influxdb/tsdb/engine/engine.go | 9 + .../influxdb/tsdb/engine/tsm1/DESIGN.md | 451 + .../tsdb/engine/tsm1/batch_cursor.gen.go | 1552 ++ .../tsdb/engine/tsm1/batch_cursor.gen.go.tmpl | 326 + .../influxdb/tsdb/engine/tsm1/bit_reader.go | 133 + .../tsdb/engine/tsm1/bit_reader_test.go | 180 + .../influxdb/tsdb/engine/tsm1/bool.go | 169 + .../influxdb/tsdb/engine/tsm1/bool_test.go | 161 + .../influxdb/tsdb/engine/tsm1/cache.go | 832 + .../tsdb/engine/tsm1/cache_race_test.go | 206 + .../influxdb/tsdb/engine/tsm1/cache_test.go | 970 ++ .../influxdb/tsdb/engine/tsm1/compact.gen.go | 1004 ++ .../tsdb/engine/tsm1/compact.gen.go.tmpl | 206 + .../tsdb/engine/tsm1/compact.gen.go.tmpldata | 22 + .../influxdb/tsdb/engine/tsm1/compact.go | 1786 +++ 
.../influxdb/tsdb/engine/tsm1/compact_test.go | 2887 ++++ .../tsdb/engine/tsm1/cursor_iterator.gen.go | 115 + .../engine/tsm1/cursor_iterator.gen.go.tmpl | 33 + .../tsdb/engine/tsm1/cursor_iterator.go | 80 + .../influxdb/tsdb/engine/tsm1/digest.go | 248 + .../tsdb/engine/tsm1/digest_reader.go | 97 + .../influxdb/tsdb/engine/tsm1/digest_test.go | 476 + .../tsdb/engine/tsm1/digest_writer.go | 137 + .../tsdb/engine/tsm1/digest_writer_test.go | 198 + .../influxdb/tsdb/engine/tsm1/encoding.gen.go | 1534 ++ .../tsdb/engine/tsm1/encoding.gen.go.tmpl | 276 + .../tsdb/engine/tsm1/encoding.gen.go.tmpldata | 38 + .../tsdb/engine/tsm1/encoding.gen_test.go | 212 + .../influxdb/tsdb/engine/tsm1/encoding.go | 1034 ++ .../tsdb/engine/tsm1/encoding_test.go | 1726 ++ .../influxdb/tsdb/engine/tsm1/engine.go | 3052 ++++ .../tsdb/engine/tsm1/engine_cursor.go | 11 + .../influxdb/tsdb/engine/tsm1/engine_test.go | 2600 +++ .../tsdb/engine/tsm1/file_store.gen.go | 907 ++ .../tsdb/engine/tsm1/file_store.gen.go.tmpl | 185 + .../engine/tsm1/file_store.gen.go.tmpldata | 22 + .../influxdb/tsdb/engine/tsm1/file_store.go | 1427 ++ .../engine/tsm1/file_store_key_iterator.go | 112 + .../tsm1/file_store_key_iterator_test.go | 198 + .../tsdb/engine/tsm1/file_store_observer.go | 6 + .../tsdb/engine/tsm1/file_store_test.go | 2981 ++++ .../influxdb/tsdb/engine/tsm1/file_unix.go | 20 + .../influxdb/tsdb/engine/tsm1/file_windows.go | 18 + .../influxdb/tsdb/engine/tsm1/float.go | 280 + .../influxdb/tsdb/engine/tsm1/float_test.go | 286 + .../influxdb/tsdb/engine/tsm1/int.go | 324 + .../influxdb/tsdb/engine/tsm1/int_test.go | 646 + .../influxdb/tsdb/engine/tsm1/iterator.gen.go | 2521 +++ .../tsdb/engine/tsm1/iterator.gen.go.tmpl | 609 + .../tsdb/engine/tsm1/iterator.gen.go.tmpldata | 37 + .../influxdb/tsdb/engine/tsm1/iterator.go | 218 + .../tsdb/engine/tsm1/iterator_test.go | 161 + .../influxdb/tsdb/engine/tsm1/mmap_unix.go | 43 + .../influxdb/tsdb/engine/tsm1/mmap_windows.go | 133 + .../influxdb/tsdb/engine/tsm1/pools.go | 27 + .../influxdb/tsdb/engine/tsm1/reader.go | 1791 +++ .../influxdb/tsdb/engine/tsm1/reader_test.go | 1938 +++ .../influxdb/tsdb/engine/tsm1/ring.go | 317 + .../influxdb/tsdb/engine/tsm1/ring_test.go | 144 + .../influxdb/tsdb/engine/tsm1/scheduler.go | 79 + .../tsdb/engine/tsm1/scheduler_test.go | 74 + .../influxdb/tsdb/engine/tsm1/string.go | 129 + .../influxdb/tsdb/engine/tsm1/string_test.go | 177 + .../influxdb/tsdb/engine/tsm1/timestamp.go | 414 + .../tsdb/engine/tsm1/timestamp_test.go | 604 + .../influxdb/tsdb/engine/tsm1/tombstone.go | 721 + .../tsdb/engine/tsm1/tombstone_test.go | 361 + .../influxdb/tsdb/engine/tsm1/wal.go | 1238 ++ .../influxdb/tsdb/engine/tsm1/wal_test.go | 864 + .../influxdb/tsdb/engine/tsm1/writer.go | 824 + .../influxdb/tsdb/engine/tsm1/writer_test.go | 623 + .../influxdb/tsdb/field_validator.go | 70 + .../influxdata/influxdb/tsdb/index.go | 2712 ++++ .../influxdata/influxdb/tsdb/index/index.go | 6 + .../influxdb/tsdb/index/inmem/inmem.go | 1307 ++ .../influxdb/tsdb/index/inmem/inmem_test.go | 145 + .../influxdb/tsdb/index/inmem/meta.go | 1523 ++ .../influxdb/tsdb/index/inmem/meta_test.go | 243 + .../influxdb/tsdb/index/internal/file_set.go | 69 + .../influxdb/tsdb/index/tsi1/doc.go | 238 + .../influxdb/tsdb/index/tsi1/file_set.go | 596 + .../influxdb/tsdb/index/tsi1/file_set_test.go | 309 + .../influxdb/tsdb/index/tsi1/index.go | 1013 ++ .../influxdb/tsdb/index/tsi1/index_file.go | 522 + .../tsdb/index/tsi1/index_file_test.go | 187 + .../influxdb/tsdb/index/tsi1/index_files.go | 
464 + .../tsdb/index/tsi1/index_files_test.go | 56 + .../influxdb/tsdb/index/tsi1/index_test.go | 413 + .../influxdb/tsdb/index/tsi1/log_file.go | 1508 ++ .../influxdb/tsdb/index/tsi1/log_file_test.go | 461 + .../tsdb/index/tsi1/measurement_block.go | 652 + .../tsdb/index/tsi1/measurement_block_test.go | 181 + .../influxdb/tsdb/index/tsi1/partition.go | 1319 ++ .../tsdb/index/tsi1/partition_test.go | 119 + .../influxdb/tsdb/index/tsi1/tag_block.go | 786 + .../tsdb/index/tsi1/tag_block_test.go | 149 + .../influxdb/tsdb/index/tsi1/tsi1.go | 554 + .../influxdb/tsdb/index/tsi1/tsi1_test.go | 321 + .../influxdata/influxdb/tsdb/index_test.go | 664 + .../influxdb/tsdb/internal/meta.pb.go | 174 + .../influxdb/tsdb/internal/meta.proto | 33 + .../influxdata/influxdb/tsdb/meta.go | 98 + .../influxdata/influxdb/tsdb/meta_test.go | 261 + .../influxdata/influxdb/tsdb/series_cursor.go | 155 + .../influxdata/influxdb/tsdb/series_file.go | 494 + .../influxdb/tsdb/series_file_test.go | 217 + .../influxdata/influxdb/tsdb/series_index.go | 373 + .../influxdb/tsdb/series_index_test.go | 132 + .../influxdb/tsdb/series_partition.go | 719 + .../influxdb/tsdb/series_segment.go | 408 + .../influxdb/tsdb/series_segment_test.go | 258 + .../influxdata/influxdb/tsdb/series_set.go | 210 + .../influxdb/tsdb/series_set_test.go | 311 + .../influxdata/influxdb/tsdb/shard.go | 1990 +++ .../influxdb/tsdb/shard_internal_test.go | 268 + .../influxdata/influxdb/tsdb/shard_test.go | 2188 +++ .../influxdata/influxdb/tsdb/store.go | 1978 +++ .../influxdb/tsdb/store_internal_test.go | 167 + .../influxdata/influxdb/tsdb/store_test.go | 2143 +++ .../tsdb/testdata/line-protocol-1M.txt.gz | Bin 0 -> 3116981 bytes .../influxdata/influxdb/uuid/uuid.go | 96 + .../influxdata/influxdb/write-gdm-deps.sh | 2 + .../natefinch/lumberjack.v2/.gitignore | 23 + .../gopkg.in/natefinch/lumberjack.v2/LICENSE | 21 + .../natefinch/lumberjack.v2/README.md | 174 + .../gopkg.in/natefinch/lumberjack.v2/chown.go | 11 + .../natefinch/lumberjack.v2/chown_linux.go | 19 + .../natefinch/lumberjack.v2/example_test.go | 18 + .../natefinch/lumberjack.v2/linux_test.go | 205 + .../natefinch/lumberjack.v2/lumberjack.go | 541 + .../lumberjack.v2/lumberjack_test.go | 816 + .../natefinch/lumberjack.v2/rotate_test.go | 27 + .../natefinch/lumberjack.v2/testing_test.go | 91 + 695 files changed, 228301 insertions(+) create mode 100644 glide.lock create mode 100644 glide.yaml create mode 100644 vendor/github.com/influxdata/influxdb/.circleci/config.yml create mode 100644 vendor/github.com/influxdata/influxdb/.dockerignore create mode 100644 vendor/github.com/influxdata/influxdb/.github/ISSUE_TEMPLATE.md create mode 100644 vendor/github.com/influxdata/influxdb/.github/PULL_REQUEST_TEMPLATE.md create mode 100644 vendor/github.com/influxdata/influxdb/.gitignore create mode 100644 vendor/github.com/influxdata/influxdb/.hooks/pre-commit create mode 100644 vendor/github.com/influxdata/influxdb/.mention-bot create mode 100644 vendor/github.com/influxdata/influxdb/CHANGELOG.md create mode 100644 vendor/github.com/influxdata/influxdb/CODING_GUIDELINES.md create mode 100644 vendor/github.com/influxdata/influxdb/CONTRIBUTING.md create mode 100644 vendor/github.com/influxdata/influxdb/Dockerfile create mode 100644 vendor/github.com/influxdata/influxdb/Dockerfile_build_ubuntu32 create mode 100644 vendor/github.com/influxdata/influxdb/Dockerfile_build_ubuntu64 create mode 100644 vendor/github.com/influxdata/influxdb/Dockerfile_build_ubuntu64_git create mode 100644 
vendor/github.com/influxdata/influxdb/Dockerfile_build_ubuntu64_go1.11 create mode 100644 vendor/github.com/influxdata/influxdb/Dockerfile_jenkins_ubuntu32 create mode 100644 vendor/github.com/influxdata/influxdb/Dockerfile_test_ubuntu32 create mode 100644 vendor/github.com/influxdata/influxdb/Godeps create mode 100644 vendor/github.com/influxdata/influxdb/Gopkg.lock create mode 100644 vendor/github.com/influxdata/influxdb/Gopkg.toml create mode 100644 vendor/github.com/influxdata/influxdb/Jenkinsfile create mode 100644 vendor/github.com/influxdata/influxdb/LICENSE create mode 100644 vendor/github.com/influxdata/influxdb/LICENSE_OF_DEPENDENCIES.md create mode 100644 vendor/github.com/influxdata/influxdb/QUERIES.md create mode 100644 vendor/github.com/influxdata/influxdb/README.md create mode 100644 vendor/github.com/influxdata/influxdb/TODO.md create mode 100644 vendor/github.com/influxdata/influxdb/appveyor.yml create mode 100755 vendor/github.com/influxdata/influxdb/build.py create mode 100755 vendor/github.com/influxdata/influxdb/build.sh create mode 100644 vendor/github.com/influxdata/influxdb/client/README.md create mode 100644 vendor/github.com/influxdata/influxdb/client/example_test.go create mode 100644 vendor/github.com/influxdata/influxdb/client/influxdb.go create mode 100644 vendor/github.com/influxdata/influxdb/client/influxdb_test.go create mode 100644 vendor/github.com/influxdata/influxdb/client/v2/client.go create mode 100644 vendor/github.com/influxdata/influxdb/client/v2/client_test.go create mode 100644 vendor/github.com/influxdata/influxdb/client/v2/example_test.go create mode 100644 vendor/github.com/influxdata/influxdb/client/v2/udp.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influx-tools/export/README.md create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influx-tools/export/bucket.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influx-tools/export/bucket_test.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influx-tools/export/command.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influx-tools/export/exporter.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influx-tools/help/help.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influx-tools/importer/README.md create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influx-tools/importer/command.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influx-tools/importer/importer.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influx-tools/importer/series_writer.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influx-tools/importer/shard_writer.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influx-tools/internal/errlist/errlist.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influx-tools/internal/format/binary/binary.pb.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influx-tools/internal/format/binary/binary.proto create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influx-tools/internal/format/binary/common.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influx-tools/internal/format/binary/messagetype_string.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influx-tools/internal/format/binary/reader.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influx-tools/internal/format/binary/reader_test.go create mode 100644 
vendor/github.com/influxdata/influxdb/cmd/influx-tools/internal/format/binary/writer.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influx-tools/internal/format/binary/writer_test.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influx-tools/internal/format/conflictwriter.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influx-tools/internal/format/line/writer.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influx-tools/internal/format/text/writer.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influx-tools/internal/format/writer.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influx-tools/internal/storage/resultset.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influx-tools/internal/storage/series_cursor.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influx-tools/internal/storage/store.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influx-tools/internal/tlv/tlv.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influx-tools/internal/tlv/tlv_test.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influx-tools/main.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influx-tools/server/server.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influx/cli/cli.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influx/cli/cli_internal_test.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influx/cli/cli_test.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influx/cli/parser.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influx/cli/parser_internal_test.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influx/main.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influx_inspect/README.md create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influx_inspect/buildtsi/buildtsi.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influx_inspect/deletetsm/deletetsm.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influx_inspect/dumptsi/dumptsi.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influx_inspect/dumptsm/dumptsm.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influx_inspect/dumptsm/dumptsm_test.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influx_inspect/dumptsmwal/dumptsmwal.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influx_inspect/export/export.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influx_inspect/export/export_test.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influx_inspect/help/help.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influx_inspect/help/help_test.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influx_inspect/main.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influx_inspect/report/report.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influx_inspect/report/report_test.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influx_inspect/reporttsi/report.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influx_inspect/verify/seriesfile/command.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influx_inspect/verify/seriesfile/verify.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influx_inspect/verify/seriesfile/verify_test.go create mode 
100644 vendor/github.com/influxdata/influxdb/cmd/influx_inspect/verify/tsm/verify.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influx_inspect/verify/tsm/verify_test.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influx_stress/README.md create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influx_stress/examples/template.toml create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influx_stress/influx_stress.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influx_tsm/README.md create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influx_tsm/b1/reader.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influx_tsm/bz1/reader.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influx_tsm/converter.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influx_tsm/main.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influx_tsm/stats/stats.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influx_tsm/tracker.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influx_tsm/tsdb/codec.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influx_tsm/tsdb/database.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influx_tsm/tsdb/internal/meta.pb.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influx_tsm/tsdb/types.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influxd/backup/backup.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influxd/backup_util/backup_util.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influxd/backup_util/internal/data.pb.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influxd/backup_util/internal/data.proto create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influxd/help/help.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influxd/main.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influxd/restore/restore.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influxd/run/command.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influxd/run/command_test.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influxd/run/config.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influxd/run/config_command.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influxd/run/config_test.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influxd/run/server.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/integration_config_test.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/integration_test.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/parse.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/store/help/help.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/store/main.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/store/query/query.go create mode 100644 vendor/github.com/influxdata/influxdb/coordinator/config.go create mode 100644 vendor/github.com/influxdata/influxdb/coordinator/config_test.go create mode 100644 vendor/github.com/influxdata/influxdb/coordinator/meta_client.go create mode 100644 vendor/github.com/influxdata/influxdb/coordinator/meta_client_test.go create mode 100644 vendor/github.com/influxdata/influxdb/coordinator/points_writer.go create mode 100644 vendor/github.com/influxdata/influxdb/coordinator/points_writer_internal_test.go 
create mode 100644 vendor/github.com/influxdata/influxdb/coordinator/points_writer_test.go create mode 100644 vendor/github.com/influxdata/influxdb/coordinator/shard_mapper.go create mode 100644 vendor/github.com/influxdata/influxdb/coordinator/shard_mapper_test.go create mode 100644 vendor/github.com/influxdata/influxdb/coordinator/statement_executor.go create mode 100644 vendor/github.com/influxdata/influxdb/coordinator/statement_executor_test.go create mode 100755 vendor/github.com/influxdata/influxdb/docker/entrypoint.sh create mode 100755 vendor/github.com/influxdata/influxdb/docker/init-influxdb.sh create mode 100644 vendor/github.com/influxdata/influxdb/errors.go create mode 100644 vendor/github.com/influxdata/influxdb/etc/burn-in/.rvmrc create mode 100644 vendor/github.com/influxdata/influxdb/etc/burn-in/Gemfile create mode 100644 vendor/github.com/influxdata/influxdb/etc/burn-in/Gemfile.lock create mode 100644 vendor/github.com/influxdata/influxdb/etc/burn-in/burn-in.rb create mode 100644 vendor/github.com/influxdata/influxdb/etc/burn-in/log.rb create mode 100644 vendor/github.com/influxdata/influxdb/etc/burn-in/random_gaussian.rb create mode 100644 vendor/github.com/influxdata/influxdb/etc/burn-in/random_points.rb create mode 100644 vendor/github.com/influxdata/influxdb/etc/config.sample.toml create mode 100755 vendor/github.com/influxdata/influxdb/gobuild.sh create mode 100644 vendor/github.com/influxdata/influxdb/importer/README.md create mode 100644 vendor/github.com/influxdata/influxdb/importer/v8/importer.go create mode 100644 vendor/github.com/influxdata/influxdb/influxdb.go create mode 100644 vendor/github.com/influxdata/influxdb/internal/authorizer.go create mode 100644 vendor/github.com/influxdata/influxdb/internal/cursors.go create mode 100644 vendor/github.com/influxdata/influxdb/internal/meta_client.go create mode 100644 vendor/github.com/influxdata/influxdb/internal/storage_store.go create mode 100644 vendor/github.com/influxdata/influxdb/internal/tsdb_store.go create mode 100644 vendor/github.com/influxdata/influxdb/logger/config.go create mode 100644 vendor/github.com/influxdata/influxdb/logger/context.go create mode 100644 vendor/github.com/influxdata/influxdb/logger/fields.go create mode 100644 vendor/github.com/influxdata/influxdb/logger/logger.go create mode 100644 vendor/github.com/influxdata/influxdb/logger/style_guide.md create mode 100644 vendor/github.com/influxdata/influxdb/man/Makefile create mode 100644 vendor/github.com/influxdata/influxdb/man/README.md create mode 100644 vendor/github.com/influxdata/influxdb/man/footer.txt create mode 100644 vendor/github.com/influxdata/influxdb/man/influx.txt create mode 100644 vendor/github.com/influxdata/influxdb/man/influx_inspect.txt create mode 100644 vendor/github.com/influxdata/influxdb/man/influx_stress.txt create mode 100644 vendor/github.com/influxdata/influxdb/man/influx_tsm.txt create mode 100644 vendor/github.com/influxdata/influxdb/man/influxd-backup.txt create mode 100644 vendor/github.com/influxdata/influxdb/man/influxd-config.txt create mode 100644 vendor/github.com/influxdata/influxdb/man/influxd-restore.txt create mode 100644 vendor/github.com/influxdata/influxdb/man/influxd-run.txt create mode 100644 vendor/github.com/influxdata/influxdb/man/influxd-version.txt create mode 100644 vendor/github.com/influxdata/influxdb/man/influxd.txt create mode 100644 vendor/github.com/influxdata/influxdb/models/consistency.go create mode 100644 vendor/github.com/influxdata/influxdb/models/inline_fnv.go create 
mode 100644 vendor/github.com/influxdata/influxdb/models/inline_fnv_test.go create mode 100644 vendor/github.com/influxdata/influxdb/models/inline_strconv_parse.go create mode 100644 vendor/github.com/influxdata/influxdb/models/inline_strconv_parse_test.go create mode 100644 vendor/github.com/influxdata/influxdb/models/points.go create mode 100644 vendor/github.com/influxdata/influxdb/models/points_internal_test.go create mode 100644 vendor/github.com/influxdata/influxdb/models/points_test.go create mode 100644 vendor/github.com/influxdata/influxdb/models/rows.go create mode 100644 vendor/github.com/influxdata/influxdb/models/statistic.go create mode 100644 vendor/github.com/influxdata/influxdb/models/statistic_test.go create mode 100644 vendor/github.com/influxdata/influxdb/models/time.go create mode 100644 vendor/github.com/influxdata/influxdb/models/uint_support.go create mode 100644 vendor/github.com/influxdata/influxdb/monitor/README.md create mode 100644 vendor/github.com/influxdata/influxdb/monitor/build_info.go create mode 100644 vendor/github.com/influxdata/influxdb/monitor/build_info_test.go create mode 100644 vendor/github.com/influxdata/influxdb/monitor/config.go create mode 100644 vendor/github.com/influxdata/influxdb/monitor/config_test.go create mode 100644 vendor/github.com/influxdata/influxdb/monitor/diagnostics/diagnostics.go create mode 100644 vendor/github.com/influxdata/influxdb/monitor/go_runtime.go create mode 100644 vendor/github.com/influxdata/influxdb/monitor/go_runtime_test.go create mode 100644 vendor/github.com/influxdata/influxdb/monitor/network.go create mode 100644 vendor/github.com/influxdata/influxdb/monitor/network_test.go create mode 100644 vendor/github.com/influxdata/influxdb/monitor/reporter.go create mode 100644 vendor/github.com/influxdata/influxdb/monitor/service.go create mode 100644 vendor/github.com/influxdata/influxdb/monitor/service_test.go create mode 100644 vendor/github.com/influxdata/influxdb/monitor/system.go create mode 100644 vendor/github.com/influxdata/influxdb/monitor/system_test.go create mode 100755 vendor/github.com/influxdata/influxdb/nightly.sh create mode 100644 vendor/github.com/influxdata/influxdb/node.go create mode 100644 vendor/github.com/influxdata/influxdb/pkg/README.md create mode 100644 vendor/github.com/influxdata/influxdb/pkg/binaryutil/binaryutil.go create mode 100644 vendor/github.com/influxdata/influxdb/pkg/bloom/bloom.go create mode 100644 vendor/github.com/influxdata/influxdb/pkg/bloom/bloom_test.go create mode 100644 vendor/github.com/influxdata/influxdb/pkg/bytesutil/bytesutil.go create mode 100644 vendor/github.com/influxdata/influxdb/pkg/bytesutil/bytesutil_test.go create mode 100644 vendor/github.com/influxdata/influxdb/pkg/deep/equal.go create mode 100644 vendor/github.com/influxdata/influxdb/pkg/escape/bytes.go create mode 100644 vendor/github.com/influxdata/influxdb/pkg/escape/bytes_test.go create mode 100644 vendor/github.com/influxdata/influxdb/pkg/escape/strings.go create mode 100644 vendor/github.com/influxdata/influxdb/pkg/escape/strings_test.go create mode 100644 vendor/github.com/influxdata/influxdb/pkg/estimator/hll/compressed.go create mode 100644 vendor/github.com/influxdata/influxdb/pkg/estimator/hll/hll.go create mode 100644 vendor/github.com/influxdata/influxdb/pkg/estimator/hll/hll_test.go create mode 100644 vendor/github.com/influxdata/influxdb/pkg/estimator/sketch.go create mode 100644 vendor/github.com/influxdata/influxdb/pkg/file/file_unix.go create mode 100644 
vendor/github.com/influxdata/influxdb/pkg/file/file_windows.go create mode 100644 vendor/github.com/influxdata/influxdb/pkg/limiter/fixed.go create mode 100644 vendor/github.com/influxdata/influxdb/pkg/limiter/fixed_test.go create mode 100644 vendor/github.com/influxdata/influxdb/pkg/limiter/write_test.go create mode 100644 vendor/github.com/influxdata/influxdb/pkg/limiter/writer.go create mode 100644 vendor/github.com/influxdata/influxdb/pkg/metrics/context.go create mode 100644 vendor/github.com/influxdata/influxdb/pkg/metrics/counter.go create mode 100644 vendor/github.com/influxdata/influxdb/pkg/metrics/counter_test.go create mode 100644 vendor/github.com/influxdata/influxdb/pkg/metrics/default_registry.go create mode 100644 vendor/github.com/influxdata/influxdb/pkg/metrics/descriptors.go create mode 100644 vendor/github.com/influxdata/influxdb/pkg/metrics/descriptors_test.go create mode 100644 vendor/github.com/influxdata/influxdb/pkg/metrics/doc.go create mode 100644 vendor/github.com/influxdata/influxdb/pkg/metrics/group.go create mode 100644 vendor/github.com/influxdata/influxdb/pkg/metrics/group_registry.go create mode 100644 vendor/github.com/influxdata/influxdb/pkg/metrics/registry.go create mode 100644 vendor/github.com/influxdata/influxdb/pkg/metrics/registry_test.go create mode 100644 vendor/github.com/influxdata/influxdb/pkg/metrics/timer.go create mode 100644 vendor/github.com/influxdata/influxdb/pkg/metrics/timer_test.go create mode 100644 vendor/github.com/influxdata/influxdb/pkg/mmap/mmap_solaris.go create mode 100644 vendor/github.com/influxdata/influxdb/pkg/mmap/mmap_test.go create mode 100644 vendor/github.com/influxdata/influxdb/pkg/mmap/mmap_unix.go create mode 100644 vendor/github.com/influxdata/influxdb/pkg/mmap/mmap_windows.go create mode 100644 vendor/github.com/influxdata/influxdb/pkg/pool/bytes.go create mode 100644 vendor/github.com/influxdata/influxdb/pkg/pool/bytes_test.go create mode 100644 vendor/github.com/influxdata/influxdb/pkg/pool/generic.go create mode 100644 vendor/github.com/influxdata/influxdb/pkg/pprofutil/pprofutil.go create mode 100644 vendor/github.com/influxdata/influxdb/pkg/radix/buffer.go create mode 100644 vendor/github.com/influxdata/influxdb/pkg/radix/buffer_test.go create mode 100644 vendor/github.com/influxdata/influxdb/pkg/radix/sort.go create mode 100644 vendor/github.com/influxdata/influxdb/pkg/radix/sort_test.go create mode 100644 vendor/github.com/influxdata/influxdb/pkg/radix/tree.go create mode 100644 vendor/github.com/influxdata/influxdb/pkg/radix/tree_test.go create mode 100644 vendor/github.com/influxdata/influxdb/pkg/rhh/rhh.go create mode 100644 vendor/github.com/influxdata/influxdb/pkg/rhh/rhh_test.go create mode 100644 vendor/github.com/influxdata/influxdb/pkg/slices/bytes.go create mode 100644 vendor/github.com/influxdata/influxdb/pkg/slices/bytes_test.go create mode 100644 vendor/github.com/influxdata/influxdb/pkg/slices/merge.gen.go create mode 100644 vendor/github.com/influxdata/influxdb/pkg/slices/merge.gen.go.tmpl create mode 100644 vendor/github.com/influxdata/influxdb/pkg/slices/merge_test.go create mode 100644 vendor/github.com/influxdata/influxdb/pkg/slices/strings.go create mode 100644 vendor/github.com/influxdata/influxdb/pkg/slices/strings_test.go create mode 100644 vendor/github.com/influxdata/influxdb/pkg/slices/tmpldata create mode 100644 vendor/github.com/influxdata/influxdb/pkg/snowflake/README.md create mode 100644 vendor/github.com/influxdata/influxdb/pkg/snowflake/gen.go create mode 100644 
vendor/github.com/influxdata/influxdb/pkg/snowflake/gen_test.go create mode 100644 vendor/github.com/influxdata/influxdb/pkg/tar/file_unix.go create mode 100644 vendor/github.com/influxdata/influxdb/pkg/tar/file_windows.go create mode 100644 vendor/github.com/influxdata/influxdb/pkg/tar/stream.go create mode 100644 vendor/github.com/influxdata/influxdb/pkg/testing/assert/assertions.go create mode 100644 vendor/github.com/influxdata/influxdb/pkg/testing/assert/doc.go create mode 100644 vendor/github.com/influxdata/influxdb/pkg/testing/assert/helper.go create mode 100644 vendor/github.com/influxdata/influxdb/pkg/tlsconfig/tls_config.go create mode 100644 vendor/github.com/influxdata/influxdb/pkg/tracing/context.go create mode 100644 vendor/github.com/influxdata/influxdb/pkg/tracing/doc.go create mode 100644 vendor/github.com/influxdata/influxdb/pkg/tracing/fields/field.go create mode 100644 vendor/github.com/influxdata/influxdb/pkg/tracing/fields/fields.go create mode 100644 vendor/github.com/influxdata/influxdb/pkg/tracing/fields/fields_test.go create mode 100644 vendor/github.com/influxdata/influxdb/pkg/tracing/labels/labels.go create mode 100644 vendor/github.com/influxdata/influxdb/pkg/tracing/labels/labels_test.go create mode 100644 vendor/github.com/influxdata/influxdb/pkg/tracing/rawspan.go create mode 100644 vendor/github.com/influxdata/influxdb/pkg/tracing/span.go create mode 100644 vendor/github.com/influxdata/influxdb/pkg/tracing/spancontext.go create mode 100644 vendor/github.com/influxdata/influxdb/pkg/tracing/trace.go create mode 100644 vendor/github.com/influxdata/influxdb/pkg/tracing/trace_encoding.go create mode 100644 vendor/github.com/influxdata/influxdb/pkg/tracing/tree.go create mode 100644 vendor/github.com/influxdata/influxdb/pkg/tracing/util.go create mode 100644 vendor/github.com/influxdata/influxdb/pkg/tracing/wire/binary.go create mode 100644 vendor/github.com/influxdata/influxdb/pkg/tracing/wire/binary.pb.go create mode 100644 vendor/github.com/influxdata/influxdb/pkg/tracing/wire/binary.proto create mode 100644 vendor/github.com/influxdata/influxdb/prometheus/converters.go create mode 100644 vendor/github.com/influxdata/influxdb/prometheus/remote/generate.go create mode 100644 vendor/github.com/influxdata/influxdb/prometheus/remote/remote.pb.go create mode 100644 vendor/github.com/influxdata/influxdb/prometheus/remote/remote.proto create mode 100644 vendor/github.com/influxdata/influxdb/query/call_iterator.go create mode 100644 vendor/github.com/influxdata/influxdb/query/call_iterator_test.go create mode 100644 vendor/github.com/influxdata/influxdb/query/cast.go create mode 100644 vendor/github.com/influxdata/influxdb/query/compile.go create mode 100644 vendor/github.com/influxdata/influxdb/query/compile_test.go create mode 100644 vendor/github.com/influxdata/influxdb/query/cursor.go create mode 100644 vendor/github.com/influxdata/influxdb/query/emitter.go create mode 100644 vendor/github.com/influxdata/influxdb/query/execution_context.go create mode 100644 vendor/github.com/influxdata/influxdb/query/executor.go create mode 100644 vendor/github.com/influxdata/influxdb/query/executor_test.go create mode 100644 vendor/github.com/influxdata/influxdb/query/explain.go create mode 100644 vendor/github.com/influxdata/influxdb/query/functions.gen.go create mode 100644 vendor/github.com/influxdata/influxdb/query/functions.gen.go.tmpl create mode 100644 vendor/github.com/influxdata/influxdb/query/functions.go create mode 100644 
vendor/github.com/influxdata/influxdb/query/functions_test.go create mode 100644 vendor/github.com/influxdata/influxdb/query/internal/gota/README.md create mode 100644 vendor/github.com/influxdata/influxdb/query/internal/gota/cmo.go create mode 100644 vendor/github.com/influxdata/influxdb/query/internal/gota/cmo_test.go create mode 100644 vendor/github.com/influxdata/influxdb/query/internal/gota/ema.go create mode 100644 vendor/github.com/influxdata/influxdb/query/internal/gota/ema_test.go create mode 100644 vendor/github.com/influxdata/influxdb/query/internal/gota/kama.go create mode 100644 vendor/github.com/influxdata/influxdb/query/internal/gota/kama_test.go create mode 100644 vendor/github.com/influxdata/influxdb/query/internal/gota/rsi.go create mode 100644 vendor/github.com/influxdata/influxdb/query/internal/gota/rsi_test.go create mode 100644 vendor/github.com/influxdata/influxdb/query/internal/gota/trix.go create mode 100644 vendor/github.com/influxdata/influxdb/query/internal/gota/trix_test.go create mode 100644 vendor/github.com/influxdata/influxdb/query/internal/gota/utils_test.go create mode 100644 vendor/github.com/influxdata/influxdb/query/internal/internal.pb.go create mode 100644 vendor/github.com/influxdata/influxdb/query/internal/internal.proto create mode 100644 vendor/github.com/influxdata/influxdb/query/iterator.gen.go create mode 100644 vendor/github.com/influxdata/influxdb/query/iterator.gen.go.tmpl create mode 100644 vendor/github.com/influxdata/influxdb/query/iterator.go create mode 100644 vendor/github.com/influxdata/influxdb/query/iterator_mapper.go create mode 100644 vendor/github.com/influxdata/influxdb/query/iterator_mapper_test.go create mode 100644 vendor/github.com/influxdata/influxdb/query/iterator_test.go create mode 100644 vendor/github.com/influxdata/influxdb/query/linear.go create mode 100644 vendor/github.com/influxdata/influxdb/query/math.go create mode 100644 vendor/github.com/influxdata/influxdb/query/math_test.go create mode 100644 vendor/github.com/influxdata/influxdb/query/monitor.go create mode 100644 vendor/github.com/influxdata/influxdb/query/monitor_test.go create mode 100644 vendor/github.com/influxdata/influxdb/query/neldermead/neldermead.go create mode 100644 vendor/github.com/influxdata/influxdb/query/neldermead/neldermead_test.go create mode 100644 vendor/github.com/influxdata/influxdb/query/point.gen.go create mode 100644 vendor/github.com/influxdata/influxdb/query/point.gen.go.tmpl create mode 100644 vendor/github.com/influxdata/influxdb/query/point.go create mode 100644 vendor/github.com/influxdata/influxdb/query/point_test.go create mode 100644 vendor/github.com/influxdata/influxdb/query/query.go create mode 100644 vendor/github.com/influxdata/influxdb/query/result.go create mode 100644 vendor/github.com/influxdata/influxdb/query/select.go create mode 100644 vendor/github.com/influxdata/influxdb/query/select_test.go create mode 100644 vendor/github.com/influxdata/influxdb/query/statement_rewriter.go create mode 100644 vendor/github.com/influxdata/influxdb/query/statement_rewriter_test.go create mode 100644 vendor/github.com/influxdata/influxdb/query/subquery.go create mode 100644 vendor/github.com/influxdata/influxdb/query/subquery_test.go create mode 100644 vendor/github.com/influxdata/influxdb/query/task_manager.go create mode 100644 vendor/github.com/influxdata/influxdb/query/tmpldata create mode 100644 vendor/github.com/influxdata/influxdb/releng/README.md create mode 100644 
vendor/github.com/influxdata/influxdb/releng/_go_versions.sh create mode 100644 vendor/github.com/influxdata/influxdb/releng/raw-binaries/Dockerfile create mode 100755 vendor/github.com/influxdata/influxdb/releng/raw-binaries/build.bash create mode 100644 vendor/github.com/influxdata/influxdb/releng/source-tarball/Dockerfile create mode 100755 vendor/github.com/influxdata/influxdb/releng/source-tarball/build.bash create mode 100644 vendor/github.com/influxdata/influxdb/releng/unit-tests/Dockerfile create mode 100755 vendor/github.com/influxdata/influxdb/releng/unit-tests/run.bash create mode 100644 vendor/github.com/influxdata/influxdb/scripts/influxdb.service create mode 100755 vendor/github.com/influxdata/influxdb/scripts/init.sh create mode 100644 vendor/github.com/influxdata/influxdb/scripts/logrotate create mode 100644 vendor/github.com/influxdata/influxdb/scripts/post-install.sh create mode 100644 vendor/github.com/influxdata/influxdb/scripts/post-uninstall.sh create mode 100755 vendor/github.com/influxdata/influxdb/scripts/pre-install.sh create mode 100644 vendor/github.com/influxdata/influxdb/services/collectd/README.md create mode 100644 vendor/github.com/influxdata/influxdb/services/collectd/collectd_test.conf create mode 100644 vendor/github.com/influxdata/influxdb/services/collectd/config.go create mode 100644 vendor/github.com/influxdata/influxdb/services/collectd/config_test.go create mode 100644 vendor/github.com/influxdata/influxdb/services/collectd/service.go create mode 100644 vendor/github.com/influxdata/influxdb/services/collectd/service_test.go create mode 100644 vendor/github.com/influxdata/influxdb/services/collectd/test_client/README.md create mode 100644 vendor/github.com/influxdata/influxdb/services/collectd/test_client/client.go create mode 100644 vendor/github.com/influxdata/influxdb/services/continuous_querier/config.go create mode 100644 vendor/github.com/influxdata/influxdb/services/continuous_querier/config_test.go create mode 100644 vendor/github.com/influxdata/influxdb/services/continuous_querier/continuous_queries.md create mode 100644 vendor/github.com/influxdata/influxdb/services/continuous_querier/service.go create mode 100644 vendor/github.com/influxdata/influxdb/services/continuous_querier/service_test.go create mode 100644 vendor/github.com/influxdata/influxdb/services/graphite/README.md create mode 100644 vendor/github.com/influxdata/influxdb/services/graphite/config.go create mode 100644 vendor/github.com/influxdata/influxdb/services/graphite/config_test.go create mode 100644 vendor/github.com/influxdata/influxdb/services/graphite/errors.go create mode 100644 vendor/github.com/influxdata/influxdb/services/graphite/parser.go create mode 100644 vendor/github.com/influxdata/influxdb/services/graphite/parser_test.go create mode 100644 vendor/github.com/influxdata/influxdb/services/graphite/service.go create mode 100644 vendor/github.com/influxdata/influxdb/services/graphite/service_test.go create mode 100644 vendor/github.com/influxdata/influxdb/services/httpd/config.go create mode 100644 vendor/github.com/influxdata/influxdb/services/httpd/config_test.go create mode 100644 vendor/github.com/influxdata/influxdb/services/httpd/gzip.go create mode 100644 vendor/github.com/influxdata/influxdb/services/httpd/handler.go create mode 100644 vendor/github.com/influxdata/influxdb/services/httpd/handler_test.go create mode 100644 vendor/github.com/influxdata/influxdb/services/httpd/io.go create mode 100644 
vendor/github.com/influxdata/influxdb/services/httpd/io_test.go create mode 100644 vendor/github.com/influxdata/influxdb/services/httpd/listen.go create mode 100644 vendor/github.com/influxdata/influxdb/services/httpd/listen_test.go create mode 100644 vendor/github.com/influxdata/influxdb/services/httpd/pprof.go create mode 100644 vendor/github.com/influxdata/influxdb/services/httpd/requests.go create mode 100644 vendor/github.com/influxdata/influxdb/services/httpd/response_logger.go create mode 100644 vendor/github.com/influxdata/influxdb/services/httpd/response_writer.go create mode 100644 vendor/github.com/influxdata/influxdb/services/httpd/response_writer_test.go create mode 100644 vendor/github.com/influxdata/influxdb/services/httpd/service.go create mode 100644 vendor/github.com/influxdata/influxdb/services/meta/client.go create mode 100644 vendor/github.com/influxdata/influxdb/services/meta/client_test.go create mode 100644 vendor/github.com/influxdata/influxdb/services/meta/config.go create mode 100644 vendor/github.com/influxdata/influxdb/services/meta/config_test.go create mode 100644 vendor/github.com/influxdata/influxdb/services/meta/data.go create mode 100644 vendor/github.com/influxdata/influxdb/services/meta/data_internal_test.go create mode 100644 vendor/github.com/influxdata/influxdb/services/meta/data_test.go create mode 100644 vendor/github.com/influxdata/influxdb/services/meta/errors.go create mode 100644 vendor/github.com/influxdata/influxdb/services/meta/file_unix.go create mode 100644 vendor/github.com/influxdata/influxdb/services/meta/file_windows.go create mode 100644 vendor/github.com/influxdata/influxdb/services/meta/internal/meta.pb.go create mode 100644 vendor/github.com/influxdata/influxdb/services/meta/internal/meta.proto create mode 100644 vendor/github.com/influxdata/influxdb/services/meta/meta_test.go create mode 100644 vendor/github.com/influxdata/influxdb/services/meta/query_authorizer.go create mode 100644 vendor/github.com/influxdata/influxdb/services/meta/write_authorizer.go create mode 100644 vendor/github.com/influxdata/influxdb/services/opentsdb/README.md create mode 100644 vendor/github.com/influxdata/influxdb/services/opentsdb/config.go create mode 100644 vendor/github.com/influxdata/influxdb/services/opentsdb/config_test.go create mode 100644 vendor/github.com/influxdata/influxdb/services/opentsdb/handler.go create mode 100644 vendor/github.com/influxdata/influxdb/services/opentsdb/service.go create mode 100644 vendor/github.com/influxdata/influxdb/services/opentsdb/service_test.go create mode 100644 vendor/github.com/influxdata/influxdb/services/precreator/README.md create mode 100644 vendor/github.com/influxdata/influxdb/services/precreator/config.go create mode 100644 vendor/github.com/influxdata/influxdb/services/precreator/config_test.go create mode 100644 vendor/github.com/influxdata/influxdb/services/precreator/service.go create mode 100644 vendor/github.com/influxdata/influxdb/services/precreator/service_test.go create mode 100644 vendor/github.com/influxdata/influxdb/services/retention/config.go create mode 100644 vendor/github.com/influxdata/influxdb/services/retention/config_test.go create mode 100644 vendor/github.com/influxdata/influxdb/services/retention/service.go create mode 100644 vendor/github.com/influxdata/influxdb/services/retention/service_test.go create mode 100644 vendor/github.com/influxdata/influxdb/services/snapshotter/client.go create mode 100644 
vendor/github.com/influxdata/influxdb/services/snapshotter/client_test.go create mode 100644 vendor/github.com/influxdata/influxdb/services/snapshotter/service.go create mode 100644 vendor/github.com/influxdata/influxdb/services/snapshotter/service_test.go create mode 100644 vendor/github.com/influxdata/influxdb/services/storage/batch_cursor.gen.go create mode 100644 vendor/github.com/influxdata/influxdb/services/storage/batch_cursor.gen.go.tmpl create mode 100644 vendor/github.com/influxdata/influxdb/services/storage/batch_cursor.gen.go.tmpldata create mode 100644 vendor/github.com/influxdata/influxdb/services/storage/batch_cursor.go create mode 100644 vendor/github.com/influxdata/influxdb/services/storage/config.go create mode 100644 vendor/github.com/influxdata/influxdb/services/storage/eval.go create mode 100644 vendor/github.com/influxdata/influxdb/services/storage/expr.go create mode 100644 vendor/github.com/influxdata/influxdb/services/storage/group_resultset.go create mode 100644 vendor/github.com/influxdata/influxdb/services/storage/group_resultset_test.go create mode 100644 vendor/github.com/influxdata/influxdb/services/storage/predicate.go create mode 100644 vendor/github.com/influxdata/influxdb/services/storage/predicate.pb.go create mode 100644 vendor/github.com/influxdata/influxdb/services/storage/predicate.proto create mode 100644 vendor/github.com/influxdata/influxdb/services/storage/predicate_influxql.go create mode 100644 vendor/github.com/influxdata/influxdb/services/storage/predicate_test.go create mode 100644 vendor/github.com/influxdata/influxdb/services/storage/response_writer.gen.go create mode 100644 vendor/github.com/influxdata/influxdb/services/storage/response_writer.gen.go.tmpl create mode 100644 vendor/github.com/influxdata/influxdb/services/storage/response_writer.go create mode 100644 vendor/github.com/influxdata/influxdb/services/storage/resultset.go create mode 100644 vendor/github.com/influxdata/influxdb/services/storage/rpc_service.go create mode 100644 vendor/github.com/influxdata/influxdb/services/storage/series_cursor.go create mode 100644 vendor/github.com/influxdata/influxdb/services/storage/series_cursor_test.go create mode 100644 vendor/github.com/influxdata/influxdb/services/storage/service.go create mode 100644 vendor/github.com/influxdata/influxdb/services/storage/storage.go create mode 100644 vendor/github.com/influxdata/influxdb/services/storage/storage.pb.go create mode 100644 vendor/github.com/influxdata/influxdb/services/storage/storage.proto create mode 100644 vendor/github.com/influxdata/influxdb/services/storage/storage.yarpc.go create mode 100644 vendor/github.com/influxdata/influxdb/services/storage/store.go create mode 100644 vendor/github.com/influxdata/influxdb/services/storage/string.go create mode 100644 vendor/github.com/influxdata/influxdb/services/storage/tagsbuffer.go create mode 100644 vendor/github.com/influxdata/influxdb/services/storage/yarpc_server.go create mode 100644 vendor/github.com/influxdata/influxdb/services/subscriber/config.go create mode 100644 vendor/github.com/influxdata/influxdb/services/subscriber/config_test.go create mode 100644 vendor/github.com/influxdata/influxdb/services/subscriber/http.go create mode 100644 vendor/github.com/influxdata/influxdb/services/subscriber/service.go create mode 100644 vendor/github.com/influxdata/influxdb/services/subscriber/service_test.go create mode 100644 vendor/github.com/influxdata/influxdb/services/subscriber/udp.go create mode 100644 
vendor/github.com/influxdata/influxdb/services/udp/README.md create mode 100644 vendor/github.com/influxdata/influxdb/services/udp/config.go create mode 100644 vendor/github.com/influxdata/influxdb/services/udp/config_test.go create mode 100644 vendor/github.com/influxdata/influxdb/services/udp/service.go create mode 100644 vendor/github.com/influxdata/influxdb/services/udp/service_test.go create mode 100644 vendor/github.com/influxdata/influxdb/stress/DESIGN.md create mode 100644 vendor/github.com/influxdata/influxdb/stress/README.md create mode 100644 vendor/github.com/influxdata/influxdb/stress/basic.go create mode 100644 vendor/github.com/influxdata/influxdb/stress/config.go create mode 100644 vendor/github.com/influxdata/influxdb/stress/run.go create mode 100644 vendor/github.com/influxdata/influxdb/stress/stress.toml create mode 100644 vendor/github.com/influxdata/influxdb/stress/stress_test.go create mode 100644 vendor/github.com/influxdata/influxdb/stress/stress_test_server/server.go create mode 100644 vendor/github.com/influxdata/influxdb/stress/template.go create mode 100644 vendor/github.com/influxdata/influxdb/stress/util.go create mode 100644 vendor/github.com/influxdata/influxdb/stress/v2/DESIGN.md create mode 100644 vendor/github.com/influxdata/influxdb/stress/v2/README.md create mode 100644 vendor/github.com/influxdata/influxdb/stress/v2/influx_stress_v2.png create mode 100644 vendor/github.com/influxdata/influxdb/stress/v2/iql/default.iql create mode 100644 vendor/github.com/influxdata/influxdb/stress/v2/iql/file.iql create mode 100644 vendor/github.com/influxdata/influxdb/stress/v2/main.go create mode 100644 vendor/github.com/influxdata/influxdb/stress/v2/statement/exec.go create mode 100644 vendor/github.com/influxdata/influxdb/stress/v2/statement/exec_test.go create mode 100644 vendor/github.com/influxdata/influxdb/stress/v2/statement/function.go create mode 100644 vendor/github.com/influxdata/influxdb/stress/v2/statement/function_test.go create mode 100644 vendor/github.com/influxdata/influxdb/stress/v2/statement/go.go create mode 100644 vendor/github.com/influxdata/influxdb/stress/v2/statement/go_test.go create mode 100644 vendor/github.com/influxdata/influxdb/stress/v2/statement/influxql.go create mode 100644 vendor/github.com/influxdata/influxdb/stress/v2/statement/influxql_test.go create mode 100644 vendor/github.com/influxdata/influxdb/stress/v2/statement/insert.go create mode 100644 vendor/github.com/influxdata/influxdb/stress/v2/statement/insert_test.go create mode 100644 vendor/github.com/influxdata/influxdb/stress/v2/statement/query.go create mode 100644 vendor/github.com/influxdata/influxdb/stress/v2/statement/query_test.go create mode 100644 vendor/github.com/influxdata/influxdb/stress/v2/statement/report.go create mode 100644 vendor/github.com/influxdata/influxdb/stress/v2/statement/report_test.go create mode 100644 vendor/github.com/influxdata/influxdb/stress/v2/statement/response_time.go create mode 100644 vendor/github.com/influxdata/influxdb/stress/v2/statement/response_time_test.go create mode 100644 vendor/github.com/influxdata/influxdb/stress/v2/statement/set.go create mode 100644 vendor/github.com/influxdata/influxdb/stress/v2/statement/set_test.go create mode 100644 vendor/github.com/influxdata/influxdb/stress/v2/statement/statement.go create mode 100644 vendor/github.com/influxdata/influxdb/stress/v2/statement/template.go create mode 100644 vendor/github.com/influxdata/influxdb/stress/v2/statement/template_test.go create mode 100644 
vendor/github.com/influxdata/influxdb/stress/v2/statement/timestamp.go create mode 100644 vendor/github.com/influxdata/influxdb/stress/v2/statement/timestamp_test.go create mode 100644 vendor/github.com/influxdata/influxdb/stress/v2/statement/wait.go create mode 100644 vendor/github.com/influxdata/influxdb/stress/v2/statement/wait_test.go create mode 100644 vendor/github.com/influxdata/influxdb/stress/v2/stress_client/commune.go create mode 100644 vendor/github.com/influxdata/influxdb/stress/v2/stress_client/commune_test.go create mode 100644 vendor/github.com/influxdata/influxdb/stress/v2/stress_client/directive.go create mode 100644 vendor/github.com/influxdata/influxdb/stress/v2/stress_client/directive_test.go create mode 100644 vendor/github.com/influxdata/influxdb/stress/v2/stress_client/package.go create mode 100644 vendor/github.com/influxdata/influxdb/stress/v2/stress_client/package_test.go create mode 100644 vendor/github.com/influxdata/influxdb/stress/v2/stress_client/reporting.go create mode 100644 vendor/github.com/influxdata/influxdb/stress/v2/stress_client/reporting_test.go create mode 100644 vendor/github.com/influxdata/influxdb/stress/v2/stress_client/response.go create mode 100644 vendor/github.com/influxdata/influxdb/stress/v2/stress_client/response_test.go create mode 100644 vendor/github.com/influxdata/influxdb/stress/v2/stress_client/stressTest.go create mode 100644 vendor/github.com/influxdata/influxdb/stress/v2/stress_client/stressTest_test.go create mode 100644 vendor/github.com/influxdata/influxdb/stress/v2/stress_client/stress_client.go create mode 100644 vendor/github.com/influxdata/influxdb/stress/v2/stress_client/stress_client_query.go create mode 100644 vendor/github.com/influxdata/influxdb/stress/v2/stress_client/stress_client_write.go create mode 100644 vendor/github.com/influxdata/influxdb/stress/v2/stress_client/tracer.go create mode 100644 vendor/github.com/influxdata/influxdb/stress/v2/stress_client/tracer_test.go create mode 100644 vendor/github.com/influxdata/influxdb/stress/v2/stress_client/util.go create mode 100644 vendor/github.com/influxdata/influxdb/stress/v2/stressql/parser.go create mode 100644 vendor/github.com/influxdata/influxdb/stress/v2/stressql/parser_test.go create mode 100644 vendor/github.com/influxdata/influxdb/stress/v2/stressql/statement/parser.go create mode 100644 vendor/github.com/influxdata/influxdb/stress/v2/stressql/statement/parser_test.go create mode 100644 vendor/github.com/influxdata/influxdb/tcp/mux.go create mode 100644 vendor/github.com/influxdata/influxdb/tcp/mux_test.go create mode 100755 vendor/github.com/influxdata/influxdb/test.sh create mode 100644 vendor/github.com/influxdata/influxdb/tests/README.md create mode 100644 vendor/github.com/influxdata/influxdb/tests/backup_restore_test.go create mode 100644 vendor/github.com/influxdata/influxdb/tests/server_bench_test.go create mode 100644 vendor/github.com/influxdata/influxdb/tests/server_concurrent_test.go create mode 100644 vendor/github.com/influxdata/influxdb/tests/server_delete_test.go create mode 100644 vendor/github.com/influxdata/influxdb/tests/server_helpers.go create mode 100644 vendor/github.com/influxdata/influxdb/tests/server_suite.go create mode 100644 vendor/github.com/influxdata/influxdb/tests/server_test.go create mode 100644 vendor/github.com/influxdata/influxdb/toml/toml.go create mode 100644 vendor/github.com/influxdata/influxdb/toml/toml_test.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/README.md create mode 100644 
vendor/github.com/influxdata/influxdb/tsdb/batcher.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/batcher_test.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/config.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/config_test.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/cursor.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/doc.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/engine.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/DESIGN.md create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/batch_cursor.gen.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/batch_cursor.gen.go.tmpl create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/bit_reader.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/bit_reader_test.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/bool.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/bool_test.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/cache.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/cache_race_test.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/cache_test.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/compact.gen.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/compact.gen.go.tmpl create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/compact.gen.go.tmpldata create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/compact.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/compact_test.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/cursor_iterator.gen.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/cursor_iterator.gen.go.tmpl create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/cursor_iterator.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/digest.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/digest_reader.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/digest_test.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/digest_writer.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/digest_writer_test.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/encoding.gen.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/encoding.gen.go.tmpl create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/encoding.gen.go.tmpldata create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/encoding.gen_test.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/encoding.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/encoding_test.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/engine.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/engine_cursor.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/engine_test.go create mode 100644 
vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/file_store.gen.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/file_store.gen.go.tmpl create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/file_store.gen.go.tmpldata create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/file_store.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/file_store_key_iterator.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/file_store_key_iterator_test.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/file_store_observer.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/file_store_test.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/file_unix.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/file_windows.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/float.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/float_test.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/int.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/int_test.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/iterator.gen.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/iterator.gen.go.tmpl create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/iterator.gen.go.tmpldata create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/iterator.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/iterator_test.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/mmap_unix.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/mmap_windows.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/pools.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/reader.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/reader_test.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/ring.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/ring_test.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/scheduler.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/scheduler_test.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/string.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/string_test.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/timestamp.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/timestamp_test.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/tombstone.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/tombstone_test.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/wal.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/wal_test.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/writer.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/writer_test.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/field_validator.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/index.go create mode 100644 
vendor/github.com/influxdata/influxdb/tsdb/index/index.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/index/inmem/inmem.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/index/inmem/inmem_test.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/index/inmem/meta.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/index/inmem/meta_test.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/index/internal/file_set.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/doc.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/file_set.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/file_set_test.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/index.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/index_file.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/index_file_test.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/index_files.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/index_files_test.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/index_test.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/log_file.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/log_file_test.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/measurement_block.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/measurement_block_test.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/partition.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/partition_test.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/tag_block.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/tag_block_test.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/tsi1.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/tsi1_test.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/index_test.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/internal/meta.pb.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/internal/meta.proto create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/meta.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/meta_test.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/series_cursor.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/series_file.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/series_file_test.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/series_index.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/series_index_test.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/series_partition.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/series_segment.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/series_segment_test.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/series_set.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/series_set_test.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/shard.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/shard_internal_test.go create mode 100644 
vendor/github.com/influxdata/influxdb/tsdb/shard_test.go
 create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/store.go
 create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/store_internal_test.go
 create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/store_test.go
 create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/testdata/line-protocol-1M.txt.gz
 create mode 100644 vendor/github.com/influxdata/influxdb/uuid/uuid.go
 create mode 100755 vendor/github.com/influxdata/influxdb/write-gdm-deps.sh
 create mode 100644 vendor/gopkg.in/natefinch/lumberjack.v2/.gitignore
 create mode 100644 vendor/gopkg.in/natefinch/lumberjack.v2/LICENSE
 create mode 100644 vendor/gopkg.in/natefinch/lumberjack.v2/README.md
 create mode 100644 vendor/gopkg.in/natefinch/lumberjack.v2/chown.go
 create mode 100644 vendor/gopkg.in/natefinch/lumberjack.v2/chown_linux.go
 create mode 100644 vendor/gopkg.in/natefinch/lumberjack.v2/example_test.go
 create mode 100644 vendor/gopkg.in/natefinch/lumberjack.v2/linux_test.go
 create mode 100644 vendor/gopkg.in/natefinch/lumberjack.v2/lumberjack.go
 create mode 100644 vendor/gopkg.in/natefinch/lumberjack.v2/lumberjack_test.go
 create mode 100644 vendor/gopkg.in/natefinch/lumberjack.v2/rotate_test.go
 create mode 100644 vendor/gopkg.in/natefinch/lumberjack.v2/testing_test.go

diff --git a/glide.lock b/glide.lock
new file mode 100644
index 0000000..7f76234
--- /dev/null
+++ b/glide.lock
@@ -0,0 +1,12 @@
+hash: f09f5d56bd2e8cdd3c53b975e94668a0c77a606afb261e5ac7cb5e6553f3bec4
+updated: 2019-03-27T17:25:19.730877447+08:00
+imports:
+- name: github.com/influxdata/influxdb
+  version: 0036db61391d79bb0bdc769a4d1ad1437df2ac89
+  subpackages:
+  - client/v2
+  - models
+  - pkg/escape
+- name: gopkg.in/natefinch/lumberjack.v2
+  version: a96e63847dc3c67d17befa69c303767e2f84e54f
+testImports: []
diff --git a/glide.yaml b/glide.yaml
new file mode 100644
index 0000000..a7096d5
--- /dev/null
+++ b/glide.yaml
@@ -0,0 +1,8 @@
+package: github.com/shell909090/influx-proxy
+import:
+  - package: github.com/influxdata/influxdb
+    version: ~1.6.4
+    subpackages:
+      - client/v2
+  - package: gopkg.in/natefinch/lumberjack.v2
+    version: ~2.1.0
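
The glide.yaml above pins `github.com/influxdata/influxdb` at `~1.6.4` (only the `client/v2` subpackage is imported directly, with `models` and `pkg/escape` pulled in transitively as recorded in glide.lock) and `gopkg.in/natefinch/lumberjack.v2` at `~2.1.0` for log rotation; running `glide install` against these manifests produces the `vendor/` tree added by the rest of this patch. Below is a minimal sketch of how code in this repository might use the two vendored packages together; the backend address, database name, measurement, and log path are hypothetical examples, not values taken from influx-proxy.

```go
package main

import (
	"log"
	"time"

	client "github.com/influxdata/influxdb/client/v2"
	"gopkg.in/natefinch/lumberjack.v2"
)

func main() {
	// Route the standard logger through lumberjack so the log file is rotated.
	log.SetOutput(&lumberjack.Logger{
		Filename:   "influx-proxy.log", // hypothetical path
		MaxSize:    100,                // megabytes per file
		MaxBackups: 3,
		MaxAge:     28, // days
	})

	// Connect to an InfluxDB backend with the vendored client/v2 package.
	c, err := client.NewHTTPClient(client.HTTPConfig{
		Addr: "http://127.0.0.1:8086", // hypothetical backend address
	})
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	// Batch a single point and forward it, as a proxy relaying writes might.
	bp, err := client.NewBatchPoints(client.BatchPointsConfig{
		Database:  "proxy_test", // hypothetical database
		Precision: "s",
	})
	if err != nil {
		log.Fatal(err)
	}
	pt, err := client.NewPoint(
		"cpu",
		map[string]string{"host": "example"},
		map[string]interface{}{"value": 0.64},
		time.Now(),
	)
	if err != nil {
		log.Fatal(err)
	}
	bp.AddPoint(pt)
	if err := c.Write(bp); err != nil {
		log.Printf("write failed: %v", err)
	}
}
```

In this sketch, lumberjack's `MaxSize` is expressed in megabytes and `MaxAge` in days, which are that package's documented units.
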
diff --git a/vendor/github.com/influxdata/influxdb/.circleci/config.yml b/vendor/github.com/influxdata/influxdb/.circleci/config.yml
new file mode 100644
index 0000000..bc285c7
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/.circleci/config.yml
@@ -0,0 +1,19 @@
+version: 2
+
+jobs:
+  build:
+    machine:
+      enabled: true
+      docker_layer_caching: true
+    environment:
+      - PARALLELISM: 4 # Input to influxdb/build.py
+    parallelism: 5 # How many CircleCI test containers
+    steps:
+      - checkout
+      - run:
+          name: Ensure CircleCI parallelism matches "./test.sh count"
+          command: "[ `./test.sh count` -eq $CIRCLE_NODE_TOTAL ]"
+      - run:
+          name: Execute test
+          command: ./test.sh $CIRCLE_NODE_INDEX
+          no_output_timeout: 1500s
diff --git a/vendor/github.com/influxdata/influxdb/.dockerignore b/vendor/github.com/influxdata/influxdb/.dockerignore
new file mode 100644
index 0000000..378eac2
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/.dockerignore
@@ -0,0 +1 @@
+build
diff --git a/vendor/github.com/influxdata/influxdb/.github/ISSUE_TEMPLATE.md b/vendor/github.com/influxdata/influxdb/.github/ISSUE_TEMPLATE.md
new file mode 100644
index 0000000..4423a0f
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/.github/ISSUE_TEMPLATE.md
@@ -0,0 +1,56 @@
+### Directions
+_GitHub Issues are reserved for actionable bug reports and feature requests._
+_General questions should be sent to the [InfluxDB Community Site](https://community.influxdata.com)._
+
+_Before opening an issue, search for similar bug reports or feature requests on GitHub Issues._
+_If no similar issue can be found, fill out either the "Bug Report" or the "Feature Request" section below._
+_Erase the other section and everything on and above this line._
+
+### Bug report
+
+__System info:__ [Include InfluxDB version, operating system name, and other relevant details]
+
+__Steps to reproduce:__
+
+1. [First Step]
+2. [Second Step]
+3. [and so on...]
+
+__Expected behavior:__ [What you expected to happen]
+
+__Actual behavior:__ [What actually happened]
+
+__Additional info:__ [Include gist of relevant config, logs, etc.]
+
+Also, if this is an issue of performance, locking, etc., the following commands are useful to create debug information for the team.
+
+```
+curl -o profiles.tar.gz "http://localhost:8086/debug/pprof/all?cpu=true"
+
+curl -o vars.txt "http://localhost:8086/debug/vars"
+iostat -xd 1 30 > iostat.txt
+```
+
+**Please note** It will take at least 30 seconds for the first cURL command above to return a response.
+This is because it will run a CPU profile as part of its information gathering, which takes 30 seconds to collect.
+Ideally you should run these commands when you're experiencing problems, so we can capture the state of the system at that time.
+
+If you're concerned about running a CPU profile (which only has a small, temporary impact on performance), then you can set `?cpu=false` or omit `?cpu=true` altogether.
+
+Please run those if possible and link them from a [gist](http://gist.github.com) or simply attach them as a comment to the issue.
+
+*Please note, the quickest way to fix a bug is to open a Pull Request.*
+
+
+### Feature Request
+
+Opening a feature request kicks off a discussion.
+Requests may be closed if we're not actively planning to work on them.
+ +__Proposal:__ [Description of the feature] + +__Current behavior:__ [What currently happens] + +__Desired behavior:__ [What you would like to happen] + +__Use case:__ [Why is this important (helps with prioritizing requests)] diff --git a/vendor/github.com/influxdata/influxdb/.github/PULL_REQUEST_TEMPLATE.md b/vendor/github.com/influxdata/influxdb/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 0000000..40afe54 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,10 @@ +###### Required for all non-trivial PRs +- [ ] Sign [CLA](https://influxdata.com/community/cla/) (if not already signed) + +###### Required only if applicable +_You can erase any checkboxes below this note if they are not applicable to your Pull Request._ +- [ ] [InfluxQL Spec](https://github.com/influxdata/influxdb/blob/master/influxql/README.md) updated +- [ ] Provide example syntax +- [ ] Update man page when modifying a command +- [ ] Config changes: update sample config (`etc/config.sample.toml`), server `NewDemoConfig` method, and `Diagnostics` methods reporting config settings, if necessary +- [ ] [InfluxData Documentation](https://github.com/influxdata/docs.influxdata.com): issue filed or pull request submitted \ diff --git a/vendor/github.com/influxdata/influxdb/.gitignore b/vendor/github.com/influxdata/influxdb/.gitignore new file mode 100644 index 0000000..b30f31b --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/.gitignore @@ -0,0 +1,83 @@ +# Keep editor-specific, non-project specific ignore rules in global .gitignore: +# https://help.github.com/articles/ignoring-files/#create-a-global-gitignore + +*~ +src/ + +config.json +/bin/ + +/query/a.out* + +# ignore generated files. +cmd/influxd/version.go + +# executables + +*.test + +influx_tsm +**/influx_tsm +!**/influx_tsm/ + +influx_stress +**/influx_stress +!**/influx_stress/ + +influxd +**/influxd +!**/influxd/ + +influx +**/influx +!**/influx/ + +influxdb +**/influxdb +!**/influxdb/ + +influx_inspect +**/influx_inspect +!**/influx_inspect/ + +/benchmark-tool +/main +/benchmark-storage +godef +gosym +gocode +inspect-raft + +# dependencies +out_rpm/ +packages/ + +# autconf +autom4te.cache/ +config.log +config.status + +# log file +influxdb.log +benchmark.log + +# config file +config.toml + +# test data files +integration/migration_data/ +test-logs/ + +# man outputs +man/*.xml +man/*.1 +man/*.1.gz + +# test outputs +/test-results.xml + +# profile data +/prof + +# vendored files +/vendor diff --git a/vendor/github.com/influxdata/influxdb/.hooks/pre-commit b/vendor/github.com/influxdata/influxdb/.hooks/pre-commit new file mode 100644 index 0000000..6cf240b --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/.hooks/pre-commit @@ -0,0 +1,23 @@ +#!/usr/bin/env bash + +fmtcount=`git ls-files | grep '.go$' | xargs gofmt -l 2>&1 | wc -l` +if [ $fmtcount -gt 0 ]; then + echo "Some files aren't formatted, please run 'go fmt ./...' to format your source code before committing" + exit 1 +fi + +vetcount=`go tool vet ./ 2>&1 | wc -l` +if [ $vetcount -gt 0 ]; then + echo "Some files aren't passing vet heuristics, please run 'go vet ./...' to see the errors it flags and correct your source code before committing" + exit 1 +fi +exit 0 + +# Ensure FIXME lines are removed before commit. 
+fixme_lines=$(git diff --cached | grep ^+ | grep -v pre-commit | grep FIXME | sed 's_^+\s*__g') +if [ "$fixme_lines" != "" ]; then + echo "Please remove the following lines:" + echo -e "$fixme_lines" + exit 1 +fi + diff --git a/vendor/github.com/influxdata/influxdb/.mention-bot b/vendor/github.com/influxdata/influxdb/.mention-bot new file mode 100644 index 0000000..5f8689b --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/.mention-bot @@ -0,0 +1,6 @@ +{ + "maxReviewers": 3, + "fileBlacklist": ["CHANGELOG.md"], + "userBlacklist": ["pauldix", "toddboom", "aviau", "mark-rushakoff"], + "requiredOrgs": ["influxdata"] +} diff --git a/vendor/github.com/influxdata/influxdb/CHANGELOG.md b/vendor/github.com/influxdata/influxdb/CHANGELOG.md new file mode 100644 index 0000000..cf24514 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/CHANGELOG.md @@ -0,0 +1,3253 @@ +v1.6.6 [2019-02-28] +------------------- + +### Bugfixes + +- [#10951](https://github.com/influxdata/influxdb/pull/10951): Marked functions that always return floats as always returning floats. +- [#11003](https://github.com/influxdata/influxdb/pull/11003): Fix Cardinality estimation error. +- [#11909](https://github.com/influxdata/influxdb/pull/11909): Update tagKeyValue mutex to write lock. + +v1.6.5 [2019-01-09] +------------------- + +### Bugfixes + +- [#10425](https://github.com/influxdata/influxdb/pull/10425): Fix panic in IndexSet. +- [#10426](https://github.com/influxdata/influxdb/pull/10426): fix(tsdb): copy measurement names when expression is provided. +- [#10556](https://github.com/influxdata/influxdb/pull/10556): Drop NaN values when writing back points. +- [#10558](https://github.com/influxdata/influxdb/pull/10558): Pass the query authorizer to subqueries. +- [#10576](https://github.com/influxdata/influxdb/pull/10576): Update Go runtime to 1.10.6 +- [#10585](https://github.com/influxdata/influxdb/pull/10585): Limit database and retention policy names to 255 characters. + +v1.6.4 [2018-10-23] +------------------- + +### Features + +- [#10347](https://github.com/influxdata/influxdb/pull/10347): Set cache size when building TSI index. + +### Bugfixes + +- [#10320](https://github.com/influxdata/influxdb/pull/10320): Fix tsi1 sketch locking. +- [#10334](https://github.com/influxdata/influxdb/pull/10334): Fix subquery functionality when a function references a tag from the subquery. +- [#10334](https://github.com/influxdata/influxdb/pull/10334): Strip tags from a subquery when the outer query does not group by that tag. +- [#10316](https://github.com/influxdata/influxdb/pull/10316): Add -series-file flag to dumptsi command help. +- [#10348](https://github.com/influxdata/influxdb/pull/10348): Cleanup failed TSM snapshots. +- [#10355](https://github.com/influxdata/influxdb/pull/10355): Fix TSM1 panic on reader error. +- [#10356](https://github.com/influxdata/influxdb/pull/10356): Fix series file tombstoning. +- [#10364](https://github.com/influxdata/influxdb/pull/10364): Fixing the stream iterator to not ignore the error. +- [#10371](https://github.com/influxdata/influxdb/pull/10371): Do not panic when a series id iterator is nil. +- [#10343](https://github.com/influxdata/influxdb/pull/10343): Fix append of possible nil iterator. + +v1.6.3 [2018-09-14] +------------------- + +### Features + +- [#10280](https://github.com/influxdata/influxdb/pull/10280): Remove TSI1 HLL sketches from heap. 
+ +### Bugfixes + +- [#10251](https://github.com/influxdata/influxdb/pull/10251): Fix the inherited interval for derivative and others. +- [#10273](https://github.com/influxdata/influxdb/pull/10273): Fix the derivative and others time ranges for aggregate data. + +v1.6.2 [2018-08-27] +------------------- + +### Features + +- [#10191](https://github.com/influxdata/influxdb/pull/10191): Reduce allocations in TSI TagSets implementation. + +### Bugfixes + +- [#10219](https://github.com/influxdata/influxdb/pull/10219): Ensure orhpaned series cleaned up with shard drop. + +v1.6.1 [2018-08-03] +------------------- + +### Features + +- [#10078](https://github.com/influxdata/influxdb/pull/10078): Improve LogFile performance with bitset iterator. +- [#10077](https://github.com/influxdata/influxdb/pull/10077): Add TSI index cardinality report to influx_inspect. +- [#10079](https://github.com/influxdata/influxdb/pull/10079): Update to Go 1.10. +- [#10080](https://github.com/influxdata/influxdb/pull/10080): Improve performance of buildtsi and TSI planning. +- [#10099](https://github.com/influxdata/influxdb/pull/10099): Improve performance of read service for single measurements. +- [#10103](https://github.com/influxdata/influxdb/pull/10103): Remove max concurrent compaction limit. +- [#10154](https://github.com/influxdata/influxdb/pull/10154): Provide configurable TLS options. +- [#10155](https://github.com/influxdata/influxdb/pull/10155): Add option to hint MADV_WILLNEED to kernel. + +### Bugfixes + +- [#10012](https://github.com/influxdata/influxdb/pull/10012): Improve series segment recovery. +- [#10076](https://github.com/influxdata/influxdb/pull/10076): Fix windows mmap on zero length file. +- [#10037](https://github.com/influxdata/influxdb/pull/10037): Ensure Filter iterators executed as late as possible. +- [#10061](https://github.com/influxdata/influxdb/pull/10061): Document UDP precision setting in config. +- [#10091](https://github.com/influxdata/influxdb/pull/10091): Allow tag keys to contain underscores. +- [#10095](https://github.com/influxdata/influxdb/pull/10095): Fix a panic when matching on a specific type of regular expression. + +v1.6.0 [2018-07-05] +------------------- + +### Breaking changes + +- If math is used with the same selector multiple times, it will now act as a selector rather than an aggregate. See [#9563](https://github.com/influxdata/influxdb/pull/9563) for details. + +### Features + +- [#9429](https://github.com/influxdata/influxdb/pull/9429): Support proxy environment variables in the influx client. +- [#9499](https://github.com/influxdata/influxdb/pull/9499): Implement basic trigonometry functions. +- [#9643](https://github.com/influxdata/influxdb/pull/9643): Add ability to delete many series with predicate. +- [#9632](https://github.com/influxdata/influxdb/pull/9632): Implement floor, ceil, and round functions. +- [#9399](https://github.com/influxdata/influxdb/pull/9399): Allow customizing the unix socket group and permissions created by the server. +- [#9620](https://github.com/influxdata/influxdb/pull/9620): Add more math functions to influxql. +- [#9757](https://github.com/influxdata/influxdb/pull/9757): Add suppress-write-log option to disable the write log when the log is enabled. +- [#9260](https://github.com/influxdata/influxdb/pull/9260): Add additional technical analysis algorithms. +- [#8034](https://github.com/influxdata/influxdb/pull/8034): Validate points on input. 
+- [#9777](https://github.com/influxdata/influxdb/pull/9777): Log information about index version during startup. +- [#9793](https://github.com/influxdata/influxdb/pull/9793): Add key sanitization to deletetsm. +- [#9791](https://github.com/influxdata/influxdb/pull/9791): Optimize the spread function to process points iteratively instead of in batch. +- [#9649](https://github.com/influxdata/influxdb/pull/9649): Allow math functions to be used in the condition. +- [#9888](https://github.com/influxdata/influxdb/pull/9888): HTTP Write Throttle. +- [#9822](https://github.com/influxdata/influxdb/pull/9822): Implement SHOW STATS FOR 'indexes'. +- [#9832](https://github.com/influxdata/influxdb/pull/9832): Add dumptsmwal command to influx_inspect. +- [#9959](https://github.com/influxdata/influxdb/pull/9959): Improve the number of regex patterns that are optimized to static OR conditions. + +### Bugfixes + +- [#9553](https://github.com/influxdata/influxdb/pull/9553): Support setting the log level through the environment variable. +- [#9551](https://github.com/influxdata/influxdb/pull/9551): Fix panic when checking fieldsets. +- [#9573](https://github.com/influxdata/influxdb/pull/9573): Ensure correct number of tags parsed when commas used. +- [#9580](https://github.com/influxdata/influxdb/pull/9580): Fix data race in WAL. +- [#9586](https://github.com/influxdata/influxdb/pull/9586): Allow SHOW SERIES kill. +- [#9612](https://github.com/influxdata/influxdb/pull/9612): Revert "Use MADV_WILLNEED when loading TSM files". +- [#9633](https://github.com/influxdata/influxdb/pull/9633): Fix regression to allow now() to be used as the group by offset again. +- [#9647](https://github.com/influxdata/influxdb/pull/9647): Delete deleted shards in retention service. +- [#9659](https://github.com/influxdata/influxdb/pull/9659): Ignore index size in Engine.DiskSize(). +- [#9661](https://github.com/influxdata/influxdb/pull/9661): Fix buildtsi partition key. +- [#9665](https://github.com/influxdata/influxdb/pull/9665): Enable casting values from a subquery. +- [#9682](https://github.com/influxdata/influxdb/pull/9682): Avoid a panic when using show diagnostics with text/csv. +- [#9696](https://github.com/influxdata/influxdb/pull/9696): Properly track the response bytes written for queries in all format types. +- [#9615](https://github.com/influxdata/influxdb/pull/9615): Remove error for series file when no shards exist. +- [#9751](https://github.com/influxdata/influxdb/pull/9751): Fix the validation for multiple nested distinct calls. +- [#9792](https://github.com/influxdata/influxdb/pull/9792): TSM: TSMReader.Close blocks until reads complete +- [#9858](https://github.com/influxdata/influxdb/pull/9858): Return the correct auxiliary values for top/bottom. +- [#9866](https://github.com/influxdata/influxdb/pull/9866): Close TSMReaders from FileStore.Close after releasing FileStore mutex. +- [#9932](https://github.com/influxdata/influxdb/pull/9932): buildtsi: Do not escape measurement names. + +v1.5.4 [2018-06-21] +------------------- + +### Bugfixes + +- [#9924](https://github.com/influxdata/influxdb/pull/9924): [1.5] Fix panic in readTombstoneV4. +- [#9931](https://github.com/influxdata/influxdb/pull/9931): buildtsi: Do not escape measurement names. + +v1.5.3 [2018-05-25] +------------------- + +### Features + +- [#9903](https://github.com/influxdata/influxdb/pull/9903): Add optional pprof http endpoint immediately on startup. 
+ +### Bugfixes + +- [#9765](https://github.com/influxdata/influxdb/pull/9765): Fix the validation for multiple nested distinct calls. +- [#9869](https://github.com/influxdata/influxdb/pull/9869): Return the correct auxiliary values for top/bottom. + +v1.5.2 [2018-04-12] +------------------- + +### Features + +- [#9680](https://github.com/influxdata/influxdb/pull/9680): Check for root user when running buildtsi. +- [#9672](https://github.com/influxdata/influxdb/pull/9672): [1.5] Adjustable TSI Compaction Threshold + +### Bugfixes + +- [#9638](https://github.com/influxdata/influxdb/pull/9638): backport: check for failure case where backup dir has no manifest files. +- [#9651](https://github.com/influxdata/influxdb/pull/9651): Fix regression to allow now() to be used as the group by offset again. +- [#9614](https://github.com/influxdata/influxdb/pull/9614): 1.5: Revert "Use MADV_WILLNEED when loading TSM files". +- [#9660](https://github.com/influxdata/influxdb/pull/9660): Ignore index size in Engine.DiskSize(). +- [#9662](https://github.com/influxdata/influxdb/pull/9662): [1.5] Fix buildtsi partition key. +- [#9676](https://github.com/influxdata/influxdb/pull/9676): Ensure that conditions are encoded correctly even if the AST is not properly formed. + +v1.5.1 [2018-03-20] +------------------- + +### Bugfixes + +- [#9542](https://github.com/influxdata/influxdb/pull/9542): Allow time variable to be case insensitive again. +- [#9564](https://github.com/influxdata/influxdb/pull/9564): Support setting the log level through the environment variable. +- [#9575](https://github.com/influxdata/influxdb/pull/9575): Ensure correct number of tags parsed. +- [#9566](https://github.com/influxdata/influxdb/pull/9566): Fix panic when checking fieldsets. +- [#9587](https://github.com/influxdata/influxdb/pull/9587): Fix data race in WAL. + +v1.5.0 [2018-03-06] +------------------- + +### Breaking changes + +- The default logging format has been changed. See [#9055](https://github.com/influxdata/influxdb/pull/9055) and [#9066](https://github.com/influxdata/influxdb/pull/9056) for details. + +### Features + +- [#8495](https://github.com/influxdata/influxdb/pull/8495): Improve CLI connection warnings +- [#3019](https://github.com/influxdata/influxdb/issues/3019): Backup utility prints a list of backup files. +- [#9146](https://github.com/influxdata/influxdb/issues/9146): Backup/Restore can produce/consume data in the same format as the enterprise backup/restore tool. +- [#8880](https://github.com/influxdata/influxdb/issues/8879): Restore runs in online mode, does not delete existing databases +- [#8879](https://github.com/influxdata/influxdb/issues/8879): Export functionality using start/end to filter exported data by timestamp +- [#9084](https://github.com/influxdata/influxdb/pull/9084): Handle high cardinality deletes in TSM engine +- [#9162](https://github.com/influxdata/influxdb/pull/9162): Improve inmem index startup performance for high cardinality. +- [#8491](https://github.com/influxdata/influxdb/pull/8491): Add further tsi support for streaming/copying shards. +- [#9181](https://github.com/influxdata/influxdb/pull/9181): Schedule a full compaction after a successful import +- [#9218](https://github.com/influxdata/influxdb/pull/9218): Add Prometheus `/metrics` endpoint. +- [#9213](https://github.com/influxdata/influxdb/pull/9213): Add ability to generate shard digests. +- [#9184](https://github.com/influxdata/influxdb/pull/9184): Allow setting the node id in the influx cli program. 
+- [#9056](https://github.com/influxdata/influxdb/pull/9056): Add logging configuration to the main configuration file. +- [#9445](https://github.com/influxdata/influxdb/pull/9445): Suppress the InfluxDB banner if the log output is not a TTY. +- [#9449](https://github.com/influxdata/influxdb/pull/9449): Added option to write HTTP request logs to separate file. +- [#9454](https://github.com/influxdata/influxdb/pull/9454): Update logging calls to take advantage of structured logging. +- [#9456](https://github.com/influxdata/influxdb/pull/9456): Generate trace logs for a number of important InfluxDB operations. +- [#9488](https://github.com/influxdata/influxdb/pull/9488): Improve startup time of inmem index. + +### Bugfixes + +- [#9095](https://github.com/influxdata/influxdb/pull/9095): Refuse extra arguments to influx CLI +- [#9058](https://github.com/influxdata/influxdb/issues/9058): Fix space required after regex operator. Thanks @stop-start! +- [#9109](https://github.com/influxdata/influxdb/issues/9109): Fix: panic: sync: WaitGroup is reused before previous Wait has returned +- [#9163](https://github.com/influxdata/influxdb/pull/9163): Fix race condition in the merge iterator close method. +- [#9144](https://github.com/influxdata/influxdb/issues/9144): Fix query compilation so multiple nested distinct calls is allowable +- [#8789](https://github.com/influxdata/influxdb/issues/8789): Fix CLI to allow quoted database names in use statement +- [#9208](https://github.com/influxdata/influxdb/pull/9208): Updated client 4xx error message when response body length is zero. +- [#9230](https://github.com/influxdata/influxdb/pull/9230): Remove extraneous newlines from the log. +- [#9226](https://github.com/influxdata/influxdb/issues/9226): Allow lone boolean literals in a condition expression. +- [#9235](https://github.com/influxdata/influxdb/pull/9235): Improve performance when writes exceed `max-values-per-tag` or `max-series`. +- [#9216](https://github.com/influxdata/influxdb/issues/9216): Prevent a panic when a query simultaneously finishes and is killed at the same time. +- [#9255](https://github.com/influxdata/influxdb/issues/9255): Fix missing sorting of blocks by time when compacting. +- [#9327](https://github.com/influxdata/influxdb/pull/9327): wal: update lastWriteTime behavior +- [#9290](https://github.com/influxdata/influxdb/issues/9290): Fix regression to allow binary operations on literals. +- [#9342](https://github.com/influxdata/influxdb/pull/9342): Fix data races in tcp.Mux and tcp.listener +- [#9353](https://github.com/influxdata/influxdb/pull/9353): Fix panic in msgpack httpd WriteResponse error handler. +- [#9335](https://github.com/influxdata/influxdb/pull/9335): Prevent race condition caused by WaitGroup re-use +- [#9386](https://github.com/influxdata/influxdb/issues/9386): Fix stddev() call to report itself as always returning a float. +- [#9401](https://github.com/influxdata/influxdb/pull/9401): Fix windows history file location. +- [#9403](https://github.com/influxdata/influxdb/pull/9403): Do not explicitly specify ports 80 or 443 when they are the default port. +- [#8878](https://github.com/influxdata/influxdb/pull/8878): Do not report an error when dropping a CQ on a non-existent DB/RP. +- [#9423](https://github.com/influxdata/influxdb/pull/9423): Fix imports of multiple databases in a single import file from `influx -import`. +- [#9443](https://github.com/influxdata/influxdb/pull/9443): Fix regression when math between literals is used in a field. 
+- [#9464](https://github.com/influxdata/influxdb/pull/9464): Re-open last WAL segment.
+- [#9470](https://github.com/influxdata/influxdb/pull/9470): Make closing TSM cursors idempotent.
+- [#9489](https://github.com/influxdata/influxdb/pull/9489): Add dumptsi path error handling.
+- [#9493](https://github.com/influxdata/influxdb/pull/9493): Fix the implicit time range in a subquery.
+- [#9491](https://github.com/influxdata/influxdb/pull/9491): Evaluate a true boolean literal when calculating tag sets.
+- [#9496](https://github.com/influxdata/influxdb/pull/9496): Fix panic on tsi1 log replay of deleted series.
+- [#9510](https://github.com/influxdata/influxdb/pull/9510): Fix TSI log file recovery.
+- [#9513](https://github.com/influxdata/influxdb/pull/9513): Fix missing Store.Close() unlock.
+
+v1.4.3 [unreleased]
+-------------------
+
+### Configuration Changes
+
+#### `[data]` Section
+
+The default value for `cache-snapshot-memory-size` has been changed from `25m` to `256m`.
+
+### Bugfixes
+
+- [#9201](https://github.com/influxdata/influxdb/issues/9201): Fix higher disk i/o utilization
+
+v1.4.2 [2017-11-15]
+-------------------
+
+Refer to the 1.4.0 breaking changes section if `influxd` fails to start with an `incompatible tsi1 index MANIFEST` error.
+
+### Bugfixes
+
+- [#9117](https://github.com/influxdata/influxdb/pull/9117): Fix panic: runtime error: slice bounds out of range
+
+v1.4.1 [2017-11-13]
+-------------------
+
+### Bugfixes
+
+- [#9105](https://github.com/influxdata/influxdb/pull/9105): Fix descending cursors and range queries via IFQL RPC API.
+
+v1.4.0 [2017-11-13]
+-------------------
+
+### Breaking changes
+
+You can no longer specify a different `ORDER BY` clause in a subquery than the one in the top level query. This functionality never worked properly, but was not explicitly forbidden.
+
+As part of the ongoing development of the `tsi1` index, the implementation of a Bloom Filter, used to efficiently determine if series are not present in the index, was altered in [#8857](https://github.com/influxdata/influxdb/pull/8857). While this significantly increases the performance of the index and reduces its memory consumption, the existing `tsi1` indexes created while running previous versions of the database are not compatible with 1.4.0.
+
+Users with databases using the `tsi1` index must go through the following process to upgrade to 1.4.0:
+
+1. Stop `influxd`.
+2. Remove all `index` directories on databases using the `tsi1` index. With default configuration these can be found in `/var/lib/influxdb/data/DB_NAME/RP_NAME/SHARD_ID/index` or `~/.influxdb/data/DB_NAME/RP_NAME/SHARD_ID/index`. Take note of how many different `SHARD_ID` directories you visit; each of them will need to be converted in step (3).
+3. Run the `influx_inspect inmem2tsi` tool using the shard's data and WAL directories for `-datadir` and `-waldir`, respectively. Given the example in step (2) that would be `influx_inspect inmem2tsi -datadir /var/lib/influxdb/data/DB_NAME/RP_NAME/SHARD_ID -waldir /path/to/influxdb/wal/DB_NAME/RP_NAME/SHARD_ID`.
+4. Repeat step (3) for each shard that needs to be converted.
+5. Start `influxd`.
+
+Users with existing `tsi1` shards who attempt to start version 1.4.0 without following the above steps will find that the shards refuse to open, and will most likely see the following error message:
+
+`incompatible tsi1 index MANIFEST`
+
+### Configuration Changes
+
+#### `[collectd]` Section
+
+- `parse-multivalue-plugin` was added with a default of `split`. When set to `split`, multivalue plugin data (e.g. 
df free:5000,used:1000) will be split into separate measurements (e.g., (df_free, value=5000) (df_used, value=1000)). When set to `join`, multivalue plugin will be stored as a single multi-value measurement (e.g., (df, free=5000,used=1000)). + +### Features + +- [#8574](https://github.com/influxdata/influxdb/pull/8574): Add 'X-Influxdb-Build' to http response headers so users can identify if a response is from an OSS or Enterprise service. +- [#8426](https://github.com/influxdata/influxdb/issues/8426): Add `parse-multivalue-plugin` to allow users to choose how multivalue plugins should be handled by the collectd service. +- [#8548](https://github.com/influxdata/influxdb/issues/8548): Allow panic recovery to be disabled when investigating server issues. +- [#8525](https://github.com/influxdata/influxdb/issues/8525): Support http pipelining for /query endpoint. +- [#8652](https://github.com/influxdata/influxdb/pull/8652): Reduce allocations when reading data +- [#8592](https://github.com/influxdata/influxdb/pull/8592): Mutex profiles are now available. +- [#8669](https://github.com/influxdata/influxdb/pull/8669): TSI Index Migration Tool +- [#7195](https://github.com/influxdata/influxdb/issues/7195): Support SHOW CARDINALITY queries. +- [#8711](https://github.com/influxdata/influxdb/pull/8711): Batch up writes for monitor service +- [#8572](https://github.com/influxdata/influxdb/pull/8572): All errors from queries or writes are available via X-InfluxDB-Error header, and 5xx error messages will be written to server logs. +- [#8662](https://github.com/influxdata/influxdb/pull/8662): Improve test coverage across both indexes. +- [#8611](https://github.com/influxdata/influxdb/issues/8611): Respect X-Request-Id/Request-Id headers. +- [#8572](https://github.com/influxdata/influxdb/issues/8668): InfluxDB now uses MIT licensed version of BurntSushi/toml. +- [#8752](https://github.com/influxdata/influxdb/pull/8752): Use system cursors for measurement, series, and tag key meta queries. +- [#6563](https://github.com/influxdata/influxdb/issues/6563): Support Ctrl+C to cancel a running query in the Influx CLI. Thanks @emluque! +- [#8776](https://github.com/influxdata/influxdb/pull/8776): Initial implementation of explain plan. +- [#8791](https://github.com/influxdata/influxdb/pull/8791): Include the number of scanned cached values in the iterator cost. +- [#8784](https://github.com/influxdata/influxdb/pull/8784): Add support for the Prometheus remote read and write APIs. +- [#8851](https://github.com/influxdata/influxdb/pull/8851): Improve performance of `Include` and `Exclude` functions +- [#8854](https://github.com/influxdata/influxdb/pull/8854): Report the task status for a query. +- [#8853](https://github.com/influxdata/influxdb/pull/8853): Reduce allocations, improve `readEntries` performance by simplifying loop +- [#8830](https://github.com/influxdata/influxdb/issues/8830): Separate importer log statements to stdout and stderr. +- [#8857](https://github.com/influxdata/influxdb/pull/8857): Improve performance of Bloom Filter in TSI index. +- [#8897](https://github.com/influxdata/influxdb/pull/8897): Add message pack format for query responses. +- [#8886](https://github.com/influxdata/influxdb/pull/8886): Improved compaction scheduling +- [#8690](https://github.com/influxdata/influxdb/issues/8690): Implicitly decide on a lower limit for fill queries when none is present. 
+- [#8947](https://github.com/influxdata/influxdb/pull/8947): Add `EXPLAIN ANALYZE` command, which produces a detailed execution plan of a `SELECT` statement. +- [#8963](https://github.com/influxdata/influxdb/pull/8963): Streaming inmem2tsi conversion. +- [#8995](https://github.com/influxdata/influxdb/pull/8995): Sort & validate TSI key value insertion. +- [#8968](https://github.com/influxdata/influxdb/issues/8968): Make client errors more helpful on downstream errs. Thanks @darkliquid! +- [#8984](https://github.com/influxdata/influxdb/pull/8984): EXACT and estimated CARDINALITY queries. +- [#8893](https://github.com/influxdata/influxdb/pull/8893): Handle nil MeasurementIterator. +- [#8986](https://github.com/influxdata/influxdb/issues/8986): Add long-line support to client importer. Thanks @lets00! +- [#9021](https://github.com/influxdata/influxdb/pull/9021): Update to go 1.9.2 +- [#8891](https://github.com/influxdata/influxdb/pull/8891): Allow human-readable byte sizes in config +- [#9073](https://github.com/influxdata/influxdb/pull/9073): Improve SHOW TAG KEYS performance. +- [#7355](https://github.com/influxdata/influxdb/issues/7355): Create a command to truncate shard groups + +### Bugfixes + +- [#8480](https://github.com/influxdata/influxdb/pull/8480): Change the default stats interval to 1 second instead of 10 seconds. +- [#8466](https://github.com/influxdata/influxdb/issues/8466): illumos build broken on syscall.Mmap +- [#8124](https://github.com/influxdata/influxdb/issues/8124): Prevent privileges on non-existent databases from being set. +- [#8461](https://github.com/influxdata/influxdb/issues/8461) influxd backup tool will now separate out its logging to stdout and stderr. Thanks @xginn8! +- [#8558](https://github.com/influxdata/influxdb/issues/8558): Dropping measurement used several GB disk space +- [#8569](https://github.com/influxdata/influxdb/issues/8569): Fix the cq start and end times to use unix timestamps. +- [#8590](https://github.com/influxdata/influxdb/issues/8590): influx cli case sensitivity. +- [#8601](https://github.com/influxdata/influxdb/pull/8601): Fixed time boundaries for continuous queries with time zones. +- [#8097](https://github.com/influxdata/influxdb/pull/8097): Return query parsing errors in CSV formats. +- [#8607](https://github.com/influxdata/influxdb/issues/8607): Fix time zone shifts when the shift happens on a time zone boundary. +- [#8639](https://github.com/influxdata/influxdb/issues/8639): Parse time literals using the time zone in the select statement. +- [#8694](https://github.com/influxdata/influxdb/issues/8694): Reduce CPU usage when checking series cardinality +- [#8677](https://github.com/influxdata/influxdb/issues/8677): Fix backups when snapshot is empty. +- [#8706](https://github.com/influxdata/influxdb/pull/8706): Cursor leak, resulting in an accumulation of `.tsm.tmp` files after compactions. +- [#8712](https://github.com/influxdata/influxdb/pull/8712): Improve condition parsing. +- [#8716](https://github.com/influxdata/influxdb/pull/8716): Ensure inputs are closed on error. Add runtime GC finalizer as additional guard to close iterators +- [#8695](https://github.com/influxdata/influxdb/issues/8695): Fix merging bug on system iterators. +- [#8699](https://github.com/influxdata/influxdb/issues/8699): Force subqueries to match the parent queries ordering. +- [#8755](https://github.com/influxdata/influxdb/pull/8755): Fix race condition accessing `seriesByID` map. 
+- [#8766](https://github.com/influxdata/influxdb/pull/8766): Fix deadlock when calling `SeriesIDsAllOrByExpr` +- [#8638](https://github.com/influxdata/influxdb/issues/8638): Fix `influx_inspect export` so it skips missing files. +- [#8770](https://github.com/influxdata/influxdb/pull/8770): Reduce how long it takes to walk the varrefs in an expression. +- [#8787](https://github.com/influxdata/influxdb/issues/8787): panic: runtime error: invalid memory address or nil pointer dereference. +- [#8697](https://github.com/influxdata/influxdb/issues/8697): Drop Series Cause Write Fail/Write Timeouts/High Memory Usage +- [#8741](https://github.com/influxdata/influxdb/issues/8741): Fix increased memory usage in cache and wal readers +- [#8749](https://github.com/influxdata/influxdb/issues/8749): An OSS read-only user should be able to list measurements on a database +- [#8678](https://github.com/influxdata/influxdb/issues/8678): Ensure time and tag-based condition can be used with tsi1 index when deleting. +- [#8848](https://github.com/influxdata/influxdb/issues/8848): Prevent deadlock when doing math on the result of a subquery. +- [#8895](https://github.com/influxdata/influxdb/issues/8895): Fix a minor memory leak in batching points in tsdb. +- [#8900](https://github.com/influxdata/influxdb/issues/8900): Don't assume `which` is present in package post-install script. +- [#8908](https://github.com/influxdata/influxdb/issues/8908): Fix missing man pages in new packaging output +- [#8909](https://github.com/influxdata/influxdb/issues/8909): Fix use of `INFLUXD_OPTS` in service file +- [#8952](https://github.com/influxdata/influxdb/issues/8952): Fix WAL panic: runtime error: makeslice: cap out of range +- [#8975](https://github.com/influxdata/influxdb/pull/8975): Copy returned bytes from TSI meta functions. +- [#7797](https://github.com/influxdata/influxdb/issues/7706): Fix data deleted outside of time range +- [#8822](https://github.com/influxdata/influxdb/issues/8822): Fix data dropped incorrectly during compaction +- [#8780](https://github.com/influxdata/influxdb/issues/8780): Prevent deadlock during collectd, graphite, opentsdb, and udp shutdown. +- [#8983](https://github.com/influxdata/influxdb/issues/8983): Remove the pidfile after the server has exited. +- [#9005](https://github.com/influxdata/influxdb/pull/9005): Return `query.ErrQueryInterrupted` for successful read on `InterruptCh`. +- [#8989](https://github.com/influxdata/influxdb/issues/8989): Fix race inside Measurement index. +- [#8819](https://github.com/influxdata/influxdb/issues/8819): Ensure retention service always removes local shards. +- [#8965](https://github.com/influxdata/influxdb/issues/8965): Handle utf16 files when reading the configuration file. +- [#8538](https://github.com/influxdata/influxdb/pull/8538): Fix panic: runtime error: slice bounds out of range + +v1.3.7 [2017-10-26] +------------------- + +### Bugfixes + +- [#8900](https://github.com/influxdata/influxdb/issues/8900): Don't assume `which` is present in package post-install script. +- [#8909](https://github.com/influxdata/influxdb/issues/8909): Fix use of `INFLUXD_OPTS` in service file +- [#8908](https://github.com/influxdata/influxdb/issues/8908): Fix missing man pages in new packaging output +- [#8951](https://github.com/influxdata/influxdb/issues/8951): Add RPM dependency on shadow-utils for `useradd`. 
+- [#7797](https://github.com/influxdata/influxdb/issues/7706): Fix data deleted outside of time range +- [#8822](https://github.com/influxdata/influxdb/issues/8822): Fix data dropped incorrectly during compaction +- [#9006](https://github.com/influxdata/influxdb/pull/9006): Return `query.ErrQueryInterrupted` for a successful read on `InterruptCh`. +- [#8978](https://github.com/influxdata/influxdb/pull/8978): Copy returned bytes from TSI meta functions. + +v1.3.6 [2017-09-29] +------------------- + +### Bugfixes + +- [#8770](https://github.com/influxdata/influxdb/pull/8770): Reduce how long it takes to walk the varrefs in an expression. +- [#8787](https://github.com/influxdata/influxdb/issues/8787): panic: runtime error: invalid memory address or nil pointer dereference. +- [#8741](https://github.com/influxdata/influxdb/issues/8741): Fix increased memory usage in cache and wal readers +- [#8848](https://github.com/influxdata/influxdb/issues/8848): Prevent deadlock when doing math on the result of a subquery. +- [#8842](https://github.com/influxdata/influxdb/issues/8842): Fix several races in the shard and engine. +- [#8887](https://github.com/influxdata/influxdb/pull/8887): Fix race on cache entry. + +v1.3.5 [2017-08-29] +------------------- + +### Bugfixes + +- [#8755](https://github.com/influxdata/influxdb/pull/8755): Fix race condition accessing `seriesByID` map. +- [#8766](https://github.com/influxdata/influxdb/pull/8766): Fix deadlock when calling `SeriesIDsAllOrByExpr` + +v1.3.4 [2017-08-23] +------------------- + +### Bugfixes + +- [#8601](https://github.com/influxdata/influxdb/pull/8601): Fixed time boundaries for continuous queries with time zones. +- [#8607](https://github.com/influxdata/influxdb/issues/8607): Fix time zone shifts when the shift happens on a time zone boundary. +- [#8639](https://github.com/influxdata/influxdb/issues/8639): Parse time literals using the time zone in the select statement. +- [#8701](https://github.com/influxdata/influxdb/pull/8701): Fix drop measurement not dropping all data +- [#8677](https://github.com/influxdata/influxdb/issues/8677): Fix backups when snapshot is empty. +- [#8706](https://github.com/influxdata/influxdb/pull/8706): Cursor leak, resulting in an accumulation of `.tsm.tmp` files after compactions. +- [#8713](https://github.com/influxdata/influxdb/issues/8713): Deadlock when dropping measurement and writing +- [#8716](https://github.com/influxdata/influxdb/pull/8716): Ensure inputs are closed on error. Add runtime GC finalizer as additional guard to close iterators +- [#8726](https://github.com/influxdata/influxdb/pull/8726): Fix leaking tmp file when large compaction aborted + +### Features + +- [#8711](https://github.com/influxdata/influxdb/pull/8711): Batch up writes for monitor service + +v1.3.3 [2017-08-10] +------------------- + +### Bugfixes + +- [#8681](https://github.com/influxdata/influxdb/pull/8681): Resolves a memory leak when NewReaderIterator creates a nilFloatIterator, the reader is not closed + +v1.3.2 [2017-08-04] +------------------- + +### Bugfixes + +- [#8629](https://github.com/influxdata/influxdb/pull/8629): Interrupt in progress TSM compactions +- [#8630](https://github.com/influxdata/influxdb/pull/8630): Prevent excessive memory usage when dropping series +- [#8640](https://github.com/influxdata/influxdb/issues/8640): Significantly improve performance of SHOW TAG VALUES. 
+ +v1.3.1 [2017-07-20] +------------------- + +### Bugfixes + +- [#8559](https://github.com/influxdata/influxdb/issues/8559): Ensure temporary TSM files get cleaned up when compaction aborted. +- [#8500](https://github.com/influxdata/influxdb/issues/8500): InfluxDB goes unresponsive +- [#8531](https://github.com/influxdata/influxdb/issues/8531): Duplicate points generated via INSERT after DELETE +- [#8569](https://github.com/influxdata/influxdb/issues/8569): Fix the cq start and end times to use unix timestamps. + +v1.3.0 [2017-06-21] +------------------- + +### Release Notes + +#### Continuous Query Statistics + +When enabled, each time a continuous query is completed, a number of details regarding the execution are written to the `cq_query` measurement of the internal monitor database (`_internal` by default). The tags and fields of interest are + +| tag / field | description | +|:------------------|:---------------------------------------------------| +| `db` | name of database | +| `cq` | name of continuous query | +| `durationNS` | query execution time in nanoseconds | +| `startTime` | lower bound of time range | +| `endTime` | upper bound of time range | +| `pointsWrittenOK` | number of points written to the target measurement | + +- `startTime` and `endTime` are UNIX timestamps, in nanoseconds. +- The number of points written is also included in CQ log messages. + +### Removals + +The admin UI is removed and unusable in this release. The `[admin]` configuration section will be ignored. + +### Configuration Changes + +- The top-level config `bind-address` now defaults to `localhost:8088`. The previous default was just `:8088`, causing the backup and restore port to be bound on all available interfaces (i.e. including interfaces on the public internet). + +The following new configuration options are available. + +#### `[http]` Section + +- `max-body-size` was added with a default of 25,000,000, but can be disabled by setting it to 0. Specifies the maximum size (in bytes) of a client request body. When a client sends data that exceeds the configured maximum size, a `413 Request Entity Too Large` HTTP response is returned. + +#### `[continuous_queries]` Section + +- `query-stats-enabled` was added with a default of `false`. When set to `true`, continuous query execution statistics are written to the default monitor store. + +### Features + +- [#8512](https://github.com/influxdata/influxdb/pull/8512): Switch to LogLog-Beta Cardinality estimation +- [#8143](https://github.com/influxdata/influxdb/pull/8143): Add WAL sync delay +- [#7977](https://github.com/influxdata/influxdb/issues/7977): Add chunked request processing back into the Go client v2 +- [#7974](https://github.com/influxdata/influxdb/pull/7974): Allow non-admin users to execute SHOW DATABASES. +- [#7948](https://github.com/influxdata/influxdb/pull/7948): Reduce memory allocations by reusing gzip.Writers across requests +- [#7776](https://github.com/influxdata/influxdb/issues/7776): Add system information to /debug/vars. +- [#7553](https://github.com/influxdata/influxdb/issues/7553): Add modulo operator to the query language. +- [#7856](https://github.com/influxdata/influxdb/issues/7856): Failed points during an import now result in a non-zero exit code. +- [#7821](https://github.com/influxdata/influxdb/issues/7821): Expose some configuration settings via SHOW DIAGNOSTICS +- [#8025](https://github.com/influxdata/influxdb/issues/8025): Support single and multiline comments in InfluxQL. 
+- [#6541](https://github.com/influxdata/influxdb/issues/6541): Support timezone offsets for queries.
+- [#8194](https://github.com/influxdata/influxdb/pull/8194): Add "integral" function to InfluxQL.
+- [#7393](https://github.com/influxdata/influxdb/issues/7393): Add "non_negative_difference" function to InfluxQL.
+- [#8042](https://github.com/influxdata/influxdb/issues/8042): Add bitwise AND, OR and XOR operators to the query language.
+- [#8302](https://github.com/influxdata/influxdb/pull/8302): Write throughput/concurrency improvements
+- [#8273](https://github.com/influxdata/influxdb/issues/8273): Remove the admin UI.
+- [#8327](https://github.com/influxdata/influxdb/pull/8327): Update to go1.8.1
+- [#8348](https://github.com/influxdata/influxdb/pull/8348): Add max concurrent compaction limits
+- [#8366](https://github.com/influxdata/influxdb/pull/8366): Add TSI support tooling.
+- [#8350](https://github.com/influxdata/influxdb/pull/8350): Track HTTP client requests for /write and /query with /debug/requests.
+- [#8384](https://github.com/influxdata/influxdb/pull/8384): Write and compaction stability
+- [#7862](https://github.com/influxdata/influxdb/pull/7861): Add new profile endpoint for gathering all debug profiles and queries in a single archive.
+- [#8390](https://github.com/influxdata/influxdb/issues/8390): Add nanosecond duration literal support.
+- [#8394](https://github.com/influxdata/influxdb/pull/8394): Optimize top() and bottom() using an incremental aggregator.
+- [#7129](https://github.com/influxdata/influxdb/issues/7129): Maintain the tags of points selected by top() or bottom() when writing the results.
+- [#8188](https://github.com/influxdata/influxdb/issues/8188): Write CQ stats to _internal
+
+### Bugfixes
+
+- [#8187](https://github.com/influxdata/influxdb/pull/8187): Several statements were missing the DefaultDatabase method
+- [#8231](https://github.com/influxdata/influxdb/pull/8231): Fix spelling mistake in HTTP section of config -- shared-sercret
+- [#8190](https://github.com/influxdata/influxdb/issues/8190): History file should redact passwords before saving to history.
+- [#8122](https://github.com/influxdata/influxdb/pull/8122): Suppress headers in output for influx cli when they are the same.
+- [#8119](https://github.com/influxdata/influxdb/pull/8119): Add chunked/chunk size as setting/options in cli.
+- [#8091](https://github.com/influxdata/influxdb/issues/8091): Do not increment the continuous query statistic if no query is run.
+- [#8064](https://github.com/influxdata/influxdb/issues/8064): Forbid wildcards in binary expressions.
+- [#8148](https://github.com/influxdata/influxdb/issues/8148): Fix fill(linear) when multiple series exist and there are null values.
+- [#7995](https://github.com/influxdata/influxdb/issues/7995): Update liner dependency to handle docker exec.
+- [#7835](https://github.com/influxdata/influxdb/pull/7835): Bind backup and restore port to localhost by default
+- [#7811](https://github.com/influxdata/influxdb/issues/7811): Kill query not killing query
+- [#7457](https://github.com/influxdata/influxdb/issues/7457): KILL QUERY should work during all phases of a query
+- [#8155](https://github.com/influxdata/influxdb/pull/8155): Simplify admin user check.
+- [#8118](https://github.com/influxdata/influxdb/issues/8118): Significantly improve DROP DATABASE speed.
+- [#8181](https://github.com/influxdata/influxdb/issues/8181): Return an error when an invalid duration literal is parsed.
+- [#8093](https://github.com/influxdata/influxdb/issues/8093): Fix the time range when an exact timestamp is selected. +- [#8174](https://github.com/influxdata/influxdb/issues/8174): Fix query parser when using addition and subtraction without spaces. +- [#8167](https://github.com/influxdata/influxdb/issues/8167): Fix a regression when math was used with selectors. +- [#8175](https://github.com/influxdata/influxdb/issues/8175): Ensure the input for certain functions in the query engine are ordered. +- [#8171](https://github.com/influxdata/influxdb/issues/8171): Significantly improve shutdown speed for high cardinality databases. +- [#8177](https://github.com/influxdata/influxdb/issues/8177): Fix racy integration test. +- [#8230](https://github.com/influxdata/influxdb/issues/8230): Prevent overflowing or underflowing during window computation. +- [#8058](https://github.com/influxdata/influxdb/pull/8058): Enabled golint for admin, httpd, subscriber, udp. @karlding +- [#8252](https://github.com/influxdata/influxdb/issues/8252): Implicitly cast null to false in binary expressions with a boolean. +- [#8067](https://github.com/influxdata/influxdb/issues/8067): Restrict fill(none) and fill(linear) to be usable only with aggregate queries. +- [#8065](https://github.com/influxdata/influxdb/issues/8065): Restrict top() and bottom() selectors to be used with no other functions. +- [#8266](https://github.com/influxdata/influxdb/issues/8266): top() and bottom() now returns the time for every point. +- [#8315](https://github.com/influxdata/influxdb/issues/8315): Remove default upper time bound on DELETE queries. +- [#8066](https://github.com/influxdata/influxdb/issues/8066): Fix LIMIT and OFFSET for certain aggregate queries. +- [#8045](https://github.com/influxdata/influxdb/issues/8045): Refactor the subquery code and fix outer condition queries. +- [#7425](https://github.com/influxdata/influxdb/issues/7425): Fix compaction aborted log messages +- [#8123](https://github.com/influxdata/influxdb/issues/8123): TSM compaction does not remove .tmp on error +- [#8343](https://github.com/influxdata/influxdb/issues/8343): Set the CSV output to an empty string for null values. +- [#8368](https://github.com/influxdata/influxdb/issues/8368): Compaction exhausting disk resources in InfluxDB +- [#8358](https://github.com/influxdata/influxdb/issues/8358): Small edits to the etc/config.sample.toml file. +- [#8392](https://github.com/influxdata/influxdb/issues/8393): Points beyond retention policy scope are dropped silently +- [#8387](https://github.com/influxdata/influxdb/issues/8387): Fix TSM tmp file leaked on disk +- [#8417](https://github.com/influxdata/influxdb/issues/8417): Fix large field keys preventing snapshot compactions +- [#7957](https://github.com/influxdata/influxdb/issues/7957): URL query parameter credentials take priority over Authentication header. +- [#8443](https://github.com/influxdata/influxdb/issues/8443): TSI branch has duplicate tag values. +- [#8299](https://github.com/influxdata/influxdb/issues/8299): Out of memory when using HTTP API +- [#8455](https://github.com/influxdata/influxdb/pull/8455): Check file count before attempting a TSI level compaction. +- [#8470](https://github.com/influxdata/influxdb/issues/8470): index file fd leak in tsi branch +- [#8468](https://github.com/influxdata/influxdb/pull/8468): Fix TSI non-contiguous compaction panic. 
+- [#8500](https://github.com/influxdata/influxdb/issues/8500): InfluxDB goes unresponsive + +v1.2.4 [2017-05-08] +------------------- + +### Bugfixes + +- [#8338](https://github.com/influxdata/influxdb/pull/8338): Prefix partial write errors with `partial write:` to generalize identification in other subsystems + +v1.2.3 [2017-04-17] +------------------- + +### Bugfixes + +- [#8190](https://github.com/influxdata/influxdb/issues/8190): History file should redact passwords before saving to history. +- [#8187](https://github.com/influxdata/influxdb/pull/8187): Several statements were missing the DefaultDatabase method +- [#8022](https://github.com/influxdata/influxdb/issues/8022): Segment violation in models.Tags.Get +- [#8155](https://github.com/influxdata/influxdb/pull/8155): Simplify admin user check. +- [#8167](https://github.com/influxdata/influxdb/issues/8167): Fix a regression when math was used with selectors. +- [#8175](https://github.com/influxdata/influxdb/issues/8175): Ensure the input for certain functions in the query engine are ordered. +- [#8254](https://github.com/influxdata/influxdb/pull/8254): Fix delete time fields creating unparseable points + +v1.2.2 [2017-03-14] +------------------- + +### Release Notes + +### Configuration Changes + +#### `[http]` Section + +- `max-row-limit` now defaults to `0`. The previous default was `10000`, but due to a bug, the value in use since `1.0` was `0`. + +### Bugfixes + +- [#8050](https://github.com/influxdata/influxdb/issues/8050): influxdb & grafana, absence of data on the graphs + +v1.2.1 [2017-03-08] +------------------- + +### Release Notes + +### Bugfixes + +- [#8100](https://github.com/influxdata/influxdb/issues/8100): Include IsRawQuery in the rewritten statement for meta queries. +- [#8095](https://github.com/influxdata/influxdb/pull/8095): Fix race in WALEntry.Encode and Values.Deduplicate +- [#8085](https://github.com/influxdata/influxdb/issues/8085): panic: interface conversion: tsm1.Value is tsm1.IntegerValue, not tsm1.FloatValue. +- [#8084](https://github.com/influxdata/influxdb/issues/8084): Points missing after compaction +- [#8080](https://github.com/influxdata/influxdb/issues/8080): Point.UnmarshalBinary() bounds check +- [#8078](https://github.com/influxdata/influxdb/issues/8078): Map types correctly when selecting a field with multiple measurements where one of the measurements is empty. +- [#8044](https://github.com/influxdata/influxdb/issues/8044): Treat non-reserved measurement names with underscores as normal measurements. +- [#8040](https://github.com/influxdata/influxdb/issues/8040): Reduce the expression in a subquery to avoid a panic. +- [#8028](https://github.com/influxdata/influxdb/issues/8028): Fix panic in collectd when configured to read types DB from directory. +- [#8001](https://github.com/influxdata/influxdb/issues/8001): Map types correctly when using a regex and one of the measurements is empty. +- [#7968](https://github.com/influxdata/influxdb/issues/7968): Properly select a tag within a subquery. +- [#7966](https://github.com/influxdata/influxdb/pull/7966): Prevent a panic when aggregates are used in an inner query with a raw query. +- [#7946](https://github.com/influxdata/influxdb/issues/7946): Fix authentication when subqueries are present. +- [#7910](https://github.com/influxdata/influxdb/issues/7910): Fix EvalType when a parenthesis expression is used. 
+- [#7906](https://github.com/influxdata/influxdb/issues/7906): Anchors not working as expected with case-insensitive regex +- [#7905](https://github.com/influxdata/influxdb/issues/7905): Fix ORDER BY time DESC with ordering series keys. +- [#7895](https://github.com/influxdata/influxdb/issues/7895): Fix incorrect math when aggregates that emit different times are used. +- [#7888](https://github.com/influxdata/influxdb/pull/7888): Expand query dimensions from the subquery. +- [#7885](https://github.com/influxdata/influxdb/issues/7885): Fix LIMIT and OFFSET when they are used in a subquery. +- [#7880](https://github.com/influxdata/influxdb/issues/7880): Dividing aggregate functions with different outputs doesn't panic. +- [#7877](https://github.com/influxdata/influxdb/issues/7877): Fix mapping of types when the measurement uses a regex + +v1.2.0 [2017-01-24] +------------------- + +### Release Notes + +This release introduces a major new querying capability in the form of sub-queries, and provides several performance improvements, including a 50% or better gain in write performance on larger numbers of cores. The release adds some stability and memory-related improvements, as well as several CLI-related bug fixes. If upgrading from a prior version, please read the configuration changes in the following section before upgrading. + +### Configuration Changes + +The following new configuration options are available, if upgrading to `1.2.0` from prior versions. + +#### `[[collectd]]` Section + +- `security-level` which defaults to `"none"`. This field also accepts `"sign"` and `"encrypt"` and enables different levels of transmission security for the collectd plugin. +- `auth-file` which defaults to `"/etc/collectd/auth_file"`. Specifies where to locate the authentication file used to authenticate clients when using signed or encrypted mode. + +### Deprecations + +The stress tool `influx_stress` will be removed in a subsequent release. We recommend using [`influx-stress`](https://github.com/influxdata/influx-stress) as a replacement. + +### Features + +- [#7830](https://github.com/influxdata/influxdb/pull/7830): Cache snapshotting performance improvements +- [#7723](https://github.com/influxdata/influxdb/pull/7723): Remove the override of GOMAXPROCS. +- [#7709](https://github.com/influxdata/influxdb/pull/7709): Add clear command to cli. +- [#7688](https://github.com/influxdata/influxdb/pull/7688): Adding ability to use parameters in queries in the v2 client using the `Parameters` map in the `Query` struct. +- [#7669](https://github.com/influxdata/influxdb/issues/7669): Uncomment section headers from the default configuration file. +- [#7633](https://github.com/influxdata/influxdb/pull/7633): improve write performance significantly. +- [#7601](https://github.com/influxdata/influxdb/issues/7601): Prune data in meta store for deleted shards. +- [#7554](https://github.com/influxdata/influxdb/pull/7554): update latest dependencies with Godeps. +- [#7368](https://github.com/influxdata/influxdb/pull/7368): Introduce syntax for marking a partial response with chunking. +- [#7356](https://github.com/influxdata/influxdb/issues/7356): Use X-Forwarded-For IP address in HTTP logger if present. +- [#7326](https://github.com/influxdata/influxdb/issues/7326): Verbose output for SSL connection errors. 
+- [#7323](https://github.com/influxdata/influxdb/pull/7323): Allow add items to array config via ENV +- [#7066](https://github.com/influxdata/influxdb/issues/7066): Add support for secure transmission via collectd. +- [#7036](https://github.com/influxdata/influxdb/issues/7036): Switch logging to use structured logging everywhere. +- [#4619](https://github.com/influxdata/influxdb/issues/4619): Support subquery execution in the query language. +- [#3188](https://github.com/influxdata/influxdb/issues/3188): [CLI feature request] USE retention policy for queries. + +### Bugfixes + +- [#7845](https://github.com/influxdata/influxdb/issues/7845): Fix race in storage engine. +- [#7838](https://github.com/influxdata/influxdb/issues/7838): Ensure Subscriber service can be disabled. +- [#7822](https://github.com/influxdata/influxdb/issues/7822): Drop database will delete /influxdb/data directory +- [#7814](https://github.com/influxdata/influxdb/issues/7814): InfluxDB should do a partial write on mismatched type errors. +- [#7812](https://github.com/influxdata/influxdb/issues/7812): Fix slice out of bounds panic when pruning shard groups. Thanks @vladlopes +- [#7786](https://github.com/influxdata/influxdb/pull/7786): Fix potential race condition in correctness of tsm1_cache memBytes statistic. +- [#7784](https://github.com/influxdata/influxdb/pull/7784): Fix broken error return on meta client's UpdateUser and DropContinuousQuery methods. +- [#7741](https://github.com/influxdata/influxdb/pull/7741): Fix string quoting and significantly improve performance of `influx_inspect export`. +- [#7740](https://github.com/influxdata/influxdb/issues/7740): Fix parse key panic when missing tag value @oiooj +- [#7698](https://github.com/influxdata/influxdb/pull/7698): CLI was caching db/rp for insert into statements. +- [#7659](https://github.com/influxdata/influxdb/issues/7659): Fix CLI import bug when using self-signed SSL certificates. +- [#7656](https://github.com/influxdata/influxdb/issues/7656): Fix cross-platform backup/restore @allenpetersen +- [#7650](https://github.com/influxdata/influxdb/issues/7650): Ensures that all user privileges associated with a database are removed when the database is dropped. +- [#7634](https://github.com/influxdata/influxdb/issues/7634): Return the time from a percentile call on an integer. +- [#7621](https://github.com/influxdata/influxdb/issues/7621): Expand string and boolean fields when using a wildcard with `sample()`. +- [#7616](https://github.com/influxdata/influxdb/pull/7616): Fix chuid argument order in init script @ccasey +- [#7615](https://github.com/influxdata/influxdb/issues/7615): Reject invalid subscription urls @allenpetersen +- [#7585](https://github.com/influxdata/influxdb/pull/7585): Return Error instead of panic when decoding point values. +- [#7563](https://github.com/influxdata/influxdb/issues/7563): RP should not allow `INF` or `0` as a shard duration. +- [#7396](https://github.com/influxdata/influxdb/issues/7396): CLI should use spaces for alignment, not tabs. +- [#6527](https://github.com/influxdata/influxdb/issues/6527): 0.12.2 Influx CLI client PRECISION returns "Unknown precision.... + +v1.1.5 [2017-04-28] +------------------- + +### Bugfixes + +- [#8190](https://github.com/influxdata/influxdb/issues/8190): History file should redact passwords before saving to history. 
+- [#8187](https://github.com/influxdata/influxdb/pull/8187): Several statements were missing the DefaultDatabase method
+
+v1.1.4 [2017-02-27]
+-------------------
+
+### Bugfixes
+
+- [#8063](https://github.com/influxdata/influxdb/pull/8063): Backport #7631 to reduce GC allocations.
+
+v1.1.3 [2017-02-17]
+-------------------
+
+### Bugfixes
+
+- [#8027](https://github.com/influxdata/influxdb/pull/8027): Remove Tags.shouldCopy, replace with forceCopy on series creation.
+
+v1.1.2 [2017-02-16]
+-------------------
+
+### Bugfixes
+
+- [#7832](https://github.com/influxdata/influxdb/pull/7832): Fix memory leak when writing new series over HTTP
+- [#7929](https://github.com/influxdata/influxdb/issues/7929): Fix series tag iteration segfault. (#7922)
+- [#8011](https://github.com/influxdata/influxdb/issues/8011): Fix tag dereferencing panic.
+
+v1.1.1 [2016-12-06]
+-------------------
+
+### Features
+
+- [#7684](https://github.com/influxdata/influxdb/issues/7684): Update Go version to 1.7.4.
+
+### Bugfixes
+
+- [#7679](https://github.com/influxdata/influxdb/pull/7679): Fix string fields w/ trailing slashes
+- [#7661](https://github.com/influxdata/influxdb/pull/7661): Quote the empty string as an ident.
+- [#7625](https://github.com/influxdata/influxdb/issues/7625): Fix incorrect tag value in error message.
+
+### Security
+
+[Go 1.7.4](https://golang.org/doc/devel/release.html#go1.7.minor) was released to address two security issues. This release includes these security fixes.
+
+v1.1.0 [2016-11-14]
+-------------------
+
+### Release Notes
+
+This release is built with go 1.7.3 and provides many performance optimizations, stability changes and a few new query capabilities. If upgrading from a prior version, please read the configuration changes section below before upgrading.
+
+### Deprecations
+
+The admin interface is deprecated and will be removed in a subsequent release. The configuration setting to enable the admin UI is now disabled by default, but can be enabled if necessary. We recommend using [Chronograf](https://github.com/influxdata/chronograf) or [Grafana](https://github.com/grafana/grafana) as a replacement.
+
+### Configuration Changes
+
+The following configuration changes may need to be made before upgrading to `1.1.0` from prior versions.
+
+#### `[admin]` Section
+
+- `enabled` now defaults to `false`. If you are currently using the admin interface, you will need to change this value to `true` to re-enable it. The admin interface is currently deprecated and will be removed in a subsequent release.
+
+#### `[data]` Section
+
+- `max-values-per-tag` was added with a default of 100,000, but can be disabled by setting it to `0`. Existing measurements with tags that exceed this limit will continue to load, but writes that would cause the tag cardinality to increase will be dropped and a `partial write` error will be returned to the caller. This limit can be used to prevent high cardinality tag values from being written to a measurement.
+- `cache-max-memory-size` has been increased from `524288000` to `1048576000`. This setting is the maximum amount of RAM, in bytes, a shard cache can use before it rejects writes with an error. Setting this value to `0` disables the limit.
+- `cache-snapshot-write-cold-duration` has been decreased from `1h` to `10m`. This setting determines how long values will stay in the shard cache while the shard is cold for writes.
+- `compact-full-write-cold-duration` has been decreased from `24h` to `4h`. The shorter duration allows cold shards to be compacted to an optimal state more quickly.
+
+### Features
+
+The query language has been extended with a few new features (a short usage sketch follows at the end of this version's notes):
+
+- [#7442](https://github.com/influxdata/influxdb/pull/7442): Support regex on field keys in the select clause
+- [#7403](https://github.com/influxdata/influxdb/pull/7403): New `linear` fill option
+- [#7388](https://github.com/influxdata/influxdb/pull/7388): New `cumulative_sum` function
+- [#7295](https://github.com/influxdata/influxdb/pull/7295): Support `ON` for `SHOW` commands
+
+All Changes:
+
+- [#7496](https://github.com/influxdata/influxdb/pull/7496): Filter out series within shards that do not have data for that series.
+- [#7495](https://github.com/influxdata/influxdb/pull/7495): Rewrite regexes of the form host = /^server-a$/ to host = 'server-a', to take advantage of the tsdb index.
+- [#7480](https://github.com/influxdata/influxdb/pull/7480): Improve compaction planning performance by caching tsm file stats.
+- [#7473](https://github.com/influxdata/influxdb/pull/7473): Align binary math expression streams by time.
+- [#7470](https://github.com/influxdata/influxdb/pull/7470): Reduce map allocations when computing the TagSet of a measurement.
+- [#7463](https://github.com/influxdata/influxdb/pull/7463): Make input plugin services open/close idempotent.
+- [#7441](https://github.com/influxdata/influxdb/pull/7441): Speed up shutdown by closing shards concurrently.
+- [#7415](https://github.com/influxdata/influxdb/pull/7415): Add sample function to query language.
+- [#7403](https://github.com/influxdata/influxdb/pull/7403): Add `fill(linear)` to query language.
+- [#7388](https://github.com/influxdata/influxdb/pull/7388): Implement cumulative_sum() function.
+- [#7320](https://github.com/influxdata/influxdb/issues/7320): Update defaults in config for latest best practices
+- [#7305](https://github.com/influxdata/influxdb/pull/7305): UDP Client: Split large points. Thanks @vlasad
+- [#7281](https://github.com/influxdata/influxdb/pull/7281): Add stats for active compactions, compaction errors.
+- [#7268](https://github.com/influxdata/influxdb/pull/7268): More man pages for the other tools we package, and compress man pages fully.
+- [#7146](https://github.com/influxdata/influxdb/issues/7146): Add max-values-per-tag to limit high tag cardinality data
+- [#7136](https://github.com/influxdata/influxdb/pull/7136): Update jwt-go dependency to version 3.
+- [#7135](https://github.com/influxdata/influxdb/pull/7135): Support enabling the HTTP service over a unix domain socket. Thanks @oiooj
+- [#7120](https://github.com/influxdata/influxdb/issues/7120): Add additional statistics to query executor.
+- [#7115](https://github.com/influxdata/influxdb/issues/7115): Feature request: `influx inspect -export` should dump WAL files.
+- [#7099](https://github.com/influxdata/influxdb/pull/7099): Implement text/csv content encoding for the response writer.
+- [#6992](https://github.com/influxdata/influxdb/issues/6992): Support tools for running async queries.
+- [#6962](https://github.com/influxdata/influxdb/issues/6962): Support ON and use default database for SHOW commands.
+- [#6896](https://github.com/influxdata/influxdb/issues/6896): Correctly read in input from a non-interactive stream for the CLI.
+- [#6894](https://github.com/influxdata/influxdb/issues/6894): Support `INFLUX_USERNAME` and `INFLUX_PASSWORD` for setting username/password in the CLI.
+- [#6704](https://github.com/influxdata/influxdb/issues/6704): Optimize first/last when no group by interval is present. +- [#5955](https://github.com/influxdata/influxdb/issues/5955): Make regex work on field and dimension keys in SELECT clause. +- [#4461](https://github.com/influxdata/influxdb/issues/4461): Change default time boundaries for raw queries. +- [#3634](https://github.com/influxdata/influxdb/issues/3634): Support mixed duration units. + +### Bugfixes + +- [#7606](https://github.com/influxdata/influxdb/pull/7606): Avoid deadlock when `max-row-limit` is hit. +- [#7564](https://github.com/influxdata/influxdb/issues/7564): Fix incorrect grouping when multiple aggregates are used with sparse data. +- [#7548](https://github.com/influxdata/influxdb/issues/7548): Fix output duration units for SHOW QUERIES. +- [#7526](https://github.com/influxdata/influxdb/issues/7526): Truncate the version string when linking to the documentation. +- [#7494](https://github.com/influxdata/influxdb/issues/7494): influx_inspect: export does not escape field keys. +- [#7482](https://github.com/influxdata/influxdb/issues/7482): Fix issue where point would be written to wrong shard. +- [#7448](https://github.com/influxdata/influxdb/pull/7448): Fix Retention Policy Inconsistencies +- [#7436](https://github.com/influxdata/influxdb/issues/7436): Remove accidentally added string support for the stddev call. +- [#7431](https://github.com/influxdata/influxdb/issues/7431): Remove /data/process_continuous_queries endpoint. +- [#7392](https://github.com/influxdata/influxdb/pull/7392): Enable https subscriptions to work with custom CA certificates. +- [#7385](https://github.com/influxdata/influxdb/pull/7385): Reduce query planning allocations +- [#7382](https://github.com/influxdata/influxdb/issues/7382): Shard stats include wal path tag so disk bytes make more sense. +- [#7334](https://github.com/influxdata/influxdb/issues/7334): Panic with unread show series iterators during drop database +- [#7297](https://github.com/influxdata/influxdb/issues/7297): Use consistent column output from the CLI for column formatted responses. +- [#7285](https://github.com/influxdata/influxdb/issues/7285): Correctly use password-type field in Admin UI. Thanks @dandv! +- [#7231](https://github.com/influxdata/influxdb/issues/7231): Duplicate parsing bug in ALTER RETENTION POLICY. +- [#7226](https://github.com/influxdata/influxdb/issues/7226): Fix database locked up when deleting shards +- [#7196](https://github.com/influxdata/influxdb/issues/7196): Fix mmap dereferencing, fixes #7183, #7180 +- [#7177](https://github.com/influxdata/influxdb/issues/7177): Fix base64 encoding issue with /debug/vars stats. +- [#7161](https://github.com/influxdata/influxdb/issues/7161): Drop measurement causes cache max memory exceeded error. +- [#7152](https://github.com/influxdata/influxdb/issues/7152): Decrement number of measurements only once when deleting the last series from a measurement. +- [#7053](https://github.com/influxdata/influxdb/issues/7053): Delete statement returns an error when retention policy or database is specified +- [#7013](https://github.com/influxdata/influxdb/issues/7013): Fix the dollar sign so it properly handles reserved keywords. +- [#2792](https://github.com/influxdata/influxdb/issues/2792): Exceeding max retention policy duration gives incorrect error message +- [#1834](https://github.com/influxdata/influxdb/issues/1834): Drop time when used as a tag or field key. 
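+
+As a rough illustration of the query-language additions above, the sketch below issues a `cumulative_sum()` / `fill(linear)` query through the Go client v2 vendored by this change. The server address, the `mydb` database, and the `cpu` measurement are assumptions made for the example only:
+
+```go
+package main
+
+import (
+	"fmt"
+	"log"
+
+	client "github.com/influxdata/influxdb/client/v2"
+)
+
+func main() {
+	// Connect to a local InfluxDB instance (address assumed for this sketch).
+	c, err := client.NewHTTPClient(client.HTTPConfig{Addr: "http://localhost:8086"})
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer c.Close()
+
+	// cumulative_sum() wraps an aggregate and fill(linear) interpolates
+	// missing windows; both were added in 1.1.0.
+	q := client.NewQuery(
+		"SELECT cumulative_sum(mean(value)) FROM cpu WHERE time > now() - 1h GROUP BY time(10m) fill(linear)",
+		"mydb", "ns")
+	resp, err := c.Query(q)
+	if err != nil {
+		log.Fatal(err)
+	}
+	if resp.Error() != nil {
+		log.Fatal(resp.Error())
+	}
+	for _, result := range resp.Results {
+		fmt.Println(result.Series)
+	}
+}
+```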
+
+v1.0.2 [2016-10-05]
+-------------------
+
+### Bugfixes
+
+- [#7391](https://github.com/influxdata/influxdb/issues/7391): Fix RLE integer decoding producing negative numbers
+- [#7335](https://github.com/influxdata/influxdb/pull/7335): Avoid stat syscall when planning compactions
+- [#7330](https://github.com/influxdata/influxdb/issues/7330): Subscription data loss under high write load
+- [#7150](https://github.com/influxdata/influxdb/issues/7150): Do not automatically reset the shard duration when using ALTER RETENTION POLICY
+- [#5878](https://github.com/influxdata/influxdb/issues/5878): Ensure correct shard groups created when retention policy has been altered.
+
+v1.0.1 [2016-09-26]
+-------------------
+
+### Bugfixes
+
+- [#7315](https://github.com/influxdata/influxdb/issues/7315): Prevent users from manually using system queries since incorrect use would result in a panic.
+- [#7299](https://github.com/influxdata/influxdb/issues/7299): Ensure fieldsCreated stat available in shard measurement.
+- [#7272](https://github.com/influxdata/influxdb/issues/7272): Report cmdline and memstats in /debug/vars.
+- [#7271](https://github.com/influxdata/influxdb/issues/7271): Fixing typo within example configuration file. Thanks @andyfeller!
+- [#7270](https://github.com/influxdata/influxdb/issues/7270): Implement time math for lazy time literals.
+- [#7226](https://github.com/influxdata/influxdb/issues/7226): Fix database locked up when deleting shards
+- [#7110](https://github.com/influxdata/influxdb/issues/7110): Skip past points at the same time in derivative call within a merged series.
+- [#6846](https://github.com/influxdata/influxdb/issues/6846): Read an invalid JSON response as an error in the influx client.
+
+v1.0.0 [2016-09-08]
+-------------------
+
+### Release Notes
+
+### Breaking changes
+
+- `max-series-per-database` was added with a default of 1M but can be disabled by setting it to `0`. Existing databases with series that exceed this limit will continue to load but writes that would create new series will fail.
+- Config option `[cluster]` has been replaced with `[coordinator]`
+- Support for config options `[collectd]` and `[opentsdb]` has been removed; use `[[collectd]]` and `[[opentsdb]]` instead.
+- Config option `data-logging-enabled` within the `[data]` section has been renamed to `trace-logging-enabled`, and defaults to `false`.
+- The keywords `IF`, `EXISTS`, and `NOT` were removed for this release. This means you no longer need to specify `IF NOT EXISTS` for `CREATE DATABASE` or `IF EXISTS` for `DROP DATABASE`. If these are specified, a query parse error is returned.
+- The Shard `writePointsFail` stat has been renamed to `writePointsErr` for consistency with other stats.
+
+With this release, the systemd configuration files for InfluxDB will use the system-configured default for logging and will no longer write files to `/var/log/influxdb` by default. On most systems, the logs will be directed to the systemd journal and can be accessed by `journalctl -u influxdb.service`. Consult the systemd journald documentation for configuring journald.
+
+### Features
+
+- [#7199](https://github.com/influxdata/influxdb/pull/7199): Add mode function. Thanks @agaurav.
+- [#7194](https://github.com/influxdata/influxdb/issues/7194): Support negative timestamps for the query engine.
+- [#7172](https://github.com/influxdata/influxdb/pull/7172): Write path stats
+- [#7095](https://github.com/influxdata/influxdb/pull/7095): Add MaxSeriesPerDatabase config setting.
+- [#7065](https://github.com/influxdata/influxdb/issues/7065): Remove IF EXISTS/IF NOT EXISTS from influxql language. +- [#7050](https://github.com/influxdata/influxdb/pull/7050): Update go package library dependencies. +- [#7046](https://github.com/influxdata/influxdb/pull/7046): Add tsm file export to influx_inspect tool. +- [#7011](https://github.com/influxdata/influxdb/issues/7011): Create man pages for commands. +- [#6959](https://github.com/influxdata/influxdb/issues/6959): Return 403 Forbidden when authentication succeeds but authorization fails. +- [#6938](https://github.com/influxdata/influxdb/issues/6938): Added favicon +- [#6928](https://github.com/influxdata/influxdb/issues/6928): Run continuous query for multiple buckets rather than one per bucket. +- [#6909](https://github.com/influxdata/influxdb/issues/6909): Log the CQ execution time when continuous query logging is enabled. +- [#6900](https://github.com/influxdata/influxdb/pull/6900): Trim BOM from Windows Notepad-saved config files. +- [#6889](https://github.com/influxdata/influxdb/pull/6889): Update help and remove unused config options from the configuration file. +- [#6820](https://github.com/influxdata/influxdb/issues/6820): Add NodeID to execution options +- [#6812](https://github.com/influxdata/influxdb/pull/6812): Make httpd logger closer to Common (& combined) Log Format. +- [#6805](https://github.com/influxdata/influxdb/issues/6805): Allow any variant of the help option to trigger the help. +- [#6713](https://github.com/influxdata/influxdb/pull/6713): Reduce allocations during query parsing. +- [#6686](https://github.com/influxdata/influxdb/pull/6686): Optimize timestamp run-length decoding +- [#6664](https://github.com/influxdata/influxdb/pull/6664): Adds monitoring statistic for on-disk shard size. +- [#6655](https://github.com/influxdata/influxdb/issues/6655): Add HTTP(s) based subscriptions. +- [#6654](https://github.com/influxdata/influxdb/pull/6654): Add new HTTP statistics to monitoring +- [#6623](https://github.com/influxdata/influxdb/pull/6623): Speed up drop database +- [#6621](https://github.com/influxdata/influxdb/pull/6621): Add Holt-Winter forecasting function. +- [#6609](https://github.com/influxdata/influxdb/pull/6609): Add support for JWT token authentication. +- [#6593](https://github.com/influxdata/influxdb/pull/6593): Add ability to create snapshots of shards. +- [#6585](https://github.com/influxdata/influxdb/pull/6585): Parallelize iterators +- [#6559](https://github.com/influxdata/influxdb/issues/6559): Teach the http service how to enforce connection limits. +- [#6519](https://github.com/influxdata/influxdb/issues/6519): Support cast syntax for selecting a specific type. +- [#6507](https://github.com/influxdata/influxdb/issues/6507): Refactor monitor service to avoid expvar and write monitor statistics on a truncated time interval. +- [#5906](https://github.com/influxdata/influxdb/issues/5906): Dynamically update the documentation link in the admin UI. +- [#5750](https://github.com/influxdata/influxdb/issues/5750): Support wildcards in aggregate functions. +- [#5655](https://github.com/influxdata/influxdb/issues/5655): Support specifying a retention policy for the graphite service. +- [#5500](https://github.com/influxdata/influxdb/issues/5500): Add extra trace logging to tsm engine. +- [#5499](https://github.com/influxdata/influxdb/issues/5499): Add stats and diagnostics to the TSM engine. 
+- [#4532](https://github.com/influxdata/influxdb/issues/4532): Support regex selection in SHOW TAG VALUES for the key. +- [#3733](https://github.com/influxdata/influxdb/issues/3733): Modify the default retention policy name and make it configurable. +- [#3541](https://github.com/influxdata/influxdb/issues/3451): Update SHOW FIELD KEYS to return the field type with the field key. +- [#2926](https://github.com/influxdata/influxdb/issues/2926): Support bound parameters in the parser. +- [#1310](https://github.com/influxdata/influxdb/issues/1310): Add https-private-key option to httpd config. +- [#1110](https://github.com/influxdata/influxdb/issues/1110): Support loading a folder for collectd typesdb files. + +### Bugfixes + +- [#7243](https://github.com/influxdata/influxdb/issues/7243): Optimize queries that compare a tag value to an empty string. +- [#7240](https://github.com/influxdata/influxdb/issues/7240): Allow blank lines in the line protocol input. +- [#7225](https://github.com/influxdata/influxdb/issues/7225): runtime: goroutine stack exceeds 1000000000-byte limit +- [#7218](https://github.com/influxdata/influxdb/issues/7218): Fix alter retention policy when all options are used. +- [#7127](https://github.com/influxdata/influxdb/pull/7127): Concurrent series limit +- [#7125](https://github.com/influxdata/influxdb/pull/7125): Ensure gzip writer is closed in influx_inspect export +- [#7119](https://github.com/influxdata/influxdb/pull/7119): Fix CREATE DATABASE when dealing with default values. +- [#7088](https://github.com/influxdata/influxdb/pull/7088): Fix UDP pointsRx being incremented twice. +- [#7084](https://github.com/influxdata/influxdb/pull/7084): Tombstone memory improvements +- [#7081](https://github.com/influxdata/influxdb/issues/7081): Hardcode auto generated RP names to autogen +- [#7080](https://github.com/influxdata/influxdb/pull/7080): Ensure IDs can't clash when managing Continuous Queries. +- [#7074](https://github.com/influxdata/influxdb/issues/7074): Continuous full compactions +- [#7043](https://github.com/influxdata/influxdb/pull/7043): Remove limiter from walkShards +- [#7032](https://github.com/influxdata/influxdb/pull/7032): Copy tags in influx_stress to avoid a concurrent write panic on a map. +- [#7028](https://github.com/influxdata/influxdb/pull/7028): Do not run continuous queries that have no time span. +- [#7025](https://github.com/influxdata/influxdb/issues/7025): Move the CQ interval by the group by offset. +- [#6990](https://github.com/influxdata/influxdb/issues/6990): Fix panic parsing empty key +- [#6986](https://github.com/influxdata/influxdb/pull/6986): update connection settings when changing hosts in cli. +- [#6968](https://github.com/influxdata/influxdb/issues/6968): Always use the demo config when outputting a new config. +- [#6965](https://github.com/influxdata/influxdb/pull/6965): Minor improvements to init script. Removes sysvinit-utils as package dependency. +- [#6952](https://github.com/influxdata/influxdb/pull/6952): Fix compaction planning with large TSM files +- [#6946](https://github.com/influxdata/influxdb/issues/6946): Duplicate data for the same timestamp +- [#6942](https://github.com/influxdata/influxdb/pull/6942): Fix panic: truncate the slice when merging the caches. +- [#6934](https://github.com/influxdata/influxdb/pull/6934): Fix regex binary encoding for a measurement. +- [#6911](https://github.com/influxdata/influxdb/issues/6911): Fix fill(previous) when used with math operators. 
+- [#6883](https://github.com/influxdata/influxdb/pull/6883): Rename dumptsmdev to dumptsm in influx_inspect. +- [#6882](https://github.com/influxdata/influxdb/pull/6882): Remove a double lock in the tsm1 index writer. +- [#6869](https://github.com/influxdata/influxdb/issues/6869): Remove FieldCodec from tsdb package. +- [#6864](https://github.com/influxdata/influxdb/pull/6864): Allow a non-admin to call "use" for the influx cli. +- [#6859](https://github.com/influxdata/influxdb/issues/6859): Set the condition cursor instead of aux iterator when creating a nil condition cursor. +- [#6855](https://github.com/influxdata/influxdb/pull/6855): Update `stress/v2` to work with clusters, ssl, and username/password auth. Code cleanup +- [#6850](https://github.com/influxdata/influxdb/pull/6850): Modify the max nanosecond time to be one nanosecond less. +- [#6835](https://github.com/influxdata/influxdb/pull/6835): Include sysvinit-tools as an rpm dependency. +- [#6834](https://github.com/influxdata/influxdb/pull/6834): Add port to all graphite log output to help with debugging multiple endpoints +- [#6829](https://github.com/influxdata/influxdb/issues/6829): Fix panic: runtime error: index out of range +- [#6824](https://github.com/influxdata/influxdb/issues/6824): Remove systemd output redirection. +- [#6819](https://github.com/influxdata/influxdb/issues/6819): Database unresponsive after DROP MEASUREMENT +- [#6796](https://github.com/influxdata/influxdb/issues/6796): Out of Memory Error when Dropping Measurement +- [#6771](https://github.com/influxdata/influxdb/issues/6771): Fix the point validation parser to identify and sort tags correctly. +- [#6760](https://github.com/influxdata/influxdb/issues/6760): Prevent panic in concurrent auth cache write +- [#6756](https://github.com/influxdata/influxdb/issues/6756): Set X-Influxdb-Version header on every request (even 404 requests). +- [#6753](https://github.com/influxdata/influxdb/issues/6753): Prevent panic if there are no values. +- [#6738](https://github.com/influxdata/influxdb/issues/6738): Time sorting broken with overwritten points +- [#6727](https://github.com/influxdata/influxdb/issues/6727): queries with strings that look like dates end up with date types, not string types +- [#6720](https://github.com/influxdata/influxdb/issues/6720): Concurrent map read write panic. Thanks @arussellsaw +- [#6708](https://github.com/influxdata/influxdb/issues/6708): Drop writes from before the retention policy time window. +- [#6702](https://github.com/influxdata/influxdb/issues/6702): Fix SELECT statement required privileges. +- [#6701](https://github.com/influxdata/influxdb/issues/6701): Filter out sources that do not match the shard database/retention policy. +- [#6693](https://github.com/influxdata/influxdb/pull/6693): Truncate the shard group end time if it exceeds MaxNanoTime. +- [#6685](https://github.com/influxdata/influxdb/issues/6685): Batch SELECT INTO / CQ writes +- [#6683](https://github.com/influxdata/influxdb/issues/6683): Fix compaction planning re-compacting large TSM files +- [#6676](https://github.com/influxdata/influxdb/issues/6676): Ensures client sends correct precision when inserting points. +- [#6672](https://github.com/influxdata/influxdb/issues/6672): Accept points with trailing whitespace. +- [#6663](https://github.com/influxdata/influxdb/issues/6663): Fixing panic in SHOW FIELD KEYS. +- [#6661](https://github.com/influxdata/influxdb/issues/6661): Disable limit optimization when using an aggregate. 
+- [#6652](https://github.com/influxdata/influxdb/issues/6652): Fix panic: interface conversion: tsm1.Value is \*tsm1.StringValue, not \*tsm1.FloatValue +- [#6650](https://github.com/influxdata/influxdb/issues/6650): Data race when dropping a database immediately after writing to it +- [#6648](https://github.com/influxdata/influxdb/issues/6648): Make sure admin exists before authenticating query. +- [#6644](https://github.com/influxdata/influxdb/issues/6644): Print the query executor's stack trace on a panic to the log. +- [#6641](https://github.com/influxdata/influxdb/issues/6641): Fix read tombstones: EOF +- [#6629](https://github.com/influxdata/influxdb/issues/6629): query-log-enabled in config not ignored anymore. +- [#6624](https://github.com/influxdata/influxdb/issues/6624): Ensure clients requesting gzip encoded bodies don't receive empty body +- [#6618](https://github.com/influxdata/influxdb/pull/6618): Optimize shard loading +- [#6611](https://github.com/influxdata/influxdb/issues/6611): Queries slow down hundreds times after overwriting points +- [#6607](https://github.com/influxdata/influxdb/issues/6607): SHOW TAG VALUES accepts != and !~ in WHERE clause. +- [#6604](https://github.com/influxdata/influxdb/pull/6604): Remove old cluster code +- [#6599](https://github.com/influxdata/influxdb/issues/6599): Ensure that future points considered in SHOW queries. +- [#6595](https://github.com/influxdata/influxdb/issues/6595): Fix full compactions conflicting with level compactions +- [#6557](https://github.com/influxdata/influxdb/issues/6557): Overwriting points on large series can cause memory spikes during compactions +- [#6543](https://github.com/influxdata/influxdb/issues/6543): Fix parseFill to check for fill ident before attempting to parse an expression. +- [#6406](https://github.com/influxdata/influxdb/issues/6406): Max index entries exceeded +- [#6250](https://github.com/influxdata/influxdb/issues/6250): Slow startup time +- [#6235](https://github.com/influxdata/influxdb/issues/6235): Fix measurement field panic in tsm1 engine. +- [#5501](https://github.com/influxdata/influxdb/issues/5501): Queries against files that have just been compacted need to point to new files +- [#2048](https://github.com/influxdata/influxdb/issues/2048): Check that retention policies exist before creating CQ + +v0.13.0 [2016-05-12] +-------------------- + +### Release Notes + +With this release InfluxDB is moving to Go v1.6. + +### Features + +- [#6534](https://github.com/influxdata/influxdb/pull/6534): Move to Go v1.6.2 (over Go v1.4.3) +- [#6533](https://github.com/influxdata/influxdb/issues/6533): Optimize SHOW SERIES +- [#6522](https://github.com/influxdata/influxdb/pull/6522): Dump TSM files to line protocol +- [#6502](https://github.com/influxdata/influxdb/pull/6502): Add ability to copy shard via rpc calls. Remove deprecated copier service. +- [#6494](https://github.com/influxdata/influxdb/issues/6494): Support booleans for min() and max(). +- [#6484](https://github.com/influxdata/influxdb/pull/6484): Query language support for DELETE +- [#6483](https://github.com/influxdata/influxdb/pull/6483): Delete series support for TSM +- [#6444](https://github.com/influxdata/influxdb/pull/6444): Allow setting the config path through an environment variable and default config path. +- [#6429](https://github.com/influxdata/influxdb/issues/6429): Log slow queries if they pass a configurable threshold. +- [#6394](https://github.com/influxdata/influxdb/pull/6394): Allow time math with integer timestamps. 
+- [#6334](https://github.com/influxdata/influxdb/pull/6334): Allow environment variables to be set per input type. +- [#6292](https://github.com/influxdata/influxdb/issues/6292): Allow percentile to be used as a selector. +- [#6290](https://github.com/influxdata/influxdb/issues/6290): Add POST /query endpoint and warning messages for using GET with write operations. +- [#6263](https://github.com/influxdata/influxdb/pull/6263): Reduce UDP Service allocation size. +- [#6237](https://github.com/influxdata/influxdb/issues/6237): Enable continuous integration testing on Windows platform via AppVeyor. Thanks @mvadu +- [#6228](https://github.com/influxdata/influxdb/pull/6228): Support for multiple listeners for collectd and OpenTSDB inputs. +- [#6213](https://github.com/influxdata/influxdb/pull/6213): Make logging output location more programmatically configurable. +- [#5707](https://github.com/influxdata/influxdb/issues/5707): Return a deprecated message when IF NOT EXISTS is used. +- [#5502](https://github.com/influxdata/influxdb/issues/5502): Add checksum verification to TSM inspect tool +- [#4675](https://github.com/influxdata/influxdb/issues/4675): Allow derivative() function to be used with ORDER BY desc. +- [#3558](https://github.com/influxdata/influxdb/issues/3558): Support field math inside a WHERE clause. +- [#3247](https://github.com/influxdata/influxdb/issues/3247): Implement derivatives across intervals for aggregate queries. +- [#3166](https://github.com/influxdata/influxdb/issues/3166): Sort the series keys inside of a tag set so output is deterministic. +- [#2074](https://github.com/influxdata/influxdb/issues/2074): Support offset argument in the GROUP BY time(...) call. +- [#1856](https://github.com/influxdata/influxdb/issues/1856): Add `elapsed` function that returns the time delta between subsequent points. + +### Bugfixes + +- [#6505](https://github.com/influxdata/influxdb/issues/6505): Add regex literal to InfluxQL spec for FROM clause. +- [#6496](https://github.com/influxdata/influxdb/issues/6496): Fix parsing escaped series key when loading database index +- [#6495](https://github.com/influxdata/influxdb/issues/6495): Fix aggregate returns when data is missing from some shards. +- [#6491](https://github.com/influxdata/influxdb/pull/6491): Fix the CLI not to enter an infinite loop when the liner has an error. +- [#6480](https://github.com/influxdata/influxdb/issues/6480): Fix SHOW statements' rewriting bug +- [#6477](https://github.com/influxdata/influxdb/pull/6477): Don't catch SIGQUIT or SIGHUP signals. +- [#6470](https://github.com/influxdata/influxdb/pull/6470): Remove SHOW SERVERS & DROP SERVER support +- [#6468](https://github.com/influxdata/influxdb/issues/6468): Panic with truncated wal segments +- [#6462](https://github.com/influxdata/influxdb/pull/6462): Add safer locking to CreateFieldIfNotExists +- [#6458](https://github.com/influxdata/influxdb/pull/6458): Make it clear when the CLI version is unknown. +- [#6457](https://github.com/influxdata/influxdb/issues/6457): Retention policy cleanup does not remove series +- [#6439](https://github.com/influxdata/influxdb/issues/6439): Overwriting points returning old values +- [#6427](https://github.com/influxdata/influxdb/pull/6427): Fix setting uint config options via env vars +- [#6425](https://github.com/influxdata/influxdb/pull/6425): Close idle tcp connections in HTTP client to prevent tcp conn leak. +- [#6419](https://github.com/influxdata/influxdb/issues/6419): Fix panic in transform iterator on division. 
@thbourlove +- [#6398](https://github.com/influxdata/influxdb/issues/6398): Fix CREATE RETENTION POLICY parsing so it doesn't consume tokens it shouldn't. +- [#6382](https://github.com/influxdata/influxdb/pull/6382): Removed dead code from the old query engine. +- [#6361](https://github.com/influxdata/influxdb/pull/6361): Fix cluster/pool release of connection +- [#6296](https://github.com/influxdata/influxdb/issues/6296): Allow the implicit time field to be renamed again. +- [#6294](https://github.com/influxdata/influxdb/issues/6294): Fix panic running influx_inspect info. +- [#6287](https://github.com/influxdata/influxdb/issues/6287): Fix data race in Influx Client. +- [#6283](https://github.com/influxdata/influxdb/pull/6283): Fix GROUP BY tag to produce consistent results when a series has no tags. +- [#6277](https://github.com/influxdata/influxdb/pull/6277): Fix deadlock in tsm1/file_store +- [#6270](https://github.com/influxdata/influxdb/issues/6270): tsm1 query engine alloc reduction +- [#6261](https://github.com/influxdata/influxdb/issues/6261): High CPU usage and slow query with DISTINCT +- [#6252](https://github.com/influxdata/influxdb/pull/6252): Remove TSDB listener accept message @simnv +- [#6202](https://github.com/influxdata/influxdb/pull/6202): Check default SHARD DURATION when recreating the same database. +- [#6109](https://github.com/influxdata/influxdb/issues/6109): Cache maximum memory size exceeded on startup +- [#5890](https://github.com/influxdata/influxdb/issues/5890): Return the time with a selector when there is no group by interval. +- [#3883](https://github.com/influxdata/influxdb/issues/3883): Improve query sanitization to prevent a password leak in the logs. +- [#3773](https://github.com/influxdata/influxdb/issues/3773): Support empty tags for all WHERE equality operations. +- [#3369](https://github.com/influxdata/influxdb/issues/3369): Detect when a timer literal will overflow or underflow the query engine. + +v0.12.2 [2016-04-20] +-------------------- + +### Bugfixes + +- [#6431](https://github.com/influxdata/influxdb/pull/6431): Fix panic in transform iterator on division. @thbourlove +- [#6414](https://github.com/influxdata/influxdb/pull/6414): Send "Connection: close" header for queries. +- [#6413](https://github.com/influxdata/influxdb/pull/6413): Prevent goroutine leak from persistent http connections. Thanks @aaronknister. +- [#6383](https://github.com/influxdata/influxdb/pull/6383): Recover from a panic during query execution. +- [#6379](https://github.com/influxdata/influxdb/issues/6379): Validate the first argument to percentile() is a variable. +- [#6271](https://github.com/influxdata/influxdb/issues/6271): Fixed deadlock in tsm1 file store. + +v0.12.1 [2016-04-08] +-------------------- + +### Bugfixes + +- [#6257](https://github.com/influxdata/influxdb/issues/6257): CreateShardGroup was incrementing meta data index even when it was idempotent. +- [#6248](https://github.com/influxdata/influxdb/issues/6248): Panic using incorrectly quoted "queries" field key. +- [#6229](https://github.com/influxdata/influxdb/issues/6229): Fixed aggregate queries with no GROUP BY to include the end time. +- [#6225](https://github.com/influxdata/influxdb/pull/6225): Refresh admin assets. +- [#6223](https://github.com/influxdata/influxdb/issues/6223): Failure to start/run on Windows. Thanks @mvadu +- [#6206](https://github.com/influxdata/influxdb/issues/6206): Handle nil values from the tsm1 cursor correctly. 
+- [#6190](https://github.com/influxdata/influxdb/pull/6190): Fix race on measurementFields.
+
+v0.12.0 [2016-04-05]
+--------------------
+
+### Release Notes
+
+Upgrading to this release requires a little more than just installing the new binary and starting it up. The upgrade process is very quick and should only require a minute of downtime or less. Details on [upgrading to 0.12 are here](https://docs.influxdata.com/influxdb/v0.12/administration/upgrading/).
+
+This release removes all of the old clustering code. It operates as a standalone server. For a free open source HA setup see the [InfluxDB Relay](https://github.com/influxdata/influxdb-relay).
+
+### Features
+
+- [#6193](https://github.com/influxdata/influxdb/pull/6193): Fix TypeError when processing empty results in admin UI. Thanks @jonseymour!
+- [#6166](https://github.com/influxdata/influxdb/pull/6166): Teach influxdb client how to use chunked queries and use in the CLI.
+- [#6158](https://github.com/influxdata/influxdb/pull/6158): Update influxd to detect an upgrade from `0.11` to `0.12`. Minor restore bug fixes.
+- [#6149](https://github.com/influxdata/influxdb/pull/6149): Kill running queries when server is shut down.
+- [#6148](https://github.com/influxdata/influxdb/pull/6148): Build script is now compatible with Python 3. Added ability to create detached signatures for packages. Build script now uses Python logging facility for messages.
+- [#6116](https://github.com/influxdata/influxdb/pull/6116): Allow `httpd` service to be extensible for routes
+- [#6115](https://github.com/influxdata/influxdb/issues/6115): Support chunking query results mid-series. Limit non-chunked output. (See the sketch after this list.)
+- [#6112](https://github.com/influxdata/influxdb/issues/6112): Implement simple moving average function.
+- [#6111](https://github.com/influxdata/influxdb/pull/6111): Add ability to build static assets. Improved handling of TAR and ZIP package outputs.
+- [#6102](https://github.com/influxdata/influxdb/issues/6102): Limit series count in selection
+- [#6079](https://github.com/influxdata/influxdb/issues/6079): Limit the maximum number of concurrent queries.
+- [#6078](https://github.com/influxdata/influxdb/issues/6078): Limit bucket count in selection.
+- [#6077](https://github.com/influxdata/influxdb/issues/6077): Limit point count in selection.
+- [#6075](https://github.com/influxdata/influxdb/issues/6075): Limit the maximum running time of a query.
+- [#6073](https://github.com/influxdata/influxdb/pull/6073): Iterator stats
+- [#6060](https://github.com/influxdata/influxdb/pull/6060): Add configurable shard duration to retention policies
+- [#6025](https://github.com/influxdata/influxdb/pull/6025): Remove deprecated JSON write path.
+- [#6012](https://github.com/influxdata/influxdb/pull/6012): Add DROP SHARD support.
+- [#5939](https://github.com/influxdata/influxdb/issues/5939): Support viewing and killing running queries.
+- [#5744](https://github.com/influxdata/influxdb/issues/5744): Add integer literal support to the query language.
+- [#5372](https://github.com/influxdata/influxdb/pull/5372): Faster shard loading
+- [#1825](https://github.com/influxdata/influxdb/issues/1825): Implement difference function.
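+
+The chunked-result items above come down to two query-string options on the `/query` endpoint, `chunked` and `chunk_size`. A minimal Go sketch follows, assuming a local InfluxDB listening on `:8086`; the database name, query, and chunk size are placeholders, not values taken from this changelog.
+
+```go
+package main
+
+import (
+	"io"
+	"net/http"
+	"net/url"
+	"os"
+)
+
+func main() {
+	// Ask the server to stream results back in chunks instead of
+	// buffering the whole series into a single JSON response.
+	params := url.Values{}
+	params.Set("db", "mydb")             // placeholder database
+	params.Set("q", "SELECT * FROM cpu") // placeholder query
+	params.Set("chunked", "true")        // enable chunked responses
+	params.Set("chunk_size", "10000")    // maximum points per chunk
+
+	resp, err := http.Get("http://localhost:8086/query?" + params.Encode())
+	if err != nil {
+		panic(err)
+	}
+	defer resp.Body.Close()
+
+	// Each chunk arrives as its own JSON document on the response body;
+	// here we simply stream the raw bytes to stdout.
+	if _, err := io.Copy(os.Stdout, resp.Body); err != nil {
+		panic(err)
+	}
+}
+```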
+
+### Bugfixes
+
+- [#6178](https://github.com/influxdata/influxdb/issues/6178): Ensure SHARD DURATION is checked when recreating a retention policy
+- [#6153](https://github.com/influxdata/influxdb/issues/6153): Check SHARD DURATION when recreating the same database
+- [#6152](https://github.com/influxdata/influxdb/issues/6152): Allow SHARD DURATION to be specified in isolation when creating a database
+- [#6140](https://github.com/influxdata/influxdb/issues/6140): Ensure Shard engine not accessed when closed.
+- [#6131](https://github.com/influxdata/influxdb/issues/6061): Fix write throughput regression with large number of measurements
+- [#6110](https://github.com/influxdata/influxdb/issues/6110): Fix for 0.9 upgrade path when using RPM
+- [#6094](https://github.com/influxdata/influxdb/issues/6094): Ensure CREATE RETENTION POLICY and CREATE CONTINUOUS QUERY are idempotent in the correct way.
+- [#6065](https://github.com/influxdata/influxdb/pull/6065): Wait for a process termination on influxdb restart @simnv
+- [#6061](https://github.com/influxdata/influxdb/issues/6061): [0.12 / master] POST to /write does not write points if request has header 'Content-Type: application/x-www-form-urlencoded'
+- [#5728](https://github.com/influxdata/influxdb/issues/5728): Properly handle semi-colons as part of the main query loop.
+- [#5554](https://github.com/influxdata/influxdb/issues/5554): Can't run in alpine linux
+- [#5252](https://github.com/influxdata/influxdb/issues/5252): Release tarballs contain specific attributes on '.'
+- [#5152](https://github.com/influxdata/influxdb/issues/5152): Fix where filters when a tag and a filter are combined with OR.
+
+v0.11.1 [2016-03-31]
+--------------------
+
+### Bugfixes
+
+- [#6168](https://github.com/influxdata/influxdb/pull/6168): Remove per measurement statistics
+- [#6129](https://github.com/influxdata/influxdb/pull/6129): Fix default continuous query lease host
+- [#6121](https://github.com/influxdata/influxdb/issues/6121): Fix panic: slice index out of bounds in TSM index
+- [#6092](https://github.com/influxdata/influxdb/issues/6092): Upgrading directly from 0.9.6.1 to 0.11.0 fails
+- [#3932](https://github.com/influxdata/influxdb/issues/3932): Invalid timestamp format should throw an error.
+
+v0.11.0 [2016-03-22]
+--------------------
+
+### Release Notes
+
+There were some important breaking changes in this release. Here's a list of the important things to know before upgrading:
+
+- [SHOW SERIES output has changed](https://github.com/influxdata/influxdb/pull/5937). See [new output in this test diff](https://github.com/influxdata/influxdb/pull/5937/files#diff-0cb24c2b7420b4db507ee3496c371845L263).
+- [SHOW TAG VALUES output has changed](https://github.com/influxdata/influxdb/pull/5853)
+- JSON write endpoint is disabled by default and will be removed in the next release. You can [turn it back on](https://github.com/influxdata/influxdb/pull/5512) in this release.
+- b1/bz1 shards are no longer supported. You must migrate all old shards to TSM using [the migration tool](https://github.com/influxdata/influxdb/blob/master/cmd/influx_tsm/README.md).
+- On queries to create databases, retention policies, and users, the default behavior has changed to create `IF NOT EXISTS`. If they already exist, no error will be returned.
+- On queries with a selector like `min`, `max`, `first`, and `last` the time returned will be the time for the bucket of the group by window.
[Selectors for the time for the specific point](https://github.com/influxdata/influxdb/issues/5926) will be added later. + +### Features + +- [#5994](https://github.com/influxdata/influxdb/issues/5994): Single server +- [#5862](https://github.com/influxdata/influxdb/pull/5862): Make Admin UI dynamically fetch both client and server versions +- [#5844](https://github.com/influxdata/influxdb/pull/5844): Tag TSM engine stats with database and retention policy +- [#5758](https://github.com/influxdata/influxdb/pull/5758): TSM engine stats for cache, WAL, and filestore. Thanks @jonseymour +- [#5737](https://github.com/influxdata/influxdb/pull/5737): Admin UI: Display results of multiple queries, not just the first query. Thanks @Vidhuran! +- [#5720](https://github.com/influxdata/influxdb/pull/5720): Admin UI: New button to generate permalink to queries +- [#5706](https://github.com/influxdata/influxdb/pull/5706): Cluster setup cleanup +- [#5691](https://github.com/influxdata/influxdb/pull/5691): Remove associated shard data when retention policies are dropped. +- [#5681](https://github.com/influxdata/influxdb/pull/5681): Stats: Add durations, number currently active to httpd and query executor +- [#5666](https://github.com/influxdata/influxdb/pull/5666): Manage dependencies with gdm +- [#5602](https://github.com/influxdata/influxdb/pull/5602): Simplify cluster startup for scripting and deployment +- [#5598](https://github.com/influxdata/influxdb/pull/5598): Client: Add Ping to v2 client @PSUdaemon +- [#5596](https://github.com/influxdata/influxdb/pull/5596): Build improvements for ARM architectures. Also removed `--goarm` and `--pkgarch` build flags. +- [#5593](https://github.com/influxdata/influxdb/issues/5593): Modify `SHOW TAG VALUES` output for the new query engine to normalize the output. +- [#5562](https://github.com/influxdata/influxdb/pull/5562): Graphite: Support matching fields multiple times (@chrusty) +- [#5550](https://github.com/influxdata/influxdb/pull/5550): Enabled golint for tsdb/engine/wal. @gabelev +- [#5541](https://github.com/influxdata/influxdb/pull/5541): Client: Support for adding custom TLS Config for HTTP client. +- [#5512](https://github.com/influxdata/influxdb/pull/5512): HTTP: Add config option to enable HTTP JSON write path which is now disabled by default. +- [#5419](https://github.com/influxdata/influxdb/pull/5419): Graphite: Support matching tags multiple times Thanks @m4ce +- [#5336](https://github.com/influxdata/influxdb/pull/5366): Enabled golint for influxql. @gabelev +- [#4299](https://github.com/influxdata/influxdb/pull/4299): Client: Reject uint64 Client.Point.Field values. Thanks @arussellsaw +- [#4125](https://github.com/influxdata/influxdb/pull/4125): Admin UI: Fetch and display server version on connect. Thanks @alexiri! 
+- [#2715](https://github.com/influxdata/influxdb/issues/2715): Support using field regex comparisons in the WHERE clause + +### Bugfixes + +- [#6042](https://github.com/influxdata/influxdb/issues/6042): CreateDatabase failure on Windows, regression from v0.11.0 RC @mvadu +- [#6006](https://github.com/influxdata/influxdb/pull/6006): Fix deadlock while running backups +- [#5965](https://github.com/influxdata/influxdb/issues/5965): InfluxDB panic crashes while parsing "-" as Float +- [#5963](https://github.com/influxdata/influxdb/pull/5963): Fix possible deadlock +- [#5949](https://github.com/influxdata/influxdb/issues/5949): Return error message when improper types are used in SELECT +- [#5937](https://github.com/influxdata/influxdb/pull/5937): Rewrite SHOW SERIES to use query engine +- [#5924](https://github.com/influxdata/influxdb/issues/5924): Missing data after using influx\_tsm +- [#5889](https://github.com/influxdata/influxdb/issues/5889): Fix writing partial TSM index when flush file fails +- [#5880](https://github.com/influxdata/influxdb/issues/5880): TCP connection closed after write (regression/change from 0.9.6) +- [#5865](https://github.com/influxdata/influxdb/issues/5865): Conversion to tsm fails with exceeds max index value +- [#5854](https://github.com/influxdata/influxdb/issues/5854): failures of tests in tsdb/engine/tsm1 when compiled with go master +- [#5842](https://github.com/influxdata/influxdb/issues/5842): Add SeriesList binary marshaling +- [#5841](https://github.com/influxdata/influxdb/pull/5841): Reduce tsm allocations by converting time.Time to int64 +- [#5835](https://github.com/influxdata/influxdb/issues/5835): Make CREATE USER default to IF NOT EXISTS +- [#5832](https://github.com/influxdata/influxdb/issues/5832): tsm: cache: need to check that snapshot has been sorted @jonseymour +- [#5814](https://github.com/influxdata/influxdb/issues/5814): Run CQs with the same name from different databases +- [#5787](https://github.com/influxdata/influxdb/pull/5787): HTTP: Add QueryAuthorizer instance to httpd service’s handler. @chris-ramon +- [#5754](https://github.com/influxdata/influxdb/issues/5754): Adding a node as meta only results in a data node also being registered +- [#5753](https://github.com/influxdata/influxdb/pull/5753): Ensures that drop-type commands work correctly in a cluster +- [#5724](https://github.com/influxdata/influxdb/issues/5724): influx\_tsm doesn't close file handles properly +- [#5719](https://github.com/influxdata/influxdb/issues/5719): Fix cache not deduplicating points +- [#5716](https://github.com/influxdata/influxdb/pull/5716): models: improve handling of points with empty field names or with no fields. 
+- [#5699](https://github.com/influxdata/influxdb/issues/5699): Fix potential thread safety issue in cache @jonseymour +- [#5696](https://github.com/influxdata/influxdb/issues/5696): Do not drop the database when creating with a retention policy +- [#5695](https://github.com/influxdata/influxdb/pull/5695): Remove meta servers from node.json +- [#5664](https://github.com/influxdata/influxdb/issues/5664): panic in model.Points.scanTo #5664 +- [#5656](https://github.com/influxdata/influxdb/issues/5656): influx\_tsm: panic during conversion +- [#5628](https://github.com/influxdata/influxdb/issues/5628): Crashed the server with a bad derivative query +- [#5624](https://github.com/influxdata/influxdb/pull/5624): Fix golint issues in client v2 package @PSUDaemon +- [#5610](https://github.com/influxdata/influxdb/issues/5610): Write into fully-replicated cluster is not replicated across all shards +- [#5606](https://github.com/influxdata/influxdb/issues/5606): TSM conversion reproducibly drops data silently +- [#5594](https://github.com/influxdata/influxdb/pull/5594): Fix missing url params on lease redirect - @oldmantaiter +- [#5590](https://github.com/influxdata/influxdb/pull/5590): Fix panic when dropping subscription for unknown retention policy. +- [#5557](https://github.com/influxdata/influxdb/issues/5630): Fixes panic when surrounding the select statement arguments in brackets +- [#5535](https://github.com/influxdata/influxdb/pull/5535): Update README for referring to Collectd +- [#5532](https://github.com/influxdata/influxdb/issues/5532): user passwords not changeable in cluster +- [#5510](https://github.com/influxdata/influxdb/pull/5510): Optimize ReducePercentile @bsideup +- [#5489](https://github.com/influxdata/influxdb/pull/5489): Fixes multiple issues causing tests to fail on windows. Thanks @runner-mei +- [#5376](https://github.com/influxdata/influxdb/pull/5376): Fix golint issues in models package. @nuss-justin +- [#5375](https://github.com/influxdata/influxdb/pull/5375): Lint tsdb and tsdb/engine package @nuss-justin +- [#5182](https://github.com/influxdata/influxdb/pull/5182): Graphite: Fix an issue where the default template would be used instead of a more specific one. Thanks @flisky +- [#4688](https://github.com/influxdata/influxdb/issues/4688): admin UI doesn't display results for some SHOW queries + +v0.10.3 [2016-03-09] +-------------------- + +### Bugfixes + +- [#5924](https://github.com/influxdata/influxdb/issues/5924): Missing data after using influx\_tsm +- [#5716](https://github.com/influxdata/influxdb/pull/5716): models: improve handling of points with empty field names or with no fields. +- [#5594](https://github.com/influxdata/influxdb/pull/5594): Fix missing url params on lease redirect - @oldmantaiter + +v0.10.2 [2016-03-03] +-------------------- + +### Bugfixes + +- [#5880](https://github.com/influxdata/influxdb/issues/5880): TCP connection closed after write (regression/change from 0.9.6) +- [#5865](https://github.com/influxdata/influxdb/issues/5865): Conversion to tsm fails with exceeds max index value +- [#5861](https://github.com/influxdata/influxdb/pull/5861): Fix panic when dropping subscription for unknown retention policy. 
+- [#5857](https://github.com/influxdata/influxdb/issues/5857): panic in tsm1.Values.Deduplicate
+- [#5832](https://github.com/influxdata/influxdb/issues/5832): tsm: cache: need to check that snapshot has been sorted @jonseymour
+- [#5719](https://github.com/influxdata/influxdb/issues/5719): Fix cache not deduplicating points
+- [#5699](https://github.com/influxdata/influxdb/issues/5699): Fix potential thread safety issue in cache @jonseymour
+
+v0.10.1 [2016-02-18]
+--------------------
+
+### Bugfixes
+
+- [#5724](https://github.com/influxdata/influxdb/issues/5724): influx\_tsm doesn't close file handles properly
+- [#5696](https://github.com/influxdata/influxdb/issues/5696): Do not drop the database when creating with a retention policy
+- [#5656](https://github.com/influxdata/influxdb/issues/5656): influx\_tsm: panic during conversion
+- [#5606](https://github.com/influxdata/influxdb/issues/5606): TSM conversion reproducibly drops data silently
+- [#5303](https://github.com/influxdata/influxdb/issues/5303): Protect against stateful mappers returning nothing in the raw executor
+
+v0.10.0 [2016-02-04]
+--------------------
+
+### Release Notes
+
+This release now uses the TSM storage engine. Old bz1 and b1 shards can still be read, but in a future release you will be required to migrate old shards to TSM. For new shards getting created, or new installations, the TSM storage engine will be used.
+
+This release also changes how clusters are set up. The config file has changed, so have a look at the new example. Also, upgrading a single node works, but for upgrading clusters, you'll need help from us. Send us a note at contact@influxdb.com if you need assistance upgrading a cluster.
+
+### Features
+
+- [#5565](https://github.com/influxdata/influxdb/pull/5565): Add configuration for time precision with UDP services. - @tpitale
+- [#5522](https://github.com/influxdata/influxdb/pull/5522): Optimize tsm1 cache to reduce memory consumption and GC scan time.
+- [#5460](https://github.com/influxdata/influxdb/pull/5460): Prevent exponential growth in CLI history. Thanks @sczk!
+- [#5459](https://github.com/influxdata/influxdb/pull/5459): Create `/status` endpoint for health checks.
+- [#5226](https://github.com/influxdata/influxdb/pull/5226): b\*1 to tsm1 shard conversion tool.
+- [#5224](https://github.com/influxdata/influxdb/pull/5224): Online backup/incremental backup. Restore (for TSM).
+- [#5201](https://github.com/influxdata/influxdb/pull/5201): Allow max UDP buffer size to be configurable. Thanks @sebito91
+- [#5194](https://github.com/influxdata/influxdb/pull/5194): Custom continuous query options per query rather than per node.
+- [#5183](https://github.com/influxdata/influxdb/pull/5183): CLI confirms database exists when USE executed. Thanks @pires
+
+### Bugfixes
+
+- [#5505](https://github.com/influxdata/influxdb/issues/5505): Clear authCache in meta.Client when password changes.
+- [#5504](https://github.com/influxdata/influxdb/issues/5504): create retention policy on nonexistent DB crashes InfluxDB
+- [#5479](https://github.com/influxdata/influxdb/issues/5479): Bringing up a node as a meta only node causes panic
+- [#5478](https://github.com/influxdata/influxdb/issues/5478): panic: interface conversion: interface is float64, not int64
+- [#5475](https://github.com/influxdata/influxdb/issues/5475): Ensure appropriate exit code returned for non-interactive use of CLI.
+- [#5469](https://github.com/influxdata/influxdb/issues/5469): Conversion from bz1 to tsm doesn't work as described
+- [#5455](https://github.com/influxdata/influxdb/issues/5455): panic: runtime error: slice bounds out of range when loading corrupted wal segment
+- [#5449](https://github.com/influxdata/influxdb/issues/5449): panic when dropping collectd points
+- [#5382](https://github.com/influxdata/influxdb/pull/5382): Fixes some escaping bugs with tag keys and values.
+- [#5350](https://github.com/influxdata/influxdb/issues/5350): 'influxd backup' should create backup directory
+- [#5349](https://github.com/influxdata/influxdb/issues/5349): Validate metadata blob for 'influxd backup'
+- [#5264](https://github.com/influxdata/influxdb/pull/5264): Fix panic: runtime error: slice bounds out of range
+- [#5262](https://github.com/influxdata/influxdb/issues/5262): Fix a panic when a tag value was empty.
+- [#5244](https://github.com/influxdata/influxdb/issues/5244): panic: ensure it's safe to close engine multiple times.
+- [#5193](https://github.com/influxdata/influxdb/issues/5193): Missing data a minute before current time. Comes back later.
+- [#5186](https://github.com/influxdata/influxdb/pull/5186): Fix database creation with retention statement parsing. Fixes [#5077](https://github.com/influxdata/influxdb/issues/5077). Thanks @pires
+- [#5178](https://github.com/influxdata/influxdb/pull/5178): SHOW FIELD shouldn't consider VALUES to be valid. Thanks @pires
+- [#5158](https://github.com/influxdata/influxdb/pull/5158): Fix panic when writing invalid input to the line protocol.
+- [#5129](https://github.com/influxdata/influxdb/pull/5129): Ensure precision flag is respected by CLI. Thanks @e-dard
+- [#5079](https://github.com/influxdata/influxdb/pull/5079): Ensure tsm WAL encoding buffer can handle large batches.
+- [#5078](https://github.com/influxdata/influxdb/issues/5078): influx non-interactive mode - INSERT must be handled. Thanks @grange74
+- [#5064](https://github.com/influxdata/influxdb/pull/5064): Full support for parentheses in SELECT clause, fixes [#5054](https://github.com/influxdata/influxdb/issues/5054). Thanks @mengjinglei
+- [#5059](https://github.com/influxdata/influxdb/pull/5059): Fix unmarshal of database error by client code. Thanks @farshidtz
+- [#5042](https://github.com/influxdata/influxdb/issues/5042): Count with fill(none) will drop 0 valued intervals.
+- [#5016](https://github.com/influxdata/influxdb/pull/5016): Don't panic if Meta data directory not writable. Thanks @oiooj
+- [#4940](https://github.com/influxdata/influxdb/pull/4940): Fix distributed aggregate query error. Thanks @li-ang
+- [#4735](https://github.com/influxdata/influxdb/issues/4735): Fix panic when merging empty results.
+- [#4622](https://github.com/influxdata/influxdb/issues/4622): Fix panic when passing too large of timestamps to OpenTSDB input.
+- [#4303](https://github.com/influxdata/influxdb/issues/4303): Don't drop measurements or series from multiple databases.
+
+v0.9.6 [2015-12-09]
+-------------------
+
+### Release Notes
+
+This release has an updated design and implementation of the TSM storage engine. If you had been using tsm1 as your storage engine prior to this release (either 0.9.5.x or 0.9.6 nightly builds), you will have to start with a fresh database.
+
+If you had TSM configuration options set, those have been updated.
See the updated sample configuration for more details: https://github.com/influxdata/influxdb/blob/master/etc/config.sample.toml#L98-L125
+
+### Features
+
+- [#4790](https://github.com/influxdata/influxdb/pull/4790): Allow openTSDB point-level error logging to be disabled
+- [#4728](https://github.com/influxdata/influxdb/pull/4728): SHOW SHARD GROUPS. By @mateuszdyminski
+- [#4841](https://github.com/influxdata/influxdb/pull/4841): Improve point parsing speed. Lint models package. Thanks @e-dard!
+- [#4889](https://github.com/influxdata/influxdb/pull/4889): Implement close notifier and timeout on executors
+- [#2676](https://github.com/influxdata/influxdb/issues/2676), [#4866](https://github.com/influxdata/influxdb/pull/4866): Add support for specifying default retention policy in database create. Thanks @pires!
+- [#4848](https://github.com/influxdata/influxdb/pull/4848): Added framework for cluster integration testing.
+- [#4872](https://github.com/influxdata/influxdb/pull/4872): Add option to disable logging for meta service.
+- [#4787](https://github.com/influxdata/influxdb/issues/4787): Now builds on Solaris
+
+### Bugfixes
+
+- [#4849](https://github.com/influxdata/influxdb/issues/4849): Derivative works with count, mean, median, sum, first, last, max, min, and percentile.
+- [#4984](https://github.com/influxdata/influxdb/pull/4984): Allow math on fields, fixes regression. Thanks @mengjinglei
+- [#4666](https://github.com/influxdata/influxdb/issues/4666): Fix panic in derivative with invalid values.
+- [#4404](https://github.com/influxdata/influxdb/issues/4404): Return better error for currently unsupported DELETE queries.
+- [#4858](https://github.com/influxdata/influxdb/pull/4858): Validate nested aggregations in queries. Thanks @viru
+- [#4921](https://github.com/influxdata/influxdb/pull/4921): Error responses should be JSON-formatted. Thanks @pires
+- [#4974](https://github.com/influxdata/influxdb/issues/4974): Fix Data Race in TSDB when setting measurement field name
+- [#4876](https://github.com/influxdata/influxdb/pull/4876): Complete lint for monitor and services packages. Thanks @e-dard!
+- [#4833](https://github.com/influxdata/influxdb/pull/4833), [#4927](https://github.com/influxdata/influxdb/pull/4927): Fix SHOW MEASUREMENTS for clusters. Thanks @li-ang!
+- [#4918](https://github.com/influxdata/influxdb/pull/4918): Restore can hang. Fixes [issue #4806](https://github.com/influxdata/influxdb/issues/4806). Thanks @oiooj
+- [#4855](https://github.com/influxdata/influxdb/pull/4855): Fix race in TCP proxy shutdown. Thanks @runner-mei!
+- [#4411](https://github.com/influxdata/influxdb/pull/4411): Add Access-Control-Expose-Headers to HTTP responses
+- [#4768](https://github.com/influxdata/influxdb/pull/4768): CLI history skips blank lines. Thanks @pires
+- [#4766](https://github.com/influxdata/influxdb/pull/4766): Update CLI usage output. Thanks @aneshas
+- [#4804](https://github.com/influxdata/influxdb/pull/4804): Complete lint for services/admin. Thanks @nii236
+- [#4796](https://github.com/influxdata/influxdb/pull/4796): Check point without fields. Thanks @CrazyJvm
+- [#4815](https://github.com/influxdata/influxdb/pull/4815): Added `Time` field into aggregate output across the cluster. Thanks @li-ang
+- [#4817](https://github.com/influxdata/influxdb/pull/4817): Fix Min,Max,Top,Bottom function when query distributed node.
Thanks @mengjinglei +- [#4878](https://github.com/influxdata/influxdb/pull/4878): Fix String() function for several InfluxQL statement types +- [#4913](https://github.com/influxdata/influxdb/pull/4913): Fix b1 flush deadlock +- [#3170](https://github.com/influxdata/influxdb/issues/3170), [#4921](https://github.com/influxdata/influxdb/pull/4921): Database does not exist error is now JSON. Thanks @pires! +- [#5029](https://github.com/influxdata/influxdb/pull/5029): Drop UDP point on bad parse. + +v0.9.5 [2015-11-20] +------------------- + +### Release Notes + +- Field names for the internal stats have been changed to be more inline with Go style. +- 0.9.5 is reverting to Go 1.4.2 due to unresolved issues with Go 1.5.1. + +There are breaking changes in this release: + +- The filesystem hierarchy for packages has been changed, namely: + - Binaries are now located in `/usr/bin` (previously `/opt/influxdb`\) + - Configuration files are now located in `/etc/influxdb` (previously `/etc/opt/influxdb`\) + - Data directories are now located in `/var/lib/influxdb` (previously `/var/opt/influxdb`\) + - Scripts are now located in `/usr/lib/influxdb/scripts` (previously `/opt/influxdb`\) + +### Features + +- [#4702](https://github.com/influxdata/influxdb/pull/4702): Support 'history' command at CLI +- [#4098](https://github.com/influxdata/influxdb/issues/4098): Enable `golint` on the code base - uuid subpackage +- [#4141](https://github.com/influxdata/influxdb/pull/4141): Control whether each query should be logged +- [#4065](https://github.com/influxdata/influxdb/pull/4065): Added precision support in cmd client. Thanks @sbouchex +- [#4140](https://github.com/influxdata/influxdb/pull/4140): Make storage engine configurable +- [#4161](https://github.com/influxdata/influxdb/pull/4161): Implement bottom selector function +- [#4204](https://github.com/influxdata/influxdb/pull/4204): Allow module-level selection for SHOW STATS +- [#4208](https://github.com/influxdata/influxdb/pull/4208): Allow module-level selection for SHOW DIAGNOSTICS +- [#4196](https://github.com/influxdata/influxdb/pull/4196): Export tsdb.Iterator +- [#4198](https://github.com/influxdata/influxdb/pull/4198): Add basic cluster-service stats +- [#4262](https://github.com/influxdata/influxdb/pull/4262): Allow configuration of UDP retention policy +- [#4265](https://github.com/influxdata/influxdb/pull/4265): Add statistics for Hinted-Handoff +- [#4284](https://github.com/influxdata/influxdb/pull/4284): Add exponential backoff for hinted-handoff failures +- [#4310](https://github.com/influxdata/influxdb/pull/4310): Support dropping non-Raft nodes. Work mostly by @corylanou +- [#4348](https://github.com/influxdata/influxdb/pull/4348): Public ApplyTemplate function for graphite parser. +- [#4178](https://github.com/influxdata/influxdb/pull/4178): Support fields in graphite parser. Thanks @roobert! +- [#4409](https://github.com/influxdata/influxdb/pull/4409): wire up INTO queries. +- [#4379](https://github.com/influxdata/influxdb/pull/4379): Auto-create database for UDP input. +- [#4375](https://github.com/influxdata/influxdb/pull/4375): Add Subscriptions so data can be 'forked' out of InfluxDB to another third party. +- [#4506](https://github.com/influxdata/influxdb/pull/4506): Register with Enterprise service and upload stats, if token is available. 
+- [#4516](https://github.com/influxdata/influxdb/pull/4516): Hinted-handoff refactor, with new statistics and diagnostics +- [#4501](https://github.com/influxdata/influxdb/pull/4501): Allow filtering SHOW MEASUREMENTS by regex. +- [#4547](https://github.com/influxdata/influxdb/pull/4547): Allow any node to be dropped, even a raft node (even the leader). +- [#4600](https://github.com/influxdata/influxdb/pull/4600): ping endpoint can wait for leader +- [#4648](https://github.com/influxdata/influxdb/pull/4648): UDP Client (v2 client) +- [#4690](https://github.com/influxdata/influxdb/pull/4690): SHOW SHARDS now includes database and policy. Thanks @pires +- [#4676](https://github.com/influxdata/influxdb/pull/4676): UDP service listener performance enhancements +- [#4659](https://github.com/influxdata/influxdb/pull/4659): Support IF EXISTS for DROP DATABASE. Thanks @ch33hau +- [#4721](https://github.com/influxdata/influxdb/pull/4721): Export tsdb.InterfaceValues +- [#4681](https://github.com/influxdata/influxdb/pull/4681): Increase default buffer size for collectd and graphite listeners +- [#4685](https://github.com/influxdata/influxdb/pull/4685): Automatically promote node to raft peer if drop server results in removing a raft peer. +- [#4846](https://github.com/influxdata/influxdb/pull/4846): Allow NaN as a valid value on the graphite service; discard these points silently (graphite compatibility). Thanks @jsternberg! + +### Bugfixes + +- [#4193](https://github.com/influxdata/influxdb/issues/4193): Less than or equal to inequality is not inclusive for time in where clause +- [#4235](https://github.com/influxdata/influxdb/issues/4235): "ORDER BY DESC" doesn't properly order +- [#4789](https://github.com/influxdata/influxdb/pull/4789): Decode WHERE fields during aggregates. Fix [issue #4701](https://github.com/influxdata/influxdb/issues/4701). +- [#4778](https://github.com/influxdata/influxdb/pull/4778): If there are no points to count, count is 0. +- [#4715](https://github.com/influxdata/influxdb/pull/4715): Fix panic during Raft-close. Fix [issue #4707](https://github.com/influxdata/influxdb/issues/4707). Thanks @oiooj +- [#4643](https://github.com/influxdata/influxdb/pull/4643): Fix panic during backup restoration. Thanks @oiooj +- [#4632](https://github.com/influxdata/influxdb/pull/4632): Fix parsing of IPv6 hosts in client package. Thanks @miguelxpn +- [#4389](https://github.com/influxdata/influxdb/pull/4389): Don't add a new segment file on each hinted-handoff purge cycle. +- [#4166](https://github.com/influxdata/influxdb/pull/4166): Fix parser error on invalid SHOW +- [#3457](https://github.com/influxdata/influxdb/issues/3457): [0.9.3] cannot select field names with prefix + "." that match the measurement name +- [#4704](https://github.com/influxdata/influxdb/pull/4704). Tighten up command parsing within CLI. Thanks @pires +- [#4225](https://github.com/influxdata/influxdb/pull/4225): Always display diags in name-sorted order +- [#4111](https://github.com/influxdata/influxdb/pull/4111): Update pre-commit hook for go vet composites +- [#4136](https://github.com/influxdata/influxdb/pull/4136): Return an error-on-write if target retention policy does not exist. Thanks for the report @ymettier +- [#4228](https://github.com/influxdata/influxdb/pull/4228): Add build timestamp to version information. 
+- [#4124](https://github.com/influxdata/influxdb/issues/4124): Missing defer/recover/panic idiom in HTTPD service +- [#4238](https://github.com/influxdata/influxdb/pull/4238): Fully disable hinted-handoff service if so requested. +- [#4165](https://github.com/influxdata/influxdb/pull/4165): Tag all Go runtime stats when writing to internal database. +- [#4586](https://github.com/influxdata/influxdb/pull/4586): Exit when invalid engine is selected +- [#4118](https://github.com/influxdata/influxdb/issues/4118): Return consistent, correct result for SHOW MEASUREMENTS with multiple AND conditions +- [#4191](https://github.com/influxdata/influxdb/pull/4191): Correctly marshal remote mapper responses. Fixes [#4170](https://github.com/influxdata/influxdb/issues/4170) +- [#4222](https://github.com/influxdata/influxdb/pull/4222): Graphite TCP connections should not block shutdown +- [#4180](https://github.com/influxdata/influxdb/pull/4180): Cursor & SelectMapper Refactor +- [#1577](https://github.com/influxdata/influxdb/issues/1577): selectors (e.g. min, max, first, last) should have equivalents to return the actual point +- [#4264](https://github.com/influxdata/influxdb/issues/4264): Refactor map functions to use list of values +- [#4278](https://github.com/influxdata/influxdb/pull/4278): Fix error marshalling across the cluster +- [#4149](https://github.com/influxdata/influxdb/pull/4149): Fix derivative unnecessarily requires aggregate function. Thanks @peekeri! +- [#4674](https://github.com/influxdata/influxdb/pull/4674): Fix panic during restore. Thanks @simcap. +- [#4725](https://github.com/influxdata/influxdb/pull/4725): Don't list deleted shards during SHOW SHARDS. +- [#4237](https://github.com/influxdata/influxdb/issues/4237): DERIVATIVE() edge conditions +- [#4263](https://github.com/influxdata/influxdb/issues/4263): derivative does not work when data is missing +- [#4293](https://github.com/influxdata/influxdb/pull/4293): Ensure shell is invoked when touching PID file. Thanks @christopherjdickson +- [#4296](https://github.com/influxdata/influxdb/pull/4296): Reject line protocol ending with '-'. Fixes [#4272](https://github.com/influxdata/influxdb/issues/4272) +- [#4333](https://github.com/influxdata/influxdb/pull/4333): Retry monitor storage creation and storage only on Leader. +- [#4276](https://github.com/influxdata/influxdb/issues/4276): Walk DropSeriesStatement & check for empty sources +- [#4465](https://github.com/influxdata/influxdb/pull/4465): Actually display a message if the CLI can't connect to the database. +- [#4342](https://github.com/influxdata/influxdb/pull/4342): Fix mixing aggregates and math with non-aggregates. Thanks @kostya-sh. +- [#4349](https://github.com/influxdata/influxdb/issues/4349): If HH can't unmarshal a block, skip that block. +- [#4502](https://github.com/influxdata/influxdb/pull/4502): Don't crash on Graphite close, if Graphite not fully open. Thanks for the report @ranjib +- [#4354](https://github.com/influxdata/influxdb/pull/4353): Fully lock node queues during hinted handoff. Fixes one cause of missing data on clusters. +- [#4357](https://github.com/influxdata/influxdb/issues/4357): Fix similar float values encoding overflow Thanks @dgryski! +- [#4344](https://github.com/influxdata/influxdb/issues/4344): Make client.Write default to client.precision if none is given. 
+- [#3429](https://github.com/influxdata/influxdb/issues/3429): Incorrect parsing of regex containing '/' +- [#4374](https://github.com/influxdata/influxdb/issues/4374): Add tsm1 quickcheck tests +- [#4644](https://github.com/influxdata/influxdb/pull/4644): Check for response errors during token check, fixes issue [#4641](https://github.com/influxdata/influxdb/issues/4641) +- [#4377](https://github.com/influxdata/influxdb/pull/4377): Hinted handoff should not process dropped nodes +- [#4365](https://github.com/influxdata/influxdb/issues/4365): Prevent panic in DecodeSameTypeBlock +- [#4280](https://github.com/influxdata/influxdb/issues/4280): Only drop points matching WHERE clause +- [#4443](https://github.com/influxdata/influxdb/pull/4443): Fix race condition while listing store's shards. Fixes [#4442](https://github.com/influxdata/influxdb/issues/4442) +- [#4410](https://github.com/influxdata/influxdb/pull/4410): Fix infinite recursion in statement string(). Thanks @kostya-sh +- [#4360](https://github.com/influxdata/influxdb/issues/4360): Aggregate Selectors overwrite values during post-processing +- [#4421](https://github.com/influxdata/influxdb/issues/4421): Fix line protocol accepting tags with no values +- [#4434](https://github.com/influxdata/influxdb/pull/4434): Allow 'E' for scientific values. Fixes [#4433](https://github.com/influxdata/influxdb/issues/4433) +- [#4431](https://github.com/influxdata/influxdb/issues/4431): Add tsm1 WAL QuickCheck +- [#4438](https://github.com/influxdata/influxdb/pull/4438): openTSDB service shutdown fixes +- [#4447](https://github.com/influxdata/influxdb/pull/4447): Fixes to logrotate file. Thanks @linsomniac. +- [#3820](https://github.com/influxdata/influxdb/issues/3820): Fix js error in admin UI. +- [#4460](https://github.com/influxdata/influxdb/issues/4460): tsm1 meta lint +- [#4415](https://github.com/influxdata/influxdb/issues/4415): Selector (like max, min, first, etc) return a string instead of timestamp +- [#4472](https://github.com/influxdata/influxdb/issues/4472): Fix 'too many points in GROUP BY interval' error +- [#4475](https://github.com/influxdata/influxdb/issues/4475): Fix SHOW TAG VALUES error message. +- [#4486](https://github.com/influxdata/influxdb/pull/4486): Fix missing comments for runner package +- [#4497](https://github.com/influxdata/influxdb/pull/4497): Fix sequence in meta proto +- [#3367](https://github.com/influxdata/influxdb/issues/3367): Negative timestamps are parsed correctly by the line protocol. +- [#4563](https://github.com/influxdata/influxdb/pull/4536): Fix broken subscriptions updates. +- [#4538](https://github.com/influxdata/influxdb/issues/4538): Dropping database under a write load causes panics +- [#4582](https://github.com/influxdata/influxdb/pull/4582): Correct logging tags in cluster and TCP package. Thanks @oiooj +- [#4513](https://github.com/influxdata/influxdb/issues/4513): TSM1: panic: runtime error: index out of range +- [#4521](https://github.com/influxdata/influxdb/issues/4521): TSM1: panic: decode of short block: got 1, exp 9 +- [#4587](https://github.com/influxdata/influxdb/pull/4587): Prevent NaN float values from being stored +- [#4596](https://github.com/influxdata/influxdb/pull/4596): Skip empty string for start position when parsing line protocol @Thanks @ch33hau +- [#4610](https://github.com/influxdata/influxdb/pull/4610): Make internal stats names consistent with Go style. +- [#4625](https://github.com/influxdata/influxdb/pull/4625): Correctly handle bad write requests. Thanks @oiooj. 
+- [#4650](https://github.com/influxdata/influxdb/issues/4650): Importer should skip empty lines +- [#4651](https://github.com/influxdata/influxdb/issues/4651): Importer doesn't flush out last batch +- [#4602](https://github.com/influxdata/influxdb/issues/4602): Fixes data race between PointsWriter and Subscriber services. +- [#4691](https://github.com/influxdata/influxdb/issues/4691): Enable toml test `TestConfig_Encode`. +- [#4283](https://github.com/influxdata/influxdb/pull/4283): Disable HintedHandoff if configuration is not set. +- [#4703](https://github.com/influxdata/influxdb/pull/4703): Complete lint for cmd/influx. Thanks @pablolmiranda + +v0.9.4 [2015-09-14] +------------------- + +### Release Notes + +With this release InfluxDB is moving to Go 1.5. + +### Features + +- [#4050](https://github.com/influxdata/influxdb/pull/4050): Add stats to collectd +- [#3771](https://github.com/influxdata/influxdb/pull/3771): Close idle Graphite TCP connections +- [#3755](https://github.com/influxdata/influxdb/issues/3755): Add option to build script. Thanks @fg2it +- [#3863](https://github.com/influxdata/influxdb/pull/3863): Move to Go 1.5 +- [#3892](https://github.com/influxdata/influxdb/pull/3892): Support IF NOT EXISTS for CREATE DATABASE +- [#3916](https://github.com/influxdata/influxdb/pull/3916): New statistics and diagnostics support. Graphite first to be instrumented. +- [#3901](https://github.com/influxdata/influxdb/pull/3901): Add consistency level option to influx cli Thanks @takayuki +- [#4048](https://github.com/influxdata/influxdb/pull/4048): Add statistics to Continuous Query service +- [#4049](https://github.com/influxdata/influxdb/pull/4049): Add stats to the UDP input +- [#3876](https://github.com/influxdata/influxdb/pull/3876): Allow the following syntax in CQs: INTO "1hPolicy".:MEASUREMENT +- [#3975](https://github.com/influxdata/influxdb/pull/3975): Add shard copy service +- [#3986](https://github.com/influxdata/influxdb/pull/3986): Support sorting by time desc +- [#3930](https://github.com/influxdata/influxdb/pull/3930): Wire up TOP aggregate function - fixes [#1821](https://github.com/influxdata/influxdb/issues/1821) +- [#4045](https://github.com/influxdata/influxdb/pull/4045): Instrument cluster-level points writer +- [#3996](https://github.com/influxdata/influxdb/pull/3996): Add statistics to httpd package +- [#4003](https://github.com/influxdata/influxdb/pull/4033): Add logrotate configuration. +- [#4043](https://github.com/influxdata/influxdb/pull/4043): Add stats and batching to openTSDB input +- [#4042](https://github.com/influxdata/influxdb/pull/4042): Add pending batches control to batcher +- [#4006](https://github.com/influxdata/influxdb/pull/4006): Add basic statistics for shards +- [#4072](https://github.com/influxdata/influxdb/pull/4072): Add statistics for the WAL. + +### Bugfixes + +- [#4042](https://github.com/influxdata/influxdb/pull/4042): Set UDP input batching defaults as needed. +- [#3785](https://github.com/influxdata/influxdb/issues/3785): Invalid time stamp in graphite metric causes panic +- [#3804](https://github.com/influxdata/influxdb/pull/3804): init.d script fixes, fixes issue 3803. +- [#3823](https://github.com/influxdata/influxdb/pull/3823): Deterministic ordering for first() and last() +- [#3869](https://github.com/influxdata/influxdb/issues/3869): Seemingly deadlocked when ingesting metrics via graphite plugin +- [#3856](https://github.com/influxdata/influxdb/pull/3856): Minor changes to retention enforcement. 
+- [#3884](https://github.com/influxdata/influxdb/pull/3884): Fix two panics in WAL that can happen at server startup +- [#3868](https://github.com/influxdata/influxdb/pull/3868): Add shell option to start the daemon on CentOS. Thanks @SwannCroiset. +- [#3886](https://github.com/influxdata/influxdb/pull/3886): Prevent write timeouts due to lock contention in WAL +- [#3574](https://github.com/influxdata/influxdb/issues/3574): Querying data node causes panic +- [#3913](https://github.com/influxdata/influxdb/issues/3913): Convert meta shard owners to objects +- [#4026](https://github.com/influxdata/influxdb/pull/4026): Support multiple Graphite inputs. Fixes issue [#3636](https://github.com/influxdata/influxdb/issues/3636) +- [#3927](https://github.com/influxdata/influxdb/issues/3927): Add WAL lock to prevent timing lock contention +- [#3928](https://github.com/influxdata/influxdb/issues/3928): Write fails for multiple points when tag starts with quote +- [#3901](https://github.com/influxdata/influxdb/pull/3901): Unblock relaxed write consistency level Thanks @takayuki! +- [#3950](https://github.com/influxdata/influxdb/pull/3950): Limit bz1 quickcheck tests to 10 iterations on CI +- [#3977](https://github.com/influxdata/influxdb/pull/3977): Silence wal logging during testing +- [#3931](https://github.com/influxdata/influxdb/pull/3931): Don't precreate shard groups entirely in the past +- [#3960](https://github.com/influxdata/influxdb/issues/3960): possible "catch up" bug with nodes down in a cluster +- [#3980](https://github.com/influxdata/influxdb/pull/3980): 'service stop' waits until service actually stops. Fixes issue #3548. +- [#4016](https://github.com/influxdata/influxdb/pull/4016): Shutdown Graphite UDP on SIGTERM. +- [#4034](https://github.com/influxdata/influxdb/pull/4034): Rollback bolt tx on mapper open error +- [#3848](https://github.com/influxdata/influxdb/issues/3848): restart influxdb causing panic +- [#3881](https://github.com/influxdata/influxdb/issues/3881): panic: runtime error: invalid memory address or nil pointer dereference +- [#3926](https://github.com/influxdata/influxdb/issues/3926): First or last value of `GROUP BY time(x)` is often null. Fixed by [#4038](https://github.com/influxdata/influxdb/pull/4038) +- [#4053](https://github.com/influxdata/influxdb/pull/4053): Prohibit dropping default retention policy. +- [#4060](https://github.com/influxdata/influxdb/pull/4060): Don't log EOF error in openTSDB input. +- [#3978](https://github.com/influxdata/influxdb/issues/3978): [0.9.3](regression) cannot use GROUP BY * with more than a single field in SELECT clause +- [#4058](https://github.com/influxdata/influxdb/pull/4058): Disable bz1 recompression +- [#3902](https://github.com/influxdata/influxdb/issues/3902): [0.9.3] DB should not crash when using invalid expression "GROUP BY time" +- [#3718](https://github.com/influxdata/influxdb/issues/3718): Derivative query with group by time but no aggregate function should fail parse + +v0.9.3 [2015-08-26] +------------------- + +### Release Notes + +There are breaking changes in this release. + +- To store data points as integers you must now append `i` to the number if using the line protocol. +- If you have a UDP input configured, you should check the UDP section of [the new sample configuration file](https://github.com/influxdata/influxdb/blob/master/etc/config.sample.toml) to learn how to modify existing configuration files, as 0.9.3 now expects multiple UDP inputs. 
+- Configuration files must now have an entry for `wal-dir` in the `[data]` section. Check [new sample configuration file](https://github.com/influxdata/influxdb/blob/master/etc/config.sample.toml) for more details.
+- The implicit `GROUP BY *` that was added to every `SELECT *` has been removed. Instead any tags in the data are now part of the columns in the returned query.
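+
+As a small illustration of the integer change above (the measurement, tag, and field names here are invented and the timestamp is arbitrary), an integer field value must now carry a trailing `i` in the line protocol, while float fields are written as before; a bare number without the suffix is parsed as a float:
+
+```
+cpu,host=server01 connections=42i 1434055562000000000
+cpu,host=server01 load=0.64 1434055562000000000
+```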
+
+Please see the *Features* section below for full details.
+
+### Features
+
+- [#3376](https://github.com/influxdata/influxdb/pull/3376): Support for remote shard query mapping
+- [#3372](https://github.com/influxdata/influxdb/pull/3372): Support joining nodes to existing cluster
+- [#3426](https://github.com/influxdata/influxdb/pull/3426): Additional logging for continuous queries. Thanks @jhorwit2
+- [#3478](https://github.com/influxdata/influxdb/pull/3478): Support incremental cluster joins
+- [#3519](https://github.com/influxdata/influxdb/pull/3519): **--BREAKING CHANGE--** Update line protocol to require trailing i for field values that are integers
+- [#3529](https://github.com/influxdata/influxdb/pull/3529): Add TLS support for OpenTSDB plugin. Thanks @nathanielc
+- [#3421](https://github.com/influxdata/influxdb/issues/3421): Should update metastore and cluster if IP or hostname changes
+- [#3502](https://github.com/influxdata/influxdb/pull/3502): Importer for 0.8.9 data via the CLI
+- [#3564](https://github.com/influxdata/influxdb/pull/3564): Fix alias, maintain column sort order
+- [#3585](https://github.com/influxdata/influxdb/pull/3585): Additional test coverage for non-existent fields
+- [#3246](https://github.com/influxdata/influxdb/issues/3246): Allow overriding of configuration parameters using environment variables
+- [#3599](https://github.com/influxdata/influxdb/pull/3599): **--BREAKING CHANGE--** Support multiple UDP inputs. Thanks @tpitale
+- [#3636](https://github.com/influxdata/influxdb/pull/3639): Cap auto-created retention policy replica count at 3
+- [#3641](https://github.com/influxdata/influxdb/pull/3641): Logging enhancements and single-node rename
+- [#3635](https://github.com/influxdata/influxdb/pull/3635): Add build branch to version output.
+- [#3115](https://github.com/influxdata/influxdb/pull/3115): Various init.d script improvements. Thanks @KoeSystems.
+- [#3628](https://github.com/influxdata/influxdb/pull/3628): Wildcard expansion of tags and fields for raw queries
+- [#3721](https://github.com/influxdata/influxdb/pull/3721): interpret number literals compared against time as nanoseconds from epoch
+- [#3514](https://github.com/influxdata/influxdb/issues/3514): Implement WAL outside BoltDB with compaction
+- [#3544](https://github.com/influxdata/influxdb/pull/3544): Implement compression on top of BoltDB
+- [#3795](https://github.com/influxdata/influxdb/pull/3795): Throttle import
+- [#3584](https://github.com/influxdata/influxdb/pull/3584): Import/export documentation
+
+### Bugfixes
+
+- [#3405](https://github.com/influxdata/influxdb/pull/3405): Prevent database panic when fields are missing. Thanks @jhorwit2
+- [#3411](https://github.com/influxdata/influxdb/issues/3411): 500 timeout on write
+- [#3420](https://github.com/influxdata/influxdb/pull/3420): Catch opentsdb malformed tags. Thanks @nathanielc.
+- [#3404](https://github.com/influxdata/influxdb/pull/3404): Added support for escaped single quotes in query string. Thanks @jhorwit2
+- [#3414](https://github.com/influxdata/influxdb/issues/3414): Shard mappers perform query re-writing
+- [#3525](https://github.com/influxdata/influxdb/pull/3525): check if fields are valid during parse time.
+- [#3511](https://github.com/influxdata/influxdb/issues/3511): Sending a large number of tag causes panic
+- [#3288](https://github.com/influxdata/influxdb/issues/3288): Run go fuzz on the line-protocol input
+- [#3545](https://github.com/influxdata/influxdb/issues/3545): Fix parsing string fields with newlines
+- [#3579](https://github.com/influxdata/influxdb/issues/3579): Revert breaking change to `client.NewClient` function
+- [#3580](https://github.com/influxdata/influxdb/issues/3580): Do not allow wildcards with fields in select statements
+- [#3530](https://github.com/influxdata/influxdb/pull/3530): Aliasing a column no longer works
+- [#3436](https://github.com/influxdata/influxdb/issues/3436): Fix panic in hinted handoff queue processor
+- [#3401](https://github.com/influxdata/influxdb/issues/3401): Derivative on non-numeric fields panics db
+- [#3583](https://github.com/influxdata/influxdb/issues/3583): Inserting value in scientific notation with a trailing i causes panic
+- [#3611](https://github.com/influxdata/influxdb/pull/3611): Fix query arithmetic with integers
+- [#3326](https://github.com/influxdata/influxdb/issues/3326): simple regex query fails with cryptic error
+- [#3618](https://github.com/influxdata/influxdb/pull/3618): Fix collectd stats panic on i386. Thanks @richterger
+- [#3625](https://github.com/influxdata/influxdb/pull/3625): Don't panic when aggregate and raw queries are in a single statement
+- [#3629](https://github.com/influxdata/influxdb/pull/3629): Use sensible batching defaults for Graphite.
+- [#3638](https://github.com/influxdata/influxdb/pull/3638): Cluster config fixes and removal of meta.peers config field
+- [#3640](https://github.com/influxdata/influxdb/pull/3640): Shutdown Graphite service when signal received.
+- [#3632](https://github.com/influxdata/influxdb/issues/3632): Make single-node host renames more seamless
+- [#3656](https://github.com/influxdata/influxdb/issues/3656): Silence snapshotter logger for testing
+- [#3651](https://github.com/influxdata/influxdb/pull/3651): Fully remove series when dropped.
+- [#3517](https://github.com/influxdata/influxdb/pull/3517): Batch CQ writes to avoid timeouts. Thanks @dim.
+- [#3522](https://github.com/influxdata/influxdb/pull/3522): Consume CQ results on request timeouts. Thanks @dim.
+- [#3646](https://github.com/influxdata/influxdb/pull/3646): Fix nil FieldCodec panic.
+- [#3672](https://github.com/influxdata/influxdb/pull/3672): Reduce in-memory index by 20%-30%
+- [#3673](https://github.com/influxdata/influxdb/pull/3673): Improve query performance by removing unnecessary tagset sorting.
+- [#3676](https://github.com/influxdata/influxdb/pull/3676): Improve query performance by memoizing mapper output keys.
+- [#3686](https://github.com/influxdata/influxdb/pull/3686): Ensure 'p' parameter is not logged, even on OPTIONS requests.
+- [#3687](https://github.com/influxdata/influxdb/issues/3687): Fix panic: runtime error: makeslice: len out of range in hinted handoff
+- [#3697](https://github.com/influxdata/influxdb/issues/3697): Correctly merge non-chunked results for same series. Fix issue #3242.
+- [#3708](https://github.com/influxdata/influxdb/issues/3708): Fix double escaping measurement name during cluster replication +- [#3704](https://github.com/influxdata/influxdb/issues/3704): cluster replication issue for measurement name containing backslash +- [#3681](https://github.com/influxdata/influxdb/issues/3681): Quoted measurement names fail +- [#3681](https://github.com/influxdata/influxdb/issues/3682): Fix inserting string value with backslashes +- [#3735](https://github.com/influxdata/influxdb/issues/3735): Append to small bz1 blocks +- [#3736](https://github.com/influxdata/influxdb/pull/3736): Update shard group duration with retention policy changes. Thanks for the report @papylhomme +- [#3539](https://github.com/influxdata/influxdb/issues/3539): parser incorrectly accepts NaN as numerical value, but not always +- [#3790](https://github.com/influxdata/influxdb/pull/3790): Fix line protocol parsing equals in measurements and NaN values +- [#3778](https://github.com/influxdata/influxdb/pull/3778): Don't panic if SELECT on time. +- [#3824](https://github.com/influxdata/influxdb/issues/3824): tsdb.Point.MarshalBinary needs to support all number types +- [#3828](https://github.com/influxdata/influxdb/pull/3828): Support all number types when decoding a point +- [#3853](https://github.com/influxdata/influxdb/pull/3853): Use 4KB default block size for bz1 +- [#3607](https://github.com/influxdata/influxdb/issues/3607): Fix unable to query influxdb due to deadlock in metastore. Thanks @ccutrer! + +v0.9.2 [2015-07-24] +------------------- + +### Features + +- [#3177](https://github.com/influxdata/influxdb/pull/3177): Client supports making HTTPS requests. Thanks @jipperinbham +- [#3299](https://github.com/influxdata/influxdb/pull/3299): Refactor query engine for distributed query support. +- [#3334](https://github.com/influxdata/influxdb/pull/3334): Clean shutdown of influxd. Thanks @mcastilho + +### Bugfixes + +- [#3180](https://github.com/influxdata/influxdb/pull/3180): Log GOMAXPROCS, version, and commit on startup. +- [#3218](https://github.com/influxdata/influxdb/pull/3218): Allow write timeouts to be configurable. +- [#3184](https://github.com/influxdata/influxdb/pull/3184): Support basic auth in admin interface. Thanks @jipperinbham! +- [#3236](https://github.com/influxdata/influxdb/pull/3236): Fix display issues in admin interface. +- [#3232](https://github.com/influxdata/influxdb/pull/3232): Set logging prefix for metastore. +- [#3230](https://github.com/influxdata/influxdb/issues/3230): panic: unable to parse bool value +- [#3245](https://github.com/influxdata/influxdb/issues/3245): Error using graphite plugin with multiple filters +- [#3223](https://github.com/influxdata/influxdb/issues/323): default graphite template cannot have extra tags +- [#3255](https://github.com/influxdata/influxdb/pull/3255): Flush WAL on start-up as soon as possible. +- [#3289](https://github.com/influxdata/influxdb/issues/3289): InfluxDB crashes on floats without decimal +- [#3298](https://github.com/influxdata/influxdb/pull/3298): Corrected WAL & flush parameters in default config. Thanks @jhorwit2 +- [#3152](https://github.com/influxdata/influxdb/issues/3159): High CPU Usage with unsorted writes +- [#3307](https://github.com/influxdata/influxdb/pull/3307): Fix regression parsing boolean values True/False +- [#3304](https://github.com/influxdata/influxdb/pull/3304): Fixed httpd logger to log user from query params. 
Thanks @jhorwit2
+- [#3332](https://github.com/influxdata/influxdb/pull/3332): Add SLIMIT and SOFFSET to string version of AST.
+- [#3335](https://github.com/influxdata/influxdb/pull/3335): Don't drop all data on DROP DATABASE. Thanks to @PierreF for the report
+- [#2761](https://github.com/influxdata/influxdb/issues/2761): Make SHOW RETENTION POLICIES consistent with other queries.
+- [#3356](https://github.com/influxdata/influxdb/pull/3356): Disregard semicolons after database name in use command. Thanks @timraymond.
+- [#3351](https://github.com/influxdata/influxdb/pull/3351): Handle malformed regex comparisons during parsing. Thanks @rnubel
+- [#3244](https://github.com/influxdata/influxdb/pull/3244): Wire up admin privilege grant and revoke.
+- [#3259](https://github.com/influxdata/influxdb/issues/3259): Respect privileges for queries.
+- [#3256](https://github.com/influxdata/influxdb/pull/3256): Remove unnecessary timeout in WaitForLeader(). Thanks @cannium.
+- [#3380](https://github.com/influxdata/influxdb/issues/3380): Parser fix, only allow ORDER BY ASC and ORDER BY time ASC.
+- [#3319](https://github.com/influxdata/influxdb/issues/3319): restarting process irrevocably BREAKS measurements with spaces
+- [#3453](https://github.com/influxdata/influxdb/issues/3453): Remove outdated `dump` command from CLI.
+- [#3463](https://github.com/influxdata/influxdb/issues/3463): Fix aggregate queries and time precision on where clauses.
+
+v0.9.1 [2015-07-02]
+-------------------
+
+### Features
+
+- [#2650](https://github.com/influxdata/influxdb/pull/2650): Add SHOW GRANTS FOR USER statement. Thanks @n1tr0g.
+- [#3125](https://github.com/influxdata/influxdb/pull/3125): Graphite Input Protocol Parsing
+- [#2746](https://github.com/influxdata/influxdb/pull/2746): New Admin UI/interface
+- [#3036](https://github.com/influxdata/influxdb/pull/3036): Write Ahead Log (WAL)
+- [#3014](https://github.com/influxdata/influxdb/issues/3014): Implement Raft snapshots
+
+### Bugfixes
+
+- [#3013](https://github.com/influxdata/influxdb/issues/3013): Panic error with inserting values with commas
+- [#2956](https://github.com/influxdata/influxdb/issues/2956): Type mismatch in derivative
+- [#2908](https://github.com/influxdata/influxdb/issues/2908): Field mismatch error messages need to be updated
+- [#2931](https://github.com/influxdata/influxdb/pull/2931): Services and reporting should wait until cluster has leader.
+- [#2943](https://github.com/influxdata/influxdb/issues/2943): Ensure default retention policies are fully replicated
+- [#2948](https://github.com/influxdata/influxdb/issues/2948): Field mismatch error message to include measurement name
+- [#2919](https://github.com/influxdata/influxdb/issues/2919): Unable to insert negative floats
+- [#2935](https://github.com/influxdata/influxdb/issues/2935): Hook CPU and memory profiling back up.
+- [#2960](https://github.com/influxdata/influxdb/issues/2960): Cluster Write Errors.
+- [#2928](https://github.com/influxdata/influxdb/pull/2928): Start work to set InfluxDB version in HTTP response headers. Thanks @neonstalwart.
+- [#2969](https://github.com/influxdata/influxdb/pull/2969): Actually set HTTP version in responses.
+- [#2993](https://github.com/influxdata/influxdb/pull/2993): Don't log each UDP batch.
+- [#2994](https://github.com/influxdata/influxdb/pull/2994): Don't panic during wildcard expansion if no default database specified.
+- [#3002](https://github.com/influxdata/influxdb/pull/3002): Remove measurement from shard's index on DROP MEASUREMENT.
+- [#3021](https://github.com/influxdata/influxdb/pull/3021): Correctly set HTTP write trace logging. Thanks @vladlopes.
+- [#3027](https://github.com/influxdata/influxdb/pull/3027): Enforce minimum retention policy duration of 1 hour.
+- [#3030](https://github.com/influxdata/influxdb/pull/3030): Fix excessive logging of shard creation.
+- [#3038](https://github.com/influxdata/influxdb/pull/3038): Don't check deleted shards for precreation. Thanks @vladlopes.
+- [#3033](https://github.com/influxdata/influxdb/pull/3033): Add support for marshaling `uint64` in client.
+- [#3090](https://github.com/influxdata/influxdb/pull/3090): Remove database from TSDB index on DROP DATABASE.
+- [#2944](https://github.com/influxdata/influxdb/issues/2944): Don't require "WHERE time" when creating continuous queries.
+- [#3075](https://github.com/influxdata/influxdb/pull/3075): GROUP BY correctly when different tags have same value.
+- [#3078](https://github.com/influxdata/influxdb/pull/3078): Fix CLI panic on malformed INSERT.
+- [#2102](https://github.com/influxdata/influxdb/issues/2102): Re-work Graphite input and metric processing
+- [#2996](https://github.com/influxdata/influxdb/issues/2996): Graphite Input Parsing
+- [#3136](https://github.com/influxdata/influxdb/pull/3136): Fix various issues with init.d script. Thanks @miguelcnf.
+- [#3127](https://github.com/influxdata/influxdb/issues/3127): Trying to insert a number larger than the largest signed 64-bit number kills influxd
+- [#3131](https://github.com/influxdata/influxdb/pull/3131): Copy batch tags to each point before marshalling
+- [#3155](https://github.com/influxdata/influxdb/pull/3155): Instantiate UDP batcher before listening for UDP traffic, otherwise a panic may result.
+- [#2678](https://github.com/influxdata/influxdb/issues/2678): Server allows tags with an empty string for the key and/or value
+- [#3061](https://github.com/influxdata/influxdb/issues/3061): syntactically incorrect line protocol insert panics the database
+- [#2608](https://github.com/influxdata/influxdb/issues/2608): drop measurement while writing points to that measurement has race condition that can panic
+- [#3183](https://github.com/influxdata/influxdb/issues/3183): using line protocol measurement names cannot contain commas
+- [#3193](https://github.com/influxdata/influxdb/pull/3193): Fix panic for SHOW STATS and in collectd
+- [#3102](https://github.com/influxdata/influxdb/issues/3102): Add authentication cache
+- [#3209](https://github.com/influxdata/influxdb/pull/3209): Dump Run() errors to stderr
+- [#3217](https://github.com/influxdata/influxdb/pull/3217): Allow WAL partition flush delay to be configurable.
+
+v0.9.0 [2015-06-11]
+-------------------
+
+### Bugfixes
+
+- [#2869](https://github.com/influxdata/influxdb/issues/2869): Adding field to existing measurement causes panic
+- [#2849](https://github.com/influxdata/influxdb/issues/2849): RC32: Frequent write errors
+- [#2700](https://github.com/influxdata/influxdb/issues/2700): Incorrect error message in database EncodeFields
+- [#2897](https://github.com/influxdata/influxdb/pull/2897): Ensure target Graphite database exists
+- [#2898](https://github.com/influxdata/influxdb/pull/2898): Ensure target openTSDB database exists
+- [#2895](https://github.com/influxdata/influxdb/pull/2895): Use Graphite input defaults where necessary
+- [#2900](https://github.com/influxdata/influxdb/pull/2900): Use openTSDB input defaults where necessary
+- [#2886](https://github.com/influxdata/influxdb/issues/2886): Refactor backup & restore
+- [#2804](https://github.com/influxdata/influxdb/pull/2804): BREAKING: change time literals to be single quoted in InfluxQL. Thanks @nvcook42!
+- [#2906](https://github.com/influxdata/influxdb/pull/2906): Restrict replication factor to the cluster size
+- [#2905](https://github.com/influxdata/influxdb/pull/2905): Restrict clusters to 3 peers
+- [#2904](https://github.com/influxdata/influxdb/pull/2904): Re-enable server reporting.
+- [#2917](https://github.com/influxdata/influxdb/pull/2917): Fix int64 field values.
+- [#2920](https://github.com/influxdata/influxdb/issues/2920): Ensure collectd database exists
+
+v0.9.0-rc33 [2015-06-09]
+------------------------
+
+### Bugfixes
+
+- [#2816](https://github.com/influxdata/influxdb/pull/2816): Enable UDP service. Thanks @renan-
+- [#2824](https://github.com/influxdata/influxdb/pull/2824): Add missing call to WaitGroup.Done in execConn. Thanks @liyichao
+- [#2823](https://github.com/influxdata/influxdb/pull/2823): Convert OpenTSDB to a service.
+- [#2838](https://github.com/influxdata/influxdb/pull/2838): Set auto-created retention policy period to infinite.
+- [#2829](https://github.com/influxdata/influxdb/pull/2829): Re-enable Graphite support as a new Service-style component.
+- [#2814](https://github.com/influxdata/influxdb/issues/2814): Convert collectd to a service.
+- [#2852](https://github.com/influxdata/influxdb/pull/2852): Don't panic when altering retention policies. Thanks for the report @huhongbo
+- [#2857](https://github.com/influxdata/influxdb/issues/2857): Fix parsing commas in string field values.
+- [#2833](https://github.com/influxdata/influxdb/pull/2833): Make the default config valid.
+- [#2859](https://github.com/influxdata/influxdb/pull/2859): Fix panic on aggregate functions.
+- [#2878](https://github.com/influxdata/influxdb/pull/2878): Re-enable shard precreation.
+- [#2865](https://github.com/influxdata/influxdb/pull/2865): Return an empty set of results if database does not exist in shard metadata.
+
+### Features
+
+- [#2858](https://github.com/influxdata/influxdb/pull/2858): Support setting openTSDB write consistency.
+
+v0.9.0-rc32 [2015-06-07]
+------------------------
+
+### Release Notes
+
+This release introduced an updated write path and clustering design. The data format has also changed, so you'll need to wipe out your data to upgrade from RC31. There should be no other data changes before v0.9.0 is released.
+
+### Features
+
+- [#1997](https://github.com/influxdata/influxdb/pull/1997): Update SELECT * to return tag values.
+- [#2599](https://github.com/influxdata/influxdb/issues/2599): Add "epoch" URL param and return JSON time values as epoch instead of date strings. +- [#2682](https://github.com/influxdata/influxdb/issues/2682): Adding pr checklist to CONTRIBUTING.md +- [#2683](https://github.com/influxdata/influxdb/issues/2683): Add batching support to Graphite inputs. +- [#2687](https://github.com/influxdata/influxdb/issues/2687): Add batching support to Collectd inputs. +- [#2696](https://github.com/influxdata/influxdb/pull/2696): Add line protocol. This is now the preferred way to write data. +- [#2751](https://github.com/influxdata/influxdb/pull/2751): Add UDP input. UDP only supports the line protocol now. +- [#2684](https://github.com/influxdata/influxdb/pull/2684): Include client timeout configuration. Thanks @vladlopes! + +### Bugfixes + +- [#2776](https://github.com/influxdata/influxdb/issues/2776): Re-implement retention policy enforcement. +- [#2635](https://github.com/influxdata/influxdb/issues/2635): Fix querying against boolean field in WHERE clause. +- [#2644](https://github.com/influxdata/influxdb/issues/2644): Make SHOW queries work with FROM /\/. +- [#2501](https://github.com/influxdata/influxdb/issues/2501): Name the FlagSet for the shell and add a version flag. Thanks @neonstalwart +- [#2647](https://github.com/influxdata/influxdb/issues/2647): Fixes typos in sample config file - thanks @claws! + +v0.9.0-rc31 [2015-05-21] +------------------------ + +### Features + +- [#1822](https://github.com/influxdata/influxdb/issues/1822): Wire up DERIVATIVE aggregate +- [#1477](https://github.com/influxdata/influxdb/issues/1477): Wire up non_negative_derivative function +- [#2557](https://github.com/influxdata/influxdb/issues/2557): Fix false positive error with `GROUP BY time` +- [#1891](https://github.com/influxdata/influxdb/issues/1891): Wire up COUNT DISTINCT aggregate +- [#1989](https://github.com/influxdata/influxdb/issues/1989): Implement `SELECT tagName FROM m` + +### Bugfixes + +- [#2545](https://github.com/influxdata/influxdb/pull/2545): Use "value" as the field name for graphite input. Thanks @cannium. +- [#2558](https://github.com/influxdata/influxdb/pull/2558): Fix client response check - thanks @vladlopes! +- [#2566](https://github.com/influxdata/influxdb/pull/2566): Wait until each data write has been commited by the Raft cluster. +- [#2602](https://github.com/influxdata/influxdb/pull/2602): CLI execute command exits without cleaning up liner package. +- [#2610](https://github.com/influxdata/influxdb/pull/2610): Fix shard group creation +- [#2596](https://github.com/influxdata/influxdb/pull/2596): RC30: `panic: runtime error: index out of range` when insert data points. +- [#2592](https://github.com/influxdata/influxdb/pull/2592): Should return an error if user attempts to group by a field. +- [#2499](https://github.com/influxdata/influxdb/pull/2499): Issuing a select query with tag as a values causes panic. +- [#2612](https://github.com/influxdata/influxdb/pull/2612): Query planner should validate distinct is passed a field. +- [#2531](https://github.com/influxdata/influxdb/issues/2531): Fix select with 3 or more terms in where clause. +- [#2564](https://github.com/influxdata/influxdb/issues/2564): Change "name" to "measurement" in JSON for writes. 
+ +PRs +--- + +- [#2569](https://github.com/influxdata/influxdb/pull/2569): Add derivative functions +- [#2598](https://github.com/influxdata/influxdb/pull/2598): Implement tag support in SELECT statements +- [#2624](https://github.com/influxdata/influxdb/pull/2624): Remove references to SeriesID in `DROP SERIES` handlers. + +v0.9.0-rc30 [2015-05-12] +------------------------ + +### Release Notes + +This release has a breaking API change for writes -- the field previously called `timestamp` has been renamed to `time`. + +### Features + +- [#2254](https://github.com/influxdata/influxdb/pull/2254): Add Support for OpenTSDB HTTP interface. Thanks @tcolgate +- [#2525](https://github.com/influxdata/influxdb/pull/2525): Serve broker diagnostics over HTTP +- [#2186](https://github.com/influxdata/influxdb/pull/2186): The default status code for queries is now `200 OK` +- [#2298](https://github.com/influxdata/influxdb/pull/2298): Successful writes now return a status code of `204 No Content` - thanks @neonstalwart! +- [#2549](https://github.com/influxdata/influxdb/pull/2549): Raft election timeout to 5 seconds, so system is more forgiving of CPU loads. +- [#2568](https://github.com/influxdata/influxdb/pull/2568): Wire up SELECT DISTINCT. + +### Bugfixes + +- [#2535](https://github.com/influxdata/influxdb/pull/2535): Return exit status 0 if influxd already running. Thanks @haim0n. +- [#2521](https://github.com/influxdata/influxdb/pull/2521): Don't truncate topic data until fully replicated. +- [#2509](https://github.com/influxdata/influxdb/pull/2509): Parse config file correctly during restore. Thanks @neonstalwart +- [#2536](https://github.com/influxdata/influxdb/issues/2532): Set leader ID on restart of single-node cluster. +- [#2448](https://github.com/influxdata/influxdb/pull/2448): Fix inconsistent data type - thanks @cannium! +- [#2108](https://github.com/influxdata/influxdb/issues/2108): Change `timestamp` to `time` - thanks @neonstalwart! +- [#2539](https://github.com/influxdata/influxdb/issues/2539): Add additional vote request logging. +- [#2541](https://github.com/influxdata/influxdb/issues/2541): Update messaging client connection index with every message. +- [#2542](https://github.com/influxdata/influxdb/issues/2542): Throw parser error for invalid aggregate without where time. +- [#2548](https://github.com/influxdata/influxdb/issues/2548): Return an error when numeric aggregate applied to non-numeric data. +- [#2487](https://github.com/influxdata/influxdb/issues/2487): Aggregate query with exact timestamp causes panic. Thanks @neonstalwart! +- [#2552](https://github.com/influxdata/influxdb/issues/2552): Run CQ that is actually passed into go-routine. +- [#2553](https://github.com/influxdata/influxdb/issues/2553): Fix race condition during CQ execution. +- [#2557](https://github.com/influxdata/influxdb/issues/2557): RC30 WHERE time filter Regression. + +v0.9.0-rc29 [2015-05-05] +------------------------ + +### Features + +- [#2410](https://github.com/influxdata/influxdb/pull/2410): If needed, brokers respond with data nodes for peer shard replication. +- [#2469](https://github.com/influxdata/influxdb/pull/2469): Reduce default max topic size from 1GB to 50MB. +- [#1824](https://github.com/influxdata/influxdb/pull/1824): Wire up MEDIAN aggregate. Thanks @neonstalwart! + +### Bugfixes + +- [#2446](https://github.com/influxdata/influxdb/pull/2446): Correctly count number of queries executed. 
Thanks @neonstalwart +- [#2452](https://github.com/influxdata/influxdb/issues/2452): Fix panic with shard stats on multiple clusters +- [#2453](https://github.com/influxdata/influxdb/pull/2453): Do not require snapshot on Log.WriteEntriesTo(). +- [#2460](https://github.com/influxdata/influxdb/issues/2460): Collectd input should use "value" for fields values. Fixes 2412. Thanks @josh-padnick +- [#2465](https://github.com/influxdata/influxdb/pull/2465): HTTP response logging paniced with chunked requests. Thanks @Jackkoz +- [#2475](https://github.com/influxdata/influxdb/pull/2475): RLock server when checking if shards groups are required during write. +- [#2471](https://github.com/influxdata/influxdb/issues/2471): Function calls normalized to be lower case. Fixes percentile not working when called uppercase. Thanks @neonstalwart +- [#2281](https://github.com/influxdata/influxdb/issues/2281): Fix Bad Escape error when parsing regex + +v0.9.0-rc28 [2015-04-27] +------------------------ + +### Features + +- [#2410](https://github.com/influxdata/influxdb/pull/2410) Allow configuration of Raft timers +- [#2354](https://github.com/influxdata/influxdb/pull/2354) Wire up STDDEV. Thanks @neonstalwart! + +### Bugfixes + +- [#2374](https://github.com/influxdata/influxdb/issues/2374): Two different panics during SELECT percentile +- [#2404](https://github.com/influxdata/influxdb/pull/2404): Mean and percentile function fixes +- [#2408](https://github.com/influxdata/influxdb/pull/2408): Fix snapshot 500 error +- [#1896](https://github.com/influxdata/influxdb/issues/1896): Excessive heartbeater logging of "connection refused" on cluster node stop +- [#2418](https://github.com/influxdata/influxdb/pull/2418): Fix raft node getting stuck in candidate state +- [#2415](https://github.com/influxdata/influxdb/pull/2415): Raft leader ID now set on election after failover. Thanks @xiaost +- [#2426](https://github.com/influxdata/influxdb/pull/2426): Fix race condition around listener address in openTSDB server. +- [#2426](https://github.com/influxdata/influxdb/pull/2426): Fix race condition around listener address in Graphite server. +- [#2429](https://github.com/influxdata/influxdb/pull/2429): Ensure no field value is null. +- [#2431](https://github.com/influxdata/influxdb/pull/2431): Always append shard path in diags. Thanks @marcosnils +- [#2441](https://github.com/influxdata/influxdb/pull/2441): Correctly release server RLock during "drop series". +- [#2445](https://github.com/influxdata/influxdb/pull/2445): Read locks and data race fixes + +v0.9.0-rc27 [04-23-2015] +------------------------ + +### Features + +- [#2398](https://github.com/influxdata/influxdb/pull/2398) Track more stats and report errors for shards. + +### Bugfixes + +- [#2370](https://github.com/influxdata/influxdb/pull/2370): Fix data race in openTSDB endpoint. +- [#2371](https://github.com/influxdata/influxdb/pull/2371): Don't set client to nil when closing broker Fixes #2352 +- [#2372](https://github.com/influxdata/influxdb/pull/2372): Fix data race in graphite endpoint. +- [#2373](https://github.com/influxdata/influxdb/pull/2373): Actually allow HTTP logging to be controlled. +- [#2376](https://github.com/influxdata/influxdb/pull/2376): Encode all types of integers. Thanks @jtakkala. +- [#2376](https://github.com/influxdata/influxdb/pull/2376): Add shard path to existing diags value. Fix issue #2369. 
+- [#2386](https://github.com/influxdata/influxdb/pull/2386): Fix shard datanodes stats getting appended too many times +- [#2393](https://github.com/influxdata/influxdb/pull/2393): Fix default hostname for connecting to cluster. +- [#2390](https://github.com/influxdata/influxdb/pull/2390): Handle large sums when calculating means - thanks @neonstalwart! +- [#2391](https://github.com/influxdata/influxdb/pull/2391): Unable to write points through Go client when authentication enabled +- [#2400](https://github.com/influxdata/influxdb/pull/2400): Always send auth headers for client requests if present + +v0.9.0-rc26 [04-21-2015] +------------------------ + +### Features + +- [#2301](https://github.com/influxdata/influxdb/pull/2301): Distributed query load balancing and failover +- [#2336](https://github.com/influxdata/influxdb/pull/2336): Handle distributed queries when shards != data nodes +- [#2353](https://github.com/influxdata/influxdb/pull/2353): Distributed Query/Clustering Fixes + +### Bugfixes + +- [#2297](https://github.com/influxdata/influxdb/pull/2297): create /var/run during startup. Thanks @neonstalwart. +- [#2312](https://github.com/influxdata/influxdb/pull/2312): Re-use httpclient for continuous queries +- [#2318](https://github.com/influxdata/influxdb/pull/2318): Remove pointless use of 'done' channel for collectd. +- [#2242](https://github.com/influxdata/influxdb/pull/2242): Distributed Query should balance requests +- [#2243](https://github.com/influxdata/influxdb/pull/2243): Use Limit Reader instead of fixed 1MB/1GB slice for DQ +- [#2190](https://github.com/influxdata/influxdb/pull/2190): Implement failover to other data nodes for distributed queries +- [#2324](https://github.com/influxdata/influxdb/issues/2324): Race in Broker.Close()/Broker.RunContinousQueryProcessing() +- [#2325](https://github.com/influxdata/influxdb/pull/2325): Cluster open fixes +- [#2326](https://github.com/influxdata/influxdb/pull/2326): Fix parse error in CREATE CONTINUOUS QUERY +- [#2300](https://github.com/influxdata/influxdb/pull/2300): Refactor integration tests. Properly close Graphite/OpenTSDB listeners. +- [#2338](https://github.com/influxdata/influxdb/pull/2338): Fix panic if tag key isn't double quoted when it should have been +- [#2340](https://github.com/influxdata/influxdb/pull/2340): Fix SHOW DIAGNOSTICS panic if any shard was non-local. +- [#2351](https://github.com/influxdata/influxdb/pull/2351): Fix data race by rlocking shard during diagnostics. +- [#2348](https://github.com/influxdata/influxdb/pull/2348): Data node fail to join cluster in 0.9.0rc25 +- [#2343](https://github.com/influxdata/influxdb/pull/2343): Node falls behind Metastore updates +- [#2334](https://github.com/influxdata/influxdb/pull/2334): Test Partial replication is very problematic +- [#2272](https://github.com/influxdata/influxdb/pull/2272): clustering: influxdb 0.9.0-rc23 panics when doing a GET with merge_metrics in a +- [#2350](https://github.com/influxdata/influxdb/pull/2350): Issue fix for :influxd -hostname localhost. +- [#2367](https://github.com/influxdata/influxdb/pull/2367): PR for issue #2350 - Always use localhost, not host name. + +v0.9.0-rc25 [2015-04-15] +------------------------ + +### Bugfixes + +- [#2282](https://github.com/influxdata/influxdb/pull/2282): Use "value" as field name for OpenTSDB input. +- [#2283](https://github.com/influxdata/influxdb/pull/2283): Fix bug when restarting an entire existing cluster. 
+- [#2293](https://github.com/influxdata/influxdb/pull/2293): Open cluster listener before starting broker. +- [#2287](https://github.com/influxdata/influxdb/pull/2287): Fix data race during SHOW RETENTION POLICIES. +- [#2288](https://github.com/influxdata/influxdb/pull/2288): Fix expression parsing bug. +- [#2294](https://github.com/influxdata/influxdb/pull/2294): Fix async response flushing (invalid chunked response error). + +Features +-------- + +- [#2276](https://github.com/influxdata/influxdb/pull/2276): Broker topic truncation. +- [#2292](https://github.com/influxdata/influxdb/pull/2292): Wire up drop CQ statement - thanks @neonstalwart! +- [#2290](https://github.com/influxdata/influxdb/pull/2290): Allow hostname argument to override default config - thanks @neonstalwart! +- [#2295](https://github.com/influxdata/influxdb/pull/2295): Use nil as default return value for MapCount - thanks @neonstalwart! +- [#2246](https://github.com/influxdata/influxdb/pull/2246): Allow HTTP logging to be controlled. + +v0.9.0-rc24 [2015-04-13] +------------------------ + +### Bugfixes + +- [#2255](https://github.com/influxdata/influxdb/pull/2255): Fix panic when changing default retention policy. +- [#2257](https://github.com/influxdata/influxdb/pull/2257): Add "snapshotting" pseudo state & log entry cache. +- [#2261](https://github.com/influxdata/influxdb/pull/2261): Support int64 value types. +- [#2191](https://github.com/influxdata/influxdb/pull/2191): Case-insensitive check for "fill" +- [#2274](https://github.com/influxdata/influxdb/pull/2274): Snapshot and HTTP API endpoints +- [#2265](https://github.com/influxdata/influxdb/pull/2265): Fix auth for CLI. + +v0.9.0-rc23 [2015-04-11] +------------------------ + +### Features + +- [#2202](https://github.com/influxdata/influxdb/pull/2202): Initial implementation of Distributed Queries +- [#2202](https://github.com/influxdata/influxdb/pull/2202): 64-bit Series IDs. INCOMPATIBLE WITH PREVIOUS DATASTORES. + +### Bugfixes + +- [#2225](https://github.com/influxdata/influxdb/pull/2225): Make keywords completely case insensitive +- [#2228](https://github.com/influxdata/influxdb/pull/2228): Accept keyword default unquoted in ALTER RETENTION POLICY statement +- [#2236](https://github.com/influxdata/influxdb/pull/2236): Immediate term changes, fix stale write issue, net/http/pprof +- [#2213](https://github.com/influxdata/influxdb/pull/2213): Seed random number generator for election timeout. Thanks @cannium. + +v0.9.0-rc22 [2015-04-09] +------------------------ + +### Features + +- [#2214](https://github.com/influxdata/influxdb/pull/2214): Added the option to influx CLI to execute single command and exit. Thanks @n1tr0g + +### Bugfixes + +- [#2223](https://github.com/influxdata/influxdb/pull/2223): Always notify term change on RequestVote + +v0.9.0-rc21 [2015-04-09] +------------------------ + +### Features + +- [#870](https://github.com/influxdata/influxdb/pull/870): Add support for OpenTSDB telnet input protocol. Thanks @tcolgate +- [#2180](https://github.com/influxdata/influxdb/pull/2180): Allow http write handler to decode gzipped body +- [#2175](https://github.com/influxdata/influxdb/pull/2175): Separate broker and data nodes +- [#2158](https://github.com/influxdata/influxdb/pull/2158): Allow user password to be changed. 
Thanks @n1tr0g +- [#2201](https://github.com/influxdata/influxdb/pull/2201): Bring back config join URLs +- [#2121](https://github.com/influxdata/influxdb/pull/2121): Parser refactor + +### Bugfixes + +- [#2181](https://github.com/influxdata/influxdb/pull/2181): Fix panic on "SHOW DIAGNOSTICS". +- [#2170](https://github.com/influxdata/influxdb/pull/2170): Make sure queries on missing tags return 200 status. +- [#2197](https://github.com/influxdata/influxdb/pull/2197): Lock server during Open(). +- [#2200](https://github.com/influxdata/influxdb/pull/2200): Re-enable Continuous Queries. +- [#2203](https://github.com/influxdata/influxdb/pull/2203): Fix race condition on continuous queries. +- [#2217](https://github.com/influxdata/influxdb/pull/2217): Only revert to follower if new term is greater. +- [#2219](https://github.com/influxdata/influxdb/pull/2219): Persist term change to disk when candidate. Thanks @cannium + +v0.9.0-rc20 [2015-04-04] +------------------------ + +### Features + +- [#2128](https://github.com/influxdata/influxdb/pull/2128): Data node discovery from brokers +- [#2142](https://github.com/influxdata/influxdb/pull/2142): Support chunked queries +- [#2154](https://github.com/influxdata/influxdb/pull/2154): Node redirection +- [#2168](https://github.com/influxdata/influxdb/pull/2168): Return raft term from vote, add term logging + +### Bugfixes + +- [#2147](https://github.com/influxdata/influxdb/pull/2147): Set Go Max procs in a better location +- [#2137](https://github.com/influxdata/influxdb/pull/2137): Refactor `results` to `response`. Breaking Go Client change. +- [#2151](https://github.com/influxdata/influxdb/pull/2151): Ignore replay commands on the metastore. +- [#2152](https://github.com/influxdata/influxdb/issues/2152): Influxd process with stats enabled crashing with 'Unsuported protocol scheme for ""' +- [#2156](https://github.com/influxdata/influxdb/pull/2156): Propagate error when resolving UDP address in Graphite UDP server. +- [#2163](https://github.com/influxdata/influxdb/pull/2163): Fix up paths for default data and run storage. +- [#2164](https://github.com/influxdata/influxdb/pull/2164): Append STDOUT/STDERR in initscript. +- [#2165](https://github.com/influxdata/influxdb/pull/2165): Better name for config section for stats and diags. +- [#2165](https://github.com/influxdata/influxdb/pull/2165): Monitoring database and retention policy are not configurable. +- [#2167](https://github.com/influxdata/influxdb/pull/2167): Add broker log recovery. +- [#2166](https://github.com/influxdata/influxdb/pull/2166): Don't panic if presented with a field of unknown type. +- [#2149](https://github.com/influxdata/influxdb/pull/2149): Fix unit tests for win32 when directory doesn't exist. +- [#2150](https://github.com/influxdata/influxdb/pull/2150): Fix unit tests for win32 when a connection is refused. + +v0.9.0-rc19 [2015-04-01] +------------------------ + +### Features + +- [#2143](https://github.com/influxdata/influxdb/pull/2143): Add raft term logging. + +### Bugfixes + +- [#2145](https://github.com/influxdata/influxdb/pull/2145): Encode toml durations correctly which fixes default configuration generation `influxd config`. + +v0.9.0-rc18 [2015-03-31] +------------------------ + +### Bugfixes + +- [#2100](https://github.com/influxdata/influxdb/pull/2100): Use channel to synchronize collectd shutdown. +- [#2100](https://github.com/influxdata/influxdb/pull/2100): Synchronize access to shard index. 
+- [#2131](https://github.com/influxdata/influxdb/pull/2131): Optimize marshalTags().
+- [#2130](https://github.com/influxdata/influxdb/pull/2130): Make fewer calls to marshalTags().
+- [#2105](https://github.com/influxdata/influxdb/pull/2105): Support != for tag values. Fix issue #2097, thanks to @smonkewitz for bug report.
+- [#2105](https://github.com/influxdata/influxdb/pull/2105): Support !~ tag values.
+- [#2138](https://github.com/influxdata/influxdb/pull/2136): Use map for marshaledTags cache.
+
+v0.9.0-rc17 [2015-03-29]
+------------------------
+
+### Features
+
+- [#2076](https://github.com/influxdata/influxdb/pull/2076): Separate stdout and stderr output in init.d script
+- [#2091](https://github.com/influxdata/influxdb/pull/2091): Support disabling snapshot endpoint.
+- [#2081](https://github.com/influxdata/influxdb/pull/2081): Support writing diagnostic data into the internal database.
+- [#2095](https://github.com/influxdata/influxdb/pull/2095): Improved InfluxDB client docs. Thanks @derailed
+
+### Bugfixes
+
+- [#2093](https://github.com/influxdata/influxdb/pull/2093): Point precision not marshalled correctly. Thanks @derailed
+- [#2084](https://github.com/influxdata/influxdb/pull/2084): Allow leading underscores in identifiers.
+- [#2080](https://github.com/influxdata/influxdb/pull/2080): Graphite logs in seconds, not milliseconds.
+- [#2101](https://github.com/influxdata/influxdb/pull/2101): SHOW DATABASES should name returned series "databases".
+- [#2104](https://github.com/influxdata/influxdb/pull/2104): Include NEQ when calculating field filters.
+- [#2112](https://github.com/influxdata/influxdb/pull/2112): Set GOMAXPROCS on startup. This may have been causing extra leader elections, which would cause a number of other bugs or instability.
+- [#2111](https://github.com/influxdata/influxdb/pull/2111) and [#2025](https://github.com/influxdata/influxdb/issues/2025): Raft stability fixes. Non-contiguous log error and others.
+- [#2114](https://github.com/influxdata/influxdb/pull/2114): Correctly start influxd on platforms without start-stop-daemon.
+
+v0.9.0-rc16 [2015-03-24]
+------------------------
+
+### Features
+
+- [#2058](https://github.com/influxdata/influxdb/pull/2058): Track number of queries executed in stats.
+- [#2059](https://github.com/influxdata/influxdb/pull/2059): Retention policies sorted by name on return to client.
+- [#2061](https://github.com/influxdata/influxdb/pull/2061): Implement SHOW DIAGNOSTICS.
+- [#2064](https://github.com/influxdata/influxdb/pull/2064): Allow init.d script to return influxd version.
+- [#2053](https://github.com/influxdata/influxdb/pull/2053): Implement backup and restore.
+- [#1631](https://github.com/influxdata/influxdb/pull/1631): Wire up DROP CONTINUOUS QUERY.
+
+### Bugfixes
+
+- [#2037](https://github.com/influxdata/influxdb/pull/2037): Don't check 'configExists' at Run() level.
+- [#2039](https://github.com/influxdata/influxdb/pull/2039): Don't panic if getting current user fails.
+- [#2034](https://github.com/influxdata/influxdb/pull/2034): GROUP BY should require an aggregate.
+- [#2040](https://github.com/influxdata/influxdb/pull/2040): Add missing top-level help for config command.
+- [#2057](https://github.com/influxdata/influxdb/pull/2057): Move racy "in order" test to integration test suite.
+- [#2060](https://github.com/influxdata/influxdb/pull/2060): Reload server shard map on restart.
+- [#2068](https://github.com/influxdata/influxdb/pull/2068): Fix misspelled JSON field.
+- [#2067](https://github.com/influxdata/influxdb/pull/2067): Fixed issue where some queries didn't properly pull back data (introduced in RC15). Fixing intervals for GROUP BY.
+
+v0.9.0-rc15 [2015-03-19]
+------------------------
+
+### Features
+
+- [#2000](https://github.com/influxdata/influxdb/pull/2000): Log broker path when broker fails to start. Thanks @gst.
+- [#2007](https://github.com/influxdata/influxdb/pull/2007): Track shard-level stats.
+
+### Bugfixes
+
+- [#2001](https://github.com/influxdata/influxdb/pull/2001): Ensure measurement not found returns status code 200.
+- [#1985](https://github.com/influxdata/influxdb/pull/1985): Set content-type JSON header before actually writing header. Thanks @dstrek.
+- [#2003](https://github.com/influxdata/influxdb/pull/2003): Set timestamp when writing monitoring stats.
+- [#2004](https://github.com/influxdata/influxdb/pull/2004): Limit group by to MaxGroupByPoints (currently 100,000).
+- [#2016](https://github.com/influxdata/influxdb/pull/2016): Fixing bucket alignment for group by. Thanks @jnutzmann
+- [#2021](https://github.com/influxdata/influxdb/pull/2021): Remove unnecessary formatting from log message. Thanks @simonkern
+
+v0.9.0-rc14 [2015-03-18]
+------------------------
+
+### Bugfixes
+
+- [#1999](https://github.com/influxdata/influxdb/pull/1999): Return status code 200 for measurement not found errors on show series.
+
+v0.9.0-rc13 [2015-03-17]
+------------------------
+
+### Features
+
+- [#1974](https://github.com/influxdata/influxdb/pull/1974): Add time taken for request to the http server logs.
+
+### Bugfixes
+
+- [#1971](https://github.com/influxdata/influxdb/pull/1971): Fix leader id initialization.
+- [#1975](https://github.com/influxdata/influxdb/pull/1975): Require `q` parameter for query endpoint.
+- [#1969](https://github.com/influxdata/influxdb/pull/1969): Print loaded config.
+- [#1987](https://github.com/influxdata/influxdb/pull/1987): Fix config print startup statement for when no config is provided.
+- [#1990](https://github.com/influxdata/influxdb/pull/1990): Drop measurement was taking too long due to transactions.
+
+v0.9.0-rc12 [2015-03-15]
+------------------------
+
+### Bugfixes
+
+- [#1942](https://github.com/influxdata/influxdb/pull/1942): Sort wildcard names.
+- [#1957](https://github.com/influxdata/influxdb/pull/1957): Graphite numbers are always float64.
+- [#1955](https://github.com/influxdata/influxdb/pull/1955): Prohibit creation of databases with no name. Thanks @dullgiulio
+- [#1952](https://github.com/influxdata/influxdb/pull/1952): Handle delete statement with an error. Thanks again to @dullgiulio
+
+### Features
+
+- [#1935](https://github.com/influxdata/influxdb/pull/1935): Implement stateless broker for Raft.
+- [#1936](https://github.com/influxdata/influxdb/pull/1936): Implement "SHOW STATS" and self-monitoring
+- [#1909](https://github.com/influxdata/influxdb/pull/1909): Implement a dump command.
+
+v0.9.0-rc11 [2015-03-13]
+------------------------
+
+### Bugfixes
+
+- [#1917](https://github.com/influxdata/influxdb/pull/1902): Creating Infinite Retention Policy Failed.
+- [#1758](https://github.com/influxdata/influxdb/pull/1758): Add Graphite Integration Test.
+- [#1929](https://github.com/influxdata/influxdb/pull/1929): Default Retention Policy incorrectly auto created.
+- [#1930](https://github.com/influxdata/influxdb/pull/1930): Auto create database for graphite if not specified.
+- [#1908](https://github.com/influxdata/influxdb/pull/1908): Cosmetic CLI output fixes. +- [#1931](https://github.com/influxdata/influxdb/pull/1931): Add default column to SHOW RETENTION POLICIES. +- [#1937](https://github.com/influxdata/influxdb/pull/1937): OFFSET should be allowed to be 0. + +### Features + +- [#1902](https://github.com/influxdata/influxdb/pull/1902): Enforce retention policies to have a minimum duration. +- [#1906](https://github.com/influxdata/influxdb/pull/1906): Add show servers to query language. +- [#1925](https://github.com/influxdata/influxdb/pull/1925): Add `fill(none)`, `fill(previous)`, and `fill()` to queries. + +v0.9.0-rc10 [2015-03-09] +------------------------ + +### Bugfixes + +- [#1867](https://github.com/influxdata/influxdb/pull/1867): Fix race accessing topic replicas map +- [#1864](https://github.com/influxdata/influxdb/pull/1864): fix race in startStateLoop +- [#1753](https://github.com/influxdata/influxdb/pull/1874): Do Not Panic on Missing Dirs +- [#1877](https://github.com/influxdata/influxdb/pull/1877): Broker clients track broker leader +- [#1862](https://github.com/influxdata/influxdb/pull/1862): Fix memory leak in `httpd.serveWait`. Thanks @mountkin +- [#1883](https://github.com/influxdata/influxdb/pull/1883): RLock server during retention policy enforcement. Thanks @grisha +- [#1868](https://github.com/influxdata/influxdb/pull/1868): Use `BatchPoints` for `client.Write` method. Thanks @vladlopes, @georgmu, @d2g, @evanphx, @akolosov. +- [#1881](https://github.com/influxdata/influxdb/pull/1881): Update documentation for `client` package. Misc library tweaks. +- Fix queries with multiple where clauses on tags, times and fields. Fix queries that have where clauses on fields not in the select + +### Features + +- [#1875](https://github.com/influxdata/influxdb/pull/1875): Support trace logging of Raft. +- [#1895](https://github.com/influxdata/influxdb/pull/1895): Auto-create a retention policy when a database is created. +- [#1897](https://github.com/influxdata/influxdb/pull/1897): Pre-create shard groups. +- [#1900](https://github.com/influxdata/influxdb/pull/1900): Change `LIMIT` to `SLIMIT` and implement `LIMIT` and `OFFSET` + +v0.9.0-rc9 [2015-03-06] +----------------------- + +### Bugfixes + +- [#1872](https://github.com/influxdata/influxdb/pull/1872): Fix "stale term" errors with raft + +v0.9.0-rc8 [2015-03-05] +----------------------- + +### Bugfixes + +- [#1836](https://github.com/influxdata/influxdb/pull/1836): Store each parsed shell command in history file. +- [#1789](https://github.com/influxdata/influxdb/pull/1789): add --config-files option to fpm command. Thanks @kylezh +- [#1859](https://github.com/influxdata/influxdb/pull/1859): Queries with a `GROUP BY *` clause were returning a 500 if done against a measurement that didn't exist + +### Features + +- [#1755](https://github.com/influxdata/influxdb/pull/1848): Support JSON data ingest over UDP +- [#1857](https://github.com/influxdata/influxdb/pull/1857): Support retention policies with infinite duration +- [#1858](https://github.com/influxdata/influxdb/pull/1858): Enable detailed tracing of write path + +v0.9.0-rc7 [2015-03-02] +----------------------- + +### Features + +- [#1813](https://github.com/influxdata/influxdb/pull/1813): Queries for missing measurements or fields now return a 200 with an error message in the series JSON. 
+- [#1826](https://github.com/influxdata/influxdb/pull/1826), [#1827](https://github.com/influxdata/influxdb/pull/1827): Fixed queries with `WHERE` clauses against fields.
+
+### Bugfixes
+
+- [#1744](https://github.com/influxdata/influxdb/pull/1744): Allow retention policies to be modified without specifying replication factor. Thanks @kylezh
+- [#1809](https://github.com/influxdata/influxdb/pull/1809): Packaging post-install script unconditionally removes init.d symlink. Thanks @sineos
+
+v0.9.0-rc6 [2015-02-27]
+-----------------------
+
+### Bugfixes
+
+- [#1780](https://github.com/influxdata/influxdb/pull/1780): Malformed identifiers get through the parser
+- [#1775](https://github.com/influxdata/influxdb/pull/1775): Panic "index out of range" on some queries
+- [#1744](https://github.com/influxdata/influxdb/pull/1744): Select shard groups which completely encompass time range. Thanks @kylezh.
+
+v0.9.0-rc5 [2015-02-27]
+-----------------------
+
+### Bugfixes
+
+- [#1752](https://github.com/influxdata/influxdb/pull/1752): remove debug log output from collectd.
+- [#1720](https://github.com/influxdata/influxdb/pull/1720): Parse Series IDs as unsigned 32-bits.
+- [#1767](https://github.com/influxdata/influxdb/pull/1767): Drop Series was failing across shards. Issue #1761.
+- [#1773](https://github.com/influxdata/influxdb/pull/1773): Fix bug when merging series together that have unequal number of points in a group by interval
+- [#1771](https://github.com/influxdata/influxdb/pull/1771): Make `SHOW SERIES` return IDs and support `LIMIT` and `OFFSET`
+
+### Features
+
+- [#1698](https://github.com/influxdata/influxdb/pull/1698): Wire up DROP MEASUREMENT
+
+v0.9.0-rc4 [2015-02-24]
+-----------------------
+
+### Bugfixes
+
+- Fix authentication issue with continuous queries
+- Print version in the log on startup
+
+v0.9.0-rc3 [2015-02-23]
+-----------------------
+
+### Features
+
+- [#1659](https://github.com/influxdata/influxdb/pull/1659): WHERE against regexes: `WHERE =~ '.*asdf'`
+- [#1580](https://github.com/influxdata/influxdb/pull/1580): Add support for fields with bool, int, or string data types
+- [#1687](https://github.com/influxdata/influxdb/pull/1687): Change `Rows` to `Series` in results output. BREAKING API CHANGE
+- [#1629](https://github.com/influxdata/influxdb/pull/1629): Add support for `DROP SERIES` queries
+- [#1632](https://github.com/influxdata/influxdb/pull/1632): Add support for `GROUP BY *` to return all series within a measurement
+- [#1689](https://github.com/influxdata/influxdb/pull/1689): Change `SHOW TAG VALUES WITH KEY="foo"` to use the key name in the result. BREAKING API CHANGE
+- [#1699](https://github.com/influxdata/influxdb/pull/1699): Add CPU and memory profiling options to daemon
+- [#1672](https://github.com/influxdata/influxdb/pull/1672): Add index tracking to metastore. Makes downed node recovery actually work
+- [#1591](https://github.com/influxdata/influxdb/pull/1591): Add `spread` aggregate function
+- [#1576](https://github.com/influxdata/influxdb/pull/1576): Add `first` and `last` aggregate functions
+- [#1573](https://github.com/influxdata/influxdb/pull/1573): Add `stddev` aggregate function
+- [#1565](https://github.com/influxdata/influxdb/pull/1565): Add the admin interface back into the server and update for new API
+- [#1562](https://github.com/influxdata/influxdb/pull/1562): Enforce retention policies
+- [#1700](https://github.com/influxdata/influxdb/pull/1700): Change `Values` to `Fields` on writes. BREAKING API CHANGE
BREAKING API CHANGE
+- [#1706](https://github.com/influxdata/influxdb/pull/1706): Add support for `LIMIT` and `OFFSET`, which work on the number of series returned in a query. To limit the number of data points use a `WHERE time` clause
+
+### Bugfixes
+
+- [#1636](https://github.com/influxdata/influxdb/issues/1636): Don't store number of fields in raw data. THIS IS A BREAKING DATA CHANGE. YOU MUST START WITH A FRESH DATABASE
+- [#1701](https://github.com/influxdata/influxdb/pull/1701), [#1667](https://github.com/influxdata/influxdb/pull/1667), [#1663](https://github.com/influxdata/influxdb/pull/1663), [#1615](https://github.com/influxdata/influxdb/pull/1615): Raft fixes
+- [#1644](https://github.com/influxdata/influxdb/pull/1644): Add batching support for significantly improved write performance
+- [#1704](https://github.com/influxdata/influxdb/pull/1704): Fix queries that pull back raw data (i.e. ones without aggregate functions)
+- [#1718](https://github.com/influxdata/influxdb/pull/1718): Return an error on write if any of the points don't have at least one field
+- [#1806](https://github.com/influxdata/influxdb/pull/1806): Fix regex parsing. Change regex syntax to use / delimiters.
+
+v0.9.0-rc1,2 [no public release]
+--------------------------------
+
+### Features
+
+- Support for tags added
+- New queries for showing measurement names, tag keys, and tag values
+- Renamed shard spaces to retention policies
+- Deprecated matching against regex in favor of explicit writing and querying on retention policies
+- Pure Go InfluxQL parser
+- Switch to BoltDB as underlying datastore
+- BoltDB backed metastore to store schema information
+- Updated HTTP API to only have two endpoints `/query` and `/write`
+- Added all administrative functions to the query language
+- Change cluster architecture to have brokers and data nodes
+- Switch to streaming Raft implementation
+- In memory inverted index of the tag data
+- Pure Go implementation!
+
+v0.8.6 [2014-11-15]
+-------------------
+
+### Features
+
+- [Issue #973](https://github.com/influxdata/influxdb/issues/973). Support joining using a regex or list of time series
+- [Issue #1068](https://github.com/influxdata/influxdb/issues/1068). Print the processor chain when the query is started
+
+### Bugfixes
+
+- [Issue #584](https://github.com/influxdata/influxdb/issues/584). Don't panic if the process died while initializing
+- [Issue #663](https://github.com/influxdata/influxdb/issues/663). Make sure all sub services are closed when stopping InfluxDB
+- [Issue #671](https://github.com/influxdata/influxdb/issues/671). Fix the Makefile package target for Mac OSX
+- [Issue #800](https://github.com/influxdata/influxdb/issues/800). Use su instead of sudo in the init script. This fixes the startup problem on RHEL 6.
+- [Issue #925](https://github.com/influxdata/influxdb/issues/925). Don't generate invalid query strings for single point queries
+- [Issue #943](https://github.com/influxdata/influxdb/issues/943). Don't take two snapshots at the same time
+- [Issue #947](https://github.com/influxdata/influxdb/issues/947). Exit nicely if the daemon doesn't have permission to write to the log.
+- [Issue #959](https://github.com/influxdata/influxdb/issues/959). Stop using closed connections in the protobuf client.
+- [Issue #978](https://github.com/influxdata/influxdb/issues/978). Check for valgrind and mercurial in the configure script
+- [Issue #996](https://github.com/influxdata/influxdb/issues/996).
Fill should fill the time range even if no points exists in the given time range +- [Issue #1008](https://github.com/influxdata/influxdb/issues/1008). Return an appropriate exit status code depending on whether the process exits due to an error or exits gracefully. +- [Issue #1024](https://github.com/influxdata/influxdb/issues/1024). Hitting open files limit causes influxdb to create shards in loop. +- [Issue #1069](https://github.com/influxdata/influxdb/issues/1069). Fix deprecated interface endpoint in Admin UI. +- [Issue #1076](https://github.com/influxdata/influxdb/issues/1076). Fix the timestamps of data points written by the collectd plugin. (Thanks, @renchap for reporting this bug) +- [Issue #1078](https://github.com/influxdata/influxdb/issues/1078). Make sure we don't resurrect shard directories for shards that have already expired +- [Issue #1085](https://github.com/influxdata/influxdb/issues/1085). Set the connection string of the local raft node +- [Issue #1092](https://github.com/influxdata/influxdb/issues/1093). Set the connection string of the local node in the raft snapshot. +- [Issue #1100](https://github.com/influxdata/influxdb/issues/1100). Removing a non-existent shard space causes the cluster to panic. +- [Issue #1113](https://github.com/influxdata/influxdb/issues/1113). A nil engine.ProcessorChain causes a panic. + +v0.8.5 [2014-10-27] +------------------- + +### Features + +- [Issue #1055](https://github.com/influxdata/influxdb/issues/1055). Allow graphite and collectd input plugins to have separate binding address + +### Bugfixes + +- [Issue #1058](https://github.com/influxdata/influxdb/issues/1058). Use the query language instead of the continuous query endpoints that were removed in 0.8.4 +- [Issue #1022](https://github.com/influxdata/influxdb/issues/1022). Return an +Inf or NaN instead of panicing when we encounter a divide by zero +- [Issue #821](https://github.com/influxdata/influxdb/issues/821). Don't scan through points when we hit the limit +- [Issue #1051](https://github.com/influxdata/influxdb/issues/1051). Fix timestamps when the collectd is used and low resolution timestamps is set. + +v0.8.4 [2014-10-24] +------------------- + +### Bugfixes + +- Remove the continuous query api endpoints since the query language has all the features needed to list and delete continuous queries. +- [Issue #778](https://github.com/influxdata/influxdb/issues/778). Selecting from a non-existent series should give a better error message indicating that the series doesn't exist +- [Issue #988](https://github.com/influxdata/influxdb/issues/988). Check the arguments of `top()` and `bottom()` +- [Issue #1021](https://github.com/influxdata/influxdb/issues/1021). Make redirecting to standard output and standard error optional instead of going to `/dev/null`. This can now be configured by setting `$STDOUT` in `/etc/default/influxdb` +- [Issue #985](https://github.com/influxdata/influxdb/issues/985). Make sure we drop a shard only when there's no one using it. Otherwise, the shard can be closed when another goroutine is writing to it which will cause random errors and possibly corruption of the database. + +### Features + +- [Issue #1047](https://github.com/influxdata/influxdb/issues/1047). Allow merge() to take a list of series (as opposed to a regex in #72) + +v0.8.4-rc.1 [2014-10-21] +------------------------ + +### Bugfixes + +- [Issue #1040](https://github.com/influxdata/influxdb/issues/1040). 
Revert to older raft snapshot if the latest one is corrupted
+- [Issue #1004](https://github.com/influxdata/influxdb/issues/1004). Querying for data outside of existing shards returns an empty response instead of throwing a `Couldn't lookup columns` error
+- [Issue #1020](https://github.com/influxdata/influxdb/issues/1020). Change init script exit codes to conform to the lsb standards. (Thanks, @spuder)
+- [Issue #1011](https://github.com/influxdata/influxdb/issues/1011). Fix the tarball for homebrew so that rocksdb is included and the directory structure is clean
+- [Issue #1007](https://github.com/influxdata/influxdb/issues/1007). Fix the content type when an error occurs and the client requests compression.
+- [Issue #916](https://github.com/influxdata/influxdb/issues/916). Set the ulimit in the init script with a way to override the limit
+- [Issue #742](https://github.com/influxdata/influxdb/issues/742). Fix rocksdb for Mac OSX
+- [Issue #387](https://github.com/influxdata/influxdb/issues/387). Aggregations with group by time(1w), time(1m) and time(1y) (for week, month and year respectively) will cause the start time and end time of the bucket to fall on the logical boundaries of the week, month or year.
+- [Issue #334](https://github.com/influxdata/influxdb/issues/334). Derivative for queries with group by time() and fill(), will take the difference between the first value in the bucket and the first value of the next bucket.
+- [Issue #972](https://github.com/influxdata/influxdb/issues/972). Don't assign duplicate server ids
+
+### Features
+
+- [Issue #722](https://github.com/influxdata/influxdb/issues/722). Add an install target to the Makefile
+- [Issue #1032](https://github.com/influxdata/influxdb/issues/1032). Include the admin ui static assets in the binary
+- [Issue #1019](https://github.com/influxdata/influxdb/issues/1019). Upgrade to rocksdb 3.5.1
+- [Issue #992](https://github.com/influxdata/influxdb/issues/992). Add an input plugin for collectd. (Thanks, @kimor79)
+- [Issue #72](https://github.com/influxdata/influxdb/issues/72). Support merge for multiple series using regex syntax
+
+v0.8.3 [2014-09-24]
+-------------------
+
+### Bugfixes
+
+- [Issue #885](https://github.com/influxdata/influxdb/issues/885). Multiple queries separated by semicolons work as expected. Queries are processed sequentially
+- [Issue #652](https://github.com/influxdata/influxdb/issues/652). Return an error if an invalid column is used in the where clause
+- [Issue #794](https://github.com/influxdata/influxdb/issues/794). Fix case insensitive regex matching
+- [Issue #853](https://github.com/influxdata/influxdb/issues/853). Move cluster config from raft to API.
+- [Issue #714](https://github.com/influxdata/influxdb/issues/714). Don't panic on invalid boolean operators.
+- [Issue #843](https://github.com/influxdata/influxdb/issues/843). Prevent blank database names
+- [Issue #780](https://github.com/influxdata/influxdb/issues/780). Fix fill() for all aggregators
+- [Issue #923](https://github.com/influxdata/influxdb/issues/923). Enclose table names in double quotes in the result of GetQueryString()
+- [Issue #967](https://github.com/influxdata/influxdb/issues/967). Return an error if the storage engine can't be created
+- [Issue #954](https://github.com/influxdata/influxdb/issues/954).
Don't automatically create shards which was causing too many shards to be created when used with grafana +- [Issue #939](https://github.com/influxdata/influxdb/issues/939). Aggregation should ignore null values and invalid values, e.g. strings with mean(). +- [Issue #964](https://github.com/influxdata/influxdb/issues/964). Parse big int in queries properly. + +v0.8.2 [2014-09-05] +------------------- + +### Bugfixes + +- [Issue #886](https://github.com/influxdata/influxdb/issues/886). Update shard space to not set defaults + +- [Issue #867](https://github.com/influxdata/influxdb/issues/867). Add option to return shard space mappings in list series + +### Bugfixes + +- [Issue #652](https://github.com/influxdata/influxdb/issues/652). Return a meaningful error if an invalid column is used in where clause after joining multiple series + +v0.8.2 [2014-09-08] +------------------- + +### Features + +- Added API endpoint to update shard space definitions + +### Bugfixes + +- [Issue #886](https://github.com/influxdata/influxdb/issues/886). Shard space regexes reset after restart of InfluxDB + +v0.8.1 [2014-09-03] +------------------- + +- [Issue #896](https://github.com/influxdata/influxdb/issues/896). Allow logging to syslog. Thanks @malthe + +### Bugfixes + +- [Issue #868](https://github.com/influxdata/influxdb/issues/868). Don't panic when upgrading a snapshot from 0.7.x +- [Issue #887](https://github.com/influxdata/influxdb/issues/887). The first continuous query shouldn't trigger backfill if it had backfill disabled +- [Issue #674](https://github.com/influxdata/influxdb/issues/674). Graceful exit when config file is invalid. (Thanks, @DavidBord) +- [Issue #857](https://github.com/influxdata/influxdb/issues/857). More informative list servers api. (Thanks, @oliveagle) + +v0.8.0 [2014-08-22] +------------------- + +### Features + +- [Issue #850](https://github.com/influxdata/influxdb/issues/850). Makes the server listing more informative + +### Bugfixes + +- [Issue #779](https://github.com/influxdata/influxdb/issues/779). Deleting expired shards isn't thread safe. +- [Issue #860](https://github.com/influxdata/influxdb/issues/860). Load database config should validate shard spaces. +- [Issue #862](https://github.com/influxdata/influxdb/issues/862). Data migrator should have option to set delay time. + +v0.8.0-rc.5 [2014-08-15] +------------------------ + +### Features + +- [Issue #376](https://github.com/influxdata/influxdb/issues/376). List series should support regex filtering +- [Issue #745](https://github.com/influxdata/influxdb/issues/745). Add continuous queries to the database config +- [Issue #746](https://github.com/influxdata/influxdb/issues/746). Add data migration tool for 0.8.0 + +### Bugfixes + +- [Issue #426](https://github.com/influxdata/influxdb/issues/426). Fill should fill the entire time range that is requested +- [Issue #740](https://github.com/influxdata/influxdb/issues/740). Don't emit non existent fields when joining series with different fields +- [Issue #744](https://github.com/influxdata/influxdb/issues/744). Admin site should have all assets locally +- [Issue #767](https://github.com/influxdata/influxdb/issues/768). Remove shards whenever they expire +- [Issue #781](https://github.com/influxdata/influxdb/issues/781). Don't emit non existent fields when joining series with different fields +- [Issue #791](https://github.com/influxdata/influxdb/issues/791). 
Move database config loader to be an API endpoint +- [Issue #809](https://github.com/influxdata/influxdb/issues/809). Migration path from 0.7 -> 0.8 +- [Issue #811](https://github.com/influxdata/influxdb/issues/811). Gogoprotobuf removed `ErrWrongType`, which is depended on by Raft +- [Issue #820](https://github.com/influxdata/influxdb/issues/820). Query non-local shard with time range to avoid getting back points not in time range +- [Issue #827](https://github.com/influxdata/influxdb/issues/827). Don't leak file descriptors in the WAL +- [Issue #830](https://github.com/influxdata/influxdb/issues/830). List series should return series in lexicographic sorted order +- [Issue #831](https://github.com/influxdata/influxdb/issues/831). Move create shard space to be db specific + +v0.8.0-rc.4 [2014-07-29] +------------------------ + +### Bugfixes + +- [Issue #774](https://github.com/influxdata/influxdb/issues/774). Don't try to parse "inf" shard retention policy +- [Issue #769](https://github.com/influxdata/influxdb/issues/769). Use retention duration when determining expired shards. (Thanks, @shugo) +- [Issue #736](https://github.com/influxdata/influxdb/issues/736). Only db admins should be able to drop a series +- [Issue #713](https://github.com/influxdata/influxdb/issues/713). Null should be a valid fill value +- [Issue #644](https://github.com/influxdata/influxdb/issues/644). Graphite api should write data in batches to the coordinator +- [Issue #740](https://github.com/influxdata/influxdb/issues/740). Panic when distinct fields are selected from an inner join +- [Issue #781](https://github.com/influxdata/influxdb/issues/781). Panic when distinct fields are added after an inner join + +v0.8.0-rc.3 [2014-07-21] +------------------------ + +### Bugfixes + +- [Issue #752](https://github.com/influxdata/influxdb/issues/752). `./configure` should use goroot to find gofmt +- [Issue #758](https://github.com/influxdata/influxdb/issues/758). Clarify the reason behind graphite input plugin not starting. (Thanks, @otoolep) +- [Issue #759](https://github.com/influxdata/influxdb/issues/759). Don't revert the regex in the shard space. (Thanks, @shugo) +- [Issue #760](https://github.com/influxdata/influxdb/issues/760). Removing a server should remove it from the shard server ids. (Thanks, @shugo) +- [Issue #772](https://github.com/influxdata/influxdb/issues/772). Add sentinel values to all db. This caused the last key in the db to not be fetched properly. + +v0.8.0-rc.2 [2014-07-15] +------------------------ + +- This release is to fix a build error in rc1 which caused rocksdb to not be available +- Bump up the `max-open-files` option to 1000 on all storage engines +- Lower the `write-buffer-size` to 1000 + +v0.8.0-rc.1 [2014-07-15] +------------------------ + +### Features + +- [Issue #643](https://github.com/influxdata/influxdb/issues/643). Support pretty print json. (Thanks, @otoolep) +- [Issue #641](https://github.com/influxdata/influxdb/issues/641). Support multiple storage engines +- [Issue #665](https://github.com/influxdata/influxdb/issues/665). Make build tmp directory configurable in the make file. (Thanks, @dgnorton) +- [Issue #667](https://github.com/influxdata/influxdb/issues/667). Enable compression on all GET requests and when writing data +- [Issue #648](https://github.com/influxdata/influxdb/issues/648). Return permissions when listing db users. (Thanks, @nicolai86) +- [Issue #682](https://github.com/influxdata/influxdb/issues/682). 
Allow continuous queries to run without backfill (Thanks, @dhammika) +- [Issue #689](https://github.com/influxdata/influxdb/issues/689). **REQUIRES DATA MIGRATION** Move metadata into raft +- [Issue #255](https://github.com/influxdata/influxdb/issues/255). Support millisecond precision using `ms` suffix +- [Issue #95](https://github.com/influxdata/influxdb/issues/95). Drop database should not be synchronous +- [Issue #571](https://github.com/influxdata/influxdb/issues/571). Add support for arbitrary number of shard spaces and retention policies +- Default storage engine changed to RocksDB + +### Bugfixes + +- [Issue #651](https://github.com/influxdata/influxdb/issues/651). Change permissions of symlink which fix some installation issues. (Thanks, @Dieterbe) +- [Issue #670](https://github.com/influxdata/influxdb/issues/670). Don't warn on missing influxdb user on fresh installs +- [Issue #676](https://github.com/influxdata/influxdb/issues/676). Allow storing high precision integer values without losing any information +- [Issue #695](https://github.com/influxdata/influxdb/issues/695). Prevent having duplicate field names in the write payload. (Thanks, @seunglee150) +- [Issue #731](https://github.com/influxdata/influxdb/issues/731). Don't enable the udp plugin if the `enabled` option is set to false +- [Issue #733](https://github.com/influxdata/influxdb/issues/733). Print an `INFO` message when the input plugin is disabled +- [Issue #707](https://github.com/influxdata/influxdb/issues/707). Graphite input plugin should work payload delimited by any whitespace character +- [Issue #734](https://github.com/influxdata/influxdb/issues/734). Don't buffer non replicated writes +- [Issue #465](https://github.com/influxdata/influxdb/issues/465). Recreating a currently deleting db or series doesn't bring back the old data anymore +- [Issue #358](https://github.com/influxdata/influxdb/issues/358). **BREAKING** List series should return as a single series +- [Issue #499](https://github.com/influxdata/influxdb/issues/499). **BREAKING** Querying non-existent database or series will return an error +- [Issue #570](https://github.com/influxdata/influxdb/issues/570). InfluxDB crashes during delete/drop of database +- [Issue #592](https://github.com/influxdata/influxdb/issues/592). Drop series is inefficient + +v0.7.3 [2014-06-13] +------------------- + +### Bugfixes + +- [Issue #637](https://github.com/influxdata/influxdb/issues/637). Truncate log files if the last request wasn't written properly +- [Issue #646](https://github.com/influxdata/influxdb/issues/646). CRITICAL: Duplicate shard ids for new shards if old shards are deleted. + +v0.7.2 [2014-05-30] +------------------- + +### Features + +- [Issue #521](https://github.com/influxdata/influxdb/issues/521). MODE works on all datatypes (Thanks, @richthegeek) + +### Bugfixes + +- [Issue #418](https://github.com/influxdata/influxdb/pull/418). Requests or responses larger than MAX_REQUEST_SIZE break things. +- [Issue #606](https://github.com/influxdata/influxdb/issues/606). InfluxDB will fail to start with invalid permission if log.txt didn't exist +- [Issue #602](https://github.com/influxdata/influxdb/issues/602). Merge will fail to work across shards + +### Features + +v0.7.1 [2014-05-29] +------------------- + +### Bugfixes + +- [Issue #579](https://github.com/influxdata/influxdb/issues/579). Reject writes to nonexistent databases +- [Issue #597](https://github.com/influxdata/influxdb/issues/597). 
Force compaction after deleting data + +### Features + +- [Issue #476](https://github.com/influxdata/influxdb/issues/476). Support ARM architecture +- [Issue #578](https://github.com/influxdata/influxdb/issues/578). Support aliasing for expressions in parenthesis +- [Issue #544](https://github.com/influxdata/influxdb/pull/544). Support forcing node removal from a cluster +- [Issue #591](https://github.com/influxdata/influxdb/pull/591). Support multiple udp input plugins (Thanks, @tpitale) +- [Issue #600](https://github.com/influxdata/influxdb/pull/600). Report version, os, arch, and raftName once per day. + +v0.7.0 [2014-05-23] +------------------- + +### Bugfixes + +- [Issue #557](https://github.com/influxdata/influxdb/issues/557). Group by time(1y) doesn't work while time(365d) works +- [Issue #547](https://github.com/influxdata/influxdb/issues/547). Add difference function (Thanks, @mboelstra) +- [Issue #550](https://github.com/influxdata/influxdb/issues/550). Fix tests on 32-bit ARM +- [Issue #524](https://github.com/influxdata/influxdb/issues/524). Arithmetic operators and where conditions don't play nice together +- [Issue #561](https://github.com/influxdata/influxdb/issues/561). Fix missing query in parsing errors +- [Issue #563](https://github.com/influxdata/influxdb/issues/563). Add sample config for graphite over udp +- [Issue #537](https://github.com/influxdata/influxdb/issues/537). Incorrect query syntax causes internal error +- [Issue #565](https://github.com/influxdata/influxdb/issues/565). Empty series names shouldn't cause a panic +- [Issue #575](https://github.com/influxdata/influxdb/issues/575). Single point select doesn't interpret timestamps correctly +- [Issue #576](https://github.com/influxdata/influxdb/issues/576). We shouldn't set timestamps and sequence numbers when listing cq +- [Issue #560](https://github.com/influxdata/influxdb/issues/560). Use /dev/urandom instead of /dev/random +- [Issue #502](https://github.com/influxdata/influxdb/issues/502). Fix a race condition in assigning id to db+series+field (Thanks @ohurvitz for reporting this bug and providing a script to repro) + +### Features + +- [Issue #567](https://github.com/influxdata/influxdb/issues/567). Allow selecting from multiple series names by separating them with commas (Thanks, @peekeri) + +### Deprecated + +- [Issue #460](https://github.com/influxdata/influxdb/issues/460). Don't start automatically after installing +- [Issue #529](https://github.com/influxdata/influxdb/issues/529). Don't run influxdb as root +- [Issue #443](https://github.com/influxdata/influxdb/issues/443). Use `name` instead of `username` when returning cluster admins + +v0.6.5 [2014-05-19] +------------------- + +### Features + +- [Issue #551](https://github.com/influxdata/influxdb/issues/551). Add TOP and BOTTOM aggregate functions (Thanks, @chobie) + +### Bugfixes + +- [Issue #555](https://github.com/influxdata/influxdb/issues/555). Fix a regression introduced in the raft snapshot format + +v0.6.4 [2014-05-16] +------------------- + +### Features + +- Make the write batch size configurable (also applies to deletes) +- Optimize writing to multiple series +- [Issue #546](https://github.com/influxdata/influxdb/issues/546). Add UDP support for Graphite API (Thanks, @peekeri) + +### Bugfixes + +- Fix a bug in shard logic that caused short term shards to be clobbered with long term shards +- [Issue #489](https://github.com/influxdata/influxdb/issues/489). 
Remove replication factor from CreateDatabase command + +v0.6.3 [2014-05-13] +------------------- + +### Features + +- [Issue #505](https://github.com/influxdata/influxdb/issues/505). Return a version header with http the response (Thanks, @majst01) +- [Issue #520](https://github.com/influxdata/influxdb/issues/520). Print the version to the log file + +### Bugfixes + +- [Issue #516](https://github.com/influxdata/influxdb/issues/516). Close WAL log/index files when they aren't being used +- [Issue #532](https://github.com/influxdata/influxdb/issues/532). Don't log graphite connection EOF as an error +- [Issue #535](https://github.com/influxdata/influxdb/issues/535). WAL Replay hangs if response isn't received +- [Issue #538](https://github.com/influxdata/influxdb/issues/538). Don't panic if the same series existed twice in the request with different columns +- [Issue #536](https://github.com/influxdata/influxdb/issues/536). Joining the cluster after shards are creating shouldn't cause new nodes to panic +- [Issue #539](https://github.com/influxdata/influxdb/issues/539). count(distinct()) with fill shouldn't panic on empty groups +- [Issue #534](https://github.com/influxdata/influxdb/issues/534). Create a new series when interpolating + +v0.6.2 [2014-05-09] +------------------- + +### Bugfixes + +- [Issue #511](https://github.com/influxdata/influxdb/issues/511). Don't automatically create the database when a db user is created +- [Issue #512](https://github.com/influxdata/influxdb/issues/512). Group by should respect null values +- [Issue #518](https://github.com/influxdata/influxdb/issues/518). Filter Infinities and NaNs from the returned json +- [Issue #522](https://github.com/influxdata/influxdb/issues/522). Committing requests while replaying caused the WAL to skip some log files +- [Issue #369](https://github.com/influxdata/influxdb/issues/369). Fix some edge cases with WAL recovery + +v0.6.1 [2014-05-06] +------------------- + +### Bugfixes + +- [Issue #500](https://github.com/influxdata/influxdb/issues/500). Support `y` suffix in time durations +- [Issue #501](https://github.com/influxdata/influxdb/issues/501). Writes with invalid payload should be rejected +- [Issue #507](https://github.com/influxdata/influxdb/issues/507). New cluster admin passwords don't propagate properly to other nodes in a cluster +- [Issue #508](https://github.com/influxdata/influxdb/issues/508). Don't replay WAL entries for servers with no shards +- [Issue #464](https://github.com/influxdata/influxdb/issues/464). Admin UI shouldn't draw graphs for string columns +- [Issue #480](https://github.com/influxdata/influxdb/issues/480). Large values on the y-axis get cut off + +v0.6.0 [2014-05-02] +------------------- + +### Feature + +- [Issue #477](https://github.com/influxdata/influxdb/issues/477). Add a udp json interface (Thanks, Julien Ammous) +- [Issue #491](https://github.com/influxdata/influxdb/issues/491). Make initial root password settable through env variable (Thanks, Edward Muller) + +### Bugfixes + +- [Issue #469](https://github.com/influxdata/influxdb/issues/469). Drop continuous queries when a database is dropped +- [Issue #431](https://github.com/influxdata/influxdb/issues/431). Don't log to standard output if a log file is specified in the config file +- [Issue #483](https://github.com/influxdata/influxdb/issues/483). Return 409 if a database already exist (Thanks, Edward Muller) +- [Issue #486](https://github.com/influxdata/influxdb/issues/486). 
Columns used in the target of a continuous query shouldn't be inserted in the time series
+- [Issue #490](https://github.com/influxdata/influxdb/issues/490). Database user passwords cannot be changed (Thanks, Edward Muller)
+- [Issue #495](https://github.com/influxdata/influxdb/issues/495). Enforce write permissions properly
+
+v0.5.12 [2014-04-29]
+--------------------
+
+### Bugfixes
+
+- [Issue #419](https://github.com/influxdata/influxdb/issues/419),[Issue #478](https://github.com/influxdata/influxdb/issues/478). Allow hostname, raft and protobuf ports to be changed, without requiring manual intervention from the user
+
+v0.5.11 [2014-04-25]
+--------------------
+
+### Features
+
+- [Issue #471](https://github.com/influxdata/influxdb/issues/471). Read and write permissions should be settable through the http api
+
+### Bugfixes
+
+- [Issue #323](https://github.com/influxdata/influxdb/issues/323). Continuous queries should guard against data loops
+- [Issue #473](https://github.com/influxdata/influxdb/issues/473). Engine memory optimization
+
+v0.5.10 [2014-04-22]
+--------------------
+
+### Features
+
+- [Issue #463](https://github.com/influxdata/influxdb/issues/463). Allow series names to use any character (escape by wrapping in double quotes)
+- [Issue #447](https://github.com/influxdata/influxdb/issues/447). Allow @ in usernames
+- [Issue #466](https://github.com/influxdata/influxdb/issues/466). Allow column names to use any character (escape by wrapping in double quotes)
+
+### Bugfixes
+
+- [Issue #458](https://github.com/influxdata/influxdb/issues/458). Continuous queries with group by time() and a column should insert sequence numbers of 1
+- [Issue #457](https://github.com/influxdata/influxdb/issues/457). Deleting series that start with capital letters should work
+
+v0.5.9 [2014-04-18]
+-------------------
+
+### Bugfixes
+
+- [Issue #446](https://github.com/influxdata/influxdb/issues/446). Check for (de)serialization errors
+- [Issue #456](https://github.com/influxdata/influxdb/issues/456). Continuous queries failed if one of the group by columns had a null value
+- [Issue #455](https://github.com/influxdata/influxdb/issues/455). Comparison operators should ignore null values
+
+v0.5.8 [2014-04-17]
+-------------------
+
+- Renamed config.toml.sample to config.sample.toml
+
+### Bugfixes
+
+- [Issue #244](https://github.com/influxdata/influxdb/issues/244). Reconstruct the query from the ast
+- [Issue #449](https://github.com/influxdata/influxdb/issues/449). Heartbeat timeouts can cause reading from connection to lock up
+- [Issue #451](https://github.com/influxdata/influxdb/issues/451). Reduce the aggregation state that is kept in memory so that aggregation queries over large periods of time don't take an insane amount of memory
+
+v0.5.7 [2014-04-15]
+-------------------
+
+### Features
+
+- Queries are now logged as INFO in the log file before they run
+
+### Bugfixes
+
+- [Issue #328](https://github.com/influxdata/influxdb/issues/328). Join queries with math expressions don't work
+- [Issue #440](https://github.com/influxdata/influxdb/issues/440). Heartbeat timeouts in logs
+- [Issue #442](https://github.com/influxdata/influxdb/issues/442). shouldQuerySequentially didn't work as expected causing count(*) queries on large time series to use lots of memory
+- [Issue #437](https://github.com/influxdata/influxdb/issues/437). Queries with negative constants don't parse properly
+- [Issue #432](https://github.com/influxdata/influxdb/issues/432).
Deleted data using a delete query is resurrected after a server restart +- [Issue #439](https://github.com/influxdata/influxdb/issues/439). Report the right location of the error in the query +- Fix some bugs with the WAL recovery on startup + +v0.5.6 [2014-04-08] +------------------- + +### Features + +- [Issue #310](https://github.com/influxdata/influxdb/issues/310). Request should support multiple timeseries +- [Issue #416](https://github.com/influxdata/influxdb/issues/416). Improve the time it takes to drop database + +### Bugfixes + +- [Issue #413](https://github.com/influxdata/influxdb/issues/413). Don't assume that group by interval is greater than a second +- [Issue #415](https://github.com/influxdata/influxdb/issues/415). Include the database when sending an auth error back to the user +- [Issue #421](https://github.com/influxdata/influxdb/issues/421). Make read timeout a config option +- [Issue #392](https://github.com/influxdata/influxdb/issues/392). Different columns in different shards returns invalid results when a query spans those shards + +### Bugfixes + +v0.5.5 [2014-04-04] +------------------- + +- Upgrade leveldb 1.10 -> 1.15 + +This should be a backward compatible change, but is here for documentation only + +### Feature + +- Add a command line option to repair corrupted leveldb databases on startup +- [Issue #401](https://github.com/influxdata/influxdb/issues/401). No limit on the number of columns in the group by clause + +### Bugfixes + +- [Issue #398](https://github.com/influxdata/influxdb/issues/398). Support now() and NOW() in the query lang +- [Issue #403](https://github.com/influxdata/influxdb/issues/403). Filtering should work with join queries +- [Issue #404](https://github.com/influxdata/influxdb/issues/404). Filtering with invalid condition shouldn't crash the server +- [Issue #405](https://github.com/influxdata/influxdb/issues/405). Percentile shouldn't crash for small number of values +- [Issue #408](https://github.com/influxdata/influxdb/issues/408). Make InfluxDB recover from internal bugs and panics +- [Issue #390](https://github.com/influxdata/influxdb/issues/390). Multiple response.WriteHeader when querying as admin +- [Issue #407](https://github.com/influxdata/influxdb/issues/407). Start processing continuous queries only after the WAL is initialized +- Close leveldb databases properly if we couldn't create a new Shard. See leveldb\_shard\_datastore\_test:131 + +v0.5.4 [2014-04-02] +------------------- + +### Bugfixes + +- [Issue #386](https://github.com/influxdata/influxdb/issues/386). Drop series should work with series containing dots +- [Issue #389](https://github.com/influxdata/influxdb/issues/389). Filtering shouldn't stop prematurely +- [Issue #341](https://github.com/influxdata/influxdb/issues/341). Make the number of shards that are queried in parallel configurable +- [Issue #394](https://github.com/influxdata/influxdb/issues/394). Support count(distinct) and count(DISTINCT) +- [Issue #362](https://github.com/influxdata/influxdb/issues/362). Limit should be enforced after aggregation + +v0.5.3 [2014-03-31] +------------------- + +### Bugfixes + +- [Issue #378](https://github.com/influxdata/influxdb/issues/378). Indexing should return if there are no requests added since the last index +- [Issue #370](https://github.com/influxdata/influxdb/issues/370). Filtering and limit should be enforced on the shards +- [Issue #379](https://github.com/influxdata/influxdb/issues/379). 
Boolean columns should be usable in where clauses
+- [Issue #381](https://github.com/influxdata/influxdb/issues/381). Should be able to do deletes as a cluster admin
+
+v0.5.2 [2014-03-28]
+-------------------
+
+### Bugfixes
+
+- [Issue #342](https://github.com/influxdata/influxdb/issues/342). Data resurrected after a server restart
+- [Issue #367](https://github.com/influxdata/influxdb/issues/367). InfluxDB won't start if the api port is commented out
+- [Issue #355](https://github.com/influxdata/influxdb/issues/355). Return an error on wrong time strings
+- [Issue #331](https://github.com/influxdata/influxdb/issues/331). Allow negative time values in the where clause
+- [Issue #371](https://github.com/influxdata/influxdb/issues/371). Series index isn't deleted when the series is dropped
+- [Issue #360](https://github.com/influxdata/influxdb/issues/360). Store and recover continuous queries
+
+v0.5.1 [2014-03-24]
+-------------------
+
+### Bugfixes
+
+- Revert the version of goraft due to a bug found in the latest version
+
+v0.5.0 [2014-03-24]
+-------------------
+
+### Features
+
+- [Issue #293](https://github.com/influxdata/influxdb/pull/293). Implement a Graphite listener
+
+### Bugfixes
+
+- [Issue #340](https://github.com/influxdata/influxdb/issues/340). Writing many requests while replaying seems to cause commits out of order
+
+v0.5.0-rc.6 [2014-03-20]
+------------------------
+
+### Bugfixes
+
+- Increase raft election timeout to avoid unnecessary re-elections
+- Sort points before writing them to avoid an explosion in the request number when the points are written randomly
+- [Issue #335](https://github.com/influxdata/influxdb/issues/335). Fixes regexp for interpolating more than one column value in continuous queries
+- [Issue #318](https://github.com/influxdata/influxdb/pull/318). Support EXPLAIN queries
+- [Issue #333](https://github.com/influxdata/influxdb/pull/333). Fail when the password is too short or too long instead of passing it to the crypto library
+
+v0.5.0-rc.5 [2014-03-11]
+------------------------
+
+### Bugfixes
+
+- [Issue #312](https://github.com/influxdata/influxdb/issues/312). WAL should wait for server id to be set before recovering
+- [Issue #301](https://github.com/influxdata/influxdb/issues/301). Use ref counting to guard against race conditions in the shard cache
+- [Issue #319](https://github.com/influxdata/influxdb/issues/319). Propagate engine creation error correctly to the user
+- [Issue #316](https://github.com/influxdata/influxdb/issues/316). Make sure we don't starve goroutines if we get an access denied error from one of the shards
+- [Issue #306](https://github.com/influxdata/influxdb/issues/306). Deleting/Dropping database takes a lot of memory
+- [Issue #302](https://github.com/influxdata/influxdb/issues/302). Should be able to set negative timestamps on points
+- [Issue #327](https://github.com/influxdata/influxdb/issues/327). Make delete queries not use WAL. This addresses #315, #317 and #314
+- [Issue #321](https://github.com/influxdata/influxdb/issues/321). Make sure we split points on shards properly
+
+v0.5.0-rc.4 [2014-03-07]
+------------------------
+
+### Bugfixes
+
+- [Issue #298](https://github.com/influxdata/influxdb/issues/298). Fix limit when querying multiple shards
+- [Issue #305](https://github.com/influxdata/influxdb/issues/305). Shard ids not unique after restart
+- [Issue #309](https://github.com/influxdata/influxdb/issues/309).
Don't relog the requests on the remote server +- Fix few bugs in the WAL and refactor the way it works (this requires purging the WAL from previous rc) + +v0.5.0-rc.3 [2014-03-03] +------------------------ + +### Bugfixes + +- [Issue #69](https://github.com/influxdata/influxdb/issues/69). Support column aliases +- [Issue #287](https://github.com/influxdata/influxdb/issues/287). Make the lru cache size configurable +- [Issue #38](https://github.com/influxdata/influxdb/issues/38). Fix a memory leak discussed in this story +- [Issue #286](https://github.com/influxdata/influxdb/issues/286). Make the number of open shards configurable +- Make LevelDB use the max open files configuration option. + +v0.5.0-rc.2 [2014-02-27] +------------------------ + +### Bugfixes + +- [Issue #274](https://github.com/influxdata/influxdb/issues/274). Crash after restart +- [Issue #277](https://github.com/influxdata/influxdb/issues/277). Ensure duplicate shards won't be created +- [Issue #279](https://github.com/influxdata/influxdb/issues/279). Limits not working on regex queries +- [Issue #281](https://github.com/influxdata/influxdb/issues/281). `./influxdb -v` should print the sha when building from source +- [Issue #283](https://github.com/influxdata/influxdb/issues/283). Dropping shard and restart in cluster causes panic. +- [Issue #288](https://github.com/influxdata/influxdb/issues/288). Sequence numbers should be unique per server id + +v0.5.0-rc.1 [2014-02-25] +------------------------ + +### Bugfixes + +- Ensure large deletes don't take too much memory +- [Issue #240](https://github.com/influxdata/influxdb/pull/240). Unable to query against columns with `.` in the name. +- [Issue #250](https://github.com/influxdata/influxdb/pull/250). different result between normal and continuous query with "group by" clause +- [Issue #216](https://github.com/influxdata/influxdb/pull/216). Results with no points should exclude columns and points + +### Features + +- [Issue #243](https://github.com/influxdata/influxdb/issues/243). Should have endpoint to GET a user's attributes. +- [Issue #269](https://github.com/influxdata/influxdb/pull/269), [Issue #65](https://github.com/influxdata/influxdb/issues/65) New clustering architecture (see docs), with the side effect that queries can be distributed between multiple shards +- [Issue #164](https://github.com/influxdata/influxdb/pull/269),[Issue #103](https://github.com/influxdata/influxdb/pull/269),[Issue #166](https://github.com/influxdata/influxdb/pull/269),[Issue #165](https://github.com/influxdata/influxdb/pull/269),[Issue #132](https://github.com/influxdata/influxdb/pull/269) Make request log a log file instead of leveldb with recovery on startup + +### Deprecated + +- [Issue #189](https://github.com/influxdata/influxdb/issues/189). `/cluster_admins` and `/db/:db/users` return usernames in a `name` key instead of `username` key. +- [Issue #216](https://github.com/influxdata/influxdb/pull/216). Results with no points should exclude columns and points + +v0.4.4 [2014-02-05] +------------------- + +### Features + +- Make the leveldb max open files configurable in the toml file + +v0.4.3 [2014-01-31] +------------------- + +### Bugfixes + +- [Issue #225](https://github.com/influxdata/influxdb/issues/225). Remove a hard limit on the points returned by the datastore +- [Issue #223](https://github.com/influxdata/influxdb/issues/223). Null values caused count(distinct()) to panic +- [Issue #224](https://github.com/influxdata/influxdb/issues/224). 
Null values broke replication due to protobuf limitation + +v0.4.1 [2014-01-30] +------------------- + +### Features + +- [Issue #193](https://github.com/influxdata/influxdb/issues/193). Allow logging to stdout. Thanks @schmurfy +- [Issue #190](https://github.com/influxdata/influxdb/pull/190). Add support for SSL. +- [Issue #194](https://github.com/influxdata/influxdb/pull/194). Should be able to disable Admin interface. + +### Bugfixes + +- [Issue #33](https://github.com/influxdata/influxdb/issues/33). Don't call WriteHeader more than once per request +- [Issue #195](https://github.com/influxdata/influxdb/issues/195). Allow the bind address to be configurable, Thanks @schmurfy. +- [Issue #199](https://github.com/influxdata/influxdb/issues/199). Make the test timeout configurable +- [Issue #200](https://github.com/influxdata/influxdb/issues/200). Selecting `time` or `sequence_number` silently fail +- [Issue #215](https://github.com/influxdata/influxdb/pull/215). Server fails to start up after Raft log compaction and restart. + +v0.4.0 [2014-01-17] +------------------- + +Features +-------- + +- [Issue #86](https://github.com/influxdata/influxdb/issues/86). Support arithmetic expressions in select clause +- [Issue #92](https://github.com/influxdata/influxdb/issues/92). Change '==' to '=' and '!=' to '<>' +- [Issue #88](https://github.com/influxdata/influxdb/issues/88). Support datetime strings +- [Issue #64](https://github.com/influxdata/influxdb/issues/64). Shard writes and queries across cluster with replay for briefly downed nodes (< 24 hrs) +- [Issue #78](https://github.com/influxdata/influxdb/issues/78). Sequence numbers persist across restarts so they're not reused +- [Issue #102](https://github.com/influxdata/influxdb/issues/102). Support expressions in where condition +- [Issue #101](https://github.com/influxdata/influxdb/issues/101). Support expressions in aggregates +- [Issue #62](https://github.com/influxdata/influxdb/issues/62). Support updating and deleting column values +- [Issue #96](https://github.com/influxdata/influxdb/issues/96). Replicate deletes in a cluster +- [Issue #94](https://github.com/influxdata/influxdb/issues/94). delete queries +- [Issue #116](https://github.com/influxdata/influxdb/issues/116). Use proper logging +- [Issue #40](https://github.com/influxdata/influxdb/issues/40). Use TOML instead of JSON in the config file +- [Issue #99](https://github.com/influxdata/influxdb/issues/99). Support list series in the query language +- [Issue #149](https://github.com/influxdata/influxdb/issues/149). Cluster admins should be able to perform reads and writes. +- [Issue #108](https://github.com/influxdata/influxdb/issues/108). Querying one point using `time =` +- [Issue #114](https://github.com/influxdata/influxdb/issues/114). Servers should periodically check that they're consistent. +- [Issue #93](https://github.com/influxdata/influxdb/issues/93). Should be able to drop a time series +- [Issue #177](https://github.com/influxdata/influxdb/issues/177). Support drop series in the query language. +- [Issue #184](https://github.com/influxdata/influxdb/issues/184). Implement Raft log compaction. +- [Issue #153](https://github.com/influxdata/influxdb/issues/153). Implement continuous queries + +### Bugfixes + +- [Issue #90](https://github.com/influxdata/influxdb/issues/90). Group by multiple columns panic +- [Issue #89](https://github.com/influxdata/influxdb/issues/89). 
'Group by' combined with 'where' not working +- [Issue #106](https://github.com/influxdata/influxdb/issues/106). Don't panic if we only see one point and can't calculate derivative +- [Issue #105](https://github.com/influxdata/influxdb/issues/105). Panic when using a where clause that reference columns with null values +- [Issue #61](https://github.com/influxdata/influxdb/issues/61). Remove default limits from queries +- [Issue #118](https://github.com/influxdata/influxdb/issues/118). Make column names starting with '_' legal +- [Issue #121](https://github.com/influxdata/influxdb/issues/121). Don't fall back to the cluster admin auth if the db user auth fails +- [Issue #127](https://github.com/influxdata/influxdb/issues/127). Return error on delete queries with where condition that don't have time +- [Issue #117](https://github.com/influxdata/influxdb/issues/117). Fill empty groups with default values +- [Issue #150](https://github.com/influxdata/influxdb/pull/150). Fix parser for when multiple divisions look like a regex. +- [Issue #158](https://github.com/influxdata/influxdb/issues/158). Logged deletes should be stored with the time range if missing. +- [Issue #136](https://github.com/influxdata/influxdb/issues/136). Make sure writes are replicated in order to avoid triggering replays +- [Issue #145](https://github.com/influxdata/influxdb/issues/145). Server fails to join cluster if all starting at same time. +- [Issue #176](https://github.com/influxdata/influxdb/issues/176). Drop database should take effect on all nodes +- [Issue #180](https://github.com/influxdata/influxdb/issues/180). Column names not returned when running multi-node cluster and writing more than one point. +- [Issue #182](https://github.com/influxdata/influxdb/issues/182). Queries with invalid limit clause crash the server + +### Deprecated + +- deprecate '==' and '!=' in favor of '=' and '<>', respectively +- deprecate `/dbs` (for listing databases) in favor of a more consistent `/db` endpoint +- deprecate `username` field for a more consistent `name` field in `/db/:db/users` and `/cluster_admins` +- deprecate endpoints `/db/:db/admins/:user` in favor of using `/db/:db/users/:user` which should be used to update user flags, password, etc. +- Querying for column names that don't exist no longer throws an error. + +v0.3.2 +------ + +Features +-------- + +- [Issue #82](https://github.com/influxdata/influxdb/issues/82). Add endpoint for listing available admin interfaces. +- [Issue #80](https://github.com/influxdata/influxdb/issues/80). Support durations when specifying start and end time +- [Issue #81](https://github.com/influxdata/influxdb/issues/81). Add support for IN + +Bugfixes +-------- + +- [Issue #75](https://github.com/influxdata/influxdb/issues/75). Don't allow time series names that start with underscore +- [Issue #85](https://github.com/influxdata/influxdb/issues/85). Non-existing columns exist after they have been queried before + +v0.3.0 +------ + +Features +-------- + +- [Issue #51](https://github.com/influxdata/influxdb/issues/51). Implement first and last aggregates +- [Issue #35](https://github.com/influxdata/influxdb/issues/35). Support table aliases in Join Queries +- [Issue #71](https://github.com/influxdata/influxdb/issues/71). Add WillReturnSingleSeries to the Query +- [Issue #61](https://github.com/influxdata/influxdb/issues/61). Limit should default to 10k +- [Issue #59](https://github.com/influxdata/influxdb/issues/59). 
Add histogram aggregate function + +Bugfixes +-------- + +- Fix join and merges when the query is a descending order query +- [Issue #57](https://github.com/influxdata/influxdb/issues/57). Don't panic when type of time != float +- [Issue #63](https://github.com/influxdata/influxdb/issues/63). Aggregate queries should not have a sequence_number column + +v0.2.0 +------ + +### Features + +- [Issue #37](https://github.com/influxdata/influxdb/issues/37). Support the negation of the regex matcher !~ +- [Issue #47](https://github.com/influxdata/influxdb/issues/47). Spill out query and database detail at the time of bug report + +### Bugfixes + +- [Issue #36](https://github.com/influxdata/influxdb/issues/36). The regex operator should be =~ not ~= +- [Issue #39](https://github.com/influxdata/influxdb/issues/39). Return proper content types from the http api +- [Issue #42](https://github.com/influxdata/influxdb/issues/42). Make the api consistent with the docs +- [Issue #41](https://github.com/influxdata/influxdb/issues/41). Table/Points not deleted when database is dropped +- [Issue #45](https://github.com/influxdata/influxdb/issues/45). Aggregation shouldn't mess up the order of the points +- [Issue #44](https://github.com/influxdata/influxdb/issues/44). Fix crashes on RHEL 5.9 +- [Issue #34](https://github.com/influxdata/influxdb/issues/34). Ascending order always return null for columns that have a null value +- [Issue #55](https://github.com/influxdata/influxdb/issues/55). Limit should limit the points that match the Where clause +- [Issue #53](https://github.com/influxdata/influxdb/issues/53). Writing null values via HTTP API fails + +### Deprecated + +- Preparing to deprecate `/dbs` (for listing databases) in favor of a more consistent `/db` endpoint +- Preparing to deprecate `username` field for a more consistent `name` field in the `/db/:db/users` +- Preparing to deprecate endpoints `/db/:db/admins/:user` in favor of using `/db/:db/users/:user` which should be used to update user flags, password, etc. + +v0.1.0 +------ + +### Features + +- [Issue #29](https://github.com/influxdata/influxdb/issues/29). Semicolon is now optional in queries +- [Issue #31](https://github.com/influxdata/influxdb/issues/31). Support Basic Auth as well as query params for authentication. + +### Bugfixes + +- Don't allow creating users with empty username +- [Issue #22](https://github.com/influxdata/influxdb/issues/22). Don't set goroot if it was set +- [Issue #25](https://github.com/influxdata/influxdb/issues/25). Fix queries that use the median aggregator +- [Issue #26](https://github.com/influxdata/influxdb/issues/26). Default log and db directories should be in /opt/influxdb/shared/data +- [Issue #27](https://github.com/influxdata/influxdb/issues/27). Group by should not blow up if the one of the columns in group by has null values +- [Issue #30](https://github.com/influxdata/influxdb/issues/30). Column indexes/names getting off somehow +- [Issue #32](https://github.com/influxdata/influxdb/issues/32). Fix many typos in the codebase. Thanks @pborreli + +v0.0.9 +------ + +#### Features + +- Add stddev(...) support +- Better docs, thanks @auxesis and @d-snp. + +#### Bugfixes + +- Set PYTHONPATH and CC appropriately on mac os x. +- [Issue #18](https://github.com/influxdata/influxdb/issues/18). Fix 386 debian and redhat packages +- [Issue #23](https://github.com/influxdata/influxdb/issues/23). 
Fix the init scripts on redhat
+
+v0.0.8
+------
+
+#### Features
+
+- Add a way to reset the root password from the command line.
+- Add distinct(..) and derivative(...) support
+- Print test coverage if running go1.2
+
+#### Bugfixes
+
+- Fix the default admin site path in the .deb and .rpm packages.
+- Fix the configuration filename in the .tar.gz package.
+
+v0.0.7
+------
+
+#### Features
+
+- include the admin site in the repo to make it easier for newcomers.
+
+v0.0.6
+------
+
+#### Features
+
+- Add count(distinct(..)) support
+
+#### Bugfixes
+
+- Reuse levigo read/write options.
+
+v0.0.5
+------
+
+#### Features
+
+- Cache passwords in memory to speed up password verification
+- Add MERGE and INNER JOIN support
+
+#### Bugfixes
+
+- All columns should be returned if `select *` was used
+- Read/Write benchmarks
+
+v0.0.2
+------
+
+#### Features
+
+- Add an admin UI
+- Deb and RPM packages
+
+#### Bugfixes
+
+- Fix some nil pointer dereferences
+- Cleanup the aggregators implementation
+
+v0.0.1 [2013-10-22]
+-------------------
+
+- Initial Release
diff --git a/vendor/github.com/influxdata/influxdb/CODING_GUIDELINES.md b/vendor/github.com/influxdata/influxdb/CODING_GUIDELINES.md
new file mode 100644
index 0000000..6c1d2f2
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/CODING_GUIDELINES.md
@@ -0,0 +1,82 @@
+_This document is currently in draft form._
+
+# Background
+
+The goal of this guide is to capture some Dos and Don'ts of Go code for the InfluxDB database. When it comes to Go, writing good code is often achieved with the help of tools like `go fmt` and `go vet`. However, there are still some practices not enforceable by any tools. This guide lists some specific practices to follow when writing code for the database.
+
+*Like everything, one needs to use good judgment.* There will always be times when it doesn't make sense to follow a guideline outlined in this document. If that case arises, be ready to justify your choices.
+
+# The Guidelines
+
+## Try not to use third-party libraries
+
+A third-party package is defined as one that is not part of the standard Go distribution. Generally speaking, we prefer to minimize our use of third-party packages, and avoid them unless absolutely necessary. We'll often write a little bit of code rather than pull in a third-party package. Of course, we do use some third-party packages -- most importantly we use [BoltDB](https://github.com/boltdb/bolt) in some storage engines. So to maximise the chance your change will be accepted by us, use only the standard libraries, or the third-party packages we have decided to use.
+
+For rationale, check out the post [The Case Against Third Party Libraries](http://blog.gopheracademy.com/advent-2014/case-against-3pl/).
+
+## Always include a default case in a 'switch' statement
+The lack of a `default` case in a `switch` statement can be a significant source of bugs. This is particularly true in the case of a type-assertions switch. So always include a `default` statement unless you have an explicit reason not to.
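+
+For illustration only, here is a minimal, self-contained sketch of the pattern -- the `describeValue` helper and the value types it handles are hypothetical, not code from the InfluxDB tree. The point is that the `default` case turns an unexpected type into an error instead of silently ignoring it:
+```
+package main
+
+import "fmt"
+
+// describeValue renders a raw field value for logging. The default case
+// ensures a value of an unexpected type is reported as an error instead
+// of being silently dropped.
+func describeValue(v interface{}) (string, error) {
+	switch v := v.(type) {
+	case float64:
+		return fmt.Sprintf("float %v", v), nil
+	case int64:
+		return fmt.Sprintf("integer %v", v), nil
+	case string:
+		return fmt.Sprintf("string %q", v), nil
+	case bool:
+		return fmt.Sprintf("bool %v", v), nil
+	default:
+		return "", fmt.Errorf("unsupported field type %T", v)
+	}
+}
+
+func main() {
+	for _, v := range []interface{}{int64(42), "idle", []byte("oops")} {
+		s, err := describeValue(v)
+		if err != nil {
+			fmt.Println("error:", err)
+			continue
+		}
+		fmt.Println(s)
+	}
+}
+```
+Running this prints descriptions for the first two values and reports an error for the `[]byte` value, which is exactly the kind of silent omission the `default` case is there to catch.
+
+## When -- and when not -- to set a channel to 'nil'
+
+## Use defer with anonymous functions to handle complex locking
+Consider a block of code like the following.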
+``` + mu.Lock() + if foo == "quit" { + mu.Unlock() + return + } else if foo == "continue" { + if bar == "quit" { + mu.Unlock() + return + } + bar = "still going" + } else { + qux = "here at last" + mu.Unlock() + return + } + foo = "more to do" + bar = "still more to do" + mu.Unlock() + + qux = "finished now" + return +``` +While this is obviously contrived, complex lock control like this is sometimes required, and doesn't lend itself to `defer`. But as the code evolves, it's easy to introduce new cases, and forget to release locks. One way to address this is to use an anonymous function like so: +``` + more := func() bool { + mu.Lock() + defer mu.Unlock() + if foo == "quit" { + return false + } else if foo == "continue" { + if bar == "quit" { + return false + } + bar = "still going" + } else { + qux = "here at last" + return false + } + foo = "more to do" + bar = "still more to do" + return true + }() + + if more { + qux = "finished" + } + return +``` +This allows us to use `defer` but ensures that if any new cases are added to the logic within the anonymous function, the lock will always be released. Another advantage of this approach is that `defer` will still run even in the event of a panic, ensuring the locks will be released even in that case. + +## When to call 'panic()' + +# Useful links +- [Useful techniques in Go](http://arslan.io/ten-useful-techniques-in-go) +- [Go in production](http://peter.bourgon.org/go-in-production/) +- [Principles of designing Go APIs with channels](https://inconshreveable.com/07-08-2014/principles-of-designing-go-apis-with-channels/) +- [Common mistakes in Golang](http://soryy.com/blog/2014/common-mistakes-with-go-lang/). Especially this section `Loops, Closures, and Local Variables` + diff --git a/vendor/github.com/influxdata/influxdb/CONTRIBUTING.md b/vendor/github.com/influxdata/influxdb/CONTRIBUTING.md new file mode 100644 index 0000000..6c7ccf8 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/CONTRIBUTING.md @@ -0,0 +1,282 @@ +Contributing to InfluxDB +======================== + +Bug reports +--------------- +Before you file an issue, please search existing issues in case it has already been filed, or perhaps even fixed. If you file an issue, please include the following. +* Full details of your operating system (or distribution) e.g. 64-bit Ubuntu 14.04. +* The version of InfluxDB you are running +* Whether you installed it using a pre-built package, or built it from source. +* A small test case, if applicable, that demonstrates the issues. + +Remember the golden rule of bug reports: **The easier you make it for us to reproduce the problem, the faster it will get fixed.** +If you have never written a bug report before, or if you want to brush up on your bug reporting skills, we recommend reading [Simon Tatham's essay "How to Report Bugs Effectively."](http://www.chiark.greenend.org.uk/~sgtatham/bugs.html) + +Test cases should be in the form of `curl` commands. 
For example:
+```bash
+# create database
+curl -X POST http://localhost:8086/query --data-urlencode "q=CREATE DATABASE mydb"
+
+# create retention policy
+curl -X POST http://localhost:8086/query --data-urlencode "q=CREATE RETENTION POLICY myrp ON mydb DURATION 365d REPLICATION 1 DEFAULT"
+
+# write data
+curl -X POST http://localhost:8086/write?db=mydb --data-binary "cpu,region=useast,host=server_1,service=redis value=61"
+
+# Delete a Measurement
+curl -X POST http://localhost:8086/query --data-urlencode 'db=mydb' --data-urlencode 'q=DROP MEASUREMENT cpu'
+
+# Query the Measurement
+# Bug: expected it to return no data, but data comes back.
+curl -X POST http://localhost:8086/query --data-urlencode 'db=mydb' --data-urlencode 'q=SELECT * from cpu'
+```
+**If you don't include a clear test case like this, your issue may not be investigated, and may even be closed**. If writing the data is too difficult, please zip up your data directory and include a link to it in your bug report.
+
+Please note that issues are *not the place to file general questions* such as "how do I use collectd with InfluxDB?" Questions of this nature should be sent to the [InfluxData Community](https://community.influxdata.com/), not filed as issues. Issues like this will be closed.
+
+Feature requests
+---------------
+We really like to receive feature requests, as they help us prioritize our work. Please be clear about your requirements, as incomplete feature requests may simply be closed if we don't understand what you would like to see added to InfluxDB.
+
+Contributing to the source code
+---------------
+
+InfluxDB follows the standard Go project structure. This means that all your Go development is done in `$GOPATH/src`. GOPATH can be any directory under which InfluxDB and all its dependencies will be cloned. For full details on the project structure, follow along below.
+
+You should also read our [coding guide](https://github.com/influxdata/influxdb/blob/master/CODING_GUIDELINES.md) to better understand how to write code for InfluxDB.
+
+Submitting a pull request
+------------
+To submit a pull request you should fork the InfluxDB repository, and make your change on a feature branch of your fork. Then generate a pull request from your branch against *master* of the InfluxDB repository. Include in your pull request details of your change -- the why *and* the how -- as well as the testing you performed. Also, be sure to run the test suite with your change in place. Changes that cause tests to fail cannot be merged.
+
+There will usually be some back and forth as we finalize the change, but once that completes it may be merged.
+
+To assist in review for the PR, please add the following to your pull request comment:
+
+```md
+- [ ] CHANGELOG.md updated
+- [ ] Rebased/mergeable
+- [ ] Tests pass
+- [ ] Sign [CLA](https://influxdata.com/community/cla/) (if not already signed)
+```
+
+Signing the CLA
+---------------
+
+If you are going to be contributing back to InfluxDB please take a
+second to sign our CLA, which can be found
+[on our website](https://influxdata.com/community/cla/).
+
+Installing Go
+-------------
+InfluxDB requires Go 1.10.6.
+
+At InfluxDB we find gvm, a Go version manager, useful for installing Go. For instructions
+on how to install it see [the gvm page on github](https://github.com/moovweb/gvm).
+
+After installing gvm you can install and set the default go version by
+running the following:
+
+    gvm install go1.10.6
+    gvm use go1.10.6 --default
+
+Installing Dep
+-------------
+InfluxDB uses [dep](https://github.com/golang/dep) to manage dependencies. Install it by running the following:
+
+    go get github.com/golang/dep/cmd/dep
+
+Revision Control Systems
+-------------
+Go has the ability to import remote packages via revision control systems with the `go get` command. To ensure that you can retrieve any remote package, be sure to install the following rcs software on your system.
+Currently the project only depends on `git` and `mercurial`.
+
+* [Install Git](http://git-scm.com/book/en/Getting-Started-Installing-Git)
+* [Install Mercurial](http://mercurial.selenic.com/wiki/Download)
+
+Getting the source
+------
+Set up the project structure and fetch the repo like so:
+
+```bash
+ mkdir $HOME/gocodez
+ export GOPATH=$HOME/gocodez
+ go get github.com/influxdata/influxdb
+```
+
+You can add the line `export GOPATH=$HOME/gocodez` to your bash/zsh file to be set for every shell instead of having to manually run it every time.
+
+Cloning a fork
+-------------
+If you wish to work with a fork of InfluxDB, your own fork for example, you must still follow the directory structure above. But instead of cloning the main repo, clone your fork. Follow the steps below to work with a fork:
+
+```bash
+ export GOPATH=$HOME/gocodez
+ mkdir -p $GOPATH/src/github.com/influxdata
+ cd $GOPATH/src/github.com/influxdata
+ git clone git@github.com:<username>/influxdb
+```
+
+Retaining the directory structure `$GOPATH/src/github.com/influxdata` is necessary so that Go imports work correctly.
+
+Build and Test
+-----
+
+Make sure you have Go installed and the project structure as shown above. To then get the dependencies for the project, execute the following commands:
+
+```bash
+cd $GOPATH/src/github.com/influxdata/influxdb
+dep ensure
+```
+
+To then build and install the binaries, run the following command.
+```bash
+go clean ./...
+go install ./...
+```
+The binaries will be located in `$GOPATH/bin`. Please note that the InfluxDB binary is named `influxd`, not `influxdb`.
+
+To set the version and commit flags during the build, pass the following to the **install** command:
+
+```bash
+-ldflags="-X main.version=$VERSION -X main.branch=$BRANCH -X main.commit=$COMMIT"
+```
+
+where `$VERSION` is the version, `$BRANCH` is the branch, and `$COMMIT` is the git commit hash.
+
+If you want to build packages, see `build.py` usage information:
+
+```bash
+python build.py --help
+
+# Or to build a package for your current system
+python build.py --package
+```
+
+To run the tests, execute the following command:
+
+```bash
+cd $GOPATH/src/github.com/influxdata/influxdb
+go test -v ./...
+
+# run tests that match some pattern
+go test -run=TestDatabase . -v
+
+# run tests and show coverage
+go test -coverprofile /tmp/cover . && go tool cover -html /tmp/cover
+```
+
+To install go cover, run the following command:
+```
+go get golang.org/x/tools/cmd/cover
+```
+
+Generated Google Protobuf code
+-----------------
+Most changes to the source do not require that the generated protocol buffer code be changed. But if you need to modify the protocol buffer code, you'll first need to install the protocol buffers toolchain.
+ +First install the [protocol buffer compiler](https://developers.google.com/protocol-buffers/ +) 2.6.1 or later for your OS: + +Then install the go plugins: + +```bash +go get github.com/gogo/protobuf/proto +go get github.com/gogo/protobuf/protoc-gen-gogo +go get github.com/gogo/protobuf/gogoproto +``` + +Finally run, `go generate` after updating any `*.proto` file: + +```bash +go generate ./... +``` +**Troubleshooting** + +If generating the protobuf code is failing for you, check each of the following: +* Ensure the protobuf library can be found. Make sure that `LD_LIBRARY_PATH` includes the directory in which the library `libprotoc.so` has been installed. +* Ensure the command `protoc-gen-gogo`, found in `GOPATH/bin`, is on your path. This can be done by adding `GOPATH/bin` to `PATH`. + + +Generated Go Templates +---------------------- + +The query engine requires optimized data structures for each data type so +instead of writing each implementation several times we use templates. _Do not +change code that ends in a `.gen.go` extension!_ Instead you must edit the +`.gen.go.tmpl` file that was used to generate it. + +Once you've edited the template file, you'll need the [`tmpl`][tmpl] utility +to generate the code: + +```sh +$ go get github.com/benbjohnson/tmpl +``` + +Then you can regenerate all templates in the project: + +```sh +$ go generate ./... +``` + +[tmpl]: https://github.com/benbjohnson/tmpl + + +Pre-commit checks +------------- + +We have a pre-commit hook to make sure code is formatted properly and vetted before you commit any changes. We strongly recommend using the pre-commit hook to guard against accidentally committing unformatted code. To use the pre-commit hook, run the following: +```bash + cd $GOPATH/src/github.com/influxdata/influxdb + cp .hooks/pre-commit .git/hooks/ +``` +In case the commit is rejected because it's not formatted you can run +the following to format the code: + +``` +go fmt ./... +go vet ./... +``` + +To install go vet, run the following command: +``` +go get golang.org/x/tools/cmd/vet +``` + +NOTE: If you have not installed mercurial, the above command will fail. See [Revision Control Systems](#revision-control-systems) above. + +For more information on `go vet`, [read the GoDoc](https://godoc.org/golang.org/x/tools/cmd/vet). + +Profiling +----- +When troubleshooting problems with CPU or memory the Go toolchain can be helpful. You can start InfluxDB with CPU and memory profiling turned on. For example: + +```sh +# start influx with profiling +./influxd -cpuprofile influxdcpu.prof -memprof influxdmem.prof +# run queries, writes, whatever you're testing +# Quit out of influxd and influxd.prof will then be written. +# open up pprof to examine the profiling data. +go tool pprof ./influxd influxd.prof +# once inside run "web", opens up browser with the CPU graph +# can also run "web " to zoom in. Or "list " to see specific lines +``` +Note that when you pass the binary to `go tool pprof` *you must specify the path to the binary*. + +If you are profiling benchmarks built with the `testing` package, you may wish +to use the [`github.com/pkg/profile`](github.com/pkg/profile) package to limit +the code being profiled: + +```go +func BenchmarkSomething(b *testing.B) { + // do something intensive like fill database with data... + defer profile.Start(profile.ProfilePath("/tmp"), profile.MemProfile).Stop() + // do something that you want to profile... +} +``` + +Continuous Integration testing +----- +InfluxDB uses CircleCI for continuous integration testing. 
CircleCI executes [test.sh](https://github.com/influxdata/influxdb/blob/master/test.sh), so you may do the same on your local development environment before creating a pull request. + +The `test.sh` script executes a test suite with 5 variants (standard 64 bit, 64 bit with race detection, 32 bit, TSI, go version 1.10.6), each executes with a different arg, 0 through 4. Unless you know differently, `./test.sh 0` is probably all you need. diff --git a/vendor/github.com/influxdata/influxdb/Dockerfile b/vendor/github.com/influxdata/influxdb/Dockerfile new file mode 100644 index 0000000..913c027 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/Dockerfile @@ -0,0 +1,19 @@ +FROM golang:1.10.6 as builder +RUN go get -u github.com/golang/dep/... +WORKDIR /go/src/github.com/influxdata/influxdb +COPY Gopkg.toml Gopkg.lock ./ +RUN dep ensure -vendor-only +COPY . /go/src/github.com/influxdata/influxdb +RUN go install ./cmd/... + +FROM debian:stretch +COPY --from=builder /go/bin/* /usr/bin/ +COPY --from=builder /go/src/github.com/influxdata/influxdb/etc/config.sample.toml /etc/influxdb/influxdb.conf + +EXPOSE 8086 +VOLUME /var/lib/influxdb + +COPY docker/entrypoint.sh /entrypoint.sh +COPY docker/init-influxdb.sh /init-influxdb.sh +ENTRYPOINT ["/entrypoint.sh"] +CMD ["influxd"] diff --git a/vendor/github.com/influxdata/influxdb/Dockerfile_build_ubuntu32 b/vendor/github.com/influxdata/influxdb/Dockerfile_build_ubuntu32 new file mode 100644 index 0000000..f500283 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/Dockerfile_build_ubuntu32 @@ -0,0 +1,39 @@ +FROM ioft/i386-ubuntu:xenial + +RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \ + python-software-properties \ + software-properties-common \ + wget \ + git \ + mercurial \ + make \ + ruby \ + ruby-dev \ + rubygems \ + autoconf \ + libtool \ + build-essential \ + rpm \ + zip \ + python \ + python-boto + +RUN gem install fpm + +# Install go +ENV GOPATH /root/go +ENV GO_VERSION 1.10.6 +ENV GO_ARCH 386 +RUN wget --no-verbose https://storage.googleapis.com/golang/go${GO_VERSION}.linux-${GO_ARCH}.tar.gz && \ + tar -C /usr/local/ -xf /go${GO_VERSION}.linux-${GO_ARCH}.tar.gz && \ + rm /go${GO_VERSION}.linux-${GO_ARCH}.tar.gz +ENV PATH /usr/local/go/bin:$PATH + +ENV PROJECT_DIR $GOPATH/src/github.com/influxdata/influxdb +ENV PATH $GOPATH/bin:$PATH +RUN mkdir -p $PROJECT_DIR +WORKDIR $PROJECT_DIR + +VOLUME $PROJECT_DIR + +ENTRYPOINT [ "/root/go/src/github.com/influxdata/influxdb/build.py" ] diff --git a/vendor/github.com/influxdata/influxdb/Dockerfile_build_ubuntu64 b/vendor/github.com/influxdata/influxdb/Dockerfile_build_ubuntu64 new file mode 100644 index 0000000..37138ed --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/Dockerfile_build_ubuntu64 @@ -0,0 +1,41 @@ +FROM ubuntu:xenial + +RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \ + python-software-properties \ + software-properties-common \ + wget \ + git \ + mercurial \ + make \ + ruby \ + ruby-dev \ + autoconf \ + libtool \ + build-essential \ + rpm \ + zip \ + python \ + python-boto \ + asciidoc \ + xmlto \ + docbook-xsl + +RUN gem install fpm + +# Install go +ENV GOPATH /root/go +ENV GO_VERSION 1.10.6 +ENV GO_ARCH amd64 +RUN wget --no-verbose https://storage.googleapis.com/golang/go${GO_VERSION}.linux-${GO_ARCH}.tar.gz && \ + tar -C /usr/local/ -xf /go${GO_VERSION}.linux-${GO_ARCH}.tar.gz && \ + rm /go${GO_VERSION}.linux-${GO_ARCH}.tar.gz +ENV PATH /usr/local/go/bin:$PATH + +ENV PROJECT_DIR 
$GOPATH/src/github.com/influxdata/influxdb +ENV PATH $GOPATH/bin:$PATH +RUN mkdir -p $PROJECT_DIR +WORKDIR $PROJECT_DIR + +VOLUME $PROJECT_DIR + +ENTRYPOINT [ "/root/go/src/github.com/influxdata/influxdb/build.py" ] diff --git a/vendor/github.com/influxdata/influxdb/Dockerfile_build_ubuntu64_git b/vendor/github.com/influxdata/influxdb/Dockerfile_build_ubuntu64_git new file mode 100644 index 0000000..441039d --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/Dockerfile_build_ubuntu64_git @@ -0,0 +1,44 @@ +FROM ubuntu:xenial + +RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \ + python-software-properties \ + software-properties-common \ + wget \ + git \ + mercurial \ + make \ + ruby \ + ruby-dev \ + build-essential \ + rpm \ + zip \ + python \ + python-boto + +RUN gem install fpm + +# Setup env +ENV GOPATH /root/go +ENV PROJECT_DIR $GOPATH/src/github.com/influxdata/influxdb +ENV PATH $GOPATH/bin:$PATH +RUN mkdir -p $PROJECT_DIR + +VOLUME $PROJECT_DIR + + +# Install go +ENV GO_VERSION 1.10.6 +ENV GO_ARCH amd64 +RUN wget --no-verbose https://storage.googleapis.com/golang/go${GO_VERSION}.linux-${GO_ARCH}.tar.gz && \ + tar -C /usr/local/ -xf /go${GO_VERSION}.linux-${GO_ARCH}.tar.gz && \ + rm /go${GO_VERSION}.linux-${GO_ARCH}.tar.gz + +# Clone Go tip for compilation +ENV GOROOT_BOOTSTRAP /usr/local/go +RUN git clone https://go.googlesource.com/go +ENV PATH /go/bin:$PATH + +# Add script for compiling go +ENV GO_CHECKOUT master +ADD ./gobuild.sh /gobuild.sh +ENTRYPOINT [ "/gobuild.sh" ] diff --git a/vendor/github.com/influxdata/influxdb/Dockerfile_build_ubuntu64_go1.11 b/vendor/github.com/influxdata/influxdb/Dockerfile_build_ubuntu64_go1.11 new file mode 100644 index 0000000..c08f28f --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/Dockerfile_build_ubuntu64_go1.11 @@ -0,0 +1,43 @@ +FROM ubuntu:xenial + +RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \ + python-software-properties \ + software-properties-common \ + wget \ + git \ + mercurial \ + make \ + ruby \ + ruby-dev \ + autoconf \ + libtool \ + build-essential \ + rpm \ + zip \ + python \ + python-boto \ + asciidoc \ + xmlto \ + docbook-xsl + +RUN gem install fpm + +# Install go +ENV GOPATH /root/go + +ENV GO_VERSION 1.11.3 + +ENV GO_ARCH amd64 +RUN wget --no-verbose https://storage.googleapis.com/golang/go${GO_VERSION}.linux-${GO_ARCH}.tar.gz && \ + tar -C /usr/local/ -xf /go${GO_VERSION}.linux-${GO_ARCH}.tar.gz && \ + rm /go${GO_VERSION}.linux-${GO_ARCH}.tar.gz +ENV PATH /usr/local/go/bin:$PATH + +ENV PROJECT_DIR $GOPATH/src/github.com/influxdata/influxdb +ENV PATH $GOPATH/bin:$PATH +RUN mkdir -p $PROJECT_DIR +WORKDIR $PROJECT_DIR + +VOLUME $PROJECT_DIR + +ENTRYPOINT [ "/root/go/src/github.com/influxdata/influxdb/build.py" ] diff --git a/vendor/github.com/influxdata/influxdb/Dockerfile_jenkins_ubuntu32 b/vendor/github.com/influxdata/influxdb/Dockerfile_jenkins_ubuntu32 new file mode 100644 index 0000000..9166767 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/Dockerfile_jenkins_ubuntu32 @@ -0,0 +1,18 @@ +FROM ioft/i386-ubuntu:xenial + +RUN DEBIAN_FRONTEND=noninteractive apt-get update && \ + DEBIAN_FRONTEND=noninteractive apt-get install -y \ + wget \ + mercurial \ + git && \ + rm -rf /var/lib/apt/lists/* + +# Install go +ENV GOPATH /go +ENV GO_VERSION 1.10.6 +ENV GO_ARCH 386 +RUN wget --no-verbose -q https://storage.googleapis.com/golang/go${GO_VERSION}.linux-${GO_ARCH}.tar.gz && \ + tar -C /usr/local/ -xf /go${GO_VERSION}.linux-${GO_ARCH}.tar.gz && \ + mkdir -p 
"$GOPATH/src" "$GOPATH/bin" && chmod -R 777 "$GOPATH" && \ + rm /go${GO_VERSION}.linux-${GO_ARCH}.tar.gz +ENV PATH $GOPATH/bin:/usr/local/go/bin:$PATH diff --git a/vendor/github.com/influxdata/influxdb/Dockerfile_test_ubuntu32 b/vendor/github.com/influxdata/influxdb/Dockerfile_test_ubuntu32 new file mode 100644 index 0000000..af505b5 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/Dockerfile_test_ubuntu32 @@ -0,0 +1,12 @@ +FROM 32bit/ubuntu:14.04 + +RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y python-software-properties software-properties-common git +RUN add-apt-repository ppa:evarlast/golang1.4 +RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y -o Dpkg::Options::="--force-overwrite" golang-go + +ENV GOPATH=/root/go +RUN mkdir -p /root/go/src/github.com/influxdata/influxdb +RUN mkdir -p /tmp/artifacts + +VOLUME /root/go/src/github.com/influxdata/influxdb +VOLUME /tmp/artifacts diff --git a/vendor/github.com/influxdata/influxdb/Godeps b/vendor/github.com/influxdata/influxdb/Godeps new file mode 100644 index 0000000..37b6513 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/Godeps @@ -0,0 +1,49 @@ +collectd.org 2ce144541b8903101fb8f1483cc0497a68798122 +github.com/BurntSushi/toml a368813c5e648fee92e5f6c30e3944ff9d5e8895 +github.com/RoaringBitmap/roaring d6540aab65a17321470b1661bfc52da1823871e9 +github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9 +github.com/bmizerany/pat 6226ea591a40176dd3ff9cd8eff81ed6ca721a00 +github.com/boltdb/bolt 2f1ce7a837dcb8da3ec595b1dac9d0632f0f99e8 +github.com/cespare/xxhash 5c37fe3735342a2e0d01c87a907579987c8936cc +github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76 +github.com/dgrijalva/jwt-go 06ea1031745cb8b3dab3f6a236daf2b0aa468b7e +github.com/dgryski/go-bitstream 9f22ccc24718d9643ac427c8c897ae1a01575783 +github.com/glycerine/go-unsnap-stream 62a9a9eb44fd8932157b1a8ace2149eff5971af6 +github.com/gogo/protobuf 1adfc126b41513cc696b209667c8656ea7aac67c +github.com/golang/protobuf 925541529c1fa6821df4e44ce2723319eb2be768 +github.com/golang/snappy d9eb7a3d35ec988b8585d4a0068e462c27d28380 +github.com/google/go-cmp 3af367b6b30c263d47e8895973edcca9a49cf029 +github.com/influxdata/influxql a7267bff5327e316e54c54342b0bc9598753e3d5 +github.com/influxdata/usage-client 6d3895376368aa52a3a81d2a16e90f0f52371967 +github.com/influxdata/yamux 1f58ded512de5feabbe30b60c7d33a7a896c5f16 +github.com/influxdata/yarpc f0da2db138cad2fb425541938fc28dd5a5bc6918 +github.com/jsternberg/zap-logfmt ac4bd917e18a4548ce6e0e765b29a4e7f397b0b6 +github.com/jwilder/encoding b4e1701a28efcc637d9afcca7d38e495fe909a09 +github.com/klauspost/compress 6c8db69c4b49dd4df1fff66996cf556176d0b9bf +github.com/klauspost/cpuid ae7887de9fa5d2db4eaa8174a7eff2c1ac00f2da +github.com/klauspost/crc32 cb6bfca970f6908083f26f39a79009d608efd5cd +github.com/klauspost/pgzip 0bf5dcad4ada2814c3c00f996a982270bb81a506 +github.com/mattn/go-isatty 6ca4dbf54d38eea1a992b3c722a76a5d1c4cb25c +github.com/matttproud/golang_protobuf_extensions 3247c84500bff8d9fb6d579d800f20b3e091582c +github.com/mschoch/smat 90eadee771aeab36e8bf796039b8c261bebebe4f +github.com/opentracing/opentracing-go 328fceb7548c744337cd010914152b74eaf4c4ab +github.com/paulbellamy/ratecounter 524851a93235ac051e3540563ed7909357fe24ab +github.com/peterh/liner 6106ee4fe3e8435f18cd10e34557e5e50f0e792a +github.com/philhofer/fwd bb6d471dc95d4fe11e432687f8b70ff496cf3136 +github.com/prometheus/client_golang 661e31bf844dfca9aeba15f27ea8aa0d485ad212 +github.com/prometheus/client_model 
99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c +github.com/prometheus/common e4aa40a9169a88835b849a6efb71e05dc04b88f0 +github.com/prometheus/procfs 54d17b57dd7d4a3aa092476596b3f8a933bde349 +github.com/retailnext/hllpp 101a6d2f8b52abfc409ac188958e7e7be0116331 +github.com/tinylib/msgp b2b6a672cf1e5b90748f79b8b81fc8c5cf0571a1 +github.com/willf/bitset d860f346b89450988a379d7d705e83c58d1ea227 +github.com/xlab/treeprint f3a15cfd24bf976c724324cb6846a8b54b88b639 +go.uber.org/atomic 8474b86a5a6f79c443ce4b2992817ff32cf208b8 +go.uber.org/multierr 3c4937480c32f4c13a875a1829af76c98ca3d40a +go.uber.org/zap 35aad584952c3e7020db7b839f6b102de6271f89 +golang.org/x/crypto c3a3ad6d03f7a915c0f7e194b7152974bb73d287 +golang.org/x/net 92b859f39abd2d91a854c9f9c4621b2f5054a92d +golang.org/x/sync 1d60e4601c6fd243af51cc01ddf169918a5407ca +golang.org/x/sys d8e400bc7db4870d786864138af681469693d18c +golang.org/x/text f21a4dfb5e38f5895301dc265a8def02365cc3d0 +golang.org/x/time 26559e0f760e39c24d730d3224364aef164ee23f diff --git a/vendor/github.com/influxdata/influxdb/Gopkg.lock b/vendor/github.com/influxdata/influxdb/Gopkg.lock new file mode 100644 index 0000000..0287213 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/Gopkg.lock @@ -0,0 +1,366 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. + + +[[projects]] + name = "collectd.org" + packages = [ + "api", + "cdtime", + "network" + ] + revision = "2ce144541b8903101fb8f1483cc0497a68798122" + version = "v0.3.0" + +[[projects]] + branch = "master" + name = "github.com/BurntSushi/toml" + packages = ["."] + revision = "a368813c5e648fee92e5f6c30e3944ff9d5e8895" + +[[projects]] + name = "github.com/RoaringBitmap/roaring" + packages = ["."] + revision = "d6540aab65a17321470b1661bfc52da1823871e9" + version = "v0.4.3" + +[[projects]] + branch = "master" + name = "github.com/beorn7/perks" + packages = ["quantile"] + revision = "4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9" + +[[projects]] + branch = "master" + name = "github.com/bmizerany/pat" + packages = ["."] + revision = "6226ea591a40176dd3ff9cd8eff81ed6ca721a00" + +[[projects]] + name = "github.com/boltdb/bolt" + packages = ["."] + revision = "2f1ce7a837dcb8da3ec595b1dac9d0632f0f99e8" + version = "v1.3.1" + +[[projects]] + name = "github.com/cespare/xxhash" + packages = ["."] + revision = "5c37fe3735342a2e0d01c87a907579987c8936cc" + version = "v1.0.0" + +[[projects]] + name = "github.com/davecgh/go-spew" + packages = ["spew"] + revision = "346938d642f2ec3594ed81d874461961cd0faa76" + version = "v1.1.0" + +[[projects]] + name = "github.com/dgrijalva/jwt-go" + packages = ["."] + revision = "06ea1031745cb8b3dab3f6a236daf2b0aa468b7e" + version = "v3.2.0" + +[[projects]] + branch = "master" + name = "github.com/dgryski/go-bitstream" + packages = ["."] + revision = "9f22ccc24718d9643ac427c8c897ae1a01575783" + +[[projects]] + branch = "master" + name = "github.com/glycerine/go-unsnap-stream" + packages = ["."] + revision = "62a9a9eb44fd8932157b1a8ace2149eff5971af6" + +[[projects]] + name = "github.com/gogo/protobuf" + packages = [ + "codec", + "gogoproto", + "proto", + "protoc-gen-gogo/descriptor", + "sortkeys", + "types" + ] + revision = "1adfc126b41513cc696b209667c8656ea7aac67c" + version = "v1.0.0" + +[[projects]] + name = "github.com/golang/protobuf" + packages = ["proto"] + revision = "925541529c1fa6821df4e44ce2723319eb2be768" + version = "v1.0.0" + +[[projects]] + name = "github.com/golang/snappy" + packages = ["."] + revision = "d9eb7a3d35ec988b8585d4a0068e462c27d28380" + 
+[[projects]] + name = "github.com/google/go-cmp" + packages = [ + "cmp", + "cmp/cmpopts", + "cmp/internal/diff", + "cmp/internal/function", + "cmp/internal/value" + ] + revision = "3af367b6b30c263d47e8895973edcca9a49cf029" + version = "v0.2.0" + +[[projects]] + name = "github.com/influxdata/influxql" + packages = [ + ".", + "internal" + ] + revision = "a7267bff5327e316e54c54342b0bc9598753e3d5" + +[[projects]] + branch = "master" + name = "github.com/influxdata/usage-client" + packages = ["v1"] + revision = "6d3895376368aa52a3a81d2a16e90f0f52371967" + +[[projects]] + branch = "master" + name = "github.com/influxdata/yamux" + packages = ["."] + revision = "1f58ded512de5feabbe30b60c7d33a7a896c5f16" + +[[projects]] + branch = "master" + name = "github.com/influxdata/yarpc" + packages = [ + ".", + "codes", + "status", + "yarpcproto" + ] + revision = "f0da2db138cad2fb425541938fc28dd5a5bc6918" + +[[projects]] + name = "github.com/jsternberg/zap-logfmt" + packages = ["."] + revision = "ac4bd917e18a4548ce6e0e765b29a4e7f397b0b6" + version = "v1.0.0" + +[[projects]] + branch = "master" + name = "github.com/jwilder/encoding" + packages = ["simple8b"] + revision = "b4e1701a28efcc637d9afcca7d38e495fe909a09" + +[[projects]] + name = "github.com/klauspost/compress" + packages = ["flate"] + revision = "6c8db69c4b49dd4df1fff66996cf556176d0b9bf" + version = "v1.2.1" + +[[projects]] + name = "github.com/klauspost/cpuid" + packages = ["."] + revision = "ae7887de9fa5d2db4eaa8174a7eff2c1ac00f2da" + version = "v1.1" + +[[projects]] + name = "github.com/klauspost/crc32" + packages = ["."] + revision = "cb6bfca970f6908083f26f39a79009d608efd5cd" + version = "v1.1" + +[[projects]] + name = "github.com/klauspost/pgzip" + packages = ["."] + revision = "0bf5dcad4ada2814c3c00f996a982270bb81a506" + version = "v1.1" + +[[projects]] + branch = "master" + name = "github.com/mattn/go-isatty" + packages = ["."] + revision = "6ca4dbf54d38eea1a992b3c722a76a5d1c4cb25c" + +[[projects]] + name = "github.com/matttproud/golang_protobuf_extensions" + packages = ["pbutil"] + revision = "3247c84500bff8d9fb6d579d800f20b3e091582c" + version = "v1.0.0" + +[[projects]] + branch = "master" + name = "github.com/mschoch/smat" + packages = ["."] + revision = "90eadee771aeab36e8bf796039b8c261bebebe4f" + +[[projects]] + branch = "master" + name = "github.com/opentracing/opentracing-go" + packages = [ + ".", + "ext", + "log" + ] + revision = "328fceb7548c744337cd010914152b74eaf4c4ab" + +[[projects]] + name = "github.com/paulbellamy/ratecounter" + packages = ["."] + revision = "524851a93235ac051e3540563ed7909357fe24ab" + version = "v0.2.0" + +[[projects]] + branch = "master" + name = "github.com/peterh/liner" + packages = ["."] + revision = "6106ee4fe3e8435f18cd10e34557e5e50f0e792a" + +[[projects]] + name = "github.com/philhofer/fwd" + packages = ["."] + revision = "bb6d471dc95d4fe11e432687f8b70ff496cf3136" + version = "v1.0.0" + +[[projects]] + name = "github.com/prometheus/client_golang" + packages = [ + "prometheus", + "prometheus/promhttp" + ] + revision = "661e31bf844dfca9aeba15f27ea8aa0d485ad212" + +[[projects]] + branch = "master" + name = "github.com/prometheus/client_model" + packages = ["go"] + revision = "99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c" + +[[projects]] + branch = "master" + name = "github.com/prometheus/common" + packages = [ + "expfmt", + "internal/bitbucket.org/ww/goautoneg", + "model" + ] + revision = "e4aa40a9169a88835b849a6efb71e05dc04b88f0" + +[[projects]] + branch = "master" + name = "github.com/prometheus/procfs" + 
packages = [ + ".", + "internal/util", + "nfs", + "xfs" + ] + revision = "54d17b57dd7d4a3aa092476596b3f8a933bde349" + +[[projects]] + branch = "master" + name = "github.com/retailnext/hllpp" + packages = ["."] + revision = "101a6d2f8b52abfc409ac188958e7e7be0116331" + +[[projects]] + name = "github.com/tinylib/msgp" + packages = ["msgp"] + revision = "b2b6a672cf1e5b90748f79b8b81fc8c5cf0571a1" + version = "1.0.2" + +[[projects]] + name = "github.com/willf/bitset" + packages = ["."] + revision = "d860f346b89450988a379d7d705e83c58d1ea227" + version = "v1.1.3" + +[[projects]] + branch = "master" + name = "github.com/xlab/treeprint" + packages = ["."] + revision = "f3a15cfd24bf976c724324cb6846a8b54b88b639" + +[[projects]] + name = "go.uber.org/atomic" + packages = ["."] + revision = "8474b86a5a6f79c443ce4b2992817ff32cf208b8" + version = "v1.3.1" + +[[projects]] + name = "go.uber.org/multierr" + packages = ["."] + revision = "3c4937480c32f4c13a875a1829af76c98ca3d40a" + version = "v1.1.0" + +[[projects]] + name = "go.uber.org/zap" + packages = [ + ".", + "buffer", + "internal/bufferpool", + "internal/color", + "internal/exit", + "zapcore", + "zaptest/observer" + ] + revision = "35aad584952c3e7020db7b839f6b102de6271f89" + version = "v1.7.1" + +[[projects]] + branch = "master" + name = "golang.org/x/crypto" + packages = [ + "bcrypt", + "blowfish", + "ssh/terminal" + ] + revision = "c3a3ad6d03f7a915c0f7e194b7152974bb73d287" + +[[projects]] + branch = "master" + name = "golang.org/x/net" + packages = ["context"] + revision = "92b859f39abd2d91a854c9f9c4621b2f5054a92d" + +[[projects]] + branch = "master" + name = "golang.org/x/sync" + packages = ["errgroup"] + revision = "1d60e4601c6fd243af51cc01ddf169918a5407ca" + +[[projects]] + branch = "master" + name = "golang.org/x/sys" + packages = [ + "unix", + "windows" + ] + revision = "d8e400bc7db4870d786864138af681469693d18c" + +[[projects]] + name = "golang.org/x/text" + packages = [ + "encoding", + "encoding/internal", + "encoding/internal/identifier", + "encoding/unicode", + "internal/gen", + "internal/utf8internal", + "runes", + "transform", + "unicode/cldr" + ] + revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0" + version = "v0.3.0" + +[[projects]] + branch = "master" + name = "golang.org/x/time" + packages = ["rate"] + revision = "26559e0f760e39c24d730d3224364aef164ee23f" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + inputs-digest = "d29181544d590a3c8f971a777eb765e72e85007bc8ec5b37a97b785334b64ec2" + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/vendor/github.com/influxdata/influxdb/Gopkg.toml b/vendor/github.com/influxdata/influxdb/Gopkg.toml new file mode 100644 index 0000000..80ace56 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/Gopkg.toml @@ -0,0 +1,75 @@ +[[constraint]] + name = "collectd.org" + version = "0.3.0" + +[[constraint]] + name = "github.com/BurntSushi/toml" + branch = "master" + +[[constraint]] + name = "github.com/RoaringBitmap/roaring" + version = "0.4.3" + +[[constraint]] + name = "github.com/boltdb/bolt" + version = "1.3.1" + +[[constraint]] + name = "github.com/cespare/xxhash" + version = "1.0.0" + +[[constraint]] + name = "github.com/dgrijalva/jwt-go" + version = "3.2.0" + +[[constraint]] + name = "github.com/gogo/protobuf" + version = "1.0.0" + +[[constraint]] + name = "github.com/golang/snappy" + revision = "d9eb7a3d35ec988b8585d4a0068e462c27d28380" + +[[constraint]] + name = "github.com/influxdata/influxql" + revision = "a7267bff5327e316e54c54342b0bc9598753e3d5" + +[[constraint]] 
+ name = "github.com/mattn/go-isatty" + branch = "master" + +[[constraint]] + name = "github.com/opentracing/opentracing-go" + branch = "master" + +[[constraint]] + name = "github.com/paulbellamy/ratecounter" + version = "0.2.0" + +[[constraint]] + name = "github.com/retailnext/hllpp" + branch = "master" + +[[constraint]] + name = "github.com/tinylib/msgp" + version = "1.0.2" + +[[constraint]] + name = "go.uber.org/zap" + version = "1.7.1" + +[[constraint]] + name = "github.com/jsternberg/zap-logfmt" + version = "1.0.0" + +[[constraint]] + name = "github.com/prometheus/client_golang" + revision = "661e31bf844dfca9aeba15f27ea8aa0d485ad212" + +[[constraint]] + name = "github.com/klauspost/pgzip" + version = "1.1.0" + +[prune] + go-tests = true + unused-packages = true diff --git a/vendor/github.com/influxdata/influxdb/Jenkinsfile b/vendor/github.com/influxdata/influxdb/Jenkinsfile new file mode 100644 index 0000000..89181bc --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/Jenkinsfile @@ -0,0 +1,96 @@ +readTrusted 'Dockerfile_jenkins_ubuntu32' + +pipeline { + agent none + + stages { + stage('Update changelog') { + agent any + + when { + anyOf { + branch 'master' + expression { BRANCH_NAME ==~ /^\d+(.\d+)*$/ } + } + } + + steps { + sh "docker pull jsternberg/changelog" + withDockerContainer(image: "jsternberg/changelog") { + withCredentials( + [[$class: "UsernamePasswordMultiBinding", + credentialsId: "hercules-username-password", + usernameVariable: "GITHUB_USER", + passwordVariable: "GITHUB_TOKEN"]]) { + script { + if (env.GIT_PREVIOUS_SUCCESSFUL_COMMIT) { + sh "git changelog ${env.GIT_PREVIOUS_SUCCESSFUL_COMMIT}" + } else { + sh "git changelog" + } + } + } + } + + sshagent(credentials: ['jenkins-hercules-ssh']) { + sh """ + set -e + if ! git diff --quiet; then + git config remote.origin.pushurl git@github.com:influxdata/influxdb.git + git commit -am 'Update changelog' + git push origin HEAD:${BRANCH_NAME} + fi + """ + } + } + } + + stage('64bit') { + agent { + docker { + image 'golang:1.10.6' + } + } + + steps { + sh """ + mkdir -p /go/src/github.com/influxdata + cp -a $WORKSPACE /go/src/github.com/influxdata/influxdb + + cd /go/src/github.com/influxdata/influxdb + go get github.com/golang/dep/cmd/dep + dep ensure -vendor-only + """ + + sh """ + cd /go/src/github.com/influxdata/influxdb + go test -parallel=1 ./... + """ + } + } + + stage('32bit') { + agent { + dockerfile { + filename 'Dockerfile_jenkins_ubuntu32' + } + } + + steps { + sh """ + mkdir -p /go/src/github.com/influxdata + cp -a $WORKSPACE /go/src/github.com/influxdata/influxdb + + cd /go/src/github.com/influxdata/influxdb + go get github.com/golang/dep/cmd/dep + dep ensure -vendor-only + """ + + sh """ + cd /go/src/github.com/influxdata/influxdb + go test -parallel=1 ./... + """ + } + } + } +} diff --git a/vendor/github.com/influxdata/influxdb/LICENSE b/vendor/github.com/influxdata/influxdb/LICENSE new file mode 100644 index 0000000..63cef79 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2013-2016 Errplane Inc. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/influxdata/influxdb/LICENSE_OF_DEPENDENCIES.md b/vendor/github.com/influxdata/influxdb/LICENSE_OF_DEPENDENCIES.md new file mode 100644 index 0000000..b5b0df8 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/LICENSE_OF_DEPENDENCIES.md @@ -0,0 +1,63 @@ +- # List +- bootstrap 3.3.5 [MIT LICENSE](https://github.com/twbs/bootstrap/blob/master/LICENSE) +- collectd.org [ISC LICENSE](https://github.com/collectd/go-collectd/blob/master/LICENSE) +- github.com/BurntSushi/toml [MIT LICENSE](https://github.com/BurntSushi/toml/blob/master/COPYING) +- github.com/RoaringBitmap/roaring [APACHE LICENSE](https://github.com/RoaringBitmap/roaring/blob/master/LICENSE) +- github.com/beorn7/perks [MIT LICENSE](https://github.com/beorn7/perks/blob/master/LICENSE) +- github.com/bmizerany/pat [MIT LICENSE](https://github.com/bmizerany/pat#license) +- github.com/boltdb/bolt [MIT LICENSE](https://github.com/boltdb/bolt/blob/master/LICENSE) +- github.com/cespare/xxhash [MIT LICENSE](https://github.com/cespare/xxhash/blob/master/LICENSE.txt) +- github.com/clarkduvall/hyperloglog [MIT LICENSE](https://github.com/clarkduvall/hyperloglog/blob/master/LICENSE) +- github.com/davecgh/go-spew/spew [ISC LICENSE](https://github.com/davecgh/go-spew/blob/master/LICENSE) +- github.com/dgrijalva/jwt-go [MIT LICENSE](https://github.com/dgrijalva/jwt-go/blob/master/LICENSE) +- github.com/dgryski/go-bits [MIT LICENSE](https://github.com/dgryski/go-bits/blob/master/LICENSE) +- github.com/dgryski/go-bitstream [MIT LICENSE](https://github.com/dgryski/go-bitstream/blob/master/LICENSE) +- github.com/glycerine/go-unsnap-stream [MIT LICENSE](https://github.com/glycerine/go-unsnap-stream/blob/master/LICENSE) +- github.com/gogo/protobuf/proto [BSD LICENSE](https://github.com/gogo/protobuf/blob/master/LICENSE) +- github.com/golang/protobuf [BSD LICENSE](https://github.com/golang/protobuf/blob/master/LICENSE) +- github.com/golang/snappy [BSD LICENSE](https://github.com/golang/snappy/blob/master/LICENSE) +- github.com/google/go-cmp [BSD LICENSE](https://github.com/google/go-cmp/blob/master/LICENSE) +- github.com/influxdata/influxql [MIT LICENSE](https://github.com/influxdata/influxql/blob/master/LICENSE) +- github.com/influxdata/usage-client [MIT LICENSE](https://github.com/influxdata/usage-client/blob/master/LICENSE.txt) +- github.com/influxdata/yamux [MOZILLA PUBLIC LICENSE](https://github.com/influxdata/yamux/blob/master/LICENSE) +- github.com/influxdata/yarpc [MIT 
LICENSE](https://github.com/influxdata/yarpc/blob/master/LICENSE) +- github.com/jsternberg/zap-logfmt [MIT LICENSE](https://github.com/jsternberg/zap-logfmt/blob/master/LICENSE) +- github.com/jwilder/encoding [MIT LICENSE](https://github.com/jwilder/encoding/blob/master/LICENSE) +- github.com/klauspost/pgzip [MIT LICENSE](https://github.com/klauspost/pgzip/blob/master/LICENSE) +- github.com/mattn/go-isatty [MIT LICENSE](https://github.com/mattn/go-isatty/blob/master/LICENSE) +- github.com/matttproud/golang_protobuf_extensions [APACHE LICENSE](https://github.com/matttproud/golang_protobuf_extensions/blob/master/LICENSE) +- github.com/opentracing/opentracing-go [MIT LICENSE](https://github.com/opentracing/opentracing-go/blob/master/LICENSE) +- github.com/paulbellamy/ratecounter [MIT LICENSE](https://github.com/paulbellamy/ratecounter/blob/master/LICENSE) +- github.com/peterh/liner [MIT LICENSE](https://github.com/peterh/liner/blob/master/COPYING) +- github.com/philhofer/fwd [MIT LICENSE](https://github.com/philhofer/fwd/blob/master/LICENSE.md) +- github.com/prometheus/client_golang [MIT LICENSE](https://github.com/prometheus/client_golang/blob/master/LICENSE) +- github.com/prometheus/client_model [MIT LICENSE](https://github.com/prometheus/client_model/blob/master/LICENSE) +- github.com/prometheus/common [APACHE LICENSE](https://github.com/prometheus/common/blob/master/LICENSE) +- github.com/prometheus/procfs [APACHE LICENSE](https://github.com/prometheus/procfs/blob/master/LICENSE) +- github.com/rakyll/statik [APACHE LICENSE](https://github.com/rakyll/statik/blob/master/LICENSE) +- github.com/retailnext/hllpp [BSD LICENSE](https://github.com/retailnext/hllpp/blob/master/LICENSE) +- github.com/tinylib/msgp [MIT LICENSE](https://github.com/tinylib/msgp/blob/master/LICENSE) +- go.uber.org/atomic [MIT LICENSE](https://github.com/uber-go/atomic/blob/master/LICENSE.txt) +- go.uber.org/multierr [MIT LICENSE](https://github.com/uber-go/multierr/blob/master/LICENSE.txt) +- go.uber.org/zap [MIT LICENSE](https://github.com/uber-go/zap/blob/master/LICENSE.txt) +- golang.org/x/crypto [BSD LICENSE](https://github.com/golang/crypto/blob/master/LICENSE) +- golang.org/x/net [BSD LICENSE](https://github.com/golang/net/blob/master/LICENSE) +- golang.org/x/sys [BSD LICENSE](https://github.com/golang/sys/blob/master/LICENSE) +- golang.org/x/text [BSD LICENSE](https://github.com/golang/text/blob/master/LICENSE) +- golang.org/x/time [BSD LICENSE](https://github.com/golang/time/blob/master/LICENSE) +- jquery 2.1.4 [MIT LICENSE](https://github.com/jquery/jquery/blob/master/LICENSE.txt) +- github.com/xlab/treeprint [MIT LICENSE](https://github.com/xlab/treeprint/blob/master/LICENSE) + + + + + + + + + + + + + + diff --git a/vendor/github.com/influxdata/influxdb/QUERIES.md b/vendor/github.com/influxdata/influxdb/QUERIES.md new file mode 100644 index 0000000..c3f52ea --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/QUERIES.md @@ -0,0 +1,190 @@ +The top level name is called a measurement. These names can contain any characters. Then there are field names, field values, tag keys and tag values, which can also contain any characters. However, if the measurement, field, or tag contains any character other than [A-Z,a-z,0-9,_], or if it starts with a digit, it must be double-quoted. Therefore anywhere a measurement name, field key, or tag key appears it should be wrapped in double quotes. 
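+
+For example, the following illustrative queries (the measurement and key names here are made up) show where the quoting rule applies:
+
+```sql
+-- "my measurement" contains a space and "1h-count" starts with a digit,
+-- so both must be double quoted; string values still use single quotes.
+SELECT "1h-count" FROM "my measurement" WHERE "data-center" = 'useast'
+```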
+
+# Databases & retention policies
+
+```sql
+-- create a database
+CREATE DATABASE <name>
+
+-- create a retention policy
+CREATE RETENTION POLICY <rp-name> ON <db-name> DURATION <duration> REPLICATION <n> [DEFAULT]
+
+-- alter retention policy
+ALTER RETENTION POLICY <rp-name> ON <db-name> (DURATION <duration> | REPLICATION <n> | DEFAULT)+
+
+-- drop a database
+DROP DATABASE <name>
+
+-- drop a retention policy
+DROP RETENTION POLICY <rp-name> ON <db-name>
+```
+where `<duration>` is either `INF` for infinite retention, or an integer followed by the desired unit of time: u,ms,s,m,h,d,w for microseconds, milliseconds, seconds, minutes, hours, days, or weeks, respectively. `<n>` must be an integer.
+
+If present, `DEFAULT` sets the retention policy as the default retention policy for writes and reads.
+
+# Users and permissions
+
+```sql
+-- create user
+CREATE USER <name> WITH PASSWORD '<password>'
+
+-- grant privilege on a database
+GRANT <privilege> ON <db> TO <user>
+
+-- grant cluster admin privileges
+GRANT ALL [PRIVILEGES] TO <user>
+
+-- revoke privilege
+REVOKE <privilege> ON <db> FROM <user>
+
+-- revoke all privileges for a DB
+REVOKE ALL [PRIVILEGES] ON <db> FROM <user>
+
+-- revoke all privileges including cluster admin
+REVOKE ALL [PRIVILEGES] FROM <user>
+
+-- combine db creation with privilege assignment (user must already exist)
+CREATE DATABASE <name> GRANT <privilege> TO <user>
+CREATE DATABASE <name> REVOKE <privilege> FROM <user>
+
+-- delete a user
+DROP USER <name>
+
+
+```
+where `<privilege> := READ | WRITE | All`.
+
+Authentication must be enabled in the influxdb.conf file for user permissions to be in effect.
+
+By default, newly created users have no privileges to any databases.
+
+Cluster administration privileges automatically grant full read and write permissions to all databases, regardless of subsequent database-specific privilege revocation statements.
+
+# Select
+
+```sql
+SELECT * FROM just_my_type
+```
+
+## Group By
+
+```sql
+SELECT mean(value) from cpu WHERE host = 'serverA' AND time > now() - 4h GROUP BY time(5m)
+SELECT mean(value) from cpu WHERE time > now() - 4h GROUP BY time(5m), region
+```
+
+
+# Delete
+
+```sql
+DELETE FROM "cpu"
+DELETE FROM "cpu" WHERE time < '2000-01-01T00:00:00Z'
+DELETE WHERE time < '2000-01-01T00:00:00Z'
+```
+
+# Series
+
+## Destroy
+
+```sql
+DROP MEASUREMENT <name>
+DROP MEASUREMENT cpu WHERE region = 'uswest'
+```
+
+## Show
+
+Show series queries are for pulling out individual series from measurement names and tag data. They're useful for discovery.
+
+```sql
+-- show all databases
+SHOW DATABASES
+
+-- show measurement names
+SHOW MEASUREMENTS
+SHOW MEASUREMENTS LIMIT 15
+SHOW MEASUREMENTS LIMIT 10 OFFSET 40
+SHOW MEASUREMENTS WHERE service = 'redis'
+-- LIMIT and OFFSET can be applied to any of the SHOW type queries
+
+-- show all series across all measurements/tagsets
+SHOW SERIES
+
+-- get a show of all series for any measurements where tag key region = tag value 'uswest'
+SHOW SERIES WHERE region = 'uswest'
+
+SHOW SERIES FROM cpu_load WHERE region = 'uswest' LIMIT 10
+
+-- returns the 100 - 109 rows in the result. In the case of SHOW SERIES, which returns
+-- series split into measurements. Each series counts as a row. So you could see only a
+-- single measurement returned, but 10 series within it.
+SHOW SERIES FROM cpu_load WHERE region = 'uswest' LIMIT 10 OFFSET 100
+
+-- show all retention policies on a database
+SHOW RETENTION POLICIES ON mydb
+
+-- get a show of all tag keys across all measurements
+SHOW TAG KEYS
+
+-- show all the tag keys for a given measurement
+SHOW TAG KEYS FROM cpu
+SHOW TAG KEYS FROM temperature, wind_speed
+
+-- show all the tag values. note that a single WHERE TAG KEY = '...' 
clause is required +SHOW TAG VALUES WITH TAG KEY = 'region' +SHOW TAG VALUES FROM cpu WHERE region = 'uswest' WITH TAG KEY = 'host' + +-- and you can do stuff against fields +SHOW FIELD KEYS FROM cpu + +-- but you can't do this +SHOW FIELD VALUES +-- we don't index field values, so this query should be invalid. + +-- show all users +SHOW USERS +``` + +Note that `FROM` and `WHERE` are optional clauses in most of the show series queries. + +And the show series output looks like this: + +```json +[ + { + "name": "cpu", + "columns": ["id", "region", "host"], + "values": [ + 1, "uswest", "servera", + 2, "uswest", "serverb" + ] + }, + { + "name": "reponse_time", + "columns": ["id", "application", "host"], + "values": [ + 3, "myRailsApp", "servera" + ] + } +] +``` + +# Continuous Queries + +Continuous queries are going to be inspired by MySQL `TRIGGER` syntax: + +http://dev.mysql.com/doc/refman/5.0/en/trigger-syntax.html + +Instead of having automatically-assigned ids, named continuous queries allows for some level of duplication prevention, +particularly in the case where creation is scripted. + +## Create + + CREATE CONTINUOUS QUERY AS SELECT ... FROM ... + +## Destroy + + DROP CONTINUOUS QUERY + +## List + + SHOW CONTINUOUS QUERIES diff --git a/vendor/github.com/influxdata/influxdb/README.md b/vendor/github.com/influxdata/influxdb/README.md new file mode 100644 index 0000000..cc2f21c --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/README.md @@ -0,0 +1,71 @@ +# InfluxDB [![Circle CI](https://circleci.com/gh/influxdata/influxdb/tree/master.svg?style=svg)](https://circleci.com/gh/influxdata/influxdb/tree/master) [![Go Report Card](https://goreportcard.com/badge/github.com/influxdata/influxdb)](https://goreportcard.com/report/github.com/influxdata/influxdb) [![Docker pulls](https://img.shields.io/docker/pulls/library/influxdb.svg)](https://hub.docker.com/_/influxdb/) + +## An Open-Source Time Series Database + +InfluxDB is an open source **time series database** with +**no external dependencies**. It's useful for recording metrics, +events, and performing analytics. + +## Features + +* Built-in [HTTP API](https://docs.influxdata.com/influxdb/latest/guides/writing_data/) so you don't have to write any server side code to get up and running. +* Data can be tagged, allowing very flexible querying. +* SQL-like query language. +* Simple to install and manage, and fast to get data in and out. +* It aims to answer queries in real-time. That means every data point is + indexed as it comes in and is immediately available in queries that + should return in < 100ms. + +## Installation + +We recommend installing InfluxDB using one of the [pre-built packages](https://influxdata.com/downloads/#influxdb). Then start InfluxDB using: + +* `service influxdb start` if you have installed InfluxDB using an official Debian or RPM package. +* `systemctl start influxdb` if you have installed InfluxDB using an official Debian or RPM package, and are running a distro with `systemd`. For example, Ubuntu 15 or later. +* `$GOPATH/bin/influxd` if you have built InfluxDB from source. 
+ +## Getting Started + +### Create your first database + +``` +curl -XPOST "http://localhost:8086/query" --data-urlencode "q=CREATE DATABASE mydb" +``` + +### Insert some data +``` +curl -XPOST "http://localhost:8086/write?db=mydb" \ +-d 'cpu,host=server01,region=uswest load=42 1434055562000000000' + +curl -XPOST "http://localhost:8086/write?db=mydb" \ +-d 'cpu,host=server02,region=uswest load=78 1434055562000000000' + +curl -XPOST "http://localhost:8086/write?db=mydb" \ +-d 'cpu,host=server03,region=useast load=15.4 1434055562000000000' +``` + +### Query for the data +```JSON +curl -G "http://localhost:8086/query?pretty=true" --data-urlencode "db=mydb" \ +--data-urlencode "q=SELECT * FROM cpu WHERE host='server01' AND time < now() - 1d" +``` + +### Analyze the data +```JSON +curl -G "http://localhost:8086/query?pretty=true" --data-urlencode "db=mydb" \ +--data-urlencode "q=SELECT mean(load) FROM cpu WHERE region='uswest'" +``` + +## Documentation + +* Read more about the [design goals and motivations of the project](https://docs.influxdata.com/influxdb/latest/). +* Follow the [getting started guide](https://docs.influxdata.com/influxdb/latest/introduction/getting_started/) to learn the basics in just a few minutes. +* Learn more about [InfluxDB's key concepts](https://docs.influxdata.com/influxdb/latest/concepts/key_concepts/). + +## Contributing + +If you're feeling adventurous and want to contribute to InfluxDB, see our [contributing doc](https://github.com/influxdata/influxdb/blob/master/CONTRIBUTING.md) for info on how to make feature requests, build from source, and run tests. + +## Looking for Support? + +InfluxDB offers a number of services to help your project succeed. We offer Developer Support for organizations in active development, Managed Hosting to make it easy to move into production, and Enterprise Support for companies requiring the best response times, SLAs, and technical fixes. Visit our [support page](https://influxdata.com/services/) or contact [sales@influxdb.com](mailto:sales@influxdb.com) to learn how we can best help you succeed. diff --git a/vendor/github.com/influxdata/influxdb/TODO.md b/vendor/github.com/influxdata/influxdb/TODO.md new file mode 100644 index 0000000..56b5294 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/TODO.md @@ -0,0 +1,9 @@ +# TODO + +## v2 + +TODO list for v2. Here is a list of things we want to add to v1, but can't because they would be a breaking change. + +- [#1834](https://github.com/influxdata/influxdb/issues/1834): Disallow using time as a tag key or field key. +- [#2124](https://github.com/influxdata/influxdb/issues/2124): Prohibit writes with precision, but without an explicit timestamp. +- [#4461](https://github.com/influxdata/influxdb/issues/4461): Change default time boundaries. diff --git a/vendor/github.com/influxdata/influxdb/appveyor.yml b/vendor/github.com/influxdata/influxdb/appveyor.yml new file mode 100644 index 0000000..f206536 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/appveyor.yml @@ -0,0 +1,37 @@ +version: 0.{build} +pull_requests: + do_not_increment_build_number: true +branches: + only: + - master + +os: Windows Server 2012 R2 + +# Custom clone folder (variables are not expanded here). 
+clone_folder: c:\gopath\src\github.com\influxdata\influxdb + +# Environment variables +environment: + GOROOT: C:\go110 + GOPATH: C:\gopath + +# Scripts that run after cloning repository +install: + - set PATH=%GOROOT%\bin;%GOPATH%\bin;%PATH% + - rmdir c:\go /s /q + - echo %PATH% + - echo %GOPATH% + - cd C:\gopath\src\github.com\influxdata\influxdb + - go version + - go env + - go get github.com/golang/dep/cmd/dep + - cd C:\gopath\src\github.com\influxdata\influxdb + - dep ensure -vendor-only + +# To run your custom scripts instead of automatic MSBuild +build_script: + - go get -t -v ./... + - go test -timeout 15m -v ./... + +# To disable deployment +deploy: off diff --git a/vendor/github.com/influxdata/influxdb/build.py b/vendor/github.com/influxdata/influxdb/build.py new file mode 100755 index 0000000..3f41b0e --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/build.py @@ -0,0 +1,991 @@ +#!/usr/bin/python2.7 -u + +import sys +import os +import subprocess +import time +from datetime import datetime +import shutil +import tempfile +import hashlib +import re +import logging +import argparse + +################ +#### InfluxDB Variables +################ + +# Packaging variables +PACKAGE_NAME = "influxdb" +INSTALL_ROOT_DIR = "/usr/bin" +LOG_DIR = "/var/log/influxdb" +DATA_DIR = "/var/lib/influxdb" +SCRIPT_DIR = "/usr/lib/influxdb/scripts" +CONFIG_DIR = "/etc/influxdb" +LOGROTATE_DIR = "/etc/logrotate.d" +MAN_DIR = "/usr/share/man" + +INIT_SCRIPT = "scripts/init.sh" +SYSTEMD_SCRIPT = "scripts/influxdb.service" +PREINST_SCRIPT = "scripts/pre-install.sh" +POSTINST_SCRIPT = "scripts/post-install.sh" +POSTUNINST_SCRIPT = "scripts/post-uninstall.sh" +LOGROTATE_SCRIPT = "scripts/logrotate" +DEFAULT_CONFIG = "etc/config.sample.toml" + +# Default AWS S3 bucket for uploads +DEFAULT_BUCKET = "dl.influxdata.com/influxdb/artifacts" + +CONFIGURATION_FILES = [ + CONFIG_DIR + '/influxdb.conf', + LOGROTATE_DIR + '/influxdb', +] + +PACKAGE_LICENSE = "MIT" +PACKAGE_URL = "https://github.com/influxdata/influxdb" +MAINTAINER = "support@influxdb.com" +VENDOR = "InfluxData" +DESCRIPTION = "Distributed time-series database." + +prereqs = [ 'git', 'go' ] +go_vet_command = "go vet ./..." 
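+# Optional tooling: fpm and rpmbuild are only needed when building packages,
+# and gpg only when signing them.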
+optional_prereqs = [ 'fpm', 'rpmbuild', 'gpg' ] + +fpm_common_args = "-f -s dir --log error \ +--vendor {} \ +--url {} \ +--after-install {} \ +--before-install {} \ +--after-remove {} \ +--license {} \ +--maintainer {} \ +--directories {} \ +--directories {} \ +--directories {} \ +--description \"{}\"".format( + VENDOR, + PACKAGE_URL, + POSTINST_SCRIPT, + PREINST_SCRIPT, + POSTUNINST_SCRIPT, + PACKAGE_LICENSE, + MAINTAINER, + LOG_DIR, + DATA_DIR, + MAN_DIR, + DESCRIPTION) + +for f in CONFIGURATION_FILES: + fpm_common_args += " --config-files {}".format(f) + +targets = { + 'influx' : './cmd/influx', + 'influxd' : './cmd/influxd', + 'influx_stress' : './cmd/influx_stress', + 'influx_inspect' : './cmd/influx_inspect', + 'influx_tsm' : './cmd/influx_tsm', +} + +supported_builds = { + 'darwin': [ "amd64" ], + 'windows': [ "amd64" ], + 'linux': [ "amd64", "i386", "armhf", "arm64", "armel", "static_i386", "static_amd64" ] +} + +supported_packages = { + "darwin": [ "tar" ], + "linux": [ "deb", "rpm", "tar" ], + "windows": [ "zip" ], +} + +################ +#### InfluxDB Functions +################ + +def print_banner(): + logging.info(""" + ___ __ _ ___ ___ + |_ _|_ _ / _| |_ ___ _| \\| _ ) + | || ' \\| _| | || \\ \\ / |) | _ \\ + |___|_||_|_| |_|\\_,_/_\\_\\___/|___/ + Build Script +""") + +def create_package_fs(build_root): + """Create a filesystem structure to mimic the package filesystem. + """ + logging.debug("Creating package filesystem at location: {}".format(build_root)) + # Using [1:] for the path names due to them being absolute + # (will overwrite previous paths, per 'os.path.join' documentation) + dirs = [ INSTALL_ROOT_DIR[1:], + LOG_DIR[1:], + DATA_DIR[1:], + SCRIPT_DIR[1:], + CONFIG_DIR[1:], + LOGROTATE_DIR[1:], + MAN_DIR[1:] ] + for d in dirs: + os.makedirs(os.path.join(build_root, d)) + os.chmod(os.path.join(build_root, d), 0o755) + +def package_scripts(build_root, config_only=False, windows=False): + """Copy the necessary scripts and configuration files to the package + filesystem. + """ + if config_only: + logging.debug("Copying configuration to build directory.") + shutil.copyfile(DEFAULT_CONFIG, os.path.join(build_root, "influxdb.conf")) + os.chmod(os.path.join(build_root, "influxdb.conf"), 0o644) + else: + logging.debug("Copying scripts and sample configuration to build directory.") + shutil.copyfile(INIT_SCRIPT, os.path.join(build_root, SCRIPT_DIR[1:], INIT_SCRIPT.split('/')[1])) + os.chmod(os.path.join(build_root, SCRIPT_DIR[1:], INIT_SCRIPT.split('/')[1]), 0o644) + shutil.copyfile(SYSTEMD_SCRIPT, os.path.join(build_root, SCRIPT_DIR[1:], SYSTEMD_SCRIPT.split('/')[1])) + os.chmod(os.path.join(build_root, SCRIPT_DIR[1:], SYSTEMD_SCRIPT.split('/')[1]), 0o644) + shutil.copyfile(LOGROTATE_SCRIPT, os.path.join(build_root, LOGROTATE_DIR[1:], "influxdb")) + os.chmod(os.path.join(build_root, LOGROTATE_DIR[1:], "influxdb"), 0o644) + shutil.copyfile(DEFAULT_CONFIG, os.path.join(build_root, CONFIG_DIR[1:], "influxdb.conf")) + os.chmod(os.path.join(build_root, CONFIG_DIR[1:], "influxdb.conf"), 0o644) + +def package_man_files(build_root): + """Copy and gzip man pages to the package filesystem.""" + logging.debug("Installing man pages.") + run("make -C man/ clean install DESTDIR={}/usr".format(build_root)) + for path, dir, files in os.walk(os.path.join(build_root, MAN_DIR[1:])): + for f in files: + run("gzip -9n {}".format(os.path.join(path, f))) + +def go_get(branch, update=False, no_uncommitted=False): + """Retrieve build dependencies or restore pinned dependencies. 
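+
+    Returns False when 'no_uncommitted' is set and the working tree has
+    uncommitted changes; otherwise installs 'dep' if it is missing, runs
+    'dep ensure', and returns True.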
+ """ + if local_changes() and no_uncommitted: + logging.error("There are uncommitted changes in the current directory.") + return False + if not check_path_for("dep"): + logging.info("Downloading `dep`...") + get_command = "go get github.com/golang/dep/cmd/dep" + run(get_command) + logging.info("Retrieving dependencies with `dep`...") + sys.stdout.flush() + run("{}/bin/dep ensure -v -vendor-only".format(os.environ.get("GOPATH"))) + return True + +def run_tests(race, parallel, timeout, no_vet, junit=False): + """Run the Go test suite on binary output. + """ + logging.info("Starting tests...") + if race: + logging.info("Race is enabled.") + if parallel is not None: + logging.info("Using parallel: {}".format(parallel)) + if timeout is not None: + logging.info("Using timeout: {}".format(timeout)) + out = run("go fmt ./...") + if len(out) > 0: + logging.error("Code not formatted. Please use 'go fmt ./...' to fix formatting errors.") + logging.error("{}".format(out)) + return False + if not no_vet: + logging.info("Running 'go vet'...") + out = run(go_vet_command) + if len(out) > 0: + logging.error("Go vet failed. Please run 'go vet ./...' and fix any errors.") + logging.error("{}".format(out)) + return False + else: + logging.info("Skipping 'go vet' call...") + test_command = "go test -v" + if race: + test_command += " -race" + if parallel is not None: + test_command += " -parallel {}".format(parallel) + if timeout is not None: + test_command += " -timeout {}".format(timeout) + test_command += " ./..." + if junit: + logging.info("Retrieving go-junit-report...") + run("go get github.com/jstemmer/go-junit-report") + + # Retrieve the output from this command. + logging.info("Running tests...") + logging.debug("{}".format(test_command)) + proc = subprocess.Popen(test_command.split(), stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + output, unused_err = proc.communicate() + output = output.decode('utf-8').strip() + + # Process the output through go-junit-report. + with open('test-results.xml', 'w') as f: + logging.debug("{}".format("go-junit-report")) + junit_proc = subprocess.Popen(["go-junit-report"], stdin=subprocess.PIPE, stdout=f, stderr=subprocess.PIPE) + unused_output, err = junit_proc.communicate(output.encode('ascii', 'ignore')) + if junit_proc.returncode != 0: + logging.error("Command '{}' failed with error: {}".format("go-junit-report", err)) + sys.exit(1) + + if proc.returncode != 0: + logging.error("Command '{}' failed with error: {}".format(test_command, output.encode('ascii', 'ignore'))) + sys.exit(1) + else: + logging.info("Running tests...") + output = run(test_command) + logging.debug("Test output:\n{}".format(out.encode('ascii', 'ignore'))) + return True + +################ +#### All InfluxDB-specific content above this line +################ + +def run(command, allow_failure=False, shell=False): + """Run shell command (convenience wrapper around subprocess). 
+ """ + out = None + logging.debug("{}".format(command)) + try: + if shell: + out = subprocess.check_output(command, stderr=subprocess.STDOUT, shell=shell) + else: + out = subprocess.check_output(command.split(), stderr=subprocess.STDOUT) + out = out.decode('utf-8').strip() + # logging.debug("Command output: {}".format(out)) + except subprocess.CalledProcessError as e: + if allow_failure: + logging.warn("Command '{}' failed with error: {}".format(command, e.output)) + return None + else: + logging.error("Command '{}' failed with error: {}".format(command, e.output)) + sys.exit(1) + except OSError as e: + if allow_failure: + logging.warn("Command '{}' failed with error: {}".format(command, e)) + return out + else: + logging.error("Command '{}' failed with error: {}".format(command, e)) + sys.exit(1) + else: + return out + +def create_temp_dir(prefix = None): + """ Create temporary directory with optional prefix. + """ + if prefix is None: + return tempfile.mkdtemp(prefix="{}-build.".format(PACKAGE_NAME)) + else: + return tempfile.mkdtemp(prefix=prefix) + +def increment_minor_version(version): + """Return the version with the minor version incremented and patch + version set to zero. + """ + ver_list = version.split('.') + if len(ver_list) != 3: + logging.warn("Could not determine how to increment version '{}', will just use provided version.".format(version)) + return version + ver_list[1] = str(int(ver_list[1]) + 1) + ver_list[2] = str(0) + inc_version = '.'.join(ver_list) + logging.debug("Incremented version from '{}' to '{}'.".format(version, inc_version)) + return inc_version + +def get_current_version_tag(): + """Retrieve the raw git version tag. + """ + version = run("git describe --always --tags --abbrev=0") + return version + +def get_current_version(): + """Parse version information from git tag output. + """ + version_tag = get_current_version_tag() + # Remove leading 'v' + if version_tag[0] == 'v': + version_tag = version_tag[1:] + # Replace any '-'/'_' with '~' + if '-' in version_tag: + version_tag = version_tag.replace("-","~") + if '_' in version_tag: + version_tag = version_tag.replace("_","~") + return version_tag + +def get_current_commit(short=False): + """Retrieve the current git commit. + """ + command = None + if short: + command = "git log --pretty=format:'%h' -n 1" + else: + command = "git rev-parse HEAD" + out = run(command) + return out.strip('\'\n\r ') + +def get_current_branch(): + """Retrieve the current git branch. + """ + command = "git rev-parse --abbrev-ref HEAD" + out = run(command) + return out.strip() + +def local_changes(): + """Return True if there are local un-committed changes. + """ + output = run("git diff-files --ignore-submodules --").strip() + if len(output) > 0: + return True + return False + +def get_system_arch(): + """Retrieve current system architecture. + """ + arch = os.uname()[4] + if arch == "x86_64": + arch = "amd64" + elif arch == "386": + arch = "i386" + elif arch == "aarch64": + arch = "arm64" + elif 'arm' in arch: + # Prevent uname from reporting full ARM arch (eg 'armv7l') + arch = "arm" + return arch + +def get_system_platform(): + """Retrieve current system platform. + """ + if sys.platform.startswith("linux"): + return "linux" + else: + return sys.platform + +def get_go_version(): + """Retrieve version information for Go. 
+ """ + out = run("go version") + matches = re.search('go version go(\S+)', out) + if matches is not None: + return matches.groups()[0].strip() + return None + +def check_path_for(b): + """Check the the user's path for the provided binary. + """ + def is_exe(fpath): + return os.path.isfile(fpath) and os.access(fpath, os.X_OK) + + for path in os.environ["PATH"].split(os.pathsep): + path = path.strip('"') + full_path = os.path.join(path, b) + if os.path.isfile(full_path) and os.access(full_path, os.X_OK): + return full_path + +def check_environ(build_dir = None): + """Check environment for common Go variables. + """ + logging.info("Checking environment...") + for v in [ "GOPATH", "GOBIN", "GOROOT" ]: + logging.debug("Using '{}' for {}".format(os.environ.get(v), v)) + + cwd = os.getcwd() + if build_dir is None and os.environ.get("GOPATH") and os.environ.get("GOPATH") not in cwd: + logging.warn("Your current directory is not under your GOPATH. This may lead to build failures.") + return True + +def check_prereqs(): + """Check user path for required dependencies. + """ + logging.info("Checking for dependencies...") + for req in prereqs: + if not check_path_for(req): + logging.error("Could not find dependency: {}".format(req)) + return False + return True + +def upload_packages(packages, bucket_name=None, overwrite=False): + """Upload provided package output to AWS S3. + """ + logging.debug("Uploading files to bucket '{}': {}".format(bucket_name, packages)) + try: + import boto + from boto.s3.key import Key + from boto.s3.connection import OrdinaryCallingFormat + logging.getLogger("boto").setLevel(logging.WARNING) + except ImportError: + logging.warn("Cannot upload packages without 'boto' Python library!") + return False + logging.info("Connecting to AWS S3...") + # Up the number of attempts to 10 from default of 1 + boto.config.add_section("Boto") + boto.config.set("Boto", "metadata_service_num_attempts", "10") + c = boto.connect_s3(calling_format=OrdinaryCallingFormat()) + if bucket_name is None: + bucket_name = DEFAULT_BUCKET + bucket = c.get_bucket(bucket_name.split('/')[0]) + for p in packages: + if '/' in bucket_name: + # Allow for nested paths within the bucket name (ex: + # bucket/folder). Assuming forward-slashes as path + # delimiter. 
+ name = os.path.join('/'.join(bucket_name.split('/')[1:]), + os.path.basename(p)) + else: + name = os.path.basename(p) + logging.debug("Using key: {}".format(name)) + if bucket.get_key(name) is None or overwrite: + logging.info("Uploading file {}".format(name)) + k = Key(bucket) + k.key = name + if overwrite: + n = k.set_contents_from_filename(p, replace=True) + else: + n = k.set_contents_from_filename(p, replace=False) + k.make_public() + else: + logging.warn("Not uploading file {}, as it already exists in the target bucket.".format(name)) + return True + +def go_list(vendor=False, relative=False): + """ + Return a list of packages + If vendor is False vendor package are not included + If relative is True the package prefix defined by PACKAGE_URL is stripped + """ + p = subprocess.Popen(["go", "list", "./..."], stdout=subprocess.PIPE, stderr=subprocess.PIPE) + out, err = p.communicate() + packages = out.split('\n') + if packages[-1] == '': + packages = packages[:-1] + if not vendor: + non_vendor = [] + for p in packages: + if '/vendor/' not in p: + non_vendor.append(p) + packages = non_vendor + if relative: + relative_pkgs = [] + for p in packages: + r = p.replace(PACKAGE_URL, '.') + if r != '.': + relative_pkgs.append(r) + packages = relative_pkgs + return packages + +def build(version=None, + platform=None, + arch=None, + nightly=False, + race=False, + clean=False, + outdir=".", + tags=[], + static=False): + """Build each target for the specified architecture and platform. + """ + logging.info("Starting build for {}/{}...".format(platform, arch)) + logging.info("Using Go version: {}".format(get_go_version())) + logging.info("Using git branch: {}".format(get_current_branch())) + logging.info("Using git commit: {}".format(get_current_commit())) + if static: + logging.info("Using statically-compiled output.") + if race: + logging.info("Race is enabled.") + if len(tags) > 0: + logging.info("Using build tags: {}".format(','.join(tags))) + + logging.info("Sending build output to: {}".format(outdir)) + if not os.path.exists(outdir): + os.makedirs(outdir) + elif clean and outdir != '/' and outdir != ".": + logging.info("Cleaning build directory '{}' before building.".format(outdir)) + shutil.rmtree(outdir) + os.makedirs(outdir) + + logging.info("Using version '{}' for build.".format(version)) + + for target, path in targets.items(): + logging.info("Building target: {}".format(target)) + build_command = "" + + # Handle static binary output + if static is True or "static_" in arch: + if "static_" in arch: + static = True + arch = arch.replace("static_", "") + build_command += "CGO_ENABLED=0 " + + # Handle variations in architecture output + if arch == "i386" or arch == "i686": + arch = "386" + elif "arm" in arch: + arch = "arm" + build_command += "GOOS={} GOARCH={} ".format(platform, arch) + + if "arm" in arch: + if arch == "armel": + build_command += "GOARM=5 " + elif arch == "armhf" or arch == "arm": + build_command += "GOARM=6 " + elif arch == "arm64": + # TODO(rossmcdonald) - Verify this is the correct setting for arm64 + build_command += "GOARM=7 " + else: + logging.error("Invalid ARM architecture specified: {}".format(arch)) + logging.error("Please specify either 'armel', 'armhf', or 'arm64'.") + return False + if platform == 'windows': + target = target + '.exe' + build_command += "go build -o {} ".format(os.path.join(outdir, target)) + if race: + build_command += "-race " + if len(tags) > 0: + build_command += "-tags {} ".format(','.join(tags)) + if "1.4" in get_go_version(): + if 
static: + build_command += "-ldflags=\"-s -X main.version {} -X main.branch {} -X main.commit {}\" ".format(version, + get_current_branch(), + get_current_commit()) + else: + build_command += "-ldflags=\"-X main.version {} -X main.branch {} -X main.commit {}\" ".format(version, + get_current_branch(), + get_current_commit()) + + else: + # Starting with Go 1.5, the linker flag arguments changed to 'name=value' from 'name value' + if static: + build_command += "-ldflags=\"-s -X main.version={} -X main.branch={} -X main.commit={}\" ".format(version, + get_current_branch(), + get_current_commit()) + else: + build_command += "-ldflags=\"-X main.version={} -X main.branch={} -X main.commit={}\" ".format(version, + get_current_branch(), + get_current_commit()) + if static: + build_command += "-a -installsuffix cgo " + build_command += path + start_time = datetime.utcnow() + run(build_command, shell=True) + end_time = datetime.utcnow() + logging.info("Time taken: {}s".format((end_time - start_time).total_seconds())) + return True + +def generate_md5_from_file(path): + """Generate MD5 signature based on the contents of the file at path. + """ + m = hashlib.md5() + with open(path, 'rb') as f: + for chunk in iter(lambda: f.read(4096), b""): + m.update(chunk) + return m.hexdigest() + +def generate_sig_from_file(path): + """Generate a detached GPG signature from the file at path. + """ + logging.debug("Generating GPG signature for file: {}".format(path)) + gpg_path = check_path_for('gpg') + if gpg_path is None: + logging.warn("gpg binary not found on path! Skipping signature creation.") + return False + if os.environ.get("GNUPG_HOME") is not None: + run('gpg --homedir {} --armor --yes --detach-sign {}'.format(os.environ.get("GNUPG_HOME"), path)) + else: + run('gpg --armor --detach-sign --yes {}'.format(path)) + return True + +def package(build_output, pkg_name, version, nightly=False, iteration=1, static=False, release=False): + """Package the output of the build process. 
+ """ + outfiles = [] + tmp_build_dir = create_temp_dir() + logging.debug("Packaging for build output: {}".format(build_output)) + logging.info("Using temporary directory: {}".format(tmp_build_dir)) + try: + for platform in build_output: + # Create top-level folder displaying which platform (linux, etc) + os.makedirs(os.path.join(tmp_build_dir, platform)) + for arch in build_output[platform]: + logging.info("Creating packages for {}/{}".format(platform, arch)) + # Create second-level directory displaying the architecture (amd64, etc) + current_location = build_output[platform][arch] + + # Create directory tree to mimic file system of package + build_root = os.path.join(tmp_build_dir, + platform, + arch, + '{}-{}-{}'.format(PACKAGE_NAME, version, iteration)) + os.makedirs(build_root) + + # Copy packaging scripts to build directory + if platform == "windows": + # For windows and static builds, just copy + # binaries to root of package (no other scripts or + # directories) + package_scripts(build_root, config_only=True, windows=True) + elif static or "static_" in arch: + package_scripts(build_root, config_only=True) + else: + create_package_fs(build_root) + package_scripts(build_root) + + if platform != "windows": + package_man_files(build_root) + + for binary in targets: + # Copy newly-built binaries to packaging directory + if platform == 'windows': + binary = binary + '.exe' + if platform == 'windows' or static or "static_" in arch: + # Where the binary should go in the package filesystem + to = os.path.join(build_root, binary) + # Where the binary currently is located + fr = os.path.join(current_location, binary) + else: + # Where the binary currently is located + fr = os.path.join(current_location, binary) + # Where the binary should go in the package filesystem + to = os.path.join(build_root, INSTALL_ROOT_DIR[1:], binary) + shutil.copy(fr, to) + + for package_type in supported_packages[platform]: + # Package the directory structure for each package type for the platform + logging.debug("Packaging directory '{}' as '{}'.".format(build_root, package_type)) + name = pkg_name + # Reset version, iteration, and current location on each run + # since they may be modified below. 
+ package_version = version + package_iteration = iteration + if "static_" in arch: + # Remove the "static_" from the displayed arch on the package + package_arch = arch.replace("static_", "") + else: + package_arch = arch + if not release and not nightly: + # For non-release builds, just use the commit hash as the version + package_version = "{}~{}".format(version, + get_current_commit(short=True)) + package_iteration = "0" + package_build_root = build_root + current_location = build_output[platform][arch] + + if package_type in ['zip', 'tar']: + # For tars and zips, start the packaging one folder above + # the build root (to include the package name) + package_build_root = os.path.join('/', '/'.join(build_root.split('/')[:-1])) + if nightly: + if static or "static_" in arch: + name = '{}-static-nightly_{}_{}'.format(name, + platform, + package_arch) + else: + name = '{}-nightly_{}_{}'.format(name, + platform, + package_arch) + else: + if static or "static_" in arch: + name = '{}-{}-static_{}_{}'.format(name, + package_version, + platform, + package_arch) + else: + name = '{}-{}_{}_{}'.format(name, + package_version, + platform, + package_arch) + current_location = os.path.join(os.getcwd(), current_location) + if package_type == 'tar': + tar_command = "cd {} && tar -cvzf {}.tar.gz --owner=root ./*".format(package_build_root, name) + run(tar_command, shell=True) + run("mv {}.tar.gz {}".format(os.path.join(package_build_root, name), current_location), shell=True) + outfile = os.path.join(current_location, name + ".tar.gz") + outfiles.append(outfile) + elif package_type == 'zip': + zip_command = "cd {} && zip -r {}.zip ./*".format(package_build_root, name) + run(zip_command, shell=True) + run("mv {}.zip {}".format(os.path.join(package_build_root, name), current_location), shell=True) + outfile = os.path.join(current_location, name + ".zip") + outfiles.append(outfile) + elif package_type not in ['zip', 'tar'] and static or "static_" in arch: + logging.info("Skipping package type '{}' for static builds.".format(package_type)) + else: + fpm_command = "fpm {} --name {} -a {} -t {} --version {} --iteration {} -C {} -p {} ".format( + fpm_common_args, + name, + package_arch, + package_type, + package_version, + package_iteration, + package_build_root, + current_location) + if package_type == "rpm": + fpm_command += "--depends coreutils --depends shadow-utils --rpm-posttrans {}".format(POSTINST_SCRIPT) + out = run(fpm_command, shell=True) + matches = re.search(':path=>"(.*)"', out) + outfile = None + if matches is not None: + outfile = matches.groups()[0] + if outfile is None: + logging.warn("Could not determine output from packaging output!") + else: + if nightly: + # Strip nightly version from package name + new_outfile = outfile.replace("{}-{}".format(package_version, package_iteration), "nightly") + os.rename(outfile, new_outfile) + outfile = new_outfile + else: + if package_type == 'rpm': + # rpm's convert any dashes to underscores + package_version = package_version.replace("-", "_") + new_outfile = outfile.replace("{}-{}".format(package_version, package_iteration), package_version) + os.rename(outfile, new_outfile) + outfile = new_outfile + outfiles.append(os.path.join(os.getcwd(), outfile)) + logging.debug("Produced package files: {}".format(outfiles)) + return outfiles + finally: + # Cleanup + shutil.rmtree(tmp_build_dir) + +def main(args): + global PACKAGE_NAME + + if args.release and args.nightly: + logging.error("Cannot be both a nightly and a release.") + return 1 + + if args.nightly: + 
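+ # Nightly builds bump the minor version and append a UTC timestamp, e.g. a v1.5.0 tag becomes 1.6.0~n201903271730 (illustrative values).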
args.version = increment_minor_version(args.version) + args.version = "{}~n{}".format(args.version, + datetime.utcnow().strftime("%Y%m%d%H%M")) + args.iteration = 0 + + # Pre-build checks + check_environ() + if not check_prereqs(): + return 1 + if args.build_tags is None: + args.build_tags = [] + else: + args.build_tags = args.build_tags.split(',') + + orig_commit = get_current_commit(short=True) + orig_branch = get_current_branch() + + if args.platform not in supported_builds and args.platform != 'all': + logging.error("Invalid build platform: {}".format(args.platform)) + return 1 + + build_output = {} + + if args.branch != orig_branch and args.commit != orig_commit: + logging.error("Can only specify one branch or commit to build from.") + return 1 + elif args.branch != orig_branch: + logging.info("Moving to git branch: {}".format(args.branch)) + run("git checkout {}".format(args.branch)) + elif args.commit != orig_commit: + logging.info("Moving to git commit: {}".format(args.commit)) + run("git checkout {}".format(args.commit)) + + if not args.no_get: + if not go_get(args.branch, update=args.update, no_uncommitted=args.no_uncommitted): + return 1 + + if args.test: + if not run_tests(args.race, args.parallel, args.timeout, args.no_vet, args.junit_report): + return 1 + + platforms = [] + single_build = True + if args.platform == 'all': + platforms = supported_builds.keys() + single_build = False + else: + platforms = [args.platform] + + for platform in platforms: + build_output.update( { platform : {} } ) + archs = [] + if args.arch == "all": + single_build = False + archs = supported_builds.get(platform) + else: + archs = [args.arch] + + for arch in archs: + od = args.outdir + if not single_build: + od = os.path.join(args.outdir, platform, arch) + if not build(version=args.version, + platform=platform, + arch=arch, + nightly=args.nightly, + race=args.race, + clean=args.clean, + outdir=od, + tags=args.build_tags, + static=args.static): + return 1 + build_output.get(platform).update( { arch : od } ) + + # Build packages + if args.package: + if not check_path_for("fpm"): + logging.error("FPM ruby gem required for packaging. 
Stopping.") + return 1 + packages = package(build_output, + args.name, + args.version, + nightly=args.nightly, + iteration=args.iteration, + static=args.static, + release=args.release) + if args.sign: + logging.debug("Generating GPG signatures for packages: {}".format(packages)) + sigs = [] # retain signatures so they can be uploaded with packages + for p in packages: + if generate_sig_from_file(p): + sigs.append(p + '.asc') + else: + logging.error("Creation of signature for package [{}] failed!".format(p)) + return 1 + packages += sigs + if args.upload: + logging.debug("Files staged for upload: {}".format(packages)) + if args.nightly: + args.upload_overwrite = True + if not upload_packages(packages, bucket_name=args.bucket, overwrite=args.upload_overwrite): + return 1 + logging.info("Packages created:") + for p in packages: + logging.info("{} (MD5={})".format(p.split('/')[-1:][0], + generate_md5_from_file(p))) + if orig_branch != get_current_branch(): + logging.info("Moving back to original git branch: {}".format(orig_branch)) + run("git checkout {}".format(orig_branch)) + + return 0 + +if __name__ == '__main__': + LOG_LEVEL = logging.INFO + if '--debug' in sys.argv[1:]: + LOG_LEVEL = logging.DEBUG + log_format = '[%(levelname)s] %(funcName)s: %(message)s' + logging.basicConfig(level=LOG_LEVEL, + format=log_format) + + parser = argparse.ArgumentParser(description='InfluxDB build and packaging script.') + parser.add_argument('--verbose','-v','--debug', + action='store_true', + help='Use debug output') + parser.add_argument('--outdir', '-o', + metavar='', + default='./build/', + type=os.path.abspath, + help='Output directory') + parser.add_argument('--name', '-n', + metavar='', + default=PACKAGE_NAME, + type=str, + help='Name to use for package name (when package is specified)') + parser.add_argument('--arch', + metavar='', + type=str, + default=get_system_arch(), + help='Target architecture for build output') + parser.add_argument('--platform', + metavar='', + type=str, + default=get_system_platform(), + help='Target platform for build output') + parser.add_argument('--branch', + metavar='', + type=str, + default=get_current_branch(), + help='Build from a specific branch') + parser.add_argument('--commit', + metavar='', + type=str, + default=get_current_commit(short=True), + help='Build from a specific commit') + parser.add_argument('--version', + metavar='', + type=str, + default=get_current_version(), + help='Version information to apply to build output (ex: 0.12.0)') + parser.add_argument('--iteration', + metavar='', + type=str, + default="1", + help='Package iteration to apply to build output (defaults to 1)') + parser.add_argument('--stats', + action='store_true', + help='Emit build metrics (requires InfluxDB Python client)') + parser.add_argument('--stats-server', + metavar='', + type=str, + help='Send build stats to InfluxDB using provided hostname and port') + parser.add_argument('--stats-db', + metavar='', + type=str, + help='Send build stats to InfluxDB using provided database name') + parser.add_argument('--nightly', + action='store_true', + help='Mark build output as nightly build (will incremement the minor version)') + parser.add_argument('--update', + action='store_true', + help='Update build dependencies prior to building') + parser.add_argument('--package', + action='store_true', + help='Package binary output') + parser.add_argument('--release', + action='store_true', + help='Mark build output as release') + parser.add_argument('--clean', + action='store_true', + 
help='Clean output directory before building') + parser.add_argument('--no-get', + action='store_true', + help='Do not retrieve pinned dependencies when building') + parser.add_argument('--no-uncommitted', + action='store_true', + help='Fail if uncommitted changes exist in the working directory') + parser.add_argument('--upload', + action='store_true', + help='Upload output packages to AWS S3') + parser.add_argument('--upload-overwrite','-w', + action='store_true', + help='Upload output packages to AWS S3') + parser.add_argument('--bucket', + metavar='', + type=str, + default=DEFAULT_BUCKET, + help='Destination bucket for uploads') + parser.add_argument('--build-tags', + metavar='', + help='Optional build tags to use for compilation') + parser.add_argument('--static', + action='store_true', + help='Create statically-compiled binary output') + parser.add_argument('--sign', + action='store_true', + help='Create GPG detached signatures for packages (when package is specified)') + parser.add_argument('--test', + action='store_true', + help='Run tests (does not produce build output)') + parser.add_argument('--junit-report', + action='store_true', + help='Output tests in the JUnit XML format') + parser.add_argument('--no-vet', + action='store_true', + help='Do not run "go vet" when running tests') + parser.add_argument('--race', + action='store_true', + help='Enable race flag for build output') + parser.add_argument('--parallel', + metavar='', + type=int, + help='Number of tests to run simultaneously') + parser.add_argument('--timeout', + metavar='', + type=str, + help='Timeout for tests before failing') + args = parser.parse_args() + print_banner() + sys.exit(main(args)) diff --git a/vendor/github.com/influxdata/influxdb/build.sh b/vendor/github.com/influxdata/influxdb/build.sh new file mode 100755 index 0000000..0f80ac7 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/build.sh @@ -0,0 +1,22 @@ +#!/bin/bash +# Run the build utility via Docker + +set -e + +# Make sure our working dir is the dir of the script +DIR=$(cd $(dirname ${BASH_SOURCE[0]}) && pwd) +cd $DIR + + +# Build new docker image +docker build -f Dockerfile_build_ubuntu64 -t influxdb-builder $DIR +echo "Running build.py" +# Run docker +docker run --rm \ + -e AWS_ACCESS_KEY_ID="$AWS_ACCESS_KEY_ID" \ + -e AWS_SECRET_ACCESS_KEY="$AWS_SECRET_ACCESS_KEY" \ + -v $HOME/.aws.conf:/root/.aws.conf \ + -v $DIR:/root/go/src/github.com/influxdata/influxdb \ + influxdb-builder \ + "$@" + diff --git a/vendor/github.com/influxdata/influxdb/client/README.md b/vendor/github.com/influxdata/influxdb/client/README.md new file mode 100644 index 0000000..20ddd9e --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/client/README.md @@ -0,0 +1,312 @@ +# InfluxDB Client + +[![GoDoc](https://godoc.org/github.com/influxdata/influxdb?status.svg)](http://godoc.org/github.com/influxdata/influxdb/client/v2) + +## Description + +**NOTE:** The Go client library now has a "v2" version, with the old version +being deprecated. The new version can be imported at +`import "github.com/influxdata/influxdb/client/v2"`. It is not backwards-compatible. + +A Go client library written and maintained by the **InfluxDB** team. +This package provides convenience functions to read and write time series data. +It uses the HTTP protocol to communicate with your **InfluxDB** cluster. + + +## Getting Started + +### Connecting To Your Database + +Connecting to an **InfluxDB** database is straightforward. 
You will need a host +name, a port and the cluster user credentials if applicable. The default port is +8086. You can customize these settings to your specific installation via the +**InfluxDB** configuration file. + +Though not necessary for experimentation, you may want to create a new user +and authenticate the connection to your database. + +For more information please check out the +[Admin Docs](https://docs.influxdata.com/influxdb/latest/administration/). + +For the impatient, you can create a new admin user _bubba_ by firing off the +[InfluxDB CLI](https://github.com/influxdata/influxdb/blob/master/cmd/influx/main.go). + +```shell +influx +> create user bubba with password 'bumblebeetuna' +> grant all privileges to bubba +``` + +And now for good measure set the credentials in your shell environment. +In the example below we will use $INFLUX_USER and $INFLUX_PWD. + +Now with the administrivia out of the way, let's connect to our database. + +NOTE: If you've opted out of creating a user, you can omit Username and Password in +the configuration below. + +```go +package main + +import ( + "log" + "time" + + "github.com/influxdata/influxdb/client/v2" +) + +const ( + MyDB = "square_holes" + username = "bubba" + password = "bumblebeetuna" +) + + +func main() { + // Create a new HTTPClient + c, err := client.NewHTTPClient(client.HTTPConfig{ + Addr: "http://localhost:8086", + Username: username, + Password: password, + }) + if err != nil { + log.Fatal(err) + } + defer c.Close() + + // Create a new point batch + bp, err := client.NewBatchPoints(client.BatchPointsConfig{ + Database: MyDB, + Precision: "s", + }) + if err != nil { + log.Fatal(err) + } + + // Create a point and add to batch + tags := map[string]string{"cpu": "cpu-total"} + fields := map[string]interface{}{ + "idle": 10.1, + "system": 53.3, + "user": 46.6, + } + + pt, err := client.NewPoint("cpu_usage", tags, fields, time.Now()) + if err != nil { + log.Fatal(err) + } + bp.AddPoint(pt) + + // Write the batch + if err := c.Write(bp); err != nil { + log.Fatal(err) + } + + // Close client resources + if err := c.Close(); err != nil { + log.Fatal(err) + } +} + +``` + +### Inserting Data + +Time series data aka *points* are written to the database using batch inserts. +The mechanism is to create one or more points and then create a batch aka +*batch points* and write these to a given database and series. A series is a +combination of a measurement (time/values) and a set of tags. + +In this sample we will create a batch of 1,000 points. Each point has a time and +two field values (idle and busy) as well as tags identifying the host, region, and CPU. We write these points +to a database called _systemstats_ using a measurement named _cpu_usage_. + +NOTE: You can specify a RetentionPolicy as part of the batch points. If not +provided InfluxDB will use the database _default_ retention policy. 
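+
+As a minimal sketch, assuming a retention policy named _two_weeks_ already exists on the
+server, the policy can be attached when the batch is created (omit the field to fall back
+to the database default):
+
+```go
+bp, err := client.NewBatchPoints(client.BatchPointsConfig{
+	Database:        "systemstats",
+	RetentionPolicy: "two_weeks", // assumed example policy
+	Precision:       "us",
+})
+if err != nil {
+	log.Fatal(err)
+}
+```
+
+The complete sample below builds a batch of points in the same way and writes them in a
+single call.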
+ +```go + +func writePoints(clnt client.Client) { + sampleSize := 1000 + + bp, err := client.NewBatchPoints(client.BatchPointsConfig{ + Database: "systemstats", + Precision: "us", + }) + if err != nil { + log.Fatal(err) + } + + rand.Seed(time.Now().UnixNano()) + for i := 0; i < sampleSize; i++ { + regions := []string{"us-west1", "us-west2", "us-west3", "us-east1"} + tags := map[string]string{ + "cpu": "cpu-total", + "host": fmt.Sprintf("host%d", rand.Intn(1000)), + "region": regions[rand.Intn(len(regions))], + } + + idle := rand.Float64() * 100.0 + fields := map[string]interface{}{ + "idle": idle, + "busy": 100.0 - idle, + } + + pt, err := client.NewPoint( + "cpu_usage", + tags, + fields, + time.Now(), + ) + if err != nil { + log.Fatal(err) + } + bp.AddPoint(pt) + } + + if err := clnt.Write(bp); err != nil { + log.Fatal(err) + } +} +``` + +#### Uint64 Support + +The `uint64` data type is supported if your server is version `1.4.0` or +greater. To write a data point as an unsigned integer, you must insert +the point as `uint64`. You cannot use `uint` or any of the other +derivatives because previous versions of the client have supported +writing those types as an integer. + +### Querying Data + +One nice advantage of using **InfluxDB** the ability to query your data using familiar +SQL constructs. In this example we can create a convenience function to query the database +as follows: + +```go +// queryDB convenience function to query the database +func queryDB(clnt client.Client, cmd string) (res []client.Result, err error) { + q := client.Query{ + Command: cmd, + Database: MyDB, + } + if response, err := clnt.Query(q); err == nil { + if response.Error() != nil { + return res, response.Error() + } + res = response.Results + } else { + return res, err + } + return res, nil +} +``` + +#### Creating a Database + +```go +_, err := queryDB(clnt, fmt.Sprintf("CREATE DATABASE %s", MyDB)) +if err != nil { + log.Fatal(err) +} +``` + +#### Count Records + +```go +q := fmt.Sprintf("SELECT count(%s) FROM %s", "value", MyMeasurement) +res, err := queryDB(clnt, q) +if err != nil { + log.Fatal(err) +} +count := res[0].Series[0].Values[0][1] +log.Printf("Found a total of %v records\n", count) +``` + +#### Find the last 10 _shapes_ records + +```go +q := fmt.Sprintf("SELECT * FROM %s LIMIT %d", MyMeasurement, 10) +res, err = queryDB(clnt, q) +if err != nil { + log.Fatal(err) +} + +for i, row := range res[0].Series[0].Values { + t, err := time.Parse(time.RFC3339, row[0].(string)) + if err != nil { + log.Fatal(err) + } + val := row[1].(string) + log.Printf("[%2d] %s: %s\n", i, t.Format(time.Stamp), val) +} +``` + +### Using the UDP Client + +The **InfluxDB** client also supports writing over UDP. + +```go +func WriteUDP() { + // Make client + c, err := client.NewUDPClient("localhost:8089") + if err != nil { + panic(err.Error()) + } + + // Create a new point batch + bp, _ := client.NewBatchPoints(client.BatchPointsConfig{ + Precision: "s", + }) + + // Create a point and add to batch + tags := map[string]string{"cpu": "cpu-total"} + fields := map[string]interface{}{ + "idle": 10.1, + "system": 53.3, + "user": 46.6, + } + pt, err := client.NewPoint("cpu_usage", tags, fields, time.Now()) + if err != nil { + panic(err.Error()) + } + bp.AddPoint(pt) + + // Write the batch + c.Write(bp) +} +``` + +### Point Splitting + +The UDP client now supports splitting single points that exceed the configured +payload size. The logic for processing each point is listed here, starting with +an empty payload. + +1. 
If adding the point to the current (non-empty) payload would exceed the + configured size, send the current payload. Otherwise, add it to the current + payload. +1. If the point is smaller than the configured size, add it to the payload. +1. If the point has no timestamp, just try to send the entire point as a single + UDP payload, and process the next point. +1. Since the point has a timestamp, re-use the existing measurement name, + tagset, and timestamp and create multiple new points by splitting up the + fields. The per-point length will be kept close to the configured size, + staying under it if possible. This does mean that one large field, maybe a + long string, could be sent as a larger-than-configured payload. + +The above logic attempts to respect configured payload sizes, but not sacrifice +any data integrity. Points without a timestamp can't be split, as that may +cause fields to have differing timestamps when processed by the server. + +## Go Docs + +Please refer to +[http://godoc.org/github.com/influxdata/influxdb/client/v2](http://godoc.org/github.com/influxdata/influxdb/client/v2) +for documentation. + +## See Also + +You can also examine how the client library is used by the +[InfluxDB CLI](https://github.com/influxdata/influxdb/blob/master/cmd/influx/main.go). diff --git a/vendor/github.com/influxdata/influxdb/client/example_test.go b/vendor/github.com/influxdata/influxdb/client/example_test.go new file mode 100644 index 0000000..f375383 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/client/example_test.go @@ -0,0 +1,113 @@ +package client_test + +import ( + "fmt" + "log" + "math/rand" + "net/url" + "os" + "strconv" + "time" + + "github.com/influxdata/influxdb/client" +) + +func ExampleNewClient() { + host, err := url.Parse(fmt.Sprintf("http://%s:%d", "localhost", 8086)) + if err != nil { + log.Fatal(err) + } + + // NOTE: this assumes you've setup a user and have setup shell env variables, + // namely INFLUX_USER/INFLUX_PWD. If not just omit Username/Password below. + conf := client.Config{ + URL: *host, + Username: os.Getenv("INFLUX_USER"), + Password: os.Getenv("INFLUX_PWD"), + } + con, err := client.NewClient(conf) + if err != nil { + log.Fatal(err) + } + log.Println("Connection", con) +} + +func ExampleClient_Ping() { + host, err := url.Parse(fmt.Sprintf("http://%s:%d", "localhost", 8086)) + if err != nil { + log.Fatal(err) + } + con, err := client.NewClient(client.Config{URL: *host}) + if err != nil { + log.Fatal(err) + } + + dur, ver, err := con.Ping() + if err != nil { + log.Fatal(err) + } + log.Printf("Happy as a hippo! 
%v, %s", dur, ver) +} + +func ExampleClient_Query() { + host, err := url.Parse(fmt.Sprintf("http://%s:%d", "localhost", 8086)) + if err != nil { + log.Fatal(err) + } + con, err := client.NewClient(client.Config{URL: *host}) + if err != nil { + log.Fatal(err) + } + + q := client.Query{ + Command: "select count(value) from shapes", + Database: "square_holes", + } + if response, err := con.Query(q); err == nil && response.Error() == nil { + log.Println(response.Results) + } +} + +func ExampleClient_Write() { + host, err := url.Parse(fmt.Sprintf("http://%s:%d", "localhost", 8086)) + if err != nil { + log.Fatal(err) + } + con, err := client.NewClient(client.Config{URL: *host}) + if err != nil { + log.Fatal(err) + } + + var ( + shapes = []string{"circle", "rectangle", "square", "triangle"} + colors = []string{"red", "blue", "green"} + sampleSize = 1000 + pts = make([]client.Point, sampleSize) + ) + + rand.Seed(42) + for i := 0; i < sampleSize; i++ { + pts[i] = client.Point{ + Measurement: "shapes", + Tags: map[string]string{ + "color": strconv.Itoa(rand.Intn(len(colors))), + "shape": strconv.Itoa(rand.Intn(len(shapes))), + }, + Fields: map[string]interface{}{ + "value": rand.Intn(sampleSize), + }, + Time: time.Now(), + Precision: "s", + } + } + + bps := client.BatchPoints{ + Points: pts, + Database: "BumbeBeeTuna", + RetentionPolicy: "default", + } + _, err = con.Write(bps) + if err != nil { + log.Fatal(err) + } +} diff --git a/vendor/github.com/influxdata/influxdb/client/influxdb.go b/vendor/github.com/influxdata/influxdb/client/influxdb.go new file mode 100644 index 0000000..40e0ef9 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/client/influxdb.go @@ -0,0 +1,870 @@ +// Package client implements a now-deprecated client for InfluxDB; +// use github.com/influxdata/influxdb/client/v2 instead. +package client // import "github.com/influxdata/influxdb/client" + +import ( + "bytes" + "context" + "crypto/tls" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "net/url" + "path" + "strconv" + "strings" + "time" + + "github.com/influxdata/influxdb/models" +) + +const ( + // DefaultHost is the default host used to connect to an InfluxDB instance + DefaultHost = "localhost" + + // DefaultPort is the default port used to connect to an InfluxDB instance + DefaultPort = 8086 + + // DefaultTimeout is the default connection timeout used to connect to an InfluxDB instance + DefaultTimeout = 0 +) + +// Query is used to send a command to the server. Both Command and Database are required. +type Query struct { + Command string + Database string + + // RetentionPolicy tells the server which retention policy to use by default. + // This option is only effective when querying a server of version 1.6.0 or later. + RetentionPolicy string + + // Chunked tells the server to send back chunked responses. This places + // less load on the server by sending back chunks of the response rather + // than waiting for the entire response all at once. + Chunked bool + + // ChunkSize sets the maximum number of rows that will be returned per + // chunk. Chunks are either divided based on their series or if they hit + // the chunk size limit. + // + // Chunked must be set to true for this option to be used. + ChunkSize int + + // NodeID sets the data node to use for the query results. This option only + // has any effect in the enterprise version of the software where there can be + // more than one data node and is primarily useful for analyzing differences in + // data. 
The default behavior is to automatically select the appropriate data + // nodes to retrieve all of the data. On a database where the number of data nodes + // is greater than the replication factor, it is expected that setting this option + // will only retrieve partial data. + NodeID int +} + +// ParseConnectionString will parse a string to create a valid connection URL +func ParseConnectionString(path string, ssl bool) (url.URL, error) { + var host string + var port int + + h, p, err := net.SplitHostPort(path) + if err != nil { + if path == "" { + host = DefaultHost + } else { + host = path + } + // If they didn't specify a port, always use the default port + port = DefaultPort + } else { + host = h + port, err = strconv.Atoi(p) + if err != nil { + return url.URL{}, fmt.Errorf("invalid port number %q: %s\n", path, err) + } + } + + u := url.URL{ + Scheme: "http", + Host: host, + } + if ssl { + u.Scheme = "https" + if port != 443 { + u.Host = net.JoinHostPort(host, strconv.Itoa(port)) + } + } else if port != 80 { + u.Host = net.JoinHostPort(host, strconv.Itoa(port)) + } + + return u, nil +} + +// Config is used to specify what server to connect to. +// URL: The URL of the server connecting to. +// Username/Password are optional. They will be passed via basic auth if provided. +// UserAgent: If not provided, will default "InfluxDBClient", +// Timeout: If not provided, will default to 0 (no timeout) +type Config struct { + URL url.URL + UnixSocket string + Username string + Password string + UserAgent string + Timeout time.Duration + Precision string + WriteConsistency string + UnsafeSsl bool + Proxy func(req *http.Request) (*url.URL, error) + TLS *tls.Config +} + +// NewConfig will create a config to be used in connecting to the client +func NewConfig() Config { + return Config{ + Timeout: DefaultTimeout, + } +} + +// Client is used to make calls to the server. +type Client struct { + url url.URL + unixSocket string + username string + password string + httpClient *http.Client + userAgent string + precision string +} + +const ( + // ConsistencyOne requires at least one data node acknowledged a write. + ConsistencyOne = "one" + + // ConsistencyAll requires all data nodes to acknowledge a write. + ConsistencyAll = "all" + + // ConsistencyQuorum requires a quorum of data nodes to acknowledge a write. + ConsistencyQuorum = "quorum" + + // ConsistencyAny allows for hinted hand off, potentially no write happened yet. + ConsistencyAny = "any" +) + +// NewClient will instantiate and return a connected client to issue commands to the server. +func NewClient(c Config) (*Client, error) { + tlsConfig := new(tls.Config) + if c.TLS != nil { + tlsConfig = c.TLS.Clone() + } + tlsConfig.InsecureSkipVerify = c.UnsafeSsl + + tr := &http.Transport{ + Proxy: c.Proxy, + TLSClientConfig: tlsConfig, + } + + if c.UnixSocket != "" { + // No need for compression in local communications. 
+ tr.DisableCompression = true + + tr.DialContext = func(_ context.Context, _, _ string) (net.Conn, error) { + return net.Dial("unix", c.UnixSocket) + } + } + + client := Client{ + url: c.URL, + unixSocket: c.UnixSocket, + username: c.Username, + password: c.Password, + httpClient: &http.Client{Timeout: c.Timeout, Transport: tr}, + userAgent: c.UserAgent, + precision: c.Precision, + } + if client.userAgent == "" { + client.userAgent = "InfluxDBClient" + } + return &client, nil +} + +// SetAuth will update the username and passwords +func (c *Client) SetAuth(u, p string) { + c.username = u + c.password = p +} + +// SetPrecision will update the precision +func (c *Client) SetPrecision(precision string) { + c.precision = precision +} + +// Query sends a command to the server and returns the Response +func (c *Client) Query(q Query) (*Response, error) { + return c.QueryContext(context.Background(), q) +} + +// QueryContext sends a command to the server and returns the Response +// It uses a context that can be cancelled by the command line client +func (c *Client) QueryContext(ctx context.Context, q Query) (*Response, error) { + u := c.url + u.Path = path.Join(u.Path, "query") + + values := u.Query() + values.Set("q", q.Command) + values.Set("db", q.Database) + if q.RetentionPolicy != "" { + values.Set("rp", q.RetentionPolicy) + } + if q.Chunked { + values.Set("chunked", "true") + if q.ChunkSize > 0 { + values.Set("chunk_size", strconv.Itoa(q.ChunkSize)) + } + } + if q.NodeID > 0 { + values.Set("node_id", strconv.Itoa(q.NodeID)) + } + if c.precision != "" { + values.Set("epoch", c.precision) + } + u.RawQuery = values.Encode() + + req, err := http.NewRequest("POST", u.String(), nil) + if err != nil { + return nil, err + } + req.Header.Set("User-Agent", c.userAgent) + if c.username != "" { + req.SetBasicAuth(c.username, c.password) + } + + req = req.WithContext(ctx) + + resp, err := c.httpClient.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var response Response + if q.Chunked { + cr := NewChunkedResponse(resp.Body) + for { + r, err := cr.NextResponse() + if err != nil { + // If we got an error while decoding the response, send that back. + return nil, err + } + + if r == nil { + break + } + + response.Results = append(response.Results, r.Results...) + if r.Err != nil { + response.Err = r.Err + break + } + } + } else { + dec := json.NewDecoder(resp.Body) + dec.UseNumber() + if err := dec.Decode(&response); err != nil { + // Ignore EOF errors if we got an invalid status code. + if !(err == io.EOF && resp.StatusCode != http.StatusOK) { + return nil, err + } + } + } + + // If we don't have an error in our json response, and didn't get StatusOK, + // then send back an error. + if resp.StatusCode != http.StatusOK && response.Error() == nil { + return &response, fmt.Errorf("received status code %d from server", resp.StatusCode) + } + return &response, nil +} + +// Write takes BatchPoints and allows for writing of multiple points with defaults +// If successful, error is nil and Response is nil +// If an error occurs, Response may contain additional information if populated. 
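+// A minimal usage sketch (names and values are illustrative):
+//
+//	bp := BatchPoints{
+//		Database: "mydb",
+//		Points: []Point{{
+//			Measurement: "cpu_usage",
+//			Fields:      map[string]interface{}{"idle": 10.1},
+//			Time:        time.Now(),
+//		}},
+//	}
+//	if _, err := c.Write(bp); err != nil {
+//		log.Fatal(err)
+//	}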
+func (c *Client) Write(bp BatchPoints) (*Response, error) { + u := c.url + u.Path = path.Join(u.Path, "write") + + var b bytes.Buffer + for _, p := range bp.Points { + err := checkPointTypes(p) + if err != nil { + return nil, err + } + if p.Raw != "" { + if _, err := b.WriteString(p.Raw); err != nil { + return nil, err + } + } else { + for k, v := range bp.Tags { + if p.Tags == nil { + p.Tags = make(map[string]string, len(bp.Tags)) + } + p.Tags[k] = v + } + + if _, err := b.WriteString(p.MarshalString()); err != nil { + return nil, err + } + } + + if err := b.WriteByte('\n'); err != nil { + return nil, err + } + } + + req, err := http.NewRequest("POST", u.String(), &b) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "") + req.Header.Set("User-Agent", c.userAgent) + if c.username != "" { + req.SetBasicAuth(c.username, c.password) + } + + precision := bp.Precision + if precision == "" { + precision = c.precision + } + + params := req.URL.Query() + params.Set("db", bp.Database) + params.Set("rp", bp.RetentionPolicy) + params.Set("precision", precision) + params.Set("consistency", bp.WriteConsistency) + req.URL.RawQuery = params.Encode() + + resp, err := c.httpClient.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var response Response + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + if resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusOK { + var err = fmt.Errorf(string(body)) + response.Err = err + return &response, err + } + + return nil, nil +} + +// WriteLineProtocol takes a string with line returns to delimit each write +// If successful, error is nil and Response is nil +// If an error occurs, Response may contain additional information if populated. +func (c *Client) WriteLineProtocol(data, database, retentionPolicy, precision, writeConsistency string) (*Response, error) { + u := c.url + u.Path = path.Join(u.Path, "write") + + r := strings.NewReader(data) + + req, err := http.NewRequest("POST", u.String(), r) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "") + req.Header.Set("User-Agent", c.userAgent) + if c.username != "" { + req.SetBasicAuth(c.username, c.password) + } + params := req.URL.Query() + params.Set("db", database) + params.Set("rp", retentionPolicy) + params.Set("precision", precision) + params.Set("consistency", writeConsistency) + req.URL.RawQuery = params.Encode() + + resp, err := c.httpClient.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var response Response + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + if resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusOK { + err := fmt.Errorf(string(body)) + response.Err = err + return &response, err + } + + return nil, nil +} + +// Ping will check to see if the server is up +// Ping returns how long the request took, the version of the server it connected to, and an error if one occurred. 
+func (c *Client) Ping() (time.Duration, string, error) { + now := time.Now() + + u := c.url + u.Path = path.Join(u.Path, "ping") + + req, err := http.NewRequest("GET", u.String(), nil) + if err != nil { + return 0, "", err + } + req.Header.Set("User-Agent", c.userAgent) + if c.username != "" { + req.SetBasicAuth(c.username, c.password) + } + + resp, err := c.httpClient.Do(req) + if err != nil { + return 0, "", err + } + defer resp.Body.Close() + + version := resp.Header.Get("X-Influxdb-Version") + return time.Since(now), version, nil +} + +// Structs + +// Message represents a user message. +type Message struct { + Level string `json:"level,omitempty"` + Text string `json:"text,omitempty"` +} + +// Result represents a resultset returned from a single statement. +type Result struct { + Series []models.Row + Messages []*Message + Err error +} + +// MarshalJSON encodes the result into JSON. +func (r *Result) MarshalJSON() ([]byte, error) { + // Define a struct that outputs "error" as a string. + var o struct { + Series []models.Row `json:"series,omitempty"` + Messages []*Message `json:"messages,omitempty"` + Err string `json:"error,omitempty"` + } + + // Copy fields to output struct. + o.Series = r.Series + o.Messages = r.Messages + if r.Err != nil { + o.Err = r.Err.Error() + } + + return json.Marshal(&o) +} + +// UnmarshalJSON decodes the data into the Result struct +func (r *Result) UnmarshalJSON(b []byte) error { + var o struct { + Series []models.Row `json:"series,omitempty"` + Messages []*Message `json:"messages,omitempty"` + Err string `json:"error,omitempty"` + } + + dec := json.NewDecoder(bytes.NewBuffer(b)) + dec.UseNumber() + err := dec.Decode(&o) + if err != nil { + return err + } + r.Series = o.Series + r.Messages = o.Messages + if o.Err != "" { + r.Err = errors.New(o.Err) + } + return nil +} + +// Response represents a list of statement results. +type Response struct { + Results []Result + Err error +} + +// MarshalJSON encodes the response into JSON. +func (r *Response) MarshalJSON() ([]byte, error) { + // Define a struct that outputs "error" as a string. + var o struct { + Results []Result `json:"results,omitempty"` + Err string `json:"error,omitempty"` + } + + // Copy fields to output struct. + o.Results = r.Results + if r.Err != nil { + o.Err = r.Err.Error() + } + + return json.Marshal(&o) +} + +// UnmarshalJSON decodes the data into the Response struct +func (r *Response) UnmarshalJSON(b []byte) error { + var o struct { + Results []Result `json:"results,omitempty"` + Err string `json:"error,omitempty"` + } + + dec := json.NewDecoder(bytes.NewBuffer(b)) + dec.UseNumber() + err := dec.Decode(&o) + if err != nil { + return err + } + r.Results = o.Results + if o.Err != "" { + r.Err = errors.New(o.Err) + } + return nil +} + +// Error returns the first error from any statement. +// Returns nil if no errors occurred on any statements. +func (r *Response) Error() error { + if r.Err != nil { + return r.Err + } + for _, result := range r.Results { + if result.Err != nil { + return result.Err + } + } + return nil +} + +// duplexReader reads responses and writes it to another writer while +// satisfying the reader interface. +type duplexReader struct { + r io.Reader + w io.Writer +} + +func (r *duplexReader) Read(p []byte) (n int, err error) { + n, err = r.r.Read(p) + if err == nil { + r.w.Write(p[:n]) + } + return n, err +} + +// ChunkedResponse represents a response from the server that +// uses chunking to stream the output. 
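+// A typical consumption loop (sketch, mirroring QueryContext above):
+//
+//	cr := NewChunkedResponse(resp.Body)
+//	for {
+//		r, err := cr.NextResponse()
+//		if err != nil {
+//			return nil, err // decoding failed
+//		}
+//		if r == nil {
+//			break // end of stream
+//		}
+//		// consume r.Results
+//	}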
+type ChunkedResponse struct { + dec *json.Decoder + duplex *duplexReader + buf bytes.Buffer +} + +// NewChunkedResponse reads a stream and produces responses from the stream. +func NewChunkedResponse(r io.Reader) *ChunkedResponse { + resp := &ChunkedResponse{} + resp.duplex = &duplexReader{r: r, w: &resp.buf} + resp.dec = json.NewDecoder(resp.duplex) + resp.dec.UseNumber() + return resp +} + +// NextResponse reads the next line of the stream and returns a response. +func (r *ChunkedResponse) NextResponse() (*Response, error) { + var response Response + if err := r.dec.Decode(&response); err != nil { + if err == io.EOF { + return nil, nil + } + // A decoding error happened. This probably means the server crashed + // and sent a last-ditch error message to us. Ensure we have read the + // entirety of the connection to get any remaining error text. + io.Copy(ioutil.Discard, r.duplex) + return nil, errors.New(strings.TrimSpace(r.buf.String())) + } + r.buf.Reset() + return &response, nil +} + +// Point defines the fields that will be written to the database +// Measurement, Time, and Fields are required +// Precision can be specified if the time is in epoch format (integer). +// Valid values for Precision are n, u, ms, s, m, and h +type Point struct { + Measurement string + Tags map[string]string + Time time.Time + Fields map[string]interface{} + Precision string + Raw string +} + +// MarshalJSON will format the time in RFC3339Nano +// Precision is also ignored as it is only used for writing, not reading +// Or another way to say it is we always send back in nanosecond precision +func (p *Point) MarshalJSON() ([]byte, error) { + point := struct { + Measurement string `json:"measurement,omitempty"` + Tags map[string]string `json:"tags,omitempty"` + Time string `json:"time,omitempty"` + Fields map[string]interface{} `json:"fields,omitempty"` + Precision string `json:"precision,omitempty"` + }{ + Measurement: p.Measurement, + Tags: p.Tags, + Fields: p.Fields, + Precision: p.Precision, + } + // Let it omit empty if it's really zero + if !p.Time.IsZero() { + point.Time = p.Time.UTC().Format(time.RFC3339Nano) + } + return json.Marshal(&point) +} + +// MarshalString renders string representation of a Point with specified +// precision. The default precision is nanoseconds. +func (p *Point) MarshalString() string { + pt, err := models.NewPoint(p.Measurement, models.NewTags(p.Tags), p.Fields, p.Time) + if err != nil { + return "# ERROR: " + err.Error() + " " + p.Measurement + } + if p.Precision == "" || p.Precision == "ns" || p.Precision == "n" { + return pt.String() + } + return pt.PrecisionString(p.Precision) +} + +// UnmarshalJSON decodes the data into the Point struct +func (p *Point) UnmarshalJSON(b []byte) error { + var normal struct { + Measurement string `json:"measurement"` + Tags map[string]string `json:"tags"` + Time time.Time `json:"time"` + Precision string `json:"precision"` + Fields map[string]interface{} `json:"fields"` + } + var epoch struct { + Measurement string `json:"measurement"` + Tags map[string]string `json:"tags"` + Time *int64 `json:"time"` + Precision string `json:"precision"` + Fields map[string]interface{} `json:"fields"` + } + + if err := func() error { + var err error + dec := json.NewDecoder(bytes.NewBuffer(b)) + dec.UseNumber() + if err = dec.Decode(&epoch); err != nil { + return err + } + // Convert from epoch to time.Time, but only if Time + // was actually set. 
+ var ts time.Time + if epoch.Time != nil { + ts, err = EpochToTime(*epoch.Time, epoch.Precision) + if err != nil { + return err + } + } + p.Measurement = epoch.Measurement + p.Tags = epoch.Tags + p.Time = ts + p.Precision = epoch.Precision + p.Fields = normalizeFields(epoch.Fields) + return nil + }(); err == nil { + return nil + } + + dec := json.NewDecoder(bytes.NewBuffer(b)) + dec.UseNumber() + if err := dec.Decode(&normal); err != nil { + return err + } + normal.Time = SetPrecision(normal.Time, normal.Precision) + p.Measurement = normal.Measurement + p.Tags = normal.Tags + p.Time = normal.Time + p.Precision = normal.Precision + p.Fields = normalizeFields(normal.Fields) + + return nil +} + +// Remove any notion of json.Number +func normalizeFields(fields map[string]interface{}) map[string]interface{} { + newFields := map[string]interface{}{} + + for k, v := range fields { + switch v := v.(type) { + case json.Number: + jv, e := v.Float64() + if e != nil { + panic(fmt.Sprintf("unable to convert json.Number to float64: %s", e)) + } + newFields[k] = jv + default: + newFields[k] = v + } + } + return newFields +} + +// BatchPoints is used to send batched data in a single write. +// Database and Points are required +// If no retention policy is specified, it will use the databases default retention policy. +// If tags are specified, they will be "merged" with all points. If a point already has that tag, it will be ignored. +// If time is specified, it will be applied to any point with an empty time. +// Precision can be specified if the time is in epoch format (integer). +// Valid values for Precision are n, u, ms, s, m, and h +type BatchPoints struct { + Points []Point `json:"points,omitempty"` + Database string `json:"database,omitempty"` + RetentionPolicy string `json:"retentionPolicy,omitempty"` + Tags map[string]string `json:"tags,omitempty"` + Time time.Time `json:"time,omitempty"` + Precision string `json:"precision,omitempty"` + WriteConsistency string `json:"-"` +} + +// UnmarshalJSON decodes the data into the BatchPoints struct +func (bp *BatchPoints) UnmarshalJSON(b []byte) error { + var normal struct { + Points []Point `json:"points"` + Database string `json:"database"` + RetentionPolicy string `json:"retentionPolicy"` + Tags map[string]string `json:"tags"` + Time time.Time `json:"time"` + Precision string `json:"precision"` + } + var epoch struct { + Points []Point `json:"points"` + Database string `json:"database"` + RetentionPolicy string `json:"retentionPolicy"` + Tags map[string]string `json:"tags"` + Time *int64 `json:"time"` + Precision string `json:"precision"` + } + + if err := func() error { + var err error + if err = json.Unmarshal(b, &epoch); err != nil { + return err + } + // Convert from epoch to time.Time + var ts time.Time + if epoch.Time != nil { + ts, err = EpochToTime(*epoch.Time, epoch.Precision) + if err != nil { + return err + } + } + bp.Points = epoch.Points + bp.Database = epoch.Database + bp.RetentionPolicy = epoch.RetentionPolicy + bp.Tags = epoch.Tags + bp.Time = ts + bp.Precision = epoch.Precision + return nil + }(); err == nil { + return nil + } + + if err := json.Unmarshal(b, &normal); err != nil { + return err + } + normal.Time = SetPrecision(normal.Time, normal.Precision) + bp.Points = normal.Points + bp.Database = normal.Database + bp.RetentionPolicy = normal.RetentionPolicy + bp.Tags = normal.Tags + bp.Time = normal.Time + bp.Precision = normal.Precision + + return nil +} + +// utility functions + +// Addr provides the current url as a string of 
the server the client is connected to. +func (c *Client) Addr() string { + if c.unixSocket != "" { + return c.unixSocket + } + return c.url.String() +} + +// checkPointTypes ensures no unsupported types are submitted to influxdb, returning error if they are found. +func checkPointTypes(p Point) error { + for _, v := range p.Fields { + switch v.(type) { + case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, float32, float64, bool, string, nil: + return nil + default: + return fmt.Errorf("unsupported point type: %T", v) + } + } + return nil +} + +// helper functions + +// EpochToTime takes a unix epoch time and uses precision to return back a time.Time +func EpochToTime(epoch int64, precision string) (time.Time, error) { + if precision == "" { + precision = "s" + } + var t time.Time + switch precision { + case "h": + t = time.Unix(0, epoch*int64(time.Hour)) + case "m": + t = time.Unix(0, epoch*int64(time.Minute)) + case "s": + t = time.Unix(0, epoch*int64(time.Second)) + case "ms": + t = time.Unix(0, epoch*int64(time.Millisecond)) + case "u": + t = time.Unix(0, epoch*int64(time.Microsecond)) + case "n": + t = time.Unix(0, epoch) + default: + return time.Time{}, fmt.Errorf("Unknown precision %q", precision) + } + return t, nil +} + +// SetPrecision will round a time to the specified precision +func SetPrecision(t time.Time, precision string) time.Time { + switch precision { + case "n": + case "u": + return t.Round(time.Microsecond) + case "ms": + return t.Round(time.Millisecond) + case "s": + return t.Round(time.Second) + case "m": + return t.Round(time.Minute) + case "h": + return t.Round(time.Hour) + } + return t +} diff --git a/vendor/github.com/influxdata/influxdb/client/influxdb_test.go b/vendor/github.com/influxdata/influxdb/client/influxdb_test.go new file mode 100644 index 0000000..7e53372 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/client/influxdb_test.go @@ -0,0 +1,1022 @@ +package client_test + +import ( + "context" + "crypto/tls" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "net/url" + "os" + "strings" + "testing" + "time" + + "github.com/influxdata/influxdb/client" +) + +func BenchmarkWrite(b *testing.B) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var data client.Response + w.WriteHeader(http.StatusNoContent) + _ = json.NewEncoder(w).Encode(data) + })) + defer ts.Close() + + u, _ := url.Parse(ts.URL) + config := client.Config{URL: *u} + c, err := client.NewClient(config) + if err != nil { + b.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + + bp := client.BatchPoints{ + Points: []client.Point{ + {Fields: map[string]interface{}{"value": 101}}}, + } + for i := 0; i < b.N; i++ { + r, err := c.Write(bp) + if err != nil { + b.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + if r != nil { + b.Fatalf("unexpected response. 
expected %v, actual %v", nil, r) + } + } +} + +func BenchmarkUnmarshalJSON2Tags(b *testing.B) { + var bp client.BatchPoints + data := []byte(` +{ + "database": "foo", + "retentionPolicy": "bar", + "points": [ + { + "name": "cpu", + "tags": { + "host": "server01", + "region": "us-east1" + }, + "time": 14244733039069373, + "precision": "n", + "fields": { + "value": 4541770385657154000 + } + } + ] +} +`) + + for i := 0; i < b.N; i++ { + if err := json.Unmarshal(data, &bp); err != nil { + b.Errorf("unable to unmarshal nanosecond data: %s", err.Error()) + } + b.SetBytes(int64(len(data))) + } +} + +func BenchmarkUnmarshalJSON10Tags(b *testing.B) { + var bp client.BatchPoints + data := []byte(` +{ + "database": "foo", + "retentionPolicy": "bar", + "points": [ + { + "name": "cpu", + "tags": { + "host": "server01", + "region": "us-east1", + "tag1": "value1", + "tag2": "value2", + "tag2": "value3", + "tag4": "value4", + "tag5": "value5", + "tag6": "value6", + "tag7": "value7", + "tag8": "value8" + }, + "time": 14244733039069373, + "precision": "n", + "fields": { + "value": 4541770385657154000 + } + } + ] +} +`) + + for i := 0; i < b.N; i++ { + if err := json.Unmarshal(data, &bp); err != nil { + b.Errorf("unable to unmarshal nanosecond data: %s", err.Error()) + } + b.SetBytes(int64(len(data))) + } +} + +func TestNewClient(t *testing.T) { + config := client.Config{} + _, err := client.NewClient(config) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } +} + +func TestClient_Ping(t *testing.T) { + ts := emptyTestServer() + defer ts.Close() + + u, _ := url.Parse(ts.URL) + config := client.Config{URL: *u} + c, err := client.NewClient(config) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + d, version, err := c.Ping() + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + if d.Nanoseconds() == 0 { + t.Fatalf("expected a duration greater than zero. actual %v", d.Nanoseconds()) + } + if version != "x.x" { + t.Fatalf("unexpected version. expected %s, actual %v", "x.x", version) + } +} + +func TestClient_Query(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var data client.Response + w.WriteHeader(http.StatusOK) + _ = json.NewEncoder(w).Encode(data) + })) + defer ts.Close() + + u, _ := url.Parse(ts.URL) + config := client.Config{URL: *u} + c, err := client.NewClient(config) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + + query := client.Query{} + _, err = c.Query(query) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } +} + +func TestClient_Query_RP(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + params := r.URL.Query() + if got, exp := params.Get("db"), "db0"; got != exp { + t.Errorf("unexpected db query parameter: %s != %s", exp, got) + } + if got, exp := params.Get("rp"), "rp0"; got != exp { + t.Errorf("unexpected rp query parameter: %s != %s", exp, got) + } + var data client.Response + w.WriteHeader(http.StatusOK) + _ = json.NewEncoder(w).Encode(data) + })) + defer ts.Close() + + u, _ := url.Parse(ts.URL) + config := client.Config{URL: *u} + c, err := client.NewClient(config) + if err != nil { + t.Fatalf("unexpected error. 
expected %v, actual %v", nil, err) + } + + query := client.Query{ + Database: "db0", + RetentionPolicy: "rp0", + } + _, err = c.Query(query) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } +} + +func TestClient_ChunkedQuery(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var data client.Response + w.WriteHeader(http.StatusOK) + enc := json.NewEncoder(w) + _ = enc.Encode(data) + _ = enc.Encode(data) + })) + defer ts.Close() + + u, _ := url.Parse(ts.URL) + config := client.Config{URL: *u} + c, err := client.NewClient(config) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + + query := client.Query{Chunked: true} + _, err = c.Query(query) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } +} + +func TestClient_QueryContext(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var data client.Response + w.WriteHeader(http.StatusOK) + _ = json.NewEncoder(w).Encode(data) + })) + defer ts.Close() + + u, _ := url.Parse(ts.URL) + config := client.Config{URL: *u} + c, err := client.NewClient(config) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + + query := client.Query{} + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + _, err = c.QueryContext(ctx, query) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } +} + +func TestClient_QueryContext_Cancelled(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var data client.Response + w.WriteHeader(http.StatusOK) + _ = json.NewEncoder(w).Encode(data) + })) + defer ts.Close() + + u, _ := url.Parse(ts.URL) + config := client.Config{URL: *u} + c, err := client.NewClient(config) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + + query := client.Query{} + + ctx, cancel := context.WithCancel(context.Background()) + cancel() + + _, err = c.QueryContext(ctx, query) + if err == nil { + t.Fatalf("Since context was cancelled an error was expected, but got nil.") + } +} + +func TestClient_ChunkedQuery_WithContext(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var data client.Response + w.WriteHeader(http.StatusOK) + enc := json.NewEncoder(w) + _ = enc.Encode(data) + _ = enc.Encode(data) + })) + defer ts.Close() + + u, _ := url.Parse(ts.URL) + config := client.Config{URL: *u} + c, err := client.NewClient(config) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + + query := client.Query{Chunked: true} + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + _, err = c.QueryContext(ctx, query) + if err != nil { + t.Fatalf("unexpected error. 
expected %v, actual %v", nil, err) + } +} + +func TestClient_BasicAuth(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + u, p, ok := r.BasicAuth() + + if !ok { + t.Errorf("basic auth error") + } + if u != "username" { + t.Errorf("unexpected username, expected %q, actual %q", "username", u) + } + if p != "password" { + t.Errorf("unexpected password, expected %q, actual %q", "password", p) + } + w.WriteHeader(http.StatusNoContent) + })) + defer ts.Close() + + u, _ := url.Parse(ts.URL) + u.User = url.UserPassword("username", "password") + config := client.Config{URL: *u, Username: "username", Password: "password"} + c, err := client.NewClient(config) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + + _, _, err = c.Ping() + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } +} + +func TestClient_Write(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + in, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } else if have, want := strings.TrimSpace(string(in)), `m0,host=server01 v1=2,v2=2i,v3=2u,v4="foobar",v5=true 0`; have != want { + t.Errorf("unexpected write protocol: %s != %s", have, want) + } + var data client.Response + w.WriteHeader(http.StatusNoContent) + _ = json.NewEncoder(w).Encode(data) + })) + defer ts.Close() + + u, _ := url.Parse(ts.URL) + config := client.Config{URL: *u} + c, err := client.NewClient(config) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + + bp := client.BatchPoints{ + Points: []client.Point{ + { + Measurement: "m0", + Tags: map[string]string{ + "host": "server01", + }, + Time: time.Unix(0, 0).UTC(), + Fields: map[string]interface{}{ + "v1": float64(2), + "v2": int64(2), + "v3": uint64(2), + "v4": "foobar", + "v5": true, + }, + }, + }, + } + r, err := c.Write(bp) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + if r != nil { + t.Fatalf("unexpected response. expected %v, actual %v", nil, r) + } +} + +func TestClient_UserAgent(t *testing.T) { + receivedUserAgent := "" + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + receivedUserAgent = r.UserAgent() + + var data client.Response + w.WriteHeader(http.StatusOK) + _ = json.NewEncoder(w).Encode(data) + })) + defer ts.Close() + + _, err := http.Get(ts.URL) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + + tests := []struct { + name string + userAgent string + expected string + }{ + { + name: "Empty user agent", + userAgent: "", + expected: "InfluxDBClient", + }, + { + name: "Custom user agent", + userAgent: "Test Influx Client", + expected: "Test Influx Client", + }, + } + + for _, test := range tests { + u, _ := url.Parse(ts.URL) + config := client.Config{URL: *u, UserAgent: test.userAgent} + c, err := client.NewClient(config) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + + receivedUserAgent = "" + query := client.Query{} + _, err = c.Query(query) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + if !strings.HasPrefix(receivedUserAgent, test.expected) { + t.Fatalf("Unexpected user agent. 
expected %v, actual %v", test.expected, receivedUserAgent) + } + + receivedUserAgent = "" + bp := client.BatchPoints{} + _, err = c.Write(bp) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + if !strings.HasPrefix(receivedUserAgent, test.expected) { + t.Fatalf("Unexpected user agent. expected %v, actual %v", test.expected, receivedUserAgent) + } + + receivedUserAgent = "" + _, _, err = c.Ping() + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + if receivedUserAgent != test.expected { + t.Fatalf("Unexpected user agent. expected %v, actual %v", test.expected, receivedUserAgent) + } + } +} + +func TestClient_Messages(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"results":[{"messages":[{"level":"warning","text":"deprecation test"}]}]}`)) + })) + defer ts.Close() + + u, _ := url.Parse(ts.URL) + config := client.Config{URL: *u} + c, err := client.NewClient(config) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + + query := client.Query{} + resp, err := c.Query(query) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + + if got, exp := len(resp.Results), 1; got != exp { + t.Fatalf("unexpected number of results. expected %v, actual %v", exp, got) + } + + r := resp.Results[0] + if got, exp := len(r.Messages), 1; got != exp { + t.Fatalf("unexpected number of messages. expected %v, actual %v", exp, got) + } + + m := r.Messages[0] + if got, exp := m.Level, "warning"; got != exp { + t.Errorf("unexpected message level. expected %v, actual %v", exp, got) + } + if got, exp := m.Text, "deprecation test"; got != exp { + t.Errorf("unexpected message text. expected %v, actual %v", exp, got) + } +} + +func TestPoint_UnmarshalEpoch(t *testing.T) { + now := time.Now() + tests := []struct { + name string + epoch int64 + precision string + expected time.Time + }{ + { + name: "nanoseconds", + epoch: now.UnixNano(), + precision: "n", + expected: now, + }, + { + name: "microseconds", + epoch: now.Round(time.Microsecond).UnixNano() / int64(time.Microsecond), + precision: "u", + expected: now.Round(time.Microsecond), + }, + { + name: "milliseconds", + epoch: now.Round(time.Millisecond).UnixNano() / int64(time.Millisecond), + precision: "ms", + expected: now.Round(time.Millisecond), + }, + { + name: "seconds", + epoch: now.Round(time.Second).UnixNano() / int64(time.Second), + precision: "s", + expected: now.Round(time.Second), + }, + { + name: "minutes", + epoch: now.Round(time.Minute).UnixNano() / int64(time.Minute), + precision: "m", + expected: now.Round(time.Minute), + }, + { + name: "hours", + epoch: now.Round(time.Hour).UnixNano() / int64(time.Hour), + precision: "h", + expected: now.Round(time.Hour), + }, + { + name: "max int64", + epoch: 9223372036854775807, + precision: "n", + expected: time.Unix(0, 9223372036854775807), + }, + { + name: "100 years from now", + epoch: now.Add(time.Hour * 24 * 365 * 100).UnixNano(), + precision: "n", + expected: now.Add(time.Hour * 24 * 365 * 100), + }, + } + + for _, test := range tests { + t.Logf("testing %q\n", test.name) + data := []byte(fmt.Sprintf(`{"time": %d, "precision":"%s"}`, test.epoch, test.precision)) + t.Logf("json: %s", string(data)) + var p client.Point + err := json.Unmarshal(data, &p) + if err != nil { + t.Fatalf("unexpected error. 
exptected: %v, actual: %v", nil, err) + } + if !p.Time.Equal(test.expected) { + t.Fatalf("Unexpected time. expected: %v, actual: %v", test.expected, p.Time) + } + } +} + +func TestPoint_UnmarshalRFC(t *testing.T) { + now := time.Now().UTC() + tests := []struct { + name string + rfc string + now time.Time + expected time.Time + }{ + { + name: "RFC3339Nano", + rfc: time.RFC3339Nano, + now: now, + expected: now, + }, + { + name: "RFC3339", + rfc: time.RFC3339, + now: now.Round(time.Second), + expected: now.Round(time.Second), + }, + } + + for _, test := range tests { + t.Logf("testing %q\n", test.name) + ts := test.now.Format(test.rfc) + data := []byte(fmt.Sprintf(`{"time": %q}`, ts)) + t.Logf("json: %s", string(data)) + var p client.Point + err := json.Unmarshal(data, &p) + if err != nil { + t.Fatalf("unexpected error. exptected: %v, actual: %v", nil, err) + } + if !p.Time.Equal(test.expected) { + t.Fatalf("Unexpected time. expected: %v, actual: %v", test.expected, p.Time) + } + } +} + +func TestPoint_MarshalOmitempty(t *testing.T) { + now := time.Now().UTC() + tests := []struct { + name string + point client.Point + now time.Time + expected string + }{ + { + name: "all empty", + point: client.Point{Measurement: "cpu", Fields: map[string]interface{}{"value": 1.1}}, + now: now, + expected: `{"measurement":"cpu","fields":{"value":1.1}}`, + }, + { + name: "with time", + point: client.Point{Measurement: "cpu", Fields: map[string]interface{}{"value": 1.1}, Time: now}, + now: now, + expected: fmt.Sprintf(`{"measurement":"cpu","time":"%s","fields":{"value":1.1}}`, now.Format(time.RFC3339Nano)), + }, + { + name: "with tags", + point: client.Point{Measurement: "cpu", Fields: map[string]interface{}{"value": 1.1}, Tags: map[string]string{"foo": "bar"}}, + now: now, + expected: `{"measurement":"cpu","tags":{"foo":"bar"},"fields":{"value":1.1}}`, + }, + { + name: "with precision", + point: client.Point{Measurement: "cpu", Fields: map[string]interface{}{"value": 1.1}, Precision: "ms"}, + now: now, + expected: `{"measurement":"cpu","fields":{"value":1.1},"precision":"ms"}`, + }, + } + + for _, test := range tests { + t.Logf("testing %q\n", test.name) + b, err := json.Marshal(&test.point) + if err != nil { + t.Fatalf("unexpected error. exptected: %v, actual: %v", nil, err) + } + if test.expected != string(b) { + t.Fatalf("Unexpected result. 
expected: %v, actual: %v", test.expected, string(b)) + } + } +} + +func TestEpochToTime(t *testing.T) { + now := time.Now() + + tests := []struct { + name string + epoch int64 + precision string + expected time.Time + }{ + {name: "nanoseconds", epoch: now.UnixNano(), precision: "n", expected: now}, + {name: "microseconds", epoch: now.Round(time.Microsecond).UnixNano() / int64(time.Microsecond), precision: "u", expected: now.Round(time.Microsecond)}, + {name: "milliseconds", epoch: now.Round(time.Millisecond).UnixNano() / int64(time.Millisecond), precision: "ms", expected: now.Round(time.Millisecond)}, + {name: "seconds", epoch: now.Round(time.Second).UnixNano() / int64(time.Second), precision: "s", expected: now.Round(time.Second)}, + {name: "minutes", epoch: now.Round(time.Minute).UnixNano() / int64(time.Minute), precision: "m", expected: now.Round(time.Minute)}, + {name: "hours", epoch: now.Round(time.Hour).UnixNano() / int64(time.Hour), precision: "h", expected: now.Round(time.Hour)}, + } + + for _, test := range tests { + t.Logf("testing %q\n", test.name) + tm, e := client.EpochToTime(test.epoch, test.precision) + if e != nil { + t.Fatalf("unexpected error: expected %v, actual: %v", nil, e) + } + if !tm.Equal(test.expected) { + t.Fatalf("unexpected time: expected %v, actual %v", test.expected, tm) + } + } +} + +// helper functions + +func emptyTestServer() *httptest.Server { + return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + time.Sleep(50 * time.Millisecond) + w.Header().Set("X-Influxdb-Version", "x.x") + })) +} + +// Ensure that data with epoch times can be decoded. +func TestBatchPoints_Normal(t *testing.T) { + var bp client.BatchPoints + data := []byte(` +{ + "database": "foo", + "retentionPolicy": "bar", + "points": [ + { + "name": "cpu", + "tags": { + "host": "server01" + }, + "time": 14244733039069373, + "precision": "n", + "values": { + "value": 4541770385657154000 + } + }, + { + "name": "cpu", + "tags": { + "host": "server01" + }, + "time": 14244733039069380, + "precision": "n", + "values": { + "value": 7199311900554737000 + } + } + ] +} +`) + + if err := json.Unmarshal(data, &bp); err != nil { + t.Errorf("unable to unmarshal nanosecond data: %s", err.Error()) + } +} + +func TestClient_Timeout(t *testing.T) { + done := make(chan bool) + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + <-done + })) + defer ts.Close() + defer func() { done <- true }() + u, _ := url.Parse(ts.URL) + config := client.Config{URL: *u, Timeout: 500 * time.Millisecond} + c, err := client.NewClient(config) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + query := client.Query{} + _, err = c.Query(query) + if err == nil { + t.Fatalf("unexpected success. expected timeout error") + } else if !strings.Contains(err.Error(), "request canceled") && + !strings.Contains(err.Error(), "use of closed network connection") { + t.Fatalf("unexpected error. 
expected 'request canceled' error, got %v", err) + } +} + +func TestClient_NoTimeout(t *testing.T) { + if testing.Short() { + t.Skip("skipping in short mode") + } + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + time.Sleep(1 * time.Second) + var data client.Response + w.WriteHeader(http.StatusOK) + _ = json.NewEncoder(w).Encode(data) + })) + defer ts.Close() + + u, _ := url.Parse(ts.URL) + config := client.Config{URL: *u} + c, err := client.NewClient(config) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + + query := client.Query{} + _, err = c.Query(query) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } +} + +func TestClient_ParseConnectionString(t *testing.T) { + for _, tt := range []struct { + addr string + ssl bool + exp string + }{ + { + addr: "localhost", + exp: "http://localhost:8086", + }, + { + addr: "localhost:8086", + exp: "http://localhost:8086", + }, + { + addr: "localhost:80", + exp: "http://localhost", + }, + { + addr: "localhost", + exp: "https://localhost:8086", + ssl: true, + }, + { + addr: "localhost:443", + exp: "https://localhost", + ssl: true, + }, + { + addr: "localhost:80", + exp: "https://localhost:80", + ssl: true, + }, + { + addr: "localhost:443", + exp: "http://localhost:443", + }, + } { + name := tt.addr + if tt.ssl { + name += "+ssl" + } + t.Run(name, func(t *testing.T) { + u, err := client.ParseConnectionString(tt.addr, tt.ssl) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if got, want := u.String(), tt.exp; got != want { + t.Fatalf("unexpected connection string: got=%s want=%s", got, want) + } + }) + } +} + +func TestClient_ParseConnectionString_IPv6(t *testing.T) { + path := "[fdf5:9ede:1875:0:a9ee:a600:8fe3:d495]:8086" + u, err := client.ParseConnectionString(path, false) + if err != nil { + t.Fatalf("unexpected error, expected %v, actual %v", nil, err) + } + if u.Host != path { + t.Fatalf("ipv6 parse failed, expected %s, actual %s", path, u.Host) + } +} + +func TestClient_CustomCertificates(t *testing.T) { + // generated with: + // openssl req -x509 -newkey rsa:2048 -keyout key.pem -out cert.pem -days 3650 -nodes -config influx.cnf + // influx.cnf: + // [req] + // distinguished_name = req_distinguished_name + // x509_extensions = v3_req + // prompt = no + // [req_distinguished_name] + // C = US + // ST = CA + // L = San Francisco + // O = InfluxDB + // CN = github.com/influxdata + // [v3_req] + // keyUsage = keyEncipherment, dataEncipherment + // extendedKeyUsage = serverAuth + // subjectAltName = @alt_names + // [alt_names] + // IP.1 = 127.0.0.1 + // + key := ` +-----BEGIN PRIVATE KEY----- +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDLswqKJLxfhBRi +4qdj7+jpBxTAi4MewrcMPp+9YlbLke3F7w2DPrZVkYVeWmg8LyTPAigrXeadK6hv +qjRr05a7sMc5+ynivGbWUySWT+u17V85x6VR5TMIkJEOqpiIU8aYk0l+3UcrzVjS +1QZCUBoxVwAVaSR6AXTA8YrVXdk/AI3f22dYiBjFmV4LJJkGjTaCnlDKu54hMU1t +WTyFcoY9TBzZ1XA+ng5RQ/QADeL2PYrTW4s/mLI3jfKKD53EI4uM2FjW37ZfuxTa +mhCR7/lxM4COg9K70y5uebfqJvuoXAwXLOzVbdfF5b9fJFbL67kaK2tiMT3Wt39m +hXzclLTDAgMBAAECggEAK8mpElkjRUUXPMqMQSdpYe5rv5g973bb8n3jyMpC7i/I +dSwWM4hfmbVWfhnhHk7kErvb9raQxGiGJLrp2eP6Gw69RPGA54SodpoY21cCzHDi +b4FDQH+MoOKyy/xQHb4kitfejK70ha320huI5OhjOQgCtJeNh8yYVIGX3pX2BVyu +36UB9tfX1S5pbiHeih3vZGd322Muj/joNzIelnYRBnoO0xqvQ0S1Dk+dLCTHO0/m +u9AZN8c2TsRWZpJPMWwBv8LuABbE0e66/TSsrfklAn86ELCo44lZURDE7uPZ4pIH +FWtmf+nW5Hy6aPhy60E40MqotlejhWwB3ktY/m3JAQKBgQDuB4nhxzJA9lH9EaCt 
+byvJ9wGVvI3k79hOwc/Z2R3dNe+Ma+TJy+aBppvsLF4qz83aWC+canyasbHcPNR/ +vXQGlsgKfucrmd1PfMV7uvOIkfOjK0E6mRC+jMuKtNTQrdtM1BU/Z7LY0iy0fNJ6 +aNqhFdlJmmk0g+4bR4SAWB6FkwKBgQDbE/7r1u+GdJk/mhdjTi1aegr9lXb0l7L6 +BCvOYhs/Z/pXfsaYPSXhgk2w+LiGk6BaEA2/4Sr0YS2MAAaIhBVeFBIXVpNrXB3K +Yg1jOEeLQ3qoVBeJFhJNrN9ZQx33HANC1W/Y1apMwaYqCRUGVQkrdcsN2KNea1z0 +3qeYeCCSEQKBgCKZKeuNfrp+k1BLnaVYAW9r3ekb7SwXyMM53LJ3oqWiz10D2c+T +OcAirYtYr59dcTiJlPIRcGcz6PxwQxsGOLU0eYM9CvEFfmutYS8o73ksbdOL2AFi +elKYOIXC3yQuATBbq3L56b8mXaUmd5mfYBgGCv1t2ljtzFBext248UbNAoGBAIv1 +2V24YiwnH6THf/ucfVMZNx5Mt8OJivk5YvcmLDw05HWzc5LdNe89PP871z963u3K +5c3ZP4UC9INFnOboY3JIJkqsr9/d6NZcECt8UBDDmoAhwSt+Y1EmiUZQn7s4NUkk +bKE919/Ts6GVTc5O013lkkUVS0HOG4QBH1dEH6LRAoGAStl11WA9tuKXiBl5XG/C +cq9mFPNJK3pEgd6YH874vEnYEEqENR4MFK3uWXus9Nm+VYxbUbPEzFF4kpsfukDg +/JAVqY4lUam7g6fyyaoIIPQEp7jGjbsUf46IjnUjFcaojOugA3EAfn9awREUDuJZ +cvh4WzEegcExTppINW1NB5E= +-----END PRIVATE KEY----- +` + cert := ` +-----BEGIN CERTIFICATE----- +MIIDdjCCAl6gAwIBAgIJAMYGAwkxUV51MA0GCSqGSIb3DQEBCwUAMFgxCzAJBgNV +BAYTAlVTMQswCQYDVQQIDAJDQTEWMBQGA1UEBwwNU2FuIEZyYW5jaXNjbzERMA8G +A1UECgwISW5mbHV4REIxETAPBgNVBAMMCGluZmx1eGRiMB4XDTE1MTIyOTAxNTg1 +NloXDTI1MTIyNjAxNTg1NlowWDELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMRYw +FAYDVQQHDA1TYW4gRnJhbmNpc2NvMREwDwYDVQQKDAhJbmZsdXhEQjERMA8GA1UE +AwwIaW5mbHV4ZGIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDLswqK +JLxfhBRi4qdj7+jpBxTAi4MewrcMPp+9YlbLke3F7w2DPrZVkYVeWmg8LyTPAigr +XeadK6hvqjRr05a7sMc5+ynivGbWUySWT+u17V85x6VR5TMIkJEOqpiIU8aYk0l+ +3UcrzVjS1QZCUBoxVwAVaSR6AXTA8YrVXdk/AI3f22dYiBjFmV4LJJkGjTaCnlDK +u54hMU1tWTyFcoY9TBzZ1XA+ng5RQ/QADeL2PYrTW4s/mLI3jfKKD53EI4uM2FjW +37ZfuxTamhCR7/lxM4COg9K70y5uebfqJvuoXAwXLOzVbdfF5b9fJFbL67kaK2ti +MT3Wt39mhXzclLTDAgMBAAGjQzBBMAwGA1UdEwQFMAMBAf8wCwYDVR0PBAQDAgQw +MBMGA1UdJQQMMAoGCCsGAQUFBwMBMA8GA1UdEQQIMAaHBH8AAAEwDQYJKoZIhvcN +AQELBQADggEBAJxgHeduV9q2BuKnrt+sjXLGn/HwbMbgGbgFK6kUKJBWtv6Pa7JJ +m4teDmTMWiaeB2g4N2bmaWTuEZzzShNKG5roFeWm1ilFMAyzkb+VifN4YuDKH62F +3e259qsytiGbbJF3F//4sjfMw8qZVEPvspG1zKsASo0PpSOOUFmxcj0oMAXhnMrk +rRcbk6fufhyq0iZGl8ZLKTCrkjk0b3qlNs6UaRD9/XBB59VlQ8I338sfjV06edwY +jn5Amab0uyoFNEp70Y4WGxrxUTS1GAC1LCA13S7EnidD440UrnWALTarjmHAK6aW +war3JNM1mGB3o2iAtuOJlFIKLpI1x+1e8pI= +-----END CERTIFICATE----- +` + cer, err := tls.X509KeyPair([]byte(cert), []byte(key)) + + if err != nil { + t.Fatalf("Received error: %v", err) + } + + server := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var data client.Response + w.WriteHeader(http.StatusOK) + _ = json.NewEncoder(w).Encode(data) + })) + server.TLS = &tls.Config{Certificates: []tls.Certificate{cer}} + server.TLS.BuildNameToCertificate() + server.StartTLS() + defer server.Close() + + certFile, _ := ioutil.TempFile("", "influx-cert-") + certFile.WriteString(cert) + certFile.Close() + defer os.Remove(certFile.Name()) + + u, _ := url.Parse(server.URL) + + tests := []struct { + name string + unsafeSsl bool + expected error + }{ + {name: "validate certificates", unsafeSsl: false, expected: errors.New("error")}, + {name: "not validate certificates", unsafeSsl: true, expected: nil}, + } + + for _, test := range tests { + config := client.Config{URL: *u, UnsafeSsl: test.unsafeSsl} + c, err := client.NewClient(config) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + query := client.Query{} + _, err = c.Query(query) + + if (test.expected == nil) != (err == nil) { + t.Fatalf("%s: expected %v. got %v. 
unsafeSsl: %v", test.name, test.expected, err, test.unsafeSsl) + } + } +} + +func TestChunkedResponse(t *testing.T) { + s := `{"results":[{},{}]}{"results":[{}]}` + r := client.NewChunkedResponse(strings.NewReader(s)) + resp, err := r.NextResponse() + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } else if actual := len(resp.Results); actual != 2 { + t.Fatalf("unexpected number of results. expected %v, actual %v", 2, actual) + } + + resp, err = r.NextResponse() + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } else if actual := len(resp.Results); actual != 1 { + t.Fatalf("unexpected number of results. expected %v, actual %v", 1, actual) + } + + resp, err = r.NextResponse() + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } else if resp != nil { + t.Fatalf("unexpected response. expected %v, actual %v", nil, resp) + } +} + +func TestClient_Proxy(t *testing.T) { + pinged := false + server := httptest.NewServer(http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) { + if got, want := req.URL.String(), "http://example.com:8086/ping"; got != want { + t.Errorf("invalid url in request: got=%s want=%s", got, want) + } + resp.WriteHeader(http.StatusNoContent) + pinged = true + })) + defer server.Close() + + proxyURL, _ := url.Parse(server.URL) + c, err := client.NewClient(client.Config{ + URL: url.URL{ + Scheme: "http", + Host: "example.com:8086", + }, + Proxy: http.ProxyURL(proxyURL), + }) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + if _, _, err := c.Ping(); err != nil { + t.Fatalf("could not ping server: %s", err) + } + + if !pinged { + t.Fatalf("no http request was received") + } +} diff --git a/vendor/github.com/influxdata/influxdb/client/v2/client.go b/vendor/github.com/influxdata/influxdb/client/v2/client.go new file mode 100644 index 0000000..6a5c238 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/client/v2/client.go @@ -0,0 +1,662 @@ +// Package client (v2) is the current official Go client for InfluxDB. +package client // import "github.com/influxdata/influxdb/client/v2" + +import ( + "bytes" + "crypto/tls" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "mime" + "net/http" + "net/url" + "path" + "strconv" + "strings" + "time" + + "github.com/influxdata/influxdb/models" +) + +// HTTPConfig is the config data needed to create an HTTP Client. +type HTTPConfig struct { + // Addr should be of the form "http://host:port" + // or "http://[ipv6-host%zone]:port". + Addr string + + // Username is the influxdb username, optional. + Username string + + // Password is the influxdb password, optional. + Password string + + // UserAgent is the http User Agent, defaults to "InfluxDBClient". + UserAgent string + + // Timeout for influxdb writes, defaults to no timeout. + Timeout time.Duration + + // InsecureSkipVerify gets passed to the http client, if true, it will + // skip https certificate verification. Defaults to false. + InsecureSkipVerify bool + + // TLSConfig allows the user to set their own TLS config for the HTTP + // Client. If set, this option overrides InsecureSkipVerify. + TLSConfig *tls.Config + + // Proxy configures the Proxy function on the HTTP client. + Proxy func(req *http.Request) (*url.URL, error) +} + +// BatchPointsConfig is the config data needed to create an instance of the BatchPoints struct. +type BatchPointsConfig struct { + // Precision is the write precision of the points, defaults to "ns". 
+ Precision string + + // Database is the database to write points to. + Database string + + // RetentionPolicy is the retention policy of the points. + RetentionPolicy string + + // Write consistency is the number of servers required to confirm write. + WriteConsistency string +} + +// Client is a client interface for writing & querying the database. +type Client interface { + // Ping checks that status of cluster, and will always return 0 time and no + // error for UDP clients. + Ping(timeout time.Duration) (time.Duration, string, error) + + // Write takes a BatchPoints object and writes all Points to InfluxDB. + Write(bp BatchPoints) error + + // Query makes an InfluxDB Query on the database. This will fail if using + // the UDP client. + Query(q Query) (*Response, error) + + // Close releases any resources a Client may be using. + Close() error +} + +// NewHTTPClient returns a new Client from the provided config. +// Client is safe for concurrent use by multiple goroutines. +func NewHTTPClient(conf HTTPConfig) (Client, error) { + if conf.UserAgent == "" { + conf.UserAgent = "InfluxDBClient" + } + + u, err := url.Parse(conf.Addr) + if err != nil { + return nil, err + } else if u.Scheme != "http" && u.Scheme != "https" { + m := fmt.Sprintf("Unsupported protocol scheme: %s, your address"+ + " must start with http:// or https://", u.Scheme) + return nil, errors.New(m) + } + + tr := &http.Transport{ + TLSClientConfig: &tls.Config{ + InsecureSkipVerify: conf.InsecureSkipVerify, + }, + Proxy: conf.Proxy, + } + if conf.TLSConfig != nil { + tr.TLSClientConfig = conf.TLSConfig + } + return &client{ + url: *u, + username: conf.Username, + password: conf.Password, + useragent: conf.UserAgent, + httpClient: &http.Client{ + Timeout: conf.Timeout, + Transport: tr, + }, + transport: tr, + }, nil +} + +// Ping will check to see if the server is up with an optional timeout on waiting for leader. +// Ping returns how long the request took, the version of the server it connected to, and an error if one occurred. +func (c *client) Ping(timeout time.Duration) (time.Duration, string, error) { + now := time.Now() + + u := c.url + u.Path = path.Join(u.Path, "ping") + + req, err := http.NewRequest("GET", u.String(), nil) + if err != nil { + return 0, "", err + } + + req.Header.Set("User-Agent", c.useragent) + + if c.username != "" { + req.SetBasicAuth(c.username, c.password) + } + + if timeout > 0 { + params := req.URL.Query() + params.Set("wait_for_leader", fmt.Sprintf("%.0fs", timeout.Seconds())) + req.URL.RawQuery = params.Encode() + } + + resp, err := c.httpClient.Do(req) + if err != nil { + return 0, "", err + } + defer resp.Body.Close() + + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return 0, "", err + } + + if resp.StatusCode != http.StatusNoContent { + var err = fmt.Errorf(string(body)) + return 0, "", err + } + + version := resp.Header.Get("X-Influxdb-Version") + return time.Since(now), version, nil +} + +// Close releases the client's resources. +func (c *client) Close() error { + c.transport.CloseIdleConnections() + return nil +} + +// client is safe for concurrent use as the fields are all read-only +// once the client is instantiated. +type client struct { + // N.B - if url.UserInfo is accessed in future modifications to the + // methods on client, you will need to synchronize access to url. 
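The Ping implementation above turns a positive timeout into a wait_for_leader query parameter and returns the reported server version. A small illustrative sketch, assuming a server at http://localhost:8086 (placeholder):

package main

import (
	"fmt"
	"log"
	"time"

	client "github.com/influxdata/influxdb/client/v2"
)

func main() {
	// Addr is a placeholder; Username, Password and Timeout are optional in HTTPConfig.
	c, err := client.NewHTTPClient(client.HTTPConfig{Addr: "http://localhost:8086"})
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	// A positive timeout is sent as wait_for_leader=<n>s; zero omits the parameter.
	rtt, version, err := c.Ping(5 * time.Second)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("ping %v, server version %s\n", rtt, version)
}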
+ url url.URL + username string + password string + useragent string + httpClient *http.Client + transport *http.Transport +} + +// BatchPoints is an interface into a batched grouping of points to write into +// InfluxDB together. BatchPoints is NOT thread-safe, you must create a separate +// batch for each goroutine. +type BatchPoints interface { + // AddPoint adds the given point to the Batch of points. + AddPoint(p *Point) + // AddPoints adds the given points to the Batch of points. + AddPoints(ps []*Point) + // Points lists the points in the Batch. + Points() []*Point + + // Precision returns the currently set precision of this Batch. + Precision() string + // SetPrecision sets the precision of this batch. + SetPrecision(s string) error + + // Database returns the currently set database of this Batch. + Database() string + // SetDatabase sets the database of this Batch. + SetDatabase(s string) + + // WriteConsistency returns the currently set write consistency of this Batch. + WriteConsistency() string + // SetWriteConsistency sets the write consistency of this Batch. + SetWriteConsistency(s string) + + // RetentionPolicy returns the currently set retention policy of this Batch. + RetentionPolicy() string + // SetRetentionPolicy sets the retention policy of this Batch. + SetRetentionPolicy(s string) +} + +// NewBatchPoints returns a BatchPoints interface based on the given config. +func NewBatchPoints(conf BatchPointsConfig) (BatchPoints, error) { + if conf.Precision == "" { + conf.Precision = "ns" + } + if _, err := time.ParseDuration("1" + conf.Precision); err != nil { + return nil, err + } + bp := &batchpoints{ + database: conf.Database, + precision: conf.Precision, + retentionPolicy: conf.RetentionPolicy, + writeConsistency: conf.WriteConsistency, + } + return bp, nil +} + +type batchpoints struct { + points []*Point + database string + precision string + retentionPolicy string + writeConsistency string +} + +func (bp *batchpoints) AddPoint(p *Point) { + bp.points = append(bp.points, p) +} + +func (bp *batchpoints) AddPoints(ps []*Point) { + bp.points = append(bp.points, ps...) +} + +func (bp *batchpoints) Points() []*Point { + return bp.points +} + +func (bp *batchpoints) Precision() string { + return bp.precision +} + +func (bp *batchpoints) Database() string { + return bp.database +} + +func (bp *batchpoints) WriteConsistency() string { + return bp.writeConsistency +} + +func (bp *batchpoints) RetentionPolicy() string { + return bp.retentionPolicy +} + +func (bp *batchpoints) SetPrecision(p string) error { + if _, err := time.ParseDuration("1" + p); err != nil { + return err + } + bp.precision = p + return nil +} + +func (bp *batchpoints) SetDatabase(db string) { + bp.database = db +} + +func (bp *batchpoints) SetWriteConsistency(wc string) { + bp.writeConsistency = wc +} + +func (bp *batchpoints) SetRetentionPolicy(rp string) { + bp.retentionPolicy = rp +} + +// Point represents a single data point. +type Point struct { + pt models.Point +} + +// NewPoint returns a point with the given timestamp. If a timestamp is not +// given, then data is sent to the database without a timestamp, in which case +// the server will assign local time upon reception. NOTE: it is recommended to +// send data with a timestamp. 
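NewPoint below is normally combined with NewHTTPClient, NewBatchPoints and Write from earlier in this file. A minimal write sketch, assuming a server at http://localhost:8086 and a database named "mydb" (both placeholders):

package main

import (
	"log"
	"time"

	client "github.com/influxdata/influxdb/client/v2"
)

func main() {
	// Addr and the database name are placeholders.
	c, err := client.NewHTTPClient(client.HTTPConfig{Addr: "http://localhost:8086"})
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	// Precision defaults to "ns" when left empty; "s" keeps the payload small.
	bp, err := client.NewBatchPoints(client.BatchPointsConfig{
		Database:  "mydb",
		Precision: "s",
	})
	if err != nil {
		log.Fatal(err)
	}

	// Pass an explicit timestamp, as the NewPoint comment above recommends.
	pt, err := client.NewPoint(
		"cpu",
		map[string]string{"host": "server01"},
		map[string]interface{}{"value": 0.64},
		time.Now(),
	)
	if err != nil {
		log.Fatal(err)
	}
	bp.AddPoint(pt)

	// Write sends the whole batch as one line-protocol request.
	if err := c.Write(bp); err != nil {
		log.Fatal(err)
	}
}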
+func NewPoint( + name string, + tags map[string]string, + fields map[string]interface{}, + t ...time.Time, +) (*Point, error) { + var T time.Time + if len(t) > 0 { + T = t[0] + } + + pt, err := models.NewPoint(name, models.NewTags(tags), fields, T) + if err != nil { + return nil, err + } + return &Point{ + pt: pt, + }, nil +} + +// String returns a line-protocol string of the Point. +func (p *Point) String() string { + return p.pt.String() +} + +// PrecisionString returns a line-protocol string of the Point, +// with the timestamp formatted for the given precision. +func (p *Point) PrecisionString(precision string) string { + return p.pt.PrecisionString(precision) +} + +// Name returns the measurement name of the point. +func (p *Point) Name() string { + return string(p.pt.Name()) +} + +// Tags returns the tags associated with the point. +func (p *Point) Tags() map[string]string { + return p.pt.Tags().Map() +} + +// Time return the timestamp for the point. +func (p *Point) Time() time.Time { + return p.pt.Time() +} + +// UnixNano returns timestamp of the point in nanoseconds since Unix epoch. +func (p *Point) UnixNano() int64 { + return p.pt.UnixNano() +} + +// Fields returns the fields for the point. +func (p *Point) Fields() (map[string]interface{}, error) { + return p.pt.Fields() +} + +// NewPointFrom returns a point from the provided models.Point. +func NewPointFrom(pt models.Point) *Point { + return &Point{pt: pt} +} + +func (c *client) Write(bp BatchPoints) error { + var b bytes.Buffer + + for _, p := range bp.Points() { + if p == nil { + continue + } + if _, err := b.WriteString(p.pt.PrecisionString(bp.Precision())); err != nil { + return err + } + + if err := b.WriteByte('\n'); err != nil { + return err + } + } + + u := c.url + u.Path = path.Join(u.Path, "write") + + req, err := http.NewRequest("POST", u.String(), &b) + if err != nil { + return err + } + req.Header.Set("Content-Type", "") + req.Header.Set("User-Agent", c.useragent) + if c.username != "" { + req.SetBasicAuth(c.username, c.password) + } + + params := req.URL.Query() + params.Set("db", bp.Database()) + params.Set("rp", bp.RetentionPolicy()) + params.Set("precision", bp.Precision()) + params.Set("consistency", bp.WriteConsistency()) + req.URL.RawQuery = params.Encode() + + resp, err := c.httpClient.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return err + } + + if resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusOK { + var err = fmt.Errorf(string(body)) + return err + } + + return nil +} + +// Query defines a query to send to the server. +type Query struct { + Command string + Database string + RetentionPolicy string + Precision string + Chunked bool + ChunkSize int + Parameters map[string]interface{} +} + +// NewQuery returns a query object. +// The database and precision arguments can be empty strings if they are not needed for the query. +func NewQuery(command, database, precision string) Query { + return Query{ + Command: command, + Database: database, + Precision: precision, + Parameters: make(map[string]interface{}), + } +} + +// NewQueryWithRP returns a query object. +// The database, retention policy, and precision arguments can be empty strings if they are not needed +// for the query. Setting the retention policy only works on InfluxDB versions 1.6 or greater. 
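NewQueryWithRP, defined next, pairs with the Query method and Response.Error further down. An illustrative query sketch, assuming a database "mydb" and retention policy "autogen" (placeholders) on a server of at least version 1.6 so the rp parameter is honored:

package main

import (
	"fmt"
	"log"

	client "github.com/influxdata/influxdb/client/v2"
)

func main() {
	// Addr is a placeholder.
	c, err := client.NewHTTPClient(client.HTTPConfig{Addr: "http://localhost:8086"})
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	// Database, retention policy and precision are optional; empty strings skip them.
	q := client.NewQueryWithRP(`SELECT count("value") FROM "cpu"`, "mydb", "autogen", "s")

	resp, err := c.Query(q)
	if err != nil {
		log.Fatal(err)
	}
	// Error() returns the first error found in the response, if any.
	if err := resp.Error(); err != nil {
		log.Fatal(err)
	}
	for _, result := range resp.Results {
		fmt.Println(result.Series)
	}
}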
+func NewQueryWithRP(command, database, retentionPolicy, precision string) Query { + return Query{ + Command: command, + Database: database, + RetentionPolicy: retentionPolicy, + Precision: precision, + Parameters: make(map[string]interface{}), + } +} + +// NewQueryWithParameters returns a query object. +// The database and precision arguments can be empty strings if they are not needed for the query. +// parameters is a map of the parameter names used in the command to their values. +func NewQueryWithParameters(command, database, precision string, parameters map[string]interface{}) Query { + return Query{ + Command: command, + Database: database, + Precision: precision, + Parameters: parameters, + } +} + +// Response represents a list of statement results. +type Response struct { + Results []Result + Err string `json:"error,omitempty"` +} + +// Error returns the first error from any statement. +// It returns nil if no errors occurred on any statements. +func (r *Response) Error() error { + if r.Err != "" { + return fmt.Errorf(r.Err) + } + for _, result := range r.Results { + if result.Err != "" { + return fmt.Errorf(result.Err) + } + } + return nil +} + +// Message represents a user message. +type Message struct { + Level string + Text string +} + +// Result represents a resultset returned from a single statement. +type Result struct { + Series []models.Row + Messages []*Message + Err string `json:"error,omitempty"` +} + +// Query sends a command to the server and returns the Response. +func (c *client) Query(q Query) (*Response, error) { + u := c.url + u.Path = path.Join(u.Path, "query") + + jsonParameters, err := json.Marshal(q.Parameters) + + if err != nil { + return nil, err + } + + req, err := http.NewRequest("POST", u.String(), nil) + if err != nil { + return nil, err + } + + req.Header.Set("Content-Type", "") + req.Header.Set("User-Agent", c.useragent) + + if c.username != "" { + req.SetBasicAuth(c.username, c.password) + } + + params := req.URL.Query() + params.Set("q", q.Command) + params.Set("db", q.Database) + if q.RetentionPolicy != "" { + params.Set("rp", q.RetentionPolicy) + } + params.Set("params", string(jsonParameters)) + if q.Chunked { + params.Set("chunked", "true") + if q.ChunkSize > 0 { + params.Set("chunk_size", strconv.Itoa(q.ChunkSize)) + } + } + + if q.Precision != "" { + params.Set("epoch", q.Precision) + } + req.URL.RawQuery = params.Encode() + + resp, err := c.httpClient.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + // If we lack a X-Influxdb-Version header, then we didn't get a response from influxdb + // but instead some other service. If the error code is also a 500+ code, then some + // downstream loadbalancer/proxy/etc had an issue and we should report that. + if resp.Header.Get("X-Influxdb-Version") == "" && resp.StatusCode >= http.StatusInternalServerError { + body, err := ioutil.ReadAll(resp.Body) + if err != nil || len(body) == 0 { + return nil, fmt.Errorf("received status code %d from downstream server", resp.StatusCode) + } + + return nil, fmt.Errorf("received status code %d from downstream server, with response body: %q", resp.StatusCode, body) + } + + // If we get an unexpected content type, then it is also not from influx direct and therefore + // we want to know what we received and what status code was returned for debugging purposes. 
+ if cType, _, _ := mime.ParseMediaType(resp.Header.Get("Content-Type")); cType != "application/json" { + // Read up to 1kb of the body to help identify downstream errors and limit the impact of things + // like downstream serving a large file + body, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1024)) + if err != nil || len(body) == 0 { + return nil, fmt.Errorf("expected json response, got empty body, with status: %v", resp.StatusCode) + } + + return nil, fmt.Errorf("expected json response, got %q, with status: %v and response body: %q", cType, resp.StatusCode, body) + } + + var response Response + if q.Chunked { + cr := NewChunkedResponse(resp.Body) + for { + r, err := cr.NextResponse() + if err != nil { + // If we got an error while decoding the response, send that back. + return nil, err + } + + if r == nil { + break + } + + response.Results = append(response.Results, r.Results...) + if r.Err != "" { + response.Err = r.Err + break + } + } + } else { + dec := json.NewDecoder(resp.Body) + dec.UseNumber() + decErr := dec.Decode(&response) + + // ignore this error if we got an invalid status code + if decErr != nil && decErr.Error() == "EOF" && resp.StatusCode != http.StatusOK { + decErr = nil + } + // If we got a valid decode error, send that back + if decErr != nil { + return nil, fmt.Errorf("unable to decode json: received status code %d err: %s", resp.StatusCode, decErr) + } + } + + // If we don't have an error in our json response, and didn't get statusOK + // then send back an error + if resp.StatusCode != http.StatusOK && response.Error() == nil { + return &response, fmt.Errorf("received status code %d from server", resp.StatusCode) + } + return &response, nil +} + +// duplexReader reads responses and writes it to another writer while +// satisfying the reader interface. +type duplexReader struct { + r io.Reader + w io.Writer +} + +func (r *duplexReader) Read(p []byte) (n int, err error) { + n, err = r.r.Read(p) + if err == nil { + r.w.Write(p[:n]) + } + return n, err +} + +// ChunkedResponse represents a response from the server that +// uses chunking to stream the output. +type ChunkedResponse struct { + dec *json.Decoder + duplex *duplexReader + buf bytes.Buffer +} + +// NewChunkedResponse reads a stream and produces responses from the stream. +func NewChunkedResponse(r io.Reader) *ChunkedResponse { + resp := &ChunkedResponse{} + resp.duplex = &duplexReader{r: r, w: &resp.buf} + resp.dec = json.NewDecoder(resp.duplex) + resp.dec.UseNumber() + return resp +} + +// NextResponse reads the next line of the stream and returns a response. +func (r *ChunkedResponse) NextResponse() (*Response, error) { + var response Response + + if err := r.dec.Decode(&response); err != nil { + if err == io.EOF { + return nil, nil + } + // A decoding error happened. This probably means the server crashed + // and sent a last-ditch error message to us. Ensure we have read the + // entirety of the connection to get any remaining error text. 
+ io.Copy(ioutil.Discard, r.duplex) + return nil, errors.New(strings.TrimSpace(r.buf.String())) + } + + r.buf.Reset() + return &response, nil +} diff --git a/vendor/github.com/influxdata/influxdb/client/v2/client_test.go b/vendor/github.com/influxdata/influxdb/client/v2/client_test.go new file mode 100644 index 0000000..27dd13b --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/client/v2/client_test.go @@ -0,0 +1,913 @@ +package client + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "net/url" + "path" + "reflect" + "strings" + "sync" + "testing" + "time" +) + +func TestUDPClient_Query(t *testing.T) { + config := UDPConfig{Addr: "localhost:8089"} + c, err := NewUDPClient(config) + if err != nil { + t.Errorf("unexpected error. expected %v, actual %v", nil, err) + } + defer c.Close() + query := Query{} + _, err = c.Query(query) + if err == nil { + t.Error("Querying UDP client should fail") + } +} + +func TestUDPClient_Ping(t *testing.T) { + config := UDPConfig{Addr: "localhost:8089"} + c, err := NewUDPClient(config) + if err != nil { + t.Errorf("unexpected error. expected %v, actual %v", nil, err) + } + defer c.Close() + + rtt, version, err := c.Ping(0) + if rtt != 0 || version != "" || err != nil { + t.Errorf("unexpected error. expected (%v, '%v', %v), actual (%v, '%v', %v)", 0, "", nil, rtt, version, err) + } +} + +func TestUDPClient_Write(t *testing.T) { + config := UDPConfig{Addr: "localhost:8089"} + c, err := NewUDPClient(config) + if err != nil { + t.Errorf("unexpected error. expected %v, actual %v", nil, err) + } + defer c.Close() + + bp, err := NewBatchPoints(BatchPointsConfig{}) + if err != nil { + t.Errorf("unexpected error. expected %v, actual %v", nil, err) + } + + fields := make(map[string]interface{}) + fields["value"] = 1.0 + pt, _ := NewPoint("cpu", make(map[string]string), fields) + bp.AddPoint(pt) + + err = c.Write(bp) + if err != nil { + t.Errorf("unexpected error. 
expected %v, actual %v", nil, err) + } +} + +func TestUDPClient_BadAddr(t *testing.T) { + config := UDPConfig{Addr: "foobar@wahoo"} + c, err := NewUDPClient(config) + if err == nil { + defer c.Close() + t.Error("Expected resolve error") + } +} + +func TestUDPClient_Batches(t *testing.T) { + var logger writeLogger + var cl udpclient + + cl.conn = &logger + cl.payloadSize = 20 // should allow for two points per batch + + // expected point should look like this: "cpu a=1i" + fields := map[string]interface{}{"a": 1} + + p, _ := NewPoint("cpu", nil, fields, time.Time{}) + + bp, _ := NewBatchPoints(BatchPointsConfig{}) + + for i := 0; i < 9; i++ { + bp.AddPoint(p) + } + + if err := cl.Write(bp); err != nil { + t.Fatalf("Unexpected error during Write: %v", err) + } + + if len(logger.writes) != 5 { + t.Errorf("Mismatched write count: got %v, exp %v", len(logger.writes), 5) + } +} + +func TestUDPClient_Split(t *testing.T) { + var logger writeLogger + var cl udpclient + + cl.conn = &logger + cl.payloadSize = 1 // force one field per point + + fields := map[string]interface{}{"a": 1, "b": 2, "c": 3, "d": 4} + + p, _ := NewPoint("cpu", nil, fields, time.Unix(1, 0)) + + bp, _ := NewBatchPoints(BatchPointsConfig{}) + + bp.AddPoint(p) + + if err := cl.Write(bp); err != nil { + t.Fatalf("Unexpected error during Write: %v", err) + } + + if len(logger.writes) != len(fields) { + t.Errorf("Mismatched write count: got %v, exp %v", len(logger.writes), len(fields)) + } +} + +type writeLogger struct { + writes [][]byte +} + +func (w *writeLogger) Write(b []byte) (int, error) { + w.writes = append(w.writes, append([]byte(nil), b...)) + return len(b), nil +} + +func (w *writeLogger) Close() error { return nil } + +func TestClient_Query(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var data Response + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + _ = json.NewEncoder(w).Encode(data) + })) + defer ts.Close() + + config := HTTPConfig{Addr: ts.URL} + c, _ := NewHTTPClient(config) + defer c.Close() + + query := Query{} + _, err := c.Query(query) + if err != nil { + t.Errorf("unexpected error. expected %v, actual %v", nil, err) + } +} + +func TestClient_QueryWithRP(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + params := r.URL.Query() + if got, exp := params.Get("db"), "db0"; got != exp { + t.Errorf("unexpected db query parameter: %s != %s", exp, got) + } + if got, exp := params.Get("rp"), "rp0"; got != exp { + t.Errorf("unexpected rp query parameter: %s != %s", exp, got) + } + var data Response + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + _ = json.NewEncoder(w).Encode(data) + })) + defer ts.Close() + + config := HTTPConfig{Addr: ts.URL} + c, _ := NewHTTPClient(config) + defer c.Close() + + query := NewQueryWithRP("SELECT * FROM m0", "db0", "rp0", "") + _, err := c.Query(query) + if err != nil { + t.Errorf("unexpected error. 
expected %v, actual %v", nil, err) + } +} + +func TestClientDownstream500WithBody_Query(t *testing.T) { + const err500page = ` + + 500 Internal Server Error + + Internal Server Error +` + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusInternalServerError) + w.Write([]byte(err500page)) + })) + defer ts.Close() + + config := HTTPConfig{Addr: ts.URL} + c, _ := NewHTTPClient(config) + defer c.Close() + + query := Query{} + _, err := c.Query(query) + + expected := fmt.Sprintf("received status code 500 from downstream server, with response body: %q", err500page) + if err.Error() != expected { + t.Errorf("unexpected error. expected %v, actual %v", expected, err) + } +} + +func TestClientDownstream500_Query(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusInternalServerError) + })) + defer ts.Close() + + config := HTTPConfig{Addr: ts.URL} + c, _ := NewHTTPClient(config) + defer c.Close() + + query := Query{} + _, err := c.Query(query) + + expected := "received status code 500 from downstream server" + if err.Error() != expected { + t.Errorf("unexpected error. expected %v, actual %v", expected, err) + } +} + +func TestClientDownstream400WithBody_Query(t *testing.T) { + const err403page = ` + + 403 Forbidden + + Forbidden +` + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusForbidden) + w.Write([]byte(err403page)) + })) + defer ts.Close() + + config := HTTPConfig{Addr: ts.URL} + c, _ := NewHTTPClient(config) + defer c.Close() + + query := Query{} + _, err := c.Query(query) + + expected := fmt.Sprintf(`expected json response, got "text/html", with status: %v and response body: %q`, http.StatusForbidden, err403page) + if err.Error() != expected { + t.Errorf("unexpected error. expected %v, actual %v", expected, err) + } +} + +func TestClientDownstream400_Query(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusForbidden) + })) + defer ts.Close() + + config := HTTPConfig{Addr: ts.URL} + c, _ := NewHTTPClient(config) + defer c.Close() + + query := Query{} + _, err := c.Query(query) + + expected := fmt.Sprintf(`expected json response, got empty body, with status: %v`, http.StatusForbidden) + if err.Error() != expected { + t.Errorf("unexpected error. expected %v, actual %v", expected, err) + } +} + +func TestClient500_Query(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.Header().Set("X-Influxdb-Version", "1.3.1") + w.WriteHeader(http.StatusInternalServerError) + w.Write([]byte(`{"error":"test"}`)) + })) + defer ts.Close() + + config := HTTPConfig{Addr: ts.URL} + c, _ := NewHTTPClient(config) + defer c.Close() + + query := Query{} + resp, err := c.Query(query) + + if err != nil { + t.Errorf("unexpected error. expected nothing, actual %v", err) + } + + if resp.Err != "test" { + t.Errorf(`unexpected response error. 
expected "test", actual %v`, resp.Err) + } +} + +func TestClient_ChunkedQuery(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var data Response + w.Header().Set("Content-Type", "application/json") + w.Header().Set("X-Influxdb-Version", "1.3.1") + w.WriteHeader(http.StatusOK) + enc := json.NewEncoder(w) + _ = enc.Encode(data) + _ = enc.Encode(data) + })) + defer ts.Close() + + config := HTTPConfig{Addr: ts.URL} + c, err := NewHTTPClient(config) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + + query := Query{Chunked: true} + _, err = c.Query(query) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } +} + +func TestClientDownstream500WithBody_ChunkedQuery(t *testing.T) { + const err500page = ` + + 500 Internal Server Error + + Internal Server Error +` + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusInternalServerError) + w.Write([]byte(err500page)) + })) + defer ts.Close() + + config := HTTPConfig{Addr: ts.URL} + c, err := NewHTTPClient(config) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + + query := Query{Chunked: true} + _, err = c.Query(query) + + expected := fmt.Sprintf("received status code 500 from downstream server, with response body: %q", err500page) + if err.Error() != expected { + t.Errorf("unexpected error. expected %v, actual %v", expected, err) + } +} + +func TestClientDownstream500_ChunkedQuery(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusInternalServerError) + })) + defer ts.Close() + + config := HTTPConfig{Addr: ts.URL} + c, _ := NewHTTPClient(config) + defer c.Close() + + query := Query{Chunked: true} + _, err := c.Query(query) + + expected := "received status code 500 from downstream server" + if err.Error() != expected { + t.Errorf("unexpected error. expected %v, actual %v", expected, err) + } +} + +func TestClient500_ChunkedQuery(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.Header().Set("X-Influxdb-Version", "1.3.1") + w.WriteHeader(http.StatusInternalServerError) + w.Write([]byte(`{"error":"test"}`)) + })) + defer ts.Close() + + config := HTTPConfig{Addr: ts.URL} + c, _ := NewHTTPClient(config) + defer c.Close() + + query := Query{Chunked: true} + resp, err := c.Query(query) + + if err != nil { + t.Errorf("unexpected error. expected nothing, actual %v", err) + } + + if resp.Err != "test" { + t.Errorf(`unexpected response error. expected "test", actual %v`, resp.Err) + } +} + +func TestClientDownstream400WithBody_ChunkedQuery(t *testing.T) { + const err403page = ` + + 403 Forbidden + + Forbidden +` + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusForbidden) + w.Write([]byte(err403page)) + })) + defer ts.Close() + + config := HTTPConfig{Addr: ts.URL} + c, _ := NewHTTPClient(config) + defer c.Close() + + query := Query{Chunked: true} + _, err := c.Query(query) + + expected := fmt.Sprintf(`expected json response, got "text/html", with status: %v and response body: %q`, http.StatusForbidden, err403page) + if err.Error() != expected { + t.Errorf("unexpected error. 
expected %v, actual %v", expected, err) + } +} + +func TestClientDownstream400_ChunkedQuery(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusForbidden) + })) + defer ts.Close() + + config := HTTPConfig{Addr: ts.URL} + c, _ := NewHTTPClient(config) + defer c.Close() + + query := Query{Chunked: true} + _, err := c.Query(query) + + expected := fmt.Sprintf(`expected json response, got empty body, with status: %v`, http.StatusForbidden) + if err.Error() != expected { + t.Errorf("unexpected error. expected %v, actual %v", expected, err) + } +} + +func TestClient_BoundParameters(t *testing.T) { + var parameterString string + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var data Response + r.ParseForm() + parameterString = r.FormValue("params") + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + _ = json.NewEncoder(w).Encode(data) + })) + defer ts.Close() + + config := HTTPConfig{Addr: ts.URL} + c, _ := NewHTTPClient(config) + defer c.Close() + + expectedParameters := map[string]interface{}{ + "testStringParameter": "testStringValue", + "testNumberParameter": 12.3, + } + + query := Query{ + Parameters: expectedParameters, + } + + _, err := c.Query(query) + if err != nil { + t.Errorf("unexpected error. expected %v, actual %v", nil, err) + } + + var actualParameters map[string]interface{} + + err = json.Unmarshal([]byte(parameterString), &actualParameters) + if err != nil { + t.Errorf("unexpected error. expected %v, actual %v", nil, err) + } + + if !reflect.DeepEqual(expectedParameters, actualParameters) { + t.Errorf("unexpected parameters. expected %v, actual %v", expectedParameters, actualParameters) + } +} + +func TestClient_BasicAuth(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + u, p, ok := r.BasicAuth() + + if !ok { + t.Errorf("basic auth error") + } + if u != "username" { + t.Errorf("unexpected username, expected %q, actual %q", "username", u) + } + if p != "password" { + t.Errorf("unexpected password, expected %q, actual %q", "password", p) + } + var data Response + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + _ = json.NewEncoder(w).Encode(data) + })) + defer ts.Close() + + config := HTTPConfig{Addr: ts.URL, Username: "username", Password: "password"} + c, _ := NewHTTPClient(config) + defer c.Close() + + query := Query{} + _, err := c.Query(query) + if err != nil { + t.Errorf("unexpected error. expected %v, actual %v", nil, err) + } +} + +func TestClient_Ping(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var data Response + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusNoContent) + _ = json.NewEncoder(w).Encode(data) + })) + defer ts.Close() + + config := HTTPConfig{Addr: ts.URL} + c, _ := NewHTTPClient(config) + defer c.Close() + + _, _, err := c.Ping(0) + if err != nil { + t.Errorf("unexpected error. 
expected %v, actual %v", nil, err) + } +} + +func TestClient_Concurrent_Use(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{}`)) + })) + defer ts.Close() + + config := HTTPConfig{Addr: ts.URL} + c, _ := NewHTTPClient(config) + defer c.Close() + + var wg sync.WaitGroup + wg.Add(3) + n := 1000 + + errC := make(chan error) + go func() { + defer wg.Done() + bp, err := NewBatchPoints(BatchPointsConfig{}) + if err != nil { + errC <- fmt.Errorf("got error %v", err) + return + } + + for i := 0; i < n; i++ { + if err = c.Write(bp); err != nil { + errC <- fmt.Errorf("got error %v", err) + return + } + } + }() + + go func() { + defer wg.Done() + var q Query + for i := 0; i < n; i++ { + if _, err := c.Query(q); err != nil { + errC <- fmt.Errorf("got error %v", err) + return + } + } + }() + + go func() { + defer wg.Done() + for i := 0; i < n; i++ { + c.Ping(time.Second) + } + }() + + go func() { + wg.Wait() + close(errC) + }() + + for err := range errC { + if err != nil { + t.Error(err) + } + } +} + +func TestClient_Write(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + in, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } else if have, want := strings.TrimSpace(string(in)), `m0,host=server01 v1=2,v2=2i,v3=2u,v4="foobar",v5=true 0`; have != want { + t.Errorf("unexpected write protocol: %s != %s", have, want) + } + var data Response + w.WriteHeader(http.StatusNoContent) + _ = json.NewEncoder(w).Encode(data) + })) + defer ts.Close() + + config := HTTPConfig{Addr: ts.URL} + c, _ := NewHTTPClient(config) + defer c.Close() + + bp, err := NewBatchPoints(BatchPointsConfig{}) + if err != nil { + t.Errorf("unexpected error. expected %v, actual %v", nil, err) + } + pt, err := NewPoint( + "m0", + map[string]string{ + "host": "server01", + }, + map[string]interface{}{ + "v1": float64(2), + "v2": int64(2), + "v3": uint64(2), + "v4": "foobar", + "v5": true, + }, + time.Unix(0, 0).UTC(), + ) + if err != nil { + t.Errorf("unexpected error. expected %v, actual %v", nil, err) + } + bp.AddPoint(pt) + err = c.Write(bp) + if err != nil { + t.Errorf("unexpected error. expected %v, actual %v", nil, err) + } +} + +func TestClient_UserAgent(t *testing.T) { + receivedUserAgent := "" + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + receivedUserAgent = r.UserAgent() + + var data Response + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + _ = json.NewEncoder(w).Encode(data) + })) + defer ts.Close() + + _, err := http.Get(ts.URL) + if err != nil { + t.Errorf("unexpected error. expected %v, actual %v", nil, err) + } + + tests := []struct { + name string + userAgent string + expected string + }{ + { + name: "Empty user agent", + userAgent: "", + expected: "InfluxDBClient", + }, + { + name: "Custom user agent", + userAgent: "Test Influx Client", + expected: "Test Influx Client", + }, + } + + for _, test := range tests { + + config := HTTPConfig{Addr: ts.URL, UserAgent: test.userAgent} + c, _ := NewHTTPClient(config) + defer c.Close() + + receivedUserAgent = "" + query := Query{} + _, err = c.Query(query) + if err != nil { + t.Errorf("unexpected error. expected %v, actual %v", nil, err) + } + if !strings.HasPrefix(receivedUserAgent, test.expected) { + t.Errorf("Unexpected user agent. 
expected %v, actual %v", test.expected, receivedUserAgent) + } + + receivedUserAgent = "" + bp, _ := NewBatchPoints(BatchPointsConfig{}) + err = c.Write(bp) + if err != nil { + t.Errorf("unexpected error. expected %v, actual %v", nil, err) + } + if !strings.HasPrefix(receivedUserAgent, test.expected) { + t.Errorf("Unexpected user agent. expected %v, actual %v", test.expected, receivedUserAgent) + } + + receivedUserAgent = "" + _, err := c.Query(query) + if err != nil { + t.Errorf("unexpected error. expected %v, actual %v", nil, err) + } + if receivedUserAgent != test.expected { + t.Errorf("Unexpected user agent. expected %v, actual %v", test.expected, receivedUserAgent) + } + } +} + +func TestClient_PointString(t *testing.T) { + const shortForm = "2006-Jan-02" + time1, _ := time.Parse(shortForm, "2013-Feb-03") + tags := map[string]string{"cpu": "cpu-total"} + fields := map[string]interface{}{"idle": 10.1, "system": 50.9, "user": 39.0} + p, _ := NewPoint("cpu_usage", tags, fields, time1) + + s := "cpu_usage,cpu=cpu-total idle=10.1,system=50.9,user=39 1359849600000000000" + if p.String() != s { + t.Errorf("Point String Error, got %s, expected %s", p.String(), s) + } + + s = "cpu_usage,cpu=cpu-total idle=10.1,system=50.9,user=39 1359849600000" + if p.PrecisionString("ms") != s { + t.Errorf("Point String Error, got %s, expected %s", + p.PrecisionString("ms"), s) + } +} + +func TestClient_PointWithoutTimeString(t *testing.T) { + tags := map[string]string{"cpu": "cpu-total"} + fields := map[string]interface{}{"idle": 10.1, "system": 50.9, "user": 39.0} + p, _ := NewPoint("cpu_usage", tags, fields) + + s := "cpu_usage,cpu=cpu-total idle=10.1,system=50.9,user=39" + if p.String() != s { + t.Errorf("Point String Error, got %s, expected %s", p.String(), s) + } + + if p.PrecisionString("ms") != s { + t.Errorf("Point String Error, got %s, expected %s", + p.PrecisionString("ms"), s) + } +} + +func TestClient_PointName(t *testing.T) { + tags := map[string]string{"cpu": "cpu-total"} + fields := map[string]interface{}{"idle": 10.1, "system": 50.9, "user": 39.0} + p, _ := NewPoint("cpu_usage", tags, fields) + + exp := "cpu_usage" + if p.Name() != exp { + t.Errorf("Error, got %s, expected %s", + p.Name(), exp) + } +} + +func TestClient_PointTags(t *testing.T) { + tags := map[string]string{"cpu": "cpu-total"} + fields := map[string]interface{}{"idle": 10.1, "system": 50.9, "user": 39.0} + p, _ := NewPoint("cpu_usage", tags, fields) + + if !reflect.DeepEqual(tags, p.Tags()) { + t.Errorf("Error, got %v, expected %v", + p.Tags(), tags) + } +} + +func TestClient_PointUnixNano(t *testing.T) { + const shortForm = "2006-Jan-02" + time1, _ := time.Parse(shortForm, "2013-Feb-03") + tags := map[string]string{"cpu": "cpu-total"} + fields := map[string]interface{}{"idle": 10.1, "system": 50.9, "user": 39.0} + p, _ := NewPoint("cpu_usage", tags, fields, time1) + + exp := int64(1359849600000000000) + if p.UnixNano() != exp { + t.Errorf("Error, got %d, expected %d", + p.UnixNano(), exp) + } +} + +func TestClient_PointFields(t *testing.T) { + tags := map[string]string{"cpu": "cpu-total"} + fields := map[string]interface{}{"idle": 10.1, "system": 50.9, "user": 39.0} + p, _ := NewPoint("cpu_usage", tags, fields) + + pfields, err := p.Fields() + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(fields, pfields) { + t.Errorf("Error, got %v, expected %v", + pfields, fields) + } +} + +func TestBatchPoints_PrecisionError(t *testing.T) { + _, err := NewBatchPoints(BatchPointsConfig{Precision: "foobar"}) + if err == nil { + 
t.Errorf("Precision: foobar should have errored") + } + + bp, _ := NewBatchPoints(BatchPointsConfig{Precision: "ns"}) + err = bp.SetPrecision("foobar") + if err == nil { + t.Errorf("Precision: foobar should have errored") + } +} + +func TestBatchPoints_SettersGetters(t *testing.T) { + bp, _ := NewBatchPoints(BatchPointsConfig{ + Precision: "ns", + Database: "db", + RetentionPolicy: "rp", + WriteConsistency: "wc", + }) + if bp.Precision() != "ns" { + t.Errorf("Expected: %s, got %s", bp.Precision(), "ns") + } + if bp.Database() != "db" { + t.Errorf("Expected: %s, got %s", bp.Database(), "db") + } + if bp.RetentionPolicy() != "rp" { + t.Errorf("Expected: %s, got %s", bp.RetentionPolicy(), "rp") + } + if bp.WriteConsistency() != "wc" { + t.Errorf("Expected: %s, got %s", bp.WriteConsistency(), "wc") + } + + bp.SetDatabase("db2") + bp.SetRetentionPolicy("rp2") + bp.SetWriteConsistency("wc2") + err := bp.SetPrecision("s") + if err != nil { + t.Errorf("Did not expect error: %s", err.Error()) + } + + if bp.Precision() != "s" { + t.Errorf("Expected: %s, got %s", bp.Precision(), "s") + } + if bp.Database() != "db2" { + t.Errorf("Expected: %s, got %s", bp.Database(), "db2") + } + if bp.RetentionPolicy() != "rp2" { + t.Errorf("Expected: %s, got %s", bp.RetentionPolicy(), "rp2") + } + if bp.WriteConsistency() != "wc2" { + t.Errorf("Expected: %s, got %s", bp.WriteConsistency(), "wc2") + } +} + +func TestClientConcatURLPath(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if !strings.Contains(r.URL.String(), "/influxdbproxy/ping") || strings.Contains(r.URL.String(), "/ping/ping") { + t.Errorf("unexpected error. expected %v contains in %v", "/influxdbproxy/ping", r.URL) + } + var data Response + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusNoContent) + _ = json.NewEncoder(w).Encode(data) + })) + defer ts.Close() + + url, _ := url.Parse(ts.URL) + url.Path = path.Join(url.Path, "influxdbproxy") + + fmt.Println("TestClientConcatURLPath: concat with path 'influxdbproxy' result ", url.String()) + + c, _ := NewHTTPClient(HTTPConfig{Addr: url.String()}) + defer c.Close() + + _, _, err := c.Ping(0) + if err != nil { + t.Errorf("unexpected error. expected %v, actual %v", nil, err) + } + + _, _, err = c.Ping(0) + if err != nil { + t.Errorf("unexpected error. 
expected %v, actual %v", nil, err) + } +} + +func TestClientProxy(t *testing.T) { + pinged := false + ts := httptest.NewServer(http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) { + if got, want := req.URL.String(), "http://example.com:8086/ping"; got != want { + t.Errorf("invalid url in request: got=%s want=%s", got, want) + } + resp.WriteHeader(http.StatusNoContent) + pinged = true + })) + defer ts.Close() + + proxyURL, _ := url.Parse(ts.URL) + c, _ := NewHTTPClient(HTTPConfig{ + Addr: "http://example.com:8086", + Proxy: http.ProxyURL(proxyURL), + }) + if _, _, err := c.Ping(0); err != nil { + t.Fatalf("could not ping server: %s", err) + } + + if !pinged { + t.Fatalf("no http request was received") + } +} diff --git a/vendor/github.com/influxdata/influxdb/client/v2/example_test.go b/vendor/github.com/influxdata/influxdb/client/v2/example_test.go new file mode 100644 index 0000000..68bb24b --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/client/v2/example_test.go @@ -0,0 +1,265 @@ +package client_test + +import ( + "fmt" + "math/rand" + "os" + "time" + + "github.com/influxdata/influxdb/client/v2" +) + +// Create a new client +func ExampleClient() { + // NOTE: this assumes you've setup a user and have setup shell env variables, + // namely INFLUX_USER/INFLUX_PWD. If not just omit Username/Password below. + _, err := client.NewHTTPClient(client.HTTPConfig{ + Addr: "http://localhost:8086", + Username: os.Getenv("INFLUX_USER"), + Password: os.Getenv("INFLUX_PWD"), + }) + if err != nil { + fmt.Println("Error creating InfluxDB Client: ", err.Error()) + } +} + +// Write a point using the UDP client +func ExampleClient_uDP() { + // Make client + config := client.UDPConfig{Addr: "localhost:8089"} + c, err := client.NewUDPClient(config) + if err != nil { + fmt.Println("Error: ", err.Error()) + } + defer c.Close() + + // Create a new point batch + bp, _ := client.NewBatchPoints(client.BatchPointsConfig{ + Precision: "s", + }) + + // Create a point and add to batch + tags := map[string]string{"cpu": "cpu-total"} + fields := map[string]interface{}{ + "idle": 10.1, + "system": 53.3, + "user": 46.6, + } + pt, err := client.NewPoint("cpu_usage", tags, fields, time.Now()) + if err != nil { + fmt.Println("Error: ", err.Error()) + } + bp.AddPoint(pt) + + // Write the batch + c.Write(bp) +} + +// Ping the cluster using the HTTP client +func ExampleClient_Ping() { + // Make client + c, err := client.NewHTTPClient(client.HTTPConfig{ + Addr: "http://localhost:8086", + }) + if err != nil { + fmt.Println("Error creating InfluxDB Client: ", err.Error()) + } + defer c.Close() + + _, _, err = c.Ping(0) + if err != nil { + fmt.Println("Error pinging InfluxDB Cluster: ", err.Error()) + } +} + +// Write a point using the HTTP client +func ExampleClient_write() { + // Make client + c, err := client.NewHTTPClient(client.HTTPConfig{ + Addr: "http://localhost:8086", + }) + if err != nil { + fmt.Println("Error creating InfluxDB Client: ", err.Error()) + } + defer c.Close() + + // Create a new point batch + bp, _ := client.NewBatchPoints(client.BatchPointsConfig{ + Database: "BumbleBeeTuna", + Precision: "s", + }) + + // Create a point and add to batch + tags := map[string]string{"cpu": "cpu-total"} + fields := map[string]interface{}{ + "idle": 10.1, + "system": 53.3, + "user": 46.6, + } + pt, err := client.NewPoint("cpu_usage", tags, fields, time.Now()) + if err != nil { + fmt.Println("Error: ", err.Error()) + } + bp.AddPoint(pt) + + // Write the batch + c.Write(bp) +} + +// Create a batch and add a 
point +func ExampleBatchPoints() { + // Create a new point batch + bp, _ := client.NewBatchPoints(client.BatchPointsConfig{ + Database: "BumbleBeeTuna", + Precision: "s", + }) + + // Create a point and add to batch + tags := map[string]string{"cpu": "cpu-total"} + fields := map[string]interface{}{ + "idle": 10.1, + "system": 53.3, + "user": 46.6, + } + pt, err := client.NewPoint("cpu_usage", tags, fields, time.Now()) + if err != nil { + fmt.Println("Error: ", err.Error()) + } + bp.AddPoint(pt) +} + +// Using the BatchPoints setter functions +func ExampleBatchPoints_setters() { + // Create a new point batch + bp, _ := client.NewBatchPoints(client.BatchPointsConfig{}) + bp.SetDatabase("BumbleBeeTuna") + bp.SetPrecision("ms") + + // Create a point and add to batch + tags := map[string]string{"cpu": "cpu-total"} + fields := map[string]interface{}{ + "idle": 10.1, + "system": 53.3, + "user": 46.6, + } + pt, err := client.NewPoint("cpu_usage", tags, fields, time.Now()) + if err != nil { + fmt.Println("Error: ", err.Error()) + } + bp.AddPoint(pt) +} + +// Create a new point with a timestamp +func ExamplePoint() { + tags := map[string]string{"cpu": "cpu-total"} + fields := map[string]interface{}{ + "idle": 10.1, + "system": 53.3, + "user": 46.6, + } + pt, err := client.NewPoint("cpu_usage", tags, fields, time.Now()) + if err == nil { + fmt.Println("We created a point: ", pt.String()) + } +} + +// Create a new point without a timestamp +func ExamplePoint_withoutTime() { + tags := map[string]string{"cpu": "cpu-total"} + fields := map[string]interface{}{ + "idle": 10.1, + "system": 53.3, + "user": 46.6, + } + pt, err := client.NewPoint("cpu_usage", tags, fields) + if err == nil { + fmt.Println("We created a point w/o time: ", pt.String()) + } +} + +// Write 1000 points +func ExampleClient_write1000() { + sampleSize := 1000 + + // Make client + c, err := client.NewHTTPClient(client.HTTPConfig{ + Addr: "http://localhost:8086", + }) + if err != nil { + fmt.Println("Error creating InfluxDB Client: ", err.Error()) + } + defer c.Close() + + rand.Seed(42) + + bp, _ := client.NewBatchPoints(client.BatchPointsConfig{ + Database: "systemstats", + Precision: "us", + }) + + for i := 0; i < sampleSize; i++ { + regions := []string{"us-west1", "us-west2", "us-west3", "us-east1"} + tags := map[string]string{ + "cpu": "cpu-total", + "host": fmt.Sprintf("host%d", rand.Intn(1000)), + "region": regions[rand.Intn(len(regions))], + } + + idle := rand.Float64() * 100.0 + fields := map[string]interface{}{ + "idle": idle, + "busy": 100.0 - idle, + } + + pt, err := client.NewPoint( + "cpu_usage", + tags, + fields, + time.Now(), + ) + if err != nil { + println("Error:", err.Error()) + continue + } + bp.AddPoint(pt) + } + + err = c.Write(bp) + if err != nil { + fmt.Println("Error: ", err.Error()) + } +} + +// Make a Query +func ExampleClient_query() { + // Make client + c, err := client.NewHTTPClient(client.HTTPConfig{ + Addr: "http://localhost:8086", + }) + if err != nil { + fmt.Println("Error creating InfluxDB Client: ", err.Error()) + } + defer c.Close() + + q := client.NewQuery("SELECT count(value) FROM shapes", "square_holes", "ns") + if response, err := c.Query(q); err == nil && response.Error() == nil { + fmt.Println(response.Results) + } +} + +// Create a Database with a query +func ExampleClient_createDatabase() { + // Make client + c, err := client.NewHTTPClient(client.HTTPConfig{ + Addr: "http://localhost:8086", + }) + if err != nil { + fmt.Println("Error creating InfluxDB Client: ", err.Error()) + } + defer 
c.Close() + + q := client.NewQuery("CREATE DATABASE telegraf", "", "") + if response, err := c.Query(q); err == nil && response.Error() == nil { + fmt.Println(response.Results) + } +} diff --git a/vendor/github.com/influxdata/influxdb/client/v2/udp.go b/vendor/github.com/influxdata/influxdb/client/v2/udp.go new file mode 100644 index 0000000..779a28b --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/client/v2/udp.go @@ -0,0 +1,112 @@ +package client + +import ( + "fmt" + "io" + "net" + "time" +) + +const ( + // UDPPayloadSize is a reasonable default payload size for UDP packets that + // could be travelling over the internet. + UDPPayloadSize = 512 +) + +// UDPConfig is the config data needed to create a UDP Client. +type UDPConfig struct { + // Addr should be of the form "host:port" + // or "[ipv6-host%zone]:port". + Addr string + + // PayloadSize is the maximum size of a UDP client message, optional + // Tune this based on your network. Defaults to UDPPayloadSize. + PayloadSize int +} + +// NewUDPClient returns a client interface for writing to an InfluxDB UDP +// service from the given config. +func NewUDPClient(conf UDPConfig) (Client, error) { + var udpAddr *net.UDPAddr + udpAddr, err := net.ResolveUDPAddr("udp", conf.Addr) + if err != nil { + return nil, err + } + + conn, err := net.DialUDP("udp", nil, udpAddr) + if err != nil { + return nil, err + } + + payloadSize := conf.PayloadSize + if payloadSize == 0 { + payloadSize = UDPPayloadSize + } + + return &udpclient{ + conn: conn, + payloadSize: payloadSize, + }, nil +} + +// Close releases the udpclient's resources. +func (uc *udpclient) Close() error { + return uc.conn.Close() +} + +type udpclient struct { + conn io.WriteCloser + payloadSize int +} + +func (uc *udpclient) Write(bp BatchPoints) error { + var b = make([]byte, 0, uc.payloadSize) // initial buffer size, it will grow as needed + var d, _ = time.ParseDuration("1" + bp.Precision()) + + var delayedError error + + var checkBuffer = func(n int) { + if len(b) > 0 && len(b)+n > uc.payloadSize { + if _, err := uc.conn.Write(b); err != nil { + delayedError = err + } + b = b[:0] + } + } + + for _, p := range bp.Points() { + p.pt.Round(d) + pointSize := p.pt.StringSize() + 1 // include newline in size + //point := p.pt.RoundedString(d) + "\n" + + checkBuffer(pointSize) + + if p.Time().IsZero() || pointSize <= uc.payloadSize { + b = p.pt.AppendString(b) + b = append(b, '\n') + continue + } + + points := p.pt.Split(uc.payloadSize - 1) // account for newline character + for _, sp := range points { + checkBuffer(sp.StringSize() + 1) + b = sp.AppendString(b) + b = append(b, '\n') + } + } + + if len(b) > 0 { + if _, err := uc.conn.Write(b); err != nil { + return err + } + } + return delayedError +} + +func (uc *udpclient) Query(q Query) (*Response, error) { + return nil, fmt.Errorf("Querying via UDP is not supported") +} + +func (uc *udpclient) Ping(timeout time.Duration) (time.Duration, string, error) { + return 0, "", nil +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx-tools/export/README.md b/vendor/github.com/influxdata/influxdb/cmd/influx-tools/export/README.md new file mode 100644 index 0000000..ed4915c --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx-tools/export/README.md @@ -0,0 +1,76 @@ +`influx-tools export` +===================== + +Used with `influx-tools import`, the export tool transforms existing shards to a new shard duration in order to +consolidate into fewer shards. 
It is also possible to separate into a greater number of shards. + + +Field type conflicts +-------------------- + +A field type for a given measurement can be different per shard. This creates the potential for field type conflicts +when exporting new shard durations. If this happens, the field type will be determined by the first shard containing +data for that field in order to fulfil the target shard duration. All conflicting data will be written as line protocol +and gzipped to the path specified by `-conflict-path`, unless the `-no-conflict-path` option is specified. + + +Range +----- + +The optional `-range ` option specifies which target shards should be exported, based on their sequence number. +A use case for the `range` option is to parallelize the reshaping of a large data set. A machine with 4 cores could run two +export / import jobs concurrently over a subset of the total target shards. + +The sequence number is included in the plan output. For example: + +```sh +$ influx-tools export -config config.toml -database foo -rp autogen -duration 24h -format=binary -no-conflict-path -print-only +Source data from: 2018-02-19 00:00:00 +0000 UTC -> 2018-04-09 00:00:00 +0000 UTC + +Converting source from 4 shard group(s) to 28 shard groups + +Seq # ID Start End +0 608 2018-02-19 00:00:00 +0000 UTC 2018-02-26 00:00:00 +0000 UTC +1 609 2018-03-19 00:00:00 +0000 UTC 2018-03-26 00:00:00 +0000 UTC +2 610 2018-03-26 00:00:00 +0000 UTC 2018-04-02 00:00:00 +0000 UTC +3 612 2018-04-02 00:00:00 +0000 UTC 2018-04-09 00:00:00 +0000 UTC + +Seq # ID Start End +0 0 2018-02-19 00:00:00 +0000 UTC 2018-02-20 00:00:00 +0000 UTC +1 1 2018-02-20 00:00:00 +0000 UTC 2018-02-21 00:00:00 +0000 UTC +2 2 2018-02-21 00:00:00 +0000 UTC 2018-02-22 00:00:00 +0000 UTC +3 3 2018-02-22 00:00:00 +0000 UTC 2018-02-23 00:00:00 +0000 UTC +4 4 2018-02-23 00:00:00 +0000 UTC 2018-02-24 00:00:00 +0000 UTC +5 5 2018-02-24 00:00:00 +0000 UTC 2018-02-25 00:00:00 +0000 UTC +6 6 2018-02-25 00:00:00 +0000 UTC 2018-02-26 00:00:00 +0000 UTC +7 28 2018-03-19 00:00:00 +0000 UTC 2018-03-20 00:00:00 +0000 UTC +8 29 2018-03-20 00:00:00 +0000 UTC 2018-03-21 00:00:00 +0000 UTC +9 30 2018-03-21 00:00:00 +0000 UTC 2018-03-22 00:00:00 +0000 UTC +... +26 47 2018-04-07 00:00:00 +0000 UTC 2018-04-08 00:00:00 +0000 UTC +27 48 2018-04-08 00:00:00 +0000 UTC 2018-04-09 00:00:00 +0000 UTC +``` + +Adding `-range 2-4` would return the following plan: + +```sh +$ influx-tools export -config config.toml -database foo -rp autogen -duration 24h -format=binary -no-conflict-path -print-only -range=2-4 +Source data from: 2018-02-19 00:00:00 +0000 UTC -> 2018-04-09 00:00:00 +0000 UTC + +Converting source from 4 shard group(s) to 3 shard groups + +Seq # ID Start End +0 608 2018-02-19 00:00:00 +0000 UTC 2018-02-26 00:00:00 +0000 UTC +1 609 2018-03-19 00:00:00 +0000 UTC 2018-03-26 00:00:00 +0000 UTC +2 610 2018-03-26 00:00:00 +0000 UTC 2018-04-02 00:00:00 +0000 UTC +3 612 2018-04-02 00:00:00 +0000 UTC 2018-04-09 00:00:00 +0000 UTC + +Seq # ID Start End +2 2 2018-02-21 00:00:00 +0000 UTC 2018-02-22 00:00:00 +0000 UTC +3 3 2018-02-22 00:00:00 +0000 UTC 2018-02-23 00:00:00 +0000 UTC +4 4 2018-02-23 00:00:00 +0000 UTC 2018-02-24 00:00:00 +0000 UTC +``` + +A range can either be a single sequence number or an interval as shown previously. + +**Hint**: Include the `-print-only` option to display the plan and exit without exporting any data. 
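+
+Example invocation
+------------------
+
+As an illustrative sketch (the config path, database, and retention policy
+names below are placeholders), a binary export can be piped directly into
+`influx-tools import`, since the exporter writes to stdout and the importer
+reads from stdin:
+
+```sh
+$ influx-tools export -config config.toml -database foo -rp autogen \
+    -duration 24h -format=binary -no-conflict-path \
+  | influx-tools import -config config.toml -database foo -rp autogen \
+    -shard-duration 24h
+```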
\ No newline at end of file diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx-tools/export/bucket.go b/vendor/github.com/influxdata/influxdb/cmd/influx-tools/export/bucket.go new file mode 100644 index 0000000..9319353 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx-tools/export/bucket.go @@ -0,0 +1,48 @@ +package export + +import ( + "time" + + "github.com/influxdata/influxdb/services/meta" +) + +func makeShardGroupsForDuration(min, max time.Time, d time.Duration) meta.ShardGroupInfos { + start := min.Truncate(d).UTC() + end := max.Truncate(d).Add(d).UTC() + + groups := make(meta.ShardGroupInfos, end.Sub(start)/d) + var i uint64 + for start.Before(end) { + groups[i] = meta.ShardGroupInfo{ + ID: i, + StartTime: start, + EndTime: start.Add(d), + } + i++ + start = start.Add(d) + } + return groups[:i] +} + +// PlanShardGroups creates a new ShardGroup set using a shard group duration of d, for the time spanning min to max. +func planShardGroups(sourceShards []meta.ShardGroupInfo, min, max time.Time, d time.Duration) meta.ShardGroupInfos { + groups := makeShardGroupsForDuration(min, max, d) + var target []meta.ShardGroupInfo + for i := 0; i < len(groups); i++ { + g := groups[i] + // NOTE: EndTime.Add(-1) matches the Contains interval of [start, end) + if hasShardsGroupForTimeRange(sourceShards, g.StartTime, g.EndTime.Add(-1)) { + target = append(target, g) + } + } + return target +} + +func hasShardsGroupForTimeRange(groups []meta.ShardGroupInfo, min, max time.Time) bool { + for _, g := range groups { + if g.Overlaps(min, max) { + return true + } + } + return false +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx-tools/export/bucket_test.go b/vendor/github.com/influxdata/influxdb/cmd/influx-tools/export/bucket_test.go new file mode 100644 index 0000000..df92457 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx-tools/export/bucket_test.go @@ -0,0 +1,116 @@ +package export + +import ( + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/influxdata/influxdb/services/meta" +) + +func ts(s int64) time.Time { return time.Unix(s, 0).UTC() } + +func ms(times ...int64) meta.ShardGroupInfos { + sgis := make(meta.ShardGroupInfos, len(times)-1) + for i := range sgis { + sgis[i] = meta.ShardGroupInfo{ID: uint64(i), StartTime: ts(times[i]), EndTime: ts(times[i+1])} + } + return sgis +} + +func TestMakeShardGroupsForDuration(t *testing.T) { + tests := []struct { + name string + min time.Time + max time.Time + d time.Duration + exp meta.ShardGroupInfos + }{ + { + min: ts(15), + max: ts(25), + d: 10 * time.Second, + exp: ms(10, 20, 30), + }, + { + min: ts(15), + max: ts(20), + d: 10 * time.Second, + exp: ms(10, 20, 30), + }, + { + min: ts(15), + max: ts(17), + d: 10 * time.Second, + exp: ms(10, 20), + }, + { + min: ts(10), + max: ts(20), + d: 10 * time.Second, + exp: ms(10, 20, 30), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := makeShardGroupsForDuration(tt.min, tt.max, tt.d); !cmp.Equal(got, tt.exp) { + t.Errorf("unexpected value -got/+exp\n%s", cmp.Diff(got, tt.exp)) + } + }) + } +} + +func ms2(times ...int64) meta.ShardGroupInfos { + sgis := make(meta.ShardGroupInfos, len(times)/2) + for i := 0; i < len(times); i += 2 { + sgis[i/2] = meta.ShardGroupInfo{ID: uint64(i / 2), StartTime: ts(times[i]), EndTime: ts(times[i+1])} + } + return sgis +} + +func shardGroupEqual(x, y meta.ShardGroupInfo) bool { + return x.StartTime == y.StartTime && x.EndTime == y.EndTime +} + +func 
TestPlanShardGroups(t *testing.T) { + tests := []struct { + name string + g meta.ShardGroupInfos + d time.Duration + exp meta.ShardGroupInfos + }{ + { + name: "20s->10s.nogap", + g: ms2(20, 40, 40, 60, 60, 80), + d: 10 * time.Second, + exp: ms2(20, 30, 30, 40, 40, 50, 50, 60, 60, 70, 70, 80), + }, + { + name: "20s->10s.gap", + g: ms2(20, 40, 60, 80, 80, 100), + d: 10 * time.Second, + exp: ms2(20, 30, 30, 40, 60, 70, 70, 80, 80, 90, 90, 100), + }, + { + name: "05s->10s.nogap", + g: ms2(15, 20, 20, 25, 25, 30), + d: 10 * time.Second, + exp: ms2(10, 20, 20, 30), + }, + { + name: "05s->10s.gap", + g: ms2(15, 20, 20, 25, 50, 55, 55, 60), + d: 10 * time.Second, + exp: ms2(10, 20, 20, 30, 50, 60), + }, + } + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + min, max := tc.g[0].StartTime, tc.g[len(tc.g)-1].EndTime + got := planShardGroups(tc.g, min, max, tc.d) + if !cmp.Equal(got, tc.exp, cmp.Comparer(shardGroupEqual)) { + t.Errorf("unexpected value -got/+exp\n%s", cmp.Diff(got, tc.exp)) + } + }) + } +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx-tools/export/command.go b/vendor/github.com/influxdata/influxdb/cmd/influx-tools/export/command.go new file mode 100644 index 0000000..6d6cdfc --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx-tools/export/command.go @@ -0,0 +1,217 @@ +package export + +import ( + "compress/gzip" + "errors" + "flag" + "fmt" + "io" + "math" + "os" + "strconv" + "strings" + "time" + + "github.com/influxdata/influxdb/cmd/influx-tools/internal/format" + "github.com/influxdata/influxdb/cmd/influx-tools/internal/format/binary" + "github.com/influxdata/influxdb/cmd/influx-tools/internal/format/line" + "github.com/influxdata/influxdb/cmd/influx-tools/internal/format/text" + "github.com/influxdata/influxdb/cmd/influx-tools/server" + "go.uber.org/zap" +) + +var ( + _ line.Writer + _ binary.Writer +) + +// Command represents the program execution for "store query". +type Command struct { + // Standard input/output, overridden for testing. + Stderr io.Writer + Stdout io.Writer + Logger *zap.Logger + server server.Interface + + conflicts io.WriteCloser + + configPath string + database string + rp string + shardDuration time.Duration + format string + r rangeValue + conflictPath string + ignore bool + print bool +} + +// NewCommand returns a new instance of the export Command. +func NewCommand(server server.Interface) *Command { + return &Command{ + Stderr: os.Stderr, + Stdout: os.Stdout, + server: server, + } +} + +// Run executes the export command using the specified args. 
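+// It opens the server from the given config, plans the target shard groups and
+// prints the plan to Stderr; unless -print-only is set, the exported data is
+// then streamed to Stdout using the writer selected by -format, with field
+// type conflicts redirected to the gzipped -conflict-path file when enabled.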
+func (cmd *Command) Run(args []string) (err error) { + err = cmd.parseFlags(args) + if err != nil { + return err + } + + err = cmd.server.Open(cmd.configPath) + if err != nil { + return err + } + defer cmd.server.Close() + + e, err := cmd.openExporter() + if err != nil { + return err + } + defer e.Close() + + e.PrintPlan(cmd.Stderr) + + if cmd.print { + return nil + } + + if !cmd.ignore { + if f, err := os.Create(cmd.conflictPath); err != nil { + return err + } else { + cmd.conflicts = gzip.NewWriter(f) + defer func() { + cmd.conflicts.Close() + f.Close() + }() + } + } + + var wr format.Writer + switch cmd.format { + case "line": + wr = line.NewWriter(cmd.Stdout) + case "binary": + wr = binary.NewWriter(cmd.Stdout, cmd.database, cmd.rp, cmd.shardDuration) + case "series": + wr = text.NewWriter(cmd.Stdout, text.Series) + case "values": + wr = text.NewWriter(cmd.Stdout, text.Values) + case "discard": + wr = format.Discard + } + defer func() { + err = wr.Close() + }() + + if cmd.conflicts != nil { + wr = format.NewConflictWriter(wr, line.NewWriter(cmd.conflicts)) + } else { + wr = format.NewConflictWriter(wr, format.DevNull) + } + + return e.WriteTo(wr) +} + +func (cmd *Command) openExporter() (*exporter, error) { + cfg := &exporterConfig{Database: cmd.database, RP: cmd.rp, ShardDuration: cmd.shardDuration, Min: cmd.r.Min(), Max: cmd.r.Max()} + e, err := newExporter(cmd.server, cfg) + if err != nil { + return nil, err + } + + return e, e.Open() +} + +func (cmd *Command) parseFlags(args []string) error { + fs := flag.NewFlagSet("export", flag.ContinueOnError) + fs.StringVar(&cmd.configPath, "config", "", "Config file") + fs.StringVar(&cmd.database, "database", "", "Database name") + fs.StringVar(&cmd.rp, "rp", "", "Retention policy name") + fs.StringVar(&cmd.format, "format", "line", "Output format (line, binary)") + fs.StringVar(&cmd.conflictPath, "conflict-path", "", "File name for writing field conflicts using line protocol and gzipped") + fs.BoolVar(&cmd.ignore, "no-conflict-path", false, "Disable writing field conflicts to a file") + fs.Var(&cmd.r, "range", "Range of target shards to export (default: all)") + fs.BoolVar(&cmd.print, "print-only", false, "Print plan to stderr and exit") + fs.DurationVar(&cmd.shardDuration, "duration", time.Hour*24*7, "Target shard duration") + + if err := fs.Parse(args); err != nil { + return err + } + + if cmd.database == "" { + return errors.New("database is required") + } + + switch cmd.format { + case "line", "binary", "series", "values", "discard": + default: + return fmt.Errorf("invalid format '%s'", cmd.format) + } + + if cmd.conflictPath == "" && !cmd.ignore { + return errors.New("missing conflict-path") + } + + return nil +} + +type rangeValue struct { + min, max uint64 + set bool +} + +func (rv *rangeValue) Min() uint64 { return rv.min } + +func (rv *rangeValue) Max() uint64 { + if !rv.set { + return math.MaxUint64 + } + return rv.max +} + +func (rv *rangeValue) String() string { + if rv.Min() == rv.Max() { + return fmt.Sprint(rv.min) + } + return fmt.Sprintf("[%d,%d]", rv.Min(), rv.Max()) +} + +func (rv *rangeValue) Set(v string) (err error) { + p := strings.Split(v, "-") + switch { + case len(p) == 1: + rv.min, err = strconv.ParseUint(p[0], 10, 64) + if err != nil { + return fmt.Errorf("range error: invalid number %s", v) + } + rv.max = rv.min + case len(p) == 2: + rv.min, err = strconv.ParseUint(p[0], 10, 64) + if err != nil { + return fmt.Errorf("range error: min value %q is not a positive number", p[0]) + } + rv.max = math.MaxUint64 + if 
len(p[1]) > 0 { + rv.max, err = strconv.ParseUint(p[1], 10, 64) + if err != nil { + return fmt.Errorf("range error: max value %q is not empty or a positive number", p[1]) + } + } + default: + return fmt.Errorf("range error: %q is not a valid range", v) + } + + if rv.min > rv.max { + return errors.New("range error: min > max") + } + + rv.set = true + + return nil +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx-tools/export/exporter.go b/vendor/github.com/influxdata/influxdb/cmd/influx-tools/export/exporter.go new file mode 100644 index 0000000..8f419f6 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx-tools/export/exporter.go @@ -0,0 +1,234 @@ +package export + +import ( + "context" + "fmt" + "io" + "sort" + "text/tabwriter" + "time" + + "github.com/influxdata/influxdb/cmd/influx-tools/internal/format" + "github.com/influxdata/influxdb/cmd/influx-tools/internal/storage" + "github.com/influxdata/influxdb/cmd/influx-tools/server" + "github.com/influxdata/influxdb/models" + "github.com/influxdata/influxdb/services/meta" + "github.com/influxdata/influxdb/tsdb" +) + +type exporterConfig struct { + Database string + RP string + ShardDuration time.Duration + Min, Max uint64 +} + +type exporter struct { + metaClient server.MetaClient + tsdbStore *tsdb.Store + store *storage.Store + + min, max uint64 + db, rp string + d time.Duration + sourceGroups []meta.ShardGroupInfo + targetGroups []meta.ShardGroupInfo + + // source data time range + startDate time.Time + endDate time.Time +} + +func newExporter(server server.Interface, cfg *exporterConfig) (*exporter, error) { + client := server.MetaClient() + + dbi := client.Database(cfg.Database) + if dbi == nil { + return nil, fmt.Errorf("database '%s' does not exist", cfg.Database) + } + + if cfg.RP == "" { + // select default RP + cfg.RP = dbi.DefaultRetentionPolicy + } + + rpi, err := client.RetentionPolicy(cfg.Database, cfg.RP) + if rpi == nil || err != nil { + return nil, fmt.Errorf("retention policy '%s' does not exist", cfg.RP) + } + + store := tsdb.NewStore(server.TSDBConfig().Dir) + if server.Logger() != nil { + store.WithLogger(server.Logger()) + } + store.EngineOptions.Config = server.TSDBConfig() + store.EngineOptions.EngineVersion = server.TSDBConfig().Engine + store.EngineOptions.IndexVersion = server.TSDBConfig().Index + store.EngineOptions.DatabaseFilter = func(database string) bool { + return database == cfg.Database + } + store.EngineOptions.RetentionPolicyFilter = func(_, rp string) bool { + return rp == cfg.RP + } + store.EngineOptions.ShardFilter = func(_, _ string, _ uint64) bool { + return false + } + + return &exporter{ + metaClient: client, + tsdbStore: store, + store: &storage.Store{TSDBStore: store}, + min: cfg.Min, + max: cfg.Max, + db: cfg.Database, + rp: cfg.RP, + d: cfg.ShardDuration, + }, nil +} + +func (e *exporter) Open() (err error) { + err = e.tsdbStore.Open() + if err != nil { + return err + } + + err = e.loadShardGroups() + if err != nil { + return err + } + + e.targetGroups = planShardGroups(e.sourceGroups, e.startDate, e.endDate, e.d) + if e.max >= uint64(len(e.targetGroups)) { + e.max = uint64(len(e.targetGroups) - 1) + } + if e.min > e.max { + return fmt.Errorf("invalid shard group range %d to %d", e.min, e.max) + } + + e.targetGroups = e.targetGroups[e.min : e.max+1] + + return nil +} + +func (e *exporter) PrintPlan(w io.Writer) { + fmt.Fprintf(w, "Source data from: %s -> %s\n\n", e.startDate, e.endDate) + fmt.Fprintf(w, "Converting source from %d shard group(s) to %d shard 
groups:\n\n", len(e.sourceGroups), len(e.targetGroups)) + e.printShardGroups(w, 0, e.sourceGroups) + fmt.Fprintln(w) + e.printShardGroups(w, int(e.min), e.targetGroups) +} + +func (e *exporter) printShardGroups(w io.Writer, base int, target []meta.ShardGroupInfo) { + tw := tabwriter.NewWriter(w, 10, 8, 1, '\t', 0) + fmt.Fprintln(tw, "Seq #\tID\tStart\tEnd") + for i := 0; i < len(target); i++ { + g := target[i] + fmt.Fprintf(tw, "%d\t%d\t%s\t%s\n", i+base, g.ID, g.StartTime, g.EndTime) + } + tw.Flush() +} + +func (e *exporter) SourceTimeRange() (time.Time, time.Time) { return e.startDate, e.endDate } +func (e *exporter) SourceShardGroups() []meta.ShardGroupInfo { return e.sourceGroups } +func (e *exporter) TargetShardGroups() []meta.ShardGroupInfo { return e.targetGroups } + +func (e *exporter) loadShardGroups() error { + min := time.Unix(0, models.MinNanoTime) + max := time.Unix(0, models.MaxNanoTime) + + groups, err := e.metaClient.ShardGroupsByTimeRange(e.db, e.rp, min, max) + if err != nil { + return err + } + + if len(groups) == 0 { + return nil + } + + sort.Sort(meta.ShardGroupInfos(groups)) + e.sourceGroups = groups + e.startDate = groups[0].StartTime + e.endDate = groups[len(groups)-1].EndTime + + return nil +} + +func (e *exporter) shardsGroupsByTimeRange(min, max time.Time) []meta.ShardGroupInfo { + groups := make([]meta.ShardGroupInfo, 0, len(e.sourceGroups)) + for _, g := range e.sourceGroups { + if !g.Overlaps(min, max) { + continue + } + groups = append(groups, g) + } + return groups +} + +func (e *exporter) WriteTo(w format.Writer) error { + for _, g := range e.targetGroups { + min, max := g.StartTime, g.EndTime + rs, err := e.read(min, max.Add(-1)) + if err != nil || rs == nil { + return err + } + + format.WriteBucket(w, min.UnixNano(), max.UnixNano(), rs) + rs.Close() + } + return nil +} + +// Read creates a ResultSet that reads all points with a timestamp ts, such that start ≤ ts < end. +func (e *exporter) read(min, max time.Time) (*storage.ResultSet, error) { + shards, err := e.getShards(min, max) + if err != nil { + return nil, err + } + + req := storage.ReadRequest{ + Database: e.db, + RP: e.rp, + Shards: shards, + Start: min.UnixNano(), + End: max.UnixNano(), + } + + return e.store.Read(context.Background(), &req) +} + +func (e *exporter) Close() error { + return e.tsdbStore.Close() +} + +func (e *exporter) getShards(min, max time.Time) ([]*tsdb.Shard, error) { + groups := e.shardsGroupsByTimeRange(min, max) + var ids []uint64 + for _, g := range groups { + for _, s := range g.Shards { + ids = append(ids, s.ID) + } + } + + shards := e.tsdbStore.Shards(ids) + if len(shards) == len(ids) { + return shards, nil + } + + return e.openStoreWithShardsIDs(ids) +} + +func (e *exporter) openStoreWithShardsIDs(ids []uint64) ([]*tsdb.Shard, error) { + e.tsdbStore.Close() + e.tsdbStore.EngineOptions.ShardFilter = func(_, _ string, id uint64) bool { + for i := range ids { + if id == ids[i] { + return true + } + } + return false + } + if err := e.tsdbStore.Open(); err != nil { + return nil, err + } + return e.tsdbStore.Shards(ids), nil +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx-tools/help/help.go b/vendor/github.com/influxdata/influxdb/cmd/influx-tools/help/help.go new file mode 100644 index 0000000..25bf5d6 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx-tools/help/help.go @@ -0,0 +1,40 @@ +// Package help is the help subcommand of the influxd command. 
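+// It prints the usage text describing the available influx-tools sub-commands.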
+package help + +import ( + "fmt" + "io" + "os" + "strings" +) + +// Command displays help for command-line sub-commands. +type Command struct { + Stdout io.Writer +} + +// NewCommand returns a new instance of Command. +func NewCommand() *Command { + return &Command{ + Stdout: os.Stdout, + } +} + +// Run executes the command. +func (cmd *Command) Run(args ...string) error { + fmt.Fprintln(cmd.Stdout, strings.TrimSpace(usage)) + return nil +} + +const usage = ` +Tools for managing and querying InfluxDB data. + +Usage: influx-tools command [arguments] + +The commands are: + + export downloads a snapshot of a data node and saves it to disk + help display this help message + +Use "influx-tools command -help" for more information about a command. +` diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx-tools/importer/README.md b/vendor/github.com/influxdata/influxdb/cmd/influx-tools/importer/README.md new file mode 100644 index 0000000..8fe66dc --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx-tools/importer/README.md @@ -0,0 +1,12 @@ +`influx-tools import` +===================== + +The import tool consumes binary data produced by `influx-tools export -format +binary` to write data directly to disk possibly under a new retention policy. +This tool handles the binary format only - exports of line protocol data should +be handled using the existing endpoints. Influx should be offline while this +tool is run. + +If the target retention policy already exists, the tool will error out if you +attempt to change the retention policy settings. However, it is possible to +replace on disk shards with the `-replace` option. \ No newline at end of file diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx-tools/importer/command.go b/vendor/github.com/influxdata/influxdb/cmd/influx-tools/importer/command.go new file mode 100644 index 0000000..91cde56 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx-tools/importer/command.go @@ -0,0 +1,144 @@ +package importer + +import ( + "errors" + "flag" + "io" + "os" + "time" + + "github.com/influxdata/influxdb/cmd/influx-tools/internal/errlist" + + "github.com/influxdata/influxdb/cmd/influx-tools/internal/format/binary" + "github.com/influxdata/influxdb/cmd/influx-tools/server" + "github.com/influxdata/influxdb/services/meta" + "github.com/influxdata/influxdb/tsdb/engine/tsm1" + "go.uber.org/zap" +) + +// Command represents the program execution for "store query". +type Command struct { + // Standard input/output, overridden for testing. + Stderr io.Writer + Stdin io.Reader + Logger *zap.Logger + server server.Interface + + configPath string + database string + retentionPolicy string + replication int + duration time.Duration + shardDuration time.Duration + buildTSI bool + replace bool +} + +// NewCommand returns a new instance of Command. +func NewCommand(server server.Interface) *Command { + return &Command{ + Stderr: os.Stderr, + Stdin: os.Stdin, + server: server, + } +} + +// Run executes the import command using the specified args. 
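+// It reads the binary export stream from Stdin, creates the target database
+// and retention policy if they do not already exist, and then writes each
+// exported bucket into a shard group of the target retention policy.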
+func (cmd *Command) Run(args []string) (err error) { + err = cmd.parseFlags(args) + if err != nil { + return err + } + + err = cmd.server.Open(cmd.configPath) + if err != nil { + return err + } + + i := newImporter(cmd.server, cmd.database, cmd.retentionPolicy, cmd.replace, cmd.buildTSI, cmd.Logger) + + reader := binary.NewReader(cmd.Stdin) + _, err = reader.ReadHeader() + if err != nil { + return err + } + + rp := &meta.RetentionPolicySpec{Name: cmd.retentionPolicy, ShardGroupDuration: cmd.shardDuration} + if cmd.duration >= time.Hour { + rp.Duration = &cmd.duration + } + if cmd.replication > 0 { + rp.ReplicaN = &cmd.replication + } + err = i.CreateDatabase(rp) + if err != nil { + return err + } + + var bh *binary.BucketHeader + for bh, err = reader.NextBucket(); (bh != nil) && (err == nil); bh, err = reader.NextBucket() { + err = importShard(reader, i, bh.Start, bh.End) + if err != nil { + return err + } + } + + return err +} + +func importShard(reader *binary.Reader, i *importer, start int64, end int64) error { + err := i.StartShardGroup(start, end) + if err != nil { + return err + } + + el := errlist.NewErrorList() + var sh *binary.SeriesHeader + var next bool + for sh, err = reader.NextSeries(); (sh != nil) && (err == nil); sh, err = reader.NextSeries() { + i.AddSeries(sh.SeriesKey) + pr := reader.Points() + seriesFieldKey := tsm1.SeriesFieldKeyBytes(string(sh.SeriesKey), string(sh.Field)) + + for next, err = pr.Next(); next && (err == nil); next, err = pr.Next() { + err = i.Write(seriesFieldKey, pr.Values()) + if err != nil { + break + } + } + if err != nil { + break + } + } + + el.Add(err) + el.Add(i.CloseShardGroup()) + + return el.Err() +} + +func (cmd *Command) parseFlags(args []string) error { + fs := flag.NewFlagSet("import", flag.ContinueOnError) + fs.StringVar(&cmd.configPath, "config", "", "Config file") + fs.StringVar(&cmd.database, "database", "", "Database name") + fs.StringVar(&cmd.retentionPolicy, "rp", "", "Retention policy") + fs.IntVar(&cmd.replication, "replication", 0, "Retention policy replication") + fs.DurationVar(&cmd.duration, "duration", time.Hour*0, "Retention policy duration") + fs.DurationVar(&cmd.shardDuration, "shard-duration", time.Hour*24*7, "Retention policy shard duration") + fs.BoolVar(&cmd.buildTSI, "build-tsi", false, "Build the on disk TSI") + fs.BoolVar(&cmd.replace, "replace", false, "Enables replacing an existing retention policy") + + if err := fs.Parse(args); err != nil { + return err + } + + if cmd.database == "" { + return errors.New("database is required") + } + + if cmd.retentionPolicy == "" { + return errors.New("retention policy is required") + } + + return nil +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx-tools/importer/importer.go b/vendor/github.com/influxdata/influxdb/cmd/influx-tools/importer/importer.go new file mode 100644 index 0000000..b266a17 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx-tools/importer/importer.go @@ -0,0 +1,232 @@ +package importer + +import ( + "errors" + "fmt" + "os" + "path/filepath" + "strconv" + "time" + + "github.com/influxdata/influxdb/cmd/influx-tools/internal/errlist" + "github.com/influxdata/influxdb/cmd/influx-tools/server" + "github.com/influxdata/influxdb/services/meta" + "github.com/influxdata/influxdb/tsdb" + "github.com/influxdata/influxdb/tsdb/engine/tsm1" + "go.uber.org/zap" +) + +type importer struct { + MetaClient server.MetaClient + db string + dataDir string + replace bool + + rpi *meta.RetentionPolicyInfo + log *zap.Logger + skipShard bool + 
currentShard uint64 + sh *shardWriter + sfile *tsdb.SeriesFile + sw *seriesWriter + buildTsi bool + seriesBuf []byte +} + +const seriesBatchSize = 1000 + +func newImporter(server server.Interface, db string, rp string, replace bool, buildTsi bool, log *zap.Logger) *importer { + i := &importer{MetaClient: server.MetaClient(), db: db, dataDir: server.TSDBConfig().Dir, replace: replace, buildTsi: buildTsi, log: log, skipShard: false} + + if !buildTsi { + i.seriesBuf = make([]byte, 0, 2048) + } + return i +} + +func (i *importer) Close() error { + el := errlist.NewErrorList() + if i.sh != nil { + el.Add(i.CloseShardGroup()) + } + return el.Err() +} + +func (i *importer) CreateDatabase(rp *meta.RetentionPolicySpec) error { + var rpi *meta.RetentionPolicyInfo + dbInfo := i.MetaClient.Database(i.db) + if dbInfo == nil { + return i.createDatabaseWithRetentionPolicy(rp) + } + + rpi, err := i.MetaClient.RetentionPolicy(i.db, rp.Name) + if err != nil { + return err + } + + nonmatchingRp := (rpi != nil) && ((rp.Duration != nil && rpi.Duration != *rp.Duration) || + (rp.ReplicaN != nil && rpi.ReplicaN != *rp.ReplicaN) || + (rpi.ShardGroupDuration != rp.ShardGroupDuration)) + if nonmatchingRp { + return fmt.Errorf("retention policy %v already exists with different parameters", rp.Name) + } else { + if _, err := i.MetaClient.CreateRetentionPolicy(i.db, rp, false); err != nil { + return err + } + } + + i.rpi, err = i.MetaClient.RetentionPolicy(i.db, rp.Name) + return err +} + +func (i *importer) createDatabaseWithRetentionPolicy(rp *meta.RetentionPolicySpec) error { + var err error + var dbInfo *meta.DatabaseInfo + if len(rp.Name) == 0 { + dbInfo, err = i.MetaClient.CreateDatabase(i.db) + } else { + dbInfo, err = i.MetaClient.CreateDatabaseWithRetentionPolicy(i.db, rp) + } + if err != nil { + return err + } + i.rpi = dbInfo.RetentionPolicy(rp.Name) + return nil +} + +func (i *importer) StartShardGroup(start int64, end int64) error { + existingSg, err := i.MetaClient.ShardGroupsByTimeRange(i.db, i.rpi.Name, time.Unix(0, start), time.Unix(0, end)) + if err != nil { + return err + } + + var sgi *meta.ShardGroupInfo + var shardID uint64 + + shardsPath := i.shardPath(i.rpi.Name) + var shardPath string + if len(existingSg) > 0 { + sgi = &existingSg[0] + if len(sgi.Shards) > 1 { + return fmt.Errorf("multiple shards for the same owner %v and time range %v to %v", sgi.Shards[0].Owners, start, end) + } + + shardID = sgi.Shards[0].ID + + shardPath = filepath.Join(shardsPath, strconv.Itoa(int(shardID))) + _, err = os.Stat(shardPath) + if err != nil { + if !os.IsNotExist(err) { + return err + } + } else { + if i.replace { + if err := os.RemoveAll(shardPath); err != nil { + return err + } + } else { + if i.log != nil { + i.log.Error(fmt.Sprintf("shard %d already exists, skipping over new shard data", sgi.ID)) + } + i.skipShard = true + return nil + } + } + } else { + sgi, err = i.MetaClient.CreateShardGroup(i.db, i.rpi.Name, time.Unix(0, start)) + if err != nil { + return err + } + shardID = sgi.Shards[0].ID + } + + shardPath = filepath.Join(shardsPath, strconv.Itoa(int(shardID))) + if err = os.MkdirAll(shardPath, 0777); err != nil { + return err + } + + i.skipShard = false + i.sh = newShardWriter(shardID, shardsPath) + i.currentShard = shardID + + i.startSeriesFile() + return nil +} + +func (i *importer) shardPath(rp string) string { + return filepath.Join(i.dataDir, i.db, rp) +} + +func (i *importer) removeShardGroup(rp string, shardID uint64) error { + shardPath := i.shardPath(rp) + err := 
os.RemoveAll(filepath.Join(shardPath, strconv.Itoa(int(shardID)))) + return err +} + +func (i *importer) Write(key []byte, values tsm1.Values) error { + if i.skipShard { + return nil + } + if i.sh == nil { + return errors.New("importer not currently writing a shard") + } + i.sh.Write(key, values) + if i.sh.err != nil { + el := errlist.NewErrorList() + el.Add(i.sh.err) + el.Add(i.CloseShardGroup()) + el.Add(i.removeShardGroup(i.rpi.Name, i.currentShard)) + i.sh = nil + i.currentShard = 0 + return el.Err() + } + return nil +} + +func (i *importer) CloseShardGroup() error { + if i.skipShard { + i.skipShard = false + return nil + } + el := errlist.NewErrorList() + el.Add(i.closeSeriesFile()) + i.sh.Close() + if i.sh.err != nil { + el.Add(i.sh.err) + } + i.sh = nil + return el.Err() +} + +func (i *importer) startSeriesFile() error { + dataPath := filepath.Join(i.dataDir, i.db) + shardPath := filepath.Join(i.dataDir, i.db, i.rpi.Name) + + i.sfile = tsdb.NewSeriesFile(filepath.Join(dataPath, tsdb.SeriesFileDirectory)) + if err := i.sfile.Open(); err != nil { + return err + } + + var err error + if i.buildTsi { + i.sw, err = newTSI1SeriesWriter(i.sfile, i.db, dataPath, shardPath, int(i.sh.id)) + } else { + i.sw, err = newInMemSeriesWriter(i.sfile, i.db, dataPath, shardPath, int(i.sh.id), i.seriesBuf) + } + + if err != nil { + return err + } + return nil +} + +func (i *importer) AddSeries(seriesKey []byte) error { + if i.skipShard { + return nil + } + return i.sw.AddSeries(seriesKey) +} + +func (i *importer) closeSeriesFile() error { + return i.sw.Close() +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx-tools/importer/series_writer.go b/vendor/github.com/influxdata/influxdb/cmd/influx-tools/importer/series_writer.go new file mode 100644 index 0000000..158405a --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx-tools/importer/series_writer.go @@ -0,0 +1,114 @@ +package importer + +import ( + "fmt" + "path/filepath" + "strconv" + + "github.com/influxdata/influxdb/cmd/influx-tools/internal/errlist" + "github.com/influxdata/influxdb/models" + "github.com/influxdata/influxdb/tsdb" + "github.com/influxdata/influxdb/tsdb/engine/tsm1" + "github.com/influxdata/influxdb/tsdb/index/tsi1" +) + +type seriesWriter struct { + keys [][]byte + names [][]byte + tags []models.Tags + seriesBatchSize int + sfile *tsdb.SeriesFile + idx seriesIndex +} + +func newInMemSeriesWriter(sfile *tsdb.SeriesFile, db string, dataPath string, shardPath string, shardID int, buf []byte) (*seriesWriter, error) { + return &seriesWriter{seriesBatchSize: seriesBatchSize, sfile: sfile, idx: &seriesFileAdapter{sf: sfile, buf: buf}}, nil +} + +func newTSI1SeriesWriter(sfile *tsdb.SeriesFile, db string, dataPath string, shardPath string, shardID int) (*seriesWriter, error) { + ti := tsi1.NewIndex(sfile, db, tsi1.WithPath(filepath.Join(shardPath, strconv.Itoa(shardID), "index"))) + if err := ti.Open(); err != nil { + return nil, fmt.Errorf("error opening TSI1 index %d: %s", shardID, err.Error()) + } + + return &seriesWriter{seriesBatchSize: seriesBatchSize, sfile: sfile, idx: &tsi1Adapter{ti: ti}}, nil +} + +func (sw *seriesWriter) AddSeries(key []byte) error { + seriesKey, _ := tsm1.SeriesAndFieldFromCompositeKey(key) + sw.keys = append(sw.keys, seriesKey) + + name, tag := models.ParseKeyBytes(seriesKey) + sw.names = append(sw.names, name) + sw.tags = append(sw.tags, tag) + + if len(sw.keys) == sw.seriesBatchSize { + if err := sw.idx.CreateSeriesListIfNotExists(sw.keys, sw.names, sw.tags); err != nil { + 
return err + } + sw.keys = sw.keys[:0] + sw.names = sw.names[:0] + sw.tags = sw.tags[:0] + } + + return nil +} + +func (sw *seriesWriter) Close() error { + el := errlist.NewErrorList() + el.Add(sw.idx.CreateSeriesListIfNotExists(sw.keys, sw.names, sw.tags)) + el.Add(sw.idx.Compact()) + el.Add(sw.idx.Close()) + el.Add(sw.sfile.Close()) + return el.Err() +} + +type seriesIndex interface { + CreateSeriesListIfNotExists(keys [][]byte, names [][]byte, tagsSlice []models.Tags) (err error) + Compact() error + Close() error +} + +type seriesFileAdapter struct { + sf *tsdb.SeriesFile + buf []byte +} + +func (s *seriesFileAdapter) CreateSeriesListIfNotExists(keys [][]byte, names [][]byte, tagsSlice []models.Tags) (err error) { + _, err = s.sf.CreateSeriesListIfNotExists(names, tagsSlice) + return err +} + +func (s *seriesFileAdapter) Compact() error { + parts := s.sf.Partitions() + for i, p := range parts { + c := tsdb.NewSeriesPartitionCompactor() + if err := c.Compact(p); err != nil { + return fmt.Errorf("error compacting series partition %d: %s", i, err.Error()) + } + } + + return nil +} + +func (s *seriesFileAdapter) Close() error { + return nil +} + +type tsi1Adapter struct { + ti *tsi1.Index +} + +func (t *tsi1Adapter) CreateSeriesListIfNotExists(keys [][]byte, names [][]byte, tagsSlice []models.Tags) (err error) { + return t.ti.CreateSeriesListIfNotExists(keys, names, tagsSlice) +} + +func (t *tsi1Adapter) Compact() error { + t.ti.Compact() + t.ti.Wait() + return nil +} + +func (t *tsi1Adapter) Close() error { + return t.ti.Close() +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx-tools/importer/shard_writer.go b/vendor/github.com/influxdata/influxdb/cmd/influx-tools/importer/shard_writer.go new file mode 100644 index 0000000..1896117 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx-tools/importer/shard_writer.go @@ -0,0 +1,96 @@ +package importer + +import ( + "fmt" + "os" + "path/filepath" + "strconv" + + "github.com/influxdata/influxdb/cmd/influx-tools/internal/errlist" + "github.com/influxdata/influxdb/tsdb/engine/tsm1" +) + +const ( + maxTSMFileSize = uint32(2048 * 1024 * 1024) // 2GB +) + +type shardWriter struct { + w tsm1.TSMWriter + id uint64 + path string + gen, seq int + err error +} + +func newShardWriter(id uint64, path string) *shardWriter { + t := &shardWriter{id: id, path: path, gen: 1, seq: 1} + return t +} + +func (t *shardWriter) Write(key []byte, values tsm1.Values) { + if t.err != nil { + return + } + + if t.w == nil { + t.nextTSM() + } + + if t.w.Size() > maxTSMFileSize { + t.closeTSM() + t.nextTSM() + } + + if err := t.w.Write(key, values); err != nil { + if err == tsm1.ErrMaxBlocksExceeded { + t.closeTSM() + t.nextTSM() + } else { + t.err = err + } + } +} + +func (t *shardWriter) Close() { + if t.w != nil { + t.closeTSM() + } +} + +func (t *shardWriter) Err() error { return t.err } + +func (t *shardWriter) nextTSM() { + fileName := filepath.Join(t.path, strconv.Itoa(int(t.id)), fmt.Sprintf("%09d-%09d.%s", t.gen, t.seq, tsm1.TSMFileExtension)) + t.seq++ + + fd, err := os.OpenFile(fileName, os.O_CREATE|os.O_RDWR, 0666) + if err != nil { + t.err = err + return + } + + // Create the writer for the new TSM file. 
+ t.w, err = tsm1.NewTSMWriter(fd) + if err != nil { + t.err = err + return + } +} + +func (t *shardWriter) closeTSM() { + el := errlist.NewErrorList() + if err := t.w.WriteIndex(); err != nil && err != tsm1.ErrNoValues { + el.Add(err) + } + + if err := t.w.Close(); err != nil { + el.Add(err) + } + + err := el.Err() + if err != nil { + t.err = err + } + + t.w = nil +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx-tools/internal/errlist/errlist.go b/vendor/github.com/influxdata/influxdb/cmd/influx-tools/internal/errlist/errlist.go new file mode 100644 index 0000000..fe96f99 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx-tools/internal/errlist/errlist.go @@ -0,0 +1,37 @@ +package errlist + +import ( + "bytes" +) + +// ErrorList is a simple error aggregator to return multiple errors as one. +type ErrorList struct { + errs []error +} + +func NewErrorList() *ErrorList { + return &ErrorList{errs: make([]error, 0)} +} + +func (el *ErrorList) Add(err error) { + if err == nil { + return + } + el.errs = append(el.errs, err) +} + +func (el *ErrorList) Err() error { + if len(el.errs) == 0 { + return nil + } + return el +} + +func (el *ErrorList) Error() string { + var buf bytes.Buffer + for _, err := range el.errs { + buf.WriteString(err.Error()) + buf.WriteByte('\n') + } + return buf.String() +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx-tools/internal/format/binary/binary.pb.go b/vendor/github.com/influxdata/influxdb/cmd/influx-tools/internal/format/binary/binary.pb.go new file mode 100644 index 0000000..3d037e6 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx-tools/internal/format/binary/binary.pb.go @@ -0,0 +1,2231 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: binary.proto + +/* + Package binary is a generated protocol buffer package. + + It is generated from these files: + binary.proto + + It has these top-level messages: + Header + BucketHeader + BucketFooter + FloatPoints + IntegerPoints + UnsignedPoints + BooleanPoints + StringPoints + SeriesHeader + SeriesFooter +*/ +package binary + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" + +import time "time" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf +var _ = time.Kitchen + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type FieldType int32 + +const ( + FloatFieldType FieldType = 0 + IntegerFieldType FieldType = 1 + UnsignedFieldType FieldType = 2 + BooleanFieldType FieldType = 3 + StringFieldType FieldType = 4 +) + +var FieldType_name = map[int32]string{ + 0: "FLOAT", + 1: "INTEGER", + 2: "UNSIGNED", + 3: "BOOLEAN", + 4: "STRING", +} +var FieldType_value = map[string]int32{ + "FLOAT": 0, + "INTEGER": 1, + "UNSIGNED": 2, + "BOOLEAN": 3, + "STRING": 4, +} + +func (x FieldType) String() string { + return proto.EnumName(FieldType_name, int32(x)) +} +func (FieldType) EnumDescriptor() ([]byte, []int) { return fileDescriptorBinary, []int{0} } + +type Header_Version int32 + +const ( + Version0 Header_Version = 0 +) + +var Header_Version_name = map[int32]string{ + 0: "VERSION_0", +} +var Header_Version_value = map[string]int32{ + "VERSION_0": 0, +} + +func (x Header_Version) String() string { + return proto.EnumName(Header_Version_name, int32(x)) +} +func (Header_Version) EnumDescriptor() ([]byte, []int) { return fileDescriptorBinary, []int{0, 0} } + +type Header struct { + Version Header_Version `protobuf:"varint,1,opt,name=version,proto3,enum=binary.Header_Version" json:"version,omitempty"` + Database string `protobuf:"bytes,2,opt,name=database,proto3" json:"database,omitempty"` + RetentionPolicy string `protobuf:"bytes,3,opt,name=retention_policy,json=retentionPolicy,proto3" json:"retention_policy,omitempty"` + ShardDuration time.Duration `protobuf:"varint,4,opt,name=shard_duration,json=shardDuration,proto3,stdduration" json:"shard_duration,omitempty"` +} + +func (m *Header) Reset() { *m = Header{} } +func (m *Header) String() string { return proto.CompactTextString(m) } +func (*Header) ProtoMessage() {} +func (*Header) Descriptor() ([]byte, []int) { return fileDescriptorBinary, []int{0} } + +type BucketHeader struct { + Start int64 `protobuf:"fixed64,1,opt,name=start,proto3" json:"start,omitempty"` + End int64 `protobuf:"fixed64,2,opt,name=end,proto3" json:"end,omitempty"` +} + +func (m *BucketHeader) Reset() { *m = BucketHeader{} } +func (m *BucketHeader) String() string { return proto.CompactTextString(m) } +func (*BucketHeader) ProtoMessage() {} +func (*BucketHeader) Descriptor() ([]byte, []int) { return fileDescriptorBinary, []int{1} } + +type BucketFooter struct { +} + +func (m *BucketFooter) Reset() { *m = BucketFooter{} } +func (m *BucketFooter) String() string { return proto.CompactTextString(m) } +func (*BucketFooter) ProtoMessage() {} +func (*BucketFooter) Descriptor() ([]byte, []int) { return fileDescriptorBinary, []int{2} } + +type FloatPoints struct { + Timestamps []int64 `protobuf:"fixed64,1,rep,packed,name=timestamps" json:"timestamps,omitempty"` + Values []float64 `protobuf:"fixed64,2,rep,packed,name=values" json:"values,omitempty"` +} + +func (m *FloatPoints) Reset() { *m = FloatPoints{} } +func (m *FloatPoints) String() string { return proto.CompactTextString(m) } +func (*FloatPoints) ProtoMessage() {} +func (*FloatPoints) Descriptor() ([]byte, []int) { return fileDescriptorBinary, []int{3} } + +type IntegerPoints struct { + Timestamps []int64 `protobuf:"fixed64,1,rep,packed,name=timestamps" json:"timestamps,omitempty"` + Values []int64 `protobuf:"varint,2,rep,packed,name=values" json:"values,omitempty"` +} + +func (m *IntegerPoints) Reset() { *m = IntegerPoints{} } +func (m *IntegerPoints) String() string { return proto.CompactTextString(m) } +func (*IntegerPoints) ProtoMessage() {} +func (*IntegerPoints) 
Descriptor() ([]byte, []int) { return fileDescriptorBinary, []int{4} } + +type UnsignedPoints struct { + Timestamps []int64 `protobuf:"fixed64,1,rep,packed,name=timestamps" json:"timestamps,omitempty"` + Values []uint64 `protobuf:"varint,2,rep,packed,name=values" json:"values,omitempty"` +} + +func (m *UnsignedPoints) Reset() { *m = UnsignedPoints{} } +func (m *UnsignedPoints) String() string { return proto.CompactTextString(m) } +func (*UnsignedPoints) ProtoMessage() {} +func (*UnsignedPoints) Descriptor() ([]byte, []int) { return fileDescriptorBinary, []int{5} } + +type BooleanPoints struct { + Timestamps []int64 `protobuf:"fixed64,1,rep,packed,name=timestamps" json:"timestamps,omitempty"` + Values []bool `protobuf:"varint,2,rep,packed,name=values" json:"values,omitempty"` +} + +func (m *BooleanPoints) Reset() { *m = BooleanPoints{} } +func (m *BooleanPoints) String() string { return proto.CompactTextString(m) } +func (*BooleanPoints) ProtoMessage() {} +func (*BooleanPoints) Descriptor() ([]byte, []int) { return fileDescriptorBinary, []int{6} } + +type StringPoints struct { + Timestamps []int64 `protobuf:"fixed64,1,rep,packed,name=timestamps" json:"timestamps,omitempty"` + Values []string `protobuf:"bytes,2,rep,name=values" json:"values,omitempty"` +} + +func (m *StringPoints) Reset() { *m = StringPoints{} } +func (m *StringPoints) String() string { return proto.CompactTextString(m) } +func (*StringPoints) ProtoMessage() {} +func (*StringPoints) Descriptor() ([]byte, []int) { return fileDescriptorBinary, []int{7} } + +type SeriesHeader struct { + FieldType FieldType `protobuf:"varint,1,opt,name=field_type,json=fieldType,proto3,enum=binary.FieldType" json:"field_type,omitempty"` + SeriesKey []byte `protobuf:"bytes,2,opt,name=series_key,json=seriesKey,proto3" json:"series_key,omitempty"` + Field []byte `protobuf:"bytes,3,opt,name=field,proto3" json:"field,omitempty"` +} + +func (m *SeriesHeader) Reset() { *m = SeriesHeader{} } +func (m *SeriesHeader) String() string { return proto.CompactTextString(m) } +func (*SeriesHeader) ProtoMessage() {} +func (*SeriesHeader) Descriptor() ([]byte, []int) { return fileDescriptorBinary, []int{8} } + +type SeriesFooter struct { +} + +func (m *SeriesFooter) Reset() { *m = SeriesFooter{} } +func (m *SeriesFooter) String() string { return proto.CompactTextString(m) } +func (*SeriesFooter) ProtoMessage() {} +func (*SeriesFooter) Descriptor() ([]byte, []int) { return fileDescriptorBinary, []int{9} } + +func init() { + proto.RegisterType((*Header)(nil), "binary.Header") + proto.RegisterType((*BucketHeader)(nil), "binary.BucketHeader") + proto.RegisterType((*BucketFooter)(nil), "binary.BucketFooter") + proto.RegisterType((*FloatPoints)(nil), "binary.FloatPoints") + proto.RegisterType((*IntegerPoints)(nil), "binary.IntegerPoints") + proto.RegisterType((*UnsignedPoints)(nil), "binary.UnsignedPoints") + proto.RegisterType((*BooleanPoints)(nil), "binary.BooleanPoints") + proto.RegisterType((*StringPoints)(nil), "binary.StringPoints") + proto.RegisterType((*SeriesHeader)(nil), "binary.SeriesHeader") + proto.RegisterType((*SeriesFooter)(nil), "binary.SeriesFooter") + proto.RegisterEnum("binary.FieldType", FieldType_name, FieldType_value) + proto.RegisterEnum("binary.Header_Version", Header_Version_name, Header_Version_value) +} +func (m *Header) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Header) MarshalTo(dAtA []byte) 
(int, error) { + var i int + _ = i + var l int + _ = l + if m.Version != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintBinary(dAtA, i, uint64(m.Version)) + } + if len(m.Database) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintBinary(dAtA, i, uint64(len(m.Database))) + i += copy(dAtA[i:], m.Database) + } + if len(m.RetentionPolicy) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintBinary(dAtA, i, uint64(len(m.RetentionPolicy))) + i += copy(dAtA[i:], m.RetentionPolicy) + } + if m.ShardDuration != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintBinary(dAtA, i, uint64(m.ShardDuration)) + } + return i, nil +} + +func (m *BucketHeader) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BucketHeader) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Start != 0 { + dAtA[i] = 0x9 + i++ + i = encodeFixed64Binary(dAtA, i, uint64(m.Start)) + } + if m.End != 0 { + dAtA[i] = 0x11 + i++ + i = encodeFixed64Binary(dAtA, i, uint64(m.End)) + } + return i, nil +} + +func (m *BucketFooter) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BucketFooter) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *FloatPoints) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FloatPoints) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Timestamps) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintBinary(dAtA, i, uint64(len(m.Timestamps)*8)) + for _, num := range m.Timestamps { + dAtA[i] = uint8(num) + i++ + dAtA[i] = uint8(num >> 8) + i++ + dAtA[i] = uint8(num >> 16) + i++ + dAtA[i] = uint8(num >> 24) + i++ + dAtA[i] = uint8(num >> 32) + i++ + dAtA[i] = uint8(num >> 40) + i++ + dAtA[i] = uint8(num >> 48) + i++ + dAtA[i] = uint8(num >> 56) + i++ + } + } + if len(m.Values) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintBinary(dAtA, i, uint64(len(m.Values)*8)) + for _, num := range m.Values { + f1 := math.Float64bits(float64(num)) + dAtA[i] = uint8(f1) + i++ + dAtA[i] = uint8(f1 >> 8) + i++ + dAtA[i] = uint8(f1 >> 16) + i++ + dAtA[i] = uint8(f1 >> 24) + i++ + dAtA[i] = uint8(f1 >> 32) + i++ + dAtA[i] = uint8(f1 >> 40) + i++ + dAtA[i] = uint8(f1 >> 48) + i++ + dAtA[i] = uint8(f1 >> 56) + i++ + } + } + return i, nil +} + +func (m *IntegerPoints) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IntegerPoints) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Timestamps) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintBinary(dAtA, i, uint64(len(m.Timestamps)*8)) + for _, num := range m.Timestamps { + dAtA[i] = uint8(num) + i++ + dAtA[i] = uint8(num >> 8) + i++ + dAtA[i] = uint8(num >> 16) + i++ + dAtA[i] = uint8(num >> 24) + i++ + dAtA[i] = uint8(num >> 32) + i++ + dAtA[i] = uint8(num >> 40) + i++ + dAtA[i] = uint8(num >> 48) + i++ + dAtA[i] = uint8(num >> 56) + i++ + } + } + if len(m.Values) > 0 { + dAtA3 := make([]byte, len(m.Values)*10) + var j2 int + for _, num1 := range m.Values { + num := uint64(num1) + for 
num >= 1<<7 { + dAtA3[j2] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j2++ + } + dAtA3[j2] = uint8(num) + j2++ + } + dAtA[i] = 0x12 + i++ + i = encodeVarintBinary(dAtA, i, uint64(j2)) + i += copy(dAtA[i:], dAtA3[:j2]) + } + return i, nil +} + +func (m *UnsignedPoints) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UnsignedPoints) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Timestamps) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintBinary(dAtA, i, uint64(len(m.Timestamps)*8)) + for _, num := range m.Timestamps { + dAtA[i] = uint8(num) + i++ + dAtA[i] = uint8(num >> 8) + i++ + dAtA[i] = uint8(num >> 16) + i++ + dAtA[i] = uint8(num >> 24) + i++ + dAtA[i] = uint8(num >> 32) + i++ + dAtA[i] = uint8(num >> 40) + i++ + dAtA[i] = uint8(num >> 48) + i++ + dAtA[i] = uint8(num >> 56) + i++ + } + } + if len(m.Values) > 0 { + dAtA5 := make([]byte, len(m.Values)*10) + var j4 int + for _, num := range m.Values { + for num >= 1<<7 { + dAtA5[j4] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j4++ + } + dAtA5[j4] = uint8(num) + j4++ + } + dAtA[i] = 0x12 + i++ + i = encodeVarintBinary(dAtA, i, uint64(j4)) + i += copy(dAtA[i:], dAtA5[:j4]) + } + return i, nil +} + +func (m *BooleanPoints) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BooleanPoints) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Timestamps) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintBinary(dAtA, i, uint64(len(m.Timestamps)*8)) + for _, num := range m.Timestamps { + dAtA[i] = uint8(num) + i++ + dAtA[i] = uint8(num >> 8) + i++ + dAtA[i] = uint8(num >> 16) + i++ + dAtA[i] = uint8(num >> 24) + i++ + dAtA[i] = uint8(num >> 32) + i++ + dAtA[i] = uint8(num >> 40) + i++ + dAtA[i] = uint8(num >> 48) + i++ + dAtA[i] = uint8(num >> 56) + i++ + } + } + if len(m.Values) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintBinary(dAtA, i, uint64(len(m.Values))) + for _, b := range m.Values { + if b { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + } + return i, nil +} + +func (m *StringPoints) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StringPoints) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Timestamps) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintBinary(dAtA, i, uint64(len(m.Timestamps)*8)) + for _, num := range m.Timestamps { + dAtA[i] = uint8(num) + i++ + dAtA[i] = uint8(num >> 8) + i++ + dAtA[i] = uint8(num >> 16) + i++ + dAtA[i] = uint8(num >> 24) + i++ + dAtA[i] = uint8(num >> 32) + i++ + dAtA[i] = uint8(num >> 40) + i++ + dAtA[i] = uint8(num >> 48) + i++ + dAtA[i] = uint8(num >> 56) + i++ + } + } + if len(m.Values) > 0 { + for _, s := range m.Values { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *SeriesHeader) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SeriesHeader) 
MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.FieldType != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintBinary(dAtA, i, uint64(m.FieldType)) + } + if len(m.SeriesKey) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintBinary(dAtA, i, uint64(len(m.SeriesKey))) + i += copy(dAtA[i:], m.SeriesKey) + } + if len(m.Field) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintBinary(dAtA, i, uint64(len(m.Field))) + i += copy(dAtA[i:], m.Field) + } + return i, nil +} + +func (m *SeriesFooter) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SeriesFooter) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func encodeFixed64Binary(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Binary(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintBinary(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *Header) Size() (n int) { + var l int + _ = l + if m.Version != 0 { + n += 1 + sovBinary(uint64(m.Version)) + } + l = len(m.Database) + if l > 0 { + n += 1 + l + sovBinary(uint64(l)) + } + l = len(m.RetentionPolicy) + if l > 0 { + n += 1 + l + sovBinary(uint64(l)) + } + if m.ShardDuration != 0 { + n += 1 + sovBinary(uint64(m.ShardDuration)) + } + return n +} + +func (m *BucketHeader) Size() (n int) { + var l int + _ = l + if m.Start != 0 { + n += 9 + } + if m.End != 0 { + n += 9 + } + return n +} + +func (m *BucketFooter) Size() (n int) { + var l int + _ = l + return n +} + +func (m *FloatPoints) Size() (n int) { + var l int + _ = l + if len(m.Timestamps) > 0 { + n += 1 + sovBinary(uint64(len(m.Timestamps)*8)) + len(m.Timestamps)*8 + } + if len(m.Values) > 0 { + n += 1 + sovBinary(uint64(len(m.Values)*8)) + len(m.Values)*8 + } + return n +} + +func (m *IntegerPoints) Size() (n int) { + var l int + _ = l + if len(m.Timestamps) > 0 { + n += 1 + sovBinary(uint64(len(m.Timestamps)*8)) + len(m.Timestamps)*8 + } + if len(m.Values) > 0 { + l = 0 + for _, e := range m.Values { + l += sovBinary(uint64(e)) + } + n += 1 + sovBinary(uint64(l)) + l + } + return n +} + +func (m *UnsignedPoints) Size() (n int) { + var l int + _ = l + if len(m.Timestamps) > 0 { + n += 1 + sovBinary(uint64(len(m.Timestamps)*8)) + len(m.Timestamps)*8 + } + if len(m.Values) > 0 { + l = 0 + for _, e := range m.Values { + l += sovBinary(uint64(e)) + } + n += 1 + sovBinary(uint64(l)) + l + } + return n +} + +func (m *BooleanPoints) Size() (n int) { + var l int + _ = l + if len(m.Timestamps) > 0 { + n += 1 + sovBinary(uint64(len(m.Timestamps)*8)) + len(m.Timestamps)*8 + } + if len(m.Values) > 0 { + n += 1 + sovBinary(uint64(len(m.Values))) + len(m.Values)*1 + } + return n +} + +func (m *StringPoints) Size() (n int) { + var l int + _ = l + if len(m.Timestamps) > 0 { + n += 1 + sovBinary(uint64(len(m.Timestamps)*8)) + len(m.Timestamps)*8 + } + if len(m.Values) > 0 { 
+ for _, s := range m.Values { + l = len(s) + n += 1 + l + sovBinary(uint64(l)) + } + } + return n +} + +func (m *SeriesHeader) Size() (n int) { + var l int + _ = l + if m.FieldType != 0 { + n += 1 + sovBinary(uint64(m.FieldType)) + } + l = len(m.SeriesKey) + if l > 0 { + n += 1 + l + sovBinary(uint64(l)) + } + l = len(m.Field) + if l > 0 { + n += 1 + l + sovBinary(uint64(l)) + } + return n +} + +func (m *SeriesFooter) Size() (n int) { + var l int + _ = l + return n +} + +func sovBinary(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozBinary(x uint64) (n int) { + return sovBinary(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Header) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinary + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Header: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Header: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + m.Version = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinary + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Version |= (Header_Version(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Database", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinary + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthBinary + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Database = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RetentionPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinary + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthBinary + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RetentionPolicy = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ShardDuration", wireType) + } + m.ShardDuration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinary + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ShardDuration |= (time.Duration(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipBinary(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return 
ErrInvalidLengthBinary + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BucketHeader) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinary + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BucketHeader: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BucketHeader: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Start", wireType) + } + m.Start = 0 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + iNdEx += 8 + m.Start = int64(dAtA[iNdEx-8]) + m.Start |= int64(dAtA[iNdEx-7]) << 8 + m.Start |= int64(dAtA[iNdEx-6]) << 16 + m.Start |= int64(dAtA[iNdEx-5]) << 24 + m.Start |= int64(dAtA[iNdEx-4]) << 32 + m.Start |= int64(dAtA[iNdEx-3]) << 40 + m.Start |= int64(dAtA[iNdEx-2]) << 48 + m.Start |= int64(dAtA[iNdEx-1]) << 56 + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field End", wireType) + } + m.End = 0 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + iNdEx += 8 + m.End = int64(dAtA[iNdEx-8]) + m.End |= int64(dAtA[iNdEx-7]) << 8 + m.End |= int64(dAtA[iNdEx-6]) << 16 + m.End |= int64(dAtA[iNdEx-5]) << 24 + m.End |= int64(dAtA[iNdEx-4]) << 32 + m.End |= int64(dAtA[iNdEx-3]) << 40 + m.End |= int64(dAtA[iNdEx-2]) << 48 + m.End |= int64(dAtA[iNdEx-1]) << 56 + default: + iNdEx = preIndex + skippy, err := skipBinary(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthBinary + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BucketFooter) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinary + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BucketFooter: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BucketFooter: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipBinary(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthBinary + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FloatPoints) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinary + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + 
fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FloatPoints: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FloatPoints: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType == 1 { + var v int64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + iNdEx += 8 + v = int64(dAtA[iNdEx-8]) + v |= int64(dAtA[iNdEx-7]) << 8 + v |= int64(dAtA[iNdEx-6]) << 16 + v |= int64(dAtA[iNdEx-5]) << 24 + v |= int64(dAtA[iNdEx-4]) << 32 + v |= int64(dAtA[iNdEx-3]) << 40 + v |= int64(dAtA[iNdEx-2]) << 48 + v |= int64(dAtA[iNdEx-1]) << 56 + m.Timestamps = append(m.Timestamps, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinary + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthBinary + } + postIndex := iNdEx + packedLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + for iNdEx < postIndex { + var v int64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + iNdEx += 8 + v = int64(dAtA[iNdEx-8]) + v |= int64(dAtA[iNdEx-7]) << 8 + v |= int64(dAtA[iNdEx-6]) << 16 + v |= int64(dAtA[iNdEx-5]) << 24 + v |= int64(dAtA[iNdEx-4]) << 32 + v |= int64(dAtA[iNdEx-3]) << 40 + v |= int64(dAtA[iNdEx-2]) << 48 + v |= int64(dAtA[iNdEx-1]) << 56 + m.Timestamps = append(m.Timestamps, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamps", wireType) + } + case 2: + if wireType == 1 { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + iNdEx += 8 + v = uint64(dAtA[iNdEx-8]) + v |= uint64(dAtA[iNdEx-7]) << 8 + v |= uint64(dAtA[iNdEx-6]) << 16 + v |= uint64(dAtA[iNdEx-5]) << 24 + v |= uint64(dAtA[iNdEx-4]) << 32 + v |= uint64(dAtA[iNdEx-3]) << 40 + v |= uint64(dAtA[iNdEx-2]) << 48 + v |= uint64(dAtA[iNdEx-1]) << 56 + v2 := float64(math.Float64frombits(v)) + m.Values = append(m.Values, v2) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinary + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthBinary + } + postIndex := iNdEx + packedLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + for iNdEx < postIndex { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + iNdEx += 8 + v = uint64(dAtA[iNdEx-8]) + v |= uint64(dAtA[iNdEx-7]) << 8 + v |= uint64(dAtA[iNdEx-6]) << 16 + v |= uint64(dAtA[iNdEx-5]) << 24 + v |= uint64(dAtA[iNdEx-4]) << 32 + v |= uint64(dAtA[iNdEx-3]) << 40 + v |= uint64(dAtA[iNdEx-2]) << 48 + v |= uint64(dAtA[iNdEx-1]) << 56 + v2 := float64(math.Float64frombits(v)) + m.Values = append(m.Values, v2) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) + } + default: + iNdEx = preIndex + skippy, err := skipBinary(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthBinary + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IntegerPoints) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx 
+ var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinary + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IntegerPoints: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IntegerPoints: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType == 1 { + var v int64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + iNdEx += 8 + v = int64(dAtA[iNdEx-8]) + v |= int64(dAtA[iNdEx-7]) << 8 + v |= int64(dAtA[iNdEx-6]) << 16 + v |= int64(dAtA[iNdEx-5]) << 24 + v |= int64(dAtA[iNdEx-4]) << 32 + v |= int64(dAtA[iNdEx-3]) << 40 + v |= int64(dAtA[iNdEx-2]) << 48 + v |= int64(dAtA[iNdEx-1]) << 56 + m.Timestamps = append(m.Timestamps, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinary + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthBinary + } + postIndex := iNdEx + packedLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + for iNdEx < postIndex { + var v int64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + iNdEx += 8 + v = int64(dAtA[iNdEx-8]) + v |= int64(dAtA[iNdEx-7]) << 8 + v |= int64(dAtA[iNdEx-6]) << 16 + v |= int64(dAtA[iNdEx-5]) << 24 + v |= int64(dAtA[iNdEx-4]) << 32 + v |= int64(dAtA[iNdEx-3]) << 40 + v |= int64(dAtA[iNdEx-2]) << 48 + v |= int64(dAtA[iNdEx-1]) << 56 + m.Timestamps = append(m.Timestamps, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamps", wireType) + } + case 2: + if wireType == 0 { + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinary + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Values = append(m.Values, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinary + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthBinary + } + postIndex := iNdEx + packedLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + for iNdEx < postIndex { + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinary + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Values = append(m.Values, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) + } + default: + iNdEx = preIndex + skippy, err := skipBinary(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthBinary + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UnsignedPoints) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for 
shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinary + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UnsignedPoints: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UnsignedPoints: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType == 1 { + var v int64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + iNdEx += 8 + v = int64(dAtA[iNdEx-8]) + v |= int64(dAtA[iNdEx-7]) << 8 + v |= int64(dAtA[iNdEx-6]) << 16 + v |= int64(dAtA[iNdEx-5]) << 24 + v |= int64(dAtA[iNdEx-4]) << 32 + v |= int64(dAtA[iNdEx-3]) << 40 + v |= int64(dAtA[iNdEx-2]) << 48 + v |= int64(dAtA[iNdEx-1]) << 56 + m.Timestamps = append(m.Timestamps, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinary + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthBinary + } + postIndex := iNdEx + packedLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + for iNdEx < postIndex { + var v int64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + iNdEx += 8 + v = int64(dAtA[iNdEx-8]) + v |= int64(dAtA[iNdEx-7]) << 8 + v |= int64(dAtA[iNdEx-6]) << 16 + v |= int64(dAtA[iNdEx-5]) << 24 + v |= int64(dAtA[iNdEx-4]) << 32 + v |= int64(dAtA[iNdEx-3]) << 40 + v |= int64(dAtA[iNdEx-2]) << 48 + v |= int64(dAtA[iNdEx-1]) << 56 + m.Timestamps = append(m.Timestamps, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamps", wireType) + } + case 2: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinary + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Values = append(m.Values, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinary + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthBinary + } + postIndex := iNdEx + packedLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinary + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Values = append(m.Values, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) + } + default: + iNdEx = preIndex + skippy, err := skipBinary(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthBinary + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BooleanPoints) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinary + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BooleanPoints: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BooleanPoints: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType == 1 { + var v int64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + iNdEx += 8 + v = int64(dAtA[iNdEx-8]) + v |= int64(dAtA[iNdEx-7]) << 8 + v |= int64(dAtA[iNdEx-6]) << 16 + v |= int64(dAtA[iNdEx-5]) << 24 + v |= int64(dAtA[iNdEx-4]) << 32 + v |= int64(dAtA[iNdEx-3]) << 40 + v |= int64(dAtA[iNdEx-2]) << 48 + v |= int64(dAtA[iNdEx-1]) << 56 + m.Timestamps = append(m.Timestamps, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinary + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthBinary + } + postIndex := iNdEx + packedLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + for iNdEx < postIndex { + var v int64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + iNdEx += 8 + v = int64(dAtA[iNdEx-8]) + v |= int64(dAtA[iNdEx-7]) << 8 + v |= int64(dAtA[iNdEx-6]) << 16 + v |= int64(dAtA[iNdEx-5]) << 24 + v |= int64(dAtA[iNdEx-4]) << 32 + v |= int64(dAtA[iNdEx-3]) << 40 + v |= int64(dAtA[iNdEx-2]) << 48 + v |= int64(dAtA[iNdEx-1]) << 56 + m.Timestamps = append(m.Timestamps, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamps", wireType) + } + case 2: + if wireType == 0 { + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinary + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Values = append(m.Values, bool(v != 0)) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinary + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthBinary + } + postIndex := iNdEx + packedLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + for iNdEx < postIndex { + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinary + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Values = append(m.Values, bool(v != 0)) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) + } + default: + iNdEx = preIndex + skippy, err := skipBinary(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthBinary + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StringPoints) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { 
+ if shift >= 64 { + return ErrIntOverflowBinary + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StringPoints: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StringPoints: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType == 1 { + var v int64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + iNdEx += 8 + v = int64(dAtA[iNdEx-8]) + v |= int64(dAtA[iNdEx-7]) << 8 + v |= int64(dAtA[iNdEx-6]) << 16 + v |= int64(dAtA[iNdEx-5]) << 24 + v |= int64(dAtA[iNdEx-4]) << 32 + v |= int64(dAtA[iNdEx-3]) << 40 + v |= int64(dAtA[iNdEx-2]) << 48 + v |= int64(dAtA[iNdEx-1]) << 56 + m.Timestamps = append(m.Timestamps, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinary + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthBinary + } + postIndex := iNdEx + packedLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + for iNdEx < postIndex { + var v int64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + iNdEx += 8 + v = int64(dAtA[iNdEx-8]) + v |= int64(dAtA[iNdEx-7]) << 8 + v |= int64(dAtA[iNdEx-6]) << 16 + v |= int64(dAtA[iNdEx-5]) << 24 + v |= int64(dAtA[iNdEx-4]) << 32 + v |= int64(dAtA[iNdEx-3]) << 40 + v |= int64(dAtA[iNdEx-2]) << 48 + v |= int64(dAtA[iNdEx-1]) << 56 + m.Timestamps = append(m.Timestamps, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamps", wireType) + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinary + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthBinary + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Values = append(m.Values, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipBinary(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthBinary + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SeriesHeader) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinary + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SeriesHeader: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SeriesHeader: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return 
fmt.Errorf("proto: wrong wireType = %d for field FieldType", wireType) + } + m.FieldType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinary + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.FieldType |= (FieldType(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SeriesKey", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinary + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthBinary + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SeriesKey = append(m.SeriesKey[:0], dAtA[iNdEx:postIndex]...) + if m.SeriesKey == nil { + m.SeriesKey = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Field", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinary + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthBinary + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Field = append(m.Field[:0], dAtA[iNdEx:postIndex]...) + if m.Field == nil { + m.Field = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipBinary(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthBinary + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SeriesFooter) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinary + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SeriesFooter: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SeriesFooter: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipBinary(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthBinary + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipBinary(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowBinary + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowBinary + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 
0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowBinary + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthBinary + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowBinary + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipBinary(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthBinary = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowBinary = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("binary.proto", fileDescriptorBinary) } + +var fileDescriptorBinary = []byte{ + // 584 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x53, 0xcf, 0x4e, 0xdb, 0x4c, + 0x1c, 0xcc, 0xe2, 0x10, 0x92, 0xdf, 0x67, 0xc2, 0xe2, 0x8f, 0xa2, 0xc8, 0x55, 0x8d, 0x9b, 0x5e, + 0xd2, 0x7f, 0x01, 0xb5, 0x52, 0xef, 0x44, 0x24, 0x21, 0x2a, 0x72, 0xd0, 0x26, 0x70, 0x8d, 0x36, + 0x78, 0x31, 0x16, 0xc1, 0x1b, 0xad, 0xd7, 0x48, 0x7e, 0x83, 0x2a, 0xa7, 0x1e, 0x7b, 0xc9, 0xa9, + 0xaf, 0xd1, 0x07, 0xe0, 0xd8, 0x07, 0xa8, 0xd4, 0x96, 0xbe, 0x48, 0x95, 0x5d, 0xc7, 0xd0, 0x6b, + 0x6e, 0x3b, 0xbf, 0x99, 0x9d, 0xdd, 0xf5, 0x8c, 0xc1, 0x1c, 0x87, 0x11, 0x15, 0x69, 0x73, 0x2a, + 0xb8, 0xe4, 0x56, 0x49, 0x23, 0xfb, 0x6d, 0x10, 0xca, 0xab, 0x64, 0xdc, 0xbc, 0xe0, 0x37, 0xfb, + 0x01, 0x0f, 0xf8, 0xbe, 0xa2, 0xc7, 0xc9, 0xa5, 0x42, 0x0a, 0xa8, 0x95, 0xde, 0x56, 0xff, 0x81, + 0xa0, 0x74, 0xcc, 0xa8, 0xcf, 0x84, 0x75, 0x00, 0x1b, 0xb7, 0x4c, 0xc4, 0x21, 0x8f, 0x6a, 0xc8, + 0x45, 0x8d, 0xea, 0xbb, 0xdd, 0x66, 0x76, 0x82, 0x16, 0x34, 0xcf, 0x35, 0x4b, 0x96, 0x32, 0xcb, + 0x86, 0xb2, 0x4f, 0x25, 0x1d, 0xd3, 0x98, 0xd5, 0xd6, 0x5c, 0xd4, 0xa8, 0x90, 0x1c, 0x5b, 0x2f, + 0x01, 0x0b, 0x26, 0x59, 0x24, 0x43, 0x1e, 0x8d, 0xa6, 0x7c, 0x12, 0x5e, 0xa4, 0x35, 0x43, 0x69, + 0xb6, 0xf2, 0xf9, 0xa9, 0x1a, 0x5b, 0xaf, 0xa1, 0x1a, 0x5f, 0x51, 0xe1, 0x8f, 0xfc, 0x44, 0xd0, + 0xc5, 0xbc, 0x56, 0x74, 0x51, 0xc3, 0x68, 0x15, 0xbf, 0xfc, 0xdc, 0x43, 0x64, 0x53, 0x71, 0x47, + 0x19, 0x55, 0x7f, 0x03, 0x1b, 0xd9, 0x3d, 0xac, 0xa7, 0x50, 0x39, 0x6f, 0x93, 0x41, 0xaf, 0xef, + 0x8d, 0x0e, 0x70, 0xc1, 0x36, 0x67, 0x73, 0xb7, 0x9c, 0x71, 0x07, 0x76, 0xf1, 0xd3, 0x57, 0xa7, + 0x50, 0xff, 0x00, 0x66, 0x2b, 0xb9, 0xb8, 0x66, 0x32, 0x7b, 0xe3, 0x0e, 0xac, 0xc7, 0x92, 0x0a, + 0xa9, 0x5e, 0x88, 0x89, 0x06, 0x16, 0x06, 0x83, 0x45, 0xbe, 0x7a, 0x02, 0x26, 0x8b, 0x65, 0xbd, + 0xba, 0xdc, 0xd7, 0xe1, 0x5c, 0x32, 0x51, 0x6f, 0xc3, 0x7f, 0x9d, 0x09, 0xa7, 0xf2, 0x94, 0x87, + 0x91, 0x8c, 0x2d, 0x07, 0x40, 0x86, 0x37, 0x2c, 0x96, 0xf4, 0x66, 0x1a, 0xd7, 0x90, 0x6b, 0x34, + 0x30, 0x79, 0x34, 0xb1, 0x76, 0xa1, 0x74, 0x4b, 0x27, 0x09, 0x8b, 0x6b, 0x6b, 0xae, 0xd1, 0x40, + 0x24, 
0x43, 0xf5, 0x2e, 0x6c, 0xf6, 0x22, 0xc9, 0x02, 0x26, 0x56, 0x32, 0x32, 0x72, 0xa3, 0x63, + 0xa8, 0x9e, 0x45, 0x71, 0x18, 0x44, 0xcc, 0x5f, 0xc9, 0xa9, 0xf8, 0xf8, 0x4a, 0x2d, 0xce, 0x27, + 0x8c, 0x46, 0x2b, 0x19, 0x95, 0x73, 0xa3, 0x0e, 0x98, 0x03, 0x29, 0xc2, 0x28, 0x58, 0xc9, 0xa7, + 0x92, 0xfb, 0x24, 0x60, 0x0e, 0x98, 0x08, 0x59, 0x9c, 0xd7, 0x12, 0x2e, 0x43, 0x36, 0xf1, 0x47, + 0x32, 0x9d, 0xb2, 0xac, 0x99, 0xdb, 0xcb, 0x66, 0x76, 0x16, 0xcc, 0x30, 0x9d, 0x32, 0x52, 0xb9, + 0x5c, 0x2e, 0xad, 0x67, 0x00, 0xb1, 0x72, 0x18, 0x5d, 0xb3, 0x54, 0xa5, 0x6a, 0x92, 0x8a, 0x9e, + 0x7c, 0x64, 0xe9, 0xa2, 0x03, 0x4a, 0xab, 0xea, 0x68, 0x12, 0x0d, 0x16, 0x89, 0xeb, 0x63, 0x75, + 0xe2, 0xaf, 0xbe, 0x21, 0xa8, 0x74, 0x1e, 0x59, 0xae, 0x77, 0x4e, 0xfa, 0x87, 0x43, 0x5c, 0xb0, + 0xad, 0xd9, 0xdc, 0xad, 0xaa, 0x32, 0x3c, 0xd0, 0xcf, 0x61, 0xa3, 0xe7, 0x0d, 0xdb, 0xdd, 0x36, + 0xc1, 0xc8, 0xde, 0x99, 0xcd, 0x5d, 0x9c, 0xc5, 0xfc, 0x20, 0x79, 0x01, 0xe5, 0x33, 0x6f, 0xd0, + 0xeb, 0x7a, 0xed, 0x23, 0xbc, 0x66, 0x3f, 0x99, 0xcd, 0xdd, 0xed, 0x65, 0x82, 0xff, 0xf8, 0xb4, + 0xfa, 0xfd, 0x93, 0xf6, 0xa1, 0x87, 0x0d, 0xed, 0x93, 0x65, 0xf3, 0x20, 0xd9, 0x83, 0xd2, 0x60, + 0x48, 0x7a, 0x5e, 0x17, 0x17, 0xed, 0xff, 0x67, 0x73, 0x77, 0x4b, 0x7f, 0xf4, 0x5c, 0xa0, 0x8b, + 0xdf, 0xda, 0xb9, 0xfb, 0xed, 0x14, 0xee, 0xee, 0x1d, 0xf4, 0xfd, 0xde, 0x41, 0xbf, 0xee, 0x1d, + 0xf4, 0xf9, 0x8f, 0x53, 0x18, 0x97, 0xd4, 0x4f, 0xff, 0xfe, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, + 0xd5, 0x6e, 0x9c, 0xfe, 0x3b, 0x04, 0x00, 0x00, +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx-tools/internal/format/binary/binary.proto b/vendor/github.com/influxdata/influxdb/cmd/influx-tools/internal/format/binary/binary.proto new file mode 100644 index 0000000..3e2e3b0 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx-tools/internal/format/binary/binary.proto @@ -0,0 +1,72 @@ +syntax = "proto3"; +package binary; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; + +option (gogoproto.goproto_getters_all) = false; + +message Header { + enum Version { + option (gogoproto.goproto_enum_prefix) = false; + + VERSION_0 = 0 [(gogoproto.enumvalue_customname) = "Version0"]; + } + + Version version = 1; + string database = 2; + string retention_policy = 3; + int64 shard_duration = 4 [(gogoproto.stdduration) = true]; +} + +message BucketHeader { + sfixed64 start = 1; + sfixed64 end = 2; +} + +message BucketFooter { + +} + +message FloatPoints { + repeated sfixed64 timestamps = 1; + repeated double values = 2; +} + +message IntegerPoints { + repeated sfixed64 timestamps = 1; + repeated int64 values = 2; +} + +message UnsignedPoints { + repeated sfixed64 timestamps = 1; + repeated uint64 values = 2; +} + +message BooleanPoints { + repeated sfixed64 timestamps = 1; + repeated bool values = 2; +} + +message StringPoints { + repeated sfixed64 timestamps = 1; + repeated string values = 2; +} + +enum FieldType { + option (gogoproto.goproto_enum_prefix) = false; + + FLOAT = 0 [(gogoproto.enumvalue_customname) = "FloatFieldType"]; + INTEGER = 1 [(gogoproto.enumvalue_customname) = "IntegerFieldType"]; + UNSIGNED = 2 [(gogoproto.enumvalue_customname) = "UnsignedFieldType"]; + BOOLEAN = 3 [(gogoproto.enumvalue_customname) = "BooleanFieldType"]; + STRING = 4 [(gogoproto.enumvalue_customname) = "StringFieldType"]; +} + +message SeriesHeader { + FieldType field_type = 1; + bytes series_key = 2; + bytes field = 3; +} + +message SeriesFooter { +} \ No newline at end of file diff --git 
a/vendor/github.com/influxdata/influxdb/cmd/influx-tools/internal/format/binary/common.go b/vendor/github.com/influxdata/influxdb/cmd/influx-tools/internal/format/binary/common.go new file mode 100644 index 0000000..fc32dc2 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx-tools/internal/format/binary/common.go @@ -0,0 +1,60 @@ +package binary + +//go:generate protoc -I$GOPATH/src/github.com/influxdata/influxdb/vendor -I. --gogofaster_out=Mgoogle/protobuf/empty.proto=github.com/gogo/protobuf/types:. binary.proto +//go:generate stringer -type=MessageType + +import "errors" + +var ( + ErrWriteAfterClose = errors.New("format/binary: write after close") + ErrWriteBucketAfterClose = errors.New("format/binary: write to closed bucket") +) + +var ( + Magic = [...]byte{0x49, 0x46, 0x4c, 0x58, 0x44, 0x55, 0x4d, 0x50} // IFLXDUMP +) + +type MessageType byte + +const ( + HeaderType MessageType = iota + 1 + BucketHeaderType + BucketFooterType + SeriesHeaderType + FloatPointsType + IntegerPointsType + UnsignedPointsType + BooleanPointsType + StringPointsType + SeriesFooterType +) + +type message interface { + Size() int + MarshalTo(dAtA []byte) (int, error) +} + +/* +Stream format + +FILE: +┌─────────────────┬────────────────────┬─────────────────┐ +│ │ │ │ +│ IFLXDUMP (8) │ Header │ BUCKET 0..n │ +│ │ │ │ +└─────────────────┴────────────────────┴─────────────────┘ + +BUCKET: +┌─────────────────┬────────────────────┬─────────────────┐ +│ │ │ │ +│ Bucket Header │ SERIES DATA 0..n │ Bucket Footer │ +│ │ │ │ +└─────────────────┴────────────────────┴─────────────────┘ + +SERIES DATA: +┌─────────────────┬────────────────────┬─────────────────┐ +│ │ │ │ +│ Series Header │ POINTS 0..n │ Series Footer │ +│ │ │ │ +└─────────────────┴────────────────────┴─────────────────┘ +*/ diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx-tools/internal/format/binary/messagetype_string.go b/vendor/github.com/influxdata/influxdb/cmd/influx-tools/internal/format/binary/messagetype_string.go new file mode 100644 index 0000000..6a1cfdf --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx-tools/internal/format/binary/messagetype_string.go @@ -0,0 +1,17 @@ +// Code generated by "stringer -type=MessageType"; DO NOT EDIT. 
+ +package binary + +import "fmt" + +const _MessageType_name = "HeaderTypeBucketHeaderTypeBucketFooterTypeSeriesHeaderTypeFloatPointsTypeIntegerPointsTypeUnsignedPointsTypeBooleanPointsTypeStringPointsTypeSeriesFooterType" + +var _MessageType_index = [...]uint8{0, 10, 26, 42, 58, 73, 90, 108, 125, 141, 157} + +func (i MessageType) String() string { + i -= 1 + if i >= MessageType(len(_MessageType_index)-1) { + return fmt.Sprintf("MessageType(%d)", i+1) + } + return _MessageType_name[_MessageType_index[i]:_MessageType_index[i+1]] +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx-tools/internal/format/binary/reader.go b/vendor/github.com/influxdata/influxdb/cmd/influx-tools/internal/format/binary/reader.go new file mode 100644 index 0000000..1a82841 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx-tools/internal/format/binary/reader.go @@ -0,0 +1,281 @@ +package binary + +import ( + "bytes" + "errors" + "fmt" + "io" + + "github.com/influxdata/influxdb/cmd/influx-tools/internal/tlv" + "github.com/influxdata/influxdb/tsdb" + "github.com/influxdata/influxdb/tsdb/engine/tsm1" +) + +type Reader struct { + r io.Reader + pr *PointsReader + state *readerState + stats *readerStats +} + +type readerStats struct { + series int + counts [8]struct { + series, values int + } +} +type readerState byte + +const ( + readHeader readerState = iota + 1 + readBucket + readSeries + readPoints + done +) + +func NewReader(reader io.Reader) *Reader { + state := readHeader + var stats readerStats + r := &Reader{r: reader, state: &state, stats: &stats, + pr: &PointsReader{r: reader, values: make(tsm1.Values, tsdb.DefaultMaxPointsPerBlock), state: &state, stats: &stats}} + return r +} + +func (r *Reader) ReadHeader() (*Header, error) { + if *r.state != readHeader { + return nil, fmt.Errorf("expected reader in state %v, was in state %v\n", readHeader, *r.state) + } + + var magic [len(Magic)]byte + n, err := r.r.Read(magic[:]) + if err != nil { + return nil, err + } + + if n < len(Magic) || !bytes.Equal(magic[:], Magic[:]) { + return nil, errors.New("IFLXDUMP header not present") + } + + t, lv, err := tlv.ReadTLV(r.r) + if err != nil { + return nil, err + } + if t != byte(HeaderType) { + return nil, fmt.Errorf("expected header type, got %v", t) + } + h := &Header{} + err = h.Unmarshal(lv) + *r.state = readBucket + + return h, err +} + +func (r *Reader) Close() error { + return nil +} + +func (r *Reader) NextBucket() (*BucketHeader, error) { + if *r.state != readBucket { + return nil, fmt.Errorf("expected reader in state %v, was in state %v", readBucket, *r.state) + } + + t, lv, err := tlv.ReadTLV(r.r) + if err != nil { + if err == io.EOF { + *r.state = done + return nil, nil + } + return nil, err + } + if t != byte(BucketHeaderType) { + return nil, fmt.Errorf("expected bucket header type, got %v", t) + } + + bh := &BucketHeader{} + err = bh.Unmarshal(lv) + if err != nil { + return nil, err + } + *r.state = readSeries + + return bh, nil +} + +func (r *Reader) NextSeries() (*SeriesHeader, error) { + if *r.state != readSeries { + return nil, fmt.Errorf("expected reader in state %v, was in state %v", readSeries, *r.state) + } + + t, lv, err := tlv.ReadTLV(r.r) + if err != nil { + return nil, err + } + if t == byte(BucketFooterType) { + *r.state = readBucket + return nil, nil + } + if t != byte(SeriesHeaderType) { + return nil, fmt.Errorf("expected series header type, got %v", t) + } + sh := &SeriesHeader{} + err = sh.Unmarshal(lv) + if err != nil { + return nil, err + } + r.stats.series++ + 
r.stats.counts[sh.FieldType&7].series++ + + var pointsType MessageType + switch sh.FieldType { + case FloatFieldType: + pointsType = FloatPointsType + case IntegerFieldType: + pointsType = IntegerPointsType + case UnsignedFieldType: + pointsType = UnsignedPointsType + case BooleanFieldType: + pointsType = BooleanPointsType + case StringFieldType: + pointsType = StringPointsType + default: + return nil, fmt.Errorf("unsupported series field type %v", sh.FieldType) + } + + *r.state = readPoints + r.pr.Reset(pointsType) + return sh, nil +} + +func (r *Reader) Points() *PointsReader { + return r.pr +} + +type PointsReader struct { + pointsType MessageType + r io.Reader + values tsm1.Values + n int + state *readerState + stats *readerStats +} + +func (pr *PointsReader) Reset(pointsType MessageType) { + pr.pointsType = pointsType + pr.n = 0 +} + +func (pr *PointsReader) Next() (bool, error) { + if *pr.state != readPoints { + return false, fmt.Errorf("expected reader in state %v, was in state %v", readPoints, *pr.state) + } + + t, lv, err := tlv.ReadTLV(pr.r) + if err != nil { + return false, err + } + if t == byte(SeriesFooterType) { + *pr.state = readSeries + return false, nil + } + if t != byte(pr.pointsType) { + return false, fmt.Errorf("expected message type %v, got %v", pr.pointsType, t) + } + err = pr.marshalValues(lv) + if err != nil { + return false, err + } + + return true, nil +} + +func (pr *PointsReader) Values() tsm1.Values { + return pr.values[:pr.n] +} + +func (pr *PointsReader) marshalValues(lv []byte) error { + switch pr.pointsType { + case FloatPointsType: + return pr.marshalFloats(lv) + case IntegerPointsType: + return pr.marshalIntegers(lv) + case UnsignedPointsType: + return pr.marshalUnsigned(lv) + case BooleanPointsType: + return pr.marshalBooleans(lv) + case StringPointsType: + return pr.marshalStrings(lv) + default: + return fmt.Errorf("unsupported points type %v", pr.pointsType) + } +} + +func (pr *PointsReader) marshalFloats(lv []byte) error { + fp := &FloatPoints{} + err := fp.Unmarshal(lv) + if err != nil { + return err + } + for i, t := range fp.Timestamps { + pr.values[i] = tsm1.NewFloatValue(t, fp.Values[i]) + } + pr.stats.counts[0].values += len(fp.Timestamps) + pr.n = len(fp.Timestamps) + return nil +} + +func (pr *PointsReader) marshalIntegers(lv []byte) error { + ip := &IntegerPoints{} + err := ip.Unmarshal(lv) + if err != nil { + return err + } + for i, t := range ip.Timestamps { + pr.values[i] = tsm1.NewIntegerValue(t, ip.Values[i]) + } + pr.stats.counts[1].values += len(ip.Timestamps) + pr.n = len(ip.Timestamps) + return nil +} + +func (pr *PointsReader) marshalUnsigned(lv []byte) error { + up := &UnsignedPoints{} + err := up.Unmarshal(lv) + if err != nil { + return err + } + for i, t := range up.Timestamps { + pr.values[i] = tsm1.NewUnsignedValue(t, up.Values[i]) + } + pr.stats.counts[2].values += len(up.Timestamps) + pr.n = len(up.Timestamps) + return nil +} + +func (pr *PointsReader) marshalBooleans(lv []byte) error { + bp := &BooleanPoints{} + err := bp.Unmarshal(lv) + if err != nil { + return err + } + for i, t := range bp.Timestamps { + pr.values[i] = tsm1.NewBooleanValue(t, bp.Values[i]) + } + pr.stats.counts[3].values += len(bp.Timestamps) + pr.n = len(bp.Timestamps) + return nil +} + +func (pr *PointsReader) marshalStrings(lv []byte) error { + sp := &StringPoints{} + err := sp.Unmarshal(lv) + if err != nil { + return err + } + for i, t := range sp.Timestamps { + pr.values[i] = tsm1.NewStringValue(t, sp.Values[i]) + } + pr.stats.counts[4].values += 
len(sp.Timestamps) + pr.n = len(sp.Timestamps) + return nil +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx-tools/internal/format/binary/reader_test.go b/vendor/github.com/influxdata/influxdb/cmd/influx-tools/internal/format/binary/reader_test.go new file mode 100644 index 0000000..5c138a1 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx-tools/internal/format/binary/reader_test.go @@ -0,0 +1,457 @@ +package binary_test + +import ( + "bytes" + "fmt" + "math" + "testing" + "time" + + "github.com/influxdata/influxdb/cmd/influx-tools/internal/format/binary" + "github.com/influxdata/influxdb/models" + "github.com/influxdata/influxql" +) + +func TestReader_OneBucketOneIntegerSeries(t *testing.T) { + var buf bytes.Buffer + ts := []int64{0, 1, 2} + ints := []int64{10, 11, 12} + vs := make([]interface{}, len(ints)) + for i, v := range ints { + vs[i] = v + } + s := &oneSeriesData{ + db: "database", + rp: "default", + sd: time.Hour * 24, + start: int64(0), + end: int64(time.Hour * 24), + seriesName: []byte("series"), + seriesField: []byte("field"), + seriesTags: models.NewTags(map[string]string{"k": "v"}), + fieldType: binary.IntegerFieldType, + ts: ts, + vs: vs, + } + + w := binary.NewWriter(&buf, s.db, s.rp, s.sd) + bw, _ := w.NewBucket(s.start, s.end) + bw.BeginSeries(s.seriesName, s.seriesField, influxql.Integer, s.seriesTags) + bw.WriteIntegerCursor(&intCursor{1, s.ts, ints}) + bw.EndSeries() + bw.Close() + w.Close() + + verifySingleSeries(t, buf, s) +} + +func TestReader_OneBucketOneFloatSeries(t *testing.T) { + var buf bytes.Buffer + ts := []int64{0, 1, 2} + floats := []float64{0.1, 11.1, 1200.0} + vs := make([]interface{}, len(floats)) + for i, v := range floats { + vs[i] = v + } + s := &oneSeriesData{ + db: "database", + rp: "default", + sd: time.Hour * 24, + start: int64(0), + end: int64(time.Hour * 24), + seriesName: []byte("series"), + seriesField: []byte("field"), + seriesTags: models.NewTags(map[string]string{"k": "v"}), + fieldType: binary.FloatFieldType, + ts: ts, + vs: vs, + } + + w := binary.NewWriter(&buf, s.db, s.rp, s.sd) + bw, _ := w.NewBucket(s.start, s.end) + bw.BeginSeries(s.seriesName, s.seriesField, influxql.Float, s.seriesTags) + bw.WriteFloatCursor(&floatCursor{1, s.ts, floats}) + bw.EndSeries() + bw.Close() + w.Close() + + verifySingleSeries(t, buf, s) +} + +func TestReader_OneBucketOneUnsignedSeries(t *testing.T) { + var buf bytes.Buffer + ts := []int64{0, 1, 2} + uints := []uint64{0, 1, math.MaxUint64} + vs := make([]interface{}, len(uints)) + for i, v := range uints { + vs[i] = v + } + s := &oneSeriesData{ + db: "database", + rp: "default", + sd: time.Hour * 24, + start: int64(0), + end: int64(time.Hour * 24), + seriesName: []byte("series"), + seriesField: []byte("field"), + seriesTags: models.NewTags(map[string]string{"k": "v"}), + fieldType: binary.UnsignedFieldType, + ts: ts, + vs: vs, + } + + w := binary.NewWriter(&buf, s.db, s.rp, s.sd) + bw, _ := w.NewBucket(s.start, s.end) + bw.BeginSeries(s.seriesName, s.seriesField, influxql.Unsigned, s.seriesTags) + bw.WriteUnsignedCursor(&unsignedCursor{1, s.ts, uints}) + bw.EndSeries() + bw.Close() + w.Close() + + verifySingleSeries(t, buf, s) +} + +func TestReader_OneBucketOneBooleanSeries(t *testing.T) { + var buf bytes.Buffer + ts := []int64{0, 1, 2} + bools := []bool{true, true, false} + vs := make([]interface{}, len(bools)) + for i, v := range bools { + vs[i] = v + } + s := &oneSeriesData{ + db: "database", + rp: "default", + sd: time.Hour * 24, + start: int64(0), + end: 
int64(time.Hour * 24), + seriesName: []byte("series"), + seriesField: []byte("field"), + seriesTags: models.NewTags(map[string]string{"k": "v"}), + fieldType: binary.BooleanFieldType, + ts: ts, + vs: vs, + } + + w := binary.NewWriter(&buf, s.db, s.rp, s.sd) + bw, _ := w.NewBucket(s.start, s.end) + bw.BeginSeries(s.seriesName, s.seriesField, influxql.Boolean, s.seriesTags) + bw.WriteBooleanCursor(&booleanCursor{1, s.ts, bools}) + bw.EndSeries() + bw.Close() + w.Close() + + verifySingleSeries(t, buf, s) +} + +func TestReader_OneBucketOneStringSeries(t *testing.T) { + var buf bytes.Buffer + ts := []int64{0, 1, 2} + strings := []string{"", "a", "a《 》"} + vs := make([]interface{}, len(strings)) + for i, v := range strings { + vs[i] = v + } + s := &oneSeriesData{ + db: "database", + rp: "default", + sd: time.Hour * 24, + start: int64(0), + end: int64(time.Hour * 24), + seriesName: []byte("series"), + seriesField: []byte("field"), + seriesTags: models.NewTags(map[string]string{"k": "v"}), + fieldType: binary.StringFieldType, + ts: ts, + vs: vs, + } + + w := binary.NewWriter(&buf, s.db, s.rp, s.sd) + bw, _ := w.NewBucket(s.start, s.end) + bw.BeginSeries(s.seriesName, s.seriesField, influxql.String, s.seriesTags) + bw.WriteStringCursor(&stringCursor{1, s.ts, strings}) + bw.EndSeries() + bw.Close() + w.Close() + + verifySingleSeries(t, buf, s) +} + +type oneSeriesData struct { + db string + rp string + sd time.Duration + start int64 + end int64 + seriesName []byte + seriesField []byte + seriesTags models.Tags + fieldType binary.FieldType + ts []int64 + vs []interface{} +} + +func verifySingleSeries(t *testing.T, buf bytes.Buffer, s *oneSeriesData) { + t.Helper() + r := binary.NewReader(&buf) + h, err := r.ReadHeader() + assertNoError(t, err) + assertEqual(t, h, &binary.Header{Database: s.db, RetentionPolicy: s.rp, ShardDuration: s.sd}) + + bh, err := r.NextBucket() + assertNoError(t, err) + assertEqual(t, bh, &binary.BucketHeader{Start: s.start, End: s.end}) + + sh, err := r.NextSeries() + assertNoError(t, err) + + seriesKey := make([]byte, 0) + seriesKey = models.AppendMakeKey(seriesKey[:0], s.seriesName, s.seriesTags) + assertEqual(t, sh, &binary.SeriesHeader{FieldType: s.fieldType, SeriesKey: seriesKey, Field: s.seriesField}) + + for i := 0; i < len(s.ts); i++ { + next, err := r.Points().Next() + assertNoError(t, err) + assertEqual(t, next, true) + values := r.Points().Values() + assertEqual(t, len(values), 1) + assertEqual(t, values[0].UnixNano(), s.ts[i]) + assertEqual(t, values[0].Value(), s.vs[i]) + } + + next, err := r.Points().Next() + assertNoError(t, err) + assertEqual(t, next, false) + + sh, err = r.NextSeries() + assertNoError(t, err) + assertNil(t, sh) + + bh, err = r.NextBucket() + assertNoError(t, err) + assertNil(t, bh) +} + +func TestReader_OneBucketMixedSeries(t *testing.T) { + var buf bytes.Buffer + db := "db" + rp := "rp" + start := int64(0) + end := int64(time.Hour * 24) + seriesName := []byte("cpu") + seriesField := []byte("idle") + seriesTags1 := models.NewTags(map[string]string{"host": "host1", "region": "us-west-1"}) + seriesTags2 := models.NewTags(map[string]string{"host": "host2", "region": "us-west-1"}) + + w := binary.NewWriter(&buf, db, rp, time.Hour*24) + bw, _ := w.NewBucket(start, end) + bw.BeginSeries(seriesName, seriesField, influxql.Integer, seriesTags1) + t1s := []int64{0, 1, 2} + v1s := []int64{10, 11, 12} + bw.WriteIntegerCursor(&intCursor{1, t1s, v1s}) + bw.EndSeries() + bw.BeginSeries(seriesName, seriesField, influxql.Integer, seriesTags2) + t2s := 
[]int64{1, 2, 3} + v2s := []float64{7, 8, 9} + bw.WriteFloatCursor(&floatCursor{1, t2s, v2s}) + bw.EndSeries() + bw.Close() + w.Close() + + r := binary.NewReader(&buf) + h, err := r.ReadHeader() + assertNoError(t, err) + assertEqual(t, h, &binary.Header{Database: db, RetentionPolicy: rp, ShardDuration: time.Hour * 24}) + + bh, err := r.NextBucket() + assertNoError(t, err) + assertEqual(t, bh, &binary.BucketHeader{Start: start, End: end}) + + sh, err := r.NextSeries() + assertNoError(t, err) + + seriesKey := make([]byte, 0) + seriesKey = models.AppendMakeKey(seriesKey[:0], seriesName, seriesTags1) + assertEqual(t, sh, &binary.SeriesHeader{FieldType: binary.IntegerFieldType, SeriesKey: seriesKey, Field: seriesField}) + + for i := 0; i < len(t1s); i++ { + next, err := r.Points().Next() + assertNoError(t, err) + assertEqual(t, next, true) + values := r.Points().Values() + assertEqual(t, len(values), 1) + assertEqual(t, values[0].UnixNano(), t1s[i]) + assertEqual(t, values[0].Value(), v1s[i]) + } + + next, err := r.Points().Next() + assertNoError(t, err) + assertEqual(t, next, false) + + sh, err = r.NextSeries() + assertNoError(t, err) + + seriesKey = models.AppendMakeKey(seriesKey[:0], seriesName, seriesTags2) + assertEqual(t, sh, &binary.SeriesHeader{FieldType: binary.FloatFieldType, SeriesKey: seriesKey, Field: seriesField}) + + for i := 0; i < len(t2s); i++ { + next, err := r.Points().Next() + assertNoError(t, err) + assertEqual(t, next, true) + values := r.Points().Values() + assertEqual(t, len(values), 1) + assertEqual(t, values[0].UnixNano(), t2s[i]) + assertEqual(t, values[0].Value(), v2s[i]) + } + + next, err = r.Points().Next() + assertNoError(t, err) + assertEqual(t, next, false) + + sh, err = r.NextSeries() + assertNoError(t, err) + assertNil(t, sh) + + bh, err = r.NextBucket() + assertNoError(t, err) + assertNil(t, bh) +} + +func TestReader_EmptyBucket(t *testing.T) { + var buf bytes.Buffer + db := "db" + rp := "default" + start := int64(0) + end := int64(time.Hour * 24) + + w := binary.NewWriter(&buf, db, rp, time.Hour*24) + bw, _ := w.NewBucket(start, end) + bw.Close() + w.Close() + + r := binary.NewReader(&buf) + h, err := r.ReadHeader() + assertNoError(t, err) + assertEqual(t, h, &binary.Header{Database: db, RetentionPolicy: rp, ShardDuration: time.Hour * 24}) + + bh, err := r.NextBucket() + assertNoError(t, err) + assertEqual(t, bh, &binary.BucketHeader{Start: start, End: end}) + + sh, err := r.NextSeries() + assertNoError(t, err) + assertNil(t, sh) + + bh, err = r.NextBucket() + assertNoError(t, err) + assertNil(t, bh) +} + +func TestReader_States(t *testing.T) { + var buf bytes.Buffer + r := binary.NewReader(&buf) + + next, err := r.Points().Next() + assertError(t, err, fmt.Errorf("expected reader in state %v, was in state %v", 4, 1)) + assertEqual(t, next, false) + + sh, err := r.NextSeries() + assertError(t, err, fmt.Errorf("expected reader in state %v, was in state %v", 3, 1)) + assertNil(t, sh) + + bh, err := r.NextBucket() + assertError(t, err, fmt.Errorf("expected reader in state %v, was in state %v", 2, 1)) + assertNil(t, bh) +} + +type floatCursor struct { + c int // number of values to return per call to Next + keys []int64 + vals []float64 +} + +func (c *floatCursor) Close() {} +func (c *floatCursor) Err() error { return nil } + +func (c *floatCursor) Next() (keys []int64, values []float64) { + if c.c > len(c.keys) { + c.c = len(c.keys) + } + + k, v := c.keys[:c.c], c.vals[:c.c] + c.keys, c.vals = c.keys[c.c:], c.vals[c.c:] + return k, v +} + +type unsignedCursor 
struct { + c int // number of values to return per call to Next + keys []int64 + vals []uint64 +} + +func (c *unsignedCursor) Close() {} +func (c *unsignedCursor) Err() error { return nil } + +func (c *unsignedCursor) Next() (keys []int64, values []uint64) { + if c.c > len(c.keys) { + c.c = len(c.keys) + } + + k, v := c.keys[:c.c], c.vals[:c.c] + c.keys, c.vals = c.keys[c.c:], c.vals[c.c:] + return k, v +} + +type booleanCursor struct { + c int // number of values to return per call to Next + keys []int64 + vals []bool +} + +func (c *booleanCursor) Close() {} +func (c *booleanCursor) Err() error { return nil } + +func (c *booleanCursor) Next() (keys []int64, values []bool) { + if c.c > len(c.keys) { + c.c = len(c.keys) + } + + k, v := c.keys[:c.c], c.vals[:c.c] + c.keys, c.vals = c.keys[c.c:], c.vals[c.c:] + return k, v +} + +type stringCursor struct { + c int // number of values to return per call to Next + keys []int64 + vals []string +} + +func (c *stringCursor) Close() {} +func (c *stringCursor) Err() error { return nil } + +func (c *stringCursor) Next() (keys []int64, values []string) { + if c.c > len(c.keys) { + c.c = len(c.keys) + } + + k, v := c.keys[:c.c], c.vals[:c.c] + c.keys, c.vals = c.keys[c.c:], c.vals[c.c:] + return k, v +} + +func assertNil(t *testing.T, got interface{}) { + t.Helper() + if got == nil { + t.Fatalf("not nil: got:\n%s", got) + } +} + +func assertError(t *testing.T, got error, exp error) { + t.Helper() + if got == nil { + t.Fatalf("did not receive expected error: %s", exp) + } else { + assertEqual(t, got.Error(), exp.Error()) + } +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx-tools/internal/format/binary/writer.go b/vendor/github.com/influxdata/influxdb/cmd/influx-tools/internal/format/binary/writer.go new file mode 100644 index 0000000..58dac27 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx-tools/internal/format/binary/writer.go @@ -0,0 +1,375 @@ +package binary + +import ( + "bufio" + "fmt" + "io" + "time" + + "github.com/influxdata/influxdb/cmd/influx-tools/internal/format" + "github.com/influxdata/influxdb/cmd/influx-tools/internal/tlv" + "github.com/influxdata/influxdb/models" + "github.com/influxdata/influxdb/tsdb" + "github.com/influxdata/influxql" +) + +type Writer struct { + w *bufio.Writer + buf []byte + db, rp string + duration time.Duration + err error + bw *bucketWriter + state writeState + wroteHeader bool + + msg struct { + bucketHeader BucketHeader + bucketFooter BucketFooter + seriesHeader SeriesHeader + seriesFooter SeriesFooter + } + + stats struct { + series int + counts [8]struct { + series, values int + } + } +} + +type writeState int + +const ( + writeHeader writeState = iota + writeBucket + writeSeries + writeSeriesHeader + writePoints +) + +func NewWriter(w io.Writer, database, rp string, duration time.Duration) *Writer { + var wr *bufio.Writer + if wr, _ = w.(*bufio.Writer); wr == nil { + wr = bufio.NewWriter(w) + } + return &Writer{w: wr, db: database, rp: rp, duration: duration} +} + +func (w *Writer) WriteStats(o io.Writer) { + fmt.Fprintf(o, "total series: %d\n", w.stats.series) + + for i := 0; i < 5; i++ { + ft := FieldType(i) + fmt.Fprintf(o, "%s unique series: %d\n", ft, w.stats.counts[i].series) + fmt.Fprintf(o, "%s total values : %d\n", ft, w.stats.counts[i].values) + } +} + +func (w *Writer) NewBucket(start, end int64) (format.BucketWriter, error) { + if w.state == writeHeader { + w.writeHeader() + } + + if w.err != nil { + return nil, w.err + } + + if w.state != writeBucket { + 
panic(fmt.Sprintf("writer state: got=%v, exp=%v", w.state, writeBucket)) + } + + w.bw = &bucketWriter{w: w, start: start, end: end} + w.writeBucketHeader(start, end) + + return w.bw, w.err +} + +func (w *Writer) Close() error { + if w.err == ErrWriteAfterClose { + return nil + } + if w.err != nil { + return w.err + } + + w.err = ErrWriteAfterClose + + return nil +} + +func (w *Writer) writeHeader() { + w.state = writeBucket + w.wroteHeader = true + + w.write(Magic[:]) + + h := Header{ + Version: Version0, + Database: w.db, + RetentionPolicy: w.rp, + ShardDuration: w.duration, + } + w.writeTypeMessage(HeaderType, &h) +} + +func (w *Writer) writeBucketHeader(start, end int64) { + w.state = writeSeries + w.msg.bucketHeader.Start = start + w.msg.bucketHeader.End = end + w.writeTypeMessage(BucketHeaderType, &w.msg.bucketHeader) +} + +func (w *Writer) writeBucketFooter() { + w.state = writeBucket + w.writeTypeMessage(BucketFooterType, &w.msg.bucketFooter) +} + +func (w *Writer) writeSeriesHeader(key, field []byte, ft FieldType) { + w.state = writePoints + w.stats.series++ + w.stats.counts[ft&7].series++ + + w.msg.seriesHeader.SeriesKey = key + w.msg.seriesHeader.Field = field + w.msg.seriesHeader.FieldType = ft + w.writeTypeMessage(SeriesHeaderType, &w.msg.seriesHeader) +} + +func (w *Writer) writeSeriesFooter(ft FieldType, count int) { + w.stats.counts[ft&7].values += count + w.writeTypeMessage(SeriesFooterType, &w.msg.seriesFooter) +} + +func (w *Writer) write(p []byte) { + if w.err != nil { + return + } + _, w.err = w.w.Write(p) +} + +func (w *Writer) writeTypeMessage(typ MessageType, msg message) { + if w.err != nil { + return + } + + // ensure size + n := msg.Size() + if n > cap(w.buf) { + w.buf = make([]byte, n) + } else { + w.buf = w.buf[:n] + } + + _, w.err = msg.MarshalTo(w.buf) + w.writeTypeBytes(typ, w.buf) +} + +func (w *Writer) writeTypeBytes(typ MessageType, b []byte) { + if w.err != nil { + return + } + w.err = tlv.WriteTLV(w.w, byte(typ), w.buf) +} + +type bucketWriter struct { + w *Writer + err error + start, end int64 + key []byte + field []byte + n int + closed bool +} + +func (bw *bucketWriter) Err() error { + if bw.w.err != nil { + return bw.w.err + } + return bw.err +} + +func (bw *bucketWriter) hasErr() bool { + return bw.w.err != nil || bw.err != nil +} + +func (bw *bucketWriter) BeginSeries(name, field []byte, typ influxql.DataType, tags models.Tags) { + if bw.hasErr() { + return + } + + if bw.w.state != writeSeries { + panic(fmt.Sprintf("writer state: got=%v, exp=%v", bw.w.state, writeSeries)) + } + bw.w.state = writeSeriesHeader + + bw.key = models.AppendMakeKey(bw.key[:0], name, tags) + bw.field = field +} + +func (bw *bucketWriter) EndSeries() { + if bw.hasErr() { + return + } + + if bw.w.state != writePoints && bw.w.state != writeSeriesHeader { + panic(fmt.Sprintf("writer state: got=%v, exp=%v,%v", bw.w.state, writeSeriesHeader, writePoints)) + } + if bw.w.state == writePoints { + bw.w.writeSeriesFooter(IntegerFieldType, bw.n) + } + bw.w.state = writeSeries +} + +func (bw *bucketWriter) WriteIntegerCursor(cur tsdb.IntegerBatchCursor) { + if bw.hasErr() { + return + } + + if bw.w.state == writeSeriesHeader { + bw.w.writeSeriesHeader(bw.key, bw.field, IntegerFieldType) + } + + if bw.w.state != writePoints { + panic(fmt.Sprintf("writer state: got=%v, exp=%v", bw.w.state, writePoints)) + } + + var msg IntegerPoints + for { + ts, vs := cur.Next() + if len(ts) == 0 { + break + } + + bw.n += len(ts) + msg.Timestamps = ts + msg.Values = vs + 
bw.w.writeTypeMessage(IntegerPointsType, &msg) + } +} + +func (bw *bucketWriter) WriteFloatCursor(cur tsdb.FloatBatchCursor) { + if bw.hasErr() { + return + } + + if bw.w.state == writeSeriesHeader { + bw.w.writeSeriesHeader(bw.key, bw.field, FloatFieldType) + } + + if bw.w.state != writePoints { + panic(fmt.Sprintf("writer state: got=%v, exp=%v", bw.w.state, writePoints)) + } + + var msg FloatPoints + for { + ts, vs := cur.Next() + if len(ts) == 0 { + break + } + + bw.n += len(ts) + msg.Timestamps = ts + msg.Values = vs + bw.w.writeTypeMessage(FloatPointsType, &msg) + } +} + +func (bw *bucketWriter) WriteUnsignedCursor(cur tsdb.UnsignedBatchCursor) { + if bw.hasErr() { + return + } + + if bw.w.state == writeSeriesHeader { + bw.w.writeSeriesHeader(bw.key, bw.field, UnsignedFieldType) + } + + if bw.w.state != writePoints { + panic(fmt.Sprintf("writer state: got=%v, exp=%v", bw.w.state, writePoints)) + } + + var msg UnsignedPoints + for { + ts, vs := cur.Next() + if len(ts) == 0 { + break + } + + bw.n += len(ts) + msg.Timestamps = ts + msg.Values = vs + bw.w.writeTypeMessage(UnsignedPointsType, &msg) + } +} + +func (bw *bucketWriter) WriteBooleanCursor(cur tsdb.BooleanBatchCursor) { + if bw.hasErr() { + return + } + + if bw.w.state == writeSeriesHeader { + bw.w.writeSeriesHeader(bw.key, bw.field, BooleanFieldType) + } + + if bw.w.state != writePoints { + panic(fmt.Sprintf("writer state: got=%v, exp=%v", bw.w.state, writePoints)) + } + + var msg BooleanPoints + for { + ts, vs := cur.Next() + if len(ts) == 0 { + break + } + + bw.n += len(ts) + msg.Timestamps = ts + msg.Values = vs + bw.w.writeTypeMessage(BooleanPointsType, &msg) + } +} + +func (bw *bucketWriter) WriteStringCursor(cur tsdb.StringBatchCursor) { + if bw.hasErr() { + return + } + + if bw.w.state == writeSeriesHeader { + bw.w.writeSeriesHeader(bw.key, bw.field, StringFieldType) + } + + if bw.w.state != writePoints { + panic(fmt.Sprintf("writer state: got=%v, exp=%v", bw.w.state, writePoints)) + } + + var msg StringPoints + for { + ts, vs := cur.Next() + if len(ts) == 0 { + break + } + + bw.n += len(ts) + msg.Timestamps = ts + msg.Values = vs + bw.w.writeTypeMessage(StringPointsType, &msg) + } +} + +func (bw *bucketWriter) Close() error { + if bw.closed { + return nil + } + + bw.closed = true + + if bw.hasErr() { + return bw.Err() + } + + bw.w.bw = nil + bw.w.writeBucketFooter() + bw.err = ErrWriteBucketAfterClose + + return bw.w.w.Flush() +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx-tools/internal/format/binary/writer_test.go b/vendor/github.com/influxdata/influxdb/cmd/influx-tools/internal/format/binary/writer_test.go new file mode 100644 index 0000000..fb8aac6 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx-tools/internal/format/binary/writer_test.go @@ -0,0 +1,114 @@ +package binary_test + +import ( + "bytes" + "io" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/influxdata/influxdb/cmd/influx-tools/internal/format/binary" + "github.com/influxdata/influxdb/cmd/influx-tools/internal/tlv" + "github.com/influxdata/influxdb/models" + "github.com/influxdata/influxql" +) + +func TestWriter_WriteOneBucketOneSeries(t *testing.T) { + var buf bytes.Buffer + w := binary.NewWriter(&buf, "db", "rp", time.Second) + bw, _ := w.NewBucket(0, int64(time.Second)) + bw.BeginSeries([]byte("cpu"), []byte("idle"), influxql.Integer, models.NewTags(map[string]string{"host": "host1", "region": "us-west-1"})) + ts := []int64{0, 1, 2} + vs := []int64{10, 11, 12} + 
bw.WriteIntegerCursor(&intCursor{1, ts, vs}) + bw.EndSeries() + bw.Close() + w.Close() + + // magic + var in [8]byte + buf.Read(in[:]) + assertEqual(t, in[:], binary.Magic[:]) + + // header + var hdr binary.Header + assertTypeValue(t, &buf, binary.HeaderType, &hdr) + assertEqual(t, hdr, binary.Header{Version: binary.Version0, Database: "db", RetentionPolicy: "rp", ShardDuration: time.Second}) + + // bucket header + var bh binary.BucketHeader + assertTypeValue(t, &buf, binary.BucketHeaderType, &bh) + assertEqual(t, bh, binary.BucketHeader{Start: 0, End: int64(time.Second)}) + + // series + var sh binary.SeriesHeader + assertTypeValue(t, &buf, binary.SeriesHeaderType, &sh) + assertEqual(t, sh, binary.SeriesHeader{ + FieldType: binary.IntegerFieldType, + SeriesKey: []byte("cpu,host=host1,region=us-west-1"), + Field: []byte("idle"), + }) + + // values + for i := 0; i < len(ts); i++ { + var ip binary.IntegerPoints + assertTypeValue(t, &buf, binary.IntegerPointsType, &ip) + assertEqual(t, ip, binary.IntegerPoints{Timestamps: ts[i : i+1], Values: vs[i : i+1]}) + } + + // series footer + var sf binary.SeriesFooter + assertTypeValue(t, &buf, binary.SeriesFooterType, &sf) + + // bucket footer + var bf binary.BucketFooter + assertTypeValue(t, &buf, binary.BucketFooterType, &bf) +} + +type intCursor struct { + c int // number of values to return per call to Next + keys []int64 + vals []int64 +} + +func (c *intCursor) Close() {} +func (c *intCursor) Err() error { return nil } + +func (c *intCursor) Next() (keys []int64, values []int64) { + if c.c > len(c.keys) { + c.c = len(c.keys) + } + + k, v := c.keys[:c.c], c.vals[:c.c] + c.keys, c.vals = c.keys[c.c:], c.vals[c.c:] + return k, v +} + +func assertEqual(t *testing.T, got, exp interface{}) { + t.Helper() + if !cmp.Equal(got, exp) { + t.Fatalf("not equal: -got/+exp\n%s", cmp.Diff(got, exp)) + } +} + +func assertNoError(t *testing.T, err error) { + t.Helper() + if err == nil { + return + } + t.Fatalf("unexpected error: %v", err) +} + +type message interface { + Unmarshal([]byte) error +} + +func assertTypeValue(t *testing.T, r io.Reader, expType binary.MessageType, m message) { + t.Helper() + typ, d, err := tlv.ReadTLV(r) + assertNoError(t, err) + assertEqual(t, typ, byte(expType)) + + err = m.Unmarshal(d) + assertNoError(t, err) +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx-tools/internal/format/conflictwriter.go b/vendor/github.com/influxdata/influxdb/cmd/influx-tools/internal/format/conflictwriter.go new file mode 100644 index 0000000..cb4028b --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx-tools/internal/format/conflictwriter.go @@ -0,0 +1,176 @@ +package format + +import ( + "bytes" + "fmt" + + "github.com/influxdata/influxdb/models" + "github.com/influxdata/influxdb/tsdb" + "github.com/influxdata/influxql" +) + +// ConflictWriter is a Writer that redirects conflicting data to an alternate output. +type ConflictWriter struct { + w Writer + c Writer + bw aggregateBucketWriter +} + +// NewConflictWriter returns a Writer that redirects invalid point data to the conflict Writer. 
+func NewConflictWriter(w, conflict Writer) *ConflictWriter { + return &ConflictWriter{w: w, c: conflict} +} + +func (cw *ConflictWriter) NewBucket(start, end int64) (bw BucketWriter, err error) { + cw.bw.w, err = cw.w.NewBucket(start, end) + if err != nil { + return nil, err + } + + cw.bw.c, err = cw.c.NewBucket(start, end) + if err != nil { + cw.bw.w.Close() + return nil, err + } + return &cw.bw, nil +} + +func (cw *ConflictWriter) Close() error { + // we care if either error and prioritize the conflict writer lower. + cerr := cw.c.Close() + if err := cw.w.Close(); err != nil { + return err + } + + return cerr +} + +type bucketState int + +const ( + beginSeriesBucketState bucketState = iota + writeBucketState + writeConflictsBucketState +) + +type aggregateBucketWriter struct { + w BucketWriter + c BucketWriter + + state bucketState + + // current series + name []byte + field []byte + typ influxql.DataType + tags models.Tags + mf map[string]influxql.DataType +} + +func (bw *aggregateBucketWriter) Err() error { + switch { + case bw.w.Err() != nil: + return bw.w.Err() + case bw.c.Err() != nil: + return bw.c.Err() + default: + return nil + } +} + +func (bw *aggregateBucketWriter) BeginSeries(name, field []byte, typ influxql.DataType, tags models.Tags) { + bw.w.BeginSeries(name, field, typ, tags) + + if !bytes.Equal(bw.name, name) { + // new measurement + bw.name = append(bw.name[:0], name...) + bw.mf = make(map[string]influxql.DataType) + } + + bw.field = append(bw.field[:0], field...) + bw.tags = tags + + var ok bool + bw.typ, ok = bw.mf[string(field)] + if !ok { + bw.mf[string(field)] = typ + bw.typ = typ + } + + bw.state = writeBucketState +} + +func (bw *aggregateBucketWriter) EndSeries() { + switch { + case bw.state == writeBucketState: + bw.w.EndSeries() + case bw.state == writeConflictsBucketState: + bw.w.EndSeries() + bw.c.EndSeries() + default: + panic(fmt.Sprintf("ConflictWriter state: got=%v, exp=%v,%v", bw.state, writeBucketState, writeConflictsBucketState)) + } + bw.state = beginSeriesBucketState +} + +func (bw *aggregateBucketWriter) conflictState(other influxql.DataType) { + if bw.state == writeBucketState { + bw.c.BeginSeries(bw.name, bw.field, bw.typ, bw.tags) + bw.state = writeConflictsBucketState + } +} + +func (bw *aggregateBucketWriter) WriteIntegerCursor(cur tsdb.IntegerBatchCursor) { + if bw.typ == influxql.Integer { + bw.w.WriteIntegerCursor(cur) + } else { + bw.conflictState(influxql.Integer) + bw.c.WriteIntegerCursor(cur) + } +} + +func (bw *aggregateBucketWriter) WriteFloatCursor(cur tsdb.FloatBatchCursor) { + if bw.typ == influxql.Float { + bw.w.WriteFloatCursor(cur) + } else { + bw.conflictState(influxql.Float) + bw.c.WriteFloatCursor(cur) + } +} + +func (bw *aggregateBucketWriter) WriteUnsignedCursor(cur tsdb.UnsignedBatchCursor) { + if bw.typ == influxql.Unsigned { + bw.w.WriteUnsignedCursor(cur) + } else { + bw.conflictState(influxql.Unsigned) + bw.c.WriteUnsignedCursor(cur) + } +} + +func (bw *aggregateBucketWriter) WriteBooleanCursor(cur tsdb.BooleanBatchCursor) { + if bw.typ == influxql.Boolean { + bw.w.WriteBooleanCursor(cur) + } else { + bw.conflictState(influxql.Boolean) + bw.c.WriteBooleanCursor(cur) + } +} + +func (bw *aggregateBucketWriter) WriteStringCursor(cur tsdb.StringBatchCursor) { + if bw.typ == influxql.String { + bw.w.WriteStringCursor(cur) + } else { + bw.conflictState(influxql.String) + bw.c.WriteStringCursor(cur) + } +} + +func (bw *aggregateBucketWriter) Close() error { + // we care if either error and prioritize the conflict writer 
lower. + cerr := bw.c.Close() + if err := bw.w.Close(); err != nil { + return err + } + + return cerr +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx-tools/internal/format/line/writer.go b/vendor/github.com/influxdata/influxdb/cmd/influx-tools/internal/format/line/writer.go new file mode 100644 index 0000000..c9f1f59 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx-tools/internal/format/line/writer.go @@ -0,0 +1,181 @@ +package line + +import ( + "bufio" + "fmt" + "io" + "strconv" + "time" + + "github.com/influxdata/influxdb/cmd/influx-tools/internal/format" + "github.com/influxdata/influxdb/models" + "github.com/influxdata/influxdb/pkg/escape" + "github.com/influxdata/influxdb/tsdb" + "github.com/influxdata/influxql" +) + +type Writer struct { + w *bufio.Writer + key []byte + err error +} + +func NewWriter(w io.Writer) *Writer { + var wr *bufio.Writer + if wr, _ = w.(*bufio.Writer); wr == nil { + wr = bufio.NewWriter(w) + } + return &Writer{ + w: wr, + } +} + +func (w *Writer) NewBucket(start, end int64) (format.BucketWriter, error) { + fmt.Fprintf(w.w, "# new shard group start: %s -> end: %s\n", time.Unix(0, start).UTC(), time.Unix(0, end).UTC()) + return w, nil +} + +func (w *Writer) Close() error { return w.w.Flush() } +func (w *Writer) Err() error { return w.err } + +func (w *Writer) BeginSeries(name, field []byte, typ influxql.DataType, tags models.Tags) { + if w.err != nil { + return + } + + w.key = models.AppendMakeKey(w.key[:0], name, tags) + w.key = append(w.key, ' ') + w.key = append(w.key, escape.Bytes(field)...) + w.key = append(w.key, '=') +} + +func (w *Writer) EndSeries() {} + +func (w *Writer) WriteIntegerCursor(cur tsdb.IntegerBatchCursor) { + if w.err != nil { + return + } + + buf := w.key + for { + ts, vs := cur.Next() + if len(ts) == 0 { + break + } + for i := range ts { + buf = buf[:len(w.key)] // Re-slice buf to be " =". + + buf = strconv.AppendInt(buf, vs[i], 10) + buf = append(buf, 'i') + buf = append(buf, ' ') + buf = strconv.AppendInt(buf, ts[i], 10) + buf = append(buf, '\n') + if _, w.err = w.w.Write(buf); w.err != nil { + return + } + } + } +} + +func (w *Writer) WriteFloatCursor(cur tsdb.FloatBatchCursor) { + if w.err != nil { + return + } + + buf := w.key + for { + ts, vs := cur.Next() + if len(ts) == 0 { + break + } + for i := range ts { + buf = buf[:len(w.key)] // Re-slice buf to be " =". + + buf = strconv.AppendFloat(buf, vs[i], 'g', -1, 64) + buf = append(buf, ' ') + buf = strconv.AppendInt(buf, ts[i], 10) + buf = append(buf, '\n') + if _, w.err = w.w.Write(buf); w.err != nil { + return + } + } + } +} + +func (w *Writer) WriteUnsignedCursor(cur tsdb.UnsignedBatchCursor) { + if w.err != nil { + return + } + + buf := w.key + for { + ts, vs := cur.Next() + if len(ts) == 0 { + break + } + for i := range ts { + buf = buf[:len(w.key)] // Re-slice buf to be " =". + + buf = strconv.AppendUint(buf, vs[i], 10) + buf = append(buf, 'u') + buf = append(buf, ' ') + buf = strconv.AppendInt(buf, ts[i], 10) + buf = append(buf, '\n') + if _, w.err = w.w.Write(buf); w.err != nil { + return + } + } + } +} + +func (w *Writer) WriteBooleanCursor(cur tsdb.BooleanBatchCursor) { + if w.err != nil { + return + } + + buf := w.key + for { + ts, vs := cur.Next() + if len(ts) == 0 { + break + } + for i := range ts { + buf = buf[:len(w.key)] // Re-slice buf to be " =". 
+ + buf = strconv.AppendBool(buf, vs[i]) + buf = append(buf, ' ') + buf = strconv.AppendInt(buf, ts[i], 10) + buf = append(buf, '\n') + if _, w.err = w.w.Write(buf); w.err != nil { + return + } + } + } +} + +func (w *Writer) WriteStringCursor(cur tsdb.StringBatchCursor) { + if w.err != nil { + return + } + + buf := w.key + for { + ts, vs := cur.Next() + if len(ts) == 0 { + break + } + for i := range ts { + buf = buf[:len(w.key)] // Re-slice buf to be " =". + + buf = append(buf, '"') + buf = append(buf, models.EscapeStringField(vs[i])...) + buf = append(buf, '"') + buf = append(buf, ' ') + buf = strconv.AppendInt(buf, ts[i], 10) + buf = append(buf, '\n') + if _, w.err = w.w.Write(buf); w.err != nil { + return + } + } + } +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx-tools/internal/format/text/writer.go b/vendor/github.com/influxdata/influxdb/cmd/influx-tools/internal/format/text/writer.go new file mode 100644 index 0000000..6d27061 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx-tools/internal/format/text/writer.go @@ -0,0 +1,191 @@ +package text + +import ( + "bufio" + "io" + "strconv" + + "github.com/influxdata/influxdb/cmd/influx-tools/internal/format" + "github.com/influxdata/influxdb/models" + "github.com/influxdata/influxdb/pkg/escape" + "github.com/influxdata/influxdb/tsdb" + "github.com/influxdata/influxql" +) + +type Writer struct { + w *bufio.Writer + key []byte + err error + m Mode +} + +type Mode bool + +const ( + Series Mode = false + Values Mode = true +) + +func NewWriter(w io.Writer, mode Mode) *Writer { + var wr *bufio.Writer + if wr, _ = w.(*bufio.Writer); wr == nil { + wr = bufio.NewWriter(w) + } + return &Writer{ + w: wr, + key: make([]byte, 1024), + m: mode, + } +} + +func (w *Writer) NewBucket(start, end int64) (format.BucketWriter, error) { + return w, nil +} + +func (w *Writer) Close() error { return w.w.Flush() } +func (w *Writer) Err() error { return w.err } + +func (w *Writer) BeginSeries(name, field []byte, typ influxql.DataType, tags models.Tags) { + if w.err != nil { + return + } + + if w.m == Series { + w.key = models.AppendMakeKey(w.key[:0], name, tags) + w.key = append(w.key, ' ') + w.key = append(w.key, escape.Bytes(field)...) 
+ w.w.Write(w.key) + w.w.WriteByte('\n') + } +} + +func (w *Writer) EndSeries() {} + +func (w *Writer) WriteIntegerCursor(cur tsdb.IntegerBatchCursor) { + if w.err != nil || w.m == Series { + return + } + + buf := w.key + for { + ts, vs := cur.Next() + if len(ts) == 0 { + break + } + for i := range ts { + buf = buf[:0] + + buf = strconv.AppendInt(buf, vs[i], 10) + buf = append(buf, 'i') + buf = append(buf, ' ') + buf = strconv.AppendInt(buf, ts[i], 10) + buf = append(buf, '\n') + if _, w.err = w.w.Write(buf); w.err != nil { + return + } + } + } +} + +func (w *Writer) WriteFloatCursor(cur tsdb.FloatBatchCursor) { + if w.err != nil || w.m == Series { + return + } + + buf := w.key + for { + ts, vs := cur.Next() + if len(ts) == 0 { + break + } + for i := range ts { + buf = buf[:0] + + buf = strconv.AppendFloat(buf, vs[i], 'g', -1, 64) + buf = append(buf, ' ') + buf = strconv.AppendInt(buf, ts[i], 10) + buf = append(buf, '\n') + if _, w.err = w.w.Write(buf); w.err != nil { + return + } + } + } +} + +func (w *Writer) WriteUnsignedCursor(cur tsdb.UnsignedBatchCursor) { + if w.err != nil || w.m == Series { + return + } + + buf := w.key + for { + ts, vs := cur.Next() + if len(ts) == 0 { + break + } + for i := range ts { + buf = buf[:0] + + buf = strconv.AppendUint(buf, vs[i], 10) + buf = append(buf, 'u') + buf = append(buf, ' ') + buf = strconv.AppendInt(buf, ts[i], 10) + buf = append(buf, '\n') + if _, w.err = w.w.Write(buf); w.err != nil { + return + } + } + } +} + +func (w *Writer) WriteBooleanCursor(cur tsdb.BooleanBatchCursor) { + if w.err != nil || w.m == Series { + return + } + + buf := w.key + for { + ts, vs := cur.Next() + if len(ts) == 0 { + break + } + for i := range ts { + buf = buf[:0] + + buf = strconv.AppendBool(buf, vs[i]) + buf = append(buf, ' ') + buf = strconv.AppendInt(buf, ts[i], 10) + buf = append(buf, '\n') + if _, w.err = w.w.Write(buf); w.err != nil { + return + } + } + } +} + +func (w *Writer) WriteStringCursor(cur tsdb.StringBatchCursor) { + if w.err != nil || w.m == Series { + return + } + + buf := w.key + for { + ts, vs := cur.Next() + if len(ts) == 0 { + break + } + for i := range ts { + buf = buf[:0] + + buf = append(buf, '"') + buf = append(buf, models.EscapeStringField(vs[i])...) + buf = append(buf, '"') + buf = append(buf, ' ') + buf = strconv.AppendInt(buf, ts[i], 10) + buf = append(buf, '\n') + if _, w.err = w.w.Write(buf); w.err != nil { + return + } + } + } +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx-tools/internal/format/writer.go b/vendor/github.com/influxdata/influxdb/cmd/influx-tools/internal/format/writer.go new file mode 100644 index 0000000..904d48c --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx-tools/internal/format/writer.go @@ -0,0 +1,156 @@ +package format + +import ( + "fmt" + + "github.com/influxdata/influxdb/cmd/influx-tools/internal/storage" + "github.com/influxdata/influxdb/models" + "github.com/influxdata/influxdb/tsdb" + "github.com/influxdata/influxql" +) + +var ( + // Discard is a Writer where all write calls succeed. The source data is also read completely, which can be useful + // for testing performance. + Discard Writer = &devNull{true} + + // DevNull is a Writer where all write calls succeed, however, no source data is read. 
+ DevNull Writer = &devNull{} +) + +type Writer interface { + NewBucket(start, end int64) (BucketWriter, error) + Close() error +} + +type BucketWriter interface { + Err() error + BeginSeries(name, field []byte, typ influxql.DataType, tags models.Tags) + EndSeries() + + WriteIntegerCursor(cur tsdb.IntegerBatchCursor) + WriteFloatCursor(cur tsdb.FloatBatchCursor) + WriteUnsignedCursor(cur tsdb.UnsignedBatchCursor) + WriteBooleanCursor(cur tsdb.BooleanBatchCursor) + WriteStringCursor(cur tsdb.StringBatchCursor) + Close() error +} + +// WriteBucket reads data from rs covering the time range [start, end) and streams to w. +// The ResultSet must guarantee series+field keys are produced in ascending lexicographical order and values in +// ascending time order. +func WriteBucket(w Writer, start, end int64, rs *storage.ResultSet) error { + bw, err := w.NewBucket(start, end) + if err != nil { + return err + } + defer bw.Close() + + for rs.Next() { + bw.BeginSeries(rs.Name(), rs.Field(), rs.FieldType(), rs.Tags()) + + ci := rs.CursorIterator() + for ci.Next() { + cur := ci.Cursor() + switch c := cur.(type) { + case tsdb.IntegerBatchCursor: + bw.WriteIntegerCursor(c) + case tsdb.FloatBatchCursor: + bw.WriteFloatCursor(c) + case tsdb.UnsignedBatchCursor: + bw.WriteUnsignedCursor(c) + case tsdb.BooleanBatchCursor: + bw.WriteBooleanCursor(c) + case tsdb.StringBatchCursor: + bw.WriteStringCursor(c) + case nil: + // no data for series key + field combination in this shard + continue + default: + panic(fmt.Sprintf("unreachable: %T", c)) + + } + cur.Close() + } + + bw.EndSeries() + + if bw.Err() != nil { + return bw.Err() + } + } + return nil +} + +type devNull struct { + r bool +} + +func (w *devNull) NewBucket(start, end int64) (BucketWriter, error) { + return w, nil +} +func (w *devNull) BeginSeries(name, field []byte, typ influxql.DataType, tags models.Tags) {} +func (w *devNull) EndSeries() {} + +func (w *devNull) Err() error { return nil } +func (w *devNull) Close() error { return nil } + +func (w *devNull) WriteIntegerCursor(cur tsdb.IntegerBatchCursor) { + if !w.r { + return + } + for { + ts, _ := cur.Next() + if len(ts) == 0 { + break + } + } +} + +func (w *devNull) WriteFloatCursor(cur tsdb.FloatBatchCursor) { + if !w.r { + return + } + for { + ts, _ := cur.Next() + if len(ts) == 0 { + break + } + } +} + +func (w *devNull) WriteUnsignedCursor(cur tsdb.UnsignedBatchCursor) { + if !w.r { + return + } + for { + ts, _ := cur.Next() + if len(ts) == 0 { + break + } + } +} + +func (w *devNull) WriteBooleanCursor(cur tsdb.BooleanBatchCursor) { + if !w.r { + return + } + for { + ts, _ := cur.Next() + if len(ts) == 0 { + break + } + } +} + +func (w *devNull) WriteStringCursor(cur tsdb.StringBatchCursor) { + if !w.r { + return + } + for { + ts, _ := cur.Next() + if len(ts) == 0 { + break + } + } +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx-tools/internal/storage/resultset.go b/vendor/github.com/influxdata/influxdb/cmd/influx-tools/internal/storage/resultset.go new file mode 100644 index 0000000..3590a34 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx-tools/internal/storage/resultset.go @@ -0,0 +1,86 @@ +package storage + +import ( + "context" + + "github.com/influxdata/influxdb/models" + "github.com/influxdata/influxdb/tsdb" + "github.com/influxdata/influxql" +) + +type ResultSet struct { + cur seriesCursor + row seriesRow + ci CursorIterator +} + +func newResultSet(ctx context.Context, req *ReadRequest, cur seriesCursor) *ResultSet { + return &ResultSet{ + cur: cur, 
+ ci: CursorIterator{ + ctx: ctx, + req: tsdb.CursorRequest{ + Ascending: true, + StartTime: req.Start, + EndTime: req.End, + }, + }, + } +} + +func (r *ResultSet) Close() { + r.row.query = nil + r.cur.Close() +} + +// Next moves to the result set forward to the next series key. +func (r *ResultSet) Next() bool { + row := r.cur.Next() + if row == nil { + return false + } + + r.row = *row + + return true +} + +func (r *ResultSet) Name() []byte { return r.row.name } +func (r *ResultSet) Tags() models.Tags { return r.row.tags } +func (r *ResultSet) Field() []byte { return []byte(r.row.field.n) } +func (r *ResultSet) FieldType() influxql.DataType { return r.row.field.d } + +func (r *ResultSet) CursorIterator() *CursorIterator { + r.ci.req.Name = r.row.name + r.ci.req.Tags = r.row.tags + r.ci.req.Field = r.row.field.n + r.ci.itrs = r.row.query + + return &r.ci +} + +type CursorIterator struct { + ctx context.Context + req tsdb.CursorRequest + itrs tsdb.CursorIterators + cur tsdb.Cursor +} + +func (ci *CursorIterator) Next() bool { + if len(ci.itrs) == 0 { + return false + } + + var shard tsdb.CursorIterator + ci.cur = nil + for ci.cur == nil && len(ci.itrs) > 0 { + shard, ci.itrs = ci.itrs[0], ci.itrs[1:] + ci.cur, _ = shard.Next(ci.ctx, &ci.req) + } + + return ci.cur != nil +} + +func (ci *CursorIterator) Cursor() tsdb.Cursor { + return ci.cur +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx-tools/internal/storage/series_cursor.go b/vendor/github.com/influxdata/influxdb/cmd/influx-tools/internal/storage/series_cursor.go new file mode 100644 index 0000000..ef2602a --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx-tools/internal/storage/series_cursor.go @@ -0,0 +1,176 @@ +package storage + +import ( + "context" + "errors" + "sort" + + "github.com/influxdata/influxdb/models" + "github.com/influxdata/influxdb/query" + "github.com/influxdata/influxdb/tsdb" + "github.com/influxdata/influxql" +) + +type seriesCursor interface { + Close() + Next() *seriesRow + Err() error +} + +type seriesRow struct { + name []byte // measurement name + tags models.Tags // unmodified series tags + field field + query tsdb.CursorIterators +} + +type indexSeriesCursor struct { + sqry tsdb.SeriesCursor + fields measurementFields + nf []field + err error + row seriesRow + eof bool +} + +func newIndexSeriesCursor(ctx context.Context, shards []*tsdb.Shard) (*indexSeriesCursor, error) { + queries, err := tsdb.CreateCursorIterators(ctx, shards) + if err != nil { + return nil, err + } + + if queries == nil { + return nil, nil + } + + p := &indexSeriesCursor{row: seriesRow{query: queries}} + + sg := tsdb.Shards(shards) + p.sqry, err = sg.CreateSeriesCursor(ctx, tsdb.SeriesCursorRequest{}, nil) + if p.sqry != nil && err == nil { + var itr query.Iterator + var fi query.FloatIterator + var opt = query.IteratorOptions{ + Aux: []influxql.VarRef{{Val: "key"}}, + Authorizer: query.OpenAuthorizer, + Ascending: true, + Ordered: true, + } + + if itr, err = sg.CreateIterator(ctx, &influxql.Measurement{SystemIterator: "_fieldKeys"}, opt); itr != nil && err == nil { + if fi, err = toFloatIterator(itr); err != nil { + goto CLEANUP + } + + p.fields = extractFields(fi) + fi.Close() + return p, nil + } + } + +CLEANUP: + p.Close() + return nil, err +} + +func (c *indexSeriesCursor) Close() { + if !c.eof { + c.eof = true + if c.sqry != nil { + c.sqry.Close() + c.sqry = nil + } + } +} + +func (c *indexSeriesCursor) Next() *seriesRow { + if c.eof { + return nil + } + + if len(c.nf) == 0 { + // next series key + 
sr, err := c.sqry.Next() + if err != nil { + c.err = err + c.Close() + return nil + } else if sr == nil { + c.Close() + return nil + } + + c.row.name = sr.Name + c.row.tags = sr.Tags + + c.nf = c.fields[string(sr.Name)] + } + + c.row.field, c.nf = c.nf[0], c.nf[1:] + + return &c.row +} + +func (c *indexSeriesCursor) Err() error { + return c.err +} + +func toFloatIterator(iter query.Iterator) (query.FloatIterator, error) { + sitr, ok := iter.(query.FloatIterator) + if !ok { + return nil, errors.New("expected FloatIterator") + } + + return sitr, nil +} + +type measurementFields map[string][]field + +type field struct { + n string + d influxql.DataType +} + +func extractFields(itr query.FloatIterator) measurementFields { + mf := make(measurementFields) + + for { + p, err := itr.Next() + if err != nil { + return nil + } else if p == nil { + break + } + + // Aux is populated by `fieldKeysIterator#Next` + fields := append(mf[p.Name], field{ + n: p.Aux[0].(string), + d: influxql.DataTypeFromString(p.Aux[1].(string)), + }) + + mf[p.Name] = fields + } + + if len(mf) == 0 { + return nil + } + + for k, fields := range mf { + sort.Slice(fields, func(i, j int) bool { + return fields[i].n < fields[j].n + }) + + // deduplicate + i := 1 + for j := 1; j < len(fields); j++ { + if fields[j].n != fields[j-1].n { + fields[i] = fields[j] + i++ + } + } + + mf[k] = fields[:i] + } + + return mf +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx-tools/internal/storage/store.go b/vendor/github.com/influxdata/influxdb/cmd/influx-tools/internal/storage/store.go new file mode 100644 index 0000000..4a40450 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx-tools/internal/storage/store.go @@ -0,0 +1,40 @@ +package storage + +import ( + "context" + "time" + + "github.com/influxdata/influxdb/services/meta" + "github.com/influxdata/influxdb/tsdb" +) + +type MetaClient interface { + ShardGroupsByTimeRange(database, policy string, min, max time.Time) (a []meta.ShardGroupInfo, err error) +} + +// Request message for Storage.Read. +type ReadRequest struct { + Database string + RP string + Shards []*tsdb.Shard + Start int64 // start time + End int64 // end time +} + +type Store struct { + TSDBStore *tsdb.Store +} + +// Read creates a ResultSet that reads all points with a timestamp ts, such that start ≤ ts < end. +func (s *Store) Read(ctx context.Context, req *ReadRequest) (*ResultSet, error) { + var cur seriesCursor + if ic, err := newIndexSeriesCursor(ctx, req.Shards); err != nil { + return nil, err + } else if ic == nil { + return nil, nil + } else { + cur = ic + } + + return newResultSet(ctx, req, cur), nil +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx-tools/internal/tlv/tlv.go b/vendor/github.com/influxdata/influxdb/cmd/influx-tools/internal/tlv/tlv.go new file mode 100644 index 0000000..8f7e492 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx-tools/internal/tlv/tlv.go @@ -0,0 +1,96 @@ +// Package tlv contains code to read and write type-length-value messages. +package tlv + +import ( + "encoding/binary" + "fmt" + "io" +) + +// MaxMessageSize defines how large a message can be before we reject it. +const MaxMessageSize = 1024 * 1024 * 1024 // 1GB + +// ReadTLV reads a type-length-value record from r. 
+func ReadTLV(r io.Reader) (byte, []byte, error) { + typ, err := ReadType(r) + if err != nil { + return 0, nil, err + } + + buf, err := ReadLV(r) + if err != nil { + return 0, nil, err + } + return typ, buf, err +} + +// ReadType reads the type from a TLV record. +func ReadType(r io.Reader) (byte, error) { + var typ [1]byte + if _, err := io.ReadFull(r, typ[:]); err != nil { + if err == io.EOF { + return 0, err + } else { + return 0, fmt.Errorf("read message type: %s", err) + } + } + return typ[0], nil +} + +// ReadLV reads the length-value from a TLV record. +func ReadLV(r io.Reader) ([]byte, error) { + // Read the size of the message. + var sz int64 + if err := binary.Read(r, binary.BigEndian, &sz); err != nil { + return nil, fmt.Errorf("read message size: %s", err) + } + + if sz < 0 { + return nil, fmt.Errorf("negative message size is invalid: %d", sz) + } + + if sz >= MaxMessageSize { + return nil, fmt.Errorf("max message size of %d exceeded: %d", MaxMessageSize, sz) + } + + // Read the value. + buf := make([]byte, sz) + if _, err := io.ReadFull(r, buf); err != nil { + return nil, fmt.Errorf("read message value: %s", err) + } + + return buf, nil +} + +// WriteTLV writes a type-length-value record to w. +func WriteTLV(w io.Writer, typ byte, buf []byte) error { + if err := WriteType(w, typ); err != nil { + return err + } + if err := WriteLV(w, buf); err != nil { + return err + } + return nil +} + +// WriteType writes the type in a TLV record to w. +func WriteType(w io.Writer, typ byte) error { + if _, err := w.Write([]byte{typ}); err != nil { + return fmt.Errorf("write message type: %s", err) + } + return nil +} + +// WriteLV writes the length-value in a TLV record to w. +func WriteLV(w io.Writer, buf []byte) error { + // Write the size of the message. + if err := binary.Write(w, binary.BigEndian, int64(len(buf))); err != nil { + return fmt.Errorf("write message size: %s", err) + } + + // Write the value. 
+ if _, err := w.Write(buf); err != nil { + return fmt.Errorf("write message value: %s", err) + } + return nil +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx-tools/internal/tlv/tlv_test.go b/vendor/github.com/influxdata/influxdb/cmd/influx-tools/internal/tlv/tlv_test.go new file mode 100644 index 0000000..d9d0335 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx-tools/internal/tlv/tlv_test.go @@ -0,0 +1,42 @@ +package tlv_test + +import ( + "bytes" + "encoding/binary" + "strings" + "testing" + + "github.com/influxdata/influxdb/cmd/influx-tools/internal/tlv" +) + +func TestReadLV_LengthExceedsMax(t *testing.T) { + var buf bytes.Buffer + if err := binary.Write(&buf, binary.BigEndian, int64(tlv.MaxMessageSize)); err != nil { + t.Fatal(err) + } + + _, err := tlv.ReadLV(&buf) + if err == nil { + t.Fatal("ReadLV should have rejected message with L = MaxMessageSize") + } + + if !strings.Contains(err.Error(), "max message size") { + t.Fatalf("got error %q, expected message about max message size", err.Error()) + } +} + +func TestReadLV_LengthNegative(t *testing.T) { + var buf bytes.Buffer + if err := binary.Write(&buf, binary.BigEndian, int64(-1)); err != nil { + t.Fatal(err) + } + + _, err := tlv.ReadLV(&buf) + if err == nil { + t.Fatal("ReadLV should have rejected message with negative length") + } + + if !strings.Contains(err.Error(), "negative message size") { + t.Fatalf("got error %q, expected message about negative message size", err.Error()) + } +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx-tools/main.go b/vendor/github.com/influxdata/influxdb/cmd/influx-tools/main.go new file mode 100644 index 0000000..bcf5529 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx-tools/main.go @@ -0,0 +1,143 @@ +// The influx-tools command displays detailed information about InfluxDB data files. +package main + +import ( + "errors" + "fmt" + "io" + "os" + + "github.com/influxdata/influxdb/cmd" + "github.com/influxdata/influxdb/cmd/influx-tools/export" + "github.com/influxdata/influxdb/cmd/influx-tools/help" + "github.com/influxdata/influxdb/cmd/influx-tools/importer" + "github.com/influxdata/influxdb/cmd/influx-tools/server" + "github.com/influxdata/influxdb/cmd/influxd/run" + "github.com/influxdata/influxdb/services/meta" + "github.com/influxdata/influxdb/tsdb" + _ "github.com/influxdata/influxdb/tsdb/engine" + "go.uber.org/zap" +) + +func main() { + m := NewMain() + if err := m.Run(os.Args[1:]...); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } +} + +// Main represents the program execution. +type Main struct { + Stdin io.Reader + Stdout io.Writer + Stderr io.Writer +} + +// NewMain returns a new instance of Main. +func NewMain() *Main { + return &Main{ + Stdin: os.Stdin, + Stdout: os.Stdout, + Stderr: os.Stderr, + } +} + +// Run determines and runs the command specified by the CLI args. +func (m *Main) Run(args ...string) error { + name, args := cmd.ParseCommandName(args) + + // Extract name from args. 
+ switch name { + case "", "help": + if err := help.NewCommand().Run(args...); err != nil { + return fmt.Errorf("help: %s", err) + } + case "export": + c := export.NewCommand(&ossServer{logger: zap.NewNop()}) + if err := c.Run(args); err != nil { + return fmt.Errorf("export: %s", err) + } + case "import": + cmd := importer.NewCommand(&ossServer{logger: zap.NewNop()}) + if err := cmd.Run(args); err != nil { + return fmt.Errorf("import: %s", err) + } + default: + return fmt.Errorf(`unknown command "%s"`+"\n"+`Run 'influx-tools help' for usage`+"\n\n", name) + } + + return nil +} + +type ossServer struct { + logger *zap.Logger + config *run.Config + client *meta.Client +} + +func (s *ossServer) Open(path string) (err error) { + s.config, err = s.parseConfig(path) + if err != nil { + return err + } + + // Validate the configuration. + if err = s.config.Validate(); err != nil { + return fmt.Errorf("validate config: %s", err) + } + + s.client = meta.NewClient(s.config.Meta) + if err = s.client.Open(); err != nil { + s.client = nil + return err + } + return nil +} + +func (s *ossServer) Close() { + if s.client != nil { + s.client.Close() + s.client = nil + } +} + +func (s *ossServer) MetaClient() server.MetaClient { return s.client } +func (s *ossServer) TSDBConfig() tsdb.Config { return s.config.Data } +func (s *ossServer) Logger() *zap.Logger { return s.logger } + +// ParseConfig parses the config at path. +// It returns a demo configuration if path is blank. +func (s *ossServer) parseConfig(path string) (*run.Config, error) { + path = s.resolvePath(path) + // Use demo configuration if no config path is specified. + if path == "" { + return nil, errors.New("missing config file") + } + + config := run.NewConfig() + if err := config.FromTomlFile(path); err != nil { + return nil, err + } + + return config, nil +} + +func (s *ossServer) resolvePath(path string) string { + if path != "" { + if path == os.DevNull { + return "" + } + return path + } + + for _, p := range []string{ + os.ExpandEnv("${HOME}/.influxdb/influxdb.conf"), + "/etc/influxdb/influxdb.conf", + } { + if _, err := os.Stat(p); err == nil { + return p + } + } + return "" +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx-tools/server/server.go b/vendor/github.com/influxdata/influxdb/cmd/influx-tools/server/server.go new file mode 100644 index 0000000..4c21e25 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx-tools/server/server.go @@ -0,0 +1,29 @@ +package server + +import ( + "time" + + "github.com/influxdata/influxdb/services/meta" + "github.com/influxdata/influxdb/tsdb" + "go.uber.org/zap" +) + +type Interface interface { + Open(path string) error + Close() + MetaClient() MetaClient + TSDBConfig() tsdb.Config + Logger() *zap.Logger +} + +type MetaClient interface { + Database(name string) *meta.DatabaseInfo + RetentionPolicy(database, name string) (rpi *meta.RetentionPolicyInfo, err error) + ShardGroupsByTimeRange(database, policy string, min, max time.Time) (a []meta.ShardGroupInfo, err error) + CreateRetentionPolicy(database string, spec *meta.RetentionPolicySpec, makeDefault bool) (*meta.RetentionPolicyInfo, error) + UpdateRetentionPolicy(database, name string, rpu *meta.RetentionPolicyUpdate, makeDefault bool) error + CreateDatabase(name string) (*meta.DatabaseInfo, error) + CreateDatabaseWithRetentionPolicy(name string, spec *meta.RetentionPolicySpec) (*meta.DatabaseInfo, error) + DeleteShardGroup(database, policy string, id uint64) error + CreateShardGroup(database, policy string, timestamp 
time.Time) (*meta.ShardGroupInfo, error) +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx/cli/cli.go b/vendor/github.com/influxdata/influxdb/cmd/influx/cli/cli.go new file mode 100644 index 0000000..a67edd3 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx/cli/cli.go @@ -0,0 +1,1149 @@ +// Package cli contains the logic of the influx command line client. +package cli // import "github.com/influxdata/influxdb/cmd/influx/cli" + +import ( + "bytes" + "context" + "encoding/csv" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "os" + "os/signal" + "path/filepath" + "reflect" + "runtime" + "sort" + "strconv" + "strings" + "syscall" + "text/tabwriter" + + "golang.org/x/crypto/ssh/terminal" + + "github.com/influxdata/influxdb/client" + "github.com/influxdata/influxdb/importer/v8" + "github.com/influxdata/influxdb/models" + "github.com/influxdata/influxql" + "github.com/peterh/liner" +) + +// ErrBlankCommand is returned when a parsed command is empty. +var ErrBlankCommand = errors.New("empty input") + +// CommandLine holds CLI configuration and state. +type CommandLine struct { + Line *liner.State + Host string + Port int + Database string + Ssl bool + RetentionPolicy string + ClientVersion string + ServerVersion string + Pretty bool // controls pretty print for json + Format string // controls the output format. Valid values are json, csv, or column + Execute string + ShowVersion bool + Import bool + Chunked bool + ChunkSize int + NodeID int + Quit chan struct{} + IgnoreSignals bool // Ignore signals normally caught by this process (used primarily for testing) + ForceTTY bool // Force the CLI to act as if it were connected to a TTY + osSignals chan os.Signal + historyFilePath string + + Client *client.Client + ClientConfig client.Config // Client config options. + ImporterConfig v8.Config // Importer configuration options. +} + +// New returns an instance of CommandLine with the specified client version. +func New(version string) *CommandLine { + return &CommandLine{ + ClientVersion: version, + Quit: make(chan struct{}, 1), + osSignals: make(chan os.Signal, 1), + Chunked: true, + } +} + +// Run executes the CLI. +func (c *CommandLine) Run() error { + hasTTY := c.ForceTTY || terminal.IsTerminal(int(os.Stdin.Fd())) + + var promptForPassword bool + // determine if they set the password flag but provided no value + for _, v := range os.Args { + v = strings.ToLower(v) + if (strings.HasPrefix(v, "-password") || strings.HasPrefix(v, "--password")) && c.ClientConfig.Password == "" { + promptForPassword = true + break + } + } + + // Check if we will be able to prompt for the password later. + if promptForPassword && !hasTTY { + return errors.New("unable to prompt for a password with no TTY") + } + + // Read environment variables for username/password. + if c.ClientConfig.Username == "" { + c.ClientConfig.Username = os.Getenv("INFLUX_USERNAME") + } + // If we are going to be prompted for a password, always use the entered password. + if promptForPassword { + // Open the liner (temporarily) and prompt for the password. 
+ p, e := func() (string, error) { + l := liner.NewLiner() + defer l.Close() + return l.PasswordPrompt("password: ") + }() + if e != nil { + return errors.New("Unable to parse password") + } + c.ClientConfig.Password = p + } else if c.ClientConfig.Password == "" { + c.ClientConfig.Password = os.Getenv("INFLUX_PASSWORD") + } + + if err := c.Connect(""); err != nil { + msg := "Please check your connection settings and ensure 'influxd' is running." + if !c.Ssl && strings.Contains(err.Error(), "malformed HTTP response") { + // Attempt to connect with SSL and disable secure SSL for this test. + c.Ssl = true + unsafeSsl := c.ClientConfig.UnsafeSsl + c.ClientConfig.UnsafeSsl = true + if err := c.Connect(""); err == nil { + msg = "Please use the -ssl flag to connect using SSL." + } + c.Ssl = false + c.ClientConfig.UnsafeSsl = unsafeSsl + } else if c.Ssl && !c.ClientConfig.UnsafeSsl && strings.Contains(err.Error(), "certificate is valid for") { + // Attempt to connect with an insecure connection just to see if it works. + c.ClientConfig.UnsafeSsl = true + if err := c.Connect(""); err == nil { + msg = "You may use -unsafeSsl to connect anyway, but the SSL connection will not be secure." + } + c.ClientConfig.UnsafeSsl = false + } + return fmt.Errorf("Failed to connect to %s: %s\n%s", c.Client.Addr(), err.Error(), msg) + } + + // Modify precision. + c.SetPrecision(c.ClientConfig.Precision) + + if c.Execute != "" { + // Make the non-interactive mode send everything through the CLI's parser + // the same way the interactive mode works + lines := strings.Split(c.Execute, "\n") + for _, line := range lines { + if err := c.ParseCommand(line); err != nil { + return err + } + } + return nil + } + + if c.Import { + addr := net.JoinHostPort(c.Host, strconv.Itoa(c.Port)) + u, e := client.ParseConnectionString(addr, c.Ssl) + if e != nil { + return e + } + + // Copy the latest importer config and inject the latest client config + // into it. + config := c.ImporterConfig + config.Config = c.ClientConfig + config.URL = u + + i := v8.NewImporter(config) + if err := i.Import(); err != nil { + err = fmt.Errorf("ERROR: %s", err) + return err + } + return nil + } + + if !hasTTY { + cmd, err := ioutil.ReadAll(os.Stdin) + if err != nil { + return err + } + return c.ExecuteQuery(string(cmd)) + } + + if !c.IgnoreSignals { + // register OS signals for graceful termination + signal.Notify(c.osSignals, syscall.SIGINT, syscall.SIGTERM) + } + + c.Line = liner.NewLiner() + defer c.Line.Close() + + c.Line.SetMultiLineMode(true) + + if len(c.ServerVersion) == 0 { + fmt.Printf("WARN: Connected to %s, but found no server version.\n", c.Client.Addr()) + fmt.Printf("Are you sure an InfluxDB server is listening at the given address?\n") + } else { + fmt.Printf("Connected to %s version %s\n", c.Client.Addr(), c.ServerVersion) + } + + c.Version() + + // Only load/write history if HOME environment variable is set. + var historyDir string + if runtime.GOOS == "windows" { + if userDir := os.Getenv("USERPROFILE"); userDir != "" { + historyDir = userDir + } + } + + if homeDir := os.Getenv("HOME"); homeDir != "" { + historyDir = homeDir + } + + // Attempt to load the history file. + if historyDir != "" { + c.historyFilePath = filepath.Join(historyDir, ".influx_history") + if historyFile, err := os.Open(c.historyFilePath); err == nil { + c.Line.ReadHistory(historyFile) + historyFile.Close() + } + } + + // read from prompt until exit is run + return c.mainLoop() +} + +// mainLoop runs the main prompt loop for the CLI. 
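As a rough sketch of driving Run non-interactively (mirroring how the package's own tests configure it): the host, port, version string and statement below are placeholders, and an influxd reachable at that address is assumed.

package main

import (
	"fmt"
	"os"

	"github.com/influxdata/influxdb/cmd/influx/cli"
)

func main() {
	c := cli.New("1.5.x") // client version shown in the connection banner
	c.Host = "localhost"
	c.Port = 8086
	c.ClientConfig.Precision = "ms"
	c.IgnoreSignals = true

	// With Execute set, Run sends each line through ParseCommand and exits
	// instead of starting the interactive prompt.
	c.Execute = "SHOW DATABASES"

	if err := c.Run(); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}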
+func (c *CommandLine) mainLoop() error { + for { + select { + case <-c.osSignals: + c.exit() + return nil + case <-c.Quit: + c.exit() + return nil + default: + l, e := c.Line.Prompt("> ") + if e == io.EOF { + // Instead of die, register that someone exited the program gracefully + l = "exit" + } else if e != nil { + c.exit() + return e + } + if err := c.ParseCommand(l); err != ErrBlankCommand && !strings.HasPrefix(strings.TrimSpace(l), "auth") { + l = influxql.Sanitize(l) + c.Line.AppendHistory(l) + c.saveHistory() + } + } + } +} + +// ParseCommand parses an instruction and calls the related method +// or executes the command as a query against InfluxDB. +func (c *CommandLine) ParseCommand(cmd string) error { + lcmd := strings.TrimSpace(strings.ToLower(cmd)) + tokens := strings.Fields(lcmd) + + if len(tokens) > 0 { + switch tokens[0] { + case "exit", "quit": + close(c.Quit) + case "gopher": + c.gopher() + case "connect": + return c.Connect(cmd) + case "auth": + c.SetAuth(cmd) + case "help": + c.help() + case "history": + c.history() + case "format": + c.SetFormat(cmd) + case "precision": + c.SetPrecision(cmd) + case "consistency": + c.SetWriteConsistency(cmd) + case "settings": + c.Settings() + case "chunked": + c.Chunked = !c.Chunked + if c.Chunked { + fmt.Println("chunked responses enabled") + } else { + fmt.Println("chunked reponses disabled") + } + case "chunk": + c.SetChunkSize(cmd) + case "pretty": + c.Pretty = !c.Pretty + if c.Pretty { + fmt.Println("Pretty print enabled") + } else { + fmt.Println("Pretty print disabled") + } + case "use": + c.use(cmd) + case "node": + c.node(cmd) + case "insert": + return c.Insert(cmd) + case "clear": + c.clear(cmd) + default: + return c.ExecuteQuery(cmd) + } + + return nil + } + return ErrBlankCommand +} + +// Connect connects to a server. +func (c *CommandLine) Connect(cmd string) error { + // normalize cmd + cmd = strings.ToLower(cmd) + + // Remove the "connect" keyword if it exists + addr := strings.TrimSpace(strings.Replace(cmd, "connect", "", -1)) + if addr == "" { + // If they didn't provide a connection string, use the current settings + addr = net.JoinHostPort(c.Host, strconv.Itoa(c.Port)) + } + + URL, err := client.ParseConnectionString(addr, c.Ssl) + if err != nil { + return err + } + + // Create copy of the current client config and create a new client. + ClientConfig := c.ClientConfig + ClientConfig.UserAgent = "InfluxDBShell/" + c.ClientVersion + ClientConfig.URL = URL + ClientConfig.Proxy = http.ProxyFromEnvironment + + client, err := client.NewClient(ClientConfig) + if err != nil { + return fmt.Errorf("Could not create client %s", err) + } + c.Client = client + + _, v, err := c.Client.Ping() + if err != nil { + return err + } + c.ServerVersion = v + + // Update the command with the current connection information + if host, port, err := net.SplitHostPort(ClientConfig.URL.Host); err == nil { + c.Host = host + if i, err := strconv.Atoi(port); err == nil { + c.Port = i + } + } + + return nil +} + +// SetAuth sets client authentication credentials. 
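ParseCommand can also be exercised directly; a small sketch of the purely local commands, which need no server connection, assuming only a zero-value CommandLine:

package main

import (
	"fmt"

	"github.com/influxdata/influxdb/cmd/influx/cli"
)

func main() {
	c := cli.CommandLine{}

	// These commands only mutate local CLI state.
	c.ParseCommand("format csv")
	c.ParseCommand("chunk size 50")
	c.ParseCommand("pretty")

	fmt.Println(c.Format, c.ChunkSize, c.Pretty) // csv 50 true
}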
+func (c *CommandLine) SetAuth(cmd string) { + // If they pass in the entire command, we should parse it + // auth + args := strings.Fields(cmd) + if len(args) == 3 { + args = args[1:] + } else { + args = []string{} + } + + if len(args) == 2 { + c.ClientConfig.Username = args[0] + c.ClientConfig.Password = args[1] + } else { + u, e := c.Line.Prompt("username: ") + if e != nil { + fmt.Printf("Unable to process input: %s", e) + return + } + c.ClientConfig.Username = strings.TrimSpace(u) + p, e := c.Line.PasswordPrompt("password: ") + if e != nil { + fmt.Printf("Unable to process input: %s", e) + return + } + c.ClientConfig.Password = p + } + + // Update the client as well + c.Client.SetAuth(c.ClientConfig.Username, c.ClientConfig.Password) +} + +func (c *CommandLine) clear(cmd string) { + args := strings.Split(strings.TrimSuffix(strings.TrimSpace(cmd), ";"), " ") + v := strings.ToLower(strings.Join(args[1:], " ")) + switch v { + case "database", "db": + c.Database = "" + fmt.Println("database context cleared") + return + case "retention policy", "rp": + c.RetentionPolicy = "" + fmt.Println("retention policy context cleared") + return + default: + if len(args) > 1 { + fmt.Printf("invalid command %q.\n", v) + } + fmt.Println(`Possible commands for 'clear' are: + # Clear the database context + clear database + clear db + + # Clear the retention policy context + clear retention policy + clear rp + `) + } +} + +func (c *CommandLine) use(cmd string) { + args := strings.SplitAfterN(strings.TrimSuffix(strings.TrimSpace(cmd), ";"), " ", 2) + if len(args) != 2 { + fmt.Printf("Could not parse database name from %q.\n", cmd) + return + } + + stmt := args[1] + db, rp, err := parseDatabaseAndRetentionPolicy([]byte(stmt)) + if err != nil { + fmt.Printf("Unable to parse database or retention policy from %s", stmt) + return + } + + if !c.databaseExists(db) { + fmt.Println("DB does not exist!") + return + } + + c.Database = db + fmt.Printf("Using database %s\n", db) + + if rp != "" { + if !c.retentionPolicyExists(db, rp) { + return + } + c.RetentionPolicy = rp + fmt.Printf("Using retention policy %s\n", rp) + } +} + +func (c *CommandLine) databaseExists(db string) bool { + // Validate if specified database exists + response, err := c.Client.Query(client.Query{Command: "SHOW DATABASES"}) + if err != nil { + fmt.Printf("ERR: %s\n", err) + return false + } else if err := response.Error(); err != nil { + if c.ClientConfig.Username == "" { + fmt.Printf("ERR: %s\n", err) + return false + } + // TODO(jsternberg): Fix SHOW DATABASES to be user-aware #6397. + // If we are unable to run SHOW DATABASES, display a warning and use the + // database anyway in case the person doesn't have permission to run the + // command, but does have permission to use the database. + fmt.Printf("WARN: %s\n", err) + } else { + // Verify the provided database exists + if databaseExists := func() bool { + for _, result := range response.Results { + for _, row := range result.Series { + if row.Name == "databases" { + for _, values := range row.Values { + for _, database := range values { + if database == db { + return true + } + } + } + } + } + } + return false + }(); !databaseExists { + fmt.Printf("ERR: Database %s doesn't exist. 
Run SHOW DATABASES for a list of existing databases.\n", db) + return false + } + } + return true +} + +func (c *CommandLine) retentionPolicyExists(db, rp string) bool { + // Validate if specified database exists + response, err := c.Client.Query(client.Query{Command: fmt.Sprintf("SHOW RETENTION POLICIES ON %q", db)}) + if err != nil { + fmt.Printf("ERR: %s\n", err) + return false + } else if err := response.Error(); err != nil { + if c.ClientConfig.Username == "" { + fmt.Printf("ERR: %s\n", err) + return false + } + fmt.Printf("WARN: %s\n", err) + } else { + // Verify the provided database exists + if retentionPolicyExists := func() bool { + for _, result := range response.Results { + for _, row := range result.Series { + for _, values := range row.Values { + for i, v := range values { + if i != 0 { + continue + } + if v == rp { + return true + } + } + } + } + } + return false + }(); !retentionPolicyExists { + fmt.Printf("ERR: RETENTION POLICY %s doesn't exist. Run SHOW RETENTION POLICIES ON %q for a list of existing retention polices.\n", rp, db) + return false + } + } + return true +} + +func (c *CommandLine) node(cmd string) { + args := strings.Split(strings.TrimSuffix(strings.TrimSpace(cmd), ";"), " ") + if len(args) != 2 { + fmt.Println("Improper number of arguments for 'node' command, requires exactly one.") + return + } + + if args[1] == "clear" { + c.NodeID = 0 + return + } + + id, err := strconv.Atoi(args[1]) + if err != nil { + fmt.Printf("Unable to parse node id from %s. Must be an integer or 'clear'.\n", args[1]) + return + } + c.NodeID = id +} + +// SetChunkSize sets the chunk size +// 0 sets it back to the default +func (c *CommandLine) SetChunkSize(cmd string) { + // normalize cmd + cmd = strings.ToLower(cmd) + cmd = strings.Join(strings.Fields(cmd), " ") + + // Remove the "chunk size" keyword if it exists + cmd = strings.TrimPrefix(cmd, "chunk size ") + + // Remove the "chunk" keyword if it exists + // allows them to use `chunk 50` as a shortcut + cmd = strings.TrimPrefix(cmd, "chunk ") + + if n, err := strconv.ParseInt(cmd, 10, 64); err == nil { + c.ChunkSize = int(n) + if c.ChunkSize <= 0 { + c.ChunkSize = 0 + } + fmt.Printf("chunk size set to %d\n", c.ChunkSize) + } else { + fmt.Printf("unable to parse chunk size from %q\n", cmd) + } +} + +// SetPrecision sets client precision. +func (c *CommandLine) SetPrecision(cmd string) { + // normalize cmd + cmd = strings.ToLower(cmd) + + // Remove the "precision" keyword if it exists + cmd = strings.TrimSpace(strings.Replace(cmd, "precision", "", -1)) + + switch cmd { + case "h", "m", "s", "ms", "u", "ns": + c.ClientConfig.Precision = cmd + c.Client.SetPrecision(c.ClientConfig.Precision) + case "rfc3339": + c.ClientConfig.Precision = "" + c.Client.SetPrecision(c.ClientConfig.Precision) + default: + fmt.Printf("Unknown precision %q. Please use rfc3339, h, m, s, ms, u or ns.\n", cmd) + } +} + +// SetFormat sets output format. +func (c *CommandLine) SetFormat(cmd string) { + // normalize cmd + cmd = strings.ToLower(cmd) + // Remove the "format" keyword if it exists + cmd = strings.TrimSpace(strings.Replace(cmd, "format", "", -1)) + + switch cmd { + case "json", "csv", "column": + c.Format = cmd + default: + fmt.Printf("Unknown format %q. Please use json, csv, or column.\n", cmd) + } +} + +// SetWriteConsistency sets write consistency level. 
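A quick sketch of how the consistency command behaves, again assuming a zero-value CommandLine: a recognised level is stored on the client config, anything else is rejected and the previous value is kept.

package main

import (
	"fmt"

	"github.com/influxdata/influxdb/cmd/influx/cli"
)

func main() {
	c := cli.CommandLine{}

	c.ParseCommand("consistency quorum")
	fmt.Println(c.ClientConfig.WriteConsistency) // quorum

	// "sometimes" is not a valid level, so the setting is left untouched.
	c.ParseCommand("consistency sometimes")
	fmt.Println(c.ClientConfig.WriteConsistency) // still quorum
}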
+func (c *CommandLine) SetWriteConsistency(cmd string) { + // normalize cmd + cmd = strings.ToLower(cmd) + // Remove the "consistency" keyword if it exists + cmd = strings.TrimSpace(strings.Replace(cmd, "consistency", "", -1)) + + _, err := models.ParseConsistencyLevel(cmd) + if err != nil { + fmt.Printf("Unknown consistency level %q. Please use any, one, quorum, or all.\n", cmd) + return + } + c.ClientConfig.WriteConsistency = cmd +} + +// isWhitespace returns true if the rune is a space, tab, or newline. +func isWhitespace(ch rune) bool { return ch == ' ' || ch == '\t' || ch == '\n' } + +// isLetter returns true if the rune is a letter. +func isLetter(ch rune) bool { return (ch >= 'a' && ch <= 'z') || (ch >= 'A' && ch <= 'Z') } + +// isDigit returns true if the rune is a digit. +func isDigit(ch rune) bool { return (ch >= '0' && ch <= '9') } + +// isIdentFirstChar returns true if the rune can be used as the first char in an unquoted identifer. +func isIdentFirstChar(ch rune) bool { return isLetter(ch) || ch == '_' } + +// isIdentChar returns true if the rune can be used in an unquoted identifier. +func isNotIdentChar(ch rune) bool { return !(isLetter(ch) || isDigit(ch) || ch == '_') } + +func parseUnquotedIdentifier(stmt string) (string, string) { + if fields := strings.FieldsFunc(stmt, isNotIdentChar); len(fields) > 0 { + return fields[0], strings.TrimPrefix(stmt, fields[0]) + } + return "", stmt +} + +func parseDoubleQuotedIdentifier(stmt string) (string, string) { + escapeNext := false + fields := strings.FieldsFunc(stmt, func(ch rune) bool { + if ch == '\\' { + escapeNext = true + } else if ch == '"' { + if !escapeNext { + return true + } + escapeNext = false + } + return false + }) + if len(fields) > 0 { + return fields[0], strings.TrimPrefix(stmt, "\""+fields[0]+"\"") + } + return "", stmt +} + +func parseNextIdentifier(stmt string) (ident, remainder string) { + if len(stmt) > 0 { + switch { + case isWhitespace(rune(stmt[0])): + return parseNextIdentifier(stmt[1:]) + case isIdentFirstChar(rune(stmt[0])): + return parseUnquotedIdentifier(stmt) + case stmt[0] == '"': + return parseDoubleQuotedIdentifier(stmt) + } + } + return "", stmt +} + +func (c *CommandLine) parseInto(stmt string) *client.BatchPoints { + ident, stmt := parseNextIdentifier(stmt) + db, rp := c.Database, c.RetentionPolicy + if strings.HasPrefix(stmt, ".") { + db = ident + ident, stmt = parseNextIdentifier(stmt[1:]) + } + if strings.HasPrefix(stmt, " ") { + rp = ident + stmt = stmt[1:] + } + + return &client.BatchPoints{ + Points: []client.Point{ + client.Point{Raw: stmt}, + }, + Database: db, + RetentionPolicy: rp, + Precision: c.ClientConfig.Precision, + WriteConsistency: c.ClientConfig.WriteConsistency, + } +} + +func (c *CommandLine) parseInsert(stmt string) (*client.BatchPoints, error) { + i, point := parseNextIdentifier(stmt) + if !strings.EqualFold(i, "insert") { + return nil, fmt.Errorf("found %s, expected INSERT", i) + } + if i, r := parseNextIdentifier(point); strings.EqualFold(i, "into") { + bp := c.parseInto(r) + return bp, nil + } + return &client.BatchPoints{ + Points: []client.Point{ + client.Point{Raw: point}, + }, + Database: c.Database, + RetentionPolicy: c.RetentionPolicy, + Precision: c.ClientConfig.Precision, + WriteConsistency: c.ClientConfig.WriteConsistency, + }, nil +} + +// Insert runs an INSERT statement. 
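The INTO handling above leans on parseNextIdentifier to peel bare and double-quoted identifiers off the front of a statement. A hypothetical in-package test sketch of that behaviour (not part of the upstream test suite; it would have to live in package cli):

package cli

import "testing"

func TestParseNextIdentifier_Sketch(t *testing.T) {
	// Bare identifiers stop at the first non-identifier character.
	ident, rest := parseNextIdentifier(`telegraf.autogen cpu value=1`)
	if ident != "telegraf" || rest != ".autogen cpu value=1" {
		t.Fatalf("got %q / %q", ident, rest)
	}

	// Double-quoted identifiers may contain spaces and dots.
	ident, rest = parseNextIdentifier(`"my db".autogen cpu value=1`)
	if ident != "my db" || rest != ".autogen cpu value=1" {
		t.Fatalf("got %q / %q", ident, rest)
	}
}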
+func (c *CommandLine) Insert(stmt string) error { + bp, err := c.parseInsert(stmt) + if err != nil { + fmt.Printf("ERR: %s\n", err) + return nil + } + if _, err := c.Client.Write(*bp); err != nil { + fmt.Printf("ERR: %s\n", err) + if c.Database == "" { + fmt.Println("Note: error may be due to not setting a database or retention policy.") + fmt.Println(`Please set a database with the command "use " or`) + fmt.Println("INSERT INTO . ") + } + } + return nil +} + +// query creates a query struct to be used with the client. +func (c *CommandLine) query(query string) client.Query { + return client.Query{ + Command: query, + Database: c.Database, + RetentionPolicy: c.RetentionPolicy, + Chunked: c.Chunked, + ChunkSize: c.ChunkSize, + NodeID: c.NodeID, + } +} + +// ExecuteQuery runs any query statement. +func (c *CommandLine) ExecuteQuery(query string) error { + // If we have a retention policy, we need to rewrite the statement sources + if c.RetentionPolicy != "" { + pq, err := influxql.NewParser(strings.NewReader(query)).ParseQuery() + if err != nil { + fmt.Printf("ERR: %s\n", err) + return err + } + for _, stmt := range pq.Statements { + if selectStatement, ok := stmt.(*influxql.SelectStatement); ok { + influxql.WalkFunc(selectStatement.Sources, func(n influxql.Node) { + if t, ok := n.(*influxql.Measurement); ok { + if t.Database == "" && c.Database != "" { + t.Database = c.Database + } + if t.RetentionPolicy == "" && c.RetentionPolicy != "" { + t.RetentionPolicy = c.RetentionPolicy + } + } + }) + } + } + query = pq.String() + } + + ctx := context.Background() + if !c.IgnoreSignals { + done := make(chan struct{}) + defer close(done) + + var cancel func() + ctx, cancel = context.WithCancel(ctx) + go func() { + select { + case <-done: + case <-c.osSignals: + cancel() + } + }() + } + + response, err := c.Client.QueryContext(ctx, c.query(query)) + if err != nil { + if err.Error() == "" { + err = ctx.Err() + if err == context.Canceled { + err = errors.New("aborted by user") + } + } + fmt.Printf("ERR: %s\n", err) + return err + } + c.FormatResponse(response, os.Stdout) + if err := response.Error(); err != nil { + fmt.Printf("ERR: %s\n", response.Error()) + if c.Database == "" { + fmt.Println("Warning: It is possible this error is due to not setting a database.") + fmt.Println(`Please set a database with the command "use ".`) + } + return err + } + return nil +} + +// FormatResponse formats output to the previously chosen format. 
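The retention-policy rewrite in ExecuteQuery can be illustrated in isolation; a minimal sketch using the influxql parser, where the database and retention policy names are placeholders:

package main

import (
	"fmt"
	"strings"

	"github.com/influxdata/influxql"
)

func main() {
	pq, err := influxql.NewParser(strings.NewReader("SELECT value FROM cpu")).ParseQuery()
	if err != nil {
		panic(err)
	}

	// Qualify every unqualified measurement, as ExecuteQuery does when a
	// session database and retention policy are set.
	for _, stmt := range pq.Statements {
		if sel, ok := stmt.(*influxql.SelectStatement); ok {
			influxql.WalkFunc(sel.Sources, func(n influxql.Node) {
				if m, ok := n.(*influxql.Measurement); ok {
					if m.Database == "" {
						m.Database = "telegraf"
					}
					if m.RetentionPolicy == "" {
						m.RetentionPolicy = "autogen"
					}
				}
			})
		}
	}

	fmt.Println(pq.String()) // SELECT value FROM telegraf.autogen.cpu
}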
+func (c *CommandLine) FormatResponse(response *client.Response, w io.Writer) { + switch c.Format { + case "json": + c.writeJSON(response, w) + case "csv": + c.writeCSV(response, w) + case "column": + c.writeColumns(response, w) + default: + fmt.Fprintf(w, "Unknown output format %q.\n", c.Format) + } +} + +func (c *CommandLine) writeJSON(response *client.Response, w io.Writer) { + var data []byte + var err error + if c.Pretty { + data, err = json.MarshalIndent(response, "", " ") + } else { + data, err = json.Marshal(response) + } + if err != nil { + fmt.Fprintf(w, "Unable to parse json: %s\n", err) + return + } + fmt.Fprintln(w, string(data)) +} + +func tagsEqual(prev, current map[string]string) bool { + return reflect.DeepEqual(prev, current) +} + +func columnsEqual(prev, current []string) bool { + return reflect.DeepEqual(prev, current) +} + +func headersEqual(prev, current models.Row) bool { + if prev.Name != current.Name { + return false + } + return tagsEqual(prev.Tags, current.Tags) && columnsEqual(prev.Columns, current.Columns) +} + +func (c *CommandLine) writeCSV(response *client.Response, w io.Writer) { + csvw := csv.NewWriter(w) + var previousHeaders models.Row + for _, result := range response.Results { + suppressHeaders := len(result.Series) > 0 && headersEqual(previousHeaders, result.Series[0]) + if !suppressHeaders && len(result.Series) > 0 { + previousHeaders = models.Row{ + Name: result.Series[0].Name, + Tags: result.Series[0].Tags, + Columns: result.Series[0].Columns, + } + } + + // Create a tabbed writer for each result as they won't always line up + rows := c.formatResults(result, "\t", suppressHeaders) + for _, r := range rows { + csvw.Write(strings.Split(r, "\t")) + } + } + csvw.Flush() +} + +func (c *CommandLine) writeColumns(response *client.Response, w io.Writer) { + // Create a tabbed writer for each result as they won't always line up + writer := new(tabwriter.Writer) + writer.Init(w, 0, 8, 1, ' ', 0) + + var previousHeaders models.Row + for i, result := range response.Results { + // Print out all messages first + for _, m := range result.Messages { + fmt.Fprintf(w, "%s: %s.\n", m.Level, m.Text) + } + // Check to see if the headers are the same as the previous row. If so, suppress them in the output + suppressHeaders := len(result.Series) > 0 && headersEqual(previousHeaders, result.Series[0]) + if !suppressHeaders && len(result.Series) > 0 { + previousHeaders = models.Row{ + Name: result.Series[0].Name, + Tags: result.Series[0].Tags, + Columns: result.Series[0].Columns, + } + } + + // If we are suppressing headers, don't output the extra line return. If we + // aren't suppressing headers, then we put out line returns between results + // (not before the first result, and not after the last result). 
+ if !suppressHeaders && i > 0 { + fmt.Fprintln(writer, "") + } + + rows := c.formatResults(result, "\t", suppressHeaders) + for _, r := range rows { + fmt.Fprintln(writer, r) + } + + } + writer.Flush() +} + +// formatResults will behave differently if you are formatting for columns or csv +func (c *CommandLine) formatResults(result client.Result, separator string, suppressHeaders bool) []string { + rows := []string{} + // Create a tabbed writer for each result as they won't always line up + for i, row := range result.Series { + // gather tags + tags := []string{} + for k, v := range row.Tags { + tags = append(tags, fmt.Sprintf("%s=%s", k, v)) + sort.Strings(tags) + } + + columnNames := []string{} + + // Only put name/tags in a column if format is csv + if c.Format == "csv" { + if len(tags) > 0 { + columnNames = append([]string{"tags"}, columnNames...) + } + + if row.Name != "" { + columnNames = append([]string{"name"}, columnNames...) + } + } + + columnNames = append(columnNames, row.Columns...) + + // Output a line separator if we have more than one set or results and format is column + if i > 0 && c.Format == "column" && !suppressHeaders { + rows = append(rows, "") + } + + // If we are column format, we break out the name/tag to separate lines + if c.Format == "column" && !suppressHeaders { + if row.Name != "" { + n := fmt.Sprintf("name: %s", row.Name) + rows = append(rows, n) + } + if len(tags) > 0 { + t := fmt.Sprintf("tags: %s", (strings.Join(tags, ", "))) + rows = append(rows, t) + } + } + + if !suppressHeaders { + rows = append(rows, strings.Join(columnNames, separator)) + } + + // if format is column, write dashes under each column + if c.Format == "column" && !suppressHeaders { + lines := []string{} + for _, columnName := range columnNames { + lines = append(lines, strings.Repeat("-", len(columnName))) + } + rows = append(rows, strings.Join(lines, separator)) + } + + for _, v := range row.Values { + var values []string + if c.Format == "csv" { + if row.Name != "" { + values = append(values, row.Name) + } + if len(tags) > 0 { + values = append(values, strings.Join(tags, ",")) + } + } + + for _, vv := range v { + values = append(values, interfaceToString(vv)) + } + rows = append(rows, strings.Join(values, separator)) + } + } + return rows +} + +func interfaceToString(v interface{}) string { + switch t := v.(type) { + case nil: + return "" + case bool: + return fmt.Sprintf("%v", v) + case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, uintptr: + return fmt.Sprintf("%d", t) + case float32, float64: + return fmt.Sprintf("%v", t) + default: + return fmt.Sprintf("%v", t) + } +} + +// Settings prints current settings. 
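For a single series with one tag, the column writer above renders roughly the following (column spacing comes from tabwriter; the timestamp and value are example figures only):

name: cpu
tags: host=serverA
time                value
----                -----
1521232800000000000 0.64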
+func (c *CommandLine) Settings() { + w := new(tabwriter.Writer) + w.Init(os.Stdout, 0, 1, 1, ' ', 0) + fmt.Fprintln(w, "Setting\tValue") + fmt.Fprintln(w, "--------\t--------") + if c.Port > 0 { + fmt.Fprintf(w, "Host\t%s:%d\n", c.Host, c.Port) + } else { + fmt.Fprintf(w, "Host\t%s\n", c.Host) + } + fmt.Fprintf(w, "Username\t%s\n", c.ClientConfig.Username) + fmt.Fprintf(w, "Database\t%s\n", c.Database) + fmt.Fprintf(w, "RetentionPolicy\t%s\n", c.RetentionPolicy) + fmt.Fprintf(w, "Pretty\t%v\n", c.Pretty) + fmt.Fprintf(w, "Format\t%s\n", c.Format) + fmt.Fprintf(w, "Write Consistency\t%s\n", c.ClientConfig.WriteConsistency) + fmt.Fprintf(w, "Chunked\t%v\n", c.Chunked) + fmt.Fprintf(w, "Chunk Size\t%d\n", c.ChunkSize) + fmt.Fprintln(w) + w.Flush() +} + +func (c *CommandLine) help() { + fmt.Println(`Usage: + connect connects to another node specified by host:port + auth prompts for username and password + pretty toggles pretty print for the json format + chunked turns on chunked responses from server + chunk size sets the size of the chunked responses. Set to 0 to reset to the default chunked size + use sets current database + format specifies the format of the server responses: json, csv, or column + precision specifies the format of the timestamp: rfc3339, h, m, s, ms, u or ns + consistency sets write consistency level: any, one, quorum, or all + history displays command history + settings outputs the current settings for the shell + clear clears settings such as database or retention policy. run 'clear' for help + exit/quit/ctrl+d quits the influx shell + + show databases show database names + show series show series information + show measurements show measurement information + show tag keys show tag key information + show field keys show field key information + + A full list of influxql commands can be found at: + https://docs.influxdata.com/influxdb/latest/query_language/spec/`) +} + +func (c *CommandLine) history() { + var buf bytes.Buffer + c.Line.WriteHistory(&buf) + fmt.Print(buf.String()) +} + +func (c *CommandLine) saveHistory() { + if c.historyFilePath == "" { + return + } + if historyFile, err := os.Create(c.historyFilePath); err != nil { + fmt.Printf("There was an error writing history file: %s\n", err) + } else { + c.Line.WriteHistory(historyFile) + historyFile.Close() + } +} + +func (c *CommandLine) gopher() { + fmt.Println(` + .-::-::://:-::- .:/++/' + '://:-''/oo+//++o+/.://o- ./+: + .:-. '++- .o/ '+yydhy' o- + .:/. .h: :osoys .smMN- :/ + -/:.' s- /MMMymh. '/y/ s' + -+s:'''' d -mMMms// '-/o: + -/++/++/////:. o: '... s- :s. + :+-+s-' ':/' 's- /+ 'o: + '+-'o: /ydhsh. '//. '-o- o- + .y. o: .MMMdm+y ':+++:::/+:.' s: + .-h/ y- 'sdmds'h -+ydds:::-.' 'h. + .//-.d' o: '.' 'dsNMMMNh:.:++' :y + +y. 'd 's. .s:mddds: ++ o/ + 'N- odd 'o/. './o-s-' .---+++' o- + 'N' yNd .://:/:::::. -s -+/s/./s' 'o/' + so' .h '''' ////s: '+. .s +y' + os/-.y' 's' 'y::+ +d' + '.:o/ -+:-:.' so.---.' + o' 'd-.''/s' + .s' :y.''.y + -s mo:::' + :: yh + // '''' /M' + o+ .s///:/. 'N: + :+ /: -s' ho + 's- -/s/:+/.+h' +h + ys' ':' '-. -d + oh .h + /o .s + s. .h + -y .d + m/ -h + +d /o + 'N- y: + h: m. + s- -d + o- s+ + +- 'm' + s/ oo--. + y- /s ':+' + s' 'od--' .d: + -+ ':o: ':+-/+ + y- .:+- ' + //o- '.:+/. + .-:+/' ''-/+/. + ./:' ''.:o+/-' + .+o:/:/+-' ''.-+ooo/-' + o: -h///++////-. + /: .o/ + //+ 'y + ./sooy.`) +} + +// Version prints the CLI version. 
+func (c *CommandLine) Version() { + fmt.Println("InfluxDB shell version:", c.ClientVersion) +} + +func (c *CommandLine) exit() { + // write to history file + c.saveHistory() + // release line resources + c.Line.Close() + c.Line = nil +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx/cli/cli_internal_test.go b/vendor/github.com/influxdata/influxdb/cmd/influx/cli/cli_internal_test.go new file mode 100644 index 0000000..a1eacd0 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx/cli/cli_internal_test.go @@ -0,0 +1,58 @@ +package cli + +import "testing" + +func TestParseCommand_InsertInto(t *testing.T) { + t.Parallel() + + c := CommandLine{} + + tests := []struct { + cmd, db, rp string + }{ + { + cmd: `INSERT INTO test cpu,host=serverA,region=us-west value=1.0`, + db: "", + rp: "test", + }, + { + cmd: ` INSERT INTO .test cpu,host=serverA,region=us-west value=1.0`, + db: "", + rp: "test", + }, + { + cmd: `INSERT INTO "test test" cpu,host=serverA,region=us-west value=1.0`, + db: "", + rp: "test test", + }, + { + cmd: `Insert iNTO test.test cpu,host=serverA,region=us-west value=1.0`, + db: "test", + rp: "test", + }, + { + cmd: `insert into "test test" cpu,host=serverA,region=us-west value=1.0`, + db: "", + rp: "test test", + }, + { + cmd: `insert into "d b"."test test" cpu,host=serverA,region=us-west value=1.0`, + db: "d b", + rp: "test test", + }, + } + + for _, test := range tests { + t.Logf("command: %s", test.cmd) + bp, err := c.parseInsert(test.cmd) + if err != nil { + t.Fatal(err) + } + if bp.Database != test.db { + t.Fatalf(`Command "insert into" db parsing failed, expected: %q, actual: %q`, test.db, bp.Database) + } + if bp.RetentionPolicy != test.rp { + t.Fatalf(`Command "insert into" rp parsing failed, expected: %q, actual: %q`, test.rp, bp.RetentionPolicy) + } + } +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx/cli/cli_test.go b/vendor/github.com/influxdata/influxdb/cmd/influx/cli/cli_test.go new file mode 100644 index 0000000..7ecd424 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx/cli/cli_test.go @@ -0,0 +1,674 @@ +package cli_test + +import ( + "bufio" + "bytes" + "fmt" + "io" + "net" + "net/http" + "net/http/httptest" + "net/url" + "strconv" + "strings" + "testing" + + "github.com/influxdata/influxdb/client" + "github.com/influxdata/influxdb/cmd/influx/cli" + "github.com/influxdata/influxql" + "github.com/peterh/liner" +) + +const ( + CLIENT_VERSION = "y.y" + SERVER_VERSION = "x.x" +) + +func TestNewCLI(t *testing.T) { + t.Parallel() + c := cli.New(CLIENT_VERSION) + + if c == nil { + t.Fatal("CommandLine shouldn't be nil.") + } + + if c.ClientVersion != CLIENT_VERSION { + t.Fatalf("CommandLine version is %s but should be %s", c.ClientVersion, CLIENT_VERSION) + } +} + +func TestRunCLI(t *testing.T) { + t.Parallel() + ts := emptyTestServer() + defer ts.Close() + + u, _ := url.Parse(ts.URL) + h, p, _ := net.SplitHostPort(u.Host) + c := cli.New(CLIENT_VERSION) + c.Host = h + c.Port, _ = strconv.Atoi(p) + c.IgnoreSignals = true + c.ForceTTY = true + go func() { + close(c.Quit) + }() + if err := c.Run(); err != nil { + t.Fatalf("Run failed with error: %s", err) + } +} + +func TestRunCLI_ExecuteInsert(t *testing.T) { + t.Parallel() + ts := emptyTestServer() + defer ts.Close() + + u, _ := url.Parse(ts.URL) + h, p, _ := net.SplitHostPort(u.Host) + c := cli.New(CLIENT_VERSION) + c.Host = h + c.Port, _ = strconv.Atoi(p) + c.ClientConfig.Precision = "ms" + c.Execute = "INSERT sensor,floor=1 value=2" + c.IgnoreSignals = true 
+ c.ForceTTY = true + if err := c.Run(); err != nil { + t.Fatalf("Run failed with error: %s", err) + } +} + +func TestRunCLI_WithSignals(t *testing.T) { + t.Parallel() + ts := emptyTestServer() + defer ts.Close() + + u, _ := url.Parse(ts.URL) + h, p, _ := net.SplitHostPort(u.Host) + c := cli.New(CLIENT_VERSION) + c.Host = h + c.Port, _ = strconv.Atoi(p) + c.IgnoreSignals = false + c.ForceTTY = true + go func() { + close(c.Quit) + }() + if err := c.Run(); err != nil { + t.Fatalf("Run failed with error: %s", err) + } +} + +func TestRunCLI_ExecuteInsert_WithSignals(t *testing.T) { + t.Parallel() + ts := emptyTestServer() + defer ts.Close() + + u, _ := url.Parse(ts.URL) + h, p, _ := net.SplitHostPort(u.Host) + c := cli.New(CLIENT_VERSION) + c.Host = h + c.Port, _ = strconv.Atoi(p) + c.ClientConfig.Precision = "ms" + c.Execute = "INSERT sensor,floor=1 value=2" + c.IgnoreSignals = false + c.ForceTTY = true + if err := c.Run(); err != nil { + t.Fatalf("Run failed with error: %s", err) + } +} + +func TestSetAuth(t *testing.T) { + t.Parallel() + c := cli.New(CLIENT_VERSION) + config := client.NewConfig() + client, _ := client.NewClient(config) + c.Client = client + u := "userx" + p := "pwdy" + c.SetAuth("auth " + u + " " + p) + + // validate CLI configuration + if c.ClientConfig.Username != u { + t.Fatalf("Username is %s but should be %s", c.ClientConfig.Username, u) + } + if c.ClientConfig.Password != p { + t.Fatalf("Password is %s but should be %s", c.ClientConfig.Password, p) + } +} + +func TestSetPrecision(t *testing.T) { + t.Parallel() + c := cli.New(CLIENT_VERSION) + config := client.NewConfig() + client, _ := client.NewClient(config) + c.Client = client + + // validate set non-default precision + p := "ns" + c.SetPrecision("precision " + p) + if c.ClientConfig.Precision != p { + t.Fatalf("Precision is %s but should be %s", c.ClientConfig.Precision, p) + } + up := "NS" + c.SetPrecision("PRECISION " + up) + if c.ClientConfig.Precision != p { + t.Fatalf("Precision is %s but should be %s", c.ClientConfig.Precision, p) + } + mixed := "ns" + c.SetPrecision("PRECISION " + mixed) + if c.ClientConfig.Precision != p { + t.Fatalf("Precision is %s but should be %s", c.ClientConfig.Precision, p) + } + + // validate set default precision which equals empty string + p = "rfc3339" + c.SetPrecision("precision " + p) + if c.ClientConfig.Precision != "" { + t.Fatalf("Precision is %s but should be empty", c.ClientConfig.Precision) + } + p = "RFC3339" + c.SetPrecision("precision " + p) + if c.ClientConfig.Precision != "" { + t.Fatalf("Precision is %s but should be empty", c.ClientConfig.Precision) + } +} + +func TestSetFormat(t *testing.T) { + t.Parallel() + c := cli.New(CLIENT_VERSION) + config := client.NewConfig() + client, _ := client.NewClient(config) + c.Client = client + + // validate set non-default format + f := "json" + c.SetFormat("format " + f) + if c.Format != f { + t.Fatalf("Format is %s but should be %s", c.Format, f) + } + + uf := "JSON" + c.SetFormat("format " + uf) + if c.Format != f { + t.Fatalf("Format is %s but should be %s", c.Format, f) + } + mixed := "json" + c.SetFormat("FORMAT " + mixed) + if c.Format != f { + t.Fatalf("Format is %s but should be %s", c.Format, f) + } +} + +func Test_SetChunked(t *testing.T) { + t.Parallel() + c := cli.New(CLIENT_VERSION) + config := client.NewConfig() + client, _ := client.NewClient(config) + c.Client = client + + // make sure chunked is on by default + if got, exp := c.Chunked, true; got != exp { + t.Fatalf("chunked should be on by default. 
got %v, exp %v", got, exp) + } + + // turn chunked off + if err := c.ParseCommand("Chunked"); err != nil { + t.Fatalf("setting chunked failed: err: %s", err) + } + + if got, exp := c.Chunked, false; got != exp { + t.Fatalf("setting chunked failed. got %v, exp %v", got, exp) + } + + // turn chunked back on + if err := c.ParseCommand("Chunked"); err != nil { + t.Fatalf("setting chunked failed: err: %s", err) + } + + if got, exp := c.Chunked, true; got != exp { + t.Fatalf("setting chunked failed. got %v, exp %v", got, exp) + } +} + +func Test_SetChunkSize(t *testing.T) { + t.Parallel() + c := cli.New(CLIENT_VERSION) + config := client.NewConfig() + client, _ := client.NewClient(config) + c.Client = client + + // check default chunk size + if got, exp := c.ChunkSize, 0; got != exp { + t.Fatalf("unexpected chunk size. got %d, exp %d", got, exp) + } + + tests := []struct { + command string + exp int + }{ + {"chunk size 20", 20}, + {" CHunk siZE 55 ", 55}, + {"chunk 10", 10}, + {" chuNK 15", 15}, + {"chunk size -60", 0}, + {"chunk size 10", 10}, + {"chunk size 0", 0}, + {"chunk size 10", 10}, + {"chunk size junk", 10}, + } + + for _, test := range tests { + if err := c.ParseCommand(test.command); err != nil { + t.Logf("command: %q", test.command) + t.Fatalf("setting chunked failed: err: %s", err) + } + + if got, exp := c.ChunkSize, test.exp; got != exp { + t.Logf("command: %q", test.command) + t.Fatalf("unexpected chunk size. got %d, exp %d", got, exp) + } + } +} + +func TestSetWriteConsistency(t *testing.T) { + t.Parallel() + c := cli.New(CLIENT_VERSION) + config := client.NewConfig() + client, _ := client.NewClient(config) + c.Client = client + + // set valid write consistency + consistency := "all" + c.SetWriteConsistency("consistency " + consistency) + if c.ClientConfig.WriteConsistency != consistency { + t.Fatalf("WriteConsistency is %s but should be %s", c.ClientConfig.WriteConsistency, consistency) + } + + // set different valid write consistency and validate change + consistency = "quorum" + c.SetWriteConsistency("consistency " + consistency) + if c.ClientConfig.WriteConsistency != consistency { + t.Fatalf("WriteConsistency is %s but should be %s", c.ClientConfig.WriteConsistency, consistency) + } + + consistency = "QUORUM" + c.SetWriteConsistency("consistency " + consistency) + if c.ClientConfig.WriteConsistency != "quorum" { + t.Fatalf("WriteConsistency is %s but should be %s", c.ClientConfig.WriteConsistency, "quorum") + } + + consistency = "quorum" + c.SetWriteConsistency("CONSISTENCY " + consistency) + if c.ClientConfig.WriteConsistency != consistency { + t.Fatalf("WriteConsistency is %s but should be %s", c.ClientConfig.WriteConsistency, consistency) + } + + // set invalid write consistency and verify there was no change + invalidConsistency := "invalid_consistency" + c.SetWriteConsistency("consistency " + invalidConsistency) + if c.ClientConfig.WriteConsistency == invalidConsistency { + t.Fatalf("WriteConsistency is %s but should be %s", c.ClientConfig.WriteConsistency, consistency) + } +} + +func TestParseCommand_CommandsExist(t *testing.T) { + t.Parallel() + c, err := client.NewClient(client.Config{}) + if err != nil { + t.Fatalf("unexpected error %v", err) + } + + m := cli.CommandLine{Client: c, Line: liner.NewLiner()} + tests := []struct { + cmd string + }{ + {cmd: "gopher"}, + {cmd: "auth"}, + {cmd: "help"}, + {cmd: "format"}, + {cmd: "precision"}, + {cmd: "settings"}, + } + for _, test := range tests { + if err := m.ParseCommand(test.cmd); err != nil { + t.Fatalf(`Got error 
%v for command %q, expected nil`, err, test.cmd) + } + } +} + +func TestParseCommand_Connect(t *testing.T) { + t.Parallel() + ts := emptyTestServer() + defer ts.Close() + + u, _ := url.Parse(ts.URL) + cmd := "connect " + u.Host + c := cli.CommandLine{} + + // assert connection is established + if err := c.ParseCommand(cmd); err != nil { + t.Fatalf("There was an error while connecting to %v: %v", u.Path, err) + } + + // assert server version is populated + if c.ServerVersion != SERVER_VERSION { + t.Fatalf("Server version is %s but should be %s.", c.ServerVersion, SERVER_VERSION) + } +} + +func TestParseCommand_TogglePretty(t *testing.T) { + t.Parallel() + c := cli.CommandLine{} + if c.Pretty { + t.Fatalf(`Pretty should be false.`) + } + c.ParseCommand("pretty") + if !c.Pretty { + t.Fatalf(`Pretty should be true.`) + } + c.ParseCommand("pretty") + if c.Pretty { + t.Fatalf(`Pretty should be false.`) + } +} + +func TestParseCommand_Exit(t *testing.T) { + t.Parallel() + tests := []struct { + cmd string + }{ + {cmd: "exit"}, + {cmd: " exit"}, + {cmd: "exit "}, + {cmd: "Exit "}, + } + + for _, test := range tests { + c := cli.CommandLine{Quit: make(chan struct{}, 1)} + c.ParseCommand(test.cmd) + // channel should be closed + if _, ok := <-c.Quit; ok { + t.Fatalf(`Command "exit" failed for %q.`, test.cmd) + } + } +} + +func TestParseCommand_Quit(t *testing.T) { + t.Parallel() + tests := []struct { + cmd string + }{ + {cmd: "quit"}, + {cmd: " quit"}, + {cmd: "quit "}, + {cmd: "Quit "}, + } + + for _, test := range tests { + c := cli.CommandLine{Quit: make(chan struct{}, 1)} + c.ParseCommand(test.cmd) + // channel should be closed + if _, ok := <-c.Quit; ok { + t.Fatalf(`Command "quit" failed for %q.`, test.cmd) + } + } +} + +func TestParseCommand_Use(t *testing.T) { + t.Parallel() + ts := emptyTestServer() + defer ts.Close() + + u, _ := url.Parse(ts.URL) + config := client.Config{URL: *u} + c, err := client.NewClient(config) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + + tests := []struct { + cmd string + db string + }{ + {cmd: "use db", db: "db"}, + {cmd: " use db", db: "db"}, + {cmd: "use db ", db: "db"}, + {cmd: "use db;", db: "db"}, + {cmd: "use db; ", db: "db"}, + {cmd: "Use db", db: "db"}, + {cmd: `Use "db"`, db: "db"}, + {cmd: `Use "db db"`, db: "db db"}, + } + + for _, test := range tests { + m := cli.CommandLine{Client: c} + if err := m.ParseCommand(test.cmd); err != nil { + t.Fatalf(`Got error %v for command %q, expected nil.`, err, test.cmd) + } + + if m.Database != test.db { + t.Fatalf(`Command "%s" changed database to %q. Expected %s`, test.cmd, m.Database, test.db) + } + } +} + +func TestParseCommand_UseAuth(t *testing.T) { + t.Parallel() + ts := emptyTestServer() + defer ts.Close() + + u, _ := url.Parse(ts.URL) + tests := []struct { + cmd string + user string + database string + }{ + { + cmd: "use db", + user: "admin", + database: "db", + }, + { + cmd: "use blank", + user: "admin", + database: "", + }, + { + cmd: "use db", + user: "anonymous", + database: "db", + }, + { + cmd: "use blank", + user: "anonymous", + database: "blank", + }, + } + + for i, tt := range tests { + config := client.Config{URL: *u, Username: tt.user} + fmt.Println("using auth:", tt.user) + c, err := client.NewClient(config) + if err != nil { + t.Errorf("%d. unexpected error. expected %v, actual %v", i, nil, err) + continue + } + m := cli.CommandLine{Client: c} + m.ClientConfig.Username = tt.user + + if err := m.ParseCommand(tt.cmd); err != nil { + t.Fatalf(`%d. 
Got error %v for command %q, expected nil.`, i, err, tt.cmd) + } + + if m.Database != tt.database { + t.Fatalf(`%d. Command "use" changed database to %q. Expected %q`, i, m.Database, tt.database) + } + } +} + +func TestParseCommand_Consistency(t *testing.T) { + t.Parallel() + c := cli.CommandLine{} + tests := []struct { + cmd string + }{ + {cmd: "consistency one"}, + {cmd: " consistency one"}, + {cmd: "consistency one "}, + {cmd: "consistency one;"}, + {cmd: "consistency one; "}, + {cmd: "Consistency one"}, + } + + for _, test := range tests { + if err := c.ParseCommand(test.cmd); err != nil { + t.Fatalf(`Got error %v for command %q, expected nil.`, err, test.cmd) + } + + if c.ClientConfig.WriteConsistency != "one" { + t.Fatalf(`Command "consistency" changed consistency to %q. Expected one`, c.ClientConfig.WriteConsistency) + } + } +} + +func TestParseCommand_Insert(t *testing.T) { + t.Parallel() + ts := emptyTestServer() + defer ts.Close() + + u, _ := url.Parse(ts.URL) + config := client.Config{URL: *u} + c, err := client.NewClient(config) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + m := cli.CommandLine{Client: c} + + tests := []struct { + cmd string + }{ + {cmd: "INSERT cpu,host=serverA,region=us-west value=1.0"}, + {cmd: " INSERT cpu,host=serverA,region=us-west value=1.0"}, + {cmd: "INSERT cpu,host=serverA,region=us-west value=1.0"}, + {cmd: "insert cpu,host=serverA,region=us-west value=1.0 "}, + {cmd: "insert"}, + {cmd: "Insert "}, + {cmd: "insert c"}, + {cmd: "insert int"}, + } + + for _, test := range tests { + if err := m.ParseCommand(test.cmd); err != nil { + t.Fatalf(`Got error %v for command %q, expected nil.`, err, test.cmd) + } + } +} + +func TestParseCommand_History(t *testing.T) { + t.Parallel() + c := cli.CommandLine{Line: liner.NewLiner()} + defer c.Line.Close() + + // append one entry to history + c.Line.AppendHistory("abc") + + tests := []struct { + cmd string + }{ + {cmd: "history"}, + {cmd: " history"}, + {cmd: "history "}, + {cmd: "History "}, + } + + for _, test := range tests { + if err := c.ParseCommand(test.cmd); err != nil { + t.Fatalf(`Got error %v for command %q, expected nil.`, err, test.cmd) + } + } + + // buf size should be at least 1 + var buf bytes.Buffer + c.Line.WriteHistory(&buf) + if buf.Len() < 1 { + t.Fatal("History is borked") + } +} + +func TestParseCommand_HistoryWithBlankCommand(t *testing.T) { + t.Parallel() + c := cli.CommandLine{Line: liner.NewLiner()} + defer c.Line.Close() + + // append one entry to history + c.Line.AppendHistory("x") + + tests := []struct { + cmd string + err error + }{ + {cmd: "history"}, + {cmd: " history"}, + {cmd: "history "}, + {cmd: "", err: cli.ErrBlankCommand}, // shouldn't be persisted in history + {cmd: " ", err: cli.ErrBlankCommand}, // shouldn't be persisted in history + {cmd: " ", err: cli.ErrBlankCommand}, // shouldn't be persisted in history + } + + // a blank command will return cli.ErrBlankCommand. 
+ for _, test := range tests { + if err := c.ParseCommand(test.cmd); err != test.err { + t.Errorf(`Got error %v for command %q, expected %v`, err, test.cmd, test.err) + } + } + + // buf shall not contain empty commands + var buf bytes.Buffer + c.Line.WriteHistory(&buf) + scanner := bufio.NewScanner(&buf) + for scanner.Scan() { + if strings.TrimSpace(scanner.Text()) == "" { + t.Fatal("Empty commands should not be persisted in history.") + } + } +} + +// helper methods + +func emptyTestServer() *httptest.Server { + return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("X-Influxdb-Version", SERVER_VERSION) + + // Fake authorization entirely based on the username. + authorized := false + user, _, _ := r.BasicAuth() + switch user { + case "", "admin": + authorized = true + } + + switch r.URL.Path { + case "/query": + values := r.URL.Query() + parser := influxql.NewParser(bytes.NewBufferString(values.Get("q"))) + q, err := parser.ParseQuery() + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + return + } + stmt := q.Statements[0] + + switch stmt.(type) { + case *influxql.ShowDatabasesStatement: + if authorized { + io.WriteString(w, `{"results":[{"series":[{"name":"databases","columns":["name"],"values":[["db", "db db"]]}]}]}`) + } else { + w.WriteHeader(http.StatusUnauthorized) + io.WriteString(w, fmt.Sprintf(`{"error":"error authorizing query: %s not authorized to execute statement 'SHOW DATABASES', requires admin privilege"}`, user)) + } + case *influxql.ShowDiagnosticsStatement: + io.WriteString(w, `{"results":[{}]}`) + } + case "/write": + w.WriteHeader(http.StatusOK) + } + })) +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx/cli/parser.go b/vendor/github.com/influxdata/influxdb/cmd/influx/cli/parser.go new file mode 100644 index 0000000..4134c02 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx/cli/parser.go @@ -0,0 +1,34 @@ +package cli + +import ( + "bytes" + "fmt" +) + +func parseDatabaseAndRetentionPolicy(stmt []byte) (string, string, error) { + var db, rp []byte + var quoted bool + var seperatorCount int + + stmt = bytes.TrimSpace(stmt) + + for _, b := range stmt { + if b == '"' { + quoted = !quoted + continue + } + if b == '.' 
&& !quoted { + seperatorCount++ + if seperatorCount > 1 { + return "", "", fmt.Errorf("unable to parse database and retention policy from %s", string(stmt)) + } + continue + } + if seperatorCount == 1 { + rp = append(rp, b) + continue + } + db = append(db, b) + } + return string(db), string(rp), nil +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx/cli/parser_internal_test.go b/vendor/github.com/influxdata/influxdb/cmd/influx/cli/parser_internal_test.go new file mode 100644 index 0000000..5bf955c --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx/cli/parser_internal_test.go @@ -0,0 +1,90 @@ +package cli + +import ( + "errors" + "testing" +) + +func Test_parseDatabaseAndretentionPolicy(t *testing.T) { + tests := []struct { + stmt string + db string + rp string + err error + }{ + { + stmt: `foo`, + db: "foo", + }, + { + stmt: `"foo.bar"`, + db: "foo.bar", + }, + { + stmt: `"foo.bar".`, + db: "foo.bar", + }, + { + stmt: `."foo.bar"`, + rp: "foo.bar", + }, + { + stmt: `foo.bar`, + db: "foo", + rp: "bar", + }, + { + stmt: `"foo".bar`, + db: "foo", + rp: "bar", + }, + { + stmt: `"foo"."bar"`, + db: "foo", + rp: "bar", + }, + { + stmt: `"foo.bin"."bar"`, + db: "foo.bin", + rp: "bar", + }, + { + stmt: `"foo.bin"."bar.baz...."`, + db: "foo.bin", + rp: "bar.baz....", + }, + { + stmt: ` "foo.bin"."bar.baz...." `, + db: "foo.bin", + rp: "bar.baz....", + }, + + { + stmt: `"foo.bin"."bar".boom`, + err: errors.New("foo"), + }, + { + stmt: "foo.bar.", + err: errors.New("foo"), + }, + } + + for _, test := range tests { + db, rp, err := parseDatabaseAndRetentionPolicy([]byte(test.stmt)) + if err != nil && test.err == nil { + t.Errorf("unexpected error: got %s", err) + continue + } + if test.err != nil && err == nil { + t.Errorf("expected err: got: nil, exp: %s", test.err) + continue + } + if db != test.db { + t.Errorf("unexpected database: got: %s, exp: %s", db, test.db) + } + if rp != test.rp { + t.Errorf("unexpected retention policy: got: %s, exp: %s", rp, test.rp) + } + } + +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx/main.go b/vendor/github.com/influxdata/influxdb/cmd/influx/main.go new file mode 100644 index 0000000..4ab0fa5 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx/main.go @@ -0,0 +1,128 @@ +// The influx command is a CLI client to InfluxDB. +package main + +import ( + "flag" + "fmt" + "os" + "strings" + + "github.com/influxdata/influxdb/client" + "github.com/influxdata/influxdb/cmd/influx/cli" +) + +// These variables are populated via the Go linker. +var ( + version string +) + +const ( + // defaultFormat is the default format of the results when issuing queries + defaultFormat = "column" + + // defaultPrecision is the default timestamp format of the results when issuing queries + defaultPrecision = "ns" + + // defaultPPS is the default points per second that the import will throttle at + // by default it's 0, which means it will not throttle + defaultPPS = 0 +) + +func init() { + // If version is not set, make that clear. 
+ if version == "" { + version = "unknown" + } +} + +func main() { + c := cli.New(version) + + fs := flag.NewFlagSet("InfluxDB shell version "+version, flag.ExitOnError) + fs.StringVar(&c.Host, "host", client.DefaultHost, "Influxdb host to connect to.") + fs.IntVar(&c.Port, "port", client.DefaultPort, "Influxdb port to connect to.") + fs.StringVar(&c.ClientConfig.UnixSocket, "socket", "", "Influxdb unix socket to connect to.") + fs.StringVar(&c.ClientConfig.Username, "username", "", "Username to connect to the server.") + fs.StringVar(&c.ClientConfig.Password, "password", "", `Password to connect to the server. Leaving blank will prompt for password (--password="").`) + fs.StringVar(&c.Database, "database", c.Database, "Database to connect to the server.") + fs.BoolVar(&c.Ssl, "ssl", false, "Use https for connecting to cluster.") + fs.BoolVar(&c.ClientConfig.UnsafeSsl, "unsafeSsl", false, "Set this when connecting to the cluster using https and not use SSL verification.") + fs.StringVar(&c.Format, "format", defaultFormat, "Format specifies the format of the server responses: json, csv, or column.") + fs.StringVar(&c.ClientConfig.Precision, "precision", defaultPrecision, "Precision specifies the format of the timestamp: rfc3339,h,m,s,ms,u or ns.") + fs.StringVar(&c.ClientConfig.WriteConsistency, "consistency", "all", "Set write consistency level: any, one, quorum, or all.") + fs.BoolVar(&c.Pretty, "pretty", false, "Turns on pretty print for the json format.") + fs.IntVar(&c.NodeID, "node", 0, "Specify the node that data should be retrieved from (enterprise only).") + fs.StringVar(&c.Execute, "execute", c.Execute, "Execute command and quit.") + fs.BoolVar(&c.ShowVersion, "version", false, "Displays the InfluxDB version.") + fs.BoolVar(&c.Import, "import", false, "Import a previous database.") + fs.IntVar(&c.ImporterConfig.PPS, "pps", defaultPPS, "How many points per second the import will allow. By default it is zero and will not throttle importing.") + fs.StringVar(&c.ImporterConfig.Path, "path", "", "path to the file to import") + fs.BoolVar(&c.ImporterConfig.Compressed, "compressed", false, "set to true if the import file is compressed") + + // Define our own custom usage to print + fs.Usage = func() { + fmt.Println(`Usage of influx: + -version + Display the version and exit. + -host 'host name' + Host to connect to. + -port 'port #' + Port to connect to. + -socket 'unix domain socket' + Unix socket to connect to. + -database 'database name' + Database to connect to the server. + -password 'password' + Password to connect to the server. Leaving blank will prompt for password (--password ''). + -username 'username' + Username to connect to the server. + -ssl + Use https for requests. + -unsafeSsl + Set this when connecting to the cluster using https and not use SSL verification. + -execute 'command' + Execute command and quit. + -format 'json|csv|column' + Format specifies the format of the server responses: json, csv, or column. + -precision 'rfc3339|h|m|s|ms|u|ns' + Precision specifies the format of the timestamp: rfc3339, h, m, s, ms, u or ns. + -consistency 'any|one|quorum|all' + Set write consistency level: any, one, quorum, or all + -pretty + Turns on pretty print for the json format. + -import + Import a previous database export from file + -pps + How many points per second the import will allow. By default it is zero and will not throttle importing. 
+ -path + Path to file to import + -compressed + Set to true if the import file is compressed + +Examples: + + # Use influx in a non-interactive mode to query the database "metrics" and pretty print json: + $ influx -database 'metrics' -execute 'select * from cpu' -format 'json' -pretty + + # Connect to a specific database on startup and set database context: + $ influx -database 'metrics' -host 'localhost' -port '8086'`) + } + fs.Parse(os.Args[1:]) + + argsNotParsed := fs.Args() + if len(argsNotParsed) > 0 { + fmt.Fprintf(os.Stderr, "unknown arguments: %s\n", strings.Join(argsNotParsed, " ")) + fs.Usage() + os.Exit(1) + } + + if c.ShowVersion { + c.Version() + os.Exit(0) + } + + if err := c.Run(); err != nil { + fmt.Fprintf(os.Stderr, "%s\n", err) + os.Exit(1) + } +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/README.md b/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/README.md new file mode 100644 index 0000000..bcdf418 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/README.md @@ -0,0 +1,107 @@ +# `influx_inspect` + +## Ways to run + +### `influx_inspect` +Will print usage for the tool. + +### `influx_inspect report` +Displays series meta-data for all shards. Default location [$HOME/.influxdb] + +### `influx_inspect dumptsm` +Dumps low-level details about tsm1 files + +#### Flags + +##### `-index` bool +Dump raw index data. + +`default` = false + +#### `-blocks` bool +Dump raw block data. + +`default` = false + +#### `-all` +Dump all data. Caution: This may print a lot of information. + +`default` = false + +#### `-filter-key` +Only display index and block data match this key substring. + +`default` = "" + + +### `influx_inspect export` +Exports all tsm files to line protocol. This output file can be imported via the [influx](https://github.com/influxdata/influxdb/tree/master/importer#running-the-import-command) command. + + +#### `-datadir` string +Data storage path. + +`default` = "$HOME/.influxdb/data" + +#### `-waldir` string +WAL storage path. + +`default` = "$HOME/.influxdb/wal" + +#### `-out` string +Destination file to export to + +`default` = "$HOME/.influxdb/export" + +#### `-database` string (optional) +Database to export. + +`default` = "" + +#### `-retention` string (optional) +Retention policy to export. + +`default` = "" + +#### `-start` string (optional) +Optional. The time range to start at. + +#### `-end` string (optional) +Optional. The time range to end at. + +#### `-compress` bool (optional) +Compress the output. + +`default` = false + +#### Sample Commands + +Export entire database and compress output: +``` +influx_inspect export --compress +``` + +Export specific retention policy: +``` +influx_inspect export --database mydb --retention autogen +``` + +##### Sample Data +This is a sample of what the output will look like. + +``` +# DDL +CREATE DATABASE MY_DB_NAME +CREATE RETENTION POLICY autogen ON MY_DB_NAME DURATION inf REPLICATION 1 + +# DML +# CONTEXT-DATABASE:MY_DB_NAME +# CONTEXT-RETENTION-POLICY:autogen +randset value=97.9296104805 1439856000000000000 +randset value=25.3849066842 1439856100000000000 +``` + +# Caveats + +The system does not have access to the meta store when exporting TSM shards. As such, it always creates the retention policy with infinite duration and replication factor of 1. +End users may want to change this prior to re-importing if they are importing to a cluster or want a different duration for retention. 
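+
+For example, to export a bounded time range and load it into another instance, the export can be paired with the `influx` CLI's `-import` flag; the database name, timestamps, and output path below are illustrative only:
+```
+influx_inspect export --database mydb --start 2019-01-01T00:00:00Z --end 2019-02-01T00:00:00Z --out /tmp/mydb_jan.gz --compress
+influx -import -path /tmp/mydb_jan.gz -compressed
+```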
diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/buildtsi/buildtsi.go b/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/buildtsi/buildtsi.go new file mode 100644 index 0000000..6362224 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/buildtsi/buildtsi.go @@ -0,0 +1,410 @@ +// Package buildtsi reads an in-memory index and exports it as a TSI index. +package buildtsi + +import ( + "flag" + "fmt" + "io" + "io/ioutil" + "os" + "os/user" + "path/filepath" + "runtime" + "strconv" + "strings" + "sync/atomic" + + "github.com/influxdata/influxdb/logger" + "github.com/influxdata/influxdb/models" + "github.com/influxdata/influxdb/tsdb" + "github.com/influxdata/influxdb/tsdb/engine/tsm1" + "github.com/influxdata/influxdb/tsdb/index/tsi1" + "go.uber.org/zap" +) + +const defaultBatchSize = 10000 + +// Command represents the program execution for "influx_inspect buildtsi". +type Command struct { + Stderr io.Writer + Stdout io.Writer + Verbose bool + Logger *zap.Logger + + concurrency int // Number of goroutines to dedicate to shard index building. + databaseFilter string + retentionFilter string + shardFilter string + maxLogFileSize int64 + maxCacheSize uint64 + batchSize int +} + +// NewCommand returns a new instance of Command. +func NewCommand() *Command { + return &Command{ + Stderr: os.Stderr, + Stdout: os.Stdout, + Logger: zap.NewNop(), + batchSize: defaultBatchSize, + concurrency: runtime.GOMAXPROCS(0), + } +} + +// Run executes the command. +func (cmd *Command) Run(args ...string) error { + fs := flag.NewFlagSet("buildtsi", flag.ExitOnError) + dataDir := fs.String("datadir", "", "data directory") + walDir := fs.String("waldir", "", "WAL directory") + fs.IntVar(&cmd.concurrency, "concurrency", runtime.GOMAXPROCS(0), "Number of workers to dedicate to shard index building. Defaults to GOMAXPROCS") + fs.StringVar(&cmd.databaseFilter, "database", "", "optional: database name") + fs.StringVar(&cmd.retentionFilter, "retention", "", "optional: retention policy") + fs.StringVar(&cmd.shardFilter, "shard", "", "optional: shard id") + fs.Int64Var(&cmd.maxLogFileSize, "max-log-file-size", tsdb.DefaultMaxIndexLogFileSize, "optional: maximum log file size") + fs.Uint64Var(&cmd.maxCacheSize, "max-cache-size", tsdb.DefaultCacheMaxMemorySize, "optional: maximum cache size") + fs.IntVar(&cmd.batchSize, "batch-size", defaultBatchSize, "optional: set the size of the batches we write to the index. Setting this can have adverse affects on performance and heap requirements") + fs.BoolVar(&cmd.Verbose, "v", false, "verbose") + fs.SetOutput(cmd.Stdout) + if err := fs.Parse(args); err != nil { + return err + } else if fs.NArg() > 0 || *dataDir == "" || *walDir == "" { + fs.Usage() + return nil + } + cmd.Logger = logger.New(cmd.Stderr) + + return cmd.run(*dataDir, *walDir) +} + +func (cmd *Command) run(dataDir, walDir string) error { + // Verify the user actually wants to run as root. + if isRoot() { + fmt.Println("You are currently running as root. This will build your") + fmt.Println("index files with root ownership and will be inaccessible") + fmt.Println("if you run influxd as a non-root user. You should run") + fmt.Println("buildtsi as the same user you are running influxd.") + fmt.Print("Are you sure you want to continue? 
(y/N): ") + var answer string + if fmt.Scanln(&answer); !strings.HasPrefix(strings.TrimSpace(strings.ToLower(answer)), "y") { + return fmt.Errorf("operation aborted") + } + } + + fis, err := ioutil.ReadDir(dataDir) + if err != nil { + return err + } + + for _, fi := range fis { + name := fi.Name() + if !fi.IsDir() { + continue + } else if cmd.databaseFilter != "" && name != cmd.databaseFilter { + continue + } + + if err := cmd.processDatabase(name, filepath.Join(dataDir, name), filepath.Join(walDir, name)); err != nil { + return err + } + } + + return nil +} + +func (cmd *Command) processDatabase(dbName, dataDir, walDir string) error { + cmd.Logger.Info("Rebuilding database", zap.String("name", dbName)) + + sfile := tsdb.NewSeriesFile(filepath.Join(dataDir, tsdb.SeriesFileDirectory)) + sfile.Logger = cmd.Logger + if err := sfile.Open(); err != nil { + return err + } + defer sfile.Close() + + fis, err := ioutil.ReadDir(dataDir) + if err != nil { + return err + } + + for _, fi := range fis { + rpName := fi.Name() + if !fi.IsDir() { + continue + } else if rpName == tsdb.SeriesFileDirectory { + continue + } else if cmd.retentionFilter != "" && rpName != cmd.retentionFilter { + continue + } + + if err := cmd.processRetentionPolicy(sfile, dbName, rpName, filepath.Join(dataDir, rpName), filepath.Join(walDir, rpName)); err != nil { + return err + } + } + + return nil +} + +func (cmd *Command) processRetentionPolicy(sfile *tsdb.SeriesFile, dbName, rpName, dataDir, walDir string) error { + cmd.Logger.Info("Rebuilding retention policy", logger.Database(dbName), logger.RetentionPolicy(rpName)) + + fis, err := ioutil.ReadDir(dataDir) + if err != nil { + return err + } + + type shard struct { + ID uint64 + Path string + } + + var shards []shard + + for _, fi := range fis { + if !fi.IsDir() { + continue + } else if cmd.shardFilter != "" && fi.Name() != cmd.shardFilter { + continue + } + + shardID, err := strconv.ParseUint(fi.Name(), 10, 64) + if err != nil { + continue + } + + shards = append(shards, shard{shardID, fi.Name()}) + } + + errC := make(chan error, len(shards)) + var maxi uint32 // index of maximum shard being worked on. + for k := 0; k < cmd.concurrency; k++ { + go func() { + for { + i := int(atomic.AddUint32(&maxi, 1) - 1) // Get next partition to work on. + if i >= len(shards) { + return // No more work. + } + + id, name := shards[i].ID, shards[i].Path + errC <- cmd.processShard(sfile, dbName, rpName, id, filepath.Join(dataDir, name), filepath.Join(walDir, name)) + } + }() + } + + // Check for error + for i := 0; i < cap(errC); i++ { + if err := <-errC; err != nil { + return err + } + } + return nil +} + +func (cmd *Command) processShard(sfile *tsdb.SeriesFile, dbName, rpName string, shardID uint64, dataDir, walDir string) error { + log := cmd.Logger.With(logger.Database(dbName), logger.RetentionPolicy(rpName), logger.Shard(shardID)) + log.Info("Rebuilding shard") + + // Check if shard already has a TSI index. + indexPath := filepath.Join(dataDir, "index") + log.Info("Checking index path", zap.String("path", indexPath)) + if _, err := os.Stat(indexPath); !os.IsNotExist(err) { + log.Info("tsi1 index already exists, skipping", zap.String("path", indexPath)) + return nil + } + + log.Info("Opening shard") + + // Find shard files. + tsmPaths, err := cmd.collectTSMFiles(dataDir) + if err != nil { + return err + } + walPaths, err := cmd.collectWALFiles(walDir) + if err != nil { + return err + } + + // Remove temporary index files if this is being re-run. 
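+	// The new index is built under this dot-prefixed temporary directory and only
+	// renamed to "index" after a successful build, so any leftover ".index" from an
+	// interrupted run is safe to discard here.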
+ tmpPath := filepath.Join(dataDir, ".index") + log.Info("Cleaning up partial index from previous run, if any") + if err := os.RemoveAll(tmpPath); err != nil { + return err + } + + // Open TSI index in temporary path. + tsiIndex := tsi1.NewIndex(sfile, dbName, + tsi1.WithPath(tmpPath), + tsi1.WithMaximumLogFileSize(cmd.maxLogFileSize), + tsi1.DisableFsync(), + // Each new series entry in a log file is ~12 bytes so this should + // roughly equate to one flush to the file for every batch. + tsi1.WithLogFileBufferSize(12*cmd.batchSize), + ) + + tsiIndex.WithLogger(cmd.Logger) + + log.Info("Opening tsi index in temporary location", zap.String("path", tmpPath)) + if err := tsiIndex.Open(); err != nil { + return err + } + defer tsiIndex.Close() + + // Write out tsm1 files. + log.Info("Iterating over tsm files") + for _, path := range tsmPaths { + log.Info("Processing tsm file", zap.String("path", path)) + if err := cmd.processTSMFile(tsiIndex, path, log); err != nil { + return err + } + } + + // Write out wal files. + log.Info("Building cache from wal files") + cache := tsm1.NewCache(cmd.maxCacheSize) + loader := tsm1.NewCacheLoader(walPaths) + loader.WithLogger(cmd.Logger) + if err := loader.Load(cache); err != nil { + return err + } + + log.Info("Iterating over cache") + keysBatch := make([][]byte, 0, cmd.batchSize) + namesBatch := make([][]byte, 0, cmd.batchSize) + tagsBatch := make([]models.Tags, 0, cmd.batchSize) + + for _, key := range cache.Keys() { + seriesKey, _ := tsm1.SeriesAndFieldFromCompositeKey(key) + name, tags := models.ParseKeyBytes(seriesKey) + + if cmd.Verbose { + log.Info("Series", zap.String("name", string(name)), zap.String("tags", tags.String())) + } + + keysBatch = append(keysBatch, seriesKey) + namesBatch = append(namesBatch, name) + tagsBatch = append(tagsBatch, tags) + + // Flush batch? + if len(keysBatch) == cmd.batchSize { + if err := tsiIndex.CreateSeriesListIfNotExists(keysBatch, namesBatch, tagsBatch); err != nil { + return fmt.Errorf("problem creating series: (%s)", err) + } + keysBatch = keysBatch[:0] + namesBatch = namesBatch[:0] + tagsBatch = tagsBatch[:0] + } + } + + // Flush any remaining series in the batches + if len(keysBatch) > 0 { + if err := tsiIndex.CreateSeriesListIfNotExists(keysBatch, namesBatch, tagsBatch); err != nil { + return fmt.Errorf("problem creating series: (%s)", err) + } + keysBatch = nil + namesBatch = nil + tagsBatch = nil + } + + // Attempt to compact the index & wait for all compactions to complete. + log.Info("compacting index") + tsiIndex.Compact() + tsiIndex.Wait() + + // Close TSI index. + log.Info("Closing tsi index") + if err := tsiIndex.Close(); err != nil { + return err + } + + // Rename TSI to standard path. 
+ log.Info("Moving tsi to permanent location") + return os.Rename(tmpPath, indexPath) +} + +func (cmd *Command) processTSMFile(index *tsi1.Index, path string, log *zap.Logger) error { + f, err := os.Open(path) + if err != nil { + return err + } + defer f.Close() + + r, err := tsm1.NewTSMReader(f) + if err != nil { + log.Warn("Unable to read, skipping", zap.String("path", path), zap.Error(err)) + return nil + } + defer r.Close() + + keysBatch := make([][]byte, 0, cmd.batchSize) + namesBatch := make([][]byte, 0, cmd.batchSize) + tagsBatch := make([]models.Tags, cmd.batchSize) + var ti int + for i := 0; i < r.KeyCount(); i++ { + key, _ := r.KeyAt(i) + seriesKey, _ := tsm1.SeriesAndFieldFromCompositeKey(key) + var name []byte + name, tagsBatch[ti] = models.ParseKeyBytesWithTags(seriesKey, tagsBatch[ti]) + + if cmd.Verbose { + log.Info("Series", zap.String("name", string(name)), zap.String("tags", tagsBatch[ti].String())) + } + + keysBatch = append(keysBatch, seriesKey) + namesBatch = append(namesBatch, name) + ti++ + + // Flush batch? + if len(keysBatch) == cmd.batchSize { + if err := index.CreateSeriesListIfNotExists(keysBatch, namesBatch, tagsBatch[:ti]); err != nil { + return fmt.Errorf("problem creating series: (%s)", err) + } + keysBatch = keysBatch[:0] + namesBatch = namesBatch[:0] + ti = 0 // Reset tags. + } + } + + // Flush any remaining series in the batches + if len(keysBatch) > 0 { + if err := index.CreateSeriesListIfNotExists(keysBatch, namesBatch, tagsBatch[:ti]); err != nil { + return fmt.Errorf("problem creating series: (%s)", err) + } + } + return nil +} + +func (cmd *Command) collectTSMFiles(path string) ([]string, error) { + fis, err := ioutil.ReadDir(path) + if err != nil { + return nil, err + } + + var paths []string + for _, fi := range fis { + if filepath.Ext(fi.Name()) != "."+tsm1.TSMFileExtension { + continue + } + paths = append(paths, filepath.Join(path, fi.Name())) + } + return paths, nil +} + +func (cmd *Command) collectWALFiles(path string) ([]string, error) { + fis, err := ioutil.ReadDir(path) + if err != nil { + return nil, err + } + + var paths []string + for _, fi := range fis { + if filepath.Ext(fi.Name()) != "."+tsm1.WALFileExtension { + continue + } + paths = append(paths, filepath.Join(path, fi.Name())) + } + return paths, nil +} + +func isRoot() bool { + user, _ := user.Current() + return user != nil && user.Username == "root" +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/deletetsm/deletetsm.go b/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/deletetsm/deletetsm.go new file mode 100644 index 0000000..7a96140 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/deletetsm/deletetsm.go @@ -0,0 +1,156 @@ +// Package deletetsm bulk deletes a measurement from a raw tsm file. +package deletetsm + +import ( + "flag" + "fmt" + "io" + "io/ioutil" + "log" + "os" + "time" + + "github.com/influxdata/influxdb/models" + "github.com/influxdata/influxdb/tsdb/engine/tsm1" +) + +// Command represents the program execution for "influxd deletetsm". +type Command struct { + // Standard input/output, overridden for testing. + Stderr io.Writer + Stdout io.Writer + + measurement string // measurement to delete + sanitize bool // remove all keys with non-printable unicode + verbose bool // verbose logging +} + +// NewCommand returns a new instance of Command. +func NewCommand() *Command { + return &Command{ + Stderr: os.Stderr, + Stdout: os.Stdout, + } +} + +// Run executes the command. 
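+// It parses the command-line flags and then rewrites each TSM file named on the
+// command line, dropping blocks that belong to the -measurement being deleted (or,
+// with -sanitize, blocks whose keys contain non-printable unicode).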
+func (cmd *Command) Run(args ...string) (err error) { + fs := flag.NewFlagSet("deletetsm", flag.ExitOnError) + fs.StringVar(&cmd.measurement, "measurement", "", "") + fs.BoolVar(&cmd.sanitize, "sanitize", false, "") + fs.BoolVar(&cmd.verbose, "v", false, "") + fs.SetOutput(cmd.Stdout) + fs.Usage = cmd.printUsage + if err := fs.Parse(args); err != nil { + return err + } else if fs.NArg() == 0 { + fmt.Printf("path required\n\n") + fs.Usage() + return nil + } + + if !cmd.verbose { + log.SetOutput(ioutil.Discard) + } + + // Validate measurement or sanitize flag. + if cmd.measurement == "" && !cmd.sanitize { + return fmt.Errorf("-measurement or -sanitize flag required") + } + + // Process each TSM file. + for _, path := range fs.Args() { + log.Printf("processing: %s", path) + if err := cmd.process(path); err != nil { + return err + } + } + + return nil +} + +func (cmd *Command) process(path string) error { + // Open TSM reader. + input, err := os.Open(path) + if err != nil { + return err + } + defer input.Close() + + r, err := tsm1.NewTSMReader(input) + if err != nil { + return fmt.Errorf("unable to read %s: %s", path, err) + } + defer r.Close() + + // Remove previous temporary files. + outputPath := path + ".rewriting.tmp" + if err := os.RemoveAll(outputPath); err != nil { + return err + } else if err := os.RemoveAll(outputPath + ".idx.tmp"); err != nil { + return err + } + + // Create TSMWriter to temporary location. + output, err := os.Create(outputPath) + if err != nil { + return err + } + defer output.Close() + + w, err := tsm1.NewTSMWriter(output) + if err != nil { + return err + } + defer w.Close() + + // Iterate over the input blocks. + itr := r.BlockIterator() + for itr.Next() { + // Read key & time range. + key, minTime, maxTime, _, _, block, err := itr.Read() + if err != nil { + return err + } + + // Skip block if this is the measurement and time range we are deleting. + series, _ := tsm1.SeriesAndFieldFromCompositeKey(key) + measurement, tags := models.ParseKey(series) + if string(measurement) == cmd.measurement || (cmd.sanitize && !models.ValidKeyTokens(measurement, tags)) { + log.Printf("deleting block: %s (%s-%s) sz=%d", + key, + time.Unix(0, minTime).UTC().Format(time.RFC3339Nano), + time.Unix(0, maxTime).UTC().Format(time.RFC3339Nano), + len(block), + ) + continue + } + + if err := w.WriteBlock(key, minTime, maxTime, block); err != nil { + return err + } + } + + // Write index & close. + if err := w.WriteIndex(); err != nil { + return err + } else if err := w.Close(); err != nil { + return err + } + + // Replace original file with new file. + return os.Rename(outputPath, path) +} + +func (cmd *Command) printUsage() { + fmt.Print(`Deletes a measurement from a raw tsm file. + +Usage: influx_inspect deletetsm [flags] path... + + -measurement NAME + The name of the measurement to remove. + -sanitize + Remove all keys with non-printable unicode characters. + -v + Enable verbose logging.`) +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/dumptsi/dumptsi.go b/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/dumptsi/dumptsi.go new file mode 100644 index 0000000..4028394 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/dumptsi/dumptsi.go @@ -0,0 +1,525 @@ +// Package dumptsi inspects low-level details about tsi1 files. 
+package dumptsi + +import ( + "errors" + "flag" + "fmt" + "io" + "os" + "path/filepath" + "regexp" + "text/tabwriter" + + "github.com/influxdata/influxdb/logger" + "github.com/influxdata/influxdb/models" + "github.com/influxdata/influxdb/tsdb" + "github.com/influxdata/influxdb/tsdb/index/tsi1" +) + +// Command represents the program execution for "influxd dumptsi". +type Command struct { + // Standard input/output, overridden for testing. + Stderr io.Writer + Stdout io.Writer + + seriesFilePath string + paths []string + + showSeries bool + showMeasurements bool + showTagKeys bool + showTagValues bool + showTagValueSeries bool + + measurementFilter *regexp.Regexp + tagKeyFilter *regexp.Regexp + tagValueFilter *regexp.Regexp +} + +// NewCommand returns a new instance of Command. +func NewCommand() *Command { + return &Command{ + Stderr: os.Stderr, + Stdout: os.Stdout, + } +} + +// Run executes the command. +func (cmd *Command) Run(args ...string) error { + var measurementFilter, tagKeyFilter, tagValueFilter string + fs := flag.NewFlagSet("dumptsi", flag.ExitOnError) + fs.StringVar(&cmd.seriesFilePath, "series-file", "", "Path to series file") + fs.BoolVar(&cmd.showSeries, "series", false, "Show raw series data") + fs.BoolVar(&cmd.showMeasurements, "measurements", false, "Show raw measurement data") + fs.BoolVar(&cmd.showTagKeys, "tag-keys", false, "Show raw tag key data") + fs.BoolVar(&cmd.showTagValues, "tag-values", false, "Show raw tag value data") + fs.BoolVar(&cmd.showTagValueSeries, "tag-value-series", false, "Show raw series data for each value") + fs.StringVar(&measurementFilter, "measurement-filter", "", "Regex measurement filter") + fs.StringVar(&tagKeyFilter, "tag-key-filter", "", "Regex tag key filter") + fs.StringVar(&tagValueFilter, "tag-value-filter", "", "Regex tag value filter") + fs.SetOutput(cmd.Stdout) + fs.Usage = cmd.printUsage + if err := fs.Parse(args); err != nil { + return err + } + + // Parse filters. + if measurementFilter != "" { + re, err := regexp.Compile(measurementFilter) + if err != nil { + return err + } + cmd.measurementFilter = re + } + if tagKeyFilter != "" { + re, err := regexp.Compile(tagKeyFilter) + if err != nil { + return err + } + cmd.tagKeyFilter = re + } + if tagValueFilter != "" { + re, err := regexp.Compile(tagValueFilter) + if err != nil { + return err + } + cmd.tagValueFilter = re + } + + // Validate series file path. + if cmd.seriesFilePath == "" { + return errors.New("series file path required") + } + + cmd.paths = fs.Args() + if len(cmd.paths) == 0 { + fmt.Printf("at least one path required\n\n") + fs.Usage() + return nil + } + + // Some flags imply other flags. + if cmd.showTagValueSeries { + cmd.showTagValues = true + } + if cmd.showTagValues { + cmd.showTagKeys = true + } + if cmd.showTagKeys { + cmd.showMeasurements = true + } + + return cmd.run() +} + +func (cmd *Command) run() error { + sfile := tsdb.NewSeriesFile(cmd.seriesFilePath) + sfile.Logger = logger.New(os.Stderr) + if err := sfile.Open(); err != nil { + return err + } + defer sfile.Close() + + // Build a file set from the paths on the command line. + idx, fs, err := cmd.readFileSet(sfile) + if err != nil { + return err + } + + if cmd.showSeries { + if err := cmd.printSeries(sfile); err != nil { + return err + } + } + + // If this is an ad-hoc fileset then process it and close afterward. 
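+	// readFileSet returns a FileSet only when individual .tsi/.tsl files were given;
+	// a single directory argument opens the whole index instead, which is iterated
+	// partition by partition below.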
+ if fs != nil { + defer fs.Release() + defer fs.Close() + if cmd.showSeries || cmd.showMeasurements { + return cmd.printMeasurements(sfile, fs) + } + return cmd.printFileSummaries(fs) + } + + // Otherwise iterate over each partition in the index. + defer idx.Close() + for i := 0; i < int(idx.PartitionN); i++ { + if err := func() error { + fs, err := idx.PartitionAt(i).RetainFileSet() + if err != nil { + return err + } + defer fs.Release() + + if cmd.showSeries || cmd.showMeasurements { + return cmd.printMeasurements(sfile, fs) + } + return cmd.printFileSummaries(fs) + }(); err != nil { + return err + } + } + return nil +} + +func (cmd *Command) readFileSet(sfile *tsdb.SeriesFile) (*tsi1.Index, *tsi1.FileSet, error) { + // If only one path exists and it's a directory then open as an index. + if len(cmd.paths) == 1 { + fi, err := os.Stat(cmd.paths[0]) + if err != nil { + return nil, nil, err + } else if fi.IsDir() { + // Verify directory is an index before opening it. + if ok, err := tsi1.IsIndexDir(cmd.paths[0]); err != nil { + return nil, nil, err + } else if !ok { + return nil, nil, fmt.Errorf("Not an index directory: %q", cmd.paths[0]) + } + + idx := tsi1.NewIndex(sfile, + "", + tsi1.WithPath(cmd.paths[0]), + tsi1.DisableCompactions(), + ) + if err := idx.Open(); err != nil { + return nil, nil, err + } + return idx, nil, nil + } + } + + // Open each file and group into a fileset. + var files []tsi1.File + for _, path := range cmd.paths { + switch ext := filepath.Ext(path); ext { + case tsi1.LogFileExt: + f := tsi1.NewLogFile(sfile, path) + if err := f.Open(); err != nil { + return nil, nil, err + } + files = append(files, f) + + case tsi1.IndexFileExt: + f := tsi1.NewIndexFile(sfile) + f.SetPath(path) + if err := f.Open(); err != nil { + return nil, nil, err + } + files = append(files, f) + + default: + return nil, nil, fmt.Errorf("unexpected file extension: %s", ext) + } + } + + fs, err := tsi1.NewFileSet(nil, sfile, files) + if err != nil { + return nil, nil, err + } + fs.Retain() + + return nil, fs, nil +} + +func (cmd *Command) printSeries(sfile *tsdb.SeriesFile) error { + if !cmd.showSeries { + return nil + } + + // Print header. + tw := tabwriter.NewWriter(cmd.Stdout, 8, 8, 1, '\t', 0) + fmt.Fprintln(tw, "Series\t") + + // Iterate over each series. + itr := sfile.SeriesIDIterator() + for { + e, err := itr.Next() + if err != nil { + return err + } else if e.SeriesID == 0 { + break + } + name, tags := tsdb.ParseSeriesKey(sfile.SeriesKey(e.SeriesID)) + + if !cmd.matchSeries(name, tags) { + continue + } + + deleted := sfile.IsDeleted(e.SeriesID) + + fmt.Fprintf(tw, "%s%s\t%v\n", name, tags.HashKey(), deletedString(deleted)) + } + + // Flush & write footer spacing. + if err := tw.Flush(); err != nil { + return err + } + fmt.Fprint(cmd.Stdout, "\n\n") + + return nil +} + +func (cmd *Command) printMeasurements(sfile *tsdb.SeriesFile, fs *tsi1.FileSet) error { + if !cmd.showMeasurements { + return nil + } + + tw := tabwriter.NewWriter(cmd.Stdout, 8, 8, 1, '\t', 0) + fmt.Fprintln(tw, "Measurement\t") + + // Iterate over each series. 
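+	// (The iterator below walks measurements; individual series are printed by printSeries.)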
+ if itr := fs.MeasurementIterator(); itr != nil { + for e := itr.Next(); e != nil; e = itr.Next() { + if cmd.measurementFilter != nil && !cmd.measurementFilter.Match(e.Name()) { + continue + } + + fmt.Fprintf(tw, "%s\t%v\n", e.Name(), deletedString(e.Deleted())) + if err := tw.Flush(); err != nil { + return err + } + + if err := cmd.printTagKeys(sfile, fs, e.Name()); err != nil { + return err + } + } + } + + fmt.Fprint(cmd.Stdout, "\n\n") + + return nil +} + +func (cmd *Command) printTagKeys(sfile *tsdb.SeriesFile, fs *tsi1.FileSet, name []byte) error { + if !cmd.showTagKeys { + return nil + } + + // Iterate over each key. + tw := tabwriter.NewWriter(cmd.Stdout, 8, 8, 1, '\t', 0) + itr := fs.TagKeyIterator(name) + for e := itr.Next(); e != nil; e = itr.Next() { + if cmd.tagKeyFilter != nil && !cmd.tagKeyFilter.Match(e.Key()) { + continue + } + + fmt.Fprintf(tw, " %s\t%v\n", e.Key(), deletedString(e.Deleted())) + if err := tw.Flush(); err != nil { + return err + } + + if err := cmd.printTagValues(sfile, fs, name, e.Key()); err != nil { + return err + } + } + fmt.Fprint(cmd.Stdout, "\n") + + return nil +} + +func (cmd *Command) printTagValues(sfile *tsdb.SeriesFile, fs *tsi1.FileSet, name, key []byte) error { + if !cmd.showTagValues { + return nil + } + + // Iterate over each value. + tw := tabwriter.NewWriter(cmd.Stdout, 8, 8, 1, '\t', 0) + itr := fs.TagValueIterator(name, key) + for e := itr.Next(); e != nil; e = itr.Next() { + if cmd.tagValueFilter != nil && !cmd.tagValueFilter.Match(e.Value()) { + continue + } + + fmt.Fprintf(tw, " %s\t%v\n", e.Value(), deletedString(e.Deleted())) + if err := tw.Flush(); err != nil { + return err + } + + if err := cmd.printTagValueSeries(sfile, fs, name, key, e.Value()); err != nil { + return err + } + } + fmt.Fprint(cmd.Stdout, "\n") + + return nil +} + +func (cmd *Command) printTagValueSeries(sfile *tsdb.SeriesFile, fs *tsi1.FileSet, name, key, value []byte) error { + if !cmd.showTagValueSeries { + return nil + } + + // Iterate over each series. + tw := tabwriter.NewWriter(cmd.Stdout, 8, 8, 1, '\t', 0) + itr := fs.TagValueSeriesIDIterator(name, key, value) + for { + e, err := itr.Next() + if err != nil { + return err + } else if e.SeriesID == 0 { + break + } + + name, tags := tsdb.ParseSeriesKey(sfile.SeriesKey(e.SeriesID)) + + if !cmd.matchSeries(name, tags) { + continue + } + + fmt.Fprintf(tw, " %s%s\n", name, tags.HashKey()) + if err := tw.Flush(); err != nil { + return err + } + } + fmt.Fprint(cmd.Stdout, "\n") + + return nil +} + +func (cmd *Command) printFileSummaries(fs *tsi1.FileSet) error { + for _, f := range fs.Files() { + switch f := f.(type) { + case *tsi1.LogFile: + if err := cmd.printLogFileSummary(f); err != nil { + return err + } + case *tsi1.IndexFile: + if err := cmd.printIndexFileSummary(f); err != nil { + return err + } + default: + panic("unreachable") + } + fmt.Fprintln(cmd.Stdout, "") + } + return nil +} + +func (cmd *Command) printLogFileSummary(f *tsi1.LogFile) error { + fmt.Fprintf(cmd.Stdout, "[LOG FILE] %s\n", filepath.Base(f.Path())) + tw := tabwriter.NewWriter(cmd.Stdout, 8, 8, 1, '\t', 0) + fmt.Fprintf(tw, "Series:\t%d\n", f.SeriesN()) + fmt.Fprintf(tw, "Measurements:\t%d\n", f.MeasurementN()) + fmt.Fprintf(tw, "Tag Keys:\t%d\n", f.TagKeyN()) + fmt.Fprintf(tw, "Tag Values:\t%d\n", f.TagValueN()) + return tw.Flush() +} + +func (cmd *Command) printIndexFileSummary(f *tsi1.IndexFile) error { + fmt.Fprintf(cmd.Stdout, "[INDEX FILE] %s\n", filepath.Base(f.Path())) + + // Calculate summary stats. 
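+	// Walk measurements, then their tag keys, then tag values, accumulating counts
+	// and the size of the series data referenced at each level.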
+ var measurementN, measurementSeriesN, measurementSeriesSize uint64 + var keyN uint64 + var valueN, valueSeriesN, valueSeriesSize uint64 + + if mitr := f.MeasurementIterator(); mitr != nil { + for me, _ := mitr.Next().(*tsi1.MeasurementBlockElem); me != nil; me, _ = mitr.Next().(*tsi1.MeasurementBlockElem) { + kitr := f.TagKeyIterator(me.Name()) + for ke, _ := kitr.Next().(*tsi1.TagBlockKeyElem); ke != nil; ke, _ = kitr.Next().(*tsi1.TagBlockKeyElem) { + vitr := f.TagValueIterator(me.Name(), ke.Key()) + for ve, _ := vitr.Next().(*tsi1.TagBlockValueElem); ve != nil; ve, _ = vitr.Next().(*tsi1.TagBlockValueElem) { + valueN++ + valueSeriesN += uint64(ve.SeriesN()) + valueSeriesSize += uint64(len(ve.SeriesData())) + } + keyN++ + } + measurementN++ + measurementSeriesN += uint64(me.SeriesN()) + measurementSeriesSize += uint64(len(me.SeriesData())) + } + } + + // Write stats. + tw := tabwriter.NewWriter(cmd.Stdout, 8, 8, 1, '\t', 0) + fmt.Fprintf(tw, "Measurements:\t%d\n", measurementN) + fmt.Fprintf(tw, " Series data size:\t%d (%s)\n", measurementSeriesSize, formatSize(measurementSeriesSize)) + fmt.Fprintf(tw, " Bytes per series:\t%.01fb\n", float64(measurementSeriesSize)/float64(measurementSeriesN)) + fmt.Fprintf(tw, "Tag Keys:\t%d\n", keyN) + fmt.Fprintf(tw, "Tag Values:\t%d\n", valueN) + fmt.Fprintf(tw, " Series:\t%d\n", valueSeriesN) + fmt.Fprintf(tw, " Series data size:\t%d (%s)\n", valueSeriesSize, formatSize(valueSeriesSize)) + fmt.Fprintf(tw, " Bytes per series:\t%.01fb\n", float64(valueSeriesSize)/float64(valueSeriesN)) + return tw.Flush() +} + +// matchSeries returns true if the command filters matches the series. +func (cmd *Command) matchSeries(name []byte, tags models.Tags) bool { + // Filter by measurement. + if cmd.measurementFilter != nil && !cmd.measurementFilter.Match(name) { + return false + } + + // Filter by tag key/value. + if cmd.tagKeyFilter != nil || cmd.tagValueFilter != nil { + var matched bool + for _, tag := range tags { + if (cmd.tagKeyFilter == nil || cmd.tagKeyFilter.Match(tag.Key)) && (cmd.tagValueFilter == nil || cmd.tagValueFilter.Match(tag.Value)) { + matched = true + break + } + } + if !matched { + return false + } + } + + return true +} + +// printUsage prints the usage message to STDERR. +func (cmd *Command) printUsage() { + usage := `Dumps low-level details about tsi1 files. + +Usage: influx_inspect dumptsi [flags] path... + + -series + Dump raw series data + -measurements + Dump raw measurement data + -tag-keys + Dump raw tag keys + -tag-values + Dump raw tag values + -tag-value-series + Dump raw series for each tag value + -measurement-filter REGEXP + Filters data by measurement regular expression + -series-file PATH + Path to the "_series" directory under the database data directory. + Required. + -tag-key-filter REGEXP + Filters data by tag key regular expression + -tag-value-filter REGEXP + Filters data by tag value regular expression + +One or more paths are required. Path must specify either a TSI index directory +or it should specify one or more .tsi/.tsl files. If no flags are specified +then summary stats are provided for each file. +` + + fmt.Fprintf(cmd.Stdout, usage) +} + +// deletedString returns "(deleted)" if v is true. 
+func deletedString(v bool) string { + if v { + return "(deleted)" + } + return "" +} + +func formatSize(v uint64) string { + denom := uint64(1) + var uom string + for _, uom = range []string{"b", "kb", "mb", "gb", "tb"} { + if denom*1024 > v { + break + } + denom *= 1024 + } + return fmt.Sprintf("%0.01f%s", float64(v)/float64(denom), uom) +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/dumptsm/dumptsm.go b/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/dumptsm/dumptsm.go new file mode 100644 index 0000000..3dd5f1f --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/dumptsm/dumptsm.go @@ -0,0 +1,335 @@ +// Package dumptsm inspects low-level details about tsm1 files. +package dumptsm + +import ( + "encoding/binary" + "flag" + "fmt" + "io" + "os" + "strconv" + "strings" + "text/tabwriter" + "time" + + "github.com/influxdata/influxdb/tsdb/engine/tsm1" +) + +// Command represents the program execution for "influxd dumptsm". +type Command struct { + // Standard input/output, overridden for testing. + Stderr io.Writer + Stdout io.Writer + + dumpIndex bool + dumpBlocks bool + dumpAll bool + filterKey string + path string +} + +// NewCommand returns a new instance of Command. +func NewCommand() *Command { + return &Command{ + Stderr: os.Stderr, + Stdout: os.Stdout, + } +} + +// Run executes the command. +func (cmd *Command) Run(args ...string) error { + fs := flag.NewFlagSet("file", flag.ExitOnError) + fs.BoolVar(&cmd.dumpIndex, "index", false, "Dump raw index data") + fs.BoolVar(&cmd.dumpBlocks, "blocks", false, "Dump raw block data") + fs.BoolVar(&cmd.dumpAll, "all", false, "Dump all data. Caution: This may print a lot of information") + fs.StringVar(&cmd.filterKey, "filter-key", "", "Only display index and block data match this key substring") + + fs.SetOutput(cmd.Stdout) + fs.Usage = cmd.printUsage + + if err := fs.Parse(args); err != nil { + return err + } + + if fs.Arg(0) == "" { + fmt.Printf("TSM file not specified\n\n") + fs.Usage() + return nil + } + cmd.path = fs.Args()[0] + cmd.dumpBlocks = cmd.dumpBlocks || cmd.dumpAll || cmd.filterKey != "" + cmd.dumpIndex = cmd.dumpIndex || cmd.dumpAll || cmd.filterKey != "" + return cmd.dump() +} + +func (cmd *Command) dump() error { + var errors []error + + f, err := os.Open(cmd.path) + if err != nil { + return err + } + + // Get the file size + stat, err := f.Stat() + if err != nil { + return err + } + b := make([]byte, 8) + + r, err := tsm1.NewTSMReader(f) + if err != nil { + return fmt.Errorf("Error opening TSM files: %s", err.Error()) + } + defer r.Close() + + minTime, maxTime := r.TimeRange() + keyCount := r.KeyCount() + + blockStats := &blockStats{} + + println("Summary:") + fmt.Printf(" File: %s\n", cmd.path) + fmt.Printf(" Time Range: %s - %s\n", + time.Unix(0, minTime).UTC().Format(time.RFC3339Nano), + time.Unix(0, maxTime).UTC().Format(time.RFC3339Nano), + ) + fmt.Printf(" Duration: %s ", time.Unix(0, maxTime).Sub(time.Unix(0, minTime))) + fmt.Printf(" Series: %d ", keyCount) + fmt.Printf(" File Size: %d\n", stat.Size()) + println() + + tw := tabwriter.NewWriter(cmd.Stdout, 8, 8, 1, '\t', 0) + + if cmd.dumpIndex { + println("Index:") + tw.Flush() + println() + + fmt.Fprintln(tw, " "+strings.Join([]string{"Pos", "Min Time", "Max Time", "Ofs", "Size", "Key", "Field"}, "\t")) + var pos int + for i := 0; i < keyCount; i++ { + key, _ := r.KeyAt(i) + for _, e := range r.Entries(key) { + pos++ + split := strings.Split(string(key), "#!~#") + + // Possible corruption? 
Try to read as much as we can and point to the problem. + measurement := split[0] + field := split[1] + + if cmd.filterKey != "" && !strings.Contains(string(key), cmd.filterKey) { + continue + } + fmt.Fprintln(tw, " "+strings.Join([]string{ + strconv.FormatInt(int64(pos), 10), + time.Unix(0, e.MinTime).UTC().Format(time.RFC3339Nano), + time.Unix(0, e.MaxTime).UTC().Format(time.RFC3339Nano), + strconv.FormatInt(int64(e.Offset), 10), + strconv.FormatInt(int64(e.Size), 10), + measurement, + field, + }, "\t")) + tw.Flush() + } + } + } + + tw = tabwriter.NewWriter(cmd.Stdout, 8, 8, 1, '\t', 0) + fmt.Fprintln(tw, " "+strings.Join([]string{"Blk", "Chk", "Ofs", "Len", "Type", "Min Time", "Points", "Enc [T/V]", "Len [T/V]"}, "\t")) + + // Starting at 5 because the magic number is 4 bytes + 1 byte version + i := int64(5) + var blockCount, pointCount, blockSize int64 + indexSize := r.IndexSize() + + // Start at the beginning and read every block + for j := 0; j < keyCount; j++ { + key, _ := r.KeyAt(j) + for _, e := range r.Entries(key) { + + f.Seek(int64(e.Offset), 0) + f.Read(b[:4]) + + chksum := binary.BigEndian.Uint32(b[:4]) + + buf := make([]byte, e.Size-4) + f.Read(buf) + + blockSize += int64(e.Size) + + if cmd.filterKey != "" && !strings.Contains(string(key), cmd.filterKey) { + i += blockSize + blockCount++ + continue + } + + blockType := buf[0] + + encoded := buf[1:] + + var v []tsm1.Value + v, err := tsm1.DecodeBlock(buf, v) + if err != nil { + return err + } + startTime := time.Unix(0, v[0].UnixNano()) + + pointCount += int64(len(v)) + + // Length of the timestamp block + tsLen, j := binary.Uvarint(encoded) + + // Unpack the timestamp bytes + ts := encoded[int(j) : int(j)+int(tsLen)] + + // Unpack the value bytes + values := encoded[int(j)+int(tsLen):] + + tsEncoding := timeEnc[int(ts[0]>>4)] + vEncoding := encDescs[int(blockType+1)][values[0]>>4] + + typeDesc := blockTypes[blockType] + + blockStats.inc(0, ts[0]>>4) + blockStats.inc(int(blockType+1), values[0]>>4) + blockStats.size(len(buf)) + + if cmd.dumpBlocks { + fmt.Fprintln(tw, " "+strings.Join([]string{ + strconv.FormatInt(blockCount, 10), + strconv.FormatUint(uint64(chksum), 10), + strconv.FormatInt(i, 10), + strconv.FormatInt(int64(len(buf)), 10), + typeDesc, + startTime.UTC().Format(time.RFC3339Nano), + strconv.FormatInt(int64(len(v)), 10), + fmt.Sprintf("%s/%s", tsEncoding, vEncoding), + fmt.Sprintf("%d/%d", len(ts), len(values)), + }, "\t")) + } + + i += blockSize + blockCount++ + } + } + + if cmd.dumpBlocks { + println("Blocks:") + tw.Flush() + println() + } + + var blockSizeAvg int64 + if blockCount > 0 { + blockSizeAvg = blockSize / blockCount + } + fmt.Printf("Statistics\n") + fmt.Printf(" Blocks:\n") + fmt.Printf(" Total: %d Size: %d Min: %d Max: %d Avg: %d\n", + blockCount, blockSize, blockStats.min, blockStats.max, blockSizeAvg) + fmt.Printf(" Index:\n") + fmt.Printf(" Total: %d Size: %d\n", blockCount, indexSize) + fmt.Printf(" Points:\n") + fmt.Printf(" Total: %d", pointCount) + println() + + println(" Encoding:") + for i, counts := range blockStats.counts { + if len(counts) == 0 { + continue + } + fmt.Printf(" %s: ", strings.Title(fieldType[i])) + for j, v := range counts { + fmt.Printf("\t%s: %d (%d%%) ", encDescs[i][j], v, int(float64(v)/float64(blockCount)*100)) + } + println() + } + fmt.Printf(" Compression:\n") + fmt.Printf(" Per block: %0.2f bytes/point\n", float64(blockSize)/float64(pointCount)) + fmt.Printf(" Total: %0.2f bytes/point\n", float64(stat.Size())/float64(pointCount)) + + if len(errors) > 0 { + 
println() + fmt.Printf("Errors (%d):\n", len(errors)) + for _, err := range errors { + fmt.Printf(" * %v\n", err) + } + println() + return fmt.Errorf("error count %d", len(errors)) + } + return nil +} + +// printUsage prints the usage message to STDERR. +func (cmd *Command) printUsage() { + usage := `Dumps low-level details about tsm1 files. + +Usage: influx_inspect dumptsm [flags] + Only display index and block data match this key substring +` + + fmt.Fprintf(cmd.Stdout, usage) +} + +var ( + fieldType = []string{ + "timestamp", "float", "int", "bool", "string", "unsigned", + } + blockTypes = []string{ + "float64", "int64", "bool", "string", "unsigned", + } + timeEnc = []string{ + "none", "s8b", "rle", + } + floatEnc = []string{ + "none", "gor", + } + intEnc = []string{ + "none", "s8b", "rle", + } + boolEnc = []string{ + "none", "bp", + } + stringEnc = []string{ + "none", "snpy", + } + unsignedEnc = []string{ + "none", "s8b", "rle", + } + encDescs = [][]string{ + timeEnc, floatEnc, intEnc, boolEnc, stringEnc, unsignedEnc, + } +) + +type blockStats struct { + min, max int + counts [][]int +} + +func (b *blockStats) inc(typ int, enc byte) { + for len(b.counts) <= typ { + b.counts = append(b.counts, []int{}) + } + for len(b.counts[typ]) <= int(enc) { + b.counts[typ] = append(b.counts[typ], 0) + } + b.counts[typ][enc]++ +} + +func (b *blockStats) size(sz int) { + if b.min == 0 || sz < b.min { + b.min = sz + } + if b.min == 0 || sz > b.max { + b.max = sz + } +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/dumptsm/dumptsm_test.go b/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/dumptsm/dumptsm_test.go new file mode 100644 index 0000000..6a01a65 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/dumptsm/dumptsm_test.go @@ -0,0 +1,3 @@ +package dumptsm_test + +// TODO: write some tests diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/dumptsmwal/dumptsmwal.go b/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/dumptsmwal/dumptsmwal.go new file mode 100644 index 0000000..c1b833b --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/dumptsmwal/dumptsmwal.go @@ -0,0 +1,162 @@ +// Package dumptsmwal dumps all data from a WAL file. +package dumptsmwal + +import ( + "flag" + "fmt" + "io" + "log" + "os" + "path/filepath" + "sort" + + "github.com/influxdata/influxdb/tsdb/engine/tsm1" +) + +// Command represents the program execution for "influxd dumptsmwal". +type Command struct { + // Standard input/output, overridden for testing. + Stderr io.Writer + Stdout io.Writer + + showDuplicates bool +} + +// NewCommand returns a new instance of Command. +func NewCommand() *Command { + return &Command{ + Stderr: os.Stderr, + Stdout: os.Stdout, + } +} + +// Run executes the command. +func (cmd *Command) Run(args ...string) (err error) { + fs := flag.NewFlagSet("dumptsmwal", flag.ExitOnError) + fs.SetOutput(cmd.Stdout) + fs.BoolVar(&cmd.showDuplicates, "show-duplicates", false, "prints keys with out-of-order or duplicate values") + fs.Usage = cmd.printUsage + if err := fs.Parse(args); err != nil { + return err + } else if fs.NArg() == 0 { + fmt.Printf("path required\n\n") + fs.Usage() + return nil + } + + // Process each TSM WAL file. 
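+	// Files that do not have the WAL extension are skipped with a log message rather
+	// than treated as errors.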
+ for _, path := range fs.Args() { + if err := cmd.process(path); err != nil { + return err + } + } + return nil +} + +func (cmd *Command) process(path string) error { + if filepath.Ext(path) != "."+tsm1.WALFileExtension { + log.Printf("invalid wal filename, skipping %s", path) + return nil + } + + // Track the earliest timestamp for each key and a set of keys with out-of-order points. + minTimestampByKey := make(map[string]int64) + duplicateKeys := make(map[string]struct{}) + + // Open WAL reader. + f, err := os.Open(path) + if err != nil { + return err + } + defer f.Close() + r := tsm1.NewWALSegmentReader(f) + + // Iterate over the WAL entries. + for r.Next() { + entry, err := r.Read() + if err != nil { + return fmt.Errorf("cannot read entry: %s", err) + } + + switch entry := entry.(type) { + case *tsm1.WriteWALEntry: + if !cmd.showDuplicates { + fmt.Printf("[write] sz=%d\n", entry.MarshalSize()) + } + + keys := make([]string, 0, len(entry.Values)) + for k := range entry.Values { + keys = append(keys, k) + } + sort.Strings(keys) + + for _, k := range keys { + for _, v := range entry.Values[k] { + t := v.UnixNano() + + // Check for duplicate/out of order keys. + if min, ok := minTimestampByKey[k]; ok && t <= min { + duplicateKeys[k] = struct{}{} + } + minTimestampByKey[k] = t + + // Skip printing if we are only showing duplicate keys. + if cmd.showDuplicates { + continue + } + + switch v := v.(type) { + case tsm1.IntegerValue: + fmt.Printf("%s %vi %d\n", k, v.Value(), t) + case tsm1.UnsignedValue: + fmt.Printf("%s %vu %d\n", k, v.Value(), t) + case tsm1.FloatValue: + fmt.Printf("%s %v %d\n", k, v.Value(), t) + case tsm1.BooleanValue: + fmt.Printf("%s %v %d\n", k, v.Value(), t) + case tsm1.StringValue: + fmt.Printf("%s %q %d\n", k, v.Value(), t) + default: + fmt.Printf("%s EMPTY\n", k) + } + } + } + + case *tsm1.DeleteWALEntry: + fmt.Printf("[delete] sz=%d\n", entry.MarshalSize()) + for _, k := range entry.Keys { + fmt.Printf("%s\n", string(k)) + } + + case *tsm1.DeleteRangeWALEntry: + fmt.Printf("[delete-range] min=%d max=%d sz=%d\n", entry.Min, entry.Max, entry.MarshalSize()) + for _, k := range entry.Keys { + fmt.Printf("%s\n", string(k)) + } + + default: + return fmt.Errorf("invalid wal entry: %#v", entry) + } + } + + // Print keys with duplicate or out-of-order points, if requested. + if cmd.showDuplicates { + keys := make([]string, 0, len(duplicateKeys)) + for k := range duplicateKeys { + keys = append(keys, k) + } + sort.Strings(keys) + + for _, k := range keys { + fmt.Println(k) + } + } + + return nil +} + +func (cmd *Command) printUsage() { + fmt.Print(`Dumps all entries from one or more TSM WAL files. + +Usage: influx_inspect dumptsmwal path...`) +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/export/export.go b/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/export/export.go new file mode 100644 index 0000000..2f4bbf8 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/export/export.go @@ -0,0 +1,419 @@ +// Package export exports TSM files into InfluxDB line protocol format. +package export + +import ( + "bufio" + "compress/gzip" + "flag" + "fmt" + "io" + "math" + "os" + "path/filepath" + "sort" + "strconv" + "strings" + "sync" + "time" + + "github.com/influxdata/influxdb/models" + "github.com/influxdata/influxdb/pkg/escape" + "github.com/influxdata/influxdb/tsdb/engine/tsm1" + "github.com/influxdata/influxql" +) + +// Command represents the program execution for "influx_inspect export". 
+type Command struct { + // Standard input/output, overridden for testing. + Stderr io.Writer + Stdout io.Writer + + dataDir string + walDir string + out string + database string + retentionPolicy string + startTime int64 + endTime int64 + compress bool + + manifest map[string]struct{} + tsmFiles map[string][]string + walFiles map[string][]string +} + +// NewCommand returns a new instance of Command. +func NewCommand() *Command { + return &Command{ + Stderr: os.Stderr, + Stdout: os.Stdout, + + manifest: make(map[string]struct{}), + tsmFiles: make(map[string][]string), + walFiles: make(map[string][]string), + } +} + +// Run executes the command. +func (cmd *Command) Run(args ...string) error { + var start, end string + fs := flag.NewFlagSet("export", flag.ExitOnError) + fs.StringVar(&cmd.dataDir, "datadir", os.Getenv("HOME")+"/.influxdb/data", "Data storage path") + fs.StringVar(&cmd.walDir, "waldir", os.Getenv("HOME")+"/.influxdb/wal", "WAL storage path") + fs.StringVar(&cmd.out, "out", os.Getenv("HOME")+"/.influxdb/export", "Destination file to export to") + fs.StringVar(&cmd.database, "database", "", "Optional: the database to export") + fs.StringVar(&cmd.retentionPolicy, "retention", "", "Optional: the retention policy to export (requires -database)") + fs.StringVar(&start, "start", "", "Optional: the start time to export (RFC3339 format)") + fs.StringVar(&end, "end", "", "Optional: the end time to export (RFC3339 format)") + fs.BoolVar(&cmd.compress, "compress", false, "Compress the output") + + fs.SetOutput(cmd.Stdout) + fs.Usage = func() { + fmt.Fprintf(cmd.Stdout, "Exports TSM files into InfluxDB line protocol format.\n\n") + fmt.Fprintf(cmd.Stdout, "Usage: %s export [flags]\n\n", filepath.Base(os.Args[0])) + fs.PrintDefaults() + } + + if err := fs.Parse(args); err != nil { + return err + } + + // set defaults + if start != "" { + s, err := time.Parse(time.RFC3339, start) + if err != nil { + return err + } + cmd.startTime = s.UnixNano() + } else { + cmd.startTime = math.MinInt64 + } + if end != "" { + e, err := time.Parse(time.RFC3339, end) + if err != nil { + return err + } + cmd.endTime = e.UnixNano() + } else { + // set end time to max if it is not set. 
+ cmd.endTime = math.MaxInt64 + } + + if err := cmd.validate(); err != nil { + return err + } + + return cmd.export() +} + +func (cmd *Command) validate() error { + if cmd.retentionPolicy != "" && cmd.database == "" { + return fmt.Errorf("must specify a db") + } + if cmd.startTime != 0 && cmd.endTime != 0 && cmd.endTime < cmd.startTime { + return fmt.Errorf("end time before start time") + } + return nil +} + +func (cmd *Command) export() error { + if err := cmd.walkTSMFiles(); err != nil { + return err + } + if err := cmd.walkWALFiles(); err != nil { + return err + } + return cmd.write() +} + +func (cmd *Command) walkTSMFiles() error { + return filepath.Walk(cmd.dataDir, func(path string, f os.FileInfo, err error) error { + if err != nil { + return err + } + + // check to see if this is a tsm file + if filepath.Ext(path) != "."+tsm1.TSMFileExtension { + return nil + } + + relPath, err := filepath.Rel(cmd.dataDir, path) + if err != nil { + return err + } + dirs := strings.Split(relPath, string(byte(os.PathSeparator))) + if len(dirs) < 2 { + return fmt.Errorf("invalid directory structure for %s", path) + } + if dirs[0] == cmd.database || cmd.database == "" { + if dirs[1] == cmd.retentionPolicy || cmd.retentionPolicy == "" { + key := filepath.Join(dirs[0], dirs[1]) + cmd.manifest[key] = struct{}{} + cmd.tsmFiles[key] = append(cmd.tsmFiles[key], path) + } + } + return nil + }) +} + +func (cmd *Command) walkWALFiles() error { + return filepath.Walk(cmd.walDir, func(path string, f os.FileInfo, err error) error { + if err != nil { + return err + } + + // check to see if this is a wal file + fileName := filepath.Base(path) + if filepath.Ext(path) != "."+tsm1.WALFileExtension || !strings.HasPrefix(fileName, tsm1.WALFilePrefix) { + return nil + } + + relPath, err := filepath.Rel(cmd.walDir, path) + if err != nil { + return err + } + dirs := strings.Split(relPath, string(byte(os.PathSeparator))) + if len(dirs) < 2 { + return fmt.Errorf("invalid directory structure for %s", path) + } + if dirs[0] == cmd.database || cmd.database == "" { + if dirs[1] == cmd.retentionPolicy || cmd.retentionPolicy == "" { + key := filepath.Join(dirs[0], dirs[1]) + cmd.manifest[key] = struct{}{} + cmd.walFiles[key] = append(cmd.walFiles[key], path) + } + } + return nil + }) +} + +func (cmd *Command) write() error { + // open our output file and create an output buffer + f, err := os.Create(cmd.out) + if err != nil { + return err + } + defer f.Close() + + // Because calling (*os.File).Write is relatively expensive, + // and we don't *need* to sync to disk on every written line of export, + // use a sized buffered writer so that we only sync the file every megabyte. 
+ bw := bufio.NewWriterSize(f, 1024*1024) + defer bw.Flush() + + var w io.Writer = bw + + if cmd.compress { + gzw := gzip.NewWriter(w) + defer gzw.Close() + w = gzw + } + + s, e := time.Unix(0, cmd.startTime).Format(time.RFC3339), time.Unix(0, cmd.endTime).Format(time.RFC3339) + fmt.Fprintf(w, "# INFLUXDB EXPORT: %s - %s\n", s, e) + + // Write out all the DDL + fmt.Fprintln(w, "# DDL") + for key := range cmd.manifest { + keys := strings.Split(key, string(os.PathSeparator)) + db, rp := influxql.QuoteIdent(keys[0]), influxql.QuoteIdent(keys[1]) + fmt.Fprintf(w, "CREATE DATABASE %s WITH NAME %s\n", db, rp) + } + + fmt.Fprintln(w, "# DML") + for key := range cmd.manifest { + keys := strings.Split(key, string(os.PathSeparator)) + fmt.Fprintf(w, "# CONTEXT-DATABASE:%s\n", keys[0]) + fmt.Fprintf(w, "# CONTEXT-RETENTION-POLICY:%s\n", keys[1]) + if files, ok := cmd.tsmFiles[key]; ok { + fmt.Fprintf(cmd.Stdout, "writing out tsm file data for %s...", key) + if err := cmd.writeTsmFiles(w, files); err != nil { + return err + } + fmt.Fprintln(cmd.Stdout, "complete.") + } + if _, ok := cmd.walFiles[key]; ok { + fmt.Fprintf(cmd.Stdout, "writing out wal file data for %s...", key) + if err := cmd.writeWALFiles(w, cmd.walFiles[key], key); err != nil { + return err + } + fmt.Fprintln(cmd.Stdout, "complete.") + } + } + return nil +} + +func (cmd *Command) writeTsmFiles(w io.Writer, files []string) error { + fmt.Fprintln(w, "# writing tsm data") + + // we need to make sure we write the same order that the files were written + sort.Strings(files) + + for _, f := range files { + if err := cmd.exportTSMFile(f, w); err != nil { + return err + } + } + + return nil +} + +func (cmd *Command) exportTSMFile(tsmFilePath string, w io.Writer) error { + f, err := os.Open(tsmFilePath) + if err != nil { + if os.IsNotExist(err) { + fmt.Fprintf(w, "skipped missing file: %s", tsmFilePath) + return nil + } + return err + } + defer f.Close() + + r, err := tsm1.NewTSMReader(f) + if err != nil { + fmt.Fprintf(cmd.Stderr, "unable to read %s, skipping: %s\n", tsmFilePath, err.Error()) + return nil + } + defer r.Close() + + if sgStart, sgEnd := r.TimeRange(); sgStart > cmd.endTime || sgEnd < cmd.startTime { + return nil + } + + for i := 0; i < r.KeyCount(); i++ { + key, _ := r.KeyAt(i) + values, err := r.ReadAll(key) + if err != nil { + fmt.Fprintf(cmd.Stderr, "unable to read key %q in %s, skipping: %s\n", string(key), tsmFilePath, err.Error()) + continue + } + measurement, field := tsm1.SeriesAndFieldFromCompositeKey(key) + field = escape.Bytes(field) + + if err := cmd.writeValues(w, measurement, string(field), values); err != nil { + // An error from writeValues indicates an IO error, which should be returned. + return err + } + } + return nil +} + +func (cmd *Command) writeWALFiles(w io.Writer, files []string, key string) error { + fmt.Fprintln(w, "# writing wal data") + + // we need to make sure we write the same order that the wal received the data + sort.Strings(files) + + var once sync.Once + warnDelete := func() { + once.Do(func() { + msg := fmt.Sprintf(`WARNING: detected deletes in wal file. +Some series for %q may be brought back by replaying this data. +To resolve, you can either let the shard snapshot prior to exporting the data +or manually editing the exported file. + `, key) + fmt.Fprintln(cmd.Stderr, msg) + }) + } + + for _, f := range files { + if err := cmd.exportWALFile(f, w, warnDelete); err != nil { + return err + } + } + + return nil +} + +// exportWAL reads every WAL entry from r and exports it to w. 
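+// (Here "r" is the WAL segment reader opened from walFilePath below. As an
+// illustration only, a float value 1.5 at timestamp 1 for series "floats,k=f",
+// field "f", is exported as the line-protocol line:
+//
+//	floats,k=f f=1.5 1
+//
+// matching the expected lines in export_test.go.)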
+func (cmd *Command) exportWALFile(walFilePath string, w io.Writer, warnDelete func()) error {
+	f, err := os.Open(walFilePath)
+	if err != nil {
+		if os.IsNotExist(err) {
+			fmt.Fprintf(w, "skipped missing file: %s", walFilePath)
+			return nil
+		}
+		return err
+	}
+	defer f.Close()
+
+	r := tsm1.NewWALSegmentReader(f)
+	defer r.Close()
+
+	for r.Next() {
+		entry, err := r.Read()
+		if err != nil {
+			n := r.Count()
+			fmt.Fprintf(cmd.Stderr, "file %s corrupt at position %d: %v", walFilePath, n, err)
+			break
+		}
+
+		switch t := entry.(type) {
+		case *tsm1.DeleteWALEntry, *tsm1.DeleteRangeWALEntry:
+			warnDelete()
+			continue
+		case *tsm1.WriteWALEntry:
+			for key, values := range t.Values {
+				measurement, field := tsm1.SeriesAndFieldFromCompositeKey([]byte(key))
+				// measurements are stored escaped, field names are not
+				field = escape.Bytes(field)
+
+				if err := cmd.writeValues(w, measurement, string(field), values); err != nil {
+					// An error from writeValues indicates an IO error, which should be returned.
+					return err
+				}
+			}
+		}
+	}
+	return nil
+}
+
+// writeValues writes every value in values to w, using the given series key and field name.
+// If any call to w.Write fails, that error is returned.
+func (cmd *Command) writeValues(w io.Writer, seriesKey []byte, field string, values []tsm1.Value) error {
+	buf := []byte(string(seriesKey) + " " + field + "=")
+	prefixLen := len(buf)
+
+	for _, value := range values {
+		ts := value.UnixNano()
+		if (ts < cmd.startTime) || (ts > cmd.endTime) {
+			continue
+		}
+
+		// Re-slice buf to be "<seriesKey> <field>=".
+		buf = buf[:prefixLen]
+
+		// Append the correct representation of the value.
+		switch v := value.Value().(type) {
+		case float64:
+			buf = strconv.AppendFloat(buf, v, 'g', -1, 64)
+		case int64:
+			buf = strconv.AppendInt(buf, v, 10)
+			buf = append(buf, 'i')
+		case uint64:
+			buf = strconv.AppendUint(buf, v, 10)
+			buf = append(buf, 'u')
+		case bool:
+			buf = strconv.AppendBool(buf, v)
+		case string:
+			buf = append(buf, '"')
+			buf = append(buf, models.EscapeStringField(v)...)
+			buf = append(buf, '"')
+		default:
+			// This shouldn't be possible, but we'll format it anyway.
+			buf = append(buf, fmt.Sprintf("%v", v)...)
+		}
+
+		// Now buf has "<seriesKey> <field>=<value>".
+		// Append the timestamp and a newline, then write it.
+		buf = append(buf, ' ')
+		buf = strconv.AppendInt(buf, ts, 10)
+		buf = append(buf, '\n')
+		if _, err := w.Write(buf); err != nil {
+			// Underlying IO error needs to be returned.
+ return err + } + } + + return nil +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/export/export_test.go b/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/export/export_test.go new file mode 100644 index 0000000..45a7a43 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/export/export_test.go @@ -0,0 +1,358 @@ +package export + +import ( + "bytes" + "fmt" + "io/ioutil" + "math" + "math/rand" + "os" + "sort" + "strconv" + "strings" + "testing" + + "github.com/golang/snappy" + "github.com/influxdata/influxdb/tsdb/engine/tsm1" +) + +type corpus map[string][]tsm1.Value + +var ( + basicCorpus = corpus{ + tsm1.SeriesFieldKey("floats,k=f", "f"): []tsm1.Value{ + tsm1.NewValue(1, float64(1.5)), + tsm1.NewValue(2, float64(3)), + }, + tsm1.SeriesFieldKey("ints,k=i", "i"): []tsm1.Value{ + tsm1.NewValue(10, int64(15)), + tsm1.NewValue(20, int64(30)), + }, + tsm1.SeriesFieldKey("bools,k=b", "b"): []tsm1.Value{ + tsm1.NewValue(100, true), + tsm1.NewValue(200, false), + }, + tsm1.SeriesFieldKey("strings,k=s", "s"): []tsm1.Value{ + tsm1.NewValue(1000, "1k"), + tsm1.NewValue(2000, "2k"), + }, + tsm1.SeriesFieldKey("uints,k=u", "u"): []tsm1.Value{ + tsm1.NewValue(3000, uint64(45)), + tsm1.NewValue(4000, uint64(60)), + }, + } + + basicCorpusExpLines = []string{ + "floats,k=f f=1.5 1", + "floats,k=f f=3 2", + "ints,k=i i=15i 10", + "ints,k=i i=30i 20", + "bools,k=b b=true 100", + "bools,k=b b=false 200", + `strings,k=s s="1k" 1000`, + `strings,k=s s="2k" 2000`, + `uints,k=u u=45u 3000`, + `uints,k=u u=60u 4000`, + } + + escapeStringCorpus = corpus{ + tsm1.SeriesFieldKey("t", "s"): []tsm1.Value{ + tsm1.NewValue(1, `1. "quotes"`), + tsm1.NewValue(2, `2. back\slash`), + tsm1.NewValue(3, `3. bs\q"`), + }, + } + + escCorpusExpLines = []string{ + `t s="1. \"quotes\"" 1`, + `t s="2. back\\slash" 2`, + `t s="3. bs\\q\"" 3`, + } +) + +func Test_exportWALFile(t *testing.T) { + for _, c := range []struct { + corpus corpus + lines []string + }{ + {corpus: basicCorpus, lines: basicCorpusExpLines}, + {corpus: escapeStringCorpus, lines: escCorpusExpLines}, + } { + walFile := writeCorpusToWALFile(c.corpus) + defer os.Remove(walFile.Name()) + + var out bytes.Buffer + if err := newCommand().exportWALFile(walFile.Name(), &out, func() {}); err != nil { + t.Fatal(err) + } + + lines := strings.Split(out.String(), "\n") + for _, exp := range c.lines { + found := false + for _, l := range lines { + if exp == l { + found = true + break + } + } + + if !found { + t.Fatalf("expected line %q to be in exported output:\n%s", exp, out.String()) + } + } + } + + // Missing .wal file should not cause a failure. 
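+	// (exportWALFile writes "skipped missing file: ..." to the output writer and
+	// returns nil in that case; see export.go above.)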
+ var out bytes.Buffer + if err := newCommand().exportWALFile("file-that-does-not-exist.wal", &out, func() {}); err != nil { + t.Fatal(err) + } +} + +func Test_exportTSMFile(t *testing.T) { + for _, c := range []struct { + corpus corpus + lines []string + }{ + {corpus: basicCorpus, lines: basicCorpusExpLines}, + {corpus: escapeStringCorpus, lines: escCorpusExpLines}, + } { + tsmFile := writeCorpusToTSMFile(c.corpus) + defer os.Remove(tsmFile.Name()) + + var out bytes.Buffer + if err := newCommand().exportTSMFile(tsmFile.Name(), &out); err != nil { + t.Fatal(err) + } + + lines := strings.Split(out.String(), "\n") + for _, exp := range c.lines { + found := false + for _, l := range lines { + if exp == l { + found = true + break + } + } + + if !found { + t.Fatalf("expected line %q to be in exported output:\n%s", exp, out.String()) + } + } + } + + // Missing .tsm file should not cause a failure. + var out bytes.Buffer + if err := newCommand().exportTSMFile("file-that-does-not-exist.tsm", &out); err != nil { + t.Fatal(err) + } +} + +var sink interface{} + +func benchmarkExportTSM(c corpus, b *testing.B) { + // Garbage collection is relatively likely to happen during export, so track allocations. + b.ReportAllocs() + + f := writeCorpusToTSMFile(c) + defer os.Remove(f.Name()) + + cmd := newCommand() + var out bytes.Buffer + b.ResetTimer() + b.StartTimer() + for i := 0; i < b.N; i++ { + if err := cmd.exportTSMFile(f.Name(), &out); err != nil { + b.Fatal(err) + } + + sink = out.Bytes() + out.Reset() + } +} + +func BenchmarkExportTSMFloats_100s_250vps(b *testing.B) { + benchmarkExportTSM(makeFloatsCorpus(100, 250), b) +} + +func BenchmarkExportTSMInts_100s_250vps(b *testing.B) { + benchmarkExportTSM(makeIntsCorpus(100, 250), b) +} + +func BenchmarkExportTSMBools_100s_250vps(b *testing.B) { + benchmarkExportTSM(makeBoolsCorpus(100, 250), b) +} + +func BenchmarkExportTSMStrings_100s_250vps(b *testing.B) { + benchmarkExportTSM(makeStringsCorpus(100, 250), b) +} + +func benchmarkExportWAL(c corpus, b *testing.B) { + // Garbage collection is relatively likely to happen during export, so track allocations. + b.ReportAllocs() + + f := writeCorpusToWALFile(c) + defer os.Remove(f.Name()) + + cmd := newCommand() + var out bytes.Buffer + b.ResetTimer() + b.StartTimer() + for i := 0; i < b.N; i++ { + if err := cmd.exportWALFile(f.Name(), &out, func() {}); err != nil { + b.Fatal(err) + } + + sink = out.Bytes() + out.Reset() + } +} + +func BenchmarkExportWALFloats_100s_250vps(b *testing.B) { + benchmarkExportWAL(makeFloatsCorpus(100, 250), b) +} + +func BenchmarkExportWALInts_100s_250vps(b *testing.B) { + benchmarkExportWAL(makeIntsCorpus(100, 250), b) +} + +func BenchmarkExportWALBools_100s_250vps(b *testing.B) { + benchmarkExportWAL(makeBoolsCorpus(100, 250), b) +} + +func BenchmarkExportWALStrings_100s_250vps(b *testing.B) { + benchmarkExportWAL(makeStringsCorpus(100, 250), b) +} + +// newCommand returns a command that discards its output and that accepts all timestamps. +func newCommand() *Command { + return &Command{ + Stderr: ioutil.Discard, + Stdout: ioutil.Discard, + startTime: math.MinInt64, + endTime: math.MaxInt64, + } +} + +// makeCorpus returns a new corpus filled with values generated by fn. +// The RNG passed to fn is seeded with numSeries * numValuesPerSeries, for predictable output. 
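+// For example (illustrative, not part of the original tests): makeCorpus(2, 3, fn)
+// yields the keys SeriesFieldKey("m,t=0", "x") and SeriesFieldKey("m,t=1", "x"),
+// with three values each; timestamps increase by one nanosecond across the whole
+// corpus, so they span 0..5.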
+func makeCorpus(numSeries, numValuesPerSeries int, fn func(*rand.Rand) interface{}) corpus { + rng := rand.New(rand.NewSource(int64(numSeries) * int64(numValuesPerSeries))) + var unixNano int64 + corpus := make(corpus, numSeries) + for i := 0; i < numSeries; i++ { + vals := make([]tsm1.Value, numValuesPerSeries) + for j := 0; j < numValuesPerSeries; j++ { + vals[j] = tsm1.NewValue(unixNano, fn(rng)) + unixNano++ + } + + k := fmt.Sprintf("m,t=%d", i) + corpus[tsm1.SeriesFieldKey(k, "x")] = vals + } + + return corpus +} + +func makeFloatsCorpus(numSeries, numFloatsPerSeries int) corpus { + return makeCorpus(numSeries, numFloatsPerSeries, func(rng *rand.Rand) interface{} { + return rng.Float64() + }) +} + +func makeIntsCorpus(numSeries, numIntsPerSeries int) corpus { + return makeCorpus(numSeries, numIntsPerSeries, func(rng *rand.Rand) interface{} { + // This will only return positive integers. That's probably okay. + return rng.Int63() + }) +} + +func makeBoolsCorpus(numSeries, numBoolsPerSeries int) corpus { + return makeCorpus(numSeries, numBoolsPerSeries, func(rng *rand.Rand) interface{} { + return rand.Int63n(2) == 1 + }) +} + +func makeStringsCorpus(numSeries, numStringsPerSeries int) corpus { + return makeCorpus(numSeries, numStringsPerSeries, func(rng *rand.Rand) interface{} { + // The string will randomly have 2-6 parts + parts := make([]string, rand.Intn(4)+2) + + for i := range parts { + // Each part is a random base36-encoded number + parts[i] = strconv.FormatInt(rand.Int63(), 36) + } + + // Join the individual parts with underscores. + return strings.Join(parts, "_") + }) +} + +// writeCorpusToWALFile writes the given corpus as a WAL file, and returns a handle to that file. +// It is the caller's responsibility to remove the returned temp file. +// writeCorpusToWALFile will panic on any error that occurs. +func writeCorpusToWALFile(c corpus) *os.File { + walFile, err := ioutil.TempFile("", "export_test_corpus_wal") + if err != nil { + panic(err) + } + + e := &tsm1.WriteWALEntry{Values: c} + b, err := e.Encode(nil) + if err != nil { + panic(err) + } + + w := tsm1.NewWALSegmentWriter(walFile) + if err := w.Write(e.Type(), snappy.Encode(nil, b)); err != nil { + panic(err) + } + + if err := w.Flush(); err != nil { + panic(err) + } + // (*tsm1.WALSegmentWriter).sync isn't exported, but it only Syncs the file anyway. + if err := walFile.Sync(); err != nil { + panic(err) + } + + return walFile +} + +// writeCorpusToTSMFile writes the given corpus as a TSM file, and returns a handle to that file. +// It is the caller's responsibility to remove the returned temp file. +// writeCorpusToTSMFile will panic on any error that occurs. +func writeCorpusToTSMFile(c corpus) *os.File { + tsmFile, err := ioutil.TempFile("", "export_test_corpus_tsm") + if err != nil { + panic(err) + } + + w, err := tsm1.NewTSMWriter(tsmFile) + if err != nil { + panic(err) + } + + // Write the series in alphabetical order so that each test run is comparable, + // given an identical corpus. 
+ keys := make([]string, 0, len(c)) + for k := range c { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + if err := w.Write([]byte(k), c[k]); err != nil { + panic(err) + } + } + + if err := w.WriteIndex(); err != nil { + panic(err) + } + + if err := w.Close(); err != nil { + panic(err) + } + + return tsmFile +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/help/help.go b/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/help/help.go new file mode 100644 index 0000000..58833ee --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/help/help.go @@ -0,0 +1,47 @@ +// Package help contains the help for the influx_inspect command. +package help + +import ( + "fmt" + "io" + "os" + "strings" +) + +// Command displays help for command-line sub-commands. +type Command struct { + Stdout io.Writer +} + +// NewCommand returns a new instance of Command. +func NewCommand() *Command { + return &Command{ + Stdout: os.Stdout, + } +} + +// Run executes the command. +func (cmd *Command) Run(args ...string) error { + fmt.Fprintln(cmd.Stdout, strings.TrimSpace(usage)) + return nil +} + +const usage = ` +Usage: influx_inspect [[command] [arguments]] + +The commands are: + + deletetsm bulk measurement deletion of raw tsm file + dumptsi dumps low-level details about tsi1 files + dumptsm dumps low-level details about tsm1 files + export exports raw data from a shard to line protocol + buildtsi generates tsi1 indexes from tsm1 data + help display this help message + report displays a shard level report + verify verifies integrity of TSM files + verify-seriesfile verifies integrity of the Series file + +"help" is the default command. + +Use "influx_inspect [command] -help" for more information about a command. +` diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/help/help_test.go b/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/help/help_test.go new file mode 100644 index 0000000..31d1632 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/help/help_test.go @@ -0,0 +1,3 @@ +package help_test + +// TODO: write some tests diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/main.go b/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/main.go new file mode 100644 index 0000000..38ff966 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/main.go @@ -0,0 +1,120 @@ +// The influx_inspect command displays detailed information about InfluxDB data files. 
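+//
+// Illustrative invocations (using only flags defined by the sub-commands below;
+// the paths are placeholders):
+//
+//	influx_inspect export -database mydb -out /tmp/export
+//	influx_inspect report -detailed ~/.influxdb/data/mydb/autogen/1
+//	influx_inspect verify-seriesfile -dir ~/.influxdb/data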
+package main + +import ( + "fmt" + "io" + "log" + "os" + + "github.com/influxdata/influxdb/cmd" + "github.com/influxdata/influxdb/cmd/influx_inspect/buildtsi" + "github.com/influxdata/influxdb/cmd/influx_inspect/deletetsm" + "github.com/influxdata/influxdb/cmd/influx_inspect/dumptsi" + "github.com/influxdata/influxdb/cmd/influx_inspect/dumptsm" + "github.com/influxdata/influxdb/cmd/influx_inspect/dumptsmwal" + "github.com/influxdata/influxdb/cmd/influx_inspect/export" + "github.com/influxdata/influxdb/cmd/influx_inspect/help" + "github.com/influxdata/influxdb/cmd/influx_inspect/report" + "github.com/influxdata/influxdb/cmd/influx_inspect/reporttsi" + "github.com/influxdata/influxdb/cmd/influx_inspect/verify/seriesfile" + "github.com/influxdata/influxdb/cmd/influx_inspect/verify/tsm" + _ "github.com/influxdata/influxdb/tsdb/engine" +) + +func main() { + m := NewMain() + if err := m.Run(os.Args[1:]...); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } +} + +// Main represents the program execution. +type Main struct { + Logger *log.Logger + + Stdin io.Reader + Stdout io.Writer + Stderr io.Writer +} + +// NewMain returns a new instance of Main. +func NewMain() *Main { + return &Main{ + Logger: log.New(os.Stderr, "[influx_inspect] ", log.LstdFlags), + Stdin: os.Stdin, + Stdout: os.Stdout, + Stderr: os.Stderr, + } +} + +// Run determines and runs the command specified by the CLI args. +func (m *Main) Run(args ...string) error { + name, args := cmd.ParseCommandName(args) + + // Extract name from args. + switch name { + case "", "help": + if err := help.NewCommand().Run(args...); err != nil { + return fmt.Errorf("help: %s", err) + } + case "deletetsm": + name := deletetsm.NewCommand() + if err := name.Run(args...); err != nil { + return fmt.Errorf("deletetsm: %s", err) + } + case "dumptsi": + name := dumptsi.NewCommand() + if err := name.Run(args...); err != nil { + return fmt.Errorf("dumptsi: %s", err) + } + case "dumptsmdev": + fmt.Fprintf(m.Stderr, "warning: dumptsmdev is deprecated, use dumptsm instead.\n") + fallthrough + case "dumptsm": + name := dumptsm.NewCommand() + if err := name.Run(args...); err != nil { + return fmt.Errorf("dumptsm: %s", err) + } + case "dumptsmwal": + name := dumptsmwal.NewCommand() + if err := name.Run(args...); err != nil { + return fmt.Errorf("dumptsmwal: %s", err) + } + case "export": + name := export.NewCommand() + if err := name.Run(args...); err != nil { + return fmt.Errorf("export: %s", err) + } + case "buildtsi": + name := buildtsi.NewCommand() + if err := name.Run(args...); err != nil { + return fmt.Errorf("buildtsi: %s", err) + } + case "report": + name := report.NewCommand() + if err := name.Run(args...); err != nil { + return fmt.Errorf("report: %s", err) + } + case "reporttsi": + name := reporttsi.NewCommand() + if err := name.Run(args...); err != nil { + return fmt.Errorf("reporttsi: %s", err) + } + case "verify": + name := tsm.NewCommand() + if err := name.Run(args...); err != nil { + return fmt.Errorf("verify: %s", err) + } + case "verify-seriesfile": + name := seriesfile.NewCommand() + if err := name.Run(args...); err != nil { + return fmt.Errorf("verify-seriesfile: %s", err) + } + default: + return fmt.Errorf(`unknown command "%s"`+"\n"+`Run 'influx_inspect help' for usage`+"\n\n", name) + } + + return nil +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/report/report.go b/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/report/report.go new file mode 100644 index 0000000..5b6c1f1 --- /dev/null +++ 
b/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/report/report.go @@ -0,0 +1,326 @@ +// Package report reports statistics about TSM files. +package report + +import ( + "flag" + "fmt" + "io" + "math" + "os" + "path/filepath" + "sort" + "strconv" + "strings" + "text/tabwriter" + "time" + + "github.com/influxdata/influxdb/models" + "github.com/influxdata/influxdb/tsdb/engine/tsm1" + "github.com/retailnext/hllpp" +) + +// Command represents the program execution for "influxd report". +type Command struct { + Stderr io.Writer + Stdout io.Writer + + dir string + pattern string + detailed, exact bool +} + +// NewCommand returns a new instance of Command. +func NewCommand() *Command { + return &Command{ + Stderr: os.Stderr, + Stdout: os.Stdout, + } +} + +// Run executes the command. +func (cmd *Command) Run(args ...string) error { + fs := flag.NewFlagSet("report", flag.ExitOnError) + fs.StringVar(&cmd.pattern, "pattern", "", "Include only files matching a pattern") + fs.BoolVar(&cmd.detailed, "detailed", false, "Report detailed cardinality estimates") + fs.BoolVar(&cmd.exact, "exact", false, "Report exact counts") + + fs.SetOutput(cmd.Stdout) + fs.Usage = cmd.printUsage + + if err := fs.Parse(args); err != nil { + return err + } + + newCounterFn := newHLLCounter + estTitle := " (est)" + if cmd.exact { + estTitle = "" + newCounterFn = newExactCounter + } + + cmd.dir = fs.Arg(0) + + err := cmd.isShardDir(cmd.dir) + if cmd.detailed && err != nil { + return fmt.Errorf("-detailed only supported for shard dirs") + } + + totalSeries := newCounterFn() + tagCardinalities := map[string]counter{} + measCardinalities := map[string]counter{} + fieldCardinalities := map[string]counter{} + + dbCardinalities := map[string]counter{} + + start := time.Now() + + tw := tabwriter.NewWriter(cmd.Stdout, 8, 2, 1, ' ', 0) + fmt.Fprintln(tw, strings.Join([]string{"DB", "RP", "Shard", "File", "Series", "New" + estTitle, "Min Time", "Max Time", "Load Time"}, "\t")) + + minTime, maxTime := int64(math.MaxInt64), int64(math.MinInt64) + var fileCount int + if err := cmd.walkShardDirs(cmd.dir, func(db, rp, id, path string) error { + if cmd.pattern != "" && strings.Contains(path, cmd.pattern) { + return nil + } + + file, err := os.OpenFile(path, os.O_RDONLY, 0600) + if err != nil { + fmt.Fprintf(cmd.Stderr, "error: %s: %v. Skipping.\n", path, err) + return nil + } + + loadStart := time.Now() + reader, err := tsm1.NewTSMReader(file) + if err != nil { + fmt.Fprintf(cmd.Stderr, "error: %s: %v. 
Skipping.\n", file.Name(), err) + return nil + } + loadTime := time.Since(loadStart) + fileCount++ + + dbCount := dbCardinalities[db] + if dbCount == nil { + dbCount = newCounterFn() + dbCardinalities[db] = dbCount + } + + oldCount := dbCount.Count() + + seriesCount := reader.KeyCount() + for i := 0; i < seriesCount; i++ { + key, _ := reader.KeyAt(i) + totalSeries.Add([]byte(key)) + dbCount.Add([]byte(key)) + + if cmd.detailed { + sep := strings.Index(string(key), "#!~#") + seriesKey, field := key[:sep], key[sep+4:] + measurement, tags := models.ParseKey(seriesKey) + + measCount := measCardinalities[measurement] + if measCount == nil { + measCount = newCounterFn() + measCardinalities[measurement] = measCount + } + measCount.Add([]byte(key)) + + fieldCount := fieldCardinalities[measurement] + if fieldCount == nil { + fieldCount = newCounterFn() + fieldCardinalities[measurement] = fieldCount + } + fieldCount.Add([]byte(field)) + + for _, t := range tags { + tagCount := tagCardinalities[string(t.Key)] + if tagCount == nil { + tagCount = newCounterFn() + tagCardinalities[string(t.Key)] = tagCount + } + tagCount.Add(t.Value) + } + } + } + minT, maxT := reader.TimeRange() + if minT < minTime { + minTime = minT + } + if maxT > maxTime { + maxTime = maxT + } + reader.Close() + + fmt.Fprintln(tw, strings.Join([]string{ + db, rp, id, + filepath.Base(file.Name()), + strconv.FormatInt(int64(seriesCount), 10), + strconv.FormatInt(int64(dbCount.Count()-oldCount), 10), + time.Unix(0, minT).UTC().Format(time.RFC3339Nano), + time.Unix(0, maxT).UTC().Format(time.RFC3339Nano), + loadTime.String(), + }, "\t")) + if cmd.detailed { + tw.Flush() + } + return nil + }); err != nil { + return err + } + + tw.Flush() + println() + + println("Summary:") + fmt.Printf(" Files: %d\n", fileCount) + fmt.Printf(" Time Range: %s - %s\n", + time.Unix(0, minTime).UTC().Format(time.RFC3339Nano), + time.Unix(0, maxTime).UTC().Format(time.RFC3339Nano), + ) + fmt.Printf(" Duration: %s \n", time.Unix(0, maxTime).Sub(time.Unix(0, minTime))) + println() + + fmt.Printf("Statistics\n") + fmt.Printf(" Series:\n") + for db, counts := range dbCardinalities { + fmt.Printf(" - %s%s: %d (%d%%)\n", db, estTitle, counts.Count(), int(float64(counts.Count())/float64(totalSeries.Count())*100)) + } + fmt.Printf(" Total%s: %d\n", estTitle, totalSeries.Count()) + + if cmd.detailed { + fmt.Printf("\n Measurements (est):\n") + for _, t := range sortKeys(measCardinalities) { + fmt.Printf(" - %v: %d (%d%%)\n", t, measCardinalities[t].Count(), int((float64(measCardinalities[t].Count())/float64(totalSeries.Count()))*100)) + } + + fmt.Printf("\n Fields (est):\n") + for _, t := range sortKeys(fieldCardinalities) { + fmt.Printf(" - %v: %d\n", t, fieldCardinalities[t].Count()) + } + + fmt.Printf("\n Tags (est):\n") + for _, t := range sortKeys(tagCardinalities) { + fmt.Printf(" - %v: %d\n", t, tagCardinalities[t].Count()) + } + } + + fmt.Printf("Completed in %s\n", time.Since(start)) + return nil +} + +// sortKeys is a quick helper to return the sorted set of a map's keys +func sortKeys(vals map[string]counter) (keys []string) { + for k := range vals { + keys = append(keys, k) + } + sort.Strings(keys) + + return keys +} + +func (cmd *Command) isShardDir(dir string) error { + name := filepath.Base(dir) + if id, err := strconv.Atoi(name); err != nil || id < 1 { + return fmt.Errorf("not a valid shard dir: %v", dir) + } + + return nil +} + +func (cmd *Command) walkShardDirs(root string, fn func(db, rp, id, path string) error) error { + type location struct { + 
db, rp, id, path string + } + + var dirs []location + if err := filepath.Walk(root, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + + if info.IsDir() { + return nil + } + + if filepath.Ext(info.Name()) == "."+tsm1.TSMFileExtension { + shardDir := filepath.Dir(path) + + if err := cmd.isShardDir(shardDir); err != nil { + return err + } + absPath, err := filepath.Abs(path) + if err != nil { + return err + } + parts := strings.Split(absPath, string(filepath.Separator)) + db, rp, id := parts[len(parts)-4], parts[len(parts)-3], parts[len(parts)-2] + dirs = append(dirs, location{db: db, rp: rp, id: id, path: path}) + return nil + } + return nil + }); err != nil { + return err + } + + sort.Slice(dirs, func(i, j int) bool { + a, _ := strconv.Atoi(dirs[i].id) + b, _ := strconv.Atoi(dirs[j].id) + return a < b + }) + + for _, shard := range dirs { + if err := fn(shard.db, shard.rp, shard.id, shard.path); err != nil { + return err + } + } + return nil +} + +// printUsage prints the usage message to STDERR. +func (cmd *Command) printUsage() { + usage := `Displays shard level report. + +Usage: influx_inspect report [flags] + + -pattern + Include only files matching a pattern. + -exact + Report exact cardinality counts instead of estimates. Note: this can use a lot of memory. + Defaults to "false". + -detailed + Report detailed cardinality estimates. + Defaults to "false". +` + + fmt.Fprintf(cmd.Stdout, usage) +} + +// counter abstracts a a method of counting keys. +type counter interface { + Add(key []byte) + Count() uint64 +} + +// newHLLCounter returns an approximate counter using HyperLogLogs for cardinality estimation. +func newHLLCounter() counter { + return hllpp.New() +} + +// exactCounter returns an exact count for keys using counting all distinct items in a set. +type exactCounter struct { + m map[string]struct{} +} + +func (c *exactCounter) Add(key []byte) { + c.m[string(key)] = struct{}{} +} + +func (c *exactCounter) Count() uint64 { + return uint64(len(c.m)) +} + +func newExactCounter() counter { + return &exactCounter{ + m: make(map[string]struct{}), + } +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/report/report_test.go b/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/report/report_test.go new file mode 100644 index 0000000..3a6ba2c --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/report/report_test.go @@ -0,0 +1,3 @@ +package report_test + +// TODO: write some tests diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/reporttsi/report.go b/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/reporttsi/report.go new file mode 100644 index 0000000..c9d4ff6 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/reporttsi/report.go @@ -0,0 +1,487 @@ +// Package reporttsi provides a report about the series cardinality in one or more TSI indexes. +package reporttsi + +import ( + "errors" + "flag" + "fmt" + "io" + "math" + "os" + "path" + "path/filepath" + "runtime" + "sort" + "strconv" + "sync/atomic" + "text/tabwriter" + + "github.com/influxdata/influxdb/logger" + "github.com/influxdata/influxdb/tsdb" + "github.com/influxdata/influxdb/tsdb/index/tsi1" +) + +const ( + // Number of series IDs to stored in slice before we convert to a roaring + // bitmap. Roaring bitmaps have a non-trivial initial cost to construct. + useBitmapN = 25 +) + +// Command represents the program execution for "influxd reporttsi". 
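+// It is typically invoked as, e.g., "influx_inspect reporttsi -db-path /path/to/data/db"
+// (illustrative path); -db-path is required, see Run below.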
+type Command struct { + // Standard input/output, overridden for testing. + Stderr io.Writer + Stdout io.Writer + + dbPath string + shardPaths map[uint64]string + shardIdxs map[uint64]*tsi1.Index + cardinalities map[uint64]map[string]*cardinality + + seriesFilePath string // optional. Defaults to dbPath/_series + sfile *tsdb.SeriesFile + + topN int + byMeasurement bool + byTagKey bool + + // How many goroutines to dedicate to calculating cardinality. + concurrency int +} + +// NewCommand returns a new instance of Command with default setting applied. +func NewCommand() *Command { + return &Command{ + Stderr: os.Stderr, + Stdout: os.Stdout, + shardPaths: map[uint64]string{}, + shardIdxs: map[uint64]*tsi1.Index{}, + cardinalities: map[uint64]map[string]*cardinality{}, + topN: 0, + byMeasurement: true, + byTagKey: false, + concurrency: runtime.GOMAXPROCS(0), + } +} + +// Run executes the command. +func (cmd *Command) Run(args ...string) error { + fs := flag.NewFlagSet("reporttsi", flag.ExitOnError) + fs.StringVar(&cmd.dbPath, "db-path", "", "Path to database. Required.") + fs.StringVar(&cmd.seriesFilePath, "series-file", "", "Optional path to series file. Defaults /path/to/db-path/_series") + fs.BoolVar(&cmd.byMeasurement, "measurements", true, "Segment cardinality by measurements") + // TODO(edd): Not yet implemented. + // fs.BoolVar(&cmd.byTagKey, "tag-key", false, "Segment cardinality by tag keys (overrides `measurements`") + fs.IntVar(&cmd.topN, "top", 0, "Limit results to top n") + fs.IntVar(&cmd.concurrency, "c", runtime.GOMAXPROCS(0), "Set worker concurrency. Defaults to GOMAXPROCS setting.") + + fs.SetOutput(cmd.Stdout) + if err := fs.Parse(args); err != nil { + return err + } + + if cmd.byTagKey { + return errors.New("Segmenting cardinality by tag key is not yet implemented") + } + + if cmd.dbPath == "" { + return errors.New("path to database must be provided") + } + + if cmd.seriesFilePath == "" { + cmd.seriesFilePath = path.Join(cmd.dbPath, tsdb.SeriesFileDirectory) + } + + // Walk database directory to get shards. + if err := filepath.Walk(cmd.dbPath, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + + if !info.IsDir() { + return nil + } + + // TODO(edd): this would be a problem if the retention policy was named + // "index". + if info.Name() == tsdb.SeriesFileDirectory || info.Name() == "index" { + return filepath.SkipDir + } + + id, err := strconv.Atoi(info.Name()) + if err != nil { + return nil + } + cmd.shardPaths[uint64(id)] = path + return nil + }); err != nil { + return err + } + + if len(cmd.shardPaths) == 0 { + fmt.Fprintf(cmd.Stderr, "No shards under %s\n", cmd.dbPath) + return nil + } + + return cmd.run() +} + +func (cmd *Command) run() error { + cmd.sfile = tsdb.NewSeriesFile(cmd.seriesFilePath) + cmd.sfile.Logger = logger.New(os.Stderr) + if err := cmd.sfile.Open(); err != nil { + return err + } + defer cmd.sfile.Close() + + // Open all the indexes. + for id, pth := range cmd.shardPaths { + pth = path.Join(pth, "index") + // Verify directory is an index before opening it. + if ok, err := tsi1.IsIndexDir(pth); err != nil { + return err + } else if !ok { + return fmt.Errorf("not a TSI index directory: %q", pth) + } + + cmd.shardIdxs[id] = tsi1.NewIndex(cmd.sfile, + "", + tsi1.WithPath(pth), + tsi1.DisableCompactions(), + ) + if err := cmd.shardIdxs[id].Open(); err != nil { + return err + } + defer cmd.shardIdxs[id].Close() + + // Initialise cardinality set to store cardinalities for this shard. 
+ cmd.cardinalities[id] = map[string]*cardinality{} + } + + // Calculate cardinalities of shards. + fn := cmd.cardinalityByMeasurement + // if cmd.byTagKey { + // TODO(edd) + // } + + // Blocks until all work done. + cmd.calculateCardinalities(fn) + + // Print summary. + if err := cmd.printSummaryByMeasurement(); err != nil { + return err + } + + allIDs := make([]uint64, 0, len(cmd.shardIdxs)) + for id := range cmd.shardIdxs { + allIDs = append(allIDs, id) + } + sort.Slice(allIDs, func(i int, j int) bool { return allIDs[i] < allIDs[j] }) + + for _, id := range allIDs { + if err := cmd.printShardByMeasurement(id); err != nil { + return err + } + } + return nil +} + +// calculateCardinalities calculates the cardinalities of the set of shard being +// worked on concurrently. The provided function determines how cardinality is +// calculated and broken down. +func (cmd *Command) calculateCardinalities(fn func(id uint64) error) error { + // Get list of shards to work on. + shardIDs := make([]uint64, 0, len(cmd.shardIdxs)) + for id := range cmd.shardIdxs { + shardIDs = append(shardIDs, id) + } + + errC := make(chan error, len(shardIDs)) + var maxi uint32 // index of maximumm shard being worked on. + for k := 0; k < cmd.concurrency; k++ { + go func() { + for { + i := int(atomic.AddUint32(&maxi, 1) - 1) // Get next partition to work on. + if i >= len(shardIDs) { + return // No more work. + } + errC <- fn(shardIDs[i]) + } + }() + } + + // Check for error + for i := 0; i < cap(errC); i++ { + if err := <-errC; err != nil { + return err + } + } + return nil +} + +type cardinality struct { + name []byte + short []uint32 + set *tsdb.SeriesIDSet +} + +func (c *cardinality) add(x uint64) { + if c.set != nil { + c.set.AddNoLock(x) + return + } + + c.short = append(c.short, uint32(x)) // Series IDs never get beyond 2^32 + + // Cheaper to store in bitmap. + if len(c.short) > useBitmapN { + c.set = tsdb.NewSeriesIDSet() + for i := 0; i < len(c.short); i++ { + c.set.AddNoLock(uint64(c.short[i])) + } + c.short = nil + return + } +} + +func (c *cardinality) cardinality() int64 { + if c == nil || (c.short == nil && c.set == nil) { + return 0 + } + + if c.short != nil { + return int64(len(c.short)) + } + return int64(c.set.Cardinality()) +} + +type cardinalities []*cardinality + +func (a cardinalities) Len() int { return len(a) } +func (a cardinalities) Less(i, j int) bool { return a[i].cardinality() < a[j].cardinality() } +func (a cardinalities) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +func (cmd *Command) cardinalityByMeasurement(shardID uint64) error { + idx := cmd.shardIdxs[shardID] + itr, err := idx.MeasurementIterator() + if err != nil { + return err + } else if itr == nil { + return nil + } + defer itr.Close() + +OUTER: + for { + name, err := itr.Next() + if err != nil { + return err + } else if name == nil { + break OUTER + } + + // Get series ID set to track cardinality under measurement. + c, ok := cmd.cardinalities[shardID][string(name)] + if !ok { + c = &cardinality{name: name} + cmd.cardinalities[shardID][string(name)] = c + } + + sitr, err := idx.MeasurementSeriesIDIterator(name) + if err != nil { + return err + } else if sitr == nil { + continue + } + + var e tsdb.SeriesIDElem + for e, err = sitr.Next(); err == nil && e.SeriesID != 0; e, err = sitr.Next() { + if e.SeriesID > math.MaxUint32 { + panic(fmt.Sprintf("series ID is too large: %d (max %d). 
Corrupted series file?", e.SeriesID, uint32(math.MaxUint32))) + } + c.add(e.SeriesID) + } + sitr.Close() + + if err != nil { + return err + } + } + return nil +} + +type result struct { + name []byte + count int64 + + // For low cardinality measurements just track series using map + lowCardinality map[uint32]struct{} + + // For higher cardinality measurements track using bitmap. + set *tsdb.SeriesIDSet +} + +func (r *result) addShort(ids []uint32) { + // There is already a bitset of this result. + if r.set != nil { + for _, id := range ids { + r.set.AddNoLock(uint64(id)) + } + return + } + + // Still tracking low cardinality sets + if r.lowCardinality == nil { + r.lowCardinality = map[uint32]struct{}{} + } + + for _, id := range ids { + r.lowCardinality[id] = struct{}{} + } + + // Cardinality is large enough that we will benefit from using a bitmap + if len(r.lowCardinality) > useBitmapN { + r.set = tsdb.NewSeriesIDSet() + for id := range r.lowCardinality { + r.set.AddNoLock(uint64(id)) + } + r.lowCardinality = nil + } +} + +func (r *result) merge(other *tsdb.SeriesIDSet) { + if r.set == nil { + r.set = tsdb.NewSeriesIDSet() + for id := range r.lowCardinality { + r.set.AddNoLock(uint64(id)) + } + r.lowCardinality = nil + } + r.set.Merge(other) +} + +type results []*result + +func (a results) Len() int { return len(a) } +func (a results) Less(i, j int) bool { return a[i].count < a[j].count } +func (a results) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +func (cmd *Command) printSummaryByMeasurement() error { + // Get global set of measurement names across shards. + idxs := &tsdb.IndexSet{SeriesFile: cmd.sfile} + for _, idx := range cmd.shardIdxs { + idxs.Indexes = append(idxs.Indexes, idx) + } + + mitr, err := idxs.MeasurementIterator() + if err != nil { + return err + } else if mitr == nil { + return errors.New("got nil measurement iterator for index set") + } + defer mitr.Close() + + var name []byte + var totalCardinality int64 + measurements := results{} + for name, err = mitr.Next(); err == nil && name != nil; name, err = mitr.Next() { + res := &result{name: name} + for _, shardCards := range cmd.cardinalities { + other, ok := shardCards[string(name)] + if !ok { + continue // this shard doesn't have anything for this measurement. + } + + if other.short != nil && other.set != nil { + panic("cardinality stored incorrectly") + } + + if other.short != nil { // low cardinality case + res.addShort(other.short) + } else if other.set != nil { // High cardinality case + res.merge(other.set) + } + + // Shard does not have any series for this measurement. + } + + // Determine final cardinality and allow intermediate structures to be + // GCd. + if res.lowCardinality != nil { + res.count = int64(len(res.lowCardinality)) + } else { + res.count = int64(res.set.Cardinality()) + } + totalCardinality += res.count + res.set = nil + res.lowCardinality = nil + measurements = append(measurements, res) + } + + if err != nil { + return err + } + + // sort measurements by cardinality. + sort.Sort(sort.Reverse(measurements)) + + if cmd.topN > 0 { + // There may not be "topN" measurement cardinality to sub-slice. 
+ n := int(math.Min(float64(cmd.topN), float64(len(measurements)))) + measurements = measurements[:n] + } + + tw := tabwriter.NewWriter(cmd.Stdout, 4, 4, 1, '\t', 0) + fmt.Fprintf(tw, "Summary\nDatabase Path: %s\nCardinality (exact): %d\n\n", cmd.dbPath, totalCardinality) + fmt.Fprint(tw, "Measurement\tCardinality (exact)\n\n") + for _, res := range measurements { + fmt.Fprintf(tw, "%q\t\t%d\n", res.name, res.count) + } + + if err := tw.Flush(); err != nil { + return err + } + fmt.Fprint(cmd.Stdout, "\n\n") + return nil +} + +func (cmd *Command) printShardByMeasurement(id uint64) error { + allMap, ok := cmd.cardinalities[id] + if !ok { + return nil + } + + var totalCardinality int64 + all := make(cardinalities, 0, len(allMap)) + for _, card := range allMap { + n := card.cardinality() + if n == 0 { + continue + } + + totalCardinality += n + all = append(all, card) + } + + sort.Sort(sort.Reverse(all)) + + // Trim to top-n + if cmd.topN > 0 { + // There may not be "topN" measurement cardinality to sub-slice. + n := int(math.Min(float64(cmd.topN), float64(len(all)))) + all = all[:n] + } + + tw := tabwriter.NewWriter(cmd.Stdout, 4, 4, 1, '\t', 0) + fmt.Fprintf(tw, "===============\nShard ID: %d\nPath: %s\nCardinality (exact): %d\n\n", id, cmd.shardPaths[id], totalCardinality) + fmt.Fprint(tw, "Measurement\tCardinality (exact)\n\n") + for _, card := range all { + fmt.Fprintf(tw, "%q\t\t%d\n", card.name, card.cardinality()) + } + fmt.Fprint(tw, "===============\n\n") + if err := tw.Flush(); err != nil { + return err + } + fmt.Fprint(cmd.Stdout, "\n\n") + return nil +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/verify/seriesfile/command.go b/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/verify/seriesfile/command.go new file mode 100644 index 0000000..7eb4cfb --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/verify/seriesfile/command.go @@ -0,0 +1,120 @@ +// Package seriesfile verifies integrity of series files. +package seriesfile + +import ( + "flag" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "runtime" + + "github.com/influxdata/influxdb/logger" + "go.uber.org/zap/zapcore" +) + +// Command represents the program execution for "influx_inspect verify-seriesfile". +type Command struct { + Stdout io.Writer + Stderr io.Writer + + dir string + db string + seriesFile string + verbose bool + concurrent int +} + +// NewCommand returns a new instance of Command. +func NewCommand() *Command { + return &Command{ + Stdout: os.Stdout, + Stderr: os.Stderr, + } +} + +// Run executes the command. +func (cmd *Command) Run(args ...string) error { + fs := flag.NewFlagSet("verify-seriesfile", flag.ExitOnError) + fs.StringVar(&cmd.dir, "dir", filepath.Join(os.Getenv("HOME"), ".influxdb", "data"), + "Data directory.") + fs.StringVar(&cmd.db, "db", "", + "Only use this database inside of the data directory.") + fs.StringVar(&cmd.seriesFile, "series-file", "", + "Path to a series file. 
This overrides -db and -dir.") + fs.BoolVar(&cmd.verbose, "v", false, + "Verbose output.") + fs.IntVar(&cmd.concurrent, "c", runtime.GOMAXPROCS(0), + "How many concurrent workers to run.") + + fs.SetOutput(cmd.Stdout) + fs.Usage = cmd.printUsage + + if err := fs.Parse(args); err != nil { + return err + } + + config := logger.NewConfig() + config.Level = zapcore.WarnLevel + if cmd.verbose { + config.Level = zapcore.InfoLevel + } + logger, err := config.New(cmd.Stderr) + if err != nil { + return err + } + + v := NewVerify() + v.Logger = logger + v.Concurrent = cmd.concurrent + + if cmd.seriesFile != "" { + _, err := v.VerifySeriesFile(cmd.seriesFile) + return err + } + + if cmd.db != "" { + _, err := v.VerifySeriesFile(filepath.Join(cmd.dir, cmd.db, "_series")) + return err + } + + dbs, err := ioutil.ReadDir(cmd.dir) + if err != nil { + return err + } + + for _, db := range dbs { + if !db.IsDir() { + continue + } + _, err := v.VerifySeriesFile(filepath.Join(cmd.dir, db.Name(), "_series")) + if err != nil { + return err + } + } + + return nil +} + +func (cmd *Command) printUsage() { + usage := `Verifies the integrity of Series files. + +Usage: influx_inspect verify-seriesfile [flags] + + -dir + Root data path. + Defaults to "%[1]s/.influxdb/data". + -db + Only verify this database inside of the data directory. + -series-file + Path to a series file. This overrides -db and -dir. + -v + Enable verbose logging. + -c + How many concurrent workers to run. + Defaults to "%[2]d" on this machine. +` + + fmt.Printf(usage, os.Getenv("HOME"), runtime.GOMAXPROCS(0)) +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/verify/seriesfile/verify.go b/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/verify/seriesfile/verify.go new file mode 100644 index 0000000..a41b685 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/verify/seriesfile/verify.go @@ -0,0 +1,402 @@ +package seriesfile + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "sort" + "sync" + + "github.com/influxdata/influxdb/tsdb" + "go.uber.org/zap" +) + +// verifyResult contains the result of a Verify... call +type verifyResult struct { + valid bool + err error +} + +// Verify contains configuration for running verification of series files. +type Verify struct { + Concurrent int + Logger *zap.Logger + + done chan struct{} +} + +// NewVerify constructs a Verify with good defaults. +func NewVerify() Verify { + return Verify{ + Concurrent: runtime.GOMAXPROCS(0), + Logger: zap.NewNop(), + } +} + +// VerifySeriesFile performs verifications on a series file. The error is only returned +// if there was some fatal problem with operating, not if there was a problem with the series file. +func (v Verify) VerifySeriesFile(filePath string) (valid bool, err error) { + v.Logger = v.Logger.With(zap.String("path", filePath)) + v.Logger.Info("Verifying series file") + + defer func() { + if rec := recover(); rec != nil { + v.Logger.Error("Panic verifying file", zap.String("recovered", fmt.Sprint(rec))) + valid = false + } + }() + + partitionInfos, err := ioutil.ReadDir(filePath) + if os.IsNotExist(err) { + v.Logger.Error("Series file does not exist") + return false, nil + } + if err != nil { + return false, err + } + + // Check every partition in concurrently. 
+ concurrent := v.Concurrent + if concurrent <= 0 { + concurrent = 1 + } + in := make(chan string, len(partitionInfos)) + out := make(chan verifyResult, len(partitionInfos)) + + // Make sure all the workers are cleaned up when we return. + var wg sync.WaitGroup + defer wg.Wait() + + // Set up cancellation. Any return will cause the workers to be cancelled. + v.done = make(chan struct{}) + defer close(v.done) + + for i := 0; i < concurrent; i++ { + wg.Add(1) + go func() { + defer wg.Done() + + for partitionPath := range in { + valid, err := v.VerifyPartition(partitionPath) + select { + case out <- verifyResult{valid: valid, err: err}: + case <-v.done: + return + } + } + }() + } + + // send off the work and read the results. + for _, partitionInfo := range partitionInfos { + in <- filepath.Join(filePath, partitionInfo.Name()) + } + close(in) + + for range partitionInfos { + result := <-out + if result.err != nil { + return false, err + } else if !result.valid { + return false, nil + } + } + + return true, nil +} + +// VerifyPartition performs verifications on a partition of a series file. The error is only returned +// if there was some fatal problem with operating, not if there was a problem with the partition. +func (v Verify) VerifyPartition(partitionPath string) (valid bool, err error) { + v.Logger = v.Logger.With(zap.String("partition", filepath.Base(partitionPath))) + v.Logger.Info("Verifying partition") + + defer func() { + if rec := recover(); rec != nil { + v.Logger.Error("Panic verifying partition", zap.String("recovered", fmt.Sprint(rec))) + valid = false + } + }() + + segmentInfos, err := ioutil.ReadDir(partitionPath) + if err != nil { + return false, err + } + + segments := make([]*tsdb.SeriesSegment, 0, len(segmentInfos)) + ids := make(map[uint64]IDData) + + // check every segment + for _, segmentInfo := range segmentInfos { + select { + default: + case <-v.done: + return false, nil + } + + segmentPath := filepath.Join(partitionPath, segmentInfo.Name()) + segmentID, err := tsdb.ParseSeriesSegmentFilename(segmentInfo.Name()) + if err != nil { + continue + } + + if valid, err := v.VerifySegment(segmentPath, ids); err != nil { + return false, err + } else if !valid { + return false, nil + } + + // open the segment for verifying the index. we want it to be open outside + // the for loop as well, so the defer is ok. + segment := tsdb.NewSeriesSegment(segmentID, segmentPath) + if err := segment.Open(); err != nil { + return false, err + } + defer segment.Close() + + segments = append(segments, segment) + } + + // check the index + indexPath := filepath.Join(partitionPath, "index") + if valid, err := v.VerifyIndex(indexPath, segments, ids); err != nil { + return false, err + } else if !valid { + return false, nil + } + + return true, nil +} + +// IDData keeps track of data about a series ID. +type IDData struct { + Offset int64 + Key []byte + Deleted bool +} + +// VerifySegment performs verifications on a segment of a series file. The error is only returned +// if there was some fatal problem with operating, not if there was a problem with the partition. +// The ids map is populated with information about the ids stored in the segment. +func (v Verify) VerifySegment(segmentPath string, ids map[uint64]IDData) (valid bool, err error) { + segmentName := filepath.Base(segmentPath) + v.Logger = v.Logger.With(zap.String("segment", segmentName)) + v.Logger.Info("Verifying segment") + + // Open up the segment and grab it's data. 
+ segmentID, err := tsdb.ParseSeriesSegmentFilename(segmentName) + if err != nil { + return false, err + } + segment := tsdb.NewSeriesSegment(segmentID, segmentPath) + if err := segment.Open(); err != nil { + v.Logger.Error("Error opening segment", zap.Error(err)) + return false, nil + } + defer segment.Close() + buf := newBuffer(segment.Data()) + + defer func() { + if rec := recover(); rec != nil { + v.Logger.Error("Panic verifying segment", zap.String("recovered", fmt.Sprint(rec)), + zap.Int64("offset", buf.offset)) + valid = false + } + }() + + // Skip the header: it has already been verified by the Open call. + if err := buf.advance(tsdb.SeriesSegmentHeaderSize); err != nil { + v.Logger.Error("Unable to advance buffer", + zap.Int64("offset", buf.offset), + zap.Error(err)) + return false, nil + } + + prevID, firstID := uint64(0), true + +entries: + for len(buf.data) > 0 { + select { + default: + case <-v.done: + return false, nil + } + + flag, id, key, sz := tsdb.ReadSeriesEntry(buf.data) + + // Check the flag is valid and for id monotonicity. + switch flag { + case tsdb.SeriesEntryInsertFlag: + if !firstID && prevID > id { + v.Logger.Error("ID is not monotonically increasing", + zap.Uint64("prev_id", prevID), + zap.Uint64("id", id), + zap.Int64("offset", buf.offset)) + return false, nil + } + + firstID = false + prevID = id + + if ids != nil { + keyCopy := make([]byte, len(key)) + copy(keyCopy, key) + + ids[id] = IDData{ + Offset: tsdb.JoinSeriesOffset(segment.ID(), uint32(buf.offset)), + Key: keyCopy, + } + } + + case tsdb.SeriesEntryTombstoneFlag: + if ids != nil { + data := ids[id] + data.Deleted = true + ids[id] = data + } + + case 0: // if zero, there are no more entries + if err := buf.advance(sz); err != nil { + v.Logger.Error("Unable to advance buffer", + zap.Int64("offset", buf.offset), + zap.Error(err)) + return false, nil + } + break entries + + default: + v.Logger.Error("Invalid flag", + zap.Uint8("flag", flag), + zap.Int64("offset", buf.offset)) + return false, nil + } + + // Ensure the key parses. This may panic, but our defer handler should + // make the error message more usable by providing the key. + parsed := false + func() { + defer func() { + if rec := recover(); rec != nil { + v.Logger.Error("Panic parsing key", + zap.String("key", fmt.Sprintf("%x", key)), + zap.Int64("offset", buf.offset), + zap.String("recovered", fmt.Sprint(rec))) + } + }() + tsdb.ParseSeriesKey(key) + parsed = true + }() + if !parsed { + return false, nil + } + + // Advance past the entry. + if err := buf.advance(sz); err != nil { + v.Logger.Error("Unable to advance buffer", + zap.Int64("offset", buf.offset), + zap.Error(err)) + return false, nil + } + } + + return true, nil +} + +// VerifyIndex performs verification on an index in a series file. The error is only returned +// if there was some fatal problem with operating, not if there was a problem with the partition. +// The ids map must be built from verifying the passed in segments. 
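+// (In VerifyPartition above, VerifySegment is called for every segment first to
+// populate ids, and the same map is then passed to VerifyIndex here.)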
+func (v Verify) VerifyIndex(indexPath string, segments []*tsdb.SeriesSegment, + ids map[uint64]IDData) (valid bool, err error) { + v.Logger.Info("Verifying index") + + defer func() { + if rec := recover(); rec != nil { + v.Logger.Error("Panic verifying index", zap.String("recovered", fmt.Sprint(rec))) + valid = false + } + }() + + index := tsdb.NewSeriesIndex(indexPath) + if err := index.Open(); err != nil { + v.Logger.Error("Error opening index", zap.Error(err)) + return false, nil + } + defer index.Close() + + if err := index.Recover(segments); err != nil { + v.Logger.Error("Error recovering index", zap.Error(err)) + return false, nil + } + + // we check all the ids in a consistent order to get the same errors if + // there is a problem + idsList := make([]uint64, 0, len(ids)) + for id := range ids { + idsList = append(idsList, id) + } + sort.Slice(idsList, func(i, j int) bool { + return idsList[i] < idsList[j] + }) + + for _, id := range idsList { + select { + default: + case <-v.done: + return false, nil + } + + IDData := ids[id] + + expectedOffset, expectedID := IDData.Offset, id + if IDData.Deleted { + expectedOffset, expectedID = 0, 0 + } + + // check both that the offset is right and that we get the right + // id for the key + if gotOffset := index.FindOffsetByID(id); gotOffset != expectedOffset { + v.Logger.Error("Index inconsistency", + zap.Uint64("id", id), + zap.Int64("got_offset", gotOffset), + zap.Int64("expected_offset", expectedOffset)) + return false, nil + } + + if gotID := index.FindIDBySeriesKey(segments, IDData.Key); gotID != expectedID { + v.Logger.Error("Index inconsistency", + zap.Uint64("id", id), + zap.Uint64("got_id", gotID), + zap.Uint64("expected_id", expectedID)) + return false, nil + } + } + + return true, nil +} + +// buffer allows one to safely advance a byte slice and keep track of how many bytes were advanced. +type buffer struct { + offset int64 + data []byte +} + +// newBuffer constructs a buffer with the provided data. +func newBuffer(data []byte) *buffer { + return &buffer{ + offset: 0, + data: data, + } +} + +// advance will consume n bytes from the data slice and return an error if there is not enough +// data to do so. 
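+// For example (illustrative): advancing a buffer that holds 4 bytes by 8 returns
+// "unable to advance 8 bytes: 4 remaining" and leaves the buffer unchanged.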
+func (b *buffer) advance(n int64) error { + if int64(len(b.data)) < n { + return fmt.Errorf("unable to advance %d bytes: %d remaining", n, len(b.data)) + } + b.data = b.data[n:] + b.offset += n + return nil +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/verify/seriesfile/verify_test.go b/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/verify/seriesfile/verify_test.go new file mode 100644 index 0000000..c2b0afe --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/verify/seriesfile/verify_test.go @@ -0,0 +1,168 @@ +package seriesfile_test + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "testing" + "time" + + "github.com/influxdata/influxdb/cmd/influx_inspect/verify/seriesfile" + "github.com/influxdata/influxdb/models" + "github.com/influxdata/influxdb/tsdb" + "go.uber.org/zap" +) + +func TestVerifies_Valid(t *testing.T) { + test := NewTest(t) + defer test.Close() + + verify := seriesfile.NewVerify() + if testing.Verbose() { + verify.Logger, _ = zap.NewDevelopment() + } + passed, err := verify.VerifySeriesFile(test.Path) + test.AssertNoError(err) + test.Assert(passed) +} + +func TestVerifies_Invalid(t *testing.T) { + test := NewTest(t) + defer test.Close() + + test.AssertNoError(filepath.Walk(test.Path, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if info.IsDir() { + return nil + } + + test.Backup(path) + defer test.Restore(path) + + fh, err := os.OpenFile(path, os.O_RDWR, 0) + test.AssertNoError(err) + defer fh.Close() + + _, err = fh.WriteAt([]byte("BOGUS"), 0) + test.AssertNoError(err) + test.AssertNoError(fh.Close()) + + passed, err := seriesfile.NewVerify().VerifySeriesFile(test.Path) + test.AssertNoError(err) + test.Assert(!passed) + + return nil + })) +} + +// +// helpers +// + +type Test struct { + *testing.T + Path string +} + +func NewTest(t *testing.T) *Test { + t.Helper() + + dir, err := ioutil.TempDir("", "verify-seriesfile-") + if err != nil { + t.Fatal(err) + } + + // create a series file in the directory + err = func() error { + seriesFile := tsdb.NewSeriesFile(dir) + if err := seriesFile.Open(); err != nil { + return err + } + defer seriesFile.Close() + seriesFile.EnableCompactions() + + const ( + compactionThreshold = 100 + numSeries = 2 * tsdb.SeriesFilePartitionN * compactionThreshold + ) + + for _, partition := range seriesFile.Partitions() { + partition.CompactThreshold = compactionThreshold + } + + var names [][]byte + var tagsSlice []models.Tags + + for i := 0; i < numSeries; i++ { + names = append(names, []byte(fmt.Sprintf("series%d", i))) + tagsSlice = append(tagsSlice, nil) + } + + _, err := seriesFile.CreateSeriesListIfNotExists(names, tagsSlice) + if err != nil { + return err + } + + // wait for compaction to make sure we detect issues with the index + partitions := seriesFile.Partitions() + wait: + for _, partition := range partitions { + if partition.Compacting() { + time.Sleep(100 * time.Millisecond) + goto wait + } + } + + return seriesFile.Close() + }() + if err != nil { + os.RemoveAll(dir) + t.Fatal(err) + } + + return &Test{ + T: t, + Path: dir, + } +} + +func (t *Test) Close() { + os.RemoveAll(t.Path) +} + +func (t *Test) AssertNoError(err error) { + t.Helper() + if err != nil { + t.Fatal("unexpected error:", err) + } +} + +func (t *Test) Assert(x bool) { + t.Helper() + if !x { + t.Fatal("unexpected condition") + } +} + +// Backup makes a copy of the path for a later Restore. 
+func (t *Test) Backup(path string) { + in, err := os.Open(path) + t.AssertNoError(err) + defer in.Close() + + out, err := os.Create(path + ".backup") + t.AssertNoError(err) + defer out.Close() + + _, err = io.Copy(out, in) + t.AssertNoError(err) +} + +// Restore restores the file at the path to the time when Backup was called last. +func (t *Test) Restore(path string) { + t.AssertNoError(os.Rename(path+".backup", path)) +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/verify/tsm/verify.go b/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/verify/tsm/verify.go new file mode 100644 index 0000000..287f27a --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/verify/tsm/verify.go @@ -0,0 +1,120 @@ +// Package tsm verifies integrity of TSM files. +package tsm + +import ( + "flag" + "fmt" + "hash/crc32" + "io" + "os" + "path/filepath" + "text/tabwriter" + "time" + + "github.com/influxdata/influxdb/tsdb/engine/tsm1" +) + +// Command represents the program execution for "influx_inspect verify". +type Command struct { + Stderr io.Writer + Stdout io.Writer +} + +// NewCommand returns a new instance of Command. +func NewCommand() *Command { + return &Command{ + Stderr: os.Stderr, + Stdout: os.Stdout, + } +} + +// Run executes the command. +func (cmd *Command) Run(args ...string) error { + var path string + fs := flag.NewFlagSet("verify", flag.ExitOnError) + fs.StringVar(&path, "dir", os.Getenv("HOME")+"/.influxdb", "Root storage path. [$HOME/.influxdb]") + + fs.SetOutput(cmd.Stdout) + fs.Usage = cmd.printUsage + + if err := fs.Parse(args); err != nil { + return err + } + + start := time.Now() + dataPath := filepath.Join(path, "data") + + brokenBlocks := 0 + totalBlocks := 0 + + // No need to do this in a loop + ext := fmt.Sprintf(".%s", tsm1.TSMFileExtension) + + // Get all TSM files by walking through the data dir + files := []string{} + err := filepath.Walk(dataPath, func(path string, f os.FileInfo, err error) error { + if err != nil { + return err + } + if filepath.Ext(path) == ext { + files = append(files, path) + } + return nil + }) + if err != nil { + panic(err) + } + + tw := tabwriter.NewWriter(cmd.Stdout, 16, 8, 0, '\t', 0) + + // Verify the checksums of every block in every file + for _, f := range files { + file, err := os.OpenFile(f, os.O_RDONLY, 0600) + if err != nil { + return err + } + + reader, err := tsm1.NewTSMReader(file) + if err != nil { + return err + } + + blockItr := reader.BlockIterator() + brokenFileBlocks := 0 + count := 0 + for blockItr.Next() { + totalBlocks++ + key, _, _, _, checksum, buf, err := blockItr.Read() + if err != nil { + brokenBlocks++ + fmt.Fprintf(tw, "%s: could not get checksum for key %v block %d due to error: %q\n", f, key, count, err) + } else if expected := crc32.ChecksumIEEE(buf); checksum != expected { + brokenBlocks++ + fmt.Fprintf(tw, "%s: got %d but expected %d for key %v, block %d\n", f, checksum, expected, key, count) + } + count++ + } + if brokenFileBlocks == 0 { + fmt.Fprintf(tw, "%s: healthy\n", f) + } + reader.Close() + } + + fmt.Fprintf(tw, "Broken Blocks: %d / %d, in %vs\n", brokenBlocks, totalBlocks, time.Since(start).Seconds()) + tw.Flush() + return nil +} + +// printUsage prints the usage message to STDERR. +func (cmd *Command) printUsage() { + usage := fmt.Sprintf(`Verifies the integrity of TSM files. + +Usage: influx_inspect verify [flags] + + -dir + Root storage path + Defaults to "%[1]s/.influxdb". 
+ `, os.Getenv("HOME"))
+
+	fmt.Fprintf(cmd.Stdout, usage)
+}
diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/verify/tsm/verify_test.go b/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/verify/tsm/verify_test.go
new file mode 100644
index 0000000..7b6a391
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/verify/tsm/verify_test.go
@@ -0,0 +1,3 @@
+package tsm_test
+
+// TODO: write some tests
diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx_stress/README.md b/vendor/github.com/influxdata/influxdb/cmd/influx_stress/README.md
new file mode 100644
index 0000000..c403633
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/cmd/influx_stress/README.md
@@ -0,0 +1,43 @@
+# `influx_stress`
+
+If you run into any issues with this tool, please mention @jackzampolin when you create an issue.
+
+## Ways to run
+
+### `influx_stress`
+This runs a basic stress test with the [default config](https://github.com/influxdata/influxdb/blob/master/stress/stress.toml). For more information on the configuration file, please see the default.
+
+### `influx_stress -config someConfig.toml`
+This runs the stress test with a valid configuration file located at `someConfig.toml`.
+
+### `influx_stress -v2 -config someConfig.iql`
+This runs the stress test with a valid `v2` configuration file. For more information about the `v2` stress test see the [v2 stress README](https://github.com/influxdata/influxdb/blob/master/stress/v2/README.md).
+
+## Flags
+
+If flags are defined, they override the config from any file passed in.
+
+### `-addr` string
+IP address and port of the database where response times will persist (e.g., localhost:8086)
+
+`default` = "http://localhost:8086"
+
+### `-config` string
+The relative path to the stress test configuration file.
+
+`default` = [config](https://github.com/influxdata/influxdb/blob/master/stress/stress.toml)
+
+### `-cpuprofile` filename
+Writes the result of Go's CPU profile to filename
+
+`default` = no profiling
+
+### `-database` string
+Name of the database on `-addr` to which `influx_stress` will persist write and query response times
+
+`default` = "stress"
+
+### `-tags` value
+A comma-separated list of tags to add to write and query response times.
+
+`default` = ""
diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx_stress/examples/template.toml b/vendor/github.com/influxdata/influxdb/cmd/influx_stress/examples/template.toml
new file mode 100644
index 0000000..e410cd9
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/cmd/influx_stress/examples/template.toml
@@ -0,0 +1,92 @@
+# This section can be removed
+[provision]
+  # The basic provisioner simply deletes and creates the database.
+  # If `reset_database` is false, it will not attempt to delete the database
+  [provision.basic]
+    # If enabled the provisioner will actually run
+    enabled = true
+    # Address of the instance that is to be provisioned
+    address = "localhost:8086"
+    # Database that will be created/deleted
+    database = "stress"
+    # Attempt to delete database
+    reset_database = true
+
+# This section cannot be commented out
+# To prevent writes set `enabled=false`
+# in [write.influx_client.basic]
+[write]
+  [write.point_generator]
+    # The basic point generator will generate points of the form
+    # `cpu,host=server-%v,location=us-west value=234 123456`
+    [write.point_generator.basic]
+      # number of points that will be written for each of the series
+      point_count = 100
+      # number of series
+      series_count = 100000
+      # How much time between each timestamp
+      tick = "10s"
+      # Randomize timestamp a bit (not functional)
+      jitter = true
+      # Precision of points that are being written
+      precision = "s"
+      # name of the measurement that will be written
+      measurement = "cpu"
+      # The date for the first point that is written into influx
+      start_date = "2006-Jan-02"
+      # Defines a tag for a series
+      [[write.point_generator.basic.tag]]
+        key = "host"
+        value = "server"
+      [[write.point_generator.basic.tag]]
+        key = "location"
+        value = "us-west"
+      # Defines a field for a series
+      [[write.point_generator.basic.field]]
+        key = "value"
+        value = "float64" # supported types: float64, int, bool
+
+
+  [write.influx_client]
+    [write.influx_client.basic]
+      # If enabled the writer will actually write
+      enabled = true
+      # Addresses is an array of the InfluxDB instances
+      addresses = ["localhost:8086"] # stress_test_server runs on port 1234
+      # Database that is being written to
+      database = "stress"
+      # Precision of points that are being written
+      precision = "s"
+      # Size of batches that are sent to db
+      batch_size = 10000
+      # Interval between each batch
+      batch_interval = "0s"
+      # How many concurrent writers to the db
+      concurrency = 10
+      # ssl enabled?
+      ssl = false
+      # format of points that are written to influxdb
+      format = "line_http" # line_udp (not supported yet), graphite_tcp (not supported yet), graphite_udp (not supported yet)
+
+# This section can be removed
+[read]
+  [read.query_generator]
+    [read.query_generator.basic]
+      # Template of the query that will be run against the instance
+      template = "SELECT count(value) FROM cpu where host='server-%v'"
+      # How many times the templated query will be run
+      query_count = 250
+
+  [read.query_client]
+    [read.query_client.basic]
+      # If enabled the reader will actually read
+      enabled = true
+      # Address of the instance that will be queried
+      addresses = ["localhost:8086"]
+      # Database that will be queried
+      database = "stress"
+      # Interval between queries
+      query_interval = "100ms"
+      # Number of concurrent queriers
+      concurrency = 1
+
diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx_stress/influx_stress.go b/vendor/github.com/influxdata/influxdb/cmd/influx_stress/influx_stress.go
new file mode 100644
index 0000000..5186d18
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/cmd/influx_stress/influx_stress.go
@@ -0,0 +1,71 @@
+// Command influx_stress is deprecated; use github.com/influxdata/influx-stress instead.
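+//
+// Example invocations, as documented in this command's README (the flags are
+// described there in more detail):
+//
+//	influx_stress
+//	influx_stress -config someConfig.toml
+//	influx_stress -v2 -config someConfig.iql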
+package main
+
+import (
+	"flag"
+	"fmt"
+	"log"
+	"os"
+	"runtime/pprof"
+
+	"github.com/influxdata/influxdb/stress"
+	v2 "github.com/influxdata/influxdb/stress/v2"
+)
+
+var (
+	useV2      = flag.Bool("v2", false, "Use version 2 of stress tool")
+	config     = flag.String("config", "", "The stress test file")
+	cpuprofile = flag.String("cpuprofile", "", "Write the cpu profile to `filename`")
+	db         = flag.String("db", "", "target database within test system for write and query load")
+)
+
+func main() {
+	o := stress.NewOutputConfig()
+	flag.Parse()
+
+	if *cpuprofile != "" {
+		f, err := os.Create(*cpuprofile)
+		if err != nil {
+			fmt.Println(err)
+			return
+		}
+		pprof.StartCPUProfile(f)
+		defer pprof.StopCPUProfile()
+	}
+
+	if *useV2 {
+		if *config != "" {
+			v2.RunStress(*config)
+		} else {
+			v2.RunStress("stress/v2/iql/file.iql")
+		}
+	} else {
+
+		c, err := stress.NewConfig(*config)
+		if err != nil {
+			log.Fatal(err)
+			return
+		}
+
+		if *db != "" {
+			c.Provision.Basic.Database = *db
+			c.Write.InfluxClients.Basic.Database = *db
+			c.Read.QueryClients.Basic.Database = *db
+		}
+
+		w := stress.NewWriter(c.Write.PointGenerators.Basic, &c.Write.InfluxClients.Basic)
+		r := stress.NewQuerier(&c.Read.QueryGenerators.Basic, &c.Read.QueryClients.Basic)
+		s := stress.NewStressTest(&c.Provision.Basic, w, r)
+
+		bw := stress.NewBroadcastChannel()
+		bw.Register(c.Write.InfluxClients.Basic.BasicWriteHandler)
+		bw.Register(o.HTTPHandler("write"))
+
+		br := stress.NewBroadcastChannel()
+		br.Register(c.Read.QueryClients.Basic.BasicReadHandler)
+		br.Register(o.HTTPHandler("read"))
+
+		s.Start(bw.Handle, br.Handle)
+
+	}
+}
diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/README.md b/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/README.md
new file mode 100644
index 0000000..d63c152
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/README.md
@@ -0,0 +1,152 @@
+# Converting b1 and bz1 shards to tsm1
+
+`influx_tsm` is a tool for converting b1 and bz1 shards to tsm1
+format. Converting shards to tsm1 format results in a very significant
+reduction in disk usage, and significantly improved write-throughput,
+when writing data into those shards.
+
+Conversion can be controlled on a database-by-database basis. By
+default a database is backed up before it is converted, allowing you
+to roll back any changes. Because of the backup process, ensure the
+host system has at least as much free disk space as the disk space
+consumed by the _data_ directory of your InfluxDB system.
+
+The tool automatically ignores tsm1 shards, and can be run
+idempotently on any database.
+
+Conversion is an offline process, and the InfluxDB system must be
+stopped during conversion. However, the conversion process reads and
+writes shards directly on disk and should be fast.
+
+## Steps
+
+Follow these steps to perform a conversion.
+
+* Identify the databases you wish to convert. You can convert one or more databases at a time. By default all databases are converted.
+* Decide on parallel operation. By default the conversion performs each operation in a serial manner. This minimizes load on the host system performing the conversion, but also takes the most time. If you wish to minimize the time conversion takes, enable parallel mode. Conversion will then perform as many operations as possible in parallel, but the process may place significant load on the host system (CPU, disk, and RAM usage will all increase).
+* Stop all write-traffic to your InfluxDB system.
+* Restart the InfluxDB service and wait until all WAL data is flushed to disk -- this has completed when the system responds to queries. This is to ensure all data is present in shards.
+* Stop the InfluxDB service. It should not be restarted until conversion is complete.
+* Run the conversion tool. Depending on the size of the data directory, this might be a lengthy operation. Consider running the conversion tool under a "screen" session to avoid any interruptions.
+* Unless you ran the conversion tool as the same user that runs InfluxDB, you may need to set the correct read-and-write permissions on the new tsm1 directories.
+* Restart the node and ensure the data looks correct.
+* If everything looks OK, you may then wish to remove or archive the backed-up databases.
+* Restart write traffic.
+
+## Example session
+
+Below is an example session, showing a database being converted.
+
+```
+$ # Create a backup location that the `influxdb` user has full access to
+$ mkdir -m 0777 /path/to/influxdb_backup
+$ sudo -u influxdb influx_tsm -backup /path/to/influxdb_backup -parallel /var/lib/influxdb/data
+
+b1 and bz1 shard conversion.
+-----------------------------------
+Data directory is:                  /var/lib/influxdb/data
+Backup directory is:                /path/to/influxdb_backup
+Databases specified:                all
+Database backups enabled:           yes
+Parallel mode enabled (GOMAXPROCS): yes (8)
+
+
+Found 1 shards that will be converted.
+
+Database   Retention   Path                                         Engine   Size
+_internal  monitor     /var/lib/influxdb/data/_internal/monitor/1   bz1      65536
+
+These shards will be converted. Proceed? y/N: y
+Conversion starting....
+Backing up 1 databases...
+2016/01/28 12:23:43.699266 Backup of database '_internal' started
+2016/01/28 12:23:43.699883 Backing up file /var/lib/influxdb/data/_internal/monitor/1
+2016/01/28 12:23:43.700052 Database _internal backed up (851.776µs)
+2016/01/28 12:23:43.700320 Starting conversion of shard: /var/lib/influxdb/data/_internal/monitor/1
+2016/01/28 12:23:43.706276 Conversion of /var/lib/influxdb/data/_internal/monitor/1 successful (6.040148ms)
+
+Summary statistics
+========================================
+Databases converted: 1
+Shards converted: 1
+TSM files created: 1
+Points read: 369
+Points written: 369
+NaN filtered: 0
+Inf filtered: 0
+Points without fields filtered: 0
+Disk usage pre-conversion (bytes): 65536
+Disk usage post-conversion (bytes): 11000
+Reduction factor: 83%
+Bytes per TSM point: 29.81
+Total conversion time: 7.330443ms
+
+$ # restart node, verify data
+$ sudo rm -r /path/to/influxdb_backup
+```
+
+Note that the tool first lists the shards that will be converted,
+before asking for confirmation. You can abort the conversion process
+at this step if you just wish to see what would be converted, or if
+the list of shards does not look correct.
+
+__WARNING:__ If you run the `influx_tsm` tool as a user other than the
+`influxdb` user (or the user that the InfluxDB process runs under),
+please make sure to verify the shard permissions are correct prior to
+starting InfluxDB. If needed, shard permissions can be corrected with
+the `chown` command. For example:
+
+```
+sudo chown -R influxdb:influxdb /var/lib/influxdb
+```
+
+## Rolling back a conversion
+
+After a successful backup (the message `Database XYZ backed up` was
+logged), you have a duplicate of that database in the _backup_
+directory you provided on the command line. If, when checking your
+data after a successful conversion, you notice things missing or
+something just isn't right, you can "undo" the conversion:
+
+- Shut down your node (this is very important)
+- Remove the database's directory from the influxdb `data` directory (default: `~/.influxdb/data/XYZ` for binary installations or `/var/lib/influxdb/data/XYZ` for packaged installations)
+- Copy (to make sure the backup is preserved) the database's directory from the backup directory you created into the `data` directory.
+
+Using the same directories as above, and assuming a database named `stats`:
+
+```
+$ sudo rm -r /var/lib/influxdb/data/stats
+$ sudo cp -r /path/to/influxdb_backup/stats /var/lib/influxdb/data/
+$ # restart influxd node
+```
+
+#### How to avoid downtime when upgrading shards
+
+*Identify non-`tsm1` shards*
+
+Non-`tsm1` shards are files of the form: `data/<database>/<retention policy>/<shard id>`.
+
+`tsm1` shards are files of the form: `data/<database>/<retention policy>/<shard id>/<file>.tsm`.
+
+*Determine which `b1`/`bz1` shards are cold for writes*
+
+Run the `SHOW SHARDS` query to see the start and end dates for shards.
+If the date range for a shard does not span the current time then the shard is said to be cold for writes.
+This means that no new points are expected to be added to the shard.
+The shard whose date range spans the current time is said to be hot for writes.
+You can only safely convert cold shards without stopping the InfluxDB process.
+
+*Convert cold shards*
+
+1. Copy each of the cold shards you'd like to convert to a new directory with the structure `/tmp/data/<database>/<retention policy>/<shard id>`.
+2. Run the `influx_tsm` tool on the copied files:
+```
+influx_tsm -parallel /tmp/data/
+```
+3. Remove the existing cold `b1`/`bz1` shards from the production data directory.
+4. Move the new `tsm1` shards into the original directory, overwriting the existing `b1`/`bz1` shards of the same name. Do this simultaneously with step 3 to avoid any query errors.
+5. Wait an hour, a day, or a week (depending on your retention period) for any hot `b1`/`bz1` shards to become cold and repeat steps 1 through 4 on the newly cold shards.
+
+> **Note:** Any points written to the cold shards after making a copy will be lost when the `tsm1` shard overwrites the existing cold shard.
+Nothing in InfluxDB will prevent writes to cold shards; they are merely unexpected, not impossible.
+It is your responsibility to prevent writes to cold shards to prevent data loss.
diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/b1/reader.go b/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/b1/reader.go
new file mode 100644
index 0000000..0ed844f
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/b1/reader.go
@@ -0,0 +1,270 @@
+// Package b1 reads data from b1 shards.
+package b1 // import "github.com/influxdata/influxdb/cmd/influx_tsm/b1"
+
+import (
+	"encoding/binary"
+	"math"
+	"sort"
+	"time"
+
+	"github.com/boltdb/bolt"
+	"github.com/influxdata/influxdb/cmd/influx_tsm/stats"
+	"github.com/influxdata/influxdb/cmd/influx_tsm/tsdb"
+	"github.com/influxdata/influxdb/tsdb/engine/tsm1"
+)
+
+// DefaultChunkSize is the size of chunks read from the b1 shard
+const DefaultChunkSize int = 1000
+
+var excludedBuckets = map[string]bool{
+	"fields": true,
+	"meta":   true,
+	"series": true,
+	"wal":    true,
+}
+
+// Reader is used to read all data from a b1 shard.
+type Reader struct { + path string + db *bolt.DB + tx *bolt.Tx + + cursors []*cursor + currCursor int + + keyBuf string + values []tsm1.Value + valuePos int + + fields map[string]*tsdb.MeasurementFields + codecs map[string]*tsdb.FieldCodec + + stats *stats.Stats +} + +// NewReader returns a reader for the b1 shard at path. +func NewReader(path string, stats *stats.Stats, chunkSize int) *Reader { + r := &Reader{ + path: path, + fields: make(map[string]*tsdb.MeasurementFields), + codecs: make(map[string]*tsdb.FieldCodec), + stats: stats, + } + + if chunkSize <= 0 { + chunkSize = DefaultChunkSize + } + + r.values = make([]tsm1.Value, chunkSize) + + return r +} + +// Open opens the reader. +func (r *Reader) Open() error { + // Open underlying storage. + db, err := bolt.Open(r.path, 0666, &bolt.Options{Timeout: 1 * time.Second}) + if err != nil { + return err + } + r.db = db + + // Load fields. + if err := r.db.View(func(tx *bolt.Tx) error { + meta := tx.Bucket([]byte("fields")) + c := meta.Cursor() + + for k, v := c.First(); k != nil; k, v = c.Next() { + mf := &tsdb.MeasurementFields{} + if err := mf.UnmarshalBinary(v); err != nil { + return err + } + r.fields[string(k)] = mf + r.codecs[string(k)] = tsdb.NewFieldCodec(mf.Fields) + } + return nil + }); err != nil { + return err + } + + seriesSet := make(map[string]bool) + + // ignore series index and find all series in this shard + if err := r.db.View(func(tx *bolt.Tx) error { + tx.ForEach(func(name []byte, _ *bolt.Bucket) error { + key := string(name) + if !excludedBuckets[key] { + seriesSet[key] = true + } + return nil + }) + return nil + }); err != nil { + return err + } + + r.tx, err = r.db.Begin(false) + if err != nil { + return err + } + + // Create cursor for each field of each series. + for s := range seriesSet { + measurement := tsdb.MeasurementFromSeriesKey(s) + fields := r.fields[measurement] + if fields == nil { + r.stats.IncrFiltered() + continue + } + for _, f := range fields.Fields { + c := newCursor(r.tx, s, f.Name, r.codecs[measurement]) + c.SeekTo(0) + r.cursors = append(r.cursors, c) + } + } + sort.Sort(cursors(r.cursors)) + + return nil +} + +// Next returns whether any data remains to be read. It must be called before +// the next call to Read(). +func (r *Reader) Next() bool { + r.valuePos = 0 +OUTER: + for { + if r.currCursor >= len(r.cursors) { + // All cursors drained. No more data remains. + return false + } + + cc := r.cursors[r.currCursor] + r.keyBuf = tsm1.SeriesFieldKey(cc.series, cc.field) + + for { + k, v := cc.Next() + if k == -1 { + // Go to next cursor and try again. + r.currCursor++ + if r.valuePos == 0 { + // The previous cursor had no data. Instead of returning + // just go immediately to the next cursor. + continue OUTER + } + // There is some data available. Indicate that it should be read. + return true + } + + if f, ok := v.(float64); ok { + if math.IsInf(f, 0) { + r.stats.AddPointsRead(1) + r.stats.IncrInf() + continue + } + + if math.IsNaN(f) { + r.stats.AddPointsRead(1) + r.stats.IncrNaN() + continue + } + } + + r.values[r.valuePos] = tsm1.NewValue(k, v) + r.valuePos++ + + if r.valuePos >= len(r.values) { + return true + } + } + } +} + +// Read returns the next chunk of data in the shard, converted to tsm1 values. Data is +// emitted completely for every field, in every series, before the next field is processed. 
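+// A typical consumer drains the reader with a loop like the following
+// (illustrative sketch only; this mirrors how the Converter in this tool
+// uses it):
+//
+//	for r.Next() {
+//		key, values, err := r.Read()
+//		// hand key/values to a TSM writer
+//	}
+//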
+// Data from Read() adheres to the requirements for writing to tsm1 shards +func (r *Reader) Read() (string, []tsm1.Value, error) { + return r.keyBuf, r.values[:r.valuePos], nil +} + +// Close closes the reader. +func (r *Reader) Close() error { + r.tx.Rollback() + return r.db.Close() +} + +// cursor provides ordered iteration across a series. +type cursor struct { + // Bolt cursor and readahead buffer. + cursor *bolt.Cursor + keyBuf int64 + valBuf interface{} + + series string + field string + dec *tsdb.FieldCodec +} + +// Cursor returns an iterator for a key over a single field. +func newCursor(tx *bolt.Tx, series string, field string, dec *tsdb.FieldCodec) *cursor { + cur := &cursor{ + keyBuf: -2, + series: series, + field: field, + dec: dec, + } + + // Retrieve series bucket. + b := tx.Bucket([]byte(series)) + if b != nil { + cur.cursor = b.Cursor() + } + + return cur +} + +// Seek moves the cursor to a position. +func (c *cursor) SeekTo(seek int64) { + var seekBytes [8]byte + binary.BigEndian.PutUint64(seekBytes[:], uint64(seek)) + k, v := c.cursor.Seek(seekBytes[:]) + c.keyBuf, c.valBuf = tsdb.DecodeKeyValue(c.field, c.dec, k, v) +} + +// Next returns the next key/value pair from the cursor. +func (c *cursor) Next() (key int64, value interface{}) { + for { + k, v := func() (int64, interface{}) { + if c.keyBuf != -2 { + k, v := c.keyBuf, c.valBuf + c.keyBuf = -2 + return k, v + } + + k, v := c.cursor.Next() + if k == nil { + return -1, nil + } + return tsdb.DecodeKeyValue(c.field, c.dec, k, v) + }() + + if k != -1 && v == nil { + // There is a point in the series at the next timestamp, + // but not for this cursor's field. Go to the next point. + continue + } + return k, v + } +} + +// Sort b1 cursors in correct order for writing to TSM files. + +type cursors []*cursor + +func (a cursors) Len() int { return len(a) } +func (a cursors) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a cursors) Less(i, j int) bool { + if a[i].series == a[j].series { + return a[i].field < a[j].field + } + return a[i].series < a[j].series +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/bz1/reader.go b/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/bz1/reader.go new file mode 100644 index 0000000..b369aff --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/bz1/reader.go @@ -0,0 +1,371 @@ +// Package bz1 reads data from bz1 shards. +package bz1 // import "github.com/influxdata/influxdb/cmd/influx_tsm/bz1" + +import ( + "bytes" + "encoding/binary" + "encoding/json" + "fmt" + "math" + "sort" + "time" + + "github.com/boltdb/bolt" + "github.com/golang/snappy" + "github.com/influxdata/influxdb/cmd/influx_tsm/stats" + "github.com/influxdata/influxdb/cmd/influx_tsm/tsdb" + "github.com/influxdata/influxdb/tsdb/engine/tsm1" +) + +// DefaultChunkSize is the size of chunks read from the bz1 shard +const DefaultChunkSize = 1000 + +// Reader is used to read all data from a bz1 shard. +type Reader struct { + path string + db *bolt.DB + tx *bolt.Tx + + cursors []*cursor + currCursor int + + keyBuf string + values []tsm1.Value + valuePos int + + fields map[string]*tsdb.MeasurementFields + codecs map[string]*tsdb.FieldCodec + + stats *stats.Stats +} + +// NewReader returns a reader for the bz1 shard at path. 
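+// A non-positive chunkSize selects DefaultChunkSize. An illustrative call,
+// as made by the conversion code in this tool (variable names here are
+// hypothetical):
+//
+//	r := NewReader(shardPath, &trackerStats, 0)
+//	if err := r.Open(); err != nil {
+//		// handle error
+//	}
+//	defer r.Close()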
+func NewReader(path string, stats *stats.Stats, chunkSize int) *Reader { + r := &Reader{ + path: path, + fields: make(map[string]*tsdb.MeasurementFields), + codecs: make(map[string]*tsdb.FieldCodec), + stats: stats, + } + + if chunkSize <= 0 { + chunkSize = DefaultChunkSize + } + + r.values = make([]tsm1.Value, chunkSize) + + return r +} + +// Open opens the reader. +func (r *Reader) Open() error { + // Open underlying storage. + db, err := bolt.Open(r.path, 0666, &bolt.Options{Timeout: 1 * time.Second}) + if err != nil { + return err + } + r.db = db + + seriesSet := make(map[string]bool) + + if err := r.db.View(func(tx *bolt.Tx) error { + var data []byte + + meta := tx.Bucket([]byte("meta")) + if meta == nil { + // No data in this shard. + return nil + } + + pointsBucket := tx.Bucket([]byte("points")) + if pointsBucket == nil { + return nil + } + + if err := pointsBucket.ForEach(func(key, _ []byte) error { + seriesSet[string(key)] = true + return nil + }); err != nil { + return err + } + + buf := meta.Get([]byte("fields")) + if buf == nil { + // No data in this shard. + return nil + } + + data, err = snappy.Decode(nil, buf) + if err != nil { + return err + } + if err := json.Unmarshal(data, &r.fields); err != nil { + return err + } + return nil + }); err != nil { + return err + } + + // Build the codec for each measurement. + for k, v := range r.fields { + r.codecs[k] = tsdb.NewFieldCodec(v.Fields) + } + + r.tx, err = r.db.Begin(false) + if err != nil { + return err + } + + // Create cursor for each field of each series. + for s := range seriesSet { + measurement := tsdb.MeasurementFromSeriesKey(s) + fields := r.fields[measurement] + if fields == nil { + r.stats.IncrFiltered() + continue + } + for _, f := range fields.Fields { + c := newCursor(r.tx, s, f.Name, r.codecs[measurement]) + if c == nil { + continue + } + c.SeekTo(0) + r.cursors = append(r.cursors, c) + } + } + sort.Sort(cursors(r.cursors)) + + return nil +} + +// Next returns whether there is any more data to be read. +func (r *Reader) Next() bool { + r.valuePos = 0 +OUTER: + for { + if r.currCursor >= len(r.cursors) { + // All cursors drained. No more data remains. + return false + } + + cc := r.cursors[r.currCursor] + r.keyBuf = tsm1.SeriesFieldKey(cc.series, cc.field) + + for { + k, v := cc.Next() + if k == -1 { + // Go to next cursor and try again. + r.currCursor++ + if r.valuePos == 0 { + // The previous cursor had no data. Instead of returning + // just go immediately to the next cursor. + continue OUTER + } + // There is some data available. Indicate that it should be read. + return true + } + + if f, ok := v.(float64); ok { + if math.IsInf(f, 0) { + r.stats.AddPointsRead(1) + r.stats.IncrInf() + continue + } + + if math.IsNaN(f) { + r.stats.AddPointsRead(1) + r.stats.IncrNaN() + continue + } + } + + r.values[r.valuePos] = tsm1.NewValue(k, v) + r.valuePos++ + + if r.valuePos >= len(r.values) { + return true + } + } + } +} + +// Read returns the next chunk of data in the shard, converted to tsm1 values. Data is +// emitted completely for every field, in every series, before the next field is processed. +// Data from Read() adheres to the requirements for writing to tsm1 shards +func (r *Reader) Read() (string, []tsm1.Value, error) { + return r.keyBuf, r.values[:r.valuePos], nil +} + +// Close closes the reader. +func (r *Reader) Close() error { + r.tx.Rollback() + return r.db.Close() +} + +// cursor provides ordered iteration across a series. 
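+//
+// Each bz1 block begins with an 8-byte max-timestamp prefix followed by a
+// snappy-compressed payload; every decoded entry is an 8-byte timestamp, a
+// 4-byte data length, and the encoded value (see entryHeaderSize and
+// entryDataSize below).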
+type cursor struct { + cursor *bolt.Cursor + buf []byte // uncompressed buffer + off int // buffer offset + fieldIndices []int + index int + + series string + field string + dec *tsdb.FieldCodec + + keyBuf int64 + valBuf interface{} +} + +// newCursor returns an instance of a bz1 cursor. +func newCursor(tx *bolt.Tx, series string, field string, dec *tsdb.FieldCodec) *cursor { + // Retrieve points bucket. Ignore if there is no bucket. + b := tx.Bucket([]byte("points")).Bucket([]byte(series)) + if b == nil { + return nil + } + + return &cursor{ + cursor: b.Cursor(), + series: series, + field: field, + dec: dec, + keyBuf: -2, + } +} + +// Seek moves the cursor to a position. +func (c *cursor) SeekTo(seek int64) { + var seekBytes [8]byte + binary.BigEndian.PutUint64(seekBytes[:], uint64(seek)) + + // Move cursor to appropriate block and set to buffer. + k, v := c.cursor.Seek(seekBytes[:]) + if v == nil { // get the last block, it might have this time + _, v = c.cursor.Last() + } else if seek < int64(binary.BigEndian.Uint64(k)) { // the seek key is less than this block, go back one and check + _, v = c.cursor.Prev() + + // if the previous block max time is less than the seek value, reset to where we were originally + if v == nil || seek > int64(binary.BigEndian.Uint64(v[0:8])) { + _, v = c.cursor.Seek(seekBytes[:]) + } + } + c.setBuf(v) + + // Read current block up to seek position. + c.seekBuf(seekBytes[:]) + + // Return current entry. + c.keyBuf, c.valBuf = c.read() +} + +// seekBuf moves the cursor to a position within the current buffer. +func (c *cursor) seekBuf(seek []byte) (key, value []byte) { + for { + // Slice off the current entry. + buf := c.buf[c.off:] + + // Exit if current entry's timestamp is on or after the seek. + if len(buf) == 0 { + return + } + + if bytes.Compare(buf[0:8], seek) != -1 { + return + } + + c.off += entryHeaderSize + entryDataSize(buf) + } +} + +// Next returns the next key/value pair from the cursor. If there are no values +// remaining, -1 is returned. +func (c *cursor) Next() (int64, interface{}) { + for { + k, v := func() (int64, interface{}) { + if c.keyBuf != -2 { + k, v := c.keyBuf, c.valBuf + c.keyBuf = -2 + return k, v + } + + // Ignore if there is no buffer. + if len(c.buf) == 0 { + return -1, nil + } + + // Move forward to next entry. + c.off += entryHeaderSize + entryDataSize(c.buf[c.off:]) + + // If no items left then read first item from next block. + if c.off >= len(c.buf) { + _, v := c.cursor.Next() + c.setBuf(v) + } + + return c.read() + }() + + if k != -1 && v == nil { + // There is a point in the series at the next timestamp, + // but not for this cursor's field. Go to the next point. + continue + } + return k, v + } +} + +// setBuf saves a compressed block to the buffer. +func (c *cursor) setBuf(block []byte) { + // Clear if the block is empty. + if len(block) == 0 { + c.buf, c.off, c.fieldIndices, c.index = c.buf[0:0], 0, c.fieldIndices[0:0], 0 + return + } + + // Otherwise decode block into buffer. + // Skip over the first 8 bytes since they are the max timestamp. + buf, err := snappy.Decode(nil, block[8:]) + if err != nil { + c.buf = c.buf[0:0] + fmt.Printf("block decode error: %s\n", err) + } + + c.buf, c.off = buf, 0 +} + +// read reads the current key and value from the current block. +func (c *cursor) read() (key int64, value interface{}) { + // Return nil if the offset is at the end of the buffer. + if c.off >= len(c.buf) { + return -1, nil + } + + // Otherwise read the current entry. 
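+	// Entry layout: 8 bytes of timestamp, 4 bytes of data length, then the
+	// encoded value bytes (entryHeaderSize covers the first 12 bytes).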
+ buf := c.buf[c.off:] + dataSize := entryDataSize(buf) + + return tsdb.DecodeKeyValue(c.field, c.dec, buf[0:8], buf[entryHeaderSize:entryHeaderSize+dataSize]) +} + +// Sort bz1 cursors in correct order for writing to TSM files. + +type cursors []*cursor + +func (a cursors) Len() int { return len(a) } +func (a cursors) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a cursors) Less(i, j int) bool { + if a[i].series == a[j].series { + return a[i].field < a[j].field + } + return a[i].series < a[j].series +} + +// entryHeaderSize is the number of bytes required for the header. +const entryHeaderSize = 8 + 4 + +// entryDataSize returns the size of an entry's data field, in bytes. +func entryDataSize(v []byte) int { return int(binary.BigEndian.Uint32(v[8:12])) } diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/converter.go b/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/converter.go new file mode 100644 index 0000000..2d9ab16 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/converter.go @@ -0,0 +1,118 @@ +package main + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/influxdata/influxdb/cmd/influx_tsm/stats" + "github.com/influxdata/influxdb/tsdb/engine/tsm1" +) + +const ( + maxBlocksPerKey = 65535 +) + +// KeyIterator is used to iterate over b* keys for conversion to tsm keys +type KeyIterator interface { + Next() bool + Read() (string, []tsm1.Value, error) +} + +// Converter encapsulates the logic for converting b*1 shards to tsm1 shards. +type Converter struct { + path string + maxTSMFileSize uint32 + sequence int + stats *stats.Stats +} + +// NewConverter returns a new instance of the Converter. +func NewConverter(path string, sz uint32, stats *stats.Stats) *Converter { + return &Converter{ + path: path, + maxTSMFileSize: sz, + stats: stats, + } +} + +// Process writes the data provided by iter to a tsm1 shard. +func (c *Converter) Process(iter KeyIterator) error { + // Ensure the tsm1 directory exists. + if err := os.MkdirAll(c.path, 0777); err != nil { + return err + } + + // Iterate until no more data remains. + var w tsm1.TSMWriter + var keyCount map[string]int + + for iter.Next() { + k, v, err := iter.Read() + if err != nil { + return err + } + + if w == nil { + w, err = c.nextTSMWriter() + if err != nil { + return err + } + keyCount = map[string]int{} + } + if err := w.Write([]byte(k), v); err != nil { + return err + } + keyCount[k]++ + + c.stats.AddPointsRead(len(v)) + c.stats.AddPointsWritten(len(v)) + + // If we have a max file size configured and we're over it, start a new TSM file. + if w.Size() > c.maxTSMFileSize || keyCount[k] == maxBlocksPerKey { + if err := w.WriteIndex(); err != nil && err != tsm1.ErrNoValues { + return err + } + + c.stats.AddTSMBytes(w.Size()) + + if err := w.Close(); err != nil { + return err + } + w = nil + } + } + + if w != nil { + if err := w.WriteIndex(); err != nil && err != tsm1.ErrNoValues { + return err + } + c.stats.AddTSMBytes(w.Size()) + + if err := w.Close(); err != nil { + return err + } + } + + return nil +} + +// nextTSMWriter returns the next TSMWriter for the Converter. +func (c *Converter) nextTSMWriter() (tsm1.TSMWriter, error) { + c.sequence++ + fileName := filepath.Join(c.path, fmt.Sprintf("%09d-%09d.%s", 1, c.sequence, tsm1.TSMFileExtension)) + + fd, err := os.OpenFile(fileName, os.O_CREATE|os.O_RDWR, 0666) + if err != nil { + return nil, err + } + + // Create the writer for the new TSM file. 
+ w, err := tsm1.NewTSMWriter(fd) + if err != nil { + return nil, err + } + + c.stats.IncrTSMFileCount() + return w, nil +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/main.go b/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/main.go new file mode 100644 index 0000000..ddfc05c --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/main.go @@ -0,0 +1,419 @@ +// Command influx_tsm converts b1 or bz1 shards (from InfluxDB releases earlier than v0.11) +// to the current tsm1 format. +package main + +import ( + "bufio" + "errors" + "flag" + "fmt" + "io" + "io/ioutil" + "log" + "os" + "path/filepath" + "runtime" + "runtime/pprof" + "sort" + "strings" + "text/tabwriter" + "time" + + "net/http" + _ "net/http/pprof" + + "github.com/influxdata/influxdb/cmd/influx_tsm/b1" + "github.com/influxdata/influxdb/cmd/influx_tsm/bz1" + "github.com/influxdata/influxdb/cmd/influx_tsm/tsdb" +) + +// ShardReader reads b* shards and converts to tsm shards +type ShardReader interface { + KeyIterator + Open() error + Close() error +} + +const ( + tsmExt = "tsm" +) + +var description = ` +Convert a database from b1 or bz1 format to tsm1 format. + +This tool will backup the directories before conversion (if not disabled). +The backed-up files must be removed manually, generally after starting up the +node again to make sure all of data has been converted correctly. + +To restore a backup: + Shut down the node, remove the converted directory, and + copy the backed-up directory to the original location.` + +type options struct { + DataPath string + BackupPath string + DBs []string + DebugAddr string + TSMSize uint64 + Parallel bool + SkipBackup bool + UpdateInterval time.Duration + Yes bool + CPUFile string +} + +func (o *options) Parse() error { + fs := flag.NewFlagSet(os.Args[0], flag.ExitOnError) + + var dbs string + + fs.StringVar(&dbs, "dbs", "", "Comma-delimited list of databases to convert. Default is to convert all databases.") + fs.Uint64Var(&opts.TSMSize, "sz", maxTSMSz, "Maximum size of individual TSM files.") + fs.BoolVar(&opts.Parallel, "parallel", false, "Perform parallel conversion. (up to GOMAXPROCS shards at once)") + fs.BoolVar(&opts.SkipBackup, "nobackup", false, "Disable database backups. Not recommended.") + fs.StringVar(&opts.BackupPath, "backup", "", "The location to backup up the current databases. Must not be within the data directory.") + fs.StringVar(&opts.DebugAddr, "debug", "", "If set, http debugging endpoints will be enabled on the given address") + fs.DurationVar(&opts.UpdateInterval, "interval", 5*time.Second, "How often status updates are printed.") + fs.BoolVar(&opts.Yes, "y", false, "Don't ask, just convert") + fs.StringVar(&opts.CPUFile, "profile", "", "CPU Profile location") + fs.Usage = func() { + fmt.Fprintf(os.Stderr, "Usage: %v [options] \n", os.Args[0]) + fmt.Fprintf(os.Stderr, "%v\n\nOptions:\n", description) + fs.PrintDefaults() + fmt.Fprintf(os.Stderr, "\n") + } + + if err := fs.Parse(os.Args[1:]); err != nil { + return err + } + + if len(fs.Args()) < 1 { + return errors.New("no data directory specified") + } + var err error + if o.DataPath, err = filepath.Abs(fs.Args()[0]); err != nil { + return err + } + if o.DataPath, err = filepath.EvalSymlinks(filepath.Clean(o.DataPath)); err != nil { + return err + } + + if o.TSMSize > maxTSMSz { + return fmt.Errorf("bad TSM file size, maximum TSM file size is %d", maxTSMSz) + } + + // Check if specific databases were requested. 
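+	// An empty -dbs flag splits into a single empty string, which is
+	// normalized to nil below and means "convert all databases".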
+ o.DBs = strings.Split(dbs, ",") + if len(o.DBs) == 1 && o.DBs[0] == "" { + o.DBs = nil + } + + if !o.SkipBackup { + if o.BackupPath == "" { + return errors.New("either -nobackup or -backup DIR must be set") + } + if o.BackupPath, err = filepath.Abs(o.BackupPath); err != nil { + return err + } + if o.BackupPath, err = filepath.EvalSymlinks(filepath.Clean(o.BackupPath)); err != nil { + if os.IsNotExist(err) { + return errors.New("backup directory must already exist") + } + return err + } + + if strings.HasPrefix(o.BackupPath, o.DataPath) { + fmt.Println(o.BackupPath, o.DataPath) + return errors.New("backup directory cannot be contained within data directory") + } + } + + if o.DebugAddr != "" { + log.Printf("Starting debugging server on http://%v", o.DebugAddr) + go func() { + log.Fatal(http.ListenAndServe(o.DebugAddr, nil)) + }() + } + + return nil +} + +var opts options + +const maxTSMSz uint64 = 2 * 1024 * 1024 * 1024 + +func init() { + log.SetOutput(os.Stderr) + log.SetFlags(log.Ldate | log.Ltime | log.Lmicroseconds) +} + +func main() { + if err := opts.Parse(); err != nil { + log.Fatal(err) + } + + // Determine the list of databases + dbs, err := ioutil.ReadDir(opts.DataPath) + if err != nil { + log.Fatalf("failed to access data directory at %v: %v\n", opts.DataPath, err) + } + fmt.Println() // Cleanly separate output from start of program. + + if opts.Parallel { + if !isEnvSet("GOMAXPROCS") { + // Only modify GOMAXPROCS if it wasn't set in the environment + // This means 'GOMAXPROCS=1 influx_tsm -parallel' will not actually + // run in parallel + runtime.GOMAXPROCS(runtime.NumCPU()) + } + } + + var badUser string + if opts.SkipBackup { + badUser = "(NOT RECOMMENDED)" + } + + // Dump summary of what is about to happen. + fmt.Println("b1 and bz1 shard conversion.") + fmt.Println("-----------------------------------") + fmt.Println("Data directory is: ", opts.DataPath) + if !opts.SkipBackup { + fmt.Println("Backup directory is: ", opts.BackupPath) + } + fmt.Println("Databases specified: ", allDBs(opts.DBs)) + fmt.Println("Database backups enabled: ", yesno(!opts.SkipBackup), badUser) + fmt.Printf("Parallel mode enabled (GOMAXPROCS): %s (%d)\n", yesno(opts.Parallel), runtime.GOMAXPROCS(0)) + fmt.Println() + + shards := collectShards(dbs) + + // Anything to convert? + fmt.Printf("\nFound %d shards that will be converted.\n", len(shards)) + if len(shards) == 0 { + fmt.Println("Nothing to do.") + return + } + + // Display list of convertible shards. + fmt.Println() + w := new(tabwriter.Writer) + w.Init(os.Stdout, 0, 8, 1, '\t', 0) + fmt.Fprintln(w, "Database\tRetention\tPath\tEngine\tSize") + for _, si := range shards { + fmt.Fprintf(w, "%v\t%v\t%v\t%v\t%d\n", si.Database, si.RetentionPolicy, si.FullPath(opts.DataPath), si.FormatAsString(), si.Size) + } + w.Flush() + + if !opts.Yes { + // Get confirmation from user. + fmt.Printf("\nThese shards will be converted. Proceed? 
y/N: ") + liner := bufio.NewReader(os.Stdin) + yn, err := liner.ReadString('\n') + if err != nil { + log.Fatalf("failed to read response: %v", err) + } + yn = strings.TrimRight(strings.ToLower(yn), "\n") + if yn != "y" { + log.Fatal("Conversion aborted.") + } + } + fmt.Println("Conversion starting....") + + if opts.CPUFile != "" { + f, err := os.Create(opts.CPUFile) + if err != nil { + log.Fatal(err) + } + if err = pprof.StartCPUProfile(f); err != nil { + log.Fatal(err) + } + defer pprof.StopCPUProfile() + } + + tr := newTracker(shards, opts) + + if err := tr.Run(); err != nil { + log.Fatalf("Error occurred preventing completion: %v\n", err) + } + + tr.PrintStats() +} + +func collectShards(dbs []os.FileInfo) tsdb.ShardInfos { + // Get the list of shards for conversion. + var shards tsdb.ShardInfos + for _, db := range dbs { + d := tsdb.NewDatabase(filepath.Join(opts.DataPath, db.Name())) + shs, err := d.Shards() + if err != nil { + log.Fatalf("Failed to access shards for database %v: %v\n", d.Name(), err) + } + shards = append(shards, shs...) + } + + sort.Sort(shards) + shards = shards.FilterFormat(tsdb.TSM1) + if len(dbs) > 0 { + shards = shards.ExclusiveDatabases(opts.DBs) + } + + return shards +} + +// backupDatabase backs up the database named db +func backupDatabase(db string) error { + copyFile := func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + + // Strip the DataPath from the path and replace with BackupPath. + toPath := strings.Replace(path, opts.DataPath, opts.BackupPath, 1) + + if info.IsDir() { + return os.MkdirAll(toPath, info.Mode()) + } + + in, err := os.Open(path) + if err != nil { + return err + } + defer in.Close() + + srcInfo, err := os.Stat(path) + if err != nil { + return err + } + + out, err := os.OpenFile(toPath, os.O_CREATE|os.O_WRONLY, info.Mode()) + if err != nil { + return err + } + defer out.Close() + + dstInfo, err := os.Stat(toPath) + if err != nil { + return err + } + + if dstInfo.Size() == srcInfo.Size() { + log.Printf("Backup file already found for %v with correct size, skipping.", path) + return nil + } + + if dstInfo.Size() > srcInfo.Size() { + log.Printf("Invalid backup file found for %v, replacing with good copy.", path) + if err := out.Truncate(0); err != nil { + return err + } + if _, err := out.Seek(0, io.SeekStart); err != nil { + return err + } + } + + if dstInfo.Size() > 0 { + log.Printf("Resuming backup of file %v, starting at %v bytes", path, dstInfo.Size()) + } + + off, err := out.Seek(0, io.SeekEnd) + if err != nil { + return err + } + if _, err := in.Seek(off, io.SeekStart); err != nil { + return err + } + + log.Printf("Backing up file %v", path) + + _, err = io.Copy(out, in) + + return err + } + + return filepath.Walk(filepath.Join(opts.DataPath, db), copyFile) +} + +// convertShard converts the shard in-place. +func convertShard(si *tsdb.ShardInfo, tr *tracker) error { + src := si.FullPath(opts.DataPath) + dst := fmt.Sprintf("%v.%v", src, tsmExt) + + var reader ShardReader + switch si.Format { + case tsdb.BZ1: + reader = bz1.NewReader(src, &tr.Stats, 0) + case tsdb.B1: + reader = b1.NewReader(src, &tr.Stats, 0) + default: + return fmt.Errorf("Unsupported shard format: %v", si.FormatAsString()) + } + + // Open the shard, and create a converter. + if err := reader.Open(); err != nil { + return fmt.Errorf("Failed to open %v for conversion: %v", src, err) + } + defer reader.Close() + converter := NewConverter(dst, uint32(opts.TSMSize), &tr.Stats) + + // Perform the conversion. 
+ if err := converter.Process(reader); err != nil { + return fmt.Errorf("Conversion of %v failed: %v", src, err) + } + + // Delete source shard, and rename new tsm1 shard. + if err := reader.Close(); err != nil { + return fmt.Errorf("Conversion of %v failed due to close: %v", src, err) + } + + if err := os.RemoveAll(si.FullPath(opts.DataPath)); err != nil { + return fmt.Errorf("Deletion of %v failed: %v", src, err) + } + if err := os.Rename(dst, src); err != nil { + return fmt.Errorf("Rename of %v to %v failed: %v", dst, src, err) + } + + return nil +} + +// ParallelGroup allows the maximum parrallelism of a set of operations to be controlled. +type ParallelGroup chan struct{} + +// NewParallelGroup returns a group which allows n operations to run in parallel. A value of 0 +// means no operations will ever run. +func NewParallelGroup(n int) ParallelGroup { + return make(chan struct{}, n) +} + +// Do executes one operation of the ParallelGroup +func (p ParallelGroup) Do(f func()) { + p <- struct{}{} // acquire working slot + defer func() { <-p }() + + f() +} + +// yesno returns "yes" for true, "no" for false. +func yesno(b bool) string { + if b { + return "yes" + } + return "no" +} + +// allDBs returns "all" if all databases are requested for conversion. +func allDBs(dbs []string) string { + if dbs == nil { + return "all" + } + return fmt.Sprintf("%v", dbs) +} + +// isEnvSet checks to see if a variable was set in the environment +func isEnvSet(name string) bool { + for _, s := range os.Environ() { + if strings.SplitN(s, "=", 2)[0] == name { + return true + } + } + return false +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/stats/stats.go b/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/stats/stats.go new file mode 100644 index 0000000..c3a7d3b --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/stats/stats.go @@ -0,0 +1,55 @@ +// Package stats contains statistics for converting non-TSM shards to TSM. +package stats + +import ( + "sync/atomic" + "time" +) + +// Stats are the statistics captured while converting non-TSM shards to TSM +type Stats struct { + NanFiltered uint64 + InfFiltered uint64 + FieldsFiltered uint64 + PointsWritten uint64 + PointsRead uint64 + TsmFilesCreated uint64 + TsmBytesWritten uint64 + CompletedShards uint64 + TotalTime time.Duration +} + +// AddPointsRead increments the number of read points. +func (s *Stats) AddPointsRead(n int) { + atomic.AddUint64(&s.PointsRead, uint64(n)) +} + +// AddPointsWritten increments the number of written points. +func (s *Stats) AddPointsWritten(n int) { + atomic.AddUint64(&s.PointsWritten, uint64(n)) +} + +// AddTSMBytes increments the number of TSM Bytes. +func (s *Stats) AddTSMBytes(n uint32) { + atomic.AddUint64(&s.TsmBytesWritten, uint64(n)) +} + +// IncrTSMFileCount increments the number of TSM files created. +func (s *Stats) IncrTSMFileCount() { + atomic.AddUint64(&s.TsmFilesCreated, 1) +} + +// IncrNaN increments the number of NaNs filtered. +func (s *Stats) IncrNaN() { + atomic.AddUint64(&s.NanFiltered, 1) +} + +// IncrInf increments the number of Infs filtered. +func (s *Stats) IncrInf() { + atomic.AddUint64(&s.InfFiltered, 1) +} + +// IncrFiltered increments the number of fields filtered. 
+func (s *Stats) IncrFiltered() { + atomic.AddUint64(&s.FieldsFiltered, 1) +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/tracker.go b/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/tracker.go new file mode 100644 index 0000000..b91d9b9 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/tracker.go @@ -0,0 +1,130 @@ +package main + +import ( + "fmt" + "log" + "runtime" + "sync" + "sync/atomic" + "time" + + "github.com/influxdata/influxdb/cmd/influx_tsm/stats" + "github.com/influxdata/influxdb/cmd/influx_tsm/tsdb" +) + +// tracker will orchestrate and track the conversions of non-TSM shards to TSM +type tracker struct { + Stats stats.Stats + + shards tsdb.ShardInfos + opts options + + pg ParallelGroup + wg sync.WaitGroup +} + +// newTracker will setup and return a clean tracker instance +func newTracker(shards tsdb.ShardInfos, opts options) *tracker { + t := &tracker{ + shards: shards, + opts: opts, + pg: NewParallelGroup(runtime.GOMAXPROCS(0)), + } + + return t +} + +func (t *tracker) Run() error { + conversionStart := time.Now() + + // Backup each directory. + if !opts.SkipBackup { + databases := t.shards.Databases() + fmt.Printf("Backing up %d databases...\n", len(databases)) + t.wg.Add(len(databases)) + for i := range databases { + db := databases[i] + go t.pg.Do(func() { + defer t.wg.Done() + + start := time.Now() + log.Printf("Backup of database '%v' started", db) + err := backupDatabase(db) + if err != nil { + log.Fatalf("Backup of database %v failed: %v\n", db, err) + } + log.Printf("Database %v backed up (%v)\n", db, time.Since(start)) + }) + } + t.wg.Wait() + } else { + fmt.Println("Database backup disabled.") + } + + t.wg.Add(len(t.shards)) + for i := range t.shards { + si := t.shards[i] + go t.pg.Do(func() { + defer func() { + atomic.AddUint64(&t.Stats.CompletedShards, 1) + t.wg.Done() + }() + + start := time.Now() + log.Printf("Starting conversion of shard: %v", si.FullPath(opts.DataPath)) + if err := convertShard(si, t); err != nil { + log.Fatalf("Failed to convert %v: %v\n", si.FullPath(opts.DataPath), err) + } + log.Printf("Conversion of %v successful (%v)\n", si.FullPath(opts.DataPath), time.Since(start)) + }) + } + + done := make(chan struct{}) + go func() { + t.wg.Wait() + close(done) + }() + +WAIT_LOOP: + for { + select { + case <-done: + break WAIT_LOOP + case <-time.After(opts.UpdateInterval): + t.StatusUpdate() + } + } + + t.Stats.TotalTime = time.Since(conversionStart) + + return nil +} + +func (t *tracker) StatusUpdate() { + shardCount := atomic.LoadUint64(&t.Stats.CompletedShards) + pointCount := atomic.LoadUint64(&t.Stats.PointsRead) + pointWritten := atomic.LoadUint64(&t.Stats.PointsWritten) + + log.Printf("Still Working: Completed Shards: %d/%d Points read/written: %d/%d", shardCount, len(t.shards), pointCount, pointWritten) +} + +func (t *tracker) PrintStats() { + preSize := t.shards.Size() + postSize := int64(t.Stats.TsmBytesWritten) + + fmt.Printf("\nSummary statistics\n========================================\n") + fmt.Printf("Databases converted: %d\n", len(t.shards.Databases())) + fmt.Printf("Shards converted: %d\n", len(t.shards)) + fmt.Printf("TSM files created: %d\n", t.Stats.TsmFilesCreated) + fmt.Printf("Points read: %d\n", t.Stats.PointsRead) + fmt.Printf("Points written: %d\n", t.Stats.PointsWritten) + fmt.Printf("NaN filtered: %d\n", t.Stats.NanFiltered) + fmt.Printf("Inf filtered: %d\n", t.Stats.InfFiltered) + fmt.Printf("Points without fields filtered: %d\n", t.Stats.FieldsFiltered) + 
fmt.Printf("Disk usage pre-conversion (bytes): %d\n", preSize) + fmt.Printf("Disk usage post-conversion (bytes): %d\n", postSize) + fmt.Printf("Reduction factor: %d%%\n", 100*(preSize-postSize)/preSize) + fmt.Printf("Bytes per TSM point: %.2f\n", float64(postSize)/float64(t.Stats.PointsWritten)) + fmt.Printf("Total conversion time: %v\n", t.Stats.TotalTime) + fmt.Println() +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/tsdb/codec.go b/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/tsdb/codec.go new file mode 100644 index 0000000..4c3a7b6 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/tsdb/codec.go @@ -0,0 +1,119 @@ +package tsdb + +import ( + "encoding/binary" + "errors" + "fmt" + "math" +) + +const ( + fieldFloat = 1 + fieldInteger = 2 + fieldBoolean = 3 + fieldString = 4 +) + +var ( + // ErrFieldNotFound is returned when a field cannot be found. + ErrFieldNotFound = errors.New("field not found") + + // ErrFieldUnmappedID is returned when the system is presented, during decode, with a field ID + // there is no mapping for. + ErrFieldUnmappedID = errors.New("field ID not mapped") +) + +// FieldCodec provides encoding and decoding functionality for the fields of a given +// Measurement. +type FieldCodec struct { + fieldsByID map[uint8]*Field + fieldsByName map[string]*Field +} + +// NewFieldCodec returns a FieldCodec for the given Measurement. Must be called with +// a RLock that protects the Measurement. +func NewFieldCodec(fields map[string]*Field) *FieldCodec { + fieldsByID := make(map[uint8]*Field, len(fields)) + fieldsByName := make(map[string]*Field, len(fields)) + for _, f := range fields { + fieldsByID[f.ID] = f + fieldsByName[f.Name] = f + } + return &FieldCodec{fieldsByID: fieldsByID, fieldsByName: fieldsByName} +} + +// FieldIDByName returns the ID for the given field. +func (f *FieldCodec) FieldIDByName(s string) (uint8, error) { + fi := f.fieldsByName[s] + if fi == nil { + return 0, ErrFieldNotFound + } + return fi.ID, nil +} + +// DecodeByID scans a byte slice for a field with the given ID, converts it to its +// expected type, and return that value. +func (f *FieldCodec) DecodeByID(targetID uint8, b []byte) (interface{}, error) { + var value interface{} + for { + if len(b) == 0 { + // No more bytes. + return nil, ErrFieldNotFound + } + + field := f.fieldsByID[b[0]] + if field == nil { + // This can happen, though is very unlikely. If this node receives encoded data, to be written + // to disk, and is queried for that data before its metastore is updated, there will be no field + // mapping for the data during decode. All this can happen because data is encoded by the node + // that first received the write request, not the node that actually writes the data to disk. + // So if this happens, the read must be aborted. 
+ return nil, ErrFieldUnmappedID + } + + switch field.Type { + case fieldFloat: + if field.ID == targetID { + value = math.Float64frombits(binary.BigEndian.Uint64(b[1:9])) + } + b = b[9:] + case fieldInteger: + if field.ID == targetID { + value = int64(binary.BigEndian.Uint64(b[1:9])) + } + b = b[9:] + case fieldBoolean: + if field.ID == targetID { + value = b[1] == 1 + } + b = b[2:] + case fieldString: + length := binary.BigEndian.Uint16(b[1:3]) + if field.ID == targetID { + value = string(b[3 : 3+length]) + } + b = b[3+length:] + default: + panic(fmt.Sprintf("unsupported value type during decode by id: %T", field.Type)) + } + + if value != nil { + return value, nil + } + } +} + +// DecodeByName scans a byte slice for a field with the given name, converts it to its +// expected type, and return that value. +func (f *FieldCodec) DecodeByName(name string, b []byte) (interface{}, error) { + fi := f.FieldByName(name) + if fi == nil { + return 0, ErrFieldNotFound + } + return f.DecodeByID(fi.ID, b) +} + +// FieldByName returns the field by its name. It will return a nil if not found +func (f *FieldCodec) FieldByName(name string) *Field { + return f.fieldsByName[name] +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/tsdb/database.go b/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/tsdb/database.go new file mode 100644 index 0000000..c24ef9f --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/tsdb/database.go @@ -0,0 +1,244 @@ +// Package tsdb abstracts the various shard types supported by the influx_tsm command. +package tsdb // import "github.com/influxdata/influxdb/cmd/influx_tsm/tsdb" + +import ( + "fmt" + "os" + "path" + "path/filepath" + "sort" + "time" + + "github.com/boltdb/bolt" + "github.com/influxdata/influxdb/pkg/slices" +) + +// Flags for differentiating between engines +const ( + B1 = iota + BZ1 + TSM1 +) + +// EngineFormat holds the flag for the engine +type EngineFormat int + +// String returns the string format of the engine. +func (e EngineFormat) String() string { + switch e { + case TSM1: + return "tsm1" + case B1: + return "b1" + case BZ1: + return "bz1" + default: + panic("unrecognized shard engine format") + } +} + +// ShardInfo is the description of a shard on disk. +type ShardInfo struct { + Database string + RetentionPolicy string + Path string + Format EngineFormat + Size int64 +} + +// FormatAsString returns the format of the shard as a string. +func (s *ShardInfo) FormatAsString() string { + return s.Format.String() +} + +// FullPath returns the full path to the shard, given the data directory root. +func (s *ShardInfo) FullPath(dataPath string) string { + return filepath.Join(dataPath, s.Database, s.RetentionPolicy, s.Path) +} + +// ShardInfos is an array of ShardInfo +type ShardInfos []*ShardInfo + +func (s ShardInfos) Len() int { return len(s) } +func (s ShardInfos) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s ShardInfos) Less(i, j int) bool { + if s[i].Database == s[j].Database { + if s[i].RetentionPolicy == s[j].RetentionPolicy { + return s[i].Path < s[j].Path + } + + return s[i].RetentionPolicy < s[j].RetentionPolicy + } + + return s[i].Database < s[j].Database +} + +// Databases returns the sorted unique set of databases for the shards. 
+func (s ShardInfos) Databases() []string { + dbm := make(map[string]bool) + for _, ss := range s { + dbm[ss.Database] = true + } + + var dbs []string + for k := range dbm { + dbs = append(dbs, k) + } + sort.Strings(dbs) + return dbs +} + +// FilterFormat returns a copy of the ShardInfos, with shards of the given +// format removed. +func (s ShardInfos) FilterFormat(fmt EngineFormat) ShardInfos { + var a ShardInfos + for _, si := range s { + if si.Format != fmt { + a = append(a, si) + } + } + return a +} + +// Size returns the space on disk consumed by the shards. +func (s ShardInfos) Size() int64 { + var sz int64 + for _, si := range s { + sz += si.Size + } + return sz +} + +// ExclusiveDatabases returns a copy of the ShardInfo, with shards associated +// with the given databases present. If the given set is empty, all databases +// are returned. +func (s ShardInfos) ExclusiveDatabases(exc []string) ShardInfos { + var a ShardInfos + + // Empty set? Return everything. + if len(exc) == 0 { + a = make(ShardInfos, len(s)) + copy(a, s) + return a + } + + for _, si := range s { + if slices.Exists(exc, si.Database) { + a = append(a, si) + } + } + return a +} + +// Database represents an entire database on disk. +type Database struct { + path string +} + +// NewDatabase creates a database instance using data at path. +func NewDatabase(path string) *Database { + return &Database{path: path} +} + +// Name returns the name of the database. +func (d *Database) Name() string { + return path.Base(d.path) +} + +// Path returns the path to the database. +func (d *Database) Path() string { + return d.path +} + +// Shards returns information for every shard in the database. +func (d *Database) Shards() ([]*ShardInfo, error) { + fd, err := os.Open(d.path) + if err != nil { + return nil, err + } + + // Get each retention policy. + rps, err := fd.Readdirnames(-1) + if err != nil { + return nil, err + } + + // Process each retention policy. + var shardInfos []*ShardInfo + for _, rp := range rps { + rpfd, err := os.Open(filepath.Join(d.path, rp)) + if err != nil { + return nil, err + } + + // Process each shard + shards, err := rpfd.Readdirnames(-1) + if err != nil { + return nil, err + } + + for _, sh := range shards { + fmt, sz, err := shardFormat(filepath.Join(d.path, rp, sh)) + if err != nil { + return nil, err + } + + si := &ShardInfo{ + Database: d.Name(), + RetentionPolicy: path.Base(rp), + Path: sh, + Format: fmt, + Size: sz, + } + shardInfos = append(shardInfos, si) + } + } + + sort.Sort(ShardInfos(shardInfos)) + return shardInfos, nil +} + +// shardFormat returns the format and size on disk of the shard at path. +func shardFormat(path string) (EngineFormat, int64, error) { + // If it's a directory then it's a tsm1 engine + fi, err := os.Stat(path) + if err != nil { + return 0, 0, err + } + if fi.Mode().IsDir() { + return TSM1, fi.Size(), nil + } + + // It must be a BoltDB-based engine. + db, err := bolt.Open(path, 0666, &bolt.Options{Timeout: 1 * time.Second}) + if err != nil { + return 0, 0, err + } + defer db.Close() + + var format EngineFormat + err = db.View(func(tx *bolt.Tx) error { + // Retrieve the meta bucket. + b := tx.Bucket([]byte("meta")) + + // If no format is specified then it must be an original b1 database. + if b == nil { + format = B1 + return nil + } + + // There is an actual format indicator. 
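+ // Both "b1" and the older "v1" label map to the b1 engine format.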
+ switch f := string(b.Get([]byte("format"))); f { + case "b1", "v1": + format = B1 + case "bz1": + format = BZ1 + default: + return fmt.Errorf("unrecognized engine format: %s", f) + } + + return nil + }) + + return format, fi.Size(), err +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/tsdb/internal/meta.pb.go b/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/tsdb/internal/meta.pb.go new file mode 100644 index 0000000..1397774 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/tsdb/internal/meta.pb.go @@ -0,0 +1,121 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: internal/meta.proto + +/* +Package internal is a generated protocol buffer package. + +It is generated from these files: + internal/meta.proto + +It has these top-level messages: + Series + Tag + MeasurementFields + Field +*/ +package internal + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type Series struct { + Key *string `protobuf:"bytes,1,req,name=Key" json:"Key,omitempty"` + Tags []*Tag `protobuf:"bytes,2,rep,name=Tags" json:"Tags,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Series) Reset() { *m = Series{} } +func (m *Series) String() string { return proto.CompactTextString(m) } +func (*Series) ProtoMessage() {} + +func (m *Series) GetKey() string { + if m != nil && m.Key != nil { + return *m.Key + } + return "" +} + +func (m *Series) GetTags() []*Tag { + if m != nil { + return m.Tags + } + return nil +} + +type Tag struct { + Key *string `protobuf:"bytes,1,req,name=Key" json:"Key,omitempty"` + Value *string `protobuf:"bytes,2,req,name=Value" json:"Value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Tag) Reset() { *m = Tag{} } +func (m *Tag) String() string { return proto.CompactTextString(m) } +func (*Tag) ProtoMessage() {} + +func (m *Tag) GetKey() string { + if m != nil && m.Key != nil { + return *m.Key + } + return "" +} + +func (m *Tag) GetValue() string { + if m != nil && m.Value != nil { + return *m.Value + } + return "" +} + +type MeasurementFields struct { + Fields []*Field `protobuf:"bytes,1,rep,name=Fields" json:"Fields,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MeasurementFields) Reset() { *m = MeasurementFields{} } +func (m *MeasurementFields) String() string { return proto.CompactTextString(m) } +func (*MeasurementFields) ProtoMessage() {} + +func (m *MeasurementFields) GetFields() []*Field { + if m != nil { + return m.Fields + } + return nil +} + +type Field struct { + ID *int32 `protobuf:"varint,1,req,name=ID" json:"ID,omitempty"` + Name *string `protobuf:"bytes,2,req,name=Name" json:"Name,omitempty"` + Type *int32 `protobuf:"varint,3,req,name=Type" json:"Type,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Field) Reset() { *m = Field{} } +func (m *Field) String() string { return proto.CompactTextString(m) } +func (*Field) ProtoMessage() {} + +func (m *Field) GetID() int32 { + if m != nil && m.ID != nil { + return *m.ID + } + return 0 +} + +func (m *Field) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *Field) GetType() int32 { + if m != nil && m.Type != nil { + return *m.Type + } + return 0 +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/tsdb/types.go b/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/tsdb/types.go new 
file mode 100644 index 0000000..f9e3aa3 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/tsdb/types.go @@ -0,0 +1,60 @@ +package tsdb + +import ( + "encoding/binary" + "strings" + + "github.com/influxdata/influxdb/cmd/influx_tsm/tsdb/internal" + "github.com/influxdata/influxql" + + "github.com/gogo/protobuf/proto" +) + +// Field represents an encoded field. +type Field struct { + ID uint8 `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Type influxql.DataType `json:"type,omitempty"` +} + +// MeasurementFields is a mapping from measurements to its fields. +type MeasurementFields struct { + Fields map[string]*Field `json:"fields"` + Codec *FieldCodec +} + +// UnmarshalBinary decodes the object from a binary format. +func (m *MeasurementFields) UnmarshalBinary(buf []byte) error { + var pb internal.MeasurementFields + if err := proto.Unmarshal(buf, &pb); err != nil { + return err + } + m.Fields = make(map[string]*Field) + for _, f := range pb.Fields { + m.Fields[f.GetName()] = &Field{ID: uint8(f.GetID()), Name: f.GetName(), Type: influxql.DataType(f.GetType())} + } + return nil +} + +// Series represents a series in the shard. +type Series struct { + Key string + Tags map[string]string +} + +// MeasurementFromSeriesKey returns the Measurement name for a given series. +func MeasurementFromSeriesKey(key string) string { + return strings.SplitN(key, ",", 2)[0] +} + +// DecodeKeyValue decodes the key and value from bytes. +func DecodeKeyValue(field string, dec *FieldCodec, k, v []byte) (int64, interface{}) { + // Convert key to a timestamp. + key := int64(binary.BigEndian.Uint64(k[0:8])) + + decValue, err := dec.DecodeByName(field, v) + if err != nil { + return key, nil + } + return key, decValue +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influxd/backup/backup.go b/vendor/github.com/influxdata/influxdb/cmd/influxd/backup/backup.go new file mode 100644 index 0000000..f874009 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influxd/backup/backup.go @@ -0,0 +1,620 @@ +// Package backup implements both the backup and export subcommands for the influxd command. +package backup + +import ( + "encoding/binary" + "encoding/json" + "errors" + "flag" + "fmt" + "io" + "io/ioutil" + "log" + "math" + "os" + "path/filepath" + "strconv" + "time" + + "github.com/influxdata/influxdb/cmd/influxd/backup_util" + "github.com/influxdata/influxdb/services/snapshotter" + "github.com/influxdata/influxdb/tcp" + gzip "github.com/klauspost/pgzip" +) + +const ( + // Suffix is a suffix added to the backup while it's in-process. + Suffix = ".pending" + + // Metafile is the base name given to the metastore backups. + Metafile = "meta" + + // BackupFilePattern is the beginning of the pattern for a backup + // file. They follow the scheme ... + BackupFilePattern = "%s.%s.%05d" +) + +// Command represents the program execution for "influxd backup". +type Command struct { + // The logger passed to the ticker during execution. + StdoutLogger *log.Logger + StderrLogger *log.Logger + + // Standard input/output, overridden for testing. + Stderr io.Writer + Stdout io.Writer + + host string + path string + database string + retentionPolicy string + shardID string + + isBackup bool + since time.Time + start time.Time + end time.Time + + portable bool + manifest backup_util.Manifest + portableFileBase string + + BackupFiles []string +} + +// NewCommand returns a new instance of Command with default settings. 
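+// Stdout and Stderr are wired to the process streams; the loggers themselves are
+// created later, in Run.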
+func NewCommand() *Command { + return &Command{ + Stderr: os.Stderr, + Stdout: os.Stdout, + } +} + +// Run executes the program. +func (cmd *Command) Run(args ...string) error { + // Set up logger. + cmd.StdoutLogger = log.New(cmd.Stdout, "", log.LstdFlags) + cmd.StderrLogger = log.New(cmd.Stderr, "", log.LstdFlags) + + // Parse command line arguments. + err := cmd.parseFlags(args) + if err != nil { + return err + } + + if cmd.shardID != "" { + // always backup the metastore + if err := cmd.backupMetastore(); err != nil { + return err + } + err = cmd.backupShard(cmd.database, cmd.retentionPolicy, cmd.shardID) + + } else if cmd.retentionPolicy != "" { + // always backup the metastore + if err := cmd.backupMetastore(); err != nil { + return err + } + err = cmd.backupRetentionPolicy() + } else if cmd.database != "" { + // always backup the metastore + if err := cmd.backupMetastore(); err != nil { + return err + } + err = cmd.backupDatabase() + } else { + // always backup the metastore + if err := cmd.backupMetastore(); err != nil { + return err + } + + cmd.StdoutLogger.Println("No database, retention policy or shard ID given. Full meta store backed up.") + if cmd.portable { + cmd.StdoutLogger.Println("Backing up all databases in portable format") + if err := cmd.backupDatabase(); err != nil { + cmd.StderrLogger.Printf("backup failed: %v", err) + return err + } + + } + + } + + if cmd.portable { + filename := cmd.portableFileBase + ".manifest" + if err := cmd.manifest.Save(filepath.Join(cmd.path, filename)); err != nil { + cmd.StderrLogger.Printf("manifest save failed: %v", err) + return err + } + cmd.BackupFiles = append(cmd.BackupFiles, filename) + } + + if err != nil { + cmd.StderrLogger.Printf("backup failed: %v", err) + return err + } + cmd.StdoutLogger.Println("backup complete:") + for _, v := range cmd.BackupFiles { + cmd.StdoutLogger.Println("\t" + filepath.Join(cmd.path, v)) + } + + return nil +} + +// parseFlags parses and validates the command line arguments into a request object. 
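+// It accepts either -since or -start/-end (the two styles are mutually exclusive)
+// and requires exactly one trailing PATH argument naming the backup directory,
+// for example: influxd backup -portable -db mydb /tmp/backups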
+func (cmd *Command) parseFlags(args []string) (err error) { + fs := flag.NewFlagSet("", flag.ContinueOnError) + + fs.StringVar(&cmd.host, "host", "localhost:8088", "") + fs.StringVar(&cmd.database, "database", "", "") + fs.StringVar(&cmd.database, "db", "", "") + fs.StringVar(&cmd.retentionPolicy, "retention", "", "") + fs.StringVar(&cmd.retentionPolicy, "rp", "", "") + fs.StringVar(&cmd.shardID, "shard", "", "") + var sinceArg string + var startArg string + var endArg string + fs.StringVar(&sinceArg, "since", "", "") + fs.StringVar(&startArg, "start", "", "") + fs.StringVar(&endArg, "end", "", "") + fs.BoolVar(&cmd.portable, "portable", false, "") + + fs.SetOutput(cmd.Stderr) + fs.Usage = cmd.printUsage + + err = fs.Parse(args) + if err != nil { + return err + } + + cmd.BackupFiles = []string{} + + // for portable saving, if needed + cmd.portableFileBase = time.Now().UTC().Format(backup_util.PortableFileNamePattern) + + // if startArg and endArg are unspecified, or if we are using -since then assume we are doing a full backup of the shards + cmd.isBackup = (startArg == "" && endArg == "") || sinceArg != "" + + if sinceArg != "" { + cmd.since, err = time.Parse(time.RFC3339, sinceArg) + if err != nil { + return err + } + } + if startArg != "" { + if cmd.isBackup { + return errors.New("backup command uses one of -since or -start/-end") + } + cmd.start, err = time.Parse(time.RFC3339, startArg) + if err != nil { + return err + } + } + + if endArg != "" { + if cmd.isBackup { + return errors.New("backup command uses one of -since or -start/-end") + } + cmd.end, err = time.Parse(time.RFC3339, endArg) + if err != nil { + return err + } + + // start should be < end + if !cmd.start.Before(cmd.end) { + return errors.New("start date must be before end date") + } + } + + // Ensure that only one arg is specified. 
+ if fs.NArg() != 1 { + return errors.New("Exactly one backup path is required.") + } + cmd.path = fs.Arg(0) + + err = os.MkdirAll(cmd.path, 0700) + + return err +} + +func (cmd *Command) backupShard(db, rp, sid string) error { + reqType := snapshotter.RequestShardBackup + if !cmd.isBackup { + reqType = snapshotter.RequestShardExport + } + + id, err := strconv.ParseUint(sid, 10, 64) + if err != nil { + return err + } + + shardArchivePath, err := cmd.nextPath(filepath.Join(cmd.path, fmt.Sprintf(backup_util.BackupFilePattern, db, rp, id))) + if err != nil { + return err + } + + if cmd.isBackup { + cmd.StdoutLogger.Printf("backing up db=%v rp=%v shard=%v to %s since %s", + db, rp, sid, shardArchivePath, cmd.since.Format(time.RFC3339)) + } else { + cmd.StdoutLogger.Printf("backing up db=%v rp=%v shard=%v to %s with boundaries start=%s, end=%s", + db, rp, sid, shardArchivePath, cmd.start.Format(time.RFC3339), cmd.end.Format(time.RFC3339)) + } + req := &snapshotter.Request{ + Type: reqType, + BackupDatabase: db, + BackupRetentionPolicy: rp, + ShardID: id, + Since: cmd.since, + ExportStart: cmd.start, + ExportEnd: cmd.end, + } + + // TODO: verify shard backup data + err = cmd.downloadAndVerify(req, shardArchivePath, nil) + if !cmd.portable { + cmd.BackupFiles = append(cmd.BackupFiles, shardArchivePath) + } + + if err != nil { + return err + } + + if cmd.portable { + f, err := os.Open(shardArchivePath) + if err != nil { + return err + } + defer f.Close() + defer os.Remove(shardArchivePath) + + filePrefix := cmd.portableFileBase + ".s" + sid + filename := filePrefix + ".tar.gz" + out, err := os.OpenFile(filepath.Join(cmd.path, filename), os.O_CREATE|os.O_RDWR, 0600) + if err != nil { + return err + } + + zw := gzip.NewWriter(out) + zw.Name = filePrefix + ".tar" + + cw := backup_util.CountingWriter{Writer: zw} + + _, err = io.Copy(&cw, f) + if err != nil { + if err := zw.Close(); err != nil { + return err + } + + if err := out.Close(); err != nil { + return err + } + return err + } + + shardid, err := strconv.ParseUint(sid, 10, 64) + if err != nil { + if err := zw.Close(); err != nil { + return err + } + + if err := out.Close(); err != nil { + return err + } + return err + } + cmd.manifest.Files = append(cmd.manifest.Files, backup_util.Entry{ + Database: db, + Policy: rp, + ShardID: shardid, + FileName: filename, + Size: cw.Total, + LastModified: 0, + }) + + if err := zw.Close(); err != nil { + return err + } + + if err := out.Close(); err != nil { + return err + } + + cmd.BackupFiles = append(cmd.BackupFiles, filename) + } + return nil + +} + +// backupDatabase will request the database information from the server and then backup +// every shard in every retention policy in the database. Each shard will be written to a separate file. +func (cmd *Command) backupDatabase() error { + cmd.StdoutLogger.Printf("backing up db=%s", cmd.database) + + req := &snapshotter.Request{ + Type: snapshotter.RequestDatabaseInfo, + BackupDatabase: cmd.database, + } + + response, err := cmd.requestInfo(req) + if err != nil { + return err + } + + return cmd.backupResponsePaths(response) +} + +// backupRetentionPolicy will request the retention policy information from the server and then backup +// every shard in the retention policy. Each shard will be written to a separate file. 
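+// The snapshotter service returns the shard paths for the policy, and
+// backupResponsePaths then backs each shard up in turn.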
+func (cmd *Command) backupRetentionPolicy() error { + if cmd.isBackup { + cmd.StdoutLogger.Printf("backing up rp=%s since %s", cmd.retentionPolicy, cmd.since.Format(time.RFC3339)) + } else { + cmd.StdoutLogger.Printf("backing up rp=%s with boundaries start=%s, end=%s", + cmd.retentionPolicy, cmd.start.Format(time.RFC3339), cmd.end.Format(time.RFC3339)) + } + + req := &snapshotter.Request{ + Type: snapshotter.RequestRetentionPolicyInfo, + BackupDatabase: cmd.database, + BackupRetentionPolicy: cmd.retentionPolicy, + } + + response, err := cmd.requestInfo(req) + if err != nil { + return err + } + + return cmd.backupResponsePaths(response) +} + +// backupResponsePaths will backup all shards identified by shard paths in the response struct +func (cmd *Command) backupResponsePaths(response *snapshotter.Response) error { + + // loop through the returned paths and back up each shard + for _, path := range response.Paths { + db, rp, id, err := backup_util.DBRetentionAndShardFromPath(path) + if err != nil { + return err + } + + err = cmd.backupShard(db, rp, id) + + if err != nil { + return err + } + } + + return nil +} + +// backupMetastore will backup the whole metastore on the host to the backup path +// if useDB is non-empty, it will backup metadata only for the named database. +func (cmd *Command) backupMetastore() error { + metastoreArchivePath, err := cmd.nextPath(filepath.Join(cmd.path, backup_util.Metafile)) + if err != nil { + return err + } + + cmd.StdoutLogger.Printf("backing up metastore to %s", metastoreArchivePath) + + req := &snapshotter.Request{ + Type: snapshotter.RequestMetastoreBackup, + } + + err = cmd.downloadAndVerify(req, metastoreArchivePath, func(file string) error { + f, err := os.Open(file) + if err != nil { + return err + } + defer f.Close() + + var magicByte [8]byte + n, err := io.ReadFull(f, magicByte[:]) + if err != nil { + return err + } + + if n < 8 { + return errors.New("Not enough bytes data to verify") + } + + magic := binary.BigEndian.Uint64(magicByte[:]) + if magic != snapshotter.BackupMagicHeader { + cmd.StderrLogger.Println("Invalid metadata blob, ensure the metadata service is running (default port 8088)") + return errors.New("invalid metadata received") + } + + return nil + }) + + if err != nil { + return err + } + + if !cmd.portable { + cmd.BackupFiles = append(cmd.BackupFiles, metastoreArchivePath) + } + + if cmd.portable { + metaBytes, err := backup_util.GetMetaBytes(metastoreArchivePath) + defer os.Remove(metastoreArchivePath) + if err != nil { + return err + } + filename := cmd.portableFileBase + ".meta" + ep := backup_util.PortablePacker{Data: metaBytes, MaxNodeID: 0} + protoBytes, err := ep.MarshalBinary() + if err != nil { + return err + } + if err := ioutil.WriteFile(filepath.Join(cmd.path, filename), protoBytes, 0644); err != nil { + fmt.Fprintln(cmd.Stdout, "Error.") + return err + } + + cmd.manifest.Meta.FileName = filename + cmd.manifest.Meta.Size = int64(len(metaBytes)) + cmd.BackupFiles = append(cmd.BackupFiles, filename) + } + + return nil +} + +// nextPath returns the next file to write to. +func (cmd *Command) nextPath(path string) (string, error) { + // Iterate through incremental files until one is available. 
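+ // For the metastore, for example, this produces meta.00, meta.01, ... as
+ // successive backups are written into the same target directory.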
+ for i := 0; ; i++ { + s := fmt.Sprintf(path+".%02d", i) + if _, err := os.Stat(s); os.IsNotExist(err) { + return s, nil + } else if err != nil { + return "", err + } + } +} + +// downloadAndVerify will download either the metastore or shard to a temp file and then +// rename it to a good backup file name after complete +func (cmd *Command) downloadAndVerify(req *snapshotter.Request, path string, validator func(string) error) error { + tmppath := path + backup_util.Suffix + if err := cmd.download(req, tmppath); err != nil { + return err + } + + if validator != nil { + if err := validator(tmppath); err != nil { + if rmErr := os.Remove(tmppath); rmErr != nil { + cmd.StderrLogger.Printf("Error cleaning up temporary file: %v", rmErr) + } + return err + } + } + + f, err := os.Stat(tmppath) + if err != nil { + return err + } + + // There was nothing downloaded, don't create an empty backup file. + if f.Size() == 0 { + return os.Remove(tmppath) + } + + // Rename temporary file to final path. + if err := os.Rename(tmppath, path); err != nil { + return fmt.Errorf("rename: %s", err) + } + + return nil +} + +// download downloads a snapshot of either the metastore or a shard from a host to a given path. +func (cmd *Command) download(req *snapshotter.Request, path string) error { + // Create local file to write to. + f, err := os.Create(path) + if err != nil { + return fmt.Errorf("open temp file: %s", err) + } + defer f.Close() + + min := 2 * time.Second + for i := 0; i < 10; i++ { + if err = func() error { + // Connect to snapshotter service. + conn, err := tcp.Dial("tcp", cmd.host, snapshotter.MuxHeader) + if err != nil { + return err + } + defer conn.Close() + + _, err = conn.Write([]byte{byte(req.Type)}) + if err != nil { + return err + } + + // Write the request + if err := json.NewEncoder(conn).Encode(req); err != nil { + return fmt.Errorf("encode snapshot request: %s", err) + } + + // Read snapshot from the connection + if n, err := io.Copy(f, conn); err != nil || n == 0 { + return fmt.Errorf("copy backup to file: err=%v, n=%d", err, n) + } + return nil + }(); err == nil { + break + } else if err != nil { + backoff := time.Duration(math.Pow(3.8, float64(i))) * time.Millisecond + if backoff < min { + backoff = min + } + cmd.StderrLogger.Printf("Download shard %v failed %s. Waiting %v and retrying (%d)...\n", req.ShardID, err, backoff, i) + time.Sleep(backoff) + } + } + + return err +} + +// requestInfo will request the database or retention policy information from the host +func (cmd *Command) requestInfo(request *snapshotter.Request) (*snapshotter.Response, error) { + // Connect to snapshotter service. + var r snapshotter.Response + conn, err := tcp.Dial("tcp", cmd.host, snapshotter.MuxHeader) + if err != nil { + return nil, err + } + defer conn.Close() + _, err = conn.Write([]byte{byte(request.Type)}) + if err != nil { + return &r, err + } + + // Write the request + if err := json.NewEncoder(conn).Encode(request); err != nil { + return nil, fmt.Errorf("encode snapshot request: %s", err) + } + + // Read the response + + if err := json.NewDecoder(conn).Decode(&r); err != nil { + return nil, err + } + + return &r, nil +} + +// printUsage prints the usage message to STDERR. +func (cmd *Command) printUsage() { + fmt.Fprintf(cmd.Stdout, ` +Creates a backup copy of specified InfluxDB OSS database(s) and saves the files in an Enterprise-compatible +format to PATH (directory where backups are saved). 
+ +Usage: influxd backup [options] PATH + + -portable + Required to generate backup files in a portable format that can be restored to InfluxDB OSS or InfluxDB + Enterprise. Use unless the legacy backup is required. + -host + InfluxDB OSS host to back up from. Optional. Defaults to 127.0.0.1:8088. + -db + InfluxDB OSS database name to back up. Optional. If not specified, all databases are backed up when + using '-portable'. + -rp + Retention policy to use for the backup. Optional. If not specified, all retention policies are used by + default. + -shard + The identifier of the shard to back up. Optional. If specified, '-rp ' is required. + -start <2015-12-24T08:12:23Z> + Include all points starting with specified timestamp (RFC3339 format). + Not compatible with '-since '. + -end <2015-12-24T08:12:23Z> + Exclude all points after timestamp (RFC3339 format). + Not compatible with '-since '. + -since <2015-12-24T08:12:23Z> + Create an incremental backup of all points after the timestamp (RFC3339 format). Optional. + Recommend using '-start ' instead. +`) + +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influxd/backup_util/backup_util.go b/vendor/github.com/influxdata/influxdb/cmd/influxd/backup_util/backup_util.go new file mode 100644 index 0000000..2632da4 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influxd/backup_util/backup_util.go @@ -0,0 +1,225 @@ +package backup_util + +import ( + "bytes" + "encoding/binary" + "encoding/json" + "fmt" + "io" + "os" + "sort" + "strings" + + "github.com/gogo/protobuf/proto" + internal "github.com/influxdata/influxdb/cmd/influxd/backup_util/internal" + "github.com/influxdata/influxdb/services/snapshotter" + "io/ioutil" + "path/filepath" +) + +//go:generate protoc --gogo_out=. internal/data.proto + +const ( + // Suffix is a suffix added to the backup while it's in-process. + Suffix = ".pending" + + // Metafile is the base name given to the metastore backups. + Metafile = "meta" + + // BackupFilePattern is the beginning of the pattern for a backup + // file. They follow the scheme ... + BackupFilePattern = "%s.%s.%05d" + + PortableFileNamePattern = "20060102T150405Z" +) + +type PortablePacker struct { + Data []byte + MaxNodeID uint64 +} + +func (ep PortablePacker) MarshalBinary() ([]byte, error) { + ed := internal.PortableData{Data: ep.Data, MaxNodeID: &ep.MaxNodeID} + return proto.Marshal(&ed) +} + +func (ep *PortablePacker) UnmarshalBinary(data []byte) error { + var pb internal.PortableData + if err := proto.Unmarshal(data, &pb); err != nil { + return err + } + + ep.Data = pb.GetData() + ep.MaxNodeID = pb.GetMaxNodeID() + return nil +} + +func GetMetaBytes(fname string) ([]byte, error) { + f, err := os.Open(fname) + if err != nil { + return []byte{}, err + } + + var buf bytes.Buffer + if _, err := io.Copy(&buf, f); err != nil { + return []byte{}, fmt.Errorf("copy: %s", err) + } + + b := buf.Bytes() + var i int + + // Make sure the file is actually a meta store backup file + magic := binary.BigEndian.Uint64(b[:8]) + if magic != snapshotter.BackupMagicHeader { + return []byte{}, fmt.Errorf("invalid metadata file") + } + i += 8 + + // Size of the meta store bytes + length := int(binary.BigEndian.Uint64(b[i : i+8])) + i += 8 + metaBytes := b[i : i+length] + + return metaBytes, nil +} + +// Manifest lists the meta and shard file information contained in the backup. +// If Limited is false, the manifest contains a full backup, otherwise +// it is a partial backup. 
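+// The manifest is written as indented JSON next to the backup files, in the form
+//   {"meta": {...}, "limited": false, "files": [...]}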
+type Manifest struct { + Meta MetaEntry `json:"meta"` + Limited bool `json:"limited"` + Files []Entry `json:"files"` + + // If limited is true, then one (or all) of the following fields will be set + + Database string `json:"database,omitempty"` + Policy string `json:"policy,omitempty"` + ShardID uint64 `json:"shard_id,omitempty"` +} + +// Entry contains the data information for a backed up shard. +type Entry struct { + Database string `json:"database"` + Policy string `json:"policy"` + ShardID uint64 `json:"shardID"` + FileName string `json:"fileName"` + Size int64 `json:"size"` + LastModified int64 `json:"lastModified"` +} + +func (e *Entry) SizeOrZero() int64 { + if e == nil { + return 0 + } + return e.Size +} + +// MetaEntry contains the meta store information for a backup. +type MetaEntry struct { + FileName string `json:"fileName"` + Size int64 `json:"size"` +} + +// Size returns the size of the manifest. +func (m *Manifest) Size() int64 { + if m == nil { + return 0 + } + + size := m.Meta.Size + + for _, f := range m.Files { + size += f.Size + } + return size +} + +func (manifest *Manifest) Save(filename string) error { + b, err := json.MarshalIndent(manifest, "", " ") + if err != nil { + return fmt.Errorf("create manifest: %v", err) + } + + return ioutil.WriteFile(filename, b, 0600) +} + +// LoadIncremental loads multiple manifest files from a given directory. +func LoadIncremental(dir string) (*MetaEntry, map[uint64]*Entry, error) { + manifests, err := filepath.Glob(filepath.Join(dir, "*.manifest")) + if err != nil { + return nil, nil, err + } + shards := make(map[uint64]*Entry) + + if len(manifests) == 0 { + return nil, shards, nil + } + + sort.Sort(sort.Reverse(sort.StringSlice(manifests))) + var metaEntry MetaEntry + + for _, fileName := range manifests { + fi, err := os.Stat(fileName) + if err != nil { + return nil, nil, err + } + + if fi.IsDir() { + continue + } + + f, err := os.Open(fileName) + if err != nil { + return nil, nil, err + } + + var manifest Manifest + err = json.NewDecoder(f).Decode(&manifest) + f.Close() + if err != nil { + return nil, nil, fmt.Errorf("read manifest: %v", err) + } + + // sorted (descending) above, so first manifest is most recent + if metaEntry.FileName == "" { + metaEntry = manifest.Meta + } + + for i := range manifest.Files { + sh := manifest.Files[i] + if _, err := os.Stat(filepath.Join(dir, sh.FileName)); err != nil { + continue + } + + e := shards[sh.ShardID] + if e == nil || sh.LastModified > e.LastModified { + shards[sh.ShardID] = &sh + } + } + } + + return &metaEntry, shards, nil +} + +type CountingWriter struct { + io.Writer + Total int64 // Total # of bytes transferred +} + +func (w *CountingWriter) Write(p []byte) (n int, err error) { + n, err = w.Writer.Write(p) + w.Total += int64(n) + return +} + +// retentionAndShardFromPath will take the shard relative path and split it into the +// retention policy name and shard ID. The first part of the path should be the database name. 
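+// For example, a relative shard path of "db0/autogen/2" (using the platform path
+// separator) is split into ("db0", "autogen", "2").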
+func DBRetentionAndShardFromPath(path string) (db, retention, shard string, err error) { + a := strings.Split(path, string(filepath.Separator)) + if len(a) != 3 { + return "", "", "", fmt.Errorf("expected database, retention policy, and shard id in path: %s", path) + } + + return a[0], a[1], a[2], nil +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influxd/backup_util/internal/data.pb.go b/vendor/github.com/influxdata/influxdb/cmd/influxd/backup_util/internal/data.pb.go new file mode 100644 index 0000000..f6762af --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influxd/backup_util/internal/data.pb.go @@ -0,0 +1,71 @@ +// Code generated by protoc-gen-gogo. +// source: internal/data.proto +// DO NOT EDIT! + +/* +Package backup_util is a generated protocol buffer package. + +It is generated from these files: + internal/data.proto + +It has these top-level messages: + PortableData +*/ +package backup_util + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type PortableData struct { + Data []byte `protobuf:"bytes,1,req,name=Data" json:"Data,omitempty"` + MaxNodeID *uint64 `protobuf:"varint,2,req,name=MaxNodeID" json:"MaxNodeID,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PortableData) Reset() { *m = PortableData{} } +func (m *PortableData) String() string { return proto.CompactTextString(m) } +func (*PortableData) ProtoMessage() {} +func (*PortableData) Descriptor() ([]byte, []int) { return fileDescriptorData, []int{0} } + +func (m *PortableData) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +func (m *PortableData) GetMaxNodeID() uint64 { + if m != nil && m.MaxNodeID != nil { + return *m.MaxNodeID + } + return 0 +} + +func init() { + proto.RegisterType((*PortableData)(nil), "backup_util.PortableData") +} + +func init() { proto.RegisterFile("internal/data.proto", fileDescriptorData) } + +var fileDescriptorData = []byte{ + // 110 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0xce, 0xcc, 0x2b, 0x49, + 0x2d, 0xca, 0x4b, 0xcc, 0xd1, 0x4f, 0x49, 0x2c, 0x49, 0xd4, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, + 0xe2, 0x4e, 0x4a, 0x4c, 0xce, 0x2e, 0x2d, 0x88, 0x2f, 0x2d, 0xc9, 0xcc, 0x51, 0x72, 0xe0, 0xe2, + 0x09, 0xc8, 0x2f, 0x2a, 0x49, 0x4c, 0xca, 0x49, 0x75, 0x49, 0x2c, 0x49, 0x14, 0x12, 0xe2, 0x62, + 0x01, 0xd1, 0x12, 0x8c, 0x0a, 0x4c, 0x1a, 0x3c, 0x41, 0x60, 0xb6, 0x90, 0x0c, 0x17, 0xa7, 0x6f, + 0x62, 0x85, 0x5f, 0x7e, 0x4a, 0xaa, 0xa7, 0x8b, 0x04, 0x93, 0x02, 0x93, 0x06, 0x4b, 0x10, 0x42, + 0x00, 0x10, 0x00, 0x00, 0xff, 0xff, 0xc9, 0x54, 0xdc, 0x48, 0x64, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influxd/backup_util/internal/data.proto b/vendor/github.com/influxdata/influxdb/cmd/influxd/backup_util/internal/data.proto new file mode 100644 index 0000000..13dfcd3 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influxd/backup_util/internal/data.proto @@ -0,0 +1,12 @@ +package backup_util; + 
+//======================================================================== +// +// Metadata +// +//======================================================================== + +message PortableData { + required bytes Data = 1; + required uint64 MaxNodeID = 2; +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influxd/help/help.go b/vendor/github.com/influxdata/influxdb/cmd/influxd/help/help.go new file mode 100644 index 0000000..67c8cc9 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influxd/help/help.go @@ -0,0 +1,46 @@ +// Package help is the help subcommand of the influxd command. +package help + +import ( + "fmt" + "io" + "os" + "strings" +) + +// Command displays help for command-line sub-commands. +type Command struct { + Stdout io.Writer +} + +// NewCommand returns a new instance of Command. +func NewCommand() *Command { + return &Command{ + Stdout: os.Stdout, + } +} + +// Run executes the command. +func (cmd *Command) Run(args ...string) error { + fmt.Fprintln(cmd.Stdout, strings.TrimSpace(usage)) + return nil +} + +const usage = ` +Configure and start an InfluxDB server. + +Usage: influxd [[command] [arguments]] + +The commands are: + + backup downloads a snapshot of a data node and saves it to disk + config display the default configuration + help display this help message + restore uses a snapshot of a data node to rebuild a cluster + run run node with existing configuration + version displays the InfluxDB version + +"run" is the default command. + +Use "influxd [command] -help" for more information about a command. +` diff --git a/vendor/github.com/influxdata/influxdb/cmd/influxd/main.go b/vendor/github.com/influxdata/influxdb/cmd/influxd/main.go new file mode 100644 index 0000000..21773ba --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influxd/main.go @@ -0,0 +1,169 @@ +// Command influxd is the InfluxDB server. +package main + +import ( + "flag" + "fmt" + "io" + "math/rand" + "os" + "os/signal" + "syscall" + "time" + + "github.com/influxdata/influxdb/cmd" + "github.com/influxdata/influxdb/cmd/influxd/backup" + "github.com/influxdata/influxdb/cmd/influxd/help" + "github.com/influxdata/influxdb/cmd/influxd/restore" + "github.com/influxdata/influxdb/cmd/influxd/run" +) + +// These variables are populated via the Go linker. +var ( + version string + commit string + branch string +) + +func init() { + // If commit, branch, or build time are not set, make that clear. + if version == "" { + version = "unknown" + } + if commit == "" { + commit = "unknown" + } + if branch == "" { + branch = "unknown" + } +} + +func main() { + rand.Seed(time.Now().UnixNano()) + + m := NewMain() + if err := m.Run(os.Args[1:]...); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } +} + +// Main represents the program execution. +type Main struct { + Stdin io.Reader + Stdout io.Writer + Stderr io.Writer +} + +// NewMain return a new instance of Main. +func NewMain() *Main { + return &Main{ + Stdin: os.Stdin, + Stdout: os.Stdout, + Stderr: os.Stderr, + } +} + +// Run determines and runs the command specified by the CLI args. +func (m *Main) Run(args ...string) error { + name, args := cmd.ParseCommandName(args) + + // Extract name from args. + switch name { + case "", "run": + cmd := run.NewCommand() + + // Tell the server the build details. 
+ cmd.Version = version + cmd.Commit = commit + cmd.Branch = branch + + if err := cmd.Run(args...); err != nil { + return fmt.Errorf("run: %s", err) + } + + signalCh := make(chan os.Signal, 1) + signal.Notify(signalCh, os.Interrupt, syscall.SIGTERM) + cmd.Logger.Info("Listening for signals") + + // Block until one of the signals above is received + <-signalCh + cmd.Logger.Info("Signal received, initializing clean shutdown...") + go cmd.Close() + + // Block again until another signal is received, a shutdown timeout elapses, + // or the Command is gracefully closed + cmd.Logger.Info("Waiting for clean shutdown...") + select { + case <-signalCh: + cmd.Logger.Info("Second signal received, initializing hard shutdown") + case <-time.After(time.Second * 30): + cmd.Logger.Info("Time limit reached, initializing hard shutdown") + case <-cmd.Closed: + cmd.Logger.Info("Server shutdown completed") + } + + // goodbye. + + case "backup": + name := backup.NewCommand() + if err := name.Run(args...); err != nil { + return fmt.Errorf("backup: %s", err) + } + case "restore": + name := restore.NewCommand() + if err := name.Run(args...); err != nil { + return fmt.Errorf("restore: %s", err) + } + case "config": + if err := run.NewPrintConfigCommand().Run(args...); err != nil { + return fmt.Errorf("config: %s", err) + } + case "version": + if err := NewVersionCommand().Run(args...); err != nil { + return fmt.Errorf("version: %s", err) + } + case "help": + if err := help.NewCommand().Run(args...); err != nil { + return fmt.Errorf("help: %s", err) + } + default: + return fmt.Errorf(`unknown command "%s"`+"\n"+`Run 'influxd help' for usage`+"\n\n", name) + } + + return nil +} + +// VersionCommand represents the command executed by "influxd version". +type VersionCommand struct { + Stdout io.Writer + Stderr io.Writer +} + +// NewVersionCommand return a new instance of VersionCommand. +func NewVersionCommand() *VersionCommand { + return &VersionCommand{ + Stdout: os.Stdout, + Stderr: os.Stderr, + } +} + +// Run prints the current version and commit info. +func (cmd *VersionCommand) Run(args ...string) error { + // Parse flags in case -h is specified. + fs := flag.NewFlagSet("", flag.ContinueOnError) + fs.Usage = func() { fmt.Fprintln(cmd.Stderr, versionUsage) } + if err := fs.Parse(args); err != nil { + return err + } + + // Print version info. + fmt.Fprintf(cmd.Stdout, "InfluxDB v%s (git: %s %s)\n", version, branch, commit) + + return nil +} + +var versionUsage = `Displays the InfluxDB version, build branch and git commit hash. + +Usage: influxd version +` diff --git a/vendor/github.com/influxdata/influxdb/cmd/influxd/restore/restore.go b/vendor/github.com/influxdata/influxdb/cmd/influxd/restore/restore.go new file mode 100644 index 0000000..eadd5f0 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influxd/restore/restore.go @@ -0,0 +1,600 @@ +// Package restore is the restore subcommand for the influxd command, +// for restoring from a backup. +package restore + +import ( + "archive/tar" + "bytes" + "encoding/binary" + "flag" + "fmt" + "io" + "io/ioutil" + "log" + "os" + "path/filepath" + "strconv" + "strings" + + gzip "github.com/klauspost/pgzip" + + "github.com/influxdata/influxdb/cmd/influxd/backup_util" + tarstream "github.com/influxdata/influxdb/pkg/tar" + "github.com/influxdata/influxdb/services/meta" + "github.com/influxdata/influxdb/services/snapshotter" +) + +// Command represents the program execution for "influxd restore". 
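+// A restore runs in one of three modes: offline (the default), legacy online
+// (-online), or portable (-portable); Run selects between them.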
+type Command struct { + // The logger passed to the ticker during execution. + StdoutLogger *log.Logger + StderrLogger *log.Logger + + // Standard input/output, overridden for testing. + Stderr io.Writer + Stdout io.Writer + + host string + client *snapshotter.Client + + backupFilesPath string + metadir string + datadir string + destinationDatabase string + sourceDatabase string + backupRetention string + restoreRetention string + shard uint64 + portable bool + online bool + manifestMeta *backup_util.MetaEntry + manifestFiles map[uint64]*backup_util.Entry + + // TODO: when the new meta stuff is done this should not be exported or be gone + MetaConfig *meta.Config + + shardIDMap map[uint64]uint64 +} + +// NewCommand returns a new instance of Command with default settings. +func NewCommand() *Command { + return &Command{ + Stdout: os.Stdout, + Stderr: os.Stderr, + MetaConfig: meta.NewConfig(), + } +} + +// Run executes the program. +func (cmd *Command) Run(args ...string) error { + // Set up logger. + cmd.StdoutLogger = log.New(cmd.Stdout, "", log.LstdFlags) + cmd.StderrLogger = log.New(cmd.Stderr, "", log.LstdFlags) + if err := cmd.parseFlags(args); err != nil { + return err + } + + if cmd.portable { + return cmd.runOnlinePortable() + } else if cmd.online { + return cmd.runOnlineLegacy() + } else { + return cmd.runOffline() + } +} + +func (cmd *Command) runOffline() error { + if cmd.metadir != "" { + if err := cmd.unpackMeta(); err != nil { + return err + } + } + + if cmd.shard != 0 { + return cmd.unpackShard(cmd.shard) + } else if cmd.restoreRetention != "" { + return cmd.unpackRetention() + } else if cmd.datadir != "" { + return cmd.unpackDatabase() + } + return nil +} + +func (cmd *Command) runOnlinePortable() error { + err := cmd.updateMetaPortable() + if err != nil { + cmd.StderrLogger.Printf("error updating meta: %v", err) + return err + } + err = cmd.uploadShardsPortable() + if err != nil { + cmd.StderrLogger.Printf("error updating shards: %v", err) + return err + } + return nil +} + +func (cmd *Command) runOnlineLegacy() error { + err := cmd.updateMetaLegacy() + if err != nil { + cmd.StderrLogger.Printf("error updating meta: %v", err) + return err + } + err = cmd.uploadShardsLegacy() + if err != nil { + cmd.StderrLogger.Printf("error updating shards: %v", err) + return err + } + return nil +} + +// parseFlags parses and validates the command line arguments. +func (cmd *Command) parseFlags(args []string) error { + fs := flag.NewFlagSet("", flag.ContinueOnError) + fs.StringVar(&cmd.host, "host", "localhost:8088", "") + fs.StringVar(&cmd.metadir, "metadir", "", "") + fs.StringVar(&cmd.datadir, "datadir", "", "") + + fs.StringVar(&cmd.sourceDatabase, "database", "", "") + fs.StringVar(&cmd.sourceDatabase, "db", "", "") + fs.StringVar(&cmd.destinationDatabase, "newdb", "", "") + + fs.StringVar(&cmd.backupRetention, "retention", "", "") + fs.StringVar(&cmd.backupRetention, "rp", "", "") + fs.StringVar(&cmd.restoreRetention, "newrp", "", "") + + fs.Uint64Var(&cmd.shard, "shard", 0, "") + fs.BoolVar(&cmd.online, "online", false, "") + fs.BoolVar(&cmd.portable, "portable", false, "") + fs.SetOutput(cmd.Stdout) + fs.Usage = cmd.printUsage + if err := fs.Parse(args); err != nil { + return err + } + + cmd.MetaConfig = meta.NewConfig() + cmd.MetaConfig.Dir = cmd.metadir + cmd.client = snapshotter.NewClient(cmd.host) + + // Require output path. 
+ cmd.backupFilesPath = fs.Arg(0) + if cmd.backupFilesPath == "" { + return fmt.Errorf("path with backup files required") + } + + fi, err := os.Stat(cmd.backupFilesPath) + if err != nil || !fi.IsDir() { + return fmt.Errorf("backup path should be a valid directory: %s", cmd.backupFilesPath) + } + + if cmd.portable || cmd.online { + // validate the arguments + + if cmd.metadir != "" { + return fmt.Errorf("offline parameter metadir found, not compatible with -portable") + } + + if cmd.datadir != "" { + return fmt.Errorf("offline parameter datadir found, not compatible with -portable") + } + + if cmd.restoreRetention == "" { + cmd.restoreRetention = cmd.backupRetention + } + + if cmd.portable { + var err error + cmd.manifestMeta, cmd.manifestFiles, err = backup_util.LoadIncremental(cmd.backupFilesPath) + if err != nil { + return fmt.Errorf("restore failed while processing manifest files: %s", err.Error()) + } else if cmd.manifestMeta == nil { + // No manifest files found. + return fmt.Errorf("No manifest files found in: %s\n", cmd.backupFilesPath) + + } + } + } else { + // validate the arguments + if cmd.metadir == "" && cmd.destinationDatabase == "" { + return fmt.Errorf("-metadir or -destinationDatabase are required to restore") + } + + if cmd.destinationDatabase != "" && cmd.datadir == "" { + return fmt.Errorf("-datadir is required to restore") + } + + if cmd.shard != 0 { + if cmd.destinationDatabase == "" { + return fmt.Errorf("-destinationDatabase is required to restore shard") + } + if cmd.backupRetention == "" { + return fmt.Errorf("-retention is required to restore shard") + } + } else if cmd.backupRetention != "" && cmd.destinationDatabase == "" { + return fmt.Errorf("-destinationDatabase is required to restore retention policy") + } + } + + return nil +} + +// unpackMeta reads the metadata from the backup directory and initializes a raft +// cluster and replaces the root metadata. +func (cmd *Command) unpackMeta() error { + // find the meta file + metaFiles, err := filepath.Glob(filepath.Join(cmd.backupFilesPath, backup_util.Metafile+".*")) + if err != nil { + return err + } + + if len(metaFiles) == 0 { + return fmt.Errorf("no metastore backups in %s", cmd.backupFilesPath) + } + + latest := metaFiles[len(metaFiles)-1] + + fmt.Fprintf(cmd.Stdout, "Using metastore snapshot: %v\n", latest) + // Read the metastore backup + f, err := os.Open(latest) + if err != nil { + return err + } + + var buf bytes.Buffer + if _, err := io.Copy(&buf, f); err != nil { + return fmt.Errorf("copy: %s", err) + } + + b := buf.Bytes() + var i int + + // Make sure the file is actually a meta store backup file + magic := binary.BigEndian.Uint64(b[:8]) + if magic != snapshotter.BackupMagicHeader { + return fmt.Errorf("invalid metadata file") + } + i += 8 + + // Size of the meta store bytes + length := int(binary.BigEndian.Uint64(b[i : i+8])) + i += 8 + metaBytes := b[i : i+length] + i += int(length) + + // Size of the node.json bytes + length = int(binary.BigEndian.Uint64(b[i : i+8])) + i += 8 + nodeBytes := b[i : i+length] + + // Unpack into metadata. + var data meta.Data + if err := data.UnmarshalBinary(metaBytes); err != nil { + return fmt.Errorf("unmarshal: %s", err) + } + + // Copy meta config and remove peers so it starts in single mode. 
+ c := cmd.MetaConfig + c.Dir = cmd.metadir + + // Create the meta dir + if err := os.MkdirAll(c.Dir, 0700); err != nil { + return err + } + + // Write node.json back to meta dir + if err := ioutil.WriteFile(filepath.Join(c.Dir, "node.json"), nodeBytes, 0655); err != nil { + return err + } + + client := meta.NewClient(c) + if err := client.Open(); err != nil { + return err + } + defer client.Close() + + // Force set the full metadata. + if err := client.SetData(&data); err != nil { + return fmt.Errorf("set data: %s", err) + } + + // remove the raft.db file if it exists + err = os.Remove(filepath.Join(cmd.metadir, "raft.db")) + if err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + + // remove the node.json file if it exists + err = os.Remove(filepath.Join(cmd.metadir, "node.json")) + if err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + + return nil +} + +func (cmd *Command) updateMetaPortable() error { + var metaBytes []byte + fileName := filepath.Join(cmd.backupFilesPath, cmd.manifestMeta.FileName) + + fileBytes, err := ioutil.ReadFile(fileName) + if err != nil { + return err + } + + var ep backup_util.PortablePacker + ep.UnmarshalBinary(fileBytes) + + metaBytes = ep.Data + + req := &snapshotter.Request{ + Type: snapshotter.RequestMetaStoreUpdate, + BackupDatabase: cmd.sourceDatabase, + RestoreDatabase: cmd.destinationDatabase, + BackupRetentionPolicy: cmd.backupRetention, + RestoreRetentionPolicy: cmd.restoreRetention, + UploadSize: int64(len(metaBytes)), + } + + shardIDMap, err := cmd.client.UpdateMeta(req, bytes.NewReader(metaBytes)) + cmd.shardIDMap = shardIDMap + return err + +} + +// updateMetaLive takes a metadata backup and sends it to the influx server +// for a live merger of metadata. +func (cmd *Command) updateMetaLegacy() error { + + var metaBytes []byte + + // find the meta file + metaFiles, err := filepath.Glob(filepath.Join(cmd.backupFilesPath, backup_util.Metafile+".*")) + if err != nil { + return err + } + + if len(metaFiles) == 0 { + return fmt.Errorf("no metastore backups in %s", cmd.backupFilesPath) + } + + fileName := metaFiles[len(metaFiles)-1] + cmd.StdoutLogger.Printf("Using metastore snapshot: %v\n", fileName) + metaBytes, err = backup_util.GetMetaBytes(fileName) + if err != nil { + return err + } + + req := &snapshotter.Request{ + Type: snapshotter.RequestMetaStoreUpdate, + BackupDatabase: cmd.sourceDatabase, + RestoreDatabase: cmd.destinationDatabase, + BackupRetentionPolicy: cmd.backupRetention, + RestoreRetentionPolicy: cmd.restoreRetention, + UploadSize: int64(len(metaBytes)), + } + + shardIDMap, err := cmd.client.UpdateMeta(req, bytes.NewReader(metaBytes)) + cmd.shardIDMap = shardIDMap + return err +} + +func (cmd *Command) uploadShardsPortable() error { + for _, file := range cmd.manifestFiles { + if cmd.sourceDatabase == "" || cmd.sourceDatabase == file.Database { + if cmd.backupRetention == "" || cmd.backupRetention == file.Policy { + if cmd.shard == 0 || cmd.shard == file.ShardID { + oldID := file.ShardID + // if newID not found then this shard's metadata was NOT imported + // and should be skipped + newID, ok := cmd.shardIDMap[oldID] + if !ok { + cmd.StdoutLogger.Printf("Meta info not found for shard %d on database %s. 
Skipping shard file %s", oldID, file.Database, file.FileName) + continue + } + cmd.StdoutLogger.Printf("Restoring shard %d live from backup %s\n", file.ShardID, file.FileName) + f, err := os.Open(filepath.Join(cmd.backupFilesPath, file.FileName)) + if err != nil { + f.Close() + return err + } + gr, err := gzip.NewReader(f) + if err != nil { + f.Close() + return err + } + tr := tar.NewReader(gr) + targetDB := cmd.destinationDatabase + if targetDB == "" { + targetDB = file.Database + } + + if err := cmd.client.UploadShard(oldID, newID, targetDB, cmd.restoreRetention, tr); err != nil { + f.Close() + return err + } + f.Close() + } + } + } + } + return nil +} + +// unpackFiles will look for backup files matching the pattern and restore them to the data dir +func (cmd *Command) uploadShardsLegacy() error { + // find the destinationDatabase backup files + pat := fmt.Sprintf("%s.*", filepath.Join(cmd.backupFilesPath, cmd.sourceDatabase)) + cmd.StdoutLogger.Printf("Restoring live from backup %s\n", pat) + backupFiles, err := filepath.Glob(pat) + if err != nil { + return err + } + if len(backupFiles) == 0 { + return fmt.Errorf("no backup files in %s", cmd.backupFilesPath) + } + + for _, fn := range backupFiles { + parts := strings.Split(fn, ".") + + if len(parts) != 4 { + cmd.StderrLogger.Printf("Skipping mis-named backup file: %s", fn) + } + shardID, err := strconv.ParseUint(parts[2], 10, 64) + if err != nil { + return err + } + + // if newID not found then this shard's metadata was NOT imported + // and should be skipped + newID, ok := cmd.shardIDMap[shardID] + if !ok { + cmd.StdoutLogger.Printf("Meta info not found for shard %d. Skipping shard file %s", shardID, fn) + continue + } + f, err := os.Open(fn) + if err != nil { + return err + } + tr := tar.NewReader(f) + if err := cmd.client.UploadShard(shardID, newID, cmd.destinationDatabase, cmd.restoreRetention, tr); err != nil { + f.Close() + return err + } + f.Close() + } + + return nil +} + +// unpackDatabase will look for all backup files in the path matching this destinationDatabase +// and restore them to the data dir +func (cmd *Command) unpackDatabase() error { + // make sure the shard isn't already there so we don't clobber anything + restorePath := filepath.Join(cmd.datadir, cmd.sourceDatabase) + if _, err := os.Stat(restorePath); err != nil && !os.IsNotExist(err) { + return fmt.Errorf("database already present: %s", restorePath) + } + + // find the database backup files + pat := filepath.Join(cmd.backupFilesPath, cmd.sourceDatabase) + return cmd.unpackFiles(pat + ".*") +} + +// unpackRetention will look for all backup files in the path matching this retention +// and restore them to the data dir +func (cmd *Command) unpackRetention() error { + // make sure the shard isn't already there so we don't clobber anything + restorePath := filepath.Join(cmd.datadir, cmd.sourceDatabase, cmd.backupRetention) + if _, err := os.Stat(restorePath); err != nil && !os.IsNotExist(err) { + return fmt.Errorf("retention already present: %s", restorePath) + } + + // find the retention backup files + pat := filepath.Join(cmd.backupFilesPath, cmd.sourceDatabase) + return cmd.unpackFiles(fmt.Sprintf("%s.%s.*", pat, cmd.backupRetention)) +} + +// unpackShard will look for all backup files in the path matching this shard ID +// and restore them to the data dir +func (cmd *Command) unpackShard(shard uint64) error { + shardID := strconv.FormatUint(shard, 10) + // make sure the shard isn't already there so we don't clobber anything + restorePath := 
filepath.Join(cmd.datadir, cmd.sourceDatabase, cmd.backupRetention, shardID) + if _, err := os.Stat(restorePath); err != nil && !os.IsNotExist(err) { + return fmt.Errorf("shard already present: %s", restorePath) + } + + id, err := strconv.ParseUint(shardID, 10, 64) + if err != nil { + return err + } + + // find the shard backup files + pat := filepath.Join(cmd.backupFilesPath, fmt.Sprintf(backup_util.BackupFilePattern, cmd.sourceDatabase, cmd.backupRetention, id)) + return cmd.unpackFiles(pat + ".*") +} + +// unpackFiles will look for backup files matching the pattern and restore them to the data dir +func (cmd *Command) unpackFiles(pat string) error { + cmd.StdoutLogger.Printf("Restoring offline from backup %s\n", pat) + + backupFiles, err := filepath.Glob(pat) + if err != nil { + return err + } + + if len(backupFiles) == 0 { + return fmt.Errorf("no backup files for %s in %s", pat, cmd.backupFilesPath) + } + + for _, fn := range backupFiles { + if err := cmd.unpackTar(fn); err != nil { + return err + } + } + + return nil +} + +// unpackTar will restore a single tar archive to the data dir +func (cmd *Command) unpackTar(tarFile string) error { + f, err := os.Open(tarFile) + if err != nil { + return err + } + defer f.Close() + + // should get us ["db","rp", "00001", "00"] + pathParts := strings.Split(filepath.Base(tarFile), ".") + if len(pathParts) != 4 { + return fmt.Errorf("backup tarfile name incorrect format") + } + + shardPath := filepath.Join(cmd.datadir, pathParts[0], pathParts[1], strings.Trim(pathParts[2], "0")) + os.MkdirAll(shardPath, 0755) + + return tarstream.Restore(f, shardPath) +} + +// printUsage prints the usage message to STDERR. +func (cmd *Command) printUsage() { + fmt.Fprintf(cmd.Stdout, ` +Uses backup copies from the specified PATH to restore databases or specific shards from InfluxDB OSS + or InfluxDB Enterprise to an InfluxDB OSS instance. + +Usage: influxd restore -portable [options] PATH + +Note: Restore using the '-portable' option consumes files in an improved Enterprise-compatible + format that includes a file manifest. + +Options: + -portable + Required to activate the portable restore mode. If not specified, the legacy restore mode is used. + -host + InfluxDB OSS host to connect to where the data will be restored. Defaults to '127.0.0.1:8088'. + -db + Name of database to be restored from the backup (InfluxDB OSS or InfluxDB Enterprise) + -newdb + Name of the InfluxDB OSS database into which the archived data will be imported on the target system. + Optional. If not given, then the value of '-db ' is used. The new database name must be unique + to the target system. + -rp + Name of retention policy from the backup that will be restored. Optional. + Requires that '-db ' is specified. + -newrp + Name of the retention policy to be created on the target system. Optional. Requires that '-rp ' + is set. If not given, the '-rp ' value is used. + -shard + Identifier of the shard to be restored. Optional. If specified, then '-db ' and '-rp ' are + required. + PATH + Path to directory containing the backup files. + +`) +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influxd/run/command.go b/vendor/github.com/influxdata/influxdb/cmd/influxd/run/command.go new file mode 100644 index 0000000..d6c3dce --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influxd/run/command.go @@ -0,0 +1,301 @@ +// Package run is the run (default) subcommand for the influxd command. 
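+// It parses the configuration, applies environment overrides, then starts and
+// supervises the InfluxDB server.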
+package run + +import ( + "flag" + "fmt" + "io" + "io/ioutil" + "log" + "net/http" + _ "net/http/pprof" + "os" + "path/filepath" + "runtime" + "strconv" + "time" + + "github.com/influxdata/influxdb/logger" + "go.uber.org/zap" +) + +const logo = ` + 8888888 .d888 888 8888888b. 888888b. + 888 d88P" 888 888 "Y88b 888 "88b + 888 888 888 888 888 888 .88P + 888 88888b. 888888 888 888 888 888 888 888 888 8888888K. + 888 888 "88b 888 888 888 888 Y8bd8P' 888 888 888 "Y88b + 888 888 888 888 888 888 888 X88K 888 888 888 888 + 888 888 888 888 888 Y88b 888 .d8""8b. 888 .d88P 888 d88P + 8888888 888 888 888 888 "Y88888 888 888 8888888P" 8888888P" + +` + +// Command represents the command executed by "influxd run". +type Command struct { + Version string + Branch string + Commit string + BuildTime string + + closing chan struct{} + pidfile string + Closed chan struct{} + + Stdin io.Reader + Stdout io.Writer + Stderr io.Writer + Logger *zap.Logger + + Server *Server + + // How to get environment variables. Normally set to os.Getenv, except for tests. + Getenv func(string) string +} + +// NewCommand return a new instance of Command. +func NewCommand() *Command { + return &Command{ + closing: make(chan struct{}), + Closed: make(chan struct{}), + Stdin: os.Stdin, + Stdout: os.Stdout, + Stderr: os.Stderr, + Logger: zap.NewNop(), + } +} + +// Run parses the config from args and runs the server. +func (cmd *Command) Run(args ...string) error { + // Parse the command line flags. + options, err := cmd.ParseFlags(args...) + if err != nil { + return err + } + + config, err := cmd.ParseConfig(options.GetConfigPath()) + if err != nil { + return fmt.Errorf("parse config: %s", err) + } + + // Apply any environment variables on top of the parsed config + if err := config.ApplyEnvOverrides(cmd.Getenv); err != nil { + return fmt.Errorf("apply env config: %v", err) + } + + // Validate the configuration. + if err := config.Validate(); err != nil { + return fmt.Errorf("%s. To generate a valid configuration file run `influxd config > influxdb.generated.conf`", err) + } + + var logErr error + if cmd.Logger, logErr = config.Logging.New(cmd.Stderr); logErr != nil { + // assign the default logger + cmd.Logger = logger.New(cmd.Stderr) + } + + // Attempt to run pprof on :6060 before startup if debug pprof enabled. + if config.HTTPD.DebugPprofEnabled { + runtime.SetBlockProfileRate(int(1 * time.Second)) + runtime.SetMutexProfileFraction(1) + go func() { http.ListenAndServe("localhost:6060", nil) }() + } + + // Print sweet InfluxDB logo. + if !config.Logging.SuppressLogo && logger.IsTerminal(cmd.Stdout) { + fmt.Fprint(cmd.Stdout, logo) + } + + // Mark start-up in log. + cmd.Logger.Info("InfluxDB starting", + zap.String("version", cmd.Version), + zap.String("branch", cmd.Branch), + zap.String("commit", cmd.Commit)) + cmd.Logger.Info("Go runtime", + zap.String("version", runtime.Version()), + zap.Int("maxprocs", runtime.GOMAXPROCS(0))) + + // If there was an error on startup when creating the logger, output it now. + if logErr != nil { + cmd.Logger.Error("Unable to configure logger", zap.Error(logErr)) + } + + // Write the PID file. + if err := cmd.writePIDFile(options.PIDFile); err != nil { + return fmt.Errorf("write pid file: %s", err) + } + cmd.pidfile = options.PIDFile + + if config.HTTPD.PprofEnabled { + // Turn on block and mutex profiling. + runtime.SetBlockProfileRate(int(1 * time.Second)) + runtime.SetMutexProfileFraction(1) // Collect every sample + } + + // Create server from config and start it. 
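+	// NewServer (see server.go in this package) wires up the meta client, TSDB store, points writer and query executor from the validated config.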
+ buildInfo := &BuildInfo{ + Version: cmd.Version, + Commit: cmd.Commit, + Branch: cmd.Branch, + Time: cmd.BuildTime, + } + s, err := NewServer(config, buildInfo) + if err != nil { + return fmt.Errorf("create server: %s", err) + } + s.Logger = cmd.Logger + s.CPUProfile = options.CPUProfile + s.MemProfile = options.MemProfile + if err := s.Open(); err != nil { + return fmt.Errorf("open server: %s", err) + } + cmd.Server = s + + // Begin monitoring the server's error channel. + go cmd.monitorServerErrors() + + return nil +} + +// Close shuts down the server. +func (cmd *Command) Close() error { + defer close(cmd.Closed) + defer cmd.removePIDFile() + close(cmd.closing) + if cmd.Server != nil { + return cmd.Server.Close() + } + return nil +} + +func (cmd *Command) monitorServerErrors() { + logger := log.New(cmd.Stderr, "", log.LstdFlags) + for { + select { + case err := <-cmd.Server.Err(): + logger.Println(err) + case <-cmd.closing: + return + } + } +} + +func (cmd *Command) removePIDFile() { + if cmd.pidfile != "" { + if err := os.Remove(cmd.pidfile); err != nil { + cmd.Logger.Error("Unable to remove pidfile", zap.Error(err)) + } + } +} + +// ParseFlags parses the command line flags from args and returns an options set. +func (cmd *Command) ParseFlags(args ...string) (Options, error) { + var options Options + fs := flag.NewFlagSet("", flag.ContinueOnError) + fs.StringVar(&options.ConfigPath, "config", "", "") + fs.StringVar(&options.PIDFile, "pidfile", "", "") + // Ignore hostname option. + _ = fs.String("hostname", "", "") + fs.StringVar(&options.CPUProfile, "cpuprofile", "", "") + fs.StringVar(&options.MemProfile, "memprofile", "", "") + fs.Usage = func() { fmt.Fprintln(cmd.Stderr, usage) } + if err := fs.Parse(args); err != nil { + return Options{}, err + } + return options, nil +} + +// writePIDFile writes the process ID to path. +func (cmd *Command) writePIDFile(path string) error { + // Ignore if path is not set. + if path == "" { + return nil + } + + // Ensure the required directory structure exists. + if err := os.MkdirAll(filepath.Dir(path), 0777); err != nil { + return fmt.Errorf("mkdir: %s", err) + } + + // Retrieve the PID and write it. + pid := strconv.Itoa(os.Getpid()) + if err := ioutil.WriteFile(path, []byte(pid), 0666); err != nil { + return fmt.Errorf("write file: %s", err) + } + + return nil +} + +// ParseConfig parses the config at path. +// It returns a demo configuration if path is blank. +func (cmd *Command) ParseConfig(path string) (*Config, error) { + // Use demo configuration if no config path is specified. + if path == "" { + cmd.Logger.Info("No configuration provided, using default settings") + return NewDemoConfig() + } + + cmd.Logger.Info("Loading configuration file", zap.String("path", path)) + + config := NewConfig() + if err := config.FromTomlFile(path); err != nil { + return nil, err + } + + return config, nil +} + +const usage = `Runs the InfluxDB server. + +Usage: influxd run [flags] + + -config + Set the path to the configuration file. + This defaults to the environment variable INFLUXDB_CONFIG_PATH, + ~/.influxdb/influxdb.conf, or /etc/influxdb/influxdb.conf if a file + is present at any of these locations. + Disable the automatic loading of a configuration file using + the null device (such as /dev/null). + -pidfile + Write process ID to a file. + -cpuprofile + Write CPU profiling information to a file. + -memprofile + Write memory usage information to a file. +` + +// Options represents the command line options that can be parsed. 
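+// Each field maps to one of the influxd run flags parsed in ParseFlags: -config, -pidfile, -cpuprofile and -memprofile.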
+type Options struct { + ConfigPath string + PIDFile string + CPUProfile string + MemProfile string +} + +// GetConfigPath returns the config path from the options. +// It will return a path by searching in this order: +// 1. The CLI option in ConfigPath +// 2. The environment variable INFLUXDB_CONFIG_PATH +// 3. The first influxdb.conf file on the path: +// - ~/.influxdb +// - /etc/influxdb +func (opt *Options) GetConfigPath() string { + if opt.ConfigPath != "" { + if opt.ConfigPath == os.DevNull { + return "" + } + return opt.ConfigPath + } else if envVar := os.Getenv("INFLUXDB_CONFIG_PATH"); envVar != "" { + return envVar + } + + for _, path := range []string{ + os.ExpandEnv("${HOME}/.influxdb/influxdb.conf"), + "/etc/influxdb/influxdb.conf", + } { + if _, err := os.Stat(path); err == nil { + return path + } + } + return "" +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influxd/run/command_test.go b/vendor/github.com/influxdata/influxdb/cmd/influxd/run/command_test.go new file mode 100644 index 0000000..7507f32 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influxd/run/command_test.go @@ -0,0 +1,64 @@ +package run_test + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" + "time" + + "github.com/influxdata/influxdb/cmd/influxd/run" +) + +func TestCommand_PIDFile(t *testing.T) { + tmpdir, err := ioutil.TempDir(os.TempDir(), "influxd-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + pidFile := filepath.Join(tmpdir, "influxdb.pid") + + // Override the default data/wal dir so it doesn't look in ~/.influxdb which + // might have junk not related to this test. + os.Setenv("INFLUXDB_DATA_DIR", tmpdir) + os.Setenv("INFLUXDB_DATA_WAL_DIR", tmpdir) + + cmd := run.NewCommand() + cmd.Getenv = func(key string) string { + switch key { + case "INFLUXDB_DATA_DIR": + return filepath.Join(tmpdir, "data") + case "INFLUXDB_META_DIR": + return filepath.Join(tmpdir, "meta") + case "INFLUXDB_DATA_WAL_DIR": + return filepath.Join(tmpdir, "wal") + case "INFLUXDB_BIND_ADDRESS", "INFLUXDB_HTTP_BIND_ADDRESS": + return "127.0.0.1:0" + case "INFLUXDB_REPORTING_DISABLED": + return "true" + default: + return os.Getenv(key) + } + } + if err := cmd.Run("-pidfile", pidFile, "-config", os.DevNull); err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if _, err := os.Stat(pidFile); err != nil { + t.Fatalf("could not stat pid file: %s", err) + } + go cmd.Close() + + timeout := time.NewTimer(100 * time.Millisecond) + select { + case <-timeout.C: + t.Fatal("unexpected timeout") + case <-cmd.Closed: + timeout.Stop() + } + + if _, err := os.Stat(pidFile); err == nil { + t.Fatal("expected pid file to be removed") + } +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influxd/run/config.go b/vendor/github.com/influxdata/influxdb/cmd/influxd/run/config.go new file mode 100644 index 0000000..47b57c2 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influxd/run/config.go @@ -0,0 +1,262 @@ +package run + +import ( + "fmt" + "io/ioutil" + "log" + "os" + "os/user" + "path/filepath" + "regexp" + "strings" + + "github.com/BurntSushi/toml" + "github.com/influxdata/influxdb/coordinator" + "github.com/influxdata/influxdb/logger" + "github.com/influxdata/influxdb/monitor" + "github.com/influxdata/influxdb/monitor/diagnostics" + "github.com/influxdata/influxdb/pkg/tlsconfig" + "github.com/influxdata/influxdb/services/collectd" + "github.com/influxdata/influxdb/services/continuous_querier" + "github.com/influxdata/influxdb/services/graphite" + 
"github.com/influxdata/influxdb/services/httpd" + "github.com/influxdata/influxdb/services/meta" + "github.com/influxdata/influxdb/services/opentsdb" + "github.com/influxdata/influxdb/services/precreator" + "github.com/influxdata/influxdb/services/retention" + "github.com/influxdata/influxdb/services/storage" + "github.com/influxdata/influxdb/services/subscriber" + "github.com/influxdata/influxdb/services/udp" + itoml "github.com/influxdata/influxdb/toml" + "github.com/influxdata/influxdb/tsdb" + "golang.org/x/text/encoding/unicode" + "golang.org/x/text/transform" +) + +const ( + // DefaultBindAddress is the default address for various RPC services. + DefaultBindAddress = "127.0.0.1:8088" +) + +// Config represents the configuration format for the influxd binary. +type Config struct { + Meta *meta.Config `toml:"meta"` + Data tsdb.Config `toml:"data"` + Coordinator coordinator.Config `toml:"coordinator"` + Retention retention.Config `toml:"retention"` + Precreator precreator.Config `toml:"shard-precreation"` + + Monitor monitor.Config `toml:"monitor"` + Subscriber subscriber.Config `toml:"subscriber"` + HTTPD httpd.Config `toml:"http"` + Logging logger.Config `toml:"logging"` + Storage storage.Config `toml:"ifql"` + GraphiteInputs []graphite.Config `toml:"graphite"` + CollectdInputs []collectd.Config `toml:"collectd"` + OpenTSDBInputs []opentsdb.Config `toml:"opentsdb"` + UDPInputs []udp.Config `toml:"udp"` + + ContinuousQuery continuous_querier.Config `toml:"continuous_queries"` + + // Server reporting + ReportingDisabled bool `toml:"reporting-disabled"` + + // BindAddress is the address that all TCP services use (Raft, Snapshot, Cluster, etc.) + BindAddress string `toml:"bind-address"` + + // TLS provides configuration options for all https endpoints. + TLS tlsconfig.Config `toml:"tls"` +} + +// NewConfig returns an instance of Config with reasonable defaults. +func NewConfig() *Config { + c := &Config{} + c.Meta = meta.NewConfig() + c.Data = tsdb.NewConfig() + c.Coordinator = coordinator.NewConfig() + c.Precreator = precreator.NewConfig() + + c.Monitor = monitor.NewConfig() + c.Subscriber = subscriber.NewConfig() + c.HTTPD = httpd.NewConfig() + c.Logging = logger.NewConfig() + c.Storage = storage.NewConfig() + + c.GraphiteInputs = []graphite.Config{graphite.NewConfig()} + c.CollectdInputs = []collectd.Config{collectd.NewConfig()} + c.OpenTSDBInputs = []opentsdb.Config{opentsdb.NewConfig()} + c.UDPInputs = []udp.Config{udp.NewConfig()} + + c.ContinuousQuery = continuous_querier.NewConfig() + c.Retention = retention.NewConfig() + c.BindAddress = DefaultBindAddress + + return c +} + +// NewDemoConfig returns the config that runs when no config is specified. +func NewDemoConfig() (*Config, error) { + c := NewConfig() + + var homeDir string + // By default, store meta and data files in current users home directory + u, err := user.Current() + if err == nil { + homeDir = u.HomeDir + } else if os.Getenv("HOME") != "" { + homeDir = os.Getenv("HOME") + } else { + return nil, fmt.Errorf("failed to determine current user for storage") + } + + c.Meta.Dir = filepath.Join(homeDir, ".influxdb/meta") + c.Data.Dir = filepath.Join(homeDir, ".influxdb/data") + c.Data.WALDir = filepath.Join(homeDir, ".influxdb/wal") + + return c, nil +} + +// FromTomlFile loads the config from a TOML file. +func (c *Config) FromTomlFile(fpath string) error { + bs, err := ioutil.ReadFile(fpath) + if err != nil { + return err + } + + // Handle any potential Byte-Order-Marks that may be in the config file. 
+ // This is for Windows compatibility only. + // See https://github.com/influxdata/telegraf/issues/1378 and + // https://github.com/influxdata/influxdb/issues/8965. + bom := unicode.BOMOverride(transform.Nop) + bs, _, err = transform.Bytes(bom, bs) + if err != nil { + return err + } + return c.FromToml(string(bs)) +} + +// FromToml loads the config from TOML. +func (c *Config) FromToml(input string) error { + // Replace deprecated [cluster] with [coordinator] + re := regexp.MustCompile(`(?m)^\s*\[cluster\]`) + input = re.ReplaceAllStringFunc(input, func(in string) string { + in = strings.TrimSpace(in) + out := "[coordinator]" + log.Printf("deprecated config option %s replaced with %s; %s will not be supported in a future release\n", in, out, in) + return out + }) + + _, err := toml.Decode(input, c) + return err +} + +// Validate returns an error if the config is invalid. +func (c *Config) Validate() error { + if err := c.Meta.Validate(); err != nil { + return err + } + + if err := c.Data.Validate(); err != nil { + return err + } + + if err := c.Monitor.Validate(); err != nil { + return err + } + + if err := c.ContinuousQuery.Validate(); err != nil { + return err + } + + if err := c.Retention.Validate(); err != nil { + return err + } + + if err := c.Precreator.Validate(); err != nil { + return err + } + + if err := c.Subscriber.Validate(); err != nil { + return err + } + + for _, graphite := range c.GraphiteInputs { + if err := graphite.Validate(); err != nil { + return fmt.Errorf("invalid graphite config: %v", err) + } + } + + for _, collectd := range c.CollectdInputs { + if err := collectd.Validate(); err != nil { + return fmt.Errorf("invalid collectd config: %v", err) + } + } + + if err := c.TLS.Validate(); err != nil { + return err + } + + return nil +} + +// ApplyEnvOverrides apply the environment configuration on top of the config. +func (c *Config) ApplyEnvOverrides(getenv func(string) string) error { + return itoml.ApplyEnvOverrides(getenv, "INFLUXDB", c) +} + +// Diagnostics returns a diagnostics representation of Config. +func (c *Config) Diagnostics() (*diagnostics.Diagnostics, error) { + return diagnostics.RowFromMap(map[string]interface{}{ + "reporting-disabled": c.ReportingDisabled, + "bind-address": c.BindAddress, + }), nil +} + +func (c *Config) diagnosticsClients() map[string]diagnostics.Client { + // Config settings that are always present. + m := map[string]diagnostics.Client{ + "config": c, + + "config-data": c.Data, + "config-meta": c.Meta, + "config-coordinator": c.Coordinator, + "config-retention": c.Retention, + "config-precreator": c.Precreator, + + "config-monitor": c.Monitor, + "config-subscriber": c.Subscriber, + "config-httpd": c.HTTPD, + + "config-cqs": c.ContinuousQuery, + } + + // Config settings that can be repeated and can be disabled. + if g := graphite.Configs(c.GraphiteInputs); g.Enabled() { + m["config-graphite"] = g + } + if cc := collectd.Configs(c.CollectdInputs); cc.Enabled() { + m["config-collectd"] = cc + } + if t := opentsdb.Configs(c.OpenTSDBInputs); t.Enabled() { + m["config-opentsdb"] = t + } + if u := udp.Configs(c.UDPInputs); u.Enabled() { + m["config-udp"] = u + } + + return m +} + +// registerDiagnostics registers the config settings with the Monitor. +func (c *Config) registerDiagnostics(m *monitor.Monitor) { + for name, dc := range c.diagnosticsClients() { + m.RegisterDiagnosticsClient(name, dc) + } +} + +// registerDiagnostics deregisters the config settings from the Monitor. 
+func (c *Config) deregisterDiagnostics(m *monitor.Monitor) { + for name := range c.diagnosticsClients() { + m.DeregisterDiagnosticsClient(name) + } +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influxd/run/config_command.go b/vendor/github.com/influxdata/influxdb/cmd/influxd/run/config_command.go new file mode 100644 index 0000000..3e75aaa --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influxd/run/config_command.go @@ -0,0 +1,92 @@ +package run + +import ( + "flag" + "fmt" + "io" + "os" + + "github.com/BurntSushi/toml" +) + +// PrintConfigCommand represents the command executed by "influxd config". +type PrintConfigCommand struct { + Stdin io.Reader + Stdout io.Writer + Stderr io.Writer +} + +// NewPrintConfigCommand return a new instance of PrintConfigCommand. +func NewPrintConfigCommand() *PrintConfigCommand { + return &PrintConfigCommand{ + Stdin: os.Stdin, + Stdout: os.Stdout, + Stderr: os.Stderr, + } +} + +// Run parses and prints the current config loaded. +func (cmd *PrintConfigCommand) Run(args ...string) error { + // Parse command flags. + fs := flag.NewFlagSet("", flag.ContinueOnError) + configPath := fs.String("config", "", "") + fs.Usage = func() { fmt.Fprintln(cmd.Stderr, printConfigUsage) } + if err := fs.Parse(args); err != nil { + return err + } + + // Parse config from path. + opt := Options{ConfigPath: *configPath} + config, err := cmd.parseConfig(opt.GetConfigPath()) + if err != nil { + return fmt.Errorf("parse config: %s", err) + } + + // Apply any environment variables on top of the parsed config + if err := config.ApplyEnvOverrides(os.Getenv); err != nil { + return fmt.Errorf("apply env config: %v", err) + } + + // Validate the configuration. + if err := config.Validate(); err != nil { + return fmt.Errorf("%s. To generate a valid configuration file run `influxd config > influxdb.generated.conf`", err) + } + + toml.NewEncoder(cmd.Stdout).Encode(config) + fmt.Fprint(cmd.Stdout, "\n") + + return nil +} + +// ParseConfig parses the config at path. +// Returns a demo configuration if path is blank. +func (cmd *PrintConfigCommand) parseConfig(path string) (*Config, error) { + config, err := NewDemoConfig() + if err != nil { + config = NewConfig() + } + + if path == "" { + return config, nil + } + + fmt.Fprintf(os.Stderr, "Merging with configuration at: %s\n", path) + + if err := config.FromTomlFile(path); err != nil { + return nil, err + } + return config, nil +} + +var printConfigUsage = `Displays the default configuration. + +Usage: influxd config [flags] + + -config + Set the path to the initial configuration file. + This defaults to the environment variable INFLUXDB_CONFIG_PATH, + ~/.influxdb/influxdb.conf, or /etc/influxdb/influxdb.conf if a file + is present at any of these locations. + Disable the automatic loading of a configuration file using + the null device (such as /dev/null). 
+` diff --git a/vendor/github.com/influxdata/influxdb/cmd/influxd/run/config_test.go b/vendor/github.com/influxdata/influxdb/cmd/influxd/run/config_test.go new file mode 100644 index 0000000..d4ff07b --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influxd/run/config_test.go @@ -0,0 +1,522 @@ +package run_test + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "testing" + "time" + + "github.com/BurntSushi/toml" + "github.com/influxdata/influxdb/cmd/influxd/run" + influxtoml "github.com/influxdata/influxdb/toml" + "go.uber.org/zap/zapcore" + "golang.org/x/text/encoding/unicode" + "golang.org/x/text/transform" +) + +// Ensure the configuration can be parsed. +func TestConfig_Parse(t *testing.T) { + // Parse configuration. + var c run.Config + if err := c.FromToml(` +[meta] +dir = "/tmp/meta" + +[data] +dir = "/tmp/data" + +[coordinator] + +[http] +bind-address = ":8087" + +[[graphite]] +protocol = "udp" + +[[graphite]] +protocol = "tcp" + +[[collectd]] +bind-address = ":1000" + +[[collectd]] +bind-address = ":1010" + +[[opentsdb]] +bind-address = ":2000" + +[[opentsdb]] +bind-address = ":2010" + +[[opentsdb]] +bind-address = ":2020" + +[[udp]] +bind-address = ":4444" + +[monitoring] +enabled = true + +[subscriber] +enabled = true + +[continuous_queries] +enabled = true + +[tls] +ciphers = ["cipher"] +`); err != nil { + t.Fatal(err) + } + + // Validate configuration. + if c.Meta.Dir != "/tmp/meta" { + t.Fatalf("unexpected meta dir: %s", c.Meta.Dir) + } else if c.Data.Dir != "/tmp/data" { + t.Fatalf("unexpected data dir: %s", c.Data.Dir) + } else if c.HTTPD.BindAddress != ":8087" { + t.Fatalf("unexpected api bind address: %s", c.HTTPD.BindAddress) + } else if len(c.GraphiteInputs) != 2 { + t.Fatalf("unexpected graphiteInputs count: %d", len(c.GraphiteInputs)) + } else if c.GraphiteInputs[0].Protocol != "udp" { + t.Fatalf("unexpected graphite protocol(0): %s", c.GraphiteInputs[0].Protocol) + } else if c.GraphiteInputs[1].Protocol != "tcp" { + t.Fatalf("unexpected graphite protocol(1): %s", c.GraphiteInputs[1].Protocol) + } else if c.CollectdInputs[0].BindAddress != ":1000" { + t.Fatalf("unexpected collectd bind address: %s", c.CollectdInputs[0].BindAddress) + } else if c.CollectdInputs[1].BindAddress != ":1010" { + t.Fatalf("unexpected collectd bind address: %s", c.CollectdInputs[1].BindAddress) + } else if c.OpenTSDBInputs[0].BindAddress != ":2000" { + t.Fatalf("unexpected opentsdb bind address: %s", c.OpenTSDBInputs[0].BindAddress) + } else if c.OpenTSDBInputs[1].BindAddress != ":2010" { + t.Fatalf("unexpected opentsdb bind address: %s", c.OpenTSDBInputs[1].BindAddress) + } else if c.OpenTSDBInputs[2].BindAddress != ":2020" { + t.Fatalf("unexpected opentsdb bind address: %s", c.OpenTSDBInputs[2].BindAddress) + } else if c.UDPInputs[0].BindAddress != ":4444" { + t.Fatalf("unexpected udp bind address: %s", c.UDPInputs[0].BindAddress) + } else if !c.Subscriber.Enabled { + t.Fatalf("unexpected subscriber enabled: %v", c.Subscriber.Enabled) + } else if !c.ContinuousQuery.Enabled { + t.Fatalf("unexpected continuous query enabled: %v", c.ContinuousQuery.Enabled) + } else if c.TLS.Ciphers[0] != "cipher" { + t.Fatalf("unexpected tls: %q", c.TLS.Ciphers) + } +} + +// Ensure the configuration can be parsed. +func TestConfig_Parse_EnvOverride(t *testing.T) { + // Parse configuration. 
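+	// Overrides are applied after decoding: keys follow INFLUXDB_<SECTION>_<OPTION>, and a numeric segment (e.g. INFLUXDB_UDP_0_BIND_ADDRESS) targets one entry of a repeated section such as [[udp]].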
+ var c run.Config + if _, err := toml.Decode(` +[meta] +dir = "/tmp/meta" + +[data] +dir = "/tmp/data" + +[coordinator] + +[admin] +bind-address = ":8083" + +[http] +bind-address = ":8087" + +[[graphite]] +protocol = "udp" +templates = [ + "default.* .template.in.config" +] + +[[graphite]] +protocol = "tcp" + +[[collectd]] +bind-address = ":1000" + +[[collectd]] +bind-address = ":1010" + +[[opentsdb]] +bind-address = ":2000" + +[[opentsdb]] +bind-address = ":2010" + +[[udp]] +bind-address = ":4444" + +[[udp]] + +[monitoring] +enabled = true + +[continuous_queries] +enabled = true + +[tls] +min-version = "tls1.0" +`, &c); err != nil { + t.Fatal(err) + } + + getenv := func(s string) string { + switch s { + case "INFLUXDB_UDP_BIND_ADDRESS": + return ":1234" + case "INFLUXDB_UDP_0_BIND_ADDRESS": + return ":5555" + case "INFLUXDB_GRAPHITE_0_TEMPLATES_0": + return "override.* .template.0" + case "INFLUXDB_GRAPHITE_1_TEMPLATES": + return "override.* .template.1.1,override.* .template.1.2" + case "INFLUXDB_GRAPHITE_1_PROTOCOL": + return "udp" + case "INFLUXDB_COLLECTD_1_BIND_ADDRESS": + return ":1020" + case "INFLUXDB_OPENTSDB_0_BIND_ADDRESS": + return ":2020" + case "INFLUXDB_DATA_CACHE_MAX_MEMORY_SIZE": + // uint64 type + return "1000" + case "INFLUXDB_LOGGING_LEVEL": + // logging type + return "warn" + case "INFLUXDB_COORDINATOR_QUERY_TIMEOUT": + // duration type + return "1m" + case "INFLUXDB_TLS_MIN_VERSION": + return "tls1.2" + } + return "" + } + + if err := c.ApplyEnvOverrides(getenv); err != nil { + t.Fatalf("failed to apply env overrides: %v", err) + } + + if c.UDPInputs[0].BindAddress != ":5555" { + t.Fatalf("unexpected udp bind address: %s", c.UDPInputs[0].BindAddress) + } + + if c.UDPInputs[1].BindAddress != ":1234" { + t.Fatalf("unexpected udp bind address: %s", c.UDPInputs[1].BindAddress) + } + + if len(c.GraphiteInputs[0].Templates) != 1 || c.GraphiteInputs[0].Templates[0] != "override.* .template.0" { + t.Fatalf("unexpected graphite 0 templates: %+v", c.GraphiteInputs[0].Templates) + } + + if len(c.GraphiteInputs[1].Templates) != 2 || c.GraphiteInputs[1].Templates[1] != "override.* .template.1.2" { + t.Fatalf("unexpected graphite 1 templates: %+v", c.GraphiteInputs[1].Templates) + } + + if c.GraphiteInputs[1].Protocol != "udp" { + t.Fatalf("unexpected graphite protocol: %s", c.GraphiteInputs[1].Protocol) + } + + if c.CollectdInputs[1].BindAddress != ":1020" { + t.Fatalf("unexpected collectd bind address: %s", c.CollectdInputs[1].BindAddress) + } + + if c.OpenTSDBInputs[0].BindAddress != ":2020" { + t.Fatalf("unexpected opentsdb bind address: %s", c.OpenTSDBInputs[0].BindAddress) + } + + if c.Data.CacheMaxMemorySize != 1000 { + t.Fatalf("unexpected cache max memory size: %v", c.Data.CacheMaxMemorySize) + } + + if c.Logging.Level != zapcore.WarnLevel { + t.Fatalf("unexpected logging level: %v", c.Logging.Level) + } + + if c.Coordinator.QueryTimeout != influxtoml.Duration(time.Minute) { + t.Fatalf("unexpected query timeout: %v", c.Coordinator.QueryTimeout) + } + + if c.TLS.MinVersion != "tls1.2" { + t.Fatalf("unexpected tls min version: %q", c.TLS.MinVersion) + } +} + +func TestConfig_ValidateNoServiceConfigured(t *testing.T) { + var c run.Config + if _, err := toml.Decode(` +[meta] +enabled = false + +[data] +enabled = false +`, &c); err != nil { + t.Fatal(err) + } + + if e := c.Validate(); e == nil { + t.Fatalf("got nil, expected error") + } +} + +func TestConfig_ValidateMonitorStore_MetaOnly(t *testing.T) { + c := run.NewConfig() + if _, err := toml.Decode(` +[monitor] 
+store-enabled = true + +[meta] +dir = "foo" + +[data] +enabled = false +`, &c); err != nil { + t.Fatal(err) + } + + if err := c.Validate(); err == nil { + t.Fatalf("got nil, expected error") + } +} + +func TestConfig_DeprecatedOptions(t *testing.T) { + // Parse configuration. + var c run.Config + if err := c.FromToml(` +[cluster] +max-select-point = 100 +`); err != nil { + t.Fatal(err) + } + + // Validate configuration. + if c.Coordinator.MaxSelectPointN != 100 { + t.Fatalf("unexpected coordinator max select points: %d", c.Coordinator.MaxSelectPointN) + + } +} + +// Ensure that Config.Validate correctly validates the individual subsections. +func TestConfig_InvalidSubsections(t *testing.T) { + // Precondition: NewDemoConfig must validate correctly. + c, err := run.NewDemoConfig() + if err != nil { + t.Fatalf("error creating demo config: %s", err) + } + if err := c.Validate(); err != nil { + t.Fatalf("new demo config failed validation: %s", err) + } + + // For each subsection, load a config with a single invalid setting. + for _, tc := range []struct { + section string + kv string + }{ + {"meta", `dir = ""`}, + {"data", `dir = ""`}, + {"monitor", `store-database = ""`}, + {"continuous_queries", `run-interval = "0s"`}, + {"subscriber", `http-timeout = "0s"`}, + {"retention", `check-interval = "0s"`}, + {"shard-precreation", `advance-period = "0s"`}, + } { + c, err := run.NewDemoConfig() + if err != nil { + t.Fatalf("error creating demo config: %s", err) + } + + s := fmt.Sprintf("\n[%s]\n%s\n", tc.section, tc.kv) + if err := c.FromToml(s); err != nil { + t.Fatalf("error loading toml %q: %s", s, err) + } + + if err := c.Validate(); err == nil { + t.Fatalf("expected error but got nil for config: %s", s) + } + } +} + +// Ensure the configuration can be parsed when a Byte-Order-Mark is present. +func TestConfig_Parse_UTF8_ByteOrderMark(t *testing.T) { + // Parse configuration. + var c run.Config + f, err := ioutil.TempFile("", "influxd") + if err != nil { + t.Fatal(err) + } + defer os.Remove(f.Name()) + + f.WriteString("\ufeff") + f.WriteString(` +[meta] +dir = "/tmp/meta" + +[data] +dir = "/tmp/data" + +[coordinator] + +[http] +bind-address = ":8087" + +[[graphite]] +protocol = "udp" + +[[graphite]] +protocol = "tcp" + +[[collectd]] +bind-address = ":1000" + +[[collectd]] +bind-address = ":1010" + +[[opentsdb]] +bind-address = ":2000" + +[[opentsdb]] +bind-address = ":2010" + +[[opentsdb]] +bind-address = ":2020" + +[[udp]] +bind-address = ":4444" + +[monitoring] +enabled = true + +[subscriber] +enabled = true + +[continuous_queries] +enabled = true +`) + if err := c.FromTomlFile(f.Name()); err != nil { + t.Fatal(err) + } + + // Validate configuration. 
+ if c.Meta.Dir != "/tmp/meta" { + t.Fatalf("unexpected meta dir: %s", c.Meta.Dir) + } else if c.Data.Dir != "/tmp/data" { + t.Fatalf("unexpected data dir: %s", c.Data.Dir) + } else if c.HTTPD.BindAddress != ":8087" { + t.Fatalf("unexpected api bind address: %s", c.HTTPD.BindAddress) + } else if len(c.GraphiteInputs) != 2 { + t.Fatalf("unexpected graphiteInputs count: %d", len(c.GraphiteInputs)) + } else if c.GraphiteInputs[0].Protocol != "udp" { + t.Fatalf("unexpected graphite protocol(0): %s", c.GraphiteInputs[0].Protocol) + } else if c.GraphiteInputs[1].Protocol != "tcp" { + t.Fatalf("unexpected graphite protocol(1): %s", c.GraphiteInputs[1].Protocol) + } else if c.CollectdInputs[0].BindAddress != ":1000" { + t.Fatalf("unexpected collectd bind address: %s", c.CollectdInputs[0].BindAddress) + } else if c.CollectdInputs[1].BindAddress != ":1010" { + t.Fatalf("unexpected collectd bind address: %s", c.CollectdInputs[1].BindAddress) + } else if c.OpenTSDBInputs[0].BindAddress != ":2000" { + t.Fatalf("unexpected opentsdb bind address: %s", c.OpenTSDBInputs[0].BindAddress) + } else if c.OpenTSDBInputs[1].BindAddress != ":2010" { + t.Fatalf("unexpected opentsdb bind address: %s", c.OpenTSDBInputs[1].BindAddress) + } else if c.OpenTSDBInputs[2].BindAddress != ":2020" { + t.Fatalf("unexpected opentsdb bind address: %s", c.OpenTSDBInputs[2].BindAddress) + } else if c.UDPInputs[0].BindAddress != ":4444" { + t.Fatalf("unexpected udp bind address: %s", c.UDPInputs[0].BindAddress) + } else if !c.Subscriber.Enabled { + t.Fatalf("unexpected subscriber enabled: %v", c.Subscriber.Enabled) + } else if !c.ContinuousQuery.Enabled { + t.Fatalf("unexpected continuous query enabled: %v", c.ContinuousQuery.Enabled) + } +} + +// Ensure the configuration can be parsed when a Byte-Order-Mark is present. +func TestConfig_Parse_UTF16_ByteOrderMark(t *testing.T) { + // Parse configuration. + var c run.Config + f, err := ioutil.TempFile("", "influxd") + if err != nil { + t.Fatal(err) + } + defer os.Remove(f.Name()) + + utf16 := unicode.UTF16(unicode.BigEndian, unicode.UseBOM) + w := transform.NewWriter(f, utf16.NewEncoder()) + io.WriteString(w, ` +[meta] +dir = "/tmp/meta" + +[data] +dir = "/tmp/data" + +[coordinator] + +[http] +bind-address = ":8087" + +[[graphite]] +protocol = "udp" + +[[graphite]] +protocol = "tcp" + +[[collectd]] +bind-address = ":1000" + +[[collectd]] +bind-address = ":1010" + +[[opentsdb]] +bind-address = ":2000" + +[[opentsdb]] +bind-address = ":2010" + +[[opentsdb]] +bind-address = ":2020" + +[[udp]] +bind-address = ":4444" + +[monitoring] +enabled = true + +[subscriber] +enabled = true + +[continuous_queries] +enabled = true +`) + if err := c.FromTomlFile(f.Name()); err != nil { + t.Fatal(err) + } + + // Validate configuration. 
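+	// Same checks as the UTF-8 variant; here the file was written as UTF-16BE with a BOM, so FromTomlFile must transcode it before TOML decoding.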
+ if c.Meta.Dir != "/tmp/meta" { + t.Fatalf("unexpected meta dir: %s", c.Meta.Dir) + } else if c.Data.Dir != "/tmp/data" { + t.Fatalf("unexpected data dir: %s", c.Data.Dir) + } else if c.HTTPD.BindAddress != ":8087" { + t.Fatalf("unexpected api bind address: %s", c.HTTPD.BindAddress) + } else if len(c.GraphiteInputs) != 2 { + t.Fatalf("unexpected graphiteInputs count: %d", len(c.GraphiteInputs)) + } else if c.GraphiteInputs[0].Protocol != "udp" { + t.Fatalf("unexpected graphite protocol(0): %s", c.GraphiteInputs[0].Protocol) + } else if c.GraphiteInputs[1].Protocol != "tcp" { + t.Fatalf("unexpected graphite protocol(1): %s", c.GraphiteInputs[1].Protocol) + } else if c.CollectdInputs[0].BindAddress != ":1000" { + t.Fatalf("unexpected collectd bind address: %s", c.CollectdInputs[0].BindAddress) + } else if c.CollectdInputs[1].BindAddress != ":1010" { + t.Fatalf("unexpected collectd bind address: %s", c.CollectdInputs[1].BindAddress) + } else if c.OpenTSDBInputs[0].BindAddress != ":2000" { + t.Fatalf("unexpected opentsdb bind address: %s", c.OpenTSDBInputs[0].BindAddress) + } else if c.OpenTSDBInputs[1].BindAddress != ":2010" { + t.Fatalf("unexpected opentsdb bind address: %s", c.OpenTSDBInputs[1].BindAddress) + } else if c.OpenTSDBInputs[2].BindAddress != ":2020" { + t.Fatalf("unexpected opentsdb bind address: %s", c.OpenTSDBInputs[2].BindAddress) + } else if c.UDPInputs[0].BindAddress != ":4444" { + t.Fatalf("unexpected udp bind address: %s", c.UDPInputs[0].BindAddress) + } else if !c.Subscriber.Enabled { + t.Fatalf("unexpected subscriber enabled: %v", c.Subscriber.Enabled) + } else if !c.ContinuousQuery.Enabled { + t.Fatalf("unexpected continuous query enabled: %v", c.ContinuousQuery.Enabled) + } +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influxd/run/server.go b/vendor/github.com/influxdata/influxdb/cmd/influxd/run/server.go new file mode 100644 index 0000000..67c9893 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influxd/run/server.go @@ -0,0 +1,655 @@ +package run + +import ( + "crypto/tls" + "fmt" + "io" + "log" + "net" + "os" + "path/filepath" + "runtime" + "runtime/pprof" + "time" + + "github.com/influxdata/influxdb" + "github.com/influxdata/influxdb/coordinator" + "github.com/influxdata/influxdb/logger" + "github.com/influxdata/influxdb/models" + "github.com/influxdata/influxdb/monitor" + "github.com/influxdata/influxdb/query" + "github.com/influxdata/influxdb/services/collectd" + "github.com/influxdata/influxdb/services/continuous_querier" + "github.com/influxdata/influxdb/services/graphite" + "github.com/influxdata/influxdb/services/httpd" + "github.com/influxdata/influxdb/services/meta" + "github.com/influxdata/influxdb/services/opentsdb" + "github.com/influxdata/influxdb/services/precreator" + "github.com/influxdata/influxdb/services/retention" + "github.com/influxdata/influxdb/services/snapshotter" + "github.com/influxdata/influxdb/services/subscriber" + "github.com/influxdata/influxdb/services/udp" + "github.com/influxdata/influxdb/tcp" + "github.com/influxdata/influxdb/tsdb" + client "github.com/influxdata/usage-client/v1" + "go.uber.org/zap" + + "github.com/influxdata/influxdb/services/storage" + // Initialize the engine package + _ "github.com/influxdata/influxdb/tsdb/engine" + // Initialize the index package + _ "github.com/influxdata/influxdb/tsdb/index" +) + +var startTime time.Time + +func init() { + startTime = time.Now().UTC() +} + +// BuildInfo represents the build details for the server code. 
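+// The values are assumed to be injected by the build tooling (for example via -ldflags) and are echoed in the startup log and anonymous usage report.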
+type BuildInfo struct { + Version string + Commit string + Branch string + Time string +} + +// Server represents a container for the metadata and storage data and services. +// It is built using a Config and it manages the startup and shutdown of all +// services in the proper order. +type Server struct { + buildInfo BuildInfo + + err chan error + closing chan struct{} + + BindAddress string + Listener net.Listener + + Logger *zap.Logger + + MetaClient *meta.Client + + TSDBStore *tsdb.Store + QueryExecutor *query.Executor + PointsWriter *coordinator.PointsWriter + Subscriber *subscriber.Service + + Services []Service + + // These references are required for the tcp muxer. + SnapshotterService *snapshotter.Service + + Monitor *monitor.Monitor + + // Server reporting and registration + reportingDisabled bool + + // Profiling + CPUProfile string + MemProfile string + + // httpAPIAddr is the host:port combination for the main HTTP API for querying and writing data + httpAPIAddr string + + // httpUseTLS specifies if we should use a TLS connection to the http servers + httpUseTLS bool + + // tcpAddr is the host:port combination for the TCP listener that services mux onto + tcpAddr string + + config *Config +} + +// updateTLSConfig stores with into the tls config pointed at by into but only if with is not nil +// and into is nil. Think of it as setting the default value. +func updateTLSConfig(into **tls.Config, with *tls.Config) { + if with != nil && into != nil && *into == nil { + *into = with + } +} + +// NewServer returns a new instance of Server built from a config. +func NewServer(c *Config, buildInfo *BuildInfo) (*Server, error) { + // First grab the base tls config we will use for all clients and servers + tlsConfig, err := c.TLS.Parse() + if err != nil { + return nil, fmt.Errorf("tls configuration: %v", err) + } + + // Update the TLS values on each of the configs to be the parsed one if + // not already specified (set the default). + updateTLSConfig(&c.HTTPD.TLS, tlsConfig) + updateTLSConfig(&c.Subscriber.TLS, tlsConfig) + for i := range c.OpenTSDBInputs { + updateTLSConfig(&c.OpenTSDBInputs[i].TLS, tlsConfig) + } + + // We need to ensure that a meta directory always exists even if + // we don't start the meta store. node.json is always stored under + // the meta directory. + if err := os.MkdirAll(c.Meta.Dir, 0777); err != nil { + return nil, fmt.Errorf("mkdir all: %s", err) + } + + // 0.10-rc1 and prior would sometimes put the node.json at the root + // dir which breaks backup/restore and restarting nodes. This moves + // the file from the root so it's always under the meta dir. + oldPath := filepath.Join(filepath.Dir(c.Meta.Dir), "node.json") + newPath := filepath.Join(c.Meta.Dir, "node.json") + + if _, err := os.Stat(oldPath); err == nil { + if err := os.Rename(oldPath, newPath); err != nil { + return nil, err + } + } + + _, err = influxdb.LoadNode(c.Meta.Dir) + if err != nil { + if !os.IsNotExist(err) { + return nil, err + } + } + + if err := raftDBExists(c.Meta.Dir); err != nil { + return nil, err + } + + // In 0.10.0 bind-address got moved to the top level. 
Check + // The old location to keep things backwards compatible + bind := c.BindAddress + + s := &Server{ + buildInfo: *buildInfo, + err: make(chan error), + closing: make(chan struct{}), + + BindAddress: bind, + + Logger: logger.New(os.Stderr), + + MetaClient: meta.NewClient(c.Meta), + + reportingDisabled: c.ReportingDisabled, + + httpAPIAddr: c.HTTPD.BindAddress, + httpUseTLS: c.HTTPD.HTTPSEnabled, + tcpAddr: bind, + + config: c, + } + s.Monitor = monitor.New(s, c.Monitor) + s.config.registerDiagnostics(s.Monitor) + + if err := s.MetaClient.Open(); err != nil { + return nil, err + } + + s.TSDBStore = tsdb.NewStore(c.Data.Dir) + s.TSDBStore.EngineOptions.Config = c.Data + + // Copy TSDB configuration. + s.TSDBStore.EngineOptions.EngineVersion = c.Data.Engine + s.TSDBStore.EngineOptions.IndexVersion = c.Data.Index + + // Create the Subscriber service + s.Subscriber = subscriber.NewService(c.Subscriber) + + // Initialize points writer. + s.PointsWriter = coordinator.NewPointsWriter() + s.PointsWriter.WriteTimeout = time.Duration(c.Coordinator.WriteTimeout) + s.PointsWriter.TSDBStore = s.TSDBStore + + // Initialize query executor. + s.QueryExecutor = query.NewExecutor() + s.QueryExecutor.StatementExecutor = &coordinator.StatementExecutor{ + MetaClient: s.MetaClient, + TaskManager: s.QueryExecutor.TaskManager, + TSDBStore: s.TSDBStore, + ShardMapper: &coordinator.LocalShardMapper{ + MetaClient: s.MetaClient, + TSDBStore: coordinator.LocalTSDBStore{Store: s.TSDBStore}, + }, + Monitor: s.Monitor, + PointsWriter: s.PointsWriter, + MaxSelectPointN: c.Coordinator.MaxSelectPointN, + MaxSelectSeriesN: c.Coordinator.MaxSelectSeriesN, + MaxSelectBucketsN: c.Coordinator.MaxSelectBucketsN, + } + s.QueryExecutor.TaskManager.QueryTimeout = time.Duration(c.Coordinator.QueryTimeout) + s.QueryExecutor.TaskManager.LogQueriesAfter = time.Duration(c.Coordinator.LogQueriesAfter) + s.QueryExecutor.TaskManager.MaxConcurrentQueries = c.Coordinator.MaxConcurrentQueries + + // Initialize the monitor + s.Monitor.Version = s.buildInfo.Version + s.Monitor.Commit = s.buildInfo.Commit + s.Monitor.Branch = s.buildInfo.Branch + s.Monitor.BuildTime = s.buildInfo.Time + s.Monitor.PointsWriter = (*monitorPointsWriter)(s.PointsWriter) + return s, nil +} + +// Statistics returns statistics for the services running in the Server. +func (s *Server) Statistics(tags map[string]string) []models.Statistic { + var statistics []models.Statistic + statistics = append(statistics, s.QueryExecutor.Statistics(tags)...) + statistics = append(statistics, s.TSDBStore.Statistics(tags)...) + statistics = append(statistics, s.PointsWriter.Statistics(tags)...) + statistics = append(statistics, s.Subscriber.Statistics(tags)...) + for _, srv := range s.Services { + if m, ok := srv.(monitor.Reporter); ok { + statistics = append(statistics, m.Statistics(tags)...) + } + } + return statistics +} + +func (s *Server) appendSnapshotterService() { + srv := snapshotter.NewService() + srv.TSDBStore = s.TSDBStore + srv.MetaClient = s.MetaClient + s.Services = append(s.Services, srv) + s.SnapshotterService = srv +} + +// SetLogOutput sets the logger used for all messages. It must not be called +// after the Open method has been called. 
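+// Services capture the logger during Open, so a logger swapped in afterwards would not propagate to them.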
+func (s *Server) SetLogOutput(w io.Writer) { + s.Logger = logger.New(w) +} + +func (s *Server) appendMonitorService() { + s.Services = append(s.Services, s.Monitor) +} + +func (s *Server) appendRetentionPolicyService(c retention.Config) { + if !c.Enabled { + return + } + srv := retention.NewService(c) + srv.MetaClient = s.MetaClient + srv.TSDBStore = s.TSDBStore + s.Services = append(s.Services, srv) +} + +func (s *Server) appendHTTPDService(c httpd.Config) { + if !c.Enabled { + return + } + srv := httpd.NewService(c) + srv.Handler.MetaClient = s.MetaClient + srv.Handler.QueryAuthorizer = meta.NewQueryAuthorizer(s.MetaClient) + srv.Handler.WriteAuthorizer = meta.NewWriteAuthorizer(s.MetaClient) + srv.Handler.QueryExecutor = s.QueryExecutor + srv.Handler.Monitor = s.Monitor + srv.Handler.PointsWriter = s.PointsWriter + srv.Handler.Version = s.buildInfo.Version + srv.Handler.BuildType = "OSS" + + // Wire up storage service for Prometheus endpoints. + storageStore := storage.NewStore() + storageStore.MetaClient = s.MetaClient + storageStore.TSDBStore = s.TSDBStore + srv.Handler.Store = storageStore + + s.Services = append(s.Services, srv) +} + +func (s *Server) appendStorageService(c storage.Config) { + if !c.Enabled { + return + } + srv := storage.NewService(c) + srv.MetaClient = s.MetaClient + srv.TSDBStore = s.TSDBStore + + s.Services = append(s.Services, srv) +} + +func (s *Server) appendCollectdService(c collectd.Config) { + if !c.Enabled { + return + } + srv := collectd.NewService(c) + srv.MetaClient = s.MetaClient + srv.PointsWriter = s.PointsWriter + s.Services = append(s.Services, srv) +} + +func (s *Server) appendOpenTSDBService(c opentsdb.Config) error { + if !c.Enabled { + return nil + } + srv, err := opentsdb.NewService(c) + if err != nil { + return err + } + srv.PointsWriter = s.PointsWriter + srv.MetaClient = s.MetaClient + s.Services = append(s.Services, srv) + return nil +} + +func (s *Server) appendGraphiteService(c graphite.Config) error { + if !c.Enabled { + return nil + } + srv, err := graphite.NewService(c) + if err != nil { + return err + } + + srv.PointsWriter = s.PointsWriter + srv.MetaClient = s.MetaClient + srv.Monitor = s.Monitor + s.Services = append(s.Services, srv) + return nil +} + +func (s *Server) appendPrecreatorService(c precreator.Config) error { + if !c.Enabled { + return nil + } + srv := precreator.NewService(c) + srv.MetaClient = s.MetaClient + s.Services = append(s.Services, srv) + return nil +} + +func (s *Server) appendUDPService(c udp.Config) { + if !c.Enabled { + return + } + srv := udp.NewService(c) + srv.PointsWriter = s.PointsWriter + srv.MetaClient = s.MetaClient + s.Services = append(s.Services, srv) +} + +func (s *Server) appendContinuousQueryService(c continuous_querier.Config) { + if !c.Enabled { + return + } + srv := continuous_querier.NewService(c) + srv.MetaClient = s.MetaClient + srv.QueryExecutor = s.QueryExecutor + srv.Monitor = s.Monitor + s.Services = append(s.Services, srv) +} + +// Err returns an error channel that multiplexes all out of band errors received from all services. +func (s *Server) Err() <-chan error { return s.err } + +// Open opens the meta and data store and all services. +func (s *Server) Open() error { + // Start profiling, if set. + startProfile(s.CPUProfile, s.MemProfile) + + // Open shared TCP connection. + ln, err := net.Listen("tcp", s.BindAddress) + if err != nil { + return fmt.Errorf("listen: %s", err) + } + s.Listener = ln + + // Multiplex listener. 
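+	// All TCP services share the single bind-address port; the snapshotter is attached to this mux below via mux.Listen(snapshotter.MuxHeader).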
+ mux := tcp.NewMux() + go mux.Serve(ln) + + // Append services. + s.appendMonitorService() + s.appendPrecreatorService(s.config.Precreator) + s.appendSnapshotterService() + s.appendContinuousQueryService(s.config.ContinuousQuery) + s.appendHTTPDService(s.config.HTTPD) + s.appendStorageService(s.config.Storage) + s.appendRetentionPolicyService(s.config.Retention) + for _, i := range s.config.GraphiteInputs { + if err := s.appendGraphiteService(i); err != nil { + return err + } + } + for _, i := range s.config.CollectdInputs { + s.appendCollectdService(i) + } + for _, i := range s.config.OpenTSDBInputs { + if err := s.appendOpenTSDBService(i); err != nil { + return err + } + } + for _, i := range s.config.UDPInputs { + s.appendUDPService(i) + } + + s.Subscriber.MetaClient = s.MetaClient + s.PointsWriter.MetaClient = s.MetaClient + s.Monitor.MetaClient = s.MetaClient + + s.SnapshotterService.Listener = mux.Listen(snapshotter.MuxHeader) + + // Configure logging for all services and clients. + if s.config.Meta.LoggingEnabled { + s.MetaClient.WithLogger(s.Logger) + } + s.TSDBStore.WithLogger(s.Logger) + if s.config.Data.QueryLogEnabled { + s.QueryExecutor.WithLogger(s.Logger) + } + s.PointsWriter.WithLogger(s.Logger) + s.Subscriber.WithLogger(s.Logger) + for _, svc := range s.Services { + svc.WithLogger(s.Logger) + } + s.SnapshotterService.WithLogger(s.Logger) + s.Monitor.WithLogger(s.Logger) + + // Open TSDB store. + if err := s.TSDBStore.Open(); err != nil { + return fmt.Errorf("open tsdb store: %s", err) + } + + // Open the subscriber service + if err := s.Subscriber.Open(); err != nil { + return fmt.Errorf("open subscriber: %s", err) + } + + // Open the points writer service + if err := s.PointsWriter.Open(); err != nil { + return fmt.Errorf("open points writer: %s", err) + } + + s.PointsWriter.AddWriteSubscriber(s.Subscriber.Points()) + + for _, service := range s.Services { + if err := service.Open(); err != nil { + return fmt.Errorf("open service: %s", err) + } + } + + // Start the reporting service, if not disabled. + if !s.reportingDisabled { + go s.startServerReporting() + } + + return nil +} + +// Close shuts down the meta and data stores and all services. +func (s *Server) Close() error { + stopProfile() + + // Close the listener first to stop any new connections + if s.Listener != nil { + s.Listener.Close() + } + + // Close services to allow any inflight requests to complete + // and prevent new requests from being accepted. + for _, service := range s.Services { + service.Close() + } + + s.config.deregisterDiagnostics(s.Monitor) + + if s.PointsWriter != nil { + s.PointsWriter.Close() + } + + if s.QueryExecutor != nil { + s.QueryExecutor.Close() + } + + // Close the TSDBStore, no more reads or writes at this point + if s.TSDBStore != nil { + s.TSDBStore.Close() + } + + if s.Subscriber != nil { + s.Subscriber.Close() + } + + if s.MetaClient != nil { + s.MetaClient.Close() + } + + close(s.closing) + return nil +} + +// startServerReporting starts periodic server reporting. +func (s *Server) startServerReporting() { + s.reportServer() + + ticker := time.NewTicker(24 * time.Hour) + defer ticker.Stop() + for { + select { + case <-s.closing: + return + case <-ticker.C: + s.reportServer() + } + } +} + +// reportServer reports usage statistics about the system. 
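+// It runs once at startup and then every 24 hours from startServerReporting, unless reporting-disabled is set in the config.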
+func (s *Server) reportServer() { + dbs := s.MetaClient.Databases() + numDatabases := len(dbs) + + var ( + numMeasurements int64 + numSeries int64 + ) + + for _, db := range dbs { + name := db.Name + n, err := s.TSDBStore.SeriesCardinality(name) + if err != nil { + s.Logger.Error(fmt.Sprintf("Unable to get series cardinality for database %s: %v", name, err)) + } else { + numSeries += n + } + + n, err = s.TSDBStore.MeasurementsCardinality(name) + if err != nil { + s.Logger.Error(fmt.Sprintf("Unable to get measurement cardinality for database %s: %v", name, err)) + } else { + numMeasurements += n + } + } + + clusterID := s.MetaClient.ClusterID() + cl := client.New("") + usage := client.Usage{ + Product: "influxdb", + Data: []client.UsageData{ + { + Values: client.Values{ + "os": runtime.GOOS, + "arch": runtime.GOARCH, + "version": s.buildInfo.Version, + "cluster_id": fmt.Sprintf("%v", clusterID), + "num_series": numSeries, + "num_measurements": numMeasurements, + "num_databases": numDatabases, + "uptime": time.Since(startTime).Seconds(), + }, + }, + }, + } + + s.Logger.Info("Sending usage statistics to usage.influxdata.com") + + go cl.Save(usage) +} + +// Service represents a service attached to the server. +type Service interface { + WithLogger(log *zap.Logger) + Open() error + Close() error +} + +// prof stores the file locations of active profiles. +var prof struct { + cpu *os.File + mem *os.File +} + +// StartProfile initializes the cpu and memory profile, if specified. +func startProfile(cpuprofile, memprofile string) { + if cpuprofile != "" { + f, err := os.Create(cpuprofile) + if err != nil { + log.Fatalf("cpuprofile: %v", err) + } + log.Printf("writing CPU profile to: %s\n", cpuprofile) + prof.cpu = f + pprof.StartCPUProfile(prof.cpu) + } + + if memprofile != "" { + f, err := os.Create(memprofile) + if err != nil { + log.Fatalf("memprofile: %v", err) + } + log.Printf("writing mem profile to: %s\n", memprofile) + prof.mem = f + runtime.MemProfileRate = 4096 + } + +} + +// StopProfile closes the cpu and memory profiles if they are running. +func stopProfile() { + if prof.cpu != nil { + pprof.StopCPUProfile() + prof.cpu.Close() + log.Println("CPU profile stopped") + } + if prof.mem != nil { + pprof.Lookup("heap").WriteTo(prof.mem, 0) + prof.mem.Close() + log.Println("mem profile stopped") + } +} + +// monitorPointsWriter is a wrapper around `coordinator.PointsWriter` that helps +// to prevent a circular dependency between the `cluster` and `monitor` packages. +type monitorPointsWriter coordinator.PointsWriter + +func (pw *monitorPointsWriter) WritePoints(database, retentionPolicy string, points models.Points) error { + return (*coordinator.PointsWriter)(pw).WritePointsPrivileged(database, retentionPolicy, models.ConsistencyLevelAny, points) +} + +func raftDBExists(dir string) error { + // Check to see if there is a raft db, if so, error out with a message + // to downgrade, export, and then import the meta data + raftFile := filepath.Join(dir, "raft.db") + if _, err := os.Stat(raftFile); err == nil { + return fmt.Errorf("detected %s. To proceed, you'll need to either 1) downgrade to v0.11.x, export your metadata, upgrade to the current version again, and then import the metadata or 2) delete the file, which will effectively reset your database. 
For more assistance with the upgrade, see: https://docs.influxdata.com/influxdb/v0.12/administration/upgrading/", raftFile) + } + return nil +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/integration_config_test.go b/vendor/github.com/influxdata/influxdb/cmd/integration_config_test.go new file mode 100644 index 0000000..b5011a5 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/integration_config_test.go @@ -0,0 +1,96 @@ +package cmd_test + +import ( + "strconv" + "strings" + "testing" + + client "github.com/influxdata/influxdb/client/v2" +) + +func TestRetentionAutocreate(t *testing.T) { + for _, tc := range []struct { + name string + enabled bool + }{ + {name: "enabled", enabled: true}, + {name: "disabled", enabled: false}, + } { + t.Run(tc.name, func(t *testing.T) { + cmd := NewTestRunCommand(map[string]string{ + "INFLUXDB_META_RETENTION_AUTOCREATE": strconv.FormatBool(tc.enabled), + }) + defer cmd.Cleanup() + + cmd.MustRun() + + c, err := client.NewHTTPClient(client.HTTPConfig{ + Addr: "http://" + cmd.BoundHTTPAddr(), + }) + if err != nil { + t.Fatal(err) + } + + _ = mustQuery(c, "CREATE DATABASE test", "", "") + + resp := mustQuery(c, "SHOW RETENTION POLICIES ON test", "", "") + if len(resp.Results) != 1 { + t.Fatalf("expected 1 result in response, got %d", len(resp.Results)) + } + + if tc.enabled { + if len(resp.Results[0].Series) != 1 || len(resp.Results[0].Series[0].Values) != 1 { + t.Fatalf("expected one automatically created retention policy, got %#v", resp.Results[0].Series[0].Values) + } + } else { + if len(resp.Results[0].Series) != 1 || len(resp.Results[0].Series[0].Values) != 0 { + t.Fatalf("expected no retention policies, got: %#v", resp.Results[0].Series[0].Values) + } + } + }) + } +} + +func TestCacheMaxMemorySize(t *testing.T) { + cmd := NewTestRunCommand(map[string]string{ + "INFLUXDB_DATA_CACHE_MAX_MEMORY_SIZE": "1024", + }) + defer cmd.Cleanup() + + cmd.MustRun() + + c := cmd.HTTPClient() + _ = mustQuery(c, "CREATE DATABASE test", "", "") + + // Add a small point that fits in the cache size. + bp, _ := client.NewBatchPoints(client.BatchPointsConfig{Database: "test"}) + pt, _ := client.NewPoint("strings", nil, map[string]interface{}{"s": "a short string"}) + bp.AddPoint(pt) + if err := c.Write(bp); err != nil { + t.Fatal(err) + } + + // This point won't fit in the cache size and should be rejected. 
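+	// "long" repeated 1024 times is a 4096-byte field value, well over the 1024-byte cache-max-memory-size configured above.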
+ bp, _ = client.NewBatchPoints(client.BatchPointsConfig{Database: "test"}) + pt, _ = client.NewPoint("strings", nil, map[string]interface{}{"s": strings.Repeat("long", 1024)}) + bp.AddPoint(pt) + err := c.Write(bp) + if err == nil { + t.Fatal("expected an error but got nil") + } + + if !strings.Contains(err.Error(), "cache-max-memory-size") { + t.Fatalf("unexpected error: %s", err.Error()) + } +} + +func mustQuery(c client.Client, q, db, precision string) *client.Response { + resp, err := c.Query(client.NewQuery(q, db, precision)) + if err != nil { + panic(err) + } else if resp.Error() != nil { + panic(resp.Error()) + } + + return resp +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/integration_test.go b/vendor/github.com/influxdata/influxdb/cmd/integration_test.go new file mode 100644 index 0000000..ee355c6 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/integration_test.go @@ -0,0 +1,91 @@ +package cmd_test + +import ( + "io/ioutil" + "os" + "path/filepath" + + client "github.com/influxdata/influxdb/client/v2" + "github.com/influxdata/influxdb/cmd/influxd/run" + "github.com/influxdata/influxdb/services/httpd" +) + +type TestRunCommand struct { + *run.Command + + // Temporary directory used for default data, meta, and wal dirs. + Dir string +} + +func NewTestRunCommand(env map[string]string) *TestRunCommand { + dir, err := ioutil.TempDir("", "testrun-") + if err != nil { + panic(err) + } + + cmd := run.NewCommand() + cmd.Getenv = func(k string) string { + // Return value in env map, if set. + if env != nil { + if v, ok := env[k]; ok { + return v + } + } + + // If the key wasn't explicitly set in env, use some reasonable defaults for test. + switch k { + case "INFLUXDB_DATA_DIR": + return filepath.Join(dir, "data") + case "INFLUXDB_META_DIR": + return filepath.Join(dir, "meta") + case "INFLUXDB_DATA_WAL_DIR": + return filepath.Join(dir, "wal") + case "INFLUXDB_HTTP_BIND_ADDRESS": + return "localhost:0" + case "INFLUXDB_BIND_ADDRESS": + return "localhost:0" + case "INFLUXDB_REPORTING_DISABLED": + return "true" + default: + return "" + } + } + + return &TestRunCommand{ + Command: cmd, + Dir: dir, + } +} + +// MustRun calls Command.Run and panics if there is an error. +func (c *TestRunCommand) MustRun() { + if err := c.Command.Run("-config", os.DevNull); err != nil { + panic(err) + } +} + +// HTTPClient returns a new v2 HTTP client. +func (c *TestRunCommand) HTTPClient() client.Client { + cl, err := client.NewHTTPClient(client.HTTPConfig{ + Addr: "http://" + c.BoundHTTPAddr(), + }) + if err != nil { + panic(err) + } + return cl +} + +// BoundHTTPAddr returns the bind address of the HTTP service, in form "localhost:65432". +func (c *TestRunCommand) BoundHTTPAddr() string { + for _, s := range c.Command.Server.Services { + if s, ok := s.(*httpd.Service); ok { + return s.BoundHTTPAddr() + } + } + panic("Did not find HTTPD service!") +} + +func (c *TestRunCommand) Cleanup() { + c.Command.Close() + os.RemoveAll(c.Dir) +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/parse.go b/vendor/github.com/influxdata/influxdb/cmd/parse.go new file mode 100644 index 0000000..7b140ed --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/parse.go @@ -0,0 +1,29 @@ +// Package cmd is the root package of the various command-line utilities for InfluxDB. +package cmd + +import "strings" + +// ParseCommandName extracts the command name and args from the args list. +func ParseCommandName(args []string) (string, []string) { + // Retrieve command name as first argument. 
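+	// For example, ["backup", "-db", "x"] yields ("backup", ["-db", "x"]); a leading -h, -help or --help maps to the "help" command.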
+ var name string + if len(args) > 0 { + if !strings.HasPrefix(args[0], "-") { + name = args[0] + } else if args[0] == "-h" || args[0] == "-help" || args[0] == "--help" { + // Special case -h immediately following binary name + name = "help" + } + } + + // If command is "help" and has an argument then rewrite args to use "-h". + if name == "help" && len(args) > 2 && !strings.HasPrefix(args[1], "-") { + return args[1], []string{"-h"} + } + + // If a named command is specified then return it with its arguments. + if name != "" { + return name, args[1:] + } + return "", args +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/store/help/help.go b/vendor/github.com/influxdata/influxdb/cmd/store/help/help.go new file mode 100644 index 0000000..9d7f371 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/store/help/help.go @@ -0,0 +1,40 @@ +// Package help contains the help for the store command. +package help + +import ( + "fmt" + "io" + "os" + "strings" +) + +// Command displays help for command-line sub-commands. +type Command struct { + Stdout io.Writer +} + +// NewCommand returns a new instance of Command. +func NewCommand() *Command { + return &Command{ + Stdout: os.Stdout, + } +} + +// Run executes the command. +func (cmd *Command) Run(args ...string) error { + fmt.Fprintln(cmd.Stdout, strings.TrimSpace(usage)) + return nil +} + +const usage = ` +Usage: store [[command] [arguments]] + +The commands are: + + query queries data. + help display this help message + +"help" is the default command. + +Use "store [command] -help" for more information about a command. +` diff --git a/vendor/github.com/influxdata/influxdb/cmd/store/main.go b/vendor/github.com/influxdata/influxdb/cmd/store/main.go new file mode 100644 index 0000000..b1c4dad --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/store/main.go @@ -0,0 +1,65 @@ +// The store command displays detailed information about InfluxDB data files. +package main + +import ( + "fmt" + "io" + "os" + + "github.com/influxdata/influxdb/cmd" + "github.com/influxdata/influxdb/cmd/store/help" + "github.com/influxdata/influxdb/cmd/store/query" + "github.com/influxdata/influxdb/logger" + _ "github.com/influxdata/influxdb/tsdb/engine" + "go.uber.org/zap" +) + +func main() { + m := NewMain() + if err := m.Run(os.Args[1:]...); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } +} + +// Main represents the program execution. +type Main struct { + Logger *zap.Logger + + Stdin io.Reader + Stdout io.Writer + Stderr io.Writer +} + +// NewMain returns a new instance of Main. +func NewMain() *Main { + return &Main{ + Logger: logger.New(os.Stderr), + Stdin: os.Stdin, + Stdout: os.Stdout, + Stderr: os.Stderr, + } +} + +// Run determines and runs the command specified by the CLI args. +func (m *Main) Run(args ...string) error { + name, args := cmd.ParseCommandName(args) + + // Extract name from args. 
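+	// Dispatch to the matching sub-command; unrecognized names fall through to an error.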
+ switch name { + case "", "help": + if err := help.NewCommand().Run(args...); err != nil { + return fmt.Errorf("help: %s", err) + } + case "query": + name := query.NewCommand() + name.Logger = m.Logger + if err := name.Run(args...); err != nil { + return fmt.Errorf("query: %s", err) + } + default: + return fmt.Errorf(`unknown command "%s"`+"\n"+`Run 'store help' for usage`+"\n\n", name) + } + + return nil +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/store/query/query.go b/vendor/github.com/influxdata/influxdb/cmd/store/query/query.go new file mode 100644 index 0000000..f84b4d2 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/store/query/query.go @@ -0,0 +1,570 @@ +package query + +import ( + "bufio" + "context" + "errors" + "flag" + "fmt" + "io" + "os" + "path/filepath" + "strconv" + "strings" + "time" + + "github.com/gogo/protobuf/proto" + "github.com/influxdata/influxdb/models" + "github.com/influxdata/influxdb/services/storage" + "github.com/influxdata/influxql" + "github.com/influxdata/yarpc" + "go.uber.org/zap" +) + +// Command represents the program execution for "store query". +type Command struct { + // Standard input/output, overridden for testing. + Stderr io.Writer + Stdout io.Writer + Logger *zap.Logger + + addr string + cpuProfile string + memProfile string + database string + retentionPolicy string + startTime int64 + endTime int64 + limit int64 + slimit int64 + soffset int64 + desc bool + silent bool + expr string + agg string + groupArg string + group storage.ReadRequest_Group + groupKeys string + keys []string + hintsArg string + hints storage.HintFlags + + aggType storage.Aggregate_AggregateType + + // response + integerSum int64 + unsignedSum uint64 + floatSum float64 + pointCount uint64 +} + +// NewCommand returns a new instance of Command. +func NewCommand() *Command { + return &Command{ + Stderr: os.Stderr, + Stdout: os.Stdout, + } +} + +func parseTime(v string) (int64, error) { + if s, err := time.Parse(time.RFC3339, v); err == nil { + return s.UnixNano(), nil + } + + if i, err := strconv.ParseInt(v, 10, 64); err == nil { + return i, nil + } + + return 0, errors.New("invalid time") +} + +// Run executes the command. 
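+// It parses the command-line flags, resolves the optional start/end times,
+// dials the storage RPC endpoint, and streams the matching results to stdout.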
+func (cmd *Command) Run(args ...string) error { + var start, end string + fs := flag.NewFlagSet("query", flag.ExitOnError) + fs.StringVar(&cmd.cpuProfile, "cpuprofile", "", "CPU profile name") + fs.StringVar(&cmd.memProfile, "memprofile", "", "memory profile name") + fs.StringVar(&cmd.addr, "addr", ":8082", "the RPC address") + fs.StringVar(&cmd.database, "database", "", "the database to query") + fs.StringVar(&cmd.retentionPolicy, "retention", "", "Optional: the retention policy to query") + fs.StringVar(&start, "start", "", "Optional: the start time to query (RFC3339 format)") + fs.StringVar(&end, "end", "", "Optional: the end time to query (RFC3339 format)") + fs.Int64Var(&cmd.slimit, "slimit", 0, "Optional: limit number of series") + fs.Int64Var(&cmd.soffset, "soffset", 0, "Optional: start offset for series") + fs.Int64Var(&cmd.limit, "limit", 0, "Optional: limit number of values per series (-1 to return series only)") + fs.BoolVar(&cmd.desc, "desc", false, "Optional: return results in descending order") + fs.BoolVar(&cmd.silent, "silent", false, "silence output") + fs.StringVar(&cmd.expr, "expr", "", "InfluxQL conditional expression") + fs.StringVar(&cmd.agg, "agg", "", "aggregate functions (sum, count)") + fs.StringVar(&cmd.groupArg, "group", "none", "group operation (none,all,by,except,disable)") + fs.StringVar(&cmd.groupKeys, "group-keys", "", "comma-separated list of tags to specify series order") + fs.StringVar(&cmd.hintsArg, "hints", "none", "comma-separated list of read hints (none,no_points,no_series)") + + fs.SetOutput(cmd.Stdout) + fs.Usage = func() { + fmt.Fprintln(cmd.Stdout, "Query via RPC") + fmt.Fprintf(cmd.Stdout, "Usage: %s query [flags]\n\n", filepath.Base(os.Args[0])) + fs.PrintDefaults() + } + + if err := fs.Parse(args); err != nil { + return err + } + + // set defaults + if start != "" { + t, err := parseTime(start) + if err != nil { + return err + } + cmd.startTime = t + + } else { + cmd.startTime = models.MinNanoTime + } + if end != "" { + t, err := parseTime(end) + if err != nil { + return err + } + cmd.endTime = t + + } else { + // set end time to max if it is not set. 
+ cmd.endTime = models.MaxNanoTime + } + + if cmd.groupKeys != "" { + cmd.keys = strings.Split(cmd.groupKeys, ",") + } + + if err := cmd.validate(); err != nil { + return err + } + + conn, err := yarpc.Dial(cmd.addr) + if err != nil { + return err + } + defer conn.Close() + + return cmd.query(storage.NewStorageClient(conn)) +} + +func (cmd *Command) validate() error { + if cmd.database == "" { + return fmt.Errorf("must specify a database") + } + if cmd.startTime != 0 && cmd.endTime != 0 && cmd.endTime < cmd.startTime { + return fmt.Errorf("end time before start time") + } + + if cmd.agg != "" { + tm := proto.EnumValueMap("com.github.influxdata.influxdb.services.storage.Aggregate_AggregateType") + agg, ok := tm[strings.ToUpper(cmd.agg)] + if !ok { + return errors.New("invalid aggregate function: " + cmd.agg) + } + cmd.aggType = storage.Aggregate_AggregateType(agg) + } + + enums := proto.EnumValueMap("com.github.influxdata.influxdb.services.storage.ReadRequest_Group") + group, ok := enums["GROUP_"+strings.ToUpper(cmd.groupArg)] + if !ok { + return errors.New("invalid group type: " + cmd.groupArg) + } + cmd.group = storage.ReadRequest_Group(group) + + enums = proto.EnumValueMap("com.github.influxdata.influxdb.services.storage.ReadRequest_HintFlags") + for _, h := range strings.Split(cmd.hintsArg, ",") { + cmd.hints |= storage.HintFlags(enums["HINT_"+strings.ToUpper(h)]) + } + + return nil +} + +func (cmd *Command) query(c storage.StorageClient) error { + var req storage.ReadRequest + req.Database = cmd.database + if cmd.retentionPolicy != "" { + req.Database += "/" + cmd.retentionPolicy + } + + req.TimestampRange.Start = cmd.startTime + req.TimestampRange.End = cmd.endTime + req.SeriesLimit = cmd.slimit + req.SeriesOffset = cmd.soffset + req.PointsLimit = cmd.limit + req.Descending = cmd.desc + req.Group = cmd.group + req.GroupKeys = cmd.keys + req.Hints = cmd.hints + + if cmd.aggType != storage.AggregateTypeNone { + req.Aggregate = &storage.Aggregate{Type: cmd.aggType} + } + + if cmd.expr != "" { + expr, err := influxql.ParseExpr(cmd.expr) + if err != nil { + return nil + } + fmt.Fprintln(cmd.Stdout, expr) + var v exprToNodeVisitor + influxql.Walk(&v, expr) + if v.Err() != nil { + return v.Err() + } + + req.Predicate = &storage.Predicate{Root: v.nodes[0]} + } + + stream, err := c.Read(context.Background(), &req) + if err != nil { + fmt.Fprintln(cmd.Stdout, err) + return err + } + + wr := bufio.NewWriter(os.Stdout) + + now := time.Now() + defer func() { + dur := time.Since(now) + fmt.Fprintf(cmd.Stdout, "time: %v\n", dur) + }() + + for { + var rep storage.ReadResponse + + if err = stream.RecvMsg(&rep); err != nil { + if err == io.EOF { + break + } + + return err + } + + if cmd.silent { + cmd.processFramesSilent(rep.Frames) + } else { + cmd.processFrames(wr, rep.Frames) + } + } + + fmt.Fprintln(cmd.Stdout) + fmt.Fprint(cmd.Stdout, "points(count): ", cmd.pointCount, ", sum(int64): ", cmd.integerSum, ", sum(uint64): ", cmd.unsignedSum, ", sum(float64): ", cmd.floatSum, "\n") + + return nil +} + +func (cmd *Command) processFramesSilent(frames []storage.ReadResponse_Frame) { + for _, frame := range frames { + switch f := frame.Data.(type) { + case *storage.ReadResponse_Frame_IntegerPoints: + for _, v := range f.IntegerPoints.Values { + cmd.integerSum += v + } + cmd.pointCount += uint64(len(f.IntegerPoints.Values)) + + case *storage.ReadResponse_Frame_UnsignedPoints: + for _, v := range f.UnsignedPoints.Values { + cmd.unsignedSum += v + } + cmd.pointCount += uint64(len(f.UnsignedPoints.Values)) + + 
case *storage.ReadResponse_Frame_FloatPoints: + for _, v := range f.FloatPoints.Values { + cmd.floatSum += v + } + cmd.pointCount += uint64(len(f.FloatPoints.Values)) + + case *storage.ReadResponse_Frame_StringPoints: + cmd.pointCount += uint64(len(f.StringPoints.Values)) + + case *storage.ReadResponse_Frame_BooleanPoints: + cmd.pointCount += uint64(len(f.BooleanPoints.Values)) + } + } +} + +func printByteSlice(wr *bufio.Writer, v [][]byte) { + wr.WriteString("[\033[36m") + first := true + for _, t := range v { + if !first { + wr.WriteByte(',') + } else { + first = false + } + wr.Write(t) + } + wr.WriteString("\033[0m]\n") +} + +func (cmd *Command) processFrames(wr *bufio.Writer, frames []storage.ReadResponse_Frame) { + var buf [1024]byte + var line []byte + + for _, frame := range frames { + switch f := frame.Data.(type) { + case *storage.ReadResponse_Frame_Group: + g := f.Group + wr.WriteString("partition values") + printByteSlice(wr, g.PartitionKeyVals) + wr.WriteString("group keys") + printByteSlice(wr, g.TagKeys) + wr.Flush() + + case *storage.ReadResponse_Frame_Series: + s := f.Series + wr.WriteString("\033[36m") + first := true + for _, t := range s.Tags { + if !first { + wr.WriteByte(',') + } else { + first = false + } + wr.Write(t.Key) + wr.WriteByte(':') + wr.Write(t.Value) + } + wr.WriteString("\033[0m\n") + wr.Flush() + + case *storage.ReadResponse_Frame_IntegerPoints: + p := f.IntegerPoints + for i := 0; i < len(p.Timestamps); i++ { + line = buf[:0] + wr.Write(strconv.AppendInt(line, p.Timestamps[i], 10)) + wr.WriteByte(' ') + + line = buf[:0] + wr.Write(strconv.AppendInt(line, p.Values[i], 10)) + wr.WriteString("\n") + wr.Flush() + + cmd.integerSum += p.Values[i] + } + cmd.pointCount += uint64(len(f.IntegerPoints.Values)) + + case *storage.ReadResponse_Frame_UnsignedPoints: + p := f.UnsignedPoints + for i := 0; i < len(p.Timestamps); i++ { + line = buf[:0] + wr.Write(strconv.AppendInt(line, p.Timestamps[i], 10)) + wr.WriteByte(' ') + + line = buf[:0] + wr.Write(strconv.AppendUint(line, p.Values[i], 10)) + wr.WriteString("\n") + wr.Flush() + + cmd.unsignedSum += p.Values[i] + } + cmd.pointCount += uint64(len(f.UnsignedPoints.Values)) + + case *storage.ReadResponse_Frame_FloatPoints: + p := f.FloatPoints + for i := 0; i < len(p.Timestamps); i++ { + line = buf[:0] + wr.Write(strconv.AppendInt(line, p.Timestamps[i], 10)) + wr.WriteByte(' ') + + line = buf[:0] + wr.Write(strconv.AppendFloat(line, p.Values[i], 'f', 10, 64)) + wr.WriteString("\n") + wr.Flush() + + cmd.floatSum += p.Values[i] + } + cmd.pointCount += uint64(len(f.FloatPoints.Values)) + + case *storage.ReadResponse_Frame_StringPoints: + p := f.StringPoints + for i := 0; i < len(p.Timestamps); i++ { + line = buf[:0] + wr.Write(strconv.AppendInt(line, p.Timestamps[i], 10)) + wr.WriteByte(' ') + + wr.WriteString(p.Values[i]) + wr.WriteString("\n") + wr.Flush() + } + cmd.pointCount += uint64(len(f.StringPoints.Values)) + + case *storage.ReadResponse_Frame_BooleanPoints: + p := f.BooleanPoints + for i := 0; i < len(p.Timestamps); i++ { + line = buf[:0] + wr.Write(strconv.AppendInt(line, p.Timestamps[i], 10)) + wr.WriteByte(' ') + + if p.Values[i] { + wr.WriteString("true") + } else { + wr.WriteString("false") + } + wr.WriteString("\n") + wr.Flush() + } + cmd.pointCount += uint64(len(f.BooleanPoints.Values)) + } + } +} + +type exprToNodeVisitor struct { + nodes []*storage.Node + err error +} + +func (v *exprToNodeVisitor) Err() error { + return v.err +} + +func (v *exprToNodeVisitor) pop() (top *storage.Node) { + if 
len(v.nodes) < 1 { + panic("exprToNodeVisitor: stack empty") + } + + top, v.nodes = v.nodes[len(v.nodes)-1], v.nodes[:len(v.nodes)-1] + return +} + +func (v *exprToNodeVisitor) pop2() (lhs, rhs *storage.Node) { + if len(v.nodes) < 2 { + panic("exprToNodeVisitor: stack empty") + } + + rhs = v.nodes[len(v.nodes)-1] + lhs = v.nodes[len(v.nodes)-2] + v.nodes = v.nodes[:len(v.nodes)-2] + return +} + +func mapOpToComparison(op influxql.Token) storage.Node_Comparison { + switch op { + case influxql.EQ: + return storage.ComparisonEqual + case influxql.EQREGEX: + return storage.ComparisonRegex + case influxql.NEQ: + return storage.ComparisonNotEqual + case influxql.NEQREGEX: + return storage.ComparisonNotEqual + case influxql.LT: + return storage.ComparisonLess + case influxql.LTE: + return storage.ComparisonLessEqual + case influxql.GT: + return storage.ComparisonGreater + case influxql.GTE: + return storage.ComparisonGreaterEqual + + default: + return -1 + } +} + +func (v *exprToNodeVisitor) Visit(node influxql.Node) influxql.Visitor { + switch n := node.(type) { + case *influxql.BinaryExpr: + if v.err != nil { + return nil + } + + influxql.Walk(v, n.LHS) + if v.err != nil { + return nil + } + + influxql.Walk(v, n.RHS) + if v.err != nil { + return nil + } + + if comp := mapOpToComparison(n.Op); comp != -1 { + lhs, rhs := v.pop2() + v.nodes = append(v.nodes, &storage.Node{ + NodeType: storage.NodeTypeComparisonExpression, + Value: &storage.Node_Comparison_{Comparison: comp}, + Children: []*storage.Node{lhs, rhs}, + }) + } else if n.Op == influxql.AND || n.Op == influxql.OR { + var op storage.Node_Logical + if n.Op == influxql.AND { + op = storage.LogicalAnd + } else { + op = storage.LogicalOr + } + + lhs, rhs := v.pop2() + v.nodes = append(v.nodes, &storage.Node{ + NodeType: storage.NodeTypeLogicalExpression, + Value: &storage.Node_Logical_{Logical: op}, + Children: []*storage.Node{lhs, rhs}, + }) + } else { + v.err = fmt.Errorf("unsupported operator, %s", n.Op) + } + + return nil + + case *influxql.ParenExpr: + influxql.Walk(v, n.Expr) + if v.err != nil { + return nil + } + + v.nodes = append(v.nodes, &storage.Node{ + NodeType: storage.NodeTypeParenExpression, + Children: []*storage.Node{v.pop()}, + }) + return nil + + case *influxql.StringLiteral: + v.nodes = append(v.nodes, &storage.Node{ + NodeType: storage.NodeTypeLiteral, + Value: &storage.Node_StringValue{StringValue: n.Val}, + }) + return nil + + case *influxql.NumberLiteral: + v.nodes = append(v.nodes, &storage.Node{ + NodeType: storage.NodeTypeLiteral, + Value: &storage.Node_FloatValue{FloatValue: n.Val}, + }) + return nil + + case *influxql.IntegerLiteral: + v.nodes = append(v.nodes, &storage.Node{ + NodeType: storage.NodeTypeLiteral, + Value: &storage.Node_IntegerValue{IntegerValue: n.Val}, + }) + return nil + + case *influxql.UnsignedLiteral: + v.nodes = append(v.nodes, &storage.Node{ + NodeType: storage.NodeTypeLiteral, + Value: &storage.Node_UnsignedValue{UnsignedValue: n.Val}, + }) + return nil + + case *influxql.VarRef: + v.nodes = append(v.nodes, &storage.Node{ + NodeType: storage.NodeTypeTagRef, + Value: &storage.Node_TagRefValue{TagRefValue: n.Val}, + }) + return nil + + case *influxql.RegexLiteral: + v.nodes = append(v.nodes, &storage.Node{ + NodeType: storage.NodeTypeLiteral, + Value: &storage.Node_RegexValue{RegexValue: n.Val.String()}, + }) + return nil + default: + v.err = fmt.Errorf("unsupported expression %T", n) + return nil + } +} diff --git a/vendor/github.com/influxdata/influxdb/coordinator/config.go 
b/vendor/github.com/influxdata/influxdb/coordinator/config.go new file mode 100644 index 0000000..b964f4b --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/coordinator/config.go @@ -0,0 +1,63 @@ +// Package coordinator contains abstractions for writing points, executing statements, +// and accessing meta data. +package coordinator + +import ( + "time" + + "github.com/influxdata/influxdb/monitor/diagnostics" + "github.com/influxdata/influxdb/query" + "github.com/influxdata/influxdb/toml" +) + +const ( + // DefaultWriteTimeout is the default timeout for a complete write to succeed. + DefaultWriteTimeout = 10 * time.Second + + // DefaultMaxConcurrentQueries is the maximum number of running queries. + // A value of zero will make the maximum query limit unlimited. + DefaultMaxConcurrentQueries = 0 + + // DefaultMaxSelectPointN is the maximum number of points a SELECT can process. + // A value of zero will make the maximum point count unlimited. + DefaultMaxSelectPointN = 0 + + // DefaultMaxSelectSeriesN is the maximum number of series a SELECT can run. + // A value of zero will make the maximum series count unlimited. + DefaultMaxSelectSeriesN = 0 +) + +// Config represents the configuration for the coordinator service. +type Config struct { + WriteTimeout toml.Duration `toml:"write-timeout"` + MaxConcurrentQueries int `toml:"max-concurrent-queries"` + QueryTimeout toml.Duration `toml:"query-timeout"` + LogQueriesAfter toml.Duration `toml:"log-queries-after"` + MaxSelectPointN int `toml:"max-select-point"` + MaxSelectSeriesN int `toml:"max-select-series"` + MaxSelectBucketsN int `toml:"max-select-buckets"` +} + +// NewConfig returns an instance of Config with defaults. +func NewConfig() Config { + return Config{ + WriteTimeout: toml.Duration(DefaultWriteTimeout), + QueryTimeout: toml.Duration(query.DefaultQueryTimeout), + MaxConcurrentQueries: DefaultMaxConcurrentQueries, + MaxSelectPointN: DefaultMaxSelectPointN, + MaxSelectSeriesN: DefaultMaxSelectSeriesN, + } +} + +// Diagnostics returns a diagnostics representation of a subset of the Config. +func (c Config) Diagnostics() (*diagnostics.Diagnostics, error) { + return diagnostics.RowFromMap(map[string]interface{}{ + "write-timeout": c.WriteTimeout, + "max-concurrent-queries": c.MaxConcurrentQueries, + "query-timeout": c.QueryTimeout, + "log-queries-after": c.LogQueriesAfter, + "max-select-point": c.MaxSelectPointN, + "max-select-series": c.MaxSelectSeriesN, + "max-select-buckets": c.MaxSelectBucketsN, + }), nil +} diff --git a/vendor/github.com/influxdata/influxdb/coordinator/config_test.go b/vendor/github.com/influxdata/influxdb/coordinator/config_test.go new file mode 100644 index 0000000..2f21436 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/coordinator/config_test.go @@ -0,0 +1,24 @@ +package coordinator_test + +import ( + "testing" + "time" + + "github.com/BurntSushi/toml" + "github.com/influxdata/influxdb/coordinator" +) + +func TestConfig_Parse(t *testing.T) { + // Parse configuration. + var c coordinator.Config + if _, err := toml.Decode(` +write-timeout = "20s" +`, &c); err != nil { + t.Fatal(err) + } + + // Validate configuration. 
+ if time.Duration(c.WriteTimeout) != 20*time.Second { + t.Fatalf("unexpected write timeout s: %s", c.WriteTimeout) + } +} diff --git a/vendor/github.com/influxdata/influxdb/coordinator/meta_client.go b/vendor/github.com/influxdata/influxdb/coordinator/meta_client.go new file mode 100644 index 0000000..4107e7c --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/coordinator/meta_client.go @@ -0,0 +1,36 @@ +package coordinator + +import ( + "time" + + "github.com/influxdata/influxdb/services/meta" + "github.com/influxdata/influxql" +) + +// MetaClient is an interface for accessing meta data. +type MetaClient interface { + CreateContinuousQuery(database, name, query string) error + CreateDatabase(name string) (*meta.DatabaseInfo, error) + CreateDatabaseWithRetentionPolicy(name string, spec *meta.RetentionPolicySpec) (*meta.DatabaseInfo, error) + CreateRetentionPolicy(database string, spec *meta.RetentionPolicySpec, makeDefault bool) (*meta.RetentionPolicyInfo, error) + CreateSubscription(database, rp, name, mode string, destinations []string) error + CreateUser(name, password string, admin bool) (meta.User, error) + Database(name string) *meta.DatabaseInfo + Databases() []meta.DatabaseInfo + DropShard(id uint64) error + DropContinuousQuery(database, name string) error + DropDatabase(name string) error + DropRetentionPolicy(database, name string) error + DropSubscription(database, rp, name string) error + DropUser(name string) error + RetentionPolicy(database, name string) (rpi *meta.RetentionPolicyInfo, err error) + SetAdminPrivilege(username string, admin bool) error + SetPrivilege(username, database string, p influxql.Privilege) error + ShardGroupsByTimeRange(database, policy string, min, max time.Time) (a []meta.ShardGroupInfo, err error) + TruncateShardGroups(t time.Time) error + UpdateRetentionPolicy(database, name string, rpu *meta.RetentionPolicyUpdate, makeDefault bool) error + UpdateUser(name, password string) error + UserPrivilege(username, database string) (*influxql.Privilege, error) + UserPrivileges(username string) (map[string]influxql.Privilege, error) + Users() []meta.UserInfo +} diff --git a/vendor/github.com/influxdata/influxdb/coordinator/meta_client_test.go b/vendor/github.com/influxdata/influxdb/coordinator/meta_client_test.go new file mode 100644 index 0000000..f1b17ba --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/coordinator/meta_client_test.go @@ -0,0 +1,162 @@ +package coordinator_test + +import ( + "time" + + "github.com/influxdata/influxdb/services/meta" + "github.com/influxdata/influxql" +) + +// MetaClient is a mockable implementation of cluster.MetaClient. 
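+// Each method delegates to the corresponding exported Fn field, so a test can
+// stub only the behaviour it needs, for example:
+//
+//	mc := &MetaClient{DatabaseFn: func(name string) *meta.DatabaseInfo { return nil }}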
+type MetaClient struct { + CreateContinuousQueryFn func(database, name, query string) error + CreateDatabaseFn func(name string) (*meta.DatabaseInfo, error) + CreateDatabaseWithRetentionPolicyFn func(name string, spec *meta.RetentionPolicySpec) (*meta.DatabaseInfo, error) + CreateRetentionPolicyFn func(database string, spec *meta.RetentionPolicySpec, makeDefault bool) (*meta.RetentionPolicyInfo, error) + CreateSubscriptionFn func(database, rp, name, mode string, destinations []string) error + CreateUserFn func(name, password string, admin bool) (meta.User, error) + DatabaseFn func(name string) *meta.DatabaseInfo + DatabasesFn func() []meta.DatabaseInfo + DataNodeFn func(id uint64) (*meta.NodeInfo, error) + DataNodesFn func() ([]meta.NodeInfo, error) + DeleteDataNodeFn func(id uint64) error + DeleteMetaNodeFn func(id uint64) error + DropContinuousQueryFn func(database, name string) error + DropDatabaseFn func(name string) error + DropRetentionPolicyFn func(database, name string) error + DropSubscriptionFn func(database, rp, name string) error + DropShardFn func(id uint64) error + DropUserFn func(name string) error + MetaNodesFn func() ([]meta.NodeInfo, error) + RetentionPolicyFn func(database, name string) (rpi *meta.RetentionPolicyInfo, err error) + SetAdminPrivilegeFn func(username string, admin bool) error + SetPrivilegeFn func(username, database string, p influxql.Privilege) error + ShardGroupsByTimeRangeFn func(database, policy string, min, max time.Time) (a []meta.ShardGroupInfo, err error) + TruncateShardGroupsFn func(t time.Time) error + UpdateRetentionPolicyFn func(database, name string, rpu *meta.RetentionPolicyUpdate, makeDefault bool) error + UpdateUserFn func(name, password string) error + UserPrivilegeFn func(username, database string) (*influxql.Privilege, error) + UserPrivilegesFn func(username string) (map[string]influxql.Privilege, error) + UsersFn func() []meta.UserInfo +} + +func (c *MetaClient) CreateContinuousQuery(database, name, query string) error { + return c.CreateContinuousQueryFn(database, name, query) +} + +func (c *MetaClient) CreateDatabase(name string) (*meta.DatabaseInfo, error) { + return c.CreateDatabaseFn(name) +} + +func (c *MetaClient) CreateDatabaseWithRetentionPolicy(name string, spec *meta.RetentionPolicySpec) (*meta.DatabaseInfo, error) { + return c.CreateDatabaseWithRetentionPolicyFn(name, spec) +} + +func (c *MetaClient) CreateRetentionPolicy(database string, spec *meta.RetentionPolicySpec, makeDefault bool) (*meta.RetentionPolicyInfo, error) { + return c.CreateRetentionPolicyFn(database, spec, makeDefault) +} + +func (c *MetaClient) DropShard(id uint64) error { + return c.DropShardFn(id) +} + +func (c *MetaClient) CreateSubscription(database, rp, name, mode string, destinations []string) error { + return c.CreateSubscriptionFn(database, rp, name, mode, destinations) +} + +func (c *MetaClient) CreateUser(name, password string, admin bool) (meta.User, error) { + return c.CreateUserFn(name, password, admin) +} + +func (c *MetaClient) Database(name string) *meta.DatabaseInfo { + return c.DatabaseFn(name) +} + +func (c *MetaClient) Databases() []meta.DatabaseInfo { + return c.DatabasesFn() +} + +func (c *MetaClient) DataNode(id uint64) (*meta.NodeInfo, error) { + return c.DataNodeFn(id) +} + +func (c *MetaClient) DataNodes() ([]meta.NodeInfo, error) { + return c.DataNodesFn() +} + +func (c *MetaClient) DeleteDataNode(id uint64) error { + return c.DeleteDataNodeFn(id) +} + +func (c *MetaClient) DeleteMetaNode(id uint64) error { + return 
c.DeleteMetaNodeFn(id) +} + +func (c *MetaClient) DropContinuousQuery(database, name string) error { + return c.DropContinuousQueryFn(database, name) +} + +func (c *MetaClient) DropDatabase(name string) error { + return c.DropDatabaseFn(name) +} + +func (c *MetaClient) DropRetentionPolicy(database, name string) error { + return c.DropRetentionPolicyFn(database, name) +} + +func (c *MetaClient) DropSubscription(database, rp, name string) error { + return c.DropSubscriptionFn(database, rp, name) +} + +func (c *MetaClient) DropUser(name string) error { + return c.DropUserFn(name) +} + +func (c *MetaClient) MetaNodes() ([]meta.NodeInfo, error) { + return c.MetaNodesFn() +} + +func (c *MetaClient) RetentionPolicy(database, name string) (rpi *meta.RetentionPolicyInfo, err error) { + return c.RetentionPolicyFn(database, name) +} + +func (c *MetaClient) SetAdminPrivilege(username string, admin bool) error { + return c.SetAdminPrivilegeFn(username, admin) +} + +func (c *MetaClient) SetPrivilege(username, database string, p influxql.Privilege) error { + return c.SetPrivilegeFn(username, database, p) +} + +func (c *MetaClient) ShardGroupsByTimeRange(database, policy string, min, max time.Time) (a []meta.ShardGroupInfo, err error) { + return c.ShardGroupsByTimeRangeFn(database, policy, min, max) +} + +func (c *MetaClient) TruncateShardGroups(t time.Time) error { + return c.TruncateShardGroupsFn(t) +} + +func (c *MetaClient) UpdateRetentionPolicy(database, name string, rpu *meta.RetentionPolicyUpdate, makeDefault bool) error { + return c.UpdateRetentionPolicyFn(database, name, rpu, makeDefault) +} + +func (c *MetaClient) UpdateUser(name, password string) error { + return c.UpdateUserFn(name, password) +} + +func (c *MetaClient) UserPrivilege(username, database string) (*influxql.Privilege, error) { + return c.UserPrivilegeFn(username, database) +} + +func (c *MetaClient) UserPrivileges(username string) (map[string]influxql.Privilege, error) { + return c.UserPrivilegesFn(username) +} + +func (c *MetaClient) Users() []meta.UserInfo { + return c.UsersFn() +} + +// DefaultMetaClientDatabaseFn returns a single database (db0) with a retention policy. +func DefaultMetaClientDatabaseFn(name string) *meta.DatabaseInfo { + return &meta.DatabaseInfo{Name: DefaultDatabase, DefaultRetentionPolicy: DefaultRetentionPolicy} +} diff --git a/vendor/github.com/influxdata/influxdb/coordinator/points_writer.go b/vendor/github.com/influxdata/influxdb/coordinator/points_writer.go new file mode 100644 index 0000000..08f3c69 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/coordinator/points_writer.go @@ -0,0 +1,402 @@ +package coordinator + +import ( + "errors" + "fmt" + "sort" + "sync" + "sync/atomic" + "time" + + "github.com/influxdata/influxdb" + "github.com/influxdata/influxdb/models" + "github.com/influxdata/influxdb/services/meta" + "github.com/influxdata/influxdb/tsdb" + "go.uber.org/zap" +) + +// The keys for statistics generated by the "write" module. +const ( + statWriteReq = "req" + statPointWriteReq = "pointReq" + statPointWriteReqLocal = "pointReqLocal" + statWriteOK = "writeOk" + statWriteDrop = "writeDrop" + statWriteTimeout = "writeTimeout" + statWriteErr = "writeError" + statSubWriteOK = "subWriteOk" + statSubWriteDrop = "subWriteDrop" +) + +var ( + // ErrTimeout is returned when a write times out. + ErrTimeout = errors.New("timeout") + + // ErrPartialWrite is returned when a write partially succeeds but does + // not meet the requested consistency level. 
+ ErrPartialWrite = errors.New("partial write") + + // ErrWriteFailed is returned when no writes succeeded. + ErrWriteFailed = errors.New("write failed") +) + +// PointsWriter handles writes across multiple local and remote data nodes. +type PointsWriter struct { + mu sync.RWMutex + closing chan struct{} + WriteTimeout time.Duration + Logger *zap.Logger + + Node *influxdb.Node + + MetaClient interface { + Database(name string) (di *meta.DatabaseInfo) + RetentionPolicy(database, policy string) (*meta.RetentionPolicyInfo, error) + CreateShardGroup(database, policy string, timestamp time.Time) (*meta.ShardGroupInfo, error) + } + + TSDBStore interface { + CreateShard(database, retentionPolicy string, shardID uint64, enabled bool) error + WriteToShard(shardID uint64, points []models.Point) error + } + + subPoints []chan<- *WritePointsRequest + + stats *WriteStatistics +} + +// WritePointsRequest represents a request to write point data to the cluster. +type WritePointsRequest struct { + Database string + RetentionPolicy string + Points []models.Point +} + +// AddPoint adds a point to the WritePointRequest with field key 'value' +func (w *WritePointsRequest) AddPoint(name string, value interface{}, timestamp time.Time, tags map[string]string) { + pt, err := models.NewPoint( + name, models.NewTags(tags), map[string]interface{}{"value": value}, timestamp, + ) + if err != nil { + return + } + w.Points = append(w.Points, pt) +} + +// NewPointsWriter returns a new instance of PointsWriter for a node. +func NewPointsWriter() *PointsWriter { + return &PointsWriter{ + closing: make(chan struct{}), + WriteTimeout: DefaultWriteTimeout, + Logger: zap.NewNop(), + stats: &WriteStatistics{}, + } +} + +// ShardMapping contains a mapping of shards to points. +type ShardMapping struct { + n int + Points map[uint64][]models.Point // The points associated with a shard ID + Shards map[uint64]*meta.ShardInfo // The shards that have been mapped, keyed by shard ID + Dropped []models.Point // Points that were dropped +} + +// NewShardMapping creates an empty ShardMapping. +func NewShardMapping(n int) *ShardMapping { + return &ShardMapping{ + n: n, + Points: map[uint64][]models.Point{}, + Shards: map[uint64]*meta.ShardInfo{}, + } +} + +// MapPoint adds the point to the ShardMapping, associated with the given shardInfo. +func (s *ShardMapping) MapPoint(shardInfo *meta.ShardInfo, p models.Point) { + if cap(s.Points[shardInfo.ID]) < s.n { + s.Points[shardInfo.ID] = make([]models.Point, 0, s.n) + } + s.Points[shardInfo.ID] = append(s.Points[shardInfo.ID], p) + s.Shards[shardInfo.ID] = shardInfo +} + +// Open opens the communication channel with the point writer. +func (w *PointsWriter) Open() error { + w.mu.Lock() + defer w.mu.Unlock() + w.closing = make(chan struct{}) + return nil +} + +// Close closes the communication channel with the point writer. +func (w *PointsWriter) Close() error { + w.mu.Lock() + defer w.mu.Unlock() + if w.closing != nil { + close(w.closing) + } + if w.subPoints != nil { + // 'nil' channels always block so this makes the + // select statement in WritePoints hit its default case + // dropping any in-flight writes. + w.subPoints = nil + } + return nil +} + +func (w *PointsWriter) AddWriteSubscriber(c chan<- *WritePointsRequest) { + w.subPoints = append(w.subPoints, c) +} + +// WithLogger sets the Logger on w. +func (w *PointsWriter) WithLogger(log *zap.Logger) { + w.Logger = log.With(zap.String("service", "write")) +} + +// WriteStatistics keeps statistics related to the PointsWriter. 
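+// Counters are incremented with sync/atomic, so they should only be read via
+// atomic loads (see Statistics below).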
+type WriteStatistics struct { + WriteReq int64 + PointWriteReq int64 + PointWriteReqLocal int64 + WriteOK int64 + WriteDropped int64 + WriteTimeout int64 + WriteErr int64 + SubWriteOK int64 + SubWriteDrop int64 +} + +// Statistics returns statistics for periodic monitoring. +func (w *PointsWriter) Statistics(tags map[string]string) []models.Statistic { + return []models.Statistic{{ + Name: "write", + Tags: tags, + Values: map[string]interface{}{ + statWriteReq: atomic.LoadInt64(&w.stats.WriteReq), + statPointWriteReq: atomic.LoadInt64(&w.stats.PointWriteReq), + statPointWriteReqLocal: atomic.LoadInt64(&w.stats.PointWriteReqLocal), + statWriteOK: atomic.LoadInt64(&w.stats.WriteOK), + statWriteDrop: atomic.LoadInt64(&w.stats.WriteDropped), + statWriteTimeout: atomic.LoadInt64(&w.stats.WriteTimeout), + statWriteErr: atomic.LoadInt64(&w.stats.WriteErr), + statSubWriteOK: atomic.LoadInt64(&w.stats.SubWriteOK), + statSubWriteDrop: atomic.LoadInt64(&w.stats.SubWriteDrop), + }, + }} +} + +// MapShards maps the points contained in wp to a ShardMapping. If a point +// maps to a shard group or shard that does not currently exist, it will be +// created before returning the mapping. +func (w *PointsWriter) MapShards(wp *WritePointsRequest) (*ShardMapping, error) { + rp, err := w.MetaClient.RetentionPolicy(wp.Database, wp.RetentionPolicy) + if err != nil { + return nil, err + } else if rp == nil { + return nil, influxdb.ErrRetentionPolicyNotFound(wp.RetentionPolicy) + } + + // Holds all the shard groups and shards that are required for writes. + list := make(sgList, 0, 8) + min := time.Unix(0, models.MinNanoTime) + if rp.Duration > 0 { + min = time.Now().Add(-rp.Duration) + } + + for _, p := range wp.Points { + // Either the point is outside the scope of the RP, or we already have + // a suitable shard group for the point. + if p.Time().Before(min) || list.Covers(p.Time()) { + continue + } + + // No shard groups overlap with the point's time, so we will create + // a new shard group for this point. + sg, err := w.MetaClient.CreateShardGroup(wp.Database, wp.RetentionPolicy, p.Time()) + if err != nil { + return nil, err + } + + if sg == nil { + return nil, errors.New("nil shard group") + } + list = list.Append(*sg) + } + + mapping := NewShardMapping(len(wp.Points)) + for _, p := range wp.Points { + sg := list.ShardGroupAt(p.Time()) + if sg == nil { + // We didn't create a shard group because the point was outside the + // scope of the RP. + mapping.Dropped = append(mapping.Dropped, p) + atomic.AddInt64(&w.stats.WriteDropped, 1) + continue + } + + sh := sg.ShardFor(p.HashID()) + mapping.MapPoint(&sh, p) + } + return mapping, nil +} + +// sgList is a wrapper around a meta.ShardGroupInfos where we can also check +// if a given time is covered by any of the shard groups in the list. +type sgList meta.ShardGroupInfos + +func (l sgList) Covers(t time.Time) bool { + if len(l) == 0 { + return false + } + return l.ShardGroupAt(t) != nil +} + +// ShardGroupAt attempts to find a shard group that could contain a point +// at the given time. +// +// Shard groups are sorted first according to end time, and then according +// to start time. Therefore, if there are multiple shard groups that match +// this point's time they will be preferred in this order: +// +// - a shard group with the earliest end time; +// - (assuming identical end times) the shard group with the earliest start time. 
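+//
+// Append keeps the list sorted, which lets the lookup below use a binary
+// search over shard group end times.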
+func (l sgList) ShardGroupAt(t time.Time) *meta.ShardGroupInfo { + idx := sort.Search(len(l), func(i int) bool { return l[i].EndTime.After(t) }) + + // We couldn't find a shard group the point falls into. + if idx == len(l) || t.Before(l[idx].StartTime) { + return nil + } + return &l[idx] +} + +// Append appends a shard group to the list, and returns a sorted list. +func (l sgList) Append(sgi meta.ShardGroupInfo) sgList { + next := append(l, sgi) + sort.Sort(meta.ShardGroupInfos(next)) + return next +} + +// WritePointsInto is a copy of WritePoints that uses a tsdb structure instead of +// a cluster structure for information. This is to avoid a circular dependency. +func (w *PointsWriter) WritePointsInto(p *IntoWriteRequest) error { + return w.WritePointsPrivileged(p.Database, p.RetentionPolicy, models.ConsistencyLevelOne, p.Points) +} + +// WritePoints writes the data to the underlying storage. consitencyLevel and user are only used for clustered scenarios +func (w *PointsWriter) WritePoints(database, retentionPolicy string, consistencyLevel models.ConsistencyLevel, user meta.User, points []models.Point) error { + return w.WritePointsPrivileged(database, retentionPolicy, consistencyLevel, points) +} + +// WritePointsPrivileged writes the data to the underlying storage, consitencyLevel is only used for clustered scenarios +func (w *PointsWriter) WritePointsPrivileged(database, retentionPolicy string, consistencyLevel models.ConsistencyLevel, points []models.Point) error { + atomic.AddInt64(&w.stats.WriteReq, 1) + atomic.AddInt64(&w.stats.PointWriteReq, int64(len(points))) + + if retentionPolicy == "" { + db := w.MetaClient.Database(database) + if db == nil { + return influxdb.ErrDatabaseNotFound(database) + } + retentionPolicy = db.DefaultRetentionPolicy + } + + shardMappings, err := w.MapShards(&WritePointsRequest{Database: database, RetentionPolicy: retentionPolicy, Points: points}) + if err != nil { + return err + } + + // Write each shard in it's own goroutine and return as soon as one fails. + ch := make(chan error, len(shardMappings.Points)) + for shardID, points := range shardMappings.Points { + go func(shard *meta.ShardInfo, database, retentionPolicy string, points []models.Point) { + err := w.writeToShard(shard, database, retentionPolicy, points) + if err == tsdb.ErrShardDeletion { + err = tsdb.PartialWriteError{Reason: fmt.Sprintf("shard %d is pending deletion", shard.ID), Dropped: len(points)} + } + ch <- err + }(shardMappings.Shards[shardID], database, retentionPolicy, points) + } + + // Send points to subscriptions if possible. 
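+	// Sends are non-blocking: a full subscriber channel counts as a drop
+	// rather than stalling the write path.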
+ var ok, dropped int64 + pts := &WritePointsRequest{Database: database, RetentionPolicy: retentionPolicy, Points: points} + // We need to lock just in case the channel is about to be nil'ed + w.mu.RLock() + for _, ch := range w.subPoints { + select { + case ch <- pts: + ok++ + default: + dropped++ + } + } + w.mu.RUnlock() + + if ok > 0 { + atomic.AddInt64(&w.stats.SubWriteOK, ok) + } + + if dropped > 0 { + atomic.AddInt64(&w.stats.SubWriteDrop, dropped) + } + + if err == nil && len(shardMappings.Dropped) > 0 { + err = tsdb.PartialWriteError{Reason: "points beyond retention policy", Dropped: len(shardMappings.Dropped)} + + } + timeout := time.NewTimer(w.WriteTimeout) + defer timeout.Stop() + for range shardMappings.Points { + select { + case <-w.closing: + return ErrWriteFailed + case <-timeout.C: + atomic.AddInt64(&w.stats.WriteTimeout, 1) + // return timeout error to caller + return ErrTimeout + case err := <-ch: + if err != nil { + return err + } + } + } + return err +} + +// writeToShards writes points to a shard. +func (w *PointsWriter) writeToShard(shard *meta.ShardInfo, database, retentionPolicy string, points []models.Point) error { + atomic.AddInt64(&w.stats.PointWriteReqLocal, int64(len(points))) + + err := w.TSDBStore.WriteToShard(shard.ID, points) + if err == nil { + atomic.AddInt64(&w.stats.WriteOK, 1) + return nil + } + + // If this is a partial write error, that is also ok. + if _, ok := err.(tsdb.PartialWriteError); ok { + atomic.AddInt64(&w.stats.WriteErr, 1) + return err + } + + // If we've written to shard that should exist on the current node, but the store has + // not actually created this shard, tell it to create it and retry the write + if err == tsdb.ErrShardNotFound { + err = w.TSDBStore.CreateShard(database, retentionPolicy, shard.ID, true) + if err != nil { + w.Logger.Info("Write failed", zap.Uint64("shard", shard.ID), zap.Error(err)) + + atomic.AddInt64(&w.stats.WriteErr, 1) + return err + } + } + err = w.TSDBStore.WriteToShard(shard.ID, points) + if err != nil { + w.Logger.Info("Write failed", zap.Uint64("shard", shard.ID), zap.Error(err)) + atomic.AddInt64(&w.stats.WriteErr, 1) + return err + } + + atomic.AddInt64(&w.stats.WriteOK, 1) + return nil +} diff --git a/vendor/github.com/influxdata/influxdb/coordinator/points_writer_internal_test.go b/vendor/github.com/influxdata/influxdb/coordinator/points_writer_internal_test.go new file mode 100644 index 0000000..ec6a6ca --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/coordinator/points_writer_internal_test.go @@ -0,0 +1,46 @@ +package coordinator + +import ( + "testing" + "time" +) + +func TestSgList_ShardGroupAt(t *testing.T) { + base := time.Date(2016, 10, 19, 0, 0, 0, 0, time.UTC) + day := func(n int) time.Time { + return base.Add(time.Duration(24*n) * time.Hour) + } + + list := sgList{ + {ID: 1, StartTime: day(0), EndTime: day(1)}, + {ID: 2, StartTime: day(1), EndTime: day(2)}, + {ID: 3, StartTime: day(2), EndTime: day(3)}, + // SG day 3 to day 4 missing... 
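+		// so timestamps in that gap are expected to map to no shard group (ID 0 in the examples below).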
+ {ID: 4, StartTime: day(4), EndTime: day(5)}, + {ID: 5, StartTime: day(5), EndTime: day(6)}, + } + + examples := []struct { + T time.Time + ShardGroupID uint64 // 0 will indicate we don't expect a shard group + }{ + {T: base.Add(-time.Minute), ShardGroupID: 0}, // Before any SG + {T: day(0), ShardGroupID: 1}, + {T: day(0).Add(time.Minute), ShardGroupID: 1}, + {T: day(1), ShardGroupID: 2}, + {T: day(3).Add(time.Minute), ShardGroupID: 0}, // No matching SG + {T: day(5).Add(time.Hour), ShardGroupID: 5}, + } + + for i, example := range examples { + sg := list.ShardGroupAt(example.T) + var id uint64 + if sg != nil { + id = sg.ID + } + + if got, exp := id, example.ShardGroupID; got != exp { + t.Errorf("[Example %d] got %v, expected %v", i+1, got, exp) + } + } +} diff --git a/vendor/github.com/influxdata/influxdb/coordinator/points_writer_test.go b/vendor/github.com/influxdata/influxdb/coordinator/points_writer_test.go new file mode 100644 index 0000000..83bab40 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/coordinator/points_writer_test.go @@ -0,0 +1,683 @@ +package coordinator_test + +import ( + "fmt" + "reflect" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/influxdata/influxdb" + "github.com/influxdata/influxdb/coordinator" + "github.com/influxdata/influxdb/models" + "github.com/influxdata/influxdb/services/meta" + "github.com/influxdata/influxdb/tsdb" +) + +// TODO(benbjohnson): Rewrite tests to use cluster_test.MetaClient. + +// Ensures the points writer maps a single point to a single shard. +func TestPointsWriter_MapShards_One(t *testing.T) { + ms := PointsWriterMetaClient{} + rp := NewRetentionPolicy("myp", time.Hour, 3) + + ms.NodeIDFn = func() uint64 { return 1 } + ms.RetentionPolicyFn = func(db, retentionPolicy string) (*meta.RetentionPolicyInfo, error) { + return rp, nil + } + + ms.CreateShardGroupIfNotExistsFn = func(database, policy string, timestamp time.Time) (*meta.ShardGroupInfo, error) { + return &rp.ShardGroups[0], nil + } + + c := coordinator.PointsWriter{MetaClient: ms} + pr := &coordinator.WritePointsRequest{ + Database: "mydb", + RetentionPolicy: "myrp", + } + pr.AddPoint("cpu", 1.0, time.Now(), nil) + + var ( + shardMappings *coordinator.ShardMapping + err error + ) + if shardMappings, err = c.MapShards(pr); err != nil { + t.Fatalf("unexpected an error: %v", err) + } + + if exp := 1; len(shardMappings.Points) != exp { + t.Errorf("MapShards() len mismatch. got %v, exp %v", len(shardMappings.Points), exp) + } +} + +// Ensures the points writer maps to a new shard group when the shard duration +// is changed. 
+func TestPointsWriter_MapShards_AlterShardDuration(t *testing.T) { + ms := PointsWriterMetaClient{} + rp := NewRetentionPolicy("myp", time.Hour, 3) + + ms.NodeIDFn = func() uint64 { return 1 } + ms.RetentionPolicyFn = func(db, retentionPolicy string) (*meta.RetentionPolicyInfo, error) { + return rp, nil + } + + var ( + i int + now = time.Now() + ) + + ms.CreateShardGroupIfNotExistsFn = func(database, policy string, timestamp time.Time) (*meta.ShardGroupInfo, error) { + sg := []meta.ShardGroupInfo{ + meta.ShardGroupInfo{ + Shards: make([]meta.ShardInfo, 1), + StartTime: now, EndTime: now.Add(rp.Duration).Add(-1), + }, + meta.ShardGroupInfo{ + Shards: make([]meta.ShardInfo, 1), + StartTime: now.Add(time.Hour), EndTime: now.Add(3 * time.Hour).Add(rp.Duration).Add(-1), + }, + }[i] + i++ + return &sg, nil + } + + c := coordinator.NewPointsWriter() + c.MetaClient = ms + + pr := &coordinator.WritePointsRequest{ + Database: "mydb", + RetentionPolicy: "myrp", + } + pr.AddPoint("cpu", 1.0, now, nil) + pr.AddPoint("cpu", 2.0, now.Add(2*time.Second), nil) + + var ( + shardMappings *coordinator.ShardMapping + err error + ) + if shardMappings, err = c.MapShards(pr); err != nil { + t.Fatalf("unexpected an error: %v", err) + } + + if got, exp := len(shardMappings.Points[0]), 2; got != exp { + t.Fatalf("got %d point(s), expected %d", got, exp) + } + + if got, exp := len(shardMappings.Shards), 1; got != exp { + t.Errorf("got %d shard(s), expected %d", got, exp) + } + + // Now we alter the retention policy duration. + rp.ShardGroupDuration = 3 * time.Hour + + pr = &coordinator.WritePointsRequest{ + Database: "mydb", + RetentionPolicy: "myrp", + } + pr.AddPoint("cpu", 1.0, now.Add(2*time.Hour), nil) + + // Point is beyond previous shard group so a new shard group should be + // created. + if _, err = c.MapShards(pr); err != nil { + t.Fatalf("unexpected an error: %v", err) + } + + // We can check value of i since it's only incremeneted when a shard group + // is created. + if got, exp := i, 2; got != exp { + t.Fatal("new shard group was not created, expected it to be") + } +} + +// Ensures the points writer maps a multiple points across shard group boundaries. 
+func TestPointsWriter_MapShards_Multiple(t *testing.T) { + ms := PointsWriterMetaClient{} + rp := NewRetentionPolicy("myp", time.Hour, 3) + rp.ShardGroupDuration = time.Hour + AttachShardGroupInfo(rp, []meta.ShardOwner{ + {NodeID: 1}, + {NodeID: 2}, + {NodeID: 3}, + }) + AttachShardGroupInfo(rp, []meta.ShardOwner{ + {NodeID: 1}, + {NodeID: 2}, + {NodeID: 3}, + }) + + ms.NodeIDFn = func() uint64 { return 1 } + ms.RetentionPolicyFn = func(db, retentionPolicy string) (*meta.RetentionPolicyInfo, error) { + return rp, nil + } + + ms.CreateShardGroupIfNotExistsFn = func(database, policy string, timestamp time.Time) (*meta.ShardGroupInfo, error) { + for i, sg := range rp.ShardGroups { + if timestamp.Equal(sg.StartTime) || timestamp.After(sg.StartTime) && timestamp.Before(sg.EndTime) { + return &rp.ShardGroups[i], nil + } + } + panic("should not get here") + } + + c := coordinator.NewPointsWriter() + c.MetaClient = ms + defer c.Close() + pr := &coordinator.WritePointsRequest{ + Database: "mydb", + RetentionPolicy: "myrp", + } + + // Three points that range over the shardGroup duration (1h) and should map to two + // distinct shards + pr.AddPoint("cpu", 1.0, time.Now(), nil) + pr.AddPoint("cpu", 2.0, time.Now().Add(time.Hour), nil) + pr.AddPoint("cpu", 3.0, time.Now().Add(time.Hour+time.Second), nil) + + var ( + shardMappings *coordinator.ShardMapping + err error + ) + if shardMappings, err = c.MapShards(pr); err != nil { + t.Fatalf("unexpected an error: %v", err) + } + + if exp := 2; len(shardMappings.Points) != exp { + t.Errorf("MapShards() len mismatch. got %v, exp %v", len(shardMappings.Points), exp) + } + + for _, points := range shardMappings.Points { + // First shard should have 1 point w/ first point added + if len(points) == 1 && points[0].Time() != pr.Points[0].Time() { + t.Fatalf("MapShards() value mismatch. got %v, exp %v", points[0].Time(), pr.Points[0].Time()) + } + + // Second shard should have the last two points added + if len(points) == 2 && points[0].Time() != pr.Points[1].Time() { + t.Fatalf("MapShards() value mismatch. got %v, exp %v", points[0].Time(), pr.Points[1].Time()) + } + + if len(points) == 2 && points[1].Time() != pr.Points[2].Time() { + t.Fatalf("MapShards() value mismatch. got %v, exp %v", points[1].Time(), pr.Points[2].Time()) + } + } +} + +// Ensures the points writer does not map points beyond the retention policy. +func TestPointsWriter_MapShards_Invalid(t *testing.T) { + ms := PointsWriterMetaClient{} + rp := NewRetentionPolicy("myp", time.Hour, 3) + + ms.RetentionPolicyFn = func(db, retentionPolicy string) (*meta.RetentionPolicyInfo, error) { + return rp, nil + } + + ms.CreateShardGroupIfNotExistsFn = func(database, policy string, timestamp time.Time) (*meta.ShardGroupInfo, error) { + return &rp.ShardGroups[0], nil + } + + c := coordinator.NewPointsWriter() + c.MetaClient = ms + defer c.Close() + pr := &coordinator.WritePointsRequest{ + Database: "mydb", + RetentionPolicy: "myrp", + } + + // Add a point that goes beyond the current retention policy. + pr.AddPoint("cpu", 1.0, time.Now().Add(-2*time.Hour), nil) + + var ( + shardMappings *coordinator.ShardMapping + err error + ) + if shardMappings, err = c.MapShards(pr); err != nil { + t.Fatalf("unexpected an error: %v", err) + } + + if got, exp := len(shardMappings.Points), 0; got != exp { + t.Errorf("MapShards() len mismatch. 
got %v, exp %v", got, exp) + } + + if got, exp := len(shardMappings.Dropped), 1; got != exp { + t.Fatalf("MapShard() dropped mismatch: got %v, exp %v", got, exp) + } +} + +func TestPointsWriter_WritePoints(t *testing.T) { + tests := []struct { + name string + database string + retentionPolicy string + + // the responses returned by each shard write call. node ID 1 = pos 0 + err []error + expErr error + }{ + { + name: "write one success", + database: "mydb", + retentionPolicy: "myrp", + err: []error{nil, nil, nil}, + expErr: nil, + }, + + // Write to non-existent database + { + name: "write to non-existent database", + database: "doesnt_exist", + retentionPolicy: "", + err: []error{nil, nil, nil}, + expErr: fmt.Errorf("database not found: doesnt_exist"), + }, + } + + for _, test := range tests { + + pr := &coordinator.WritePointsRequest{ + Database: test.database, + RetentionPolicy: test.retentionPolicy, + } + + // Ensure that the test shard groups are created before the points + // are created. + ms := NewPointsWriterMetaClient() + + // Three points that range over the shardGroup duration (1h) and should map to two + // distinct shards + pr.AddPoint("cpu", 1.0, time.Now(), nil) + pr.AddPoint("cpu", 2.0, time.Now().Add(time.Hour), nil) + pr.AddPoint("cpu", 3.0, time.Now().Add(time.Hour+time.Second), nil) + + // copy to prevent data race + theTest := test + sm := coordinator.NewShardMapping(16) + sm.MapPoint( + &meta.ShardInfo{ID: uint64(1), Owners: []meta.ShardOwner{ + {NodeID: 1}, + {NodeID: 2}, + {NodeID: 3}, + }}, + pr.Points[0]) + sm.MapPoint( + &meta.ShardInfo{ID: uint64(2), Owners: []meta.ShardOwner{ + {NodeID: 1}, + {NodeID: 2}, + {NodeID: 3}, + }}, + pr.Points[1]) + sm.MapPoint( + &meta.ShardInfo{ID: uint64(2), Owners: []meta.ShardOwner{ + {NodeID: 1}, + {NodeID: 2}, + {NodeID: 3}, + }}, + pr.Points[2]) + + // Local coordinator.Node ShardWriter + // lock on the write increment since these functions get called in parallel + var mu sync.Mutex + + store := &fakeStore{ + WriteFn: func(shardID uint64, points []models.Point) error { + mu.Lock() + defer mu.Unlock() + return theTest.err[0] + }, + } + + ms.DatabaseFn = func(database string) *meta.DatabaseInfo { + return nil + } + ms.NodeIDFn = func() uint64 { return 1 } + + subPoints := make(chan *coordinator.WritePointsRequest, 1) + sub := Subscriber{} + sub.PointsFn = func() chan<- *coordinator.WritePointsRequest { + return subPoints + } + + c := coordinator.NewPointsWriter() + c.MetaClient = ms + c.TSDBStore = store + c.AddWriteSubscriber(sub.Points()) + c.Node = &influxdb.Node{ID: 1} + + c.Open() + defer c.Close() + + err := c.WritePointsPrivileged(pr.Database, pr.RetentionPolicy, models.ConsistencyLevelOne, pr.Points) + if err == nil && test.expErr != nil { + t.Errorf("PointsWriter.WritePointsPrivileged(): '%s' error: got %v, exp %v", test.name, err, test.expErr) + } + + if err != nil && test.expErr == nil { + t.Errorf("PointsWriter.WritePointsPrivileged(): '%s' error: got %v, exp %v", test.name, err, test.expErr) + } + if err != nil && test.expErr != nil && err.Error() != test.expErr.Error() { + t.Errorf("PointsWriter.WritePointsPrivileged(): '%s' error: got %v, exp %v", test.name, err, test.expErr) + } + if test.expErr == nil { + select { + case p := <-subPoints: + if !reflect.DeepEqual(p, pr) { + t.Errorf("PointsWriter.WritePointsPrivileged(): '%s' error: unexpected WritePointsRequest got %v, exp %v", test.name, p, pr) + } + default: + t.Errorf("PointsWriter.WritePointsPrivileged(): '%s' error: Subscriber.Points not called", 
test.name) + } + } + } +} + +func TestPointsWriter_WritePoints_Dropped(t *testing.T) { + pr := &coordinator.WritePointsRequest{ + Database: "mydb", + RetentionPolicy: "myrp", + } + + // Ensure that the test shard groups are created before the points + // are created. + ms := NewPointsWriterMetaClient() + + // Three points that range over the shardGroup duration (1h) and should map to two + // distinct shards + pr.AddPoint("cpu", 1.0, time.Now().Add(-24*time.Hour), nil) + + // copy to prevent data race + sm := coordinator.NewShardMapping(16) + + // ShardMapper dropped this point + sm.Dropped = append(sm.Dropped, pr.Points[0]) + + // Local coordinator.Node ShardWriter + // lock on the write increment since these functions get called in parallel + var mu sync.Mutex + + store := &fakeStore{ + WriteFn: func(shardID uint64, points []models.Point) error { + mu.Lock() + defer mu.Unlock() + return nil + }, + } + + ms.DatabaseFn = func(database string) *meta.DatabaseInfo { + return nil + } + ms.NodeIDFn = func() uint64 { return 1 } + + subPoints := make(chan *coordinator.WritePointsRequest, 1) + sub := Subscriber{} + sub.PointsFn = func() chan<- *coordinator.WritePointsRequest { + return subPoints + } + + c := coordinator.NewPointsWriter() + c.MetaClient = ms + c.TSDBStore = store + c.AddWriteSubscriber(sub.Points()) + c.Node = &influxdb.Node{ID: 1} + + c.Open() + defer c.Close() + + err := c.WritePointsPrivileged(pr.Database, pr.RetentionPolicy, models.ConsistencyLevelOne, pr.Points) + if _, ok := err.(tsdb.PartialWriteError); !ok { + t.Errorf("PointsWriter.WritePoints(): got %v, exp %v", err, tsdb.PartialWriteError{}) + } +} + +type fakePointsWriter struct { + WritePointsIntoFn func(*coordinator.IntoWriteRequest) error +} + +func (f *fakePointsWriter) WritePointsInto(req *coordinator.IntoWriteRequest) error { + return f.WritePointsIntoFn(req) +} + +func TestBufferedPointsWriter(t *testing.T) { + db := "db0" + rp := "rp0" + capacity := 10000 + + writePointsIntoCnt := 0 + pointsWritten := []models.Point{} + + reset := func() { + writePointsIntoCnt = 0 + pointsWritten = pointsWritten[:0] + } + + fakeWriter := &fakePointsWriter{ + WritePointsIntoFn: func(req *coordinator.IntoWriteRequest) error { + writePointsIntoCnt++ + pointsWritten = append(pointsWritten, req.Points...) + return nil + }, + } + + w := coordinator.NewBufferedPointsWriter(fakeWriter, db, rp, capacity) + + // Test that capacity and length are correct for new buffered writer. + if w.Cap() != capacity { + t.Fatalf("exp %d, got %d", capacity, w.Cap()) + } else if w.Len() != 0 { + t.Fatalf("exp %d, got %d", 0, w.Len()) + } + + // Test flushing an empty buffer. + if err := w.Flush(); err != nil { + t.Fatal(err) + } else if writePointsIntoCnt > 0 { + t.Fatalf("exp 0, got %d", writePointsIntoCnt) + } + + // Test writing zero points. + if err := w.WritePointsInto(&coordinator.IntoWriteRequest{ + Database: db, + RetentionPolicy: rp, + Points: []models.Point{}, + }); err != nil { + t.Fatal(err) + } else if writePointsIntoCnt > 0 { + t.Fatalf("exp 0, got %d", writePointsIntoCnt) + } else if w.Len() > 0 { + t.Fatalf("exp 0, got %d", w.Len()) + } + + // Test writing single large bunch of points points. 
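+	// numPoints is 5.5x the buffer capacity, so the write below should trigger
+	// five full flushes and leave half a buffer (capacity/2 points) unflushed.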
+ req := coordinator.WritePointsRequest{ + Database: db, + RetentionPolicy: rp, + } + + numPoints := int(float64(capacity) * 5.5) + for i := 0; i < numPoints; i++ { + req.AddPoint("cpu", float64(i), time.Now().Add(time.Duration(i)*time.Second), nil) + } + + r := coordinator.IntoWriteRequest(req) + if err := w.WritePointsInto(&r); err != nil { + t.Fatal(err) + } else if writePointsIntoCnt != 5 { + t.Fatalf("exp 5, got %d", writePointsIntoCnt) + } else if w.Len() != capacity/2 { + t.Fatalf("exp %d, got %d", capacity/2, w.Len()) + } else if len(pointsWritten) != numPoints-capacity/2 { + t.Fatalf("exp %d, got %d", numPoints-capacity/2, len(pointsWritten)) + } + + if err := w.Flush(); err != nil { + t.Fatal(err) + } else if writePointsIntoCnt != 6 { + t.Fatalf("exp 6, got %d", writePointsIntoCnt) + } else if w.Len() != 0 { + t.Fatalf("exp 0, got %d", w.Len()) + } else if len(pointsWritten) != numPoints { + t.Fatalf("exp %d, got %d", numPoints, len(pointsWritten)) + } else if !reflect.DeepEqual(r.Points, pointsWritten) { + t.Fatal("points don't match") + } + + reset() + + // Test writing points one at a time. + for i, _ := range r.Points { + if err := w.WritePointsInto(&coordinator.IntoWriteRequest{ + Database: db, + RetentionPolicy: rp, + Points: r.Points[i : i+1], + }); err != nil { + t.Fatal(err) + } + } + + if err := w.Flush(); err != nil { + t.Fatal(err) + } else if writePointsIntoCnt != 6 { + t.Fatalf("exp 6, got %d", writePointsIntoCnt) + } else if w.Len() != 0 { + t.Fatalf("exp 0, got %d", w.Len()) + } else if len(pointsWritten) != numPoints { + t.Fatalf("exp %d, got %d", numPoints, len(pointsWritten)) + } else if !reflect.DeepEqual(r.Points, pointsWritten) { + t.Fatal("points don't match") + } +} + +var shardID uint64 + +type fakeStore struct { + WriteFn func(shardID uint64, points []models.Point) error + CreateShardfn func(database, retentionPolicy string, shardID uint64, enabled bool) error +} + +func (f *fakeStore) WriteToShard(shardID uint64, points []models.Point) error { + return f.WriteFn(shardID, points) +} + +func (f *fakeStore) CreateShard(database, retentionPolicy string, shardID uint64, enabled bool) error { + return f.CreateShardfn(database, retentionPolicy, shardID, enabled) +} + +func NewPointsWriterMetaClient() *PointsWriterMetaClient { + ms := &PointsWriterMetaClient{} + rp := NewRetentionPolicy("myp", time.Hour, 3) + AttachShardGroupInfo(rp, []meta.ShardOwner{ + {NodeID: 1}, + {NodeID: 2}, + {NodeID: 3}, + }) + AttachShardGroupInfo(rp, []meta.ShardOwner{ + {NodeID: 1}, + {NodeID: 2}, + {NodeID: 3}, + }) + + ms.RetentionPolicyFn = func(db, retentionPolicy string) (*meta.RetentionPolicyInfo, error) { + return rp, nil + } + + ms.CreateShardGroupIfNotExistsFn = func(database, policy string, timestamp time.Time) (*meta.ShardGroupInfo, error) { + for i, sg := range rp.ShardGroups { + if timestamp.Equal(sg.StartTime) || timestamp.After(sg.StartTime) && timestamp.Before(sg.EndTime) { + return &rp.ShardGroups[i], nil + } + } + panic("should not get here") + } + return ms +} + +type PointsWriterMetaClient struct { + NodeIDFn func() uint64 + RetentionPolicyFn func(database, name string) (*meta.RetentionPolicyInfo, error) + CreateShardGroupIfNotExistsFn func(database, policy string, timestamp time.Time) (*meta.ShardGroupInfo, error) + DatabaseFn func(database string) *meta.DatabaseInfo + ShardOwnerFn func(shardID uint64) (string, string, *meta.ShardGroupInfo) +} + +func (m PointsWriterMetaClient) NodeID() uint64 { return m.NodeIDFn() } + +func (m PointsWriterMetaClient) 
RetentionPolicy(database, name string) (*meta.RetentionPolicyInfo, error) { + return m.RetentionPolicyFn(database, name) +} + +func (m PointsWriterMetaClient) CreateShardGroup(database, policy string, timestamp time.Time) (*meta.ShardGroupInfo, error) { + return m.CreateShardGroupIfNotExistsFn(database, policy, timestamp) +} + +func (m PointsWriterMetaClient) Database(database string) *meta.DatabaseInfo { + return m.DatabaseFn(database) +} + +func (m PointsWriterMetaClient) ShardOwner(shardID uint64) (string, string, *meta.ShardGroupInfo) { + return m.ShardOwnerFn(shardID) +} + +type Subscriber struct { + PointsFn func() chan<- *coordinator.WritePointsRequest +} + +func (s Subscriber) Points() chan<- *coordinator.WritePointsRequest { + return s.PointsFn() +} + +func NewRetentionPolicy(name string, duration time.Duration, nodeCount int) *meta.RetentionPolicyInfo { + shards := []meta.ShardInfo{} + owners := []meta.ShardOwner{} + for i := 1; i <= nodeCount; i++ { + owners = append(owners, meta.ShardOwner{NodeID: uint64(i)}) + } + + // each node is fully replicated with each other + shards = append(shards, meta.ShardInfo{ + ID: nextShardID(), + Owners: owners, + }) + + start := time.Now() + rp := &meta.RetentionPolicyInfo{ + Name: "myrp", + ReplicaN: nodeCount, + Duration: duration, + ShardGroupDuration: duration, + ShardGroups: []meta.ShardGroupInfo{ + meta.ShardGroupInfo{ + ID: nextShardID(), + StartTime: start, + EndTime: start.Add(duration).Add(-1), + Shards: shards, + }, + }, + } + return rp +} + +func AttachShardGroupInfo(rp *meta.RetentionPolicyInfo, owners []meta.ShardOwner) { + var startTime, endTime time.Time + if len(rp.ShardGroups) == 0 { + startTime = time.Now() + } else { + startTime = rp.ShardGroups[len(rp.ShardGroups)-1].StartTime.Add(rp.ShardGroupDuration) + } + endTime = startTime.Add(rp.ShardGroupDuration).Add(-1) + + sh := meta.ShardGroupInfo{ + ID: uint64(len(rp.ShardGroups) + 1), + StartTime: startTime, + EndTime: endTime, + Shards: []meta.ShardInfo{ + meta.ShardInfo{ + ID: nextShardID(), + Owners: owners, + }, + }, + } + rp.ShardGroups = append(rp.ShardGroups, sh) +} + +func nextShardID() uint64 { + return atomic.AddUint64(&shardID, 1) +} diff --git a/vendor/github.com/influxdata/influxdb/coordinator/shard_mapper.go b/vendor/github.com/influxdata/influxdb/coordinator/shard_mapper.go new file mode 100644 index 0000000..b231bff --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/coordinator/shard_mapper.go @@ -0,0 +1,255 @@ +package coordinator + +import ( + "context" + "io" + "time" + + "github.com/influxdata/influxdb/query" + "github.com/influxdata/influxdb/services/meta" + "github.com/influxdata/influxdb/tsdb" + "github.com/influxdata/influxql" +) + +// IteratorCreator is an interface that combines mapping fields and creating iterators. +type IteratorCreator interface { + query.IteratorCreator + influxql.FieldMapper + io.Closer +} + +// LocalShardMapper implements a ShardMapper for local shards. +type LocalShardMapper struct { + MetaClient interface { + ShardGroupsByTimeRange(database, policy string, min, max time.Time) (a []meta.ShardGroupInfo, err error) + } + + TSDBStore interface { + ShardGroup(ids []uint64) tsdb.ShardGroup + } +} + +// MapShards maps the sources to the appropriate shards into an IteratorCreator. 
+func (e *LocalShardMapper) MapShards(sources influxql.Sources, t influxql.TimeRange, opt query.SelectOptions) (query.ShardGroup, error) { + a := &LocalShardMapping{ + ShardMap: make(map[Source]tsdb.ShardGroup), + } + + tmin := time.Unix(0, t.MinTimeNano()) + tmax := time.Unix(0, t.MaxTimeNano()) + if err := e.mapShards(a, sources, tmin, tmax); err != nil { + return nil, err + } + a.MinTime, a.MaxTime = tmin, tmax + return a, nil +} + +func (e *LocalShardMapper) mapShards(a *LocalShardMapping, sources influxql.Sources, tmin, tmax time.Time) error { + for _, s := range sources { + switch s := s.(type) { + case *influxql.Measurement: + source := Source{ + Database: s.Database, + RetentionPolicy: s.RetentionPolicy, + } + // Retrieve the list of shards for this database. This list of + // shards is always the same regardless of which measurement we are + // using. + if _, ok := a.ShardMap[source]; !ok { + groups, err := e.MetaClient.ShardGroupsByTimeRange(s.Database, s.RetentionPolicy, tmin, tmax) + if err != nil { + return err + } + + if len(groups) == 0 { + a.ShardMap[source] = nil + continue + } + + shardIDs := make([]uint64, 0, len(groups[0].Shards)*len(groups)) + for _, g := range groups { + for _, si := range g.Shards { + shardIDs = append(shardIDs, si.ID) + } + } + a.ShardMap[source] = e.TSDBStore.ShardGroup(shardIDs) + } + case *influxql.SubQuery: + if err := e.mapShards(a, s.Statement.Sources, tmin, tmax); err != nil { + return err + } + } + } + return nil +} + +// ShardMapper maps data sources to a list of shard information. +type LocalShardMapping struct { + ShardMap map[Source]tsdb.ShardGroup + + // MinTime is the minimum time that this shard mapper will allow. + // Any attempt to use a time before this one will automatically result in using + // this time instead. + MinTime time.Time + + // MaxTime is the maximum time that this shard mapper will allow. + // Any attempt to use a time after this one will automatically result in using + // this time instead. 
+ MaxTime time.Time +} + +func (a *LocalShardMapping) FieldDimensions(m *influxql.Measurement) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error) { + source := Source{ + Database: m.Database, + RetentionPolicy: m.RetentionPolicy, + } + + sg := a.ShardMap[source] + if sg == nil { + return + } + + fields = make(map[string]influxql.DataType) + dimensions = make(map[string]struct{}) + + var measurements []string + if m.Regex != nil { + measurements = sg.MeasurementsByRegex(m.Regex.Val) + } else { + measurements = []string{m.Name} + } + + f, d, err := sg.FieldDimensions(measurements) + if err != nil { + return nil, nil, err + } + for k, typ := range f { + fields[k] = typ + } + for k := range d { + dimensions[k] = struct{}{} + } + return +} + +func (a *LocalShardMapping) MapType(m *influxql.Measurement, field string) influxql.DataType { + source := Source{ + Database: m.Database, + RetentionPolicy: m.RetentionPolicy, + } + + sg := a.ShardMap[source] + if sg == nil { + return influxql.Unknown + } + + var names []string + if m.Regex != nil { + names = sg.MeasurementsByRegex(m.Regex.Val) + } else { + names = []string{m.Name} + } + + var typ influxql.DataType + for _, name := range names { + if m.SystemIterator != "" { + name = m.SystemIterator + } + t := sg.MapType(name, field) + if typ.LessThan(t) { + typ = t + } + } + return typ +} + +func (a *LocalShardMapping) CreateIterator(ctx context.Context, m *influxql.Measurement, opt query.IteratorOptions) (query.Iterator, error) { + source := Source{ + Database: m.Database, + RetentionPolicy: m.RetentionPolicy, + } + + sg := a.ShardMap[source] + if sg == nil { + return nil, nil + } + + // Override the time constraints if they don't match each other. + if !a.MinTime.IsZero() && opt.StartTime < a.MinTime.UnixNano() { + opt.StartTime = a.MinTime.UnixNano() + } + if !a.MaxTime.IsZero() && opt.EndTime > a.MaxTime.UnixNano() { + opt.EndTime = a.MaxTime.UnixNano() + } + + if m.Regex != nil { + measurements := sg.MeasurementsByRegex(m.Regex.Val) + inputs := make([]query.Iterator, 0, len(measurements)) + if err := func() error { + // Create a Measurement for each returned matching measurement value + // from the regex. + for _, measurement := range measurements { + mm := m.Clone() + mm.Name = measurement // Set the name to this matching regex value. + input, err := sg.CreateIterator(ctx, mm, opt) + if err != nil { + return err + } + inputs = append(inputs, input) + } + return nil + }(); err != nil { + query.Iterators(inputs).Close() + return nil, err + } + + return query.Iterators(inputs).Merge(opt) + } + return sg.CreateIterator(ctx, m, opt) +} + +func (a *LocalShardMapping) IteratorCost(m *influxql.Measurement, opt query.IteratorOptions) (query.IteratorCost, error) { + source := Source{ + Database: m.Database, + RetentionPolicy: m.RetentionPolicy, + } + + sg := a.ShardMap[source] + if sg == nil { + return query.IteratorCost{}, nil + } + + // Override the time constraints if they don't match each other. 
+ if !a.MinTime.IsZero() && opt.StartTime < a.MinTime.UnixNano() { + opt.StartTime = a.MinTime.UnixNano() + } + if !a.MaxTime.IsZero() && opt.EndTime > a.MaxTime.UnixNano() { + opt.EndTime = a.MaxTime.UnixNano() + } + + if m.Regex != nil { + var costs query.IteratorCost + measurements := sg.MeasurementsByRegex(m.Regex.Val) + for _, measurement := range measurements { + cost, err := sg.IteratorCost(measurement, opt) + if err != nil { + return query.IteratorCost{}, err + } + costs = costs.Combine(cost) + } + return costs, nil + } + return sg.IteratorCost(m.Name, opt) +} + +// Close clears out the list of mapped shards. +func (a *LocalShardMapping) Close() error { + a.ShardMap = nil + return nil +} + +// Source contains the database and retention policy source for data. +type Source struct { + Database string + RetentionPolicy string +} diff --git a/vendor/github.com/influxdata/influxdb/coordinator/shard_mapper_test.go b/vendor/github.com/influxdata/influxdb/coordinator/shard_mapper_test.go new file mode 100644 index 0000000..f11b024 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/coordinator/shard_mapper_test.go @@ -0,0 +1,105 @@ +package coordinator_test + +import ( + "context" + "reflect" + "testing" + "time" + + "github.com/influxdata/influxdb/coordinator" + "github.com/influxdata/influxdb/internal" + "github.com/influxdata/influxdb/query" + "github.com/influxdata/influxdb/services/meta" + "github.com/influxdata/influxdb/tsdb" + "github.com/influxdata/influxql" +) + +func TestLocalShardMapper(t *testing.T) { + var metaClient MetaClient + metaClient.ShardGroupsByTimeRangeFn = func(database, policy string, min, max time.Time) ([]meta.ShardGroupInfo, error) { + if database != "db0" { + t.Errorf("unexpected database: %s", database) + } + if policy != "rp0" { + t.Errorf("unexpected retention policy: %s", policy) + } + return []meta.ShardGroupInfo{ + {ID: 1, Shards: []meta.ShardInfo{ + {ID: 1, Owners: []meta.ShardOwner{{NodeID: 0}}}, + {ID: 2, Owners: []meta.ShardOwner{{NodeID: 0}}}, + }}, + {ID: 2, Shards: []meta.ShardInfo{ + {ID: 3, Owners: []meta.ShardOwner{{NodeID: 0}}}, + {ID: 4, Owners: []meta.ShardOwner{{NodeID: 0}}}, + }}, + }, nil + } + + tsdbStore := &internal.TSDBStoreMock{} + tsdbStore.ShardGroupFn = func(ids []uint64) tsdb.ShardGroup { + if !reflect.DeepEqual(ids, []uint64{1, 2, 3, 4}) { + t.Errorf("unexpected shard ids: %#v", ids) + } + + var sh MockShard + sh.CreateIteratorFn = func(ctx context.Context, measurement *influxql.Measurement, opt query.IteratorOptions) (query.Iterator, error) { + if measurement.Name != "cpu" { + t.Errorf("unexpected measurement: %s", measurement.Name) + } + return &FloatIterator{}, nil + } + return &sh + } + + // Initialize the shard mapper. + shardMapper := &coordinator.LocalShardMapper{ + MetaClient: &metaClient, + TSDBStore: tsdbStore, + } + + // Normal measurement. + measurement := &influxql.Measurement{ + Database: "db0", + RetentionPolicy: "rp0", + Name: "cpu", + } + ic, err := shardMapper.MapShards([]influxql.Source{measurement}, influxql.TimeRange{}, query.SelectOptions{}) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + // This should be a LocalShardMapping. 
+ m, ok := ic.(*coordinator.LocalShardMapping) + if !ok { + t.Fatalf("unexpected mapping type: %T", ic) + } else if len(m.ShardMap) != 1 { + t.Fatalf("unexpected number of shard mappings: %d", len(m.ShardMap)) + } + + if _, err := ic.CreateIterator(context.Background(), measurement, query.IteratorOptions{}); err != nil { + t.Fatalf("unexpected error: %s", err) + } + + // Subquery. + subquery := &influxql.SubQuery{ + Statement: &influxql.SelectStatement{ + Sources: []influxql.Source{measurement}, + }, + } + ic, err = shardMapper.MapShards([]influxql.Source{subquery}, influxql.TimeRange{}, query.SelectOptions{}) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + // This should be a LocalShardMapping. + m, ok = ic.(*coordinator.LocalShardMapping) + if !ok { + t.Fatalf("unexpected mapping type: %T", ic) + } else if len(m.ShardMap) != 1 { + t.Fatalf("unexpected number of shard mappings: %d", len(m.ShardMap)) + } + + if _, err := ic.CreateIterator(context.Background(), measurement, query.IteratorOptions{}); err != nil { + t.Fatalf("unexpected error: %s", err) + } +} diff --git a/vendor/github.com/influxdata/influxdb/coordinator/statement_executor.go b/vendor/github.com/influxdata/influxdb/coordinator/statement_executor.go new file mode 100644 index 0000000..df19f34 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/coordinator/statement_executor.go @@ -0,0 +1,1402 @@ +package coordinator + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "sort" + "strconv" + "strings" + "time" + + "github.com/influxdata/influxdb" + "github.com/influxdata/influxdb/models" + "github.com/influxdata/influxdb/monitor" + "github.com/influxdata/influxdb/pkg/tracing" + "github.com/influxdata/influxdb/pkg/tracing/fields" + "github.com/influxdata/influxdb/query" + "github.com/influxdata/influxdb/services/meta" + "github.com/influxdata/influxdb/tsdb" + "github.com/influxdata/influxql" +) + +// ErrDatabaseNameRequired is returned when executing statements that require a database, +// when a database has not been provided. +var ErrDatabaseNameRequired = errors.New("database name required") + +type pointsWriter interface { + WritePointsInto(*IntoWriteRequest) error +} + +// StatementExecutor executes a statement in the query. +type StatementExecutor struct { + MetaClient MetaClient + + // TaskManager holds the StatementExecutor that handles task-related commands. + TaskManager query.StatementExecutor + + // TSDB storage for local node. + TSDBStore TSDBStore + + // ShardMapper for mapping shards when executing a SELECT statement. + ShardMapper query.ShardMapper + + // Holds monitoring data for SHOW STATS and SHOW DIAGNOSTICS. + Monitor *monitor.Monitor + + // Used for rewriting points back into system for SELECT INTO statements. + PointsWriter interface { + WritePointsInto(*IntoWriteRequest) error + } + + // Select statement limits + MaxSelectPointN int + MaxSelectSeriesN int + MaxSelectBucketsN int +} + +// ExecuteStatement executes the given statement with the given execution context. +func (e *StatementExecutor) ExecuteStatement(stmt influxql.Statement, ctx *query.ExecutionContext) error { + // Select statements are handled separately so that they can be streamed. 
+ if stmt, ok := stmt.(*influxql.SelectStatement); ok { + return e.executeSelectStatement(stmt, ctx) + } + + var rows models.Rows + var messages []*query.Message + var err error + switch stmt := stmt.(type) { + case *influxql.AlterRetentionPolicyStatement: + if ctx.ReadOnly { + messages = append(messages, query.ReadOnlyWarning(stmt.String())) + } + err = e.executeAlterRetentionPolicyStatement(stmt) + case *influxql.CreateContinuousQueryStatement: + if ctx.ReadOnly { + messages = append(messages, query.ReadOnlyWarning(stmt.String())) + } + err = e.executeCreateContinuousQueryStatement(stmt) + case *influxql.CreateDatabaseStatement: + if ctx.ReadOnly { + messages = append(messages, query.ReadOnlyWarning(stmt.String())) + } + err = e.executeCreateDatabaseStatement(stmt) + case *influxql.CreateRetentionPolicyStatement: + if ctx.ReadOnly { + messages = append(messages, query.ReadOnlyWarning(stmt.String())) + } + err = e.executeCreateRetentionPolicyStatement(stmt) + case *influxql.CreateSubscriptionStatement: + if ctx.ReadOnly { + messages = append(messages, query.ReadOnlyWarning(stmt.String())) + } + err = e.executeCreateSubscriptionStatement(stmt) + case *influxql.CreateUserStatement: + if ctx.ReadOnly { + messages = append(messages, query.ReadOnlyWarning(stmt.String())) + } + err = e.executeCreateUserStatement(stmt) + case *influxql.DeleteSeriesStatement: + err = e.executeDeleteSeriesStatement(stmt, ctx.Database) + case *influxql.DropContinuousQueryStatement: + if ctx.ReadOnly { + messages = append(messages, query.ReadOnlyWarning(stmt.String())) + } + err = e.executeDropContinuousQueryStatement(stmt) + case *influxql.DropDatabaseStatement: + if ctx.ReadOnly { + messages = append(messages, query.ReadOnlyWarning(stmt.String())) + } + err = e.executeDropDatabaseStatement(stmt) + case *influxql.DropMeasurementStatement: + if ctx.ReadOnly { + messages = append(messages, query.ReadOnlyWarning(stmt.String())) + } + err = e.executeDropMeasurementStatement(stmt, ctx.Database) + case *influxql.DropSeriesStatement: + if ctx.ReadOnly { + messages = append(messages, query.ReadOnlyWarning(stmt.String())) + } + err = e.executeDropSeriesStatement(stmt, ctx.Database) + case *influxql.DropRetentionPolicyStatement: + if ctx.ReadOnly { + messages = append(messages, query.ReadOnlyWarning(stmt.String())) + } + err = e.executeDropRetentionPolicyStatement(stmt) + case *influxql.DropShardStatement: + if ctx.ReadOnly { + messages = append(messages, query.ReadOnlyWarning(stmt.String())) + } + err = e.executeDropShardStatement(stmt) + case *influxql.DropSubscriptionStatement: + if ctx.ReadOnly { + messages = append(messages, query.ReadOnlyWarning(stmt.String())) + } + err = e.executeDropSubscriptionStatement(stmt) + case *influxql.DropUserStatement: + if ctx.ReadOnly { + messages = append(messages, query.ReadOnlyWarning(stmt.String())) + } + err = e.executeDropUserStatement(stmt) + case *influxql.ExplainStatement: + if stmt.Analyze { + rows, err = e.executeExplainAnalyzeStatement(stmt, ctx) + } else { + rows, err = e.executeExplainStatement(stmt, ctx) + } + case *influxql.GrantStatement: + if ctx.ReadOnly { + messages = append(messages, query.ReadOnlyWarning(stmt.String())) + } + err = e.executeGrantStatement(stmt) + case *influxql.GrantAdminStatement: + if ctx.ReadOnly { + messages = append(messages, query.ReadOnlyWarning(stmt.String())) + } + err = e.executeGrantAdminStatement(stmt) + case *influxql.RevokeStatement: + if ctx.ReadOnly { + messages = append(messages, query.ReadOnlyWarning(stmt.String())) + } + err = 
e.executeRevokeStatement(stmt) + case *influxql.RevokeAdminStatement: + if ctx.ReadOnly { + messages = append(messages, query.ReadOnlyWarning(stmt.String())) + } + err = e.executeRevokeAdminStatement(stmt) + case *influxql.ShowContinuousQueriesStatement: + rows, err = e.executeShowContinuousQueriesStatement(stmt) + case *influxql.ShowDatabasesStatement: + rows, err = e.executeShowDatabasesStatement(stmt, ctx) + case *influxql.ShowDiagnosticsStatement: + rows, err = e.executeShowDiagnosticsStatement(stmt) + case *influxql.ShowGrantsForUserStatement: + rows, err = e.executeShowGrantsForUserStatement(stmt) + case *influxql.ShowMeasurementsStatement: + return e.executeShowMeasurementsStatement(stmt, ctx) + case *influxql.ShowMeasurementCardinalityStatement: + rows, err = e.executeShowMeasurementCardinalityStatement(stmt) + case *influxql.ShowRetentionPoliciesStatement: + rows, err = e.executeShowRetentionPoliciesStatement(stmt) + case *influxql.ShowSeriesCardinalityStatement: + rows, err = e.executeShowSeriesCardinalityStatement(stmt) + case *influxql.ShowShardsStatement: + rows, err = e.executeShowShardsStatement(stmt) + case *influxql.ShowShardGroupsStatement: + rows, err = e.executeShowShardGroupsStatement(stmt) + case *influxql.ShowStatsStatement: + rows, err = e.executeShowStatsStatement(stmt) + case *influxql.ShowSubscriptionsStatement: + rows, err = e.executeShowSubscriptionsStatement(stmt) + case *influxql.ShowTagKeysStatement: + return e.executeShowTagKeys(stmt, ctx) + case *influxql.ShowTagValuesStatement: + return e.executeShowTagValues(stmt, ctx) + case *influxql.ShowUsersStatement: + rows, err = e.executeShowUsersStatement(stmt) + case *influxql.SetPasswordUserStatement: + if ctx.ReadOnly { + messages = append(messages, query.ReadOnlyWarning(stmt.String())) + } + err = e.executeSetPasswordUserStatement(stmt) + case *influxql.ShowQueriesStatement, *influxql.KillQueryStatement: + // Send query related statements to the task manager. + return e.TaskManager.ExecuteStatement(stmt, ctx) + default: + return query.ErrInvalidQuery + } + + if err != nil { + return err + } + + return ctx.Send(&query.Result{ + Series: rows, + Messages: messages, + }) +} + +func (e *StatementExecutor) executeAlterRetentionPolicyStatement(stmt *influxql.AlterRetentionPolicyStatement) error { + rpu := &meta.RetentionPolicyUpdate{ + Duration: stmt.Duration, + ReplicaN: stmt.Replication, + ShardGroupDuration: stmt.ShardGroupDuration, + } + + // Update the retention policy. + return e.MetaClient.UpdateRetentionPolicy(stmt.Database, stmt.Name, rpu, stmt.Default) +} + +func (e *StatementExecutor) executeCreateContinuousQueryStatement(q *influxql.CreateContinuousQueryStatement) error { + // Verify that retention policies exist. 
+ var err error + verifyRPFn := func(n influxql.Node) { + if err != nil { + return + } + switch m := n.(type) { + case *influxql.Measurement: + var rp *meta.RetentionPolicyInfo + if rp, err = e.MetaClient.RetentionPolicy(m.Database, m.RetentionPolicy); err != nil { + return + } else if rp == nil { + err = fmt.Errorf("%s: %s.%s", meta.ErrRetentionPolicyNotFound, m.Database, m.RetentionPolicy) + } + default: + return + } + } + + influxql.WalkFunc(q, verifyRPFn) + + if err != nil { + return err + } + + return e.MetaClient.CreateContinuousQuery(q.Database, q.Name, q.String()) +} + +func (e *StatementExecutor) executeCreateDatabaseStatement(stmt *influxql.CreateDatabaseStatement) error { + if !meta.ValidName(stmt.Name) { + // TODO This should probably be in `(*meta.Data).CreateDatabase` + // but can't go there until 1.1 is used everywhere + return meta.ErrInvalidName + } + + if !stmt.RetentionPolicyCreate { + _, err := e.MetaClient.CreateDatabase(stmt.Name) + return err + } + + // If we're doing, for example, CREATE DATABASE "db" WITH DURATION 1d then + // the name will not yet be set. We only need to validate non-empty + // retention policy names, such as in the statement: + // CREATE DATABASE "db" WITH DURATION 1d NAME "xyz" + if stmt.RetentionPolicyName != "" && !meta.ValidName(stmt.RetentionPolicyName) { + return meta.ErrInvalidName + } + + spec := meta.RetentionPolicySpec{ + Name: stmt.RetentionPolicyName, + Duration: stmt.RetentionPolicyDuration, + ReplicaN: stmt.RetentionPolicyReplication, + ShardGroupDuration: stmt.RetentionPolicyShardGroupDuration, + } + _, err := e.MetaClient.CreateDatabaseWithRetentionPolicy(stmt.Name, &spec) + return err +} + +func (e *StatementExecutor) executeCreateRetentionPolicyStatement(stmt *influxql.CreateRetentionPolicyStatement) error { + if !meta.ValidName(stmt.Name) { + // TODO This should probably be in `(*meta.Data).CreateRetentionPolicy` + // but can't go there until 1.1 is used everywhere + return meta.ErrInvalidName + } + + spec := meta.RetentionPolicySpec{ + Name: stmt.Name, + Duration: &stmt.Duration, + ReplicaN: &stmt.Replication, + ShardGroupDuration: stmt.ShardGroupDuration, + } + + // Create new retention policy. + _, err := e.MetaClient.CreateRetentionPolicy(stmt.Database, &spec, stmt.Default) + return err +} + +func (e *StatementExecutor) executeCreateSubscriptionStatement(q *influxql.CreateSubscriptionStatement) error { + return e.MetaClient.CreateSubscription(q.Database, q.RetentionPolicy, q.Name, q.Mode, q.Destinations) +} + +func (e *StatementExecutor) executeCreateUserStatement(q *influxql.CreateUserStatement) error { + _, err := e.MetaClient.CreateUser(q.Name, q.Password, q.Admin) + return err +} + +func (e *StatementExecutor) executeDeleteSeriesStatement(stmt *influxql.DeleteSeriesStatement, database string) error { + if dbi := e.MetaClient.Database(database); dbi == nil { + return query.ErrDatabaseNotFound(database) + } + + // Convert "now()" to current time. + stmt.Condition = influxql.Reduce(stmt.Condition, &influxql.NowValuer{Now: time.Now().UTC()}) + + // Locally delete the series. + return e.TSDBStore.DeleteSeries(database, stmt.Sources, stmt.Condition) +} + +func (e *StatementExecutor) executeDropContinuousQueryStatement(q *influxql.DropContinuousQueryStatement) error { + return e.MetaClient.DropContinuousQuery(q.Database, q.Name) +} + +// executeDropDatabaseStatement drops a database from the cluster. +// It does not return an error if the database was not found on any of +// the nodes, or in the Meta store. 
+func (e *StatementExecutor) executeDropDatabaseStatement(stmt *influxql.DropDatabaseStatement) error { + if e.MetaClient.Database(stmt.Name) == nil { + return nil + } + + // Locally delete the database. + if err := e.TSDBStore.DeleteDatabase(stmt.Name); err != nil { + return err + } + + // Remove the database from the Meta Store. + return e.MetaClient.DropDatabase(stmt.Name) +} + +func (e *StatementExecutor) executeDropMeasurementStatement(stmt *influxql.DropMeasurementStatement, database string) error { + if dbi := e.MetaClient.Database(database); dbi == nil { + return query.ErrDatabaseNotFound(database) + } + + // Locally drop the measurement + return e.TSDBStore.DeleteMeasurement(database, stmt.Name) +} + +func (e *StatementExecutor) executeDropSeriesStatement(stmt *influxql.DropSeriesStatement, database string) error { + if dbi := e.MetaClient.Database(database); dbi == nil { + return query.ErrDatabaseNotFound(database) + } + + // Check for time in WHERE clause (not supported). + if influxql.HasTimeExpr(stmt.Condition) { + return errors.New("DROP SERIES doesn't support time in WHERE clause") + } + + // Locally drop the series. + return e.TSDBStore.DeleteSeries(database, stmt.Sources, stmt.Condition) +} + +func (e *StatementExecutor) executeDropShardStatement(stmt *influxql.DropShardStatement) error { + // Locally delete the shard. + if err := e.TSDBStore.DeleteShard(stmt.ID); err != nil { + return err + } + + // Remove the shard reference from the Meta Store. + return e.MetaClient.DropShard(stmt.ID) +} + +func (e *StatementExecutor) executeDropRetentionPolicyStatement(stmt *influxql.DropRetentionPolicyStatement) error { + dbi := e.MetaClient.Database(stmt.Database) + if dbi == nil { + return nil + } + + if dbi.RetentionPolicy(stmt.Name) == nil { + return nil + } + + // Locally drop the retention policy. + if err := e.TSDBStore.DeleteRetentionPolicy(stmt.Database, stmt.Name); err != nil { + return err + } + + return e.MetaClient.DropRetentionPolicy(stmt.Database, stmt.Name) +} + +func (e *StatementExecutor) executeDropSubscriptionStatement(q *influxql.DropSubscriptionStatement) error { + return e.MetaClient.DropSubscription(q.Database, q.RetentionPolicy, q.Name) +} + +func (e *StatementExecutor) executeDropUserStatement(q *influxql.DropUserStatement) error { + return e.MetaClient.DropUser(q.Name) +} + +func (e *StatementExecutor) executeExplainStatement(q *influxql.ExplainStatement, ctx *query.ExecutionContext) (models.Rows, error) { + opt := query.SelectOptions{ + NodeID: ctx.ExecutionOptions.NodeID, + MaxSeriesN: e.MaxSelectSeriesN, + MaxBucketsN: e.MaxSelectBucketsN, + Authorizer: ctx.Authorizer, + } + + // Prepare the query for execution, but do not actually execute it. + // This should perform any needed substitutions. 
+ p, err := query.Prepare(q.Statement, e.ShardMapper, opt) + if err != nil { + return nil, err + } + defer p.Close() + + plan, err := p.Explain() + if err != nil { + return nil, err + } + plan = strings.TrimSpace(plan) + + row := &models.Row{ + Columns: []string{"QUERY PLAN"}, + } + for _, s := range strings.Split(plan, "\n") { + row.Values = append(row.Values, []interface{}{s}) + } + return models.Rows{row}, nil +} + +func (e *StatementExecutor) executeExplainAnalyzeStatement(q *influxql.ExplainStatement, ectx *query.ExecutionContext) (models.Rows, error) { + stmt := q.Statement + t, span := tracing.NewTrace("select") + ctx := tracing.NewContextWithTrace(ectx, t) + ctx = tracing.NewContextWithSpan(ctx, span) + var aux query.Iterators + ctx = query.NewContextWithIterators(ctx, &aux) + start := time.Now() + + cur, err := e.createIterators(ctx, stmt, ectx.ExecutionOptions) + if err != nil { + return nil, err + } + + iterTime := time.Since(start) + + // Generate a row emitter from the iterator set. + em := query.NewEmitter(cur, ectx.ChunkSize) + + // Emit rows to the results channel. + var writeN int64 + for { + var row *models.Row + row, _, err = em.Emit() + if err != nil { + goto CLEANUP + } else if row == nil { + // Check if the query was interrupted while emitting. + select { + case <-ectx.Done(): + err = ectx.Err() + goto CLEANUP + default: + } + break + } + + writeN += int64(len(row.Values)) + } + +CLEANUP: + em.Close() + if err != nil { + return nil, err + } + + // close auxiliary iterators deterministically to finalize any captured measurements + aux.Close() + + totalTime := time.Since(start) + span.MergeFields( + fields.Duration("total_time", totalTime), + fields.Duration("planning_time", iterTime), + fields.Duration("execution_time", totalTime-iterTime), + ) + span.Finish() + + row := &models.Row{ + Columns: []string{"EXPLAIN ANALYZE"}, + } + for _, s := range strings.Split(t.Tree().String(), "\n") { + row.Values = append(row.Values, []interface{}{s}) + } + + return models.Rows{row}, nil +} + +func (e *StatementExecutor) executeGrantStatement(stmt *influxql.GrantStatement) error { + return e.MetaClient.SetPrivilege(stmt.User, stmt.On, stmt.Privilege) +} + +func (e *StatementExecutor) executeGrantAdminStatement(stmt *influxql.GrantAdminStatement) error { + return e.MetaClient.SetAdminPrivilege(stmt.User, true) +} + +func (e *StatementExecutor) executeRevokeStatement(stmt *influxql.RevokeStatement) error { + priv := influxql.NoPrivileges + + // Revoking all privileges means there's no need to look at existing user privileges. + if stmt.Privilege != influxql.AllPrivileges { + p, err := e.MetaClient.UserPrivilege(stmt.User, stmt.On) + if err != nil { + return err + } + // Bit clear (AND NOT) the user's privilege with the revoked privilege. + priv = *p &^ stmt.Privilege + } + + return e.MetaClient.SetPrivilege(stmt.User, stmt.On, priv) +} + +func (e *StatementExecutor) executeRevokeAdminStatement(stmt *influxql.RevokeAdminStatement) error { + return e.MetaClient.SetAdminPrivilege(stmt.User, false) +} + +func (e *StatementExecutor) executeSetPasswordUserStatement(q *influxql.SetPasswordUserStatement) error { + return e.MetaClient.UpdateUser(q.Name, q.Password) +} + +func (e *StatementExecutor) executeSelectStatement(stmt *influxql.SelectStatement, ctx *query.ExecutionContext) error { + cur, err := e.createIterators(ctx, stmt, ctx.ExecutionOptions) + if err != nil { + return err + } + + // Generate a row emitter from the iterator set. 
+ em := query.NewEmitter(cur, ctx.ChunkSize) + defer em.Close() + + // Emit rows to the results channel. + var writeN int64 + var emitted bool + + var pointsWriter *BufferedPointsWriter + if stmt.Target != nil { + pointsWriter = NewBufferedPointsWriter(e.PointsWriter, stmt.Target.Measurement.Database, stmt.Target.Measurement.RetentionPolicy, 10000) + } + + for { + row, partial, err := em.Emit() + if err != nil { + return err + } else if row == nil { + // Check if the query was interrupted while emitting. + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + break + } + + // Write points back into system for INTO statements. + if stmt.Target != nil { + n, err := e.writeInto(pointsWriter, stmt, row) + if err != nil { + return err + } + writeN += n + continue + } + + result := &query.Result{ + Series: []*models.Row{row}, + Partial: partial, + } + + // Send results or exit if closing. + if err := ctx.Send(result); err != nil { + return err + } + + emitted = true + } + + // Flush remaining points and emit write count if an INTO statement. + if stmt.Target != nil { + if err := pointsWriter.Flush(); err != nil { + return err + } + + var messages []*query.Message + if ctx.ReadOnly { + messages = append(messages, query.ReadOnlyWarning(stmt.String())) + } + + return ctx.Send(&query.Result{ + Messages: messages, + Series: []*models.Row{{ + Name: "result", + Columns: []string{"time", "written"}, + Values: [][]interface{}{{time.Unix(0, 0).UTC(), writeN}}, + }}, + }) + } + + // Always emit at least one result. + if !emitted { + return ctx.Send(&query.Result{ + Series: make([]*models.Row, 0), + }) + } + + return nil +} + +func (e *StatementExecutor) createIterators(ctx context.Context, stmt *influxql.SelectStatement, opt query.ExecutionOptions) (query.Cursor, error) { + sopt := query.SelectOptions{ + NodeID: opt.NodeID, + MaxSeriesN: e.MaxSelectSeriesN, + MaxPointN: e.MaxSelectPointN, + MaxBucketsN: e.MaxSelectBucketsN, + Authorizer: opt.Authorizer, + } + + // Create a set of iterators from a selection. + cur, err := query.Select(ctx, stmt, e.ShardMapper, sopt) + if err != nil { + return nil, err + } + return cur, nil +} + +func (e *StatementExecutor) executeShowContinuousQueriesStatement(stmt *influxql.ShowContinuousQueriesStatement) (models.Rows, error) { + dis := e.MetaClient.Databases() + + rows := []*models.Row{} + for _, di := range dis { + row := &models.Row{Columns: []string{"name", "query"}, Name: di.Name} + for _, cqi := range di.ContinuousQueries { + row.Values = append(row.Values, []interface{}{cqi.Name, cqi.Query}) + } + rows = append(rows, row) + } + return rows, nil +} + +func (e *StatementExecutor) executeShowDatabasesStatement(q *influxql.ShowDatabasesStatement, ctx *query.ExecutionContext) (models.Rows, error) { + dis := e.MetaClient.Databases() + a := ctx.ExecutionOptions.Authorizer + + row := &models.Row{Name: "databases", Columns: []string{"name"}} + for _, di := range dis { + // Only include databases that the user is authorized to read or write. + if a.AuthorizeDatabase(influxql.ReadPrivilege, di.Name) || a.AuthorizeDatabase(influxql.WritePrivilege, di.Name) { + row.Values = append(row.Values, []interface{}{di.Name}) + } + } + return []*models.Row{row}, nil +} + +func (e *StatementExecutor) executeShowDiagnosticsStatement(stmt *influxql.ShowDiagnosticsStatement) (models.Rows, error) { + diags, err := e.Monitor.Diagnostics() + if err != nil { + return nil, err + } + + // Get a sorted list of diagnostics keys. 
+ sortedKeys := make([]string, 0, len(diags)) + for k := range diags { + sortedKeys = append(sortedKeys, k) + } + sort.Strings(sortedKeys) + + rows := make([]*models.Row, 0, len(diags)) + for _, k := range sortedKeys { + if stmt.Module != "" && k != stmt.Module { + continue + } + + row := &models.Row{Name: k} + + row.Columns = diags[k].Columns + row.Values = diags[k].Rows + rows = append(rows, row) + } + return rows, nil +} + +func (e *StatementExecutor) executeShowGrantsForUserStatement(q *influxql.ShowGrantsForUserStatement) (models.Rows, error) { + priv, err := e.MetaClient.UserPrivileges(q.Name) + if err != nil { + return nil, err + } + + row := &models.Row{Columns: []string{"database", "privilege"}} + for d, p := range priv { + row.Values = append(row.Values, []interface{}{d, p.String()}) + } + return []*models.Row{row}, nil +} + +func (e *StatementExecutor) executeShowMeasurementsStatement(q *influxql.ShowMeasurementsStatement, ctx *query.ExecutionContext) error { + if q.Database == "" { + return ErrDatabaseNameRequired + } + + names, err := e.TSDBStore.MeasurementNames(ctx.Authorizer, q.Database, q.Condition) + if err != nil || len(names) == 0 { + return ctx.Send(&query.Result{ + Err: err, + }) + } + + if q.Offset > 0 { + if q.Offset >= len(names) { + names = nil + } else { + names = names[q.Offset:] + } + } + + if q.Limit > 0 { + if q.Limit < len(names) { + names = names[:q.Limit] + } + } + + values := make([][]interface{}, len(names)) + for i, name := range names { + values[i] = []interface{}{string(name)} + } + + if len(values) == 0 { + return ctx.Send(&query.Result{}) + } + + return ctx.Send(&query.Result{ + Series: []*models.Row{{ + Name: "measurements", + Columns: []string{"name"}, + Values: values, + }}, + }) +} + +func (e *StatementExecutor) executeShowMeasurementCardinalityStatement(stmt *influxql.ShowMeasurementCardinalityStatement) (models.Rows, error) { + if stmt.Database == "" { + return nil, ErrDatabaseNameRequired + } + + n, err := e.TSDBStore.MeasurementsCardinality(stmt.Database) + if err != nil { + return nil, err + } + + return []*models.Row{&models.Row{ + Columns: []string{"cardinality estimation"}, + Values: [][]interface{}{{n}}, + }}, nil +} + +func (e *StatementExecutor) executeShowRetentionPoliciesStatement(q *influxql.ShowRetentionPoliciesStatement) (models.Rows, error) { + if q.Database == "" { + return nil, ErrDatabaseNameRequired + } + + di := e.MetaClient.Database(q.Database) + if di == nil { + return nil, influxdb.ErrDatabaseNotFound(q.Database) + } + + row := &models.Row{Columns: []string{"name", "duration", "shardGroupDuration", "replicaN", "default"}} + for _, rpi := range di.RetentionPolicies { + row.Values = append(row.Values, []interface{}{rpi.Name, rpi.Duration.String(), rpi.ShardGroupDuration.String(), rpi.ReplicaN, di.DefaultRetentionPolicy == rpi.Name}) + } + return []*models.Row{row}, nil +} + +func (e *StatementExecutor) executeShowShardsStatement(stmt *influxql.ShowShardsStatement) (models.Rows, error) { + dis := e.MetaClient.Databases() + + rows := []*models.Row{} + for _, di := range dis { + row := &models.Row{Columns: []string{"id", "database", "retention_policy", "shard_group", "start_time", "end_time", "expiry_time", "owners"}, Name: di.Name} + for _, rpi := range di.RetentionPolicies { + for _, sgi := range rpi.ShardGroups { + // Shards associated with deleted shard groups are effectively deleted. + // Don't list them. 
+ if sgi.Deleted() { + continue + } + + for _, si := range sgi.Shards { + ownerIDs := make([]uint64, len(si.Owners)) + for i, owner := range si.Owners { + ownerIDs[i] = owner.NodeID + } + + row.Values = append(row.Values, []interface{}{ + si.ID, + di.Name, + rpi.Name, + sgi.ID, + sgi.StartTime.UTC().Format(time.RFC3339), + sgi.EndTime.UTC().Format(time.RFC3339), + sgi.EndTime.Add(rpi.Duration).UTC().Format(time.RFC3339), + joinUint64(ownerIDs), + }) + } + } + } + rows = append(rows, row) + } + return rows, nil +} + +func (e *StatementExecutor) executeShowSeriesCardinalityStatement(stmt *influxql.ShowSeriesCardinalityStatement) (models.Rows, error) { + if stmt.Database == "" { + return nil, ErrDatabaseNameRequired + } + + n, err := e.TSDBStore.SeriesCardinality(stmt.Database) + if err != nil { + return nil, err + } + + return []*models.Row{&models.Row{ + Columns: []string{"cardinality estimation"}, + Values: [][]interface{}{{n}}, + }}, nil +} + +func (e *StatementExecutor) executeShowShardGroupsStatement(stmt *influxql.ShowShardGroupsStatement) (models.Rows, error) { + dis := e.MetaClient.Databases() + + row := &models.Row{Columns: []string{"id", "database", "retention_policy", "start_time", "end_time", "expiry_time"}, Name: "shard groups"} + for _, di := range dis { + for _, rpi := range di.RetentionPolicies { + for _, sgi := range rpi.ShardGroups { + // Shards associated with deleted shard groups are effectively deleted. + // Don't list them. + if sgi.Deleted() { + continue + } + + row.Values = append(row.Values, []interface{}{ + sgi.ID, + di.Name, + rpi.Name, + sgi.StartTime.UTC().Format(time.RFC3339), + sgi.EndTime.UTC().Format(time.RFC3339), + sgi.EndTime.Add(rpi.Duration).UTC().Format(time.RFC3339), + }) + } + } + } + + return []*models.Row{row}, nil +} + +func (e *StatementExecutor) executeShowStatsStatement(stmt *influxql.ShowStatsStatement) (models.Rows, error) { + var rows []*models.Row + + if _, ok := e.TSDBStore.(*tsdb.Store); stmt.Module == "indexes" && ok { + // The cost of collecting indexes metrics grows with the size of the indexes, so only collect this + // stat when explicitly requested. 
+ b := e.TSDBStore.(*tsdb.Store).IndexBytes() + row := &models.Row{ + Name: "indexes", + Columns: []string{"memoryBytes"}, + Values: [][]interface{}{{b}}, + } + rows = append(rows, row) + + } else { + stats, err := e.Monitor.Statistics(nil) + if err != nil { + return nil, err + } + + for _, stat := range stats { + if stmt.Module != "" && stat.Name != stmt.Module { + continue + } + row := &models.Row{Name: stat.Name, Tags: stat.Tags} + + values := make([]interface{}, 0, len(stat.Values)) + for _, k := range stat.ValueNames() { + row.Columns = append(row.Columns, k) + values = append(values, stat.Values[k]) + } + row.Values = [][]interface{}{values} + rows = append(rows, row) + } + } + return rows, nil +} + +func (e *StatementExecutor) executeShowSubscriptionsStatement(stmt *influxql.ShowSubscriptionsStatement) (models.Rows, error) { + dis := e.MetaClient.Databases() + + rows := []*models.Row{} + for _, di := range dis { + row := &models.Row{Columns: []string{"retention_policy", "name", "mode", "destinations"}, Name: di.Name} + for _, rpi := range di.RetentionPolicies { + for _, si := range rpi.Subscriptions { + row.Values = append(row.Values, []interface{}{rpi.Name, si.Name, si.Mode, si.Destinations}) + } + } + if len(row.Values) > 0 { + rows = append(rows, row) + } + } + return rows, nil +} + +func (e *StatementExecutor) executeShowTagKeys(q *influxql.ShowTagKeysStatement, ctx *query.ExecutionContext) error { + if q.Database == "" { + return ErrDatabaseNameRequired + } + + // Determine shard set based on database and time range. + // SHOW TAG KEYS returns all tag keys for the default retention policy. + di := e.MetaClient.Database(q.Database) + if di == nil { + return fmt.Errorf("database not found: %s", q.Database) + } + + // Determine appropriate time range. If one or fewer time boundaries provided + // then min/max possible time should be used instead. + valuer := &influxql.NowValuer{Now: time.Now()} + cond, timeRange, err := influxql.ConditionExpr(q.Condition, valuer) + if err != nil { + return err + } + + // Get all shards for all retention policies. + var allGroups []meta.ShardGroupInfo + for _, rpi := range di.RetentionPolicies { + sgis, err := e.MetaClient.ShardGroupsByTimeRange(q.Database, rpi.Name, timeRange.MinTime(), timeRange.MaxTime()) + if err != nil { + return err + } + allGroups = append(allGroups, sgis...) + } + + var shardIDs []uint64 + for _, sgi := range allGroups { + for _, si := range sgi.Shards { + shardIDs = append(shardIDs, si.ID) + } + } + + tagKeys, err := e.TSDBStore.TagKeys(ctx.Authorizer, shardIDs, cond) + if err != nil { + return ctx.Send(&query.Result{ + Err: err, + }) + } + + emitted := false + for _, m := range tagKeys { + keys := m.Keys + + if q.Offset > 0 { + if q.Offset >= len(keys) { + keys = nil + } else { + keys = keys[q.Offset:] + } + } + if q.Limit > 0 && q.Limit < len(keys) { + keys = keys[:q.Limit] + } + + if len(keys) == 0 { + continue + } + + row := &models.Row{ + Name: m.Measurement, + Columns: []string{"tagKey"}, + Values: make([][]interface{}, len(keys)), + } + for i, key := range keys { + row.Values[i] = []interface{}{key} + } + + if err := ctx.Send(&query.Result{ + Series: []*models.Row{row}, + }); err != nil { + return err + } + emitted = true + } + + // Ensure at least one result is emitted. 
+ if !emitted { + return ctx.Send(&query.Result{}) + } + return nil +} + +func (e *StatementExecutor) executeShowTagValues(q *influxql.ShowTagValuesStatement, ctx *query.ExecutionContext) error { + if q.Database == "" { + return ErrDatabaseNameRequired + } + + // Determine shard set based on database and time range. + // SHOW TAG VALUES returns all tag values for the default retention policy. + di := e.MetaClient.Database(q.Database) + if di == nil { + return fmt.Errorf("database not found: %s", q.Database) + } + + // Determine appropriate time range. If one or fewer time boundaries provided + // then min/max possible time should be used instead. + valuer := &influxql.NowValuer{Now: time.Now()} + cond, timeRange, err := influxql.ConditionExpr(q.Condition, valuer) + if err != nil { + return err + } + + // Get all shards for all retention policies. + var allGroups []meta.ShardGroupInfo + for _, rpi := range di.RetentionPolicies { + sgis, err := e.MetaClient.ShardGroupsByTimeRange(q.Database, rpi.Name, timeRange.MinTime(), timeRange.MaxTime()) + if err != nil { + return err + } + allGroups = append(allGroups, sgis...) + } + + var shardIDs []uint64 + for _, sgi := range allGroups { + for _, si := range sgi.Shards { + shardIDs = append(shardIDs, si.ID) + } + } + + tagValues, err := e.TSDBStore.TagValues(ctx.Authorizer, shardIDs, cond) + if err != nil { + return ctx.Send(&query.Result{Err: err}) + } + + emitted := false + for _, m := range tagValues { + values := m.Values + + if q.Offset > 0 { + if q.Offset >= len(values) { + values = nil + } else { + values = values[q.Offset:] + } + } + + if q.Limit > 0 { + if q.Limit < len(values) { + values = values[:q.Limit] + } + } + + if len(values) == 0 { + continue + } + + row := &models.Row{ + Name: m.Measurement, + Columns: []string{"key", "value"}, + Values: make([][]interface{}, len(values)), + } + for i, v := range values { + row.Values[i] = []interface{}{v.Key, v.Value} + } + + if err := ctx.Send(&query.Result{ + Series: []*models.Row{row}, + }); err != nil { + return err + } + emitted = true + } + + // Ensure at least one result is emitted. + if !emitted { + return ctx.Send(&query.Result{}) + } + return nil +} + +func (e *StatementExecutor) executeShowUsersStatement(q *influxql.ShowUsersStatement) (models.Rows, error) { + row := &models.Row{Columns: []string{"user", "admin"}} + for _, ui := range e.MetaClient.Users() { + row.Values = append(row.Values, []interface{}{ui.Name, ui.Admin}) + } + return []*models.Row{row}, nil +} + +// BufferedPointsWriter adds buffering to a pointsWriter so that SELECT INTO queries +// write their points to the destination in batches. +type BufferedPointsWriter struct { + w pointsWriter + buf []models.Point + database string + retentionPolicy string +} + +// NewBufferedPointsWriter returns a new BufferedPointsWriter. +func NewBufferedPointsWriter(w pointsWriter, database, retentionPolicy string, capacity int) *BufferedPointsWriter { + return &BufferedPointsWriter{ + w: w, + buf: make([]models.Point, 0, capacity), + database: database, + retentionPolicy: retentionPolicy, + } +} + +// WritePointsInto implements pointsWriter for BufferedPointsWriter. +func (w *BufferedPointsWriter) WritePointsInto(req *IntoWriteRequest) error { + // Make sure we're buffering points only for the expected destination. 
+ if req.Database != w.database || req.RetentionPolicy != w.retentionPolicy { + return fmt.Errorf("writer for %s.%s can't write into %s.%s", w.database, w.retentionPolicy, req.Database, req.RetentionPolicy) + } + + for i := 0; i < len(req.Points); { + // Get the available space in the buffer. + avail := cap(w.buf) - len(w.buf) + + // Calculate number of points to copy into the buffer. + n := len(req.Points[i:]) + if n > avail { + n = avail + } + + // Copy points into buffer. + w.buf = append(w.buf, req.Points[i:n+i]...) + + // Advance the index by number of points copied. + i += n + + // If buffer is full, flush points to underlying writer. + if len(w.buf) == cap(w.buf) { + if err := w.Flush(); err != nil { + return err + } + } + } + + return nil +} + +// Flush writes all buffered points to the underlying writer. +func (w *BufferedPointsWriter) Flush() error { + if len(w.buf) == 0 { + return nil + } + + if err := w.w.WritePointsInto(&IntoWriteRequest{ + Database: w.database, + RetentionPolicy: w.retentionPolicy, + Points: w.buf, + }); err != nil { + return err + } + + // Clear the buffer. + w.buf = w.buf[:0] + + return nil +} + +// Len returns the number of points buffered. +func (w *BufferedPointsWriter) Len() int { return len(w.buf) } + +// Cap returns the capacity (in points) of the buffer. +func (w *BufferedPointsWriter) Cap() int { return cap(w.buf) } + +func (e *StatementExecutor) writeInto(w pointsWriter, stmt *influxql.SelectStatement, row *models.Row) (n int64, err error) { + if stmt.Target.Measurement.Database == "" { + return 0, errNoDatabaseInTarget + } + + // It might seem a bit weird that this is where we do this, since we will have to + // convert rows back to points. The Executors (both aggregate and raw) are complex + // enough that changing them to write back to the DB is going to be clumsy + // + // it might seem weird to have the write be in the Executor, but the interweaving of + // limitedRowWriter and ExecuteAggregate/Raw makes it ridiculously hard to make sure that the + // results will be the same as when queried normally. + name := stmt.Target.Measurement.Name + if name == "" { + name = row.Name + } + + points, err := convertRowToPoints(name, row) + if err != nil { + return 0, err + } + + if err := w.WritePointsInto(&IntoWriteRequest{ + Database: stmt.Target.Measurement.Database, + RetentionPolicy: stmt.Target.Measurement.RetentionPolicy, + Points: points, + }); err != nil { + return 0, err + } + + return int64(len(points)), nil +} + +var errNoDatabaseInTarget = errors.New("no database in target") + +// convertRowToPoints will convert a query result Row into Points that can be written back in. +func convertRowToPoints(measurementName string, row *models.Row) ([]models.Point, error) { + // figure out which parts of the result are the time and which are the fields + timeIndex := -1 + fieldIndexes := make(map[string]int) + for i, c := range row.Columns { + if c == "time" { + timeIndex = i + } else { + fieldIndexes[c] = i + } + } + + if timeIndex == -1 { + return nil, errors.New("error finding time index in result") + } + + points := make([]models.Point, 0, len(row.Values)) + for _, v := range row.Values { + vals := make(map[string]interface{}) + for fieldName, fieldIndex := range fieldIndexes { + val := v[fieldIndex] + // Check specifically for nil or a NullFloat. 
This is because + // the NullFloat represents float numbers that don't have an internal representation + // (like NaN) that cannot be written back, but will not equal nil so there will be + // an attempt to write them if we do not check for it. + if val != nil && val != query.NullFloat { + vals[fieldName] = v[fieldIndex] + } + } + + p, err := models.NewPoint(measurementName, models.NewTags(row.Tags), vals, v[timeIndex].(time.Time)) + if err != nil { + // Drop points that can't be stored + continue + } + + points = append(points, p) + } + + return points, nil +} + +// NormalizeStatement adds a default database and policy to the measurements in statement. +// Parameter defaultRetentionPolicy can be "". +func (e *StatementExecutor) NormalizeStatement(stmt influxql.Statement, defaultDatabase, defaultRetentionPolicy string) (err error) { + influxql.WalkFunc(stmt, func(node influxql.Node) { + if err != nil { + return + } + switch node := node.(type) { + case *influxql.ShowRetentionPoliciesStatement: + if node.Database == "" { + node.Database = defaultDatabase + } + case *influxql.ShowMeasurementsStatement: + if node.Database == "" { + node.Database = defaultDatabase + } + case *influxql.ShowTagKeysStatement: + if node.Database == "" { + node.Database = defaultDatabase + } + case *influxql.ShowTagValuesStatement: + if node.Database == "" { + node.Database = defaultDatabase + } + case *influxql.ShowMeasurementCardinalityStatement: + if node.Database == "" { + node.Database = defaultDatabase + } + case *influxql.ShowSeriesCardinalityStatement: + if node.Database == "" { + node.Database = defaultDatabase + } + case *influxql.Measurement: + switch stmt.(type) { + case *influxql.DropSeriesStatement, *influxql.DeleteSeriesStatement: + // DB and RP not supported by these statements so don't rewrite into invalid + // statements + default: + err = e.normalizeMeasurement(node, defaultDatabase, defaultRetentionPolicy) + } + } + }) + return +} + +func (e *StatementExecutor) normalizeMeasurement(m *influxql.Measurement, defaultDatabase, defaultRetentionPolicy string) error { + // Targets (measurements in an INTO clause) can have blank names, which means it will be + // the same as the measurement name it came from in the FROM clause. + if !m.IsTarget && m.Name == "" && m.SystemIterator == "" && m.Regex == nil { + return errors.New("invalid measurement") + } + + // Measurement does not have an explicit database? Insert default. + if m.Database == "" { + m.Database = defaultDatabase + } + + // The database must now be specified by this point. + if m.Database == "" { + return ErrDatabaseNameRequired + } + + // Find database. + di := e.MetaClient.Database(m.Database) + if di == nil { + return influxdb.ErrDatabaseNotFound(m.Database) + } + + // If no retention policy was specified, use the default. + if m.RetentionPolicy == "" { + if defaultRetentionPolicy != "" { + m.RetentionPolicy = defaultRetentionPolicy + } else if di.DefaultRetentionPolicy != "" { + m.RetentionPolicy = di.DefaultRetentionPolicy + } else { + return fmt.Errorf("default retention policy not set for: %s", di.Name) + } + } + return nil +} + +// IntoWriteRequest is a partial copy of cluster.WriteRequest +type IntoWriteRequest struct { + Database string + RetentionPolicy string + Points []models.Point +} + +// TSDBStore is an interface for accessing the time series data store. 
+type TSDBStore interface { + CreateShard(database, policy string, shardID uint64, enabled bool) error + WriteToShard(shardID uint64, points []models.Point) error + + RestoreShard(id uint64, r io.Reader) error + BackupShard(id uint64, since time.Time, w io.Writer) error + + DeleteDatabase(name string) error + DeleteMeasurement(database, name string) error + DeleteRetentionPolicy(database, name string) error + DeleteSeries(database string, sources []influxql.Source, condition influxql.Expr) error + DeleteShard(id uint64) error + + MeasurementNames(auth query.Authorizer, database string, cond influxql.Expr) ([][]byte, error) + TagKeys(auth query.Authorizer, shardIDs []uint64, cond influxql.Expr) ([]tsdb.TagKeys, error) + TagValues(auth query.Authorizer, shardIDs []uint64, cond influxql.Expr) ([]tsdb.TagValues, error) + + SeriesCardinality(database string) (int64, error) + MeasurementsCardinality(database string) (int64, error) +} + +var _ TSDBStore = LocalTSDBStore{} + +// LocalTSDBStore embeds a tsdb.Store and implements IteratorCreator +// to satisfy the TSDBStore interface. +type LocalTSDBStore struct { + *tsdb.Store +} + +// ShardIteratorCreator is an interface for creating an IteratorCreator to access a specific shard. +type ShardIteratorCreator interface { + ShardIteratorCreator(id uint64) query.IteratorCreator +} + +// joinUint64 returns a comma-delimited string of uint64 numbers. +func joinUint64(a []uint64) string { + var buf bytes.Buffer + for i, x := range a { + buf.WriteString(strconv.FormatUint(x, 10)) + if i < len(a)-1 { + buf.WriteRune(',') + } + } + return buf.String() +} diff --git a/vendor/github.com/influxdata/influxdb/coordinator/statement_executor_test.go b/vendor/github.com/influxdata/influxdb/coordinator/statement_executor_test.go new file mode 100644 index 0000000..5d64c63 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/coordinator/statement_executor_test.go @@ -0,0 +1,616 @@ +package coordinator_test + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "os" + "reflect" + "regexp" + "testing" + "time" + + "github.com/davecgh/go-spew/spew" + "github.com/influxdata/influxdb/coordinator" + "github.com/influxdata/influxdb/internal" + "github.com/influxdata/influxdb/logger" + "github.com/influxdata/influxdb/models" + "github.com/influxdata/influxdb/query" + "github.com/influxdata/influxdb/services/meta" + "github.com/influxdata/influxdb/tsdb" + "github.com/influxdata/influxql" +) + +const ( + // DefaultDatabase is the default database name used in tests. + DefaultDatabase = "db0" + + // DefaultRetentionPolicy is the default retention policy name used in tests. + DefaultRetentionPolicy = "rp0" +) + +// Ensure query executor can execute a simple SELECT statement. +func TestQueryExecutor_ExecuteQuery_SelectStatement(t *testing.T) { + e := DefaultQueryExecutor() + + // The meta client should return a single shard owned by the local node. + e.MetaClient.ShardGroupsByTimeRangeFn = func(database, policy string, min, max time.Time) (a []meta.ShardGroupInfo, err error) { + return []meta.ShardGroupInfo{ + {ID: 1, Shards: []meta.ShardInfo{ + {ID: 100, Owners: []meta.ShardOwner{{NodeID: 0}}}, + }}, + }, nil + } + + // The TSDB store should return an IteratorCreator for shard. + // This IteratorCreator returns a single iterator with "value" in the aux fields. 
+ e.TSDBStore.ShardGroupFn = func(ids []uint64) tsdb.ShardGroup { + if !reflect.DeepEqual(ids, []uint64{100}) { + t.Fatalf("unexpected shard ids: %v", ids) + } + + var sh MockShard + sh.CreateIteratorFn = func(_ context.Context, _ *influxql.Measurement, _ query.IteratorOptions) (query.Iterator, error) { + return &FloatIterator{Points: []query.FloatPoint{ + {Name: "cpu", Time: int64(0 * time.Second), Aux: []interface{}{float64(100)}}, + {Name: "cpu", Time: int64(1 * time.Second), Aux: []interface{}{float64(200)}}, + }}, nil + } + sh.FieldDimensionsFn = func(measurements []string) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error) { + if !reflect.DeepEqual(measurements, []string{"cpu"}) { + t.Fatalf("unexpected source: %#v", measurements) + } + return map[string]influxql.DataType{"value": influxql.Float}, nil, nil + } + return &sh + } + + // Verify all results from the query. + if a := ReadAllResults(e.ExecuteQuery(`SELECT * FROM cpu`, "db0", 0)); !reflect.DeepEqual(a, []*query.Result{ + { + StatementID: 0, + Series: []*models.Row{{ + Name: "cpu", + Columns: []string{"time", "value"}, + Values: [][]interface{}{ + {time.Unix(0, 0).UTC(), float64(100)}, + {time.Unix(1, 0).UTC(), float64(200)}, + }, + }}, + }, + }) { + t.Fatalf("unexpected results: %s", spew.Sdump(a)) + } +} + +// Ensure query executor can enforce a maximum bucket selection count. +func TestQueryExecutor_ExecuteQuery_MaxSelectBucketsN(t *testing.T) { + e := DefaultQueryExecutor() + e.StatementExecutor.MaxSelectBucketsN = 3 + + // The meta client should return a single shards on the local node. + e.MetaClient.ShardGroupsByTimeRangeFn = func(database, policy string, min, max time.Time) (a []meta.ShardGroupInfo, err error) { + return []meta.ShardGroupInfo{ + {ID: 1, Shards: []meta.ShardInfo{ + {ID: 100, Owners: []meta.ShardOwner{{NodeID: 0}}}, + }}, + }, nil + } + + e.TSDBStore.ShardGroupFn = func(ids []uint64) tsdb.ShardGroup { + if !reflect.DeepEqual(ids, []uint64{100}) { + t.Fatalf("unexpected shard ids: %v", ids) + } + + var sh MockShard + sh.CreateIteratorFn = func(_ context.Context, _ *influxql.Measurement, _ query.IteratorOptions) (query.Iterator, error) { + return &FloatIterator{ + Points: []query.FloatPoint{{Name: "cpu", Time: int64(0 * time.Second), Aux: []interface{}{float64(100)}}}, + }, nil + } + sh.FieldDimensionsFn = func(measurements []string) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error) { + if !reflect.DeepEqual(measurements, []string{"cpu"}) { + t.Fatalf("unexpected source: %#v", measurements) + } + return map[string]influxql.DataType{"value": influxql.Float}, nil, nil + } + return &sh + } + + // Verify all results from the query. 
+ if a := ReadAllResults(e.ExecuteQuery(`SELECT count(value) FROM cpu WHERE time >= '2000-01-01T00:00:05Z' AND time < '2000-01-01T00:00:35Z' GROUP BY time(10s)`, "db0", 0)); !reflect.DeepEqual(a, []*query.Result{ + { + StatementID: 0, + Err: errors.New("max-select-buckets limit exceeded: (4/3)"), + }, + }) { + t.Fatalf("unexpected results: %s", spew.Sdump(a)) + } +} + +func TestStatementExecutor_ExecuteQuery_WriteInto(t *testing.T) { + for _, tt := range []struct { + name string + pw func(t *testing.T, req *coordinator.IntoWriteRequest) error + query string + source func() query.Iterator + written int64 + }{ + { + name: "DropNullPoints", + pw: func(t *testing.T, req *coordinator.IntoWriteRequest) error { + if want, got := len(req.Points), 0; want != got { + t.Errorf("unexpected written points: %d != %d", want, got) + } + return nil + }, + query: `SELECT stddev(value) INTO cpu_stddev FROM cpu WHERE time >= '2000-01-01T00:00:05Z' AND time < '2000-01-01T00:00:35Z' GROUP BY time(10s)`, + source: func() query.Iterator { + return &FloatIterator{ + Points: []query.FloatPoint{{Name: "cpu", Time: int64(0 * time.Second), Value: 100}}, + } + }, + written: 0, + }, + { + name: "PartialDrop", + pw: func(t *testing.T, req *coordinator.IntoWriteRequest) error { + if want, got := len(req.Points), 1; want != got { + t.Errorf("unexpected written points: %d != %d", want, got) + } else { + fields, err := req.Points[0].Fields() + if err != nil { + return err + } else if want, got := len(fields), 1; want != got { + t.Errorf("unexpected number of fields: %d != %d", want, got) + } + } + return nil + }, + query: `SELECT max(value), stddev(value) INTO cpu_agg FROM cpu WHERE time >= '2000-01-01T00:00:05Z' AND time < '2000-01-01T00:00:35Z' GROUP BY time(10s)`, + source: func() query.Iterator { + return &FloatIterator{ + Points: []query.FloatPoint{{Name: "cpu", Time: int64(0 * time.Second), Value: 100}}, + } + }, + written: 1, + }, + } { + t.Run(tt.name, func(t *testing.T) { + e := DefaultQueryExecutor() + e.StatementExecutor.PointsWriter = writePointsIntoFunc(func(req *coordinator.IntoWriteRequest) error { + return tt.pw(t, req) + }) + + // The meta client should return a single shards on the local node. + e.MetaClient.ShardGroupsByTimeRangeFn = func(database, policy string, min, max time.Time) (a []meta.ShardGroupInfo, err error) { + return []meta.ShardGroupInfo{ + {ID: 1, Shards: []meta.ShardInfo{ + {ID: 100, Owners: []meta.ShardOwner{{NodeID: 0}}}, + }}, + }, nil + } + + e.TSDBStore.ShardGroupFn = func(ids []uint64) tsdb.ShardGroup { + if !reflect.DeepEqual(ids, []uint64{100}) { + t.Fatalf("unexpected shard ids: %v", ids) + } + + var sh MockShard + sh.CreateIteratorFn = func(_ context.Context, _ *influxql.Measurement, _ query.IteratorOptions) (query.Iterator, error) { + return tt.source(), nil + } + sh.FieldDimensionsFn = func(measurements []string) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error) { + if !reflect.DeepEqual(measurements, []string{"cpu"}) { + t.Fatalf("unexpected source: %#v", measurements) + } + return map[string]influxql.DataType{"value": influxql.Float}, nil, nil + } + return &sh + } + + // Verify all results from the query. 
+ if a := ReadAllResults(e.ExecuteQuery(tt.query, "db0", 0)); !reflect.DeepEqual(a, []*query.Result{ + { + StatementID: 0, + Series: models.Rows{ + { + Name: "result", + Columns: []string{"time", "written"}, + Values: [][]interface{}{ + {ts("1970-01-01T00:00:00Z"), int64(tt.written)}, + }, + }, + }, + }, + }) { + t.Fatalf("unexpected results: %s", spew.Sdump(a)) + } + }) + } +} + +func TestStatementExecutor_NormalizeStatement(t *testing.T) { + + testCases := []struct { + name string + query string + defaultDB string + defaultRP string + expectedDB string + expectedRP string + }{ + { + name: "defaults", + query: "SELECT f FROM m", + defaultDB: DefaultDatabase, + defaultRP: "", + expectedDB: DefaultDatabase, + expectedRP: DefaultRetentionPolicy, + }, + { + name: "alternate database via param", + query: "SELECT f FROM m", + defaultDB: "dbalt", + defaultRP: "", + expectedDB: "dbalt", + expectedRP: DefaultRetentionPolicy, + }, + { + name: "alternate database via query", + query: fmt.Sprintf("SELECT f FROM dbalt.%s.m", DefaultRetentionPolicy), + defaultDB: DefaultDatabase, + defaultRP: "", + expectedDB: "dbalt", + expectedRP: DefaultRetentionPolicy, + }, + { + name: "alternate RP via param", + query: "SELECT f FROM m", + defaultDB: DefaultDatabase, + defaultRP: "rpalt", + expectedDB: DefaultDatabase, + expectedRP: "rpalt", + }, + { + name: "alternate RP via query", + query: fmt.Sprintf("SELECT f FROM %s.rpalt.m", DefaultDatabase), + defaultDB: DefaultDatabase, + defaultRP: "", + expectedDB: DefaultDatabase, + expectedRP: "rpalt", + }, + { + name: "alternate RP query disagrees with param and query wins", + query: fmt.Sprintf("SELECT f FROM %s.rpquery.m", DefaultDatabase), + defaultDB: DefaultDatabase, + defaultRP: "rpparam", + expectedDB: DefaultDatabase, + expectedRP: "rpquery", + }, + } + + for _, testCase := range testCases { + t.Run(testCase.name, func(t *testing.T) { + q, err := influxql.ParseQuery(testCase.query) + if err != nil { + t.Fatalf("unexpected error parsing query: %v", err) + } + + stmt := q.Statements[0].(*influxql.SelectStatement) + + err = DefaultQueryExecutor().StatementExecutor.NormalizeStatement(stmt, testCase.defaultDB, testCase.defaultRP) + if err != nil { + t.Fatalf("unexpected error normalizing statement: %v", err) + } + + m := stmt.Sources[0].(*influxql.Measurement) + if m.Database != testCase.expectedDB { + t.Errorf("database got %v, want %v", m.Database, testCase.expectedDB) + } + if m.RetentionPolicy != testCase.expectedRP { + t.Errorf("retention policy got %v, want %v", m.RetentionPolicy, testCase.expectedRP) + } + }) + } +} + +func TestStatementExecutor_NormalizeDropSeries(t *testing.T) { + q, err := influxql.ParseQuery("DROP SERIES FROM cpu") + if err != nil { + t.Fatalf("unexpected error parsing query: %v", err) + } + + stmt := q.Statements[0].(*influxql.DropSeriesStatement) + + s := &coordinator.StatementExecutor{ + MetaClient: &internal.MetaClientMock{ + DatabaseFn: func(name string) *meta.DatabaseInfo { + t.Fatal("meta client should not be called") + return nil + }, + }, + } + if err := s.NormalizeStatement(stmt, "foo", "bar"); err != nil { + t.Fatalf("unexpected error normalizing statement: %v", err) + } + + m := stmt.Sources[0].(*influxql.Measurement) + if m.Database != "" { + t.Fatalf("database rewritten when not supposed to: %v", m.Database) + } + if m.RetentionPolicy != "" { + t.Fatalf("retention policy rewritten when not supposed to: %v", m.RetentionPolicy) + } + + if exp, got := "DROP SERIES FROM cpu", q.String(); exp != got { + t.Fatalf("generated 
query does match parsed: exp %v, got %v", exp, got) + } +} + +func TestStatementExecutor_NormalizeDeleteSeries(t *testing.T) { + q, err := influxql.ParseQuery("DELETE FROM cpu") + if err != nil { + t.Fatalf("unexpected error parsing query: %v", err) + } + + stmt := q.Statements[0].(*influxql.DeleteSeriesStatement) + + s := &coordinator.StatementExecutor{ + MetaClient: &internal.MetaClientMock{ + DatabaseFn: func(name string) *meta.DatabaseInfo { + t.Fatal("meta client should not be called") + return nil + }, + }, + } + if err := s.NormalizeStatement(stmt, "foo", "bar"); err != nil { + t.Fatalf("unexpected error normalizing statement: %v", err) + } + + m := stmt.Sources[0].(*influxql.Measurement) + if m.Database != "" { + t.Fatalf("database rewritten when not supposed to: %v", m.Database) + } + if m.RetentionPolicy != "" { + t.Fatalf("retention policy rewritten when not supposed to: %v", m.RetentionPolicy) + } + + if exp, got := "DELETE FROM cpu", q.String(); exp != got { + t.Fatalf("generated query does match parsed: exp %v, got %v", exp, got) + } +} + +type mockAuthorizer struct { + AuthorizeDatabaseFn func(influxql.Privilege, string) bool +} + +func (a *mockAuthorizer) AuthorizeDatabase(p influxql.Privilege, name string) bool { + return a.AuthorizeDatabaseFn(p, name) +} + +func (m *mockAuthorizer) AuthorizeQuery(database string, query *influxql.Query) error { + panic("fail") +} + +func (m *mockAuthorizer) AuthorizeSeriesRead(database string, measurement []byte, tags models.Tags) bool { + panic("fail") +} + +func (m *mockAuthorizer) AuthorizeSeriesWrite(database string, measurement []byte, tags models.Tags) bool { + panic("fail") +} + +func TestQueryExecutor_ExecuteQuery_ShowDatabases(t *testing.T) { + qe := query.NewExecutor() + qe.StatementExecutor = &coordinator.StatementExecutor{ + MetaClient: &internal.MetaClientMock{ + DatabasesFn: func() []meta.DatabaseInfo { + return []meta.DatabaseInfo{ + {Name: "db1"}, {Name: "db2"}, {Name: "db3"}, {Name: "db4"}, + } + }, + }, + } + + opt := query.ExecutionOptions{ + Authorizer: &mockAuthorizer{ + AuthorizeDatabaseFn: func(p influxql.Privilege, name string) bool { + return name == "db2" || name == "db4" + }, + }, + } + + q, err := influxql.ParseQuery("SHOW DATABASES") + if err != nil { + t.Fatal(err) + } + + results := ReadAllResults(qe.ExecuteQuery(q, opt, make(chan struct{}))) + exp := []*query.Result{ + { + StatementID: 0, + Series: []*models.Row{{ + Name: "databases", + Columns: []string{"name"}, + Values: [][]interface{}{ + {"db2"}, {"db4"}, + }, + }}, + }, + } + if !reflect.DeepEqual(results, exp) { + t.Fatalf("unexpected results: exp %s, got %s", spew.Sdump(exp), spew.Sdump(results)) + } +} + +// QueryExecutor is a test wrapper for coordinator.QueryExecutor. +type QueryExecutor struct { + *query.Executor + + MetaClient MetaClient + TSDBStore *internal.TSDBStoreMock + StatementExecutor *coordinator.StatementExecutor + LogOutput bytes.Buffer +} + +// NewQueryExecutor returns a new instance of Executor. +// This query executor always has a node id of 0. 
+func NewQueryExecutor() *QueryExecutor { + e := &QueryExecutor{ + Executor: query.NewExecutor(), + TSDBStore: &internal.TSDBStoreMock{}, + } + + e.TSDBStore.CreateShardFn = func(database, policy string, shardID uint64, enabled bool) error { + return nil + } + + e.TSDBStore.MeasurementNamesFn = func(auth query.Authorizer, database string, cond influxql.Expr) ([][]byte, error) { + return nil, nil + } + + e.TSDBStore.TagValuesFn = func(_ query.Authorizer, _ []uint64, _ influxql.Expr) ([]tsdb.TagValues, error) { + return nil, nil + } + + e.StatementExecutor = &coordinator.StatementExecutor{ + MetaClient: &e.MetaClient, + TSDBStore: e.TSDBStore, + ShardMapper: &coordinator.LocalShardMapper{ + MetaClient: &e.MetaClient, + TSDBStore: e.TSDBStore, + }, + } + e.Executor.StatementExecutor = e.StatementExecutor + + var out io.Writer = &e.LogOutput + if testing.Verbose() { + out = io.MultiWriter(out, os.Stderr) + } + e.Executor.WithLogger(logger.New(out)) + + return e +} + +// DefaultQueryExecutor returns a Executor with a database (db0) and retention policy (rp0). +func DefaultQueryExecutor() *QueryExecutor { + e := NewQueryExecutor() + e.MetaClient.DatabaseFn = DefaultMetaClientDatabaseFn + return e +} + +// ExecuteQuery parses query and executes against the database. +func (e *QueryExecutor) ExecuteQuery(q, database string, chunkSize int) <-chan *query.Result { + return e.Executor.ExecuteQuery(MustParseQuery(q), query.ExecutionOptions{ + Database: database, + ChunkSize: chunkSize, + }, make(chan struct{})) +} + +type MockShard struct { + Measurements []string + FieldDimensionsFn func(measurements []string) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error) + FieldKeysByMeasurementFn func(name []byte) []string + CreateIteratorFn func(ctx context.Context, m *influxql.Measurement, opt query.IteratorOptions) (query.Iterator, error) + IteratorCostFn func(m string, opt query.IteratorOptions) (query.IteratorCost, error) + ExpandSourcesFn func(sources influxql.Sources) (influxql.Sources, error) +} + +func (sh *MockShard) MeasurementsByRegex(re *regexp.Regexp) []string { + names := make([]string, 0, len(sh.Measurements)) + for _, name := range sh.Measurements { + if re.MatchString(name) { + names = append(names, name) + } + } + return names +} + +func (sh *MockShard) FieldKeysByMeasurement(name []byte) []string { + return sh.FieldKeysByMeasurementFn(name) +} + +func (sh *MockShard) FieldDimensions(measurements []string) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error) { + return sh.FieldDimensionsFn(measurements) +} + +func (sh *MockShard) MapType(measurement, field string) influxql.DataType { + f, d, err := sh.FieldDimensions([]string{measurement}) + if err != nil { + return influxql.Unknown + } + + if typ, ok := f[field]; ok { + return typ + } else if _, ok := d[field]; ok { + return influxql.Tag + } + return influxql.Unknown +} + +func (sh *MockShard) CreateIterator(ctx context.Context, measurement *influxql.Measurement, opt query.IteratorOptions) (query.Iterator, error) { + return sh.CreateIteratorFn(ctx, measurement, opt) +} + +func (sh *MockShard) IteratorCost(measurement string, opt query.IteratorOptions) (query.IteratorCost, error) { + return sh.IteratorCostFn(measurement, opt) +} + +func (sh *MockShard) ExpandSources(sources influxql.Sources) (influxql.Sources, error) { + return sh.ExpandSourcesFn(sources) +} + +// MustParseQuery parses s into a query. Panic on error. 
+func MustParseQuery(s string) *influxql.Query { + q, err := influxql.ParseQuery(s) + if err != nil { + panic(err) + } + return q +} + +// ReadAllResults reads all results from c and returns as a slice. +func ReadAllResults(c <-chan *query.Result) []*query.Result { + var a []*query.Result + for result := range c { + a = append(a, result) + } + return a +} + +// FloatIterator is a represents an iterator that reads from a slice. +type FloatIterator struct { + Points []query.FloatPoint + stats query.IteratorStats +} + +func (itr *FloatIterator) Stats() query.IteratorStats { return itr.stats } +func (itr *FloatIterator) Close() error { return nil } + +// Next returns the next value and shifts it off the beginning of the points slice. +func (itr *FloatIterator) Next() (*query.FloatPoint, error) { + if len(itr.Points) == 0 { + return nil, nil + } + + v := &itr.Points[0] + itr.Points = itr.Points[1:] + return v, nil +} + +func ts(s string) time.Time { + t, err := time.Parse(time.RFC3339, s) + if err != nil { + panic(err) + } + return t +} + +type writePointsIntoFunc func(req *coordinator.IntoWriteRequest) error + +func (fn writePointsIntoFunc) WritePointsInto(req *coordinator.IntoWriteRequest) error { + return fn(req) +} diff --git a/vendor/github.com/influxdata/influxdb/docker/entrypoint.sh b/vendor/github.com/influxdata/influxdb/docker/entrypoint.sh new file mode 100755 index 0000000..26e6bd7 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/docker/entrypoint.sh @@ -0,0 +1,12 @@ +#!/bin/bash +set -e + +if [ "${1:0:1}" = '-' ]; then + set -- influxd "$@" +fi + +if [ "$1" = 'influxd' ]; then + /init-influxdb.sh "${@:2}" +fi + +exec "$@" diff --git a/vendor/github.com/influxdata/influxdb/docker/init-influxdb.sh b/vendor/github.com/influxdata/influxdb/docker/init-influxdb.sh new file mode 100755 index 0000000..c941080 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/docker/init-influxdb.sh @@ -0,0 +1,120 @@ +#!/bin/bash +set -e + +AUTH_ENABLED="$INFLUXDB_HTTP_AUTH_ENABLED" + +if [ -z "$AUTH_ENABLED" ]; then + AUTH_ENABLED="$(grep -iE '^\s*auth-enabled\s*=\s*true' /etc/influxdb/influxdb.conf | grep -io 'true' | cat)" +else + AUTH_ENABLED="$(echo ""$INFLUXDB_HTTP_AUTH_ENABLED"" | grep -io 'true' | cat)" +fi + +INIT_USERS=$([ ! -z "$AUTH_ENABLED" ] && [ ! -z "$INFLUXDB_ADMIN_USER" ] && echo 1 || echo) + +if ( [ ! -z "$INIT_USERS" ] || [ ! -z "$INFLUXDB_DB" ] || [ "$(ls -A /docker-entrypoint-initdb.d 2> /dev/null)" ] ) && [ ! "$(ls -A /var/lib/influxdb)" ]; then + + INIT_QUERY="" + CREATE_DB_QUERY="CREATE DATABASE $INFLUXDB_DB" + + if [ ! -z "$INIT_USERS" ]; then + + if [ -z "$INFLUXDB_ADMIN_PASSWORD" ]; then + INFLUXDB_ADMIN_PASSWORD="$(< /dev/urandom tr -dc _A-Z-a-z-0-9 | head -c32;echo;)" + echo "INFLUXDB_ADMIN_PASSWORD:$INFLUXDB_ADMIN_PASSWORD" + fi + + INIT_QUERY="CREATE USER $INFLUXDB_ADMIN_USER WITH PASSWORD '$INFLUXDB_ADMIN_PASSWORD' WITH ALL PRIVILEGES" + elif [ ! -z "$INFLUXDB_DB" ]; then + INIT_QUERY="$CREATE_DB_QUERY" + else + INIT_QUERY="SHOW DATABASES" + fi + + INFLUXDB_INIT_PORT="8086" + + INFLUXDB_HTTP_BIND_ADDRESS=127.0.0.1:$INFLUXDB_INIT_PORT INFLUXDB_HTTP_HTTPS_ENABLED=false influxd "$@" & + pid="$!" + + INFLUX_CMD="influx -host 127.0.0.1 -port $INFLUXDB_INIT_PORT -execute " + + for i in {30..0}; do + if $INFLUX_CMD "$INIT_QUERY" &> /dev/null; then + break + fi + echo 'influxdb init process in progress...' + sleep 1 + done + + if [ "$i" = 0 ]; then + echo >&2 'influxdb init process failed.' + exit 1 + fi + + if [ ! 
-z "$INIT_USERS" ]; then + + INFLUX_CMD="influx -host 127.0.0.1 -port $INFLUXDB_INIT_PORT -username ${INFLUXDB_ADMIN_USER} -password ${INFLUXDB_ADMIN_PASSWORD} -execute " + + if [ ! -z "$INFLUXDB_DB" ]; then + $INFLUX_CMD "$CREATE_DB_QUERY" + fi + + if [ ! -z "$INFLUXDB_USER" ] && [ -z "$INFLUXDB_USER_PASSWORD" ]; then + INFLUXDB_USER_PASSWORD="$(< /dev/urandom tr -dc _A-Z-a-z-0-9 | head -c32;echo;)" + echo "INFLUXDB_USER_PASSWORD:$INFLUXDB_USER_PASSWORD" + fi + + if [ ! -z "$INFLUXDB_USER" ]; then + $INFLUX_CMD "CREATE USER $INFLUXDB_USER WITH PASSWORD '$INFLUXDB_USER_PASSWORD'" + + $INFLUX_CMD "REVOKE ALL PRIVILEGES FROM ""$INFLUXDB_USER""" + + if [ ! -z "$INFLUXDB_DB" ]; then + $INFLUX_CMD "GRANT ALL ON ""$INFLUXDB_DB"" TO ""$INFLUXDB_USER""" + fi + fi + + if [ ! -z "$INFLUXDB_WRITE_USER" ] && [ -z "$INFLUXDB_WRITE_USER_PASSWORD" ]; then + INFLUXDB_WRITE_USER_PASSWORD="$(< /dev/urandom tr -dc _A-Z-a-z-0-9 | head -c32;echo;)" + echo "INFLUXDB_WRITE_USER_PASSWORD:$INFLUXDB_WRITE_USER_PASSWORD" + fi + + if [ ! -z "$INFLUXDB_WRITE_USER" ]; then + $INFLUX_CMD "CREATE USER $INFLUXDB_WRITE_USER WITH PASSWORD '$INFLUXDB_WRITE_USER_PASSWORD'" + $INFLUX_CMD "REVOKE ALL PRIVILEGES FROM ""$INFLUXDB_WRITE_USER""" + + if [ ! -z "$INFLUXDB_DB" ]; then + $INFLUX_CMD "GRANT WRITE ON ""$INFLUXDB_DB"" TO ""$INFLUXDB_WRITE_USER""" + fi + fi + + if [ ! -z "$INFLUXDB_READ_USER" ] && [ -z "$INFLUXDB_READ_USER_PASSWORD" ]; then + INFLUXDB_READ_USER_PASSWORD="$(< /dev/urandom tr -dc _A-Z-a-z-0-9 | head -c32;echo;)" + echo "INFLUXDB_READ_USER_PASSWORD:$INFLUXDB_READ_USER_PASSWORD" + fi + + if [ ! -z "$INFLUXDB_READ_USER" ]; then + $INFLUX_CMD "CREATE USER $INFLUXDB_READ_USER WITH PASSWORD '$INFLUXDB_READ_USER_PASSWORD'" + $INFLUX_CMD "REVOKE ALL PRIVILEGES FROM ""$INFLUXDB_READ_USER""" + + if [ ! -z "$INFLUXDB_DB" ]; then + $INFLUX_CMD "GRANT READ ON ""$INFLUXDB_DB"" TO ""$INFLUXDB_READ_USER""" + fi + fi + + fi + + for f in /docker-entrypoint-initdb.d/*; do + case "$f" in + *.sh) echo "$0: running $f"; . "$f" ;; + *.iql) echo "$0: running $f"; $INFLUX_CMD "$(cat ""$f"")"; echo ;; + *) echo "$0: ignoring $f" ;; + esac + echo + done + + if ! kill -s TERM "$pid" || ! wait "$pid"; then + echo >&2 'influxdb init process failed. (Could not stop influxdb)' + exit 1 + fi + +fi diff --git a/vendor/github.com/influxdata/influxdb/errors.go b/vendor/github.com/influxdata/influxdb/errors.go new file mode 100644 index 0000000..9bc6b99 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/errors.go @@ -0,0 +1,42 @@ +package influxdb + +import ( + "errors" + "fmt" + "strings" +) + +// ErrFieldTypeConflict is returned when a new field already exists with a +// different type. +var ErrFieldTypeConflict = errors.New("field type conflict") + +// ErrDatabaseNotFound indicates that a database operation failed on the +// specified database because the specified database does not exist. +func ErrDatabaseNotFound(name string) error { return fmt.Errorf("database not found: %s", name) } + +// ErrRetentionPolicyNotFound indicates that the named retention policy could +// not be found in the database. +func ErrRetentionPolicyNotFound(name string) error { + return fmt.Errorf("retention policy not found: %s", name) +} + +// IsAuthorizationError indicates whether an error is due to an authorization failure +func IsAuthorizationError(err error) bool { + e, ok := err.(interface { + AuthorizationFailed() bool + }) + return ok && e.AuthorizationFailed() +} + +// IsClientError indicates whether an error is a known client error. 
+func IsClientError(err error) bool { + if err == nil { + return false + } + + if strings.HasPrefix(err.Error(), ErrFieldTypeConflict.Error()) { + return true + } + + return false +} diff --git a/vendor/github.com/influxdata/influxdb/etc/burn-in/.rvmrc b/vendor/github.com/influxdata/influxdb/etc/burn-in/.rvmrc new file mode 100644 index 0000000..a9c1a9c --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/etc/burn-in/.rvmrc @@ -0,0 +1 @@ +rvm use ruby-2.1.0@burn-in --create diff --git a/vendor/github.com/influxdata/influxdb/etc/burn-in/Gemfile b/vendor/github.com/influxdata/influxdb/etc/burn-in/Gemfile new file mode 100644 index 0000000..b1816e8 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/etc/burn-in/Gemfile @@ -0,0 +1,4 @@ +source 'https://rubygems.org' + +gem "colorize" +gem "influxdb" diff --git a/vendor/github.com/influxdata/influxdb/etc/burn-in/Gemfile.lock b/vendor/github.com/influxdata/influxdb/etc/burn-in/Gemfile.lock new file mode 100644 index 0000000..9e721c3 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/etc/burn-in/Gemfile.lock @@ -0,0 +1,14 @@ +GEM + remote: https://rubygems.org/ + specs: + colorize (0.6.0) + influxdb (0.0.16) + json + json (1.8.1) + +PLATFORMS + ruby + +DEPENDENCIES + colorize + influxdb diff --git a/vendor/github.com/influxdata/influxdb/etc/burn-in/burn-in.rb b/vendor/github.com/influxdata/influxdb/etc/burn-in/burn-in.rb new file mode 100644 index 0000000..1d44bc2 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/etc/burn-in/burn-in.rb @@ -0,0 +1,79 @@ +require "influxdb" +require "colorize" +require "benchmark" + +require_relative "log" +require_relative "random_gaussian" + +BATCH_SIZE = 10_000 + +Log.info "Starting burn-in suite" +master = InfluxDB::Client.new +master.delete_database("burn-in") rescue nil +master.create_database("burn-in") +master.create_database_user("burn-in", "user", "pass") + +master.database = "burn-in" +# master.query "select * from test1 into test2;" +# master.query "select count(value) from test1 group by time(1m) into test2;" + +influxdb = InfluxDB::Client.new "burn-in", username: "user", password: "pass" + +Log.success "Connected to server #{influxdb.host}:#{influxdb.port}" + +Log.log "Creating RandomGaussian(500, 25)" +gaussian = RandomGaussian.new(500, 25) +point_count = 0 + +while true + Log.log "Generating 10,000 points.." + points = [] + BATCH_SIZE.times do |n| + points << {value: gaussian.rand.to_i.abs} + end + point_count += points.length + + Log.info "Sending points to server.." + begin + st = Time.now + foo = influxdb.write_point("test1", points) + et = Time.now + Log.log foo.inspect + Log.log "#{et-st} seconds elapsed" + Log.success "Write successful." 
+ rescue => e + Log.failure "Write failed:" + Log.log e + end + sleep 0.5 + + Log.info "Checking regular points" + st = Time.now + response = influxdb.query("select count(value) from test1;") + et = Time.now + + Log.log "#{et-st} seconds elapsed" + + response_count = response["test1"].first["count"] + if point_count == response_count + Log.success "Point counts match: #{point_count} == #{response_count}" + else + Log.failure "Point counts don't match: #{point_count} != #{response_count}" + end + + # Log.info "Checking continuous query points for test2" + # st = Time.now + # response = influxdb.query("select count(value) from test2;") + # et = Time.now + + # Log.log "#{et-st} seconds elapsed" + + # response_count = response["test2"].first["count"] + # if point_count == response_count + # Log.success "Point counts match: #{point_count} == #{response_count}" + # else + # Log.failure "Point counts don't match: #{point_count} != #{response_count}" + # end +end + + diff --git a/vendor/github.com/influxdata/influxdb/etc/burn-in/log.rb b/vendor/github.com/influxdata/influxdb/etc/burn-in/log.rb new file mode 100644 index 0000000..0f70d76 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/etc/burn-in/log.rb @@ -0,0 +1,23 @@ +module Log + def self.info(msg) + print Time.now.strftime("%r") + " | " + puts msg.to_s.colorize(:yellow) + end + + def self.success(msg) + print Time.now.strftime("%r") + " | " + puts msg.to_s.colorize(:green) + end + + def self.failure(msg) + print Time.now.strftime("%r") + " | " + puts msg.to_s.colorize(:red) + end + + def self.log(msg) + print Time.now.strftime("%r") + " | " + puts msg.to_s + end +end + + diff --git a/vendor/github.com/influxdata/influxdb/etc/burn-in/random_gaussian.rb b/vendor/github.com/influxdata/influxdb/etc/burn-in/random_gaussian.rb new file mode 100644 index 0000000..51d6c3c --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/etc/burn-in/random_gaussian.rb @@ -0,0 +1,31 @@ +class RandomGaussian + def initialize(mean, stddev, rand_helper = lambda { Kernel.rand }) + @rand_helper = rand_helper + @mean = mean + @stddev = stddev + @valid = false + @next = 0 + end + + def rand + if @valid then + @valid = false + return @next + else + @valid = true + x, y = self.class.gaussian(@mean, @stddev, @rand_helper) + @next = y + return x + end + end + + private + def self.gaussian(mean, stddev, rand) + theta = 2 * Math::PI * rand.call + rho = Math.sqrt(-2 * Math.log(1 - rand.call)) + scale = stddev * rho + x = mean + scale * Math.cos(theta) + y = mean + scale * Math.sin(theta) + return x, y + end +end diff --git a/vendor/github.com/influxdata/influxdb/etc/burn-in/random_points.rb b/vendor/github.com/influxdata/influxdb/etc/burn-in/random_points.rb new file mode 100644 index 0000000..93bc831 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/etc/burn-in/random_points.rb @@ -0,0 +1,29 @@ +require "influxdb" + +ONE_WEEK_IN_SECONDS = 7*24*60*60 +NUM_POINTS = 10_000 +BATCHES = 100 + +master = InfluxDB::Client.new +master.delete_database("ctx") rescue nil +master.create_database("ctx") + +influxdb = InfluxDB::Client.new "ctx" +influxdb.time_precision = "s" + +names = ["foo", "bar", "baz", "quu", "qux"] + +st = Time.now +BATCHES.times do |m| + points = [] + + puts "Writing #{NUM_POINTS} points, time ##{m}.." 
+ NUM_POINTS.times do |n| + timestamp = Time.now.to_i - rand(ONE_WEEK_IN_SECONDS) + points << {value: names.sample, time: timestamp} + end + + influxdb.write_point("ct1", points) +end +puts st +puts Time.now diff --git a/vendor/github.com/influxdata/influxdb/etc/config.sample.toml b/vendor/github.com/influxdata/influxdb/etc/config.sample.toml new file mode 100644 index 0000000..cbbf6e3 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/etc/config.sample.toml @@ -0,0 +1,556 @@ +### Welcome to the InfluxDB configuration file. + +# The values in this file override the default values used by the system if +# a config option is not specified. The commented out lines are the configuration +# field and the default value used. Uncommenting a line and changing the value +# will change the value used at runtime when the process is restarted. + +# Once every 24 hours InfluxDB will report usage data to usage.influxdata.com +# The data includes a random ID, os, arch, version, the number of series and other +# usage data. No data from user databases is ever transmitted. +# Change this option to true to disable reporting. +# reporting-disabled = false + +# Bind address to use for the RPC service for backup and restore. +# bind-address = "127.0.0.1:8088" + +### +### [meta] +### +### Controls the parameters for the Raft consensus group that stores metadata +### about the InfluxDB cluster. +### + +[meta] + # Where the metadata/raft database is stored + dir = "/var/lib/influxdb/meta" + + # Automatically create a default retention policy when creating a database. + # retention-autocreate = true + + # If log messages are printed for the meta service + # logging-enabled = true + +### +### [data] +### +### Controls where the actual shard data for InfluxDB lives and how it is +### flushed from the WAL. "dir" may need to be changed to a suitable place +### for your system, but the WAL settings are an advanced configuration. The +### defaults should work for most systems. +### + +[data] + # The directory where the TSM storage engine stores TSM files. + dir = "/var/lib/influxdb/data" + + # The directory where the TSM storage engine stores WAL files. + wal-dir = "/var/lib/influxdb/wal" + + # The amount of time that a write will wait before fsyncing. A duration + # greater than 0 can be used to batch up multiple fsync calls. This is useful for slower + # disks or when WAL write contention is seen. A value of 0s fsyncs every write to the WAL. + # Values in the range of 0-100ms are recommended for non-SSD disks. + # wal-fsync-delay = "0s" + + + # The type of shard index to use for new shards. The default is an in-memory index that is + # recreated at startup. A value of "tsi1" will use a disk based index that supports higher + # cardinality datasets. + # index-version = "inmem" + + # Trace logging provides more verbose output around the tsm engine. Turning + # this on can provide more useful output for debugging tsm engine issues. + # trace-logging-enabled = false + + # Whether queries should be logged before execution. Very useful for troubleshooting, but will + # log any sensitive data contained within a query. + # query-log-enabled = true + + # Settings for the TSM engine + + # CacheMaxMemorySize is the maximum size a shard's cache can + # reach before it starts rejecting writes. + # Valid size suffixes are k, m, or g (case insensitive, 1024 = 1k). + # Values without a size suffix are in bytes. 
+ # cache-max-memory-size = "1g" + + # CacheSnapshotMemorySize is the size at which the engine will + # snapshot the cache and write it to a TSM file, freeing up memory + # Valid size suffixes are k, m, or g (case insensitive, 1024 = 1k). + # Values without a size suffix are in bytes. + # cache-snapshot-memory-size = "25m" + + # CacheSnapshotWriteColdDuration is the length of time at + # which the engine will snapshot the cache and write it to + # a new TSM file if the shard hasn't received writes or deletes + # cache-snapshot-write-cold-duration = "10m" + + # CompactFullWriteColdDuration is the duration at which the engine + # will compact all TSM files in a shard if it hasn't received a + # write or delete + # compact-full-write-cold-duration = "4h" + + # The maximum number of concurrent full and level compactions that can run at one time. A + # value of 0 results in 50% of runtime.GOMAXPROCS(0) used at runtime. Any number greater + # than 0 limits compactions to that value. This setting does not apply + # to cache snapshotting. + # max-concurrent-compactions = 0 + + # The threshold, in bytes, when an index write-ahead log file will compact + # into an index file. Lower sizes will cause log files to be compacted more + # quickly and result in lower heap usage at the expense of write throughput. + # Higher sizes will be compacted less frequently, store more series in-memory, + # and provide higher write throughput. + # Valid size suffixes are k, m, or g (case insensitive, 1024 = 1k). + # Values without a size suffix are in bytes. + # max-index-log-file-size = "1m" + + # The maximum series allowed per database before writes are dropped. This limit can prevent + # high cardinality issues at the database level. This limit can be disabled by setting it to + # 0. + # max-series-per-database = 1000000 + + # The maximum number of tag values per tag that are allowed before writes are dropped. This limit + # can prevent high cardinality tag values from being written to a measurement. This limit can be + # disabled by setting it to 0. + # max-values-per-tag = 100000 + + # If true, then the mmap advise value MADV_WILLNEED will be provided to the kernel with respect to + # TSM files. This setting has been found to be problematic on some kernels, and defaults to off. + # It might help users who have slow disks in some cases. + # tsm-use-madv-willneed = false + +### +### [coordinator] +### +### Controls the clustering service configuration. +### + +[coordinator] + # The default time a write request will wait until a "timeout" error is returned to the caller. + # write-timeout = "10s" + + # The maximum number of concurrent queries allowed to be executing at one time. If a query is + # executed and exceeds this limit, an error is returned to the caller. This limit can be disabled + # by setting it to 0. + # max-concurrent-queries = 0 + + # The maximum time a query will is allowed to execute before being killed by the system. This limit + # can help prevent run away queries. Setting the value to 0 disables the limit. + # query-timeout = "0s" + + # The time threshold when a query will be logged as a slow query. This limit can be set to help + # discover slow or resource intensive queries. Setting the value to 0 disables the slow query logging. + # log-queries-after = "0s" + + # The maximum number of points a SELECT can process. A value of 0 will make + # the maximum point count unlimited. This will only be checked every second so queries will not + # be aborted immediately when hitting the limit. 
+ # max-select-point = 0 + + # The maximum number of series a SELECT can run. A value of 0 will make the maximum series + # count unlimited. + # max-select-series = 0 + + # The maximum number of group by time buckets a SELECT can create. A value of zero will make the maximum + # number of buckets unlimited. + # max-select-buckets = 0 + +### +### [retention] +### +### Controls the enforcement of retention policies for evicting old data. +### + +[retention] + # Determines whether retention policy enforcement is enabled. + # enabled = true + + # The interval of time when retention policy enforcement checks run. + # check-interval = "30m" + +### +### [shard-precreation] +### +### Controls the precreation of shards, so they are available before data arrives. +### Only shards that, after creation, will have both a start- and end-time in the +### future, will ever be created. Shards that would be wholly +### or partially in the past are never precreated. + +[shard-precreation] + # Determines whether shard pre-creation service is enabled. + # enabled = true + + # The interval of time when the check to pre-create new shards runs. + # check-interval = "10m" + + # The default period ahead of the endtime of a shard group that its successor + # group is created. + # advance-period = "30m" + +### +### Controls the system self-monitoring, statistics and diagnostics. +### +### The internal database for monitoring data is created automatically +### if it does not already exist. The target retention within this database +### is called 'monitor' and is also created with a retention period of 7 days +### and a replication factor of 1, if it does not exist. In all cases +### this retention policy is configured as the default for the database. + +[monitor] + # Whether to record statistics internally. + # store-enabled = true + + # The destination database for recorded statistics + # store-database = "_internal" + + # The interval at which to record statistics + # store-interval = "10s" + +### +### [http] +### +### Controls how the HTTP endpoints are configured. These are the primary +### mechanism for getting data into and out of InfluxDB. +### + +[http] + # Determines whether HTTP endpoint is enabled. + # enabled = true + + # The bind address used by the HTTP service. + # bind-address = ":8086" + + # Determines whether user authentication is enabled over HTTP/HTTPS. + # auth-enabled = false + + # The default realm sent back when issuing a basic auth challenge. + # realm = "InfluxDB" + + # Determines whether HTTP request logging is enabled. + # log-enabled = true + + # Determines whether the HTTP write request logs should be suppressed when the log is enabled. + # suppress-write-log = false + + # When HTTP request logging is enabled, this option specifies the path where + # log entries should be written. If unspecified, the default is to write to stderr, which + # intermingles HTTP logs with internal InfluxDB logging. + # + # If influxd is unable to access the specified path, it will log an error and fall back to writing + # the request log to stderr. + # access-log-path = "" + + # Determines whether detailed write logging is enabled. + # write-tracing = false + + # Determines whether the pprof endpoint is enabled. This endpoint is used for + # troubleshooting and monitoring. + # pprof-enabled = true + + # Enables a pprof endpoint that binds to localhost:6060 immediately on startup. + # This is only needed to debug startup issues. + # debug-pprof-enabled = false + + # Determines whether HTTPS is enabled.
+ # https-enabled = false + + # The SSL certificate to use when HTTPS is enabled. + # https-certificate = "/etc/ssl/influxdb.pem" + + # Use a separate private key location. + # https-private-key = "" + + # The JWT auth shared secret to validate requests using JSON web tokens. + # shared-secret = "" + + # The default chunk size for result sets that should be chunked. + # max-row-limit = 0 + + # The maximum number of HTTP connections that may be open at once. New connections that + # would exceed this limit are dropped. Setting this value to 0 disables the limit. + # max-connection-limit = 0 + + # Enable http service over unix domain socket + # unix-socket-enabled = false + + # The path of the unix domain socket. + # bind-socket = "/var/run/influxdb.sock" + + # The maximum size of a client request body, in bytes. Setting this value to 0 disables the limit. + # max-body-size = 25000000 + + # The maximum number of writes processed concurrently. + # Setting this to 0 disables the limit. + # max-concurrent-write-limit = 0 + + # The maximum number of writes queued for processing. + # Setting this to 0 disables the limit. + # max-enqueued-write-limit = 0 + + # The maximum duration for a write to wait in the queue to be processed. + # Setting this to 0 or setting max-concurrent-write-limit to 0 disables the limit. + # enqueued-write-timeout = 0 + + +### +### [ifql] +### +### Configures the ifql RPC API. +### + +[ifql] + # Determines whether the RPC service is enabled. + # enabled = true + + # Determines whether additional logging is enabled. + # log-enabled = true + + # The bind address used by the ifql RPC service. + # bind-address = ":8082" + + +### +### [logging] +### +### Controls how the logger emits logs to the output. +### + +[logging] + # Determines which log encoder to use for logs. Available options + # are auto, logfmt, and json. auto will use a more a more user-friendly + # output format if the output terminal is a TTY, but the format is not as + # easily machine-readable. When the output is a non-TTY, auto will use + # logfmt. + # format = "auto" + + # Determines which level of logs will be emitted. The available levels + # are error, warn, info, and debug. Logs that are equal to or above the + # specified level will be emitted. + # level = "info" + + # Suppresses the logo output that is printed when the program is started. + # The logo is always suppressed if STDOUT is not a TTY. + # suppress-logo = false + +### +### [subscriber] +### +### Controls the subscriptions, which can be used to fork a copy of all data +### received by the InfluxDB host. +### + +[subscriber] + # Determines whether the subscriber service is enabled. + # enabled = true + + # The default timeout for HTTP writes to subscribers. + # http-timeout = "30s" + + # Allows insecure HTTPS connections to subscribers. This is useful when testing with self- + # signed certificates. + # insecure-skip-verify = false + + # The path to the PEM encoded CA certs file. If the empty string, the default system certs will be used + # ca-certs = "" + + # The number of writer goroutines processing the write channel. + # write-concurrency = 40 + + # The number of in-flight writes buffered in the write channel. + # write-buffer-size = 1000 + + +### +### [[graphite]] +### +### Controls one or many listeners for Graphite data. +### + +[[graphite]] + # Determines whether the graphite endpoint is enabled. 
+ # enabled = false + # database = "graphite" + # retention-policy = "" + # bind-address = ":2003" + # protocol = "tcp" + # consistency-level = "one" + + # These next lines control how batching works. You should have this enabled + # otherwise you could get dropped metrics or poor performance. Batching + # will buffer points in memory if you have many coming in. + + # Flush if this many points get buffered + # batch-size = 5000 + + # number of batches that may be pending in memory + # batch-pending = 10 + + # Flush at least this often even if we haven't hit buffer limit + # batch-timeout = "1s" + + # UDP Read buffer size, 0 means OS default. UDP listener will fail if set above OS max. + # udp-read-buffer = 0 + + ### This string joins multiple matching 'measurement' values providing more control over the final measurement name. + # separator = "." + + ### Default tags that will be added to all metrics. These can be overridden at the template level + ### or by tags extracted from metric + # tags = ["region=us-east", "zone=1c"] + + ### Each template line requires a template pattern. It can have an optional + ### filter before the template and separated by spaces. It can also have optional extra + ### tags following the template. Multiple tags should be separated by commas and no spaces + ### similar to the line protocol format. There can be only one default template. + # templates = [ + # "*.app env.service.resource.measurement", + # # Default template + # "server.*", + # ] + +### +### [collectd] +### +### Controls one or many listeners for collectd data. +### + +[[collectd]] + # enabled = false + # bind-address = ":25826" + # database = "collectd" + # retention-policy = "" + # + # The collectd service supports either scanning a directory for multiple types + # db files, or specifying a single db file. + # typesdb = "/usr/local/share/collectd" + # + # security-level = "none" + # auth-file = "/etc/collectd/auth_file" + + # These next lines control how batching works. You should have this enabled + # otherwise you could get dropped metrics or poor performance. Batching + # will buffer points in memory if you have many coming in. + + # Flush if this many points get buffered + # batch-size = 5000 + + # Number of batches that may be pending in memory + # batch-pending = 10 + + # Flush at least this often even if we haven't hit buffer limit + # batch-timeout = "10s" + + # UDP Read buffer size, 0 means OS default. UDP listener will fail if set above OS max. + # read-buffer = 0 + + # Multi-value plugins can be handled two ways. + # "split" will parse and store the multi-value plugin data into separate measurements + # "join" will parse and store the multi-value plugin as a single multi-value measurement. + # "split" is the default behavior for backward compatability with previous versions of influxdb. + # parse-multivalue-plugin = "split" +### +### [opentsdb] +### +### Controls one or many listeners for OpenTSDB data. +### + +[[opentsdb]] + # enabled = false + # bind-address = ":4242" + # database = "opentsdb" + # retention-policy = "" + # consistency-level = "one" + # tls-enabled = false + # certificate= "/etc/ssl/influxdb.pem" + + # Log an error for every malformed point. + # log-point-errors = true + + # These next lines control how batching works. You should have this enabled + # otherwise you could get dropped metrics or poor performance. Only points + # metrics received over the telnet protocol undergo batching. 
+ + # Flush if this many points get buffered + # batch-size = 1000 + + # Number of batches that may be pending in memory + # batch-pending = 5 + + # Flush at least this often even if we haven't hit buffer limit + # batch-timeout = "1s" + +### +### [[udp]] +### +### Controls the listeners for InfluxDB line protocol data via UDP. +### + +[[udp]] + # enabled = false + # bind-address = ":8089" + # database = "udp" + # retention-policy = "" + + # InfluxDB precision for timestamps on received points ("" or "n", "u", "ms", "s", "m", "h") + # precision = "" + + # These next lines control how batching works. You should have this enabled + # otherwise you could get dropped metrics or poor performance. Batching + # will buffer points in memory if you have many coming in. + + # Flush if this many points get buffered + # batch-size = 5000 + + # Number of batches that may be pending in memory + # batch-pending = 10 + + # Will flush at least this often even if we haven't hit buffer limit + # batch-timeout = "1s" + + # UDP Read buffer size, 0 means OS default. UDP listener will fail if set above OS max. + # read-buffer = 0 + +### +### [continuous_queries] +### +### Controls how continuous queries are run within InfluxDB. +### + +[continuous_queries] + # Determines whether the continuous query service is enabled. + # enabled = true + + # Controls whether queries are logged when executed by the CQ service. + # log-enabled = true + + # Controls whether queries are logged to the self-monitoring data store. + # query-stats-enabled = false + + # interval for how often continuous queries will be checked if they need to run + # run-interval = "1s" + +### +### [tls] +### +### Global configuration settings for TLS in InfluxDB. +### + +[tls] + # Determines the available set of cipher suites. See https://golang.org/pkg/crypto/tls/#pkg-constants + # for a list of available ciphers, which depends on the version of Go (use the query + # SHOW DIAGNOSTICS to see the version of Go used to build InfluxDB). If not specified, uses + # the default settings from Go's crypto/tls package. + # ciphers = [ + # "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", + # "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", + # ] + + # Minimum version of the tls protocol that will be negotiated. If not specified, uses the + # default settings from Go's crypto/tls package. + # min-version = "tls1.2" + + # Maximum version of the tls protocol that will be negotiated. If not specified, uses the + # default settings from Go's crypto/tls package. + # max-version = "tls1.2" diff --git a/vendor/github.com/influxdata/influxdb/gobuild.sh b/vendor/github.com/influxdata/influxdb/gobuild.sh new file mode 100755 index 0000000..9a96e7e --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/gobuild.sh @@ -0,0 +1,18 @@ +#!/bin/bash +# This script run inside the Dockerfile_build_ubuntu64_git container and +# gets the latests Go source code and compiles it. 
+# Then passes control over to the normal build.py script + +set -e + +cd /go/src +git fetch --all +git checkout $GO_CHECKOUT +# Merge in recent changes if we are on a branch +# if we checked out a tag just ignore the error +git pull || true +./make.bash + +# Run normal build.py +cd "$PROJECT_DIR" +exec ./build.py "$@" diff --git a/vendor/github.com/influxdata/influxdb/importer/README.md b/vendor/github.com/influxdata/influxdb/importer/README.md new file mode 100644 index 0000000..7b0dd87 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/importer/README.md @@ -0,0 +1,214 @@ +# Import/Export + +## Exporting from 0.8.9 + +Version `0.8.9` of InfluxDB adds support to export your data to a format that can be imported into `0.9.3` and later. + +Note that `0.8.9` can be found here: + +``` +http://get.influxdb.org.s3.amazonaws.com/influxdb_0.8.9_amd64.deb +http://get.influxdb.org.s3.amazonaws.com/influxdb-0.8.9-1.x86_64.rpm +``` + +### Design + +`0.8.9` exports raw data to a flat file that includes two sections, `DDL` and `DML`. You can choose to export them independently (see below). + +The `DDL` section contains the SQL commands to create databases and retention policies. The `DML` section is [line protocol](https://github.com/influxdata/influxdb/blob/master/tsdb/README.md) and can be directly posted to the [http endpoint](https://docs.influxdata.com/influxdb/v0.10/guides/writing_data) in `0.10`. Remember that batching is important and we don't recommend batch sizes over 5k without further testing. + +Example export file: +``` +# DDL +CREATE DATABASE db0 +CREATE DATABASE db1 +CREATE RETENTION POLICY rp1 ON db1 DURATION 1h REPLICATION 1 + +# DML +# CONTEXT-DATABASE:db0 +# CONTEXT-RETENTION-POLICY:autogen +cpu,host=server1 value=33.3 1464026335000000000 +cpu,host=server1 value=43.3 1464026395000000000 +cpu,host=server1 value=63.3 1464026575000000000 + +# CONTEXT-DATABASE:db1 +# CONTEXT-RETENTION-POLICY:rp1 +cpu,host=server1 value=73.3 1464026335000000000 +cpu,host=server1 value=83.3 1464026395000000000 +cpu,host=server1 value=93.3 1464026575000000000 +``` + +You need to specify a database and shard group when you export. + +To list out your shards, use the following http endpoint: + +`/cluster/shard_spaces` + +example: +```sh +http://username:password@localhost:8086/cluster/shard_spaces +``` + +Then, to export a database with the name "metrics" and a shard space with the name "default", issue the following curl command: + +```sh +curl -o export http://username:password@localhost:8086/export/metrics/default +``` + +Compression is supported, and will result in a significantly smaller file size.
+ +Use the following command for compression: +```sh +curl -o export.gz --compressed http://username:password@localhost:8086/export/metrics/default +``` + +You can also export just the `DDL` with this option: + +```sh +curl -o export.ddl http://username:password@localhost:8086/export/metrics/default?l=ddl +``` + +Or just the `DML` with this option: + +```sh +curl -o export.dml.gz --compressed http://username:password@localhost:8086/export/metrics/default?l=dml +``` + +### Assumptions + +- Series name mapping follows these [guidelines](https://docs.influxdata.com/influxdb/v0.8/advanced_topics/schema_design/) +- Database name will map directly from `0.8` to `0.10` +- Shard Spaces map to Retention Policies +- Shard Space Duration is ignored, as in `0.10` we determine shard size automatically +- Regex is used to match the correct series names and only exports that data for the database +- Duration becomes the new Retention Policy duration + +- Users are not migrated due to inability to get passwords. Anyone using users will need to manually set these back up in `0.10` + +### Upgrade Recommendations + +It's recommended that you upgrade to `0.9.3` or later first and have all your writes going there. Then, on the `0.8.X` instances, upgrade to `0.8.9`. + +It is important that when exporting you change your config to allow for the http endpoints not timing out. To do so, make this change in your config: + +```toml +# Configure the http api +[api] +read-timeout = "0s" +``` + +### Exceptions + +If a series can't be exported to tags based on the guidelines mentioned above, +we will insert the entire series name as the measurement name. You can either +allow that to import into the new InfluxDB instance, or you can do your own +data massage on it prior to importing it. + +For example, if you have the following series name: + +``` +metric.disk.c.host.server01.single +``` + +It will export exactly that as the measurement name and no tags: + +``` +metric.disk.c.host.server01.single +``` + +### Export Metrics + +When you export, you will now get comments inline in the `DML`: + +`# Found 999 Series for export` + +As well as count totals for each series exported: + +`# Series FOO - Points Exported: 999` + +With a total at the bottom: + +`# Points Exported: 999` + +You can grep the file that was exported at the end to get all the export metrics: + +`cat myexport | grep Exported` + +## Importing + +Version `0.9.3` of InfluxDB adds support to import your data from version `0.8.9`. + +## Caveats + +For the export/import to work, all requisites have to be met. For export, all series names in `0.8` should be in the following format: + +``` +.... +``` +for example: +``` +az.us-west-1.host.serverA.cpu +``` +or any number of tags +``` +building.2.temperature +``` + +Additionally, the fields need to have a consistent type (all float64, int64, etc) for every write in `0.8`. Otherwise they have the potential to fail writes in the import. +See below for more information.
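+The type conflict described above is easiest to see in line protocol. As an illustrative sketch (the measurement, tag, and values here are invented for this example, not taken from a real export), the first write creates the field `value` as an integer, so the second write of the same field as a float would be rejected in `0.9` and later:
+
+```
+cpu_load,host=server01 value=10i 1464026335000000000
+cpu_load,host=server01 value=10.5 1464026395000000000
+```
+
+The rejected write fails with a `field type conflict` error like the one shown in the import results below.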
+ +## Running the import command + + To import via the CLI, you can specify the following command: + + ```sh + influx -import -path=metrics-default.gz -compressed + ``` + + If the file is not compressed you can issue it without the `-compressed` flag: + + ```sh + influx -import -path=metrics-default + ``` + + To redirect failed import lines to another file, run this command: + + ```sh + influx -import -path=metrics-default.gz -compressed > failures + ``` + + The import will use the line protocol in batches of 5,000 lines per batch when sending data to the server. + +### Throttling the import + + If you need to throttle the import so the database has time to ingest, you can use the `-pps` flag. This will limit the points per second that will be sent to the server. + + ```sh + influx -import -path=metrics-default.gz -compressed -pps 50000 > failures + ``` + + This states that you don't want more than 50,000 points per second written to the database. Due to the processing that is taking place, however, you will likely never get exactly 50,000 pps, more like 35,000 pps, etc. + +## Understanding the results of the import + +During the import, a status message is written out for every 100,000 points imported, reporting stats on the progress of the import: + +``` +2015/08/21 14:48:01 Processed 3100000 lines. Time elapsed: 56.740578415s. Points per second (PPS): 54634 +``` + + The batch will give some basic stats when finished: + + ```sh + 2015/07/29 23:15:20 Processed 2 commands + 2015/07/29 23:15:20 Processed 70207923 inserts + 2015/07/29 23:15:20 Failed 29785000 inserts + ``` + + Most inserts fail due to the following types of error: + + ```sh + 2015/07/29 22:18:28 error writing batch: write failed: field type conflict: input field "value" on measurement "metric" is type float64, already exists as type integer + ``` + + This is due to the fact that in `0.8` a field could get created and saved as int or float types for independent writes. In `0.9` and greater the field has to have a consistent type. diff --git a/vendor/github.com/influxdata/influxdb/importer/v8/importer.go b/vendor/github.com/influxdata/influxdb/importer/v8/importer.go new file mode 100644 index 0000000..bba7576 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/importer/v8/importer.go @@ -0,0 +1,274 @@ +// Package v8 contains code for importing data from 0.8 instances of InfluxDB. +package v8 // import "github.com/influxdata/influxdb/importer/v8" + +import ( + "bufio" + "compress/gzip" + "fmt" + "io" + "log" + "os" + "strings" + "time" + + "github.com/influxdata/influxdb/client" +) + +const batchSize = 5000 + +// Config is the config used to initialize an Importer +type Config struct { + Path string // Path to import data. + Version string + Compressed bool // Whether import data is gzipped. + PPS int // points per second importer imports with.
+ + client.Config +} + +// NewConfig returns an initialized *Config +func NewConfig() Config { + return Config{Config: client.NewConfig()} +} + +// Importer is the importer used for importing 0.8 data +type Importer struct { + client *client.Client + database string + retentionPolicy string + config Config + batch []string + totalInserts int + failedInserts int + totalCommands int + throttlePointsWritten int + startTime time.Time + lastWrite time.Time + throttle *time.Ticker + + stderrLogger *log.Logger + stdoutLogger *log.Logger +} + +// NewImporter will return an intialized Importer struct +func NewImporter(config Config) *Importer { + config.UserAgent = fmt.Sprintf("influxDB importer/%s", config.Version) + return &Importer{ + config: config, + batch: make([]string, 0, batchSize), + stdoutLogger: log.New(os.Stdout, "", log.LstdFlags), + stderrLogger: log.New(os.Stderr, "", log.LstdFlags), + } +} + +// Import processes the specified file in the Config and writes the data to the databases in chunks specified by batchSize +func (i *Importer) Import() error { + // Create a client and try to connect. + cl, err := client.NewClient(i.config.Config) + if err != nil { + return fmt.Errorf("could not create client %s", err) + } + i.client = cl + if _, _, e := i.client.Ping(); e != nil { + return fmt.Errorf("failed to connect to %s\n", i.client.Addr()) + } + + // Validate args + if i.config.Path == "" { + return fmt.Errorf("file argument required") + } + + defer func() { + if i.totalInserts > 0 { + i.stdoutLogger.Printf("Processed %d commands\n", i.totalCommands) + i.stdoutLogger.Printf("Processed %d inserts\n", i.totalInserts) + i.stdoutLogger.Printf("Failed %d inserts\n", i.failedInserts) + } + }() + + // Open the file + f, err := os.Open(i.config.Path) + if err != nil { + return err + } + defer f.Close() + + var r io.Reader + + // If gzipped, wrap in a gzip reader + if i.config.Compressed { + gr, err := gzip.NewReader(f) + if err != nil { + return err + } + defer gr.Close() + // Set the reader to the gzip reader + r = gr + } else { + // Standard text file so our reader can just be the file + r = f + } + + // Get our reader + scanner := bufio.NewReader(r) + + // Process the DDL + if err := i.processDDL(scanner); err != nil { + return fmt.Errorf("reading standard input: %s", err) + } + + // Set up our throttle channel. Since there is effectively no other activity at this point + // the smaller resolution gets us much closer to the requested PPS + i.throttle = time.NewTicker(time.Microsecond) + defer i.throttle.Stop() + + // Prime the last write + i.lastWrite = time.Now() + + // Process the DML + if err := i.processDML(scanner); err != nil { + return fmt.Errorf("reading standard input: %s", err) + } + + // If there were any failed inserts then return an error so that a non-zero + // exit code can be returned. 
+ if i.failedInserts > 0 { + plural := " was" + if i.failedInserts > 1 { + plural = "s were" + } + + return fmt.Errorf("%d point%s not inserted", i.failedInserts, plural) + } + + return nil +} + +func (i *Importer) processDDL(scanner *bufio.Reader) error { + for { + line, err := scanner.ReadString(byte('\n')) + if err != nil && err != io.EOF { + return err + } else if err == io.EOF { + return nil + } + // If we find the DML token, we are done with DDL + if strings.HasPrefix(line, "# DML") { + return nil + } + if strings.HasPrefix(line, "#") { + continue + } + // Skip blank lines + if strings.TrimSpace(line) == "" { + continue + } + i.queryExecutor(line) + } +} + +func (i *Importer) processDML(scanner *bufio.Reader) error { + i.startTime = time.Now() + for { + line, err := scanner.ReadString(byte('\n')) + if err != nil && err != io.EOF { + return err + } else if err == io.EOF { + // Call batchWrite one last time to flush anything out in the batch + i.batchWrite() + return nil + } + if strings.HasPrefix(line, "# CONTEXT-DATABASE:") { + i.batchWrite() + i.database = strings.TrimSpace(strings.Split(line, ":")[1]) + } + if strings.HasPrefix(line, "# CONTEXT-RETENTION-POLICY:") { + i.batchWrite() + i.retentionPolicy = strings.TrimSpace(strings.Split(line, ":")[1]) + } + if strings.HasPrefix(line, "#") { + continue + } + // Skip blank lines + if strings.TrimSpace(line) == "" { + continue + } + i.batchAccumulator(line) + } +} + +func (i *Importer) execute(command string) { + response, err := i.client.Query(client.Query{Command: command, Database: i.database}) + if err != nil { + i.stderrLogger.Printf("error: %s\n", err) + return + } + if err := response.Error(); err != nil { + i.stderrLogger.Printf("error: %s\n", response.Error()) + } +} + +func (i *Importer) queryExecutor(command string) { + i.totalCommands++ + i.execute(command) +} + +func (i *Importer) batchAccumulator(line string) { + i.batch = append(i.batch, line) + if len(i.batch) == batchSize { + i.batchWrite() + } +} + +func (i *Importer) batchWrite() { + // Exit early if there are no points in the batch. + if len(i.batch) == 0 { + return + } + + // Accumulate the batch size to see how many points we have written this second + i.throttlePointsWritten += len(i.batch) + + // Find out when we last wrote data + since := time.Since(i.lastWrite) + + // Check to see if we've exceeded our points per second for the current timeframe + var currentPPS int + if since.Seconds() > 0 { + currentPPS = int(float64(i.throttlePointsWritten) / since.Seconds()) + } else { + currentPPS = i.throttlePointsWritten + } + + // If our currentPPS is greater than the PPS specified, then we wait and retry + if int(currentPPS) > i.config.PPS && i.config.PPS != 0 { + // Wait for the next tick + <-i.throttle.C + + // Decrement the batch size back out as it is going to get called again + i.throttlePointsWritten -= len(i.batch) + i.batchWrite() + return + } + + _, e := i.client.WriteLineProtocol(strings.Join(i.batch, "\n"), i.database, i.retentionPolicy, i.config.Precision, i.config.WriteConsistency) + if e != nil { + i.stderrLogger.Println("error writing batch: ", e) + i.stderrLogger.Println(strings.Join(i.batch, "\n")) + i.failedInserts += len(i.batch) + } else { + i.totalInserts += len(i.batch) + } + i.throttlePointsWritten = 0 + i.lastWrite = time.Now() + + // Clear the batch and record the number of processed points. 
+ i.batch = i.batch[:0] + // Give some status feedback every 100000 lines processed + processed := i.totalInserts + i.failedInserts + if processed%100000 == 0 { + since := time.Since(i.startTime) + pps := float64(processed) / since.Seconds() + i.stdoutLogger.Printf("Processed %d lines. Time elapsed: %s. Points per second (PPS): %d", processed, since.String(), int64(pps)) + } +} diff --git a/vendor/github.com/influxdata/influxdb/influxdb.go b/vendor/github.com/influxdata/influxdb/influxdb.go new file mode 100644 index 0000000..a594175 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/influxdb.go @@ -0,0 +1,6 @@ +// Package influxdb is the root package of InfluxDB, +// the scalable datastore for metrics, events, and real-time analytics. +// +// If you're looking for the Go HTTP client for InfluxDB, +// see package github.com/influxdata/influxdb/client/v2. +package influxdb // import "github.com/influxdata/influxdb" diff --git a/vendor/github.com/influxdata/influxdb/internal/authorizer.go b/vendor/github.com/influxdata/influxdb/internal/authorizer.go new file mode 100644 index 0000000..07847f5 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/internal/authorizer.go @@ -0,0 +1,38 @@ +package internal + +import ( + "github.com/influxdata/influxdb/models" + "github.com/influxdata/influxql" +) + +// AuthorizerMock is a mockable implementation of a query.Authorizer. +type AuthorizerMock struct { + AuthorizeDatabaseFn func(influxql.Privilege, string) bool + AuthorizeQueryFn func(database string, query *influxql.Query) error + AuthorizeSeriesReadFn func(database string, measurement []byte, tags models.Tags) bool + AuthorizeSeriesWriteFn func(database string, measurement []byte, tags models.Tags) bool +} + +// AuthorizeDatabase determines if the provided privilege is sufficient to +// authorise access to the database. +func (a *AuthorizerMock) AuthorizeDatabase(p influxql.Privilege, name string) bool { + return a.AuthorizeDatabaseFn(p, name) +} + +// AuthorizeQuery determins if the query can be executed against the provided +// database. +func (a *AuthorizerMock) AuthorizeQuery(database string, query *influxql.Query) error { + return a.AuthorizeQueryFn(database, query) +} + +// AuthorizeSeriesRead determines if the series comprising measurement and tags +// can be read on the provided database. +func (a *AuthorizerMock) AuthorizeSeriesRead(database string, measurement []byte, tags models.Tags) bool { + return a.AuthorizeSeriesReadFn(database, measurement, tags) +} + +// AuthorizeSeriesWrite determines if the series comprising measurement and tags +// can be written to, on the provided database. +func (a *AuthorizerMock) AuthorizeSeriesWrite(database string, measurement []byte, tags models.Tags) bool { + return a.AuthorizeSeriesWriteFn(database, measurement, tags) +} diff --git a/vendor/github.com/influxdata/influxdb/internal/cursors.go b/vendor/github.com/influxdata/influxdb/internal/cursors.go new file mode 100644 index 0000000..9a957e4 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/internal/cursors.go @@ -0,0 +1,132 @@ +package internal + +import "github.com/influxdata/influxdb/tsdb" + +var ( + _ tsdb.IntegerBatchCursor = NewIntegerBatchCursorMock() + _ tsdb.FloatBatchCursor = NewFloatBatchCursorMock() + _ tsdb.UnsignedBatchCursor = NewUnsignedBatchCursorMock() + _ tsdb.StringBatchCursor = NewStringBatchCursorMock() + _ tsdb.BooleanBatchCursor = NewBooleanBatchCursorMock() +) + +// BatchCursorMock provides a mock base implementation for batch cursors. 
+type BatchCursorMock struct { + CloseFn func() + ErrFn func() error +} + +// NewBatchCursorMock returns an initialised BatchCursorMock, which +// returns the zero value for all methods. +func NewBatchCursorMock() *BatchCursorMock { + return &BatchCursorMock{ + CloseFn: func() {}, + ErrFn: func() error { return nil }, + } +} + +// Close closes the cursor. +func (c *BatchCursorMock) Close() { c.CloseFn() } + +// Err returns the latest error, if any. +func (c *BatchCursorMock) Err() error { return c.ErrFn() } + +// IntegerBatchCursorMock provides a mock implementation of an IntegerBatchCursorMock. +type IntegerBatchCursorMock struct { + *BatchCursorMock + NextFn func() (keys []int64, values []int64) +} + +// NewIntegerBatchCursorMock returns an initialised IntegerBatchCursorMock, which +// returns the zero value for all methods. +func NewIntegerBatchCursorMock() *IntegerBatchCursorMock { + return &IntegerBatchCursorMock{ + BatchCursorMock: NewBatchCursorMock(), + NextFn: func() ([]int64, []int64) { return nil, nil }, + } +} + +// Next returns the next set of keys and values. +func (c *IntegerBatchCursorMock) Next() (keys []int64, values []int64) { + return c.NextFn() +} + +// FloatBatchCursorMock provides a mock implementation of a FloatBatchCursor. +type FloatBatchCursorMock struct { + *BatchCursorMock + NextFn func() (keys []int64, values []float64) +} + +// NewFloatBatchCursorMock returns an initialised FloatBatchCursorMock, which +// returns the zero value for all methods. +func NewFloatBatchCursorMock() *FloatBatchCursorMock { + return &FloatBatchCursorMock{ + BatchCursorMock: NewBatchCursorMock(), + NextFn: func() ([]int64, []float64) { return nil, nil }, + } +} + +// Next returns the next set of keys and values. +func (c *FloatBatchCursorMock) Next() (keys []int64, values []float64) { + return c.NextFn() +} + +// UnsignedBatchCursorMock provides a mock implementation of an UnsignedBatchCursorMock. +type UnsignedBatchCursorMock struct { + *BatchCursorMock + NextFn func() (keys []int64, values []uint64) +} + +// NewUnsignedBatchCursorMock returns an initialised UnsignedBatchCursorMock, which +// returns the zero value for all methods. +func NewUnsignedBatchCursorMock() *UnsignedBatchCursorMock { + return &UnsignedBatchCursorMock{ + BatchCursorMock: NewBatchCursorMock(), + NextFn: func() ([]int64, []uint64) { return nil, nil }, + } +} + +// Next returns the next set of keys and values. +func (c *UnsignedBatchCursorMock) Next() (keys []int64, values []uint64) { + return c.NextFn() +} + +// StringBatchCursorMock provides a mock implementation of a StringBatchCursor. +type StringBatchCursorMock struct { + *BatchCursorMock + NextFn func() (keys []int64, values []string) +} + +// NewStringBatchCursorMock returns an initialised StringBatchCursorMock, which +// returns the zero value for all methods. +func NewStringBatchCursorMock() *StringBatchCursorMock { + return &StringBatchCursorMock{ + BatchCursorMock: NewBatchCursorMock(), + NextFn: func() ([]int64, []string) { return nil, nil }, + } +} + +// Next returns the next set of keys and values. +func (c *StringBatchCursorMock) Next() (keys []int64, values []string) { + return c.NextFn() +} + +// BooleanBatchCursorMock provides a mock implementation of a BooleanBatchCursor. +type BooleanBatchCursorMock struct { + *BatchCursorMock + NextFn func() (keys []int64, values []bool) +} + +// NewBooleanBatchCursorMock returns an initialised BooleanBatchCursorMock, which +// returns the zero value for all methods. 
+func NewBooleanBatchCursorMock() *BooleanBatchCursorMock { + return &BooleanBatchCursorMock{ + BatchCursorMock: NewBatchCursorMock(), + NextFn: func() ([]int64, []bool) { return nil, nil }, + } +} + +// Next returns the next set of keys and values. +func (c *BooleanBatchCursorMock) Next() (keys []int64, values []bool) { + return c.NextFn() +} diff --git a/vendor/github.com/influxdata/influxdb/internal/meta_client.go b/vendor/github.com/influxdata/influxdb/internal/meta_client.go new file mode 100644 index 0000000..739e477 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/internal/meta_client.go @@ -0,0 +1,179 @@ +package internal + +import ( + "time" + + "github.com/influxdata/influxdb/services/meta" + "github.com/influxdata/influxql" +) + +// MetaClientMock is a mockable implementation of meta.MetaClient. +type MetaClientMock struct { + CloseFn func() error + CreateContinuousQueryFn func(database, name, query string) error + CreateDatabaseFn func(name string) (*meta.DatabaseInfo, error) + CreateDatabaseWithRetentionPolicyFn func(name string, spec *meta.RetentionPolicySpec) (*meta.DatabaseInfo, error) + CreateRetentionPolicyFn func(database string, spec *meta.RetentionPolicySpec, makeDefault bool) (*meta.RetentionPolicyInfo, error) + CreateShardGroupFn func(database, policy string, timestamp time.Time) (*meta.ShardGroupInfo, error) + CreateSubscriptionFn func(database, rp, name, mode string, destinations []string) error + CreateUserFn func(name, password string, admin bool) (meta.User, error) + + DatabaseFn func(name string) *meta.DatabaseInfo + DatabasesFn func() []meta.DatabaseInfo + + DataFn func() meta.Data + DeleteShardGroupFn func(database string, policy string, id uint64) error + DropContinuousQueryFn func(database, name string) error + DropDatabaseFn func(name string) error + DropRetentionPolicyFn func(database, name string) error + DropSubscriptionFn func(database, rp, name string) error + DropShardFn func(id uint64) error + DropUserFn func(name string) error + + OpenFn func() error + + PrecreateShardGroupsFn func(from, to time.Time) error + PruneShardGroupsFn func() error + + RetentionPolicyFn func(database, name string) (rpi *meta.RetentionPolicyInfo, err error) + + AuthenticateFn func(username, password string) (ui meta.User, err error) + AdminUserExistsFn func() bool + SetAdminPrivilegeFn func(username string, admin bool) error + SetDataFn func(*meta.Data) error + SetPrivilegeFn func(username, database string, p influxql.Privilege) error + ShardGroupsByTimeRangeFn func(database, policy string, min, max time.Time) (a []meta.ShardGroupInfo, err error) + ShardOwnerFn func(shardID uint64) (database, policy string, sgi *meta.ShardGroupInfo) + TruncateShardGroupsFn func(t time.Time) error + UpdateRetentionPolicyFn func(database, name string, rpu *meta.RetentionPolicyUpdate, makeDefault bool) error + UpdateUserFn func(name, password string) error + UserPrivilegeFn func(username, database string) (*influxql.Privilege, error) + UserPrivilegesFn func(username string) (map[string]influxql.Privilege, error) + UserFn func(username string) (meta.User, error) + UsersFn func() []meta.UserInfo +} + +func (c *MetaClientMock) Close() error { + return c.CloseFn() +} + +func (c *MetaClientMock) CreateContinuousQuery(database, name, query string) error { + return c.CreateContinuousQueryFn(database, name, query) +} + +func (c *MetaClientMock) CreateDatabase(name string) (*meta.DatabaseInfo, error) { + return c.CreateDatabaseFn(name) +} + +func (c *MetaClientMock) 
CreateDatabaseWithRetentionPolicy(name string, spec *meta.RetentionPolicySpec) (*meta.DatabaseInfo, error) { + return c.CreateDatabaseWithRetentionPolicyFn(name, spec) +} + +func (c *MetaClientMock) CreateRetentionPolicy(database string, spec *meta.RetentionPolicySpec, makeDefault bool) (*meta.RetentionPolicyInfo, error) { + return c.CreateRetentionPolicyFn(database, spec, makeDefault) +} + +func (c *MetaClientMock) CreateShardGroup(database, policy string, timestamp time.Time) (*meta.ShardGroupInfo, error) { + return c.CreateShardGroupFn(database, policy, timestamp) +} + +func (c *MetaClientMock) CreateSubscription(database, rp, name, mode string, destinations []string) error { + return c.CreateSubscriptionFn(database, rp, name, mode, destinations) +} + +func (c *MetaClientMock) CreateUser(name, password string, admin bool) (meta.User, error) { + return c.CreateUserFn(name, password, admin) +} + +func (c *MetaClientMock) Database(name string) *meta.DatabaseInfo { + return c.DatabaseFn(name) +} + +func (c *MetaClientMock) Databases() []meta.DatabaseInfo { + return c.DatabasesFn() +} + +func (c *MetaClientMock) DeleteShardGroup(database string, policy string, id uint64) error { + return c.DeleteShardGroupFn(database, policy, id) +} + +func (c *MetaClientMock) DropContinuousQuery(database, name string) error { + return c.DropContinuousQueryFn(database, name) +} + +func (c *MetaClientMock) DropDatabase(name string) error { + return c.DropDatabaseFn(name) +} + +func (c *MetaClientMock) DropRetentionPolicy(database, name string) error { + return c.DropRetentionPolicyFn(database, name) +} + +func (c *MetaClientMock) DropShard(id uint64) error { + return c.DropShardFn(id) +} + +func (c *MetaClientMock) DropSubscription(database, rp, name string) error { + return c.DropSubscriptionFn(database, rp, name) +} + +func (c *MetaClientMock) DropUser(name string) error { + return c.DropUserFn(name) +} + +func (c *MetaClientMock) RetentionPolicy(database, name string) (rpi *meta.RetentionPolicyInfo, err error) { + return c.RetentionPolicyFn(database, name) +} + +func (c *MetaClientMock) SetAdminPrivilege(username string, admin bool) error { + return c.SetAdminPrivilegeFn(username, admin) +} + +func (c *MetaClientMock) SetPrivilege(username, database string, p influxql.Privilege) error { + return c.SetPrivilegeFn(username, database, p) +} + +func (c *MetaClientMock) ShardGroupsByTimeRange(database, policy string, min, max time.Time) (a []meta.ShardGroupInfo, err error) { + return c.ShardGroupsByTimeRangeFn(database, policy, min, max) +} + +func (c *MetaClientMock) ShardOwner(shardID uint64) (database, policy string, sgi *meta.ShardGroupInfo) { + return c.ShardOwnerFn(shardID) +} + +func (c *MetaClientMock) TruncateShardGroups(t time.Time) error { + return c.TruncateShardGroupsFn(t) +} + +func (c *MetaClientMock) UpdateRetentionPolicy(database, name string, rpu *meta.RetentionPolicyUpdate, makeDefault bool) error { + return c.UpdateRetentionPolicyFn(database, name, rpu, makeDefault) +} + +func (c *MetaClientMock) UpdateUser(name, password string) error { + return c.UpdateUserFn(name, password) +} + +func (c *MetaClientMock) UserPrivilege(username, database string) (*influxql.Privilege, error) { + return c.UserPrivilegeFn(username, database) +} + +func (c *MetaClientMock) UserPrivileges(username string) (map[string]influxql.Privilege, error) { + return c.UserPrivilegesFn(username) +} + +func (c *MetaClientMock) Authenticate(username, password string) (meta.User, error) { + return c.AuthenticateFn(username, 
password) +} +func (c *MetaClientMock) AdminUserExists() bool { return c.AdminUserExistsFn() } + +func (c *MetaClientMock) User(username string) (meta.User, error) { return c.UserFn(username) } +func (c *MetaClientMock) Users() []meta.UserInfo { return c.UsersFn() } + +func (c *MetaClientMock) Open() error { return c.OpenFn() } +func (c *MetaClientMock) Data() meta.Data { return c.DataFn() } +func (c *MetaClientMock) SetData(d *meta.Data) error { return c.SetDataFn(d) } + +func (c *MetaClientMock) PrecreateShardGroups(from, to time.Time) error { + return c.PrecreateShardGroupsFn(from, to) +} +func (c *MetaClientMock) PruneShardGroups() error { return c.PruneShardGroupsFn() } diff --git a/vendor/github.com/influxdata/influxdb/internal/storage_store.go b/vendor/github.com/influxdata/influxdb/internal/storage_store.go new file mode 100644 index 0000000..e3f6550 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/internal/storage_store.go @@ -0,0 +1,78 @@ +package internal + +import ( + "context" + + "github.com/influxdata/influxdb/models" + "github.com/influxdata/influxdb/services/storage" + "github.com/influxdata/influxdb/tsdb" + "go.uber.org/zap" +) + +// TSDBStoreMock is a mockable implementation of storage.Store. +// +// It's currently a partial implementation as one of a store's exported methods +// returns an unexported type. +type StorageStoreMock struct { + ReadFn func(ctx context.Context, req *storage.ReadRequest) (storage.Results, error) + WithLoggerFn func(log *zap.Logger) + + ResultSet *StorageResultsMock + // TODO(edd): can't mock GroupRead as it returns an unexported type. +} + +// NewStorageStoreMock initialises a StorageStoreMock with methods that return +// their zero values. It also initialises a StorageResultsMock, which can be +// configured via the ResultSet field. +func NewStorageStoreMock() *StorageStoreMock { + store := &StorageStoreMock{ + WithLoggerFn: func(*zap.Logger) {}, + ResultSet: NewStorageResultsMock(), + } + store.ReadFn = func(context.Context, *storage.ReadRequest) (storage.Results, error) { + return store.ResultSet, nil + } + return store +} + +// WithLogger sets the logger. +func (s *StorageStoreMock) WithLogger(log *zap.Logger) { + s.WithLoggerFn(log) +} + +// Read reads the storage request and returns a cursor to access results. +func (s *StorageStoreMock) Read(ctx context.Context, req *storage.ReadRequest) (storage.Results, error) { + return s.ReadFn(ctx, req) +} + +// StorageResultsMock implements the storage.Results interface providing the +// ability to emit mock results from calls to the StorageStoreMock.Read method. +type StorageResultsMock struct { + CloseFn func() + NextFn func() bool + CursorFn func() tsdb.Cursor + TagsFn func() models.Tags +} + +// NewStorageResultsMock initialises a StorageResultsMock whose methods all return +// their zero value. +func NewStorageResultsMock() *StorageResultsMock { + return &StorageResultsMock{ + CloseFn: func() {}, + NextFn: func() bool { return false }, + CursorFn: func() tsdb.Cursor { return nil }, + TagsFn: func() models.Tags { return nil }, + } +} + +// Close closes the result set. +func (r *StorageResultsMock) Close() { r.CloseFn() } + +// Next returns true if there are more results available. +func (r *StorageResultsMock) Next() bool { return r.NextFn() } + +// Cursor returns the cursor for the result set. +func (r *StorageResultsMock) Cursor() tsdb.Cursor { return r.CursorFn() } + +// Tags returns the series' tag set. 
+func (r *StorageResultsMock) Tags() models.Tags { return r.TagsFn() } diff --git a/vendor/github.com/influxdata/influxdb/internal/tsdb_store.go b/vendor/github.com/influxdata/influxdb/internal/tsdb_store.go new file mode 100644 index 0000000..e2f27ca --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/internal/tsdb_store.go @@ -0,0 +1,151 @@ +package internal + +import ( + "io" + "time" + + "github.com/influxdata/influxdb/models" + "github.com/influxdata/influxdb/query" + "github.com/influxdata/influxdb/tsdb" + "github.com/influxdata/influxql" + "go.uber.org/zap" +) + +// TSDBStoreMock is a mockable implementation of tsdb.Store. +type TSDBStoreMock struct { + BackupShardFn func(id uint64, since time.Time, w io.Writer) error + BackupSeriesFileFn func(database string, w io.Writer) error + ExportShardFn func(id uint64, ExportStart time.Time, ExportEnd time.Time, w io.Writer) error + CloseFn func() error + CreateShardFn func(database, policy string, shardID uint64, enabled bool) error + CreateShardSnapshotFn func(id uint64) (string, error) + DatabasesFn func() []string + DeleteDatabaseFn func(name string) error + DeleteMeasurementFn func(database, name string) error + DeleteRetentionPolicyFn func(database, name string) error + DeleteSeriesFn func(database string, sources []influxql.Source, condition influxql.Expr) error + DeleteShardFn func(id uint64) error + DiskSizeFn func() (int64, error) + ExpandSourcesFn func(sources influxql.Sources) (influxql.Sources, error) + ImportShardFn func(id uint64, r io.Reader) error + MeasurementSeriesCountsFn func(database string) (measuments int, series int) + MeasurementsCardinalityFn func(database string) (int64, error) + MeasurementNamesFn func(auth query.Authorizer, database string, cond influxql.Expr) ([][]byte, error) + OpenFn func() error + PathFn func() string + RestoreShardFn func(id uint64, r io.Reader) error + SeriesCardinalityFn func(database string) (int64, error) + SetShardEnabledFn func(shardID uint64, enabled bool) error + ShardFn func(id uint64) *tsdb.Shard + ShardGroupFn func(ids []uint64) tsdb.ShardGroup + ShardIDsFn func() []uint64 + ShardNFn func() int + ShardRelativePathFn func(id uint64) (string, error) + ShardsFn func(ids []uint64) []*tsdb.Shard + StatisticsFn func(tags map[string]string) []models.Statistic + TagKeysFn func(auth query.Authorizer, shardIDs []uint64, cond influxql.Expr) ([]tsdb.TagKeys, error) + TagValuesFn func(auth query.Authorizer, shardIDs []uint64, cond influxql.Expr) ([]tsdb.TagValues, error) + WithLoggerFn func(log *zap.Logger) + WriteToShardFn func(shardID uint64, points []models.Point) error +} + +func (s *TSDBStoreMock) BackupShard(id uint64, since time.Time, w io.Writer) error { + return s.BackupShardFn(id, since, w) +} +func (s *TSDBStoreMock) BackupSeriesFile(database string, w io.Writer) error { + return s.BackupSeriesFileFn(database, w) +} +func (s *TSDBStoreMock) ExportShard(id uint64, ExportStart time.Time, ExportEnd time.Time, w io.Writer) error { + return s.ExportShardFn(id, ExportStart, ExportEnd, w) +} +func (s *TSDBStoreMock) Close() error { return s.CloseFn() } +func (s *TSDBStoreMock) CreateShard(database string, retentionPolicy string, shardID uint64, enabled bool) error { + return s.CreateShardFn(database, retentionPolicy, shardID, enabled) +} +func (s *TSDBStoreMock) CreateShardSnapshot(id uint64) (string, error) { + return s.CreateShardSnapshotFn(id) +} +func (s *TSDBStoreMock) Databases() []string { + return s.DatabasesFn() +} +func (s *TSDBStoreMock) DeleteDatabase(name string) error 
{ + return s.DeleteDatabaseFn(name) +} +func (s *TSDBStoreMock) DeleteMeasurement(database string, name string) error { + return s.DeleteMeasurementFn(database, name) +} +func (s *TSDBStoreMock) DeleteRetentionPolicy(database string, name string) error { + return s.DeleteRetentionPolicyFn(database, name) +} +func (s *TSDBStoreMock) DeleteSeries(database string, sources []influxql.Source, condition influxql.Expr) error { + return s.DeleteSeriesFn(database, sources, condition) +} +func (s *TSDBStoreMock) DeleteShard(shardID uint64) error { + return s.DeleteShardFn(shardID) +} +func (s *TSDBStoreMock) DiskSize() (int64, error) { + return s.DiskSizeFn() +} +func (s *TSDBStoreMock) ExpandSources(sources influxql.Sources) (influxql.Sources, error) { + return s.ExpandSourcesFn(sources) +} +func (s *TSDBStoreMock) ImportShard(id uint64, r io.Reader) error { + return s.ImportShardFn(id, r) +} +func (s *TSDBStoreMock) MeasurementNames(auth query.Authorizer, database string, cond influxql.Expr) ([][]byte, error) { + return s.MeasurementNamesFn(auth, database, cond) +} +func (s *TSDBStoreMock) MeasurementSeriesCounts(database string) (measuments int, series int) { + return s.MeasurementSeriesCountsFn(database) +} +func (s *TSDBStoreMock) MeasurementsCardinality(database string) (int64, error) { + return s.MeasurementsCardinalityFn(database) +} +func (s *TSDBStoreMock) Open() error { + return s.OpenFn() +} +func (s *TSDBStoreMock) Path() string { + return s.PathFn() +} +func (s *TSDBStoreMock) RestoreShard(id uint64, r io.Reader) error { + return s.RestoreShardFn(id, r) +} +func (s *TSDBStoreMock) SeriesCardinality(database string) (int64, error) { + return s.SeriesCardinalityFn(database) +} +func (s *TSDBStoreMock) SetShardEnabled(shardID uint64, enabled bool) error { + return s.SetShardEnabledFn(shardID, enabled) +} +func (s *TSDBStoreMock) Shard(id uint64) *tsdb.Shard { + return s.ShardFn(id) +} +func (s *TSDBStoreMock) ShardGroup(ids []uint64) tsdb.ShardGroup { + return s.ShardGroupFn(ids) +} +func (s *TSDBStoreMock) ShardIDs() []uint64 { + return s.ShardIDsFn() +} +func (s *TSDBStoreMock) ShardN() int { + return s.ShardNFn() +} +func (s *TSDBStoreMock) ShardRelativePath(id uint64) (string, error) { + return s.ShardRelativePathFn(id) +} +func (s *TSDBStoreMock) Shards(ids []uint64) []*tsdb.Shard { + return s.ShardsFn(ids) +} +func (s *TSDBStoreMock) Statistics(tags map[string]string) []models.Statistic { + return s.StatisticsFn(tags) +} +func (s *TSDBStoreMock) TagKeys(auth query.Authorizer, shardIDs []uint64, cond influxql.Expr) ([]tsdb.TagKeys, error) { + return s.TagKeysFn(auth, shardIDs, cond) +} +func (s *TSDBStoreMock) TagValues(auth query.Authorizer, shardIDs []uint64, cond influxql.Expr) ([]tsdb.TagValues, error) { + return s.TagValuesFn(auth, shardIDs, cond) +} +func (s *TSDBStoreMock) WithLogger(log *zap.Logger) { + s.WithLoggerFn(log) +} +func (s *TSDBStoreMock) WriteToShard(shardID uint64, points []models.Point) error { + return s.WriteToShardFn(shardID, points) +} diff --git a/vendor/github.com/influxdata/influxdb/logger/config.go b/vendor/github.com/influxdata/influxdb/logger/config.go new file mode 100644 index 0000000..210ebc1 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/logger/config.go @@ -0,0 +1,18 @@ +package logger + +import ( + "go.uber.org/zap/zapcore" +) + +type Config struct { + Format string `toml:"format"` + Level zapcore.Level `toml:"level"` + SuppressLogo bool `toml:"suppress-logo"` +} + +// NewConfig returns a new instance of Config with defaults. 
+func NewConfig() Config { + return Config{ + Format: "auto", + } +} diff --git a/vendor/github.com/influxdata/influxdb/logger/context.go b/vendor/github.com/influxdata/influxdb/logger/context.go new file mode 100644 index 0000000..3b4b775 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/logger/context.go @@ -0,0 +1,24 @@ +package logger + +import ( + "context" + + "go.uber.org/zap" +) + +type key int + +const ( + loggerKey key = iota +) + +// NewContextWithLogger returns a new context with log added. +func NewContextWithLogger(ctx context.Context, log *zap.Logger) context.Context { + return context.WithValue(ctx, loggerKey, log) +} + +// LoggerFromContext returns the zap.Logger associated with ctx or nil if no logger has been assigned. +func LoggerFromContext(ctx context.Context) *zap.Logger { + l, _ := ctx.Value(loggerKey).(*zap.Logger) + return l +} diff --git a/vendor/github.com/influxdata/influxdb/logger/fields.go b/vendor/github.com/influxdata/influxdb/logger/fields.go new file mode 100644 index 0000000..3bbb312 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/logger/fields.go @@ -0,0 +1,111 @@ +package logger + +import ( + "time" + + "github.com/influxdata/influxdb/pkg/snowflake" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" +) + +const ( + // TraceIDKey is the logging context key used for identifying unique traces. + TraceIDKey = "trace_id" + + // OperationNameKey is the logging context key used for identifying name of an operation. + OperationNameKey = "op_name" + + // OperationEventKey is the logging context key used for identifying a notable + // event during the course of an operation. + OperationEventKey = "op_event" + + // OperationElapsedKey is the logging context key used for identifying time elapsed to finish an operation. + OperationElapsedKey = "op_elapsed" + + // DBInstanceKey is the logging context key used for identifying name of the relevant database. + DBInstanceKey = "db_instance" + + // DBRetentionKey is the logging context key used for identifying name of the relevant retention policy. + DBRetentionKey = "db_rp" + + // DBShardGroupKey is the logging context key used for identifying relevant shard group. + DBShardGroupKey = "db_shard_group" + + // DBShardIDKey is the logging context key used for identifying name of the relevant shard number. + DBShardIDKey = "db_shard_id" +) +const ( + eventStart = "start" + eventEnd = "end" +) + +var ( + gen = snowflake.New(0) +) + +func nextID() string { + return gen.NextString() +} + +// TraceID returns a field for tracking the trace identifier. +func TraceID(id string) zapcore.Field { + return zap.String(TraceIDKey, id) +} + +// OperationName returns a field for tracking the name of an operation. +func OperationName(name string) zapcore.Field { + return zap.String(OperationNameKey, name) +} + +// OperationElapsed returns a field for tracking the duration of an operation. +func OperationElapsed(d time.Duration) zapcore.Field { + return zap.Duration(OperationElapsedKey, d) +} + +// OperationEventStart returns a field for tracking the start of an operation. +func OperationEventStart() zapcore.Field { + return zap.String(OperationEventKey, eventStart) +} + +// OperationEventFinish returns a field for tracking the end of an operation. +func OperationEventEnd() zapcore.Field { + return zap.String(OperationEventKey, eventEnd) +} + +// Database returns a field for tracking the name of a database. 
+func Database(name string) zapcore.Field { + return zap.String(DBInstanceKey, name) +} + +// Database returns a field for tracking the name of a database. +func RetentionPolicy(name string) zapcore.Field { + return zap.String(DBRetentionKey, name) +} + +// ShardGroup returns a field for tracking the shard group identifier. +func ShardGroup(id uint64) zapcore.Field { + return zap.Uint64(DBShardGroupKey, id) +} + +// Shard returns a field for tracking the shard identifier. +func Shard(id uint64) zapcore.Field { + return zap.Uint64(DBShardIDKey, id) +} + +// NewOperation uses the exiting log to create a new logger with context +// containing a trace id and the operation. Prior to returning, a standardized message +// is logged indicating the operation has started. The returned function should be +// called when the operation concludes in order to log a corresponding message which +// includes an elapsed time and that the operation has ended. +func NewOperation(log *zap.Logger, msg, name string, fields ...zapcore.Field) (*zap.Logger, func()) { + f := []zapcore.Field{TraceID(nextID()), OperationName(name)} + if len(fields) > 0 { + f = append(f, fields...) + } + + now := time.Now() + log = log.With(f...) + log.Info(msg+" (start)", OperationEventStart()) + + return log, func() { log.Info(msg+" (end)", OperationEventEnd(), OperationElapsed(time.Since(now))) } +} diff --git a/vendor/github.com/influxdata/influxdb/logger/logger.go b/vendor/github.com/influxdata/influxdb/logger/logger.go new file mode 100644 index 0000000..44dc39c --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/logger/logger.go @@ -0,0 +1,127 @@ +package logger + +import ( + "fmt" + "io" + "time" + + "github.com/jsternberg/zap-logfmt" + isatty "github.com/mattn/go-isatty" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" +) + +const TimeFormat = "2006-01-02T15:04:05.000000Z07:00" + +func New(w io.Writer) *zap.Logger { + config := NewConfig() + l, _ := config.New(w) + return l +} + +func (c *Config) New(defaultOutput io.Writer) (*zap.Logger, error) { + w := defaultOutput + format := c.Format + if format == "console" { + // Disallow the console logger if the output is not a terminal. + return nil, fmt.Errorf("unknown logging format: %s", format) + } + + // If the format is empty or auto, then set the format depending + // on whether or not a terminal is present. 
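+	// A detected terminal selects the human-readable console encoder; any other writer falls back to logfmt.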
+ if format == "" || format == "auto" { + if IsTerminal(w) { + format = "console" + } else { + format = "logfmt" + } + } + + encoder, err := newEncoder(format) + if err != nil { + return nil, err + } + return zap.New(zapcore.NewCore( + encoder, + zapcore.Lock(zapcore.AddSync(w)), + c.Level, + ), zap.Fields(zap.String("log_id", nextID()))), nil +} + +func newEncoder(format string) (zapcore.Encoder, error) { + config := newEncoderConfig() + switch format { + case "json": + return zapcore.NewJSONEncoder(config), nil + case "console": + return zapcore.NewConsoleEncoder(config), nil + case "logfmt": + return zaplogfmt.NewEncoder(config), nil + default: + return nil, fmt.Errorf("unknown logging format: %s", format) + } +} + +func newEncoderConfig() zapcore.EncoderConfig { + config := zap.NewProductionEncoderConfig() + config.EncodeTime = func(ts time.Time, encoder zapcore.PrimitiveArrayEncoder) { + encoder.AppendString(ts.UTC().Format(TimeFormat)) + } + config.EncodeDuration = func(d time.Duration, encoder zapcore.PrimitiveArrayEncoder) { + val := float64(d) / float64(time.Millisecond) + encoder.AppendString(fmt.Sprintf("%.3fms", val)) + } + config.LevelKey = "lvl" + return config +} + +// IsTerminal checks if w is a file and whether it is an interactive terminal session. +func IsTerminal(w io.Writer) bool { + if f, ok := w.(interface { + Fd() uintptr + }); ok { + return isatty.IsTerminal(f.Fd()) + } + return false +} + +const ( + year = 365 * 24 * time.Hour + week = 7 * 24 * time.Hour + day = 24 * time.Hour +) + +func DurationLiteral(key string, val time.Duration) zapcore.Field { + if val == 0 { + return zap.String(key, "0s") + } + + var ( + value int + unit string + ) + switch { + case val%year == 0: + value = int(val / year) + unit = "y" + case val%week == 0: + value = int(val / week) + unit = "w" + case val%day == 0: + value = int(val / day) + unit = "d" + case val%time.Hour == 0: + value = int(val / time.Hour) + unit = "h" + case val%time.Minute == 0: + value = int(val / time.Minute) + unit = "m" + case val%time.Second == 0: + value = int(val / time.Second) + unit = "s" + default: + value = int(val / time.Millisecond) + unit = "ms" + } + return zap.String(key, fmt.Sprintf("%d%s", value, unit)) +} diff --git a/vendor/github.com/influxdata/influxdb/logger/style_guide.md b/vendor/github.com/influxdata/influxdb/logger/style_guide.md new file mode 100644 index 0000000..7003fd3 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/logger/style_guide.md @@ -0,0 +1,192 @@ +# Logging Style Guide + +The intention of logging is to give insight to the administrator of how +the server is running and also notify the administrator of any problems +or potential problems with the system. + +At the moment, log level filtering is the only option to configure +logging in InfluxDB. Adding a logging message and choosing its level +should be done according to the guidelines in this document for +operational clarity. The available log levels are: + +* Error +* Warn +* Info +* Debug + +InfluxDB uses structured logging. Structured logging is when you log +messages and attach context to those messages with more easily read data +regarding the state of the system. A structured log message is composed +of: + +* Time +* Level +* Message +* (Optionally) Additional context + +## Guidelines + +**Log messages** should be simple statements or phrases that begin with +a capital letter, but have no punctuation at the end. 
The message should be a +constant so that every time it is logged it is easily identified and can +be filtered by without regular expressions. + +Any **dynamic content** should be expressed by context. The key should +be a constant and the value is the dynamic content. + +Do not log messages in tight loops or other high performance locations. +It will likely create a performance problem. + +## Naming Conventions + +If the log encoding format uses keys for the time, message, or level, +the key names should be `ts` for time, `msg` for the message, and +`lvl` for the level. + +If the log encoding format does not use keys for the time, message, or +level and instead outputs them in some other method, this guideline can +be ignored. The output formats logfmt and json both use keys when +encoding these values. + +### Context Key Names + +The key for the dynamic content in the context should be formatted in +`snake_case`. The key should be completely lower case. + +## Levels + +As a reminder, levels are usually the only way to configure what is +logged. There are four available logging levels. + +* Error +* Warn +* Info +* Debug + +It is important to get the right logging level to ensure the log +messages are useful for end users to act on. + +In general, when considering which log level to use, you should use +**info**. If you are considering using another level, read the below +expanded descriptions to determine which level your message belongs in. + +### Error + +The **error** level is intended to communicate that there is a serious +problem with the server. **An error should be emitted only when an +on-call engineer can take some action to remedy the situation _and_ the +system cannot continue operating properly without remedying the +situation.** + +An example of what may qualify as an error level message is the creation +of the internal storage for the monitor service. For that system to +function at all, a database must be created. If no database is created, +the service itself cannot function. The error has a clear actionable +solution. Figure out why the database isn't being created and create it. + +An example of what does not qualify as an error is failing to parse a +query or a socket closing prematurely. Both of these usually indicate +some kind of user error rather than system error. Both are ephemeral +errors and they would not be clearly actionable to an administrator who +was paged at 3 AM. Both of these are examples of logging messages that +should be emitted at the info level with an error key rather than being +logged at the error level. + +Logged errors **must not propagate**. Propagating the error risks +logging it in multiple locations and confusing users when the same error +is reported multiple times. In general, if you are returning an error, +never log at any level. By returning the error, you are telling the +parent function to handle the error. Logging a message at any level is +handling the error. + +This logging message should be used very rarely and any messages that +use this logging level should not repeat frequently. Assume that +anything that is logged with error will page someone in the middle of +the night. + +### Warn + +The **warn** level is intended to communicate that there is likely to be +a serious problem with the server if it not addressed. 
**A warning +should be emitted only when a support engineer can take some action to +remedy the situation _and_ the system may not continue operating +properly in the near future without remedying the situation.** + +An example of what may qualify as a warning is the `max-values-per-tag` +setting. If the server starts to approach the maximum number of values, +the server may stop being able to function properly when it reaches the +maximum number. + +An example of what does not qualify as a warning is the +`log-queries-after` setting. While the message is "warning" that a query +was running for a long period of time, it is not clearly actionable and +does not indicate that the server will fail in the near future. This +should be logged at the info level instead. + +This logging message should be used very rarely and any messages that +use this logging level should not repeat frequently. Assume that +anything that is logged with warn will page someone in the middle of the +night and potentially ignored until normal working hours. + +### Info + +The **info** level should be used for almost anything. If you are not +sure which logging level to use, use info. Temporary or user errors +should be logged at the info level and any informational messages for +administrators should be logged at this level. Info level messages +should be safe for an administrator to discard if they really want to, +but most people will run the system at the info level. + +### Debug + +The **debug** level exists to log messages that are useful only for +debugging a bad running instance. + +This level should be rarely used if ever. If you intend to use this +level, please have a rationale ready. Most messages that could be +considered debug either shouldn't exist or should be logged at the info +level. Debug messages will be suppressed by default. + +## Value Formatting + +Formatting for strings, integers, and other standard values are usually +determined by the log format itself and those will be kept ambiguous. +The following specific formatting choices are for data types that could +be output in multiple ways. + +### Time + +Time values should be encoded using RFC3339 with microsecond precision. +The size of the string should be normalized to the same number of digits +every time to ensure that it is easier to read the time as a column. + +### Duration + +Duration values that denote a period of time should be output in +milliseconds with microsecond precision. The microseconds should be in +decimal form with three decimal points. Durations that denote a static +period of time should be output with a single number and a suffix with +the largest possible unit that doesn't cause the value to be a decimal. + +There are two types of durations. + +* Tracks a (usually small) period of time and is meant for timing how + long something take. The content is dynamic and may be graphed. +* Duration literal where the content is dynamic, is unlikely to be + graphed, and usually comes from some type of configuration. + +If the content is dynamic, the duration should be printed as a number of +milliseconds with a decimal indicating the number of microseconds. Any +duration lower than microseconds should be truncated. The decimal section +should always print exactly 3 points after the decimal point. + +If the content is static, the duration should be printed with a single +number and a suffix indicating the unit in years (`y`), weeks (`w`), +days (`d`), hours (`h`), minutes (`m`), seconds (`s`), or +milliseconds (`ms`). 
The suffix should be the greatest unit that can be +used without truncating the value. As an example, if the duration is +60 minutes, then `1h` should be used. If the duration is 61 minutes, +then `61m` should be used. + +For anything lower than milliseconds that is static, the duration should +be truncated. A value of zero should be shown as `0s`. diff --git a/vendor/github.com/influxdata/influxdb/man/Makefile b/vendor/github.com/influxdata/influxdb/man/Makefile new file mode 100644 index 0000000..622b2eb --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/man/Makefile @@ -0,0 +1,41 @@ +#!/usr/bin/make -f + +DESTDIR = /usr/local + +MAN1_TXT = +MAN1_TXT += influxd.txt +MAN1_TXT += influxd-backup.txt +MAN1_TXT += influxd-config.txt +MAN1_TXT += influxd-restore.txt +MAN1_TXT += influxd-run.txt +MAN1_TXT += influxd-version.txt +MAN1_TXT += influx.txt +MAN1_TXT += influx_inspect.txt +MAN1_TXT += influx_stress.txt +MAN1_TXT += influx_tsm.txt + +MAN_TXT = $(MAN1_TXT) +MAN_XML = $(patsubst %.txt,%.xml,$(MAN_TXT)) + +DOC_MAN1 = $(patsubst %.txt,%.1,$(MAN1_TXT)) + +build: $(DOC_MAN1) + +install: build + @echo ' INSTALL $(DOC_MAN1)' && \ + mkdir -p $(DESTDIR)/share/man/man1 && \ + install -m 0644 $(DOC_MAN1) $(DESTDIR)/share/man/man1 + +clean: + rm -f $(MAN_XML) $(DOC_MAN1) + +%.xml : %.txt + @echo ' ASCIIDOC $@' && rm -f $@+ && \ + asciidoc -d manpage -b docbook -o $@+ $< && \ + mv $@+ $@ + +%.1 : %.xml + @echo ' XMLTO $@' && \ + xmlto man $< 2> /dev/null + +.PHONY: build install clean diff --git a/vendor/github.com/influxdata/influxdb/man/README.md b/vendor/github.com/influxdata/influxdb/man/README.md new file mode 100644 index 0000000..c9d5d6f --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/man/README.md @@ -0,0 +1,38 @@ +# Building the Man Pages + +The man pages are created with `asciidoc`, `docbook`, and `xmlto`. + +## Debian/Ubuntu + +This is the easiest since Debian and Ubuntu automatically install the +dependencies correctly. + +```bash +$ sudo apt-get install -y build-essential asciidoc xmlto +``` + +You should then be able to run `make` and the man pages will be +produced. + +## Mac OS X + +Mac OS X also has the tools necessary to build the docs, but one of the +dependencies gets installed incorrectly and you need an environment +variable to run it correctly. + +Use Homebrew to install the dependencies. There might be other methods +to get the dependencies, but that's left up to the reader if they want +to use a different package manager. + +If you have Homebrew installed, you should already have the Xcode tools +and that should include `make`. + +```bash +$ brew install asciidoc xmlto +``` + +Then set the following environment variable everytime you run `make`. + +```bash +export XML_CATALOG_FILES=/usr/local/etc/xml/catalog +``` diff --git a/vendor/github.com/influxdata/influxdb/man/footer.txt b/vendor/github.com/influxdata/influxdb/man/footer.txt new file mode 100644 index 0000000..3312a25 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/man/footer.txt @@ -0,0 +1,13 @@ +BUGS +---- +Report bugs to the GitHub issue tracker . + +AUTHORS +------- +InfluxDB is written and maintained by InfluxData . + +COPYRIGHT +--------- +InfluxDB is released under the MIT license. + +This man page is released under Creative Commons Attribution 4.0 International License. 
diff --git a/vendor/github.com/influxdata/influxdb/man/influx.txt b/vendor/github.com/influxdata/influxdb/man/influx.txt new file mode 100644 index 0000000..66e556d --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/man/influx.txt @@ -0,0 +1,97 @@ +influx(1) +========= + +NAME +---- +influx - InfluxDB client + +SYNOPSIS +-------- +[verse] +'influx' [options] +'influx' -execute [options] +'influx' -import -path (-compressed) [options] +'influx' -version + +DESCRIPTION +----------- +'influx' is the command line program for interacting with an InfluxDB server. + +In the first form, the program starts a CLI that can be used to write data or query the database. The command line is described in *COMMAND LINE*. + +In the second form, this will execute a single command, usually a query. This is the equivalent of starting the command line, running one command, and then exiting. + +In the third form, this imports a previously exported database to the database. + +The fourth form outputs the version of the command line and then immediately exits. + +OPTIONS +------- +-host :: + Host to connect to. Default is localhost. + +-port :: + Port to use when connecting to the host. Default is 8086. + +-database :: + Database to use when connecting to the database. + +-username :: + Username to connect to the server. + +-password :: + Password to connect to the server. If left blank, this will prompt for a password. + +-ssl: + Use https for requests. + +-unsafeSsl:: + Set this with '-ssl' to allow unsafe connections. + +-execute :: + Executes the command and exits. + +-format :: + Sets the format of the server responses. Default is column. + +-precision :: + Specifies the format of the timestamp. Default is ns. + +-consistency :: + Set the write consistency level. Default is one. + +-pretty:: + Turns on pretty print format for the JSON format. + +-node :: + Specifies the data node that should be queried for data. This option is only valid on enterprise clusters. + +-import:: + Import a previous database export from a file. If specified, '-path ' must also be specified. + +-path :: + Path to the database export file to import. Must be used with '-import'. + +-pps : + How many points per second the import will allow. By default, it is zero and will not throttle importing. + +-compressed:: + Set if the import file is compressed. Must be used with '-import'. + +-version:: + Outputs the version of the influx client. + +ENVIRONMENT +----------- +The environment variables can be specified in lower case or upper case. The upper case version has precedence. + +HTTP_PROXY [protocol://][:port]:: + Sets the proxy server to use for HTTP. + +HTTPS_PROXY [protocol://][:port]:: + Sets the proxy server to use for HTTPS. Takes precedence over HTTP_PROXY for HTTPS. + +NO_PROXY :: + List of host names that shouldn't go through any proxy. If set to an asterisk \'*' only, it matches all hosts. 
+ +include::footer.txt[] diff --git a/vendor/github.com/influxdata/influxdb/man/influx_inspect.txt b/vendor/github.com/influxdata/influxdb/man/influx_inspect.txt new file mode 100644 index 0000000..ddb204f --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/man/influx_inspect.txt @@ -0,0 +1,104 @@ +influx_inspect(1) +================= + +NAME +---- +influx_inspect - Displays detailed information about InfluxDB data files + +SYNPOSIS +-------- +[verse] +'influx_inspect dumptsm' [options] +'influx_inspect export' [options] +'influx_inspect report' [options] +'influx_inspect verify' [options] +'influx_inspect verify-seriesfile' [options] + +DESCRIPTION +----------- +Displays detailed information about InfluxDB data files through one of the +following commands. + +*dumptsm*:: + Dumps low-level details about tsm1 files. + +*export*:: + Exports TSM files into InfluxDB line protocol format. + +*report*:: + Displays shard level report. + +*verify*:: + Verifies integrity of TSM files. + +DUMPTSM OPTIONS +--------------- +-all:: + Dump all data. Caution: This may print a lot of information. + +-blocks:: + Dump raw block data. + +-filter-key :: + Only display index and block data that match this key substring. + +-index:: + Dump raw index data. + +EXPORT OPTIONS +-------------- +-compress:: + Compress the output. + +-db :: + The database to export. Optional. + +-rp :: + The retention policy to export. Optional. Requires the '-db ' option to be specified. + +-data-dir :: + Data storage path. Defaults to '~/.influxdb/data'. + +-wal-dir :: + Wal storage path. Defaults to '~/.influxdb/wal'. + +-start :: + The start time of the export. The timestamp is in RFC3339 format. Optional. + +-end :: + The end time of the export. The timestamp is in RFC3339 format. Optional. + +-out :: + Destination file to write exported data to. Defaults to '~/.influxdb/export'. + +REPORT OPTIONS +-------------- +-detailed:: + Report detailed cardinality estimates. + +-pattern :: + Include only files matching a pattern. + +VERIFY OPTIONS +-------------- +-dir :: + Root storage path. Defaults to '~/.influxdb'. + +VERIFY-SERIESFILE OPTIONS +------------------------- +-dir :: + Root data storage path. Defaults to '~/.influxdb/data'. + +-db :: + Specific db to check. Optional. + +-file :: + Path to a specific series file to check. Overrides '-dir' and '-db'. Optional. + +-v:: + Verbose output. Optional. + +-c :: + Number of concurrent workers to run. Defaults to the number of cores on the machine. Optional. + +include:footer.txt[] diff --git a/vendor/github.com/influxdata/influxdb/man/influx_stress.txt b/vendor/github.com/influxdata/influxdb/man/influx_stress.txt new file mode 100644 index 0000000..8b83feb --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/man/influx_stress.txt @@ -0,0 +1,52 @@ +influx_stress(1) +================ + +NAME +---- +influx_stress - Runs a stress test against one or multiple InfluxDB servers + +SYNOPSIS +-------- +[verse] +'influx_stress' [options] + +DESCRIPTION +----------- +Runs write and query stress tests against one or multiple InfluxDB servers to +create reproducible performance benchmarks against InfluxDB. + +OPTIONS +------- +-addr :: + IP address and port of the database where response times will persist. This + is not for specifying which database to test against. That option is located + inside of the configuration file. The default is 'http://localhost:8086'. + +-database :: + The database where response times will persist. 
This is not for specifying + which database to test against. See '-db' or the configuration file for that + option. The default is 'stress'. + +-retention-policy :: + The retention policy where response times will persist. This is not for + specifying which retention policy to test against. See the configuration file + for that option. The default is an empty string which will use the default + retention policy. + +-config :: + The stress configuration file. + +-cpuprofile :: + Write the cpu profile to the path. No cpu profile is written unless this is + used. This profiles 'influx_stress', not the InfluxDB server. + +-db :: + The target database within the test system for write and query load. + +-tags :: + A comma separated list of tags. + +-v2:: + Use version 2 of the stress tool. The default is to use version 1. + +include::footer.txt[] diff --git a/vendor/github.com/influxdata/influxdb/man/influx_tsm.txt b/vendor/github.com/influxdata/influxdb/man/influx_tsm.txt new file mode 100644 index 0000000..ef26019 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/man/influx_tsm.txt @@ -0,0 +1,58 @@ +influx_tsm(1) +============= + +NAME +---- +influx_tsm - Convert a database from b1 or bz1 format to tsm1 format + +SYNPOSIS +-------- +[verse] +'influx_tsm' [options] + +DESCRIPTION +----------- +This tool can be used to convert a database from the deprecated b1 or bz1 +formats to tsm1 format. The b1 and bz1 formats were deprecated in 0.10 and +removed in 0.12. + +This tool will backup the directories before conversion (if not disabled). The +backed-up files must be removed manually, generally after starting up the node +again to make sure all of the data has been converted correctly. + +To restore a backup after attempting to convert to tsm1, you shut down the +node, remove the converted directory, and copy the backed-up directory to the +original location. + +OPTIONS +------- +-backup :: + The location to backup the current databases. Must not be within the data + directory. + +-dbs :: + Comma-delimited list of databases to convert. The default is to convert all + databases. + +-debug :: + If set, http debugging endpoints will be enabled on the given address. + +-interval :: + How often status updates are printed. Default is '5s'. + +-nobackup:: + Disable database backups. Not recommended. + +-parallel:: + Perform parallel conversions (up to GOMAXPROCS shards at once). + +-profile :: + Write a CPU profile to the path. + +-sz :: + Maximum size of individual TSM files. Defaults to 2147483648. + +-y:: + Don't ask, just convert. + +include::footer.txt[] diff --git a/vendor/github.com/influxdata/influxdb/man/influxd-backup.txt b/vendor/github.com/influxdata/influxdb/man/influxd-backup.txt new file mode 100644 index 0000000..9573e08 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/man/influxd-backup.txt @@ -0,0 +1,51 @@ +influxd-backup(1) +================= + +NAME +---- +influxd-backup - Creates a backup copy of specified InfluxDB OSS database(s) and saves to disk. Use this newer `-portable` option + unless legacy support is required. Complete documentation on backing up and restoring, including the deprecated + legacy format, see: + https://docs.influxdata.com/influxdb/latest/administration/backup_and_restore/ + + +SYNOPSIS +-------- +'influxd backup' [options] PATH + +DESCRIPTION +----------- +Creates a backup copy of specified InfluxDB OSS database(s) and saves the files in an Enterprise-compatible +format to PATH (directory where backups are saved). 
+ +OPTIONS +------- +-portable:: + Required to generate backup files in a portable format that can be restored to InfluxDB OSS or InfluxDB Enterprise. Use unless the legacy backup is required. + +-host :: + InfluxDB OSS host to back up from. Optional. Defaults to 127.0.0.1:8088. + +-db :: + InfluxDB OSS database name to back up. Optional. If not specified, all databases are backed up when using '-portable'. + +-rp :: + Retention policy to use for the backup. Optional. If not specified, all retention policies are used by default. + +-shard :: + The identifier of the shard to back up. Optional. If specified, '-rp ' is required. + +-start :: + Include all points starting with specified timestamp (RFC3339 format). Not compatible with '-since '. + +-end :: + Exclude all points after timestamp (RFC3339 format). Not compatible with '-since '. + +-since :: + Create an incremental backup of all points after the timestamp (RFC3339 format). Optional. Recommend using '-start ' instead. + +SEE ALSO +-------- +*influxd-restore*(1) + +include::footer.txt[] diff --git a/vendor/github.com/influxdata/influxdb/man/influxd-config.txt b/vendor/github.com/influxdata/influxdb/man/influxd-config.txt new file mode 100644 index 0000000..4a625bb --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/man/influxd-config.txt @@ -0,0 +1,40 @@ +influxd-config(1) +================= + +NAME +---- +influxd-config - Generate configuration files for InfluxDB + +SYNOPSIS +-------- +[verse] +'influxd' config (-config ) +'influxd config' -config /dev/null + +DESCRIPTION +----------- +'influxd config' will generate a configuration file for InfluxDB. The configuration file will be output to standard output and can be written to a file by redirecting the shell output to another file. + +When a configuration file is specified using '-config ', this configuration file will be read and will overwrite the default values for any values that are present. It can be used to provide a configuration fragment with only the options you want to customize and generate a new configuration file from that file. If '-config ' is not specified, the command will look for a default configuration file using the same method as *influxd-run*(1). + +When using this command to regenerate a configuration file in place, be sure to use a temporary file as the output. This command will not work: + +=== +# DO NOT USE! +$ influxd config -config influxdb.conf > influxdb.conf + +# PROPER METHOD! +$ influxd config -config influxdb.conf > influxdb.conf.tmp && \ + mv influxdb.conf.tmp influxdb.conf +=== + +The shell will truncate the configuration file before 'influxd config' can read it and you will lose all of your custom options. For safety, redirect output to a temporary file instead and use 'mv' to move the file afterwards. + +The second command version will force 'influxd config' to output the default configuration file. Setting the configuration file to */dev/null* will cause the command to output only the defaults and will not read any values from any existing configuration files. + +OPTIONS +------- +-config :: + Customize the default configuration file to load. Disables automatic loading when the path is */dev/null*. 
+ +include::footer.txt[] diff --git a/vendor/github.com/influxdata/influxdb/man/influxd-restore.txt b/vendor/github.com/influxdata/influxdb/man/influxd-restore.txt new file mode 100644 index 0000000..7c1a92b --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/man/influxd-restore.txt @@ -0,0 +1,58 @@ +influxd-restore(1) +================== + +NAME +---- +influxd-restore - Restores databases or specific shards to an InfluxDB OSS instance + from the specified PATH. Complete documentation + for the '-portable' restore method described here, and the deprecated legacy restore format, + is located here: + https://docs.influxdata.com/influxdb/latest/administration/backup_and_restore/ + + +SYNOPSIS +-------- +'influxd restore' -portable [options] PATH + +DESCRIPTION +----------- +Uses backup copies from the specified PATH to restore databases or specific shards from InfluxDB OSS + or InfluxDB Enterprise to an InfluxDB OSS instance. + +OPTIONS +------- + +Note: Restore using the '-portable' option consumes files in an improved Enterprise-compatible + format that includes a file manifest. + +-portable:: + Required to activate the portable restore mode. If not specified, the legacy restore mode is used. + +-host :: + InfluxDB OSS host to connect to where the data will be restored. Defaults to '127.0.0.1:8088'. + +-db :: + Name of database to be restored from the backup (InfluxDB OSS or InfluxDB Enterprise) + +-newdb :: + Name of the InfluxDB OSS database into which the archived data will be imported on the target system. Optional. + If not given, then the value of '-db ' is used. The new database name must be unique to the target system. + +-rp :: + Name of retention policy from the backup that will be restored. Optional. Requires that '-db ' is specified. + +-newrp :: + Name of the retention policy to be created on the target system. Optional. Requires that '-rp ' is set. + If not given, the '-rp ' value is used. + +-shard :: + Identifier of the shard to be restored. Optional. If specified, then '-db ' and '-rp ' are required. + +PATH + Path to directory containing the backup files. + +SEE ALSO +-------- +*influxd-backup*(1) + +include::footer.txt[] diff --git a/vendor/github.com/influxdata/influxdb/man/influxd-run.txt b/vendor/github.com/influxdata/influxdb/man/influxd-run.txt new file mode 100644 index 0000000..1214127 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/man/influxd-run.txt @@ -0,0 +1,32 @@ +influxd-run(1) +============== + +NAME +---- +influxd-run - Configure and start an InfluxDB server + +SYNOPSIS +-------- +[verse] +'influxd' [-config ] [-pidfile ] [-cpuprofile ] [-memprofile ] +'influxd run' [-config ] [-pidfile ] [-cpuprofile ] [-memprofile ] + +DESCRIPTION +----------- +Runs the InfluxDB server. + +OPTIONS +------- +-config :: + Sets the path to the configuration file. This defaults to the environment variable *INFLUXDB_CONFIG_PATH*, *~/.influxdb/influxdb.conf*, or */etc/influxdb/influxdb.conf* if a file is present at any of these locations. Disable the automatic loading of a configuration file by using the null device as the path (such as /dev/null on Linux or Mac OS X). + +-pidfile :: + Write process ID to a file. + +-cpuprofile :: + Write CPU profiling information to a file. + +-memprofile :: + Write memory usage information to a file. 
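+
+For example (an illustrative invocation, not part of the original page),
+'influxd run -config /etc/influxdb/influxdb.conf -pidfile /var/run/influxd.pid'
+starts the server with an explicit configuration file and writes the process
+ID to the given file; both paths here are only placeholders.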
+ +include::footer.txt[] diff --git a/vendor/github.com/influxdata/influxdb/man/influxd-version.txt b/vendor/github.com/influxdata/influxdb/man/influxd-version.txt new file mode 100644 index 0000000..26da658 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/man/influxd-version.txt @@ -0,0 +1,17 @@ +influxd-version(1) +================== + +NAME +---- +influxd-version - Display the version of influxdb + +SYNOPSIS +-------- +[verse] +'influxd version' + +DESCRIPTION +----------- +'influxd version' will output the version of the InfluxDB server. + +include::footer.txt[] diff --git a/vendor/github.com/influxdata/influxdb/man/influxd.txt b/vendor/github.com/influxdata/influxdb/man/influxd.txt new file mode 100644 index 0000000..693d7e8 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/man/influxd.txt @@ -0,0 +1,40 @@ +influxd(1) +========== + +NAME +---- +influxd - InfluxDB server daemon + +SYNOPSIS +-------- +[verse] +'influxd' [command] [options] + +DESCRIPTION +----------- +'influxd' is the server daemon for InfluxDB. + +COMMANDS +-------- +These commands can be invoked using the 'influxd' program. The default is 'run' if the command parameter is skipped. + +backup:: + Downloads a snapshot of a data node and saves it to disk. + +config:: + Displays the default configuration. This can also read an existing configuration file and output the default values for any missing fields. Default values and existing entries in a configuration file can be customized through environment variables. + +restore:: + Uses backups to restore the metastore, databases, retention policies, or specific shards. The InfluxDB process must not be running during a restore. + +run:: + Runs the InfluxDB server. This is the default command if none is specified. + +version:: + Displays the InfluxDB version, build branch, and git commit hash. + +SEE ALSO +-------- +*influxd-backup*(1), *influxd-config*(1), *influxd-restore*(1), *influxd-run*(1), *influxd-version*(1) + +include::footer.txt[] diff --git a/vendor/github.com/influxdata/influxdb/models/consistency.go b/vendor/github.com/influxdata/influxdb/models/consistency.go new file mode 100644 index 0000000..2a3269b --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/models/consistency.go @@ -0,0 +1,48 @@ +package models + +import ( + "errors" + "strings" +) + +// ConsistencyLevel represent a required replication criteria before a write can +// be returned as successful. +// +// The consistency level is handled in open-source InfluxDB but only applicable to clusters. +type ConsistencyLevel int + +const ( + // ConsistencyLevelAny allows for hinted handoff, potentially no write happened yet. + ConsistencyLevelAny ConsistencyLevel = iota + + // ConsistencyLevelOne requires at least one data node acknowledged a write. + ConsistencyLevelOne + + // ConsistencyLevelQuorum requires a quorum of data nodes to acknowledge a write. + ConsistencyLevelQuorum + + // ConsistencyLevelAll requires all data nodes to acknowledge a write. + ConsistencyLevelAll +) + +var ( + // ErrInvalidConsistencyLevel is returned when parsing the string version + // of a consistency level. + ErrInvalidConsistencyLevel = errors.New("invalid consistency level") +) + +// ParseConsistencyLevel converts a consistency level string to the corresponding ConsistencyLevel const. 
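+//
+// Illustrative usage (a sketch, not part of the upstream documentation);
+// matching is case-insensitive, so "Quorum" parses the same as "quorum":
+//
+//	lvl, err := ParseConsistencyLevel("quorum")
+//	if err != nil {
+//		// err is ErrInvalidConsistencyLevel for unrecognised level strings
+//	}
+//	_ = lvl // ConsistencyLevelQuorum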
+func ParseConsistencyLevel(level string) (ConsistencyLevel, error) { + switch strings.ToLower(level) { + case "any": + return ConsistencyLevelAny, nil + case "one": + return ConsistencyLevelOne, nil + case "quorum": + return ConsistencyLevelQuorum, nil + case "all": + return ConsistencyLevelAll, nil + default: + return 0, ErrInvalidConsistencyLevel + } +} diff --git a/vendor/github.com/influxdata/influxdb/models/inline_fnv.go b/vendor/github.com/influxdata/influxdb/models/inline_fnv.go new file mode 100644 index 0000000..eec1ae8 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/models/inline_fnv.go @@ -0,0 +1,32 @@ +package models // import "github.com/influxdata/influxdb/models" + +// from stdlib hash/fnv/fnv.go +const ( + prime64 = 1099511628211 + offset64 = 14695981039346656037 +) + +// InlineFNV64a is an alloc-free port of the standard library's fnv64a. +// See https://en.wikipedia.org/wiki/Fowler%E2%80%93Noll%E2%80%93Vo_hash_function. +type InlineFNV64a uint64 + +// NewInlineFNV64a returns a new instance of InlineFNV64a. +func NewInlineFNV64a() InlineFNV64a { + return offset64 +} + +// Write adds data to the running hash. +func (s *InlineFNV64a) Write(data []byte) (int, error) { + hash := uint64(*s) + for _, c := range data { + hash ^= uint64(c) + hash *= prime64 + } + *s = InlineFNV64a(hash) + return len(data), nil +} + +// Sum64 returns the uint64 of the current resulting hash. +func (s *InlineFNV64a) Sum64() uint64 { + return uint64(*s) +} diff --git a/vendor/github.com/influxdata/influxdb/models/inline_fnv_test.go b/vendor/github.com/influxdata/influxdb/models/inline_fnv_test.go new file mode 100644 index 0000000..62bc53e --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/models/inline_fnv_test.go @@ -0,0 +1,29 @@ +package models_test + +import ( + "hash/fnv" + "testing" + "testing/quick" + + "github.com/influxdata/influxdb/models" +) + +func TestInlineFNV64aEquivalenceFuzz(t *testing.T) { + f := func(data []byte) bool { + stdlibFNV := fnv.New64a() + stdlibFNV.Write(data) + want := stdlibFNV.Sum64() + + inlineFNV := models.NewInlineFNV64a() + inlineFNV.Write(data) + got := inlineFNV.Sum64() + + return want == got + } + cfg := &quick.Config{ + MaxCount: 10000, + } + if err := quick.Check(f, cfg); err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/influxdata/influxdb/models/inline_strconv_parse.go b/vendor/github.com/influxdata/influxdb/models/inline_strconv_parse.go new file mode 100644 index 0000000..8db4837 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/models/inline_strconv_parse.go @@ -0,0 +1,44 @@ +package models // import "github.com/influxdata/influxdb/models" + +import ( + "reflect" + "strconv" + "unsafe" +) + +// parseIntBytes is a zero-alloc wrapper around strconv.ParseInt. +func parseIntBytes(b []byte, base int, bitSize int) (i int64, err error) { + s := unsafeBytesToString(b) + return strconv.ParseInt(s, base, bitSize) +} + +// parseUintBytes is a zero-alloc wrapper around strconv.ParseUint. +func parseUintBytes(b []byte, base int, bitSize int) (i uint64, err error) { + s := unsafeBytesToString(b) + return strconv.ParseUint(s, base, bitSize) +} + +// parseFloatBytes is a zero-alloc wrapper around strconv.ParseFloat. +func parseFloatBytes(b []byte, bitSize int) (float64, error) { + s := unsafeBytesToString(b) + return strconv.ParseFloat(s, bitSize) +} + +// parseBoolBytes is a zero-alloc wrapper around strconv.ParseBool. 
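+//
+// Illustrative usage of these zero-alloc wrappers (a sketch, not upstream
+// documentation); the []byte is viewed as a string without copying and handed
+// to the standard library parsers:
+//
+//	ok, err := parseBoolBytes([]byte("true"))    // ok == true, err == nil
+//	n, _ := parseIntBytes([]byte("-42"), 10, 64) // n == -42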
+func parseBoolBytes(b []byte) (bool, error) { + return strconv.ParseBool(unsafeBytesToString(b)) +} + +// unsafeBytesToString converts a []byte to a string without a heap allocation. +// +// It is unsafe, and is intended to prepare input to short-lived functions +// that require strings. +func unsafeBytesToString(in []byte) string { + src := *(*reflect.SliceHeader)(unsafe.Pointer(&in)) + dst := reflect.StringHeader{ + Data: src.Data, + Len: src.Len, + } + s := *(*string)(unsafe.Pointer(&dst)) + return s +} diff --git a/vendor/github.com/influxdata/influxdb/models/inline_strconv_parse_test.go b/vendor/github.com/influxdata/influxdb/models/inline_strconv_parse_test.go new file mode 100644 index 0000000..119f543 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/models/inline_strconv_parse_test.go @@ -0,0 +1,103 @@ +package models + +import ( + "strconv" + "testing" + "testing/quick" +) + +func TestParseIntBytesEquivalenceFuzz(t *testing.T) { + f := func(b []byte, base int, bitSize int) bool { + exp, expErr := strconv.ParseInt(string(b), base, bitSize) + got, gotErr := parseIntBytes(b, base, bitSize) + + return exp == got && checkErrs(expErr, gotErr) + } + + cfg := &quick.Config{ + MaxCount: 10000, + } + + if err := quick.Check(f, cfg); err != nil { + t.Fatal(err) + } +} + +func TestParseIntBytesValid64bitBase10EquivalenceFuzz(t *testing.T) { + buf := []byte{} + f := func(n int64) bool { + buf = strconv.AppendInt(buf[:0], n, 10) + + exp, expErr := strconv.ParseInt(string(buf), 10, 64) + got, gotErr := parseIntBytes(buf, 10, 64) + + return exp == got && checkErrs(expErr, gotErr) + } + + cfg := &quick.Config{ + MaxCount: 10000, + } + + if err := quick.Check(f, cfg); err != nil { + t.Fatal(err) + } +} + +func TestParseFloatBytesEquivalenceFuzz(t *testing.T) { + f := func(b []byte, bitSize int) bool { + exp, expErr := strconv.ParseFloat(string(b), bitSize) + got, gotErr := parseFloatBytes(b, bitSize) + + return exp == got && checkErrs(expErr, gotErr) + } + + cfg := &quick.Config{ + MaxCount: 10000, + } + + if err := quick.Check(f, cfg); err != nil { + t.Fatal(err) + } +} + +func TestParseFloatBytesValid64bitEquivalenceFuzz(t *testing.T) { + buf := []byte{} + f := func(n float64) bool { + buf = strconv.AppendFloat(buf[:0], n, 'f', -1, 64) + + exp, expErr := strconv.ParseFloat(string(buf), 64) + got, gotErr := parseFloatBytes(buf, 64) + + return exp == got && checkErrs(expErr, gotErr) + } + + cfg := &quick.Config{ + MaxCount: 10000, + } + + if err := quick.Check(f, cfg); err != nil { + t.Fatal(err) + } +} + +func TestParseBoolBytesEquivalence(t *testing.T) { + var buf []byte + for _, s := range []string{"1", "t", "T", "TRUE", "true", "True", "0", "f", "F", "FALSE", "false", "False", "fail", "TrUe", "FAlSE", "numbers", ""} { + buf = append(buf[:0], s...) + + exp, expErr := strconv.ParseBool(s) + got, gotErr := parseBoolBytes(buf) + + if got != exp || !checkErrs(expErr, gotErr) { + t.Errorf("Failed to parse boolean value %q correctly: wanted (%t, %v), got (%t, %v)", s, exp, expErr, got, gotErr) + } + } +} + +func checkErrs(a, b error) bool { + if (a == nil) != (b == nil) { + return false + } + + return a == nil || a.Error() == b.Error() +} diff --git a/vendor/github.com/influxdata/influxdb/models/points.go b/vendor/github.com/influxdata/influxdb/models/points.go new file mode 100644 index 0000000..e919281 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/models/points.go @@ -0,0 +1,2463 @@ +// Package models implements basic objects used throughout the TICK stack. 
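+//
+// A minimal usage sketch (illustrative, not part of the upstream package
+// documentation): parse one line of line protocol and inspect the result.
+//
+//	pts, err := ParsePointsString("cpu,host=serverA value=0.64 1434055562000000000")
+//	if err == nil && len(pts) == 1 {
+//		name := pts[0].Name()        // []byte("cpu")
+//		fields, _ := pts[0].Fields() // Fields{"value": 0.64}
+//		_, _ = name, fields
+//	}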
+package models // import "github.com/influxdata/influxdb/models" + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "math" + "sort" + "strconv" + "strings" + "time" + "unicode" + "unicode/utf8" + + "github.com/influxdata/influxdb/pkg/escape" +) + +type escapeSet struct { + k [1]byte + esc [2]byte +} + +var ( + measurementEscapeCodes = [...]escapeSet{ + {k: [1]byte{','}, esc: [2]byte{'\\', ','}}, + {k: [1]byte{' '}, esc: [2]byte{'\\', ' '}}, + } + + tagEscapeCodes = [...]escapeSet{ + {k: [1]byte{','}, esc: [2]byte{'\\', ','}}, + {k: [1]byte{' '}, esc: [2]byte{'\\', ' '}}, + {k: [1]byte{'='}, esc: [2]byte{'\\', '='}}, + } + + // ErrPointMustHaveAField is returned when operating on a point that does not have any fields. + ErrPointMustHaveAField = errors.New("point without fields is unsupported") + + // ErrInvalidNumber is returned when a number is expected but not provided. + ErrInvalidNumber = errors.New("invalid number") + + // ErrInvalidPoint is returned when a point cannot be parsed correctly. + ErrInvalidPoint = errors.New("point is invalid") +) + +const ( + // MaxKeyLength is the largest allowed size of the combined measurement and tag keys. + MaxKeyLength = 65535 +) + +// enableUint64Support will enable uint64 support if set to true. +var enableUint64Support = false + +// EnableUintSupport manually enables uint support for the point parser. +// This function will be removed in the future and only exists for unit tests during the +// transition. +func EnableUintSupport() { + enableUint64Support = true +} + +// Point defines the values that will be written to the database. +type Point interface { + // Name return the measurement name for the point. + Name() []byte + + // SetName updates the measurement name for the point. + SetName(string) + + // Tags returns the tag set for the point. + Tags() Tags + + // ForEachTag iterates over each tag invoking fn. If fn return false, iteration stops. + ForEachTag(fn func(k, v []byte) bool) + + // AddTag adds or replaces a tag value for a point. + AddTag(key, value string) + + // SetTags replaces the tags for the point. + SetTags(tags Tags) + + // HasTag returns true if the tag exists for the point. + HasTag(tag []byte) bool + + // Fields returns the fields for the point. + Fields() (Fields, error) + + // Time return the timestamp for the point. + Time() time.Time + + // SetTime updates the timestamp for the point. + SetTime(t time.Time) + + // UnixNano returns the timestamp of the point as nanoseconds since Unix epoch. + UnixNano() int64 + + // HashID returns a non-cryptographic checksum of the point's key. + HashID() uint64 + + // Key returns the key (measurement joined with tags) of the point. + Key() []byte + + // String returns a string representation of the point. If there is a + // timestamp associated with the point then it will be specified with the default + // precision of nanoseconds. + String() string + + // MarshalBinary returns a binary representation of the point. + MarshalBinary() ([]byte, error) + + // PrecisionString returns a string representation of the point. If there + // is a timestamp associated with the point then it will be specified in the + // given unit. + PrecisionString(precision string) string + + // RoundedString returns a string representation of the point. If there + // is a timestamp associated with the point, then it will be rounded to the + // given duration. 
+ RoundedString(d time.Duration) string + + // Split will attempt to return multiple points with the same timestamp whose + // string representations are no longer than size. Points with a single field or + // a point without a timestamp may exceed the requested size. + Split(size int) []Point + + // Round will round the timestamp of the point to the given duration. + Round(d time.Duration) + + // StringSize returns the length of the string that would be returned by String(). + StringSize() int + + // AppendString appends the result of String() to the provided buffer and returns + // the result, potentially reducing string allocations. + AppendString(buf []byte) []byte + + // FieldIterator retuns a FieldIterator that can be used to traverse the + // fields of a point without constructing the in-memory map. + FieldIterator() FieldIterator +} + +// FieldType represents the type of a field. +type FieldType int + +const ( + // Integer indicates the field's type is integer. + Integer FieldType = iota + + // Float indicates the field's type is float. + Float + + // Boolean indicates the field's type is boolean. + Boolean + + // String indicates the field's type is string. + String + + // Empty is used to indicate that there is no field. + Empty + + // Unsigned indicates the field's type is an unsigned integer. + Unsigned +) + +// FieldIterator provides a low-allocation interface to iterate through a point's fields. +type FieldIterator interface { + // Next indicates whether there any fields remaining. + Next() bool + + // FieldKey returns the key of the current field. + FieldKey() []byte + + // Type returns the FieldType of the current field. + Type() FieldType + + // StringValue returns the string value of the current field. + StringValue() string + + // IntegerValue returns the integer value of the current field. + IntegerValue() (int64, error) + + // UnsignedValue returns the unsigned value of the current field. + UnsignedValue() (uint64, error) + + // BooleanValue returns the boolean value of the current field. + BooleanValue() (bool, error) + + // FloatValue returns the float value of the current field. + FloatValue() (float64, error) + + // Reset resets the iterator to its initial state. + Reset() +} + +// Points represents a sortable list of points by timestamp. +type Points []Point + +// Len implements sort.Interface. +func (a Points) Len() int { return len(a) } + +// Less implements sort.Interface. +func (a Points) Less(i, j int) bool { return a[i].Time().Before(a[j].Time()) } + +// Swap implements sort.Interface. +func (a Points) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +// point is the default implementation of Point. 
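+//
+// FieldIterator usage sketch (illustrative, not upstream documentation; pt
+// stands for any previously obtained Point): traverse a point's fields without
+// building the Fields map.
+//
+//	iter := pt.FieldIterator()
+//	for iter.Next() {
+//		switch iter.Type() {
+//		case Float:
+//			v, _ := iter.FloatValue()
+//			_, _ = iter.FieldKey(), v
+//		case String:
+//			_ = iter.StringValue()
+//		}
+//	}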
+type point struct { + time time.Time + + // text encoding of measurement and tags + // key must always be stored sorted by tags, if the original line was not sorted, + // we need to resort it + key []byte + + // text encoding of field data + fields []byte + + // text encoding of timestamp + ts []byte + + // cached version of parsed fields from data + cachedFields map[string]interface{} + + // cached version of parsed name from key + cachedName string + + // cached version of parsed tags + cachedTags Tags + + it fieldIterator +} + +// type assertions +var ( + _ Point = (*point)(nil) + _ FieldIterator = (*point)(nil) +) + +const ( + // the number of characters for the largest possible int64 (9223372036854775807) + maxInt64Digits = 19 + + // the number of characters for the smallest possible int64 (-9223372036854775808) + minInt64Digits = 20 + + // the number of characters for the largest possible uint64 (18446744073709551615) + maxUint64Digits = 20 + + // the number of characters required for the largest float64 before a range check + // would occur during parsing + maxFloat64Digits = 25 + + // the number of characters required for smallest float64 before a range check occur + // would occur during parsing + minFloat64Digits = 27 +) + +// ParsePoints returns a slice of Points from a text representation of a point +// with each point separated by newlines. If any points fail to parse, a non-nil error +// will be returned in addition to the points that parsed successfully. +func ParsePoints(buf []byte) ([]Point, error) { + return ParsePointsWithPrecision(buf, time.Now().UTC(), "n") +} + +// ParsePointsString is identical to ParsePoints but accepts a string. +func ParsePointsString(buf string) ([]Point, error) { + return ParsePoints([]byte(buf)) +} + +// ParseKey returns the measurement name and tags from a point. +// +// NOTE: to minimize heap allocations, the returned Tags will refer to subslices of buf. +// This can have the unintended effect preventing buf from being garbage collected. +func ParseKey(buf []byte) (string, Tags) { + name, tags := ParseKeyBytes(buf) + return string(name), tags +} + +func ParseKeyBytes(buf []byte) ([]byte, Tags) { + return ParseKeyBytesWithTags(buf, nil) +} + +func ParseKeyBytesWithTags(buf []byte, tags Tags) ([]byte, Tags) { + // Ignore the error because scanMeasurement returns "missing fields" which we ignore + // when just parsing a key + state, i, _ := scanMeasurement(buf, 0) + + var name []byte + if state == tagKeyState { + tags = parseTags(buf, tags) + // scanMeasurement returns the location of the comma if there are tags, strip that off + name = buf[:i-1] + } else { + name = buf[:i] + } + return unescapeMeasurement(name), tags +} + +func ParseTags(buf []byte) Tags { + return parseTags(buf, nil) +} + +func ParseName(buf []byte) []byte { + // Ignore the error because scanMeasurement returns "missing fields" which we ignore + // when just parsing a key + state, i, _ := scanMeasurement(buf, 0) + var name []byte + if state == tagKeyState { + name = buf[:i-1] + } else { + name = buf[:i] + } + + return unescapeMeasurement(name) +} + +// ParsePointsWithPrecision is similar to ParsePoints, but allows the +// caller to provide a precision for time. +// +// NOTE: to minimize heap allocations, the returned Points will refer to subslices of buf. +// This can have the unintended effect preventing buf from being garbage collected. 
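+//
+// Illustrative call (a sketch, not upstream documentation): parse points whose
+// trailing timestamps are in seconds, using the current time for points that
+// omit a timestamp.
+//
+//	pts, err := ParsePointsWithPrecision(buf, time.Now().UTC(), "s")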
+func ParsePointsWithPrecision(buf []byte, defaultTime time.Time, precision string) ([]Point, error) { + points := make([]Point, 0, bytes.Count(buf, []byte{'\n'})+1) + var ( + pos int + block []byte + failed []string + ) + for pos < len(buf) { + pos, block = scanLine(buf, pos) + pos++ + + if len(block) == 0 { + continue + } + + // lines which start with '#' are comments + start := skipWhitespace(block, 0) + + // If line is all whitespace, just skip it + if start >= len(block) { + continue + } + + if block[start] == '#' { + continue + } + + // strip the newline if one is present + if block[len(block)-1] == '\n' { + block = block[:len(block)-1] + } + + pt, err := parsePoint(block[start:], defaultTime, precision) + if err != nil { + failed = append(failed, fmt.Sprintf("unable to parse '%s': %v", string(block[start:]), err)) + } else { + points = append(points, pt) + } + + } + if len(failed) > 0 { + return points, fmt.Errorf("%s", strings.Join(failed, "\n")) + } + return points, nil + +} + +func parsePoint(buf []byte, defaultTime time.Time, precision string) (Point, error) { + // scan the first block which is measurement[,tag1=value1,tag2=value=2...] + pos, key, err := scanKey(buf, 0) + if err != nil { + return nil, err + } + + // measurement name is required + if len(key) == 0 { + return nil, fmt.Errorf("missing measurement") + } + + if len(key) > MaxKeyLength { + return nil, fmt.Errorf("max key length exceeded: %v > %v", len(key), MaxKeyLength) + } + + // scan the second block is which is field1=value1[,field2=value2,...] + pos, fields, err := scanFields(buf, pos) + if err != nil { + return nil, err + } + + // at least one field is required + if len(fields) == 0 { + return nil, fmt.Errorf("missing fields") + } + + var maxKeyErr error + err = walkFields(fields, func(k, v []byte) bool { + if sz := seriesKeySize(key, k); sz > MaxKeyLength { + maxKeyErr = fmt.Errorf("max key length exceeded: %v > %v", sz, MaxKeyLength) + return false + } + return true + }) + + if err != nil { + return nil, err + } + + if maxKeyErr != nil { + return nil, maxKeyErr + } + + // scan the last block which is an optional integer timestamp + pos, ts, err := scanTime(buf, pos) + if err != nil { + return nil, err + } + + pt := &point{ + key: key, + fields: fields, + ts: ts, + } + + if len(ts) == 0 { + pt.time = defaultTime + pt.SetPrecision(precision) + } else { + ts, err := parseIntBytes(ts, 10, 64) + if err != nil { + return nil, err + } + pt.time, err = SafeCalcTime(ts, precision) + if err != nil { + return nil, err + } + + // Determine if there are illegal non-whitespace characters after the + // timestamp block. + for pos < len(buf) { + if buf[pos] != ' ' { + return nil, ErrInvalidPoint + } + pos++ + } + } + return pt, nil +} + +// GetPrecisionMultiplier will return a multiplier for the precision specified. +func GetPrecisionMultiplier(precision string) int64 { + d := time.Nanosecond + switch precision { + case "u": + d = time.Microsecond + case "ms": + d = time.Millisecond + case "s": + d = time.Second + case "m": + d = time.Minute + case "h": + d = time.Hour + } + return int64(d) +} + +// scanKey scans buf starting at i for the measurement and tag portion of the point. +// It returns the ending position and the byte slice of key within buf. If there +// are tags, they will be sorted if they are not already. 
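+//
+// For example (illustrative): with buf = []byte("cpu,host=a,region=b value=1"),
+// scanKey(buf, 0) returns the index of the space before "value" together with
+// the key bytes "cpu,host=a,region=b"; unsorted tags would be returned in
+// sorted order.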
+func scanKey(buf []byte, i int) (int, []byte, error) { + start := skipWhitespace(buf, i) + + i = start + + // Determines whether the tags are sort, assume they are + sorted := true + + // indices holds the indexes within buf of the start of each tag. For example, + // a buf of 'cpu,host=a,region=b,zone=c' would have indices slice of [4,11,20] + // which indicates that the first tag starts at buf[4], seconds at buf[11], and + // last at buf[20] + indices := make([]int, 100) + + // tracks how many commas we've seen so we know how many values are indices. + // Since indices is an arbitrarily large slice, + // we need to know how many values in the buffer are in use. + commas := 0 + + // First scan the Point's measurement. + state, i, err := scanMeasurement(buf, i) + if err != nil { + return i, buf[start:i], err + } + + // Optionally scan tags if needed. + if state == tagKeyState { + i, commas, indices, err = scanTags(buf, i, indices) + if err != nil { + return i, buf[start:i], err + } + } + + // Now we know where the key region is within buf, and the location of tags, we + // need to determine if duplicate tags exist and if the tags are sorted. This iterates + // over the list comparing each tag in the sequence with each other. + for j := 0; j < commas-1; j++ { + // get the left and right tags + _, left := scanTo(buf[indices[j]:indices[j+1]-1], 0, '=') + _, right := scanTo(buf[indices[j+1]:indices[j+2]-1], 0, '=') + + // If left is greater than right, the tags are not sorted. We do not have to + // continue because the short path no longer works. + // If the tags are equal, then there are duplicate tags, and we should abort. + // If the tags are not sorted, this pass may not find duplicate tags and we + // need to do a more exhaustive search later. + if cmp := bytes.Compare(left, right); cmp > 0 { + sorted = false + break + } else if cmp == 0 { + return i, buf[start:i], fmt.Errorf("duplicate tags") + } + } + + // If the tags are not sorted, then sort them. This sort is inline and + // uses the tag indices we created earlier. The actual buffer is not sorted, the + // indices are using the buffer for value comparison. After the indices are sorted, + // the buffer is reconstructed from the sorted indices. + if !sorted && commas > 0 { + // Get the measurement name for later + measurement := buf[start : indices[0]-1] + + // Sort the indices + indices := indices[:commas] + insertionSort(0, commas, buf, indices) + + // Create a new key using the measurement and sorted indices + b := make([]byte, len(buf[start:i])) + pos := copy(b, measurement) + for _, i := range indices { + b[pos] = ',' + pos++ + _, v := scanToSpaceOr(buf, i, ',') + pos += copy(b[pos:], v) + } + + // Check again for duplicate tags now that the tags are sorted. + for j := 0; j < commas-1; j++ { + // get the left and right tags + _, left := scanTo(buf[indices[j]:], 0, '=') + _, right := scanTo(buf[indices[j+1]:], 0, '=') + + // If the tags are equal, then there are duplicate tags, and we should abort. + // If the tags are not sorted, this pass may not find duplicate tags and we + // need to do a more exhaustive search later. + if bytes.Equal(left, right) { + return i, b, fmt.Errorf("duplicate tags") + } + } + + return i, b, nil + } + + return i, buf[start:i], nil +} + +// The following constants allow us to specify which state to move to +// next, when scanning sections of a Point. 
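+//
+// Illustrative summary (not upstream documentation): scanning proceeds as a
+// small state machine, measurement -> tagKeyState -> tagValueState (repeated
+// once per tag) -> fieldsState once an unescaped space is reached.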
+const ( + tagKeyState = iota + tagValueState + fieldsState +) + +// scanMeasurement examines the measurement part of a Point, returning +// the next state to move to, and the current location in the buffer. +func scanMeasurement(buf []byte, i int) (int, int, error) { + // Check first byte of measurement, anything except a comma is fine. + // It can't be a space, since whitespace is stripped prior to this + // function call. + if i >= len(buf) || buf[i] == ',' { + return -1, i, fmt.Errorf("missing measurement") + } + + for { + i++ + if i >= len(buf) { + // cpu + return -1, i, fmt.Errorf("missing fields") + } + + if buf[i-1] == '\\' { + // Skip character (it's escaped). + continue + } + + // Unescaped comma; move onto scanning the tags. + if buf[i] == ',' { + return tagKeyState, i + 1, nil + } + + // Unescaped space; move onto scanning the fields. + if buf[i] == ' ' { + // cpu value=1.0 + return fieldsState, i, nil + } + } +} + +// scanTags examines all the tags in a Point, keeping track of and +// returning the updated indices slice, number of commas and location +// in buf where to start examining the Point fields. +func scanTags(buf []byte, i int, indices []int) (int, int, []int, error) { + var ( + err error + commas int + state = tagKeyState + ) + + for { + switch state { + case tagKeyState: + // Grow our indices slice if we have too many tags. + if commas >= len(indices) { + newIndics := make([]int, cap(indices)*2) + copy(newIndics, indices) + indices = newIndics + } + indices[commas] = i + commas++ + + i, err = scanTagsKey(buf, i) + state = tagValueState // tag value always follows a tag key + case tagValueState: + state, i, err = scanTagsValue(buf, i) + case fieldsState: + indices[commas] = i + 1 + return i, commas, indices, nil + } + + if err != nil { + return i, commas, indices, err + } + } +} + +// scanTagsKey scans each character in a tag key. +func scanTagsKey(buf []byte, i int) (int, error) { + // First character of the key. + if i >= len(buf) || buf[i] == ' ' || buf[i] == ',' || buf[i] == '=' { + // cpu,{'', ' ', ',', '='} + return i, fmt.Errorf("missing tag key") + } + + // Examine each character in the tag key until we hit an unescaped + // equals (the tag value), or we hit an error (i.e., unescaped + // space or comma). + for { + i++ + + // Either we reached the end of the buffer or we hit an + // unescaped comma or space. + if i >= len(buf) || + ((buf[i] == ' ' || buf[i] == ',') && buf[i-1] != '\\') { + // cpu,tag{'', ' ', ','} + return i, fmt.Errorf("missing tag value") + } + + if buf[i] == '=' && buf[i-1] != '\\' { + // cpu,tag= + return i + 1, nil + } + } +} + +// scanTagsValue scans each character in a tag value. +func scanTagsValue(buf []byte, i int) (int, int, error) { + // Tag value cannot be empty. + if i >= len(buf) || buf[i] == ',' || buf[i] == ' ' { + // cpu,tag={',', ' '} + return -1, i, fmt.Errorf("missing tag value") + } + + // Examine each character in the tag value until we hit an unescaped + // comma (move onto next tag key), an unescaped space (move onto + // fields), or we error out. + for { + i++ + if i >= len(buf) { + // cpu,tag=value + return -1, i, fmt.Errorf("missing fields") + } + + // An unescaped equals sign is an invalid tag value. 
+ if buf[i] == '=' && buf[i-1] != '\\' { + // cpu,tag={'=', 'fo=o'} + return -1, i, fmt.Errorf("invalid tag format") + } + + if buf[i] == ',' && buf[i-1] != '\\' { + // cpu,tag=foo, + return tagKeyState, i + 1, nil + } + + // cpu,tag=foo value=1.0 + // cpu, tag=foo\= value=1.0 + if buf[i] == ' ' && buf[i-1] != '\\' { + return fieldsState, i, nil + } + } +} + +func insertionSort(l, r int, buf []byte, indices []int) { + for i := l + 1; i < r; i++ { + for j := i; j > l && less(buf, indices, j, j-1); j-- { + indices[j], indices[j-1] = indices[j-1], indices[j] + } + } +} + +func less(buf []byte, indices []int, i, j int) bool { + // This grabs the tag names for i & j, it ignores the values + _, a := scanTo(buf, indices[i], '=') + _, b := scanTo(buf, indices[j], '=') + return bytes.Compare(a, b) < 0 +} + +// scanFields scans buf, starting at i for the fields section of a point. It returns +// the ending position and the byte slice of the fields within buf. +func scanFields(buf []byte, i int) (int, []byte, error) { + start := skipWhitespace(buf, i) + i = start + quoted := false + + // tracks how many '=' we've seen + equals := 0 + + // tracks how many commas we've seen + commas := 0 + + for { + // reached the end of buf? + if i >= len(buf) { + break + } + + // escaped characters? + if buf[i] == '\\' && i+1 < len(buf) { + i += 2 + continue + } + + // If the value is quoted, scan until we get to the end quote + // Only quote values in the field value since quotes are not significant + // in the field key + if buf[i] == '"' && equals > commas { + quoted = !quoted + i++ + continue + } + + // If we see an =, ensure that there is at least on char before and after it + if buf[i] == '=' && !quoted { + equals++ + + // check for "... =123" but allow "a\ =123" + if buf[i-1] == ' ' && buf[i-2] != '\\' { + return i, buf[start:i], fmt.Errorf("missing field key") + } + + // check for "...a=123,=456" but allow "a=123,a\,=456" + if buf[i-1] == ',' && buf[i-2] != '\\' { + return i, buf[start:i], fmt.Errorf("missing field key") + } + + // check for "... value=" + if i+1 >= len(buf) { + return i, buf[start:i], fmt.Errorf("missing field value") + } + + // check for "... value=,value2=..." + if buf[i+1] == ',' || buf[i+1] == ' ' { + return i, buf[start:i], fmt.Errorf("missing field value") + } + + if isNumeric(buf[i+1]) || buf[i+1] == '-' || buf[i+1] == 'N' || buf[i+1] == 'n' { + var err error + i, err = scanNumber(buf, i+1) + if err != nil { + return i, buf[start:i], err + } + continue + } + // If next byte is not a double-quote, the value must be a boolean + if buf[i+1] != '"' { + var err error + i, _, err = scanBoolean(buf, i+1) + if err != nil { + return i, buf[start:i], err + } + continue + } + } + + if buf[i] == ',' && !quoted { + commas++ + } + + // reached end of block? + if buf[i] == ' ' && !quoted { + break + } + i++ + } + + if quoted { + return i, buf[start:i], fmt.Errorf("unbalanced quotes") + } + + // check that all field sections had key and values (e.g. prevent "a=1,b" + if equals == 0 || commas != equals-1 { + return i, buf[start:i], fmt.Errorf("invalid field format") + } + + return i, buf[start:i], nil +} + +// scanTime scans buf, starting at i for the time section of a point. It +// returns the ending position and the byte slice of the timestamp within buf +// and and error if the timestamp is not in the correct numeric format. +func scanTime(buf []byte, i int) (int, []byte, error) { + start := skipWhitespace(buf, i) + i = start + + for { + // reached the end of buf? 
+ if i >= len(buf) { + break + } + + // Reached end of block or trailing whitespace? + if buf[i] == '\n' || buf[i] == ' ' { + break + } + + // Handle negative timestamps + if i == start && buf[i] == '-' { + i++ + continue + } + + // Timestamps should be integers, make sure they are so we don't need + // to actually parse the timestamp until needed. + if buf[i] < '0' || buf[i] > '9' { + return i, buf[start:i], fmt.Errorf("bad timestamp") + } + i++ + } + return i, buf[start:i], nil +} + +func isNumeric(b byte) bool { + return (b >= '0' && b <= '9') || b == '.' +} + +// scanNumber returns the end position within buf, start at i after +// scanning over buf for an integer, or float. It returns an +// error if a invalid number is scanned. +func scanNumber(buf []byte, i int) (int, error) { + start := i + var isInt, isUnsigned bool + + // Is negative number? + if i < len(buf) && buf[i] == '-' { + i++ + // There must be more characters now, as just '-' is illegal. + if i == len(buf) { + return i, ErrInvalidNumber + } + } + + // how many decimal points we've see + decimal := false + + // indicates the number is float in scientific notation + scientific := false + + for { + if i >= len(buf) { + break + } + + if buf[i] == ',' || buf[i] == ' ' { + break + } + + if buf[i] == 'i' && i > start && !(isInt || isUnsigned) { + isInt = true + i++ + continue + } else if buf[i] == 'u' && i > start && !(isInt || isUnsigned) { + isUnsigned = true + i++ + continue + } + + if buf[i] == '.' { + // Can't have more than 1 decimal (e.g. 1.1.1 should fail) + if decimal { + return i, ErrInvalidNumber + } + decimal = true + } + + // `e` is valid for floats but not as the first char + if i > start && (buf[i] == 'e' || buf[i] == 'E') { + scientific = true + i++ + continue + } + + // + and - are only valid at this point if they follow an e (scientific notation) + if (buf[i] == '+' || buf[i] == '-') && (buf[i-1] == 'e' || buf[i-1] == 'E') { + i++ + continue + } + + // NaN is an unsupported value + if i+2 < len(buf) && (buf[i] == 'N' || buf[i] == 'n') { + return i, ErrInvalidNumber + } + + if !isNumeric(buf[i]) { + return i, ErrInvalidNumber + } + i++ + } + + if (isInt || isUnsigned) && (decimal || scientific) { + return i, ErrInvalidNumber + } + + numericDigits := i - start + if isInt { + numericDigits-- + } + if decimal { + numericDigits-- + } + if buf[start] == '-' { + numericDigits-- + } + + if numericDigits == 0 { + return i, ErrInvalidNumber + } + + // It's more common that numbers will be within min/max range for their type but we need to prevent + // out or range numbers from being parsed successfully. This uses some simple heuristics to decide + // if we should parse the number to the actual type. It does not do it all the time because it incurs + // extra allocations and we end up converting the type again when writing points to disk. + if isInt { + // Make sure the last char is an 'i' for integers (e.g. 9i10 is not valid) + if buf[i-1] != 'i' { + return i, ErrInvalidNumber + } + // Parse the int to check bounds the number of digits could be larger than the max range + // We subtract 1 from the index to remove the `i` from our tests + if len(buf[start:i-1]) >= maxInt64Digits || len(buf[start:i-1]) >= minInt64Digits { + if _, err := parseIntBytes(buf[start:i-1], 10, 64); err != nil { + return i, fmt.Errorf("unable to parse integer %s: %s", buf[start:i-1], err) + } + } + } else if isUnsigned { + // Return an error if uint64 support has not been enabled. 
+ if !enableUint64Support { + return i, ErrInvalidNumber + } + // Make sure the last char is a 'u' for unsigned + if buf[i-1] != 'u' { + return i, ErrInvalidNumber + } + // Make sure the first char is not a '-' for unsigned + if buf[start] == '-' { + return i, ErrInvalidNumber + } + // Parse the uint to check bounds the number of digits could be larger than the max range + // We subtract 1 from the index to remove the `u` from our tests + if len(buf[start:i-1]) >= maxUint64Digits { + if _, err := parseUintBytes(buf[start:i-1], 10, 64); err != nil { + return i, fmt.Errorf("unable to parse unsigned %s: %s", buf[start:i-1], err) + } + } + } else { + // Parse the float to check bounds if it's scientific or the number of digits could be larger than the max range + if scientific || len(buf[start:i]) >= maxFloat64Digits || len(buf[start:i]) >= minFloat64Digits { + if _, err := parseFloatBytes(buf[start:i], 10); err != nil { + return i, fmt.Errorf("invalid float") + } + } + } + + return i, nil +} + +// scanBoolean returns the end position within buf, start at i after +// scanning over buf for boolean. Valid values for a boolean are +// t, T, true, TRUE, f, F, false, FALSE. It returns an error if a invalid boolean +// is scanned. +func scanBoolean(buf []byte, i int) (int, []byte, error) { + start := i + + if i < len(buf) && (buf[i] != 't' && buf[i] != 'f' && buf[i] != 'T' && buf[i] != 'F') { + return i, buf[start:i], fmt.Errorf("invalid boolean") + } + + i++ + for { + if i >= len(buf) { + break + } + + if buf[i] == ',' || buf[i] == ' ' { + break + } + i++ + } + + // Single char bool (t, T, f, F) is ok + if i-start == 1 { + return i, buf[start:i], nil + } + + // length must be 4 for true or TRUE + if (buf[start] == 't' || buf[start] == 'T') && i-start != 4 { + return i, buf[start:i], fmt.Errorf("invalid boolean") + } + + // length must be 5 for false or FALSE + if (buf[start] == 'f' || buf[start] == 'F') && i-start != 5 { + return i, buf[start:i], fmt.Errorf("invalid boolean") + } + + // Otherwise + valid := false + switch buf[start] { + case 't': + valid = bytes.Equal(buf[start:i], []byte("true")) + case 'f': + valid = bytes.Equal(buf[start:i], []byte("false")) + case 'T': + valid = bytes.Equal(buf[start:i], []byte("TRUE")) || bytes.Equal(buf[start:i], []byte("True")) + case 'F': + valid = bytes.Equal(buf[start:i], []byte("FALSE")) || bytes.Equal(buf[start:i], []byte("False")) + } + + if !valid { + return i, buf[start:i], fmt.Errorf("invalid boolean") + } + + return i, buf[start:i], nil + +} + +// skipWhitespace returns the end position within buf, starting at i after +// scanning over spaces in tags. +func skipWhitespace(buf []byte, i int) int { + for i < len(buf) { + if buf[i] != ' ' && buf[i] != '\t' && buf[i] != 0 { + break + } + i++ + } + return i +} + +// scanLine returns the end position in buf and the next line found within +// buf. +func scanLine(buf []byte, i int) (int, []byte) { + start := i + quoted := false + fields := false + + // tracks how many '=' and commas we've seen + // this duplicates some of the functionality in scanFields + equals := 0 + commas := 0 + for { + // reached the end of buf? 
+ if i >= len(buf) { + break + } + + // skip past escaped characters + if buf[i] == '\\' && i+2 < len(buf) { + i += 2 + continue + } + + if buf[i] == ' ' { + fields = true + } + + // If we see a double quote, makes sure it is not escaped + if fields { + if !quoted && buf[i] == '=' { + i++ + equals++ + continue + } else if !quoted && buf[i] == ',' { + i++ + commas++ + continue + } else if buf[i] == '"' && equals > commas { + i++ + quoted = !quoted + continue + } + } + + if buf[i] == '\n' && !quoted { + break + } + + i++ + } + + return i, buf[start:i] +} + +// scanTo returns the end position in buf and the next consecutive block +// of bytes, starting from i and ending with stop byte, where stop byte +// has not been escaped. +// +// If there are leading spaces, they are skipped. +func scanTo(buf []byte, i int, stop byte) (int, []byte) { + start := i + for { + // reached the end of buf? + if i >= len(buf) { + break + } + + // Reached unescaped stop value? + if buf[i] == stop && (i == 0 || buf[i-1] != '\\') { + break + } + i++ + } + + return i, buf[start:i] +} + +// scanTo returns the end position in buf and the next consecutive block +// of bytes, starting from i and ending with stop byte. If there are leading +// spaces, they are skipped. +func scanToSpaceOr(buf []byte, i int, stop byte) (int, []byte) { + start := i + if buf[i] == stop || buf[i] == ' ' { + return i, buf[start:i] + } + + for { + i++ + if buf[i-1] == '\\' { + continue + } + + // reached the end of buf? + if i >= len(buf) { + return i, buf[start:i] + } + + // reached end of block? + if buf[i] == stop || buf[i] == ' ' { + return i, buf[start:i] + } + } +} + +func scanTagValue(buf []byte, i int) (int, []byte) { + start := i + for { + if i >= len(buf) { + break + } + + if buf[i] == ',' && buf[i-1] != '\\' { + break + } + i++ + } + if i > len(buf) { + return i, nil + } + return i, buf[start:i] +} + +func scanFieldValue(buf []byte, i int) (int, []byte) { + start := i + quoted := false + for i < len(buf) { + // Only escape char for a field value is a double-quote and backslash + if buf[i] == '\\' && i+1 < len(buf) && (buf[i+1] == '"' || buf[i+1] == '\\') { + i += 2 + continue + } + + // Quoted value? (e.g. string) + if buf[i] == '"' { + i++ + quoted = !quoted + continue + } + + if buf[i] == ',' && !quoted { + break + } + i++ + } + return i, buf[start:i] +} + +func EscapeMeasurement(in []byte) []byte { + for _, c := range measurementEscapeCodes { + if bytes.IndexByte(in, c.k[0]) != -1 { + in = bytes.Replace(in, c.k[:], c.esc[:], -1) + } + } + return in +} + +func unescapeMeasurement(in []byte) []byte { + if bytes.IndexByte(in, '\\') == -1 { + return in + } + + for i := range measurementEscapeCodes { + c := &measurementEscapeCodes[i] + if bytes.IndexByte(in, c.k[0]) != -1 { + in = bytes.Replace(in, c.esc[:], c.k[:], -1) + } + } + return in +} + +func escapeTag(in []byte) []byte { + for i := range tagEscapeCodes { + c := &tagEscapeCodes[i] + if bytes.IndexByte(in, c.k[0]) != -1 { + in = bytes.Replace(in, c.k[:], c.esc[:], -1) + } + } + return in +} + +func unescapeTag(in []byte) []byte { + if bytes.IndexByte(in, '\\') == -1 { + return in + } + + for i := range tagEscapeCodes { + c := &tagEscapeCodes[i] + if bytes.IndexByte(in, c.k[0]) != -1 { + in = bytes.Replace(in, c.esc[:], c.k[:], -1) + } + } + return in +} + +// escapeStringFieldReplacer replaces double quotes and backslashes +// with the same character preceded by a backslash. 
+// As of Go 1.7 this benchmarked better in allocations and CPU time +// compared to iterating through a string byte-by-byte and appending to a new byte slice, +// calling strings.Replace twice, and better than (*Regex).ReplaceAllString. +var escapeStringFieldReplacer = strings.NewReplacer(`"`, `\"`, `\`, `\\`) + +// EscapeStringField returns a copy of in with any double quotes or +// backslashes with escaped values. +func EscapeStringField(in string) string { + return escapeStringFieldReplacer.Replace(in) +} + +// unescapeStringField returns a copy of in with any escaped double-quotes +// or backslashes unescaped. +func unescapeStringField(in string) string { + if strings.IndexByte(in, '\\') == -1 { + return in + } + + var out []byte + i := 0 + for { + if i >= len(in) { + break + } + // unescape backslashes + if in[i] == '\\' && i+1 < len(in) && in[i+1] == '\\' { + out = append(out, '\\') + i += 2 + continue + } + // unescape double-quotes + if in[i] == '\\' && i+1 < len(in) && in[i+1] == '"' { + out = append(out, '"') + i += 2 + continue + } + out = append(out, in[i]) + i++ + + } + return string(out) +} + +// NewPoint returns a new point with the given measurement name, tags, fields and timestamp. If +// an unsupported field value (NaN, or +/-Inf) or out of range time is passed, this function +// returns an error. +func NewPoint(name string, tags Tags, fields Fields, t time.Time) (Point, error) { + key, err := pointKey(name, tags, fields, t) + if err != nil { + return nil, err + } + + return &point{ + key: key, + time: t, + fields: fields.MarshalBinary(), + }, nil +} + +// pointKey checks some basic requirements for valid points, and returns the +// key, along with an possible error. +func pointKey(measurement string, tags Tags, fields Fields, t time.Time) ([]byte, error) { + if len(fields) == 0 { + return nil, ErrPointMustHaveAField + } + + if !t.IsZero() { + if err := CheckTime(t); err != nil { + return nil, err + } + } + + for key, value := range fields { + switch value := value.(type) { + case float64: + // Ensure the caller validates and handles invalid field values + if math.IsInf(value, 0) { + return nil, fmt.Errorf("+/-Inf is an unsupported value for field %s", key) + } + if math.IsNaN(value) { + return nil, fmt.Errorf("NaN is an unsupported value for field %s", key) + } + case float32: + // Ensure the caller validates and handles invalid field values + if math.IsInf(float64(value), 0) { + return nil, fmt.Errorf("+/-Inf is an unsupported value for field %s", key) + } + if math.IsNaN(float64(value)) { + return nil, fmt.Errorf("NaN is an unsupported value for field %s", key) + } + } + if len(key) == 0 { + return nil, fmt.Errorf("all fields must have non-empty names") + } + } + + key := MakeKey([]byte(measurement), tags) + for field := range fields { + sz := seriesKeySize(key, []byte(field)) + if sz > MaxKeyLength { + return nil, fmt.Errorf("max key length exceeded: %v > %v", sz, MaxKeyLength) + } + } + + return key, nil +} + +func seriesKeySize(key, field []byte) int { + // 4 is the length of the tsm1.fieldKeySeparator constant. It's inlined here to avoid a circular + // dependency. + return len(key) + 4 + len(field) +} + +// NewPointFromBytes returns a new Point from a marshalled Point. +func NewPointFromBytes(b []byte) (Point, error) { + p := &point{} + if err := p.UnmarshalBinary(b); err != nil { + return nil, err + } + + // This does some basic validation to ensure there are fields and they + // can be unmarshalled as well. 
+ iter := p.FieldIterator() + var hasField bool + for iter.Next() { + if len(iter.FieldKey()) == 0 { + continue + } + hasField = true + switch iter.Type() { + case Float: + _, err := iter.FloatValue() + if err != nil { + return nil, fmt.Errorf("unable to unmarshal field %s: %s", string(iter.FieldKey()), err) + } + case Integer: + _, err := iter.IntegerValue() + if err != nil { + return nil, fmt.Errorf("unable to unmarshal field %s: %s", string(iter.FieldKey()), err) + } + case Unsigned: + _, err := iter.UnsignedValue() + if err != nil { + return nil, fmt.Errorf("unable to unmarshal field %s: %s", string(iter.FieldKey()), err) + } + case String: + // Skip since this won't return an error + case Boolean: + _, err := iter.BooleanValue() + if err != nil { + return nil, fmt.Errorf("unable to unmarshal field %s: %s", string(iter.FieldKey()), err) + } + } + } + + if !hasField { + return nil, ErrPointMustHaveAField + } + + return p, nil +} + +// MustNewPoint returns a new point with the given measurement name, tags, fields and timestamp. If +// an unsupported field value (NaN) is passed, this function panics. +func MustNewPoint(name string, tags Tags, fields Fields, time time.Time) Point { + pt, err := NewPoint(name, tags, fields, time) + if err != nil { + panic(err.Error()) + } + return pt +} + +// Key returns the key (measurement joined with tags) of the point. +func (p *point) Key() []byte { + return p.key +} + +func (p *point) name() []byte { + _, name := scanTo(p.key, 0, ',') + return name +} + +func (p *point) Name() []byte { + return escape.Unescape(p.name()) +} + +// SetName updates the measurement name for the point. +func (p *point) SetName(name string) { + p.cachedName = "" + p.key = MakeKey([]byte(name), p.Tags()) +} + +// Time return the timestamp for the point. +func (p *point) Time() time.Time { + return p.time +} + +// SetTime updates the timestamp for the point. +func (p *point) SetTime(t time.Time) { + p.time = t +} + +// Round will round the timestamp of the point to the given duration. +func (p *point) Round(d time.Duration) { + p.time = p.time.Round(d) +} + +// Tags returns the tag set for the point. +func (p *point) Tags() Tags { + if p.cachedTags != nil { + return p.cachedTags + } + p.cachedTags = parseTags(p.key, nil) + return p.cachedTags +} + +func (p *point) ForEachTag(fn func(k, v []byte) bool) { + walkTags(p.key, fn) +} + +func (p *point) HasTag(tag []byte) bool { + if len(p.key) == 0 { + return false + } + + var exists bool + walkTags(p.key, func(key, value []byte) bool { + if bytes.Equal(tag, key) { + exists = true + return false + } + return true + }) + + return exists +} + +func walkTags(buf []byte, fn func(key, value []byte) bool) { + if len(buf) == 0 { + return + } + + pos, name := scanTo(buf, 0, ',') + + // it's an empty key, so there are no tags + if len(name) == 0 { + return + } + + hasEscape := bytes.IndexByte(buf, '\\') != -1 + i := pos + 1 + var key, value []byte + for { + if i >= len(buf) { + break + } + i, key = scanTo(buf, i, '=') + i, value = scanTagValue(buf, i+1) + + if len(value) == 0 { + continue + } + + if hasEscape { + if !fn(unescapeTag(key), unescapeTag(value)) { + return + } + } else { + if !fn(key, value) { + return + } + } + + i++ + } +} + +// walkFields walks each field key and value via fn. If fn returns false, the iteration +// is stopped. The values are the raw byte slices and not the converted types. 
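+//
+// Illustrative usage (a sketch, not upstream documentation):
+//
+//	_ = walkFields([]byte(`value=1i,msg="hi"`), func(k, v []byte) bool {
+//		// k holds the raw field key (e.g. "value"), v the raw value bytes (e.g. "1i")
+//		return true // keep iterating
+//	})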
+func walkFields(buf []byte, fn func(key, value []byte) bool) error { + var i int + var key, val []byte + for len(buf) > 0 { + i, key = scanTo(buf, 0, '=') + if i > len(buf)-2 { + return fmt.Errorf("invalid value: field-key=%s", key) + } + buf = buf[i+1:] + i, val = scanFieldValue(buf, 0) + buf = buf[i:] + if !fn(key, val) { + break + } + + // slice off comma + if len(buf) > 0 { + buf = buf[1:] + } + } + return nil +} + +// parseTags parses buf into the provided destination tags, returning destination +// Tags, which may have a different length and capacity. +func parseTags(buf []byte, dst Tags) Tags { + if len(buf) == 0 { + return nil + } + + n := bytes.Count(buf, []byte(",")) + if cap(dst) < n { + dst = make(Tags, n) + } else { + dst = dst[:n] + } + + // Ensure existing behaviour when point has no tags and nil slice passed in. + if dst == nil { + dst = Tags{} + } + + // Series keys can contain escaped commas, therefore the number of commas + // in a series key only gives an estimation of the upper bound on the number + // of tags. + var i int + walkTags(buf, func(key, value []byte) bool { + dst[i].Key, dst[i].Value = key, value + i++ + return true + }) + return dst[:i] +} + +// MakeKey creates a key for a set of tags. +func MakeKey(name []byte, tags Tags) []byte { + return AppendMakeKey(nil, name, tags) +} + +// AppendMakeKey appends the key derived from name and tags to dst and returns the extended buffer. +func AppendMakeKey(dst []byte, name []byte, tags Tags) []byte { + // unescape the name and then re-escape it to avoid double escaping. + // The key should always be stored in escaped form. + dst = append(dst, EscapeMeasurement(unescapeMeasurement(name))...) + dst = tags.AppendHashKey(dst) + return dst +} + +// SetTags replaces the tags for the point. +func (p *point) SetTags(tags Tags) { + p.key = MakeKey(p.Name(), tags) + p.cachedTags = tags +} + +// AddTag adds or replaces a tag value for a point. +func (p *point) AddTag(key, value string) { + tags := p.Tags() + tags = append(tags, Tag{Key: []byte(key), Value: []byte(value)}) + sort.Sort(tags) + p.cachedTags = tags + p.key = MakeKey(p.Name(), tags) +} + +// Fields returns the fields for the point. +func (p *point) Fields() (Fields, error) { + if p.cachedFields != nil { + return p.cachedFields, nil + } + cf, err := p.unmarshalBinary() + if err != nil { + return nil, err + } + p.cachedFields = cf + return p.cachedFields, nil +} + +// SetPrecision will round a time to the specified precision. +func (p *point) SetPrecision(precision string) { + switch precision { + case "n": + case "u": + p.SetTime(p.Time().Truncate(time.Microsecond)) + case "ms": + p.SetTime(p.Time().Truncate(time.Millisecond)) + case "s": + p.SetTime(p.Time().Truncate(time.Second)) + case "m": + p.SetTime(p.Time().Truncate(time.Minute)) + case "h": + p.SetTime(p.Time().Truncate(time.Hour)) + } +} + +// String returns the string representation of the point. +func (p *point) String() string { + if p.Time().IsZero() { + return string(p.Key()) + " " + string(p.fields) + } + return string(p.Key()) + " " + string(p.fields) + " " + strconv.FormatInt(p.UnixNano(), 10) +} + +// AppendString appends the string representation of the point to buf. +func (p *point) AppendString(buf []byte) []byte { + buf = append(buf, p.key...) + buf = append(buf, ' ') + buf = append(buf, p.fields...) 
+ + if !p.time.IsZero() { + buf = append(buf, ' ') + buf = strconv.AppendInt(buf, p.UnixNano(), 10) + } + + return buf +} + +// StringSize returns the length of the string that would be returned by String(). +func (p *point) StringSize() int { + size := len(p.key) + len(p.fields) + 1 + + if !p.time.IsZero() { + digits := 1 // even "0" has one digit + t := p.UnixNano() + if t < 0 { + // account for negative sign, then negate + digits++ + t = -t + } + for t > 9 { // already accounted for one digit + digits++ + t /= 10 + } + size += digits + 1 // digits and a space + } + + return size +} + +// MarshalBinary returns a binary representation of the point. +func (p *point) MarshalBinary() ([]byte, error) { + if len(p.fields) == 0 { + return nil, ErrPointMustHaveAField + } + + tb, err := p.time.MarshalBinary() + if err != nil { + return nil, err + } + + b := make([]byte, 8+len(p.key)+len(p.fields)+len(tb)) + i := 0 + + binary.BigEndian.PutUint32(b[i:], uint32(len(p.key))) + i += 4 + + i += copy(b[i:], p.key) + + binary.BigEndian.PutUint32(b[i:i+4], uint32(len(p.fields))) + i += 4 + + i += copy(b[i:], p.fields) + + copy(b[i:], tb) + return b, nil +} + +// UnmarshalBinary decodes a binary representation of the point into a point struct. +func (p *point) UnmarshalBinary(b []byte) error { + var n int + + // Read key length. + if len(b) < 4 { + return io.ErrShortBuffer + } + n, b = int(binary.BigEndian.Uint32(b[:4])), b[4:] + + // Read key. + if len(b) < n { + return io.ErrShortBuffer + } + p.key, b = b[:n], b[n:] + + // Read fields length. + if len(b) < 4 { + return io.ErrShortBuffer + } + n, b = int(binary.BigEndian.Uint32(b[:4])), b[4:] + + // Read fields. + if len(b) < n { + return io.ErrShortBuffer + } + p.fields, b = b[:n], b[n:] + + // Read timestamp. + return p.time.UnmarshalBinary(b) +} + +// PrecisionString returns a string representation of the point. If there +// is a timestamp associated with the point then it will be specified in the +// given unit. +func (p *point) PrecisionString(precision string) string { + if p.Time().IsZero() { + return fmt.Sprintf("%s %s", p.Key(), string(p.fields)) + } + return fmt.Sprintf("%s %s %d", p.Key(), string(p.fields), + p.UnixNano()/GetPrecisionMultiplier(precision)) +} + +// RoundedString returns a string representation of the point. If there +// is a timestamp associated with the point, then it will be rounded to the +// given duration. 
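+// For example, RoundedString(time.Second) prints the timestamp rounded to the
+// nearest second.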
+func (p *point) RoundedString(d time.Duration) string { + if p.Time().IsZero() { + return fmt.Sprintf("%s %s", p.Key(), string(p.fields)) + } + return fmt.Sprintf("%s %s %d", p.Key(), string(p.fields), + p.time.Round(d).UnixNano()) +} + +func (p *point) unmarshalBinary() (Fields, error) { + iter := p.FieldIterator() + fields := make(Fields, 8) + for iter.Next() { + if len(iter.FieldKey()) == 0 { + continue + } + switch iter.Type() { + case Float: + v, err := iter.FloatValue() + if err != nil { + return nil, fmt.Errorf("unable to unmarshal field %s: %s", string(iter.FieldKey()), err) + } + fields[string(iter.FieldKey())] = v + case Integer: + v, err := iter.IntegerValue() + if err != nil { + return nil, fmt.Errorf("unable to unmarshal field %s: %s", string(iter.FieldKey()), err) + } + fields[string(iter.FieldKey())] = v + case Unsigned: + v, err := iter.UnsignedValue() + if err != nil { + return nil, fmt.Errorf("unable to unmarshal field %s: %s", string(iter.FieldKey()), err) + } + fields[string(iter.FieldKey())] = v + case String: + fields[string(iter.FieldKey())] = iter.StringValue() + case Boolean: + v, err := iter.BooleanValue() + if err != nil { + return nil, fmt.Errorf("unable to unmarshal field %s: %s", string(iter.FieldKey()), err) + } + fields[string(iter.FieldKey())] = v + } + } + return fields, nil +} + +// HashID returns a non-cryptographic checksum of the point's key. +func (p *point) HashID() uint64 { + h := NewInlineFNV64a() + h.Write(p.key) + sum := h.Sum64() + return sum +} + +// UnixNano returns the timestamp of the point as nanoseconds since Unix epoch. +func (p *point) UnixNano() int64 { + return p.Time().UnixNano() +} + +// Split will attempt to return multiple points with the same timestamp whose +// string representations are no longer than size. Points with a single field or +// a point without a timestamp may exceed the requested size. +func (p *point) Split(size int) []Point { + if p.time.IsZero() || p.StringSize() <= size { + return []Point{p} + } + + // key string, timestamp string, spaces + size -= len(p.key) + len(strconv.FormatInt(p.time.UnixNano(), 10)) + 2 + + var points []Point + var start, cur int + + for cur < len(p.fields) { + end, _ := scanTo(p.fields, cur, '=') + end, _ = scanFieldValue(p.fields, end+1) + + if cur > start && end-start > size { + points = append(points, &point{ + key: p.key, + time: p.time, + fields: p.fields[start : cur-1], + }) + start = cur + } + + cur = end + 1 + } + + points = append(points, &point{ + key: p.key, + time: p.time, + fields: p.fields[start:], + }) + + return points +} + +// Tag represents a single key/value tag pair. +type Tag struct { + Key []byte + Value []byte +} + +// NewTag returns a new Tag. +func NewTag(key, value []byte) Tag { + return Tag{ + Key: key, + Value: value, + } +} + +// Size returns the size of the key and value. +func (t Tag) Size() int { return len(t.Key) + len(t.Value) } + +// Clone returns a shallow copy of Tag. +// +// Tags associated with a Point created by ParsePointsWithPrecision will hold references to the byte slice that was parsed. +// Use Clone to create a Tag with new byte slices that do not refer to the argument to ParsePointsWithPrecision. +func (t Tag) Clone() Tag { + other := Tag{ + Key: make([]byte, len(t.Key)), + Value: make([]byte, len(t.Value)), + } + + copy(other.Key, t.Key) + copy(other.Value, t.Value) + + return other +} + +// String returns the string reprsentation of the tag. 
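+// For example, Tag{Key: []byte("region"), Value: []byte("uswest")} is rendered
+// as "{region uswest}".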
+func (t *Tag) String() string { + var buf bytes.Buffer + buf.WriteByte('{') + buf.WriteString(string(t.Key)) + buf.WriteByte(' ') + buf.WriteString(string(t.Value)) + buf.WriteByte('}') + return buf.String() +} + +// Tags represents a sorted list of tags. +type Tags []Tag + +// NewTags returns a new Tags from a map. +func NewTags(m map[string]string) Tags { + if len(m) == 0 { + return nil + } + a := make(Tags, 0, len(m)) + for k, v := range m { + a = append(a, NewTag([]byte(k), []byte(v))) + } + sort.Sort(a) + return a +} + +// Keys returns the list of keys for a tag set. +func (a Tags) Keys() []string { + if len(a) == 0 { + return nil + } + keys := make([]string, len(a)) + for i, tag := range a { + keys[i] = string(tag.Key) + } + return keys +} + +// Values returns the list of values for a tag set. +func (a Tags) Values() []string { + if len(a) == 0 { + return nil + } + values := make([]string, len(a)) + for i, tag := range a { + values[i] = string(tag.Value) + } + return values +} + +// String returns the string representation of the tags. +func (a Tags) String() string { + var buf bytes.Buffer + buf.WriteByte('[') + for i := range a { + buf.WriteString(a[i].String()) + if i < len(a)-1 { + buf.WriteByte(' ') + } + } + buf.WriteByte(']') + return buf.String() +} + +// Size returns the number of bytes needed to store all tags. Note, this is +// the number of bytes needed to store all keys and values and does not account +// for data structures or delimiters for example. +func (a Tags) Size() int { + var total int + for i := range a { + total += a[i].Size() + } + return total +} + +// Clone returns a copy of the slice where the elements are a result of calling `Clone` on the original elements +// +// Tags associated with a Point created by ParsePointsWithPrecision will hold references to the byte slice that was parsed. +// Use Clone to create Tags with new byte slices that do not refer to the argument to ParsePointsWithPrecision. +func (a Tags) Clone() Tags { + if len(a) == 0 { + return nil + } + + others := make(Tags, len(a)) + for i := range a { + others[i] = a[i].Clone() + } + + return others +} + +func (a Tags) Len() int { return len(a) } +func (a Tags) Less(i, j int) bool { return bytes.Compare(a[i].Key, a[j].Key) == -1 } +func (a Tags) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +// Equal returns true if a equals other. +func (a Tags) Equal(other Tags) bool { + if len(a) != len(other) { + return false + } + for i := range a { + if !bytes.Equal(a[i].Key, other[i].Key) || !bytes.Equal(a[i].Value, other[i].Value) { + return false + } + } + return true +} + +// CompareTags returns -1 if a < b, 1 if a > b, and 0 if a == b. +func CompareTags(a, b Tags) int { + // Compare each key & value until a mismatch. + for i := 0; i < len(a) && i < len(b); i++ { + if cmp := bytes.Compare(a[i].Key, b[i].Key); cmp != 0 { + return cmp + } + if cmp := bytes.Compare(a[i].Value, b[i].Value); cmp != 0 { + return cmp + } + } + + // If all tags are equal up to this point then return shorter tagset. + if len(a) < len(b) { + return -1 + } else if len(a) > len(b) { + return 1 + } + + // All tags are equal. + return 0 +} + +// Get returns the value for a key. +func (a Tags) Get(key []byte) []byte { + // OPTIMIZE: Use sort.Search if tagset is large. + + for _, t := range a { + if bytes.Equal(t.Key, key) { + return t.Value + } + } + return nil +} + +// GetString returns the string value for a string key. 
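+// A missing key yields the empty string.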
+func (a Tags) GetString(key string) string { + return string(a.Get([]byte(key))) +} + +// Set sets the value for a key. +func (a *Tags) Set(key, value []byte) { + for i, t := range *a { + if bytes.Equal(t.Key, key) { + (*a)[i].Value = value + return + } + } + *a = append(*a, Tag{Key: key, Value: value}) + sort.Sort(*a) +} + +// SetString sets the string value for a string key. +func (a *Tags) SetString(key, value string) { + a.Set([]byte(key), []byte(value)) +} + +// Delete removes a tag by key. +func (a *Tags) Delete(key []byte) { + for i, t := range *a { + if bytes.Equal(t.Key, key) { + copy((*a)[i:], (*a)[i+1:]) + (*a)[len(*a)-1] = Tag{} + *a = (*a)[:len(*a)-1] + return + } + } +} + +// Map returns a map representation of the tags. +func (a Tags) Map() map[string]string { + m := make(map[string]string, len(a)) + for _, t := range a { + m[string(t.Key)] = string(t.Value) + } + return m +} + +// Merge merges the tags combining the two. If both define a tag with the +// same key, the merged value overwrites the old value. +// A new map is returned. +func (a Tags) Merge(other map[string]string) Tags { + merged := make(map[string]string, len(a)+len(other)) + for _, t := range a { + merged[string(t.Key)] = string(t.Value) + } + for k, v := range other { + merged[k] = v + } + return NewTags(merged) +} + +// HashKey hashes all of a tag's keys. +func (a Tags) HashKey() []byte { + return a.AppendHashKey(nil) +} + +func (a Tags) needsEscape() bool { + for i := range a { + t := &a[i] + for j := range tagEscapeCodes { + c := &tagEscapeCodes[j] + if bytes.IndexByte(t.Key, c.k[0]) != -1 || bytes.IndexByte(t.Value, c.k[0]) != -1 { + return true + } + } + } + return false +} + +// AppendHashKey appends the result of hashing all of a tag's keys and values to dst and returns the extended buffer. +func (a Tags) AppendHashKey(dst []byte) []byte { + // Empty maps marshal to empty bytes. + if len(a) == 0 { + return dst + } + + // Type invariant: Tags are sorted + + sz := 0 + var escaped Tags + if a.needsEscape() { + var tmp [20]Tag + if len(a) < len(tmp) { + escaped = tmp[:len(a)] + } else { + escaped = make(Tags, len(a)) + } + + for i := range a { + t := &a[i] + nt := &escaped[i] + nt.Key = escapeTag(t.Key) + nt.Value = escapeTag(t.Value) + sz += len(nt.Key) + len(nt.Value) + } + } else { + sz = a.Size() + escaped = a + } + + sz += len(escaped) + (len(escaped) * 2) // separators + + // Generate marshaled bytes. + if cap(dst)-len(dst) < sz { + nd := make([]byte, len(dst), len(dst)+sz) + copy(nd, dst) + dst = nd + } + buf := dst[len(dst) : len(dst)+sz] + idx := 0 + for i := range escaped { + k := &escaped[i] + if len(k.Value) == 0 { + continue + } + buf[idx] = ',' + idx++ + copy(buf[idx:], k.Key) + idx += len(k.Key) + buf[idx] = '=' + idx++ + copy(buf[idx:], k.Value) + idx += len(k.Value) + } + return dst[:len(dst)+idx] +} + +// CopyTags returns a shallow copy of tags. +func CopyTags(a Tags) Tags { + other := make(Tags, len(a)) + copy(other, a) + return other +} + +// DeepCopyTags returns a deep copy of tags. +func DeepCopyTags(a Tags) Tags { + // Calculate size of keys/values in bytes. + var n int + for _, t := range a { + n += len(t.Key) + len(t.Value) + } + + // Build single allocation for all key/values. + buf := make([]byte, n) + + // Copy tags to new set. 
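+	// Each key and value is re-sliced out of the shared backing buffer, so all
+	// tag bytes live in the single allocation made above.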
+ other := make(Tags, len(a)) + for i, t := range a { + copy(buf, t.Key) + other[i].Key, buf = buf[:len(t.Key)], buf[len(t.Key):] + + copy(buf, t.Value) + other[i].Value, buf = buf[:len(t.Value)], buf[len(t.Value):] + } + + return other +} + +// Fields represents a mapping between a Point's field names and their +// values. +type Fields map[string]interface{} + +// FieldIterator retuns a FieldIterator that can be used to traverse the +// fields of a point without constructing the in-memory map. +func (p *point) FieldIterator() FieldIterator { + p.Reset() + return p +} + +type fieldIterator struct { + start, end int + key, keybuf []byte + valueBuf []byte + fieldType FieldType +} + +// Next indicates whether there any fields remaining. +func (p *point) Next() bool { + p.it.start = p.it.end + if p.it.start >= len(p.fields) { + return false + } + + p.it.end, p.it.key = scanTo(p.fields, p.it.start, '=') + if escape.IsEscaped(p.it.key) { + p.it.keybuf = escape.AppendUnescaped(p.it.keybuf[:0], p.it.key) + p.it.key = p.it.keybuf + } + + p.it.end, p.it.valueBuf = scanFieldValue(p.fields, p.it.end+1) + p.it.end++ + + if len(p.it.valueBuf) == 0 { + p.it.fieldType = Empty + return true + } + + c := p.it.valueBuf[0] + + if c == '"' { + p.it.fieldType = String + return true + } + + if strings.IndexByte(`0123456789-.nNiIu`, c) >= 0 { + if p.it.valueBuf[len(p.it.valueBuf)-1] == 'i' { + p.it.fieldType = Integer + p.it.valueBuf = p.it.valueBuf[:len(p.it.valueBuf)-1] + } else if p.it.valueBuf[len(p.it.valueBuf)-1] == 'u' { + p.it.fieldType = Unsigned + p.it.valueBuf = p.it.valueBuf[:len(p.it.valueBuf)-1] + } else { + p.it.fieldType = Float + } + return true + } + + // to keep the same behavior that currently exists, default to boolean + p.it.fieldType = Boolean + return true +} + +// FieldKey returns the key of the current field. +func (p *point) FieldKey() []byte { + return p.it.key +} + +// Type returns the FieldType of the current field. +func (p *point) Type() FieldType { + return p.it.fieldType +} + +// StringValue returns the string value of the current field. +func (p *point) StringValue() string { + return unescapeStringField(string(p.it.valueBuf[1 : len(p.it.valueBuf)-1])) +} + +// IntegerValue returns the integer value of the current field. +func (p *point) IntegerValue() (int64, error) { + n, err := parseIntBytes(p.it.valueBuf, 10, 64) + if err != nil { + return 0, fmt.Errorf("unable to parse integer value %q: %v", p.it.valueBuf, err) + } + return n, nil +} + +// UnsignedValue returns the unsigned value of the current field. +func (p *point) UnsignedValue() (uint64, error) { + n, err := parseUintBytes(p.it.valueBuf, 10, 64) + if err != nil { + return 0, fmt.Errorf("unable to parse unsigned value %q: %v", p.it.valueBuf, err) + } + return n, nil +} + +// BooleanValue returns the boolean value of the current field. +func (p *point) BooleanValue() (bool, error) { + b, err := parseBoolBytes(p.it.valueBuf) + if err != nil { + return false, fmt.Errorf("unable to parse bool value %q: %v", p.it.valueBuf, err) + } + return b, nil +} + +// FloatValue returns the float value of the current field. +func (p *point) FloatValue() (float64, error) { + f, err := parseFloatBytes(p.it.valueBuf, 64) + if err != nil { + return 0, fmt.Errorf("unable to parse floating point value %q: %v", p.it.valueBuf, err) + } + return f, nil +} + +// Reset resets the iterator to its initial state. 
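+// After Reset, Next starts again at the first field of the point.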
+func (p *point) Reset() { + p.it.fieldType = Empty + p.it.key = nil + p.it.valueBuf = nil + p.it.start = 0 + p.it.end = 0 +} + +// MarshalBinary encodes all the fields to their proper type and returns the binary +// represenation +// NOTE: uint64 is specifically not supported due to potential overflow when we decode +// again later to an int64 +// NOTE2: uint is accepted, and may be 64 bits, and is for some reason accepted... +func (p Fields) MarshalBinary() []byte { + var b []byte + keys := make([]string, 0, len(p)) + + for k := range p { + keys = append(keys, k) + } + + // Not really necessary, can probably be removed. + sort.Strings(keys) + + for i, k := range keys { + if i > 0 { + b = append(b, ',') + } + b = appendField(b, k, p[k]) + } + + return b +} + +func appendField(b []byte, k string, v interface{}) []byte { + b = append(b, []byte(escape.String(k))...) + b = append(b, '=') + + // check popular types first + switch v := v.(type) { + case float64: + b = strconv.AppendFloat(b, v, 'f', -1, 64) + case int64: + b = strconv.AppendInt(b, v, 10) + b = append(b, 'i') + case string: + b = append(b, '"') + b = append(b, []byte(EscapeStringField(v))...) + b = append(b, '"') + case bool: + b = strconv.AppendBool(b, v) + case int32: + b = strconv.AppendInt(b, int64(v), 10) + b = append(b, 'i') + case int16: + b = strconv.AppendInt(b, int64(v), 10) + b = append(b, 'i') + case int8: + b = strconv.AppendInt(b, int64(v), 10) + b = append(b, 'i') + case int: + b = strconv.AppendInt(b, int64(v), 10) + b = append(b, 'i') + case uint64: + b = strconv.AppendUint(b, v, 10) + b = append(b, 'u') + case uint32: + b = strconv.AppendInt(b, int64(v), 10) + b = append(b, 'i') + case uint16: + b = strconv.AppendInt(b, int64(v), 10) + b = append(b, 'i') + case uint8: + b = strconv.AppendInt(b, int64(v), 10) + b = append(b, 'i') + case uint: + // TODO: 'uint' should be converted to writing as an unsigned integer, + // but we cannot since that would break backwards compatibility. + b = strconv.AppendInt(b, int64(v), 10) + b = append(b, 'i') + case float32: + b = strconv.AppendFloat(b, float64(v), 'f', -1, 32) + case []byte: + b = append(b, v...) + case nil: + // skip + default: + // Can't determine the type, so convert to string + b = append(b, '"') + b = append(b, []byte(EscapeStringField(fmt.Sprintf("%v", v)))...) + b = append(b, '"') + + } + + return b +} + +// ValidKeyToken returns true if the token used for measurement, tag key, or tag +// value is a valid unicode string and only contains printable, non-replacement characters. +func ValidKeyToken(s string) bool { + if !utf8.ValidString(s) { + return false + } + for _, r := range s { + if !unicode.IsPrint(r) || r == unicode.ReplacementChar { + return false + } + } + return true +} + +// ValidKeyTokens returns true if the measurement name and all tags are valid. 
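+// Field keys and values are not checked by this function.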
+func ValidKeyTokens(name string, tags Tags) bool { + if !ValidKeyToken(name) { + return false + } + for _, tag := range tags { + if !ValidKeyToken(string(tag.Key)) || !ValidKeyToken(string(tag.Value)) { + return false + } + } + return true +} diff --git a/vendor/github.com/influxdata/influxdb/models/points_internal_test.go b/vendor/github.com/influxdata/influxdb/models/points_internal_test.go new file mode 100644 index 0000000..3a760d3 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/models/points_internal_test.go @@ -0,0 +1,17 @@ +package models + +import "testing" + +func TestMarshalPointNoFields(t *testing.T) { + points, err := ParsePointsString("m,k=v f=0i") + if err != nil { + t.Fatal(err) + } + + // It's unclear how this can ever happen, but we've observed points that were marshalled without any fields. + points[0].(*point).fields = []byte{} + + if _, err := points[0].MarshalBinary(); err != ErrPointMustHaveAField { + t.Fatalf("got error %v, exp %v", err, ErrPointMustHaveAField) + } +} diff --git a/vendor/github.com/influxdata/influxdb/models/points_test.go b/vendor/github.com/influxdata/influxdb/models/points_test.go new file mode 100644 index 0000000..d4ee9ac --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/models/points_test.go @@ -0,0 +1,2551 @@ +package models_test + +import ( + "bytes" + "fmt" + "io" + "math" + "math/rand" + "reflect" + "strconv" + "strings" + "testing" + "time" + + "github.com/influxdata/influxdb/models" +) + +var ( + tags = models.NewTags(map[string]string{"foo": "bar", "apple": "orange", "host": "serverA", "region": "uswest"}) + fields = models.Fields{ + "int64": int64(math.MaxInt64), + "uint32": uint32(math.MaxUint32), + "string": "String field that has a decent length, probably some log message or something", + "boolean": false, + "float64-tiny": float64(math.SmallestNonzeroFloat64), + "float64-large": float64(math.MaxFloat64), + } + maxFloat64 = strconv.FormatFloat(math.MaxFloat64, 'f', 1, 64) + minFloat64 = strconv.FormatFloat(-math.MaxFloat64, 'f', 1, 64) + + sink interface{} +) + +func TestMarshal(t *testing.T) { + got := tags.HashKey() + if exp := ",apple=orange,foo=bar,host=serverA,region=uswest"; string(got) != exp { + t.Log("got: ", string(got)) + t.Log("exp: ", exp) + t.Error("invalid match") + } +} + +func TestMarshalFields(t *testing.T) { + for _, tt := range []struct { + name string + value interface{} + exp string + }{ + { + name: "Float", + value: float64(2), + exp: `value=2`, + }, + { + name: "Integer", + value: int64(2), + exp: `value=2i`, + }, + { + name: "Unsigned", + value: uint64(2), + exp: `value=2u`, + }, + { + name: "String", + value: "foobar", + exp: `value="foobar"`, + }, + { + name: "Boolean", + value: true, + exp: `value=true`, + }, + } { + t.Run(tt.name, func(t *testing.T) { + fields := map[string]interface{}{"value": tt.value} + if have, want := models.Fields(fields).MarshalBinary(), []byte(tt.exp); !bytes.Equal(have, want) { + t.Fatalf("unexpected field output: %s != %s", string(have), string(want)) + } + }) + } +} + +func TestTags_HashKey(t *testing.T) { + tags = models.NewTags(map[string]string{"A FOO": "bar", "APPLE": "orange", "host": "serverA", "region": "uswest"}) + got := tags.HashKey() + if exp := ",A\\ FOO=bar,APPLE=orange,host=serverA,region=uswest"; string(got) != exp { + t.Log("got: ", string(got)) + t.Log("exp: ", exp) + t.Error("invalid match") + } +} + +func BenchmarkMarshal(b *testing.B) { + for i := 0; i < b.N; i++ { + tags.HashKey() + } +} +func TestPoint_Tags(t *testing.T) { + examples := 
[]struct { + Point string + Tags models.Tags + }{ + {`cpu value=1`, models.Tags{}}, + {"cpu,tag0=v0 value=1", models.NewTags(map[string]string{"tag0": "v0"})}, + {"cpu,tag0=v0,tag1=v0 value=1", models.NewTags(map[string]string{"tag0": "v0", "tag1": "v0"})}, + {`cpu,tag0=v\ 0 value=1`, models.NewTags(map[string]string{"tag0": "v 0"})}, + {`cpu,tag0=v\ 0\ 1,tag1=v2 value=1`, models.NewTags(map[string]string{"tag0": "v 0 1", "tag1": "v2"})}, + {`cpu,tag0=\, value=1`, models.NewTags(map[string]string{"tag0": ","})}, + {`cpu,ta\ g0=\, value=1`, models.NewTags(map[string]string{"ta g0": ","})}, + {`cpu,tag0=\,1 value=1`, models.NewTags(map[string]string{"tag0": ",1"})}, + {`cpu,tag0=1\"\",t=k value=1`, models.NewTags(map[string]string{"tag0": `1\"\"`, "t": "k"})}, + } + + for _, example := range examples { + t.Run(example.Point, func(t *testing.T) { + pts, err := models.ParsePointsString(example.Point) + if err != nil { + t.Fatal(err) + } else if len(pts) != 1 { + t.Fatalf("parsed %d points, expected 1", len(pts)) + } + + // Repeat to test Tags() caching + for i := 0; i < 2; i++ { + tags := pts[0].Tags() + if !reflect.DeepEqual(tags, example.Tags) { + t.Fatalf("got %#v (%s), expected %#v", tags, tags.String(), example.Tags) + } + } + + }) + } +} + +func TestPoint_StringSize(t *testing.T) { + testPoint_cube(t, func(p models.Point) { + l := p.StringSize() + s := p.String() + + if l != len(s) { + t.Errorf("Incorrect length for %q. got %v, exp %v", s, l, len(s)) + } + }) + +} + +func TestPoint_AppendString(t *testing.T) { + testPoint_cube(t, func(p models.Point) { + got := p.AppendString(nil) + exp := []byte(p.String()) + + if !reflect.DeepEqual(exp, got) { + t.Errorf("AppendString() didn't match String(): got %v, exp %v", got, exp) + } + }) +} + +func testPoint_cube(t *testing.T, f func(p models.Point)) { + // heard of a table-driven test? let's make a cube-driven test... 
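+	// f is invoked for every combination of tag set, field set and timestamp below.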
+ tagList := []models.Tags{nil, {models.NewTag([]byte("foo"), []byte("bar"))}, tags} + fieldList := []models.Fields{{"a": 42.0}, {"a": 42, "b": "things"}, fields} + timeList := []time.Time{time.Time{}, time.Unix(0, 0), time.Unix(-34526, 0), time.Unix(231845, 0), time.Now()} + + for _, tagSet := range tagList { + for _, fieldSet := range fieldList { + for _, pointTime := range timeList { + p, err := models.NewPoint("test", tagSet, fieldSet, pointTime) + if err != nil { + t.Errorf("unexpected error creating point: %v", err) + continue + } + + f(p) + } + } + } +} + +func TestTag_Clone(t *testing.T) { + tag := models.NewTag([]byte("key"), []byte("value")) + + c := tag.Clone() + + if &c.Key == &tag.Key || !bytes.Equal(c.Key, tag.Key) { + t.Fatalf("key %s should have been a clone of %s", c.Key, tag.Key) + } + + if &c.Value == &tag.Value || !bytes.Equal(c.Value, tag.Value) { + t.Fatalf("value %s should have been a clone of %s", c.Value, tag.Value) + } +} + +func TestTags_Clone(t *testing.T) { + tags := models.NewTags(map[string]string{"k1": "v1", "k2": "v2", "k3": "v3"}) + + clone := tags.Clone() + + for i := range tags { + tag := tags[i] + c := clone[i] + if &c.Key == &tag.Key || !bytes.Equal(c.Key, tag.Key) { + t.Fatalf("key %s should have been a clone of %s", c.Key, tag.Key) + } + + if &c.Value == &tag.Value || !bytes.Equal(c.Value, tag.Value) { + t.Fatalf("value %s should have been a clone of %s", c.Value, tag.Value) + } + } +} + +var p models.Point + +func BenchmarkNewPoint(b *testing.B) { + ts := time.Now() + for i := 0; i < b.N; i++ { + p, _ = models.NewPoint("measurement", tags, fields, ts) + } +} + +func BenchmarkNewPointFromBinary(b *testing.B) { + pts, err := models.ParsePointsString("cpu value1=1.0,value2=1.0,value3=3.0,value4=4,value5=\"five\" 1000000000") + if err != nil { + b.Fatalf("unexpected error ParsePointsString: %v", err) + } + + bytes, err := pts[0].MarshalBinary() + if err != nil { + b.Fatalf("unexpected error MarshalBinary: %v", err) + } + + for i := 0; i < b.N; i++ { + _, err := models.NewPointFromBytes(bytes) + if err != nil { + b.Fatalf("unexpected error NewPointsFromBytes: %v", err) + } + } +} + +func BenchmarkParsePointNoTags5000(b *testing.B) { + var batch [5000]string + for i := 0; i < len(batch); i++ { + batch[i] = `cpu value=1i 1000000000` + } + lines := strings.Join(batch[:], "\n") + b.ResetTimer() + for i := 0; i < b.N; i++ { + models.ParsePoints([]byte(lines)) + b.SetBytes(int64(len(lines))) + } +} + +func BenchmarkParsePointNoTags(b *testing.B) { + line := `cpu value=1i 1000000000` + for i := 0; i < b.N; i++ { + models.ParsePoints([]byte(line)) + b.SetBytes(int64(len(line))) + } +} + +func BenchmarkParsePointWithPrecisionN(b *testing.B) { + line := `cpu value=1i 1000000000` + defaultTime := time.Now().UTC() + for i := 0; i < b.N; i++ { + models.ParsePointsWithPrecision([]byte(line), defaultTime, "n") + b.SetBytes(int64(len(line))) + } +} + +func BenchmarkParsePointWithPrecisionU(b *testing.B) { + line := `cpu value=1i 1000000000` + defaultTime := time.Now().UTC() + for i := 0; i < b.N; i++ { + models.ParsePointsWithPrecision([]byte(line), defaultTime, "u") + b.SetBytes(int64(len(line))) + } +} + +func BenchmarkParsePointsTagsSorted2(b *testing.B) { + line := `cpu,host=serverA,region=us-west value=1i 1000000000` + for i := 0; i < b.N; i++ { + models.ParsePoints([]byte(line)) + b.SetBytes(int64(len(line))) + } +} + +func BenchmarkParsePointsTagsSorted5(b *testing.B) { + line := `cpu,env=prod,host=serverA,region=us-west,target=servers,zone=1c value=1i 
1000000000` + for i := 0; i < b.N; i++ { + models.ParsePoints([]byte(line)) + b.SetBytes(int64(len(line))) + } +} + +func BenchmarkParsePointsTagsSorted10(b *testing.B) { + line := `cpu,env=prod,host=serverA,region=us-west,tag1=value1,tag2=value2,tag3=value3,tag4=value4,tag5=value5,target=servers,zone=1c value=1i 1000000000` + for i := 0; i < b.N; i++ { + models.ParsePoints([]byte(line)) + b.SetBytes(int64(len(line))) + } +} + +func BenchmarkParsePointsTagsUnSorted2(b *testing.B) { + line := `cpu,region=us-west,host=serverA value=1i 1000000000` + for i := 0; i < b.N; i++ { + pt, _ := models.ParsePoints([]byte(line)) + b.SetBytes(int64(len(line))) + pt[0].Key() + } +} + +func BenchmarkParsePointsTagsUnSorted5(b *testing.B) { + line := `cpu,region=us-west,host=serverA,env=prod,target=servers,zone=1c value=1i 1000000000` + for i := 0; i < b.N; i++ { + pt, _ := models.ParsePoints([]byte(line)) + b.SetBytes(int64(len(line))) + pt[0].Key() + } +} + +func BenchmarkParsePointsTagsUnSorted10(b *testing.B) { + line := `cpu,region=us-west,host=serverA,env=prod,target=servers,zone=1c,tag1=value1,tag2=value2,tag3=value3,tag4=value4,tag5=value5 value=1i 1000000000` + for i := 0; i < b.N; i++ { + pt, _ := models.ParsePoints([]byte(line)) + b.SetBytes(int64(len(line))) + pt[0].Key() + } +} + +func BenchmarkParseKey(b *testing.B) { + line := `cpu,region=us-west,host=serverA,env=prod,target=servers,zone=1c,tag1=value1,tag2=value2,tag3=value3,tag4=value4,tag5=value5` + for i := 0; i < b.N; i++ { + models.ParseKey([]byte(line)) + } +} + +// TestPoint wraps a models.Point but also makes available the raw +// arguments to the Point. +// +// This is useful for ensuring that comparisons between results of +// operations on Points match the expected input data to the Point, +// since models.Point does not expose the raw input data (e.g., tags) +// via its API. +type TestPoint struct { + RawFields models.Fields + RawTags models.Tags + RawTime time.Time + models.Point +} + +// NewTestPoint returns a new TestPoint. +// +// NewTestPoint panics if it is not a valid models.Point. +func NewTestPoint(name string, tags models.Tags, fields models.Fields, time time.Time) TestPoint { + return TestPoint{ + RawTags: tags, + RawFields: fields, + RawTime: time, + Point: models.MustNewPoint(name, tags, fields, time), + } +} + +func test(t *testing.T, line string, point TestPoint) { + pts, err := models.ParsePointsWithPrecision([]byte(line), time.Unix(0, 0), "n") + if err != nil { + t.Fatalf(`ParsePoints("%s") mismatch. got %v, exp nil`, line, err) + } + + if exp := 1; len(pts) != exp { + t.Fatalf(`ParsePoints("%s") len mismatch. got %d, exp %d`, line, len(pts), exp) + } + + if exp := point.Key(); !bytes.Equal(pts[0].Key(), exp) { + t.Errorf("ParsePoints(\"%s\") key mismatch.\ngot %v\nexp %v", line, string(pts[0].Key()), string(exp)) + } + + if exp := len(point.Tags()); len(pts[0].Tags()) != exp { + t.Errorf(`ParsePoints("%s") tags mismatch. got %v, exp %v`, line, pts[0].Tags(), exp) + } + + for _, tag := range pts[0].Tags() { + if !bytes.Equal(tag.Value, point.RawTags.Get(tag.Key)) { + t.Errorf(`ParsePoints("%s") tags mismatch. got %s, exp %s`, line, tag.Value, point.RawTags.Get(tag.Key)) + } + } + + for name, value := range point.RawFields { + fields, err := pts[0].Fields() + if err != nil { + t.Fatal(err) + } + val := fields[name] + expfval, ok := val.(float64) + + if ok && math.IsNaN(expfval) { + gotfval, ok := value.(float64) + if ok && !math.IsNaN(gotfval) { + t.Errorf(`ParsePoints("%s") field '%s' mismatch. 
exp NaN`, line, name) + } + } + if !reflect.DeepEqual(val, value) { + t.Errorf(`ParsePoints("%s") field '%s' mismatch. got %[3]v (%[3]T), exp %[4]v (%[4]T)`, line, name, val, value) + } + } + + if !pts[0].Time().Equal(point.Time()) { + t.Errorf(`ParsePoints("%s") time mismatch. got %v, exp %v`, line, pts[0].Time(), point.Time()) + } + + if !strings.HasPrefix(pts[0].String(), line) { + t.Errorf("ParsePoints string mismatch.\ngot: %v\nexp: %v", pts[0].String(), line) + } +} + +func TestParsePointNoValue(t *testing.T) { + pts, err := models.ParsePointsString("") + if err != nil { + t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, "", err) + } + + if exp := 0; len(pts) != exp { + t.Errorf(`ParsePoints("%s") len mismatch. got %v, exp %v`, "", len(pts), exp) + } +} + +func TestParsePointWhitespaceValue(t *testing.T) { + pts, err := models.ParsePointsString(" ") + if err != nil { + t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, "", err) + } + + if exp := 0; len(pts) != exp { + t.Errorf(`ParsePoints("%s") len mismatch. got %v, exp %v`, "", len(pts), exp) + } +} + +func TestParsePointNoFields(t *testing.T) { + expectedSuffix := "missing fields" + examples := []string{ + "cpu_load_short,host=server01,region=us-west", + "cpu", + "cpu,host==", + "=", + } + + for i, example := range examples { + _, err := models.ParsePointsString(example) + if err == nil { + t.Errorf(`[Example %d] ParsePoints("%s") mismatch. got nil, exp error`, i, example) + } else if !strings.HasSuffix(err.Error(), expectedSuffix) { + t.Errorf(`[Example %d] ParsePoints("%s") mismatch. got %q, exp suffix %q`, i, example, err, expectedSuffix) + } + } +} + +func TestParsePointNoTimestamp(t *testing.T) { + test(t, "cpu value=1", NewTestPoint("cpu", nil, models.Fields{"value": 1.0}, time.Unix(0, 0))) +} + +func TestParsePointMissingQuote(t *testing.T) { + expectedSuffix := "unbalanced quotes" + examples := []string{ + `cpu,host=serverA value="test`, + `cpu,host=serverA value="test""`, + } + + for i, example := range examples { + _, err := models.ParsePointsString(example) + if err == nil { + t.Errorf(`[Example %d] ParsePoints("%s") mismatch. got nil, exp error`, i, example) + } else if !strings.HasSuffix(err.Error(), expectedSuffix) { + t.Errorf(`[Example %d] ParsePoints("%s") mismatch. got %q, exp suffix %q`, i, example, err, expectedSuffix) + } + } +} + +func TestParsePointMissingTagKey(t *testing.T) { + expectedSuffix := "missing tag key" + examples := []string{ + `cpu, value=1`, + `cpu,`, + `cpu,,,`, + `cpu,host=serverA,=us-east value=1i`, + `cpu,host=serverAa\,,=us-east value=1i`, + `cpu,host=serverA\,,=us-east value=1i`, + `cpu, =serverA value=1i`, + } + + for i, example := range examples { + _, err := models.ParsePointsString(example) + if err == nil { + t.Errorf(`[Example %d] ParsePoints("%s") mismatch. got nil, exp error`, i, example) + } else if !strings.HasSuffix(err.Error(), expectedSuffix) { + t.Errorf(`[Example %d] ParsePoints("%s") mismatch. got %q, exp suffix %q`, i, example, err, expectedSuffix) + } + } + + _, err := models.ParsePointsString(`cpu,host=serverA,\ =us-east value=1i`) + if err != nil { + t.Errorf(`ParsePoints("%s") mismatch. 
got %v, exp nil`, `cpu,host=serverA,\ =us-east value=1i`, err) + } +} + +func TestParsePointMissingTagValue(t *testing.T) { + expectedSuffix := "missing tag value" + examples := []string{ + `cpu,host`, + `cpu,host,`, + `cpu,host=`, + `cpu,host value=1i`, + `cpu,host=serverA,region value=1i`, + `cpu,host=serverA,region= value=1i`, + `cpu,host=serverA,region=,zone=us-west value=1i`, + } + + for i, example := range examples { + _, err := models.ParsePointsString(example) + if err == nil { + t.Errorf(`[Example %d] ParsePoints("%s") mismatch. got nil, exp error`, i, example) + } else if !strings.HasSuffix(err.Error(), expectedSuffix) { + t.Errorf(`[Example %d] ParsePoints("%s") mismatch. got %q, exp suffix %q`, i, example, err, expectedSuffix) + } + } +} + +func TestParsePointInvalidTagFormat(t *testing.T) { + expectedSuffix := "invalid tag format" + examples := []string{ + `cpu,host=f=o,`, + `cpu,host=f\==o,`, + } + + for i, example := range examples { + _, err := models.ParsePointsString(example) + if err == nil { + t.Errorf(`[Example %d] ParsePoints("%s") mismatch. got nil, exp error`, i, example) + } else if !strings.HasSuffix(err.Error(), expectedSuffix) { + t.Errorf(`[Example %d] ParsePoints("%s") mismatch. got %q, exp suffix %q`, i, example, err, expectedSuffix) + } + } +} + +func TestParsePointMissingFieldName(t *testing.T) { + _, err := models.ParsePointsString(`cpu,host=serverA,region=us-west =`) + if err == nil { + t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west =`) + } + + _, err = models.ParsePointsString(`cpu,host=serverA,region=us-west =123i`) + if err == nil { + t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west =123i`) + } + + _, err = models.ParsePointsString(`cpu,host=serverA,region=us-west a\ =123i`) + if err != nil { + t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west a\ =123i`) + } + _, err = models.ParsePointsString(`cpu,host=serverA,region=us-west value=123i,=456i`) + if err == nil { + t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value=123i,=456i`) + } +} + +func TestParsePointMissingFieldValue(t *testing.T) { + _, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=`) + if err == nil { + t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value=`) + } + + _, err = models.ParsePointsString(`cpu,host=serverA,region=us-west value= 1000000000i`) + if err == nil { + t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value= 1000000000i`) + } + + _, err = models.ParsePointsString(`cpu,host=serverA,region=us-west value=,value2=1i`) + if err == nil { + t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value=,value2=1i`) + } + + _, err = models.ParsePointsString(`cpu,host=server01,region=us-west 1434055562000000000i`) + if err == nil { + t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=server01,region=us-west 1434055562000000000i`) + } + + _, err = models.ParsePointsString(`cpu,host=server01,region=us-west value=1i,b`) + if err == nil { + t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=server01,region=us-west value=1i,b`) + } + + _, err = models.ParsePointsString(`m f="blah"=123,r 1531703600000000000`) + if err == nil { + t.Errorf(`ParsePoints("%s") mismatch. 
got nil, exp error`, `m f="blah"=123,r 1531703600000000000`) + } +} + +func TestParsePointBadNumber(t *testing.T) { + for _, tt := range []string{ + "cpu v=- ", + "cpu v=-i ", + "cpu v=-. ", + "cpu v=. ", + "cpu v=1.0i ", + "cpu v=1ii ", + "cpu v=1a ", + "cpu v=-e-e-e ", + "cpu v=42+3 ", + "cpu v= ", + "cpu v=-123u", + } { + _, err := models.ParsePointsString(tt) + if err == nil { + t.Errorf("Point %q should be invalid", tt) + } + } +} + +func TestParsePointMaxInt64(t *testing.T) { + // out of range + _, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=9223372036854775808i`) + exp := `unable to parse 'cpu,host=serverA,region=us-west value=9223372036854775808i': unable to parse integer 9223372036854775808: strconv.ParseInt: parsing "9223372036854775808": value out of range` + if err == nil || (err != nil && err.Error() != exp) { + t.Fatalf("Error mismatch:\nexp: %s\ngot: %v", exp, err) + } + + // max int + p, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=9223372036854775807i`) + if err != nil { + t.Fatalf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=9223372036854775807i`, err) + } + fields, err := p[0].Fields() + if err != nil { + t.Fatal(err) + } + if exp, got := int64(9223372036854775807), fields["value"].(int64); exp != got { + t.Fatalf("ParsePoints Value mismatch. \nexp: %v\ngot: %v", exp, got) + } + + // leading zeros + _, err = models.ParsePointsString(`cpu,host=serverA,region=us-west value=0009223372036854775807i`) + if err != nil { + t.Fatalf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=0009223372036854775807i`, err) + } +} + +func TestParsePointMinInt64(t *testing.T) { + // out of range + _, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=-9223372036854775809i`) + if err == nil { + t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value=-9223372036854775809i`) + } + + // min int + p, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=-9223372036854775808i`) + if err != nil { + t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=-9223372036854775808i`, err) + } + fields, err := p[0].Fields() + if err != nil { + t.Fatal(err) + } + if exp, got := int64(-9223372036854775808), fields["value"].(int64); exp != got { + t.Fatalf("ParsePoints Value mismatch. \nexp: %v\ngot: %v", exp, got) + } + + // leading zeros + _, err = models.ParsePointsString(`cpu,host=serverA,region=us-west value=-0009223372036854775808i`) + if err != nil { + t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=-0009223372036854775808i`, err) + } +} + +func TestParsePointMaxFloat64(t *testing.T) { + // out of range + _, err := models.ParsePointsString(fmt.Sprintf(`cpu,host=serverA,region=us-west value=%s`, "1"+string(maxFloat64))) + if err == nil { + t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value=...`) + } + + // max float + p, err := models.ParsePointsString(fmt.Sprintf(`cpu,host=serverA,region=us-west value=%s`, string(maxFloat64))) + if err != nil { + t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=9223372036854775807`, err) + } + fields, err := p[0].Fields() + if err != nil { + t.Fatal(err) + } + if exp, got := math.MaxFloat64, fields["value"].(float64); exp != got { + t.Fatalf("ParsePoints Value mismatch. 
\nexp: %v\ngot: %v", exp, got) + } + + // leading zeros + _, err = models.ParsePointsString(fmt.Sprintf(`cpu,host=serverA,region=us-west value=%s`, "0000"+string(maxFloat64))) + if err != nil { + t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=0009223372036854775807`, err) + } +} + +func TestParsePointMinFloat64(t *testing.T) { + // out of range + _, err := models.ParsePointsString(fmt.Sprintf(`cpu,host=serverA,region=us-west value=%s`, "-1"+string(minFloat64)[1:])) + if err == nil { + t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value=...`) + } + + // min float + p, err := models.ParsePointsString(fmt.Sprintf(`cpu,host=serverA,region=us-west value=%s`, string(minFloat64))) + if err != nil { + t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=...`, err) + } + fields, err := p[0].Fields() + if err != nil { + t.Fatal(err) + } + if exp, got := -math.MaxFloat64, fields["value"].(float64); exp != got { + t.Fatalf("ParsePoints Value mismatch. \nexp: %v\ngot: %v", exp, got) + } + + // leading zeros + _, err = models.ParsePointsString(fmt.Sprintf(`cpu,host=serverA,region=us-west value=%s`, "-0000000"+string(minFloat64)[1:])) + if err != nil { + t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=...`, err) + } +} + +func TestParsePointMaxUint64(t *testing.T) { + // out of range + _, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=18446744073709551616u`) + exp := `unable to parse 'cpu,host=serverA,region=us-west value=18446744073709551616u': unable to parse unsigned 18446744073709551616: strconv.ParseUint: parsing "18446744073709551616": value out of range` + if err == nil || (err != nil && err.Error() != exp) { + t.Fatalf("Error mismatch:\nexp: %s\ngot: %v", exp, err) + } + + // max int + p, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=18446744073709551615u`) + if err != nil { + t.Fatalf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=18446744073709551615u`, err) + } + fields, err := p[0].Fields() + if err != nil { + t.Fatal(err) + } + if exp, got := uint64(18446744073709551615), fields["value"].(uint64); exp != got { + t.Fatalf("ParsePoints Value mismatch. \nexp: %v\ngot: %v", exp, got) + } + + // leading zeros + _, err = models.ParsePointsString(`cpu,host=serverA,region=us-west value=00018446744073709551615u`) + if err != nil { + t.Fatalf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=00018446744073709551615u`, err) + } +} + +func TestParsePointMinUint64(t *testing.T) { + // out of range + _, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=--1u`) + if err == nil { + t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value=-1u`) + } + + // min int + p, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=0u`) + if err != nil { + t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=0u`, err) + } + fields, err := p[0].Fields() + if err != nil { + t.Fatal(err) + } + if exp, got := uint64(0), fields["value"].(uint64); exp != got { + t.Fatalf("ParsePoints Value mismatch. \nexp: %v\ngot: %v", exp, got) + } + + // leading zeros + _, err = models.ParsePointsString(`cpu,host=serverA,region=us-west value=0000u`) + if err != nil { + t.Errorf(`ParsePoints("%s") mismatch. 
got %v, exp nil`, `cpu,host=serverA,region=us-west value=0000u`, err) + } +} + +func TestParsePointNumberNonNumeric(t *testing.T) { + _, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=.1a`) + if err == nil { + t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value=.1a`) + } +} + +func TestParsePointNegativeWrongPlace(t *testing.T) { + _, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=0.-1`) + if err == nil { + t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value=0.-1`) + } +} + +func TestParsePointOnlyNegativeSign(t *testing.T) { + _, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=-`) + if err == nil { + t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value=-`) + } +} + +func TestParsePointFloatMultipleDecimals(t *testing.T) { + _, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=1.1.1`) + if err == nil { + t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value=1.1.1`) + } +} + +func TestParsePointInteger(t *testing.T) { + _, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=1i`) + if err != nil { + t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=1i`, err) + } +} + +func TestParsePointNegativeInteger(t *testing.T) { + _, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=-1i`) + if err != nil { + t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=-1i`, err) + } +} + +func TestParsePointNegativeFloat(t *testing.T) { + _, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=-1.0`) + if err != nil { + t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=-1.0`, err) + } +} + +func TestParsePointFloatNoLeadingDigit(t *testing.T) { + _, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=.1`) + if err != nil { + t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=-1.0`, err) + } +} + +func TestParsePointFloatScientific(t *testing.T) { + _, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=1.0e4`) + if err != nil { + t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=1.0e4`, err) + } + + pts, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=1e4`) + if err != nil { + t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=1.0e4`, err) + } + + fields, err := pts[0].Fields() + if err != nil { + t.Fatal(err) + } + if fields["value"] != 1e4 { + t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=1e4`, err) + } +} + +func TestParsePointFloatScientificUpper(t *testing.T) { + _, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=1.0E4`) + if err != nil { + t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=1.0E4`, err) + } + + pts, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=1E4`) + if err != nil { + t.Errorf(`ParsePoints("%s") mismatch. 
got %v, exp nil`, `cpu,host=serverA,region=us-west value=1.0E4`, err) + } + + fields, err := pts[0].Fields() + if err != nil { + t.Fatal(err) + } + if fields["value"] != 1e4 { + t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=1E4`, err) + } +} + +func TestParsePointFloatScientificDecimal(t *testing.T) { + _, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=1.0e-4`) + if err != nil { + t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=1.0e-4`, err) + } +} + +func TestParsePointFloatNegativeScientific(t *testing.T) { + _, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=-1.0e-4`) + if err != nil { + t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=-1.0e-4`, err) + } +} + +func TestParsePointBooleanInvalid(t *testing.T) { + _, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=a`) + if err == nil { + t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value=a`) + } +} + +func TestParsePointScientificIntInvalid(t *testing.T) { + _, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=9ie10`) + if err == nil { + t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value=9ie10`) + } + + _, err = models.ParsePointsString(`cpu,host=serverA,region=us-west value=9e10i`) + if err == nil { + t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value=9e10i`) + } +} + +func TestParsePointWhitespace(t *testing.T) { + examples := []string{ + `cpu value=1.0 1257894000000000000`, + `cpu value=1.0 1257894000000000000`, + `cpu value=1.0 1257894000000000000`, + `cpu value=1.0 1257894000000000000 `, + `cpu value=1.0 1257894000000000000 +`, + `cpu value=1.0 1257894000000000000 +`, + } + + expPoint := NewTestPoint("cpu", models.Tags{}, models.Fields{"value": 1.0}, time.Unix(0, 1257894000000000000)) + for i, example := range examples { + pts, err := models.ParsePoints([]byte(example)) + if err != nil { + t.Fatalf(`[Example %d] ParsePoints("%s") error. 
got %v, exp nil`, i, example, err) + } + + if got, exp := len(pts), 1; got != exp { + t.Fatalf("[Example %d] got %d points, expected %d", i, got, exp) + } + + if got, exp := string(pts[0].Name()), string(expPoint.Name()); got != exp { + t.Fatalf("[Example %d] got %v measurement, expected %v", i, got, exp) + } + + fields, err := pts[0].Fields() + if err != nil { + t.Fatal(err) + } + eFields, err := expPoint.Fields() + if err != nil { + t.Fatal(err) + } + if got, exp := len(fields), len(eFields); got != exp { + t.Fatalf("[Example %d] got %d fields, expected %d", i, got, exp) + } + + if got, exp := fields["value"], eFields["value"]; got != exp { + t.Fatalf(`[Example %d] got %v for field "value", expected %v`, i, got, exp) + } + + if got, exp := pts[0].Time().UnixNano(), expPoint.Time().UnixNano(); got != exp { + t.Fatalf(`[Example %d] got %d time, expected %d`, i, got, exp) + } + } +} + +func TestParsePointUnescape(t *testing.T) { + // commas in measurement name + test(t, `foo\,bar value=1i`, + NewTestPoint( + "foo,bar", // comma in the name + models.NewTags(map[string]string{}), + models.Fields{ + "value": int64(1), + }, + time.Unix(0, 0))) + + // comma in measurement name with tags + test(t, `cpu\,main,regions=east value=1.0`, + NewTestPoint( + "cpu,main", // comma in the name + models.NewTags(map[string]string{ + "regions": "east", + }), + models.Fields{ + "value": 1.0, + }, + time.Unix(0, 0))) + + // spaces in measurement name + test(t, `cpu\ load,region=east value=1.0`, + NewTestPoint( + "cpu load", // space in the name + models.NewTags(map[string]string{ + "region": "east", + }), + models.Fields{ + "value": 1.0, + }, + time.Unix(0, 0))) + + // equals in measurement name + test(t, `cpu\=load,region=east value=1.0`, + NewTestPoint( + `cpu\=load`, // backslash is literal + models.NewTags(map[string]string{ + "region": "east", + }), + models.Fields{ + "value": 1.0, + }, + time.Unix(0, 0))) + + // equals in measurement name + test(t, `cpu=load,region=east value=1.0`, + NewTestPoint( + `cpu=load`, // literal equals is fine in measurement name + models.NewTags(map[string]string{ + "region": "east", + }), + models.Fields{ + "value": 1.0, + }, + time.Unix(0, 0))) + + // commas in tag names + test(t, `cpu,region\,zone=east value=1.0`, + NewTestPoint("cpu", + models.NewTags(map[string]string{ + "region,zone": "east", // comma in the tag key + }), + models.Fields{ + "value": 1.0, + }, + time.Unix(0, 0))) + + // spaces in tag name + test(t, `cpu,region\ zone=east value=1.0`, + NewTestPoint("cpu", + models.NewTags(map[string]string{ + "region zone": "east", // space in the tag name + }), + models.Fields{ + "value": 1.0, + }, + time.Unix(0, 0))) + + // backslash with escaped equals in tag name + test(t, `cpu,reg\\=ion=east value=1.0`, + NewTestPoint("cpu", + models.NewTags(map[string]string{ + `reg\=ion`: "east", + }), + models.Fields{ + "value": 1.0, + }, + time.Unix(0, 0))) + + // space is tag name + test(t, `cpu,\ =east value=1.0`, + NewTestPoint("cpu", + models.NewTags(map[string]string{ + " ": "east", // tag name is single space + }), + models.Fields{ + "value": 1.0, + }, + time.Unix(0, 0))) + + // commas in tag values + test(t, `cpu,regions=east\,west value=1.0`, + NewTestPoint("cpu", + models.NewTags(map[string]string{ + "regions": "east,west", // comma in the tag value + }), + models.Fields{ + "value": 1.0, + }, + time.Unix(0, 0))) + + // backslash literal followed by escaped space + test(t, `cpu,regions=\\ east value=1.0`, + NewTestPoint( + "cpu", + models.NewTags(map[string]string{ + 
"regions": `\ east`, + }), + models.Fields{ + "value": 1.0, + }, + time.Unix(0, 0))) + + // backslash literal followed by escaped space + test(t, `cpu,regions=eas\\ t value=1.0`, + NewTestPoint( + "cpu", + models.NewTags(map[string]string{ + "regions": `eas\ t`, + }), + models.Fields{ + "value": 1.0, + }, + time.Unix(0, 0))) + + // backslash literal followed by trailing space + test(t, `cpu,regions=east\\ value=1.0`, + NewTestPoint( + "cpu", + models.NewTags(map[string]string{ + "regions": `east\ `, + }), + models.Fields{ + "value": 1.0, + }, + time.Unix(0, 0))) + + // spaces in tag values + test(t, `cpu,regions=east\ west value=1.0`, + NewTestPoint("cpu", + models.NewTags(map[string]string{ + "regions": "east west", // comma in the tag value + }), + models.Fields{ + "value": 1.0, + }, + time.Unix(0, 0))) + + // commas in field keys + test(t, `cpu,regions=east value\,ms=1.0`, + NewTestPoint("cpu", + models.NewTags(map[string]string{ + "regions": "east", + }), + models.Fields{ + "value,ms": 1.0, // comma in the field keys + }, + time.Unix(0, 0))) + + // spaces in field keys + test(t, `cpu,regions=east value\ ms=1.0`, + NewTestPoint("cpu", + models.NewTags(map[string]string{ + "regions": "east", + }), + models.Fields{ + "value ms": 1.0, // comma in the field keys + }, + time.Unix(0, 0))) + + // tag with no value + test(t, `cpu,regions=east value="1"`, + NewTestPoint("cpu", + models.NewTags(map[string]string{ + "regions": "east", + "foobar": "", + }), + models.Fields{ + "value": "1", + }, + time.Unix(0, 0))) + + // commas in field values + test(t, `cpu,regions=east value="1,0"`, + NewTestPoint("cpu", + models.NewTags(map[string]string{ + "regions": "east", + }), + models.Fields{ + "value": "1,0", // comma in the field value + }, + time.Unix(0, 0))) + + // random character escaped + test(t, `cpu,regions=eas\t value=1.0`, + NewTestPoint( + "cpu", + models.NewTags(map[string]string{ + "regions": "eas\\t", + }), + models.Fields{ + "value": 1.0, + }, + time.Unix(0, 0))) + + // backslash literal followed by escaped characters + test(t, `cpu,regions=\\,\,\=east value=1.0`, + NewTestPoint( + "cpu", + models.NewTags(map[string]string{ + "regions": `\,,=east`, + }), + models.Fields{ + "value": 1.0, + }, + time.Unix(0, 0))) + + // field keys using escape char. + test(t, `cpu \a=1i`, + NewTestPoint( + "cpu", + models.NewTags(map[string]string{}), + models.Fields{ + "\\a": int64(1), // Left as parsed since it's not a known escape sequence. 
+ }, + time.Unix(0, 0))) + + // measurement, tag and tag value with equals + test(t, `cpu=load,equals\=foo=tag\=value value=1i`, + NewTestPoint( + "cpu=load", // Not escaped + models.NewTags(map[string]string{ + "equals=foo": "tag=value", // Tag and value unescaped + }), + models.Fields{ + "value": int64(1), + }, + time.Unix(0, 0))) + +} + +func TestParsePointWithTags(t *testing.T) { + test(t, + "cpu,host=serverA,region=us-east value=1.0 1000000000", + NewTestPoint("cpu", + models.NewTags(map[string]string{"host": "serverA", "region": "us-east"}), + models.Fields{"value": 1.0}, time.Unix(1, 0))) +} + +func TestParsePointWithDuplicateTags(t *testing.T) { + for i, tt := range []struct { + line string + err string + }{ + { + line: `cpu,host=serverA,host=serverB value=1i 1000000000`, + err: `unable to parse 'cpu,host=serverA,host=serverB value=1i 1000000000': duplicate tags`, + }, + { + line: `cpu,b=2,b=1,c=3 value=1i 1000000000`, + err: `unable to parse 'cpu,b=2,b=1,c=3 value=1i 1000000000': duplicate tags`, + }, + { + line: `cpu,b=2,c=3,b=1 value=1i 1000000000`, + err: `unable to parse 'cpu,b=2,c=3,b=1 value=1i 1000000000': duplicate tags`, + }, + } { + _, err := models.ParsePointsString(tt.line) + if err == nil || tt.err != err.Error() { + t.Errorf("%d. ParsePoint() expected error '%s'. got '%s'", i, tt.err, err) + } + } +} + +func TestParsePointWithStringField(t *testing.T) { + test(t, `cpu,host=serverA,region=us-east value=1.0,str="foo",str2="bar" 1000000000`, + NewTestPoint("cpu", + models.NewTags(map[string]string{ + "host": "serverA", + "region": "us-east", + }), + models.Fields{ + "value": 1.0, + "str": "foo", + "str2": "bar", + }, + time.Unix(1, 0)), + ) + + test(t, `cpu,host=serverA,region=us-east str="foo \" bar" 1000000000`, + NewTestPoint("cpu", + models.NewTags(map[string]string{ + "host": "serverA", + "region": "us-east", + }), + models.Fields{ + "str": `foo " bar`, + }, + time.Unix(1, 0)), + ) + +} + +func TestParsePointWithStringWithSpaces(t *testing.T) { + test(t, `cpu,host=serverA,region=us-east value=1.0,str="foo bar" 1000000000`, + NewTestPoint( + "cpu", + models.NewTags(map[string]string{ + "host": "serverA", + "region": "us-east", + }), + models.Fields{ + "value": 1.0, + "str": "foo bar", // spaces in string value + }, + time.Unix(1, 0)), + ) +} + +func TestParsePointWithStringWithNewline(t *testing.T) { + test(t, "cpu,host=serverA,region=us-east value=1.0,str=\"foo\nbar\" 1000000000", + NewTestPoint( + "cpu", + models.NewTags(map[string]string{ + "host": "serverA", + "region": "us-east", + }), + models.Fields{ + "value": 1.0, + "str": "foo\nbar", // newline in string value + }, + time.Unix(1, 0)), + ) +} + +func TestParsePointWithStringWithCommas(t *testing.T) { + // escaped comma + test(t, `cpu,host=serverA,region=us-east value=1.0,str="foo\,bar" 1000000000`, + NewTestPoint( + "cpu", + models.NewTags(map[string]string{ + "host": "serverA", + "region": "us-east", + }), + models.Fields{ + "value": 1.0, + "str": `foo\,bar`, // commas in string value + }, + time.Unix(1, 0)), + ) + + // non-escaped comma + test(t, `cpu,host=serverA,region=us-east value=1.0,str="foo,bar" 1000000000`, + NewTestPoint( + "cpu", + models.NewTags(map[string]string{ + "host": "serverA", + "region": "us-east", + }), + models.Fields{ + "value": 1.0, + "str": "foo,bar", // commas in string value + }, + time.Unix(1, 0)), + ) + + // string w/ trailing escape chars + test(t, `cpu,host=serverA,region=us-east str="foo\\",str2="bar" 1000000000`, + NewTestPoint( + "cpu", + models.NewTags(map[string]string{ 
+ "host": "serverA", + "region": "us-east", + }), + models.Fields{ + "str": "foo\\", // trailing escape char + "str2": "bar", + }, + time.Unix(1, 0)), + ) +} + +func TestParsePointQuotedMeasurement(t *testing.T) { + // non-escaped comma + test(t, `"cpu",host=serverA,region=us-east value=1.0 1000000000`, + NewTestPoint( + `"cpu"`, + models.NewTags(map[string]string{ + "host": "serverA", + "region": "us-east", + }), + models.Fields{ + "value": 1.0, + }, + time.Unix(1, 0)), + ) +} + +func TestParsePointQuotedTags(t *testing.T) { + test(t, `cpu,"host"="serverA",region=us-east value=1.0 1000000000`, + NewTestPoint( + "cpu", + models.NewTags(map[string]string{ + `"host"`: `"serverA"`, + "region": "us-east", + }), + models.Fields{ + "value": 1.0, + }, + time.Unix(1, 0)), + ) +} + +func TestParsePoint_TrailingSlash(t *testing.T) { + _, err := models.ParsePointsString(`a v=1 0\`) + if err == nil { + t.Fatalf("ParsePoints failed: %v", err) + } else if !strings.Contains(err.Error(), "bad timestamp") { + t.Fatalf("ParsePoints unexpected error: %v", err) + } +} + +func TestParsePointsUnbalancedQuotedTags(t *testing.T) { + pts, err := models.ParsePointsString("baz,mytag=\"a x=1 1441103862125\nbaz,mytag=a z=1 1441103862126") + if err != nil { + t.Fatalf("ParsePoints failed: %v", err) + } + + if exp := 2; len(pts) != exp { + t.Fatalf("ParsePoints count mismatch. got %v, exp %v", len(pts), exp) + } + + // Expected " in the tag value + exp := models.MustNewPoint("baz", models.NewTags(map[string]string{"mytag": `"a`}), + models.Fields{"x": float64(1)}, time.Unix(0, 1441103862125)) + + if pts[0].String() != exp.String() { + t.Errorf("Point mismatch:\ngot: %v\nexp: %v", pts[0].String(), exp.String()) + } + + // Expected two points to ensure we did not overscan the line + exp = models.MustNewPoint("baz", models.NewTags(map[string]string{"mytag": `a`}), + models.Fields{"z": float64(1)}, time.Unix(0, 1441103862126)) + + if pts[1].String() != exp.String() { + t.Errorf("Point mismatch:\ngot: %v\nexp: %v", pts[1].String(), exp.String()) + } + +} + +func TestParsePointEscapedStringsAndCommas(t *testing.T) { + // non-escaped comma and quotes + test(t, `cpu,host=serverA,region=us-east value="{Hello\"{,}\" World}" 1000000000`, + NewTestPoint( + "cpu", + models.NewTags(map[string]string{ + "host": "serverA", + "region": "us-east", + }), + models.Fields{ + "value": `{Hello"{,}" World}`, + }, + time.Unix(1, 0)), + ) + + // escaped comma and quotes + test(t, `cpu,host=serverA,region=us-east value="{Hello\"{\,}\" World}" 1000000000`, + NewTestPoint( + "cpu", + models.NewTags(map[string]string{ + "host": "serverA", + "region": "us-east", + }), + models.Fields{ + "value": `{Hello"{\,}" World}`, + }, + time.Unix(1, 0)), + ) +} + +func TestParsePointWithStringWithEquals(t *testing.T) { + test(t, `cpu,host=serverA,region=us-east str="foo=bar",value=1.0 1000000000`, + NewTestPoint( + "cpu", + models.NewTags(map[string]string{ + "host": "serverA", + "region": "us-east", + }), + models.Fields{ + "value": 1.0, + "str": "foo=bar", // spaces in string value + }, + time.Unix(1, 0)), + ) +} + +func TestParsePointWithStringWithBackslash(t *testing.T) { + test(t, `cpu value="test\\\"" 1000000000`, + NewTestPoint( + "cpu", + models.NewTags(map[string]string{}), + models.Fields{ + "value": `test\"`, + }, + time.Unix(1, 0)), + ) + + test(t, `cpu value="test\\" 1000000000`, + NewTestPoint( + "cpu", + models.NewTags(map[string]string{}), + models.Fields{ + "value": `test\`, + }, + time.Unix(1, 0)), + ) + + test(t, `cpu value="test\\\"" 
1000000000`, + NewTestPoint( + "cpu", + models.NewTags(map[string]string{}), + models.Fields{ + "value": `test\"`, + }, + time.Unix(1, 0)), + ) + + test(t, `cpu value="test\"" 1000000000`, + NewTestPoint( + "cpu", + models.NewTags(map[string]string{}), + models.Fields{ + "value": `test"`, + }, + time.Unix(1, 0)), + ) +} + +func TestParsePointWithBoolField(t *testing.T) { + test(t, `cpu,host=serverA,region=us-east true=true,t=t,T=T,TRUE=TRUE,True=True,false=false,f=f,F=F,FALSE=FALSE,False=False 1000000000`, + NewTestPoint( + "cpu", + models.NewTags(map[string]string{ + "host": "serverA", + "region": "us-east", + }), + models.Fields{ + "t": true, + "T": true, + "true": true, + "True": true, + "TRUE": true, + "f": false, + "F": false, + "false": false, + "False": false, + "FALSE": false, + }, + time.Unix(1, 0)), + ) +} + +func TestParsePointUnicodeString(t *testing.T) { + test(t, `cpu,host=serverA,region=us-east value="wè" 1000000000`, + NewTestPoint( + "cpu", + models.NewTags(map[string]string{ + "host": "serverA", + "region": "us-east", + }), + models.Fields{ + "value": "wè", + }, + time.Unix(1, 0)), + ) +} + +func TestParsePointNegativeTimestamp(t *testing.T) { + test(t, `cpu value=1 -1`, + NewTestPoint( + "cpu", + models.NewTags(map[string]string{}), + models.Fields{ + "value": 1.0, + }, + time.Unix(0, -1)), + ) +} + +func TestParsePointMaxTimestamp(t *testing.T) { + test(t, fmt.Sprintf(`cpu value=1 %d`, models.MaxNanoTime), + NewTestPoint( + "cpu", + models.NewTags(map[string]string{}), + models.Fields{ + "value": 1.0, + }, + time.Unix(0, models.MaxNanoTime)), + ) +} + +func TestParsePointMinTimestamp(t *testing.T) { + test(t, `cpu value=1 -9223372036854775806`, + NewTestPoint( + "cpu", + models.NewTags(map[string]string{}), + models.Fields{ + "value": 1.0, + }, + time.Unix(0, models.MinNanoTime)), + ) +} + +func TestParsePointInvalidTimestamp(t *testing.T) { + examples := []string{ + "cpu value=1 9223372036854775808", + "cpu value=1 -92233720368547758078", + "cpu value=1 -", + "cpu value=1 -/", + "cpu value=1 -1?", + "cpu value=1 1-", + "cpu value=1 9223372036854775807 12", + } + + for i, example := range examples { + _, err := models.ParsePointsString(example) + if err == nil { + t.Fatalf("[Example %d] ParsePoints failed: %v", i, err) + } + } +} + +func TestNewPointFloatWithoutDecimal(t *testing.T) { + test(t, `cpu value=1 1000000000`, + NewTestPoint( + "cpu", + models.NewTags(map[string]string{}), + models.Fields{ + "value": 1.0, + }, + time.Unix(1, 0)), + ) +} +func TestNewPointNegativeFloat(t *testing.T) { + test(t, `cpu value=-0.64 1000000000`, + NewTestPoint( + "cpu", + models.NewTags(map[string]string{}), + models.Fields{ + "value": -0.64, + }, + time.Unix(1, 0)), + ) +} + +func TestNewPointFloatNoDecimal(t *testing.T) { + test(t, `cpu value=1. 
1000000000`, + NewTestPoint( + "cpu", + models.NewTags(map[string]string{}), + models.Fields{ + "value": 1.0, + }, + time.Unix(1, 0)), + ) +} + +func TestNewPointFloatScientific(t *testing.T) { + test(t, `cpu value=6.632243e+06 1000000000`, + NewTestPoint( + "cpu", + models.NewTags(map[string]string{}), + models.Fields{ + "value": float64(6632243), + }, + time.Unix(1, 0)), + ) +} + +func TestNewPointLargeInteger(t *testing.T) { + test(t, `cpu value=6632243i 1000000000`, + NewTestPoint( + "cpu", + models.NewTags(map[string]string{}), + models.Fields{ + "value": int64(6632243), // if incorrectly encoded as a float, it would show up as 6.632243e+06 + }, + time.Unix(1, 0)), + ) +} + +func TestParsePointNaN(t *testing.T) { + _, err := models.ParsePointsString("cpu value=NaN 1000000000") + if err == nil { + t.Fatalf("ParsePoints expected error, got nil") + } + + _, err = models.ParsePointsString("cpu value=nAn 1000000000") + if err == nil { + t.Fatalf("ParsePoints expected error, got nil") + } + + _, err = models.ParsePointsString("cpu value=NaN") + if err == nil { + t.Fatalf("ParsePoints expected error, got nil") + } +} + +func TestNewPointLargeNumberOfTags(t *testing.T) { + tags := "" + for i := 0; i < 255; i++ { + tags += fmt.Sprintf(",tag%d=value%d", i, i) + } + + pt, err := models.ParsePointsString(fmt.Sprintf("cpu%s value=1", tags)) + if err != nil { + t.Fatalf("ParsePoints() with max tags failed: %v", err) + } + + if len(pt[0].Tags()) != 255 { + t.Fatalf("expected %d tags, got %d", 255, len(pt[0].Tags())) + } +} + +func TestParsePointIntsFloats(t *testing.T) { + pts, err := models.ParsePoints([]byte(`cpu,host=serverA,region=us-east int=10i,float=11.0,float2=12.1 1000000000`)) + if err != nil { + t.Fatalf(`ParsePoints() failed. got %s`, err) + } + + if exp := 1; len(pts) != exp { + t.Errorf("ParsePoint() len mismatch: got %v, exp %v", len(pts), exp) + } + pt := pts[0] + + fields, err := pt.Fields() + if err != nil { + t.Fatal(err) + } + if _, ok := fields["int"].(int64); !ok { + t.Errorf("ParsePoint() int field mismatch: got %T, exp %T", fields["int"], int64(10)) + } + + if _, ok := fields["float"].(float64); !ok { + t.Errorf("ParsePoint() float field mismatch: got %T, exp %T", fields["float64"], float64(11.0)) + } + + if _, ok := fields["float2"].(float64); !ok { + t.Errorf("ParsePoint() float field mismatch: got %T, exp %T", fields["float64"], float64(12.1)) + } +} + +func TestParsePointKeyUnsorted(t *testing.T) { + pts, err := models.ParsePoints([]byte("cpu,last=1,first=2 value=1i")) + if err != nil { + t.Fatalf(`ParsePoints() failed. got %s`, err) + } + + if exp := 1; len(pts) != exp { + t.Errorf("ParsePoint() len mismatch: got %v, exp %v", len(pts), exp) + } + pt := pts[0] + + if exp := "cpu,first=2,last=1"; string(pt.Key()) != exp { + t.Errorf("ParsePoint key not sorted. got %v, exp %v", string(pt.Key()), exp) + } +} + +func TestParsePointToString(t *testing.T) { + line := `cpu,host=serverA,region=us-east bool=false,float=11,float2=12.123,int=10i,str="string val" 1000000000` + pts, err := models.ParsePoints([]byte(line)) + if err != nil { + t.Fatalf(`ParsePoints() failed. 
got %s`, err) + } + if exp := 1; len(pts) != exp { + t.Errorf("ParsePoint() len mismatch: got %v, exp %v", len(pts), exp) + } + pt := pts[0] + + got := pt.String() + if line != got { + t.Errorf("ParsePoint() to string mismatch:\n got %v\n exp %v", got, line) + } + + pt = models.MustNewPoint("cpu", models.NewTags(map[string]string{"host": "serverA", "region": "us-east"}), + models.Fields{"int": 10, "float": float64(11.0), "float2": float64(12.123), "bool": false, "str": "string val"}, + time.Unix(1, 0)) + + got = pt.String() + if line != got { + t.Errorf("NewPoint() to string mismatch:\n got %v\n exp %v", got, line) + } +} + +func TestParsePointsWithPrecision(t *testing.T) { + tests := []struct { + name string + line string + precision string + exp string + }{ + { + name: "nanosecond by default", + line: `cpu,host=serverA,region=us-east value=1.0 946730096789012345`, + precision: "", + exp: "cpu,host=serverA,region=us-east value=1.0 946730096789012345", + }, + { + name: "nanosecond", + line: `cpu,host=serverA,region=us-east value=1.0 946730096789012345`, + precision: "n", + exp: "cpu,host=serverA,region=us-east value=1.0 946730096789012345", + }, + { + name: "microsecond", + line: `cpu,host=serverA,region=us-east value=1.0 946730096789012`, + precision: "u", + exp: "cpu,host=serverA,region=us-east value=1.0 946730096789012000", + }, + { + name: "millisecond", + line: `cpu,host=serverA,region=us-east value=1.0 946730096789`, + precision: "ms", + exp: "cpu,host=serverA,region=us-east value=1.0 946730096789000000", + }, + { + name: "second", + line: `cpu,host=serverA,region=us-east value=1.0 946730096`, + precision: "s", + exp: "cpu,host=serverA,region=us-east value=1.0 946730096000000000", + }, + { + name: "minute", + line: `cpu,host=serverA,region=us-east value=1.0 15778834`, + precision: "m", + exp: "cpu,host=serverA,region=us-east value=1.0 946730040000000000", + }, + { + name: "hour", + line: `cpu,host=serverA,region=us-east value=1.0 262980`, + precision: "h", + exp: "cpu,host=serverA,region=us-east value=1.0 946728000000000000", + }, + } + for _, test := range tests { + pts, err := models.ParsePointsWithPrecision([]byte(test.line), time.Now().UTC(), test.precision) + if err != nil { + t.Fatalf(`%s: ParsePoints() failed. 
got %s`, test.name, err) + } + if exp := 1; len(pts) != exp { + t.Errorf("%s: ParsePoint() len mismatch: got %v, exp %v", test.name, len(pts), exp) + } + pt := pts[0] + + got := pt.String() + if got != test.exp { + t.Errorf("%s: ParsePoint() to string mismatch:\n got %v\n exp %v", test.name, got, test.exp) + } + } +} + +func TestParsePointsWithPrecisionNoTime(t *testing.T) { + line := `cpu,host=serverA,region=us-east value=1.0` + tm, _ := time.Parse(time.RFC3339Nano, "2000-01-01T12:34:56.789012345Z") + tests := []struct { + name string + precision string + exp string + }{ + { + name: "no precision", + precision: "", + exp: "cpu,host=serverA,region=us-east value=1.0 946730096789012345", + }, + { + name: "nanosecond precision", + precision: "n", + exp: "cpu,host=serverA,region=us-east value=1.0 946730096789012345", + }, + { + name: "microsecond precision", + precision: "u", + exp: "cpu,host=serverA,region=us-east value=1.0 946730096789012000", + }, + { + name: "millisecond precision", + precision: "ms", + exp: "cpu,host=serverA,region=us-east value=1.0 946730096789000000", + }, + { + name: "second precision", + precision: "s", + exp: "cpu,host=serverA,region=us-east value=1.0 946730096000000000", + }, + { + name: "minute precision", + precision: "m", + exp: "cpu,host=serverA,region=us-east value=1.0 946730040000000000", + }, + { + name: "hour precision", + precision: "h", + exp: "cpu,host=serverA,region=us-east value=1.0 946728000000000000", + }, + } + + for _, test := range tests { + pts, err := models.ParsePointsWithPrecision([]byte(line), tm, test.precision) + if err != nil { + t.Fatalf(`%s: ParsePoints() failed. got %s`, test.name, err) + } + if exp := 1; len(pts) != exp { + t.Errorf("%s: ParsePoint() len mismatch: got %v, exp %v", test.name, len(pts), exp) + } + pt := pts[0] + + got := pt.String() + if got != test.exp { + t.Errorf("%s: ParsePoint() to string mismatch:\n got %v\n exp %v", test.name, got, test.exp) + } + } +} + +func TestParsePointsWithPrecisionComments(t *testing.T) { + tests := []struct { + name string + batch string + exp string + lenPoints int + }{ + { + name: "comment only", + batch: `# comment only`, + exp: "cpu,host=serverA,region=us-east value=1.0 946730096789012345", + lenPoints: 0, + }, + { + name: "point with comment above", + batch: `# a point is below +cpu,host=serverA,region=us-east value=1.0 946730096789012345`, + exp: "cpu,host=serverA,region=us-east value=1.0 946730096789012345", + lenPoints: 1, + }, + { + name: "point with comment below", + batch: `cpu,host=serverA,region=us-east value=1.0 946730096789012345 +# end of points`, + exp: "cpu,host=serverA,region=us-east value=1.0 946730096789012345", + lenPoints: 1, + }, + { + name: "indented comment", + batch: ` # a point is below +cpu,host=serverA,region=us-east value=1.0 946730096789012345`, + exp: "cpu,host=serverA,region=us-east value=1.0 946730096789012345", + lenPoints: 1, + }, + } + for _, test := range tests { + pts, err := models.ParsePointsWithPrecision([]byte(test.batch), time.Now().UTC(), "") + if err != nil { + t.Fatalf(`%s: ParsePoints() failed. 
got %s`, test.name, err) + } + pointsLength := len(pts) + if exp := test.lenPoints; pointsLength != exp { + t.Errorf("%s: ParsePoint() len mismatch: got %v, exp %v", test.name, pointsLength, exp) + } + + if pointsLength > 0 { + pt := pts[0] + + got := pt.String() + if got != test.exp { + t.Errorf("%s: ParsePoint() to string mismatch:\n got %v\n exp %v", test.name, got, test.exp) + } + } + } +} + +func TestNewPointEscaped(t *testing.T) { + // commas + pt := models.MustNewPoint("cpu,main", models.NewTags(map[string]string{"tag,bar": "value"}), models.Fields{"name,bar": 1.0}, time.Unix(0, 0)) + if exp := `cpu\,main,tag\,bar=value name\,bar=1 0`; pt.String() != exp { + t.Errorf("NewPoint().String() mismatch.\ngot %v\nexp %v", pt.String(), exp) + } + + // spaces + pt = models.MustNewPoint("cpu main", models.NewTags(map[string]string{"tag bar": "value"}), models.Fields{"name bar": 1.0}, time.Unix(0, 0)) + if exp := `cpu\ main,tag\ bar=value name\ bar=1 0`; pt.String() != exp { + t.Errorf("NewPoint().String() mismatch.\ngot %v\nexp %v", pt.String(), exp) + } + + // equals + pt = models.MustNewPoint("cpu=main", models.NewTags(map[string]string{"tag=bar": "value=foo"}), models.Fields{"name=bar": 1.0}, time.Unix(0, 0)) + if exp := `cpu=main,tag\=bar=value\=foo name\=bar=1 0`; pt.String() != exp { + t.Errorf("NewPoint().String() mismatch.\ngot %v\nexp %v", pt.String(), exp) + } +} + +func TestNewPointWithoutField(t *testing.T) { + _, err := models.NewPoint("cpu", models.NewTags(map[string]string{"tag": "bar"}), models.Fields{}, time.Unix(0, 0)) + if err == nil { + t.Fatalf(`NewPoint() expected error. got nil`) + } +} + +func TestNewPointUnhandledType(t *testing.T) { + // nil value + pt := models.MustNewPoint("cpu", nil, models.Fields{"value": nil}, time.Unix(0, 0)) + if exp := `cpu value= 0`; pt.String() != exp { + t.Errorf("NewPoint().String() mismatch.\ngot %v\nexp %v", pt.String(), exp) + } + + // unsupported type gets stored as string + now := time.Unix(0, 0).UTC() + pt = models.MustNewPoint("cpu", nil, models.Fields{"value": now}, time.Unix(0, 0)) + if exp := `cpu value="1970-01-01 00:00:00 +0000 UTC" 0`; pt.String() != exp { + t.Errorf("NewPoint().String() mismatch.\ngot %v\nexp %v", pt.String(), exp) + } + + fields, err := pt.Fields() + if err != nil { + t.Fatal(err) + } + if exp := "1970-01-01 00:00:00 +0000 UTC"; fields["value"] != exp { + t.Errorf("NewPoint().String() mismatch.\ngot %v\nexp %v", pt.String(), exp) + } +} + +func TestMakeKeyEscaped(t *testing.T) { + if exp, got := `cpu\ load`, models.MakeKey([]byte(`cpu\ load`), models.NewTags(map[string]string{})); string(got) != exp { + t.Errorf("MakeKey() mismatch.\ngot %v\nexp %v", got, exp) + } + + if exp, got := `cpu\ load`, models.MakeKey([]byte(`cpu load`), models.NewTags(map[string]string{})); string(got) != exp { + t.Errorf("MakeKey() mismatch.\ngot %v\nexp %v", got, exp) + } + + if exp, got := `cpu\,load`, models.MakeKey([]byte(`cpu\,load`), models.NewTags(map[string]string{})); string(got) != exp { + t.Errorf("MakeKey() mismatch.\ngot %v\nexp %v", got, exp) + } + + if exp, got := `cpu\,load`, models.MakeKey([]byte(`cpu,load`), models.NewTags(map[string]string{})); string(got) != exp { + t.Errorf("MakeKey() mismatch.\ngot %v\nexp %v", got, exp) + } + +} + +func TestPrecisionString(t *testing.T) { + tags := map[string]interface{}{"value": float64(1)} + tm, _ := time.Parse(time.RFC3339Nano, "2000-01-01T12:34:56.789012345Z") + tests := []struct { + name string + precision string + exp string + }{ + { + name: "no precision", + 
precision: "", + exp: "cpu value=1 946730096789012345", + }, + { + name: "nanosecond precision", + precision: "ns", + exp: "cpu value=1 946730096789012345", + }, + { + name: "microsecond precision", + precision: "u", + exp: "cpu value=1 946730096789012", + }, + { + name: "millisecond precision", + precision: "ms", + exp: "cpu value=1 946730096789", + }, + { + name: "second precision", + precision: "s", + exp: "cpu value=1 946730096", + }, + { + name: "minute precision", + precision: "m", + exp: "cpu value=1 15778834", + }, + { + name: "hour precision", + precision: "h", + exp: "cpu value=1 262980", + }, + } + + for _, test := range tests { + pt := models.MustNewPoint("cpu", nil, tags, tm) + act := pt.PrecisionString(test.precision) + + if act != test.exp { + t.Errorf("%s: PrecisionString() mismatch:\n actual: %v\n exp: %v", + test.name, act, test.exp) + } + } +} + +func TestRoundedString(t *testing.T) { + tags := map[string]interface{}{"value": float64(1)} + tm, _ := time.Parse(time.RFC3339Nano, "2000-01-01T12:34:56.789012345Z") + tests := []struct { + name string + precision time.Duration + exp string + }{ + { + name: "no precision", + precision: time.Duration(0), + exp: "cpu value=1 946730096789012345", + }, + { + name: "nanosecond precision", + precision: time.Nanosecond, + exp: "cpu value=1 946730096789012345", + }, + { + name: "microsecond precision", + precision: time.Microsecond, + exp: "cpu value=1 946730096789012000", + }, + { + name: "millisecond precision", + precision: time.Millisecond, + exp: "cpu value=1 946730096789000000", + }, + { + name: "second precision", + precision: time.Second, + exp: "cpu value=1 946730097000000000", + }, + { + name: "minute precision", + precision: time.Minute, + exp: "cpu value=1 946730100000000000", + }, + { + name: "hour precision", + precision: time.Hour, + exp: "cpu value=1 946731600000000000", + }, + } + + for _, test := range tests { + pt := models.MustNewPoint("cpu", nil, tags, tm) + act := pt.RoundedString(test.precision) + + if act != test.exp { + t.Errorf("%s: RoundedString() mismatch:\n actual: %v\n exp: %v", + test.name, act, test.exp) + } + } +} + +func TestParsePointsStringWithExtraBuffer(t *testing.T) { + b := make([]byte, 70*5000) + buf := bytes.NewBuffer(b) + key := "cpu,host=A,region=uswest" + buf.WriteString(fmt.Sprintf("%s value=%.3f 1\n", key, rand.Float64())) + + points, err := models.ParsePointsString(buf.String()) + if err != nil { + t.Fatalf("failed to write points: %s", err.Error()) + } + + pointKey := string(points[0].Key()) + + if len(key) != len(pointKey) { + t.Fatalf("expected length of both keys are same but got %d and %d", len(key), len(pointKey)) + } + + if key != pointKey { + t.Fatalf("expected both keys are same but got %s and %s", key, pointKey) + } +} + +func TestParsePointsQuotesInFieldKey(t *testing.T) { + buf := `cpu "a=1 +cpu value=2 1` + points, err := models.ParsePointsString(buf) + if err != nil { + t.Fatalf("failed to write points: %s", err.Error()) + } + + fields, err := points[0].Fields() + if err != nil { + t.Fatal(err) + } + value, ok := fields["\"a"] + if !ok { + t.Fatalf("expected to parse field '\"a'") + } + + if value != float64(1) { + t.Fatalf("expected field value to be 1, got %v", value) + } + + // The following input should not parse + buf = `cpu "\, '= "\ v=1.0` + _, err = models.ParsePointsString(buf) + if err == nil { + t.Fatalf("expected parsing failure but got no error") + } +} + +func TestParsePointsQuotesInTags(t *testing.T) { + buf := `t159,label=hey\ "ya a=1i,value=0i 
+t159,label=another a=2i,value=1i 1`
+ points, err := models.ParsePointsString(buf)
+ if err != nil {
+ t.Fatalf("failed to write points: %s", err.Error())
+ }
+
+ if len(points) != 2 {
+ t.Fatalf("expected 2 points, got %d", len(points))
+ }
+}
+
+func TestParsePointsBlankLine(t *testing.T) {
+ buf := `cpu value=1i 1000000000
+
+cpu value=2i 2000000000`
+ points, err := models.ParsePointsString(buf)
+ if err != nil {
+ t.Fatalf("failed to write points: %s", err.Error())
+ }
+
+ if len(points) != 2 {
+ t.Fatalf("expected 2 points, got %d", len(points))
+ }
+}
+
+func TestNewPointsWithBytesWithCorruptData(t *testing.T) {
+ corrupted := []byte{0, 0, 0, 3, 102, 111, 111, 0, 0, 0, 4, 61, 34, 65, 34, 1, 0, 0, 0, 14, 206, 86, 119, 24, 32, 72, 233, 168, 2, 148}
+ p, err := models.NewPointFromBytes(corrupted)
+ if p != nil || err == nil {
+ t.Fatalf("NewPointFromBytes: got: (%v, %v), expected: (nil, error)", p, err)
+ }
+}
+
+func TestNewPointsWithShortBuffer(t *testing.T) {
+ p, err := models.NewPointFromBytes([]byte{0, 0, 0, 3, 4})
+ if err != io.ErrShortBuffer {
+ t.Fatalf("NewPointFromBytes: got: (%v, %v), expected: (nil, error)", p, err)
+ }
+}
+
+func TestNewPointsRejectsEmptyFieldNames(t *testing.T) {
+ if _, err := models.NewPoint("foo", nil, models.Fields{"": 1}, time.Now()); err == nil {
+ t.Fatalf("new point with empty field name. got: nil, expected: error")
+ }
+}
+
+func TestNewPointsRejectsMaxKey(t *testing.T) {
+ var key string
+ // tsm field key is point key, separator (4 bytes) and field
+ for i := 0; i < models.MaxKeyLength-len("value")-4; i++ {
+ key += "a"
+ }
+
+ // Test max key len
+ if _, err := models.NewPoint(key, nil, models.Fields{"value": 1, "ok": 2.0}, time.Now()); err != nil {
+ t.Fatalf("new point with max key. got: %v, expected: nil", err)
+ }
+
+ if _, err := models.ParsePointsString(fmt.Sprintf("%v value=1,ok=2.0", key)); err != nil {
+ t.Fatalf("parse point with max key. got: %v, expected: nil", err)
+ }
+
+ // Test 1 byte over max key len
+ key += "a"
+ if _, err := models.NewPoint(key, nil, models.Fields{"value": 1, "ok": 2.0}, time.Now()); err == nil {
+ t.Fatalf("new point with max key. got: nil, expected: error")
+ }
+
+ if _, err := models.ParsePointsString(fmt.Sprintf("%v value=1,ok=2.0", key)); err == nil {
+ t.Fatalf("parse point with max key.
got: nil, expected: error") + } + +} + +func TestPoint_FieldIterator_Simple(t *testing.T) { + + p, err := models.ParsePoints([]byte(`m v=42i,f=42 36`)) + if err != nil { + t.Fatal(err) + } + + if len(p) != 1 { + t.Fatalf("wrong number of points, got %d, exp %d", len(p), 1) + } + + fi := p[0].FieldIterator() + + if !fi.Next() { + t.Fatal("field iterator terminated before first field") + } + + if fi.Type() != models.Integer { + t.Fatalf("'42i' should be an Integer, got %v", fi.Type()) + } + + iv, err := fi.IntegerValue() + if err != nil { + t.Fatal(err) + } + if exp, got := int64(42), iv; exp != got { + t.Fatalf("'42i' should be %d, got %d", exp, got) + } + + if !fi.Next() { + t.Fatalf("field iterator terminated before second field") + } + + if fi.Type() != models.Float { + t.Fatalf("'42' should be a Float, got %v", fi.Type()) + } + + fv, err := fi.FloatValue() + if err != nil { + t.Fatal(err) + } + if exp, got := 42.0, fv; exp != got { + t.Fatalf("'42' should be %f, got %f", exp, got) + } + + if fi.Next() { + t.Fatal("field iterator didn't terminate") + } +} + +func toFields(fi models.FieldIterator) models.Fields { + m := make(models.Fields) + for fi.Next() { + var v interface{} + var err error + switch fi.Type() { + case models.Float: + v, err = fi.FloatValue() + case models.Integer: + v, err = fi.IntegerValue() + case models.Unsigned: + v, err = fi.UnsignedValue() + case models.String: + v = fi.StringValue() + case models.Boolean: + v, err = fi.BooleanValue() + case models.Empty: + v = nil + default: + panic("unknown type") + } + if err != nil { + panic(err) + } + m[string(fi.FieldKey())] = v + } + return m +} + +func TestPoint_FieldIterator_FieldMap(t *testing.T) { + + points, err := models.ParsePointsString(` +m v=42 +m v=42i +m v="string" +m v=true +m v="string\"with\"escapes" +m v=42i,f=42,g=42.314,u=123u +m a=2i,b=3i,c=true,d="stuff",e=-0.23,f=123.456 +`) + + if err != nil { + t.Fatal("failed to parse test points:", err) + } + + for _, p := range points { + exp, err := p.Fields() + if err != nil { + t.Fatal(err) + } + got := toFields(p.FieldIterator()) + + if !reflect.DeepEqual(got, exp) { + t.Errorf("FieldIterator failed for %#q: got %#v, exp %#v", p.String(), got, exp) + } + } +} + +func TestEscapeStringField(t *testing.T) { + cases := []struct { + in string + expOut string + }{ + {in: "abcdefg", expOut: "abcdefg"}, + {in: `one double quote " .`, expOut: `one double quote \" .`}, + {in: `quote " then backslash \ .`, expOut: `quote \" then backslash \\ .`}, + {in: `backslash \ then quote " .`, expOut: `backslash \\ then quote \" .`}, + } + + for _, c := range cases { + // Unescapes as expected. 
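+ // Escaping must produce expOut; the parse below then checks that the escaped form unescapes back to c.in.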
+ got := models.EscapeStringField(c.in) + if got != c.expOut { + t.Errorf("unexpected result from EscapeStringField(%s)\ngot [%s]\nexp [%s]\n", c.in, got, c.expOut) + continue + } + + pointLine := fmt.Sprintf(`t s="%s"`, got) + test(t, pointLine, NewTestPoint( + "t", + models.NewTags(nil), + models.Fields{"s": c.in}, + time.Unix(0, 0), + )) + } +} + +func TestParseKeyBytes(t *testing.T) { + testCases := []struct { + input string + expectedName string + expectedTags map[string]string + }{ + {input: "m,k=v", expectedName: "m", expectedTags: map[string]string{"k": "v"}}, + {input: "m\\ q,k=v", expectedName: "m q", expectedTags: map[string]string{"k": "v"}}, + {input: "m,k\\ q=v", expectedName: "m", expectedTags: map[string]string{"k q": "v"}}, + {input: "m\\ q,k\\ q=v", expectedName: "m q", expectedTags: map[string]string{"k q": "v"}}, + } + + for _, testCase := range testCases { + t.Run(testCase.input, func(t *testing.T) { + name, tags := models.ParseKeyBytes([]byte(testCase.input)) + if !bytes.Equal([]byte(testCase.expectedName), name) { + t.Errorf("%s produced measurement %s but expected %s", testCase.input, string(name), testCase.expectedName) + } + if !tags.Equal(models.NewTags(testCase.expectedTags)) { + t.Errorf("%s produced tags %s but expected %s", testCase.input, tags.String(), models.NewTags(testCase.expectedTags).String()) + } + }) + } +} + +func TestParseName(t *testing.T) { + testCases := []struct { + input string + expectedName string + }{ + {input: "m,k=v", expectedName: "m"}, + {input: "m\\ q,k=v", expectedName: "m q"}, + } + + for _, testCase := range testCases { + t.Run(testCase.input, func(t *testing.T) { + name := models.ParseName([]byte(testCase.input)) + if !bytes.Equal([]byte(testCase.expectedName), name) { + t.Errorf("%s produced measurement %s but expected %s", testCase.input, string(name), testCase.expectedName) + } + }) + } +} + +func BenchmarkEscapeStringField_Plain(b *testing.B) { + s := "nothing special" + for i := 0; i < b.N; i++ { + sink = models.EscapeStringField(s) + } +} + +func BenchmarkEscapeString_Quotes(b *testing.B) { + s := `Hello, "world"` + for i := 0; i < b.N; i++ { + sink = models.EscapeStringField(s) + } +} + +func BenchmarkEscapeString_Backslashes(b *testing.B) { + s := `C:\windows\system32` + for i := 0; i < b.N; i++ { + sink = models.EscapeStringField(s) + } +} + +func BenchmarkEscapeString_QuotesAndBackslashes(b *testing.B) { + s1 := `a quote " then backslash \ .` + s2 := `a backslash \ then quote " .` + for i := 0; i < b.N; i++ { + sink = [...]string{models.EscapeStringField(s1), models.EscapeStringField(s2)} + } +} + +func BenchmarkParseTags(b *testing.B) { + tags := []byte("cpu,tag0=value0,tag1=value1,tag2=value2,tag3=value3,tag4=value4,tag5=value5") + for i := 0; i < b.N; i++ { + models.ParseTags(tags) + } +} + +func BenchmarkEscapeMeasurement(b *testing.B) { + benchmarks := []struct { + m []byte + }{ + {[]byte("this_is_a_test")}, + {[]byte("this,is,a,test")}, + } + + for _, bm := range benchmarks { + b.Run(string(bm.m), func(b *testing.B) { + b.ReportAllocs() + for i := 0; i < b.N; i++ { + models.EscapeMeasurement(bm.m) + } + }) + } +} + +func makeTags(key, val string, n int) models.Tags { + tags := make(models.Tags, n) + for i := range tags { + tags[i].Key = []byte(fmt.Sprintf("%s%03d", key, i)) + tags[i].Value = []byte(fmt.Sprintf("%s%03d", val, i)) + } + return tags +} + +func BenchmarkTags_HashKey(b *testing.B) { + benchmarks := []struct { + name string + t models.Tags + }{ + {"5 tags-no esc", makeTags("tag_foo", "val_bar", 5)}, + 
{"25 tags-no esc", makeTags("tag_foo", "val_bar", 25)}, + {"5 tags-esc", makeTags("tag foo", "val bar", 5)}, + {"25 tags-esc", makeTags("tag foo", "val bar", 25)}, + } + for _, bm := range benchmarks { + b.Run(bm.name, func(b *testing.B) { + b.ReportAllocs() + for i := 0; i < b.N; i++ { + bm.t.HashKey() + } + }) + } +} + +func BenchmarkMakeKey(b *testing.B) { + benchmarks := []struct { + m []byte + t models.Tags + }{ + {[]byte("this_is_a_test"), nil}, + {[]byte("this,is,a,test"), nil}, + {[]byte(`this\ is\ a\ test`), nil}, + + {[]byte("this_is_a_test"), makeTags("tag_foo", "val_bar", 8)}, + {[]byte("this,is,a,test"), makeTags("tag_foo", "val_bar", 8)}, + {[]byte("this_is_a_test"), makeTags("tag_foo", "val bar", 8)}, + {[]byte("this,is,a,test"), makeTags("tag_foo", "val bar", 8)}, + } + + for _, bm := range benchmarks { + b.Run(string(bm.m), func(b *testing.B) { + b.ReportAllocs() + for i := 0; i < b.N; i++ { + models.MakeKey(bm.m, bm.t) + } + }) + } +} + +func init() { + // Force uint support to be enabled for testing. + models.EnableUintSupport() +} diff --git a/vendor/github.com/influxdata/influxdb/models/rows.go b/vendor/github.com/influxdata/influxdb/models/rows.go new file mode 100644 index 0000000..c087a48 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/models/rows.go @@ -0,0 +1,62 @@ +package models + +import ( + "sort" +) + +// Row represents a single row returned from the execution of a statement. +type Row struct { + Name string `json:"name,omitempty"` + Tags map[string]string `json:"tags,omitempty"` + Columns []string `json:"columns,omitempty"` + Values [][]interface{} `json:"values,omitempty"` + Partial bool `json:"partial,omitempty"` +} + +// SameSeries returns true if r contains values for the same series as o. +func (r *Row) SameSeries(o *Row) bool { + return r.tagsHash() == o.tagsHash() && r.Name == o.Name +} + +// tagsHash returns a hash of tag key/value pairs. +func (r *Row) tagsHash() uint64 { + h := NewInlineFNV64a() + keys := r.tagsKeys() + for _, k := range keys { + h.Write([]byte(k)) + h.Write([]byte(r.Tags[k])) + } + return h.Sum64() +} + +// tagKeys returns a sorted list of tag keys. +func (r *Row) tagsKeys() []string { + a := make([]string, 0, len(r.Tags)) + for k := range r.Tags { + a = append(a, k) + } + sort.Strings(a) + return a +} + +// Rows represents a collection of rows. Rows implements sort.Interface. +type Rows []*Row + +// Len implements sort.Interface. +func (p Rows) Len() int { return len(p) } + +// Less implements sort.Interface. +func (p Rows) Less(i, j int) bool { + // Sort by name first. + if p[i].Name != p[j].Name { + return p[i].Name < p[j].Name + } + + // Sort by tag set hash. Tags don't have a meaningful sort order so we + // just compute a hash and sort by that instead. This allows the tests + // to receive rows in a predictable order every time. + return p[i].tagsHash() < p[j].tagsHash() +} + +// Swap implements sort.Interface. +func (p Rows) Swap(i, j int) { p[i], p[j] = p[j], p[i] } diff --git a/vendor/github.com/influxdata/influxdb/models/statistic.go b/vendor/github.com/influxdata/influxdb/models/statistic.go new file mode 100644 index 0000000..553e9d0 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/models/statistic.go @@ -0,0 +1,42 @@ +package models + +// Statistic is the representation of a statistic used by the monitoring service. 
+type Statistic struct { + Name string `json:"name"` + Tags map[string]string `json:"tags"` + Values map[string]interface{} `json:"values"` +} + +// NewStatistic returns an initialized Statistic. +func NewStatistic(name string) Statistic { + return Statistic{ + Name: name, + Tags: make(map[string]string), + Values: make(map[string]interface{}), + } +} + +// StatisticTags is a map that can be merged with others without causing +// mutations to either map. +type StatisticTags map[string]string + +// Merge creates a new map containing the merged contents of tags and t. +// If both tags and the receiver map contain the same key, the value in tags +// is used in the resulting map. +// +// Merge always returns a usable map. +func (t StatisticTags) Merge(tags map[string]string) map[string]string { + // Add everything in tags to the result. + out := make(map[string]string, len(tags)) + for k, v := range tags { + out[k] = v + } + + // Only add values from t that don't appear in tags. + for k, v := range t { + if _, ok := tags[k]; !ok { + out[k] = v + } + } + return out +} diff --git a/vendor/github.com/influxdata/influxdb/models/statistic_test.go b/vendor/github.com/influxdata/influxdb/models/statistic_test.go new file mode 100644 index 0000000..918c991 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/models/statistic_test.go @@ -0,0 +1,55 @@ +package models_test + +import ( + "reflect" + "testing" + + "github.com/influxdata/influxdb/models" +) + +func TestTags_Merge(t *testing.T) { + examples := []struct { + Base map[string]string + Arg map[string]string + Result map[string]string + }{ + { + Base: nil, + Arg: nil, + Result: map[string]string{}, + }, + { + Base: nil, + Arg: map[string]string{"foo": "foo"}, + Result: map[string]string{"foo": "foo"}, + }, + { + Base: map[string]string{"foo": "foo"}, + Arg: nil, + Result: map[string]string{"foo": "foo"}, + }, + { + Base: map[string]string{"foo": "foo"}, + Arg: map[string]string{"bar": "bar"}, + Result: map[string]string{"foo": "foo", "bar": "bar"}, + }, + { + Base: map[string]string{"foo": "foo", "bar": "bar"}, + Arg: map[string]string{"zoo": "zoo"}, + Result: map[string]string{"foo": "foo", "bar": "bar", "zoo": "zoo"}, + }, + { + Base: map[string]string{"foo": "foo", "bar": "bar"}, + Arg: map[string]string{"bar": "newbar"}, + Result: map[string]string{"foo": "foo", "bar": "newbar"}, + }, + } + + for i, example := range examples { + i++ + result := models.StatisticTags(example.Base).Merge(example.Arg) + if got, exp := result, example.Result; !reflect.DeepEqual(got, exp) { + t.Errorf("[Example %d] got %#v, expected %#v", i, got, exp) + } + } +} diff --git a/vendor/github.com/influxdata/influxdb/models/time.go b/vendor/github.com/influxdata/influxdb/models/time.go new file mode 100644 index 0000000..e98f2cb --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/models/time.go @@ -0,0 +1,74 @@ +package models + +// Helper time methods since parsing time can easily overflow and we only support a +// specific time range. + +import ( + "fmt" + "math" + "time" +) + +const ( + // MinNanoTime is the minumum time that can be represented. + // + // 1677-09-21 00:12:43.145224194 +0000 UTC + // + // The two lowest minimum integers are used as sentinel values. The + // minimum value needs to be used as a value lower than any other value for + // comparisons and another separate value is needed to act as a sentinel + // default value that is unusable by the user, but usable internally. 
+ // Because these two values need to be used for a special purpose, we do
+ // not allow users to write points at these two times.
+ MinNanoTime = int64(math.MinInt64) + 2
+
+ // MaxNanoTime is the maximum time that can be represented.
+ //
+ // 2262-04-11 23:47:16.854775806 +0000 UTC
+ //
+ // The highest time represented by a nanosecond needs to be used for an
+ // exclusive range in the shard group, so the maximum time needs to be one
+ // less than the possible maximum number of nanoseconds representable by an
+ // int64 so that we don't lose a point at that one time.
+ MaxNanoTime = int64(math.MaxInt64) - 1
+)
+
+var (
+ minNanoTime = time.Unix(0, MinNanoTime).UTC()
+ maxNanoTime = time.Unix(0, MaxNanoTime).UTC()
+
+ // ErrTimeOutOfRange gets returned when time is out of the representable range using int64 nanoseconds since the epoch.
+ ErrTimeOutOfRange = fmt.Errorf("time outside range %d - %d", MinNanoTime, MaxNanoTime)
+)
+
+// SafeCalcTime safely calculates the time given. Will return error if the time is outside the
+// supported range.
+func SafeCalcTime(timestamp int64, precision string) (time.Time, error) {
+ mult := GetPrecisionMultiplier(precision)
+ if t, ok := safeSignedMult(timestamp, mult); ok {
+ tme := time.Unix(0, t).UTC()
+ return tme, CheckTime(tme)
+ }
+
+ return time.Time{}, ErrTimeOutOfRange
+}
+
+// CheckTime checks that a time is within the safe range.
+func CheckTime(t time.Time) error {
+ if t.Before(minNanoTime) || t.After(maxNanoTime) {
+ return ErrTimeOutOfRange
+ }
+ return nil
+}
+
+// Perform the multiplication and check to make sure it didn't overflow.
+func safeSignedMult(a, b int64) (int64, bool) {
+ if a == 0 || b == 0 || a == 1 || b == 1 {
+ return a * b, true
+ }
+ if a == MinNanoTime || b == MaxNanoTime {
+ return 0, false
+ }
+ c := a * b
+ return c, c/b == a
+}
diff --git a/vendor/github.com/influxdata/influxdb/models/uint_support.go b/vendor/github.com/influxdata/influxdb/models/uint_support.go
new file mode 100644
index 0000000..18d1ca0
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/models/uint_support.go
@@ -0,0 +1,7 @@
+// +build uint uint64
+
+package models
+
+func init() {
+ EnableUintSupport()
+}
diff --git a/vendor/github.com/influxdata/influxdb/monitor/README.md b/vendor/github.com/influxdata/influxdb/monitor/README.md
new file mode 100644
index 0000000..8eb932b
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/monitor/README.md
@@ -0,0 +1,46 @@
+# System Monitoring
+_This functionality should be considered experimental and is subject to change._
+
+_System Monitoring_ means all statistical and diagnostic information made available to the user of an InfluxDB system, about the system itself. Its purpose is to assist with troubleshooting and performance analysis of the database itself.
+
+## Statistics vs. Diagnostics
+A distinction is made between _statistics_ and _diagnostics_ for the purposes of monitoring. Generally a statistical quantity is something that is being counted, and for which it makes sense to store persistently for historical analysis. Diagnostic information is not necessarily numerical, and may not make sense to store.
+
+An example of statistical information would be the number of points received over UDP, or the number of queries executed. Examples of diagnostic information would be a list of current Graphite TCP connections, the version of InfluxDB, or the uptime of the process.
+
+## System Statistics
+`SHOW STATS [FOR <module>]` displays statistics about subsystems within the running `influxd` process.
Statistics include points received, points indexed, bytes written to disk, TCP connections handled, etc. These statistics are all zero when the InfluxDB process starts. If _module_ is specified, it must be single-quoted. For example `SHOW STATS FOR 'httpd'`.
+
+All statistics are written, by default, by each node to a "monitor" database within the InfluxDB system, allowing analysis of aggregated statistical data using the standard InfluxQL language. This allows users to track the performance of their system. Importantly, this allows cluster-level statistics to be viewed, since by querying the monitor database, statistics from all nodes may be queried. This can be a very powerful approach for troubleshooting your InfluxDB system and understanding its behaviour.
+
+## System Diagnostics
+`SHOW DIAGNOSTICS [FOR <module>]` displays various diagnostic information about the `influxd` process. This information is not stored persistently within the InfluxDB system. If _module_ is specified, it must be single-quoted. For example `SHOW DIAGNOSTICS FOR 'build'`.
+
+## Standard expvar support
+All statistical information is available at the HTTP API endpoint `/debug/vars`, in [expvar](https://golang.org/pkg/expvar/) format, allowing external systems to monitor an InfluxDB node. By default, the full path to this endpoint is `http://localhost:8086/debug/vars`.
+
+## Configuration
+The `monitor` module allows the following configuration:
+
+ * Whether to write statistical and diagnostic information to an InfluxDB system. This is enabled by default.
+ * The name of the database to which this information should be written. Defaults to `_internal`. The information is written to the default retention policy for the given database.
+ * The name of the retention policy, along with full configuration control of the retention policy, if the default retention policy is not suitable.
+ * The rate at which this information should be written. The default rate is once every 10 seconds.
+
+# Design and Implementation
+
+A new module named `monitor` supports all basic statistics and diagnostic functionality. This includes:
+
+ * Allowing other modules to register statistics and diagnostics information, allowing it to be accessed on demand by the `monitor` module.
+ * Serving the statistics and diagnostic information to the user, in response to commands such as `SHOW DIAGNOSTICS`.
+ * Exposing standard Go runtime information such as garbage collection statistics.
+ * Making all collected expvar data available via HTTP, for collection by 3rd-party tools.
+ * Writing the statistical information to the "monitor" database, for query purposes.
+
+## Registering statistics and diagnostics
+
+To export statistical information with the `monitor` system, a service should implement the `monitor.Reporter` interface. Services added to the Server will be automatically added to the list of statistics returned. Any service that is not added to the `Services` slice will need to modify the `Server`'s `Statistics(map[string]string)` method to aggregate the call to the service's `Statistics(map[string]string)` method so they are combined into a single response. The `Statistics(map[string]string)` method should return a statistics slice with the passed-in tags included. The statistics should be kept inside an internal structure and should be accessed in a thread-safe way. It is common to create a struct for holding the statistics and to use `sync/atomic` instead of locking. If using `sync/atomic`, be sure to align the values in the struct so it works properly on `i386`.
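+
+As a rough sketch only (the `QueryExecutor` service, its counter names, and the `queryExecutor` measurement name below are hypothetical illustrations, not part of this patch), a reporter might keep its counters in a dedicated struct, update them with `sync/atomic`, and merge the supplied tags into its result:
+
+```go
+package example
+
+import (
+	"sync/atomic"
+
+	"github.com/influxdata/influxdb/models"
+)
+
+// queryStatistics holds the counters for the hypothetical service. The int64
+// fields come first so atomic access stays 8-byte aligned on i386.
+type queryStatistics struct {
+	QueriesExecuted int64
+	QueriesFailed   int64
+}
+
+// QueryExecutor is a hypothetical service that reports internal statistics.
+type QueryExecutor struct {
+	stats       *queryStatistics
+	defaultTags models.StatisticTags
+}
+
+// NewQueryExecutor returns a QueryExecutor ready to report statistics.
+func NewQueryExecutor() *QueryExecutor {
+	return &QueryExecutor{
+		stats:       &queryStatistics{},
+		defaultTags: models.StatisticTags{},
+	}
+}
+
+// Statistics implements the monitor.Reporter interface: it returns a slice of
+// statistics with the caller-supplied tags merged into each entry.
+func (e *QueryExecutor) Statistics(tags map[string]string) []models.Statistic {
+	return []models.Statistic{{
+		Name: "queryExecutor",
+		Tags: e.defaultTags.Merge(tags),
+		Values: map[string]interface{}{
+			"queriesExecuted": atomic.LoadInt64(&e.stats.QueriesExecuted),
+			"queriesFailed":   atomic.LoadInt64(&e.stats.QueriesFailed),
+		},
+	}}
+}
+```
+
+Reading the counters with `atomic.LoadInt64` (and incrementing them elsewhere with `atomic.AddInt64`) keeps `Statistics` safe to call from the monitor's goroutine without taking a lock.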
+ +To register diagnostic information, `monitor.RegisterDiagnosticsClient` is called, passing a `influxdb.monitor.DiagsClient` object to `monitor`. Implementing the `influxdb.monitor.DiagsClient` interface requires that your component have function returning diagnostic information in specific form, so that it can be displayed by the `monitor` system. + +Statistical information is reset to its initial state when a server is restarted. diff --git a/vendor/github.com/influxdata/influxdb/monitor/build_info.go b/vendor/github.com/influxdata/influxdb/monitor/build_info.go new file mode 100644 index 0000000..d9d874c --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/monitor/build_info.go @@ -0,0 +1,22 @@ +package monitor + +import "github.com/influxdata/influxdb/monitor/diagnostics" + +// build holds information of the build of the current executable. +type build struct { + Version string + Commit string + Branch string + Time string +} + +func (b *build) Diagnostics() (*diagnostics.Diagnostics, error) { + d := map[string]interface{}{ + "Version": b.Version, + "Commit": b.Commit, + "Branch": b.Branch, + "Build Time": b.Time, + } + + return diagnostics.RowFromMap(d), nil +} diff --git a/vendor/github.com/influxdata/influxdb/monitor/build_info_test.go b/vendor/github.com/influxdata/influxdb/monitor/build_info_test.go new file mode 100644 index 0000000..851ed3b --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/monitor/build_info_test.go @@ -0,0 +1,43 @@ +package monitor_test + +import ( + "reflect" + "testing" + + "github.com/influxdata/influxdb/monitor" +) + +func TestDiagnostics_BuildInfo(t *testing.T) { + s := monitor.New(nil, monitor.Config{}) + s.Version = "1.2.0" + s.Commit = "b7bb7e8359642b6e071735b50ae41f5eb343fd42" + s.Branch = "1.2" + s.BuildTime = "10m30s" + + if err := s.Open(); err != nil { + t.Fatalf("unexpected error: %s", err) + } + defer s.Close() + + d, err := s.Diagnostics() + if err != nil { + t.Errorf("unexpected error: %s", err) + return + } + + diags, ok := d["build"] + if !ok { + t.Error("no diagnostics found for 'build'") + return + } + + if got, exp := diags.Columns, []string{"Branch", "Build Time", "Commit", "Version"}; !reflect.DeepEqual(got, exp) { + t.Errorf("unexpected columns: got=%v exp=%v", got, exp) + } + + if got, exp := diags.Rows, [][]interface{}{ + []interface{}{"1.2", "10m30s", "b7bb7e8359642b6e071735b50ae41f5eb343fd42", "1.2.0"}, + }; !reflect.DeepEqual(got, exp) { + t.Errorf("unexpected rows: got=%v exp=%v", got, exp) + } +} diff --git a/vendor/github.com/influxdata/influxdb/monitor/config.go b/vendor/github.com/influxdata/influxdb/monitor/config.go new file mode 100644 index 0000000..2741fc9 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/monitor/config.go @@ -0,0 +1,63 @@ +package monitor + +import ( + "errors" + "time" + + "github.com/influxdata/influxdb/monitor/diagnostics" + "github.com/influxdata/influxdb/toml" +) + +const ( + // DefaultStoreEnabled is whether the system writes gathered information in + // an InfluxDB system for historical analysis. + DefaultStoreEnabled = true + + // DefaultStoreDatabase is the name of the database where gathered information is written. + DefaultStoreDatabase = "_internal" + + // DefaultStoreInterval is the period between storing gathered information. + DefaultStoreInterval = 10 * time.Second +) + +// Config represents the configuration for the monitor service. 
+type Config struct { + StoreEnabled bool `toml:"store-enabled"` + StoreDatabase string `toml:"store-database"` + StoreInterval toml.Duration `toml:"store-interval"` +} + +// NewConfig returns an instance of Config with defaults. +func NewConfig() Config { + return Config{ + StoreEnabled: true, + StoreDatabase: DefaultStoreDatabase, + StoreInterval: toml.Duration(DefaultStoreInterval), + } +} + +// Validate validates that the configuration is acceptable. +func (c Config) Validate() error { + if c.StoreInterval <= 0 { + return errors.New("monitor store interval must be positive") + } + if c.StoreDatabase == "" { + return errors.New("monitor store database name must not be empty") + } + return nil +} + +// Diagnostics returns a diagnostics representation of a subset of the Config. +func (c Config) Diagnostics() (*diagnostics.Diagnostics, error) { + if !c.StoreEnabled { + return diagnostics.RowFromMap(map[string]interface{}{ + "store-enabled": false, + }), nil + } + + return diagnostics.RowFromMap(map[string]interface{}{ + "store-enabled": true, + "store-database": c.StoreDatabase, + "store-interval": c.StoreInterval, + }), nil +} diff --git a/vendor/github.com/influxdata/influxdb/monitor/config_test.go b/vendor/github.com/influxdata/influxdb/monitor/config_test.go new file mode 100644 index 0000000..e07e80f --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/monitor/config_test.go @@ -0,0 +1,52 @@ +package monitor_test + +import ( + "testing" + "time" + + "github.com/BurntSushi/toml" + "github.com/influxdata/influxdb/monitor" +) + +func TestConfig_Parse(t *testing.T) { + // Parse configuration. + var c monitor.Config + if _, err := toml.Decode(` +store-enabled=true +store-database="the_db" +store-interval="10m" +`, &c); err != nil { + t.Fatal(err) + } + + // Validate configuration. + if !c.StoreEnabled { + t.Fatalf("unexpected store-enabled: %v", c.StoreEnabled) + } else if c.StoreDatabase != "the_db" { + t.Fatalf("unexpected store-database: %s", c.StoreDatabase) + } else if time.Duration(c.StoreInterval) != 10*time.Minute { + t.Fatalf("unexpected store-interval: %s", c.StoreInterval) + } +} + +func TestConfig_Validate(t *testing.T) { + // NewConfig must validate correctly. + c := monitor.NewConfig() + if err := c.Validate(); err != nil { + t.Fatalf("unexpected validation error: %s", err) + } + + // Non-positive duration is invalid. + c = monitor.NewConfig() + c.StoreInterval *= 0 + if err := c.Validate(); err == nil { + t.Fatalf("unexpected successful validation for %#v", c) + } + + // Empty database is invalid. + c = monitor.NewConfig() + c.StoreDatabase = "" + if err := c.Validate(); err == nil { + t.Fatalf("unexpected successful validation for %#v", c) + } +} diff --git a/vendor/github.com/influxdata/influxdb/monitor/diagnostics/diagnostics.go b/vendor/github.com/influxdata/influxdb/monitor/diagnostics/diagnostics.go new file mode 100644 index 0000000..81396a7 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/monitor/diagnostics/diagnostics.go @@ -0,0 +1,64 @@ +// Package diagnostics provides the diagnostics type so that +// other packages can provide diagnostics without depending on the monitor package. +package diagnostics // import "github.com/influxdata/influxdb/monitor/diagnostics" + +import "sort" + +// Client is the interface modules implement if they register diagnostics with monitor. +type Client interface { + Diagnostics() (*Diagnostics, error) +} + +// The ClientFunc type is an adapter to allow the use of +// ordinary functions as Diagnostics clients. 
+type ClientFunc func() (*Diagnostics, error) + +// Diagnostics calls f(). +func (f ClientFunc) Diagnostics() (*Diagnostics, error) { + return f() +} + +// Diagnostics represents a table of diagnostic information. The first value +// is the name of the columns, the second is a slice of interface slices containing +// the values for each column, by row. This information is never written to an InfluxDB +// system and is display-only. An example showing, say, connections follows: +// +// source_ip source_port dest_ip dest_port +// 182.1.0.2 2890 127.0.0.1 38901 +// 174.33.1.2 2924 127.0.0.1 38902 +type Diagnostics struct { + Columns []string + Rows [][]interface{} +} + +// NewDiagnostic initialises a new Diagnostics with the specified columns. +func NewDiagnostics(columns []string) *Diagnostics { + return &Diagnostics{ + Columns: columns, + Rows: make([][]interface{}, 0), + } +} + +// AddRow appends the provided row to the Diagnostics' rows. +func (d *Diagnostics) AddRow(r []interface{}) { + d.Rows = append(d.Rows, r) +} + +// RowFromMap returns a new one-row Diagnostics from a map. +func RowFromMap(m map[string]interface{}) *Diagnostics { + // Display columns in deterministic order. + sortedKeys := make([]string, 0, len(m)) + for k := range m { + sortedKeys = append(sortedKeys, k) + } + sort.Strings(sortedKeys) + + d := NewDiagnostics(sortedKeys) + row := make([]interface{}, len(sortedKeys)) + for i, k := range sortedKeys { + row[i] = m[k] + } + d.AddRow(row) + + return d +} diff --git a/vendor/github.com/influxdata/influxdb/monitor/go_runtime.go b/vendor/github.com/influxdata/influxdb/monitor/go_runtime.go new file mode 100644 index 0000000..3499ac6 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/monitor/go_runtime.go @@ -0,0 +1,21 @@ +package monitor + +import ( + "runtime" + + "github.com/influxdata/influxdb/monitor/diagnostics" +) + +// goRuntime captures Go runtime diagnostics. 
+type goRuntime struct{} + +func (g *goRuntime) Diagnostics() (*diagnostics.Diagnostics, error) { + d := map[string]interface{}{ + "GOARCH": runtime.GOARCH, + "GOOS": runtime.GOOS, + "GOMAXPROCS": runtime.GOMAXPROCS(-1), + "version": runtime.Version(), + } + + return diagnostics.RowFromMap(d), nil +} diff --git a/vendor/github.com/influxdata/influxdb/monitor/go_runtime_test.go b/vendor/github.com/influxdata/influxdb/monitor/go_runtime_test.go new file mode 100644 index 0000000..dc52b66 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/monitor/go_runtime_test.go @@ -0,0 +1,39 @@ +package monitor_test + +import ( + "reflect" + "runtime" + "testing" + + "github.com/influxdata/influxdb/monitor" +) + +func TestDiagnostics_GoRuntime(t *testing.T) { + s := monitor.New(nil, monitor.Config{}) + if err := s.Open(); err != nil { + t.Fatalf("unexpected error: %s", err) + } + defer s.Close() + + d, err := s.Diagnostics() + if err != nil { + t.Errorf("unexpected error: %s", err) + return + } + + diags, ok := d["runtime"] + if !ok { + t.Error("no diagnostics found for 'runtime'") + return + } + + if got, exp := diags.Columns, []string{"GOARCH", "GOMAXPROCS", "GOOS", "version"}; !reflect.DeepEqual(got, exp) { + t.Errorf("unexpected columns: got=%v exp=%v", got, exp) + } + + if got, exp := diags.Rows, [][]interface{}{ + []interface{}{runtime.GOARCH, runtime.GOMAXPROCS(-1), runtime.GOOS, runtime.Version()}, + }; !reflect.DeepEqual(got, exp) { + t.Errorf("unexpected rows: got=%v exp=%v", got, exp) + } +} diff --git a/vendor/github.com/influxdata/influxdb/monitor/network.go b/vendor/github.com/influxdata/influxdb/monitor/network.go new file mode 100644 index 0000000..21ab178 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/monitor/network.go @@ -0,0 +1,23 @@ +package monitor + +import ( + "os" + + "github.com/influxdata/influxdb/monitor/diagnostics" +) + +// network captures network diagnostics. 
+type network struct{} + +func (n *network) Diagnostics() (*diagnostics.Diagnostics, error) { + h, err := os.Hostname() + if err != nil { + return nil, err + } + + d := map[string]interface{}{ + "hostname": h, + } + + return diagnostics.RowFromMap(d), nil +} diff --git a/vendor/github.com/influxdata/influxdb/monitor/network_test.go b/vendor/github.com/influxdata/influxdb/monitor/network_test.go new file mode 100644 index 0000000..0615e0a --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/monitor/network_test.go @@ -0,0 +1,44 @@ +package monitor_test + +import ( + "os" + "reflect" + "testing" + + "github.com/influxdata/influxdb/monitor" +) + +func TestDiagnostics_Network(t *testing.T) { + hostname, err := os.Hostname() + if err != nil { + t.Fatalf("unexpected error retrieving hostname: %s", err) + } + + s := monitor.New(nil, monitor.Config{}) + if err := s.Open(); err != nil { + t.Fatalf("unexpected error: %s", err) + } + defer s.Close() + + d, err := s.Diagnostics() + if err != nil { + t.Errorf("unexpected error: %s", err) + return + } + + diags, ok := d["network"] + if !ok { + t.Error("no diagnostics found for 'network'") + return + } + + if got, exp := diags.Columns, []string{"hostname"}; !reflect.DeepEqual(got, exp) { + t.Errorf("unexpected columns: got=%v exp=%v", got, exp) + } + + if got, exp := diags.Rows, [][]interface{}{ + []interface{}{hostname}, + }; !reflect.DeepEqual(got, exp) { + t.Errorf("unexpected rows: got=%v exp=%v", got, exp) + } +} diff --git a/vendor/github.com/influxdata/influxdb/monitor/reporter.go b/vendor/github.com/influxdata/influxdb/monitor/reporter.go new file mode 100644 index 0000000..912f9a3 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/monitor/reporter.go @@ -0,0 +1,10 @@ +package monitor + +import "github.com/influxdata/influxdb/models" + +// Reporter is an interface for gathering internal statistics. +type Reporter interface { + // Statistics returns the statistics for the reporter, + // with the given tags merged into the result. + Statistics(tags map[string]string) []models.Statistic +} diff --git a/vendor/github.com/influxdata/influxdb/monitor/service.go b/vendor/github.com/influxdata/influxdb/monitor/service.go new file mode 100644 index 0000000..e88d446 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/monitor/service.go @@ -0,0 +1,501 @@ +// Package monitor provides a service and associated functionality +// for InfluxDB to self-monitor internal statistics and diagnostics. +package monitor // import "github.com/influxdata/influxdb/monitor" + +import ( + "errors" + "expvar" + "fmt" + "os" + "runtime" + "sort" + "strconv" + "sync" + "time" + + "github.com/influxdata/influxdb/logger" + "github.com/influxdata/influxdb/models" + "github.com/influxdata/influxdb/monitor/diagnostics" + "github.com/influxdata/influxdb/services/meta" + "go.uber.org/zap" +) + +// Policy constants. +const ( + // Name of the retention policy used by the monitor service. + MonitorRetentionPolicy = "monitor" + + // Duration of the monitor retention policy. + MonitorRetentionPolicyDuration = 7 * 24 * time.Hour + + // Default replication factor to set on the monitor retention policy. + MonitorRetentionPolicyReplicaN = 1 +) + +// Monitor represents an instance of the monitor system. +type Monitor struct { + // Build information for diagnostics. 
+ Version string + Commit string + Branch string + BuildTime string + + wg sync.WaitGroup + + mu sync.RWMutex + globalTags map[string]string + diagRegistrations map[string]diagnostics.Client + reporter Reporter + done chan struct{} + storeCreated bool + storeEnabled bool + + storeDatabase string + storeRetentionPolicy string + storeInterval time.Duration + + MetaClient interface { + CreateDatabaseWithRetentionPolicy(name string, spec *meta.RetentionPolicySpec) (*meta.DatabaseInfo, error) + Database(name string) *meta.DatabaseInfo + } + + // Writer for pushing stats back into the database. + PointsWriter PointsWriter + + Logger *zap.Logger +} + +// PointsWriter is a simplified interface for writing the points the monitor gathers. +type PointsWriter interface { + WritePoints(database, retentionPolicy string, points models.Points) error +} + +// New returns a new instance of the monitor system. +func New(r Reporter, c Config) *Monitor { + return &Monitor{ + globalTags: make(map[string]string), + diagRegistrations: make(map[string]diagnostics.Client), + reporter: r, + storeEnabled: c.StoreEnabled, + storeDatabase: c.StoreDatabase, + storeInterval: time.Duration(c.StoreInterval), + storeRetentionPolicy: MonitorRetentionPolicy, + Logger: zap.NewNop(), + } +} + +// open returns whether the monitor service is open. +func (m *Monitor) open() bool { + m.mu.Lock() + defer m.mu.Unlock() + return m.done != nil +} + +// Open opens the monitoring system, using the given clusterID, node ID, and hostname +// for identification purpose. +func (m *Monitor) Open() error { + if m.open() { + m.Logger.Info("Monitor is already open") + return nil + } + + m.Logger.Info("Starting monitor service") + + // Self-register various stats and diagnostics. + m.RegisterDiagnosticsClient("build", &build{ + Version: m.Version, + Commit: m.Commit, + Branch: m.Branch, + Time: m.BuildTime, + }) + m.RegisterDiagnosticsClient("runtime", &goRuntime{}) + m.RegisterDiagnosticsClient("network", &network{}) + m.RegisterDiagnosticsClient("system", &system{}) + + m.mu.Lock() + m.done = make(chan struct{}) + m.mu.Unlock() + + // If enabled, record stats in a InfluxDB system. + if m.storeEnabled { + hostname, _ := os.Hostname() + m.SetGlobalTag("hostname", hostname) + + // Start periodic writes to system. + m.wg.Add(1) + go m.storeStatistics() + } + + return nil +} + +func (m *Monitor) Enabled() bool { return m.storeEnabled } + +func (m *Monitor) WritePoints(p models.Points) error { + if !m.storeEnabled { + return nil + } + + if len(m.globalTags) > 0 { + for _, pp := range p { + pp.SetTags(pp.Tags().Merge(m.globalTags)) + } + } + + return m.writePoints(p) +} + +func (m *Monitor) writePoints(p models.Points) error { + m.mu.RLock() + defer m.mu.RUnlock() + + if err := m.PointsWriter.WritePoints(m.storeDatabase, m.storeRetentionPolicy, p); err != nil { + m.Logger.Info("failed to store statistics", zap.Error(err)) + } + return nil +} + +// Close closes the monitor system. +func (m *Monitor) Close() error { + if !m.open() { + m.Logger.Info("Monitor is already closed") + return nil + } + + m.Logger.Info("Shutting down monitor service") + m.mu.Lock() + close(m.done) + m.mu.Unlock() + + m.wg.Wait() + + m.mu.Lock() + m.done = nil + m.mu.Unlock() + + m.DeregisterDiagnosticsClient("build") + m.DeregisterDiagnosticsClient("runtime") + m.DeregisterDiagnosticsClient("network") + m.DeregisterDiagnosticsClient("system") + return nil +} + +// SetGlobalTag can be used to set tags that will appear on all points +// written by the Monitor. 
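+// For example (illustrative):
+//
+//	m.SetGlobalTag("datacenter", "us-west-2")
+//
+// adds a datacenter=us-west-2 tag to every statistic point the monitor stores.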
+func (m *Monitor) SetGlobalTag(key string, value interface{}) { + m.mu.Lock() + m.globalTags[key] = fmt.Sprintf("%v", value) + m.mu.Unlock() +} + +// RemoteWriterConfig represents the configuration of a remote writer. +type RemoteWriterConfig struct { + RemoteAddr string + NodeID string + Username string + Password string + ClusterID uint64 +} + +// SetPointsWriter can be used to set a writer for the monitoring points. +func (m *Monitor) SetPointsWriter(pw PointsWriter) error { + if !m.storeEnabled { + // not enabled, nothing to do + return nil + } + m.mu.Lock() + m.PointsWriter = pw + m.mu.Unlock() + + // Subsequent calls to an already open Monitor are just a no-op. + return m.Open() +} + +// WithLogger sets the logger for the Monitor. +func (m *Monitor) WithLogger(log *zap.Logger) { + m.Logger = log.With(zap.String("service", "monitor")) +} + +// RegisterDiagnosticsClient registers a diagnostics client with the given name and tags. +func (m *Monitor) RegisterDiagnosticsClient(name string, client diagnostics.Client) { + m.mu.Lock() + defer m.mu.Unlock() + m.diagRegistrations[name] = client + m.Logger.Info("Registered diagnostics client", zap.String("name", name)) +} + +// DeregisterDiagnosticsClient deregisters a diagnostics client by name. +func (m *Monitor) DeregisterDiagnosticsClient(name string) { + m.mu.Lock() + defer m.mu.Unlock() + delete(m.diagRegistrations, name) +} + +// Statistics returns the combined statistics for all expvar data. The given +// tags are added to each of the returned statistics. +func (m *Monitor) Statistics(tags map[string]string) ([]*Statistic, error) { + var statistics []*Statistic + + expvar.Do(func(kv expvar.KeyValue) { + // Skip built-in expvar stats. + if kv.Key == "memstats" || kv.Key == "cmdline" { + return + } + + statistic := &Statistic{ + Statistic: models.NewStatistic(""), + } + + // Add any supplied tags. + for k, v := range tags { + statistic.Tags[k] = v + } + + // Every other top-level expvar value should be a map. + m, ok := kv.Value.(*expvar.Map) + if !ok { + return + } + + m.Do(func(subKV expvar.KeyValue) { + switch subKV.Key { + case "name": + // straight to string name. + u, err := strconv.Unquote(subKV.Value.String()) + if err != nil { + return + } + statistic.Name = u + case "tags": + // string-string tags map. + n := subKV.Value.(*expvar.Map) + n.Do(func(t expvar.KeyValue) { + u, err := strconv.Unquote(t.Value.String()) + if err != nil { + return + } + statistic.Tags[t.Key] = u + }) + case "values": + // string-interface map. + n := subKV.Value.(*expvar.Map) + n.Do(func(kv expvar.KeyValue) { + var f interface{} + var err error + switch v := kv.Value.(type) { + case *expvar.Float: + f, err = strconv.ParseFloat(v.String(), 64) + if err != nil { + return + } + case *expvar.Int: + f, err = strconv.ParseInt(v.String(), 10, 64) + if err != nil { + return + } + default: + return + } + statistic.Values[kv.Key] = f + }) + } + }) + + // If a registered client has no field data, don't include it in the results + if len(statistic.Values) == 0 { + return + } + + statistics = append(statistics, statistic) + }) + + // Add Go memstats. 
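+	// (These are a hand-picked subset of runtime.MemStats, reported as the
+	// "runtime" measurement together with the current goroutine count.)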
+ statistic := &Statistic{ + Statistic: models.NewStatistic("runtime"), + } + + // Add any supplied tags to Go memstats + for k, v := range tags { + statistic.Tags[k] = v + } + + var rt runtime.MemStats + runtime.ReadMemStats(&rt) + statistic.Values = map[string]interface{}{ + "Alloc": int64(rt.Alloc), + "TotalAlloc": int64(rt.TotalAlloc), + "Sys": int64(rt.Sys), + "Lookups": int64(rt.Lookups), + "Mallocs": int64(rt.Mallocs), + "Frees": int64(rt.Frees), + "HeapAlloc": int64(rt.HeapAlloc), + "HeapSys": int64(rt.HeapSys), + "HeapIdle": int64(rt.HeapIdle), + "HeapInUse": int64(rt.HeapInuse), + "HeapReleased": int64(rt.HeapReleased), + "HeapObjects": int64(rt.HeapObjects), + "PauseTotalNs": int64(rt.PauseTotalNs), + "NumGC": int64(rt.NumGC), + "NumGoroutine": int64(runtime.NumGoroutine()), + } + statistics = append(statistics, statistic) + + statistics = m.gatherStatistics(statistics, tags) + return statistics, nil +} + +func (m *Monitor) gatherStatistics(statistics []*Statistic, tags map[string]string) []*Statistic { + m.mu.RLock() + defer m.mu.RUnlock() + + if m.reporter != nil { + for _, s := range m.reporter.Statistics(tags) { + statistics = append(statistics, &Statistic{Statistic: s}) + } + } + return statistics +} + +// Diagnostics fetches diagnostic information for each registered +// diagnostic client. It skips any clients that return an error when +// retrieving their diagnostics. +func (m *Monitor) Diagnostics() (map[string]*diagnostics.Diagnostics, error) { + m.mu.Lock() + defer m.mu.Unlock() + + diags := make(map[string]*diagnostics.Diagnostics, len(m.diagRegistrations)) + for k, v := range m.diagRegistrations { + d, err := v.Diagnostics() + if err != nil { + continue + } + diags[k] = d + } + return diags, nil +} + +// createInternalStorage ensures the internal storage has been created. +func (m *Monitor) createInternalStorage() { + if m.storeCreated { + return + } + + if di := m.MetaClient.Database(m.storeDatabase); di == nil { + duration := MonitorRetentionPolicyDuration + replicaN := MonitorRetentionPolicyReplicaN + spec := meta.RetentionPolicySpec{ + Name: MonitorRetentionPolicy, + Duration: &duration, + ReplicaN: &replicaN, + } + + if _, err := m.MetaClient.CreateDatabaseWithRetentionPolicy(m.storeDatabase, &spec); err != nil { + m.Logger.Info("Failed to create storage", logger.Database(m.storeDatabase), zap.Error(err)) + return + } + } + + // Mark storage creation complete. + m.storeCreated = true +} + +// waitUntilInterval waits until we are on an even interval for the duration. +func (m *Monitor) waitUntilInterval(d time.Duration) error { + now := time.Now() + until := now.Truncate(d).Add(d) + timer := time.NewTimer(until.Sub(now)) + defer timer.Stop() + + select { + case <-timer.C: + return nil + case <-m.done: + return errors.New("interrupted") + } +} + +// storeStatistics writes the statistics to an InfluxDB system. +func (m *Monitor) storeStatistics() { + defer m.wg.Done() + m.Logger.Info("Storing statistics", logger.Database(m.storeDatabase), logger.RetentionPolicy(m.storeRetentionPolicy), logger.DurationLiteral("interval", m.storeInterval)) + + // Wait until an even interval to start recording monitor statistics. + // If we are interrupted before the interval for some reason, exit early. 
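+	// For example (illustrative): with a 10s store interval, a monitor opened
+	// at 12:00:07 sleeps until 12:00:10 before its first tick.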
+ if err := m.waitUntilInterval(m.storeInterval); err != nil { + return + } + + tick := time.NewTicker(m.storeInterval) + defer tick.Stop() + + for { + select { + case now := <-tick.C: + now = now.Truncate(m.storeInterval) + func() { + m.mu.Lock() + defer m.mu.Unlock() + m.createInternalStorage() + }() + + stats, err := m.Statistics(m.globalTags) + if err != nil { + m.Logger.Info("Failed to retrieve registered statistics", zap.Error(err)) + return + } + + // Write all stats in batches + batch := make(models.Points, 0, 5000) + for _, s := range stats { + pt, err := models.NewPoint(s.Name, models.NewTags(s.Tags), s.Values, now) + if err != nil { + m.Logger.Info("Dropping point", zap.String("name", s.Name), zap.Error(err)) + return + } + batch = append(batch, pt) + if len(batch) == cap(batch) { + m.writePoints(batch) + batch = batch[:0] + + } + } + + // Write the last batch + if len(batch) > 0 { + m.writePoints(batch) + } + case <-m.done: + m.Logger.Info("Terminating storage of statistics") + return + } + } +} + +// Statistic represents the information returned by a single monitor client. +type Statistic struct { + models.Statistic +} + +// ValueNames returns a sorted list of the value names, if any. +func (s *Statistic) ValueNames() []string { + a := make([]string, 0, len(s.Values)) + for k := range s.Values { + a = append(a, k) + } + sort.Strings(a) + return a +} + +// Statistics is a slice of sortable statistics. +type Statistics []*Statistic + +// Len implements sort.Interface. +func (a Statistics) Len() int { return len(a) } + +// Less implements sort.Interface. +func (a Statistics) Less(i, j int) bool { + return a[i].Name < a[j].Name +} + +// Swap implements sort.Interface. +func (a Statistics) Swap(i, j int) { a[i], a[j] = a[j], a[i] } diff --git a/vendor/github.com/influxdata/influxdb/monitor/service_test.go b/vendor/github.com/influxdata/influxdb/monitor/service_test.go new file mode 100644 index 0000000..adc5b6e --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/monitor/service_test.go @@ -0,0 +1,482 @@ +package monitor_test + +import ( + "bytes" + "context" + "expvar" + "fmt" + "os" + "reflect" + "sort" + "testing" + "time" + + "github.com/influxdata/influxdb/models" + "github.com/influxdata/influxdb/monitor" + "github.com/influxdata/influxdb/services/meta" + "github.com/influxdata/influxdb/toml" + "go.uber.org/zap" + "go.uber.org/zap/zaptest/observer" +) + +func TestMonitor_Open(t *testing.T) { + s := monitor.New(nil, monitor.Config{}) + if err := s.Open(); err != nil { + t.Fatalf("unexpected open error: %s", err) + } + + // Verify that opening twice is fine. + if err := s.Open(); err != nil { + s.Close() + t.Fatalf("unexpected error on second open: %s", err) + } + + if err := s.Close(); err != nil { + t.Fatalf("unexpected close error: %s", err) + } + + // Verify that closing twice is fine. + if err := s.Close(); err != nil { + t.Fatalf("unexpected error on second close: %s", err) + } +} + +func TestMonitor_SetPointsWriter_StoreEnabled(t *testing.T) { + var mc MetaClient + mc.CreateDatabaseWithRetentionPolicyFn = func(name string, spec *meta.RetentionPolicySpec) (*meta.DatabaseInfo, error) { + return &meta.DatabaseInfo{Name: name}, nil + } + + config := monitor.NewConfig() + s := monitor.New(nil, config) + s.MetaClient = &mc + core, logs := observer.New(zap.DebugLevel) + s.WithLogger(zap.New(core)) + + // Setting the points writer should open the monitor. 
+ var pw PointsWriter + if err := s.SetPointsWriter(&pw); err != nil { + t.Fatalf("unexpected open error: %s", err) + } + defer s.Close() + + // Verify that the monitor was opened by looking at the log messages. + if logs.FilterMessage("Starting monitor service").Len() == 0 { + t.Errorf("monitor system was never started") + } +} + +func TestMonitor_SetPointsWriter_StoreDisabled(t *testing.T) { + s := monitor.New(nil, monitor.Config{}) + core, logs := observer.New(zap.DebugLevel) + s.WithLogger(zap.New(core)) + + // Setting the points writer should open the monitor. + var pw PointsWriter + if err := s.SetPointsWriter(&pw); err != nil { + t.Fatalf("unexpected open error: %s", err) + } + defer s.Close() + + // Verify that the monitor was not opened by looking at the log messages. + if logs.FilterMessage("Starting monitor system").Len() > 0 { + t.Errorf("monitor system should not have been started") + } +} + +func TestMonitor_StoreStatistics(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ch := make(chan models.Points) + + var mc MetaClient + mc.CreateDatabaseWithRetentionPolicyFn = func(name string, spec *meta.RetentionPolicySpec) (*meta.DatabaseInfo, error) { + if got, want := name, monitor.DefaultStoreDatabase; got != want { + t.Errorf("unexpected database: got=%q want=%q", got, want) + } + if got, want := spec.Name, monitor.MonitorRetentionPolicy; got != want { + t.Errorf("unexpected retention policy: got=%q want=%q", got, want) + } + if spec.Duration != nil { + if got, want := *spec.Duration, monitor.MonitorRetentionPolicyDuration; got != want { + t.Errorf("unexpected duration: got=%q want=%q", got, want) + } + } else { + t.Error("expected duration in retention policy spec") + } + if spec.ReplicaN != nil { + if got, want := *spec.ReplicaN, monitor.MonitorRetentionPolicyReplicaN; got != want { + t.Errorf("unexpected replica number: got=%q want=%q", got, want) + } + } else { + t.Error("expected replica number in retention policy spec") + } + return &meta.DatabaseInfo{Name: name}, nil + } + + var pw PointsWriter + pw.WritePointsFn = func(database, policy string, points models.Points) error { + // Verify that we are attempting to write to the correct database. + if got, want := database, monitor.DefaultStoreDatabase; got != want { + t.Errorf("unexpected database: got=%q want=%q", got, want) + } + if got, want := policy, monitor.MonitorRetentionPolicy; got != want { + t.Errorf("unexpected retention policy: got=%q want=%q", got, want) + } + + // Attempt to write the points to the main goroutine. + select { + case <-ctx.Done(): + case ch <- points: + } + return nil + } + + config := monitor.NewConfig() + config.StoreInterval = toml.Duration(10 * time.Millisecond) + s := monitor.New(nil, config) + s.MetaClient = &mc + s.PointsWriter = &pw + + if err := s.Open(); err != nil { + t.Fatalf("unexpected error: %s", err) + } + defer s.Close() + defer cancel() + + timer := time.NewTimer(100 * time.Millisecond) + select { + case points := <-ch: + timer.Stop() + + // Search for the runtime statistic. + found := false + for _, pt := range points { + if !bytes.Equal(pt.Name(), []byte("runtime")) { + continue + } + + // There should be a hostname. + if got := pt.Tags().GetString("hostname"); len(got) == 0 { + t.Errorf("expected hostname tag") + } + // This should write on an exact interval of 10 milliseconds. 
+ if got, want := pt.Time(), pt.Time().Truncate(10*time.Millisecond); got != want { + t.Errorf("unexpected time: got=%q want=%q", got, want) + } + found = true + break + } + + if !found { + t.Error("unable to find runtime statistic") + } + case <-timer.C: + t.Errorf("timeout while waiting for statistics to be written") + } +} + +func TestMonitor_Reporter(t *testing.T) { + reporter := ReporterFunc(func(tags map[string]string) []models.Statistic { + return []models.Statistic{ + { + Name: "foo", + Tags: tags, + Values: map[string]interface{}{ + "value": "bar", + }, + }, + } + }) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ch := make(chan models.Points) + + var mc MetaClient + mc.CreateDatabaseWithRetentionPolicyFn = func(name string, spec *meta.RetentionPolicySpec) (*meta.DatabaseInfo, error) { + return &meta.DatabaseInfo{Name: name}, nil + } + + var pw PointsWriter + pw.WritePointsFn = func(database, policy string, points models.Points) error { + // Attempt to write the points to the main goroutine. + select { + case <-ctx.Done(): + case ch <- points: + } + return nil + } + + config := monitor.NewConfig() + config.StoreInterval = toml.Duration(10 * time.Millisecond) + s := monitor.New(reporter, config) + s.MetaClient = &mc + s.PointsWriter = &pw + + if err := s.Open(); err != nil { + t.Fatalf("unexpected error: %s", err) + } + defer s.Close() + defer cancel() + + timer := time.NewTimer(100 * time.Millisecond) + select { + case points := <-ch: + timer.Stop() + + // Look for the statistic. + found := false + for _, pt := range points { + if !bytes.Equal(pt.Name(), []byte("foo")) { + continue + } + found = true + break + } + + if !found { + t.Error("unable to find foo statistic") + } + case <-timer.C: + t.Errorf("timeout while waiting for statistics to be written") + } +} + +func expvarMap(name string, tags map[string]string, fields map[string]interface{}) *expvar.Map { + m := new(expvar.Map).Init() + eName := new(expvar.String) + eName.Set(name) + m.Set("name", eName) + + var eTags *expvar.Map + if len(tags) > 0 { + eTags = new(expvar.Map).Init() + for k, v := range tags { + kv := new(expvar.String) + kv.Set(v) + eTags.Set(k, kv) + } + m.Set("tags", eTags) + } + + var eFields *expvar.Map + if len(fields) > 0 { + eFields = new(expvar.Map).Init() + for k, v := range fields { + switch v := v.(type) { + case float64: + kv := new(expvar.Float) + kv.Set(v) + eFields.Set(k, kv) + case int: + kv := new(expvar.Int) + kv.Set(int64(v)) + eFields.Set(k, kv) + case string: + kv := new(expvar.String) + kv.Set(v) + eFields.Set(k, kv) + } + } + m.Set("values", eFields) + } + return m +} + +func TestMonitor_Expvar(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ch := make(chan models.Points) + + var mc MetaClient + mc.CreateDatabaseWithRetentionPolicyFn = func(name string, spec *meta.RetentionPolicySpec) (*meta.DatabaseInfo, error) { + return &meta.DatabaseInfo{Name: name}, nil + } + + var pw PointsWriter + pw.WritePointsFn = func(database, policy string, points models.Points) error { + // Attempt to write the points to the main goroutine. 
+ select { + case <-ctx.Done(): + case ch <- points: + } + return nil + } + + config := monitor.NewConfig() + config.StoreInterval = toml.Duration(10 * time.Millisecond) + s := monitor.New(nil, config) + s.MetaClient = &mc + s.PointsWriter = &pw + + expvar.Publish("expvar1", expvarMap( + "expvar1", + map[string]string{ + "region": "uswest2", + }, + map[string]interface{}{ + "value": 2.0, + }, + )) + expvar.Publish("expvar2", expvarMap( + "expvar2", + map[string]string{ + "region": "uswest2", + }, + nil, + )) + expvar.Publish("expvar3", expvarMap( + "expvar3", + nil, + map[string]interface{}{ + "value": 2, + }, + )) + + bad := new(expvar.String) + bad.Set("badentry") + expvar.Publish("expvar4", bad) + + if err := s.Open(); err != nil { + t.Fatalf("unexpected error: %s", err) + } + defer s.Close() + defer cancel() + + hostname, _ := os.Hostname() + timer := time.NewTimer(100 * time.Millisecond) + select { + case points := <-ch: + timer.Stop() + + // Look for the statistic. + var found1, found3 bool + for _, pt := range points { + if bytes.Equal(pt.Name(), []byte("expvar1")) { + if got, want := pt.Tags().HashKey(), []byte(fmt.Sprintf(",hostname=%s,region=uswest2", hostname)); !reflect.DeepEqual(got, want) { + t.Errorf("unexpected expvar1 tags: got=%v want=%v", string(got), string(want)) + } + fields, _ := pt.Fields() + if got, want := fields, models.Fields(map[string]interface{}{ + "value": 2.0, + }); !reflect.DeepEqual(got, want) { + t.Errorf("unexpected expvar1 fields: got=%v want=%v", got, want) + } + found1 = true + } else if bytes.Equal(pt.Name(), []byte("expvar2")) { + t.Error("found expvar2 statistic") + } else if bytes.Equal(pt.Name(), []byte("expvar3")) { + if got, want := pt.Tags().HashKey(), []byte(fmt.Sprintf(",hostname=%s", hostname)); !reflect.DeepEqual(got, want) { + t.Errorf("unexpected expvar3 tags: got=%v want=%v", string(got), string(want)) + } + fields, _ := pt.Fields() + if got, want := fields, models.Fields(map[string]interface{}{ + "value": int64(2), + }); !reflect.DeepEqual(got, want) { + t.Errorf("unexpected expvar3 fields: got=%v want=%v", got, want) + } + found3 = true + } + } + + if !found1 { + t.Error("unable to find expvar1 statistic") + } + if !found3 { + t.Error("unable to find expvar3 statistic") + } + case <-timer.C: + t.Errorf("timeout while waiting for statistics to be written") + } +} + +func TestMonitor_QuickClose(t *testing.T) { + var mc MetaClient + mc.CreateDatabaseWithRetentionPolicyFn = func(name string, spec *meta.RetentionPolicySpec) (*meta.DatabaseInfo, error) { + return &meta.DatabaseInfo{Name: name}, nil + } + + var pw PointsWriter + config := monitor.NewConfig() + config.StoreInterval = toml.Duration(24 * time.Hour) + s := monitor.New(nil, config) + s.MetaClient = &mc + s.PointsWriter = &pw + + if err := s.Open(); err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if err := s.Close(); err != nil { + t.Fatalf("unexpected error: %s", err) + } +} + +func TestStatistic_ValueNames(t *testing.T) { + statistic := monitor.Statistic{ + Statistic: models.Statistic{ + Name: "foo", + Values: map[string]interface{}{ + "abc": 1.0, + "def": 2.0, + }, + }, + } + + names := statistic.ValueNames() + if got, want := names, []string{"abc", "def"}; !reflect.DeepEqual(got, want) { + t.Errorf("unexpected value names: got=%v want=%v", got, want) + } +} + +func TestStatistics_Sort(t *testing.T) { + statistics := []*monitor.Statistic{ + {Statistic: models.Statistic{Name: "b"}}, + {Statistic: models.Statistic{Name: "a"}}, + {Statistic: models.Statistic{Name: 
"c"}}, + } + + sort.Sort(monitor.Statistics(statistics)) + names := make([]string, 0, len(statistics)) + for _, stat := range statistics { + names = append(names, stat.Name) + } + + if got, want := names, []string{"a", "b", "c"}; !reflect.DeepEqual(got, want) { + t.Errorf("incorrect sorting of statistics: got=%v want=%v", got, want) + } +} + +type ReporterFunc func(tags map[string]string) []models.Statistic + +func (f ReporterFunc) Statistics(tags map[string]string) []models.Statistic { + return f(tags) +} + +type PointsWriter struct { + WritePointsFn func(database, policy string, points models.Points) error +} + +func (pw *PointsWriter) WritePoints(database, policy string, points models.Points) error { + if pw.WritePointsFn != nil { + return pw.WritePointsFn(database, policy, points) + } + return nil +} + +type MetaClient struct { + CreateDatabaseWithRetentionPolicyFn func(name string, spec *meta.RetentionPolicySpec) (*meta.DatabaseInfo, error) + DatabaseFn func(name string) *meta.DatabaseInfo +} + +func (m *MetaClient) CreateDatabaseWithRetentionPolicy(name string, spec *meta.RetentionPolicySpec) (*meta.DatabaseInfo, error) { + return m.CreateDatabaseWithRetentionPolicyFn(name, spec) +} + +func (m *MetaClient) Database(name string) *meta.DatabaseInfo { + if m.DatabaseFn != nil { + return m.DatabaseFn(name) + } + return nil +} diff --git a/vendor/github.com/influxdata/influxdb/monitor/system.go b/vendor/github.com/influxdata/influxdb/monitor/system.go new file mode 100644 index 0000000..01a6bc5 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/monitor/system.go @@ -0,0 +1,29 @@ +package monitor + +import ( + "os" + "time" + + "github.com/influxdata/influxdb/monitor/diagnostics" +) + +var startTime time.Time + +func init() { + startTime = time.Now().UTC() +} + +// system captures system-level diagnostics. +type system struct{} + +func (s *system) Diagnostics() (*diagnostics.Diagnostics, error) { + currentTime := time.Now().UTC() + d := map[string]interface{}{ + "PID": os.Getpid(), + "currentTime": currentTime, + "started": startTime, + "uptime": currentTime.Sub(startTime).String(), + } + + return diagnostics.RowFromMap(d), nil +} diff --git a/vendor/github.com/influxdata/influxdb/monitor/system_test.go b/vendor/github.com/influxdata/influxdb/monitor/system_test.go new file mode 100644 index 0000000..923345b --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/monitor/system_test.go @@ -0,0 +1,55 @@ +package monitor_test + +import ( + "os" + "reflect" + "testing" + "time" + + "github.com/influxdata/influxdb/monitor" +) + +func TestDiagnostics_System(t *testing.T) { + s := monitor.New(nil, monitor.Config{}) + if err := s.Open(); err != nil { + t.Fatalf("unexpected error: %s", err) + } + defer s.Close() + + d, err := s.Diagnostics() + if err != nil { + t.Errorf("unexpected error: %s", err) + return + } + + diags, ok := d["system"] + if !ok { + t.Fatal("no diagnostics found for 'system'") + } + + if got, exp := diags.Columns, []string{"PID", "currentTime", "started", "uptime"}; !reflect.DeepEqual(got, exp) { + t.Errorf("unexpected columns: got=%v exp=%v", got, exp) + } + + // So this next part is nearly impossible to match, so just check if they look correct. 
+	if exp, got := 1, len(diags.Rows); exp != got {
+		t.Fatalf("expected exactly %d row, got %d", exp, got)
+	}
+
+	if got, exp := diags.Rows[0][0].(int), os.Getpid(); got != exp {
+		t.Errorf("unexpected pid: got=%v exp=%v", got, exp)
+	}
+
+	currentTime := diags.Rows[0][1].(time.Time)
+	startTime := diags.Rows[0][2].(time.Time)
+	if !startTime.Before(currentTime) {
+		t.Errorf("start time is not before the current time: %s (start), %s (current)", startTime, currentTime)
+	}
+
+	uptime, err := time.ParseDuration(diags.Rows[0][3].(string))
+	if err != nil {
+		t.Errorf("unable to parse uptime duration: %s: %s", diags.Rows[0][3], err)
+	} else if got, exp := uptime, currentTime.Sub(startTime); got != exp {
+		t.Errorf("uptime does not match the difference between start time and current time: got=%v exp=%v", got, exp)
+	}
+}
diff --git a/vendor/github.com/influxdata/influxdb/nightly.sh b/vendor/github.com/influxdata/influxdb/nightly.sh
new file mode 100755
index 0000000..bfa8295
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/nightly.sh
@@ -0,0 +1,57 @@
+#!/bin/bash
+
+SWAKS="/root/swaks"
+
+# Bump this whenever a release branch is created from master
+MASTER_VERSION=0.10.0
+
+# send_failure_notification sends an e-mail with a build failure notification.
+function send_failure_notification {
+    smtp=$1
+    user=$2
+    password=$3
+    to=$4
+    version=$5
+    $SWAKS --auth \
+        --server $smtp \
+        --au $user \
+        --ap $password \
+        --to $to \
+        --h-Subject: "Nightly build has FAILED" \
+        --body "The nightly build has failed, version: $version"
+}
+
+if [ $# -lt 4 ]; then
+    echo "$0 <smtp server> <user> <password> <to> [RACE_ENABLED]"
+    exit 1
+fi
+SMTP=$1
+USER=$2
+PASSWORD=$3
+TO=$4
+RACE_ENABLED=$5
+
+if [ -n "$RACE_ENABLED" ]; then
+    race="-x"
+    echo "Race-detection build enabled."
+fi
+
+REPO_DIR=`mktemp -d`
+echo "Using $REPO_DIR for all work..."
+
+cd $REPO_DIR
+export GOPATH=`pwd`
+mkdir -p $GOPATH/src/github.com/influxdata
+cd $GOPATH/src/github.com/influxdata
+git clone https://github.com/influxdata/influxdb.git
+
+cd $GOPATH/src/github.com/influxdata/influxdb
+VERSION="$MASTER_VERSION-nightly-`git log --pretty=format:'%h' -n 1`"
+NIGHTLY_BUILD=true ./package.sh $race $VERSION
+
+if [ $? -ne 0 ]; then
+    # Send notification e-mail.
+ send_failure_notification $SMTP $USER $PASSWORD $TO $VERSION +fi + +rm -rf $REPO_DIR diff --git a/vendor/github.com/influxdata/influxdb/node.go b/vendor/github.com/influxdata/influxdb/node.go new file mode 100644 index 0000000..68709ed --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/node.go @@ -0,0 +1,121 @@ +package influxdb + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strconv" +) + +const ( + nodeFile = "node.json" + oldNodeFile = "id" + peersFilename = "peers.json" +) + +type Node struct { + path string + ID uint64 +} + +// LoadNode will load the node information from disk if present +func LoadNode(path string) (*Node, error) { + // Always check to see if we are upgrading first + if err := upgradeNodeFile(path); err != nil { + return nil, err + } + + n := &Node{ + path: path, + } + + f, err := os.Open(filepath.Join(path, nodeFile)) + if err != nil { + return nil, err + } + defer f.Close() + + if err := json.NewDecoder(f).Decode(n); err != nil { + return nil, err + } + + return n, nil +} + +// NewNode will return a new node +func NewNode(path string) *Node { + return &Node{ + path: path, + } +} + +// Save will save the node file to disk and replace the existing one if present +func (n *Node) Save() error { + file := filepath.Join(n.path, nodeFile) + tmpFile := file + "tmp" + + f, err := os.Create(tmpFile) + if err != nil { + return err + } + + if err = json.NewEncoder(f).Encode(n); err != nil { + f.Close() + return err + } + + if err = f.Close(); nil != err { + return err + } + + return os.Rename(tmpFile, file) +} + +func upgradeNodeFile(path string) error { + oldFile := filepath.Join(path, oldNodeFile) + b, err := ioutil.ReadFile(oldFile) + if err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + // We shouldn't have an empty ID file, but if we do, ignore it + if len(b) == 0 { + return nil + } + + peers := []string{} + pb, err := ioutil.ReadFile(filepath.Join(path, peersFilename)) + if err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + + err = json.Unmarshal(pb, &peers) + if err != nil { + return err + } + + if len(peers) > 1 { + return fmt.Errorf("to upgrade a cluster, please contact support at influxdata") + } + + n := &Node{ + path: path, + } + if n.ID, err = strconv.ParseUint(string(b), 10, 64); err != nil { + return err + } + if err := n.Save(); err != nil { + return err + } + if err := os.Remove(oldFile); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/influxdata/influxdb/pkg/README.md b/vendor/github.com/influxdata/influxdb/pkg/README.md new file mode 100644 index 0000000..1c73a9e --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/README.md @@ -0,0 +1,5 @@ +pkg/ is a collection of utility packages used by the InfluxDB project without being specific to its internals. + +Utility packages are kept separate from the InfluxDB core codebase to keep it as small and concise as possible. If some utilities grow larger and their APIs stabilize, they may be moved to their own repository under the InfluxDB organization, to facilitate re-use by other projects. However that is not the priority. + +Because utility packages are small and neatly separated from the rest of the codebase, they are a good place to start for aspiring maintainers and contributors. Get in touch if you want to help maintain them! 
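A brief, hedged aside on node.go above: the sketch below shows how a caller might load or create the node metadata. It is illustrative only; the "/var/lib/influxdb" data directory is an assumed path, not something defined in this patch.

package main

import (
	"fmt"
	"os"

	"github.com/influxdata/influxdb"
)

func main() {
	// Illustrative data directory; substitute your own.
	const dir = "/var/lib/influxdb"

	// LoadNode upgrades any legacy "id"/"peers.json" files, then reads node.json.
	node, err := influxdb.LoadNode(dir)
	if err != nil {
		if !os.IsNotExist(err) {
			panic(err)
		}
		// First run: no node.json yet, so create and persist a fresh Node.
		node = influxdb.NewNode(dir)
		if err := node.Save(); err != nil {
			panic(err)
		}
	}
	fmt.Println("node ID:", node.ID)
}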
diff --git a/vendor/github.com/influxdata/influxdb/pkg/binaryutil/binaryutil.go b/vendor/github.com/influxdata/influxdb/pkg/binaryutil/binaryutil.go new file mode 100644 index 0000000..b1d5f2a --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/binaryutil/binaryutil.go @@ -0,0 +1,22 @@ +package binaryutil + +// VarintSize returns the number of bytes to varint encode x. +// This code is copied from encoding/binary.PutVarint() with the buffer removed. +func VarintSize(x int64) int { + ux := uint64(x) << 1 + if x < 0 { + ux = ^ux + } + return UvarintSize(ux) +} + +// UvarintSize returns the number of bytes to uvarint encode x. +// This code is copied from encoding/binary.PutUvarint() with the buffer removed. +func UvarintSize(x uint64) int { + i := 0 + for x >= 0x80 { + x >>= 7 + i++ + } + return i + 1 +} diff --git a/vendor/github.com/influxdata/influxdb/pkg/bloom/bloom.go b/vendor/github.com/influxdata/influxdb/pkg/bloom/bloom.go new file mode 100644 index 0000000..b77ca71 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/bloom/bloom.go @@ -0,0 +1,136 @@ +package bloom + +// NOTE: +// This package implements a limited bloom filter implementation based on +// Will Fitzgerald's bloom & bitset packages. It uses a zero-allocation xxhash +// implementation, rather than murmur3. It's implemented locally to support +// zero-copy memory-mapped slices. +// +// This also optimizes the filter by always using a bitset size with a power of 2. + +import ( + "fmt" + "math" + + "github.com/cespare/xxhash" +) + +// Filter represents a bloom filter. +type Filter struct { + k uint64 + b []byte + mask uint64 +} + +// NewFilter returns a new instance of Filter using m bits and k hash functions. +// If m is not a power of two then it is rounded to the next highest power of 2. +func NewFilter(m uint64, k uint64) *Filter { + m = pow2(m) + return &Filter{k: k, b: make([]byte, m>>3), mask: m - 1} +} + +// NewFilterBuffer returns a new instance of a filter using a backing buffer. +// The buffer length MUST be a power of 2. +func NewFilterBuffer(buf []byte, k uint64) (*Filter, error) { + m := pow2(uint64(len(buf)) * 8) + if m != uint64(len(buf))*8 { + return nil, fmt.Errorf("bloom.Filter: buffer bit count must a power of two: %d/%d", len(buf)*8, m) + } + return &Filter{k: k, b: buf, mask: m - 1}, nil +} + +// Len returns the number of bits used in the filter. +func (f *Filter) Len() uint { return uint(len(f.b)) } + +// K returns the number of hash functions used in the filter. +func (f *Filter) K() uint64 { return f.k } + +// Bytes returns the underlying backing slice. +func (f *Filter) Bytes() []byte { return f.b } + +// Clone returns a copy of f. +func (f *Filter) Clone() *Filter { + other := &Filter{k: f.k, b: make([]byte, len(f.b)), mask: f.mask} + copy(other.b, f.b) + return other +} + +// Insert inserts data to the filter. +func (f *Filter) Insert(v []byte) { + h := f.hash(v) + for i := uint64(0); i < f.k; i++ { + loc := f.location(h, i) + f.b[loc>>3] |= 1 << (loc & 7) + } +} + +// Contains returns true if the filter possibly contains v. +// Returns false if the filter definitely does not contain v. +func (f *Filter) Contains(v []byte) bool { + h := f.hash(v) + for i := uint64(0); i < f.k; i++ { + loc := f.location(h, i) + if f.b[loc>>3]&(1<<(loc&7)) == 0 { + return false + } + } + return true +} + +// Merge performs an in-place union of other into f. +// Returns an error if m or k of the filters differs. 
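+// Both filters must share the same parameters; for example (illustrative),
+// two filters built with NewFilter(1024, 4) can be merged, while filters of
+// different sizes or hash counts cannot.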
+func (f *Filter) Merge(other *Filter) error { + if other == nil { + return nil + } + + // Ensure m & k fields match. + if len(f.b) != len(other.b) { + return fmt.Errorf("bloom.Filter.Merge(): m mismatch: %d <> %d", len(f.b), len(other.b)) + } else if f.k != other.k { + return fmt.Errorf("bloom.Filter.Merge(): k mismatch: %d <> %d", f.b, other.b) + } + + // Perform union of each byte. + for i := range f.b { + f.b[i] |= other.b[i] + } + + return nil +} + +// location returns the ith hashed location using two hash values. +func (f *Filter) location(h [2]uint64, i uint64) uint { + return uint((h[0] + h[1]*i) & f.mask) +} + +// hash returns two 64-bit hashes based on the output of xxhash. +func (f *Filter) hash(data []byte) [2]uint64 { + v1 := xxhash.Sum64(data) + var v2 uint64 + if len(data) > 0 { + b := data[len(data)-1] // We'll put the original byte back. + data[len(data)-1] = byte(0) + v2 = xxhash.Sum64(data) + data[len(data)-1] = b + } + return [2]uint64{v1, v2} +} + +// Estimate returns an estimated bit count and hash count given the element count and false positive rate. +func Estimate(n uint64, p float64) (m uint64, k uint64) { + m = uint64(math.Ceil(-1 * float64(n) * math.Log(p) / math.Pow(math.Log(2), 2))) + k = uint64(math.Ceil(math.Log(2) * float64(m) / float64(n))) + return m, k +} + +// pow2 returns the number that is the next highest power of 2. +// Returns v if it is a power of 2. +func pow2(v uint64) uint64 { + for i := uint64(8); i < 1<<62; i *= 2 { + if i >= v { + return i + } + } + panic("unreachable") +} diff --git a/vendor/github.com/influxdata/influxdb/pkg/bloom/bloom_test.go b/vendor/github.com/influxdata/influxdb/pkg/bloom/bloom_test.go new file mode 100644 index 0000000..49ee997 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/bloom/bloom_test.go @@ -0,0 +1,189 @@ +package bloom_test + +import ( + "encoding/binary" + "fmt" + "os" + "testing" + + "github.com/influxdata/influxdb/pkg/bloom" +) + +// Ensure filter can insert values and verify they exist. +func TestFilter_InsertContains(t *testing.T) { + if testing.Short() || os.Getenv("GORACE") != "" || os.Getenv("APPVEYOR") != "" { + t.Skip("Skipping test in short, race and appveyor mode.") + } + + // Short, less comprehensive test. + testShortFilter_InsertContains(t) + + if testing.Short() { + return // Just run the above short test + } + + // More comprehensive test for the xxhash based Bloom Filter. + + // These parameters will result, for 10M entries, with a bloom filter + // with 0.001 false positive rate (1 in 1000 values will be incorrectly + // identified as being present in the set). + filter := bloom.NewFilter(143775876, 10) + v := make([]byte, 4) + for i := 0; i < 10000000; i++ { + binary.BigEndian.PutUint32(v, uint32(i)) + filter.Insert(v) + } + + // None of the values inserted should ever be considered "not possibly in + // the filter". + t.Run("100M", func(t *testing.T) { + for i := 0; i < 10000000; i++ { + binary.BigEndian.PutUint32(v, uint32(i)) + if !filter.Contains(v) { + t.Fatalf("got false for value %q, expected true", v) + } + } + + // If we check for 100,000,000 values that we know are not present in the + // filter then we might expect around 100,000 of them to be false positives. + var fp int + for i := 10000000; i < 110000000; i++ { + binary.BigEndian.PutUint32(v, uint32(i)) + if filter.Contains(v) { + fp++ + } + } + + if fp > 1000000 { + // If we're an order of magnitude off, then it's arguable that there + // is a bug in the bloom filter. 
+ t.Fatalf("got %d false positives which is an error rate of %f, expected error rate <=0.001", fp, float64(fp)/100000000) + } + t.Logf("Bloom false positive error rate was %f", float64(fp)/100000000) + }) +} + +func testShortFilter_InsertContains(t *testing.T) { + t.Run("short", func(t *testing.T) { + f := bloom.NewFilter(1000, 4) + + // Insert value and validate. + f.Insert([]byte("Bess")) + if !f.Contains([]byte("Bess")) { + t.Fatal("expected true") + } + + // Insert another value and test. + f.Insert([]byte("Emma")) + if !f.Contains([]byte("Emma")) { + t.Fatal("expected true") + } + + // Validate that a non-existent value doesn't exist. + if f.Contains([]byte("Jane")) { + t.Fatal("expected false") + } + }) +} + +var benchCases = []struct { + m, k uint64 + n int +}{ + {m: 100, k: 4, n: 1000}, + {m: 1000, k: 4, n: 1000}, + {m: 10000, k: 4, n: 1000}, + {m: 100000, k: 4, n: 1000}, + {m: 100, k: 8, n: 1000}, + {m: 1000, k: 8, n: 1000}, + {m: 10000, k: 8, n: 1000}, + {m: 100000, k: 8, n: 1000}, + {m: 100, k: 20, n: 1000}, + {m: 1000, k: 20, n: 1000}, + {m: 10000, k: 20, n: 1000}, + {m: 100000, k: 20, n: 1000}, +} + +func BenchmarkFilter_Insert(b *testing.B) { + for _, c := range benchCases { + data := make([][]byte, 0, c.n) + for i := 0; i < c.n; i++ { + data = append(data, []byte(fmt.Sprintf("%d", i))) + } + + filter := bloom.NewFilter(c.m, c.k) + b.Run(fmt.Sprintf("m=%d_k=%d_n=%d", c.m, c.k, c.n), func(b *testing.B) { + b.ReportAllocs() + for i := 0; i < b.N; i++ { + for _, v := range data { + filter.Insert(v) + } + } + }) + + } +} + +var okResult bool + +func BenchmarkFilter_Contains(b *testing.B) { + for _, c := range benchCases { + data := make([][]byte, 0, c.n) + notData := make([][]byte, 0, c.n) + for i := 0; i < c.n; i++ { + data = append(data, []byte(fmt.Sprintf("%d", i))) + notData = append(notData, []byte(fmt.Sprintf("%d", c.n+i))) + } + + filter := bloom.NewFilter(c.m, c.k) + for _, v := range data { + filter.Insert(v) + } + + b.Run(fmt.Sprintf("m=%d_k=%d_n=%d", c.m, c.k, c.n), func(b *testing.B) { + b.ReportAllocs() + for i := 0; i < b.N; i++ { + for _, v := range data { + okResult = filter.Contains(v) + if !okResult { + b.Fatalf("Filter returned negative for value %q in set", v) + } + } + + // And now a bunch of values that don't exist. + for _, v := range notData { + okResult = filter.Contains(v) + } + } + }) + } +} + +func BenchmarkFilter_Merge(b *testing.B) { + for _, c := range benchCases { + data1 := make([][]byte, 0, c.n) + data2 := make([][]byte, 0, c.n) + for i := 0; i < c.n; i++ { + data1 = append(data1, []byte(fmt.Sprintf("%d", i))) + data2 = append(data2, []byte(fmt.Sprintf("%d", c.n+i))) + } + + filter1 := bloom.NewFilter(c.m, c.k) + filter2 := bloom.NewFilter(c.m, c.k) + for i := 0; i < c.n; i++ { + filter1.Insert(data1[i]) + filter2.Insert(data2[i]) + } + + b.Run(fmt.Sprintf("m=%d_k=%d_n=%d", c.m, c.k, c.n), func(b *testing.B) { + b.ReportAllocs() + for i := 0; i < b.N; i++ { + other, err := bloom.NewFilterBuffer(filter1.Bytes(), filter1.K()) + if err != nil { + b.Fatal(err) + } + other.Merge(filter2) + } + }) + } +} diff --git a/vendor/github.com/influxdata/influxdb/pkg/bytesutil/bytesutil.go b/vendor/github.com/influxdata/influxdb/pkg/bytesutil/bytesutil.go new file mode 100644 index 0000000..a318ab6 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/bytesutil/bytesutil.go @@ -0,0 +1,195 @@ +package bytesutil + +import ( + "bytes" + "fmt" + "sort" +) + +// Sort sorts a slice of byte slices. 
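+// Ordering is lexicographic per bytes.Compare; for example (illustrative),
+// [][]byte{{'b'}, {'a', 'b'}, {'a'}} sorts to [][]byte{{'a'}, {'a', 'b'}, {'b'}}.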
+func Sort(a [][]byte) { + sort.Sort(byteSlices(a)) +} + +// SortDedup sorts the byte slice a and removes duplicates. The ret +func SortDedup(a [][]byte) [][]byte { + if len(a) < 2 { + return a + } + + Sort(a) + + i, j := 0, 1 + for j < len(a) { + if !bytes.Equal(a[j-1], a[j]) { + a[i] = a[j-1] + i++ + } + j++ + } + a[i] = a[j-1] + i++ + return a[:i] +} + +func IsSorted(a [][]byte) bool { + return sort.IsSorted(byteSlices(a)) +} + +// SearchBytes performs a binary search for x in the sorted slice a. +func SearchBytes(a [][]byte, x []byte) int { + // Define f(i) => bytes.Compare(a[i], x) < 0 + // Define f(-1) == false and f(n) == true. + // Invariant: f(i-1) == false, f(j) == true. + i, j := 0, len(a) + for i < j { + h := int(uint(i+j) >> 1) // avoid overflow when computing h + // i ≤ h < j + if bytes.Compare(a[h], x) < 0 { + i = h + 1 // preserves f(i-1) == false + } else { + j = h // preserves f(j) == true + } + } + // i == j, f(i-1) == false, and f(j) (= f(i)) == true => answer is i. + return i +} + +// Contains returns true if x is an element of the sorted slice a. +func Contains(a [][]byte, x []byte) bool { + n := SearchBytes(a, x) + return n < len(a) && bytes.Equal(a[n], x) +} + +// SearchBytesFixed searches a for x using a binary search. The size of a must be a multiple of +// of x or else the function panics. There returned value is the index within a where x should +// exist. The caller should ensure that x does exist at this index. +func SearchBytesFixed(a []byte, sz int, fn func(x []byte) bool) int { + if len(a)%sz != 0 { + panic(fmt.Sprintf("x is not a multiple of a: %d %d", len(a), sz)) + } + + i, j := 0, len(a)-sz + for i < j { + h := int(uint(i+j) >> 1) + h -= h % sz + if !fn(a[h : h+sz]) { + i = h + sz + } else { + j = h + } + } + + return i +} + +// Union returns the union of a & b in sorted order. +func Union(a, b [][]byte) [][]byte { + n := len(b) + if len(a) > len(b) { + n = len(a) + } + other := make([][]byte, 0, n) + + for { + if len(a) > 0 && len(b) > 0 { + if cmp := bytes.Compare(a[0], b[0]); cmp == 0 { + other, a, b = append(other, a[0]), a[1:], b[1:] + } else if cmp == -1 { + other, a = append(other, a[0]), a[1:] + } else { + other, b = append(other, b[0]), b[1:] + } + } else if len(a) > 0 { + other, a = append(other, a[0]), a[1:] + } else if len(b) > 0 { + other, b = append(other, b[0]), b[1:] + } else { + return other + } + } +} + +// Intersect returns the intersection of a & b in sorted order. +func Intersect(a, b [][]byte) [][]byte { + n := len(b) + if len(a) > len(b) { + n = len(a) + } + other := make([][]byte, 0, n) + + for len(a) > 0 && len(b) > 0 { + if cmp := bytes.Compare(a[0], b[0]); cmp == 0 { + other, a, b = append(other, a[0]), a[1:], b[1:] + } else if cmp == -1 { + a = a[1:] + } else { + b = b[1:] + } + } + return other +} + +// Clone returns a copy of b. +func Clone(b []byte) []byte { + if b == nil { + return nil + } + buf := make([]byte, len(b)) + copy(buf, b) + return buf +} + +// CloneSlice returns a copy of a slice of byte slices. +func CloneSlice(a [][]byte) [][]byte { + other := make([][]byte, len(a)) + for i := range a { + other[i] = Clone(a[i]) + } + return other +} + +// Pack converts a sparse array to a dense one. It removes sections of a containing +// runs of val of length width. The returned value is a subslice of a. 
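+// For example (illustrative), with width 1 and val 0 the input
+// []byte{0, 0, 1, 0, 2, 0, 0, 0} packs down to []byte{1, 2}.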
+func Pack(a []byte, width int, val byte) []byte { + var i, j, jStart, end int + + fill := make([]byte, width) + for i := 0; i < len(fill); i++ { + fill[i] = val + } + + // Skip the first run that won't move + for ; i < len(a) && a[i] != val; i += width { + } + end = i + + for i < len(a) { + // Find the next gap to remove + for i < len(a) && a[i] == val { + i += width + } + + // Find the next non-gap to keep + jStart = i + for j = i; j < len(a) && a[j] != val; j += width { + } + + if jStart == len(a) { + break + } + + // Move the non-gap over the section to remove. + copy(a[end:], a[jStart:j]) + end += j - jStart + i = j + } + + return a[:end] +} + +type byteSlices [][]byte + +func (a byteSlices) Len() int { return len(a) } +func (a byteSlices) Less(i, j int) bool { return bytes.Compare(a[i], a[j]) == -1 } +func (a byteSlices) Swap(i, j int) { a[i], a[j] = a[j], a[i] } diff --git a/vendor/github.com/influxdata/influxdb/pkg/bytesutil/bytesutil_test.go b/vendor/github.com/influxdata/influxdb/pkg/bytesutil/bytesutil_test.go new file mode 100644 index 0000000..9d7adc6 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/bytesutil/bytesutil_test.go @@ -0,0 +1,281 @@ +package bytesutil_test + +import ( + "bytes" + "encoding/binary" + "strings" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/influxdata/influxdb/pkg/bytesutil" +) + +func TestSearchBytesFixed(t *testing.T) { + n, sz := 5, 8 + a := make([]byte, n*sz) // 5 - 8 byte int64s + + for i := 0; i < 5; i++ { + binary.BigEndian.PutUint64(a[i*sz:i*sz+sz], uint64(i)) + } + + var x [8]byte + + for i := 0; i < n; i++ { + binary.BigEndian.PutUint64(x[:], uint64(i)) + if exp, got := i*sz, bytesutil.SearchBytesFixed(a, len(x), func(v []byte) bool { + return bytes.Compare(v, x[:]) >= 0 + }); exp != got { + t.Fatalf("index mismatch: exp %v, got %v", exp, got) + } + } + + if exp, got := len(a)-1, bytesutil.SearchBytesFixed(a, 1, func(v []byte) bool { + return bytes.Compare(v, []byte{99}) >= 0 + }); exp != got { + t.Fatalf("index mismatch: exp %v, got %v", exp, got) + } +} + +func TestSearchBytes(t *testing.T) { + in := toByteSlices("bbb", "ccc", "eee", "fff", "ggg", "hhh") + tests := []struct { + name string + x string + exp int + }{ + {"exists first", "bbb", 0}, + {"exists middle", "eee", 2}, + {"exists last", "hhh", 5}, + {"not exists last", "zzz", 6}, + {"not exists first", "aaa", 0}, + {"not exists mid", "ddd", 2}, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got := bytesutil.SearchBytes(in, []byte(test.x)) + if got != test.exp { + t.Errorf("got %d, expected %d", got, test.exp) + } + }) + } +} + +func TestContains(t *testing.T) { + in := toByteSlices("bbb", "ccc", "eee", "fff", "ggg", "hhh") + tests := []struct { + name string + x string + exp bool + }{ + {"exists first", "bbb", true}, + {"exists middle", "eee", true}, + {"exists last", "hhh", true}, + {"not exists last", "zzz", false}, + {"not exists first", "aaa", false}, + {"not exists mid", "ddd", false}, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got := bytesutil.Contains(in, []byte(test.x)) + if got != test.exp { + t.Errorf("got %t, expected %t", got, test.exp) + } + }) + } +} + +func toByteSlices(s ...string) [][]byte { + r := make([][]byte, len(s)) + for i, v := range s { + r[i] = []byte(v) + } + return r +} + +func TestSortDedup(t *testing.T) { + tests := []struct { + name string + in [][]byte + exp [][]byte + }{ + { + name: "mixed dupes", + in: toByteSlices("bbb", "aba", "bbb", "aba", "ccc", "bbb", 
"aba"), + exp: toByteSlices("aba", "bbb", "ccc"), + }, + { + name: "no dupes", + in: toByteSlices("bbb", "ccc", "ddd"), + exp: toByteSlices("bbb", "ccc", "ddd"), + }, + { + name: "dupe at end", + in: toByteSlices("ccc", "ccc", "aaa"), + exp: toByteSlices("aaa", "ccc"), + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + out := bytesutil.SortDedup(test.in) + if !cmp.Equal(out, test.exp) { + t.Error("invalid result") + } + }) + } +} + +func TestPack_WidthOne_One(t *testing.T) { + a := make([]byte, 8) + + a[4] = 1 + + a = bytesutil.Pack(a, 1, 0) + if got, exp := len(a), 1; got != exp { + t.Fatalf("len mismatch: got %v, exp %v", got, exp) + } + + for i, v := range []byte{1} { + if got, exp := a[i], v; got != exp { + t.Fatalf("value mismatch: a[%d] = %v, exp %v", i, got, exp) + } + } +} + +func TestPack_WidthOne_Two(t *testing.T) { + a := make([]byte, 8) + + a[4] = 1 + a[6] = 2 + + a = bytesutil.Pack(a, 1, 0) + if got, exp := len(a), 2; got != exp { + t.Fatalf("len mismatch: got %v, exp %v", got, exp) + } + + for i, v := range []byte{1, 2} { + if got, exp := a[i], v; got != exp { + t.Fatalf("value mismatch: a[%d] = %v, exp %v", i, got, exp) + } + } +} + +func TestPack_WidthTwo_Two(t *testing.T) { + a := make([]byte, 8) + + a[2] = 1 + a[3] = 1 + a[6] = 2 + a[7] = 2 + + a = bytesutil.Pack(a, 2, 0) + if got, exp := len(a), 4; got != exp { + t.Fatalf("len mismatch: got %v, exp %v", got, exp) + } + + for i, v := range []byte{1, 1, 2, 2} { + if got, exp := a[i], v; got != exp { + t.Fatalf("value mismatch: a[%d] = %v, exp %v", i, got, exp) + } + } +} + +func TestPack_WidthOne_Last(t *testing.T) { + a := make([]byte, 8) + + a[6] = 2 + a[7] = 2 + + a = bytesutil.Pack(a, 2, 255) + if got, exp := len(a), 8; got != exp { + t.Fatalf("len mismatch: got %v, exp %v", got, exp) + } + + for i, v := range []byte{0, 0, 0, 0, 0, 0, 2, 2} { + if got, exp := a[i], v; got != exp { + t.Fatalf("value mismatch: a[%d] = %v, exp %v", i, got, exp) + } + } +} + +func TestPack_WidthOne_LastFill(t *testing.T) { + a := make([]byte, 8) + + a[0] = 255 + a[1] = 255 + a[2] = 2 + a[3] = 2 + a[4] = 2 + a[5] = 2 + a[6] = 2 + a[7] = 2 + + a = bytesutil.Pack(a, 2, 255) + if got, exp := len(a), 6; got != exp { + t.Fatalf("len mismatch: got %v, exp %v", got, exp) + } + + for i, v := range []byte{2, 2, 2, 2, 2, 2} { + if got, exp := a[i], v; got != exp { + t.Fatalf("value mismatch: a[%d] = %v, exp %v", i, got, exp) + } + } +} + +var result [][]byte + +func BenchmarkSortDedup(b *testing.B) { + b.Run("sort-deduplicate", func(b *testing.B) { + data := toByteSlices("bbb", "aba", "bbb", "aba", "ccc", "bbb", "aba") + in := append([][]byte{}, data...) 
+ b.ReportAllocs() + + copy(in, data) + for i := 0; i < b.N; i++ { + result = bytesutil.SortDedup(in) + + b.StopTimer() + copy(in, data) + b.StartTimer() + } + }) +} + +func BenchmarkContains_True(b *testing.B) { + var in [][]byte + for i := 'a'; i <= 'z'; i++ { + in = append(in, []byte(strings.Repeat(string(i), 3))) + } + for i := 0; i < b.N; i++ { + bytesutil.Contains(in, []byte("xxx")) + } +} + +func BenchmarkContains_False(b *testing.B) { + var in [][]byte + for i := 'a'; i <= 'z'; i++ { + in = append(in, []byte(strings.Repeat(string(i), 3))) + } + for i := 0; i < b.N; i++ { + bytesutil.Contains(in, []byte("a")) + } +} + +func BenchmarkSearchBytes_Exists(b *testing.B) { + var in [][]byte + for i := 'a'; i <= 'z'; i++ { + in = append(in, []byte(strings.Repeat(string(i), 3))) + } + for i := 0; i < b.N; i++ { + bytesutil.SearchBytes(in, []byte("xxx")) + } +} + +func BenchmarkSearchBytes_NotExits(b *testing.B) { + var in [][]byte + for i := 'a'; i <= 'z'; i++ { + in = append(in, []byte(strings.Repeat(string(i), 3))) + } + for i := 0; i < b.N; i++ { + bytesutil.SearchBytes(in, []byte("a")) + } +} diff --git a/vendor/github.com/influxdata/influxdb/pkg/deep/equal.go b/vendor/github.com/influxdata/influxdb/pkg/deep/equal.go new file mode 100644 index 0000000..327db9a --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/deep/equal.go @@ -0,0 +1,185 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// License. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Package deep provides a deep equality check for use in tests. +package deep // import "github.com/influxdata/influxdb/pkg/deep" + +import ( + "fmt" + "math" + "reflect" +) + +// Equal is a copy of reflect.DeepEqual except that it treats NaN == NaN as true. 
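+// For example (illustrative):
+//
+//	deep.Equal([]float64{math.NaN()}, []float64{math.NaN()})        // true
+//	reflect.DeepEqual([]float64{math.NaN()}, []float64{math.NaN()}) // false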
+func Equal(a1, a2 interface{}) bool { + if a1 == nil || a2 == nil { + return a1 == a2 + } + v1 := reflect.ValueOf(a1) + v2 := reflect.ValueOf(a2) + if v1.Type() != v2.Type() { + return false + } + return deepValueEqual(v1, v2, make(map[visit]bool), 0) +} + +// Tests for deep equality using reflected types. The map argument tracks +// comparisons that have already been seen, which allows short circuiting on +// recursive types. +func deepValueEqual(v1, v2 reflect.Value, visited map[visit]bool, depth int) bool { + if !v1.IsValid() || !v2.IsValid() { + return v1.IsValid() == v2.IsValid() + } + if v1.Type() != v2.Type() { + return false + } + + // if depth > 10 { panic("deepValueEqual") } // for debugging + hard := func(k reflect.Kind) bool { + switch k { + case reflect.Array, reflect.Map, reflect.Slice, reflect.Struct: + return true + } + return false + } + + if v1.CanAddr() && v2.CanAddr() && hard(v1.Kind()) { + addr1 := v1.UnsafeAddr() + addr2 := v2.UnsafeAddr() + if addr1 > addr2 { + // Canonicalize order to reduce number of entries in visited. + addr1, addr2 = addr2, addr1 + } + + // Short circuit if references are identical ... + if addr1 == addr2 { + return true + } + + // ... or already seen + typ := v1.Type() + v := visit{addr1, addr2, typ} + if visited[v] { + return true + } + + // Remember for later. + visited[v] = true + } + + switch v1.Kind() { + case reflect.Array: + for i := 0; i < v1.Len(); i++ { + if !deepValueEqual(v1.Index(i), v2.Index(i), visited, depth+1) { + return false + } + } + return true + case reflect.Slice: + if v1.IsNil() != v2.IsNil() { + return false + } + if v1.Len() != v2.Len() { + return false + } + if v1.Pointer() == v2.Pointer() { + return true + } + for i := 0; i < v1.Len(); i++ { + if !deepValueEqual(v1.Index(i), v2.Index(i), visited, depth+1) { + return false + } + } + return true + case reflect.Interface: + if v1.IsNil() || v2.IsNil() { + return v1.IsNil() == v2.IsNil() + } + return deepValueEqual(v1.Elem(), v2.Elem(), visited, depth+1) + case reflect.Ptr: + return deepValueEqual(v1.Elem(), v2.Elem(), visited, depth+1) + case reflect.Struct: + for i, n := 0, v1.NumField(); i < n; i++ { + if !deepValueEqual(v1.Field(i), v2.Field(i), visited, depth+1) { + return false + } + } + return true + case reflect.Map: + if v1.IsNil() != v2.IsNil() { + return false + } + if v1.Len() != v2.Len() { + return false + } + if v1.Pointer() == v2.Pointer() { + return true + } + for _, k := range v1.MapKeys() { + if !deepValueEqual(v1.MapIndex(k), v2.MapIndex(k), visited, depth+1) { + return false + } + } + return true + case reflect.Func: + if v1.IsNil() && v2.IsNil() { + return true + } + // Can't do better than this: + return false + case reflect.String: + return v1.String() == v2.String() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v1.Int() == v2.Int() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return v1.Uint() == v2.Uint() + case reflect.Float32, reflect.Float64: + // Special handling for floats so that NaN == NaN is true. + f1, f2 := v1.Float(), v2.Float() + if math.IsNaN(f1) && math.IsNaN(f2) { + return true + } + return f1 == f2 + case reflect.Bool: + return v1.Bool() == v2.Bool() + default: + panic(fmt.Sprintf("cannot compare type: %s", v1.Kind().String())) + } +} + +// During deepValueEqual, must keep track of checks that are +// in progress. The comparison algorithm assumes that all +// checks in progress are true when it reencounters them. 
+// Visited comparisons are stored in a map indexed by visit. +type visit struct { + a1 uintptr + a2 uintptr + typ reflect.Type +} diff --git a/vendor/github.com/influxdata/influxdb/pkg/escape/bytes.go b/vendor/github.com/influxdata/influxdb/pkg/escape/bytes.go new file mode 100644 index 0000000..f3b31f4 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/escape/bytes.go @@ -0,0 +1,115 @@ +// Package escape contains utilities for escaping parts of InfluxQL +// and InfluxDB line protocol. +package escape // import "github.com/influxdata/influxdb/pkg/escape" + +import ( + "bytes" + "strings" +) + +// Codes is a map of bytes to be escaped. +var Codes = map[byte][]byte{ + ',': []byte(`\,`), + '"': []byte(`\"`), + ' ': []byte(`\ `), + '=': []byte(`\=`), +} + +// Bytes escapes characters on the input slice, as defined by Codes. +func Bytes(in []byte) []byte { + for b, esc := range Codes { + in = bytes.Replace(in, []byte{b}, esc, -1) + } + return in +} + +const escapeChars = `," =` + +// IsEscaped returns whether b has any escaped characters, +// i.e. whether b seems to have been processed by Bytes. +func IsEscaped(b []byte) bool { + for len(b) > 0 { + i := bytes.IndexByte(b, '\\') + if i < 0 { + return false + } + + if i+1 < len(b) && strings.IndexByte(escapeChars, b[i+1]) >= 0 { + return true + } + b = b[i+1:] + } + return false +} + +// AppendUnescaped appends the unescaped version of src to dst +// and returns the resulting slice. +func AppendUnescaped(dst, src []byte) []byte { + var pos int + for len(src) > 0 { + next := bytes.IndexByte(src[pos:], '\\') + if next < 0 || pos+next+1 >= len(src) { + return append(dst, src...) + } + + if pos+next+1 < len(src) && strings.IndexByte(escapeChars, src[pos+next+1]) >= 0 { + if pos+next > 0 { + dst = append(dst, src[:pos+next]...) + } + src = src[pos+next+1:] + pos = 0 + } else { + pos += next + 1 + } + } + + return dst +} + +// Unescape returns a new slice containing the unescaped version of in. +func Unescape(in []byte) []byte { + if len(in) == 0 { + return nil + } + + if bytes.IndexByte(in, '\\') == -1 { + return in + } + + i := 0 + inLen := len(in) + + // The output size will be no more than inLen. Preallocating the + // capacity of the output is faster and uses less memory than + // letting append() do its own (over)allocation. 
+ out := make([]byte, 0, inLen) + + for { + if i >= inLen { + break + } + if in[i] == '\\' && i+1 < inLen { + switch in[i+1] { + case ',': + out = append(out, ',') + i += 2 + continue + case '"': + out = append(out, '"') + i += 2 + continue + case ' ': + out = append(out, ' ') + i += 2 + continue + case '=': + out = append(out, '=') + i += 2 + continue + } + } + out = append(out, in[i]) + i += 1 + } + return out +} diff --git a/vendor/github.com/influxdata/influxdb/pkg/escape/bytes_test.go b/vendor/github.com/influxdata/influxdb/pkg/escape/bytes_test.go new file mode 100644 index 0000000..8cb101a --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/escape/bytes_test.go @@ -0,0 +1,139 @@ +package escape + +import ( + "bytes" + "reflect" + "strings" + "testing" +) + +var result []byte + +func BenchmarkBytesEscapeNoEscapes(b *testing.B) { + buf := []byte(`no_escapes`) + for i := 0; i < b.N; i++ { + result = Bytes(buf) + } +} + +func BenchmarkUnescapeNoEscapes(b *testing.B) { + buf := []byte(`no_escapes`) + for i := 0; i < b.N; i++ { + result = Unescape(buf) + } +} + +func BenchmarkBytesEscapeMany(b *testing.B) { + tests := [][]byte{ + []byte("this is my special string"), + []byte("a field w=i th == tons of escapes"), + []byte("some,commas,here"), + } + for n := 0; n < b.N; n++ { + for _, test := range tests { + result = Bytes(test) + } + } +} + +func BenchmarkUnescapeMany(b *testing.B) { + tests := [][]byte{ + []byte(`this\ is\ my\ special\ string`), + []byte(`a\ field\ w\=i\ th\ \=\=\ tons\ of\ escapes`), + []byte(`some\,commas\,here`), + } + for i := 0; i < b.N; i++ { + for _, test := range tests { + result = Unescape(test) + } + } +} + +var boolResult bool + +func BenchmarkIsEscaped(b *testing.B) { + tests := [][]byte{ + []byte(`no_escapes`), + []byte(`a\ field\ w\=i\ th\ \=\=\ tons\ of\ escapes`), + []byte(`some\,commas\,here`), + } + for i := 0; i < b.N; i++ { + for _, test := range tests { + boolResult = IsEscaped(test) + } + } +} + +func BenchmarkAppendUnescaped(b *testing.B) { + tests := [][]byte{ + []byte(`this\ is\ my\ special\ string`), + []byte(`a\ field\ w\=i\ th\ \=\=\ tons\ of\ escapes`), + []byte(`some\,commas\,here`), + } + for i := 0; i < b.N; i++ { + result = nil + for _, test := range tests { + result = AppendUnescaped(result, test) + } + } +} + +func TestUnescape(t *testing.T) { + tests := []struct { + in []byte + out []byte + }{ + { + []byte(nil), + []byte(nil), + }, + + { + []byte(""), + []byte(nil), + }, + + { + []byte("\\,\\\"\\ \\="), + []byte(",\" ="), + }, + + { + []byte("\\\\"), + []byte("\\\\"), + }, + + { + []byte("plain and simple"), + []byte("plain and simple"), + }, + } + + for ii, tt := range tests { + got := Unescape(tt.in) + if !reflect.DeepEqual(got, tt.out) { + t.Errorf("[%d] Unescape(%#v) = %#v, expected %#v", ii, string(tt.in), string(got), string(tt.out)) + } + } +} + +func TestAppendUnescaped(t *testing.T) { + cases := strings.Split(strings.TrimSpace(` +normal +inv\alid +goo\"d +sp\ ace +\,\"\ \= +f\\\ x +`), "\n") + + for _, c := range cases { + exp := Unescape([]byte(c)) + got := AppendUnescaped(nil, []byte(c)) + + if !bytes.Equal(got, exp) { + t.Errorf("AppendUnescaped failed for %#q: got %#q, exp %#q", c, got, exp) + } + } + +} diff --git a/vendor/github.com/influxdata/influxdb/pkg/escape/strings.go b/vendor/github.com/influxdata/influxdb/pkg/escape/strings.go new file mode 100644 index 0000000..db98033 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/escape/strings.go @@ -0,0 +1,21 @@ +package escape + +import "strings" + 
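+// A rough usage sketch (illustrative values only):
+//
+//	String("cpu load,host=a")    // `cpu\ load\,host\=a`
+//	UnescapeString(`cpu\ load`)  // "cpu load"
+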
+var ( + escaper = strings.NewReplacer(`,`, `\,`, `"`, `\"`, ` `, `\ `, `=`, `\=`) + unescaper = strings.NewReplacer(`\,`, `,`, `\"`, `"`, `\ `, ` `, `\=`, `=`) +) + +// UnescapeString returns unescaped version of in. +func UnescapeString(in string) string { + if strings.IndexByte(in, '\\') == -1 { + return in + } + return unescaper.Replace(in) +} + +// String returns the escaped version of in. +func String(in string) string { + return escaper.Replace(in) +} diff --git a/vendor/github.com/influxdata/influxdb/pkg/escape/strings_test.go b/vendor/github.com/influxdata/influxdb/pkg/escape/strings_test.go new file mode 100644 index 0000000..d124732 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/escape/strings_test.go @@ -0,0 +1,115 @@ +package escape + +import ( + "testing" +) + +var s string + +func BenchmarkStringEscapeNoEscapes(b *testing.B) { + for n := 0; n < b.N; n++ { + s = String("no_escapes") + } +} + +func BenchmarkStringUnescapeNoEscapes(b *testing.B) { + for n := 0; n < b.N; n++ { + s = UnescapeString("no_escapes") + } +} + +func BenchmarkManyStringEscape(b *testing.B) { + tests := []string{ + "this is my special string", + "a field w=i th == tons of escapes", + "some,commas,here", + } + + for n := 0; n < b.N; n++ { + for _, test := range tests { + s = String(test) + } + } +} + +func BenchmarkManyStringUnescape(b *testing.B) { + tests := []string{ + `this\ is\ my\ special\ string`, + `a\ field\ w\=i\ th\ \=\=\ tons\ of\ escapes`, + `some\,commas\,here`, + } + + for n := 0; n < b.N; n++ { + for _, test := range tests { + s = UnescapeString(test) + } + } +} + +func TestStringEscape(t *testing.T) { + tests := []struct { + in string + expected string + }{ + { + in: "", + expected: "", + }, + { + in: "this is my special string", + expected: `this\ is\ my\ special\ string`, + }, + { + in: "a field w=i th == tons of escapes", + expected: `a\ field\ w\=i\ th\ \=\=\ tons\ of\ escapes`, + }, + { + in: "no_escapes", + expected: "no_escapes", + }, + { + in: "some,commas,here", + expected: `some\,commas\,here`, + }, + } + + for _, test := range tests { + if test.expected != String(test.in) { + t.Errorf("Got %s, expected %s", String(test.in), test.expected) + } + } +} + +func TestStringUnescape(t *testing.T) { + tests := []struct { + in string + expected string + }{ + { + in: "", + expected: "", + }, + { + in: `this\ is\ my\ special\ string`, + expected: "this is my special string", + }, + { + in: `a\ field\ w\=i\ th\ \=\=\ tons\ of\ escapes`, + expected: "a field w=i th == tons of escapes", + }, + { + in: "no_escapes", + expected: "no_escapes", + }, + { + in: `some\,commas\,here`, + expected: "some,commas,here", + }, + } + + for _, test := range tests { + if test.expected != UnescapeString(test.in) { + t.Errorf("Got %s, expected %s", UnescapeString(test.in), test.expected) + } + } +} diff --git a/vendor/github.com/influxdata/influxdb/pkg/estimator/hll/compressed.go b/vendor/github.com/influxdata/influxdb/pkg/estimator/hll/compressed.go new file mode 100644 index 0000000..d2a4880 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/estimator/hll/compressed.go @@ -0,0 +1,173 @@ +package hll + +import "encoding/binary" + +// Original author of this file is github.com/clarkduvall/hyperloglog +type iterable interface { + decode(i int, last uint32) (uint32, int) + Len() int + Iter() *iterator +} + +type iterator struct { + i int + last uint32 + v iterable +} + +func (iter *iterator) Next() uint32 { + n, i := iter.v.decode(iter.i, iter.last) + iter.last = n + iter.i = i + return 
n +} + +func (iter *iterator) Peek() uint32 { + n, _ := iter.v.decode(iter.i, iter.last) + return n +} + +func (iter iterator) HasNext() bool { + return iter.i < iter.v.Len() +} + +type compressedList struct { + count uint32 + last uint32 + b variableLengthList +} + +func (v *compressedList) Clone() *compressedList { + if v == nil { + return nil + } + + newV := &compressedList{ + count: v.count, + last: v.last, + } + + newV.b = make(variableLengthList, len(v.b)) + copy(newV.b, v.b) + return newV +} + +func (v *compressedList) MarshalBinary() (data []byte, err error) { + // Marshal the variableLengthList + bdata, err := v.b.MarshalBinary() + if err != nil { + return nil, err + } + + // At least 4 bytes for the two fixed sized values plus the size of bdata. + data = make([]byte, 0, 4+4+len(bdata)) + + // Marshal the count and last values. + data = append(data, []byte{ + // Number of items in the list. + byte(v.count >> 24), + byte(v.count >> 16), + byte(v.count >> 8), + byte(v.count), + // The last item in the list. + byte(v.last >> 24), + byte(v.last >> 16), + byte(v.last >> 8), + byte(v.last), + }...) + + // Append the list + return append(data, bdata...), nil +} + +func (v *compressedList) UnmarshalBinary(data []byte) error { + // Set the count. + v.count, data = binary.BigEndian.Uint32(data[:4]), data[4:] + + // Set the last value. + v.last, data = binary.BigEndian.Uint32(data[:4]), data[4:] + + // Set the list. + sz, data := binary.BigEndian.Uint32(data[:4]), data[4:] + v.b = make([]uint8, sz) + for i := uint32(0); i < sz; i++ { + v.b[i] = uint8(data[i]) + } + return nil +} + +func newCompressedList(size int) *compressedList { + v := &compressedList{} + v.b = make(variableLengthList, 0, size) + return v +} + +func (v *compressedList) Len() int { + return len(v.b) +} + +func (v *compressedList) decode(i int, last uint32) (uint32, int) { + n, i := v.b.decode(i, last) + return n + last, i +} + +func (v *compressedList) Append(x uint32) { + v.count++ + v.b = v.b.Append(x - v.last) + v.last = x +} + +func (v *compressedList) Iter() *iterator { + return &iterator{0, 0, v} +} + +type variableLengthList []uint8 + +func (v variableLengthList) MarshalBinary() (data []byte, err error) { + // 4 bytes for the size of the list, and a byte for each element in the + // list. + data = make([]byte, 0, 4+v.Len()) + + // Length of the list. We only need 32 bits because the size of the set + // couldn't exceed that on 32 bit architectures. + sz := v.Len() + data = append(data, []byte{ + byte(sz >> 24), + byte(sz >> 16), + byte(sz >> 8), + byte(sz), + }...) + + // Marshal each element in the list. 
+ for i := 0; i < sz; i++ { + data = append(data, byte(v[i])) + } + + return data, nil +} + +func (v variableLengthList) Len() int { + return len(v) +} + +func (v *variableLengthList) Iter() *iterator { + return &iterator{0, 0, v} +} + +func (v variableLengthList) decode(i int, last uint32) (uint32, int) { + var x uint32 + j := i + for ; v[j]&0x80 != 0; j++ { + x |= uint32(v[j]&0x7f) << (uint(j-i) * 7) + } + x |= uint32(v[j]) << (uint(j-i) * 7) + return x, j + 1 +} + +func (v variableLengthList) Append(x uint32) variableLengthList { + for x&0xffffff80 != 0 { + v = append(v, uint8((x&0x7f)|0x80)) + x >>= 7 + } + return append(v, uint8(x&0x7f)) +} diff --git a/vendor/github.com/influxdata/influxdb/pkg/estimator/hll/hll.go b/vendor/github.com/influxdata/influxdb/pkg/estimator/hll/hll.go new file mode 100644 index 0000000..babbe74 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/estimator/hll/hll.go @@ -0,0 +1,495 @@ +// Package hll contains a HyperLogLog++ with a LogLog-Beta bias correction implementation that is adapted (mostly +// copied) from an implementation provided by Clark DuVall +// github.com/clarkduvall/hyperloglog. +// +// The differences are that the implementation in this package: +// +// * uses an AMD64 optimised xxhash algorithm instead of murmur; +// * uses some AMD64 optimisations for things like clz; +// * works with []byte rather than a Hash64 interface, to reduce allocations; +// * implements encoding.BinaryMarshaler and encoding.BinaryUnmarshaler +// +// Based on some rough benchmarking, this implementation of HyperLogLog++ is +// around twice as fast as the github.com/clarkduvall/hyperloglog implementation. +package hll + +import ( + "encoding/binary" + "errors" + "fmt" + "math" + "math/bits" + "sort" + "unsafe" + + "github.com/cespare/xxhash" + "github.com/influxdata/influxdb/pkg/estimator" +) + +// Current version of HLL implementation. +const version uint8 = 2 + +// DefaultPrecision is the default precision. +const DefaultPrecision = 16 + +func beta(ez float64) float64 { + zl := math.Log(ez + 1) + return -0.37331876643753059*ez + + -1.41704077448122989*zl + + 0.40729184796612533*math.Pow(zl, 2) + + 1.56152033906584164*math.Pow(zl, 3) + + -0.99242233534286128*math.Pow(zl, 4) + + 0.26064681399483092*math.Pow(zl, 5) + + -0.03053811369682807*math.Pow(zl, 6) + + 0.00155770210179105*math.Pow(zl, 7) +} + +// Plus implements the Hyperloglog++ algorithm, described in the following +// paper: http://static.googleusercontent.com/media/research.google.com/en//pubs/archive/40671.pdf +// +// The HyperLogLog++ algorithm provides cardinality estimations. +type Plus struct { + // hash function used to hash values to add to the sketch. + hash func([]byte) uint64 + + p uint8 // precision. + pp uint8 // p' (sparse) precision to be used when p ∈ [4..pp] and pp < 64. + + m uint32 // Number of substream used for stochastic averaging of stream. + mp uint32 // m' (sparse) number of substreams. + + alpha float64 // alpha is used for bias correction. + + sparse bool // Should we use a sparse sketch representation. + tmpSet set + + denseList []uint8 // The dense representation of the HLL. + sparseList *compressedList // values that can be stored in the sparse represenation. +} + +// NewPlus returns a new Plus with precision p. p must be between 4 and 18. +func NewPlus(p uint8) (*Plus, error) { + if p > 18 || p < 4 { + return nil, errors.New("precision must be between 4 and 18") + } + + // p' = 25 is used in the Google paper. 
+	pp := uint8(25)
+
+	hll := &Plus{
+		hash:   xxhash.Sum64,
+		p:      p,
+		pp:     pp,
+		m:      1 << p,
+		mp:     1 << pp,
+		tmpSet: set{},
+		sparse: true,
+	}
+	hll.sparseList = newCompressedList(int(hll.m))
+
+	// Determine alpha.
+	switch hll.m {
+	case 16:
+		hll.alpha = 0.673
+	case 32:
+		hll.alpha = 0.697
+	case 64:
+		hll.alpha = 0.709
+	default:
+		hll.alpha = 0.7213 / (1 + 1.079/float64(hll.m))
+	}
+
+	return hll, nil
+}
+
+// Bytes estimates the memory footprint of this Plus, in bytes.
+func (h *Plus) Bytes() int {
+	var b int
+	b += len(h.tmpSet) * 4
+	b += cap(h.denseList)
+	if h.sparseList != nil {
+		b += int(unsafe.Sizeof(*h.sparseList))
+		b += cap(h.sparseList.b)
+	}
+	b += int(unsafe.Sizeof(*h))
+	return b
+}
+
+// NewDefaultPlus creates a new Plus with the default precision.
+func NewDefaultPlus() *Plus {
+	p, err := NewPlus(DefaultPrecision)
+	if err != nil {
+		panic(err)
+	}
+	return p
+}
+
+// Clone returns a deep copy of h.
+func (h *Plus) Clone() estimator.Sketch {
+	var hll = &Plus{
+		hash:       h.hash,
+		p:          h.p,
+		pp:         h.pp,
+		m:          h.m,
+		mp:         h.mp,
+		alpha:      h.alpha,
+		sparse:     h.sparse,
+		tmpSet:     h.tmpSet.Clone(),
+		sparseList: h.sparseList.Clone(),
+	}
+
+	hll.denseList = make([]uint8, len(h.denseList))
+	copy(hll.denseList, h.denseList)
+	return hll
+}
+
+// Add adds a new value to the HLL.
+func (h *Plus) Add(v []byte) {
+	x := h.hash(v)
+	if h.sparse {
+		h.tmpSet.add(h.encodeHash(x))
+
+		if uint32(len(h.tmpSet))*100 > h.m {
+			h.mergeSparse()
+			if uint32(h.sparseList.Len()) > h.m {
+				h.toNormal()
+			}
+		}
+	} else {
+		i := bextr(x, 64-h.p, h.p) // {x63,...,x64-p}
+		w := x<<h.p | 1<<(h.p-1)   // {x63-p,...,x0}
+
+		rho := uint8(bits.LeadingZeros64(w)) + 1
+		if rho > h.denseList[i] {
+			h.denseList[i] = rho
+		}
+	}
+}
+
+// Count returns a cardinality estimate.
+func (h *Plus) Count() uint64 {
+	if h == nil {
+		return 0 // Nothing to do.
+	}
+
+	if h.sparse {
+		h.mergeSparse()
+		return uint64(h.linearCount(h.mp, h.mp-uint32(h.sparseList.count)))
+	}
+	sum := 0.0
+	m := float64(h.m)
+	var count float64
+	for _, val := range h.denseList {
+		sum += 1.0 / float64(uint32(1)<<val)
+		if val == 0 {
+			count++
+		}
+	}
+	// Use LogLog-Beta bias estimation and standard HLL estimation.
+	return uint64(h.alpha * m * (m - count) / (beta(count) + sum))
+}
+
+// Merge takes another Sketch and combines it with Plus h.
+// If the precisions of the two sketches differ, an error is returned.
+func (h *Plus) Merge(s estimator.Sketch) error {
+	if s == nil {
+		// Nothing to do.
+		return nil
+	}
+
+	other, ok := s.(*Plus)
+	if !ok {
+		return fmt.Errorf("wrong type for merging: %T", s)
+	}
+
+	if h.p != other.p {
+		return errors.New("precisions must be equal")
+	}
+
+	if h.sparse {
+		h.toNormal()
+	}
+
+	if other.sparse {
+		for k := range other.tmpSet {
+			i, r := other.decodeHash(k)
+			if h.denseList[i] < r {
+				h.denseList[i] = r
+			}
+		}
+		for iter := other.sparseList.Iter(); iter.HasNext(); {
+			i, r := other.decodeHash(iter.Next())
+			if h.denseList[i] < r {
+				h.denseList[i] = r
+			}
+		}
+	} else {
+		for i, v := range other.denseList {
+			if v > h.denseList[i] {
+				h.denseList[i] = v
+			}
+		}
+	}
+	return nil
+}
+
+// MarshalBinary implements the encoding.BinaryMarshaler interface.
+func (h *Plus) MarshalBinary() (data []byte, err error) {
+	if h == nil {
+		return nil, nil
+	}
+
+	// Marshal a version marker.
+	data = append(data, version)
+
+	// Marshal precision.
+	data = append(data, byte(h.p))
+
+	if h.sparse {
+		// It's using the sparse representation.
+		data = append(data, byte(1))
+
+		// Add the tmp_set
+		tsdata, err := h.tmpSet.MarshalBinary()
+		if err != nil {
+			return nil, err
+		}
+		data = append(data, tsdata...)
+
+		// Add the sparse representation
+		sdata, err := h.sparseList.MarshalBinary()
+		if err != nil {
+			return nil, err
+		}
+		return append(data, sdata...), nil
+	}
+
+	// It's using the dense representation.
+	data = append(data, byte(0))
+
+	// Add the dense sketch representation.
+	sz := len(h.denseList)
+	data = append(data, []byte{
+		byte(sz >> 24),
+		byte(sz >> 16),
+		byte(sz >> 8),
+		byte(sz),
+	}...)
+
+	// Marshal each element in the list.
+	for i := 0; i < len(h.denseList); i++ {
+		data = append(data, byte(h.denseList[i]))
+	}
+
+	return data, nil
+}
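+
+// A rough usage sketch of this type (illustrative only; error handling elided):
+//
+//	h, _ := NewPlus(16)
+//	h.Add([]byte("series-key-1"))
+//	h.Add([]byte("series-key-2"))
+//	n := h.Count() // approximate distinct count, here ~2
+//
+//	other, _ := NewPlus(16)
+//	other.Add([]byte("series-key-3"))
+//	_ = h.Merge(other) // h now estimates ~3 distinct values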
+
+// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
+func (h *Plus) UnmarshalBinary(data []byte) error {
+	if len(data) < 12 {
+		return fmt.Errorf("provided buffer %v too short for initializing HLL sketch", data)
+	}
+
+	// Unmarshal version. We may need this in the future if we make
+	// non-compatible changes.
+	_ = data[0]
+
+	// Unmarshal precision.
+	p := uint8(data[1])
+	newh, err := NewPlus(p)
+	if err != nil {
+		return err
+	}
+	*h = *newh
+
+	// h is now initialised with the correct precision. We just need to fill the
+	// rest of the details out.
+	if data[2] == byte(1) {
+		// Using the sparse representation.
+		h.sparse = true
+
+		// Unmarshal the tmp_set.
+		tssz := binary.BigEndian.Uint32(data[3:7])
+		h.tmpSet = make(map[uint32]struct{}, tssz)
+
+		// We need to unmarshal tssz values in total, and each value requires us
+		// to read 4 bytes.
+		tsLastByte := int((tssz * 4) + 7)
+		for i := 7; i < tsLastByte; i += 4 {
+			k := binary.BigEndian.Uint32(data[i : i+4])
+			h.tmpSet[k] = struct{}{}
+		}
+
+		// Unmarshal the sparse representation.
+		return h.sparseList.UnmarshalBinary(data[tsLastByte:])
+	}
+
+	// Using the dense representation.
+	h.sparse = false
+	dsz := int(binary.BigEndian.Uint32(data[3:7]))
+	h.denseList = make([]uint8, 0, dsz)
+	for i := 7; i < dsz+7; i++ {
+		h.denseList = append(h.denseList, uint8(data[i]))
+	}
+	return nil
+}
+
+func (h *Plus) mergeSparse() {
+	if len(h.tmpSet) == 0 {
+		return
+	}
+	keys := make(uint64Slice, 0, len(h.tmpSet))
+	for k := range h.tmpSet {
+		keys = append(keys, k)
+	}
+	sort.Sort(keys)
+
+	newList := newCompressedList(int(h.m))
+	for iter, i := h.sparseList.Iter(), 0; iter.HasNext() || i < len(keys); {
+		if !iter.HasNext() {
+			newList.Append(keys[i])
+			i++
+			continue
+		}
+
+		if i >= len(keys) {
+			newList.Append(iter.Next())
+			continue
+		}
+
+		x1, x2 := iter.Peek(), keys[i]
+		if x1 == x2 {
+			newList.Append(iter.Next())
+			i++
+		} else if x1 > x2 {
+			newList.Append(x2)
+			i++
+		} else {
+			newList.Append(iter.Next())
+		}
+	}
+
+	h.sparseList = newList
+	h.tmpSet = set{}
+}
+
+// Convert from sparse representation to dense representation.
+func (h *Plus) toNormal() {
+	if len(h.tmpSet) > 0 {
+		h.mergeSparse()
+	}
+
+	h.denseList = make([]uint8, h.m)
+	for iter := h.sparseList.Iter(); iter.HasNext(); {
+		i, r := h.decodeHash(iter.Next())
+		if h.denseList[i] < r {
+			h.denseList[i] = r
+		}
+	}
+
+	h.sparse = false
+	h.tmpSet = nil
+	h.sparseList = nil
+}
+
+// Encode a hash to be used in the sparse representation.
+func (h *Plus) encodeHash(x uint64) uint32 {
+	idx := uint32(bextr(x, 64-h.pp, h.pp))
+	if bextr(x, 64-h.pp, h.pp-h.p) == 0 {
+		zeros := bits.LeadingZeros64((bextr(x, 0, 64-h.pp)<<h.pp)|(1<<h.pp-1)) + 1
+		return idx<<7 | uint32(zeros<<1) | 1
+	}
+	return idx << 1
+}
+
+// Decode a hash from the sparse representation.
+func (h *Plus) decodeHash(k uint32) (uint32, uint8) {
+	var r uint8
+	if k&1 == 1 {
+		r = uint8(bextr32(k, 1, 6)) + h.pp - h.p
+	} else {
+		r = uint8(bits.LeadingZeros32(k<<(32-h.pp+h.p-1))) + 1
+	}
+	return h.getIndex(k), r
+}
+
+func (h *Plus) getIndex(k uint32) uint32 {
+	if k&1 == 1 {
+		return bextr32(k, 32-h.p, h.p)
+	}
+	return bextr32(k, h.pp-h.p+1, h.p)
+}
+
+func (h *Plus) linearCount(m uint32, v uint32) float64 {
+	fm := float64(m)
+	return fm * math.Log(fm/float64(v))
+}
+
+type uint64Slice []uint32
+
+func (p uint64Slice) Len() int           { return len(p) }
+func (p uint64Slice) Less(i, j int) bool { return p[i] < p[j] }
+func (p uint64Slice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
+
+type set map[uint32]struct{}
+
+func (s set) Clone() set {
+	if s == nil {
+		return nil
+	}
+
+	newS := make(map[uint32]struct{}, len(s))
+	for k, v := range s {
+		newS[k] = v
+	}
+	return newS
+}
+
+func (s set) MarshalBinary() (data []byte, err error) {
+	// 4 bytes for the size of the set, and 4 bytes for each key.
+	data = make([]byte, 0, 4+(4*len(s)))
+
+	// Length of the set. We only need 32 bits because the size of the set
+	// couldn't exceed that on 32 bit architectures.
+	sl := len(s)
+	data = append(data, []byte{
+		byte(sl >> 24),
+		byte(sl >> 16),
+		byte(sl >> 8),
+		byte(sl),
+	}...)
+
+	// Marshal each element in the set.
+	for k := range s {
+		data = append(data, []byte{
+			byte(k >> 24),
+			byte(k >> 16),
+			byte(k >> 8),
+			byte(k),
+		}...)
+	}
+
+	return data, nil
+}
+
+func (s set) add(v uint32) { s[v] = struct{}{} }
+func (s set) has(v uint32) bool { _, ok := s[v]; return ok }
+
+// bextr performs a bitfield extract on v. start should be the LSB of the field
+// you wish to extract, and length the number of bits to extract.
+//
+// For example: start=0 and length=4 for the following 64-bit word would result
+// in 1111 being returned.
+// +// 00011110 +// returns 1110 +func bextr(v uint64, start, length uint8) uint64 { + return (v >> start) & ((1 << length) - 1) +} + +func bextr32(v uint32, start, length uint8) uint32 { + return (v >> start) & ((1 << length) - 1) +} diff --git a/vendor/github.com/influxdata/influxdb/pkg/estimator/hll/hll_test.go b/vendor/github.com/influxdata/influxdb/pkg/estimator/hll/hll_test.go new file mode 100644 index 0000000..1f17e0d --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/estimator/hll/hll_test.go @@ -0,0 +1,683 @@ +package hll + +import ( + crand "crypto/rand" + "encoding/binary" + "fmt" + "math" + "math/rand" + "reflect" + "testing" + "unsafe" + + "github.com/davecgh/go-spew/spew" +) + +func nopHash(buf []byte) uint64 { + if len(buf) != 8 { + panic(fmt.Sprintf("unexpected size buffer: %d", len(buf))) + } + return binary.BigEndian.Uint64(buf) +} + +func toByte(v uint64) []byte { + var buf [8]byte + binary.BigEndian.PutUint64(buf[:], v) + return buf[:] +} + +func TestPlus_Bytes(t *testing.T) { + testCases := []struct { + p uint8 + normal bool + }{ + {4, false}, + {5, false}, + {4, true}, + {5, true}, + } + + for i, testCase := range testCases { + t.Run(fmt.Sprint(i), func(t *testing.T) { + h := NewTestPlus(testCase.p) + + plusStructOverhead := int(unsafe.Sizeof(*h)) + compressedListOverhead := int(unsafe.Sizeof(*h.sparseList)) + + var expectedDenseListCapacity, expectedSparseListCapacity int + + if testCase.normal { + h.toNormal() + // denseList has capacity for 2^p elements, one byte each + expectedDenseListCapacity = int(math.Pow(2, float64(testCase.p))) + if expectedDenseListCapacity != cap(h.denseList) { + t.Errorf("denseList capacity: want %d got %d", expectedDenseListCapacity, cap(h.denseList)) + } + } else { + // sparseList has capacity for 2^p elements, one byte each + expectedSparseListCapacity = int(math.Pow(2, float64(testCase.p))) + if expectedSparseListCapacity != cap(h.sparseList.b) { + t.Errorf("sparseList capacity: want %d got %d", expectedSparseListCapacity, cap(h.sparseList.b)) + } + expectedSparseListCapacity += compressedListOverhead + } + + expectedSize := plusStructOverhead + expectedDenseListCapacity + expectedSparseListCapacity + if expectedSize != h.Bytes() { + t.Errorf("Bytes(): want %d got %d", expectedSize, h.Bytes()) + } + }) + } +} + +func TestPlus_Add_NoSparse(t *testing.T) { + h := NewTestPlus(16) + h.toNormal() + + h.Add(toByte(0x00010fffffffffff)) + n := h.denseList[1] + if n != 5 { + t.Error(n) + } + + h.Add(toByte(0x0002ffffffffffff)) + n = h.denseList[2] + if n != 1 { + t.Error(n) + } + + h.Add(toByte(0x0003000000000000)) + n = h.denseList[3] + if n != 49 { + t.Error(n) + } + + h.Add(toByte(0x0003000000000001)) + n = h.denseList[3] + if n != 49 { + t.Error(n) + } + + h.Add(toByte(0xff03700000000000)) + n = h.denseList[0xff03] + if n != 2 { + t.Error(n) + } + + h.Add(toByte(0xff03080000000000)) + n = h.denseList[0xff03] + if n != 5 { + t.Error(n) + } +} + +func TestPlusPrecision_NoSparse(t *testing.T) { + h := NewTestPlus(4) + h.toNormal() + + h.Add(toByte(0x1fffffffffffffff)) + n := h.denseList[1] + if n != 1 { + t.Error(n) + } + + h.Add(toByte(0xffffffffffffffff)) + n = h.denseList[0xf] + if n != 1 { + t.Error(n) + } + + h.Add(toByte(0x00ffffffffffffff)) + n = h.denseList[0] + if n != 5 { + t.Error(n) + } +} + +func TestPlus_toNormal(t *testing.T) { + h := NewTestPlus(16) + h.Add(toByte(0x00010fffffffffff)) + h.toNormal() + c := h.Count() + if c != 1 { + t.Error(c) + } + + if h.sparse { + t.Error("toNormal should convert to normal") + } 
+ + h = NewTestPlus(16) + h.hash = nopHash + h.Add(toByte(0x00010fffffffffff)) + h.Add(toByte(0x0002ffffffffffff)) + h.Add(toByte(0x0003000000000000)) + h.Add(toByte(0x0003000000000001)) + h.Add(toByte(0xff03700000000000)) + h.Add(toByte(0xff03080000000000)) + h.mergeSparse() + h.toNormal() + + n := h.denseList[1] + if n != 5 { + t.Error(n) + } + n = h.denseList[2] + if n != 1 { + t.Error(n) + } + n = h.denseList[3] + if n != 49 { + t.Error(n) + } + n = h.denseList[0xff03] + if n != 5 { + t.Error(n) + } +} + +func TestPlusCount(t *testing.T) { + h := NewTestPlus(16) + + n := h.Count() + if n != 0 { + t.Error(n) + } + + h.Add(toByte(0x00010fffffffffff)) + h.Add(toByte(0x00020fffffffffff)) + h.Add(toByte(0x00030fffffffffff)) + h.Add(toByte(0x00040fffffffffff)) + h.Add(toByte(0x00050fffffffffff)) + h.Add(toByte(0x00050fffffffffff)) + + n = h.Count() + if n != 5 { + t.Error(n) + } + + // not mutated, still returns correct count + n = h.Count() + if n != 5 { + t.Error(n) + } + + h.Add(toByte(0x00060fffffffffff)) + + // mutated + n = h.Count() + if n != 6 { + t.Error(n) + } +} + +func TestPlus_Merge_Error(t *testing.T) { + h := NewTestPlus(16) + h2 := NewTestPlus(10) + + err := h.Merge(h2) + if err == nil { + t.Error("different precision should return error") + } +} + +func TestHLL_Merge_Sparse(t *testing.T) { + h := NewTestPlus(16) + h.Add(toByte(0x00010fffffffffff)) + h.Add(toByte(0x00020fffffffffff)) + h.Add(toByte(0x00030fffffffffff)) + h.Add(toByte(0x00040fffffffffff)) + h.Add(toByte(0x00050fffffffffff)) + h.Add(toByte(0x00050fffffffffff)) + + h2 := NewTestPlus(16) + h2.Merge(h) + n := h2.Count() + if n != 5 { + t.Error(n) + } + + if h2.sparse { + t.Error("Merge should convert to normal") + } + + if !h.sparse { + t.Error("Merge should not modify argument") + } + + h2.Merge(h) + n = h2.Count() + if n != 5 { + t.Error(n) + } + + h.Add(toByte(0x00060fffffffffff)) + h.Add(toByte(0x00070fffffffffff)) + h.Add(toByte(0x00080fffffffffff)) + h.Add(toByte(0x00090fffffffffff)) + h.Add(toByte(0x000a0fffffffffff)) + h.Add(toByte(0x000a0fffffffffff)) + n = h.Count() + if n != 10 { + t.Error(n) + } + + h2.Merge(h) + n = h2.Count() + if n != 10 { + t.Error(n) + } +} + +func TestHLL_Merge_Normal(t *testing.T) { + h := NewTestPlus(16) + h.toNormal() + h.Add(toByte(0x00010fffffffffff)) + h.Add(toByte(0x00020fffffffffff)) + h.Add(toByte(0x00030fffffffffff)) + h.Add(toByte(0x00040fffffffffff)) + h.Add(toByte(0x00050fffffffffff)) + h.Add(toByte(0x00050fffffffffff)) + + h2 := NewTestPlus(16) + h2.toNormal() + h2.Merge(h) + n := h2.Count() + if n != 5 { + t.Error(n) + } + + h2.Merge(h) + n = h2.Count() + if n != 5 { + t.Error(n) + } + + h.Add(toByte(0x00060fffffffffff)) + h.Add(toByte(0x00070fffffffffff)) + h.Add(toByte(0x00080fffffffffff)) + h.Add(toByte(0x00090fffffffffff)) + h.Add(toByte(0x000a0fffffffffff)) + h.Add(toByte(0x000a0fffffffffff)) + n = h.Count() + if n != 10 { + t.Error(n) + } + + h2.Merge(h) + n = h2.Count() + if n != 10 { + t.Error(n) + } +} + +func TestPlus_Merge(t *testing.T) { + h := NewTestPlus(16) + + k1 := uint64(0xf000017000000000) + h.Add(toByte(k1)) + if !h.tmpSet.has(h.encodeHash(k1)) { + t.Error("key not in hash") + } + + k2 := uint64(0x000fff8f00000000) + h.Add(toByte(k2)) + if !h.tmpSet.has(h.encodeHash(k2)) { + t.Error("key not in hash") + } + + if len(h.tmpSet) != 2 { + t.Error(h.tmpSet) + } + + h.mergeSparse() + if len(h.tmpSet) != 0 { + t.Error(h.tmpSet) + } + if h.sparseList.count != 2 { + t.Error(h.sparseList) + } + + iter := h.sparseList.Iter() + n := iter.Next() + if n != 
h.encodeHash(k2) { + t.Error(n) + } + n = iter.Next() + if n != h.encodeHash(k1) { + t.Error(n) + } + + k3 := uint64(0x0f00017000000000) + h.Add(toByte(k3)) + if !h.tmpSet.has(h.encodeHash(k3)) { + t.Error("key not in hash") + } + + h.mergeSparse() + if len(h.tmpSet) != 0 { + t.Error(h.tmpSet) + } + if h.sparseList.count != 3 { + t.Error(h.sparseList) + } + + iter = h.sparseList.Iter() + n = iter.Next() + if n != h.encodeHash(k2) { + t.Error(n) + } + n = iter.Next() + if n != h.encodeHash(k3) { + t.Error(n) + } + n = iter.Next() + if n != h.encodeHash(k1) { + t.Error(n) + } + + h.Add(toByte(k1)) + if !h.tmpSet.has(h.encodeHash(k1)) { + t.Error("key not in hash") + } + + h.mergeSparse() + if len(h.tmpSet) != 0 { + t.Error(h.tmpSet) + } + if h.sparseList.count != 3 { + t.Error(h.sparseList) + } + + iter = h.sparseList.Iter() + n = iter.Next() + if n != h.encodeHash(k2) { + t.Error(n) + } + n = iter.Next() + if n != h.encodeHash(k3) { + t.Error(n) + } + n = iter.Next() + if n != h.encodeHash(k1) { + t.Error(n) + } +} + +func TestPlus_EncodeDecode(t *testing.T) { + h := NewTestPlus(8) + i, r := h.decodeHash(h.encodeHash(0xffffff8000000000)) + if i != 0xff { + t.Error(i) + } + if r != 1 { + t.Error(r) + } + + i, r = h.decodeHash(h.encodeHash(0xff00000000000000)) + if i != 0xff { + t.Error(i) + } + if r != 57 { + t.Error(r) + } + + i, r = h.decodeHash(h.encodeHash(0xff30000000000000)) + if i != 0xff { + t.Error(i) + } + if r != 3 { + t.Error(r) + } + + i, r = h.decodeHash(h.encodeHash(0xaa10000000000000)) + if i != 0xaa { + t.Error(i) + } + if r != 4 { + t.Error(r) + } + + i, r = h.decodeHash(h.encodeHash(0xaa0f000000000000)) + if i != 0xaa { + t.Error(i) + } + if r != 5 { + t.Error(r) + } +} + +func TestPlus_Error(t *testing.T) { + _, err := NewPlus(3) + if err == nil { + t.Error("precision 3 should return error") + } + + _, err = NewPlus(18) + if err != nil { + t.Error(err) + } + + _, err = NewPlus(19) + if err == nil { + t.Error("precision 17 should return error") + } +} + +func TestPlus_Marshal_Unmarshal_Sparse(t *testing.T) { + h, _ := NewPlus(4) + h.sparse = true + h.tmpSet = map[uint32]struct{}{26: struct{}{}, 40: struct{}{}} + + // Add a bunch of values to the sparse representation. + for i := 0; i < 10; i++ { + h.sparseList.Append(uint32(rand.Int())) + } + + data, err := h.MarshalBinary() + if err != nil { + t.Fatal(err) + } + + // Peeking at the first byte should reveal the version. + if got, exp := data[0], byte(2); got != exp { + t.Fatalf("got byte %v, expected %v", got, exp) + } + + var res Plus + if err := res.UnmarshalBinary(data); err != nil { + t.Fatal(err) + } + + // reflect.DeepEqual will always return false when comparing non-nil + // functions, so we'll set them to nil. + h.hash, res.hash = nil, nil + if got, exp := &res, h; !reflect.DeepEqual(got, exp) { + t.Fatalf("got %v, wanted %v", spew.Sdump(got), spew.Sdump(exp)) + } +} + +func TestPlus_Marshal_Unmarshal_Dense(t *testing.T) { + h, _ := NewPlus(4) + h.sparse = false + + // Add a bunch of values to the dense representation. + for i := 0; i < 10; i++ { + h.denseList = append(h.denseList, uint8(rand.Int())) + } + + data, err := h.MarshalBinary() + if err != nil { + t.Fatal(err) + } + + // Peeking at the first byte should reveal the version. 
+ if got, exp := data[0], byte(2); got != exp { + t.Fatalf("got byte %v, expected %v", got, exp) + } + + var res Plus + if err := res.UnmarshalBinary(data); err != nil { + t.Fatal(err) + } + + // reflect.DeepEqual will always return false when comparing non-nil + // functions, so we'll set them to nil. + h.hash, res.hash = nil, nil + if got, exp := &res, h; !reflect.DeepEqual(got, exp) { + t.Fatalf("got %v, wanted %v", spew.Sdump(got), spew.Sdump(exp)) + } +} + +// Tests that a sketch can be serialised / unserialised and keep an accurate +// cardinality estimate. +func TestPlus_Marshal_Unmarshal_Count(t *testing.T) { + if testing.Short() { + t.Skip("Skipping test in short mode") + } + + count := make(map[string]struct{}, 1000000) + h, _ := NewPlus(16) + + buf := make([]byte, 8) + for i := 0; i < 1000000; i++ { + if _, err := crand.Read(buf); err != nil { + panic(err) + } + + count[string(buf)] = struct{}{} + + // Add to the sketch. + h.Add(buf) + } + + gotC := h.Count() + epsilon := 15000 // 1.5% + if got, exp := math.Abs(float64(int(gotC)-len(count))), epsilon; int(got) > exp { + t.Fatalf("error was %v for estimation %d and true cardinality %d", got, gotC, len(count)) + } + + // Serialise the sketch. + sketch, err := h.MarshalBinary() + if err != nil { + t.Fatal(err) + } + + // Deserialise. + h = &Plus{} + if err := h.UnmarshalBinary(sketch); err != nil { + t.Fatal(err) + } + + // The count should be the same + oldC := gotC + if got, exp := h.Count(), oldC; got != exp { + t.Fatalf("got %d, expected %d", got, exp) + } + + // Add some more values. + for i := 0; i < 1000000; i++ { + if _, err := crand.Read(buf); err != nil { + panic(err) + } + + count[string(buf)] = struct{}{} + + // Add to the sketch. + h.Add(buf) + } + + // The sketch should still be working correctly. + gotC = h.Count() + epsilon = 30000 // 1.5% + if got, exp := math.Abs(float64(int(gotC)-len(count))), epsilon; int(got) > exp { + t.Fatalf("error was %v for estimation %d and true cardinality %d", got, gotC, len(count)) + } +} + +func NewTestPlus(p uint8) *Plus { + h, err := NewPlus(p) + if err != nil { + panic(err) + } + h.hash = nopHash + return h +} + +// Generate random data to add to the sketch. +func genData(n int) [][]byte { + out := make([][]byte, 0, n) + buf := make([]byte, 8) + + for i := 0; i < n; i++ { + // generate 8 random bytes + n, err := rand.Read(buf) + if err != nil { + panic(err) + } else if n != 8 { + panic(fmt.Errorf("only %d bytes generated", n)) + } + + out = append(out, buf) + } + if len(out) != n { + panic(fmt.Sprintf("wrong size slice: %d", n)) + } + return out +} + +// Memoises values to be added to a sketch during a benchmark. +var benchdata = map[int][][]byte{} + +func benchmarkPlusAdd(b *testing.B, h *Plus, n int) { + blobs, ok := benchdata[n] + if !ok { + // Generate it. 
+ benchdata[n] = genData(n) + blobs = benchdata[n] + } + + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + for j := 0; j < len(blobs); j++ { + h.Add(blobs[j]) + } + } + b.StopTimer() +} + +func BenchmarkPlus_Add_100(b *testing.B) { + h, _ := NewPlus(16) + benchmarkPlusAdd(b, h, 100) +} + +func BenchmarkPlus_Add_1000(b *testing.B) { + h, _ := NewPlus(16) + benchmarkPlusAdd(b, h, 1000) +} + +func BenchmarkPlus_Add_10000(b *testing.B) { + h, _ := NewPlus(16) + benchmarkPlusAdd(b, h, 10000) +} + +func BenchmarkPlus_Add_100000(b *testing.B) { + h, _ := NewPlus(16) + benchmarkPlusAdd(b, h, 100000) +} + +func BenchmarkPlus_Add_1000000(b *testing.B) { + h, _ := NewPlus(16) + benchmarkPlusAdd(b, h, 1000000) +} + +func BenchmarkPlus_Add_10000000(b *testing.B) { + h, _ := NewPlus(16) + benchmarkPlusAdd(b, h, 10000000) +} + +func BenchmarkPlus_Add_100000000(b *testing.B) { + h, _ := NewPlus(16) + benchmarkPlusAdd(b, h, 100000000) +} diff --git a/vendor/github.com/influxdata/influxdb/pkg/estimator/sketch.go b/vendor/github.com/influxdata/influxdb/pkg/estimator/sketch.go new file mode 100644 index 0000000..b5d0fdc --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/estimator/sketch.go @@ -0,0 +1,24 @@ +package estimator + +import "encoding" + +// Sketch is the interface representing a sketch for estimating cardinality. +type Sketch interface { + // Add adds a single value to the sketch. + Add(v []byte) + + // Count returns a cardinality estimate for the sketch. + Count() uint64 + + // Merge merges another sketch into this one. + Merge(s Sketch) error + + // Bytes estimates the memory footprint of the sketch, in bytes. + Bytes() int + + // Clone returns a deep copy of the sketch. + Clone() Sketch + + encoding.BinaryMarshaler + encoding.BinaryUnmarshaler +} diff --git a/vendor/github.com/influxdata/influxdb/pkg/file/file_unix.go b/vendor/github.com/influxdata/influxdb/pkg/file/file_unix.go new file mode 100644 index 0000000..2287ac2 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/file/file_unix.go @@ -0,0 +1,20 @@ +// +build !windows + +package file + +import "os" + +func SyncDir(dirName string) error { + // fsync the dir to flush the rename + dir, err := os.OpenFile(dirName, os.O_RDONLY, os.ModeDir) + if err != nil { + return err + } + defer dir.Close() + return dir.Sync() +} + +// RenameFile will rename the source to target using os function. +func RenameFile(oldpath, newpath string) error { + return os.Rename(oldpath, newpath) +} diff --git a/vendor/github.com/influxdata/influxdb/pkg/file/file_windows.go b/vendor/github.com/influxdata/influxdb/pkg/file/file_windows.go new file mode 100644 index 0000000..97f31b0 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/file/file_windows.go @@ -0,0 +1,18 @@ +package file + +import "os" + +func SyncDir(dirName string) error { + return nil +} + +// RenameFile will rename the source to target using os function. If target exists it will be removed before renaming. +func RenameFile(oldpath, newpath string) error { + if _, err := os.Stat(newpath); err == nil { + if err = os.Remove(newpath); nil != err { + return err + } + } + + return os.Rename(oldpath, newpath) +} diff --git a/vendor/github.com/influxdata/influxdb/pkg/limiter/fixed.go b/vendor/github.com/influxdata/influxdb/pkg/limiter/fixed.go new file mode 100644 index 0000000..19d967d --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/limiter/fixed.go @@ -0,0 +1,46 @@ +// Package limiter provides concurrency limiters. 
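+//
+// A minimal usage sketch for the Fixed limiter (illustrative only):
+//
+//	limit := NewFixed(4) // allow at most 4 concurrent operations
+//	limit.Take()         // blocks until a slot is free
+//	defer limit.Release()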
+package limiter + +// Fixed is a simple channel-based concurrency limiter. It uses a fixed +// size channel to limit callers from proceeding until there is a value available +// in the channel. If all are in-use, the caller blocks until one is freed. +type Fixed chan struct{} + +func NewFixed(limit int) Fixed { + return make(Fixed, limit) +} + +// Idle returns true if the limiter has all its capacity is available. +func (t Fixed) Idle() bool { + return len(t) == cap(t) +} + +// Available returns the number of available tokens that may be taken. +func (t Fixed) Available() int { + return cap(t) - len(t) +} + +// Capacity returns the number of tokens can be taken. +func (t Fixed) Capacity() int { + return cap(t) +} + +// TryTake attempts to take a token and return true if successful, otherwise returns false. +func (t Fixed) TryTake() bool { + select { + case t <- struct{}{}: + return true + default: + return false + } +} + +// Take attempts to take a token and blocks until one is available. +func (t Fixed) Take() { + t <- struct{}{} +} + +// Release releases a token back to the limiter. +func (t Fixed) Release() { + <-t +} diff --git a/vendor/github.com/influxdata/influxdb/pkg/limiter/fixed_test.go b/vendor/github.com/influxdata/influxdb/pkg/limiter/fixed_test.go new file mode 100644 index 0000000..b45a727 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/limiter/fixed_test.go @@ -0,0 +1,26 @@ +package limiter_test + +import ( + "testing" + + "github.com/influxdata/influxdb/pkg/limiter" +) + +func TestFixed_Available(t *testing.T) { + f := limiter.NewFixed(10) + if exp, got := 10, f.Available(); exp != got { + t.Fatalf("available mismatch: exp %v, got %v", exp, got) + } + + f.Take() + + if exp, got := 9, f.Available(); exp != got { + t.Fatalf("available mismatch: exp %v, got %v", exp, got) + } + + f.Release() + + if exp, got := 10, f.Available(); exp != got { + t.Fatalf("available mismatch: exp %v, got %v", exp, got) + } +} diff --git a/vendor/github.com/influxdata/influxdb/pkg/limiter/write_test.go b/vendor/github.com/influxdata/influxdb/pkg/limiter/write_test.go new file mode 100644 index 0000000..094e15d --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/limiter/write_test.go @@ -0,0 +1,34 @@ +package limiter_test + +import ( + "bytes" + "io" + "testing" + "time" + + "github.com/influxdata/influxdb/pkg/limiter" +) + +func TestWriter_Limited(t *testing.T) { + r := bytes.NewReader(bytes.Repeat([]byte{0}, 1024*1024)) + + limit := 512 * 1024 + w := limiter.NewWriter(discardCloser{}, limit, 10*1024*1024) + + start := time.Now() + n, err := io.Copy(w, r) + elapsed := time.Since(start) + if err != nil { + t.Error("copy error: ", err) + } + + rate := float64(n) / elapsed.Seconds() + if rate > float64(limit) { + t.Errorf("rate limit mismath: exp %f, got %f", float64(limit), rate) + } +} + +type discardCloser struct{} + +func (d discardCloser) Write(b []byte) (int, error) { return len(b), nil } +func (d discardCloser) Close() error { return nil } diff --git a/vendor/github.com/influxdata/influxdb/pkg/limiter/writer.go b/vendor/github.com/influxdata/influxdb/pkg/limiter/writer.go new file mode 100644 index 0000000..f14981a --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/limiter/writer.go @@ -0,0 +1,83 @@ +package limiter + +import ( + "context" + "io" + "os" + "time" + + "golang.org/x/time/rate" +) + +type Writer struct { + w io.WriteCloser + limiter Rate + ctx context.Context +} + +type Rate interface { + WaitN(ctx context.Context, n int) error +} + +func 
NewRate(bytesPerSec, burstLimit int) Rate { + limiter := rate.NewLimiter(rate.Limit(bytesPerSec), burstLimit) + limiter.AllowN(time.Now(), burstLimit) // spend initial burst + return limiter +} + +// NewWriter returns a writer that implements io.Writer with rate limiting. +// The limiter use a token bucket approach and limits the rate to bytesPerSec +// with a maximum burst of burstLimit. +func NewWriter(w io.WriteCloser, bytesPerSec, burstLimit int) *Writer { + limiter := NewRate(bytesPerSec, burstLimit) + + return &Writer{ + w: w, + ctx: context.Background(), + limiter: limiter, + } +} + +// WithRate returns a Writer with the specified rate limiter. +func NewWriterWithRate(w io.WriteCloser, limiter Rate) *Writer { + return &Writer{ + w: w, + ctx: context.Background(), + limiter: limiter, + } +} + +// Write writes bytes from p. +func (s *Writer) Write(b []byte) (int, error) { + if s.limiter == nil { + return s.w.Write(b) + } + + n, err := s.w.Write(b) + if err != nil { + return n, err + } + + if err := s.limiter.WaitN(s.ctx, n); err != nil { + return n, err + } + return n, err +} + +func (s *Writer) Sync() error { + if f, ok := s.w.(*os.File); ok { + return f.Sync() + } + return nil +} + +func (s *Writer) Name() string { + if f, ok := s.w.(*os.File); ok { + return f.Name() + } + return "" +} + +func (s *Writer) Close() error { + return s.w.Close() +} diff --git a/vendor/github.com/influxdata/influxdb/pkg/metrics/context.go b/vendor/github.com/influxdata/influxdb/pkg/metrics/context.go new file mode 100644 index 0000000..ee407ac --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/metrics/context.go @@ -0,0 +1,20 @@ +package metrics + +import "context" + +type key int + +const ( + groupKey key = iota +) + +// NewContextWithGroup returns a new context with the given Group added. +func NewContextWithGroup(ctx context.Context, c *Group) context.Context { + return context.WithValue(ctx, groupKey, c) +} + +// GroupFromContext returns the Group associated with ctx or nil if no Group has been assigned. +func GroupFromContext(ctx context.Context) *Group { + c, _ := ctx.Value(groupKey).(*Group) + return c +} diff --git a/vendor/github.com/influxdata/influxdb/pkg/metrics/counter.go b/vendor/github.com/influxdata/influxdb/pkg/metrics/counter.go new file mode 100644 index 0000000..6f2e526 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/metrics/counter.go @@ -0,0 +1,28 @@ +package metrics + +import ( + "strconv" + "sync/atomic" +) + +// The Counter type represents a numeric counter that is safe to use from concurrent goroutines. +type Counter struct { + val int64 + desc *desc +} + +// Name identifies the name of the counter. +func (c *Counter) Name() string { return c.desc.Name } + +// Value atomically returns the current value of the counter. +func (c *Counter) Value() int64 { return atomic.LoadInt64(&c.val) } + +// Add atomically adds d to the counter. +func (c *Counter) Add(d int64) { atomic.AddInt64(&c.val, d) } + +// String returns a string representation using the name and value of the counter. 
+func (c *Counter) String() string { + var buf [16]byte + v := strconv.AppendInt(buf[:0], c.val, 10) + return c.desc.Name + ": " + string(v) +} diff --git a/vendor/github.com/influxdata/influxdb/pkg/metrics/counter_test.go b/vendor/github.com/influxdata/influxdb/pkg/metrics/counter_test.go new file mode 100644 index 0000000..d444cd8 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/metrics/counter_test.go @@ -0,0 +1,14 @@ +package metrics + +import ( + "testing" +) + +func TestCounter_Add(t *testing.T) { + c := Counter{} + c.Add(5) + c.Add(5) + if exp, got := int64(10), c.Value(); exp != got { + t.Errorf("unexpected value; exp=%d, got=%d", exp, got) + } +} diff --git a/vendor/github.com/influxdata/influxdb/pkg/metrics/default_registry.go b/vendor/github.com/influxdata/influxdb/pkg/metrics/default_registry.go new file mode 100644 index 0000000..893221e --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/metrics/default_registry.go @@ -0,0 +1,36 @@ +package metrics + +var defaultRegistry = NewRegistry() + +// MustRegisterGroup registers a new group using the specified name. +// If the group name is not unique, MustRegisterGroup will panic. +// +// MustRegisterGroup is not safe to call from multiple goroutines. +func MustRegisterGroup(name string) GID { + return defaultRegistry.MustRegisterGroup(name) +} + +// MustRegisterCounter registers a new counter metric with the default registry +// using the provided descriptor. +// If the metric name is not unique, MustRegisterCounter will panic. +// +// MustRegisterCounter is not safe to call from multiple goroutines. +func MustRegisterCounter(name string, opts ...descOption) ID { + return defaultRegistry.MustRegisterCounter(name, opts...) +} + +// MustRegisterTimer registers a new timer metric with the default registry +// using the provided descriptor. +// If the metric name is not unique, MustRegisterTimer will panic. +// +// MustRegisterTimer is not safe to call from multiple goroutines. +func MustRegisterTimer(name string, opts ...descOption) ID { + return defaultRegistry.MustRegisterTimer(name, opts...) +} + +// NewGroup returns a new measurement group from the default registry. +// +// NewGroup is safe to call from multiple goroutines. +func NewGroup(gid GID) *Group { + return defaultRegistry.NewGroup(gid) +} diff --git a/vendor/github.com/influxdata/influxdb/pkg/metrics/descriptors.go b/vendor/github.com/influxdata/influxdb/pkg/metrics/descriptors.go new file mode 100644 index 0000000..0a8dac8 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/metrics/descriptors.go @@ -0,0 +1,64 @@ +package metrics + +type groupDesc struct { + Name string + id GID +} + +type metricType int + +const ( + counterMetricType metricType = iota + timerMetricType +) + +type desc struct { + Name string + mt metricType + gid GID + id ID +} + +type descOption func(*desc) + +// WithGroup assigns the associated measurement to the group identified by gid originally +// returned from MustRegisterGroup. 
+func WithGroup(gid GID) descOption {
+	return func(d *desc) {
+		d.gid = gid
+	}
+}
+
+func newDesc(name string, opts ...descOption) *desc {
+	desc := &desc{Name: name}
+	for _, o := range opts {
+		o(desc)
+	}
+	return desc
+}
+
+const (
+	idMask   = (1 << 32) - 1
+	gidShift = 32
+)
+
+type (
+	GID uint32
+	ID  uint64
+)
+
+func newID(id int, gid GID) ID {
+	return ID(gid)<<gidShift | ID(id)
+}
+
+func (id ID) id() uint32 {
+	return uint32(id & idMask)
+}
+
+func (id ID) gid() uint32 {
+	return uint32(id >> gidShift)
+}
+
+func (id *ID) setGID(gid GID) {
+	*id |= ID(gid) << gidShift
+}
diff --git a/vendor/github.com/influxdata/influxdb/pkg/metrics/descriptors_test.go b/vendor/github.com/influxdata/influxdb/pkg/metrics/descriptors_test.go
new file mode 100644
index 0000000..77bc2c9
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/pkg/metrics/descriptors_test.go
@@ -0,0 +1,21 @@
+package metrics
+
+import (
+	"testing"
+
+	"github.com/influxdata/influxdb/pkg/testing/assert"
+)
+
+func TestID_newID(t *testing.T) {
+	var id = newID(0xff, 0xff0f0fff)
+	assert.Equal(t, id, ID(0xff0f0fff000000ff))
+	assert.Equal(t, id.id(), uint32(0xff))
+	assert.Equal(t, id.gid(), uint32(0xff0f0fff))
+}
+
+func TestID_setGID(t *testing.T) {
+	var id = ID(1)
+	assert.Equal(t, id.gid(), uint32(0))
+	id.setGID(1)
+	assert.Equal(t, id.gid(), uint32(1))
+}
diff --git a/vendor/github.com/influxdata/influxdb/pkg/metrics/doc.go b/vendor/github.com/influxdata/influxdb/pkg/metrics/doc.go
new file mode 100644
index 0000000..cb0feac
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/pkg/metrics/doc.go
@@ -0,0 +1,6 @@
+/*
+Package metrics provides various measurements that are safe for concurrent access.
+
+Measurements are arranged into groups that are efficient to create and access.
+*/
+package metrics
diff --git a/vendor/github.com/influxdata/influxdb/pkg/metrics/group.go b/vendor/github.com/influxdata/influxdb/pkg/metrics/group.go
new file mode 100644
index 0000000..0a02bb0
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/pkg/metrics/group.go
@@ -0,0 +1,37 @@
+package metrics
+
+// The Group type represents an instance of a set of measurements that are used for
+// instrumenting a specific request.
+type Group struct {
+	g        *groupRegistry
+	counters []Counter
+	timers   []Timer
+}
+
+// Name returns the name of the group.
+func (g *Group) Name() string { return g.g.desc.Name }
+
+// GetCounter returns the counter identified by the id that was returned
+// by MustRegisterCounter for the same group.
+// Using an id from a different group will result in undefined behavior.
+func (g *Group) GetCounter(id ID) *Counter { return &g.counters[id.id()] }
+
+// GetTimer returns the timer identified by the id that was returned
+// by MustRegisterTimer for the same group.
+// Using an id from a different group will result in undefined behavior.
+func (g *Group) GetTimer(id ID) *Timer { return &g.timers[id.id()] }
+
+// The Metric type defines a Name
+type Metric interface {
+	Name() string
+}
+
+// ForEach calls fn for all measurements of the group.
+func (g *Group) ForEach(fn func(v Metric)) {
+	for i := range g.counters {
+		fn(&g.counters[i])
+	}
+	for i := range g.timers {
+		fn(&g.timers[i])
+	}
+}
diff --git a/vendor/github.com/influxdata/influxdb/pkg/metrics/group_registry.go b/vendor/github.com/influxdata/influxdb/pkg/metrics/group_registry.go
new file mode 100644
index 0000000..f457f8f
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/pkg/metrics/group_registry.go
@@ -0,0 +1,79 @@
+package metrics
+
+import (
+	"fmt"
+	"sort"
+)
+
+// The groupRegistry type represents a set of metrics that are measured together.
+type groupRegistry struct { + desc *groupDesc + descriptors []*desc + group Group +} + +func (g *groupRegistry) register(desc *desc) error { + p := sort.Search(len(g.descriptors), func(i int) bool { + return g.descriptors[i].Name == desc.Name + }) + + if p != len(g.descriptors) { + return fmt.Errorf("metric name '%s' already in use", desc.Name) + } + + g.descriptors = append(g.descriptors, desc) + sort.Slice(g.descriptors, func(i, j int) bool { + return g.descriptors[i].Name < g.descriptors[j].Name + }) + + return nil +} + +func (g *groupRegistry) mustRegister(desc *desc) { + if err := g.register(desc); err != nil { + panic(err.Error()) + } +} + +// MustRegisterCounter registers a new counter metric using the provided descriptor. +// If the metric name is not unique, MustRegisterCounter will panic. +// +// MustRegisterCounter is not safe to call from multiple goroutines. +func (g *groupRegistry) mustRegisterCounter(desc *desc) ID { + desc.mt = counterMetricType + g.mustRegister(desc) + + desc.id = newID(len(g.group.counters), g.desc.id) + g.group.counters = append(g.group.counters, Counter{desc: desc}) + + return desc.id +} + +// MustRegisterTimer registers a new timer metric using the provided descriptor. +// If the metric name is not unique, MustRegisterTimer will panic. +// +// MustRegisterTimer is not safe to call from multiple goroutines. +func (g *groupRegistry) mustRegisterTimer(desc *desc) ID { + desc.mt = timerMetricType + g.mustRegister(desc) + + desc.id = newID(len(g.group.timers), g.desc.id) + g.group.timers = append(g.group.timers, Timer{desc: desc}) + + return desc.id +} + +// newCollector returns a Collector with a copy of all the registered counters. +// +// newCollector is safe to call from multiple goroutines. +func (g *groupRegistry) newGroup() *Group { + c := &Group{ + g: g, + counters: make([]Counter, len(g.group.counters)), + timers: make([]Timer, len(g.group.timers)), + } + copy(c.counters, g.group.counters) + copy(c.timers, g.group.timers) + + return c +} diff --git a/vendor/github.com/influxdata/influxdb/pkg/metrics/registry.go b/vendor/github.com/influxdata/influxdb/pkg/metrics/registry.go new file mode 100644 index 0000000..b6eea5f --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/metrics/registry.go @@ -0,0 +1,87 @@ +package metrics + +import ( + "fmt" + "sort" +) + +type Registry struct { + descriptors []*groupDesc + groups []groupRegistry +} + +const ( + // DefaultGroup is the identifier for the default group. + DefaultGroup = GID(0) +) + +// NewRegistry creates a new Registry with a single group identified by DefaultGroup. +func NewRegistry() *Registry { + var r Registry + r.MustRegisterGroup("global") + return &r +} + +func (r *Registry) register(gd *groupDesc) error { + p := sort.Search(len(r.descriptors), func(i int) bool { + return r.descriptors[i].Name == gd.Name + }) + + if p != len(r.descriptors) { + return fmt.Errorf("group name '%s' already in use", gd.Name) + } + + r.descriptors = append(r.descriptors, gd) + sort.Slice(r.descriptors, func(i, j int) bool { + return r.descriptors[i].Name < r.descriptors[j].Name + }) + + gd.id = GID(len(r.groups)) + r.groups = append(r.groups, groupRegistry{desc: gd}) + + return nil +} + +func (r *Registry) mustRegister(gd *groupDesc) { + if err := r.register(gd); err != nil { + panic(err.Error()) + } +} + +// MustRegisterGroup registers a new group and panics if a group already exists with the same name. +// +// MustRegisterGroup is not safe to call from concurrent goroutines. 
+func (r *Registry) MustRegisterGroup(name string) GID { + gd := &groupDesc{Name: name} + r.mustRegister(gd) + return gd.id +} + +func (r *Registry) mustGetGroupRegistry(id GID) *groupRegistry { + if int(id) >= len(r.groups) { + panic(fmt.Sprintf("invalid group ID")) + } + return &r.groups[id] +} + +// MustRegisterCounter registers a new counter metric using the provided descriptor. +// If the metric name is not unique within the group, MustRegisterCounter will panic. +// +// MustRegisterCounter is not safe to call from concurrent goroutines. +func (r *Registry) MustRegisterCounter(name string, opts ...descOption) ID { + desc := newDesc(name, opts...) + return r.mustGetGroupRegistry(desc.gid).mustRegisterCounter(desc) +} + +// MustRegisterTimer registers a new timer metric using the provided descriptor. +// If the metric name is not unique within the group, MustRegisterTimer will panic. +// +// MustRegisterTimer is not safe to call from concurrent goroutines. +func (r *Registry) MustRegisterTimer(name string, opts ...descOption) ID { + desc := newDesc(name, opts...) + return r.mustGetGroupRegistry(desc.gid).mustRegisterTimer(desc) +} + +func (r *Registry) NewGroup(gid GID) *Group { + return r.mustGetGroupRegistry(gid).newGroup() +} diff --git a/vendor/github.com/influxdata/influxdb/pkg/metrics/registry_test.go b/vendor/github.com/influxdata/influxdb/pkg/metrics/registry_test.go new file mode 100644 index 0000000..78496f3 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/metrics/registry_test.go @@ -0,0 +1,63 @@ +package metrics + +import ( + "testing" + + "github.com/influxdata/influxdb/pkg/testing/assert" +) + +func TestRegistry_MustRegisterCounter(t *testing.T) { + r := NewRegistry() + id := r.MustRegisterCounter("counter") + assert.Equal(t, id, ID(0), "invalid id") +} + +func TestRegistry_MustRegisterCounter_Panics(t *testing.T) { + r := NewRegistry() + r.MustRegisterCounter("counter") + assert.PanicsWithValue(t, "metric name 'counter' already in use", func() { + r.MustRegisterCounter("counter") + }) +} + +func TestRegistry_NewGroup_CounterIsZero(t *testing.T) { + r := NewRegistry() + id := r.MustRegisterCounter("counter") + + c := r.NewGroup(DefaultGroup).GetCounter(id) + c.Add(1) + assert.Equal(t, int64(1), c.Value()) + + c = r.NewGroup(DefaultGroup).GetCounter(id) + assert.Equal(t, int64(0), c.Value()) +} + +func TestRegistry_MustRegisterTimer(t *testing.T) { + r := NewRegistry() + id := r.MustRegisterTimer("timer") + assert.Equal(t, ID(0), id, "invalid id") +} + +func TestRegistry_MustRegisterTimer_Panics(t *testing.T) { + r := NewRegistry() + r.MustRegisterCounter("timer") + assert.PanicsWithValue(t, "metric name 'timer' already in use", func() { + r.MustRegisterCounter("timer") + }) +} + +func TestRegistry_MustRegisterMultiple(t *testing.T) { + r := NewRegistry() + cnt := r.MustRegisterCounter("counter") + tmr := r.MustRegisterTimer("timer") + assert.Equal(t, ID(0), cnt, "invalid id") + assert.Equal(t, ID(0), tmr, "invalid id") +} + +func TestRegistry_MustRegister_Panics_Across_Measurements(t *testing.T) { + r := NewRegistry() + r.MustRegisterCounter("foo") + assert.PanicsWithValue(t, "metric name 'foo' already in use", func() { + r.MustRegisterCounter("foo") + }) +} diff --git a/vendor/github.com/influxdata/influxdb/pkg/metrics/timer.go b/vendor/github.com/influxdata/influxdb/pkg/metrics/timer.go new file mode 100644 index 0000000..a0382c5 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/metrics/timer.go @@ -0,0 +1,34 @@ +package metrics + +import ( + 
"sync/atomic" + "time" +) + +// The timer type is used to store a duration. +type Timer struct { + val int64 + desc *desc +} + +// Name returns the name of the timer. +func (t *Timer) Name() string { return t.desc.Name } + +// Value atomically returns the value of the timer. +func (t *Timer) Value() time.Duration { return time.Duration(atomic.LoadInt64(&t.val)) } + +// Update sets the timer value to d. +func (t *Timer) Update(d time.Duration) { atomic.StoreInt64(&t.val, int64(d)) } + +// UpdateSince sets the timer value to the difference between since and the current time. +func (t *Timer) UpdateSince(since time.Time) { t.Update(time.Since(since)) } + +// String returns a string representation using the name and value of the timer. +func (t *Timer) String() string { return t.desc.Name + ": " + time.Duration(t.val).String() } + +// Time updates the timer to the duration it takes to call f. +func (t *Timer) Time(f func()) { + s := time.Now() + f() + t.UpdateSince(s) +} diff --git a/vendor/github.com/influxdata/influxdb/pkg/metrics/timer_test.go b/vendor/github.com/influxdata/influxdb/pkg/metrics/timer_test.go new file mode 100644 index 0000000..aca439b --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/metrics/timer_test.go @@ -0,0 +1,14 @@ +package metrics + +import ( + "testing" + "time" + + "github.com/influxdata/influxdb/pkg/testing/assert" +) + +func TestTimer_Update(t *testing.T) { + var c Timer + c.Update(100 * time.Millisecond) + assert.Equal(t, c.Value(), 100*time.Millisecond, "unexpected value") +} diff --git a/vendor/github.com/influxdata/influxdb/pkg/mmap/mmap_solaris.go b/vendor/github.com/influxdata/influxdb/pkg/mmap/mmap_solaris.go new file mode 100644 index 0000000..4a406db --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/mmap/mmap_solaris.go @@ -0,0 +1,45 @@ +// +build solaris + +package mmap + +import ( + "os" + "syscall" + + "golang.org/x/sys/unix" +) + +func Map(path string, sz int64) ([]byte, error) { + f, err := os.Open(path) + if err != nil { + return nil, err + } + defer f.Close() + + fi, err := f.Stat() + if err != nil { + return nil, err + } else if fi.Size() == 0 { + return nil, nil + } + + // Use file size if map size is not passed in. + if sz == 0 { + sz = fi.Size() + } + + data, err := unix.Mmap(int(f.Fd()), 0, int(sz), syscall.PROT_READ, syscall.MAP_SHARED) + if err != nil { + return nil, err + } + + return data, nil +} + +// Unmap closes the memory-map. 
+func Unmap(data []byte) error { + if data == nil { + return nil + } + return unix.Munmap(data) +} diff --git a/vendor/github.com/influxdata/influxdb/pkg/mmap/mmap_test.go b/vendor/github.com/influxdata/influxdb/pkg/mmap/mmap_test.go new file mode 100644 index 0000000..a182219 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/mmap/mmap_test.go @@ -0,0 +1,22 @@ +package mmap_test + +import ( + "bytes" + "io/ioutil" + "testing" + + "github.com/influxdata/influxdb/pkg/mmap" +) + +func TestMap(t *testing.T) { + data, err := mmap.Map("mmap_test.go", 0) + if err != nil { + t.Fatalf("Open: %v", err) + } + + if exp, err := ioutil.ReadFile("mmap_test.go"); err != nil { + t.Fatalf("ioutil.ReadFile: %v", err) + } else if !bytes.Equal(data, exp) { + t.Fatalf("got %q\nwant %q", string(data), string(exp)) + } +} diff --git a/vendor/github.com/influxdata/influxdb/pkg/mmap/mmap_unix.go b/vendor/github.com/influxdata/influxdb/pkg/mmap/mmap_unix.go new file mode 100644 index 0000000..13629c1 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/mmap/mmap_unix.go @@ -0,0 +1,49 @@ +// +build darwin dragonfly freebsd linux nacl netbsd openbsd + +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package mmap provides a way to memory-map a file. +package mmap + +import ( + "os" + "syscall" +) + +// Map memory-maps a file. +func Map(path string, sz int64) ([]byte, error) { + f, err := os.Open(path) + if err != nil { + return nil, err + } + defer f.Close() + + fi, err := f.Stat() + if err != nil { + return nil, err + } else if fi.Size() == 0 { + return nil, nil + } + + // Use file size if map size is not passed in. + if sz == 0 { + sz = fi.Size() + } + + data, err := syscall.Mmap(int(f.Fd()), 0, int(sz), syscall.PROT_READ, syscall.MAP_SHARED) + if err != nil { + return nil, err + } + + return data, nil +} + +// Unmap closes the memory-map. +func Unmap(data []byte) error { + if data == nil { + return nil + } + return syscall.Munmap(data) +} diff --git a/vendor/github.com/influxdata/influxdb/pkg/mmap/mmap_windows.go b/vendor/github.com/influxdata/influxdb/pkg/mmap/mmap_windows.go new file mode 100644 index 0000000..5df36ea --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/mmap/mmap_windows.go @@ -0,0 +1,56 @@ +package mmap + +import ( + "os" + "syscall" + "unsafe" +) + +// Map memory-maps a file. +func Map(path string, sz int64) ([]byte, error) { + fi, err := os.Stat(path) + if err != nil { + return nil, err + } + + // Truncate file to size if too small. + if fi.Size() < sz { + if err := os.Truncate(path, sz); err != nil { + return nil, err + } + } else { + sz = fi.Size() + } + if sz == 0 { + return nil, nil + } + + f, err := os.Open(path) + if err != nil { + return nil, err + } + defer f.Close() + + lo, hi := uint32(sz), uint32(sz>>32) + fmap, err := syscall.CreateFileMapping(syscall.Handle(f.Fd()), nil, syscall.PAGE_READONLY, hi, lo, nil) + if err != nil { + return nil, err + } + defer syscall.CloseHandle(fmap) + + ptr, err := syscall.MapViewOfFile(fmap, syscall.FILE_MAP_READ, 0, 0, uintptr(sz)) + if err != nil { + return nil, err + } + data := (*[1 << 30]byte)(unsafe.Pointer(ptr))[:sz] + + return data, nil +} + +// Unmap closes the memory-map. 
+func Unmap(data []byte) error {
+	if data == nil {
+		return nil
+	}
+	return syscall.UnmapViewOfFile(uintptr(unsafe.Pointer(&data[0])))
+}
diff --git a/vendor/github.com/influxdata/influxdb/pkg/pool/bytes.go b/vendor/github.com/influxdata/influxdb/pkg/pool/bytes.go
new file mode 100644
index 0000000..5fdb3d3
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/pkg/pool/bytes.go
@@ -0,0 +1,99 @@
+// Package pool provides pool structures to help reduce garbage collector pressure.
+package pool
+
+// Bytes is a pool of byte slices that can be re-used. Slices in
+// this pool will not be garbage collected when not in use.
+type Bytes struct {
+	pool chan []byte
+}
+
+// NewBytes returns a Bytes pool with capacity for max byte slices
+// to be pooled.
+func NewBytes(max int) *Bytes {
+	return &Bytes{
+		pool: make(chan []byte, max),
+	}
+}
+
+// Get returns a byte slice with at least sz capacity. Items
+// returned may not be in the zero state and should be reset by the
+// caller.
+func (p *Bytes) Get(sz int) []byte {
+	var c []byte
+	select {
+	case c = <-p.pool:
+	default:
+		return make([]byte, sz)
+	}
+
+	if cap(c) < sz {
+		return make([]byte, sz)
+	}
+
+	return c[:sz]
+}
+
+// Put returns a slice back to the pool. If the pool is full, the byte
+// slice is discarded.
+func (p *Bytes) Put(c []byte) {
+	select {
+	case p.pool <- c:
+	default:
+	}
+}
+
+// LimitedBytes is a pool of byte slices that can be re-used. Slices in
+// this pool will not be garbage collected when not in use. The pool will
+// hold onto a fixed number of byte slices of a maximum size. If the pool
+// is empty and max pool size has not been allocated yet, it will return a
+// new byte slice. Byte slices added to the pool that are over the max size
+// are dropped.
+type LimitedBytes struct {
+	maxSize int
+	pool    chan []byte
+}
+
+// NewLimitedBytes returns a LimitedBytes pool that holds up to capacity byte
+// slices, dropping any slice whose capacity reaches maxSize bytes.
+func NewLimitedBytes(capacity int, maxSize int) *LimitedBytes {
+	return &LimitedBytes{
+		pool:    make(chan []byte, capacity),
+		maxSize: maxSize,
+	}
+}
+
+// Get returns a byte slice with at least sz capacity. Items
+// returned may not be in the zero state and should be reset by the
+// caller.
+func (p *LimitedBytes) Get(sz int) []byte {
+	var c []byte
+
+	// Attempt to reuse a slice from the pool; if none is available, or the
+	// pooled slice is too small, allocate a new one instead.
+	select {
+	case c = <-p.pool:
+	default:
+		return make([]byte, sz)
+	}
+
+	if cap(c) < sz {
+		return make([]byte, sz)
+	}
+
+	return c[:sz]
+}
+
+// Put returns a slice back to the pool. If the pool is full, the byte
+// slice is discarded. If the byte slice is over the configured max size
+// of any byte slice in the pool, it is discarded.
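+//
+// A typical Get/Put cycle looks like this sketch (the size is illustrative):
+//
+//	buf := p.Get(4096) // at least 4096 bytes of capacity, contents undefined
+//	buf = buf[:0]      // reset before use; pooled slices are not zeroed
+//	// ... use buf ...
+//	p.Put(buf)         // dropped if the pool is full or buf's capacity reaches maxSize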
+func (p *LimitedBytes) Put(c []byte) { + // Drop buffers that are larger than the max size + if cap(c) >= p.maxSize { + return + } + + select { + case p.pool <- c: + default: + } +} diff --git a/vendor/github.com/influxdata/influxdb/pkg/pool/bytes_test.go b/vendor/github.com/influxdata/influxdb/pkg/pool/bytes_test.go new file mode 100644 index 0000000..28ca98d --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/pool/bytes_test.go @@ -0,0 +1,16 @@ +package pool_test + +import ( + "testing" + + "github.com/influxdata/influxdb/pkg/pool" +) + +func TestLimitedBytePool_Put_MaxSize(t *testing.T) { + bp := pool.NewLimitedBytes(1, 10) + bp.Put(make([]byte, 1024)) // should be dropped + + if got, exp := cap(bp.Get(10)), 10; got != exp { + t.Fatalf("max cap size exceeded: got %v, exp %v", got, exp) + } +} diff --git a/vendor/github.com/influxdata/influxdb/pkg/pool/generic.go b/vendor/github.com/influxdata/influxdb/pkg/pool/generic.go new file mode 100644 index 0000000..9eb98cc --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/pool/generic.go @@ -0,0 +1,40 @@ +package pool + +// Generic is a pool of types that can be re-used. Items in +// this pool will not be garbage collected when not in use. +type Generic struct { + pool chan interface{} + fn func(sz int) interface{} +} + +// NewGeneric returns a Generic pool with capacity for max items +// to be pool. +func NewGeneric(max int, fn func(sz int) interface{}) *Generic { + return &Generic{ + pool: make(chan interface{}, max), + fn: fn, + } +} + +// Get returns a item from the pool or a new instance if the pool +// is empty. Items returned may not be in the zero state and should +// be reset by the caller. +func (p *Generic) Get(sz int) interface{} { + var c interface{} + select { + case c = <-p.pool: + default: + c = p.fn(sz) + } + + return c +} + +// Put returns an item back to the pool. If the pool is full, the item +// is discarded. +func (p *Generic) Put(c interface{}) { + select { + case p.pool <- c: + default: + } +} diff --git a/vendor/github.com/influxdata/influxdb/pkg/pprofutil/pprofutil.go b/vendor/github.com/influxdata/influxdb/pkg/pprofutil/pprofutil.go new file mode 100644 index 0000000..d73352c --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/pprofutil/pprofutil.go @@ -0,0 +1,36 @@ +package pprofutil + +import ( + "os" + "runtime/pprof" +) + +type Profile struct { + *pprof.Profile + + Path string + Debug int +} + +func NewProfile(name, path string, debug int) *Profile { + p := &Profile{Profile: pprof.NewProfile(name), Path: path, Debug: debug} + return p +} + +func (p *Profile) Stop() { + f, err := os.Create(p.Path) + if err != nil { + panic(err) + } + defer f.Close() + + if err := p.WriteTo(f, p.Debug); err != nil { + panic(err) + } + + if err := f.Close(); err != nil { + panic(err) + } + + println("pprof profile written:", p.Path) +} diff --git a/vendor/github.com/influxdata/influxdb/pkg/radix/buffer.go b/vendor/github.com/influxdata/influxdb/pkg/radix/buffer.go new file mode 100644 index 0000000..19e5bfc --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/radix/buffer.go @@ -0,0 +1,31 @@ +package radix + +// bufferSize is the size of the buffer and the largest slice that can be +// contained in it. +const bufferSize = 4096 + +// buffer is a type that amoritizes allocations into larger ones, handing out +// small subslices to make copies. +type buffer []byte + +// Copy returns a copy of the passed in byte slice allocated using the byte +// slice in the buffer. 
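+//
+// For example (sketch): consecutive Copy calls hand out adjacent sub-slices of
+// one shared allocation, each with its capacity clipped so that appending to
+// one copy cannot overwrite the next:
+//
+//	var buf buffer
+//	a := buf.Copy([]byte("cpu"))
+//	b := buf.Copy([]byte("mem"))
+//	a = append(a, '!') // reallocates a; b is untouched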
+func (b *buffer) Copy(x []byte) []byte { + // if we can never have enough room, just return a copy + if len(x) > bufferSize { + out := make([]byte, len(x)) + copy(out, x) + return out + } + + // if we don't have enough room, reallocate the buf first + if len(x) > len(*b) { + *b = make([]byte, bufferSize) + } + + // create a copy and hand out a slice + copy(*b, x) + out := (*b)[:len(x):len(x)] + *b = (*b)[len(x):] + return out +} diff --git a/vendor/github.com/influxdata/influxdb/pkg/radix/buffer_test.go b/vendor/github.com/influxdata/influxdb/pkg/radix/buffer_test.go new file mode 100644 index 0000000..ff21c5a --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/radix/buffer_test.go @@ -0,0 +1,55 @@ +package radix + +import ( + "bytes" + "math/rand" + "testing" +) + +func TestBuffer(t *testing.T) { + var buf buffer + + for i := 0; i < 1000; i++ { + x1 := make([]byte, rand.Intn(32)+1) + for j := range x1 { + x1[j] = byte(i + j) + } + + x2 := buf.Copy(x1) + if !bytes.Equal(x2, x1) { + t.Fatal("bad copy") + } + + x1[0] += 1 + if bytes.Equal(x2, x1) { + t.Fatal("bad copy") + } + } +} + +func TestBufferAppend(t *testing.T) { + var buf buffer + x1 := buf.Copy(make([]byte, 1)) + x2 := buf.Copy(make([]byte, 1)) + + _ = append(x1, 1) + if x2[0] != 0 { + t.Fatal("append wrote past") + } +} + +func TestBufferLarge(t *testing.T) { + var buf buffer + + x1 := make([]byte, bufferSize+1) + x2 := buf.Copy(x1) + + if !bytes.Equal(x1, x2) { + t.Fatal("bad copy") + } + + x1[0] += 1 + if bytes.Equal(x1, x2) { + t.Fatal("bad copy") + } +} diff --git a/vendor/github.com/influxdata/influxdb/pkg/radix/sort.go b/vendor/github.com/influxdata/influxdb/pkg/radix/sort.go new file mode 100644 index 0000000..cfc486d --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/radix/sort.go @@ -0,0 +1,92 @@ +// Portions of this file from github.com/shawnsmithdev/zermelo under the MIT license. +// +// The MIT License (MIT) +// +// Copyright (c) 2014 Shawn Smith +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package radix + +import ( + "sort" +) + +const ( + minSize = 256 + radix uint = 8 + bitSize uint = 64 +) + +// SortUint64s sorts a slice of uint64s. 
+func SortUint64s(x []uint64) { + if len(x) < 2 { + return + } else if len(x) < minSize { + sort.Slice(x, func(i, j int) bool { return x[i] < x[j] }) + } else { + doSort(x) + } +} + +func doSort(x []uint64) { + // Each pass processes a byte offset, copying back and forth between slices + from := x + to := make([]uint64, len(x)) + var key uint8 + var offset [256]int // Keep track of where groups start + + for keyOffset := uint(0); keyOffset < bitSize; keyOffset += radix { + keyMask := uint64(0xFF << keyOffset) // Current 'digit' to look at + var counts [256]int // Keep track of the number of elements for each kind of byte + sorted := true // Check for already sorted + prev := uint64(0) // if elem is always >= prev it is already sorted + for _, elem := range from { + key = uint8((elem & keyMask) >> keyOffset) // fetch the byte at current 'digit' + counts[key]++ // count of elems to put in this digit's bucket + + if sorted { // Detect sorted + sorted = elem >= prev + prev = elem + } + } + + if sorted { // Short-circuit sorted + if (keyOffset/radix)%2 == 1 { + copy(to, from) + } + return + } + + // Find target bucket offsets + offset[0] = 0 + for i := 1; i < len(offset); i++ { + offset[i] = offset[i-1] + counts[i-1] + } + + // Rebucket while copying to other buffer + for _, elem := range from { + key = uint8((elem & keyMask) >> keyOffset) // Get the digit + to[offset[key]] = elem // Copy the element to the digit's bucket + offset[key]++ // One less space, move the offset + } + // On next pass copy data the other way + to, from = from, to + } +} diff --git a/vendor/github.com/influxdata/influxdb/pkg/radix/sort_test.go b/vendor/github.com/influxdata/influxdb/pkg/radix/sort_test.go new file mode 100644 index 0000000..19e6d08 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/radix/sort_test.go @@ -0,0 +1,27 @@ +package radix + +import ( + "math/rand" + "testing" +) + +func benchmarkSort(b *testing.B, size int) { + orig := make([]uint64, size) + for i := range orig { + orig[i] = uint64(rand.Int63()) + } + data := make([]uint64, size) + + b.ResetTimer() + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + copy(data, orig) + SortUint64s(data) + } +} + +func BenchmarkSort_64(b *testing.B) { benchmarkSort(b, 64) } +func BenchmarkSort_128(b *testing.B) { benchmarkSort(b, 128) } +func BenchmarkSort_256(b *testing.B) { benchmarkSort(b, 256) } +func BenchmarkSort_12K(b *testing.B) { benchmarkSort(b, 12*1024) } diff --git a/vendor/github.com/influxdata/influxdb/pkg/radix/tree.go b/vendor/github.com/influxdata/influxdb/pkg/radix/tree.go new file mode 100644 index 0000000..5bf21bb --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/radix/tree.go @@ -0,0 +1,428 @@ +package radix + +// This is a fork of https://github.com/armon/go-radix that removes the +// ability to update nodes as well as uses fixed int value type. + +import ( + "bytes" + "sort" + "sync" +) + +// leafNode is used to represent a value +type leafNode struct { + valid bool // true if key/val are valid + key []byte + val int +} + +// edge is used to represent an edge node +type edge struct { + label byte + node *node +} + +type node struct { + // leaf is used to store possible leaf + leaf leafNode + + // prefix is the common prefix we ignore + prefix []byte + + // Edges should be stored in-order for iteration. 
+ // We avoid a fully materialized slice to save memory, + // since in most cases we expect to be sparse + edges edges +} + +func (n *node) isLeaf() bool { + return n.leaf.valid +} + +func (n *node) addEdge(e edge) { + // find the insertion point with bisection + num := len(n.edges) + i, j := 0, num + for i < j { + h := int(uint(i+j) >> 1) + if n.edges[h].label < e.label { + i = h + 1 + } else { + j = h + } + } + + // make room, copy the suffix, and insert. + n.edges = append(n.edges, edge{}) + copy(n.edges[i+1:], n.edges[i:]) + n.edges[i] = e +} + +func (n *node) replaceEdge(e edge) { + num := len(n.edges) + idx := sort.Search(num, func(i int) bool { + return n.edges[i].label >= e.label + }) + if idx < num && n.edges[idx].label == e.label { + n.edges[idx].node = e.node + return + } + panic("replacing missing edge") +} + +func (n *node) getEdge(label byte) *node { + // linear search for small slices + if len(n.edges) < 16 { + for _, e := range n.edges { + if e.label == label { + return e.node + } + } + return nil + } + + // binary search for larger + num := len(n.edges) + i, j := 0, num + for i < j { + h := int(uint(i+j) >> 1) + if n.edges[h].label < label { + i = h + 1 + } else { + j = h + } + } + if i < num && n.edges[i].label == label { + return n.edges[i].node + } + return nil +} + +type edges []edge + +// Tree implements a radix tree. This can be treated as a +// Dictionary abstract data type. The main advantage over +// a standard hash map is prefix-based lookups and +// ordered iteration. The tree is safe for concurrent access. +type Tree struct { + mu sync.RWMutex + root *node + size int + buf buffer +} + +// New returns an empty Tree +func New() *Tree { + return &Tree{root: &node{}} +} + +// NewFromMap returns a new tree containing the keys +// from an existing map +func NewFromMap(m map[string]int) *Tree { + t := &Tree{root: &node{}} + for k, v := range m { + t.Insert([]byte(k), v) + } + return t +} + +// Len is used to return the number of elements in the tree +func (t *Tree) Len() int { + t.mu.RLock() + size := t.size + t.mu.RUnlock() + + return size +} + +// longestPrefix finds the length of the shared prefix +// of two strings +func longestPrefix(k1, k2 []byte) int { + // for loops can't be inlined, but goto's can. we also use uint to help + // out the compiler to prove bounds checks aren't necessary on the index + // operations. + + lk1, lk2 := uint(len(k1)), uint(len(k2)) + i := uint(0) + +loop: + if lk1 <= i || lk2 <= i { + return int(i) + } + if k1[i] != k2[i] { + return int(i) + } + i++ + goto loop +} + +// Insert is used to add a newentry or update +// an existing entry. Returns if inserted. 
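+//
+// A small usage sketch (the keys are illustrative):
+//
+//	t := New()
+//	t.Insert([]byte("cpu,host=a"), 1)
+//	t.Insert([]byte("cpu,host=b"), 2)
+//	v, ok := t.Get([]byte("cpu,host=a")) // v == 1, ok == true
+//	n := t.DeletePrefix([]byte("cpu,"))  // removes both entries, n == 2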
+func (t *Tree) Insert(s []byte, v int) (int, bool) { + t.mu.RLock() + + var parent *node + n := t.root + search := s + + for { + // Handle key exhaution + if len(search) == 0 { + if n.isLeaf() { + old := n.leaf.val + + t.mu.RUnlock() + return old, false + } + + n.leaf = leafNode{ + key: t.buf.Copy(s), + val: v, + valid: true, + } + t.size++ + + t.mu.RUnlock() + return v, true + } + + // Look for the edge + parent = n + n = n.getEdge(search[0]) + + // No edge, create one + if n == nil { + newNode := &node{ + leaf: leafNode{ + key: t.buf.Copy(s), + val: v, + valid: true, + }, + prefix: t.buf.Copy(search), + } + + e := edge{ + label: search[0], + node: newNode, + } + + parent.addEdge(e) + t.size++ + + t.mu.RUnlock() + return v, true + } + + // Determine longest prefix of the search key on match + commonPrefix := longestPrefix(search, n.prefix) + if commonPrefix == len(n.prefix) { + search = search[commonPrefix:] + continue + } + + // Split the node + t.size++ + child := &node{ + prefix: t.buf.Copy(search[:commonPrefix]), + } + parent.replaceEdge(edge{ + label: search[0], + node: child, + }) + + // Restore the existing node + child.addEdge(edge{ + label: n.prefix[commonPrefix], + node: n, + }) + n.prefix = n.prefix[commonPrefix:] + + // Create a new leaf node + leaf := leafNode{ + key: t.buf.Copy(s), + val: v, + valid: true, + } + + // If the new key is a subset, add to to this node + search = search[commonPrefix:] + if len(search) == 0 { + child.leaf = leaf + + t.mu.RUnlock() + return v, true + } + + // Create a new edge for the node + child.addEdge(edge{ + label: search[0], + node: &node{ + leaf: leaf, + prefix: t.buf.Copy(search), + }, + }) + + t.mu.RUnlock() + return v, true + } +} + +// DeletePrefix is used to delete the subtree under a prefix +// Returns how many nodes were deleted +// Use this to delete large subtrees efficiently +func (t *Tree) DeletePrefix(s []byte) int { + t.mu.Lock() + defer t.mu.Unlock() + + return t.deletePrefix(nil, t.root, s) +} + +// delete does a recursive deletion +func (t *Tree) deletePrefix(parent, n *node, prefix []byte) int { + // Check for key exhaustion + if len(prefix) == 0 { + // Remove the leaf node + subTreeSize := 0 + //recursively walk from all edges of the node to be deleted + recursiveWalk(n, func(s []byte, v int) bool { + subTreeSize++ + return false + }) + if n.isLeaf() { + n.leaf = leafNode{} + } + n.edges = nil // deletes the entire subtree + + // Check if we should merge the parent's other child + if parent != nil && parent != t.root && len(parent.edges) == 1 && !parent.isLeaf() { + parent.mergeChild() + } + t.size -= subTreeSize + return subTreeSize + } + + // Look for an edge + label := prefix[0] + child := n.getEdge(label) + if child == nil || (!bytes.HasPrefix(child.prefix, prefix) && !bytes.HasPrefix(prefix, child.prefix)) { + return 0 + } + + // Consume the search prefix + if len(child.prefix) > len(prefix) { + prefix = prefix[len(prefix):] + } else { + prefix = prefix[len(child.prefix):] + } + return t.deletePrefix(n, child, prefix) +} + +func (n *node) mergeChild() { + e := n.edges[0] + child := e.node + prefix := make([]byte, 0, len(n.prefix)+len(child.prefix)) + prefix = append(prefix, n.prefix...) + prefix = append(prefix, child.prefix...) 
+ n.prefix = prefix + n.leaf = child.leaf + n.edges = child.edges +} + +// Get is used to lookup a specific key, returning +// the value and if it was found +func (t *Tree) Get(s []byte) (int, bool) { + t.mu.RLock() + + n := t.root + search := s + for { + // Check for key exhaution + if len(search) == 0 { + if n.isLeaf() { + t.mu.RUnlock() + return n.leaf.val, true + } + break + } + + // Look for an edge + n = n.getEdge(search[0]) + if n == nil { + break + } + + // Consume the search prefix + if bytes.HasPrefix(search, n.prefix) { + search = search[len(n.prefix):] + } else { + break + } + } + + t.mu.RUnlock() + return 0, false +} + +// walkFn is used when walking the tree. Takes a +// key and value, returning if iteration should +// be terminated. +type walkFn func(s []byte, v int) bool + +// recursiveWalk is used to do a pre-order walk of a node +// recursively. Returns true if the walk should be aborted +func recursiveWalk(n *node, fn walkFn) bool { + // Visit the leaf values if any + if n.leaf.valid && fn(n.leaf.key, n.leaf.val) { + return true + } + + // Recurse on the children + for _, e := range n.edges { + if recursiveWalk(e.node, fn) { + return true + } + } + return false +} + +// Minimum is used to return the minimum value in the tree +func (t *Tree) Minimum() ([]byte, int, bool) { + t.mu.RLock() + + n := t.root + for { + if n.isLeaf() { + t.mu.RUnlock() + return n.leaf.key, n.leaf.val, true + } + if len(n.edges) > 0 { + n = n.edges[0].node + } else { + break + } + } + + t.mu.RUnlock() + return nil, 0, false +} + +// Maximum is used to return the maximum value in the tree +func (t *Tree) Maximum() ([]byte, int, bool) { + t.mu.RLock() + + n := t.root + for { + if num := len(n.edges); num > 0 { + n = n.edges[num-1].node + continue + } + if n.isLeaf() { + t.mu.RUnlock() + return n.leaf.key, n.leaf.val, true + } + break + } + + t.mu.RUnlock() + return nil, 0, false +} diff --git a/vendor/github.com/influxdata/influxdb/pkg/radix/tree_test.go b/vendor/github.com/influxdata/influxdb/pkg/radix/tree_test.go new file mode 100644 index 0000000..875b9b7 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/radix/tree_test.go @@ -0,0 +1,174 @@ +package radix + +import ( + "crypto/rand" + "fmt" + "reflect" + "testing" +) + +// generateUUID is used to generate a random UUID +func generateUUID() string { + buf := make([]byte, 16) + if _, err := rand.Read(buf); err != nil { + panic(fmt.Errorf("failed to read random bytes: %v", err)) + } + + return fmt.Sprintf("%08x-%04x-%04x-%04x-%12x", + buf[0:4], + buf[4:6], + buf[6:8], + buf[8:10], + buf[10:16]) +} + +func TestRadix(t *testing.T) { + var min, max string + inp := make(map[string]int) + for i := 0; i < 1000; i++ { + gen := generateUUID() + inp[gen] = i + if gen < min || i == 0 { + min = gen + } + if gen > max || i == 0 { + max = gen + } + } + + r := NewFromMap(inp) + if r.Len() != len(inp) { + t.Fatalf("bad length: %v %v", r.Len(), len(inp)) + } + + // Check min and max + outMin, _, _ := r.Minimum() + if string(outMin) != min { + t.Fatalf("bad minimum: %s %v", outMin, min) + } + outMax, _, _ := r.Maximum() + if string(outMax) != max { + t.Fatalf("bad maximum: %s %v", outMax, max) + } + + for k, v := range inp { + out, ok := r.Get([]byte(k)) + if !ok { + t.Fatalf("missing key: %v", k) + } + if out != v { + t.Fatalf("value mis-match: %v %v", out, v) + } + } + +} + +func TestDeletePrefix(t *testing.T) { + type exp struct { + inp []string + prefix string + out []string + numDeleted int + } + + cases := []exp{ + {[]string{"", "A", "AB", "ABC", 
"R", "S"}, "A", []string{"", "R", "S"}, 3}, + {[]string{"", "A", "AB", "ABC", "R", "S"}, "ABC", []string{"", "A", "AB", "R", "S"}, 1}, + {[]string{"", "A", "AB", "ABC", "R", "S"}, "", []string{}, 6}, + {[]string{"", "A", "AB", "ABC", "R", "S"}, "S", []string{"", "A", "AB", "ABC", "R"}, 1}, + {[]string{"", "A", "AB", "ABC", "R", "S"}, "SS", []string{"", "A", "AB", "ABC", "R", "S"}, 0}, + } + + for _, test := range cases { + r := New() + for _, ss := range test.inp { + r.Insert([]byte(ss), 1) + } + + deleted := r.DeletePrefix([]byte(test.prefix)) + if deleted != test.numDeleted { + t.Fatalf("Bad delete, expected %v to be deleted but got %v", test.numDeleted, deleted) + } + + out := []string{} + fn := func(s []byte, v int) bool { + out = append(out, string(s)) + return false + } + recursiveWalk(r.root, fn) + + if !reflect.DeepEqual(out, test.out) { + t.Fatalf("mis-match: %v %v", out, test.out) + } + } +} + +func TestInsert_Duplicate(t *testing.T) { + r := New() + vv, ok := r.Insert([]byte("cpu"), 1) + if vv != 1 { + t.Fatalf("value mismatch: got %v, exp %v", vv, 1) + } + + if !ok { + t.Fatalf("value mismatch: got %v, exp %v", ok, true) + } + + // Insert a dup with a different type should fail + vv, ok = r.Insert([]byte("cpu"), 2) + if vv != 1 { + t.Fatalf("value mismatch: got %v, exp %v", vv, 1) + } + + if ok { + t.Fatalf("value mismatch: got %v, exp %v", ok, false) + } +} + +// +// benchmarks +// + +func BenchmarkTree_Insert(b *testing.B) { + t := New() + + keys := make([][]byte, 0, 10000) + for i := 0; i < cap(keys); i++ { + k := []byte(fmt.Sprintf("cpu,host=%d", i)) + if v, ok := t.Insert(k, 1); v != 1 || !ok { + b.Fatalf("insert failed: %v != 1 || !%v", v, ok) + } + keys = append(keys, k) + } + + b.SetBytes(int64(len(keys))) + b.ReportAllocs() + b.ResetTimer() + + for j := 0; j < b.N; j++ { + for _, key := range keys { + if v, ok := t.Insert(key, 1); v != 1 || ok { + b.Fatalf("insert failed: %v != 1 || !%v", v, ok) + } + } + } +} + +func BenchmarkTree_InsertNew(b *testing.B) { + keys := make([][]byte, 0, 10000) + for i := 0; i < cap(keys); i++ { + k := []byte(fmt.Sprintf("cpu,host=%d", i)) + keys = append(keys, k) + } + + b.SetBytes(int64(len(keys))) + b.ReportAllocs() + b.ResetTimer() + + for j := 0; j < b.N; j++ { + t := New() + for _, key := range keys { + t.Insert(key, 1) + } + } +} diff --git a/vendor/github.com/influxdata/influxdb/pkg/rhh/rhh.go b/vendor/github.com/influxdata/influxdb/pkg/rhh/rhh.go new file mode 100644 index 0000000..bb8db4b --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/rhh/rhh.go @@ -0,0 +1,286 @@ +package rhh + +import ( + "bytes" + "encoding/binary" + "sort" + + "github.com/cespare/xxhash" +) + +// HashMap represents a hash map that implements Robin Hood Hashing. +// https://cs.uwaterloo.ca/research/tr/1986/CS-86-14.pdf +type HashMap struct { + hashes []int64 + elems []hashElem + + n int64 + capacity int64 + threshold int64 + mask int64 + loadFactor int + + tmpKey []byte +} + +func NewHashMap(opt Options) *HashMap { + m := &HashMap{ + capacity: pow2(opt.Capacity), // Limited to 2^64. + loadFactor: opt.LoadFactor, + } + m.alloc() + return m +} + +// Reset clears the values in the map without deallocating the space. 
+func (m *HashMap) Reset() { + for i := int64(0); i < m.capacity; i++ { + m.hashes[i] = 0 + m.elems[i].reset() + } + m.n = 0 +} + +func (m *HashMap) Get(key []byte) interface{} { + i := m.index(key) + if i == -1 { + return nil + } + return m.elems[i].value +} + +func (m *HashMap) Put(key []byte, val interface{}) { + // Grow the map if we've run out of slots. + m.n++ + if m.n > m.threshold { + m.grow() + } + + // If the key was overwritten then decrement the size. + overwritten := m.insert(HashKey(key), key, val) + if overwritten { + m.n-- + } +} + +func (m *HashMap) insert(hash int64, key []byte, val interface{}) (overwritten bool) { + pos := hash & m.mask + var dist int64 + + var copied bool + searchKey := key + + // Continue searching until we find an empty slot or lower probe distance. + for { + e := &m.elems[pos] + + // Empty slot found or matching key, insert and exit. + match := bytes.Equal(m.elems[pos].key, searchKey) + if m.hashes[pos] == 0 || match { + m.hashes[pos] = hash + e.hash, e.value = hash, val + e.setKey(searchKey) + return match + } + + // If the existing elem has probed less than us, then swap places with + // existing elem, and keep going to find another slot for that elem. + elemDist := Dist(m.hashes[pos], pos, m.capacity) + if elemDist < dist { + // Swap with current position. + hash, m.hashes[pos] = m.hashes[pos], hash + val, e.value = e.value, val + + m.tmpKey = assign(m.tmpKey, e.key) + e.setKey(searchKey) + + if !copied { + searchKey = make([]byte, len(key)) + copy(searchKey, key) + copied = true + } + + searchKey = assign(searchKey, m.tmpKey) + + // Update current distance. + dist = elemDist + } + + // Increment position, wrap around on overflow. + pos = (pos + 1) & m.mask + dist++ + } +} + +// alloc elems according to currently set capacity. +func (m *HashMap) alloc() { + m.elems = make([]hashElem, m.capacity) + m.hashes = make([]int64, m.capacity) + m.threshold = (m.capacity * int64(m.loadFactor)) / 100 + m.mask = int64(m.capacity - 1) +} + +// grow doubles the capacity and reinserts all existing hashes & elements. +func (m *HashMap) grow() { + // Copy old elements and hashes. + elems, hashes := m.elems, m.hashes + capacity := m.capacity + + // Double capacity & reallocate. + m.capacity *= 2 + m.alloc() + + // Copy old elements to new hash/elem list. + for i := int64(0); i < capacity; i++ { + elem, hash := &elems[i], hashes[i] + if hash == 0 { + continue + } + m.insert(hash, elem.key, elem.value) + } +} + +// index returns the position of key in the hash map. +func (m *HashMap) index(key []byte) int64 { + hash := HashKey(key) + pos := hash & m.mask + + var dist int64 + for { + if m.hashes[pos] == 0 { + return -1 + } else if dist > Dist(m.hashes[pos], pos, m.capacity) { + return -1 + } else if m.hashes[pos] == hash && bytes.Equal(m.elems[pos].key, key) { + return pos + } + + pos = (pos + 1) & m.mask + dist++ + } +} + +// Elem returns the i-th key/value pair of the hash map. +func (m *HashMap) Elem(i int64) (key []byte, value interface{}) { + if i >= int64(len(m.elems)) { + return nil, nil + } + + e := &m.elems[i] + return e.key, e.value +} + +// Len returns the number of key/values set in map. +func (m *HashMap) Len() int64 { return m.n } + +// Cap returns the number of key/values set in map. +func (m *HashMap) Cap() int64 { return m.capacity } + +// AverageProbeCount returns the average number of probes for each element. 
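+//
+// Probe distance is how far an element ended up from its "home" slot
+// (hash & mask), wrapping around the table. For example, with capacity 8, an
+// element whose home slot is 6 but which was placed at slot 1 has a probe
+// distance of (1 + 8 - 6) & 7 = 3; AverageProbeCount reports the mean of this
+// value (plus one) across all occupied slots.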
+func (m *HashMap) AverageProbeCount() float64 { + var sum float64 + for i := int64(0); i < m.capacity; i++ { + hash := m.hashes[i] + if hash == 0 { + continue + } + sum += float64(Dist(hash, i, m.capacity)) + } + return sum/float64(m.n) + 1.0 +} + +// Keys returns a list of sorted keys. +func (m *HashMap) Keys() [][]byte { + a := make([][]byte, 0, m.Len()) + for i := int64(0); i < m.Cap(); i++ { + k, v := m.Elem(i) + if v == nil { + continue + } + a = append(a, k) + } + sort.Sort(byteSlices(a)) + return a +} + +type hashElem struct { + key []byte + value interface{} + hash int64 +} + +// reset clears the values in the element. +func (e *hashElem) reset() { + e.key = e.key[:0] + e.value = nil + e.hash = 0 +} + +// setKey copies v to a key on e. +func (e *hashElem) setKey(v []byte) { + e.key = assign(e.key, v) +} + +// Options represents initialization options that are passed to NewHashMap(). +type Options struct { + Capacity int64 + LoadFactor int +} + +// DefaultOptions represents a default set of options to pass to NewHashMap(). +var DefaultOptions = Options{ + Capacity: 256, + LoadFactor: 90, +} + +// HashKey computes a hash of key. Hash is always non-zero. +func HashKey(key []byte) int64 { + h := int64(xxhash.Sum64(key)) + if h == 0 { + h = 1 + } else if h < 0 { + h = 0 - h + } + return h +} + +// HashUint64 computes a hash of an int64. Hash is always non-zero. +func HashUint64(key uint64) int64 { + buf := make([]byte, 8) + binary.BigEndian.PutUint64(buf, key) + return HashKey(buf) +} + +// Dist returns the probe distance for a hash in a slot index. +// NOTE: Capacity must be a power of 2. +func Dist(hash, i, capacity int64) int64 { + mask := capacity - 1 + dist := (i + capacity - (hash & mask)) & mask + return dist +} + +// pow2 returns the number that is the next highest power of 2. +// Returns v if it is a power of 2. +func pow2(v int64) int64 { + for i := int64(2); i < 1<<62; i *= 2 { + if i >= v { + return i + } + } + panic("unreachable") +} + +func assign(x, v []byte) []byte { + if cap(x) < len(v) { + x = make([]byte, len(v)) + } + x = x[:len(v)] + copy(x, v) + return x +} + +type byteSlices [][]byte + +func (a byteSlices) Len() int { return len(a) } +func (a byteSlices) Less(i, j int) bool { return bytes.Compare(a[i], a[j]) == -1 } +func (a byteSlices) Swap(i, j int) { a[i], a[j] = a[j], a[i] } diff --git a/vendor/github.com/influxdata/influxdb/pkg/rhh/rhh_test.go b/vendor/github.com/influxdata/influxdb/pkg/rhh/rhh_test.go new file mode 100644 index 0000000..0a2b4a8 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/rhh/rhh_test.go @@ -0,0 +1,78 @@ +package rhh_test + +import ( + "bytes" + "math/rand" + "reflect" + "testing" + "testing/quick" + + "github.com/influxdata/influxdb/pkg/rhh" +) + +// Ensure hash map can perform basic get/put operations. +func TestHashMap(t *testing.T) { + m := rhh.NewHashMap(rhh.DefaultOptions) + m.Put([]byte("foo"), []byte("bar")) + m.Put([]byte("baz"), []byte("bat")) + + // Verify values can be retrieved. + if v := m.Get([]byte("foo")); !bytes.Equal(v.([]byte), []byte("bar")) { + t.Fatalf("unexpected value: %s", v) + } + if v := m.Get([]byte("baz")); !bytes.Equal(v.([]byte), []byte("bat")) { + t.Fatalf("unexpected value: %s", v) + } + + // Overwrite field & verify. + m.Put([]byte("foo"), []byte("XXX")) + if v := m.Get([]byte("foo")); !bytes.Equal(v.([]byte), []byte("XXX")) { + t.Fatalf("unexpected value: %s", v) + } +} + +// Ensure hash map can insert random data. 
+func TestHashMap_Quick(t *testing.T) { + if testing.Short() { + t.Skip("short mode, skipping") + } + + if err := quick.Check(func(keys, values [][]byte) bool { + m := rhh.NewHashMap(rhh.Options{Capacity: 1000, LoadFactor: 90}) + h := make(map[string][]byte) + + // Insert all key/values into both maps. + for i := range keys { + key, value := keys[i], values[i] + h[string(key)] = value + m.Put(key, value) + } + + // Verify the maps are equal. + for k, v := range h { + if mv := m.Get([]byte(k)); !bytes.Equal(mv.([]byte), v) { + t.Fatalf("value mismatch:\nkey=%x\ngot=%x\nexp=%x\n\n", []byte(k), mv, v) + } + } + + return true + }, &quick.Config{ + Values: func(values []reflect.Value, rand *rand.Rand) { + n := rand.Intn(10000) + values[0] = GenerateByteSlices(rand, n) + values[1] = GenerateByteSlices(rand, n) + }, + }); err != nil { + t.Fatal(err) + } +} + +// GenerateByteSlices returns a random list of byte slices. +func GenerateByteSlices(rand *rand.Rand, n int) reflect.Value { + var a [][]byte + for i := 0; i < n; i++ { + v, _ := quick.Value(reflect.TypeOf(([]byte)(nil)), rand) + a = append(a, v.Interface().([]byte)) + } + return reflect.ValueOf(a) +} diff --git a/vendor/github.com/influxdata/influxdb/pkg/slices/bytes.go b/vendor/github.com/influxdata/influxdb/pkg/slices/bytes.go new file mode 100644 index 0000000..b12a148 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/slices/bytes.go @@ -0,0 +1,37 @@ +package slices + +// BytesToStrings converts a slice of []byte into a slice of strings. +func BytesToStrings(a [][]byte) []string { + s := make([]string, 0, len(a)) + for _, v := range a { + s = append(s, string(v)) + } + return s +} + +// CopyChunkedByteSlices deep-copies a [][]byte to a new [][]byte that is backed by a small number of []byte "chunks". 
+func CopyChunkedByteSlices(src [][]byte, chunkSize int) [][]byte { + dst := make([][]byte, len(src)) + + for chunkBegin := 0; chunkBegin < len(src); chunkBegin += chunkSize { + chunkEnd := len(src) + if chunkEnd-chunkBegin > chunkSize { + chunkEnd = chunkBegin + chunkSize + } + + chunkByteSize := 0 + for j := chunkBegin; j < chunkEnd; j++ { + chunkByteSize += len(src[j]) + } + + chunk := make([]byte, chunkByteSize) + offset := 0 + for j := chunkBegin; j < chunkEnd; j++ { + copy(chunk[offset:offset+len(src[j])], src[j]) + dst[j] = chunk[offset : offset+len(src[j]) : offset+len(src[j])] + offset += len(src[j]) + } + } + + return dst +} diff --git a/vendor/github.com/influxdata/influxdb/pkg/slices/bytes_test.go b/vendor/github.com/influxdata/influxdb/pkg/slices/bytes_test.go new file mode 100644 index 0000000..7969c1d --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/slices/bytes_test.go @@ -0,0 +1,78 @@ +package slices + +import ( + "math" + "reflect" + "testing" + "unsafe" +) + +func TestCopyChunkedByteSlices_oneChunk(t *testing.T) { + src := [][]byte{ + []byte("influx"), + []byte("data"), + } + + dst := CopyChunkedByteSlices(src, 3) + if !reflect.DeepEqual(src, dst) { + t.Errorf("destination should match source src: %v dst: %v", src, dst) + } + + dst[0][1] = 'z' + if reflect.DeepEqual(src, dst) { + t.Error("destination should not match source") + } +} + +func TestCopyChunkedByteSlices_multipleChunks(t *testing.T) { + src := [][]byte{ + []byte("influx"), + []byte("data"), + []byte("is"), + []byte("the"), + []byte("best"), + []byte("time"), + []byte("series"), + []byte("database"), + []byte("in"), + []byte("the"), + []byte("whole"), + []byte("wide"), + []byte("world"), + []byte(":-)"), + } + + chunkSize := 4 + dst := CopyChunkedByteSlices(src, chunkSize) + if !reflect.DeepEqual(src, dst) { + t.Errorf("destination should match source src: %v dst: %v", src, dst) + } + + for i := 0; i < int(math.Ceil(float64(len(src))/float64(chunkSize))); i++ { + thisChunkSize := chunkSize + if len(src)-thisChunkSize*i < thisChunkSize { + thisChunkSize = len(src) - thisChunkSize*i + } + + chunk := dst[i*thisChunkSize : (i+1)*thisChunkSize] + + for j := 0; j < thisChunkSize-1; j++ { + a := (*reflect.SliceHeader)(unsafe.Pointer(&chunk[j])) + b := (*reflect.SliceHeader)(unsafe.Pointer(&chunk[j+1])) + if b.Data-a.Data != uintptr(a.Len) { + t.Error("chunk elements do not appear to be adjacent, so not part of one chunk") + } + if a.Cap != a.Len { + t.Errorf("slice length != capacity; %d vs %d", a.Len, a.Cap) + } + if b.Cap != b.Len { + t.Errorf("slice length != capacity; %d vs %d", b.Len, b.Cap) + } + } + } + + dst[0][5] = 'z' + if reflect.DeepEqual(src, dst) { + t.Error("destination should not match source") + } +} diff --git a/vendor/github.com/influxdata/influxdb/pkg/slices/merge.gen.go b/vendor/github.com/influxdata/influxdb/pkg/slices/merge.gen.go new file mode 100644 index 0000000..304c2de --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/slices/merge.gen.go @@ -0,0 +1,398 @@ +// Generated by tmpl +// https://github.com/benbjohnson/tmpl +// +// DO NOT EDIT! +// Source: merge.gen.go.tmpl + +package slices + +import "bytes" + +// Merge uses a k-way merge to merge n collections of sorted byte slices. +// +// The resulting slice is returned in ascending order, with any duplicate values +// removed. +func MergeSortedFloats(n ...[]float64) []float64 { + var result []float64 + if len(n) == 0 { + return nil + } else if len(n) == 1 { + // Special case. 
Merge single slice with a nil slice, to remove any + // duplicates from the single slice. + return MergeSortedFloats(n[0], nil) + } + + var maxSize int + for _, a := range n { + if len(a) > maxSize { + maxSize = len(a) + } + } + result = make([]float64, 0, maxSize) // This will likely be too small but it's a start. + + idxs := make([]int, len(n)) // Indexes we've processed. + var j int // Index we currently think is minimum. + + for { + j = -1 + + // Find the smallest minimum in all slices. + for i := 0; i < len(n); i++ { + if idxs[i] >= len(n[i]) { + continue // We have completely drained all values in this slice. + } else if j == -1 { + // We haven't picked the minimum value yet. Pick this one. + j = i + continue + } + + // It this value key is lower than the candidate. + + if n[i][idxs[i]] < n[j][idxs[j]] { + j = i + } else if n[i][idxs[i]] == n[j][idxs[j]] { + // Duplicate value. Throw it away. + idxs[i]++ + } + + } + + // We could have drained all of the values and be done... + if j == -1 { + break + } + + // First value to just append it and move on. + if len(result) == 0 { + result = append(result, n[j][idxs[j]]) + idxs[j]++ + continue + } + + // Append the minimum value to results if it's not a duplicate of + // the existing one. + + if result[len(result)-1] < n[j][idxs[j]] { + result = append(result, n[j][idxs[j]]) + } else if result[len(result)-1] == n[j][idxs[j]] { + // Duplicate so drop it. + } else { + panic("value being merged out of order.") + } + + idxs[j]++ + } + return result +} + +// Merge uses a k-way merge to merge n collections of sorted byte slices. +// +// The resulting slice is returned in ascending order, with any duplicate values +// removed. +func MergeSortedInts(n ...[]int64) []int64 { + var result []int64 + if len(n) == 0 { + return nil + } else if len(n) == 1 { + // Special case. Merge single slice with a nil slice, to remove any + // duplicates from the single slice. + return MergeSortedInts(n[0], nil) + } + + var maxSize int + for _, a := range n { + if len(a) > maxSize { + maxSize = len(a) + } + } + result = make([]int64, 0, maxSize) // This will likely be too small but it's a start. + + idxs := make([]int, len(n)) // Indexes we've processed. + var j int // Index we currently think is minimum. + + for { + j = -1 + + // Find the smallest minimum in all slices. + for i := 0; i < len(n); i++ { + if idxs[i] >= len(n[i]) { + continue // We have completely drained all values in this slice. + } else if j == -1 { + // We haven't picked the minimum value yet. Pick this one. + j = i + continue + } + + // It this value key is lower than the candidate. + + if n[i][idxs[i]] < n[j][idxs[j]] { + j = i + } else if n[i][idxs[i]] == n[j][idxs[j]] { + // Duplicate value. Throw it away. + idxs[i]++ + } + + } + + // We could have drained all of the values and be done... + if j == -1 { + break + } + + // First value to just append it and move on. + if len(result) == 0 { + result = append(result, n[j][idxs[j]]) + idxs[j]++ + continue + } + + // Append the minimum value to results if it's not a duplicate of + // the existing one. + + if result[len(result)-1] < n[j][idxs[j]] { + result = append(result, n[j][idxs[j]]) + } else if result[len(result)-1] == n[j][idxs[j]] { + // Duplicate so drop it. + } else { + panic("value being merged out of order.") + } + + idxs[j]++ + } + return result +} + +// Merge uses a k-way merge to merge n collections of sorted byte slices. +// +// The resulting slice is returned in ascending order, with any duplicate values +// removed. 
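+//
+// For example (sketch):
+//
+//	MergeSortedUInts([]uint64{1, 3, 5}, []uint64{2, 3, 4}) // -> [1 2 3 4 5]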
+func MergeSortedUInts(n ...[]uint64) []uint64 { + var result []uint64 + if len(n) == 0 { + return nil + } else if len(n) == 1 { + // Special case. Merge single slice with a nil slice, to remove any + // duplicates from the single slice. + return MergeSortedUInts(n[0], nil) + } + + var maxSize int + for _, a := range n { + if len(a) > maxSize { + maxSize = len(a) + } + } + result = make([]uint64, 0, maxSize) // This will likely be too small but it's a start. + + idxs := make([]int, len(n)) // Indexes we've processed. + var j int // Index we currently think is minimum. + + for { + j = -1 + + // Find the smallest minimum in all slices. + for i := 0; i < len(n); i++ { + if idxs[i] >= len(n[i]) { + continue // We have completely drained all values in this slice. + } else if j == -1 { + // We haven't picked the minimum value yet. Pick this one. + j = i + continue + } + + // It this value key is lower than the candidate. + + if n[i][idxs[i]] < n[j][idxs[j]] { + j = i + } else if n[i][idxs[i]] == n[j][idxs[j]] { + // Duplicate value. Throw it away. + idxs[i]++ + } + + } + + // We could have drained all of the values and be done... + if j == -1 { + break + } + + // First value to just append it and move on. + if len(result) == 0 { + result = append(result, n[j][idxs[j]]) + idxs[j]++ + continue + } + + // Append the minimum value to results if it's not a duplicate of + // the existing one. + + if result[len(result)-1] < n[j][idxs[j]] { + result = append(result, n[j][idxs[j]]) + } else if result[len(result)-1] == n[j][idxs[j]] { + // Duplicate so drop it. + } else { + panic("value being merged out of order.") + } + + idxs[j]++ + } + return result +} + +// Merge uses a k-way merge to merge n collections of sorted byte slices. +// +// The resulting slice is returned in ascending order, with any duplicate values +// removed. +func MergeSortedStrings(n ...[]string) []string { + var result []string + if len(n) == 0 { + return nil + } else if len(n) == 1 { + // Special case. Merge single slice with a nil slice, to remove any + // duplicates from the single slice. + return MergeSortedStrings(n[0], nil) + } + + var maxSize int + for _, a := range n { + if len(a) > maxSize { + maxSize = len(a) + } + } + result = make([]string, 0, maxSize) // This will likely be too small but it's a start. + + idxs := make([]int, len(n)) // Indexes we've processed. + var j int // Index we currently think is minimum. + + for { + j = -1 + + // Find the smallest minimum in all slices. + for i := 0; i < len(n); i++ { + if idxs[i] >= len(n[i]) { + continue // We have completely drained all values in this slice. + } else if j == -1 { + // We haven't picked the minimum value yet. Pick this one. + j = i + continue + } + + // It this value key is lower than the candidate. + + if n[i][idxs[i]] < n[j][idxs[j]] { + j = i + } else if n[i][idxs[i]] == n[j][idxs[j]] { + // Duplicate value. Throw it away. + idxs[i]++ + } + + } + + // We could have drained all of the values and be done... + if j == -1 { + break + } + + // First value to just append it and move on. + if len(result) == 0 { + result = append(result, n[j][idxs[j]]) + idxs[j]++ + continue + } + + // Append the minimum value to results if it's not a duplicate of + // the existing one. + + if result[len(result)-1] < n[j][idxs[j]] { + result = append(result, n[j][idxs[j]]) + } else if result[len(result)-1] == n[j][idxs[j]] { + // Duplicate so drop it. 
+ } else { + panic("value being merged out of order.") + } + + idxs[j]++ + } + return result +} + +// Merge uses a k-way merge to merge n collections of sorted byte slices. +// +// The resulting slice is returned in ascending order, with any duplicate values +// removed. +func MergeSortedBytes(n ...[][]byte) [][]byte { + var result [][]byte + if len(n) == 0 { + return nil + } else if len(n) == 1 { + // Special case. Merge single slice with a nil slice, to remove any + // duplicates from the single slice. + return MergeSortedBytes(n[0], nil) + } + + var maxSize int + for _, a := range n { + if len(a) > maxSize { + maxSize = len(a) + } + } + result = make([][]byte, 0, maxSize) // This will likely be too small but it's a start. + + idxs := make([]int, len(n)) // Indexes we've processed. + var j int // Index we currently think is minimum. + + var cmp int // Result of comparing most recent value. + + for { + j = -1 + + // Find the smallest minimum in all slices. + for i := 0; i < len(n); i++ { + if idxs[i] >= len(n[i]) { + continue // We have completely drained all values in this slice. + } else if j == -1 { + // We haven't picked the minimum value yet. Pick this one. + j = i + continue + } + + // It this value key is lower than the candidate. + + cmp = bytes.Compare(n[i][idxs[i]], n[j][idxs[j]]) + if cmp == -1 { + j = i + } else if cmp == 0 { + // Duplicate value. Throw it away. + idxs[i]++ + } + + } + + // We could have drained all of the values and be done... + if j == -1 { + break + } + + // First value to just append it and move on. + if len(result) == 0 { + result = append(result, n[j][idxs[j]]) + idxs[j]++ + continue + } + + // Append the minimum value to results if it's not a duplicate of + // the existing one. + + cmp = bytes.Compare(result[len(result)-1], n[j][idxs[j]]) + if cmp == -1 { + result = append(result, n[j][idxs[j]]) + } else if cmp == 0 { + // Duplicate so drop it. + } else { + panic("value being merged out of order.") + } + + idxs[j]++ + } + return result +} diff --git a/vendor/github.com/influxdata/influxdb/pkg/slices/merge.gen.go.tmpl b/vendor/github.com/influxdata/influxdb/pkg/slices/merge.gen.go.tmpl new file mode 100644 index 0000000..8e40a65 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/slices/merge.gen.go.tmpl @@ -0,0 +1,104 @@ +package slices + +import "bytes" + +{{with $types := .}}{{range $k := $types}} + +// Merge uses a k-way merge to merge n collections of sorted byte slices. +// +// The resulting slice is returned in ascending order, with any duplicate values +// removed. +func MergeSorted{{$k.Name}}(n ...[]{{$k.Type}}) []{{$k.Type}} { + var result []{{$k.Type}} + if len(n) == 0 { + return nil + } else if len(n) == 1 { + // Special case. Merge single slice with a nil slice, to remove any + // duplicates from the single slice. + return MergeSorted{{$k.Name}}(n[0], nil) + } + + var maxSize int + for _, a := range n { + if len(a) > maxSize { + maxSize = len(a) + } + } + result = make([]{{$k.Type}}, 0, maxSize) // This will likely be too small but it's a start. + + idxs := make([]int, len(n)) // Indexes we've processed. + var j int // Index we currently think is minimum. +{{if eq $k.Name "Bytes" }} + var cmp int // Result of comparing most recent value. +{{end}} + for { + j = -1 + + // Find the smallest minimum in all slices. + for i := 0; i < len(n); i++ { + if idxs[i] >= len(n[i]) { + continue // We have completely drained all values in this slice. + } else if j == -1 { + // We haven't picked the minimum value yet. Pick this one. 
+ j = i + continue + } + + // It this value key is lower than the candidate. +{{if eq $k.Name "Bytes" }} + cmp = bytes.Compare(n[i][idxs[i]], n[j][idxs[j]]) + if cmp == -1 { + j = i + } else if cmp == 0 { + // Duplicate value. Throw it away. + idxs[i]++ + } +{{else}} + if n[i][idxs[i]] < n[j][idxs[j]] { + j = i + } else if n[i][idxs[i]] == n[j][idxs[j]] { + // Duplicate value. Throw it away. + idxs[i]++ + } +{{end}} + } + + // We could have drained all of the values and be done... + if j == -1 { + break + } + + // First value to just append it and move on. + if len(result) == 0 { + result = append(result, n[j][idxs[j]]) + idxs[j]++ + continue + } + + // Append the minimum value to results if it's not a duplicate of + // the existing one. +{{if eq $k.Name "Bytes" }} + cmp = bytes.Compare(result[len(result)-1], n[j][idxs[j]]) + if cmp == -1 { + result = append(result, n[j][idxs[j]]) + } else if cmp == 0 { + // Duplicate so drop it. + } else { + panic("value being merged out of order.") + } +{{else}} + if result[len(result)-1] < n[j][idxs[j]] { + result = append(result, n[j][idxs[j]]) + } else if result[len(result)-1] == n[j][idxs[j]] { + // Duplicate so drop it. + } else { + panic("value being merged out of order.") + } +{{end}} + idxs[j]++ + } + return result +} + + +{{end}}{{end}} \ No newline at end of file diff --git a/vendor/github.com/influxdata/influxdb/pkg/slices/merge_test.go b/vendor/github.com/influxdata/influxdb/pkg/slices/merge_test.go new file mode 100644 index 0000000..55f97de --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/slices/merge_test.go @@ -0,0 +1,101 @@ +package slices_test + +import ( + "fmt" + "reflect" + "testing" + + "github.com/influxdata/influxdb/pkg/slices" +) + +func TestMergeSortedBytes(t *testing.T) { + cases := []struct { + Inputs [][][]byte + Out [][]byte + }{ + {Inputs: [][][]byte{}}, + {Inputs: [][][]byte{toBytes(0)}, Out: toBytes(0)}, + { + Inputs: [][][]byte{toBytes(2), [][]byte(nil), toBytes(2)}, + Out: toBytes(2), + }, + { + Inputs: [][][]byte{toBytes(9), toBytes(1, 16, 16), toBytes(5, 10)}, + Out: toBytes(1, 5, 9, 10, 16), + }, + { + Inputs: [][][]byte{toBytes(20), toBytes(16), toBytes(10)}, + Out: toBytes(10, 16, 20), + }, + { + Inputs: [][][]byte{toBytes(2, 2, 2, 2, 2, 2, 2, 2)}, + Out: toBytes(2), + }, + { + Inputs: [][][]byte{toBytes(2, 2, 2, 2, 2, 2, 2, 2), [][]byte(nil), [][]byte(nil), [][]byte(nil)}, + Out: toBytes(2), + }, + { + Inputs: [][][]byte{toBytes(1, 2, 3, 4, 5), toBytes(1, 2, 3, 4, 5), toBytes(1, 2, 3, 4, 5)}, + Out: toBytes(1, 2, 3, 4, 5), + }, + } + + for i, c := range cases { + t.Run(fmt.Sprintf("Example %d", i+1), func(t *testing.T) { + if got, exp := slices.MergeSortedBytes(c.Inputs...), c.Out; !reflect.DeepEqual(got, exp) { + t.Fatalf("got %v, expected %v", got, exp) + } + }) + } +} + +func toBytes(a ...int) [][]byte { + var result [][]byte + for _, v := range a { + result = append(result, []byte{byte(v)}) + } + return result +} + +func TestMergeSortedInts(t *testing.T) { + cases := []struct { + Inputs [][]int64 + Out []int64 + }{ + {Inputs: [][]int64{}}, + {Inputs: [][]int64{[]int64{0}}, Out: []int64{0}}, + { + Inputs: [][]int64{[]int64{2}, []int64(nil), []int64{2}}, + Out: []int64{2}, + }, + { + Inputs: [][]int64{[]int64{9}, []int64{1, 16, 16}, []int64{5, 10}}, + Out: []int64{1, 5, 9, 10, 16}, + }, + { + Inputs: [][]int64{[]int64{20}, []int64{16}, []int64{10}}, + Out: []int64{10, 16, 20}, + }, + { + Inputs: [][]int64{[]int64{2, 2, 2, 2, 2, 2, 2, 2}}, + Out: []int64{2}, + }, + { + Inputs: [][]int64{[]int64{2, 2, 
2, 2, 2, 2, 2, 2}, []int64(nil), []int64(nil), []int64(nil)}, + Out: []int64{2}, + }, + { + Inputs: [][]int64{[]int64{1, 2, 3, 4, 5}, []int64{1, 2, 3, 4, 5}, []int64{1, 2, 3, 4, 5}}, + Out: []int64{1, 2, 3, 4, 5}, + }, + } + + for i, c := range cases { + t.Run(fmt.Sprintf("Example %d", i+1), func(t *testing.T) { + if got, exp := slices.MergeSortedInts(c.Inputs...), c.Out; !reflect.DeepEqual(got, exp) { + t.Fatalf("got %v, expected %v", got, exp) + } + }) + } +} diff --git a/vendor/github.com/influxdata/influxdb/pkg/slices/strings.go b/vendor/github.com/influxdata/influxdb/pkg/slices/strings.go new file mode 100644 index 0000000..7539c84 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/slices/strings.go @@ -0,0 +1,50 @@ +// Package slices contains functions to operate on slices treated as sets. +package slices // import "github.com/influxdata/influxdb/pkg/slices" + +import "strings" + +// Union combines two string sets. +func Union(setA, setB []string, ignoreCase bool) []string { + for _, b := range setB { + if ignoreCase { + if !ExistsIgnoreCase(setA, b) { + setA = append(setA, b) + } + continue + } + if !Exists(setA, b) { + setA = append(setA, b) + } + } + return setA +} + +// Exists checks if a string is in a set. +func Exists(set []string, find string) bool { + for _, s := range set { + if s == find { + return true + } + } + return false +} + +// ExistsIgnoreCase checks if a string is in a set but ignores its case. +func ExistsIgnoreCase(set []string, find string) bool { + find = strings.ToLower(find) + for _, s := range set { + if strings.ToLower(s) == find { + return true + } + } + return false +} + +// StringsToBytes converts a variable number of strings into a slice of []byte. +func StringsToBytes(s ...string) [][]byte { + a := make([][]byte, 0, len(s)) + for _, v := range s { + a = append(a, []byte(v)) + } + return a +} diff --git a/vendor/github.com/influxdata/influxdb/pkg/slices/strings_test.go b/vendor/github.com/influxdata/influxdb/pkg/slices/strings_test.go new file mode 100644 index 0000000..42d8153 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/slices/strings_test.go @@ -0,0 +1,83 @@ +package slices + +import "testing" + +func TestExists(t *testing.T) { + tests := []struct { + set []string + find string + output bool + }{ + { + set: []string{}, + find: "foo", + output: false, + }, + { + set: []string{"foo"}, + find: "foo", + output: true, + }, + { + set: []string{"bar", "foo"}, + find: "foo", + output: true, + }, + { + set: []string{"bar", "foo"}, + find: "stuff", + output: false, + }, + { + set: []string{"bar", "Foo"}, + find: "foo", + output: false, + }, + } + for i, tt := range tests { + actual := Exists(tt.set, tt.find) + if actual != tt.output { + t.Errorf("[%d] set: %v , find: %s , expected: %t , actual: %t", i, tt.set, tt.find, tt.output, actual) + } + } +} + +func TestExistsIgnoreCase(t *testing.T) { + tests := []struct { + set []string + find string + output bool + }{ + { + set: []string{}, + find: "foo", + output: false, + }, + { + set: []string{"foo"}, + find: "foo", + output: true, + }, + { + set: []string{"bar", "foo"}, + find: "foo", + output: true, + }, + { + set: []string{"bar", "foo"}, + find: "stuff", + output: false, + }, + { + set: []string{"bar", "Foo"}, + find: "foo", + output: true, + }, + } + for i, tt := range tests { + actual := ExistsIgnoreCase(tt.set, tt.find) + if actual != tt.output { + t.Errorf("[%d] set: %v , find: %s , expected: %t , actual: %t", i, tt.set, tt.find, tt.output, actual) + } + } +} diff --git 
a/vendor/github.com/influxdata/influxdb/pkg/slices/tmpldata b/vendor/github.com/influxdata/influxdb/pkg/slices/tmpldata new file mode 100644 index 0000000..f478685 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/slices/tmpldata @@ -0,0 +1,22 @@ +[ + { + "Name":"Floats", + "Type":"float64" + }, + { + "Name":"Ints", + "Type":"int64" + }, + { + "Name":"UInts", + "Type":"uint64" + }, + { + "Name":"Strings", + "Type":"string" + }, + { + "Name":"Bytes", + "Type":"[]byte" + } +] diff --git a/vendor/github.com/influxdata/influxdb/pkg/snowflake/README.md b/vendor/github.com/influxdata/influxdb/pkg/snowflake/README.md new file mode 100644 index 0000000..92166b2 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/snowflake/README.md @@ -0,0 +1,38 @@ +Snowflake ID generator +====================== + +This is a Go implementation of [Twitter Snowflake](https://blog.twitter.com/2010/announcing-snowflake). + +The most useful aspect of these IDs is they are _roughly_ sortable and when generated +at roughly the same time, should have values in close proximity to each other. + +IDs +--- + +Each id will be a 64-bit number represented, structured as follows: + + +``` +6 6 5 4 3 2 1 +3210987654321098765432109876543210987654321098765432109876543210 + +ttttttttttttttttttttttttttttttttttttttttttmmmmmmmmmmssssssssssss +``` + +where + +* s (sequence) is a 12-bit integer that increments if called multiple times for the same millisecond +* m (machine id) is a 10-bit integer representing the server id +* t (time) is a 42-bit integer representing the current timestamp in milliseconds + the number of milliseconds to have elapsed since 1491696000000 or 2017-04-09T00:00:00Z + +### String Encoding + +The 64-bit unsigned integer is base-63 encoded using the following URL-safe characters, which are ordered +according to their ASCII value. + +``` +0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ_abcdefghijklmnopqrstuvwxyz~ +``` + +A binary sort of a list of encoded values will be correctly ordered according to the numerical representation. 
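
To make the layout concrete, here is a small illustrative snippet (not part of this package; the `unpack` helper and the sample ID are invented) that splits an ID built with the bit widths above back into its timestamp, machine id and sequence:

```go
package main

import (
	"fmt"
	"time"
)

const (
	epoch        = int64(1491696000000) // 2017-04-09T00:00:00Z in milliseconds
	sequenceBits = 12
	machineBits  = 10
)

// unpack splits an ID into the timestamp, machine id and sequence components
// described in the layout above.
func unpack(id uint64) (ts time.Time, machineID, sequence int) {
	sequence = int(id & ((1 << sequenceBits) - 1))
	machineID = int((id >> sequenceBits) & ((1 << machineBits) - 1))
	ms := int64(id>>(sequenceBits+machineBits)) + epoch
	return time.Unix(0, ms*int64(time.Millisecond)), machineID, sequence
}

func main() {
	ts, machine, seq := unpack(0x02FB0B5D8D4C1001) // hypothetical ID, for illustration only
	fmt.Println(ts.UTC(), machine, seq)
}
```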
\ No newline at end of file diff --git a/vendor/github.com/influxdata/influxdb/pkg/snowflake/gen.go b/vendor/github.com/influxdata/influxdb/pkg/snowflake/gen.go new file mode 100644 index 0000000..1e327fb --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/snowflake/gen.go @@ -0,0 +1,107 @@ +package snowflake + +import ( + "fmt" + "sync" + "time" +) + +const ( + epoch = 1491696000000 + serverBits = 10 + sequenceBits = 12 + serverShift = sequenceBits + timeShift = sequenceBits + serverBits + serverMax = ^(-1 << serverBits) + sequenceMask = ^(-1 << sequenceBits) +) + +type Generator struct { + rw sync.Mutex + lastTimestamp uint64 + machineID int + sequence int32 +} + +func New(machineID int) *Generator { + if machineID < 0 || machineID > serverMax { + panic(fmt.Errorf("invalid machine id; must be 0 ≤ id < %d", serverMax)) + } + return &Generator{ + machineID: machineID, + lastTimestamp: 0, + sequence: 0, + } +} + +func (g *Generator) MachineID() int { + return g.machineID +} + +func (g *Generator) Next() uint64 { + t := now() + g.rw.Lock() + if t == g.lastTimestamp { + g.sequence = (g.sequence + 1) & sequenceMask + if g.sequence == 0 { + t = g.nextMillis() + } + } else if t < g.lastTimestamp { + t = g.nextMillis() + } else { + g.sequence = 0 + } + g.lastTimestamp = t + seq := g.sequence + g.rw.Unlock() + + tp := (t - epoch) << timeShift + sp := uint64(g.machineID << serverShift) + n := tp | sp | uint64(seq) + + return n +} + +func (g *Generator) NextString() string { + var s [11]byte + encode(&s, g.Next()) + return string(s[:]) +} + +func (g *Generator) AppendNext(s *[11]byte) { + encode(s, g.Next()) +} + +func (g *Generator) nextMillis() uint64 { + t := now() + for t <= g.lastTimestamp { + time.Sleep(100 * time.Microsecond) + t = now() + } + return t +} + +func now() uint64 { return uint64(time.Now().UnixNano() / 1e6) } + +var digits = [...]byte{ + '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', + 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', + 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', + 'U', 'V', 'W', 'X', 'Y', 'Z', '_', 'a', 'b', 'c', + 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', + 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', + 'x', 'y', 'z', '~'} + +func encode(s *[11]byte, n uint64) { + s[10], n = digits[n&0x3f], n>>6 + s[9], n = digits[n&0x3f], n>>6 + s[8], n = digits[n&0x3f], n>>6 + s[7], n = digits[n&0x3f], n>>6 + s[6], n = digits[n&0x3f], n>>6 + s[5], n = digits[n&0x3f], n>>6 + s[4], n = digits[n&0x3f], n>>6 + s[3], n = digits[n&0x3f], n>>6 + s[2], n = digits[n&0x3f], n>>6 + s[1], n = digits[n&0x3f], n>>6 + s[0] = digits[n&0x3f] +} diff --git a/vendor/github.com/influxdata/influxdb/pkg/snowflake/gen_test.go b/vendor/github.com/influxdata/influxdb/pkg/snowflake/gen_test.go new file mode 100644 index 0000000..bd1dd28 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/snowflake/gen_test.go @@ -0,0 +1,68 @@ +package snowflake + +import ( + "fmt" + "math/rand" + "sort" + "testing" + + "github.com/influxdata/influxdb/pkg/testing/assert" +) + +func TestEncode(t *testing.T) { + tests := []struct { + v uint64 + exp string + }{ + {0x000, "00000000000"}, + {0x001, "00000000001"}, + {0x03f, "0000000000~"}, + {0x07f, "0000000001~"}, + {0xf07f07f07f07f07f, "F1~1~1~1~1~"}, + } + for _, test := range tests { + t.Run(fmt.Sprintf("0x%03x→%s", test.v, test.exp), func(t *testing.T) { + var s [11]byte + encode(&s, test.v) + assert.Equal(t, string(s[:]), test.exp) + }) + } +} + +// TestSorting verifies numbers using base 63 encoding are ordered according to 
their numerical representation. +func TestSorting(t *testing.T) { + var ( + vals = make([]string, 1000) + exp = make([]string, 1000) + ) + + for i := 0; i < len(vals); i++ { + var s [11]byte + encode(&s, uint64(i*47)) + vals[i] = string(s[:]) + exp[i] = string(s[:]) + } + + // randomize them + shuffle(len(vals), func(i, j int) { + vals[i], vals[j] = vals[j], vals[i] + }) + + sort.Strings(vals) + assert.Equal(t, vals, exp) +} + +func BenchmarkEncode(b *testing.B) { + b.ReportAllocs() + var s [11]byte + for i := 0; i < b.N; i++ { + encode(&s, 100) + } +} + +func shuffle(n int, swap func(i, j int)) { + for i := n - 1; i > 0; i-- { + j := rand.Intn(i + 1) + swap(i, j) + } +} diff --git a/vendor/github.com/influxdata/influxdb/pkg/tar/file_unix.go b/vendor/github.com/influxdata/influxdb/pkg/tar/file_unix.go new file mode 100644 index 0000000..6ae43ce --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/tar/file_unix.go @@ -0,0 +1,20 @@ +// +build !windows + +package tar + +import "os" + +func syncDir(dirName string) error { + // fsync the dir to flush the rename + dir, err := os.OpenFile(dirName, os.O_RDONLY, os.ModeDir) + if err != nil { + return err + } + defer dir.Close() + return dir.Sync() +} + +// renameFile renames the file at oldpath to newpath. +func renameFile(oldpath, newpath string) error { + return os.Rename(oldpath, newpath) +} diff --git a/vendor/github.com/influxdata/influxdb/pkg/tar/file_windows.go b/vendor/github.com/influxdata/influxdb/pkg/tar/file_windows.go new file mode 100644 index 0000000..2402d12 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/tar/file_windows.go @@ -0,0 +1,19 @@ +package tar + +import "os" + +func syncDir(dirName string) error { + return nil +} + +// renameFile renames the file at oldpath to newpath. +// If newpath already exists, it will be removed before renaming. +func renameFile(oldpath, newpath string) error { + if _, err := os.Stat(newpath); err == nil { + if err = os.Remove(newpath); nil != err { + return err + } + } + + return os.Rename(oldpath, newpath) +} diff --git a/vendor/github.com/influxdata/influxdb/pkg/tar/stream.go b/vendor/github.com/influxdata/influxdb/pkg/tar/stream.go new file mode 100644 index 0000000..c6105e7 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/tar/stream.go @@ -0,0 +1,163 @@ +package tar + +import ( + "archive/tar" + "fmt" + "io" + "os" + "path/filepath" + "strings" + "time" +) + +// Stream is a convenience function for creating a tar of a shard dir. It walks over the directory and subdirs, +// possibly writing each file to a tar writer stream. By default StreamFile is used, which will result in all files +// being written. A custom writeFunc can be passed so that each file may be written, modified+written, or skipped +// depending on the custom logic. 
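
Before the implementation, a rough usage sketch (the shard directory, output path and time cutoff are hypothetical; the import path assumes the vendored location used in this patch):

```go
package main

import (
	"log"
	"os"

	"github.com/influxdata/influxdb/pkg/tar"
)

func main() {
	// Hypothetical destination for the archive.
	out, err := os.Create("/tmp/shard.tar")
	if err != nil {
		log.Fatal(err)
	}
	defer out.Close()

	// A nil writeFunc selects the default StreamFile, so every file is written.
	if err := tar.Stream(out, "/var/lib/influxdb/data/db/rp/1", "db/rp/1", nil); err != nil {
		log.Fatal(err)
	}

	// To archive only files modified after a cutoff, a filtering writeFunc can be
	// passed instead, e.g. tar.SinceFilterTarFile(time.Now().Add(-time.Hour)).
}
```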
+func Stream(w io.Writer, dir, relativePath string, writeFunc func(f os.FileInfo, shardRelativePath, fullPath string, tw *tar.Writer) error) error { + tw := tar.NewWriter(w) + defer tw.Close() + + if writeFunc == nil { + writeFunc = StreamFile + } + + return filepath.Walk(dir, func(path string, f os.FileInfo, err error) error { + if err != nil { + return err + } + + // Skip adding an entry for the root dir + if dir == path && f.IsDir() { + return nil + } + + // Figure out the the full relative path including any sub-dirs + subDir, _ := filepath.Split(path) + subDir, err = filepath.Rel(dir, subDir) + if err != nil { + return err + } + + return writeFunc(f, filepath.Join(relativePath, subDir), path, tw) + }) +} + +// Generates a filtering function for Stream that checks an incoming file, and only writes the file to the stream if +// its mod time is later than since. Example: to tar only files newer than a certain datetime, use +// tar.Stream(w, dir, relativePath, SinceFilterTarFile(datetime)) +func SinceFilterTarFile(since time.Time) func(f os.FileInfo, shardRelativePath, fullPath string, tw *tar.Writer) error { + return func(f os.FileInfo, shardRelativePath, fullPath string, tw *tar.Writer) error { + if f.ModTime().After(since) { + return StreamFile(f, shardRelativePath, fullPath, tw) + } + return nil + } +} + +// stream a single file to tw, extending the header name using the shardRelativePath +func StreamFile(f os.FileInfo, shardRelativePath, fullPath string, tw *tar.Writer) error { + return StreamRenameFile(f, f.Name(), shardRelativePath, fullPath, tw) +} + +/// Stream a single file to tw, using tarHeaderFileName instead of the actual filename +// e.g., when we want to write a *.tmp file using the original file's non-tmp name. +func StreamRenameFile(f os.FileInfo, tarHeaderFileName, relativePath, fullPath string, tw *tar.Writer) error { + h, err := tar.FileInfoHeader(f, f.Name()) + if err != nil { + return err + } + h.Name = filepath.ToSlash(filepath.Join(relativePath, tarHeaderFileName)) + + if err := tw.WriteHeader(h); err != nil { + return err + } + + if !f.Mode().IsRegular() { + return nil + } + + fr, err := os.Open(fullPath) + if err != nil { + return err + } + + defer fr.Close() + + _, err = io.CopyN(tw, fr, h.Size) + + return err +} + +// Restore reads a tar archive from r and extracts all of its files into dir, +// using only the base name of each file. +func Restore(r io.Reader, dir string) error { + tr := tar.NewReader(r) + for { + if err := extractFile(tr, dir); err == io.EOF { + break + } else if err != nil { + return err + } + } + + return syncDir(dir) +} + +// extractFile copies the next file from tr into dir, using the file's base name. +func extractFile(tr *tar.Reader, dir string) error { + // Read next archive file. + hdr, err := tr.Next() + if err != nil { + return err + } + + // The hdr.Name is the relative path of the file from the root data dir. + // e.g (db/rp/1/xxxxx.tsm or db/rp/1/index/xxxxxx.tsi) + sections := strings.Split(filepath.FromSlash(hdr.Name), string(filepath.Separator)) + if len(sections) < 3 { + return fmt.Errorf("invalid archive path: %s", hdr.Name) + } + + relativePath := filepath.Join(sections[3:]...) + + subDir, _ := filepath.Split(relativePath) + // If this is a directory entry (usually just `index` for tsi), create it an move on. + if hdr.Typeflag == tar.TypeDir { + return os.MkdirAll(filepath.Join(dir, subDir), os.FileMode(hdr.Mode).Perm()) + } + + // Make sure the dir we need to write into exists. 
It should, but just double check in + // case we get a slightly invalid tarball. + if subDir != "" { + if err := os.MkdirAll(filepath.Join(dir, subDir), 0755); err != nil { + return err + } + } + + destPath := filepath.Join(dir, relativePath) + tmp := destPath + ".tmp" + + // Create new file on disk. + f, err := os.OpenFile(tmp, os.O_CREATE|os.O_RDWR, os.FileMode(hdr.Mode).Perm()) + if err != nil { + return err + } + defer f.Close() + + // Copy from archive to the file. + if _, err := io.CopyN(f, tr, hdr.Size); err != nil { + return err + } + + // Sync to disk & close. + if err := f.Sync(); err != nil { + return err + } + + if err := f.Close(); err != nil { + return err + } + + return renameFile(tmp, destPath) +} diff --git a/vendor/github.com/influxdata/influxdb/pkg/testing/assert/assertions.go b/vendor/github.com/influxdata/influxdb/pkg/testing/assert/assertions.go new file mode 100644 index 0000000..3699d31 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/testing/assert/assertions.go @@ -0,0 +1,116 @@ +package assert + +import ( + "bytes" + "fmt" + "reflect" +) + +type TestingT interface { + Errorf(format string, args ...interface{}) +} + +type helper interface { + Helper() +} + +// Equal asserts that the values are equal and returns +// true if the assertion was successful. +func Equal(t TestingT, got, expected interface{}, msgAndArgs ...interface{}) bool { + if ValuesAreEqual(got, expected) { + return true + } + + if th, ok := t.(helper); ok { + th.Helper() + } + + got, expected = formatValues(got, expected) + fail(t, fmt.Sprintf("Not Equal: got=%s, exp=%s", got, expected), msgAndArgs...) + return false +} + +// NotEqual asserts that the values are not equal and returns +// true if the assertion was successful. +func NotEqual(t TestingT, got, expected interface{}, msgAndArgs ...interface{}) bool { + if !ValuesAreEqual(got, expected) { + return true + } + + if th, ok := t.(helper); ok { + th.Helper() + } + _, expected = formatValues(got, expected) + fail(t, fmt.Sprintf("Equal: should not be %s", expected), msgAndArgs...) + return false +} + +// NoError asserts that err is nil and returns +// true if the assertion was successful. +func NoError(t TestingT, err error, msgAndArgs ...interface{}) bool { + if err != nil { + return fail(t, fmt.Sprintf("unexpected error: %+v", err), msgAndArgs...) + } + + return true +} + +// PanicsWithValue asserts that fn panics, and that +// the recovered panic value equals the expected panic value. +// +// Returns true if the assertion was successful. +func PanicsWithValue(t TestingT, expected interface{}, fn PanicTestFunc, msgAndArgs ...interface{}) bool { + if th, ok := t.(helper); ok { + th.Helper() + } + if funcDidPanic, got := didPanic(fn); !funcDidPanic { + return fail(t, fmt.Sprintf("func %#v should panic\n\r\tPanic value:\t%v", fn, got), msgAndArgs...) + } else if got != expected { + return fail(t, fmt.Sprintf("func %#v should panic with value:\t%v\n\r\tPanic value:\t%v", fn, expected, got), msgAndArgs...) + } + + return true +} + +// ValuesAreEqual determines if the values are equal. 
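
A brief usage sketch of these assertion helpers in an ordinary test (all values are made up for illustration):

```go
package example_test

import (
	"testing"

	"github.com/influxdata/influxdb/pkg/testing/assert"
)

func TestAssertSketch(t *testing.T) {
	// Compare arbitrary values; []byte arguments are compared with bytes.Equal.
	assert.Equal(t, 1+1, 2, "basic arithmetic should hold")
	assert.NotEqual(t, "a", "b")

	// Fails the test with a formatted message if err is non-nil.
	var err error
	assert.NoError(t, err)

	// Asserts that the function panics with exactly the value "boom".
	assert.PanicsWithValue(t, "boom", func() { panic("boom") })
}
```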
+func ValuesAreEqual(got, expected interface{}) bool { + if got == nil || expected == nil { + return got == expected + } + + if exp, ok := expected.([]byte); ok { + act, ok := got.([]byte) + if !ok { + return false + } else if exp == nil || act == nil { + return exp == nil && act == nil + } + return bytes.Equal(exp, act) + } + + return reflect.DeepEqual(expected, got) + +} + +// ValuesAreExactlyEqual determines if the values are equal and +// their types are the same. +func ValuesAreExactlyEqual(got, expected interface{}) bool { + if ValuesAreEqual(got, expected) { + return true + } + + actualType := reflect.TypeOf(got) + if actualType == nil { + return false + } + expectedValue := reflect.ValueOf(expected) + if expectedValue.IsValid() && expectedValue.Type().ConvertibleTo(actualType) { + // Attempt comparison after type conversion + return reflect.DeepEqual(expectedValue.Convert(actualType).Interface(), got) + } + + return false +} + +// PanicTestFunc defines a function that is called to determine whether a panic occurs. +type PanicTestFunc func() diff --git a/vendor/github.com/influxdata/influxdb/pkg/testing/assert/doc.go b/vendor/github.com/influxdata/influxdb/pkg/testing/assert/doc.go new file mode 100644 index 0000000..174facb --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/testing/assert/doc.go @@ -0,0 +1,4 @@ +/* +Package assert provides helper functions that can be used with the standard Go testing package. +*/ +package assert diff --git a/vendor/github.com/influxdata/influxdb/pkg/testing/assert/helper.go b/vendor/github.com/influxdata/influxdb/pkg/testing/assert/helper.go new file mode 100644 index 0000000..9afc21b --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/testing/assert/helper.go @@ -0,0 +1,55 @@ +package assert + +import ( + "fmt" + "reflect" +) + +func fail(t TestingT, failureMsg string, msgAndArgs ...interface{}) bool { + if th, ok := t.(helper); ok { + th.Helper() + } + + msg := formatMsgAndArgs(msgAndArgs...) + if msg == "" { + t.Errorf("%s", failureMsg) + } else { + t.Errorf("%s: %s", failureMsg, msg) + } + + return false +} + +func formatValues(got, expected interface{}) (string, string) { + if reflect.TypeOf(got) != reflect.TypeOf(expected) { + return fmt.Sprintf("%T(%#v)", got, got), fmt.Sprintf("%T(%#v)", expected, expected) + } + + return fmt.Sprintf("%#v", got), fmt.Sprintf("%#v", expected) +} + +func formatMsgAndArgs(msgAndArgs ...interface{}) string { + if len(msgAndArgs) == 0 || msgAndArgs == nil { + return "" + } + if len(msgAndArgs) == 1 { + return msgAndArgs[0].(string) + } + if len(msgAndArgs) > 1 { + return fmt.Sprintf(msgAndArgs[0].(string), msgAndArgs[1:]...) + } + return "" +} + +// didPanic returns true if fn panics when called. 
+func didPanic(fn PanicTestFunc) (panicked bool, message interface{}) { + defer func() { + if message = recover(); message != nil { + panicked = true + } + }() + + fn() + + return panicked, message +} diff --git a/vendor/github.com/influxdata/influxdb/pkg/tlsconfig/tls_config.go b/vendor/github.com/influxdata/influxdb/pkg/tlsconfig/tls_config.go new file mode 100644 index 0000000..6e8fcf8 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/tlsconfig/tls_config.go @@ -0,0 +1,128 @@ +package tlsconfig + +import ( + "crypto/tls" + "fmt" + "sort" + "strings" +) + +type Config struct { + Ciphers []string `toml:"ciphers"` + MinVersion string `toml:"min-version"` + MaxVersion string `toml:"max-version"` +} + +func NewConfig() Config { + return Config{} +} + +func (c Config) Validate() error { + _, err := c.Parse() + return err +} + +func (c Config) Parse() (out *tls.Config, err error) { + if len(c.Ciphers) > 0 { + if out == nil { + out = new(tls.Config) + } + + for _, name := range c.Ciphers { + cipher, ok := ciphersMap[strings.ToUpper(name)] + if !ok { + return nil, unknownCipher(name) + } + out.CipherSuites = append(out.CipherSuites, cipher) + } + } + + if c.MinVersion != "" { + if out == nil { + out = new(tls.Config) + } + + version, ok := versionsMap[strings.ToUpper(c.MinVersion)] + if !ok { + return nil, unknownVersion(c.MinVersion) + } + out.MinVersion = version + } + + if c.MaxVersion != "" { + if out == nil { + out = new(tls.Config) + } + + version, ok := versionsMap[strings.ToUpper(c.MaxVersion)] + if !ok { + return nil, unknownVersion(c.MaxVersion) + } + out.MaxVersion = version + } + + return out, nil +} + +var ciphersMap = map[string]uint16{ + "TLS_RSA_WITH_RC4_128_SHA": tls.TLS_RSA_WITH_RC4_128_SHA, + "TLS_RSA_WITH_3DES_EDE_CBC_SHA": tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA, + "TLS_RSA_WITH_AES_128_CBC_SHA": tls.TLS_RSA_WITH_AES_128_CBC_SHA, + "TLS_RSA_WITH_AES_256_CBC_SHA": tls.TLS_RSA_WITH_AES_256_CBC_SHA, + "TLS_RSA_WITH_AES_128_CBC_SHA256": tls.TLS_RSA_WITH_AES_128_CBC_SHA256, + "TLS_RSA_WITH_AES_128_GCM_SHA256": tls.TLS_RSA_WITH_AES_128_GCM_SHA256, + "TLS_RSA_WITH_AES_256_GCM_SHA384": tls.TLS_RSA_WITH_AES_256_GCM_SHA384, + "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA": tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, + "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA": tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, + "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA": tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, + "TLS_ECDHE_RSA_WITH_RC4_128_SHA": tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA, + "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, + "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, + "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, + "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256": tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, + "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256": tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, + "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256": tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256": tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384": tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, + "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384": tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305": tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, + "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305": tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, +} + +func unknownCipher(name string) error { + available := make([]string, 0, len(ciphersMap)) + for name := range 
ciphersMap { + available = append(available, name) + } + sort.Strings(available) + + return fmt.Errorf("unknown cipher suite: %q. available ciphers: %s", + name, strings.Join(available, ", ")) +} + +var versionsMap = map[string]uint16{ + "SSL3.0": tls.VersionSSL30, + "TLS1.0": tls.VersionTLS10, + "1.0": tls.VersionTLS11, + "TLS1.1": tls.VersionTLS11, + "1.1": tls.VersionTLS11, + "TLS1.2": tls.VersionTLS12, + "1.2": tls.VersionTLS12, +} + +func unknownVersion(name string) error { + available := make([]string, 0, len(versionsMap)) + for name := range versionsMap { + // skip the ones that just begin with a number. they may be confusing + // due to the duplication, and just help if the user specifies without + // the TLS part. + if name[0] == '1' { + continue + } + available = append(available, name) + } + sort.Strings(available) + + return fmt.Errorf("unknown tls version: %q. available versions: %s", + name, strings.Join(available, ", ")) +} diff --git a/vendor/github.com/influxdata/influxdb/pkg/tracing/context.go b/vendor/github.com/influxdata/influxdb/pkg/tracing/context.go new file mode 100644 index 0000000..0b98468 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/tracing/context.go @@ -0,0 +1,32 @@ +package tracing + +import "context" + +type key int + +const ( + spanKey key = iota + traceKey +) + +// NewContextWithSpan returns a new context with the given Span added. +func NewContextWithSpan(ctx context.Context, c *Span) context.Context { + return context.WithValue(ctx, spanKey, c) +} + +// SpanFromContext returns the Span associated with ctx or nil if no Span has been assigned. +func SpanFromContext(ctx context.Context) *Span { + c, _ := ctx.Value(spanKey).(*Span) + return c +} + +// NewContextWithTrace returns a new context with the given Trace added. +func NewContextWithTrace(ctx context.Context, t *Trace) context.Context { + return context.WithValue(ctx, traceKey, t) +} + +// TraceFromContext returns the Trace associated with ctx or nil if no Trace has been assigned. +func TraceFromContext(ctx context.Context) *Trace { + c, _ := ctx.Value(traceKey).(*Trace) + return c +} diff --git a/vendor/github.com/influxdata/influxdb/pkg/tracing/doc.go b/vendor/github.com/influxdata/influxdb/pkg/tracing/doc.go new file mode 100644 index 0000000..4e7b582 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/tracing/doc.go @@ -0,0 +1,26 @@ +/* +Package tracing provides a way for capturing hierarchical traces. + +To start a new trace with a root span named select + + trace, span := tracing.NewTrace("select") + +It is recommended that a span be forwarded to callees using the +context package. Firstly, create a new context with the span associated +as follows + + ctx = tracing.NewContextWithSpan(ctx, span) + +followed by calling the API with the new context + + SomeAPI(ctx, ...) + +Once the trace is complete, it may be converted to a graph with the Tree method. + + tree := t.Tree() + +The tree is intended to be used with the Walk function in order to generate +different presentations. The default Tree#String method returns a tree. 
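
Putting the pieces together, a minimal end-to-end sketch (names and labels are purely illustrative) looks like

	trace, span := tracing.NewTrace("select")
	ctx := tracing.NewContextWithSpan(context.Background(), span)

	child := span.StartSpan("fetch")
	child.SetLabels("shard", "1")
	child.Finish()

	span.Finish()
	fmt.Println(trace.Tree().String())

where ctx would be passed to the callees that create further child spans.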
+ +*/ +package tracing diff --git a/vendor/github.com/influxdata/influxdb/pkg/tracing/fields/field.go b/vendor/github.com/influxdata/influxdb/pkg/tracing/fields/field.go new file mode 100644 index 0000000..38e4907 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/tracing/fields/field.go @@ -0,0 +1,117 @@ +package fields + +import ( + "fmt" + "math" + "time" +) + +type fieldType int + +const ( + stringType fieldType = iota + boolType + int64Type + uint64Type + durationType + float64Type +) + +// Field instances are constructed via Bool, String, and so on. +// +// "heavily influenced by" (i.e., partially stolen from) +// https://github.com/opentracing/opentracing-go/log +type Field struct { + key string + fieldType fieldType + numericVal int64 + stringVal string +} + +// String adds a string-valued key:value pair to a Span.LogFields() record +func String(key, val string) Field { + return Field{ + key: key, + fieldType: stringType, + stringVal: val, + } +} + +// Bool adds a bool-valued key:value pair to a Span.LogFields() record +func Bool(key string, val bool) Field { + var numericVal int64 + if val { + numericVal = 1 + } + return Field{ + key: key, + fieldType: boolType, + numericVal: numericVal, + } +} + +/// Int64 adds an int64-valued key:value pair to a Span.LogFields() record +func Int64(key string, val int64) Field { + return Field{ + key: key, + fieldType: int64Type, + numericVal: val, + } +} + +// Uint64 adds a uint64-valued key:value pair to a Span.LogFields() record +func Uint64(key string, val uint64) Field { + return Field{ + key: key, + fieldType: uint64Type, + numericVal: int64(val), + } +} + +// Uint64 adds a uint64-valued key:value pair to a Span.LogFields() record +func Duration(key string, val time.Duration) Field { + return Field{ + key: key, + fieldType: durationType, + numericVal: int64(val), + } +} + +// Float64 adds a float64-valued key:value pair to a Span.LogFields() record +func Float64(key string, val float64) Field { + return Field{ + key: key, + fieldType: float64Type, + numericVal: int64(math.Float64bits(val)), + } +} + +// Key returns the field's key. +func (lf Field) Key() string { + return lf.key +} + +// Value returns the field's value as interface{}. +func (lf Field) Value() interface{} { + switch lf.fieldType { + case stringType: + return lf.stringVal + case boolType: + return lf.numericVal != 0 + case int64Type: + return int64(lf.numericVal) + case uint64Type: + return uint64(lf.numericVal) + case durationType: + return time.Duration(lf.numericVal) + case float64Type: + return math.Float64frombits(uint64(lf.numericVal)) + default: + return nil + } +} + +// String returns a string representation of the key and value. +func (lf Field) String() string { + return fmt.Sprint(lf.key, ": ", lf.Value()) +} diff --git a/vendor/github.com/influxdata/influxdb/pkg/tracing/fields/fields.go b/vendor/github.com/influxdata/influxdb/pkg/tracing/fields/fields.go new file mode 100644 index 0000000..825cf25 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/tracing/fields/fields.go @@ -0,0 +1,61 @@ +package fields + +import "sort" + +type Fields []Field + +// Merge merges other with the current set, replacing any matching keys from other. 
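
A short self-contained sketch of the intended semantics (the keys and values are invented; the import path assumes the vendored location): keys present in both sets take the value from the argument, everything else is kept.

```go
package main

import (
	"fmt"

	"github.com/influxdata/influxdb/pkg/tracing/fields"
)

func main() {
	a := fields.New(fields.String("host", "h1"), fields.Int64("shard", 1))
	b := fields.New(fields.String("host", "h2"))

	// "host" exists in both sets, so a takes the value from b; "shard" is kept.
	a.Merge(b)
	fmt.Println(a) // "host" now carries h2; "shard" is unchanged
}
```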
+func (fs *Fields) Merge(other Fields) { + var list []Field + i, j := 0, 0 + for i < len(*fs) && j < len(other) { + if (*fs)[i].key < other[j].key { + list = append(list, (*fs)[i]) + i++ + } else if (*fs)[i].key > other[j].key { + list = append(list, other[j]) + j++ + } else { + // equal, then "other" replaces existing key + list = append(list, other[j]) + i++ + j++ + } + } + + if i < len(*fs) { + list = append(list, (*fs)[i:]...) + } else if j < len(other) { + list = append(list, other[j:]...) + } + + *fs = list +} + +// New creates a new set of fields, sorted by Key. +// Duplicate keys are removed. +func New(args ...Field) Fields { + fields := Fields(args) + sort.Slice(fields, func(i, j int) bool { + return fields[i].key < fields[j].key + }) + + // deduplicate + // loop invariant: fields[:i] has no duplicates + for i := 0; i < len(fields)-1; i++ { + j := i + 1 + // find all duplicate keys + for j < len(fields) && fields[i].key == fields[j].key { + j++ + } + + d := (j - 1) - i // number of duplicate keys + if d > 0 { + // copy over duplicate keys in order to maintain loop invariant + copy(fields[i+1:], fields[j:]) + fields = fields[:len(fields)-d] + } + } + + return fields +} diff --git a/vendor/github.com/influxdata/influxdb/pkg/tracing/fields/fields_test.go b/vendor/github.com/influxdata/influxdb/pkg/tracing/fields/fields_test.go new file mode 100644 index 0000000..f28bb7a --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/tracing/fields/fields_test.go @@ -0,0 +1,101 @@ +package fields + +import ( + "testing" + + "github.com/influxdata/influxdb/pkg/testing/assert" +) + +func makeFields(args ...string) Fields { + if len(args)%2 != 0 { + panic("uneven number of arguments") + } + + var f Fields + for i := 0; i+1 < len(args); i += 2 { + f = append(f, String(args[i], args[i+1])) + } + return f +} + +func TestNew(t *testing.T) { + cases := []struct { + n string + l []string + exp Fields + }{ + { + n: "empty", + l: nil, + exp: makeFields(), + }, + { + n: "not duplicates", + l: []string{"k01", "v01", "k03", "v03", "k02", "v02"}, + exp: makeFields("k01", "v01", "k02", "v02", "k03", "v03"), + }, + { + n: "duplicates at end", + l: []string{"k01", "v01", "k02", "v02", "k02", "v02"}, + exp: makeFields("k01", "v01", "k02", "v02"), + }, + { + n: "duplicates at start", + l: []string{"k01", "v01", "k02", "v02", "k01", "v01"}, + exp: makeFields("k01", "v01", "k02", "v02"), + }, + { + n: "duplicates in middle", + l: []string{"k01", "v01", "k02", "v02", "k03", "v03", "k02", "v02", "k02", "v02"}, + exp: makeFields("k01", "v01", "k02", "v02", "k03", "v03"), + }, + } + + for _, tc := range cases { + t.Run(tc.n, func(t *testing.T) { + l := New(makeFields(tc.l...)...) 
+ assert.Equal(t, tc.exp, l) + }) + } +} + +func TestFields_Merge(t *testing.T) { + cases := []struct { + n string + l, r Fields + exp Fields + }{ + { + n: "no matching keys", + l: New(String("k05", "v05"), String("k03", "v03"), String("k01", "v01")), + r: New(String("k02", "v02"), String("k04", "v04"), String("k00", "v00")), + exp: New(String("k05", "v05"), String("k03", "v03"), String("k01", "v01"), String("k02", "v02"), String("k04", "v04"), String("k00", "v00")), + }, + { + n: "multiple matching keys", + l: New(String("k05", "v05"), String("k03", "v03"), String("k01", "v01")), + r: New(String("k02", "v02"), String("k03", "v03a"), String("k05", "v05a")), + exp: New(String("k05", "v05a"), String("k03", "v03a"), String("k01", "v01"), String("k02", "v02")), + }, + { + n: "source empty", + l: New(), + r: New(String("k02", "v02"), String("k04", "v04"), String("k00", "v00")), + exp: New(String("k02", "v02"), String("k04", "v04"), String("k00", "v00")), + }, + { + n: "other empty", + l: New(String("k02", "v02"), String("k04", "v04"), String("k00", "v00")), + r: New(), + exp: New(String("k02", "v02"), String("k04", "v04"), String("k00", "v00")), + }, + } + + for _, tc := range cases { + t.Run(tc.n, func(t *testing.T) { + l := tc.l + l.Merge(tc.r) + assert.Equal(t, tc.exp, l) + }) + } +} diff --git a/vendor/github.com/influxdata/influxdb/pkg/tracing/labels/labels.go b/vendor/github.com/influxdata/influxdb/pkg/tracing/labels/labels.go new file mode 100644 index 0000000..90afda7 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/tracing/labels/labels.go @@ -0,0 +1,74 @@ +package labels + +import "sort" + +type Label struct { + Key, Value string +} + +// The Labels type represents a set of labels, sorted by Key. +type Labels []Label + +// Merge merges other with the current set, replacing any matching keys from other. +func (ls *Labels) Merge(other Labels) { + var list []Label + i, j := 0, 0 + for i < len(*ls) && j < len(other) { + if (*ls)[i].Key < other[j].Key { + list = append(list, (*ls)[i]) + i++ + } else if (*ls)[i].Key > other[j].Key { + list = append(list, other[j]) + j++ + } else { + // equal, then "other" replaces existing key + list = append(list, other[j]) + i++ + j++ + } + } + + if i < len(*ls) { + list = append(list, (*ls)[i:]...) + } else if j < len(other) { + list = append(list, other[j:]...) + } + + *ls = list +} + +// New takes an even number of strings representing key-value pairs +// and creates a new slice of Labels. 
Duplicates are removed, however, +// there is no guarantee which will be removed +func New(args ...string) Labels { + if len(args)%2 != 0 { + panic("uneven number of arguments to label.Labels") + } + var labels Labels + for i := 0; i+1 < len(args); i += 2 { + labels = append(labels, Label{Key: args[i], Value: args[i+1]}) + } + + sort.Slice(labels, func(i, j int) bool { + return labels[i].Key < labels[j].Key + }) + + // deduplicate + // loop invariant: labels[:i] has no duplicates + for i := 0; i < len(labels)-1; i++ { + j := i + 1 + // find all duplicate keys + for j < len(labels) && labels[i].Key == labels[j].Key { + j++ + } + + d := (j - 1) - i // number of duplicate keys + if d > 0 { + // copy over duplicate keys in order to maintain loop invariant + copy(labels[i+1:], labels[j:]) + labels = labels[:len(labels)-d] + } + } + + return labels +} diff --git a/vendor/github.com/influxdata/influxdb/pkg/tracing/labels/labels_test.go b/vendor/github.com/influxdata/influxdb/pkg/tracing/labels/labels_test.go new file mode 100644 index 0000000..7e8bcc1 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/tracing/labels/labels_test.go @@ -0,0 +1,101 @@ +package labels + +import ( + "testing" + + "github.com/influxdata/influxdb/pkg/testing/assert" +) + +func makeLabels(args ...string) Labels { + if len(args)%2 != 0 { + panic("uneven number of arguments") + } + + var l Labels + for i := 0; i+1 < len(args); i += 2 { + l = append(l, Label{Key: args[i], Value: args[i+1]}) + } + return l +} + +func TestNew(t *testing.T) { + cases := []struct { + n string + l []string + exp Labels + }{ + { + n: "empty", + l: nil, + exp: makeLabels(), + }, + { + n: "not duplicates", + l: []string{"k01", "v01", "k03", "v03", "k02", "v02"}, + exp: makeLabels("k01", "v01", "k02", "v02", "k03", "v03"), + }, + { + n: "duplicates at end", + l: []string{"k01", "v01", "k02", "v02", "k02", "v02"}, + exp: makeLabels("k01", "v01", "k02", "v02"), + }, + { + n: "duplicates at start", + l: []string{"k01", "v01", "k02", "v02", "k01", "v01"}, + exp: makeLabels("k01", "v01", "k02", "v02"), + }, + { + n: "duplicates in middle", + l: []string{"k01", "v01", "k02", "v02", "k03", "v03", "k02", "v02", "k02", "v02"}, + exp: makeLabels("k01", "v01", "k02", "v02", "k03", "v03"), + }, + } + + for _, tc := range cases { + t.Run(tc.n, func(t *testing.T) { + l := New(tc.l...) 
+ assert.Equal(t, l, tc.exp) + }) + } +} + +func TestLabels_Merge(t *testing.T) { + cases := []struct { + n string + l, r Labels + exp Labels + }{ + { + n: "no matching keys", + l: New("k05", "v05", "k03", "v03", "k01", "v01"), + r: New("k02", "v02", "k04", "v04", "k00", "v00"), + exp: New("k05", "v05", "k03", "v03", "k01", "v01", "k02", "v02", "k04", "v04", "k00", "v00"), + }, + { + n: "multiple matching keys", + l: New("k05", "v05", "k03", "v03", "k01", "v01"), + r: New("k02", "v02", "k03", "v03a", "k05", "v05a"), + exp: New("k05", "v05a", "k03", "v03a", "k01", "v01", "k02", "v02"), + }, + { + n: "source empty", + l: New(), + r: New("k02", "v02", "k04", "v04", "k00", "v00"), + exp: New("k02", "v02", "k04", "v04", "k00", "v00"), + }, + { + n: "other empty", + l: New("k02", "v02", "k04", "v04", "k00", "v00"), + r: New(), + exp: New("k02", "v02", "k04", "v04", "k00", "v00"), + }, + } + + for _, tc := range cases { + t.Run(tc.n, func(t *testing.T) { + l := tc.l + l.Merge(tc.r) + assert.Equal(t, l, tc.exp) + }) + } +} diff --git a/vendor/github.com/influxdata/influxdb/pkg/tracing/rawspan.go b/vendor/github.com/influxdata/influxdb/pkg/tracing/rawspan.go new file mode 100644 index 0000000..12e37e5 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/tracing/rawspan.go @@ -0,0 +1,18 @@ +package tracing + +import ( + "time" + + "github.com/influxdata/influxdb/pkg/tracing/fields" + "github.com/influxdata/influxdb/pkg/tracing/labels" +) + +// RawSpan represents the data associated with a span. +type RawSpan struct { + Context SpanContext + ParentSpanID uint64 // ParentSpanID identifies the parent of this span or 0 if this is the root span. + Name string // Name is the operation name given to this span. + Start time.Time // Start identifies the start time of the span. + Labels labels.Labels // Labels contains additional metadata about this span. + Fields fields.Fields // Fields contains typed values associated with this span. +} diff --git a/vendor/github.com/influxdata/influxdb/pkg/tracing/span.go b/vendor/github.com/influxdata/influxdb/pkg/tracing/span.go new file mode 100644 index 0000000..c8bcfb4 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/tracing/span.go @@ -0,0 +1,84 @@ +package tracing + +import ( + "sync" + "time" + + "github.com/influxdata/influxdb/pkg/tracing/fields" + "github.com/influxdata/influxdb/pkg/tracing/labels" +) + +// The Span type denotes a specific operation for a Trace. +// A Span may have one or more children, identifying additional +// details about a trace. +type Span struct { + tracer *Trace + mu sync.Mutex + raw RawSpan +} + +type StartSpanOption interface { + applyStart(*Span) +} + +// The StartTime start span option specifies the start time of +// the new span rather than using now. +type StartTime time.Time + +func (t StartTime) applyStart(s *Span) { + s.raw.Start = time.Time(t) +} + +// StartSpan creates a new child span using time.Now as the start time. +func (s *Span) StartSpan(name string, opt ...StartSpanOption) *Span { + return s.tracer.startSpan(name, s.raw.Context, opt) +} + +// Context returns a SpanContext that can be serialized and passed to a remote node to continue a trace. +func (s *Span) Context() SpanContext { + return s.raw.Context +} + +// SetLabels replaces any existing labels for the Span with args. +func (s *Span) SetLabels(args ...string) { + s.mu.Lock() + s.raw.Labels = labels.New(args...) + s.mu.Unlock() +} + +// MergeLabels merges args with any existing labels defined +// for the Span. 
+func (s *Span) MergeLabels(args ...string) { + ls := labels.New(args...) + s.mu.Lock() + s.raw.Labels.Merge(ls) + s.mu.Unlock() +} + +// SetFields replaces any existing fields for the Span with args. +func (s *Span) SetFields(set fields.Fields) { + s.mu.Lock() + s.raw.Fields = set + s.mu.Unlock() +} + +// MergeFields merges the provides args with any existing fields defined +// for the Span. +func (s *Span) MergeFields(args ...fields.Field) { + set := fields.New(args...) + s.mu.Lock() + s.raw.Fields.Merge(set) + s.mu.Unlock() +} + +// Finish marks the end of the span and records it to the associated Trace. +// If Finish is not called, the span will not appear in the trace. +func (s *Span) Finish() { + s.mu.Lock() + s.tracer.addRawSpan(s.raw) + s.mu.Unlock() +} + +func (s *Span) Tree() *TreeNode { + return s.tracer.TreeFrom(s.raw.Context.SpanID) +} diff --git a/vendor/github.com/influxdata/influxdb/pkg/tracing/spancontext.go b/vendor/github.com/influxdata/influxdb/pkg/tracing/spancontext.go new file mode 100644 index 0000000..62cf7af --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/tracing/spancontext.go @@ -0,0 +1,27 @@ +package tracing + +import ( + "github.com/gogo/protobuf/proto" + "github.com/influxdata/influxdb/pkg/tracing/wire" +) + +// A SpanContext represents the minimal information to identify a span in a trace. +// This is typically serialized to continue a trace on a remote node. +type SpanContext struct { + TraceID uint64 // TraceID is assigned a random number to this trace. + SpanID uint64 // SpanID is assigned a random number to identify this span. +} + +func (s SpanContext) MarshalBinary() ([]byte, error) { + ws := wire.SpanContext(s) + return proto.Marshal(&ws) +} + +func (s *SpanContext) UnmarshalBinary(data []byte) error { + var ws wire.SpanContext + err := proto.Unmarshal(data, &ws) + if err == nil { + *s = SpanContext(ws) + } + return err +} diff --git a/vendor/github.com/influxdata/influxdb/pkg/tracing/trace.go b/vendor/github.com/influxdata/influxdb/pkg/tracing/trace.go new file mode 100644 index 0000000..4beb7a5 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/tracing/trace.go @@ -0,0 +1,138 @@ +package tracing + +import ( + "sort" + "sync" + "time" +) + +// The Trace type functions as a container for capturing Spans used to +// trace the execution of a request. +type Trace struct { + mu sync.Mutex + spans map[uint64]RawSpan +} + +// NewTrace starts a new trace and returns a root span identified by the provided name. +// +// Additional options may be specified to override the default behavior when creating the span. +func NewTrace(name string, opt ...StartSpanOption) (*Trace, *Span) { + t := &Trace{spans: make(map[uint64]RawSpan)} + s := &Span{tracer: t} + s.raw.Name = name + s.raw.Context.TraceID, s.raw.Context.SpanID = randomID2() + setOptions(s, opt) + + return t, s +} + +// NewTraceFromSpan starts a new trace and returns the associated span, which is a child of the +// parent span context. 
+func NewTraceFromSpan(name string, parent SpanContext, opt ...StartSpanOption) (*Trace, *Span) { + t := &Trace{spans: make(map[uint64]RawSpan)} + s := &Span{tracer: t} + s.raw.Name = name + s.raw.ParentSpanID = parent.SpanID + s.raw.Context.TraceID = parent.TraceID + s.raw.Context.SpanID = randomID() + setOptions(s, opt) + + return t, s +} + +func (t *Trace) startSpan(name string, sc SpanContext, opt []StartSpanOption) *Span { + s := &Span{tracer: t} + s.raw.Name = name + s.raw.Context.SpanID = randomID() + s.raw.Context.TraceID = sc.TraceID + s.raw.ParentSpanID = sc.SpanID + setOptions(s, opt) + + return s +} + +func setOptions(s *Span, opt []StartSpanOption) { + for _, o := range opt { + o.applyStart(s) + } + + if s.raw.Start.IsZero() { + s.raw.Start = time.Now() + } +} + +func (t *Trace) addRawSpan(raw RawSpan) { + t.mu.Lock() + t.spans[raw.Context.SpanID] = raw + t.mu.Unlock() +} + +// Tree returns a graph of the current trace. +func (t *Trace) Tree() *TreeNode { + t.mu.Lock() + defer t.mu.Unlock() + + for _, s := range t.spans { + if s.ParentSpanID == 0 { + return t.treeFrom(s.Context.SpanID) + } + } + return nil +} + +// Merge combines other with the current trace. This is +// typically necessary when traces are transferred from a remote. +func (t *Trace) Merge(other *Trace) { + for k, s := range other.spans { + t.spans[k] = s + } +} + +func (t *Trace) TreeFrom(root uint64) *TreeNode { + t.mu.Lock() + defer t.mu.Unlock() + return t.treeFrom(root) +} + +func (t *Trace) treeFrom(root uint64) *TreeNode { + c := map[uint64]*TreeNode{} + + for k, s := range t.spans { + c[k] = &TreeNode{Raw: s} + } + + if _, ok := c[root]; !ok { + return nil + } + + for _, n := range c { + if n.Raw.ParentSpanID != 0 { + if pn := c[n.Raw.ParentSpanID]; pn != nil { + pn.Children = append(pn.Children, n) + } + } + } + + // sort nodes + var v treeSortVisitor + Walk(&v, c[root]) + + return c[root] +} + +type treeSortVisitor struct{} + +func (v *treeSortVisitor) Visit(node *TreeNode) Visitor { + sort.Slice(node.Children, func(i, j int) bool { + lt, rt := node.Children[i].Raw.Start.UnixNano(), node.Children[j].Raw.Start.UnixNano() + if lt < rt { + return true + } else if lt > rt { + return false + } + + ln, rn := node.Children[i].Raw.Name, node.Children[j].Raw.Name + return ln < rn + }) + return v +} diff --git a/vendor/github.com/influxdata/influxdb/pkg/tracing/trace_encoding.go b/vendor/github.com/influxdata/influxdb/pkg/tracing/trace_encoding.go new file mode 100644 index 0000000..31c3b33 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/tracing/trace_encoding.go @@ -0,0 +1,136 @@ +package tracing + +import ( + "math" + "time" + + "github.com/gogo/protobuf/proto" + "github.com/influxdata/influxdb/pkg/tracing/fields" + "github.com/influxdata/influxdb/pkg/tracing/labels" + "github.com/influxdata/influxdb/pkg/tracing/wire" +) + +func fieldsToWire(set fields.Fields) []wire.Field { + var r []wire.Field + for _, f := range set { + wf := wire.Field{Key: f.Key()} + switch val := f.Value().(type) { + case string: + wf.FieldType = wire.FieldTypeString + wf.Value = &wire.Field_StringVal{StringVal: val} + + case bool: + var numericVal int64 + if val { + numericVal = 1 + } + wf.FieldType = wire.FieldTypeBool + wf.Value = &wire.Field_NumericVal{NumericVal: numericVal} + + case int64: + wf.FieldType = wire.FieldTypeInt64 + wf.Value = &wire.Field_NumericVal{NumericVal: val} + + case uint64: + wf.FieldType = wire.FieldTypeUint64 + wf.Value = &wire.Field_NumericVal{NumericVal: int64(val)} + + case time.Duration: + 
wf.FieldType = wire.FieldTypeDuration + wf.Value = &wire.Field_NumericVal{NumericVal: int64(val)} + + case float64: + wf.FieldType = wire.FieldTypeFloat64 + wf.Value = &wire.Field_NumericVal{NumericVal: int64(math.Float64bits(val))} + + default: + continue + } + + r = append(r, wf) + } + return r +} + +func labelsToWire(set labels.Labels) []string { + var r []string + for i := range set { + r = append(r, set[i].Key, set[i].Value) + } + return r +} + +func (t *Trace) MarshalBinary() ([]byte, error) { + wt := wire.Trace{} + for _, sp := range t.spans { + wt.Spans = append(wt.Spans, &wire.Span{ + Context: wire.SpanContext{ + TraceID: sp.Context.TraceID, + SpanID: sp.Context.SpanID, + }, + ParentSpanID: sp.ParentSpanID, + Name: sp.Name, + Start: sp.Start, + Labels: labelsToWire(sp.Labels), + Fields: fieldsToWire(sp.Fields), + }) + } + + return proto.Marshal(&wt) +} + +func wireToFields(wfs []wire.Field) fields.Fields { + var fs []fields.Field + for _, wf := range wfs { + switch wf.FieldType { + case wire.FieldTypeString: + fs = append(fs, fields.String(wf.Key, wf.GetStringVal())) + + case wire.FieldTypeBool: + var boolVal bool + if wf.GetNumericVal() != 0 { + boolVal = true + } + fs = append(fs, fields.Bool(wf.Key, boolVal)) + + case wire.FieldTypeInt64: + fs = append(fs, fields.Int64(wf.Key, wf.GetNumericVal())) + + case wire.FieldTypeUint64: + fs = append(fs, fields.Uint64(wf.Key, uint64(wf.GetNumericVal()))) + + case wire.FieldTypeDuration: + fs = append(fs, fields.Duration(wf.Key, time.Duration(wf.GetNumericVal()))) + + case wire.FieldTypeFloat64: + fs = append(fs, fields.Float64(wf.Key, math.Float64frombits(uint64(wf.GetNumericVal())))) + } + } + + return fields.New(fs...) +} + +func (t *Trace) UnmarshalBinary(data []byte) error { + var wt wire.Trace + if err := proto.Unmarshal(data, &wt); err != nil { + return err + } + + t.spans = make(map[uint64]RawSpan) + + for _, sp := range wt.Spans { + t.spans[sp.Context.SpanID] = RawSpan{ + Context: SpanContext{ + TraceID: sp.Context.TraceID, + SpanID: sp.Context.SpanID, + }, + ParentSpanID: sp.ParentSpanID, + Name: sp.Name, + Start: sp.Start, + Labels: labels.New(sp.Labels...), + Fields: wireToFields(sp.Fields), + } + } + + return nil +} diff --git a/vendor/github.com/influxdata/influxdb/pkg/tracing/tree.go b/vendor/github.com/influxdata/influxdb/pkg/tracing/tree.go new file mode 100644 index 0000000..0321be6 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/tracing/tree.go @@ -0,0 +1,74 @@ +package tracing + +import ( + "github.com/xlab/treeprint" +) + +// A Visitor's Visit method is invoked for each node encountered by Walk. +// If the result of Visit is not nil, Walk visits each of the children. +type Visitor interface { + Visit(*TreeNode) Visitor +} + +// A TreeNode represents a single node in the graph. +type TreeNode struct { + Raw RawSpan + Children []*TreeNode +} + +// String returns the tree as a string. +func (t *TreeNode) String() string { + if t == nil { + return "" + } + tv := newTreeVisitor() + Walk(tv, t) + return tv.root.String() +} + +// Walk traverses the graph in a depth-first order, calling v.Visit +// for each node until completion or v.Visit returns nil. 
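
As a sketch of how a caller might implement Visitor (countVisitor here is illustrative, not part of the package), the following counts every span in a tree:

```go
package main

import (
	"fmt"

	"github.com/influxdata/influxdb/pkg/tracing"
)

// countVisitor is an illustrative Visitor that counts every node it sees.
type countVisitor struct{ n int }

func (v *countVisitor) Visit(node *tracing.TreeNode) tracing.Visitor {
	v.n++
	return v // returning a non-nil Visitor continues the walk into the children
}

func main() {
	trace, span := tracing.NewTrace("select")
	child := span.StartSpan("fetch")
	child.Finish()
	span.Finish()

	v := &countVisitor{}
	tracing.Walk(v, trace.Tree())
	fmt.Println("spans:", v.n) // 2
}
```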
+func Walk(v Visitor, node *TreeNode) { + if v = v.Visit(node); v == nil { + return + } + + for _, c := range node.Children { + Walk(v, c) + } +} + +type treeVisitor struct { + root treeprint.Tree + trees []treeprint.Tree +} + +func newTreeVisitor() *treeVisitor { + t := treeprint.New() + return &treeVisitor{root: t, trees: []treeprint.Tree{t}} +} + +func (v *treeVisitor) Visit(n *TreeNode) Visitor { + t := v.trees[len(v.trees)-1].AddBranch(n.Raw.Name) + v.trees = append(v.trees, t) + + if labels := n.Raw.Labels; len(labels) > 0 { + l := t.AddBranch("labels") + for _, ll := range n.Raw.Labels { + l.AddNode(ll.Key + ": " + ll.Value) + } + } + + for _, k := range n.Raw.Fields { + t.AddNode(k.String()) + } + + for _, cn := range n.Children { + Walk(v, cn) + } + + v.trees[len(v.trees)-1] = nil + v.trees = v.trees[:len(v.trees)-1] + + return nil +} diff --git a/vendor/github.com/influxdata/influxdb/pkg/tracing/util.go b/vendor/github.com/influxdata/influxdb/pkg/tracing/util.go new file mode 100644 index 0000000..f98cc77 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/tracing/util.go @@ -0,0 +1,26 @@ +package tracing + +import ( + "math/rand" + "sync" + "time" +) + +var ( + seededIDGen = rand.New(rand.NewSource(time.Now().UnixNano())) + seededIDLock sync.Mutex +) + +func randomID() (n uint64) { + seededIDLock.Lock() + n = uint64(seededIDGen.Int63()) + seededIDLock.Unlock() + return +} + +func randomID2() (n uint64, m uint64) { + seededIDLock.Lock() + n, m = uint64(seededIDGen.Int63()), uint64(seededIDGen.Int63()) + seededIDLock.Unlock() + return +} diff --git a/vendor/github.com/influxdata/influxdb/pkg/tracing/wire/binary.go b/vendor/github.com/influxdata/influxdb/pkg/tracing/wire/binary.go new file mode 100644 index 0000000..62bb854 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/tracing/wire/binary.go @@ -0,0 +1,7 @@ +/* +Package wire is used to serialize a trace. + +*/ +package wire + +//go:generate protoc -I$GOPATH/src -I. --gogofaster_out=Mgoogle/protobuf/timestamp.proto=github.com/gogo/protobuf/types:. binary.proto diff --git a/vendor/github.com/influxdata/influxdb/pkg/tracing/wire/binary.pb.go b/vendor/github.com/influxdata/influxdb/pkg/tracing/wire/binary.pb.go new file mode 100644 index 0000000..377bea8 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/tracing/wire/binary.pb.go @@ -0,0 +1,1292 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: binary.proto + +/* + Package wire is a generated protocol buffer package. + + It is generated from these files: + binary.proto + + It has these top-level messages: + SpanContext + Span + Trace + Field +*/ +package wire + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" +import _ "github.com/gogo/protobuf/types" + +import time "time" + +import github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf +var _ = time.Kitchen + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type Field_FieldType int32 + +const ( + FieldTypeString Field_FieldType = 0 + FieldTypeBool Field_FieldType = 1 + FieldTypeInt64 Field_FieldType = 2 + FieldTypeUint64 Field_FieldType = 3 + FieldTypeDuration Field_FieldType = 4 + FieldTypeFloat64 Field_FieldType = 6 +) + +var Field_FieldType_name = map[int32]string{ + 0: "STRING", + 1: "BOOL", + 2: "INT_64", + 3: "UINT_64", + 4: "DURATION", + 6: "FLOAT_64", +} +var Field_FieldType_value = map[string]int32{ + "STRING": 0, + "BOOL": 1, + "INT_64": 2, + "UINT_64": 3, + "DURATION": 4, + "FLOAT_64": 6, +} + +func (x Field_FieldType) String() string { + return proto.EnumName(Field_FieldType_name, int32(x)) +} +func (Field_FieldType) EnumDescriptor() ([]byte, []int) { return fileDescriptorBinary, []int{3, 0} } + +type SpanContext struct { + TraceID uint64 `protobuf:"varint,1,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"` + SpanID uint64 `protobuf:"varint,2,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"` +} + +func (m *SpanContext) Reset() { *m = SpanContext{} } +func (m *SpanContext) String() string { return proto.CompactTextString(m) } +func (*SpanContext) ProtoMessage() {} +func (*SpanContext) Descriptor() ([]byte, []int) { return fileDescriptorBinary, []int{0} } + +func (m *SpanContext) GetTraceID() uint64 { + if m != nil { + return m.TraceID + } + return 0 +} + +func (m *SpanContext) GetSpanID() uint64 { + if m != nil { + return m.SpanID + } + return 0 +} + +type Span struct { + Context SpanContext `protobuf:"bytes,1,opt,name=context" json:"context"` + ParentSpanID uint64 `protobuf:"varint,2,opt,name=parent_span_id,json=parentSpanId,proto3" json:"parent_span_id,omitempty"` + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + Start time.Time `protobuf:"bytes,4,opt,name=start_time,json=startTime,stdtime" json:"start_time"` + Labels []string `protobuf:"bytes,5,rep,name=labels" json:"labels,omitempty"` + Fields []Field `protobuf:"bytes,6,rep,name=fields" json:"fields"` +} + +func (m *Span) Reset() { *m = Span{} } +func (m *Span) String() string { return proto.CompactTextString(m) } +func (*Span) ProtoMessage() {} +func (*Span) Descriptor() ([]byte, []int) { return fileDescriptorBinary, []int{1} } + +func (m *Span) GetContext() SpanContext { + if m != nil { + return m.Context + } + return SpanContext{} +} + +func (m *Span) GetParentSpanID() uint64 { + if m != nil { + return m.ParentSpanID + } + return 0 +} + +func (m *Span) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Span) GetStart() time.Time { + if m != nil { + return m.Start + } + return time.Time{} +} + +func (m *Span) GetLabels() []string { + if m != nil { + return m.Labels + } + return nil +} + +func (m *Span) GetFields() []Field { + if m != nil { + return m.Fields + } + return nil +} + +type Trace struct { + Spans []*Span `protobuf:"bytes,1,rep,name=spans" json:"spans,omitempty"` +} + +func (m *Trace) Reset() { *m = Trace{} } +func (m *Trace) String() string { return proto.CompactTextString(m) } +func (*Trace) ProtoMessage() {} +func (*Trace) Descriptor() ([]byte, []int) { return fileDescriptorBinary, []int{2} } + +func (m *Trace) GetSpans() []*Span { + if m != nil { + return m.Spans + } + return nil +} + +type Field struct { + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + FieldType Field_FieldType 
`protobuf:"varint,2,opt,name=field_type,json=fieldType,proto3,enum=wire.Field_FieldType" json:"field_type,omitempty"` + // Types that are valid to be assigned to Value: + // *Field_NumericVal + // *Field_StringVal + Value isField_Value `protobuf_oneof:"value"` +} + +func (m *Field) Reset() { *m = Field{} } +func (m *Field) String() string { return proto.CompactTextString(m) } +func (*Field) ProtoMessage() {} +func (*Field) Descriptor() ([]byte, []int) { return fileDescriptorBinary, []int{3} } + +type isField_Value interface { + isField_Value() + MarshalTo([]byte) (int, error) + Size() int +} + +type Field_NumericVal struct { + NumericVal int64 `protobuf:"fixed64,3,opt,name=numeric_val,json=numericVal,proto3,oneof"` +} +type Field_StringVal struct { + StringVal string `protobuf:"bytes,4,opt,name=string_val,json=stringVal,proto3,oneof"` +} + +func (*Field_NumericVal) isField_Value() {} +func (*Field_StringVal) isField_Value() {} + +func (m *Field) GetValue() isField_Value { + if m != nil { + return m.Value + } + return nil +} + +func (m *Field) GetKey() string { + if m != nil { + return m.Key + } + return "" +} + +func (m *Field) GetFieldType() Field_FieldType { + if m != nil { + return m.FieldType + } + return FieldTypeString +} + +func (m *Field) GetNumericVal() int64 { + if x, ok := m.GetValue().(*Field_NumericVal); ok { + return x.NumericVal + } + return 0 +} + +func (m *Field) GetStringVal() string { + if x, ok := m.GetValue().(*Field_StringVal); ok { + return x.StringVal + } + return "" +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*Field) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Field_OneofMarshaler, _Field_OneofUnmarshaler, _Field_OneofSizer, []interface{}{ + (*Field_NumericVal)(nil), + (*Field_StringVal)(nil), + } +} + +func _Field_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Field) + // value + switch x := m.Value.(type) { + case *Field_NumericVal: + _ = b.EncodeVarint(3<<3 | proto.WireFixed64) + _ = b.EncodeFixed64(uint64(x.NumericVal)) + case *Field_StringVal: + _ = b.EncodeVarint(4<<3 | proto.WireBytes) + _ = b.EncodeStringBytes(x.StringVal) + case nil: + default: + return fmt.Errorf("Field.Value has unexpected type %T", x) + } + return nil +} + +func _Field_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Field) + switch tag { + case 3: // value.numeric_val + if wire != proto.WireFixed64 { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeFixed64() + m.Value = &Field_NumericVal{int64(x)} + return true, err + case 4: // value.string_val + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Value = &Field_StringVal{x} + return true, err + default: + return false, nil + } +} + +func _Field_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Field) + // value + switch x := m.Value.(type) { + case *Field_NumericVal: + n += proto.SizeVarint(3<<3 | proto.WireFixed64) + n += 8 + case *Field_StringVal: + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.StringVal))) + n += len(x.StringVal) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +func init() { + proto.RegisterType((*SpanContext)(nil), "wire.SpanContext") + proto.RegisterType((*Span)(nil), "wire.Span") + 
proto.RegisterType((*Trace)(nil), "wire.Trace") + proto.RegisterType((*Field)(nil), "wire.Field") + proto.RegisterEnum("wire.Field_FieldType", Field_FieldType_name, Field_FieldType_value) +} +func (m *SpanContext) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SpanContext) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.TraceID != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintBinary(dAtA, i, uint64(m.TraceID)) + } + if m.SpanID != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintBinary(dAtA, i, uint64(m.SpanID)) + } + return i, nil +} + +func (m *Span) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Span) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintBinary(dAtA, i, uint64(m.Context.Size())) + n1, err := m.Context.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + if m.ParentSpanID != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintBinary(dAtA, i, uint64(m.ParentSpanID)) + } + if len(m.Name) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintBinary(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + dAtA[i] = 0x22 + i++ + i = encodeVarintBinary(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.Start))) + n2, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Start, dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + if len(m.Labels) > 0 { + for _, s := range m.Labels { + dAtA[i] = 0x2a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Fields) > 0 { + for _, msg := range m.Fields { + dAtA[i] = 0x32 + i++ + i = encodeVarintBinary(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *Trace) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Trace) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Spans) > 0 { + for _, msg := range m.Spans { + dAtA[i] = 0xa + i++ + i = encodeVarintBinary(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *Field) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Field) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Key) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintBinary(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + } + if m.FieldType != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintBinary(dAtA, i, uint64(m.FieldType)) + } + if m.Value != nil { + nn3, err := m.Value.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += nn3 + } + return i, nil +} + +func (m *Field_NumericVal) MarshalTo(dAtA []byte) (int, error) { + i := 0 + dAtA[i] = 0x19 + i++ + i = encodeFixed64Binary(dAtA, i, uint64(m.NumericVal)) + return i, nil +} +func (m 
*Field_StringVal) MarshalTo(dAtA []byte) (int, error) { + i := 0 + dAtA[i] = 0x22 + i++ + i = encodeVarintBinary(dAtA, i, uint64(len(m.StringVal))) + i += copy(dAtA[i:], m.StringVal) + return i, nil +} +func encodeFixed64Binary(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Binary(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintBinary(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *SpanContext) Size() (n int) { + var l int + _ = l + if m.TraceID != 0 { + n += 1 + sovBinary(uint64(m.TraceID)) + } + if m.SpanID != 0 { + n += 1 + sovBinary(uint64(m.SpanID)) + } + return n +} + +func (m *Span) Size() (n int) { + var l int + _ = l + l = m.Context.Size() + n += 1 + l + sovBinary(uint64(l)) + if m.ParentSpanID != 0 { + n += 1 + sovBinary(uint64(m.ParentSpanID)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sovBinary(uint64(l)) + } + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Start) + n += 1 + l + sovBinary(uint64(l)) + if len(m.Labels) > 0 { + for _, s := range m.Labels { + l = len(s) + n += 1 + l + sovBinary(uint64(l)) + } + } + if len(m.Fields) > 0 { + for _, e := range m.Fields { + l = e.Size() + n += 1 + l + sovBinary(uint64(l)) + } + } + return n +} + +func (m *Trace) Size() (n int) { + var l int + _ = l + if len(m.Spans) > 0 { + for _, e := range m.Spans { + l = e.Size() + n += 1 + l + sovBinary(uint64(l)) + } + } + return n +} + +func (m *Field) Size() (n int) { + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + sovBinary(uint64(l)) + } + if m.FieldType != 0 { + n += 1 + sovBinary(uint64(m.FieldType)) + } + if m.Value != nil { + n += m.Value.Size() + } + return n +} + +func (m *Field_NumericVal) Size() (n int) { + var l int + _ = l + n += 9 + return n +} +func (m *Field_StringVal) Size() (n int) { + var l int + _ = l + l = len(m.StringVal) + n += 1 + l + sovBinary(uint64(l)) + return n +} + +func sovBinary(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozBinary(x uint64) (n int) { + return sovBinary(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *SpanContext) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinary + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SpanContext: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SpanContext: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TraceID", wireType) + } + m.TraceID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinary + } + if 
iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TraceID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SpanID", wireType) + } + m.SpanID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinary + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.SpanID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipBinary(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthBinary + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Span) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinary + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Span: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Span: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinary + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthBinary + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Context.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ParentSpanID", wireType) + } + m.ParentSpanID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinary + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ParentSpanID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinary + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthBinary + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Start", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinary + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthBinary + } + 
postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Start, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinary + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthBinary + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Labels = append(m.Labels, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Fields", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinary + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthBinary + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Fields = append(m.Fields, Field{}) + if err := m.Fields[len(m.Fields)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipBinary(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthBinary + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Trace) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinary + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Trace: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Trace: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spans", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinary + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthBinary + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Spans = append(m.Spans, &Span{}) + if err := m.Spans[len(m.Spans)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipBinary(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthBinary + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Field) Unmarshal(dAtA []byte) error { + l := 
len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinary + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Field: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Field: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinary + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthBinary + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FieldType", wireType) + } + m.FieldType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinary + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.FieldType |= (Field_FieldType(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field NumericVal", wireType) + } + var v int64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + iNdEx += 8 + v = int64(dAtA[iNdEx-8]) + v |= int64(dAtA[iNdEx-7]) << 8 + v |= int64(dAtA[iNdEx-6]) << 16 + v |= int64(dAtA[iNdEx-5]) << 24 + v |= int64(dAtA[iNdEx-4]) << 32 + v |= int64(dAtA[iNdEx-3]) << 40 + v |= int64(dAtA[iNdEx-2]) << 48 + v |= int64(dAtA[iNdEx-1]) << 56 + m.Value = &Field_NumericVal{v} + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StringVal", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinary + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthBinary + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = &Field_StringVal{string(dAtA[iNdEx:postIndex])} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipBinary(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthBinary + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipBinary(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowBinary + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift 
>= 64 { + return 0, ErrIntOverflowBinary + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowBinary + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthBinary + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowBinary + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipBinary(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthBinary = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowBinary = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("binary.proto", fileDescriptorBinary) } + +var fileDescriptorBinary = []byte{ + // 624 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x5c, 0x52, 0x41, 0x6f, 0xda, 0x4c, + 0x10, 0xc5, 0xc1, 0x98, 0x78, 0x48, 0xf8, 0xcc, 0x7e, 0x4d, 0x85, 0x5c, 0x09, 0x5b, 0x44, 0xaa, + 0xc8, 0xa1, 0x8e, 0x92, 0x46, 0xdc, 0xe3, 0xa0, 0xb4, 0x96, 0x22, 0xa8, 0x0c, 0xe9, 0xa1, 0x17, + 0xb4, 0xc0, 0x42, 0xad, 0x1a, 0xaf, 0x65, 0x2f, 0x69, 0xf9, 0x07, 0x15, 0xa7, 0x9c, 0x7a, 0xe3, + 0xd4, 0x43, 0xff, 0x4a, 0x8e, 0x3d, 0xf7, 0xe0, 0x56, 0xee, 0x1f, 0xa9, 0x76, 0x0d, 0x26, 0xed, + 0xc5, 0x9a, 0x99, 0xf7, 0xe6, 0xbd, 0x9d, 0x27, 0xc3, 0xc1, 0xc8, 0x0b, 0x70, 0xb4, 0xb4, 0xc2, + 0x88, 0x32, 0x8a, 0xe4, 0x8f, 0x5e, 0x44, 0xf4, 0x17, 0x33, 0x8f, 0xbd, 0x5f, 0x8c, 0xac, 0x31, + 0x9d, 0x9f, 0xce, 0xe8, 0x8c, 0x9e, 0x0a, 0x70, 0xb4, 0x98, 0x8a, 0x4e, 0x34, 0xa2, 0xca, 0x96, + 0x74, 0x63, 0x46, 0xe9, 0xcc, 0x27, 0x3b, 0x16, 0xf3, 0xe6, 0x24, 0x66, 0x78, 0x1e, 0x66, 0x84, + 0xe6, 0x3b, 0xa8, 0xf4, 0x43, 0x1c, 0x5c, 0xd1, 0x80, 0x91, 0x4f, 0x0c, 0x3d, 0x87, 0x7d, 0x16, + 0xe1, 0x31, 0x19, 0x7a, 0x93, 0xba, 0x64, 0x4a, 0x2d, 0xd9, 0xae, 0xa4, 0x89, 0x51, 0x1e, 0xf0, + 0x99, 0xd3, 0x71, 0xcb, 0x02, 0x74, 0x26, 0xe8, 0x18, 0xca, 0x71, 0x88, 0x03, 0x4e, 0xdb, 0x13, + 0x34, 0x48, 0x13, 0x43, 0xe1, 0x4a, 0x4e, 0xc7, 0x55, 0x38, 0xe4, 0x4c, 0x9a, 0x5f, 0xf6, 0x40, + 0xe6, 0x23, 0x74, 0x06, 0xe5, 0x71, 0x66, 0x20, 0x44, 0x2b, 0xe7, 0x35, 0x8b, 0x1f, 0x63, 0x3d, + 0x72, 0xb6, 0xe5, 0x87, 0xc4, 0x28, 0xb8, 0x5b, 0x1e, 0x6a, 0x43, 0x35, 0xc4, 0x11, 0x09, 0xd8, + 0xf0, 0x6f, 0x1f, 0x2d, 0x4d, 0x8c, 0x83, 0x37, 0x02, 0xd9, 0xb8, 0x1d, 0x84, 0xbb, 0x6e, 0x82, + 0x10, 0xc8, 0x01, 0x9e, 0x93, 0x7a, 0xd1, 0x94, 0x5a, 0xaa, 0x2b, 0x6a, 0x74, 0x03, 0x10, 0x33, + 0x1c, 0xb1, 0x21, 0x3f, 0xbe, 0x2e, 0x8b, 0x17, 0xe8, 0x56, 0x96, 0x8c, 0xb5, 0x4d, 0xc6, 0x1a, + 0x6c, 0x93, 0xb1, 0x6b, 0xfc, 0x29, 0x69, 0x62, 0x94, 0xfa, 0x7c, 0xeb, 0xfe, 0xa7, 0x21, 0xb9, + 0xaa, 0x10, 0xe0, 0x14, 0xf4, 0x14, 0x14, 0x1f, 0x8f, 0x88, 0x1f, 0xd7, 
0x4b, 0x66, 0xb1, 0xa5, + 0xba, 0x9b, 0x0e, 0x9d, 0x80, 0x32, 0xf5, 0x88, 0x3f, 0x89, 0xeb, 0x8a, 0x59, 0x6c, 0x55, 0xce, + 0x2b, 0xd9, 0x8d, 0xd7, 0x7c, 0xb6, 0xb9, 0x6e, 0x43, 0x68, 0x9e, 0x40, 0x49, 0x24, 0x8a, 0x4c, + 0x28, 0xf1, 0xf3, 0xe2, 0xba, 0x24, 0x56, 0x60, 0x17, 0x8b, 0x9b, 0x01, 0xcd, 0x6f, 0x45, 0x28, + 0x09, 0x09, 0xa4, 0x41, 0xf1, 0x03, 0x59, 0x8a, 0x00, 0x55, 0x97, 0x97, 0xe8, 0x0a, 0x40, 0x08, + 0x0e, 0xd9, 0x32, 0x24, 0x22, 0x9f, 0xea, 0xf9, 0xd1, 0x23, 0xd7, 0xec, 0x3b, 0x58, 0x86, 0xc4, + 0x3e, 0x4c, 0x13, 0x43, 0xcd, 0x5b, 0x57, 0x9d, 0x6e, 0x4b, 0x74, 0x06, 0x95, 0x60, 0x31, 0x27, + 0x91, 0x37, 0x1e, 0xde, 0x61, 0x5f, 0xe4, 0xa6, 0xd9, 0xd5, 0x34, 0x31, 0xa0, 0x9b, 0x8d, 0xdf, + 0x62, 0xff, 0x75, 0xc1, 0x85, 0x20, 0xef, 0x90, 0xc5, 0xf3, 0x8c, 0xbc, 0x60, 0x26, 0x36, 0x78, + 0x9e, 0x6a, 0x66, 0xd0, 0x17, 0xd3, 0x6c, 0x41, 0x8d, 0xb7, 0x4d, 0xf3, 0x87, 0x04, 0x3b, 0x6f, + 0x64, 0x80, 0xd2, 0x1f, 0xb8, 0x4e, 0xf7, 0x95, 0x56, 0xd0, 0xff, 0x5f, 0xad, 0xcd, 0xff, 0x72, + 0x28, 0x5b, 0x47, 0xcf, 0x40, 0xb6, 0x7b, 0xbd, 0x1b, 0x4d, 0xd2, 0x6b, 0xab, 0xb5, 0x79, 0xb8, + 0x3b, 0x82, 0x52, 0x1f, 0x35, 0x40, 0x71, 0xba, 0x83, 0x61, 0xfb, 0x42, 0xdb, 0xd3, 0xd1, 0x6a, + 0x6d, 0x56, 0x73, 0xd8, 0x09, 0x58, 0xfb, 0x02, 0x99, 0x50, 0xbe, 0xdd, 0x10, 0x8a, 0xff, 0xc8, + 0xdf, 0x7a, 0x82, 0x71, 0x0c, 0xfb, 0x9d, 0x5b, 0xf7, 0x72, 0xe0, 0xf4, 0xba, 0x9a, 0xac, 0x1f, + 0xad, 0xd6, 0x66, 0x2d, 0xa7, 0x74, 0x16, 0x11, 0x66, 0x1e, 0x0d, 0x50, 0x13, 0xf6, 0xaf, 0x6f, + 0x7a, 0x97, 0x42, 0x47, 0xd1, 0x9f, 0xac, 0xd6, 0xa6, 0x96, 0x93, 0xae, 0x7d, 0x8a, 0x59, 0xfb, + 0x42, 0x97, 0x3f, 0x7f, 0x6d, 0x14, 0xec, 0x32, 0x94, 0xee, 0xb0, 0xbf, 0x20, 0xb6, 0xf6, 0x90, + 0x36, 0xa4, 0xef, 0x69, 0x43, 0xfa, 0x95, 0x36, 0xa4, 0xfb, 0xdf, 0x8d, 0xc2, 0x48, 0x11, 0xff, + 0xd6, 0xcb, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x10, 0xad, 0x27, 0x39, 0xc8, 0x03, 0x00, 0x00, +} diff --git a/vendor/github.com/influxdata/influxdb/pkg/tracing/wire/binary.proto b/vendor/github.com/influxdata/influxdb/pkg/tracing/wire/binary.proto new file mode 100644 index 0000000..d0bda52 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/tracing/wire/binary.proto @@ -0,0 +1,44 @@ +syntax = "proto3"; +package wire; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; +import "google/protobuf/timestamp.proto"; + +message SpanContext { + uint64 trace_id = 1 [(gogoproto.customname) = "TraceID"]; + uint64 span_id = 2 [(gogoproto.customname) = "SpanID"]; +} + +message Span { + SpanContext context = 1 [(gogoproto.nullable) = false]; + uint64 parent_span_id = 2 [(gogoproto.customname) = "ParentSpanID"]; + string name = 3; + google.protobuf.Timestamp start_time = 4 [(gogoproto.customname) = "Start", (gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + repeated string labels = 5; + repeated Field fields = 6 [(gogoproto.nullable) = false]; +} + +message Trace { + repeated Span spans = 1; +} + +message Field { + enum FieldType { + option (gogoproto.goproto_enum_prefix) = false; + + STRING = 0 [(gogoproto.enumvalue_customname) = "FieldTypeString"]; + BOOL = 1 [(gogoproto.enumvalue_customname) = "FieldTypeBool"]; + INT_64 = 2 [(gogoproto.enumvalue_customname) = "FieldTypeInt64"]; + UINT_64 = 3 [(gogoproto.enumvalue_customname) = "FieldTypeUint64"]; + DURATION = 4 [(gogoproto.enumvalue_customname) = "FieldTypeDuration"]; + FLOAT_64 = 6 [(gogoproto.enumvalue_customname) = "FieldTypeFloat64"]; + } + + string key = 1; + FieldType field_type = 2 [(gogoproto.customname) = "FieldType"]; + + oneof value { + sfixed64 
numeric_val = 3 [(gogoproto.customname) = "NumericVal"]; + string string_val = 4 [(gogoproto.customname) = "StringVal"]; + } +} diff --git a/vendor/github.com/influxdata/influxdb/prometheus/converters.go b/vendor/github.com/influxdata/influxdb/prometheus/converters.go new file mode 100644 index 0000000..b3d2206 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/prometheus/converters.go @@ -0,0 +1,275 @@ +package prometheus + +import ( + "errors" + "fmt" + "math" + "time" + + "github.com/influxdata/influxdb/models" + "github.com/influxdata/influxdb/prometheus/remote" + "github.com/influxdata/influxdb/services/storage" +) + +const ( + // measurementName is the default name used if no Prometheus name can be found on write + measurementName = "prom_metric_not_specified" + + // fieldName is the field all prometheus values get written to + fieldName = "value" + + // fieldTagKey is the tag key that all field names use in the new storage processor + fieldTagKey = "_field" + + // prometheusNameTag is the tag key that Prometheus uses for metric names + prometheusNameTag = "__name__" + + // measurementTagKey is the tag key that all measurement names use in the new storage processor + measurementTagKey = "_measurement" +) + +var ErrNaNDropped = errors.New("dropped NaN from Prometheus since they are not supported") + +// WriteRequestToPoints converts a Prometheus remote write request of time series and their +// samples into Points that can be written into Influx +func WriteRequestToPoints(req *remote.WriteRequest) ([]models.Point, error) { + var maxPoints int + for _, ts := range req.Timeseries { + maxPoints += len(ts.Samples) + } + points := make([]models.Point, 0, maxPoints) + + var droppedNaN error + + for _, ts := range req.Timeseries { + measurement := measurementName + + tags := make(map[string]string, len(ts.Labels)) + for _, l := range ts.Labels { + tags[l.Name] = l.Value + if l.Name == prometheusNameTag { + measurement = l.Value + } + } + + for _, s := range ts.Samples { + // skip NaN values, which are valid in Prometheus + if math.IsNaN(s.Value) { + droppedNaN = ErrNaNDropped + continue + } + + // convert and append + t := time.Unix(0, s.TimestampMs*int64(time.Millisecond)) + fields := map[string]interface{}{fieldName: s.Value} + p, err := models.NewPoint(measurement, models.NewTags(tags), fields, t) + if err != nil { + return nil, err + } + + points = append(points, p) + } + } + return points, droppedNaN +} + +// ReadRequestToInfluxStorageRequest converts a Prometheus remote read request into one using the +// new storage API that IFQL uses. 
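// Editorial sketch, not part of the vendored sources: a minimal use of
// WriteRequestToPoints above, as a hypothetical test in this package might
// exercise it. Every label becomes an Influx tag, the __name__ label
// additionally selects the measurement, and each sample becomes one point
// with a single "value" field; NaN samples are skipped and reported through
// ErrNaNDropped. The metric and label values below are made up.
//
//	req := &remote.WriteRequest{
//		Timeseries: []*remote.TimeSeries{{
//			Labels: []*remote.LabelPair{
//				{Name: "__name__", Value: "http_requests_total"},
//				{Name: "job", Value: "api"},
//			},
//			Samples: []*remote.Sample{{Value: 42, TimestampMs: 1500000000000}},
//		}},
//	}
//	points, err := WriteRequestToPoints(req)
//	// err is nil (no NaN samples); points holds one point in measurement
//	// "http_requests_total" with tags __name__ and job and field value=42.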
+func ReadRequestToInfluxStorageRequest(req *remote.ReadRequest, db, rp string) (*storage.ReadRequest, error) { + if len(req.Queries) != 1 { + return nil, errors.New("Prometheus read endpoint currently only supports one query at a time") + } + q := req.Queries[0] + + if rp != "" { + db = db + "/" + rp + } + + sreq := &storage.ReadRequest{ + Database: db, + TimestampRange: storage.TimestampRange{ + Start: time.Unix(0, q.StartTimestampMs*int64(time.Millisecond)).UnixNano(), + End: time.Unix(0, q.EndTimestampMs*int64(time.Millisecond)).UnixNano(), + }, + PointsLimit: math.MaxInt64, + } + + pred, err := predicateFromMatchers(q.Matchers) + if err != nil { + return nil, err + } + + sreq.Predicate = pred + return sreq, nil +} + +// RemoveInfluxSystemTags will remove tags that are Influx internal (_measurement and _field) +func RemoveInfluxSystemTags(tags models.Tags) models.Tags { + var t models.Tags + for _, tt := range tags { + if string(tt.Key) == measurementTagKey || string(tt.Key) == fieldTagKey { + continue + } + t = append(t, tt) + } + + return t +} + +// predicateFromMatchers takes Prometheus label matchers and converts them to a storage +// predicate that works with the schema that is written in, which assumes a single field +// named value +func predicateFromMatchers(matchers []*remote.LabelMatcher) (*storage.Predicate, error) { + left, err := nodeFromMatchers(matchers) + if err != nil { + return nil, err + } + right := fieldNode() + + return &storage.Predicate{ + Root: &storage.Node{ + NodeType: storage.NodeTypeLogicalExpression, + Value: &storage.Node_Logical_{Logical: storage.LogicalAnd}, + Children: []*storage.Node{left, right}, + }, + }, nil +} + +// fieldNode returns a storage.Node that will match that the fieldTagKey == fieldName +// which matches how Prometheus data is fed into the system +func fieldNode() *storage.Node { + children := []*storage.Node{ + &storage.Node{ + NodeType: storage.NodeTypeTagRef, + Value: &storage.Node_TagRefValue{ + TagRefValue: fieldTagKey, + }, + }, + &storage.Node{ + NodeType: storage.NodeTypeLiteral, + Value: &storage.Node_StringValue{ + StringValue: fieldName, + }, + }, + } + + return &storage.Node{ + NodeType: storage.NodeTypeComparisonExpression, + Value: &storage.Node_Comparison_{Comparison: storage.ComparisonEqual}, + Children: children, + } +} + +func nodeFromMatchers(matchers []*remote.LabelMatcher) (*storage.Node, error) { + if len(matchers) == 0 { + return nil, errors.New("expected matcher") + } else if len(matchers) == 1 { + return nodeFromMatcher(matchers[0]) + } + + left, err := nodeFromMatcher(matchers[0]) + if err != nil { + return nil, err + } + + right, err := nodeFromMatchers(matchers[1:]) + if err != nil { + return nil, err + } + + children := []*storage.Node{left, right} + return &storage.Node{ + NodeType: storage.NodeTypeLogicalExpression, + Value: &storage.Node_Logical_{Logical: storage.LogicalAnd}, + Children: children, + }, nil +} + +func nodeFromMatcher(m *remote.LabelMatcher) (*storage.Node, error) { + var op storage.Node_Comparison + switch m.Type { + case remote.MatchType_EQUAL: + op = storage.ComparisonEqual + case remote.MatchType_NOT_EQUAL: + op = storage.ComparisonNotEqual + case remote.MatchType_REGEX_MATCH: + op = storage.ComparisonRegex + case remote.MatchType_REGEX_NO_MATCH: + op = storage.ComparisonNotRegex + default: + return nil, fmt.Errorf("unknown match type %v", m.Type) + } + + name := m.Name + if m.Name == prometheusNameTag { + name = measurementTagKey + } + + left := &storage.Node{ + NodeType: 
storage.NodeTypeTagRef, + Value: &storage.Node_TagRefValue{ + TagRefValue: name, + }, + } + + var right *storage.Node + + if op == storage.ComparisonRegex || op == storage.ComparisonNotRegex { + right = &storage.Node{ + NodeType: storage.NodeTypeLiteral, + Value: &storage.Node_RegexValue{ + RegexValue: m.Value, + }, + } + } else { + right = &storage.Node{ + NodeType: storage.NodeTypeLiteral, + Value: &storage.Node_StringValue{ + StringValue: m.Value, + }, + } + } + + children := []*storage.Node{left, right} + return &storage.Node{ + NodeType: storage.NodeTypeComparisonExpression, + Value: &storage.Node_Comparison_{Comparison: op}, + Children: children, + }, nil +} + +// ModelTagsToLabelPairs converts models.Tags to a slice of Prometheus label pairs +func ModelTagsToLabelPairs(tags models.Tags) []*remote.LabelPair { + pairs := make([]*remote.LabelPair, 0, len(tags)) + for _, t := range tags { + if string(t.Value) == "" { + continue + } + pairs = append(pairs, &remote.LabelPair{ + Name: string(t.Key), + Value: string(t.Value), + }) + } + return pairs +} + +// TagsToLabelPairs converts a map of Influx tags into a slice of Prometheus label pairs +func TagsToLabelPairs(tags map[string]string) []*remote.LabelPair { + pairs := make([]*remote.LabelPair, 0, len(tags)) + for k, v := range tags { + if v == "" { + // If we select metrics with different sets of labels names, + // InfluxDB returns *all* possible tag names on all returned + // series, with empty tag values on series where they don't + // apply. In Prometheus, an empty label value is equivalent + // to a non-existent label, so we just skip empty ones here + // to make the result correct. + continue + } + pairs = append(pairs, &remote.LabelPair{ + Name: k, + Value: v, + }) + } + return pairs +} diff --git a/vendor/github.com/influxdata/influxdb/prometheus/remote/generate.go b/vendor/github.com/influxdata/influxdb/prometheus/remote/generate.go new file mode 100644 index 0000000..26e2eae --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/prometheus/remote/generate.go @@ -0,0 +1,3 @@ +package remote + +//go:generate protoc -I$GOPATH/src -I. --gogofaster_out=. remote.proto diff --git a/vendor/github.com/influxdata/influxdb/prometheus/remote/remote.pb.go b/vendor/github.com/influxdata/influxdb/prometheus/remote/remote.pb.go new file mode 100644 index 0000000..a9e7826 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/prometheus/remote/remote.pb.go @@ -0,0 +1,1759 @@ +// Code generated by protoc-gen-gogo. +// source: remote.proto +// DO NOT EDIT! + +/* + Package remote is a generated protocol buffer package. + + It is generated from these files: + remote.proto + + It has these top-level messages: + Sample + LabelPair + TimeSeries + WriteRequest + ReadRequest + ReadResponse + Query + LabelMatcher + QueryResult +*/ +package remote + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
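// Editorial sketch, not part of the vendored sources: how the matcher
// conversion in prometheus/converters.go above fits together. For a single
// equality matcher on __name__, predicateFromMatchers builds a predicate of
// the form (_measurement == "up") AND (_field == "value"): nodeFromMatcher
// rewrites the __name__ label to the internal _measurement tag, and fieldNode
// supplies the implicit field constraint. The metric name "up" is made up.
//
//	matchers := []*remote.LabelMatcher{{
//		Type:  remote.MatchType_EQUAL,
//		Name:  "__name__",
//		Value: "up",
//	}}
//	// predicateFromMatchers is unexported, so this would live in package prometheus.
//	pred, err := predicateFromMatchers(matchers)
//	_, _ = pred, err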
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type MatchType int32 + +const ( + MatchType_EQUAL MatchType = 0 + MatchType_NOT_EQUAL MatchType = 1 + MatchType_REGEX_MATCH MatchType = 2 + MatchType_REGEX_NO_MATCH MatchType = 3 +) + +var MatchType_name = map[int32]string{ + 0: "EQUAL", + 1: "NOT_EQUAL", + 2: "REGEX_MATCH", + 3: "REGEX_NO_MATCH", +} +var MatchType_value = map[string]int32{ + "EQUAL": 0, + "NOT_EQUAL": 1, + "REGEX_MATCH": 2, + "REGEX_NO_MATCH": 3, +} + +func (x MatchType) String() string { + return proto.EnumName(MatchType_name, int32(x)) +} +func (MatchType) EnumDescriptor() ([]byte, []int) { return fileDescriptorRemote, []int{0} } + +type Sample struct { + Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` + TimestampMs int64 `protobuf:"varint,2,opt,name=timestamp_ms,json=timestampMs,proto3" json:"timestamp_ms,omitempty"` +} + +func (m *Sample) Reset() { *m = Sample{} } +func (m *Sample) String() string { return proto.CompactTextString(m) } +func (*Sample) ProtoMessage() {} +func (*Sample) Descriptor() ([]byte, []int) { return fileDescriptorRemote, []int{0} } + +func (m *Sample) GetValue() float64 { + if m != nil { + return m.Value + } + return 0 +} + +func (m *Sample) GetTimestampMs() int64 { + if m != nil { + return m.TimestampMs + } + return 0 +} + +type LabelPair struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *LabelPair) Reset() { *m = LabelPair{} } +func (m *LabelPair) String() string { return proto.CompactTextString(m) } +func (*LabelPair) ProtoMessage() {} +func (*LabelPair) Descriptor() ([]byte, []int) { return fileDescriptorRemote, []int{1} } + +func (m *LabelPair) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *LabelPair) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +type TimeSeries struct { + Labels []*LabelPair `protobuf:"bytes,1,rep,name=labels" json:"labels,omitempty"` + // Sorted by time, oldest sample first. 
+ Samples []*Sample `protobuf:"bytes,2,rep,name=samples" json:"samples,omitempty"` +} + +func (m *TimeSeries) Reset() { *m = TimeSeries{} } +func (m *TimeSeries) String() string { return proto.CompactTextString(m) } +func (*TimeSeries) ProtoMessage() {} +func (*TimeSeries) Descriptor() ([]byte, []int) { return fileDescriptorRemote, []int{2} } + +func (m *TimeSeries) GetLabels() []*LabelPair { + if m != nil { + return m.Labels + } + return nil +} + +func (m *TimeSeries) GetSamples() []*Sample { + if m != nil { + return m.Samples + } + return nil +} + +type WriteRequest struct { + Timeseries []*TimeSeries `protobuf:"bytes,1,rep,name=timeseries" json:"timeseries,omitempty"` +} + +func (m *WriteRequest) Reset() { *m = WriteRequest{} } +func (m *WriteRequest) String() string { return proto.CompactTextString(m) } +func (*WriteRequest) ProtoMessage() {} +func (*WriteRequest) Descriptor() ([]byte, []int) { return fileDescriptorRemote, []int{3} } + +func (m *WriteRequest) GetTimeseries() []*TimeSeries { + if m != nil { + return m.Timeseries + } + return nil +} + +type ReadRequest struct { + Queries []*Query `protobuf:"bytes,1,rep,name=queries" json:"queries,omitempty"` +} + +func (m *ReadRequest) Reset() { *m = ReadRequest{} } +func (m *ReadRequest) String() string { return proto.CompactTextString(m) } +func (*ReadRequest) ProtoMessage() {} +func (*ReadRequest) Descriptor() ([]byte, []int) { return fileDescriptorRemote, []int{4} } + +func (m *ReadRequest) GetQueries() []*Query { + if m != nil { + return m.Queries + } + return nil +} + +type ReadResponse struct { + // In same order as the request's queries. + Results []*QueryResult `protobuf:"bytes,1,rep,name=results" json:"results,omitempty"` +} + +func (m *ReadResponse) Reset() { *m = ReadResponse{} } +func (m *ReadResponse) String() string { return proto.CompactTextString(m) } +func (*ReadResponse) ProtoMessage() {} +func (*ReadResponse) Descriptor() ([]byte, []int) { return fileDescriptorRemote, []int{5} } + +func (m *ReadResponse) GetResults() []*QueryResult { + if m != nil { + return m.Results + } + return nil +} + +type Query struct { + StartTimestampMs int64 `protobuf:"varint,1,opt,name=start_timestamp_ms,json=startTimestampMs,proto3" json:"start_timestamp_ms,omitempty"` + EndTimestampMs int64 `protobuf:"varint,2,opt,name=end_timestamp_ms,json=endTimestampMs,proto3" json:"end_timestamp_ms,omitempty"` + Matchers []*LabelMatcher `protobuf:"bytes,3,rep,name=matchers" json:"matchers,omitempty"` +} + +func (m *Query) Reset() { *m = Query{} } +func (m *Query) String() string { return proto.CompactTextString(m) } +func (*Query) ProtoMessage() {} +func (*Query) Descriptor() ([]byte, []int) { return fileDescriptorRemote, []int{6} } + +func (m *Query) GetStartTimestampMs() int64 { + if m != nil { + return m.StartTimestampMs + } + return 0 +} + +func (m *Query) GetEndTimestampMs() int64 { + if m != nil { + return m.EndTimestampMs + } + return 0 +} + +func (m *Query) GetMatchers() []*LabelMatcher { + if m != nil { + return m.Matchers + } + return nil +} + +type LabelMatcher struct { + Type MatchType `protobuf:"varint,1,opt,name=type,proto3,enum=remote.MatchType" json:"type,omitempty"` + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + Value string `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *LabelMatcher) Reset() { *m = LabelMatcher{} } +func (m *LabelMatcher) String() string { return proto.CompactTextString(m) } +func (*LabelMatcher) ProtoMessage() {} +func (*LabelMatcher) Descriptor() 
([]byte, []int) { return fileDescriptorRemote, []int{7} } + +func (m *LabelMatcher) GetType() MatchType { + if m != nil { + return m.Type + } + return MatchType_EQUAL +} + +func (m *LabelMatcher) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *LabelMatcher) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +type QueryResult struct { + Timeseries []*TimeSeries `protobuf:"bytes,1,rep,name=timeseries" json:"timeseries,omitempty"` +} + +func (m *QueryResult) Reset() { *m = QueryResult{} } +func (m *QueryResult) String() string { return proto.CompactTextString(m) } +func (*QueryResult) ProtoMessage() {} +func (*QueryResult) Descriptor() ([]byte, []int) { return fileDescriptorRemote, []int{8} } + +func (m *QueryResult) GetTimeseries() []*TimeSeries { + if m != nil { + return m.Timeseries + } + return nil +} + +func init() { + proto.RegisterType((*Sample)(nil), "remote.Sample") + proto.RegisterType((*LabelPair)(nil), "remote.LabelPair") + proto.RegisterType((*TimeSeries)(nil), "remote.TimeSeries") + proto.RegisterType((*WriteRequest)(nil), "remote.WriteRequest") + proto.RegisterType((*ReadRequest)(nil), "remote.ReadRequest") + proto.RegisterType((*ReadResponse)(nil), "remote.ReadResponse") + proto.RegisterType((*Query)(nil), "remote.Query") + proto.RegisterType((*LabelMatcher)(nil), "remote.LabelMatcher") + proto.RegisterType((*QueryResult)(nil), "remote.QueryResult") + proto.RegisterEnum("remote.MatchType", MatchType_name, MatchType_value) +} +func (m *Sample) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Sample) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Value != 0 { + dAtA[i] = 0x9 + i++ + i = encodeFixed64Remote(dAtA, i, uint64(math.Float64bits(float64(m.Value)))) + } + if m.TimestampMs != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintRemote(dAtA, i, uint64(m.TimestampMs)) + } + return i, nil +} + +func (m *LabelPair) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LabelPair) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRemote(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.Value) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintRemote(dAtA, i, uint64(len(m.Value))) + i += copy(dAtA[i:], m.Value) + } + return i, nil +} + +func (m *TimeSeries) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TimeSeries) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Labels) > 0 { + for _, msg := range m.Labels { + dAtA[i] = 0xa + i++ + i = encodeVarintRemote(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Samples) > 0 { + for _, msg := range m.Samples { + dAtA[i] = 0x12 + i++ + i = encodeVarintRemote(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *WriteRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + 
n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WriteRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Timeseries) > 0 { + for _, msg := range m.Timeseries { + dAtA[i] = 0xa + i++ + i = encodeVarintRemote(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ReadRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReadRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Queries) > 0 { + for _, msg := range m.Queries { + dAtA[i] = 0xa + i++ + i = encodeVarintRemote(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ReadResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReadResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Results) > 0 { + for _, msg := range m.Results { + dAtA[i] = 0xa + i++ + i = encodeVarintRemote(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *Query) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Query) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.StartTimestampMs != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRemote(dAtA, i, uint64(m.StartTimestampMs)) + } + if m.EndTimestampMs != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintRemote(dAtA, i, uint64(m.EndTimestampMs)) + } + if len(m.Matchers) > 0 { + for _, msg := range m.Matchers { + dAtA[i] = 0x1a + i++ + i = encodeVarintRemote(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *LabelMatcher) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LabelMatcher) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Type != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRemote(dAtA, i, uint64(m.Type)) + } + if len(m.Name) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintRemote(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.Value) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintRemote(dAtA, i, uint64(len(m.Value))) + i += copy(dAtA[i:], m.Value) + } + return i, nil +} + +func (m *QueryResult) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryResult) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Timeseries) > 0 { + for _, msg := range m.Timeseries { + dAtA[i] = 0xa + i++ + i = encodeVarintRemote(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, 
err + } + i += n + } + } + return i, nil +} + +func encodeFixed64Remote(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Remote(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintRemote(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *Sample) Size() (n int) { + var l int + _ = l + if m.Value != 0 { + n += 9 + } + if m.TimestampMs != 0 { + n += 1 + sovRemote(uint64(m.TimestampMs)) + } + return n +} + +func (m *LabelPair) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovRemote(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovRemote(uint64(l)) + } + return n +} + +func (m *TimeSeries) Size() (n int) { + var l int + _ = l + if len(m.Labels) > 0 { + for _, e := range m.Labels { + l = e.Size() + n += 1 + l + sovRemote(uint64(l)) + } + } + if len(m.Samples) > 0 { + for _, e := range m.Samples { + l = e.Size() + n += 1 + l + sovRemote(uint64(l)) + } + } + return n +} + +func (m *WriteRequest) Size() (n int) { + var l int + _ = l + if len(m.Timeseries) > 0 { + for _, e := range m.Timeseries { + l = e.Size() + n += 1 + l + sovRemote(uint64(l)) + } + } + return n +} + +func (m *ReadRequest) Size() (n int) { + var l int + _ = l + if len(m.Queries) > 0 { + for _, e := range m.Queries { + l = e.Size() + n += 1 + l + sovRemote(uint64(l)) + } + } + return n +} + +func (m *ReadResponse) Size() (n int) { + var l int + _ = l + if len(m.Results) > 0 { + for _, e := range m.Results { + l = e.Size() + n += 1 + l + sovRemote(uint64(l)) + } + } + return n +} + +func (m *Query) Size() (n int) { + var l int + _ = l + if m.StartTimestampMs != 0 { + n += 1 + sovRemote(uint64(m.StartTimestampMs)) + } + if m.EndTimestampMs != 0 { + n += 1 + sovRemote(uint64(m.EndTimestampMs)) + } + if len(m.Matchers) > 0 { + for _, e := range m.Matchers { + l = e.Size() + n += 1 + l + sovRemote(uint64(l)) + } + } + return n +} + +func (m *LabelMatcher) Size() (n int) { + var l int + _ = l + if m.Type != 0 { + n += 1 + sovRemote(uint64(m.Type)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sovRemote(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovRemote(uint64(l)) + } + return n +} + +func (m *QueryResult) Size() (n int) { + var l int + _ = l + if len(m.Timeseries) > 0 { + for _, e := range m.Timeseries { + l = e.Size() + n += 1 + l + sovRemote(uint64(l)) + } + } + return n +} + +func sovRemote(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozRemote(x uint64) (n int) { + return sovRemote(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Sample) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRemote + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + 
wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Sample: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Sample: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + iNdEx += 8 + v = uint64(dAtA[iNdEx-8]) + v |= uint64(dAtA[iNdEx-7]) << 8 + v |= uint64(dAtA[iNdEx-6]) << 16 + v |= uint64(dAtA[iNdEx-5]) << 24 + v |= uint64(dAtA[iNdEx-4]) << 32 + v |= uint64(dAtA[iNdEx-3]) << 40 + v |= uint64(dAtA[iNdEx-2]) << 48 + v |= uint64(dAtA[iNdEx-1]) << 56 + m.Value = float64(math.Float64frombits(v)) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimestampMs", wireType) + } + m.TimestampMs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRemote + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TimestampMs |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRemote(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRemote + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LabelPair) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRemote + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LabelPair: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LabelPair: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRemote + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRemote + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRemote + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRemote + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRemote(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRemote + } + 
if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TimeSeries) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRemote + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TimeSeries: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TimeSeries: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRemote + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRemote + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Labels = append(m.Labels, &LabelPair{}) + if err := m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Samples", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRemote + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRemote + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Samples = append(m.Samples, &Sample{}) + if err := m.Samples[len(m.Samples)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRemote(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRemote + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WriteRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRemote + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WriteRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WriteRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Timeseries", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRemote + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if 
msglen < 0 { + return ErrInvalidLengthRemote + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Timeseries = append(m.Timeseries, &TimeSeries{}) + if err := m.Timeseries[len(m.Timeseries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRemote(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRemote + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReadRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRemote + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReadRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReadRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Queries", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRemote + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRemote + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Queries = append(m.Queries, &Query{}) + if err := m.Queries[len(m.Queries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRemote(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRemote + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReadResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRemote + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReadResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReadResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Results", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRemote + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRemote + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Results = append(m.Results, &QueryResult{}) + if err := 
m.Results[len(m.Results)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRemote(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRemote + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Query) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRemote + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Query: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Query: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StartTimestampMs", wireType) + } + m.StartTimestampMs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRemote + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.StartTimestampMs |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field EndTimestampMs", wireType) + } + m.EndTimestampMs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRemote + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.EndTimestampMs |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Matchers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRemote + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRemote + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Matchers = append(m.Matchers, &LabelMatcher{}) + if err := m.Matchers[len(m.Matchers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRemote(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRemote + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LabelMatcher) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRemote + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LabelMatcher: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LabelMatcher: illegal tag %d (wire type %d)", fieldNum, wire) + } + 
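+	// Field numbers follow the LabelMatcher message in remote.proto: 1 = type, 2 = name, 3 = value.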
switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRemote + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= (MatchType(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRemote + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRemote + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRemote + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRemote + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRemote(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRemote + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryResult) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRemote + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryResult: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryResult: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Timeseries", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRemote + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRemote + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Timeseries = append(m.Timeseries, &TimeSeries{}) + if err := m.Timeseries[len(m.Timeseries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRemote(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRemote + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + 
iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipRemote(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRemote + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRemote + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRemote + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthRemote + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRemote + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipRemote(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthRemote = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowRemote = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("remote.proto", fileDescriptorRemote) } + +var fileDescriptorRemote = []byte{ + // 449 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x53, 0x4d, 0x6f, 0xd3, 0x40, + 0x10, 0xed, 0xc6, 0x4d, 0x82, 0xc7, 0x6e, 0x30, 0x43, 0x0f, 0x39, 0x45, 0xc1, 0x12, 0xc2, 0x20, + 0xa8, 0x50, 0x11, 0xdc, 0x38, 0xa4, 0x28, 0x02, 0xa1, 0xa6, 0xa5, 0x5b, 0x23, 0xb8, 0x59, 0x5b, + 0x32, 0x12, 0x96, 0xec, 0xc4, 0xdd, 0x5d, 0x23, 0xe5, 0x5f, 0xc0, 0xbf, 0xe2, 0xc8, 0x4f, 0x40, + 0xe1, 0x8f, 0xa0, 0xec, 0xc6, 0x1f, 0x91, 0x72, 0xea, 0x2d, 0x33, 0xef, 0xbd, 0x99, 0x97, 0x7d, + 0x63, 0xf0, 0x25, 0xe5, 0x4b, 0x4d, 0x27, 0x85, 0x5c, 0xea, 0x25, 0xf6, 0x6c, 0x15, 0x4e, 0xa0, + 0x77, 0x2d, 0xf2, 0x22, 0x23, 0x3c, 0x86, 0xee, 0x0f, 0x91, 0x95, 0x34, 0x64, 0x63, 0x16, 0x31, + 0x6e, 0x0b, 0x7c, 0x04, 0xbe, 0x4e, 0x73, 0x52, 0x5a, 0xe4, 0x45, 0x92, 0xab, 0x61, 0x67, 0xcc, + 0x22, 0x87, 0x7b, 0x75, 0x6f, 0xa6, 0xc2, 0xd7, 0xe0, 0x9e, 0x8b, 0x1b, 0xca, 0x3e, 0x89, 0x54, + 0x22, 0xc2, 0xe1, 0x42, 0xe4, 0x76, 0x88, 0xcb, 0xcd, 0xef, 0x66, 0x72, 0xc7, 0x34, 0x6d, 0x11, + 0x0a, 0x80, 0x38, 0xcd, 0xe9, 0x9a, 0x64, 0x4a, 0x0a, 0x9f, 0x42, 0x2f, 0xdb, 0x0c, 0x51, 0x43, + 0x36, 0x76, 0x22, 0xef, 0xf4, 0xc1, 0xc9, 0xd6, 0x6e, 0x3d, 0x9a, 0x6f, 0x09, 0x18, 0x41, 0x5f, + 0x19, 0xcb, 0x1b, 0x37, 0x1b, 0xee, 0xa0, 0xe2, 0xda, 0x7f, 0xc2, 0x2b, 0x38, 0x3c, 0x03, 0xff, + 0x8b, 0x4c, 0x35, 0x71, 0xba, 0x2d, 0x49, 0x69, 0x3c, 0x05, 0x30, 0xc6, 0xcd, 0xca, 0xed, 0x22, + 
0xac, 0xc4, 0x8d, 0x19, 0xde, 0x62, 0x85, 0x6f, 0xc0, 0xe3, 0x24, 0xe6, 0xd5, 0x88, 0x27, 0xd0, + 0xbf, 0x2d, 0xdb, 0xfa, 0xa3, 0x4a, 0x7f, 0x55, 0x92, 0x5c, 0xf1, 0x0a, 0x0d, 0xdf, 0x82, 0x6f, + 0x75, 0xaa, 0x58, 0x2e, 0x14, 0xe1, 0x0b, 0xe8, 0x4b, 0x52, 0x65, 0xa6, 0x2b, 0xe1, 0xc3, 0x5d, + 0xa1, 0xc1, 0x78, 0xc5, 0x09, 0x7f, 0x31, 0xe8, 0x1a, 0x00, 0x9f, 0x03, 0x2a, 0x2d, 0xa4, 0x4e, + 0x76, 0x72, 0x60, 0x26, 0x87, 0xc0, 0x20, 0x71, 0x13, 0x06, 0x46, 0x10, 0xd0, 0x62, 0x9e, 0xec, + 0xc9, 0x6c, 0x40, 0x8b, 0x79, 0x9b, 0xf9, 0x12, 0xee, 0xe5, 0x42, 0x7f, 0xfb, 0x4e, 0x52, 0x0d, + 0x1d, 0xe3, 0xe8, 0x78, 0xe7, 0xcd, 0x67, 0x16, 0xe4, 0x35, 0x2b, 0x4c, 0xc0, 0x6f, 0x23, 0xf8, + 0x18, 0x0e, 0xf5, 0xaa, 0xb0, 0x59, 0x0f, 0x9a, 0xc4, 0x0c, 0x1c, 0xaf, 0x0a, 0xe2, 0x06, 0xae, + 0x4f, 0xa2, 0xb3, 0xef, 0x24, 0x9c, 0xf6, 0x49, 0x4c, 0xc0, 0x6b, 0x3d, 0xc6, 0x5d, 0xe2, 0x7a, + 0xf6, 0x11, 0xdc, 0x7a, 0x3f, 0xba, 0xd0, 0x9d, 0x5e, 0x7d, 0x9e, 0x9c, 0x07, 0x07, 0x78, 0x04, + 0xee, 0xc5, 0x65, 0x9c, 0xd8, 0x92, 0xe1, 0x7d, 0xf0, 0xf8, 0xf4, 0xfd, 0xf4, 0x6b, 0x32, 0x9b, + 0xc4, 0xef, 0x3e, 0x04, 0x1d, 0x44, 0x18, 0xd8, 0xc6, 0xc5, 0xe5, 0xb6, 0xe7, 0x9c, 0x05, 0xbf, + 0xd7, 0x23, 0xf6, 0x67, 0x3d, 0x62, 0x7f, 0xd7, 0x23, 0xf6, 0xf3, 0xdf, 0xe8, 0xe0, 0xa6, 0x67, + 0x3e, 0x9e, 0x57, 0xff, 0x03, 0x00, 0x00, 0xff, 0xff, 0x28, 0xd6, 0xf1, 0x18, 0x4c, 0x03, 0x00, + 0x00, +} diff --git a/vendor/github.com/influxdata/influxdb/prometheus/remote/remote.proto b/vendor/github.com/influxdata/influxdb/prometheus/remote/remote.proto new file mode 100644 index 0000000..4e429e4 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/prometheus/remote/remote.proto @@ -0,0 +1,70 @@ +// This file is copied (except for package name) from https://github.com/prometheus/prometheus/blob/master/storage/remote/remote.proto + +// Copyright 2016 Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package remote; + +message Sample { + double value = 1; + int64 timestamp_ms = 2; +} + +message LabelPair { + string name = 1; + string value = 2; +} + +message TimeSeries { + repeated LabelPair labels = 1; + // Sorted by time, oldest sample first. + repeated Sample samples = 2; +} + +message WriteRequest { + repeated TimeSeries timeseries = 1; +} + +message ReadRequest { + repeated Query queries = 1; +} + +message ReadResponse { + // In same order as the request's queries. 
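+  // i.e. results[i] holds the series matched by queries[i].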
+ repeated QueryResult results = 1; +} + +message Query { + int64 start_timestamp_ms = 1; + int64 end_timestamp_ms = 2; + repeated LabelMatcher matchers = 3; +} + +enum MatchType { + EQUAL = 0; + NOT_EQUAL = 1; + REGEX_MATCH = 2; + REGEX_NO_MATCH = 3; +} + +message LabelMatcher { + MatchType type = 1; + string name = 2; + string value = 3; +} + +message QueryResult { + repeated TimeSeries timeseries = 1; +} \ No newline at end of file diff --git a/vendor/github.com/influxdata/influxdb/query/call_iterator.go b/vendor/github.com/influxdata/influxdb/query/call_iterator.go new file mode 100644 index 0000000..c47f35f --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/query/call_iterator.go @@ -0,0 +1,1531 @@ +package query + +import ( + "fmt" + "math" + "sort" + "time" + + "github.com/influxdata/influxdb/query/internal/gota" + "github.com/influxdata/influxql" +) + +/* +This file contains iterator implementations for each function call available +in InfluxQL. Call iterators are separated into two groups: + +1. Map/reduce-style iterators - these are passed to IteratorCreator so that + processing can be at the low-level storage and aggregates are returned. + +2. Raw aggregate iterators - these require the full set of data for a window. + These are handled by the select() function and raw points are streamed in + from the low-level storage. + +There are helpers to aid in building aggregate iterators. For simple map/reduce +iterators, you can use the reduceIterator types and pass a reduce function. This +reduce function is passed a previous and current value and the new timestamp, +value, and auxilary fields are returned from it. + +For raw aggregate iterators, you can use the reduceSliceIterators which pass +in a slice of all points to the function and return a point. For more complex +iterator types, you may need to create your own iterators by hand. + +Once your iterator is complete, you'll need to add it to the NewCallIterator() +function if it is to be available to IteratorCreators and add it to the select() +function to allow it to be included during planning. +*/ + +// NewCallIterator returns a new iterator for a Call. +func NewCallIterator(input Iterator, opt IteratorOptions) (Iterator, error) { + name := opt.Expr.(*influxql.Call).Name + switch name { + case "count": + return newCountIterator(input, opt) + case "min": + return newMinIterator(input, opt) + case "max": + return newMaxIterator(input, opt) + case "sum": + return newSumIterator(input, opt) + case "first": + return newFirstIterator(input, opt) + case "last": + return newLastIterator(input, opt) + case "mean": + return newMeanIterator(input, opt) + default: + return nil, fmt.Errorf("unsupported function call: %s", name) + } +} + +// newCountIterator returns an iterator for operating on a count() call. +func newCountIterator(input Iterator, opt IteratorOptions) (Iterator, error) { + // FIXME: Wrap iterator in int-type iterator and always output int value. 
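+	// count() always emits integer points, so each branch below pairs the input-typed
+	// aggregator with an IntegerPointEmitter seeded with a zero-valued IntegerPoint.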
+ + switch input := input.(type) { + case FloatIterator: + createFn := func() (FloatPointAggregator, IntegerPointEmitter) { + fn := NewFloatFuncIntegerReducer(FloatCountReduce, &IntegerPoint{Value: 0, Time: ZeroTime}) + return fn, fn + } + return newFloatReduceIntegerIterator(input, opt, createFn), nil + case IntegerIterator: + createFn := func() (IntegerPointAggregator, IntegerPointEmitter) { + fn := NewIntegerFuncReducer(IntegerCountReduce, &IntegerPoint{Value: 0, Time: ZeroTime}) + return fn, fn + } + return newIntegerReduceIntegerIterator(input, opt, createFn), nil + case UnsignedIterator: + createFn := func() (UnsignedPointAggregator, IntegerPointEmitter) { + fn := NewUnsignedFuncIntegerReducer(UnsignedCountReduce, &IntegerPoint{Value: 0, Time: ZeroTime}) + return fn, fn + } + return newUnsignedReduceIntegerIterator(input, opt, createFn), nil + case StringIterator: + createFn := func() (StringPointAggregator, IntegerPointEmitter) { + fn := NewStringFuncIntegerReducer(StringCountReduce, &IntegerPoint{Value: 0, Time: ZeroTime}) + return fn, fn + } + return newStringReduceIntegerIterator(input, opt, createFn), nil + case BooleanIterator: + createFn := func() (BooleanPointAggregator, IntegerPointEmitter) { + fn := NewBooleanFuncIntegerReducer(BooleanCountReduce, &IntegerPoint{Value: 0, Time: ZeroTime}) + return fn, fn + } + return newBooleanReduceIntegerIterator(input, opt, createFn), nil + default: + return nil, fmt.Errorf("unsupported count iterator type: %T", input) + } +} + +// FloatCountReduce returns the count of points. +func FloatCountReduce(prev *IntegerPoint, curr *FloatPoint) (int64, int64, []interface{}) { + if prev == nil { + return ZeroTime, 1, nil + } + return ZeroTime, prev.Value + 1, nil +} + +// IntegerCountReduce returns the count of points. +func IntegerCountReduce(prev, curr *IntegerPoint) (int64, int64, []interface{}) { + if prev == nil { + return ZeroTime, 1, nil + } + return ZeroTime, prev.Value + 1, nil +} + +// UnsignedCountReduce returns the count of points. +func UnsignedCountReduce(prev *IntegerPoint, curr *UnsignedPoint) (int64, int64, []interface{}) { + if prev == nil { + return ZeroTime, 1, nil + } + return ZeroTime, prev.Value + 1, nil +} + +// StringCountReduce returns the count of points. +func StringCountReduce(prev *IntegerPoint, curr *StringPoint) (int64, int64, []interface{}) { + if prev == nil { + return ZeroTime, 1, nil + } + return ZeroTime, prev.Value + 1, nil +} + +// BooleanCountReduce returns the count of points. +func BooleanCountReduce(prev *IntegerPoint, curr *BooleanPoint) (int64, int64, []interface{}) { + if prev == nil { + return ZeroTime, 1, nil + } + return ZeroTime, prev.Value + 1, nil +} + +// newMinIterator returns an iterator for operating on a min() call. 
+func newMinIterator(input Iterator, opt IteratorOptions) (Iterator, error) { + switch input := input.(type) { + case FloatIterator: + createFn := func() (FloatPointAggregator, FloatPointEmitter) { + fn := NewFloatFuncReducer(FloatMinReduce, nil) + return fn, fn + } + return newFloatReduceFloatIterator(input, opt, createFn), nil + case IntegerIterator: + createFn := func() (IntegerPointAggregator, IntegerPointEmitter) { + fn := NewIntegerFuncReducer(IntegerMinReduce, nil) + return fn, fn + } + return newIntegerReduceIntegerIterator(input, opt, createFn), nil + case UnsignedIterator: + createFn := func() (UnsignedPointAggregator, UnsignedPointEmitter) { + fn := NewUnsignedFuncReducer(UnsignedMinReduce, nil) + return fn, fn + } + return newUnsignedReduceUnsignedIterator(input, opt, createFn), nil + case BooleanIterator: + createFn := func() (BooleanPointAggregator, BooleanPointEmitter) { + fn := NewBooleanFuncReducer(BooleanMinReduce, nil) + return fn, fn + } + return newBooleanReduceBooleanIterator(input, opt, createFn), nil + default: + return nil, fmt.Errorf("unsupported min iterator type: %T", input) + } +} + +// FloatMinReduce returns the minimum value between prev & curr. +func FloatMinReduce(prev, curr *FloatPoint) (int64, float64, []interface{}) { + if prev == nil || curr.Value < prev.Value || (curr.Value == prev.Value && curr.Time < prev.Time) { + return curr.Time, curr.Value, cloneAux(curr.Aux) + } + return prev.Time, prev.Value, prev.Aux +} + +// IntegerMinReduce returns the minimum value between prev & curr. +func IntegerMinReduce(prev, curr *IntegerPoint) (int64, int64, []interface{}) { + if prev == nil || curr.Value < prev.Value || (curr.Value == prev.Value && curr.Time < prev.Time) { + return curr.Time, curr.Value, cloneAux(curr.Aux) + } + return prev.Time, prev.Value, prev.Aux +} + +// UnsignedMinReduce returns the minimum value between prev & curr. +func UnsignedMinReduce(prev, curr *UnsignedPoint) (int64, uint64, []interface{}) { + if prev == nil || curr.Value < prev.Value || (curr.Value == prev.Value && curr.Time < prev.Time) { + return curr.Time, curr.Value, cloneAux(curr.Aux) + } + return prev.Time, prev.Value, prev.Aux +} + +// BooleanMinReduce returns the minimum value between prev & curr. +func BooleanMinReduce(prev, curr *BooleanPoint) (int64, bool, []interface{}) { + if prev == nil || (curr.Value != prev.Value && !curr.Value) || (curr.Value == prev.Value && curr.Time < prev.Time) { + return curr.Time, curr.Value, cloneAux(curr.Aux) + } + return prev.Time, prev.Value, prev.Aux +} + +// newMaxIterator returns an iterator for operating on a max() call. 
+func newMaxIterator(input Iterator, opt IteratorOptions) (Iterator, error) { + switch input := input.(type) { + case FloatIterator: + createFn := func() (FloatPointAggregator, FloatPointEmitter) { + fn := NewFloatFuncReducer(FloatMaxReduce, nil) + return fn, fn + } + return newFloatReduceFloatIterator(input, opt, createFn), nil + case IntegerIterator: + createFn := func() (IntegerPointAggregator, IntegerPointEmitter) { + fn := NewIntegerFuncReducer(IntegerMaxReduce, nil) + return fn, fn + } + return newIntegerReduceIntegerIterator(input, opt, createFn), nil + case UnsignedIterator: + createFn := func() (UnsignedPointAggregator, UnsignedPointEmitter) { + fn := NewUnsignedFuncReducer(UnsignedMaxReduce, nil) + return fn, fn + } + return newUnsignedReduceUnsignedIterator(input, opt, createFn), nil + case BooleanIterator: + createFn := func() (BooleanPointAggregator, BooleanPointEmitter) { + fn := NewBooleanFuncReducer(BooleanMaxReduce, nil) + return fn, fn + } + return newBooleanReduceBooleanIterator(input, opt, createFn), nil + default: + return nil, fmt.Errorf("unsupported max iterator type: %T", input) + } +} + +// FloatMaxReduce returns the maximum value between prev & curr. +func FloatMaxReduce(prev, curr *FloatPoint) (int64, float64, []interface{}) { + if prev == nil || curr.Value > prev.Value || (curr.Value == prev.Value && curr.Time < prev.Time) { + return curr.Time, curr.Value, cloneAux(curr.Aux) + } + return prev.Time, prev.Value, prev.Aux +} + +// IntegerMaxReduce returns the maximum value between prev & curr. +func IntegerMaxReduce(prev, curr *IntegerPoint) (int64, int64, []interface{}) { + if prev == nil || curr.Value > prev.Value || (curr.Value == prev.Value && curr.Time < prev.Time) { + return curr.Time, curr.Value, cloneAux(curr.Aux) + } + return prev.Time, prev.Value, prev.Aux +} + +// UnsignedMaxReduce returns the maximum value between prev & curr. +func UnsignedMaxReduce(prev, curr *UnsignedPoint) (int64, uint64, []interface{}) { + if prev == nil || curr.Value > prev.Value || (curr.Value == prev.Value && curr.Time < prev.Time) { + return curr.Time, curr.Value, cloneAux(curr.Aux) + } + return prev.Time, prev.Value, prev.Aux +} + +// BooleanMaxReduce returns the minimum value between prev & curr. +func BooleanMaxReduce(prev, curr *BooleanPoint) (int64, bool, []interface{}) { + if prev == nil || (curr.Value != prev.Value && curr.Value) || (curr.Value == prev.Value && curr.Time < prev.Time) { + return curr.Time, curr.Value, cloneAux(curr.Aux) + } + return prev.Time, prev.Value, prev.Aux +} + +// newSumIterator returns an iterator for operating on a sum() call. 
+func newSumIterator(input Iterator, opt IteratorOptions) (Iterator, error) { + switch input := input.(type) { + case FloatIterator: + createFn := func() (FloatPointAggregator, FloatPointEmitter) { + fn := NewFloatFuncReducer(FloatSumReduce, &FloatPoint{Value: 0, Time: ZeroTime}) + return fn, fn + } + return newFloatReduceFloatIterator(input, opt, createFn), nil + case IntegerIterator: + createFn := func() (IntegerPointAggregator, IntegerPointEmitter) { + fn := NewIntegerFuncReducer(IntegerSumReduce, &IntegerPoint{Value: 0, Time: ZeroTime}) + return fn, fn + } + return newIntegerReduceIntegerIterator(input, opt, createFn), nil + case UnsignedIterator: + createFn := func() (UnsignedPointAggregator, UnsignedPointEmitter) { + fn := NewUnsignedFuncReducer(UnsignedSumReduce, &UnsignedPoint{Value: 0, Time: ZeroTime}) + return fn, fn + } + return newUnsignedReduceUnsignedIterator(input, opt, createFn), nil + default: + return nil, fmt.Errorf("unsupported sum iterator type: %T", input) + } +} + +// FloatSumReduce returns the sum prev value & curr value. +func FloatSumReduce(prev, curr *FloatPoint) (int64, float64, []interface{}) { + if prev == nil { + return ZeroTime, curr.Value, nil + } + return prev.Time, prev.Value + curr.Value, nil +} + +// IntegerSumReduce returns the sum prev value & curr value. +func IntegerSumReduce(prev, curr *IntegerPoint) (int64, int64, []interface{}) { + if prev == nil { + return ZeroTime, curr.Value, nil + } + return prev.Time, prev.Value + curr.Value, nil +} + +// UnsignedSumReduce returns the sum prev value & curr value. +func UnsignedSumReduce(prev, curr *UnsignedPoint) (int64, uint64, []interface{}) { + if prev == nil { + return ZeroTime, curr.Value, nil + } + return prev.Time, prev.Value + curr.Value, nil +} + +// newFirstIterator returns an iterator for operating on a first() call. +func newFirstIterator(input Iterator, opt IteratorOptions) (Iterator, error) { + switch input := input.(type) { + case FloatIterator: + createFn := func() (FloatPointAggregator, FloatPointEmitter) { + fn := NewFloatFuncReducer(FloatFirstReduce, nil) + return fn, fn + } + return newFloatReduceFloatIterator(input, opt, createFn), nil + case IntegerIterator: + createFn := func() (IntegerPointAggregator, IntegerPointEmitter) { + fn := NewIntegerFuncReducer(IntegerFirstReduce, nil) + return fn, fn + } + return newIntegerReduceIntegerIterator(input, opt, createFn), nil + case UnsignedIterator: + createFn := func() (UnsignedPointAggregator, UnsignedPointEmitter) { + fn := NewUnsignedFuncReducer(UnsignedFirstReduce, nil) + return fn, fn + } + return newUnsignedReduceUnsignedIterator(input, opt, createFn), nil + case StringIterator: + createFn := func() (StringPointAggregator, StringPointEmitter) { + fn := NewStringFuncReducer(StringFirstReduce, nil) + return fn, fn + } + return newStringReduceStringIterator(input, opt, createFn), nil + case BooleanIterator: + createFn := func() (BooleanPointAggregator, BooleanPointEmitter) { + fn := NewBooleanFuncReducer(BooleanFirstReduce, nil) + return fn, fn + } + return newBooleanReduceBooleanIterator(input, opt, createFn), nil + default: + return nil, fmt.Errorf("unsupported first iterator type: %T", input) + } +} + +// FloatFirstReduce returns the first point sorted by time. 
+func FloatFirstReduce(prev, curr *FloatPoint) (int64, float64, []interface{}) { + if prev == nil || curr.Time < prev.Time || (curr.Time == prev.Time && curr.Value > prev.Value) { + return curr.Time, curr.Value, cloneAux(curr.Aux) + } + return prev.Time, prev.Value, prev.Aux +} + +// IntegerFirstReduce returns the first point sorted by time. +func IntegerFirstReduce(prev, curr *IntegerPoint) (int64, int64, []interface{}) { + if prev == nil || curr.Time < prev.Time || (curr.Time == prev.Time && curr.Value > prev.Value) { + return curr.Time, curr.Value, cloneAux(curr.Aux) + } + return prev.Time, prev.Value, prev.Aux +} + +// UnsignedFirstReduce returns the first point sorted by time. +func UnsignedFirstReduce(prev, curr *UnsignedPoint) (int64, uint64, []interface{}) { + if prev == nil || curr.Time < prev.Time || (curr.Time == prev.Time && curr.Value > prev.Value) { + return curr.Time, curr.Value, cloneAux(curr.Aux) + } + return prev.Time, prev.Value, prev.Aux +} + +// StringFirstReduce returns the first point sorted by time. +func StringFirstReduce(prev, curr *StringPoint) (int64, string, []interface{}) { + if prev == nil || curr.Time < prev.Time || (curr.Time == prev.Time && curr.Value > prev.Value) { + return curr.Time, curr.Value, cloneAux(curr.Aux) + } + return prev.Time, prev.Value, prev.Aux +} + +// BooleanFirstReduce returns the first point sorted by time. +func BooleanFirstReduce(prev, curr *BooleanPoint) (int64, bool, []interface{}) { + if prev == nil || curr.Time < prev.Time || (curr.Time == prev.Time && !curr.Value && prev.Value) { + return curr.Time, curr.Value, cloneAux(curr.Aux) + } + return prev.Time, prev.Value, prev.Aux +} + +// newLastIterator returns an iterator for operating on a last() call. +func newLastIterator(input Iterator, opt IteratorOptions) (Iterator, error) { + switch input := input.(type) { + case FloatIterator: + createFn := func() (FloatPointAggregator, FloatPointEmitter) { + fn := NewFloatFuncReducer(FloatLastReduce, nil) + return fn, fn + } + return newFloatReduceFloatIterator(input, opt, createFn), nil + case IntegerIterator: + createFn := func() (IntegerPointAggregator, IntegerPointEmitter) { + fn := NewIntegerFuncReducer(IntegerLastReduce, nil) + return fn, fn + } + return newIntegerReduceIntegerIterator(input, opt, createFn), nil + case UnsignedIterator: + createFn := func() (UnsignedPointAggregator, UnsignedPointEmitter) { + fn := NewUnsignedFuncReducer(UnsignedLastReduce, nil) + return fn, fn + } + return newUnsignedReduceUnsignedIterator(input, opt, createFn), nil + case StringIterator: + createFn := func() (StringPointAggregator, StringPointEmitter) { + fn := NewStringFuncReducer(StringLastReduce, nil) + return fn, fn + } + return newStringReduceStringIterator(input, opt, createFn), nil + case BooleanIterator: + createFn := func() (BooleanPointAggregator, BooleanPointEmitter) { + fn := NewBooleanFuncReducer(BooleanLastReduce, nil) + return fn, fn + } + return newBooleanReduceBooleanIterator(input, opt, createFn), nil + default: + return nil, fmt.Errorf("unsupported last iterator type: %T", input) + } +} + +// FloatLastReduce returns the last point sorted by time. +func FloatLastReduce(prev, curr *FloatPoint) (int64, float64, []interface{}) { + if prev == nil || curr.Time > prev.Time || (curr.Time == prev.Time && curr.Value > prev.Value) { + return curr.Time, curr.Value, cloneAux(curr.Aux) + } + return prev.Time, prev.Value, prev.Aux +} + +// IntegerLastReduce returns the last point sorted by time. 
+func IntegerLastReduce(prev, curr *IntegerPoint) (int64, int64, []interface{}) { + if prev == nil || curr.Time > prev.Time || (curr.Time == prev.Time && curr.Value > prev.Value) { + return curr.Time, curr.Value, cloneAux(curr.Aux) + } + return prev.Time, prev.Value, prev.Aux +} + +// UnsignedLastReduce returns the last point sorted by time. +func UnsignedLastReduce(prev, curr *UnsignedPoint) (int64, uint64, []interface{}) { + if prev == nil || curr.Time > prev.Time || (curr.Time == prev.Time && curr.Value > prev.Value) { + return curr.Time, curr.Value, cloneAux(curr.Aux) + } + return prev.Time, prev.Value, prev.Aux +} + +// StringLastReduce returns the first point sorted by time. +func StringLastReduce(prev, curr *StringPoint) (int64, string, []interface{}) { + if prev == nil || curr.Time > prev.Time || (curr.Time == prev.Time && curr.Value > prev.Value) { + return curr.Time, curr.Value, cloneAux(curr.Aux) + } + return prev.Time, prev.Value, prev.Aux +} + +// BooleanLastReduce returns the first point sorted by time. +func BooleanLastReduce(prev, curr *BooleanPoint) (int64, bool, []interface{}) { + if prev == nil || curr.Time > prev.Time || (curr.Time == prev.Time && curr.Value && !prev.Value) { + return curr.Time, curr.Value, cloneAux(curr.Aux) + } + return prev.Time, prev.Value, prev.Aux +} + +// NewDistinctIterator returns an iterator for operating on a distinct() call. +func NewDistinctIterator(input Iterator, opt IteratorOptions) (Iterator, error) { + switch input := input.(type) { + case FloatIterator: + createFn := func() (FloatPointAggregator, FloatPointEmitter) { + fn := NewFloatDistinctReducer() + return fn, fn + } + return newFloatReduceFloatIterator(input, opt, createFn), nil + case IntegerIterator: + createFn := func() (IntegerPointAggregator, IntegerPointEmitter) { + fn := NewIntegerDistinctReducer() + return fn, fn + } + return newIntegerReduceIntegerIterator(input, opt, createFn), nil + case UnsignedIterator: + createFn := func() (UnsignedPointAggregator, UnsignedPointEmitter) { + fn := NewUnsignedDistinctReducer() + return fn, fn + } + return newUnsignedReduceUnsignedIterator(input, opt, createFn), nil + case StringIterator: + createFn := func() (StringPointAggregator, StringPointEmitter) { + fn := NewStringDistinctReducer() + return fn, fn + } + return newStringReduceStringIterator(input, opt, createFn), nil + case BooleanIterator: + createFn := func() (BooleanPointAggregator, BooleanPointEmitter) { + fn := NewBooleanDistinctReducer() + return fn, fn + } + return newBooleanReduceBooleanIterator(input, opt, createFn), nil + default: + return nil, fmt.Errorf("unsupported distinct iterator type: %T", input) + } +} + +// newMeanIterator returns an iterator for operating on a mean() call. 
+func newMeanIterator(input Iterator, opt IteratorOptions) (Iterator, error) { + switch input := input.(type) { + case FloatIterator: + createFn := func() (FloatPointAggregator, FloatPointEmitter) { + fn := NewFloatMeanReducer() + return fn, fn + } + return newFloatReduceFloatIterator(input, opt, createFn), nil + case IntegerIterator: + createFn := func() (IntegerPointAggregator, FloatPointEmitter) { + fn := NewIntegerMeanReducer() + return fn, fn + } + return newIntegerReduceFloatIterator(input, opt, createFn), nil + case UnsignedIterator: + createFn := func() (UnsignedPointAggregator, FloatPointEmitter) { + fn := NewUnsignedMeanReducer() + return fn, fn + } + return newUnsignedReduceFloatIterator(input, opt, createFn), nil + default: + return nil, fmt.Errorf("unsupported mean iterator type: %T", input) + } +} + +// NewMedianIterator returns an iterator for operating on a median() call. +func NewMedianIterator(input Iterator, opt IteratorOptions) (Iterator, error) { + return newMedianIterator(input, opt) +} + +// newMedianIterator returns an iterator for operating on a median() call. +func newMedianIterator(input Iterator, opt IteratorOptions) (Iterator, error) { + switch input := input.(type) { + case FloatIterator: + createFn := func() (FloatPointAggregator, FloatPointEmitter) { + fn := NewFloatSliceFuncReducer(FloatMedianReduceSlice) + return fn, fn + } + return newFloatReduceFloatIterator(input, opt, createFn), nil + case IntegerIterator: + createFn := func() (IntegerPointAggregator, FloatPointEmitter) { + fn := NewIntegerSliceFuncFloatReducer(IntegerMedianReduceSlice) + return fn, fn + } + return newIntegerReduceFloatIterator(input, opt, createFn), nil + case UnsignedIterator: + createFn := func() (UnsignedPointAggregator, FloatPointEmitter) { + fn := NewUnsignedSliceFuncFloatReducer(UnsignedMedianReduceSlice) + return fn, fn + } + return newUnsignedReduceFloatIterator(input, opt, createFn), nil + default: + return nil, fmt.Errorf("unsupported median iterator type: %T", input) + } +} + +// FloatMedianReduceSlice returns the median value within a window. +func FloatMedianReduceSlice(a []FloatPoint) []FloatPoint { + if len(a) == 1 { + return a + } + + // OPTIMIZE(benbjohnson): Use getSortedRange() from v0.9.5.1. + + // Return the middle value from the points. + // If there are an even number of points then return the mean of the two middle points. + sort.Sort(floatPointsByValue(a)) + if len(a)%2 == 0 { + lo, hi := a[len(a)/2-1], a[(len(a)/2)] + return []FloatPoint{{Time: ZeroTime, Value: lo.Value + (hi.Value-lo.Value)/2}} + } + return []FloatPoint{{Time: ZeroTime, Value: a[len(a)/2].Value}} +} + +// IntegerMedianReduceSlice returns the median value within a window. +func IntegerMedianReduceSlice(a []IntegerPoint) []FloatPoint { + if len(a) == 1 { + return []FloatPoint{{Time: ZeroTime, Value: float64(a[0].Value)}} + } + + // OPTIMIZE(benbjohnson): Use getSortedRange() from v0.9.5.1. + + // Return the middle value from the points. + // If there are an even number of points then return the mean of the two middle points. + sort.Sort(integerPointsByValue(a)) + if len(a)%2 == 0 { + lo, hi := a[len(a)/2-1], a[(len(a)/2)] + return []FloatPoint{{Time: ZeroTime, Value: float64(lo.Value) + float64(hi.Value-lo.Value)/2}} + } + return []FloatPoint{{Time: ZeroTime, Value: float64(a[len(a)/2].Value)}} +} + +// UnsignedMedianReduceSlice returns the median value within a window. 
+func UnsignedMedianReduceSlice(a []UnsignedPoint) []FloatPoint { + if len(a) == 1 { + return []FloatPoint{{Time: ZeroTime, Value: float64(a[0].Value)}} + } + + // OPTIMIZE(benbjohnson): Use getSortedRange() from v0.9.5.1. + + // Return the middle value from the points. + // If there are an even number of points then return the mean of the two middle points. + sort.Sort(unsignedPointsByValue(a)) + if len(a)%2 == 0 { + lo, hi := a[len(a)/2-1], a[(len(a)/2)] + return []FloatPoint{{Time: ZeroTime, Value: float64(lo.Value) + float64(hi.Value-lo.Value)/2}} + } + return []FloatPoint{{Time: ZeroTime, Value: float64(a[len(a)/2].Value)}} +} + +// newModeIterator returns an iterator for operating on a mode() call. +func NewModeIterator(input Iterator, opt IteratorOptions) (Iterator, error) { + switch input := input.(type) { + case FloatIterator: + createFn := func() (FloatPointAggregator, FloatPointEmitter) { + fn := NewFloatSliceFuncReducer(FloatModeReduceSlice) + return fn, fn + } + return newFloatReduceFloatIterator(input, opt, createFn), nil + case IntegerIterator: + createFn := func() (IntegerPointAggregator, IntegerPointEmitter) { + fn := NewIntegerSliceFuncReducer(IntegerModeReduceSlice) + return fn, fn + } + return newIntegerReduceIntegerIterator(input, opt, createFn), nil + case UnsignedIterator: + createFn := func() (UnsignedPointAggregator, UnsignedPointEmitter) { + fn := NewUnsignedSliceFuncReducer(UnsignedModeReduceSlice) + return fn, fn + } + return newUnsignedReduceUnsignedIterator(input, opt, createFn), nil + case StringIterator: + createFn := func() (StringPointAggregator, StringPointEmitter) { + fn := NewStringSliceFuncReducer(StringModeReduceSlice) + return fn, fn + } + return newStringReduceStringIterator(input, opt, createFn), nil + case BooleanIterator: + createFn := func() (BooleanPointAggregator, BooleanPointEmitter) { + fn := NewBooleanSliceFuncReducer(BooleanModeReduceSlice) + return fn, fn + } + return newBooleanReduceBooleanIterator(input, opt, createFn), nil + default: + return nil, fmt.Errorf("unsupported median iterator type: %T", input) + } +} + +// FloatModeReduceSlice returns the mode value within a window. +func FloatModeReduceSlice(a []FloatPoint) []FloatPoint { + if len(a) == 1 { + return a + } + + sort.Sort(floatPointsByValue(a)) + + mostFreq := 0 + currFreq := 0 + currMode := a[0].Value + mostMode := a[0].Value + mostTime := a[0].Time + currTime := a[0].Time + + for _, p := range a { + if p.Value != currMode { + currFreq = 1 + currMode = p.Value + currTime = p.Time + continue + } + currFreq++ + if mostFreq > currFreq || (mostFreq == currFreq && currTime > mostTime) { + continue + } + mostFreq = currFreq + mostMode = p.Value + mostTime = p.Time + } + + return []FloatPoint{{Time: ZeroTime, Value: mostMode}} +} + +// IntegerModeReduceSlice returns the mode value within a window. +func IntegerModeReduceSlice(a []IntegerPoint) []IntegerPoint { + if len(a) == 1 { + return a + } + sort.Sort(integerPointsByValue(a)) + + mostFreq := 0 + currFreq := 0 + currMode := a[0].Value + mostMode := a[0].Value + mostTime := a[0].Time + currTime := a[0].Time + + for _, p := range a { + if p.Value != currMode { + currFreq = 1 + currMode = p.Value + currTime = p.Time + continue + } + currFreq++ + if mostFreq > currFreq || (mostFreq == currFreq && currTime > mostTime) { + continue + } + mostFreq = currFreq + mostMode = p.Value + mostTime = p.Time + } + + return []IntegerPoint{{Time: ZeroTime, Value: mostMode}} +} + +// UnsignedModeReduceSlice returns the mode value within a window. 
+func UnsignedModeReduceSlice(a []UnsignedPoint) []UnsignedPoint { + if len(a) == 1 { + return a + } + sort.Sort(unsignedPointsByValue(a)) + + mostFreq := 0 + currFreq := 0 + currMode := a[0].Value + mostMode := a[0].Value + mostTime := a[0].Time + currTime := a[0].Time + + for _, p := range a { + if p.Value != currMode { + currFreq = 1 + currMode = p.Value + currTime = p.Time + continue + } + currFreq++ + if mostFreq > currFreq || (mostFreq == currFreq && currTime > mostTime) { + continue + } + mostFreq = currFreq + mostMode = p.Value + mostTime = p.Time + } + + return []UnsignedPoint{{Time: ZeroTime, Value: mostMode}} +} + +// StringModeReduceSlice returns the mode value within a window. +func StringModeReduceSlice(a []StringPoint) []StringPoint { + if len(a) == 1 { + return a + } + + sort.Sort(stringPointsByValue(a)) + + mostFreq := 0 + currFreq := 0 + currMode := a[0].Value + mostMode := a[0].Value + mostTime := a[0].Time + currTime := a[0].Time + + for _, p := range a { + if p.Value != currMode { + currFreq = 1 + currMode = p.Value + currTime = p.Time + continue + } + currFreq++ + if mostFreq > currFreq || (mostFreq == currFreq && currTime > mostTime) { + continue + } + mostFreq = currFreq + mostMode = p.Value + mostTime = p.Time + } + + return []StringPoint{{Time: ZeroTime, Value: mostMode}} +} + +// BooleanModeReduceSlice returns the mode value within a window. +func BooleanModeReduceSlice(a []BooleanPoint) []BooleanPoint { + if len(a) == 1 { + return a + } + + trueFreq := 0 + falsFreq := 0 + mostMode := false + + for _, p := range a { + if p.Value { + trueFreq++ + } else { + falsFreq++ + } + } + // In case either of true or false are mode then retuned mode value wont be + // of metric with oldest timestamp + if trueFreq >= falsFreq { + mostMode = true + } + + return []BooleanPoint{{Time: ZeroTime, Value: mostMode}} +} + +// newStddevIterator returns an iterator for operating on a stddev() call. +func newStddevIterator(input Iterator, opt IteratorOptions) (Iterator, error) { + switch input := input.(type) { + case FloatIterator: + createFn := func() (FloatPointAggregator, FloatPointEmitter) { + fn := NewFloatSliceFuncReducer(FloatStddevReduceSlice) + return fn, fn + } + return newFloatReduceFloatIterator(input, opt, createFn), nil + case IntegerIterator: + createFn := func() (IntegerPointAggregator, FloatPointEmitter) { + fn := NewIntegerSliceFuncFloatReducer(IntegerStddevReduceSlice) + return fn, fn + } + return newIntegerReduceFloatIterator(input, opt, createFn), nil + case UnsignedIterator: + createFn := func() (UnsignedPointAggregator, FloatPointEmitter) { + fn := NewUnsignedSliceFuncFloatReducer(UnsignedStddevReduceSlice) + return fn, fn + } + return newUnsignedReduceFloatIterator(input, opt, createFn), nil + default: + return nil, fmt.Errorf("unsupported stddev iterator type: %T", input) + } +} + +// FloatStddevReduceSlice returns the stddev value within a window. +func FloatStddevReduceSlice(a []FloatPoint) []FloatPoint { + // If there is only one point then return NaN. + if len(a) < 2 { + return []FloatPoint{{Time: ZeroTime, Value: math.NaN()}} + } + + // Calculate the mean. + var mean float64 + var count int + for _, p := range a { + if math.IsNaN(p.Value) { + continue + } + count++ + mean += (p.Value - mean) / float64(count) + } + + // Calculate the variance. 
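+	// Sum the squared deviations from the mean; dividing by count-1 below (before taking
+	// the square root) gives the Bessel-corrected sample standard deviation.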
+ var variance float64 + for _, p := range a { + if math.IsNaN(p.Value) { + continue + } + variance += math.Pow(p.Value-mean, 2) + } + return []FloatPoint{{ + Time: ZeroTime, + Value: math.Sqrt(variance / float64(count-1)), + }} +} + +// IntegerStddevReduceSlice returns the stddev value within a window. +func IntegerStddevReduceSlice(a []IntegerPoint) []FloatPoint { + // If there is only one point then return NaN. + if len(a) < 2 { + return []FloatPoint{{Time: ZeroTime, Value: math.NaN()}} + } + + // Calculate the mean. + var mean float64 + var count int + for _, p := range a { + count++ + mean += (float64(p.Value) - mean) / float64(count) + } + + // Calculate the variance. + var variance float64 + for _, p := range a { + variance += math.Pow(float64(p.Value)-mean, 2) + } + return []FloatPoint{{ + Time: ZeroTime, + Value: math.Sqrt(variance / float64(count-1)), + }} +} + +// UnsignedStddevReduceSlice returns the stddev value within a window. +func UnsignedStddevReduceSlice(a []UnsignedPoint) []FloatPoint { + // If there is only one point then return NaN. + if len(a) < 2 { + return []FloatPoint{{Time: ZeroTime, Value: math.NaN()}} + } + + // Calculate the mean. + var mean float64 + var count int + for _, p := range a { + count++ + mean += (float64(p.Value) - mean) / float64(count) + } + + // Calculate the variance. + var variance float64 + for _, p := range a { + variance += math.Pow(float64(p.Value)-mean, 2) + } + return []FloatPoint{{ + Time: ZeroTime, + Value: math.Sqrt(variance / float64(count-1)), + }} +} + +// newSpreadIterator returns an iterator for operating on a spread() call. +func newSpreadIterator(input Iterator, opt IteratorOptions) (Iterator, error) { + switch input := input.(type) { + case FloatIterator: + createFn := func() (FloatPointAggregator, FloatPointEmitter) { + fn := NewFloatSpreadReducer() + return fn, fn + } + return newFloatReduceFloatIterator(input, opt, createFn), nil + case IntegerIterator: + createFn := func() (IntegerPointAggregator, IntegerPointEmitter) { + fn := NewIntegerSpreadReducer() + return fn, fn + } + return newIntegerReduceIntegerIterator(input, opt, createFn), nil + case UnsignedIterator: + createFn := func() (UnsignedPointAggregator, UnsignedPointEmitter) { + fn := NewUnsignedSpreadReducer() + return fn, fn + } + return newUnsignedReduceUnsignedIterator(input, opt, createFn), nil + default: + return nil, fmt.Errorf("unsupported spread iterator type: %T", input) + } +} + +func newTopIterator(input Iterator, opt IteratorOptions, n int, keepTags bool) (Iterator, error) { + switch input := input.(type) { + case FloatIterator: + createFn := func() (FloatPointAggregator, FloatPointEmitter) { + fn := NewFloatTopReducer(n) + return fn, fn + } + itr := newFloatReduceFloatIterator(input, opt, createFn) + itr.keepTags = keepTags + return itr, nil + case IntegerIterator: + createFn := func() (IntegerPointAggregator, IntegerPointEmitter) { + fn := NewIntegerTopReducer(n) + return fn, fn + } + itr := newIntegerReduceIntegerIterator(input, opt, createFn) + itr.keepTags = keepTags + return itr, nil + case UnsignedIterator: + createFn := func() (UnsignedPointAggregator, UnsignedPointEmitter) { + fn := NewUnsignedTopReducer(n) + return fn, fn + } + itr := newUnsignedReduceUnsignedIterator(input, opt, createFn) + itr.keepTags = keepTags + return itr, nil + default: + return nil, fmt.Errorf("unsupported top iterator type: %T", input) + } +} + +func newBottomIterator(input Iterator, opt IteratorOptions, n int, keepTags bool) (Iterator, error) { + switch input := 
input.(type) { + case FloatIterator: + createFn := func() (FloatPointAggregator, FloatPointEmitter) { + fn := NewFloatBottomReducer(n) + return fn, fn + } + itr := newFloatReduceFloatIterator(input, opt, createFn) + itr.keepTags = keepTags + return itr, nil + case IntegerIterator: + createFn := func() (IntegerPointAggregator, IntegerPointEmitter) { + fn := NewIntegerBottomReducer(n) + return fn, fn + } + itr := newIntegerReduceIntegerIterator(input, opt, createFn) + itr.keepTags = keepTags + return itr, nil + case UnsignedIterator: + createFn := func() (UnsignedPointAggregator, UnsignedPointEmitter) { + fn := NewUnsignedBottomReducer(n) + return fn, fn + } + itr := newUnsignedReduceUnsignedIterator(input, opt, createFn) + itr.keepTags = keepTags + return itr, nil + default: + return nil, fmt.Errorf("unsupported bottom iterator type: %T", input) + } +} + +// newPercentileIterator returns an iterator for operating on a percentile() call. +func newPercentileIterator(input Iterator, opt IteratorOptions, percentile float64) (Iterator, error) { + switch input := input.(type) { + case FloatIterator: + floatPercentileReduceSlice := NewFloatPercentileReduceSliceFunc(percentile) + createFn := func() (FloatPointAggregator, FloatPointEmitter) { + fn := NewFloatSliceFuncReducer(floatPercentileReduceSlice) + return fn, fn + } + return newFloatReduceFloatIterator(input, opt, createFn), nil + case IntegerIterator: + integerPercentileReduceSlice := NewIntegerPercentileReduceSliceFunc(percentile) + createFn := func() (IntegerPointAggregator, IntegerPointEmitter) { + fn := NewIntegerSliceFuncReducer(integerPercentileReduceSlice) + return fn, fn + } + return newIntegerReduceIntegerIterator(input, opt, createFn), nil + case UnsignedIterator: + unsignedPercentileReduceSlice := NewUnsignedPercentileReduceSliceFunc(percentile) + createFn := func() (UnsignedPointAggregator, UnsignedPointEmitter) { + fn := NewUnsignedSliceFuncReducer(unsignedPercentileReduceSlice) + return fn, fn + } + return newUnsignedReduceUnsignedIterator(input, opt, createFn), nil + default: + return nil, fmt.Errorf("unsupported percentile iterator type: %T", input) + } +} + +// NewFloatPercentileReduceSliceFunc returns the percentile value within a window. +func NewFloatPercentileReduceSliceFunc(percentile float64) FloatReduceSliceFunc { + return func(a []FloatPoint) []FloatPoint { + length := len(a) + i := int(math.Floor(float64(length)*percentile/100.0+0.5)) - 1 + + if i < 0 || i >= length { + return nil + } + + sort.Sort(floatPointsByValue(a)) + return []FloatPoint{{Time: a[i].Time, Value: a[i].Value, Aux: cloneAux(a[i].Aux)}} + } +} + +// NewIntegerPercentileReduceSliceFunc returns the percentile value within a window. +func NewIntegerPercentileReduceSliceFunc(percentile float64) IntegerReduceSliceFunc { + return func(a []IntegerPoint) []IntegerPoint { + length := len(a) + i := int(math.Floor(float64(length)*percentile/100.0+0.5)) - 1 + + if i < 0 || i >= length { + return nil + } + + sort.Sort(integerPointsByValue(a)) + return []IntegerPoint{{Time: a[i].Time, Value: a[i].Value, Aux: cloneAux(a[i].Aux)}} + } +} + +// NewUnsignedPercentileReduceSliceFunc returns the percentile value within a window. 
+func NewUnsignedPercentileReduceSliceFunc(percentile float64) UnsignedReduceSliceFunc { + return func(a []UnsignedPoint) []UnsignedPoint { + length := len(a) + i := int(math.Floor(float64(length)*percentile/100.0+0.5)) - 1 + + if i < 0 || i >= length { + return nil + } + + sort.Sort(unsignedPointsByValue(a)) + return []UnsignedPoint{{Time: a[i].Time, Value: a[i].Value, Aux: cloneAux(a[i].Aux)}} + } +} + +// newDerivativeIterator returns an iterator for operating on a derivative() call. +func newDerivativeIterator(input Iterator, opt IteratorOptions, interval Interval, isNonNegative bool) (Iterator, error) { + switch input := input.(type) { + case FloatIterator: + createFn := func() (FloatPointAggregator, FloatPointEmitter) { + fn := NewFloatDerivativeReducer(interval, isNonNegative, opt.Ascending) + return fn, fn + } + return newFloatStreamFloatIterator(input, createFn, opt), nil + case IntegerIterator: + createFn := func() (IntegerPointAggregator, FloatPointEmitter) { + fn := NewIntegerDerivativeReducer(interval, isNonNegative, opt.Ascending) + return fn, fn + } + return newIntegerStreamFloatIterator(input, createFn, opt), nil + case UnsignedIterator: + createFn := func() (UnsignedPointAggregator, FloatPointEmitter) { + fn := NewUnsignedDerivativeReducer(interval, isNonNegative, opt.Ascending) + return fn, fn + } + return newUnsignedStreamFloatIterator(input, createFn, opt), nil + default: + return nil, fmt.Errorf("unsupported derivative iterator type: %T", input) + } +} + +// newDifferenceIterator returns an iterator for operating on a difference() call. +func newDifferenceIterator(input Iterator, opt IteratorOptions, isNonNegative bool) (Iterator, error) { + switch input := input.(type) { + case FloatIterator: + createFn := func() (FloatPointAggregator, FloatPointEmitter) { + fn := NewFloatDifferenceReducer(isNonNegative) + return fn, fn + } + return newFloatStreamFloatIterator(input, createFn, opt), nil + case IntegerIterator: + createFn := func() (IntegerPointAggregator, IntegerPointEmitter) { + fn := NewIntegerDifferenceReducer(isNonNegative) + return fn, fn + } + return newIntegerStreamIntegerIterator(input, createFn, opt), nil + case UnsignedIterator: + createFn := func() (UnsignedPointAggregator, UnsignedPointEmitter) { + fn := NewUnsignedDifferenceReducer(isNonNegative) + return fn, fn + } + return newUnsignedStreamUnsignedIterator(input, createFn, opt), nil + default: + return nil, fmt.Errorf("unsupported difference iterator type: %T", input) + } +} + +// newElapsedIterator returns an iterator for operating on a elapsed() call. 
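As a rough illustration of the semantics behind the elapsed iterator constructed below (assumed behaviour, not the reducer's actual code): for each point after the first, elapsed() emits the time difference to the previous point expressed in units of the given interval.

```go
// Standalone sketch of the assumed elapsed() semantics; the hypothetical
// helper below ignores grouping, ordering, and the point metadata the
// real stream iterators carry.
package main

import "fmt"

func elapsed(timestamps []int64, interval int64) []int64 {
	var out []int64
	for i := 1; i < len(timestamps); i++ {
		out = append(out, (timestamps[i]-timestamps[i-1])/interval)
	}
	return out
}

func main() {
	// With a 10ns unit, timestamps 0, 10, 30 yield elapsed values 1 and 2.
	fmt.Println(elapsed([]int64{0, 10, 30}, 10)) // [1 2]
}
```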
+func newElapsedIterator(input Iterator, opt IteratorOptions, interval Interval) (Iterator, error) { + switch input := input.(type) { + case FloatIterator: + createFn := func() (FloatPointAggregator, IntegerPointEmitter) { + fn := NewFloatElapsedReducer(interval) + return fn, fn + } + return newFloatStreamIntegerIterator(input, createFn, opt), nil + case IntegerIterator: + createFn := func() (IntegerPointAggregator, IntegerPointEmitter) { + fn := NewIntegerElapsedReducer(interval) + return fn, fn + } + return newIntegerStreamIntegerIterator(input, createFn, opt), nil + case UnsignedIterator: + createFn := func() (UnsignedPointAggregator, IntegerPointEmitter) { + fn := NewUnsignedElapsedReducer(interval) + return fn, fn + } + return newUnsignedStreamIntegerIterator(input, createFn, opt), nil + case BooleanIterator: + createFn := func() (BooleanPointAggregator, IntegerPointEmitter) { + fn := NewBooleanElapsedReducer(interval) + return fn, fn + } + return newBooleanStreamIntegerIterator(input, createFn, opt), nil + case StringIterator: + createFn := func() (StringPointAggregator, IntegerPointEmitter) { + fn := NewStringElapsedReducer(interval) + return fn, fn + } + return newStringStreamIntegerIterator(input, createFn, opt), nil + default: + return nil, fmt.Errorf("unsupported elapsed iterator type: %T", input) + } +} + +// newMovingAverageIterator returns an iterator for operating on a moving_average() call. +func newMovingAverageIterator(input Iterator, n int, opt IteratorOptions) (Iterator, error) { + switch input := input.(type) { + case FloatIterator: + createFn := func() (FloatPointAggregator, FloatPointEmitter) { + fn := NewFloatMovingAverageReducer(n) + return fn, fn + } + return newFloatStreamFloatIterator(input, createFn, opt), nil + case IntegerIterator: + createFn := func() (IntegerPointAggregator, FloatPointEmitter) { + fn := NewIntegerMovingAverageReducer(n) + return fn, fn + } + return newIntegerStreamFloatIterator(input, createFn, opt), nil + case UnsignedIterator: + createFn := func() (UnsignedPointAggregator, FloatPointEmitter) { + fn := NewUnsignedMovingAverageReducer(n) + return fn, fn + } + return newUnsignedStreamFloatIterator(input, createFn, opt), nil + default: + return nil, fmt.Errorf("unsupported moving average iterator type: %T", input) + } +} + +// newExponentialMovingAverageIterator returns an iterator for operating on an exponential_moving_average() call. 
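The exponential moving average family below delegates to reducers built on the gota package; as a point of reference, a plain EMA with smoothing factor alpha = 2/(n+1) looks like the hedged sketch here, which does not model the nHold/warmupType warm-up handling those reducers take.

```go
// Standalone sketch of a plain exponential moving average; gota's warm-up
// behaviour (nHold, warmupType) is intentionally not reproduced.
package main

import "fmt"

func ema(values []float64, n int) []float64 {
	alpha := 2.0 / (float64(n) + 1.0)
	out := make([]float64, 0, len(values))
	var prev float64
	for i, v := range values {
		if i == 0 {
			prev = v // seed with the first value
		} else {
			prev = alpha*v + (1-alpha)*prev
		}
		out = append(out, prev)
	}
	return out
}

func main() {
	fmt.Println(ema([]float64{1, 2, 3, 4}, 3)) // alpha = 0.5 -> [1 1.5 2.25 3.125]
}
```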
+func newExponentialMovingAverageIterator(input Iterator, n, nHold int, warmupType gota.WarmupType, opt IteratorOptions) (Iterator, error) { + switch input := input.(type) { + case FloatIterator: + createFn := func() (FloatPointAggregator, FloatPointEmitter) { + fn := NewExponentialMovingAverageReducer(n, nHold, warmupType) + return fn, fn + } + return newFloatStreamFloatIterator(input, createFn, opt), nil + case IntegerIterator: + createFn := func() (IntegerPointAggregator, FloatPointEmitter) { + fn := NewExponentialMovingAverageReducer(n, nHold, warmupType) + return fn, fn + } + return newIntegerStreamFloatIterator(input, createFn, opt), nil + case UnsignedIterator: + createFn := func() (UnsignedPointAggregator, FloatPointEmitter) { + fn := NewExponentialMovingAverageReducer(n, nHold, warmupType) + return fn, fn + } + return newUnsignedStreamFloatIterator(input, createFn, opt), nil + default: + return nil, fmt.Errorf("unsupported exponential moving average iterator type: %T", input) + } +} + +// newDoubleExponentialMovingAverageIterator returns an iterator for operating on a double_exponential_moving_average() call. +func newDoubleExponentialMovingAverageIterator(input Iterator, n int, nHold int, warmupType gota.WarmupType, opt IteratorOptions) (Iterator, error) { + switch input := input.(type) { + case FloatIterator: + createFn := func() (FloatPointAggregator, FloatPointEmitter) { + fn := NewDoubleExponentialMovingAverageReducer(n, nHold, warmupType) + return fn, fn + } + return newFloatStreamFloatIterator(input, createFn, opt), nil + case IntegerIterator: + createFn := func() (IntegerPointAggregator, FloatPointEmitter) { + fn := NewDoubleExponentialMovingAverageReducer(n, nHold, warmupType) + return fn, fn + } + return newIntegerStreamFloatIterator(input, createFn, opt), nil + case UnsignedIterator: + createFn := func() (UnsignedPointAggregator, FloatPointEmitter) { + fn := NewDoubleExponentialMovingAverageReducer(n, nHold, warmupType) + return fn, fn + } + return newUnsignedStreamFloatIterator(input, createFn, opt), nil + default: + return nil, fmt.Errorf("unsupported double exponential moving average iterator type: %T", input) + } +} + +// newTripleExponentialMovingAverageIterator returns an iterator for operating on a triple_exponential_moving_average() call. +func newTripleExponentialMovingAverageIterator(input Iterator, n int, nHold int, warmupType gota.WarmupType, opt IteratorOptions) (Iterator, error) { + switch input := input.(type) { + case FloatIterator: + createFn := func() (FloatPointAggregator, FloatPointEmitter) { + fn := NewTripleExponentialMovingAverageReducer(n, nHold, warmupType) + return fn, fn + } + return newFloatStreamFloatIterator(input, createFn, opt), nil + case IntegerIterator: + createFn := func() (IntegerPointAggregator, FloatPointEmitter) { + fn := NewTripleExponentialMovingAverageReducer(n, nHold, warmupType) + return fn, fn + } + return newIntegerStreamFloatIterator(input, createFn, opt), nil + case UnsignedIterator: + createFn := func() (UnsignedPointAggregator, FloatPointEmitter) { + fn := NewTripleExponentialMovingAverageReducer(n, nHold, warmupType) + return fn, fn + } + return newUnsignedStreamFloatIterator(input, createFn, opt), nil + default: + return nil, fmt.Errorf("unsupported triple exponential moving average iterator type: %T", input) + } +} + +// newRelativeStrengthIndexIterator returns an iterator for operating on a relative_strength_index() call.
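For orientation, the classic relative strength index over a window is RSI = 100 - 100/(1 + avgGain/avgLoss). The sketch below is a hypothetical, simplified version; the vendored reducer relies on the gota package (with nHold and warm-up handling) and is not reproduced here.

```go
// Standalone sketch of the classic RSI formula over one fixed window.
package main

import "fmt"

func rsi(values []float64) float64 {
	var gains, losses float64
	for i := 1; i < len(values); i++ {
		d := values[i] - values[i-1]
		if d > 0 {
			gains += d
		} else {
			losses -= d
		}
	}
	if losses == 0 {
		return 100 // only gains (or flat input): fully overbought by convention
	}
	rs := gains / losses
	return 100 - 100/(1+rs)
}

func main() {
	fmt.Println(rsi([]float64{10, 11, 10.5, 11.5, 12})) // gains 2.5, losses 0.5 -> RSI ≈ 83.3
}
```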
+func newRelativeStrengthIndexIterator(input Iterator, n int, nHold int, warmupType gota.WarmupType, opt IteratorOptions) (Iterator, error) { + switch input := input.(type) { + case FloatIterator: + createFn := func() (FloatPointAggregator, FloatPointEmitter) { + fn := NewRelativeStrengthIndexReducer(n, nHold, warmupType) + return fn, fn + } + return newFloatStreamFloatIterator(input, createFn, opt), nil + case IntegerIterator: + createFn := func() (IntegerPointAggregator, FloatPointEmitter) { + fn := NewRelativeStrengthIndexReducer(n, nHold, warmupType) + return fn, fn + } + return newIntegerStreamFloatIterator(input, createFn, opt), nil + case UnsignedIterator: + createFn := func() (UnsignedPointAggregator, FloatPointEmitter) { + fn := NewRelativeStrengthIndexReducer(n, nHold, warmupType) + return fn, fn + } + return newUnsignedStreamFloatIterator(input, createFn, opt), nil + default: + return nil, fmt.Errorf("unsupported relative strength index iterator type: %T", input) + } +} + +// newTripleExponentialDerivativeIterator returns an iterator for operating on a triple_exponential_derivative() call. +func newTripleExponentialDerivativeIterator(input Iterator, n int, nHold int, warmupType gota.WarmupType, opt IteratorOptions) (Iterator, error) { + switch input := input.(type) { + case FloatIterator: + createFn := func() (FloatPointAggregator, FloatPointEmitter) { + fn := NewTripleExponentialDerivativeReducer(n, nHold, warmupType) + return fn, fn + } + return newFloatStreamFloatIterator(input, createFn, opt), nil + case IntegerIterator: + createFn := func() (IntegerPointAggregator, FloatPointEmitter) { + fn := NewTripleExponentialDerivativeReducer(n, nHold, warmupType) + return fn, fn + } + return newIntegerStreamFloatIterator(input, createFn, opt), nil + case UnsignedIterator: + createFn := func() (UnsignedPointAggregator, FloatPointEmitter) { + fn := NewTripleExponentialDerivativeReducer(n, nHold, warmupType) + return fn, fn + } + return newUnsignedStreamFloatIterator(input, createFn, opt), nil + default: + return nil, fmt.Errorf("unsupported triple exponential derivative iterator type: %T", input) + } +} + +// newKaufmansEfficiencyRatioIterator returns an iterator for operating on a kaufmans_efficiency_ratio() call. +func newKaufmansEfficiencyRatioIterator(input Iterator, n int, nHold int, opt IteratorOptions) (Iterator, error) { + switch input := input.(type) { + case FloatIterator: + createFn := func() (FloatPointAggregator, FloatPointEmitter) { + fn := NewKaufmansEfficiencyRatioReducer(n, nHold) + return fn, fn + } + return newFloatStreamFloatIterator(input, createFn, opt), nil + case IntegerIterator: + createFn := func() (IntegerPointAggregator, FloatPointEmitter) { + fn := NewKaufmansEfficiencyRatioReducer(n, nHold) + return fn, fn + } + return newIntegerStreamFloatIterator(input, createFn, opt), nil + case UnsignedIterator: + createFn := func() (UnsignedPointAggregator, FloatPointEmitter) { + fn := NewKaufmansEfficiencyRatioReducer(n, nHold) + return fn, fn + } + return newUnsignedStreamFloatIterator(input, createFn, opt), nil + default: + return nil, fmt.Errorf("unsupported kaufmans efficiency ratio iterator type: %T", input) + } +} + +// newKaufmansAdaptiveMovingAverageIterator returns an iterator for operating on a kaufmans_adaptive_moving_average() call.
+func newKaufmansAdaptiveMovingAverageIterator(input Iterator, n int, nHold int, opt IteratorOptions) (Iterator, error) { + switch input := input.(type) { + case FloatIterator: + createFn := func() (FloatPointAggregator, FloatPointEmitter) { + fn := NewKaufmansAdaptiveMovingAverageReducer(n, nHold) + return fn, fn + } + return newFloatStreamFloatIterator(input, createFn, opt), nil + case IntegerIterator: + createFn := func() (IntegerPointAggregator, FloatPointEmitter) { + fn := NewKaufmansAdaptiveMovingAverageReducer(n, nHold) + return fn, fn + } + return newIntegerStreamFloatIterator(input, createFn, opt), nil + case UnsignedIterator: + createFn := func() (UnsignedPointAggregator, FloatPointEmitter) { + fn := NewKaufmansAdaptiveMovingAverageReducer(n, nHold) + return fn, fn + } + return newUnsignedStreamFloatIterator(input, createFn, opt), nil + default: + return nil, fmt.Errorf("unsupported kaufmans adaptive moving average iterator type: %T", input) + } +} + +// newChandeMomentumOscillatorIterator returns an iterator for operating on a chande_momentum_oscillator() call. +func newChandeMomentumOscillatorIterator(input Iterator, n int, nHold int, warmupType gota.WarmupType, opt IteratorOptions) (Iterator, error) { + switch input := input.(type) { + case FloatIterator: + createFn := func() (FloatPointAggregator, FloatPointEmitter) { + fn := NewChandeMomentumOscillatorReducer(n, nHold, warmupType) + return fn, fn + } + return newFloatStreamFloatIterator(input, createFn, opt), nil + case IntegerIterator: + createFn := func() (IntegerPointAggregator, FloatPointEmitter) { + fn := NewChandeMomentumOscillatorReducer(n, nHold, warmupType) + return fn, fn + } + return newIntegerStreamFloatIterator(input, createFn, opt), nil + case UnsignedIterator: + createFn := func() (UnsignedPointAggregator, FloatPointEmitter) { + fn := NewChandeMomentumOscillatorReducer(n, nHold, warmupType) + return fn, fn + } + return newUnsignedStreamFloatIterator(input, createFn, opt), nil + default: + return nil, fmt.Errorf("unsupported chande momentum oscillator iterator type: %T", input) + } +} + +// newCumulativeSumIterator returns an iterator for operating on a cumulative_sum() call. +func newCumulativeSumIterator(input Iterator, opt IteratorOptions) (Iterator, error) { + switch input := input.(type) { + case FloatIterator: + createFn := func() (FloatPointAggregator, FloatPointEmitter) { + fn := NewFloatCumulativeSumReducer() + return fn, fn + } + return newFloatStreamFloatIterator(input, createFn, opt), nil + case IntegerIterator: + createFn := func() (IntegerPointAggregator, IntegerPointEmitter) { + fn := NewIntegerCumulativeSumReducer() + return fn, fn + } + return newIntegerStreamIntegerIterator(input, createFn, opt), nil + case UnsignedIterator: + createFn := func() (UnsignedPointAggregator, UnsignedPointEmitter) { + fn := NewUnsignedCumulativeSumReducer() + return fn, fn + } + return newUnsignedStreamUnsignedIterator(input, createFn, opt), nil + default: + return nil, fmt.Errorf("unsupported cumulative sum iterator type: %T", input) + } +} + +// newHoltWintersIterator returns an iterator for operating on a holt_winters() call.
+func newHoltWintersIterator(input Iterator, opt IteratorOptions, h, m int, includeFitData bool, interval time.Duration) (Iterator, error) { + switch input := input.(type) { + case FloatIterator: + createFn := func() (FloatPointAggregator, FloatPointEmitter) { + fn := NewFloatHoltWintersReducer(h, m, includeFitData, interval) + return fn, fn + } + return newFloatReduceFloatIterator(input, opt, createFn), nil + case IntegerIterator: + createFn := func() (IntegerPointAggregator, FloatPointEmitter) { + fn := NewFloatHoltWintersReducer(h, m, includeFitData, interval) + return fn, fn + } + return newIntegerReduceFloatIterator(input, opt, createFn), nil + default: + return nil, fmt.Errorf("unsupported holt winters iterator type: %T", input) + } +} + +// NewSampleIterator returns an iterator for operating on a sample() call (exported for use in test). +func NewSampleIterator(input Iterator, opt IteratorOptions, size int) (Iterator, error) { + return newSampleIterator(input, opt, size) +} + +// newSampleIterator returns an iterator for operating on a sample() call. +func newSampleIterator(input Iterator, opt IteratorOptions, size int) (Iterator, error) { + switch input := input.(type) { + case FloatIterator: + createFn := func() (FloatPointAggregator, FloatPointEmitter) { + fn := NewFloatSampleReducer(size) + return fn, fn + } + return newFloatReduceFloatIterator(input, opt, createFn), nil + case IntegerIterator: + createFn := func() (IntegerPointAggregator, IntegerPointEmitter) { + fn := NewIntegerSampleReducer(size) + return fn, fn + } + return newIntegerReduceIntegerIterator(input, opt, createFn), nil + case UnsignedIterator: + createFn := func() (UnsignedPointAggregator, UnsignedPointEmitter) { + fn := NewUnsignedSampleReducer(size) + return fn, fn + } + return newUnsignedReduceUnsignedIterator(input, opt, createFn), nil + case StringIterator: + createFn := func() (StringPointAggregator, StringPointEmitter) { + fn := NewStringSampleReducer(size) + return fn, fn + } + return newStringReduceStringIterator(input, opt, createFn), nil + case BooleanIterator: + createFn := func() (BooleanPointAggregator, BooleanPointEmitter) { + fn := NewBooleanSampleReducer(size) + return fn, fn + } + return newBooleanReduceBooleanIterator(input, opt, createFn), nil + default: + return nil, fmt.Errorf("unsupported sample iterator type: %T", input) + } +} + +// newIntegralIterator returns an iterator for operating on an integral() call.
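As a rough illustration of the integral iterator constructed below (assumed semantics, not the reducers' actual code): integral() approximates the area under the curve with trapezoids between consecutive points and expresses the result per interval unit.

```go
// Standalone sketch of trapezoidal area under the curve per unit; the real
// reducers also handle window boundaries and grouping, omitted here.
package main

import "fmt"

type sample struct {
	t int64   // timestamp in nanoseconds
	v float64 // field value
}

func integral(points []sample, unit float64) float64 {
	var area float64
	for i := 1; i < len(points); i++ {
		dt := float64(points[i].t - points[i-1].t)
		area += (points[i].v + points[i-1].v) / 2 * dt
	}
	return area / unit
}

func main() {
	pts := []sample{{0, 0}, {10, 10}, {20, 10}}
	// Triangle (50) plus rectangle (100) over 20ns, per 1ns unit: 150.
	fmt.Println(integral(pts, 1))
}
```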
+func newIntegralIterator(input Iterator, opt IteratorOptions, interval Interval) (Iterator, error) { + switch input := input.(type) { + case FloatIterator: + createFn := func() (FloatPointAggregator, FloatPointEmitter) { + fn := NewFloatIntegralReducer(interval, opt) + return fn, fn + } + return newFloatStreamFloatIterator(input, createFn, opt), nil + case IntegerIterator: + createFn := func() (IntegerPointAggregator, FloatPointEmitter) { + fn := NewIntegerIntegralReducer(interval, opt) + return fn, fn + } + return newIntegerStreamFloatIterator(input, createFn, opt), nil + case UnsignedIterator: + createFn := func() (UnsignedPointAggregator, FloatPointEmitter) { + fn := NewUnsignedIntegralReducer(interval, opt) + return fn, fn + } + return newUnsignedStreamFloatIterator(input, createFn, opt), nil + default: + return nil, fmt.Errorf("unsupported integral iterator type: %T", input) + } +} diff --git a/vendor/github.com/influxdata/influxdb/query/call_iterator_test.go b/vendor/github.com/influxdata/influxdb/query/call_iterator_test.go new file mode 100644 index 0000000..ef5e019 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/query/call_iterator_test.go @@ -0,0 +1,1213 @@ +package query_test + +import ( + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/influxdata/influxdb/query" + "github.com/influxdata/influxql" +) + +// Ensure that a float iterator can be created for a count() call. +func TestCallIterator_Count_Float(t *testing.T) { + itr, _ := query.NewCallIterator( + &FloatIterator{Points: []query.FloatPoint{ + {Name: "cpu", Time: 0, Value: 15, Tags: ParseTags("region=us-east,host=hostA")}, + {Name: "cpu", Time: 2, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, + {Name: "cpu", Time: 1, Value: 10, Tags: ParseTags("region=us-west,host=hostA")}, + {Name: "cpu", Time: 5, Value: 20, Tags: ParseTags("region=us-east,host=hostA")}, + + {Name: "cpu", Time: 1, Value: 11, Tags: ParseTags("region=us-west,host=hostB")}, + {Name: "cpu", Time: 23, Value: 8, Tags: ParseTags("region=us-west,host=hostB")}, + {Name: "mem", Time: 23, Value: 10, Tags: ParseTags("region=us-west,host=hostB")}, + }}, + query.IteratorOptions{ + Expr: MustParseExpr(`count("value")`), + Dimensions: []string{"host"}, + Interval: query.Interval{Duration: 5 * time.Nanosecond}, + Ordered: true, + Ascending: true, + }, + ) + + if a, err := Iterators([]query.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if diff := cmp.Diff(a, [][]query.Point{ + {&query.IntegerPoint{Name: "cpu", Time: 0, Value: 3, Tags: ParseTags("host=hostA"), Aggregated: 3}}, + {&query.IntegerPoint{Name: "cpu", Time: 5, Value: 1, Tags: ParseTags("host=hostA"), Aggregated: 1}}, + {&query.IntegerPoint{Name: "cpu", Time: 0, Value: 1, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&query.IntegerPoint{Name: "cpu", Time: 20, Value: 1, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&query.IntegerPoint{Name: "mem", Time: 20, Value: 1, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + }); diff != "" { + t.Fatalf("unexpected points:\n%s", diff) + } +} + +// Ensure that an integer iterator can be created for a count() call. 
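The expected points in the count tests above and below rely on interval bucketing: with Interval = 5ns, each output point is stamped with the start of its window, which is why inputs at time 23 surface at time 20. A simplified sketch of that bucketing (hypothetical helper, ignoring offsets and negative timestamps):

```go
// Standalone sketch of window-start bucketing with a 5ns interval.
package main

import "fmt"

func windowStart(t, interval int64) int64 {
	return t - t%interval // assumes non-negative timestamps
}

func main() {
	for _, t := range []int64{0, 2, 5, 23} {
		fmt.Println(t, "->", windowStart(t, 5)) // 0->0, 2->0, 5->5, 23->20
	}
}
```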
+func TestCallIterator_Count_Integer(t *testing.T) { + itr, _ := query.NewCallIterator( + &IntegerIterator{Points: []query.IntegerPoint{ + {Name: "cpu", Time: 0, Value: 15, Tags: ParseTags("region=us-east,host=hostA")}, + {Name: "cpu", Time: 2, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, + {Name: "cpu", Time: 1, Value: 10, Tags: ParseTags("region=us-west,host=hostA")}, + {Name: "cpu", Time: 5, Value: 20, Tags: ParseTags("region=us-east,host=hostA")}, + + {Name: "cpu", Time: 1, Value: 11, Tags: ParseTags("region=us-west,host=hostB")}, + {Name: "cpu", Time: 23, Value: 8, Tags: ParseTags("region=us-west,host=hostB")}, + {Name: "mem", Time: 23, Value: 10, Tags: ParseTags("region=us-west,host=hostB")}, + }}, + query.IteratorOptions{ + Expr: MustParseExpr(`count("value")`), + Dimensions: []string{"host"}, + Interval: query.Interval{Duration: 5 * time.Nanosecond}, + Ordered: true, + Ascending: true, + }, + ) + + if a, err := Iterators([]query.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if diff := cmp.Diff(a, [][]query.Point{ + {&query.IntegerPoint{Name: "cpu", Time: 0, Value: 3, Tags: ParseTags("host=hostA"), Aggregated: 3}}, + {&query.IntegerPoint{Name: "cpu", Time: 5, Value: 1, Tags: ParseTags("host=hostA"), Aggregated: 1}}, + {&query.IntegerPoint{Name: "cpu", Time: 0, Value: 1, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&query.IntegerPoint{Name: "cpu", Time: 20, Value: 1, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&query.IntegerPoint{Name: "mem", Time: 20, Value: 1, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + }); diff != "" { + t.Fatalf("unexpected points:\n%s", diff) + } +} + +// Ensure that an unsigned iterator can be created for a count() call. +func TestCallIterator_Count_Unsigned(t *testing.T) { + itr, _ := query.NewCallIterator( + &UnsignedIterator{Points: []query.UnsignedPoint{ + {Name: "cpu", Time: 0, Value: 15, Tags: ParseTags("region=us-east,host=hostA")}, + {Name: "cpu", Time: 2, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, + {Name: "cpu", Time: 1, Value: 10, Tags: ParseTags("region=us-west,host=hostA")}, + {Name: "cpu", Time: 5, Value: 20, Tags: ParseTags("region=us-east,host=hostA")}, + + {Name: "cpu", Time: 1, Value: 11, Tags: ParseTags("region=us-west,host=hostB")}, + {Name: "cpu", Time: 23, Value: 8, Tags: ParseTags("region=us-west,host=hostB")}, + {Name: "mem", Time: 23, Value: 10, Tags: ParseTags("region=us-west,host=hostB")}, + }}, + query.IteratorOptions{ + Expr: MustParseExpr(`count("value")`), + Dimensions: []string{"host"}, + Interval: query.Interval{Duration: 5 * time.Nanosecond}, + Ordered: true, + Ascending: true, + }, + ) + + if a, err := Iterators([]query.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if diff := cmp.Diff(a, [][]query.Point{ + {&query.IntegerPoint{Name: "cpu", Time: 0, Value: 3, Tags: ParseTags("host=hostA"), Aggregated: 3}}, + {&query.IntegerPoint{Name: "cpu", Time: 5, Value: 1, Tags: ParseTags("host=hostA"), Aggregated: 1}}, + {&query.IntegerPoint{Name: "cpu", Time: 0, Value: 1, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&query.IntegerPoint{Name: "cpu", Time: 20, Value: 1, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&query.IntegerPoint{Name: "mem", Time: 20, Value: 1, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + }); diff != "" { + t.Fatalf("unexpected points:\n%s", diff) + } +} + +// Ensure that a string iterator can be created for a count() call. 
+func TestCallIterator_Count_String(t *testing.T) { + itr, _ := query.NewCallIterator( + &StringIterator{Points: []query.StringPoint{ + {Name: "cpu", Time: 0, Value: "d", Tags: ParseTags("region=us-east,host=hostA")}, + {Name: "cpu", Time: 2, Value: "b", Tags: ParseTags("region=us-east,host=hostA")}, + {Name: "cpu", Time: 1, Value: "b", Tags: ParseTags("region=us-west,host=hostA")}, + {Name: "cpu", Time: 5, Value: "e", Tags: ParseTags("region=us-east,host=hostA")}, + + {Name: "cpu", Time: 1, Value: "c", Tags: ParseTags("region=us-west,host=hostB")}, + {Name: "cpu", Time: 23, Value: "a", Tags: ParseTags("region=us-west,host=hostB")}, + {Name: "mem", Time: 23, Value: "b", Tags: ParseTags("region=us-west,host=hostB")}, + }}, + query.IteratorOptions{ + Expr: MustParseExpr(`count("value")`), + Dimensions: []string{"host"}, + Interval: query.Interval{Duration: 5 * time.Nanosecond}, + Ordered: true, + Ascending: true, + }, + ) + + if a, err := Iterators([]query.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if diff := cmp.Diff(a, [][]query.Point{ + {&query.IntegerPoint{Name: "cpu", Time: 0, Value: 3, Tags: ParseTags("host=hostA"), Aggregated: 3}}, + {&query.IntegerPoint{Name: "cpu", Time: 5, Value: 1, Tags: ParseTags("host=hostA"), Aggregated: 1}}, + {&query.IntegerPoint{Name: "cpu", Time: 0, Value: 1, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&query.IntegerPoint{Name: "cpu", Time: 20, Value: 1, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&query.IntegerPoint{Name: "mem", Time: 20, Value: 1, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + }); diff != "" { + t.Fatalf("unexpected points:\n%s", diff) + } +} + +// Ensure that a boolean iterator can be created for a count() call. +func TestCallIterator_Count_Boolean(t *testing.T) { + itr, _ := query.NewCallIterator( + &BooleanIterator{Points: []query.BooleanPoint{ + {Name: "cpu", Time: 0, Value: true, Tags: ParseTags("region=us-east,host=hostA")}, + {Name: "cpu", Time: 2, Value: false, Tags: ParseTags("region=us-east,host=hostA")}, + {Name: "cpu", Time: 1, Value: true, Tags: ParseTags("region=us-west,host=hostA")}, + {Name: "cpu", Time: 5, Value: false, Tags: ParseTags("region=us-east,host=hostA")}, + + {Name: "cpu", Time: 1, Value: true, Tags: ParseTags("region=us-west,host=hostB")}, + {Name: "cpu", Time: 23, Value: false, Tags: ParseTags("region=us-west,host=hostB")}, + {Name: "mem", Time: 23, Value: true, Tags: ParseTags("region=us-west,host=hostB")}, + }}, + query.IteratorOptions{ + Expr: MustParseExpr(`count("value")`), + Dimensions: []string{"host"}, + Interval: query.Interval{Duration: 5 * time.Nanosecond}, + Ordered: true, + Ascending: true, + }, + ) + + if a, err := Iterators([]query.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if diff := cmp.Diff(a, [][]query.Point{ + {&query.IntegerPoint{Name: "cpu", Time: 0, Value: 3, Tags: ParseTags("host=hostA"), Aggregated: 3}}, + {&query.IntegerPoint{Name: "cpu", Time: 5, Value: 1, Tags: ParseTags("host=hostA"), Aggregated: 1}}, + {&query.IntegerPoint{Name: "cpu", Time: 0, Value: 1, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&query.IntegerPoint{Name: "cpu", Time: 20, Value: 1, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&query.IntegerPoint{Name: "mem", Time: 20, Value: 1, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + }); diff != "" { + t.Fatalf("unexpected points:\n%s", diff) + } +} + +// Ensure that a float iterator can be created for a min() call. 
+func TestCallIterator_Min_Float(t *testing.T) { + itr, _ := query.NewCallIterator( + &FloatIterator{Points: []query.FloatPoint{ + {Time: 0, Value: 15, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 2, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 4, Value: 12, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 1, Value: 10, Tags: ParseTags("region=us-west,host=hostA")}, + {Time: 5, Value: 20, Tags: ParseTags("region=us-east,host=hostA")}, + + {Time: 1, Value: 11, Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 23, Value: 8, Tags: ParseTags("region=us-west,host=hostB")}, + }}, + query.IteratorOptions{ + Expr: MustParseExpr(`min("value")`), + Dimensions: []string{"host"}, + Interval: query.Interval{Duration: 5 * time.Nanosecond}, + Ordered: true, + Ascending: true, + }, + ) + + if a, err := Iterators([]query.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if diff := cmp.Diff(a, [][]query.Point{ + {&query.FloatPoint{Time: 1, Value: 10, Tags: ParseTags("host=hostA"), Aggregated: 4}}, + {&query.FloatPoint{Time: 5, Value: 20, Tags: ParseTags("host=hostA"), Aggregated: 1}}, + {&query.FloatPoint{Time: 1, Value: 11, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&query.FloatPoint{Time: 23, Value: 8, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + }); diff != "" { + t.Fatalf("unexpected points:\n%s", diff) + } +} + +// Ensure that a integer iterator can be created for a min() call. +func TestCallIterator_Min_Integer(t *testing.T) { + itr, _ := query.NewCallIterator( + &IntegerIterator{Points: []query.IntegerPoint{ + {Time: 0, Value: 15, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 2, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 4, Value: 12, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 1, Value: 10, Tags: ParseTags("region=us-west,host=hostA")}, + {Time: 5, Value: 20, Tags: ParseTags("region=us-east,host=hostA")}, + + {Time: 1, Value: 11, Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 23, Value: 8, Tags: ParseTags("region=us-west,host=hostB")}, + }}, + query.IteratorOptions{ + Expr: MustParseExpr(`min("value")`), + Dimensions: []string{"host"}, + Interval: query.Interval{Duration: 5 * time.Nanosecond}, + Ordered: true, + Ascending: true, + }, + ) + + if a, err := Iterators([]query.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if diff := cmp.Diff(a, [][]query.Point{ + {&query.IntegerPoint{Time: 1, Value: 10, Tags: ParseTags("host=hostA"), Aggregated: 4}}, + {&query.IntegerPoint{Time: 5, Value: 20, Tags: ParseTags("host=hostA"), Aggregated: 1}}, + {&query.IntegerPoint{Time: 1, Value: 11, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&query.IntegerPoint{Time: 23, Value: 8, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + }); diff != "" { + t.Fatalf("unexpected points:\n%s", diff) + } +} + +// Ensure that a unsigned iterator can be created for a min() call. 
+func TestCallIterator_Min_Unsigned(t *testing.T) { + itr, _ := query.NewCallIterator( + &UnsignedIterator{Points: []query.UnsignedPoint{ + {Time: 0, Value: 15, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 2, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 4, Value: 12, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 1, Value: 10, Tags: ParseTags("region=us-west,host=hostA")}, + {Time: 5, Value: 20, Tags: ParseTags("region=us-east,host=hostA")}, + + {Time: 1, Value: 11, Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 23, Value: 8, Tags: ParseTags("region=us-west,host=hostB")}, + }}, + query.IteratorOptions{ + Expr: MustParseExpr(`min("value")`), + Dimensions: []string{"host"}, + Interval: query.Interval{Duration: 5 * time.Nanosecond}, + Ordered: true, + Ascending: true, + }, + ) + + if a, err := Iterators([]query.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if diff := cmp.Diff(a, [][]query.Point{ + {&query.UnsignedPoint{Time: 1, Value: 10, Tags: ParseTags("host=hostA"), Aggregated: 4}}, + {&query.UnsignedPoint{Time: 5, Value: 20, Tags: ParseTags("host=hostA"), Aggregated: 1}}, + {&query.UnsignedPoint{Time: 1, Value: 11, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&query.UnsignedPoint{Time: 23, Value: 8, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + }); diff != "" { + t.Fatalf("unexpected points:\n%s", diff) + } +} + +// Ensure that a boolean iterator can be created for a min() call. +func TestCallIterator_Min_Boolean(t *testing.T) { + itr, _ := query.NewCallIterator( + &BooleanIterator{Points: []query.BooleanPoint{ + {Time: 0, Value: true, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 2, Value: false, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 1, Value: true, Tags: ParseTags("region=us-west,host=hostA")}, + {Time: 5, Value: false, Tags: ParseTags("region=us-east,host=hostA")}, + + {Time: 1, Value: false, Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 23, Value: true, Tags: ParseTags("region=us-west,host=hostB")}, + }}, + query.IteratorOptions{ + Expr: MustParseExpr(`min("value")`), + Dimensions: []string{"host"}, + Interval: query.Interval{Duration: 5 * time.Nanosecond}, + Ordered: true, + Ascending: true, + }, + ) + + if a, err := Iterators([]query.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if diff := cmp.Diff(a, [][]query.Point{ + {&query.BooleanPoint{Time: 2, Value: false, Tags: ParseTags("host=hostA"), Aggregated: 3}}, + {&query.BooleanPoint{Time: 5, Value: false, Tags: ParseTags("host=hostA"), Aggregated: 1}}, + {&query.BooleanPoint{Time: 1, Value: false, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&query.BooleanPoint{Time: 23, Value: true, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + }); diff != "" { + t.Fatalf("unexpected points:\n%s", diff) + } +} + +// Ensure that a float iterator can be created for a max() call. 
+func TestCallIterator_Max_Float(t *testing.T) { + itr, _ := query.NewCallIterator( + &FloatIterator{Points: []query.FloatPoint{ + {Time: 0, Value: 15, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 2, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 1, Value: 10, Tags: ParseTags("region=us-west,host=hostA")}, + {Time: 5, Value: 20, Tags: ParseTags("region=us-east,host=hostA")}, + + {Time: 1, Value: 11, Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 23, Value: 8, Tags: ParseTags("region=us-west,host=hostB")}, + }}, + query.IteratorOptions{ + Expr: MustParseExpr(`max("value")`), + Dimensions: []string{"host"}, + Interval: query.Interval{Duration: 5 * time.Nanosecond}, + Ordered: true, + Ascending: true, + }, + ) + + if a, err := Iterators([]query.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if diff := cmp.Diff(a, [][]query.Point{ + {&query.FloatPoint{Time: 0, Value: 15, Tags: ParseTags("host=hostA"), Aggregated: 3}}, + {&query.FloatPoint{Time: 5, Value: 20, Tags: ParseTags("host=hostA"), Aggregated: 1}}, + {&query.FloatPoint{Time: 1, Value: 11, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&query.FloatPoint{Time: 23, Value: 8, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + }); diff != "" { + t.Fatalf("unexpected points:\n%s", diff) + } +} + +// Ensure that a integer iterator can be created for a max() call. +func TestCallIterator_Max_Integer(t *testing.T) { + itr, _ := query.NewCallIterator( + &IntegerIterator{Points: []query.IntegerPoint{ + {Time: 0, Value: 15, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 2, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 1, Value: 10, Tags: ParseTags("region=us-west,host=hostA")}, + {Time: 5, Value: 20, Tags: ParseTags("region=us-east,host=hostA")}, + + {Time: 1, Value: 11, Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 23, Value: 8, Tags: ParseTags("region=us-west,host=hostB")}, + }}, + query.IteratorOptions{ + Expr: MustParseExpr(`max("value")`), + Dimensions: []string{"host"}, + Interval: query.Interval{Duration: 5 * time.Nanosecond}, + Ordered: true, + Ascending: true, + }, + ) + + if a, err := Iterators([]query.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if diff := cmp.Diff(a, [][]query.Point{ + {&query.IntegerPoint{Time: 0, Value: 15, Tags: ParseTags("host=hostA"), Aggregated: 3}}, + {&query.IntegerPoint{Time: 5, Value: 20, Tags: ParseTags("host=hostA"), Aggregated: 1}}, + {&query.IntegerPoint{Time: 1, Value: 11, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&query.IntegerPoint{Time: 23, Value: 8, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + }); diff != "" { + t.Fatalf("unexpected points:\n%s", diff) + } +} + +// Ensure that a unsigned iterator can be created for a max() call. 
+func TestCallIterator_Max_Unsigned(t *testing.T) { + itr, _ := query.NewCallIterator( + &UnsignedIterator{Points: []query.UnsignedPoint{ + {Time: 0, Value: 15, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 2, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 1, Value: 10, Tags: ParseTags("region=us-west,host=hostA")}, + {Time: 5, Value: 20, Tags: ParseTags("region=us-east,host=hostA")}, + + {Time: 1, Value: 11, Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 23, Value: 8, Tags: ParseTags("region=us-west,host=hostB")}, + }}, + query.IteratorOptions{ + Expr: MustParseExpr(`max("value")`), + Dimensions: []string{"host"}, + Interval: query.Interval{Duration: 5 * time.Nanosecond}, + Ordered: true, + Ascending: true, + }, + ) + + if a, err := Iterators([]query.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if diff := cmp.Diff(a, [][]query.Point{ + {&query.UnsignedPoint{Time: 0, Value: 15, Tags: ParseTags("host=hostA"), Aggregated: 3}}, + {&query.UnsignedPoint{Time: 5, Value: 20, Tags: ParseTags("host=hostA"), Aggregated: 1}}, + {&query.UnsignedPoint{Time: 1, Value: 11, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&query.UnsignedPoint{Time: 23, Value: 8, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + }); diff != "" { + t.Fatalf("unexpected points:\n%s", diff) + } +} + +// Ensure that a boolean iterator can be created for a max() call. +func TestCallIterator_Max_Boolean(t *testing.T) { + itr, _ := query.NewCallIterator( + &BooleanIterator{Points: []query.BooleanPoint{ + {Time: 0, Value: true, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 2, Value: false, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 1, Value: true, Tags: ParseTags("region=us-west,host=hostA")}, + {Time: 5, Value: false, Tags: ParseTags("region=us-east,host=hostA")}, + + {Time: 1, Value: false, Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 23, Value: true, Tags: ParseTags("region=us-west,host=hostB")}, + }}, + query.IteratorOptions{ + Expr: MustParseExpr(`max("value")`), + Dimensions: []string{"host"}, + Interval: query.Interval{Duration: 5 * time.Nanosecond}, + Ordered: true, + Ascending: true, + }, + ) + + if a, err := Iterators([]query.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if diff := cmp.Diff(a, [][]query.Point{ + {&query.BooleanPoint{Time: 0, Value: true, Tags: ParseTags("host=hostA"), Aggregated: 3}}, + {&query.BooleanPoint{Time: 5, Value: false, Tags: ParseTags("host=hostA"), Aggregated: 1}}, + {&query.BooleanPoint{Time: 1, Value: false, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&query.BooleanPoint{Time: 23, Value: true, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + }); diff != "" { + t.Fatalf("unexpected points:\n%s", diff) + } +} + +// Ensure that a float iterator can be created for a sum() call. 
+func TestCallIterator_Sum_Float(t *testing.T) { + itr, _ := query.NewCallIterator( + &FloatIterator{Points: []query.FloatPoint{ + {Time: 0, Value: 15, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 2, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 1, Value: 10, Tags: ParseTags("region=us-west,host=hostA")}, + {Time: 5, Value: 20, Tags: ParseTags("region=us-east,host=hostA")}, + + {Time: 1, Value: 11, Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 23, Value: 8, Tags: ParseTags("region=us-west,host=hostB")}, + }}, + query.IteratorOptions{ + Expr: MustParseExpr(`sum("value")`), + Dimensions: []string{"host"}, + Interval: query.Interval{Duration: 5 * time.Nanosecond}, + Ordered: true, + Ascending: true, + }, + ) + + if a, err := Iterators([]query.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if diff := cmp.Diff(a, [][]query.Point{ + {&query.FloatPoint{Time: 0, Value: 35, Tags: ParseTags("host=hostA"), Aggregated: 3}}, + {&query.FloatPoint{Time: 5, Value: 20, Tags: ParseTags("host=hostA"), Aggregated: 1}}, + {&query.FloatPoint{Time: 0, Value: 11, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&query.FloatPoint{Time: 20, Value: 8, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + }); diff != "" { + t.Fatalf("unexpected points:\n%s", diff) + } +} + +// Ensure that an integer iterator can be created for a sum() call. +func TestCallIterator_Sum_Integer(t *testing.T) { + itr, _ := query.NewCallIterator( + &IntegerIterator{Points: []query.IntegerPoint{ + {Time: 0, Value: 15, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 2, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 1, Value: 10, Tags: ParseTags("region=us-west,host=hostA")}, + {Time: 5, Value: 20, Tags: ParseTags("region=us-east,host=hostA")}, + + {Time: 1, Value: 11, Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 23, Value: 8, Tags: ParseTags("region=us-west,host=hostB")}, + }}, + query.IteratorOptions{ + Expr: MustParseExpr(`sum("value")`), + Dimensions: []string{"host"}, + Interval: query.Interval{Duration: 5 * time.Nanosecond}, + Ordered: true, + Ascending: true, + }, + ) + + if a, err := Iterators([]query.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if diff := cmp.Diff(a, [][]query.Point{ + {&query.IntegerPoint{Time: 0, Value: 35, Tags: ParseTags("host=hostA"), Aggregated: 3}}, + {&query.IntegerPoint{Time: 5, Value: 20, Tags: ParseTags("host=hostA"), Aggregated: 1}}, + {&query.IntegerPoint{Time: 0, Value: 11, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&query.IntegerPoint{Time: 20, Value: 8, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + }); diff != "" { + t.Fatalf("unexpected points:\n%s", diff) + } +} + +// Ensure that an unsigned iterator can be created for a sum() call. 
+func TestCallIterator_Sum_Unsigned(t *testing.T) { + itr, _ := query.NewCallIterator( + &UnsignedIterator{Points: []query.UnsignedPoint{ + {Time: 0, Value: 15, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 2, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 1, Value: 10, Tags: ParseTags("region=us-west,host=hostA")}, + {Time: 5, Value: 20, Tags: ParseTags("region=us-east,host=hostA")}, + + {Time: 1, Value: 11, Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 23, Value: 8, Tags: ParseTags("region=us-west,host=hostB")}, + }}, + query.IteratorOptions{ + Expr: MustParseExpr(`sum("value")`), + Dimensions: []string{"host"}, + Interval: query.Interval{Duration: 5 * time.Nanosecond}, + Ordered: true, + Ascending: true, + }, + ) + + if a, err := Iterators([]query.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if diff := cmp.Diff(a, [][]query.Point{ + {&query.UnsignedPoint{Time: 0, Value: 35, Tags: ParseTags("host=hostA"), Aggregated: 3}}, + {&query.UnsignedPoint{Time: 5, Value: 20, Tags: ParseTags("host=hostA"), Aggregated: 1}}, + {&query.UnsignedPoint{Time: 0, Value: 11, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&query.UnsignedPoint{Time: 20, Value: 8, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + }); diff != "" { + t.Fatalf("unexpected points:\n%s", diff) + } +} + +// Ensure that a float iterator can be created for a first() call. +func TestCallIterator_First_Float(t *testing.T) { + itr, _ := query.NewCallIterator( + &FloatIterator{Points: []query.FloatPoint{ + {Time: 2, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 0, Value: 15, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 1, Value: 10, Tags: ParseTags("region=us-west,host=hostA")}, + {Time: 6, Value: 20, Tags: ParseTags("region=us-east,host=hostA")}, + + {Time: 1, Value: 11, Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 23, Value: 8, Tags: ParseTags("region=us-west,host=hostB")}, + }}, + query.IteratorOptions{ + Expr: MustParseExpr(`first("value")`), + Dimensions: []string{"host"}, + Interval: query.Interval{Duration: 5 * time.Nanosecond}, + Ordered: true, + Ascending: true, + }, + ) + + if a, err := Iterators([]query.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if diff := cmp.Diff(a, [][]query.Point{ + {&query.FloatPoint{Time: 0, Value: 15, Tags: ParseTags("host=hostA"), Aggregated: 3}}, + {&query.FloatPoint{Time: 6, Value: 20, Tags: ParseTags("host=hostA"), Aggregated: 1}}, + {&query.FloatPoint{Time: 1, Value: 11, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&query.FloatPoint{Time: 23, Value: 8, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + }); diff != "" { + t.Fatalf("unexpected points:\n%s", diff) + } +} + +// Ensure that an integer iterator can be created for a first() call. 
+func TestCallIterator_First_Integer(t *testing.T) { + itr, _ := query.NewCallIterator( + &IntegerIterator{Points: []query.IntegerPoint{ + {Time: 2, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 0, Value: 15, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 1, Value: 10, Tags: ParseTags("region=us-west,host=hostA")}, + {Time: 6, Value: 20, Tags: ParseTags("region=us-east,host=hostA")}, + + {Time: 1, Value: 11, Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 23, Value: 8, Tags: ParseTags("region=us-west,host=hostB")}, + }}, + query.IteratorOptions{ + Expr: MustParseExpr(`first("value")`), + Dimensions: []string{"host"}, + Interval: query.Interval{Duration: 5 * time.Nanosecond}, + Ordered: true, + Ascending: true, + }, + ) + + if a, err := Iterators([]query.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if diff := cmp.Diff(a, [][]query.Point{ + {&query.IntegerPoint{Time: 0, Value: 15, Tags: ParseTags("host=hostA"), Aggregated: 3}}, + {&query.IntegerPoint{Time: 6, Value: 20, Tags: ParseTags("host=hostA"), Aggregated: 1}}, + {&query.IntegerPoint{Time: 1, Value: 11, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&query.IntegerPoint{Time: 23, Value: 8, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + }); diff != "" { + t.Fatalf("unexpected points:\n%s", diff) + } +} + +// Ensure that an unsigned iterator can be created for a first() call. +func TestCallIterator_First_Unsigned(t *testing.T) { + itr, _ := query.NewCallIterator( + &UnsignedIterator{Points: []query.UnsignedPoint{ + {Time: 2, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 0, Value: 15, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 1, Value: 10, Tags: ParseTags("region=us-west,host=hostA")}, + {Time: 6, Value: 20, Tags: ParseTags("region=us-east,host=hostA")}, + + {Time: 1, Value: 11, Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 23, Value: 8, Tags: ParseTags("region=us-west,host=hostB")}, + }}, + query.IteratorOptions{ + Expr: MustParseExpr(`first("value")`), + Dimensions: []string{"host"}, + Interval: query.Interval{Duration: 5 * time.Nanosecond}, + Ordered: true, + Ascending: true, + }, + ) + + if a, err := Iterators([]query.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if diff := cmp.Diff(a, [][]query.Point{ + {&query.UnsignedPoint{Time: 0, Value: 15, Tags: ParseTags("host=hostA"), Aggregated: 3}}, + {&query.UnsignedPoint{Time: 6, Value: 20, Tags: ParseTags("host=hostA"), Aggregated: 1}}, + {&query.UnsignedPoint{Time: 1, Value: 11, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&query.UnsignedPoint{Time: 23, Value: 8, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + }); diff != "" { + t.Fatalf("unexpected points:\n%s", diff) + } +} + +// Ensure that a string iterator can be created for a first() call. 
+func TestCallIterator_First_String(t *testing.T) { + itr, _ := query.NewCallIterator( + &StringIterator{Points: []query.StringPoint{ + {Time: 2, Value: "b", Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 0, Value: "d", Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 1, Value: "b", Tags: ParseTags("region=us-west,host=hostA")}, + {Time: 6, Value: "e", Tags: ParseTags("region=us-east,host=hostA")}, + + {Time: 1, Value: "c", Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 23, Value: "a", Tags: ParseTags("region=us-west,host=hostB")}, + }}, + query.IteratorOptions{ + Expr: MustParseExpr(`first("value")`), + Dimensions: []string{"host"}, + Interval: query.Interval{Duration: 5 * time.Nanosecond}, + Ordered: true, + Ascending: true, + }, + ) + + if a, err := Iterators([]query.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if diff := cmp.Diff(a, [][]query.Point{ + {&query.StringPoint{Time: 0, Value: "d", Tags: ParseTags("host=hostA"), Aggregated: 3}}, + {&query.StringPoint{Time: 6, Value: "e", Tags: ParseTags("host=hostA"), Aggregated: 1}}, + {&query.StringPoint{Time: 1, Value: "c", Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&query.StringPoint{Time: 23, Value: "a", Tags: ParseTags("host=hostB"), Aggregated: 1}}, + }); diff != "" { + t.Fatalf("unexpected points:\n%s", diff) + } +} + +// Ensure that a boolean iterator can be created for a first() call. +func TestCallIterator_First_Boolean(t *testing.T) { + itr, _ := query.NewCallIterator( + &BooleanIterator{Points: []query.BooleanPoint{ + {Time: 2, Value: false, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 0, Value: true, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 1, Value: false, Tags: ParseTags("region=us-west,host=hostA")}, + {Time: 6, Value: false, Tags: ParseTags("region=us-east,host=hostA")}, + + {Time: 1, Value: true, Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 23, Value: false, Tags: ParseTags("region=us-west,host=hostB")}, + }}, + query.IteratorOptions{ + Expr: MustParseExpr(`first("value")`), + Dimensions: []string{"host"}, + Interval: query.Interval{Duration: 5 * time.Nanosecond}, + Ordered: true, + Ascending: true, + }, + ) + + if a, err := Iterators([]query.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if diff := cmp.Diff(a, [][]query.Point{ + {&query.BooleanPoint{Time: 0, Value: true, Tags: ParseTags("host=hostA"), Aggregated: 3}}, + {&query.BooleanPoint{Time: 6, Value: false, Tags: ParseTags("host=hostA"), Aggregated: 1}}, + {&query.BooleanPoint{Time: 1, Value: true, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&query.BooleanPoint{Time: 23, Value: false, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + }); diff != "" { + t.Fatalf("unexpected points:\n%s", diff) + } +} + +// Ensure that a float iterator can be created for a last() call. 
+func TestCallIterator_Last_Float(t *testing.T) { + itr, _ := query.NewCallIterator( + &FloatIterator{Points: []query.FloatPoint{ + {Time: 2, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 0, Value: 15, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 1, Value: 10, Tags: ParseTags("region=us-west,host=hostA")}, + {Time: 6, Value: 20, Tags: ParseTags("region=us-east,host=hostA")}, + + {Time: 1, Value: 11, Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 23, Value: 8, Tags: ParseTags("region=us-west,host=hostB")}, + }}, + query.IteratorOptions{ + Expr: MustParseExpr(`last("value")`), + Dimensions: []string{"host"}, + Interval: query.Interval{Duration: 5 * time.Nanosecond}, + Ordered: true, + Ascending: true, + }, + ) + + if a, err := Iterators([]query.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if diff := cmp.Diff(a, [][]query.Point{ + {&query.FloatPoint{Time: 2, Value: 10, Tags: ParseTags("host=hostA"), Aggregated: 3}}, + {&query.FloatPoint{Time: 6, Value: 20, Tags: ParseTags("host=hostA"), Aggregated: 1}}, + {&query.FloatPoint{Time: 1, Value: 11, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&query.FloatPoint{Time: 23, Value: 8, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + }); diff != "" { + t.Fatalf("unexpected points:\n%s", diff) + } +} + +// Ensure that an integer iterator can be created for a last() call. +func TestCallIterator_Last_Integer(t *testing.T) { + itr, _ := query.NewCallIterator( + &IntegerIterator{Points: []query.IntegerPoint{ + {Time: 2, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 0, Value: 15, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 1, Value: 10, Tags: ParseTags("region=us-west,host=hostA")}, + {Time: 6, Value: 20, Tags: ParseTags("region=us-east,host=hostA")}, + + {Time: 1, Value: 11, Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 23, Value: 8, Tags: ParseTags("region=us-west,host=hostB")}, + }}, + query.IteratorOptions{ + Expr: MustParseExpr(`last("value")`), + Dimensions: []string{"host"}, + Interval: query.Interval{Duration: 5 * time.Nanosecond}, + Ordered: true, + Ascending: true, + }, + ) + + if a, err := Iterators([]query.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if diff := cmp.Diff(a, [][]query.Point{ + {&query.IntegerPoint{Time: 2, Value: 10, Tags: ParseTags("host=hostA"), Aggregated: 3}}, + {&query.IntegerPoint{Time: 6, Value: 20, Tags: ParseTags("host=hostA"), Aggregated: 1}}, + {&query.IntegerPoint{Time: 1, Value: 11, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&query.IntegerPoint{Time: 23, Value: 8, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + }); diff != "" { + t.Fatalf("unexpected points:\n%s", diff) + } +} + +// Ensure that an unsigned iterator can be created for a last() call. 
+func TestCallIterator_Last_Unsigned(t *testing.T) { + itr, _ := query.NewCallIterator( + &UnsignedIterator{Points: []query.UnsignedPoint{ + {Time: 2, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 0, Value: 15, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 1, Value: 10, Tags: ParseTags("region=us-west,host=hostA")}, + {Time: 6, Value: 20, Tags: ParseTags("region=us-east,host=hostA")}, + + {Time: 1, Value: 11, Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 23, Value: 8, Tags: ParseTags("region=us-west,host=hostB")}, + }}, + query.IteratorOptions{ + Expr: MustParseExpr(`last("value")`), + Dimensions: []string{"host"}, + Interval: query.Interval{Duration: 5 * time.Nanosecond}, + Ordered: true, + Ascending: true, + }, + ) + + if a, err := Iterators([]query.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if diff := cmp.Diff(a, [][]query.Point{ + {&query.UnsignedPoint{Time: 2, Value: 10, Tags: ParseTags("host=hostA"), Aggregated: 3}}, + {&query.UnsignedPoint{Time: 6, Value: 20, Tags: ParseTags("host=hostA"), Aggregated: 1}}, + {&query.UnsignedPoint{Time: 1, Value: 11, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&query.UnsignedPoint{Time: 23, Value: 8, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + }); diff != "" { + t.Fatalf("unexpected points:\n%s", diff) + } +} + +// Ensure that a string iterator can be created for a last() call. +func TestCallIterator_Last_String(t *testing.T) { + itr, _ := query.NewCallIterator( + &StringIterator{Points: []query.StringPoint{ + {Time: 2, Value: "b", Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 0, Value: "d", Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 1, Value: "b", Tags: ParseTags("region=us-west,host=hostA")}, + {Time: 6, Value: "e", Tags: ParseTags("region=us-east,host=hostA")}, + + {Time: 1, Value: "c", Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 23, Value: "a", Tags: ParseTags("region=us-west,host=hostB")}, + }}, + query.IteratorOptions{ + Expr: MustParseExpr(`last("value")`), + Dimensions: []string{"host"}, + Interval: query.Interval{Duration: 5 * time.Nanosecond}, + Ordered: true, + Ascending: true, + }, + ) + + if a, err := Iterators([]query.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if diff := cmp.Diff(a, [][]query.Point{ + {&query.StringPoint{Time: 2, Value: "b", Tags: ParseTags("host=hostA"), Aggregated: 3}}, + {&query.StringPoint{Time: 6, Value: "e", Tags: ParseTags("host=hostA"), Aggregated: 1}}, + {&query.StringPoint{Time: 1, Value: "c", Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&query.StringPoint{Time: 23, Value: "a", Tags: ParseTags("host=hostB"), Aggregated: 1}}, + }); diff != "" { + t.Fatalf("unexpected points:\n%s", diff) + } +} + +// Ensure that a boolean iterator can be created for a last() call. 
+func TestCallIterator_Last_Boolean(t *testing.T) { + itr, _ := query.NewCallIterator( + &BooleanIterator{Points: []query.BooleanPoint{ + {Time: 2, Value: false, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 0, Value: true, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 1, Value: false, Tags: ParseTags("region=us-west,host=hostA")}, + {Time: 6, Value: false, Tags: ParseTags("region=us-east,host=hostA")}, + + {Time: 1, Value: true, Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 23, Value: false, Tags: ParseTags("region=us-west,host=hostB")}, + }}, + query.IteratorOptions{ + Expr: MustParseExpr(`last("value")`), + Dimensions: []string{"host"}, + Interval: query.Interval{Duration: 5 * time.Nanosecond}, + Ordered: true, + Ascending: true, + }, + ) + + if a, err := Iterators([]query.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if diff := cmp.Diff(a, [][]query.Point{ + {&query.BooleanPoint{Time: 2, Value: false, Tags: ParseTags("host=hostA"), Aggregated: 3}}, + {&query.BooleanPoint{Time: 6, Value: false, Tags: ParseTags("host=hostA"), Aggregated: 1}}, + {&query.BooleanPoint{Time: 1, Value: true, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&query.BooleanPoint{Time: 23, Value: false, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + }); diff != "" { + t.Fatalf("unexpected points:\n%s", diff) + } +} + +// Ensure that a float iterator can be created for a mode() call. +func TestCallIterator_Mode_Float(t *testing.T) { + itr, _ := query.NewModeIterator(&FloatIterator{Points: []query.FloatPoint{ + {Time: 0, Value: 15, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 1, Value: 10, Tags: ParseTags("region=us-west,host=hostA")}, + {Time: 2, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 3, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 4, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 6, Value: 20, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 7, Value: 21, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 8, Value: 21, Tags: ParseTags("region=us-east,host=hostA")}, + + {Time: 1, Value: 11, Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 22, Value: 8, Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 23, Value: 8, Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 24, Value: 25, Tags: ParseTags("region=us-west,host=hostB")}, + }}, + query.IteratorOptions{ + Expr: MustParseExpr(`mode("value")`), + Dimensions: []string{"host"}, + Interval: query.Interval{Duration: 5 * time.Nanosecond}, + Ordered: true, + Ascending: true, + }, + ) + + if a, err := Iterators([]query.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if diff := cmp.Diff(a, [][]query.Point{ + {&query.FloatPoint{Time: 0, Value: 10, Tags: ParseTags("host=hostA"), Aggregated: 0}}, + {&query.FloatPoint{Time: 5, Value: 21, Tags: ParseTags("host=hostA"), Aggregated: 0}}, + {&query.FloatPoint{Time: 1, Value: 11, Tags: ParseTags("host=hostB"), Aggregated: 0}}, + {&query.FloatPoint{Time: 20, Value: 8, Tags: ParseTags("host=hostB"), Aggregated: 0}}, + }); diff != "" { + t.Fatalf("unexpected points:\n%s", diff) + } +} + +// Ensure that a integer iterator can be created for a mode() call. 
+func TestCallIterator_Mode_Integer(t *testing.T) { + itr, _ := query.NewModeIterator(&IntegerIterator{Points: []query.IntegerPoint{ + {Time: 0, Value: 15, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 1, Value: 10, Tags: ParseTags("region=us-west,host=hostA")}, + {Time: 2, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 3, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 4, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 6, Value: 20, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 7, Value: 21, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 8, Value: 21, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 1, Value: 11, Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 22, Value: 8, Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 23, Value: 8, Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 24, Value: 25, Tags: ParseTags("region=us-west,host=hostB")}, + }}, + query.IteratorOptions{ + Expr: MustParseExpr(`mode("value")`), + Dimensions: []string{"host"}, + Interval: query.Interval{Duration: 5 * time.Nanosecond}, + Ordered: true, + Ascending: true, + }, + ) + + if a, err := Iterators([]query.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if diff := cmp.Diff(a, [][]query.Point{ + {&query.IntegerPoint{Time: 0, Value: 10, Tags: ParseTags("host=hostA")}}, + {&query.IntegerPoint{Time: 5, Value: 21, Tags: ParseTags("host=hostA")}}, + {&query.IntegerPoint{Time: 1, Value: 11, Tags: ParseTags("host=hostB")}}, + {&query.IntegerPoint{Time: 20, Value: 8, Tags: ParseTags("host=hostB")}}, + }); diff != "" { + t.Fatalf("unexpected points:\n%s", diff) + } +} + +// Ensure that an unsigned iterator can be created for a mode() call. 
+func TestCallIterator_Mode_Unsigned(t *testing.T) { + itr, _ := query.NewModeIterator(&UnsignedIterator{Points: []query.UnsignedPoint{ + {Time: 0, Value: 15, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 1, Value: 10, Tags: ParseTags("region=us-west,host=hostA")}, + {Time: 2, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 3, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 4, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 6, Value: 20, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 7, Value: 21, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 8, Value: 21, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 1, Value: 11, Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 22, Value: 8, Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 23, Value: 8, Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 24, Value: 25, Tags: ParseTags("region=us-west,host=hostB")}, + }}, + query.IteratorOptions{ + Expr: MustParseExpr(`mode("value")`), + Dimensions: []string{"host"}, + Interval: query.Interval{Duration: 5 * time.Nanosecond}, + Ordered: true, + Ascending: true, + }, + ) + + if a, err := Iterators([]query.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if diff := cmp.Diff(a, [][]query.Point{ + {&query.UnsignedPoint{Time: 0, Value: 10, Tags: ParseTags("host=hostA")}}, + {&query.UnsignedPoint{Time: 5, Value: 21, Tags: ParseTags("host=hostA")}}, + {&query.UnsignedPoint{Time: 1, Value: 11, Tags: ParseTags("host=hostB")}}, + {&query.UnsignedPoint{Time: 20, Value: 8, Tags: ParseTags("host=hostB")}}, + }); diff != "" { + t.Fatalf("unexpected points:\n%s", diff) + } +} + +// Ensure that a string iterator can be created for a mode() call. 
+func TestCallIterator_Mode_String(t *testing.T) { + itr, _ := query.NewModeIterator(&StringIterator{Points: []query.StringPoint{ + {Time: 0, Value: "15", Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 1, Value: "10", Tags: ParseTags("region=us-west,host=hostA")}, + {Time: 2, Value: "10", Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 3, Value: "10", Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 4, Value: "10", Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 6, Value: "20", Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 7, Value: "21", Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 7, Value: "21", Tags: ParseTags("region=us-east,host=hostA")}, + + {Time: 1, Value: "11", Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 22, Value: "8", Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 23, Value: "8", Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 24, Value: "25", Tags: ParseTags("region=us-west,host=hostB")}, + }}, + query.IteratorOptions{ + Expr: MustParseExpr(`mode("value")`), + Dimensions: []string{"host"}, + Interval: query.Interval{Duration: 5 * time.Nanosecond}, + Ordered: true, + Ascending: true, + }, + ) + + if a, err := Iterators([]query.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if diff := cmp.Diff(a, [][]query.Point{ + {&query.StringPoint{Time: 0, Value: "10", Tags: ParseTags("host=hostA")}}, + {&query.StringPoint{Time: 5, Value: "21", Tags: ParseTags("host=hostA")}}, + {&query.StringPoint{Time: 1, Value: "11", Tags: ParseTags("host=hostB")}}, + {&query.StringPoint{Time: 20, Value: "8", Tags: ParseTags("host=hostB")}}, + }); diff != "" { + t.Fatalf("unexpected points:\n%s", diff) + } +} + +// Ensure that a boolean iterator can be created for a mode() call. 
+func TestCallIterator_Mode_Boolean(t *testing.T) { + itr, _ := query.NewModeIterator(&BooleanIterator{Points: []query.BooleanPoint{ + {Time: 0, Value: true, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 1, Value: true, Tags: ParseTags("region=us-west,host=hostA")}, + {Time: 2, Value: true, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 3, Value: true, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 4, Value: false, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 6, Value: false, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 7, Value: false, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 8, Value: false, Tags: ParseTags("region=us-east,host=hostA")}, + + {Time: 1, Value: false, Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 22, Value: false, Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 23, Value: true, Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 24, Value: true, Tags: ParseTags("region=us-west,host=hostB")}, + }}, + query.IteratorOptions{ + Expr: MustParseExpr(`mode("value")`), + Dimensions: []string{"host"}, + Interval: query.Interval{Duration: 5 * time.Nanosecond}, + Ordered: true, + Ascending: true, + }, + ) + + if a, err := Iterators([]query.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if diff := cmp.Diff(a, [][]query.Point{ + {&query.BooleanPoint{Time: 0, Value: true, Tags: ParseTags("host=hostA")}}, + {&query.BooleanPoint{Time: 5, Value: false, Tags: ParseTags("host=hostA")}}, + {&query.BooleanPoint{Time: 1, Value: false, Tags: ParseTags("host=hostB")}}, + {&query.BooleanPoint{Time: 20, Value: true, Tags: ParseTags("host=hostB")}}, + }); diff != "" { + t.Fatalf("unexpected points:\n%s", diff) + } +} + +func TestNewCallIterator_UnsupportedExprName(t *testing.T) { + _, err := query.NewCallIterator( + &FloatIterator{}, + query.IteratorOptions{ + Expr: MustParseExpr(`foobar("value")`), + }, + ) + + if err == nil || err.Error() != "unsupported function call: foobar" { + t.Errorf("unexpected error: %s", err) + } +} + +func BenchmarkCountIterator_1K(b *testing.B) { benchmarkCountIterator(b, 1000) } +func BenchmarkCountIterator_100K(b *testing.B) { benchmarkCountIterator(b, 100000) } +func BenchmarkCountIterator_1M(b *testing.B) { benchmarkCountIterator(b, 1000000) } + +func benchmarkCountIterator(b *testing.B, pointN int) { + benchmarkCallIterator(b, query.IteratorOptions{ + Expr: MustParseExpr("count(value)"), + StartTime: influxql.MinTime, + EndTime: influxql.MaxTime, + }, pointN) +} + +func benchmarkCallIterator(b *testing.B, opt query.IteratorOptions, pointN int) { + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + // Create a lightweight point generator. + p := query.FloatPoint{Name: "cpu", Value: 100} + input := FloatPointGenerator{ + N: pointN, + Fn: func(i int) *query.FloatPoint { return &p }, + } + + // Execute call against input. + itr, err := query.NewCallIterator(&input, opt) + if err != nil { + b.Fatal(err) + } + query.DrainIterator(itr) + } +} + +func BenchmarkSampleIterator_1k(b *testing.B) { benchmarkSampleIterator(b, 1000) } +func BenchmarkSampleIterator_100k(b *testing.B) { benchmarkSampleIterator(b, 100000) } +func BenchmarkSampleIterator_1M(b *testing.B) { benchmarkSampleIterator(b, 1000000) } + +func benchmarkSampleIterator(b *testing.B, pointN int) { + b.ReportAllocs() + + // Create a lightweight point generator. 
+ p := query.FloatPoint{Name: "cpu"} + input := FloatPointGenerator{ + N: pointN, + Fn: func(i int) *query.FloatPoint { + p.Value = float64(i) + return &p + }, + } + + for i := 0; i < b.N; i++ { + // Execute call against input. + itr, err := query.NewSampleIterator(&input, query.IteratorOptions{}, 100) + if err != nil { + b.Fatal(err) + } + query.DrainIterator(itr) + } +} + +func BenchmarkDistinctIterator_1K(b *testing.B) { benchmarkDistinctIterator(b, 1000) } +func BenchmarkDistinctIterator_100K(b *testing.B) { benchmarkDistinctIterator(b, 100000) } +func BenchmarkDistinctIterator_1M(b *testing.B) { benchmarkDistinctIterator(b, 1000000) } + +func benchmarkDistinctIterator(b *testing.B, pointN int) { + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + // Create a lightweight point generator. + p := query.FloatPoint{Name: "cpu"} + input := FloatPointGenerator{ + N: pointN, + Fn: func(i int) *query.FloatPoint { + p.Value = float64(i % 10) + return &p + }, + } + + // Execute call against input. + itr, err := query.NewDistinctIterator(&input, query.IteratorOptions{}) + if err != nil { + b.Fatal(err) + } + query.DrainIterator(itr) + } +} + +func BenchmarkModeIterator_1K(b *testing.B) { benchmarkModeIterator(b, 1000) } +func BenchmarkModeIterator_100K(b *testing.B) { benchmarkModeIterator(b, 100000) } +func BenchmarkModeIterator_1M(b *testing.B) { benchmarkModeIterator(b, 1000000) } + +func benchmarkModeIterator(b *testing.B, pointN int) { + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + // Create a lightweight point generator. + p := query.FloatPoint{Name: "cpu"} + input := FloatPointGenerator{ + N: pointN, + Fn: func(i int) *query.FloatPoint { + p.Value = float64(10) + return &p + }, + } + + // Execute call against input. + itr, err := query.NewModeIterator(&input, query.IteratorOptions{}) + if err != nil { + b.Fatal(err) + } + query.DrainIterator(itr) + } +} + +type FloatPointGenerator struct { + i int + N int + Fn func(i int) *query.FloatPoint +} + +func (g *FloatPointGenerator) Close() error { return nil } +func (g *FloatPointGenerator) Stats() query.IteratorStats { return query.IteratorStats{} } + +func (g *FloatPointGenerator) Next() (*query.FloatPoint, error) { + if g.i == g.N { + return nil, nil + } + p := g.Fn(g.i) + g.i++ + return p, nil +} diff --git a/vendor/github.com/influxdata/influxdb/query/cast.go b/vendor/github.com/influxdata/influxdb/query/cast.go new file mode 100644 index 0000000..8c02f4a --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/query/cast.go @@ -0,0 +1,88 @@ +package query + +import "github.com/influxdata/influxql" + +// castToType will coerce the underlying interface type to another +// interface depending on the type. 
+func castToType(v interface{}, typ influxql.DataType) interface{} { + switch typ { + case influxql.Float: + if val, ok := castToFloat(v); ok { + v = val + } + case influxql.Integer: + if val, ok := castToInteger(v); ok { + v = val + } + case influxql.Unsigned: + if val, ok := castToUnsigned(v); ok { + v = val + } + case influxql.String, influxql.Tag: + if val, ok := castToString(v); ok { + v = val + } + case influxql.Boolean: + if val, ok := castToBoolean(v); ok { + v = val + } + } + return v +} + +func castToFloat(v interface{}) (float64, bool) { + switch v := v.(type) { + case float64: + return v, true + case int64: + return float64(v), true + case uint64: + return float64(v), true + default: + return float64(0), false + } +} + +func castToInteger(v interface{}) (int64, bool) { + switch v := v.(type) { + case float64: + return int64(v), true + case int64: + return v, true + case uint64: + return int64(v), true + default: + return int64(0), false + } +} + +func castToUnsigned(v interface{}) (uint64, bool) { + switch v := v.(type) { + case float64: + return uint64(v), true + case uint64: + return v, true + case int64: + return uint64(v), true + default: + return uint64(0), false + } +} + +func castToString(v interface{}) (string, bool) { + switch v := v.(type) { + case string: + return v, true + default: + return "", false + } +} + +func castToBoolean(v interface{}) (bool, bool) { + switch v := v.(type) { + case bool: + return v, true + default: + return false, false + } +} diff --git a/vendor/github.com/influxdata/influxdb/query/compile.go b/vendor/github.com/influxdata/influxdb/query/compile.go new file mode 100644 index 0000000..2f0f71c --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/query/compile.go @@ -0,0 +1,1195 @@ +package query + +import ( + "errors" + "fmt" + "strings" + "time" + + "github.com/influxdata/influxdb/models" + "github.com/influxdata/influxql" +) + +// CompileOptions are the customization options for the compiler. +type CompileOptions struct { + Now time.Time +} + +// Statement is a compiled query statement. +type Statement interface { + // Prepare prepares the statement by mapping shards and finishing the creation + // of the query plan. + Prepare(shardMapper ShardMapper, opt SelectOptions) (PreparedStatement, error) +} + +// compiledStatement represents a select statement that has undergone some initial processing to +// determine if it is valid and to have some initial modifications done on the AST. +type compiledStatement struct { + // Condition is the condition used for accessing data. + Condition influxql.Expr + + // TimeRange is the TimeRange for selecting data. + TimeRange influxql.TimeRange + + // Interval holds the time grouping interval. + Interval Interval + + // InheritedInterval marks if the interval was inherited by a parent. + // If this is set, then an interval that was inherited will not cause + // a query that shouldn't have an interval to fail. + InheritedInterval bool + + // ExtraIntervals is the number of extra intervals that will be read in addition + // to the TimeRange. It is a multiple of Interval and only applies to queries that + // have an Interval. It is used to extend the TimeRange of the mapped shards to + // include additional non-emitted intervals used by derivative and other functions. + // It will be set to the highest number of extra intervals that need to be read even + // if it doesn't apply to all functions. The number will always be positive. 
+ // This value may be set to a non-zero value even if there is no interval for the + // compiled query. + ExtraIntervals int + + // Ascending is true if the time ordering is ascending. + Ascending bool + + // FunctionCalls holds a reference to the call expression of every function + // call that has been encountered. + FunctionCalls []*influxql.Call + + // OnlySelectors is set to true when there are no aggregate functions. + OnlySelectors bool + + // HasDistinct is set when the distinct() function is encountered. + HasDistinct bool + + // FillOption contains the fill option for aggregates. + FillOption influxql.FillOption + + // TopBottomFunction is set to top or bottom when one of those functions are + // used in the statement. + TopBottomFunction string + + // HasAuxiliaryFields is true when the function requires auxiliary fields. + HasAuxiliaryFields bool + + // Fields holds all of the fields that will be used. + Fields []*compiledField + + // TimeFieldName stores the name of the time field's column. + // The column names generated by the compiler will not conflict with + // this name. + TimeFieldName string + + // Limit is the number of rows per series this query should be limited to. + Limit int + + // HasTarget is true if this query is being written into a target. + HasTarget bool + + // Options holds the configured compiler options. + Options CompileOptions + + stmt *influxql.SelectStatement +} + +func newCompiler(opt CompileOptions) *compiledStatement { + if opt.Now.IsZero() { + opt.Now = time.Now().UTC() + } + return &compiledStatement{ + OnlySelectors: true, + TimeFieldName: "time", + Options: opt, + } +} + +func Compile(stmt *influxql.SelectStatement, opt CompileOptions) (Statement, error) { + c := newCompiler(opt) + c.stmt = stmt.Clone() + if err := c.preprocess(c.stmt); err != nil { + return nil, err + } + if err := c.compile(c.stmt); err != nil { + return nil, err + } + c.stmt.TimeAlias = c.TimeFieldName + c.stmt.Condition = c.Condition + + // Convert DISTINCT into a call. + c.stmt.RewriteDistinct() + + // Remove "time" from fields list. + c.stmt.RewriteTimeFields() + + // Rewrite any regex conditions that could make use of the index. + c.stmt.RewriteRegexConditions() + return c, nil +} + +// preprocess retrieves and records the global attributes of the current statement. +func (c *compiledStatement) preprocess(stmt *influxql.SelectStatement) error { + c.Ascending = stmt.TimeAscending() + c.Limit = stmt.Limit + c.HasTarget = stmt.Target != nil + + valuer := influxql.NowValuer{Now: c.Options.Now, Location: stmt.Location} + cond, t, err := influxql.ConditionExpr(stmt.Condition, &valuer) + if err != nil { + return err + } + // Verify that the condition is actually ok to use. + if err := c.validateCondition(cond); err != nil { + return err + } + c.Condition = cond + c.TimeRange = t + + // Read the dimensions of the query, validate them, and retrieve the interval + // if it exists. + if err := c.compileDimensions(stmt); err != nil { + return err + } + + // Retrieve the fill option for the statement. + c.FillOption = stmt.Fill + + // Resolve the min and max times now that we know if there is an interval or not. + if c.TimeRange.Min.IsZero() { + c.TimeRange.Min = time.Unix(0, influxql.MinTime).UTC() + } + if c.TimeRange.Max.IsZero() { + // If the interval is non-zero, then we have an aggregate query and + // need to limit the maximum time to now() for backwards compatibility + // and usability. 
+ if !c.Interval.IsZero() { + c.TimeRange.Max = c.Options.Now + } else { + c.TimeRange.Max = time.Unix(0, influxql.MaxTime).UTC() + } + } + return nil +} + +func (c *compiledStatement) compile(stmt *influxql.SelectStatement) error { + if err := c.compileFields(stmt); err != nil { + return err + } + if err := c.validateFields(); err != nil { + return err + } + + // Look through the sources and compile each of the subqueries (if they exist). + // We do this after compiling the outside because subqueries may require + // inherited state. + for _, source := range stmt.Sources { + switch source := source.(type) { + case *influxql.SubQuery: + source.Statement.OmitTime = true + if err := c.subquery(source.Statement); err != nil { + return err + } + } + } + return nil +} + +func (c *compiledStatement) compileFields(stmt *influxql.SelectStatement) error { + valuer := MathValuer{} + + c.Fields = make([]*compiledField, 0, len(stmt.Fields)) + for _, f := range stmt.Fields { + // Remove any time selection (it is automatically selected by default) + // and set the time column name to the alias of the time field if it exists. + // Such as SELECT time, max(value) FROM cpu will be SELECT max(value) FROM cpu + // and SELECT time AS timestamp, max(value) FROM cpu will return "timestamp" + // as the column name for the time. + if ref, ok := f.Expr.(*influxql.VarRef); ok && ref.Val == "time" { + if f.Alias != "" { + c.TimeFieldName = f.Alias + } + continue + } + + // Append this field to the list of processed fields and compile it. + f.Expr = influxql.Reduce(f.Expr, &valuer) + field := &compiledField{ + global: c, + Field: f, + AllowWildcard: true, + } + c.Fields = append(c.Fields, field) + if err := field.compileExpr(field.Field.Expr); err != nil { + return err + } + } + return nil +} + +type compiledField struct { + // This holds the global state from the compiled statement. + global *compiledStatement + + // Field is the top level field that is being compiled. + Field *influxql.Field + + // AllowWildcard is set to true if a wildcard or regular expression is allowed. + AllowWildcard bool +} + +// compileExpr creates the node that executes the expression and connects that +// node to the WriteEdge as the output. +func (c *compiledField) compileExpr(expr influxql.Expr) error { + switch expr := expr.(type) { + case *influxql.VarRef: + // A bare variable reference will require auxiliary fields. + c.global.HasAuxiliaryFields = true + return nil + case *influxql.Wildcard: + // Wildcards use auxiliary fields. We assume there will be at least one + // expansion. + c.global.HasAuxiliaryFields = true + if !c.AllowWildcard { + return errors.New("unable to use wildcard in a binary expression") + } + return nil + case *influxql.RegexLiteral: + if !c.AllowWildcard { + return errors.New("unable to use regex in a binary expression") + } + c.global.HasAuxiliaryFields = true + return nil + case *influxql.Call: + if isMathFunction(expr) { + return c.compileMathFunction(expr) + } + + // Register the function call in the list of function calls. 
+ c.global.FunctionCalls = append(c.global.FunctionCalls, expr) + + switch expr.Name { + case "percentile": + return c.compilePercentile(expr.Args) + case "sample": + return c.compileSample(expr.Args) + case "distinct": + return c.compileDistinct(expr.Args, false) + case "top", "bottom": + return c.compileTopBottom(expr) + case "derivative", "non_negative_derivative": + isNonNegative := expr.Name == "non_negative_derivative" + return c.compileDerivative(expr.Args, isNonNegative) + case "difference", "non_negative_difference": + isNonNegative := expr.Name == "non_negative_difference" + return c.compileDifference(expr.Args, isNonNegative) + case "cumulative_sum": + return c.compileCumulativeSum(expr.Args) + case "moving_average": + return c.compileMovingAverage(expr.Args) + case "exponential_moving_average", "double_exponential_moving_average", "triple_exponential_moving_average", "relative_strength_index", "triple_exponential_derivative": + return c.compileExponentialMovingAverage(expr.Name, expr.Args) + case "kaufmans_efficiency_ratio", "kaufmans_adaptive_moving_average": + return c.compileKaufmans(expr.Name, expr.Args) + case "chande_momentum_oscillator": + return c.compileChandeMomentumOscillator(expr.Args) + case "elapsed": + return c.compileElapsed(expr.Args) + case "integral": + return c.compileIntegral(expr.Args) + case "holt_winters", "holt_winters_with_fit": + withFit := expr.Name == "holt_winters_with_fit" + return c.compileHoltWinters(expr.Args, withFit) + default: + return c.compileFunction(expr) + } + case *influxql.Distinct: + call := expr.NewCall() + c.global.FunctionCalls = append(c.global.FunctionCalls, call) + return c.compileDistinct(call.Args, false) + case *influxql.BinaryExpr: + // Disallow wildcards in binary expressions. RewriteFields, which expands + // wildcards, is too complicated if we allow wildcards inside of expressions. + c.AllowWildcard = false + + // Check if either side is a literal so we only compile one side if it is. + if _, ok := expr.LHS.(influxql.Literal); ok { + if _, ok := expr.RHS.(influxql.Literal); ok { + return errors.New("cannot perform a binary expression on two literals") + } + return c.compileExpr(expr.RHS) + } else if _, ok := expr.RHS.(influxql.Literal); ok { + return c.compileExpr(expr.LHS) + } else { + // Validate both sides of the expression. + if err := c.compileExpr(expr.LHS); err != nil { + return err + } + if err := c.compileExpr(expr.RHS); err != nil { + return err + } + return nil + } + case *influxql.ParenExpr: + return c.compileExpr(expr.Expr) + case influxql.Literal: + return errors.New("field must contain at least one variable") + } + return errors.New("unimplemented") +} + +// compileNestedExpr ensures that the expression is compiled as if it were +// a nested expression. +func (c *compiledField) compileNestedExpr(expr influxql.Expr) error { + // Intercept the distinct call so we can pass nested as true. + switch expr := expr.(type) { + case *influxql.Call: + if expr.Name == "distinct" { + return c.compileDistinct(expr.Args, true) + } + case *influxql.Distinct: + call := expr.NewCall() + return c.compileDistinct(call.Args, true) + } + return c.compileExpr(expr) +} + +func (c *compiledField) compileSymbol(name string, field influxql.Expr) error { + // Must be a variable reference, wildcard, or regexp. 
+ switch field.(type) { + case *influxql.VarRef: + return nil + case *influxql.Wildcard: + if !c.AllowWildcard { + return fmt.Errorf("unsupported expression with wildcard: %s()", name) + } + c.global.OnlySelectors = false + return nil + case *influxql.RegexLiteral: + if !c.AllowWildcard { + return fmt.Errorf("unsupported expression with regex field: %s()", name) + } + c.global.OnlySelectors = false + return nil + default: + return fmt.Errorf("expected field argument in %s()", name) + } +} + +func (c *compiledField) compileFunction(expr *influxql.Call) error { + // Validate the function call and mark down some meta properties + // related to the function for query validation. + switch expr.Name { + case "max", "min", "first", "last": + // top/bottom are not included here since they are not typical functions. + case "count", "sum", "mean", "median", "mode", "stddev", "spread": + // These functions are not considered selectors. + c.global.OnlySelectors = false + default: + return fmt.Errorf("undefined function %s()", expr.Name) + } + + if exp, got := 1, len(expr.Args); exp != got { + return fmt.Errorf("invalid number of arguments for %s, expected %d, got %d", expr.Name, exp, got) + } + + // If this is a call to count(), allow distinct() to be used as the function argument. + if expr.Name == "count" { + // If we have count(), the argument may be a distinct() call. + if arg0, ok := expr.Args[0].(*influxql.Call); ok && arg0.Name == "distinct" { + return c.compileDistinct(arg0.Args, true) + } else if arg0, ok := expr.Args[0].(*influxql.Distinct); ok { + call := arg0.NewCall() + return c.compileDistinct(call.Args, true) + } + } + return c.compileSymbol(expr.Name, expr.Args[0]) +} + +func (c *compiledField) compilePercentile(args []influxql.Expr) error { + if exp, got := 2, len(args); got != exp { + return fmt.Errorf("invalid number of arguments for percentile, expected %d, got %d", exp, got) + } + + switch args[1].(type) { + case *influxql.IntegerLiteral: + case *influxql.NumberLiteral: + default: + return fmt.Errorf("expected float argument in percentile()") + } + return c.compileSymbol("percentile", args[0]) +} + +func (c *compiledField) compileSample(args []influxql.Expr) error { + if exp, got := 2, len(args); got != exp { + return fmt.Errorf("invalid number of arguments for sample, expected %d, got %d", exp, got) + } + + switch arg1 := args[1].(type) { + case *influxql.IntegerLiteral: + if arg1.Val <= 0 { + return fmt.Errorf("sample window must be greater than 1, got %d", arg1.Val) + } + default: + return fmt.Errorf("expected integer argument in sample()") + } + return c.compileSymbol("sample", args[0]) +} + +func (c *compiledField) compileDerivative(args []influxql.Expr, isNonNegative bool) error { + name := "derivative" + if isNonNegative { + name = "non_negative_derivative" + } + + if min, max, got := 1, 2, len(args); got > max || got < min { + return fmt.Errorf("invalid number of arguments for %s, expected at least %d but no more than %d, got %d", name, min, max, got) + } + + // Retrieve the duration from the derivative() call, if specified. 
+ if len(args) == 2 { + switch arg1 := args[1].(type) { + case *influxql.DurationLiteral: + if arg1.Val <= 0 { + return fmt.Errorf("duration argument must be positive, got %s", influxql.FormatDuration(arg1.Val)) + } + default: + return fmt.Errorf("second argument to %s must be a duration, got %T", name, args[1]) + } + } + c.global.OnlySelectors = false + if c.global.ExtraIntervals < 1 { + c.global.ExtraIntervals = 1 + } + + // Must be a variable reference, function, wildcard, or regexp. + switch arg0 := args[0].(type) { + case *influxql.Call: + if c.global.Interval.IsZero() { + return fmt.Errorf("%s aggregate requires a GROUP BY interval", name) + } + return c.compileNestedExpr(arg0) + default: + if !c.global.Interval.IsZero() && !c.global.InheritedInterval { + return fmt.Errorf("aggregate function required inside the call to %s", name) + } + return c.compileSymbol(name, arg0) + } +} + +func (c *compiledField) compileElapsed(args []influxql.Expr) error { + if min, max, got := 1, 2, len(args); got > max || got < min { + return fmt.Errorf("invalid number of arguments for elapsed, expected at least %d but no more than %d, got %d", min, max, got) + } + + // Retrieve the duration from the elapsed() call, if specified. + if len(args) == 2 { + switch arg1 := args[1].(type) { + case *influxql.DurationLiteral: + if arg1.Val <= 0 { + return fmt.Errorf("duration argument must be positive, got %s", influxql.FormatDuration(arg1.Val)) + } + default: + return fmt.Errorf("second argument to elapsed must be a duration, got %T", args[1]) + } + } + c.global.OnlySelectors = false + if c.global.ExtraIntervals < 1 { + c.global.ExtraIntervals = 1 + } + + // Must be a variable reference, function, wildcard, or regexp. + switch arg0 := args[0].(type) { + case *influxql.Call: + if c.global.Interval.IsZero() { + return fmt.Errorf("elapsed aggregate requires a GROUP BY interval") + } + return c.compileNestedExpr(arg0) + default: + if !c.global.Interval.IsZero() && !c.global.InheritedInterval { + return fmt.Errorf("aggregate function required inside the call to elapsed") + } + return c.compileSymbol("elapsed", arg0) + } +} + +func (c *compiledField) compileDifference(args []influxql.Expr, isNonNegative bool) error { + name := "difference" + if isNonNegative { + name = "non_negative_difference" + } + + if got := len(args); got != 1 { + return fmt.Errorf("invalid number of arguments for %s, expected 1, got %d", name, got) + } + c.global.OnlySelectors = false + if c.global.ExtraIntervals < 1 { + c.global.ExtraIntervals = 1 + } + + // Must be a variable reference, function, wildcard, or regexp. + switch arg0 := args[0].(type) { + case *influxql.Call: + if c.global.Interval.IsZero() { + return fmt.Errorf("%s aggregate requires a GROUP BY interval", name) + } + return c.compileNestedExpr(arg0) + default: + if !c.global.Interval.IsZero() && !c.global.InheritedInterval { + return fmt.Errorf("aggregate function required inside the call to %s", name) + } + return c.compileSymbol(name, arg0) + } +} + +func (c *compiledField) compileCumulativeSum(args []influxql.Expr) error { + if got := len(args); got != 1 { + return fmt.Errorf("invalid number of arguments for cumulative_sum, expected 1, got %d", got) + } + c.global.OnlySelectors = false + if c.global.ExtraIntervals < 1 { + c.global.ExtraIntervals = 1 + } + + // Must be a variable reference, function, wildcard, or regexp. 
+ switch arg0 := args[0].(type) { + case *influxql.Call: + if c.global.Interval.IsZero() { + return fmt.Errorf("cumulative_sum aggregate requires a GROUP BY interval") + } + return c.compileNestedExpr(arg0) + default: + if !c.global.Interval.IsZero() && !c.global.InheritedInterval { + return fmt.Errorf("aggregate function required inside the call to cumulative_sum") + } + return c.compileSymbol("cumulative_sum", arg0) + } +} + +func (c *compiledField) compileMovingAverage(args []influxql.Expr) error { + if got := len(args); got != 2 { + return fmt.Errorf("invalid number of arguments for moving_average, expected 2, got %d", got) + } + + arg1, ok := args[1].(*influxql.IntegerLiteral) + if !ok { + return fmt.Errorf("second argument for moving_average must be an integer, got %T", args[1]) + } else if arg1.Val <= 1 { + return fmt.Errorf("moving_average window must be greater than 1, got %d", arg1.Val) + } + c.global.OnlySelectors = false + if c.global.ExtraIntervals < int(arg1.Val) { + c.global.ExtraIntervals = int(arg1.Val) + } + + // Must be a variable reference, function, wildcard, or regexp. + switch arg0 := args[0].(type) { + case *influxql.Call: + if c.global.Interval.IsZero() { + return fmt.Errorf("moving_average aggregate requires a GROUP BY interval") + } + return c.compileNestedExpr(arg0) + default: + if !c.global.Interval.IsZero() && !c.global.InheritedInterval { + return fmt.Errorf("aggregate function required inside the call to moving_average") + } + return c.compileSymbol("moving_average", arg0) + } +} + +func (c *compiledField) compileExponentialMovingAverage(name string, args []influxql.Expr) error { + if got := len(args); got < 2 || got > 4 { + return fmt.Errorf("invalid number of arguments for %s, expected at least 2 but no more than 4, got %d", name, got) + } + + arg1, ok := args[1].(*influxql.IntegerLiteral) + if !ok { + return fmt.Errorf("%s period must be an integer", name) + } else if arg1.Val < 1 { + return fmt.Errorf("%s period must be greater than or equal to 1", name) + } + + if len(args) >= 3 { + switch arg2 := args[2].(type) { + case *influxql.IntegerLiteral: + if name == "triple_exponential_derivative" && arg2.Val < 1 && arg2.Val != -1 { + return fmt.Errorf("%s hold period must be greater than or equal to 1", name) + } + if arg2.Val < 0 && arg2.Val != -1 { + return fmt.Errorf("%s hold period must be greater than or equal to 0", name) + } + default: + return fmt.Errorf("%s hold period must be an integer", name) + } + } + + if len(args) >= 4 { + switch arg3 := args[3].(type) { + case *influxql.StringLiteral: + switch arg3.Val { + case "exponential", "simple": + default: + return fmt.Errorf("%s warmup type must be one of: 'exponential' 'simple'", name) + } + default: + return fmt.Errorf("%s warmup type must be a string", name) + } + } + + c.global.OnlySelectors = false + if c.global.ExtraIntervals < int(arg1.Val) { + c.global.ExtraIntervals = int(arg1.Val) + } + + switch arg0 := args[0].(type) { + case *influxql.Call: + if c.global.Interval.IsZero() { + return fmt.Errorf("%s aggregate requires a GROUP BY interval", name) + } + return c.compileExpr(arg0) + default: + if !c.global.Interval.IsZero() && !c.global.InheritedInterval { + return fmt.Errorf("aggregate function required inside the call to %s", name) + } + return c.compileSymbol(name, arg0) + } +} + +func (c *compiledField) compileKaufmans(name string, args []influxql.Expr) error { + if got := len(args); got < 2 || got > 3 { + return fmt.Errorf("invalid number of arguments for %s, expected at least 2 but no more 
than 3, got %d", name, got) + } + + arg1, ok := args[1].(*influxql.IntegerLiteral) + if !ok { + return fmt.Errorf("%s period must be an integer", name) + } else if arg1.Val < 1 { + return fmt.Errorf("%s period must be greater than or equal to 1", name) + } + + if len(args) >= 3 { + switch arg2 := args[2].(type) { + case *influxql.IntegerLiteral: + if arg2.Val < 0 && arg2.Val != -1 { + return fmt.Errorf("%s hold period must be greater than or equal to 0", name) + } + default: + return fmt.Errorf("%s hold period must be an integer", name) + } + } + + c.global.OnlySelectors = false + if c.global.ExtraIntervals < int(arg1.Val) { + c.global.ExtraIntervals = int(arg1.Val) + } + + switch arg0 := args[0].(type) { + case *influxql.Call: + if c.global.Interval.IsZero() { + return fmt.Errorf("%s aggregate requires a GROUP BY interval", name) + } + return c.compileExpr(arg0) + default: + if !c.global.Interval.IsZero() && !c.global.InheritedInterval { + return fmt.Errorf("aggregate function required inside the call to %s", name) + } + return c.compileSymbol(name, arg0) + } +} + +func (c *compiledField) compileChandeMomentumOscillator(args []influxql.Expr) error { + if got := len(args); got < 2 || got > 4 { + return fmt.Errorf("invalid number of arguments for chande_momentum_oscillator, expected at least 2 but no more than 4, got %d", got) + } + + arg1, ok := args[1].(*influxql.IntegerLiteral) + if !ok { + return fmt.Errorf("chande_momentum_oscillator period must be an integer") + } else if arg1.Val < 1 { + return fmt.Errorf("chande_momentum_oscillator period must be greater than or equal to 1") + } + + if len(args) >= 3 { + switch arg2 := args[2].(type) { + case *influxql.IntegerLiteral: + if arg2.Val < 0 && arg2.Val != -1 { + return fmt.Errorf("chande_momentum_oscillator hold period must be greater than or equal to 0") + } + default: + return fmt.Errorf("chande_momentum_oscillator hold period must be an integer") + } + } + + c.global.OnlySelectors = false + if c.global.ExtraIntervals < int(arg1.Val) { + c.global.ExtraIntervals = int(arg1.Val) + } + + if len(args) >= 4 { + switch arg3 := args[3].(type) { + case *influxql.StringLiteral: + switch arg3.Val { + case "none", "exponential", "simple": + default: + return fmt.Errorf("chande_momentum_oscillator warmup type must be one of: 'none' 'exponential' 'simple'") + } + default: + return fmt.Errorf("chande_momentum_oscillator warmup type must be a string") + } + } + + switch arg0 := args[0].(type) { + case *influxql.Call: + if c.global.Interval.IsZero() { + return fmt.Errorf("chande_momentum_oscillator aggregate requires a GROUP BY interval") + } + return c.compileExpr(arg0) + default: + if !c.global.Interval.IsZero() && !c.global.InheritedInterval { + return fmt.Errorf("aggregate function required inside the call to chande_momentum_oscillator") + } + return c.compileSymbol("chande_momentum_oscillator", arg0) + } +} + +func (c *compiledField) compileIntegral(args []influxql.Expr) error { + if min, max, got := 1, 2, len(args); got > max || got < min { + return fmt.Errorf("invalid number of arguments for integral, expected at least %d but no more than %d, got %d", min, max, got) + } + + if len(args) == 2 { + switch arg1 := args[1].(type) { + case *influxql.DurationLiteral: + if arg1.Val <= 0 { + return fmt.Errorf("duration argument must be positive, got %s", influxql.FormatDuration(arg1.Val)) + } + default: + return errors.New("second argument must be a duration") + } + } + c.global.OnlySelectors = false + + // Must be a variable reference, wildcard, or 
regexp. + return c.compileSymbol("integral", args[0]) +} + +func (c *compiledField) compileHoltWinters(args []influxql.Expr, withFit bool) error { + name := "holt_winters" + if withFit { + name = "holt_winters_with_fit" + } + + if exp, got := 3, len(args); got != exp { + return fmt.Errorf("invalid number of arguments for %s, expected %d, got %d", name, exp, got) + } + + n, ok := args[1].(*influxql.IntegerLiteral) + if !ok { + return fmt.Errorf("expected integer argument as second arg in %s", name) + } else if n.Val <= 0 { + return fmt.Errorf("second arg to %s must be greater than 0, got %d", name, n.Val) + } + + s, ok := args[2].(*influxql.IntegerLiteral) + if !ok { + return fmt.Errorf("expected integer argument as third arg in %s", name) + } else if s.Val < 0 { + return fmt.Errorf("third arg to %s cannot be negative, got %d", name, s.Val) + } + c.global.OnlySelectors = false + + call, ok := args[0].(*influxql.Call) + if !ok { + return fmt.Errorf("must use aggregate function with %s", name) + } else if c.global.Interval.IsZero() { + return fmt.Errorf("%s aggregate requires a GROUP BY interval", name) + } + return c.compileNestedExpr(call) +} + +func (c *compiledField) compileDistinct(args []influxql.Expr, nested bool) error { + if len(args) == 0 { + return errors.New("distinct function requires at least one argument") + } else if len(args) != 1 { + return errors.New("distinct function can only have one argument") + } + + if _, ok := args[0].(*influxql.VarRef); !ok { + return errors.New("expected field argument in distinct()") + } + if !nested { + c.global.HasDistinct = true + } + c.global.OnlySelectors = false + return nil +} + +func (c *compiledField) compileTopBottom(call *influxql.Call) error { + if c.global.TopBottomFunction != "" { + return fmt.Errorf("selector function %s() cannot be combined with other functions", c.global.TopBottomFunction) + } + + if exp, got := 2, len(call.Args); got < exp { + return fmt.Errorf("invalid number of arguments for %s, expected at least %d, got %d", call.Name, exp, got) + } + + limit, ok := call.Args[len(call.Args)-1].(*influxql.IntegerLiteral) + if !ok { + return fmt.Errorf("expected integer as last argument in %s(), found %s", call.Name, call.Args[len(call.Args)-1]) + } else if limit.Val <= 0 { + return fmt.Errorf("limit (%d) in %s function must be at least 1", limit.Val, call.Name) + } else if c.global.Limit > 0 && int(limit.Val) > c.global.Limit { + return fmt.Errorf("limit (%d) in %s function can not be larger than the LIMIT (%d) in the select statement", limit.Val, call.Name, c.global.Limit) + } + + if _, ok := call.Args[0].(*influxql.VarRef); !ok { + return fmt.Errorf("expected first argument to be a field in %s(), found %s", call.Name, call.Args[0]) + } + + if len(call.Args) > 2 { + for _, v := range call.Args[1 : len(call.Args)-1] { + ref, ok := v.(*influxql.VarRef) + if !ok { + return fmt.Errorf("only fields or tags are allowed in %s(), found %s", call.Name, v) + } + + // Add a field for each of the listed dimensions when not writing the results. + if !c.global.HasTarget { + field := &compiledField{ + global: c.global, + Field: &influxql.Field{Expr: ref}, + } + c.global.Fields = append(c.global.Fields, field) + if err := field.compileExpr(ref); err != nil { + return err + } + } + } + } + c.global.TopBottomFunction = call.Name + return nil +} + +func (c *compiledField) compileMathFunction(expr *influxql.Call) error { + // How many arguments are we expecting? 
+ nargs := 1 + switch expr.Name { + case "atan2", "pow", "log": + nargs = 2 + } + + // Did we get the expected number of args? + if got := len(expr.Args); got != nargs { + return fmt.Errorf("invalid number of arguments for %s, expected %d, got %d", expr.Name, nargs, got) + } + + // Compile all the argument expressions that are not just literals. + for _, arg := range expr.Args { + if _, ok := arg.(influxql.Literal); ok { + continue + } + if err := c.compileExpr(arg); err != nil { + return err + } + } + return nil +} + +func (c *compiledStatement) compileDimensions(stmt *influxql.SelectStatement) error { + for _, d := range stmt.Dimensions { + // Reduce the expression before attempting anything. Do not evaluate the call. + expr := influxql.Reduce(d.Expr, nil) + + switch expr := expr.(type) { + case *influxql.VarRef: + if strings.ToLower(expr.Val) == "time" { + return errors.New("time() is a function and expects at least one argument") + } + case *influxql.Call: + // Ensure the call is time() and it has one or two duration arguments. + // If we already have a duration + if expr.Name != "time" { + return errors.New("only time() calls allowed in dimensions") + } else if got := len(expr.Args); got < 1 || got > 2 { + return errors.New("time dimension expected 1 or 2 arguments") + } else if lit, ok := expr.Args[0].(*influxql.DurationLiteral); !ok { + return errors.New("time dimension must have duration argument") + } else if c.Interval.Duration != 0 { + return errors.New("multiple time dimensions not allowed") + } else { + c.Interval.Duration = lit.Val + if len(expr.Args) == 2 { + switch lit := expr.Args[1].(type) { + case *influxql.DurationLiteral: + c.Interval.Offset = lit.Val % c.Interval.Duration + case *influxql.TimeLiteral: + c.Interval.Offset = lit.Val.Sub(lit.Val.Truncate(c.Interval.Duration)) + case *influxql.Call: + if lit.Name != "now" { + return errors.New("time dimension offset function must be now()") + } else if len(lit.Args) != 0 { + return errors.New("time dimension offset now() function requires no arguments") + } + now := c.Options.Now + c.Interval.Offset = now.Sub(now.Truncate(c.Interval.Duration)) + + // Use the evaluated offset to replace the argument. Ideally, we would + // use the interval assigned above, but the query engine hasn't been changed + // to use the compiler information yet. + expr.Args[1] = &influxql.DurationLiteral{Val: c.Interval.Offset} + case *influxql.StringLiteral: + // If literal looks like a date time then parse it as a time literal. + if lit.IsTimeLiteral() { + t, err := lit.ToTimeLiteral(stmt.Location) + if err != nil { + return err + } + c.Interval.Offset = t.Val.Sub(t.Val.Truncate(c.Interval.Duration)) + } else { + return errors.New("time dimension offset must be duration or now()") + } + default: + return errors.New("time dimension offset must be duration or now()") + } + } + } + case *influxql.Wildcard: + case *influxql.RegexLiteral: + default: + return errors.New("only time and tag dimensions allowed") + } + + // Assign the reduced/changed expression to the dimension. + d.Expr = expr + } + return nil +} + +// validateFields validates that the fields are mutually compatible with each other. +// This runs at the end of compilation but before linking. +func (c *compiledStatement) validateFields() error { + // Validate that at least one field has been selected. + if len(c.Fields) == 0 { + return errors.New("at least 1 non-time field must be queried") + } + // Ensure there are not multiple calls if top/bottom is present. 
+ if len(c.FunctionCalls) > 1 && c.TopBottomFunction != "" { + return fmt.Errorf("selector function %s() cannot be combined with other functions", c.TopBottomFunction) + } else if len(c.FunctionCalls) == 0 { + switch c.FillOption { + case influxql.NoFill: + return errors.New("fill(none) must be used with a function") + case influxql.LinearFill: + return errors.New("fill(linear) must be used with a function") + } + if !c.Interval.IsZero() && !c.InheritedInterval { + return errors.New("GROUP BY requires at least one aggregate function") + } + } + // If a distinct() call is present, ensure there is exactly one function. + if c.HasDistinct && (len(c.FunctionCalls) != 1 || c.HasAuxiliaryFields) { + return errors.New("aggregate function distinct() cannot be combined with other functions or fields") + } + // Validate we are using a selector or raw query if auxiliary fields are required. + if c.HasAuxiliaryFields { + if !c.OnlySelectors { + return fmt.Errorf("mixing aggregate and non-aggregate queries is not supported") + } else if len(c.FunctionCalls) > 1 { + return fmt.Errorf("mixing multiple selector functions with tags or fields is not supported") + } + } + return nil +} + +// validateCondition verifies that all elements in the condition are appropriate. +// For example, aggregate calls don't work in the condition and should throw an +// error as an invalid expression. +func (c *compiledStatement) validateCondition(expr influxql.Expr) error { + switch expr := expr.(type) { + case *influxql.BinaryExpr: + // Verify each side of the binary expression. We do not need to + // verify the binary expression itself since that should have been + // done by influxql.ConditionExpr. + if err := c.validateCondition(expr.LHS); err != nil { + return err + } + if err := c.validateCondition(expr.RHS); err != nil { + return err + } + return nil + case *influxql.Call: + if !isMathFunction(expr) { + return fmt.Errorf("invalid function call in condition: %s", expr) + } + + // How many arguments are we expecting? + nargs := 1 + switch expr.Name { + case "atan2", "pow": + nargs = 2 + } + + // Did we get the expected number of args? + if got := len(expr.Args); got != nargs { + return fmt.Errorf("invalid number of arguments for %s, expected %d, got %d", expr.Name, nargs, got) + } + + // Are all the args valid? + for _, arg := range expr.Args { + if err := c.validateCondition(arg); err != nil { + return err + } + } + return nil + default: + return nil + } +} + +// subquery compiles and validates a compiled statement for the subquery using +// this compiledStatement as the parent. +func (c *compiledStatement) subquery(stmt *influxql.SelectStatement) error { + subquery := newCompiler(c.Options) + if err := subquery.preprocess(stmt); err != nil { + return err + } + + // Substitute now() into the subquery condition. Then use ConditionExpr to + // validate the expression. Do not store the results. We have no way to store + // and read those results at the moment. + valuer := influxql.MultiValuer( + &influxql.NowValuer{Now: c.Options.Now, Location: stmt.Location}, + &MathValuer{}, + ) + stmt.Condition = influxql.Reduce(stmt.Condition, valuer) + + // If the ordering is different and the sort field was specified for the subquery, + // throw an error. + if len(stmt.SortFields) != 0 && subquery.Ascending != c.Ascending { + return errors.New("subqueries must be ordered in the same direction as the query itself") + } + subquery.Ascending = c.Ascending + + // Find the intersection between this time range and the parent. 
+ // If the subquery doesn't have a time range, this causes it to + // inherit the parent's time range. + subquery.TimeRange = subquery.TimeRange.Intersect(c.TimeRange) + + // If the fill option is null, set it to none so we don't waste time on + // null values with a redundant fill iterator. + if !subquery.Interval.IsZero() && subquery.FillOption == influxql.NullFill { + subquery.FillOption = influxql.NoFill + } + + // Inherit the grouping interval if the subquery has none. + if !c.Interval.IsZero() && subquery.Interval.IsZero() { + subquery.Interval = c.Interval + subquery.InheritedInterval = true + } + return subquery.compile(stmt) +} + +func (c *compiledStatement) Prepare(shardMapper ShardMapper, sopt SelectOptions) (PreparedStatement, error) { + // If this is a query with a grouping, there is a bucket limit, and the minimum time has not been specified, + // we need to limit the possible time range that can be used when mapping shards but not when actually executing + // the select statement. Determine the shard time range here. + timeRange := c.TimeRange + if sopt.MaxBucketsN > 0 && !c.stmt.IsRawQuery && timeRange.MinTimeNano() == influxql.MinTime { + interval, err := c.stmt.GroupByInterval() + if err != nil { + return nil, err + } + + offset, err := c.stmt.GroupByOffset() + if err != nil { + return nil, err + } + + if interval > 0 { + // Determine the last bucket using the end time. + opt := IteratorOptions{ + Interval: Interval{ + Duration: interval, + Offset: offset, + }, + } + last, _ := opt.Window(c.TimeRange.MaxTimeNano() - 1) + + // Determine the time difference using the number of buckets. + // Determine the maximum difference between the buckets based on the end time. + maxDiff := last - models.MinNanoTime + if maxDiff/int64(interval) > int64(sopt.MaxBucketsN) { + timeRange.Min = time.Unix(0, models.MinNanoTime) + } else { + timeRange.Min = time.Unix(0, last-int64(interval)*int64(sopt.MaxBucketsN-1)) + } + } + } + + // Modify the time range if there are extra intervals and an interval. + if !c.Interval.IsZero() && c.ExtraIntervals > 0 { + if c.Ascending { + timeRange.Min = timeRange.Min.Add(time.Duration(-c.ExtraIntervals) * c.Interval.Duration) + } else { + timeRange.Max = timeRange.Max.Add(time.Duration(c.ExtraIntervals) * c.Interval.Duration) + } + } + + // Create an iterator creator based on the shards in the cluster. + shards, err := shardMapper.MapShards(c.stmt.Sources, timeRange, sopt) + if err != nil { + return nil, err + } + + // Rewrite wildcards, if any exist. + mapper := FieldMapper{FieldMapper: shards} + stmt, err := c.stmt.RewriteFields(mapper) + if err != nil { + shards.Close() + return nil, err + } + + // Validate if the types are correct now that they have been assigned. + if err := validateTypes(stmt); err != nil { + shards.Close() + return nil, err + } + + // Determine base options for iterators. + opt, err := newIteratorOptionsStmt(stmt, sopt) + if err != nil { + shards.Close() + return nil, err + } + opt.StartTime, opt.EndTime = c.TimeRange.MinTimeNano(), c.TimeRange.MaxTimeNano() + opt.Ascending = c.Ascending + + if sopt.MaxBucketsN > 0 && !stmt.IsRawQuery && c.TimeRange.MinTimeNano() > influxql.MinTime { + interval, err := stmt.GroupByInterval() + if err != nil { + shards.Close() + return nil, err + } + + if interval > 0 { + // Determine the start and end time matched to the interval (may not match the actual times). 
+ first, _ := opt.Window(opt.StartTime) + last, _ := opt.Window(opt.EndTime - 1) + + // Determine the number of buckets by finding the time span and dividing by the interval. + buckets := (last - first + int64(interval)) / int64(interval) + if int(buckets) > sopt.MaxBucketsN { + shards.Close() + return nil, fmt.Errorf("max-select-buckets limit exceeded: (%d/%d)", buckets, sopt.MaxBucketsN) + } + } + } + + columns := stmt.ColumnNames() + return &preparedStatement{ + stmt: stmt, + opt: opt, + ic: shards, + columns: columns, + maxPointN: sopt.MaxPointN, + now: c.Options.Now, + }, nil +} diff --git a/vendor/github.com/influxdata/influxdb/query/compile_test.go b/vendor/github.com/influxdata/influxdb/query/compile_test.go new file mode 100644 index 0000000..43ee88c --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/query/compile_test.go @@ -0,0 +1,433 @@ +package query_test + +import ( + "testing" + + "github.com/influxdata/influxdb/query" + "github.com/influxdata/influxql" +) + +func TestCompile_Success(t *testing.T) { + for _, tt := range []string{ + `SELECT time, value FROM cpu`, + `SELECT value FROM cpu`, + `SELECT value, host FROM cpu`, + `SELECT * FROM cpu`, + `SELECT time, * FROM cpu`, + `SELECT value, * FROM cpu`, + `SELECT max(value) FROM cpu`, + `SELECT max(value), host FROM cpu`, + `SELECT max(value), * FROM cpu`, + `SELECT max(*) FROM cpu`, + `SELECT max(/val/) FROM cpu`, + `SELECT min(value) FROM cpu`, + `SELECT min(value), host FROM cpu`, + `SELECT min(value), * FROM cpu`, + `SELECT min(*) FROM cpu`, + `SELECT min(/val/) FROM cpu`, + `SELECT first(value) FROM cpu`, + `SELECT first(value), host FROM cpu`, + `SELECT first(value), * FROM cpu`, + `SELECT first(*) FROM cpu`, + `SELECT first(/val/) FROM cpu`, + `SELECT last(value) FROM cpu`, + `SELECT last(value), host FROM cpu`, + `SELECT last(value), * FROM cpu`, + `SELECT last(*) FROM cpu`, + `SELECT last(/val/) FROM cpu`, + `SELECT count(value) FROM cpu`, + `SELECT count(distinct(value)) FROM cpu`, + `SELECT count(distinct value) FROM cpu`, + `SELECT count(*) FROM cpu`, + `SELECT count(/val/) FROM cpu`, + `SELECT mean(value) FROM cpu`, + `SELECT mean(*) FROM cpu`, + `SELECT mean(/val/) FROM cpu`, + `SELECT min(value), max(value) FROM cpu`, + `SELECT min(*), max(*) FROM cpu`, + `SELECT min(/val/), max(/val/) FROM cpu`, + `SELECT first(value), last(value) FROM cpu`, + `SELECT first(*), last(*) FROM cpu`, + `SELECT first(/val/), last(/val/) FROM cpu`, + `SELECT count(value) FROM cpu WHERE time >= now() - 1h GROUP BY time(10m)`, + `SELECT distinct value FROM cpu`, + `SELECT distinct(value) FROM cpu`, + `SELECT value / total FROM cpu`, + `SELECT min(value) / total FROM cpu`, + `SELECT max(value) / total FROM cpu`, + `SELECT top(value, 1) FROM cpu`, + `SELECT top(value, host, 1) FROM cpu`, + `SELECT top(value, 1), host FROM cpu`, + `SELECT min(top) FROM (SELECT top(value, host, 1) FROM cpu) GROUP BY region`, + `SELECT bottom(value, 1) FROM cpu`, + `SELECT bottom(value, host, 1) FROM cpu`, + `SELECT bottom(value, 1), host FROM cpu`, + `SELECT max(bottom) FROM (SELECT bottom(value, host, 1) FROM cpu) GROUP BY region`, + `SELECT percentile(value, 75) FROM cpu`, + `SELECT percentile(value, 75.0) FROM cpu`, + `SELECT sample(value, 2) FROM cpu`, + `SELECT sample(*, 2) FROM cpu`, + `SELECT sample(/val/, 2) FROM cpu`, + `SELECT elapsed(value) FROM cpu`, + `SELECT elapsed(value, 10s) FROM cpu`, + `SELECT integral(value) FROM cpu`, + `SELECT integral(value, 10s) FROM cpu`, + `SELECT max(value) FROM cpu WHERE time >= now() - 1m GROUP BY 
time(10s, 5s)`, + `SELECT max(value) FROM cpu WHERE time >= now() - 1m GROUP BY time(10s, '2000-01-01T00:00:05Z')`, + `SELECT max(value) FROM cpu WHERE time >= now() - 1m GROUP BY time(10s, now())`, + `SELECT max(mean) FROM (SELECT mean(value) FROM cpu GROUP BY host)`, + `SELECT max(derivative) FROM (SELECT derivative(mean(value)) FROM cpu) WHERE time >= now() - 1m GROUP BY time(10s)`, + `SELECT max(value) FROM (SELECT value + total FROM cpu) WHERE time >= now() - 1m GROUP BY time(10s)`, + `SELECT value FROM cpu WHERE time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T01:00:00Z'`, + `SELECT value FROM (SELECT value FROM cpu) ORDER BY time DESC`, + `SELECT count(distinct(value)), max(value) FROM cpu`, + `SELECT derivative(distinct(value)), difference(distinct(value)) FROM cpu WHERE time >= now() - 1m GROUP BY time(5s)`, + `SELECT moving_average(distinct(value), 3) FROM cpu WHERE time >= now() - 5m GROUP BY time(1m)`, + `SELECT elapsed(distinct(value)) FROM cpu WHERE time >= now() - 5m GROUP BY time(1m)`, + `SELECT cumulative_sum(distinct(value)) FROM cpu WHERE time >= now() - 5m GROUP BY time(1m)`, + `SELECT last(value) / (1 - 0) FROM cpu`, + `SELECT abs(value) FROM cpu`, + `SELECT sin(value) FROM cpu`, + `SELECT cos(value) FROM cpu`, + `SELECT tan(value) FROM cpu`, + `SELECT asin(value) FROM cpu`, + `SELECT acos(value) FROM cpu`, + `SELECT atan(value) FROM cpu`, + `SELECT sqrt(value) FROM cpu`, + `SELECT pow(value, 2) FROM cpu`, + `SELECT pow(value, 3.14) FROM cpu`, + `SELECT pow(2, value) FROM cpu`, + `SELECT pow(3.14, value) FROM cpu`, + `SELECT exp(value) FROM cpu`, + `SELECT atan2(value, 0.1) FROM cpu`, + `SELECT atan2(0.2, value) FROM cpu`, + `SELECT atan2(value, 1) FROM cpu`, + `SELECT atan2(2, value) FROM cpu`, + `SELECT ln(value) FROM cpu`, + `SELECT log(value, 2) FROM cpu`, + `SELECT log2(value) FROM cpu`, + `SELECT log10(value) FROM cpu`, + `SELECT sin(value) - sin(1.3) FROM cpu`, + `SELECT value FROM cpu WHERE sin(value) > 0.5`, + `SELECT sum("out")/sum("in") FROM (SELECT derivative("out") AS "out", derivative("in") AS "in" FROM "m0" WHERE time >= now() - 5m GROUP BY "index") GROUP BY time(1m) fill(none)`, + } { + t.Run(tt, func(t *testing.T) { + stmt, err := influxql.ParseStatement(tt) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + s := stmt.(*influxql.SelectStatement) + + opt := query.CompileOptions{} + if _, err := query.Compile(s, opt); err != nil { + t.Errorf("unexpected error: %s", err) + } + }) + } +} + +func TestCompile_Failures(t *testing.T) { + for _, tt := range []struct { + s string + err string + }{ + {s: `SELECT time FROM cpu`, err: `at least 1 non-time field must be queried`}, + {s: `SELECT value, mean(value) FROM cpu`, err: `mixing aggregate and non-aggregate queries is not supported`}, + {s: `SELECT value, max(value), min(value) FROM cpu`, err: `mixing multiple selector functions with tags or fields is not supported`}, + {s: `SELECT top(value, 10), max(value) FROM cpu`, err: `selector function top() cannot be combined with other functions`}, + {s: `SELECT bottom(value, 10), max(value) FROM cpu`, err: `selector function bottom() cannot be combined with other functions`}, + {s: `SELECT count() FROM cpu`, err: `invalid number of arguments for count, expected 1, got 0`}, + {s: `SELECT count(value, host) FROM cpu`, err: `invalid number of arguments for count, expected 1, got 2`}, + {s: `SELECT min() FROM cpu`, err: `invalid number of arguments for min, expected 1, got 0`}, + {s: `SELECT min(value, host) FROM cpu`, err: `invalid number of 
arguments for min, expected 1, got 2`}, + {s: `SELECT max() FROM cpu`, err: `invalid number of arguments for max, expected 1, got 0`}, + {s: `SELECT max(value, host) FROM cpu`, err: `invalid number of arguments for max, expected 1, got 2`}, + {s: `SELECT sum() FROM cpu`, err: `invalid number of arguments for sum, expected 1, got 0`}, + {s: `SELECT sum(value, host) FROM cpu`, err: `invalid number of arguments for sum, expected 1, got 2`}, + {s: `SELECT first() FROM cpu`, err: `invalid number of arguments for first, expected 1, got 0`}, + {s: `SELECT first(value, host) FROM cpu`, err: `invalid number of arguments for first, expected 1, got 2`}, + {s: `SELECT last() FROM cpu`, err: `invalid number of arguments for last, expected 1, got 0`}, + {s: `SELECT last(value, host) FROM cpu`, err: `invalid number of arguments for last, expected 1, got 2`}, + {s: `SELECT mean() FROM cpu`, err: `invalid number of arguments for mean, expected 1, got 0`}, + {s: `SELECT mean(value, host) FROM cpu`, err: `invalid number of arguments for mean, expected 1, got 2`}, + {s: `SELECT distinct(value), max(value) FROM cpu`, err: `aggregate function distinct() cannot be combined with other functions or fields`}, + {s: `SELECT count(distinct()) FROM cpu`, err: `distinct function requires at least one argument`}, + {s: `SELECT count(distinct(value, host)) FROM cpu`, err: `distinct function can only have one argument`}, + {s: `SELECT count(distinct(2)) FROM cpu`, err: `expected field argument in distinct()`}, + {s: `SELECT value FROM cpu GROUP BY now()`, err: `only time() calls allowed in dimensions`}, + {s: `SELECT value FROM cpu GROUP BY time()`, err: `time dimension expected 1 or 2 arguments`}, + {s: `SELECT value FROM cpu GROUP BY time(5m, 30s, 1ms)`, err: `time dimension expected 1 or 2 arguments`}, + {s: `SELECT value FROM cpu GROUP BY time('unexpected')`, err: `time dimension must have duration argument`}, + {s: `SELECT value FROM cpu GROUP BY time(5m), time(1m)`, err: `multiple time dimensions not allowed`}, + {s: `SELECT value FROM cpu GROUP BY time(5m, unexpected())`, err: `time dimension offset function must be now()`}, + {s: `SELECT value FROM cpu GROUP BY time(5m, now(1m))`, err: `time dimension offset now() function requires no arguments`}, + {s: `SELECT value FROM cpu GROUP BY time(5m, 'unexpected')`, err: `time dimension offset must be duration or now()`}, + {s: `SELECT value FROM cpu GROUP BY 'unexpected'`, err: `only time and tag dimensions allowed`}, + {s: `SELECT top(value) FROM cpu`, err: `invalid number of arguments for top, expected at least 2, got 1`}, + {s: `SELECT top('unexpected', 5) FROM cpu`, err: `expected first argument to be a field in top(), found 'unexpected'`}, + {s: `SELECT top(value, 'unexpected', 5) FROM cpu`, err: `only fields or tags are allowed in top(), found 'unexpected'`}, + {s: `SELECT top(value, 2.5) FROM cpu`, err: `expected integer as last argument in top(), found 2.500`}, + {s: `SELECT top(value, -1) FROM cpu`, err: `limit (-1) in top function must be at least 1`}, + {s: `SELECT top(value, 3) FROM cpu LIMIT 2`, err: `limit (3) in top function can not be larger than the LIMIT (2) in the select statement`}, + {s: `SELECT bottom(value) FROM cpu`, err: `invalid number of arguments for bottom, expected at least 2, got 1`}, + {s: `SELECT bottom('unexpected', 5) FROM cpu`, err: `expected first argument to be a field in bottom(), found 'unexpected'`}, + {s: `SELECT bottom(value, 'unexpected', 5) FROM cpu`, err: `only fields or tags are allowed in bottom(), found 'unexpected'`}, + 
{s: `SELECT bottom(value, 2.5) FROM cpu`, err: `expected integer as last argument in bottom(), found 2.500`}, + {s: `SELECT bottom(value, -1) FROM cpu`, err: `limit (-1) in bottom function must be at least 1`}, + {s: `SELECT bottom(value, 3) FROM cpu LIMIT 2`, err: `limit (3) in bottom function can not be larger than the LIMIT (2) in the select statement`}, + // TODO(jsternberg): This query is wrong, but we cannot enforce this because of previous behavior: https://github.com/influxdata/influxdb/pull/8771 + //{s: `SELECT value FROM cpu WHERE time >= now() - 10m OR time < now() - 5m`, err: `cannot use OR with time conditions`}, + {s: `SELECT value FROM cpu WHERE value`, err: `invalid condition expression: value`}, + {s: `SELECT count(value), * FROM cpu`, err: `mixing aggregate and non-aggregate queries is not supported`}, + {s: `SELECT max(*), host FROM cpu`, err: `mixing aggregate and non-aggregate queries is not supported`}, + {s: `SELECT count(value), /ho/ FROM cpu`, err: `mixing aggregate and non-aggregate queries is not supported`}, + {s: `SELECT max(/val/), * FROM cpu`, err: `mixing aggregate and non-aggregate queries is not supported`}, + {s: `SELECT a(value) FROM cpu`, err: `undefined function a()`}, + {s: `SELECT count(max(value)) FROM myseries`, err: `expected field argument in count()`}, + {s: `SELECT count(distinct('value')) FROM myseries`, err: `expected field argument in distinct()`}, + {s: `SELECT distinct('value') FROM myseries`, err: `expected field argument in distinct()`}, + {s: `SELECT min(max(value)) FROM myseries`, err: `expected field argument in min()`}, + {s: `SELECT min(distinct(value)) FROM myseries`, err: `expected field argument in min()`}, + {s: `SELECT max(max(value)) FROM myseries`, err: `expected field argument in max()`}, + {s: `SELECT sum(max(value)) FROM myseries`, err: `expected field argument in sum()`}, + {s: `SELECT first(max(value)) FROM myseries`, err: `expected field argument in first()`}, + {s: `SELECT last(max(value)) FROM myseries`, err: `expected field argument in last()`}, + {s: `SELECT mean(max(value)) FROM myseries`, err: `expected field argument in mean()`}, + {s: `SELECT median(max(value)) FROM myseries`, err: `expected field argument in median()`}, + {s: `SELECT mode(max(value)) FROM myseries`, err: `expected field argument in mode()`}, + {s: `SELECT stddev(max(value)) FROM myseries`, err: `expected field argument in stddev()`}, + {s: `SELECT spread(max(value)) FROM myseries`, err: `expected field argument in spread()`}, + {s: `SELECT top() FROM myseries`, err: `invalid number of arguments for top, expected at least 2, got 0`}, + {s: `SELECT top(field1) FROM myseries`, err: `invalid number of arguments for top, expected at least 2, got 1`}, + {s: `SELECT top(field1,foo) FROM myseries`, err: `expected integer as last argument in top(), found foo`}, + {s: `SELECT top(field1,host,'server',foo) FROM myseries`, err: `expected integer as last argument in top(), found foo`}, + {s: `SELECT top(field1,5,'server',2) FROM myseries`, err: `only fields or tags are allowed in top(), found 5`}, + {s: `SELECT top(field1,max(foo),'server',2) FROM myseries`, err: `only fields or tags are allowed in top(), found max(foo)`}, + {s: `SELECT top(value, 10) + count(value) FROM myseries`, err: `selector function top() cannot be combined with other functions`}, + {s: `SELECT top(max(value), 10) FROM myseries`, err: `expected first argument to be a field in top(), found max(value)`}, + {s: `SELECT bottom() FROM myseries`, err: `invalid number of arguments for 
bottom, expected at least 2, got 0`}, + {s: `SELECT bottom(field1) FROM myseries`, err: `invalid number of arguments for bottom, expected at least 2, got 1`}, + {s: `SELECT bottom(field1,foo) FROM myseries`, err: `expected integer as last argument in bottom(), found foo`}, + {s: `SELECT bottom(field1,host,'server',foo) FROM myseries`, err: `expected integer as last argument in bottom(), found foo`}, + {s: `SELECT bottom(field1,5,'server',2) FROM myseries`, err: `only fields or tags are allowed in bottom(), found 5`}, + {s: `SELECT bottom(field1,max(foo),'server',2) FROM myseries`, err: `only fields or tags are allowed in bottom(), found max(foo)`}, + {s: `SELECT bottom(value, 10) + count(value) FROM myseries`, err: `selector function bottom() cannot be combined with other functions`}, + {s: `SELECT bottom(max(value), 10) FROM myseries`, err: `expected first argument to be a field in bottom(), found max(value)`}, + {s: `SELECT top(value, 10), bottom(value, 10) FROM cpu`, err: `selector function top() cannot be combined with other functions`}, + {s: `SELECT bottom(value, 10), top(value, 10) FROM cpu`, err: `selector function bottom() cannot be combined with other functions`}, + {s: `SELECT sample(value) FROM myseries`, err: `invalid number of arguments for sample, expected 2, got 1`}, + {s: `SELECT sample(value, 2, 3) FROM myseries`, err: `invalid number of arguments for sample, expected 2, got 3`}, + {s: `SELECT sample(value, 0) FROM myseries`, err: `sample window must be greater than 1, got 0`}, + {s: `SELECT sample(value, 2.5) FROM myseries`, err: `expected integer argument in sample()`}, + {s: `SELECT percentile() FROM myseries`, err: `invalid number of arguments for percentile, expected 2, got 0`}, + {s: `SELECT percentile(field1) FROM myseries`, err: `invalid number of arguments for percentile, expected 2, got 1`}, + {s: `SELECT percentile(field1, foo) FROM myseries`, err: `expected float argument in percentile()`}, + {s: `SELECT percentile(max(field1), 75) FROM myseries`, err: `expected field argument in percentile()`}, + {s: `SELECT field1 FROM foo group by time(1s)`, err: `GROUP BY requires at least one aggregate function`}, + {s: `SELECT field1 FROM foo fill(none)`, err: `fill(none) must be used with a function`}, + {s: `SELECT field1 FROM foo fill(linear)`, err: `fill(linear) must be used with a function`}, + {s: `SELECT count(value), value FROM foo`, err: `mixing aggregate and non-aggregate queries is not supported`}, + {s: `SELECT count(value) FROM foo group by time`, err: `time() is a function and expects at least one argument`}, + {s: `SELECT count(value) FROM foo group by 'time'`, err: `only time and tag dimensions allowed`}, + {s: `SELECT count(value) FROM foo where time > now() and time < now() group by time()`, err: `time dimension expected 1 or 2 arguments`}, + {s: `SELECT count(value) FROM foo where time > now() and time < now() group by time(b)`, err: `time dimension must have duration argument`}, + {s: `SELECT count(value) FROM foo where time > now() and time < now() group by time(1s), time(2s)`, err: `multiple time dimensions not allowed`}, + {s: `SELECT count(value) FROM foo where time > now() and time < now() group by time(1s, b)`, err: `time dimension offset must be duration or now()`}, + {s: `SELECT count(value) FROM foo where time > now() and time < now() group by time(1s, '5s')`, err: `time dimension offset must be duration or now()`}, + {s: `SELECT distinct(field1), sum(field1) FROM myseries`, err: `aggregate function distinct() cannot be combined with other 
functions or fields`}, + {s: `SELECT distinct(field1), field2 FROM myseries`, err: `aggregate function distinct() cannot be combined with other functions or fields`}, + {s: `SELECT distinct(field1, field2) FROM myseries`, err: `distinct function can only have one argument`}, + {s: `SELECT distinct() FROM myseries`, err: `distinct function requires at least one argument`}, + {s: `SELECT distinct field1, field2 FROM myseries`, err: `aggregate function distinct() cannot be combined with other functions or fields`}, + {s: `SELECT count(distinct field1, field2) FROM myseries`, err: `invalid number of arguments for count, expected 1, got 2`}, + {s: `select count(distinct(too, many, arguments)) from myseries`, err: `distinct function can only have one argument`}, + {s: `select count() from myseries`, err: `invalid number of arguments for count, expected 1, got 0`}, + {s: `SELECT derivative(field1), field1 FROM myseries`, err: `mixing aggregate and non-aggregate queries is not supported`}, + {s: `select derivative() from myseries`, err: `invalid number of arguments for derivative, expected at least 1 but no more than 2, got 0`}, + {s: `select derivative(mean(value), 1h, 3) from myseries`, err: `invalid number of arguments for derivative, expected at least 1 but no more than 2, got 3`}, + {s: `SELECT derivative(value) FROM myseries group by time(1h)`, err: `aggregate function required inside the call to derivative`}, + {s: `SELECT derivative(top(value)) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for top, expected at least 2, got 1`}, + {s: `SELECT derivative(bottom(value)) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for bottom, expected at least 2, got 1`}, + {s: `SELECT derivative(max()) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for max, expected 1, got 0`}, + {s: `SELECT derivative(percentile(value)) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for percentile, expected 2, got 1`}, + {s: `SELECT derivative(mean(value), 1h) FROM myseries where time < now() and time > now() - 1d`, err: `derivative aggregate requires a GROUP BY interval`}, + {s: `SELECT derivative(value, -2h) FROM myseries`, err: `duration argument must be positive, got -2h`}, + {s: `SELECT derivative(value, 10) FROM myseries`, err: `second argument to derivative must be a duration, got *influxql.IntegerLiteral`}, + {s: `SELECT derivative(f, true) FROM myseries`, err: `second argument to derivative must be a duration, got *influxql.BooleanLiteral`}, + {s: `SELECT non_negative_derivative(field1), field1 FROM myseries`, err: `mixing aggregate and non-aggregate queries is not supported`}, + {s: `select non_negative_derivative() from myseries`, err: `invalid number of arguments for non_negative_derivative, expected at least 1 but no more than 2, got 0`}, + {s: `select non_negative_derivative(mean(value), 1h, 3) from myseries`, err: `invalid number of arguments for non_negative_derivative, expected at least 1 but no more than 2, got 3`}, + {s: `SELECT non_negative_derivative(value) FROM myseries group by time(1h)`, err: `aggregate function required inside the call to non_negative_derivative`}, + {s: `SELECT non_negative_derivative(top(value)) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for top, expected at least 2, 
got 1`}, + {s: `SELECT non_negative_derivative(bottom(value)) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for bottom, expected at least 2, got 1`}, + {s: `SELECT non_negative_derivative(max()) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for max, expected 1, got 0`}, + {s: `SELECT non_negative_derivative(mean(value), 1h) FROM myseries where time < now() and time > now() - 1d`, err: `non_negative_derivative aggregate requires a GROUP BY interval`}, + {s: `SELECT non_negative_derivative(percentile(value)) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for percentile, expected 2, got 1`}, + {s: `SELECT non_negative_derivative(value, -2h) FROM myseries`, err: `duration argument must be positive, got -2h`}, + {s: `SELECT non_negative_derivative(value, 10) FROM myseries`, err: `second argument to non_negative_derivative must be a duration, got *influxql.IntegerLiteral`}, + {s: `SELECT difference(field1), field1 FROM myseries`, err: `mixing aggregate and non-aggregate queries is not supported`}, + {s: `SELECT difference() from myseries`, err: `invalid number of arguments for difference, expected 1, got 0`}, + {s: `SELECT difference(value) FROM myseries group by time(1h)`, err: `aggregate function required inside the call to difference`}, + {s: `SELECT difference(top(value)) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for top, expected at least 2, got 1`}, + {s: `SELECT difference(bottom(value)) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for bottom, expected at least 2, got 1`}, + {s: `SELECT difference(max()) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for max, expected 1, got 0`}, + {s: `SELECT difference(percentile(value)) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for percentile, expected 2, got 1`}, + {s: `SELECT difference(mean(value)) FROM myseries where time < now() and time > now() - 1d`, err: `difference aggregate requires a GROUP BY interval`}, + {s: `SELECT non_negative_difference(field1), field1 FROM myseries`, err: `mixing aggregate and non-aggregate queries is not supported`}, + {s: `SELECT non_negative_difference() from myseries`, err: `invalid number of arguments for non_negative_difference, expected 1, got 0`}, + {s: `SELECT non_negative_difference(value) FROM myseries group by time(1h)`, err: `aggregate function required inside the call to non_negative_difference`}, + {s: `SELECT non_negative_difference(top(value)) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for top, expected at least 2, got 1`}, + {s: `SELECT non_negative_difference(bottom(value)) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for bottom, expected at least 2, got 1`}, + {s: `SELECT non_negative_difference(max()) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for max, expected 1, got 0`}, + {s: `SELECT non_negative_difference(percentile(value)) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for percentile, expected 2, got 
1`}, + {s: `SELECT non_negative_difference(mean(value)) FROM myseries where time < now() and time > now() - 1d`, err: `non_negative_difference aggregate requires a GROUP BY interval`}, + {s: `SELECT elapsed() FROM myseries`, err: `invalid number of arguments for elapsed, expected at least 1 but no more than 2, got 0`}, + {s: `SELECT elapsed(value) FROM myseries group by time(1h)`, err: `aggregate function required inside the call to elapsed`}, + {s: `SELECT elapsed(value, 1s, host) FROM myseries`, err: `invalid number of arguments for elapsed, expected at least 1 but no more than 2, got 3`}, + {s: `SELECT elapsed(value, 0s) FROM myseries`, err: `duration argument must be positive, got 0s`}, + {s: `SELECT elapsed(value, -10s) FROM myseries`, err: `duration argument must be positive, got -10s`}, + {s: `SELECT elapsed(value, 10) FROM myseries`, err: `second argument to elapsed must be a duration, got *influxql.IntegerLiteral`}, + {s: `SELECT elapsed(top(value)) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for top, expected at least 2, got 1`}, + {s: `SELECT elapsed(bottom(value)) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for bottom, expected at least 2, got 1`}, + {s: `SELECT elapsed(max()) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for max, expected 1, got 0`}, + {s: `SELECT elapsed(percentile(value)) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for percentile, expected 2, got 1`}, + {s: `SELECT elapsed(mean(value)) FROM myseries where time < now() and time > now() - 1d`, err: `elapsed aggregate requires a GROUP BY interval`}, + {s: `SELECT moving_average(field1, 2), field1 FROM myseries`, err: `mixing aggregate and non-aggregate queries is not supported`}, + {s: `SELECT moving_average(field1, 1), field1 FROM myseries`, err: `moving_average window must be greater than 1, got 1`}, + {s: `SELECT moving_average(field1, 0), field1 FROM myseries`, err: `moving_average window must be greater than 1, got 0`}, + {s: `SELECT moving_average(field1, -1), field1 FROM myseries`, err: `moving_average window must be greater than 1, got -1`}, + {s: `SELECT moving_average(field1, 2.0), field1 FROM myseries`, err: `second argument for moving_average must be an integer, got *influxql.NumberLiteral`}, + {s: `SELECT moving_average() from myseries`, err: `invalid number of arguments for moving_average, expected 2, got 0`}, + {s: `SELECT moving_average(value) FROM myseries`, err: `invalid number of arguments for moving_average, expected 2, got 1`}, + {s: `SELECT moving_average(value, 2) FROM myseries group by time(1h)`, err: `aggregate function required inside the call to moving_average`}, + {s: `SELECT moving_average(top(value), 2) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for top, expected at least 2, got 1`}, + {s: `SELECT moving_average(bottom(value), 2) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for bottom, expected at least 2, got 1`}, + {s: `SELECT moving_average(max(), 2) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for max, expected 1, got 0`}, + {s: `SELECT moving_average(percentile(value), 2) FROM myseries where time < now() and time > now() - 1d group 
by time(1h)`, err: `invalid number of arguments for percentile, expected 2, got 1`}, + {s: `SELECT moving_average(mean(value), 2) FROM myseries where time < now() and time > now() - 1d`, err: `moving_average aggregate requires a GROUP BY interval`}, + {s: `SELECT cumulative_sum(field1), field1 FROM myseries`, err: `mixing aggregate and non-aggregate queries is not supported`}, + {s: `SELECT cumulative_sum() from myseries`, err: `invalid number of arguments for cumulative_sum, expected 1, got 0`}, + {s: `SELECT cumulative_sum(value) FROM myseries group by time(1h)`, err: `aggregate function required inside the call to cumulative_sum`}, + {s: `SELECT cumulative_sum(top(value)) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for top, expected at least 2, got 1`}, + {s: `SELECT cumulative_sum(bottom(value)) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for bottom, expected at least 2, got 1`}, + {s: `SELECT cumulative_sum(max()) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for max, expected 1, got 0`}, + {s: `SELECT cumulative_sum(percentile(value)) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for percentile, expected 2, got 1`}, + {s: `SELECT cumulative_sum(mean(value)) FROM myseries where time < now() and time > now() - 1d`, err: `cumulative_sum aggregate requires a GROUP BY interval`}, + {s: `SELECT integral() FROM myseries`, err: `invalid number of arguments for integral, expected at least 1 but no more than 2, got 0`}, + {s: `SELECT integral(value, 10s, host) FROM myseries`, err: `invalid number of arguments for integral, expected at least 1 but no more than 2, got 3`}, + {s: `SELECT integral(value, -10s) FROM myseries`, err: `duration argument must be positive, got -10s`}, + {s: `SELECT integral(value, 10) FROM myseries`, err: `second argument must be a duration`}, + {s: `SELECT holt_winters(value) FROM myseries where time < now() and time > now() - 1d`, err: `invalid number of arguments for holt_winters, expected 3, got 1`}, + {s: `SELECT holt_winters(value, 10, 2) FROM myseries where time < now() and time > now() - 1d`, err: `must use aggregate function with holt_winters`}, + {s: `SELECT holt_winters(min(value), 10, 2) FROM myseries where time < now() and time > now() - 1d`, err: `holt_winters aggregate requires a GROUP BY interval`}, + {s: `SELECT holt_winters(min(value), 0, 2) FROM myseries where time < now() and time > now() - 1d GROUP BY time(1d)`, err: `second arg to holt_winters must be greater than 0, got 0`}, + {s: `SELECT holt_winters(min(value), false, 2) FROM myseries where time < now() and time > now() - 1d GROUP BY time(1d)`, err: `expected integer argument as second arg in holt_winters`}, + {s: `SELECT holt_winters(min(value), 10, 'string') FROM myseries where time < now() and time > now() - 1d GROUP BY time(1d)`, err: `expected integer argument as third arg in holt_winters`}, + {s: `SELECT holt_winters(min(value), 10, -1) FROM myseries where time < now() and time > now() - 1d GROUP BY time(1d)`, err: `third arg to holt_winters cannot be negative, got -1`}, + {s: `SELECT holt_winters_with_fit(value) FROM myseries where time < now() and time > now() - 1d`, err: `invalid number of arguments for holt_winters_with_fit, expected 3, got 1`}, + {s: `SELECT holt_winters_with_fit(value, 10, 2) FROM myseries where time < now() and 
time > now() - 1d`, err: `must use aggregate function with holt_winters_with_fit`}, + {s: `SELECT holt_winters_with_fit(min(value), 10, 2) FROM myseries where time < now() and time > now() - 1d`, err: `holt_winters_with_fit aggregate requires a GROUP BY interval`}, + {s: `SELECT holt_winters_with_fit(min(value), 0, 2) FROM myseries where time < now() and time > now() - 1d GROUP BY time(1d)`, err: `second arg to holt_winters_with_fit must be greater than 0, got 0`}, + {s: `SELECT holt_winters_with_fit(min(value), false, 2) FROM myseries where time < now() and time > now() - 1d GROUP BY time(1d)`, err: `expected integer argument as second arg in holt_winters_with_fit`}, + {s: `SELECT holt_winters_with_fit(min(value), 10, 'string') FROM myseries where time < now() and time > now() - 1d GROUP BY time(1d)`, err: `expected integer argument as third arg in holt_winters_with_fit`}, + {s: `SELECT holt_winters_with_fit(min(value), 10, -1) FROM myseries where time < now() and time > now() - 1d GROUP BY time(1d)`, err: `third arg to holt_winters_with_fit cannot be negative, got -1`}, + {s: `SELECT mean(value) + value FROM cpu WHERE time < now() and time > now() - 1h GROUP BY time(10m)`, err: `mixing aggregate and non-aggregate queries is not supported`}, + // TODO: Remove this restriction in the future: https://github.com/influxdata/influxdb/issues/5968 + {s: `SELECT mean(cpu_total - cpu_idle) FROM cpu`, err: `expected field argument in mean()`}, + {s: `SELECT derivative(mean(cpu_total - cpu_idle), 1s) FROM cpu WHERE time < now() AND time > now() - 1d GROUP BY time(1h)`, err: `expected field argument in mean()`}, + // TODO: The error message will change when math is allowed inside an aggregate: https://github.com/influxdata/influxdb/pull/5990#issuecomment-195565870 + {s: `SELECT count(foo + sum(bar)) FROM cpu`, err: `expected field argument in count()`}, + {s: `SELECT (count(foo + sum(bar))) FROM cpu`, err: `expected field argument in count()`}, + {s: `SELECT sum(value) + count(foo + sum(bar)) FROM cpu`, err: `expected field argument in count()`}, + {s: `SELECT top(value, 2), max(value) FROM cpu`, err: `selector function top() cannot be combined with other functions`}, + {s: `SELECT bottom(value, 2), max(value) FROM cpu`, err: `selector function bottom() cannot be combined with other functions`}, + {s: `SELECT min(derivative) FROM (SELECT derivative(mean(value), 1h) FROM myseries) where time < now() and time > now() - 1d`, err: `derivative aggregate requires a GROUP BY interval`}, + {s: `SELECT min(mean) FROM (SELECT mean(value) FROM myseries GROUP BY time)`, err: `time() is a function and expects at least one argument`}, + {s: `SELECT value FROM myseries WHERE value OR time >= now() - 1m`, err: `invalid condition expression: value`}, + {s: `SELECT value FROM myseries WHERE time >= now() - 1m OR value`, err: `invalid condition expression: value`}, + {s: `SELECT value FROM (SELECT value FROM cpu ORDER BY time DESC) ORDER BY time ASC`, err: `subqueries must be ordered in the same direction as the query itself`}, + {s: `SELECT sin(value, 3) FROM cpu`, err: `invalid number of arguments for sin, expected 1, got 2`}, + {s: `SELECT cos(2.3, value, 3) FROM cpu`, err: `invalid number of arguments for cos, expected 1, got 3`}, + {s: `SELECT tan(value, 3) FROM cpu`, err: `invalid number of arguments for tan, expected 1, got 2`}, + {s: `SELECT asin(value, 3) FROM cpu`, err: `invalid number of arguments for asin, expected 1, got 2`}, + {s: `SELECT acos(value, 3.2) FROM cpu`, err: `invalid number of arguments for 
acos, expected 1, got 2`}, + {s: `SELECT atan() FROM cpu`, err: `invalid number of arguments for atan, expected 1, got 0`}, + {s: `SELECT sqrt(42, 3, 4) FROM cpu`, err: `invalid number of arguments for sqrt, expected 1, got 3`}, + {s: `SELECT abs(value, 3) FROM cpu`, err: `invalid number of arguments for abs, expected 1, got 2`}, + {s: `SELECT ln(value, 3) FROM cpu`, err: `invalid number of arguments for ln, expected 1, got 2`}, + {s: `SELECT log2(value, 3) FROM cpu`, err: `invalid number of arguments for log2, expected 1, got 2`}, + {s: `SELECT log10(value, 3) FROM cpu`, err: `invalid number of arguments for log10, expected 1, got 2`}, + {s: `SELECT pow(value, 3, 3) FROM cpu`, err: `invalid number of arguments for pow, expected 2, got 3`}, + {s: `SELECT atan2(value, 3, 3) FROM cpu`, err: `invalid number of arguments for atan2, expected 2, got 3`}, + {s: `SELECT sin(1.3) FROM cpu`, err: `field must contain at least one variable`}, + {s: `SELECT nofunc(1.3) FROM cpu`, err: `undefined function nofunc()`}, + } { + t.Run(tt.s, func(t *testing.T) { + stmt, err := influxql.ParseStatement(tt.s) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + s := stmt.(*influxql.SelectStatement) + + opt := query.CompileOptions{} + if _, err := query.Compile(s, opt); err == nil { + t.Error("expected error") + } else if have, want := err.Error(), tt.err; have != want { + t.Errorf("unexpected error: %s != %s", have, want) + } + }) + } +} + +func TestPrepare_MapShardsTimeRange(t *testing.T) { + for _, tt := range []struct { + s string + start, end string + }{ + { + s: `SELECT max(value) FROM cpu WHERE time >= '2018-09-03T15:00:00Z' AND time <= '2018-09-03T16:00:00Z' GROUP BY time(10m)`, + start: "2018-09-03T15:00:00Z", + end: "2018-09-03T16:00:00Z", + }, + { + s: `SELECT derivative(mean(value)) FROM cpu WHERE time >= '2018-09-03T15:00:00Z' AND time <= '2018-09-03T16:00:00Z' GROUP BY time(10m)`, + start: "2018-09-03T14:50:00Z", + end: "2018-09-03T16:00:00Z", + }, + { + s: `SELECT moving_average(mean(value), 3) FROM cpu WHERE time >= '2018-09-03T15:00:00Z' AND time <= '2018-09-03T16:00:00Z' GROUP BY time(10m)`, + start: "2018-09-03T14:30:00Z", + end: "2018-09-03T16:00:00Z", + }, + } { + t.Run(tt.s, func(t *testing.T) { + stmt, err := influxql.ParseStatement(tt.s) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + s := stmt.(*influxql.SelectStatement) + + opt := query.CompileOptions{} + c, err := query.Compile(s, opt) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + shardMapper := ShardMapper{ + MapShardsFn: func(_ influxql.Sources, tr influxql.TimeRange) query.ShardGroup { + if got, want := tr.Min, mustParseTime(tt.start); !got.Equal(want) { + t.Errorf("unexpected start time: got=%s want=%s", got, want) + } + if got, want := tr.Max, mustParseTime(tt.end); !got.Equal(want) { + t.Errorf("unexpected end time: got=%s want=%s", got, want) + } + return &ShardGroup{} + }, + } + + if _, err := c.Prepare(&shardMapper, query.SelectOptions{}); err != nil { + t.Fatalf("unexpected error: %s", err) + } + }) + } +} diff --git a/vendor/github.com/influxdata/influxdb/query/cursor.go b/vendor/github.com/influxdata/influxdb/query/cursor.go new file mode 100644 index 0000000..e59605e --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/query/cursor.go @@ -0,0 +1,432 @@ +package query + +import ( + "math" + "time" + + "github.com/influxdata/influxql" +) + +var NullFloat interface{} = (*float64)(nil) + +// Series represents the metadata about a series. 
+type Series struct { + // Name is the measurement name. + Name string + + // Tags for the series. + Tags Tags + + // This is an internal id used to easily compare if a series is the + // same as another series. Whenever the internal cursor changes + // to a new series, this id gets incremented. It is not exposed to + // the user so we can implement this in whatever way we want. + // If a series is not generated by a cursor, this id is zero and + // it will instead attempt to compare the name and tags. + id uint64 +} + +// SameSeries checks if this is the same series as another one. +// It does not necessarily check for equality so this is different from +// checking to see if the name and tags are the same. It checks whether +// the two are part of the same series in the response. +func (s Series) SameSeries(other Series) bool { + if s.id != 0 && other.id != 0 { + return s.id == other.id + } + return s.Name == other.Name && s.Tags.ID() == other.Tags.ID() +} + +// Equal checks to see if the Series are identical. +func (s Series) Equal(other Series) bool { + if s.id != 0 && other.id != 0 { + // If the ids are the same, then we can short-circuit and assume they + // are the same. If they are not the same, do the long check since + // they may still be identical, but not necessarily generated from + // the same cursor. + if s.id == other.id { + return true + } + } + return s.Name == other.Name && s.Tags.ID() == other.Tags.ID() +} + +// Row represents a single row returned by the query engine. +type Row struct { + // Time returns the time for this row. If the cursor was created to + // return time as one of the values, the time will also be included as + // a time.Time in the appropriate column within Values. + // This ensures that time is always present in the Row structure + // even if it hasn't been requested in the output. + Time int64 + + // Series contains the series metadata for this row. + Series Series + + // Values contains the values within the current row. + Values []interface{} +} + +type Cursor interface { + // Scan will retrieve the next row and assign the result to + // the passed in Row. If the Row has not been initialized, the Cursor + // will initialize the Row. + // To increase speed and memory usage, the same Row can be used and + // the previous values will be overwritten while using the same memory. + Scan(row *Row) bool + + // Stats returns the IteratorStats from the underlying iterators. + Stats() IteratorStats + + // Err returns any errors that were encountered from scanning the rows. + Err() error + + // Columns returns the column names and types. + Columns() []influxql.VarRef + + // Close closes the underlying resources that the cursor is using. + Close() error +} + +// RowCursor returns a Cursor that iterates over Rows. 
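+// Editorial note: the following usage sketch is an illustration added to this
+// patch, not upstream documentation. Any Cursor (including the one returned
+// by RowCursor) is consumed by scanning rows until Scan reports false,
+// checking Err, and closing the cursor, mirroring DrainCursor further below:
+//
+//	var row Row
+//	for cur.Scan(&row) {
+//		// row.Time, row.Series and row.Values describe the current row.
+//	}
+//	if err := cur.Err(); err != nil {
+//		// handle the scan error
+//	}
+//	cur.Close()
+//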
+func RowCursor(rows []Row, columns []influxql.VarRef) Cursor { + return &rowCursor{ + rows: rows, + columns: columns, + } +} + +type rowCursor struct { + rows []Row + columns []influxql.VarRef + + series Series +} + +func (cur *rowCursor) Scan(row *Row) bool { + if len(cur.rows) == 0 { + return false + } + + *row = cur.rows[0] + if row.Series.Name != cur.series.Name || !row.Series.Tags.Equals(&cur.series.Tags) { + cur.series.Name = row.Series.Name + cur.series.Tags = row.Series.Tags + cur.series.id++ + } + cur.rows = cur.rows[1:] + return true +} + +func (cur *rowCursor) Stats() IteratorStats { + return IteratorStats{} +} + +func (cur *rowCursor) Err() error { + return nil +} + +func (cur *rowCursor) Columns() []influxql.VarRef { + return cur.columns +} + +func (cur *rowCursor) Close() error { + return nil +} + +type scannerFunc func(m map[string]interface{}) (int64, string, Tags) + +type scannerCursorBase struct { + fields []influxql.Expr + m map[string]interface{} + + series Series + columns []influxql.VarRef + loc *time.Location + + scan scannerFunc +} + +func newScannerCursorBase(scan scannerFunc, fields []*influxql.Field, loc *time.Location) scannerCursorBase { + typmap := FunctionTypeMapper{} + exprs := make([]influxql.Expr, len(fields)) + columns := make([]influxql.VarRef, len(fields)) + for i, f := range fields { + exprs[i] = f.Expr + columns[i] = influxql.VarRef{ + Val: f.Name(), + Type: influxql.EvalType(f.Expr, nil, typmap), + } + } + if loc == nil { + loc = time.UTC + } + + return scannerCursorBase{ + fields: exprs, + m: make(map[string]interface{}), + columns: columns, + loc: loc, + scan: scan, + } +} + +func (cur *scannerCursorBase) Scan(row *Row) bool { + ts, name, tags := cur.scan(cur.m) + if ts == ZeroTime { + return false + } + + row.Time = ts + if name != cur.series.Name || tags.ID() != cur.series.Tags.ID() { + cur.series.Name = name + cur.series.Tags = tags + cur.series.id++ + } + row.Series = cur.series + + if len(cur.columns) > len(row.Values) { + row.Values = make([]interface{}, len(cur.columns)) + } + + valuer := influxql.ValuerEval{ + Valuer: influxql.MultiValuer( + MathValuer{}, + influxql.MapValuer(cur.m), + ), + IntegerFloatDivision: true, + } + for i, expr := range cur.fields { + // A special case if the field is time to reduce memory allocations. + if ref, ok := expr.(*influxql.VarRef); ok && ref.Val == "time" { + row.Values[i] = time.Unix(0, row.Time).In(cur.loc) + continue + } + v := valuer.Eval(expr) + if fv, ok := v.(float64); ok && math.IsNaN(fv) { + // If the float value is NaN, convert it to a null float + // so this can be serialized correctly, but not mistaken for + // a null value that needs to be filled. 
+ v = NullFloat + } + row.Values[i] = v + } + return true +} + +func (cur *scannerCursorBase) Columns() []influxql.VarRef { + return cur.columns +} + +var _ Cursor = (*scannerCursor)(nil) + +type scannerCursor struct { + scanner IteratorScanner + scannerCursorBase +} + +func newScannerCursor(s IteratorScanner, fields []*influxql.Field, opt IteratorOptions) *scannerCursor { + cur := &scannerCursor{scanner: s} + cur.scannerCursorBase = newScannerCursorBase(cur.scan, fields, opt.Location) + return cur +} + +func (s *scannerCursor) scan(m map[string]interface{}) (int64, string, Tags) { + ts, name, tags := s.scanner.Peek() + if ts == ZeroTime { + return ts, name, tags + } + s.scanner.ScanAt(ts, name, tags, m) + return ts, name, tags +} + +func (cur *scannerCursor) Stats() IteratorStats { + return cur.scanner.Stats() +} + +func (cur *scannerCursor) Err() error { + return cur.scanner.Err() +} + +func (cur *scannerCursor) Close() error { + return cur.scanner.Close() +} + +var _ Cursor = (*multiScannerCursor)(nil) + +type multiScannerCursor struct { + scanners []IteratorScanner + err error + ascending bool + scannerCursorBase +} + +func newMultiScannerCursor(scanners []IteratorScanner, fields []*influxql.Field, opt IteratorOptions) *multiScannerCursor { + cur := &multiScannerCursor{ + scanners: scanners, + ascending: opt.Ascending, + } + cur.scannerCursorBase = newScannerCursorBase(cur.scan, fields, opt.Location) + return cur +} + +func (cur *multiScannerCursor) scan(m map[string]interface{}) (ts int64, name string, tags Tags) { + ts = ZeroTime + for _, s := range cur.scanners { + curTime, curName, curTags := s.Peek() + if curTime == ZeroTime { + if err := s.Err(); err != nil { + cur.err = err + return ZeroTime, "", Tags{} + } + continue + } + + if ts == ZeroTime { + ts, name, tags = curTime, curName, curTags + continue + } + + if cur.ascending { + if (curName < name) || (curName == name && curTags.ID() < tags.ID()) || (curName == name && curTags.ID() == tags.ID() && curTime < ts) { + ts, name, tags = curTime, curName, curTags + } + continue + } + + if (curName > name) || (curName == name && curTags.ID() > tags.ID()) || (curName == name && curTags.ID() == tags.ID() && curTime > ts) { + ts, name, tags = curTime, curName, curTags + } + } + + if ts == ZeroTime { + return ts, name, tags + } + + for _, s := range cur.scanners { + s.ScanAt(ts, name, tags, m) + } + return ts, name, tags +} + +func (cur *multiScannerCursor) Stats() IteratorStats { + var stats IteratorStats + for _, s := range cur.scanners { + stats.Add(s.Stats()) + } + return stats +} + +func (cur *multiScannerCursor) Err() error { + return cur.err +} + +func (cur *multiScannerCursor) Close() error { + var err error + for _, s := range cur.scanners { + if e := s.Close(); e != nil && err == nil { + err = e + } + } + return err +} + +type filterCursor struct { + Cursor + // fields holds the mapping of field names to the index in the row + // based off of the column metadata. This only contains the fields + // we need and will exclude the ones we do not. + fields map[string]IteratorMap + filter influxql.Expr + m map[string]interface{} +} + +func newFilterCursor(cur Cursor, filter influxql.Expr) *filterCursor { + fields := make(map[string]IteratorMap) + for _, name := range influxql.ExprNames(filter) { + for i, col := range cur.Columns() { + if name.Val == col.Val { + fields[name.Val] = FieldMap{ + Index: i, + Type: name.Type, + } + break + } + } + + // If the field is not a column, assume it is a tag value. 
+ // We do not know what the tag values will be, but there really + // isn't any different between NullMap and a TagMap that's pointed + // at the wrong location for the purposes described here. + if _, ok := fields[name.Val]; !ok { + fields[name.Val] = TagMap(name.Val) + } + } + return &filterCursor{ + Cursor: cur, + fields: fields, + filter: filter, + m: make(map[string]interface{}), + } +} + +func (cur *filterCursor) Scan(row *Row) bool { + for cur.Cursor.Scan(row) { + // Use the field mappings to prepare the map for the valuer. + for name, f := range cur.fields { + cur.m[name] = f.Value(row) + } + + valuer := influxql.ValuerEval{ + Valuer: influxql.MapValuer(cur.m), + } + if valuer.EvalBool(cur.filter) { + // Passes the filter! Return true. We no longer need to + // search for a suitable value. + return true + } + } + return false +} + +type nullCursor struct { + columns []influxql.VarRef +} + +func newNullCursor(fields []*influxql.Field) *nullCursor { + columns := make([]influxql.VarRef, len(fields)) + for i, f := range fields { + columns[i].Val = f.Name() + } + return &nullCursor{columns: columns} +} + +func (cur *nullCursor) Scan(row *Row) bool { + return false +} + +func (cur *nullCursor) Stats() IteratorStats { + return IteratorStats{} +} + +func (cur *nullCursor) Err() error { + return nil +} + +func (cur *nullCursor) Columns() []influxql.VarRef { + return cur.columns +} + +func (cur *nullCursor) Close() error { + return nil +} + +// DrainCursor will read and discard all values from a Cursor and return the error +// if one happens. +func DrainCursor(cur Cursor) error { + var row Row + for cur.Scan(&row) { + // Do nothing with the result. + } + return cur.Err() +} diff --git a/vendor/github.com/influxdata/influxdb/query/emitter.go b/vendor/github.com/influxdata/influxdb/query/emitter.go new file mode 100644 index 0000000..1888824 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/query/emitter.go @@ -0,0 +1,81 @@ +package query + +import ( + "github.com/influxdata/influxdb/models" +) + +// Emitter reads from a cursor into rows. +type Emitter struct { + cur Cursor + chunkSize int + + series Series + row *models.Row + columns []string +} + +// NewEmitter returns a new instance of Emitter that pulls from itrs. +func NewEmitter(cur Cursor, chunkSize int) *Emitter { + columns := make([]string, len(cur.Columns())) + for i, col := range cur.Columns() { + columns[i] = col.Val + } + return &Emitter{ + cur: cur, + chunkSize: chunkSize, + columns: columns, + } +} + +// Close closes the underlying iterators. +func (e *Emitter) Close() error { + return e.cur.Close() +} + +// Emit returns the next row from the iterators. +func (e *Emitter) Emit() (*models.Row, bool, error) { + // Continually read from the cursor until it is exhausted. + for { + // Scan the next row. If there are no rows left, return the current row. + var row Row + if !e.cur.Scan(&row) { + if err := e.cur.Err(); err != nil { + return nil, false, err + } + r := e.row + e.row = nil + return r, false, nil + } + + // If there's no row yet then create one. + // If the name and tags match the existing row, append to that row if + // the number of values doesn't exceed the chunk size. + // Otherwise return existing row and add values to next emitted row. 
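+		// Editorial note (an illustration inferred from the branch below, not
+		// upstream commentary): with a chunkSize of 2, three scanned rows from
+		// the same series come back as a first emitted row carrying two values
+		// with Partial set to true, followed by a row carrying the remaining value.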
+ if e.row == nil { + e.createRow(row.Series, row.Values) + } else if e.series.SameSeries(row.Series) { + if e.chunkSize > 0 && len(e.row.Values) >= e.chunkSize { + r := e.row + r.Partial = true + e.createRow(row.Series, row.Values) + return r, true, nil + } + e.row.Values = append(e.row.Values, row.Values) + } else { + r := e.row + e.createRow(row.Series, row.Values) + return r, true, nil + } + } +} + +// createRow creates a new row attached to the emitter. +func (e *Emitter) createRow(series Series, values []interface{}) { + e.series = series + e.row = &models.Row{ + Name: series.Name, + Tags: series.Tags.KeyValues(), + Columns: e.columns, + Values: [][]interface{}{values}, + } +} diff --git a/vendor/github.com/influxdata/influxdb/query/execution_context.go b/vendor/github.com/influxdata/influxdb/query/execution_context.go new file mode 100644 index 0000000..a3d7d26 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/query/execution_context.go @@ -0,0 +1,113 @@ +package query + +import ( + "context" + "sync" +) + +// ExecutionContext contains state that the query is currently executing with. +type ExecutionContext struct { + context.Context + + // The statement ID of the executing query. + statementID int + + // The query ID of the executing query. + QueryID uint64 + + // The query task information available to the StatementExecutor. + task *Task + + // Output channel where results and errors should be sent. + Results chan *Result + + // Options used to start this query. + ExecutionOptions + + mu sync.RWMutex + done chan struct{} + err error +} + +func (ctx *ExecutionContext) watch() { + ctx.done = make(chan struct{}) + if ctx.err != nil { + close(ctx.done) + return + } + + go func() { + defer close(ctx.done) + + var taskCtx <-chan struct{} + if ctx.task != nil { + taskCtx = ctx.task.closing + } + + select { + case <-taskCtx: + ctx.err = ctx.task.Error() + if ctx.err == nil { + ctx.err = ErrQueryInterrupted + } + case <-ctx.AbortCh: + ctx.err = ErrQueryAborted + case <-ctx.Context.Done(): + ctx.err = ctx.Context.Err() + } + }() +} + +func (ctx *ExecutionContext) Done() <-chan struct{} { + ctx.mu.RLock() + if ctx.done != nil { + defer ctx.mu.RUnlock() + return ctx.done + } + ctx.mu.RUnlock() + + ctx.mu.Lock() + defer ctx.mu.Unlock() + if ctx.done == nil { + ctx.watch() + } + return ctx.done +} + +func (ctx *ExecutionContext) Err() error { + ctx.mu.RLock() + defer ctx.mu.RUnlock() + return ctx.err +} + +func (ctx *ExecutionContext) Value(key interface{}) interface{} { + switch key { + case monitorContextKey: + return ctx.task + } + return ctx.Context.Value(key) +} + +// send sends a Result to the Results channel and will exit if the query has +// been aborted. +func (ctx *ExecutionContext) send(result *Result) error { + result.StatementID = ctx.statementID + select { + case <-ctx.AbortCh: + return ErrQueryAborted + case ctx.Results <- result: + } + return nil +} + +// Send sends a Result to the Results channel and will exit if the query has +// been interrupted or aborted. 
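+// Editorial note: a hedged usage sketch, not upstream documentation. A
+// StatementExecutor implementation would typically forward each prepared
+// *Result through Send and stop as soon as Send reports that the query was
+// interrupted or aborted; the variable name result below is illustrative only:
+//
+//	if err := ctx.Send(result); err != nil {
+//		return err
+//	}
+//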
+func (ctx *ExecutionContext) Send(result *Result) error { + result.StatementID = ctx.statementID + select { + case <-ctx.Done(): + return ctx.Err() + case ctx.Results <- result: + } + return nil +} diff --git a/vendor/github.com/influxdata/influxdb/query/executor.go b/vendor/github.com/influxdata/influxdb/query/executor.go new file mode 100644 index 0000000..bd2fa3e --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/query/executor.go @@ -0,0 +1,474 @@ +package query + +import ( + "context" + "errors" + "fmt" + "os" + "runtime/debug" + "strconv" + "sync" + "sync/atomic" + "time" + + "github.com/influxdata/influxdb/models" + "github.com/influxdata/influxql" + "go.uber.org/zap" +) + +var ( + // ErrInvalidQuery is returned when executing an unknown query type. + ErrInvalidQuery = errors.New("invalid query") + + // ErrNotExecuted is returned when a statement is not executed in a query. + // This can occur when a previous statement in the same query has errored. + ErrNotExecuted = errors.New("not executed") + + // ErrQueryInterrupted is an error returned when the query is interrupted. + ErrQueryInterrupted = errors.New("query interrupted") + + // ErrQueryAborted is an error returned when the query is aborted. + ErrQueryAborted = errors.New("query aborted") + + // ErrQueryEngineShutdown is an error sent when the query cannot be + // created because the query engine was shutdown. + ErrQueryEngineShutdown = errors.New("query engine shutdown") + + // ErrQueryTimeoutLimitExceeded is an error when a query hits the max time allowed to run. + ErrQueryTimeoutLimitExceeded = errors.New("query-timeout limit exceeded") + + // ErrAlreadyKilled is returned when attempting to kill a query that has already been killed. + ErrAlreadyKilled = errors.New("already killed") +) + +// Statistics for the Executor +const ( + statQueriesActive = "queriesActive" // Number of queries currently being executed. + statQueriesExecuted = "queriesExecuted" // Number of queries that have been executed (started). + statQueriesFinished = "queriesFinished" // Number of queries that have finished. + statQueryExecutionDuration = "queryDurationNs" // Total (wall) time spent executing queries. + statRecoveredPanics = "recoveredPanics" // Number of panics recovered by Query Executor. + + // PanicCrashEnv is the environment variable that, when set, will prevent + // the handler from recovering any panics. + PanicCrashEnv = "INFLUXDB_PANIC_CRASH" +) + +// ErrDatabaseNotFound returns a database not found error for the given database name. +func ErrDatabaseNotFound(name string) error { return fmt.Errorf("database not found: %s", name) } + +// ErrMaxSelectPointsLimitExceeded is an error when a query hits the maximum number of points. +func ErrMaxSelectPointsLimitExceeded(n, limit int) error { + return fmt.Errorf("max-select-point limit exceeed: (%d/%d)", n, limit) +} + +// ErrMaxConcurrentQueriesLimitExceeded is an error when a query cannot be run +// because the maximum number of queries has been reached. +func ErrMaxConcurrentQueriesLimitExceeded(n, limit int) error { + return fmt.Errorf("max-concurrent-queries limit exceeded(%d, %d)", n, limit) +} + +// Authorizer determines if certain operations are authorized. +type Authorizer interface { + // AuthorizeDatabase indicates whether the given Privilege is authorized on the database with the given name. 
+ AuthorizeDatabase(p influxql.Privilege, name string) bool + + // AuthorizeQuery returns an error if the query cannot be executed + AuthorizeQuery(database string, query *influxql.Query) error + + // AuthorizeSeriesRead determines if a series is authorized for reading + AuthorizeSeriesRead(database string, measurement []byte, tags models.Tags) bool + + // AuthorizeSeriesWrite determines if a series is authorized for writing + AuthorizeSeriesWrite(database string, measurement []byte, tags models.Tags) bool +} + +// OpenAuthorizer is the Authorizer used when authorization is disabled. +// It allows all operations. +type openAuthorizer struct{} + +// OpenAuthorizer can be shared by all goroutines. +var OpenAuthorizer = openAuthorizer{} + +// AuthorizeDatabase returns true to allow any operation on a database. +func (a openAuthorizer) AuthorizeDatabase(influxql.Privilege, string) bool { return true } + +// AuthorizeSeriesRead allows accesss to any series. +func (a openAuthorizer) AuthorizeSeriesRead(database string, measurement []byte, tags models.Tags) bool { + return true +} + +// AuthorizeSeriesWrite allows accesss to any series. +func (a openAuthorizer) AuthorizeSeriesWrite(database string, measurement []byte, tags models.Tags) bool { + return true +} + +// AuthorizeSeriesRead allows any query to execute. +func (a openAuthorizer) AuthorizeQuery(_ string, _ *influxql.Query) error { return nil } + +// AuthorizerIsOpen returns true if the provided Authorizer is guaranteed to +// authorize anything. A nil Authorizer returns true for this function, and this +// function should be preferred over directly checking if an Authorizer is nil +// or not. +func AuthorizerIsOpen(a Authorizer) bool { + return a == nil || a == OpenAuthorizer +} + +// ExecutionOptions contains the options for executing a query. +type ExecutionOptions struct { + // The database the query is running against. + Database string + + // The retention policy the query is running against. + RetentionPolicy string + + // How to determine whether the query is allowed to execute, + // what resources can be returned in SHOW queries, etc. + Authorizer Authorizer + + // The requested maximum number of points to return in each result. + ChunkSize int + + // If this query is being executed in a read-only context. + ReadOnly bool + + // Node to execute on. + NodeID uint64 + + // Quiet suppresses non-essential output from the query executor. + Quiet bool + + // AbortCh is a channel that signals when results are no longer desired by the caller. + AbortCh <-chan struct{} +} + +type contextKey int + +const ( + iteratorsContextKey contextKey = iota + monitorContextKey +) + +// NewContextWithIterators returns a new context.Context with the *Iterators slice added. +// The query planner will add instances of AuxIterator to the Iterators slice. +func NewContextWithIterators(ctx context.Context, itr *Iterators) context.Context { + return context.WithValue(ctx, iteratorsContextKey, itr) +} + +// StatementExecutor executes a statement within the Executor. +type StatementExecutor interface { + // ExecuteStatement executes a statement. Results should be sent to the + // results channel in the ExecutionContext. + ExecuteStatement(stmt influxql.Statement, ctx *ExecutionContext) error +} + +// StatementNormalizer normalizes a statement before it is executed. +type StatementNormalizer interface { + // NormalizeStatement adds a default database and policy to the + // measurements in the statement. 
+ NormalizeStatement(stmt influxql.Statement, database, retentionPolicy string) error +} + +// Executor executes every statement in an Query. +type Executor struct { + // Used for executing a statement in the query. + StatementExecutor StatementExecutor + + // Used for tracking running queries. + TaskManager *TaskManager + + // Logger to use for all logging. + // Defaults to discarding all log output. + Logger *zap.Logger + + // expvar-based stats. + stats *Statistics +} + +// NewExecutor returns a new instance of Executor. +func NewExecutor() *Executor { + return &Executor{ + TaskManager: NewTaskManager(), + Logger: zap.NewNop(), + stats: &Statistics{}, + } +} + +// Statistics keeps statistics related to the Executor. +type Statistics struct { + ActiveQueries int64 + ExecutedQueries int64 + FinishedQueries int64 + QueryExecutionDuration int64 + RecoveredPanics int64 +} + +// Statistics returns statistics for periodic monitoring. +func (e *Executor) Statistics(tags map[string]string) []models.Statistic { + return []models.Statistic{{ + Name: "queryExecutor", + Tags: tags, + Values: map[string]interface{}{ + statQueriesActive: atomic.LoadInt64(&e.stats.ActiveQueries), + statQueriesExecuted: atomic.LoadInt64(&e.stats.ExecutedQueries), + statQueriesFinished: atomic.LoadInt64(&e.stats.FinishedQueries), + statQueryExecutionDuration: atomic.LoadInt64(&e.stats.QueryExecutionDuration), + statRecoveredPanics: atomic.LoadInt64(&e.stats.RecoveredPanics), + }, + }} +} + +// Close kills all running queries and prevents new queries from being attached. +func (e *Executor) Close() error { + return e.TaskManager.Close() +} + +// SetLogOutput sets the writer to which all logs are written. It must not be +// called after Open is called. +func (e *Executor) WithLogger(log *zap.Logger) { + e.Logger = log.With(zap.String("service", "query")) + e.TaskManager.Logger = e.Logger +} + +// ExecuteQuery executes each statement within a query. +func (e *Executor) ExecuteQuery(query *influxql.Query, opt ExecutionOptions, closing chan struct{}) <-chan *Result { + results := make(chan *Result) + go e.executeQuery(query, opt, closing, results) + return results +} + +func (e *Executor) executeQuery(query *influxql.Query, opt ExecutionOptions, closing <-chan struct{}, results chan *Result) { + defer close(results) + defer e.recover(query, results) + + atomic.AddInt64(&e.stats.ActiveQueries, 1) + atomic.AddInt64(&e.stats.ExecutedQueries, 1) + defer func(start time.Time) { + atomic.AddInt64(&e.stats.ActiveQueries, -1) + atomic.AddInt64(&e.stats.FinishedQueries, 1) + atomic.AddInt64(&e.stats.QueryExecutionDuration, time.Since(start).Nanoseconds()) + }(time.Now()) + + ctx, detach, err := e.TaskManager.AttachQuery(query, opt, closing) + if err != nil { + select { + case results <- &Result{Err: err}: + case <-opt.AbortCh: + } + return + } + defer detach() + + // Setup the execution context that will be used when executing statements. + ctx.Results = results + + var i int +LOOP: + for ; i < len(query.Statements); i++ { + ctx.statementID = i + stmt := query.Statements[i] + + // If a default database wasn't passed in by the caller, check the statement. + defaultDB := opt.Database + if defaultDB == "" { + if s, ok := stmt.(influxql.HasDefaultDatabase); ok { + defaultDB = s.DefaultDatabase() + } + } + + // Do not let queries manually use the system measurements. If we find + // one, return an error. This prevents a person from using the + // measurement incorrectly and causing a panic. 
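+		// Editorial note (an example derived from the check below, not an
+		// upstream comment): a statement such as SELECT * FROM _measurements
+		// is rejected with the error "unable to use system source
+		// '_measurements': use SHOW MEASUREMENTS instead".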
+ if stmt, ok := stmt.(*influxql.SelectStatement); ok { + for _, s := range stmt.Sources { + switch s := s.(type) { + case *influxql.Measurement: + if influxql.IsSystemName(s.Name) { + command := "the appropriate meta command" + switch s.Name { + case "_fieldKeys": + command = "SHOW FIELD KEYS" + case "_measurements": + command = "SHOW MEASUREMENTS" + case "_series": + command = "SHOW SERIES" + case "_tagKeys": + command = "SHOW TAG KEYS" + case "_tags": + command = "SHOW TAG VALUES" + } + results <- &Result{ + Err: fmt.Errorf("unable to use system source '%s': use %s instead", s.Name, command), + } + break LOOP + } + } + } + } + + // Rewrite statements, if necessary. + // This can occur on meta read statements which convert to SELECT statements. + newStmt, err := RewriteStatement(stmt) + if err != nil { + results <- &Result{Err: err} + break + } + stmt = newStmt + + // Normalize each statement if possible. + if normalizer, ok := e.StatementExecutor.(StatementNormalizer); ok { + if err := normalizer.NormalizeStatement(stmt, defaultDB, opt.RetentionPolicy); err != nil { + if err := ctx.send(&Result{Err: err}); err == ErrQueryAborted { + return + } + break + } + } + + // Log each normalized statement. + if !ctx.Quiet { + e.Logger.Info("Executing query", zap.Stringer("query", stmt)) + } + + // Send any other statements to the underlying statement executor. + err = e.StatementExecutor.ExecuteStatement(stmt, ctx) + if err == ErrQueryInterrupted { + // Query was interrupted so retrieve the real interrupt error from + // the query task if there is one. + if qerr := ctx.Err(); qerr != nil { + err = qerr + } + } + + // Send an error for this result if it failed for some reason. + if err != nil { + if err := ctx.send(&Result{ + StatementID: i, + Err: err, + }); err == ErrQueryAborted { + return + } + // Stop after the first error. + break + } + + // Check if the query was interrupted during an uninterruptible statement. + interrupted := false + select { + case <-ctx.Done(): + interrupted = true + default: + // Query has not been interrupted. + } + + if interrupted { + break + } + } + + // Send error results for any statements which were not executed. + for ; i < len(query.Statements)-1; i++ { + if err := ctx.send(&Result{ + StatementID: i, + Err: ErrNotExecuted, + }); err == ErrQueryAborted { + return + } + } +} + +// Determines if the Executor will recover any panics or let them crash +// the server. +var willCrash bool + +func init() { + var err error + if willCrash, err = strconv.ParseBool(os.Getenv(PanicCrashEnv)); err != nil { + willCrash = false + } +} + +func (e *Executor) recover(query *influxql.Query, results chan *Result) { + if err := recover(); err != nil { + atomic.AddInt64(&e.stats.RecoveredPanics, 1) // Capture the panic in _internal stats. + e.Logger.Error(fmt.Sprintf("%s [panic:%s] %s", query.String(), err, debug.Stack())) + results <- &Result{ + StatementID: -1, + Err: fmt.Errorf("%s [panic:%s]", query.String(), err), + } + + if willCrash { + e.Logger.Error(fmt.Sprintf("\n\n=====\nAll goroutines now follow:")) + buf := debug.Stack() + e.Logger.Error(fmt.Sprintf("%s", buf)) + os.Exit(1) + } + } +} + +// Task is the internal data structure for managing queries. +// For the public use data structure that gets returned, see Task. +type Task struct { + query string + database string + status TaskStatus + startTime time.Time + closing chan struct{} + monitorCh chan error + err error + mu sync.Mutex +} + +// Monitor starts a new goroutine that will monitor a query. 
The function +// will be passed in a channel to signal when the query has been finished +// normally. If the function returns with an error and the query is still +// running, the query will be terminated. +func (q *Task) Monitor(fn MonitorFunc) { + go q.monitor(fn) +} + +// Error returns any asynchronous error that may have occured while executing +// the query. +func (q *Task) Error() error { + q.mu.Lock() + defer q.mu.Unlock() + return q.err +} + +func (q *Task) setError(err error) { + q.mu.Lock() + q.err = err + q.mu.Unlock() +} + +func (q *Task) monitor(fn MonitorFunc) { + if err := fn(q.closing); err != nil { + select { + case <-q.closing: + case q.monitorCh <- err: + } + } +} + +// close closes the query task closing channel if the query hasn't been previously killed. +func (q *Task) close() { + q.mu.Lock() + if q.status != KilledTask { + // Set the status to killed to prevent closing the channel twice. + q.status = KilledTask + close(q.closing) + } + q.mu.Unlock() +} + +func (q *Task) kill() error { + q.mu.Lock() + if q.status == KilledTask { + q.mu.Unlock() + return ErrAlreadyKilled + } + q.status = KilledTask + close(q.closing) + q.mu.Unlock() + return nil +} diff --git a/vendor/github.com/influxdata/influxdb/query/executor_test.go b/vendor/github.com/influxdata/influxdb/query/executor_test.go new file mode 100644 index 0000000..6f7f0c1 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/query/executor_test.go @@ -0,0 +1,535 @@ +package query_test + +import ( + "errors" + "fmt" + "strings" + "testing" + "time" + + "github.com/influxdata/influxdb/query" + "github.com/influxdata/influxql" +) + +var errUnexpected = errors.New("unexpected error") + +type StatementExecutor struct { + ExecuteStatementFn func(stmt influxql.Statement, ctx *query.ExecutionContext) error +} + +func (e *StatementExecutor) ExecuteStatement(stmt influxql.Statement, ctx *query.ExecutionContext) error { + return e.ExecuteStatementFn(stmt, ctx) +} + +func NewQueryExecutor() *query.Executor { + return query.NewExecutor() +} + +func TestQueryExecutor_AttachQuery(t *testing.T) { + q, err := influxql.ParseQuery(`SELECT count(value) FROM cpu`) + if err != nil { + t.Fatal(err) + } + + e := NewQueryExecutor() + e.StatementExecutor = &StatementExecutor{ + ExecuteStatementFn: func(stmt influxql.Statement, ctx *query.ExecutionContext) error { + if ctx.QueryID != 1 { + t.Errorf("incorrect query id: exp=1 got=%d", ctx.QueryID) + } + return nil + }, + } + + discardOutput(e.ExecuteQuery(q, query.ExecutionOptions{}, nil)) +} + +func TestQueryExecutor_KillQuery(t *testing.T) { + q, err := influxql.ParseQuery(`SELECT count(value) FROM cpu`) + if err != nil { + t.Fatal(err) + } + + qid := make(chan uint64) + + e := NewQueryExecutor() + e.StatementExecutor = &StatementExecutor{ + ExecuteStatementFn: func(stmt influxql.Statement, ctx *query.ExecutionContext) error { + switch stmt.(type) { + case *influxql.KillQueryStatement: + return e.TaskManager.ExecuteStatement(stmt, ctx) + } + + qid <- ctx.QueryID + select { + case <-ctx.Done(): + return ctx.Err() + case <-time.After(100 * time.Millisecond): + t.Error("killing the query did not close the channel after 100 milliseconds") + return errUnexpected + } + }, + } + + results := e.ExecuteQuery(q, query.ExecutionOptions{}, nil) + q, err = influxql.ParseQuery(fmt.Sprintf("KILL QUERY %d", <-qid)) + if err != nil { + t.Fatal(err) + } + discardOutput(e.ExecuteQuery(q, query.ExecutionOptions{}, nil)) + + result := <-results + if result.Err != query.ErrQueryInterrupted { + 
t.Errorf("unexpected error: %s", result.Err) + } +} + +func TestQueryExecutor_KillQuery_Zombie(t *testing.T) { + q, err := influxql.ParseQuery(`SELECT count(value) FROM cpu`) + if err != nil { + t.Fatal(err) + } + + qid := make(chan uint64) + done := make(chan struct{}) + + e := NewQueryExecutor() + e.StatementExecutor = &StatementExecutor{ + ExecuteStatementFn: func(stmt influxql.Statement, ctx *query.ExecutionContext) error { + switch stmt.(type) { + case *influxql.KillQueryStatement, *influxql.ShowQueriesStatement: + return e.TaskManager.ExecuteStatement(stmt, ctx) + } + + qid <- ctx.QueryID + select { + case <-ctx.Done(): + select { + case <-done: + // Keep the query running until we run SHOW QUERIES. + case <-time.After(100 * time.Millisecond): + // Ensure that we don't have a lingering goroutine. + } + return query.ErrQueryInterrupted + case <-time.After(100 * time.Millisecond): + t.Error("killing the query did not close the channel after 100 milliseconds") + return errUnexpected + } + }, + } + + results := e.ExecuteQuery(q, query.ExecutionOptions{}, nil) + q, err = influxql.ParseQuery(fmt.Sprintf("KILL QUERY %d", <-qid)) + if err != nil { + t.Fatal(err) + } + discardOutput(e.ExecuteQuery(q, query.ExecutionOptions{}, nil)) + + // Display the queries and ensure that the original is still in there. + q, err = influxql.ParseQuery("SHOW QUERIES") + if err != nil { + t.Fatal(err) + } + tasks := e.ExecuteQuery(q, query.ExecutionOptions{}, nil) + + // The killed query should still be there. + task := <-tasks + if len(task.Series) != 1 { + t.Errorf("expected %d series, got %d", 1, len(task.Series)) + } else if len(task.Series[0].Values) != 2 { + t.Errorf("expected %d rows, got %d", 2, len(task.Series[0].Values)) + } + close(done) + + // The original query should return. + result := <-results + if result.Err != query.ErrQueryInterrupted { + t.Errorf("unexpected error: %s", result.Err) + } +} + +func TestQueryExecutor_KillQuery_CloseTaskManager(t *testing.T) { + q, err := influxql.ParseQuery(`SELECT count(value) FROM cpu`) + if err != nil { + t.Fatal(err) + } + + qid := make(chan uint64) + + // Open a channel to stall the statement executor forever. This keeps the statement executor + // running even after we kill the query which can happen with some queries. We only close it once + // the test has finished running. + done := make(chan struct{}) + defer close(done) + + e := NewQueryExecutor() + e.StatementExecutor = &StatementExecutor{ + ExecuteStatementFn: func(stmt influxql.Statement, ctx *query.ExecutionContext) error { + switch stmt.(type) { + case *influxql.KillQueryStatement, *influxql.ShowQueriesStatement: + return e.TaskManager.ExecuteStatement(stmt, ctx) + } + + qid <- ctx.QueryID + <-done + return nil + }, + } + + // Kill the query. This should switch it into a zombie state. + go discardOutput(e.ExecuteQuery(q, query.ExecutionOptions{}, nil)) + q, err = influxql.ParseQuery(fmt.Sprintf("KILL QUERY %d", <-qid)) + if err != nil { + t.Fatal(err) + } + discardOutput(e.ExecuteQuery(q, query.ExecutionOptions{}, nil)) + + // Display the queries and ensure that the original is still in there. + q, err = influxql.ParseQuery("SHOW QUERIES") + if err != nil { + t.Fatal(err) + } + tasks := e.ExecuteQuery(q, query.ExecutionOptions{}, nil) + + // The killed query should still be there. 
+ task := <-tasks + if len(task.Series) != 1 { + t.Errorf("expected %d series, got %d", 1, len(task.Series)) + } else if len(task.Series[0].Values) != 2 { + t.Errorf("expected %d rows, got %d", 2, len(task.Series[0].Values)) + } + + // Close the task manager to ensure it doesn't cause a panic. + if err := e.TaskManager.Close(); err != nil { + t.Errorf("unexpected error: %s", err) + } +} + +func TestQueryExecutor_KillQuery_AlreadyKilled(t *testing.T) { + q, err := influxql.ParseQuery(`SELECT count(value) FROM cpu`) + if err != nil { + t.Fatal(err) + } + + qid := make(chan uint64) + + // Open a channel to stall the statement executor forever. This keeps the statement executor + // running even after we kill the query which can happen with some queries. We only close it once + // the test has finished running. + done := make(chan struct{}) + defer close(done) + + e := NewQueryExecutor() + e.StatementExecutor = &StatementExecutor{ + ExecuteStatementFn: func(stmt influxql.Statement, ctx *query.ExecutionContext) error { + switch stmt.(type) { + case *influxql.KillQueryStatement, *influxql.ShowQueriesStatement: + return e.TaskManager.ExecuteStatement(stmt, ctx) + } + + qid <- ctx.QueryID + <-done + return nil + }, + } + + // Kill the query. This should switch it into a zombie state. + go discardOutput(e.ExecuteQuery(q, query.ExecutionOptions{}, nil)) + q, err = influxql.ParseQuery(fmt.Sprintf("KILL QUERY %d", <-qid)) + if err != nil { + t.Fatal(err) + } + discardOutput(e.ExecuteQuery(q, query.ExecutionOptions{}, nil)) + + // Now attempt to kill it again. We should get an error. + results := e.ExecuteQuery(q, query.ExecutionOptions{}, nil) + result := <-results + if got, want := result.Err, query.ErrAlreadyKilled; got != want { + t.Errorf("unexpected error: got=%v want=%v", got, want) + } +} + +func TestQueryExecutor_Interrupt(t *testing.T) { + q, err := influxql.ParseQuery(`SELECT count(value) FROM cpu`) + if err != nil { + t.Fatal(err) + } + + e := NewQueryExecutor() + e.StatementExecutor = &StatementExecutor{ + ExecuteStatementFn: func(stmt influxql.Statement, ctx *query.ExecutionContext) error { + select { + case <-ctx.Done(): + return ctx.Err() + case <-time.After(100 * time.Millisecond): + t.Error("killing the query did not close the channel after 100 milliseconds") + return errUnexpected + } + }, + } + + closing := make(chan struct{}) + results := e.ExecuteQuery(q, query.ExecutionOptions{}, closing) + close(closing) + result := <-results + if result.Err != query.ErrQueryInterrupted { + t.Errorf("unexpected error: %s", result.Err) + } +} + +func TestQueryExecutor_Abort(t *testing.T) { + q, err := influxql.ParseQuery(`SELECT count(value) FROM cpu`) + if err != nil { + t.Fatal(err) + } + + ch1 := make(chan struct{}) + ch2 := make(chan struct{}) + + e := NewQueryExecutor() + e.StatementExecutor = &StatementExecutor{ + ExecuteStatementFn: func(stmt influxql.Statement, ctx *query.ExecutionContext) error { + <-ch1 + if err := ctx.Send(&query.Result{Err: errUnexpected}); err != query.ErrQueryAborted { + t.Errorf("unexpected error: %v", err) + } + close(ch2) + return nil + }, + } + + done := make(chan struct{}) + close(done) + + results := e.ExecuteQuery(q, query.ExecutionOptions{AbortCh: done}, nil) + close(ch1) + + <-ch2 + discardOutput(results) +} + +func TestQueryExecutor_ShowQueries(t *testing.T) { + e := NewQueryExecutor() + e.StatementExecutor = &StatementExecutor{ + ExecuteStatementFn: func(stmt influxql.Statement, ctx *query.ExecutionContext) error { + switch stmt.(type) { + case 
*influxql.ShowQueriesStatement: + return e.TaskManager.ExecuteStatement(stmt, ctx) + } + + t.Errorf("unexpected statement: %s", stmt) + return errUnexpected + }, + } + + q, err := influxql.ParseQuery(`SHOW QUERIES`) + if err != nil { + t.Fatal(err) + } + + results := e.ExecuteQuery(q, query.ExecutionOptions{}, nil) + result := <-results + if len(result.Series) != 1 { + t.Errorf("expected %d series, got %d", 1, len(result.Series)) + } else if len(result.Series[0].Values) != 1 { + t.Errorf("expected %d row, got %d", 1, len(result.Series[0].Values)) + } + if result.Err != nil { + t.Errorf("unexpected error: %s", result.Err) + } +} + +func TestQueryExecutor_Limit_Timeout(t *testing.T) { + q, err := influxql.ParseQuery(`SELECT count(value) FROM cpu`) + if err != nil { + t.Fatal(err) + } + + e := NewQueryExecutor() + e.StatementExecutor = &StatementExecutor{ + ExecuteStatementFn: func(stmt influxql.Statement, ctx *query.ExecutionContext) error { + select { + case <-ctx.Done(): + return ctx.Err() + case <-time.After(time.Second): + t.Errorf("timeout has not killed the query") + return errUnexpected + } + }, + } + e.TaskManager.QueryTimeout = time.Nanosecond + + results := e.ExecuteQuery(q, query.ExecutionOptions{}, nil) + result := <-results + if result.Err == nil || !strings.Contains(result.Err.Error(), "query-timeout") { + t.Errorf("unexpected error: %s", result.Err) + } +} + +func TestQueryExecutor_Limit_ConcurrentQueries(t *testing.T) { + q, err := influxql.ParseQuery(`SELECT count(value) FROM cpu`) + if err != nil { + t.Fatal(err) + } + + qid := make(chan uint64) + + e := NewQueryExecutor() + e.StatementExecutor = &StatementExecutor{ + ExecuteStatementFn: func(stmt influxql.Statement, ctx *query.ExecutionContext) error { + qid <- ctx.QueryID + <-ctx.Done() + return ctx.Err() + }, + } + e.TaskManager.MaxConcurrentQueries = 1 + defer e.Close() + + // Start first query and wait for it to be executing. + go discardOutput(e.ExecuteQuery(q, query.ExecutionOptions{}, nil)) + <-qid + + // Start second query and expect for it to fail. + results := e.ExecuteQuery(q, query.ExecutionOptions{}, nil) + + select { + case result := <-results: + if len(result.Series) != 0 { + t.Errorf("expected %d rows, got %d", 0, len(result.Series)) + } + if result.Err == nil || !strings.Contains(result.Err.Error(), "max-concurrent-queries") { + t.Errorf("unexpected error: %s", result.Err) + } + case <-qid: + t.Errorf("unexpected statement execution for the second query") + } +} + +func TestQueryExecutor_Close(t *testing.T) { + q, err := influxql.ParseQuery(`SELECT count(value) FROM cpu`) + if err != nil { + t.Fatal(err) + } + + ch1 := make(chan struct{}) + ch2 := make(chan struct{}) + + e := NewQueryExecutor() + e.StatementExecutor = &StatementExecutor{ + ExecuteStatementFn: func(stmt influxql.Statement, ctx *query.ExecutionContext) error { + close(ch1) + <-ctx.Done() + return ctx.Err() + }, + } + + results := e.ExecuteQuery(q, query.ExecutionOptions{}, nil) + go func(results <-chan *query.Result) { + result := <-results + if result.Err != query.ErrQueryEngineShutdown { + t.Errorf("unexpected error: %s", result.Err) + } + close(ch2) + }(results) + + // Wait for the statement to start executing. + <-ch1 + + // Close the query executor. + e.Close() + + // Check that the statement gets interrupted and finishes. 
+ select { + case <-ch2: + case <-time.After(100 * time.Millisecond): + t.Fatal("closing the query manager did not kill the query after 100 milliseconds") + } + + results = e.ExecuteQuery(q, query.ExecutionOptions{}, nil) + result := <-results + if len(result.Series) != 0 { + t.Errorf("expected %d rows, got %d", 0, len(result.Series)) + } + if result.Err != query.ErrQueryEngineShutdown { + t.Errorf("unexpected error: %s", result.Err) + } +} + +func TestQueryExecutor_Panic(t *testing.T) { + q, err := influxql.ParseQuery(`SELECT count(value) FROM cpu`) + if err != nil { + t.Fatal(err) + } + + e := NewQueryExecutor() + e.StatementExecutor = &StatementExecutor{ + ExecuteStatementFn: func(stmt influxql.Statement, ctx *query.ExecutionContext) error { + panic("test error") + }, + } + + results := e.ExecuteQuery(q, query.ExecutionOptions{}, nil) + result := <-results + if len(result.Series) != 0 { + t.Errorf("expected %d rows, got %d", 0, len(result.Series)) + } + if result.Err == nil || result.Err.Error() != "SELECT count(value) FROM cpu [panic:test error]" { + t.Errorf("unexpected error: %s", result.Err) + } +} + +func TestQueryExecutor_InvalidSource(t *testing.T) { + e := NewQueryExecutor() + e.StatementExecutor = &StatementExecutor{ + ExecuteStatementFn: func(stmt influxql.Statement, ctx *query.ExecutionContext) error { + return errors.New("statement executed unexpectedly") + }, + } + + for i, tt := range []struct { + q string + err string + }{ + { + q: `SELECT fieldKey, fieldType FROM _fieldKeys`, + err: `unable to use system source '_fieldKeys': use SHOW FIELD KEYS instead`, + }, + { + q: `SELECT "name" FROM _measurements`, + err: `unable to use system source '_measurements': use SHOW MEASUREMENTS instead`, + }, + { + q: `SELECT "key" FROM _series`, + err: `unable to use system source '_series': use SHOW SERIES instead`, + }, + { + q: `SELECT tagKey FROM _tagKeys`, + err: `unable to use system source '_tagKeys': use SHOW TAG KEYS instead`, + }, + { + q: `SELECT "key", value FROM _tags`, + err: `unable to use system source '_tags': use SHOW TAG VALUES instead`, + }, + } { + q, err := influxql.ParseQuery(tt.q) + if err != nil { + t.Errorf("%d. unable to parse: %s", i, tt.q) + continue + } + + results := e.ExecuteQuery(q, query.ExecutionOptions{}, nil) + result := <-results + if len(result.Series) != 0 { + t.Errorf("%d. expected %d rows, got %d", 0, i, len(result.Series)) + } + if result.Err == nil || result.Err.Error() != tt.err { + t.Errorf("%d. unexpected error: %s", i, result.Err) + } + } +} + +func discardOutput(results <-chan *query.Result) { + for range results { + // Read all results and discard. + } +} diff --git a/vendor/github.com/influxdata/influxdb/query/explain.go b/vendor/github.com/influxdata/influxdb/query/explain.go new file mode 100644 index 0000000..b599af1 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/query/explain.go @@ -0,0 +1,86 @@ +package query + +import ( + "bytes" + "context" + "fmt" + "io" + "strings" + + "github.com/influxdata/influxql" +) + +func (p *preparedStatement) Explain() (string, error) { + // Determine the cost of all iterators created as part of this plan. 
+ ic := &explainIteratorCreator{ic: p.ic} + p.ic = ic + cur, err := p.Select(context.Background()) + p.ic = ic.ic + + if err != nil { + return "", err + } + cur.Close() + + var buf bytes.Buffer + for i, node := range ic.nodes { + if i > 0 { + buf.WriteString("\n") + } + + expr := "" + if node.Expr != nil { + expr = node.Expr.String() + } + fmt.Fprintf(&buf, "EXPRESSION: %s\n", expr) + if len(node.Aux) != 0 { + refs := make([]string, len(node.Aux)) + for i, ref := range node.Aux { + refs[i] = ref.String() + } + fmt.Fprintf(&buf, "AUXILIARY FIELDS: %s\n", strings.Join(refs, ", ")) + } + fmt.Fprintf(&buf, "NUMBER OF SHARDS: %d\n", node.Cost.NumShards) + fmt.Fprintf(&buf, "NUMBER OF SERIES: %d\n", node.Cost.NumSeries) + fmt.Fprintf(&buf, "CACHED VALUES: %d\n", node.Cost.CachedValues) + fmt.Fprintf(&buf, "NUMBER OF FILES: %d\n", node.Cost.NumFiles) + fmt.Fprintf(&buf, "NUMBER OF BLOCKS: %d\n", node.Cost.BlocksRead) + fmt.Fprintf(&buf, "SIZE OF BLOCKS: %d\n", node.Cost.BlockSize) + } + return buf.String(), nil +} + +type planNode struct { + Expr influxql.Expr + Aux []influxql.VarRef + Cost IteratorCost +} + +type explainIteratorCreator struct { + ic interface { + IteratorCreator + io.Closer + } + nodes []planNode +} + +func (e *explainIteratorCreator) CreateIterator(ctx context.Context, m *influxql.Measurement, opt IteratorOptions) (Iterator, error) { + cost, err := e.ic.IteratorCost(m, opt) + if err != nil { + return nil, err + } + e.nodes = append(e.nodes, planNode{ + Expr: opt.Expr, + Aux: opt.Aux, + Cost: cost, + }) + return &nilFloatIterator{}, nil +} + +func (e *explainIteratorCreator) IteratorCost(m *influxql.Measurement, opt IteratorOptions) (IteratorCost, error) { + return e.ic.IteratorCost(m, opt) +} + +func (e *explainIteratorCreator) Close() error { + return e.ic.Close() +} diff --git a/vendor/github.com/influxdata/influxdb/query/functions.gen.go b/vendor/github.com/influxdata/influxdb/query/functions.gen.go new file mode 100644 index 0000000..9c62a81 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/query/functions.gen.go @@ -0,0 +1,2433 @@ +// Generated by tmpl +// https://github.com/benbjohnson/tmpl +// +// DO NOT EDIT! +// Source: functions.gen.go.tmpl + +package query + +import ( + "math/rand" + "sort" + "time" +) + +// FloatPointAggregator aggregates points to produce a single point. +type FloatPointAggregator interface { + AggregateFloat(p *FloatPoint) +} + +// FloatBulkPointAggregator aggregates multiple points at a time. +type FloatBulkPointAggregator interface { + AggregateFloatBulk(points []FloatPoint) +} + +// AggregateFloatPoints feeds a slice of FloatPoint into an +// aggregator. If the aggregator is a FloatBulkPointAggregator, it will +// use the AggregateBulk method. +func AggregateFloatPoints(a FloatPointAggregator, points []FloatPoint) { + switch a := a.(type) { + case FloatBulkPointAggregator: + a.AggregateFloatBulk(points) + default: + for _, p := range points { + a.AggregateFloat(&p) + } + } +} + +// FloatPointEmitter produces a single point from an aggregate. +type FloatPointEmitter interface { + Emit() []FloatPoint +} + +// FloatReduceFunc is the function called by a FloatPoint reducer. +type FloatReduceFunc func(prev *FloatPoint, curr *FloatPoint) (t int64, v float64, aux []interface{}) + +// FloatFuncReducer is a reducer that reduces +// the passed in points to a single point using a reduce function. +type FloatFuncReducer struct { + prev *FloatPoint + fn FloatReduceFunc +} + +// NewFloatFuncReducer creates a new FloatFuncFloatReducer. 
+func NewFloatFuncReducer(fn FloatReduceFunc, prev *FloatPoint) *FloatFuncReducer { + return &FloatFuncReducer{fn: fn, prev: prev} +} + +// AggregateFloat takes a FloatPoint and invokes the reduce function with the +// current and new point to modify the current point. +func (r *FloatFuncReducer) AggregateFloat(p *FloatPoint) { + t, v, aux := r.fn(r.prev, p) + if r.prev == nil { + r.prev = &FloatPoint{} + } + r.prev.Time = t + r.prev.Value = v + r.prev.Aux = aux + if p.Aggregated > 1 { + r.prev.Aggregated += p.Aggregated + } else { + r.prev.Aggregated++ + } +} + +// Emit emits the point that was generated when reducing the points fed in with AggregateFloat. +func (r *FloatFuncReducer) Emit() []FloatPoint { + return []FloatPoint{*r.prev} +} + +// FloatReduceSliceFunc is the function called by a FloatPoint reducer. +type FloatReduceSliceFunc func(a []FloatPoint) []FloatPoint + +// FloatSliceFuncReducer is a reducer that aggregates +// the passed in points and then invokes the function to reduce the points when they are emitted. +type FloatSliceFuncReducer struct { + points []FloatPoint + fn FloatReduceSliceFunc +} + +// NewFloatSliceFuncReducer creates a new FloatSliceFuncReducer. +func NewFloatSliceFuncReducer(fn FloatReduceSliceFunc) *FloatSliceFuncReducer { + return &FloatSliceFuncReducer{fn: fn} +} + +// AggregateFloat copies the FloatPoint into the internal slice to be passed +// to the reduce function when Emit is called. +func (r *FloatSliceFuncReducer) AggregateFloat(p *FloatPoint) { + r.points = append(r.points, *p.Clone()) +} + +// AggregateFloatBulk performs a bulk copy of FloatPoints into the internal slice. +// This is a more efficient version of calling AggregateFloat on each point. +func (r *FloatSliceFuncReducer) AggregateFloatBulk(points []FloatPoint) { + r.points = append(r.points, points...) +} + +// Emit invokes the reduce function on the aggregated points to generate the aggregated points. +// This method does not clear the points from the internal slice. +func (r *FloatSliceFuncReducer) Emit() []FloatPoint { + return r.fn(r.points) +} + +// FloatReduceIntegerFunc is the function called by a FloatPoint reducer. +type FloatReduceIntegerFunc func(prev *IntegerPoint, curr *FloatPoint) (t int64, v int64, aux []interface{}) + +// FloatFuncIntegerReducer is a reducer that reduces +// the passed in points to a single point using a reduce function. +type FloatFuncIntegerReducer struct { + prev *IntegerPoint + fn FloatReduceIntegerFunc +} + +// NewFloatFuncIntegerReducer creates a new FloatFuncIntegerReducer. +func NewFloatFuncIntegerReducer(fn FloatReduceIntegerFunc, prev *IntegerPoint) *FloatFuncIntegerReducer { + return &FloatFuncIntegerReducer{fn: fn, prev: prev} +} + +// AggregateFloat takes a FloatPoint and invokes the reduce function with the +// current and new point to modify the current point. +func (r *FloatFuncIntegerReducer) AggregateFloat(p *FloatPoint) { + t, v, aux := r.fn(r.prev, p) + if r.prev == nil { + r.prev = &IntegerPoint{} + } + r.prev.Time = t + r.prev.Value = v + r.prev.Aux = aux + if p.Aggregated > 1 { + r.prev.Aggregated += p.Aggregated + } else { + r.prev.Aggregated++ + } +} + +// Emit emits the point that was generated when reducing the points fed in with AggregateFloat. +func (r *FloatFuncIntegerReducer) Emit() []IntegerPoint { + return []IntegerPoint{*r.prev} +} + +// FloatReduceIntegerSliceFunc is the function called by a FloatPoint reducer. 
+type FloatReduceIntegerSliceFunc func(a []FloatPoint) []IntegerPoint + +// FloatSliceFuncIntegerReducer is a reducer that aggregates +// the passed in points and then invokes the function to reduce the points when they are emitted. +type FloatSliceFuncIntegerReducer struct { + points []FloatPoint + fn FloatReduceIntegerSliceFunc +} + +// NewFloatSliceFuncIntegerReducer creates a new FloatSliceFuncIntegerReducer. +func NewFloatSliceFuncIntegerReducer(fn FloatReduceIntegerSliceFunc) *FloatSliceFuncIntegerReducer { + return &FloatSliceFuncIntegerReducer{fn: fn} +} + +// AggregateFloat copies the FloatPoint into the internal slice to be passed +// to the reduce function when Emit is called. +func (r *FloatSliceFuncIntegerReducer) AggregateFloat(p *FloatPoint) { + r.points = append(r.points, *p.Clone()) +} + +// AggregateFloatBulk performs a bulk copy of FloatPoints into the internal slice. +// This is a more efficient version of calling AggregateFloat on each point. +func (r *FloatSliceFuncIntegerReducer) AggregateFloatBulk(points []FloatPoint) { + r.points = append(r.points, points...) +} + +// Emit invokes the reduce function on the aggregated points to generate the aggregated points. +// This method does not clear the points from the internal slice. +func (r *FloatSliceFuncIntegerReducer) Emit() []IntegerPoint { + return r.fn(r.points) +} + +// FloatReduceUnsignedFunc is the function called by a FloatPoint reducer. +type FloatReduceUnsignedFunc func(prev *UnsignedPoint, curr *FloatPoint) (t int64, v uint64, aux []interface{}) + +// FloatFuncUnsignedReducer is a reducer that reduces +// the passed in points to a single point using a reduce function. +type FloatFuncUnsignedReducer struct { + prev *UnsignedPoint + fn FloatReduceUnsignedFunc +} + +// NewFloatFuncUnsignedReducer creates a new FloatFuncUnsignedReducer. +func NewFloatFuncUnsignedReducer(fn FloatReduceUnsignedFunc, prev *UnsignedPoint) *FloatFuncUnsignedReducer { + return &FloatFuncUnsignedReducer{fn: fn, prev: prev} +} + +// AggregateFloat takes a FloatPoint and invokes the reduce function with the +// current and new point to modify the current point. +func (r *FloatFuncUnsignedReducer) AggregateFloat(p *FloatPoint) { + t, v, aux := r.fn(r.prev, p) + if r.prev == nil { + r.prev = &UnsignedPoint{} + } + r.prev.Time = t + r.prev.Value = v + r.prev.Aux = aux + if p.Aggregated > 1 { + r.prev.Aggregated += p.Aggregated + } else { + r.prev.Aggregated++ + } +} + +// Emit emits the point that was generated when reducing the points fed in with AggregateFloat. +func (r *FloatFuncUnsignedReducer) Emit() []UnsignedPoint { + return []UnsignedPoint{*r.prev} +} + +// FloatReduceUnsignedSliceFunc is the function called by a FloatPoint reducer. +type FloatReduceUnsignedSliceFunc func(a []FloatPoint) []UnsignedPoint + +// FloatSliceFuncUnsignedReducer is a reducer that aggregates +// the passed in points and then invokes the function to reduce the points when they are emitted. +type FloatSliceFuncUnsignedReducer struct { + points []FloatPoint + fn FloatReduceUnsignedSliceFunc +} + +// NewFloatSliceFuncUnsignedReducer creates a new FloatSliceFuncUnsignedReducer. +func NewFloatSliceFuncUnsignedReducer(fn FloatReduceUnsignedSliceFunc) *FloatSliceFuncUnsignedReducer { + return &FloatSliceFuncUnsignedReducer{fn: fn} +} + +// AggregateFloat copies the FloatPoint into the internal slice to be passed +// to the reduce function when Emit is called. 
+func (r *FloatSliceFuncUnsignedReducer) AggregateFloat(p *FloatPoint) { + r.points = append(r.points, *p.Clone()) +} + +// AggregateFloatBulk performs a bulk copy of FloatPoints into the internal slice. +// This is a more efficient version of calling AggregateFloat on each point. +func (r *FloatSliceFuncUnsignedReducer) AggregateFloatBulk(points []FloatPoint) { + r.points = append(r.points, points...) +} + +// Emit invokes the reduce function on the aggregated points to generate the aggregated points. +// This method does not clear the points from the internal slice. +func (r *FloatSliceFuncUnsignedReducer) Emit() []UnsignedPoint { + return r.fn(r.points) +} + +// FloatReduceStringFunc is the function called by a FloatPoint reducer. +type FloatReduceStringFunc func(prev *StringPoint, curr *FloatPoint) (t int64, v string, aux []interface{}) + +// FloatFuncStringReducer is a reducer that reduces +// the passed in points to a single point using a reduce function. +type FloatFuncStringReducer struct { + prev *StringPoint + fn FloatReduceStringFunc +} + +// NewFloatFuncStringReducer creates a new FloatFuncStringReducer. +func NewFloatFuncStringReducer(fn FloatReduceStringFunc, prev *StringPoint) *FloatFuncStringReducer { + return &FloatFuncStringReducer{fn: fn, prev: prev} +} + +// AggregateFloat takes a FloatPoint and invokes the reduce function with the +// current and new point to modify the current point. +func (r *FloatFuncStringReducer) AggregateFloat(p *FloatPoint) { + t, v, aux := r.fn(r.prev, p) + if r.prev == nil { + r.prev = &StringPoint{} + } + r.prev.Time = t + r.prev.Value = v + r.prev.Aux = aux + if p.Aggregated > 1 { + r.prev.Aggregated += p.Aggregated + } else { + r.prev.Aggregated++ + } +} + +// Emit emits the point that was generated when reducing the points fed in with AggregateFloat. +func (r *FloatFuncStringReducer) Emit() []StringPoint { + return []StringPoint{*r.prev} +} + +// FloatReduceStringSliceFunc is the function called by a FloatPoint reducer. +type FloatReduceStringSliceFunc func(a []FloatPoint) []StringPoint + +// FloatSliceFuncStringReducer is a reducer that aggregates +// the passed in points and then invokes the function to reduce the points when they are emitted. +type FloatSliceFuncStringReducer struct { + points []FloatPoint + fn FloatReduceStringSliceFunc +} + +// NewFloatSliceFuncStringReducer creates a new FloatSliceFuncStringReducer. +func NewFloatSliceFuncStringReducer(fn FloatReduceStringSliceFunc) *FloatSliceFuncStringReducer { + return &FloatSliceFuncStringReducer{fn: fn} +} + +// AggregateFloat copies the FloatPoint into the internal slice to be passed +// to the reduce function when Emit is called. +func (r *FloatSliceFuncStringReducer) AggregateFloat(p *FloatPoint) { + r.points = append(r.points, *p.Clone()) +} + +// AggregateFloatBulk performs a bulk copy of FloatPoints into the internal slice. +// This is a more efficient version of calling AggregateFloat on each point. +func (r *FloatSliceFuncStringReducer) AggregateFloatBulk(points []FloatPoint) { + r.points = append(r.points, points...) +} + +// Emit invokes the reduce function on the aggregated points to generate the aggregated points. +// This method does not clear the points from the internal slice. +func (r *FloatSliceFuncStringReducer) Emit() []StringPoint { + return r.fn(r.points) +} + +// FloatReduceBooleanFunc is the function called by a FloatPoint reducer. 
+type FloatReduceBooleanFunc func(prev *BooleanPoint, curr *FloatPoint) (t int64, v bool, aux []interface{}) + +// FloatFuncBooleanReducer is a reducer that reduces +// the passed in points to a single point using a reduce function. +type FloatFuncBooleanReducer struct { + prev *BooleanPoint + fn FloatReduceBooleanFunc +} + +// NewFloatFuncBooleanReducer creates a new FloatFuncBooleanReducer. +func NewFloatFuncBooleanReducer(fn FloatReduceBooleanFunc, prev *BooleanPoint) *FloatFuncBooleanReducer { + return &FloatFuncBooleanReducer{fn: fn, prev: prev} +} + +// AggregateFloat takes a FloatPoint and invokes the reduce function with the +// current and new point to modify the current point. +func (r *FloatFuncBooleanReducer) AggregateFloat(p *FloatPoint) { + t, v, aux := r.fn(r.prev, p) + if r.prev == nil { + r.prev = &BooleanPoint{} + } + r.prev.Time = t + r.prev.Value = v + r.prev.Aux = aux + if p.Aggregated > 1 { + r.prev.Aggregated += p.Aggregated + } else { + r.prev.Aggregated++ + } +} + +// Emit emits the point that was generated when reducing the points fed in with AggregateFloat. +func (r *FloatFuncBooleanReducer) Emit() []BooleanPoint { + return []BooleanPoint{*r.prev} +} + +// FloatReduceBooleanSliceFunc is the function called by a FloatPoint reducer. +type FloatReduceBooleanSliceFunc func(a []FloatPoint) []BooleanPoint + +// FloatSliceFuncBooleanReducer is a reducer that aggregates +// the passed in points and then invokes the function to reduce the points when they are emitted. +type FloatSliceFuncBooleanReducer struct { + points []FloatPoint + fn FloatReduceBooleanSliceFunc +} + +// NewFloatSliceFuncBooleanReducer creates a new FloatSliceFuncBooleanReducer. +func NewFloatSliceFuncBooleanReducer(fn FloatReduceBooleanSliceFunc) *FloatSliceFuncBooleanReducer { + return &FloatSliceFuncBooleanReducer{fn: fn} +} + +// AggregateFloat copies the FloatPoint into the internal slice to be passed +// to the reduce function when Emit is called. +func (r *FloatSliceFuncBooleanReducer) AggregateFloat(p *FloatPoint) { + r.points = append(r.points, *p.Clone()) +} + +// AggregateFloatBulk performs a bulk copy of FloatPoints into the internal slice. +// This is a more efficient version of calling AggregateFloat on each point. +func (r *FloatSliceFuncBooleanReducer) AggregateFloatBulk(points []FloatPoint) { + r.points = append(r.points, points...) +} + +// Emit invokes the reduce function on the aggregated points to generate the aggregated points. +// This method does not clear the points from the internal slice. +func (r *FloatSliceFuncBooleanReducer) Emit() []BooleanPoint { + return r.fn(r.points) +} + +// FloatDistinctReducer returns the distinct points in a series. +type FloatDistinctReducer struct { + m map[float64]FloatPoint +} + +// NewFloatDistinctReducer creates a new FloatDistinctReducer. +func NewFloatDistinctReducer() *FloatDistinctReducer { + return &FloatDistinctReducer{m: make(map[float64]FloatPoint)} +} + +// AggregateFloat aggregates a point into the reducer. +func (r *FloatDistinctReducer) AggregateFloat(p *FloatPoint) { + if _, ok := r.m[p.Value]; !ok { + r.m[p.Value] = *p + } +} + +// Emit emits the distinct points that have been aggregated into the reducer. +func (r *FloatDistinctReducer) Emit() []FloatPoint { + points := make([]FloatPoint, 0, len(r.m)) + for _, p := range r.m { + points = append(points, FloatPoint{Time: p.Time, Value: p.Value}) + } + sort.Sort(floatPoints(points)) + return points +} + +// FloatElapsedReducer calculates the elapsed of the aggregated points. 
+type FloatElapsedReducer struct { + unitConversion int64 + prev FloatPoint + curr FloatPoint +} + +// NewFloatElapsedReducer creates a new FloatElapsedReducer. +func NewFloatElapsedReducer(interval Interval) *FloatElapsedReducer { + return &FloatElapsedReducer{ + unitConversion: int64(interval.Duration), + prev: FloatPoint{Nil: true}, + curr: FloatPoint{Nil: true}, + } +} + +// AggregateFloat aggregates a point into the reducer and updates the current window. +func (r *FloatElapsedReducer) AggregateFloat(p *FloatPoint) { + r.prev = r.curr + r.curr = *p +} + +// Emit emits the elapsed of the reducer at the current point. +func (r *FloatElapsedReducer) Emit() []IntegerPoint { + if !r.prev.Nil { + elapsed := (r.curr.Time - r.prev.Time) / r.unitConversion + return []IntegerPoint{ + {Time: r.curr.Time, Value: elapsed}, + } + } + return nil +} + +// FloatSampleReducer implements a reservoir sampling to calculate a random subset of points +type FloatSampleReducer struct { + count int // how many points we've iterated over + rng *rand.Rand // random number generator for each reducer + + points floatPoints // the reservoir +} + +// NewFloatSampleReducer creates a new FloatSampleReducer +func NewFloatSampleReducer(size int) *FloatSampleReducer { + return &FloatSampleReducer{ + rng: rand.New(rand.NewSource(time.Now().UnixNano())), // seed with current time as suggested by https://golang.org/pkg/math/rand/ + points: make(floatPoints, size), + } +} + +// AggregateFloat aggregates a point into the reducer. +func (r *FloatSampleReducer) AggregateFloat(p *FloatPoint) { + r.count++ + // Fill the reservoir with the first n points + if r.count-1 < len(r.points) { + p.CopyTo(&r.points[r.count-1]) + return + } + + // Generate a random integer between 1 and the count and + // if that number is less than the length of the slice + // replace the point at that index rnd with p. + rnd := r.rng.Intn(r.count) + if rnd < len(r.points) { + p.CopyTo(&r.points[rnd]) + } +} + +// Emit emits the reservoir sample as many points. +func (r *FloatSampleReducer) Emit() []FloatPoint { + min := len(r.points) + if r.count < min { + min = r.count + } + pts := r.points[:min] + sort.Sort(pts) + return pts +} + +// IntegerPointAggregator aggregates points to produce a single point. +type IntegerPointAggregator interface { + AggregateInteger(p *IntegerPoint) +} + +// IntegerBulkPointAggregator aggregates multiple points at a time. +type IntegerBulkPointAggregator interface { + AggregateIntegerBulk(points []IntegerPoint) +} + +// AggregateIntegerPoints feeds a slice of IntegerPoint into an +// aggregator. If the aggregator is a IntegerBulkPointAggregator, it will +// use the AggregateBulk method. +func AggregateIntegerPoints(a IntegerPointAggregator, points []IntegerPoint) { + switch a := a.(type) { + case IntegerBulkPointAggregator: + a.AggregateIntegerBulk(points) + default: + for _, p := range points { + a.AggregateInteger(&p) + } + } +} + +// IntegerPointEmitter produces a single point from an aggregate. +type IntegerPointEmitter interface { + Emit() []IntegerPoint +} + +// IntegerReduceFloatFunc is the function called by a IntegerPoint reducer. +type IntegerReduceFloatFunc func(prev *FloatPoint, curr *IntegerPoint) (t int64, v float64, aux []interface{}) + +// IntegerFuncFloatReducer is a reducer that reduces +// the passed in points to a single point using a reduce function. 
+type IntegerFuncFloatReducer struct { + prev *FloatPoint + fn IntegerReduceFloatFunc +} + +// NewIntegerFuncFloatReducer creates a new IntegerFuncFloatReducer. +func NewIntegerFuncFloatReducer(fn IntegerReduceFloatFunc, prev *FloatPoint) *IntegerFuncFloatReducer { + return &IntegerFuncFloatReducer{fn: fn, prev: prev} +} + +// AggregateInteger takes a IntegerPoint and invokes the reduce function with the +// current and new point to modify the current point. +func (r *IntegerFuncFloatReducer) AggregateInteger(p *IntegerPoint) { + t, v, aux := r.fn(r.prev, p) + if r.prev == nil { + r.prev = &FloatPoint{} + } + r.prev.Time = t + r.prev.Value = v + r.prev.Aux = aux + if p.Aggregated > 1 { + r.prev.Aggregated += p.Aggregated + } else { + r.prev.Aggregated++ + } +} + +// Emit emits the point that was generated when reducing the points fed in with AggregateInteger. +func (r *IntegerFuncFloatReducer) Emit() []FloatPoint { + return []FloatPoint{*r.prev} +} + +// IntegerReduceFloatSliceFunc is the function called by a IntegerPoint reducer. +type IntegerReduceFloatSliceFunc func(a []IntegerPoint) []FloatPoint + +// IntegerSliceFuncFloatReducer is a reducer that aggregates +// the passed in points and then invokes the function to reduce the points when they are emitted. +type IntegerSliceFuncFloatReducer struct { + points []IntegerPoint + fn IntegerReduceFloatSliceFunc +} + +// NewIntegerSliceFuncFloatReducer creates a new IntegerSliceFuncFloatReducer. +func NewIntegerSliceFuncFloatReducer(fn IntegerReduceFloatSliceFunc) *IntegerSliceFuncFloatReducer { + return &IntegerSliceFuncFloatReducer{fn: fn} +} + +// AggregateInteger copies the IntegerPoint into the internal slice to be passed +// to the reduce function when Emit is called. +func (r *IntegerSliceFuncFloatReducer) AggregateInteger(p *IntegerPoint) { + r.points = append(r.points, *p.Clone()) +} + +// AggregateIntegerBulk performs a bulk copy of IntegerPoints into the internal slice. +// This is a more efficient version of calling AggregateInteger on each point. +func (r *IntegerSliceFuncFloatReducer) AggregateIntegerBulk(points []IntegerPoint) { + r.points = append(r.points, points...) +} + +// Emit invokes the reduce function on the aggregated points to generate the aggregated points. +// This method does not clear the points from the internal slice. +func (r *IntegerSliceFuncFloatReducer) Emit() []FloatPoint { + return r.fn(r.points) +} + +// IntegerReduceFunc is the function called by a IntegerPoint reducer. +type IntegerReduceFunc func(prev *IntegerPoint, curr *IntegerPoint) (t int64, v int64, aux []interface{}) + +// IntegerFuncReducer is a reducer that reduces +// the passed in points to a single point using a reduce function. +type IntegerFuncReducer struct { + prev *IntegerPoint + fn IntegerReduceFunc +} + +// NewIntegerFuncReducer creates a new IntegerFuncIntegerReducer. +func NewIntegerFuncReducer(fn IntegerReduceFunc, prev *IntegerPoint) *IntegerFuncReducer { + return &IntegerFuncReducer{fn: fn, prev: prev} +} + +// AggregateInteger takes a IntegerPoint and invokes the reduce function with the +// current and new point to modify the current point. 
+func (r *IntegerFuncReducer) AggregateInteger(p *IntegerPoint) { + t, v, aux := r.fn(r.prev, p) + if r.prev == nil { + r.prev = &IntegerPoint{} + } + r.prev.Time = t + r.prev.Value = v + r.prev.Aux = aux + if p.Aggregated > 1 { + r.prev.Aggregated += p.Aggregated + } else { + r.prev.Aggregated++ + } +} + +// Emit emits the point that was generated when reducing the points fed in with AggregateInteger. +func (r *IntegerFuncReducer) Emit() []IntegerPoint { + return []IntegerPoint{*r.prev} +} + +// IntegerReduceSliceFunc is the function called by a IntegerPoint reducer. +type IntegerReduceSliceFunc func(a []IntegerPoint) []IntegerPoint + +// IntegerSliceFuncReducer is a reducer that aggregates +// the passed in points and then invokes the function to reduce the points when they are emitted. +type IntegerSliceFuncReducer struct { + points []IntegerPoint + fn IntegerReduceSliceFunc +} + +// NewIntegerSliceFuncReducer creates a new IntegerSliceFuncReducer. +func NewIntegerSliceFuncReducer(fn IntegerReduceSliceFunc) *IntegerSliceFuncReducer { + return &IntegerSliceFuncReducer{fn: fn} +} + +// AggregateInteger copies the IntegerPoint into the internal slice to be passed +// to the reduce function when Emit is called. +func (r *IntegerSliceFuncReducer) AggregateInteger(p *IntegerPoint) { + r.points = append(r.points, *p.Clone()) +} + +// AggregateIntegerBulk performs a bulk copy of IntegerPoints into the internal slice. +// This is a more efficient version of calling AggregateInteger on each point. +func (r *IntegerSliceFuncReducer) AggregateIntegerBulk(points []IntegerPoint) { + r.points = append(r.points, points...) +} + +// Emit invokes the reduce function on the aggregated points to generate the aggregated points. +// This method does not clear the points from the internal slice. +func (r *IntegerSliceFuncReducer) Emit() []IntegerPoint { + return r.fn(r.points) +} + +// IntegerReduceUnsignedFunc is the function called by a IntegerPoint reducer. +type IntegerReduceUnsignedFunc func(prev *UnsignedPoint, curr *IntegerPoint) (t int64, v uint64, aux []interface{}) + +// IntegerFuncUnsignedReducer is a reducer that reduces +// the passed in points to a single point using a reduce function. +type IntegerFuncUnsignedReducer struct { + prev *UnsignedPoint + fn IntegerReduceUnsignedFunc +} + +// NewIntegerFuncUnsignedReducer creates a new IntegerFuncUnsignedReducer. +func NewIntegerFuncUnsignedReducer(fn IntegerReduceUnsignedFunc, prev *UnsignedPoint) *IntegerFuncUnsignedReducer { + return &IntegerFuncUnsignedReducer{fn: fn, prev: prev} +} + +// AggregateInteger takes a IntegerPoint and invokes the reduce function with the +// current and new point to modify the current point. +func (r *IntegerFuncUnsignedReducer) AggregateInteger(p *IntegerPoint) { + t, v, aux := r.fn(r.prev, p) + if r.prev == nil { + r.prev = &UnsignedPoint{} + } + r.prev.Time = t + r.prev.Value = v + r.prev.Aux = aux + if p.Aggregated > 1 { + r.prev.Aggregated += p.Aggregated + } else { + r.prev.Aggregated++ + } +} + +// Emit emits the point that was generated when reducing the points fed in with AggregateInteger. +func (r *IntegerFuncUnsignedReducer) Emit() []UnsignedPoint { + return []UnsignedPoint{*r.prev} +} + +// IntegerReduceUnsignedSliceFunc is the function called by a IntegerPoint reducer. 
+type IntegerReduceUnsignedSliceFunc func(a []IntegerPoint) []UnsignedPoint + +// IntegerSliceFuncUnsignedReducer is a reducer that aggregates +// the passed in points and then invokes the function to reduce the points when they are emitted. +type IntegerSliceFuncUnsignedReducer struct { + points []IntegerPoint + fn IntegerReduceUnsignedSliceFunc +} + +// NewIntegerSliceFuncUnsignedReducer creates a new IntegerSliceFuncUnsignedReducer. +func NewIntegerSliceFuncUnsignedReducer(fn IntegerReduceUnsignedSliceFunc) *IntegerSliceFuncUnsignedReducer { + return &IntegerSliceFuncUnsignedReducer{fn: fn} +} + +// AggregateInteger copies the IntegerPoint into the internal slice to be passed +// to the reduce function when Emit is called. +func (r *IntegerSliceFuncUnsignedReducer) AggregateInteger(p *IntegerPoint) { + r.points = append(r.points, *p.Clone()) +} + +// AggregateIntegerBulk performs a bulk copy of IntegerPoints into the internal slice. +// This is a more efficient version of calling AggregateInteger on each point. +func (r *IntegerSliceFuncUnsignedReducer) AggregateIntegerBulk(points []IntegerPoint) { + r.points = append(r.points, points...) +} + +// Emit invokes the reduce function on the aggregated points to generate the aggregated points. +// This method does not clear the points from the internal slice. +func (r *IntegerSliceFuncUnsignedReducer) Emit() []UnsignedPoint { + return r.fn(r.points) +} + +// IntegerReduceStringFunc is the function called by a IntegerPoint reducer. +type IntegerReduceStringFunc func(prev *StringPoint, curr *IntegerPoint) (t int64, v string, aux []interface{}) + +// IntegerFuncStringReducer is a reducer that reduces +// the passed in points to a single point using a reduce function. +type IntegerFuncStringReducer struct { + prev *StringPoint + fn IntegerReduceStringFunc +} + +// NewIntegerFuncStringReducer creates a new IntegerFuncStringReducer. +func NewIntegerFuncStringReducer(fn IntegerReduceStringFunc, prev *StringPoint) *IntegerFuncStringReducer { + return &IntegerFuncStringReducer{fn: fn, prev: prev} +} + +// AggregateInteger takes a IntegerPoint and invokes the reduce function with the +// current and new point to modify the current point. +func (r *IntegerFuncStringReducer) AggregateInteger(p *IntegerPoint) { + t, v, aux := r.fn(r.prev, p) + if r.prev == nil { + r.prev = &StringPoint{} + } + r.prev.Time = t + r.prev.Value = v + r.prev.Aux = aux + if p.Aggregated > 1 { + r.prev.Aggregated += p.Aggregated + } else { + r.prev.Aggregated++ + } +} + +// Emit emits the point that was generated when reducing the points fed in with AggregateInteger. +func (r *IntegerFuncStringReducer) Emit() []StringPoint { + return []StringPoint{*r.prev} +} + +// IntegerReduceStringSliceFunc is the function called by a IntegerPoint reducer. +type IntegerReduceStringSliceFunc func(a []IntegerPoint) []StringPoint + +// IntegerSliceFuncStringReducer is a reducer that aggregates +// the passed in points and then invokes the function to reduce the points when they are emitted. +type IntegerSliceFuncStringReducer struct { + points []IntegerPoint + fn IntegerReduceStringSliceFunc +} + +// NewIntegerSliceFuncStringReducer creates a new IntegerSliceFuncStringReducer. +func NewIntegerSliceFuncStringReducer(fn IntegerReduceStringSliceFunc) *IntegerSliceFuncStringReducer { + return &IntegerSliceFuncStringReducer{fn: fn} +} + +// AggregateInteger copies the IntegerPoint into the internal slice to be passed +// to the reduce function when Emit is called. 
+func (r *IntegerSliceFuncStringReducer) AggregateInteger(p *IntegerPoint) { + r.points = append(r.points, *p.Clone()) +} + +// AggregateIntegerBulk performs a bulk copy of IntegerPoints into the internal slice. +// This is a more efficient version of calling AggregateInteger on each point. +func (r *IntegerSliceFuncStringReducer) AggregateIntegerBulk(points []IntegerPoint) { + r.points = append(r.points, points...) +} + +// Emit invokes the reduce function on the aggregated points to generate the aggregated points. +// This method does not clear the points from the internal slice. +func (r *IntegerSliceFuncStringReducer) Emit() []StringPoint { + return r.fn(r.points) +} + +// IntegerReduceBooleanFunc is the function called by a IntegerPoint reducer. +type IntegerReduceBooleanFunc func(prev *BooleanPoint, curr *IntegerPoint) (t int64, v bool, aux []interface{}) + +// IntegerFuncBooleanReducer is a reducer that reduces +// the passed in points to a single point using a reduce function. +type IntegerFuncBooleanReducer struct { + prev *BooleanPoint + fn IntegerReduceBooleanFunc +} + +// NewIntegerFuncBooleanReducer creates a new IntegerFuncBooleanReducer. +func NewIntegerFuncBooleanReducer(fn IntegerReduceBooleanFunc, prev *BooleanPoint) *IntegerFuncBooleanReducer { + return &IntegerFuncBooleanReducer{fn: fn, prev: prev} +} + +// AggregateInteger takes a IntegerPoint and invokes the reduce function with the +// current and new point to modify the current point. +func (r *IntegerFuncBooleanReducer) AggregateInteger(p *IntegerPoint) { + t, v, aux := r.fn(r.prev, p) + if r.prev == nil { + r.prev = &BooleanPoint{} + } + r.prev.Time = t + r.prev.Value = v + r.prev.Aux = aux + if p.Aggregated > 1 { + r.prev.Aggregated += p.Aggregated + } else { + r.prev.Aggregated++ + } +} + +// Emit emits the point that was generated when reducing the points fed in with AggregateInteger. +func (r *IntegerFuncBooleanReducer) Emit() []BooleanPoint { + return []BooleanPoint{*r.prev} +} + +// IntegerReduceBooleanSliceFunc is the function called by a IntegerPoint reducer. +type IntegerReduceBooleanSliceFunc func(a []IntegerPoint) []BooleanPoint + +// IntegerSliceFuncBooleanReducer is a reducer that aggregates +// the passed in points and then invokes the function to reduce the points when they are emitted. +type IntegerSliceFuncBooleanReducer struct { + points []IntegerPoint + fn IntegerReduceBooleanSliceFunc +} + +// NewIntegerSliceFuncBooleanReducer creates a new IntegerSliceFuncBooleanReducer. +func NewIntegerSliceFuncBooleanReducer(fn IntegerReduceBooleanSliceFunc) *IntegerSliceFuncBooleanReducer { + return &IntegerSliceFuncBooleanReducer{fn: fn} +} + +// AggregateInteger copies the IntegerPoint into the internal slice to be passed +// to the reduce function when Emit is called. +func (r *IntegerSliceFuncBooleanReducer) AggregateInteger(p *IntegerPoint) { + r.points = append(r.points, *p.Clone()) +} + +// AggregateIntegerBulk performs a bulk copy of IntegerPoints into the internal slice. +// This is a more efficient version of calling AggregateInteger on each point. +func (r *IntegerSliceFuncBooleanReducer) AggregateIntegerBulk(points []IntegerPoint) { + r.points = append(r.points, points...) +} + +// Emit invokes the reduce function on the aggregated points to generate the aggregated points. +// This method does not clear the points from the internal slice. 
+func (r *IntegerSliceFuncBooleanReducer) Emit() []BooleanPoint { + return r.fn(r.points) +} + +// IntegerDistinctReducer returns the distinct points in a series. +type IntegerDistinctReducer struct { + m map[int64]IntegerPoint +} + +// NewIntegerDistinctReducer creates a new IntegerDistinctReducer. +func NewIntegerDistinctReducer() *IntegerDistinctReducer { + return &IntegerDistinctReducer{m: make(map[int64]IntegerPoint)} +} + +// AggregateInteger aggregates a point into the reducer. +func (r *IntegerDistinctReducer) AggregateInteger(p *IntegerPoint) { + if _, ok := r.m[p.Value]; !ok { + r.m[p.Value] = *p + } +} + +// Emit emits the distinct points that have been aggregated into the reducer. +func (r *IntegerDistinctReducer) Emit() []IntegerPoint { + points := make([]IntegerPoint, 0, len(r.m)) + for _, p := range r.m { + points = append(points, IntegerPoint{Time: p.Time, Value: p.Value}) + } + sort.Sort(integerPoints(points)) + return points +} + +// IntegerElapsedReducer calculates the elapsed of the aggregated points. +type IntegerElapsedReducer struct { + unitConversion int64 + prev IntegerPoint + curr IntegerPoint +} + +// NewIntegerElapsedReducer creates a new IntegerElapsedReducer. +func NewIntegerElapsedReducer(interval Interval) *IntegerElapsedReducer { + return &IntegerElapsedReducer{ + unitConversion: int64(interval.Duration), + prev: IntegerPoint{Nil: true}, + curr: IntegerPoint{Nil: true}, + } +} + +// AggregateInteger aggregates a point into the reducer and updates the current window. +func (r *IntegerElapsedReducer) AggregateInteger(p *IntegerPoint) { + r.prev = r.curr + r.curr = *p +} + +// Emit emits the elapsed of the reducer at the current point. +func (r *IntegerElapsedReducer) Emit() []IntegerPoint { + if !r.prev.Nil { + elapsed := (r.curr.Time - r.prev.Time) / r.unitConversion + return []IntegerPoint{ + {Time: r.curr.Time, Value: elapsed}, + } + } + return nil +} + +// IntegerSampleReducer implements a reservoir sampling to calculate a random subset of points +type IntegerSampleReducer struct { + count int // how many points we've iterated over + rng *rand.Rand // random number generator for each reducer + + points integerPoints // the reservoir +} + +// NewIntegerSampleReducer creates a new IntegerSampleReducer +func NewIntegerSampleReducer(size int) *IntegerSampleReducer { + return &IntegerSampleReducer{ + rng: rand.New(rand.NewSource(time.Now().UnixNano())), // seed with current time as suggested by https://golang.org/pkg/math/rand/ + points: make(integerPoints, size), + } +} + +// AggregateInteger aggregates a point into the reducer. +func (r *IntegerSampleReducer) AggregateInteger(p *IntegerPoint) { + r.count++ + // Fill the reservoir with the first n points + if r.count-1 < len(r.points) { + p.CopyTo(&r.points[r.count-1]) + return + } + + // Generate a random integer between 1 and the count and + // if that number is less than the length of the slice + // replace the point at that index rnd with p. + rnd := r.rng.Intn(r.count) + if rnd < len(r.points) { + p.CopyTo(&r.points[rnd]) + } +} + +// Emit emits the reservoir sample as many points. +func (r *IntegerSampleReducer) Emit() []IntegerPoint { + min := len(r.points) + if r.count < min { + min = r.count + } + pts := r.points[:min] + sort.Sort(pts) + return pts +} + +// UnsignedPointAggregator aggregates points to produce a single point. +type UnsignedPointAggregator interface { + AggregateUnsigned(p *UnsignedPoint) +} + +// UnsignedBulkPointAggregator aggregates multiple points at a time. 
+type UnsignedBulkPointAggregator interface { + AggregateUnsignedBulk(points []UnsignedPoint) +} + +// AggregateUnsignedPoints feeds a slice of UnsignedPoint into an +// aggregator. If the aggregator is a UnsignedBulkPointAggregator, it will +// use the AggregateBulk method. +func AggregateUnsignedPoints(a UnsignedPointAggregator, points []UnsignedPoint) { + switch a := a.(type) { + case UnsignedBulkPointAggregator: + a.AggregateUnsignedBulk(points) + default: + for _, p := range points { + a.AggregateUnsigned(&p) + } + } +} + +// UnsignedPointEmitter produces a single point from an aggregate. +type UnsignedPointEmitter interface { + Emit() []UnsignedPoint +} + +// UnsignedReduceFloatFunc is the function called by a UnsignedPoint reducer. +type UnsignedReduceFloatFunc func(prev *FloatPoint, curr *UnsignedPoint) (t int64, v float64, aux []interface{}) + +// UnsignedFuncFloatReducer is a reducer that reduces +// the passed in points to a single point using a reduce function. +type UnsignedFuncFloatReducer struct { + prev *FloatPoint + fn UnsignedReduceFloatFunc +} + +// NewUnsignedFuncFloatReducer creates a new UnsignedFuncFloatReducer. +func NewUnsignedFuncFloatReducer(fn UnsignedReduceFloatFunc, prev *FloatPoint) *UnsignedFuncFloatReducer { + return &UnsignedFuncFloatReducer{fn: fn, prev: prev} +} + +// AggregateUnsigned takes a UnsignedPoint and invokes the reduce function with the +// current and new point to modify the current point. +func (r *UnsignedFuncFloatReducer) AggregateUnsigned(p *UnsignedPoint) { + t, v, aux := r.fn(r.prev, p) + if r.prev == nil { + r.prev = &FloatPoint{} + } + r.prev.Time = t + r.prev.Value = v + r.prev.Aux = aux + if p.Aggregated > 1 { + r.prev.Aggregated += p.Aggregated + } else { + r.prev.Aggregated++ + } +} + +// Emit emits the point that was generated when reducing the points fed in with AggregateUnsigned. +func (r *UnsignedFuncFloatReducer) Emit() []FloatPoint { + return []FloatPoint{*r.prev} +} + +// UnsignedReduceFloatSliceFunc is the function called by a UnsignedPoint reducer. +type UnsignedReduceFloatSliceFunc func(a []UnsignedPoint) []FloatPoint + +// UnsignedSliceFuncFloatReducer is a reducer that aggregates +// the passed in points and then invokes the function to reduce the points when they are emitted. +type UnsignedSliceFuncFloatReducer struct { + points []UnsignedPoint + fn UnsignedReduceFloatSliceFunc +} + +// NewUnsignedSliceFuncFloatReducer creates a new UnsignedSliceFuncFloatReducer. +func NewUnsignedSliceFuncFloatReducer(fn UnsignedReduceFloatSliceFunc) *UnsignedSliceFuncFloatReducer { + return &UnsignedSliceFuncFloatReducer{fn: fn} +} + +// AggregateUnsigned copies the UnsignedPoint into the internal slice to be passed +// to the reduce function when Emit is called. +func (r *UnsignedSliceFuncFloatReducer) AggregateUnsigned(p *UnsignedPoint) { + r.points = append(r.points, *p.Clone()) +} + +// AggregateUnsignedBulk performs a bulk copy of UnsignedPoints into the internal slice. +// This is a more efficient version of calling AggregateUnsigned on each point. +func (r *UnsignedSliceFuncFloatReducer) AggregateUnsignedBulk(points []UnsignedPoint) { + r.points = append(r.points, points...) +} + +// Emit invokes the reduce function on the aggregated points to generate the aggregated points. +// This method does not clear the points from the internal slice. +func (r *UnsignedSliceFuncFloatReducer) Emit() []FloatPoint { + return r.fn(r.points) +} + +// UnsignedReduceIntegerFunc is the function called by a UnsignedPoint reducer. 
+type UnsignedReduceIntegerFunc func(prev *IntegerPoint, curr *UnsignedPoint) (t int64, v int64, aux []interface{}) + +// UnsignedFuncIntegerReducer is a reducer that reduces +// the passed in points to a single point using a reduce function. +type UnsignedFuncIntegerReducer struct { + prev *IntegerPoint + fn UnsignedReduceIntegerFunc +} + +// NewUnsignedFuncIntegerReducer creates a new UnsignedFuncIntegerReducer. +func NewUnsignedFuncIntegerReducer(fn UnsignedReduceIntegerFunc, prev *IntegerPoint) *UnsignedFuncIntegerReducer { + return &UnsignedFuncIntegerReducer{fn: fn, prev: prev} +} + +// AggregateUnsigned takes a UnsignedPoint and invokes the reduce function with the +// current and new point to modify the current point. +func (r *UnsignedFuncIntegerReducer) AggregateUnsigned(p *UnsignedPoint) { + t, v, aux := r.fn(r.prev, p) + if r.prev == nil { + r.prev = &IntegerPoint{} + } + r.prev.Time = t + r.prev.Value = v + r.prev.Aux = aux + if p.Aggregated > 1 { + r.prev.Aggregated += p.Aggregated + } else { + r.prev.Aggregated++ + } +} + +// Emit emits the point that was generated when reducing the points fed in with AggregateUnsigned. +func (r *UnsignedFuncIntegerReducer) Emit() []IntegerPoint { + return []IntegerPoint{*r.prev} +} + +// UnsignedReduceIntegerSliceFunc is the function called by a UnsignedPoint reducer. +type UnsignedReduceIntegerSliceFunc func(a []UnsignedPoint) []IntegerPoint + +// UnsignedSliceFuncIntegerReducer is a reducer that aggregates +// the passed in points and then invokes the function to reduce the points when they are emitted. +type UnsignedSliceFuncIntegerReducer struct { + points []UnsignedPoint + fn UnsignedReduceIntegerSliceFunc +} + +// NewUnsignedSliceFuncIntegerReducer creates a new UnsignedSliceFuncIntegerReducer. +func NewUnsignedSliceFuncIntegerReducer(fn UnsignedReduceIntegerSliceFunc) *UnsignedSliceFuncIntegerReducer { + return &UnsignedSliceFuncIntegerReducer{fn: fn} +} + +// AggregateUnsigned copies the UnsignedPoint into the internal slice to be passed +// to the reduce function when Emit is called. +func (r *UnsignedSliceFuncIntegerReducer) AggregateUnsigned(p *UnsignedPoint) { + r.points = append(r.points, *p.Clone()) +} + +// AggregateUnsignedBulk performs a bulk copy of UnsignedPoints into the internal slice. +// This is a more efficient version of calling AggregateUnsigned on each point. +func (r *UnsignedSliceFuncIntegerReducer) AggregateUnsignedBulk(points []UnsignedPoint) { + r.points = append(r.points, points...) +} + +// Emit invokes the reduce function on the aggregated points to generate the aggregated points. +// This method does not clear the points from the internal slice. +func (r *UnsignedSliceFuncIntegerReducer) Emit() []IntegerPoint { + return r.fn(r.points) +} + +// UnsignedReduceFunc is the function called by a UnsignedPoint reducer. +type UnsignedReduceFunc func(prev *UnsignedPoint, curr *UnsignedPoint) (t int64, v uint64, aux []interface{}) + +// UnsignedFuncReducer is a reducer that reduces +// the passed in points to a single point using a reduce function. +type UnsignedFuncReducer struct { + prev *UnsignedPoint + fn UnsignedReduceFunc +} + +// NewUnsignedFuncReducer creates a new UnsignedFuncUnsignedReducer. +func NewUnsignedFuncReducer(fn UnsignedReduceFunc, prev *UnsignedPoint) *UnsignedFuncReducer { + return &UnsignedFuncReducer{fn: fn, prev: prev} +} + +// AggregateUnsigned takes a UnsignedPoint and invokes the reduce function with the +// current and new point to modify the current point. 
+func (r *UnsignedFuncReducer) AggregateUnsigned(p *UnsignedPoint) { + t, v, aux := r.fn(r.prev, p) + if r.prev == nil { + r.prev = &UnsignedPoint{} + } + r.prev.Time = t + r.prev.Value = v + r.prev.Aux = aux + if p.Aggregated > 1 { + r.prev.Aggregated += p.Aggregated + } else { + r.prev.Aggregated++ + } +} + +// Emit emits the point that was generated when reducing the points fed in with AggregateUnsigned. +func (r *UnsignedFuncReducer) Emit() []UnsignedPoint { + return []UnsignedPoint{*r.prev} +} + +// UnsignedReduceSliceFunc is the function called by a UnsignedPoint reducer. +type UnsignedReduceSliceFunc func(a []UnsignedPoint) []UnsignedPoint + +// UnsignedSliceFuncReducer is a reducer that aggregates +// the passed in points and then invokes the function to reduce the points when they are emitted. +type UnsignedSliceFuncReducer struct { + points []UnsignedPoint + fn UnsignedReduceSliceFunc +} + +// NewUnsignedSliceFuncReducer creates a new UnsignedSliceFuncReducer. +func NewUnsignedSliceFuncReducer(fn UnsignedReduceSliceFunc) *UnsignedSliceFuncReducer { + return &UnsignedSliceFuncReducer{fn: fn} +} + +// AggregateUnsigned copies the UnsignedPoint into the internal slice to be passed +// to the reduce function when Emit is called. +func (r *UnsignedSliceFuncReducer) AggregateUnsigned(p *UnsignedPoint) { + r.points = append(r.points, *p.Clone()) +} + +// AggregateUnsignedBulk performs a bulk copy of UnsignedPoints into the internal slice. +// This is a more efficient version of calling AggregateUnsigned on each point. +func (r *UnsignedSliceFuncReducer) AggregateUnsignedBulk(points []UnsignedPoint) { + r.points = append(r.points, points...) +} + +// Emit invokes the reduce function on the aggregated points to generate the aggregated points. +// This method does not clear the points from the internal slice. +func (r *UnsignedSliceFuncReducer) Emit() []UnsignedPoint { + return r.fn(r.points) +} + +// UnsignedReduceStringFunc is the function called by a UnsignedPoint reducer. +type UnsignedReduceStringFunc func(prev *StringPoint, curr *UnsignedPoint) (t int64, v string, aux []interface{}) + +// UnsignedFuncStringReducer is a reducer that reduces +// the passed in points to a single point using a reduce function. +type UnsignedFuncStringReducer struct { + prev *StringPoint + fn UnsignedReduceStringFunc +} + +// NewUnsignedFuncStringReducer creates a new UnsignedFuncStringReducer. +func NewUnsignedFuncStringReducer(fn UnsignedReduceStringFunc, prev *StringPoint) *UnsignedFuncStringReducer { + return &UnsignedFuncStringReducer{fn: fn, prev: prev} +} + +// AggregateUnsigned takes a UnsignedPoint and invokes the reduce function with the +// current and new point to modify the current point. +func (r *UnsignedFuncStringReducer) AggregateUnsigned(p *UnsignedPoint) { + t, v, aux := r.fn(r.prev, p) + if r.prev == nil { + r.prev = &StringPoint{} + } + r.prev.Time = t + r.prev.Value = v + r.prev.Aux = aux + if p.Aggregated > 1 { + r.prev.Aggregated += p.Aggregated + } else { + r.prev.Aggregated++ + } +} + +// Emit emits the point that was generated when reducing the points fed in with AggregateUnsigned. +func (r *UnsignedFuncStringReducer) Emit() []StringPoint { + return []StringPoint{*r.prev} +} + +// UnsignedReduceStringSliceFunc is the function called by a UnsignedPoint reducer. 
+type UnsignedReduceStringSliceFunc func(a []UnsignedPoint) []StringPoint + +// UnsignedSliceFuncStringReducer is a reducer that aggregates +// the passed in points and then invokes the function to reduce the points when they are emitted. +type UnsignedSliceFuncStringReducer struct { + points []UnsignedPoint + fn UnsignedReduceStringSliceFunc +} + +// NewUnsignedSliceFuncStringReducer creates a new UnsignedSliceFuncStringReducer. +func NewUnsignedSliceFuncStringReducer(fn UnsignedReduceStringSliceFunc) *UnsignedSliceFuncStringReducer { + return &UnsignedSliceFuncStringReducer{fn: fn} +} + +// AggregateUnsigned copies the UnsignedPoint into the internal slice to be passed +// to the reduce function when Emit is called. +func (r *UnsignedSliceFuncStringReducer) AggregateUnsigned(p *UnsignedPoint) { + r.points = append(r.points, *p.Clone()) +} + +// AggregateUnsignedBulk performs a bulk copy of UnsignedPoints into the internal slice. +// This is a more efficient version of calling AggregateUnsigned on each point. +func (r *UnsignedSliceFuncStringReducer) AggregateUnsignedBulk(points []UnsignedPoint) { + r.points = append(r.points, points...) +} + +// Emit invokes the reduce function on the aggregated points to generate the aggregated points. +// This method does not clear the points from the internal slice. +func (r *UnsignedSliceFuncStringReducer) Emit() []StringPoint { + return r.fn(r.points) +} + +// UnsignedReduceBooleanFunc is the function called by a UnsignedPoint reducer. +type UnsignedReduceBooleanFunc func(prev *BooleanPoint, curr *UnsignedPoint) (t int64, v bool, aux []interface{}) + +// UnsignedFuncBooleanReducer is a reducer that reduces +// the passed in points to a single point using a reduce function. +type UnsignedFuncBooleanReducer struct { + prev *BooleanPoint + fn UnsignedReduceBooleanFunc +} + +// NewUnsignedFuncBooleanReducer creates a new UnsignedFuncBooleanReducer. +func NewUnsignedFuncBooleanReducer(fn UnsignedReduceBooleanFunc, prev *BooleanPoint) *UnsignedFuncBooleanReducer { + return &UnsignedFuncBooleanReducer{fn: fn, prev: prev} +} + +// AggregateUnsigned takes a UnsignedPoint and invokes the reduce function with the +// current and new point to modify the current point. +func (r *UnsignedFuncBooleanReducer) AggregateUnsigned(p *UnsignedPoint) { + t, v, aux := r.fn(r.prev, p) + if r.prev == nil { + r.prev = &BooleanPoint{} + } + r.prev.Time = t + r.prev.Value = v + r.prev.Aux = aux + if p.Aggregated > 1 { + r.prev.Aggregated += p.Aggregated + } else { + r.prev.Aggregated++ + } +} + +// Emit emits the point that was generated when reducing the points fed in with AggregateUnsigned. +func (r *UnsignedFuncBooleanReducer) Emit() []BooleanPoint { + return []BooleanPoint{*r.prev} +} + +// UnsignedReduceBooleanSliceFunc is the function called by a UnsignedPoint reducer. +type UnsignedReduceBooleanSliceFunc func(a []UnsignedPoint) []BooleanPoint + +// UnsignedSliceFuncBooleanReducer is a reducer that aggregates +// the passed in points and then invokes the function to reduce the points when they are emitted. +type UnsignedSliceFuncBooleanReducer struct { + points []UnsignedPoint + fn UnsignedReduceBooleanSliceFunc +} + +// NewUnsignedSliceFuncBooleanReducer creates a new UnsignedSliceFuncBooleanReducer. 
+func NewUnsignedSliceFuncBooleanReducer(fn UnsignedReduceBooleanSliceFunc) *UnsignedSliceFuncBooleanReducer { + return &UnsignedSliceFuncBooleanReducer{fn: fn} +} + +// AggregateUnsigned copies the UnsignedPoint into the internal slice to be passed +// to the reduce function when Emit is called. +func (r *UnsignedSliceFuncBooleanReducer) AggregateUnsigned(p *UnsignedPoint) { + r.points = append(r.points, *p.Clone()) +} + +// AggregateUnsignedBulk performs a bulk copy of UnsignedPoints into the internal slice. +// This is a more efficient version of calling AggregateUnsigned on each point. +func (r *UnsignedSliceFuncBooleanReducer) AggregateUnsignedBulk(points []UnsignedPoint) { + r.points = append(r.points, points...) +} + +// Emit invokes the reduce function on the aggregated points to generate the aggregated points. +// This method does not clear the points from the internal slice. +func (r *UnsignedSliceFuncBooleanReducer) Emit() []BooleanPoint { + return r.fn(r.points) +} + +// UnsignedDistinctReducer returns the distinct points in a series. +type UnsignedDistinctReducer struct { + m map[uint64]UnsignedPoint +} + +// NewUnsignedDistinctReducer creates a new UnsignedDistinctReducer. +func NewUnsignedDistinctReducer() *UnsignedDistinctReducer { + return &UnsignedDistinctReducer{m: make(map[uint64]UnsignedPoint)} +} + +// AggregateUnsigned aggregates a point into the reducer. +func (r *UnsignedDistinctReducer) AggregateUnsigned(p *UnsignedPoint) { + if _, ok := r.m[p.Value]; !ok { + r.m[p.Value] = *p + } +} + +// Emit emits the distinct points that have been aggregated into the reducer. +func (r *UnsignedDistinctReducer) Emit() []UnsignedPoint { + points := make([]UnsignedPoint, 0, len(r.m)) + for _, p := range r.m { + points = append(points, UnsignedPoint{Time: p.Time, Value: p.Value}) + } + sort.Sort(unsignedPoints(points)) + return points +} + +// UnsignedElapsedReducer calculates the elapsed of the aggregated points. +type UnsignedElapsedReducer struct { + unitConversion int64 + prev UnsignedPoint + curr UnsignedPoint +} + +// NewUnsignedElapsedReducer creates a new UnsignedElapsedReducer. +func NewUnsignedElapsedReducer(interval Interval) *UnsignedElapsedReducer { + return &UnsignedElapsedReducer{ + unitConversion: int64(interval.Duration), + prev: UnsignedPoint{Nil: true}, + curr: UnsignedPoint{Nil: true}, + } +} + +// AggregateUnsigned aggregates a point into the reducer and updates the current window. +func (r *UnsignedElapsedReducer) AggregateUnsigned(p *UnsignedPoint) { + r.prev = r.curr + r.curr = *p +} + +// Emit emits the elapsed of the reducer at the current point. +func (r *UnsignedElapsedReducer) Emit() []IntegerPoint { + if !r.prev.Nil { + elapsed := (r.curr.Time - r.prev.Time) / r.unitConversion + return []IntegerPoint{ + {Time: r.curr.Time, Value: elapsed}, + } + } + return nil +} + +// UnsignedSampleReducer implements a reservoir sampling to calculate a random subset of points +type UnsignedSampleReducer struct { + count int // how many points we've iterated over + rng *rand.Rand // random number generator for each reducer + + points unsignedPoints // the reservoir +} + +// NewUnsignedSampleReducer creates a new UnsignedSampleReducer +func NewUnsignedSampleReducer(size int) *UnsignedSampleReducer { + return &UnsignedSampleReducer{ + rng: rand.New(rand.NewSource(time.Now().UnixNano())), // seed with current time as suggested by https://golang.org/pkg/math/rand/ + points: make(unsignedPoints, size), + } +} + +// AggregateUnsigned aggregates a point into the reducer. 
+func (r *UnsignedSampleReducer) AggregateUnsigned(p *UnsignedPoint) { + r.count++ + // Fill the reservoir with the first n points + if r.count-1 < len(r.points) { + p.CopyTo(&r.points[r.count-1]) + return + } + + // Generate a random integer between 1 and the count and + // if that number is less than the length of the slice + // replace the point at that index rnd with p. + rnd := r.rng.Intn(r.count) + if rnd < len(r.points) { + p.CopyTo(&r.points[rnd]) + } +} + +// Emit emits the reservoir sample as many points. +func (r *UnsignedSampleReducer) Emit() []UnsignedPoint { + min := len(r.points) + if r.count < min { + min = r.count + } + pts := r.points[:min] + sort.Sort(pts) + return pts +} + +// StringPointAggregator aggregates points to produce a single point. +type StringPointAggregator interface { + AggregateString(p *StringPoint) +} + +// StringBulkPointAggregator aggregates multiple points at a time. +type StringBulkPointAggregator interface { + AggregateStringBulk(points []StringPoint) +} + +// AggregateStringPoints feeds a slice of StringPoint into an +// aggregator. If the aggregator is a StringBulkPointAggregator, it will +// use the AggregateBulk method. +func AggregateStringPoints(a StringPointAggregator, points []StringPoint) { + switch a := a.(type) { + case StringBulkPointAggregator: + a.AggregateStringBulk(points) + default: + for _, p := range points { + a.AggregateString(&p) + } + } +} + +// StringPointEmitter produces a single point from an aggregate. +type StringPointEmitter interface { + Emit() []StringPoint +} + +// StringReduceFloatFunc is the function called by a StringPoint reducer. +type StringReduceFloatFunc func(prev *FloatPoint, curr *StringPoint) (t int64, v float64, aux []interface{}) + +// StringFuncFloatReducer is a reducer that reduces +// the passed in points to a single point using a reduce function. +type StringFuncFloatReducer struct { + prev *FloatPoint + fn StringReduceFloatFunc +} + +// NewStringFuncFloatReducer creates a new StringFuncFloatReducer. +func NewStringFuncFloatReducer(fn StringReduceFloatFunc, prev *FloatPoint) *StringFuncFloatReducer { + return &StringFuncFloatReducer{fn: fn, prev: prev} +} + +// AggregateString takes a StringPoint and invokes the reduce function with the +// current and new point to modify the current point. +func (r *StringFuncFloatReducer) AggregateString(p *StringPoint) { + t, v, aux := r.fn(r.prev, p) + if r.prev == nil { + r.prev = &FloatPoint{} + } + r.prev.Time = t + r.prev.Value = v + r.prev.Aux = aux + if p.Aggregated > 1 { + r.prev.Aggregated += p.Aggregated + } else { + r.prev.Aggregated++ + } +} + +// Emit emits the point that was generated when reducing the points fed in with AggregateString. +func (r *StringFuncFloatReducer) Emit() []FloatPoint { + return []FloatPoint{*r.prev} +} + +// StringReduceFloatSliceFunc is the function called by a StringPoint reducer. +type StringReduceFloatSliceFunc func(a []StringPoint) []FloatPoint + +// StringSliceFuncFloatReducer is a reducer that aggregates +// the passed in points and then invokes the function to reduce the points when they are emitted. +type StringSliceFuncFloatReducer struct { + points []StringPoint + fn StringReduceFloatSliceFunc +} + +// NewStringSliceFuncFloatReducer creates a new StringSliceFuncFloatReducer. 
+func NewStringSliceFuncFloatReducer(fn StringReduceFloatSliceFunc) *StringSliceFuncFloatReducer { + return &StringSliceFuncFloatReducer{fn: fn} +} + +// AggregateString copies the StringPoint into the internal slice to be passed +// to the reduce function when Emit is called. +func (r *StringSliceFuncFloatReducer) AggregateString(p *StringPoint) { + r.points = append(r.points, *p.Clone()) +} + +// AggregateStringBulk performs a bulk copy of StringPoints into the internal slice. +// This is a more efficient version of calling AggregateString on each point. +func (r *StringSliceFuncFloatReducer) AggregateStringBulk(points []StringPoint) { + r.points = append(r.points, points...) +} + +// Emit invokes the reduce function on the aggregated points to generate the aggregated points. +// This method does not clear the points from the internal slice. +func (r *StringSliceFuncFloatReducer) Emit() []FloatPoint { + return r.fn(r.points) +} + +// StringReduceIntegerFunc is the function called by a StringPoint reducer. +type StringReduceIntegerFunc func(prev *IntegerPoint, curr *StringPoint) (t int64, v int64, aux []interface{}) + +// StringFuncIntegerReducer is a reducer that reduces +// the passed in points to a single point using a reduce function. +type StringFuncIntegerReducer struct { + prev *IntegerPoint + fn StringReduceIntegerFunc +} + +// NewStringFuncIntegerReducer creates a new StringFuncIntegerReducer. +func NewStringFuncIntegerReducer(fn StringReduceIntegerFunc, prev *IntegerPoint) *StringFuncIntegerReducer { + return &StringFuncIntegerReducer{fn: fn, prev: prev} +} + +// AggregateString takes a StringPoint and invokes the reduce function with the +// current and new point to modify the current point. +func (r *StringFuncIntegerReducer) AggregateString(p *StringPoint) { + t, v, aux := r.fn(r.prev, p) + if r.prev == nil { + r.prev = &IntegerPoint{} + } + r.prev.Time = t + r.prev.Value = v + r.prev.Aux = aux + if p.Aggregated > 1 { + r.prev.Aggregated += p.Aggregated + } else { + r.prev.Aggregated++ + } +} + +// Emit emits the point that was generated when reducing the points fed in with AggregateString. +func (r *StringFuncIntegerReducer) Emit() []IntegerPoint { + return []IntegerPoint{*r.prev} +} + +// StringReduceIntegerSliceFunc is the function called by a StringPoint reducer. +type StringReduceIntegerSliceFunc func(a []StringPoint) []IntegerPoint + +// StringSliceFuncIntegerReducer is a reducer that aggregates +// the passed in points and then invokes the function to reduce the points when they are emitted. +type StringSliceFuncIntegerReducer struct { + points []StringPoint + fn StringReduceIntegerSliceFunc +} + +// NewStringSliceFuncIntegerReducer creates a new StringSliceFuncIntegerReducer. +func NewStringSliceFuncIntegerReducer(fn StringReduceIntegerSliceFunc) *StringSliceFuncIntegerReducer { + return &StringSliceFuncIntegerReducer{fn: fn} +} + +// AggregateString copies the StringPoint into the internal slice to be passed +// to the reduce function when Emit is called. +func (r *StringSliceFuncIntegerReducer) AggregateString(p *StringPoint) { + r.points = append(r.points, *p.Clone()) +} + +// AggregateStringBulk performs a bulk copy of StringPoints into the internal slice. +// This is a more efficient version of calling AggregateString on each point. +func (r *StringSliceFuncIntegerReducer) AggregateStringBulk(points []StringPoint) { + r.points = append(r.points, points...) +} + +// Emit invokes the reduce function on the aggregated points to generate the aggregated points. 
+// This method does not clear the points from the internal slice. +func (r *StringSliceFuncIntegerReducer) Emit() []IntegerPoint { + return r.fn(r.points) +} + +// StringReduceUnsignedFunc is the function called by a StringPoint reducer. +type StringReduceUnsignedFunc func(prev *UnsignedPoint, curr *StringPoint) (t int64, v uint64, aux []interface{}) + +// StringFuncUnsignedReducer is a reducer that reduces +// the passed in points to a single point using a reduce function. +type StringFuncUnsignedReducer struct { + prev *UnsignedPoint + fn StringReduceUnsignedFunc +} + +// NewStringFuncUnsignedReducer creates a new StringFuncUnsignedReducer. +func NewStringFuncUnsignedReducer(fn StringReduceUnsignedFunc, prev *UnsignedPoint) *StringFuncUnsignedReducer { + return &StringFuncUnsignedReducer{fn: fn, prev: prev} +} + +// AggregateString takes a StringPoint and invokes the reduce function with the +// current and new point to modify the current point. +func (r *StringFuncUnsignedReducer) AggregateString(p *StringPoint) { + t, v, aux := r.fn(r.prev, p) + if r.prev == nil { + r.prev = &UnsignedPoint{} + } + r.prev.Time = t + r.prev.Value = v + r.prev.Aux = aux + if p.Aggregated > 1 { + r.prev.Aggregated += p.Aggregated + } else { + r.prev.Aggregated++ + } +} + +// Emit emits the point that was generated when reducing the points fed in with AggregateString. +func (r *StringFuncUnsignedReducer) Emit() []UnsignedPoint { + return []UnsignedPoint{*r.prev} +} + +// StringReduceUnsignedSliceFunc is the function called by a StringPoint reducer. +type StringReduceUnsignedSliceFunc func(a []StringPoint) []UnsignedPoint + +// StringSliceFuncUnsignedReducer is a reducer that aggregates +// the passed in points and then invokes the function to reduce the points when they are emitted. +type StringSliceFuncUnsignedReducer struct { + points []StringPoint + fn StringReduceUnsignedSliceFunc +} + +// NewStringSliceFuncUnsignedReducer creates a new StringSliceFuncUnsignedReducer. +func NewStringSliceFuncUnsignedReducer(fn StringReduceUnsignedSliceFunc) *StringSliceFuncUnsignedReducer { + return &StringSliceFuncUnsignedReducer{fn: fn} +} + +// AggregateString copies the StringPoint into the internal slice to be passed +// to the reduce function when Emit is called. +func (r *StringSliceFuncUnsignedReducer) AggregateString(p *StringPoint) { + r.points = append(r.points, *p.Clone()) +} + +// AggregateStringBulk performs a bulk copy of StringPoints into the internal slice. +// This is a more efficient version of calling AggregateString on each point. +func (r *StringSliceFuncUnsignedReducer) AggregateStringBulk(points []StringPoint) { + r.points = append(r.points, points...) +} + +// Emit invokes the reduce function on the aggregated points to generate the aggregated points. +// This method does not clear the points from the internal slice. +func (r *StringSliceFuncUnsignedReducer) Emit() []UnsignedPoint { + return r.fn(r.points) +} + +// StringReduceFunc is the function called by a StringPoint reducer. +type StringReduceFunc func(prev *StringPoint, curr *StringPoint) (t int64, v string, aux []interface{}) + +// StringFuncReducer is a reducer that reduces +// the passed in points to a single point using a reduce function. +type StringFuncReducer struct { + prev *StringPoint + fn StringReduceFunc +} + +// NewStringFuncReducer creates a new StringFuncStringReducer. 
+func NewStringFuncReducer(fn StringReduceFunc, prev *StringPoint) *StringFuncReducer { + return &StringFuncReducer{fn: fn, prev: prev} +} + +// AggregateString takes a StringPoint and invokes the reduce function with the +// current and new point to modify the current point. +func (r *StringFuncReducer) AggregateString(p *StringPoint) { + t, v, aux := r.fn(r.prev, p) + if r.prev == nil { + r.prev = &StringPoint{} + } + r.prev.Time = t + r.prev.Value = v + r.prev.Aux = aux + if p.Aggregated > 1 { + r.prev.Aggregated += p.Aggregated + } else { + r.prev.Aggregated++ + } +} + +// Emit emits the point that was generated when reducing the points fed in with AggregateString. +func (r *StringFuncReducer) Emit() []StringPoint { + return []StringPoint{*r.prev} +} + +// StringReduceSliceFunc is the function called by a StringPoint reducer. +type StringReduceSliceFunc func(a []StringPoint) []StringPoint + +// StringSliceFuncReducer is a reducer that aggregates +// the passed in points and then invokes the function to reduce the points when they are emitted. +type StringSliceFuncReducer struct { + points []StringPoint + fn StringReduceSliceFunc +} + +// NewStringSliceFuncReducer creates a new StringSliceFuncReducer. +func NewStringSliceFuncReducer(fn StringReduceSliceFunc) *StringSliceFuncReducer { + return &StringSliceFuncReducer{fn: fn} +} + +// AggregateString copies the StringPoint into the internal slice to be passed +// to the reduce function when Emit is called. +func (r *StringSliceFuncReducer) AggregateString(p *StringPoint) { + r.points = append(r.points, *p.Clone()) +} + +// AggregateStringBulk performs a bulk copy of StringPoints into the internal slice. +// This is a more efficient version of calling AggregateString on each point. +func (r *StringSliceFuncReducer) AggregateStringBulk(points []StringPoint) { + r.points = append(r.points, points...) +} + +// Emit invokes the reduce function on the aggregated points to generate the aggregated points. +// This method does not clear the points from the internal slice. +func (r *StringSliceFuncReducer) Emit() []StringPoint { + return r.fn(r.points) +} + +// StringReduceBooleanFunc is the function called by a StringPoint reducer. +type StringReduceBooleanFunc func(prev *BooleanPoint, curr *StringPoint) (t int64, v bool, aux []interface{}) + +// StringFuncBooleanReducer is a reducer that reduces +// the passed in points to a single point using a reduce function. +type StringFuncBooleanReducer struct { + prev *BooleanPoint + fn StringReduceBooleanFunc +} + +// NewStringFuncBooleanReducer creates a new StringFuncBooleanReducer. +func NewStringFuncBooleanReducer(fn StringReduceBooleanFunc, prev *BooleanPoint) *StringFuncBooleanReducer { + return &StringFuncBooleanReducer{fn: fn, prev: prev} +} + +// AggregateString takes a StringPoint and invokes the reduce function with the +// current and new point to modify the current point. +func (r *StringFuncBooleanReducer) AggregateString(p *StringPoint) { + t, v, aux := r.fn(r.prev, p) + if r.prev == nil { + r.prev = &BooleanPoint{} + } + r.prev.Time = t + r.prev.Value = v + r.prev.Aux = aux + if p.Aggregated > 1 { + r.prev.Aggregated += p.Aggregated + } else { + r.prev.Aggregated++ + } +} + +// Emit emits the point that was generated when reducing the points fed in with AggregateString. +func (r *StringFuncBooleanReducer) Emit() []BooleanPoint { + return []BooleanPoint{*r.prev} +} + +// StringReduceBooleanSliceFunc is the function called by a StringPoint reducer. 
+type StringReduceBooleanSliceFunc func(a []StringPoint) []BooleanPoint + +// StringSliceFuncBooleanReducer is a reducer that aggregates +// the passed in points and then invokes the function to reduce the points when they are emitted. +type StringSliceFuncBooleanReducer struct { + points []StringPoint + fn StringReduceBooleanSliceFunc +} + +// NewStringSliceFuncBooleanReducer creates a new StringSliceFuncBooleanReducer. +func NewStringSliceFuncBooleanReducer(fn StringReduceBooleanSliceFunc) *StringSliceFuncBooleanReducer { + return &StringSliceFuncBooleanReducer{fn: fn} +} + +// AggregateString copies the StringPoint into the internal slice to be passed +// to the reduce function when Emit is called. +func (r *StringSliceFuncBooleanReducer) AggregateString(p *StringPoint) { + r.points = append(r.points, *p.Clone()) +} + +// AggregateStringBulk performs a bulk copy of StringPoints into the internal slice. +// This is a more efficient version of calling AggregateString on each point. +func (r *StringSliceFuncBooleanReducer) AggregateStringBulk(points []StringPoint) { + r.points = append(r.points, points...) +} + +// Emit invokes the reduce function on the aggregated points to generate the aggregated points. +// This method does not clear the points from the internal slice. +func (r *StringSliceFuncBooleanReducer) Emit() []BooleanPoint { + return r.fn(r.points) +} + +// StringDistinctReducer returns the distinct points in a series. +type StringDistinctReducer struct { + m map[string]StringPoint +} + +// NewStringDistinctReducer creates a new StringDistinctReducer. +func NewStringDistinctReducer() *StringDistinctReducer { + return &StringDistinctReducer{m: make(map[string]StringPoint)} +} + +// AggregateString aggregates a point into the reducer. +func (r *StringDistinctReducer) AggregateString(p *StringPoint) { + if _, ok := r.m[p.Value]; !ok { + r.m[p.Value] = *p + } +} + +// Emit emits the distinct points that have been aggregated into the reducer. +func (r *StringDistinctReducer) Emit() []StringPoint { + points := make([]StringPoint, 0, len(r.m)) + for _, p := range r.m { + points = append(points, StringPoint{Time: p.Time, Value: p.Value}) + } + sort.Sort(stringPoints(points)) + return points +} + +// StringElapsedReducer calculates the elapsed of the aggregated points. +type StringElapsedReducer struct { + unitConversion int64 + prev StringPoint + curr StringPoint +} + +// NewStringElapsedReducer creates a new StringElapsedReducer. +func NewStringElapsedReducer(interval Interval) *StringElapsedReducer { + return &StringElapsedReducer{ + unitConversion: int64(interval.Duration), + prev: StringPoint{Nil: true}, + curr: StringPoint{Nil: true}, + } +} + +// AggregateString aggregates a point into the reducer and updates the current window. +func (r *StringElapsedReducer) AggregateString(p *StringPoint) { + r.prev = r.curr + r.curr = *p +} + +// Emit emits the elapsed of the reducer at the current point. 
+func (r *StringElapsedReducer) Emit() []IntegerPoint { + if !r.prev.Nil { + elapsed := (r.curr.Time - r.prev.Time) / r.unitConversion + return []IntegerPoint{ + {Time: r.curr.Time, Value: elapsed}, + } + } + return nil +} + +// StringSampleReducer implements a reservoir sampling to calculate a random subset of points +type StringSampleReducer struct { + count int // how many points we've iterated over + rng *rand.Rand // random number generator for each reducer + + points stringPoints // the reservoir +} + +// NewStringSampleReducer creates a new StringSampleReducer +func NewStringSampleReducer(size int) *StringSampleReducer { + return &StringSampleReducer{ + rng: rand.New(rand.NewSource(time.Now().UnixNano())), // seed with current time as suggested by https://golang.org/pkg/math/rand/ + points: make(stringPoints, size), + } +} + +// AggregateString aggregates a point into the reducer. +func (r *StringSampleReducer) AggregateString(p *StringPoint) { + r.count++ + // Fill the reservoir with the first n points + if r.count-1 < len(r.points) { + p.CopyTo(&r.points[r.count-1]) + return + } + + // Generate a random integer between 1 and the count and + // if that number is less than the length of the slice + // replace the point at that index rnd with p. + rnd := r.rng.Intn(r.count) + if rnd < len(r.points) { + p.CopyTo(&r.points[rnd]) + } +} + +// Emit emits the reservoir sample as many points. +func (r *StringSampleReducer) Emit() []StringPoint { + min := len(r.points) + if r.count < min { + min = r.count + } + pts := r.points[:min] + sort.Sort(pts) + return pts +} + +// BooleanPointAggregator aggregates points to produce a single point. +type BooleanPointAggregator interface { + AggregateBoolean(p *BooleanPoint) +} + +// BooleanBulkPointAggregator aggregates multiple points at a time. +type BooleanBulkPointAggregator interface { + AggregateBooleanBulk(points []BooleanPoint) +} + +// AggregateBooleanPoints feeds a slice of BooleanPoint into an +// aggregator. If the aggregator is a BooleanBulkPointAggregator, it will +// use the AggregateBulk method. +func AggregateBooleanPoints(a BooleanPointAggregator, points []BooleanPoint) { + switch a := a.(type) { + case BooleanBulkPointAggregator: + a.AggregateBooleanBulk(points) + default: + for _, p := range points { + a.AggregateBoolean(&p) + } + } +} + +// BooleanPointEmitter produces a single point from an aggregate. +type BooleanPointEmitter interface { + Emit() []BooleanPoint +} + +// BooleanReduceFloatFunc is the function called by a BooleanPoint reducer. +type BooleanReduceFloatFunc func(prev *FloatPoint, curr *BooleanPoint) (t int64, v float64, aux []interface{}) + +// BooleanFuncFloatReducer is a reducer that reduces +// the passed in points to a single point using a reduce function. +type BooleanFuncFloatReducer struct { + prev *FloatPoint + fn BooleanReduceFloatFunc +} + +// NewBooleanFuncFloatReducer creates a new BooleanFuncFloatReducer. +func NewBooleanFuncFloatReducer(fn BooleanReduceFloatFunc, prev *FloatPoint) *BooleanFuncFloatReducer { + return &BooleanFuncFloatReducer{fn: fn, prev: prev} +} + +// AggregateBoolean takes a BooleanPoint and invokes the reduce function with the +// current and new point to modify the current point. 
+func (r *BooleanFuncFloatReducer) AggregateBoolean(p *BooleanPoint) { + t, v, aux := r.fn(r.prev, p) + if r.prev == nil { + r.prev = &FloatPoint{} + } + r.prev.Time = t + r.prev.Value = v + r.prev.Aux = aux + if p.Aggregated > 1 { + r.prev.Aggregated += p.Aggregated + } else { + r.prev.Aggregated++ + } +} + +// Emit emits the point that was generated when reducing the points fed in with AggregateBoolean. +func (r *BooleanFuncFloatReducer) Emit() []FloatPoint { + return []FloatPoint{*r.prev} +} + +// BooleanReduceFloatSliceFunc is the function called by a BooleanPoint reducer. +type BooleanReduceFloatSliceFunc func(a []BooleanPoint) []FloatPoint + +// BooleanSliceFuncFloatReducer is a reducer that aggregates +// the passed in points and then invokes the function to reduce the points when they are emitted. +type BooleanSliceFuncFloatReducer struct { + points []BooleanPoint + fn BooleanReduceFloatSliceFunc +} + +// NewBooleanSliceFuncFloatReducer creates a new BooleanSliceFuncFloatReducer. +func NewBooleanSliceFuncFloatReducer(fn BooleanReduceFloatSliceFunc) *BooleanSliceFuncFloatReducer { + return &BooleanSliceFuncFloatReducer{fn: fn} +} + +// AggregateBoolean copies the BooleanPoint into the internal slice to be passed +// to the reduce function when Emit is called. +func (r *BooleanSliceFuncFloatReducer) AggregateBoolean(p *BooleanPoint) { + r.points = append(r.points, *p.Clone()) +} + +// AggregateBooleanBulk performs a bulk copy of BooleanPoints into the internal slice. +// This is a more efficient version of calling AggregateBoolean on each point. +func (r *BooleanSliceFuncFloatReducer) AggregateBooleanBulk(points []BooleanPoint) { + r.points = append(r.points, points...) +} + +// Emit invokes the reduce function on the aggregated points to generate the aggregated points. +// This method does not clear the points from the internal slice. +func (r *BooleanSliceFuncFloatReducer) Emit() []FloatPoint { + return r.fn(r.points) +} + +// BooleanReduceIntegerFunc is the function called by a BooleanPoint reducer. +type BooleanReduceIntegerFunc func(prev *IntegerPoint, curr *BooleanPoint) (t int64, v int64, aux []interface{}) + +// BooleanFuncIntegerReducer is a reducer that reduces +// the passed in points to a single point using a reduce function. +type BooleanFuncIntegerReducer struct { + prev *IntegerPoint + fn BooleanReduceIntegerFunc +} + +// NewBooleanFuncIntegerReducer creates a new BooleanFuncIntegerReducer. +func NewBooleanFuncIntegerReducer(fn BooleanReduceIntegerFunc, prev *IntegerPoint) *BooleanFuncIntegerReducer { + return &BooleanFuncIntegerReducer{fn: fn, prev: prev} +} + +// AggregateBoolean takes a BooleanPoint and invokes the reduce function with the +// current and new point to modify the current point. +func (r *BooleanFuncIntegerReducer) AggregateBoolean(p *BooleanPoint) { + t, v, aux := r.fn(r.prev, p) + if r.prev == nil { + r.prev = &IntegerPoint{} + } + r.prev.Time = t + r.prev.Value = v + r.prev.Aux = aux + if p.Aggregated > 1 { + r.prev.Aggregated += p.Aggregated + } else { + r.prev.Aggregated++ + } +} + +// Emit emits the point that was generated when reducing the points fed in with AggregateBoolean. +func (r *BooleanFuncIntegerReducer) Emit() []IntegerPoint { + return []IntegerPoint{*r.prev} +} + +// BooleanReduceIntegerSliceFunc is the function called by a BooleanPoint reducer. 
+type BooleanReduceIntegerSliceFunc func(a []BooleanPoint) []IntegerPoint + +// BooleanSliceFuncIntegerReducer is a reducer that aggregates +// the passed in points and then invokes the function to reduce the points when they are emitted. +type BooleanSliceFuncIntegerReducer struct { + points []BooleanPoint + fn BooleanReduceIntegerSliceFunc +} + +// NewBooleanSliceFuncIntegerReducer creates a new BooleanSliceFuncIntegerReducer. +func NewBooleanSliceFuncIntegerReducer(fn BooleanReduceIntegerSliceFunc) *BooleanSliceFuncIntegerReducer { + return &BooleanSliceFuncIntegerReducer{fn: fn} +} + +// AggregateBoolean copies the BooleanPoint into the internal slice to be passed +// to the reduce function when Emit is called. +func (r *BooleanSliceFuncIntegerReducer) AggregateBoolean(p *BooleanPoint) { + r.points = append(r.points, *p.Clone()) +} + +// AggregateBooleanBulk performs a bulk copy of BooleanPoints into the internal slice. +// This is a more efficient version of calling AggregateBoolean on each point. +func (r *BooleanSliceFuncIntegerReducer) AggregateBooleanBulk(points []BooleanPoint) { + r.points = append(r.points, points...) +} + +// Emit invokes the reduce function on the aggregated points to generate the aggregated points. +// This method does not clear the points from the internal slice. +func (r *BooleanSliceFuncIntegerReducer) Emit() []IntegerPoint { + return r.fn(r.points) +} + +// BooleanReduceUnsignedFunc is the function called by a BooleanPoint reducer. +type BooleanReduceUnsignedFunc func(prev *UnsignedPoint, curr *BooleanPoint) (t int64, v uint64, aux []interface{}) + +// BooleanFuncUnsignedReducer is a reducer that reduces +// the passed in points to a single point using a reduce function. +type BooleanFuncUnsignedReducer struct { + prev *UnsignedPoint + fn BooleanReduceUnsignedFunc +} + +// NewBooleanFuncUnsignedReducer creates a new BooleanFuncUnsignedReducer. +func NewBooleanFuncUnsignedReducer(fn BooleanReduceUnsignedFunc, prev *UnsignedPoint) *BooleanFuncUnsignedReducer { + return &BooleanFuncUnsignedReducer{fn: fn, prev: prev} +} + +// AggregateBoolean takes a BooleanPoint and invokes the reduce function with the +// current and new point to modify the current point. +func (r *BooleanFuncUnsignedReducer) AggregateBoolean(p *BooleanPoint) { + t, v, aux := r.fn(r.prev, p) + if r.prev == nil { + r.prev = &UnsignedPoint{} + } + r.prev.Time = t + r.prev.Value = v + r.prev.Aux = aux + if p.Aggregated > 1 { + r.prev.Aggregated += p.Aggregated + } else { + r.prev.Aggregated++ + } +} + +// Emit emits the point that was generated when reducing the points fed in with AggregateBoolean. +func (r *BooleanFuncUnsignedReducer) Emit() []UnsignedPoint { + return []UnsignedPoint{*r.prev} +} + +// BooleanReduceUnsignedSliceFunc is the function called by a BooleanPoint reducer. +type BooleanReduceUnsignedSliceFunc func(a []BooleanPoint) []UnsignedPoint + +// BooleanSliceFuncUnsignedReducer is a reducer that aggregates +// the passed in points and then invokes the function to reduce the points when they are emitted. +type BooleanSliceFuncUnsignedReducer struct { + points []BooleanPoint + fn BooleanReduceUnsignedSliceFunc +} + +// NewBooleanSliceFuncUnsignedReducer creates a new BooleanSliceFuncUnsignedReducer. 
+func NewBooleanSliceFuncUnsignedReducer(fn BooleanReduceUnsignedSliceFunc) *BooleanSliceFuncUnsignedReducer { + return &BooleanSliceFuncUnsignedReducer{fn: fn} +} + +// AggregateBoolean copies the BooleanPoint into the internal slice to be passed +// to the reduce function when Emit is called. +func (r *BooleanSliceFuncUnsignedReducer) AggregateBoolean(p *BooleanPoint) { + r.points = append(r.points, *p.Clone()) +} + +// AggregateBooleanBulk performs a bulk copy of BooleanPoints into the internal slice. +// This is a more efficient version of calling AggregateBoolean on each point. +func (r *BooleanSliceFuncUnsignedReducer) AggregateBooleanBulk(points []BooleanPoint) { + r.points = append(r.points, points...) +} + +// Emit invokes the reduce function on the aggregated points to generate the aggregated points. +// This method does not clear the points from the internal slice. +func (r *BooleanSliceFuncUnsignedReducer) Emit() []UnsignedPoint { + return r.fn(r.points) +} + +// BooleanReduceStringFunc is the function called by a BooleanPoint reducer. +type BooleanReduceStringFunc func(prev *StringPoint, curr *BooleanPoint) (t int64, v string, aux []interface{}) + +// BooleanFuncStringReducer is a reducer that reduces +// the passed in points to a single point using a reduce function. +type BooleanFuncStringReducer struct { + prev *StringPoint + fn BooleanReduceStringFunc +} + +// NewBooleanFuncStringReducer creates a new BooleanFuncStringReducer. +func NewBooleanFuncStringReducer(fn BooleanReduceStringFunc, prev *StringPoint) *BooleanFuncStringReducer { + return &BooleanFuncStringReducer{fn: fn, prev: prev} +} + +// AggregateBoolean takes a BooleanPoint and invokes the reduce function with the +// current and new point to modify the current point. +func (r *BooleanFuncStringReducer) AggregateBoolean(p *BooleanPoint) { + t, v, aux := r.fn(r.prev, p) + if r.prev == nil { + r.prev = &StringPoint{} + } + r.prev.Time = t + r.prev.Value = v + r.prev.Aux = aux + if p.Aggregated > 1 { + r.prev.Aggregated += p.Aggregated + } else { + r.prev.Aggregated++ + } +} + +// Emit emits the point that was generated when reducing the points fed in with AggregateBoolean. +func (r *BooleanFuncStringReducer) Emit() []StringPoint { + return []StringPoint{*r.prev} +} + +// BooleanReduceStringSliceFunc is the function called by a BooleanPoint reducer. +type BooleanReduceStringSliceFunc func(a []BooleanPoint) []StringPoint + +// BooleanSliceFuncStringReducer is a reducer that aggregates +// the passed in points and then invokes the function to reduce the points when they are emitted. +type BooleanSliceFuncStringReducer struct { + points []BooleanPoint + fn BooleanReduceStringSliceFunc +} + +// NewBooleanSliceFuncStringReducer creates a new BooleanSliceFuncStringReducer. +func NewBooleanSliceFuncStringReducer(fn BooleanReduceStringSliceFunc) *BooleanSliceFuncStringReducer { + return &BooleanSliceFuncStringReducer{fn: fn} +} + +// AggregateBoolean copies the BooleanPoint into the internal slice to be passed +// to the reduce function when Emit is called. +func (r *BooleanSliceFuncStringReducer) AggregateBoolean(p *BooleanPoint) { + r.points = append(r.points, *p.Clone()) +} + +// AggregateBooleanBulk performs a bulk copy of BooleanPoints into the internal slice. +// This is a more efficient version of calling AggregateBoolean on each point. +func (r *BooleanSliceFuncStringReducer) AggregateBooleanBulk(points []BooleanPoint) { + r.points = append(r.points, points...) 
+} + +// Emit invokes the reduce function on the aggregated points to generate the aggregated points. +// This method does not clear the points from the internal slice. +func (r *BooleanSliceFuncStringReducer) Emit() []StringPoint { + return r.fn(r.points) +} + +// BooleanReduceFunc is the function called by a BooleanPoint reducer. +type BooleanReduceFunc func(prev *BooleanPoint, curr *BooleanPoint) (t int64, v bool, aux []interface{}) + +// BooleanFuncReducer is a reducer that reduces +// the passed in points to a single point using a reduce function. +type BooleanFuncReducer struct { + prev *BooleanPoint + fn BooleanReduceFunc +} + +// NewBooleanFuncReducer creates a new BooleanFuncBooleanReducer. +func NewBooleanFuncReducer(fn BooleanReduceFunc, prev *BooleanPoint) *BooleanFuncReducer { + return &BooleanFuncReducer{fn: fn, prev: prev} +} + +// AggregateBoolean takes a BooleanPoint and invokes the reduce function with the +// current and new point to modify the current point. +func (r *BooleanFuncReducer) AggregateBoolean(p *BooleanPoint) { + t, v, aux := r.fn(r.prev, p) + if r.prev == nil { + r.prev = &BooleanPoint{} + } + r.prev.Time = t + r.prev.Value = v + r.prev.Aux = aux + if p.Aggregated > 1 { + r.prev.Aggregated += p.Aggregated + } else { + r.prev.Aggregated++ + } +} + +// Emit emits the point that was generated when reducing the points fed in with AggregateBoolean. +func (r *BooleanFuncReducer) Emit() []BooleanPoint { + return []BooleanPoint{*r.prev} +} + +// BooleanReduceSliceFunc is the function called by a BooleanPoint reducer. +type BooleanReduceSliceFunc func(a []BooleanPoint) []BooleanPoint + +// BooleanSliceFuncReducer is a reducer that aggregates +// the passed in points and then invokes the function to reduce the points when they are emitted. +type BooleanSliceFuncReducer struct { + points []BooleanPoint + fn BooleanReduceSliceFunc +} + +// NewBooleanSliceFuncReducer creates a new BooleanSliceFuncReducer. +func NewBooleanSliceFuncReducer(fn BooleanReduceSliceFunc) *BooleanSliceFuncReducer { + return &BooleanSliceFuncReducer{fn: fn} +} + +// AggregateBoolean copies the BooleanPoint into the internal slice to be passed +// to the reduce function when Emit is called. +func (r *BooleanSliceFuncReducer) AggregateBoolean(p *BooleanPoint) { + r.points = append(r.points, *p.Clone()) +} + +// AggregateBooleanBulk performs a bulk copy of BooleanPoints into the internal slice. +// This is a more efficient version of calling AggregateBoolean on each point. +func (r *BooleanSliceFuncReducer) AggregateBooleanBulk(points []BooleanPoint) { + r.points = append(r.points, points...) +} + +// Emit invokes the reduce function on the aggregated points to generate the aggregated points. +// This method does not clear the points from the internal slice. +func (r *BooleanSliceFuncReducer) Emit() []BooleanPoint { + return r.fn(r.points) +} + +// BooleanDistinctReducer returns the distinct points in a series. +type BooleanDistinctReducer struct { + m map[bool]BooleanPoint +} + +// NewBooleanDistinctReducer creates a new BooleanDistinctReducer. +func NewBooleanDistinctReducer() *BooleanDistinctReducer { + return &BooleanDistinctReducer{m: make(map[bool]BooleanPoint)} +} + +// AggregateBoolean aggregates a point into the reducer. +func (r *BooleanDistinctReducer) AggregateBoolean(p *BooleanPoint) { + if _, ok := r.m[p.Value]; !ok { + r.m[p.Value] = *p + } +} + +// Emit emits the distinct points that have been aggregated into the reducer. 
+func (r *BooleanDistinctReducer) Emit() []BooleanPoint { + points := make([]BooleanPoint, 0, len(r.m)) + for _, p := range r.m { + points = append(points, BooleanPoint{Time: p.Time, Value: p.Value}) + } + sort.Sort(booleanPoints(points)) + return points +} + +// BooleanElapsedReducer calculates the elapsed of the aggregated points. +type BooleanElapsedReducer struct { + unitConversion int64 + prev BooleanPoint + curr BooleanPoint +} + +// NewBooleanElapsedReducer creates a new BooleanElapsedReducer. +func NewBooleanElapsedReducer(interval Interval) *BooleanElapsedReducer { + return &BooleanElapsedReducer{ + unitConversion: int64(interval.Duration), + prev: BooleanPoint{Nil: true}, + curr: BooleanPoint{Nil: true}, + } +} + +// AggregateBoolean aggregates a point into the reducer and updates the current window. +func (r *BooleanElapsedReducer) AggregateBoolean(p *BooleanPoint) { + r.prev = r.curr + r.curr = *p +} + +// Emit emits the elapsed of the reducer at the current point. +func (r *BooleanElapsedReducer) Emit() []IntegerPoint { + if !r.prev.Nil { + elapsed := (r.curr.Time - r.prev.Time) / r.unitConversion + return []IntegerPoint{ + {Time: r.curr.Time, Value: elapsed}, + } + } + return nil +} + +// BooleanSampleReducer implements a reservoir sampling to calculate a random subset of points +type BooleanSampleReducer struct { + count int // how many points we've iterated over + rng *rand.Rand // random number generator for each reducer + + points booleanPoints // the reservoir +} + +// NewBooleanSampleReducer creates a new BooleanSampleReducer +func NewBooleanSampleReducer(size int) *BooleanSampleReducer { + return &BooleanSampleReducer{ + rng: rand.New(rand.NewSource(time.Now().UnixNano())), // seed with current time as suggested by https://golang.org/pkg/math/rand/ + points: make(booleanPoints, size), + } +} + +// AggregateBoolean aggregates a point into the reducer. +func (r *BooleanSampleReducer) AggregateBoolean(p *BooleanPoint) { + r.count++ + // Fill the reservoir with the first n points + if r.count-1 < len(r.points) { + p.CopyTo(&r.points[r.count-1]) + return + } + + // Generate a random integer between 1 and the count and + // if that number is less than the length of the slice + // replace the point at that index rnd with p. + rnd := r.rng.Intn(r.count) + if rnd < len(r.points) { + p.CopyTo(&r.points[rnd]) + } +} + +// Emit emits the reservoir sample as many points. +func (r *BooleanSampleReducer) Emit() []BooleanPoint { + min := len(r.points) + if r.count < min { + min = r.count + } + pts := r.points[:min] + sort.Sort(pts) + return pts +} diff --git a/vendor/github.com/influxdata/influxdb/query/functions.gen.go.tmpl b/vendor/github.com/influxdata/influxdb/query/functions.gen.go.tmpl new file mode 100644 index 0000000..bd0d15b --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/query/functions.gen.go.tmpl @@ -0,0 +1,219 @@ +package query + +import ( +"sort" +"time" +"math/rand" +) + +{{with $types := .}}{{range $k := $types}} + +// {{$k.Name}}PointAggregator aggregates points to produce a single point. +type {{$k.Name}}PointAggregator interface { + Aggregate{{$k.Name}}(p *{{$k.Name}}Point) +} + +// {{$k.Name}}BulkPointAggregator aggregates multiple points at a time. +type {{$k.Name}}BulkPointAggregator interface { + Aggregate{{$k.Name}}Bulk(points []{{$k.Name}}Point) +} + +// Aggregate{{$k.Name}}Points feeds a slice of {{$k.Name}}Point into an +// aggregator. If the aggregator is a {{$k.Name}}BulkPointAggregator, it will +// use the AggregateBulk method. 
+func Aggregate{{$k.Name}}Points(a {{$k.Name}}PointAggregator, points []{{$k.Name}}Point) { + switch a := a.(type) { + case {{$k.Name}}BulkPointAggregator: + a.Aggregate{{$k.Name}}Bulk(points) + default: + for _, p := range points { + a.Aggregate{{$k.Name}}(&p) + } + } +} + +// {{$k.Name}}PointEmitter produces a single point from an aggregate. +type {{$k.Name}}PointEmitter interface { + Emit() []{{$k.Name}}Point +} + +{{range $v := $types}} + +// {{$k.Name}}Reduce{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Func is the function called by a {{$k.Name}}Point reducer. +type {{$k.Name}}Reduce{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Func func(prev *{{$v.Name}}Point, curr *{{$k.Name}}Point) (t int64, v {{$v.Type}}, aux []interface{}) + +// {{$k.Name}}Func{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer is a reducer that reduces +// the passed in points to a single point using a reduce function. +type {{$k.Name}}Func{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer struct { + prev *{{$v.Name}}Point + fn {{$k.Name}}Reduce{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Func +} + +// New{{$k.Name}}Func{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer creates a new {{$k.Name}}Func{{$v.Name}}Reducer. +func New{{$k.Name}}Func{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer(fn {{$k.Name}}Reduce{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Func, prev *{{$v.Name}}Point) *{{$k.Name}}Func{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer { + return &{{$k.Name}}Func{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer{fn: fn, prev: prev} +} + +// Aggregate{{$k.Name}} takes a {{$k.Name}}Point and invokes the reduce function with the +// current and new point to modify the current point. +func (r *{{$k.Name}}Func{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer) Aggregate{{$k.Name}}(p *{{$k.Name}}Point) { + t, v, aux := r.fn(r.prev, p) + if r.prev == nil { + r.prev = &{{$v.Name}}Point{} + } + r.prev.Time = t + r.prev.Value = v + r.prev.Aux = aux + if p.Aggregated > 1 { + r.prev.Aggregated += p.Aggregated + } else { + r.prev.Aggregated++ + } +} + +// Emit emits the point that was generated when reducing the points fed in with Aggregate{{$k.Name}}. +func (r *{{$k.Name}}Func{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer) Emit() []{{$v.Name}}Point { + return []{{$v.Name}}Point{*r.prev} +} + +// {{$k.Name}}Reduce{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}SliceFunc is the function called by a {{$k.Name}}Point reducer. +type {{$k.Name}}Reduce{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}SliceFunc func(a []{{$k.Name}}Point) []{{$v.Name}}Point + +// {{$k.Name}}SliceFunc{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer is a reducer that aggregates +// the passed in points and then invokes the function to reduce the points when they are emitted. +type {{$k.Name}}SliceFunc{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer struct { + points []{{$k.Name}}Point + fn {{$k.Name}}Reduce{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}SliceFunc +} + +// New{{$k.Name}}SliceFunc{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer creates a new {{$k.Name}}SliceFunc{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer. 
+func New{{$k.Name}}SliceFunc{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer(fn {{$k.Name}}Reduce{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}SliceFunc) *{{$k.Name}}SliceFunc{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer { + return &{{$k.Name}}SliceFunc{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer{fn: fn} +} + +// Aggregate{{$k.Name}} copies the {{$k.Name}}Point into the internal slice to be passed +// to the reduce function when Emit is called. +func (r *{{$k.Name}}SliceFunc{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer) Aggregate{{$k.Name}}(p *{{$k.Name}}Point) { + r.points = append(r.points, *p.Clone()) +} + +// Aggregate{{$k.Name}}Bulk performs a bulk copy of {{$k.Name}}Points into the internal slice. +// This is a more efficient version of calling Aggregate{{$k.Name}} on each point. +func (r *{{$k.Name}}SliceFunc{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer) Aggregate{{$k.Name}}Bulk(points []{{$k.Name}}Point) { + r.points = append(r.points, points...) +} + +// Emit invokes the reduce function on the aggregated points to generate the aggregated points. +// This method does not clear the points from the internal slice. +func (r *{{$k.Name}}SliceFunc{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer) Emit() []{{$v.Name}}Point { + return r.fn(r.points) +} +{{end}} + +// {{$k.Name}}DistinctReducer returns the distinct points in a series. +type {{$k.Name}}DistinctReducer struct { + m map[{{$k.Type}}]{{$k.Name}}Point +} + +// New{{$k.Name}}DistinctReducer creates a new {{$k.Name}}DistinctReducer. +func New{{$k.Name}}DistinctReducer() *{{$k.Name}}DistinctReducer { + return &{{$k.Name}}DistinctReducer{m: make(map[{{$k.Type}}]{{$k.Name}}Point)} +} + +// Aggregate{{$k.Name}} aggregates a point into the reducer. +func (r *{{$k.Name}}DistinctReducer) Aggregate{{$k.Name}}(p *{{$k.Name}}Point) { + if _, ok := r.m[p.Value]; !ok { + r.m[p.Value] = *p + } +} + +// Emit emits the distinct points that have been aggregated into the reducer. +func (r *{{$k.Name}}DistinctReducer) Emit() []{{$k.Name}}Point { + points := make([]{{$k.Name}}Point, 0, len(r.m)) + for _, p := range r.m { + points = append(points, {{$k.Name}}Point{Time: p.Time, Value: p.Value}) + } + sort.Sort({{$k.name}}Points(points)) + return points +} + +// {{$k.Name}}ElapsedReducer calculates the elapsed of the aggregated points. +type {{$k.Name}}ElapsedReducer struct { + unitConversion int64 + prev {{$k.Name}}Point + curr {{$k.Name}}Point +} + +// New{{$k.Name}}ElapsedReducer creates a new {{$k.Name}}ElapsedReducer. +func New{{$k.Name}}ElapsedReducer(interval Interval) *{{$k.Name}}ElapsedReducer { + return &{{$k.Name}}ElapsedReducer{ + unitConversion: int64(interval.Duration), + prev: {{$k.Name}}Point{Nil: true}, + curr: {{$k.Name}}Point{Nil: true}, + } +} + +// Aggregate{{$k.Name}} aggregates a point into the reducer and updates the current window. +func (r *{{$k.Name}}ElapsedReducer) Aggregate{{$k.Name}}(p *{{$k.Name}}Point) { + r.prev = r.curr + r.curr = *p +} + +// Emit emits the elapsed of the reducer at the current point. 
+func (r *{{$k.Name}}ElapsedReducer) Emit() []IntegerPoint { + if !r.prev.Nil { + elapsed := (r.curr.Time - r.prev.Time) / r.unitConversion + return []IntegerPoint{ + {Time: r.curr.Time, Value: elapsed}, + } + } + return nil +} + +// {{$k.Name}}SampleReducer implements a reservoir sampling to calculate a random subset of points +type {{$k.Name}}SampleReducer struct { + count int // how many points we've iterated over + rng *rand.Rand // random number generator for each reducer + + points {{$k.name}}Points // the reservoir +} + +// New{{$k.Name}}SampleReducer creates a new {{$k.Name}}SampleReducer +func New{{$k.Name}}SampleReducer(size int) *{{$k.Name}}SampleReducer { + return &{{$k.Name}}SampleReducer{ + rng: rand.New(rand.NewSource(time.Now().UnixNano())), // seed with current time as suggested by https://golang.org/pkg/math/rand/ + points: make({{$k.name}}Points, size), + } +} + +// Aggregate{{$k.Name}} aggregates a point into the reducer. +func (r *{{$k.Name}}SampleReducer) Aggregate{{$k.Name}}(p *{{$k.Name}}Point) { + r.count++ + // Fill the reservoir with the first n points + if r.count-1 < len(r.points) { + p.CopyTo(&r.points[r.count-1]) + return + } + + // Generate a random integer between 1 and the count and + // if that number is less than the length of the slice + // replace the point at that index rnd with p. + rnd := r.rng.Intn(r.count) + if rnd < len(r.points) { + p.CopyTo(&r.points[rnd]) + } +} + +// Emit emits the reservoir sample as many points. +func (r *{{$k.Name}}SampleReducer) Emit() []{{$k.Name}}Point { + min := len(r.points) + if r.count < min { + min = r.count + } + pts := r.points[:min] + sort.Sort(pts) + return pts +} + + +{{end}}{{end}} diff --git a/vendor/github.com/influxdata/influxdb/query/functions.go b/vendor/github.com/influxdata/influxdb/query/functions.go new file mode 100644 index 0000000..b53fd50 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/query/functions.go @@ -0,0 +1,2148 @@ +package query + +import ( + "container/heap" + "math" + "sort" + "time" + + "github.com/influxdata/influxdb/query/internal/gota" + "github.com/influxdata/influxdb/query/neldermead" + "github.com/influxdata/influxql" +) + +// FieldMapper is a FieldMapper that wraps another FieldMapper and exposes +// the functions implemented by the query engine. +type FieldMapper struct { + influxql.FieldMapper +} + +func (m FieldMapper) CallType(name string, args []influxql.DataType) (influxql.DataType, error) { + if mapper, ok := m.FieldMapper.(influxql.CallTypeMapper); ok { + typ, err := mapper.CallType(name, args) + if err != nil { + return influxql.Unknown, err + } else if typ != influxql.Unknown { + return typ, nil + } + } + + // Use the default FunctionTypeMapper for the query engine. + typmap := FunctionTypeMapper{} + return typmap.CallType(name, args) +} + +// CallTypeMapper returns the types for call iterator functions. +// Call iterator functions are commonly implemented within the storage engine +// so this mapper is limited to only the return values of those functions. +type CallTypeMapper struct{} + +func (CallTypeMapper) MapType(measurement *influxql.Measurement, field string) influxql.DataType { + return influxql.Unknown +} + +func (CallTypeMapper) CallType(name string, args []influxql.DataType) (influxql.DataType, error) { + // If the function is not implemented by the embedded field mapper, then + // see if we implement the function and return the type here. 
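+	// For example, mean maps to a float result and count to an integer, while
+	// min, max, sum, first, and last return the type of their first argument
+	// (see the cases below).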
+ switch name { + case "mean": + return influxql.Float, nil + case "count": + return influxql.Integer, nil + case "min", "max", "sum", "first", "last": + // TODO(jsternberg): Verify the input type. + return args[0], nil + } + return influxql.Unknown, nil +} + +// FunctionTypeMapper handles the type mapping for all functions implemented by the +// query engine. +type FunctionTypeMapper struct { + CallTypeMapper +} + +func (FunctionTypeMapper) MapType(measurement *influxql.Measurement, field string) influxql.DataType { + return influxql.Unknown +} + +func (m FunctionTypeMapper) CallType(name string, args []influxql.DataType) (influxql.DataType, error) { + if typ, err := m.CallTypeMapper.CallType(name, args); typ != influxql.Unknown || err != nil { + return typ, err + } + + // Handle functions implemented by the query engine. + switch name { + case "median", "integral", "stddev", + "derivative", "non_negative_derivative", + "moving_average", + "exponential_moving_average", + "double_exponential_moving_average", + "triple_exponential_moving_average", + "relative_strength_index", + "triple_exponential_derivative", + "kaufmans_efficiency_ratio", + "kaufmans_adaptive_moving_average", + "chande_momentum_oscillator", + "holt_winters", "holt_winters_with_fit": + return influxql.Float, nil + case "elapsed": + return influxql.Integer, nil + default: + // TODO(jsternberg): Do not use default for this. + return args[0], nil + } +} + +// FloatMeanReducer calculates the mean of the aggregated points. +type FloatMeanReducer struct { + sum float64 + count uint32 +} + +// NewFloatMeanReducer creates a new FloatMeanReducer. +func NewFloatMeanReducer() *FloatMeanReducer { + return &FloatMeanReducer{} +} + +// AggregateFloat aggregates a point into the reducer. +func (r *FloatMeanReducer) AggregateFloat(p *FloatPoint) { + if p.Aggregated >= 2 { + r.sum += p.Value * float64(p.Aggregated) + r.count += p.Aggregated + } else { + r.sum += p.Value + r.count++ + } +} + +// Emit emits the mean of the aggregated points as a single point. +func (r *FloatMeanReducer) Emit() []FloatPoint { + return []FloatPoint{{ + Time: ZeroTime, + Value: r.sum / float64(r.count), + Aggregated: r.count, + }} +} + +// IntegerMeanReducer calculates the mean of the aggregated points. +type IntegerMeanReducer struct { + sum int64 + count uint32 +} + +// NewIntegerMeanReducer creates a new IntegerMeanReducer. +func NewIntegerMeanReducer() *IntegerMeanReducer { + return &IntegerMeanReducer{} +} + +// AggregateInteger aggregates a point into the reducer. +func (r *IntegerMeanReducer) AggregateInteger(p *IntegerPoint) { + if p.Aggregated >= 2 { + r.sum += p.Value * int64(p.Aggregated) + r.count += p.Aggregated + } else { + r.sum += p.Value + r.count++ + } +} + +// Emit emits the mean of the aggregated points as a single point. +func (r *IntegerMeanReducer) Emit() []FloatPoint { + return []FloatPoint{{ + Time: ZeroTime, + Value: float64(r.sum) / float64(r.count), + Aggregated: r.count, + }} +} + +// UnsignedMeanReducer calculates the mean of the aggregated points. +type UnsignedMeanReducer struct { + sum uint64 + count uint32 +} + +// NewUnsignedMeanReducer creates a new UnsignedMeanReducer. +func NewUnsignedMeanReducer() *UnsignedMeanReducer { + return &UnsignedMeanReducer{} +} + +// AggregateUnsigned aggregates a point into the reducer. 
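+// Points that already carry an Aggregated count of two or more contribute their
+// value weighted by that count, so means computed over pre-aggregated data stay correct.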
+func (r *UnsignedMeanReducer) AggregateUnsigned(p *UnsignedPoint) { + if p.Aggregated >= 2 { + r.sum += p.Value * uint64(p.Aggregated) + r.count += p.Aggregated + } else { + r.sum += p.Value + r.count++ + } +} + +// Emit emits the mean of the aggregated points as a single point. +func (r *UnsignedMeanReducer) Emit() []FloatPoint { + return []FloatPoint{{ + Time: ZeroTime, + Value: float64(r.sum) / float64(r.count), + Aggregated: r.count, + }} +} + +type FloatSpreadReducer struct { + min, max float64 + count uint32 +} + +func NewFloatSpreadReducer() *FloatSpreadReducer { + return &FloatSpreadReducer{ + min: math.Inf(1), + max: math.Inf(-1), + } +} + +func (r *FloatSpreadReducer) AggregateFloat(p *FloatPoint) { + r.min = math.Min(r.min, p.Value) + r.max = math.Max(r.max, p.Value) + r.count++ +} + +func (r *FloatSpreadReducer) Emit() []FloatPoint { + return []FloatPoint{{ + Time: ZeroTime, + Value: r.max - r.min, + Aggregated: r.count, + }} +} + +type IntegerSpreadReducer struct { + min, max int64 + count uint32 +} + +func NewIntegerSpreadReducer() *IntegerSpreadReducer { + return &IntegerSpreadReducer{ + min: math.MaxInt64, + max: math.MinInt64, + } +} + +func (r *IntegerSpreadReducer) AggregateInteger(p *IntegerPoint) { + if p.Value < r.min { + r.min = p.Value + } + if p.Value > r.max { + r.max = p.Value + } + r.count++ +} + +func (r *IntegerSpreadReducer) Emit() []IntegerPoint { + return []IntegerPoint{{ + Time: ZeroTime, + Value: r.max - r.min, + Aggregated: r.count, + }} +} + +type UnsignedSpreadReducer struct { + min, max uint64 + count uint32 +} + +func NewUnsignedSpreadReducer() *UnsignedSpreadReducer { + return &UnsignedSpreadReducer{ + min: math.MaxUint64, + max: 0, + } +} + +func (r *UnsignedSpreadReducer) AggregateUnsigned(p *UnsignedPoint) { + if p.Value < r.min { + r.min = p.Value + } + if p.Value > r.max { + r.max = p.Value + } + r.count++ +} + +func (r *UnsignedSpreadReducer) Emit() []UnsignedPoint { + return []UnsignedPoint{{ + Time: ZeroTime, + Value: r.max - r.min, + Aggregated: r.count, + }} +} + +// FloatDerivativeReducer calculates the derivative of the aggregated points. +type FloatDerivativeReducer struct { + interval Interval + prev FloatPoint + curr FloatPoint + isNonNegative bool + ascending bool +} + +// NewFloatDerivativeReducer creates a new FloatDerivativeReducer. +func NewFloatDerivativeReducer(interval Interval, isNonNegative, ascending bool) *FloatDerivativeReducer { + return &FloatDerivativeReducer{ + interval: interval, + isNonNegative: isNonNegative, + ascending: ascending, + prev: FloatPoint{Nil: true}, + curr: FloatPoint{Nil: true}, + } +} + +// AggregateFloat aggregates a point into the reducer and updates the current window. +func (r *FloatDerivativeReducer) AggregateFloat(p *FloatPoint) { + // Skip past a point when it does not advance the stream. A joined series + // may have multiple points at the same time so we will discard anything + // except the first point we encounter. + if !r.curr.Nil && r.curr.Time == p.Time { + return + } + + r.prev = r.curr + r.curr = *p +} + +// Emit emits the derivative of the reducer at the current point. +func (r *FloatDerivativeReducer) Emit() []FloatPoint { + if !r.prev.Nil { + // Calculate the derivative of successive points by dividing the + // difference of each value by the elapsed time normalized to the interval. 
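+		// In other words:
+		// value = (curr.Value - prev.Value) / ((curr.Time - prev.Time) / interval.Duration),
+		// with the elapsed time negated for descending iterators.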
+ diff := r.curr.Value - r.prev.Value + elapsed := r.curr.Time - r.prev.Time + if !r.ascending { + elapsed = -elapsed + } + value := diff / (float64(elapsed) / float64(r.interval.Duration)) + + // Mark this point as read by changing the previous point to nil. + r.prev.Nil = true + + // Drop negative values for non-negative derivatives. + if r.isNonNegative && diff < 0 { + return nil + } + return []FloatPoint{{Time: r.curr.Time, Value: value}} + } + return nil +} + +// IntegerDerivativeReducer calculates the derivative of the aggregated points. +type IntegerDerivativeReducer struct { + interval Interval + prev IntegerPoint + curr IntegerPoint + isNonNegative bool + ascending bool +} + +// NewIntegerDerivativeReducer creates a new IntegerDerivativeReducer. +func NewIntegerDerivativeReducer(interval Interval, isNonNegative, ascending bool) *IntegerDerivativeReducer { + return &IntegerDerivativeReducer{ + interval: interval, + isNonNegative: isNonNegative, + ascending: ascending, + prev: IntegerPoint{Nil: true}, + curr: IntegerPoint{Nil: true}, + } +} + +// AggregateInteger aggregates a point into the reducer and updates the current window. +func (r *IntegerDerivativeReducer) AggregateInteger(p *IntegerPoint) { + // Skip past a point when it does not advance the stream. A joined series + // may have multiple points at the same time so we will discard anything + // except the first point we encounter. + if !r.curr.Nil && r.curr.Time == p.Time { + return + } + + r.prev = r.curr + r.curr = *p +} + +// Emit emits the derivative of the reducer at the current point. +func (r *IntegerDerivativeReducer) Emit() []FloatPoint { + if !r.prev.Nil { + // Calculate the derivative of successive points by dividing the + // difference of each value by the elapsed time normalized to the interval. + diff := float64(r.curr.Value - r.prev.Value) + elapsed := r.curr.Time - r.prev.Time + if !r.ascending { + elapsed = -elapsed + } + value := diff / (float64(elapsed) / float64(r.interval.Duration)) + + // Mark this point as read by changing the previous point to nil. + r.prev.Nil = true + + // Drop negative values for non-negative derivatives. + if r.isNonNegative && diff < 0 { + return nil + } + return []FloatPoint{{Time: r.curr.Time, Value: value}} + } + return nil +} + +// UnsignedDerivativeReducer calculates the derivative of the aggregated points. +type UnsignedDerivativeReducer struct { + interval Interval + prev UnsignedPoint + curr UnsignedPoint + isNonNegative bool + ascending bool +} + +// NewUnsignedDerivativeReducer creates a new UnsignedDerivativeReducer. +func NewUnsignedDerivativeReducer(interval Interval, isNonNegative, ascending bool) *UnsignedDerivativeReducer { + return &UnsignedDerivativeReducer{ + interval: interval, + isNonNegative: isNonNegative, + ascending: ascending, + prev: UnsignedPoint{Nil: true}, + curr: UnsignedPoint{Nil: true}, + } +} + +// AggregateUnsigned aggregates a point into the reducer and updates the current window. +func (r *UnsignedDerivativeReducer) AggregateUnsigned(p *UnsignedPoint) { + // Skip past a point when it does not advance the stream. A joined series + // may have multiple points at the same time so we will discard anything + // except the first point we encounter. + if !r.curr.Nil && r.curr.Time == p.Time { + return + } + + r.prev = r.curr + r.curr = *p +} + +// Emit emits the derivative of the reducer at the current point. 
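+// Because unsigned values cannot represent a negative difference, the subtraction
+// below is branched on which operand is larger before converting to float64.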
+func (r *UnsignedDerivativeReducer) Emit() []FloatPoint { + if !r.prev.Nil { + // Calculate the derivative of successive points by dividing the + // difference of each value by the elapsed time normalized to the interval. + var diff float64 + if r.curr.Value > r.prev.Value { + diff = float64(r.curr.Value - r.prev.Value) + } else { + diff = -float64(r.prev.Value - r.curr.Value) + } + elapsed := r.curr.Time - r.prev.Time + if !r.ascending { + elapsed = -elapsed + } + value := diff / (float64(elapsed) / float64(r.interval.Duration)) + + // Mark this point as read by changing the previous point to nil. + r.prev.Nil = true + + // Drop negative values for non-negative derivatives. + if r.isNonNegative && diff < 0 { + return nil + } + return []FloatPoint{{Time: r.curr.Time, Value: value}} + } + return nil +} + +// FloatDifferenceReducer calculates the derivative of the aggregated points. +type FloatDifferenceReducer struct { + isNonNegative bool + prev FloatPoint + curr FloatPoint +} + +// NewFloatDifferenceReducer creates a new FloatDifferenceReducer. +func NewFloatDifferenceReducer(isNonNegative bool) *FloatDifferenceReducer { + return &FloatDifferenceReducer{ + isNonNegative: isNonNegative, + prev: FloatPoint{Nil: true}, + curr: FloatPoint{Nil: true}, + } +} + +// AggregateFloat aggregates a point into the reducer and updates the current window. +func (r *FloatDifferenceReducer) AggregateFloat(p *FloatPoint) { + // Skip past a point when it does not advance the stream. A joined series + // may have multiple points at the same time so we will discard anything + // except the first point we encounter. + if !r.curr.Nil && r.curr.Time == p.Time { + return + } + + r.prev = r.curr + r.curr = *p +} + +// Emit emits the difference of the reducer at the current point. +func (r *FloatDifferenceReducer) Emit() []FloatPoint { + if !r.prev.Nil { + // Calculate the difference of successive points. + value := r.curr.Value - r.prev.Value + + // If it is non_negative_difference discard any negative value. Since + // prev is still marked as unread. The correctness can be ensured. + if r.isNonNegative && value < 0 { + return nil + } + + // Mark this point as read by changing the previous point to nil. + r.prev.Nil = true + return []FloatPoint{{Time: r.curr.Time, Value: value}} + } + return nil +} + +// IntegerDifferenceReducer calculates the derivative of the aggregated points. +type IntegerDifferenceReducer struct { + isNonNegative bool + prev IntegerPoint + curr IntegerPoint +} + +// NewIntegerDifferenceReducer creates a new IntegerDifferenceReducer. +func NewIntegerDifferenceReducer(isNonNegative bool) *IntegerDifferenceReducer { + return &IntegerDifferenceReducer{ + isNonNegative: isNonNegative, + prev: IntegerPoint{Nil: true}, + curr: IntegerPoint{Nil: true}, + } +} + +// AggregateInteger aggregates a point into the reducer and updates the current window. +func (r *IntegerDifferenceReducer) AggregateInteger(p *IntegerPoint) { + // Skip past a point when it does not advance the stream. A joined series + // may have multiple points at the same time so we will discard anything + // except the first point we encounter. + if !r.curr.Nil && r.curr.Time == p.Time { + return + } + + r.prev = r.curr + r.curr = *p +} + +// Emit emits the difference of the reducer at the current point. +func (r *IntegerDifferenceReducer) Emit() []IntegerPoint { + if !r.prev.Nil { + // Calculate the difference of successive points. 
+ value := r.curr.Value - r.prev.Value + + // If it is non_negative_difference discard any negative value. Since + // prev is still marked as unread. The correctness can be ensured. + if r.isNonNegative && value < 0 { + return nil + } + + // Mark this point as read by changing the previous point to nil. + r.prev.Nil = true + + return []IntegerPoint{{Time: r.curr.Time, Value: value}} + } + return nil +} + +// UnsignedDifferenceReducer calculates the derivative of the aggregated points. +type UnsignedDifferenceReducer struct { + isNonNegative bool + prev UnsignedPoint + curr UnsignedPoint +} + +// NewUnsignedDifferenceReducer creates a new UnsignedDifferenceReducer. +func NewUnsignedDifferenceReducer(isNonNegative bool) *UnsignedDifferenceReducer { + return &UnsignedDifferenceReducer{ + isNonNegative: isNonNegative, + prev: UnsignedPoint{Nil: true}, + curr: UnsignedPoint{Nil: true}, + } +} + +// AggregateUnsigned aggregates a point into the reducer and updates the current window. +func (r *UnsignedDifferenceReducer) AggregateUnsigned(p *UnsignedPoint) { + // Skip past a point when it does not advance the stream. A joined series + // may have multiple points at the same time so we will discard anything + // except the first point we encounter. + if !r.curr.Nil && r.curr.Time == p.Time { + return + } + + r.prev = r.curr + r.curr = *p +} + +// Emit emits the difference of the reducer at the current point. +func (r *UnsignedDifferenceReducer) Emit() []UnsignedPoint { + if !r.prev.Nil { + // If it is non_negative_difference discard any negative value. Since + // prev is still marked as unread. The correctness can be ensured. + if r.isNonNegative && r.curr.Value < r.prev.Value { + return nil + } + + // Calculate the difference of successive points. + value := r.curr.Value - r.prev.Value + + // Mark this point as read by changing the previous point to nil. + r.prev.Nil = true + + return []UnsignedPoint{{Time: r.curr.Time, Value: value}} + } + return nil +} + +// FloatMovingAverageReducer calculates the moving average of the aggregated points. +type FloatMovingAverageReducer struct { + pos int + sum float64 + time int64 + buf []float64 +} + +// NewFloatMovingAverageReducer creates a new FloatMovingAverageReducer. +func NewFloatMovingAverageReducer(n int) *FloatMovingAverageReducer { + return &FloatMovingAverageReducer{ + buf: make([]float64, 0, n), + } +} + +// AggregateFloat aggregates a point into the reducer and updates the current window. +func (r *FloatMovingAverageReducer) AggregateFloat(p *FloatPoint) { + if len(r.buf) != cap(r.buf) { + r.buf = append(r.buf, p.Value) + } else { + r.sum -= r.buf[r.pos] + r.buf[r.pos] = p.Value + } + r.sum += p.Value + r.time = p.Time + r.pos++ + if r.pos >= cap(r.buf) { + r.pos = 0 + } +} + +// Emit emits the moving average of the current window. Emit should be called +// after every call to AggregateFloat and it will produce one point if there +// is enough data to fill a window, otherwise it will produce zero points. +func (r *FloatMovingAverageReducer) Emit() []FloatPoint { + if len(r.buf) != cap(r.buf) { + return []FloatPoint{} + } + return []FloatPoint{ + { + Value: r.sum / float64(len(r.buf)), + Time: r.time, + Aggregated: uint32(len(r.buf)), + }, + } +} + +// IntegerMovingAverageReducer calculates the moving average of the aggregated points. +type IntegerMovingAverageReducer struct { + pos int + sum int64 + time int64 + buf []int64 +} + +// NewIntegerMovingAverageReducer creates a new IntegerMovingAverageReducer. 
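+// The window size n is fixed at construction; the reducer keeps at most n values
+// in a ring buffer and reports the mean of that buffer once it is full.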
+func NewIntegerMovingAverageReducer(n int) *IntegerMovingAverageReducer { + return &IntegerMovingAverageReducer{ + buf: make([]int64, 0, n), + } +} + +// AggregateInteger aggregates a point into the reducer and updates the current window. +func (r *IntegerMovingAverageReducer) AggregateInteger(p *IntegerPoint) { + if len(r.buf) != cap(r.buf) { + r.buf = append(r.buf, p.Value) + } else { + r.sum -= r.buf[r.pos] + r.buf[r.pos] = p.Value + } + r.sum += p.Value + r.time = p.Time + r.pos++ + if r.pos >= cap(r.buf) { + r.pos = 0 + } +} + +// Emit emits the moving average of the current window. Emit should be called +// after every call to AggregateInteger and it will produce one point if there +// is enough data to fill a window, otherwise it will produce zero points. +func (r *IntegerMovingAverageReducer) Emit() []FloatPoint { + if len(r.buf) != cap(r.buf) { + return []FloatPoint{} + } + return []FloatPoint{ + { + Value: float64(r.sum) / float64(len(r.buf)), + Time: r.time, + Aggregated: uint32(len(r.buf)), + }, + } +} + +// UnsignedMovingAverageReducer calculates the moving average of the aggregated points. +type UnsignedMovingAverageReducer struct { + pos int + sum uint64 + time int64 + buf []uint64 +} + +// NewUnsignedMovingAverageReducer creates a new UnsignedMovingAverageReducer. +func NewUnsignedMovingAverageReducer(n int) *UnsignedMovingAverageReducer { + return &UnsignedMovingAverageReducer{ + buf: make([]uint64, 0, n), + } +} + +// AggregateUnsigned aggregates a point into the reducer and updates the current window. +func (r *UnsignedMovingAverageReducer) AggregateUnsigned(p *UnsignedPoint) { + if len(r.buf) != cap(r.buf) { + r.buf = append(r.buf, p.Value) + } else { + r.sum -= r.buf[r.pos] + r.buf[r.pos] = p.Value + } + r.sum += p.Value + r.time = p.Time + r.pos++ + if r.pos >= cap(r.buf) { + r.pos = 0 + } +} + +// Emit emits the moving average of the current window. Emit should be called +// after every call to AggregateUnsigned and it will produce one point if there +// is enough data to fill a window, otherwise it will produce zero points. 
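+// For example, with a window of 3, the first two Emit calls return no points and
+// every call after that returns a single point averaging the last three inputs.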
+func (r *UnsignedMovingAverageReducer) Emit() []FloatPoint { + if len(r.buf) != cap(r.buf) { + return []FloatPoint{} + } + return []FloatPoint{ + { + Value: float64(r.sum) / float64(len(r.buf)), + Time: r.time, + Aggregated: uint32(len(r.buf)), + }, + } +} + +type ExponentialMovingAverageReducer struct { + ema gota.EMA + holdPeriod uint32 + count uint32 + v float64 + t int64 +} + +func NewExponentialMovingAverageReducer(period int, holdPeriod int, warmupType gota.WarmupType) *ExponentialMovingAverageReducer { + ema := gota.NewEMA(period, warmupType) + if holdPeriod == -1 { + holdPeriod = ema.WarmCount() + } + return &ExponentialMovingAverageReducer{ + ema: *ema, + holdPeriod: uint32(holdPeriod), + } +} + +func (r *ExponentialMovingAverageReducer) AggregateFloat(p *FloatPoint) { + r.aggregate(p.Value, p.Time) +} +func (r *ExponentialMovingAverageReducer) AggregateInteger(p *IntegerPoint) { + r.aggregate(float64(p.Value), p.Time) +} +func (r *ExponentialMovingAverageReducer) AggregateUnsigned(p *UnsignedPoint) { + r.aggregate(float64(p.Value), p.Time) +} +func (r *ExponentialMovingAverageReducer) aggregate(v float64, t int64) { + r.v = r.ema.Add(v) + r.t = t + r.count++ +} +func (r *ExponentialMovingAverageReducer) Emit() []FloatPoint { + if r.count <= r.holdPeriod { + return nil + } + + return []FloatPoint{ + { + Value: r.v, + Time: r.t, + Aggregated: r.count, + }, + } +} + +type DoubleExponentialMovingAverageReducer struct { + dema gota.DEMA + holdPeriod uint32 + count uint32 + v float64 + t int64 +} + +func NewDoubleExponentialMovingAverageReducer(period int, holdPeriod int, warmupType gota.WarmupType) *DoubleExponentialMovingAverageReducer { + dema := gota.NewDEMA(period, warmupType) + if holdPeriod == -1 { + holdPeriod = dema.WarmCount() + } + return &DoubleExponentialMovingAverageReducer{ + dema: *dema, + holdPeriod: uint32(holdPeriod), + } +} + +func (r *DoubleExponentialMovingAverageReducer) AggregateFloat(p *FloatPoint) { + r.aggregate(p.Value, p.Time) +} +func (r *DoubleExponentialMovingAverageReducer) AggregateInteger(p *IntegerPoint) { + r.aggregate(float64(p.Value), p.Time) +} +func (r *DoubleExponentialMovingAverageReducer) AggregateUnsigned(p *UnsignedPoint) { + r.aggregate(float64(p.Value), p.Time) +} +func (r *DoubleExponentialMovingAverageReducer) aggregate(v float64, t int64) { + r.v = r.dema.Add(v) + r.t = t + r.count++ +} +func (r *DoubleExponentialMovingAverageReducer) Emit() []FloatPoint { + if r.count <= r.holdPeriod { + return nil + } + + return []FloatPoint{ + { + Value: r.v, + Time: r.t, + Aggregated: r.count, + }, + } +} + +type TripleExponentialMovingAverageReducer struct { + tema gota.TEMA + holdPeriod uint32 + count uint32 + v float64 + t int64 +} + +func NewTripleExponentialMovingAverageReducer(period int, holdPeriod int, warmupType gota.WarmupType) *TripleExponentialMovingAverageReducer { + tema := gota.NewTEMA(period, warmupType) + if holdPeriod == -1 { + holdPeriod = tema.WarmCount() + } + return &TripleExponentialMovingAverageReducer{ + tema: *tema, + holdPeriod: uint32(holdPeriod), + } +} + +func (r *TripleExponentialMovingAverageReducer) AggregateFloat(p *FloatPoint) { + r.aggregate(p.Value, p.Time) +} +func (r *TripleExponentialMovingAverageReducer) AggregateInteger(p *IntegerPoint) { + r.aggregate(float64(p.Value), p.Time) +} +func (r *TripleExponentialMovingAverageReducer) AggregateUnsigned(p *UnsignedPoint) { + r.aggregate(float64(p.Value), p.Time) +} +func (r *TripleExponentialMovingAverageReducer) aggregate(v float64, t int64) { + r.v = 
r.tema.Add(v) + r.t = t + r.count++ +} +func (r *TripleExponentialMovingAverageReducer) Emit() []FloatPoint { + if r.count <= r.holdPeriod { + return nil + } + + return []FloatPoint{ + { + Value: r.v, + Time: r.t, + Aggregated: r.count, + }, + } +} + +type RelativeStrengthIndexReducer struct { + rsi gota.RSI + holdPeriod uint32 + count uint32 + v float64 + t int64 +} + +func NewRelativeStrengthIndexReducer(period int, holdPeriod int, warmupType gota.WarmupType) *RelativeStrengthIndexReducer { + rsi := gota.NewRSI(period, warmupType) + if holdPeriod == -1 { + holdPeriod = rsi.WarmCount() + } + return &RelativeStrengthIndexReducer{ + rsi: *rsi, + holdPeriod: uint32(holdPeriod), + } +} +func (r *RelativeStrengthIndexReducer) AggregateFloat(p *FloatPoint) { + r.aggregate(p.Value, p.Time) +} +func (r *RelativeStrengthIndexReducer) AggregateInteger(p *IntegerPoint) { + r.aggregate(float64(p.Value), p.Time) +} +func (r *RelativeStrengthIndexReducer) AggregateUnsigned(p *UnsignedPoint) { + r.aggregate(float64(p.Value), p.Time) +} +func (r *RelativeStrengthIndexReducer) aggregate(v float64, t int64) { + r.v = r.rsi.Add(v) + r.t = t + r.count++ +} +func (r *RelativeStrengthIndexReducer) Emit() []FloatPoint { + if r.count <= r.holdPeriod { + return nil + } + + return []FloatPoint{ + { + Value: r.v, + Time: r.t, + Aggregated: r.count, + }, + } +} + +type TripleExponentialDerivativeReducer struct { + trix gota.TRIX + holdPeriod uint32 + count uint32 + v float64 + t int64 +} + +func NewTripleExponentialDerivativeReducer(period int, holdPeriod int, warmupType gota.WarmupType) *TripleExponentialDerivativeReducer { + trix := gota.NewTRIX(period, warmupType) + if holdPeriod == -1 { + holdPeriod = trix.WarmCount() + } + return &TripleExponentialDerivativeReducer{ + trix: *trix, + holdPeriod: uint32(holdPeriod), + } +} +func (r *TripleExponentialDerivativeReducer) AggregateFloat(p *FloatPoint) { + r.aggregate(p.Value, p.Time) +} +func (r *TripleExponentialDerivativeReducer) AggregateInteger(p *IntegerPoint) { + r.aggregate(float64(p.Value), p.Time) +} +func (r *TripleExponentialDerivativeReducer) AggregateUnsigned(p *UnsignedPoint) { + r.aggregate(float64(p.Value), p.Time) +} +func (r *TripleExponentialDerivativeReducer) aggregate(v float64, t int64) { + r.v = r.trix.Add(v) + r.t = t + r.count++ +} +func (r *TripleExponentialDerivativeReducer) Emit() []FloatPoint { + if r.count <= r.holdPeriod { + return nil + } + if math.IsInf(r.v, 0) { + return nil + } + + return []FloatPoint{ + { + Value: r.v, + Time: r.t, + Aggregated: r.count, + }, + } +} + +type KaufmansEfficiencyRatioReducer struct { + ker gota.KER + holdPeriod uint32 + count uint32 + v float64 + t int64 +} + +func NewKaufmansEfficiencyRatioReducer(period int, holdPeriod int) *KaufmansEfficiencyRatioReducer { + ker := gota.NewKER(period) + if holdPeriod == -1 { + holdPeriod = ker.WarmCount() + } + return &KaufmansEfficiencyRatioReducer{ + ker: *ker, + holdPeriod: uint32(holdPeriod), + } +} +func (r *KaufmansEfficiencyRatioReducer) AggregateFloat(p *FloatPoint) { + r.aggregate(p.Value, p.Time) +} +func (r *KaufmansEfficiencyRatioReducer) AggregateInteger(p *IntegerPoint) { + r.aggregate(float64(p.Value), p.Time) +} +func (r *KaufmansEfficiencyRatioReducer) AggregateUnsigned(p *UnsignedPoint) { + r.aggregate(float64(p.Value), p.Time) +} +func (r *KaufmansEfficiencyRatioReducer) aggregate(v float64, t int64) { + r.v = r.ker.Add(v) + r.t = t + r.count++ +} +func (r *KaufmansEfficiencyRatioReducer) Emit() []FloatPoint { + if r.count <= r.holdPeriod { + 
return nil + } + if math.IsInf(r.v, 0) { + return nil + } + + return []FloatPoint{ + { + Value: r.v, + Time: r.t, + Aggregated: r.count, + }, + } +} + +type KaufmansAdaptiveMovingAverageReducer struct { + kama gota.KAMA + holdPeriod uint32 + count uint32 + v float64 + t int64 +} + +func NewKaufmansAdaptiveMovingAverageReducer(period int, holdPeriod int) *KaufmansAdaptiveMovingAverageReducer { + kama := gota.NewKAMA(period) + if holdPeriod == -1 { + holdPeriod = kama.WarmCount() + } + return &KaufmansAdaptiveMovingAverageReducer{ + kama: *kama, + holdPeriod: uint32(holdPeriod), + } +} +func (r *KaufmansAdaptiveMovingAverageReducer) AggregateFloat(p *FloatPoint) { + r.aggregate(p.Value, p.Time) +} +func (r *KaufmansAdaptiveMovingAverageReducer) AggregateInteger(p *IntegerPoint) { + r.aggregate(float64(p.Value), p.Time) +} +func (r *KaufmansAdaptiveMovingAverageReducer) AggregateUnsigned(p *UnsignedPoint) { + r.aggregate(float64(p.Value), p.Time) +} +func (r *KaufmansAdaptiveMovingAverageReducer) aggregate(v float64, t int64) { + r.v = r.kama.Add(v) + r.t = t + r.count++ +} +func (r *KaufmansAdaptiveMovingAverageReducer) Emit() []FloatPoint { + if r.count <= r.holdPeriod { + return nil + } + if math.IsInf(r.v, 0) { + return nil + } + + return []FloatPoint{ + { + Value: r.v, + Time: r.t, + Aggregated: r.count, + }, + } +} + +type ChandeMomentumOscillatorReducer struct { + cmo gota.AlgSimple + holdPeriod uint32 + count uint32 + v float64 + t int64 +} + +func NewChandeMomentumOscillatorReducer(period int, holdPeriod int, warmupType gota.WarmupType) *ChandeMomentumOscillatorReducer { + var cmo gota.AlgSimple + if warmupType == gota.WarmupType(-1) { + cmo = gota.NewCMO(period) + } else { + cmo = gota.NewCMOS(period, warmupType) + } + + if holdPeriod == -1 { + holdPeriod = cmo.WarmCount() + } + return &ChandeMomentumOscillatorReducer{ + cmo: cmo, + holdPeriod: uint32(holdPeriod), + } +} +func (r *ChandeMomentumOscillatorReducer) AggregateFloat(p *FloatPoint) { + r.aggregate(p.Value, p.Time) +} +func (r *ChandeMomentumOscillatorReducer) AggregateInteger(p *IntegerPoint) { + r.aggregate(float64(p.Value), p.Time) +} +func (r *ChandeMomentumOscillatorReducer) AggregateUnsigned(p *UnsignedPoint) { + r.aggregate(float64(p.Value), p.Time) +} +func (r *ChandeMomentumOscillatorReducer) aggregate(v float64, t int64) { + r.v = r.cmo.Add(v) + r.t = t + r.count++ +} +func (r *ChandeMomentumOscillatorReducer) Emit() []FloatPoint { + if r.count <= r.holdPeriod { + return nil + } + + return []FloatPoint{ + { + Value: r.v, + Time: r.t, + Aggregated: r.count, + }, + } +} + +// FloatCumulativeSumReducer cumulates the values from each point. +type FloatCumulativeSumReducer struct { + curr FloatPoint +} + +// NewFloatCumulativeSumReducer creates a new FloatCumulativeSumReducer. +func NewFloatCumulativeSumReducer() *FloatCumulativeSumReducer { + return &FloatCumulativeSumReducer{ + curr: FloatPoint{Nil: true}, + } +} + +func (r *FloatCumulativeSumReducer) AggregateFloat(p *FloatPoint) { + r.curr.Value += p.Value + r.curr.Time = p.Time + r.curr.Nil = false +} + +func (r *FloatCumulativeSumReducer) Emit() []FloatPoint { + var pts []FloatPoint + if !r.curr.Nil { + pts = []FloatPoint{r.curr} + } + return pts +} + +// IntegerCumulativeSumReducer cumulates the values from each point. +type IntegerCumulativeSumReducer struct { + curr IntegerPoint +} + +// NewIntegerCumulativeSumReducer creates a new IntegerCumulativeSumReducer. 
+func NewIntegerCumulativeSumReducer() *IntegerCumulativeSumReducer { + return &IntegerCumulativeSumReducer{ + curr: IntegerPoint{Nil: true}, + } +} + +func (r *IntegerCumulativeSumReducer) AggregateInteger(p *IntegerPoint) { + r.curr.Value += p.Value + r.curr.Time = p.Time + r.curr.Nil = false +} + +func (r *IntegerCumulativeSumReducer) Emit() []IntegerPoint { + var pts []IntegerPoint + if !r.curr.Nil { + pts = []IntegerPoint{r.curr} + } + return pts +} + +// UnsignedCumulativeSumReducer cumulates the values from each point. +type UnsignedCumulativeSumReducer struct { + curr UnsignedPoint +} + +// NewUnsignedCumulativeSumReducer creates a new UnsignedCumulativeSumReducer. +func NewUnsignedCumulativeSumReducer() *UnsignedCumulativeSumReducer { + return &UnsignedCumulativeSumReducer{ + curr: UnsignedPoint{Nil: true}, + } +} + +func (r *UnsignedCumulativeSumReducer) AggregateUnsigned(p *UnsignedPoint) { + r.curr.Value += p.Value + r.curr.Time = p.Time + r.curr.Nil = false +} + +func (r *UnsignedCumulativeSumReducer) Emit() []UnsignedPoint { + var pts []UnsignedPoint + if !r.curr.Nil { + pts = []UnsignedPoint{r.curr} + } + return pts +} + +// FloatHoltWintersReducer forecasts a series into the future. +// This is done using the Holt-Winters damped method. +// 1. Using the series the initial values are calculated using a SSE. +// 2. The series is forecasted into the future using the iterative relations. +type FloatHoltWintersReducer struct { + // Season period + m int + seasonal bool + + // Horizon + h int + + // Interval between points + interval int64 + // interval / 2 -- used to perform rounding + halfInterval int64 + + // Whether to include all data or only future values + includeFitData bool + + // NelderMead optimizer + optim *neldermead.Optimizer + // Small difference bound for the optimizer + epsilon float64 + + y []float64 + points []FloatPoint +} + +const ( + // Arbitrary weight for initializing some intial guesses. + // This should be in the range [0,1] + hwWeight = 0.5 + // Epsilon value for the minimization process + hwDefaultEpsilon = 1.0e-4 + // Define a grid of initial guesses for the parameters: alpha, beta, gamma, and phi. + // Keep in mind that this grid is N^4 so we should keep N small + // The starting lower guess + hwGuessLower = 0.3 + // The upper bound on the grid + hwGuessUpper = 1.0 + // The step between guesses + hwGuessStep = 0.4 +) + +// NewFloatHoltWintersReducer creates a new FloatHoltWintersReducer. +func NewFloatHoltWintersReducer(h, m int, includeFitData bool, interval time.Duration) *FloatHoltWintersReducer { + seasonal := true + if m < 2 { + seasonal = false + } + return &FloatHoltWintersReducer{ + h: h, + m: m, + seasonal: seasonal, + includeFitData: includeFitData, + interval: int64(interval), + halfInterval: int64(interval) / 2, + optim: neldermead.New(), + epsilon: hwDefaultEpsilon, + } +} + +func (r *FloatHoltWintersReducer) aggregate(time int64, value float64) { + r.points = append(r.points, FloatPoint{ + Time: time, + Value: value, + }) +} + +// AggregateFloat aggregates a point into the reducer and updates the current window. +func (r *FloatHoltWintersReducer) AggregateFloat(p *FloatPoint) { + r.aggregate(p.Time, p.Value) +} + +// AggregateInteger aggregates a point into the reducer and updates the current window. 
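+// Integer inputs are converted to float64, since the Holt-Winters fit and
+// forecast are computed entirely in floating point.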
+func (r *FloatHoltWintersReducer) AggregateInteger(p *IntegerPoint) { + r.aggregate(p.Time, float64(p.Value)) +} + +func (r *FloatHoltWintersReducer) roundTime(t int64) int64 { + // Overflow safe round function + remainder := t % r.interval + if remainder > r.halfInterval { + // Round up + return (t/r.interval + 1) * r.interval + } + // Round down + return (t / r.interval) * r.interval +} + +// Emit returns the points generated by the HoltWinters algorithm. +func (r *FloatHoltWintersReducer) Emit() []FloatPoint { + if l := len(r.points); l < 2 || r.seasonal && l < r.m || r.h <= 0 { + return nil + } + // First fill in r.y with values and NaNs for missing values + start, stop := r.roundTime(r.points[0].Time), r.roundTime(r.points[len(r.points)-1].Time) + count := (stop - start) / r.interval + if count <= 0 { + return nil + } + r.y = make([]float64, 1, count) + r.y[0] = r.points[0].Value + t := r.roundTime(r.points[0].Time) + for _, p := range r.points[1:] { + rounded := r.roundTime(p.Time) + if rounded <= t { + // Drop values that occur for the same time bucket + continue + } + t += r.interval + // Add any missing values before the next point + for rounded != t { + // Add in a NaN so we can skip it later. + r.y = append(r.y, math.NaN()) + t += r.interval + } + r.y = append(r.y, p.Value) + } + + // Seasonality + m := r.m + + // Starting guesses + // NOTE: Since these values are guesses + // in the cases where we were missing data, + // we can just skip the value and call it good. + + l0 := 0.0 + if r.seasonal { + for i := 0; i < m; i++ { + if !math.IsNaN(r.y[i]) { + l0 += (1 / float64(m)) * r.y[i] + } + } + } else { + l0 += hwWeight * r.y[0] + } + + b0 := 0.0 + if r.seasonal { + for i := 0; i < m && m+i < len(r.y); i++ { + if !math.IsNaN(r.y[i]) && !math.IsNaN(r.y[m+i]) { + b0 += 1 / float64(m*m) * (r.y[m+i] - r.y[i]) + } + } + } else { + if !math.IsNaN(r.y[1]) { + b0 = hwWeight * (r.y[1] - r.y[0]) + } + } + + var s []float64 + if r.seasonal { + s = make([]float64, m) + for i := 0; i < m; i++ { + if !math.IsNaN(r.y[i]) { + s[i] = r.y[i] / l0 + } else { + s[i] = 0 + } + } + } + + parameters := make([]float64, 6+len(s)) + parameters[4] = l0 + parameters[5] = b0 + o := len(parameters) - len(s) + for i := range s { + parameters[i+o] = s[i] + } + + // Determine best fit for the various parameters + minSSE := math.Inf(1) + var bestParams []float64 + for alpha := hwGuessLower; alpha < hwGuessUpper; alpha += hwGuessStep { + for beta := hwGuessLower; beta < hwGuessUpper; beta += hwGuessStep { + for gamma := hwGuessLower; gamma < hwGuessUpper; gamma += hwGuessStep { + for phi := hwGuessLower; phi < hwGuessUpper; phi += hwGuessStep { + parameters[0] = alpha + parameters[1] = beta + parameters[2] = gamma + parameters[3] = phi + sse, params := r.optim.Optimize(r.sse, parameters, r.epsilon, 1) + if sse < minSSE || bestParams == nil { + minSSE = sse + bestParams = params + } + } + } + } + } + + // Forecast + forecasted := r.forecast(r.h, bestParams) + var points []FloatPoint + if r.includeFitData { + start := r.points[0].Time + points = make([]FloatPoint, 0, len(forecasted)) + for i, v := range forecasted { + if !math.IsNaN(v) { + t := start + r.interval*(int64(i)) + points = append(points, FloatPoint{ + Value: v, + Time: t, + }) + } + } + } else { + stop := r.points[len(r.points)-1].Time + points = make([]FloatPoint, 0, r.h) + for i, v := range forecasted[len(r.y):] { + if !math.IsNaN(v) { + t := stop + r.interval*(int64(i)+1) + points = append(points, FloatPoint{ + Value: v, + Time: t, + }) + } + } + } + 
// Clear data set + r.y = r.y[0:0] + return points +} + +// Using the recursive relations compute the next values +func (r *FloatHoltWintersReducer) next(alpha, beta, gamma, phi, phiH, yT, lTp, bTp, sTm, sTmh float64) (yTh, lT, bT, sT float64) { + lT = alpha*(yT/sTm) + (1-alpha)*(lTp+phi*bTp) + bT = beta*(lT-lTp) + (1-beta)*phi*bTp + sT = gamma*(yT/(lTp+phi*bTp)) + (1-gamma)*sTm + yTh = (lT + phiH*bT) * sTmh + return +} + +// Forecast the data h points into the future. +func (r *FloatHoltWintersReducer) forecast(h int, params []float64) []float64 { + // Constrain parameters + r.constrain(params) + + yT := r.y[0] + + phi := params[3] + phiH := phi + + lT := params[4] + bT := params[5] + + // seasonals is a ring buffer of past sT values + var seasonals []float64 + var m, so int + if r.seasonal { + seasonals = params[6:] + m = len(params[6:]) + if m == 1 { + seasonals[0] = 1 + } + // Season index offset + so = m - 1 + } + + forecasted := make([]float64, len(r.y)+h) + forecasted[0] = yT + l := len(r.y) + var hm int + stm, stmh := 1.0, 1.0 + for t := 1; t < l+h; t++ { + if r.seasonal { + hm = t % m + stm = seasonals[(t-m+so)%m] + stmh = seasonals[(t-m+hm+so)%m] + } + var sT float64 + yT, lT, bT, sT = r.next( + params[0], // alpha + params[1], // beta + params[2], // gamma + phi, + phiH, + yT, + lT, + bT, + stm, + stmh, + ) + phiH += math.Pow(phi, float64(t)) + + if r.seasonal { + seasonals[(t+so)%m] = sT + so++ + } + + forecasted[t] = yT + } + return forecasted +} + +// Compute sum squared error for the given parameters. +func (r *FloatHoltWintersReducer) sse(params []float64) float64 { + sse := 0.0 + forecasted := r.forecast(0, params) + for i := range forecasted { + // Skip missing values since we cannot use them to compute an error. + if !math.IsNaN(r.y[i]) { + // Compute error + if math.IsNaN(forecasted[i]) { + // Penalize forecasted NaNs + return math.Inf(1) + } + diff := forecasted[i] - r.y[i] + sse += diff * diff + } + } + return sse +} + +// Constrain alpha, beta, gamma, phi in the range [0, 1] +func (r *FloatHoltWintersReducer) constrain(x []float64) { + // alpha + if x[0] > 1 { + x[0] = 1 + } + if x[0] < 0 { + x[0] = 0 + } + // beta + if x[1] > 1 { + x[1] = 1 + } + if x[1] < 0 { + x[1] = 0 + } + // gamma + if x[2] > 1 { + x[2] = 1 + } + if x[2] < 0 { + x[2] = 0 + } + // phi + if x[3] > 1 { + x[3] = 1 + } + if x[3] < 0 { + x[3] = 0 + } +} + +// FloatIntegralReducer calculates the time-integral of the aggregated points. +type FloatIntegralReducer struct { + interval Interval + sum float64 + prev FloatPoint + window struct { + start int64 + end int64 + } + ch chan FloatPoint + opt IteratorOptions +} + +// NewFloatIntegralReducer creates a new FloatIntegralReducer. +func NewFloatIntegralReducer(interval Interval, opt IteratorOptions) *FloatIntegralReducer { + return &FloatIntegralReducer{ + interval: interval, + prev: FloatPoint{Nil: true}, + ch: make(chan FloatPoint, 1), + opt: opt, + } +} + +// AggregateFloat aggregates a point into the reducer. +func (r *FloatIntegralReducer) AggregateFloat(p *FloatPoint) { + // If this is the first point, just save it + if r.prev.Nil { + r.prev = *p + if !r.opt.Interval.IsZero() { + // Record the end of the time interval. + // We do not care for whether the last number is inclusive or exclusive + // because we treat both the same for the involved math. 
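+	// Note that for descending iterators the two values returned by Window are
+	// assigned in swapped order below.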
+ if r.opt.Ascending { + r.window.start, r.window.end = r.opt.Window(p.Time) + } else { + r.window.end, r.window.start = r.opt.Window(p.Time) + } + } + return + } + + // If this point has the same timestamp as the previous one, + // skip the point. Points sent into this reducer are expected + // to be fed in order. + if r.prev.Time == p.Time { + r.prev = *p + return + } else if !r.opt.Interval.IsZero() && ((r.opt.Ascending && p.Time >= r.window.end) || (!r.opt.Ascending && p.Time <= r.window.end)) { + // If our previous time is not equal to the window, we need to + // interpolate the area at the end of this interval. + if r.prev.Time != r.window.end { + value := linearFloat(r.window.end, r.prev.Time, p.Time, r.prev.Value, p.Value) + elapsed := float64(r.window.end-r.prev.Time) / float64(r.interval.Duration) + r.sum += 0.5 * (value + r.prev.Value) * elapsed + + r.prev.Value = value + r.prev.Time = r.window.end + } + + // Emit the current point through the channel and then clear it. + r.ch <- FloatPoint{Time: r.window.start, Value: r.sum} + if r.opt.Ascending { + r.window.start, r.window.end = r.opt.Window(p.Time) + } else { + r.window.end, r.window.start = r.opt.Window(p.Time) + } + r.sum = 0.0 + } + + // Normal operation: update the sum using the trapezium rule + elapsed := float64(p.Time-r.prev.Time) / float64(r.interval.Duration) + r.sum += 0.5 * (p.Value + r.prev.Value) * elapsed + r.prev = *p +} + +// Emit emits the time-integral of the aggregated points as a single point. +// InfluxQL convention dictates that outside a group-by-time clause we return +// a timestamp of zero. Within a group-by-time, we can set the time to ZeroTime +// and a higher level will change it to the start of the time group. +func (r *FloatIntegralReducer) Emit() []FloatPoint { + select { + case pt, ok := <-r.ch: + if !ok { + return nil + } + return []FloatPoint{pt} + default: + return nil + } +} + +// Close flushes any in progress points to ensure any remaining points are +// emitted. +func (r *FloatIntegralReducer) Close() error { + // If our last point is at the start time, then discard this point since + // there is no area within this bucket. Otherwise, send off what we + // currently have as the final point. + if !r.prev.Nil && r.prev.Time != r.window.start { + r.ch <- FloatPoint{Time: r.window.start, Value: r.sum} + } + close(r.ch) + return nil +} + +// IntegerIntegralReducer calculates the time-integral of the aggregated points. +type IntegerIntegralReducer struct { + interval Interval + sum float64 + prev IntegerPoint + window struct { + start int64 + end int64 + } + ch chan FloatPoint + opt IteratorOptions +} + +// NewIntegerIntegralReducer creates a new IntegerIntegralReducer. +func NewIntegerIntegralReducer(interval Interval, opt IteratorOptions) *IntegerIntegralReducer { + return &IntegerIntegralReducer{ + interval: interval, + prev: IntegerPoint{Nil: true}, + ch: make(chan FloatPoint, 1), + opt: opt, + } +} + +// AggregateInteger aggregates a point into the reducer. +func (r *IntegerIntegralReducer) AggregateInteger(p *IntegerPoint) { + // If this is the first point, just save it + if r.prev.Nil { + r.prev = *p + + // Record the end of the time interval. + // We do not care for whether the last number is inclusive or exclusive + // because we treat both the same for the involved math. 
+ if r.opt.Ascending { + r.window.start, r.window.end = r.opt.Window(p.Time) + } else { + r.window.end, r.window.start = r.opt.Window(p.Time) + } + + // If we see the minimum allowable time, set the time to zero so we don't + // break the default returned time for aggregate queries without times. + if r.window.start == influxql.MinTime { + r.window.start = 0 + } + return + } + + // If this point has the same timestamp as the previous one, + // skip the point. Points sent into this reducer are expected + // to be fed in order. + value := float64(p.Value) + if r.prev.Time == p.Time { + r.prev = *p + return + } else if (r.opt.Ascending && p.Time >= r.window.end) || (!r.opt.Ascending && p.Time <= r.window.end) { + // If our previous time is not equal to the window, we need to + // interpolate the area at the end of this interval. + if r.prev.Time != r.window.end { + value = linearFloat(r.window.end, r.prev.Time, p.Time, float64(r.prev.Value), value) + elapsed := float64(r.window.end-r.prev.Time) / float64(r.interval.Duration) + r.sum += 0.5 * (value + float64(r.prev.Value)) * elapsed + + r.prev.Time = r.window.end + } + + // Emit the current point through the channel and then clear it. + r.ch <- FloatPoint{Time: r.window.start, Value: r.sum} + if r.opt.Ascending { + r.window.start, r.window.end = r.opt.Window(p.Time) + } else { + r.window.end, r.window.start = r.opt.Window(p.Time) + } + r.sum = 0.0 + } + + // Normal operation: update the sum using the trapezium rule + elapsed := float64(p.Time-r.prev.Time) / float64(r.interval.Duration) + r.sum += 0.5 * (value + float64(r.prev.Value)) * elapsed + r.prev = *p +} + +// Emit emits the time-integral of the aggregated points as a single FLOAT point +// InfluxQL convention dictates that outside a group-by-time clause we return +// a timestamp of zero. Within a group-by-time, we can set the time to ZeroTime +// and a higher level will change it to the start of the time group. +func (r *IntegerIntegralReducer) Emit() []FloatPoint { + select { + case pt, ok := <-r.ch: + if !ok { + return nil + } + return []FloatPoint{pt} + default: + return nil + } +} + +// Close flushes any in progress points to ensure any remaining points are +// emitted. +func (r *IntegerIntegralReducer) Close() error { + // If our last point is at the start time, then discard this point since + // there is no area within this bucket. Otherwise, send off what we + // currently have as the final point. + if !r.prev.Nil && r.prev.Time != r.window.start { + r.ch <- FloatPoint{Time: r.window.start, Value: r.sum} + } + close(r.ch) + return nil +} + +// IntegerIntegralReducer calculates the time-integral of the aggregated points. +type UnsignedIntegralReducer struct { + interval Interval + sum float64 + prev UnsignedPoint + window struct { + start int64 + end int64 + } + ch chan FloatPoint + opt IteratorOptions +} + +// NewUnsignedIntegralReducer creates a new UnsignedIntegralReducer. +func NewUnsignedIntegralReducer(interval Interval, opt IteratorOptions) *UnsignedIntegralReducer { + return &UnsignedIntegralReducer{ + interval: interval, + prev: UnsignedPoint{Nil: true}, + ch: make(chan FloatPoint, 1), + opt: opt, + } +} + +// AggregateUnsigned aggregates a point into the reducer. +func (r *UnsignedIntegralReducer) AggregateUnsigned(p *UnsignedPoint) { + // If this is the first point, just save it + if r.prev.Nil { + r.prev = *p + + // Record the end of the time interval. 
+ // We do not care for whether the last number is inclusive or exclusive + // because we treat both the same for the involved math. + if r.opt.Ascending { + r.window.start, r.window.end = r.opt.Window(p.Time) + } else { + r.window.end, r.window.start = r.opt.Window(p.Time) + } + + // If we see the minimum allowable time, set the time to zero so we don't + // break the default returned time for aggregate queries without times. + if r.window.start == influxql.MinTime { + r.window.start = 0 + } + return + } + + // If this point has the same timestamp as the previous one, + // skip the point. Points sent into this reducer are expected + // to be fed in order. + value := float64(p.Value) + if r.prev.Time == p.Time { + r.prev = *p + return + } else if (r.opt.Ascending && p.Time >= r.window.end) || (!r.opt.Ascending && p.Time <= r.window.end) { + // If our previous time is not equal to the window, we need to + // interpolate the area at the end of this interval. + if r.prev.Time != r.window.end { + value = linearFloat(r.window.end, r.prev.Time, p.Time, float64(r.prev.Value), value) + elapsed := float64(r.window.end-r.prev.Time) / float64(r.interval.Duration) + r.sum += 0.5 * (value + float64(r.prev.Value)) * elapsed + + r.prev.Time = r.window.end + } + + // Emit the current point through the channel and then clear it. + r.ch <- FloatPoint{Time: r.window.start, Value: r.sum} + if r.opt.Ascending { + r.window.start, r.window.end = r.opt.Window(p.Time) + } else { + r.window.end, r.window.start = r.opt.Window(p.Time) + } + r.sum = 0.0 + } + + // Normal operation: update the sum using the trapezium rule + elapsed := float64(p.Time-r.prev.Time) / float64(r.interval.Duration) + r.sum += 0.5 * (value + float64(r.prev.Value)) * elapsed + r.prev = *p +} + +// Emit emits the time-integral of the aggregated points as a single FLOAT point +// InfluxQL convention dictates that outside a group-by-time clause we return +// a timestamp of zero. Within a group-by-time, we can set the time to ZeroTime +// and a higher level will change it to the start of the time group. +func (r *UnsignedIntegralReducer) Emit() []FloatPoint { + select { + case pt, ok := <-r.ch: + if !ok { + return nil + } + return []FloatPoint{pt} + default: + return nil + } +} + +// Close flushes any in progress points to ensure any remaining points are +// emitted. +func (r *UnsignedIntegralReducer) Close() error { + // If our last point is at the start time, then discard this point since + // there is no area within this bucket. Otherwise, send off what we + // currently have as the final point. + if !r.prev.Nil && r.prev.Time != r.window.start { + r.ch <- FloatPoint{Time: r.window.start, Value: r.sum} + } + close(r.ch) + return nil +} + +type FloatTopReducer struct { + h *floatPointsByFunc +} + +func NewFloatTopReducer(n int) *FloatTopReducer { + return &FloatTopReducer{ + h: floatPointsSortBy(make([]FloatPoint, 0, n), func(a, b *FloatPoint) bool { + if a.Value != b.Value { + return a.Value < b.Value + } + return a.Time > b.Time + }), + } +} + +func (r *FloatTopReducer) AggregateFloat(p *FloatPoint) { + if r.h.Len() == cap(r.h.points) { + // Compare the minimum point and the aggregated point. If our value is + // larger, replace the current min value. + if !r.h.cmp(&r.h.points[0], p) { + return + } + p.CopyTo(&r.h.points[0]) + heap.Fix(r.h, 0) + return + } + + var clone FloatPoint + p.CopyTo(&clone) + heap.Push(r.h, clone) +} + +func (r *FloatTopReducer) Emit() []FloatPoint { + // Ensure the points are sorted with the maximum value last. 
While the + // first point may be the minimum value, the rest is not guaranteed to be + // in any particular order while it is a heap. + points := make([]FloatPoint, len(r.h.points)) + for i, p := range r.h.points { + p.Aggregated = 0 + points[i] = p + } + h := floatPointsByFunc{points: points, cmp: r.h.cmp} + sort.Sort(sort.Reverse(&h)) + return points +} + +type IntegerTopReducer struct { + h *integerPointsByFunc +} + +func NewIntegerTopReducer(n int) *IntegerTopReducer { + return &IntegerTopReducer{ + h: integerPointsSortBy(make([]IntegerPoint, 0, n), func(a, b *IntegerPoint) bool { + if a.Value != b.Value { + return a.Value < b.Value + } + return a.Time > b.Time + }), + } +} + +func (r *IntegerTopReducer) AggregateInteger(p *IntegerPoint) { + if r.h.Len() == cap(r.h.points) { + // Compare the minimum point and the aggregated point. If our value is + // larger, replace the current min value. + if !r.h.cmp(&r.h.points[0], p) { + return + } + p.CopyTo(&r.h.points[0]) + heap.Fix(r.h, 0) + return + } + + var clone IntegerPoint + p.CopyTo(&clone) + heap.Push(r.h, clone) +} + +func (r *IntegerTopReducer) Emit() []IntegerPoint { + // Ensure the points are sorted with the maximum value last. While the + // first point may be the minimum value, the rest is not guaranteed to be + // in any particular order while it is a heap. + points := make([]IntegerPoint, len(r.h.points)) + for i, p := range r.h.points { + p.Aggregated = 0 + points[i] = p + } + h := integerPointsByFunc{points: points, cmp: r.h.cmp} + sort.Sort(sort.Reverse(&h)) + return points +} + +type UnsignedTopReducer struct { + h *unsignedPointsByFunc +} + +func NewUnsignedTopReducer(n int) *UnsignedTopReducer { + return &UnsignedTopReducer{ + h: unsignedPointsSortBy(make([]UnsignedPoint, 0, n), func(a, b *UnsignedPoint) bool { + if a.Value != b.Value { + return a.Value < b.Value + } + return a.Time > b.Time + }), + } +} + +func (r *UnsignedTopReducer) AggregateUnsigned(p *UnsignedPoint) { + if r.h.Len() == cap(r.h.points) { + // Compare the minimum point and the aggregated point. If our value is + // larger, replace the current min value. + if !r.h.cmp(&r.h.points[0], p) { + return + } + p.CopyTo(&r.h.points[0]) + heap.Fix(r.h, 0) + return + } + + var clone UnsignedPoint + p.CopyTo(&clone) + heap.Push(r.h, clone) +} + +func (r *UnsignedTopReducer) Emit() []UnsignedPoint { + // Ensure the points are sorted with the maximum value last. While the + // first point may be the minimum value, the rest is not guaranteed to be + // in any particular order while it is a heap. + points := make([]UnsignedPoint, len(r.h.points)) + for i, p := range r.h.points { + p.Aggregated = 0 + points[i] = p + } + h := unsignedPointsByFunc{points: points, cmp: r.h.cmp} + sort.Sort(sort.Reverse(&h)) + return points +} + +type FloatBottomReducer struct { + h *floatPointsByFunc +} + +func NewFloatBottomReducer(n int) *FloatBottomReducer { + return &FloatBottomReducer{ + h: floatPointsSortBy(make([]FloatPoint, 0, n), func(a, b *FloatPoint) bool { + if a.Value != b.Value { + return a.Value > b.Value + } + return a.Time > b.Time + }), + } +} + +func (r *FloatBottomReducer) AggregateFloat(p *FloatPoint) { + if r.h.Len() == cap(r.h.points) { + // Compare the minimum point and the aggregated point. If our value is + // larger, replace the current min value. 
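+		// Note: for the bottom reducer the comparator is inverted, so the heap root
+		// holds the largest retained value and the reducer keeps the n smallest points.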
+ if !r.h.cmp(&r.h.points[0], p) { + return + } + p.CopyTo(&r.h.points[0]) + heap.Fix(r.h, 0) + return + } + + var clone FloatPoint + p.CopyTo(&clone) + heap.Push(r.h, clone) +} + +func (r *FloatBottomReducer) Emit() []FloatPoint { + // Ensure the points are sorted with the maximum value last. While the + // first point may be the minimum value, the rest is not guaranteed to be + // in any particular order while it is a heap. + points := make([]FloatPoint, len(r.h.points)) + for i, p := range r.h.points { + p.Aggregated = 0 + points[i] = p + } + h := floatPointsByFunc{points: points, cmp: r.h.cmp} + sort.Sort(sort.Reverse(&h)) + return points +} + +type IntegerBottomReducer struct { + h *integerPointsByFunc +} + +func NewIntegerBottomReducer(n int) *IntegerBottomReducer { + return &IntegerBottomReducer{ + h: integerPointsSortBy(make([]IntegerPoint, 0, n), func(a, b *IntegerPoint) bool { + if a.Value != b.Value { + return a.Value > b.Value + } + return a.Time > b.Time + }), + } +} + +func (r *IntegerBottomReducer) AggregateInteger(p *IntegerPoint) { + if r.h.Len() == cap(r.h.points) { + // Compare the minimum point and the aggregated point. If our value is + // larger, replace the current min value. + if !r.h.cmp(&r.h.points[0], p) { + return + } + p.CopyTo(&r.h.points[0]) + heap.Fix(r.h, 0) + return + } + + var clone IntegerPoint + p.CopyTo(&clone) + heap.Push(r.h, clone) +} + +func (r *IntegerBottomReducer) Emit() []IntegerPoint { + // Ensure the points are sorted with the maximum value last. While the + // first point may be the minimum value, the rest is not guaranteed to be + // in any particular order while it is a heap. + points := make([]IntegerPoint, len(r.h.points)) + for i, p := range r.h.points { + p.Aggregated = 0 + points[i] = p + } + h := integerPointsByFunc{points: points, cmp: r.h.cmp} + sort.Sort(sort.Reverse(&h)) + return points +} + +type UnsignedBottomReducer struct { + h *unsignedPointsByFunc +} + +func NewUnsignedBottomReducer(n int) *UnsignedBottomReducer { + return &UnsignedBottomReducer{ + h: unsignedPointsSortBy(make([]UnsignedPoint, 0, n), func(a, b *UnsignedPoint) bool { + if a.Value != b.Value { + return a.Value > b.Value + } + return a.Time > b.Time + }), + } +} + +func (r *UnsignedBottomReducer) AggregateUnsigned(p *UnsignedPoint) { + if r.h.Len() == cap(r.h.points) { + // Compare the minimum point and the aggregated point. If our value is + // larger, replace the current min value. + if !r.h.cmp(&r.h.points[0], p) { + return + } + p.CopyTo(&r.h.points[0]) + heap.Fix(r.h, 0) + return + } + + var clone UnsignedPoint + p.CopyTo(&clone) + heap.Push(r.h, clone) +} + +func (r *UnsignedBottomReducer) Emit() []UnsignedPoint { + // Ensure the points are sorted with the maximum value last. While the + // first point may be the minimum value, the rest is not guaranteed to be + // in any particular order while it is a heap. 
+ points := make([]UnsignedPoint, len(r.h.points)) + for i, p := range r.h.points { + p.Aggregated = 0 + points[i] = p + } + h := unsignedPointsByFunc{points: points, cmp: r.h.cmp} + sort.Sort(sort.Reverse(&h)) + return points +} diff --git a/vendor/github.com/influxdata/influxdb/query/functions_test.go b/vendor/github.com/influxdata/influxdb/query/functions_test.go new file mode 100644 index 0000000..d88f599 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/query/functions_test.go @@ -0,0 +1,499 @@ +package query_test + +import ( + "math" + "testing" + "time" + + "github.com/davecgh/go-spew/spew" + "github.com/influxdata/influxdb/pkg/deep" + "github.com/influxdata/influxdb/query" + "github.com/influxdata/influxql" +) + +func almostEqual(got, exp float64) bool { + return math.Abs(got-exp) < 1e-5 && !math.IsNaN(got) +} + +func TestHoltWinters_AusTourists(t *testing.T) { + hw := query.NewFloatHoltWintersReducer(10, 4, false, 1) + // Dataset from http://www.inside-r.org/packages/cran/fpp/docs/austourists + austourists := []query.FloatPoint{ + {Time: 1, Value: 30.052513}, + {Time: 2, Value: 19.148496}, + {Time: 3, Value: 25.317692}, + {Time: 4, Value: 27.591437}, + {Time: 5, Value: 32.076456}, + {Time: 6, Value: 23.487961}, + {Time: 7, Value: 28.47594}, + {Time: 8, Value: 35.123753}, + {Time: 9, Value: 36.838485}, + {Time: 10, Value: 25.007017}, + {Time: 11, Value: 30.72223}, + {Time: 12, Value: 28.693759}, + {Time: 13, Value: 36.640986}, + {Time: 14, Value: 23.824609}, + {Time: 15, Value: 29.311683}, + {Time: 16, Value: 31.770309}, + {Time: 17, Value: 35.177877}, + {Time: 18, Value: 19.775244}, + {Time: 19, Value: 29.60175}, + {Time: 20, Value: 34.538842}, + {Time: 21, Value: 41.273599}, + {Time: 22, Value: 26.655862}, + {Time: 23, Value: 28.279859}, + {Time: 24, Value: 35.191153}, + {Time: 25, Value: 41.727458}, + {Time: 26, Value: 24.04185}, + {Time: 27, Value: 32.328103}, + {Time: 28, Value: 37.328708}, + {Time: 29, Value: 46.213153}, + {Time: 30, Value: 29.346326}, + {Time: 31, Value: 36.48291}, + {Time: 32, Value: 42.977719}, + {Time: 33, Value: 48.901525}, + {Time: 34, Value: 31.180221}, + {Time: 35, Value: 37.717881}, + {Time: 36, Value: 40.420211}, + {Time: 37, Value: 51.206863}, + {Time: 38, Value: 31.887228}, + {Time: 39, Value: 40.978263}, + {Time: 40, Value: 43.772491}, + {Time: 41, Value: 55.558567}, + {Time: 42, Value: 33.850915}, + {Time: 43, Value: 42.076383}, + {Time: 44, Value: 45.642292}, + {Time: 45, Value: 59.76678}, + {Time: 46, Value: 35.191877}, + {Time: 47, Value: 44.319737}, + {Time: 48, Value: 47.913736}, + } + + for _, p := range austourists { + hw.AggregateFloat(&p) + } + points := hw.Emit() + + forecasted := []query.FloatPoint{ + {Time: 49, Value: 51.85064132137853}, + {Time: 50, Value: 43.26055282315273}, + {Time: 51, Value: 41.827258044814464}, + {Time: 52, Value: 54.3990354591749}, + {Time: 53, Value: 54.62334472770803}, + {Time: 54, Value: 45.57155693625209}, + {Time: 55, Value: 44.06051240252263}, + {Time: 56, Value: 57.30029870759433}, + {Time: 57, Value: 57.53591513519172}, + {Time: 58, Value: 47.999008139396096}, + } + + if exp, got := len(forecasted), len(points); exp != got { + t.Fatalf("unexpected number of points emitted: got %d exp %d", got, exp) + } + + for i := range forecasted { + if exp, got := forecasted[i].Time, points[i].Time; got != exp { + t.Errorf("unexpected time on points[%d] got %v exp %v", i, got, exp) + } + if exp, got := forecasted[i].Value, points[i].Value; !almostEqual(got, exp) { + t.Errorf("unexpected value on points[%d] 
got %v exp %v", i, got, exp) + } + } +} + +func TestHoltWinters_AusTourists_Missing(t *testing.T) { + hw := query.NewFloatHoltWintersReducer(10, 4, false, 1) + // Dataset from http://www.inside-r.org/packages/cran/fpp/docs/austourists + austourists := []query.FloatPoint{ + {Time: 1, Value: 30.052513}, + {Time: 3, Value: 25.317692}, + {Time: 4, Value: 27.591437}, + {Time: 5, Value: 32.076456}, + {Time: 6, Value: 23.487961}, + {Time: 7, Value: 28.47594}, + {Time: 9, Value: 36.838485}, + {Time: 10, Value: 25.007017}, + {Time: 11, Value: 30.72223}, + {Time: 12, Value: 28.693759}, + {Time: 13, Value: 36.640986}, + {Time: 14, Value: 23.824609}, + {Time: 15, Value: 29.311683}, + {Time: 16, Value: 31.770309}, + {Time: 17, Value: 35.177877}, + {Time: 19, Value: 29.60175}, + {Time: 20, Value: 34.538842}, + {Time: 21, Value: 41.273599}, + {Time: 22, Value: 26.655862}, + {Time: 23, Value: 28.279859}, + {Time: 24, Value: 35.191153}, + {Time: 25, Value: 41.727458}, + {Time: 26, Value: 24.04185}, + {Time: 27, Value: 32.328103}, + {Time: 28, Value: 37.328708}, + {Time: 30, Value: 29.346326}, + {Time: 31, Value: 36.48291}, + {Time: 32, Value: 42.977719}, + {Time: 34, Value: 31.180221}, + {Time: 35, Value: 37.717881}, + {Time: 36, Value: 40.420211}, + {Time: 37, Value: 51.206863}, + {Time: 38, Value: 31.887228}, + {Time: 41, Value: 55.558567}, + {Time: 42, Value: 33.850915}, + {Time: 43, Value: 42.076383}, + {Time: 44, Value: 45.642292}, + {Time: 45, Value: 59.76678}, + {Time: 46, Value: 35.191877}, + {Time: 47, Value: 44.319737}, + {Time: 48, Value: 47.913736}, + } + + for _, p := range austourists { + hw.AggregateFloat(&p) + } + points := hw.Emit() + + forecasted := []query.FloatPoint{ + {Time: 49, Value: 54.84533610387743}, + {Time: 50, Value: 41.19329421863249}, + {Time: 51, Value: 45.71673175112451}, + {Time: 52, Value: 56.05759298805955}, + {Time: 53, Value: 59.32337460282217}, + {Time: 54, Value: 44.75280096850461}, + {Time: 55, Value: 49.98865098113751}, + {Time: 56, Value: 61.86084934967605}, + {Time: 57, Value: 65.95805633454883}, + {Time: 58, Value: 50.1502170480547}, + } + + if exp, got := len(forecasted), len(points); exp != got { + t.Fatalf("unexpected number of points emitted: got %d exp %d", got, exp) + } + + for i := range forecasted { + if exp, got := forecasted[i].Time, points[i].Time; got != exp { + t.Errorf("unexpected time on points[%d] got %v exp %v", i, got, exp) + } + if exp, got := forecasted[i].Value, points[i].Value; !almostEqual(got, exp) { + t.Errorf("unexpected value on points[%d] got %v exp %v", i, got, exp) + } + } +} + +func TestHoltWinters_USPopulation(t *testing.T) { + series := []query.FloatPoint{ + {Time: 1, Value: 3.93}, + {Time: 2, Value: 5.31}, + {Time: 3, Value: 7.24}, + {Time: 4, Value: 9.64}, + {Time: 5, Value: 12.90}, + {Time: 6, Value: 17.10}, + {Time: 7, Value: 23.20}, + {Time: 8, Value: 31.40}, + {Time: 9, Value: 39.80}, + {Time: 10, Value: 50.20}, + {Time: 11, Value: 62.90}, + {Time: 12, Value: 76.00}, + {Time: 13, Value: 92.00}, + {Time: 14, Value: 105.70}, + {Time: 15, Value: 122.80}, + {Time: 16, Value: 131.70}, + {Time: 17, Value: 151.30}, + {Time: 18, Value: 179.30}, + {Time: 19, Value: 203.20}, + } + hw := query.NewFloatHoltWintersReducer(10, 0, true, 1) + for _, p := range series { + hw.AggregateFloat(&p) + } + points := hw.Emit() + + forecasted := []query.FloatPoint{ + {Time: 1, Value: 3.93}, + {Time: 2, Value: 4.957405463559748}, + {Time: 3, Value: 7.012210102535647}, + {Time: 4, Value: 10.099589257439924}, + {Time: 5, Value: 14.229926188104242}, + 
{Time: 6, Value: 19.418878968703797}, + {Time: 7, Value: 25.68749172281409}, + {Time: 8, Value: 33.062351305731305}, + {Time: 9, Value: 41.575791076125206}, + {Time: 10, Value: 51.26614395589263}, + {Time: 11, Value: 62.178047564264595}, + {Time: 12, Value: 74.36280483872488}, + {Time: 13, Value: 87.87880423073163}, + {Time: 14, Value: 102.79200429905801}, + {Time: 15, Value: 119.17648832929542}, + {Time: 16, Value: 137.11509549747296}, + {Time: 17, Value: 156.70013608313175}, + {Time: 18, Value: 178.03419933863566}, + {Time: 19, Value: 201.23106385518594}, + {Time: 20, Value: 226.4167216525905}, + {Time: 21, Value: 253.73052878285205}, + {Time: 22, Value: 283.32649700397553}, + {Time: 23, Value: 315.37474308085984}, + {Time: 24, Value: 350.06311454009256}, + {Time: 25, Value: 387.59901328556873}, + {Time: 26, Value: 428.21144141893404}, + {Time: 27, Value: 472.1532969569147}, + {Time: 28, Value: 519.7039509590035}, + {Time: 29, Value: 571.1721419458248}, + } + + if exp, got := len(forecasted), len(points); exp != got { + t.Fatalf("unexpected number of points emitted: got %d exp %d", got, exp) + } + for i := range forecasted { + if exp, got := forecasted[i].Time, points[i].Time; got != exp { + t.Errorf("unexpected time on points[%d] got %v exp %v", i, got, exp) + } + if exp, got := forecasted[i].Value, points[i].Value; !almostEqual(got, exp) { + t.Errorf("unexpected value on points[%d] got %v exp %v", i, got, exp) + } + } +} + +func TestHoltWinters_USPopulation_Missing(t *testing.T) { + series := []query.FloatPoint{ + {Time: 1, Value: 3.93}, + {Time: 2, Value: 5.31}, + {Time: 3, Value: 7.24}, + {Time: 4, Value: 9.64}, + {Time: 5, Value: 12.90}, + {Time: 6, Value: 17.10}, + {Time: 7, Value: 23.20}, + {Time: 8, Value: 31.40}, + {Time: 10, Value: 50.20}, + {Time: 11, Value: 62.90}, + {Time: 12, Value: 76.00}, + {Time: 13, Value: 92.00}, + {Time: 15, Value: 122.80}, + {Time: 16, Value: 131.70}, + {Time: 17, Value: 151.30}, + {Time: 19, Value: 203.20}, + } + hw := query.NewFloatHoltWintersReducer(10, 0, true, 1) + for _, p := range series { + hw.AggregateFloat(&p) + } + points := hw.Emit() + + forecasted := []query.FloatPoint{ + {Time: 1, Value: 3.93}, + {Time: 2, Value: 4.8931364428135105}, + {Time: 3, Value: 6.962653629047061}, + {Time: 4, Value: 10.056207765903274}, + {Time: 5, Value: 14.18435088129532}, + {Time: 6, Value: 19.362939306110846}, + {Time: 7, Value: 25.613247940326584}, + {Time: 8, Value: 32.96213087008264}, + {Time: 9, Value: 41.442230043017204}, + {Time: 10, Value: 51.09223428526052}, + {Time: 11, Value: 61.95719155158485}, + {Time: 12, Value: 74.08887794968567}, + {Time: 13, Value: 87.54622778052787}, + {Time: 14, Value: 102.39582960014131}, + {Time: 15, Value: 118.7124941463221}, + {Time: 16, Value: 136.57990089987464}, + {Time: 17, Value: 156.09133107941278}, + {Time: 18, Value: 177.35049601833734}, + {Time: 19, Value: 200.472471161683}, + {Time: 20, Value: 225.58474737097785}, + {Time: 21, Value: 252.82841286206823}, + {Time: 22, Value: 282.35948095261017}, + {Time: 23, Value: 314.3503808953992}, + {Time: 24, Value: 348.99163145856954}, + {Time: 25, Value: 386.49371962730555}, + {Time: 26, Value: 427.08920989407727}, + {Time: 27, Value: 471.0351131332573}, + {Time: 28, Value: 518.615548088049}, + {Time: 29, Value: 570.1447331101863}, + } + + if exp, got := len(forecasted), len(points); exp != got { + t.Fatalf("unexpected number of points emitted: got %d exp %d", got, exp) + } + for i := range forecasted { + if exp, got := forecasted[i].Time, points[i].Time; got != exp { + 
t.Errorf("unexpected time on points[%d] got %v exp %v", i, got, exp) + } + if exp, got := forecasted[i].Value, points[i].Value; !almostEqual(got, exp) { + t.Errorf("unexpected value on points[%d] got %v exp %v", i, got, exp) + } + } +} +func TestHoltWinters_RoundTime(t *testing.T) { + maxTime := time.Unix(0, influxql.MaxTime).Round(time.Second).UnixNano() + data := []query.FloatPoint{ + {Time: maxTime - int64(5*time.Second), Value: 1}, + {Time: maxTime - int64(4*time.Second+103*time.Millisecond), Value: 10}, + {Time: maxTime - int64(3*time.Second+223*time.Millisecond), Value: 2}, + {Time: maxTime - int64(2*time.Second+481*time.Millisecond), Value: 11}, + } + hw := query.NewFloatHoltWintersReducer(2, 2, true, time.Second) + for _, p := range data { + hw.AggregateFloat(&p) + } + points := hw.Emit() + + forecasted := []query.FloatPoint{ + {Time: maxTime - int64(5*time.Second), Value: 1}, + {Time: maxTime - int64(4*time.Second), Value: 10.006729104838234}, + {Time: maxTime - int64(3*time.Second), Value: 1.998341814469269}, + {Time: maxTime - int64(2*time.Second), Value: 10.997858830631172}, + {Time: maxTime - int64(1*time.Second), Value: 4.085860238030013}, + {Time: maxTime - int64(0*time.Second), Value: 11.35713604403339}, + } + + if exp, got := len(forecasted), len(points); exp != got { + t.Fatalf("unexpected number of points emitted: got %d exp %d", got, exp) + } + for i := range forecasted { + if exp, got := forecasted[i].Time, points[i].Time; got != exp { + t.Errorf("unexpected time on points[%d] got %v exp %v", i, got, exp) + } + if exp, got := forecasted[i].Value, points[i].Value; !almostEqual(got, exp) { + t.Errorf("unexpected value on points[%d] got %v exp %v", i, got, exp) + } + } +} + +func TestHoltWinters_MaxTime(t *testing.T) { + data := []query.FloatPoint{ + {Time: influxql.MaxTime - 1, Value: 1}, + {Time: influxql.MaxTime, Value: 2}, + } + hw := query.NewFloatHoltWintersReducer(1, 0, true, 1) + for _, p := range data { + hw.AggregateFloat(&p) + } + points := hw.Emit() + + forecasted := []query.FloatPoint{ + {Time: influxql.MaxTime - 1, Value: 1}, + {Time: influxql.MaxTime, Value: 2.001516944066403}, + {Time: influxql.MaxTime + 1, Value: 2.5365248972488343}, + } + + if exp, got := len(forecasted), len(points); exp != got { + t.Fatalf("unexpected number of points emitted: got %d exp %d", got, exp) + } + for i := range forecasted { + if exp, got := forecasted[i].Time, points[i].Time; got != exp { + t.Errorf("unexpected time on points[%d] got %v exp %v", i, got, exp) + } + if exp, got := forecasted[i].Value, points[i].Value; !almostEqual(got, exp) { + t.Errorf("unexpected value on points[%d] got %v exp %v", i, got, exp) + } + } +} + +// TestSample_AllSamplesSeen attempts to verify that it is possible +// to get every subsample in a reasonable number of iterations. +// +// The idea here is that 30 iterations should be enough to hit every possible +// sequence at least once. +func TestSample_AllSamplesSeen(t *testing.T) { + ps := []query.FloatPoint{ + {Time: 1, Value: 1}, + {Time: 2, Value: 2}, + {Time: 3, Value: 3}, + } + + // List of all the possible subsamples + samples := [][]query.FloatPoint{ + { + {Time: 1, Value: 1}, + {Time: 2, Value: 2}, + }, + { + {Time: 1, Value: 1}, + {Time: 3, Value: 3}, + }, + { + {Time: 2, Value: 2}, + {Time: 3, Value: 3}, + }, + } + + // 30 iterations should be sufficient to guarantee that + // we hit every possible subsample. 
+ for i := 0; i < 30; i++ { + s := query.NewFloatSampleReducer(2) + for _, p := range ps { + s.AggregateFloat(&p) + } + + points := s.Emit() + + for i, sample := range samples { + // if we find a sample that it matches, remove it from + // this list of possible samples + if deep.Equal(sample, points) { + samples = append(samples[:i], samples[i+1:]...) + break + } + } + + // if samples is empty we've seen every sample, so we're done + if len(samples) == 0 { + return + } + + // The FloatSampleReducer is seeded with time.Now().UnixNano(), and without this sleep, + // this test will fail on machines where UnixNano doesn't return full resolution. + // Specifically, some Windows machines will only return timestamps accurate to 100ns. + // While iterating through this test without an explicit sleep, + // we would only see one or two unique seeds across all the calls to NewFloatSampleReducer. + time.Sleep(time.Millisecond) + } + + // If we missed a sample, report the error + if len(samples) != 0 { + t.Fatalf("expected all samples to be seen; unseen samples: %#v", samples) + } +} + +func TestSample_SampleSizeLessThanNumPoints(t *testing.T) { + s := query.NewFloatSampleReducer(2) + + ps := []query.FloatPoint{ + {Time: 1, Value: 1}, + {Time: 2, Value: 2}, + {Time: 3, Value: 3}, + } + + for _, p := range ps { + s.AggregateFloat(&p) + } + + points := s.Emit() + + if exp, got := 2, len(points); exp != got { + t.Fatalf("unexpected number of points emitted: got %d exp %d", got, exp) + } +} + +func TestSample_SampleSizeGreaterThanNumPoints(t *testing.T) { + s := query.NewFloatSampleReducer(4) + + ps := []query.FloatPoint{ + {Time: 1, Value: 1}, + {Time: 2, Value: 2}, + {Time: 3, Value: 3}, + } + + for _, p := range ps { + s.AggregateFloat(&p) + } + + points := s.Emit() + + if exp, got := len(ps), len(points); exp != got { + t.Fatalf("unexpected number of points emitted: got %d exp %d", got, exp) + } + + if !deep.Equal(ps, points) { + t.Fatalf("unexpected points: %s", spew.Sdump(points)) + } +} diff --git a/vendor/github.com/influxdata/influxdb/query/internal/gota/README.md b/vendor/github.com/influxdata/influxdb/query/internal/gota/README.md new file mode 100644 index 0000000..457c58e --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/query/internal/gota/README.md @@ -0,0 +1,3 @@ +This is a port of [gota](https://github.com/phemmer/gota) to be adapted inside of InfluxDB. + +This port was made with the permission of the author, Patrick Hemmer, and has been modified to remove dependencies that are not part of InfluxDB. diff --git a/vendor/github.com/influxdata/influxdb/query/internal/gota/cmo.go b/vendor/github.com/influxdata/influxdb/query/internal/gota/cmo.go new file mode 100644 index 0000000..772644f --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/query/internal/gota/cmo.go @@ -0,0 +1,127 @@ +package gota + +// CMO - Chande Momentum Oscillator (https://www.fidelity.com/learning-center/trading-investing/technical-analysis/technical-indicator-guide/cmo) +type CMO struct { + points []cmoPoint + sumUp float64 + sumDown float64 + count int + idx int // index of newest point +} + +type cmoPoint struct { + price float64 + diff float64 +} + +// NewCMO constructs a new CMO. +func NewCMO(inTimePeriod int) *CMO { + return &CMO{ + points: make([]cmoPoint, inTimePeriod-1), + } +} + +// WarmCount returns the number of samples that must be provided for the algorithm to be fully "warmed". 
+func (cmo *CMO) WarmCount() int { + return len(cmo.points) +} + +// Add adds a new sample value to the algorithm and returns the computed value. +func (cmo *CMO) Add(v float64) float64 { + idxOldest := cmo.idx + 1 + if idxOldest == len(cmo.points) { + idxOldest = 0 + } + + var diff float64 + if cmo.count != 0 { + prev := cmo.points[cmo.idx] + diff = v - prev.price + if diff > 0 { + cmo.sumUp += diff + } else if diff < 0 { + cmo.sumDown -= diff + } + } + + var outV float64 + if cmo.sumUp != 0 || cmo.sumDown != 0 { + outV = 100.0 * ((cmo.sumUp - cmo.sumDown) / (cmo.sumUp + cmo.sumDown)) + } + + oldest := cmo.points[idxOldest] + //NOTE: because we're just adding and subtracting the difference, and not recalculating sumUp/sumDown using cmo.points[].price, it's possible for imprecision to creep in over time. Not sure how significant this is going to be, but if we want to fix it, we could recalculate it from scratch every N points. + if oldest.diff > 0 { + cmo.sumUp -= oldest.diff + } else if oldest.diff < 0 { + cmo.sumDown += oldest.diff + } + + p := cmoPoint{ + price: v, + diff: diff, + } + cmo.points[idxOldest] = p + cmo.idx = idxOldest + + if !cmo.Warmed() { + cmo.count++ + } + + return outV +} + +// Warmed indicates whether the algorithm has enough data to generate accurate results. +func (cmo *CMO) Warmed() bool { + return cmo.count == len(cmo.points)+2 +} + +// CMOS is a smoothed version of the Chande Momentum Oscillator. +// This is the version of CMO utilized by ta-lib. +type CMOS struct { + emaUp EMA + emaDown EMA + lastV float64 +} + +// NewCMOS constructs a new CMOS. +func NewCMOS(inTimePeriod int, warmType WarmupType) *CMOS { + ema := NewEMA(inTimePeriod+1, warmType) + ema.alpha = float64(1) / float64(inTimePeriod) + return &CMOS{ + emaUp: *ema, + emaDown: *ema, + } +} + +// WarmCount returns the number of samples that must be provided for the algorithm to be fully "warmed". +func (cmos CMOS) WarmCount() int { + return cmos.emaUp.WarmCount() +} + +// Warmed indicates whether the algorithm has enough data to generate accurate results. +func (cmos CMOS) Warmed() bool { + return cmos.emaUp.Warmed() +} + +// Last returns the last output value. +func (cmos CMOS) Last() float64 { + up := cmos.emaUp.Last() + down := cmos.emaDown.Last() + return 100.0 * ((up - down) / (up + down)) +} + +// Add adds a new sample value to the algorithm and returns the computed value. 
+func (cmos *CMOS) Add(v float64) float64 { + var up float64 + var down float64 + if v > cmos.lastV { + up = v - cmos.lastV + } else if v < cmos.lastV { + down = cmos.lastV - v + } + cmos.emaUp.Add(up) + cmos.emaDown.Add(down) + cmos.lastV = v + return cmos.Last() +} diff --git a/vendor/github.com/influxdata/influxdb/query/internal/gota/cmo_test.go b/vendor/github.com/influxdata/influxdb/query/internal/gota/cmo_test.go new file mode 100644 index 0000000..e375e40 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/query/internal/gota/cmo_test.go @@ -0,0 +1,41 @@ +package gota + +import "testing" + +func TestCMO(t *testing.T) { + list := []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1} + + expList := []float64{100, 100, 100, 100, 100, 80, 60, 40, 20, 0, -20, -40, -60, -80, -100, -100, -100, -100, -100} + + cmo := NewCMO(10) + var actList []float64 + for _, v := range list { + if vOut := cmo.Add(v); cmo.Warmed() { + actList = append(actList, vOut) + } + } + + if diff := diffFloats(expList, actList, 1E-7); diff != "" { + t.Errorf("unexpected floats:\n%s", diff) + } +} + +func TestCMOS(t *testing.T) { + list := []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1} + + // expList is generated by the following code: + // expList, _ := talib.Cmo(list, 10, nil) + expList := []float64{100, 100, 100, 100, 100, 80, 61.999999999999986, 45.79999999999999, 31.22, 18.097999999999992, 6.288199999999988, -4.340620000000012, -13.906558000000008, -22.515902200000014, -30.264311980000013, -37.23788078200001, -43.51409270380002, -49.16268343342002, -54.24641509007802} + + cmo := NewCMOS(10, WarmSMA) + var actList []float64 + for _, v := range list { + if vOut := cmo.Add(v); cmo.Warmed() { + actList = append(actList, vOut) + } + } + + if diff := diffFloats(expList, actList, 1E-7); diff != "" { + t.Errorf("unexpected floats:\n%s", diff) + } +} diff --git a/vendor/github.com/influxdata/influxdb/query/internal/gota/ema.go b/vendor/github.com/influxdata/influxdb/query/internal/gota/ema.go new file mode 100644 index 0000000..6968144 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/query/internal/gota/ema.go @@ -0,0 +1,188 @@ +package gota + +import ( + "fmt" +) + +type AlgSimple interface { + Add(float64) float64 + Warmed() bool + WarmCount() int +} + +type WarmupType int8 + +const ( + WarmEMA WarmupType = iota // Exponential Moving Average + WarmSMA // Simple Moving Average +) + +func ParseWarmupType(wt string) (WarmupType, error) { + switch wt { + case "exponential": + return WarmEMA, nil + case "simple": + return WarmSMA, nil + default: + return 0, fmt.Errorf("invalid warmup type '%s'", wt) + } +} + +// EMA - Exponential Moving Average (http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:moving_averages#exponential_moving_average_calculation) +type EMA struct { + inTimePeriod int + last float64 + count int + alpha float64 + warmType WarmupType +} + +// NewEMA constructs a new EMA. +// +// When warmed with WarmSMA the first inTimePeriod samples will result in a simple average, switching to exponential moving average after warmup is complete. +// +// When warmed with WarmEMA the algorithm immediately starts using an exponential moving average for the output values. During the warmup period the alpha value is scaled to prevent unbalanced weighting on initial values. 
+func NewEMA(inTimePeriod int, warmType WarmupType) *EMA { + return &EMA{ + inTimePeriod: inTimePeriod, + alpha: 2 / float64(inTimePeriod+1), + warmType: warmType, + } +} + +// WarmCount returns the number of samples that must be provided for the algorithm to be fully "warmed". +func (ema *EMA) WarmCount() int { + return ema.inTimePeriod - 1 +} + +// Warmed indicates whether the algorithm has enough data to generate accurate results. +func (ema *EMA) Warmed() bool { + return ema.count == ema.inTimePeriod +} + +// Last returns the last output value. +func (ema *EMA) Last() float64 { + return ema.last +} + +// Add adds a new sample value to the algorithm and returns the computed value. +func (ema *EMA) Add(v float64) float64 { + var avg float64 + if ema.count == 0 { + avg = v + } else { + lastAvg := ema.Last() + if !ema.Warmed() { + if ema.warmType == WarmSMA { + avg = (lastAvg*float64(ema.count) + v) / float64(ema.count+1) + } else { // ema.warmType == WarmEMA + // scale the alpha so that we don't excessively weight the result towards the first value + alpha := 2 / float64(ema.count+2) + avg = (v-lastAvg)*alpha + lastAvg + } + } else { + avg = (v-lastAvg)*ema.alpha + lastAvg + } + } + + ema.last = avg + if ema.count < ema.inTimePeriod { + // don't just keep incrementing to prevent potential overflow + ema.count++ + } + return avg +} + +// DEMA - Double Exponential Moving Average (https://en.wikipedia.org/wiki/Double_exponential_moving_average) +type DEMA struct { + ema1 EMA + ema2 EMA +} + +// NewDEMA constructs a new DEMA. +// +// When warmed with WarmSMA the first inTimePeriod samples will result in a simple average, switching to exponential moving average after warmup is complete. +// +// When warmed with WarmEMA the algorithm immediately starts using an exponential moving average for the output values. During the warmup period the alpha value is scaled to prevent unbalanced weighting on initial values. +func NewDEMA(inTimePeriod int, warmType WarmupType) *DEMA { + return &DEMA{ + ema1: *NewEMA(inTimePeriod, warmType), + ema2: *NewEMA(inTimePeriod, warmType), + } +} + +// WarmCount returns the number of samples that must be provided for the algorithm to be fully "warmed". +func (dema *DEMA) WarmCount() int { + if dema.ema1.warmType == WarmEMA { + return dema.ema1.WarmCount() + } + return dema.ema1.WarmCount() + dema.ema2.WarmCount() +} + +// Add adds a new sample value to the algorithm and returns the computed value. +func (dema *DEMA) Add(v float64) float64 { + avg1 := dema.ema1.Add(v) + var avg2 float64 + if dema.ema1.Warmed() || dema.ema1.warmType == WarmEMA { + avg2 = dema.ema2.Add(avg1) + } else { + avg2 = avg1 + } + return 2*avg1 - avg2 +} + +// Warmed indicates whether the algorithm has enough data to generate accurate results. +func (dema *DEMA) Warmed() bool { + return dema.ema2.Warmed() +} + +// TEMA - Triple Exponential Moving Average (https://en.wikipedia.org/wiki/Triple_exponential_moving_average) +type TEMA struct { + ema1 EMA + ema2 EMA + ema3 EMA +} + +// NewTEMA constructs a new TEMA. +// +// When warmed with WarmSMA the first inTimePeriod samples will result in a simple average, switching to exponential moving average after warmup is complete. +// +// When warmed with WarmEMA the algorithm immediately starts using an exponential moving average for the output values. During the warmup period the alpha value is scaled to prevent unbalanced weighting on initial values. 
+func NewTEMA(inTimePeriod int, warmType WarmupType) *TEMA { + return &TEMA{ + ema1: *NewEMA(inTimePeriod, warmType), + ema2: *NewEMA(inTimePeriod, warmType), + ema3: *NewEMA(inTimePeriod, warmType), + } +} + +// WarmCount returns the number of samples that must be provided for the algorithm to be fully "warmed". +func (tema *TEMA) WarmCount() int { + if tema.ema1.warmType == WarmEMA { + return tema.ema1.WarmCount() + } + return tema.ema1.WarmCount() + tema.ema2.WarmCount() + tema.ema3.WarmCount() +} + +// Add adds a new sample value to the algorithm and returns the computed value. +func (tema *TEMA) Add(v float64) float64 { + avg1 := tema.ema1.Add(v) + var avg2 float64 + if tema.ema1.Warmed() || tema.ema1.warmType == WarmEMA { + avg2 = tema.ema2.Add(avg1) + } else { + avg2 = avg1 + } + var avg3 float64 + if tema.ema2.Warmed() || tema.ema2.warmType == WarmEMA { + avg3 = tema.ema3.Add(avg2) + } else { + avg3 = avg2 + } + return 3*avg1 - 3*avg2 + avg3 +} + +// Warmed indicates whether the algorithm has enough data to generate accurate results. +func (tema *TEMA) Warmed() bool { + return tema.ema3.Warmed() +} diff --git a/vendor/github.com/influxdata/influxdb/query/internal/gota/ema_test.go b/vendor/github.com/influxdata/influxdb/query/internal/gota/ema_test.go new file mode 100644 index 0000000..3114506 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/query/internal/gota/ema_test.go @@ -0,0 +1,114 @@ +package gota + +import "testing" + +func TestEMA(t *testing.T) { + list := []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1} + + // expList is generated by the following code: + // expList, _ := talib.Ema(list, 10, nil) + expList := []float64{5.5, 6.5, 7.5, 8.5, 9.5, 10.5, 11.136363636363637, 11.475206611570249, 11.570623591284749, 11.466873847414794, 11.200169511521196, 10.800138691244614, 10.291022565563775, 9.692654826370362, 9.021263039757569, 8.290124305256192, 7.510101704300521, 6.690083212609517, 5.837340810316878, 4.957824299350173} + + ema := NewEMA(10, WarmSMA) + var actList []float64 + for _, v := range list { + if vOut := ema.Add(v); ema.Warmed() { + actList = append(actList, vOut) + } + } + + if diff := diffFloats(expList, actList, 0.0000001); diff != "" { + t.Errorf("unexpected floats:\n%s", diff) + } +} + +func TestDEMA(t *testing.T) { + list := []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1} + + // expList is generated by the following code: + // expList, _ := talib.Dema(list, 10, nil) + expList := []float64{13.568840926166246, 12.701748119313985, 11.701405062848783, 10.611872766773773, 9.465595022565749, 8.28616628396151, 7.090477085921927, 5.8903718513360275, 4.693925476073202, 3.5064225149113692, 2.331104912318361} + + dema := NewDEMA(10, WarmSMA) + var actList []float64 + for _, v := range list { + if vOut := dema.Add(v); dema.Warmed() { + actList = append(actList, vOut) + } + } + + if diff := diffFloats(expList, actList, 0.0000001); diff != "" { + t.Errorf("unexpected floats:\n%s", diff) + } +} + +func TestTEMA(t *testing.T) { + list := []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1} + + // expList is generated by the following code: + // expList, _ := talib.Tema(list, 4, nil) + expList := []float64{10, 11, 12, 13, 14, 15, 14.431999999999995, 13.345600000000001, 12.155520000000001, 11, 9.906687999999997, 8.86563072, 7.8589122560000035, 6.871005491200005, 5.891160883200005, 
4.912928706560004, 3.932955104051203, 2.9498469349785603, 1.9633255712030717, 0.9736696408637435} + + tema := NewTEMA(4, WarmSMA) + var actList []float64 + for _, v := range list { + if vOut := tema.Add(v); tema.Warmed() { + actList = append(actList, vOut) + } + } + + if diff := diffFloats(expList, actList, 0.0000001); diff != "" { + t.Errorf("unexpected floats:\n%s", diff) + } +} + +func TestEmaWarmCount(t *testing.T) { + period := 9 + ema := NewEMA(period, WarmSMA) + + var i int + for i = 0; i < period*10; i++ { + ema.Add(float64(i)) + if ema.Warmed() { + break + } + } + + if got, want := i, ema.WarmCount(); got != want { + t.Errorf("unexpected warm count: got=%d want=%d", got, want) + } +} + +func TestDemaWarmCount(t *testing.T) { + period := 9 + dema := NewDEMA(period, WarmSMA) + + var i int + for i = 0; i < period*10; i++ { + dema.Add(float64(i)) + if dema.Warmed() { + break + } + } + + if got, want := i, dema.WarmCount(); got != want { + t.Errorf("unexpected warm count: got=%d want=%d", got, want) + } +} + +func TestTemaWarmCount(t *testing.T) { + period := 9 + tema := NewTEMA(period, WarmSMA) + + var i int + for i = 0; i < period*10; i++ { + tema.Add(float64(i)) + if tema.Warmed() { + break + } + } + + if got, want := i, tema.WarmCount(); got != want { + t.Errorf("unexpected warm count: got=%d want=%d", got, want) + } +} diff --git a/vendor/github.com/influxdata/influxdb/query/internal/gota/kama.go b/vendor/github.com/influxdata/influxdb/query/internal/gota/kama.go new file mode 100644 index 0000000..a43f96d --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/query/internal/gota/kama.go @@ -0,0 +1,113 @@ +package gota + +import ( + "math" +) + +// KER - Kaufman's Efficiency Ratio (http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:kaufman_s_adaptive_moving_average#efficiency_ratio_er) +type KER struct { + points []kerPoint + noise float64 + count int + idx int // index of newest point +} + +type kerPoint struct { + price float64 + diff float64 +} + +// NewKER constructs a new KER. +func NewKER(inTimePeriod int) *KER { + return &KER{ + points: make([]kerPoint, inTimePeriod), + } +} + +// WarmCount returns the number of samples that must be provided for the algorithm to be fully "warmed". +func (ker *KER) WarmCount() int { + return len(ker.points) +} + +// Add adds a new sample value to the algorithm and returns the computed value. +func (ker *KER) Add(v float64) float64 { + //TODO this does not return a sensible value if not warmed. + n := len(ker.points) + idxOldest := ker.idx + 1 + if idxOldest >= n { + idxOldest = 0 + } + + signal := math.Abs(v - ker.points[idxOldest].price) + + kp := kerPoint{ + price: v, + diff: math.Abs(v - ker.points[ker.idx].price), + } + ker.noise -= ker.points[idxOldest].diff + ker.noise += kp.diff + noise := ker.noise + + ker.idx = idxOldest + ker.points[ker.idx] = kp + + if !ker.Warmed() { + ker.count++ + } + + if signal == 0 || noise == 0 { + return 0 + } + return signal / noise +} + +// Warmed indicates whether the algorithm has enough data to generate accurate results. +func (ker *KER) Warmed() bool { + return ker.count == len(ker.points)+1 +} + +// KAMA - Kaufman's Adaptive Moving Average (http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:kaufman_s_adaptive_moving_average) +type KAMA struct { + ker KER + last float64 +} + +// NewKAMA constructs a new KAMA. 
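+//
+// (A hedged reading of Add below: the KER efficiency ratio er scales a
+// smoothing constant between the conventional fastest (2 period) and
+// slowest (30 period) EMA alphas, sc = (er*(2/3 - 2/31) + 2/31)^2, and the
+// output follows last += sc * (v - last). A minimal usage sketch with
+// placeholder values:)
+//
+//	kama := NewKAMA(10)
+//	for _, v := range []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12} {
+//		if out := kama.Add(v); kama.Warmed() {
+//			_ = out
+//		}
+//	}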
+func NewKAMA(inTimePeriod int) *KAMA { + ker := NewKER(inTimePeriod) + return &KAMA{ + ker: *ker, + } +} + +// WarmCount returns the number of samples that must be provided for the algorithm to be fully "warmed". +func (kama *KAMA) WarmCount() int { + return kama.ker.WarmCount() +} + +// Add adds a new sample value to the algorithm and returns the computed value. +func (kama *KAMA) Add(v float64) float64 { + if !kama.Warmed() { + /* + // initialize with a simple moving average + kama.last = 0 + for _, v := range kama.ker.points[:kama.ker.count] { + kama.last += v + } + kama.last /= float64(kama.ker.count + 1) + */ + // initialize with the last value + kama.last = kama.ker.points[kama.ker.idx].price + } + + er := kama.ker.Add(v) + sc := math.Pow(er*(2.0/(2.0+1.0)-2.0/(30.0+1.0))+2.0/(30.0+1.0), 2) + + kama.last = kama.last + sc*(v-kama.last) + return kama.last +} + +// Warmed indicates whether the algorithm has enough data to generate accurate results. +func (kama *KAMA) Warmed() bool { + return kama.ker.Warmed() +} diff --git a/vendor/github.com/influxdata/influxdb/query/internal/gota/kama_test.go b/vendor/github.com/influxdata/influxdb/query/internal/gota/kama_test.go new file mode 100644 index 0000000..d9a2f65 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/query/internal/gota/kama_test.go @@ -0,0 +1,70 @@ +package gota + +import "testing" + +func TestKER(t *testing.T) { + list := []float64{20, 21, 22, 23, 22, 21} + + expList := []float64{1, 1.0 / 3, 1.0 / 3} + + ker := NewKER(3) + var actList []float64 + for _, v := range list { + if vOut := ker.Add(v); ker.Warmed() { + actList = append(actList, vOut) + } + } + + if diff := diffFloats(expList, actList, 0.0000001); diff != "" { + t.Errorf("unexpected floats:\n%s", diff) + } +} + +func TestKAMA(t *testing.T) { + list := []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1} + + // expList is generated by the following code: + // expList, _ := talib.Cmo(list, 10, nil) + expList := []float64{10.444444444444445, 11.135802469135802, 11.964334705075446, 12.869074836153025, 13.81615268675168, 13.871008014588556, 13.71308456353558, 13.553331356741122, 13.46599437575161, 13.4515677602438, 13.29930139347417, 12.805116570729284, 11.752584300922967, 10.036160535131103, 7.797866963961725, 6.109926091089847, 4.727736717272138, 3.5154092873734104, 2.3974496040963396} + + kama := NewKAMA(10) + var actList []float64 + for _, v := range list { + if vOut := kama.Add(v); kama.Warmed() { + actList = append(actList, vOut) + } + } + + if diff := diffFloats(expList, actList, 0.0000001); diff != "" { + t.Errorf("unexpected floats:\n%s", diff) + } +} + +func TestKAMAWarmCount(t *testing.T) { + period := 9 + kama := NewKAMA(period) + + var i int + for i = 0; i < period*10; i++ { + kama.Add(float64(i)) + if kama.Warmed() { + break + } + } + + if got, want := i, kama.WarmCount(); got != want { + t.Errorf("unexpected warm count: got=%d want=%d", got, want) + } +} + +var BenchmarkKAMAVal float64 + +func BenchmarkKAMA(b *testing.B) { + list := []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1} + for n := 0; n < b.N; n++ { + kama := NewKAMA(5) + for _, v := range list { + BenchmarkKAMAVal = kama.Add(v) + } + } +} diff --git a/vendor/github.com/influxdata/influxdb/query/internal/gota/rsi.go b/vendor/github.com/influxdata/influxdb/query/internal/gota/rsi.go new file mode 100644 index 0000000..82811c3 --- /dev/null +++ 
b/vendor/github.com/influxdata/influxdb/query/internal/gota/rsi.go @@ -0,0 +1,48 @@ +package gota + +// RSI - Relative Strength Index (http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:relative_strength_index_rsi) +type RSI struct { + emaUp EMA + emaDown EMA + lastV float64 +} + +// NewRSI constructs a new RSI. +func NewRSI(inTimePeriod int, warmType WarmupType) *RSI { + ema := NewEMA(inTimePeriod+1, warmType) + ema.alpha = float64(1) / float64(inTimePeriod) + return &RSI{ + emaUp: *ema, + emaDown: *ema, + } +} + +// WarmCount returns the number of samples that must be provided for the algorithm to be fully "warmed". +func (rsi RSI) WarmCount() int { + return rsi.emaUp.WarmCount() +} + +// Warmed indicates whether the algorithm has enough data to generate accurate results. +func (rsi RSI) Warmed() bool { + return rsi.emaUp.Warmed() +} + +// Last returns the last output value. +func (rsi RSI) Last() float64 { + return 100 - (100 / (1 + rsi.emaUp.Last()/rsi.emaDown.Last())) +} + +// Add adds a new sample value to the algorithm and returns the computed value. +func (rsi *RSI) Add(v float64) float64 { + var up float64 + var down float64 + if v > rsi.lastV { + up = v - rsi.lastV + } else if v < rsi.lastV { + down = rsi.lastV - v + } + rsi.emaUp.Add(up) + rsi.emaDown.Add(down) + rsi.lastV = v + return rsi.Last() +} diff --git a/vendor/github.com/influxdata/influxdb/query/internal/gota/rsi_test.go b/vendor/github.com/influxdata/influxdb/query/internal/gota/rsi_test.go new file mode 100644 index 0000000..66675c3 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/query/internal/gota/rsi_test.go @@ -0,0 +1,23 @@ +package gota + +import "testing" + +func TestRSI(t *testing.T) { + list := []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1} + + // expList is generated by the following code: + // expList, _ := talib.Rsi(list, 10, nil) + expList := []float64{100, 100, 100, 100, 100, 90, 81, 72.89999999999999, 65.61, 59.04899999999999, 53.144099999999995, 47.82969, 43.04672099999999, 38.74204889999999, 34.86784400999999, 31.381059608999994, 28.242953648099995, 25.418658283289997, 22.876792454961} + + rsi := NewRSI(10, WarmSMA) + var actList []float64 + for _, v := range list { + if vOut := rsi.Add(v); rsi.Warmed() { + actList = append(actList, vOut) + } + } + + if diff := diffFloats(expList, actList, 0.0000001); diff != "" { + t.Errorf("unexpected floats:\n%s", diff) + } +} diff --git a/vendor/github.com/influxdata/influxdb/query/internal/gota/trix.go b/vendor/github.com/influxdata/influxdb/query/internal/gota/trix.go new file mode 100644 index 0000000..0619e21 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/query/internal/gota/trix.go @@ -0,0 +1,53 @@ +package gota + +// Trix - TRIple Exponential average (http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:trix) +type TRIX struct { + ema1 EMA + ema2 EMA + ema3 EMA + last float64 + count int +} + +// NewTRIX constructs a new TRIX. +func NewTRIX(inTimePeriod int, warmType WarmupType) *TRIX { + ema1 := NewEMA(inTimePeriod, warmType) + ema2 := NewEMA(inTimePeriod, warmType) + ema3 := NewEMA(inTimePeriod, warmType) + return &TRIX{ + ema1: *ema1, + ema2: *ema2, + ema3: *ema3, + } +} + +// Add adds a new sample value to the algorithm and returns the computed value. 
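+//
+// (A hedged reading of the body below: once the earlier stages are warmed,
+// the sample is passed through the three chained EMAs and the returned
+// value is the one-sample percentage rate of change of that triple-smoothed
+// series, ((cur / previous) - 1) * 100.)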
+func (trix *TRIX) Add(v float64) float64 { + cur := trix.ema1.Add(v) + if trix.ema1.Warmed() || trix.ema1.warmType == WarmEMA { + cur = trix.ema2.Add(cur) + if trix.ema2.Warmed() || trix.ema2.warmType == WarmEMA { + cur = trix.ema3.Add(cur) + } + } + + rate := ((cur / trix.last) - 1) * 100 + trix.last = cur + if !trix.Warmed() && trix.ema3.Warmed() { + trix.count++ + } + return rate +} + +// WarmCount returns the number of samples that must be provided for the algorithm to be fully "warmed". +func (trix *TRIX) WarmCount() int { + if trix.ema1.warmType == WarmEMA { + return trix.ema1.WarmCount() + 1 + } + return trix.ema1.WarmCount()*3 + 1 +} + +// Warmed indicates whether the algorithm has enough data to generate accurate results. +func (trix *TRIX) Warmed() bool { + return trix.count == 2 +} diff --git a/vendor/github.com/influxdata/influxdb/query/internal/gota/trix_test.go b/vendor/github.com/influxdata/influxdb/query/internal/gota/trix_test.go new file mode 100644 index 0000000..041d7b4 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/query/internal/gota/trix_test.go @@ -0,0 +1,23 @@ +package gota + +import "testing" + +func TestTRIX(t *testing.T) { + list := []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1} + + // expList is generated by the following code: + // expList, _ := talib.Trix(list, 4, nil) + expList := []float64{18.181818181818187, 15.384615384615374, 13.33333333333333, 11.764705882352944, 10.526315789473696, 8.304761904761904, 5.641927541329594, 3.0392222148232007, 0.7160675740302658, -1.2848911076603242, -2.9999661985600667, -4.493448741755901, -5.836238000516913, -7.099092024379772, -8.352897627933453, -9.673028502435233, -11.147601363985949, -12.891818138458877, -15.074463280730022} + + trix := NewTRIX(4, WarmSMA) + var actList []float64 + for _, v := range list { + if vOut := trix.Add(v); trix.Warmed() { + actList = append(actList, vOut) + } + } + + if diff := diffFloats(expList, actList, 1E-7); diff != "" { + t.Errorf("unexpected floats:\n%s", diff) + } +} diff --git a/vendor/github.com/influxdata/influxdb/query/internal/gota/utils_test.go b/vendor/github.com/influxdata/influxdb/query/internal/gota/utils_test.go new file mode 100644 index 0000000..a0b7360 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/query/internal/gota/utils_test.go @@ -0,0 +1,10 @@ +package gota + +import ( + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" +) + +func diffFloats(exp, act []float64, delta float64) string { + return cmp.Diff(exp, act, cmpopts.EquateApprox(0, delta)) +} diff --git a/vendor/github.com/influxdata/influxdb/query/internal/internal.pb.go b/vendor/github.com/influxdata/influxdb/query/internal/internal.pb.go new file mode 100644 index 0000000..dd76e1b --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/query/internal/internal.pb.go @@ -0,0 +1,606 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: internal/internal.proto + +/* +Package query is a generated protocol buffer package. + +It is generated from these files: + internal/internal.proto + +It has these top-level messages: + Point + Aux + IteratorOptions + Measurements + Measurement + Interval + IteratorStats + VarRef +*/ +package query + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type Point struct { + Name *string `protobuf:"bytes,1,req,name=Name" json:"Name,omitempty"` + Tags *string `protobuf:"bytes,2,req,name=Tags" json:"Tags,omitempty"` + Time *int64 `protobuf:"varint,3,req,name=Time" json:"Time,omitempty"` + Nil *bool `protobuf:"varint,4,req,name=Nil" json:"Nil,omitempty"` + Aux []*Aux `protobuf:"bytes,5,rep,name=Aux" json:"Aux,omitempty"` + Aggregated *uint32 `protobuf:"varint,6,opt,name=Aggregated" json:"Aggregated,omitempty"` + FloatValue *float64 `protobuf:"fixed64,7,opt,name=FloatValue" json:"FloatValue,omitempty"` + IntegerValue *int64 `protobuf:"varint,8,opt,name=IntegerValue" json:"IntegerValue,omitempty"` + StringValue *string `protobuf:"bytes,9,opt,name=StringValue" json:"StringValue,omitempty"` + BooleanValue *bool `protobuf:"varint,10,opt,name=BooleanValue" json:"BooleanValue,omitempty"` + UnsignedValue *uint64 `protobuf:"varint,12,opt,name=UnsignedValue" json:"UnsignedValue,omitempty"` + Stats *IteratorStats `protobuf:"bytes,11,opt,name=Stats" json:"Stats,omitempty"` + Trace []byte `protobuf:"bytes,13,opt,name=Trace" json:"Trace,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Point) Reset() { *m = Point{} } +func (m *Point) String() string { return proto.CompactTextString(m) } +func (*Point) ProtoMessage() {} +func (*Point) Descriptor() ([]byte, []int) { return fileDescriptorInternal, []int{0} } + +func (m *Point) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *Point) GetTags() string { + if m != nil && m.Tags != nil { + return *m.Tags + } + return "" +} + +func (m *Point) GetTime() int64 { + if m != nil && m.Time != nil { + return *m.Time + } + return 0 +} + +func (m *Point) GetNil() bool { + if m != nil && m.Nil != nil { + return *m.Nil + } + return false +} + +func (m *Point) GetAux() []*Aux { + if m != nil { + return m.Aux + } + return nil +} + +func (m *Point) GetAggregated() uint32 { + if m != nil && m.Aggregated != nil { + return *m.Aggregated + } + return 0 +} + +func (m *Point) GetFloatValue() float64 { + if m != nil && m.FloatValue != nil { + return *m.FloatValue + } + return 0 +} + +func (m *Point) GetIntegerValue() int64 { + if m != nil && m.IntegerValue != nil { + return *m.IntegerValue + } + return 0 +} + +func (m *Point) GetStringValue() string { + if m != nil && m.StringValue != nil { + return *m.StringValue + } + return "" +} + +func (m *Point) GetBooleanValue() bool { + if m != nil && m.BooleanValue != nil { + return *m.BooleanValue + } + return false +} + +func (m *Point) GetUnsignedValue() uint64 { + if m != nil && m.UnsignedValue != nil { + return *m.UnsignedValue + } + return 0 +} + +func (m *Point) GetStats() *IteratorStats { + if m != nil { + return m.Stats + } + return nil +} + +func (m *Point) GetTrace() []byte { + if m != nil { + return m.Trace + } + return nil +} + +type Aux struct { + DataType *int32 `protobuf:"varint,1,req,name=DataType" json:"DataType,omitempty"` + FloatValue *float64 `protobuf:"fixed64,2,opt,name=FloatValue" json:"FloatValue,omitempty"` + IntegerValue *int64 `protobuf:"varint,3,opt,name=IntegerValue" json:"IntegerValue,omitempty"` + StringValue 
*string `protobuf:"bytes,4,opt,name=StringValue" json:"StringValue,omitempty"` + BooleanValue *bool `protobuf:"varint,5,opt,name=BooleanValue" json:"BooleanValue,omitempty"` + UnsignedValue *uint64 `protobuf:"varint,6,opt,name=UnsignedValue" json:"UnsignedValue,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Aux) Reset() { *m = Aux{} } +func (m *Aux) String() string { return proto.CompactTextString(m) } +func (*Aux) ProtoMessage() {} +func (*Aux) Descriptor() ([]byte, []int) { return fileDescriptorInternal, []int{1} } + +func (m *Aux) GetDataType() int32 { + if m != nil && m.DataType != nil { + return *m.DataType + } + return 0 +} + +func (m *Aux) GetFloatValue() float64 { + if m != nil && m.FloatValue != nil { + return *m.FloatValue + } + return 0 +} + +func (m *Aux) GetIntegerValue() int64 { + if m != nil && m.IntegerValue != nil { + return *m.IntegerValue + } + return 0 +} + +func (m *Aux) GetStringValue() string { + if m != nil && m.StringValue != nil { + return *m.StringValue + } + return "" +} + +func (m *Aux) GetBooleanValue() bool { + if m != nil && m.BooleanValue != nil { + return *m.BooleanValue + } + return false +} + +func (m *Aux) GetUnsignedValue() uint64 { + if m != nil && m.UnsignedValue != nil { + return *m.UnsignedValue + } + return 0 +} + +type IteratorOptions struct { + Expr *string `protobuf:"bytes,1,opt,name=Expr" json:"Expr,omitempty"` + Aux []string `protobuf:"bytes,2,rep,name=Aux" json:"Aux,omitempty"` + Fields []*VarRef `protobuf:"bytes,17,rep,name=Fields" json:"Fields,omitempty"` + Sources []*Measurement `protobuf:"bytes,3,rep,name=Sources" json:"Sources,omitempty"` + Interval *Interval `protobuf:"bytes,4,opt,name=Interval" json:"Interval,omitempty"` + Dimensions []string `protobuf:"bytes,5,rep,name=Dimensions" json:"Dimensions,omitempty"` + GroupBy []string `protobuf:"bytes,19,rep,name=GroupBy" json:"GroupBy,omitempty"` + Fill *int32 `protobuf:"varint,6,opt,name=Fill" json:"Fill,omitempty"` + FillValue *float64 `protobuf:"fixed64,7,opt,name=FillValue" json:"FillValue,omitempty"` + Condition *string `protobuf:"bytes,8,opt,name=Condition" json:"Condition,omitempty"` + StartTime *int64 `protobuf:"varint,9,opt,name=StartTime" json:"StartTime,omitempty"` + EndTime *int64 `protobuf:"varint,10,opt,name=EndTime" json:"EndTime,omitempty"` + Location *string `protobuf:"bytes,21,opt,name=Location" json:"Location,omitempty"` + Ascending *bool `protobuf:"varint,11,opt,name=Ascending" json:"Ascending,omitempty"` + Limit *int64 `protobuf:"varint,12,opt,name=Limit" json:"Limit,omitempty"` + Offset *int64 `protobuf:"varint,13,opt,name=Offset" json:"Offset,omitempty"` + SLimit *int64 `protobuf:"varint,14,opt,name=SLimit" json:"SLimit,omitempty"` + SOffset *int64 `protobuf:"varint,15,opt,name=SOffset" json:"SOffset,omitempty"` + StripName *bool `protobuf:"varint,22,opt,name=StripName" json:"StripName,omitempty"` + Dedupe *bool `protobuf:"varint,16,opt,name=Dedupe" json:"Dedupe,omitempty"` + MaxSeriesN *int64 `protobuf:"varint,18,opt,name=MaxSeriesN" json:"MaxSeriesN,omitempty"` + Ordered *bool `protobuf:"varint,20,opt,name=Ordered" json:"Ordered,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *IteratorOptions) Reset() { *m = IteratorOptions{} } +func (m *IteratorOptions) String() string { return proto.CompactTextString(m) } +func (*IteratorOptions) ProtoMessage() {} +func (*IteratorOptions) Descriptor() ([]byte, []int) { return fileDescriptorInternal, []int{2} } + +func (m *IteratorOptions) GetExpr() string { + if m != nil && m.Expr != nil { + return 
*m.Expr + } + return "" +} + +func (m *IteratorOptions) GetAux() []string { + if m != nil { + return m.Aux + } + return nil +} + +func (m *IteratorOptions) GetFields() []*VarRef { + if m != nil { + return m.Fields + } + return nil +} + +func (m *IteratorOptions) GetSources() []*Measurement { + if m != nil { + return m.Sources + } + return nil +} + +func (m *IteratorOptions) GetInterval() *Interval { + if m != nil { + return m.Interval + } + return nil +} + +func (m *IteratorOptions) GetDimensions() []string { + if m != nil { + return m.Dimensions + } + return nil +} + +func (m *IteratorOptions) GetGroupBy() []string { + if m != nil { + return m.GroupBy + } + return nil +} + +func (m *IteratorOptions) GetFill() int32 { + if m != nil && m.Fill != nil { + return *m.Fill + } + return 0 +} + +func (m *IteratorOptions) GetFillValue() float64 { + if m != nil && m.FillValue != nil { + return *m.FillValue + } + return 0 +} + +func (m *IteratorOptions) GetCondition() string { + if m != nil && m.Condition != nil { + return *m.Condition + } + return "" +} + +func (m *IteratorOptions) GetStartTime() int64 { + if m != nil && m.StartTime != nil { + return *m.StartTime + } + return 0 +} + +func (m *IteratorOptions) GetEndTime() int64 { + if m != nil && m.EndTime != nil { + return *m.EndTime + } + return 0 +} + +func (m *IteratorOptions) GetLocation() string { + if m != nil && m.Location != nil { + return *m.Location + } + return "" +} + +func (m *IteratorOptions) GetAscending() bool { + if m != nil && m.Ascending != nil { + return *m.Ascending + } + return false +} + +func (m *IteratorOptions) GetLimit() int64 { + if m != nil && m.Limit != nil { + return *m.Limit + } + return 0 +} + +func (m *IteratorOptions) GetOffset() int64 { + if m != nil && m.Offset != nil { + return *m.Offset + } + return 0 +} + +func (m *IteratorOptions) GetSLimit() int64 { + if m != nil && m.SLimit != nil { + return *m.SLimit + } + return 0 +} + +func (m *IteratorOptions) GetSOffset() int64 { + if m != nil && m.SOffset != nil { + return *m.SOffset + } + return 0 +} + +func (m *IteratorOptions) GetStripName() bool { + if m != nil && m.StripName != nil { + return *m.StripName + } + return false +} + +func (m *IteratorOptions) GetDedupe() bool { + if m != nil && m.Dedupe != nil { + return *m.Dedupe + } + return false +} + +func (m *IteratorOptions) GetMaxSeriesN() int64 { + if m != nil && m.MaxSeriesN != nil { + return *m.MaxSeriesN + } + return 0 +} + +func (m *IteratorOptions) GetOrdered() bool { + if m != nil && m.Ordered != nil { + return *m.Ordered + } + return false +} + +type Measurements struct { + Items []*Measurement `protobuf:"bytes,1,rep,name=Items" json:"Items,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Measurements) Reset() { *m = Measurements{} } +func (m *Measurements) String() string { return proto.CompactTextString(m) } +func (*Measurements) ProtoMessage() {} +func (*Measurements) Descriptor() ([]byte, []int) { return fileDescriptorInternal, []int{3} } + +func (m *Measurements) GetItems() []*Measurement { + if m != nil { + return m.Items + } + return nil +} + +type Measurement struct { + Database *string `protobuf:"bytes,1,opt,name=Database" json:"Database,omitempty"` + RetentionPolicy *string `protobuf:"bytes,2,opt,name=RetentionPolicy" json:"RetentionPolicy,omitempty"` + Name *string `protobuf:"bytes,3,opt,name=Name" json:"Name,omitempty"` + Regex *string `protobuf:"bytes,4,opt,name=Regex" json:"Regex,omitempty"` + IsTarget *bool `protobuf:"varint,5,opt,name=IsTarget" json:"IsTarget,omitempty"` 
+ SystemIterator *string `protobuf:"bytes,6,opt,name=SystemIterator" json:"SystemIterator,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Measurement) Reset() { *m = Measurement{} } +func (m *Measurement) String() string { return proto.CompactTextString(m) } +func (*Measurement) ProtoMessage() {} +func (*Measurement) Descriptor() ([]byte, []int) { return fileDescriptorInternal, []int{4} } + +func (m *Measurement) GetDatabase() string { + if m != nil && m.Database != nil { + return *m.Database + } + return "" +} + +func (m *Measurement) GetRetentionPolicy() string { + if m != nil && m.RetentionPolicy != nil { + return *m.RetentionPolicy + } + return "" +} + +func (m *Measurement) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *Measurement) GetRegex() string { + if m != nil && m.Regex != nil { + return *m.Regex + } + return "" +} + +func (m *Measurement) GetIsTarget() bool { + if m != nil && m.IsTarget != nil { + return *m.IsTarget + } + return false +} + +func (m *Measurement) GetSystemIterator() string { + if m != nil && m.SystemIterator != nil { + return *m.SystemIterator + } + return "" +} + +type Interval struct { + Duration *int64 `protobuf:"varint,1,opt,name=Duration" json:"Duration,omitempty"` + Offset *int64 `protobuf:"varint,2,opt,name=Offset" json:"Offset,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Interval) Reset() { *m = Interval{} } +func (m *Interval) String() string { return proto.CompactTextString(m) } +func (*Interval) ProtoMessage() {} +func (*Interval) Descriptor() ([]byte, []int) { return fileDescriptorInternal, []int{5} } + +func (m *Interval) GetDuration() int64 { + if m != nil && m.Duration != nil { + return *m.Duration + } + return 0 +} + +func (m *Interval) GetOffset() int64 { + if m != nil && m.Offset != nil { + return *m.Offset + } + return 0 +} + +type IteratorStats struct { + SeriesN *int64 `protobuf:"varint,1,opt,name=SeriesN" json:"SeriesN,omitempty"` + PointN *int64 `protobuf:"varint,2,opt,name=PointN" json:"PointN,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *IteratorStats) Reset() { *m = IteratorStats{} } +func (m *IteratorStats) String() string { return proto.CompactTextString(m) } +func (*IteratorStats) ProtoMessage() {} +func (*IteratorStats) Descriptor() ([]byte, []int) { return fileDescriptorInternal, []int{6} } + +func (m *IteratorStats) GetSeriesN() int64 { + if m != nil && m.SeriesN != nil { + return *m.SeriesN + } + return 0 +} + +func (m *IteratorStats) GetPointN() int64 { + if m != nil && m.PointN != nil { + return *m.PointN + } + return 0 +} + +type VarRef struct { + Val *string `protobuf:"bytes,1,req,name=Val" json:"Val,omitempty"` + Type *int32 `protobuf:"varint,2,opt,name=Type" json:"Type,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *VarRef) Reset() { *m = VarRef{} } +func (m *VarRef) String() string { return proto.CompactTextString(m) } +func (*VarRef) ProtoMessage() {} +func (*VarRef) Descriptor() ([]byte, []int) { return fileDescriptorInternal, []int{7} } + +func (m *VarRef) GetVal() string { + if m != nil && m.Val != nil { + return *m.Val + } + return "" +} + +func (m *VarRef) GetType() int32 { + if m != nil && m.Type != nil { + return *m.Type + } + return 0 +} + +func init() { + proto.RegisterType((*Point)(nil), "query.Point") + proto.RegisterType((*Aux)(nil), "query.Aux") + proto.RegisterType((*IteratorOptions)(nil), "query.IteratorOptions") + proto.RegisterType((*Measurements)(nil), "query.Measurements") + 
proto.RegisterType((*Measurement)(nil), "query.Measurement") + proto.RegisterType((*Interval)(nil), "query.Interval") + proto.RegisterType((*IteratorStats)(nil), "query.IteratorStats") + proto.RegisterType((*VarRef)(nil), "query.VarRef") +} + +func init() { proto.RegisterFile("internal/internal.proto", fileDescriptorInternal) } + +var fileDescriptorInternal = []byte{ + // 796 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x55, 0x6d, 0x6f, 0xe3, 0x44, + 0x10, 0x96, 0xe3, 0x3a, 0x8d, 0x27, 0xcd, 0xf5, 0x58, 0x4a, 0x59, 0xa1, 0x13, 0xb2, 0x2c, 0x40, + 0x16, 0xa0, 0x22, 0xf5, 0x13, 0x9f, 0x90, 0x72, 0xf4, 0x8a, 0x2a, 0xdd, 0xb5, 0xa7, 0x4d, 0xe9, + 0xf7, 0x25, 0x9e, 0x5a, 0x2b, 0x39, 0xeb, 0xb0, 0x5e, 0xa3, 0xe4, 0x07, 0xf4, 0x87, 0xf1, 0x13, + 0xf8, 0x47, 0x68, 0x67, 0xd7, 0x89, 0x53, 0x81, 0x7a, 0x9f, 0x32, 0xcf, 0x33, 0x93, 0x7d, 0x79, + 0xe6, 0x99, 0x35, 0x7c, 0xa9, 0xb4, 0x45, 0xa3, 0x65, 0xfd, 0x53, 0x1f, 0x5c, 0xac, 0x4d, 0x63, + 0x1b, 0x96, 0xfc, 0xd9, 0xa1, 0xd9, 0xe6, 0x4f, 0x31, 0x24, 0x1f, 0x1b, 0xa5, 0x2d, 0x63, 0x70, + 0x74, 0x2b, 0x57, 0xc8, 0xa3, 0x6c, 0x54, 0xa4, 0x82, 0x62, 0xc7, 0xdd, 0xcb, 0xaa, 0xe5, 0x23, + 0xcf, 0xb9, 0x98, 0x38, 0xb5, 0x42, 0x1e, 0x67, 0xa3, 0x22, 0x16, 0x14, 0xb3, 0xd7, 0x10, 0xdf, + 0xaa, 0x9a, 0x1f, 0x65, 0xa3, 0x62, 0x22, 0x5c, 0xc8, 0xde, 0x40, 0x3c, 0xef, 0x36, 0x3c, 0xc9, + 0xe2, 0x62, 0x7a, 0x09, 0x17, 0xb4, 0xd9, 0xc5, 0xbc, 0xdb, 0x08, 0x47, 0xb3, 0xaf, 0x01, 0xe6, + 0x55, 0x65, 0xb0, 0x92, 0x16, 0x4b, 0x3e, 0xce, 0xa2, 0x62, 0x26, 0x06, 0x8c, 0xcb, 0x5f, 0xd7, + 0x8d, 0xb4, 0x0f, 0xb2, 0xee, 0x90, 0x1f, 0x67, 0x51, 0x11, 0x89, 0x01, 0xc3, 0x72, 0x38, 0xb9, + 0xd1, 0x16, 0x2b, 0x34, 0xbe, 0x62, 0x92, 0x45, 0x45, 0x2c, 0x0e, 0x38, 0x96, 0xc1, 0x74, 0x61, + 0x8d, 0xd2, 0x95, 0x2f, 0x49, 0xb3, 0xa8, 0x48, 0xc5, 0x90, 0x72, 0xab, 0xbc, 0x6d, 0x9a, 0x1a, + 0xa5, 0xf6, 0x25, 0x90, 0x45, 0xc5, 0x44, 0x1c, 0x70, 0xec, 0x1b, 0x98, 0xfd, 0xae, 0x5b, 0x55, + 0x69, 0x2c, 0x7d, 0xd1, 0x49, 0x16, 0x15, 0x47, 0xe2, 0x90, 0x64, 0xdf, 0x43, 0xb2, 0xb0, 0xd2, + 0xb6, 0x7c, 0x9a, 0x45, 0xc5, 0xf4, 0xf2, 0x2c, 0xdc, 0xf7, 0xc6, 0xa2, 0x91, 0xb6, 0x31, 0x94, + 0x13, 0xbe, 0x84, 0x9d, 0x41, 0x72, 0x6f, 0xe4, 0x12, 0xf9, 0x2c, 0x8b, 0x8a, 0x13, 0xe1, 0x41, + 0xfe, 0x4f, 0x44, 0x82, 0xb1, 0xaf, 0x60, 0x72, 0x25, 0xad, 0xbc, 0xdf, 0xae, 0x7d, 0x27, 0x12, + 0xb1, 0xc3, 0xcf, 0x54, 0x19, 0xbd, 0xa8, 0x4a, 0xfc, 0xb2, 0x2a, 0x47, 0x2f, 0xab, 0x92, 0x7c, + 0x8a, 0x2a, 0xe3, 0xff, 0x50, 0x25, 0x7f, 0x4a, 0xe0, 0xb4, 0x97, 0xe0, 0x6e, 0x6d, 0x55, 0xa3, + 0xc9, 0x3d, 0xef, 0x36, 0x6b, 0xc3, 0x23, 0xda, 0x98, 0x62, 0xe7, 0x1e, 0xe7, 0x95, 0x51, 0x16, + 0x17, 0xa9, 0xf7, 0xc7, 0xb7, 0x30, 0xbe, 0x56, 0x58, 0x97, 0x2d, 0xff, 0x8c, 0x0c, 0x34, 0x0b, + 0x82, 0x3e, 0x48, 0x23, 0xf0, 0x51, 0x84, 0x24, 0xfb, 0x11, 0x8e, 0x17, 0x4d, 0x67, 0x96, 0xd8, + 0xf2, 0x98, 0xea, 0x58, 0xa8, 0xfb, 0x80, 0xb2, 0xed, 0x0c, 0xae, 0x50, 0x5b, 0xd1, 0x97, 0xb0, + 0x1f, 0x60, 0xe2, 0xa4, 0x30, 0x7f, 0xc9, 0x9a, 0xee, 0x3d, 0xbd, 0x3c, 0xed, 0xfb, 0x14, 0x68, + 0xb1, 0x2b, 0x70, 0x5a, 0x5f, 0xa9, 0x15, 0xea, 0xd6, 0x9d, 0x9a, 0x6c, 0x9c, 0x8a, 0x01, 0xc3, + 0x38, 0x1c, 0xff, 0x66, 0x9a, 0x6e, 0xfd, 0x76, 0xcb, 0x3f, 0xa7, 0x64, 0x0f, 0xdd, 0x0d, 0xaf, + 0x55, 0x5d, 0x93, 0x24, 0x89, 0xa0, 0x98, 0xbd, 0x81, 0xd4, 0xfd, 0x0e, 0xed, 0xbc, 0x27, 0x5c, + 0xf6, 0xd7, 0x46, 0x97, 0xca, 0x29, 0x44, 0x56, 0x4e, 0xc5, 0x9e, 0x70, 0xd9, 0x85, 0x95, 0xc6, + 0xd2, 0xd0, 0xa5, 0xd4, 0xd2, 0x3d, 0xe1, 0xce, 0xf1, 0x4e, 0x97, 0x94, 0x03, 0xca, 0xf5, 0xd0, 
+ 0x39, 0xe9, 0x7d, 0xb3, 0x94, 0xb4, 0xe8, 0x17, 0xb4, 0xe8, 0x0e, 0xbb, 0x35, 0xe7, 0xed, 0x12, + 0x75, 0xa9, 0x74, 0x45, 0x9e, 0x9d, 0x88, 0x3d, 0xe1, 0x1c, 0xfa, 0x5e, 0xad, 0x94, 0x25, 0xaf, + 0xc7, 0xc2, 0x03, 0x76, 0x0e, 0xe3, 0xbb, 0xc7, 0xc7, 0x16, 0x2d, 0x19, 0x37, 0x16, 0x01, 0x39, + 0x7e, 0xe1, 0xcb, 0x5f, 0x79, 0xde, 0x23, 0x77, 0xb2, 0x45, 0xf8, 0xc3, 0xa9, 0x3f, 0x59, 0x80, + 0xfe, 0x46, 0x46, 0xad, 0xe9, 0xb9, 0x39, 0xf7, 0xbb, 0xef, 0x08, 0xb7, 0xde, 0x15, 0x96, 0xdd, + 0x1a, 0xf9, 0x6b, 0x4a, 0x05, 0xe4, 0x3a, 0xf2, 0x41, 0x6e, 0x16, 0x68, 0x14, 0xb6, 0xb7, 0x9c, + 0xd1, 0x92, 0x03, 0xc6, 0xed, 0x77, 0x67, 0x4a, 0x34, 0x58, 0xf2, 0x33, 0xfa, 0x63, 0x0f, 0xf3, + 0x9f, 0xe1, 0x64, 0x60, 0x88, 0x96, 0x15, 0x90, 0xdc, 0x58, 0x5c, 0xb5, 0x3c, 0xfa, 0x5f, 0xd3, + 0xf8, 0x82, 0xfc, 0xef, 0x08, 0xa6, 0x03, 0xba, 0x9f, 0xce, 0x3f, 0x64, 0x8b, 0xc1, 0xc1, 0x3b, + 0xcc, 0x0a, 0x38, 0x15, 0x68, 0x51, 0x3b, 0x81, 0x3f, 0x36, 0xb5, 0x5a, 0x6e, 0x69, 0x44, 0x53, + 0xf1, 0x9c, 0xde, 0xbd, 0xb4, 0xb1, 0x9f, 0x01, 0xba, 0xf5, 0x19, 0x24, 0x02, 0x2b, 0xdc, 0x84, + 0x89, 0xf4, 0xc0, 0xed, 0x77, 0xd3, 0xde, 0x4b, 0x53, 0xa1, 0x0d, 0x73, 0xb8, 0xc3, 0xec, 0x3b, + 0x78, 0xb5, 0xd8, 0xb6, 0x16, 0x57, 0xfd, 0x88, 0x91, 0xe3, 0x52, 0xf1, 0x8c, 0xcd, 0x7f, 0xd9, + 0xdb, 0x9e, 0xce, 0xdf, 0x19, 0xef, 0x89, 0x88, 0x14, 0xdc, 0xe1, 0x41, 0x7f, 0x47, 0xc3, 0xfe, + 0xe6, 0x73, 0x98, 0x1d, 0xbc, 0x63, 0xd4, 0xd8, 0xd0, 0x85, 0x28, 0x34, 0x36, 0xb4, 0xe0, 0x1c, + 0xc6, 0xf4, 0x2d, 0xb9, 0xed, 0x97, 0xf0, 0x28, 0xbf, 0x80, 0xb1, 0x9f, 0x5c, 0x37, 0xea, 0x0f, + 0xb2, 0x0e, 0xdf, 0x18, 0x17, 0xd2, 0xe7, 0xc4, 0x3d, 0x76, 0x23, 0x3f, 0x2e, 0x2e, 0xfe, 0x37, + 0x00, 0x00, 0xff, 0xff, 0x07, 0x98, 0x54, 0xa1, 0xb5, 0x06, 0x00, 0x00, +} diff --git a/vendor/github.com/influxdata/influxdb/query/internal/internal.proto b/vendor/github.com/influxdata/influxdb/query/internal/internal.proto new file mode 100644 index 0000000..eb3dd90 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/query/internal/internal.proto @@ -0,0 +1,82 @@ +syntax = "proto2"; +package query; + +message Point { + required string Name = 1; + required string Tags = 2; + required int64 Time = 3; + required bool Nil = 4; + repeated Aux Aux = 5; + optional uint32 Aggregated = 6; + + optional double FloatValue = 7; + optional int64 IntegerValue = 8; + optional string StringValue = 9; + optional bool BooleanValue = 10; + optional uint64 UnsignedValue = 12; + + optional IteratorStats Stats = 11; + optional bytes Trace = 13; +} + +message Aux { + required int32 DataType = 1; + optional double FloatValue = 2; + optional int64 IntegerValue = 3; + optional string StringValue = 4; + optional bool BooleanValue = 5; + optional uint64 UnsignedValue = 6; +} + +message IteratorOptions { + optional string Expr = 1; + repeated string Aux = 2; + repeated VarRef Fields = 17; + repeated Measurement Sources = 3; + optional Interval Interval = 4; + repeated string Dimensions = 5; + repeated string GroupBy = 19; + optional int32 Fill = 6; + optional double FillValue = 7; + optional string Condition = 8; + optional int64 StartTime = 9; + optional int64 EndTime = 10; + optional string Location = 21; + optional bool Ascending = 11; + optional int64 Limit = 12; + optional int64 Offset = 13; + optional int64 SLimit = 14; + optional int64 SOffset = 15; + optional bool StripName = 22; + optional bool Dedupe = 16; + optional int64 MaxSeriesN = 18; + optional bool Ordered = 20; +} + +message Measurements { + repeated Measurement Items = 1; +} + +message Measurement { + 
optional string Database = 1; + optional string RetentionPolicy = 2; + optional string Name = 3; + optional string Regex = 4; + optional bool IsTarget = 5; + optional string SystemIterator = 6; +} + +message Interval { + optional int64 Duration = 1; + optional int64 Offset = 2; +} + +message IteratorStats { + optional int64 SeriesN = 1; + optional int64 PointN = 2; +} + +message VarRef { + required string Val = 1; + optional int32 Type = 2; +} diff --git a/vendor/github.com/influxdata/influxdb/query/iterator.gen.go b/vendor/github.com/influxdata/influxdb/query/iterator.gen.go new file mode 100644 index 0000000..33027fe --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/query/iterator.gen.go @@ -0,0 +1,13329 @@ +// Generated by tmpl +// https://github.com/benbjohnson/tmpl +// +// DO NOT EDIT! +// Source: iterator.gen.go.tmpl + +package query + +import ( + "container/heap" + "context" + "io" + "sort" + "sync" + "time" + + "github.com/gogo/protobuf/proto" + "github.com/influxdata/influxql" +) + +// DefaultStatsInterval is the default value for IteratorEncoder.StatsInterval. +const DefaultStatsInterval = time.Second + +// FloatIterator represents a stream of float points. +type FloatIterator interface { + Iterator + Next() (*FloatPoint, error) +} + +// newFloatIterators converts a slice of Iterator to a slice of FloatIterator. +// Drop and closes any iterator in itrs that is not a FloatIterator and cannot +// be cast to a FloatIterator. +func newFloatIterators(itrs []Iterator) []FloatIterator { + a := make([]FloatIterator, 0, len(itrs)) + for _, itr := range itrs { + switch itr := itr.(type) { + case FloatIterator: + a = append(a, itr) + default: + itr.Close() + } + } + return a +} + +// bufFloatIterator represents a buffered FloatIterator. +type bufFloatIterator struct { + itr FloatIterator + buf *FloatPoint +} + +// newBufFloatIterator returns a buffered FloatIterator. +func newBufFloatIterator(itr FloatIterator) *bufFloatIterator { + return &bufFloatIterator{itr: itr} +} + +// Stats returns statistics from the input iterator. +func (itr *bufFloatIterator) Stats() IteratorStats { return itr.itr.Stats() } + +// Close closes the underlying iterator. +func (itr *bufFloatIterator) Close() error { return itr.itr.Close() } + +// peek returns the next point without removing it from the iterator. +func (itr *bufFloatIterator) peek() (*FloatPoint, error) { + p, err := itr.Next() + if err != nil { + return nil, err + } + itr.unread(p) + return p, nil +} + +// peekTime returns the time of the next point. +// Returns zero time if no more points available. +func (itr *bufFloatIterator) peekTime() (int64, error) { + p, err := itr.peek() + if p == nil || err != nil { + return ZeroTime, err + } + return p.Time, nil +} + +// Next returns the current buffer, if exists, or calls the underlying iterator. +func (itr *bufFloatIterator) Next() (*FloatPoint, error) { + buf := itr.buf + if buf != nil { + itr.buf = nil + return buf, nil + } + return itr.itr.Next() +} + +// NextInWindow returns the next value if it is between [startTime, endTime). +// If the next value is outside the range then it is moved to the buffer. +func (itr *bufFloatIterator) NextInWindow(startTime, endTime int64) (*FloatPoint, error) { + v, err := itr.Next() + if v == nil || err != nil { + return nil, err + } else if t := v.Time; t >= endTime || t < startTime { + itr.unread(v) + return nil, nil + } + return v, nil +} + +// unread sets v to the buffer. It is read on the next call to Next(). 
+func (itr *bufFloatIterator) unread(v *FloatPoint) { itr.buf = v } + +// floatMergeIterator represents an iterator that combines multiple float iterators. +type floatMergeIterator struct { + inputs []FloatIterator + heap *floatMergeHeap + init bool + + closed bool + mu sync.RWMutex + + // Current iterator and window. + curr *floatMergeHeapItem + window struct { + name string + tags string + startTime int64 + endTime int64 + } +} + +// newFloatMergeIterator returns a new instance of floatMergeIterator. +func newFloatMergeIterator(inputs []FloatIterator, opt IteratorOptions) *floatMergeIterator { + itr := &floatMergeIterator{ + inputs: inputs, + heap: &floatMergeHeap{ + items: make([]*floatMergeHeapItem, 0, len(inputs)), + opt: opt, + }, + } + + // Initialize heap items. + for _, input := range inputs { + // Wrap in buffer, ignore any inputs without anymore points. + bufInput := newBufFloatIterator(input) + + // Append to the heap. + itr.heap.items = append(itr.heap.items, &floatMergeHeapItem{itr: bufInput}) + } + + return itr +} + +// Stats returns an aggregation of stats from the underlying iterators. +func (itr *floatMergeIterator) Stats() IteratorStats { + var stats IteratorStats + for _, input := range itr.inputs { + stats.Add(input.Stats()) + } + return stats +} + +// Close closes the underlying iterators. +func (itr *floatMergeIterator) Close() error { + itr.mu.Lock() + defer itr.mu.Unlock() + + for _, input := range itr.inputs { + input.Close() + } + itr.curr = nil + itr.inputs = nil + itr.heap.items = nil + itr.closed = true + return nil +} + +// Next returns the next point from the iterator. +func (itr *floatMergeIterator) Next() (*FloatPoint, error) { + itr.mu.RLock() + defer itr.mu.RUnlock() + if itr.closed { + return nil, nil + } + + // Initialize the heap. This needs to be done lazily on the first call to this iterator + // so that iterator initialization done through the Select() call returns quickly. + // Queries can only be interrupted after the Select() call completes so any operations + // done during iterator creation cannot be interrupted, which is why we do it here + // instead so an interrupt can happen while initializing the heap. + if !itr.init { + items := itr.heap.items + itr.heap.items = make([]*floatMergeHeapItem, 0, len(items)) + for _, item := range items { + if p, err := item.itr.peek(); err != nil { + return nil, err + } else if p == nil { + continue + } + itr.heap.items = append(itr.heap.items, item) + } + heap.Init(itr.heap) + itr.init = true + } + + for { + // Retrieve the next iterator if we don't have one. + if itr.curr == nil { + if len(itr.heap.items) == 0 { + return nil, nil + } + itr.curr = heap.Pop(itr.heap).(*floatMergeHeapItem) + + // Read point and set current window. + p, err := itr.curr.itr.Next() + if err != nil { + return nil, err + } + tags := p.Tags.Subset(itr.heap.opt.Dimensions) + itr.window.name, itr.window.tags = p.Name, tags.ID() + itr.window.startTime, itr.window.endTime = itr.heap.opt.Window(p.Time) + return p, nil + } + + // Read the next point from the current iterator. + p, err := itr.curr.itr.Next() + if err != nil { + return nil, err + } + + // If there are no more points then remove iterator from heap and find next. + if p == nil { + itr.curr = nil + continue + } + + // Check if the point is inside of our current window. 
+ inWindow := true + if window := itr.window; window.name != p.Name { + inWindow = false + } else if tags := p.Tags.Subset(itr.heap.opt.Dimensions); window.tags != tags.ID() { + inWindow = false + } else if opt := itr.heap.opt; opt.Ascending && p.Time >= window.endTime { + inWindow = false + } else if !opt.Ascending && p.Time < window.startTime { + inWindow = false + } + + // If it's outside our window then push iterator back on the heap and find new iterator. + if !inWindow { + itr.curr.itr.unread(p) + heap.Push(itr.heap, itr.curr) + itr.curr = nil + continue + } + + return p, nil + } +} + +// floatMergeHeap represents a heap of floatMergeHeapItems. +// Items are sorted by their next window and then by name/tags. +type floatMergeHeap struct { + opt IteratorOptions + items []*floatMergeHeapItem +} + +func (h *floatMergeHeap) Len() int { return len(h.items) } +func (h *floatMergeHeap) Swap(i, j int) { h.items[i], h.items[j] = h.items[j], h.items[i] } +func (h *floatMergeHeap) Less(i, j int) bool { + x, err := h.items[i].itr.peek() + if err != nil { + return true + } + y, err := h.items[j].itr.peek() + if err != nil { + return false + } + + if h.opt.Ascending { + if x.Name != y.Name { + return x.Name < y.Name + } else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); xTags.ID() != yTags.ID() { + return xTags.ID() < yTags.ID() + } + } else { + if x.Name != y.Name { + return x.Name > y.Name + } else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); xTags.ID() != yTags.ID() { + return xTags.ID() > yTags.ID() + } + } + + xt, _ := h.opt.Window(x.Time) + yt, _ := h.opt.Window(y.Time) + + if h.opt.Ascending { + return xt < yt + } + return xt > yt +} + +func (h *floatMergeHeap) Push(x interface{}) { + h.items = append(h.items, x.(*floatMergeHeapItem)) +} + +func (h *floatMergeHeap) Pop() interface{} { + old := h.items + n := len(old) + item := old[n-1] + h.items = old[0 : n-1] + return item +} + +type floatMergeHeapItem struct { + itr *bufFloatIterator +} + +// floatSortedMergeIterator is an iterator that sorts and merges multiple iterators into one. +type floatSortedMergeIterator struct { + inputs []FloatIterator + heap *floatSortedMergeHeap + init bool +} + +// newFloatSortedMergeIterator returns an instance of floatSortedMergeIterator. +func newFloatSortedMergeIterator(inputs []FloatIterator, opt IteratorOptions) Iterator { + itr := &floatSortedMergeIterator{ + inputs: inputs, + heap: &floatSortedMergeHeap{ + items: make([]*floatSortedMergeHeapItem, 0, len(inputs)), + opt: opt, + }, + } + + // Initialize heap items. + for _, input := range inputs { + // Append to the heap. + itr.heap.items = append(itr.heap.items, &floatSortedMergeHeapItem{itr: input}) + } + + return itr +} + +// Stats returns an aggregation of stats from the underlying iterators. +func (itr *floatSortedMergeIterator) Stats() IteratorStats { + var stats IteratorStats + for _, input := range itr.inputs { + stats.Add(input.Stats()) + } + return stats +} + +// Close closes the underlying iterators. +func (itr *floatSortedMergeIterator) Close() error { + for _, input := range itr.inputs { + input.Close() + } + return nil +} + +// Next returns the next points from the iterator. +func (itr *floatSortedMergeIterator) Next() (*FloatPoint, error) { return itr.pop() } + +// pop returns the next point from the heap. +// Reads the next point from item's cursor and puts it back on the heap. 
+func (itr *floatSortedMergeIterator) pop() (*FloatPoint, error) { + // Initialize the heap. See the MergeIterator to see why this has to be done lazily. + if !itr.init { + items := itr.heap.items + itr.heap.items = make([]*floatSortedMergeHeapItem, 0, len(items)) + for _, item := range items { + var err error + if item.point, err = item.itr.Next(); err != nil { + return nil, err + } else if item.point == nil { + continue + } + itr.heap.items = append(itr.heap.items, item) + } + heap.Init(itr.heap) + itr.init = true + } + + if len(itr.heap.items) == 0 { + return nil, nil + } + + // Read the next item from the heap. + item := heap.Pop(itr.heap).(*floatSortedMergeHeapItem) + if item.err != nil { + return nil, item.err + } else if item.point == nil { + return nil, nil + } + + // Copy the point for return. + p := item.point.Clone() + + // Read the next item from the cursor. Push back to heap if one exists. + if item.point, item.err = item.itr.Next(); item.point != nil { + heap.Push(itr.heap, item) + } + + return p, nil +} + +// floatSortedMergeHeap represents a heap of floatSortedMergeHeapItems. +// Items are sorted with the following priority: +// - By their measurement name; +// - By their tag keys/values; +// - By time; or +// - By their Aux field values. +// +type floatSortedMergeHeap struct { + opt IteratorOptions + items []*floatSortedMergeHeapItem +} + +func (h *floatSortedMergeHeap) Len() int { return len(h.items) } +func (h *floatSortedMergeHeap) Swap(i, j int) { h.items[i], h.items[j] = h.items[j], h.items[i] } +func (h *floatSortedMergeHeap) Less(i, j int) bool { + x, y := h.items[i].point, h.items[j].point + + if h.opt.Ascending { + if x.Name != y.Name { + return x.Name < y.Name + } else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); !xTags.Equals(&yTags) { + return xTags.ID() < yTags.ID() + } + + if x.Time != y.Time { + return x.Time < y.Time + } + + if len(x.Aux) > 0 && len(x.Aux) == len(y.Aux) { + for i := 0; i < len(x.Aux); i++ { + v1, ok1 := x.Aux[i].(string) + v2, ok2 := y.Aux[i].(string) + if !ok1 || !ok2 { + // Unsupported types used in Aux fields. Maybe they + // need to be added here? + return false + } else if v1 == v2 { + continue + } + return v1 < v2 + } + } + return false // Times and/or Aux fields are equal. + } + + if x.Name != y.Name { + return x.Name > y.Name + } else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); !xTags.Equals(&yTags) { + return xTags.ID() > yTags.ID() + } + + if x.Time != y.Time { + return x.Time > y.Time + } + + if len(x.Aux) > 0 && len(x.Aux) == len(y.Aux) { + for i := 0; i < len(x.Aux); i++ { + v1, ok1 := x.Aux[i].(string) + v2, ok2 := y.Aux[i].(string) + if !ok1 || !ok2 { + // Unsupported types used in Aux fields. Maybe they + // need to be added here? + return false + } else if v1 == v2 { + continue + } + return v1 > v2 + } + } + return false // Times and/or Aux fields are equal. +} + +func (h *floatSortedMergeHeap) Push(x interface{}) { + h.items = append(h.items, x.(*floatSortedMergeHeapItem)) +} + +func (h *floatSortedMergeHeap) Pop() interface{} { + old := h.items + n := len(old) + item := old[n-1] + h.items = old[0 : n-1] + return item +} + +type floatSortedMergeHeapItem struct { + point *FloatPoint + err error + itr FloatIterator +} + +// floatIteratorScanner scans the results of a FloatIterator into a map. 
+type floatIteratorScanner struct { + input *bufFloatIterator + err error + keys []influxql.VarRef + defaultValue interface{} +} + +// newFloatIteratorScanner creates a new IteratorScanner. +func newFloatIteratorScanner(input FloatIterator, keys []influxql.VarRef, defaultValue interface{}) *floatIteratorScanner { + return &floatIteratorScanner{ + input: newBufFloatIterator(input), + keys: keys, + defaultValue: defaultValue, + } +} + +func (s *floatIteratorScanner) Peek() (int64, string, Tags) { + if s.err != nil { + return ZeroTime, "", Tags{} + } + + p, err := s.input.peek() + if err != nil { + s.err = err + return ZeroTime, "", Tags{} + } else if p == nil { + return ZeroTime, "", Tags{} + } + return p.Time, p.Name, p.Tags +} + +func (s *floatIteratorScanner) ScanAt(ts int64, name string, tags Tags, m map[string]interface{}) { + if s.err != nil { + return + } + + p, err := s.input.Next() + if err != nil { + s.err = err + return + } else if p == nil { + s.useDefaults(m) + return + } else if p.Time != ts || p.Name != name || !p.Tags.Equals(&tags) { + s.useDefaults(m) + s.input.unread(p) + return + } + + if k := s.keys[0]; k.Val != "" { + if p.Nil { + if s.defaultValue != SkipDefault { + m[k.Val] = castToType(s.defaultValue, k.Type) + } + } else { + m[k.Val] = p.Value + } + } + for i, v := range p.Aux { + k := s.keys[i+1] + switch v.(type) { + case float64, int64, uint64, string, bool: + m[k.Val] = v + default: + // Insert the fill value if one was specified. + if s.defaultValue != SkipDefault { + m[k.Val] = castToType(s.defaultValue, k.Type) + } + } + } +} + +func (s *floatIteratorScanner) useDefaults(m map[string]interface{}) { + if s.defaultValue == SkipDefault { + return + } + for _, k := range s.keys { + if k.Val == "" { + continue + } + m[k.Val] = castToType(s.defaultValue, k.Type) + } +} + +func (s *floatIteratorScanner) Stats() IteratorStats { return s.input.Stats() } +func (s *floatIteratorScanner) Err() error { return s.err } +func (s *floatIteratorScanner) Close() error { return s.input.Close() } + +// floatParallelIterator represents an iterator that pulls data in a separate goroutine. +type floatParallelIterator struct { + input FloatIterator + ch chan floatPointError + + once sync.Once + closing chan struct{} + wg sync.WaitGroup +} + +// newFloatParallelIterator returns a new instance of floatParallelIterator. +func newFloatParallelIterator(input FloatIterator) *floatParallelIterator { + itr := &floatParallelIterator{ + input: input, + ch: make(chan floatPointError, 256), + closing: make(chan struct{}), + } + itr.wg.Add(1) + go itr.monitor() + return itr +} + +// Stats returns stats from the underlying iterator. +func (itr *floatParallelIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the underlying iterators. +func (itr *floatParallelIterator) Close() error { + itr.once.Do(func() { close(itr.closing) }) + itr.wg.Wait() + return itr.input.Close() +} + +// Next returns the next point from the iterator. +func (itr *floatParallelIterator) Next() (*FloatPoint, error) { + v, ok := <-itr.ch + if !ok { + return nil, io.EOF + } + return v.point, v.err +} + +// monitor runs in a separate goroutine and actively pulls the next point. +func (itr *floatParallelIterator) monitor() { + defer close(itr.ch) + defer itr.wg.Done() + + for { + // Read next point. 
+ p, err := itr.input.Next() + if p != nil { + p = p.Clone() + } + + select { + case <-itr.closing: + return + case itr.ch <- floatPointError{point: p, err: err}: + } + } +} + +type floatPointError struct { + point *FloatPoint + err error +} + +// floatLimitIterator represents an iterator that limits points per group. +type floatLimitIterator struct { + input FloatIterator + opt IteratorOptions + n int + + prev struct { + name string + tags Tags + } +} + +// newFloatLimitIterator returns a new instance of floatLimitIterator. +func newFloatLimitIterator(input FloatIterator, opt IteratorOptions) *floatLimitIterator { + return &floatLimitIterator{ + input: input, + opt: opt, + } +} + +// Stats returns stats from the underlying iterator. +func (itr *floatLimitIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the underlying iterators. +func (itr *floatLimitIterator) Close() error { return itr.input.Close() } + +// Next returns the next point from the iterator. +func (itr *floatLimitIterator) Next() (*FloatPoint, error) { + for { + p, err := itr.input.Next() + if p == nil || err != nil { + return nil, err + } + + // Reset window and counter if a new window is encountered. + if p.Name != itr.prev.name || !p.Tags.Equals(&itr.prev.tags) { + itr.prev.name = p.Name + itr.prev.tags = p.Tags + itr.n = 0 + } + + // Increment counter. + itr.n++ + + // Read next point if not beyond the offset. + if itr.n <= itr.opt.Offset { + continue + } + + // Read next point if we're beyond the limit. + if itr.opt.Limit > 0 && (itr.n-itr.opt.Offset) > itr.opt.Limit { + continue + } + + return p, nil + } +} + +type floatFillIterator struct { + input *bufFloatIterator + prev FloatPoint + startTime int64 + endTime int64 + auxFields []interface{} + init bool + opt IteratorOptions + + window struct { + name string + tags Tags + time int64 + offset int64 + } +} + +func newFloatFillIterator(input FloatIterator, expr influxql.Expr, opt IteratorOptions) *floatFillIterator { + if opt.Fill == influxql.NullFill { + if expr, ok := expr.(*influxql.Call); ok && expr.Name == "count" { + opt.Fill = influxql.NumberFill + opt.FillValue = float64(0) + } + } + + var startTime, endTime int64 + if opt.Ascending { + startTime, _ = opt.Window(opt.StartTime) + endTime, _ = opt.Window(opt.EndTime) + } else { + startTime, _ = opt.Window(opt.EndTime) + endTime, _ = opt.Window(opt.StartTime) + } + + var auxFields []interface{} + if len(opt.Aux) > 0 { + auxFields = make([]interface{}, len(opt.Aux)) + } + + return &floatFillIterator{ + input: newBufFloatIterator(input), + prev: FloatPoint{Nil: true}, + startTime: startTime, + endTime: endTime, + auxFields: auxFields, + opt: opt, + } +} + +func (itr *floatFillIterator) Stats() IteratorStats { return itr.input.Stats() } +func (itr *floatFillIterator) Close() error { return itr.input.Close() } + +func (itr *floatFillIterator) Next() (*FloatPoint, error) { + if !itr.init { + p, err := itr.input.peek() + if p == nil || err != nil { + return nil, err + } + itr.window.name, itr.window.tags = p.Name, p.Tags + itr.window.time = itr.startTime + if itr.startTime == influxql.MinTime { + itr.window.time, _ = itr.opt.Window(p.Time) + } + if itr.opt.Location != nil { + _, itr.window.offset = itr.opt.Zone(itr.window.time) + } + itr.init = true + } + + p, err := itr.input.Next() + if err != nil { + return nil, err + } + + // Check if the next point is outside of our window or is nil. 
+ if p == nil || p.Name != itr.window.name || p.Tags.ID() != itr.window.tags.ID() { + // If we are inside of an interval, unread the point and continue below to + // constructing a new point. + if itr.opt.Ascending && itr.window.time <= itr.endTime { + itr.input.unread(p) + p = nil + goto CONSTRUCT + } else if !itr.opt.Ascending && itr.window.time >= itr.endTime && itr.endTime != influxql.MinTime { + itr.input.unread(p) + p = nil + goto CONSTRUCT + } + + // We are *not* in a current interval. If there is no next point, + // we are at the end of all intervals. + if p == nil { + return nil, nil + } + + // Set the new interval. + itr.window.name, itr.window.tags = p.Name, p.Tags + itr.window.time = itr.startTime + if itr.window.time == influxql.MinTime { + itr.window.time, _ = itr.opt.Window(p.Time) + } + if itr.opt.Location != nil { + _, itr.window.offset = itr.opt.Zone(itr.window.time) + } + itr.prev = FloatPoint{Nil: true} + } + + // Check if the point is our next expected point. +CONSTRUCT: + if p == nil || (itr.opt.Ascending && p.Time > itr.window.time) || (!itr.opt.Ascending && p.Time < itr.window.time) { + if p != nil { + itr.input.unread(p) + } + + p = &FloatPoint{ + Name: itr.window.name, + Tags: itr.window.tags, + Time: itr.window.time, + Aux: itr.auxFields, + } + + switch itr.opt.Fill { + case influxql.LinearFill: + if !itr.prev.Nil { + next, err := itr.input.peek() + if err != nil { + return nil, err + } else if next != nil && next.Name == itr.window.name && next.Tags.ID() == itr.window.tags.ID() { + interval := int64(itr.opt.Interval.Duration) + start := itr.window.time / interval + p.Value = linearFloat(start, itr.prev.Time/interval, next.Time/interval, itr.prev.Value, next.Value) + } else { + p.Nil = true + } + } else { + p.Nil = true + } + + case influxql.NullFill: + p.Nil = true + case influxql.NumberFill: + p.Value, _ = castToFloat(itr.opt.FillValue) + case influxql.PreviousFill: + if !itr.prev.Nil { + p.Value = itr.prev.Value + p.Nil = itr.prev.Nil + } else { + p.Nil = true + } + } + } else { + itr.prev = *p + } + + // Advance the expected time. Do not advance to a new window here + // as there may be lingering points with the same timestamp in the previous + // window. + if itr.opt.Ascending { + itr.window.time += int64(itr.opt.Interval.Duration) + } else { + itr.window.time -= int64(itr.opt.Interval.Duration) + } + + // Check to see if we have passed over an offset change and adjust the time + // to account for this new offset. + if itr.opt.Location != nil { + if _, offset := itr.opt.Zone(itr.window.time - 1); offset != itr.window.offset { + diff := itr.window.offset - offset + if abs(diff) < int64(itr.opt.Interval.Duration) { + itr.window.time += diff + } + itr.window.offset = offset + } + } + return p, nil +} + +// floatIntervalIterator represents a float implementation of IntervalIterator. 
+type floatIntervalIterator struct { + input FloatIterator + opt IteratorOptions +} + +func newFloatIntervalIterator(input FloatIterator, opt IteratorOptions) *floatIntervalIterator { + return &floatIntervalIterator{input: input, opt: opt} +} + +func (itr *floatIntervalIterator) Stats() IteratorStats { return itr.input.Stats() } +func (itr *floatIntervalIterator) Close() error { return itr.input.Close() } + +func (itr *floatIntervalIterator) Next() (*FloatPoint, error) { + p, err := itr.input.Next() + if p == nil || err != nil { + return nil, err + } + p.Time, _ = itr.opt.Window(p.Time) + // If we see the minimum allowable time, set the time to zero so we don't + // break the default returned time for aggregate queries without times. + if p.Time == influxql.MinTime { + p.Time = 0 + } + return p, nil +} + +// floatInterruptIterator represents a float implementation of InterruptIterator. +type floatInterruptIterator struct { + input FloatIterator + closing <-chan struct{} + count int +} + +func newFloatInterruptIterator(input FloatIterator, closing <-chan struct{}) *floatInterruptIterator { + return &floatInterruptIterator{input: input, closing: closing} +} + +func (itr *floatInterruptIterator) Stats() IteratorStats { return itr.input.Stats() } +func (itr *floatInterruptIterator) Close() error { return itr.input.Close() } + +func (itr *floatInterruptIterator) Next() (*FloatPoint, error) { + // Only check if the channel is closed every N points. This + // intentionally checks on both 0 and N so that if the iterator + // has been interrupted before the first point is emitted it will + // not emit any points. + if itr.count&0xFF == 0xFF { + select { + case <-itr.closing: + return nil, itr.Close() + default: + // Reset iterator count to zero and fall through to emit the next point. + itr.count = 0 + } + } + + // Increment the counter for every point read. + itr.count++ + return itr.input.Next() +} + +// floatCloseInterruptIterator represents a float implementation of CloseInterruptIterator. +type floatCloseInterruptIterator struct { + input FloatIterator + closing <-chan struct{} + done chan struct{} + once sync.Once +} + +func newFloatCloseInterruptIterator(input FloatIterator, closing <-chan struct{}) *floatCloseInterruptIterator { + itr := &floatCloseInterruptIterator{ + input: input, + closing: closing, + done: make(chan struct{}), + } + go itr.monitor() + return itr +} + +func (itr *floatCloseInterruptIterator) monitor() { + select { + case <-itr.closing: + itr.Close() + case <-itr.done: + } +} + +func (itr *floatCloseInterruptIterator) Stats() IteratorStats { + return itr.input.Stats() +} + +func (itr *floatCloseInterruptIterator) Close() error { + itr.once.Do(func() { + close(itr.done) + itr.input.Close() + }) + return nil +} + +func (itr *floatCloseInterruptIterator) Next() (*FloatPoint, error) { + p, err := itr.input.Next() + if err != nil { + // Check if the iterator was closed. + select { + case <-itr.done: + return nil, nil + default: + return nil, err + } + } + return p, nil +} + +// floatReduceFloatIterator executes a reducer for every interval and buffers the result. 
+type floatReduceFloatIterator struct { + input *bufFloatIterator + create func() (FloatPointAggregator, FloatPointEmitter) + dims []string + opt IteratorOptions + points []FloatPoint + keepTags bool +} + +func newFloatReduceFloatIterator(input FloatIterator, opt IteratorOptions, createFn func() (FloatPointAggregator, FloatPointEmitter)) *floatReduceFloatIterator { + return &floatReduceFloatIterator{ + input: newBufFloatIterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + } +} + +// Stats returns stats from the input iterator. +func (itr *floatReduceFloatIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *floatReduceFloatIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *floatReduceFloatIterator) Next() (*FloatPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// floatReduceFloatPoint stores the reduced data for a name/tag combination. +type floatReduceFloatPoint struct { + Name string + Tags Tags + Aggregator FloatPointAggregator + Emitter FloatPointEmitter +} + +// reduce executes fn once for every point in the next window. +// The previous value for the dimension is passed to fn. +func (itr *floatReduceFloatIterator) reduce() ([]FloatPoint, error) { + // Calculate next window. + var ( + startTime, endTime int64 + window struct { + name string + tags string + } + ) + for { + p, err := itr.input.Next() + if err != nil || p == nil { + return nil, err + } else if p.Nil { + continue + } + + // Unread the point so it can be processed. + itr.input.unread(p) + startTime, endTime = itr.opt.Window(p.Time) + window.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID() + break + } + + // Create points by tags. + m := make(map[string]*floatReduceFloatPoint) + for { + // Read next point. + curr, err := itr.input.NextInWindow(startTime, endTime) + if err != nil { + return nil, err + } else if curr == nil { + break + } else if curr.Nil { + continue + } else if curr.Name != window.name { + itr.input.unread(curr) + break + } + + // Ensure this point is within the same final window. + if curr.Name != window.name { + itr.input.unread(curr) + break + } else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags { + itr.input.unread(curr) + break + } + + // Retrieve the tags on this point for this level of the query. + // This may be different than the bucket dimensions. + tags := curr.Tags.Subset(itr.dims) + id := tags.ID() + + // Retrieve the aggregator for this name/tag combination or create one. + rp := m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &floatReduceFloatPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + m[id] = rp + } + rp.Aggregator.AggregateFloat(curr) + } + + // Reverse sort points by name & tag if our output is supposed to be ordered. + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + if len(keys) > 1 && itr.opt.Ordered { + sort.Sort(reverseStringSlice(keys)) + } + + // Assume the points are already sorted until proven otherwise. 
+ sortedByTime := true + // Emit the points for each name & tag combination. + a := make([]FloatPoint, 0, len(m)) + for _, k := range keys { + rp := m[k] + points := rp.Emitter.Emit() + for i := len(points) - 1; i >= 0; i-- { + points[i].Name = rp.Name + if !itr.keepTags { + points[i].Tags = rp.Tags + } + // Set the points time to the interval time if the reducer didn't provide one. + if points[i].Time == ZeroTime { + points[i].Time = startTime + } else { + sortedByTime = false + } + a = append(a, points[i]) + } + } + + // Points may be out of order. Perform a stable sort by time if requested. + if !sortedByTime && itr.opt.Ordered { + sort.Stable(sort.Reverse(floatPointsByTime(a))) + } + + return a, nil +} + +// floatStreamFloatIterator streams inputs into the iterator and emits points gradually. +type floatStreamFloatIterator struct { + input *bufFloatIterator + create func() (FloatPointAggregator, FloatPointEmitter) + dims []string + opt IteratorOptions + m map[string]*floatReduceFloatPoint + points []FloatPoint +} + +// newFloatStreamFloatIterator returns a new instance of floatStreamFloatIterator. +func newFloatStreamFloatIterator(input FloatIterator, createFn func() (FloatPointAggregator, FloatPointEmitter), opt IteratorOptions) *floatStreamFloatIterator { + return &floatStreamFloatIterator{ + input: newBufFloatIterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + m: make(map[string]*floatReduceFloatPoint), + } +} + +// Stats returns stats from the input iterator. +func (itr *floatStreamFloatIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *floatStreamFloatIterator) Close() error { return itr.input.Close() } + +// Next returns the next value for the stream iterator. +func (itr *floatStreamFloatIterator) Next() (*FloatPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// reduce creates and manages aggregators for every point from the input. +// After aggregating a point, it always tries to emit a value using the emitter. +func (itr *floatStreamFloatIterator) reduce() ([]FloatPoint, error) { + // We have already read all of the input points. + if itr.m == nil { + return nil, nil + } + + for { + // Read next point. + curr, err := itr.input.Next() + if err != nil { + return nil, err + } else if curr == nil { + // Close all of the aggregators to flush any remaining points to emit. + var points []FloatPoint + for _, rp := range itr.m { + if aggregator, ok := rp.Aggregator.(io.Closer); ok { + if err := aggregator.Close(); err != nil { + return nil, err + } + + pts := rp.Emitter.Emit() + if len(pts) == 0 { + continue + } + + for i := range pts { + pts[i].Name = rp.Name + pts[i].Tags = rp.Tags + } + points = append(points, pts...) + } + } + + // Eliminate the aggregators and emitters. + itr.m = nil + return points, nil + } else if curr.Nil { + continue + } + tags := curr.Tags.Subset(itr.dims) + + id := curr.Name + if len(tags.m) > 0 { + id += "\x00" + tags.ID() + } + + // Retrieve the aggregator for this name/tag combination or create one. 
+ rp := itr.m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &floatReduceFloatPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + itr.m[id] = rp + } + rp.Aggregator.AggregateFloat(curr) + + // Attempt to emit points from the aggregator. + points := rp.Emitter.Emit() + if len(points) == 0 { + continue + } + + for i := range points { + points[i].Name = rp.Name + points[i].Tags = rp.Tags + } + return points, nil + } +} + +// floatReduceIntegerIterator executes a reducer for every interval and buffers the result. +type floatReduceIntegerIterator struct { + input *bufFloatIterator + create func() (FloatPointAggregator, IntegerPointEmitter) + dims []string + opt IteratorOptions + points []IntegerPoint + keepTags bool +} + +func newFloatReduceIntegerIterator(input FloatIterator, opt IteratorOptions, createFn func() (FloatPointAggregator, IntegerPointEmitter)) *floatReduceIntegerIterator { + return &floatReduceIntegerIterator{ + input: newBufFloatIterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + } +} + +// Stats returns stats from the input iterator. +func (itr *floatReduceIntegerIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *floatReduceIntegerIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *floatReduceIntegerIterator) Next() (*IntegerPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// floatReduceIntegerPoint stores the reduced data for a name/tag combination. +type floatReduceIntegerPoint struct { + Name string + Tags Tags + Aggregator FloatPointAggregator + Emitter IntegerPointEmitter +} + +// reduce executes fn once for every point in the next window. +// The previous value for the dimension is passed to fn. +func (itr *floatReduceIntegerIterator) reduce() ([]IntegerPoint, error) { + // Calculate next window. + var ( + startTime, endTime int64 + window struct { + name string + tags string + } + ) + for { + p, err := itr.input.Next() + if err != nil || p == nil { + return nil, err + } else if p.Nil { + continue + } + + // Unread the point so it can be processed. + itr.input.unread(p) + startTime, endTime = itr.opt.Window(p.Time) + window.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID() + break + } + + // Create points by tags. + m := make(map[string]*floatReduceIntegerPoint) + for { + // Read next point. + curr, err := itr.input.NextInWindow(startTime, endTime) + if err != nil { + return nil, err + } else if curr == nil { + break + } else if curr.Nil { + continue + } else if curr.Name != window.name { + itr.input.unread(curr) + break + } + + // Ensure this point is within the same final window. + if curr.Name != window.name { + itr.input.unread(curr) + break + } else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags { + itr.input.unread(curr) + break + } + + // Retrieve the tags on this point for this level of the query. + // This may be different than the bucket dimensions. + tags := curr.Tags.Subset(itr.dims) + id := tags.ID() + + // Retrieve the aggregator for this name/tag combination or create one. 
+ rp := m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &floatReduceIntegerPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + m[id] = rp + } + rp.Aggregator.AggregateFloat(curr) + } + + // Reverse sort points by name & tag if our output is supposed to be ordered. + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + if len(keys) > 1 && itr.opt.Ordered { + sort.Sort(reverseStringSlice(keys)) + } + + // Assume the points are already sorted until proven otherwise. + sortedByTime := true + // Emit the points for each name & tag combination. + a := make([]IntegerPoint, 0, len(m)) + for _, k := range keys { + rp := m[k] + points := rp.Emitter.Emit() + for i := len(points) - 1; i >= 0; i-- { + points[i].Name = rp.Name + if !itr.keepTags { + points[i].Tags = rp.Tags + } + // Set the points time to the interval time if the reducer didn't provide one. + if points[i].Time == ZeroTime { + points[i].Time = startTime + } else { + sortedByTime = false + } + a = append(a, points[i]) + } + } + + // Points may be out of order. Perform a stable sort by time if requested. + if !sortedByTime && itr.opt.Ordered { + sort.Stable(sort.Reverse(integerPointsByTime(a))) + } + + return a, nil +} + +// floatStreamIntegerIterator streams inputs into the iterator and emits points gradually. +type floatStreamIntegerIterator struct { + input *bufFloatIterator + create func() (FloatPointAggregator, IntegerPointEmitter) + dims []string + opt IteratorOptions + m map[string]*floatReduceIntegerPoint + points []IntegerPoint +} + +// newFloatStreamIntegerIterator returns a new instance of floatStreamIntegerIterator. +func newFloatStreamIntegerIterator(input FloatIterator, createFn func() (FloatPointAggregator, IntegerPointEmitter), opt IteratorOptions) *floatStreamIntegerIterator { + return &floatStreamIntegerIterator{ + input: newBufFloatIterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + m: make(map[string]*floatReduceIntegerPoint), + } +} + +// Stats returns stats from the input iterator. +func (itr *floatStreamIntegerIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *floatStreamIntegerIterator) Close() error { return itr.input.Close() } + +// Next returns the next value for the stream iterator. +func (itr *floatStreamIntegerIterator) Next() (*IntegerPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// reduce creates and manages aggregators for every point from the input. +// After aggregating a point, it always tries to emit a value using the emitter. +func (itr *floatStreamIntegerIterator) reduce() ([]IntegerPoint, error) { + // We have already read all of the input points. + if itr.m == nil { + return nil, nil + } + + for { + // Read next point. + curr, err := itr.input.Next() + if err != nil { + return nil, err + } else if curr == nil { + // Close all of the aggregators to flush any remaining points to emit. 
+ var points []IntegerPoint + for _, rp := range itr.m { + if aggregator, ok := rp.Aggregator.(io.Closer); ok { + if err := aggregator.Close(); err != nil { + return nil, err + } + + pts := rp.Emitter.Emit() + if len(pts) == 0 { + continue + } + + for i := range pts { + pts[i].Name = rp.Name + pts[i].Tags = rp.Tags + } + points = append(points, pts...) + } + } + + // Eliminate the aggregators and emitters. + itr.m = nil + return points, nil + } else if curr.Nil { + continue + } + tags := curr.Tags.Subset(itr.dims) + + id := curr.Name + if len(tags.m) > 0 { + id += "\x00" + tags.ID() + } + + // Retrieve the aggregator for this name/tag combination or create one. + rp := itr.m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &floatReduceIntegerPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + itr.m[id] = rp + } + rp.Aggregator.AggregateFloat(curr) + + // Attempt to emit points from the aggregator. + points := rp.Emitter.Emit() + if len(points) == 0 { + continue + } + + for i := range points { + points[i].Name = rp.Name + points[i].Tags = rp.Tags + } + return points, nil + } +} + +// floatReduceUnsignedIterator executes a reducer for every interval and buffers the result. +type floatReduceUnsignedIterator struct { + input *bufFloatIterator + create func() (FloatPointAggregator, UnsignedPointEmitter) + dims []string + opt IteratorOptions + points []UnsignedPoint + keepTags bool +} + +func newFloatReduceUnsignedIterator(input FloatIterator, opt IteratorOptions, createFn func() (FloatPointAggregator, UnsignedPointEmitter)) *floatReduceUnsignedIterator { + return &floatReduceUnsignedIterator{ + input: newBufFloatIterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + } +} + +// Stats returns stats from the input iterator. +func (itr *floatReduceUnsignedIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *floatReduceUnsignedIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *floatReduceUnsignedIterator) Next() (*UnsignedPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// floatReduceUnsignedPoint stores the reduced data for a name/tag combination. +type floatReduceUnsignedPoint struct { + Name string + Tags Tags + Aggregator FloatPointAggregator + Emitter UnsignedPointEmitter +} + +// reduce executes fn once for every point in the next window. +// The previous value for the dimension is passed to fn. +func (itr *floatReduceUnsignedIterator) reduce() ([]UnsignedPoint, error) { + // Calculate next window. + var ( + startTime, endTime int64 + window struct { + name string + tags string + } + ) + for { + p, err := itr.input.Next() + if err != nil || p == nil { + return nil, err + } else if p.Nil { + continue + } + + // Unread the point so it can be processed. + itr.input.unread(p) + startTime, endTime = itr.opt.Window(p.Time) + window.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID() + break + } + + // Create points by tags. + m := make(map[string]*floatReduceUnsignedPoint) + for { + // Read next point. 
+ curr, err := itr.input.NextInWindow(startTime, endTime) + if err != nil { + return nil, err + } else if curr == nil { + break + } else if curr.Nil { + continue + } else if curr.Name != window.name { + itr.input.unread(curr) + break + } + + // Ensure this point is within the same final window. + if curr.Name != window.name { + itr.input.unread(curr) + break + } else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags { + itr.input.unread(curr) + break + } + + // Retrieve the tags on this point for this level of the query. + // This may be different than the bucket dimensions. + tags := curr.Tags.Subset(itr.dims) + id := tags.ID() + + // Retrieve the aggregator for this name/tag combination or create one. + rp := m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &floatReduceUnsignedPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + m[id] = rp + } + rp.Aggregator.AggregateFloat(curr) + } + + // Reverse sort points by name & tag if our output is supposed to be ordered. + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + if len(keys) > 1 && itr.opt.Ordered { + sort.Sort(reverseStringSlice(keys)) + } + + // Assume the points are already sorted until proven otherwise. + sortedByTime := true + // Emit the points for each name & tag combination. + a := make([]UnsignedPoint, 0, len(m)) + for _, k := range keys { + rp := m[k] + points := rp.Emitter.Emit() + for i := len(points) - 1; i >= 0; i-- { + points[i].Name = rp.Name + if !itr.keepTags { + points[i].Tags = rp.Tags + } + // Set the points time to the interval time if the reducer didn't provide one. + if points[i].Time == ZeroTime { + points[i].Time = startTime + } else { + sortedByTime = false + } + a = append(a, points[i]) + } + } + + // Points may be out of order. Perform a stable sort by time if requested. + if !sortedByTime && itr.opt.Ordered { + sort.Stable(sort.Reverse(unsignedPointsByTime(a))) + } + + return a, nil +} + +// floatStreamUnsignedIterator streams inputs into the iterator and emits points gradually. +type floatStreamUnsignedIterator struct { + input *bufFloatIterator + create func() (FloatPointAggregator, UnsignedPointEmitter) + dims []string + opt IteratorOptions + m map[string]*floatReduceUnsignedPoint + points []UnsignedPoint +} + +// newFloatStreamUnsignedIterator returns a new instance of floatStreamUnsignedIterator. +func newFloatStreamUnsignedIterator(input FloatIterator, createFn func() (FloatPointAggregator, UnsignedPointEmitter), opt IteratorOptions) *floatStreamUnsignedIterator { + return &floatStreamUnsignedIterator{ + input: newBufFloatIterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + m: make(map[string]*floatReduceUnsignedPoint), + } +} + +// Stats returns stats from the input iterator. +func (itr *floatStreamUnsignedIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *floatStreamUnsignedIterator) Close() error { return itr.input.Close() } + +// Next returns the next value for the stream iterator. +func (itr *floatStreamUnsignedIterator) Next() (*UnsignedPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. 
+ p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// reduce creates and manages aggregators for every point from the input. +// After aggregating a point, it always tries to emit a value using the emitter. +func (itr *floatStreamUnsignedIterator) reduce() ([]UnsignedPoint, error) { + // We have already read all of the input points. + if itr.m == nil { + return nil, nil + } + + for { + // Read next point. + curr, err := itr.input.Next() + if err != nil { + return nil, err + } else if curr == nil { + // Close all of the aggregators to flush any remaining points to emit. + var points []UnsignedPoint + for _, rp := range itr.m { + if aggregator, ok := rp.Aggregator.(io.Closer); ok { + if err := aggregator.Close(); err != nil { + return nil, err + } + + pts := rp.Emitter.Emit() + if len(pts) == 0 { + continue + } + + for i := range pts { + pts[i].Name = rp.Name + pts[i].Tags = rp.Tags + } + points = append(points, pts...) + } + } + + // Eliminate the aggregators and emitters. + itr.m = nil + return points, nil + } else if curr.Nil { + continue + } + tags := curr.Tags.Subset(itr.dims) + + id := curr.Name + if len(tags.m) > 0 { + id += "\x00" + tags.ID() + } + + // Retrieve the aggregator for this name/tag combination or create one. + rp := itr.m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &floatReduceUnsignedPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + itr.m[id] = rp + } + rp.Aggregator.AggregateFloat(curr) + + // Attempt to emit points from the aggregator. + points := rp.Emitter.Emit() + if len(points) == 0 { + continue + } + + for i := range points { + points[i].Name = rp.Name + points[i].Tags = rp.Tags + } + return points, nil + } +} + +// floatReduceStringIterator executes a reducer for every interval and buffers the result. +type floatReduceStringIterator struct { + input *bufFloatIterator + create func() (FloatPointAggregator, StringPointEmitter) + dims []string + opt IteratorOptions + points []StringPoint + keepTags bool +} + +func newFloatReduceStringIterator(input FloatIterator, opt IteratorOptions, createFn func() (FloatPointAggregator, StringPointEmitter)) *floatReduceStringIterator { + return &floatReduceStringIterator{ + input: newBufFloatIterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + } +} + +// Stats returns stats from the input iterator. +func (itr *floatReduceStringIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *floatReduceStringIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *floatReduceStringIterator) Next() (*StringPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// floatReduceStringPoint stores the reduced data for a name/tag combination. +type floatReduceStringPoint struct { + Name string + Tags Tags + Aggregator FloatPointAggregator + Emitter StringPointEmitter +} + +// reduce executes fn once for every point in the next window. +// The previous value for the dimension is passed to fn. 
+func (itr *floatReduceStringIterator) reduce() ([]StringPoint, error) { + // Calculate next window. + var ( + startTime, endTime int64 + window struct { + name string + tags string + } + ) + for { + p, err := itr.input.Next() + if err != nil || p == nil { + return nil, err + } else if p.Nil { + continue + } + + // Unread the point so it can be processed. + itr.input.unread(p) + startTime, endTime = itr.opt.Window(p.Time) + window.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID() + break + } + + // Create points by tags. + m := make(map[string]*floatReduceStringPoint) + for { + // Read next point. + curr, err := itr.input.NextInWindow(startTime, endTime) + if err != nil { + return nil, err + } else if curr == nil { + break + } else if curr.Nil { + continue + } else if curr.Name != window.name { + itr.input.unread(curr) + break + } + + // Ensure this point is within the same final window. + if curr.Name != window.name { + itr.input.unread(curr) + break + } else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags { + itr.input.unread(curr) + break + } + + // Retrieve the tags on this point for this level of the query. + // This may be different than the bucket dimensions. + tags := curr.Tags.Subset(itr.dims) + id := tags.ID() + + // Retrieve the aggregator for this name/tag combination or create one. + rp := m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &floatReduceStringPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + m[id] = rp + } + rp.Aggregator.AggregateFloat(curr) + } + + // Reverse sort points by name & tag if our output is supposed to be ordered. + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + if len(keys) > 1 && itr.opt.Ordered { + sort.Sort(reverseStringSlice(keys)) + } + + // Assume the points are already sorted until proven otherwise. + sortedByTime := true + // Emit the points for each name & tag combination. + a := make([]StringPoint, 0, len(m)) + for _, k := range keys { + rp := m[k] + points := rp.Emitter.Emit() + for i := len(points) - 1; i >= 0; i-- { + points[i].Name = rp.Name + if !itr.keepTags { + points[i].Tags = rp.Tags + } + // Set the points time to the interval time if the reducer didn't provide one. + if points[i].Time == ZeroTime { + points[i].Time = startTime + } else { + sortedByTime = false + } + a = append(a, points[i]) + } + } + + // Points may be out of order. Perform a stable sort by time if requested. + if !sortedByTime && itr.opt.Ordered { + sort.Stable(sort.Reverse(stringPointsByTime(a))) + } + + return a, nil +} + +// floatStreamStringIterator streams inputs into the iterator and emits points gradually. +type floatStreamStringIterator struct { + input *bufFloatIterator + create func() (FloatPointAggregator, StringPointEmitter) + dims []string + opt IteratorOptions + m map[string]*floatReduceStringPoint + points []StringPoint +} + +// newFloatStreamStringIterator returns a new instance of floatStreamStringIterator. +func newFloatStreamStringIterator(input FloatIterator, createFn func() (FloatPointAggregator, StringPointEmitter), opt IteratorOptions) *floatStreamStringIterator { + return &floatStreamStringIterator{ + input: newBufFloatIterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + m: make(map[string]*floatReduceStringPoint), + } +} + +// Stats returns stats from the input iterator. 
+func (itr *floatStreamStringIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *floatStreamStringIterator) Close() error { return itr.input.Close() } + +// Next returns the next value for the stream iterator. +func (itr *floatStreamStringIterator) Next() (*StringPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// reduce creates and manages aggregators for every point from the input. +// After aggregating a point, it always tries to emit a value using the emitter. +func (itr *floatStreamStringIterator) reduce() ([]StringPoint, error) { + // We have already read all of the input points. + if itr.m == nil { + return nil, nil + } + + for { + // Read next point. + curr, err := itr.input.Next() + if err != nil { + return nil, err + } else if curr == nil { + // Close all of the aggregators to flush any remaining points to emit. + var points []StringPoint + for _, rp := range itr.m { + if aggregator, ok := rp.Aggregator.(io.Closer); ok { + if err := aggregator.Close(); err != nil { + return nil, err + } + + pts := rp.Emitter.Emit() + if len(pts) == 0 { + continue + } + + for i := range pts { + pts[i].Name = rp.Name + pts[i].Tags = rp.Tags + } + points = append(points, pts...) + } + } + + // Eliminate the aggregators and emitters. + itr.m = nil + return points, nil + } else if curr.Nil { + continue + } + tags := curr.Tags.Subset(itr.dims) + + id := curr.Name + if len(tags.m) > 0 { + id += "\x00" + tags.ID() + } + + // Retrieve the aggregator for this name/tag combination or create one. + rp := itr.m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &floatReduceStringPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + itr.m[id] = rp + } + rp.Aggregator.AggregateFloat(curr) + + // Attempt to emit points from the aggregator. + points := rp.Emitter.Emit() + if len(points) == 0 { + continue + } + + for i := range points { + points[i].Name = rp.Name + points[i].Tags = rp.Tags + } + return points, nil + } +} + +// floatReduceBooleanIterator executes a reducer for every interval and buffers the result. +type floatReduceBooleanIterator struct { + input *bufFloatIterator + create func() (FloatPointAggregator, BooleanPointEmitter) + dims []string + opt IteratorOptions + points []BooleanPoint + keepTags bool +} + +func newFloatReduceBooleanIterator(input FloatIterator, opt IteratorOptions, createFn func() (FloatPointAggregator, BooleanPointEmitter)) *floatReduceBooleanIterator { + return &floatReduceBooleanIterator{ + input: newBufFloatIterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + } +} + +// Stats returns stats from the input iterator. +func (itr *floatReduceBooleanIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *floatReduceBooleanIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *floatReduceBooleanIterator) Next() (*BooleanPoint, error) { + // Calculate next window if we have no more points. 
+ if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// floatReduceBooleanPoint stores the reduced data for a name/tag combination. +type floatReduceBooleanPoint struct { + Name string + Tags Tags + Aggregator FloatPointAggregator + Emitter BooleanPointEmitter +} + +// reduce executes fn once for every point in the next window. +// The previous value for the dimension is passed to fn. +func (itr *floatReduceBooleanIterator) reduce() ([]BooleanPoint, error) { + // Calculate next window. + var ( + startTime, endTime int64 + window struct { + name string + tags string + } + ) + for { + p, err := itr.input.Next() + if err != nil || p == nil { + return nil, err + } else if p.Nil { + continue + } + + // Unread the point so it can be processed. + itr.input.unread(p) + startTime, endTime = itr.opt.Window(p.Time) + window.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID() + break + } + + // Create points by tags. + m := make(map[string]*floatReduceBooleanPoint) + for { + // Read next point. + curr, err := itr.input.NextInWindow(startTime, endTime) + if err != nil { + return nil, err + } else if curr == nil { + break + } else if curr.Nil { + continue + } else if curr.Name != window.name { + itr.input.unread(curr) + break + } + + // Ensure this point is within the same final window. + if curr.Name != window.name { + itr.input.unread(curr) + break + } else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags { + itr.input.unread(curr) + break + } + + // Retrieve the tags on this point for this level of the query. + // This may be different than the bucket dimensions. + tags := curr.Tags.Subset(itr.dims) + id := tags.ID() + + // Retrieve the aggregator for this name/tag combination or create one. + rp := m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &floatReduceBooleanPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + m[id] = rp + } + rp.Aggregator.AggregateFloat(curr) + } + + // Reverse sort points by name & tag if our output is supposed to be ordered. + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + if len(keys) > 1 && itr.opt.Ordered { + sort.Sort(reverseStringSlice(keys)) + } + + // Assume the points are already sorted until proven otherwise. + sortedByTime := true + // Emit the points for each name & tag combination. + a := make([]BooleanPoint, 0, len(m)) + for _, k := range keys { + rp := m[k] + points := rp.Emitter.Emit() + for i := len(points) - 1; i >= 0; i-- { + points[i].Name = rp.Name + if !itr.keepTags { + points[i].Tags = rp.Tags + } + // Set the points time to the interval time if the reducer didn't provide one. + if points[i].Time == ZeroTime { + points[i].Time = startTime + } else { + sortedByTime = false + } + a = append(a, points[i]) + } + } + + // Points may be out of order. Perform a stable sort by time if requested. + if !sortedByTime && itr.opt.Ordered { + sort.Stable(sort.Reverse(booleanPointsByTime(a))) + } + + return a, nil +} + +// floatStreamBooleanIterator streams inputs into the iterator and emits points gradually. 
+type floatStreamBooleanIterator struct { + input *bufFloatIterator + create func() (FloatPointAggregator, BooleanPointEmitter) + dims []string + opt IteratorOptions + m map[string]*floatReduceBooleanPoint + points []BooleanPoint +} + +// newFloatStreamBooleanIterator returns a new instance of floatStreamBooleanIterator. +func newFloatStreamBooleanIterator(input FloatIterator, createFn func() (FloatPointAggregator, BooleanPointEmitter), opt IteratorOptions) *floatStreamBooleanIterator { + return &floatStreamBooleanIterator{ + input: newBufFloatIterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + m: make(map[string]*floatReduceBooleanPoint), + } +} + +// Stats returns stats from the input iterator. +func (itr *floatStreamBooleanIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *floatStreamBooleanIterator) Close() error { return itr.input.Close() } + +// Next returns the next value for the stream iterator. +func (itr *floatStreamBooleanIterator) Next() (*BooleanPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// reduce creates and manages aggregators for every point from the input. +// After aggregating a point, it always tries to emit a value using the emitter. +func (itr *floatStreamBooleanIterator) reduce() ([]BooleanPoint, error) { + // We have already read all of the input points. + if itr.m == nil { + return nil, nil + } + + for { + // Read next point. + curr, err := itr.input.Next() + if err != nil { + return nil, err + } else if curr == nil { + // Close all of the aggregators to flush any remaining points to emit. + var points []BooleanPoint + for _, rp := range itr.m { + if aggregator, ok := rp.Aggregator.(io.Closer); ok { + if err := aggregator.Close(); err != nil { + return nil, err + } + + pts := rp.Emitter.Emit() + if len(pts) == 0 { + continue + } + + for i := range pts { + pts[i].Name = rp.Name + pts[i].Tags = rp.Tags + } + points = append(points, pts...) + } + } + + // Eliminate the aggregators and emitters. + itr.m = nil + return points, nil + } else if curr.Nil { + continue + } + tags := curr.Tags.Subset(itr.dims) + + id := curr.Name + if len(tags.m) > 0 { + id += "\x00" + tags.ID() + } + + // Retrieve the aggregator for this name/tag combination or create one. + rp := itr.m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &floatReduceBooleanPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + itr.m[id] = rp + } + rp.Aggregator.AggregateFloat(curr) + + // Attempt to emit points from the aggregator. + points := rp.Emitter.Emit() + if len(points) == 0 { + continue + } + + for i := range points { + points[i].Name = rp.Name + points[i].Tags = rp.Tags + } + return points, nil + } +} + +// floatDedupeIterator only outputs unique points. +// This differs from the DistinctIterator in that it compares all aux fields too. +// This iterator is relatively inefficient and should only be used on small +// datasets such as meta query results. 
+type floatDedupeIterator struct { + input FloatIterator + m map[string]struct{} // lookup of points already sent +} + +type floatIteratorMapper struct { + cur Cursor + row Row + driver IteratorMap // which iterator to use for the primary value, can be nil + fields []IteratorMap // which iterator to use for an aux field + point FloatPoint +} + +func newFloatIteratorMapper(cur Cursor, driver IteratorMap, fields []IteratorMap, opt IteratorOptions) *floatIteratorMapper { + return &floatIteratorMapper{ + cur: cur, + driver: driver, + fields: fields, + point: FloatPoint{ + Aux: make([]interface{}, len(fields)), + }, + } +} + +func (itr *floatIteratorMapper) Next() (*FloatPoint, error) { + if !itr.cur.Scan(&itr.row) { + if err := itr.cur.Err(); err != nil { + return nil, err + } + return nil, nil + } + + itr.point.Time = itr.row.Time + itr.point.Name = itr.row.Series.Name + itr.point.Tags = itr.row.Series.Tags + + if itr.driver != nil { + if v := itr.driver.Value(&itr.row); v != nil { + if v, ok := castToFloat(v); ok { + itr.point.Value = v + itr.point.Nil = false + } else { + itr.point.Value = 0 + itr.point.Nil = true + } + } else { + itr.point.Value = 0 + itr.point.Nil = true + } + } + for i, f := range itr.fields { + itr.point.Aux[i] = f.Value(&itr.row) + } + return &itr.point, nil +} + +func (itr *floatIteratorMapper) Stats() IteratorStats { + return itr.cur.Stats() +} + +func (itr *floatIteratorMapper) Close() error { + return itr.cur.Close() +} + +type floatFilterIterator struct { + input FloatIterator + cond influxql.Expr + opt IteratorOptions + m map[string]interface{} +} + +func newFloatFilterIterator(input FloatIterator, cond influxql.Expr, opt IteratorOptions) FloatIterator { + // Strip out time conditions from the WHERE clause. + // TODO(jsternberg): This should really be done for us when creating the IteratorOptions struct. 
+ n := influxql.RewriteFunc(influxql.CloneExpr(cond), func(n influxql.Node) influxql.Node { + switch n := n.(type) { + case *influxql.BinaryExpr: + if n.LHS.String() == "time" { + return &influxql.BooleanLiteral{Val: true} + } + } + return n + }) + + cond, _ = n.(influxql.Expr) + if cond == nil { + return input + } else if n, ok := cond.(*influxql.BooleanLiteral); ok && n.Val { + return input + } + + return &floatFilterIterator{ + input: input, + cond: cond, + opt: opt, + m: make(map[string]interface{}), + } +} + +func (itr *floatFilterIterator) Stats() IteratorStats { return itr.input.Stats() } +func (itr *floatFilterIterator) Close() error { return itr.input.Close() } + +func (itr *floatFilterIterator) Next() (*FloatPoint, error) { + for { + p, err := itr.input.Next() + if err != nil || p == nil { + return nil, err + } + + for i, ref := range itr.opt.Aux { + itr.m[ref.Val] = p.Aux[i] + } + for k, v := range p.Tags.KeyValues() { + itr.m[k] = v + } + + if !influxql.EvalBool(itr.cond, itr.m) { + continue + } + return p, nil + } +} + +type floatTagSubsetIterator struct { + input FloatIterator + point FloatPoint + lastTags Tags + dimensions []string +} + +func newFloatTagSubsetIterator(input FloatIterator, opt IteratorOptions) *floatTagSubsetIterator { + return &floatTagSubsetIterator{ + input: input, + dimensions: opt.GetDimensions(), + } +} + +func (itr *floatTagSubsetIterator) Next() (*FloatPoint, error) { + p, err := itr.input.Next() + if err != nil { + return nil, err + } else if p == nil { + return nil, nil + } + + itr.point.Name = p.Name + if !p.Tags.Equal(itr.lastTags) { + itr.point.Tags = p.Tags.Subset(itr.dimensions) + itr.lastTags = p.Tags + } + itr.point.Time = p.Time + itr.point.Value = p.Value + itr.point.Aux = p.Aux + itr.point.Aggregated = p.Aggregated + itr.point.Nil = p.Nil + return &itr.point, nil +} + +func (itr *floatTagSubsetIterator) Stats() IteratorStats { + return itr.input.Stats() +} + +func (itr *floatTagSubsetIterator) Close() error { + return itr.input.Close() +} + +// newFloatDedupeIterator returns a new instance of floatDedupeIterator. +func newFloatDedupeIterator(input FloatIterator) *floatDedupeIterator { + return &floatDedupeIterator{ + input: input, + m: make(map[string]struct{}), + } +} + +// Stats returns stats from the input iterator. +func (itr *floatDedupeIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *floatDedupeIterator) Close() error { return itr.input.Close() } + +// Next returns the next unique point from the input iterator. +func (itr *floatDedupeIterator) Next() (*FloatPoint, error) { + for { + // Read next point. + p, err := itr.input.Next() + if p == nil || err != nil { + return nil, err + } + + // Serialize to bytes to store in lookup. + buf, err := proto.Marshal(encodeFloatPoint(p)) + if err != nil { + return nil, err + } + + // If the point has already been output then move to the next point. + if _, ok := itr.m[string(buf)]; ok { + continue + } + + // Otherwise mark it as emitted and return point. + itr.m[string(buf)] = struct{}{} + return p, nil + } +} + +// floatReaderIterator represents an iterator that streams from a reader. +type floatReaderIterator struct { + r io.Reader + dec *FloatPointDecoder +} + +// newFloatReaderIterator returns a new instance of floatReaderIterator. 
+func newFloatReaderIterator(ctx context.Context, r io.Reader, stats IteratorStats) *floatReaderIterator {
+	dec := NewFloatPointDecoder(ctx, r)
+	dec.stats = stats
+
+	return &floatReaderIterator{
+		r: r,
+		dec: dec,
+	}
+}
+
+// Stats returns stats about points processed.
+func (itr *floatReaderIterator) Stats() IteratorStats { return itr.dec.stats }
+
+// Close closes the underlying reader, if applicable.
+func (itr *floatReaderIterator) Close() error {
+	if r, ok := itr.r.(io.ReadCloser); ok {
+		return r.Close()
+	}
+	return nil
+}
+
+// Next returns the next point from the iterator.
+func (itr *floatReaderIterator) Next() (*FloatPoint, error) {
+	// OPTIMIZE(benbjohnson): Reuse point on iterator.
+
+	// Unmarshal next point.
+	p := &FloatPoint{}
+	if err := itr.dec.DecodeFloatPoint(p); err == io.EOF {
+		return nil, nil
+	} else if err != nil {
+		return nil, err
+	}
+	return p, nil
+}
+
+// IntegerIterator represents a stream of integer points.
+type IntegerIterator interface {
+	Iterator
+	Next() (*IntegerPoint, error)
+}
+
+// newIntegerIterators converts a slice of Iterator to a slice of IntegerIterator.
+// Drops and closes any iterator in itrs that is not an IntegerIterator and cannot
+// be cast to an IntegerIterator.
+func newIntegerIterators(itrs []Iterator) []IntegerIterator {
+	a := make([]IntegerIterator, 0, len(itrs))
+	for _, itr := range itrs {
+		switch itr := itr.(type) {
+		case IntegerIterator:
+			a = append(a, itr)
+		default:
+			itr.Close()
+		}
+	}
+	return a
+}
+
+// bufIntegerIterator represents a buffered IntegerIterator.
+type bufIntegerIterator struct {
+	itr IntegerIterator
+	buf *IntegerPoint
+}
+
+// newBufIntegerIterator returns a buffered IntegerIterator.
+func newBufIntegerIterator(itr IntegerIterator) *bufIntegerIterator {
+	return &bufIntegerIterator{itr: itr}
+}
+
+// Stats returns statistics from the input iterator.
+func (itr *bufIntegerIterator) Stats() IteratorStats { return itr.itr.Stats() }
+
+// Close closes the underlying iterator.
+func (itr *bufIntegerIterator) Close() error { return itr.itr.Close() }
+
+// peek returns the next point without removing it from the iterator.
+func (itr *bufIntegerIterator) peek() (*IntegerPoint, error) {
+	p, err := itr.Next()
+	if err != nil {
+		return nil, err
+	}
+	itr.unread(p)
+	return p, nil
+}
+
+// peekTime returns the time of the next point.
+// Returns zero time if no more points available.
+func (itr *bufIntegerIterator) peekTime() (int64, error) {
+	p, err := itr.peek()
+	if p == nil || err != nil {
+		return ZeroTime, err
+	}
+	return p.Time, nil
+}
+
+// Next returns the current buffer, if exists, or calls the underlying iterator.
+func (itr *bufIntegerIterator) Next() (*IntegerPoint, error) {
+	buf := itr.buf
+	if buf != nil {
+		itr.buf = nil
+		return buf, nil
+	}
+	return itr.itr.Next()
+}
+
+// NextInWindow returns the next value if it is between [startTime, endTime).
+// If the next value is outside the range then it is moved to the buffer.
+func (itr *bufIntegerIterator) NextInWindow(startTime, endTime int64) (*IntegerPoint, error) {
+	v, err := itr.Next()
+	if v == nil || err != nil {
+		return nil, err
+	} else if t := v.Time; t >= endTime || t < startTime {
+		itr.unread(v)
+		return nil, nil
+	}
+	return v, nil
+}
+
+// unread sets v to the buffer. It is read on the next call to Next().
+func (itr *bufIntegerIterator) unread(v *IntegerPoint) { itr.buf = v }
+
+// integerMergeIterator represents an iterator that combines multiple integer iterators.
+type integerMergeIterator struct { + inputs []IntegerIterator + heap *integerMergeHeap + init bool + + closed bool + mu sync.RWMutex + + // Current iterator and window. + curr *integerMergeHeapItem + window struct { + name string + tags string + startTime int64 + endTime int64 + } +} + +// newIntegerMergeIterator returns a new instance of integerMergeIterator. +func newIntegerMergeIterator(inputs []IntegerIterator, opt IteratorOptions) *integerMergeIterator { + itr := &integerMergeIterator{ + inputs: inputs, + heap: &integerMergeHeap{ + items: make([]*integerMergeHeapItem, 0, len(inputs)), + opt: opt, + }, + } + + // Initialize heap items. + for _, input := range inputs { + // Wrap in buffer, ignore any inputs without anymore points. + bufInput := newBufIntegerIterator(input) + + // Append to the heap. + itr.heap.items = append(itr.heap.items, &integerMergeHeapItem{itr: bufInput}) + } + + return itr +} + +// Stats returns an aggregation of stats from the underlying iterators. +func (itr *integerMergeIterator) Stats() IteratorStats { + var stats IteratorStats + for _, input := range itr.inputs { + stats.Add(input.Stats()) + } + return stats +} + +// Close closes the underlying iterators. +func (itr *integerMergeIterator) Close() error { + itr.mu.Lock() + defer itr.mu.Unlock() + + for _, input := range itr.inputs { + input.Close() + } + itr.curr = nil + itr.inputs = nil + itr.heap.items = nil + itr.closed = true + return nil +} + +// Next returns the next point from the iterator. +func (itr *integerMergeIterator) Next() (*IntegerPoint, error) { + itr.mu.RLock() + defer itr.mu.RUnlock() + if itr.closed { + return nil, nil + } + + // Initialize the heap. This needs to be done lazily on the first call to this iterator + // so that iterator initialization done through the Select() call returns quickly. + // Queries can only be interrupted after the Select() call completes so any operations + // done during iterator creation cannot be interrupted, which is why we do it here + // instead so an interrupt can happen while initializing the heap. + if !itr.init { + items := itr.heap.items + itr.heap.items = make([]*integerMergeHeapItem, 0, len(items)) + for _, item := range items { + if p, err := item.itr.peek(); err != nil { + return nil, err + } else if p == nil { + continue + } + itr.heap.items = append(itr.heap.items, item) + } + heap.Init(itr.heap) + itr.init = true + } + + for { + // Retrieve the next iterator if we don't have one. + if itr.curr == nil { + if len(itr.heap.items) == 0 { + return nil, nil + } + itr.curr = heap.Pop(itr.heap).(*integerMergeHeapItem) + + // Read point and set current window. + p, err := itr.curr.itr.Next() + if err != nil { + return nil, err + } + tags := p.Tags.Subset(itr.heap.opt.Dimensions) + itr.window.name, itr.window.tags = p.Name, tags.ID() + itr.window.startTime, itr.window.endTime = itr.heap.opt.Window(p.Time) + return p, nil + } + + // Read the next point from the current iterator. + p, err := itr.curr.itr.Next() + if err != nil { + return nil, err + } + + // If there are no more points then remove iterator from heap and find next. + if p == nil { + itr.curr = nil + continue + } + + // Check if the point is inside of our current window. 
+ inWindow := true + if window := itr.window; window.name != p.Name { + inWindow = false + } else if tags := p.Tags.Subset(itr.heap.opt.Dimensions); window.tags != tags.ID() { + inWindow = false + } else if opt := itr.heap.opt; opt.Ascending && p.Time >= window.endTime { + inWindow = false + } else if !opt.Ascending && p.Time < window.startTime { + inWindow = false + } + + // If it's outside our window then push iterator back on the heap and find new iterator. + if !inWindow { + itr.curr.itr.unread(p) + heap.Push(itr.heap, itr.curr) + itr.curr = nil + continue + } + + return p, nil + } +} + +// integerMergeHeap represents a heap of integerMergeHeapItems. +// Items are sorted by their next window and then by name/tags. +type integerMergeHeap struct { + opt IteratorOptions + items []*integerMergeHeapItem +} + +func (h *integerMergeHeap) Len() int { return len(h.items) } +func (h *integerMergeHeap) Swap(i, j int) { h.items[i], h.items[j] = h.items[j], h.items[i] } +func (h *integerMergeHeap) Less(i, j int) bool { + x, err := h.items[i].itr.peek() + if err != nil { + return true + } + y, err := h.items[j].itr.peek() + if err != nil { + return false + } + + if h.opt.Ascending { + if x.Name != y.Name { + return x.Name < y.Name + } else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); xTags.ID() != yTags.ID() { + return xTags.ID() < yTags.ID() + } + } else { + if x.Name != y.Name { + return x.Name > y.Name + } else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); xTags.ID() != yTags.ID() { + return xTags.ID() > yTags.ID() + } + } + + xt, _ := h.opt.Window(x.Time) + yt, _ := h.opt.Window(y.Time) + + if h.opt.Ascending { + return xt < yt + } + return xt > yt +} + +func (h *integerMergeHeap) Push(x interface{}) { + h.items = append(h.items, x.(*integerMergeHeapItem)) +} + +func (h *integerMergeHeap) Pop() interface{} { + old := h.items + n := len(old) + item := old[n-1] + h.items = old[0 : n-1] + return item +} + +type integerMergeHeapItem struct { + itr *bufIntegerIterator +} + +// integerSortedMergeIterator is an iterator that sorts and merges multiple iterators into one. +type integerSortedMergeIterator struct { + inputs []IntegerIterator + heap *integerSortedMergeHeap + init bool +} + +// newIntegerSortedMergeIterator returns an instance of integerSortedMergeIterator. +func newIntegerSortedMergeIterator(inputs []IntegerIterator, opt IteratorOptions) Iterator { + itr := &integerSortedMergeIterator{ + inputs: inputs, + heap: &integerSortedMergeHeap{ + items: make([]*integerSortedMergeHeapItem, 0, len(inputs)), + opt: opt, + }, + } + + // Initialize heap items. + for _, input := range inputs { + // Append to the heap. + itr.heap.items = append(itr.heap.items, &integerSortedMergeHeapItem{itr: input}) + } + + return itr +} + +// Stats returns an aggregation of stats from the underlying iterators. +func (itr *integerSortedMergeIterator) Stats() IteratorStats { + var stats IteratorStats + for _, input := range itr.inputs { + stats.Add(input.Stats()) + } + return stats +} + +// Close closes the underlying iterators. +func (itr *integerSortedMergeIterator) Close() error { + for _, input := range itr.inputs { + input.Close() + } + return nil +} + +// Next returns the next points from the iterator. +func (itr *integerSortedMergeIterator) Next() (*IntegerPoint, error) { return itr.pop() } + +// pop returns the next point from the heap. +// Reads the next point from item's cursor and puts it back on the heap. 
+func (itr *integerSortedMergeIterator) pop() (*IntegerPoint, error) { + // Initialize the heap. See the MergeIterator to see why this has to be done lazily. + if !itr.init { + items := itr.heap.items + itr.heap.items = make([]*integerSortedMergeHeapItem, 0, len(items)) + for _, item := range items { + var err error + if item.point, err = item.itr.Next(); err != nil { + return nil, err + } else if item.point == nil { + continue + } + itr.heap.items = append(itr.heap.items, item) + } + heap.Init(itr.heap) + itr.init = true + } + + if len(itr.heap.items) == 0 { + return nil, nil + } + + // Read the next item from the heap. + item := heap.Pop(itr.heap).(*integerSortedMergeHeapItem) + if item.err != nil { + return nil, item.err + } else if item.point == nil { + return nil, nil + } + + // Copy the point for return. + p := item.point.Clone() + + // Read the next item from the cursor. Push back to heap if one exists. + if item.point, item.err = item.itr.Next(); item.point != nil { + heap.Push(itr.heap, item) + } + + return p, nil +} + +// integerSortedMergeHeap represents a heap of integerSortedMergeHeapItems. +// Items are sorted with the following priority: +// - By their measurement name; +// - By their tag keys/values; +// - By time; or +// - By their Aux field values. +// +type integerSortedMergeHeap struct { + opt IteratorOptions + items []*integerSortedMergeHeapItem +} + +func (h *integerSortedMergeHeap) Len() int { return len(h.items) } +func (h *integerSortedMergeHeap) Swap(i, j int) { h.items[i], h.items[j] = h.items[j], h.items[i] } +func (h *integerSortedMergeHeap) Less(i, j int) bool { + x, y := h.items[i].point, h.items[j].point + + if h.opt.Ascending { + if x.Name != y.Name { + return x.Name < y.Name + } else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); !xTags.Equals(&yTags) { + return xTags.ID() < yTags.ID() + } + + if x.Time != y.Time { + return x.Time < y.Time + } + + if len(x.Aux) > 0 && len(x.Aux) == len(y.Aux) { + for i := 0; i < len(x.Aux); i++ { + v1, ok1 := x.Aux[i].(string) + v2, ok2 := y.Aux[i].(string) + if !ok1 || !ok2 { + // Unsupported types used in Aux fields. Maybe they + // need to be added here? + return false + } else if v1 == v2 { + continue + } + return v1 < v2 + } + } + return false // Times and/or Aux fields are equal. + } + + if x.Name != y.Name { + return x.Name > y.Name + } else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); !xTags.Equals(&yTags) { + return xTags.ID() > yTags.ID() + } + + if x.Time != y.Time { + return x.Time > y.Time + } + + if len(x.Aux) > 0 && len(x.Aux) == len(y.Aux) { + for i := 0; i < len(x.Aux); i++ { + v1, ok1 := x.Aux[i].(string) + v2, ok2 := y.Aux[i].(string) + if !ok1 || !ok2 { + // Unsupported types used in Aux fields. Maybe they + // need to be added here? + return false + } else if v1 == v2 { + continue + } + return v1 > v2 + } + } + return false // Times and/or Aux fields are equal. +} + +func (h *integerSortedMergeHeap) Push(x interface{}) { + h.items = append(h.items, x.(*integerSortedMergeHeapItem)) +} + +func (h *integerSortedMergeHeap) Pop() interface{} { + old := h.items + n := len(old) + item := old[n-1] + h.items = old[0 : n-1] + return item +} + +type integerSortedMergeHeapItem struct { + point *IntegerPoint + err error + itr IntegerIterator +} + +// integerIteratorScanner scans the results of a IntegerIterator into a map. 
+type integerIteratorScanner struct { + input *bufIntegerIterator + err error + keys []influxql.VarRef + defaultValue interface{} +} + +// newIntegerIteratorScanner creates a new IteratorScanner. +func newIntegerIteratorScanner(input IntegerIterator, keys []influxql.VarRef, defaultValue interface{}) *integerIteratorScanner { + return &integerIteratorScanner{ + input: newBufIntegerIterator(input), + keys: keys, + defaultValue: defaultValue, + } +} + +func (s *integerIteratorScanner) Peek() (int64, string, Tags) { + if s.err != nil { + return ZeroTime, "", Tags{} + } + + p, err := s.input.peek() + if err != nil { + s.err = err + return ZeroTime, "", Tags{} + } else if p == nil { + return ZeroTime, "", Tags{} + } + return p.Time, p.Name, p.Tags +} + +func (s *integerIteratorScanner) ScanAt(ts int64, name string, tags Tags, m map[string]interface{}) { + if s.err != nil { + return + } + + p, err := s.input.Next() + if err != nil { + s.err = err + return + } else if p == nil { + s.useDefaults(m) + return + } else if p.Time != ts || p.Name != name || !p.Tags.Equals(&tags) { + s.useDefaults(m) + s.input.unread(p) + return + } + + if k := s.keys[0]; k.Val != "" { + if p.Nil { + if s.defaultValue != SkipDefault { + m[k.Val] = castToType(s.defaultValue, k.Type) + } + } else { + m[k.Val] = p.Value + } + } + for i, v := range p.Aux { + k := s.keys[i+1] + switch v.(type) { + case float64, int64, uint64, string, bool: + m[k.Val] = v + default: + // Insert the fill value if one was specified. + if s.defaultValue != SkipDefault { + m[k.Val] = castToType(s.defaultValue, k.Type) + } + } + } +} + +func (s *integerIteratorScanner) useDefaults(m map[string]interface{}) { + if s.defaultValue == SkipDefault { + return + } + for _, k := range s.keys { + if k.Val == "" { + continue + } + m[k.Val] = castToType(s.defaultValue, k.Type) + } +} + +func (s *integerIteratorScanner) Stats() IteratorStats { return s.input.Stats() } +func (s *integerIteratorScanner) Err() error { return s.err } +func (s *integerIteratorScanner) Close() error { return s.input.Close() } + +// integerParallelIterator represents an iterator that pulls data in a separate goroutine. +type integerParallelIterator struct { + input IntegerIterator + ch chan integerPointError + + once sync.Once + closing chan struct{} + wg sync.WaitGroup +} + +// newIntegerParallelIterator returns a new instance of integerParallelIterator. +func newIntegerParallelIterator(input IntegerIterator) *integerParallelIterator { + itr := &integerParallelIterator{ + input: input, + ch: make(chan integerPointError, 256), + closing: make(chan struct{}), + } + itr.wg.Add(1) + go itr.monitor() + return itr +} + +// Stats returns stats from the underlying iterator. +func (itr *integerParallelIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the underlying iterators. +func (itr *integerParallelIterator) Close() error { + itr.once.Do(func() { close(itr.closing) }) + itr.wg.Wait() + return itr.input.Close() +} + +// Next returns the next point from the iterator. +func (itr *integerParallelIterator) Next() (*IntegerPoint, error) { + v, ok := <-itr.ch + if !ok { + return nil, io.EOF + } + return v.point, v.err +} + +// monitor runs in a separate goroutine and actively pulls the next point. +func (itr *integerParallelIterator) monitor() { + defer close(itr.ch) + defer itr.wg.Done() + + for { + // Read next point. 
+ p, err := itr.input.Next() + if p != nil { + p = p.Clone() + } + + select { + case <-itr.closing: + return + case itr.ch <- integerPointError{point: p, err: err}: + } + } +} + +type integerPointError struct { + point *IntegerPoint + err error +} + +// integerLimitIterator represents an iterator that limits points per group. +type integerLimitIterator struct { + input IntegerIterator + opt IteratorOptions + n int + + prev struct { + name string + tags Tags + } +} + +// newIntegerLimitIterator returns a new instance of integerLimitIterator. +func newIntegerLimitIterator(input IntegerIterator, opt IteratorOptions) *integerLimitIterator { + return &integerLimitIterator{ + input: input, + opt: opt, + } +} + +// Stats returns stats from the underlying iterator. +func (itr *integerLimitIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the underlying iterators. +func (itr *integerLimitIterator) Close() error { return itr.input.Close() } + +// Next returns the next point from the iterator. +func (itr *integerLimitIterator) Next() (*IntegerPoint, error) { + for { + p, err := itr.input.Next() + if p == nil || err != nil { + return nil, err + } + + // Reset window and counter if a new window is encountered. + if p.Name != itr.prev.name || !p.Tags.Equals(&itr.prev.tags) { + itr.prev.name = p.Name + itr.prev.tags = p.Tags + itr.n = 0 + } + + // Increment counter. + itr.n++ + + // Read next point if not beyond the offset. + if itr.n <= itr.opt.Offset { + continue + } + + // Read next point if we're beyond the limit. + if itr.opt.Limit > 0 && (itr.n-itr.opt.Offset) > itr.opt.Limit { + continue + } + + return p, nil + } +} + +type integerFillIterator struct { + input *bufIntegerIterator + prev IntegerPoint + startTime int64 + endTime int64 + auxFields []interface{} + init bool + opt IteratorOptions + + window struct { + name string + tags Tags + time int64 + offset int64 + } +} + +func newIntegerFillIterator(input IntegerIterator, expr influxql.Expr, opt IteratorOptions) *integerFillIterator { + if opt.Fill == influxql.NullFill { + if expr, ok := expr.(*influxql.Call); ok && expr.Name == "count" { + opt.Fill = influxql.NumberFill + opt.FillValue = int64(0) + } + } + + var startTime, endTime int64 + if opt.Ascending { + startTime, _ = opt.Window(opt.StartTime) + endTime, _ = opt.Window(opt.EndTime) + } else { + startTime, _ = opt.Window(opt.EndTime) + endTime, _ = opt.Window(opt.StartTime) + } + + var auxFields []interface{} + if len(opt.Aux) > 0 { + auxFields = make([]interface{}, len(opt.Aux)) + } + + return &integerFillIterator{ + input: newBufIntegerIterator(input), + prev: IntegerPoint{Nil: true}, + startTime: startTime, + endTime: endTime, + auxFields: auxFields, + opt: opt, + } +} + +func (itr *integerFillIterator) Stats() IteratorStats { return itr.input.Stats() } +func (itr *integerFillIterator) Close() error { return itr.input.Close() } + +func (itr *integerFillIterator) Next() (*IntegerPoint, error) { + if !itr.init { + p, err := itr.input.peek() + if p == nil || err != nil { + return nil, err + } + itr.window.name, itr.window.tags = p.Name, p.Tags + itr.window.time = itr.startTime + if itr.startTime == influxql.MinTime { + itr.window.time, _ = itr.opt.Window(p.Time) + } + if itr.opt.Location != nil { + _, itr.window.offset = itr.opt.Zone(itr.window.time) + } + itr.init = true + } + + p, err := itr.input.Next() + if err != nil { + return nil, err + } + + // Check if the next point is outside of our window or is nil. 
+ if p == nil || p.Name != itr.window.name || p.Tags.ID() != itr.window.tags.ID() { + // If we are inside of an interval, unread the point and continue below to + // constructing a new point. + if itr.opt.Ascending && itr.window.time <= itr.endTime { + itr.input.unread(p) + p = nil + goto CONSTRUCT + } else if !itr.opt.Ascending && itr.window.time >= itr.endTime && itr.endTime != influxql.MinTime { + itr.input.unread(p) + p = nil + goto CONSTRUCT + } + + // We are *not* in a current interval. If there is no next point, + // we are at the end of all intervals. + if p == nil { + return nil, nil + } + + // Set the new interval. + itr.window.name, itr.window.tags = p.Name, p.Tags + itr.window.time = itr.startTime + if itr.window.time == influxql.MinTime { + itr.window.time, _ = itr.opt.Window(p.Time) + } + if itr.opt.Location != nil { + _, itr.window.offset = itr.opt.Zone(itr.window.time) + } + itr.prev = IntegerPoint{Nil: true} + } + + // Check if the point is our next expected point. +CONSTRUCT: + if p == nil || (itr.opt.Ascending && p.Time > itr.window.time) || (!itr.opt.Ascending && p.Time < itr.window.time) { + if p != nil { + itr.input.unread(p) + } + + p = &IntegerPoint{ + Name: itr.window.name, + Tags: itr.window.tags, + Time: itr.window.time, + Aux: itr.auxFields, + } + + switch itr.opt.Fill { + case influxql.LinearFill: + if !itr.prev.Nil { + next, err := itr.input.peek() + if err != nil { + return nil, err + } else if next != nil && next.Name == itr.window.name && next.Tags.ID() == itr.window.tags.ID() { + interval := int64(itr.opt.Interval.Duration) + start := itr.window.time / interval + p.Value = linearInteger(start, itr.prev.Time/interval, next.Time/interval, itr.prev.Value, next.Value) + } else { + p.Nil = true + } + } else { + p.Nil = true + } + + case influxql.NullFill: + p.Nil = true + case influxql.NumberFill: + p.Value, _ = castToInteger(itr.opt.FillValue) + case influxql.PreviousFill: + if !itr.prev.Nil { + p.Value = itr.prev.Value + p.Nil = itr.prev.Nil + } else { + p.Nil = true + } + } + } else { + itr.prev = *p + } + + // Advance the expected time. Do not advance to a new window here + // as there may be lingering points with the same timestamp in the previous + // window. + if itr.opt.Ascending { + itr.window.time += int64(itr.opt.Interval.Duration) + } else { + itr.window.time -= int64(itr.opt.Interval.Duration) + } + + // Check to see if we have passed over an offset change and adjust the time + // to account for this new offset. + if itr.opt.Location != nil { + if _, offset := itr.opt.Zone(itr.window.time - 1); offset != itr.window.offset { + diff := itr.window.offset - offset + if abs(diff) < int64(itr.opt.Interval.Duration) { + itr.window.time += diff + } + itr.window.offset = offset + } + } + return p, nil +} + +// integerIntervalIterator represents a integer implementation of IntervalIterator. 
+type integerIntervalIterator struct {
+	input IntegerIterator
+	opt IteratorOptions
+}
+
+func newIntegerIntervalIterator(input IntegerIterator, opt IteratorOptions) *integerIntervalIterator {
+	return &integerIntervalIterator{input: input, opt: opt}
+}
+
+func (itr *integerIntervalIterator) Stats() IteratorStats { return itr.input.Stats() }
+func (itr *integerIntervalIterator) Close() error { return itr.input.Close() }
+
+func (itr *integerIntervalIterator) Next() (*IntegerPoint, error) {
+	p, err := itr.input.Next()
+	if p == nil || err != nil {
+		return nil, err
+	}
+	p.Time, _ = itr.opt.Window(p.Time)
+	// If we see the minimum allowable time, set the time to zero so we don't
+	// break the default returned time for aggregate queries without times.
+	if p.Time == influxql.MinTime {
+		p.Time = 0
+	}
+	return p, nil
+}
+
+// integerInterruptIterator represents an integer implementation of InterruptIterator.
+type integerInterruptIterator struct {
+	input IntegerIterator
+	closing <-chan struct{}
+	count int
+}
+
+func newIntegerInterruptIterator(input IntegerIterator, closing <-chan struct{}) *integerInterruptIterator {
+	return &integerInterruptIterator{input: input, closing: closing}
+}
+
+func (itr *integerInterruptIterator) Stats() IteratorStats { return itr.input.Stats() }
+func (itr *integerInterruptIterator) Close() error { return itr.input.Close() }
+
+func (itr *integerInterruptIterator) Next() (*IntegerPoint, error) {
+	// Only check if the channel is closed every N points. This
+	// intentionally checks on both 0 and N so that if the iterator
+	// has been interrupted before the first point is emitted it will
+	// not emit any points.
+	if itr.count&0xFF == 0xFF {
+		select {
+		case <-itr.closing:
+			return nil, itr.Close()
+		default:
+			// Reset iterator count to zero and fall through to emit the next point.
+			itr.count = 0
+		}
+	}
+
+	// Increment the counter for every point read.
+	itr.count++
+	return itr.input.Next()
+}
+
+// integerCloseInterruptIterator represents an integer implementation of CloseInterruptIterator.
+type integerCloseInterruptIterator struct {
+	input IntegerIterator
+	closing <-chan struct{}
+	done chan struct{}
+	once sync.Once
+}
+
+func newIntegerCloseInterruptIterator(input IntegerIterator, closing <-chan struct{}) *integerCloseInterruptIterator {
+	itr := &integerCloseInterruptIterator{
+		input: input,
+		closing: closing,
+		done: make(chan struct{}),
+	}
+	go itr.monitor()
+	return itr
+}
+
+func (itr *integerCloseInterruptIterator) monitor() {
+	select {
+	case <-itr.closing:
+		itr.Close()
+	case <-itr.done:
+	}
+}
+
+func (itr *integerCloseInterruptIterator) Stats() IteratorStats {
+	return itr.input.Stats()
+}
+
+func (itr *integerCloseInterruptIterator) Close() error {
+	itr.once.Do(func() {
+		close(itr.done)
+		itr.input.Close()
+	})
+	return nil
+}
+
+func (itr *integerCloseInterruptIterator) Next() (*IntegerPoint, error) {
+	p, err := itr.input.Next()
+	if err != nil {
+		// Check if the iterator was closed.
+		select {
+		case <-itr.done:
+			return nil, nil
+		default:
+			return nil, err
+		}
+	}
+	return p, nil
+}
+
+// integerReduceFloatIterator executes a reducer for every interval and buffers the result.
+type integerReduceFloatIterator struct { + input *bufIntegerIterator + create func() (IntegerPointAggregator, FloatPointEmitter) + dims []string + opt IteratorOptions + points []FloatPoint + keepTags bool +} + +func newIntegerReduceFloatIterator(input IntegerIterator, opt IteratorOptions, createFn func() (IntegerPointAggregator, FloatPointEmitter)) *integerReduceFloatIterator { + return &integerReduceFloatIterator{ + input: newBufIntegerIterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + } +} + +// Stats returns stats from the input iterator. +func (itr *integerReduceFloatIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *integerReduceFloatIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *integerReduceFloatIterator) Next() (*FloatPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// integerReduceFloatPoint stores the reduced data for a name/tag combination. +type integerReduceFloatPoint struct { + Name string + Tags Tags + Aggregator IntegerPointAggregator + Emitter FloatPointEmitter +} + +// reduce executes fn once for every point in the next window. +// The previous value for the dimension is passed to fn. +func (itr *integerReduceFloatIterator) reduce() ([]FloatPoint, error) { + // Calculate next window. + var ( + startTime, endTime int64 + window struct { + name string + tags string + } + ) + for { + p, err := itr.input.Next() + if err != nil || p == nil { + return nil, err + } else if p.Nil { + continue + } + + // Unread the point so it can be processed. + itr.input.unread(p) + startTime, endTime = itr.opt.Window(p.Time) + window.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID() + break + } + + // Create points by tags. + m := make(map[string]*integerReduceFloatPoint) + for { + // Read next point. + curr, err := itr.input.NextInWindow(startTime, endTime) + if err != nil { + return nil, err + } else if curr == nil { + break + } else if curr.Nil { + continue + } else if curr.Name != window.name { + itr.input.unread(curr) + break + } + + // Ensure this point is within the same final window. + if curr.Name != window.name { + itr.input.unread(curr) + break + } else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags { + itr.input.unread(curr) + break + } + + // Retrieve the tags on this point for this level of the query. + // This may be different than the bucket dimensions. + tags := curr.Tags.Subset(itr.dims) + id := tags.ID() + + // Retrieve the aggregator for this name/tag combination or create one. + rp := m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &integerReduceFloatPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + m[id] = rp + } + rp.Aggregator.AggregateInteger(curr) + } + + // Reverse sort points by name & tag if our output is supposed to be ordered. + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + if len(keys) > 1 && itr.opt.Ordered { + sort.Sort(reverseStringSlice(keys)) + } + + // Assume the points are already sorted until proven otherwise. 
+ sortedByTime := true + // Emit the points for each name & tag combination. + a := make([]FloatPoint, 0, len(m)) + for _, k := range keys { + rp := m[k] + points := rp.Emitter.Emit() + for i := len(points) - 1; i >= 0; i-- { + points[i].Name = rp.Name + if !itr.keepTags { + points[i].Tags = rp.Tags + } + // Set the points time to the interval time if the reducer didn't provide one. + if points[i].Time == ZeroTime { + points[i].Time = startTime + } else { + sortedByTime = false + } + a = append(a, points[i]) + } + } + + // Points may be out of order. Perform a stable sort by time if requested. + if !sortedByTime && itr.opt.Ordered { + sort.Stable(sort.Reverse(floatPointsByTime(a))) + } + + return a, nil +} + +// integerStreamFloatIterator streams inputs into the iterator and emits points gradually. +type integerStreamFloatIterator struct { + input *bufIntegerIterator + create func() (IntegerPointAggregator, FloatPointEmitter) + dims []string + opt IteratorOptions + m map[string]*integerReduceFloatPoint + points []FloatPoint +} + +// newIntegerStreamFloatIterator returns a new instance of integerStreamFloatIterator. +func newIntegerStreamFloatIterator(input IntegerIterator, createFn func() (IntegerPointAggregator, FloatPointEmitter), opt IteratorOptions) *integerStreamFloatIterator { + return &integerStreamFloatIterator{ + input: newBufIntegerIterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + m: make(map[string]*integerReduceFloatPoint), + } +} + +// Stats returns stats from the input iterator. +func (itr *integerStreamFloatIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *integerStreamFloatIterator) Close() error { return itr.input.Close() } + +// Next returns the next value for the stream iterator. +func (itr *integerStreamFloatIterator) Next() (*FloatPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// reduce creates and manages aggregators for every point from the input. +// After aggregating a point, it always tries to emit a value using the emitter. +func (itr *integerStreamFloatIterator) reduce() ([]FloatPoint, error) { + // We have already read all of the input points. + if itr.m == nil { + return nil, nil + } + + for { + // Read next point. + curr, err := itr.input.Next() + if err != nil { + return nil, err + } else if curr == nil { + // Close all of the aggregators to flush any remaining points to emit. + var points []FloatPoint + for _, rp := range itr.m { + if aggregator, ok := rp.Aggregator.(io.Closer); ok { + if err := aggregator.Close(); err != nil { + return nil, err + } + + pts := rp.Emitter.Emit() + if len(pts) == 0 { + continue + } + + for i := range pts { + pts[i].Name = rp.Name + pts[i].Tags = rp.Tags + } + points = append(points, pts...) + } + } + + // Eliminate the aggregators and emitters. + itr.m = nil + return points, nil + } else if curr.Nil { + continue + } + tags := curr.Tags.Subset(itr.dims) + + id := curr.Name + if len(tags.m) > 0 { + id += "\x00" + tags.ID() + } + + // Retrieve the aggregator for this name/tag combination or create one. 
+ rp := itr.m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &integerReduceFloatPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + itr.m[id] = rp + } + rp.Aggregator.AggregateInteger(curr) + + // Attempt to emit points from the aggregator. + points := rp.Emitter.Emit() + if len(points) == 0 { + continue + } + + for i := range points { + points[i].Name = rp.Name + points[i].Tags = rp.Tags + } + return points, nil + } +} + +// integerReduceIntegerIterator executes a reducer for every interval and buffers the result. +type integerReduceIntegerIterator struct { + input *bufIntegerIterator + create func() (IntegerPointAggregator, IntegerPointEmitter) + dims []string + opt IteratorOptions + points []IntegerPoint + keepTags bool +} + +func newIntegerReduceIntegerIterator(input IntegerIterator, opt IteratorOptions, createFn func() (IntegerPointAggregator, IntegerPointEmitter)) *integerReduceIntegerIterator { + return &integerReduceIntegerIterator{ + input: newBufIntegerIterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + } +} + +// Stats returns stats from the input iterator. +func (itr *integerReduceIntegerIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *integerReduceIntegerIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *integerReduceIntegerIterator) Next() (*IntegerPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// integerReduceIntegerPoint stores the reduced data for a name/tag combination. +type integerReduceIntegerPoint struct { + Name string + Tags Tags + Aggregator IntegerPointAggregator + Emitter IntegerPointEmitter +} + +// reduce executes fn once for every point in the next window. +// The previous value for the dimension is passed to fn. +func (itr *integerReduceIntegerIterator) reduce() ([]IntegerPoint, error) { + // Calculate next window. + var ( + startTime, endTime int64 + window struct { + name string + tags string + } + ) + for { + p, err := itr.input.Next() + if err != nil || p == nil { + return nil, err + } else if p.Nil { + continue + } + + // Unread the point so it can be processed. + itr.input.unread(p) + startTime, endTime = itr.opt.Window(p.Time) + window.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID() + break + } + + // Create points by tags. + m := make(map[string]*integerReduceIntegerPoint) + for { + // Read next point. + curr, err := itr.input.NextInWindow(startTime, endTime) + if err != nil { + return nil, err + } else if curr == nil { + break + } else if curr.Nil { + continue + } else if curr.Name != window.name { + itr.input.unread(curr) + break + } + + // Ensure this point is within the same final window. + if curr.Name != window.name { + itr.input.unread(curr) + break + } else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags { + itr.input.unread(curr) + break + } + + // Retrieve the tags on this point for this level of the query. + // This may be different than the bucket dimensions. 
+ tags := curr.Tags.Subset(itr.dims) + id := tags.ID() + + // Retrieve the aggregator for this name/tag combination or create one. + rp := m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &integerReduceIntegerPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + m[id] = rp + } + rp.Aggregator.AggregateInteger(curr) + } + + // Reverse sort points by name & tag if our output is supposed to be ordered. + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + if len(keys) > 1 && itr.opt.Ordered { + sort.Sort(reverseStringSlice(keys)) + } + + // Assume the points are already sorted until proven otherwise. + sortedByTime := true + // Emit the points for each name & tag combination. + a := make([]IntegerPoint, 0, len(m)) + for _, k := range keys { + rp := m[k] + points := rp.Emitter.Emit() + for i := len(points) - 1; i >= 0; i-- { + points[i].Name = rp.Name + if !itr.keepTags { + points[i].Tags = rp.Tags + } + // Set the points time to the interval time if the reducer didn't provide one. + if points[i].Time == ZeroTime { + points[i].Time = startTime + } else { + sortedByTime = false + } + a = append(a, points[i]) + } + } + + // Points may be out of order. Perform a stable sort by time if requested. + if !sortedByTime && itr.opt.Ordered { + sort.Stable(sort.Reverse(integerPointsByTime(a))) + } + + return a, nil +} + +// integerStreamIntegerIterator streams inputs into the iterator and emits points gradually. +type integerStreamIntegerIterator struct { + input *bufIntegerIterator + create func() (IntegerPointAggregator, IntegerPointEmitter) + dims []string + opt IteratorOptions + m map[string]*integerReduceIntegerPoint + points []IntegerPoint +} + +// newIntegerStreamIntegerIterator returns a new instance of integerStreamIntegerIterator. +func newIntegerStreamIntegerIterator(input IntegerIterator, createFn func() (IntegerPointAggregator, IntegerPointEmitter), opt IteratorOptions) *integerStreamIntegerIterator { + return &integerStreamIntegerIterator{ + input: newBufIntegerIterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + m: make(map[string]*integerReduceIntegerPoint), + } +} + +// Stats returns stats from the input iterator. +func (itr *integerStreamIntegerIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *integerStreamIntegerIterator) Close() error { return itr.input.Close() } + +// Next returns the next value for the stream iterator. +func (itr *integerStreamIntegerIterator) Next() (*IntegerPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// reduce creates and manages aggregators for every point from the input. +// After aggregating a point, it always tries to emit a value using the emitter. +func (itr *integerStreamIntegerIterator) reduce() ([]IntegerPoint, error) { + // We have already read all of the input points. + if itr.m == nil { + return nil, nil + } + + for { + // Read next point. + curr, err := itr.input.Next() + if err != nil { + return nil, err + } else if curr == nil { + // Close all of the aggregators to flush any remaining points to emit. 
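+ // Editor note: only aggregators that also implement io.Closer are flushed here; closing them is what lets their emitter produce any remaining buffered points collected below.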
+ var points []IntegerPoint + for _, rp := range itr.m { + if aggregator, ok := rp.Aggregator.(io.Closer); ok { + if err := aggregator.Close(); err != nil { + return nil, err + } + + pts := rp.Emitter.Emit() + if len(pts) == 0 { + continue + } + + for i := range pts { + pts[i].Name = rp.Name + pts[i].Tags = rp.Tags + } + points = append(points, pts...) + } + } + + // Eliminate the aggregators and emitters. + itr.m = nil + return points, nil + } else if curr.Nil { + continue + } + tags := curr.Tags.Subset(itr.dims) + + id := curr.Name + if len(tags.m) > 0 { + id += "\x00" + tags.ID() + } + + // Retrieve the aggregator for this name/tag combination or create one. + rp := itr.m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &integerReduceIntegerPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + itr.m[id] = rp + } + rp.Aggregator.AggregateInteger(curr) + + // Attempt to emit points from the aggregator. + points := rp.Emitter.Emit() + if len(points) == 0 { + continue + } + + for i := range points { + points[i].Name = rp.Name + points[i].Tags = rp.Tags + } + return points, nil + } +} + +// integerReduceUnsignedIterator executes a reducer for every interval and buffers the result. +type integerReduceUnsignedIterator struct { + input *bufIntegerIterator + create func() (IntegerPointAggregator, UnsignedPointEmitter) + dims []string + opt IteratorOptions + points []UnsignedPoint + keepTags bool +} + +func newIntegerReduceUnsignedIterator(input IntegerIterator, opt IteratorOptions, createFn func() (IntegerPointAggregator, UnsignedPointEmitter)) *integerReduceUnsignedIterator { + return &integerReduceUnsignedIterator{ + input: newBufIntegerIterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + } +} + +// Stats returns stats from the input iterator. +func (itr *integerReduceUnsignedIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *integerReduceUnsignedIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *integerReduceUnsignedIterator) Next() (*UnsignedPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// integerReduceUnsignedPoint stores the reduced data for a name/tag combination. +type integerReduceUnsignedPoint struct { + Name string + Tags Tags + Aggregator IntegerPointAggregator + Emitter UnsignedPointEmitter +} + +// reduce executes fn once for every point in the next window. +// The previous value for the dimension is passed to fn. +func (itr *integerReduceUnsignedIterator) reduce() ([]UnsignedPoint, error) { + // Calculate next window. + var ( + startTime, endTime int64 + window struct { + name string + tags string + } + ) + for { + p, err := itr.input.Next() + if err != nil || p == nil { + return nil, err + } else if p.Nil { + continue + } + + // Unread the point so it can be processed. + itr.input.unread(p) + startTime, endTime = itr.opt.Window(p.Time) + window.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID() + break + } + + // Create points by tags. + m := make(map[string]*integerReduceUnsignedPoint) + for { + // Read next point. 
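+ // NextInWindow returns nil once the next buffered point falls outside [startTime, endTime), which ends this window's loop.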
+ curr, err := itr.input.NextInWindow(startTime, endTime) + if err != nil { + return nil, err + } else if curr == nil { + break + } else if curr.Nil { + continue + } else if curr.Name != window.name { + itr.input.unread(curr) + break + } + + // Ensure this point is within the same final window. + if curr.Name != window.name { + itr.input.unread(curr) + break + } else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags { + itr.input.unread(curr) + break + } + + // Retrieve the tags on this point for this level of the query. + // This may be different than the bucket dimensions. + tags := curr.Tags.Subset(itr.dims) + id := tags.ID() + + // Retrieve the aggregator for this name/tag combination or create one. + rp := m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &integerReduceUnsignedPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + m[id] = rp + } + rp.Aggregator.AggregateInteger(curr) + } + + // Reverse sort points by name & tag if our output is supposed to be ordered. + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + if len(keys) > 1 && itr.opt.Ordered { + sort.Sort(reverseStringSlice(keys)) + } + + // Assume the points are already sorted until proven otherwise. + sortedByTime := true + // Emit the points for each name & tag combination. + a := make([]UnsignedPoint, 0, len(m)) + for _, k := range keys { + rp := m[k] + points := rp.Emitter.Emit() + for i := len(points) - 1; i >= 0; i-- { + points[i].Name = rp.Name + if !itr.keepTags { + points[i].Tags = rp.Tags + } + // Set the points time to the interval time if the reducer didn't provide one. + if points[i].Time == ZeroTime { + points[i].Time = startTime + } else { + sortedByTime = false + } + a = append(a, points[i]) + } + } + + // Points may be out of order. Perform a stable sort by time if requested. + if !sortedByTime && itr.opt.Ordered { + sort.Stable(sort.Reverse(unsignedPointsByTime(a))) + } + + return a, nil +} + +// integerStreamUnsignedIterator streams inputs into the iterator and emits points gradually. +type integerStreamUnsignedIterator struct { + input *bufIntegerIterator + create func() (IntegerPointAggregator, UnsignedPointEmitter) + dims []string + opt IteratorOptions + m map[string]*integerReduceUnsignedPoint + points []UnsignedPoint +} + +// newIntegerStreamUnsignedIterator returns a new instance of integerStreamUnsignedIterator. +func newIntegerStreamUnsignedIterator(input IntegerIterator, createFn func() (IntegerPointAggregator, UnsignedPointEmitter), opt IteratorOptions) *integerStreamUnsignedIterator { + return &integerStreamUnsignedIterator{ + input: newBufIntegerIterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + m: make(map[string]*integerReduceUnsignedPoint), + } +} + +// Stats returns stats from the input iterator. +func (itr *integerStreamUnsignedIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *integerStreamUnsignedIterator) Close() error { return itr.input.Close() } + +// Next returns the next value for the stream iterator. +func (itr *integerStreamUnsignedIterator) Next() (*UnsignedPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. 
+ p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// reduce creates and manages aggregators for every point from the input. +// After aggregating a point, it always tries to emit a value using the emitter. +func (itr *integerStreamUnsignedIterator) reduce() ([]UnsignedPoint, error) { + // We have already read all of the input points. + if itr.m == nil { + return nil, nil + } + + for { + // Read next point. + curr, err := itr.input.Next() + if err != nil { + return nil, err + } else if curr == nil { + // Close all of the aggregators to flush any remaining points to emit. + var points []UnsignedPoint + for _, rp := range itr.m { + if aggregator, ok := rp.Aggregator.(io.Closer); ok { + if err := aggregator.Close(); err != nil { + return nil, err + } + + pts := rp.Emitter.Emit() + if len(pts) == 0 { + continue + } + + for i := range pts { + pts[i].Name = rp.Name + pts[i].Tags = rp.Tags + } + points = append(points, pts...) + } + } + + // Eliminate the aggregators and emitters. + itr.m = nil + return points, nil + } else if curr.Nil { + continue + } + tags := curr.Tags.Subset(itr.dims) + + id := curr.Name + if len(tags.m) > 0 { + id += "\x00" + tags.ID() + } + + // Retrieve the aggregator for this name/tag combination or create one. + rp := itr.m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &integerReduceUnsignedPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + itr.m[id] = rp + } + rp.Aggregator.AggregateInteger(curr) + + // Attempt to emit points from the aggregator. + points := rp.Emitter.Emit() + if len(points) == 0 { + continue + } + + for i := range points { + points[i].Name = rp.Name + points[i].Tags = rp.Tags + } + return points, nil + } +} + +// integerReduceStringIterator executes a reducer for every interval and buffers the result. +type integerReduceStringIterator struct { + input *bufIntegerIterator + create func() (IntegerPointAggregator, StringPointEmitter) + dims []string + opt IteratorOptions + points []StringPoint + keepTags bool +} + +func newIntegerReduceStringIterator(input IntegerIterator, opt IteratorOptions, createFn func() (IntegerPointAggregator, StringPointEmitter)) *integerReduceStringIterator { + return &integerReduceStringIterator{ + input: newBufIntegerIterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + } +} + +// Stats returns stats from the input iterator. +func (itr *integerReduceStringIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *integerReduceStringIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *integerReduceStringIterator) Next() (*StringPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// integerReduceStringPoint stores the reduced data for a name/tag combination. +type integerReduceStringPoint struct { + Name string + Tags Tags + Aggregator IntegerPointAggregator + Emitter StringPointEmitter +} + +// reduce executes fn once for every point in the next window. +// The previous value for the dimension is passed to fn. 
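+ // (The aggregator/emitter pair created by itr.create serves as fn here.)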
+func (itr *integerReduceStringIterator) reduce() ([]StringPoint, error) { + // Calculate next window. + var ( + startTime, endTime int64 + window struct { + name string + tags string + } + ) + for { + p, err := itr.input.Next() + if err != nil || p == nil { + return nil, err + } else if p.Nil { + continue + } + + // Unread the point so it can be processed. + itr.input.unread(p) + startTime, endTime = itr.opt.Window(p.Time) + window.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID() + break + } + + // Create points by tags. + m := make(map[string]*integerReduceStringPoint) + for { + // Read next point. + curr, err := itr.input.NextInWindow(startTime, endTime) + if err != nil { + return nil, err + } else if curr == nil { + break + } else if curr.Nil { + continue + } else if curr.Name != window.name { + itr.input.unread(curr) + break + } + + // Ensure this point is within the same final window. + if curr.Name != window.name { + itr.input.unread(curr) + break + } else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags { + itr.input.unread(curr) + break + } + + // Retrieve the tags on this point for this level of the query. + // This may be different than the bucket dimensions. + tags := curr.Tags.Subset(itr.dims) + id := tags.ID() + + // Retrieve the aggregator for this name/tag combination or create one. + rp := m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &integerReduceStringPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + m[id] = rp + } + rp.Aggregator.AggregateInteger(curr) + } + + // Reverse sort points by name & tag if our output is supposed to be ordered. + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + if len(keys) > 1 && itr.opt.Ordered { + sort.Sort(reverseStringSlice(keys)) + } + + // Assume the points are already sorted until proven otherwise. + sortedByTime := true + // Emit the points for each name & tag combination. + a := make([]StringPoint, 0, len(m)) + for _, k := range keys { + rp := m[k] + points := rp.Emitter.Emit() + for i := len(points) - 1; i >= 0; i-- { + points[i].Name = rp.Name + if !itr.keepTags { + points[i].Tags = rp.Tags + } + // Set the points time to the interval time if the reducer didn't provide one. + if points[i].Time == ZeroTime { + points[i].Time = startTime + } else { + sortedByTime = false + } + a = append(a, points[i]) + } + } + + // Points may be out of order. Perform a stable sort by time if requested. + if !sortedByTime && itr.opt.Ordered { + sort.Stable(sort.Reverse(stringPointsByTime(a))) + } + + return a, nil +} + +// integerStreamStringIterator streams inputs into the iterator and emits points gradually. +type integerStreamStringIterator struct { + input *bufIntegerIterator + create func() (IntegerPointAggregator, StringPointEmitter) + dims []string + opt IteratorOptions + m map[string]*integerReduceStringPoint + points []StringPoint +} + +// newIntegerStreamStringIterator returns a new instance of integerStreamStringIterator. +func newIntegerStreamStringIterator(input IntegerIterator, createFn func() (IntegerPointAggregator, StringPointEmitter), opt IteratorOptions) *integerStreamStringIterator { + return &integerStreamStringIterator{ + input: newBufIntegerIterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + m: make(map[string]*integerReduceStringPoint), + } +} + +// Stats returns stats from the input iterator. 
+func (itr *integerStreamStringIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *integerStreamStringIterator) Close() error { return itr.input.Close() } + +// Next returns the next value for the stream iterator. +func (itr *integerStreamStringIterator) Next() (*StringPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// reduce creates and manages aggregators for every point from the input. +// After aggregating a point, it always tries to emit a value using the emitter. +func (itr *integerStreamStringIterator) reduce() ([]StringPoint, error) { + // We have already read all of the input points. + if itr.m == nil { + return nil, nil + } + + for { + // Read next point. + curr, err := itr.input.Next() + if err != nil { + return nil, err + } else if curr == nil { + // Close all of the aggregators to flush any remaining points to emit. + var points []StringPoint + for _, rp := range itr.m { + if aggregator, ok := rp.Aggregator.(io.Closer); ok { + if err := aggregator.Close(); err != nil { + return nil, err + } + + pts := rp.Emitter.Emit() + if len(pts) == 0 { + continue + } + + for i := range pts { + pts[i].Name = rp.Name + pts[i].Tags = rp.Tags + } + points = append(points, pts...) + } + } + + // Eliminate the aggregators and emitters. + itr.m = nil + return points, nil + } else if curr.Nil { + continue + } + tags := curr.Tags.Subset(itr.dims) + + id := curr.Name + if len(tags.m) > 0 { + id += "\x00" + tags.ID() + } + + // Retrieve the aggregator for this name/tag combination or create one. + rp := itr.m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &integerReduceStringPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + itr.m[id] = rp + } + rp.Aggregator.AggregateInteger(curr) + + // Attempt to emit points from the aggregator. + points := rp.Emitter.Emit() + if len(points) == 0 { + continue + } + + for i := range points { + points[i].Name = rp.Name + points[i].Tags = rp.Tags + } + return points, nil + } +} + +// integerReduceBooleanIterator executes a reducer for every interval and buffers the result. +type integerReduceBooleanIterator struct { + input *bufIntegerIterator + create func() (IntegerPointAggregator, BooleanPointEmitter) + dims []string + opt IteratorOptions + points []BooleanPoint + keepTags bool +} + +func newIntegerReduceBooleanIterator(input IntegerIterator, opt IteratorOptions, createFn func() (IntegerPointAggregator, BooleanPointEmitter)) *integerReduceBooleanIterator { + return &integerReduceBooleanIterator{ + input: newBufIntegerIterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + } +} + +// Stats returns stats from the input iterator. +func (itr *integerReduceBooleanIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *integerReduceBooleanIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *integerReduceBooleanIterator) Next() (*BooleanPoint, error) { + // Calculate next window if we have no more points. 
+ if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// integerReduceBooleanPoint stores the reduced data for a name/tag combination. +type integerReduceBooleanPoint struct { + Name string + Tags Tags + Aggregator IntegerPointAggregator + Emitter BooleanPointEmitter +} + +// reduce executes fn once for every point in the next window. +// The previous value for the dimension is passed to fn. +func (itr *integerReduceBooleanIterator) reduce() ([]BooleanPoint, error) { + // Calculate next window. + var ( + startTime, endTime int64 + window struct { + name string + tags string + } + ) + for { + p, err := itr.input.Next() + if err != nil || p == nil { + return nil, err + } else if p.Nil { + continue + } + + // Unread the point so it can be processed. + itr.input.unread(p) + startTime, endTime = itr.opt.Window(p.Time) + window.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID() + break + } + + // Create points by tags. + m := make(map[string]*integerReduceBooleanPoint) + for { + // Read next point. + curr, err := itr.input.NextInWindow(startTime, endTime) + if err != nil { + return nil, err + } else if curr == nil { + break + } else if curr.Nil { + continue + } else if curr.Name != window.name { + itr.input.unread(curr) + break + } + + // Ensure this point is within the same final window. + if curr.Name != window.name { + itr.input.unread(curr) + break + } else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags { + itr.input.unread(curr) + break + } + + // Retrieve the tags on this point for this level of the query. + // This may be different than the bucket dimensions. + tags := curr.Tags.Subset(itr.dims) + id := tags.ID() + + // Retrieve the aggregator for this name/tag combination or create one. + rp := m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &integerReduceBooleanPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + m[id] = rp + } + rp.Aggregator.AggregateInteger(curr) + } + + // Reverse sort points by name & tag if our output is supposed to be ordered. + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + if len(keys) > 1 && itr.opt.Ordered { + sort.Sort(reverseStringSlice(keys)) + } + + // Assume the points are already sorted until proven otherwise. + sortedByTime := true + // Emit the points for each name & tag combination. + a := make([]BooleanPoint, 0, len(m)) + for _, k := range keys { + rp := m[k] + points := rp.Emitter.Emit() + for i := len(points) - 1; i >= 0; i-- { + points[i].Name = rp.Name + if !itr.keepTags { + points[i].Tags = rp.Tags + } + // Set the points time to the interval time if the reducer didn't provide one. + if points[i].Time == ZeroTime { + points[i].Time = startTime + } else { + sortedByTime = false + } + a = append(a, points[i]) + } + } + + // Points may be out of order. Perform a stable sort by time if requested. + if !sortedByTime && itr.opt.Ordered { + sort.Stable(sort.Reverse(booleanPointsByTime(a))) + } + + return a, nil +} + +// integerStreamBooleanIterator streams inputs into the iterator and emits points gradually. 
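+ // Unlike the reduce iterator above, it does not wait for a window to complete: points are emitted as soon as the aggregator's emitter produces them.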
+type integerStreamBooleanIterator struct { + input *bufIntegerIterator + create func() (IntegerPointAggregator, BooleanPointEmitter) + dims []string + opt IteratorOptions + m map[string]*integerReduceBooleanPoint + points []BooleanPoint +} + +// newIntegerStreamBooleanIterator returns a new instance of integerStreamBooleanIterator. +func newIntegerStreamBooleanIterator(input IntegerIterator, createFn func() (IntegerPointAggregator, BooleanPointEmitter), opt IteratorOptions) *integerStreamBooleanIterator { + return &integerStreamBooleanIterator{ + input: newBufIntegerIterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + m: make(map[string]*integerReduceBooleanPoint), + } +} + +// Stats returns stats from the input iterator. +func (itr *integerStreamBooleanIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *integerStreamBooleanIterator) Close() error { return itr.input.Close() } + +// Next returns the next value for the stream iterator. +func (itr *integerStreamBooleanIterator) Next() (*BooleanPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// reduce creates and manages aggregators for every point from the input. +// After aggregating a point, it always tries to emit a value using the emitter. +func (itr *integerStreamBooleanIterator) reduce() ([]BooleanPoint, error) { + // We have already read all of the input points. + if itr.m == nil { + return nil, nil + } + + for { + // Read next point. + curr, err := itr.input.Next() + if err != nil { + return nil, err + } else if curr == nil { + // Close all of the aggregators to flush any remaining points to emit. + var points []BooleanPoint + for _, rp := range itr.m { + if aggregator, ok := rp.Aggregator.(io.Closer); ok { + if err := aggregator.Close(); err != nil { + return nil, err + } + + pts := rp.Emitter.Emit() + if len(pts) == 0 { + continue + } + + for i := range pts { + pts[i].Name = rp.Name + pts[i].Tags = rp.Tags + } + points = append(points, pts...) + } + } + + // Eliminate the aggregators and emitters. + itr.m = nil + return points, nil + } else if curr.Nil { + continue + } + tags := curr.Tags.Subset(itr.dims) + + id := curr.Name + if len(tags.m) > 0 { + id += "\x00" + tags.ID() + } + + // Retrieve the aggregator for this name/tag combination or create one. + rp := itr.m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &integerReduceBooleanPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + itr.m[id] = rp + } + rp.Aggregator.AggregateInteger(curr) + + // Attempt to emit points from the aggregator. + points := rp.Emitter.Emit() + if len(points) == 0 { + continue + } + + for i := range points { + points[i].Name = rp.Name + points[i].Tags = rp.Tags + } + return points, nil + } +} + +// integerDedupeIterator only outputs unique points. +// This differs from the DistinctIterator in that it compares all aux fields too. +// This iterator is relatively inefficient and should only be used on small +// datasets such as meta query results. 
+type integerDedupeIterator struct { + input IntegerIterator + m map[string]struct{} // lookup of points already sent +} + +type integerIteratorMapper struct { + cur Cursor + row Row + driver IteratorMap // which iterator to use for the primary value, can be nil + fields []IteratorMap // which iterator to use for an aux field + point IntegerPoint +} + +func newIntegerIteratorMapper(cur Cursor, driver IteratorMap, fields []IteratorMap, opt IteratorOptions) *integerIteratorMapper { + return &integerIteratorMapper{ + cur: cur, + driver: driver, + fields: fields, + point: IntegerPoint{ + Aux: make([]interface{}, len(fields)), + }, + } +} + +func (itr *integerIteratorMapper) Next() (*IntegerPoint, error) { + if !itr.cur.Scan(&itr.row) { + if err := itr.cur.Err(); err != nil { + return nil, err + } + return nil, nil + } + + itr.point.Time = itr.row.Time + itr.point.Name = itr.row.Series.Name + itr.point.Tags = itr.row.Series.Tags + + if itr.driver != nil { + if v := itr.driver.Value(&itr.row); v != nil { + if v, ok := castToInteger(v); ok { + itr.point.Value = v + itr.point.Nil = false + } else { + itr.point.Value = 0 + itr.point.Nil = true + } + } else { + itr.point.Value = 0 + itr.point.Nil = true + } + } + for i, f := range itr.fields { + itr.point.Aux[i] = f.Value(&itr.row) + } + return &itr.point, nil +} + +func (itr *integerIteratorMapper) Stats() IteratorStats { + return itr.cur.Stats() +} + +func (itr *integerIteratorMapper) Close() error { + return itr.cur.Close() +} + +type integerFilterIterator struct { + input IntegerIterator + cond influxql.Expr + opt IteratorOptions + m map[string]interface{} +} + +func newIntegerFilterIterator(input IntegerIterator, cond influxql.Expr, opt IteratorOptions) IntegerIterator { + // Strip out time conditions from the WHERE clause. + // TODO(jsternberg): This should really be done for us when creating the IteratorOptions struct. 
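+ // Any binary expression whose left-hand side is the time field is rewritten to a literal TRUE below, leaving only the non-time predicates to evaluate per point.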
+ n := influxql.RewriteFunc(influxql.CloneExpr(cond), func(n influxql.Node) influxql.Node { + switch n := n.(type) { + case *influxql.BinaryExpr: + if n.LHS.String() == "time" { + return &influxql.BooleanLiteral{Val: true} + } + } + return n + }) + + cond, _ = n.(influxql.Expr) + if cond == nil { + return input + } else if n, ok := cond.(*influxql.BooleanLiteral); ok && n.Val { + return input + } + + return &integerFilterIterator{ + input: input, + cond: cond, + opt: opt, + m: make(map[string]interface{}), + } +} + +func (itr *integerFilterIterator) Stats() IteratorStats { return itr.input.Stats() } +func (itr *integerFilterIterator) Close() error { return itr.input.Close() } + +func (itr *integerFilterIterator) Next() (*IntegerPoint, error) { + for { + p, err := itr.input.Next() + if err != nil || p == nil { + return nil, err + } + + for i, ref := range itr.opt.Aux { + itr.m[ref.Val] = p.Aux[i] + } + for k, v := range p.Tags.KeyValues() { + itr.m[k] = v + } + + if !influxql.EvalBool(itr.cond, itr.m) { + continue + } + return p, nil + } +} + +type integerTagSubsetIterator struct { + input IntegerIterator + point IntegerPoint + lastTags Tags + dimensions []string +} + +func newIntegerTagSubsetIterator(input IntegerIterator, opt IteratorOptions) *integerTagSubsetIterator { + return &integerTagSubsetIterator{ + input: input, + dimensions: opt.GetDimensions(), + } +} + +func (itr *integerTagSubsetIterator) Next() (*IntegerPoint, error) { + p, err := itr.input.Next() + if err != nil { + return nil, err + } else if p == nil { + return nil, nil + } + + itr.point.Name = p.Name + if !p.Tags.Equal(itr.lastTags) { + itr.point.Tags = p.Tags.Subset(itr.dimensions) + itr.lastTags = p.Tags + } + itr.point.Time = p.Time + itr.point.Value = p.Value + itr.point.Aux = p.Aux + itr.point.Aggregated = p.Aggregated + itr.point.Nil = p.Nil + return &itr.point, nil +} + +func (itr *integerTagSubsetIterator) Stats() IteratorStats { + return itr.input.Stats() +} + +func (itr *integerTagSubsetIterator) Close() error { + return itr.input.Close() +} + +// newIntegerDedupeIterator returns a new instance of integerDedupeIterator. +func newIntegerDedupeIterator(input IntegerIterator) *integerDedupeIterator { + return &integerDedupeIterator{ + input: input, + m: make(map[string]struct{}), + } +} + +// Stats returns stats from the input iterator. +func (itr *integerDedupeIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *integerDedupeIterator) Close() error { return itr.input.Close() } + +// Next returns the next unique point from the input iterator. +func (itr *integerDedupeIterator) Next() (*IntegerPoint, error) { + for { + // Read next point. + p, err := itr.input.Next() + if p == nil || err != nil { + return nil, err + } + + // Serialize to bytes to store in lookup. + buf, err := proto.Marshal(encodeIntegerPoint(p)) + if err != nil { + return nil, err + } + + // If the point has already been output then move to the next point. + if _, ok := itr.m[string(buf)]; ok { + continue + } + + // Otherwise mark it as emitted and return point. + itr.m[string(buf)] = struct{}{} + return p, nil + } +} + +// integerReaderIterator represents an iterator that streams from a reader. +type integerReaderIterator struct { + r io.Reader + dec *IntegerPointDecoder +} + +// newIntegerReaderIterator returns a new instance of integerReaderIterator. 
+func newIntegerReaderIterator(ctx context.Context, r io.Reader, stats IteratorStats) *integerReaderIterator { + dec := NewIntegerPointDecoder(ctx, r) + dec.stats = stats + + return &integerReaderIterator{ + r: r, + dec: dec, + } +} + +// Stats returns stats about points processed. +func (itr *integerReaderIterator) Stats() IteratorStats { return itr.dec.stats } + +// Close closes the underlying reader, if applicable. +func (itr *integerReaderIterator) Close() error { + if r, ok := itr.r.(io.ReadCloser); ok { + return r.Close() + } + return nil +} + +// Next returns the next point from the iterator. +func (itr *integerReaderIterator) Next() (*IntegerPoint, error) { + // OPTIMIZE(benbjohnson): Reuse point on iterator. + + // Unmarshal next point. + p := &IntegerPoint{} + if err := itr.dec.DecodeIntegerPoint(p); err == io.EOF { + return nil, nil + } else if err != nil { + return nil, err + } + return p, nil +} + +// UnsignedIterator represents a stream of unsigned points. +type UnsignedIterator interface { + Iterator + Next() (*UnsignedPoint, error) +} + +// newUnsignedIterators converts a slice of Iterator to a slice of UnsignedIterator. +// Drop and closes any iterator in itrs that is not a UnsignedIterator and cannot +// be cast to a UnsignedIterator. +func newUnsignedIterators(itrs []Iterator) []UnsignedIterator { + a := make([]UnsignedIterator, 0, len(itrs)) + for _, itr := range itrs { + switch itr := itr.(type) { + case UnsignedIterator: + a = append(a, itr) + default: + itr.Close() + } + } + return a +} + +// bufUnsignedIterator represents a buffered UnsignedIterator. +type bufUnsignedIterator struct { + itr UnsignedIterator + buf *UnsignedPoint +} + +// newBufUnsignedIterator returns a buffered UnsignedIterator. +func newBufUnsignedIterator(itr UnsignedIterator) *bufUnsignedIterator { + return &bufUnsignedIterator{itr: itr} +} + +// Stats returns statistics from the input iterator. +func (itr *bufUnsignedIterator) Stats() IteratorStats { return itr.itr.Stats() } + +// Close closes the underlying iterator. +func (itr *bufUnsignedIterator) Close() error { return itr.itr.Close() } + +// peek returns the next point without removing it from the iterator. +func (itr *bufUnsignedIterator) peek() (*UnsignedPoint, error) { + p, err := itr.Next() + if err != nil { + return nil, err + } + itr.unread(p) + return p, nil +} + +// peekTime returns the time of the next point. +// Returns zero time if no more points available. +func (itr *bufUnsignedIterator) peekTime() (int64, error) { + p, err := itr.peek() + if p == nil || err != nil { + return ZeroTime, err + } + return p.Time, nil +} + +// Next returns the current buffer, if exists, or calls the underlying iterator. +func (itr *bufUnsignedIterator) Next() (*UnsignedPoint, error) { + buf := itr.buf + if buf != nil { + itr.buf = nil + return buf, nil + } + return itr.itr.Next() +} + +// NextInWindow returns the next value if it is between [startTime, endTime). +// If the next value is outside the range then it is moved to the buffer. +func (itr *bufUnsignedIterator) NextInWindow(startTime, endTime int64) (*UnsignedPoint, error) { + v, err := itr.Next() + if v == nil || err != nil { + return nil, err + } else if t := v.Time; t >= endTime || t < startTime { + itr.unread(v) + return nil, nil + } + return v, nil +} + +// unread sets v to the buffer. It is read on the next call to Next(). 
+func (itr *bufUnsignedIterator) unread(v *UnsignedPoint) { itr.buf = v } + +// unsignedMergeIterator represents an iterator that combines multiple unsigned iterators. +type unsignedMergeIterator struct { + inputs []UnsignedIterator + heap *unsignedMergeHeap + init bool + + closed bool + mu sync.RWMutex + + // Current iterator and window. + curr *unsignedMergeHeapItem + window struct { + name string + tags string + startTime int64 + endTime int64 + } +} + +// newUnsignedMergeIterator returns a new instance of unsignedMergeIterator. +func newUnsignedMergeIterator(inputs []UnsignedIterator, opt IteratorOptions) *unsignedMergeIterator { + itr := &unsignedMergeIterator{ + inputs: inputs, + heap: &unsignedMergeHeap{ + items: make([]*unsignedMergeHeapItem, 0, len(inputs)), + opt: opt, + }, + } + + // Initialize heap items. + for _, input := range inputs { + // Wrap in buffer, ignore any inputs without anymore points. + bufInput := newBufUnsignedIterator(input) + + // Append to the heap. + itr.heap.items = append(itr.heap.items, &unsignedMergeHeapItem{itr: bufInput}) + } + + return itr +} + +// Stats returns an aggregation of stats from the underlying iterators. +func (itr *unsignedMergeIterator) Stats() IteratorStats { + var stats IteratorStats + for _, input := range itr.inputs { + stats.Add(input.Stats()) + } + return stats +} + +// Close closes the underlying iterators. +func (itr *unsignedMergeIterator) Close() error { + itr.mu.Lock() + defer itr.mu.Unlock() + + for _, input := range itr.inputs { + input.Close() + } + itr.curr = nil + itr.inputs = nil + itr.heap.items = nil + itr.closed = true + return nil +} + +// Next returns the next point from the iterator. +func (itr *unsignedMergeIterator) Next() (*UnsignedPoint, error) { + itr.mu.RLock() + defer itr.mu.RUnlock() + if itr.closed { + return nil, nil + } + + // Initialize the heap. This needs to be done lazily on the first call to this iterator + // so that iterator initialization done through the Select() call returns quickly. + // Queries can only be interrupted after the Select() call completes so any operations + // done during iterator creation cannot be interrupted, which is why we do it here + // instead so an interrupt can happen while initializing the heap. + if !itr.init { + items := itr.heap.items + itr.heap.items = make([]*unsignedMergeHeapItem, 0, len(items)) + for _, item := range items { + if p, err := item.itr.peek(); err != nil { + return nil, err + } else if p == nil { + continue + } + itr.heap.items = append(itr.heap.items, item) + } + heap.Init(itr.heap) + itr.init = true + } + + for { + // Retrieve the next iterator if we don't have one. + if itr.curr == nil { + if len(itr.heap.items) == 0 { + return nil, nil + } + itr.curr = heap.Pop(itr.heap).(*unsignedMergeHeapItem) + + // Read point and set current window. + p, err := itr.curr.itr.Next() + if err != nil { + return nil, err + } + tags := p.Tags.Subset(itr.heap.opt.Dimensions) + itr.window.name, itr.window.tags = p.Name, tags.ID() + itr.window.startTime, itr.window.endTime = itr.heap.opt.Window(p.Time) + return p, nil + } + + // Read the next point from the current iterator. + p, err := itr.curr.itr.Next() + if err != nil { + return nil, err + } + + // If there are no more points then remove iterator from heap and find next. + if p == nil { + itr.curr = nil + continue + } + + // Check if the point is inside of our current window. 
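+ // A point stays in the current window only if its name, tag subset, and timestamp all match; the time bound checked below depends on whether iteration is ascending or descending.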
+ inWindow := true + if window := itr.window; window.name != p.Name { + inWindow = false + } else if tags := p.Tags.Subset(itr.heap.opt.Dimensions); window.tags != tags.ID() { + inWindow = false + } else if opt := itr.heap.opt; opt.Ascending && p.Time >= window.endTime { + inWindow = false + } else if !opt.Ascending && p.Time < window.startTime { + inWindow = false + } + + // If it's outside our window then push iterator back on the heap and find new iterator. + if !inWindow { + itr.curr.itr.unread(p) + heap.Push(itr.heap, itr.curr) + itr.curr = nil + continue + } + + return p, nil + } +} + +// unsignedMergeHeap represents a heap of unsignedMergeHeapItems. +// Items are sorted by their next window and then by name/tags. +type unsignedMergeHeap struct { + opt IteratorOptions + items []*unsignedMergeHeapItem +} + +func (h *unsignedMergeHeap) Len() int { return len(h.items) } +func (h *unsignedMergeHeap) Swap(i, j int) { h.items[i], h.items[j] = h.items[j], h.items[i] } +func (h *unsignedMergeHeap) Less(i, j int) bool { + x, err := h.items[i].itr.peek() + if err != nil { + return true + } + y, err := h.items[j].itr.peek() + if err != nil { + return false + } + + if h.opt.Ascending { + if x.Name != y.Name { + return x.Name < y.Name + } else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); xTags.ID() != yTags.ID() { + return xTags.ID() < yTags.ID() + } + } else { + if x.Name != y.Name { + return x.Name > y.Name + } else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); xTags.ID() != yTags.ID() { + return xTags.ID() > yTags.ID() + } + } + + xt, _ := h.opt.Window(x.Time) + yt, _ := h.opt.Window(y.Time) + + if h.opt.Ascending { + return xt < yt + } + return xt > yt +} + +func (h *unsignedMergeHeap) Push(x interface{}) { + h.items = append(h.items, x.(*unsignedMergeHeapItem)) +} + +func (h *unsignedMergeHeap) Pop() interface{} { + old := h.items + n := len(old) + item := old[n-1] + h.items = old[0 : n-1] + return item +} + +type unsignedMergeHeapItem struct { + itr *bufUnsignedIterator +} + +// unsignedSortedMergeIterator is an iterator that sorts and merges multiple iterators into one. +type unsignedSortedMergeIterator struct { + inputs []UnsignedIterator + heap *unsignedSortedMergeHeap + init bool +} + +// newUnsignedSortedMergeIterator returns an instance of unsignedSortedMergeIterator. +func newUnsignedSortedMergeIterator(inputs []UnsignedIterator, opt IteratorOptions) Iterator { + itr := &unsignedSortedMergeIterator{ + inputs: inputs, + heap: &unsignedSortedMergeHeap{ + items: make([]*unsignedSortedMergeHeapItem, 0, len(inputs)), + opt: opt, + }, + } + + // Initialize heap items. + for _, input := range inputs { + // Append to the heap. + itr.heap.items = append(itr.heap.items, &unsignedSortedMergeHeapItem{itr: input}) + } + + return itr +} + +// Stats returns an aggregation of stats from the underlying iterators. +func (itr *unsignedSortedMergeIterator) Stats() IteratorStats { + var stats IteratorStats + for _, input := range itr.inputs { + stats.Add(input.Stats()) + } + return stats +} + +// Close closes the underlying iterators. +func (itr *unsignedSortedMergeIterator) Close() error { + for _, input := range itr.inputs { + input.Close() + } + return nil +} + +// Next returns the next points from the iterator. +func (itr *unsignedSortedMergeIterator) Next() (*UnsignedPoint, error) { return itr.pop() } + +// pop returns the next point from the heap. +// Reads the next point from item's cursor and puts it back on the heap. 
+func (itr *unsignedSortedMergeIterator) pop() (*UnsignedPoint, error) { + // Initialize the heap. See the MergeIterator to see why this has to be done lazily. + if !itr.init { + items := itr.heap.items + itr.heap.items = make([]*unsignedSortedMergeHeapItem, 0, len(items)) + for _, item := range items { + var err error + if item.point, err = item.itr.Next(); err != nil { + return nil, err + } else if item.point == nil { + continue + } + itr.heap.items = append(itr.heap.items, item) + } + heap.Init(itr.heap) + itr.init = true + } + + if len(itr.heap.items) == 0 { + return nil, nil + } + + // Read the next item from the heap. + item := heap.Pop(itr.heap).(*unsignedSortedMergeHeapItem) + if item.err != nil { + return nil, item.err + } else if item.point == nil { + return nil, nil + } + + // Copy the point for return. + p := item.point.Clone() + + // Read the next item from the cursor. Push back to heap if one exists. + if item.point, item.err = item.itr.Next(); item.point != nil { + heap.Push(itr.heap, item) + } + + return p, nil +} + +// unsignedSortedMergeHeap represents a heap of unsignedSortedMergeHeapItems. +// Items are sorted with the following priority: +// - By their measurement name; +// - By their tag keys/values; +// - By time; or +// - By their Aux field values. +// +type unsignedSortedMergeHeap struct { + opt IteratorOptions + items []*unsignedSortedMergeHeapItem +} + +func (h *unsignedSortedMergeHeap) Len() int { return len(h.items) } +func (h *unsignedSortedMergeHeap) Swap(i, j int) { h.items[i], h.items[j] = h.items[j], h.items[i] } +func (h *unsignedSortedMergeHeap) Less(i, j int) bool { + x, y := h.items[i].point, h.items[j].point + + if h.opt.Ascending { + if x.Name != y.Name { + return x.Name < y.Name + } else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); !xTags.Equals(&yTags) { + return xTags.ID() < yTags.ID() + } + + if x.Time != y.Time { + return x.Time < y.Time + } + + if len(x.Aux) > 0 && len(x.Aux) == len(y.Aux) { + for i := 0; i < len(x.Aux); i++ { + v1, ok1 := x.Aux[i].(string) + v2, ok2 := y.Aux[i].(string) + if !ok1 || !ok2 { + // Unsupported types used in Aux fields. Maybe they + // need to be added here? + return false + } else if v1 == v2 { + continue + } + return v1 < v2 + } + } + return false // Times and/or Aux fields are equal. + } + + if x.Name != y.Name { + return x.Name > y.Name + } else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); !xTags.Equals(&yTags) { + return xTags.ID() > yTags.ID() + } + + if x.Time != y.Time { + return x.Time > y.Time + } + + if len(x.Aux) > 0 && len(x.Aux) == len(y.Aux) { + for i := 0; i < len(x.Aux); i++ { + v1, ok1 := x.Aux[i].(string) + v2, ok2 := y.Aux[i].(string) + if !ok1 || !ok2 { + // Unsupported types used in Aux fields. Maybe they + // need to be added here? + return false + } else if v1 == v2 { + continue + } + return v1 > v2 + } + } + return false // Times and/or Aux fields are equal. +} + +func (h *unsignedSortedMergeHeap) Push(x interface{}) { + h.items = append(h.items, x.(*unsignedSortedMergeHeapItem)) +} + +func (h *unsignedSortedMergeHeap) Pop() interface{} { + old := h.items + n := len(old) + item := old[n-1] + h.items = old[0 : n-1] + return item +} + +type unsignedSortedMergeHeapItem struct { + point *UnsignedPoint + err error + itr UnsignedIterator +} + +// unsignedIteratorScanner scans the results of a UnsignedIterator into a map. 
+type unsignedIteratorScanner struct { + input *bufUnsignedIterator + err error + keys []influxql.VarRef + defaultValue interface{} +} + +// newUnsignedIteratorScanner creates a new IteratorScanner. +func newUnsignedIteratorScanner(input UnsignedIterator, keys []influxql.VarRef, defaultValue interface{}) *unsignedIteratorScanner { + return &unsignedIteratorScanner{ + input: newBufUnsignedIterator(input), + keys: keys, + defaultValue: defaultValue, + } +} + +func (s *unsignedIteratorScanner) Peek() (int64, string, Tags) { + if s.err != nil { + return ZeroTime, "", Tags{} + } + + p, err := s.input.peek() + if err != nil { + s.err = err + return ZeroTime, "", Tags{} + } else if p == nil { + return ZeroTime, "", Tags{} + } + return p.Time, p.Name, p.Tags +} + +func (s *unsignedIteratorScanner) ScanAt(ts int64, name string, tags Tags, m map[string]interface{}) { + if s.err != nil { + return + } + + p, err := s.input.Next() + if err != nil { + s.err = err + return + } else if p == nil { + s.useDefaults(m) + return + } else if p.Time != ts || p.Name != name || !p.Tags.Equals(&tags) { + s.useDefaults(m) + s.input.unread(p) + return + } + + if k := s.keys[0]; k.Val != "" { + if p.Nil { + if s.defaultValue != SkipDefault { + m[k.Val] = castToType(s.defaultValue, k.Type) + } + } else { + m[k.Val] = p.Value + } + } + for i, v := range p.Aux { + k := s.keys[i+1] + switch v.(type) { + case float64, int64, uint64, string, bool: + m[k.Val] = v + default: + // Insert the fill value if one was specified. + if s.defaultValue != SkipDefault { + m[k.Val] = castToType(s.defaultValue, k.Type) + } + } + } +} + +func (s *unsignedIteratorScanner) useDefaults(m map[string]interface{}) { + if s.defaultValue == SkipDefault { + return + } + for _, k := range s.keys { + if k.Val == "" { + continue + } + m[k.Val] = castToType(s.defaultValue, k.Type) + } +} + +func (s *unsignedIteratorScanner) Stats() IteratorStats { return s.input.Stats() } +func (s *unsignedIteratorScanner) Err() error { return s.err } +func (s *unsignedIteratorScanner) Close() error { return s.input.Close() } + +// unsignedParallelIterator represents an iterator that pulls data in a separate goroutine. +type unsignedParallelIterator struct { + input UnsignedIterator + ch chan unsignedPointError + + once sync.Once + closing chan struct{} + wg sync.WaitGroup +} + +// newUnsignedParallelIterator returns a new instance of unsignedParallelIterator. +func newUnsignedParallelIterator(input UnsignedIterator) *unsignedParallelIterator { + itr := &unsignedParallelIterator{ + input: input, + ch: make(chan unsignedPointError, 256), + closing: make(chan struct{}), + } + itr.wg.Add(1) + go itr.monitor() + return itr +} + +// Stats returns stats from the underlying iterator. +func (itr *unsignedParallelIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the underlying iterators. +func (itr *unsignedParallelIterator) Close() error { + itr.once.Do(func() { close(itr.closing) }) + itr.wg.Wait() + return itr.input.Close() +} + +// Next returns the next point from the iterator. +func (itr *unsignedParallelIterator) Next() (*UnsignedPoint, error) { + v, ok := <-itr.ch + if !ok { + return nil, io.EOF + } + return v.point, v.err +} + +// monitor runs in a separate goroutine and actively pulls the next point. +func (itr *unsignedParallelIterator) monitor() { + defer close(itr.ch) + defer itr.wg.Done() + + for { + // Read next point. 
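+ // The point is cloned before being sent on the channel, presumably because the underlying iterator may reuse the point's memory on the next read.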
+ p, err := itr.input.Next() + if p != nil { + p = p.Clone() + } + + select { + case <-itr.closing: + return + case itr.ch <- unsignedPointError{point: p, err: err}: + } + } +} + +type unsignedPointError struct { + point *UnsignedPoint + err error +} + +// unsignedLimitIterator represents an iterator that limits points per group. +type unsignedLimitIterator struct { + input UnsignedIterator + opt IteratorOptions + n int + + prev struct { + name string + tags Tags + } +} + +// newUnsignedLimitIterator returns a new instance of unsignedLimitIterator. +func newUnsignedLimitIterator(input UnsignedIterator, opt IteratorOptions) *unsignedLimitIterator { + return &unsignedLimitIterator{ + input: input, + opt: opt, + } +} + +// Stats returns stats from the underlying iterator. +func (itr *unsignedLimitIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the underlying iterators. +func (itr *unsignedLimitIterator) Close() error { return itr.input.Close() } + +// Next returns the next point from the iterator. +func (itr *unsignedLimitIterator) Next() (*UnsignedPoint, error) { + for { + p, err := itr.input.Next() + if p == nil || err != nil { + return nil, err + } + + // Reset window and counter if a new window is encountered. + if p.Name != itr.prev.name || !p.Tags.Equals(&itr.prev.tags) { + itr.prev.name = p.Name + itr.prev.tags = p.Tags + itr.n = 0 + } + + // Increment counter. + itr.n++ + + // Read next point if not beyond the offset. + if itr.n <= itr.opt.Offset { + continue + } + + // Read next point if we're beyond the limit. + if itr.opt.Limit > 0 && (itr.n-itr.opt.Offset) > itr.opt.Limit { + continue + } + + return p, nil + } +} + +type unsignedFillIterator struct { + input *bufUnsignedIterator + prev UnsignedPoint + startTime int64 + endTime int64 + auxFields []interface{} + init bool + opt IteratorOptions + + window struct { + name string + tags Tags + time int64 + offset int64 + } +} + +func newUnsignedFillIterator(input UnsignedIterator, expr influxql.Expr, opt IteratorOptions) *unsignedFillIterator { + if opt.Fill == influxql.NullFill { + if expr, ok := expr.(*influxql.Call); ok && expr.Name == "count" { + opt.Fill = influxql.NumberFill + opt.FillValue = uint64(0) + } + } + + var startTime, endTime int64 + if opt.Ascending { + startTime, _ = opt.Window(opt.StartTime) + endTime, _ = opt.Window(opt.EndTime) + } else { + startTime, _ = opt.Window(opt.EndTime) + endTime, _ = opt.Window(opt.StartTime) + } + + var auxFields []interface{} + if len(opt.Aux) > 0 { + auxFields = make([]interface{}, len(opt.Aux)) + } + + return &unsignedFillIterator{ + input: newBufUnsignedIterator(input), + prev: UnsignedPoint{Nil: true}, + startTime: startTime, + endTime: endTime, + auxFields: auxFields, + opt: opt, + } +} + +func (itr *unsignedFillIterator) Stats() IteratorStats { return itr.input.Stats() } +func (itr *unsignedFillIterator) Close() error { return itr.input.Close() } + +func (itr *unsignedFillIterator) Next() (*UnsignedPoint, error) { + if !itr.init { + p, err := itr.input.peek() + if p == nil || err != nil { + return nil, err + } + itr.window.name, itr.window.tags = p.Name, p.Tags + itr.window.time = itr.startTime + if itr.startTime == influxql.MinTime { + itr.window.time, _ = itr.opt.Window(p.Time) + } + if itr.opt.Location != nil { + _, itr.window.offset = itr.opt.Zone(itr.window.time) + } + itr.init = true + } + + p, err := itr.input.Next() + if err != nil { + return nil, err + } + + // Check if the next point is outside of our window or is nil. 
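+ // The CONSTRUCT block below fabricates a point for any interval the input skipped, filling its value according to the fill option (linear, null, number, or previous).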
+ if p == nil || p.Name != itr.window.name || p.Tags.ID() != itr.window.tags.ID() { + // If we are inside of an interval, unread the point and continue below to + // constructing a new point. + if itr.opt.Ascending && itr.window.time <= itr.endTime { + itr.input.unread(p) + p = nil + goto CONSTRUCT + } else if !itr.opt.Ascending && itr.window.time >= itr.endTime && itr.endTime != influxql.MinTime { + itr.input.unread(p) + p = nil + goto CONSTRUCT + } + + // We are *not* in a current interval. If there is no next point, + // we are at the end of all intervals. + if p == nil { + return nil, nil + } + + // Set the new interval. + itr.window.name, itr.window.tags = p.Name, p.Tags + itr.window.time = itr.startTime + if itr.window.time == influxql.MinTime { + itr.window.time, _ = itr.opt.Window(p.Time) + } + if itr.opt.Location != nil { + _, itr.window.offset = itr.opt.Zone(itr.window.time) + } + itr.prev = UnsignedPoint{Nil: true} + } + + // Check if the point is our next expected point. +CONSTRUCT: + if p == nil || (itr.opt.Ascending && p.Time > itr.window.time) || (!itr.opt.Ascending && p.Time < itr.window.time) { + if p != nil { + itr.input.unread(p) + } + + p = &UnsignedPoint{ + Name: itr.window.name, + Tags: itr.window.tags, + Time: itr.window.time, + Aux: itr.auxFields, + } + + switch itr.opt.Fill { + case influxql.LinearFill: + if !itr.prev.Nil { + next, err := itr.input.peek() + if err != nil { + return nil, err + } else if next != nil && next.Name == itr.window.name && next.Tags.ID() == itr.window.tags.ID() { + interval := int64(itr.opt.Interval.Duration) + start := itr.window.time / interval + p.Value = linearUnsigned(start, itr.prev.Time/interval, next.Time/interval, itr.prev.Value, next.Value) + } else { + p.Nil = true + } + } else { + p.Nil = true + } + + case influxql.NullFill: + p.Nil = true + case influxql.NumberFill: + p.Value, _ = castToUnsigned(itr.opt.FillValue) + case influxql.PreviousFill: + if !itr.prev.Nil { + p.Value = itr.prev.Value + p.Nil = itr.prev.Nil + } else { + p.Nil = true + } + } + } else { + itr.prev = *p + } + + // Advance the expected time. Do not advance to a new window here + // as there may be lingering points with the same timestamp in the previous + // window. + if itr.opt.Ascending { + itr.window.time += int64(itr.opt.Interval.Duration) + } else { + itr.window.time -= int64(itr.opt.Interval.Duration) + } + + // Check to see if we have passed over an offset change and adjust the time + // to account for this new offset. + if itr.opt.Location != nil { + if _, offset := itr.opt.Zone(itr.window.time - 1); offset != itr.window.offset { + diff := itr.window.offset - offset + if abs(diff) < int64(itr.opt.Interval.Duration) { + itr.window.time += diff + } + itr.window.offset = offset + } + } + return p, nil +} + +// unsignedIntervalIterator represents a unsigned implementation of IntervalIterator. 
+type unsignedIntervalIterator struct { + input UnsignedIterator + opt IteratorOptions +} + +func newUnsignedIntervalIterator(input UnsignedIterator, opt IteratorOptions) *unsignedIntervalIterator { + return &unsignedIntervalIterator{input: input, opt: opt} +} + +func (itr *unsignedIntervalIterator) Stats() IteratorStats { return itr.input.Stats() } +func (itr *unsignedIntervalIterator) Close() error { return itr.input.Close() } + +func (itr *unsignedIntervalIterator) Next() (*UnsignedPoint, error) { + p, err := itr.input.Next() + if p == nil || err != nil { + return nil, err + } + p.Time, _ = itr.opt.Window(p.Time) + // If we see the minimum allowable time, set the time to zero so we don't + // break the default returned time for aggregate queries without times. + if p.Time == influxql.MinTime { + p.Time = 0 + } + return p, nil +} + +// unsignedInterruptIterator represents a unsigned implementation of InterruptIterator. +type unsignedInterruptIterator struct { + input UnsignedIterator + closing <-chan struct{} + count int +} + +func newUnsignedInterruptIterator(input UnsignedIterator, closing <-chan struct{}) *unsignedInterruptIterator { + return &unsignedInterruptIterator{input: input, closing: closing} +} + +func (itr *unsignedInterruptIterator) Stats() IteratorStats { return itr.input.Stats() } +func (itr *unsignedInterruptIterator) Close() error { return itr.input.Close() } + +func (itr *unsignedInterruptIterator) Next() (*UnsignedPoint, error) { + // Only check if the channel is closed every N points. This + // intentionally checks on both 0 and N so that if the iterator + // has been interrupted before the first point is emitted it will + // not emit any points. + if itr.count&0xFF == 0xFF { + select { + case <-itr.closing: + return nil, itr.Close() + default: + // Reset iterator count to zero and fall through to emit the next point. + itr.count = 0 + } + } + + // Increment the counter for every point read. + itr.count++ + return itr.input.Next() +} + +// unsignedCloseInterruptIterator represents a unsigned implementation of CloseInterruptIterator. +type unsignedCloseInterruptIterator struct { + input UnsignedIterator + closing <-chan struct{} + done chan struct{} + once sync.Once +} + +func newUnsignedCloseInterruptIterator(input UnsignedIterator, closing <-chan struct{}) *unsignedCloseInterruptIterator { + itr := &unsignedCloseInterruptIterator{ + input: input, + closing: closing, + done: make(chan struct{}), + } + go itr.monitor() + return itr +} + +func (itr *unsignedCloseInterruptIterator) monitor() { + select { + case <-itr.closing: + itr.Close() + case <-itr.done: + } +} + +func (itr *unsignedCloseInterruptIterator) Stats() IteratorStats { + return itr.input.Stats() +} + +func (itr *unsignedCloseInterruptIterator) Close() error { + itr.once.Do(func() { + close(itr.done) + itr.input.Close() + }) + return nil +} + +func (itr *unsignedCloseInterruptIterator) Next() (*UnsignedPoint, error) { + p, err := itr.input.Next() + if err != nil { + // Check if the iterator was closed. + select { + case <-itr.done: + return nil, nil + default: + return nil, err + } + } + return p, nil +} + +// unsignedReduceFloatIterator executes a reducer for every interval and buffers the result. 
+type unsignedReduceFloatIterator struct { + input *bufUnsignedIterator + create func() (UnsignedPointAggregator, FloatPointEmitter) + dims []string + opt IteratorOptions + points []FloatPoint + keepTags bool +} + +func newUnsignedReduceFloatIterator(input UnsignedIterator, opt IteratorOptions, createFn func() (UnsignedPointAggregator, FloatPointEmitter)) *unsignedReduceFloatIterator { + return &unsignedReduceFloatIterator{ + input: newBufUnsignedIterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + } +} + +// Stats returns stats from the input iterator. +func (itr *unsignedReduceFloatIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *unsignedReduceFloatIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *unsignedReduceFloatIterator) Next() (*FloatPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// unsignedReduceFloatPoint stores the reduced data for a name/tag combination. +type unsignedReduceFloatPoint struct { + Name string + Tags Tags + Aggregator UnsignedPointAggregator + Emitter FloatPointEmitter +} + +// reduce executes fn once for every point in the next window. +// The previous value for the dimension is passed to fn. +func (itr *unsignedReduceFloatIterator) reduce() ([]FloatPoint, error) { + // Calculate next window. + var ( + startTime, endTime int64 + window struct { + name string + tags string + } + ) + for { + p, err := itr.input.Next() + if err != nil || p == nil { + return nil, err + } else if p.Nil { + continue + } + + // Unread the point so it can be processed. + itr.input.unread(p) + startTime, endTime = itr.opt.Window(p.Time) + window.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID() + break + } + + // Create points by tags. + m := make(map[string]*unsignedReduceFloatPoint) + for { + // Read next point. + curr, err := itr.input.NextInWindow(startTime, endTime) + if err != nil { + return nil, err + } else if curr == nil { + break + } else if curr.Nil { + continue + } else if curr.Name != window.name { + itr.input.unread(curr) + break + } + + // Ensure this point is within the same final window. + if curr.Name != window.name { + itr.input.unread(curr) + break + } else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags { + itr.input.unread(curr) + break + } + + // Retrieve the tags on this point for this level of the query. + // This may be different than the bucket dimensions. + tags := curr.Tags.Subset(itr.dims) + id := tags.ID() + + // Retrieve the aggregator for this name/tag combination or create one. + rp := m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &unsignedReduceFloatPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + m[id] = rp + } + rp.Aggregator.AggregateUnsigned(curr) + } + + // Reverse sort points by name & tag if our output is supposed to be ordered. + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + if len(keys) > 1 && itr.opt.Ordered { + sort.Sort(reverseStringSlice(keys)) + } + + // Assume the points are already sorted until proven otherwise. 
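// Editorial note: when the output must be ordered, the keys were reverse-sorted
// above and the loop below appends each series' points in reverse; since Next()
// pops from the tail of the returned slice, this appears to yield ascending
// name/tag order with each series' points in the order the emitter produced them.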
+ sortedByTime := true + // Emit the points for each name & tag combination. + a := make([]FloatPoint, 0, len(m)) + for _, k := range keys { + rp := m[k] + points := rp.Emitter.Emit() + for i := len(points) - 1; i >= 0; i-- { + points[i].Name = rp.Name + if !itr.keepTags { + points[i].Tags = rp.Tags + } + // Set the points time to the interval time if the reducer didn't provide one. + if points[i].Time == ZeroTime { + points[i].Time = startTime + } else { + sortedByTime = false + } + a = append(a, points[i]) + } + } + + // Points may be out of order. Perform a stable sort by time if requested. + if !sortedByTime && itr.opt.Ordered { + sort.Stable(sort.Reverse(floatPointsByTime(a))) + } + + return a, nil +} + +// unsignedStreamFloatIterator streams inputs into the iterator and emits points gradually. +type unsignedStreamFloatIterator struct { + input *bufUnsignedIterator + create func() (UnsignedPointAggregator, FloatPointEmitter) + dims []string + opt IteratorOptions + m map[string]*unsignedReduceFloatPoint + points []FloatPoint +} + +// newUnsignedStreamFloatIterator returns a new instance of unsignedStreamFloatIterator. +func newUnsignedStreamFloatIterator(input UnsignedIterator, createFn func() (UnsignedPointAggregator, FloatPointEmitter), opt IteratorOptions) *unsignedStreamFloatIterator { + return &unsignedStreamFloatIterator{ + input: newBufUnsignedIterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + m: make(map[string]*unsignedReduceFloatPoint), + } +} + +// Stats returns stats from the input iterator. +func (itr *unsignedStreamFloatIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *unsignedStreamFloatIterator) Close() error { return itr.input.Close() } + +// Next returns the next value for the stream iterator. +func (itr *unsignedStreamFloatIterator) Next() (*FloatPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// reduce creates and manages aggregators for every point from the input. +// After aggregating a point, it always tries to emit a value using the emitter. +func (itr *unsignedStreamFloatIterator) reduce() ([]FloatPoint, error) { + // We have already read all of the input points. + if itr.m == nil { + return nil, nil + } + + for { + // Read next point. + curr, err := itr.input.Next() + if err != nil { + return nil, err + } else if curr == nil { + // Close all of the aggregators to flush any remaining points to emit. + var points []FloatPoint + for _, rp := range itr.m { + if aggregator, ok := rp.Aggregator.(io.Closer); ok { + if err := aggregator.Close(); err != nil { + return nil, err + } + + pts := rp.Emitter.Emit() + if len(pts) == 0 { + continue + } + + for i := range pts { + pts[i].Name = rp.Name + pts[i].Tags = rp.Tags + } + points = append(points, pts...) + } + } + + // Eliminate the aggregators and emitters. + itr.m = nil + return points, nil + } else if curr.Nil { + continue + } + tags := curr.Tags.Subset(itr.dims) + + id := curr.Name + if len(tags.m) > 0 { + id += "\x00" + tags.ID() + } + + // Retrieve the aggregator for this name/tag combination or create one. 
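// Editorial note: the NUL byte spliced into the map key above acts as a
// separator between the measurement name and the tag ID, presumably so that two
// different name/tag combinations cannot concatenate to the same key.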
+ rp := itr.m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &unsignedReduceFloatPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + itr.m[id] = rp + } + rp.Aggregator.AggregateUnsigned(curr) + + // Attempt to emit points from the aggregator. + points := rp.Emitter.Emit() + if len(points) == 0 { + continue + } + + for i := range points { + points[i].Name = rp.Name + points[i].Tags = rp.Tags + } + return points, nil + } +} + +// unsignedReduceIntegerIterator executes a reducer for every interval and buffers the result. +type unsignedReduceIntegerIterator struct { + input *bufUnsignedIterator + create func() (UnsignedPointAggregator, IntegerPointEmitter) + dims []string + opt IteratorOptions + points []IntegerPoint + keepTags bool +} + +func newUnsignedReduceIntegerIterator(input UnsignedIterator, opt IteratorOptions, createFn func() (UnsignedPointAggregator, IntegerPointEmitter)) *unsignedReduceIntegerIterator { + return &unsignedReduceIntegerIterator{ + input: newBufUnsignedIterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + } +} + +// Stats returns stats from the input iterator. +func (itr *unsignedReduceIntegerIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *unsignedReduceIntegerIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *unsignedReduceIntegerIterator) Next() (*IntegerPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// unsignedReduceIntegerPoint stores the reduced data for a name/tag combination. +type unsignedReduceIntegerPoint struct { + Name string + Tags Tags + Aggregator UnsignedPointAggregator + Emitter IntegerPointEmitter +} + +// reduce executes fn once for every point in the next window. +// The previous value for the dimension is passed to fn. +func (itr *unsignedReduceIntegerIterator) reduce() ([]IntegerPoint, error) { + // Calculate next window. + var ( + startTime, endTime int64 + window struct { + name string + tags string + } + ) + for { + p, err := itr.input.Next() + if err != nil || p == nil { + return nil, err + } else if p.Nil { + continue + } + + // Unread the point so it can be processed. + itr.input.unread(p) + startTime, endTime = itr.opt.Window(p.Time) + window.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID() + break + } + + // Create points by tags. + m := make(map[string]*unsignedReduceIntegerPoint) + for { + // Read next point. + curr, err := itr.input.NextInWindow(startTime, endTime) + if err != nil { + return nil, err + } else if curr == nil { + break + } else if curr.Nil { + continue + } else if curr.Name != window.name { + itr.input.unread(curr) + break + } + + // Ensure this point is within the same final window. + if curr.Name != window.name { + itr.input.unread(curr) + break + } else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags { + itr.input.unread(curr) + break + } + + // Retrieve the tags on this point for this level of the query. + // This may be different than the bucket dimensions. 
+ tags := curr.Tags.Subset(itr.dims) + id := tags.ID() + + // Retrieve the aggregator for this name/tag combination or create one. + rp := m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &unsignedReduceIntegerPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + m[id] = rp + } + rp.Aggregator.AggregateUnsigned(curr) + } + + // Reverse sort points by name & tag if our output is supposed to be ordered. + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + if len(keys) > 1 && itr.opt.Ordered { + sort.Sort(reverseStringSlice(keys)) + } + + // Assume the points are already sorted until proven otherwise. + sortedByTime := true + // Emit the points for each name & tag combination. + a := make([]IntegerPoint, 0, len(m)) + for _, k := range keys { + rp := m[k] + points := rp.Emitter.Emit() + for i := len(points) - 1; i >= 0; i-- { + points[i].Name = rp.Name + if !itr.keepTags { + points[i].Tags = rp.Tags + } + // Set the points time to the interval time if the reducer didn't provide one. + if points[i].Time == ZeroTime { + points[i].Time = startTime + } else { + sortedByTime = false + } + a = append(a, points[i]) + } + } + + // Points may be out of order. Perform a stable sort by time if requested. + if !sortedByTime && itr.opt.Ordered { + sort.Stable(sort.Reverse(integerPointsByTime(a))) + } + + return a, nil +} + +// unsignedStreamIntegerIterator streams inputs into the iterator and emits points gradually. +type unsignedStreamIntegerIterator struct { + input *bufUnsignedIterator + create func() (UnsignedPointAggregator, IntegerPointEmitter) + dims []string + opt IteratorOptions + m map[string]*unsignedReduceIntegerPoint + points []IntegerPoint +} + +// newUnsignedStreamIntegerIterator returns a new instance of unsignedStreamIntegerIterator. +func newUnsignedStreamIntegerIterator(input UnsignedIterator, createFn func() (UnsignedPointAggregator, IntegerPointEmitter), opt IteratorOptions) *unsignedStreamIntegerIterator { + return &unsignedStreamIntegerIterator{ + input: newBufUnsignedIterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + m: make(map[string]*unsignedReduceIntegerPoint), + } +} + +// Stats returns stats from the input iterator. +func (itr *unsignedStreamIntegerIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *unsignedStreamIntegerIterator) Close() error { return itr.input.Close() } + +// Next returns the next value for the stream iterator. +func (itr *unsignedStreamIntegerIterator) Next() (*IntegerPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// reduce creates and manages aggregators for every point from the input. +// After aggregating a point, it always tries to emit a value using the emitter. +func (itr *unsignedStreamIntegerIterator) reduce() ([]IntegerPoint, error) { + // We have already read all of the input points. + if itr.m == nil { + return nil, nil + } + + for { + // Read next point. + curr, err := itr.input.Next() + if err != nil { + return nil, err + } else if curr == nil { + // Close all of the aggregators to flush any remaining points to emit. 
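// Editorial note: only aggregators that implement io.Closer receive this final
// flush; other aggregators are expected to have emitted their points
// incrementally, so any state they still hold is simply dropped at end of input.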
+ var points []IntegerPoint + for _, rp := range itr.m { + if aggregator, ok := rp.Aggregator.(io.Closer); ok { + if err := aggregator.Close(); err != nil { + return nil, err + } + + pts := rp.Emitter.Emit() + if len(pts) == 0 { + continue + } + + for i := range pts { + pts[i].Name = rp.Name + pts[i].Tags = rp.Tags + } + points = append(points, pts...) + } + } + + // Eliminate the aggregators and emitters. + itr.m = nil + return points, nil + } else if curr.Nil { + continue + } + tags := curr.Tags.Subset(itr.dims) + + id := curr.Name + if len(tags.m) > 0 { + id += "\x00" + tags.ID() + } + + // Retrieve the aggregator for this name/tag combination or create one. + rp := itr.m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &unsignedReduceIntegerPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + itr.m[id] = rp + } + rp.Aggregator.AggregateUnsigned(curr) + + // Attempt to emit points from the aggregator. + points := rp.Emitter.Emit() + if len(points) == 0 { + continue + } + + for i := range points { + points[i].Name = rp.Name + points[i].Tags = rp.Tags + } + return points, nil + } +} + +// unsignedReduceUnsignedIterator executes a reducer for every interval and buffers the result. +type unsignedReduceUnsignedIterator struct { + input *bufUnsignedIterator + create func() (UnsignedPointAggregator, UnsignedPointEmitter) + dims []string + opt IteratorOptions + points []UnsignedPoint + keepTags bool +} + +func newUnsignedReduceUnsignedIterator(input UnsignedIterator, opt IteratorOptions, createFn func() (UnsignedPointAggregator, UnsignedPointEmitter)) *unsignedReduceUnsignedIterator { + return &unsignedReduceUnsignedIterator{ + input: newBufUnsignedIterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + } +} + +// Stats returns stats from the input iterator. +func (itr *unsignedReduceUnsignedIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *unsignedReduceUnsignedIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *unsignedReduceUnsignedIterator) Next() (*UnsignedPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// unsignedReduceUnsignedPoint stores the reduced data for a name/tag combination. +type unsignedReduceUnsignedPoint struct { + Name string + Tags Tags + Aggregator UnsignedPointAggregator + Emitter UnsignedPointEmitter +} + +// reduce executes fn once for every point in the next window. +// The previous value for the dimension is passed to fn. +func (itr *unsignedReduceUnsignedIterator) reduce() ([]UnsignedPoint, error) { + // Calculate next window. + var ( + startTime, endTime int64 + window struct { + name string + tags string + } + ) + for { + p, err := itr.input.Next() + if err != nil || p == nil { + return nil, err + } else if p.Nil { + continue + } + + // Unread the point so it can be processed. + itr.input.unread(p) + startTime, endTime = itr.opt.Window(p.Time) + window.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID() + break + } + + // Create points by tags. 
+ m := make(map[string]*unsignedReduceUnsignedPoint) + for { + // Read next point. + curr, err := itr.input.NextInWindow(startTime, endTime) + if err != nil { + return nil, err + } else if curr == nil { + break + } else if curr.Nil { + continue + } else if curr.Name != window.name { + itr.input.unread(curr) + break + } + + // Ensure this point is within the same final window. + if curr.Name != window.name { + itr.input.unread(curr) + break + } else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags { + itr.input.unread(curr) + break + } + + // Retrieve the tags on this point for this level of the query. + // This may be different than the bucket dimensions. + tags := curr.Tags.Subset(itr.dims) + id := tags.ID() + + // Retrieve the aggregator for this name/tag combination or create one. + rp := m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &unsignedReduceUnsignedPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + m[id] = rp + } + rp.Aggregator.AggregateUnsigned(curr) + } + + // Reverse sort points by name & tag if our output is supposed to be ordered. + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + if len(keys) > 1 && itr.opt.Ordered { + sort.Sort(reverseStringSlice(keys)) + } + + // Assume the points are already sorted until proven otherwise. + sortedByTime := true + // Emit the points for each name & tag combination. + a := make([]UnsignedPoint, 0, len(m)) + for _, k := range keys { + rp := m[k] + points := rp.Emitter.Emit() + for i := len(points) - 1; i >= 0; i-- { + points[i].Name = rp.Name + if !itr.keepTags { + points[i].Tags = rp.Tags + } + // Set the points time to the interval time if the reducer didn't provide one. + if points[i].Time == ZeroTime { + points[i].Time = startTime + } else { + sortedByTime = false + } + a = append(a, points[i]) + } + } + + // Points may be out of order. Perform a stable sort by time if requested. + if !sortedByTime && itr.opt.Ordered { + sort.Stable(sort.Reverse(unsignedPointsByTime(a))) + } + + return a, nil +} + +// unsignedStreamUnsignedIterator streams inputs into the iterator and emits points gradually. +type unsignedStreamUnsignedIterator struct { + input *bufUnsignedIterator + create func() (UnsignedPointAggregator, UnsignedPointEmitter) + dims []string + opt IteratorOptions + m map[string]*unsignedReduceUnsignedPoint + points []UnsignedPoint +} + +// newUnsignedStreamUnsignedIterator returns a new instance of unsignedStreamUnsignedIterator. +func newUnsignedStreamUnsignedIterator(input UnsignedIterator, createFn func() (UnsignedPointAggregator, UnsignedPointEmitter), opt IteratorOptions) *unsignedStreamUnsignedIterator { + return &unsignedStreamUnsignedIterator{ + input: newBufUnsignedIterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + m: make(map[string]*unsignedReduceUnsignedPoint), + } +} + +// Stats returns stats from the input iterator. +func (itr *unsignedStreamUnsignedIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *unsignedStreamUnsignedIterator) Close() error { return itr.input.Close() } + +// Next returns the next value for the stream iterator. +func (itr *unsignedStreamUnsignedIterator) Next() (*UnsignedPoint, error) { + // Calculate next window if we have no more points. 
+ if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// reduce creates and manages aggregators for every point from the input. +// After aggregating a point, it always tries to emit a value using the emitter. +func (itr *unsignedStreamUnsignedIterator) reduce() ([]UnsignedPoint, error) { + // We have already read all of the input points. + if itr.m == nil { + return nil, nil + } + + for { + // Read next point. + curr, err := itr.input.Next() + if err != nil { + return nil, err + } else if curr == nil { + // Close all of the aggregators to flush any remaining points to emit. + var points []UnsignedPoint + for _, rp := range itr.m { + if aggregator, ok := rp.Aggregator.(io.Closer); ok { + if err := aggregator.Close(); err != nil { + return nil, err + } + + pts := rp.Emitter.Emit() + if len(pts) == 0 { + continue + } + + for i := range pts { + pts[i].Name = rp.Name + pts[i].Tags = rp.Tags + } + points = append(points, pts...) + } + } + + // Eliminate the aggregators and emitters. + itr.m = nil + return points, nil + } else if curr.Nil { + continue + } + tags := curr.Tags.Subset(itr.dims) + + id := curr.Name + if len(tags.m) > 0 { + id += "\x00" + tags.ID() + } + + // Retrieve the aggregator for this name/tag combination or create one. + rp := itr.m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &unsignedReduceUnsignedPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + itr.m[id] = rp + } + rp.Aggregator.AggregateUnsigned(curr) + + // Attempt to emit points from the aggregator. + points := rp.Emitter.Emit() + if len(points) == 0 { + continue + } + + for i := range points { + points[i].Name = rp.Name + points[i].Tags = rp.Tags + } + return points, nil + } +} + +// unsignedReduceStringIterator executes a reducer for every interval and buffers the result. +type unsignedReduceStringIterator struct { + input *bufUnsignedIterator + create func() (UnsignedPointAggregator, StringPointEmitter) + dims []string + opt IteratorOptions + points []StringPoint + keepTags bool +} + +func newUnsignedReduceStringIterator(input UnsignedIterator, opt IteratorOptions, createFn func() (UnsignedPointAggregator, StringPointEmitter)) *unsignedReduceStringIterator { + return &unsignedReduceStringIterator{ + input: newBufUnsignedIterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + } +} + +// Stats returns stats from the input iterator. +func (itr *unsignedReduceStringIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *unsignedReduceStringIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *unsignedReduceStringIterator) Next() (*StringPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// unsignedReduceStringPoint stores the reduced data for a name/tag combination. 
+type unsignedReduceStringPoint struct { + Name string + Tags Tags + Aggregator UnsignedPointAggregator + Emitter StringPointEmitter +} + +// reduce executes fn once for every point in the next window. +// The previous value for the dimension is passed to fn. +func (itr *unsignedReduceStringIterator) reduce() ([]StringPoint, error) { + // Calculate next window. + var ( + startTime, endTime int64 + window struct { + name string + tags string + } + ) + for { + p, err := itr.input.Next() + if err != nil || p == nil { + return nil, err + } else if p.Nil { + continue + } + + // Unread the point so it can be processed. + itr.input.unread(p) + startTime, endTime = itr.opt.Window(p.Time) + window.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID() + break + } + + // Create points by tags. + m := make(map[string]*unsignedReduceStringPoint) + for { + // Read next point. + curr, err := itr.input.NextInWindow(startTime, endTime) + if err != nil { + return nil, err + } else if curr == nil { + break + } else if curr.Nil { + continue + } else if curr.Name != window.name { + itr.input.unread(curr) + break + } + + // Ensure this point is within the same final window. + if curr.Name != window.name { + itr.input.unread(curr) + break + } else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags { + itr.input.unread(curr) + break + } + + // Retrieve the tags on this point for this level of the query. + // This may be different than the bucket dimensions. + tags := curr.Tags.Subset(itr.dims) + id := tags.ID() + + // Retrieve the aggregator for this name/tag combination or create one. + rp := m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &unsignedReduceStringPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + m[id] = rp + } + rp.Aggregator.AggregateUnsigned(curr) + } + + // Reverse sort points by name & tag if our output is supposed to be ordered. + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + if len(keys) > 1 && itr.opt.Ordered { + sort.Sort(reverseStringSlice(keys)) + } + + // Assume the points are already sorted until proven otherwise. + sortedByTime := true + // Emit the points for each name & tag combination. + a := make([]StringPoint, 0, len(m)) + for _, k := range keys { + rp := m[k] + points := rp.Emitter.Emit() + for i := len(points) - 1; i >= 0; i-- { + points[i].Name = rp.Name + if !itr.keepTags { + points[i].Tags = rp.Tags + } + // Set the points time to the interval time if the reducer didn't provide one. + if points[i].Time == ZeroTime { + points[i].Time = startTime + } else { + sortedByTime = false + } + a = append(a, points[i]) + } + } + + // Points may be out of order. Perform a stable sort by time if requested. + if !sortedByTime && itr.opt.Ordered { + sort.Stable(sort.Reverse(stringPointsByTime(a))) + } + + return a, nil +} + +// unsignedStreamStringIterator streams inputs into the iterator and emits points gradually. +type unsignedStreamStringIterator struct { + input *bufUnsignedIterator + create func() (UnsignedPointAggregator, StringPointEmitter) + dims []string + opt IteratorOptions + m map[string]*unsignedReduceStringPoint + points []StringPoint +} + +// newUnsignedStreamStringIterator returns a new instance of unsignedStreamStringIterator. 
+func newUnsignedStreamStringIterator(input UnsignedIterator, createFn func() (UnsignedPointAggregator, StringPointEmitter), opt IteratorOptions) *unsignedStreamStringIterator { + return &unsignedStreamStringIterator{ + input: newBufUnsignedIterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + m: make(map[string]*unsignedReduceStringPoint), + } +} + +// Stats returns stats from the input iterator. +func (itr *unsignedStreamStringIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *unsignedStreamStringIterator) Close() error { return itr.input.Close() } + +// Next returns the next value for the stream iterator. +func (itr *unsignedStreamStringIterator) Next() (*StringPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// reduce creates and manages aggregators for every point from the input. +// After aggregating a point, it always tries to emit a value using the emitter. +func (itr *unsignedStreamStringIterator) reduce() ([]StringPoint, error) { + // We have already read all of the input points. + if itr.m == nil { + return nil, nil + } + + for { + // Read next point. + curr, err := itr.input.Next() + if err != nil { + return nil, err + } else if curr == nil { + // Close all of the aggregators to flush any remaining points to emit. + var points []StringPoint + for _, rp := range itr.m { + if aggregator, ok := rp.Aggregator.(io.Closer); ok { + if err := aggregator.Close(); err != nil { + return nil, err + } + + pts := rp.Emitter.Emit() + if len(pts) == 0 { + continue + } + + for i := range pts { + pts[i].Name = rp.Name + pts[i].Tags = rp.Tags + } + points = append(points, pts...) + } + } + + // Eliminate the aggregators and emitters. + itr.m = nil + return points, nil + } else if curr.Nil { + continue + } + tags := curr.Tags.Subset(itr.dims) + + id := curr.Name + if len(tags.m) > 0 { + id += "\x00" + tags.ID() + } + + // Retrieve the aggregator for this name/tag combination or create one. + rp := itr.m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &unsignedReduceStringPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + itr.m[id] = rp + } + rp.Aggregator.AggregateUnsigned(curr) + + // Attempt to emit points from the aggregator. + points := rp.Emitter.Emit() + if len(points) == 0 { + continue + } + + for i := range points { + points[i].Name = rp.Name + points[i].Tags = rp.Tags + } + return points, nil + } +} + +// unsignedReduceBooleanIterator executes a reducer for every interval and buffers the result. +type unsignedReduceBooleanIterator struct { + input *bufUnsignedIterator + create func() (UnsignedPointAggregator, BooleanPointEmitter) + dims []string + opt IteratorOptions + points []BooleanPoint + keepTags bool +} + +func newUnsignedReduceBooleanIterator(input UnsignedIterator, opt IteratorOptions, createFn func() (UnsignedPointAggregator, BooleanPointEmitter)) *unsignedReduceBooleanIterator { + return &unsignedReduceBooleanIterator{ + input: newBufUnsignedIterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + } +} + +// Stats returns stats from the input iterator. 
+func (itr *unsignedReduceBooleanIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *unsignedReduceBooleanIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *unsignedReduceBooleanIterator) Next() (*BooleanPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// unsignedReduceBooleanPoint stores the reduced data for a name/tag combination. +type unsignedReduceBooleanPoint struct { + Name string + Tags Tags + Aggregator UnsignedPointAggregator + Emitter BooleanPointEmitter +} + +// reduce executes fn once for every point in the next window. +// The previous value for the dimension is passed to fn. +func (itr *unsignedReduceBooleanIterator) reduce() ([]BooleanPoint, error) { + // Calculate next window. + var ( + startTime, endTime int64 + window struct { + name string + tags string + } + ) + for { + p, err := itr.input.Next() + if err != nil || p == nil { + return nil, err + } else if p.Nil { + continue + } + + // Unread the point so it can be processed. + itr.input.unread(p) + startTime, endTime = itr.opt.Window(p.Time) + window.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID() + break + } + + // Create points by tags. + m := make(map[string]*unsignedReduceBooleanPoint) + for { + // Read next point. + curr, err := itr.input.NextInWindow(startTime, endTime) + if err != nil { + return nil, err + } else if curr == nil { + break + } else if curr.Nil { + continue + } else if curr.Name != window.name { + itr.input.unread(curr) + break + } + + // Ensure this point is within the same final window. + if curr.Name != window.name { + itr.input.unread(curr) + break + } else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags { + itr.input.unread(curr) + break + } + + // Retrieve the tags on this point for this level of the query. + // This may be different than the bucket dimensions. + tags := curr.Tags.Subset(itr.dims) + id := tags.ID() + + // Retrieve the aggregator for this name/tag combination or create one. + rp := m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &unsignedReduceBooleanPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + m[id] = rp + } + rp.Aggregator.AggregateUnsigned(curr) + } + + // Reverse sort points by name & tag if our output is supposed to be ordered. + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + if len(keys) > 1 && itr.opt.Ordered { + sort.Sort(reverseStringSlice(keys)) + } + + // Assume the points are already sorted until proven otherwise. + sortedByTime := true + // Emit the points for each name & tag combination. + a := make([]BooleanPoint, 0, len(m)) + for _, k := range keys { + rp := m[k] + points := rp.Emitter.Emit() + for i := len(points) - 1; i >= 0; i-- { + points[i].Name = rp.Name + if !itr.keepTags { + points[i].Tags = rp.Tags + } + // Set the points time to the interval time if the reducer didn't provide one. + if points[i].Time == ZeroTime { + points[i].Time = startTime + } else { + sortedByTime = false + } + a = append(a, points[i]) + } + } + + // Points may be out of order. 
Perform a stable sort by time if requested. + if !sortedByTime && itr.opt.Ordered { + sort.Stable(sort.Reverse(booleanPointsByTime(a))) + } + + return a, nil +} + +// unsignedStreamBooleanIterator streams inputs into the iterator and emits points gradually. +type unsignedStreamBooleanIterator struct { + input *bufUnsignedIterator + create func() (UnsignedPointAggregator, BooleanPointEmitter) + dims []string + opt IteratorOptions + m map[string]*unsignedReduceBooleanPoint + points []BooleanPoint +} + +// newUnsignedStreamBooleanIterator returns a new instance of unsignedStreamBooleanIterator. +func newUnsignedStreamBooleanIterator(input UnsignedIterator, createFn func() (UnsignedPointAggregator, BooleanPointEmitter), opt IteratorOptions) *unsignedStreamBooleanIterator { + return &unsignedStreamBooleanIterator{ + input: newBufUnsignedIterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + m: make(map[string]*unsignedReduceBooleanPoint), + } +} + +// Stats returns stats from the input iterator. +func (itr *unsignedStreamBooleanIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *unsignedStreamBooleanIterator) Close() error { return itr.input.Close() } + +// Next returns the next value for the stream iterator. +func (itr *unsignedStreamBooleanIterator) Next() (*BooleanPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// reduce creates and manages aggregators for every point from the input. +// After aggregating a point, it always tries to emit a value using the emitter. +func (itr *unsignedStreamBooleanIterator) reduce() ([]BooleanPoint, error) { + // We have already read all of the input points. + if itr.m == nil { + return nil, nil + } + + for { + // Read next point. + curr, err := itr.input.Next() + if err != nil { + return nil, err + } else if curr == nil { + // Close all of the aggregators to flush any remaining points to emit. + var points []BooleanPoint + for _, rp := range itr.m { + if aggregator, ok := rp.Aggregator.(io.Closer); ok { + if err := aggregator.Close(); err != nil { + return nil, err + } + + pts := rp.Emitter.Emit() + if len(pts) == 0 { + continue + } + + for i := range pts { + pts[i].Name = rp.Name + pts[i].Tags = rp.Tags + } + points = append(points, pts...) + } + } + + // Eliminate the aggregators and emitters. + itr.m = nil + return points, nil + } else if curr.Nil { + continue + } + tags := curr.Tags.Subset(itr.dims) + + id := curr.Name + if len(tags.m) > 0 { + id += "\x00" + tags.ID() + } + + // Retrieve the aggregator for this name/tag combination or create one. + rp := itr.m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &unsignedReduceBooleanPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + itr.m[id] = rp + } + rp.Aggregator.AggregateUnsigned(curr) + + // Attempt to emit points from the aggregator. + points := rp.Emitter.Emit() + if len(points) == 0 { + continue + } + + for i := range points { + points[i].Name = rp.Name + points[i].Tags = rp.Tags + } + return points, nil + } +} + +// unsignedDedupeIterator only outputs unique points. +// This differs from the DistinctIterator in that it compares all aux fields too. 
+// This iterator is relatively inefficient and should only be used on small +// datasets such as meta query results. +type unsignedDedupeIterator struct { + input UnsignedIterator + m map[string]struct{} // lookup of points already sent +} + +type unsignedIteratorMapper struct { + cur Cursor + row Row + driver IteratorMap // which iterator to use for the primary value, can be nil + fields []IteratorMap // which iterator to use for an aux field + point UnsignedPoint +} + +func newUnsignedIteratorMapper(cur Cursor, driver IteratorMap, fields []IteratorMap, opt IteratorOptions) *unsignedIteratorMapper { + return &unsignedIteratorMapper{ + cur: cur, + driver: driver, + fields: fields, + point: UnsignedPoint{ + Aux: make([]interface{}, len(fields)), + }, + } +} + +func (itr *unsignedIteratorMapper) Next() (*UnsignedPoint, error) { + if !itr.cur.Scan(&itr.row) { + if err := itr.cur.Err(); err != nil { + return nil, err + } + return nil, nil + } + + itr.point.Time = itr.row.Time + itr.point.Name = itr.row.Series.Name + itr.point.Tags = itr.row.Series.Tags + + if itr.driver != nil { + if v := itr.driver.Value(&itr.row); v != nil { + if v, ok := castToUnsigned(v); ok { + itr.point.Value = v + itr.point.Nil = false + } else { + itr.point.Value = 0 + itr.point.Nil = true + } + } else { + itr.point.Value = 0 + itr.point.Nil = true + } + } + for i, f := range itr.fields { + itr.point.Aux[i] = f.Value(&itr.row) + } + return &itr.point, nil +} + +func (itr *unsignedIteratorMapper) Stats() IteratorStats { + return itr.cur.Stats() +} + +func (itr *unsignedIteratorMapper) Close() error { + return itr.cur.Close() +} + +type unsignedFilterIterator struct { + input UnsignedIterator + cond influxql.Expr + opt IteratorOptions + m map[string]interface{} +} + +func newUnsignedFilterIterator(input UnsignedIterator, cond influxql.Expr, opt IteratorOptions) UnsignedIterator { + // Strip out time conditions from the WHERE clause. + // TODO(jsternberg): This should really be done for us when creating the IteratorOptions struct. 
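// Editorial note: the rewrite below replaces any "time ..." comparison in the
// condition with a literal true; time filtering is handled separately through
// the iterator's time range, so evaluating the raw time comparison per point
// would be redundant, as the TODO above also suggests.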
+ n := influxql.RewriteFunc(influxql.CloneExpr(cond), func(n influxql.Node) influxql.Node { + switch n := n.(type) { + case *influxql.BinaryExpr: + if n.LHS.String() == "time" { + return &influxql.BooleanLiteral{Val: true} + } + } + return n + }) + + cond, _ = n.(influxql.Expr) + if cond == nil { + return input + } else if n, ok := cond.(*influxql.BooleanLiteral); ok && n.Val { + return input + } + + return &unsignedFilterIterator{ + input: input, + cond: cond, + opt: opt, + m: make(map[string]interface{}), + } +} + +func (itr *unsignedFilterIterator) Stats() IteratorStats { return itr.input.Stats() } +func (itr *unsignedFilterIterator) Close() error { return itr.input.Close() } + +func (itr *unsignedFilterIterator) Next() (*UnsignedPoint, error) { + for { + p, err := itr.input.Next() + if err != nil || p == nil { + return nil, err + } + + for i, ref := range itr.opt.Aux { + itr.m[ref.Val] = p.Aux[i] + } + for k, v := range p.Tags.KeyValues() { + itr.m[k] = v + } + + if !influxql.EvalBool(itr.cond, itr.m) { + continue + } + return p, nil + } +} + +type unsignedTagSubsetIterator struct { + input UnsignedIterator + point UnsignedPoint + lastTags Tags + dimensions []string +} + +func newUnsignedTagSubsetIterator(input UnsignedIterator, opt IteratorOptions) *unsignedTagSubsetIterator { + return &unsignedTagSubsetIterator{ + input: input, + dimensions: opt.GetDimensions(), + } +} + +func (itr *unsignedTagSubsetIterator) Next() (*UnsignedPoint, error) { + p, err := itr.input.Next() + if err != nil { + return nil, err + } else if p == nil { + return nil, nil + } + + itr.point.Name = p.Name + if !p.Tags.Equal(itr.lastTags) { + itr.point.Tags = p.Tags.Subset(itr.dimensions) + itr.lastTags = p.Tags + } + itr.point.Time = p.Time + itr.point.Value = p.Value + itr.point.Aux = p.Aux + itr.point.Aggregated = p.Aggregated + itr.point.Nil = p.Nil + return &itr.point, nil +} + +func (itr *unsignedTagSubsetIterator) Stats() IteratorStats { + return itr.input.Stats() +} + +func (itr *unsignedTagSubsetIterator) Close() error { + return itr.input.Close() +} + +// newUnsignedDedupeIterator returns a new instance of unsignedDedupeIterator. +func newUnsignedDedupeIterator(input UnsignedIterator) *unsignedDedupeIterator { + return &unsignedDedupeIterator{ + input: input, + m: make(map[string]struct{}), + } +} + +// Stats returns stats from the input iterator. +func (itr *unsignedDedupeIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *unsignedDedupeIterator) Close() error { return itr.input.Close() } + +// Next returns the next unique point from the input iterator. +func (itr *unsignedDedupeIterator) Next() (*UnsignedPoint, error) { + for { + // Read next point. + p, err := itr.input.Next() + if p == nil || err != nil { + return nil, err + } + + // Serialize to bytes to store in lookup. + buf, err := proto.Marshal(encodeUnsignedPoint(p)) + if err != nil { + return nil, err + } + + // If the point has already been output then move to the next point. + if _, ok := itr.m[string(buf)]; ok { + continue + } + + // Otherwise mark it as emitted and return point. + itr.m[string(buf)] = struct{}{} + return p, nil + } +} + +// unsignedReaderIterator represents an iterator that streams from a reader. +type unsignedReaderIterator struct { + r io.Reader + dec *UnsignedPointDecoder +} + +// newUnsignedReaderIterator returns a new instance of unsignedReaderIterator. 
+func newUnsignedReaderIterator(ctx context.Context, r io.Reader, stats IteratorStats) *unsignedReaderIterator { + dec := NewUnsignedPointDecoder(ctx, r) + dec.stats = stats + + return &unsignedReaderIterator{ + r: r, + dec: dec, + } +} + +// Stats returns stats about points processed. +func (itr *unsignedReaderIterator) Stats() IteratorStats { return itr.dec.stats } + +// Close closes the underlying reader, if applicable. +func (itr *unsignedReaderIterator) Close() error { + if r, ok := itr.r.(io.ReadCloser); ok { + return r.Close() + } + return nil +} + +// Next returns the next point from the iterator. +func (itr *unsignedReaderIterator) Next() (*UnsignedPoint, error) { + // OPTIMIZE(benbjohnson): Reuse point on iterator. + + // Unmarshal next point. + p := &UnsignedPoint{} + if err := itr.dec.DecodeUnsignedPoint(p); err == io.EOF { + return nil, nil + } else if err != nil { + return nil, err + } + return p, nil +} + +// StringIterator represents a stream of string points. +type StringIterator interface { + Iterator + Next() (*StringPoint, error) +} + +// newStringIterators converts a slice of Iterator to a slice of StringIterator. +// Drop and closes any iterator in itrs that is not a StringIterator and cannot +// be cast to a StringIterator. +func newStringIterators(itrs []Iterator) []StringIterator { + a := make([]StringIterator, 0, len(itrs)) + for _, itr := range itrs { + switch itr := itr.(type) { + case StringIterator: + a = append(a, itr) + default: + itr.Close() + } + } + return a +} + +// bufStringIterator represents a buffered StringIterator. +type bufStringIterator struct { + itr StringIterator + buf *StringPoint +} + +// newBufStringIterator returns a buffered StringIterator. +func newBufStringIterator(itr StringIterator) *bufStringIterator { + return &bufStringIterator{itr: itr} +} + +// Stats returns statistics from the input iterator. +func (itr *bufStringIterator) Stats() IteratorStats { return itr.itr.Stats() } + +// Close closes the underlying iterator. +func (itr *bufStringIterator) Close() error { return itr.itr.Close() } + +// peek returns the next point without removing it from the iterator. +func (itr *bufStringIterator) peek() (*StringPoint, error) { + p, err := itr.Next() + if err != nil { + return nil, err + } + itr.unread(p) + return p, nil +} + +// peekTime returns the time of the next point. +// Returns zero time if no more points available. +func (itr *bufStringIterator) peekTime() (int64, error) { + p, err := itr.peek() + if p == nil || err != nil { + return ZeroTime, err + } + return p.Time, nil +} + +// Next returns the current buffer, if exists, or calls the underlying iterator. +func (itr *bufStringIterator) Next() (*StringPoint, error) { + buf := itr.buf + if buf != nil { + itr.buf = nil + return buf, nil + } + return itr.itr.Next() +} + +// NextInWindow returns the next value if it is between [startTime, endTime). +// If the next value is outside the range then it is moved to the buffer. +func (itr *bufStringIterator) NextInWindow(startTime, endTime int64) (*StringPoint, error) { + v, err := itr.Next() + if v == nil || err != nil { + return nil, err + } else if t := v.Time; t >= endTime || t < startTime { + itr.unread(v) + return nil, nil + } + return v, nil +} + +// unread sets v to the buffer. It is read on the next call to Next(). +func (itr *bufStringIterator) unread(v *StringPoint) { itr.buf = v } + +// stringMergeIterator represents an iterator that combines multiple string iterators. 
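// Editorial note: this merge iterator interleaves its inputs a window at a
// time, draining the current input's points for the active window before
// consulting the heap again, whereas the sorted merge iterator further below
// produces a single fully ordered stream (by name, tags, then time). Both defer
// heap initialization to the first Next() call so that building the iterators
// during Select() stays cheap and interruptible.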
+type stringMergeIterator struct { + inputs []StringIterator + heap *stringMergeHeap + init bool + + closed bool + mu sync.RWMutex + + // Current iterator and window. + curr *stringMergeHeapItem + window struct { + name string + tags string + startTime int64 + endTime int64 + } +} + +// newStringMergeIterator returns a new instance of stringMergeIterator. +func newStringMergeIterator(inputs []StringIterator, opt IteratorOptions) *stringMergeIterator { + itr := &stringMergeIterator{ + inputs: inputs, + heap: &stringMergeHeap{ + items: make([]*stringMergeHeapItem, 0, len(inputs)), + opt: opt, + }, + } + + // Initialize heap items. + for _, input := range inputs { + // Wrap in buffer, ignore any inputs without anymore points. + bufInput := newBufStringIterator(input) + + // Append to the heap. + itr.heap.items = append(itr.heap.items, &stringMergeHeapItem{itr: bufInput}) + } + + return itr +} + +// Stats returns an aggregation of stats from the underlying iterators. +func (itr *stringMergeIterator) Stats() IteratorStats { + var stats IteratorStats + for _, input := range itr.inputs { + stats.Add(input.Stats()) + } + return stats +} + +// Close closes the underlying iterators. +func (itr *stringMergeIterator) Close() error { + itr.mu.Lock() + defer itr.mu.Unlock() + + for _, input := range itr.inputs { + input.Close() + } + itr.curr = nil + itr.inputs = nil + itr.heap.items = nil + itr.closed = true + return nil +} + +// Next returns the next point from the iterator. +func (itr *stringMergeIterator) Next() (*StringPoint, error) { + itr.mu.RLock() + defer itr.mu.RUnlock() + if itr.closed { + return nil, nil + } + + // Initialize the heap. This needs to be done lazily on the first call to this iterator + // so that iterator initialization done through the Select() call returns quickly. + // Queries can only be interrupted after the Select() call completes so any operations + // done during iterator creation cannot be interrupted, which is why we do it here + // instead so an interrupt can happen while initializing the heap. + if !itr.init { + items := itr.heap.items + itr.heap.items = make([]*stringMergeHeapItem, 0, len(items)) + for _, item := range items { + if p, err := item.itr.peek(); err != nil { + return nil, err + } else if p == nil { + continue + } + itr.heap.items = append(itr.heap.items, item) + } + heap.Init(itr.heap) + itr.init = true + } + + for { + // Retrieve the next iterator if we don't have one. + if itr.curr == nil { + if len(itr.heap.items) == 0 { + return nil, nil + } + itr.curr = heap.Pop(itr.heap).(*stringMergeHeapItem) + + // Read point and set current window. + p, err := itr.curr.itr.Next() + if err != nil { + return nil, err + } + tags := p.Tags.Subset(itr.heap.opt.Dimensions) + itr.window.name, itr.window.tags = p.Name, tags.ID() + itr.window.startTime, itr.window.endTime = itr.heap.opt.Window(p.Time) + return p, nil + } + + // Read the next point from the current iterator. + p, err := itr.curr.itr.Next() + if err != nil { + return nil, err + } + + // If there are no more points then remove iterator from heap and find next. + if p == nil { + itr.curr = nil + continue + } + + // Check if the point is inside of our current window. 
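// Editorial note: windows are half-open on the side the iterator is moving
// toward, so for ascending iteration a point with Time >= endTime has left the
// current window, while for descending iteration a point with Time < startTime has.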
+ inWindow := true + if window := itr.window; window.name != p.Name { + inWindow = false + } else if tags := p.Tags.Subset(itr.heap.opt.Dimensions); window.tags != tags.ID() { + inWindow = false + } else if opt := itr.heap.opt; opt.Ascending && p.Time >= window.endTime { + inWindow = false + } else if !opt.Ascending && p.Time < window.startTime { + inWindow = false + } + + // If it's outside our window then push iterator back on the heap and find new iterator. + if !inWindow { + itr.curr.itr.unread(p) + heap.Push(itr.heap, itr.curr) + itr.curr = nil + continue + } + + return p, nil + } +} + +// stringMergeHeap represents a heap of stringMergeHeapItems. +// Items are sorted by their next window and then by name/tags. +type stringMergeHeap struct { + opt IteratorOptions + items []*stringMergeHeapItem +} + +func (h *stringMergeHeap) Len() int { return len(h.items) } +func (h *stringMergeHeap) Swap(i, j int) { h.items[i], h.items[j] = h.items[j], h.items[i] } +func (h *stringMergeHeap) Less(i, j int) bool { + x, err := h.items[i].itr.peek() + if err != nil { + return true + } + y, err := h.items[j].itr.peek() + if err != nil { + return false + } + + if h.opt.Ascending { + if x.Name != y.Name { + return x.Name < y.Name + } else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); xTags.ID() != yTags.ID() { + return xTags.ID() < yTags.ID() + } + } else { + if x.Name != y.Name { + return x.Name > y.Name + } else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); xTags.ID() != yTags.ID() { + return xTags.ID() > yTags.ID() + } + } + + xt, _ := h.opt.Window(x.Time) + yt, _ := h.opt.Window(y.Time) + + if h.opt.Ascending { + return xt < yt + } + return xt > yt +} + +func (h *stringMergeHeap) Push(x interface{}) { + h.items = append(h.items, x.(*stringMergeHeapItem)) +} + +func (h *stringMergeHeap) Pop() interface{} { + old := h.items + n := len(old) + item := old[n-1] + h.items = old[0 : n-1] + return item +} + +type stringMergeHeapItem struct { + itr *bufStringIterator +} + +// stringSortedMergeIterator is an iterator that sorts and merges multiple iterators into one. +type stringSortedMergeIterator struct { + inputs []StringIterator + heap *stringSortedMergeHeap + init bool +} + +// newStringSortedMergeIterator returns an instance of stringSortedMergeIterator. +func newStringSortedMergeIterator(inputs []StringIterator, opt IteratorOptions) Iterator { + itr := &stringSortedMergeIterator{ + inputs: inputs, + heap: &stringSortedMergeHeap{ + items: make([]*stringSortedMergeHeapItem, 0, len(inputs)), + opt: opt, + }, + } + + // Initialize heap items. + for _, input := range inputs { + // Append to the heap. + itr.heap.items = append(itr.heap.items, &stringSortedMergeHeapItem{itr: input}) + } + + return itr +} + +// Stats returns an aggregation of stats from the underlying iterators. +func (itr *stringSortedMergeIterator) Stats() IteratorStats { + var stats IteratorStats + for _, input := range itr.inputs { + stats.Add(input.Stats()) + } + return stats +} + +// Close closes the underlying iterators. +func (itr *stringSortedMergeIterator) Close() error { + for _, input := range itr.inputs { + input.Close() + } + return nil +} + +// Next returns the next points from the iterator. +func (itr *stringSortedMergeIterator) Next() (*StringPoint, error) { return itr.pop() } + +// pop returns the next point from the heap. +// Reads the next point from item's cursor and puts it back on the heap. 
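// Editorial note: each heap item buffers exactly one decoded point; pop()
// returns a clone of that point and immediately refills the item from its
// iterator, pushing the item back onto the heap only if another point was available.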
+func (itr *stringSortedMergeIterator) pop() (*StringPoint, error) { + // Initialize the heap. See the MergeIterator to see why this has to be done lazily. + if !itr.init { + items := itr.heap.items + itr.heap.items = make([]*stringSortedMergeHeapItem, 0, len(items)) + for _, item := range items { + var err error + if item.point, err = item.itr.Next(); err != nil { + return nil, err + } else if item.point == nil { + continue + } + itr.heap.items = append(itr.heap.items, item) + } + heap.Init(itr.heap) + itr.init = true + } + + if len(itr.heap.items) == 0 { + return nil, nil + } + + // Read the next item from the heap. + item := heap.Pop(itr.heap).(*stringSortedMergeHeapItem) + if item.err != nil { + return nil, item.err + } else if item.point == nil { + return nil, nil + } + + // Copy the point for return. + p := item.point.Clone() + + // Read the next item from the cursor. Push back to heap if one exists. + if item.point, item.err = item.itr.Next(); item.point != nil { + heap.Push(itr.heap, item) + } + + return p, nil +} + +// stringSortedMergeHeap represents a heap of stringSortedMergeHeapItems. +// Items are sorted with the following priority: +// - By their measurement name; +// - By their tag keys/values; +// - By time; or +// - By their Aux field values. +// +type stringSortedMergeHeap struct { + opt IteratorOptions + items []*stringSortedMergeHeapItem +} + +func (h *stringSortedMergeHeap) Len() int { return len(h.items) } +func (h *stringSortedMergeHeap) Swap(i, j int) { h.items[i], h.items[j] = h.items[j], h.items[i] } +func (h *stringSortedMergeHeap) Less(i, j int) bool { + x, y := h.items[i].point, h.items[j].point + + if h.opt.Ascending { + if x.Name != y.Name { + return x.Name < y.Name + } else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); !xTags.Equals(&yTags) { + return xTags.ID() < yTags.ID() + } + + if x.Time != y.Time { + return x.Time < y.Time + } + + if len(x.Aux) > 0 && len(x.Aux) == len(y.Aux) { + for i := 0; i < len(x.Aux); i++ { + v1, ok1 := x.Aux[i].(string) + v2, ok2 := y.Aux[i].(string) + if !ok1 || !ok2 { + // Unsupported types used in Aux fields. Maybe they + // need to be added here? + return false + } else if v1 == v2 { + continue + } + return v1 < v2 + } + } + return false // Times and/or Aux fields are equal. + } + + if x.Name != y.Name { + return x.Name > y.Name + } else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); !xTags.Equals(&yTags) { + return xTags.ID() > yTags.ID() + } + + if x.Time != y.Time { + return x.Time > y.Time + } + + if len(x.Aux) > 0 && len(x.Aux) == len(y.Aux) { + for i := 0; i < len(x.Aux); i++ { + v1, ok1 := x.Aux[i].(string) + v2, ok2 := y.Aux[i].(string) + if !ok1 || !ok2 { + // Unsupported types used in Aux fields. Maybe they + // need to be added here? + return false + } else if v1 == v2 { + continue + } + return v1 > v2 + } + } + return false // Times and/or Aux fields are equal. +} + +func (h *stringSortedMergeHeap) Push(x interface{}) { + h.items = append(h.items, x.(*stringSortedMergeHeapItem)) +} + +func (h *stringSortedMergeHeap) Pop() interface{} { + old := h.items + n := len(old) + item := old[n-1] + h.items = old[0 : n-1] + return item +} + +type stringSortedMergeHeapItem struct { + point *StringPoint + err error + itr StringIterator +} + +// stringIteratorScanner scans the results of a StringIterator into a map. 
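// Editorial note: Peek() reports the timestamp, name, and tags of the next
// buffered point without consuming it; ScanAt() consumes the point only when it
// matches the requested (ts, name, tags) exactly, otherwise it writes the
// configured default value into the map and unreads the point for a later call.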
+type stringIteratorScanner struct { + input *bufStringIterator + err error + keys []influxql.VarRef + defaultValue interface{} +} + +// newStringIteratorScanner creates a new IteratorScanner. +func newStringIteratorScanner(input StringIterator, keys []influxql.VarRef, defaultValue interface{}) *stringIteratorScanner { + return &stringIteratorScanner{ + input: newBufStringIterator(input), + keys: keys, + defaultValue: defaultValue, + } +} + +func (s *stringIteratorScanner) Peek() (int64, string, Tags) { + if s.err != nil { + return ZeroTime, "", Tags{} + } + + p, err := s.input.peek() + if err != nil { + s.err = err + return ZeroTime, "", Tags{} + } else if p == nil { + return ZeroTime, "", Tags{} + } + return p.Time, p.Name, p.Tags +} + +func (s *stringIteratorScanner) ScanAt(ts int64, name string, tags Tags, m map[string]interface{}) { + if s.err != nil { + return + } + + p, err := s.input.Next() + if err != nil { + s.err = err + return + } else if p == nil { + s.useDefaults(m) + return + } else if p.Time != ts || p.Name != name || !p.Tags.Equals(&tags) { + s.useDefaults(m) + s.input.unread(p) + return + } + + if k := s.keys[0]; k.Val != "" { + if p.Nil { + if s.defaultValue != SkipDefault { + m[k.Val] = castToType(s.defaultValue, k.Type) + } + } else { + m[k.Val] = p.Value + } + } + for i, v := range p.Aux { + k := s.keys[i+1] + switch v.(type) { + case float64, int64, uint64, string, bool: + m[k.Val] = v + default: + // Insert the fill value if one was specified. + if s.defaultValue != SkipDefault { + m[k.Val] = castToType(s.defaultValue, k.Type) + } + } + } +} + +func (s *stringIteratorScanner) useDefaults(m map[string]interface{}) { + if s.defaultValue == SkipDefault { + return + } + for _, k := range s.keys { + if k.Val == "" { + continue + } + m[k.Val] = castToType(s.defaultValue, k.Type) + } +} + +func (s *stringIteratorScanner) Stats() IteratorStats { return s.input.Stats() } +func (s *stringIteratorScanner) Err() error { return s.err } +func (s *stringIteratorScanner) Close() error { return s.input.Close() } + +// stringParallelIterator represents an iterator that pulls data in a separate goroutine. +type stringParallelIterator struct { + input StringIterator + ch chan stringPointError + + once sync.Once + closing chan struct{} + wg sync.WaitGroup +} + +// newStringParallelIterator returns a new instance of stringParallelIterator. +func newStringParallelIterator(input StringIterator) *stringParallelIterator { + itr := &stringParallelIterator{ + input: input, + ch: make(chan stringPointError, 256), + closing: make(chan struct{}), + } + itr.wg.Add(1) + go itr.monitor() + return itr +} + +// Stats returns stats from the underlying iterator. +func (itr *stringParallelIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the underlying iterators. +func (itr *stringParallelIterator) Close() error { + itr.once.Do(func() { close(itr.closing) }) + itr.wg.Wait() + return itr.input.Close() +} + +// Next returns the next point from the iterator. +func (itr *stringParallelIterator) Next() (*StringPoint, error) { + v, ok := <-itr.ch + if !ok { + return nil, io.EOF + } + return v.point, v.err +} + +// monitor runs in a separate goroutine and actively pulls the next point. +func (itr *stringParallelIterator) monitor() { + defer close(itr.ch) + defer itr.wg.Done() + + for { + // Read next point. 
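+ // The point is cloned before it is sent on the channel so the consumer owns its own copy.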
+ p, err := itr.input.Next() + if p != nil { + p = p.Clone() + } + + select { + case <-itr.closing: + return + case itr.ch <- stringPointError{point: p, err: err}: + } + } +} + +type stringPointError struct { + point *StringPoint + err error +} + +// stringLimitIterator represents an iterator that limits points per group. +type stringLimitIterator struct { + input StringIterator + opt IteratorOptions + n int + + prev struct { + name string + tags Tags + } +} + +// newStringLimitIterator returns a new instance of stringLimitIterator. +func newStringLimitIterator(input StringIterator, opt IteratorOptions) *stringLimitIterator { + return &stringLimitIterator{ + input: input, + opt: opt, + } +} + +// Stats returns stats from the underlying iterator. +func (itr *stringLimitIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the underlying iterators. +func (itr *stringLimitIterator) Close() error { return itr.input.Close() } + +// Next returns the next point from the iterator. +func (itr *stringLimitIterator) Next() (*StringPoint, error) { + for { + p, err := itr.input.Next() + if p == nil || err != nil { + return nil, err + } + + // Reset window and counter if a new window is encountered. + if p.Name != itr.prev.name || !p.Tags.Equals(&itr.prev.tags) { + itr.prev.name = p.Name + itr.prev.tags = p.Tags + itr.n = 0 + } + + // Increment counter. + itr.n++ + + // Read next point if not beyond the offset. + if itr.n <= itr.opt.Offset { + continue + } + + // Read next point if we're beyond the limit. + if itr.opt.Limit > 0 && (itr.n-itr.opt.Offset) > itr.opt.Limit { + continue + } + + return p, nil + } +} + +type stringFillIterator struct { + input *bufStringIterator + prev StringPoint + startTime int64 + endTime int64 + auxFields []interface{} + init bool + opt IteratorOptions + + window struct { + name string + tags Tags + time int64 + offset int64 + } +} + +func newStringFillIterator(input StringIterator, expr influxql.Expr, opt IteratorOptions) *stringFillIterator { + if opt.Fill == influxql.NullFill { + if expr, ok := expr.(*influxql.Call); ok && expr.Name == "count" { + opt.Fill = influxql.NumberFill + opt.FillValue = "" + } + } + + var startTime, endTime int64 + if opt.Ascending { + startTime, _ = opt.Window(opt.StartTime) + endTime, _ = opt.Window(opt.EndTime) + } else { + startTime, _ = opt.Window(opt.EndTime) + endTime, _ = opt.Window(opt.StartTime) + } + + var auxFields []interface{} + if len(opt.Aux) > 0 { + auxFields = make([]interface{}, len(opt.Aux)) + } + + return &stringFillIterator{ + input: newBufStringIterator(input), + prev: StringPoint{Nil: true}, + startTime: startTime, + endTime: endTime, + auxFields: auxFields, + opt: opt, + } +} + +func (itr *stringFillIterator) Stats() IteratorStats { return itr.input.Stats() } +func (itr *stringFillIterator) Close() error { return itr.input.Close() } + +func (itr *stringFillIterator) Next() (*StringPoint, error) { + if !itr.init { + p, err := itr.input.peek() + if p == nil || err != nil { + return nil, err + } + itr.window.name, itr.window.tags = p.Name, p.Tags + itr.window.time = itr.startTime + if itr.startTime == influxql.MinTime { + itr.window.time, _ = itr.opt.Window(p.Time) + } + if itr.opt.Location != nil { + _, itr.window.offset = itr.opt.Zone(itr.window.time) + } + itr.init = true + } + + p, err := itr.input.Next() + if err != nil { + return nil, err + } + + // Check if the next point is outside of our window or is nil. 
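+ // If so, either keep emitting fill values for the rest of the current window, stop when the input is exhausted, or start a window for the next series.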
+ if p == nil || p.Name != itr.window.name || p.Tags.ID() != itr.window.tags.ID() { + // If we are inside of an interval, unread the point and continue below to + // constructing a new point. + if itr.opt.Ascending && itr.window.time <= itr.endTime { + itr.input.unread(p) + p = nil + goto CONSTRUCT + } else if !itr.opt.Ascending && itr.window.time >= itr.endTime && itr.endTime != influxql.MinTime { + itr.input.unread(p) + p = nil + goto CONSTRUCT + } + + // We are *not* in a current interval. If there is no next point, + // we are at the end of all intervals. + if p == nil { + return nil, nil + } + + // Set the new interval. + itr.window.name, itr.window.tags = p.Name, p.Tags + itr.window.time = itr.startTime + if itr.window.time == influxql.MinTime { + itr.window.time, _ = itr.opt.Window(p.Time) + } + if itr.opt.Location != nil { + _, itr.window.offset = itr.opt.Zone(itr.window.time) + } + itr.prev = StringPoint{Nil: true} + } + + // Check if the point is our next expected point. +CONSTRUCT: + if p == nil || (itr.opt.Ascending && p.Time > itr.window.time) || (!itr.opt.Ascending && p.Time < itr.window.time) { + if p != nil { + itr.input.unread(p) + } + + p = &StringPoint{ + Name: itr.window.name, + Tags: itr.window.tags, + Time: itr.window.time, + Aux: itr.auxFields, + } + + switch itr.opt.Fill { + case influxql.LinearFill: + fallthrough + case influxql.NullFill: + p.Nil = true + case influxql.NumberFill: + p.Value, _ = castToString(itr.opt.FillValue) + case influxql.PreviousFill: + if !itr.prev.Nil { + p.Value = itr.prev.Value + p.Nil = itr.prev.Nil + } else { + p.Nil = true + } + } + } else { + itr.prev = *p + } + + // Advance the expected time. Do not advance to a new window here + // as there may be lingering points with the same timestamp in the previous + // window. + if itr.opt.Ascending { + itr.window.time += int64(itr.opt.Interval.Duration) + } else { + itr.window.time -= int64(itr.opt.Interval.Duration) + } + + // Check to see if we have passed over an offset change and adjust the time + // to account for this new offset. + if itr.opt.Location != nil { + if _, offset := itr.opt.Zone(itr.window.time - 1); offset != itr.window.offset { + diff := itr.window.offset - offset + if abs(diff) < int64(itr.opt.Interval.Duration) { + itr.window.time += diff + } + itr.window.offset = offset + } + } + return p, nil +} + +// stringIntervalIterator represents a string implementation of IntervalIterator. +type stringIntervalIterator struct { + input StringIterator + opt IteratorOptions +} + +func newStringIntervalIterator(input StringIterator, opt IteratorOptions) *stringIntervalIterator { + return &stringIntervalIterator{input: input, opt: opt} +} + +func (itr *stringIntervalIterator) Stats() IteratorStats { return itr.input.Stats() } +func (itr *stringIntervalIterator) Close() error { return itr.input.Close() } + +func (itr *stringIntervalIterator) Next() (*StringPoint, error) { + p, err := itr.input.Next() + if p == nil || err != nil { + return nil, err + } + p.Time, _ = itr.opt.Window(p.Time) + // If we see the minimum allowable time, set the time to zero so we don't + // break the default returned time for aggregate queries without times. + if p.Time == influxql.MinTime { + p.Time = 0 + } + return p, nil +} + +// stringInterruptIterator represents a string implementation of InterruptIterator. 
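+// It checks the closing channel roughly every 256 points and stops emitting points once the channel has been closed.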
+type stringInterruptIterator struct { + input StringIterator + closing <-chan struct{} + count int +} + +func newStringInterruptIterator(input StringIterator, closing <-chan struct{}) *stringInterruptIterator { + return &stringInterruptIterator{input: input, closing: closing} +} + +func (itr *stringInterruptIterator) Stats() IteratorStats { return itr.input.Stats() } +func (itr *stringInterruptIterator) Close() error { return itr.input.Close() } + +func (itr *stringInterruptIterator) Next() (*StringPoint, error) { + // Only check if the channel is closed every N points. This + // intentionally checks on both 0 and N so that if the iterator + // has been interrupted before the first point is emitted it will + // not emit any points. + if itr.count&0xFF == 0xFF { + select { + case <-itr.closing: + return nil, itr.Close() + default: + // Reset iterator count to zero and fall through to emit the next point. + itr.count = 0 + } + } + + // Increment the counter for every point read. + itr.count++ + return itr.input.Next() +} + +// stringCloseInterruptIterator represents a string implementation of CloseInterruptIterator. +type stringCloseInterruptIterator struct { + input StringIterator + closing <-chan struct{} + done chan struct{} + once sync.Once +} + +func newStringCloseInterruptIterator(input StringIterator, closing <-chan struct{}) *stringCloseInterruptIterator { + itr := &stringCloseInterruptIterator{ + input: input, + closing: closing, + done: make(chan struct{}), + } + go itr.monitor() + return itr +} + +func (itr *stringCloseInterruptIterator) monitor() { + select { + case <-itr.closing: + itr.Close() + case <-itr.done: + } +} + +func (itr *stringCloseInterruptIterator) Stats() IteratorStats { + return itr.input.Stats() +} + +func (itr *stringCloseInterruptIterator) Close() error { + itr.once.Do(func() { + close(itr.done) + itr.input.Close() + }) + return nil +} + +func (itr *stringCloseInterruptIterator) Next() (*StringPoint, error) { + p, err := itr.input.Next() + if err != nil { + // Check if the iterator was closed. + select { + case <-itr.done: + return nil, nil + default: + return nil, err + } + } + return p, nil +} + +// stringReduceFloatIterator executes a reducer for every interval and buffers the result. +type stringReduceFloatIterator struct { + input *bufStringIterator + create func() (StringPointAggregator, FloatPointEmitter) + dims []string + opt IteratorOptions + points []FloatPoint + keepTags bool +} + +func newStringReduceFloatIterator(input StringIterator, opt IteratorOptions, createFn func() (StringPointAggregator, FloatPointEmitter)) *stringReduceFloatIterator { + return &stringReduceFloatIterator{ + input: newBufStringIterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + } +} + +// Stats returns stats from the input iterator. +func (itr *stringReduceFloatIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *stringReduceFloatIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *stringReduceFloatIterator) Next() (*FloatPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. 
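+ // reduce fills the slice in reverse order, so popping from the end returns points in the intended order.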
+ p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// stringReduceFloatPoint stores the reduced data for a name/tag combination. +type stringReduceFloatPoint struct { + Name string + Tags Tags + Aggregator StringPointAggregator + Emitter FloatPointEmitter +} + +// reduce executes fn once for every point in the next window. +// The previous value for the dimension is passed to fn. +func (itr *stringReduceFloatIterator) reduce() ([]FloatPoint, error) { + // Calculate next window. + var ( + startTime, endTime int64 + window struct { + name string + tags string + } + ) + for { + p, err := itr.input.Next() + if err != nil || p == nil { + return nil, err + } else if p.Nil { + continue + } + + // Unread the point so it can be processed. + itr.input.unread(p) + startTime, endTime = itr.opt.Window(p.Time) + window.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID() + break + } + + // Create points by tags. + m := make(map[string]*stringReduceFloatPoint) + for { + // Read next point. + curr, err := itr.input.NextInWindow(startTime, endTime) + if err != nil { + return nil, err + } else if curr == nil { + break + } else if curr.Nil { + continue + } else if curr.Name != window.name { + itr.input.unread(curr) + break + } + + // Ensure this point is within the same final window. + if curr.Name != window.name { + itr.input.unread(curr) + break + } else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags { + itr.input.unread(curr) + break + } + + // Retrieve the tags on this point for this level of the query. + // This may be different than the bucket dimensions. + tags := curr.Tags.Subset(itr.dims) + id := tags.ID() + + // Retrieve the aggregator for this name/tag combination or create one. + rp := m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &stringReduceFloatPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + m[id] = rp + } + rp.Aggregator.AggregateString(curr) + } + + // Reverse sort points by name & tag if our output is supposed to be ordered. + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + if len(keys) > 1 && itr.opt.Ordered { + sort.Sort(reverseStringSlice(keys)) + } + + // Assume the points are already sorted until proven otherwise. + sortedByTime := true + // Emit the points for each name & tag combination. + a := make([]FloatPoint, 0, len(m)) + for _, k := range keys { + rp := m[k] + points := rp.Emitter.Emit() + for i := len(points) - 1; i >= 0; i-- { + points[i].Name = rp.Name + if !itr.keepTags { + points[i].Tags = rp.Tags + } + // Set the points time to the interval time if the reducer didn't provide one. + if points[i].Time == ZeroTime { + points[i].Time = startTime + } else { + sortedByTime = false + } + a = append(a, points[i]) + } + } + + // Points may be out of order. Perform a stable sort by time if requested. + if !sortedByTime && itr.opt.Ordered { + sort.Stable(sort.Reverse(floatPointsByTime(a))) + } + + return a, nil +} + +// stringStreamFloatIterator streams inputs into the iterator and emits points gradually. +type stringStreamFloatIterator struct { + input *bufStringIterator + create func() (StringPointAggregator, FloatPointEmitter) + dims []string + opt IteratorOptions + m map[string]*stringReduceFloatPoint + points []FloatPoint +} + +// newStringStreamFloatIterator returns a new instance of stringStreamFloatIterator. 
+func newStringStreamFloatIterator(input StringIterator, createFn func() (StringPointAggregator, FloatPointEmitter), opt IteratorOptions) *stringStreamFloatIterator { + return &stringStreamFloatIterator{ + input: newBufStringIterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + m: make(map[string]*stringReduceFloatPoint), + } +} + +// Stats returns stats from the input iterator. +func (itr *stringStreamFloatIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *stringStreamFloatIterator) Close() error { return itr.input.Close() } + +// Next returns the next value for the stream iterator. +func (itr *stringStreamFloatIterator) Next() (*FloatPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// reduce creates and manages aggregators for every point from the input. +// After aggregating a point, it always tries to emit a value using the emitter. +func (itr *stringStreamFloatIterator) reduce() ([]FloatPoint, error) { + // We have already read all of the input points. + if itr.m == nil { + return nil, nil + } + + for { + // Read next point. + curr, err := itr.input.Next() + if err != nil { + return nil, err + } else if curr == nil { + // Close all of the aggregators to flush any remaining points to emit. + var points []FloatPoint + for _, rp := range itr.m { + if aggregator, ok := rp.Aggregator.(io.Closer); ok { + if err := aggregator.Close(); err != nil { + return nil, err + } + + pts := rp.Emitter.Emit() + if len(pts) == 0 { + continue + } + + for i := range pts { + pts[i].Name = rp.Name + pts[i].Tags = rp.Tags + } + points = append(points, pts...) + } + } + + // Eliminate the aggregators and emitters. + itr.m = nil + return points, nil + } else if curr.Nil { + continue + } + tags := curr.Tags.Subset(itr.dims) + + id := curr.Name + if len(tags.m) > 0 { + id += "\x00" + tags.ID() + } + + // Retrieve the aggregator for this name/tag combination or create one. + rp := itr.m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &stringReduceFloatPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + itr.m[id] = rp + } + rp.Aggregator.AggregateString(curr) + + // Attempt to emit points from the aggregator. + points := rp.Emitter.Emit() + if len(points) == 0 { + continue + } + + for i := range points { + points[i].Name = rp.Name + points[i].Tags = rp.Tags + } + return points, nil + } +} + +// stringReduceIntegerIterator executes a reducer for every interval and buffers the result. +type stringReduceIntegerIterator struct { + input *bufStringIterator + create func() (StringPointAggregator, IntegerPointEmitter) + dims []string + opt IteratorOptions + points []IntegerPoint + keepTags bool +} + +func newStringReduceIntegerIterator(input StringIterator, opt IteratorOptions, createFn func() (StringPointAggregator, IntegerPointEmitter)) *stringReduceIntegerIterator { + return &stringReduceIntegerIterator{ + input: newBufStringIterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + } +} + +// Stats returns stats from the input iterator. 
+func (itr *stringReduceIntegerIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *stringReduceIntegerIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *stringReduceIntegerIterator) Next() (*IntegerPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// stringReduceIntegerPoint stores the reduced data for a name/tag combination. +type stringReduceIntegerPoint struct { + Name string + Tags Tags + Aggregator StringPointAggregator + Emitter IntegerPointEmitter +} + +// reduce executes fn once for every point in the next window. +// The previous value for the dimension is passed to fn. +func (itr *stringReduceIntegerIterator) reduce() ([]IntegerPoint, error) { + // Calculate next window. + var ( + startTime, endTime int64 + window struct { + name string + tags string + } + ) + for { + p, err := itr.input.Next() + if err != nil || p == nil { + return nil, err + } else if p.Nil { + continue + } + + // Unread the point so it can be processed. + itr.input.unread(p) + startTime, endTime = itr.opt.Window(p.Time) + window.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID() + break + } + + // Create points by tags. + m := make(map[string]*stringReduceIntegerPoint) + for { + // Read next point. + curr, err := itr.input.NextInWindow(startTime, endTime) + if err != nil { + return nil, err + } else if curr == nil { + break + } else if curr.Nil { + continue + } else if curr.Name != window.name { + itr.input.unread(curr) + break + } + + // Ensure this point is within the same final window. + if curr.Name != window.name { + itr.input.unread(curr) + break + } else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags { + itr.input.unread(curr) + break + } + + // Retrieve the tags on this point for this level of the query. + // This may be different than the bucket dimensions. + tags := curr.Tags.Subset(itr.dims) + id := tags.ID() + + // Retrieve the aggregator for this name/tag combination or create one. + rp := m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &stringReduceIntegerPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + m[id] = rp + } + rp.Aggregator.AggregateString(curr) + } + + // Reverse sort points by name & tag if our output is supposed to be ordered. + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + if len(keys) > 1 && itr.opt.Ordered { + sort.Sort(reverseStringSlice(keys)) + } + + // Assume the points are already sorted until proven otherwise. + sortedByTime := true + // Emit the points for each name & tag combination. + a := make([]IntegerPoint, 0, len(m)) + for _, k := range keys { + rp := m[k] + points := rp.Emitter.Emit() + for i := len(points) - 1; i >= 0; i-- { + points[i].Name = rp.Name + if !itr.keepTags { + points[i].Tags = rp.Tags + } + // Set the points time to the interval time if the reducer didn't provide one. + if points[i].Time == ZeroTime { + points[i].Time = startTime + } else { + sortedByTime = false + } + a = append(a, points[i]) + } + } + + // Points may be out of order. 
Perform a stable sort by time if requested. + if !sortedByTime && itr.opt.Ordered { + sort.Stable(sort.Reverse(integerPointsByTime(a))) + } + + return a, nil +} + +// stringStreamIntegerIterator streams inputs into the iterator and emits points gradually. +type stringStreamIntegerIterator struct { + input *bufStringIterator + create func() (StringPointAggregator, IntegerPointEmitter) + dims []string + opt IteratorOptions + m map[string]*stringReduceIntegerPoint + points []IntegerPoint +} + +// newStringStreamIntegerIterator returns a new instance of stringStreamIntegerIterator. +func newStringStreamIntegerIterator(input StringIterator, createFn func() (StringPointAggregator, IntegerPointEmitter), opt IteratorOptions) *stringStreamIntegerIterator { + return &stringStreamIntegerIterator{ + input: newBufStringIterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + m: make(map[string]*stringReduceIntegerPoint), + } +} + +// Stats returns stats from the input iterator. +func (itr *stringStreamIntegerIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *stringStreamIntegerIterator) Close() error { return itr.input.Close() } + +// Next returns the next value for the stream iterator. +func (itr *stringStreamIntegerIterator) Next() (*IntegerPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// reduce creates and manages aggregators for every point from the input. +// After aggregating a point, it always tries to emit a value using the emitter. +func (itr *stringStreamIntegerIterator) reduce() ([]IntegerPoint, error) { + // We have already read all of the input points. + if itr.m == nil { + return nil, nil + } + + for { + // Read next point. + curr, err := itr.input.Next() + if err != nil { + return nil, err + } else if curr == nil { + // Close all of the aggregators to flush any remaining points to emit. + var points []IntegerPoint + for _, rp := range itr.m { + if aggregator, ok := rp.Aggregator.(io.Closer); ok { + if err := aggregator.Close(); err != nil { + return nil, err + } + + pts := rp.Emitter.Emit() + if len(pts) == 0 { + continue + } + + for i := range pts { + pts[i].Name = rp.Name + pts[i].Tags = rp.Tags + } + points = append(points, pts...) + } + } + + // Eliminate the aggregators and emitters. + itr.m = nil + return points, nil + } else if curr.Nil { + continue + } + tags := curr.Tags.Subset(itr.dims) + + id := curr.Name + if len(tags.m) > 0 { + id += "\x00" + tags.ID() + } + + // Retrieve the aggregator for this name/tag combination or create one. + rp := itr.m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &stringReduceIntegerPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + itr.m[id] = rp + } + rp.Aggregator.AggregateString(curr) + + // Attempt to emit points from the aggregator. + points := rp.Emitter.Emit() + if len(points) == 0 { + continue + } + + for i := range points { + points[i].Name = rp.Name + points[i].Tags = rp.Tags + } + return points, nil + } +} + +// stringReduceUnsignedIterator executes a reducer for every interval and buffers the result. 
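+// Each call to reduce reads a single time window from the input and returns the aggregated points for that window, which Next then pops one at a time.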
+type stringReduceUnsignedIterator struct { + input *bufStringIterator + create func() (StringPointAggregator, UnsignedPointEmitter) + dims []string + opt IteratorOptions + points []UnsignedPoint + keepTags bool +} + +func newStringReduceUnsignedIterator(input StringIterator, opt IteratorOptions, createFn func() (StringPointAggregator, UnsignedPointEmitter)) *stringReduceUnsignedIterator { + return &stringReduceUnsignedIterator{ + input: newBufStringIterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + } +} + +// Stats returns stats from the input iterator. +func (itr *stringReduceUnsignedIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *stringReduceUnsignedIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *stringReduceUnsignedIterator) Next() (*UnsignedPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// stringReduceUnsignedPoint stores the reduced data for a name/tag combination. +type stringReduceUnsignedPoint struct { + Name string + Tags Tags + Aggregator StringPointAggregator + Emitter UnsignedPointEmitter +} + +// reduce executes fn once for every point in the next window. +// The previous value for the dimension is passed to fn. +func (itr *stringReduceUnsignedIterator) reduce() ([]UnsignedPoint, error) { + // Calculate next window. + var ( + startTime, endTime int64 + window struct { + name string + tags string + } + ) + for { + p, err := itr.input.Next() + if err != nil || p == nil { + return nil, err + } else if p.Nil { + continue + } + + // Unread the point so it can be processed. + itr.input.unread(p) + startTime, endTime = itr.opt.Window(p.Time) + window.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID() + break + } + + // Create points by tags. + m := make(map[string]*stringReduceUnsignedPoint) + for { + // Read next point. + curr, err := itr.input.NextInWindow(startTime, endTime) + if err != nil { + return nil, err + } else if curr == nil { + break + } else if curr.Nil { + continue + } else if curr.Name != window.name { + itr.input.unread(curr) + break + } + + // Ensure this point is within the same final window. + if curr.Name != window.name { + itr.input.unread(curr) + break + } else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags { + itr.input.unread(curr) + break + } + + // Retrieve the tags on this point for this level of the query. + // This may be different than the bucket dimensions. + tags := curr.Tags.Subset(itr.dims) + id := tags.ID() + + // Retrieve the aggregator for this name/tag combination or create one. + rp := m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &stringReduceUnsignedPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + m[id] = rp + } + rp.Aggregator.AggregateString(curr) + } + + // Reverse sort points by name & tag if our output is supposed to be ordered. 
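+ // The keys are sorted in reverse because Next pops points from the end of the returned slice.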
+ keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + if len(keys) > 1 && itr.opt.Ordered { + sort.Sort(reverseStringSlice(keys)) + } + + // Assume the points are already sorted until proven otherwise. + sortedByTime := true + // Emit the points for each name & tag combination. + a := make([]UnsignedPoint, 0, len(m)) + for _, k := range keys { + rp := m[k] + points := rp.Emitter.Emit() + for i := len(points) - 1; i >= 0; i-- { + points[i].Name = rp.Name + if !itr.keepTags { + points[i].Tags = rp.Tags + } + // Set the points time to the interval time if the reducer didn't provide one. + if points[i].Time == ZeroTime { + points[i].Time = startTime + } else { + sortedByTime = false + } + a = append(a, points[i]) + } + } + + // Points may be out of order. Perform a stable sort by time if requested. + if !sortedByTime && itr.opt.Ordered { + sort.Stable(sort.Reverse(unsignedPointsByTime(a))) + } + + return a, nil +} + +// stringStreamUnsignedIterator streams inputs into the iterator and emits points gradually. +type stringStreamUnsignedIterator struct { + input *bufStringIterator + create func() (StringPointAggregator, UnsignedPointEmitter) + dims []string + opt IteratorOptions + m map[string]*stringReduceUnsignedPoint + points []UnsignedPoint +} + +// newStringStreamUnsignedIterator returns a new instance of stringStreamUnsignedIterator. +func newStringStreamUnsignedIterator(input StringIterator, createFn func() (StringPointAggregator, UnsignedPointEmitter), opt IteratorOptions) *stringStreamUnsignedIterator { + return &stringStreamUnsignedIterator{ + input: newBufStringIterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + m: make(map[string]*stringReduceUnsignedPoint), + } +} + +// Stats returns stats from the input iterator. +func (itr *stringStreamUnsignedIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *stringStreamUnsignedIterator) Close() error { return itr.input.Close() } + +// Next returns the next value for the stream iterator. +func (itr *stringStreamUnsignedIterator) Next() (*UnsignedPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// reduce creates and manages aggregators for every point from the input. +// After aggregating a point, it always tries to emit a value using the emitter. +func (itr *stringStreamUnsignedIterator) reduce() ([]UnsignedPoint, error) { + // We have already read all of the input points. + if itr.m == nil { + return nil, nil + } + + for { + // Read next point. + curr, err := itr.input.Next() + if err != nil { + return nil, err + } else if curr == nil { + // Close all of the aggregators to flush any remaining points to emit. + var points []UnsignedPoint + for _, rp := range itr.m { + if aggregator, ok := rp.Aggregator.(io.Closer); ok { + if err := aggregator.Close(); err != nil { + return nil, err + } + + pts := rp.Emitter.Emit() + if len(pts) == 0 { + continue + } + + for i := range pts { + pts[i].Name = rp.Name + pts[i].Tags = rp.Tags + } + points = append(points, pts...) + } + } + + // Eliminate the aggregators and emitters. 
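+ // A nil map tells later calls to reduce that the stream has been fully drained.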
+ itr.m = nil + return points, nil + } else if curr.Nil { + continue + } + tags := curr.Tags.Subset(itr.dims) + + id := curr.Name + if len(tags.m) > 0 { + id += "\x00" + tags.ID() + } + + // Retrieve the aggregator for this name/tag combination or create one. + rp := itr.m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &stringReduceUnsignedPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + itr.m[id] = rp + } + rp.Aggregator.AggregateString(curr) + + // Attempt to emit points from the aggregator. + points := rp.Emitter.Emit() + if len(points) == 0 { + continue + } + + for i := range points { + points[i].Name = rp.Name + points[i].Tags = rp.Tags + } + return points, nil + } +} + +// stringReduceStringIterator executes a reducer for every interval and buffers the result. +type stringReduceStringIterator struct { + input *bufStringIterator + create func() (StringPointAggregator, StringPointEmitter) + dims []string + opt IteratorOptions + points []StringPoint + keepTags bool +} + +func newStringReduceStringIterator(input StringIterator, opt IteratorOptions, createFn func() (StringPointAggregator, StringPointEmitter)) *stringReduceStringIterator { + return &stringReduceStringIterator{ + input: newBufStringIterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + } +} + +// Stats returns stats from the input iterator. +func (itr *stringReduceStringIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *stringReduceStringIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *stringReduceStringIterator) Next() (*StringPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// stringReduceStringPoint stores the reduced data for a name/tag combination. +type stringReduceStringPoint struct { + Name string + Tags Tags + Aggregator StringPointAggregator + Emitter StringPointEmitter +} + +// reduce executes fn once for every point in the next window. +// The previous value for the dimension is passed to fn. +func (itr *stringReduceStringIterator) reduce() ([]StringPoint, error) { + // Calculate next window. + var ( + startTime, endTime int64 + window struct { + name string + tags string + } + ) + for { + p, err := itr.input.Next() + if err != nil || p == nil { + return nil, err + } else if p.Nil { + continue + } + + // Unread the point so it can be processed. + itr.input.unread(p) + startTime, endTime = itr.opt.Window(p.Time) + window.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID() + break + } + + // Create points by tags. + m := make(map[string]*stringReduceStringPoint) + for { + // Read next point. + curr, err := itr.input.NextInWindow(startTime, endTime) + if err != nil { + return nil, err + } else if curr == nil { + break + } else if curr.Nil { + continue + } else if curr.Name != window.name { + itr.input.unread(curr) + break + } + + // Ensure this point is within the same final window. 
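+ // A point from a different measurement or tag set is unread so the next call to reduce can start a new window with it.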
+ if curr.Name != window.name { + itr.input.unread(curr) + break + } else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags { + itr.input.unread(curr) + break + } + + // Retrieve the tags on this point for this level of the query. + // This may be different than the bucket dimensions. + tags := curr.Tags.Subset(itr.dims) + id := tags.ID() + + // Retrieve the aggregator for this name/tag combination or create one. + rp := m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &stringReduceStringPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + m[id] = rp + } + rp.Aggregator.AggregateString(curr) + } + + // Reverse sort points by name & tag if our output is supposed to be ordered. + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + if len(keys) > 1 && itr.opt.Ordered { + sort.Sort(reverseStringSlice(keys)) + } + + // Assume the points are already sorted until proven otherwise. + sortedByTime := true + // Emit the points for each name & tag combination. + a := make([]StringPoint, 0, len(m)) + for _, k := range keys { + rp := m[k] + points := rp.Emitter.Emit() + for i := len(points) - 1; i >= 0; i-- { + points[i].Name = rp.Name + if !itr.keepTags { + points[i].Tags = rp.Tags + } + // Set the points time to the interval time if the reducer didn't provide one. + if points[i].Time == ZeroTime { + points[i].Time = startTime + } else { + sortedByTime = false + } + a = append(a, points[i]) + } + } + + // Points may be out of order. Perform a stable sort by time if requested. + if !sortedByTime && itr.opt.Ordered { + sort.Stable(sort.Reverse(stringPointsByTime(a))) + } + + return a, nil +} + +// stringStreamStringIterator streams inputs into the iterator and emits points gradually. +type stringStreamStringIterator struct { + input *bufStringIterator + create func() (StringPointAggregator, StringPointEmitter) + dims []string + opt IteratorOptions + m map[string]*stringReduceStringPoint + points []StringPoint +} + +// newStringStreamStringIterator returns a new instance of stringStreamStringIterator. +func newStringStreamStringIterator(input StringIterator, createFn func() (StringPointAggregator, StringPointEmitter), opt IteratorOptions) *stringStreamStringIterator { + return &stringStreamStringIterator{ + input: newBufStringIterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + m: make(map[string]*stringReduceStringPoint), + } +} + +// Stats returns stats from the input iterator. +func (itr *stringStreamStringIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *stringStreamStringIterator) Close() error { return itr.input.Close() } + +// Next returns the next value for the stream iterator. +func (itr *stringStreamStringIterator) Next() (*StringPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// reduce creates and manages aggregators for every point from the input. +// After aggregating a point, it always tries to emit a value using the emitter. +func (itr *stringStreamStringIterator) reduce() ([]StringPoint, error) { + // We have already read all of the input points. 
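+ // The map is set to nil once the remaining aggregators have been flushed.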
+ if itr.m == nil { + return nil, nil + } + + for { + // Read next point. + curr, err := itr.input.Next() + if err != nil { + return nil, err + } else if curr == nil { + // Close all of the aggregators to flush any remaining points to emit. + var points []StringPoint + for _, rp := range itr.m { + if aggregator, ok := rp.Aggregator.(io.Closer); ok { + if err := aggregator.Close(); err != nil { + return nil, err + } + + pts := rp.Emitter.Emit() + if len(pts) == 0 { + continue + } + + for i := range pts { + pts[i].Name = rp.Name + pts[i].Tags = rp.Tags + } + points = append(points, pts...) + } + } + + // Eliminate the aggregators and emitters. + itr.m = nil + return points, nil + } else if curr.Nil { + continue + } + tags := curr.Tags.Subset(itr.dims) + + id := curr.Name + if len(tags.m) > 0 { + id += "\x00" + tags.ID() + } + + // Retrieve the aggregator for this name/tag combination or create one. + rp := itr.m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &stringReduceStringPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + itr.m[id] = rp + } + rp.Aggregator.AggregateString(curr) + + // Attempt to emit points from the aggregator. + points := rp.Emitter.Emit() + if len(points) == 0 { + continue + } + + for i := range points { + points[i].Name = rp.Name + points[i].Tags = rp.Tags + } + return points, nil + } +} + +// stringReduceBooleanIterator executes a reducer for every interval and buffers the result. +type stringReduceBooleanIterator struct { + input *bufStringIterator + create func() (StringPointAggregator, BooleanPointEmitter) + dims []string + opt IteratorOptions + points []BooleanPoint + keepTags bool +} + +func newStringReduceBooleanIterator(input StringIterator, opt IteratorOptions, createFn func() (StringPointAggregator, BooleanPointEmitter)) *stringReduceBooleanIterator { + return &stringReduceBooleanIterator{ + input: newBufStringIterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + } +} + +// Stats returns stats from the input iterator. +func (itr *stringReduceBooleanIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *stringReduceBooleanIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *stringReduceBooleanIterator) Next() (*BooleanPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// stringReduceBooleanPoint stores the reduced data for a name/tag combination. +type stringReduceBooleanPoint struct { + Name string + Tags Tags + Aggregator StringPointAggregator + Emitter BooleanPointEmitter +} + +// reduce executes fn once for every point in the next window. +// The previous value for the dimension is passed to fn. +func (itr *stringReduceBooleanIterator) reduce() ([]BooleanPoint, error) { + // Calculate next window. + var ( + startTime, endTime int64 + window struct { + name string + tags string + } + ) + for { + p, err := itr.input.Next() + if err != nil || p == nil { + return nil, err + } else if p.Nil { + continue + } + + // Unread the point so it can be processed. 
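+ // The point is read again below by NextInWindow once the window boundaries have been computed.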
+ itr.input.unread(p) + startTime, endTime = itr.opt.Window(p.Time) + window.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID() + break + } + + // Create points by tags. + m := make(map[string]*stringReduceBooleanPoint) + for { + // Read next point. + curr, err := itr.input.NextInWindow(startTime, endTime) + if err != nil { + return nil, err + } else if curr == nil { + break + } else if curr.Nil { + continue + } else if curr.Name != window.name { + itr.input.unread(curr) + break + } + + // Ensure this point is within the same final window. + if curr.Name != window.name { + itr.input.unread(curr) + break + } else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags { + itr.input.unread(curr) + break + } + + // Retrieve the tags on this point for this level of the query. + // This may be different than the bucket dimensions. + tags := curr.Tags.Subset(itr.dims) + id := tags.ID() + + // Retrieve the aggregator for this name/tag combination or create one. + rp := m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &stringReduceBooleanPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + m[id] = rp + } + rp.Aggregator.AggregateString(curr) + } + + // Reverse sort points by name & tag if our output is supposed to be ordered. + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + if len(keys) > 1 && itr.opt.Ordered { + sort.Sort(reverseStringSlice(keys)) + } + + // Assume the points are already sorted until proven otherwise. + sortedByTime := true + // Emit the points for each name & tag combination. + a := make([]BooleanPoint, 0, len(m)) + for _, k := range keys { + rp := m[k] + points := rp.Emitter.Emit() + for i := len(points) - 1; i >= 0; i-- { + points[i].Name = rp.Name + if !itr.keepTags { + points[i].Tags = rp.Tags + } + // Set the points time to the interval time if the reducer didn't provide one. + if points[i].Time == ZeroTime { + points[i].Time = startTime + } else { + sortedByTime = false + } + a = append(a, points[i]) + } + } + + // Points may be out of order. Perform a stable sort by time if requested. + if !sortedByTime && itr.opt.Ordered { + sort.Stable(sort.Reverse(booleanPointsByTime(a))) + } + + return a, nil +} + +// stringStreamBooleanIterator streams inputs into the iterator and emits points gradually. +type stringStreamBooleanIterator struct { + input *bufStringIterator + create func() (StringPointAggregator, BooleanPointEmitter) + dims []string + opt IteratorOptions + m map[string]*stringReduceBooleanPoint + points []BooleanPoint +} + +// newStringStreamBooleanIterator returns a new instance of stringStreamBooleanIterator. +func newStringStreamBooleanIterator(input StringIterator, createFn func() (StringPointAggregator, BooleanPointEmitter), opt IteratorOptions) *stringStreamBooleanIterator { + return &stringStreamBooleanIterator{ + input: newBufStringIterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + m: make(map[string]*stringReduceBooleanPoint), + } +} + +// Stats returns stats from the input iterator. +func (itr *stringStreamBooleanIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *stringStreamBooleanIterator) Close() error { return itr.input.Close() } + +// Next returns the next value for the stream iterator. +func (itr *stringStreamBooleanIterator) Next() (*BooleanPoint, error) { + // Calculate next window if we have no more points. 
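+ // reduce aggregates input points until the emitter produces output or the input is exhausted.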
+ if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// reduce creates and manages aggregators for every point from the input. +// After aggregating a point, it always tries to emit a value using the emitter. +func (itr *stringStreamBooleanIterator) reduce() ([]BooleanPoint, error) { + // We have already read all of the input points. + if itr.m == nil { + return nil, nil + } + + for { + // Read next point. + curr, err := itr.input.Next() + if err != nil { + return nil, err + } else if curr == nil { + // Close all of the aggregators to flush any remaining points to emit. + var points []BooleanPoint + for _, rp := range itr.m { + if aggregator, ok := rp.Aggregator.(io.Closer); ok { + if err := aggregator.Close(); err != nil { + return nil, err + } + + pts := rp.Emitter.Emit() + if len(pts) == 0 { + continue + } + + for i := range pts { + pts[i].Name = rp.Name + pts[i].Tags = rp.Tags + } + points = append(points, pts...) + } + } + + // Eliminate the aggregators and emitters. + itr.m = nil + return points, nil + } else if curr.Nil { + continue + } + tags := curr.Tags.Subset(itr.dims) + + id := curr.Name + if len(tags.m) > 0 { + id += "\x00" + tags.ID() + } + + // Retrieve the aggregator for this name/tag combination or create one. + rp := itr.m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &stringReduceBooleanPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + itr.m[id] = rp + } + rp.Aggregator.AggregateString(curr) + + // Attempt to emit points from the aggregator. + points := rp.Emitter.Emit() + if len(points) == 0 { + continue + } + + for i := range points { + points[i].Name = rp.Name + points[i].Tags = rp.Tags + } + return points, nil + } +} + +// stringDedupeIterator only outputs unique points. +// This differs from the DistinctIterator in that it compares all aux fields too. +// This iterator is relatively inefficient and should only be used on small +// datasets such as meta query results. 
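+// Each point is serialized with protobuf and the encoded bytes are used as the key in the lookup map.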
+type stringDedupeIterator struct { + input StringIterator + m map[string]struct{} // lookup of points already sent +} + +type stringIteratorMapper struct { + cur Cursor + row Row + driver IteratorMap // which iterator to use for the primary value, can be nil + fields []IteratorMap // which iterator to use for an aux field + point StringPoint +} + +func newStringIteratorMapper(cur Cursor, driver IteratorMap, fields []IteratorMap, opt IteratorOptions) *stringIteratorMapper { + return &stringIteratorMapper{ + cur: cur, + driver: driver, + fields: fields, + point: StringPoint{ + Aux: make([]interface{}, len(fields)), + }, + } +} + +func (itr *stringIteratorMapper) Next() (*StringPoint, error) { + if !itr.cur.Scan(&itr.row) { + if err := itr.cur.Err(); err != nil { + return nil, err + } + return nil, nil + } + + itr.point.Time = itr.row.Time + itr.point.Name = itr.row.Series.Name + itr.point.Tags = itr.row.Series.Tags + + if itr.driver != nil { + if v := itr.driver.Value(&itr.row); v != nil { + if v, ok := castToString(v); ok { + itr.point.Value = v + itr.point.Nil = false + } else { + itr.point.Value = "" + itr.point.Nil = true + } + } else { + itr.point.Value = "" + itr.point.Nil = true + } + } + for i, f := range itr.fields { + itr.point.Aux[i] = f.Value(&itr.row) + } + return &itr.point, nil +} + +func (itr *stringIteratorMapper) Stats() IteratorStats { + return itr.cur.Stats() +} + +func (itr *stringIteratorMapper) Close() error { + return itr.cur.Close() +} + +type stringFilterIterator struct { + input StringIterator + cond influxql.Expr + opt IteratorOptions + m map[string]interface{} +} + +func newStringFilterIterator(input StringIterator, cond influxql.Expr, opt IteratorOptions) StringIterator { + // Strip out time conditions from the WHERE clause. + // TODO(jsternberg): This should really be done for us when creating the IteratorOptions struct. 
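+ // Any comparison against "time" is rewritten to a literal true so only the remaining predicates are evaluated per point.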
+ n := influxql.RewriteFunc(influxql.CloneExpr(cond), func(n influxql.Node) influxql.Node { + switch n := n.(type) { + case *influxql.BinaryExpr: + if n.LHS.String() == "time" { + return &influxql.BooleanLiteral{Val: true} + } + } + return n + }) + + cond, _ = n.(influxql.Expr) + if cond == nil { + return input + } else if n, ok := cond.(*influxql.BooleanLiteral); ok && n.Val { + return input + } + + return &stringFilterIterator{ + input: input, + cond: cond, + opt: opt, + m: make(map[string]interface{}), + } +} + +func (itr *stringFilterIterator) Stats() IteratorStats { return itr.input.Stats() } +func (itr *stringFilterIterator) Close() error { return itr.input.Close() } + +func (itr *stringFilterIterator) Next() (*StringPoint, error) { + for { + p, err := itr.input.Next() + if err != nil || p == nil { + return nil, err + } + + for i, ref := range itr.opt.Aux { + itr.m[ref.Val] = p.Aux[i] + } + for k, v := range p.Tags.KeyValues() { + itr.m[k] = v + } + + if !influxql.EvalBool(itr.cond, itr.m) { + continue + } + return p, nil + } +} + +type stringTagSubsetIterator struct { + input StringIterator + point StringPoint + lastTags Tags + dimensions []string +} + +func newStringTagSubsetIterator(input StringIterator, opt IteratorOptions) *stringTagSubsetIterator { + return &stringTagSubsetIterator{ + input: input, + dimensions: opt.GetDimensions(), + } +} + +func (itr *stringTagSubsetIterator) Next() (*StringPoint, error) { + p, err := itr.input.Next() + if err != nil { + return nil, err + } else if p == nil { + return nil, nil + } + + itr.point.Name = p.Name + if !p.Tags.Equal(itr.lastTags) { + itr.point.Tags = p.Tags.Subset(itr.dimensions) + itr.lastTags = p.Tags + } + itr.point.Time = p.Time + itr.point.Value = p.Value + itr.point.Aux = p.Aux + itr.point.Aggregated = p.Aggregated + itr.point.Nil = p.Nil + return &itr.point, nil +} + +func (itr *stringTagSubsetIterator) Stats() IteratorStats { + return itr.input.Stats() +} + +func (itr *stringTagSubsetIterator) Close() error { + return itr.input.Close() +} + +// newStringDedupeIterator returns a new instance of stringDedupeIterator. +func newStringDedupeIterator(input StringIterator) *stringDedupeIterator { + return &stringDedupeIterator{ + input: input, + m: make(map[string]struct{}), + } +} + +// Stats returns stats from the input iterator. +func (itr *stringDedupeIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *stringDedupeIterator) Close() error { return itr.input.Close() } + +// Next returns the next unique point from the input iterator. +func (itr *stringDedupeIterator) Next() (*StringPoint, error) { + for { + // Read next point. + p, err := itr.input.Next() + if p == nil || err != nil { + return nil, err + } + + // Serialize to bytes to store in lookup. + buf, err := proto.Marshal(encodeStringPoint(p)) + if err != nil { + return nil, err + } + + // If the point has already been output then move to the next point. + if _, ok := itr.m[string(buf)]; ok { + continue + } + + // Otherwise mark it as emitted and return point. + itr.m[string(buf)] = struct{}{} + return p, nil + } +} + +// stringReaderIterator represents an iterator that streams from a reader. +type stringReaderIterator struct { + r io.Reader + dec *StringPointDecoder +} + +// newStringReaderIterator returns a new instance of stringReaderIterator. 
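+// Points are decoded lazily from r; io.EOF from the decoder marks the end of the stream.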
+func newStringReaderIterator(ctx context.Context, r io.Reader, stats IteratorStats) *stringReaderIterator { + dec := NewStringPointDecoder(ctx, r) + dec.stats = stats + + return &stringReaderIterator{ + r: r, + dec: dec, + } +} + +// Stats returns stats about points processed. +func (itr *stringReaderIterator) Stats() IteratorStats { return itr.dec.stats } + +// Close closes the underlying reader, if applicable. +func (itr *stringReaderIterator) Close() error { + if r, ok := itr.r.(io.ReadCloser); ok { + return r.Close() + } + return nil +} + +// Next returns the next point from the iterator. +func (itr *stringReaderIterator) Next() (*StringPoint, error) { + // OPTIMIZE(benbjohnson): Reuse point on iterator. + + // Unmarshal next point. + p := &StringPoint{} + if err := itr.dec.DecodeStringPoint(p); err == io.EOF { + return nil, nil + } else if err != nil { + return nil, err + } + return p, nil +} + +// BooleanIterator represents a stream of boolean points. +type BooleanIterator interface { + Iterator + Next() (*BooleanPoint, error) +} + +// newBooleanIterators converts a slice of Iterator to a slice of BooleanIterator. +// Drop and closes any iterator in itrs that is not a BooleanIterator and cannot +// be cast to a BooleanIterator. +func newBooleanIterators(itrs []Iterator) []BooleanIterator { + a := make([]BooleanIterator, 0, len(itrs)) + for _, itr := range itrs { + switch itr := itr.(type) { + case BooleanIterator: + a = append(a, itr) + default: + itr.Close() + } + } + return a +} + +// bufBooleanIterator represents a buffered BooleanIterator. +type bufBooleanIterator struct { + itr BooleanIterator + buf *BooleanPoint +} + +// newBufBooleanIterator returns a buffered BooleanIterator. +func newBufBooleanIterator(itr BooleanIterator) *bufBooleanIterator { + return &bufBooleanIterator{itr: itr} +} + +// Stats returns statistics from the input iterator. +func (itr *bufBooleanIterator) Stats() IteratorStats { return itr.itr.Stats() } + +// Close closes the underlying iterator. +func (itr *bufBooleanIterator) Close() error { return itr.itr.Close() } + +// peek returns the next point without removing it from the iterator. +func (itr *bufBooleanIterator) peek() (*BooleanPoint, error) { + p, err := itr.Next() + if err != nil { + return nil, err + } + itr.unread(p) + return p, nil +} + +// peekTime returns the time of the next point. +// Returns zero time if no more points available. +func (itr *bufBooleanIterator) peekTime() (int64, error) { + p, err := itr.peek() + if p == nil || err != nil { + return ZeroTime, err + } + return p.Time, nil +} + +// Next returns the current buffer, if exists, or calls the underlying iterator. +func (itr *bufBooleanIterator) Next() (*BooleanPoint, error) { + buf := itr.buf + if buf != nil { + itr.buf = nil + return buf, nil + } + return itr.itr.Next() +} + +// NextInWindow returns the next value if it is between [startTime, endTime). +// If the next value is outside the range then it is moved to the buffer. +func (itr *bufBooleanIterator) NextInWindow(startTime, endTime int64) (*BooleanPoint, error) { + v, err := itr.Next() + if v == nil || err != nil { + return nil, err + } else if t := v.Time; t >= endTime || t < startTime { + itr.unread(v) + return nil, nil + } + return v, nil +} + +// unread sets v to the buffer. It is read on the next call to Next(). +func (itr *bufBooleanIterator) unread(v *BooleanPoint) { itr.buf = v } + +// booleanMergeIterator represents an iterator that combines multiple boolean iterators. 
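+// Inputs are combined one window at a time using a heap that orders iterators by their next window and then by name/tags.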
+type booleanMergeIterator struct { + inputs []BooleanIterator + heap *booleanMergeHeap + init bool + + closed bool + mu sync.RWMutex + + // Current iterator and window. + curr *booleanMergeHeapItem + window struct { + name string + tags string + startTime int64 + endTime int64 + } +} + +// newBooleanMergeIterator returns a new instance of booleanMergeIterator. +func newBooleanMergeIterator(inputs []BooleanIterator, opt IteratorOptions) *booleanMergeIterator { + itr := &booleanMergeIterator{ + inputs: inputs, + heap: &booleanMergeHeap{ + items: make([]*booleanMergeHeapItem, 0, len(inputs)), + opt: opt, + }, + } + + // Initialize heap items. + for _, input := range inputs { + // Wrap in buffer, ignore any inputs without anymore points. + bufInput := newBufBooleanIterator(input) + + // Append to the heap. + itr.heap.items = append(itr.heap.items, &booleanMergeHeapItem{itr: bufInput}) + } + + return itr +} + +// Stats returns an aggregation of stats from the underlying iterators. +func (itr *booleanMergeIterator) Stats() IteratorStats { + var stats IteratorStats + for _, input := range itr.inputs { + stats.Add(input.Stats()) + } + return stats +} + +// Close closes the underlying iterators. +func (itr *booleanMergeIterator) Close() error { + itr.mu.Lock() + defer itr.mu.Unlock() + + for _, input := range itr.inputs { + input.Close() + } + itr.curr = nil + itr.inputs = nil + itr.heap.items = nil + itr.closed = true + return nil +} + +// Next returns the next point from the iterator. +func (itr *booleanMergeIterator) Next() (*BooleanPoint, error) { + itr.mu.RLock() + defer itr.mu.RUnlock() + if itr.closed { + return nil, nil + } + + // Initialize the heap. This needs to be done lazily on the first call to this iterator + // so that iterator initialization done through the Select() call returns quickly. + // Queries can only be interrupted after the Select() call completes so any operations + // done during iterator creation cannot be interrupted, which is why we do it here + // instead so an interrupt can happen while initializing the heap. + if !itr.init { + items := itr.heap.items + itr.heap.items = make([]*booleanMergeHeapItem, 0, len(items)) + for _, item := range items { + if p, err := item.itr.peek(); err != nil { + return nil, err + } else if p == nil { + continue + } + itr.heap.items = append(itr.heap.items, item) + } + heap.Init(itr.heap) + itr.init = true + } + + for { + // Retrieve the next iterator if we don't have one. + if itr.curr == nil { + if len(itr.heap.items) == 0 { + return nil, nil + } + itr.curr = heap.Pop(itr.heap).(*booleanMergeHeapItem) + + // Read point and set current window. + p, err := itr.curr.itr.Next() + if err != nil { + return nil, err + } + tags := p.Tags.Subset(itr.heap.opt.Dimensions) + itr.window.name, itr.window.tags = p.Name, tags.ID() + itr.window.startTime, itr.window.endTime = itr.heap.opt.Window(p.Time) + return p, nil + } + + // Read the next point from the current iterator. + p, err := itr.curr.itr.Next() + if err != nil { + return nil, err + } + + // If there are no more points then remove iterator from heap and find next. + if p == nil { + itr.curr = nil + continue + } + + // Check if the point is inside of our current window. 
+ inWindow := true + if window := itr.window; window.name != p.Name { + inWindow = false + } else if tags := p.Tags.Subset(itr.heap.opt.Dimensions); window.tags != tags.ID() { + inWindow = false + } else if opt := itr.heap.opt; opt.Ascending && p.Time >= window.endTime { + inWindow = false + } else if !opt.Ascending && p.Time < window.startTime { + inWindow = false + } + + // If it's outside our window then push iterator back on the heap and find new iterator. + if !inWindow { + itr.curr.itr.unread(p) + heap.Push(itr.heap, itr.curr) + itr.curr = nil + continue + } + + return p, nil + } +} + +// booleanMergeHeap represents a heap of booleanMergeHeapItems. +// Items are sorted by their next window and then by name/tags. +type booleanMergeHeap struct { + opt IteratorOptions + items []*booleanMergeHeapItem +} + +func (h *booleanMergeHeap) Len() int { return len(h.items) } +func (h *booleanMergeHeap) Swap(i, j int) { h.items[i], h.items[j] = h.items[j], h.items[i] } +func (h *booleanMergeHeap) Less(i, j int) bool { + x, err := h.items[i].itr.peek() + if err != nil { + return true + } + y, err := h.items[j].itr.peek() + if err != nil { + return false + } + + if h.opt.Ascending { + if x.Name != y.Name { + return x.Name < y.Name + } else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); xTags.ID() != yTags.ID() { + return xTags.ID() < yTags.ID() + } + } else { + if x.Name != y.Name { + return x.Name > y.Name + } else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); xTags.ID() != yTags.ID() { + return xTags.ID() > yTags.ID() + } + } + + xt, _ := h.opt.Window(x.Time) + yt, _ := h.opt.Window(y.Time) + + if h.opt.Ascending { + return xt < yt + } + return xt > yt +} + +func (h *booleanMergeHeap) Push(x interface{}) { + h.items = append(h.items, x.(*booleanMergeHeapItem)) +} + +func (h *booleanMergeHeap) Pop() interface{} { + old := h.items + n := len(old) + item := old[n-1] + h.items = old[0 : n-1] + return item +} + +type booleanMergeHeapItem struct { + itr *bufBooleanIterator +} + +// booleanSortedMergeIterator is an iterator that sorts and merges multiple iterators into one. +type booleanSortedMergeIterator struct { + inputs []BooleanIterator + heap *booleanSortedMergeHeap + init bool +} + +// newBooleanSortedMergeIterator returns an instance of booleanSortedMergeIterator. +func newBooleanSortedMergeIterator(inputs []BooleanIterator, opt IteratorOptions) Iterator { + itr := &booleanSortedMergeIterator{ + inputs: inputs, + heap: &booleanSortedMergeHeap{ + items: make([]*booleanSortedMergeHeapItem, 0, len(inputs)), + opt: opt, + }, + } + + // Initialize heap items. + for _, input := range inputs { + // Append to the heap. + itr.heap.items = append(itr.heap.items, &booleanSortedMergeHeapItem{itr: input}) + } + + return itr +} + +// Stats returns an aggregation of stats from the underlying iterators. +func (itr *booleanSortedMergeIterator) Stats() IteratorStats { + var stats IteratorStats + for _, input := range itr.inputs { + stats.Add(input.Stats()) + } + return stats +} + +// Close closes the underlying iterators. +func (itr *booleanSortedMergeIterator) Close() error { + for _, input := range itr.inputs { + input.Close() + } + return nil +} + +// Next returns the next points from the iterator. +func (itr *booleanSortedMergeIterator) Next() (*BooleanPoint, error) { return itr.pop() } + +// pop returns the next point from the heap. +// Reads the next point from item's cursor and puts it back on the heap. 
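Editor's note: the merge iterators keep one buffered input per item on a container/heap and repeatedly pop whichever input's next point sorts first. A simplified standalone sketch of that k-way merge over plain int slices, ordered only by value rather than by the name/tag/window keys used above:

package main

import (
	"container/heap"
	"fmt"
)

// mergeItem tracks one already-sorted input and its next value.
type mergeItem struct {
	vals []int
	next int
}

type mergeHeap []*mergeItem

func (h mergeHeap) Len() int            { return len(h) }
func (h mergeHeap) Less(i, j int) bool  { return h[i].next < h[j].next }
func (h mergeHeap) Swap(i, j int)       { h[i], h[j] = h[j], h[i] }
func (h *mergeHeap) Push(x interface{}) { *h = append(*h, x.(*mergeItem)) }
func (h *mergeHeap) Pop() interface{} {
	old := *h
	n := len(old)
	item := old[n-1]
	*h = old[:n-1]
	return item
}

// merge combines sorted inputs into one sorted output, mirroring how the
// merge iterators pop a heap item, read from it, and push it back.
func merge(inputs ...[]int) []int {
	h := &mergeHeap{}
	for _, in := range inputs {
		if len(in) > 0 {
			*h = append(*h, &mergeItem{vals: in[1:], next: in[0]})
		}
	}
	heap.Init(h)

	var out []int
	for h.Len() > 0 {
		item := heap.Pop(h).(*mergeItem)
		out = append(out, item.next)
		if len(item.vals) > 0 {
			item.next, item.vals = item.vals[0], item.vals[1:]
			heap.Push(h, item)
		}
	}
	return out
}

func main() {
	fmt.Println(merge([]int{1, 4, 9}, []int{2, 3}, []int{5})) // [1 2 3 4 5 9]
}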
+func (itr *booleanSortedMergeIterator) pop() (*BooleanPoint, error) { + // Initialize the heap. See the MergeIterator to see why this has to be done lazily. + if !itr.init { + items := itr.heap.items + itr.heap.items = make([]*booleanSortedMergeHeapItem, 0, len(items)) + for _, item := range items { + var err error + if item.point, err = item.itr.Next(); err != nil { + return nil, err + } else if item.point == nil { + continue + } + itr.heap.items = append(itr.heap.items, item) + } + heap.Init(itr.heap) + itr.init = true + } + + if len(itr.heap.items) == 0 { + return nil, nil + } + + // Read the next item from the heap. + item := heap.Pop(itr.heap).(*booleanSortedMergeHeapItem) + if item.err != nil { + return nil, item.err + } else if item.point == nil { + return nil, nil + } + + // Copy the point for return. + p := item.point.Clone() + + // Read the next item from the cursor. Push back to heap if one exists. + if item.point, item.err = item.itr.Next(); item.point != nil { + heap.Push(itr.heap, item) + } + + return p, nil +} + +// booleanSortedMergeHeap represents a heap of booleanSortedMergeHeapItems. +// Items are sorted with the following priority: +// - By their measurement name; +// - By their tag keys/values; +// - By time; or +// - By their Aux field values. +// +type booleanSortedMergeHeap struct { + opt IteratorOptions + items []*booleanSortedMergeHeapItem +} + +func (h *booleanSortedMergeHeap) Len() int { return len(h.items) } +func (h *booleanSortedMergeHeap) Swap(i, j int) { h.items[i], h.items[j] = h.items[j], h.items[i] } +func (h *booleanSortedMergeHeap) Less(i, j int) bool { + x, y := h.items[i].point, h.items[j].point + + if h.opt.Ascending { + if x.Name != y.Name { + return x.Name < y.Name + } else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); !xTags.Equals(&yTags) { + return xTags.ID() < yTags.ID() + } + + if x.Time != y.Time { + return x.Time < y.Time + } + + if len(x.Aux) > 0 && len(x.Aux) == len(y.Aux) { + for i := 0; i < len(x.Aux); i++ { + v1, ok1 := x.Aux[i].(string) + v2, ok2 := y.Aux[i].(string) + if !ok1 || !ok2 { + // Unsupported types used in Aux fields. Maybe they + // need to be added here? + return false + } else if v1 == v2 { + continue + } + return v1 < v2 + } + } + return false // Times and/or Aux fields are equal. + } + + if x.Name != y.Name { + return x.Name > y.Name + } else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); !xTags.Equals(&yTags) { + return xTags.ID() > yTags.ID() + } + + if x.Time != y.Time { + return x.Time > y.Time + } + + if len(x.Aux) > 0 && len(x.Aux) == len(y.Aux) { + for i := 0; i < len(x.Aux); i++ { + v1, ok1 := x.Aux[i].(string) + v2, ok2 := y.Aux[i].(string) + if !ok1 || !ok2 { + // Unsupported types used in Aux fields. Maybe they + // need to be added here? + return false + } else if v1 == v2 { + continue + } + return v1 > v2 + } + } + return false // Times and/or Aux fields are equal. +} + +func (h *booleanSortedMergeHeap) Push(x interface{}) { + h.items = append(h.items, x.(*booleanSortedMergeHeapItem)) +} + +func (h *booleanSortedMergeHeap) Pop() interface{} { + old := h.items + n := len(old) + item := old[n-1] + h.items = old[0 : n-1] + return item +} + +type booleanSortedMergeHeapItem struct { + point *BooleanPoint + err error + itr BooleanIterator +} + +// booleanIteratorScanner scans the results of a BooleanIterator into a map. 
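Editor's note: booleanSortedMergeHeap.Less above compares several keys in sequence (name, then tag ID, then time, then aux values), flipping every comparison when the query is descending. A compact sketch of that multi-key comparison with hypothetical stand-in fields:

package main

import (
	"fmt"
	"sort"
)

// row stands in for a point's sort keys: measurement name, tag ID, time.
type row struct {
	name string
	tags string
	time int64
}

// less mirrors the heap's priority: name first, then tags, then time;
// ascending flips every comparison, as in booleanSortedMergeHeap.Less.
func less(x, y row, ascending bool) bool {
	if x.name != y.name {
		if ascending {
			return x.name < y.name
		}
		return x.name > y.name
	}
	if x.tags != y.tags {
		if ascending {
			return x.tags < y.tags
		}
		return x.tags > y.tags
	}
	if ascending {
		return x.time < y.time
	}
	return x.time > y.time
}

func main() {
	rows := []row{
		{"mem", "host=a", 2},
		{"cpu", "host=b", 1},
		{"cpu", "host=a", 3},
		{"cpu", "host=a", 1},
	}
	sort.Slice(rows, func(i, j int) bool { return less(rows[i], rows[j], true) })
	fmt.Println(rows)
}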
+type booleanIteratorScanner struct { + input *bufBooleanIterator + err error + keys []influxql.VarRef + defaultValue interface{} +} + +// newBooleanIteratorScanner creates a new IteratorScanner. +func newBooleanIteratorScanner(input BooleanIterator, keys []influxql.VarRef, defaultValue interface{}) *booleanIteratorScanner { + return &booleanIteratorScanner{ + input: newBufBooleanIterator(input), + keys: keys, + defaultValue: defaultValue, + } +} + +func (s *booleanIteratorScanner) Peek() (int64, string, Tags) { + if s.err != nil { + return ZeroTime, "", Tags{} + } + + p, err := s.input.peek() + if err != nil { + s.err = err + return ZeroTime, "", Tags{} + } else if p == nil { + return ZeroTime, "", Tags{} + } + return p.Time, p.Name, p.Tags +} + +func (s *booleanIteratorScanner) ScanAt(ts int64, name string, tags Tags, m map[string]interface{}) { + if s.err != nil { + return + } + + p, err := s.input.Next() + if err != nil { + s.err = err + return + } else if p == nil { + s.useDefaults(m) + return + } else if p.Time != ts || p.Name != name || !p.Tags.Equals(&tags) { + s.useDefaults(m) + s.input.unread(p) + return + } + + if k := s.keys[0]; k.Val != "" { + if p.Nil { + if s.defaultValue != SkipDefault { + m[k.Val] = castToType(s.defaultValue, k.Type) + } + } else { + m[k.Val] = p.Value + } + } + for i, v := range p.Aux { + k := s.keys[i+1] + switch v.(type) { + case float64, int64, uint64, string, bool: + m[k.Val] = v + default: + // Insert the fill value if one was specified. + if s.defaultValue != SkipDefault { + m[k.Val] = castToType(s.defaultValue, k.Type) + } + } + } +} + +func (s *booleanIteratorScanner) useDefaults(m map[string]interface{}) { + if s.defaultValue == SkipDefault { + return + } + for _, k := range s.keys { + if k.Val == "" { + continue + } + m[k.Val] = castToType(s.defaultValue, k.Type) + } +} + +func (s *booleanIteratorScanner) Stats() IteratorStats { return s.input.Stats() } +func (s *booleanIteratorScanner) Err() error { return s.err } +func (s *booleanIteratorScanner) Close() error { return s.input.Close() } + +// booleanParallelIterator represents an iterator that pulls data in a separate goroutine. +type booleanParallelIterator struct { + input BooleanIterator + ch chan booleanPointError + + once sync.Once + closing chan struct{} + wg sync.WaitGroup +} + +// newBooleanParallelIterator returns a new instance of booleanParallelIterator. +func newBooleanParallelIterator(input BooleanIterator) *booleanParallelIterator { + itr := &booleanParallelIterator{ + input: input, + ch: make(chan booleanPointError, 256), + closing: make(chan struct{}), + } + itr.wg.Add(1) + go itr.monitor() + return itr +} + +// Stats returns stats from the underlying iterator. +func (itr *booleanParallelIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the underlying iterators. +func (itr *booleanParallelIterator) Close() error { + itr.once.Do(func() { close(itr.closing) }) + itr.wg.Wait() + return itr.input.Close() +} + +// Next returns the next point from the iterator. +func (itr *booleanParallelIterator) Next() (*BooleanPoint, error) { + v, ok := <-itr.ch + if !ok { + return nil, io.EOF + } + return v.point, v.err +} + +// monitor runs in a separate goroutine and actively pulls the next point. +func (itr *booleanParallelIterator) monitor() { + defer close(itr.ch) + defer itr.wg.Done() + + for { + // Read next point. 
+ p, err := itr.input.Next() + if p != nil { + p = p.Clone() + } + + select { + case <-itr.closing: + return + case itr.ch <- booleanPointError{point: p, err: err}: + } + } +} + +type booleanPointError struct { + point *BooleanPoint + err error +} + +// booleanLimitIterator represents an iterator that limits points per group. +type booleanLimitIterator struct { + input BooleanIterator + opt IteratorOptions + n int + + prev struct { + name string + tags Tags + } +} + +// newBooleanLimitIterator returns a new instance of booleanLimitIterator. +func newBooleanLimitIterator(input BooleanIterator, opt IteratorOptions) *booleanLimitIterator { + return &booleanLimitIterator{ + input: input, + opt: opt, + } +} + +// Stats returns stats from the underlying iterator. +func (itr *booleanLimitIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the underlying iterators. +func (itr *booleanLimitIterator) Close() error { return itr.input.Close() } + +// Next returns the next point from the iterator. +func (itr *booleanLimitIterator) Next() (*BooleanPoint, error) { + for { + p, err := itr.input.Next() + if p == nil || err != nil { + return nil, err + } + + // Reset window and counter if a new window is encountered. + if p.Name != itr.prev.name || !p.Tags.Equals(&itr.prev.tags) { + itr.prev.name = p.Name + itr.prev.tags = p.Tags + itr.n = 0 + } + + // Increment counter. + itr.n++ + + // Read next point if not beyond the offset. + if itr.n <= itr.opt.Offset { + continue + } + + // Read next point if we're beyond the limit. + if itr.opt.Limit > 0 && (itr.n-itr.opt.Offset) > itr.opt.Limit { + continue + } + + return p, nil + } +} + +type booleanFillIterator struct { + input *bufBooleanIterator + prev BooleanPoint + startTime int64 + endTime int64 + auxFields []interface{} + init bool + opt IteratorOptions + + window struct { + name string + tags Tags + time int64 + offset int64 + } +} + +func newBooleanFillIterator(input BooleanIterator, expr influxql.Expr, opt IteratorOptions) *booleanFillIterator { + if opt.Fill == influxql.NullFill { + if expr, ok := expr.(*influxql.Call); ok && expr.Name == "count" { + opt.Fill = influxql.NumberFill + opt.FillValue = false + } + } + + var startTime, endTime int64 + if opt.Ascending { + startTime, _ = opt.Window(opt.StartTime) + endTime, _ = opt.Window(opt.EndTime) + } else { + startTime, _ = opt.Window(opt.EndTime) + endTime, _ = opt.Window(opt.StartTime) + } + + var auxFields []interface{} + if len(opt.Aux) > 0 { + auxFields = make([]interface{}, len(opt.Aux)) + } + + return &booleanFillIterator{ + input: newBufBooleanIterator(input), + prev: BooleanPoint{Nil: true}, + startTime: startTime, + endTime: endTime, + auxFields: auxFields, + opt: opt, + } +} + +func (itr *booleanFillIterator) Stats() IteratorStats { return itr.input.Stats() } +func (itr *booleanFillIterator) Close() error { return itr.input.Close() } + +func (itr *booleanFillIterator) Next() (*BooleanPoint, error) { + if !itr.init { + p, err := itr.input.peek() + if p == nil || err != nil { + return nil, err + } + itr.window.name, itr.window.tags = p.Name, p.Tags + itr.window.time = itr.startTime + if itr.startTime == influxql.MinTime { + itr.window.time, _ = itr.opt.Window(p.Time) + } + if itr.opt.Location != nil { + _, itr.window.offset = itr.opt.Zone(itr.window.time) + } + itr.init = true + } + + p, err := itr.input.Next() + if err != nil { + return nil, err + } + + // Check if the next point is outside of our window or is nil. 
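Editor's note: booleanParallelIterator above moves blocking reads into a monitor goroutine that feeds a buffered channel; Close signals the goroutine through a closing channel (guarded by sync.Once) and waits on a WaitGroup. A reduced standalone sketch of that producer/consumer shape over plain ints:

package main

import (
	"fmt"
	"io"
	"sync"
)

// parallelInts pulls values from a slice in a separate goroutine,
// mirroring the ch/closing/once/wg fields of booleanParallelIterator.
type parallelInts struct {
	ch      chan int
	closing chan struct{}
	once    sync.Once
	wg      sync.WaitGroup
}

func newParallelInts(src []int) *parallelInts {
	p := &parallelInts{
		ch:      make(chan int, 256), // same buffer size as the vendored code
		closing: make(chan struct{}),
	}
	p.wg.Add(1)
	go func() {
		defer close(p.ch)
		defer p.wg.Done()
		for _, v := range src {
			select {
			case <-p.closing:
				return
			case p.ch <- v:
			}
		}
	}()
	return p
}

func (p *parallelInts) Next() (int, error) {
	v, ok := <-p.ch
	if !ok {
		return 0, io.EOF
	}
	return v, nil
}

func (p *parallelInts) Close() {
	p.once.Do(func() { close(p.closing) })
	p.wg.Wait()
}

func main() {
	p := newParallelInts([]int{1, 2, 3})
	for {
		v, err := p.Next()
		if err == io.EOF {
			break
		}
		fmt.Println(v)
	}
	p.Close()
}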
+ if p == nil || p.Name != itr.window.name || p.Tags.ID() != itr.window.tags.ID() { + // If we are inside of an interval, unread the point and continue below to + // constructing a new point. + if itr.opt.Ascending && itr.window.time <= itr.endTime { + itr.input.unread(p) + p = nil + goto CONSTRUCT + } else if !itr.opt.Ascending && itr.window.time >= itr.endTime && itr.endTime != influxql.MinTime { + itr.input.unread(p) + p = nil + goto CONSTRUCT + } + + // We are *not* in a current interval. If there is no next point, + // we are at the end of all intervals. + if p == nil { + return nil, nil + } + + // Set the new interval. + itr.window.name, itr.window.tags = p.Name, p.Tags + itr.window.time = itr.startTime + if itr.window.time == influxql.MinTime { + itr.window.time, _ = itr.opt.Window(p.Time) + } + if itr.opt.Location != nil { + _, itr.window.offset = itr.opt.Zone(itr.window.time) + } + itr.prev = BooleanPoint{Nil: true} + } + + // Check if the point is our next expected point. +CONSTRUCT: + if p == nil || (itr.opt.Ascending && p.Time > itr.window.time) || (!itr.opt.Ascending && p.Time < itr.window.time) { + if p != nil { + itr.input.unread(p) + } + + p = &BooleanPoint{ + Name: itr.window.name, + Tags: itr.window.tags, + Time: itr.window.time, + Aux: itr.auxFields, + } + + switch itr.opt.Fill { + case influxql.LinearFill: + fallthrough + case influxql.NullFill: + p.Nil = true + case influxql.NumberFill: + p.Value, _ = castToBoolean(itr.opt.FillValue) + case influxql.PreviousFill: + if !itr.prev.Nil { + p.Value = itr.prev.Value + p.Nil = itr.prev.Nil + } else { + p.Nil = true + } + } + } else { + itr.prev = *p + } + + // Advance the expected time. Do not advance to a new window here + // as there may be lingering points with the same timestamp in the previous + // window. + if itr.opt.Ascending { + itr.window.time += int64(itr.opt.Interval.Duration) + } else { + itr.window.time -= int64(itr.opt.Interval.Duration) + } + + // Check to see if we have passed over an offset change and adjust the time + // to account for this new offset. + if itr.opt.Location != nil { + if _, offset := itr.opt.Zone(itr.window.time - 1); offset != itr.window.offset { + diff := itr.window.offset - offset + if abs(diff) < int64(itr.opt.Interval.Duration) { + itr.window.time += diff + } + itr.window.offset = offset + } + } + return p, nil +} + +// booleanIntervalIterator represents a boolean implementation of IntervalIterator. +type booleanIntervalIterator struct { + input BooleanIterator + opt IteratorOptions +} + +func newBooleanIntervalIterator(input BooleanIterator, opt IteratorOptions) *booleanIntervalIterator { + return &booleanIntervalIterator{input: input, opt: opt} +} + +func (itr *booleanIntervalIterator) Stats() IteratorStats { return itr.input.Stats() } +func (itr *booleanIntervalIterator) Close() error { return itr.input.Close() } + +func (itr *booleanIntervalIterator) Next() (*BooleanPoint, error) { + p, err := itr.input.Next() + if p == nil || err != nil { + return nil, err + } + p.Time, _ = itr.opt.Window(p.Time) + // If we see the minimum allowable time, set the time to zero so we don't + // break the default returned time for aggregate queries without times. + if p.Time == influxql.MinTime { + p.Time = 0 + } + return p, nil +} + +// booleanInterruptIterator represents a boolean implementation of InterruptIterator. 
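Editor's note: the fill iterator above emits a synthetic point for every interval that has no real data, choosing the value according to the configured fill mode (null, number, previous, or linear). A much-reduced sketch of filling gaps on a regular time grid with a previous-value policy; the types and helper are hypothetical, not the package's fill machinery:

package main

import "fmt"

// sample is a minimal stand-in for a point on a regular interval grid.
type sample struct {
	time   int64
	val    bool
	filled bool // true when the slot was synthesized rather than observed
}

// fillPrevious walks the grid from start to end (exclusive) in steps of
// interval and repeats the previous value wherever no input sample lands,
// roughly what PreviousFill does above.
func fillPrevious(in []sample, start, end, interval int64) []sample {
	byTime := make(map[int64]sample, len(in))
	for _, s := range in {
		byTime[s.time] = s
	}

	var out []sample
	prev := sample{filled: true}
	for t := start; t < end; t += interval {
		if s, ok := byTime[t]; ok {
			prev = s
			out = append(out, s)
			continue
		}
		out = append(out, sample{time: t, val: prev.val, filled: true})
	}
	return out
}

func main() {
	in := []sample{{time: 0, val: true}, {time: 20, val: false}}
	for _, s := range fillPrevious(in, 0, 40, 10) {
		fmt.Printf("%d %v filled=%v\n", s.time, s.val, s.filled)
	}
}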
+type booleanInterruptIterator struct { + input BooleanIterator + closing <-chan struct{} + count int +} + +func newBooleanInterruptIterator(input BooleanIterator, closing <-chan struct{}) *booleanInterruptIterator { + return &booleanInterruptIterator{input: input, closing: closing} +} + +func (itr *booleanInterruptIterator) Stats() IteratorStats { return itr.input.Stats() } +func (itr *booleanInterruptIterator) Close() error { return itr.input.Close() } + +func (itr *booleanInterruptIterator) Next() (*BooleanPoint, error) { + // Only check if the channel is closed every N points. This + // intentionally checks on both 0 and N so that if the iterator + // has been interrupted before the first point is emitted it will + // not emit any points. + if itr.count&0xFF == 0xFF { + select { + case <-itr.closing: + return nil, itr.Close() + default: + // Reset iterator count to zero and fall through to emit the next point. + itr.count = 0 + } + } + + // Increment the counter for every point read. + itr.count++ + return itr.input.Next() +} + +// booleanCloseInterruptIterator represents a boolean implementation of CloseInterruptIterator. +type booleanCloseInterruptIterator struct { + input BooleanIterator + closing <-chan struct{} + done chan struct{} + once sync.Once +} + +func newBooleanCloseInterruptIterator(input BooleanIterator, closing <-chan struct{}) *booleanCloseInterruptIterator { + itr := &booleanCloseInterruptIterator{ + input: input, + closing: closing, + done: make(chan struct{}), + } + go itr.monitor() + return itr +} + +func (itr *booleanCloseInterruptIterator) monitor() { + select { + case <-itr.closing: + itr.Close() + case <-itr.done: + } +} + +func (itr *booleanCloseInterruptIterator) Stats() IteratorStats { + return itr.input.Stats() +} + +func (itr *booleanCloseInterruptIterator) Close() error { + itr.once.Do(func() { + close(itr.done) + itr.input.Close() + }) + return nil +} + +func (itr *booleanCloseInterruptIterator) Next() (*BooleanPoint, error) { + p, err := itr.input.Next() + if err != nil { + // Check if the iterator was closed. + select { + case <-itr.done: + return nil, nil + default: + return nil, err + } + } + return p, nil +} + +// booleanReduceFloatIterator executes a reducer for every interval and buffers the result. +type booleanReduceFloatIterator struct { + input *bufBooleanIterator + create func() (BooleanPointAggregator, FloatPointEmitter) + dims []string + opt IteratorOptions + points []FloatPoint + keepTags bool +} + +func newBooleanReduceFloatIterator(input BooleanIterator, opt IteratorOptions, createFn func() (BooleanPointAggregator, FloatPointEmitter)) *booleanReduceFloatIterator { + return &booleanReduceFloatIterator{ + input: newBufBooleanIterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + } +} + +// Stats returns stats from the input iterator. +func (itr *booleanReduceFloatIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *booleanReduceFloatIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *booleanReduceFloatIterator) Next() (*FloatPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. 
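Editor's note: booleanInterruptIterator above avoids a select on every point by polling the closing channel only once every 256 points, using a bit mask on a running counter. A standalone sketch of that polling idiom in a hypothetical consume loop:

package main

import (
	"errors"
	"fmt"
)

var errInterrupted = errors.New("interrupted")

// consume sums values but only polls the closing channel once every 256
// iterations, mirroring the count&0xFF == 0xFF check above.
func consume(values []int, closing <-chan struct{}) (int, error) {
	var count, sum int
	for _, v := range values {
		if count&0xFF == 0xFF {
			select {
			case <-closing:
				return sum, errInterrupted
			default:
				count = 0
			}
		}
		count++
		sum += v
	}
	return sum, nil
}

func main() {
	closing := make(chan struct{})
	values := make([]int, 1000)
	for i := range values {
		values[i] = 1
	}
	sum, err := consume(values, closing)
	fmt.Println(sum, err) // 1000 <nil>
}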
+ p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// booleanReduceFloatPoint stores the reduced data for a name/tag combination. +type booleanReduceFloatPoint struct { + Name string + Tags Tags + Aggregator BooleanPointAggregator + Emitter FloatPointEmitter +} + +// reduce executes fn once for every point in the next window. +// The previous value for the dimension is passed to fn. +func (itr *booleanReduceFloatIterator) reduce() ([]FloatPoint, error) { + // Calculate next window. + var ( + startTime, endTime int64 + window struct { + name string + tags string + } + ) + for { + p, err := itr.input.Next() + if err != nil || p == nil { + return nil, err + } else if p.Nil { + continue + } + + // Unread the point so it can be processed. + itr.input.unread(p) + startTime, endTime = itr.opt.Window(p.Time) + window.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID() + break + } + + // Create points by tags. + m := make(map[string]*booleanReduceFloatPoint) + for { + // Read next point. + curr, err := itr.input.NextInWindow(startTime, endTime) + if err != nil { + return nil, err + } else if curr == nil { + break + } else if curr.Nil { + continue + } else if curr.Name != window.name { + itr.input.unread(curr) + break + } + + // Ensure this point is within the same final window. + if curr.Name != window.name { + itr.input.unread(curr) + break + } else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags { + itr.input.unread(curr) + break + } + + // Retrieve the tags on this point for this level of the query. + // This may be different than the bucket dimensions. + tags := curr.Tags.Subset(itr.dims) + id := tags.ID() + + // Retrieve the aggregator for this name/tag combination or create one. + rp := m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &booleanReduceFloatPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + m[id] = rp + } + rp.Aggregator.AggregateBoolean(curr) + } + + // Reverse sort points by name & tag if our output is supposed to be ordered. + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + if len(keys) > 1 && itr.opt.Ordered { + sort.Sort(reverseStringSlice(keys)) + } + + // Assume the points are already sorted until proven otherwise. + sortedByTime := true + // Emit the points for each name & tag combination. + a := make([]FloatPoint, 0, len(m)) + for _, k := range keys { + rp := m[k] + points := rp.Emitter.Emit() + for i := len(points) - 1; i >= 0; i-- { + points[i].Name = rp.Name + if !itr.keepTags { + points[i].Tags = rp.Tags + } + // Set the points time to the interval time if the reducer didn't provide one. + if points[i].Time == ZeroTime { + points[i].Time = startTime + } else { + sortedByTime = false + } + a = append(a, points[i]) + } + } + + // Points may be out of order. Perform a stable sort by time if requested. + if !sortedByTime && itr.opt.Ordered { + sort.Stable(sort.Reverse(floatPointsByTime(a))) + } + + return a, nil +} + +// booleanStreamFloatIterator streams inputs into the iterator and emits points gradually. +type booleanStreamFloatIterator struct { + input *bufBooleanIterator + create func() (BooleanPointAggregator, FloatPointEmitter) + dims []string + opt IteratorOptions + m map[string]*booleanReduceFloatPoint + points []FloatPoint +} + +// newBooleanStreamFloatIterator returns a new instance of booleanStreamFloatIterator. 
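Editor's note: each reduce iterator above collects one window's points into per-tag-set aggregators (the map[string]*booleanReduce*Point) and then emits the aggregated batch. A condensed sketch of that group-and-aggregate step, counting true booleans per group key; the helper and types are hypothetical stand-ins, not the Aggregator/Emitter interfaces used above:

package main

import (
	"fmt"
	"sort"
)

// keyedBool is a stand-in for a point already assigned to a window:
// only its group key (name + tag ID) and value remain relevant.
type keyedBool struct {
	key string
	val bool
}

// countTrue aggregates one window's points per group key, the same shape
// as the per-tag-set bookkeeping in reduce().
func countTrue(window []keyedBool) map[string]int {
	counts := make(map[string]int)
	for _, p := range window {
		if p.val {
			counts[p.key]++
		} else {
			counts[p.key] += 0 // ensure the group exists even if all values are false
		}
	}
	return counts
}

func main() {
	window := []keyedBool{
		{"cpu,host=a", true},
		{"cpu,host=a", false},
		{"cpu,host=b", true},
	}
	counts := countTrue(window)

	keys := make([]string, 0, len(counts))
	for k := range counts {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	for _, k := range keys {
		fmt.Println(k, counts[k])
	}
}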
+func newBooleanStreamFloatIterator(input BooleanIterator, createFn func() (BooleanPointAggregator, FloatPointEmitter), opt IteratorOptions) *booleanStreamFloatIterator { + return &booleanStreamFloatIterator{ + input: newBufBooleanIterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + m: make(map[string]*booleanReduceFloatPoint), + } +} + +// Stats returns stats from the input iterator. +func (itr *booleanStreamFloatIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *booleanStreamFloatIterator) Close() error { return itr.input.Close() } + +// Next returns the next value for the stream iterator. +func (itr *booleanStreamFloatIterator) Next() (*FloatPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// reduce creates and manages aggregators for every point from the input. +// After aggregating a point, it always tries to emit a value using the emitter. +func (itr *booleanStreamFloatIterator) reduce() ([]FloatPoint, error) { + // We have already read all of the input points. + if itr.m == nil { + return nil, nil + } + + for { + // Read next point. + curr, err := itr.input.Next() + if err != nil { + return nil, err + } else if curr == nil { + // Close all of the aggregators to flush any remaining points to emit. + var points []FloatPoint + for _, rp := range itr.m { + if aggregator, ok := rp.Aggregator.(io.Closer); ok { + if err := aggregator.Close(); err != nil { + return nil, err + } + + pts := rp.Emitter.Emit() + if len(pts) == 0 { + continue + } + + for i := range pts { + pts[i].Name = rp.Name + pts[i].Tags = rp.Tags + } + points = append(points, pts...) + } + } + + // Eliminate the aggregators and emitters. + itr.m = nil + return points, nil + } else if curr.Nil { + continue + } + tags := curr.Tags.Subset(itr.dims) + + id := curr.Name + if len(tags.m) > 0 { + id += "\x00" + tags.ID() + } + + // Retrieve the aggregator for this name/tag combination or create one. + rp := itr.m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &booleanReduceFloatPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + itr.m[id] = rp + } + rp.Aggregator.AggregateBoolean(curr) + + // Attempt to emit points from the aggregator. + points := rp.Emitter.Emit() + if len(points) == 0 { + continue + } + + for i := range points { + points[i].Name = rp.Name + points[i].Tags = rp.Tags + } + return points, nil + } +} + +// booleanReduceIntegerIterator executes a reducer for every interval and buffers the result. +type booleanReduceIntegerIterator struct { + input *bufBooleanIterator + create func() (BooleanPointAggregator, IntegerPointEmitter) + dims []string + opt IteratorOptions + points []IntegerPoint + keepTags bool +} + +func newBooleanReduceIntegerIterator(input BooleanIterator, opt IteratorOptions, createFn func() (BooleanPointAggregator, IntegerPointEmitter)) *booleanReduceIntegerIterator { + return &booleanReduceIntegerIterator{ + input: newBufBooleanIterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + } +} + +// Stats returns stats from the input iterator. 
+func (itr *booleanReduceIntegerIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *booleanReduceIntegerIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *booleanReduceIntegerIterator) Next() (*IntegerPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// booleanReduceIntegerPoint stores the reduced data for a name/tag combination. +type booleanReduceIntegerPoint struct { + Name string + Tags Tags + Aggregator BooleanPointAggregator + Emitter IntegerPointEmitter +} + +// reduce executes fn once for every point in the next window. +// The previous value for the dimension is passed to fn. +func (itr *booleanReduceIntegerIterator) reduce() ([]IntegerPoint, error) { + // Calculate next window. + var ( + startTime, endTime int64 + window struct { + name string + tags string + } + ) + for { + p, err := itr.input.Next() + if err != nil || p == nil { + return nil, err + } else if p.Nil { + continue + } + + // Unread the point so it can be processed. + itr.input.unread(p) + startTime, endTime = itr.opt.Window(p.Time) + window.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID() + break + } + + // Create points by tags. + m := make(map[string]*booleanReduceIntegerPoint) + for { + // Read next point. + curr, err := itr.input.NextInWindow(startTime, endTime) + if err != nil { + return nil, err + } else if curr == nil { + break + } else if curr.Nil { + continue + } else if curr.Name != window.name { + itr.input.unread(curr) + break + } + + // Ensure this point is within the same final window. + if curr.Name != window.name { + itr.input.unread(curr) + break + } else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags { + itr.input.unread(curr) + break + } + + // Retrieve the tags on this point for this level of the query. + // This may be different than the bucket dimensions. + tags := curr.Tags.Subset(itr.dims) + id := tags.ID() + + // Retrieve the aggregator for this name/tag combination or create one. + rp := m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &booleanReduceIntegerPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + m[id] = rp + } + rp.Aggregator.AggregateBoolean(curr) + } + + // Reverse sort points by name & tag if our output is supposed to be ordered. + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + if len(keys) > 1 && itr.opt.Ordered { + sort.Sort(reverseStringSlice(keys)) + } + + // Assume the points are already sorted until proven otherwise. + sortedByTime := true + // Emit the points for each name & tag combination. + a := make([]IntegerPoint, 0, len(m)) + for _, k := range keys { + rp := m[k] + points := rp.Emitter.Emit() + for i := len(points) - 1; i >= 0; i-- { + points[i].Name = rp.Name + if !itr.keepTags { + points[i].Tags = rp.Tags + } + // Set the points time to the interval time if the reducer didn't provide one. + if points[i].Time == ZeroTime { + points[i].Time = startTime + } else { + sortedByTime = false + } + a = append(a, points[i]) + } + } + + // Points may be out of order. 
Perform a stable sort by time if requested. + if !sortedByTime && itr.opt.Ordered { + sort.Stable(sort.Reverse(integerPointsByTime(a))) + } + + return a, nil +} + +// booleanStreamIntegerIterator streams inputs into the iterator and emits points gradually. +type booleanStreamIntegerIterator struct { + input *bufBooleanIterator + create func() (BooleanPointAggregator, IntegerPointEmitter) + dims []string + opt IteratorOptions + m map[string]*booleanReduceIntegerPoint + points []IntegerPoint +} + +// newBooleanStreamIntegerIterator returns a new instance of booleanStreamIntegerIterator. +func newBooleanStreamIntegerIterator(input BooleanIterator, createFn func() (BooleanPointAggregator, IntegerPointEmitter), opt IteratorOptions) *booleanStreamIntegerIterator { + return &booleanStreamIntegerIterator{ + input: newBufBooleanIterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + m: make(map[string]*booleanReduceIntegerPoint), + } +} + +// Stats returns stats from the input iterator. +func (itr *booleanStreamIntegerIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *booleanStreamIntegerIterator) Close() error { return itr.input.Close() } + +// Next returns the next value for the stream iterator. +func (itr *booleanStreamIntegerIterator) Next() (*IntegerPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// reduce creates and manages aggregators for every point from the input. +// After aggregating a point, it always tries to emit a value using the emitter. +func (itr *booleanStreamIntegerIterator) reduce() ([]IntegerPoint, error) { + // We have already read all of the input points. + if itr.m == nil { + return nil, nil + } + + for { + // Read next point. + curr, err := itr.input.Next() + if err != nil { + return nil, err + } else if curr == nil { + // Close all of the aggregators to flush any remaining points to emit. + var points []IntegerPoint + for _, rp := range itr.m { + if aggregator, ok := rp.Aggregator.(io.Closer); ok { + if err := aggregator.Close(); err != nil { + return nil, err + } + + pts := rp.Emitter.Emit() + if len(pts) == 0 { + continue + } + + for i := range pts { + pts[i].Name = rp.Name + pts[i].Tags = rp.Tags + } + points = append(points, pts...) + } + } + + // Eliminate the aggregators and emitters. + itr.m = nil + return points, nil + } else if curr.Nil { + continue + } + tags := curr.Tags.Subset(itr.dims) + + id := curr.Name + if len(tags.m) > 0 { + id += "\x00" + tags.ID() + } + + // Retrieve the aggregator for this name/tag combination or create one. + rp := itr.m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &booleanReduceIntegerPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + itr.m[id] = rp + } + rp.Aggregator.AggregateBoolean(curr) + + // Attempt to emit points from the aggregator. + points := rp.Emitter.Emit() + if len(points) == 0 { + continue + } + + for i := range points { + points[i].Name = rp.Name + points[i].Tags = rp.Tags + } + return points, nil + } +} + +// booleanReduceUnsignedIterator executes a reducer for every interval and buffers the result. 
+type booleanReduceUnsignedIterator struct { + input *bufBooleanIterator + create func() (BooleanPointAggregator, UnsignedPointEmitter) + dims []string + opt IteratorOptions + points []UnsignedPoint + keepTags bool +} + +func newBooleanReduceUnsignedIterator(input BooleanIterator, opt IteratorOptions, createFn func() (BooleanPointAggregator, UnsignedPointEmitter)) *booleanReduceUnsignedIterator { + return &booleanReduceUnsignedIterator{ + input: newBufBooleanIterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + } +} + +// Stats returns stats from the input iterator. +func (itr *booleanReduceUnsignedIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *booleanReduceUnsignedIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *booleanReduceUnsignedIterator) Next() (*UnsignedPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// booleanReduceUnsignedPoint stores the reduced data for a name/tag combination. +type booleanReduceUnsignedPoint struct { + Name string + Tags Tags + Aggregator BooleanPointAggregator + Emitter UnsignedPointEmitter +} + +// reduce executes fn once for every point in the next window. +// The previous value for the dimension is passed to fn. +func (itr *booleanReduceUnsignedIterator) reduce() ([]UnsignedPoint, error) { + // Calculate next window. + var ( + startTime, endTime int64 + window struct { + name string + tags string + } + ) + for { + p, err := itr.input.Next() + if err != nil || p == nil { + return nil, err + } else if p.Nil { + continue + } + + // Unread the point so it can be processed. + itr.input.unread(p) + startTime, endTime = itr.opt.Window(p.Time) + window.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID() + break + } + + // Create points by tags. + m := make(map[string]*booleanReduceUnsignedPoint) + for { + // Read next point. + curr, err := itr.input.NextInWindow(startTime, endTime) + if err != nil { + return nil, err + } else if curr == nil { + break + } else if curr.Nil { + continue + } else if curr.Name != window.name { + itr.input.unread(curr) + break + } + + // Ensure this point is within the same final window. + if curr.Name != window.name { + itr.input.unread(curr) + break + } else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags { + itr.input.unread(curr) + break + } + + // Retrieve the tags on this point for this level of the query. + // This may be different than the bucket dimensions. + tags := curr.Tags.Subset(itr.dims) + id := tags.ID() + + // Retrieve the aggregator for this name/tag combination or create one. + rp := m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &booleanReduceUnsignedPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + m[id] = rp + } + rp.Aggregator.AggregateBoolean(curr) + } + + // Reverse sort points by name & tag if our output is supposed to be ordered. 
+ keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + if len(keys) > 1 && itr.opt.Ordered { + sort.Sort(reverseStringSlice(keys)) + } + + // Assume the points are already sorted until proven otherwise. + sortedByTime := true + // Emit the points for each name & tag combination. + a := make([]UnsignedPoint, 0, len(m)) + for _, k := range keys { + rp := m[k] + points := rp.Emitter.Emit() + for i := len(points) - 1; i >= 0; i-- { + points[i].Name = rp.Name + if !itr.keepTags { + points[i].Tags = rp.Tags + } + // Set the points time to the interval time if the reducer didn't provide one. + if points[i].Time == ZeroTime { + points[i].Time = startTime + } else { + sortedByTime = false + } + a = append(a, points[i]) + } + } + + // Points may be out of order. Perform a stable sort by time if requested. + if !sortedByTime && itr.opt.Ordered { + sort.Stable(sort.Reverse(unsignedPointsByTime(a))) + } + + return a, nil +} + +// booleanStreamUnsignedIterator streams inputs into the iterator and emits points gradually. +type booleanStreamUnsignedIterator struct { + input *bufBooleanIterator + create func() (BooleanPointAggregator, UnsignedPointEmitter) + dims []string + opt IteratorOptions + m map[string]*booleanReduceUnsignedPoint + points []UnsignedPoint +} + +// newBooleanStreamUnsignedIterator returns a new instance of booleanStreamUnsignedIterator. +func newBooleanStreamUnsignedIterator(input BooleanIterator, createFn func() (BooleanPointAggregator, UnsignedPointEmitter), opt IteratorOptions) *booleanStreamUnsignedIterator { + return &booleanStreamUnsignedIterator{ + input: newBufBooleanIterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + m: make(map[string]*booleanReduceUnsignedPoint), + } +} + +// Stats returns stats from the input iterator. +func (itr *booleanStreamUnsignedIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *booleanStreamUnsignedIterator) Close() error { return itr.input.Close() } + +// Next returns the next value for the stream iterator. +func (itr *booleanStreamUnsignedIterator) Next() (*UnsignedPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// reduce creates and manages aggregators for every point from the input. +// After aggregating a point, it always tries to emit a value using the emitter. +func (itr *booleanStreamUnsignedIterator) reduce() ([]UnsignedPoint, error) { + // We have already read all of the input points. + if itr.m == nil { + return nil, nil + } + + for { + // Read next point. + curr, err := itr.input.Next() + if err != nil { + return nil, err + } else if curr == nil { + // Close all of the aggregators to flush any remaining points to emit. + var points []UnsignedPoint + for _, rp := range itr.m { + if aggregator, ok := rp.Aggregator.(io.Closer); ok { + if err := aggregator.Close(); err != nil { + return nil, err + } + + pts := rp.Emitter.Emit() + if len(pts) == 0 { + continue + } + + for i := range pts { + pts[i].Name = rp.Name + pts[i].Tags = rp.Tags + } + points = append(points, pts...) + } + } + + // Eliminate the aggregators and emitters. 
+ itr.m = nil + return points, nil + } else if curr.Nil { + continue + } + tags := curr.Tags.Subset(itr.dims) + + id := curr.Name + if len(tags.m) > 0 { + id += "\x00" + tags.ID() + } + + // Retrieve the aggregator for this name/tag combination or create one. + rp := itr.m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &booleanReduceUnsignedPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + itr.m[id] = rp + } + rp.Aggregator.AggregateBoolean(curr) + + // Attempt to emit points from the aggregator. + points := rp.Emitter.Emit() + if len(points) == 0 { + continue + } + + for i := range points { + points[i].Name = rp.Name + points[i].Tags = rp.Tags + } + return points, nil + } +} + +// booleanReduceStringIterator executes a reducer for every interval and buffers the result. +type booleanReduceStringIterator struct { + input *bufBooleanIterator + create func() (BooleanPointAggregator, StringPointEmitter) + dims []string + opt IteratorOptions + points []StringPoint + keepTags bool +} + +func newBooleanReduceStringIterator(input BooleanIterator, opt IteratorOptions, createFn func() (BooleanPointAggregator, StringPointEmitter)) *booleanReduceStringIterator { + return &booleanReduceStringIterator{ + input: newBufBooleanIterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + } +} + +// Stats returns stats from the input iterator. +func (itr *booleanReduceStringIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *booleanReduceStringIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *booleanReduceStringIterator) Next() (*StringPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// booleanReduceStringPoint stores the reduced data for a name/tag combination. +type booleanReduceStringPoint struct { + Name string + Tags Tags + Aggregator BooleanPointAggregator + Emitter StringPointEmitter +} + +// reduce executes fn once for every point in the next window. +// The previous value for the dimension is passed to fn. +func (itr *booleanReduceStringIterator) reduce() ([]StringPoint, error) { + // Calculate next window. + var ( + startTime, endTime int64 + window struct { + name string + tags string + } + ) + for { + p, err := itr.input.Next() + if err != nil || p == nil { + return nil, err + } else if p.Nil { + continue + } + + // Unread the point so it can be processed. + itr.input.unread(p) + startTime, endTime = itr.opt.Window(p.Time) + window.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID() + break + } + + // Create points by tags. + m := make(map[string]*booleanReduceStringPoint) + for { + // Read next point. + curr, err := itr.input.NextInWindow(startTime, endTime) + if err != nil { + return nil, err + } else if curr == nil { + break + } else if curr.Nil { + continue + } else if curr.Name != window.name { + itr.input.unread(curr) + break + } + + // Ensure this point is within the same final window. 
+ if curr.Name != window.name { + itr.input.unread(curr) + break + } else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags { + itr.input.unread(curr) + break + } + + // Retrieve the tags on this point for this level of the query. + // This may be different than the bucket dimensions. + tags := curr.Tags.Subset(itr.dims) + id := tags.ID() + + // Retrieve the aggregator for this name/tag combination or create one. + rp := m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &booleanReduceStringPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + m[id] = rp + } + rp.Aggregator.AggregateBoolean(curr) + } + + // Reverse sort points by name & tag if our output is supposed to be ordered. + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + if len(keys) > 1 && itr.opt.Ordered { + sort.Sort(reverseStringSlice(keys)) + } + + // Assume the points are already sorted until proven otherwise. + sortedByTime := true + // Emit the points for each name & tag combination. + a := make([]StringPoint, 0, len(m)) + for _, k := range keys { + rp := m[k] + points := rp.Emitter.Emit() + for i := len(points) - 1; i >= 0; i-- { + points[i].Name = rp.Name + if !itr.keepTags { + points[i].Tags = rp.Tags + } + // Set the points time to the interval time if the reducer didn't provide one. + if points[i].Time == ZeroTime { + points[i].Time = startTime + } else { + sortedByTime = false + } + a = append(a, points[i]) + } + } + + // Points may be out of order. Perform a stable sort by time if requested. + if !sortedByTime && itr.opt.Ordered { + sort.Stable(sort.Reverse(stringPointsByTime(a))) + } + + return a, nil +} + +// booleanStreamStringIterator streams inputs into the iterator and emits points gradually. +type booleanStreamStringIterator struct { + input *bufBooleanIterator + create func() (BooleanPointAggregator, StringPointEmitter) + dims []string + opt IteratorOptions + m map[string]*booleanReduceStringPoint + points []StringPoint +} + +// newBooleanStreamStringIterator returns a new instance of booleanStreamStringIterator. +func newBooleanStreamStringIterator(input BooleanIterator, createFn func() (BooleanPointAggregator, StringPointEmitter), opt IteratorOptions) *booleanStreamStringIterator { + return &booleanStreamStringIterator{ + input: newBufBooleanIterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + m: make(map[string]*booleanReduceStringPoint), + } +} + +// Stats returns stats from the input iterator. +func (itr *booleanStreamStringIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *booleanStreamStringIterator) Close() error { return itr.input.Close() } + +// Next returns the next value for the stream iterator. +func (itr *booleanStreamStringIterator) Next() (*StringPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// reduce creates and manages aggregators for every point from the input. +// After aggregating a point, it always tries to emit a value using the emitter. +func (itr *booleanStreamStringIterator) reduce() ([]StringPoint, error) { + // We have already read all of the input points. 
+ if itr.m == nil { + return nil, nil + } + + for { + // Read next point. + curr, err := itr.input.Next() + if err != nil { + return nil, err + } else if curr == nil { + // Close all of the aggregators to flush any remaining points to emit. + var points []StringPoint + for _, rp := range itr.m { + if aggregator, ok := rp.Aggregator.(io.Closer); ok { + if err := aggregator.Close(); err != nil { + return nil, err + } + + pts := rp.Emitter.Emit() + if len(pts) == 0 { + continue + } + + for i := range pts { + pts[i].Name = rp.Name + pts[i].Tags = rp.Tags + } + points = append(points, pts...) + } + } + + // Eliminate the aggregators and emitters. + itr.m = nil + return points, nil + } else if curr.Nil { + continue + } + tags := curr.Tags.Subset(itr.dims) + + id := curr.Name + if len(tags.m) > 0 { + id += "\x00" + tags.ID() + } + + // Retrieve the aggregator for this name/tag combination or create one. + rp := itr.m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &booleanReduceStringPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + itr.m[id] = rp + } + rp.Aggregator.AggregateBoolean(curr) + + // Attempt to emit points from the aggregator. + points := rp.Emitter.Emit() + if len(points) == 0 { + continue + } + + for i := range points { + points[i].Name = rp.Name + points[i].Tags = rp.Tags + } + return points, nil + } +} + +// booleanReduceBooleanIterator executes a reducer for every interval and buffers the result. +type booleanReduceBooleanIterator struct { + input *bufBooleanIterator + create func() (BooleanPointAggregator, BooleanPointEmitter) + dims []string + opt IteratorOptions + points []BooleanPoint + keepTags bool +} + +func newBooleanReduceBooleanIterator(input BooleanIterator, opt IteratorOptions, createFn func() (BooleanPointAggregator, BooleanPointEmitter)) *booleanReduceBooleanIterator { + return &booleanReduceBooleanIterator{ + input: newBufBooleanIterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + } +} + +// Stats returns stats from the input iterator. +func (itr *booleanReduceBooleanIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *booleanReduceBooleanIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *booleanReduceBooleanIterator) Next() (*BooleanPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// booleanReduceBooleanPoint stores the reduced data for a name/tag combination. +type booleanReduceBooleanPoint struct { + Name string + Tags Tags + Aggregator BooleanPointAggregator + Emitter BooleanPointEmitter +} + +// reduce executes fn once for every point in the next window. +// The previous value for the dimension is passed to fn. +func (itr *booleanReduceBooleanIterator) reduce() ([]BooleanPoint, error) { + // Calculate next window. + var ( + startTime, endTime int64 + window struct { + name string + tags string + } + ) + for { + p, err := itr.input.Next() + if err != nil || p == nil { + return nil, err + } else if p.Nil { + continue + } + + // Unread the point so it can be processed. 
+ itr.input.unread(p) + startTime, endTime = itr.opt.Window(p.Time) + window.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID() + break + } + + // Create points by tags. + m := make(map[string]*booleanReduceBooleanPoint) + for { + // Read next point. + curr, err := itr.input.NextInWindow(startTime, endTime) + if err != nil { + return nil, err + } else if curr == nil { + break + } else if curr.Nil { + continue + } else if curr.Name != window.name { + itr.input.unread(curr) + break + } + + // Ensure this point is within the same final window. + if curr.Name != window.name { + itr.input.unread(curr) + break + } else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags { + itr.input.unread(curr) + break + } + + // Retrieve the tags on this point for this level of the query. + // This may be different than the bucket dimensions. + tags := curr.Tags.Subset(itr.dims) + id := tags.ID() + + // Retrieve the aggregator for this name/tag combination or create one. + rp := m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &booleanReduceBooleanPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + m[id] = rp + } + rp.Aggregator.AggregateBoolean(curr) + } + + // Reverse sort points by name & tag if our output is supposed to be ordered. + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + if len(keys) > 1 && itr.opt.Ordered { + sort.Sort(reverseStringSlice(keys)) + } + + // Assume the points are already sorted until proven otherwise. + sortedByTime := true + // Emit the points for each name & tag combination. + a := make([]BooleanPoint, 0, len(m)) + for _, k := range keys { + rp := m[k] + points := rp.Emitter.Emit() + for i := len(points) - 1; i >= 0; i-- { + points[i].Name = rp.Name + if !itr.keepTags { + points[i].Tags = rp.Tags + } + // Set the points time to the interval time if the reducer didn't provide one. + if points[i].Time == ZeroTime { + points[i].Time = startTime + } else { + sortedByTime = false + } + a = append(a, points[i]) + } + } + + // Points may be out of order. Perform a stable sort by time if requested. + if !sortedByTime && itr.opt.Ordered { + sort.Stable(sort.Reverse(booleanPointsByTime(a))) + } + + return a, nil +} + +// booleanStreamBooleanIterator streams inputs into the iterator and emits points gradually. +type booleanStreamBooleanIterator struct { + input *bufBooleanIterator + create func() (BooleanPointAggregator, BooleanPointEmitter) + dims []string + opt IteratorOptions + m map[string]*booleanReduceBooleanPoint + points []BooleanPoint +} + +// newBooleanStreamBooleanIterator returns a new instance of booleanStreamBooleanIterator. +func newBooleanStreamBooleanIterator(input BooleanIterator, createFn func() (BooleanPointAggregator, BooleanPointEmitter), opt IteratorOptions) *booleanStreamBooleanIterator { + return &booleanStreamBooleanIterator{ + input: newBufBooleanIterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + m: make(map[string]*booleanReduceBooleanPoint), + } +} + +// Stats returns stats from the input iterator. +func (itr *booleanStreamBooleanIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *booleanStreamBooleanIterator) Close() error { return itr.input.Close() } + +// Next returns the next value for the stream iterator. 
+func (itr *booleanStreamBooleanIterator) Next() (*BooleanPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// reduce creates and manages aggregators for every point from the input. +// After aggregating a point, it always tries to emit a value using the emitter. +func (itr *booleanStreamBooleanIterator) reduce() ([]BooleanPoint, error) { + // We have already read all of the input points. + if itr.m == nil { + return nil, nil + } + + for { + // Read next point. + curr, err := itr.input.Next() + if err != nil { + return nil, err + } else if curr == nil { + // Close all of the aggregators to flush any remaining points to emit. + var points []BooleanPoint + for _, rp := range itr.m { + if aggregator, ok := rp.Aggregator.(io.Closer); ok { + if err := aggregator.Close(); err != nil { + return nil, err + } + + pts := rp.Emitter.Emit() + if len(pts) == 0 { + continue + } + + for i := range pts { + pts[i].Name = rp.Name + pts[i].Tags = rp.Tags + } + points = append(points, pts...) + } + } + + // Eliminate the aggregators and emitters. + itr.m = nil + return points, nil + } else if curr.Nil { + continue + } + tags := curr.Tags.Subset(itr.dims) + + id := curr.Name + if len(tags.m) > 0 { + id += "\x00" + tags.ID() + } + + // Retrieve the aggregator for this name/tag combination or create one. + rp := itr.m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &booleanReduceBooleanPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + itr.m[id] = rp + } + rp.Aggregator.AggregateBoolean(curr) + + // Attempt to emit points from the aggregator. + points := rp.Emitter.Emit() + if len(points) == 0 { + continue + } + + for i := range points { + points[i].Name = rp.Name + points[i].Tags = rp.Tags + } + return points, nil + } +} + +// booleanDedupeIterator only outputs unique points. +// This differs from the DistinctIterator in that it compares all aux fields too. +// This iterator is relatively inefficient and should only be used on small +// datasets such as meta query results. 
+type booleanDedupeIterator struct { + input BooleanIterator + m map[string]struct{} // lookup of points already sent +} + +type booleanIteratorMapper struct { + cur Cursor + row Row + driver IteratorMap // which iterator to use for the primary value, can be nil + fields []IteratorMap // which iterator to use for an aux field + point BooleanPoint +} + +func newBooleanIteratorMapper(cur Cursor, driver IteratorMap, fields []IteratorMap, opt IteratorOptions) *booleanIteratorMapper { + return &booleanIteratorMapper{ + cur: cur, + driver: driver, + fields: fields, + point: BooleanPoint{ + Aux: make([]interface{}, len(fields)), + }, + } +} + +func (itr *booleanIteratorMapper) Next() (*BooleanPoint, error) { + if !itr.cur.Scan(&itr.row) { + if err := itr.cur.Err(); err != nil { + return nil, err + } + return nil, nil + } + + itr.point.Time = itr.row.Time + itr.point.Name = itr.row.Series.Name + itr.point.Tags = itr.row.Series.Tags + + if itr.driver != nil { + if v := itr.driver.Value(&itr.row); v != nil { + if v, ok := castToBoolean(v); ok { + itr.point.Value = v + itr.point.Nil = false + } else { + itr.point.Value = false + itr.point.Nil = true + } + } else { + itr.point.Value = false + itr.point.Nil = true + } + } + for i, f := range itr.fields { + itr.point.Aux[i] = f.Value(&itr.row) + } + return &itr.point, nil +} + +func (itr *booleanIteratorMapper) Stats() IteratorStats { + return itr.cur.Stats() +} + +func (itr *booleanIteratorMapper) Close() error { + return itr.cur.Close() +} + +type booleanFilterIterator struct { + input BooleanIterator + cond influxql.Expr + opt IteratorOptions + m map[string]interface{} +} + +func newBooleanFilterIterator(input BooleanIterator, cond influxql.Expr, opt IteratorOptions) BooleanIterator { + // Strip out time conditions from the WHERE clause. + // TODO(jsternberg): This should really be done for us when creating the IteratorOptions struct. 
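+ //
+ // For example (hypothetical condition), a clause such as
+ //   time >= now() - 1h AND host = 'server01'
+ // has its time comparison rewritten to the literal true below, leaving
+ //   true AND host = 'server01'
+ // so only the host predicate is evaluated per point. If the whole condition
+ // reduces to a bare true literal, the input iterator is returned unchanged
+ // and no filter iterator is allocated.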
+ n := influxql.RewriteFunc(influxql.CloneExpr(cond), func(n influxql.Node) influxql.Node { + switch n := n.(type) { + case *influxql.BinaryExpr: + if n.LHS.String() == "time" { + return &influxql.BooleanLiteral{Val: true} + } + } + return n + }) + + cond, _ = n.(influxql.Expr) + if cond == nil { + return input + } else if n, ok := cond.(*influxql.BooleanLiteral); ok && n.Val { + return input + } + + return &booleanFilterIterator{ + input: input, + cond: cond, + opt: opt, + m: make(map[string]interface{}), + } +} + +func (itr *booleanFilterIterator) Stats() IteratorStats { return itr.input.Stats() } +func (itr *booleanFilterIterator) Close() error { return itr.input.Close() } + +func (itr *booleanFilterIterator) Next() (*BooleanPoint, error) { + for { + p, err := itr.input.Next() + if err != nil || p == nil { + return nil, err + } + + for i, ref := range itr.opt.Aux { + itr.m[ref.Val] = p.Aux[i] + } + for k, v := range p.Tags.KeyValues() { + itr.m[k] = v + } + + if !influxql.EvalBool(itr.cond, itr.m) { + continue + } + return p, nil + } +} + +type booleanTagSubsetIterator struct { + input BooleanIterator + point BooleanPoint + lastTags Tags + dimensions []string +} + +func newBooleanTagSubsetIterator(input BooleanIterator, opt IteratorOptions) *booleanTagSubsetIterator { + return &booleanTagSubsetIterator{ + input: input, + dimensions: opt.GetDimensions(), + } +} + +func (itr *booleanTagSubsetIterator) Next() (*BooleanPoint, error) { + p, err := itr.input.Next() + if err != nil { + return nil, err + } else if p == nil { + return nil, nil + } + + itr.point.Name = p.Name + if !p.Tags.Equal(itr.lastTags) { + itr.point.Tags = p.Tags.Subset(itr.dimensions) + itr.lastTags = p.Tags + } + itr.point.Time = p.Time + itr.point.Value = p.Value + itr.point.Aux = p.Aux + itr.point.Aggregated = p.Aggregated + itr.point.Nil = p.Nil + return &itr.point, nil +} + +func (itr *booleanTagSubsetIterator) Stats() IteratorStats { + return itr.input.Stats() +} + +func (itr *booleanTagSubsetIterator) Close() error { + return itr.input.Close() +} + +// newBooleanDedupeIterator returns a new instance of booleanDedupeIterator. +func newBooleanDedupeIterator(input BooleanIterator) *booleanDedupeIterator { + return &booleanDedupeIterator{ + input: input, + m: make(map[string]struct{}), + } +} + +// Stats returns stats from the input iterator. +func (itr *booleanDedupeIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *booleanDedupeIterator) Close() error { return itr.input.Close() } + +// Next returns the next unique point from the input iterator. +func (itr *booleanDedupeIterator) Next() (*BooleanPoint, error) { + for { + // Read next point. + p, err := itr.input.Next() + if p == nil || err != nil { + return nil, err + } + + // Serialize to bytes to store in lookup. + buf, err := proto.Marshal(encodeBooleanPoint(p)) + if err != nil { + return nil, err + } + + // If the point has already been output then move to the next point. + if _, ok := itr.m[string(buf)]; ok { + continue + } + + // Otherwise mark it as emitted and return point. + itr.m[string(buf)] = struct{}{} + return p, nil + } +} + +// booleanReaderIterator represents an iterator that streams from a reader. +type booleanReaderIterator struct { + r io.Reader + dec *BooleanPointDecoder +} + +// newBooleanReaderIterator returns a new instance of booleanReaderIterator. 
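+//
+// Hypothetical in-package usage sketch (the reader conn is illustrative): the
+// iterator is drained by calling Next until it returns a nil point, and Close
+// only closes the reader when it implements io.ReadCloser.
+//
+//	itr := newBooleanReaderIterator(context.Background(), conn, IteratorStats{})
+//	defer itr.Close()
+//	for {
+//		p, err := itr.Next()
+//		if err != nil {
+//			return err
+//		} else if p == nil {
+//			break // the decoder reported io.EOF
+//		}
+//		// consume p.Time, p.Value, p.Aux ...
+//	}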
+func newBooleanReaderIterator(ctx context.Context, r io.Reader, stats IteratorStats) *booleanReaderIterator { + dec := NewBooleanPointDecoder(ctx, r) + dec.stats = stats + + return &booleanReaderIterator{ + r: r, + dec: dec, + } +} + +// Stats returns stats about points processed. +func (itr *booleanReaderIterator) Stats() IteratorStats { return itr.dec.stats } + +// Close closes the underlying reader, if applicable. +func (itr *booleanReaderIterator) Close() error { + if r, ok := itr.r.(io.ReadCloser); ok { + return r.Close() + } + return nil +} + +// Next returns the next point from the iterator. +func (itr *booleanReaderIterator) Next() (*BooleanPoint, error) { + // OPTIMIZE(benbjohnson): Reuse point on iterator. + + // Unmarshal next point. + p := &BooleanPoint{} + if err := itr.dec.DecodeBooleanPoint(p); err == io.EOF { + return nil, nil + } else if err != nil { + return nil, err + } + return p, nil +} + +// encodeFloatIterator encodes all points from itr to the underlying writer. +func (enc *IteratorEncoder) encodeFloatIterator(itr FloatIterator) error { + ticker := time.NewTicker(enc.StatsInterval) + defer ticker.Stop() + + // Emit initial stats. + if err := enc.encodeStats(itr.Stats()); err != nil { + return err + } + + // Continually stream points from the iterator into the encoder. + penc := NewFloatPointEncoder(enc.w) + for { + // Emit stats periodically. + select { + case <-ticker.C: + if err := enc.encodeStats(itr.Stats()); err != nil { + return err + } + default: + } + + // Retrieve the next point from the iterator. + p, err := itr.Next() + if err != nil { + return err + } else if p == nil { + break + } + + // Write the point to the point encoder. + if err := penc.EncodeFloatPoint(p); err != nil { + return err + } + } + + // Emit final stats. + if err := enc.encodeStats(itr.Stats()); err != nil { + return err + } + return nil +} + +// encodeIntegerIterator encodes all points from itr to the underlying writer. +func (enc *IteratorEncoder) encodeIntegerIterator(itr IntegerIterator) error { + ticker := time.NewTicker(enc.StatsInterval) + defer ticker.Stop() + + // Emit initial stats. + if err := enc.encodeStats(itr.Stats()); err != nil { + return err + } + + // Continually stream points from the iterator into the encoder. + penc := NewIntegerPointEncoder(enc.w) + for { + // Emit stats periodically. + select { + case <-ticker.C: + if err := enc.encodeStats(itr.Stats()); err != nil { + return err + } + default: + } + + // Retrieve the next point from the iterator. + p, err := itr.Next() + if err != nil { + return err + } else if p == nil { + break + } + + // Write the point to the point encoder. + if err := penc.EncodeIntegerPoint(p); err != nil { + return err + } + } + + // Emit final stats. + if err := enc.encodeStats(itr.Stats()); err != nil { + return err + } + return nil +} + +// encodeUnsignedIterator encodes all points from itr to the underlying writer. +func (enc *IteratorEncoder) encodeUnsignedIterator(itr UnsignedIterator) error { + ticker := time.NewTicker(enc.StatsInterval) + defer ticker.Stop() + + // Emit initial stats. + if err := enc.encodeStats(itr.Stats()); err != nil { + return err + } + + // Continually stream points from the iterator into the encoder. + penc := NewUnsignedPointEncoder(enc.w) + for { + // Emit stats periodically. + select { + case <-ticker.C: + if err := enc.encodeStats(itr.Stats()); err != nil { + return err + } + default: + } + + // Retrieve the next point from the iterator. 
+ p, err := itr.Next()
+ if err != nil {
+ return err
+ } else if p == nil {
+ break
+ }
+
+ // Write the point to the point encoder.
+ if err := penc.EncodeUnsignedPoint(p); err != nil {
+ return err
+ }
+ }
+
+ // Emit final stats.
+ if err := enc.encodeStats(itr.Stats()); err != nil {
+ return err
+ }
+ return nil
+}
+
+// encodeStringIterator encodes all points from itr to the underlying writer.
+func (enc *IteratorEncoder) encodeStringIterator(itr StringIterator) error {
+ ticker := time.NewTicker(enc.StatsInterval)
+ defer ticker.Stop()
+
+ // Emit initial stats.
+ if err := enc.encodeStats(itr.Stats()); err != nil {
+ return err
+ }
+
+ // Continually stream points from the iterator into the encoder.
+ penc := NewStringPointEncoder(enc.w)
+ for {
+ // Emit stats periodically.
+ select {
+ case <-ticker.C:
+ if err := enc.encodeStats(itr.Stats()); err != nil {
+ return err
+ }
+ default:
+ }
+
+ // Retrieve the next point from the iterator.
+ p, err := itr.Next()
+ if err != nil {
+ return err
+ } else if p == nil {
+ break
+ }
+
+ // Write the point to the point encoder.
+ if err := penc.EncodeStringPoint(p); err != nil {
+ return err
+ }
+ }
+
+ // Emit final stats.
+ if err := enc.encodeStats(itr.Stats()); err != nil {
+ return err
+ }
+ return nil
+}
+
+// encodeBooleanIterator encodes all points from itr to the underlying writer.
+func (enc *IteratorEncoder) encodeBooleanIterator(itr BooleanIterator) error {
+ ticker := time.NewTicker(enc.StatsInterval)
+ defer ticker.Stop()
+
+ // Emit initial stats.
+ if err := enc.encodeStats(itr.Stats()); err != nil {
+ return err
+ }
+
+ // Continually stream points from the iterator into the encoder.
+ penc := NewBooleanPointEncoder(enc.w)
+ for {
+ // Emit stats periodically.
+ select {
+ case <-ticker.C:
+ if err := enc.encodeStats(itr.Stats()); err != nil {
+ return err
+ }
+ default:
+ }
+
+ // Retrieve the next point from the iterator.
+ p, err := itr.Next()
+ if err != nil {
+ return err
+ } else if p == nil {
+ break
+ }
+
+ // Write the point to the point encoder.
+ if err := penc.EncodeBooleanPoint(p); err != nil {
+ return err
+ }
+ }
+
+ // Emit final stats.
+ if err := enc.encodeStats(itr.Stats()); err != nil {
+ return err
+ }
+ return nil
+}
diff --git a/vendor/github.com/influxdata/influxdb/query/iterator.gen.go.tmpl b/vendor/github.com/influxdata/influxdb/query/iterator.gen.go.tmpl
new file mode 100644
index 0000000..be01d40
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/query/iterator.gen.go.tmpl
@@ -0,0 +1,1580 @@
+package query
+
+import (
+ "context"
+ "container/heap"
+ "io"
+ "sort"
+ "sync"
+ "time"
+
+ "github.com/gogo/protobuf/proto"
+ "github.com/influxdata/influxql"
+)
+
+// DefaultStatsInterval is the default value for IteratorEncoder.StatsInterval.
+const DefaultStatsInterval = time.Second
+
+{{with $types := .}}{{range $k := $types}}
+
+// {{$k.Name}}Iterator represents a stream of {{$k.name}} points.
+type {{$k.Name}}Iterator interface {
+ Iterator
+ Next() (*{{$k.Name}}Point, error)
+}
+
+// new{{$k.Name}}Iterators converts a slice of Iterator to a slice of {{$k.Name}}Iterator.
+// Drops and closes any iterator in itrs that is not a {{$k.Name}}Iterator and cannot
+// be cast to a {{$k.Name}}Iterator.
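+//
+// Note: everything inside this with/range block is instantiated once per point
+// type by the code generator, with $k.Name taking the values Float, Integer,
+// Unsigned, String and Boolean; the Boolean expansion of this block is the
+// code that appears at the end of the generated iterator.gen.go earlier in
+// this patch.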
+func new{{$k.Name}}Iterators(itrs []Iterator) []{{$k.Name}}Iterator { + a := make([]{{$k.Name}}Iterator, 0, len(itrs)) + for _, itr := range itrs { + switch itr := itr.(type) { + case {{$k.Name}}Iterator: + a = append(a, itr) + default: + itr.Close() + } + } + return a +} + + +// buf{{$k.Name}}Iterator represents a buffered {{$k.Name}}Iterator. +type buf{{$k.Name}}Iterator struct { + itr {{$k.Name}}Iterator + buf *{{$k.Name}}Point +} + +// newBuf{{$k.Name}}Iterator returns a buffered {{$k.Name}}Iterator. +func newBuf{{$k.Name}}Iterator(itr {{$k.Name}}Iterator) *buf{{$k.Name}}Iterator { + return &buf{{$k.Name}}Iterator{itr: itr} +} + +// Stats returns statistics from the input iterator. +func (itr *buf{{$k.Name}}Iterator) Stats() IteratorStats { return itr.itr.Stats() } + +// Close closes the underlying iterator. +func (itr *buf{{$k.Name}}Iterator) Close() error { return itr.itr.Close() } + +// peek returns the next point without removing it from the iterator. +func (itr *buf{{$k.Name}}Iterator) peek() (*{{$k.Name}}Point, error) { + p, err := itr.Next() + if err != nil { + return nil, err + } + itr.unread(p) + return p, nil +} + +// peekTime returns the time of the next point. +// Returns zero time if no more points available. +func (itr *buf{{$k.Name}}Iterator) peekTime() (int64, error) { + p, err := itr.peek() + if p == nil || err != nil { + return ZeroTime, err + } + return p.Time, nil +} + +// Next returns the current buffer, if exists, or calls the underlying iterator. +func (itr *buf{{$k.Name}}Iterator) Next() (*{{$k.Name}}Point, error) { + buf := itr.buf + if buf != nil { + itr.buf = nil + return buf, nil + } + return itr.itr.Next() +} + +// NextInWindow returns the next value if it is between [startTime, endTime). +// If the next value is outside the range then it is moved to the buffer. +func (itr *buf{{$k.Name}}Iterator) NextInWindow(startTime, endTime int64) (*{{$k.Name}}Point, error) { + v, err := itr.Next() + if v == nil || err != nil { + return nil, err + } else if t := v.Time; t >= endTime || t < startTime { + itr.unread(v) + return nil, nil + } + return v, nil +} + +// unread sets v to the buffer. It is read on the next call to Next(). +func (itr *buf{{$k.Name}}Iterator) unread(v *{{$k.Name}}Point) { itr.buf = v } + +// {{$k.name}}MergeIterator represents an iterator that combines multiple {{$k.name}} iterators. +type {{$k.name}}MergeIterator struct { + inputs []{{$k.Name}}Iterator + heap *{{$k.name}}MergeHeap + init bool + + closed bool + mu sync.RWMutex + + // Current iterator and window. + curr *{{$k.name}}MergeHeapItem + window struct { + name string + tags string + startTime int64 + endTime int64 + } +} + +// new{{$k.Name}}MergeIterator returns a new instance of {{$k.name}}MergeIterator. +func new{{$k.Name}}MergeIterator(inputs []{{$k.Name}}Iterator, opt IteratorOptions) *{{$k.name}}MergeIterator { + itr := &{{$k.name}}MergeIterator{ + inputs: inputs, + heap: &{{$k.name}}MergeHeap{ + items: make([]*{{$k.name}}MergeHeapItem, 0, len(inputs)), + opt: opt, + }, + } + + // Initialize heap items. + for _, input := range inputs { + // Wrap in buffer, ignore any inputs without anymore points. + bufInput := newBuf{{$k.Name}}Iterator(input) + + // Append to the heap. + itr.heap.items = append(itr.heap.items, &{{$k.name}}MergeHeapItem{itr: bufInput}) + } + + return itr +} + +// Stats returns an aggregation of stats from the underlying iterators. 
+func (itr *{{$k.name}}MergeIterator) Stats() IteratorStats { + var stats IteratorStats + for _, input := range itr.inputs { + stats.Add(input.Stats()) + } + return stats +} + +// Close closes the underlying iterators. +func (itr *{{$k.name}}MergeIterator) Close() error { + itr.mu.Lock() + defer itr.mu.Unlock() + + for _, input := range itr.inputs { + input.Close() + } + itr.curr = nil + itr.inputs = nil + itr.heap.items = nil + itr.closed = true + return nil +} + +// Next returns the next point from the iterator. +func (itr *{{$k.name}}MergeIterator) Next() (*{{$k.Name}}Point, error) { + itr.mu.RLock() + defer itr.mu.RUnlock() + if itr.closed { + return nil, nil + } + + // Initialize the heap. This needs to be done lazily on the first call to this iterator + // so that iterator initialization done through the Select() call returns quickly. + // Queries can only be interrupted after the Select() call completes so any operations + // done during iterator creation cannot be interrupted, which is why we do it here + // instead so an interrupt can happen while initializing the heap. + if !itr.init { + items := itr.heap.items + itr.heap.items = make([]*{{$k.name}}MergeHeapItem, 0, len(items)) + for _, item := range items { + if p, err := item.itr.peek(); err != nil { + return nil, err + } else if p == nil { + continue + } + itr.heap.items = append(itr.heap.items, item) + } + heap.Init(itr.heap) + itr.init = true + } + + for { + // Retrieve the next iterator if we don't have one. + if itr.curr == nil { + if len(itr.heap.items) == 0 { + return nil, nil + } + itr.curr = heap.Pop(itr.heap).(*{{$k.name}}MergeHeapItem) + + // Read point and set current window. + p, err := itr.curr.itr.Next() + if err != nil { + return nil, err + } + tags := p.Tags.Subset(itr.heap.opt.Dimensions) + itr.window.name, itr.window.tags = p.Name, tags.ID() + itr.window.startTime, itr.window.endTime = itr.heap.opt.Window(p.Time) + return p, nil + } + + // Read the next point from the current iterator. + p, err := itr.curr.itr.Next() + if err != nil { + return nil, err + } + + // If there are no more points then remove iterator from heap and find next. + if p == nil { + itr.curr = nil + continue + } + + // Check if the point is inside of our current window. + inWindow := true + if window := itr.window; window.name != p.Name { + inWindow = false + } else if tags := p.Tags.Subset(itr.heap.opt.Dimensions); window.tags != tags.ID() { + inWindow = false + } else if opt := itr.heap.opt; opt.Ascending && p.Time >= window.endTime { + inWindow = false + } else if !opt.Ascending && p.Time < window.startTime { + inWindow = false + } + + // If it's outside our window then push iterator back on the heap and find new iterator. + if !inWindow { + itr.curr.itr.unread(p) + heap.Push(itr.heap, itr.curr) + itr.curr = nil + continue + } + + return p, nil + } +} + +// {{$k.name}}MergeHeap represents a heap of {{$k.name}}MergeHeapItems. +// Items are sorted by their next window and then by name/tags. 
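+//
+// More precisely, the Less method below compares measurement name first, then
+// the tag set projected onto opt.Dimensions, and only then the start of the
+// window containing the point's time. Worked example (illustrative series) for
+// an ascending query with a 10s window: a buffered point for cpu,host=A at
+// t=17s pops before one for cpu,host=B at t=2s because the tag sets differ,
+// while two cpu,host=A points at t=2s and t=17s pop in window order (the 0s
+// window before the 10s window).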
+type {{$k.name}}MergeHeap struct { + opt IteratorOptions + items []*{{$k.name}}MergeHeapItem +} + +func (h *{{$k.name}}MergeHeap) Len() int { return len(h.items) } +func (h *{{$k.name}}MergeHeap) Swap(i, j int) { h.items[i], h.items[j] = h.items[j], h.items[i] } +func (h *{{$k.name}}MergeHeap) Less(i, j int) bool { + x, err := h.items[i].itr.peek() + if err != nil { + return true + } + y, err := h.items[j].itr.peek() + if err != nil { + return false + } + + if h.opt.Ascending { + if x.Name != y.Name { + return x.Name < y.Name + } else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); xTags.ID() != yTags.ID() { + return xTags.ID() < yTags.ID() + } + } else { + if x.Name != y.Name { + return x.Name > y.Name + } else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); xTags.ID() != yTags.ID() { + return xTags.ID() > yTags.ID() + } + } + + xt, _ := h.opt.Window(x.Time) + yt, _ := h.opt.Window(y.Time) + + if h.opt.Ascending { + return xt < yt + } + return xt > yt +} + + +func (h *{{$k.name}}MergeHeap) Push(x interface{}) { + h.items = append(h.items, x.(*{{$k.name}}MergeHeapItem)) +} + +func (h *{{$k.name}}MergeHeap) Pop() interface{} { + old := h.items + n := len(old) + item := old[n-1] + h.items = old[0 : n-1] + return item +} + +type {{$k.name}}MergeHeapItem struct { + itr *buf{{$k.Name}}Iterator +} + +// {{$k.name}}SortedMergeIterator is an iterator that sorts and merges multiple iterators into one. +type {{$k.name}}SortedMergeIterator struct { + inputs []{{$k.Name}}Iterator + heap *{{$k.name}}SortedMergeHeap + init bool +} + +// new{{$k.Name}}SortedMergeIterator returns an instance of {{$k.name}}SortedMergeIterator. +func new{{$k.Name}}SortedMergeIterator(inputs []{{$k.Name}}Iterator, opt IteratorOptions) Iterator { + itr := &{{$k.name}}SortedMergeIterator{ + inputs: inputs, + heap: &{{$k.name}}SortedMergeHeap{ + items: make([]*{{$k.name}}SortedMergeHeapItem, 0, len(inputs)), + opt: opt, + }, + } + + // Initialize heap items. + for _, input := range inputs { + // Append to the heap. + itr.heap.items = append(itr.heap.items, &{{$k.name}}SortedMergeHeapItem{itr: input}) + } + + return itr +} + +// Stats returns an aggregation of stats from the underlying iterators. +func (itr *{{$k.name}}SortedMergeIterator) Stats() IteratorStats { + var stats IteratorStats + for _, input := range itr.inputs { + stats.Add(input.Stats()) + } + return stats +} + +// Close closes the underlying iterators. +func (itr *{{$k.name}}SortedMergeIterator) Close() error { + for _, input := range itr.inputs { + input.Close() + } + return nil +} + +// Next returns the next points from the iterator. +func (itr *{{$k.name}}SortedMergeIterator) Next() (*{{$k.Name}}Point, error) { return itr.pop() } + +// pop returns the next point from the heap. +// Reads the next point from item's cursor and puts it back on the heap. +func (itr *{{$k.name}}SortedMergeIterator) pop() (*{{$k.Name}}Point, error) { + // Initialize the heap. See the MergeIterator to see why this has to be done lazily. + if !itr.init { + items := itr.heap.items + itr.heap.items = make([]*{{$k.name}}SortedMergeHeapItem, 0, len(items)) + for _, item := range items { + var err error + if item.point, err = item.itr.Next(); err != nil { + return nil, err + } else if item.point == nil { + continue + } + itr.heap.items = append(itr.heap.items, item) + } + heap.Init(itr.heap) + itr.init = true + } + + if len(itr.heap.items) == 0 { + return nil, nil + } + + // Read the next item from the heap. 
+ item := heap.Pop(itr.heap).(*{{$k.name}}SortedMergeHeapItem) + if item.err != nil { + return nil, item.err + } else if item.point == nil { + return nil, nil + } + + // Copy the point for return. + p := item.point.Clone() + + // Read the next item from the cursor. Push back to heap if one exists. + if item.point, item.err = item.itr.Next(); item.point != nil { + heap.Push(itr.heap, item) + } + + return p, nil +} + +// {{$k.name}}SortedMergeHeap represents a heap of {{$k.name}}SortedMergeHeapItems. +// Items are sorted with the following priority: +// - By their measurement name; +// - By their tag keys/values; +// - By time; or +// - By their Aux field values. +// +type {{$k.name}}SortedMergeHeap struct { + opt IteratorOptions + items []*{{$k.name}}SortedMergeHeapItem +} + +func (h *{{$k.name}}SortedMergeHeap) Len() int { return len(h.items) } +func (h *{{$k.name}}SortedMergeHeap) Swap(i, j int) { h.items[i], h.items[j] = h.items[j], h.items[i] } +func (h *{{$k.name}}SortedMergeHeap) Less(i, j int) bool { + x, y := h.items[i].point, h.items[j].point + + if h.opt.Ascending { + if x.Name != y.Name { + return x.Name < y.Name + } else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); !xTags.Equals(&yTags) { + return xTags.ID() < yTags.ID() + } + + if x.Time != y.Time{ + return x.Time < y.Time + } + + if len(x.Aux) > 0 && len(x.Aux) == len(y.Aux) { + for i := 0; i < len(x.Aux); i++ { + v1, ok1 := x.Aux[i].(string) + v2, ok2 := y.Aux[i].(string) + if !ok1 || !ok2 { + // Unsupported types used in Aux fields. Maybe they + // need to be added here? + return false + } else if v1 == v2 { + continue + } + return v1 < v2 + } + } + return false // Times and/or Aux fields are equal. + } + + if x.Name != y.Name { + return x.Name > y.Name + } else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); !xTags.Equals(&yTags) { + return xTags.ID() > yTags.ID() + } + + if x.Time != y.Time{ + return x.Time > y.Time + } + + if len(x.Aux) > 0 && len(x.Aux) == len(y.Aux) { + for i := 0; i < len(x.Aux); i++ { + v1, ok1 := x.Aux[i].(string) + v2, ok2 := y.Aux[i].(string) + if !ok1 || !ok2 { + // Unsupported types used in Aux fields. Maybe they + // need to be added here? + return false + } else if v1 == v2 { + continue + } + return v1 > v2 + } + } + return false // Times and/or Aux fields are equal. +} + +func (h *{{$k.name}}SortedMergeHeap) Push(x interface{}) { + h.items = append(h.items, x.(*{{$k.name}}SortedMergeHeapItem)) +} + +func (h *{{$k.name}}SortedMergeHeap) Pop() interface{} { + old := h.items + n := len(old) + item := old[n-1] + h.items = old[0 : n-1] + return item +} + +type {{$k.name}}SortedMergeHeapItem struct { + point *{{$k.Name}}Point + err error + itr {{$k.Name}}Iterator +} + +// {{$k.name}}IteratorScanner scans the results of a {{$k.Name}}Iterator into a map. +type {{$k.name}}IteratorScanner struct { + input *buf{{$k.Name}}Iterator + err error + keys []influxql.VarRef + defaultValue interface{} +} + +// new{{$k.Name}}IteratorScanner creates a new IteratorScanner. 
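+//
+// Hypothetical sketch using the Float instantiation (the names are
+// illustrative): keys[0] names the primary value and keys[1:] line up with the
+// point's Aux slice, while passing SkipDefault suppresses fill values for
+// missing columns.
+//
+//	keys := []influxql.VarRef{
+//		{Val: "max"},
+//		{Val: "host"},
+//	}
+//	s := newFloatIteratorScanner(input, keys, SkipDefault)
+//	values := map[string]interface{}{}
+//	ts, name, tags := s.Peek()
+//	s.ScanAt(ts, name, tags, values) // fills values["max"] and values["host"] when present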
+func new{{$k.Name}}IteratorScanner(input {{$k.Name}}Iterator, keys []influxql.VarRef, defaultValue interface{}) *{{$k.name}}IteratorScanner { + return &{{$k.name}}IteratorScanner{ + input: newBuf{{$k.Name}}Iterator(input), + keys: keys, + defaultValue: defaultValue, + } +} + +func (s *{{$k.name}}IteratorScanner) Peek() (int64, string, Tags) { + if s.err != nil { + return ZeroTime, "", Tags{} + } + + p, err := s.input.peek() + if err != nil { + s.err = err + return ZeroTime, "", Tags{} + } else if p == nil { + return ZeroTime, "", Tags{} + } + return p.Time, p.Name, p.Tags +} + +func (s *{{$k.name}}IteratorScanner) ScanAt(ts int64, name string, tags Tags, m map[string]interface{}) { + if s.err != nil { + return + } + + p, err := s.input.Next() + if err != nil { + s.err = err + return + } else if p == nil { + s.useDefaults(m) + return + } else if p.Time != ts || p.Name != name || !p.Tags.Equals(&tags) { + s.useDefaults(m) + s.input.unread(p) + return + } + + if k := s.keys[0]; k.Val != "" { + if p.Nil { + if s.defaultValue != SkipDefault { + m[k.Val] = castToType(s.defaultValue, k.Type) + } + } else { + m[k.Val] = p.Value + } + } + for i, v := range p.Aux { + k := s.keys[i+1] + switch v.(type) { + case float64, int64, uint64, string, bool: + m[k.Val] = v + default: + // Insert the fill value if one was specified. + if s.defaultValue != SkipDefault { + m[k.Val] = castToType(s.defaultValue, k.Type) + } + } + } +} + +func (s *{{$k.name}}IteratorScanner) useDefaults(m map[string]interface{}) { + if s.defaultValue == SkipDefault { + return + } + for _, k := range s.keys { + if k.Val == "" { + continue + } + m[k.Val] = castToType(s.defaultValue, k.Type) + } +} + +func (s *{{$k.name}}IteratorScanner) Stats() IteratorStats { return s.input.Stats() } +func (s *{{$k.name}}IteratorScanner) Err() error { return s.err } +func (s *{{$k.name}}IteratorScanner) Close() error { return s.input.Close() } + +// {{$k.name}}ParallelIterator represents an iterator that pulls data in a separate goroutine. +type {{$k.name}}ParallelIterator struct { + input {{$k.Name}}Iterator + ch chan {{$k.name}}PointError + + once sync.Once + closing chan struct{} + wg sync.WaitGroup +} + +// new{{$k.Name}}ParallelIterator returns a new instance of {{$k.name}}ParallelIterator. +func new{{$k.Name}}ParallelIterator(input {{$k.Name}}Iterator) *{{$k.name}}ParallelIterator { + itr := &{{$k.name}}ParallelIterator{ + input: input, + ch: make(chan {{$k.name}}PointError, 256), + closing: make(chan struct{}), + } + itr.wg.Add(1) + go itr.monitor() + return itr +} + +// Stats returns stats from the underlying iterator. +func (itr *{{$k.name}}ParallelIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the underlying iterators. +func (itr *{{$k.name}}ParallelIterator) Close() error { + itr.once.Do(func() { close(itr.closing) }) + itr.wg.Wait() + return itr.input.Close() +} + +// Next returns the next point from the iterator. +func (itr *{{$k.name}}ParallelIterator) Next() (*{{$k.Name}}Point, error) { + v, ok := <-itr.ch + if !ok { + return nil, io.EOF + } + return v.point, v.err +} + +// monitor runs in a separate goroutine and actively pulls the next point. +func (itr *{{$k.name}}ParallelIterator) monitor() { + defer close(itr.ch) + defer itr.wg.Done() + + for { + // Read next point. 
+ p, err := itr.input.Next() + if p != nil { + p = p.Clone() + } + + select { + case <-itr.closing: + return + case itr.ch <- {{$k.name}}PointError{point: p, err: err}: + } + } +} + +type {{$k.name}}PointError struct { + point *{{$k.Name}}Point + err error +} + +// {{$k.name}}LimitIterator represents an iterator that limits points per group. +type {{$k.name}}LimitIterator struct { + input {{$k.Name}}Iterator + opt IteratorOptions + n int + + prev struct { + name string + tags Tags + } +} + +// new{{$k.Name}}LimitIterator returns a new instance of {{$k.name}}LimitIterator. +func new{{$k.Name}}LimitIterator(input {{$k.Name}}Iterator, opt IteratorOptions) *{{$k.name}}LimitIterator { + return &{{$k.name}}LimitIterator{ + input: input, + opt: opt, + } +} + +// Stats returns stats from the underlying iterator. +func (itr *{{$k.name}}LimitIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the underlying iterators. +func (itr *{{$k.name}}LimitIterator) Close() error { return itr.input.Close() } + +// Next returns the next point from the iterator. +func (itr *{{$k.name}}LimitIterator) Next() (*{{$k.Name}}Point, error) { + for { + p, err := itr.input.Next() + if p == nil || err != nil { + return nil, err + } + + // Reset window and counter if a new window is encountered. + if p.Name != itr.prev.name || !p.Tags.Equals(&itr.prev.tags) { + itr.prev.name = p.Name + itr.prev.tags = p.Tags + itr.n = 0 + } + + // Increment counter. + itr.n++ + + // Read next point if not beyond the offset. + if itr.n <= itr.opt.Offset { + continue + } + + // Read next point if we're beyond the limit. + if itr.opt.Limit > 0 && (itr.n-itr.opt.Offset) > itr.opt.Limit { + continue + } + + return p, nil + } +} + +type {{$k.name}}FillIterator struct { + input *buf{{$k.Name}}Iterator + prev {{$k.Name}}Point + startTime int64 + endTime int64 + auxFields []interface{} + init bool + opt IteratorOptions + + window struct { + name string + tags Tags + time int64 + offset int64 + } +} + +func new{{$k.Name}}FillIterator(input {{$k.Name}}Iterator, expr influxql.Expr, opt IteratorOptions) *{{$k.name}}FillIterator { + if opt.Fill == influxql.NullFill { + if expr, ok := expr.(*influxql.Call); ok && expr.Name == "count" { + opt.Fill = influxql.NumberFill + opt.FillValue = {{$k.Zero}} + } + } + + var startTime, endTime int64 + if opt.Ascending { + startTime, _ = opt.Window(opt.StartTime) + endTime, _ = opt.Window(opt.EndTime) + } else { + startTime, _ = opt.Window(opt.EndTime) + endTime, _ = opt.Window(opt.StartTime) + } + + var auxFields []interface{} + if len(opt.Aux) > 0 { + auxFields = make([]interface{}, len(opt.Aux)) + } + + return &{{$k.name}}FillIterator{ + input: newBuf{{$k.Name}}Iterator(input), + prev: {{$k.Name}}Point{Nil: true}, + startTime: startTime, + endTime: endTime, + auxFields: auxFields, + opt: opt, + } +} + +func (itr *{{$k.name}}FillIterator) Stats() IteratorStats { return itr.input.Stats() } +func (itr *{{$k.name}}FillIterator) Close() error { return itr.input.Close() } + +func (itr *{{$k.name}}FillIterator) Next() (*{{$k.Name}}Point, error) { + if !itr.init { + p, err := itr.input.peek() + if p == nil || err != nil { + return nil, err + } + itr.window.name, itr.window.tags = p.Name, p.Tags + itr.window.time = itr.startTime + if itr.startTime == influxql.MinTime { + itr.window.time, _ = itr.opt.Window(p.Time) + } + if itr.opt.Location != nil { + _, itr.window.offset = itr.opt.Zone(itr.window.time) + } + itr.init = true + } + + p, err := itr.input.Next() + if err != nil { + return nil, err + 
} + + // Check if the next point is outside of our window or is nil. + if p == nil || p.Name != itr.window.name || p.Tags.ID() != itr.window.tags.ID() { + // If we are inside of an interval, unread the point and continue below to + // constructing a new point. + if itr.opt.Ascending && itr.window.time <= itr.endTime { + itr.input.unread(p) + p = nil + goto CONSTRUCT + } else if !itr.opt.Ascending && itr.window.time >= itr.endTime && itr.endTime != influxql.MinTime { + itr.input.unread(p) + p = nil + goto CONSTRUCT + } + + // We are *not* in a current interval. If there is no next point, + // we are at the end of all intervals. + if p == nil { + return nil, nil + } + + // Set the new interval. + itr.window.name, itr.window.tags = p.Name, p.Tags + itr.window.time = itr.startTime + if itr.window.time == influxql.MinTime { + itr.window.time, _ = itr.opt.Window(p.Time) + } + if itr.opt.Location != nil { + _, itr.window.offset = itr.opt.Zone(itr.window.time) + } + itr.prev = {{$k.Name}}Point{Nil: true} + } + + // Check if the point is our next expected point. +CONSTRUCT: + if p == nil || (itr.opt.Ascending && p.Time > itr.window.time) || (!itr.opt.Ascending && p.Time < itr.window.time) { + if p != nil { + itr.input.unread(p) + } + + p = &{{$k.Name}}Point{ + Name: itr.window.name, + Tags: itr.window.tags, + Time: itr.window.time, + Aux: itr.auxFields, + } + + switch itr.opt.Fill { + case influxql.LinearFill: + {{- if or (eq $k.Name "Float") (eq $k.Name "Integer") (eq $k.Name "Unsigned")}} + if !itr.prev.Nil { + next, err := itr.input.peek() + if err != nil { + return nil, err + } else if next != nil && next.Name == itr.window.name && next.Tags.ID() == itr.window.tags.ID() { + interval := int64(itr.opt.Interval.Duration) + start := itr.window.time / interval + p.Value = linear{{$k.Name}}(start, itr.prev.Time/interval, next.Time/interval, itr.prev.Value, next.Value) + } else { + p.Nil = true + } + } else { + p.Nil = true + } + {{else}} + fallthrough + {{- end}} + case influxql.NullFill: + p.Nil = true + case influxql.NumberFill: + p.Value, _ = castTo{{$k.Name}}(itr.opt.FillValue) + case influxql.PreviousFill: + if !itr.prev.Nil { + p.Value = itr.prev.Value + p.Nil = itr.prev.Nil + } else { + p.Nil = true + } + } + } else { + itr.prev = *p + } + + // Advance the expected time. Do not advance to a new window here + // as there may be lingering points with the same timestamp in the previous + // window. + if itr.opt.Ascending { + itr.window.time += int64(itr.opt.Interval.Duration) + } else { + itr.window.time -= int64(itr.opt.Interval.Duration) + } + + // Check to see if we have passed over an offset change and adjust the time + // to account for this new offset. + if itr.opt.Location != nil { + if _, offset := itr.opt.Zone(itr.window.time - 1); offset != itr.window.offset { + diff := itr.window.offset - offset + if abs(diff) < int64(itr.opt.Interval.Duration) { + itr.window.time += diff + } + itr.window.offset = offset + } + } + return p, nil +} + +// {{$k.name}}IntervalIterator represents a {{$k.name}} implementation of IntervalIterator. 
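+//
+// Worked example for the fill iterator above (illustrative values): with a 10s
+// window, fill(previous) and input points at t=0s (value 1) and t=30s
+// (value 4), the empty windows at t=10s and t=20s are synthesized by Next with
+// the previous value 1. fill(null) would mark those points Nil instead,
+// fill(<number>) casts the configured FillValue to the point's value type, and
+// fill(linear) interpolates between the surrounding points for the numeric
+// iterators.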
+type {{$k.name}}IntervalIterator struct { + input {{$k.Name}}Iterator + opt IteratorOptions +} + +func new{{$k.Name}}IntervalIterator(input {{$k.Name}}Iterator, opt IteratorOptions) *{{$k.name}}IntervalIterator { + return &{{$k.name}}IntervalIterator{input: input, opt: opt} +} + +func (itr *{{$k.name}}IntervalIterator) Stats() IteratorStats { return itr.input.Stats() } +func (itr *{{$k.name}}IntervalIterator) Close() error { return itr.input.Close() } + +func (itr *{{$k.name}}IntervalIterator) Next() (*{{$k.Name}}Point, error) { + p, err := itr.input.Next() + if p == nil || err != nil { + return nil, err + } + p.Time, _ = itr.opt.Window(p.Time) + // If we see the minimum allowable time, set the time to zero so we don't + // break the default returned time for aggregate queries without times. + if p.Time == influxql.MinTime { + p.Time = 0 + } + return p, nil +} + +// {{$k.name}}InterruptIterator represents a {{$k.name}} implementation of InterruptIterator. +type {{$k.name}}InterruptIterator struct { + input {{$k.Name}}Iterator + closing <-chan struct{} + count int +} + +func new{{$k.Name}}InterruptIterator(input {{$k.Name}}Iterator, closing <-chan struct{}) *{{$k.name}}InterruptIterator { + return &{{$k.name}}InterruptIterator{input: input, closing: closing} +} + +func (itr *{{$k.name}}InterruptIterator) Stats() IteratorStats { return itr.input.Stats() } +func (itr *{{$k.name}}InterruptIterator) Close() error { return itr.input.Close() } + +func (itr *{{$k.name}}InterruptIterator) Next() (*{{$k.Name}}Point, error) { + // Only check if the channel is closed every N points. This + // intentionally checks on both 0 and N so that if the iterator + // has been interrupted before the first point is emitted it will + // not emit any points. + if itr.count & 0xFF == 0xFF { + select { + case <-itr.closing: + return nil, itr.Close() + default: + // Reset iterator count to zero and fall through to emit the next point. + itr.count = 0 + } + } + + // Increment the counter for every point read. + itr.count++ + return itr.input.Next() +} + +// {{$k.name}}CloseInterruptIterator represents a {{$k.name}} implementation of CloseInterruptIterator. +type {{$k.name}}CloseInterruptIterator struct { + input {{$k.Name}}Iterator + closing <-chan struct{} + done chan struct{} + once sync.Once +} + +func new{{$k.Name}}CloseInterruptIterator(input {{$k.Name}}Iterator, closing <-chan struct{}) *{{$k.name}}CloseInterruptIterator { + itr := &{{$k.name}}CloseInterruptIterator{ + input: input, + closing: closing, + done: make(chan struct{}), + } + go itr.monitor() + return itr +} + +func (itr *{{$k.name}}CloseInterruptIterator) monitor() { + select { + case <-itr.closing: + itr.Close() + case <-itr.done: + } +} + +func (itr *{{$k.name}}CloseInterruptIterator) Stats() IteratorStats { + return itr.input.Stats() +} + +func (itr *{{$k.name}}CloseInterruptIterator) Close() error { + itr.once.Do(func() { + close(itr.done) + itr.input.Close() + }) + return nil +} + +func (itr *{{$k.name}}CloseInterruptIterator) Next() (*{{$k.Name}}Point, error) { + p, err := itr.input.Next() + if err != nil { + // Check if the iterator was closed. + select { + case <-itr.done: + return nil, nil + default: + return nil, err + } + } + return p, nil +} + +{{range $v := $types}} + +// {{$k.name}}Reduce{{$v.Name}}Iterator executes a reducer for every interval and buffers the result. 
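+//
+// Each call to reduce below consumes one time window: points are grouped by
+// the tag set projected onto the iterator's dimensions and handed to one
+// aggregator per group. The emitted points are appended in reverse
+// (reverse-sorted keys and a descending index loop) because Next pops from the
+// end of the buffered slice, so callers still see the intended order when
+// opt.Ordered is set.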
+type {{$k.name}}Reduce{{$v.Name}}Iterator struct { + input *buf{{$k.Name}}Iterator + create func() ({{$k.Name}}PointAggregator, {{$v.Name}}PointEmitter) + dims []string + opt IteratorOptions + points []{{$v.Name}}Point + keepTags bool +} + +func new{{$k.Name}}Reduce{{$v.Name}}Iterator(input {{$k.Name}}Iterator, opt IteratorOptions, createFn func() ({{$k.Name}}PointAggregator, {{$v.Name}}PointEmitter)) *{{$k.name}}Reduce{{$v.Name}}Iterator { + return &{{$k.name}}Reduce{{$v.Name}}Iterator{ + input: newBuf{{$k.Name}}Iterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + } +} + +// Stats returns stats from the input iterator. +func (itr *{{$k.name}}Reduce{{$v.Name}}Iterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *{{$k.name}}Reduce{{$v.Name}}Iterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *{{$k.name}}Reduce{{$v.Name}}Iterator) Next() (*{{$v.Name}}Point, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// {{$k.name}}Reduce{{$v.Name}}Point stores the reduced data for a name/tag combination. +type {{$k.name}}Reduce{{$v.Name}}Point struct { + Name string + Tags Tags + Aggregator {{$k.Name}}PointAggregator + Emitter {{$v.Name}}PointEmitter +} + +// reduce executes fn once for every point in the next window. +// The previous value for the dimension is passed to fn. +func (itr *{{$k.name}}Reduce{{$v.Name}}Iterator) reduce() ([]{{$v.Name}}Point, error) { + // Calculate next window. + var ( + startTime, endTime int64 + window struct { + name string + tags string + } + ) + for { + p, err := itr.input.Next() + if err != nil || p == nil { + return nil, err + } else if p.Nil { + continue + } + + // Unread the point so it can be processed. + itr.input.unread(p) + startTime, endTime = itr.opt.Window(p.Time) + window.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID() + break + } + + // Create points by tags. + m := make(map[string]*{{$k.name}}Reduce{{$v.Name}}Point) + for { + // Read next point. + curr, err := itr.input.NextInWindow(startTime, endTime) + if err != nil { + return nil, err + } else if curr == nil { + break + } else if curr.Nil { + continue + } else if curr.Name != window.name { + itr.input.unread(curr) + break + } + + // Ensure this point is within the same final window. + if curr.Name != window.name { + itr.input.unread(curr) + break + } else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags { + itr.input.unread(curr) + break + } + + // Retrieve the tags on this point for this level of the query. + // This may be different than the bucket dimensions. + tags := curr.Tags.Subset(itr.dims) + id := tags.ID() + + // Retrieve the aggregator for this name/tag combination or create one. + rp := m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &{{$k.name}}Reduce{{$v.Name}}Point{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + m[id] = rp + } + rp.Aggregator.Aggregate{{$k.Name}}(curr) + } + + // Reverse sort points by name & tag if our output is supposed to be ordered. 
+ keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + if len(keys) > 1 && itr.opt.Ordered { + sort.Sort(reverseStringSlice(keys)) + } + + // Assume the points are already sorted until proven otherwise. + sortedByTime := true + // Emit the points for each name & tag combination. + a := make([]{{$v.Name}}Point, 0, len(m)) + for _, k := range keys { + rp := m[k] + points := rp.Emitter.Emit() + for i := len(points)-1; i >= 0; i-- { + points[i].Name = rp.Name + if !itr.keepTags { + points[i].Tags = rp.Tags + } + // Set the points time to the interval time if the reducer didn't provide one. + if points[i].Time == ZeroTime { + points[i].Time = startTime + } else { + sortedByTime = false + } + a = append(a, points[i]) + } + } + + // Points may be out of order. Perform a stable sort by time if requested. + if !sortedByTime && itr.opt.Ordered { + sort.Stable(sort.Reverse({{$v.name}}PointsByTime(a))) + } + + return a, nil +} + +// {{$k.name}}Stream{{$v.Name}}Iterator streams inputs into the iterator and emits points gradually. +type {{$k.name}}Stream{{$v.Name}}Iterator struct { + input *buf{{$k.Name}}Iterator + create func() ({{$k.Name}}PointAggregator, {{$v.Name}}PointEmitter) + dims []string + opt IteratorOptions + m map[string]*{{$k.name}}Reduce{{$v.Name}}Point + points []{{$v.Name}}Point +} + +// new{{$k.Name}}Stream{{$v.Name}}Iterator returns a new instance of {{$k.name}}Stream{{$v.Name}}Iterator. +func new{{$k.Name}}Stream{{$v.Name}}Iterator(input {{$k.Name}}Iterator, createFn func() ({{$k.Name}}PointAggregator, {{$v.Name}}PointEmitter), opt IteratorOptions) *{{$k.name}}Stream{{$v.Name}}Iterator { + return &{{$k.name}}Stream{{$v.Name}}Iterator{ + input: newBuf{{$k.Name}}Iterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + m: make(map[string]*{{$k.name}}Reduce{{$v.Name}}Point), + } +} + +// Stats returns stats from the input iterator. +func (itr *{{$k.name}}Stream{{$v.Name}}Iterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *{{$k.name}}Stream{{$v.Name}}Iterator) Close() error { return itr.input.Close() } + +// Next returns the next value for the stream iterator. +func (itr *{{$k.name}}Stream{{$v.Name}}Iterator) Next() (*{{$v.Name}}Point, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// reduce creates and manages aggregators for every point from the input. +// After aggregating a point, it always tries to emit a value using the emitter. +func (itr *{{$k.name}}Stream{{$v.Name}}Iterator) reduce() ([]{{$v.Name}}Point, error) { + // We have already read all of the input points. + if itr.m == nil { + return nil, nil + } + + for { + // Read next point. + curr, err := itr.input.Next() + if err != nil { + return nil, err + } else if curr == nil { + // Close all of the aggregators to flush any remaining points to emit. + var points []{{$v.Name}}Point + for _, rp := range itr.m { + if aggregator, ok := rp.Aggregator.(io.Closer); ok { + if err := aggregator.Close(); err != nil { + return nil, err + } + + pts := rp.Emitter.Emit() + if len(pts) == 0 { + continue + } + + for i := range pts { + pts[i].Name = rp.Name + pts[i].Tags = rp.Tags + } + points = append(points, pts...) 
+ } + } + + // Eliminate the aggregators and emitters. + itr.m = nil + return points, nil + } else if curr.Nil { + continue + } + tags := curr.Tags.Subset(itr.dims) + + id := curr.Name + if len(tags.m) > 0 { + id += "\x00" + tags.ID() + } + + // Retrieve the aggregator for this name/tag combination or create one. + rp := itr.m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &{{$k.name}}Reduce{{.Name}}Point{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + itr.m[id] = rp + } + rp.Aggregator.Aggregate{{$k.Name}}(curr) + + // Attempt to emit points from the aggregator. + points := rp.Emitter.Emit() + if len(points) == 0 { + continue + } + + for i := range points { + points[i].Name = rp.Name + points[i].Tags = rp.Tags + } + return points, nil + } +} +{{end}} + +// {{$k.name}}DedupeIterator only outputs unique points. +// This differs from the DistinctIterator in that it compares all aux fields too. +// This iterator is relatively inefficient and should only be used on small +// datasets such as meta query results. +type {{$k.name}}DedupeIterator struct { + input {{$k.Name}}Iterator + m map[string]struct{} // lookup of points already sent +} + +type {{$k.name}}IteratorMapper struct { + cur Cursor + row Row + driver IteratorMap // which iterator to use for the primary value, can be nil + fields []IteratorMap // which iterator to use for an aux field + point {{$k.Name}}Point +} + +func new{{$k.Name}}IteratorMapper(cur Cursor, driver IteratorMap, fields []IteratorMap, opt IteratorOptions) *{{$k.name}}IteratorMapper { + return &{{$k.name}}IteratorMapper{ + cur: cur, + driver: driver, + fields: fields, + point: {{$k.Name}}Point{ + Aux: make([]interface{}, len(fields)), + }, + } +} + +func (itr *{{$k.name}}IteratorMapper) Next() (*{{$k.Name}}Point, error) { + if !itr.cur.Scan(&itr.row) { + if err := itr.cur.Err(); err != nil { + return nil, err + } + return nil, nil + } + + itr.point.Time = itr.row.Time + itr.point.Name = itr.row.Series.Name + itr.point.Tags = itr.row.Series.Tags + + if itr.driver != nil { + if v := itr.driver.Value(&itr.row); v != nil { + if v, ok := castTo{{$k.Name}}(v); ok { + itr.point.Value = v + itr.point.Nil = false + } else { + itr.point.Value = {{$k.Nil}} + itr.point.Nil = true + } + } else { + itr.point.Value = {{$k.Nil}} + itr.point.Nil = true + } + } + for i, f := range itr.fields { + itr.point.Aux[i] = f.Value(&itr.row) + } + return &itr.point, nil +} + +func (itr *{{$k.name}}IteratorMapper) Stats() IteratorStats { + return itr.cur.Stats() +} + +func (itr *{{$k.name}}IteratorMapper) Close() error { + return itr.cur.Close() +} + +type {{$k.name}}FilterIterator struct { + input {{$k.Name}}Iterator + cond influxql.Expr + opt IteratorOptions + m map[string]interface{} +} + +func new{{$k.Name}}FilterIterator(input {{$k.Name}}Iterator, cond influxql.Expr, opt IteratorOptions) {{$k.Name}}Iterator { + // Strip out time conditions from the WHERE clause. + // TODO(jsternberg): This should really be done for us when creating the IteratorOptions struct. 
+ n := influxql.RewriteFunc(influxql.CloneExpr(cond), func(n influxql.Node) influxql.Node { + switch n := n.(type) { + case *influxql.BinaryExpr: + if n.LHS.String() == "time" { + return &influxql.BooleanLiteral{Val: true} + } + } + return n + }) + + cond, _ = n.(influxql.Expr) + if cond == nil { + return input + } else if n, ok := cond.(*influxql.BooleanLiteral); ok && n.Val { + return input + } + + return &{{$k.name}}FilterIterator{ + input: input, + cond: cond, + opt: opt, + m: make(map[string]interface{}), + } +} + +func (itr *{{$k.name}}FilterIterator) Stats() IteratorStats { return itr.input.Stats() } +func (itr *{{$k.name}}FilterIterator) Close() error { return itr.input.Close() } + +func (itr *{{$k.name}}FilterIterator) Next() (*{{$k.Name}}Point, error) { + for { + p, err := itr.input.Next() + if err != nil || p == nil { + return nil, err + } + + for i, ref := range itr.opt.Aux { + itr.m[ref.Val] = p.Aux[i] + } + for k, v := range p.Tags.KeyValues() { + itr.m[k] = v + } + + if !influxql.EvalBool(itr.cond, itr.m) { + continue + } + return p, nil + } +} + +type {{$k.name}}TagSubsetIterator struct { + input {{$k.Name}}Iterator + point {{$k.Name}}Point + lastTags Tags + dimensions []string +} + +func new{{$k.Name}}TagSubsetIterator(input {{$k.Name}}Iterator, opt IteratorOptions) *{{$k.name}}TagSubsetIterator { + return &{{$k.name}}TagSubsetIterator{ + input: input, + dimensions: opt.GetDimensions(), + } +} + +func (itr *{{$k.name}}TagSubsetIterator) Next() (*{{$k.Name}}Point, error) { + p, err := itr.input.Next() + if err != nil { + return nil, err + } else if p == nil { + return nil, nil + } + + itr.point.Name = p.Name + if !p.Tags.Equal(itr.lastTags) { + itr.point.Tags = p.Tags.Subset(itr.dimensions) + itr.lastTags = p.Tags + } + itr.point.Time = p.Time + itr.point.Value = p.Value + itr.point.Aux = p.Aux + itr.point.Aggregated = p.Aggregated + itr.point.Nil = p.Nil + return &itr.point, nil +} + +func (itr *{{$k.name}}TagSubsetIterator) Stats() IteratorStats { + return itr.input.Stats() +} + +func (itr *{{$k.name}}TagSubsetIterator) Close() error { + return itr.input.Close() +} + +// new{{$k.Name}}DedupeIterator returns a new instance of {{$k.name}}DedupeIterator. +func new{{$k.Name}}DedupeIterator(input {{$k.Name}}Iterator) *{{$k.name}}DedupeIterator { + return &{{$k.name}}DedupeIterator{ + input: input, + m: make(map[string]struct{}), + } +} + +// Stats returns stats from the input iterator. +func (itr *{{$k.name}}DedupeIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *{{$k.name}}DedupeIterator) Close() error { return itr.input.Close() } + +// Next returns the next unique point from the input iterator. +func (itr *{{$k.name}}DedupeIterator) Next() (*{{$k.Name}}Point, error) { + for { + // Read next point. + p, err := itr.input.Next() + if p == nil || err != nil { + return nil, err + } + + // Serialize to bytes to store in lookup. + buf, err := proto.Marshal(encode{{$k.Name}}Point(p)) + if err != nil { + return nil, err + } + + // If the point has already been output then move to the next point. + if _, ok := itr.m[string(buf)]; ok { + continue + } + + // Otherwise mark it as emitted and return point. + itr.m[string(buf)] = struct{}{} + return p, nil + } +} + +// {{$k.name}}ReaderIterator represents an iterator that streams from a reader. 
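+//
+// Note on the dedupe iterator above: each point is keyed by its marshalled
+// protobuf encoding, so two points that differ in any encoded field, including
+// Aux values, are both emitted. The lookup map grows with the number of
+// distinct points, which is why it is reserved for small result sets such as
+// meta queries.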
+type {{$k.name}}ReaderIterator struct { + r io.Reader + dec *{{$k.Name}}PointDecoder +} + +// new{{$k.Name}}ReaderIterator returns a new instance of {{$k.name}}ReaderIterator. +func new{{$k.Name}}ReaderIterator(ctx context.Context, r io.Reader, stats IteratorStats) *{{$k.name}}ReaderIterator { + dec := New{{$k.Name}}PointDecoder(ctx, r) + dec.stats = stats + + return &{{$k.name}}ReaderIterator{ + r: r, + dec: dec, + } +} + +// Stats returns stats about points processed. +func (itr *{{$k.name}}ReaderIterator) Stats() IteratorStats { return itr.dec.stats } + +// Close closes the underlying reader, if applicable. +func (itr *{{$k.name}}ReaderIterator) Close() error { + if r, ok := itr.r.(io.ReadCloser); ok { + return r.Close() + } + return nil +} + +// Next returns the next point from the iterator. +func (itr *{{$k.name}}ReaderIterator) Next() (*{{$k.Name}}Point, error) { + // OPTIMIZE(benbjohnson): Reuse point on iterator. + + // Unmarshal next point. + p := &{{$k.Name}}Point{} + if err := itr.dec.Decode{{$k.Name}}Point(p); err == io.EOF { + return nil, nil + } else if err != nil { + return nil, err + } + return p, nil +} +{{end}} + +{{range .}} +// encode{{.Name}}Iterator encodes all points from itr to the underlying writer. +func (enc *IteratorEncoder) encode{{.Name}}Iterator(itr {{.Name}}Iterator) error { + ticker := time.NewTicker(enc.StatsInterval) + defer ticker.Stop() + + // Emit initial stats. + if err := enc.encodeStats(itr.Stats()); err != nil { + return err + } + + // Continually stream points from the iterator into the encoder. + penc := New{{.Name}}PointEncoder(enc.w) + for { + // Emit stats periodically. + select { + case <-ticker.C: + if err := enc.encodeStats(itr.Stats()); err != nil { + return err + } + default: + } + + // Retrieve the next point from the iterator. + p, err := itr.Next() + if err != nil { + return err + } else if p == nil { + break + } + + // Write the point to the point encoder. + if err := penc.Encode{{.Name}}Point(p); err != nil { + return err + } + } + + // Emit final stats. + if err := enc.encodeStats(itr.Stats()); err != nil { + return err + } + return nil +} + +{{end}} + +{{end}} diff --git a/vendor/github.com/influxdata/influxdb/query/iterator.go b/vendor/github.com/influxdata/influxdb/query/iterator.go new file mode 100644 index 0000000..f890341 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/query/iterator.go @@ -0,0 +1,1423 @@ +package query + +import ( + "context" + "encoding/binary" + "errors" + "fmt" + "io" + "regexp" + "time" + + "github.com/gogo/protobuf/proto" + "github.com/influxdata/influxdb/pkg/tracing" + internal "github.com/influxdata/influxdb/query/internal" + "github.com/influxdata/influxql" +) + +// ErrUnknownCall is returned when operating on an unknown function call. +var ErrUnknownCall = errors.New("unknown call") + +const ( + // secToNs is the number of nanoseconds in a second. + secToNs = int64(time.Second) +) + +// Iterator represents a generic interface for all Iterators. +// Most iterator operations are done on the typed sub-interfaces. +type Iterator interface { + Stats() IteratorStats + Close() error +} + +// Iterators represents a list of iterators. +type Iterators []Iterator + +// Stats returns the aggregation of all iterator stats. +func (a Iterators) Stats() IteratorStats { + var stats IteratorStats + for _, itr := range a { + stats.Add(itr.Stats()) + } + return stats +} + +// Close closes all iterators. 
+func (a Iterators) Close() error { + for _, itr := range a { + itr.Close() + } + return nil +} + +// filterNonNil returns a slice of iterators that removes all nil iterators. +func (a Iterators) filterNonNil() []Iterator { + other := make([]Iterator, 0, len(a)) + for _, itr := range a { + if itr == nil { + continue + } + other = append(other, itr) + } + return other +} + +// dataType determines what slice type this set of iterators should be. +// An iterator type is chosen by looking at the first element in the slice +// and then returning the data type for that iterator. +func (a Iterators) dataType() influxql.DataType { + if len(a) == 0 { + return influxql.Unknown + } + + switch a[0].(type) { + case FloatIterator: + return influxql.Float + case IntegerIterator: + return influxql.Integer + case UnsignedIterator: + return influxql.Unsigned + case StringIterator: + return influxql.String + case BooleanIterator: + return influxql.Boolean + default: + return influxql.Unknown + } +} + +// coerce forces an array of iterators to be a single type. +// Iterators that are not of the same type as the first element in the slice +// will be closed and dropped. +func (a Iterators) coerce() interface{} { + typ := a.dataType() + switch typ { + case influxql.Float: + return newFloatIterators(a) + case influxql.Integer: + return newIntegerIterators(a) + case influxql.Unsigned: + return newUnsignedIterators(a) + case influxql.String: + return newStringIterators(a) + case influxql.Boolean: + return newBooleanIterators(a) + } + return a +} + +// Merge combines all iterators into a single iterator. +// A sorted merge iterator or a merge iterator can be used based on opt. +func (a Iterators) Merge(opt IteratorOptions) (Iterator, error) { + // Check if this is a call expression. + call, ok := opt.Expr.(*influxql.Call) + + // Merge into a single iterator. + if !ok && opt.MergeSorted() { + itr := NewSortedMergeIterator(a, opt) + if itr != nil && opt.InterruptCh != nil { + itr = NewInterruptIterator(itr, opt.InterruptCh) + } + return itr, nil + } + + // We do not need an ordered output so use a merge iterator. + itr := NewMergeIterator(a, opt) + if itr == nil { + return nil, nil + } + + if opt.InterruptCh != nil { + itr = NewInterruptIterator(itr, opt.InterruptCh) + } + + if !ok { + // This is not a call expression so do not use a call iterator. + return itr, nil + } + + // When merging the count() function, use sum() to sum the counted points. + if call.Name == "count" { + opt.Expr = &influxql.Call{ + Name: "sum", + Args: call.Args, + } + } + return NewCallIterator(itr, opt) +} + +// NewMergeIterator returns an iterator to merge itrs into one. +// Inputs must either be merge iterators or only contain a single name/tag in +// sorted order. The iterator will output all points by window, name/tag, then +// time. This iterator is useful when you need all of the points for an +// interval. +func NewMergeIterator(inputs []Iterator, opt IteratorOptions) Iterator { + inputs = Iterators(inputs).filterNonNil() + if n := len(inputs); n == 0 { + return nil + } else if n == 1 { + return inputs[0] + } + + // Aggregate functions can use a more relaxed sorting so that points + // within a window are grouped. This is much more efficient. 
+ switch inputs := Iterators(inputs).coerce().(type) { + case []FloatIterator: + return newFloatMergeIterator(inputs, opt) + case []IntegerIterator: + return newIntegerMergeIterator(inputs, opt) + case []UnsignedIterator: + return newUnsignedMergeIterator(inputs, opt) + case []StringIterator: + return newStringMergeIterator(inputs, opt) + case []BooleanIterator: + return newBooleanMergeIterator(inputs, opt) + default: + panic(fmt.Sprintf("unsupported merge iterator type: %T", inputs)) + } +} + +// NewParallelMergeIterator returns an iterator that breaks input iterators +// into groups and processes them in parallel. +func NewParallelMergeIterator(inputs []Iterator, opt IteratorOptions, parallelism int) Iterator { + inputs = Iterators(inputs).filterNonNil() + if len(inputs) == 0 { + return nil + } else if len(inputs) == 1 { + return inputs[0] + } + + // Limit parallelism to the number of inputs. + if len(inputs) < parallelism { + parallelism = len(inputs) + } + + // Determine the number of inputs per output iterator. + n := len(inputs) / parallelism + + // Group iterators together. + outputs := make([]Iterator, parallelism) + for i := range outputs { + var slice []Iterator + if i < len(outputs)-1 { + slice = inputs[i*n : (i+1)*n] + } else { + slice = inputs[i*n:] + } + + outputs[i] = newParallelIterator(NewMergeIterator(slice, opt)) + } + + // Merge all groups together. + return NewMergeIterator(outputs, opt) +} + +// NewSortedMergeIterator returns an iterator to merge itrs into one. +// Inputs must either be sorted merge iterators or only contain a single +// name/tag in sorted order. The iterator will output all points by name/tag, +// then time. This iterator is useful when you need all points for a name/tag +// to be in order. +func NewSortedMergeIterator(inputs []Iterator, opt IteratorOptions) Iterator { + inputs = Iterators(inputs).filterNonNil() + if len(inputs) == 0 { + return nil + } else if len(inputs) == 1 { + return inputs[0] + } + + switch inputs := Iterators(inputs).coerce().(type) { + case []FloatIterator: + return newFloatSortedMergeIterator(inputs, opt) + case []IntegerIterator: + return newIntegerSortedMergeIterator(inputs, opt) + case []UnsignedIterator: + return newUnsignedSortedMergeIterator(inputs, opt) + case []StringIterator: + return newStringSortedMergeIterator(inputs, opt) + case []BooleanIterator: + return newBooleanSortedMergeIterator(inputs, opt) + default: + panic(fmt.Sprintf("unsupported sorted merge iterator type: %T", inputs)) + } +} + +// newParallelIterator returns an iterator that runs in a separate goroutine. +func newParallelIterator(input Iterator) Iterator { + if input == nil { + return nil + } + + switch itr := input.(type) { + case FloatIterator: + return newFloatParallelIterator(itr) + case IntegerIterator: + return newIntegerParallelIterator(itr) + case UnsignedIterator: + return newUnsignedParallelIterator(itr) + case StringIterator: + return newStringParallelIterator(itr) + case BooleanIterator: + return newBooleanParallelIterator(itr) + default: + panic(fmt.Sprintf("unsupported parallel iterator type: %T", itr)) + } +} + +// NewLimitIterator returns an iterator that limits the number of points per grouping. 
+func NewLimitIterator(input Iterator, opt IteratorOptions) Iterator { + switch input := input.(type) { + case FloatIterator: + return newFloatLimitIterator(input, opt) + case IntegerIterator: + return newIntegerLimitIterator(input, opt) + case UnsignedIterator: + return newUnsignedLimitIterator(input, opt) + case StringIterator: + return newStringLimitIterator(input, opt) + case BooleanIterator: + return newBooleanLimitIterator(input, opt) + default: + panic(fmt.Sprintf("unsupported limit iterator type: %T", input)) + } +} + +// NewFilterIterator returns an iterator that filters the points based on the +// condition. This iterator is not nearly as efficient as filtering points +// within the query engine and is only used when filtering subqueries. +func NewFilterIterator(input Iterator, cond influxql.Expr, opt IteratorOptions) Iterator { + if input == nil { + return nil + } + + switch input := input.(type) { + case FloatIterator: + return newFloatFilterIterator(input, cond, opt) + case IntegerIterator: + return newIntegerFilterIterator(input, cond, opt) + case UnsignedIterator: + return newUnsignedFilterIterator(input, cond, opt) + case StringIterator: + return newStringFilterIterator(input, cond, opt) + case BooleanIterator: + return newBooleanFilterIterator(input, cond, opt) + default: + panic(fmt.Sprintf("unsupported filter iterator type: %T", input)) + } +} + +// NewTagSubsetIterator will strip each of the points to a subset of the tag key values +// for each point it processes. +func NewTagSubsetIterator(input Iterator, opt IteratorOptions) Iterator { + if input == nil { + return nil + } + + switch input := input.(type) { + case FloatIterator: + return newFloatTagSubsetIterator(input, opt) + case IntegerIterator: + return newIntegerTagSubsetIterator(input, opt) + case UnsignedIterator: + return newUnsignedTagSubsetIterator(input, opt) + case StringIterator: + return newStringTagSubsetIterator(input, opt) + case BooleanIterator: + return newBooleanTagSubsetIterator(input, opt) + default: + panic(fmt.Sprintf("unsupported tag subset iterator type: %T", input)) + } +} + +// NewDedupeIterator returns an iterator that only outputs unique points. +// This iterator maintains a serialized copy of each row so it is inefficient +// to use on large datasets. It is intended for small datasets such as meta queries. +func NewDedupeIterator(input Iterator) Iterator { + if input == nil { + return nil + } + + switch input := input.(type) { + case FloatIterator: + return newFloatDedupeIterator(input) + case IntegerIterator: + return newIntegerDedupeIterator(input) + case UnsignedIterator: + return newUnsignedDedupeIterator(input) + case StringIterator: + return newStringDedupeIterator(input) + case BooleanIterator: + return newBooleanDedupeIterator(input) + default: + panic(fmt.Sprintf("unsupported dedupe iterator type: %T", input)) + } +} + +// NewFillIterator returns an iterator that fills in missing points in an aggregate. 
+func NewFillIterator(input Iterator, expr influxql.Expr, opt IteratorOptions) Iterator { + switch input := input.(type) { + case FloatIterator: + return newFloatFillIterator(input, expr, opt) + case IntegerIterator: + return newIntegerFillIterator(input, expr, opt) + case UnsignedIterator: + return newUnsignedFillIterator(input, expr, opt) + case StringIterator: + return newStringFillIterator(input, expr, opt) + case BooleanIterator: + return newBooleanFillIterator(input, expr, opt) + default: + panic(fmt.Sprintf("unsupported fill iterator type: %T", input)) + } +} + +// NewIntervalIterator returns an iterator that sets the time on each point to the interval. +func NewIntervalIterator(input Iterator, opt IteratorOptions) Iterator { + switch input := input.(type) { + case FloatIterator: + return newFloatIntervalIterator(input, opt) + case IntegerIterator: + return newIntegerIntervalIterator(input, opt) + case UnsignedIterator: + return newUnsignedIntervalIterator(input, opt) + case StringIterator: + return newStringIntervalIterator(input, opt) + case BooleanIterator: + return newBooleanIntervalIterator(input, opt) + default: + panic(fmt.Sprintf("unsupported interval iterator type: %T", input)) + } +} + +// NewInterruptIterator returns an iterator that will stop producing output +// when the passed-in channel is closed. +func NewInterruptIterator(input Iterator, closing <-chan struct{}) Iterator { + switch input := input.(type) { + case FloatIterator: + return newFloatInterruptIterator(input, closing) + case IntegerIterator: + return newIntegerInterruptIterator(input, closing) + case UnsignedIterator: + return newUnsignedInterruptIterator(input, closing) + case StringIterator: + return newStringInterruptIterator(input, closing) + case BooleanIterator: + return newBooleanInterruptIterator(input, closing) + default: + panic(fmt.Sprintf("unsupported interrupt iterator type: %T", input)) + } +} + +// NewCloseInterruptIterator returns an iterator that will invoke the Close() method on an +// iterator when the passed-in channel has been closed. +func NewCloseInterruptIterator(input Iterator, closing <-chan struct{}) Iterator { + switch input := input.(type) { + case FloatIterator: + return newFloatCloseInterruptIterator(input, closing) + case IntegerIterator: + return newIntegerCloseInterruptIterator(input, closing) + case UnsignedIterator: + return newUnsignedCloseInterruptIterator(input, closing) + case StringIterator: + return newStringCloseInterruptIterator(input, closing) + case BooleanIterator: + return newBooleanCloseInterruptIterator(input, closing) + default: + panic(fmt.Sprintf("unsupported close iterator iterator type: %T", input)) + } +} + +// IteratorScanner is used to scan the results of an iterator into a map. +type IteratorScanner interface { + // Peek retrieves information about the next point. It returns a timestamp, the name, and the tags. + Peek() (int64, string, Tags) + + // ScanAt will take a time, name, and tags and scan the point that matches those into the map. + ScanAt(ts int64, name string, tags Tags, values map[string]interface{}) + + // Stats returns the IteratorStats from the Iterator. + Stats() IteratorStats + + // Err returns an error that was encountered while scanning. + Err() error + + io.Closer +} + +// SkipDefault is a sentinel value to tell the IteratorScanner to skip setting the +// default value if none was present. This causes the map to use the previous value +// if it was previously set. 
+var SkipDefault = interface{}(0)
+
+// NewIteratorScanner produces an IteratorScanner for the Iterator.
+func NewIteratorScanner(input Iterator, keys []influxql.VarRef, defaultValue interface{}) IteratorScanner {
+ switch input := input.(type) {
+ case FloatIterator:
+ return newFloatIteratorScanner(input, keys, defaultValue)
+ case IntegerIterator:
+ return newIntegerIteratorScanner(input, keys, defaultValue)
+ case UnsignedIterator:
+ return newUnsignedIteratorScanner(input, keys, defaultValue)
+ case StringIterator:
+ return newStringIteratorScanner(input, keys, defaultValue)
+ case BooleanIterator:
+ return newBooleanIteratorScanner(input, keys, defaultValue)
+ default:
+ panic(fmt.Sprintf("unsupported type for iterator scanner: %T", input))
+ }
+}
+
+// DrainIterator reads and discards all points from itr.
+func DrainIterator(itr Iterator) {
+ defer itr.Close()
+ switch itr := itr.(type) {
+ case FloatIterator:
+ for p, _ := itr.Next(); p != nil; p, _ = itr.Next() {
+ }
+ case IntegerIterator:
+ for p, _ := itr.Next(); p != nil; p, _ = itr.Next() {
+ }
+ case UnsignedIterator:
+ for p, _ := itr.Next(); p != nil; p, _ = itr.Next() {
+ }
+ case StringIterator:
+ for p, _ := itr.Next(); p != nil; p, _ = itr.Next() {
+ }
+ case BooleanIterator:
+ for p, _ := itr.Next(); p != nil; p, _ = itr.Next() {
+ }
+ default:
+ panic(fmt.Sprintf("unsupported iterator type for draining: %T", itr))
+ }
+}
+
+// DrainIterators reads and discards all points from itrs.
+func DrainIterators(itrs []Iterator) {
+ defer Iterators(itrs).Close()
+ for {
+ var hasData bool
+
+ for _, itr := range itrs {
+ switch itr := itr.(type) {
+ case FloatIterator:
+ if p, _ := itr.Next(); p != nil {
+ hasData = true
+ }
+ case IntegerIterator:
+ if p, _ := itr.Next(); p != nil {
+ hasData = true
+ }
+ case UnsignedIterator:
+ if p, _ := itr.Next(); p != nil {
+ hasData = true
+ }
+ case StringIterator:
+ if p, _ := itr.Next(); p != nil {
+ hasData = true
+ }
+ case BooleanIterator:
+ if p, _ := itr.Next(); p != nil {
+ hasData = true
+ }
+ default:
+ panic(fmt.Sprintf("unsupported iterator type for draining: %T", itr))
+ }
+ }
+
+ // Exit once all iterators return a nil point.
+ if !hasData {
+ break
+ }
+ }
+}
+
+// NewReaderIterator returns an iterator that streams from a reader.
+func NewReaderIterator(ctx context.Context, r io.Reader, typ influxql.DataType, stats IteratorStats) Iterator {
+ switch typ {
+ case influxql.Float:
+ return newFloatReaderIterator(ctx, r, stats)
+ case influxql.Integer:
+ return newIntegerReaderIterator(ctx, r, stats)
+ case influxql.Unsigned:
+ return newUnsignedReaderIterator(ctx, r, stats)
+ case influxql.String:
+ return newStringReaderIterator(ctx, r, stats)
+ case influxql.Boolean:
+ return newBooleanReaderIterator(ctx, r, stats)
+ default:
+ return &nilFloatReaderIterator{r: r}
+ }
+}
+
+// IteratorCreator is an interface to create Iterators.
+type IteratorCreator interface {
+ // Creates a simple iterator for use in an InfluxQL query.
+ CreateIterator(ctx context.Context, source *influxql.Measurement, opt IteratorOptions) (Iterator, error)
+
+ // Determines the potential cost for creating an iterator.
+ IteratorCost(source *influxql.Measurement, opt IteratorOptions) (IteratorCost, error)
+}
+
+// IteratorOptions is an object passed to CreateIterator to specify creation options.
+type IteratorOptions struct {
+ // Expression to iterate for.
+ // This can be VarRef or a Call.
+ Expr influxql.Expr
+
+ // Auxiliary tags or values to also retrieve for the point.
+ Aux []influxql.VarRef + + // Data sources from which to receive data. This is only used for encoding + // measurements over RPC and is no longer used in the open source version. + Sources []influxql.Source + + // Group by interval and tags. + Interval Interval + Dimensions []string // The final dimensions of the query (stays the same even in subqueries). + GroupBy map[string]struct{} // Dimensions to group points by in intermediate iterators. + Location *time.Location + + // Fill options. + Fill influxql.FillOption + FillValue interface{} + + // Condition to filter by. + Condition influxql.Expr + + // Time range for the iterator. + StartTime int64 + EndTime int64 + + // Sorted in time ascending order if true. + Ascending bool + + // Limits the number of points per series. + Limit, Offset int + + // Limits the number of series. + SLimit, SOffset int + + // Removes the measurement name. Useful for meta queries. + StripName bool + + // Removes duplicate rows from raw queries. + Dedupe bool + + // Determines if this is a query for raw data or an aggregate/selector. + Ordered bool + + // Limits on the creation of iterators. + MaxSeriesN int + + // If this channel is set and is closed, the iterator should try to exit + // and close as soon as possible. + InterruptCh <-chan struct{} + + // Authorizer can limit access to data + Authorizer Authorizer +} + +// newIteratorOptionsStmt creates the iterator options from stmt. +func newIteratorOptionsStmt(stmt *influxql.SelectStatement, sopt SelectOptions) (opt IteratorOptions, err error) { + // Determine time range from the condition. + valuer := &influxql.NowValuer{Location: stmt.Location} + condition, timeRange, err := influxql.ConditionExpr(stmt.Condition, valuer) + if err != nil { + return IteratorOptions{}, err + } + + if !timeRange.Min.IsZero() { + opt.StartTime = timeRange.Min.UnixNano() + } else { + opt.StartTime = influxql.MinTime + } + if !timeRange.Max.IsZero() { + opt.EndTime = timeRange.Max.UnixNano() + } else { + opt.EndTime = influxql.MaxTime + } + opt.Location = stmt.Location + + // Determine group by interval. + interval, err := stmt.GroupByInterval() + if err != nil { + return opt, err + } + // Set duration to zero if a negative interval has been used. + if interval < 0 { + interval = 0 + } else if interval > 0 { + opt.Interval.Offset, err = stmt.GroupByOffset() + if err != nil { + return opt, err + } + } + opt.Interval.Duration = interval + + // Always request an ordered output for the top level iterators. + // The emitter will always emit points as ordered. + opt.Ordered = true + + // Determine dimensions. + opt.GroupBy = make(map[string]struct{}, len(opt.Dimensions)) + for _, d := range stmt.Dimensions { + if d, ok := d.Expr.(*influxql.VarRef); ok { + opt.Dimensions = append(opt.Dimensions, d.Val) + opt.GroupBy[d.Val] = struct{}{} + } + } + + opt.Condition = condition + opt.Ascending = stmt.TimeAscending() + opt.Dedupe = stmt.Dedupe + opt.StripName = stmt.StripName + + opt.Fill, opt.FillValue = stmt.Fill, stmt.FillValue + if opt.Fill == influxql.NullFill && stmt.Target != nil { + // Set the fill option to none if a target has been given. + // Null values will get ignored when being written to the target + // so fill(null) wouldn't write any null values to begin with. 
+ opt.Fill = influxql.NoFill + } + opt.Limit, opt.Offset = stmt.Limit, stmt.Offset + opt.SLimit, opt.SOffset = stmt.SLimit, stmt.SOffset + opt.MaxSeriesN = sopt.MaxSeriesN + opt.Authorizer = sopt.Authorizer + + return opt, nil +} + +func newIteratorOptionsSubstatement(ctx context.Context, stmt *influxql.SelectStatement, opt IteratorOptions) (IteratorOptions, error) { + subOpt, err := newIteratorOptionsStmt(stmt, SelectOptions{ + Authorizer: opt.Authorizer, + MaxSeriesN: opt.MaxSeriesN, + }) + if err != nil { + return IteratorOptions{}, err + } + + if subOpt.StartTime < opt.StartTime { + subOpt.StartTime = opt.StartTime + } + if subOpt.EndTime > opt.EndTime { + subOpt.EndTime = opt.EndTime + } + if !subOpt.Interval.IsZero() && subOpt.EndTime == influxql.MaxTime { + if now := ctx.Value("now"); now != nil { + subOpt.EndTime = now.(time.Time).UnixNano() + } + } + // Propagate the dimensions to the inner subquery. + subOpt.Dimensions = opt.Dimensions + for d := range opt.GroupBy { + subOpt.GroupBy[d] = struct{}{} + } + subOpt.InterruptCh = opt.InterruptCh + + // Extract the time range and condition from the condition. + cond, t, err := influxql.ConditionExpr(stmt.Condition, nil) + if err != nil { + return IteratorOptions{}, err + } + subOpt.Condition = cond + // If the time range is more constrained, use it instead. A less constrained time + // range should be ignored. + if !t.Min.IsZero() && t.MinTimeNano() > opt.StartTime { + subOpt.StartTime = t.MinTimeNano() + } + if !t.Max.IsZero() && t.MaxTimeNano() < opt.EndTime { + subOpt.EndTime = t.MaxTimeNano() + } + + // Propagate the SLIMIT and SOFFSET from the outer query. + subOpt.SLimit += opt.SLimit + subOpt.SOffset += opt.SOffset + + // Propagate the ordering from the parent query. + subOpt.Ascending = opt.Ascending + + // If the inner query uses a null fill option and is not a raw query, + // switch it to none so we don't hit an unnecessary penalty from the + // fill iterator. Null values will end up getting stripped by an outer + // query anyway so there's no point in having them here. We still need + // all other types of fill iterators because they can affect the result + // of the outer query. We also do not do this for raw queries because + // there is no fill iterator for them and fill(none) doesn't work with + // raw queries. + if !stmt.IsRawQuery && subOpt.Fill == influxql.NullFill { + subOpt.Fill = influxql.NoFill + } + + // Inherit the ordering method from the outer query. + subOpt.Ordered = opt.Ordered + + // If there is no interval for this subquery, but the outer query has an + // interval, inherit the parent interval. + interval, err := stmt.GroupByInterval() + if err != nil { + return IteratorOptions{}, err + } else if interval == 0 { + subOpt.Interval = opt.Interval + } + return subOpt, nil +} + +// MergeSorted returns true if the options require a sorted merge. +func (opt IteratorOptions) MergeSorted() bool { + return opt.Ordered +} + +// SeekTime returns the time the iterator should start from. +// For ascending iterators this is the start time, for descending iterators it's the end time. +func (opt IteratorOptions) SeekTime() int64 { + if opt.Ascending { + return opt.StartTime + } + return opt.EndTime +} + +// StopTime returns the time the iterator should end at. +// For ascending iterators this is the end time, for descending iterators it's the start time. 
+func (opt IteratorOptions) StopTime() int64 { + if opt.Ascending { + return opt.EndTime + } + return opt.StartTime +} + +// Window returns the time window [start,end) that t falls within. +func (opt IteratorOptions) Window(t int64) (start, end int64) { + if opt.Interval.IsZero() { + return opt.StartTime, opt.EndTime + 1 + } + + // Subtract the offset to the time so we calculate the correct base interval. + t -= int64(opt.Interval.Offset) + + // Retrieve the zone offset for the start time. + var zone int64 + if opt.Location != nil { + _, zone = opt.Zone(t) + } + + // Truncate time by duration. + dt := (t + zone) % int64(opt.Interval.Duration) + if dt < 0 { + // Negative modulo rounds up instead of down, so offset + // with the duration. + dt += int64(opt.Interval.Duration) + } + + // Find the start time. + if influxql.MinTime+dt >= t { + start = influxql.MinTime + } else { + start = t - dt + } + + // Look for the start offset again because the first time may have been + // after the offset switch. Now that we are at midnight in UTC, we can + // lookup the zone offset again to get the real starting offset. + if opt.Location != nil { + _, startOffset := opt.Zone(start) + // Do not adjust the offset if the offset change is greater than or + // equal to the duration. + if o := zone - startOffset; o != 0 && abs(o) < int64(opt.Interval.Duration) { + start += o + } + } + start += int64(opt.Interval.Offset) + + // Find the end time. + if dt := int64(opt.Interval.Duration) - dt; influxql.MaxTime-dt <= t { + end = influxql.MaxTime + } else { + end = t + dt + } + + // Retrieve the zone offset for the end time. + if opt.Location != nil { + _, endOffset := opt.Zone(end) + // Adjust the end time if the offset is different from the start offset. + // Only apply the offset if it is smaller than the duration. + // This prevents going back in time and creating time windows + // that don't make any sense. + if o := zone - endOffset; o != 0 && abs(o) < int64(opt.Interval.Duration) { + // If the offset is greater than 0, that means we are adding time. + // Added time goes into the previous interval because the clocks + // move backwards. If the offset is less than 0, then we are skipping + // time. Skipped time comes after the switch so if we have a time + // interval that lands on the switch, it comes from the next + // interval and not the current one. For this reason, we need to know + // when the actual switch happens by seeing if the time switch is within + // the current interval. We calculate the zone offset with the offset + // and see if the value is the same. If it is, we apply the + // offset. + if o > 0 { + end += o + } else if _, z := opt.Zone(end + o); z == endOffset { + end += o + } + } + } + end += int64(opt.Interval.Offset) + return +} + +// DerivativeInterval returns the time interval for the derivative function. +func (opt IteratorOptions) DerivativeInterval() Interval { + // Use the interval on the derivative() call, if specified. + if expr, ok := opt.Expr.(*influxql.Call); ok && len(expr.Args) == 2 { + return Interval{Duration: expr.Args[1].(*influxql.DurationLiteral).Val} + } + + // Otherwise use the group by interval, if specified. + if opt.Interval.Duration > 0 { + return Interval{Duration: opt.Interval.Duration} + } + + return Interval{Duration: time.Second} +} + +// ElapsedInterval returns the time interval for the elapsed function. +func (opt IteratorOptions) ElapsedInterval() Interval { + // Use the interval on the elapsed() call, if specified. 
+ if expr, ok := opt.Expr.(*influxql.Call); ok && len(expr.Args) == 2 { + return Interval{Duration: expr.Args[1].(*influxql.DurationLiteral).Val} + } + + return Interval{Duration: time.Nanosecond} +} + +// IntegralInterval returns the time interval for the integral function. +func (opt IteratorOptions) IntegralInterval() Interval { + // Use the interval on the integral() call, if specified. + if expr, ok := opt.Expr.(*influxql.Call); ok && len(expr.Args) == 2 { + return Interval{Duration: expr.Args[1].(*influxql.DurationLiteral).Val} + } + + return Interval{Duration: time.Second} +} + +// GetDimensions retrieves the dimensions for this query. +func (opt IteratorOptions) GetDimensions() []string { + if len(opt.GroupBy) > 0 { + dimensions := make([]string, 0, len(opt.GroupBy)) + for dim := range opt.GroupBy { + dimensions = append(dimensions, dim) + } + return dimensions + } + return opt.Dimensions +} + +// Zone returns the zone information for the given time. The offset is in nanoseconds. +func (opt *IteratorOptions) Zone(ns int64) (string, int64) { + if opt.Location == nil { + return "", 0 + } + + t := time.Unix(0, ns).In(opt.Location) + name, offset := t.Zone() + return name, secToNs * int64(offset) +} + +// MarshalBinary encodes opt into a binary format. +func (opt *IteratorOptions) MarshalBinary() ([]byte, error) { + return proto.Marshal(encodeIteratorOptions(opt)) +} + +// UnmarshalBinary decodes from a binary format in to opt. +func (opt *IteratorOptions) UnmarshalBinary(buf []byte) error { + var pb internal.IteratorOptions + if err := proto.Unmarshal(buf, &pb); err != nil { + return err + } + + other, err := decodeIteratorOptions(&pb) + if err != nil { + return err + } + *opt = *other + + return nil +} + +func encodeIteratorOptions(opt *IteratorOptions) *internal.IteratorOptions { + pb := &internal.IteratorOptions{ + Interval: encodeInterval(opt.Interval), + Dimensions: opt.Dimensions, + Fill: proto.Int32(int32(opt.Fill)), + StartTime: proto.Int64(opt.StartTime), + EndTime: proto.Int64(opt.EndTime), + Ascending: proto.Bool(opt.Ascending), + Limit: proto.Int64(int64(opt.Limit)), + Offset: proto.Int64(int64(opt.Offset)), + SLimit: proto.Int64(int64(opt.SLimit)), + SOffset: proto.Int64(int64(opt.SOffset)), + StripName: proto.Bool(opt.StripName), + Dedupe: proto.Bool(opt.Dedupe), + MaxSeriesN: proto.Int64(int64(opt.MaxSeriesN)), + Ordered: proto.Bool(opt.Ordered), + } + + // Set expression, if set. + if opt.Expr != nil { + pb.Expr = proto.String(opt.Expr.String()) + } + + // Set the location, if set. + if opt.Location != nil { + pb.Location = proto.String(opt.Location.String()) + } + + // Convert and encode aux fields as variable references. + if opt.Aux != nil { + pb.Fields = make([]*internal.VarRef, len(opt.Aux)) + pb.Aux = make([]string, len(opt.Aux)) + for i, ref := range opt.Aux { + pb.Fields[i] = encodeVarRef(ref) + pb.Aux[i] = ref.Val + } + } + + // Encode group by dimensions from a map. + if opt.GroupBy != nil { + dimensions := make([]string, 0, len(opt.GroupBy)) + for dim := range opt.GroupBy { + dimensions = append(dimensions, dim) + } + pb.GroupBy = dimensions + } + + // Convert and encode sources to measurements. + if opt.Sources != nil { + sources := make([]*internal.Measurement, len(opt.Sources)) + for i, source := range opt.Sources { + mm := source.(*influxql.Measurement) + sources[i] = encodeMeasurement(mm) + } + pb.Sources = sources + } + + // Fill value can only be a number. Set it if available. 
+ if v, ok := opt.FillValue.(float64); ok { + pb.FillValue = proto.Float64(v) + } + + // Set condition, if set. + if opt.Condition != nil { + pb.Condition = proto.String(opt.Condition.String()) + } + + return pb +} + +func decodeIteratorOptions(pb *internal.IteratorOptions) (*IteratorOptions, error) { + opt := &IteratorOptions{ + Interval: decodeInterval(pb.GetInterval()), + Dimensions: pb.GetDimensions(), + Fill: influxql.FillOption(pb.GetFill()), + StartTime: pb.GetStartTime(), + EndTime: pb.GetEndTime(), + Ascending: pb.GetAscending(), + Limit: int(pb.GetLimit()), + Offset: int(pb.GetOffset()), + SLimit: int(pb.GetSLimit()), + SOffset: int(pb.GetSOffset()), + StripName: pb.GetStripName(), + Dedupe: pb.GetDedupe(), + MaxSeriesN: int(pb.GetMaxSeriesN()), + Ordered: pb.GetOrdered(), + } + + // Set expression, if set. + if pb.Expr != nil { + expr, err := influxql.ParseExpr(pb.GetExpr()) + if err != nil { + return nil, err + } + opt.Expr = expr + } + + if pb.Location != nil { + loc, err := time.LoadLocation(pb.GetLocation()) + if err != nil { + return nil, err + } + opt.Location = loc + } + + // Convert and decode variable references. + if fields := pb.GetFields(); fields != nil { + opt.Aux = make([]influxql.VarRef, len(fields)) + for i, ref := range fields { + opt.Aux[i] = decodeVarRef(ref) + } + } else if aux := pb.GetAux(); aux != nil { + opt.Aux = make([]influxql.VarRef, len(aux)) + for i, name := range aux { + opt.Aux[i] = influxql.VarRef{Val: name} + } + } + + // Convert and decode sources to measurements. + if pb.Sources != nil { + sources := make([]influxql.Source, len(pb.GetSources())) + for i, source := range pb.GetSources() { + mm, err := decodeMeasurement(source) + if err != nil { + return nil, err + } + sources[i] = mm + } + opt.Sources = sources + } + + // Convert group by dimensions to a map. + if pb.GroupBy != nil { + dimensions := make(map[string]struct{}, len(pb.GroupBy)) + for _, dim := range pb.GetGroupBy() { + dimensions[dim] = struct{}{} + } + opt.GroupBy = dimensions + } + + // Set the fill value, if set. + if pb.FillValue != nil { + opt.FillValue = pb.GetFillValue() + } + + // Set condition, if set. + if pb.Condition != nil { + expr, err := influxql.ParseExpr(pb.GetCondition()) + if err != nil { + return nil, err + } + opt.Condition = expr + } + + return opt, nil +} + +func encodeMeasurement(mm *influxql.Measurement) *internal.Measurement { + pb := &internal.Measurement{ + Database: proto.String(mm.Database), + RetentionPolicy: proto.String(mm.RetentionPolicy), + Name: proto.String(mm.Name), + SystemIterator: proto.String(mm.SystemIterator), + IsTarget: proto.Bool(mm.IsTarget), + } + if mm.Regex != nil { + pb.Regex = proto.String(mm.Regex.Val.String()) + } + return pb +} + +func decodeMeasurement(pb *internal.Measurement) (*influxql.Measurement, error) { + mm := &influxql.Measurement{ + Database: pb.GetDatabase(), + RetentionPolicy: pb.GetRetentionPolicy(), + Name: pb.GetName(), + SystemIterator: pb.GetSystemIterator(), + IsTarget: pb.GetIsTarget(), + } + + if pb.Regex != nil { + regex, err := regexp.Compile(pb.GetRegex()) + if err != nil { + return nil, fmt.Errorf("invalid binary measurement regex: value=%q, err=%s", pb.GetRegex(), err) + } + mm.Regex = &influxql.RegexLiteral{Val: regex} + } + + return mm, nil +} + +// Interval represents a repeating interval for a query. +type Interval struct { + Duration time.Duration + Offset time.Duration +} + +// IsZero returns true if the interval has no duration. 
+func (i Interval) IsZero() bool { return i.Duration == 0 } + +func encodeInterval(i Interval) *internal.Interval { + return &internal.Interval{ + Duration: proto.Int64(i.Duration.Nanoseconds()), + Offset: proto.Int64(i.Offset.Nanoseconds()), + } +} + +func decodeInterval(pb *internal.Interval) Interval { + return Interval{ + Duration: time.Duration(pb.GetDuration()), + Offset: time.Duration(pb.GetOffset()), + } +} + +func encodeVarRef(ref influxql.VarRef) *internal.VarRef { + return &internal.VarRef{ + Val: proto.String(ref.Val), + Type: proto.Int32(int32(ref.Type)), + } +} + +func decodeVarRef(pb *internal.VarRef) influxql.VarRef { + return influxql.VarRef{ + Val: pb.GetVal(), + Type: influxql.DataType(pb.GetType()), + } +} + +type nilFloatIterator struct{} + +func (*nilFloatIterator) Stats() IteratorStats { return IteratorStats{} } +func (*nilFloatIterator) Close() error { return nil } +func (*nilFloatIterator) Next() (*FloatPoint, error) { return nil, nil } + +type nilFloatReaderIterator struct { + r io.Reader +} + +func (*nilFloatReaderIterator) Stats() IteratorStats { return IteratorStats{} } +func (itr *nilFloatReaderIterator) Close() error { + if r, ok := itr.r.(io.ReadCloser); ok { + itr.r = nil + return r.Close() + } + return nil +} +func (*nilFloatReaderIterator) Next() (*FloatPoint, error) { return nil, nil } + +// IteratorStats represents statistics about an iterator. +// Some statistics are available immediately upon iterator creation while +// some are derived as the iterator processes data. +type IteratorStats struct { + SeriesN int // series represented + PointN int // points returned +} + +// Add aggregates fields from s and other together. Overwrites s. +func (s *IteratorStats) Add(other IteratorStats) { + s.SeriesN += other.SeriesN + s.PointN += other.PointN +} + +func encodeIteratorStats(stats *IteratorStats) *internal.IteratorStats { + return &internal.IteratorStats{ + SeriesN: proto.Int64(int64(stats.SeriesN)), + PointN: proto.Int64(int64(stats.PointN)), + } +} + +func decodeIteratorStats(pb *internal.IteratorStats) IteratorStats { + return IteratorStats{ + SeriesN: int(pb.GetSeriesN()), + PointN: int(pb.GetPointN()), + } +} + +func decodeIteratorTrace(ctx context.Context, data []byte) error { + pt := tracing.TraceFromContext(ctx) + if pt == nil { + return nil + } + + var ct tracing.Trace + if err := ct.UnmarshalBinary(data); err != nil { + return err + } + + pt.Merge(&ct) + + return nil +} + +// IteratorCost contains statistics retrieved for explaining what potential +// cost may be incurred by instantiating an iterator. +type IteratorCost struct { + // The total number of shards that are touched by this query. + NumShards int64 + + // The total number of non-unique series that are accessed by this query. + // This number matches the number of cursors created by the query since + // one cursor is created for every series. + NumSeries int64 + + // CachedValues returns the number of cached values that may be read by this + // query. + CachedValues int64 + + // The total number of non-unique files that may be accessed by this query. + // This will count the number of files accessed by each series so files + // will likely be double counted. + NumFiles int64 + + // The number of blocks that had the potential to be accessed. + BlocksRead int64 + + // The amount of data that can be potentially read. + BlockSize int64 +} + +// Combine combines the results of two IteratorCost structures into one. 
+func (c IteratorCost) Combine(other IteratorCost) IteratorCost { + return IteratorCost{ + NumShards: c.NumShards + other.NumShards, + NumSeries: c.NumSeries + other.NumSeries, + CachedValues: c.CachedValues + other.CachedValues, + NumFiles: c.NumFiles + other.NumFiles, + BlocksRead: c.BlocksRead + other.BlocksRead, + BlockSize: c.BlockSize + other.BlockSize, + } +} + +// floatFastDedupeIterator outputs unique points where the point has a single aux field. +type floatFastDedupeIterator struct { + input FloatIterator + m map[fastDedupeKey]struct{} // lookup of points already sent +} + +// newFloatFastDedupeIterator returns a new instance of floatFastDedupeIterator. +func newFloatFastDedupeIterator(input FloatIterator) *floatFastDedupeIterator { + return &floatFastDedupeIterator{ + input: input, + m: make(map[fastDedupeKey]struct{}), + } +} + +// Stats returns stats from the input iterator. +func (itr *floatFastDedupeIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *floatFastDedupeIterator) Close() error { return itr.input.Close() } + +// Next returns the next unique point from the input iterator. +func (itr *floatFastDedupeIterator) Next() (*FloatPoint, error) { + for { + // Read next point. + // Skip if there are not any aux fields. + p, err := itr.input.Next() + if p == nil || err != nil { + return nil, err + } else if len(p.Aux) == 0 { + continue + } + + // If the point has already been output then move to the next point. + key := fastDedupeKey{name: p.Name} + key.values[0] = p.Aux[0] + if len(p.Aux) > 1 { + key.values[1] = p.Aux[1] + } + if _, ok := itr.m[key]; ok { + continue + } + + // Otherwise mark it as emitted and return point. + itr.m[key] = struct{}{} + return p, nil + } +} + +type fastDedupeKey struct { + name string + values [2]interface{} +} + +type reverseStringSlice []string + +func (p reverseStringSlice) Len() int { return len(p) } +func (p reverseStringSlice) Less(i, j int) bool { return p[i] > p[j] } +func (p reverseStringSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +func abs(v int64) int64 { + if v < 0 { + return -v + } + return v +} + +// IteratorEncoder is an encoder for encoding an iterator's points to w. +type IteratorEncoder struct { + w io.Writer + + // Frequency with which stats are emitted. + StatsInterval time.Duration +} + +// NewIteratorEncoder encodes an iterator's points to w. +func NewIteratorEncoder(w io.Writer) *IteratorEncoder { + return &IteratorEncoder{ + w: w, + + StatsInterval: DefaultStatsInterval, + } +} + +// EncodeIterator encodes and writes all of itr's points to the underlying writer. 
+func (enc *IteratorEncoder) EncodeIterator(itr Iterator) error {
+ switch itr := itr.(type) {
+ case FloatIterator:
+ return enc.encodeFloatIterator(itr)
+ case IntegerIterator:
+ return enc.encodeIntegerIterator(itr)
+ case StringIterator:
+ return enc.encodeStringIterator(itr)
+ case BooleanIterator:
+ return enc.encodeBooleanIterator(itr)
+ default:
+ panic(fmt.Sprintf("unsupported iterator for encoder: %T", itr))
+ }
+}
+
+func (enc *IteratorEncoder) EncodeTrace(trace *tracing.Trace) error {
+ data, err := trace.MarshalBinary()
+ if err != nil {
+ return err
+ }
+
+ buf, err := proto.Marshal(&internal.Point{
+ Name: proto.String(""),
+ Tags: proto.String(""),
+ Time: proto.Int64(0),
+ Nil: proto.Bool(false),
+
+ Trace: data,
+ })
+ if err != nil {
+ return err
+ }
+
+ if err = binary.Write(enc.w, binary.BigEndian, uint32(len(buf))); err != nil {
+ return err
+ }
+ if _, err = enc.w.Write(buf); err != nil {
+ return err
+ }
+ return nil
+}
+
+// encode a stats object in the point stream.
+func (enc *IteratorEncoder) encodeStats(stats IteratorStats) error {
+ buf, err := proto.Marshal(&internal.Point{
+ Name: proto.String(""),
+ Tags: proto.String(""),
+ Time: proto.Int64(0),
+ Nil: proto.Bool(false),
+
+ Stats: encodeIteratorStats(&stats),
+ })
+ if err != nil {
+ return err
+ }
+
+ if err = binary.Write(enc.w, binary.BigEndian, uint32(len(buf))); err != nil {
+ return err
+ }
+ if _, err = enc.w.Write(buf); err != nil {
+ return err
+ }
+ return nil
+}
diff --git a/vendor/github.com/influxdata/influxdb/query/iterator_mapper.go b/vendor/github.com/influxdata/influxdb/query/iterator_mapper.go
new file mode 100644
index 0000000..79675fa
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/query/iterator_mapper.go
@@ -0,0 +1,67 @@
+package query
+
+import (
+ "fmt"
+ "math"
+
+ "github.com/influxdata/influxql"
+)
+
+type IteratorMap interface {
+ Value(row *Row) interface{}
+}
+
+type FieldMap struct {
+ Index int
+ Type influxql.DataType
+}
+
+func (f FieldMap) Value(row *Row) interface{} {
+ v := castToType(row.Values[f.Index], f.Type)
+ if v == NullFloat {
+ // If the value is a null float, then convert it back to NaN
+ // so it is treated as a float for eval.
+ v = math.NaN()
+ }
+ return v
+}
+
+type TagMap string
+
+func (s TagMap) Value(row *Row) interface{} { return row.Series.Tags.Value(string(s)) }
+
+type NullMap struct{}
+
+func (NullMap) Value(row *Row) interface{} { return nil }
+
+func NewIteratorMapper(cur Cursor, driver IteratorMap, fields []IteratorMap, opt IteratorOptions) Iterator {
+ if driver != nil {
+ switch driver := driver.(type) {
+ case FieldMap:
+ switch driver.Type {
+ case influxql.Float:
+ return newFloatIteratorMapper(cur, driver, fields, opt)
+ case influxql.Integer:
+ return newIntegerIteratorMapper(cur, driver, fields, opt)
+ case influxql.Unsigned:
+ return newUnsignedIteratorMapper(cur, driver, fields, opt)
+ case influxql.String, influxql.Tag:
+ return newStringIteratorMapper(cur, driver, fields, opt)
+ case influxql.Boolean:
+ return newBooleanIteratorMapper(cur, driver, fields, opt)
+ default:
+ // The driver doesn't appear to have a valid driver type.
+ // We should close the cursor and return a blank iterator.
+ // We close the cursor because we own it and have a responsibility
+ // to close it once it is passed into this function.
+ cur.Close() + return &nilFloatIterator{} + } + case TagMap: + return newStringIteratorMapper(cur, driver, fields, opt) + default: + panic(fmt.Sprintf("unable to create iterator mapper with driver expression type: %T", driver)) + } + } + return newFloatIteratorMapper(cur, nil, fields, opt) +} diff --git a/vendor/github.com/influxdata/influxdb/query/iterator_mapper_test.go b/vendor/github.com/influxdata/influxdb/query/iterator_mapper_test.go new file mode 100644 index 0000000..3a06a83 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/query/iterator_mapper_test.go @@ -0,0 +1,74 @@ +package query_test + +import ( + "testing" + + "github.com/davecgh/go-spew/spew" + "github.com/influxdata/influxdb/pkg/deep" + "github.com/influxdata/influxdb/query" + "github.com/influxdata/influxql" +) + +func TestIteratorMapper(t *testing.T) { + cur := query.RowCursor([]query.Row{ + { + Time: 0, + Series: query.Series{ + Name: "cpu", + Tags: ParseTags("host=A"), + }, + Values: []interface{}{float64(1), "a"}, + }, + { + Time: 5, + Series: query.Series{ + Name: "cpu", + Tags: ParseTags("host=A"), + }, + Values: []interface{}{float64(3), "c"}, + }, + { + Time: 2, + Series: query.Series{ + Name: "cpu", + Tags: ParseTags("host=B"), + }, + Values: []interface{}{float64(2), "b"}, + }, + { + Time: 8, + Series: query.Series{ + Name: "cpu", + Tags: ParseTags("host=B"), + }, + Values: []interface{}{float64(8), "h"}, + }, + }, []influxql.VarRef{ + {Val: "val1", Type: influxql.Float}, + {Val: "val2", Type: influxql.String}, + }) + + opt := query.IteratorOptions{ + Ascending: true, + Aux: []influxql.VarRef{ + {Val: "val1", Type: influxql.Float}, + {Val: "val2", Type: influxql.String}, + }, + Dimensions: []string{"host"}, + } + itr := query.NewIteratorMapper(cur, nil, []query.IteratorMap{ + query.FieldMap{Index: 0}, + query.FieldMap{Index: 1}, + query.TagMap("host"), + }, opt) + if a, err := Iterators([]query.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]query.Point{ + {&query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Aux: []interface{}{float64(1), "a", "A"}}}, + {&query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 5, Aux: []interface{}{float64(3), "c", "A"}}}, + {&query.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 2, Aux: []interface{}{float64(2), "b", "B"}}}, + {&query.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 8, Aux: []interface{}{float64(8), "h", "B"}}}, + }) { + t.Errorf("unexpected points: %s", spew.Sdump(a)) + } +} diff --git a/vendor/github.com/influxdata/influxdb/query/iterator_test.go b/vendor/github.com/influxdata/influxdb/query/iterator_test.go new file mode 100644 index 0000000..574a4fd --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/query/iterator_test.go @@ -0,0 +1,1784 @@ +package query_test + +import ( + "bytes" + "context" + "fmt" + "reflect" + "strings" + "testing" + "time" + + "github.com/davecgh/go-spew/spew" + "github.com/influxdata/influxdb/pkg/deep" + "github.com/influxdata/influxdb/query" + "github.com/influxdata/influxql" +) + +// Ensure that a set of iterators can be merged together, sorted by window and name/tag. 
+func TestMergeIterator_Float(t *testing.T) { + inputs := []*FloatIterator{ + {Points: []query.FloatPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: 1}, + {Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: 3}, + {Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: 4}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: 2}, + {Name: "mem", Tags: ParseTags("host=B"), Time: 11, Value: 8}, + }}, + {Points: []query.FloatPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: 7}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: 5}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: 6}, + {Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: 9}, + }}, + {Points: []query.FloatPoint{}}, + {Points: []query.FloatPoint{}}, + } + + itr := query.NewMergeIterator(FloatIterators(inputs), query.IteratorOptions{ + Interval: query.Interval{ + Duration: 10 * time.Nanosecond, + }, + Dimensions: []string{"host"}, + Ascending: true, + }) + if a, err := Iterators([]query.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]query.Point{ + {&query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: 1}}, + {&query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: 3}}, + {&query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: 7}}, + {&query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: 4}}, + {&query.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: 2}}, + {&query.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: 5}}, + {&query.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: 6}}, + {&query.FloatPoint{Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: 9}}, + {&query.FloatPoint{Name: "mem", Tags: ParseTags("host=B"), Time: 11, Value: 8}}, + }) { + t.Errorf("unexpected points: %s", spew.Sdump(a)) + } + + for i, input := range inputs { + if !input.Closed { + t.Errorf("iterator %d not closed", i) + } + } +} + +// Ensure that a set of iterators can be merged together, sorted by window and name/tag. 
+func TestMergeIterator_Integer(t *testing.T) { + inputs := []*IntegerIterator{ + {Points: []query.IntegerPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: 1}, + {Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: 3}, + {Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: 4}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: 2}, + {Name: "mem", Tags: ParseTags("host=B"), Time: 11, Value: 8}, + }}, + {Points: []query.IntegerPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: 7}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: 5}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: 6}, + {Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: 9}, + }}, + {Points: []query.IntegerPoint{}}, + } + itr := query.NewMergeIterator(IntegerIterators(inputs), query.IteratorOptions{ + Interval: query.Interval{ + Duration: 10 * time.Nanosecond, + }, + Dimensions: []string{"host"}, + Ascending: true, + }) + + if a, err := Iterators([]query.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]query.Point{ + {&query.IntegerPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: 1}}, + {&query.IntegerPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: 3}}, + {&query.IntegerPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: 7}}, + {&query.IntegerPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: 4}}, + {&query.IntegerPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: 2}}, + {&query.IntegerPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: 5}}, + {&query.IntegerPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: 6}}, + {&query.IntegerPoint{Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: 9}}, + {&query.IntegerPoint{Name: "mem", Tags: ParseTags("host=B"), Time: 11, Value: 8}}, + }) { + t.Errorf("unexpected points: %s", spew.Sdump(a)) + } + + for i, input := range inputs { + if !input.Closed { + t.Errorf("iterator %d not closed", i) + } + } +} + +// Ensure that a set of iterators can be merged together, sorted by window and name/tag. 
+func TestMergeIterator_Unsigned(t *testing.T) { + inputs := []*UnsignedIterator{ + {Points: []query.UnsignedPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: 1}, + {Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: 3}, + {Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: 4}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: 2}, + {Name: "mem", Tags: ParseTags("host=B"), Time: 11, Value: 8}, + }}, + {Points: []query.UnsignedPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: 7}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: 5}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: 6}, + {Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: 9}, + }}, + {Points: []query.UnsignedPoint{}}, + } + itr := query.NewMergeIterator(UnsignedIterators(inputs), query.IteratorOptions{ + Interval: query.Interval{ + Duration: 10 * time.Nanosecond, + }, + Dimensions: []string{"host"}, + Ascending: true, + }) + + if a, err := Iterators([]query.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]query.Point{ + {&query.UnsignedPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: 1}}, + {&query.UnsignedPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: 3}}, + {&query.UnsignedPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: 7}}, + {&query.UnsignedPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: 4}}, + {&query.UnsignedPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: 2}}, + {&query.UnsignedPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: 5}}, + {&query.UnsignedPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: 6}}, + {&query.UnsignedPoint{Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: 9}}, + {&query.UnsignedPoint{Name: "mem", Tags: ParseTags("host=B"), Time: 11, Value: 8}}, + }) { + t.Errorf("unexpected points: %s", spew.Sdump(a)) + } + + for i, input := range inputs { + if !input.Closed { + t.Errorf("iterator %d not closed", i) + } + } +} + +// Ensure that a set of iterators can be merged together, sorted by window and name/tag. 
+func TestMergeIterator_String(t *testing.T) { + inputs := []*StringIterator{ + {Points: []query.StringPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: "a"}, + {Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: "c"}, + {Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: "d"}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: "b"}, + {Name: "mem", Tags: ParseTags("host=B"), Time: 11, Value: "h"}, + }}, + {Points: []query.StringPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: "g"}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: "e"}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: "f"}, + {Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: "i"}, + }}, + {Points: []query.StringPoint{}}, + } + itr := query.NewMergeIterator(StringIterators(inputs), query.IteratorOptions{ + Interval: query.Interval{ + Duration: 10 * time.Nanosecond, + }, + Dimensions: []string{"host"}, + Ascending: true, + }) + + if a, err := Iterators([]query.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]query.Point{ + {&query.StringPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: "a"}}, + {&query.StringPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: "c"}}, + {&query.StringPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: "g"}}, + {&query.StringPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: "d"}}, + {&query.StringPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: "b"}}, + {&query.StringPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: "e"}}, + {&query.StringPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: "f"}}, + {&query.StringPoint{Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: "i"}}, + {&query.StringPoint{Name: "mem", Tags: ParseTags("host=B"), Time: 11, Value: "h"}}, + }) { + t.Errorf("unexpected points: %s", spew.Sdump(a)) + } + + for i, input := range inputs { + if !input.Closed { + t.Errorf("iterator %d not closed", i) + } + } +} + +// Ensure that a set of iterators can be merged together, sorted by window and name/tag. 
+func TestMergeIterator_Boolean(t *testing.T) { + inputs := []*BooleanIterator{ + {Points: []query.BooleanPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: true}, + {Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: true}, + {Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: false}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: false}, + {Name: "mem", Tags: ParseTags("host=B"), Time: 11, Value: true}, + }}, + {Points: []query.BooleanPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: true}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: true}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: false}, + {Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: false}, + }}, + {Points: []query.BooleanPoint{}}, + } + itr := query.NewMergeIterator(BooleanIterators(inputs), query.IteratorOptions{ + Interval: query.Interval{ + Duration: 10 * time.Nanosecond, + }, + Dimensions: []string{"host"}, + Ascending: true, + }) + + if a, err := Iterators([]query.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]query.Point{ + {&query.BooleanPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: true}}, + {&query.BooleanPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: true}}, + {&query.BooleanPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: true}}, + {&query.BooleanPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: false}}, + {&query.BooleanPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: false}}, + {&query.BooleanPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: true}}, + {&query.BooleanPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: false}}, + {&query.BooleanPoint{Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: false}}, + {&query.BooleanPoint{Name: "mem", Tags: ParseTags("host=B"), Time: 11, Value: true}}, + }) { + t.Errorf("unexpected points: %s", spew.Sdump(a)) + } + + for i, input := range inputs { + if !input.Closed { + t.Errorf("iterator %d not closed", i) + } + } +} + +func TestMergeIterator_Nil(t *testing.T) { + itr := query.NewMergeIterator([]query.Iterator{nil}, query.IteratorOptions{}) + if itr != nil { + t.Fatalf("unexpected iterator: %#v", itr) + } +} + +// Verifies that coercing will drop values that aren't the primary type. +// It's the responsibility of the engine to return the correct type. If they don't, +// we drop iterators that don't match. 
+func TestMergeIterator_Coerce_Float(t *testing.T) { + inputs := []query.Iterator{ + &FloatIterator{Points: []query.FloatPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: 7}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: 5}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: 6}, + {Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: 9}, + }}, + &IntegerIterator{Points: []query.IntegerPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: 1}, + {Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: 3}, + {Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: 4}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: 2}, + {Name: "mem", Tags: ParseTags("host=B"), Time: 11, Value: 8}, + }}, + } + + itr := query.NewMergeIterator(inputs, query.IteratorOptions{ + Interval: query.Interval{ + Duration: 10 * time.Nanosecond, + }, + Dimensions: []string{"host"}, + Ascending: true, + }) + if a, err := Iterators([]query.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]query.Point{ + {&query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: 7}}, + {&query.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: 5}}, + {&query.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: 6}}, + {&query.FloatPoint{Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: 9}}, + }) { + t.Errorf("unexpected points: %s", spew.Sdump(a)) + } + + for i, input := range inputs { + switch input := input.(type) { + case *FloatIterator: + if !input.Closed { + t.Errorf("iterator %d not closed", i) + } + case *IntegerIterator: + if !input.Closed { + t.Errorf("iterator %d not closed", i) + } + case *UnsignedIterator: + if !input.Closed { + t.Errorf("iterator %d not closed", i) + } + } + } +} + +// Ensure that a set of iterators can be merged together, sorted by name/tag. 
+func TestSortedMergeIterator_Float(t *testing.T) { + inputs := []*FloatIterator{ + {Points: []query.FloatPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: 1}, + {Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: 3}, + {Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: 4}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: 2}, + {Name: "mem", Tags: ParseTags("host=B"), Time: 4, Value: 8}, + }}, + {Points: []query.FloatPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: 7}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: 5}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: 6}, + {Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: 9}, + }}, + {Points: []query.FloatPoint{}}, + } + itr := query.NewSortedMergeIterator(FloatIterators(inputs), query.IteratorOptions{ + Interval: query.Interval{ + Duration: 10 * time.Nanosecond, + }, + Dimensions: []string{"host"}, + Ascending: true, + }) + if a, err := Iterators([]query.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]query.Point{ + {&query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: 1}}, + {&query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: 3}}, + {&query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: 7}}, + {&query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: 4}}, + {&query.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: 2}}, + {&query.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: 5}}, + {&query.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: 6}}, + {&query.FloatPoint{Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: 9}}, + {&query.FloatPoint{Name: "mem", Tags: ParseTags("host=B"), Time: 4, Value: 8}}, + }) { + t.Errorf("unexpected points: %s", spew.Sdump(a)) + } + + for i, input := range inputs { + if !input.Closed { + t.Errorf("iterator %d not closed", i) + } + } +} + +// Ensure that a set of iterators can be merged together, sorted by name/tag. 
+func TestSortedMergeIterator_Integer(t *testing.T) { + inputs := []*IntegerIterator{ + {Points: []query.IntegerPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: 1}, + {Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: 3}, + {Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: 4}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: 2}, + {Name: "mem", Tags: ParseTags("host=B"), Time: 4, Value: 8}, + }}, + {Points: []query.IntegerPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: 7}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: 5}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: 6}, + {Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: 9}, + }}, + {Points: []query.IntegerPoint{}}, + } + itr := query.NewSortedMergeIterator(IntegerIterators(inputs), query.IteratorOptions{ + Interval: query.Interval{ + Duration: 10 * time.Nanosecond, + }, + Dimensions: []string{"host"}, + Ascending: true, + }) + if a, err := Iterators([]query.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]query.Point{ + {&query.IntegerPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: 1}}, + {&query.IntegerPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: 3}}, + {&query.IntegerPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: 7}}, + {&query.IntegerPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: 4}}, + {&query.IntegerPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: 2}}, + {&query.IntegerPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: 5}}, + {&query.IntegerPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: 6}}, + {&query.IntegerPoint{Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: 9}}, + {&query.IntegerPoint{Name: "mem", Tags: ParseTags("host=B"), Time: 4, Value: 8}}, + }) { + t.Errorf("unexpected points: %s", spew.Sdump(a)) + } + + for i, input := range inputs { + if !input.Closed { + t.Errorf("iterator %d not closed", i) + } + } +} + +// Ensure that a set of iterators can be merged together, sorted by name/tag. 
+func TestSortedMergeIterator_Unsigned(t *testing.T) { + inputs := []*UnsignedIterator{ + {Points: []query.UnsignedPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: 1}, + {Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: 3}, + {Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: 4}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: 2}, + {Name: "mem", Tags: ParseTags("host=B"), Time: 4, Value: 8}, + }}, + {Points: []query.UnsignedPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: 7}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: 5}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: 6}, + {Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: 9}, + }}, + {Points: []query.UnsignedPoint{}}, + } + itr := query.NewSortedMergeIterator(UnsignedIterators(inputs), query.IteratorOptions{ + Interval: query.Interval{ + Duration: 10 * time.Nanosecond, + }, + Dimensions: []string{"host"}, + Ascending: true, + }) + if a, err := Iterators([]query.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]query.Point{ + {&query.UnsignedPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: 1}}, + {&query.UnsignedPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: 3}}, + {&query.UnsignedPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: 7}}, + {&query.UnsignedPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: 4}}, + {&query.UnsignedPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: 2}}, + {&query.UnsignedPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: 5}}, + {&query.UnsignedPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: 6}}, + {&query.UnsignedPoint{Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: 9}}, + {&query.UnsignedPoint{Name: "mem", Tags: ParseTags("host=B"), Time: 4, Value: 8}}, + }) { + t.Errorf("unexpected points: %s", spew.Sdump(a)) + } + + for i, input := range inputs { + if !input.Closed { + t.Errorf("iterator %d not closed", i) + } + } +} + +// Ensure that a set of iterators can be merged together, sorted by name/tag. 
+func TestSortedMergeIterator_String(t *testing.T) { + inputs := []*StringIterator{ + {Points: []query.StringPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: "a"}, + {Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: "c"}, + {Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: "d"}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: "b"}, + {Name: "mem", Tags: ParseTags("host=B"), Time: 4, Value: "h"}, + }}, + {Points: []query.StringPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: "g"}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: "e"}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: "f"}, + {Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: "i"}, + }}, + {Points: []query.StringPoint{}}, + } + itr := query.NewSortedMergeIterator(StringIterators(inputs), query.IteratorOptions{ + Interval: query.Interval{ + Duration: 10 * time.Nanosecond, + }, + Dimensions: []string{"host"}, + Ascending: true, + }) + if a, err := Iterators([]query.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]query.Point{ + {&query.StringPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: "a"}}, + {&query.StringPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: "c"}}, + {&query.StringPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: "g"}}, + {&query.StringPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: "d"}}, + {&query.StringPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: "b"}}, + {&query.StringPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: "e"}}, + {&query.StringPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: "f"}}, + {&query.StringPoint{Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: "i"}}, + {&query.StringPoint{Name: "mem", Tags: ParseTags("host=B"), Time: 4, Value: "h"}}, + }) { + t.Errorf("unexpected points: %s", spew.Sdump(a)) + } + + for i, input := range inputs { + if !input.Closed { + t.Errorf("iterator %d not closed", i) + } + } +} + +// Ensure that a set of iterators can be merged together, sorted by name/tag. 
+func TestSortedMergeIterator_Boolean(t *testing.T) { + inputs := []*BooleanIterator{ + {Points: []query.BooleanPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: true}, + {Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: true}, + {Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: false}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: false}, + {Name: "mem", Tags: ParseTags("host=B"), Time: 4, Value: true}, + }}, + {Points: []query.BooleanPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: true}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: true}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: false}, + {Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: true}, + }}, + {Points: []query.BooleanPoint{}}, + } + itr := query.NewSortedMergeIterator(BooleanIterators(inputs), query.IteratorOptions{ + Interval: query.Interval{ + Duration: 10 * time.Nanosecond, + }, + Dimensions: []string{"host"}, + Ascending: true, + }) + if a, err := Iterators([]query.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]query.Point{ + {&query.BooleanPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: true}}, + {&query.BooleanPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: true}}, + {&query.BooleanPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: true}}, + {&query.BooleanPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: false}}, + {&query.BooleanPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: false}}, + {&query.BooleanPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: true}}, + {&query.BooleanPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: false}}, + {&query.BooleanPoint{Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: true}}, + {&query.BooleanPoint{Name: "mem", Tags: ParseTags("host=B"), Time: 4, Value: true}}, + }) { + t.Errorf("unexpected points: %s", spew.Sdump(a)) + } + + for i, input := range inputs { + if !input.Closed { + t.Errorf("iterator %d not closed", i) + } + } +} + +func TestSortedMergeIterator_Nil(t *testing.T) { + itr := query.NewSortedMergeIterator([]query.Iterator{nil}, query.IteratorOptions{}) + if itr != nil { + t.Fatalf("unexpected iterator: %#v", itr) + } +} + +func TestSortedMergeIterator_Coerce_Float(t *testing.T) { + inputs := []query.Iterator{ + &FloatIterator{Points: []query.FloatPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: 7}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: 5}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: 6}, + {Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: 9}, + }}, + &IntegerIterator{Points: []query.IntegerPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: 1}, + {Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: 3}, + {Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: 4}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: 2}, + {Name: "mem", Tags: ParseTags("host=B"), Time: 4, Value: 8}, + }}, + } + + itr := query.NewSortedMergeIterator(inputs, query.IteratorOptions{ + Interval: query.Interval{ + Duration: 10 * time.Nanosecond, + }, + Dimensions: []string{"host"}, + Ascending: true, + }) + if a, err := Iterators([]query.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]query.Point{ + {&query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: 7}}, + 
{&query.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: 5}}, + {&query.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: 6}}, + {&query.FloatPoint{Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: 9}}, + }) { + t.Errorf("unexpected points: %s", spew.Sdump(a)) + } + + for i, input := range inputs { + switch input := input.(type) { + case *FloatIterator: + if !input.Closed { + t.Errorf("iterator %d not closed", i) + } + case *IntegerIterator: + if !input.Closed { + t.Errorf("iterator %d not closed", i) + } + case *UnsignedIterator: + if !input.Closed { + t.Errorf("iterator %d not closed", i) + } + } + } +} + +// Ensure limit iterators work with limit and offset. +func TestLimitIterator_Float(t *testing.T) { + input := &FloatIterator{Points: []query.FloatPoint{ + {Name: "cpu", Time: 0, Value: 1}, + {Name: "cpu", Time: 5, Value: 3}, + {Name: "cpu", Time: 10, Value: 5}, + {Name: "mem", Time: 5, Value: 3}, + {Name: "mem", Time: 7, Value: 8}, + }} + + itr := query.NewLimitIterator(input, query.IteratorOptions{ + Limit: 1, + Offset: 1, + }) + + if a, err := Iterators([]query.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]query.Point{ + {&query.FloatPoint{Name: "cpu", Time: 5, Value: 3}}, + {&query.FloatPoint{Name: "mem", Time: 7, Value: 8}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } + + if !input.Closed { + t.Error("iterator not closed") + } +} + +// Ensure limit iterators work with limit and offset. +func TestLimitIterator_Integer(t *testing.T) { + input := &IntegerIterator{Points: []query.IntegerPoint{ + {Name: "cpu", Time: 0, Value: 1}, + {Name: "cpu", Time: 5, Value: 3}, + {Name: "cpu", Time: 10, Value: 5}, + {Name: "mem", Time: 5, Value: 3}, + {Name: "mem", Time: 7, Value: 8}, + }} + + itr := query.NewLimitIterator(input, query.IteratorOptions{ + Limit: 1, + Offset: 1, + }) + + if a, err := Iterators([]query.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]query.Point{ + {&query.IntegerPoint{Name: "cpu", Time: 5, Value: 3}}, + {&query.IntegerPoint{Name: "mem", Time: 7, Value: 8}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } + + if !input.Closed { + t.Error("iterator not closed") + } +} + +// Ensure limit iterators work with limit and offset. +func TestLimitIterator_Unsigned(t *testing.T) { + input := &UnsignedIterator{Points: []query.UnsignedPoint{ + {Name: "cpu", Time: 0, Value: 1}, + {Name: "cpu", Time: 5, Value: 3}, + {Name: "cpu", Time: 10, Value: 5}, + {Name: "mem", Time: 5, Value: 3}, + {Name: "mem", Time: 7, Value: 8}, + }} + + itr := query.NewLimitIterator(input, query.IteratorOptions{ + Limit: 1, + Offset: 1, + }) + + if a, err := Iterators([]query.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]query.Point{ + {&query.UnsignedPoint{Name: "cpu", Time: 5, Value: 3}}, + {&query.UnsignedPoint{Name: "mem", Time: 7, Value: 8}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } + + if !input.Closed { + t.Error("iterator not closed") + } +} + +// Ensure limit iterators work with limit and offset. 
+func TestLimitIterator_String(t *testing.T) { + input := &StringIterator{Points: []query.StringPoint{ + {Name: "cpu", Time: 0, Value: "a"}, + {Name: "cpu", Time: 5, Value: "b"}, + {Name: "cpu", Time: 10, Value: "c"}, + {Name: "mem", Time: 5, Value: "d"}, + {Name: "mem", Time: 7, Value: "e"}, + }} + + itr := query.NewLimitIterator(input, query.IteratorOptions{ + Limit: 1, + Offset: 1, + }) + + if a, err := Iterators([]query.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]query.Point{ + {&query.StringPoint{Name: "cpu", Time: 5, Value: "b"}}, + {&query.StringPoint{Name: "mem", Time: 7, Value: "e"}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } + + if !input.Closed { + t.Error("iterator not closed") + } +} + +// Ensure limit iterators work with limit and offset. +func TestLimitIterator_Boolean(t *testing.T) { + input := &BooleanIterator{Points: []query.BooleanPoint{ + {Name: "cpu", Time: 0, Value: true}, + {Name: "cpu", Time: 5, Value: false}, + {Name: "cpu", Time: 10, Value: true}, + {Name: "mem", Time: 5, Value: false}, + {Name: "mem", Time: 7, Value: true}, + }} + + itr := query.NewLimitIterator(input, query.IteratorOptions{ + Limit: 1, + Offset: 1, + }) + + if a, err := Iterators([]query.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]query.Point{ + {&query.BooleanPoint{Name: "cpu", Time: 5, Value: false}}, + {&query.BooleanPoint{Name: "mem", Time: 7, Value: true}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } + + if !input.Closed { + t.Error("iterator not closed") + } +} + +// Ensure limit iterator returns a subset of points. +func TestLimitIterator(t *testing.T) { + itr := query.NewLimitIterator( + &FloatIterator{Points: []query.FloatPoint{ + {Time: 0, Value: 0}, + {Time: 1, Value: 1}, + {Time: 2, Value: 2}, + {Time: 3, Value: 3}, + }}, + query.IteratorOptions{ + Limit: 2, + Offset: 1, + StartTime: influxql.MinTime, + EndTime: influxql.MaxTime, + }, + ) + + if a, err := (Iterators{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]query.Point{ + {&query.FloatPoint{Time: 1, Value: 1}}, + {&query.FloatPoint{Time: 2, Value: 2}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +func TestFillIterator_ImplicitStartTime(t *testing.T) { + opt := query.IteratorOptions{ + StartTime: influxql.MinTime, + EndTime: mustParseTime("2000-01-01T01:00:00Z").UnixNano() - 1, + Interval: query.Interval{ + Duration: 20 * time.Minute, + }, + Ascending: true, + } + start := mustParseTime("2000-01-01T00:00:00Z").UnixNano() + itr := query.NewFillIterator( + &FloatIterator{Points: []query.FloatPoint{ + {Time: start, Value: 0}, + }}, + nil, + opt, + ) + + if a, err := (Iterators{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]query.Point{ + {&query.FloatPoint{Time: start, Value: 0}}, + {&query.FloatPoint{Time: start + int64(20*time.Minute), Nil: true}}, + {&query.FloatPoint{Time: start + int64(40*time.Minute), Nil: true}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +func TestFillIterator_DST(t *testing.T) { + for _, tt := range []struct { + name string + start, end time.Time + points []time.Duration + opt query.IteratorOptions + }{ + { + name: "Start_GroupByDay_Ascending", + start: mustParseTime("2000-04-01T00:00:00-08:00"), + end: mustParseTime("2000-04-05T00:00:00-07:00"), + points: []time.Duration{ + 24 * 
time.Hour, + 47 * time.Hour, + 71 * time.Hour, + }, + opt: query.IteratorOptions{ + Interval: query.Interval{ + Duration: 24 * time.Hour, + }, + Location: LosAngeles, + Ascending: true, + }, + }, + { + name: "Start_GroupByDay_Descending", + start: mustParseTime("2000-04-01T00:00:00-08:00"), + end: mustParseTime("2000-04-05T00:00:00-07:00"), + points: []time.Duration{ + 71 * time.Hour, + 47 * time.Hour, + 24 * time.Hour, + }, + opt: query.IteratorOptions{ + Interval: query.Interval{ + Duration: 24 * time.Hour, + }, + Location: LosAngeles, + Ascending: false, + }, + }, + { + name: "Start_GroupByHour_Ascending", + start: mustParseTime("2000-04-02T00:00:00-08:00"), + end: mustParseTime("2000-04-02T05:00:00-07:00"), + points: []time.Duration{ + 1 * time.Hour, + 2 * time.Hour, + 3 * time.Hour, + }, + opt: query.IteratorOptions{ + Interval: query.Interval{ + Duration: 1 * time.Hour, + }, + Location: LosAngeles, + Ascending: true, + }, + }, + { + name: "Start_GroupByHour_Descending", + start: mustParseTime("2000-04-02T00:00:00-08:00"), + end: mustParseTime("2000-04-02T05:00:00-07:00"), + points: []time.Duration{ + 3 * time.Hour, + 2 * time.Hour, + 1 * time.Hour, + }, + opt: query.IteratorOptions{ + Interval: query.Interval{ + Duration: 1 * time.Hour, + }, + Location: LosAngeles, + Ascending: false, + }, + }, + { + name: "Start_GroupBy2Hour_Ascending", + start: mustParseTime("2000-04-02T00:00:00-08:00"), + end: mustParseTime("2000-04-02T07:00:00-07:00"), + points: []time.Duration{ + 2 * time.Hour, + 3 * time.Hour, + 5 * time.Hour, + }, + opt: query.IteratorOptions{ + Interval: query.Interval{ + Duration: 2 * time.Hour, + }, + Location: LosAngeles, + Ascending: true, + }, + }, + { + name: "Start_GroupBy2Hour_Descending", + start: mustParseTime("2000-04-02T00:00:00-08:00"), + end: mustParseTime("2000-04-02T07:00:00-07:00"), + points: []time.Duration{ + 5 * time.Hour, + 3 * time.Hour, + 2 * time.Hour, + }, + opt: query.IteratorOptions{ + Interval: query.Interval{ + Duration: 2 * time.Hour, + }, + Location: LosAngeles, + Ascending: false, + }, + }, + { + name: "End_GroupByDay_Ascending", + start: mustParseTime("2000-10-28T00:00:00-07:00"), + end: mustParseTime("2000-11-01T00:00:00-08:00"), + points: []time.Duration{ + 24 * time.Hour, + 49 * time.Hour, + 73 * time.Hour, + }, + opt: query.IteratorOptions{ + Interval: query.Interval{ + Duration: 24 * time.Hour, + }, + Location: LosAngeles, + Ascending: true, + }, + }, + { + name: "End_GroupByDay_Descending", + start: mustParseTime("2000-10-28T00:00:00-07:00"), + end: mustParseTime("2000-11-01T00:00:00-08:00"), + points: []time.Duration{ + 73 * time.Hour, + 49 * time.Hour, + 24 * time.Hour, + }, + opt: query.IteratorOptions{ + Interval: query.Interval{ + Duration: 24 * time.Hour, + }, + Location: LosAngeles, + Ascending: false, + }, + }, + { + name: "End_GroupByHour_Ascending", + start: mustParseTime("2000-10-29T00:00:00-07:00"), + end: mustParseTime("2000-10-29T03:00:00-08:00"), + points: []time.Duration{ + 1 * time.Hour, + 2 * time.Hour, + 3 * time.Hour, + }, + opt: query.IteratorOptions{ + Interval: query.Interval{ + Duration: 1 * time.Hour, + }, + Location: LosAngeles, + Ascending: true, + }, + }, + { + name: "End_GroupByHour_Descending", + start: mustParseTime("2000-10-29T00:00:00-07:00"), + end: mustParseTime("2000-10-29T03:00:00-08:00"), + points: []time.Duration{ + 3 * time.Hour, + 2 * time.Hour, + 1 * time.Hour, + }, + opt: query.IteratorOptions{ + Interval: query.Interval{ + Duration: 1 * time.Hour, + }, + Location: LosAngeles, + Ascending: 
false, + }, + }, + } { + t.Run(tt.name, func(t *testing.T) { + opt := tt.opt + opt.StartTime = tt.start.UnixNano() + opt.EndTime = tt.end.UnixNano() - 1 + + points := make([][]query.Point, 0, len(tt.points)+1) + if opt.Ascending { + points = append(points, []query.Point{ + &query.FloatPoint{ + Time: tt.start.UnixNano(), + }, + }) + } + for _, d := range tt.points { + points = append(points, []query.Point{ + &query.FloatPoint{ + Time: tt.start.Add(d).UnixNano(), + Nil: true, + }, + }) + } + if !opt.Ascending { + points = append(points, []query.Point{ + &query.FloatPoint{ + Time: tt.start.UnixNano(), + }, + }) + } + itr := query.NewFillIterator( + &FloatIterator{Points: []query.FloatPoint{{Time: tt.start.UnixNano(), Value: 0}}}, + nil, + opt, + ) + + if a, err := (Iterators{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, points) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } + }) + } +} + +// Iterators is a test wrapper for iterators. +type Iterators []query.Iterator + +// Next returns the next value from each iterator. +// Returns nil if any iterator returns a nil. +func (itrs Iterators) Next() ([]query.Point, error) { + a := make([]query.Point, len(itrs)) + for i, itr := range itrs { + switch itr := itr.(type) { + case query.FloatIterator: + fp, err := itr.Next() + if fp == nil || err != nil { + return nil, err + } + a[i] = fp + case query.IntegerIterator: + ip, err := itr.Next() + if ip == nil || err != nil { + return nil, err + } + a[i] = ip + case query.UnsignedIterator: + up, err := itr.Next() + if up == nil || err != nil { + return nil, err + } + a[i] = up + case query.StringIterator: + sp, err := itr.Next() + if sp == nil || err != nil { + return nil, err + } + a[i] = sp + case query.BooleanIterator: + bp, err := itr.Next() + if bp == nil || err != nil { + return nil, err + } + a[i] = bp + default: + panic(fmt.Sprintf("iterator type not supported: %T", itr)) + } + } + return a, nil +} + +// ReadAll reads all points from all iterators. +func (itrs Iterators) ReadAll() ([][]query.Point, error) { + var a [][]query.Point + + // Read from every iterator until a nil is encountered. + for { + points, err := itrs.Next() + if err != nil { + return nil, err + } else if points == nil { + break + } + a = append(a, query.Points(points).Clone()) + } + + // Close all iterators. 
+ query.Iterators(itrs).Close() + + return a, nil +} + +func TestIteratorOptions_Window_Interval(t *testing.T) { + opt := query.IteratorOptions{ + Interval: query.Interval{ + Duration: 10, + }, + } + + start, end := opt.Window(4) + if start != 0 { + t.Errorf("expected start to be 0, got %d", start) + } + if end != 10 { + t.Errorf("expected end to be 10, got %d", end) + } +} + +func TestIteratorOptions_Window_Offset(t *testing.T) { + opt := query.IteratorOptions{ + Interval: query.Interval{ + Duration: 10, + Offset: 8, + }, + } + + start, end := opt.Window(14) + if start != 8 { + t.Errorf("expected start to be 8, got %d", start) + } + if end != 18 { + t.Errorf("expected end to be 18, got %d", end) + } +} + +func TestIteratorOptions_Window_Default(t *testing.T) { + opt := query.IteratorOptions{ + StartTime: 0, + EndTime: 60, + } + + start, end := opt.Window(34) + if start != 0 { + t.Errorf("expected start to be 0, got %d", start) + } + if end != 61 { + t.Errorf("expected end to be 61, got %d", end) + } +} + +func TestIteratorOptions_Window_Location(t *testing.T) { + for _, tt := range []struct { + now time.Time + start, end time.Time + interval time.Duration + }{ + { + now: mustParseTime("2000-04-02T12:14:15-07:00"), + start: mustParseTime("2000-04-02T00:00:00-08:00"), + end: mustParseTime("2000-04-03T00:00:00-07:00"), + interval: 24 * time.Hour, + }, + { + now: mustParseTime("2000-04-02T01:17:12-08:00"), + start: mustParseTime("2000-04-02T00:00:00-08:00"), + end: mustParseTime("2000-04-03T00:00:00-07:00"), + interval: 24 * time.Hour, + }, + { + now: mustParseTime("2000-04-02T01:14:15-08:00"), + start: mustParseTime("2000-04-02T00:00:00-08:00"), + end: mustParseTime("2000-04-02T03:00:00-07:00"), + interval: 2 * time.Hour, + }, + { + now: mustParseTime("2000-04-02T03:17:12-07:00"), + start: mustParseTime("2000-04-02T03:00:00-07:00"), + end: mustParseTime("2000-04-02T04:00:00-07:00"), + interval: 2 * time.Hour, + }, + { + now: mustParseTime("2000-04-02T01:14:15-08:00"), + start: mustParseTime("2000-04-02T01:00:00-08:00"), + end: mustParseTime("2000-04-02T03:00:00-07:00"), + interval: 1 * time.Hour, + }, + { + now: mustParseTime("2000-04-02T03:17:12-07:00"), + start: mustParseTime("2000-04-02T03:00:00-07:00"), + end: mustParseTime("2000-04-02T04:00:00-07:00"), + interval: 1 * time.Hour, + }, + { + now: mustParseTime("2000-10-29T12:14:15-08:00"), + start: mustParseTime("2000-10-29T00:00:00-07:00"), + end: mustParseTime("2000-10-30T00:00:00-08:00"), + interval: 24 * time.Hour, + }, + { + now: mustParseTime("2000-10-29T01:17:12-07:00"), + start: mustParseTime("2000-10-29T00:00:00-07:00"), + end: mustParseTime("2000-10-30T00:00:00-08:00"), + interval: 24 * time.Hour, + }, + { + now: mustParseTime("2000-10-29T01:14:15-07:00"), + start: mustParseTime("2000-10-29T00:00:00-07:00"), + end: mustParseTime("2000-10-29T02:00:00-08:00"), + interval: 2 * time.Hour, + }, + { + now: mustParseTime("2000-10-29T03:17:12-08:00"), + start: mustParseTime("2000-10-29T02:00:00-08:00"), + end: mustParseTime("2000-10-29T04:00:00-08:00"), + interval: 2 * time.Hour, + }, + { + now: mustParseTime("2000-10-29T01:14:15-07:00"), + start: mustParseTime("2000-10-29T01:00:00-07:00"), + end: mustParseTime("2000-10-29T01:00:00-08:00"), + interval: 1 * time.Hour, + }, + { + now: mustParseTime("2000-10-29T02:17:12-07:00"), + start: mustParseTime("2000-10-29T02:00:00-07:00"), + end: mustParseTime("2000-10-29T03:00:00-07:00"), + interval: 1 * time.Hour, + }, + } { + t.Run(fmt.Sprintf("%s/%s", tt.now, tt.interval), func(t *testing.T) { 
+ opt := query.IteratorOptions{ + Location: LosAngeles, + Interval: query.Interval{ + Duration: tt.interval, + }, + } + start, end := opt.Window(tt.now.UnixNano()) + if have, want := time.Unix(0, start).In(LosAngeles), tt.start; !have.Equal(want) { + t.Errorf("unexpected start time: %s != %s", have, want) + } + if have, want := time.Unix(0, end).In(LosAngeles), tt.end; !have.Equal(want) { + t.Errorf("unexpected end time: %s != %s", have, want) + } + }) + } +} + +func TestIteratorOptions_Window_MinTime(t *testing.T) { + opt := query.IteratorOptions{ + StartTime: influxql.MinTime, + EndTime: influxql.MaxTime, + Interval: query.Interval{ + Duration: time.Hour, + }, + } + expected := time.Unix(0, influxql.MinTime).Add(time.Hour).Truncate(time.Hour) + + start, end := opt.Window(influxql.MinTime) + if start != influxql.MinTime { + t.Errorf("expected start to be %d, got %d", influxql.MinTime, start) + } + if have, want := end, expected.UnixNano(); have != want { + t.Errorf("expected end to be %d, got %d", want, have) + } +} + +func TestIteratorOptions_Window_MaxTime(t *testing.T) { + opt := query.IteratorOptions{ + StartTime: influxql.MinTime, + EndTime: influxql.MaxTime, + Interval: query.Interval{ + Duration: time.Hour, + }, + } + expected := time.Unix(0, influxql.MaxTime).Truncate(time.Hour) + + start, end := opt.Window(influxql.MaxTime) + if have, want := start, expected.UnixNano(); have != want { + t.Errorf("expected start to be %d, got %d", want, have) + } + if end != influxql.MaxTime { + t.Errorf("expected end to be %d, got %d", influxql.MaxTime, end) + } +} + +func TestIteratorOptions_SeekTime_Ascending(t *testing.T) { + opt := query.IteratorOptions{ + StartTime: 30, + EndTime: 60, + Ascending: true, + } + + time := opt.SeekTime() + if time != 30 { + t.Errorf("expected time to be 30, got %d", time) + } +} + +func TestIteratorOptions_SeekTime_Descending(t *testing.T) { + opt := query.IteratorOptions{ + StartTime: 30, + EndTime: 60, + Ascending: false, + } + + time := opt.SeekTime() + if time != 60 { + t.Errorf("expected time to be 60, got %d", time) + } +} + +func TestIteratorOptions_DerivativeInterval_Default(t *testing.T) { + opt := query.IteratorOptions{} + expected := query.Interval{Duration: time.Second} + actual := opt.DerivativeInterval() + if actual != expected { + t.Errorf("expected derivative interval to be %v, got %v", expected, actual) + } +} + +func TestIteratorOptions_DerivativeInterval_GroupBy(t *testing.T) { + opt := query.IteratorOptions{ + Interval: query.Interval{ + Duration: 10, + Offset: 2, + }, + } + expected := query.Interval{Duration: 10} + actual := opt.DerivativeInterval() + if actual != expected { + t.Errorf("expected derivative interval to be %v, got %v", expected, actual) + } +} + +func TestIteratorOptions_DerivativeInterval_Call(t *testing.T) { + opt := query.IteratorOptions{ + Expr: &influxql.Call{ + Name: "mean", + Args: []influxql.Expr{ + &influxql.VarRef{Val: "value"}, + &influxql.DurationLiteral{Val: 2 * time.Second}, + }, + }, + Interval: query.Interval{ + Duration: 10, + Offset: 2, + }, + } + expected := query.Interval{Duration: 2 * time.Second} + actual := opt.DerivativeInterval() + if actual != expected { + t.Errorf("expected derivative interval to be %v, got %v", expected, actual) + } +} + +func TestIteratorOptions_ElapsedInterval_Default(t *testing.T) { + opt := query.IteratorOptions{} + expected := query.Interval{Duration: time.Nanosecond} + actual := opt.ElapsedInterval() + if actual != expected { + t.Errorf("expected elapsed interval to be %v, 
got %v", expected, actual) + } +} + +func TestIteratorOptions_ElapsedInterval_GroupBy(t *testing.T) { + opt := query.IteratorOptions{ + Interval: query.Interval{ + Duration: 10, + Offset: 2, + }, + } + expected := query.Interval{Duration: time.Nanosecond} + actual := opt.ElapsedInterval() + if actual != expected { + t.Errorf("expected elapsed interval to be %v, got %v", expected, actual) + } +} + +func TestIteratorOptions_ElapsedInterval_Call(t *testing.T) { + opt := query.IteratorOptions{ + Expr: &influxql.Call{ + Name: "mean", + Args: []influxql.Expr{ + &influxql.VarRef{Val: "value"}, + &influxql.DurationLiteral{Val: 2 * time.Second}, + }, + }, + Interval: query.Interval{ + Duration: 10, + Offset: 2, + }, + } + expected := query.Interval{Duration: 2 * time.Second} + actual := opt.ElapsedInterval() + if actual != expected { + t.Errorf("expected elapsed interval to be %v, got %v", expected, actual) + } +} + +func TestIteratorOptions_IntegralInterval_Default(t *testing.T) { + opt := query.IteratorOptions{} + expected := query.Interval{Duration: time.Second} + actual := opt.IntegralInterval() + if actual != expected { + t.Errorf("expected default integral interval to be %v, got %v", expected, actual) + } +} + +// Ensure iterator options can be marshaled to and from a binary format. +func TestIteratorOptions_MarshalBinary(t *testing.T) { + opt := &query.IteratorOptions{ + Expr: MustParseExpr("count(value)"), + Aux: []influxql.VarRef{{Val: "a"}, {Val: "b"}, {Val: "c"}}, + Interval: query.Interval{ + Duration: 1 * time.Hour, + Offset: 20 * time.Minute, + }, + Dimensions: []string{"region", "host"}, + GroupBy: map[string]struct{}{ + "region": {}, + "host": {}, + "cluster": {}, + }, + Fill: influxql.NumberFill, + FillValue: float64(100), + Condition: MustParseExpr(`foo = 'bar'`), + StartTime: 1000, + EndTime: 2000, + Ascending: true, + Limit: 100, + Offset: 200, + SLimit: 300, + SOffset: 400, + StripName: true, + Dedupe: true, + } + + // Marshal to binary. + buf, err := opt.MarshalBinary() + if err != nil { + t.Fatal(err) + } + + // Unmarshal back to an object. + var other query.IteratorOptions + if err := other.UnmarshalBinary(buf); err != nil { + t.Fatal(err) + } else if !reflect.DeepEqual(&other, opt) { + t.Fatalf("unexpected options: %s", spew.Sdump(other)) + } +} + +// Ensure iterator can be encoded and decoded over a byte stream. +func TestIterator_EncodeDecode(t *testing.T) { + var buf bytes.Buffer + + // Create an iterator with several points & stats. + itr := &FloatIterator{ + Points: []query.FloatPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: 0}, + {Name: "mem", Tags: ParseTags("host=B"), Time: 1, Value: 10}, + }, + stats: query.IteratorStats{ + SeriesN: 2, + PointN: 0, + }, + } + + // Encode to the buffer. + enc := query.NewIteratorEncoder(&buf) + enc.StatsInterval = 100 * time.Millisecond + if err := enc.EncodeIterator(itr); err != nil { + t.Fatal(err) + } + + // Decode from the buffer. + dec := query.NewReaderIterator(context.Background(), &buf, influxql.Float, itr.Stats()) + + // Initial stats should exist immediately. + fdec := dec.(query.FloatIterator) + if stats := fdec.Stats(); !reflect.DeepEqual(stats, query.IteratorStats{SeriesN: 2, PointN: 0}) { + t.Fatalf("unexpected stats(initial): %#v", stats) + } + + // Read both points. 
+ if p, err := fdec.Next(); err != nil { + t.Fatalf("unexpected error(0): %#v", err) + } else if !reflect.DeepEqual(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: 0}) { + t.Fatalf("unexpected point(0); %#v", p) + } + if p, err := fdec.Next(); err != nil { + t.Fatalf("unexpected error(1): %#v", err) + } else if !reflect.DeepEqual(p, &query.FloatPoint{Name: "mem", Tags: ParseTags("host=B"), Time: 1, Value: 10}) { + t.Fatalf("unexpected point(1); %#v", p) + } + if p, err := fdec.Next(); err != nil { + t.Fatalf("unexpected error(eof): %#v", err) + } else if p != nil { + t.Fatalf("unexpected point(eof); %#v", p) + } +} + +// Test implementation of influxql.FloatIterator +type FloatIterator struct { + Context context.Context + Points []query.FloatPoint + Closed bool + Delay time.Duration + stats query.IteratorStats + point query.FloatPoint +} + +func (itr *FloatIterator) Stats() query.IteratorStats { return itr.stats } +func (itr *FloatIterator) Close() error { itr.Closed = true; return nil } + +// Next returns the next value and shifts it off the beginning of the points slice. +func (itr *FloatIterator) Next() (*query.FloatPoint, error) { + if len(itr.Points) == 0 || itr.Closed { + return nil, nil + } + + // If we have asked for a delay, then delay the returning of the point + // until either an (optional) context is done or the time has passed. + if itr.Delay > 0 { + var done <-chan struct{} + if itr.Context != nil { + done = itr.Context.Done() + } + + timer := time.NewTimer(itr.Delay) + select { + case <-timer.C: + case <-done: + timer.Stop() + return nil, itr.Context.Err() + } + } + v := &itr.Points[0] + itr.Points = itr.Points[1:] + + // Copy the returned point into a static point that we return. + // This actual storage engine returns a point from the same memory location + // so we need to test that the query engine does not misuse this memory. + itr.point.Name = v.Name + itr.point.Tags = v.Tags + itr.point.Time = v.Time + itr.point.Value = v.Value + itr.point.Nil = v.Nil + if len(itr.point.Aux) != len(v.Aux) { + itr.point.Aux = make([]interface{}, len(v.Aux)) + } + copy(itr.point.Aux, v.Aux) + return &itr.point, nil +} + +func FloatIterators(inputs []*FloatIterator) []query.Iterator { + itrs := make([]query.Iterator, len(inputs)) + for i := range itrs { + itrs[i] = query.Iterator(inputs[i]) + } + return itrs +} + +// Test implementation of query.IntegerIterator +type IntegerIterator struct { + Points []query.IntegerPoint + Closed bool + stats query.IteratorStats + point query.IntegerPoint +} + +func (itr *IntegerIterator) Stats() query.IteratorStats { return itr.stats } +func (itr *IntegerIterator) Close() error { itr.Closed = true; return nil } + +// Next returns the next value and shifts it off the beginning of the points slice. +func (itr *IntegerIterator) Next() (*query.IntegerPoint, error) { + if len(itr.Points) == 0 || itr.Closed { + return nil, nil + } + + v := &itr.Points[0] + itr.Points = itr.Points[1:] + + // Copy the returned point into a static point that we return. + // This actual storage engine returns a point from the same memory location + // so we need to test that the query engine does not misuse this memory. 
+ itr.point.Name = v.Name + itr.point.Tags = v.Tags + itr.point.Time = v.Time + itr.point.Value = v.Value + itr.point.Nil = v.Nil + if len(itr.point.Aux) != len(v.Aux) { + itr.point.Aux = make([]interface{}, len(v.Aux)) + } + copy(itr.point.Aux, v.Aux) + return &itr.point, nil +} + +func IntegerIterators(inputs []*IntegerIterator) []query.Iterator { + itrs := make([]query.Iterator, len(inputs)) + for i := range itrs { + itrs[i] = query.Iterator(inputs[i]) + } + return itrs +} + +// Test implementation of query.UnsignedIterator +type UnsignedIterator struct { + Points []query.UnsignedPoint + Closed bool + stats query.IteratorStats + point query.UnsignedPoint +} + +func (itr *UnsignedIterator) Stats() query.IteratorStats { return itr.stats } +func (itr *UnsignedIterator) Close() error { itr.Closed = true; return nil } + +// Next returns the next value and shifts it off the beginning of the points slice. +func (itr *UnsignedIterator) Next() (*query.UnsignedPoint, error) { + if len(itr.Points) == 0 || itr.Closed { + return nil, nil + } + + v := &itr.Points[0] + itr.Points = itr.Points[1:] + + // Copy the returned point into a static point that we return. + // This actual storage engine returns a point from the same memory location + // so we need to test that the query engine does not misuse this memory. + itr.point.Name = v.Name + itr.point.Tags = v.Tags + itr.point.Time = v.Time + itr.point.Value = v.Value + itr.point.Nil = v.Nil + if len(itr.point.Aux) != len(v.Aux) { + itr.point.Aux = make([]interface{}, len(v.Aux)) + } + copy(itr.point.Aux, v.Aux) + return &itr.point, nil +} + +func UnsignedIterators(inputs []*UnsignedIterator) []query.Iterator { + itrs := make([]query.Iterator, len(inputs)) + for i := range itrs { + itrs[i] = query.Iterator(inputs[i]) + } + return itrs +} + +// Test implementation of query.StringIterator +type StringIterator struct { + Points []query.StringPoint + Closed bool + stats query.IteratorStats + point query.StringPoint +} + +func (itr *StringIterator) Stats() query.IteratorStats { return itr.stats } +func (itr *StringIterator) Close() error { itr.Closed = true; return nil } + +// Next returns the next value and shifts it off the beginning of the points slice. +func (itr *StringIterator) Next() (*query.StringPoint, error) { + if len(itr.Points) == 0 || itr.Closed { + return nil, nil + } + + v := &itr.Points[0] + itr.Points = itr.Points[1:] + + // Copy the returned point into a static point that we return. + // This actual storage engine returns a point from the same memory location + // so we need to test that the query engine does not misuse this memory. + itr.point.Name = v.Name + itr.point.Tags = v.Tags + itr.point.Time = v.Time + itr.point.Value = v.Value + itr.point.Nil = v.Nil + if len(itr.point.Aux) != len(v.Aux) { + itr.point.Aux = make([]interface{}, len(v.Aux)) + } + copy(itr.point.Aux, v.Aux) + return &itr.point, nil +} + +func StringIterators(inputs []*StringIterator) []query.Iterator { + itrs := make([]query.Iterator, len(inputs)) + for i := range itrs { + itrs[i] = query.Iterator(inputs[i]) + } + return itrs +} + +// Test implementation of query.BooleanIterator +type BooleanIterator struct { + Points []query.BooleanPoint + Closed bool + stats query.IteratorStats + point query.BooleanPoint +} + +func (itr *BooleanIterator) Stats() query.IteratorStats { return itr.stats } +func (itr *BooleanIterator) Close() error { itr.Closed = true; return nil } + +// Next returns the next value and shifts it off the beginning of the points slice. 
+func (itr *BooleanIterator) Next() (*query.BooleanPoint, error) {
+	if len(itr.Points) == 0 || itr.Closed {
+		return nil, nil
+	}
+
+	v := &itr.Points[0]
+	itr.Points = itr.Points[1:]
+
+	// Copy the returned point into a static point that we return.
+	// This actual storage engine returns a point from the same memory location
+	// so we need to test that the query engine does not misuse this memory.
+	itr.point.Name = v.Name
+	itr.point.Tags = v.Tags
+	itr.point.Time = v.Time
+	itr.point.Value = v.Value
+	itr.point.Nil = v.Nil
+	if len(itr.point.Aux) != len(v.Aux) {
+		itr.point.Aux = make([]interface{}, len(v.Aux))
+	}
+	copy(itr.point.Aux, v.Aux)
+	return &itr.point, nil
+}
+
+func BooleanIterators(inputs []*BooleanIterator) []query.Iterator {
+	itrs := make([]query.Iterator, len(inputs))
+	for i := range itrs {
+		itrs[i] = query.Iterator(inputs[i])
+	}
+	return itrs
+}
+
+// MustParseSelectStatement parses a select statement. Panic on error.
+func MustParseSelectStatement(s string) *influxql.SelectStatement {
+	stmt, err := influxql.NewParser(strings.NewReader(s)).ParseStatement()
+	if err != nil {
+		panic(err)
+	}
+	return stmt.(*influxql.SelectStatement)
+}
+
+// MustParseExpr parses an expression. Panic on error.
+func MustParseExpr(s string) influxql.Expr {
+	expr, err := influxql.NewParser(strings.NewReader(s)).ParseExpr()
+	if err != nil {
+		panic(err)
+	}
+	return expr
+}
+
+// mustParseTime parses an ISO 8601 string. Panic on error.
+func mustParseTime(s string) time.Time {
+	t, err := time.Parse(time.RFC3339, s)
+	if err != nil {
+		panic(err.Error())
+	}
+	return t
+}
+
+func mustLoadLocation(s string) *time.Location {
+	l, err := time.LoadLocation(s)
+	if err != nil {
+		panic(err)
+	}
+	return l
+}
+
+var LosAngeles = mustLoadLocation("America/Los_Angeles")
diff --git a/vendor/github.com/influxdata/influxdb/query/linear.go b/vendor/github.com/influxdata/influxdb/query/linear.go
new file mode 100644
index 0000000..0da38f9
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/query/linear.go
@@ -0,0 +1,31 @@
+package query
+
+// linearFloat computes the slope of the line between the points (previousTime, previousValue) and (nextTime, nextValue)
+// and returns the value of the point on the line with time windowTime
+// y = mx + b
+func linearFloat(windowTime, previousTime, nextTime int64, previousValue, nextValue float64) float64 {
+	m := (nextValue - previousValue) / float64(nextTime-previousTime) // the slope of the line
+	x := float64(windowTime - previousTime)                           // how far into the interval we are
+	b := previousValue
+	return m*x + b
+}
+
+// linearInteger computes the slope of the line between the points (previousTime, previousValue) and (nextTime, nextValue)
+// and returns the value of the point on the line with time windowTime
+// y = mx + b
+func linearInteger(windowTime, previousTime, nextTime int64, previousValue, nextValue int64) int64 {
+	m := float64(nextValue-previousValue) / float64(nextTime-previousTime) // the slope of the line
+	x := float64(windowTime - previousTime)                                // how far into the interval we are
+	b := float64(previousValue)
+	return int64(m*x + b)
+}
+
+// linearUnsigned computes the slope of the line between the points (previousTime, previousValue) and (nextTime, nextValue)
+// and returns the value of the point on the line with time windowTime
+// y = mx + b
+func linearUnsigned(windowTime, previousTime, nextTime int64, previousValue, nextValue uint64) uint64 {
+	m := float64(nextValue-previousValue) / float64(nextTime-previousTime)
// the slope of the line + x := float64(windowTime - previousTime) // how far into the interval we are + b := float64(previousValue) + return uint64(m*x + b) +} diff --git a/vendor/github.com/influxdata/influxdb/query/math.go b/vendor/github.com/influxdata/influxdb/query/math.go new file mode 100644 index 0000000..7b9219c --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/query/math.go @@ -0,0 +1,243 @@ +package query + +import ( + "fmt" + "math" + + "github.com/influxdata/influxql" +) + +func isMathFunction(call *influxql.Call) bool { + switch call.Name { + case "abs", "sin", "cos", "tan", "asin", "acos", "atan", "atan2", "exp", "log", "ln", "log2", "log10", "sqrt", "pow", "floor", "ceil", "round": + return true + } + return false +} + +type MathTypeMapper struct{} + +func (MathTypeMapper) MapType(measurement *influxql.Measurement, field string) influxql.DataType { + return influxql.Unknown +} + +func (MathTypeMapper) CallType(name string, args []influxql.DataType) (influxql.DataType, error) { + switch name { + case "sin", "cos", "tan", "atan", "exp", "log", "ln", "log2", "log10", "sqrt": + var arg0 influxql.DataType + if len(args) > 0 { + arg0 = args[0] + } + switch arg0 { + case influxql.Float, influxql.Integer, influxql.Unsigned, influxql.Unknown: + return influxql.Float, nil + default: + return influxql.Unknown, fmt.Errorf("invalid argument type for the first argument in %s(): %s", name, arg0) + } + case "asin", "acos": + var arg0 influxql.DataType + if len(args) > 0 { + arg0 = args[0] + } + switch arg0 { + case influxql.Float, influxql.Unknown: + return influxql.Float, nil + default: + return influxql.Unknown, fmt.Errorf("invalid argument type for the first argument in %s(): %s", name, arg0) + } + case "atan2", "pow": + var arg0, arg1 influxql.DataType + if len(args) > 0 { + arg0 = args[0] + } + if len(args) > 1 { + arg1 = args[1] + } + + switch arg0 { + case influxql.Float, influxql.Integer, influxql.Unsigned, influxql.Unknown: + // Pass through to verify the second argument. 
+ default: + return influxql.Unknown, fmt.Errorf("invalid argument type for the first argument in %s(): %s", name, arg0) + } + + switch arg1 { + case influxql.Float, influxql.Integer, influxql.Unsigned, influxql.Unknown: + return influxql.Float, nil + default: + return influxql.Unknown, fmt.Errorf("invalid argument type for the second argument in %s(): %s", name, arg1) + } + case "abs", "floor", "ceil", "round": + var arg0 influxql.DataType + if len(args) > 0 { + arg0 = args[0] + } + switch arg0 { + case influxql.Float, influxql.Integer, influxql.Unsigned, influxql.Unknown: + return args[0], nil + default: + return influxql.Unknown, fmt.Errorf("invalid argument type for the first argument in %s(): %s", name, arg0) + } + } + return influxql.Unknown, nil +} + +type MathValuer struct{} + +var _ influxql.CallValuer = MathValuer{} + +func (MathValuer) Value(key string) (interface{}, bool) { + return nil, false +} + +func (v MathValuer) Call(name string, args []interface{}) (interface{}, bool) { + if len(args) == 1 { + arg0 := args[0] + switch name { + case "abs": + switch arg0 := arg0.(type) { + case float64: + return math.Abs(arg0), true + case int64, uint64: + return arg0, true + default: + return nil, true + } + case "sin": + if arg0, ok := asFloat(arg0); ok { + return math.Sin(arg0), true + } + return nil, true + case "cos": + if arg0, ok := asFloat(arg0); ok { + return math.Cos(arg0), true + } + return nil, true + case "tan": + if arg0, ok := asFloat(arg0); ok { + return math.Tan(arg0), true + } + return nil, true + case "floor": + switch arg0 := arg0.(type) { + case float64: + return math.Floor(arg0), true + case int64, uint64: + return arg0, true + default: + return nil, true + } + case "ceil": + switch arg0 := arg0.(type) { + case float64: + return math.Ceil(arg0), true + case int64, uint64: + return arg0, true + default: + return nil, true + } + case "round": + switch arg0 := arg0.(type) { + case float64: + return round(arg0), true + case int64, uint64: + return arg0, true + default: + return nil, true + } + case "asin": + if arg0, ok := asFloat(arg0); ok { + return math.Asin(arg0), true + } + return nil, true + case "acos": + if arg0, ok := asFloat(arg0); ok { + return math.Acos(arg0), true + } + return nil, true + case "atan": + if arg0, ok := asFloat(arg0); ok { + return math.Atan(arg0), true + } + return nil, true + case "exp": + if arg0, ok := asFloat(arg0); ok { + return math.Exp(arg0), true + } + return nil, true + case "ln": + if arg0, ok := asFloat(arg0); ok { + return math.Log(arg0), true + } + return nil, true + case "log2": + if arg0, ok := asFloat(arg0); ok { + return math.Log2(arg0), true + } + return nil, true + case "log10": + if arg0, ok := asFloat(arg0); ok { + return math.Log10(arg0), true + } + return nil, true + case "sqrt": + if arg0, ok := asFloat(arg0); ok { + return math.Sqrt(arg0), true + } + return nil, true + } + } else if len(args) == 2 { + arg0, arg1 := args[0], args[1] + switch name { + case "atan2": + if arg0, arg1, ok := asFloats(arg0, arg1); ok { + return math.Atan2(arg0, arg1), true + } + return nil, true + case "log": + if arg0, arg1, ok := asFloats(arg0, arg1); ok { + return math.Log(arg0) / math.Log(arg1), true + } + return nil, true + case "pow": + if arg0, arg1, ok := asFloats(arg0, arg1); ok { + return math.Pow(arg0, arg1), true + } + return nil, true + } + } + return nil, false +} + +func asFloat(x interface{}) (float64, bool) { + switch arg0 := x.(type) { + case float64: + return arg0, true + case int64: + return float64(arg0), true + case 
uint64: + return float64(arg0), true + default: + return 0, false + } +} + +func asFloats(x, y interface{}) (float64, float64, bool) { + arg0, ok := asFloat(x) + if !ok { + return 0, 0, false + } + arg1, ok := asFloat(y) + if !ok { + return 0, 0, false + } + return arg0, arg1, true +} + +func round(x float64) float64 { + t := math.Trunc(x) + if math.Abs(x-t) >= 0.5 { + return t + math.Copysign(1, x) + } + return t +} diff --git a/vendor/github.com/influxdata/influxdb/query/math_test.go b/vendor/github.com/influxdata/influxdb/query/math_test.go new file mode 100644 index 0000000..3522278 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/query/math_test.go @@ -0,0 +1,212 @@ +package query_test + +import ( + "math" + "testing" + + "github.com/influxdata/influxdb/query" + "github.com/influxdata/influxql" +) + +func TestMath_TypeMapper(t *testing.T) { + for _, tt := range []struct { + s string + typ influxql.DataType + err bool + }{ + {s: `abs(f::float)`, typ: influxql.Float}, + {s: `abs(i::integer)`, typ: influxql.Integer}, + {s: `abs(u::unsigned)`, typ: influxql.Unsigned}, + {s: `abs(s::string)`, err: true}, + {s: `abs(b::boolean)`, err: true}, + {s: `sin(f::float)`, typ: influxql.Float}, + {s: `sin(i::integer)`, typ: influxql.Float}, + {s: `sin(u::unsigned)`, typ: influxql.Float}, + {s: `sin(s::string)`, err: true}, + {s: `sin(b::boolean)`, err: true}, + {s: `cos(f::float)`, typ: influxql.Float}, + {s: `cos(i::integer)`, typ: influxql.Float}, + {s: `cos(u::unsigned)`, typ: influxql.Float}, + {s: `cos(s::string)`, err: true}, + {s: `cos(b::boolean)`, err: true}, + {s: `tan(f::float)`, typ: influxql.Float}, + {s: `tan(i::integer)`, typ: influxql.Float}, + {s: `tan(u::unsigned)`, typ: influxql.Float}, + {s: `tan(s::string)`, err: true}, + {s: `tan(b::boolean)`, err: true}, + {s: `asin(f::float)`, typ: influxql.Float}, + {s: `asin(i::integer)`, err: true}, + {s: `asin(u::unsigned)`, err: true}, + {s: `asin(s::string)`, err: true}, + {s: `asin(b::boolean)`, err: true}, + {s: `acos(f::float)`, typ: influxql.Float}, + {s: `acos(i::integer)`, err: true}, + {s: `acos(u::unsigned)`, err: true}, + {s: `acos(s::string)`, err: true}, + {s: `acos(b::boolean)`, err: true}, + {s: `atan(f::float)`, typ: influxql.Float}, + {s: `atan(i::integer)`, typ: influxql.Float}, + {s: `atan(u::unsigned)`, typ: influxql.Float}, + {s: `atan(s::string)`, err: true}, + {s: `atan(b::boolean)`, err: true}, + {s: `atan2(y::float, x::float)`, typ: influxql.Float}, + {s: `atan2(y::integer, x::float)`, typ: influxql.Float}, + {s: `atan2(y::unsigned, x::float)`, typ: influxql.Float}, + {s: `atan2(y::string, x::float)`, err: true}, + {s: `atan2(y::boolean, x::float)`, err: true}, + {s: `atan2(y::float, x::float)`, typ: influxql.Float}, + {s: `atan2(y::float, x::integer)`, typ: influxql.Float}, + {s: `atan2(y::float, x::unsigned)`, typ: influxql.Float}, + {s: `atan2(y::float, x::string)`, err: true}, + {s: `atan2(y::float, x::boolean)`, err: true}, + {s: `exp(f::float)`, typ: influxql.Float}, + {s: `exp(i::integer)`, typ: influxql.Float}, + {s: `exp(u::unsigned)`, typ: influxql.Float}, + {s: `exp(s::string)`, err: true}, + {s: `exp(b::boolean)`, err: true}, + {s: `log(f::float)`, typ: influxql.Float}, + {s: `log(i::integer)`, typ: influxql.Float}, + {s: `log(u::unsigned)`, typ: influxql.Float}, + {s: `log(s::string)`, err: true}, + {s: `log(b::boolean)`, err: true}, + {s: `ln(f::float)`, typ: influxql.Float}, + {s: `ln(i::integer)`, typ: influxql.Float}, + {s: `ln(u::unsigned)`, typ: influxql.Float}, + {s: `ln(s::string)`, 
err: true}, + {s: `ln(b::boolean)`, err: true}, + {s: `log2(f::float)`, typ: influxql.Float}, + {s: `log2(i::integer)`, typ: influxql.Float}, + {s: `log2(u::unsigned)`, typ: influxql.Float}, + {s: `log2(s::string)`, err: true}, + {s: `log2(b::boolean)`, err: true}, + {s: `log10(f::float)`, typ: influxql.Float}, + {s: `log10(i::integer)`, typ: influxql.Float}, + {s: `log10(u::unsigned)`, typ: influxql.Float}, + {s: `log10(s::string)`, err: true}, + {s: `log10(b::boolean)`, err: true}, + {s: `sqrt(f::float)`, typ: influxql.Float}, + {s: `sqrt(i::integer)`, typ: influxql.Float}, + {s: `sqrt(u::unsigned)`, typ: influxql.Float}, + {s: `sqrt(s::string)`, err: true}, + {s: `sqrt(b::boolean)`, err: true}, + {s: `pow(y::float, x::float)`, typ: influxql.Float}, + {s: `pow(y::integer, x::float)`, typ: influxql.Float}, + {s: `pow(y::unsigned, x::float)`, typ: influxql.Float}, + {s: `pow(y::string, x::string)`, err: true}, + {s: `pow(y::boolean, x::boolean)`, err: true}, + {s: `pow(y::float, x::float)`, typ: influxql.Float}, + {s: `pow(y::float, x::integer)`, typ: influxql.Float}, + {s: `pow(y::float, x::unsigned)`, typ: influxql.Float}, + {s: `pow(y::float, x::string)`, err: true}, + {s: `pow(y::float, x::boolean)`, err: true}, + {s: `floor(f::float)`, typ: influxql.Float}, + {s: `floor(i::integer)`, typ: influxql.Integer}, + {s: `floor(u::unsigned)`, typ: influxql.Unsigned}, + {s: `floor(s::string)`, err: true}, + {s: `floor(b::boolean)`, err: true}, + {s: `ceil(f::float)`, typ: influxql.Float}, + {s: `ceil(i::integer)`, typ: influxql.Integer}, + {s: `ceil(u::unsigned)`, typ: influxql.Unsigned}, + {s: `ceil(s::string)`, err: true}, + {s: `ceil(b::boolean)`, err: true}, + {s: `round(f::float)`, typ: influxql.Float}, + {s: `round(i::integer)`, typ: influxql.Integer}, + {s: `round(u::unsigned)`, typ: influxql.Unsigned}, + {s: `round(s::string)`, err: true}, + {s: `round(b::boolean)`, err: true}, + } { + t.Run(tt.s, func(t *testing.T) { + expr := MustParseExpr(tt.s) + + typmap := influxql.TypeValuerEval{ + TypeMapper: query.MathTypeMapper{}, + } + if got, err := typmap.EvalType(expr); err != nil { + if !tt.err { + t.Errorf("unexpected error: %s", err) + } + } else if tt.err { + t.Error("expected error") + } else if want := tt.typ; got != want { + t.Errorf("unexpected type:\n\t-: \"%s\"\n\t+: \"%s\"", want, got) + } + }) + } +} + +func TestMathValuer_Call(t *testing.T) { + type values map[string]interface{} + for _, tt := range []struct { + s string + values values + exp interface{} + }{ + {s: `abs(f)`, values: values{"f": float64(2)}, exp: float64(2)}, + {s: `abs(f)`, values: values{"f": float64(-2)}, exp: float64(2)}, + {s: `abs(i)`, values: values{"i": int64(2)}, exp: int64(2)}, + {s: `abs(i)`, values: values{"i": int64(-2)}, exp: int64(-2)}, + {s: `abs(u)`, values: values{"u": uint64(2)}, exp: uint64(2)}, + {s: `sin(f)`, values: values{"f": math.Pi / 2}, exp: math.Sin(math.Pi / 2)}, + {s: `sin(i)`, values: values{"i": int64(2)}, exp: math.Sin(2)}, + {s: `sin(u)`, values: values{"u": uint64(2)}, exp: math.Sin(2)}, + {s: `asin(f)`, values: values{"f": float64(0.5)}, exp: math.Asin(0.5)}, + {s: `cos(f)`, values: values{"f": math.Pi / 2}, exp: math.Cos(math.Pi / 2)}, + {s: `cos(i)`, values: values{"i": int64(2)}, exp: math.Cos(2)}, + {s: `cos(u)`, values: values{"u": uint64(2)}, exp: math.Cos(2)}, + {s: `acos(f)`, values: values{"f": float64(0.5)}, exp: math.Acos(0.5)}, + {s: `tan(f)`, values: values{"f": math.Pi / 2}, exp: math.Tan(math.Pi / 2)}, + {s: `tan(i)`, values: values{"i": int64(2)}, exp: 
math.Tan(2)}, + {s: `tan(u)`, values: values{"u": uint64(2)}, exp: math.Tan(2)}, + {s: `atan(f)`, values: values{"f": float64(2)}, exp: math.Atan(2)}, + {s: `atan(i)`, values: values{"i": int64(2)}, exp: math.Atan(2)}, + {s: `atan(u)`, values: values{"u": uint64(2)}, exp: math.Atan(2)}, + {s: `atan2(y, x)`, values: values{"y": float64(2), "x": float64(3)}, exp: math.Atan2(2, 3)}, + {s: `atan2(y, x)`, values: values{"y": int64(2), "x": int64(3)}, exp: math.Atan2(2, 3)}, + {s: `atan2(y, x)`, values: values{"y": uint64(2), "x": uint64(3)}, exp: math.Atan2(2, 3)}, + {s: `floor(f)`, values: values{"f": float64(2.5)}, exp: float64(2)}, + {s: `floor(i)`, values: values{"i": int64(2)}, exp: int64(2)}, + {s: `floor(u)`, values: values{"u": uint64(2)}, exp: uint64(2)}, + {s: `ceil(f)`, values: values{"f": float64(2.5)}, exp: float64(3)}, + {s: `ceil(i)`, values: values{"i": int64(2)}, exp: int64(2)}, + {s: `ceil(u)`, values: values{"u": uint64(2)}, exp: uint64(2)}, + {s: `round(f)`, values: values{"f": float64(2.4)}, exp: float64(2)}, + {s: `round(f)`, values: values{"f": float64(2.6)}, exp: float64(3)}, + {s: `round(i)`, values: values{"i": int64(2)}, exp: int64(2)}, + {s: `round(u)`, values: values{"u": uint64(2)}, exp: uint64(2)}, + {s: `exp(f)`, values: values{"f": float64(3)}, exp: math.Exp(3)}, + {s: `exp(i)`, values: values{"i": int64(3)}, exp: math.Exp(3)}, + {s: `exp(u)`, values: values{"u": uint64(3)}, exp: math.Exp(3)}, + {s: `log(f, 8)`, values: values{"f": float64(3)}, exp: math.Log(3) / math.Log(8)}, + {s: `log(i, 8)`, values: values{"i": int64(3)}, exp: math.Log(3) / math.Log(8)}, + {s: `log(u, 8)`, values: values{"u": uint64(3)}, exp: math.Log(3) / math.Log(8)}, + {s: `ln(f)`, values: values{"f": float64(3)}, exp: math.Log(3)}, + {s: `ln(i)`, values: values{"i": int64(3)}, exp: math.Log(3)}, + {s: `ln(u)`, values: values{"u": uint64(3)}, exp: math.Log(3)}, + {s: `log2(f)`, values: values{"f": float64(3)}, exp: math.Log2(3)}, + {s: `log2(i)`, values: values{"i": int64(3)}, exp: math.Log2(3)}, + {s: `log2(u)`, values: values{"u": uint64(3)}, exp: math.Log2(3)}, + {s: `log10(f)`, values: values{"f": float64(3)}, exp: math.Log10(3)}, + {s: `log10(i)`, values: values{"i": int64(3)}, exp: math.Log10(3)}, + {s: `log10(u)`, values: values{"u": uint64(3)}, exp: math.Log10(3)}, + {s: `sqrt(f)`, values: values{"f": float64(3)}, exp: math.Sqrt(3)}, + {s: `sqrt(i)`, values: values{"i": int64(3)}, exp: math.Sqrt(3)}, + {s: `sqrt(u)`, values: values{"u": uint64(3)}, exp: math.Sqrt(3)}, + {s: `pow(f, 2)`, values: values{"f": float64(4)}, exp: math.Pow(4, 2)}, + {s: `pow(i, 2)`, values: values{"i": int64(4)}, exp: math.Pow(4, 2)}, + {s: `pow(u, 2)`, values: values{"u": uint64(4)}, exp: math.Pow(4, 2)}, + } { + t.Run(tt.s, func(t *testing.T) { + expr := MustParseExpr(tt.s) + + valuer := influxql.ValuerEval{ + Valuer: influxql.MultiValuer( + influxql.MapValuer(tt.values), + query.MathValuer{}, + ), + } + if got, want := valuer.Eval(expr), tt.exp; got != want { + t.Errorf("unexpected value: %v != %v", want, got) + } + }) + } +} diff --git a/vendor/github.com/influxdata/influxdb/query/monitor.go b/vendor/github.com/influxdata/influxdb/query/monitor.go new file mode 100644 index 0000000..8e76be7 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/query/monitor.go @@ -0,0 +1,48 @@ +package query + +import ( + "context" + "time" +) + +// MonitorFunc is a function that will be called to check if a query +// is currently healthy. 
If the query needs to be interrupted for some reason, +// the error should be returned by this function. +type MonitorFunc func(<-chan struct{}) error + +// Monitor monitors the status of a query and returns whether the query should +// be aborted with an error. +type Monitor interface { + // Monitor starts a new goroutine that will monitor a query. The function + // will be passed in a channel to signal when the query has been finished + // normally. If the function returns with an error and the query is still + // running, the query will be terminated. + Monitor(fn MonitorFunc) +} + +// MonitorFromContext returns a Monitor embedded within the Context +// if one exists. +func MonitorFromContext(ctx context.Context) Monitor { + v, _ := ctx.Value(monitorContextKey).(Monitor) + return v +} + +// PointLimitMonitor is a query monitor that exits when the number of points +// emitted exceeds a threshold. +func PointLimitMonitor(cur Cursor, interval time.Duration, limit int) MonitorFunc { + return func(closing <-chan struct{}) error { + ticker := time.NewTicker(interval) + defer ticker.Stop() + for { + select { + case <-ticker.C: + stats := cur.Stats() + if stats.PointN >= limit { + return ErrMaxSelectPointsLimitExceeded(stats.PointN, limit) + } + case <-closing: + return nil + } + } + } +} diff --git a/vendor/github.com/influxdata/influxdb/query/monitor_test.go b/vendor/github.com/influxdata/influxdb/query/monitor_test.go new file mode 100644 index 0000000..8175bf7 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/query/monitor_test.go @@ -0,0 +1,61 @@ +package query_test + +import ( + "context" + "testing" + "time" + + "github.com/influxdata/influxdb/query" + "github.com/influxdata/influxql" +) + +func TestPointLimitMonitor(t *testing.T) { + t.Parallel() + + stmt := MustParseSelectStatement(`SELECT mean(value) FROM cpu`) + + // Create a new task manager so we can use the query task as a monitor. + taskManager := query.NewTaskManager() + ctx, detach, err := taskManager.AttachQuery(&influxql.Query{ + Statements: []influxql.Statement{stmt}, + }, query.ExecutionOptions{}, nil) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + defer detach() + + shardMapper := ShardMapper{ + MapShardsFn: func(sources influxql.Sources, t influxql.TimeRange) query.ShardGroup { + return &ShardGroup{ + CreateIteratorFn: func(ctx context.Context, m *influxql.Measurement, opt query.IteratorOptions) (query.Iterator, error) { + return &FloatIterator{ + Points: []query.FloatPoint{ + {Name: "cpu", Value: 35}, + }, + Context: ctx, + Delay: 2 * time.Second, + stats: query.IteratorStats{ + PointN: 10, + }, + }, nil + }, + Fields: map[string]influxql.DataType{ + "value": influxql.Float, + }, + } + }, + } + + cur, err := query.Select(ctx, stmt, &shardMapper, query.SelectOptions{ + MaxPointN: 1, + }) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if err := query.DrainCursor(cur); err == nil { + t.Fatalf("expected an error") + } else if got, want := err.Error(), "max-select-point limit exceeed: (10/1)"; got != want { + t.Fatalf("unexpected error: got=%v want=%v", got, want) + } +} diff --git a/vendor/github.com/influxdata/influxdb/query/neldermead/neldermead.go b/vendor/github.com/influxdata/influxdb/query/neldermead/neldermead.go new file mode 100644 index 0000000..f2e628d --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/query/neldermead/neldermead.go @@ -0,0 +1,239 @@ +// Package neldermead is an implementation of the Nelder-Mead optimization method. +// Based on work by Michael F. 
Hutt: http://www.mikehutt.com/neldermead.html +package neldermead + +import "math" + +const ( + defaultMaxIterations = 1000 + // reflection coefficient + defaultAlpha = 1.0 + // contraction coefficient + defaultBeta = 0.5 + // expansion coefficient + defaultGamma = 2.0 +) + +// Optimizer represents the parameters to the Nelder-Mead simplex method. +type Optimizer struct { + // Maximum number of iterations. + MaxIterations int + // Reflection coefficient. + Alpha, + // Contraction coefficient. + Beta, + // Expansion coefficient. + Gamma float64 +} + +// New returns a new instance of Optimizer with all values set to the defaults. +func New() *Optimizer { + return &Optimizer{ + MaxIterations: defaultMaxIterations, + Alpha: defaultAlpha, + Beta: defaultBeta, + Gamma: defaultGamma, + } +} + +// Optimize applies the Nelder-Mead simplex method with the Optimizer's settings. +func (o *Optimizer) Optimize( + objfunc func([]float64) float64, + start []float64, + epsilon, + scale float64, +) (float64, []float64) { + n := len(start) + + //holds vertices of simplex + v := make([][]float64, n+1) + for i := range v { + v[i] = make([]float64, n) + } + + //value of function at each vertex + f := make([]float64, n+1) + + //reflection - coordinates + vr := make([]float64, n) + + //expansion - coordinates + ve := make([]float64, n) + + //contraction - coordinates + vc := make([]float64, n) + + //centroid - coordinates + vm := make([]float64, n) + + // create the initial simplex + // assume one of the vertices is 0,0 + + pn := scale * (math.Sqrt(float64(n+1)) - 1 + float64(n)) / (float64(n) * math.Sqrt(2)) + qn := scale * (math.Sqrt(float64(n+1)) - 1) / (float64(n) * math.Sqrt(2)) + + for i := 0; i < n; i++ { + v[0][i] = start[i] + } + + for i := 1; i <= n; i++ { + for j := 0; j < n; j++ { + if i-1 == j { + v[i][j] = pn + start[j] + } else { + v[i][j] = qn + start[j] + } + } + } + + // find the initial function values + for j := 0; j <= n; j++ { + f[j] = objfunc(v[j]) + } + + // begin the main loop of the minimization + for itr := 1; itr <= o.MaxIterations; itr++ { + + // find the indexes of the largest and smallest values + vg := 0 + vs := 0 + for i := 0; i <= n; i++ { + if f[i] > f[vg] { + vg = i + } + if f[i] < f[vs] { + vs = i + } + } + // find the index of the second largest value + vh := vs + for i := 0; i <= n; i++ { + if f[i] > f[vh] && f[i] < f[vg] { + vh = i + } + } + + // calculate the centroid + for i := 0; i <= n-1; i++ { + cent := 0.0 + for m := 0; m <= n; m++ { + if m != vg { + cent += v[m][i] + } + } + vm[i] = cent / float64(n) + } + + // reflect vg to new vertex vr + for i := 0; i <= n-1; i++ { + vr[i] = vm[i] + o.Alpha*(vm[i]-v[vg][i]) + } + + // value of function at reflection point + fr := objfunc(vr) + + if fr < f[vh] && fr >= f[vs] { + for i := 0; i <= n-1; i++ { + v[vg][i] = vr[i] + } + f[vg] = fr + } + + // investigate a step further in this direction + if fr < f[vs] { + for i := 0; i <= n-1; i++ { + ve[i] = vm[i] + o.Gamma*(vr[i]-vm[i]) + } + + // value of function at expansion point + fe := objfunc(ve) + + // by making fe < fr as opposed to fe < f[vs], + // Rosenbrocks function takes 63 iterations as opposed + // to 64 when using double variables. 
+ + if fe < fr { + for i := 0; i <= n-1; i++ { + v[vg][i] = ve[i] + } + f[vg] = fe + } else { + for i := 0; i <= n-1; i++ { + v[vg][i] = vr[i] + } + f[vg] = fr + } + } + + // check to see if a contraction is necessary + if fr >= f[vh] { + if fr < f[vg] && fr >= f[vh] { + // perform outside contraction + for i := 0; i <= n-1; i++ { + vc[i] = vm[i] + o.Beta*(vr[i]-vm[i]) + } + } else { + // perform inside contraction + for i := 0; i <= n-1; i++ { + vc[i] = vm[i] - o.Beta*(vm[i]-v[vg][i]) + } + } + + // value of function at contraction point + fc := objfunc(vc) + + if fc < f[vg] { + for i := 0; i <= n-1; i++ { + v[vg][i] = vc[i] + } + f[vg] = fc + } else { + // at this point the contraction is not successful, + // we must halve the distance from vs to all the + // vertices of the simplex and then continue. + + for row := 0; row <= n; row++ { + if row != vs { + for i := 0; i <= n-1; i++ { + v[row][i] = v[vs][i] + (v[row][i]-v[vs][i])/2.0 + } + } + } + f[vg] = objfunc(v[vg]) + f[vh] = objfunc(v[vh]) + } + } + + // test for convergence + fsum := 0.0 + for i := 0; i <= n; i++ { + fsum += f[i] + } + favg := fsum / float64(n+1) + s := 0.0 + for i := 0; i <= n; i++ { + s += math.Pow((f[i]-favg), 2.0) / float64(n) + } + s = math.Sqrt(s) + if s < epsilon { + break + } + } + + // find the index of the smallest value + vs := 0 + for i := 0; i <= n; i++ { + if f[i] < f[vs] { + vs = i + } + } + + parameters := make([]float64, n) + for i := 0; i < n; i++ { + parameters[i] = v[vs][i] + } + + min := objfunc(v[vs]) + + return min, parameters +} diff --git a/vendor/github.com/influxdata/influxdb/query/neldermead/neldermead_test.go b/vendor/github.com/influxdata/influxdb/query/neldermead/neldermead_test.go new file mode 100644 index 0000000..fdb957c --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/query/neldermead/neldermead_test.go @@ -0,0 +1,64 @@ +package neldermead_test + +import ( + "math" + "testing" + + "github.com/influxdata/influxdb/query/neldermead" +) + +func round(num float64, precision float64) float64 { + rnum := num * math.Pow(10, precision) + var tnum float64 + if rnum < 0 { + tnum = math.Floor(rnum - 0.5) + } else { + tnum = math.Floor(rnum + 0.5) + } + rnum = tnum / math.Pow(10, precision) + return rnum +} + +func almostEqual(a, b, e float64) bool { + return math.Abs(a-b) < e +} + +func Test_Optimize(t *testing.T) { + + constraints := func(x []float64) { + for i := range x { + x[i] = round(x[i], 5) + } + } + // 100*(b-a^2)^2 + (1-a)^2 + // + // Obvious global minimum at (a,b) = (1,1) + // + // Useful visualization: + // https://www.wolframalpha.com/input/?i=minimize(100*(b-a%5E2)%5E2+%2B+(1-a)%5E2) + f := func(x []float64) float64 { + constraints(x) + // a = x[0] + // b = x[1] + return 100*(x[1]-x[0]*x[0])*(x[1]-x[0]*x[0]) + (1.0-x[0])*(1.0-x[0]) + } + + start := []float64{-1.2, 1.0} + + opt := neldermead.New() + epsilon := 1e-5 + min, parameters := opt.Optimize(f, start, epsilon, 1) + + if !almostEqual(min, 0, epsilon) { + t.Errorf("unexpected min: got %f exp 0", min) + } + + if !almostEqual(parameters[0], 1, 1e-2) { + t.Errorf("unexpected parameters[0]: got %f exp 1", parameters[0]) + } + + if !almostEqual(parameters[1], 1, 1e-2) { + t.Errorf("unexpected parameters[1]: got %f exp 1", parameters[1]) + } + +} diff --git a/vendor/github.com/influxdata/influxdb/query/point.gen.go b/vendor/github.com/influxdata/influxdb/query/point.gen.go new file mode 100644 index 0000000..26daff4 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/query/point.gen.go @@ -0,0 +1,1139 @@ +// 
Generated by tmpl +// https://github.com/benbjohnson/tmpl +// +// DO NOT EDIT! +// Source: point.gen.go.tmpl + +package query + +import ( + "context" + "encoding/binary" + "io" + + "github.com/gogo/protobuf/proto" + internal "github.com/influxdata/influxdb/query/internal" +) + +// FloatPoint represents a point with a float64 value. +// DO NOT ADD ADDITIONAL FIELDS TO THIS STRUCT. +// See TestPoint_Fields in influxql/point_test.go for more details. +type FloatPoint struct { + Name string + Tags Tags + + Time int64 + Value float64 + Aux []interface{} + + // Total number of points that were combined into this point from an aggregate. + // If this is zero, the point is not the result of an aggregate function. + Aggregated uint32 + Nil bool +} + +func (v *FloatPoint) name() string { return v.Name } +func (v *FloatPoint) tags() Tags { return v.Tags } +func (v *FloatPoint) time() int64 { return v.Time } +func (v *FloatPoint) nil() bool { return v.Nil } +func (v *FloatPoint) value() interface{} { + if v.Nil { + return nil + } + return v.Value +} +func (v *FloatPoint) aux() []interface{} { return v.Aux } + +// Clone returns a copy of v. +func (v *FloatPoint) Clone() *FloatPoint { + if v == nil { + return nil + } + + other := *v + if v.Aux != nil { + other.Aux = make([]interface{}, len(v.Aux)) + copy(other.Aux, v.Aux) + } + + return &other +} + +// CopyTo makes a deep copy into the point. +func (v *FloatPoint) CopyTo(other *FloatPoint) { + other.Name, other.Tags = v.Name, v.Tags + other.Time = v.Time + other.Value, other.Nil = v.Value, v.Nil + if v.Aux != nil { + if len(other.Aux) != len(v.Aux) { + other.Aux = make([]interface{}, len(v.Aux)) + } + copy(other.Aux, v.Aux) + } +} + +func encodeFloatPoint(p *FloatPoint) *internal.Point { + return &internal.Point{ + Name: proto.String(p.Name), + Tags: proto.String(p.Tags.ID()), + Time: proto.Int64(p.Time), + Nil: proto.Bool(p.Nil), + Aux: encodeAux(p.Aux), + Aggregated: proto.Uint32(p.Aggregated), + + FloatValue: proto.Float64(p.Value), + } +} + +func decodeFloatPoint(pb *internal.Point) *FloatPoint { + return &FloatPoint{ + Name: pb.GetName(), + Tags: newTagsID(pb.GetTags()), + Time: pb.GetTime(), + Nil: pb.GetNil(), + Aux: decodeAux(pb.Aux), + Aggregated: pb.GetAggregated(), + Value: pb.GetFloatValue(), + } +} + +// floatPoints represents a slice of points sortable by value. +type floatPoints []FloatPoint + +func (a floatPoints) Len() int { return len(a) } +func (a floatPoints) Less(i, j int) bool { + if a[i].Time != a[j].Time { + return a[i].Time < a[j].Time + } + return a[i].Value < a[j].Value +} +func (a floatPoints) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +// floatPointsByValue represents a slice of points sortable by value. +type floatPointsByValue []FloatPoint + +func (a floatPointsByValue) Len() int { return len(a) } + +func (a floatPointsByValue) Less(i, j int) bool { return a[i].Value < a[j].Value } + +func (a floatPointsByValue) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +// floatPointsByTime represents a slice of points sortable by value. +type floatPointsByTime []FloatPoint + +func (a floatPointsByTime) Len() int { return len(a) } +func (a floatPointsByTime) Less(i, j int) bool { return a[i].Time < a[j].Time } +func (a floatPointsByTime) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +// floatPointByFunc represents a slice of points sortable by a function. 
+type floatPointsByFunc struct { + points []FloatPoint + cmp func(a, b *FloatPoint) bool +} + +func (a *floatPointsByFunc) Len() int { return len(a.points) } +func (a *floatPointsByFunc) Less(i, j int) bool { return a.cmp(&a.points[i], &a.points[j]) } +func (a *floatPointsByFunc) Swap(i, j int) { a.points[i], a.points[j] = a.points[j], a.points[i] } + +func (a *floatPointsByFunc) Push(x interface{}) { + a.points = append(a.points, x.(FloatPoint)) +} + +func (a *floatPointsByFunc) Pop() interface{} { + p := a.points[len(a.points)-1] + a.points = a.points[:len(a.points)-1] + return p +} + +func floatPointsSortBy(points []FloatPoint, cmp func(a, b *FloatPoint) bool) *floatPointsByFunc { + return &floatPointsByFunc{ + points: points, + cmp: cmp, + } +} + +// FloatPointEncoder encodes FloatPoint points to a writer. +type FloatPointEncoder struct { + w io.Writer +} + +// NewFloatPointEncoder returns a new instance of FloatPointEncoder that writes to w. +func NewFloatPointEncoder(w io.Writer) *FloatPointEncoder { + return &FloatPointEncoder{w: w} +} + +// EncodeFloatPoint marshals and writes p to the underlying writer. +func (enc *FloatPointEncoder) EncodeFloatPoint(p *FloatPoint) error { + // Marshal to bytes. + buf, err := proto.Marshal(encodeFloatPoint(p)) + if err != nil { + return err + } + + // Write the length. + if err := binary.Write(enc.w, binary.BigEndian, uint32(len(buf))); err != nil { + return err + } + + // Write the encoded point. + if _, err := enc.w.Write(buf); err != nil { + return err + } + return nil +} + +// FloatPointDecoder decodes FloatPoint points from a reader. +type FloatPointDecoder struct { + r io.Reader + stats IteratorStats + ctx context.Context +} + +// NewFloatPointDecoder returns a new instance of FloatPointDecoder that reads from r. +func NewFloatPointDecoder(ctx context.Context, r io.Reader) *FloatPointDecoder { + return &FloatPointDecoder{r: r, ctx: ctx} +} + +// Stats returns iterator stats embedded within the stream. +func (dec *FloatPointDecoder) Stats() IteratorStats { return dec.stats } + +// DecodeFloatPoint reads from the underlying reader and unmarshals into p. +func (dec *FloatPointDecoder) DecodeFloatPoint(p *FloatPoint) error { + for { + // Read length. + var sz uint32 + if err := binary.Read(dec.r, binary.BigEndian, &sz); err != nil { + return err + } + + // Read point data. + buf := make([]byte, sz) + if _, err := io.ReadFull(dec.r, buf); err != nil { + return err + } + + // Unmarshal into point. + var pb internal.Point + if err := proto.Unmarshal(buf, &pb); err != nil { + return err + } + + // If the point contains stats then read stats and retry. + if pb.Stats != nil { + dec.stats = decodeIteratorStats(pb.Stats) + continue + } + + if len(pb.Trace) > 0 { + var err error + err = decodeIteratorTrace(dec.ctx, pb.Trace) + if err != nil { + return err + } + continue + } + + // Decode into point object. + *p = *decodeFloatPoint(&pb) + + return nil + } +} + +// IntegerPoint represents a point with a int64 value. +// DO NOT ADD ADDITIONAL FIELDS TO THIS STRUCT. +// See TestPoint_Fields in influxql/point_test.go for more details. +type IntegerPoint struct { + Name string + Tags Tags + + Time int64 + Value int64 + Aux []interface{} + + // Total number of points that were combined into this point from an aggregate. + // If this is zero, the point is not the result of an aggregate function. 
+ Aggregated uint32 + Nil bool +} + +func (v *IntegerPoint) name() string { return v.Name } +func (v *IntegerPoint) tags() Tags { return v.Tags } +func (v *IntegerPoint) time() int64 { return v.Time } +func (v *IntegerPoint) nil() bool { return v.Nil } +func (v *IntegerPoint) value() interface{} { + if v.Nil { + return nil + } + return v.Value +} +func (v *IntegerPoint) aux() []interface{} { return v.Aux } + +// Clone returns a copy of v. +func (v *IntegerPoint) Clone() *IntegerPoint { + if v == nil { + return nil + } + + other := *v + if v.Aux != nil { + other.Aux = make([]interface{}, len(v.Aux)) + copy(other.Aux, v.Aux) + } + + return &other +} + +// CopyTo makes a deep copy into the point. +func (v *IntegerPoint) CopyTo(other *IntegerPoint) { + other.Name, other.Tags = v.Name, v.Tags + other.Time = v.Time + other.Value, other.Nil = v.Value, v.Nil + if v.Aux != nil { + if len(other.Aux) != len(v.Aux) { + other.Aux = make([]interface{}, len(v.Aux)) + } + copy(other.Aux, v.Aux) + } +} + +func encodeIntegerPoint(p *IntegerPoint) *internal.Point { + return &internal.Point{ + Name: proto.String(p.Name), + Tags: proto.String(p.Tags.ID()), + Time: proto.Int64(p.Time), + Nil: proto.Bool(p.Nil), + Aux: encodeAux(p.Aux), + Aggregated: proto.Uint32(p.Aggregated), + + IntegerValue: proto.Int64(p.Value), + } +} + +func decodeIntegerPoint(pb *internal.Point) *IntegerPoint { + return &IntegerPoint{ + Name: pb.GetName(), + Tags: newTagsID(pb.GetTags()), + Time: pb.GetTime(), + Nil: pb.GetNil(), + Aux: decodeAux(pb.Aux), + Aggregated: pb.GetAggregated(), + Value: pb.GetIntegerValue(), + } +} + +// integerPoints represents a slice of points sortable by value. +type integerPoints []IntegerPoint + +func (a integerPoints) Len() int { return len(a) } +func (a integerPoints) Less(i, j int) bool { + if a[i].Time != a[j].Time { + return a[i].Time < a[j].Time + } + return a[i].Value < a[j].Value +} +func (a integerPoints) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +// integerPointsByValue represents a slice of points sortable by value. +type integerPointsByValue []IntegerPoint + +func (a integerPointsByValue) Len() int { return len(a) } + +func (a integerPointsByValue) Less(i, j int) bool { return a[i].Value < a[j].Value } + +func (a integerPointsByValue) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +// integerPointsByTime represents a slice of points sortable by value. +type integerPointsByTime []IntegerPoint + +func (a integerPointsByTime) Len() int { return len(a) } +func (a integerPointsByTime) Less(i, j int) bool { return a[i].Time < a[j].Time } +func (a integerPointsByTime) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +// integerPointByFunc represents a slice of points sortable by a function. 
+type integerPointsByFunc struct { + points []IntegerPoint + cmp func(a, b *IntegerPoint) bool +} + +func (a *integerPointsByFunc) Len() int { return len(a.points) } +func (a *integerPointsByFunc) Less(i, j int) bool { return a.cmp(&a.points[i], &a.points[j]) } +func (a *integerPointsByFunc) Swap(i, j int) { a.points[i], a.points[j] = a.points[j], a.points[i] } + +func (a *integerPointsByFunc) Push(x interface{}) { + a.points = append(a.points, x.(IntegerPoint)) +} + +func (a *integerPointsByFunc) Pop() interface{} { + p := a.points[len(a.points)-1] + a.points = a.points[:len(a.points)-1] + return p +} + +func integerPointsSortBy(points []IntegerPoint, cmp func(a, b *IntegerPoint) bool) *integerPointsByFunc { + return &integerPointsByFunc{ + points: points, + cmp: cmp, + } +} + +// IntegerPointEncoder encodes IntegerPoint points to a writer. +type IntegerPointEncoder struct { + w io.Writer +} + +// NewIntegerPointEncoder returns a new instance of IntegerPointEncoder that writes to w. +func NewIntegerPointEncoder(w io.Writer) *IntegerPointEncoder { + return &IntegerPointEncoder{w: w} +} + +// EncodeIntegerPoint marshals and writes p to the underlying writer. +func (enc *IntegerPointEncoder) EncodeIntegerPoint(p *IntegerPoint) error { + // Marshal to bytes. + buf, err := proto.Marshal(encodeIntegerPoint(p)) + if err != nil { + return err + } + + // Write the length. + if err := binary.Write(enc.w, binary.BigEndian, uint32(len(buf))); err != nil { + return err + } + + // Write the encoded point. + if _, err := enc.w.Write(buf); err != nil { + return err + } + return nil +} + +// IntegerPointDecoder decodes IntegerPoint points from a reader. +type IntegerPointDecoder struct { + r io.Reader + stats IteratorStats + ctx context.Context +} + +// NewIntegerPointDecoder returns a new instance of IntegerPointDecoder that reads from r. +func NewIntegerPointDecoder(ctx context.Context, r io.Reader) *IntegerPointDecoder { + return &IntegerPointDecoder{r: r, ctx: ctx} +} + +// Stats returns iterator stats embedded within the stream. +func (dec *IntegerPointDecoder) Stats() IteratorStats { return dec.stats } + +// DecodeIntegerPoint reads from the underlying reader and unmarshals into p. +func (dec *IntegerPointDecoder) DecodeIntegerPoint(p *IntegerPoint) error { + for { + // Read length. + var sz uint32 + if err := binary.Read(dec.r, binary.BigEndian, &sz); err != nil { + return err + } + + // Read point data. + buf := make([]byte, sz) + if _, err := io.ReadFull(dec.r, buf); err != nil { + return err + } + + // Unmarshal into point. + var pb internal.Point + if err := proto.Unmarshal(buf, &pb); err != nil { + return err + } + + // If the point contains stats then read stats and retry. + if pb.Stats != nil { + dec.stats = decodeIteratorStats(pb.Stats) + continue + } + + if len(pb.Trace) > 0 { + var err error + err = decodeIteratorTrace(dec.ctx, pb.Trace) + if err != nil { + return err + } + continue + } + + // Decode into point object. + *p = *decodeIntegerPoint(&pb) + + return nil + } +} + +// UnsignedPoint represents a point with a uint64 value. +// DO NOT ADD ADDITIONAL FIELDS TO THIS STRUCT. +// See TestPoint_Fields in influxql/point_test.go for more details. +type UnsignedPoint struct { + Name string + Tags Tags + + Time int64 + Value uint64 + Aux []interface{} + + // Total number of points that were combined into this point from an aggregate. + // If this is zero, the point is not the result of an aggregate function. 
+ Aggregated uint32 + Nil bool +} + +func (v *UnsignedPoint) name() string { return v.Name } +func (v *UnsignedPoint) tags() Tags { return v.Tags } +func (v *UnsignedPoint) time() int64 { return v.Time } +func (v *UnsignedPoint) nil() bool { return v.Nil } +func (v *UnsignedPoint) value() interface{} { + if v.Nil { + return nil + } + return v.Value +} +func (v *UnsignedPoint) aux() []interface{} { return v.Aux } + +// Clone returns a copy of v. +func (v *UnsignedPoint) Clone() *UnsignedPoint { + if v == nil { + return nil + } + + other := *v + if v.Aux != nil { + other.Aux = make([]interface{}, len(v.Aux)) + copy(other.Aux, v.Aux) + } + + return &other +} + +// CopyTo makes a deep copy into the point. +func (v *UnsignedPoint) CopyTo(other *UnsignedPoint) { + other.Name, other.Tags = v.Name, v.Tags + other.Time = v.Time + other.Value, other.Nil = v.Value, v.Nil + if v.Aux != nil { + if len(other.Aux) != len(v.Aux) { + other.Aux = make([]interface{}, len(v.Aux)) + } + copy(other.Aux, v.Aux) + } +} + +func encodeUnsignedPoint(p *UnsignedPoint) *internal.Point { + return &internal.Point{ + Name: proto.String(p.Name), + Tags: proto.String(p.Tags.ID()), + Time: proto.Int64(p.Time), + Nil: proto.Bool(p.Nil), + Aux: encodeAux(p.Aux), + Aggregated: proto.Uint32(p.Aggregated), + } +} + +func decodeUnsignedPoint(pb *internal.Point) *UnsignedPoint { + return &UnsignedPoint{ + Name: pb.GetName(), + Tags: newTagsID(pb.GetTags()), + Time: pb.GetTime(), + Nil: pb.GetNil(), + Aux: decodeAux(pb.Aux), + Aggregated: pb.GetAggregated(), + Value: pb.GetUnsignedValue(), + } +} + +// unsignedPoints represents a slice of points sortable by value. +type unsignedPoints []UnsignedPoint + +func (a unsignedPoints) Len() int { return len(a) } +func (a unsignedPoints) Less(i, j int) bool { + if a[i].Time != a[j].Time { + return a[i].Time < a[j].Time + } + return a[i].Value < a[j].Value +} +func (a unsignedPoints) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +// unsignedPointsByValue represents a slice of points sortable by value. +type unsignedPointsByValue []UnsignedPoint + +func (a unsignedPointsByValue) Len() int { return len(a) } + +func (a unsignedPointsByValue) Less(i, j int) bool { return a[i].Value < a[j].Value } + +func (a unsignedPointsByValue) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +// unsignedPointsByTime represents a slice of points sortable by value. +type unsignedPointsByTime []UnsignedPoint + +func (a unsignedPointsByTime) Len() int { return len(a) } +func (a unsignedPointsByTime) Less(i, j int) bool { return a[i].Time < a[j].Time } +func (a unsignedPointsByTime) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +// unsignedPointByFunc represents a slice of points sortable by a function. 
+type unsignedPointsByFunc struct { + points []UnsignedPoint + cmp func(a, b *UnsignedPoint) bool +} + +func (a *unsignedPointsByFunc) Len() int { return len(a.points) } +func (a *unsignedPointsByFunc) Less(i, j int) bool { return a.cmp(&a.points[i], &a.points[j]) } +func (a *unsignedPointsByFunc) Swap(i, j int) { a.points[i], a.points[j] = a.points[j], a.points[i] } + +func (a *unsignedPointsByFunc) Push(x interface{}) { + a.points = append(a.points, x.(UnsignedPoint)) +} + +func (a *unsignedPointsByFunc) Pop() interface{} { + p := a.points[len(a.points)-1] + a.points = a.points[:len(a.points)-1] + return p +} + +func unsignedPointsSortBy(points []UnsignedPoint, cmp func(a, b *UnsignedPoint) bool) *unsignedPointsByFunc { + return &unsignedPointsByFunc{ + points: points, + cmp: cmp, + } +} + +// UnsignedPointEncoder encodes UnsignedPoint points to a writer. +type UnsignedPointEncoder struct { + w io.Writer +} + +// NewUnsignedPointEncoder returns a new instance of UnsignedPointEncoder that writes to w. +func NewUnsignedPointEncoder(w io.Writer) *UnsignedPointEncoder { + return &UnsignedPointEncoder{w: w} +} + +// EncodeUnsignedPoint marshals and writes p to the underlying writer. +func (enc *UnsignedPointEncoder) EncodeUnsignedPoint(p *UnsignedPoint) error { + // Marshal to bytes. + buf, err := proto.Marshal(encodeUnsignedPoint(p)) + if err != nil { + return err + } + + // Write the length. + if err := binary.Write(enc.w, binary.BigEndian, uint32(len(buf))); err != nil { + return err + } + + // Write the encoded point. + if _, err := enc.w.Write(buf); err != nil { + return err + } + return nil +} + +// UnsignedPointDecoder decodes UnsignedPoint points from a reader. +type UnsignedPointDecoder struct { + r io.Reader + stats IteratorStats + ctx context.Context +} + +// NewUnsignedPointDecoder returns a new instance of UnsignedPointDecoder that reads from r. +func NewUnsignedPointDecoder(ctx context.Context, r io.Reader) *UnsignedPointDecoder { + return &UnsignedPointDecoder{r: r, ctx: ctx} +} + +// Stats returns iterator stats embedded within the stream. +func (dec *UnsignedPointDecoder) Stats() IteratorStats { return dec.stats } + +// DecodeUnsignedPoint reads from the underlying reader and unmarshals into p. +func (dec *UnsignedPointDecoder) DecodeUnsignedPoint(p *UnsignedPoint) error { + for { + // Read length. + var sz uint32 + if err := binary.Read(dec.r, binary.BigEndian, &sz); err != nil { + return err + } + + // Read point data. + buf := make([]byte, sz) + if _, err := io.ReadFull(dec.r, buf); err != nil { + return err + } + + // Unmarshal into point. + var pb internal.Point + if err := proto.Unmarshal(buf, &pb); err != nil { + return err + } + + // If the point contains stats then read stats and retry. + if pb.Stats != nil { + dec.stats = decodeIteratorStats(pb.Stats) + continue + } + + if len(pb.Trace) > 0 { + var err error + err = decodeIteratorTrace(dec.ctx, pb.Trace) + if err != nil { + return err + } + continue + } + + // Decode into point object. + *p = *decodeUnsignedPoint(&pb) + + return nil + } +} + +// StringPoint represents a point with a string value. +// DO NOT ADD ADDITIONAL FIELDS TO THIS STRUCT. +// See TestPoint_Fields in influxql/point_test.go for more details. +type StringPoint struct { + Name string + Tags Tags + + Time int64 + Value string + Aux []interface{} + + // Total number of points that were combined into this point from an aggregate. + // If this is zero, the point is not the result of an aggregate function. 
+ Aggregated uint32 + Nil bool +} + +func (v *StringPoint) name() string { return v.Name } +func (v *StringPoint) tags() Tags { return v.Tags } +func (v *StringPoint) time() int64 { return v.Time } +func (v *StringPoint) nil() bool { return v.Nil } +func (v *StringPoint) value() interface{} { + if v.Nil { + return nil + } + return v.Value +} +func (v *StringPoint) aux() []interface{} { return v.Aux } + +// Clone returns a copy of v. +func (v *StringPoint) Clone() *StringPoint { + if v == nil { + return nil + } + + other := *v + if v.Aux != nil { + other.Aux = make([]interface{}, len(v.Aux)) + copy(other.Aux, v.Aux) + } + + return &other +} + +// CopyTo makes a deep copy into the point. +func (v *StringPoint) CopyTo(other *StringPoint) { + other.Name, other.Tags = v.Name, v.Tags + other.Time = v.Time + other.Value, other.Nil = v.Value, v.Nil + if v.Aux != nil { + if len(other.Aux) != len(v.Aux) { + other.Aux = make([]interface{}, len(v.Aux)) + } + copy(other.Aux, v.Aux) + } +} + +func encodeStringPoint(p *StringPoint) *internal.Point { + return &internal.Point{ + Name: proto.String(p.Name), + Tags: proto.String(p.Tags.ID()), + Time: proto.Int64(p.Time), + Nil: proto.Bool(p.Nil), + Aux: encodeAux(p.Aux), + Aggregated: proto.Uint32(p.Aggregated), + + StringValue: proto.String(p.Value), + } +} + +func decodeStringPoint(pb *internal.Point) *StringPoint { + return &StringPoint{ + Name: pb.GetName(), + Tags: newTagsID(pb.GetTags()), + Time: pb.GetTime(), + Nil: pb.GetNil(), + Aux: decodeAux(pb.Aux), + Aggregated: pb.GetAggregated(), + Value: pb.GetStringValue(), + } +} + +// stringPoints represents a slice of points sortable by value. +type stringPoints []StringPoint + +func (a stringPoints) Len() int { return len(a) } +func (a stringPoints) Less(i, j int) bool { + if a[i].Time != a[j].Time { + return a[i].Time < a[j].Time + } + return a[i].Value < a[j].Value +} +func (a stringPoints) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +// stringPointsByValue represents a slice of points sortable by value. +type stringPointsByValue []StringPoint + +func (a stringPointsByValue) Len() int { return len(a) } + +func (a stringPointsByValue) Less(i, j int) bool { return a[i].Value < a[j].Value } + +func (a stringPointsByValue) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +// stringPointsByTime represents a slice of points sortable by value. +type stringPointsByTime []StringPoint + +func (a stringPointsByTime) Len() int { return len(a) } +func (a stringPointsByTime) Less(i, j int) bool { return a[i].Time < a[j].Time } +func (a stringPointsByTime) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +// stringPointByFunc represents a slice of points sortable by a function. +type stringPointsByFunc struct { + points []StringPoint + cmp func(a, b *StringPoint) bool +} + +func (a *stringPointsByFunc) Len() int { return len(a.points) } +func (a *stringPointsByFunc) Less(i, j int) bool { return a.cmp(&a.points[i], &a.points[j]) } +func (a *stringPointsByFunc) Swap(i, j int) { a.points[i], a.points[j] = a.points[j], a.points[i] } + +func (a *stringPointsByFunc) Push(x interface{}) { + a.points = append(a.points, x.(StringPoint)) +} + +func (a *stringPointsByFunc) Pop() interface{} { + p := a.points[len(a.points)-1] + a.points = a.points[:len(a.points)-1] + return p +} + +func stringPointsSortBy(points []StringPoint, cmp func(a, b *StringPoint) bool) *stringPointsByFunc { + return &stringPointsByFunc{ + points: points, + cmp: cmp, + } +} + +// StringPointEncoder encodes StringPoint points to a writer. 
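+// Each point is written as a 4-byte big-endian length prefix followed by the protobuf-encoded point.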
+type StringPointEncoder struct { + w io.Writer +} + +// NewStringPointEncoder returns a new instance of StringPointEncoder that writes to w. +func NewStringPointEncoder(w io.Writer) *StringPointEncoder { + return &StringPointEncoder{w: w} +} + +// EncodeStringPoint marshals and writes p to the underlying writer. +func (enc *StringPointEncoder) EncodeStringPoint(p *StringPoint) error { + // Marshal to bytes. + buf, err := proto.Marshal(encodeStringPoint(p)) + if err != nil { + return err + } + + // Write the length. + if err := binary.Write(enc.w, binary.BigEndian, uint32(len(buf))); err != nil { + return err + } + + // Write the encoded point. + if _, err := enc.w.Write(buf); err != nil { + return err + } + return nil +} + +// StringPointDecoder decodes StringPoint points from a reader. +type StringPointDecoder struct { + r io.Reader + stats IteratorStats + ctx context.Context +} + +// NewStringPointDecoder returns a new instance of StringPointDecoder that reads from r. +func NewStringPointDecoder(ctx context.Context, r io.Reader) *StringPointDecoder { + return &StringPointDecoder{r: r, ctx: ctx} +} + +// Stats returns iterator stats embedded within the stream. +func (dec *StringPointDecoder) Stats() IteratorStats { return dec.stats } + +// DecodeStringPoint reads from the underlying reader and unmarshals into p. +func (dec *StringPointDecoder) DecodeStringPoint(p *StringPoint) error { + for { + // Read length. + var sz uint32 + if err := binary.Read(dec.r, binary.BigEndian, &sz); err != nil { + return err + } + + // Read point data. + buf := make([]byte, sz) + if _, err := io.ReadFull(dec.r, buf); err != nil { + return err + } + + // Unmarshal into point. + var pb internal.Point + if err := proto.Unmarshal(buf, &pb); err != nil { + return err + } + + // If the point contains stats then read stats and retry. + if pb.Stats != nil { + dec.stats = decodeIteratorStats(pb.Stats) + continue + } + + if len(pb.Trace) > 0 { + var err error + err = decodeIteratorTrace(dec.ctx, pb.Trace) + if err != nil { + return err + } + continue + } + + // Decode into point object. + *p = *decodeStringPoint(&pb) + + return nil + } +} + +// BooleanPoint represents a point with a bool value. +// DO NOT ADD ADDITIONAL FIELDS TO THIS STRUCT. +// See TestPoint_Fields in influxql/point_test.go for more details. +type BooleanPoint struct { + Name string + Tags Tags + + Time int64 + Value bool + Aux []interface{} + + // Total number of points that were combined into this point from an aggregate. + // If this is zero, the point is not the result of an aggregate function. + Aggregated uint32 + Nil bool +} + +func (v *BooleanPoint) name() string { return v.Name } +func (v *BooleanPoint) tags() Tags { return v.Tags } +func (v *BooleanPoint) time() int64 { return v.Time } +func (v *BooleanPoint) nil() bool { return v.Nil } +func (v *BooleanPoint) value() interface{} { + if v.Nil { + return nil + } + return v.Value +} +func (v *BooleanPoint) aux() []interface{} { return v.Aux } + +// Clone returns a copy of v. +func (v *BooleanPoint) Clone() *BooleanPoint { + if v == nil { + return nil + } + + other := *v + if v.Aux != nil { + other.Aux = make([]interface{}, len(v.Aux)) + copy(other.Aux, v.Aux) + } + + return &other +} + +// CopyTo makes a deep copy into the point. 
+func (v *BooleanPoint) CopyTo(other *BooleanPoint) { + other.Name, other.Tags = v.Name, v.Tags + other.Time = v.Time + other.Value, other.Nil = v.Value, v.Nil + if v.Aux != nil { + if len(other.Aux) != len(v.Aux) { + other.Aux = make([]interface{}, len(v.Aux)) + } + copy(other.Aux, v.Aux) + } +} + +func encodeBooleanPoint(p *BooleanPoint) *internal.Point { + return &internal.Point{ + Name: proto.String(p.Name), + Tags: proto.String(p.Tags.ID()), + Time: proto.Int64(p.Time), + Nil: proto.Bool(p.Nil), + Aux: encodeAux(p.Aux), + Aggregated: proto.Uint32(p.Aggregated), + + BooleanValue: proto.Bool(p.Value), + } +} + +func decodeBooleanPoint(pb *internal.Point) *BooleanPoint { + return &BooleanPoint{ + Name: pb.GetName(), + Tags: newTagsID(pb.GetTags()), + Time: pb.GetTime(), + Nil: pb.GetNil(), + Aux: decodeAux(pb.Aux), + Aggregated: pb.GetAggregated(), + Value: pb.GetBooleanValue(), + } +} + +// booleanPoints represents a slice of points sortable by value. +type booleanPoints []BooleanPoint + +func (a booleanPoints) Len() int { return len(a) } +func (a booleanPoints) Less(i, j int) bool { + if a[i].Time != a[j].Time { + return a[i].Time < a[j].Time + } + return !a[i].Value +} +func (a booleanPoints) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +// booleanPointsByValue represents a slice of points sortable by value. +type booleanPointsByValue []BooleanPoint + +func (a booleanPointsByValue) Len() int { return len(a) } + +func (a booleanPointsByValue) Less(i, j int) bool { return !a[i].Value } + +func (a booleanPointsByValue) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +// booleanPointsByTime represents a slice of points sortable by value. +type booleanPointsByTime []BooleanPoint + +func (a booleanPointsByTime) Len() int { return len(a) } +func (a booleanPointsByTime) Less(i, j int) bool { return a[i].Time < a[j].Time } +func (a booleanPointsByTime) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +// booleanPointByFunc represents a slice of points sortable by a function. +type booleanPointsByFunc struct { + points []BooleanPoint + cmp func(a, b *BooleanPoint) bool +} + +func (a *booleanPointsByFunc) Len() int { return len(a.points) } +func (a *booleanPointsByFunc) Less(i, j int) bool { return a.cmp(&a.points[i], &a.points[j]) } +func (a *booleanPointsByFunc) Swap(i, j int) { a.points[i], a.points[j] = a.points[j], a.points[i] } + +func (a *booleanPointsByFunc) Push(x interface{}) { + a.points = append(a.points, x.(BooleanPoint)) +} + +func (a *booleanPointsByFunc) Pop() interface{} { + p := a.points[len(a.points)-1] + a.points = a.points[:len(a.points)-1] + return p +} + +func booleanPointsSortBy(points []BooleanPoint, cmp func(a, b *BooleanPoint) bool) *booleanPointsByFunc { + return &booleanPointsByFunc{ + points: points, + cmp: cmp, + } +} + +// BooleanPointEncoder encodes BooleanPoint points to a writer. +type BooleanPointEncoder struct { + w io.Writer +} + +// NewBooleanPointEncoder returns a new instance of BooleanPointEncoder that writes to w. +func NewBooleanPointEncoder(w io.Writer) *BooleanPointEncoder { + return &BooleanPointEncoder{w: w} +} + +// EncodeBooleanPoint marshals and writes p to the underlying writer. +func (enc *BooleanPointEncoder) EncodeBooleanPoint(p *BooleanPoint) error { + // Marshal to bytes. + buf, err := proto.Marshal(encodeBooleanPoint(p)) + if err != nil { + return err + } + + // Write the length. + if err := binary.Write(enc.w, binary.BigEndian, uint32(len(buf))); err != nil { + return err + } + + // Write the encoded point. 
+ if _, err := enc.w.Write(buf); err != nil { + return err + } + return nil +} + +// BooleanPointDecoder decodes BooleanPoint points from a reader. +type BooleanPointDecoder struct { + r io.Reader + stats IteratorStats + ctx context.Context +} + +// NewBooleanPointDecoder returns a new instance of BooleanPointDecoder that reads from r. +func NewBooleanPointDecoder(ctx context.Context, r io.Reader) *BooleanPointDecoder { + return &BooleanPointDecoder{r: r, ctx: ctx} +} + +// Stats returns iterator stats embedded within the stream. +func (dec *BooleanPointDecoder) Stats() IteratorStats { return dec.stats } + +// DecodeBooleanPoint reads from the underlying reader and unmarshals into p. +func (dec *BooleanPointDecoder) DecodeBooleanPoint(p *BooleanPoint) error { + for { + // Read length. + var sz uint32 + if err := binary.Read(dec.r, binary.BigEndian, &sz); err != nil { + return err + } + + // Read point data. + buf := make([]byte, sz) + if _, err := io.ReadFull(dec.r, buf); err != nil { + return err + } + + // Unmarshal into point. + var pb internal.Point + if err := proto.Unmarshal(buf, &pb); err != nil { + return err + } + + // If the point contains stats then read stats and retry. + if pb.Stats != nil { + dec.stats = decodeIteratorStats(pb.Stats) + continue + } + + if len(pb.Trace) > 0 { + var err error + err = decodeIteratorTrace(dec.ctx, pb.Trace) + if err != nil { + return err + } + continue + } + + // Decode into point object. + *p = *decodeBooleanPoint(&pb) + + return nil + } +} diff --git a/vendor/github.com/influxdata/influxdb/query/point.gen.go.tmpl b/vendor/github.com/influxdata/influxdb/query/point.gen.go.tmpl new file mode 100644 index 0000000..593db9c --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/query/point.gen.go.tmpl @@ -0,0 +1,250 @@ +package query + +import ( + "context" + "encoding/binary" + "io" + + "github.com/gogo/protobuf/proto" + internal "github.com/influxdata/influxdb/query/internal" +) + +{{range .}} + +// {{.Name}}Point represents a point with a {{.Type}} value. +// DO NOT ADD ADDITIONAL FIELDS TO THIS STRUCT. +// See TestPoint_Fields in influxql/point_test.go for more details. +type {{.Name}}Point struct { + Name string + Tags Tags + + Time int64 + Value {{.Type}} + Aux []interface{} + + // Total number of points that were combined into this point from an aggregate. + // If this is zero, the point is not the result of an aggregate function. + Aggregated uint32 + Nil bool +} + +func (v *{{.Name}}Point) name() string { return v.Name } +func (v *{{.Name}}Point) tags() Tags { return v.Tags } +func (v *{{.Name}}Point) time() int64 { return v.Time } +func (v *{{.Name}}Point) nil() bool { return v.Nil } +func (v *{{.Name}}Point) value() interface{} { + if v.Nil { + return nil + } + return v.Value +} +func (v *{{.Name}}Point) aux() []interface{} { return v.Aux } + +// Clone returns a copy of v. +func (v *{{.Name}}Point) Clone() *{{.Name}}Point { + if v == nil { + return nil + } + + other := *v + if v.Aux != nil { + other.Aux = make([]interface{}, len(v.Aux)) + copy(other.Aux, v.Aux) + } + + return &other +} + +// CopyTo makes a deep copy into the point. 
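+// The destination's Aux slice is reused when it already has the matching length, avoiding an allocation.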
+func (v *{{.Name}}Point) CopyTo(other *{{.Name}}Point) { + other.Name, other.Tags = v.Name, v.Tags + other.Time = v.Time + other.Value, other.Nil = v.Value, v.Nil + if v.Aux != nil { + if len(other.Aux) != len(v.Aux) { + other.Aux = make([]interface{}, len(v.Aux)) + } + copy(other.Aux, v.Aux) + } +} + +func encode{{.Name}}Point(p *{{.Name}}Point) *internal.Point { + return &internal.Point{ + Name: proto.String(p.Name), + Tags: proto.String(p.Tags.ID()), + Time: proto.Int64(p.Time), + Nil: proto.Bool(p.Nil), + Aux: encodeAux(p.Aux), + Aggregated: proto.Uint32(p.Aggregated), + + {{if eq .Name "Float"}} + FloatValue: proto.Float64(p.Value), + {{else if eq .Name "Integer"}} + IntegerValue: proto.Int64(p.Value), + {{else if eq .Name "String"}} + StringValue: proto.String(p.Value), + {{else if eq .Name "Boolean"}} + BooleanValue: proto.Bool(p.Value), + {{end}} + } +} + +func decode{{.Name}}Point(pb *internal.Point) *{{.Name}}Point { + return &{{.Name}}Point{ + Name: pb.GetName(), + Tags: newTagsID(pb.GetTags()), + Time: pb.GetTime(), + Nil: pb.GetNil(), + Aux: decodeAux(pb.Aux), + Aggregated: pb.GetAggregated(), + Value: pb.Get{{.Name}}Value(), + } +} + +// {{.name}}Points represents a slice of points sortable by value. +type {{.name}}Points []{{.Name}}Point + +func (a {{.name}}Points) Len() int { return len(a) } +func (a {{.name}}Points) Less(i, j int) bool { + if a[i].Time != a[j].Time { + return a[i].Time < a[j].Time + } + return {{if ne .Name "Boolean"}}a[i].Value < a[j].Value{{else}}!a[i].Value{{end}} +} +func (a {{.name}}Points) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +// {{.name}}PointsByValue represents a slice of points sortable by value. +type {{.name}}PointsByValue []{{.Name}}Point + +func (a {{.name}}PointsByValue) Len() int { return len(a) } +{{if eq .Name "Boolean"}} +func (a {{.name}}PointsByValue) Less(i, j int) bool { return !a[i].Value } +{{else}} +func (a {{.name}}PointsByValue) Less(i, j int) bool { return a[i].Value < a[j].Value } +{{end}} +func (a {{.name}}PointsByValue) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +// {{.name}}PointsByTime represents a slice of points sortable by value. +type {{.name}}PointsByTime []{{.Name}}Point + +func (a {{.name}}PointsByTime) Len() int { return len(a) } +func (a {{.name}}PointsByTime) Less(i, j int) bool { return a[i].Time < a[j].Time } +func (a {{.name}}PointsByTime) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +// {{.name}}PointByFunc represents a slice of points sortable by a function. +type {{.name}}PointsByFunc struct { + points []{{.Name}}Point + cmp func(a, b *{{.Name}}Point) bool +} + +func (a *{{.name}}PointsByFunc) Len() int { return len(a.points) } +func (a *{{.name}}PointsByFunc) Less(i, j int) bool { return a.cmp(&a.points[i], &a.points[j]) } +func (a *{{.name}}PointsByFunc) Swap(i, j int) { a.points[i], a.points[j] = a.points[j], a.points[i] } + +func (a *{{.name}}PointsByFunc) Push(x interface{}) { + a.points = append(a.points, x.({{.Name}}Point)) +} + +func (a *{{.name}}PointsByFunc) Pop() interface{} { + p := a.points[len(a.points)-1] + a.points = a.points[:len(a.points)-1] + return p +} + +func {{.name}}PointsSortBy(points []{{.Name}}Point, cmp func(a, b *{{.Name}}Point) bool) *{{.name}}PointsByFunc { + return &{{.name}}PointsByFunc{ + points: points, + cmp: cmp, + } +} + +// {{.Name}}PointEncoder encodes {{.Name}}Point points to a writer. +type {{.Name}}PointEncoder struct { + w io.Writer +} + +// New{{.Name}}PointEncoder returns a new instance of {{.Name}}PointEncoder that writes to w. 
+func New{{.Name}}PointEncoder(w io.Writer) *{{.Name}}PointEncoder { + return &{{.Name}}PointEncoder{w: w} +} + +// Encode{{.Name}}Point marshals and writes p to the underlying writer. +func (enc *{{.Name}}PointEncoder) Encode{{.Name}}Point(p *{{.Name}}Point) error { + // Marshal to bytes. + buf, err := proto.Marshal(encode{{.Name}}Point(p)) + if err != nil { + return err + } + + // Write the length. + if err := binary.Write(enc.w, binary.BigEndian, uint32(len(buf))); err != nil { + return err + } + + // Write the encoded point. + if _, err := enc.w.Write(buf); err != nil { + return err + } + return nil +} + + +// {{.Name}}PointDecoder decodes {{.Name}}Point points from a reader. +type {{.Name}}PointDecoder struct { + r io.Reader + stats IteratorStats + ctx context.Context +} + +// New{{.Name}}PointDecoder returns a new instance of {{.Name}}PointDecoder that reads from r. +func New{{.Name}}PointDecoder(ctx context.Context, r io.Reader) *{{.Name}}PointDecoder { + return &{{.Name}}PointDecoder{r: r, ctx: ctx} +} + +// Stats returns iterator stats embedded within the stream. +func (dec *{{.Name}}PointDecoder) Stats() IteratorStats { return dec.stats } + +// Decode{{.Name}}Point reads from the underlying reader and unmarshals into p. +func (dec *{{.Name}}PointDecoder) Decode{{.Name}}Point(p *{{.Name}}Point) error { + for { + // Read length. + var sz uint32 + if err := binary.Read(dec.r, binary.BigEndian, &sz); err != nil { + return err + } + + // Read point data. + buf := make([]byte, sz) + if _, err := io.ReadFull(dec.r, buf); err != nil { + return err + } + + // Unmarshal into point. + var pb internal.Point + if err := proto.Unmarshal(buf, &pb); err != nil { + return err + } + + // If the point contains stats then read stats and retry. + if pb.Stats != nil { + dec.stats = decodeIteratorStats(pb.Stats) + continue + } + + if len(pb.Trace) > 0 { + var err error + err = decodeIteratorTrace(dec.ctx, pb.Trace) + if err != nil { + return err + } + continue + } + + // Decode into point object. + *p = *decode{{.Name}}Point(&pb) + + return nil + } +} + +{{end}} diff --git a/vendor/github.com/influxdata/influxdb/query/point.go b/vendor/github.com/influxdata/influxdb/query/point.go new file mode 100644 index 0000000..80183bf --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/query/point.go @@ -0,0 +1,382 @@ +package query + +import ( + "bytes" + "encoding/binary" + "fmt" + "io" + "math" + "sort" + + "github.com/gogo/protobuf/proto" + internal "github.com/influxdata/influxdb/query/internal" + "github.com/influxdata/influxql" +) + +// ZeroTime is the Unix nanosecond timestamp for no time. +// This time is not used by the query engine or the storage engine as a valid time. +const ZeroTime = int64(math.MinInt64) + +// Point represents a value in a series that occurred at a given time. +type Point interface { + // Name and tags uniquely identify the series the value belongs to. + name() string + tags() Tags + + // The time that the value occurred at. + time() int64 + + // The value at the given time. + value() interface{} + + // Auxillary values passed along with the value. + aux() []interface{} +} + +// Points represents a list of points. +type Points []Point + +// Clone returns a deep copy of a. 
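+// Nil entries are preserved; a point of an unrecognized concrete type causes a panic.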
+func (a Points) Clone() []Point { + other := make([]Point, len(a)) + for i, p := range a { + if p == nil { + other[i] = nil + continue + } + + switch p := p.(type) { + case *FloatPoint: + other[i] = p.Clone() + case *IntegerPoint: + other[i] = p.Clone() + case *UnsignedPoint: + other[i] = p.Clone() + case *StringPoint: + other[i] = p.Clone() + case *BooleanPoint: + other[i] = p.Clone() + default: + panic(fmt.Sprintf("unable to clone point: %T", p)) + } + } + return other +} + +// Tags represent a map of keys and values. +// It memoizes its key so it can be used efficiently during query execution. +type Tags struct { + id string + m map[string]string +} + +// NewTags returns a new instance of Tags. +func NewTags(m map[string]string) Tags { + if len(m) == 0 { + return Tags{} + } + return Tags{ + id: string(encodeTags(m)), + m: m, + } +} + +// newTagsID returns a new instance of Tags by parsing the given tag ID. +func newTagsID(id string) Tags { + m := decodeTags([]byte(id)) + if len(m) == 0 { + return Tags{} + } + return Tags{id: id, m: m} +} + +// Equal compares if the Tags are equal to each other. +func (t Tags) Equal(other Tags) bool { + return t.ID() == other.ID() +} + +// ID returns the string identifier for the tags. +func (t Tags) ID() string { return t.id } + +// KeyValues returns the underlying map for the tags. +func (t Tags) KeyValues() map[string]string { return t.m } + +// Keys returns a sorted list of all keys on the tag. +func (t *Tags) Keys() []string { + if t == nil { + return nil + } + + var a []string + for k := range t.m { + a = append(a, k) + } + sort.Strings(a) + return a +} + +// Values returns a sorted list of all values on the tag. +func (t *Tags) Values() []string { + if t == nil { + return nil + } + + a := make([]string, 0, len(t.m)) + for _, v := range t.m { + a = append(a, v) + } + sort.Strings(a) + return a +} + +// Value returns the value for a given key. +func (t *Tags) Value(k string) string { + if t == nil { + return "" + } + return t.m[k] +} + +// Subset returns a new tags object with a subset of the keys. +func (t *Tags) Subset(keys []string) Tags { + if len(keys) == 0 { + return Tags{} + } + + // If keys match existing keys, simply return this tagset. + if keysMatch(t.m, keys) { + return *t + } + + // Otherwise create new tag set. + m := make(map[string]string, len(keys)) + for _, k := range keys { + m[k] = t.m[k] + } + return NewTags(m) +} + +// Equals returns true if t equals other. +func (t *Tags) Equals(other *Tags) bool { + if t == nil && other == nil { + return true + } else if t == nil || other == nil { + return false + } + return t.id == other.id +} + +// keysMatch returns true if m has exactly the same keys as listed in keys. +func keysMatch(m map[string]string, keys []string) bool { + if len(keys) != len(m) { + return false + } + + for _, k := range keys { + if _, ok := m[k]; !ok { + return false + } + } + + return true +} + +// encodeTags converts a map of strings to an identifier. +func encodeTags(m map[string]string) []byte { + // Empty maps marshal to empty bytes. + if len(m) == 0 { + return nil + } + + // Extract keys and determine final size. + sz := (len(m) * 2) - 1 // separators + keys := make([]string, 0, len(m)) + for k, v := range m { + keys = append(keys, k) + sz += len(k) + len(v) + } + sort.Strings(keys) + + // Generate marshaled bytes. 
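+ // Layout: sorted keys, each terminated by a NUL byte, then the values in key order separated by NUL bytes (no trailing separator); decodeTags splits on NUL and pairs the two halves.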
+ b := make([]byte, sz) + buf := b + for _, k := range keys { + copy(buf, k) + buf[len(k)] = '\x00' + buf = buf[len(k)+1:] + } + for i, k := range keys { + v := m[k] + copy(buf, v) + if i < len(keys)-1 { + buf[len(v)] = '\x00' + buf = buf[len(v)+1:] + } + } + return b +} + +// decodeTags parses an identifier into a map of tags. +func decodeTags(id []byte) map[string]string { + a := bytes.Split(id, []byte{'\x00'}) + + // There must be an even number of segments. + if len(a) > 0 && len(a)%2 == 1 { + a = a[:len(a)-1] + } + + // Return nil if there are no segments. + if len(a) == 0 { + return nil + } + mid := len(a) / 2 + + // Decode key/value tags. + m := make(map[string]string) + for i := 0; i < mid; i++ { + m[string(a[i])] = string(a[i+mid]) + } + return m +} + +func encodeAux(aux []interface{}) []*internal.Aux { + pb := make([]*internal.Aux, len(aux)) + for i := range aux { + switch v := aux[i].(type) { + case float64: + pb[i] = &internal.Aux{DataType: proto.Int32(int32(influxql.Float)), FloatValue: proto.Float64(v)} + case *float64: + pb[i] = &internal.Aux{DataType: proto.Int32(int32(influxql.Float))} + case int64: + pb[i] = &internal.Aux{DataType: proto.Int32(int32(influxql.Integer)), IntegerValue: proto.Int64(v)} + case *int64: + pb[i] = &internal.Aux{DataType: proto.Int32(int32(influxql.Integer))} + case uint64: + pb[i] = &internal.Aux{DataType: proto.Int32(int32(influxql.Unsigned)), UnsignedValue: proto.Uint64(v)} + case *uint64: + pb[i] = &internal.Aux{DataType: proto.Int32(int32(influxql.Unsigned))} + case string: + pb[i] = &internal.Aux{DataType: proto.Int32(int32(influxql.String)), StringValue: proto.String(v)} + case *string: + pb[i] = &internal.Aux{DataType: proto.Int32(int32(influxql.String))} + case bool: + pb[i] = &internal.Aux{DataType: proto.Int32(int32(influxql.Boolean)), BooleanValue: proto.Bool(v)} + case *bool: + pb[i] = &internal.Aux{DataType: proto.Int32(int32(influxql.Boolean))} + default: + pb[i] = &internal.Aux{DataType: proto.Int32(int32(influxql.Unknown))} + } + } + return pb +} + +func decodeAux(pb []*internal.Aux) []interface{} { + if len(pb) == 0 { + return nil + } + + aux := make([]interface{}, len(pb)) + for i := range pb { + switch influxql.DataType(pb[i].GetDataType()) { + case influxql.Float: + if pb[i].FloatValue != nil { + aux[i] = *pb[i].FloatValue + } else { + aux[i] = (*float64)(nil) + } + case influxql.Integer: + if pb[i].IntegerValue != nil { + aux[i] = *pb[i].IntegerValue + } else { + aux[i] = (*int64)(nil) + } + case influxql.Unsigned: + if pb[i].UnsignedValue != nil { + aux[i] = *pb[i].UnsignedValue + } else { + aux[i] = (*uint64)(nil) + } + case influxql.String: + if pb[i].StringValue != nil { + aux[i] = *pb[i].StringValue + } else { + aux[i] = (*string)(nil) + } + case influxql.Boolean: + if pb[i].BooleanValue != nil { + aux[i] = *pb[i].BooleanValue + } else { + aux[i] = (*bool)(nil) + } + default: + aux[i] = nil + } + } + return aux +} + +func cloneAux(src []interface{}) []interface{} { + if src == nil { + return src + } + dest := make([]interface{}, len(src)) + copy(dest, src) + return dest +} + +// PointDecoder decodes generic points from a reader. +type PointDecoder struct { + r io.Reader + stats IteratorStats +} + +// NewPointDecoder returns a new instance of PointDecoder that reads from r. +func NewPointDecoder(r io.Reader) *PointDecoder { + return &PointDecoder{r: r} +} + +// Stats returns iterator stats embedded within the stream. 
+func (dec *PointDecoder) Stats() IteratorStats { return dec.stats } + +// DecodePoint reads from the underlying reader and unmarshals into p. +func (dec *PointDecoder) DecodePoint(p *Point) error { + for { + // Read length. + var sz uint32 + if err := binary.Read(dec.r, binary.BigEndian, &sz); err != nil { + return err + } + + // Read point data. + buf := make([]byte, sz) + if _, err := io.ReadFull(dec.r, buf); err != nil { + return err + } + + // Unmarshal into point. + var pb internal.Point + if err := proto.Unmarshal(buf, &pb); err != nil { + return err + } + + // If the point contains stats then read stats and retry. + if pb.Stats != nil { + dec.stats = decodeIteratorStats(pb.Stats) + continue + } + + if pb.IntegerValue != nil { + *p = decodeIntegerPoint(&pb) + } else if pb.UnsignedValue != nil { + *p = decodeUnsignedPoint(&pb) + } else if pb.StringValue != nil { + *p = decodeStringPoint(&pb) + } else if pb.BooleanValue != nil { + *p = decodeBooleanPoint(&pb) + } else { + *p = decodeFloatPoint(&pb) + } + + return nil + } +} diff --git a/vendor/github.com/influxdata/influxdb/query/point_test.go b/vendor/github.com/influxdata/influxdb/query/point_test.go new file mode 100644 index 0000000..da3f975 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/query/point_test.go @@ -0,0 +1,187 @@ +package query_test + +import ( + "reflect" + "strings" + "testing" + + "github.com/davecgh/go-spew/spew" + "github.com/influxdata/influxdb/pkg/deep" + "github.com/influxdata/influxdb/query" +) + +func TestPoint_Clone_Float(t *testing.T) { + p := &query.FloatPoint{ + Name: "cpu", + Tags: ParseTags("host=server01"), + Time: 5, + Value: 2, + Aux: []interface{}{float64(45)}, + } + c := p.Clone() + if p == c { + t.Errorf("clone has the same address as the original: %v == %v", p, c) + } + if !deep.Equal(p, c) { + t.Errorf("mismatched point: %s", spew.Sdump(c)) + } + if &p.Aux[0] == &c.Aux[0] { + t.Errorf("aux values share the same address: %v == %v", p.Aux, c.Aux) + } else if !deep.Equal(p.Aux, c.Aux) { + t.Errorf("mismatched aux fields: %v != %v", p.Aux, c.Aux) + } +} + +func TestPoint_Clone_Integer(t *testing.T) { + p := &query.IntegerPoint{ + Name: "cpu", + Tags: ParseTags("host=server01"), + Time: 5, + Value: 2, + Aux: []interface{}{float64(45)}, + } + c := p.Clone() + if p == c { + t.Errorf("clone has the same address as the original: %v == %v", p, c) + } + if !deep.Equal(p, c) { + t.Errorf("mismatched point: %s", spew.Sdump(c)) + } + if &p.Aux[0] == &c.Aux[0] { + t.Errorf("aux values share the same address: %v == %v", p.Aux, c.Aux) + } else if !deep.Equal(p.Aux, c.Aux) { + t.Errorf("mismatched aux fields: %v != %v", p.Aux, c.Aux) + } +} + +func TestPoint_Clone_String(t *testing.T) { + p := &query.StringPoint{ + Name: "cpu", + Tags: ParseTags("host=server01"), + Time: 5, + Value: "clone", + Aux: []interface{}{float64(45)}, + } + c := p.Clone() + if p == c { + t.Errorf("clone has the same address as the original: %v == %v", p, c) + } + if !deep.Equal(p, c) { + t.Errorf("mismatched point: %s", spew.Sdump(c)) + } + if &p.Aux[0] == &c.Aux[0] { + t.Errorf("aux values share the same address: %v == %v", p.Aux, c.Aux) + } else if !deep.Equal(p.Aux, c.Aux) { + t.Errorf("mismatched aux fields: %v != %v", p.Aux, c.Aux) + } +} + +func TestPoint_Clone_Boolean(t *testing.T) { + p := &query.BooleanPoint{ + Name: "cpu", + Tags: ParseTags("host=server01"), + Time: 5, + Value: true, + Aux: []interface{}{float64(45)}, + } + c := p.Clone() + if p == c { + t.Errorf("clone has the same address as the original: %v == %v", 
p, c) + } + if !deep.Equal(p, c) { + t.Errorf("mismatched point: %s", spew.Sdump(c)) + } + if &p.Aux[0] == &c.Aux[0] { + t.Errorf("aux values share the same address: %v == %v", p.Aux, c.Aux) + } else if !deep.Equal(p.Aux, c.Aux) { + t.Errorf("mismatched aux fields: %v != %v", p.Aux, c.Aux) + } +} + +func TestPoint_Clone_Nil(t *testing.T) { + var fp *query.FloatPoint + if p := fp.Clone(); p != nil { + t.Errorf("expected nil, got %v", p) + } + + var ip *query.IntegerPoint + if p := ip.Clone(); p != nil { + t.Errorf("expected nil, got %v", p) + } + + var sp *query.StringPoint + if p := sp.Clone(); p != nil { + t.Errorf("expected nil, got %v", p) + } + + var bp *query.BooleanPoint + if p := bp.Clone(); p != nil { + t.Errorf("expected nil, got %v", p) + } +} + +// TestPoint_Fields ensures that no additional fields are added to the point structs. +// This struct is very sensitive and can effect performance unless handled carefully. +// To avoid the struct becoming a dumping ground for every function that needs to store +// miscellaneous information, this test is meant to ensure that new fields don't slip +// into the struct. +func TestPoint_Fields(t *testing.T) { + allowedFields := map[string]bool{ + "Name": true, + "Tags": true, + "Time": true, + "Nil": true, + "Value": true, + "Aux": true, + "Aggregated": true, + } + + for _, typ := range []reflect.Type{ + reflect.TypeOf(query.FloatPoint{}), + reflect.TypeOf(query.IntegerPoint{}), + reflect.TypeOf(query.StringPoint{}), + reflect.TypeOf(query.BooleanPoint{}), + } { + f, ok := typ.FieldByNameFunc(func(name string) bool { + return !allowedFields[name] + }) + if ok { + t.Errorf("found an unallowed field in %s: %s %s", typ, f.Name, f.Type) + } + } +} + +// Ensure that tags can return a unique id. +func TestTags_ID(t *testing.T) { + tags := query.NewTags(map[string]string{"foo": "bar", "baz": "bat"}) + if id := tags.ID(); id != "baz\x00foo\x00bat\x00bar" { + t.Fatalf("unexpected id: %q", id) + } +} + +// Ensure that a subset can be created from a tag set. +func TestTags_Subset(t *testing.T) { + tags := query.NewTags(map[string]string{"a": "0", "b": "1", "c": "2"}) + subset := tags.Subset([]string{"b", "c", "d"}) + if keys := subset.Keys(); !reflect.DeepEqual(keys, []string{"b", "c", "d"}) { + t.Fatalf("unexpected keys: %+v", keys) + } else if v := subset.Value("a"); v != "" { + t.Fatalf("unexpected 'a' value: %s", v) + } else if v := subset.Value("b"); v != "1" { + t.Fatalf("unexpected 'b' value: %s", v) + } else if v := subset.Value("c"); v != "2" { + t.Fatalf("unexpected 'c' value: %s", v) + } else if v := subset.Value("d"); v != "" { + t.Fatalf("unexpected 'd' value: %s", v) + } +} + +// ParseTags returns an instance of Tags for a comma-delimited list of key/values. +func ParseTags(s string) query.Tags { + m := make(map[string]string) + for _, kv := range strings.Split(s, ",") { + a := strings.Split(kv, "=") + m[a[0]] = a[1] + } + return query.NewTags(m) +} diff --git a/vendor/github.com/influxdata/influxdb/query/query.go b/vendor/github.com/influxdata/influxdb/query/query.go new file mode 100644 index 0000000..8d3a44f --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/query/query.go @@ -0,0 +1,7 @@ +package query // import "github.com/influxdata/influxdb/query" + +//go:generate tmpl -data=@tmpldata iterator.gen.go.tmpl +//go:generate tmpl -data=@tmpldata point.gen.go.tmpl +//go:generate tmpl -data=@tmpldata functions.gen.go.tmpl + +//go:generate protoc --gogo_out=. 
internal/internal.proto diff --git a/vendor/github.com/influxdata/influxdb/query/result.go b/vendor/github.com/influxdata/influxdb/query/result.go new file mode 100644 index 0000000..844f259 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/query/result.go @@ -0,0 +1,141 @@ +package query + +import ( + "encoding/json" + "errors" + "fmt" + + "github.com/influxdata/influxdb/models" + "github.com/influxdata/influxql" +) + +const ( + // WarningLevel is the message level for a warning. + WarningLevel = "warning" +) + +// TagSet is a fundamental concept within the query system. It represents a composite series, +// composed of multiple individual series that share a set of tag attributes. +type TagSet struct { + Tags map[string]string + Filters []influxql.Expr + SeriesKeys []string + Key []byte +} + +// AddFilter adds a series-level filter to the Tagset. +func (t *TagSet) AddFilter(key string, filter influxql.Expr) { + t.SeriesKeys = append(t.SeriesKeys, key) + t.Filters = append(t.Filters, filter) +} + +func (t *TagSet) Len() int { return len(t.SeriesKeys) } +func (t *TagSet) Less(i, j int) bool { return t.SeriesKeys[i] < t.SeriesKeys[j] } +func (t *TagSet) Swap(i, j int) { + t.SeriesKeys[i], t.SeriesKeys[j] = t.SeriesKeys[j], t.SeriesKeys[i] + t.Filters[i], t.Filters[j] = t.Filters[j], t.Filters[i] +} + +// Reverse reverses the order of series keys and filters in the TagSet. +func (t *TagSet) Reverse() { + for i, j := 0, len(t.Filters)-1; i < j; i, j = i+1, j-1 { + t.Filters[i], t.Filters[j] = t.Filters[j], t.Filters[i] + t.SeriesKeys[i], t.SeriesKeys[j] = t.SeriesKeys[j], t.SeriesKeys[i] + } +} + +// LimitTagSets returns a tag set list with SLIMIT and SOFFSET applied. +func LimitTagSets(a []*TagSet, slimit, soffset int) []*TagSet { + // Ignore if no limit or offset is specified. + if slimit == 0 && soffset == 0 { + return a + } + + // If offset is beyond the number of tag sets then return nil. + if soffset > len(a) { + return nil + } + + // Clamp limit to the max number of tag sets. + if soffset+slimit > len(a) { + slimit = len(a) - soffset + } + return a[soffset : soffset+slimit] +} + +// Message represents a user-facing message to be included with the result. +type Message struct { + Level string `json:"level"` + Text string `json:"text"` +} + +// ReadOnlyWarning generates a warning message that tells the user the command +// they are using is being used for writing in a read only context. +// +// This is a temporary method while to be used while transitioning to read only +// operations for issue #6290. +func ReadOnlyWarning(stmt string) *Message { + return &Message{ + Level: WarningLevel, + Text: fmt.Sprintf("deprecated use of '%s' in a read only context, please use a POST request instead", stmt), + } +} + +// Result represents a resultset returned from a single statement. +// Rows represents a list of rows that can be sorted consistently by name/tag. +type Result struct { + // StatementID is just the statement's position in the query. It's used + // to combine statement results if they're being buffered in memory. + StatementID int + Series models.Rows + Messages []*Message + Partial bool + Err error +} + +// MarshalJSON encodes the result into JSON. +func (r *Result) MarshalJSON() ([]byte, error) { + // Define a struct that outputs "error" as a string. 
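+ // For example, a result marshals to something like
+ // {"statement_id":0,"series":[...],"messages":[...],"partial":true,"error":"..."};
+ // series, messages, partial, and error are omitted when empty.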
+ var o struct { + StatementID int `json:"statement_id"` + Series []*models.Row `json:"series,omitempty"` + Messages []*Message `json:"messages,omitempty"` + Partial bool `json:"partial,omitempty"` + Err string `json:"error,omitempty"` + } + + // Copy fields to output struct. + o.StatementID = r.StatementID + o.Series = r.Series + o.Messages = r.Messages + o.Partial = r.Partial + if r.Err != nil { + o.Err = r.Err.Error() + } + + return json.Marshal(&o) +} + +// UnmarshalJSON decodes the data into the Result struct +func (r *Result) UnmarshalJSON(b []byte) error { + var o struct { + StatementID int `json:"statement_id"` + Series []*models.Row `json:"series,omitempty"` + Messages []*Message `json:"messages,omitempty"` + Partial bool `json:"partial,omitempty"` + Err string `json:"error,omitempty"` + } + + err := json.Unmarshal(b, &o) + if err != nil { + return err + } + r.StatementID = o.StatementID + r.Series = o.Series + r.Messages = o.Messages + r.Partial = o.Partial + if o.Err != "" { + r.Err = errors.New(o.Err) + } + return nil +} diff --git a/vendor/github.com/influxdata/influxdb/query/select.go b/vendor/github.com/influxdata/influxdb/query/select.go new file mode 100644 index 0000000..fd9fb02 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/query/select.go @@ -0,0 +1,974 @@ +package query + +import ( + "context" + "fmt" + "io" + "sort" + "strings" + "time" + + "github.com/influxdata/influxdb/pkg/tracing" + "github.com/influxdata/influxdb/query/internal/gota" + "github.com/influxdata/influxql" +) + +var DefaultTypeMapper = influxql.MultiTypeMapper( + FunctionTypeMapper{}, + MathTypeMapper{}, +) + +// SelectOptions are options that customize the select call. +type SelectOptions struct { + // Authorizer is used to limit access to data + Authorizer Authorizer + + // Node to exclusively read from. + // If zero, all nodes are used. + NodeID uint64 + + // Maximum number of concurrent series. + MaxSeriesN int + + // Maximum number of points to read from the query. + // This requires the passed in context to have a Monitor that is + // created using WithMonitor. + MaxPointN int + + // Maximum number of buckets for a statement. + MaxBucketsN int +} + +// ShardMapper retrieves and maps shards into an IteratorCreator that can later be +// used for executing queries. +type ShardMapper interface { + MapShards(sources influxql.Sources, t influxql.TimeRange, opt SelectOptions) (ShardGroup, error) +} + +// ShardGroup represents a shard or a collection of shards that can be accessed +// for creating iterators. +// When creating iterators, the resource used for reading the iterators should be +// separate from the resource used to map the shards. When the ShardGroup is closed, +// it should not close any resources associated with the created Iterator. Those +// resources belong to the Iterator and will be closed when the Iterator itself is +// closed. +// The query engine operates under this assumption and will close the shard group +// after creating the iterators, but before the iterators are actually read. +type ShardGroup interface { + IteratorCreator + influxql.FieldMapper + io.Closer +} + +// Select is a prepared statement that is ready to be executed. +type PreparedStatement interface { + // Select creates the Iterators that will be used to read the query. + Select(ctx context.Context) (Cursor, error) + + // Explain outputs the explain plan for this statement. + Explain() (string, error) + + // Close closes the resources associated with this prepared statement. 
+ // This must be called as the mapped shards may hold open resources such + // as network connections. + Close() error +} + +// Prepare will compile the statement with the default compile options and +// then prepare the query. +func Prepare(stmt *influxql.SelectStatement, shardMapper ShardMapper, opt SelectOptions) (PreparedStatement, error) { + c, err := Compile(stmt, CompileOptions{}) + if err != nil { + return nil, err + } + return c.Prepare(shardMapper, opt) +} + +// Select compiles, prepares, and then initiates execution of the query using the +// default compile options. +func Select(ctx context.Context, stmt *influxql.SelectStatement, shardMapper ShardMapper, opt SelectOptions) (Cursor, error) { + s, err := Prepare(stmt, shardMapper, opt) + if err != nil { + return nil, err + } + // Must be deferred so it runs after Select. + defer s.Close() + return s.Select(ctx) +} + +type preparedStatement struct { + stmt *influxql.SelectStatement + opt IteratorOptions + ic interface { + IteratorCreator + io.Closer + } + columns []string + maxPointN int + now time.Time +} + +func (p *preparedStatement) Select(ctx context.Context) (Cursor, error) { + // TODO(jsternberg): Remove this hacky method of propagating now. + // Each level of the query should use a time range discovered during + // compilation, but that requires too large of a refactor at the moment. + ctx = context.WithValue(ctx, "now", p.now) + + opt := p.opt + opt.InterruptCh = ctx.Done() + cur, err := buildCursor(ctx, p.stmt, p.ic, opt) + if err != nil { + return nil, err + } + + // If a monitor exists and we are told there is a maximum number of points, + // register the monitor function. + if m := MonitorFromContext(ctx); m != nil { + if p.maxPointN > 0 { + monitor := PointLimitMonitor(cur, DefaultStatsInterval, p.maxPointN) + m.Monitor(monitor) + } + } + return cur, nil +} + +func (p *preparedStatement) Close() error { + return p.ic.Close() +} + +// buildExprIterator creates an iterator for an expression. 
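+// A *influxql.VarRef is read from each source and merged into a single
+// iterator, while a *influxql.Call is dispatched to buildCallIterator to
+// construct the matching aggregate, selector, or transformation iterator;
+// any other expression type returns an error.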
+func buildExprIterator(ctx context.Context, expr influxql.Expr, ic IteratorCreator, sources influxql.Sources, opt IteratorOptions, selector, writeMode bool) (Iterator, error) { + opt.Expr = expr + b := exprIteratorBuilder{ + ic: ic, + sources: sources, + opt: opt, + selector: selector, + writeMode: writeMode, + } + + switch expr := expr.(type) { + case *influxql.VarRef: + return b.buildVarRefIterator(ctx, expr) + case *influxql.Call: + return b.buildCallIterator(ctx, expr) + default: + return nil, fmt.Errorf("invalid expression type: %T", expr) + } +} + +type exprIteratorBuilder struct { + ic IteratorCreator + sources influxql.Sources + opt IteratorOptions + selector bool + writeMode bool +} + +func (b *exprIteratorBuilder) buildVarRefIterator(ctx context.Context, expr *influxql.VarRef) (Iterator, error) { + inputs := make([]Iterator, 0, len(b.sources)) + if err := func() error { + for _, source := range b.sources { + switch source := source.(type) { + case *influxql.Measurement: + input, err := b.ic.CreateIterator(ctx, source, b.opt) + if err != nil { + return err + } + inputs = append(inputs, input) + case *influxql.SubQuery: + subquery := subqueryBuilder{ + ic: b.ic, + stmt: source.Statement, + } + + input, err := subquery.buildVarRefIterator(ctx, expr, b.opt) + if err != nil { + return err + } else if input != nil { + inputs = append(inputs, input) + } + } + } + return nil + }(); err != nil { + Iterators(inputs).Close() + return nil, err + } + + // Variable references in this section will always go into some call + // iterator. Combine it with a merge iterator. + itr := NewMergeIterator(inputs, b.opt) + if itr == nil { + itr = &nilFloatIterator{} + } + + if b.opt.InterruptCh != nil { + itr = NewInterruptIterator(itr, b.opt.InterruptCh) + } + return itr, nil +} + +func (b *exprIteratorBuilder) buildCallIterator(ctx context.Context, expr *influxql.Call) (Iterator, error) { + // TODO(jsternberg): Refactor this. This section needs to die in a fire. + opt := b.opt + // Eliminate limits and offsets if they were previously set. These are handled by the caller. 
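+ // buildAuxIterator and buildFieldIterator reapply them via NewLimitIterator
+ // once the full iterator tree has been assembled.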
+ opt.Limit, opt.Offset = 0, 0 + switch expr.Name { + case "distinct": + opt.Ordered = true + input, err := buildExprIterator(ctx, expr.Args[0].(*influxql.VarRef), b.ic, b.sources, opt, b.selector, false) + if err != nil { + return nil, err + } + input, err = NewDistinctIterator(input, opt) + if err != nil { + return nil, err + } + return NewIntervalIterator(input, opt), nil + case "sample": + opt.Ordered = true + input, err := buildExprIterator(ctx, expr.Args[0], b.ic, b.sources, opt, b.selector, false) + if err != nil { + return nil, err + } + size := expr.Args[1].(*influxql.IntegerLiteral) + + return newSampleIterator(input, opt, int(size.Val)) + case "holt_winters", "holt_winters_with_fit": + opt.Ordered = true + input, err := buildExprIterator(ctx, expr.Args[0], b.ic, b.sources, opt, b.selector, false) + if err != nil { + return nil, err + } + h := expr.Args[1].(*influxql.IntegerLiteral) + m := expr.Args[2].(*influxql.IntegerLiteral) + + includeFitData := "holt_winters_with_fit" == expr.Name + + interval := opt.Interval.Duration + // Redefine interval to be unbounded to capture all aggregate results + opt.StartTime = influxql.MinTime + opt.EndTime = influxql.MaxTime + opt.Interval = Interval{} + + return newHoltWintersIterator(input, opt, int(h.Val), int(m.Val), includeFitData, interval) + case "derivative", "non_negative_derivative", "difference", "non_negative_difference", "moving_average", "exponential_moving_average", "double_exponential_moving_average", "triple_exponential_moving_average", "relative_strength_index", "triple_exponential_derivative", "kaufmans_efficiency_ratio", "kaufmans_adaptive_moving_average", "chande_momentum_oscillator", "elapsed": + if !opt.Interval.IsZero() { + if opt.Ascending { + opt.StartTime -= int64(opt.Interval.Duration) + } else { + opt.EndTime += int64(opt.Interval.Duration) + } + } + opt.Ordered = true + + input, err := buildExprIterator(ctx, expr.Args[0], b.ic, b.sources, opt, b.selector, false) + if err != nil { + return nil, err + } + + switch expr.Name { + case "derivative", "non_negative_derivative": + interval := opt.DerivativeInterval() + isNonNegative := (expr.Name == "non_negative_derivative") + return newDerivativeIterator(input, opt, interval, isNonNegative) + case "elapsed": + interval := opt.ElapsedInterval() + return newElapsedIterator(input, opt, interval) + case "difference", "non_negative_difference": + isNonNegative := (expr.Name == "non_negative_difference") + return newDifferenceIterator(input, opt, isNonNegative) + case "moving_average": + n := expr.Args[1].(*influxql.IntegerLiteral) + if n.Val > 1 && !opt.Interval.IsZero() { + if opt.Ascending { + opt.StartTime -= int64(opt.Interval.Duration) * (n.Val - 1) + } else { + opt.EndTime += int64(opt.Interval.Duration) * (n.Val - 1) + } + } + return newMovingAverageIterator(input, int(n.Val), opt) + case "exponential_moving_average", "double_exponential_moving_average", "triple_exponential_moving_average", "relative_strength_index", "triple_exponential_derivative": + n := expr.Args[1].(*influxql.IntegerLiteral) + if n.Val > 1 && !opt.Interval.IsZero() { + if opt.Ascending { + opt.StartTime -= int64(opt.Interval.Duration) * (n.Val - 1) + } else { + opt.EndTime += int64(opt.Interval.Duration) * (n.Val - 1) + } + } + + nHold := -1 + if len(expr.Args) >= 3 { + nHold = int(expr.Args[2].(*influxql.IntegerLiteral).Val) + } + + warmupType := gota.WarmEMA + if len(expr.Args) >= 4 { + if warmupType, err = gota.ParseWarmupType(expr.Args[3].(*influxql.StringLiteral).Val); err != nil { + return 
nil, err + } + } + + switch expr.Name { + case "exponential_moving_average": + return newExponentialMovingAverageIterator(input, int(n.Val), nHold, warmupType, opt) + case "double_exponential_moving_average": + return newDoubleExponentialMovingAverageIterator(input, int(n.Val), nHold, warmupType, opt) + case "triple_exponential_moving_average": + return newTripleExponentialMovingAverageIterator(input, int(n.Val), nHold, warmupType, opt) + case "relative_strength_index": + return newRelativeStrengthIndexIterator(input, int(n.Val), nHold, warmupType, opt) + case "triple_exponential_derivative": + return newTripleExponentialDerivativeIterator(input, int(n.Val), nHold, warmupType, opt) + } + case "kaufmans_efficiency_ratio", "kaufmans_adaptive_moving_average": + n := expr.Args[1].(*influxql.IntegerLiteral) + if n.Val > 1 && !opt.Interval.IsZero() { + if opt.Ascending { + opt.StartTime -= int64(opt.Interval.Duration) * (n.Val - 1) + } else { + opt.EndTime += int64(opt.Interval.Duration) * (n.Val - 1) + } + } + + nHold := -1 + if len(expr.Args) >= 3 { + nHold = int(expr.Args[2].(*influxql.IntegerLiteral).Val) + } + + switch expr.Name { + case "kaufmans_efficiency_ratio": + return newKaufmansEfficiencyRatioIterator(input, int(n.Val), nHold, opt) + case "kaufmans_adaptive_moving_average": + return newKaufmansAdaptiveMovingAverageIterator(input, int(n.Val), nHold, opt) + } + case "chande_momentum_oscillator": + n := expr.Args[1].(*influxql.IntegerLiteral) + if n.Val > 1 && !opt.Interval.IsZero() { + if opt.Ascending { + opt.StartTime -= int64(opt.Interval.Duration) * (n.Val - 1) + } else { + opt.EndTime += int64(opt.Interval.Duration) * (n.Val - 1) + } + } + + nHold := -1 + if len(expr.Args) >= 3 { + nHold = int(expr.Args[2].(*influxql.IntegerLiteral).Val) + } + + warmupType := gota.WarmupType(-1) + if len(expr.Args) >= 4 { + wt := expr.Args[3].(*influxql.StringLiteral).Val + if wt != "none" { + if warmupType, err = gota.ParseWarmupType(wt); err != nil { + return nil, err + } + } + } + + return newChandeMomentumOscillatorIterator(input, int(n.Val), nHold, warmupType, opt) + } + panic(fmt.Sprintf("invalid series aggregate function: %s", expr.Name)) + case "cumulative_sum": + opt.Ordered = true + input, err := buildExprIterator(ctx, expr.Args[0], b.ic, b.sources, opt, b.selector, false) + if err != nil { + return nil, err + } + return newCumulativeSumIterator(input, opt) + case "integral": + opt.Ordered = true + input, err := buildExprIterator(ctx, expr.Args[0].(*influxql.VarRef), b.ic, b.sources, opt, false, false) + if err != nil { + return nil, err + } + interval := opt.IntegralInterval() + return newIntegralIterator(input, opt, interval) + case "top": + if len(expr.Args) < 2 { + return nil, fmt.Errorf("top() requires 2 or more arguments, got %d", len(expr.Args)) + } + + var input Iterator + if len(expr.Args) > 2 { + // Create a max iterator using the groupings in the arguments. 
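+ // For example, assuming a call like top(value, host, 3), the inner iterator
+ // computes max(value) grouped by host and the top iterator then keeps the
+ // three largest points.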
+ dims := make(map[string]struct{}, len(expr.Args)-2+len(opt.GroupBy)) + for i := 1; i < len(expr.Args)-1; i++ { + ref := expr.Args[i].(*influxql.VarRef) + dims[ref.Val] = struct{}{} + } + for dim := range opt.GroupBy { + dims[dim] = struct{}{} + } + + call := &influxql.Call{ + Name: "max", + Args: expr.Args[:1], + } + callOpt := opt + callOpt.Expr = call + callOpt.GroupBy = dims + callOpt.Fill = influxql.NoFill + + builder := *b + builder.opt = callOpt + builder.selector = true + builder.writeMode = false + + i, err := builder.callIterator(ctx, call, callOpt) + if err != nil { + return nil, err + } + input = i + } else { + // There are no arguments so do not organize the points by tags. + builder := *b + builder.opt.Expr = expr.Args[0] + builder.selector = true + builder.writeMode = false + + ref := expr.Args[0].(*influxql.VarRef) + i, err := builder.buildVarRefIterator(ctx, ref) + if err != nil { + return nil, err + } + input = i + } + + n := expr.Args[len(expr.Args)-1].(*influxql.IntegerLiteral) + return newTopIterator(input, opt, int(n.Val), b.writeMode) + case "bottom": + if len(expr.Args) < 2 { + return nil, fmt.Errorf("bottom() requires 2 or more arguments, got %d", len(expr.Args)) + } + + var input Iterator + if len(expr.Args) > 2 { + // Create a max iterator using the groupings in the arguments. + dims := make(map[string]struct{}, len(expr.Args)-2) + for i := 1; i < len(expr.Args)-1; i++ { + ref := expr.Args[i].(*influxql.VarRef) + dims[ref.Val] = struct{}{} + } + for dim := range opt.GroupBy { + dims[dim] = struct{}{} + } + + call := &influxql.Call{ + Name: "min", + Args: expr.Args[:1], + } + callOpt := opt + callOpt.Expr = call + callOpt.GroupBy = dims + callOpt.Fill = influxql.NoFill + + builder := *b + builder.opt = callOpt + builder.selector = true + builder.writeMode = false + + i, err := builder.callIterator(ctx, call, callOpt) + if err != nil { + return nil, err + } + input = i + } else { + // There are no arguments so do not organize the points by tags. 
+ builder := *b + builder.opt.Expr = expr.Args[0] + builder.selector = true + builder.writeMode = false + + ref := expr.Args[0].(*influxql.VarRef) + i, err := builder.buildVarRefIterator(ctx, ref) + if err != nil { + return nil, err + } + input = i + } + + n := expr.Args[len(expr.Args)-1].(*influxql.IntegerLiteral) + return newBottomIterator(input, b.opt, int(n.Val), b.writeMode) + } + + itr, err := func() (Iterator, error) { + switch expr.Name { + case "count": + switch arg0 := expr.Args[0].(type) { + case *influxql.Call: + if arg0.Name == "distinct" { + input, err := buildExprIterator(ctx, arg0, b.ic, b.sources, opt, b.selector, false) + if err != nil { + return nil, err + } + return newCountIterator(input, opt) + } + } + fallthrough + case "min", "max", "sum", "first", "last", "mean": + return b.callIterator(ctx, expr, opt) + case "median": + opt.Ordered = true + input, err := buildExprIterator(ctx, expr.Args[0].(*influxql.VarRef), b.ic, b.sources, opt, false, false) + if err != nil { + return nil, err + } + return newMedianIterator(input, opt) + case "mode": + input, err := buildExprIterator(ctx, expr.Args[0].(*influxql.VarRef), b.ic, b.sources, opt, false, false) + if err != nil { + return nil, err + } + return NewModeIterator(input, opt) + case "stddev": + input, err := buildExprIterator(ctx, expr.Args[0].(*influxql.VarRef), b.ic, b.sources, opt, false, false) + if err != nil { + return nil, err + } + return newStddevIterator(input, opt) + case "spread": + // OPTIMIZE(benbjohnson): convert to map/reduce + input, err := buildExprIterator(ctx, expr.Args[0].(*influxql.VarRef), b.ic, b.sources, opt, false, false) + if err != nil { + return nil, err + } + return newSpreadIterator(input, opt) + case "percentile": + opt.Ordered = true + input, err := buildExprIterator(ctx, expr.Args[0].(*influxql.VarRef), b.ic, b.sources, opt, false, false) + if err != nil { + return nil, err + } + var percentile float64 + switch arg := expr.Args[1].(type) { + case *influxql.NumberLiteral: + percentile = arg.Val + case *influxql.IntegerLiteral: + percentile = float64(arg.Val) + } + return newPercentileIterator(input, opt, percentile) + default: + return nil, fmt.Errorf("unsupported call: %s", expr.Name) + } + }() + + if err != nil { + return nil, err + } + + if !b.selector || !opt.Interval.IsZero() { + itr = NewIntervalIterator(itr, opt) + if !opt.Interval.IsZero() && opt.Fill != influxql.NoFill { + itr = NewFillIterator(itr, expr, opt) + } + } + if opt.InterruptCh != nil { + itr = NewInterruptIterator(itr, opt.InterruptCh) + } + return itr, nil +} + +func (b *exprIteratorBuilder) callIterator(ctx context.Context, expr *influxql.Call, opt IteratorOptions) (Iterator, error) { + inputs := make([]Iterator, 0, len(b.sources)) + if err := func() error { + for _, source := range b.sources { + switch source := source.(type) { + case *influxql.Measurement: + input, err := b.ic.CreateIterator(ctx, source, opt) + if err != nil { + return err + } + inputs = append(inputs, input) + case *influxql.SubQuery: + // Identify the name of the field we are using. + arg0 := expr.Args[0].(*influxql.VarRef) + + input, err := buildExprIterator(ctx, arg0, b.ic, []influxql.Source{source}, opt, b.selector, false) + if err != nil { + return err + } + + // Wrap the result in a call iterator. 
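+ // The call iterator computes the outer aggregate (carried in opt.Expr) over
+ // the points produced by the subquery's field iterator.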
+ i, err := NewCallIterator(input, opt) + if err != nil { + input.Close() + return err + } + inputs = append(inputs, i) + } + } + return nil + }(); err != nil { + Iterators(inputs).Close() + return nil, err + } + + itr, err := Iterators(inputs).Merge(opt) + if err != nil { + Iterators(inputs).Close() + return nil, err + } else if itr == nil { + itr = &nilFloatIterator{} + } + return itr, nil +} + +func buildCursor(ctx context.Context, stmt *influxql.SelectStatement, ic IteratorCreator, opt IteratorOptions) (Cursor, error) { + span := tracing.SpanFromContext(ctx) + if span != nil { + span = span.StartSpan("build_cursor") + defer span.Finish() + + span.SetLabels("statement", stmt.String()) + ctx = tracing.NewContextWithSpan(ctx, span) + } + + switch opt.Fill { + case influxql.NumberFill: + if v, ok := opt.FillValue.(int); ok { + opt.FillValue = int64(v) + } + case influxql.PreviousFill: + opt.FillValue = SkipDefault + } + + fields := make([]*influxql.Field, 0, len(stmt.Fields)+1) + if !stmt.OmitTime { + // Add a field with the variable "time" if we have not omitted time. + fields = append(fields, &influxql.Field{ + Expr: &influxql.VarRef{ + Val: "time", + Type: influxql.Time, + }, + }) + } + + // Iterate through each of the fields to add them to the value mapper. + valueMapper := newValueMapper() + for _, f := range stmt.Fields { + fields = append(fields, valueMapper.Map(f)) + + // If the field is a top() or bottom() call, we need to also add + // the extra variables if we are not writing into a target. + if stmt.Target != nil { + continue + } + + switch expr := f.Expr.(type) { + case *influxql.Call: + if expr.Name == "top" || expr.Name == "bottom" { + for i := 1; i < len(expr.Args)-1; i++ { + nf := influxql.Field{Expr: expr.Args[i]} + fields = append(fields, valueMapper.Map(&nf)) + } + } + } + } + + // Set the aliases on each of the columns to what the final name should be. + columns := stmt.ColumnNames() + for i, f := range fields { + f.Alias = columns[i] + } + + // Retrieve the refs to retrieve the auxiliary fields. + var auxKeys []influxql.VarRef + if len(valueMapper.refs) > 0 { + opt.Aux = make([]influxql.VarRef, 0, len(valueMapper.refs)) + for ref := range valueMapper.refs { + opt.Aux = append(opt.Aux, *ref) + } + sort.Sort(influxql.VarRefs(opt.Aux)) + + auxKeys = make([]influxql.VarRef, len(opt.Aux)) + for i, ref := range opt.Aux { + auxKeys[i] = valueMapper.symbols[ref.String()] + } + } + + // If there are no calls, then produce an auxiliary cursor. + if len(valueMapper.calls) == 0 { + // If all of the auxiliary keys are of an unknown type, + // do not construct the iterator and return a null cursor. + if !hasValidType(auxKeys) { + return newNullCursor(fields), nil + } + + itr, err := buildAuxIterator(ctx, ic, stmt.Sources, opt) + if err != nil { + return nil, err + } + + // Create a slice with an empty first element. + keys := []influxql.VarRef{{}} + keys = append(keys, auxKeys...) + + scanner := NewIteratorScanner(itr, keys, opt.FillValue) + return newScannerCursor(scanner, fields, opt), nil + } + + // Check to see if this is a selector statement. + // It is a selector if it is the only selector call and the call itself + // is a selector. + selector := len(valueMapper.calls) == 1 + if selector { + for call := range valueMapper.calls { + if !influxql.IsSelector(call) { + selector = false + } + } + } + + // Produce an iterator for every single call and create an iterator scanner + // associated with it. 
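+ // A single scanner produces a plain scanner cursor below, multiple scanners
+ // are combined by newMultiScannerCursor, and a null cursor is returned if
+ // every call was skipped because its type is unknown.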
+ scanners := make([]IteratorScanner, 0, len(valueMapper.calls)) + for call := range valueMapper.calls { + driver := valueMapper.table[call] + if driver.Type == influxql.Unknown { + // The primary driver of this call is of unknown type, so skip this. + continue + } + + itr, err := buildFieldIterator(ctx, call, ic, stmt.Sources, opt, selector, stmt.Target != nil) + if err != nil { + for _, s := range scanners { + s.Close() + } + return nil, err + } + + keys := make([]influxql.VarRef, 0, len(auxKeys)+1) + keys = append(keys, driver) + keys = append(keys, auxKeys...) + + scanner := NewIteratorScanner(itr, keys, opt.FillValue) + scanners = append(scanners, scanner) + } + + if len(scanners) == 0 { + return newNullCursor(fields), nil + } else if len(scanners) == 1 { + return newScannerCursor(scanners[0], fields, opt), nil + } + return newMultiScannerCursor(scanners, fields, opt), nil +} + +func buildAuxIterator(ctx context.Context, ic IteratorCreator, sources influxql.Sources, opt IteratorOptions) (Iterator, error) { + span := tracing.SpanFromContext(ctx) + if span != nil { + span = span.StartSpan("iterator_scanner") + defer span.Finish() + + auxFieldNames := make([]string, len(opt.Aux)) + for i, ref := range opt.Aux { + auxFieldNames[i] = ref.String() + } + span.SetLabels("auxiliary_fields", strings.Join(auxFieldNames, ", ")) + ctx = tracing.NewContextWithSpan(ctx, span) + } + + inputs := make([]Iterator, 0, len(sources)) + if err := func() error { + for _, source := range sources { + switch source := source.(type) { + case *influxql.Measurement: + input, err := ic.CreateIterator(ctx, source, opt) + if err != nil { + return err + } + inputs = append(inputs, input) + case *influxql.SubQuery: + b := subqueryBuilder{ + ic: ic, + stmt: source.Statement, + } + + input, err := b.buildAuxIterator(ctx, opt) + if err != nil { + return err + } else if input != nil { + inputs = append(inputs, input) + } + } + } + return nil + }(); err != nil { + Iterators(inputs).Close() + return nil, err + } + + // Merge iterators to read auxilary fields. + input, err := Iterators(inputs).Merge(opt) + if err != nil { + Iterators(inputs).Close() + return nil, err + } else if input == nil { + input = &nilFloatIterator{} + } + + // Filter out duplicate rows, if required. + if opt.Dedupe { + // If there is no group by and it is a float iterator, see if we can use a fast dedupe. + if itr, ok := input.(FloatIterator); ok && len(opt.Dimensions) == 0 { + if sz := len(opt.Aux); sz > 0 && sz < 3 { + input = newFloatFastDedupeIterator(itr) + } else { + input = NewDedupeIterator(itr) + } + } else { + input = NewDedupeIterator(input) + } + } + // Apply limit & offset. + if opt.Limit > 0 || opt.Offset > 0 { + input = NewLimitIterator(input, opt) + } + return input, nil +} + +func buildFieldIterator(ctx context.Context, expr influxql.Expr, ic IteratorCreator, sources influxql.Sources, opt IteratorOptions, selector, writeMode bool) (Iterator, error) { + span := tracing.SpanFromContext(ctx) + if span != nil { + span = span.StartSpan("iterator_scanner") + defer span.Finish() + + labels := []string{"expr", expr.String()} + if len(opt.Aux) > 0 { + auxFieldNames := make([]string, len(opt.Aux)) + for i, ref := range opt.Aux { + auxFieldNames[i] = ref.String() + } + labels = append(labels, "auxiliary_fields", strings.Join(auxFieldNames, ", ")) + } + span.SetLabels(labels...) 
+ ctx = tracing.NewContextWithSpan(ctx, span) + } + + input, err := buildExprIterator(ctx, expr, ic, sources, opt, selector, writeMode) + if err != nil { + return nil, err + } + + // Apply limit & offset. + if opt.Limit > 0 || opt.Offset > 0 { + input = NewLimitIterator(input, opt) + } + return input, nil +} + +type valueMapper struct { + // An index that maps a node's string output to its symbol so that all + // nodes with the same signature are mapped the same. + symbols map[string]influxql.VarRef + // An index that maps a specific expression to a symbol. This ensures that + // only expressions that were mapped get symbolized. + table map[influxql.Expr]influxql.VarRef + // A collection of all of the calls in the table. + calls map[*influxql.Call]struct{} + // A collection of all of the calls in the table. + refs map[*influxql.VarRef]struct{} + i int +} + +func newValueMapper() *valueMapper { + return &valueMapper{ + symbols: make(map[string]influxql.VarRef), + table: make(map[influxql.Expr]influxql.VarRef), + calls: make(map[*influxql.Call]struct{}), + refs: make(map[*influxql.VarRef]struct{}), + } +} + +func (v *valueMapper) Map(field *influxql.Field) *influxql.Field { + clone := *field + clone.Expr = influxql.CloneExpr(field.Expr) + + influxql.Walk(v, clone.Expr) + clone.Expr = influxql.RewriteExpr(clone.Expr, v.rewriteExpr) + return &clone +} + +func (v *valueMapper) Visit(n influxql.Node) influxql.Visitor { + expr, ok := n.(influxql.Expr) + if !ok { + return v + } + + key := expr.String() + symbol, ok := v.symbols[key] + if !ok { + // This symbol has not been assigned yet. + // If this is a call or expression, mark the node + // as stored in the symbol table. + switch n := n.(type) { + case *influxql.Call: + if isMathFunction(n) { + return v + } + v.calls[n] = struct{}{} + case *influxql.VarRef: + v.refs[n] = struct{}{} + default: + return v + } + + // Determine the symbol name and the symbol type. + symbolName := fmt.Sprintf("val%d", v.i) + valuer := influxql.TypeValuerEval{ + TypeMapper: DefaultTypeMapper, + } + typ, _ := valuer.EvalType(expr) + + symbol = influxql.VarRef{ + Val: symbolName, + Type: typ, + } + + // Assign this symbol to the symbol table if it is not presently there + // and increment the value index number. + v.symbols[key] = symbol + v.i++ + } + // Store the symbol for this expression so we can later rewrite + // the query correctly. + v.table[expr] = symbol + return nil +} + +func (v *valueMapper) rewriteExpr(expr influxql.Expr) influxql.Expr { + symbol, ok := v.table[expr] + if !ok { + return expr + } + return &symbol +} + +func validateTypes(stmt *influxql.SelectStatement) error { + valuer := influxql.TypeValuerEval{ + TypeMapper: influxql.MultiTypeMapper( + FunctionTypeMapper{}, + MathTypeMapper{}, + ), + } + for _, f := range stmt.Fields { + if _, err := valuer.EvalType(f.Expr); err != nil { + return err + } + } + return nil +} + +// hasValidType returns true if there is at least one non-unknown type +// in the slice. 
+func hasValidType(refs []influxql.VarRef) bool { + for _, ref := range refs { + if ref.Type != influxql.Unknown { + return true + } + } + return false +} diff --git a/vendor/github.com/influxdata/influxdb/query/select_test.go b/vendor/github.com/influxdata/influxdb/query/select_test.go new file mode 100644 index 0000000..32d7afb --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/query/select_test.go @@ -0,0 +1,4197 @@ +package query_test + +import ( + "context" + "fmt" + "math/rand" + "reflect" + "testing" + "time" + + "github.com/davecgh/go-spew/spew" + "github.com/google/go-cmp/cmp" + "github.com/influxdata/influxdb/query" + "github.com/influxdata/influxql" +) + +// Second represents a helper for type converting durations. +const Second = int64(time.Second) + +func TestSelect(t *testing.T) { + for _, tt := range []struct { + name string + q string + typ influxql.DataType + fields map[string]influxql.DataType + expr string + itrs []query.Iterator + rows []query.Row + now time.Time + err string + }{ + { + name: "Min", + q: `SELECT min(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`, + typ: influxql.Float, + expr: `min(value::float)`, + itrs: []query.Iterator{ + &FloatIterator{Points: []query.FloatPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 3}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100}, + }}, + &FloatIterator{Points: []query.FloatPoint{ + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2}, + }}, + &FloatIterator{Points: []query.FloatPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10}, + }}, + }, + rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(19)}}, + {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(2)}}, + {Time: 30 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(100)}}, + {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{float64(10)}}, + }, + }, + { + name: "Distinct_Float", + q: `SELECT distinct(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`, + typ: influxql.Float, + itrs: []query.Iterator{ + &FloatIterator{Points: []query.FloatPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 1 * Second, Value: 19}, + }}, + &FloatIterator{Points: []query.FloatPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10}, + }}, + &FloatIterator{Points: []query.FloatPoint{ + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 11 * Second, Value: 2}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 12 * Second, Value: 2}, + }}, + }, + rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: 
[]interface{}{float64(20)}}, + {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(19)}}, + {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(2)}}, + {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{float64(10)}}, + }, + }, + { + name: "Distinct_Integer", + q: `SELECT distinct(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`, + typ: influxql.Integer, + itrs: []query.Iterator{ + &IntegerIterator{Points: []query.IntegerPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 1 * Second, Value: 19}, + }}, + &IntegerIterator{Points: []query.IntegerPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10}, + }}, + &IntegerIterator{Points: []query.IntegerPoint{ + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 11 * Second, Value: 2}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 12 * Second, Value: 2}, + }}, + }, + rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{int64(20)}}, + {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{int64(19)}}, + {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{int64(2)}}, + {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{int64(10)}}, + }, + }, + { + name: "Distinct_Unsigned", + q: `SELECT distinct(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`, + typ: influxql.Unsigned, + itrs: []query.Iterator{ + &UnsignedIterator{Points: []query.UnsignedPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 1 * Second, Value: 19}, + }}, + &UnsignedIterator{Points: []query.UnsignedPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10}, + }}, + &UnsignedIterator{Points: []query.UnsignedPoint{ + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 11 * Second, Value: 2}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 12 * Second, Value: 2}, + }}, + }, + rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{uint64(20)}}, + {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{uint64(19)}}, + {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{uint64(2)}}, + {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{uint64(10)}}, + }, + }, + { + name: "Distinct_String", + q: `SELECT distinct(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP 
BY time(10s), host fill(none)`, + typ: influxql.String, + itrs: []query.Iterator{ + &StringIterator{Points: []query.StringPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: "a"}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 1 * Second, Value: "b"}, + }}, + &StringIterator{Points: []query.StringPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: "c"}, + }}, + &StringIterator{Points: []query.StringPoint{ + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: "b"}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: "d"}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 11 * Second, Value: "d"}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 12 * Second, Value: "d"}, + }}, + }, + rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{"a"}}, + {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{"b"}}, + {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{"d"}}, + {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{"c"}}, + }, + }, + { + name: "Distinct_Boolean", + q: `SELECT distinct(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`, + typ: influxql.Boolean, + itrs: []query.Iterator{ + &BooleanIterator{Points: []query.BooleanPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: true}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 1 * Second, Value: false}, + }}, + &BooleanIterator{Points: []query.BooleanPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: false}, + }}, + &BooleanIterator{Points: []query.BooleanPoint{ + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: true}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: false}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 11 * Second, Value: false}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 12 * Second, Value: true}, + }}, + }, + rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{true}}, + {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{false}}, + {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{false}}, + {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{true}}, + {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{false}}, + }, + }, + { + name: "Mean_Float", + q: `SELECT mean(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`, + typ: influxql.Float, + expr: `mean(value::float)`, + itrs: []query.Iterator{ + &FloatIterator{Points: []query.FloatPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 3}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100}, + }}, + &FloatIterator{Points: []query.FloatPoint{ + 
{Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 50 * Second, Value: 1}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 51 * Second, Value: 2}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 52 * Second, Value: 4}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 4}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 5}, + }}, + &FloatIterator{Points: []query.FloatPoint{ + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2}, + }}, + }, + rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{19.5}}, + {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{2.5}}, + {Time: 30 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(100)}}, + {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{float64(10)}}, + {Time: 50 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{3.2}}, + }, + }, + { + name: "Mean_Integer", + q: `SELECT mean(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`, + typ: influxql.Integer, + expr: `mean(value::integer)`, + itrs: []query.Iterator{ + &IntegerIterator{Points: []query.IntegerPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 3}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100}, + }}, + &IntegerIterator{Points: []query.IntegerPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 50 * Second, Value: 1}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 51 * Second, Value: 2}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 52 * Second, Value: 4}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 4}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 5}, + }}, + &IntegerIterator{Points: []query.IntegerPoint{ + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2}, + }}, + }, + rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{19.5}}, + {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{2.5}}, + {Time: 30 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(100)}}, + {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{float64(10)}}, + {Time: 50 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{3.2}}, + }, + }, + { + name: "Mean_Unsigned", + q: `SELECT mean(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`, + typ: influxql.Unsigned, + expr: 
`mean(value::Unsigned)`, + itrs: []query.Iterator{ + &UnsignedIterator{Points: []query.UnsignedPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 3}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100}, + }}, + &UnsignedIterator{Points: []query.UnsignedPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 50 * Second, Value: 1}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 51 * Second, Value: 2}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 52 * Second, Value: 4}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 4}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 5}, + }}, + &UnsignedIterator{Points: []query.UnsignedPoint{ + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2}, + }}, + }, + rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{19.5}}, + {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{2.5}}, + {Time: 30 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(100)}}, + {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{float64(10)}}, + {Time: 50 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{3.2}}, + }, + }, + { + name: "Mean_String", + q: `SELECT mean(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`, + typ: influxql.String, + itrs: []query.Iterator{&StringIterator{}}, + err: `unsupported mean iterator type: *query_test.StringIterator`, + }, + { + name: "Mean_Boolean", + q: `SELECT mean(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`, + typ: influxql.Boolean, + itrs: []query.Iterator{&BooleanIterator{}}, + err: `unsupported mean iterator type: *query_test.BooleanIterator`, + }, + { + name: "Median_Float", + q: `SELECT median(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`, + typ: influxql.Float, + itrs: []query.Iterator{ + &FloatIterator{Points: []query.FloatPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 3}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100}, + }}, + &FloatIterator{Points: []query.FloatPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 50 * Second, Value: 1}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 51 * Second, Value: 2}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 52 * Second, Value: 3}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 4}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 5}, + }}, + &FloatIterator{Points: []query.FloatPoint{ + {Name: 
"cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2}, + }}, + }, + rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{19.5}}, + {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{2.5}}, + {Time: 30 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(100)}}, + {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{float64(10)}}, + {Time: 50 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{float64(3)}}, + }, + }, + { + name: "Median_Integer", + q: `SELECT median(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`, + typ: influxql.Integer, + itrs: []query.Iterator{ + &IntegerIterator{Points: []query.IntegerPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 3}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100}, + }}, + &IntegerIterator{Points: []query.IntegerPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 50 * Second, Value: 1}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 51 * Second, Value: 2}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 52 * Second, Value: 3}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 4}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 5}, + }}, + &IntegerIterator{Points: []query.IntegerPoint{ + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2}, + }}, + }, + rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{19.5}}, + {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{2.5}}, + {Time: 30 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(100)}}, + {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{float64(10)}}, + {Time: 50 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{float64(3)}}, + }, + }, + { + name: "Median_Unsigned", + q: `SELECT median(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`, + typ: influxql.Unsigned, + itrs: []query.Iterator{ + &UnsignedIterator{Points: []query.UnsignedPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 3}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100}, + }}, + &UnsignedIterator{Points: []query.UnsignedPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 50 * Second, Value: 1}, + {Name: "cpu", Tags: 
ParseTags("region=west,host=B"), Time: 51 * Second, Value: 2}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 52 * Second, Value: 3}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 4}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 5}, + }}, + &UnsignedIterator{Points: []query.UnsignedPoint{ + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2}, + }}, + }, + rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{19.5}}, + {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{2.5}}, + {Time: 30 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(100)}}, + {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{float64(10)}}, + {Time: 50 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{float64(3)}}, + }, + }, + { + name: "Median_String", + q: `SELECT median(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`, + typ: influxql.String, + itrs: []query.Iterator{&StringIterator{}}, + err: `unsupported median iterator type: *query_test.StringIterator`, + }, + { + name: "Median_Boolean", + q: `SELECT median(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`, + typ: influxql.Boolean, + itrs: []query.Iterator{&BooleanIterator{}}, + err: `unsupported median iterator type: *query_test.BooleanIterator`, + }, + { + name: "Mode_Float", + q: `SELECT mode(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`, + typ: influxql.Float, + itrs: []query.Iterator{ + &FloatIterator{Points: []query.FloatPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 10}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 2}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100}, + }}, + &FloatIterator{Points: []query.FloatPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 50 * Second, Value: 1}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 51 * Second, Value: 2}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 52 * Second, Value: 3}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 4}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 5}, + }}, + &FloatIterator{Points: []query.FloatPoint{ + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2}, + }}, + }, + rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(10)}}, + {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(2)}}, + {Time: 30 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(100)}}, + {Time: 0 * Second, Series: 
query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{float64(10)}}, + {Time: 50 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{float64(1)}}, + }, + }, + { + name: "Mode_Integer", + q: `SELECT mode(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`, + typ: influxql.Integer, + itrs: []query.Iterator{ + &IntegerIterator{Points: []query.IntegerPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 10}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 2}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100}, + }}, + &IntegerIterator{Points: []query.IntegerPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 50 * Second, Value: 1}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 51 * Second, Value: 2}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 52 * Second, Value: 3}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 4}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 54 * Second, Value: 5}, + }}, + &IntegerIterator{Points: []query.IntegerPoint{ + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2}, + }}, + }, + rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{int64(10)}}, + {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{int64(2)}}, + {Time: 30 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{int64(100)}}, + {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{int64(10)}}, + {Time: 50 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{int64(1)}}, + }, + }, + { + name: "Mode_Unsigned", + q: `SELECT mode(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`, + typ: influxql.Unsigned, + itrs: []query.Iterator{ + &UnsignedIterator{Points: []query.UnsignedPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 10}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 2}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100}, + }}, + &UnsignedIterator{Points: []query.UnsignedPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 50 * Second, Value: 1}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 51 * Second, Value: 2}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 52 * Second, Value: 3}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 4}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 54 * Second, Value: 5}, + }}, + &UnsignedIterator{Points: []query.UnsignedPoint{ + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2}, + }}, + }, + rows: []query.Row{ + {Time: 0 * Second, 
Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{uint64(10)}}, + {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{uint64(2)}}, + {Time: 30 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{uint64(100)}}, + {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{uint64(10)}}, + {Time: 50 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{uint64(1)}}, + }, + }, + { + name: "Mode_String", + q: `SELECT mode(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`, + typ: influxql.String, + itrs: []query.Iterator{ + &StringIterator{Points: []query.StringPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: "a"}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 1 * Second, Value: "a"}, + }}, + &StringIterator{Points: []query.StringPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: "cxxx"}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 6 * Second, Value: "zzzz"}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 7 * Second, Value: "zzzz"}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 8 * Second, Value: "zxxx"}, + }}, + &StringIterator{Points: []query.StringPoint{ + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: "b"}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: "d"}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 11 * Second, Value: "d"}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 12 * Second, Value: "d"}, + }}, + }, + rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{"a"}}, + {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{"d"}}, + {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{"zzzz"}}, + }, + }, + { + name: "Mode_Boolean", + q: `SELECT mode(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`, + typ: influxql.Boolean, + itrs: []query.Iterator{ + &BooleanIterator{Points: []query.BooleanPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: true}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 1 * Second, Value: false}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 2 * Second, Value: false}, + }}, + &BooleanIterator{Points: []query.BooleanPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: true}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 6 * Second, Value: false}, + }}, + &BooleanIterator{Points: []query.BooleanPoint{ + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: false}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: true}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 11 * Second, Value: false}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 12 * Second, Value: true}, + }}, + }, + rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{false}}, + {Time: 10 * 
Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{true}}, + {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{true}}, + }, + }, + { + name: "Top_NoTags_Float", + q: `SELECT top(value, 2) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(30s), host fill(none)`, + typ: influxql.Float, + itrs: []query.Iterator{ + &FloatIterator{Points: []query.FloatPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 3}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100}, + }}, + &FloatIterator{Points: []query.FloatPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 50 * Second, Value: 1}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 51 * Second, Value: 2}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 52 * Second, Value: 3}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 4}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 5}, + }}, + &FloatIterator{Points: []query.FloatPoint{ + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2}, + }}, + }, + rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(20)}}, + {Time: 9 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(19)}}, + {Time: 31 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(100)}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{float64(10)}}, + {Time: 53 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{float64(5)}}, + {Time: 53 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{float64(4)}}, + }, + }, + { + name: "Top_NoTags_Integer", + q: `SELECT top(value, 2) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(30s), host fill(none)`, + typ: influxql.Integer, + itrs: []query.Iterator{ + &IntegerIterator{Points: []query.IntegerPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 3}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100}, + }}, + &IntegerIterator{Points: []query.IntegerPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 50 * Second, Value: 1}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 51 * Second, Value: 2}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 52 * Second, Value: 3}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 4}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 5}, + }}, + &IntegerIterator{Points: []query.IntegerPoint{ + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19}, + {Name: "cpu", 
Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2}, + }}, + }, + rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{int64(20)}}, + {Time: 9 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{int64(19)}}, + {Time: 31 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{int64(100)}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{int64(10)}}, + {Time: 53 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{int64(5)}}, + {Time: 53 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{int64(4)}}, + }, + }, + { + name: "Top_NoTags_Unsigned", + q: `SELECT top(value, 2) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(30s), host fill(none)`, + typ: influxql.Unsigned, + itrs: []query.Iterator{ + &UnsignedIterator{Points: []query.UnsignedPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 3}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100}, + }}, + &UnsignedIterator{Points: []query.UnsignedPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 50 * Second, Value: 1}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 51 * Second, Value: 2}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 52 * Second, Value: 3}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 4}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 5}, + }}, + &UnsignedIterator{Points: []query.UnsignedPoint{ + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2}, + }}, + }, + rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{uint64(20)}}, + {Time: 9 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{uint64(19)}}, + {Time: 31 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{uint64(100)}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{uint64(10)}}, + {Time: 53 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{uint64(5)}}, + {Time: 53 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{uint64(4)}}, + }, + }, + { + name: "Top_Tags_Float", + q: `SELECT top(value::float, host::tag, 2) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(30s) fill(none)`, + typ: influxql.Float, + expr: `max(value::float)`, + itrs: []query.Iterator{ + &FloatIterator{Points: []query.FloatPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20, Aux: []interface{}{"A"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 3, Aux: []interface{}{"A"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100, Aux: []interface{}{"A"}}, 
+ }}, + &FloatIterator{Points: []query.FloatPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10, Aux: []interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 50 * Second, Value: 1, Aux: []interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 51 * Second, Value: 2, Aux: []interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 52 * Second, Value: 3, Aux: []interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 4, Aux: []interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 5, Aux: []interface{}{"B"}}, + }}, + &FloatIterator{Points: []query.FloatPoint{ + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19, Aux: []interface{}{"A"}}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2, Aux: []interface{}{"A"}}, + }}, + }, + rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(20), "A"}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(10), "B"}}, + {Time: 31 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(100), "A"}}, + {Time: 53 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(5), "B"}}, + }, + }, + { + name: "Top_Tags_Integer", + q: `SELECT top(value::integer, host::tag, 2) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(30s) fill(none)`, + typ: influxql.Integer, + expr: `max(value::integer)`, + itrs: []query.Iterator{ + &IntegerIterator{Points: []query.IntegerPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20, Aux: []interface{}{"A"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 3, Aux: []interface{}{"A"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100, Aux: []interface{}{"A"}}, + }}, + &IntegerIterator{Points: []query.IntegerPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10, Aux: []interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 50 * Second, Value: 1, Aux: []interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 51 * Second, Value: 2, Aux: []interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 52 * Second, Value: 3, Aux: []interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 4, Aux: []interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 5, Aux: []interface{}{"B"}}, + }}, + &IntegerIterator{Points: []query.IntegerPoint{ + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19, Aux: []interface{}{"A"}}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2, Aux: []interface{}{"A"}}, + }}, + }, + rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(20), "A"}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(10), "B"}}, + {Time: 31 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(100), "A"}}, + {Time: 53 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(5), "B"}}, + }, + }, + { + name: "Top_Tags_Unsigned", + q: `SELECT 
top(value::Unsigned, host::tag, 2) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(30s) fill(none)`, + typ: influxql.Unsigned, + expr: `max(value::Unsigned)`, + itrs: []query.Iterator{ + &UnsignedIterator{Points: []query.UnsignedPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20, Aux: []interface{}{"A"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 3, Aux: []interface{}{"A"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100, Aux: []interface{}{"A"}}, + }}, + &UnsignedIterator{Points: []query.UnsignedPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10, Aux: []interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 50 * Second, Value: 1, Aux: []interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 51 * Second, Value: 2, Aux: []interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 52 * Second, Value: 3, Aux: []interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 4, Aux: []interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 5, Aux: []interface{}{"B"}}, + }}, + &UnsignedIterator{Points: []query.UnsignedPoint{ + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19, Aux: []interface{}{"A"}}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2, Aux: []interface{}{"A"}}, + }}, + }, + rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(20), "A"}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(10), "B"}}, + {Time: 31 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(100), "A"}}, + {Time: 53 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(5), "B"}}, + }, + }, + { + name: "Top_GroupByTags_Float", + q: `SELECT top(value::float, host::tag, 1) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY region, time(30s) fill(none)`, + typ: influxql.Float, + expr: `max(value::float)`, + itrs: []query.Iterator{ + &FloatIterator{Points: []query.FloatPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20, Aux: []interface{}{"A"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 3, Aux: []interface{}{"A"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100, Aux: []interface{}{"A"}}, + }}, + &FloatIterator{Points: []query.FloatPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10, Aux: []interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 50 * Second, Value: 1, Aux: []interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 51 * Second, Value: 2, Aux: []interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 52 * Second, Value: 3, Aux: []interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 4, Aux: []interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 5, Aux: []interface{}{"B"}}, + }}, + &FloatIterator{Points: []query.FloatPoint{ + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19, Aux: 
[]interface{}{"A"}}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2, Aux: []interface{}{"A"}}, + }}, + }, + rows: []query.Row{ + {Time: 9 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("region=east")}, Values: []interface{}{float64(19), "A"}}, + {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("region=west")}, Values: []interface{}{float64(20), "A"}}, + {Time: 31 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("region=west")}, Values: []interface{}{float64(100), "A"}}, + }, + }, + { + name: "Top_GroupByTags_Integer", + q: `SELECT top(value::integer, host::tag, 1) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY region, time(30s) fill(none)`, + typ: influxql.Integer, + itrs: []query.Iterator{ + &IntegerIterator{Points: []query.IntegerPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20, Aux: []interface{}{"A"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 3, Aux: []interface{}{"A"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100, Aux: []interface{}{"A"}}, + }}, + &IntegerIterator{Points: []query.IntegerPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10, Aux: []interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 50 * Second, Value: 1, Aux: []interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 51 * Second, Value: 2, Aux: []interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 52 * Second, Value: 3, Aux: []interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 4, Aux: []interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 5, Aux: []interface{}{"B"}}, + }}, + &IntegerIterator{Points: []query.IntegerPoint{ + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19, Aux: []interface{}{"A"}}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2, Aux: []interface{}{"A"}}, + }}, + }, + rows: []query.Row{ + {Time: 9 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("region=east")}, Values: []interface{}{int64(19), "A"}}, + {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("region=west")}, Values: []interface{}{int64(20), "A"}}, + {Time: 31 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("region=west")}, Values: []interface{}{int64(100), "A"}}, + }, + }, + { + name: "Top_GroupByTags_Unsigned", + q: `SELECT top(value::Unsigned, host::tag, 1) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY region, time(30s) fill(none)`, + typ: influxql.Unsigned, + itrs: []query.Iterator{ + &UnsignedIterator{Points: []query.UnsignedPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20, Aux: []interface{}{"A"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 3, Aux: []interface{}{"A"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100, Aux: []interface{}{"A"}}, + }}, + &UnsignedIterator{Points: []query.UnsignedPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10, Aux: []interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 50 * Second, Value: 1, Aux: []interface{}{"B"}}, + {Name: 
"cpu", Tags: ParseTags("region=west,host=B"), Time: 51 * Second, Value: 2, Aux: []interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 52 * Second, Value: 3, Aux: []interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 4, Aux: []interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 5, Aux: []interface{}{"B"}}, + }}, + &UnsignedIterator{Points: []query.UnsignedPoint{ + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19, Aux: []interface{}{"A"}}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2, Aux: []interface{}{"A"}}, + }}, + }, + rows: []query.Row{ + {Time: 9 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("region=east")}, Values: []interface{}{uint64(19), "A"}}, + {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("region=west")}, Values: []interface{}{uint64(20), "A"}}, + {Time: 31 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("region=west")}, Values: []interface{}{uint64(100), "A"}}, + }, + }, + { + name: "Top_AuxFields_Float", + q: `SELECT top(p1, 2), p2, p3 FROM cpu`, + fields: map[string]influxql.DataType{ + "p1": influxql.Float, + "p2": influxql.Float, + "p3": influxql.String, + }, + itrs: []query.Iterator{ + &FloatIterator{Points: []query.FloatPoint{ + {Name: "cpu", Time: 0 * Second, Value: 1, Aux: []interface{}{float64(2), "aaa"}}, + {Name: "cpu", Time: 1 * Second, Value: 2, Aux: []interface{}{float64(3), "bbb"}}, + {Name: "cpu", Time: 2 * Second, Value: 3, Aux: []interface{}{float64(4), "ccc"}}, + {Name: "cpu", Time: 3 * Second, Value: 4, Aux: []interface{}{float64(5), "ddd"}}, + }}, + }, + rows: []query.Row{ + {Time: 2 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(3), float64(4), "ccc"}}, + {Time: 3 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(4), float64(5), "ddd"}}, + }, + }, + { + name: "Top_AuxFields_Integer", + q: `SELECT top(p1, 2), p2, p3 FROM cpu`, + fields: map[string]influxql.DataType{ + "p1": influxql.Integer, + "p2": influxql.Integer, + "p3": influxql.String, + }, + itrs: []query.Iterator{ + &IntegerIterator{Points: []query.IntegerPoint{ + {Name: "cpu", Time: 0 * Second, Value: 1, Aux: []interface{}{int64(2), "aaa"}}, + {Name: "cpu", Time: 1 * Second, Value: 2, Aux: []interface{}{int64(3), "bbb"}}, + {Name: "cpu", Time: 2 * Second, Value: 3, Aux: []interface{}{int64(4), "ccc"}}, + {Name: "cpu", Time: 3 * Second, Value: 4, Aux: []interface{}{int64(5), "ddd"}}, + }}, + }, + rows: []query.Row{ + {Time: 2 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(3), int64(4), "ccc"}}, + {Time: 3 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(4), int64(5), "ddd"}}, + }, + }, + { + name: "Top_AuxFields_Unsigned", + q: `SELECT top(p1, 2), p2, p3 FROM cpu`, + fields: map[string]influxql.DataType{ + "p1": influxql.Unsigned, + "p2": influxql.Unsigned, + "p3": influxql.String, + }, + itrs: []query.Iterator{ + &UnsignedIterator{Points: []query.UnsignedPoint{ + {Name: "cpu", Time: 0 * Second, Value: 1, Aux: []interface{}{uint64(2), "aaa"}}, + {Name: "cpu", Time: 1 * Second, Value: 2, Aux: []interface{}{uint64(3), "bbb"}}, + {Name: "cpu", Time: 2 * Second, Value: 3, Aux: []interface{}{uint64(4), "ccc"}}, + {Name: "cpu", Time: 3 * Second, Value: 4, Aux: []interface{}{uint64(5), "ddd"}}, + }}, + }, + rows: []query.Row{ + {Time: 2 * Second, Series: 
query.Series{Name: "cpu"}, Values: []interface{}{uint64(3), uint64(4), "ccc"}}, + {Time: 3 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(4), uint64(5), "ddd"}}, + }, + }, + { + name: "Bottom_NoTags_Float", + q: `SELECT bottom(value::float, 2) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(30s), host fill(none)`, + typ: influxql.Float, + itrs: []query.Iterator{ + &FloatIterator{Points: []query.FloatPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 3}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100}, + }}, + &FloatIterator{Points: []query.FloatPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 50 * Second, Value: 1}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 51 * Second, Value: 2}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 52 * Second, Value: 3}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 4}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 5}, + }}, + &FloatIterator{Points: []query.FloatPoint{ + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2}, + }}, + }, + rows: []query.Row{ + {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(2)}}, + {Time: 11 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(3)}}, + {Time: 31 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(100)}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{float64(10)}}, + {Time: 50 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{float64(1)}}, + {Time: 51 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{float64(2)}}, + }, + }, + { + name: "Bottom_NoTags_Integer", + q: `SELECT bottom(value::integer, 2) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(30s), host fill(none)`, + typ: influxql.Integer, + itrs: []query.Iterator{ + &IntegerIterator{Points: []query.IntegerPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 3}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100}, + }}, + &IntegerIterator{Points: []query.IntegerPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 50 * Second, Value: 1}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 51 * Second, Value: 2}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 52 * Second, Value: 3}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 4}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 5}, + }}, + &IntegerIterator{Points: []query.IntegerPoint{ + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19}, + {Name: 
"cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2}, + }}, + }, + rows: []query.Row{ + {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{int64(2)}}, + {Time: 11 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{int64(3)}}, + {Time: 31 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{int64(100)}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{int64(10)}}, + {Time: 50 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{int64(1)}}, + {Time: 51 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{int64(2)}}, + }, + }, + { + name: "Bottom_NoTags_Unsigned", + q: `SELECT bottom(value::Unsigned, 2) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(30s), host fill(none)`, + typ: influxql.Unsigned, + itrs: []query.Iterator{ + &UnsignedIterator{Points: []query.UnsignedPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 3}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100}, + }}, + &UnsignedIterator{Points: []query.UnsignedPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 50 * Second, Value: 1}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 51 * Second, Value: 2}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 52 * Second, Value: 3}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 4}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 5}, + }}, + &UnsignedIterator{Points: []query.UnsignedPoint{ + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2}, + }}, + }, + rows: []query.Row{ + {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{uint64(2)}}, + {Time: 11 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{uint64(3)}}, + {Time: 31 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{uint64(100)}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{uint64(10)}}, + {Time: 50 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{uint64(1)}}, + {Time: 51 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{uint64(2)}}, + }, + }, + { + name: "Bottom_Tags_Float", + q: `SELECT bottom(value::float, host::tag, 2) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(30s) fill(none)`, + typ: influxql.Float, + expr: `min(value::float)`, + itrs: []query.Iterator{ + &FloatIterator{Points: []query.FloatPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20, Aux: []interface{}{"A"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 3, Aux: []interface{}{"A"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 
100, Aux: []interface{}{"A"}}, + }}, + &FloatIterator{Points: []query.FloatPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10, Aux: []interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 50 * Second, Value: 1, Aux: []interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 51 * Second, Value: 2, Aux: []interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 52 * Second, Value: 3, Aux: []interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 4, Aux: []interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 5, Aux: []interface{}{"B"}}, + }}, + &FloatIterator{Points: []query.FloatPoint{ + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19, Aux: []interface{}{"A"}}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2, Aux: []interface{}{"A"}}, + }}, + }, + rows: []query.Row{ + {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(10), "B"}}, + {Time: 10 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(2), "A"}}, + {Time: 31 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(100), "A"}}, + {Time: 50 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(1), "B"}}, + }, + }, + { + name: "Bottom_Tags_Integer", + q: `SELECT bottom(value::integer, host::tag, 2) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(30s) fill(none)`, + typ: influxql.Integer, + itrs: []query.Iterator{ + &IntegerIterator{Points: []query.IntegerPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20, Aux: []interface{}{"A"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 3, Aux: []interface{}{"A"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100, Aux: []interface{}{"A"}}, + }}, + &IntegerIterator{Points: []query.IntegerPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10, Aux: []interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 50 * Second, Value: 1, Aux: []interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 51 * Second, Value: 2, Aux: []interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 52 * Second, Value: 3, Aux: []interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 4, Aux: []interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 5, Aux: []interface{}{"B"}}, + }}, + &IntegerIterator{Points: []query.IntegerPoint{ + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19, Aux: []interface{}{"A"}}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2, Aux: []interface{}{"A"}}, + }}, + }, + rows: []query.Row{ + {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(10), "B"}}, + {Time: 10 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(2), "A"}}, + {Time: 31 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(100), "A"}}, + {Time: 50 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(1), "B"}}, + }, + }, + { + name: "Bottom_Tags_Unsigned", + q: `SELECT 
bottom(value::Unsigned, host::tag, 2) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(30s) fill(none)`, + typ: influxql.Unsigned, + itrs: []query.Iterator{ + &UnsignedIterator{Points: []query.UnsignedPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20, Aux: []interface{}{"A"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 3, Aux: []interface{}{"A"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100, Aux: []interface{}{"A"}}, + }}, + &UnsignedIterator{Points: []query.UnsignedPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10, Aux: []interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 50 * Second, Value: 1, Aux: []interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 51 * Second, Value: 2, Aux: []interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 52 * Second, Value: 3, Aux: []interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 4, Aux: []interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 5, Aux: []interface{}{"B"}}, + }}, + &UnsignedIterator{Points: []query.UnsignedPoint{ + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19, Aux: []interface{}{"A"}}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2, Aux: []interface{}{"A"}}, + }}, + }, + rows: []query.Row{ + {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(10), "B"}}, + {Time: 10 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(2), "A"}}, + {Time: 31 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(100), "A"}}, + {Time: 50 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(1), "B"}}, + }, + }, + { + name: "Bottom_GroupByTags_Float", + q: `SELECT bottom(value::float, host::tag, 1) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY region, time(30s) fill(none)`, + typ: influxql.Float, + expr: `min(value::float)`, + itrs: []query.Iterator{ + &FloatIterator{Points: []query.FloatPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20, Aux: []interface{}{"A"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 3, Aux: []interface{}{"A"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100, Aux: []interface{}{"A"}}, + }}, + &FloatIterator{Points: []query.FloatPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10, Aux: []interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 50 * Second, Value: 1, Aux: []interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 51 * Second, Value: 2, Aux: []interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 52 * Second, Value: 3, Aux: []interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 4, Aux: []interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 5, Aux: []interface{}{"B"}}, + }}, + &FloatIterator{Points: []query.FloatPoint{ + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19, Aux: []interface{}{"A"}}, + {Name: 
"cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2, Aux: []interface{}{"A"}}, + }}, + }, + rows: []query.Row{ + {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("region=east")}, Values: []interface{}{float64(2), "A"}}, + {Time: 11 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("region=west")}, Values: []interface{}{float64(3), "A"}}, + {Time: 50 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("region=west")}, Values: []interface{}{float64(1), "B"}}, + }, + }, + { + name: "Bottom_GroupByTags_Integer", + q: `SELECT bottom(value::float, host::tag, 1) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY region, time(30s) fill(none)`, + typ: influxql.Integer, + expr: `min(value::float)`, + itrs: []query.Iterator{ + &IntegerIterator{Points: []query.IntegerPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20, Aux: []interface{}{"A"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 3, Aux: []interface{}{"A"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100, Aux: []interface{}{"A"}}, + }}, + &IntegerIterator{Points: []query.IntegerPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10, Aux: []interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 50 * Second, Value: 1, Aux: []interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 51 * Second, Value: 2, Aux: []interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 52 * Second, Value: 3, Aux: []interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 4, Aux: []interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 5, Aux: []interface{}{"B"}}, + }}, + &IntegerIterator{Points: []query.IntegerPoint{ + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19, Aux: []interface{}{"A"}}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2, Aux: []interface{}{"A"}}, + }}, + }, + rows: []query.Row{ + {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("region=east")}, Values: []interface{}{int64(2), "A"}}, + {Time: 11 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("region=west")}, Values: []interface{}{int64(3), "A"}}, + {Time: 50 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("region=west")}, Values: []interface{}{int64(1), "B"}}, + }, + }, + { + name: "Bottom_GroupByTags_Unsigned", + q: `SELECT bottom(value::float, host::tag, 1) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY region, time(30s) fill(none)`, + typ: influxql.Unsigned, + expr: `min(value::float)`, + itrs: []query.Iterator{ + &UnsignedIterator{Points: []query.UnsignedPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20, Aux: []interface{}{"A"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 3, Aux: []interface{}{"A"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100, Aux: []interface{}{"A"}}, + }}, + &UnsignedIterator{Points: []query.UnsignedPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10, Aux: []interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 50 * Second, Value: 1, Aux: 
[]interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 51 * Second, Value: 2, Aux: []interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 52 * Second, Value: 3, Aux: []interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 4, Aux: []interface{}{"B"}}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 5, Aux: []interface{}{"B"}}, + }}, + &UnsignedIterator{Points: []query.UnsignedPoint{ + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19, Aux: []interface{}{"A"}}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2, Aux: []interface{}{"A"}}, + }}, + }, + rows: []query.Row{ + {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("region=east")}, Values: []interface{}{uint64(2), "A"}}, + {Time: 11 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("region=west")}, Values: []interface{}{uint64(3), "A"}}, + {Time: 50 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("region=west")}, Values: []interface{}{uint64(1), "B"}}, + }, + }, + { + name: "Bottom_AuxFields_Float", + q: `SELECT bottom(p1, 2), p2, p3 FROM cpu`, + fields: map[string]influxql.DataType{ + "p1": influxql.Float, + "p2": influxql.Float, + "p3": influxql.String, + }, + itrs: []query.Iterator{ + &FloatIterator{Points: []query.FloatPoint{ + {Name: "cpu", Time: 0 * Second, Value: 1, Aux: []interface{}{float64(2), "aaa"}}, + {Name: "cpu", Time: 1 * Second, Value: 2, Aux: []interface{}{float64(3), "bbb"}}, + {Name: "cpu", Time: 2 * Second, Value: 3, Aux: []interface{}{float64(4), "ccc"}}, + {Name: "cpu", Time: 3 * Second, Value: 4, Aux: []interface{}{float64(5), "ddd"}}, + }}, + }, + rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(1), float64(2), "aaa"}}, + {Time: 1 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(2), float64(3), "bbb"}}, + }, + }, + { + name: "Bottom_AuxFields_Integer", + q: `SELECT bottom(p1, 2), p2, p3 FROM cpu`, + fields: map[string]influxql.DataType{ + "p1": influxql.Integer, + "p2": influxql.Integer, + "p3": influxql.String, + }, + itrs: []query.Iterator{ + &IntegerIterator{Points: []query.IntegerPoint{ + {Name: "cpu", Time: 0 * Second, Value: 1, Aux: []interface{}{int64(2), "aaa"}}, + {Name: "cpu", Time: 1 * Second, Value: 2, Aux: []interface{}{int64(3), "bbb"}}, + {Name: "cpu", Time: 2 * Second, Value: 3, Aux: []interface{}{int64(4), "ccc"}}, + {Name: "cpu", Time: 3 * Second, Value: 4, Aux: []interface{}{int64(5), "ddd"}}, + }}, + }, + rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(1), int64(2), "aaa"}}, + {Time: 1 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(2), int64(3), "bbb"}}, + }, + }, + { + name: "Bottom_AuxFields_Unsigned", + q: `SELECT bottom(p1, 2), p2, p3 FROM cpu`, + fields: map[string]influxql.DataType{ + "p1": influxql.Unsigned, + "p2": influxql.Unsigned, + "p3": influxql.String, + }, + itrs: []query.Iterator{ + &UnsignedIterator{Points: []query.UnsignedPoint{ + {Name: "cpu", Time: 0 * Second, Value: 1, Aux: []interface{}{uint64(2), "aaa"}}, + {Name: "cpu", Time: 1 * Second, Value: 2, Aux: []interface{}{uint64(3), "bbb"}}, + {Name: "cpu", Time: 2 * Second, Value: 3, Aux: []interface{}{uint64(4), "ccc"}}, + {Name: "cpu", Time: 3 * Second, Value: 4, Aux: []interface{}{uint64(5), "ddd"}}, + }}, + }, + rows: 
[]query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(1), uint64(2), "aaa"}}, + {Time: 1 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(2), uint64(3), "bbb"}}, + }, + }, + { + name: "Fill_Null_Float", + q: `SELECT mean(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:01:00Z' GROUP BY host, time(10s) fill(null)`, + typ: influxql.Float, + expr: `mean(value::float)`, + itrs: []query.Iterator{ + &FloatIterator{Points: []query.FloatPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 12 * Second, Value: 2}, + }}, + }, + rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{nil}}, + {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(2)}}, + {Time: 20 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{nil}}, + {Time: 30 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{nil}}, + {Time: 40 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{nil}}, + {Time: 50 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{nil}}, + }, + }, + { + name: "Fill_Number_Float", + q: `SELECT mean(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:01:00Z' GROUP BY host, time(10s) fill(1)`, + typ: influxql.Float, + expr: `mean(value::float)`, + itrs: []query.Iterator{ + &FloatIterator{Points: []query.FloatPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 12 * Second, Value: 2}, + }}, + }, + rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(1)}}, + {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(2)}}, + {Time: 20 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(1)}}, + {Time: 30 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(1)}}, + {Time: 40 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(1)}}, + {Time: 50 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(1)}}, + }, + }, + { + name: "Fill_Previous_Float", + q: `SELECT mean(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:01:00Z' GROUP BY host, time(10s) fill(previous)`, + typ: influxql.Float, + expr: `mean(value::float)`, + itrs: []query.Iterator{ + &FloatIterator{Points: []query.FloatPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 12 * Second, Value: 2}, + }}, + }, + rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{nil}}, + {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(2)}}, + {Time: 20 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(2)}}, + {Time: 30 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(2)}}, + {Time: 40 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(2)}}, + {Time: 50 * Second, Series: query.Series{Name: "cpu", Tags: 
ParseTags("host=A")}, Values: []interface{}{float64(2)}}, + }, + }, + { + name: "Fill_Linear_Float_One", + q: `SELECT mean(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:01:00Z' GROUP BY host, time(10s) fill(linear)`, + typ: influxql.Float, + expr: `mean(value::float)`, + itrs: []query.Iterator{ + &FloatIterator{Points: []query.FloatPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 12 * Second, Value: 2}, + {Name: "cpu", Tags: ParseTags("host=A"), Time: 32 * Second, Value: 4}, + }}, + }, + rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{nil}}, + {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(2)}}, + {Time: 20 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(3)}}, + {Time: 30 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(4)}}, + {Time: 40 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{nil}}, + {Time: 50 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{nil}}, + }, + }, + { + name: "Fill_Linear_Float_Many", + q: `SELECT mean(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:01:00Z' GROUP BY host, time(10s) fill(linear)`, + typ: influxql.Float, + expr: `mean(value::float)`, + itrs: []query.Iterator{ + &FloatIterator{Points: []query.FloatPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 12 * Second, Value: 2}, + {Name: "cpu", Tags: ParseTags("host=A"), Time: 62 * Second, Value: 7}, + }}, + }, + rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{nil}}, + {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(2)}}, + {Time: 20 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(3)}}, + {Time: 30 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(4)}}, + {Time: 40 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(5)}}, + {Time: 50 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(6)}}, + {Time: 60 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(7)}}, + }, + }, + { + name: "Fill_Linear_Float_MultipleSeries", + q: `SELECT mean(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:01:00Z' GROUP BY host, time(10s) fill(linear)`, + typ: influxql.Float, + expr: `mean(value::float)`, + itrs: []query.Iterator{ + &FloatIterator{Points: []query.FloatPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 12 * Second, Value: 2}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 32 * Second, Value: 4}, + }}, + }, + rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{nil}}, + {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(2)}}, + {Time: 20 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{nil}}, + {Time: 30 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{nil}}, + {Time: 40 * 
Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{nil}}, + {Time: 50 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{nil}}, + {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{nil}}, + {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{nil}}, + {Time: 20 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{nil}}, + {Time: 30 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{float64(4)}}, + {Time: 40 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{nil}}, + {Time: 50 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{nil}}, + }, + }, + { + name: "Fill_Linear_Integer_One", + q: `SELECT max(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:01:00Z' GROUP BY host, time(10s) fill(linear)`, + typ: influxql.Integer, + expr: `max(value::integer)`, + itrs: []query.Iterator{ + &IntegerIterator{Points: []query.IntegerPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 12 * Second, Value: 1}, + {Name: "cpu", Tags: ParseTags("host=A"), Time: 32 * Second, Value: 4}, + }}, + }, + rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{nil}}, + {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{int64(1)}}, + {Time: 20 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{int64(2)}}, + {Time: 30 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{int64(4)}}, + {Time: 40 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{nil}}, + {Time: 50 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{nil}}, + }, + }, + { + name: "Fill_Linear_Integer_Many", + q: `SELECT max(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:01:20Z' GROUP BY host, time(10s) fill(linear)`, + typ: influxql.Integer, + expr: `max(value::integer)`, + itrs: []query.Iterator{ + &IntegerIterator{Points: []query.IntegerPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 12 * Second, Value: 1}, + {Name: "cpu", Tags: ParseTags("host=A"), Time: 72 * Second, Value: 10}, + }}, + }, + rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{nil}}, + {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{int64(1)}}, + {Time: 20 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{int64(2)}}, + {Time: 30 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{int64(4)}}, + {Time: 40 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{int64(5)}}, + {Time: 50 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{int64(7)}}, + {Time: 60 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{int64(8)}}, + {Time: 70 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{int64(10)}}, + }, + }, + { + name: 
"Fill_Linear_Integer_MultipleSeries", + q: `SELECT max(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:01:00Z' GROUP BY host, time(10s) fill(linear)`, + typ: influxql.Integer, + expr: `max(value::integer)`, + itrs: []query.Iterator{ + &IntegerIterator{Points: []query.IntegerPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 12 * Second, Value: 2}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 32 * Second, Value: 4}, + }}, + }, + rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{nil}}, + {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{int64(2)}}, + {Time: 20 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{nil}}, + {Time: 30 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{nil}}, + {Time: 40 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{nil}}, + {Time: 50 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{nil}}, + {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{nil}}, + {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{nil}}, + {Time: 20 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{nil}}, + {Time: 30 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{int64(4)}}, + {Time: 40 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{nil}}, + {Time: 50 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{nil}}, + }, + }, + { + name: "Fill_Linear_Unsigned_One", + q: `SELECT max(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:01:00Z' GROUP BY host, time(10s) fill(linear)`, + typ: influxql.Unsigned, + expr: `max(value::Unsigned)`, + itrs: []query.Iterator{ + &UnsignedIterator{Points: []query.UnsignedPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 12 * Second, Value: 1}, + {Name: "cpu", Tags: ParseTags("host=A"), Time: 32 * Second, Value: 4}, + }}, + }, + rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{nil}}, + {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{uint64(1)}}, + {Time: 20 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{uint64(2)}}, + {Time: 30 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{uint64(4)}}, + {Time: 40 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{nil}}, + {Time: 50 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{nil}}, + }, + }, + { + name: "Fill_Linear_Unsigned_Many", + q: `SELECT max(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:01:20Z' GROUP BY host, time(10s) fill(linear)`, + typ: influxql.Unsigned, + expr: `max(value::Unsigned)`, + itrs: []query.Iterator{ + &UnsignedIterator{Points: []query.UnsignedPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 12 * Second, Value: 1}, + {Name: "cpu", Tags: ParseTags("host=A"), Time: 72 * Second, Value: 10}, + }}, + }, + rows: 
[]query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{nil}}, + {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{uint64(1)}}, + {Time: 20 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{uint64(2)}}, + {Time: 30 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{uint64(4)}}, + {Time: 40 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{uint64(5)}}, + {Time: 50 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{uint64(7)}}, + {Time: 60 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{uint64(8)}}, + {Time: 70 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{uint64(10)}}, + }, + }, + { + name: "Fill_Linear_Unsigned_MultipleSeries", + q: `SELECT max(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:01:00Z' GROUP BY host, time(10s) fill(linear)`, + typ: influxql.Unsigned, + expr: `max(value::Unsigned)`, + itrs: []query.Iterator{ + &UnsignedIterator{Points: []query.UnsignedPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 12 * Second, Value: 2}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 32 * Second, Value: 4}, + }}, + }, + rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{nil}}, + {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{uint64(2)}}, + {Time: 20 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{nil}}, + {Time: 30 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{nil}}, + {Time: 40 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{nil}}, + {Time: 50 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{nil}}, + {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{nil}}, + {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{nil}}, + {Time: 20 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{nil}}, + {Time: 30 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{uint64(4)}}, + {Time: 40 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{nil}}, + {Time: 50 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{nil}}, + }, + }, + { + name: "Stddev_Float", + q: `SELECT stddev(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`, + typ: influxql.Float, + itrs: []query.Iterator{ + &FloatIterator{Points: []query.FloatPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 3}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100}, + }}, + &FloatIterator{Points: []query.FloatPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10}, + {Name: "cpu", Tags: 
ParseTags("region=west,host=B"), Time: 50 * Second, Value: 1}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 51 * Second, Value: 2}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 52 * Second, Value: 3}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 4}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 5}, + }}, + &FloatIterator{Points: []query.FloatPoint{ + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2}, + }}, + }, + rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{0.7071067811865476}}, + {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{0.7071067811865476}}, + {Time: 30 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{query.NullFloat}}, + {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{query.NullFloat}}, + {Time: 50 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{1.5811388300841898}}, + }, + }, + { + name: "Stddev_Integer", + q: `SELECT stddev(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`, + typ: influxql.Integer, + itrs: []query.Iterator{ + &IntegerIterator{Points: []query.IntegerPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 3}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100}, + }}, + &IntegerIterator{Points: []query.IntegerPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 50 * Second, Value: 1}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 51 * Second, Value: 2}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 52 * Second, Value: 3}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 4}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 5}, + }}, + &IntegerIterator{Points: []query.IntegerPoint{ + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2}, + }}, + }, + rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{0.7071067811865476}}, + {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{0.7071067811865476}}, + {Time: 30 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{query.NullFloat}}, + {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{query.NullFloat}}, + {Time: 50 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{1.5811388300841898}}, + }, + }, + { + name: "Stddev_Unsigned", + q: `SELECT stddev(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`, + typ: influxql.Unsigned, + itrs: []query.Iterator{ + &UnsignedIterator{Points: 
[]query.UnsignedPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 3}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100}, + }}, + &UnsignedIterator{Points: []query.UnsignedPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 50 * Second, Value: 1}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 51 * Second, Value: 2}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 52 * Second, Value: 3}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 4}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 5}, + }}, + &UnsignedIterator{Points: []query.UnsignedPoint{ + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2}, + }}, + }, + rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{0.7071067811865476}}, + {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{0.7071067811865476}}, + {Time: 30 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{query.NullFloat}}, + {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{query.NullFloat}}, + {Time: 50 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{1.5811388300841898}}, + }, + }, + { + name: "Spread_Float", + q: `SELECT spread(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`, + typ: influxql.Float, + itrs: []query.Iterator{ + &FloatIterator{Points: []query.FloatPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 3}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100}, + }}, + &FloatIterator{Points: []query.FloatPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 50 * Second, Value: 1}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 51 * Second, Value: 2}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 52 * Second, Value: 3}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 4}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 5}, + }}, + &FloatIterator{Points: []query.FloatPoint{ + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2}, + }}, + }, + rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(1)}}, + {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(1)}}, + {Time: 30 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(0)}}, + {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: 
[]interface{}{float64(0)}}, + {Time: 50 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{float64(4)}}, + }, + }, + { + name: "Spread_Integer", + q: `SELECT spread(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`, + typ: influxql.Integer, + itrs: []query.Iterator{ + &IntegerIterator{Points: []query.IntegerPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 3}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100}, + }}, + &IntegerIterator{Points: []query.IntegerPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 50 * Second, Value: 1}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 51 * Second, Value: 2}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 52 * Second, Value: 3}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 4}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 5}, + }}, + &IntegerIterator{Points: []query.IntegerPoint{ + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2}, + }}, + }, + rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{int64(1)}}, + {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{int64(1)}}, + {Time: 30 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{int64(0)}}, + {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{int64(0)}}, + {Time: 50 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{int64(4)}}, + }, + }, + { + name: "Spread_Unsigned", + q: `SELECT spread(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`, + typ: influxql.Unsigned, + itrs: []query.Iterator{ + &UnsignedIterator{Points: []query.UnsignedPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 3}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100}, + }}, + &UnsignedIterator{Points: []query.UnsignedPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 50 * Second, Value: 1}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 51 * Second, Value: 2}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 52 * Second, Value: 3}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 4}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 5}, + }}, + &UnsignedIterator{Points: []query.UnsignedPoint{ + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2}, + }}, + }, + rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, 
Values: []interface{}{uint64(1)}}, + {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{uint64(1)}}, + {Time: 30 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{uint64(0)}}, + {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{uint64(0)}}, + {Time: 50 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{uint64(4)}}, + }, + }, + { + name: "Percentile_Float", + q: `SELECT percentile(value, 90) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`, + typ: influxql.Float, + itrs: []query.Iterator{ + &FloatIterator{Points: []query.FloatPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 3}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100}, + }}, + &FloatIterator{Points: []query.FloatPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 50 * Second, Value: 10}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 51 * Second, Value: 9}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 52 * Second, Value: 8}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 7}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 54 * Second, Value: 6}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 55 * Second, Value: 5}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 56 * Second, Value: 4}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 57 * Second, Value: 3}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 58 * Second, Value: 2}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 59 * Second, Value: 1}, + }}, + &FloatIterator{Points: []query.FloatPoint{ + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2}, + }}, + }, + rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(20)}}, + {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(3)}}, + {Time: 30 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(100)}}, + {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{float64(10)}}, + {Time: 50 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{float64(9)}}, + }, + }, + { + name: "Percentile_Integer", + q: `SELECT percentile(value, 90) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`, + typ: influxql.Integer, + itrs: []query.Iterator{ + &IntegerIterator{Points: []query.IntegerPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 3}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100}, + }}, + &IntegerIterator{Points: []query.IntegerPoint{ + {Name: "cpu", Tags: 
ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 50 * Second, Value: 10}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 51 * Second, Value: 9}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 52 * Second, Value: 8}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 7}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 54 * Second, Value: 6}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 55 * Second, Value: 5}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 56 * Second, Value: 4}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 57 * Second, Value: 3}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 58 * Second, Value: 2}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 59 * Second, Value: 1}, + }}, + &IntegerIterator{Points: []query.IntegerPoint{ + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2}, + }}, + }, + rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{int64(20)}}, + {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{int64(3)}}, + {Time: 30 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{int64(100)}}, + {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{int64(10)}}, + {Time: 50 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{int64(9)}}, + }, + }, + { + name: "Percentile_Unsigned", + q: `SELECT percentile(value, 90) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`, + typ: influxql.Unsigned, + itrs: []query.Iterator{ + &UnsignedIterator{Points: []query.UnsignedPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 3}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100}, + }}, + &UnsignedIterator{Points: []query.UnsignedPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 50 * Second, Value: 10}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 51 * Second, Value: 9}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 52 * Second, Value: 8}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 7}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 54 * Second, Value: 6}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 55 * Second, Value: 5}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 56 * Second, Value: 4}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 57 * Second, Value: 3}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 58 * Second, Value: 2}, + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 59 * Second, Value: 1}, + }}, + &UnsignedIterator{Points: []query.UnsignedPoint{ + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2}, + }}, + }, + 
rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{uint64(20)}}, + {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{uint64(3)}}, + {Time: 30 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{uint64(100)}}, + {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{uint64(10)}}, + {Time: 50 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{uint64(9)}}, + }, + }, + { + name: "Sample_Float", + q: `SELECT sample(value, 2) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`, + typ: influxql.Float, + itrs: []query.Iterator{ + &FloatIterator{Points: []query.FloatPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 5 * Second, Value: 10}, + }}, + &FloatIterator{Points: []query.FloatPoint{ + {Name: "cpu", Tags: ParseTags("region=east,host=B"), Time: 10 * Second, Value: 19}, + {Name: "cpu", Tags: ParseTags("region=east,host=B"), Time: 15 * Second, Value: 2}, + }}, + }, + rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(20)}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(10)}}, + {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{float64(19)}}, + {Time: 15 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{float64(2)}}, + }, + }, + { + name: "Sample_Integer", + q: `SELECT sample(value, 2) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`, + typ: influxql.Integer, + itrs: []query.Iterator{ + &IntegerIterator{Points: []query.IntegerPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 5 * Second, Value: 10}, + }}, + &IntegerIterator{Points: []query.IntegerPoint{ + {Name: "cpu", Tags: ParseTags("region=east,host=B"), Time: 10 * Second, Value: 19}, + {Name: "cpu", Tags: ParseTags("region=east,host=B"), Time: 15 * Second, Value: 2}, + }}, + }, + rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{int64(20)}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{int64(10)}}, + {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{int64(19)}}, + {Time: 15 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{int64(2)}}, + }, + }, + { + name: "Sample_Unsigned", + q: `SELECT sample(value, 2) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`, + typ: influxql.Unsigned, + itrs: []query.Iterator{ + &UnsignedIterator{Points: []query.UnsignedPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 5 * Second, Value: 10}, + }}, + &UnsignedIterator{Points: []query.UnsignedPoint{ + {Name: "cpu", Tags: 
ParseTags("region=east,host=B"), Time: 10 * Second, Value: 19}, + {Name: "cpu", Tags: ParseTags("region=east,host=B"), Time: 15 * Second, Value: 2}, + }}, + }, + rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{uint64(20)}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{uint64(10)}}, + {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{uint64(19)}}, + {Time: 15 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{uint64(2)}}, + }, + }, + { + name: "Sample_String", + q: `SELECT sample(value, 2) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`, + typ: influxql.String, + itrs: []query.Iterator{ + &StringIterator{Points: []query.StringPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: "a"}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 5 * Second, Value: "b"}, + }}, + &StringIterator{Points: []query.StringPoint{ + {Name: "cpu", Tags: ParseTags("region=east,host=B"), Time: 10 * Second, Value: "c"}, + {Name: "cpu", Tags: ParseTags("region=east,host=B"), Time: 15 * Second, Value: "d"}, + }}, + }, + rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{"a"}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{"b"}}, + {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{"c"}}, + {Time: 15 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{"d"}}, + }, + }, + { + name: "Sample_Boolean", + q: `SELECT sample(value, 2) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`, + typ: influxql.Boolean, + itrs: []query.Iterator{ + &BooleanIterator{Points: []query.BooleanPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: true}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 5 * Second, Value: false}, + }}, + &BooleanIterator{Points: []query.BooleanPoint{ + {Name: "cpu", Tags: ParseTags("region=east,host=B"), Time: 10 * Second, Value: false}, + {Name: "cpu", Tags: ParseTags("region=east,host=B"), Time: 15 * Second, Value: true}, + }}, + }, + rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{true}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{false}}, + {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{false}}, + {Time: 15 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{true}}, + }, + }, + //{ + // name: "Raw", + // q: `SELECT v1::float, v2::float FROM cpu`, + // itrs: []query.Iterator{ + // &FloatIterator{Points: []query.FloatPoint{ + // {Time: 0, Aux: []interface{}{float64(1), nil}}, + // {Time: 1, Aux: []interface{}{nil, float64(2)}}, + // {Time: 5, Aux: []interface{}{float64(3), float64(4)}}, + // }}, + // }, + // points: [][]query.Point{ + // { + // &query.FloatPoint{Time: 0, Value: 1}, + // &query.FloatPoint{Time: 0, Nil: true}, + // }, + // { + // &query.FloatPoint{Time: 1, Nil: true}, + // 
&query.FloatPoint{Time: 1, Value: 2}, + // }, + // { + // &query.FloatPoint{Time: 5, Value: 3}, + // &query.FloatPoint{Time: 5, Value: 4}, + // }, + // }, + //}, + { + name: "ParenExpr_Min", + q: `SELECT (min(value)) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`, + typ: influxql.Float, + expr: `min(value::float)`, + itrs: []query.Iterator{ + &FloatIterator{Points: []query.FloatPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 3}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100}, + }}, + &FloatIterator{Points: []query.FloatPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10}, + }}, + &FloatIterator{Points: []query.FloatPoint{ + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2}, + }}, + }, + rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(19)}}, + {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(2)}}, + {Time: 30 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(100)}}, + {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{float64(10)}}, + }, + }, + { + name: "ParenExpr_Distinct", + q: `SELECT (distinct(value)) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`, + typ: influxql.Float, + itrs: []query.Iterator{ + &FloatIterator{Points: []query.FloatPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 1 * Second, Value: 19}, + }}, + &FloatIterator{Points: []query.FloatPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10}, + }}, + &FloatIterator{Points: []query.FloatPoint{ + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 11 * Second, Value: 2}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 12 * Second, Value: 2}, + }}, + }, + rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(20)}}, + {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(19)}}, + {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(2)}}, + {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{float64(10)}}, + }, + }, + { + name: "Derivative_Float", + q: `SELECT derivative(value, 1s) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`, + typ: influxql.Float, + itrs: []query.Iterator{ + &FloatIterator{Points: []query.FloatPoint{ + {Name: "cpu", Time: 0 * Second, Value: 20}, + {Name: "cpu", Time: 4 * Second, Value: 10}, + {Name: "cpu", Time: 8 * Second, Value: 19}, + {Name: "cpu", Time: 12 * Second, Value: 3}, + }}, + }, + rows: []query.Row{ + 
{Time: 4 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(-2.5)}}, + {Time: 8 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(2.25)}}, + {Time: 12 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(-4)}}, + }, + }, + { + name: "Derivative_Integer", + q: `SELECT derivative(value, 1s) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`, + typ: influxql.Integer, + itrs: []query.Iterator{ + &IntegerIterator{Points: []query.IntegerPoint{ + {Name: "cpu", Time: 0 * Second, Value: 20}, + {Name: "cpu", Time: 4 * Second, Value: 10}, + {Name: "cpu", Time: 8 * Second, Value: 19}, + {Name: "cpu", Time: 12 * Second, Value: 3}, + }}, + }, + rows: []query.Row{ + {Time: 4 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(-2.5)}}, + {Time: 8 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(2.25)}}, + {Time: 12 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(-4)}}, + }, + }, + { + name: "Derivative_Unsigned", + q: `SELECT derivative(value, 1s) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`, + typ: influxql.Unsigned, + itrs: []query.Iterator{ + &UnsignedIterator{Points: []query.UnsignedPoint{ + {Name: "cpu", Time: 0 * Second, Value: 20}, + {Name: "cpu", Time: 4 * Second, Value: 10}, + {Name: "cpu", Time: 8 * Second, Value: 19}, + {Name: "cpu", Time: 12 * Second, Value: 3}, + }}, + }, + rows: []query.Row{ + {Time: 4 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(-2.5)}}, + {Time: 8 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(2.25)}}, + {Time: 12 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(-4)}}, + }, + }, + { + name: "Derivative_Desc_Float", + q: `SELECT derivative(value, 1s) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z' ORDER BY desc`, + typ: influxql.Float, + itrs: []query.Iterator{ + &FloatIterator{Points: []query.FloatPoint{ + {Name: "cpu", Time: 12 * Second, Value: 3}, + {Name: "cpu", Time: 8 * Second, Value: 19}, + {Name: "cpu", Time: 4 * Second, Value: 10}, + {Name: "cpu", Time: 0 * Second, Value: 20}, + }}, + }, + rows: []query.Row{ + {Time: 8 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(4)}}, + {Time: 4 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(-2.25)}}, + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(2.5)}}, + }, + }, + { + name: "Derivative_Desc_Integer", + q: `SELECT derivative(value, 1s) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z' ORDER BY desc`, + typ: influxql.Integer, + itrs: []query.Iterator{ + &IntegerIterator{Points: []query.IntegerPoint{ + {Name: "cpu", Time: 12 * Second, Value: 3}, + {Name: "cpu", Time: 8 * Second, Value: 19}, + {Name: "cpu", Time: 4 * Second, Value: 10}, + {Name: "cpu", Time: 0 * Second, Value: 20}, + }}, + }, + rows: []query.Row{ + {Time: 8 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(4)}}, + {Time: 4 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(-2.25)}}, + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(2.5)}}, + }, + }, + { + name: "Derivative_Desc_Unsigned", + q: `SELECT derivative(value, 1s) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z' ORDER BY desc`, + typ: 
influxql.Unsigned, + itrs: []query.Iterator{ + &UnsignedIterator{Points: []query.UnsignedPoint{ + {Name: "cpu", Time: 12 * Second, Value: 3}, + {Name: "cpu", Time: 8 * Second, Value: 19}, + {Name: "cpu", Time: 4 * Second, Value: 10}, + {Name: "cpu", Time: 0 * Second, Value: 20}, + }}, + }, + rows: []query.Row{ + {Time: 8 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(4)}}, + {Time: 4 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(-2.25)}}, + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(2.5)}}, + }, + }, + { + name: "Derivative_Duplicate_Float", + q: `SELECT derivative(value, 1s) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`, + typ: influxql.Float, + itrs: []query.Iterator{ + &FloatIterator{Points: []query.FloatPoint{ + {Name: "cpu", Time: 0 * Second, Value: 20}, + {Name: "cpu", Time: 0 * Second, Value: 19}, + {Name: "cpu", Time: 4 * Second, Value: 10}, + {Name: "cpu", Time: 4 * Second, Value: 3}, + }}, + }, + rows: []query.Row{ + {Time: 4 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(-2.5)}}, + }, + }, + { + name: "Derivative_Duplicate_Integer", + q: `SELECT derivative(value, 1s) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`, + typ: influxql.Integer, + itrs: []query.Iterator{ + &IntegerIterator{Points: []query.IntegerPoint{ + {Name: "cpu", Time: 0 * Second, Value: 20}, + {Name: "cpu", Time: 0 * Second, Value: 19}, + {Name: "cpu", Time: 4 * Second, Value: 10}, + {Name: "cpu", Time: 4 * Second, Value: 3}, + }}, + }, + rows: []query.Row{ + {Time: 4 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(-2.5)}}, + }, + }, + { + name: "Derivative_Duplicate_Unsigned", + q: `SELECT derivative(value, 1s) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`, + typ: influxql.Unsigned, + itrs: []query.Iterator{ + &UnsignedIterator{Points: []query.UnsignedPoint{ + {Name: "cpu", Time: 0 * Second, Value: 20}, + {Name: "cpu", Time: 0 * Second, Value: 19}, + {Name: "cpu", Time: 4 * Second, Value: 10}, + {Name: "cpu", Time: 4 * Second, Value: 3}, + }}, + }, + rows: []query.Row{ + {Time: 4 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(-2.5)}}, + }, + }, + { + name: "Difference_Float", + q: `SELECT difference(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`, + typ: influxql.Float, + itrs: []query.Iterator{ + &FloatIterator{Points: []query.FloatPoint{ + {Name: "cpu", Time: 0 * Second, Value: 20}, + {Name: "cpu", Time: 4 * Second, Value: 10}, + {Name: "cpu", Time: 8 * Second, Value: 19}, + {Name: "cpu", Time: 12 * Second, Value: 3}, + }}, + }, + rows: []query.Row{ + {Time: 4 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(-10)}}, + {Time: 8 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(9)}}, + {Time: 12 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(-16)}}, + }, + }, + { + name: "Difference_Integer", + q: `SELECT difference(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`, + typ: influxql.Integer, + itrs: []query.Iterator{ + &IntegerIterator{Points: []query.IntegerPoint{ + {Name: "cpu", Time: 0 * Second, Value: 20}, + {Name: "cpu", Time: 4 * Second, Value: 10}, + {Name: "cpu", Time: 8 * Second, Value: 19}, + {Name: "cpu", Time: 12 * Second, Value: 3}, + }}, + }, + rows: []query.Row{ + 
{Time: 4 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(-10)}}, + {Time: 8 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(9)}}, + {Time: 12 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(-16)}}, + }, + }, + { + name: "Difference_Unsigned", + q: `SELECT difference(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`, + typ: influxql.Unsigned, + itrs: []query.Iterator{ + &UnsignedIterator{Points: []query.UnsignedPoint{ + {Name: "cpu", Time: 0 * Second, Value: 20}, + {Name: "cpu", Time: 4 * Second, Value: 10}, + {Name: "cpu", Time: 8 * Second, Value: 19}, + {Name: "cpu", Time: 12 * Second, Value: 3}, + }}, + }, + rows: []query.Row{ + {Time: 4 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(18446744073709551606)}}, + {Time: 8 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(9)}}, + {Time: 12 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(18446744073709551600)}}, + }, + }, + { + name: "Difference_Duplicate_Float", + q: `SELECT difference(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`, + typ: influxql.Float, + itrs: []query.Iterator{ + &FloatIterator{Points: []query.FloatPoint{ + {Name: "cpu", Time: 0 * Second, Value: 20}, + {Name: "cpu", Time: 0 * Second, Value: 19}, + {Name: "cpu", Time: 4 * Second, Value: 10}, + {Name: "cpu", Time: 4 * Second, Value: 3}, + }}, + }, + rows: []query.Row{ + {Time: 4 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(-10)}}, + }, + }, + { + name: "Difference_Duplicate_Integer", + q: `SELECT difference(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`, + typ: influxql.Integer, + itrs: []query.Iterator{ + &IntegerIterator{Points: []query.IntegerPoint{ + {Name: "cpu", Time: 0 * Second, Value: 20}, + {Name: "cpu", Time: 0 * Second, Value: 19}, + {Name: "cpu", Time: 4 * Second, Value: 10}, + {Name: "cpu", Time: 4 * Second, Value: 3}, + }}, + }, + rows: []query.Row{ + {Time: 4 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(-10)}}, + }, + }, + { + name: "Difference_Duplicate_Unsigned", + q: `SELECT difference(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`, + typ: influxql.Unsigned, + itrs: []query.Iterator{ + &UnsignedIterator{Points: []query.UnsignedPoint{ + {Name: "cpu", Time: 0 * Second, Value: 20}, + {Name: "cpu", Time: 0 * Second, Value: 19}, + {Name: "cpu", Time: 4 * Second, Value: 10}, + {Name: "cpu", Time: 4 * Second, Value: 3}, + }}, + }, + rows: []query.Row{ + {Time: 4 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(18446744073709551606)}}, + }, + }, + { + name: "Non_Negative_Difference_Float", + q: `SELECT non_negative_difference(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`, + typ: influxql.Float, + itrs: []query.Iterator{ + &FloatIterator{Points: []query.FloatPoint{ + {Name: "cpu", Time: 0 * Second, Value: 20}, + {Name: "cpu", Time: 4 * Second, Value: 10}, + {Name: "cpu", Time: 8 * Second, Value: 29}, + {Name: "cpu", Time: 12 * Second, Value: 3}, + {Name: "cpu", Time: 16 * Second, Value: 39}, + }}, + }, + rows: []query.Row{ + {Time: 8 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(19)}}, + {Time: 16 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(36)}}, + }, + }, + { + name: 
"Non_Negative_Difference_Integer", + q: `SELECT non_negative_difference(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`, + typ: influxql.Integer, + itrs: []query.Iterator{ + &IntegerIterator{Points: []query.IntegerPoint{ + {Name: "cpu", Time: 0 * Second, Value: 20}, + {Name: "cpu", Time: 4 * Second, Value: 10}, + {Name: "cpu", Time: 8 * Second, Value: 21}, + {Name: "cpu", Time: 12 * Second, Value: 3}, + }}, + }, + rows: []query.Row{ + {Time: 8 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(11)}}, + }, + }, + { + name: "Non_Negative_Difference_Unsigned", + q: `SELECT non_negative_difference(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`, + typ: influxql.Unsigned, + itrs: []query.Iterator{ + &UnsignedIterator{Points: []query.UnsignedPoint{ + {Name: "cpu", Time: 0 * Second, Value: 20}, + {Name: "cpu", Time: 4 * Second, Value: 10}, + {Name: "cpu", Time: 8 * Second, Value: 21}, + {Name: "cpu", Time: 12 * Second, Value: 3}, + }}, + }, + rows: []query.Row{ + {Time: 8 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(11)}}, + }, + }, + { + name: "Non_Negative_Difference_Duplicate_Float", + q: `SELECT non_negative_difference(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`, + typ: influxql.Float, + itrs: []query.Iterator{ + &FloatIterator{Points: []query.FloatPoint{ + {Name: "cpu", Time: 0 * Second, Value: 20}, + {Name: "cpu", Time: 0 * Second, Value: 19}, + {Name: "cpu", Time: 4 * Second, Value: 10}, + {Name: "cpu", Time: 4 * Second, Value: 3}, + {Name: "cpu", Time: 8 * Second, Value: 30}, + {Name: "cpu", Time: 8 * Second, Value: 19}, + {Name: "cpu", Time: 12 * Second, Value: 10}, + {Name: "cpu", Time: 12 * Second, Value: 3}, + {Name: "cpu", Time: 16 * Second, Value: 40}, + {Name: "cpu", Time: 16 * Second, Value: 3}, + }}, + }, + rows: []query.Row{ + {Time: 8 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(20)}}, + {Time: 16 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(30)}}, + }, + }, + { + name: "Non_Negative_Difference_Duplicate_Integer", + q: `SELECT non_negative_difference(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`, + typ: influxql.Integer, + itrs: []query.Iterator{ + &IntegerIterator{Points: []query.IntegerPoint{ + {Name: "cpu", Time: 0 * Second, Value: 20}, + {Name: "cpu", Time: 0 * Second, Value: 19}, + {Name: "cpu", Time: 4 * Second, Value: 10}, + {Name: "cpu", Time: 4 * Second, Value: 3}, + {Name: "cpu", Time: 8 * Second, Value: 30}, + {Name: "cpu", Time: 8 * Second, Value: 19}, + {Name: "cpu", Time: 12 * Second, Value: 10}, + {Name: "cpu", Time: 12 * Second, Value: 3}, + {Name: "cpu", Time: 16 * Second, Value: 40}, + {Name: "cpu", Time: 16 * Second, Value: 3}, + }}, + }, + rows: []query.Row{ + {Time: 8 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(20)}}, + {Time: 16 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(30)}}, + }, + }, + { + name: "Non_Negative_Difference_Duplicate_Unsigned", + q: `SELECT non_negative_difference(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`, + typ: influxql.Unsigned, + itrs: []query.Iterator{ + &UnsignedIterator{Points: []query.UnsignedPoint{ + {Name: "cpu", Time: 0 * Second, Value: 20}, + {Name: "cpu", Time: 0 * Second, Value: 19}, + {Name: "cpu", Time: 4 * Second, Value: 10}, + {Name: "cpu", Time: 
4 * Second, Value: 3}, + {Name: "cpu", Time: 8 * Second, Value: 30}, + {Name: "cpu", Time: 8 * Second, Value: 19}, + {Name: "cpu", Time: 12 * Second, Value: 10}, + {Name: "cpu", Time: 12 * Second, Value: 3}, + {Name: "cpu", Time: 16 * Second, Value: 40}, + {Name: "cpu", Time: 16 * Second, Value: 3}, + }}, + }, + rows: []query.Row{ + {Time: 8 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(20)}}, + {Time: 16 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(30)}}, + }, + }, + { + name: "Elapsed_Float", + q: `SELECT elapsed(value, 1s) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`, + typ: influxql.Float, + itrs: []query.Iterator{ + &FloatIterator{Points: []query.FloatPoint{ + {Name: "cpu", Time: 0 * Second, Value: 20}, + {Name: "cpu", Time: 4 * Second, Value: 10}, + {Name: "cpu", Time: 8 * Second, Value: 19}, + {Name: "cpu", Time: 11 * Second, Value: 3}, + }}, + }, + rows: []query.Row{ + {Time: 4 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(4)}}, + {Time: 8 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(4)}}, + {Time: 11 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(3)}}, + }, + }, + { + name: "Elapsed_Integer", + q: `SELECT elapsed(value, 1s) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`, + typ: influxql.Integer, + itrs: []query.Iterator{ + &IntegerIterator{Points: []query.IntegerPoint{ + {Name: "cpu", Time: 0 * Second, Value: 20}, + {Name: "cpu", Time: 4 * Second, Value: 10}, + {Name: "cpu", Time: 8 * Second, Value: 19}, + {Name: "cpu", Time: 11 * Second, Value: 3}, + }}, + }, + rows: []query.Row{ + {Time: 4 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(4)}}, + {Time: 8 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(4)}}, + {Time: 11 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(3)}}, + }, + }, + { + name: "Elapsed_Unsigned", + q: `SELECT elapsed(value, 1s) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`, + typ: influxql.Unsigned, + itrs: []query.Iterator{ + &UnsignedIterator{Points: []query.UnsignedPoint{ + {Name: "cpu", Time: 0 * Second, Value: 20}, + {Name: "cpu", Time: 4 * Second, Value: 10}, + {Name: "cpu", Time: 8 * Second, Value: 19}, + {Name: "cpu", Time: 11 * Second, Value: 3}, + }}, + }, + rows: []query.Row{ + {Time: 4 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(4)}}, + {Time: 8 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(4)}}, + {Time: 11 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(3)}}, + }, + }, + { + name: "Elapsed_String", + q: `SELECT elapsed(value, 1s) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`, + typ: influxql.String, + itrs: []query.Iterator{ + &StringIterator{Points: []query.StringPoint{ + {Name: "cpu", Time: 0 * Second, Value: "a"}, + {Name: "cpu", Time: 4 * Second, Value: "b"}, + {Name: "cpu", Time: 8 * Second, Value: "c"}, + {Name: "cpu", Time: 11 * Second, Value: "d"}, + }}, + }, + rows: []query.Row{ + {Time: 4 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(4)}}, + {Time: 8 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(4)}}, + {Time: 11 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(3)}}, + }, + }, + { + name: "Elapsed_Boolean", + q: `SELECT elapsed(value, 
1s) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`, + typ: influxql.Boolean, + itrs: []query.Iterator{ + &BooleanIterator{Points: []query.BooleanPoint{ + {Name: "cpu", Time: 0 * Second, Value: true}, + {Name: "cpu", Time: 4 * Second, Value: false}, + {Name: "cpu", Time: 8 * Second, Value: false}, + {Name: "cpu", Time: 11 * Second, Value: true}, + }}, + }, + rows: []query.Row{ + {Time: 4 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(4)}}, + {Time: 8 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(4)}}, + {Time: 11 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(3)}}, + }, + }, + { + name: "Integral_Float", + q: `SELECT integral(value) FROM cpu`, + typ: influxql.Float, + itrs: []query.Iterator{ + &FloatIterator{Points: []query.FloatPoint{ + {Name: "cpu", Time: 10 * Second, Value: 20}, + {Name: "cpu", Time: 15 * Second, Value: 10}, + {Name: "cpu", Time: 20 * Second, Value: 0}, + {Name: "cpu", Time: 30 * Second, Value: -10}, + }}, + }, + rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(50)}}, + }, + }, + { + name: "Integral_Duplicate_Float", + q: `SELECT integral(value) FROM cpu`, + typ: influxql.Float, + itrs: []query.Iterator{ + &FloatIterator{Points: []query.FloatPoint{ + {Name: "cpu", Time: 0 * Second, Value: 20}, + {Name: "cpu", Time: 5 * Second, Value: 10}, + {Name: "cpu", Time: 5 * Second, Value: 30}, + {Name: "cpu", Time: 10 * Second, Value: 40}, + }}, + }, + rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(250)}}, + }, + }, + { + name: "Integral_Float_GroupByTime", + q: `SELECT integral(value) FROM cpu WHERE time > 0s AND time < 60s GROUP BY time(20s)`, + typ: influxql.Float, + itrs: []query.Iterator{ + &FloatIterator{Points: []query.FloatPoint{ + {Name: "cpu", Time: 10 * Second, Value: 20}, + {Name: "cpu", Time: 15 * Second, Value: 10}, + {Name: "cpu", Time: 20 * Second, Value: 0}, + {Name: "cpu", Time: 30 * Second, Value: -10}, + }}, + }, + rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(100)}}, + {Time: 20 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(-50)}}, + }, + }, + { + name: "Integral_Float_InterpolateGroupByTime", + q: `SELECT integral(value) FROM cpu WHERE time > 0s AND time < 60s GROUP BY time(20s)`, + typ: influxql.Float, + itrs: []query.Iterator{ + &FloatIterator{Points: []query.FloatPoint{ + {Name: "cpu", Time: 10 * Second, Value: 20}, + {Name: "cpu", Time: 15 * Second, Value: 10}, + {Name: "cpu", Time: 25 * Second, Value: 0}, + {Name: "cpu", Time: 30 * Second, Value: -10}, + }}, + }, + rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(112.5)}}, + {Time: 20 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(-12.5)}}, + }, + }, + { + name: "Integral_Integer", + q: `SELECT integral(value) FROM cpu`, + typ: influxql.Integer, + itrs: []query.Iterator{ + &IntegerIterator{Points: []query.IntegerPoint{ + {Name: "cpu", Time: 0 * Second, Value: 20}, + {Name: "cpu", Time: 5 * Second, Value: 10}, + {Name: "cpu", Time: 10 * Second, Value: 0}, + {Name: "cpu", Time: 20 * Second, Value: -10}, + }}, + }, + rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(50)}}, + }, + }, + { + name: "Integral_Duplicate_Integer", + q: `SELECT integral(value, 2s) FROM 
cpu`, + typ: influxql.Integer, + itrs: []query.Iterator{ + &IntegerIterator{Points: []query.IntegerPoint{ + {Name: "cpu", Time: 0 * Second, Value: 20}, + {Name: "cpu", Time: 5 * Second, Value: 10}, + {Name: "cpu", Time: 5 * Second, Value: 30}, + {Name: "cpu", Time: 10 * Second, Value: 40}, + }}, + }, + rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(125)}}, + }, + }, + { + name: "Integral_Unsigned", + q: `SELECT integral(value) FROM cpu`, + typ: influxql.Unsigned, + itrs: []query.Iterator{ + &UnsignedIterator{Points: []query.UnsignedPoint{ + {Name: "cpu", Time: 0 * Second, Value: 20}, + {Name: "cpu", Time: 5 * Second, Value: 10}, + {Name: "cpu", Time: 10 * Second, Value: 0}, + }}, + }, + rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(100)}}, + }, + }, + { + name: "Integral_Duplicate_Unsigned", + q: `SELECT integral(value, 2s) FROM cpu`, + typ: influxql.Unsigned, + itrs: []query.Iterator{ + &UnsignedIterator{Points: []query.UnsignedPoint{ + {Name: "cpu", Time: 0 * Second, Value: 20}, + {Name: "cpu", Time: 5 * Second, Value: 10}, + {Name: "cpu", Time: 5 * Second, Value: 30}, + {Name: "cpu", Time: 10 * Second, Value: 40}, + }}, + }, + rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(125)}}, + }, + }, + { + name: "MovingAverage_Float", + q: `SELECT moving_average(value, 2) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`, + typ: influxql.Float, + itrs: []query.Iterator{ + &FloatIterator{Points: []query.FloatPoint{ + {Name: "cpu", Time: 0 * Second, Value: 20}, + {Name: "cpu", Time: 4 * Second, Value: 10}, + {Name: "cpu", Time: 8 * Second, Value: 19}, + {Name: "cpu", Time: 12 * Second, Value: 3}, + }}, + }, + rows: []query.Row{ + {Time: 4 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(15)}}, + {Time: 8 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(14.5)}}, + {Time: 12 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(11)}}, + }, + }, + { + name: "MovingAverage_Integer", + q: `SELECT moving_average(value, 2) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`, + typ: influxql.Integer, + itrs: []query.Iterator{ + &IntegerIterator{Points: []query.IntegerPoint{ + {Name: "cpu", Time: 0 * Second, Value: 20}, + {Name: "cpu", Time: 4 * Second, Value: 10}, + {Name: "cpu", Time: 8 * Second, Value: 19}, + {Name: "cpu", Time: 12 * Second, Value: 3}, + }}, + }, + rows: []query.Row{ + {Time: 4 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(15)}}, + {Time: 8 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(14.5)}}, + {Time: 12 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(11)}}, + }, + }, + { + name: "MovingAverage_Unsigned", + q: `SELECT moving_average(value, 2) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`, + typ: influxql.Unsigned, + itrs: []query.Iterator{ + &UnsignedIterator{Points: []query.UnsignedPoint{ + {Name: "cpu", Time: 0 * Second, Value: 20}, + {Name: "cpu", Time: 4 * Second, Value: 10}, + {Name: "cpu", Time: 8 * Second, Value: 19}, + {Name: "cpu", Time: 12 * Second, Value: 3}, + }}, + }, + rows: []query.Row{ + {Time: 4 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(15)}}, + {Time: 8 * Second, Series: query.Series{Name: "cpu"}, Values: 
[]interface{}{float64(14.5)}}, + {Time: 12 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(11)}}, + }, + }, + { + name: "CumulativeSum_Float", + q: `SELECT cumulative_sum(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`, + typ: influxql.Float, + itrs: []query.Iterator{ + &FloatIterator{Points: []query.FloatPoint{ + {Name: "cpu", Time: 0 * Second, Value: 20}, + {Name: "cpu", Time: 4 * Second, Value: 10}, + {Name: "cpu", Time: 8 * Second, Value: 19}, + {Name: "cpu", Time: 12 * Second, Value: 3}, + }}, + }, + rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(20)}}, + {Time: 4 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(30)}}, + {Time: 8 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(49)}}, + {Time: 12 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(52)}}, + }, + }, + { + name: "CumulativeSum_Integer", + q: `SELECT cumulative_sum(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`, + typ: influxql.Integer, + itrs: []query.Iterator{ + &IntegerIterator{Points: []query.IntegerPoint{ + {Name: "cpu", Time: 0 * Second, Value: 20}, + {Name: "cpu", Time: 4 * Second, Value: 10}, + {Name: "cpu", Time: 8 * Second, Value: 19}, + {Name: "cpu", Time: 12 * Second, Value: 3}, + }}, + }, + rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(20)}}, + {Time: 4 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(30)}}, + {Time: 8 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(49)}}, + {Time: 12 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(52)}}, + }, + }, + { + name: "CumulativeSum_Unsigned", + q: `SELECT cumulative_sum(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`, + typ: influxql.Unsigned, + itrs: []query.Iterator{ + &UnsignedIterator{Points: []query.UnsignedPoint{ + {Name: "cpu", Time: 0 * Second, Value: 20}, + {Name: "cpu", Time: 4 * Second, Value: 10}, + {Name: "cpu", Time: 8 * Second, Value: 19}, + {Name: "cpu", Time: 12 * Second, Value: 3}, + }}, + }, + rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(20)}}, + {Time: 4 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(30)}}, + {Time: 8 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(49)}}, + {Time: 12 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(52)}}, + }, + }, + { + name: "CumulativeSum_Duplicate_Float", + q: `SELECT cumulative_sum(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`, + typ: influxql.Float, + itrs: []query.Iterator{ + &FloatIterator{Points: []query.FloatPoint{ + {Name: "cpu", Time: 0 * Second, Value: 20}, + {Name: "cpu", Time: 0 * Second, Value: 19}, + {Name: "cpu", Time: 4 * Second, Value: 10}, + {Name: "cpu", Time: 4 * Second, Value: 3}, + }}, + }, + rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(20)}}, + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(39)}}, + {Time: 4 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(49)}}, + {Time: 4 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(52)}}, + }, + }, + { + name: 
"CumulativeSum_Duplicate_Integer", + q: `SELECT cumulative_sum(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`, + typ: influxql.Integer, + itrs: []query.Iterator{ + &IntegerIterator{Points: []query.IntegerPoint{ + {Name: "cpu", Time: 0 * Second, Value: 20}, + {Name: "cpu", Time: 0 * Second, Value: 19}, + {Name: "cpu", Time: 4 * Second, Value: 10}, + {Name: "cpu", Time: 4 * Second, Value: 3}, + }}, + }, + rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(20)}}, + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(39)}}, + {Time: 4 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(49)}}, + {Time: 4 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(52)}}, + }, + }, + { + name: "CumulativeSum_Duplicate_Unsigned", + q: `SELECT cumulative_sum(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`, + typ: influxql.Unsigned, + itrs: []query.Iterator{ + &UnsignedIterator{Points: []query.UnsignedPoint{ + {Name: "cpu", Time: 0 * Second, Value: 20}, + {Name: "cpu", Time: 0 * Second, Value: 19}, + {Name: "cpu", Time: 4 * Second, Value: 10}, + {Name: "cpu", Time: 4 * Second, Value: 3}, + }}, + }, + rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(20)}}, + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(39)}}, + {Time: 4 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(49)}}, + {Time: 4 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(52)}}, + }, + }, + { + name: "HoltWinters_GroupBy_Agg", + q: `SELECT holt_winters(mean(value), 2, 2) FROM cpu WHERE time >= '1970-01-01T00:00:10Z' AND time < '1970-01-01T00:00:20Z' GROUP BY time(2s)`, + typ: influxql.Float, + expr: `mean(value::float)`, + itrs: []query.Iterator{ + &FloatIterator{Points: []query.FloatPoint{ + {Name: "cpu", Time: 10 * Second, Value: 4}, + {Name: "cpu", Time: 11 * Second, Value: 6}, + + {Name: "cpu", Time: 12 * Second, Value: 9}, + {Name: "cpu", Time: 13 * Second, Value: 11}, + + {Name: "cpu", Time: 14 * Second, Value: 5}, + {Name: "cpu", Time: 15 * Second, Value: 7}, + + {Name: "cpu", Time: 16 * Second, Value: 10}, + {Name: "cpu", Time: 17 * Second, Value: 12}, + + {Name: "cpu", Time: 18 * Second, Value: 6}, + {Name: "cpu", Time: 19 * Second, Value: 8}, + }}, + }, + rows: []query.Row{ + {Time: 20 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{11.960623419918432}}, + {Time: 22 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{7.953140268154609}}, + }, + }, + { + name: "DuplicateSelectors", + q: `SELECT min(value) * 2, min(value) / 2 FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`, + typ: influxql.Float, + expr: `min(value::float)`, + itrs: []query.Iterator{ + &FloatIterator{Points: []query.FloatPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 3}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100}, + }}, + &FloatIterator{Points: []query.FloatPoint{ + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19}, + {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2}, + }}, + &FloatIterator{Points: 
[]query.FloatPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10}, + }}, + }, + rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(38), float64(19) / 2}}, + {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(4), float64(1)}}, + {Time: 30 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(200), float64(50)}}, + {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{float64(20), float64(5)}}, + }, + }, + { + name: "GroupByOffset", + q: `SELECT mean(value) FROM cpu WHERE time >= now() - 2m AND time < now() GROUP BY time(1m, now())`, + typ: influxql.Float, + expr: `mean(value::float)`, + itrs: []query.Iterator{ + &FloatIterator{Points: []query.FloatPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 34 * Second, Value: 20}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 57 * Second, Value: 3}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 92 * Second, Value: 100}, + }}, + &FloatIterator{Points: []query.FloatPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 45 * Second, Value: 10}, + }}, + }, + rows: []query.Row{ + {Time: 30 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(11)}}, + {Time: 90 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(100)}}, + }, + now: mustParseTime("1970-01-01T00:02:30Z"), + }, + } { + t.Run(tt.name, func(t *testing.T) { + shardMapper := ShardMapper{ + MapShardsFn: func(sources influxql.Sources, _ influxql.TimeRange) query.ShardGroup { + var fields map[string]influxql.DataType + if tt.typ != influxql.Unknown { + fields = map[string]influxql.DataType{"value": tt.typ} + } else { + fields = tt.fields + } + return &ShardGroup{ + Fields: fields, + Dimensions: []string{"host", "region"}, + CreateIteratorFn: func(ctx context.Context, m *influxql.Measurement, opt query.IteratorOptions) (query.Iterator, error) { + if m.Name != "cpu" { + t.Fatalf("unexpected source: %s", m.Name) + } + if tt.expr != "" && !reflect.DeepEqual(opt.Expr, MustParseExpr(tt.expr)) { + t.Fatalf("unexpected expr: %s", spew.Sdump(opt.Expr)) + } + + itrs := tt.itrs + if _, ok := opt.Expr.(*influxql.Call); ok { + for i, itr := range itrs { + itr, err := query.NewCallIterator(itr, opt) + if err != nil { + return nil, err + } + itrs[i] = itr + } + } + return query.Iterators(itrs).Merge(opt) + }, + } + }, + } + + stmt := MustParseSelectStatement(tt.q) + stmt.OmitTime = true + cur, err := func(stmt *influxql.SelectStatement) (query.Cursor, error) { + c, err := query.Compile(stmt, query.CompileOptions{ + Now: tt.now, + }) + if err != nil { + return nil, err + } + + p, err := c.Prepare(&shardMapper, query.SelectOptions{}) + if err != nil { + return nil, err + } + return p.Select(context.Background()) + }(stmt) + if err != nil { + if tt.err == "" { + t.Fatal(err) + } else if have, want := err.Error(), tt.err; have != want { + t.Fatalf("unexpected error: have=%s want=%s", have, want) + } + } else if tt.err != "" { + t.Fatal("expected error") + } else if a, err := ReadCursor(cur); err != nil { + t.Fatalf("unexpected point: %s", err) + } else if diff := cmp.Diff(tt.rows, a); diff != "" { + t.Fatalf("unexpected points:\n%s", diff) + } + }) + } +} + +// Ensure a SELECT with raw fields works for all types. 
+func TestSelect_Raw(t *testing.T) { + shardMapper := ShardMapper{ + MapShardsFn: func(sources influxql.Sources, _ influxql.TimeRange) query.ShardGroup { + return &ShardGroup{ + Fields: map[string]influxql.DataType{ + "f": influxql.Float, + "i": influxql.Integer, + "u": influxql.Unsigned, + "s": influxql.String, + "b": influxql.Boolean, + }, + CreateIteratorFn: func(ctx context.Context, m *influxql.Measurement, opt query.IteratorOptions) (query.Iterator, error) { + if m.Name != "cpu" { + t.Fatalf("unexpected source: %s", m.Name) + } + if !reflect.DeepEqual(opt.Aux, []influxql.VarRef{ + {Val: "b", Type: influxql.Boolean}, + {Val: "f", Type: influxql.Float}, + {Val: "i", Type: influxql.Integer}, + {Val: "s", Type: influxql.String}, + {Val: "u", Type: influxql.Unsigned}, + }) { + t.Fatalf("unexpected auxiliary fields: %v", opt.Aux) + } + return &FloatIterator{Points: []query.FloatPoint{ + {Name: "cpu", Time: 0 * Second, Aux: []interface{}{ + true, float64(20), int64(20), "a", uint64(20)}}, + {Name: "cpu", Time: 5 * Second, Aux: []interface{}{ + false, float64(10), int64(10), "b", uint64(10)}}, + {Name: "cpu", Time: 9 * Second, Aux: []interface{}{ + true, float64(19), int64(19), "c", uint64(19)}}, + }}, nil + }, + } + }, + } + + stmt := MustParseSelectStatement(`SELECT f, i, u, s, b FROM cpu`) + stmt.OmitTime = true + cur, err := query.Select(context.Background(), stmt, &shardMapper, query.SelectOptions{}) + if err != nil { + t.Errorf("parse error: %s", err) + } else if a, err := ReadCursor(cur); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if diff := cmp.Diff([]query.Row{ + { + Time: 0 * Second, + Series: query.Series{ + Name: "cpu", + }, + Values: []interface{}{float64(20), int64(20), uint64(20), "a", true}, + }, + { + Time: 5 * Second, + Series: query.Series{ + Name: "cpu", + }, + Values: []interface{}{float64(10), int64(10), uint64(10), "b", false}, + }, + { + Time: 9 * Second, + Series: query.Series{ + Name: "cpu", + }, + Values: []interface{}{float64(19), int64(19), uint64(19), "c", true}, + }, + }, a); diff != "" { + t.Errorf("unexpected points:\n%s", diff) + } +} + +// Ensure a SELECT binary expr queries can be executed as floats. 
+func TestSelect_BinaryExpr(t *testing.T) { + shardMapper := ShardMapper{ + MapShardsFn: func(sources influxql.Sources, _ influxql.TimeRange) query.ShardGroup { + return &ShardGroup{ + Fields: map[string]influxql.DataType{ + "f": influxql.Float, + "i": influxql.Integer, + "u": influxql.Unsigned, + }, + CreateIteratorFn: func(ctx context.Context, m *influxql.Measurement, opt query.IteratorOptions) (query.Iterator, error) { + if m.Name != "cpu" { + t.Fatalf("unexpected source: %s", m.Name) + } + makeAuxFields := func(value int) []interface{} { + aux := make([]interface{}, len(opt.Aux)) + for i := range aux { + switch opt.Aux[i].Type { + case influxql.Float: + aux[i] = float64(value) + case influxql.Integer: + aux[i] = int64(value) + case influxql.Unsigned: + aux[i] = uint64(value) + } + } + return aux + } + return &FloatIterator{Points: []query.FloatPoint{ + {Name: "cpu", Time: 0 * Second, Aux: makeAuxFields(20)}, + {Name: "cpu", Time: 5 * Second, Aux: makeAuxFields(10)}, + {Name: "cpu", Time: 9 * Second, Aux: makeAuxFields(19)}, + }}, nil + }, + } + }, + } + + for _, test := range []struct { + Name string + Statement string + Rows []query.Row + Err string + }{ + { + Name: "Float_AdditionRHS_Number", + Statement: `SELECT f + 2.0 FROM cpu`, + Rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(22)}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(12)}}, + {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(21)}}, + }, + }, + { + Name: "Integer_AdditionRHS_Number", + Statement: `SELECT i + 2.0 FROM cpu`, + Rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(22)}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(12)}}, + {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(21)}}, + }, + }, + { + Name: "Unsigned_AdditionRHS_Number", + Statement: `SELECT u + 2.0 FROM cpu`, + Rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(22)}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(12)}}, + {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(21)}}, + }, + }, + { + Name: "Float_AdditionRHS_Integer", + Statement: `SELECT f + 2 FROM cpu`, + Rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(22)}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(12)}}, + {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(21)}}, + }, + }, + { + Name: "Integer_AdditionRHS_Integer", + Statement: `SELECT i + 2 FROM cpu`, + Rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(22)}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(12)}}, + {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(21)}}, + }, + }, + { + Name: "Unsigned_AdditionRHS_Integer", + Statement: `SELECT u + 2 FROM cpu`, + Rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(22)}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(12)}}, + {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(21)}}, + }, + }, + { + Name: 
"Float_AdditionRHS_Unsigned", + Statement: `SELECT f + 9223372036854775808 FROM cpu`, + Rows: []query.Row{ // adding small floats to this does not change the value, this is expected + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(9223372036854775808)}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(9223372036854775808)}}, + {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(9223372036854775808)}}, + }, + }, + { + Name: "Integer_AdditionRHS_Unsigned", + Statement: `SELECT i + 9223372036854775808 FROM cpu`, + Err: `type error: i::integer + 9223372036854775808: cannot use + with an integer and unsigned literal`, + }, + { + Name: "Unsigned_AdditionRHS_Unsigned", + Statement: `SELECT u + 9223372036854775808 FROM cpu`, + Rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(9223372036854775828)}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(9223372036854775818)}}, + {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(9223372036854775827)}}, + }, + }, + { + Name: "Float_AdditionLHS_Number", + Statement: `SELECT 2.0 + f FROM cpu`, + Rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(22)}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(12)}}, + {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(21)}}, + }, + }, + { + Name: "Integer_AdditionLHS_Number", + Statement: `SELECT 2.0 + i FROM cpu`, + Rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(22)}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(12)}}, + {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(21)}}, + }, + }, + { + Name: "Unsigned_AdditionLHS_Number", + Statement: `SELECT 2.0 + u FROM cpu`, + Rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(22)}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(12)}}, + {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(21)}}, + }, + }, + { + Name: "Float_AdditionLHS_Integer", + Statement: `SELECT 2 + f FROM cpu`, + Rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(22)}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(12)}}, + {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(21)}}, + }, + }, + { + Name: "Integer_AdditionLHS_Integer", + Statement: `SELECT 2 + i FROM cpu`, + Rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(22)}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(12)}}, + {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(21)}}, + }, + }, + { + Name: "Unsigned_AdditionLHS_Integer", + Statement: `SELECT 2 + u FROM cpu`, + Rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(22)}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(12)}}, + {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(21)}}, + }, + }, + { + Name: 
"Float_AdditionLHS_Unsigned", + Statement: `SELECT 9223372036854775808 + f FROM cpu`, + Rows: []query.Row{ // adding small floats to this does not change the value, this is expected + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(9223372036854775808)}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(9223372036854775808)}}, + {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(9223372036854775808)}}, + }, + }, + { + Name: "Integer_AdditionLHS_Unsigned", + Statement: `SELECT 9223372036854775808 + i FROM cpu`, + Err: `type error: 9223372036854775808 + i::integer: cannot use + with an integer and unsigned literal`, + }, + { + Name: "Unsigned_AdditionLHS_Unsigned", + Statement: `SELECT 9223372036854775808 + u FROM cpu`, + Rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(9223372036854775828)}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(9223372036854775818)}}, + {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(9223372036854775827)}}, + }, + }, + { + Name: "Float_Add_Float", + Statement: `SELECT f + f FROM cpu`, + Rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(40)}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(20)}}, + {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(38)}}, + }, + }, + { + Name: "Integer_Add_Integer", + Statement: `SELECT i + i FROM cpu`, + Rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(40)}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(20)}}, + {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(38)}}, + }, + }, + { + Name: "Unsigned_Add_Unsigned", + Statement: `SELECT u + u FROM cpu`, + Rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(40)}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(20)}}, + {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(38)}}, + }, + }, + { + Name: "Float_Add_Integer", + Statement: `SELECT f + i FROM cpu`, + Rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(40)}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(20)}}, + {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(38)}}, + }, + }, + { + Name: "Float_Add_Unsigned", + Statement: `SELECT f + u FROM cpu`, + Rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(40)}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(20)}}, + {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(38)}}, + }, + }, + { + Name: "Integer_Add_Unsigned", + Statement: `SELECT i + u FROM cpu`, + Err: `type error: i::integer + u::unsigned: cannot use + between an integer and unsigned, an explicit cast is required`, + }, + { + Name: "Float_MultiplicationRHS_Number", + Statement: `SELECT f * 2.0 FROM cpu`, + Rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(40)}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu"}, 
Values: []interface{}{float64(20)}}, + {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(38)}}, + }, + }, + { + Name: "Integer_MultiplicationRHS_Number", + Statement: `SELECT i * 2.0 FROM cpu`, + Rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(40)}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(20)}}, + {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(38)}}, + }, + }, + { + Name: "Unsigned_MultiplicationRHS_Number", + Statement: `SELECT u * 2.0 FROM cpu`, + Rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(40)}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(20)}}, + {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(38)}}, + }, + }, + { + Name: "Float_MultiplicationRHS_Integer", + Statement: `SELECT f * 2 FROM cpu`, + Rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(40)}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(20)}}, + {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(38)}}, + }, + }, + { + Name: "Integer_MultiplicationRHS_Integer", + Statement: `SELECT i * 2 FROM cpu`, + Rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(40)}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(20)}}, + {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(38)}}, + }, + }, + { + Name: "Unsigned_MultiplicationRHS_Integer", + Statement: `SELECT u * 2 FROM cpu`, + Rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(40)}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(20)}}, + {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(38)}}, + }, + }, + // Skip unsigned literals for multiplication because there is inevitable + // overflow. While it is possible to do, the behavior is considered undefined + // and it's not a very good test because it would result in just plugging + // the values into the computer anyway to figure out what the correct answer + // is rather than calculating it myself and testing that I get the correct + // value. 
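+	// For example, with wrap-around uint64 arithmetic a point value of 20
+	// multiplied by the unsigned literal 9223372036854775808 (2^63) already
+	// reduces to 0 modulo 2^64, so a hand-written expected row would only
+	// restate whatever the implementation happens to produce.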
+ { + Name: "Float_MultiplicationLHS_Number", + Statement: `SELECT 2.0 * f FROM cpu`, + Rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(40)}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(20)}}, + {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(38)}}, + }, + }, + { + Name: "Integer_MultiplicationLHS_Number", + Statement: `SELECT 2.0 * i FROM cpu`, + Rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(40)}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(20)}}, + {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(38)}}, + }, + }, + { + Name: "Unsigned_MultiplicationLHS_Number", + Statement: `SELECT 2.0 * u FROM cpu`, + Rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(40)}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(20)}}, + {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(38)}}, + }, + }, + { + Name: "Float_MultiplicationLHS_Integer", + Statement: `SELECT 2 * f FROM cpu`, + Rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(40)}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(20)}}, + {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(38)}}, + }, + }, + { + Name: "Integer_MultiplicationLHS_Integer", + Statement: `SELECT 2 * i FROM cpu`, + Rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(40)}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(20)}}, + {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(38)}}, + }, + }, + { + Name: "Unsigned_MultiplicationLHS_Integer", + Statement: `SELECT 2 * u FROM cpu`, + Rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(40)}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(20)}}, + {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(38)}}, + }, + }, + // Skip unsigned literals for multiplication. See above. 
+ { + Name: "Float_Multiply_Float", + Statement: `SELECT f * f FROM cpu`, + Rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(400)}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(100)}}, + {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(361)}}, + }, + }, + { + Name: "Integer_Multiply_Integer", + Statement: `SELECT i * i FROM cpu`, + Rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(400)}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(100)}}, + {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(361)}}, + }, + }, + { + Name: "Unsigned_Multiply_Unsigned", + Statement: `SELECT u * u FROM cpu`, + Rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(400)}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(100)}}, + {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(361)}}, + }, + }, + { + Name: "Float_Multiply_Integer", + Statement: `SELECT f * i FROM cpu`, + Rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(400)}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(100)}}, + {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(361)}}, + }, + }, + { + Name: "Float_Multiply_Unsigned", + Statement: `SELECT f * u FROM cpu`, + Rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(400)}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(100)}}, + {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(361)}}, + }, + }, + { + Name: "Integer_Multiply_Unsigned", + Statement: `SELECT i * u FROM cpu`, + Err: `type error: i::integer * u::unsigned: cannot use * between an integer and unsigned, an explicit cast is required`, + }, + { + Name: "Float_SubtractionRHS_Number", + Statement: `SELECT f - 2.0 FROM cpu`, + Rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(18)}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(8)}}, + {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(17)}}, + }, + }, + { + Name: "Integer_SubtractionRHS_Number", + Statement: `SELECT i - 2.0 FROM cpu`, + Rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(18)}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(8)}}, + {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(17)}}, + }, + }, + { + Name: "Unsigned_SubtractionRHS_Number", + Statement: `SELECT u - 2.0 FROM cpu`, + Rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(18)}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(8)}}, + {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(17)}}, + }, + }, + { + Name: "Float_SubtractionRHS_Integer", + Statement: `SELECT f - 2 FROM cpu`, + Rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(18)}}, + {Time: 5 * Second, Series: 
query.Series{Name: "cpu"}, Values: []interface{}{float64(8)}}, + {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(17)}}, + }, + }, + { + Name: "Integer_SubtractionRHS_Integer", + Statement: `SELECT i - 2 FROM cpu`, + Rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(18)}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(8)}}, + {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(17)}}, + }, + }, + { + Name: "Unsigned_SubtractionRHS_Integer", + Statement: `SELECT u - 2 FROM cpu`, + Rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(18)}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(8)}}, + {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(17)}}, + }, + }, + { + Name: "Float_SubtractionRHS_Unsigned", + Statement: `SELECT f - 9223372036854775808 FROM cpu`, + Rows: []query.Row{ // adding small floats to this does not change the value, this is expected + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(-9223372036854775808)}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(-9223372036854775808)}}, + {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(-9223372036854775808)}}, + }, + }, + { + Name: "Integer_SubtractionRHS_Unsigned", + Statement: `SELECT i - 9223372036854775808 FROM cpu`, + Err: `type error: i::integer - 9223372036854775808: cannot use - with an integer and unsigned literal`, + }, + // Skip Unsigned_SubtractionRHS_Integer because it would result in underflow. + { + Name: "Float_SubtractionLHS_Number", + Statement: `SELECT 2.0 - f FROM cpu`, + Rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(-18)}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(-8)}}, + {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(-17)}}, + }, + }, + { + Name: "Integer_SubtractionLHS_Number", + Statement: `SELECT 2.0 - i FROM cpu`, + Rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(-18)}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(-8)}}, + {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(-17)}}, + }, + }, + { + Name: "Unsigned_SubtractionLHS_Number", + Statement: `SELECT 2.0 - u FROM cpu`, + Rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(-18)}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(-8)}}, + {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(-17)}}, + }, + }, + { + Name: "Float_SubtractionLHS_Integer", + Statement: `SELECT 2 - f FROM cpu`, + Rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(-18)}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(-8)}}, + {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(-17)}}, + }, + }, + { + Name: "Integer_SubtractionLHS_Integer", + Statement: `SELECT 2 - i FROM cpu`, + Rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: 
[]interface{}{int64(-18)}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(-8)}}, + {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(-17)}}, + }, + }, + { + Name: "Unsigned_SubtractionLHS_Integer", + Statement: `SELECT 30 - u FROM cpu`, + Rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(10)}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(20)}}, + {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(11)}}, + }, + }, + { + Name: "Float_SubtractionLHS_Unsigned", + Statement: `SELECT 9223372036854775808 - f FROM cpu`, // subtracting small floats to this does not change the value, this is expected + Rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(9223372036854775828)}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(9223372036854775828)}}, + {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(9223372036854775828)}}, + }, + }, + { + Name: "Integer_SubtractionLHS_Unsigned", + Statement: `SELECT 9223372036854775808 - i FROM cpu`, + Err: `type error: 9223372036854775808 - i::integer: cannot use - with an integer and unsigned literal`, + }, + { + Name: "Unsigned_SubtractionLHS_Unsigned", + Statement: `SELECT 9223372036854775808 - u FROM cpu`, + Rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(9223372036854775788)}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(9223372036854775798)}}, + {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(9223372036854775789)}}, + }, + }, + { + Name: "Float_Subtract_Float", + Statement: `SELECT f - f FROM cpu`, + Rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(0)}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(0)}}, + {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(0)}}, + }, + }, + { + Name: "Integer_Subtract_Integer", + Statement: `SELECT i - i FROM cpu`, + Rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(0)}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(0)}}, + {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(0)}}, + }, + }, + { + Name: "Unsigned_Subtract_Unsigned", + Statement: `SELECT u - u FROM cpu`, + Rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(0)}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(0)}}, + {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(0)}}, + }, + }, + { + Name: "Float_Subtract_Integer", + Statement: `SELECT f - i FROM cpu`, + Rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(0)}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(0)}}, + {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(0)}}, + }, + }, + { + Name: "Float_Subtract_Unsigned", + Statement: `SELECT f - u FROM cpu`, + Rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(0)}}, + 
{Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(0)}}, + {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(0)}}, + }, + }, + { + Name: "Integer_Subtract_Unsigned", + Statement: `SELECT i - u FROM cpu`, + Err: `type error: i::integer - u::unsigned: cannot use - between an integer and unsigned, an explicit cast is required`, + }, + { + Name: "Float_DivisionRHS_Number", + Statement: `SELECT f / 2.0 FROM cpu`, + Rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(10)}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(5)}}, + {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(19) / 2}}, + }, + }, + { + Name: "Integer_DivisionRHS_Number", + Statement: `SELECT i / 2.0 FROM cpu`, + Rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(10)}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(5)}}, + {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(19) / 2}}, + }, + }, + { + Name: "Unsigned_DivisionRHS_Number", + Statement: `SELECT u / 2.0 FROM cpu`, + Rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(10)}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(5)}}, + {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(19) / 2}}, + }, + }, + { + Name: "Float_DivisionRHS_Integer", + Statement: `SELECT f / 2 FROM cpu`, + Rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(10)}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(5)}}, + {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(19) / 2}}, + }, + }, + { + Name: "Integer_DivisionRHS_Integer", + Statement: `SELECT i / 2 FROM cpu`, + Rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(10)}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(5)}}, + {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(19) / 2}}, + }, + }, + { + Name: "Unsigned_DivisionRHS_Integer", + Statement: `SELECT u / 2 FROM cpu`, + Rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(10)}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(5)}}, + {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(9)}}, + }, + }, + { + Name: "Float_DivisionRHS_Unsigned", + Statement: `SELECT f / 9223372036854775808 FROM cpu`, + Rows: []query.Row{ // dividing small floats does not result in a meaningful result, this is expected + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(20) / float64(9223372036854775808)}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(10) / float64(9223372036854775808)}}, + {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(19) / float64(9223372036854775808)}}, + }, + }, + { + Name: "Integer_DivisionRHS_Unsigned", + Statement: `SELECT i / 9223372036854775808 FROM cpu`, + Err: `type error: i::integer / 9223372036854775808: cannot use / with an integer and unsigned literal`, + }, + { 
+ Name: "Unsigned_DivisionRHS_Unsigned", + Statement: `SELECT u / 9223372036854775808 FROM cpu`, + Rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(0)}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(0)}}, + {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(0)}}, + }, + }, + { + Name: "Float_DivisionLHS_Number", + Statement: `SELECT 38.0 / f FROM cpu`, + Rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(1.9)}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(3.8)}}, + {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(2)}}, + }, + }, + { + Name: "Integer_DivisionLHS_Number", + Statement: `SELECT 38.0 / i FROM cpu`, + Rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(1.9)}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(3.8)}}, + {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(2)}}, + }, + }, + { + Name: "Unsigned_DivisionLHS_Number", + Statement: `SELECT 38.0 / u FROM cpu`, + Rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(1.9)}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(3.8)}}, + {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(2)}}, + }, + }, + { + Name: "Float_DivisionLHS_Integer", + Statement: `SELECT 38 / f FROM cpu`, + Rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(1.9)}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(3.8)}}, + {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(2)}}, + }, + }, + { + Name: "Integer_DivisionLHS_Integer", + Statement: `SELECT 38 / i FROM cpu`, + Rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(1.9)}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(3.8)}}, + {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(2)}}, + }, + }, + { + Name: "Unsigned_DivisionLHS_Integer", + Statement: `SELECT 38 / u FROM cpu`, + Rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(1)}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(3)}}, + {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(2)}}, + }, + }, + { + Name: "Float_DivisionLHS_Unsigned", + Statement: `SELECT 9223372036854775808 / f FROM cpu`, + Rows: []query.Row{ // dividing large floats results in inaccurate outputs so these may not be correct, but that is considered normal for floating point + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(461168601842738816)}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(922337203685477632)}}, + {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(485440633518672384)}}, + }, + }, + { + Name: "Integer_DivisionLHS_Unsigned", + Statement: `SELECT 9223372036854775808 / i FROM cpu`, + Err: `type error: 9223372036854775808 / i::integer: cannot use / with an integer and unsigned literal`, + }, + 
{ + Name: "Unsigned_DivisionLHS_Unsigned", + Statement: `SELECT 9223372036854775808 / u FROM cpu`, + Rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(461168601842738790)}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(922337203685477580)}}, + {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(485440633518672410)}}, + }, + }, + { + Name: "Float_Divide_Float", + Statement: `SELECT f / f FROM cpu`, + Rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(1)}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(1)}}, + {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(1)}}, + }, + }, + { + Name: "Integer_Divide_Integer", + Statement: `SELECT i / i FROM cpu`, + Rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(1)}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(1)}}, + {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(1)}}, + }, + }, + { + Name: "Unsigned_Divide_Unsigned", + Statement: `SELECT u / u FROM cpu`, + Rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(1)}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(1)}}, + {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(1)}}, + }, + }, + { + Name: "Float_Divide_Integer", + Statement: `SELECT f / i FROM cpu`, + Rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(1)}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(1)}}, + {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(1)}}, + }, + }, + { + Name: "Float_Divide_Unsigned", + Statement: `SELECT f / u FROM cpu`, + Rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(1)}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(1)}}, + {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(1)}}, + }, + }, + { + Name: "Integer_Divide_Unsigned", + Statement: `SELECT i / u FROM cpu`, + Err: `type error: i::integer / u::unsigned: cannot use / between an integer and unsigned, an explicit cast is required`, + }, + { + Name: "Integer_BitwiseAndRHS", + Statement: `SELECT i & 254 FROM cpu`, + Rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(20)}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(10)}}, + {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(18)}}, + }, + }, + { + Name: "Unsigned_BitwiseAndRHS", + Statement: `SELECT u & 254 FROM cpu`, + Rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(20)}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(10)}}, + {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(18)}}, + }, + }, + { + Name: "Integer_BitwiseOrLHS", + Statement: `SELECT 4 | i FROM cpu`, + Rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(20)}}, + {Time: 5 * Second, Series: 
query.Series{Name: "cpu"}, Values: []interface{}{int64(14)}}, + {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(23)}}, + }, + }, + { + Name: "Unsigned_BitwiseOrLHS", + Statement: `SELECT 4 | u FROM cpu`, + Rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(20)}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(14)}}, + {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(23)}}, + }, + }, + { + Name: "Integer_BitwiseXOr_Integer", + Statement: `SELECT i ^ i FROM cpu`, + Rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(0)}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(0)}}, + {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(0)}}, + }, + }, + { + Name: "Unsigned_BitwiseXOr_Integer", + Statement: `SELECT u ^ u FROM cpu`, + Rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(0)}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(0)}}, + {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(0)}}, + }, + }, + } { + t.Run(test.Name, func(t *testing.T) { + stmt := MustParseSelectStatement(test.Statement) + stmt.OmitTime = true + cur, err := query.Select(context.Background(), stmt, &shardMapper, query.SelectOptions{}) + if err != nil { + if have, want := err.Error(), test.Err; want != "" { + if have != want { + t.Errorf("%s: unexpected parse error: %s != %s", test.Name, have, want) + } + } else { + t.Errorf("%s: unexpected parse error: %s", test.Name, have) + } + } else if test.Err != "" { + t.Fatalf("%s: expected error", test.Name) + } else if a, err := ReadCursor(cur); err != nil { + t.Fatalf("%s: unexpected error: %s", test.Name, err) + } else if diff := cmp.Diff(test.Rows, a); diff != "" { + t.Errorf("%s: unexpected points:\n%s", test.Name, diff) + } + }) + } +} + +// Ensure a SELECT binary expr queries can be executed as booleans. 
+func TestSelect_BinaryExpr_Boolean(t *testing.T) { + shardMapper := ShardMapper{ + MapShardsFn: func(sources influxql.Sources, _ influxql.TimeRange) query.ShardGroup { + return &ShardGroup{ + Fields: map[string]influxql.DataType{ + "one": influxql.Boolean, + "two": influxql.Boolean, + }, + CreateIteratorFn: func(ctx context.Context, m *influxql.Measurement, opt query.IteratorOptions) (query.Iterator, error) { + if m.Name != "cpu" { + t.Fatalf("unexpected source: %s", m.Name) + } + makeAuxFields := func(value bool) []interface{} { + aux := make([]interface{}, len(opt.Aux)) + for i := range aux { + aux[i] = value + } + return aux + } + return &FloatIterator{Points: []query.FloatPoint{ + {Name: "cpu", Time: 0 * Second, Aux: makeAuxFields(true)}, + {Name: "cpu", Time: 5 * Second, Aux: makeAuxFields(false)}, + {Name: "cpu", Time: 9 * Second, Aux: makeAuxFields(true)}, + }}, nil + }, + } + }, + } + + for _, test := range []struct { + Name string + Statement string + Rows []query.Row + }{ + { + Name: "BinaryXOrRHS", + Statement: `SELECT one ^ true FROM cpu`, + Rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{false}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{true}}, + {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{false}}, + }, + }, + { + Name: "BinaryOrLHS", + Statement: `SELECT true | two FROM cpu`, + Rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{true}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{true}}, + {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{true}}, + }, + }, + { + Name: "TwoSeriesBitwiseAnd", + Statement: `SELECT one & two FROM cpu`, + Rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{true}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{false}}, + {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{true}}, + }, + }, + } { + t.Run(test.Name, func(t *testing.T) { + stmt := MustParseSelectStatement(test.Statement) + stmt.OmitTime = true + cur, err := query.Select(context.Background(), stmt, &shardMapper, query.SelectOptions{}) + if err != nil { + t.Errorf("%s: parse error: %s", test.Name, err) + } else if a, err := ReadCursor(cur); err != nil { + t.Fatalf("%s: unexpected error: %s", test.Name, err) + } else if diff := cmp.Diff(test.Rows, a); diff != "" { + t.Errorf("%s: unexpected points:\n%s", test.Name, diff) + } + }) + } +} + +// Ensure a SELECT binary expr with nil values can be executed. +// Nil values may be present when a field is missing from one iterator, +// but not the other. 
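// For example, with aux values (total, value) = (20, nil), (10, 15), (nil, 5)
// as produced by the iterator below, the expression total + value yields
// nil, 25, nil: a nil operand on either side makes the result nil.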
+func TestSelect_BinaryExpr_NilValues(t *testing.T) { + shardMapper := ShardMapper{ + MapShardsFn: func(sources influxql.Sources, _ influxql.TimeRange) query.ShardGroup { + return &ShardGroup{ + Fields: map[string]influxql.DataType{ + "total": influxql.Float, + "value": influxql.Float, + }, + CreateIteratorFn: func(ctx context.Context, m *influxql.Measurement, opt query.IteratorOptions) (query.Iterator, error) { + if m.Name != "cpu" { + t.Fatalf("unexpected source: %s", m.Name) + } + return &FloatIterator{Points: []query.FloatPoint{ + {Name: "cpu", Time: 0 * Second, Aux: []interface{}{float64(20), nil}}, + {Name: "cpu", Time: 5 * Second, Aux: []interface{}{float64(10), float64(15)}}, + {Name: "cpu", Time: 9 * Second, Aux: []interface{}{nil, float64(5)}}, + }}, nil + }, + } + }, + } + + for _, test := range []struct { + Name string + Statement string + Rows []query.Row + }{ + { + Name: "Addition", + Statement: `SELECT total + value FROM cpu`, + Rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{nil}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(25)}}, + {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{nil}}, + }, + }, + { + Name: "Subtraction", + Statement: `SELECT total - value FROM cpu`, + Rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{nil}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(-5)}}, + {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{nil}}, + }, + }, + { + Name: "Multiplication", + Statement: `SELECT total * value FROM cpu`, + Rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{nil}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(150)}}, + {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{nil}}, + }, + }, + { + Name: "Division", + Statement: `SELECT total / value FROM cpu`, + Rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{nil}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(10) / float64(15)}}, + {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{nil}}, + }, + }, + } { + t.Run(test.Name, func(t *testing.T) { + stmt := MustParseSelectStatement(test.Statement) + stmt.OmitTime = true + cur, err := query.Select(context.Background(), stmt, &shardMapper, query.SelectOptions{}) + if err != nil { + t.Errorf("%s: parse error: %s", test.Name, err) + } else if a, err := ReadCursor(cur); err != nil { + t.Fatalf("%s: unexpected error: %s", test.Name, err) + } else if diff := cmp.Diff(test.Rows, a); diff != "" { + t.Errorf("%s: unexpected points:\n%s", test.Name, diff) + } + }) + } +} + +type ShardMapper struct { + MapShardsFn func(sources influxql.Sources, t influxql.TimeRange) query.ShardGroup +} + +func (m *ShardMapper) MapShards(sources influxql.Sources, t influxql.TimeRange, opt query.SelectOptions) (query.ShardGroup, error) { + shards := m.MapShardsFn(sources, t) + return shards, nil +} + +type ShardGroup struct { + CreateIteratorFn func(ctx context.Context, m *influxql.Measurement, opt query.IteratorOptions) (query.Iterator, error) + Fields map[string]influxql.DataType + Dimensions []string +} + +func (sh *ShardGroup) CreateIterator(ctx context.Context, m *influxql.Measurement, opt query.IteratorOptions) (query.Iterator, error) { + 
return sh.CreateIteratorFn(ctx, m, opt) +} + +func (sh *ShardGroup) IteratorCost(m *influxql.Measurement, opt query.IteratorOptions) (query.IteratorCost, error) { + return query.IteratorCost{}, nil +} + +func (sh *ShardGroup) FieldDimensions(m *influxql.Measurement) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error) { + fields = make(map[string]influxql.DataType) + dimensions = make(map[string]struct{}) + + for f, typ := range sh.Fields { + fields[f] = typ + } + for _, d := range sh.Dimensions { + dimensions[d] = struct{}{} + } + return fields, dimensions, nil +} + +func (sh *ShardGroup) MapType(m *influxql.Measurement, field string) influxql.DataType { + if typ, ok := sh.Fields[field]; ok { + return typ + } + for _, d := range sh.Dimensions { + if d == field { + return influxql.Tag + } + } + return influxql.Unknown +} + +func (*ShardGroup) Close() error { + return nil +} + +func BenchmarkSelect_Raw_1K(b *testing.B) { benchmarkSelectRaw(b, 1000) } +func BenchmarkSelect_Raw_100K(b *testing.B) { benchmarkSelectRaw(b, 1000000) } + +func benchmarkSelectRaw(b *testing.B, pointN int) { + benchmarkSelect(b, MustParseSelectStatement(`SELECT fval FROM cpu`), NewRawBenchmarkIteratorCreator(pointN)) +} + +func benchmarkSelect(b *testing.B, stmt *influxql.SelectStatement, shardMapper query.ShardMapper) { + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + cur, err := query.Select(context.Background(), stmt, shardMapper, query.SelectOptions{}) + if err != nil { + b.Fatal(err) + } + query.DrainCursor(cur) + } +} + +// NewRawBenchmarkIteratorCreator returns a new mock iterator creator with generated fields. +func NewRawBenchmarkIteratorCreator(pointN int) query.ShardMapper { + return &ShardMapper{ + MapShardsFn: func(sources influxql.Sources, t influxql.TimeRange) query.ShardGroup { + return &ShardGroup{ + Fields: map[string]influxql.DataType{ + "fval": influxql.Float, + }, + CreateIteratorFn: func(ctx context.Context, m *influxql.Measurement, opt query.IteratorOptions) (query.Iterator, error) { + if opt.Expr != nil { + panic("unexpected expression") + } + + p := query.FloatPoint{ + Name: "cpu", + Aux: make([]interface{}, len(opt.Aux)), + } + + for i := range opt.Aux { + switch opt.Aux[i].Val { + case "fval": + p.Aux[i] = float64(100) + default: + panic("unknown iterator expr: " + opt.Expr.String()) + } + } + + return &FloatPointGenerator{N: pointN, Fn: func(i int) *query.FloatPoint { + p.Time = int64(time.Duration(i) * (10 * time.Second)) + return &p + }}, nil + }, + } + }, + } +} + +func benchmarkSelectDedupe(b *testing.B, seriesN, pointsPerSeries int) { + stmt := MustParseSelectStatement(`SELECT sval::string FROM cpu`) + stmt.Dedupe = true + + shardMapper := ShardMapper{ + MapShardsFn: func(sources influxql.Sources, t influxql.TimeRange) query.ShardGroup { + return &ShardGroup{ + Fields: map[string]influxql.DataType{ + "sval": influxql.String, + }, + CreateIteratorFn: func(ctx context.Context, m *influxql.Measurement, opt query.IteratorOptions) (query.Iterator, error) { + if opt.Expr != nil { + panic("unexpected expression") + } + + p := query.FloatPoint{ + Name: "tags", + Aux: []interface{}{nil}, + } + + return &FloatPointGenerator{N: seriesN * pointsPerSeries, Fn: func(i int) *query.FloatPoint { + p.Aux[0] = fmt.Sprintf("server%d", i%seriesN) + return &p + }}, nil + }, + } + }, + } + + b.ResetTimer() + benchmarkSelect(b, stmt, &shardMapper) +} + +func BenchmarkSelect_Dedupe_1K(b *testing.B) { benchmarkSelectDedupe(b, 1000, 100) } + +func benchmarkSelectTop(b *testing.B, 
seriesN, pointsPerSeries int) { + stmt := MustParseSelectStatement(`SELECT top(sval, 10) FROM cpu`) + + shardMapper := ShardMapper{ + MapShardsFn: func(sources influxql.Sources, t influxql.TimeRange) query.ShardGroup { + return &ShardGroup{ + Fields: map[string]influxql.DataType{ + "sval": influxql.Float, + }, + CreateIteratorFn: func(ctx context.Context, m *influxql.Measurement, opt query.IteratorOptions) (query.Iterator, error) { + if m.Name != "cpu" { + b.Fatalf("unexpected source: %s", m.Name) + } + if !reflect.DeepEqual(opt.Expr, MustParseExpr(`sval`)) { + b.Fatalf("unexpected expr: %s", spew.Sdump(opt.Expr)) + } + + p := query.FloatPoint{ + Name: "cpu", + } + + return &FloatPointGenerator{N: seriesN * pointsPerSeries, Fn: func(i int) *query.FloatPoint { + p.Value = float64(rand.Int63()) + p.Time = int64(time.Duration(i) * (10 * time.Second)) + return &p + }}, nil + }, + } + }, + } + + b.ResetTimer() + benchmarkSelect(b, stmt, &shardMapper) +} + +func BenchmarkSelect_Top_1K(b *testing.B) { benchmarkSelectTop(b, 1000, 1000) } + +// ReadCursor reads a Cursor into an array of points. +func ReadCursor(cur query.Cursor) ([]query.Row, error) { + defer cur.Close() + + var rows []query.Row + for { + var row query.Row + if !cur.Scan(&row) { + if err := cur.Err(); err != nil { + return nil, err + } + return rows, nil + } + rows = append(rows, row) + } +} diff --git a/vendor/github.com/influxdata/influxdb/query/statement_rewriter.go b/vendor/github.com/influxdata/influxdb/query/statement_rewriter.go new file mode 100644 index 0000000..6189b04 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/query/statement_rewriter.go @@ -0,0 +1,487 @@ +package query + +import ( + "errors" + "regexp" + + "github.com/influxdata/influxql" +) + +// RewriteStatement rewrites stmt into a new statement, if applicable. 
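A minimal usage sketch of the rewriter follows; the input statement and its expected rewrite mirror one of the cases in statement_rewriter_test.go further below, and the `main` wrapper plus the elided error handling are only for illustration:

```go
package main

import (
	"fmt"

	"github.com/influxdata/influxdb/query"
	"github.com/influxdata/influxql"
)

func main() {
	// Parse a meta query and let the engine rewrite it into a plain SELECT
	// over the corresponding system iterator.
	stmt, _ := influxql.ParseStatement(`SHOW FIELD KEYS FROM cpu`)
	rewritten, _ := query.RewriteStatement(stmt)
	fmt.Println(rewritten.String())
	// SELECT fieldKey, fieldType FROM _fieldKeys WHERE _name = 'cpu'
}
```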
+func RewriteStatement(stmt influxql.Statement) (influxql.Statement, error) { + switch stmt := stmt.(type) { + case *influxql.ShowFieldKeysStatement: + return rewriteShowFieldKeysStatement(stmt) + case *influxql.ShowFieldKeyCardinalityStatement: + return rewriteShowFieldKeyCardinalityStatement(stmt) + case *influxql.ShowMeasurementsStatement: + return rewriteShowMeasurementsStatement(stmt) + case *influxql.ShowMeasurementCardinalityStatement: + return rewriteShowMeasurementCardinalityStatement(stmt) + case *influxql.ShowSeriesStatement: + return rewriteShowSeriesStatement(stmt) + case *influxql.ShowSeriesCardinalityStatement: + return rewriteShowSeriesCardinalityStatement(stmt) + case *influxql.ShowTagKeysStatement: + return rewriteShowTagKeysStatement(stmt) + case *influxql.ShowTagKeyCardinalityStatement: + return rewriteShowTagKeyCardinalityStatement(stmt) + case *influxql.ShowTagValuesStatement: + return rewriteShowTagValuesStatement(stmt) + case *influxql.ShowTagValuesCardinalityStatement: + return rewriteShowTagValuesCardinalityStatement(stmt) + default: + return stmt, nil + } +} + +func rewriteShowFieldKeysStatement(stmt *influxql.ShowFieldKeysStatement) (influxql.Statement, error) { + return &influxql.SelectStatement{ + Fields: influxql.Fields([]*influxql.Field{ + {Expr: &influxql.VarRef{Val: "fieldKey"}}, + {Expr: &influxql.VarRef{Val: "fieldType"}}, + }), + Sources: rewriteSources(stmt.Sources, "_fieldKeys", stmt.Database), + Condition: rewriteSourcesCondition(stmt.Sources, nil), + Offset: stmt.Offset, + Limit: stmt.Limit, + SortFields: stmt.SortFields, + OmitTime: true, + Dedupe: true, + IsRawQuery: true, + }, nil +} + +func rewriteShowFieldKeyCardinalityStatement(stmt *influxql.ShowFieldKeyCardinalityStatement) (influxql.Statement, error) { + // Check for time in WHERE clause (not supported). + if influxql.HasTimeExpr(stmt.Condition) { + return nil, errors.New("SHOW FIELD KEY CARDINALITY doesn't support time in WHERE clause") + } + + // Use all field keys, if zero. + if len(stmt.Sources) == 0 { + stmt.Sources = influxql.Sources{ + &influxql.Measurement{Regex: &influxql.RegexLiteral{Val: regexp.MustCompile(`.+`)}}, + } + } + + return &influxql.SelectStatement{ + Fields: []*influxql.Field{ + { + Expr: &influxql.Call{ + Name: "count", + Args: []influxql.Expr{ + &influxql.Call{ + Name: "distinct", + Args: []influxql.Expr{&influxql.VarRef{Val: "_fieldKey"}}, + }, + }, + }, + Alias: "count", + }, + }, + Sources: rewriteSources2(stmt.Sources, stmt.Database), + Condition: stmt.Condition, + Dimensions: stmt.Dimensions, + Offset: stmt.Offset, + Limit: stmt.Limit, + OmitTime: true, + }, nil +} + +func rewriteShowMeasurementsStatement(stmt *influxql.ShowMeasurementsStatement) (influxql.Statement, error) { + var sources influxql.Sources + if stmt.Source != nil { + sources = influxql.Sources{stmt.Source} + } + + // Currently time based SHOW MEASUREMENT queries can't be supported because + // it's not possible to appropriate set operations such as a negated regex + // using the query engine. + if influxql.HasTimeExpr(stmt.Condition) { + return nil, errors.New("SHOW MEASUREMENTS doesn't support time in WHERE clause") + } + + // rewrite condition to push a source measurement into a "_name" tag. 
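// For example (illustrative), SHOW MEASUREMENTS FROM cpu WHERE region = 'uswest'
// keeps its statement type but ends up with the condition
//   (_name = 'cpu') AND (region = 'uswest')
// so the measurement filter is evaluated like an ordinary tag predicate.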
+ stmt.Condition = rewriteSourcesCondition(sources, stmt.Condition) + return stmt, nil +} + +func rewriteShowMeasurementCardinalityStatement(stmt *influxql.ShowMeasurementCardinalityStatement) (influxql.Statement, error) { + // TODO(edd): currently we only support cardinality estimation for certain + // types of query. As the estimation coverage is expanded, this condition + // will become less strict. + if !stmt.Exact && stmt.Sources == nil && stmt.Condition == nil && stmt.Dimensions == nil && stmt.Limit == 0 && stmt.Offset == 0 { + return stmt, nil + } + + // Check for time in WHERE clause (not supported). + if influxql.HasTimeExpr(stmt.Condition) { + return nil, errors.New("SHOW MEASUREMENT EXACT CARDINALITY doesn't support time in WHERE clause") + } + + // Use all measurements, if zero. + if len(stmt.Sources) == 0 { + stmt.Sources = influxql.Sources{ + &influxql.Measurement{Regex: &influxql.RegexLiteral{Val: regexp.MustCompile(`.+`)}}, + } + } + + return &influxql.SelectStatement{ + Fields: []*influxql.Field{ + { + Expr: &influxql.Call{ + Name: "count", + Args: []influxql.Expr{ + &influxql.Call{ + Name: "distinct", + Args: []influxql.Expr{&influxql.VarRef{Val: "_name"}}, + }, + }, + }, + Alias: "count", + }, + }, + Sources: rewriteSources2(stmt.Sources, stmt.Database), + Condition: stmt.Condition, + Dimensions: stmt.Dimensions, + Offset: stmt.Offset, + Limit: stmt.Limit, + OmitTime: true, + StripName: true, + }, nil +} + +func rewriteShowSeriesStatement(stmt *influxql.ShowSeriesStatement) (influxql.Statement, error) { + s := &influxql.SelectStatement{ + Condition: stmt.Condition, + Offset: stmt.Offset, + Limit: stmt.Limit, + SortFields: stmt.SortFields, + OmitTime: true, + StripName: true, + Dedupe: true, + IsRawQuery: true, + } + // Check if we can exclusively use the index. + if !influxql.HasTimeExpr(stmt.Condition) { + s.Fields = []*influxql.Field{{Expr: &influxql.VarRef{Val: "key"}}} + s.Sources = rewriteSources(stmt.Sources, "_series", stmt.Database) + s.Condition = rewriteSourcesCondition(s.Sources, s.Condition) + return s, nil + } + + // The query is bounded by time then it will have to query TSM data rather + // than utilising the index via system iterators. + s.Fields = []*influxql.Field{ + {Expr: &influxql.VarRef{Val: "_seriesKey"}, Alias: "key"}, + } + s.Sources = rewriteSources2(stmt.Sources, stmt.Database) + return s, nil +} + +func rewriteShowSeriesCardinalityStatement(stmt *influxql.ShowSeriesCardinalityStatement) (influxql.Statement, error) { + // TODO(edd): currently we only support cardinality estimation for certain + // types of query. As the estimation coverage is expanded, this condition + // will become less strict. + if !stmt.Exact && stmt.Sources == nil && stmt.Condition == nil && stmt.Dimensions == nil && stmt.Limit == 0 && stmt.Offset == 0 { + return stmt, nil + } + + // Check for time in WHERE clause (not supported). + if influxql.HasTimeExpr(stmt.Condition) { + return nil, errors.New("SHOW SERIES EXACT CARDINALITY doesn't support time in WHERE clause") + } + + // Use all measurements, if zero. 
+ if len(stmt.Sources) == 0 { + stmt.Sources = influxql.Sources{ + &influxql.Measurement{Regex: &influxql.RegexLiteral{Val: regexp.MustCompile(`.+`)}}, + } + } + + return &influxql.SelectStatement{ + Fields: []*influxql.Field{ + {Expr: &influxql.Call{Name: "count", Args: []influxql.Expr{&influxql.VarRef{Val: "_seriesKey"}}}, Alias: "count"}, + }, + Sources: rewriteSources2(stmt.Sources, stmt.Database), + Condition: stmt.Condition, + Dimensions: stmt.Dimensions, + Offset: stmt.Offset, + Limit: stmt.Limit, + OmitTime: true, + }, nil +} + +func rewriteShowTagValuesStatement(stmt *influxql.ShowTagValuesStatement) (influxql.Statement, error) { + var expr influxql.Expr + if list, ok := stmt.TagKeyExpr.(*influxql.ListLiteral); ok { + for _, tagKey := range list.Vals { + tagExpr := &influxql.BinaryExpr{ + Op: influxql.EQ, + LHS: &influxql.VarRef{Val: "_tagKey"}, + RHS: &influxql.StringLiteral{Val: tagKey}, + } + + if expr != nil { + expr = &influxql.BinaryExpr{ + Op: influxql.OR, + LHS: expr, + RHS: tagExpr, + } + } else { + expr = tagExpr + } + } + } else { + expr = &influxql.BinaryExpr{ + Op: stmt.Op, + LHS: &influxql.VarRef{Val: "_tagKey"}, + RHS: stmt.TagKeyExpr, + } + } + + // Set condition or "AND" together. + condition := stmt.Condition + if condition == nil { + condition = expr + } else { + condition = &influxql.BinaryExpr{ + Op: influxql.AND, + LHS: &influxql.ParenExpr{Expr: condition}, + RHS: &influxql.ParenExpr{Expr: expr}, + } + } + condition = rewriteSourcesCondition(stmt.Sources, condition) + + return &influxql.ShowTagValuesStatement{ + Database: stmt.Database, + Op: stmt.Op, + TagKeyExpr: stmt.TagKeyExpr, + Condition: condition, + SortFields: stmt.SortFields, + Limit: stmt.Limit, + Offset: stmt.Offset, + }, nil +} + +func rewriteShowTagValuesCardinalityStatement(stmt *influxql.ShowTagValuesCardinalityStatement) (influxql.Statement, error) { + // Use all measurements, if zero. + if len(stmt.Sources) == 0 { + stmt.Sources = influxql.Sources{ + &influxql.Measurement{Regex: &influxql.RegexLiteral{Val: regexp.MustCompile(`.+`)}}, + } + } + + var expr influxql.Expr + if list, ok := stmt.TagKeyExpr.(*influxql.ListLiteral); ok { + for _, tagKey := range list.Vals { + tagExpr := &influxql.BinaryExpr{ + Op: influxql.EQ, + LHS: &influxql.VarRef{Val: "_tagKey"}, + RHS: &influxql.StringLiteral{Val: tagKey}, + } + + if expr != nil { + expr = &influxql.BinaryExpr{ + Op: influxql.OR, + LHS: expr, + RHS: tagExpr, + } + } else { + expr = tagExpr + } + } + } else { + expr = &influxql.BinaryExpr{ + Op: stmt.Op, + LHS: &influxql.VarRef{Val: "_tagKey"}, + RHS: stmt.TagKeyExpr, + } + } + + // Set condition or "AND" together. 
+ condition := stmt.Condition + if condition == nil { + condition = expr + } else { + condition = &influxql.BinaryExpr{ + Op: influxql.AND, + LHS: &influxql.ParenExpr{Expr: condition}, + RHS: &influxql.ParenExpr{Expr: expr}, + } + } + + return &influxql.SelectStatement{ + Fields: []*influxql.Field{ + { + Expr: &influxql.Call{ + Name: "count", + Args: []influxql.Expr{ + &influxql.Call{ + Name: "distinct", + Args: []influxql.Expr{&influxql.VarRef{Val: "_tagValue"}}, + }, + }, + }, + Alias: "count", + }, + }, + Sources: rewriteSources2(stmt.Sources, stmt.Database), + Condition: condition, + Dimensions: stmt.Dimensions, + Offset: stmt.Offset, + Limit: stmt.Limit, + OmitTime: true, + }, nil +} + +func rewriteShowTagKeysStatement(stmt *influxql.ShowTagKeysStatement) (influxql.Statement, error) { + return &influxql.ShowTagKeysStatement{ + Database: stmt.Database, + Condition: rewriteSourcesCondition(stmt.Sources, stmt.Condition), + SortFields: stmt.SortFields, + Limit: stmt.Limit, + Offset: stmt.Offset, + SLimit: stmt.SLimit, + SOffset: stmt.SOffset, + }, nil +} + +func rewriteShowTagKeyCardinalityStatement(stmt *influxql.ShowTagKeyCardinalityStatement) (influxql.Statement, error) { + // Check for time in WHERE clause (not supported). + if influxql.HasTimeExpr(stmt.Condition) { + return nil, errors.New("SHOW TAG KEY EXACT CARDINALITY doesn't support time in WHERE clause") + } + + // Use all measurements, if zero. + if len(stmt.Sources) == 0 { + stmt.Sources = influxql.Sources{ + &influxql.Measurement{Regex: &influxql.RegexLiteral{Val: regexp.MustCompile(`.+`)}}, + } + } + + return &influxql.SelectStatement{ + Fields: []*influxql.Field{ + { + Expr: &influxql.Call{ + Name: "count", + Args: []influxql.Expr{ + &influxql.Call{ + Name: "distinct", + Args: []influxql.Expr{&influxql.VarRef{Val: "_tagKey"}}, + }, + }, + }, + Alias: "count", + }, + }, + Sources: rewriteSources2(stmt.Sources, stmt.Database), + Condition: stmt.Condition, + Dimensions: stmt.Dimensions, + Offset: stmt.Offset, + Limit: stmt.Limit, + OmitTime: true, + }, nil +} + +// rewriteSources rewrites sources to include the provided system iterator. +// +// rewriteSources also sets the default database where necessary. +func rewriteSources(sources influxql.Sources, systemIterator, defaultDatabase string) influxql.Sources { + newSources := influxql.Sources{} + for _, src := range sources { + if src == nil { + continue + } + mm := src.(*influxql.Measurement) + database := mm.Database + if database == "" { + database = defaultDatabase + } + + newM := mm.Clone() + newM.SystemIterator, newM.Database = systemIterator, database + newSources = append(newSources, newM) + } + + if len(newSources) <= 0 { + return append(newSources, &influxql.Measurement{ + Database: defaultDatabase, + SystemIterator: systemIterator, + }) + } + return newSources +} + +// rewriteSourcesCondition rewrites sources into `name` expressions. +// Merges with cond and returns a new condition. +func rewriteSourcesCondition(sources influxql.Sources, cond influxql.Expr) influxql.Expr { + if len(sources) == 0 { + return cond + } + + // Generate an OR'd set of filters on source name. + var scond influxql.Expr + for _, source := range sources { + mm := source.(*influxql.Measurement) + + // Generate a filtering expression on the measurement name. 
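// Each source contributes either _name = 'name' or _name =~ /regex/; these are
// OR'd together and finally AND'd with any user-supplied condition, e.g.
// FROM cpu, mem WHERE region = 'uswest' becomes
//   (_name = 'cpu' OR _name = 'mem') AND (region = 'uswest')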
+ var expr influxql.Expr + if mm.Regex != nil { + expr = &influxql.BinaryExpr{ + Op: influxql.EQREGEX, + LHS: &influxql.VarRef{Val: "_name"}, + RHS: &influxql.RegexLiteral{Val: mm.Regex.Val}, + } + } else if mm.Name != "" { + expr = &influxql.BinaryExpr{ + Op: influxql.EQ, + LHS: &influxql.VarRef{Val: "_name"}, + RHS: &influxql.StringLiteral{Val: mm.Name}, + } + } + + if scond == nil { + scond = expr + } else { + scond = &influxql.BinaryExpr{ + Op: influxql.OR, + LHS: scond, + RHS: expr, + } + } + } + + // This is the case where the original query has a WHERE on a tag, and also + // is requesting from a specific source. + if cond != nil && scond != nil { + return &influxql.BinaryExpr{ + Op: influxql.AND, + LHS: &influxql.ParenExpr{Expr: scond}, + RHS: &influxql.ParenExpr{Expr: cond}, + } + } else if cond != nil { + // This is the case where the original query has a WHERE on a tag but + // is not requesting from a specific source. + return cond + } + return scond +} + +func rewriteSources2(sources influxql.Sources, database string) influxql.Sources { + if len(sources) == 0 { + sources = influxql.Sources{&influxql.Measurement{Regex: &influxql.RegexLiteral{Val: matchAllRegex.Copy()}}} + } + for _, source := range sources { + switch source := source.(type) { + case *influxql.Measurement: + if source.Database == "" { + source.Database = database + } + } + } + return sources +} + +var matchAllRegex = regexp.MustCompile(`.+`) diff --git a/vendor/github.com/influxdata/influxdb/query/statement_rewriter_test.go b/vendor/github.com/influxdata/influxdb/query/statement_rewriter_test.go new file mode 100644 index 0000000..6e98406 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/query/statement_rewriter_test.go @@ -0,0 +1,308 @@ +package query_test + +import ( + "testing" + + "github.com/influxdata/influxdb/query" + "github.com/influxdata/influxql" +) + +func TestRewriteStatement(t *testing.T) { + tests := []struct { + stmt string + s string + }{ + { + stmt: `SHOW FIELD KEYS`, + s: `SELECT fieldKey, fieldType FROM _fieldKeys`, + }, + { + stmt: `SHOW FIELD KEYS ON db0`, + s: `SELECT fieldKey, fieldType FROM db0.._fieldKeys`, + }, + { + stmt: `SHOW FIELD KEYS FROM cpu`, + s: `SELECT fieldKey, fieldType FROM _fieldKeys WHERE _name = 'cpu'`, + }, + { + stmt: `SHOW FIELD KEYS ON db0 FROM cpu`, + s: `SELECT fieldKey, fieldType FROM db0.._fieldKeys WHERE _name = 'cpu'`, + }, + { + stmt: `SHOW FIELD KEYS FROM /c.*/`, + s: `SELECT fieldKey, fieldType FROM _fieldKeys WHERE _name =~ /c.*/`, + }, + { + stmt: `SHOW FIELD KEYS ON db0 FROM /c.*/`, + s: `SELECT fieldKey, fieldType FROM db0.._fieldKeys WHERE _name =~ /c.*/`, + }, + { + stmt: `SHOW FIELD KEYS FROM mydb.myrp2.cpu`, + s: `SELECT fieldKey, fieldType FROM mydb.myrp2._fieldKeys WHERE _name = 'cpu'`, + }, + { + stmt: `SHOW FIELD KEYS ON db0 FROM mydb.myrp2.cpu`, + s: `SELECT fieldKey, fieldType FROM mydb.myrp2._fieldKeys WHERE _name = 'cpu'`, + }, + { + stmt: `SHOW FIELD KEYS FROM mydb.myrp2./c.*/`, + s: `SELECT fieldKey, fieldType FROM mydb.myrp2._fieldKeys WHERE _name =~ /c.*/`, + }, + { + stmt: `SHOW FIELD KEYS ON db0 FROM mydb.myrp2./c.*/`, + s: `SELECT fieldKey, fieldType FROM mydb.myrp2._fieldKeys WHERE _name =~ /c.*/`, + }, + { + stmt: `SHOW SERIES`, + s: `SELECT "key" FROM _series`, + }, + { + stmt: `SHOW SERIES ON db0`, + s: `SELECT "key" FROM db0.._series`, + }, + { + stmt: `SHOW SERIES FROM cpu`, + s: `SELECT "key" FROM _series WHERE _name = 'cpu'`, + }, + { + stmt: `SHOW SERIES ON db0 FROM cpu`, + s: `SELECT "key" FROM db0.._series WHERE 
_name = 'cpu'`, + }, + { + stmt: `SHOW SERIES FROM mydb.myrp1.cpu`, + s: `SELECT "key" FROM mydb.myrp1._series WHERE _name = 'cpu'`, + }, + { + stmt: `SHOW SERIES ON db0 FROM mydb.myrp1.cpu`, + s: `SELECT "key" FROM mydb.myrp1._series WHERE _name = 'cpu'`, + }, + { + stmt: `SHOW SERIES FROM mydb.myrp1./c.*/`, + s: `SELECT "key" FROM mydb.myrp1._series WHERE _name =~ /c.*/`, + }, + { + stmt: `SHOW SERIES FROM mydb.myrp1./c.*/ WHERE region = 'uswest'`, + s: `SELECT "key" FROM mydb.myrp1._series WHERE (_name =~ /c.*/) AND (region = 'uswest')`, + }, + { + stmt: `SHOW SERIES ON db0 FROM mydb.myrp1./c.*/`, + s: `SELECT "key" FROM mydb.myrp1._series WHERE _name =~ /c.*/`, + }, + { + stmt: `SHOW SERIES WHERE time > 0`, + s: `SELECT _seriesKey AS "key" FROM /.+/ WHERE time > 0`, + }, + { + stmt: `SHOW SERIES ON db0 WHERE time > 0`, + s: `SELECT _seriesKey AS "key" FROM db0../.+/ WHERE time > 0`, + }, + { + stmt: `SHOW SERIES FROM cpu WHERE time > 0`, + s: `SELECT _seriesKey AS "key" FROM cpu WHERE time > 0`, + }, + { + stmt: `SHOW SERIES ON db0 FROM cpu WHERE time > 0`, + s: `SELECT _seriesKey AS "key" FROM db0..cpu WHERE time > 0`, + }, + { + stmt: `SHOW SERIES FROM mydb.myrp1.cpu WHERE time > 0`, + s: `SELECT _seriesKey AS "key" FROM mydb.myrp1.cpu WHERE time > 0`, + }, + { + stmt: `SHOW SERIES ON db0 FROM mydb.myrp1.cpu WHERE time > 0`, + s: `SELECT _seriesKey AS "key" FROM mydb.myrp1.cpu WHERE time > 0`, + }, + { + stmt: `SHOW SERIES FROM mydb.myrp1./c.*/ WHERE time > 0`, + s: `SELECT _seriesKey AS "key" FROM mydb.myrp1./c.*/ WHERE time > 0`, + }, + { + stmt: `SHOW SERIES FROM mydb.myrp1./c.*/ WHERE region = 'uswest' AND time > 0`, + s: `SELECT _seriesKey AS "key" FROM mydb.myrp1./c.*/ WHERE region = 'uswest' AND time > 0`, + }, + { + stmt: `SHOW SERIES ON db0 FROM mydb.myrp1./c.*/ WHERE time > 0`, + s: `SELECT _seriesKey AS "key" FROM mydb.myrp1./c.*/ WHERE time > 0`, + }, + { + stmt: `SHOW TAG KEYS`, + s: `SHOW TAG KEYS`, + }, + { + stmt: `SHOW TAG KEYS ON db0`, + s: `SHOW TAG KEYS ON db0`, + }, + { + stmt: `SHOW TAG KEYS FROM cpu`, + s: `SHOW TAG KEYS WHERE _name = 'cpu'`, + }, + { + stmt: `SHOW TAG KEYS ON db0 FROM cpu`, + s: `SHOW TAG KEYS ON db0 WHERE _name = 'cpu'`, + }, + { + stmt: `SHOW TAG KEYS FROM /c.*/`, + s: `SHOW TAG KEYS WHERE _name =~ /c.*/`, + }, + { + stmt: `SHOW TAG KEYS ON db0 FROM /c.*/`, + s: `SHOW TAG KEYS ON db0 WHERE _name =~ /c.*/`, + }, + { + stmt: `SHOW TAG KEYS FROM cpu WHERE region = 'uswest'`, + s: `SHOW TAG KEYS WHERE (_name = 'cpu') AND (region = 'uswest')`, + }, + { + stmt: `SHOW TAG KEYS ON db0 FROM cpu WHERE region = 'uswest'`, + s: `SHOW TAG KEYS ON db0 WHERE (_name = 'cpu') AND (region = 'uswest')`, + }, + { + stmt: `SHOW TAG KEYS FROM mydb.myrp1.cpu`, + s: `SHOW TAG KEYS WHERE _name = 'cpu'`, + }, + { + stmt: `SHOW TAG KEYS ON db0 FROM mydb.myrp1.cpu`, + s: `SHOW TAG KEYS ON db0 WHERE _name = 'cpu'`, + }, + { + stmt: `SHOW TAG KEYS FROM mydb.myrp1./c.*/`, + s: `SHOW TAG KEYS WHERE _name =~ /c.*/`, + }, + { + stmt: `SHOW TAG KEYS ON db0 FROM mydb.myrp1./c.*/`, + s: `SHOW TAG KEYS ON db0 WHERE _name =~ /c.*/`, + }, + { + stmt: `SHOW TAG KEYS FROM mydb.myrp1.cpu WHERE region = 'uswest'`, + s: `SHOW TAG KEYS WHERE (_name = 'cpu') AND (region = 'uswest')`, + }, + { + stmt: `SHOW TAG KEYS ON db0 FROM mydb.myrp1.cpu WHERE region = 'uswest'`, + s: `SHOW TAG KEYS ON db0 WHERE (_name = 'cpu') AND (region = 'uswest')`, + }, + { + stmt: `SHOW TAG KEYS WHERE time > 0`, + s: `SHOW TAG KEYS WHERE time > 0`, + }, + { + stmt: `SHOW TAG KEYS ON db0 WHERE time > 0`, + s: 
`SHOW TAG KEYS ON db0 WHERE time > 0`, + }, + { + stmt: `SHOW TAG KEYS FROM cpu WHERE time > 0`, + s: `SHOW TAG KEYS WHERE (_name = 'cpu') AND (time > 0)`, + }, + { + stmt: `SHOW TAG KEYS ON db0 FROM cpu WHERE time > 0`, + s: `SHOW TAG KEYS ON db0 WHERE (_name = 'cpu') AND (time > 0)`, + }, + { + stmt: `SHOW TAG KEYS FROM /c.*/ WHERE time > 0`, + s: `SHOW TAG KEYS WHERE (_name =~ /c.*/) AND (time > 0)`, + }, + { + stmt: `SHOW TAG KEYS ON db0 FROM /c.*/ WHERE time > 0`, + s: `SHOW TAG KEYS ON db0 WHERE (_name =~ /c.*/) AND (time > 0)`, + }, + { + stmt: `SHOW TAG KEYS FROM cpu WHERE region = 'uswest' AND time > 0`, + s: `SHOW TAG KEYS WHERE (_name = 'cpu') AND (region = 'uswest' AND time > 0)`, + }, + { + stmt: `SHOW TAG KEYS ON db0 FROM cpu WHERE region = 'uswest' AND time > 0`, + s: `SHOW TAG KEYS ON db0 WHERE (_name = 'cpu') AND (region = 'uswest' AND time > 0)`, + }, + { + stmt: `SHOW TAG KEYS FROM mydb.myrp1.cpu WHERE time > 0`, + s: `SHOW TAG KEYS WHERE (_name = 'cpu') AND (time > 0)`, + }, + { + stmt: `SHOW TAG KEYS ON db0 FROM mydb.myrp1.cpu WHERE time > 0`, + s: `SHOW TAG KEYS ON db0 WHERE (_name = 'cpu') AND (time > 0)`, + }, + { + stmt: `SHOW TAG KEYS FROM mydb.myrp1./c.*/ WHERE time > 0`, + s: `SHOW TAG KEYS WHERE (_name =~ /c.*/) AND (time > 0)`, + }, + { + stmt: `SHOW TAG KEYS ON db0 FROM mydb.myrp1./c.*/ WHERE time > 0`, + s: `SHOW TAG KEYS ON db0 WHERE (_name =~ /c.*/) AND (time > 0)`, + }, + { + stmt: `SHOW TAG KEYS FROM mydb.myrp1.cpu WHERE region = 'uswest' AND time > 0`, + s: `SHOW TAG KEYS WHERE (_name = 'cpu') AND (region = 'uswest' AND time > 0)`, + }, + { + stmt: `SHOW TAG KEYS ON db0 FROM mydb.myrp1.cpu WHERE region = 'uswest' AND time > 0`, + s: `SHOW TAG KEYS ON db0 WHERE (_name = 'cpu') AND (region = 'uswest' AND time > 0)`, + }, + { + stmt: `SHOW TAG VALUES WITH KEY = "region"`, + s: `SHOW TAG VALUES WITH KEY = region WHERE _tagKey = 'region'`, + }, + { + stmt: `SHOW TAG VALUES WITH KEY = "region" WHERE "region" = 'uswest'`, + s: `SHOW TAG VALUES WITH KEY = region WHERE (region = 'uswest') AND (_tagKey = 'region')`, + }, + { + stmt: `SHOW TAG VALUES WITH KEY IN ("region", "server") WHERE "platform" = 'cloud'`, + s: `SHOW TAG VALUES WITH KEY IN (region, server) WHERE (platform = 'cloud') AND (_tagKey = 'region' OR _tagKey = 'server')`, + }, + { + stmt: `SHOW TAG VALUES WITH KEY = "region" WHERE "region" = 'uswest' AND time > 0`, + s: `SHOW TAG VALUES WITH KEY = region WHERE (region = 'uswest' AND time > 0) AND (_tagKey = 'region')`, + }, + { + stmt: `SHOW TAG VALUES WITH KEY = "region" ON db0`, + s: `SHOW TAG VALUES WITH KEY = region WHERE _tagKey = 'region'`, + }, + { + stmt: `SHOW TAG VALUES FROM cpu WITH KEY = "region"`, + s: `SHOW TAG VALUES WITH KEY = region WHERE (_name = 'cpu') AND (_tagKey = 'region')`, + }, + { + stmt: `SHOW TAG VALUES WITH KEY != "region"`, + s: `SHOW TAG VALUES WITH KEY != region WHERE _tagKey != 'region'`, + }, + { + stmt: `SHOW TAG VALUES WITH KEY =~ /re.*/`, + s: `SHOW TAG VALUES WITH KEY =~ /re.*/ WHERE _tagKey =~ /re.*/`, + }, + { + stmt: `SHOW TAG VALUES WITH KEY =~ /re.*/ WHERE time > 0`, + s: `SHOW TAG VALUES WITH KEY =~ /re.*/ WHERE (time > 0) AND (_tagKey =~ /re.*/)`, + }, + { + stmt: `SHOW TAG VALUES WITH KEY !~ /re.*/`, + s: `SHOW TAG VALUES WITH KEY !~ /re.*/ WHERE _tagKey !~ /re.*/`, + }, + { + stmt: `SHOW TAG VALUES WITH KEY !~ /re.*/ LIMIT 1`, + s: `SHOW TAG VALUES WITH KEY !~ /re.*/ WHERE _tagKey !~ /re.*/ LIMIT 1`, + }, + { + stmt: `SHOW TAG VALUES WITH KEY !~ /re.*/ OFFSET 2`, + s: `SHOW TAG VALUES WITH KEY !~ /re.*/ 
WHERE _tagKey !~ /re.*/ OFFSET 2`, + }, + { + stmt: `SELECT value FROM cpu`, + s: `SELECT value FROM cpu`, + }, + } + + for _, test := range tests { + t.Run(test.stmt, func(t *testing.T) { + stmt, err := influxql.ParseStatement(test.stmt) + if err != nil { + t.Errorf("error parsing statement: %s", err) + } else { + stmt, err = query.RewriteStatement(stmt) + if err != nil { + t.Errorf("error rewriting statement: %s", err) + } else if s := stmt.String(); s != test.s { + t.Errorf("error rendering string. expected %s, actual: %s", test.s, s) + } + } + }) + } +} diff --git a/vendor/github.com/influxdata/influxdb/query/subquery.go b/vendor/github.com/influxdata/influxdb/query/subquery.go new file mode 100644 index 0000000..2bb0b25 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/query/subquery.go @@ -0,0 +1,126 @@ +package query + +import ( + "context" + + "github.com/influxdata/influxql" +) + +type subqueryBuilder struct { + ic IteratorCreator + stmt *influxql.SelectStatement +} + +// buildAuxIterator constructs an auxiliary Iterator from a subquery. +func (b *subqueryBuilder) buildAuxIterator(ctx context.Context, opt IteratorOptions) (Iterator, error) { + // Map the desired auxiliary fields from the substatement. + indexes := b.mapAuxFields(opt.Aux) + + subOpt, err := newIteratorOptionsSubstatement(ctx, b.stmt, opt) + if err != nil { + return nil, err + } + + cur, err := buildCursor(ctx, b.stmt, b.ic, subOpt) + if err != nil { + return nil, err + } + + // Filter the cursor by a condition if one was given. + if opt.Condition != nil { + cur = newFilterCursor(cur, opt.Condition) + } + + // Construct the iterators for the subquery. + itr := NewIteratorMapper(cur, nil, indexes, subOpt) + if len(opt.GetDimensions()) != len(subOpt.GetDimensions()) { + itr = NewTagSubsetIterator(itr, opt) + } + return itr, nil +} + +func (b *subqueryBuilder) mapAuxFields(auxFields []influxql.VarRef) []IteratorMap { + indexes := make([]IteratorMap, len(auxFields)) + for i, name := range auxFields { + m := b.mapAuxField(&name) + if m == nil { + // If this field doesn't map to anything, use the NullMap so it + // shows up as null. + m = NullMap{} + } + indexes[i] = m + } + return indexes +} + +func (b *subqueryBuilder) mapAuxField(name *influxql.VarRef) IteratorMap { + offset := 0 + for i, f := range b.stmt.Fields { + if f.Name() == name.Val { + return FieldMap{ + Index: i + offset, + // Cast the result of the field into the desired type. + Type: name.Type, + } + } else if call, ok := f.Expr.(*influxql.Call); ok && (call.Name == "top" || call.Name == "bottom") { + // We may match one of the arguments in "top" or "bottom". + if len(call.Args) > 2 { + for j, arg := range call.Args[1 : len(call.Args)-1] { + if arg, ok := arg.(*influxql.VarRef); ok && arg.Val == name.Val { + return FieldMap{ + Index: i + j + 1, + Type: influxql.String, + } + } + } + // Increment the offset so we have the correct index for later fields. + offset += len(call.Args) - 2 + } + } + } + + // Unable to find this in the list of fields. + // Look within the dimensions and create a field if we find it. + for _, d := range b.stmt.Dimensions { + if d, ok := d.Expr.(*influxql.VarRef); ok && name.Val == d.Val { + return TagMap(d.Val) + } + } + + // Unable to find any matches. + return nil +} + +func (b *subqueryBuilder) buildVarRefIterator(ctx context.Context, expr *influxql.VarRef, opt IteratorOptions) (Iterator, error) { + // Look for the field or tag that is driving this query. 
+ driver := b.mapAuxField(expr) + if driver == nil { + // Exit immediately if there is no driver. If there is no driver, there + // are no results. Period. + return nil, nil + } + + // Map the auxiliary fields to their index in the subquery. + indexes := b.mapAuxFields(opt.Aux) + subOpt, err := newIteratorOptionsSubstatement(ctx, b.stmt, opt) + if err != nil { + return nil, err + } + + cur, err := buildCursor(ctx, b.stmt, b.ic, subOpt) + if err != nil { + return nil, err + } + + // Filter the cursor by a condition if one was given. + if opt.Condition != nil { + cur = newFilterCursor(cur, opt.Condition) + } + + // Construct the iterators for the subquery. + itr := NewIteratorMapper(cur, driver, indexes, subOpt) + if len(opt.GetDimensions()) != len(subOpt.GetDimensions()) { + itr = NewTagSubsetIterator(itr, opt) + } + return itr, nil +} diff --git a/vendor/github.com/influxdata/influxdb/query/subquery_test.go b/vendor/github.com/influxdata/influxdb/query/subquery_test.go new file mode 100644 index 0000000..624bb4f --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/query/subquery_test.go @@ -0,0 +1,322 @@ +package query_test + +import ( + "context" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/influxdata/influxdb/models" + "github.com/influxdata/influxdb/query" + "github.com/influxdata/influxql" +) + +type CreateIteratorFn func(ctx context.Context, m *influxql.Measurement, opt query.IteratorOptions) query.Iterator + +func TestSubquery(t *testing.T) { + for _, test := range []struct { + Name string + Statement string + Fields map[string]influxql.DataType + MapShardsFn func(t *testing.T, tr influxql.TimeRange) CreateIteratorFn + Rows []query.Row + }{ + { + Name: "AuxiliaryFields", + Statement: `SELECT max / 2.0 FROM (SELECT max(value) FROM cpu GROUP BY time(5s)) WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:15Z'`, + Fields: map[string]influxql.DataType{"value": influxql.Float}, + MapShardsFn: func(t *testing.T, tr influxql.TimeRange) CreateIteratorFn { + if got, want := tr.MinTimeNano(), 0*Second; got != want { + t.Errorf("unexpected min time: got=%d want=%d", got, want) + } + if got, want := tr.MaxTimeNano(), 15*Second-1; got != want { + t.Errorf("unexpected max time: got=%d want=%d", got, want) + } + return func(ctx context.Context, m *influxql.Measurement, opt query.IteratorOptions) query.Iterator { + if got, want := m.Name, "cpu"; got != want { + t.Errorf("unexpected source: got=%s want=%s", got, want) + } + if got, want := opt.Expr.String(), "max(value::float)"; got != want { + t.Errorf("unexpected expression: got=%s want=%s", got, want) + } + return &FloatIterator{Points: []query.FloatPoint{ + {Name: "cpu", Time: 0 * Second, Value: 5}, + {Name: "cpu", Time: 5 * Second, Value: 3}, + {Name: "cpu", Time: 10 * Second, Value: 8}, + }} + } + }, + Rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{2.5}}, + {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{1.5}}, + {Time: 10 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(4)}}, + }, + }, + { + Name: "AuxiliaryFields_WithWhereClause", + Statement: `SELECT host FROM (SELECT max(value), host FROM cpu GROUP BY time(5s)) WHERE max > 4 AND time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:15Z'`, + Fields: map[string]influxql.DataType{ + "value": influxql.Float, + "host": influxql.Tag, + }, + MapShardsFn: func(t *testing.T, tr influxql.TimeRange) CreateIteratorFn { + if got, want := tr.MinTimeNano(), 
0*Second; got != want { + t.Errorf("unexpected min time: got=%d want=%d", got, want) + } + if got, want := tr.MaxTimeNano(), 15*Second-1; got != want { + t.Errorf("unexpected max time: got=%d want=%d", got, want) + } + return func(ctx context.Context, m *influxql.Measurement, opt query.IteratorOptions) query.Iterator { + if got, want := m.Name, "cpu"; got != want { + t.Errorf("unexpected source: got=%s want=%s", got, want) + } + if got, want := opt.Expr.String(), "max(value::float)"; got != want { + t.Errorf("unexpected expression: got=%s want=%s", got, want) + } + if got, want := opt.Aux, []influxql.VarRef{{Val: "host", Type: influxql.Tag}}; !cmp.Equal(got, want) { + t.Errorf("unexpected auxiliary fields:\n%s", cmp.Diff(want, got)) + } + return &FloatIterator{Points: []query.FloatPoint{ + {Name: "cpu", Time: 0 * Second, Value: 5, Aux: []interface{}{"server02"}}, + {Name: "cpu", Time: 5 * Second, Value: 3, Aux: []interface{}{"server01"}}, + {Name: "cpu", Time: 10 * Second, Value: 8, Aux: []interface{}{"server03"}}, + }} + } + }, + Rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{"server02"}}, + {Time: 10 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{"server03"}}, + }, + }, + { + Name: "AuxiliaryFields_NonExistentField", + Statement: `SELECT host FROM (SELECT max(value) FROM cpu GROUP BY time(5s)) WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:15Z'`, + Fields: map[string]influxql.DataType{"value": influxql.Float}, + MapShardsFn: func(t *testing.T, tr influxql.TimeRange) CreateIteratorFn { + return func(ctx context.Context, m *influxql.Measurement, opt query.IteratorOptions) query.Iterator { + return &FloatIterator{Points: []query.FloatPoint{ + {Name: "cpu", Time: 0 * Second, Value: 5}, + {Name: "cpu", Time: 5 * Second, Value: 3}, + {Name: "cpu", Time: 10 * Second, Value: 8}, + }} + } + }, + Rows: []query.Row(nil), + }, + { + Name: "AggregateOfMath", + Statement: `SELECT mean(percentage) FROM (SELECT value * 100.0 AS percentage FROM cpu) WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:15Z' GROUP BY time(5s)`, + Fields: map[string]influxql.DataType{"value": influxql.Float}, + MapShardsFn: func(t *testing.T, tr influxql.TimeRange) CreateIteratorFn { + if got, want := tr.MinTimeNano(), 0*Second; got != want { + t.Errorf("unexpected min time: got=%d want=%d", got, want) + } + if got, want := tr.MaxTimeNano(), 15*Second-1; got != want { + t.Errorf("unexpected max time: got=%d want=%d", got, want) + } + return func(ctx context.Context, m *influxql.Measurement, opt query.IteratorOptions) query.Iterator { + if got, want := m.Name, "cpu"; got != want { + t.Errorf("unexpected source: got=%s want=%s", got, want) + } + if got, want := opt.Expr, influxql.Expr(nil); got != want { + t.Errorf("unexpected expression: got=%s want=%s", got, want) + } + if got, want := opt.Aux, []influxql.VarRef{{Val: "value", Type: influxql.Float}}; !cmp.Equal(got, want) { + t.Errorf("unexpected auxiliary fields:\n%s", cmp.Diff(want, got)) + } + return &FloatIterator{Points: []query.FloatPoint{ + {Name: "cpu", Time: 0 * Second, Aux: []interface{}{0.5}}, + {Name: "cpu", Time: 2 * Second, Aux: []interface{}{1.0}}, + {Name: "cpu", Time: 5 * Second, Aux: []interface{}{0.05}}, + {Name: "cpu", Time: 8 * Second, Aux: []interface{}{0.45}}, + {Name: "cpu", Time: 12 * Second, Aux: []interface{}{0.34}}, + }} + } + }, + Rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(75)}}, + 
{Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(25)}}, + {Time: 10 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(34)}}, + }, + }, + { + Name: "Cast", + Statement: `SELECT value::integer FROM (SELECT mean(value) AS value FROM cpu)`, + Fields: map[string]influxql.DataType{"value": influxql.Integer}, + MapShardsFn: func(t *testing.T, tr influxql.TimeRange) CreateIteratorFn { + return func(ctx context.Context, m *influxql.Measurement, opt query.IteratorOptions) query.Iterator { + if got, want := m.Name, "cpu"; got != want { + t.Errorf("unexpected source: got=%s want=%s", got, want) + } + if got, want := opt.Expr.String(), "mean(value::integer)"; got != want { + t.Errorf("unexpected expression: got=%s want=%s", got, want) + } + return &FloatIterator{Points: []query.FloatPoint{ + {Name: "cpu", Time: 0 * Second, Value: float64(20) / float64(6)}, + }} + } + }, + Rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(3)}}, + }, + }, + { + Name: "CountTag", + Statement: `SELECT count(host) FROM (SELECT value, host FROM cpu) WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:15Z'`, + Fields: map[string]influxql.DataType{ + "value": influxql.Float, + "host": influxql.Tag, + }, + MapShardsFn: func(t *testing.T, tr influxql.TimeRange) CreateIteratorFn { + if got, want := tr.MinTimeNano(), 0*Second; got != want { + t.Errorf("unexpected min time: got=%d want=%d", got, want) + } + if got, want := tr.MaxTimeNano(), 15*Second-1; got != want { + t.Errorf("unexpected max time: got=%d want=%d", got, want) + } + return func(ctx context.Context, m *influxql.Measurement, opt query.IteratorOptions) query.Iterator { + if got, want := m.Name, "cpu"; got != want { + t.Errorf("unexpected source: got=%s want=%s", got, want) + } + if got, want := opt.Aux, []influxql.VarRef{ + {Val: "host", Type: influxql.Tag}, + {Val: "value", Type: influxql.Float}, + }; !cmp.Equal(got, want) { + t.Errorf("unexpected auxiliary fields:\n%s", cmp.Diff(want, got)) + } + return &FloatIterator{Points: []query.FloatPoint{ + {Name: "cpu", Aux: []interface{}{"server01", 5.0}}, + {Name: "cpu", Aux: []interface{}{"server02", 3.0}}, + {Name: "cpu", Aux: []interface{}{"server03", 8.0}}, + }} + } + }, + Rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(3)}}, + }, + }, + { + Name: "StripTags", + Statement: `SELECT max FROM (SELECT max(value) FROM cpu GROUP BY host) WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:15Z'`, + Fields: map[string]influxql.DataType{"value": influxql.Float}, + MapShardsFn: func(t *testing.T, tr influxql.TimeRange) CreateIteratorFn { + if got, want := tr.MinTimeNano(), 0*Second; got != want { + t.Errorf("unexpected min time: got=%d want=%d", got, want) + } + if got, want := tr.MaxTimeNano(), 15*Second-1; got != want { + t.Errorf("unexpected max time: got=%d want=%d", got, want) + } + return func(ctx context.Context, m *influxql.Measurement, opt query.IteratorOptions) query.Iterator { + if got, want := m.Name, "cpu"; got != want { + t.Errorf("unexpected source: got=%s want=%s", got, want) + } + if got, want := opt.Expr.String(), "max(value::float)"; got != want { + t.Errorf("unexpected expression: got=%s want=%s", got, want) + } + return &FloatIterator{Points: []query.FloatPoint{ + {Name: "cpu", Tags: ParseTags("host=server01"), Value: 5}, + {Name: "cpu", Tags: ParseTags("host=server02"), Value: 3}, + {Name: "cpu", Tags: 
ParseTags("host=server03"), Value: 8}, + }} + } + }, + Rows: []query.Row{ + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{5.0}}, + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{3.0}}, + {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{8.0}}, + }, + }, + } { + t.Run(test.Name, func(t *testing.T) { + shardMapper := ShardMapper{ + MapShardsFn: func(sources influxql.Sources, tr influxql.TimeRange) query.ShardGroup { + fn := test.MapShardsFn(t, tr) + return &ShardGroup{ + Fields: test.Fields, + CreateIteratorFn: func(ctx context.Context, m *influxql.Measurement, opt query.IteratorOptions) (query.Iterator, error) { + return fn(ctx, m, opt), nil + }, + } + }, + } + + stmt := MustParseSelectStatement(test.Statement) + stmt.OmitTime = true + cur, err := query.Select(context.Background(), stmt, &shardMapper, query.SelectOptions{}) + if err != nil { + t.Fatalf("unexpected parse error: %s", err) + } else if a, err := ReadCursor(cur); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if diff := cmp.Diff(test.Rows, a); diff != "" { + t.Fatalf("unexpected points:\n%s", diff) + } + }) + } +} + +type openAuthorizer struct{} + +func (*openAuthorizer) AuthorizeDatabase(p influxql.Privilege, name string) bool { return true } +func (*openAuthorizer) AuthorizeQuery(database string, query *influxql.Query) error { return nil } +func (*openAuthorizer) AuthorizeSeriesRead(database string, measurement []byte, tags models.Tags) bool { + return true +} +func (*openAuthorizer) AuthorizeSeriesWrite(database string, measurement []byte, tags models.Tags) bool { + return true +} + +// Ensure that the subquery gets passed the query authorizer. +func TestSubquery_Authorizer(t *testing.T) { + auth := &openAuthorizer{} + shardMapper := ShardMapper{ + MapShardsFn: func(sources influxql.Sources, tr influxql.TimeRange) query.ShardGroup { + return &ShardGroup{ + Fields: map[string]influxql.DataType{ + "value": influxql.Float, + }, + CreateIteratorFn: func(ctx context.Context, m *influxql.Measurement, opt query.IteratorOptions) (query.Iterator, error) { + if opt.Authorizer != auth { + t.Errorf("query authorizer has not been set") + } + return nil, nil + }, + } + }, + } + + stmt := MustParseSelectStatement(`SELECT max(value) FROM (SELECT value FROM cpu)`) + cur, err := query.Select(context.Background(), stmt, &shardMapper, query.SelectOptions{ + Authorizer: auth, + }) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + cur.Close() +} + +// Ensure that the subquery gets passed the max series limit. 
+func TestSubquery_MaxSeriesN(t *testing.T) { + shardMapper := ShardMapper{ + MapShardsFn: func(sources influxql.Sources, tr influxql.TimeRange) query.ShardGroup { + return &ShardGroup{ + Fields: map[string]influxql.DataType{ + "value": influxql.Float, + }, + CreateIteratorFn: func(ctx context.Context, m *influxql.Measurement, opt query.IteratorOptions) (query.Iterator, error) { + if opt.MaxSeriesN != 1000 { + t.Errorf("max series limit has not been set") + } + return nil, nil + }, + } + }, + } + + stmt := MustParseSelectStatement(`SELECT max(value) FROM (SELECT value FROM cpu)`) + cur, err := query.Select(context.Background(), stmt, &shardMapper, query.SelectOptions{ + MaxSeriesN: 1000, + }) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + cur.Close() +} diff --git a/vendor/github.com/influxdata/influxdb/query/task_manager.go b/vendor/github.com/influxdata/influxdb/query/task_manager.go new file mode 100644 index 0000000..f380d14 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/query/task_manager.go @@ -0,0 +1,319 @@ +package query + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "sync" + "time" + + "github.com/influxdata/influxdb/models" + "github.com/influxdata/influxql" + "go.uber.org/zap" +) + +const ( + // DefaultQueryTimeout is the default timeout for executing a query. + // A value of zero will have no query timeout. + DefaultQueryTimeout = time.Duration(0) +) + +type TaskStatus int + +const ( + // RunningTask is set when the task is running. + RunningTask TaskStatus = iota + 1 + + // KilledTask is set when the task is killed, but resources are still + // being used. + KilledTask +) + +func (t TaskStatus) String() string { + switch t { + case RunningTask: + return "running" + case KilledTask: + return "killed" + default: + return "unknown" + } +} + +func (t TaskStatus) MarshalJSON() ([]byte, error) { + s := t.String() + return json.Marshal(s) +} + +func (t *TaskStatus) UnmarshalJSON(data []byte) error { + if bytes.Equal(data, []byte("running")) { + *t = RunningTask + } else if bytes.Equal(data, []byte("killed")) { + *t = KilledTask + } else if bytes.Equal(data, []byte("unknown")) { + *t = TaskStatus(0) + } else { + return fmt.Errorf("unknown task status: %s", string(data)) + } + return nil +} + +// TaskManager takes care of all aspects related to managing running queries. +type TaskManager struct { + // Query execution timeout. + QueryTimeout time.Duration + + // Log queries if they are slower than this time. + // If zero, slow queries will never be logged. + LogQueriesAfter time.Duration + + // Maximum number of concurrent queries. + MaxConcurrentQueries int + + // Logger to use for all logging. + // Defaults to discarding all log output. + Logger *zap.Logger + + // Used for managing and tracking running queries. + queries map[uint64]*Task + nextID uint64 + mu sync.RWMutex + shutdown bool +} + +// NewTaskManager creates a new TaskManager. +func NewTaskManager() *TaskManager { + return &TaskManager{ + QueryTimeout: DefaultQueryTimeout, + Logger: zap.NewNop(), + queries: make(map[uint64]*Task), + nextID: 1, + } +} + +// ExecuteStatement executes a statement containing one of the task management queries. 
+func (t *TaskManager) ExecuteStatement(stmt influxql.Statement, ctx *ExecutionContext) error { + switch stmt := stmt.(type) { + case *influxql.ShowQueriesStatement: + rows, err := t.executeShowQueriesStatement(stmt) + if err != nil { + return err + } + + ctx.Send(&Result{ + Series: rows, + }) + case *influxql.KillQueryStatement: + var messages []*Message + if ctx.ReadOnly { + messages = append(messages, ReadOnlyWarning(stmt.String())) + } + + if err := t.executeKillQueryStatement(stmt); err != nil { + return err + } + ctx.Send(&Result{ + Messages: messages, + }) + default: + return ErrInvalidQuery + } + return nil +} + +func (t *TaskManager) executeKillQueryStatement(stmt *influxql.KillQueryStatement) error { + return t.KillQuery(stmt.QueryID) +} + +func (t *TaskManager) executeShowQueriesStatement(q *influxql.ShowQueriesStatement) (models.Rows, error) { + t.mu.RLock() + defer t.mu.RUnlock() + + now := time.Now() + + values := make([][]interface{}, 0, len(t.queries)) + for id, qi := range t.queries { + d := now.Sub(qi.startTime) + + switch { + case d >= time.Second: + d = d - (d % time.Second) + case d >= time.Millisecond: + d = d - (d % time.Millisecond) + case d >= time.Microsecond: + d = d - (d % time.Microsecond) + } + + values = append(values, []interface{}{id, qi.query, qi.database, d.String(), qi.status.String()}) + } + + return []*models.Row{{ + Columns: []string{"qid", "query", "database", "duration", "status"}, + Values: values, + }}, nil +} + +func (t *TaskManager) queryError(qid uint64, err error) { + t.mu.RLock() + query := t.queries[qid] + t.mu.RUnlock() + if query != nil { + query.setError(err) + } +} + +// AttachQuery attaches a running query to be managed by the TaskManager. +// Returns the query id of the newly attached query or an error if it was +// unable to assign a query id or attach the query to the TaskManager. +// This function also returns a channel that will be closed when this +// query finishes running. +// +// After a query finishes running, the system is free to reuse a query id. +func (t *TaskManager) AttachQuery(q *influxql.Query, opt ExecutionOptions, interrupt <-chan struct{}) (*ExecutionContext, func(), error) { + t.mu.Lock() + defer t.mu.Unlock() + + if t.shutdown { + return nil, nil, ErrQueryEngineShutdown + } + + if t.MaxConcurrentQueries > 0 && len(t.queries) >= t.MaxConcurrentQueries { + return nil, nil, ErrMaxConcurrentQueriesLimitExceeded(len(t.queries), t.MaxConcurrentQueries) + } + + qid := t.nextID + query := &Task{ + query: q.String(), + database: opt.Database, + status: RunningTask, + startTime: time.Now(), + closing: make(chan struct{}), + monitorCh: make(chan error), + } + t.queries[qid] = query + + go t.waitForQuery(qid, query.closing, interrupt, query.monitorCh) + if t.LogQueriesAfter != 0 { + go query.monitor(func(closing <-chan struct{}) error { + timer := time.NewTimer(t.LogQueriesAfter) + defer timer.Stop() + + select { + case <-timer.C: + t.Logger.Warn(fmt.Sprintf("Detected slow query: %s (qid: %d, database: %s, threshold: %s)", + query.query, qid, query.database, t.LogQueriesAfter)) + case <-closing: + } + return nil + }) + } + t.nextID++ + + ctx := &ExecutionContext{ + Context: context.Background(), + QueryID: qid, + task: query, + ExecutionOptions: opt, + } + ctx.watch() + return ctx, func() { t.DetachQuery(qid) }, nil +} + +// KillQuery enters a query into the killed state and closes the channel +// from the TaskManager. This method can be used to forcefully terminate a +// running query. 
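A minimal lifecycle sketch, assuming the TaskManager API declared in this file; the query text and database name are placeholders and the interrupt channel is left nil for brevity:

```go
package main

import (
	"fmt"

	"github.com/influxdata/influxdb/query"
	"github.com/influxdata/influxql"
)

func main() {
	tm := query.NewTaskManager()
	defer tm.Close()

	// Attach a query so it appears in the task table; the returned detach
	// function removes it again when the query finishes.
	q, _ := influxql.ParseQuery(`SELECT mean(value) FROM cpu`)
	ctx, detach, err := tm.AttachQuery(q, query.ExecutionOptions{Database: "db0"}, nil)
	if err != nil {
		panic(err)
	}
	defer detach()

	// ctx.QueryID is the id listed by SHOW QUERIES and accepted by KILL QUERY.
	fmt.Println("attached as qid", ctx.QueryID)
	for _, qi := range tm.Queries() {
		fmt.Println(qi.ID, qi.Query, qi.Database, qi.Status)
	}
}
```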
+func (t *TaskManager) KillQuery(qid uint64) error { + t.mu.Lock() + query := t.queries[qid] + t.mu.Unlock() + + if query == nil { + return fmt.Errorf("no such query id: %d", qid) + } + return query.kill() +} + +// DetachQuery removes a query from the query table. If the query is not in the +// killed state, this will also close the related channel. +func (t *TaskManager) DetachQuery(qid uint64) error { + t.mu.Lock() + defer t.mu.Unlock() + + query := t.queries[qid] + if query == nil { + return fmt.Errorf("no such query id: %d", qid) + } + + query.close() + delete(t.queries, qid) + return nil +} + +// QueryInfo represents the information for a query. +type QueryInfo struct { + ID uint64 `json:"id"` + Query string `json:"query"` + Database string `json:"database"` + Duration time.Duration `json:"duration"` + Status TaskStatus `json:"status"` +} + +// Queries returns a list of all running queries with information about them. +func (t *TaskManager) Queries() []QueryInfo { + t.mu.RLock() + defer t.mu.RUnlock() + + now := time.Now() + queries := make([]QueryInfo, 0, len(t.queries)) + for id, qi := range t.queries { + queries = append(queries, QueryInfo{ + ID: id, + Query: qi.query, + Database: qi.database, + Duration: now.Sub(qi.startTime), + Status: qi.status, + }) + } + return queries +} + +func (t *TaskManager) waitForQuery(qid uint64, interrupt <-chan struct{}, closing <-chan struct{}, monitorCh <-chan error) { + var timerCh <-chan time.Time + if t.QueryTimeout != 0 { + timer := time.NewTimer(t.QueryTimeout) + timerCh = timer.C + defer timer.Stop() + } + + select { + case <-closing: + t.queryError(qid, ErrQueryInterrupted) + case err := <-monitorCh: + if err == nil { + break + } + + t.queryError(qid, err) + case <-timerCh: + t.queryError(qid, ErrQueryTimeoutLimitExceeded) + case <-interrupt: + // Query was manually closed so exit the select. + return + } + t.KillQuery(qid) +} + +// Close kills all running queries and prevents new queries from being attached. +func (t *TaskManager) Close() error { + t.mu.Lock() + defer t.mu.Unlock() + + t.shutdown = true + for _, query := range t.queries { + query.setError(ErrQueryEngineShutdown) + query.close() + } + t.queries = nil + return nil +} diff --git a/vendor/github.com/influxdata/influxdb/query/tmpldata b/vendor/github.com/influxdata/influxdb/query/tmpldata new file mode 100644 index 0000000..27ffdb4 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/query/tmpldata @@ -0,0 +1,37 @@ +[ + { + "Name":"Float", + "name":"float", + "Type":"float64", + "Nil":"0", + "Zero":"float64(0)" + }, + { + "Name":"Integer", + "name":"integer", + "Type":"int64", + "Nil":"0", + "Zero":"int64(0)" + }, + { + "Name":"Unsigned", + "name":"unsigned", + "Type":"uint64", + "Nil":"0", + "Zero":"uint64(0)" + }, + { + "Name":"String", + "name":"string", + "Type":"string", + "Nil":"\"\"", + "Zero":"\"\"" + }, + { + "Name":"Boolean", + "name":"boolean", + "Type":"bool", + "Nil":"false", + "Zero":"false" + } +] diff --git a/vendor/github.com/influxdata/influxdb/releng/README.md b/vendor/github.com/influxdata/influxdb/releng/README.md new file mode 100644 index 0000000..9330394 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/releng/README.md @@ -0,0 +1,39 @@ +# influxdb/releng + +This directory and its subdirectories contain release engineering scripts to build source tarballs and packages, run unit tests in an isolated environment, and so on. 
+The directory layout typically looks like: + +``` +├── Dockerfile +├── build.bash +└── fs + └── usr + └── local + └── bin + └── influxdb_tarball.bash +``` + +Where you only need to run `build.bash` (or other shell scripts in the root directory) with valid arguments to complete the step. +All scripts in the root folders accept the `-h` flag to explain usage. + +The `fs` folder is overlaid on the Docker image so that is clear where each script for the Docker containers reside. +Those scripts make assumptions about the environment which are controlled in the outer scripts (i.e. `build.bash`), so the scripts not intended to be run outside of Docker. + +By default, these scripts will use the "current" Go version as determined by `_go_versions.sh`. +To use the "next" version of Go, set the environment variable GO_NEXT to a non-empty value. + +## source-tarball + +Generates a source tarball of influxdb that can be extracted to a new `GOPATH` such that you can `go build github.com/influxdata/influxdb/cmd/influxd`, etc., without manually setting linker flags or anything. + +## raw-binaries + +Builds the raw binaries for the various influxdb commands, and stores them in OS/architecture-specific tarballs in the provided destination path. + +## packages + +Given a source tarball and an archive of raw binaries, generates OS/architecture-specific packages (i.e. .deb and .rpm files). + +## unit-tests + +Given a source tarball, runs the influxdb unit tests in a clean Docker environment. diff --git a/vendor/github.com/influxdata/influxdb/releng/_go_versions.sh b/vendor/github.com/influxdata/influxdb/releng/_go_versions.sh new file mode 100644 index 0000000..68b640b --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/releng/_go_versions.sh @@ -0,0 +1,5 @@ +# These are the current and "next" Go versions used to build influxdb. +# This file is meant to be sourced from other scripts. + +export GO_CURRENT_VERSION=1.10.6 +export GO_NEXT_VERSION=1.11.3 diff --git a/vendor/github.com/influxdata/influxdb/releng/raw-binaries/Dockerfile b/vendor/github.com/influxdata/influxdb/releng/raw-binaries/Dockerfile new file mode 100644 index 0000000..c797f96 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/releng/raw-binaries/Dockerfile @@ -0,0 +1,10 @@ +ARG GO_VERSION +FROM golang:${GO_VERSION} + +RUN apt-get update && apt-get install -y --no-install-recommends \ + jq \ + && rm -rf /var/lib/apt/lists/* + +COPY fs/ / + +ENTRYPOINT ["influxdb_raw_binaries.bash"] diff --git a/vendor/github.com/influxdata/influxdb/releng/raw-binaries/build.bash b/vendor/github.com/influxdata/influxdb/releng/raw-binaries/build.bash new file mode 100755 index 0000000..9bbfc86 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/releng/raw-binaries/build.bash @@ -0,0 +1,59 @@ +#!/bin/bash + +function printHelp() { + >&2 echo "USAGE: $0 -i PATH_TO_SOURCE_TARBALL -o OUTDIR + +Emits an archive of influxdb binaries based on the current environment's GOOS and GOARCH. +Respects CGO_ENABLED. + +If the environment variable GO_NEXT is not empty, builds the binaries with the 'next' version of Go. +" +} + +if [ $# -eq 0 ]; then + printHelp + exit 1 +fi + +if [ -z "$GOOS" ] || [ -z "$GOARCH" ]; then + >&2 echo 'The environment variables $GOOS and $GOARCH must both be set.' 
+ exit 1 +fi + +SRCDIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +source "$SRCDIR/../_go_versions.sh" + +OUTDIR="" +TARBALL="" +RACE_FLAG="" + +while getopts hi:o:r arg; do + case "$arg" in + h) printHelp; exit 1;; + i) TARBALL="$OPTARG";; + o) OUTDIR="$OPTARG";; + r) RACE_FLAG="-r";; + esac +done + +if [ -z "$OUTDIR" ] || [ -z "$TARBALL" ]; then + printHelp + exit 1 +fi + +if [ -z "$GO_NEXT" ]; then + DOCKER_TAG=latest + GO_VERSION="$GO_CURRENT_VERSION" +else + DOCKER_TAG=next + GO_VERSION="$GO_NEXT_VERSION" +fi +docker build --build-arg "GO_VERSION=$GO_VERSION" -t influxdata/influxdb/releng/raw-binaries:"$DOCKER_TAG" "$SRCDIR" + +mkdir -p "$OUTDIR" + +docker run --rm \ + --mount type=bind,source="${OUTDIR}",destination=/out \ + --mount type=bind,source="${TARBALL}",destination=/influxdb-src.tar.gz,ro=1 \ + -e GOOS -e GOARCH -e CGO_ENABLED \ + influxdata/influxdb/releng/raw-binaries:"$DOCKER_TAG" $RACE_FLAG diff --git a/vendor/github.com/influxdata/influxdb/releng/source-tarball/Dockerfile b/vendor/github.com/influxdata/influxdb/releng/source-tarball/Dockerfile new file mode 100644 index 0000000..ebb303a --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/releng/source-tarball/Dockerfile @@ -0,0 +1,20 @@ +ARG GO_VERSION +FROM golang:${GO_VERSION}-alpine + +RUN apk add --no-cache \ + asciidoc \ + bash \ + git \ + openssh-client \ + make \ + tar \ + xmlto + +# Build the dep binary and then clean out /go. +RUN go get github.com/golang/dep/cmd/dep && \ + mv /go/bin/dep /usr/local/bin/dep && \ + rm -rf /go/* + +COPY fs/ / + +ENTRYPOINT ["influxdb_tarball.bash"] diff --git a/vendor/github.com/influxdata/influxdb/releng/source-tarball/build.bash b/vendor/github.com/influxdata/influxdb/releng/source-tarball/build.bash new file mode 100755 index 0000000..8944566 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/releng/source-tarball/build.bash @@ -0,0 +1,60 @@ +#!/bin/bash + +function printHelp() { + >&2 echo \ +"USAGE: $0 [-p INFLUXDB_GIT_DIR] + -s INFLUXDB_SHA -b INFLUXDB_BRANCH -v INFLUXDB_VERSION -o OUTDIR + +Emits a tarball of influxdb source code and dependencies to OUTDIR. + +If using -p flag, directory containing influxdb source code will be used as source of truth. +This is helpful if you have local commits that have not been pushed. +" +} + +if [ $# -eq 0 ]; then + printHelp + exit 1 +fi + +SRCDIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +source "$SRCDIR/../_go_versions.sh" + +SHA="" +BRANCH="" +VERSION="" +OUTDIR="" + +# These variables may expand to command arguments. Don't double quote them when used later. +INFLUXDB_GIT_MOUNT="" + + +while getopts hs:b:v:o:p: arg; do + case "$arg" in + h) printHelp; exit 1;; + s) SHA="$OPTARG";; + b) BRANCH="$OPTARG";; + v) VERSION="$OPTARG";; + o) OUTDIR="$OPTARG";; + p) INFLUXDB_GIT_MOUNT="--mount type=bind,src=$OPTARG,dst=/influxdb-git,ro=1";; + esac +done + +if [ -z "$OUTDIR" ]; then + # Not bothering to check the other variables since they're checked in the inner docker script. + printHelp + exit 1 +fi + +# Only build with GO_CURRENT_VERSION. No need to build source tarball with next version of Go. 
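+# (Of the releng scripts, only raw-binaries/build.bash and unit-tests/run.bash honor GO_NEXT.)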
+docker build --build-arg "GO_VERSION=$GO_CURRENT_VERSION" -t influxdata/influxdb/releng/source-tarball:latest "$SRCDIR" + +mkdir -p "$OUTDIR" + +docker run --rm \ + $INFLUXDB_GIT_MOUNT \ + --mount "type=bind,src=${OUTDIR},dst=/out" \ + influxdata/influxdb/releng/source-tarball:latest \ + -s "$SHA" \ + -b "$BRANCH" \ + -v "$VERSION" diff --git a/vendor/github.com/influxdata/influxdb/releng/unit-tests/Dockerfile b/vendor/github.com/influxdata/influxdb/releng/unit-tests/Dockerfile new file mode 100644 index 0000000..11c431c --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/releng/unit-tests/Dockerfile @@ -0,0 +1,15 @@ +ARG GO_VERSION +FROM golang:${GO_VERSION}-alpine + +RUN apk add --no-cache \ + bash \ + jq \ + git + +RUN go get -u github.com/jstemmer/go-junit-report && \ + mv /go/bin/go-junit-report /usr/bin/go-junit-report && \ + rm -rf /go/* + +COPY fs/ / + +ENTRYPOINT ["influxdb_prebuild_tests.bash"] diff --git a/vendor/github.com/influxdata/influxdb/releng/unit-tests/run.bash b/vendor/github.com/influxdata/influxdb/releng/unit-tests/run.bash new file mode 100755 index 0000000..d4404a8 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/releng/unit-tests/run.bash @@ -0,0 +1,48 @@ +#!/bin/bash + +function printHelp() { + >&2 echo "USAGE: $0 -i PATH_TO_SOURCE_TARBALL -o OUTDIR + +Runs unit tests for influxdb. + +If the environment variable GO_NEXT is not empty, tests run with the 'next' version of Go. +" +} + +if [ $# -eq 0 ]; then + printHelp + exit 1 +fi + +SRCDIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +source "$SRCDIR/../_go_versions.sh" + +OUTDIR="" +TARBALL="" + +while getopts hi:o: arg; do + case "$arg" in + h) printHelp; exit 1;; + i) TARBALL="$OPTARG";; + o) OUTDIR="$OPTARG";; + esac +done + +if [ -z "$TARBALL" ] || [ -z "$OUTDIR" ]; then + printHelp + exit 1 +fi + +if [ -z "$GO_NEXT" ]; then + DOCKER_TAG=latest + GO_VERSION="$GO_CURRENT_VERSION" +else + DOCKER_TAG=next + GO_VERSION="$GO_NEXT_VERSION" +fi +docker build --build-arg "GO_VERSION=$GO_VERSION" -t influxdata/influxdb/releng/unit-tests:"$DOCKER_TAG" "$SRCDIR" + +docker run --rm \ + --mount type=bind,source="$OUTDIR",destination=/out \ + --mount type=bind,source="$TARBALL",destination=/influxdb-src.tar.gz,ro=1 \ + influxdata/influxdb/releng/unit-tests:"$DOCKER_TAG" diff --git a/vendor/github.com/influxdata/influxdb/scripts/influxdb.service b/vendor/github.com/influxdata/influxdb/scripts/influxdb.service new file mode 100644 index 0000000..25c2ffe --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/scripts/influxdb.service @@ -0,0 +1,19 @@ +# If you modify this, please also make sure to edit init.sh + +[Unit] +Description=InfluxDB is an open-source, distributed, time series database +Documentation=https://docs.influxdata.com/influxdb/ +After=network-online.target + +[Service] +User=influxdb +Group=influxdb +LimitNOFILE=65536 +EnvironmentFile=-/etc/default/influxdb +ExecStart=/usr/bin/influxd -config /etc/influxdb/influxdb.conf $INFLUXD_OPTS +KillMode=control-group +Restart=on-failure + +[Install] +WantedBy=multi-user.target +Alias=influxd.service diff --git a/vendor/github.com/influxdata/influxdb/scripts/init.sh b/vendor/github.com/influxdata/influxdb/scripts/init.sh new file mode 100755 index 0000000..ba13620 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/scripts/init.sh @@ -0,0 +1,233 @@ +#!/bin/bash +### BEGIN INIT INFO +# Provides: influxd +# Required-Start: $all +# Required-Stop: $remote_fs $syslog +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Short-Description: Start the 
InfluxDB process +### END INIT INFO + +# If you modify this, please make sure to also edit influxdb.service + +# Command-line options that can be set in /etc/default/influxdb. These will override +# any config file values. +DEFAULT=/etc/default/influxdb + +# Daemon options +INFLUXD_OPTS= + +# Process name ( For display ) +NAME=influxdb + +# User and group +USER=influxdb +GROUP=influxdb + +# Check for sudo or root privileges before continuing +if [ "$UID" != "0" ]; then + echo "You must be root to run this script" + exit 1 +fi + +# Daemon name, where is the actual executable If the daemon is not +# there, then exit. +DAEMON=/usr/bin/influxd +if [ ! -x $DAEMON ]; then + echo "Executable $DAEMON does not exist!" + exit 5 +fi + +# Configuration file +CONFIG=/etc/influxdb/influxdb.conf + +# PID file for the daemon +PIDFILE=/var/run/influxdb/influxd.pid +PIDDIR=`dirname $PIDFILE` +if [ ! -d "$PIDDIR" ]; then + mkdir -p $PIDDIR + chown $USER:$GROUP $PIDDIR +fi + +# Max open files +OPEN_FILE_LIMIT=65536 + +if [ -r /lib/lsb/init-functions ]; then + source /lib/lsb/init-functions +fi + +# Logging +if [ -z "$STDOUT" ]; then + STDOUT=/dev/null +fi + +if [ ! -f "$STDOUT" ]; then + mkdir -p $(dirname $STDOUT) +fi + +if [ -z "$STDERR" ]; then + STDERR=/var/log/influxdb/influxd.log +fi + +if [ ! -f "$STDERR" ]; then + mkdir -p $(dirname $STDERR) +fi + +# Override init script variables with DEFAULT values +if [ -r $DEFAULT ]; then + source $DEFAULT +fi + +function log_failure_msg() { + echo "$@" "[ FAILED ]" +} + +function log_success_msg() { + echo "$@" "[ OK ]" +} + +function start() { + # Check if config file exist + if [ ! -r $CONFIG ]; then + log_failure_msg "config file $CONFIG doesn't exist (or you don't have permission to view)" + exit 4 + fi + + # Check that the PID file exists, and check the actual status of process + if [ -f $PIDFILE ]; then + PID="$(cat $PIDFILE)" + if kill -0 "$PID" &>/dev/null; then + # Process is already up + log_success_msg "$NAME process is already running" + return 0 + fi + else + su -s /bin/sh -c "touch $PIDFILE" $USER &>/dev/null + if [ $? -ne 0 ]; then + log_failure_msg "$PIDFILE not writable, check permissions" + exit 5 + fi + fi + + # Bump the file limits, before launching the daemon. These will + # carry over to launched processes. + ulimit -n $OPEN_FILE_LIMIT + if [ $? -ne 0 ]; then + log_failure_msg "Unable to set ulimit to $OPEN_FILE_LIMIT" + exit 1 + fi + + # Launch process + echo "Starting $NAME..." + if command -v start-stop-daemon &>/dev/null; then + start-stop-daemon \ + --chuid $USER:$GROUP \ + --start \ + --quiet \ + --pidfile $PIDFILE \ + --exec $DAEMON \ + -- \ + -pidfile $PIDFILE \ + -config $CONFIG \ + $INFLUXD_OPTS >>$STDOUT 2>>$STDERR & + else + local CMD="$DAEMON -pidfile $PIDFILE -config $CONFIG $INFLUXD_OPTS >>$STDOUT 2>>$STDERR &" + su -s /bin/sh -c "$CMD" $USER + fi + + # Sleep to verify process is still up + sleep 1 + if [ -f $PIDFILE ]; then + # PIDFILE exists + if kill -0 $(cat $PIDFILE) &>/dev/null; then + # PID up, service running + log_success_msg "$NAME process was started" + return 0 + fi + fi + log_failure_msg "$NAME process was unable to start" + exit 1 +} + +function stop() { + # Stop the daemon. + if [ -f $PIDFILE ]; then + local PID="$(cat $PIDFILE)" + if kill -0 $PID &>/dev/null; then + echo "Stopping $NAME..." + # Process still up, send SIGTERM and remove PIDFILE + kill -s TERM $PID &>/dev/null && rm -f "$PIDFILE" &>/dev/null + n=0 + while true; do + # Enter loop to ensure process is stopped + kill -0 $PID &>/dev/null + if [ "$?" 
!= "0" ]; then + # Process stopped, break from loop + log_success_msg "$NAME process was stopped" + return 0 + fi + + # Process still up after signal, sleep and wait + sleep 1 + n=$(expr $n + 1) + if [ $n -eq 30 ]; then + # After 30 seconds, send SIGKILL + echo "Timeout exceeded, sending SIGKILL..." + kill -s KILL $PID &>/dev/null + elif [ $? -eq 40 ]; then + # After 40 seconds, error out + log_failure_msg "could not stop $NAME process" + exit 1 + fi + done + fi + fi + log_success_msg "$NAME process already stopped" +} + +function restart() { + # Restart the daemon. + stop + start +} + +function status() { + # Check the status of the process. + if [ -f $PIDFILE ]; then + PID="$(cat $PIDFILE)" + if kill -0 $PID &>/dev/null; then + log_success_msg "$NAME process is running" + exit 0 + fi + fi + log_failure_msg "$NAME process is not running" + exit 1 +} + +case $1 in + start) + start + ;; + + stop) + stop + ;; + + restart) + restart + ;; + + status) + status + ;; + + version) + $DAEMON version + ;; + + *) + # For invalid arguments, print the usage message. + echo "Usage: $0 {start|stop|restart|status|version}" + exit 2 + ;; +esac diff --git a/vendor/github.com/influxdata/influxdb/scripts/logrotate b/vendor/github.com/influxdata/influxdb/scripts/logrotate new file mode 100644 index 0000000..de410d4 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/scripts/logrotate @@ -0,0 +1,8 @@ +/var/log/influxdb/influxd.log { + daily + rotate 7 + missingok + dateext + copytruncate + compress +} diff --git a/vendor/github.com/influxdata/influxdb/scripts/post-install.sh b/vendor/github.com/influxdata/influxdb/scripts/post-install.sh new file mode 100644 index 0000000..d609f6f --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/scripts/post-install.sh @@ -0,0 +1,71 @@ +#!/bin/bash + +BIN_DIR=/usr/bin +DATA_DIR=/var/lib/influxdb +LOG_DIR=/var/log/influxdb +SCRIPT_DIR=/usr/lib/influxdb/scripts +LOGROTATE_DIR=/etc/logrotate.d + +function install_init { + cp -f $SCRIPT_DIR/init.sh /etc/init.d/influxdb + chmod +x /etc/init.d/influxdb +} + +function install_systemd { + cp -f $SCRIPT_DIR/influxdb.service /lib/systemd/system/influxdb.service + systemctl enable influxdb +} + +function install_update_rcd { + update-rc.d influxdb defaults +} + +function install_chkconfig { + chkconfig --add influxdb +} + +id influxdb &>/dev/null +if [[ $? -ne 0 ]]; then + useradd --system -U -M influxdb -s /bin/false -d $DATA_DIR +fi + +chown -R -L influxdb:influxdb $DATA_DIR +chown -R -L influxdb:influxdb $LOG_DIR + +# Add defaults file, if it doesn't exist +if [[ ! 
-f /etc/default/influxdb ]]; then + touch /etc/default/influxdb +fi + +# Remove legacy symlink, if it exists +if [[ -L /etc/init.d/influxdb ]]; then + rm -f /etc/init.d/influxdb +fi + +# Distribution-specific logic +if [[ -f /etc/redhat-release ]]; then + # RHEL-variant logic + if command -v systemctl &>/dev/null; then + install_systemd + else + # Assuming sysv + install_init + install_chkconfig + fi +elif [[ -f /etc/debian_version ]]; then + # Debian/Ubuntu logic + if command -v systemctl &>/dev/null; then + install_systemd + else + # Assuming sysv + install_init + install_update_rcd + fi +elif [[ -f /etc/os-release ]]; then + source /etc/os-release + if [[ $ID = "amzn" ]]; then + # Amazon Linux logic + install_init + install_chkconfig + fi +fi diff --git a/vendor/github.com/influxdata/influxdb/scripts/post-uninstall.sh b/vendor/github.com/influxdata/influxdb/scripts/post-uninstall.sh new file mode 100644 index 0000000..c62dcf6 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/scripts/post-uninstall.sh @@ -0,0 +1,54 @@ +#!/bin/bash + +function disable_systemd { + systemctl disable influxdb + rm -f /lib/systemd/system/influxdb.service +} + +function disable_update_rcd { + update-rc.d -f influxdb remove + rm -f /etc/init.d/influxdb +} + +function disable_chkconfig { + chkconfig --del influxdb + rm -f /etc/init.d/influxdb +} + +if [[ -f /etc/redhat-release ]]; then + # RHEL-variant logic + if [[ "$1" = "0" ]]; then + # InfluxDB is no longer installed, remove from init system + rm -f /etc/default/influxdb + + if command -v systemctl &>/dev/null; then + disable_systemd + else + # Assuming sysv + disable_chkconfig + fi + fi +elif [[ -f /etc/lsb-release ]]; then + # Debian/Ubuntu logic + if [[ "$1" != "upgrade" ]]; then + # Remove/purge + rm -f /etc/default/influxdb + + if command -v systemctl &>/dev/null; then + disable_systemd + else + # Assuming sysv + disable_update_rcd + fi + fi +elif [[ -f /etc/os-release ]]; then + source /etc/os-release + if [[ $ID = "amzn" ]]; then + # Amazon Linux logic + if [[ "$1" = "0" ]]; then + # InfluxDB is no longer installed, remove from init system + rm -f /etc/default/influxdb + disable_chkconfig + fi + fi +fi diff --git a/vendor/github.com/influxdata/influxdb/scripts/pre-install.sh b/vendor/github.com/influxdata/influxdb/scripts/pre-install.sh new file mode 100755 index 0000000..d57ff0f --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/scripts/pre-install.sh @@ -0,0 +1,16 @@ +#!/bin/bash + +if [[ -d /etc/opt/influxdb ]]; then + # Legacy configuration found + if [[ ! -d /etc/influxdb ]]; then + # New configuration does not exist, move legacy configuration to new location + echo -e "Please note, InfluxDB's configuration is now located at '/etc/influxdb' (previously '/etc/opt/influxdb')." + mv -vn /etc/opt/influxdb /etc/influxdb + + if [[ -f /etc/influxdb/influxdb.conf ]]; then + backup_name="influxdb.conf.$(date +%s).backup" + echo "A backup of your current configuration can be found at: /etc/influxdb/$backup_name" + cp -a /etc/influxdb/influxdb.conf /etc/influxdb/$backup_name + fi + fi +fi diff --git a/vendor/github.com/influxdata/influxdb/services/collectd/README.md b/vendor/github.com/influxdata/influxdb/services/collectd/README.md new file mode 100644 index 0000000..478ea36 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/services/collectd/README.md @@ -0,0 +1,40 @@ +# The collectd Input + +The [collectd](https://collectd.org) input allows InfluxDB to accept data transmitted in collectd native format. 
This data is transmitted over UDP.
+
+## A note on UDP/IP OS Buffer sizes
+
+If you're running Linux or FreeBSD, please adjust your OS UDP buffer
+size limit, [see here for more details.](../udp/README.md#a-note-on-udpip-os-buffer-sizes)
+
+## Configuration
+
+Each collectd input allows the binding address, target database, and target retention policy to be set. If the database does not exist, it will be created automatically when the input is initialized. If the retention policy is not configured, then the default retention policy for the database is used. However, if the retention policy is set, the retention policy must be explicitly created. The input will not automatically create it.
+
+Each collectd input also performs internal batching of the points it receives, as batched writes to the database are more efficient. The default batch size is 5000, the default pending batch factor is 10, and the default batch timeout is 10 seconds. This means the input will write batches of maximum size 5000, but if a batch has not reached 5000 points within 10 seconds of the first point being added to a batch, it will emit that batch regardless of size. The pending batch factor controls how many batches can be in memory at once, allowing the input to transmit a batch while still building other batches.
+
+Multi-value plugins can be handled in one of two ways. Setting parse-multivalue-plugin to "split" will parse and store the multi-value plugin data (e.g., df free:5000,used:1000) into separate measurements (e.g., (df_free, value=5000) (df_used, value=1000)), while "join" will parse and store the multi-value plugin as a single multi-value measurement (e.g., (df, free=5000,used=1000)). "split" is the default behavior for backward compatibility with previous versions of InfluxDB.
+
+The path to the collectd types database file may also be set.
+
+## Large UDP packets
+
+Please note that UDP packets larger than the standard size of 1452 bytes are dropped at the time of ingestion. Be sure to set `MaxPacketSize` to 1452 in the collectd configuration.
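+
+As a rough sketch (not part of the upstream README, and assuming the vendored `services/collectd` and `toml` packages from this tree), the settings above map one-to-one onto the fields of the service's `Config` struct defined in `services/collectd/config.go`:
+
+```go
+package main
+
+import (
+    "log"
+    "time"
+
+    "github.com/influxdata/influxdb/services/collectd"
+    "github.com/influxdata/influxdb/toml"
+)
+
+func main() {
+    c := collectd.NewConfig() // starts from the defaults documented above
+    c.Enabled = true
+    c.BindAddress = ":25826"
+    c.Database = "collectd"
+    c.BatchSize = 5000
+    c.BatchPending = 10
+    c.BatchDuration = toml.Duration(10 * time.Second)
+    c.ParseMultiValuePlugin = "split" // or "join"
+
+    if err := c.Validate(); err != nil {
+        log.Fatal(err)
+    }
+
+    // NewService applies WithDefaults for any unset fields; MetaClient and
+    // PointsWriter still have to be wired up before Open is called.
+    svc := collectd.NewService(c)
+    _ = svc
+}
+```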
+ +## Config Example + +``` +[[collectd]] + enabled = true + bind-address = ":25826" # the bind address + database = "collectd" # Name of the database that will be written to + retention-policy = "" + batch-size = 5000 # will flush if this many points get buffered + batch-pending = 10 # number of batches that may be pending in memory + batch-timeout = "10s" + read-buffer = 0 # UDP read buffer size, 0 means to use OS default + typesdb = "/usr/share/collectd/types.db" + security-level = "none" # "none", "sign", or "encrypt" + auth-file = "/etc/collectd/auth_file" + parse-multivalue-plugin = "split" # "split" or "join" +``` diff --git a/vendor/github.com/influxdata/influxdb/services/collectd/collectd_test.conf b/vendor/github.com/influxdata/influxdb/services/collectd/collectd_test.conf new file mode 100644 index 0000000..97cc4cc --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/services/collectd/collectd_test.conf @@ -0,0 +1,209 @@ +absolute value:ABSOLUTE:0:U +apache_bytes value:DERIVE:0:U +apache_connections value:GAUGE:0:65535 +apache_idle_workers value:GAUGE:0:65535 +apache_requests value:DERIVE:0:U +apache_scoreboard value:GAUGE:0:65535 +ath_nodes value:GAUGE:0:65535 +ath_stat value:DERIVE:0:U +backends value:GAUGE:0:65535 +bitrate value:GAUGE:0:4294967295 +bytes value:GAUGE:0:U +cache_eviction value:DERIVE:0:U +cache_operation value:DERIVE:0:U +cache_ratio value:GAUGE:0:100 +cache_result value:DERIVE:0:U +cache_size value:GAUGE:0:U +charge value:GAUGE:0:U +compression_ratio value:GAUGE:0:2 +compression uncompressed:DERIVE:0:U, compressed:DERIVE:0:U +connections value:DERIVE:0:U +conntrack value:GAUGE:0:4294967295 +contextswitch value:DERIVE:0:U +counter value:COUNTER:U:U +cpufreq value:GAUGE:0:U +cpu value:DERIVE:0:U +current_connections value:GAUGE:0:U +current_sessions value:GAUGE:0:U +current value:GAUGE:U:U +delay value:GAUGE:-1000000:1000000 +derive value:DERIVE:0:U +df_complex value:GAUGE:0:U +df_inodes value:GAUGE:0:U +df used:GAUGE:0:1125899906842623, free:GAUGE:0:1125899906842623 +disk_latency read:GAUGE:0:U, write:GAUGE:0:U +disk_merged read:DERIVE:0:U, write:DERIVE:0:U +disk_octets read:DERIVE:0:U, write:DERIVE:0:U +disk_ops_complex value:DERIVE:0:U +disk_ops read:DERIVE:0:U, write:DERIVE:0:U +disk_time read:DERIVE:0:U, write:DERIVE:0:U +dns_answer value:DERIVE:0:U +dns_notify value:DERIVE:0:U +dns_octets queries:DERIVE:0:U, responses:DERIVE:0:U +dns_opcode value:DERIVE:0:U +dns_qtype_cached value:GAUGE:0:4294967295 +dns_qtype value:DERIVE:0:U +dns_query value:DERIVE:0:U +dns_question value:DERIVE:0:U +dns_rcode value:DERIVE:0:U +dns_reject value:DERIVE:0:U +dns_request value:DERIVE:0:U +dns_resolver value:DERIVE:0:U +dns_response value:DERIVE:0:U +dns_transfer value:DERIVE:0:U +dns_update value:DERIVE:0:U +dns_zops value:DERIVE:0:U +duration seconds:GAUGE:0:U +email_check value:GAUGE:0:U +email_count value:GAUGE:0:U +email_size value:GAUGE:0:U +entropy value:GAUGE:0:4294967295 +fanspeed value:GAUGE:0:U +file_size value:GAUGE:0:U +files value:GAUGE:0:U +flow value:GAUGE:0:U +fork_rate value:DERIVE:0:U +frequency_offset value:GAUGE:-1000000:1000000 +frequency value:GAUGE:0:U +fscache_stat value:DERIVE:0:U +gauge value:GAUGE:U:U +hash_collisions value:DERIVE:0:U +http_request_methods value:DERIVE:0:U +http_requests value:DERIVE:0:U +http_response_codes value:DERIVE:0:U +humidity value:GAUGE:0:100 +if_collisions value:DERIVE:0:U +if_dropped rx:DERIVE:0:U, tx:DERIVE:0:U +if_errors rx:DERIVE:0:U, tx:DERIVE:0:U +if_multicast value:DERIVE:0:U +if_octets rx:DERIVE:0:U, 
tx:DERIVE:0:U +if_packets rx:DERIVE:0:U, tx:DERIVE:0:U +if_rx_errors value:DERIVE:0:U +if_rx_octets value:DERIVE:0:U +if_tx_errors value:DERIVE:0:U +if_tx_octets value:DERIVE:0:U +invocations value:DERIVE:0:U +io_octets rx:DERIVE:0:U, tx:DERIVE:0:U +io_packets rx:DERIVE:0:U, tx:DERIVE:0:U +ipt_bytes value:DERIVE:0:U +ipt_packets value:DERIVE:0:U +irq value:DERIVE:0:U +latency value:GAUGE:0:U +links value:GAUGE:0:U +load shortterm:GAUGE:0:5000, midterm:GAUGE:0:5000, longterm:GAUGE:0:5000 +md_disks value:GAUGE:0:U +memcached_command value:DERIVE:0:U +memcached_connections value:GAUGE:0:U +memcached_items value:GAUGE:0:U +memcached_octets rx:DERIVE:0:U, tx:DERIVE:0:U +memcached_ops value:DERIVE:0:U +memory value:GAUGE:0:281474976710656 +multimeter value:GAUGE:U:U +mutex_operations value:DERIVE:0:U +mysql_commands value:DERIVE:0:U +mysql_handler value:DERIVE:0:U +mysql_locks value:DERIVE:0:U +mysql_log_position value:DERIVE:0:U +mysql_octets rx:DERIVE:0:U, tx:DERIVE:0:U +nfs_procedure value:DERIVE:0:U +nginx_connections value:GAUGE:0:U +nginx_requests value:DERIVE:0:U +node_octets rx:DERIVE:0:U, tx:DERIVE:0:U +node_rssi value:GAUGE:0:255 +node_stat value:DERIVE:0:U +node_tx_rate value:GAUGE:0:127 +objects value:GAUGE:0:U +operations value:DERIVE:0:U +percent value:GAUGE:0:100.1 +percent_bytes value:GAUGE:0:100.1 +percent_inodes value:GAUGE:0:100.1 +pf_counters value:DERIVE:0:U +pf_limits value:DERIVE:0:U +pf_source value:DERIVE:0:U +pf_states value:GAUGE:0:U +pf_state value:DERIVE:0:U +pg_blks value:DERIVE:0:U +pg_db_size value:GAUGE:0:U +pg_n_tup_c value:DERIVE:0:U +pg_n_tup_g value:GAUGE:0:U +pg_numbackends value:GAUGE:0:U +pg_scan value:DERIVE:0:U +pg_xact value:DERIVE:0:U +ping_droprate value:GAUGE:0:100 +ping_stddev value:GAUGE:0:65535 +ping value:GAUGE:0:65535 +players value:GAUGE:0:1000000 +power value:GAUGE:0:U +protocol_counter value:DERIVE:0:U +ps_code value:GAUGE:0:9223372036854775807 +ps_count processes:GAUGE:0:1000000, threads:GAUGE:0:1000000 +ps_cputime user:DERIVE:0:U, syst:DERIVE:0:U +ps_data value:GAUGE:0:9223372036854775807 +ps_disk_octets read:DERIVE:0:U, write:DERIVE:0:U +ps_disk_ops read:DERIVE:0:U, write:DERIVE:0:U +ps_pagefaults minflt:DERIVE:0:U, majflt:DERIVE:0:U +ps_rss value:GAUGE:0:9223372036854775807 +ps_stacksize value:GAUGE:0:9223372036854775807 +ps_state value:GAUGE:0:65535 +ps_vm value:GAUGE:0:9223372036854775807 +queue_length value:GAUGE:0:U +records value:GAUGE:0:U +requests value:GAUGE:0:U +response_time value:GAUGE:0:U +response_code value:GAUGE:0:U +route_etx value:GAUGE:0:U +route_metric value:GAUGE:0:U +routes value:GAUGE:0:U +serial_octets rx:DERIVE:0:U, tx:DERIVE:0:U +signal_noise value:GAUGE:U:0 +signal_power value:GAUGE:U:0 +signal_quality value:GAUGE:0:U +snr value:GAUGE:0:U +spam_check value:GAUGE:0:U +spam_score value:GAUGE:U:U +spl value:GAUGE:U:U +swap_io value:DERIVE:0:U +swap value:GAUGE:0:1099511627776 +tcp_connections value:GAUGE:0:4294967295 +temperature value:GAUGE:U:U +threads value:GAUGE:0:U +time_dispersion value:GAUGE:-1000000:1000000 +timeleft value:GAUGE:0:U +time_offset value:GAUGE:-1000000:1000000 +total_bytes value:DERIVE:0:U +total_connections value:DERIVE:0:U +total_objects value:DERIVE:0:U +total_operations value:DERIVE:0:U +total_requests value:DERIVE:0:U +total_sessions value:DERIVE:0:U +total_threads value:DERIVE:0:U +total_time_in_ms value:DERIVE:0:U +total_values value:DERIVE:0:U +uptime value:GAUGE:0:4294967295 +users value:GAUGE:0:65535 +vcl value:GAUGE:0:65535 +vcpu value:GAUGE:0:U +virt_cpu_total value:DERIVE:0:U 
+virt_vcpu value:DERIVE:0:U +vmpage_action value:DERIVE:0:U +vmpage_faults minflt:DERIVE:0:U, majflt:DERIVE:0:U +vmpage_io in:DERIVE:0:U, out:DERIVE:0:U +vmpage_number value:GAUGE:0:4294967295 +volatile_changes value:GAUGE:0:U +voltage_threshold value:GAUGE:U:U, threshold:GAUGE:U:U +voltage value:GAUGE:U:U +vs_memory value:GAUGE:0:9223372036854775807 +vs_processes value:GAUGE:0:65535 +vs_threads value:GAUGE:0:65535 + +# +# Legacy types +# (required for the v5 upgrade target) +# +arc_counts demand_data:COUNTER:0:U, demand_metadata:COUNTER:0:U, prefetch_data:COUNTER:0:U, prefetch_metadata:COUNTER:0:U +arc_l2_bytes read:COUNTER:0:U, write:COUNTER:0:U +arc_l2_size value:GAUGE:0:U +arc_ratio value:GAUGE:0:U +arc_size current:GAUGE:0:U, target:GAUGE:0:U, minlimit:GAUGE:0:U, maxlimit:GAUGE:0:U +mysql_qcache hits:COUNTER:0:U, inserts:COUNTER:0:U, not_cached:COUNTER:0:U, lowmem_prunes:COUNTER:0:U, queries_in_cache:GAUGE:0:U +mysql_threads running:GAUGE:0:U, connected:GAUGE:0:U, cached:GAUGE:0:U, created:COUNTER:0:U diff --git a/vendor/github.com/influxdata/influxdb/services/collectd/config.go b/vendor/github.com/influxdata/influxdb/services/collectd/config.go new file mode 100644 index 0000000..1095658 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/services/collectd/config.go @@ -0,0 +1,177 @@ +package collectd + +import ( + "errors" + "time" + + "github.com/influxdata/influxdb/monitor/diagnostics" + "github.com/influxdata/influxdb/toml" +) + +const ( + // DefaultBindAddress is the default port to bind to. + DefaultBindAddress = ":25826" + + // DefaultDatabase is the default DB to write to. + DefaultDatabase = "collectd" + + // DefaultRetentionPolicy is the default retention policy of the writes. + DefaultRetentionPolicy = "" + + // DefaultBatchSize is the default write batch size. + DefaultBatchSize = 5000 + + // DefaultBatchPending is the default number of pending write batches. + DefaultBatchPending = 10 + + // DefaultBatchDuration is the default batch timeout duration. + DefaultBatchDuration = toml.Duration(10 * time.Second) + + // DefaultTypesDB is the default location of the collectd types db file. + DefaultTypesDB = "/usr/share/collectd/types.db" + + // DefaultReadBuffer is the default buffer size for the UDP listener. + // Sets the size of the operating system's receive buffer associated with + // the UDP traffic. Keep in mind that the OS must be able + // to handle the number set here or the UDP listener will error and exit. + // + // DefaultReadBuffer = 0 means to use the OS default, which is usually too + // small for high UDP performance. + // + // Increasing OS buffer limits: + // Linux: sudo sysctl -w net.core.rmem_max= + // BSD/Darwin: sudo sysctl -w kern.ipc.maxsockbuf= + DefaultReadBuffer = 0 + + // DefaultSecurityLevel is the default security level. + DefaultSecurityLevel = "none" + + // DefaultAuthFile is the default location of the user/password file. + DefaultAuthFile = "/etc/collectd/auth_file" + + // DefaultParseMultiValuePlugin is "split", defaulting to version <1.2 where plugin values were split into separate rows + DefaultParseMultiValuePlugin = "split" +) + +// Config represents a configuration for the collectd service. 
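+// Fields are decoded from the [[collectd]] section of the configuration file
+// via their toml tags; WithDefaults fills in any values left unset.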
+type Config struct { + Enabled bool `toml:"enabled"` + BindAddress string `toml:"bind-address"` + Database string `toml:"database"` + RetentionPolicy string `toml:"retention-policy"` + BatchSize int `toml:"batch-size"` + BatchPending int `toml:"batch-pending"` + BatchDuration toml.Duration `toml:"batch-timeout"` + ReadBuffer int `toml:"read-buffer"` + TypesDB string `toml:"typesdb"` + SecurityLevel string `toml:"security-level"` + AuthFile string `toml:"auth-file"` + ParseMultiValuePlugin string `toml:"parse-multivalue-plugin"` +} + +// NewConfig returns a new instance of Config with defaults. +func NewConfig() Config { + return Config{ + BindAddress: DefaultBindAddress, + Database: DefaultDatabase, + RetentionPolicy: DefaultRetentionPolicy, + ReadBuffer: DefaultReadBuffer, + BatchSize: DefaultBatchSize, + BatchPending: DefaultBatchPending, + BatchDuration: DefaultBatchDuration, + TypesDB: DefaultTypesDB, + SecurityLevel: DefaultSecurityLevel, + AuthFile: DefaultAuthFile, + ParseMultiValuePlugin: DefaultParseMultiValuePlugin, + } +} + +// WithDefaults takes the given config and returns a new config with any required +// default values set. +func (c *Config) WithDefaults() *Config { + d := *c + if d.BindAddress == "" { + d.BindAddress = DefaultBindAddress + } + if d.Database == "" { + d.Database = DefaultDatabase + } + if d.RetentionPolicy == "" { + d.RetentionPolicy = DefaultRetentionPolicy + } + if d.BatchSize == 0 { + d.BatchSize = DefaultBatchSize + } + if d.BatchPending == 0 { + d.BatchPending = DefaultBatchPending + } + if d.BatchDuration == 0 { + d.BatchDuration = DefaultBatchDuration + } + if d.ReadBuffer == 0 { + d.ReadBuffer = DefaultReadBuffer + } + if d.TypesDB == "" { + d.TypesDB = DefaultTypesDB + } + if d.SecurityLevel == "" { + d.SecurityLevel = DefaultSecurityLevel + } + if d.AuthFile == "" { + d.AuthFile = DefaultAuthFile + } + if d.ParseMultiValuePlugin == "" { + d.ParseMultiValuePlugin = DefaultParseMultiValuePlugin + } + + return &d +} + +// Validate returns an error if the Config is invalid. +func (c *Config) Validate() error { + switch c.SecurityLevel { + case "none", "sign", "encrypt": + default: + return errors.New("Invalid security level") + } + + switch c.ParseMultiValuePlugin { + case "split", "join": + default: + return errors.New(`Invalid value for parse-multivalue-plugin. Valid options are "split" and "join"`) + } + + return nil +} + +// Configs wraps a slice of Config to aggregate diagnostics. +type Configs []Config + +// Diagnostics returns one set of diagnostics for all of the Configs. +func (c Configs) Diagnostics() (*diagnostics.Diagnostics, error) { + d := &diagnostics.Diagnostics{ + Columns: []string{"enabled", "bind-address", "database", "retention-policy", "batch-size", "batch-pending", "batch-timeout"}, + } + + for _, cc := range c { + if !cc.Enabled { + d.AddRow([]interface{}{false}) + continue + } + + r := []interface{}{true, cc.BindAddress, cc.Database, cc.RetentionPolicy, cc.BatchSize, cc.BatchPending, cc.BatchDuration} + d.AddRow(r) + } + + return d, nil +} + +// Enabled returns true if any underlying Config is Enabled. 
+func (c Configs) Enabled() bool { + for _, cc := range c { + if cc.Enabled { + return true + } + } + return false +} diff --git a/vendor/github.com/influxdata/influxdb/services/collectd/config_test.go b/vendor/github.com/influxdata/influxdb/services/collectd/config_test.go new file mode 100644 index 0000000..6cae66c --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/services/collectd/config_test.go @@ -0,0 +1,32 @@ +package collectd_test + +import ( + "testing" + + "github.com/BurntSushi/toml" + "github.com/influxdata/influxdb/services/collectd" +) + +func TestConfig_Parse(t *testing.T) { + // Parse configuration. + var c collectd.Config + if _, err := toml.Decode(` +enabled = true +bind-address = ":9000" +database = "xxx" +typesdb = "yyy" +`, &c); err != nil { + t.Fatal(err) + } + + // Validate configuration. + if !c.Enabled { + t.Fatalf("unexpected enabled: %v", c.Enabled) + } else if c.BindAddress != ":9000" { + t.Fatalf("unexpected bind address: %s", c.BindAddress) + } else if c.Database != "xxx" { + t.Fatalf("unexpected database: %s", c.Database) + } else if c.TypesDB != "yyy" { + t.Fatalf("unexpected types db: %s", c.TypesDB) + } +} diff --git a/vendor/github.com/influxdata/influxdb/services/collectd/service.go b/vendor/github.com/influxdata/influxdb/services/collectd/service.go new file mode 100644 index 0000000..05d515b --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/services/collectd/service.go @@ -0,0 +1,506 @@ +// Package collectd provides a service for InfluxDB to ingest data via the collectd protocol. +package collectd // import "github.com/influxdata/influxdb/services/collectd" + +import ( + "bytes" + "fmt" + "io/ioutil" + "net" + "os" + "path/filepath" + "strings" + "sync" + "sync/atomic" + "time" + + "collectd.org/api" + "collectd.org/network" + "github.com/influxdata/influxdb/logger" + "github.com/influxdata/influxdb/models" + "github.com/influxdata/influxdb/services/meta" + "github.com/influxdata/influxdb/tsdb" + "go.uber.org/zap" +) + +// statistics gathered by the collectd service. +const ( + statPointsReceived = "pointsRx" + statBytesReceived = "bytesRx" + statPointsParseFail = "pointsParseFail" + statReadFail = "readFail" + statBatchesTransmitted = "batchesTx" + statPointsTransmitted = "pointsTx" + statBatchesTransmitFail = "batchesTxFail" + statDroppedPointsInvalid = "droppedPointsInvalid" +) + +// pointsWriter is an internal interface to make testing easier. +type pointsWriter interface { + WritePointsPrivileged(database, retentionPolicy string, consistencyLevel models.ConsistencyLevel, points []models.Point) error +} + +// metaClient is an internal interface to make testing easier. +type metaClient interface { + CreateDatabase(name string) (*meta.DatabaseInfo, error) +} + +// TypesDBFile reads a collectd types db from a file. +func TypesDBFile(path string) (typesdb *api.TypesDB, err error) { + var reader *os.File + reader, err = os.Open(path) + if err == nil { + typesdb, err = api.NewTypesDB(reader) + } + return +} + +// Service represents a UDP server which receives metrics in collectd's binary +// protocol and stores them in InfluxDB. +type Service struct { + Config *Config + MetaClient metaClient + PointsWriter pointsWriter + Logger *zap.Logger + + wg sync.WaitGroup + conn *net.UDPConn + batcher *tsdb.PointBatcher + popts network.ParseOpts + addr net.Addr + + mu sync.RWMutex + ready bool // Has the required database been created? + done chan struct{} // Is the service closing or closed? + + // expvar-based stats. 
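+	// stats fields are updated atomically by the serve/writePoints goroutines
+	// and surfaced through the Statistics() method.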
+ stats *Statistics + defaultTags models.StatisticTags +} + +// NewService returns a new instance of the collectd service. +func NewService(c Config) *Service { + s := Service{ + // Use defaults where necessary. + Config: c.WithDefaults(), + + Logger: zap.NewNop(), + stats: &Statistics{}, + defaultTags: models.StatisticTags{"bind": c.BindAddress}, + } + + return &s +} + +// Open starts the service. +func (s *Service) Open() error { + s.mu.Lock() + defer s.mu.Unlock() + + if s.done != nil { + return nil // Already open. + } + s.done = make(chan struct{}) + + s.Logger.Info("Starting collectd service") + + if s.Config.BindAddress == "" { + return fmt.Errorf("bind address is blank") + } else if s.Config.Database == "" { + return fmt.Errorf("database name is blank") + } else if s.PointsWriter == nil { + return fmt.Errorf("PointsWriter is nil") + } + + if s.popts.TypesDB == nil { + // Open collectd types. + if stat, err := os.Stat(s.Config.TypesDB); err != nil { + return fmt.Errorf("Stat(): %s", err) + } else if stat.IsDir() { + alltypesdb, err := api.NewTypesDB(&bytes.Buffer{}) + if err != nil { + return err + } + var readdir func(path string) + readdir = func(path string) { + files, err := ioutil.ReadDir(path) + if err != nil { + s.Logger.Info("Unable to read directory", + zap.String("path", path), zap.Error(err)) + return + } + + for _, f := range files { + fullpath := filepath.Join(path, f.Name()) + if f.IsDir() { + readdir(fullpath) + continue + } + + s.Logger.Info("Loading types from file", zap.String("path", fullpath)) + types, err := TypesDBFile(fullpath) + if err != nil { + s.Logger.Info("Unable to parse collectd types file", zap.String("path", f.Name())) + continue + } + + alltypesdb.Merge(types) + } + } + readdir(s.Config.TypesDB) + s.popts.TypesDB = alltypesdb + } else { + s.Logger.Info("Loading types from file", zap.String("path", s.Config.TypesDB)) + types, err := TypesDBFile(s.Config.TypesDB) + if err != nil { + return fmt.Errorf("Open(): %s", err) + } + s.popts.TypesDB = types + } + } + + // Sets the security level according to the config. + // Default not necessary because we validate the config. + switch s.Config.SecurityLevel { + case "none": + s.popts.SecurityLevel = network.None + case "sign": + s.popts.SecurityLevel = network.Sign + case "encrypt": + s.popts.SecurityLevel = network.Encrypt + } + + // Sets the auth file according to the config. + if s.popts.PasswordLookup == nil { + s.popts.PasswordLookup = network.NewAuthFile(s.Config.AuthFile) + } + + // Resolve our address. + addr, err := net.ResolveUDPAddr("udp", s.Config.BindAddress) + if err != nil { + return fmt.Errorf("unable to resolve UDP address: %s", err) + } + s.addr = addr + + // Start listening + conn, err := net.ListenUDP("udp", addr) + if err != nil { + return fmt.Errorf("unable to listen on UDP: %s", err) + } + + if s.Config.ReadBuffer != 0 { + err = conn.SetReadBuffer(s.Config.ReadBuffer) + if err != nil { + return fmt.Errorf("unable to set UDP read buffer to %d: %s", + s.Config.ReadBuffer, err) + } + } + s.conn = conn + + s.Logger.Info("Listening on UDP", zap.Stringer("addr", conn.LocalAddr())) + + // Start the points batcher. + s.batcher = tsdb.NewPointBatcher(s.Config.BatchSize, s.Config.BatchPending, time.Duration(s.Config.BatchDuration)) + s.batcher.Start() + + // Create waitgroup for signalling goroutines to stop and start goroutines + // that process collectd packets. 
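+	// serve reads packets off the UDP socket; writePoints drains the batcher
+	// and writes each flushed batch to the configured database.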
+ s.wg.Add(2) + go func() { defer s.wg.Done(); s.serve() }() + go func() { defer s.wg.Done(); s.writePoints() }() + + return nil +} + +// Close stops the service. +func (s *Service) Close() error { + if wait := func() bool { + s.mu.Lock() + defer s.mu.Unlock() + + if s.closed() { + return false + } + close(s.done) + + // Close the connection, and wait for the goroutine to exit. + if s.conn != nil { + s.conn.Close() + } + if s.batcher != nil { + s.batcher.Stop() + } + return true + }(); !wait { + return nil // Already closed. + } + + // Wait with the lock unlocked. + s.wg.Wait() + + // Release all remaining resources. + s.mu.Lock() + defer s.mu.Unlock() + + s.conn = nil + s.batcher = nil + s.Logger.Info("Closed collectd service") + s.done = nil + return nil +} + +func (s *Service) closed() bool { + select { + case <-s.done: + // Service is closing. + return true + default: + } + return s.done == nil +} + +// createInternalStorage ensures that the required database has been created. +func (s *Service) createInternalStorage() error { + s.mu.RLock() + ready := s.ready + s.mu.RUnlock() + if ready { + return nil + } + + if _, err := s.MetaClient.CreateDatabase(s.Config.Database); err != nil { + return err + } + + // The service is now ready. + s.mu.Lock() + s.ready = true + s.mu.Unlock() + return nil +} + +// WithLogger sets the service's logger. +func (s *Service) WithLogger(log *zap.Logger) { + s.Logger = log.With(zap.String("service", "collectd")) +} + +// Statistics maintains statistics for the collectd service. +type Statistics struct { + PointsReceived int64 + BytesReceived int64 + PointsParseFail int64 + ReadFail int64 + BatchesTransmitted int64 + PointsTransmitted int64 + BatchesTransmitFail int64 + InvalidDroppedPoints int64 +} + +// Statistics returns statistics for periodic monitoring. +func (s *Service) Statistics(tags map[string]string) []models.Statistic { + return []models.Statistic{{ + Name: "collectd", + Tags: s.defaultTags.Merge(tags), + Values: map[string]interface{}{ + statPointsReceived: atomic.LoadInt64(&s.stats.PointsReceived), + statBytesReceived: atomic.LoadInt64(&s.stats.BytesReceived), + statPointsParseFail: atomic.LoadInt64(&s.stats.PointsParseFail), + statReadFail: atomic.LoadInt64(&s.stats.ReadFail), + statBatchesTransmitted: atomic.LoadInt64(&s.stats.BatchesTransmitted), + statPointsTransmitted: atomic.LoadInt64(&s.stats.PointsTransmitted), + statBatchesTransmitFail: atomic.LoadInt64(&s.stats.BatchesTransmitFail), + statDroppedPointsInvalid: atomic.LoadInt64(&s.stats.InvalidDroppedPoints), + }, + }} +} + +// SetTypes sets collectd types db. +func (s *Service) SetTypes(types string) (err error) { + reader := strings.NewReader(types) + s.popts.TypesDB, err = api.NewTypesDB(reader) + return +} + +// Addr returns the listener's address. It returns nil if listener is closed. +func (s *Service) Addr() net.Addr { + return s.conn.LocalAddr() +} + +func (s *Service) serve() { + // From https://collectd.org/wiki/index.php/Binary_protocol + // 1024 bytes (payload only, not including UDP / IP headers) + // In versions 4.0 through 4.7, the receive buffer has a fixed size + // of 1024 bytes. When longer packets are received, the trailing data + // is simply ignored. Since version 4.8, the buffer size can be + // configured. Version 5.0 will increase the default buffer size to + // 1452 bytes (the maximum payload size when using UDP/IPv6 over + // Ethernet). + buffer := make([]byte, 1452) + + for { + select { + case <-s.done: + // We closed the connection, time to go. 
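+			// (Close also closed the UDP socket, so a blocked ReadFromUDP below
+			// unblocks with a "use of closed network connection" error.)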
+ return + default: + // Keep processing. + } + + n, _, err := s.conn.ReadFromUDP(buffer) + if err != nil { + if strings.Contains(err.Error(), "use of closed network connection") { + select { + case <-s.done: + return + default: + // The socket wasn't closed by us so consider it an error. + } + } + atomic.AddInt64(&s.stats.ReadFail, 1) + s.Logger.Info("ReadFromUDP error", zap.Error(err)) + continue + } + if n > 0 { + atomic.AddInt64(&s.stats.BytesReceived, int64(n)) + s.handleMessage(buffer[:n]) + } + } +} + +func (s *Service) handleMessage(buffer []byte) { + valueLists, err := network.Parse(buffer, s.popts) + if err != nil { + atomic.AddInt64(&s.stats.PointsParseFail, 1) + s.Logger.Info("collectd parse error", zap.Error(err)) + return + } + var points []models.Point + for _, valueList := range valueLists { + if s.Config.ParseMultiValuePlugin == "join" { + points = s.UnmarshalValueListPacked(valueList) + } else { + points = s.UnmarshalValueList(valueList) + } + for _, p := range points { + s.batcher.In() <- p + } + atomic.AddInt64(&s.stats.PointsReceived, int64(len(points))) + } +} + +func (s *Service) writePoints() { + for { + select { + case <-s.done: + return + case batch := <-s.batcher.Out(): + // Will attempt to create database if not yet created. + if err := s.createInternalStorage(); err != nil { + s.Logger.Info("Required database not yet created", + logger.Database(s.Config.Database), zap.Error(err)) + continue + } + + if err := s.PointsWriter.WritePointsPrivileged(s.Config.Database, s.Config.RetentionPolicy, models.ConsistencyLevelAny, batch); err == nil { + atomic.AddInt64(&s.stats.BatchesTransmitted, 1) + atomic.AddInt64(&s.stats.PointsTransmitted, int64(len(batch))) + } else { + s.Logger.Info("Failed to write point batch to database", + logger.Database(s.Config.Database), zap.Error(err)) + atomic.AddInt64(&s.stats.BatchesTransmitFail, 1) + } + } + } +} + +// UnmarshalValueListPacked is an alternative to the original UnmarshalValueList. +// The difference is that the original provided measurements like (PLUGIN_DSNAME, ["value",xxx]) +// while this one will provide measurements like (PLUGIN, {["DSNAME",xxx]}). +// This effectively joins collectd data that should go together, such as: +// (df, {["used",1000],["free",2500]}). +func (s *Service) UnmarshalValueListPacked(vl *api.ValueList) []models.Point { + timestamp := vl.Time.UTC() + + var name = vl.Identifier.Plugin + tags := make(map[string]string, 4) + fields := make(map[string]interface{}, len(vl.Values)) + + if vl.Identifier.Host != "" { + tags["host"] = vl.Identifier.Host + } + if vl.Identifier.PluginInstance != "" { + tags["instance"] = vl.Identifier.PluginInstance + } + if vl.Identifier.Type != "" { + tags["type"] = vl.Identifier.Type + } + if vl.Identifier.TypeInstance != "" { + tags["type_instance"] = vl.Identifier.TypeInstance + } + + for i, v := range vl.Values { + fieldName := vl.DSName(i) + switch value := v.(type) { + case api.Gauge: + fields[fieldName] = float64(value) + case api.Derive: + fields[fieldName] = float64(value) + case api.Counter: + fields[fieldName] = float64(value) + } + } + // Drop invalid points + p, err := models.NewPoint(name, models.NewTags(tags), fields, timestamp) + if err != nil { + s.Logger.Info("Dropping point", zap.String("name", name), zap.Error(err)) + atomic.AddInt64(&s.stats.InvalidDroppedPoints, 1) + return nil + } + + return []models.Point{p} +} + +// UnmarshalValueList translates a ValueList into InfluxDB data points. 
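+// Unlike UnmarshalValueListPacked, every value becomes its own point named
+// PLUGIN_DSNAME with a single "value" field, e.g. a "df" value list with DS
+// names "used" and "free" yields the two points df_used and df_free.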
+func (s *Service) UnmarshalValueList(vl *api.ValueList) []models.Point { + timestamp := vl.Time.UTC() + + var points []models.Point + for i := range vl.Values { + name := fmt.Sprintf("%s_%s", vl.Identifier.Plugin, vl.DSName(i)) + tags := make(map[string]string, 4) + fields := make(map[string]interface{}, 1) + + // Convert interface back to actual type, then to float64 + switch value := vl.Values[i].(type) { + case api.Gauge: + fields["value"] = float64(value) + case api.Derive: + fields["value"] = float64(value) + case api.Counter: + fields["value"] = float64(value) + } + + if vl.Identifier.Host != "" { + tags["host"] = vl.Identifier.Host + } + if vl.Identifier.PluginInstance != "" { + tags["instance"] = vl.Identifier.PluginInstance + } + if vl.Identifier.Type != "" { + tags["type"] = vl.Identifier.Type + } + if vl.Identifier.TypeInstance != "" { + tags["type_instance"] = vl.Identifier.TypeInstance + } + + // Drop invalid points + p, err := models.NewPoint(name, models.NewTags(tags), fields, timestamp) + if err != nil { + s.Logger.Info("Dropping point", zap.String("name", name), zap.Error(err)) + atomic.AddInt64(&s.stats.InvalidDroppedPoints, 1) + continue + } + + points = append(points, p) + } + return points +} diff --git a/vendor/github.com/influxdata/influxdb/services/collectd/service_test.go b/vendor/github.com/influxdata/influxdb/services/collectd/service_test.go new file mode 100644 index 0000000..ac59cb2 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/services/collectd/service_test.go @@ -0,0 +1,731 @@ +package collectd + +import ( + "encoding/hex" + "errors" + "io/ioutil" + "net" + "os" + "path" + "strings" + "testing" + "time" + + "github.com/influxdata/influxdb/internal" + "github.com/influxdata/influxdb/logger" + "github.com/influxdata/influxdb/models" + "github.com/influxdata/influxdb/services/meta" + "github.com/influxdata/influxdb/toml" +) + +func TestService_OpenClose(t *testing.T) { + service := NewTestService(1, time.Second, "split") + + // Closing a closed service is fine. + if err := service.Service.Close(); err != nil { + t.Fatal(err) + } + + // Closing a closed service again is fine. + if err := service.Service.Close(); err != nil { + t.Fatal(err) + } + + if err := service.Service.Open(); err != nil { + t.Fatal(err) + } + + // Opening an already open service is fine. + if err := service.Service.Open(); err != nil { + t.Fatal(err) + } + + // Reopening a previously opened service is fine. + if err := service.Service.Close(); err != nil { + t.Fatal(err) + } + + if err := service.Service.Open(); err != nil { + t.Fatal(err) + } + + // Tidy up. + if err := service.Service.Close(); err != nil { + t.Fatal(err) + } +} + +// Test that the service can read types DB files from a directory. +func TestService_Open_TypesDBDir(t *testing.T) { + t.Parallel() + + // Make a temp dir to write types.db into. + tmpDir, err := ioutil.TempDir(os.TempDir(), "") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpDir) + + // Write types.db. + if err := ioutil.WriteFile(path.Join(tmpDir, "types.db"), []byte(typesDBText), 0777); err != nil { + t.Fatal(err) + } + + // Setup config to read all files in the temp dir. 
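+	// Pointing TypesDB at a directory (rather than a single file) exercises the
+	// recursive directory walk in Service.Open.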
+ c := Config{ + BindAddress: "127.0.0.1:0", + Database: "collectd_test", + BatchSize: 1000, + BatchDuration: toml.Duration(time.Second), + TypesDB: tmpDir, + } + + s := &TestService{ + Config: c, + Service: NewService(c), + MetaClient: &internal.MetaClientMock{}, + } + + if testing.Verbose() { + s.Service.WithLogger(logger.New(os.Stderr)) + } + + s.MetaClient.CreateDatabaseFn = func(name string) (*meta.DatabaseInfo, error) { + return nil, nil + } + + s.Service.PointsWriter = s + s.Service.MetaClient = s.MetaClient + + if err := s.Service.Open(); err != nil { + t.Fatal(err) + } + + if err := s.Service.Close(); err != nil { + t.Fatal(err) + } +} + +// Test that the service checks / creates the target database every time we +// try to write points. +func TestService_CreatesDatabase(t *testing.T) { + t.Parallel() + + s := NewTestService(1, time.Second, "split") + + s.WritePointsFn = func(string, string, models.ConsistencyLevel, []models.Point) error { + return nil + } + + called := make(chan struct{}) + s.MetaClient.CreateDatabaseFn = func(name string) (*meta.DatabaseInfo, error) { + if name != s.Config.Database { + t.Errorf("\n\texp = %s\n\tgot = %s\n", s.Config.Database, name) + } + // Allow some time for the caller to return and the ready status to + // be set. + time.AfterFunc(10*time.Millisecond, func() { called <- struct{}{} }) + return nil, errors.New("an error") + } + + if err := s.Service.Open(); err != nil { + t.Fatal(err) + } + + points, err := models.ParsePointsString(`cpu value=1`) + if err != nil { + t.Fatal(err) + } + + s.Service.batcher.In() <- points[0] // Send a point. + s.Service.batcher.Flush() + select { + case <-called: + // OK + case <-time.NewTimer(5 * time.Second).C: + t.Fatal("Service should have attempted to create database") + } + + // ready status should not have been switched due to meta client error. + s.Service.mu.RLock() + ready := s.Service.ready + s.Service.mu.RUnlock() + + if got, exp := ready, false; got != exp { + t.Fatalf("got %v, expected %v", got, exp) + } + + // This time MC won't cause an error. + s.MetaClient.CreateDatabaseFn = func(name string) (*meta.DatabaseInfo, error) { + // Allow some time for the caller to return and the ready status to + // be set. + time.AfterFunc(10*time.Millisecond, func() { called <- struct{}{} }) + return nil, nil + } + + s.Service.batcher.In() <- points[0] // Send a point. + s.Service.batcher.Flush() + select { + case <-called: + // OK + case <-time.NewTimer(5 * time.Second).C: + t.Fatal("Service should have attempted to create database") + } + + // ready status should not have been switched due to meta client error. + s.Service.mu.RLock() + ready = s.Service.ready + s.Service.mu.RUnlock() + + if got, exp := ready, true; got != exp { + t.Fatalf("got %v, expected %v", got, exp) + } + + s.Service.Close() +} + +// Test that the collectd service correctly batches points by BatchSize. +func TestService_BatchSize(t *testing.T) { + t.Parallel() + + totalPoints := len(expPoints) + + // Batch sizes that totalTestPoints divide evenly by. 
+ batchSizes := []int{1, 2, 13} + + for _, batchSize := range batchSizes { + func() { + s := NewTestService(batchSize, time.Second, "split") + + pointCh := make(chan models.Point) + s.WritePointsFn = func(database, retentionPolicy string, consistencyLevel models.ConsistencyLevel, points []models.Point) error { + if len(points) != batchSize { + t.Errorf("\n\texp = %d\n\tgot = %d\n", batchSize, len(points)) + } + + for _, p := range points { + pointCh <- p + } + return nil + } + + if err := s.Service.Open(); err != nil { + t.Fatal(err) + } + defer func() { t.Log("closing service"); s.Service.Close() }() + + // Get the address & port the service is listening on for collectd data. + addr := s.Service.Addr() + conn, err := net.Dial("udp", addr.String()) + if err != nil { + t.Fatal(err) + } + + // Send the test data to the service. + if n, err := conn.Write(testData); err != nil { + t.Fatal(err) + } else if n != len(testData) { + t.Fatalf("only sent %d of %d bytes", n, len(testData)) + } + + var points []models.Point + timer := time.NewTimer(time.Second) + Loop: + for { + timer.Reset(time.Second) + select { + case p := <-pointCh: + points = append(points, p) + if len(points) == totalPoints { + break Loop + } + case <-timer.C: + t.Logf("exp %d points, got %d", totalPoints, len(points)) + t.Fatal("timed out waiting for points from collectd service") + } + } + + if len(points) != totalPoints { + t.Fatalf("exp %d points, got %d", totalPoints, len(points)) + } + + for i, exp := range expPoints { + got := points[i].String() + if got != exp { + t.Fatalf("\n\texp = %s\n\tgot = %s\n", exp, got) + } + } + }() + } +} + +// Test that the parse-multi-value-plugin config works properly. +// The other tests already verify the 'split' config, so this only runs the 'join' test. +func TestService_ParseMultiValuePlugin(t *testing.T) { + t.Parallel() + + totalPoints := len(expPointsTupled) + + s := NewTestService(1, time.Second, "join") + + pointCh := make(chan models.Point, 1000) + s.WritePointsFn = func(database, retentionPolicy string, consistencyLevel models.ConsistencyLevel, points []models.Point) error { + for _, p := range points { + pointCh <- p + } + return nil + } + + if err := s.Service.Open(); err != nil { + t.Fatal(err) + } + defer func() { t.Log("closing service"); s.Service.Close() }() + + // Get the address & port the service is listening on for collectd data. + addr := s.Service.Addr() + conn, err := net.Dial("udp", addr.String()) + if err != nil { + t.Fatal(err) + } + + // Send the test data to the service. + if n, err := conn.Write(testData); err != nil { + t.Fatal(err) + } else if n != len(testData) { + t.Fatalf("only sent %d of %d bytes", n, len(testData)) + } + + var points []models.Point + + timer := time.NewTimer(time.Second) +Loop: + for { + timer.Reset(time.Second) + select { + case p := <-pointCh: + points = append(points, p) + if len(points) == totalPoints { + break Loop + } + case <-timer.C: + t.Logf("exp %d points, got %d", totalPoints, len(points)) + t.Fatal("timed out waiting for points from collectd service") + } + } + + for i, exp := range expPointsTupled { + got := points[i].String() + if got != exp { + t.Fatalf("\n\texp = %s\n\tgot = %s\n", exp, got) + } + } + +} + +// Test that the collectd service correctly batches points using BatchDuration. 
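+// The batch size below (5000) far exceeds the test data, so flushing is driven by the 250ms batch duration rather than by size.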
+func TestService_BatchDuration(t *testing.T) { + t.Parallel() + + totalPoints := len(expPoints) + + s := NewTestService(5000, 250*time.Millisecond, "split") + + pointCh := make(chan models.Point, 1000) + s.WritePointsFn = func(database, retentionPolicy string, consistencyLevel models.ConsistencyLevel, points []models.Point) error { + for _, p := range points { + pointCh <- p + } + return nil + } + + if err := s.Service.Open(); err != nil { + t.Fatal(err) + } + defer func() { t.Log("closing service"); s.Service.Close() }() + + // Get the address & port the service is listening on for collectd data. + addr := s.Service.Addr() + conn, err := net.Dial("udp", addr.String()) + if err != nil { + t.Fatal(err) + } + + // Send the test data to the service. + if n, err := conn.Write(testData); err != nil { + t.Fatal(err) + } else if n != len(testData) { + t.Fatalf("only sent %d of %d bytes", n, len(testData)) + } + + var points []models.Point + timer := time.NewTimer(time.Second) +Loop: + for { + timer.Reset(time.Second) + select { + case p := <-pointCh: + points = append(points, p) + if len(points) == totalPoints { + break Loop + } + case <-timer.C: + t.Logf("exp %d points, got %d", totalPoints, len(points)) + t.Fatal("timed out waiting for points from collectd service") + } + } + + if len(points) != totalPoints { + t.Fatalf("exp %d points, got %d", totalPoints, len(points)) + } + + for i, exp := range expPoints { + got := points[i].String() + if got != exp { + t.Fatalf("\n\texp = %s\n\tgot = %s\n", exp, got) + } + } +} + +type TestService struct { + Service *Service + Config Config + MetaClient *internal.MetaClientMock + WritePointsFn func(string, string, models.ConsistencyLevel, []models.Point) error +} + +func NewTestService(batchSize int, batchDuration time.Duration, parseOpt string) *TestService { + c := Config{ + BindAddress: "127.0.0.1:0", + Database: "collectd_test", + BatchSize: batchSize, + BatchDuration: toml.Duration(batchDuration), + ParseMultiValuePlugin: parseOpt, + } + + s := &TestService{ + Config: c, + Service: NewService(c), + MetaClient: &internal.MetaClientMock{}, + } + + s.MetaClient.CreateDatabaseFn = func(name string) (*meta.DatabaseInfo, error) { + return nil, nil + } + + s.Service.PointsWriter = s + s.Service.MetaClient = s.MetaClient + + // Set the collectd types using test string. + if err := s.Service.SetTypes(typesDBText); err != nil { + panic(err) + } + + if testing.Verbose() { + s.Service.WithLogger(logger.New(os.Stderr)) + } + + return s +} + +func (w *TestService) WritePointsPrivileged(database, retentionPolicy string, consistencyLevel models.ConsistencyLevel, points []models.Point) error { + return w.WritePointsFn(database, retentionPolicy, consistencyLevel, points) +} + +func check(err error) { + if err != nil { + panic(err) + } +} + +// Raw data sent by collectd, captured using Wireshark. 
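+// The hex strings decode to collectd binary-protocol parts (host, time, plugin, type and value parts) reported by host pf1-62-210-94-173.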
+var testData = func() []byte { + data := []string{ + "000000167066312d36322d3231302d39342d313733000001000c00000000544928ff0007000c0000000", + "0000000050002000c656e74726f7079000004000c656e74726f7079000006000f000101000000000000", + "7240000200086370750000030006310000040008637075000005000969646c65000006000f000100000", + "0000000a674620005000977616974000006000f00010000000000000000000002000764660000030005", + "00000400076466000005000d6c6976652d636f7700000600180002010100000000a090b641000000a0c", + "b6a2742000200086370750000030006310000040008637075000005000e696e74657272757074000006", + "000f00010000000000000000fe0005000c736f6674697271000006000f0001000000000000000000000", + "20007646600000300050000040007646600000500096c69766500000600180002010100000000000000", + "00000000e0ec972742000200086370750000030006310000040008637075000005000a737465616c000", + "006000f00010000000000000000000003000632000005000975736572000006000f0001000000000000", + "005f36000500096e696365000006000f0001000000000000000ad80002000e696e74657266616365000", + "0030005000004000e69665f6f6374657473000005000b64756d6d793000000600180002000000000000", + "00000000000000000000041a000200076466000004000764660000050008746d7000000600180002010", + "1000000000000f240000000a0ea97274200020008637075000003000632000004000863707500000500", + "0b73797374656d000006000f00010000000000000045d30002000e696e7465726661636500000300050", + "00004000f69665f7061636b657473000005000b64756d6d793000000600180002000000000000000000", + "00000000000000000f000200086370750000030006320000040008637075000005000969646c6500000", + "6000f0001000000000000a66480000200076466000003000500000400076466000005000d72756e2d6c", + "6f636b000006001800020101000000000000000000000000000054410002000e696e746572666163650", + "00004000e69665f6572726f7273000005000b64756d6d79300000060018000200000000000000000000", + "00000000000000000002000863707500000300063200000400086370750000050009776169740000060", + "00f00010000000000000000000005000e696e74657272757074000006000f0001000000000000000132", + } + b, err := hex.DecodeString(strings.Join(data, "")) + check(err) + return b +}() + +var expPoints = []string{ + "entropy_value,host=pf1-62-210-94-173,type=entropy value=288 1414080767000000000", + "cpu_value,host=pf1-62-210-94-173,instance=1,type=cpu,type_instance=idle value=10908770 1414080767000000000", + "cpu_value,host=pf1-62-210-94-173,instance=1,type=cpu,type_instance=wait value=0 1414080767000000000", + "df_used,host=pf1-62-210-94-173,type=df,type_instance=live-cow value=378576896 1414080767000000000", + "df_free,host=pf1-62-210-94-173,type=df,type_instance=live-cow value=50287988736 1414080767000000000", + "cpu_value,host=pf1-62-210-94-173,instance=1,type=cpu,type_instance=interrupt value=254 1414080767000000000", + "cpu_value,host=pf1-62-210-94-173,instance=1,type=cpu,type_instance=softirq value=0 1414080767000000000", + "df_used,host=pf1-62-210-94-173,type=df,type_instance=live value=0 1414080767000000000", + "df_free,host=pf1-62-210-94-173,type=df,type_instance=live value=50666565632 1414080767000000000", + "cpu_value,host=pf1-62-210-94-173,instance=1,type=cpu,type_instance=steal value=0 1414080767000000000", + "cpu_value,host=pf1-62-210-94-173,instance=2,type=cpu,type_instance=user value=24374 1414080767000000000", + "cpu_value,host=pf1-62-210-94-173,instance=2,type=cpu,type_instance=nice value=2776 1414080767000000000", + "interface_rx,host=pf1-62-210-94-173,type=if_octets,type_instance=dummy0 value=0 1414080767000000000", + "interface_tx,host=pf1-62-210-94-173,type=if_octets,type_instance=dummy0 
value=1050 1414080767000000000", + "df_used,host=pf1-62-210-94-173,type=df,type_instance=tmp value=73728 1414080767000000000", + "df_free,host=pf1-62-210-94-173,type=df,type_instance=tmp value=50666491904 1414080767000000000", + "cpu_value,host=pf1-62-210-94-173,instance=2,type=cpu,type_instance=system value=17875 1414080767000000000", + "interface_rx,host=pf1-62-210-94-173,type=if_packets,type_instance=dummy0 value=0 1414080767000000000", + "interface_tx,host=pf1-62-210-94-173,type=if_packets,type_instance=dummy0 value=15 1414080767000000000", + "cpu_value,host=pf1-62-210-94-173,instance=2,type=cpu,type_instance=idle value=10904704 1414080767000000000", + "df_used,host=pf1-62-210-94-173,type=df,type_instance=run-lock value=0 1414080767000000000", + "df_free,host=pf1-62-210-94-173,type=df,type_instance=run-lock value=5242880 1414080767000000000", + "interface_rx,host=pf1-62-210-94-173,type=if_errors,type_instance=dummy0 value=0 1414080767000000000", + "interface_tx,host=pf1-62-210-94-173,type=if_errors,type_instance=dummy0 value=0 1414080767000000000", + "cpu_value,host=pf1-62-210-94-173,instance=2,type=cpu,type_instance=wait value=0 1414080767000000000", + "cpu_value,host=pf1-62-210-94-173,instance=2,type=cpu,type_instance=interrupt value=306 1414080767000000000", +} + +var expPointsTupled = []string{ + "entropy,host=pf1-62-210-94-173,type=entropy value=288 1414080767000000000", + "cpu,host=pf1-62-210-94-173,instance=1,type=cpu,type_instance=idle value=10908770 1414080767000000000", + "cpu,host=pf1-62-210-94-173,instance=1,type=cpu,type_instance=wait value=0 1414080767000000000", + "df,host=pf1-62-210-94-173,type=df,type_instance=live-cow free=50287988736,used=378576896 1414080767000000000", + "cpu,host=pf1-62-210-94-173,instance=1,type=cpu,type_instance=interrupt value=254 1414080767000000000", + "cpu,host=pf1-62-210-94-173,instance=1,type=cpu,type_instance=softirq value=0 1414080767000000000", + "df,host=pf1-62-210-94-173,type=df,type_instance=live free=50666565632,used=0 1414080767000000000", + "cpu,host=pf1-62-210-94-173,instance=1,type=cpu,type_instance=steal value=0 1414080767000000000", + "cpu,host=pf1-62-210-94-173,instance=2,type=cpu,type_instance=user value=24374 1414080767000000000", + "cpu,host=pf1-62-210-94-173,instance=2,type=cpu,type_instance=nice value=2776 1414080767000000000", + "interface,host=pf1-62-210-94-173,type=if_octets,type_instance=dummy0 rx=0,tx=1050 1414080767000000000", + "df,host=pf1-62-210-94-173,type=df,type_instance=tmp free=50666491904,used=73728 1414080767000000000", + "cpu,host=pf1-62-210-94-173,instance=2,type=cpu,type_instance=system value=17875 1414080767000000000", + "interface,host=pf1-62-210-94-173,type=if_packets,type_instance=dummy0 rx=0,tx=15 1414080767000000000", + "cpu,host=pf1-62-210-94-173,instance=2,type=cpu,type_instance=idle value=10904704 1414080767000000000", + "df,host=pf1-62-210-94-173,type=df,type_instance=run-lock free=5242880,used=0 1414080767000000000", + "interface,host=pf1-62-210-94-173,type=if_errors,type_instance=dummy0 rx=0,tx=0 1414080767000000000", + "cpu,host=pf1-62-210-94-173,instance=2,type=cpu,type_instance=wait value=0 1414080767000000000", + "cpu,host=pf1-62-210-94-173,instance=2,type=cpu,type_instance=interrupt value=306 1414080767000000000", +} + +// Taken from /usr/share/collectd/types.db on a Ubuntu system +var typesDBText = ` +absolute value:ABSOLUTE:0:U +apache_bytes value:DERIVE:0:U +apache_connections value:GAUGE:0:65535 +apache_idle_workers value:GAUGE:0:65535 +apache_requests value:DERIVE:0:U 
+apache_scoreboard value:GAUGE:0:65535 +ath_nodes value:GAUGE:0:65535 +ath_stat value:DERIVE:0:U +backends value:GAUGE:0:65535 +bitrate value:GAUGE:0:4294967295 +bytes value:GAUGE:0:U +cache_eviction value:DERIVE:0:U +cache_operation value:DERIVE:0:U +cache_ratio value:GAUGE:0:100 +cache_result value:DERIVE:0:U +cache_size value:GAUGE:0:4294967295 +charge value:GAUGE:0:U +compression_ratio value:GAUGE:0:2 +compression uncompressed:DERIVE:0:U, compressed:DERIVE:0:U +connections value:DERIVE:0:U +conntrack value:GAUGE:0:4294967295 +contextswitch value:DERIVE:0:U +counter value:COUNTER:U:U +cpufreq value:GAUGE:0:U +cpu value:DERIVE:0:U +current_connections value:GAUGE:0:U +current_sessions value:GAUGE:0:U +current value:GAUGE:U:U +delay value:GAUGE:-1000000:1000000 +derive value:DERIVE:0:U +df_complex value:GAUGE:0:U +df_inodes value:GAUGE:0:U +df used:GAUGE:0:1125899906842623, free:GAUGE:0:1125899906842623 +disk_latency read:GAUGE:0:U, write:GAUGE:0:U +disk_merged read:DERIVE:0:U, write:DERIVE:0:U +disk_octets read:DERIVE:0:U, write:DERIVE:0:U +disk_ops_complex value:DERIVE:0:U +disk_ops read:DERIVE:0:U, write:DERIVE:0:U +disk_time read:DERIVE:0:U, write:DERIVE:0:U +dns_answer value:DERIVE:0:U +dns_notify value:DERIVE:0:U +dns_octets queries:DERIVE:0:U, responses:DERIVE:0:U +dns_opcode value:DERIVE:0:U +dns_qtype_cached value:GAUGE:0:4294967295 +dns_qtype value:DERIVE:0:U +dns_query value:DERIVE:0:U +dns_question value:DERIVE:0:U +dns_rcode value:DERIVE:0:U +dns_reject value:DERIVE:0:U +dns_request value:DERIVE:0:U +dns_resolver value:DERIVE:0:U +dns_response value:DERIVE:0:U +dns_transfer value:DERIVE:0:U +dns_update value:DERIVE:0:U +dns_zops value:DERIVE:0:U +duration seconds:GAUGE:0:U +email_check value:GAUGE:0:U +email_count value:GAUGE:0:U +email_size value:GAUGE:0:U +entropy value:GAUGE:0:4294967295 +fanspeed value:GAUGE:0:U +file_size value:GAUGE:0:U +files value:GAUGE:0:U +fork_rate value:DERIVE:0:U +frequency_offset value:GAUGE:-1000000:1000000 +frequency value:GAUGE:0:U +fscache_stat value:DERIVE:0:U +gauge value:GAUGE:U:U +hash_collisions value:DERIVE:0:U +http_request_methods value:DERIVE:0:U +http_requests value:DERIVE:0:U +http_response_codes value:DERIVE:0:U +humidity value:GAUGE:0:100 +if_collisions value:DERIVE:0:U +if_dropped rx:DERIVE:0:U, tx:DERIVE:0:U +if_errors rx:DERIVE:0:U, tx:DERIVE:0:U +if_multicast value:DERIVE:0:U +if_octets rx:DERIVE:0:U, tx:DERIVE:0:U +if_packets rx:DERIVE:0:U, tx:DERIVE:0:U +if_rx_errors value:DERIVE:0:U +if_rx_octets value:DERIVE:0:U +if_tx_errors value:DERIVE:0:U +if_tx_octets value:DERIVE:0:U +invocations value:DERIVE:0:U +io_octets rx:DERIVE:0:U, tx:DERIVE:0:U +io_packets rx:DERIVE:0:U, tx:DERIVE:0:U +ipt_bytes value:DERIVE:0:U +ipt_packets value:DERIVE:0:U +irq value:DERIVE:0:U +latency value:GAUGE:0:U +links value:GAUGE:0:U +load shortterm:GAUGE:0:5000, midterm:GAUGE:0:5000, longterm:GAUGE:0:5000 +md_disks value:GAUGE:0:U +memcached_command value:DERIVE:0:U +memcached_connections value:GAUGE:0:U +memcached_items value:GAUGE:0:U +memcached_octets rx:DERIVE:0:U, tx:DERIVE:0:U +memcached_ops value:DERIVE:0:U +memory value:GAUGE:0:281474976710656 +multimeter value:GAUGE:U:U +mutex_operations value:DERIVE:0:U +mysql_commands value:DERIVE:0:U +mysql_handler value:DERIVE:0:U +mysql_locks value:DERIVE:0:U +mysql_log_position value:DERIVE:0:U +mysql_octets rx:DERIVE:0:U, tx:DERIVE:0:U +nfs_procedure value:DERIVE:0:U +nginx_connections value:GAUGE:0:U +nginx_requests value:DERIVE:0:U +node_octets rx:DERIVE:0:U, tx:DERIVE:0:U +node_rssi 
value:GAUGE:0:255 +node_stat value:DERIVE:0:U +node_tx_rate value:GAUGE:0:127 +objects value:GAUGE:0:U +operations value:DERIVE:0:U +percent value:GAUGE:0:100.1 +percent_bytes value:GAUGE:0:100.1 +percent_inodes value:GAUGE:0:100.1 +pf_counters value:DERIVE:0:U +pf_limits value:DERIVE:0:U +pf_source value:DERIVE:0:U +pf_states value:GAUGE:0:U +pf_state value:DERIVE:0:U +pg_blks value:DERIVE:0:U +pg_db_size value:GAUGE:0:U +pg_n_tup_c value:DERIVE:0:U +pg_n_tup_g value:GAUGE:0:U +pg_numbackends value:GAUGE:0:U +pg_scan value:DERIVE:0:U +pg_xact value:DERIVE:0:U +ping_droprate value:GAUGE:0:100 +ping_stddev value:GAUGE:0:65535 +ping value:GAUGE:0:65535 +players value:GAUGE:0:1000000 +power value:GAUGE:0:U +protocol_counter value:DERIVE:0:U +ps_code value:GAUGE:0:9223372036854775807 +ps_count processes:GAUGE:0:1000000, threads:GAUGE:0:1000000 +ps_cputime user:DERIVE:0:U, syst:DERIVE:0:U +ps_data value:GAUGE:0:9223372036854775807 +ps_disk_octets read:DERIVE:0:U, write:DERIVE:0:U +ps_disk_ops read:DERIVE:0:U, write:DERIVE:0:U +ps_pagefaults minflt:DERIVE:0:U, majflt:DERIVE:0:U +ps_rss value:GAUGE:0:9223372036854775807 +ps_stacksize value:GAUGE:0:9223372036854775807 +ps_state value:GAUGE:0:65535 +ps_vm value:GAUGE:0:9223372036854775807 +queue_length value:GAUGE:0:U +records value:GAUGE:0:U +requests value:GAUGE:0:U +response_time value:GAUGE:0:U +response_code value:GAUGE:0:U +route_etx value:GAUGE:0:U +route_metric value:GAUGE:0:U +routes value:GAUGE:0:U +serial_octets rx:DERIVE:0:U, tx:DERIVE:0:U +signal_noise value:GAUGE:U:0 +signal_power value:GAUGE:U:0 +signal_quality value:GAUGE:0:U +snr value:GAUGE:0:U +spam_check value:GAUGE:0:U +spam_score value:GAUGE:U:U +spl value:GAUGE:U:U +swap_io value:DERIVE:0:U +swap value:GAUGE:0:1099511627776 +tcp_connections value:GAUGE:0:4294967295 +temperature value:GAUGE:U:U +threads value:GAUGE:0:U +time_dispersion value:GAUGE:-1000000:1000000 +timeleft value:GAUGE:0:U +time_offset value:GAUGE:-1000000:1000000 +total_bytes value:DERIVE:0:U +total_connections value:DERIVE:0:U +total_objects value:DERIVE:0:U +total_operations value:DERIVE:0:U +total_requests value:DERIVE:0:U +total_sessions value:DERIVE:0:U +total_threads value:DERIVE:0:U +total_time_in_ms value:DERIVE:0:U +total_values value:DERIVE:0:U +uptime value:GAUGE:0:4294967295 +users value:GAUGE:0:65535 +vcl value:GAUGE:0:65535 +vcpu value:GAUGE:0:U +virt_cpu_total value:DERIVE:0:U +virt_vcpu value:DERIVE:0:U +vmpage_action value:DERIVE:0:U +vmpage_faults minflt:DERIVE:0:U, majflt:DERIVE:0:U +vmpage_io in:DERIVE:0:U, out:DERIVE:0:U +vmpage_number value:GAUGE:0:4294967295 +volatile_changes value:GAUGE:0:U +voltage_threshold value:GAUGE:U:U, threshold:GAUGE:U:U +voltage value:GAUGE:U:U +vs_memory value:GAUGE:0:9223372036854775807 +vs_processes value:GAUGE:0:65535 +vs_threads value:GAUGE:0:65535 +# +# Legacy types +# (required for the v5 upgrade target) +# +arc_counts demand_data:COUNTER:0:U, demand_metadata:COUNTER:0:U, prefetch_data:COUNTER:0:U, prefetch_metadata:COUNTER:0:U +arc_l2_bytes read:COUNTER:0:U, write:COUNTER:0:U +arc_l2_size value:GAUGE:0:U +arc_ratio value:GAUGE:0:U +arc_size current:GAUGE:0:U, target:GAUGE:0:U, minlimit:GAUGE:0:U, maxlimit:GAUGE:0:U +mysql_qcache hits:COUNTER:0:U, inserts:COUNTER:0:U, not_cached:COUNTER:0:U, lowmem_prunes:COUNTER:0:U, queries_in_cache:GAUGE:0:U +mysql_threads running:GAUGE:0:U, connected:GAUGE:0:U, cached:GAUGE:0:U, created:COUNTER:0:U +` diff --git a/vendor/github.com/influxdata/influxdb/services/collectd/test_client/README.md 
b/vendor/github.com/influxdata/influxdb/services/collectd/test_client/README.md new file mode 100644 index 0000000..90de2b2 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/services/collectd/test_client/README.md @@ -0,0 +1,3 @@ +collectD Client +============ +This directory contains code for generating collectd load. diff --git a/vendor/github.com/influxdata/influxdb/services/collectd/test_client/client.go b/vendor/github.com/influxdata/influxdb/services/collectd/test_client/client.go new file mode 100644 index 0000000..f947709 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/services/collectd/test_client/client.go @@ -0,0 +1,70 @@ +package main + +import ( + "collectd.org/api" + "collectd.org/network" + + "context" + "flag" + "fmt" + "math/rand" + "os" + "strconv" + "time" +) + +var nMeasurments = flag.Int("m", 1, "Number of measurements") +var tagVariance = flag.Int("v", 1, "Number of values per tag. Client is fixed at one tag") +var rate = flag.Int("r", 1, "Number of points per second") +var total = flag.Int("t", -1, "Total number of points to send (default is no limit)") +var host = flag.String("u", "127.0.0.1:25826", "Destination host in the form host:port") + +func main() { + flag.Parse() + + conn, err := network.Dial(*host, network.ClientOptions{}) + if err != nil { + fmt.Println(err) + os.Exit(1) + } + defer conn.Close() + + rateLimiter := make(chan int, *rate) + + go func() { + ticker := time.NewTicker(time.Second) + for range ticker.C { + for i := 0; i < *rate; i++ { + rateLimiter <- i + } + } + }() + + nSent := 0 + for { + if nSent >= *total && *total > 0 { + break + } + <-rateLimiter + + vl := api.ValueList{ + Identifier: api.Identifier{ + Host: "tagvalue" + strconv.Itoa(int(rand.Int31n(int32(*tagVariance)))), + Plugin: "golang" + strconv.Itoa(int(rand.Int31n(int32(*nMeasurments)))), + Type: "gauge", + }, + Time: time.Now(), + Interval: 10 * time.Second, + Values: []api.Value{api.Gauge(42.0)}, + } + ctx := context.TODO() + if err := conn.Write(ctx, &vl); err != nil { + fmt.Println(err) + os.Exit(1) + } + conn.Flush() + nSent = nSent + 1 + } + + fmt.Println("Number of points sent:", nSent) +} diff --git a/vendor/github.com/influxdata/influxdb/services/continuous_querier/config.go b/vendor/github.com/influxdata/influxdb/services/continuous_querier/config.go new file mode 100644 index 0000000..abf4c05 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/services/continuous_querier/config.go @@ -0,0 +1,74 @@ +package continuous_querier + +import ( + "errors" + "time" + + "github.com/influxdata/influxdb/monitor/diagnostics" + "github.com/influxdata/influxdb/toml" +) + +// Default values for aspects of interval computation. +const ( + // The default value of how often to check whether any CQs need to be run. + DefaultRunInterval = time.Second +) + +// Config represents a configuration for the continuous query service. +type Config struct { + // Enables logging in CQ service to display when CQ's are processed and how many points were written. + LogEnabled bool `toml:"log-enabled"` + + // If this flag is set to false, both the brokers and data nodes should ignore any CQ processing. + Enabled bool `toml:"enabled"` + + // QueryStatsEnabled enables logging of individual query execution statistics to the self-monitoring data + // store. The default is false. + QueryStatsEnabled bool `toml:"query-stats-enabled"` + + // Run interval for checking continuous queries. This should be set to the least common factor + // of the interval for running continuous queries. 
If you only aggregate continuous queries + // every minute, this should be set to 1 minute. The default is set to '1s' so the interval + // is compatible with most aggregations. + RunInterval toml.Duration `toml:"run-interval"` +} + +// NewConfig returns a new instance of Config with defaults. +func NewConfig() Config { + return Config{ + LogEnabled: true, + Enabled: true, + QueryStatsEnabled: false, + RunInterval: toml.Duration(DefaultRunInterval), + } +} + +// Validate returns an error if the Config is invalid. +func (c Config) Validate() error { + if !c.Enabled { + return nil + } + + // TODO: Should we enforce a minimum interval? + // Polling every nanosecond, for instance, will greatly impact performance. + if c.RunInterval <= 0 { + return errors.New("run-interval must be positive") + } + + return nil +} + +// Diagnostics returns a diagnostics representation of a subset of the Config. +func (c Config) Diagnostics() (*diagnostics.Diagnostics, error) { + if !c.Enabled { + return diagnostics.RowFromMap(map[string]interface{}{ + "enabled": false, + }), nil + } + + return diagnostics.RowFromMap(map[string]interface{}{ + "enabled": true, + "query-stats-enabled": c.QueryStatsEnabled, + "run-interval": c.RunInterval, + }), nil +} diff --git a/vendor/github.com/influxdata/influxdb/services/continuous_querier/config_test.go b/vendor/github.com/influxdata/influxdb/services/continuous_querier/config_test.go new file mode 100644 index 0000000..c5b9bc7 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/services/continuous_querier/config_test.go @@ -0,0 +1,46 @@ +package continuous_querier_test + +import ( + "testing" + "time" + + "github.com/BurntSushi/toml" + "github.com/influxdata/influxdb/services/continuous_querier" +) + +func TestConfig_Parse(t *testing.T) { + // Parse configuration. + var c continuous_querier.Config + if _, err := toml.Decode(` +run-interval = "1m" +enabled = true +`, &c); err != nil { + t.Fatal(err) + } + + // Validate configuration. + if time.Duration(c.RunInterval) != time.Minute { + t.Fatalf("unexpected run interval: %v", c.RunInterval) + } else if !c.Enabled { + t.Fatalf("unexpected enabled: %v", c.Enabled) + } +} + +func TestConfig_Validate(t *testing.T) { + c := continuous_querier.NewConfig() + if err := c.Validate(); err != nil { + t.Fatalf("unexpected validation fail from NewConfig: %s", err) + } + + c = continuous_querier.NewConfig() + c.RunInterval = 0 + if err := c.Validate(); err == nil { + t.Fatal("expected error for run-interval = 0, got nil") + } + + c = continuous_querier.NewConfig() + c.RunInterval *= -1 + if err := c.Validate(); err == nil { + t.Fatal("expected error for negative run-interval, got nil") + } +} diff --git a/vendor/github.com/influxdata/influxdb/services/continuous_querier/continuous_queries.md b/vendor/github.com/influxdata/influxdb/services/continuous_querier/continuous_queries.md new file mode 100644 index 0000000..7835322 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/services/continuous_querier/continuous_queries.md @@ -0,0 +1,235 @@ +# Continuous Queries + +This document lays out continuous queries and a proposed architecture for how they'll work within an InfluxDB cluster. + +## Definition of Continuous Queries + +Continuous queries serve two purposes in InfluxDB: + +1. Combining many series into a single series (i.e. removing 1 or more tag dimensions to make queries more efficient) +2. 
Aggregating and downsampling series + +The purpose of both types of continuous query is to duplicate or downsample data automatically in the background, to make querying their results fast and efficient. Think of them as another way to create indexes on data. + +Generally, there are continuous queries that create copies of data into another measurement or tagset, and queries that downsample and aggregate data. The only difference between the two types is if the query has a `GROUP BY time` clause. + +Before we get to the continuous query examples, we need to define the `INTO` syntax of queries. + +### INTO + +`INTO` is a method for running a query and having it output into either another measurement name, retention policy, or database. The syntax looks like this: + +```sql +SELECT * +INTO [<retention policy>.]<measurement> [ON <database>] +FROM <measurement> +[WHERE ...] +[GROUP BY ...] +``` + +The syntax states that the retention policy, database, where clause, and group by clause are all optional. If a retention policy isn't specified, the database's default retention policy will be written into. If the database isn't specified, the database the query is running from will be written into. + +By selecting specific fields, `INTO` can merge many series into one that will go into either a new measurement, retention policy, or database. For example: + +```sql +SELECT mean(value) as value, region +INTO "1h.cpu_load" +FROM cpu_load +GROUP BY time(1h), region +``` + +That will give 1h summaries of the mean value of the `cpu_load` for each `region`. Specifying `region` in the `GROUP BY` clause is unnecessary since having it in the `SELECT` clause forces it to be grouped by that tag; we've just included it in the example for clarity. + +With `SELECT ... INTO`, fields will be written as fields and tags will be written as tags. + +### Continuous Query Syntax + +The `INTO` queries run once. Continuous queries will turn `INTO` queries into something that runs in the background in the cluster. They're kind of like triggers in SQL. + +```sql +CREATE CONTINUOUS QUERY "1h_cpu_load" +ON database_name +BEGIN + SELECT mean(value) as value, region + INTO "1h.cpu_load" + FROM cpu_load + GROUP BY time(1h), region +END +``` + +Or chain them together: + +```sql +CREATE CONTINUOUS QUERY "10m_event_count" +ON database_name +BEGIN + SELECT count(value) + INTO "10m.events" + FROM events + GROUP BY time(10m) +END + +-- this selects from the output of one continuous query and outputs to another series +CREATE CONTINUOUS QUERY "1h_event_count" +ON database_name +BEGIN + SELECT sum(count) as count + INTO "1h.events" + FROM events + GROUP BY time(1h) +END +``` + +Or multiple aggregations from all series in a measurement. This example assumes you have a retention policy named `1h`. + +```sql +CREATE CONTINUOUS QUERY "1h_cpu_load" +ON database_name +BEGIN + SELECT mean(value), percentile(80, value) as percentile_80, percentile(95, value) as percentile_95 + INTO "1h.cpu_load" + FROM cpu_load + GROUP BY time(1h), * +END +``` + +The `GROUP BY *` indicates that we want to group by the tagset of the points written in. The same tags will be written to the output series. The multiple aggregates in the `SELECT` clause (percentile, mean) will be written in as fields to the resulting series. + +Showing what continuous queries we have: + +```sql +SHOW CONTINUOUS QUERIES +``` + +Dropping continuous queries: + +```sql +DROP CONTINUOUS QUERY <name> ON <database> +``` + +### Security + +To create or drop a continuous query, the user must be an admin.
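+ +As a rough sketch of the intended workflow (the `CREATE USER` statement below is standard InfluxQL user management rather than anything defined in this document, and the user name and password are placeholders): + +```sql +-- assumed: standard InfluxQL user management; "paul" is a placeholder admin account +CREATE USER paul WITH PASSWORD 'super_secret_password' WITH ALL PRIVILEGES + +-- as an admin, create a continuous query... +CREATE CONTINUOUS QUERY "10m_event_count" +ON database_name +BEGIN + SELECT count(value) + INTO "10m.events" + FROM events + GROUP BY time(10m) +END + +-- ...and later drop it again +DROP CONTINUOUS QUERY "10m_event_count" ON database_name +``` + +A non-admin user issuing either statement should be rejected with a permissions error.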
+ +### Limitations + +In order to prevent cycles and endless copying of data, the following limitation is enforced on continuous queries at create time: + +*The output of a continuous query must go to either a different measurement or to a different retention policy.* + +In theory they'd still be able to create a cycle with multiple continuous queries. We should check for these and disallow. + +## Proposed Architecture + +Continuous queries should be stored in the metastore cluster wide. That is, they amount to a database schema that should be stored in every server in a cluster. + +Continuous queries will have to be handled in a different way for two different use cases: those that simply copy data (CQs without a group by time) and those that aggregate and downsample data (those with a group by time). + +### No GROUP BY time + +For CQs that have no `GROUP BY time` clause, they should be evaluated at the data node as part of the write. The single write should create any other writes for the CQ and submit those in the same request to the brokers to ensure that all writes succeed (both the original and the new CQ writes) or none do. + +I imagine the process going something like this: + +1. Convert the data point into its compact form `