diff --git a/.flake8 b/.flake8 deleted file mode 100644 index 1e7127dc..00000000 --- a/.flake8 +++ /dev/null @@ -1,4 +0,0 @@ -[flake8] -ignore = E501, F821 -show-source = True - diff --git a/.github/workflows/ci-pull-request.yml b/.github/workflows/ci-pull-request.yml deleted file mode 100644 index 7bcdd819..00000000 --- a/.github/workflows/ci-pull-request.yml +++ /dev/null @@ -1,69 +0,0 @@ -name: CI - Pull Request - -on: - pull_request: - branches: - - master - -jobs: - test: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - - uses: actions/setup-python@v2 - with: - python-version: '3.8' - - - name: Install pipenv - run: python -m pip install pipenv - - - uses: actions/cache@v2 - name: Cache Pipenv dependencies - with: - path: | - ~/.cache - ~/.local/share/virtualenvs/ - key: ${{ runner.os }}-pipenv-${{ hashFiles('**/Pipfile.lock') }} - restore-keys: | - ${{ runner.os }}-pipenv- - - - name: Get dependencies - run: pipenv install -d - - - name: Lint - continue-on-error: true - run: | - # stop the build if there are Python syntax errors or undefined names - pipenv run flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics - # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide - pipenv run flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics - - - name: Travis Test - Start agent - id: start_agent - env: - PYTHON_SDC_TEST_ACCESS_KEY: ${{ secrets.STAGING_AGENT_KEY }} - run: | - sudo apt-get install linux-headers-$(uname -r) dkms gcc-multilib g++-multilib - ./test/start_agent.sh - - - name: Travis Test - Install dependencies - run: pip install . 
- - - name: Travis Test - Secure APIs - env: - PYTHON_SDC_TEST_API_TOKEN: ${{ secrets.STAGING_SECURE_API_TOKEN }} - run: ./test/test_secure_apis.sh - - - name: Test in staging - env: - SDC_MONITOR_TOKEN: ${{ secrets.STAGING_MONITOR_API_TOKEN }} - SDC_SECURE_TOKEN: ${{ secrets.STAGING_SECURE_API_TOKEN }} - SDC_MONITOR_URL: "https://app-staging.sysdigcloud.com" - SDC_SECURE_URL: "https://secure-staging.sysdig.com" - run: | - pipenv run mamba -f documentation - - - name: Travis Test - Stop agent - run: ./test/stop_agent.sh - if: steps.start_agent.outcome == 'success' diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml deleted file mode 100644 index 2f7d30b3..00000000 --- a/.github/workflows/codeql-analysis.yml +++ /dev/null @@ -1,71 +0,0 @@ -# For most projects, this workflow file will not need changing; you simply need -# to commit it to your repository. -# -# You may wish to alter this file to override the set of languages analyzed, -# or to provide custom queries or build logic. -name: "CodeQL" - -on: - push: - branches: [master] - pull_request: - # The branches below must be a subset of the branches above - branches: [master] - schedule: - - cron: '0 4 * * 4' - -jobs: - analyze: - name: Analyze - runs-on: ubuntu-latest - - strategy: - fail-fast: false - matrix: - # Override automatic language detection by changing the below list - # Supported options are ['csharp', 'cpp', 'go', 'java', 'javascript', 'python'] - language: ['python'] - # Learn more... - # https://docs.github.com/en/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#overriding-automatic-language-detection - - steps: - - name: Checkout repository - uses: actions/checkout@v2 - with: - # We must fetch at least the immediate parents so that if this is - # a pull request then we can checkout the head. 
- fetch-depth: 2 - - # If this run was triggered by a pull request event, then checkout - # the head of the pull request instead of the merge commit. - - run: git checkout HEAD^2 - if: ${{ github.event_name == 'pull_request' }} - - # Initializes the CodeQL tools for scanning. - - name: Initialize CodeQL - uses: github/codeql-action/init@v1 - with: - languages: ${{ matrix.language }} - # If you wish to specify custom queries, you can do so here or in a config file. - # By default, queries listed here will override any specified in a config file. - # Prefix the list here with "+" to use these queries and those in the config file. - # queries: ./path/to/local/query, your-org/your-repo/queries@main - - # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). - # If this step fails, then you should remove it and run the build manually (see below) - - name: Autobuild - uses: github/codeql-action/autobuild@v1 - - # â„šī¸ Command-line programs to run using the OS shell. - # 📚 https://git.io/JvXDl - - # âœī¸ If the Autobuild fails above, remove it and uncomment the following three lines - # and modify them (or add more) to build your code if your project - # uses a compiled language - - #- run: | - # make bootstrap - # make release - - - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@v1 diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml deleted file mode 100644 index 553376f9..00000000 --- a/.github/workflows/release.yml +++ /dev/null @@ -1,56 +0,0 @@ -name: Release - -on: - push: - tags: - - v* - -jobs: - release: - runs-on: ubuntu-latest - outputs: - upload_url: ${{ steps.create_release.outputs.upload_url }} - - steps: - - name: Create Release - id: create_release - uses: actions/create-release@v1 - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - with: - tag_name: ${{ github.ref }} - release_name: ${{ github.ref }} - draft: true - prerelease: false - body: | - This is the ${{ github.ref }} release of the 
sysdig-sdk-python (sdcclient), the Python client for Sysdig Platform - - ### Major Changes - - ### Minor Changes - - ### Bug fixes - - pypi: - runs-on: ubuntu-latest - - steps: - - uses: actions/checkout@v2 - - - name: Set up Python - uses: actions/setup-python@v2 - with: - python-version: 3.8 - - - name: Install dependencies - run: | - python -m pip install --upgrade pip - pip install setuptools wheel twine - - - name: Build and publish - env: - TWINE_USERNAME: ${{ secrets.PYPI_USER }} - TWINE_PASSWORD: ${{ secrets.PYPI_PASSWORD }} - run: | - python setup.py sdist bdist_wheel - twine upload dist/* diff --git a/.gitignore b/.gitignore deleted file mode 100644 index 404fc5aa..00000000 --- a/.gitignore +++ /dev/null @@ -1,68 +0,0 @@ -# Byte-compiled / optimized / DLL files -__pycache__/ -*.py[cod] - -# C extensions -*.so - -# Distribution / packaging -.Python -env/ -build/ -develop-eggs/ -dist/ -downloads/ -eggs/ -.eggs/ -lib/ -lib64/ -parts/ -sdist/ -var/ -*.egg-info/ -.installed.cfg -*.egg - -# PyInstaller -# Usually these files are written by a python script from a template -# before PyInstaller builds the exe, so as to inject date/other infos into it. 
-*.manifest -*.spec - -# Installer logs -pip-log.txt -pip-delete-this-directory.txt - -# Unit test / coverage reports -htmlcov/ -.tox/ -.coverage -.coverage.* -.cache -nosetests.xml -coverage.xml -*,cover - -# Translations -*.mo -*.pot - -# Django stuff: -*.log - -# Sphinx documentation -docs/_build/ - -# PyBuilder -target/ - -# virtualenv -venv/ - -# Direnv -.envrc - -# IntelliJ projects -.idea/ - -coverage/ diff --git a/LICENSE.txt b/LICENSE.txt deleted file mode 100644 index 395d538f..00000000 --- a/LICENSE.txt +++ /dev/null @@ -1,20 +0,0 @@ -Copyright Sysdig Inc., https://sysdig.com/ - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/Makefile b/Makefile deleted file mode 100644 index 74ef3cc2..00000000 --- a/Makefile +++ /dev/null @@ -1,16 +0,0 @@ - - -.PHONY: test -test: - pipenv run mamba -f documentation - -.coverage: - pipenv run coverage run $(shell pipenv run which mamba) -f documentation || true - -cover: .coverage - pipenv run coverage report --include 'sdcclient/*' - -.PHONY: cover-html -cover-html: .coverage - pipenv run coverage html -d coverage --include 'sdcclient/*' - diff --git a/Pipfile b/Pipfile deleted file mode 100644 index 9528bcd8..00000000 --- a/Pipfile +++ /dev/null @@ -1,24 +0,0 @@ -[[source]] -name = "pypi" -url = "https://pypi.org/simple" -verify_ssl = true - -[dev-packages] -mamba = "*" -doublex = "*" -doublex-expects = "*" -expects = "*" -flake8 = "*" -pipenv-setup = "*" -sdcclient = {editable = true, path = "."} -coverage = "*" - -[packages] -requests = ">=2.23.0" -pyaml = "*" -requests-toolbelt = "*" -tatsu = "*" -urllib3 = ">=1.25.8" - -[requires] -python_version = "3.8" diff --git a/Pipfile.lock b/Pipfile.lock deleted file mode 100644 index 644fece1..00000000 --- a/Pipfile.lock +++ /dev/null @@ -1,542 +0,0 @@ -{ - "_meta": { - "hash": { - "sha256": "24dd4525900522b9e99904b116060dbdf8b2db09daf77ff47ae243566ce6913c" - }, - "pipfile-spec": 6, - "requires": { - "python_version": "3.8" - }, - "sources": [ - { - "name": "pypi", - "url": "https://pypi.org/simple", - "verify_ssl": true - } - ] - }, - "default": { - "certifi": { - "hashes": [ - "sha256:5930595817496dd21bb8dc35dad090f1c2cd0adfaf21204bf6732ca5d8ee34d3", - "sha256:8fc0819f1f30ba15bdb34cceffb9ef04d99f420f68eb75d901e9560b8749fc41" - ], - "version": "==2020.6.20" - }, - "chardet": { - "hashes": [ - "sha256:84ab92ed1c4d4f16916e05906b6b75a6c0fb5db821cc65e70cbd64a3e2a5eaae", - "sha256:fc323ffcaeaed0e0a02bf4d117757b98aed530d9ed4531e3e15460124c106691" - ], - "version": "==3.0.4" - }, - "idna": { - "hashes": [ - "sha256:b307872f855b18632ce0c21c5e45be78c0ea7ae4c15c828c20788b26921eb3f6", - 
"sha256:b97d804b1e9b523befed77c48dacec60e6dcb0b5391d57af6a65a312a90648c0" - ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", - "version": "==2.10" - }, - "pyaml": { - "hashes": [ - "sha256:29a5c2a68660a799103d6949167bd6c7953d031449d08802386372de1db6ad71", - "sha256:67081749a82b72c45e5f7f812ee3a14a03b3f5c25ff36ec3b290514f8c4c4b99" - ], - "index": "pypi", - "version": "==20.4.0" - }, - "pyyaml": { - "hashes": [ - "sha256:06a0d7ba600ce0b2d2fe2e78453a470b5a6e000a985dd4a4e54e436cc36b0e97", - "sha256:240097ff019d7c70a4922b6869d8a86407758333f02203e0fc6ff79c5dcede76", - "sha256:4f4b913ca1a7319b33cfb1369e91e50354d6f07a135f3b901aca02aa95940bd2", - "sha256:69f00dca373f240f842b2931fb2c7e14ddbacd1397d57157a9b005a6a9942648", - "sha256:73f099454b799e05e5ab51423c7bcf361c58d3206fa7b0d555426b1f4d9a3eaf", - "sha256:74809a57b329d6cc0fdccee6318f44b9b8649961fa73144a98735b0aaf029f1f", - "sha256:7739fc0fa8205b3ee8808aea45e968bc90082c10aef6ea95e855e10abf4a37b2", - "sha256:95f71d2af0ff4227885f7a6605c37fd53d3a106fcab511b8860ecca9fcf400ee", - "sha256:b8eac752c5e14d3eca0e6dd9199cd627518cb5ec06add0de9d32baeee6fe645d", - "sha256:cc8955cfbfc7a115fa81d85284ee61147059a753344bc51098f3ccd69b0d7e0c", - "sha256:d13155f591e6fcc1ec3b30685d50bf0711574e2c0dfffd7644babf8b5102ca1a" - ], - "version": "==5.3.1" - }, - "requests": { - "hashes": [ - "sha256:b3559a131db72c33ee969480840fff4bb6dd111de7dd27c8ee1f820f4f00231b", - "sha256:fe75cc94a9443b9246fc7049224f75604b113c36acb93f87b80ed42c44cbb898" - ], - "index": "pypi", - "version": "==2.24.0" - }, - "requests-toolbelt": { - "hashes": [ - "sha256:380606e1d10dc85c3bd47bf5a6095f815ec007be7a8b69c878507068df059e6f", - "sha256:968089d4584ad4ad7c171454f0a5c6dac23971e9472521ea3b6d49d610aa6fc0" - ], - "index": "pypi", - "version": "==0.9.1" - }, - "tatsu": { - "hashes": [ - "sha256:0adbf7189a8c4f9a882b442f7b8ed6c6ab3baae37057db0e96b6888daacffad0", - "sha256:3a043490e577632a05374b5033646bbc26cbb17386df81735a569ecbd45d934b" - ], - 
"index": "pypi", - "version": "==5.5.0" - }, - "urllib3": { - "hashes": [ - "sha256:91056c15fa70756691db97756772bb1eb9678fa585d9184f24534b100dc60f4a", - "sha256:e7983572181f5e1522d9c98453462384ee92a0be7fac5f1413a1e35c56cc0461" - ], - "index": "pypi", - "version": "==1.25.10" - } - }, - "develop": { - "appdirs": { - "hashes": [ - "sha256:7d5d0167b2b1ba821647616af46a749d1c653740dd0d2415100fe26e27afdf41", - "sha256:a841dacd6b99318a741b166adb07e19ee71a274450e68237b4650ca1055ab128" - ], - "version": "==1.4.4" - }, - "args": { - "hashes": [ - "sha256:a785b8d837625e9b61c39108532d95b85274acd679693b71ebb5156848fcf814" - ], - "version": "==0.1.0" - }, - "attrs": { - "hashes": [ - "sha256:26b54ddbbb9ee1d34d5d3668dd37d6cf74990ab23c828c2888dccdceee395594", - "sha256:fce7fc47dfc976152e82d53ff92fa0407700c21acd20886a13777a0d20e655dc" - ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", - "version": "==20.2.0" - }, - "black": { - "hashes": [ - "sha256:1b30e59be925fafc1ee4565e5e08abef6b03fe455102883820fe5ee2e4734e0b", - "sha256:c2edb73a08e9e0e6f65a0e6af18b059b8b1cdd5bef997d7a0b181df93dc81539" - ], - "markers": "python_version >= '3.6'", - "version": "==19.10b0" - }, - "cached-property": { - "hashes": [ - "sha256:9fa5755838eecbb2d234c3aa390bd80fbd3ac6b6869109bfc1b499f7bd89a130", - "sha256:df4f613cf7ad9a588cc381aaf4a512d26265ecebd5eb9e1ba12f1319eb85a6a0" - ], - "version": "==1.5.2" - }, - "cerberus": { - "hashes": [ - "sha256:302e6694f206dd85cb63f13fd5025b31ab6d38c99c50c6d769f8fa0b0f299589" - ], - "version": "==1.3.2" - }, - "certifi": { - "hashes": [ - "sha256:5930595817496dd21bb8dc35dad090f1c2cd0adfaf21204bf6732ca5d8ee34d3", - "sha256:8fc0819f1f30ba15bdb34cceffb9ef04d99f420f68eb75d901e9560b8749fc41" - ], - "version": "==2020.6.20" - }, - "chardet": { - "hashes": [ - "sha256:84ab92ed1c4d4f16916e05906b6b75a6c0fb5db821cc65e70cbd64a3e2a5eaae", - "sha256:fc323ffcaeaed0e0a02bf4d117757b98aed530d9ed4531e3e15460124c106691" - ], - "version": "==3.0.4" - 
}, - "click": { - "hashes": [ - "sha256:d2b5255c7c6349bc1bd1e59e08cd12acbbd63ce649f2588755783aa94dfb6b1a", - "sha256:dacca89f4bfadd5de3d7489b7c8a566eee0d3676333fbb50030263894c38c0dc" - ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", - "version": "==7.1.2" - }, - "clint": { - "hashes": [ - "sha256:05224c32b1075563d0b16d0015faaf9da43aa214e4a2140e51f08789e7a4c5aa" - ], - "version": "==0.5.1" - }, - "colorama": { - "hashes": [ - "sha256:9f47eda37229f68eee03b24b9748937c7dc3868f906e8ba69fbcbdd3bc5dc3e2" - ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", - "version": "==0.4.4" - }, - "coverage": { - "hashes": [ - "sha256:0203acd33d2298e19b57451ebb0bed0ab0c602e5cf5a818591b4918b1f97d516", - "sha256:0f313707cdecd5cd3e217fc68c78a960b616604b559e9ea60cc16795c4304259", - "sha256:1c6703094c81fa55b816f5ae542c6ffc625fec769f22b053adb42ad712d086c9", - "sha256:1d44bb3a652fed01f1f2c10d5477956116e9b391320c94d36c6bf13b088a1097", - "sha256:280baa8ec489c4f542f8940f9c4c2181f0306a8ee1a54eceba071a449fb870a0", - "sha256:29a6272fec10623fcbe158fdf9abc7a5fa032048ac1d8631f14b50fbfc10d17f", - "sha256:2b31f46bf7b31e6aa690d4c7a3d51bb262438c6dcb0d528adde446531d0d3bb7", - "sha256:2d43af2be93ffbad25dd959899b5b809618a496926146ce98ee0b23683f8c51c", - "sha256:381ead10b9b9af5f64646cd27107fb27b614ee7040bb1226f9c07ba96625cbb5", - "sha256:47a11bdbd8ada9b7ee628596f9d97fbd3851bd9999d398e9436bd67376dbece7", - "sha256:4d6a42744139a7fa5b46a264874a781e8694bb32f1d76d8137b68138686f1729", - "sha256:50691e744714856f03a86df3e2bff847c2acede4c191f9a1da38f088df342978", - "sha256:530cc8aaf11cc2ac7430f3614b04645662ef20c348dce4167c22d99bec3480e9", - "sha256:582ddfbe712025448206a5bc45855d16c2e491c2dd102ee9a2841418ac1c629f", - "sha256:63808c30b41f3bbf65e29f7280bf793c79f54fb807057de7e5238ffc7cc4d7b9", - "sha256:71b69bd716698fa62cd97137d6f2fdf49f534decb23a2c6fc80813e8b7be6822", - 
"sha256:7858847f2d84bf6e64c7f66498e851c54de8ea06a6f96a32a1d192d846734418", - "sha256:78e93cc3571fd928a39c0b26767c986188a4118edc67bc0695bc7a284da22e82", - "sha256:7f43286f13d91a34fadf61ae252a51a130223c52bfefb50310d5b2deb062cf0f", - "sha256:86e9f8cd4b0cdd57b4ae71a9c186717daa4c5a99f3238a8723f416256e0b064d", - "sha256:8f264ba2701b8c9f815b272ad568d555ef98dfe1576802ab3149c3629a9f2221", - "sha256:9342dd70a1e151684727c9c91ea003b2fb33523bf19385d4554f7897ca0141d4", - "sha256:9361de40701666b034c59ad9e317bae95c973b9ff92513dd0eced11c6adf2e21", - "sha256:9669179786254a2e7e57f0ecf224e978471491d660aaca833f845b72a2df3709", - "sha256:aac1ba0a253e17889550ddb1b60a2063f7474155465577caa2a3b131224cfd54", - "sha256:aef72eae10b5e3116bac6957de1df4d75909fc76d1499a53fb6387434b6bcd8d", - "sha256:bd3166bb3b111e76a4f8e2980fa1addf2920a4ca9b2b8ca36a3bc3dedc618270", - "sha256:c1b78fb9700fc961f53386ad2fd86d87091e06ede5d118b8a50dea285a071c24", - "sha256:c3888a051226e676e383de03bf49eb633cd39fc829516e5334e69b8d81aae751", - "sha256:c5f17ad25d2c1286436761b462e22b5020d83316f8e8fcb5deb2b3151f8f1d3a", - "sha256:c851b35fc078389bc16b915a0a7c1d5923e12e2c5aeec58c52f4aa8085ac8237", - "sha256:cb7df71de0af56000115eafd000b867d1261f786b5eebd88a0ca6360cccfaca7", - "sha256:cedb2f9e1f990918ea061f28a0f0077a07702e3819602d3507e2ff98c8d20636", - "sha256:e8caf961e1b1a945db76f1b5fa9c91498d15f545ac0ababbe575cfab185d3bd8" - ], - "index": "pypi", - "version": "==5.3" - }, - "distlib": { - "hashes": [ - "sha256:8c09de2c67b3e7deef7184574fc060ab8a793e7adbb183d942c389c8b13c52fb", - "sha256:edf6116872c863e1aa9d5bb7cb5e05a022c519a4594dc703843343a9ddd9bff1" - ], - "version": "==0.3.1" - }, - "doublex": { - "hashes": [ - "sha256:4e9f17f346276db7faa461dfa105f17de7f837e5ceccca34f4c70d4ff9d2f20c" - ], - "index": "pypi", - "version": "==1.9.2" - }, - "doublex-expects": { - "hashes": [ - "sha256:8040682d97f0a66f632c5df982f78d09aee36b2c4a1eb275b0c596d115f200aa" - ], - "index": "pypi", - "version": "==0.7.1" - }, - "expects": { - "hashes": [ 
- "sha256:419902ccafe81b7e9559eeb6b7a07ef9d5c5604eddb93000f0642b3b2d594f4c" - ], - "index": "pypi", - "version": "==0.9.0" - }, - "flake8": { - "hashes": [ - "sha256:749dbbd6bfd0cf1318af27bf97a14e28e5ff548ef8e5b1566ccfb25a11e7c839", - "sha256:aadae8761ec651813c24be05c6f7b4680857ef6afaae4651a4eccaef97ce6c3b" - ], - "index": "pypi", - "version": "==3.8.4" - }, - "idna": { - "hashes": [ - "sha256:b307872f855b18632ce0c21c5e45be78c0ea7ae4c15c828c20788b26921eb3f6", - "sha256:b97d804b1e9b523befed77c48dacec60e6dcb0b5391d57af6a65a312a90648c0" - ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", - "version": "==2.10" - }, - "mamba": { - "hashes": [ - "sha256:f976735949bc9a8731cc0876aaea2720949bd3d1554b0e94004c91a4f61abecb" - ], - "index": "pypi", - "version": "==0.11.1" - }, - "mccabe": { - "hashes": [ - "sha256:ab8a6258860da4b6677da4bd2fe5dc2c659cff31b3ee4f7f5d64e79735b80d42", - "sha256:dd8d182285a0fe56bace7f45b5e7d1a6ebcbf524e8f3bd87eb0f125271b8831f" - ], - "version": "==0.6.1" - }, - "orderedmultidict": { - "hashes": [ - "sha256:04070bbb5e87291cc9bfa51df413677faf2141c73c61d2a5f7b26bea3cd882ad", - "sha256:43c839a17ee3cdd62234c47deca1a8508a3f2ca1d0678a3bf791c87cf84adbf3" - ], - "version": "==1.0.1" - }, - "packaging": { - "hashes": [ - "sha256:4357f74f47b9c12db93624a82154e9b120fa8293699949152b22065d556079f8", - "sha256:998416ba6962ae7fbd6596850b80e17859a5753ba17c32284f67bfff33784181" - ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", - "version": "==20.4" - }, - "pathspec": { - "hashes": [ - "sha256:7d91249d21749788d07a2d0f94147accd8f845507400749ea19c1ec9054a12b0", - "sha256:da45173eb3a6f2a5a487efba21f050af2b41948be6ab52b6a1e3ff22bb8b7061" - ], - "version": "==0.8.0" - }, - "pep517": { - "hashes": [ - "sha256:576c480be81f3e1a70a16182c762311eb80d1f8a7b0d11971e5234967d7a342c", - "sha256:8e6199cf1288d48a0c44057f112acf18aa5ebabbf73faa242f598fbe145ba29e" - ], - "version": "==0.8.2" - }, - 
"pip-shims": { - "hashes": [ - "sha256:05b00ade9d1e686a98bb656dd9b0608a933897283dc21913fad6ea5409ff7e91", - "sha256:16ca9f87485667b16b978b68a1aae4f9cc082c0fa018aed28567f9f34a590569" - ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", - "version": "==0.5.3" - }, - "pipenv-setup": { - "hashes": [ - "sha256:8a439aff7b16e18d7e07702c9186fc5fe86156679eace90e10c2578a43bd7af1", - "sha256:e1bfd55c1152024e762f1c17f6189fcb073166509e7c0228870f7ea160355648" - ], - "index": "pypi", - "version": "==3.1.1" - }, - "pipfile": { - "hashes": [ - "sha256:f7d9f15de8b660986557eb3cc5391aa1a16207ac41bc378d03f414762d36c984" - ], - "version": "==0.0.2" - }, - "plette": { - "extras": [ - "validation" - ], - "hashes": [ - "sha256:46402c03e36d6eadddad2a5125990e322dd74f98160c8f2dcd832b2291858a26", - "sha256:d6c9b96981b347bddd333910b753b6091a2c1eb2ef85bb373b4a67c9d91dca16" - ], - "markers": "python_version >= '2.6' and python_version not in '3.0, 3.1, 3.2, 3.3'", - "version": "==0.2.3" - }, - "pyaml": { - "hashes": [ - "sha256:29a5c2a68660a799103d6949167bd6c7953d031449d08802386372de1db6ad71", - "sha256:67081749a82b72c45e5f7f812ee3a14a03b3f5c25ff36ec3b290514f8c4c4b99" - ], - "index": "pypi", - "version": "==20.4.0" - }, - "pycodestyle": { - "hashes": [ - "sha256:2295e7b2f6b5bd100585ebcb1f616591b652db8a741695b3d8f5d28bdc934367", - "sha256:c58a7d2815e0e8d7972bf1803331fb0152f867bd89adf8a01dfd55085434192e" - ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", - "version": "==2.6.0" - }, - "pyflakes": { - "hashes": [ - "sha256:0d94e0e05a19e57a99444b6ddcf9a6eb2e5c68d3ca1e98e90707af8152c90a92", - "sha256:35b2d75ee967ea93b55750aa9edbbf72813e06a66ba54438df2cfac9e3c27fc8" - ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", - "version": "==2.2.0" - }, - "pyhamcrest": { - "hashes": [ - "sha256:412e00137858f04bde0729913874a48485665f2d36fe9ee449f26be864af9316", - 
"sha256:7ead136e03655af85069b6f47b23eb7c3e5c221aa9f022a4fbb499f5b7308f29" - ], - "markers": "python_version >= '3.5'", - "version": "==2.0.2" - }, - "pyparsing": { - "hashes": [ - "sha256:c203ec8783bf771a155b207279b9bccb8dea02d8f0c9e5f8ead507bc3246ecc1", - "sha256:ef9d7589ef3c200abe66653d3f1ab1033c3c419ae9b9bdb1240a85b024efc88b" - ], - "markers": "python_version >= '2.6' and python_version not in '3.0, 3.1, 3.2, 3.3'", - "version": "==2.4.7" - }, - "python-dateutil": { - "hashes": [ - "sha256:73ebfe9dbf22e832286dafa60473e4cd239f8592f699aa5adaf10050e6e1823c", - "sha256:75bb3f31ea686f1197762692a9ee6a7550b59fc6ca3a1f4b5d7e32fb98e2da2a" - ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", - "version": "==2.8.1" - }, - "pyyaml": { - "hashes": [ - "sha256:06a0d7ba600ce0b2d2fe2e78453a470b5a6e000a985dd4a4e54e436cc36b0e97", - "sha256:240097ff019d7c70a4922b6869d8a86407758333f02203e0fc6ff79c5dcede76", - "sha256:4f4b913ca1a7319b33cfb1369e91e50354d6f07a135f3b901aca02aa95940bd2", - "sha256:69f00dca373f240f842b2931fb2c7e14ddbacd1397d57157a9b005a6a9942648", - "sha256:73f099454b799e05e5ab51423c7bcf361c58d3206fa7b0d555426b1f4d9a3eaf", - "sha256:74809a57b329d6cc0fdccee6318f44b9b8649961fa73144a98735b0aaf029f1f", - "sha256:7739fc0fa8205b3ee8808aea45e968bc90082c10aef6ea95e855e10abf4a37b2", - "sha256:95f71d2af0ff4227885f7a6605c37fd53d3a106fcab511b8860ecca9fcf400ee", - "sha256:b8eac752c5e14d3eca0e6dd9199cd627518cb5ec06add0de9d32baeee6fe645d", - "sha256:cc8955cfbfc7a115fa81d85284ee61147059a753344bc51098f3ccd69b0d7e0c", - "sha256:d13155f591e6fcc1ec3b30685d50bf0711574e2c0dfffd7644babf8b5102ca1a" - ], - "version": "==5.3.1" - }, - "regex": { - "hashes": [ - "sha256:1a16afbfadaadc1397353f9b32e19a65dc1d1804c80ad73a14f435348ca017ad", - "sha256:2308491b3e6c530a3bb38a8a4bb1dc5fd32cbf1e11ca623f2172ba17a81acef1", - "sha256:39a5ef30bca911f5a8a3d4476f5713ed4d66e313d9fb6755b32bec8a2e519635", - 
"sha256:3d5a8d007116021cf65355ada47bf405656c4b3b9a988493d26688275fde1f1c", - "sha256:4302153abb96859beb2c778cc4662607a34175065fc2f33a21f49eb3fbd1ccd3", - "sha256:463e770c48da76a8da82b8d4a48a541f314e0df91cbb6d873a341dbe578efafd", - "sha256:46ab6070b0d2cb85700b8863b3f5504c7f75d8af44289e9562195fe02a8dd72d", - "sha256:4f5c0fe46fb79a7adf766b365cae56cafbf352c27358fda811e4a1dc8216d0db", - "sha256:60c4f64d9a326fe48e8738c3dbc068e1edc41ff7895a9e3723840deec4bc1c28", - "sha256:671c51d352cfb146e48baee82b1ee8d6ffe357c292f5e13300cdc5c00867ebfc", - "sha256:6cf527ec2f3565248408b61dd36e380d799c2a1047eab04e13a2b0c15dd9c767", - "sha256:7c4fc5a8ec91a2254bb459db27dbd9e16bba1dabff638f425d736888d34aaefa", - "sha256:850339226aa4fec04916386577674bb9d69abe0048f5d1a99f91b0004bfdcc01", - "sha256:8ba3efdd60bfee1aa784dbcea175eb442d059b576934c9d099e381e5a9f48930", - "sha256:8c8c42aa5d3ac9a49829c4b28a81bebfa0378996f9e0ca5b5ab8a36870c3e5ee", - "sha256:8e7ef296b84d44425760fe813cabd7afbb48c8dd62023018b338bbd9d7d6f2f0", - "sha256:a2a31ee8a354fa3036d12804730e1e20d58bc4e250365ead34b9c30bbe9908c3", - "sha256:a63907332531a499b8cdfd18953febb5a4c525e9e7ca4ac147423b917244b260", - "sha256:a8240df4957a5b0e641998a5d78b3c4ea762c845d8cb8997bf820626826fde9a", - "sha256:b8806649983a1c78874ec7e04393ef076805740f6319e87a56f91f1767960212", - "sha256:c077c9d04a040dba001cf62b3aff08fd85be86bccf2c51a770c77377662a2d55", - "sha256:c529ba90c1775697a65b46c83d47a2d3de70f24d96da5d41d05a761c73b063af", - "sha256:d537e270b3e6bfaea4f49eaf267984bfb3628c86670e9ad2a257358d3b8f0955", - "sha256:d629d750ebe75a88184db98f759633b0a7772c2e6f4da529f0027b4a402c0e2f", - "sha256:d9d53518eeed12190744d366ec4a3f39b99d7daa705abca95f87dd8b442df4ad", - "sha256:e490f08897cb44e54bddf5c6e27deca9b58c4076849f32aaa7a0b9f1730f2c20", - "sha256:f579caecbbca291b0fcc7d473664c8c08635da2f9b1567c22ea32311c86ef68c" - ], - "version": "==2020.10.11" - }, - "requests": { - "hashes": [ - "sha256:b3559a131db72c33ee969480840fff4bb6dd111de7dd27c8ee1f820f4f00231b", - 
"sha256:fe75cc94a9443b9246fc7049224f75604b113c36acb93f87b80ed42c44cbb898" - ], - "index": "pypi", - "version": "==2.24.0" - }, - "requests-toolbelt": { - "hashes": [ - "sha256:380606e1d10dc85c3bd47bf5a6095f815ec007be7a8b69c878507068df059e6f", - "sha256:968089d4584ad4ad7c171454f0a5c6dac23971e9472521ea3b6d49d610aa6fc0" - ], - "index": "pypi", - "version": "==0.9.1" - }, - "requirementslib": { - "hashes": [ - "sha256:cdf8aa652ac52216d156cee2b89c3c9ee53373dded0035184d0b9af569a0f10c", - "sha256:fd98ea873effaede6b3394725a232bcbd3fe3985987e226109a841c85a69e2e3" - ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", - "version": "==1.5.13" - }, - "sdcclient": { - "editable": true, - "path": "." - }, - "six": { - "hashes": [ - "sha256:30639c035cdb23534cd4aa2dd52c3bf48f06e5f4a941509c8bafd8ce11080259", - "sha256:8b74bedcbbbaca38ff6d7491d76f2b06b3592611af620f8426e82dddb04a5ced" - ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", - "version": "==1.15.0" - }, - "tatsu": { - "hashes": [ - "sha256:0adbf7189a8c4f9a882b442f7b8ed6c6ab3baae37057db0e96b6888daacffad0", - "sha256:3a043490e577632a05374b5033646bbc26cbb17386df81735a569ecbd45d934b" - ], - "index": "pypi", - "version": "==5.5.0" - }, - "toml": { - "hashes": [ - "sha256:926b612be1e5ce0634a2ca03470f95169cf16f939018233a670519cb4ac58b0f", - "sha256:bda89d5935c2eac546d648028b9901107a595863cb36bae0c73ac804a9b4ce88" - ], - "version": "==0.10.1" - }, - "tomlkit": { - "hashes": [ - "sha256:6babbd33b17d5c9691896b0e68159215a9387ebfa938aa3ac42f4a4beeb2b831", - "sha256:ac57f29693fab3e309ea789252fcce3061e19110085aa31af5446ca749325618" - ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", - "version": "==0.7.0" - }, - "typed-ast": { - "hashes": [ - "sha256:0666aa36131496aed8f7be0410ff974562ab7eeac11ef351def9ea6fa28f6355", - "sha256:0c2c07682d61a629b68433afb159376e24e5b2fd4641d35424e462169c0a7919", - 
"sha256:249862707802d40f7f29f6e1aad8d84b5aa9e44552d2cc17384b209f091276aa", - "sha256:24995c843eb0ad11a4527b026b4dde3da70e1f2d8806c99b7b4a7cf491612652", - "sha256:269151951236b0f9a6f04015a9004084a5ab0d5f19b57de779f908621e7d8b75", - "sha256:4083861b0aa07990b619bd7ddc365eb7fa4b817e99cf5f8d9cf21a42780f6e01", - "sha256:498b0f36cc7054c1fead3d7fc59d2150f4d5c6c56ba7fb150c013fbc683a8d2d", - "sha256:4e3e5da80ccbebfff202a67bf900d081906c358ccc3d5e3c8aea42fdfdfd51c1", - "sha256:6daac9731f172c2a22ade6ed0c00197ee7cc1221aa84cfdf9c31defeb059a907", - "sha256:715ff2f2df46121071622063fc7543d9b1fd19ebfc4f5c8895af64a77a8c852c", - "sha256:73d785a950fc82dd2a25897d525d003f6378d1cb23ab305578394694202a58c3", - "sha256:8c8aaad94455178e3187ab22c8b01a3837f8ee50e09cf31f1ba129eb293ec30b", - "sha256:8ce678dbaf790dbdb3eba24056d5364fb45944f33553dd5869b7580cdbb83614", - "sha256:aaee9905aee35ba5905cfb3c62f3e83b3bec7b39413f0a7f19be4e547ea01ebb", - "sha256:bcd3b13b56ea479b3650b82cabd6b5343a625b0ced5429e4ccad28a8973f301b", - "sha256:c9e348e02e4d2b4a8b2eedb48210430658df6951fa484e59de33ff773fbd4b41", - "sha256:d205b1b46085271b4e15f670058ce182bd1199e56b317bf2ec004b6a44f911f6", - "sha256:d43943ef777f9a1c42bf4e552ba23ac77a6351de620aa9acf64ad54933ad4d34", - "sha256:d5d33e9e7af3b34a40dc05f498939f0ebf187f07c385fd58d591c533ad8562fe", - "sha256:fc0fea399acb12edbf8a628ba8d2312f583bdbdb3335635db062fa98cf71fca4", - "sha256:fe460b922ec15dd205595c9b5b99e2f056fd98ae8f9f56b888e7a17dc2b757e7" - ], - "version": "==1.4.1" - }, - "urllib3": { - "hashes": [ - "sha256:91056c15fa70756691db97756772bb1eb9678fa585d9184f24534b100dc60f4a", - "sha256:e7983572181f5e1522d9c98453462384ee92a0be7fac5f1413a1e35c56cc0461" - ], - "index": "pypi", - "version": "==1.25.10" - }, - "vistir": { - "hashes": [ - "sha256:a37079cdbd85d31a41cdd18457fe521e15ec08b255811e81aa061fd5f48a20fb", - "sha256:eff1d19ef50c703a329ed294e5ec0b0fbb35b96c1b3ee6dcdb266dddbe1e935a" - ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 
3.3'", - "version": "==0.5.2" - }, - "wheel": { - "hashes": [ - "sha256:497add53525d16c173c2c1c733b8f655510e909ea78cc0e29d374243544b77a2", - "sha256:99a22d87add3f634ff917310a3d87e499f19e663413a52eb9232c447aa646c9f" - ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", - "version": "==0.35.1" - } - } -} diff --git a/README.md b/README.md deleted file mode 100644 index 265958a9..00000000 --- a/README.md +++ /dev/null @@ -1,104 +0,0 @@ -Sysdig Monitor/Secure Python client library -=== - -[![Build Status](https://travis-ci.org/draios/python-sdc-client.png?branch=master)](https://travis-ci.org/draios/python-sdc-client) -[![Current version on PyPI](http://img.shields.io/pypi/v/sdcclient.svg)](https://pypi.python.org/pypi/sdcclient) - -A Python client API for Sysdig Monitor/Sysdig Secure. - -This module is a wrapper around the Sysdig Monitor/Sysdig Secure APIs, which are documented [here](http://support.sysdigcloud.com/hc/en-us/articles/205233166-The-Sysdig-Cloud-API-Specification). It exposes most of the sysdig REST API functionality as an easy to use and easy to install Python interface. The repository includes a rich set of examples (in the [examples](examples/) subdir) that quickly address several use cases. - -Installation ------------- -#### Automatic w/ PyPI ([virtualenv](http://virtualenv.readthedocs.org/en/latest/) is recommended.) 
- pip install sdcclient - -#### Manual - git clone https://github.com/draios/python-sdc-client.git - cd python-sdc-client - python setup.py install - -#### One-step cmdline to create virtualenv, install client, and gain access to sample programs - -``` -$ virtualenv python-sdc-env && source python-sdc-env/bin/activate && pip install sdcclient && git clone https://github.com/draios/python-sdc-client && python python-sdc-client/examples/set_secure_system_falco_rules.py --help -``` - -Quick start ------------ -- If you are interested in exporting metrics data from Sysdig Monitor, take a look at [examples/get_data_simple.py](examples/get_data_simple.py) and [examples/get_data_advanced.py](examples/get_data_advanced.py). -- If you want to programmatically create an alert, refer to [examples/create_alert.py](examples/create_alert.py) -- If you want to programmatically create a dashboard, refer to [examples/create_dashboard.py](examples/create_dashboard.py) - -Usage ------ - -_Note:_ in order to use this API you must obtain a Sysdig Monitor/Secure API token. You can get your user's token in the _Sysdig Monitor API_ section of the settings page for [monitor](https://app.sysdigcloud.com/#/settings/user) or [secure](https://secure.sysdig.com/#/settings/user). - -The library exports two classes, `SdMonitorClient` and `SdSecureClient` that are used to connect to Sysdig Monitor/Secure and execute actions. They can be instantiated like this: - -``` python -from sdcclient import SdMonitorClient - -api_token = "MY_API_TOKEN" - -# -# Instantiate the Sysdig Monitor client -# -client = SdMonitorClient(api_token) -``` - -For backwards compatibility purposes, a third class `SdcClient` is exported which is an alias of `SdMonitorClient`. - -Once instantiated, all the methods documented below can be called on the object. - -#### Return Values -Every method in the SdMonitorClient/SdSecureClient classes returns **a list with two entries**. 
The first one is a boolean value indicating if the call was successful. The second entry depends on the result: -- If the call was successful, it's a dictionary reflecting the json returned by the underlying REST call -- If the call failed, it's a string describing the error - -For an example on how to parse this output, take a look at a simple example like [get_data_simple.py](examples/get_data_simple.py) - -Function List & Documentation ------------------------------ -For the list of available functions in the current `master` branch of this repo and corresponding docs, refer to the [Python Script Library documentation page](http://python-sdc-client.readthedocs.io/en/latest/). Equivalent docs for the functions in the most recent "stable" release (which is what you'd get via `pip` install, and corresponds to the most recent [release](https://github.com/draios/python-sdc-client/releases)) are located [here](http://python-sdc-client.readthedocs.io/en/stable/). - -On-Premises Installs --------------------- -For [On-Premises Sysdig Monitor installs](https://support.sysdigcloud.com/hc/en-us/articles/206519903-On-Premises-Installation-Guide), additional configuration is necessary to point to your API server rather than the default SaaS-based one, and also to easily connect when using a self-signed certificate for SSL. One way to handle this is by setting environment variables before running your Python scripts: - -``` -export SDC_URL='https://' -export SDC_SSL_VERIFY='false' -``` - -Alternatively, you can specify the additional arguments in your Python scripts as you instantiate the SDC client: - -``` -client = SdMonitorClient(api_token, sdc_url='https://', ssl_verify=False) -``` - - -Transitioning from Python to REST ---------------------------------- - -If your goal is to interact with the REST API directly, you can use this Python client library to understand the REST interactions by logging the actions it takes. 
This is useful because full documentation of the REST API has not yet been created; and also provides a complete example of known working operations. - -- Use or modify an example, or write a new script against the Python sdcclient module. -- Log the HTTP requests made by the script. - -To log all the requests made by your script in significant detail, add to your script: - -``` python -import logging -import httplib -httplib.HTTPConnection.debuglevel = 1 - -logging.basicConfig() # you need to initialize logging, otherwise you will not see anything from requests -logging.getLogger().setLevel(logging.DEBUG) -requests_log = logging.getLogger("requests.packages.urllib3") -requests_log.setLevel(logging.DEBUG) -requests_log.propagate = True -``` - -Then run as normal. diff --git a/_config.yml b/_config.yml new file mode 100644 index 00000000..2060f8f0 --- /dev/null +++ b/_config.yml @@ -0,0 +1,9 @@ +title: Sysdig Python SDK + +remote_theme: sysdiglabs/jekyll-theme-sysdiglabs-docs@main + +sass: + style: compressed + +plugins: + - jekyll-remote-theme diff --git a/doc/Makefile b/doc/Makefile deleted file mode 100644 index cf735800..00000000 --- a/doc/Makefile +++ /dev/null @@ -1,21 +0,0 @@ -# Minimal makefile for Sphinx documentation -# - -# You can set these variables from the command line. -SPHINXOPTS = -SPHINXBUILD = sphinx-build -SPHINXPROJ = python-sdc-client -SOURCEDIR = . -BUILDDIR = _build - -# Put it first so that "make" without argument is like "make help". -help: - @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) - -.PHONY: help Makefile - -# Catch-all target: route all unknown targets to Sphinx using the new -# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 
-%: Makefile - @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) - diff --git a/doc/conf.py b/doc/conf.py deleted file mode 100644 index d17cadb3..00000000 --- a/doc/conf.py +++ /dev/null @@ -1,173 +0,0 @@ -# -*- coding: utf-8 -*- -# -# python-sdc-client documentation build configuration file, created by -# sphinx-quickstart on Thu Dec 22 11:59:02 2016. -# -# This file is execfile()d with the current directory set to its -# containing dir. -# -# Note that not all possible configuration values are present in this -# autogenerated file. -# -# All configuration values have a default; values that are commented out -# serve to show the default. - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -# -import os -import sys -sys.path.insert(0, os.path.abspath('..')) - - -# -- General configuration ------------------------------------------------ - -# If your documentation needs a minimal Sphinx version, state it here. -# -# needs_sphinx = '1.0' - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. -extensions = ['sphinx.ext.autodoc', 'sphinx.ext.linkcode' ] - -# Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] - -# The suffix(es) of source filenames. -# You can specify multiple suffix as a list of string: -# -# source_suffix = ['.rst', '.md'] -source_suffix = '.rst' - -# The master toctree document. -master_doc = 'index' - -# General information about the project. -project = u'python-sdc-client' -copyright = u'2016, Sysdig Inc.' -author = u'Sysdig Inc.' 
- -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The short X.Y version. -version = u'' -# The full version, including alpha/beta/rc tags. -release = u'' - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -# -# This is also used if you do content translation via gettext catalogs. -# Usually you set "language" from the command line for these cases. -language = None - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -# This patterns also effect to html_static_path and html_extra_path -exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' - -# If true, `todo` and `todoList` produce output, else they produce nothing. -todo_include_todos = False - - -# -- Options for HTML output ---------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -# -html_theme = 'classic' - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -# -# html_theme_options = {} - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] - - -# -- Options for HTMLHelp output ------------------------------------------ - -# Output file base name for HTML help builder. 
-htmlhelp_basename = 'python-sdc-clientdoc' - - -# -- Options for LaTeX output --------------------------------------------- - -latex_elements = { - # The paper size ('letterpaper' or 'a4paper'). - # - # 'papersize': 'letterpaper', - - # The font size ('10pt', '11pt' or '12pt'). - # - # 'pointsize': '10pt', - - # Additional stuff for the LaTeX preamble. - # - # 'preamble': '', - - # Latex figure (float) alignment - # - # 'figure_align': 'htbp', -} - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, -# author, documentclass [howto, manual, or own class]). -latex_documents = [ - (master_doc, 'python-sdc-client.tex', u'python-sdc-client Documentation', - u'Sysdig Inc.', 'manual'), -] - - -# -- Options for manual page output --------------------------------------- - -# One entry per manual page. List of tuples -# (source start file, name, description, authors, manual section). -man_pages = [ - (master_doc, 'python-sdc-client', u'python-sdc-client Documentation', - [author], 1) -] - - -# -- Options for Texinfo output ------------------------------------------- - -# Grouping the document tree into Texinfo files. 
List of tuples -# (source start file, target name, title, author, -# dir menu entry, description, category) -texinfo_documents = [ - (master_doc, 'python-sdc-client', u'python-sdc-client Documentation', - author, 'python-sdc-client', 'One line description of project.', - 'Miscellaneous'), -] - - -def linkcode_resolve(domain, info): - def find_line(): - obj = sys.modules[info['module']] - for part in info['fullname'].split('.'): - obj = getattr(obj, part) - import inspect - fn = inspect.getsourcefile(obj) - source, lineno = inspect.findsource(obj) - return lineno + 1 - - if domain != 'py' or not info['module']: - return None - #tag = 'master' if 'dev' in release else ('v' + release) - url = "https://github.com/draios/python-sdc-client/blob/master/sdcclient/_client.py" - try: - return url + '#L%d' % find_line() - except Exception: - return url diff --git a/doc/index.rst b/doc/index.rst deleted file mode 100644 index febbf326..00000000 --- a/doc/index.rst +++ /dev/null @@ -1,26 +0,0 @@ -.. python-sdc-client documentation master file, created by - sphinx-quickstart on Thu Dec 22 11:59:02 2016. - You can adapt this file completely to your liking, but it should at least - contain the root `toctree` directive. - -Sysdig Cloud Python Script Library -================================== - -This page documents the functions available in the `Python Script Library `_ for `Sysdig Cloud `_. It is is a wrapper around the `Sysdig Cloud API `_. - -* :ref:`genindex` -* :ref:`search` - - - -Function List -============= -.. py:module:: sdcclient -.. autoclass:: SdMonitorClient - :members: - :inherited-members: - :undoc-members: -.. 
autoclass:: SdSecureClient - :members: - :inherited-members: - :undoc-members: diff --git a/doc/make.bat b/doc/make.bat deleted file mode 100644 index 483b1d9c..00000000 --- a/doc/make.bat +++ /dev/null @@ -1,36 +0,0 @@ -@ECHO OFF - -pushd %~dp0 - -REM Command file for Sphinx documentation - -if "%SPHINXBUILD%" == "" ( - set SPHINXBUILD=sphinx-build -) -set SOURCEDIR=. -set BUILDDIR=_build -set SPHINXPROJ=python-sdc-client - -if "%1" == "" goto help - -%SPHINXBUILD% >NUL 2>NUL -if errorlevel 9009 ( - echo. - echo.The 'sphinx-build' command was not found. Make sure you have Sphinx - echo.installed, then set the SPHINXBUILD environment variable to point - echo.to the full path of the 'sphinx-build' executable. Alternatively you - echo.may add the Sphinx directory to PATH. - echo. - echo.If you don't have Sphinx installed, grab it from - echo.http://sphinx-doc.org/ - exit /b 1 -) - -%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% -goto end - -:help -%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% - -:end -popd diff --git a/examples/add_notification_email.py b/examples/add_notification_email.py deleted file mode 100755 index c81e5861..00000000 --- a/examples/add_notification_email.py +++ /dev/null @@ -1,38 +0,0 @@ -#!/usr/bin/env python -# -# Post a user event to Sysdig Cloud -# - -import sys - -from sdcclient import SdcClient - -# -# Parse arguments -# -if len(sys.argv) != 3: - print(('usage: %s email' % sys.argv[0])) - print('You can find your token at https://app.sysdigcloud.com/#/settings/user') - sys.exit(1) - -sdc_token = sys.argv[1] -email = sys.argv[2] - -# -# Instantiate the SDC client -# -sdclient = SdcClient(sdc_token) - -# -# Post the event -# -ok, res = sdclient.add_email_notification_recipient(email) - -# -# Return the result -# -if ok: - print('Recipient added successfully') -else: - print(res) - sys.exit(1) diff --git a/examples/add_policy.py b/examples/add_policy.py deleted file mode 100755 index 7bbf2db7..00000000 --- 
a/examples/add_policy.py +++ /dev/null @@ -1,43 +0,0 @@ -#!/usr/bin/env python -# -# Add a new policy -# - -import json -import sys - -from sdcclient import SdSecureClient - - -def usage(): - print(('usage: %s ' % sys.argv[0])) - print('Reads policy json from standard input') - print('You can find your token at https://secure.sysdig.com/#/settings/user') - sys.exit(1) - - -# -# Parse arguments -# -if len(sys.argv) != 2: - usage() - -sdc_token = sys.argv[1] - -policy_json = sys.stdin.read() - -# -# Instantiate the SDC client -# -sdclient = SdSecureClient(sdc_token, 'https://secure.sysdig.com') - -ok, res = sdclient.add_policy_json(policy_json) - -# -# Return the result -# -if ok: - print((json.dumps(res, indent=2))) -else: - print(res) - sys.exit(1) diff --git a/examples/add_policy_v1.py b/examples/add_policy_v1.py deleted file mode 100755 index f9a63098..00000000 --- a/examples/add_policy_v1.py +++ /dev/null @@ -1,43 +0,0 @@ -#!/usr/bin/env python -# -# Add a new policy -# - -import json -import sys - -from sdcclient import SdSecureClientV1 - - -def usage(): - print(('usage: %s ' % sys.argv[0])) - print('Reads policy json from standard input') - print('You can find your token at https://secure.sysdig.com/#/settings/user') - sys.exit(1) - - -# -# Parse arguments -# -if len(sys.argv) != 2: - usage() - -sdc_token = sys.argv[1] - -policy_json = sys.stdin.read() - -# -# Instantiate the SDC client -# -sdclient = SdSecureClientV1(sdc_token, 'https://secure.sysdig.com') - -ok, res = sdclient.add_policy(policy_json) - -# -# Return the result -# -if ok: - print((json.dumps(res, indent=2))) -else: - print(res) - sys.exit(1) diff --git a/examples/add_users_to_secure.py b/examples/add_users_to_secure.py deleted file mode 100755 index 59f4f066..00000000 --- a/examples/add_users_to_secure.py +++ /dev/null @@ -1,76 +0,0 @@ -#!/usr/bin/env python -# -# Make sure all users are members of the Secure Operations team. 
-# -# As of when this script was written, there is only one team for -# all Secure users. Newly-created users that land in the default -# team for Monitor (such as those created via the API) will -# therefore not be in the Secure Operations team. If you have an -# environment where you want all users to have both Monitor and -# Secure access by default, you could run this script periodically -# (e.g. as a cron job) to make sure any such users are made part -# of the Secure Operations team as well. -# - -import sys - -from sdcclient import SdcClient - -# -# Parse arguments -# -if len(sys.argv) != 2: - print(('usage: %s ' % sys.argv[0])) - print('You can find your token at https://app.sysdigcloud.com/#/settings/user') - sys.exit(1) - -sdc_token = sys.argv[1] - -SECURE_TEAM_NAME = 'Secure Operations' - -# -# As of when this script was written, the Secure Operations team does -# not have the concepts of RBAC roles like "Read User" vs. "Edit User". -# Rather, all members of the Secure team have full visibility within -# Secure, which is associated with ROLE_TEAM_EDIT. -# -SECURE_TEAM_ROLE = 'ROLE_TEAM_EDIT' - -# -# Instantiate the SDC client -# -sdclient = SdcClient(sdc_token, sdc_url='https://app.sysdigcloud.com') - -ok, res = sdclient.list_memberships(SECURE_TEAM_NAME) - -if not ok: - print(('Unable to get memberships for ' + SECURE_TEAM_NAME + ' team: ', res)) - sys.exit(1) -memberships = res - -ok, res = sdclient.get_users() - -if not ok: - print(('Unable to get users: ', res)) - sys.exit(1) -all_users = res - -# -# The memberships passed into edit_team() are based on username -# rather than ID, so convert the IDs. 
-# -for user in all_users: - if user['username'] in memberships: - print(('Will preserve existing membership for: ' + user['username'])) - else: - print(('Will add new member: ' + user['username'])) - memberships[user['username']] = SECURE_TEAM_ROLE - -ok, res = sdclient.save_memberships(SECURE_TEAM_NAME, memberships=memberships) -if not ok: - print(('Could not edit team:', res, '. Exiting.')) - sys.exit(1) -else: - print(('Finished syncing memberships of "' + SECURE_TEAM_NAME + '" team')) - -sys.exit(0) diff --git a/examples/create_alert.py b/examples/create_alert.py deleted file mode 100755 index 04aeb62a..00000000 --- a/examples/create_alert.py +++ /dev/null @@ -1,82 +0,0 @@ -#!/usr/bin/env python -# -# This script shows how to use the create_alert() call to create the following -# Sysdig Cloud alert: 'send an email notification when the CPU of any tomcat -# process running on any of the instrumented machines goes over 80%' -# -# - -import getopt -import sys - -from sdcclient import SdcClient - - -# -# Parse arguments -# -def usage(): - print(('usage: %s [-a|--alert ] ' % sys.argv[0])) - print('-a|--alert: Set name of alert to create') - print('You can find your token at https://app.sysdigcloud.com/#/settings/user') - sys.exit(1) - - -try: - opts, args = getopt.getopt(sys.argv[1:], "a:", ["alert="]) -except getopt.GetoptError: - usage() - -alert_name = "tomcat cpu > 80% on any host" -for opt, arg in opts: - if opt in ("-a", "--alert"): - alert_name = arg - -if len(args) != 1: - usage() - -sdc_token = args[0] - -# -# Instantiate the SDC client -# -sdclient = SdcClient(sdc_token) - -# -# Find notification channels (you need IDs to create an alert). 
-# -notify_channels = [{'type': 'SLACK', 'channel': '#python-sdc-test-alert'}, - {'type': 'EMAIL', 'emailRecipients': ['python-sdc-testing@draios.com', 'test@sysdig.com']}, - {'type': 'SNS', 'snsTopicARNs': ['arn:aws:sns:us-east-1:273107874544:alarms-stg']} - ] - -ok, res = sdclient.get_notification_ids(notify_channels) -if not ok: - print(("Could not get IDs and hence not creating the alert: " + res)) - sys.exit(-1) - -notification_channel_ids = res - -# -# Create the alert. -# -ok, res = sdclient.create_alert( - alert_name, # Alert name. - 'this alert was automatically created using the python Sysdig Cloud library', # Alert description. - 6, # Syslog-encoded severity. 6 means 'info'. - 60, # The alert will fire if the condition is met for at least 60 seconds. - 'avg(cpu.used.percent) > 80', # The condition. - ['host.mac', 'proc.name'], # Segmentation. We want to check this metric for every process on every machine. - 'ANY', - # in case there is more than one tomcat process, this alert will fire when a single one of them crosses the 80% threshold. - 'proc.name = "tomcat"', - # Filter. We want to receive a notification only if the name of the process meeting the condition is 'tomcat'. - notification_channel_ids, - False) # This alert will be disabled when it's created. - -# -# Validate a print the results. -# -print(res) -if not ok: - sys.exit(1) diff --git a/examples/create_dashboard.py b/examples/create_dashboard.py deleted file mode 100755 index 9bee7b97..00000000 --- a/examples/create_dashboard.py +++ /dev/null @@ -1,89 +0,0 @@ -#!/usr/bin/env python -# -# This example shows two easy ways to create a dasboard: using a view as a -# templeate, and copying another dashboard. -# In both cases, a filter is used to define what entities the new dashboard -# will monitor. 
-# - -import getopt -import sys - -from sdcclient import SdMonitorClient - - -# -# Parse arguments -# -def usage(): - print(('usage: %s [-d|--dashboard ] ' % sys.argv[0])) - print('-d|--dashboard: Set name of dashboard to create') - print('You can find your token at https://app.sysdigcloud.com/#/settings/user') - sys.exit(1) - - -try: - opts, args = getopt.getopt(sys.argv[1:], "d:", ["dashboard="]) -except getopt.GetoptError: - usage() - -# Name for the dashboard to create -dashboardName = "Overview by Process" -for opt, arg in opts: - if opt in ("-d", "--dashboard"): - dashboardName = arg - -if len(args) != 1: - usage() - -sdc_token = args[0] - -# -# Instantiate the SDC client -# -sdclient = SdMonitorClient(sdc_token) - -# -# Create the new dashboard, applying to cassandra in production -# - -# Name of the view to copy -viewName = "Overview by Process" -# Filter to apply to the new dashboard. -# Remember that you can use combinations of any segmentation criteria you find -# in Sysdig Cloud Explore page. -# You can also refer to AWS tags by using "cloudProvider.tag.*" metadata or -# agent tags by using "agent.tag.*" metadata -dashboardFilter = 'proc.name = "cassandra"' -print('Creating dashboard from view') -ok, res = sdclient.create_dashboard_from_view(dashboardName, viewName, dashboardFilter) -# -# Check the result -# -if ok: - print('Dashboard created successfully') -else: - print(res) - sys.exit(1) - -# -# Make a Copy the just created dasboard, this time applying it to cassandra in -# the dev namespace -# - -# Name of the dashboard to copy -dashboardCopy = "Copy of {}".format(dashboardName) -# Filter to apply to the new dashboard. Same as above. 
-dashboardFilter = 'proc.name != "cassandra"' - -print('Creating dashboard from dashboard') -ok, res = sdclient.create_dashboard_from_dashboard(dashboardCopy, dashboardName, dashboardFilter) - -# -# Check the result -# -if ok: - print('Dashboard copied successfully') -else: - print(res) - sys.exit(1) diff --git a/examples/create_default_policies.py b/examples/create_default_policies.py deleted file mode 100755 index d7c904fd..00000000 --- a/examples/create_default_policies.py +++ /dev/null @@ -1,43 +0,0 @@ -#!/usr/bin/env python -# -# Create the default set of policies given the falco rules file. -# Existing policies with the same name are unchanged. New policies -# as needed will be added. Returns JSON representing the new -# policies created. -# - -import json -import sys - -from sdcclient import SdSecureClient - - -def usage(): - print(('usage: %s ' % sys.argv[0])) - print('You can find your token at https://secure.sysdig.com/#/settings/user') - sys.exit(1) - - -# -# Parse arguments -# -if len(sys.argv) != 2: - usage() - -sdc_token = sys.argv[1] - -# -# Instantiate the SDC client -# -sdclient = SdSecureClient(sdc_token, 'https://secure.sysdig.com') - -ok, res = sdclient.create_default_policies() - -# -# Return the result -# -if ok: - print((json.dumps(res, indent=2))) -else: - print(res) - sys.exit(1) diff --git a/examples/create_default_policies_v1.py b/examples/create_default_policies_v1.py deleted file mode 100755 index 620ab063..00000000 --- a/examples/create_default_policies_v1.py +++ /dev/null @@ -1,43 +0,0 @@ -#!/usr/bin/env python -# -# Create the default set of policies given the falco rules file. -# Existing policies with the same name are unchanged. New policies -# as needed will be added. Returns JSON representing the new -# policies created. 
-# - -import json -import sys - -from sdcclient import SdSecureClientV1 - - -def usage(): - print(('usage: %s ' % sys.argv[0])) - print('You can find your token at https://secure.sysdig.com/#/settings/user') - sys.exit(1) - - -# -# Parse arguments -# -if len(sys.argv) != 2: - usage() - -sdc_token = sys.argv[1] - -# -# Instantiate the SDC client -# -sdclient = SdSecureClientV1(sdc_token, 'https://secure.sysdig.com') - -ok, res = sdclient.create_default_policies() - -# -# Return the result -# -if ok: - print((json.dumps(res, indent=2))) -else: - print(res) - sys.exit(1) diff --git a/examples/create_sysdig_capture.py b/examples/create_sysdig_capture.py deleted file mode 100755 index bbf5d31f..00000000 --- a/examples/create_sysdig_capture.py +++ /dev/null @@ -1,61 +0,0 @@ -#!/usr/bin/env python -# -# Creates a sysdig capture, waits for termination and prints the download URL. -# - -import sys -import time - -from sdcclient import SdcClient - -# -# Parse arguments -# -if len(sys.argv) not in (5, 6): - print(('usage: %s hostname capture_name duration [filter]' % sys.argv[0])) - print('You can find your token at https://app.sysdigcloud.com/#/settings/user') - sys.exit(1) - -sdc_token = sys.argv[1] -hostname = sys.argv[2] -capture_name = sys.argv[3] -duration = sys.argv[4] -capture_filter = '' - -if len(sys.argv) == 6: - capture_filter = sys.argv[5] - -# -# Instantiate the SDC client -# -sdclient = SdcClient(sdc_token) - -ok, res = sdclient.create_sysdig_capture(hostname, capture_name, int(duration), capture_filter) - -# -# Show the list of metrics -# -if ok: - capture = res['dump'] -else: - print(res) - sys.exit(1) - -while True: - ok, res = sdclient.poll_sysdig_capture(capture) - if ok: - capture = res['dump'] - else: - print(res) - sys.exit(1) - - print(('Capture is in state ' + capture['status'])) - if capture['status'] in ('requested', 'capturing', 'uploading'): - pass - elif capture['status'] in ('error', 'uploadingError'): - sys.exit(1) - elif capture['status'] in 
('done', 'uploaded'): - print(('Download at: ' + sdclient.url + capture['downloadURL'])) - sys.exit(0) - - time.sleep(1) diff --git a/examples/dashboard.py b/examples/dashboard.py deleted file mode 100755 index 2441a4e9..00000000 --- a/examples/dashboard.py +++ /dev/null @@ -1,155 +0,0 @@ -#!/usr/bin/env python -# -# This example shows various functions to create a new dashboard or find an existing on, -# edit the content, and then delete it. -# - -import getopt -import sys - -from sdcclient import SdMonitorClient - - -# -# Parse arguments -# -def usage(): - print(('usage: %s [-d|--dashboard ] ' % sys.argv[0])) - print('-d|--dashboard: Set name of dashboard to create') - print('You can find your token at https://app.sysdigcloud.com/#/settings/user') - sys.exit(1) - - -try: - opts, args = getopt.getopt(sys.argv[1:], "d:", ["dashboard="]) -except getopt.GetoptError: - usage() - -dashboard_name = "My Dashboard" -for opt, arg in opts: - if opt in ("-d", "--dashboard"): - dashboard_name = arg - -if len(args) != 1: - usage() - -sdc_token = args[0] - -# -# Instantiate the SDC client -# -sdclient = SdMonitorClient(sdc_token) - -# -# Create an empty dashboard -# -dashboard_configuration = None -ok, res = sdclient.create_dashboard(dashboard_name) - -# Check the result -if ok: - print(('Dashboard %d created successfully' % res['dashboard']['id'])) - dashboard_configuration = res['dashboard'] -else: - print(res) - sys.exit(1) - -# -# Find a dashboard by name -# -ok, res = sdclient.find_dashboard_by(dashboard_name) - -# Check the result -if ok and len(res) > 0: - print('Dashboard found') - dashboard_configuration = res[0]['dashboard'] -else: - print(res) - sys.exit(1) - -# -# Add a time series -# -panel_name = 'CPU Over Time' -panel_type = 'timeSeries' -metrics = [ - {'id': 'proc.name'}, - {'id': 'cpu.used.percent', 'aggregations': {'time': 'avg', 'group': 'avg'}} -] -scope = 'proc.name = "cassandra"' -ok, res = sdclient.add_dashboard_panel(dashboard_configuration, panel_name, 
panel_type, metrics, scope=scope) - -# Check the result -if ok: - print('Panel added successfully') - dashboard_configuration = res['dashboard'] -else: - print(res) - sys.exit(1) - -# -# Add a top bar chart -# -panel_name = 'CPU by host' -panel_type = 'top' -metrics = [ - {'id': 'host.hostName'}, - {'id': 'cpu.used.percent', 'aggregations': {'time': 'avg', 'group': 'avg'}} -] -sort_direction = 'desc' -limit = 10 -layout = {'col': 1, 'row': 7, 'size_x': 12, 'size_y': 6} -ok, res = sdclient.add_dashboard_panel(dashboard_configuration, panel_name, panel_type, metrics, - sort_direction=sort_direction, limit=limit, layout=layout) - -# Check the result -if ok: - print('Panel added successfully') - dashboard_configuration = res['dashboard'] -else: - print(res) - sys.exit(1) - -# -# Add a number panel -# -panel_name = 'CPU' -panel_type = 'number' -metrics = [ - {'id': 'cpu.used.percent', 'aggregations': {'time': 'avg', 'group': 'avg'}} -] -layout = {'col': 1, 'row': 13, 'size_x': 12, 'size_y': 6} -ok, res = sdclient.add_dashboard_panel(dashboard_configuration, panel_name, panel_type, metrics, layout=layout) - -# Check the result -if ok: - print('Panel added successfully') - dashboard_configuration = res['dashboard'] -else: - print(res) - sys.exit(1) - -# -# Remove a panel -# -ok, res = sdclient.remove_dashboard_panel(dashboard_configuration, 'CPU Over Time') - -# Check the result -if ok: - print('Panel removed successfully') - dashboard_configuration = res['dashboard'] -else: - print(res) - sys.exit(1) - -# -# Delete the dashboard -# -ok, res = sdclient.delete_dashboard(dashboard_configuration) - -# Check the result -if ok: - print('Dashboard deleted successfully') -else: - print(res) - sys.exit(1) diff --git a/examples/dashboard_backup_v1_restore_v2.py b/examples/dashboard_backup_v1_restore_v2.py deleted file mode 100755 index f33d9ba0..00000000 --- a/examples/dashboard_backup_v1_restore_v2.py +++ /dev/null @@ -1,64 +0,0 @@ -#!/usr/bin/env python -# -# Save the first user 
dashboard to file and then use create_dashboard_from_file() -# to apply the stored dasboard again with a different filter. -# -import sys - -from sdcclient import SdMonitorClient -from sdcclient import SdMonitorClientV1 - -# -# Parse arguments -# -if len(sys.argv) != 5: - print(( - 'usage: %s ' - % sys.argv[0])) - print( - 'You can find your token at https://app.sysdigcloud.com/#/settings/user' - ) - sys.exit(1) - -sdc_v1_url = sys.argv[1] -sdc_v1_token = sys.argv[2] -sdc_v2_url = sys.argv[3] -sdc_v2_token = sys.argv[4] - -# -# Instantiate the SDC client -# -sdclient_v2 = SdMonitorClient(sdc_v2_token, sdc_url=sdc_v2_url) -sdclient_v1 = SdMonitorClientV1(sdc_v1_token, sdc_url=sdc_v1_url) - -# -# Serialize the first user dashboard to disk -# -ok, res = sdclient_v1.get_dashboards() - -if not ok: - print(res) - sys.exit(1) - -for dashboard in res['dashboards']: - file_name = '{}.json'.format(dashboard['id']) - print(('Saving v1 dashboard {} to file {}...'.format( - dashboard['name'], file_name))) - sdclient_v1.save_dashboard_to_file(dashboard, file_name) - - print('Importing dashboard to v2...') - ok, res = sdclient_v2.create_dashboard_from_file( - 'import of {}'.format(dashboard['name']), - file_name, - None, - shared=dashboard['isShared'], - public=dashboard['isPublic']) - - if ok: - print(('Dashboard {} imported!'.format(dashboard['name']))) - sdclient_v2.delete_dashboard(res['dashboard']) - else: - print(('Dashboard {} import failed:'.format(dashboard['name']))) - print(res) - - print('\n') diff --git a/examples/dashboard_basic_crud.py b/examples/dashboard_basic_crud.py deleted file mode 100755 index 89945e29..00000000 --- a/examples/dashboard_basic_crud.py +++ /dev/null @@ -1,78 +0,0 @@ -#!/usr/bin/env python -# -# Simple example of dashboard creation, retrieval, updating, and deletion. 
-# -import sys -import uuid - -from sdcclient import SdMonitorClient - -# -# Parse arguments -# -if len(sys.argv) != 2: - print(('usage: %s ' % sys.argv[0])) - print('You can find your token at https://app.sysdigcloud.com/#/settings/user') - sys.exit(1) - -sdc_token = sys.argv[1] - -# -# Instantiate the SDC client -# -sdclient = SdMonitorClient(sdc_token) - -# -# Create Dashboard. -# -ok, res = sdclient.create_dashboard("Sample dashboard - " + uuid.uuid4().hex) - -# -# Check for successful creation -# -if not ok: - print(res) - sys.exit(1) - -dashboard = res['dashboard'] - -# -# Get Dashboard. -# -ok, res = sdclient.get_dashboard(dashboard['id']) - -# -# Check for successful retrieval -# -if not ok: - print(res) - sys.exit(1) - -dashboard = res['dashboard'] - -# -# Update Dashboard. -# -dashboard['name'] = "Let's change the dashboard name. " + uuid.uuid4().hex -ok, res = sdclient.update_dashboard(dashboard) - -# -# Check for successful update -# -if not ok: - print(res) - sys.exit(1) - -dashboard = res['dashboard'] - -# -# Delete Dashboard. -# -ok, res = sdclient.delete_dashboard(dashboard) - -# -# Check for successful delete -# -if not ok: - print(res) - sys.exit(1) diff --git a/examples/dashboard_ibm_cloud.py b/examples/dashboard_ibm_cloud.py deleted file mode 100755 index ad9745e1..00000000 --- a/examples/dashboard_ibm_cloud.py +++ /dev/null @@ -1,83 +0,0 @@ -#!/usr/bin/env python - -# This example uses IBM Cloud IAM authentication and makes a few calls to the -# Dashboard API as validation. Creates, edits and then deletes a dashboard. - -import sys - -from sdcclient import IbmAuthHelper, SdMonitorClient - - -# Parse arguments. 
-def usage(): - print(('usage: %s ' % sys.argv[0])) - print('endpoint-url: The endpoint URL that should point to IBM Cloud') - print('apikey: IBM Cloud IAM apikey that will be used to retrieve an access token') - print('instance-guid: GUID of an IBM Cloud Monitoring with Sysdig instance') - sys.exit(1) - - -if len(sys.argv) != 4: - usage() - -URL = sys.argv[1] -APIKEY = sys.argv[2] -GUID = sys.argv[3] -DASHBOARD_NAME = 'IBM Cloud IAM with Python Client Example' -PANEL_NAME = 'CPU Over Time' - -# Instantiate the client with an IBM Cloud auth object -ibm_headers = IbmAuthHelper.get_headers(URL, APIKEY, GUID) -sdclient = SdMonitorClient(sdc_url=URL, custom_headers=ibm_headers) - -# Create an empty dashboard -ok, res = sdclient.create_dashboard(DASHBOARD_NAME) - -# Check the result -dashboard_configuration = None -if ok: - print(('Dashboard %d created successfully' % res['dashboard']['id'])) - dashboard_configuration = res['dashboard'] -else: - print(res) - sys.exit(1) - -# Add a time series panel -panel_type = 'timeSeries' -metrics = [ - {'id': 'proc.name'}, - {'id': 'cpu.used.percent', 'aggregations': {'time': 'avg', 'group': 'avg'}} -] -ok, res = sdclient.add_dashboard_panel( - dashboard_configuration, PANEL_NAME, panel_type, metrics) - -# Check the result -if ok: - print('Panel added successfully') - dashboard_configuration = res['dashboard'] -else: - print(res) - sys.exit(1) - -# Remove the time series panel -ok, res = sdclient.remove_dashboard_panel(dashboard_configuration, PANEL_NAME) - -# Check the result -if ok: - print('Panel removed successfully') - dashboard_configuration = res['dashboard'] -else: - print(res) - sys.exit(1) - -# Delete the dashboard -ok, res = sdclient.delete_dashboard(dashboard_configuration) - -# Check the result -if ok: - print('Dashboard deleted successfully') -else: - print(res) - sys.exit(1) - -print('IBM Cloud IAM auth worked successfully!') diff --git a/examples/dashboard_save_load.py b/examples/dashboard_save_load.py deleted file 
mode 100755 index 8cb5924c..00000000 --- a/examples/dashboard_save_load.py +++ /dev/null @@ -1,52 +0,0 @@ -#!/usr/bin/env python -# -# Save the first user dashboard to file and then use create_dashboard_from_file() -# to apply the stored dasboard again with a different filter. -# -import sys - -from sdcclient import SdMonitorClient - -# -# Parse arguments -# -if len(sys.argv) != 2: - print(('usage: %s ' % sys.argv[0])) - print('You can find your token at https://app.sysdigcloud.com/#/settings/user') - sys.exit(1) - -sdc_token = sys.argv[1] - -# -# Instantiate the SDC client -# -sdclient = SdMonitorClient(sdc_token) - -# -# Serialize the first user dashboard to disk -# -ok, res = sdclient.get_dashboards() - -if not ok: - print(res) - sys.exit(1) - -if len(res['dashboards']) > 0: - sdclient.save_dashboard_to_file(res['dashboards'][0], 'dashboard.json') -else: - print('the user has no dashboards. Exiting.') - sys.exit(0) - -# -# Now create the dashboard from the file. We use a filter for the Cassandra process -# as an example. -# -dashboardFilter = 'proc.name = "cassandra"' - -ok, res = sdclient.create_dashboard_from_file('test dasboard from file', 'dashboard.json', dashboardFilter) - -if ok: - print('Dashboard created successfully') -else: - print(res) - sys.exit(1) diff --git a/examples/dashboard_scope.py b/examples/dashboard_scope.py deleted file mode 100755 index f1c95698..00000000 --- a/examples/dashboard_scope.py +++ /dev/null @@ -1,67 +0,0 @@ -#!/usr/bin/env python -# -# This example shows some examples of scope you can use for dashboards. -# - -import sys - -from sdcclient import SdcClient - - -# -# Scopes can be passed to most of dashboard-related functions, e.g. create_dashboard_from_file. 
-# -# NOTE: convert_scope_string_to_expression should never be used in a user script -# We're going to use it here just to demonstrate some scope options and some constraints -# -def evaluate(scope, expected): - parsed_scope = SdcClient.convert_scope_string_to_expression(scope) - print('{} is valid: {}'.format(scope, parsed_scope[0] is True)) - - if parsed_scope[0] != expected: - print('Unexpected parsing result!') - sys.exit(1) - - -# simple example: tag = value -evaluate('proc.name = "cassandra"', True) - -# NOTE: For now you can still leave values without quotes. -# The API will be more strict, so please make sure you adopt the new format! -evaluate('proc.name = cassandra', True) - -# other operators -evaluate('proc.name != "cassandra"', True) -evaluate('proc.name starts with "cassandra"', True) -evaluate('proc.name contains "cassandra"', True) - -# list operators -evaluate('proc.name in ("cassandra", "mysql")', True) - -# not-ed expressions -evaluate('not proc.name starts with "cassandra"', True) -evaluate('not proc.name contains "cassandra"', True) -evaluate('not proc.name in ("cassandra", "mysql")', True) - -# you can combine multiple expressions; note that only AND'd scopes are currently supported -evaluate('kubernetes.service.name = "database" and proc.name = "cassandra"', True) - -# the scope can obviously be omitted in the dashboard configuration -evaluate('', True) -evaluate(None, True) - -# invalid scopes will cause errors -evaluate('proc.name == "cassandra"', False) # invalid operator - -# currently, one space is required around operands and operators -- improvements will come soon -evaluate('proc.name="cassandra"', False) - -# -# The current grammar is unable to validate all errors -- in these cases, the API will fail! -# Improvements will come soon! 
-# -# Here some errors that will not be detected by the Python library, but the API will -# -evaluate('proc.name = "cassandra" or proc.name = "mysql"', True) # not AND'd expressions are supported -evaluate('proc.name in ("cassandra\', \'mysql")', True) # mismatching quotes -evaluate('proc.name in ("cassandra", "mysql"', True) # missing parenthesis diff --git a/examples/delete_alert.py b/examples/delete_alert.py deleted file mode 100755 index dd3cfe84..00000000 --- a/examples/delete_alert.py +++ /dev/null @@ -1,53 +0,0 @@ -#!/usr/bin/env python -# -# This example shows how to delete an alert -# - -import getopt -import sys - -from sdcclient import SdcClient - - -# -# Parse arguments -# -def usage(): - print(('usage: %s [-a|--alert ] ' % sys.argv[0])) - print('-a|--alert: Set name of alert to delete') - print('You can find your token at https://app.sysdigcloud.com/#/settings/user') - sys.exit(1) - - -try: - opts, args = getopt.getopt(sys.argv[1:], "a:", ["alert="]) -except getopt.GetoptError: - usage() - -alert_name = "tomcat cpu > 80% on any host" -for opt, arg in opts: - if opt in ("-a", "--alert"): - alert_name = arg - -if len(args) != 1: - usage() - -sdc_token = args[0] - -# -# Instantiate the SDC client -# -sdclient = SdcClient(sdc_token) - -ok, res = sdclient.get_alerts() -if not ok: - print(res) - sys.exit(1) - -for alert in res['alerts']: - if alert['name'] == alert_name: - print("Deleting alert") - ok, res = sdclient.delete_alert(alert) - if not ok: - print(res) - sys.exit(1) diff --git a/examples/delete_all_policies.py b/examples/delete_all_policies.py deleted file mode 100755 index 0cb3a1c9..00000000 --- a/examples/delete_all_policies.py +++ /dev/null @@ -1,45 +0,0 @@ -#!/usr/bin/env python -# -# Delete all secure policies. 
-# - -import sys - -from sdcclient import SdSecureClient - - -def usage(): - print(('usage: %s ' % sys.argv[0])) - print('You can find your token at https://secure.sysdig.com/#/settings/user') - sys.exit(1) - - -# -# Parse arguments -# -if len(sys.argv) != 2: - usage() - -sdc_token = sys.argv[1] - -# -# Instantiate the SDC client -# -sdclient = SdSecureClient(sdc_token, 'https://secure.sysdig.com') - -# Get a list of policyIds -ok, res = sdclient.list_policies() -policies = [] - -if not ok: - print(res) - sys.exit(1) -else: - policies = res - -for policy in policies: - print(("deleting policy: " + str(policy['id']))) - ok, res = sdclient.delete_policy_id(policy['id']) - if not ok: - print(res) - sys.exit(1) diff --git a/examples/delete_all_policies_v1.py b/examples/delete_all_policies_v1.py deleted file mode 100755 index b13a3d27..00000000 --- a/examples/delete_all_policies_v1.py +++ /dev/null @@ -1,45 +0,0 @@ -#!/usr/bin/env python -# -# Delete all secure policies. -# - -import sys - -from sdcclient import SdSecureClientV1 - - -def usage(): - print(('usage: %s ' % sys.argv[0])) - print('You can find your token at https://secure.sysdig.com/#/settings/user') - sys.exit(1) - - -# -# Parse arguments -# -if len(sys.argv) != 2: - usage() - -sdc_token = sys.argv[1] - -# -# Instantiate the SDC client -# -sdclient = SdSecureClientV1(sdc_token, 'https://secure.sysdig.com') - -# Get a list of policyIds -ok, res = sdclient.list_policies() -policies = [] - -if not ok: - print(res) - sys.exit(1) -else: - policies = res['policies'] - -for policy in policies: - print(("deleting policy: " + str(policy['id']))) - ok, res = sdclient.delete_policy_id(policy['id']) - if not ok: - print(res) - sys.exit(1) diff --git a/examples/delete_dashboard.py b/examples/delete_dashboard.py deleted file mode 100755 index 220a4122..00000000 --- a/examples/delete_dashboard.py +++ /dev/null @@ -1,59 +0,0 @@ -#!/usr/bin/env python -# -# This example shows how to delete a dashboard -# - -import getopt 
-import sys - -from sdcclient import SdMonitorClient - - -# -# Parse arguments -# -def usage(): - print(('usage: %s [-p|--pattern ] ' % sys.argv[0])) - print('-p|--pattern: Delete all dashboards containing the provided pattern') - print('You can find your token at https://app.sysdigcloud.com/#/settings/user') - sys.exit(1) - - -try: - opts, args = getopt.getopt(sys.argv[1:], "p:", ["pattern="]) -except getopt.GetoptError: - usage() - -pattern = "API Test" -for opt, arg in opts: - if opt in ("-p", "--pattern"): - pattern = arg - -if len(args) != 1: - usage() - -sdc_token = args[0] - -# -# Instantiate the SDC client -# -sdclient = SdMonitorClient(sdc_token) - -# -# List the dashboards -# -ok, res = sdclient.get_dashboards() -if not ok: - print(res) - sys.exit(1) - -# -# Delete all the dashboards containing pattern -# -for dashboard in res['dashboards']: - if pattern in dashboard['name']: - print(("Deleting " + dashboard['name'])) - ok, res = sdclient.delete_dashboard(dashboard) - if not ok: - print(res) - sys.exit(1) diff --git a/examples/delete_event.py b/examples/delete_event.py deleted file mode 100755 index 44b6fdab..00000000 --- a/examples/delete_event.py +++ /dev/null @@ -1,62 +0,0 @@ -#!/usr/bin/env python -# -# Delete user events from Sysdig Cloud -# - -import getopt -import sys - -from sdcclient import SdcClient, SdMonitorClient - -# -# Parse arguments -# -from sdcclient.monitor import EventsClientV2 - - -def usage(): - print(('usage: %s [-e|--event ] ' % sys.argv[0])) - print('-e|--event: Name of event to delete') - print('You can find your token at https://app.sysdigcloud.com/#/settings/user') - sys.exit(1) - - -try: - opts, args = getopt.getopt(sys.argv[1:], "e:", ["event="]) -except getopt.GetoptError: - usage() - -event_name = "test_event_name" -for opt, arg in opts: - if opt in ("-e", "--event"): - event_name = arg - -if len(args) != 1: - usage() - -sdc_token = args[0] - -# -# Instantiate the SDC client -# -sdclient = SdMonitorClient(sdc_token) - -# -# 
Get the events that match a name -# -ok, res = sdclient.get_events(name=event_name) - -if not ok: - print(res) - sys.exit(1) - -# -# Delete the first event among the returned ones -# -for event in res['events']: - print(("Deleting event " + event['name'])) - - ok, res = sdclient.delete_event(event) - if not ok: - print(res) - sys.exit(1) diff --git a/examples/delete_policy.py b/examples/delete_policy.py deleted file mode 100755 index c644da2d..00000000 --- a/examples/delete_policy.py +++ /dev/null @@ -1,62 +0,0 @@ -#!/usr/bin/env python -# -# Delete a policy, by either id or name. -# - -import getopt -import json -import sys - -from sdcclient import SdSecureClient - - -def usage(): - print(('usage: %s [-i|--id ] [-n|--name ] ' % sys.argv[0])) - print('-i|--id: the id of the policy to delete') - print('-n|--name: the name of the policy to delete') - print('You can find your token at https://secure.sysdig.com/#/settings/user') - sys.exit(1) - - -# -# Parse arguments -# -try: - opts, args = getopt.getopt(sys.argv[1:], "i:n:", ["id=", "name="]) -except getopt.GetoptError: - usage() - -id = "" -name = "" -for opt, arg in opts: - if opt in ("-i", "--id"): - id = arg - elif opt in ("-n", "--name"): - name = arg - -if len(id) + len(name) == 0: - usage() - -if len(args) < 1: - usage() - -sdc_token = args[0] - -# -# Instantiate the SDC client -# -sdclient = SdSecureClient(sdc_token, 'https://secure.sysdig.com') - -if len(id) > 0: - ok, res = sdclient.delete_policy_id(id) -else: - ok, res = sdclient.delete_policy_name(name) - -# -# Return the result -# -if ok: - print((json.dumps(res, indent=2))) -else: - print(res) - sys.exit(1) diff --git a/examples/delete_policy_v1.py b/examples/delete_policy_v1.py deleted file mode 100755 index bd8f7f06..00000000 --- a/examples/delete_policy_v1.py +++ /dev/null @@ -1,62 +0,0 @@ -#!/usr/bin/env python -# -# Delete a policy, by either id or name. 
-# - -import getopt -import json -import sys - -from sdcclient import SdSecureClientV1 - - -def usage(): - print(('usage: %s [-i|--id ] [-n|--name ] ' % sys.argv[0])) - print('-i|--id: the id of the policy to delete') - print('-n|--name: the name of the policy to delete') - print('You can find your token at https://secure.sysdig.com/#/settings/user') - sys.exit(1) - - -# -# Parse arguments -# -try: - opts, args = getopt.getopt(sys.argv[1:], "i:n:", ["id=", "name="]) -except getopt.GetoptError: - usage() - -id = "" -name = "" -for opt, arg in opts: - if opt in ("-i", "--id"): - id = arg - elif opt in ("-n", "--name"): - name = arg - -if len(id) + len(name) == 0: - usage() - -if len(args) < 1: - usage() - -sdc_token = args[0] - -# -# Instantiate the SDC client -# -sdclient = SdSecureClientV1(sdc_token, 'https://secure.sysdig.com') - -if len(id) > 0: - ok, res = sdclient.delete_policy_id(id) -else: - ok, res = sdclient.delete_policy_name(name) - -# -# Return the result -# -if ok: - print((json.dumps(res, indent=2))) -else: - print(res) - sys.exit(1) diff --git a/examples/download_dashboards.py b/examples/download_dashboards.py deleted file mode 100755 index 860acc29..00000000 --- a/examples/download_dashboards.py +++ /dev/null @@ -1,85 +0,0 @@ -#!/usr/bin/env python -# -# Save/restore dashboards -# - -import os -import sys -import zipfile - -from sdcclient import SdMonitorClient - - -def zipdir(path, ziph): - # ziph is zipfile handle - for root, dirs, files in os.walk(path): - for file in files: - ziph.write(os.path.join(root, file)) - - -def cleanup_dir(path): - if not os.path.exists(path): - return - if not os.path.isdir(path): - print('Provided path is not a directory') - sys.exit(-1) - - for file in os.listdir(path): - file_path = os.path.join(path, file) - try: - if os.path.isfile(file_path): - os.unlink(file_path) - else: - print(('Cannot clean the provided directory due to delete failure on %s' % file_path)) - except Exception as e: - print(e) - os.rmdir(path) 
- - -# -# Parse arguments -# -if len(sys.argv) != 3: - print(('usage: %s ' % sys.argv[0])) - print('You can find your token at https://app.sysdigcloud.com/#/settings/user') - sys.exit(1) - -sdc_token = sys.argv[1] -dashboard_state_file = sys.argv[2] -sysdig_dashboard_dir = 'sysdig-dashboard-dir' - -# -# Instantiate the SDC client -# -sdclient = SdMonitorClient(sdc_token) - -# -# Fire the request. -# -ok, res = sdclient.get_dashboards() - -# -# Show the list of dashboards -# -if not ok: - print(res) - sys.exit(1) - -# Clean up any state in the tmp directory -cleanup_dir(sysdig_dashboard_dir) - -# Creating sysdig dashboard directory to store dashboards -if not os.path.exists(sysdig_dashboard_dir): - os.makedirs(sysdig_dashboard_dir) - -for db in res['dashboards']: - sdclient.save_dashboard_to_file(db, os.path.join(sysdig_dashboard_dir, str(db['id']))) - - print(("Name: %s, # Charts: %d" % (db['name'], len(db['widgets'])))) - -zipf = zipfile.ZipFile(dashboard_state_file, 'w', zipfile.ZIP_DEFLATED) -zipdir(sysdig_dashboard_dir, zipf) -zipf.close() - -# Clean up any state in the directory -cleanup_dir(sysdig_dashboard_dir) diff --git a/examples/flip_alerts_enabled.py b/examples/flip_alerts_enabled.py deleted file mode 100755 index 62075854..00000000 --- a/examples/flip_alerts_enabled.py +++ /dev/null @@ -1,69 +0,0 @@ -#!/usr/bin/env python -# -# This script shows how to use the update_alert() call to modify the -# details of an existing alert. 
-# -# - -import getopt -import sys - -from sdcclient import SdcClient - - -# -# Parse arguments -# -def usage(): - print(('usage: %s [-a|--alert ] ' % sys.argv[0])) - print('-a|--alert: Comma seperated list of alerts') - print('You can find your token at https://app.sysdigcloud.com/#/settings/user') - sys.exit(1) - - -try: - opts, args = getopt.getopt(sys.argv[1:], "a:", ["alert="]) -except getopt.GetoptError: - usage() - -alert_list = "95% CPU" -for opt, arg in opts: - if opt in ("-a", "--alert"): - alert_list = arg - -if len(args) != 1: - usage() - -sdc_token = args[0] - -# -# Instantiate the SDC client -# -sdclient = SdcClient(sdc_token) - -ok, res = sdclient.get_alerts() -if not ok: - print(res) - sys.exit(1) - -alert_found = False -for alert in res['alerts']: - if alert['name'] in alert_list: - alert_found = True - print(("Updating \'" + alert['name'] + "\'. Enabled status before change:")) - print((alert['enabled'])) - alert['enabled'] = not alert['enabled'] - ok, res_update = sdclient.update_alert(alert) - - if not ok: - print(res_update) - sys.exit(1) - - # Validate and print the results - print('Alert status after modification:') - print((alert['enabled'])) - print(' ') - -if not alert_found: - print('Alert to be updated not found') - sys.exit(1) diff --git a/examples/get_agents_config.py b/examples/get_agents_config.py deleted file mode 100755 index f13785b0..00000000 --- a/examples/get_agents_config.py +++ /dev/null @@ -1,45 +0,0 @@ -#!/usr/bin/env python -# -# Get the sysdig cloud agents configuration as yaml and print it on the screen. -# Agents configuration settings can be managed in a centralized way through the API -# This script downloads the settings and its result can be edited and the used from -# the set_agents_config script. 
-# - -import sys - -from sdcclient import SdcClient - -# -# Parse arguments -# -if len(sys.argv) != 2: - print(('usage: %s ' % sys.argv[0])) - print('You can find your token at https://app.sysdigcloud.com/#/settings/user') - sys.exit(1) - -sdc_token = sys.argv[1] - -# -# Instantiate the SDC client -# -sdclient = SdcClient(sdc_token, 'https://app.sysdigcloud.com') - -# -# Get the configuration -# -ok, res = sdclient.get_agents_config() - -# -# Return the result -# -if ok: - if not ("files" in res) or len(res["files"]) == 0: - print("No current auto configuration") - else: - print("Current contents of config file:") - print("--------------------------------") - print((res["files"][0]["content"])) - print("--------------------------------") -else: - print(res) diff --git a/examples/get_anchore_users_account.py b/examples/get_anchore_users_account.py deleted file mode 100644 index e7703654..00000000 --- a/examples/get_anchore_users_account.py +++ /dev/null @@ -1,39 +0,0 @@ -#!/usr/bin/env python -# -# Get a specific anchore user account -# - -import sys - -from sdcclient import SdScanningClient - - -def usage(): - print(('usage: %s ' % sys.argv[0])) - print('You can find your token at https://secure.sysdig.com/#/settings/user') - sys.exit(1) - - -# -# Parse arguments -# -if len(sys.argv) != 2: - usage() - -sdc_token = sys.argv[1] - -# -# Instantiate the SDC client -# -sdclient = SdScanningClient(sdc_token, 'https://secure.sysdig.com') - -ok, res = sdclient.get_anchore_users_account() - -# -# Return the result -# -if ok: - print(("Anchore User Info %s" % res)) -else: - print(res) - sys.exit(1) diff --git a/examples/get_data_advanced.py b/examples/get_data_advanced.py deleted file mode 100755 index a1959aa6..00000000 --- a/examples/get_data_advanced.py +++ /dev/null @@ -1,76 +0,0 @@ -#!/usr/bin/env python -# -# This script shows an advanced Sysdig Monitor data request that leverages -# filtering and segmentation. 
-# -# The request returns the last 10 minutes of CPU utilization for the 5 -# busiest containers inside the given host, with 1 minute data granularity -# - -import json -import sys - -from sdcclient import SdcClient - -# -# Parse arguments -# -if len(sys.argv) != 3: - print(('usage: %s ' % sys.argv[0])) - print('You can find your token at https://app.sysdigcloud.com/#/settings/user') - sys.exit(1) - -sdc_token = sys.argv[1] -hostname = sys.argv[2] - -# -# Instantiate the SDC client -# -sdclient = SdcClient(sdc_token) - -# -# Prepare the metrics list. -# -metrics = [ - # The first metric we request is the container name. This is a segmentation - # metric, and you can tell by the fact that we don't specify any aggregation - # criteria. This entry tells Sysdig Monitor that we want to see the CPU - # utilization for each container separately. - {"id": "container.name"}, - # The second metric we request is the CPU. We aggregate it as an average. - {"id": "cpu.used.percent", - "aggregations": { - "time": "avg", - "group": "avg" - } - } -] - -# -# Prepare the filter -# -filter = "host.hostName = '%s'" % hostname - -# -# Paging (from and to included; by default you get from=0 to=9) -# Here we'll get the top 5. -# -paging = {"from": 0, "to": 4} - -# -# Fire the query. -# -ok, res = sdclient.get_data(metrics=metrics, # List of metrics to query - start_ts=-600, # Start of query span is 600 seconds ago - end_ts=0, # End the query span now - sampling_s=60, # 1 data point per minute - filter=filter, # The filter specifying the target host - paging=paging, # Paging to limit to just the 5 most busy - datasource_type='container') # The source for our metrics is the container - -# -# Show the result! 
-# -print((json.dumps(res, sort_keys=True, indent=4))) -if not ok: - sys.exit(1) diff --git a/examples/get_data_datasource.py b/examples/get_data_datasource.py deleted file mode 100755 index e7625079..00000000 --- a/examples/get_data_datasource.py +++ /dev/null @@ -1,106 +0,0 @@ -#!/usr/bin/env python -# -# This script shows the use of the datasource_type argument in the get_data request, -# by providing a few clarifying examples -# - -import sys - -from sdcclient import SdcClient - -# -# Parse arguments -# -if len(sys.argv) != 2: - print(('usage: %s ' % sys.argv[0])) - print('You can find your token at https://app.sysdigcloud.com/#/settings/user') - sys.exit(1) - -sdc_token = sys.argv[1] - -# -# Instantiate the SDC client -# -sdclient = SdcClient(sdc_token) - -cpu_metric = [ - {"id": "cpu.used.percent", - "aggregations": { - "time": "avg", - "group": "avg" - } - }] - -# -# First example: CPU by host name -# datasource_type is not necessary since it's infered from the grouping key host.hostName -# -req = [{"id": "host.hostName"}] -req.extend(cpu_metric) -ok, res = sdclient.get_data(req, # metrics list - -600, # start_ts = 600 seconds ago - 0) # end_ts = now - -if ok: - data = res -else: - print(res) - sys.exit(1) - -print("\n\nCPU by host:") -print(data) - -# -# Second example: CPU by container name -# datasource_type is not necessary since it's infered from the grouping key container.name -# -req = [{"id": "container.name"}] -req.extend(cpu_metric) -ok, res = sdclient.get_data(req, # metrics list - -600, # start_ts = 600 seconds ago - 0) # end_ts = now - -if ok: - data = res -else: - print(res) - sys.exit(1) - -print("\n\nCPU by container:") -print(data) - -# -# Third example: CPU average across all hosts -# datasource_type is set to host since no grouping keys or filters are specified (default would be host anyway) -# -ok, res = sdclient.get_data(cpu_metric, # metrics list - -600, # start_ts = 600 seconds ago - 0, # end_ts = now - datasource_type='host') # ask 
data from hosts - -if ok: - data = res -else: - print(res) - sys.exit(1) - -print("\n\nAverage CPU across all the hosts in the infrastructure:") -print(data) - -# -# Third example: CPU average across all containers -# datasource_type is set to container since no grouping keys or filters are specified (ovverrides the host default) -# -ok, res = sdclient.get_data(cpu_metric, # metrics list - -600, # start_ts = 600 seconds ago - 0, # end_ts = now - datasource_type='container') # ask data from containers - -if ok: - data = res -else: - print(res) - sys.exit(1) - -print("\n\nAverage CPU across all the containers in the infrastructure:") -print(data) diff --git a/examples/get_data_simple.py b/examples/get_data_simple.py deleted file mode 100755 index 2da2ffb6..00000000 --- a/examples/get_data_simple.py +++ /dev/null @@ -1,116 +0,0 @@ -#!/usr/bin/env python -# -# This script shows the basics of getting data out of Sysdig Monitor by creating a -# very simple request that has no filter and no segmentation. -# -# The request queries for the average CPU across all of the instrumented hosts for -# the last 10 minutes, with 1 minute data granularity -# - -import sys - -from sdcclient import SdcClient - -# -# Parse arguments -# -if len(sys.argv) != 2: - print(('usage: %s ' % sys.argv[0])) - print('You can find your token at https://app.sysdigcloud.com/#/settings/user') - sys.exit(1) - -sdc_token = sys.argv[1] - -sdclient = SdcClient(sdc_token) - -# -# List of metrics to export. Imagine a SQL data table, with key columns and value columns -# You just need to specify the ID for keys, and ID with aggregation for values. 
-# -metrics = [ - # {"id": "container.id"}, - # {"id": "agent.tag.env", "aggregations": {"time": "concat", "group": "concat"}}, - {"id": "cpu.used.percent", "aggregations": {"time": "timeAvg", "group": "avg"}} -] - -# -# Data filter or None if you want to see "everything" -# -filter = None - -# -# Time window: -# - for "from A to B": start is equal to A, end is equal to B (expressed in seconds) -# - for "last X seconds": start is equal to -X, end is equal to 0 -# -start = -600 -end = 0 - -# -# Sampling time: -# - for time series: sampling is equal to the "width" of each data point (expressed in seconds) -# - for aggregated data (similar to bar charts, pie charts, tables, etc.): sampling is equal to 0 -# -sampling = 60 - -# -# Load data -# -ok, res = sdclient.get_data(metrics, start, end, sampling, filter=filter) - -# -# Show the result -# -if ok: - # - # Read response. The JSON looks like this: - # - # { - # start: timestamp, - # end: timestamp, - # data: [ - # { - # t: timestamp, - # d: [ value1, value2, value3, ... ] - # }, - # ... - # ] - # } - # - colLen = 25 - - # - # Print summary (what, when) - # - start = res['start'] - end = res['end'] - data = res['data'] - - print(('Data for %s from %d to %d' % (filter if filter else 'everything', start, end))) - print('') - - # - # Print table headers - # - dataToPrint = ' '.join([str(x['id']).ljust(colLen) if len(str(x['id'])) < colLen else str(x['id'])[ - :(colLen - 3)].ljust( - colLen - 3) + '...' for x in metrics]) - print(('%s %s' % ('timestamp'.ljust(colLen), dataToPrint) if sampling > 0 else dataToPrint)) - print('') - - # - # Print table body - # - for d in data: - timestamp = d['t'] if sampling > 0 else start - values = d['d'] - - dataToPrint = ' '.join( - [str(x).ljust(colLen) if len(str(x)) < colLen else str(x)[:(colLen - 3)].ljust(colLen - 3) + '...' 
for x in - values]) - - print(('%s %s' % (('' % (timestamp)).ljust(colLen), dataToPrint) if sampling > 0 else dataToPrint)) - -else: - print(res) - sys.exit(1) diff --git a/examples/get_image_info_by_id.py b/examples/get_image_info_by_id.py deleted file mode 100644 index 00857832..00000000 --- a/examples/get_image_info_by_id.py +++ /dev/null @@ -1,40 +0,0 @@ -#!/usr/bin/env python -# -# Get an image scan result given image id -# - -import sys - -from sdcclient import SdScanningClient - - -def usage(): - print(('usage: %s ' % sys.argv[0])) - print('You can find your token at https://secure.sysdig.com/#/settings/user') - sys.exit(1) - - -# -# Parse arguments -# -if len(sys.argv) != 3: - usage() - -sdc_token = sys.argv[1] -image_id_sha = sys.argv[2] - -# -# Instantiate the SDC client -# -sdclient = SdScanningClient(sdc_token, 'https://secure.sysdig.com') - -ok, res = sdclient.get_image_info_by_id(image_id_sha) - -# -# Return the result -# -if ok: - print(("Image Info %s" % res)) -else: - print(res) - sys.exit(1) diff --git a/examples/get_image_scan_result_by_id.py b/examples/get_image_scan_result_by_id.py deleted file mode 100644 index de8b2a87..00000000 --- a/examples/get_image_scan_result_by_id.py +++ /dev/null @@ -1,42 +0,0 @@ -#!/usr/bin/env python -# -# Get an image scan result given image id -# - -import sys - -from sdcclient import SdScanningClient - - -def usage(): - print(('usage: %s ' % sys.argv[0])) - print('You can find your token at https://secure.sysdig.com/#/settings/user') - sys.exit(1) - - -# -# Parse arguments -# -if len(sys.argv) != 5: - usage() - -sdc_token = sys.argv[1] -image_id = sys.argv[2] -full_tag_name = sys.argv[3] -detail = sys.argv[4] - -# -# Instantiate the SDC client -# -sdclient = SdScanningClient(sdc_token, 'https://secure.sysdig.com') - -ok, res = sdclient.get_image_scan_result_by_id(image_id, full_tag_name, detail) - -# -# Return the result -# -if ok: - print(("Image Scan Result %s" % res)) -else: - print(res) - sys.exit(1) diff 
--git a/examples/get_latest_pdf_report_by_digest.py b/examples/get_latest_pdf_report_by_digest.py deleted file mode 100644 index 78495c14..00000000 --- a/examples/get_latest_pdf_report_by_digest.py +++ /dev/null @@ -1,44 +0,0 @@ -#!/usr/bin/env python -# -# Get a specific policy -# - -import sys - -from sdcclient import SdScanningClient - - -def usage(): - print(('usage: %s ' % sys.argv[0])) - print('You can find your token at https://secure.sysdig.com/#/settings/user') - sys.exit(1) - - -# -# Parse arguments -# -if len(sys.argv) != 5: - usage() - -sdc_token = sys.argv[1] -image_digest = sys.argv[2] -full_tag = sys.argv[3] -pdf_path = sys.argv[4] - -# -# Instantiate the SDC client -# -sdclient = SdScanningClient(sdc_token, 'https://secure.sysdig.com') - -ok, res = sdclient.get_latest_pdf_report_by_digest(image_digest, full_tag) - -# -# Return the result -# -if ok: - with open(pdf_path, 'wb') as f: - f.write(res) - print(("PDF %s saved" % pdf_path)) -else: - print(res) - sys.exit(1) diff --git a/examples/get_pdf_report.py b/examples/get_pdf_report.py deleted file mode 100755 index 33136d46..00000000 --- a/examples/get_pdf_report.py +++ /dev/null @@ -1,43 +0,0 @@ -#!/usr/bin/env python -# -# Get a specific policy -# - -import sys - -from sdcclient import SdScanningClient - - -def usage(): - print(('usage: %s ' % sys.argv[0])) - print('You can find your token at https://secure.sysdig.com/#/settings/user') - sys.exit(1) - - -# -# Parse arguments -# -if len(sys.argv) != 4: - usage() - -sdc_token = sys.argv[1] -image = sys.argv[2] -pdf_path = sys.argv[3] - -# -# Instantiate the SDC client -# -sdclient = SdScanningClient(sdc_token, 'https://secure.sysdig.com') - -ok, res = sdclient.get_pdf_report(image) - -# -# Return the result -# -if ok: - with open(pdf_path, 'wb') as f: - f.write(res) - print(("PDF %s saved" % pdf_path)) -else: - print(res) - sys.exit(1) diff --git a/examples/get_policy.py b/examples/get_policy.py deleted file mode 100755 index 99db48c8..00000000 --- 
a/examples/get_policy.py +++ /dev/null @@ -1,41 +0,0 @@ -#!/usr/bin/env python -# -# Get a specific policy -# - -import json -import sys - -from sdcclient import SdSecureClient - - -def usage(): - print(('usage: %s ' % sys.argv[0])) - print('You can find your token at https://secure.sysdig.com/#/settings/user') - sys.exit(1) - - -# -# Parse arguments -# -if len(sys.argv) != 3: - usage() - -sdc_token = sys.argv[1] -name = sys.argv[2] - -# -# Instantiate the SDC client -# -sdclient = SdSecureClient(sdc_token, 'https://secure.sysdig.com') - -ok, res = sdclient.get_policy(name) - -# -# Return the result -# -if ok: - print((json.dumps(res, indent=2))) -else: - print(res) - sys.exit(1) diff --git a/examples/get_policy_v1.py b/examples/get_policy_v1.py deleted file mode 100755 index b94d1e7d..00000000 --- a/examples/get_policy_v1.py +++ /dev/null @@ -1,41 +0,0 @@ -#!/usr/bin/env python -# -# Get a specific policy -# - -import json -import sys - -from sdcclient import SdSecureClientV1 - - -def usage(): - print(('usage: %s ' % sys.argv[0])) - print('You can find your token at https://secure.sysdig.com/#/settings/user') - sys.exit(1) - - -# -# Parse arguments -# -if len(sys.argv) != 3: - usage() - -sdc_token = sys.argv[1] -name = sys.argv[2] - -# -# Instantiate the SDC client -# -sdclient = SdSecureClientV1(sdc_token, 'https://secure.sysdig.com') - -ok, res = sdclient.get_policy(name) - -# -# Return the result -# -if ok: - print((json.dumps(res, indent=2))) -else: - print(res) - sys.exit(1) diff --git a/examples/get_secure_default_falco_rules_files.py b/examples/get_secure_default_falco_rules_files.py deleted file mode 100755 index be526718..00000000 --- a/examples/get_secure_default_falco_rules_files.py +++ /dev/null @@ -1,70 +0,0 @@ -#!/usr/bin/env python -# -# Get the sysdig secure default rules files. 
-# -# The _files programs and endpoints are a replacement for the -# system_file endpoints and allow for publishing multiple files -# instead of a single file as well as publishing multiple variants of -# a given file that are compatible with different agent versions. -# - -import getopt -import pprint -import sys - -from sdcclient import SdSecureClient - - -# -# Parse arguments -# -def usage(): - print(('usage: %s [-s|--save ] ' % sys.argv[0])) - print('-s|--save: save the retrieved files to a set of files below using save_default_rules_files().') - print('You can find your token at https://secure.sysdig.com/#/settings/user') - sys.exit(1) - - -try: - opts, args = getopt.getopt(sys.argv[1:], "s:", ["save="]) -except getopt.GetoptError: - usage() - -save_dir = "" -for opt, arg in opts: - if opt in ("-s", "--save"): - save_dir = arg - -# -# Parse arguments -# -if len(args) != 1: - usage() - -sdc_token = args[0] - -# -# Instantiate the SDC client -# -sdclient = SdSecureClient(sdc_token, 'https://secure.sysdig.com') - -# -# Get the configuration -# -ok, res = sdclient.get_default_falco_rules_files() - -# -# Return the result -# -if ok: - if save_dir == "": - pp = pprint.PrettyPrinter(indent=4) - pp.pprint(res) - else: - print(("Saving falco rules files below {}...".format(save_dir))) - ok, sres = sdclient.save_default_falco_rules_files(res, save_dir) - if not ok: - print(sres) -else: - print(res) - sys.exit(1) diff --git a/examples/get_secure_policy_events.py b/examples/get_secure_policy_events.py deleted file mode 100755 index 30b06386..00000000 --- a/examples/get_secure_policy_events.py +++ /dev/null @@ -1,104 +0,0 @@ -#!/usr/bin/env python -# -# Get all policy events for a given time range or in the last N seconds. -# The events are written in jsonl format to stdout. -# -# If --summarize is provided, summarize the policy events by sanitized -# (removing container ids when present) description and print the -# descriptions by decreasing frequency. 
This allows you to see which policy -# events are occurring most often. -# -# Progress information is written to standard error. -# - -import getopt -import json -import operator -import re -import sys - -from sdcclient import SdSecureClient - - -def usage(): - print(('usage: %s [-s|--summarize] [-l|--limit ] [| ]' % - sys.argv[0])) - print('-s|--summarize: group policy events by sanitized output and print by frequency') - print('-l|--limit: with -s, only print the first outputs') - print('You can find your token at https://secure.sysdig.com/#/settings/user') - sys.exit(1) - - -try: - opts, args = getopt.getopt(sys.argv[1:], "sl:", ["summarize", "limit="]) -except getopt.GetoptError: - usage() - -summarize = False -limit = 0 -for opt, arg in opts: - if opt in ("-s", "--summarize"): - summarize = True - elif opt in ("-l", "--limit"): - limit = int(arg) -# -# Parse arguments -# -if len(args) < 2: - usage() - -sdc_token = args[0] - -duration = None -from_sec = None -to_sec = None - -if len(args) == 2: - duration = args[1] -elif len(args) == 3: - from_sec = args[1] - to_sec = args[2] -else: - usage() - -# -# Instantiate the SDC client -# -sdclient = SdSecureClient(sdc_token, 'https://secure.sysdig.com') - -if duration is not None: - ok, res = sdclient.get_policy_events_duration(duration) -else: - ok, res = sdclient.get_policy_events_range(from_sec, to_sec) - -all_outputs = dict() - -while True: - - # - # Return the result - # - if not ok: - print(res) - sys.exit(1) - - if not res["ctx"]["cursor"] or len(res['data']) == 0: - break - - for event in res['data']: - if summarize: - sanitize_output = re.sub(r'\S+\s\(id=\S+\)', '', event['output']) - all_outputs[sanitize_output] = all_outputs.get(sanitize_output, 0) + 1 - else: - sys.stdout.write(json.dumps(event) + "\n") - - ok, res = sdclient.get_more_policy_events(res['ctx']) - -if summarize: - sorted = sorted(list(all_outputs.items()), key=operator.itemgetter(1), reverse=True) - count = 0 - for val in sorted: - count += 1 
- sys.stdout.write("{} {}\n".format(val[1], val[0])) - if limit != 0 and count > limit: - break diff --git a/examples/get_secure_policy_events_old.py b/examples/get_secure_policy_events_old.py deleted file mode 100755 index b6250b78..00000000 --- a/examples/get_secure_policy_events_old.py +++ /dev/null @@ -1,109 +0,0 @@ -#!/usr/bin/env python -# -# Get all policy events for a given time range or in the last N seconds. -# The events are written in jsonl format to stdout. -# -# If --summarize is provided, summarize the policy events by sanitized -# (removing container ids when present) description and print the -# descriptions by decreasing frequency. This allows you to see which policy -# events are occurring most often. -# -# Progress information is written to standard error. -# -# =========================================================================================== -# UNSUPPORTED: This script is unsupported as it is only an example from an old API version. -# =========================================================================================== - -import os -import sys -import json -import operator -import re -import getopt -sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(sys.argv[0])), '..')) -from sdcclient.secure import PolicyEventsClientOld - - -def usage(): - print('usage: %s [-s|--summarize] [-l|--limit ] [| ]' % sys.argv[0]) - print('-s|--summarize: group policy events by sanitized output and print by frequency') - print('-l|--limit: with -s, only print the first outputs') - print('You can find your token at https://secure.sysdig.com/#/settings/user') - sys.exit(1) - - -try: - opts, args = getopt.getopt(sys.argv[1:], "sl:", ["summarize", "limit="]) -except getopt.GetoptError: - usage() - -summarize = False -limit = 0 -for opt, arg in opts: - if opt in ("-s", "--summarize"): - summarize = True - elif opt in ("-l", "--limit"): - limit = int(arg) -# -# Parse arguments -# -if len(args) < 2: - usage() - -sdc_token = args[0] - 
-duration = None -from_sec = None -to_sec = None - -if len(args) == 2: - duration = args[1] -elif len(args) == 3: - from_sec = args[1] - to_sec = args[2] -else: - usage() - -# -# Instantiate the SDC client -# -sdclient = PolicyEventsClientOld(sdc_token, 'https://secure.sysdig.com') - -if duration is not None: - ok, res = sdclient.get_policy_events_duration(duration) -else: - ok, res = sdclient.get_policy_events_range(from_sec, to_sec) - -all_outputs = dict() - -while True: - - # - # Return the result - # - if not ok: - print(res) - sys.exit(1) - - if len(res['data']['policyEvents']) == 0: - break - - sys.stderr.write("offset={}\n".format(res['ctx']['offset'])) - - for event in res['data']['policyEvents']: - if summarize: - sanitize_output = re.sub(r'\S+\s\(id=\S+\)', '', event['output']) - all_outputs[sanitize_output] = all_outputs.get(sanitize_output, 0) + 1 - else: - sys.stdout.write(json.dumps(event) + "\n") - - ok, res = sdclient.get_more_policy_events(res['ctx']) - -if summarize: - sorted = sorted(all_outputs.items(), key=operator.itemgetter(1), reverse=True) - count = 0 - for val in sorted: - count += 1 - sys.stdout.write("{} {}\n".format(val[1], val[0])) - if limit != 0 and count > limit: - break diff --git a/examples/get_secure_system_falco_rules.py b/examples/get_secure_system_falco_rules.py deleted file mode 100755 index e2672279..00000000 --- a/examples/get_secure_system_falco_rules.py +++ /dev/null @@ -1,37 +0,0 @@ -#!/usr/bin/env python -# -# Get the sysdig secure system rules file. 
-# - -import sys - -from sdcclient import SdSecureClient - -# -# Parse arguments -# -if len(sys.argv) != 2: - print(('usage: %s ' % sys.argv[0])) - print('You can find your token at https://secure.sysdig.com/#/settings/user') - sys.exit(1) - -sdc_token = sys.argv[1] - -# -# Instantiate the SDC client -# -sdclient = SdSecureClient(sdc_token, 'https://secure.sysdig.com') - -# -# Get the configuration -# -ok, res = sdclient.get_system_falco_rules() - -# -# Return the result -# -if ok: - sys.stdout.write(res["systemRulesFile"]["content"]) -else: - print(res) - sys.exit(1) diff --git a/examples/get_secure_user_falco_rules.py b/examples/get_secure_user_falco_rules.py deleted file mode 100755 index 91d84e26..00000000 --- a/examples/get_secure_user_falco_rules.py +++ /dev/null @@ -1,37 +0,0 @@ -#!/usr/bin/env python -# -# Get the sysdig secure user rules file. -# - -import sys - -from sdcclient import SdSecureClient - -# -# Parse arguments -# -if len(sys.argv) != 2: - print(('usage: %s ' % sys.argv[0])) - print('You can find your token at https://secure.sysdig.com/#/settings/user') - sys.exit(1) - -sdc_token = sys.argv[1] - -# -# Instantiate the SDC client -# -sdclient = SdSecureClient(sdc_token, 'https://secure.sysdig.com') - -# -# Get the configuration -# -ok, res = sdclient.get_user_falco_rules() - -# -# Return the result -# -if ok: - sys.stdout.write(res["userRulesFile"]["content"]) -else: - print(res) - sys.exit(1) diff --git a/examples/list_access_keys.py b/examples/list_access_keys.py deleted file mode 100755 index 36139a77..00000000 --- a/examples/list_access_keys.py +++ /dev/null @@ -1,38 +0,0 @@ -#!/usr/bin/env python -# -# List all the access keys in a Sysdig Monitor environment. The token you provide must -# have Admin rights. 
-# - -import os -import sys -sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(sys.argv[0])), '..')) -from sdcclient import SdcClient - -# -# Parse arguments -# -if len(sys.argv) != 2: - print('usage: %s ' % sys.argv[0]) - print('You can find your token at https://app.sysdigcloud.com/#/settings/user') - print('For this script to work, the user for the token must have Admin rights') - sys.exit(1) - -sdc_token = sys.argv[1] - -# -# Instantiate the SDC client -# -sdclient = SdcClient(sdc_token, 'https://app.sysdigcloud.com') - -# -# Get the configuration -# -ok, res = sdclient.list_access_keys() -if ok: - print('Access Keys\n===========') - for access_key in res['customerAccessKeys']: - print(access_key['accessKey']) -else: - print(res) - sys.exit(1) diff --git a/examples/list_admins.py b/examples/list_admins.py deleted file mode 100755 index 048a45cf..00000000 --- a/examples/list_admins.py +++ /dev/null @@ -1,52 +0,0 @@ -#!/usr/bin/env python -# -# List all the Admin users in a Sysdig Monitor environment. The token you -# provide must have Admin rights. -# If you're running this script in an On-Premise install of Sysdig Montior, -# the "super" Admin (the first Admin user that was created at initial -# install) will be highlighted. 
-# - -import sys - -from sdcclient import SdcClient - -# -# Parse arguments -# -if len(sys.argv) != 2: - print(('usage: %s ' % sys.argv[0])) - print('You can find your token at https://app.sysdigcloud.com/#/settings/user') - print('For this script to work, the user for the token must have Admin rights') - sys.exit(1) - -sdc_token = sys.argv[1] - -# -# Instantiate the SDC client -# -sdclient = SdcClient(sdc_token, 'https://app.sysdigcloud.com') - -# -# Get the configuration -# -ok, res = sdclient.get_users() -if ok: - admins = [] - superadmins = [] - for user in res: - if 'ROLE_CUSTOMER' in user['roles']: - admins.append(user['username']) - if 'ROLE_ADMIN' in user['roles']: - superadmins.append(user['username']) - print('Admin users') - print('-----------') - for username in admins: - print(username) - print('\nSuper Admins') - print('------------') - for username in superadmins: - print(username) -else: - print(res) - sys.exit(1) diff --git a/examples/list_alert_notifications.py b/examples/list_alert_notifications.py deleted file mode 100755 index b64ff355..00000000 --- a/examples/list_alert_notifications.py +++ /dev/null @@ -1,83 +0,0 @@ -#!/usr/bin/env python -# -# Get alert notifications from Sysdig Cloud -# - -import sys -import time - -from sdcclient import SdcClient - - -def print_notifications(notifications): - for notification in notifications: - values = [] - for entity in notification['entities']: - for value in entity['metricValues']: - values.append(str(value['value'])) - notification.update({'values': ','.join(values)}) - notification["filter"] = notification.get("filter", "") - print("#%(id)s, State: %(state)s, Severity: %(severity)s, Scope: %(filter)s, Condition: %(condition)s, " - "Value: %(values)s, Resolved: %(resolved)s" % - notification) - - -# -# Parse arguments -# -if len(sys.argv) != 2: - print(('usage: %s ' % sys.argv[0])) - print('You can find your token at https://app.sysdigcloud.com/#/settings/user') - sys.exit(1) - -sdc_token = 
sys.argv[1] - -# -# Instantiate the SDC client -# -sdclient = SdcClient(sdc_token) - -# -# Get the notifications in the last day -# -ok, res = sdclient.get_notifications( - from_ts=int(time.time() - 86400), - to_ts=int(time.time())) - -print_notifications(res['notifications']) -if not ok: - sys.exit(1) - -# -# Get the notifications in the last day and active state -# -ok, res = sdclient.get_notifications( - from_ts=int(time.time() - 86400), - to_ts=int(time.time()), state='ACTIVE') - -print_notifications(res['notifications']) -if not ok: - sys.exit(1) - -# -# Get the notifications in the last day and active state -# -ok, res = sdclient.get_notifications( - from_ts=int(time.time() - 86400), - to_ts=int(time.time()), state='OK') - -print_notifications(res['notifications']) -if not ok: - sys.exit(1) - -# -# Get the notifications in the last day and resolved state -# -ok, res = sdclient.get_notifications( - from_ts=int(time.time() - 86400), - to_ts=int(time.time()), - resolved=True) - -print_notifications(res['notifications']) -if not ok: - sys.exit(1) diff --git a/examples/list_alerts.py b/examples/list_alerts.py deleted file mode 100755 index 95ff1d68..00000000 --- a/examples/list_alerts.py +++ /dev/null @@ -1,47 +0,0 @@ -#!/usr/bin/env python -# -# Print 'enabled' flag and name for all of the alerts created by the user -# Optionally dump the full Alerts list as a JSON object to a target file. -# - -import json -import sys - -from sdcclient import SdcClient - -# -# Parse arguments -# -json_dumpfilename = None -if len(sys.argv) < 2 or len(sys.argv) > 3: - print(('usage: %s [json-dumpfile]' % sys.argv[0])) - print('You can find your token at https://app.sysdigcloud.com/#/settings/user') - sys.exit(1) -elif len(sys.argv) == 3: - json_dumpfilename = sys.argv[2] - -sdc_token = sys.argv[1] - -# -# Instantiate the SDC client -# -sdclient = SdcClient(sdc_token) - -# -# Fire the request. 
-# -ok, res = sdclient.get_alerts() - -# -# Show the list of alerts -# -if not ok: - print(res) - sys.exit(1) - -for alert in res['alerts']: - print(('enabled: %s, name: %s' % (str(alert['enabled']), alert['name']))) - -if json_dumpfilename: - with open(json_dumpfilename, "w") as f: - json.dump(res, f, sort_keys=True, indent=4) diff --git a/examples/list_dashboards.py b/examples/list_dashboards.py deleted file mode 100755 index 0023bdce..00000000 --- a/examples/list_dashboards.py +++ /dev/null @@ -1,38 +0,0 @@ -#!/usr/bin/env python -# -# Print the list of dashboards. -# - -import sys - -from sdcclient import SdMonitorClient - -# -# Parse arguments -# -if len(sys.argv) != 2: - print(('usage: %s ' % sys.argv[0])) - print('You can find your token at https://app.sysdigcloud.com/#/settings/user') - sys.exit(1) - -sdc_token = sys.argv[1] - -# -# Instantiate the SDC client -# -sdclient = SdMonitorClient(sdc_token) - -# -# Fire the request. -# -ok, res = sdclient.get_dashboards() - -# -# Show the list of dashboards -# -if not ok: - print(res) - sys.exit(1) - -for db in res['dashboards']: - print(("Name: %s, # Charts: %d" % (db['name'], len(db['widgets'] if 'widgets' in db else [])))) diff --git a/examples/list_events.py b/examples/list_events.py deleted file mode 100755 index 41517c8f..00000000 --- a/examples/list_events.py +++ /dev/null @@ -1,90 +0,0 @@ -#!/usr/bin/env python -# -# Get user events from Sysdig Cloud -# - -import sys - -from sdcclient import SdMonitorClient - - -def print_events(data): - for event in data['events']: - event['sev'] = event.get('severity', 'not set') - event['description'] = event.get('description', 'not set') - print(('id: %(id)s, time: %(timestamp)d, name: %(name)s, description: %(description)s, severity: %(sev)s' - % event)) - - -# -# Parse arguments -# -if len(sys.argv) != 2: - print(('usage: %s ' % sys.argv[0])) - print('You can find your token at https://app.sysdigcloud.com/#/settings/user') - sys.exit(1) - -sdc_token = sys.argv[1] - 
-# -# Instantiate the SDC client -# -sdclient = SdMonitorClient(sdc_token) - -# -# Get the entire list of events -# -ok, res = sdclient.get_events() - -if ok: - print_events(res) -else: - print(res) - sys.exit(1) - -# -# Get the events before other event -# -if len(res['events']) > 0: - ok, res = sdclient.get_events(pivot=res['events'][-1]["id"]) -else: - ok, res = True, {"events": []} - -if ok: - print_events(res) -else: - print(res) - sys.exit(1) - -# -# Get the events that match a category -# -ok, res = sdclient.get_events(category=["kubernetes"]) - -if ok: - print_events(res) -else: - print(res) - sys.exit(1) - -# -# Get the events that match a status -# -ok, res = sdclient.get_events(status=['triggered', 'unacknowledged']) - -if ok: - print_events(res) -else: - print(res) - sys.exit(1) - -# -# Get the last event only -# -ok, res = sdclient.get_events(limit=1) - -if ok: - print_events(res) -else: - print(res) - sys.exit(1) diff --git a/examples/list_hosts.py b/examples/list_hosts.py deleted file mode 100755 index fb767d86..00000000 --- a/examples/list_hosts.py +++ /dev/null @@ -1,93 +0,0 @@ -#!/usr/bin/env python -# -# This script shows how to leverage Sysdig data query API to obtain the list of the instrumented -# hosts that have been seen in your infrastructure. -# The output will show the container count (`container.count` metric) in addition to the -# hostnames (`host.hostName` tag) in a format like this: -# -# host-1 12 -# host-2 4 -# -# where the first column is the hostname and the second column is the number of containers running -# in each host. -# -import getopt -import json -import sys - -from sdcclient import SdcClient - - -# -# Parse arguments -# -def usage(): - print(('usage: %s [-j|--json] [-d|--duration ] [-c|--count ] ' % sys.argv[0])) - print('-d|--duration: List hosts seen in the last seconds (default: 3600, ie. 
last hour)') - print('-c|--count: Number of hosts to print (default: 100)') - print('-j|--json: Print output as json') - print('You can find your token at https://app.sysdigcloud.com/#/settings/user') - sys.exit(1) - - -try: - opts, args = getopt.getopt(sys.argv[1:], "jd:c:", ["json", "duration=", "count="]) -except getopt.GetoptError: - usage() - -duration = 3600 -count = 100 -print_json = False -for opt, arg in opts: - if opt in ("-d", "--duration"): - duration = int(arg) - elif opt in ("-c", "--count"): - count = int(arg) - elif opt in ("-j", "--json"): - print_json = True -sdc_token = args[0] - -# Instantiate the SDC client -sdclient = SdcClient(sdc_token) - -# -# Prepare the query's metrics list. -# In this case, we have one tag (used for segmentation) and one metric: -# - host.hostName. This is a tag, to identify each item of the output -# - container.count: This is the metric -# -metrics = [ - {"id": "host.hostName"}, - {"id": "container.count", "aggregations": {"time": "avg", "group": "avg"}} -] - -ok, res = sdclient.get_data( - metrics, # list of metrics - -duration, # start time: either a unix timestamp, or a difference from "now" - 0, # end time: either a unix timestamp, or a difference from "now" (0 means you need "last X seconds") - duration, # sampling time, ie. 
data granularity; - # if equal to the time window span then the result will contain a single sample - paging={ - "from": 0, - "to": count - 1 - }) - -if not ok: - # data fetch failed - print(res) - sys.exit(1) - -# data fetched successfully -if print_json: - print((json.dumps(res))) -else: - data = res['data'] - output = [] - for i in range(0, len(data)): - sample = data[i] - metrics = sample['d'] - hostName = metrics[0] - count = metrics[1] - output.append('%s\t%d' % (hostName, count)) - - print('\n'.join(output)) diff --git a/examples/list_metrics.py b/examples/list_metrics.py deleted file mode 100755 index a9da394b..00000000 --- a/examples/list_metrics.py +++ /dev/null @@ -1,38 +0,0 @@ -#!/usr/bin/env python -# -# Print the list of metrics. -# - -import sys - -from sdcclient import SdcClient - -# -# Parse arguments -# -if len(sys.argv) != 2: - print(('usage: %s ' % sys.argv[0])) - print('You can find your token at https://app.sysdigcloud.com/#/settings/user') - sys.exit(1) - -sdc_token = sys.argv[1] - -# -# Instantiate the SDC client -# -sdclient = SdcClient(sdc_token) - -# -# Fire the request. 
-# -ok, res = sdclient.get_metrics() - -# -# Show the list of metrics -# -if not ok: - print(res) - sys.exit(1) - -for metric_id, metric in res.items(): - print(("Metric name: " + metric_id + ", type: " + metric['type'])) diff --git a/examples/list_notification_channels.py b/examples/list_notification_channels.py deleted file mode 100755 index 3025c230..00000000 --- a/examples/list_notification_channels.py +++ /dev/null @@ -1,38 +0,0 @@ -#!/usr/bin/env python -# -# Post a user event to Sysdig Cloud -# - -import json -import sys - -from sdcclient import SdcClient - -# -# Parse arguments -# -if len(sys.argv) != 2: - print(('usage: %s ' % sys.argv[0])) - print('You can find your token at https://app.sysdigcloud.com/#/settings/user') - sys.exit(1) - -sdc_token = sys.argv[1] - -# -# Instantiate the SDC client -# -sdclient = SdcClient(sdc_token) - -# -# Post the event -# -ok, res = sdclient.list_notification_channels() - -# -# Return the result -# -if ok: - print((json.dumps(res['notificationChannels'], indent=4))) -else: - print(res) - sys.exit(1) diff --git a/examples/list_policies.py b/examples/list_policies.py deleted file mode 100755 index 2146c93d..00000000 --- a/examples/list_policies.py +++ /dev/null @@ -1,44 +0,0 @@ -#!/usr/bin/env python -# -# List the current set of secure policies. 
-# - -import json -import sys - -from sdcclient import SdSecureClient - - -def usage(): - print(('usage: %s ' % sys.argv[0])) - print('You can find your token at https://secure.sysdig.com/#/settings/user') - sys.exit(1) - - -# -# Parse arguments -# -if len(sys.argv) != 2: - usage() - -sdc_token = sys.argv[1] - -# -# Instantiate the SDC client -# -sdclient = SdSecureClient(sdc_token, 'https://secure.sysdig.com') - -ok, res = sdclient.list_policies() - -if not ok: - print(res) - sys.exit(1) - -# -# Return the result -# -if ok: - print((json.dumps(res, indent=2))) -else: - print(res) - sys.exit(1) diff --git a/examples/list_policies_v1.py b/examples/list_policies_v1.py deleted file mode 100755 index 30758a7f..00000000 --- a/examples/list_policies_v1.py +++ /dev/null @@ -1,66 +0,0 @@ -#!/usr/bin/env python -# -# List the current set of secure policies. -# - -import getopt -import json -import sys - -from sdcclient import SdSecureClientV1 - - -def usage(): - print(('usage: %s [-o|--order-only] ' % sys.argv[0])) - print('-o|--order-only: Only display the list of policy ids in evaluation order. 
' - 'Suitable for use by set_policy_order.py') - print('You can find your token at https://secure.sysdig.com/#/settings/user') - sys.exit(1) - - -try: - opts, args = getopt.getopt(sys.argv[1:], "o", ["order-only"]) -except getopt.GetoptError: - usage() - -order_only = False -for opt, arg in opts: - if opt in ("-o", "--order-only"): - order_only = True - -# -# Parse arguments -# -if len(args) < 1: - usage() - -sdc_token = args[0] - -# -# Instantiate the SDC client -# -sdclient = SdSecureClientV1(sdc_token, 'https://secure.sysdig.com') - -ok, res = sdclient.get_policy_priorities() - -if not ok: - print(res) - sys.exit(1) - -# Strip the surrounding json to only keep the list of policy ids -res = res['priorities']['policyIds'] - -if not order_only: - priorities = res - ok, res = sdclient.list_policies() - if ok: - res['policies'].sort(key=lambda p: priorities.index(p['id'])) - -# -# Return the result -# -if ok: - print((json.dumps(res, indent=2))) -else: - print(res) - sys.exit(1) diff --git a/examples/list_profiles.py b/examples/list_profiles.py deleted file mode 100755 index 963e4db8..00000000 --- a/examples/list_profiles.py +++ /dev/null @@ -1,43 +0,0 @@ -#!/usr/bin/env python -# -# List the current set of image profiles. 
-# - -import sys - -from sdcclient import SdSecureClient - - -def usage(): - print(('usage: %s ' % sys.argv[0])) - sys.exit(1) - - -# -# Check number of parameters -# -if len(sys.argv) < 2: - usage() - -sdc_endpoint = sys.argv[1] -sdc_token = sys.argv[2] - -# -# Instantiate the SDC client -# -sdclient = SdSecureClient(sdc_token, sdc_endpoint) - -# -# Retrieve all the image profiles -# -ok, res = sdclient.list_image_profiles() - -if not ok: - print(res) - sys.exit(1) - -# Strip the surrounding json to only keep the list of profiles -res = res['profiles'] - -for profile in res: - print(("ID: {}, Name: {}".format(profile["profileId"], profile["imageName"]))) diff --git a/examples/list_sysdig_captures.py b/examples/list_sysdig_captures.py deleted file mode 100755 index fb72b4b0..00000000 --- a/examples/list_sysdig_captures.py +++ /dev/null @@ -1,41 +0,0 @@ -#!/usr/bin/env python -# -# Print the list of sysdig captures. -# - -import sys - -from sdcclient import SdcClient - -# -# Parse arguments -# -if len(sys.argv) != 2: - print(('usage: %s ' % sys.argv[0])) - print('You can find your token at https://app.sysdigcloud.com/#/settings/user') - sys.exit(1) - -sdc_token = sys.argv[1] - -# -# Instantiate the SDC client -# -sdclient = SdcClient(sdc_token) - -# -# Fire the request. -# -ok, res = sdclient.get_sysdig_captures() - -# -# Show the list of metrics -# -if ok: - captures = res['dumps'] -else: - print(res) - sys.exit(1) - -for capture in captures: - print(("Folder %s, Name %s, Host: %s, Size: %d, Status: %s" % - (capture['folder'], capture['name'], capture['agent']['hostName'], capture['size'], capture['status']))) diff --git a/examples/list_users.py b/examples/list_users.py deleted file mode 100755 index 642d7904..00000000 --- a/examples/list_users.py +++ /dev/null @@ -1,37 +0,0 @@ -#!/usr/bin/env python -# -# List all the users in a Sysdig Monitor environment. The token you provide must -# have Admin rights. 
-# - -import sys - -from sdcclient import SdcClient - -# -# Parse arguments -# -if len(sys.argv) != 2: - print(('usage: %s ' % sys.argv[0])) - print('You can find your token at https://app.sysdigcloud.com/#/settings/user') - print('For this script to work, the user for the token must have Admin rights') - sys.exit(1) - -sdc_token = sys.argv[1] - -# -# Instantiate the SDC client -# -sdclient = SdcClient(sdc_token, 'https://app.sysdigcloud.com') - -# -# Get the configuration -# -ok, res = sdclient.get_users() -if ok: - print('Users\n=====') - for user in res: - print((user['username'])) -else: - print(res) - sys.exit(1) diff --git a/examples/notification_channels.py b/examples/notification_channels.py deleted file mode 100755 index bf93eed8..00000000 --- a/examples/notification_channels.py +++ /dev/null @@ -1,64 +0,0 @@ -#!/usr/bin/env python -# -# This script shows how to manipulate the notification channel list for alerts -# - -import getopt -import sys - -from sdcclient import SdcClient - - -# -# Parse arguments -# -def usage(): - print(('usage: %s [-c|--channel ] ' % sys.argv[0])) - print('-c|--channel: Set name of channel to create') - print('You can find your token at https://app.sysdigcloud.com/#/settings/user') - sys.exit(1) - - -try: - opts, args = getopt.getopt(sys.argv[1:], "c:", ["channel="]) -except getopt.GetoptError: - usage() - -# Name for the dashboard to create -channel_name = "Api Channel" -for opt, arg in opts: - if opt in ("-c", "--channel"): - channel_name = arg - -if len(args) != 1: - usage() - -sdc_token = args[0] - -# -# Instantiate the SDC client -# -sdclient = SdcClient(sdc_token) - -# -# Create an email notification channel -# -ok, res = sdclient.create_email_notification_channel(channel_name, ['gianluca.borello@sysdig.com', 'foo@sysdig.com', - 'bar@sysdig.com']) -if not ok: - print(res) - sys.exit(1) - -# -# The notification channel will contain the id, that can be used when creating alerts -# -channel = res['notificationChannel'] 
-print(channel) - -# -# Notification channels can also be programmatically deleted -# -ok, res = sdclient.delete_notification_channel(channel) -if not ok: - print(res) - sys.exit(1) diff --git a/examples/post_event.py b/examples/post_event.py deleted file mode 100755 index e45d2e34..00000000 --- a/examples/post_event.py +++ /dev/null @@ -1,50 +0,0 @@ -#!/usr/bin/env python -# -# Post a user event to Sysdig Cloud -# - -import argparse -import json -import sys - -from sdcclient import SdMonitorClient - -# -# Parse arguments -# -# Usage: post_event.py [-h] [-d DESCRIPTION] [-s SEVERITY] [-c SCOPE] [-t TAGS] sysdig_token name -# -parser = argparse.ArgumentParser() -parser.add_argument('-d', '--description') -parser.add_argument('-s', '--severity', help='syslog style from 0 (high) to 7 (low)') -parser.add_argument('-c', '--scope', - help='metadata, in Sysdig Cloud format, of nodes to associate with the event, ' - 'eg: \'host.hostName = "ip-10-1-1-1" and container.name = "foo"\'') -parser.add_argument('-t', '--tags', - help='dictionary of arbitrary key-value pairs, eg: \'{"app":"my_app", "file":"text.py"}\'') -parser.add_argument('sysdig_token', help='You can find your token at https://app.sysdigcloud.com/#/settings/user') -parser.add_argument('name') -args = parser.parse_args() - -tags = None -if args.tags: - tags = json.loads(args.tags) - -# -# Instantiate the SDC client -# -sdclient = SdMonitorClient(args.sysdig_token) - -# -# Post the event using post_event(self, name, description=None, severity=None, event_filter=None, tags=None) -# -ok, res = sdclient.post_event(args.name, args.description, args.severity, args.scope, tags) - -# -# Return the result -# -if ok: - print('Event Posted Successfully') -else: - print(res) - sys.exit(1) diff --git a/examples/post_event_simple.py b/examples/post_event_simple.py deleted file mode 100755 index feeadd96..00000000 --- a/examples/post_event_simple.py +++ /dev/null @@ -1,46 +0,0 @@ -#!/usr/bin/env python -# -# Post a user event 
to Sysdig Cloud -# - -import sys - -from sdcclient import SdMonitorClient - -# -# Parse arguments -# -if len(sys.argv) < 4: - print(('usage: %s name description [severity]' % sys.argv[0])) - print('You can find your token at https://app.sysdigcloud.com/#/settings/user') - sys.exit(1) - -sdc_token = sys.argv[1] -name = sys.argv[2] -description = sys.argv[3] - -scope = 'host.hostName = "foo" and container.name = "bar"' -tags = {"tag1": "value1"} - -severity = 6 -if len(sys.argv) < 4: - severity = int(sys.argv[4]) - -# -# Instantiate the SDC client -# -sdclient = SdMonitorClient(sdc_token) - -# -# Post the event -# -ok, res = sdclient.post_event(name, description, severity, scope, tags) - -# -# Return the result -# -if ok: - print('Event Posted Successfully') -else: - print(res) - sys.exit(1) diff --git a/examples/print_conn_table.py b/examples/print_conn_table.py deleted file mode 100755 index dadc71d5..00000000 --- a/examples/print_conn_table.py +++ /dev/null @@ -1,132 +0,0 @@ -#!/usr/bin/env python -# -# The request prints N entries from the conn table for the filter specified -# mimicking the top connections table in the Sysdig Monitor UI -# - -import sys - -from sdcclient import SdcClient - -# -# Parse arguments -# -if len(sys.argv) not in [2, 3]: - print(('usage: %s ' % sys.argv[0])) - print('You can find your token at https://app.sysdigcloud.com/#/settings/user') - sys.exit(1) - -sdc_token = sys.argv[1] - -if len(sys.argv) == 3: - hostname = sys.argv[2] -else: - hostname = None - -# -# Instantiate the SDC client -# -sdclient = SdcClient(sdc_token) - -# -# Prepare the metrics list. 
-# -metrics = [ - {"id": "net.local.endpoint"}, - {"id": "net.local.service"}, - {"id": "net.remote.endpoint"}, - {"id": "net.remote.service"}, - {"id": "net.connection.count.total", - "aggregations": { - "time": "timeAvg", - "group": "sum" - } - }, - {"id": "net.bytes.in", - "aggregations": { - "time": "timeAvg", - "group": "avg" - }, - }, - {"id": "net.bytes.out", - "aggregations": { - "time": "timeAvg", - "group": "avg" - } - }, - {"id": "net.bytes.total", - "aggregations": { - "time": "timeAvg", - "group": "avg" - } - }, - {"id": "net.request.count.in", - "aggregations": { - "time": "timeAvg", - "group": "avg" - } - }, - {"id": "net.request.count.out", - "aggregations": { - "time": "timeAvg", - "group": "avg" - } - }, - {"id": "net.request.count", - "aggregations": { - "time": "timeAvg", - "group": "avg" - } - } -] - -# -# Prepare the filter -# - -if hostname is not None: - flt = "host.hostName = '%s'" % hostname -else: - flt = "" - -# -# Time window: -# - for "last X seconds": start is equal to -X, end is equal to 0 -# -start = -7200 -end = 0 - -# -# Fire the query. 
-# -page_size = 500 -fetch_limit = 10000 - -cur = 0 - -row_format = "{:20.20}\t{:20.20}\t{:20.20}\t{:20.20}\t{:10}\t{:10}\t{:10}\t{:10}\t{:10}\t{:10}\t{:10}" - -print((row_format.format("Source", "Source Process", "Destination", "Destination Process", "Count", - "Bytes In", "Bytes Out", "Bytes", "Req In", "Req Out", "Req"))) - -while cur < fetch_limit: - paging = {'from': cur, 'to': cur + page_size} - ok, res = sdclient.get_data(metrics, - start, - end, - 0, - flt, - 'host', - paging) - - if not ok: - sys.exit(res) - - data = res['data'] - - if len(data) == 0: - break - - cur += len(data) - for line in data: - print((row_format.format(*line['d']))) diff --git a/examples/print_data_retention_info.py b/examples/print_data_retention_info.py deleted file mode 100755 index b0e97ff6..00000000 --- a/examples/print_data_retention_info.py +++ /dev/null @@ -1,37 +0,0 @@ -#!/usr/bin/env python -# -# Print the different retention intervals available for data export. -# - -import sys - -from sdcclient import SdcClient - -# -# Parse arguments -# -if len(sys.argv) != 2: - print(('usage: %s ' % sys.argv[0])) - print('You can find your token at https://app.sysdigcloud.com/#/settings/user') - sys.exit(1) - -sdc_token = sys.argv[1] - -# -# Instantiate the SDC client -# -sdclient = SdcClient(sdc_token) - -# -# Fire the request. -# -ok, res = sdclient.get_data_retention_info() - -# -# Show the list of retention intervals -# -if not ok: - print(res) - sys.exit(1) - -print(res['agents']) diff --git a/examples/print_explore_grouping.py b/examples/print_explore_grouping.py deleted file mode 100755 index 4ee28154..00000000 --- a/examples/print_explore_grouping.py +++ /dev/null @@ -1,33 +0,0 @@ -#!/usr/bin/env python -# -# Print the user's Explore grouping hierarchy. 
-# - -import sys - -from sdcclient import SdcClient - -# -# Parse arguments -# -if len(sys.argv) != 2: - print(('usage: %s ' % sys.argv[0])) - print('You can find your token at https://app.sysdigcloud.com/#/settings/user') - sys.exit(1) - -sdc_token = sys.argv[1] - -# -# Instantiate the SDC client -# -sdclient = SdcClient(sdc_token) - -# -# Fire the request -# -_, res = sdclient.get_explore_grouping_hierarchy() - -# -# Show the result -# -print(res) diff --git a/examples/print_user_info.py b/examples/print_user_info.py deleted file mode 100755 index e7cbb302..00000000 --- a/examples/print_user_info.py +++ /dev/null @@ -1,50 +0,0 @@ -#!/usr/bin/env python -# -# Print email, current and maximum number of agents for the Sysdig Cloud user -# identified by the given token. -# - -import sys - -from sdcclient import SdcClient - -# -# Parse arguments -# -if len(sys.argv) != 2: - print(('usage: %s ' % sys.argv[0])) - print('You can find your token at https://app.sysdigcloud.com/#/settings/user') - sys.exit(1) - -sdc_token = sys.argv[1] - -# -# Instantiate the SDC client -# -sdclient = SdcClient(sdc_token) - -# -# Get the required info -# -ok, res = sdclient.get_user_info() - -if ok: - uinfo = res -else: - print(res) - sys.exit(1) - -ok, res = sdclient.get_n_connected_agents() - -# -# Print the results -# -if ok: - nagents = res -else: - print(res) - sys.exit(1) - -print(('User Email: ' + uinfo['user']['username'])) -print(('Current Agents: %d' % nagents)) -print(('Max Agents: %s' % uinfo['user']['customerSettings']['plan']['maxAgents'])) diff --git a/examples/resolve_alert_notifications.py b/examples/resolve_alert_notifications.py deleted file mode 100755 index 2aab3e0c..00000000 --- a/examples/resolve_alert_notifications.py +++ /dev/null @@ -1,47 +0,0 @@ -#!/usr/bin/env python -# -# Resolve alert notifications from Sysdig Cloud -# - -import sys -import time - -from sdcclient import SdcClient - -# -# Parse arguments -# -if len(sys.argv) != 3: - print(('usage: %s ' % 
sys.argv[0])) - print('You can find your token at https://app.sysdigcloud.com/#/settings/user') - sys.exit(1) - -sdc_token = sys.argv[1] -num_days_to_resolve = sys.argv[2] - -# -# Instantiate the SDC client -# -sdclient = SdcClient(sdc_token) - -# -# Get the unresolved notifications in the last day -# -ok, res = sdclient.get_notifications(from_ts=int(time.time() - int(num_days_to_resolve) * 86400), - to_ts=int(time.time()), resolved=False) - -if not ok: - print(res) - sys.exit(1) - -# -# Resolve them -# -notifications = res['notifications'] - -print(("Resolving " + str(len(notifications)) + " notifications")) -for notification in notifications: - ok, res = sdclient.update_notification_resolution(notification, True) - if not ok: - print(res) - sys.exit(1) diff --git a/examples/restore_alerts.py b/examples/restore_alerts.py deleted file mode 100755 index fe0ad0fc..00000000 --- a/examples/restore_alerts.py +++ /dev/null @@ -1,91 +0,0 @@ -#!/usr/bin/env python -# -# Restore Alerts of the format in a JSON dumpfile from the list_alerts.py example. -# - -import json -import sys - -from sdcclient import SdcClient - -# -# Parse arguments -# -if len(sys.argv) != 3: - print(('usage: %s ' % sys.argv[0])) - print('You can find your token at https://app.sysdigcloud.com/#/settings/user') - sys.exit(1) - -sdc_token = sys.argv[1] -alerts_dump_file = sys.argv[2] - -# -# Instantiate the SDC client -# -sdclient = SdcClient(sdc_token) - -# -# If the dump we're restoring from has an Alert with the same name -# as one that's already configured, we'll update the existing Alert -# so it will have the config from the dump. When we do this, however, -# we need to give the ID and Version # of the existing Alert as a -# basis. We save them off here so we can refer to them later. 
-# -existing_alerts = {} -ok, res = sdclient.get_alerts() -if ok: - for alert in res['alerts']: - existing_alerts[alert['name']] = {'id': alert['id'], 'version': alert['version']} -else: - print(res) - sys.exit(1) - -# -# Someone might be restoring Alert configs from another environment, -# in which case the Notification Channel IDs in the saved Alert JSON -# is not expected to match the Notification Channel IDs in the target -# environment. We'll get the list of target IDs so we can drop non- -# matching IDs when we restore. -# -ok, res = sdclient.get_notification_ids() -if ok: - existing_notification_channel_ids = res -else: - print(res) - sys.exit(1) - -created_count = 0 -updated_count = 0 - -with open(alerts_dump_file, 'r') as f: - j = json.load(f) - for a in j['alerts']: - if 'notificationChannelIds' in a: - for channel_id in a['notificationChannelIds']: - if channel_id not in existing_notification_channel_ids: - print(('Notification Channel ID ' + str(channel_id) + ' referenced in Alert "' + a[ - 'name'] + '" does not exist.\n Restoring without this ID.')) - a['notificationChannelIds'].remove(channel_id) - - # The Create/Update APIs will validate but actually ignore these fields; - # to avoid problems, don't submit in the API request - for timefield in ['createdOn', 'modifiedOn']: - del a[timefield] - - # NOTE: when exporting alerts that contain deprecated metrics you will - # need to remove them from the source json - # (see https://sysdigdocs.atlassian.net/wiki/spaces/Monitor/pages/205684810/Metrics#Metrics-HeuristicandDeprecatedMetrics) - if a['name'] in existing_alerts: - a['id'] = existing_alerts[a['name']]['id'] - a['version'] = existing_alerts[a['name']]['version'] - ok, res = sdclient.update_alert(a) - updated_count += 1 - else: - ok, res = sdclient.create_alert(alert_obj=a) - created_count += 1 - if not ok: - print(res) - sys.exit(1) - -print(('All Alerts in ' + alerts_dump_file + ' restored successfully (' - + str(created_count) + ' created, ' + 
str(updated_count) + ' updated)')) diff --git a/examples/restore_dashboards.py b/examples/restore_dashboards.py deleted file mode 100755 index c29720d2..00000000 --- a/examples/restore_dashboards.py +++ /dev/null @@ -1,48 +0,0 @@ -#!/usr/bin/env python -# -# Save/restore dashboards -# - -import json -import sys -import zipfile - -from sdcclient import SdMonitorClient - -# -# Parse arguments -# -if len(sys.argv) != 3: - print(('usage: %s ' % sys.argv[0])) - print('You can find your token at https://app.sysdigcloud.com/#/settings/user') - sys.exit(1) - -sdc_token = sys.argv[1] -dashboard_state_file = sys.argv[2] - -# -# Instantiate the SDC client -# -sdclient = SdMonitorClient(sdc_token) - -zipf = zipfile.ZipFile(dashboard_state_file, 'r') - -for info in zipf.infolist(): - data = zipf.read(info.filename) - try: - j = json.loads(data) - except ValueError: - print(('Invalid JSON file found in ZIP file ' + info.filename + ': skipping')) - continue - - # - # Handle old files - # - if 'dashboard' in j: - j = j['dashboard'] - - ok, res = sdclient.create_dashboard_with_configuration(j) - if ok: - print(('Restored Dashboard named: ' + j['name'])) - else: - print(("Dashboard creation failed for dashboard name %s with error %s" % (j['name'], res))) diff --git a/examples/set_agents_config.py b/examples/set_agents_config.py deleted file mode 100755 index 963053bd..00000000 --- a/examples/set_agents_config.py +++ /dev/null @@ -1,51 +0,0 @@ -#!/usr/bin/env python -# -# Set the sysdig cloud agents configuration. -# This script takes a user token and a yaml configuration file as input, and pushes the configuration -# in the yaml config file to the user. -# Typically, you want to first read the config file using the get_agents_config.py script, -# edit it and then push it back with this script. 
-# - -import sys - -import yaml - -from sdcclient import SdcClient - -# -# Parse arguments -# -if len(sys.argv) != 3: - print(('usage: %s ' % sys.argv[0])) - print('You can find your token at https://app.sysdigcloud.com/#/settings/user') - sys.exit(1) - -sdc_token = sys.argv[1] - -# -# Load the config file -# -with open(sys.argv[2]) as cfile: - yaml_conf = cfile.read() - # Verify that the content is valid yaml - parsed_yaml_conf = yaml.safe_load(yaml_conf) -# -# Instantiate the SDC client -# -sdclient = SdcClient(sdc_token, 'https://app.sysdigcloud.com') - -json = {"files": [{"filter": "*", "content": yaml_conf}]} - -# -# Push the configuration -# -ok, res = sdclient.set_agents_config(json) - -# -# Check if everything went well -# -if ok: - print('configuration set successfully') -else: - print(res) diff --git a/examples/set_explore_group_configuration.py b/examples/set_explore_group_configuration.py deleted file mode 100755 index 112b3fad..00000000 --- a/examples/set_explore_group_configuration.py +++ /dev/null @@ -1,35 +0,0 @@ -#!/usr/bin/env python -# -# Set the group configuration in explore. 
-# - -import sys - -from sdcclient import SdcClient - -# -# Parse arguments -# -if len(sys.argv) != 2: - print(('usage: %s ' % sys.argv[0])) - print('You can find your token at https://app.sysdigcloud.com/#/settings/user') - sys.exit(1) - -sdc_token = sys.argv[1] - -# -# Instantiate the SDC client -# -sdclient = SdcClient(sdc_token) - -# -# Fire the request, set the group configuration you need in the example below -# -groupConfig = ['agent.tag.role', 'host.mac'] -ok, res = sdclient.set_explore_grouping_hierarchy(groupConfig) - -# -# Show the error if there was one -# -if not ok: - print(res) diff --git a/examples/set_policy_order_v1.py b/examples/set_policy_order_v1.py deleted file mode 100755 index e2b7d812..00000000 --- a/examples/set_policy_order_v1.py +++ /dev/null @@ -1,63 +0,0 @@ -#!/usr/bin/env python -# -# Change the evaluation order of policies to match the provided json. -# - -import json -import sys - -from sdcclient import SdSecureClientV1 - - -def usage(): - print(('usage: %s ' % sys.argv[0])) - print('Reads json representing new policy evaluation order from standard input') - print('You can find your token at https://secure.sysdig.com/#/settings/user') - sys.exit(1) - - -# -# Parse arguments -# -if len(sys.argv) != 2: - usage() - -sdc_token = sys.argv[1] -priorities_json = sys.stdin.read() - -try: - priorities_obj = json.loads(priorities_json) -except Exception as e: - print(("priorities json is not valid json: {}".format(str(e)))) - sys.exit(1) - -# -# Instantiate the SDC client -# -sdclient = SdSecureClientV1(sdc_token, 'https://secure.sysdig.com') - -# -# The argument to /api/policies/priorities is the list of ids wrapped -# in an object containing a version and dates. So fetch the list of -# priorities, update the list in-place and set it. 
-# - -ok, res = sdclient.get_policy_priorities() - -if not ok: - print(res) - sys.exit(1) - -obj = res -obj['priorities']['policyIds'] = priorities_obj - -ok, res = sdclient.set_policy_priorities(json.dumps(obj)) - -# -# Return the result -# -if ok: - print((json.dumps(res, indent=2))) -else: - print(res) - sys.exit(1) diff --git a/examples/set_secure_default_falco_rules_files.py b/examples/set_secure_default_falco_rules_files.py deleted file mode 100755 index a0b49a57..00000000 --- a/examples/set_secure_default_falco_rules_files.py +++ /dev/null @@ -1,106 +0,0 @@ -#!/usr/bin/env python -# -# Set the sysdig secure default rules files. -# -# The _files programs and endpoints are a replacement for the -# system_file endpoints and allow for publishing multiple files -# instead of a single file as well as publishing multiple variants of -# a given file that are compatible with different agent versions. -# - -import getopt -import os -import sys - -import yaml - -from sdcclient import SdSecureClient - - -# -# Parse arguments -# -def usage(): - print(('usage: %s [-l|--load ] [-t|--tag ] [-c|--content ] ' % sys.argv[0])) - print('-l|--load: load the files to set from a set of files below using load_default_rules_files().') - print('-t|--tag: Set a tag for the set of files') - print('-c|--content: the (single) file to set') - print('if --load is specified, neither --tag nor --content can be specified') - print('You can find your token at https://secure.sysdig.com/#/settings/user') - sys.exit(1) - - -try: - opts, args = getopt.getopt(sys.argv[1:], "l:t:n:c:", ["load=", "tag=", "name=", "content="]) -except getopt.GetoptError: - usage() - -load_dir = "" -tag = "" -cpath = "" -for opt, arg in opts: - if opt in ("-l", "--load"): - load_dir = arg - elif opt in ("-t", "--tag"): - tag = arg - elif opt in ("-c", "--content"): - cpath = arg - -if load_dir != "" and (tag != "" or cpath != ""): - usage() -# -# Parse arguments -# -if len(args) != 1: - usage() - -sdc_token = args[0] - 
-# -# Instantiate the SDC client -# -sdclient = SdSecureClient(sdc_token, 'https://secure.sysdig.com') - -files_obj = {} -if load_dir != "": - print(("Loading falco rules files from {}...".format(load_dir))) - ok, res = sdclient.load_default_falco_rules_files(load_dir) - if ok: - files_obj = res - else: - print(res) - sys.exit(1) -else: - with open(cpath, 'r') as content_file: - content = content_file.read() - required_engine_version = 0 - cyaml = yaml.safe_load(content) - for obj in cyaml: - if "required_engine_version" in obj: - try: - required_engine_version = int(obj["required_engine_version"]) - except ValueError: - print(("Required engine version \"{}\" in content {} must be a number".format( - obj["required_engine_version"], cpath))) - sys.exit(1) - files_obj = { - "tag": tag, - "files": [{ - "name": os.path.basename(cpath), - "variants": { - "required_engine_version": required_engine_version, - "content": content - } - }] - } - -ok, res = sdclient.set_default_falco_rules_files(files_obj) - -# -# Return the result -# -if ok: - print('default falco rules files set successfully') -else: - print(res) - sys.exit(1) diff --git a/examples/set_secure_system_falco_rules.py b/examples/set_secure_system_falco_rules.py deleted file mode 100755 index 6ae7b185..00000000 --- a/examples/set_secure_system_falco_rules.py +++ /dev/null @@ -1,46 +0,0 @@ -#!/usr/bin/env python -# -# Set the sysdig secure system rules file. -# This script takes a user token and a falco rules file (yaml) as input, and sets the -# system falco rules file for this customer to that file. 
-# - -import sys - -import yaml - -from sdcclient import SdSecureClient - -# -# Parse arguments -# -if len(sys.argv) != 3: - print(('usage: %s ' % sys.argv[0])) - print('You can find your token at https://app.sysdigcloud.com/#/settings/user') - sys.exit(1) - -sdc_token = sys.argv[1] - -# -# Load the config file -# -with open(sys.argv[2]) as cfile: - yaml_conf = cfile.read() - # Verify that the content is valid yaml - parsed_yaml_conf = yaml.safe_load(yaml_conf) - -sdclient = SdSecureClient(sdc_token, 'https://secure.sysdig.com') - -# -# Push the configuration -# -ok, res = sdclient.set_system_falco_rules(yaml_conf) - -# -# Check if everything went well -# -if ok: - print('system falco rules set successfully') -else: - print(res) - sys.exit(1) diff --git a/examples/set_secure_user_falco_rules.py b/examples/set_secure_user_falco_rules.py deleted file mode 100755 index ec790375..00000000 --- a/examples/set_secure_user_falco_rules.py +++ /dev/null @@ -1,46 +0,0 @@ -#!/usr/bin/env python -# -# Set the sysdig secure system rules file. -# This script takes a user token and a falco rules file (yaml) as input, and sets the -# system falco rules file for this customer to that file. 
-# - -import sys - -import yaml - -from sdcclient import SdSecureClient - -# -# Parse arguments -# -if len(sys.argv) != 3: - print(('usage: %s ' % sys.argv[0])) - print('You can find your token at https://app.sysdigcloud.com/#/settings/user') - sys.exit(1) - -sdc_token = sys.argv[1] - -# -# Load the config file -# -with open(sys.argv[2]) as cfile: - yaml_conf = cfile.read() - # Verify that the content is valid yaml - parsed_yaml_conf = yaml.safe_load(yaml_conf) - -sdclient = SdSecureClient(sdc_token, 'https://secure.sysdig.com') - -# -# Push the configuration -# -ok, res = sdclient.set_user_falco_rules(yaml_conf) - -# -# Check if everything went well -# -if ok: - print('user falco rules set successfully') -else: - print(res) - sys.exit(1) diff --git a/examples/update_alert.py b/examples/update_alert.py deleted file mode 100755 index d134d7b8..00000000 --- a/examples/update_alert.py +++ /dev/null @@ -1,74 +0,0 @@ -#!/usr/bin/env python -# -# This script shows how to use the update_alert() call to modify the -# details of an existing alert. -# -# - -import getopt -import json -import sys - -from sdcclient import SdcClient - - -# -# Parse arguments -# -def usage(): - print(('usage: %s [-a|--alert ] ' % sys.argv[0])) - print('-a|--alert: Set name of alert to update') - print('You can find your token at https://app.sysdigcloud.com/#/settings/user') - sys.exit(1) - - -try: - opts, args = getopt.getopt(sys.argv[1:], "a:", ["alert="]) -except getopt.GetoptError: - usage() - -alert_name = "tomcat cpu > 80% on any host" -for opt, arg in opts: - if opt in ("-a", "--alert"): - alert_name = arg - -if len(args) != 1: - usage() - -sdc_token = args[0] - -# -# Instantiate the SDC client -# -sdclient = SdcClient(sdc_token) - -ok, res = sdclient.get_alerts() -if not ok: - print(res) - sys.exit(1) - -alert_found = False -for alert in res['alerts']: - if alert['name'] == alert_name: - alert_found = True - print('Updating alert. 
Configuration before changing timespan, description, and notification channels:') - print((json.dumps(alert, sort_keys=True, indent=4))) - if 'notificationChannelIds' in alert: - alert['notificationChannelIds'] = alert['notificationChannelIds'][0:-1] - update_txt = ' (changed by update_alert)' - if alert['description'][-len(update_txt):] != update_txt: - alert['description'] = alert['description'] + update_txt - alert['timespan'] = alert['timespan'] * 2 # Note: Expressed in seconds * 1000000 - ok, res_update = sdclient.update_alert(alert) - - if not ok: - print(res_update) - sys.exit(1) - - # Validate and print the results - print('\nAlert after modification:') - print((json.dumps(res_update, sort_keys=True, indent=4))) - -if not alert_found: - print('Alert to be updated not found') - sys.exit(1) diff --git a/examples/update_policy.py b/examples/update_policy.py deleted file mode 100755 index f5b8d3ab..00000000 --- a/examples/update_policy.py +++ /dev/null @@ -1,42 +0,0 @@ -#!/usr/bin/env python -# -# Update a specific policy -# - -import json -import sys - -from sdcclient import SdSecureClient - - -def usage(): - print(('usage: %s ' % sys.argv[0])) - print('Reads json representing updated policy from standard input') - print('You can find your token at https://secure.sysdig.com/#/settings/user') - sys.exit(1) - - -# -# Parse arguments -# -if len(sys.argv) != 2: - usage() - -sdc_token = sys.argv[1] -policy_json = sys.stdin.read() - -# -# Instantiate the SDC client -# -sdclient = SdSecureClient(sdc_token, 'https://secure.sysdig.com') - -ok, res = sdclient.update_policy_json(policy_json) - -# -# Return the result -# -if ok: - print((json.dumps(res, indent=2))) -else: - print(res) - sys.exit(1) diff --git a/examples/update_policy_v1.py b/examples/update_policy_v1.py deleted file mode 100755 index d4eb056a..00000000 --- a/examples/update_policy_v1.py +++ /dev/null @@ -1,42 +0,0 @@ -#!/usr/bin/env python -# -# Update a specific policy -# - -import json -import sys - 
-from sdcclient import SdSecureClientV1 - - -def usage(): - print(('usage: %s ' % sys.argv[0])) - print('Reads json representing updated policy from standard input') - print('You can find your token at https://secure.sysdig.com/#/settings/user') - sys.exit(1) - - -# -# Parse arguments -# -if len(sys.argv) != 2: - usage() - -sdc_token = sys.argv[1] -policy_json = sys.stdin.read() - -# -# Instantiate the SDC client -# -sdclient = SdSecureClientV1(sdc_token, 'https://secure.sysdig.com') - -ok, res = sdclient.update_policy(policy_json) - -# -# Return the result -# -if ok: - print((json.dumps(res, indent=2))) -else: - print(res) - sys.exit(1) diff --git a/examples/user_team_mgmt.py b/examples/user_team_mgmt.py deleted file mode 100755 index 214049d1..00000000 --- a/examples/user_team_mgmt.py +++ /dev/null @@ -1,89 +0,0 @@ -#!/usr/bin/env python -# -# This example shows the different aspects of user/team management. -# - -import sys - -from sdcclient import SdcClient - -# -# Parse arguments -# -if len(sys.argv) != 4: - print(('usage: %s team-name user-name' % sys.argv[0])) - print('You can find your token at https://app.sysdigcloud.com/#/settings/user') - sys.exit(1) - -sdc_token = sys.argv[1] - -# -# Instantiate the SDC client -# -sdclient = SdcClient(sdc_token, sdc_url='https://app.sysdigcloud.com') - -team_name = sys.argv[2] -user_name = sys.argv[3] - -print(('Trying to invite a user:', user_name)) -ok, res = sdclient.create_user_invite(user_name) -if not ok: - if res == 'user ' + user_name + ' already exists': - print(('User creation failed because', user_name, 'already exists. Continuing.')) - else: - print(('User creation failed:', res, '. Exiting.')) - sys.exit(1) -else: - print('User creation succeeded') - -# Possible failures on Team creation might include having reached the -# max limit on Teams for this customer account or if the Team by that -# name already exists. 
Since a previous successful run of this test -# would have deleted the Team by the same name, and we need to be able -# to configure Teams for this test to pass, we'll treat both types of -# error as a genuine fail of the test. -print(('Now trying to create a team with name:', team_name)) -ok, res = sdclient.create_team(team_name) -if not ok: - print(('Team creation failed:', res, '. Exiting.')) - sys.exit(1) -else: - print(('Team creation succeeded.', res)) - -print(('Now trying to find team with name:', team_name)) -ok, res = sdclient.get_team(team_name) -if not ok: - print(('Could not get team info:', res, '. Exiting.')) - sys.exit(1) -else: - print('Team fetch succeeded') - -print(('Now trying to edit team:', team_name)) -memberships = { - 'admin@draios.com': 'ROLE_TEAM_MANAGER', - 'john-doe@sysdig.com': 'ROLE_TEAM_READ' -} -ok, res = sdclient.edit_team(team_name, description='Nextgen2', memberships=memberships) -if not ok: - print(('Could not edit team:', res, '. Exiting.')) - sys.exit(1) -else: - print('Edited team to change description and add users') - -print(('Now trying to edit user:', user_name)) -ok, res = sdclient.edit_user(user_name, firstName='Just', lastName='Edited3', systemRole='ROLE_CUSTOMER') -if not ok: - print(('Could not edit user:', res, '. Exiting.')) - sys.exit(1) -else: - print('Edit user succeeded') - -print(('Now trying to delete the team:', team_name)) -ok, res = sdclient.delete_team(team_name) -if not ok: - print(('Could not delete team:', res, '. Exiting.')) - sys.exit(1) -else: - print('Delete team succeeded') - -sys.exit(0) diff --git a/examples/user_team_mgmt_extended.py b/examples/user_team_mgmt_extended.py deleted file mode 100755 index ff33a1f9..00000000 --- a/examples/user_team_mgmt_extended.py +++ /dev/null @@ -1,265 +0,0 @@ -#!/usr/bin/env python -# -# This example shows the different aspects of user/team management. 
-# - -import sys - -from sdcclient import SdcClient - -# -# Parse arguments -# -if len(sys.argv) != 4: - print(('usage: %s team-prefix user-name' % sys.argv[0])) - print('You can find your token at https://app.sysdigcloud.com/#/settings/user') - sys.exit(1) - -sdc_token = sys.argv[1] - -# -# Instantiate the SDC client -# -sdclient = SdcClient(sdc_token, sdc_url='https://app.sysdigcloud.com') - -team_prefix = sys.argv[2] - -user_email_parts = sys.argv[3].split('@') -user_email_prefix = user_email_parts[0] -user_email_domain = user_email_parts[1] - -# -# Create test users -# -# All users initially are part of default team. -# - -admin = user_email_prefix + '+team_mgmt-admin' + '@' + user_email_domain -userA = user_email_prefix + '+team_mgmt-a' + '@' + user_email_domain -userB = user_email_prefix + '+team_mgmt-b' + '@' + user_email_domain - -teamA = team_prefix + 'A' -teamB = team_prefix + 'B' - -print('Creating test users...') - -try: - ok, res = sdclient.create_user_invite(admin, first_name='TestUser', last_name='Admin', system_role='ROLE_CUSTOMER') - if not ok: - print(('-- User creation failed:', res, '. Exiting.')) - sys.exit(1) - else: - print(('-- User \'', admin, '\' created successfully.')) - - ok, res = sdclient.create_user_invite(userA, first_name='TestUser', last_name='Alpha') - if not ok: - print(('-- User creation failed:', res, '. Exiting.')) - sys.exit(1) - else: - print(('-- User \'', userA, '\' created successfully.')) - - ok, res = sdclient.create_user_invite(userB, first_name='TestUser', last_name='Beta') - if not ok: - print(('-- User creation failed:', res, '. Exiting.')) - sys.exit(1) - else: - print(('-- User \'', userB, '\' created successfully.')) - - # - # Create test teams - # - # Possible failures on Team creation might include having reached the - # max limit on Teams for this customer account or if the Team by that - # name already exists. 
Since a previous successful run of this test - # would have deleted the Team by the same name, and we need to be able - # to configure Teams for this test to pass, we'll treat both types of - # error as a genuine fail of the test. - # - - print('Creating test teams...') - - ok, res = sdclient.create_team(teamA) - if not ok: - print(('-- Team creation failed:', res, '. Exiting.')) - sys.exit(1) - else: - print(('-- Team \'', teamA, '\' created successfully.')) - - ok, res = sdclient.create_team(teamB) - if not ok: - print(('-- Team creation failed:', res, '. Exiting.')) - sys.exit(1) - else: - print(('-- Team \'', teamB, '\' created successfully.')) - - # - # Membership manipulation - # - # Admins are part of all teams and their membership cannot be edited. - # - - print('Membership manipulation...') - - ok, res = sdclient.list_memberships(teamA) - if not ok: - print(('-- Unable to fetch team memberships:', res, '. Exiting.')) - sys.exit(1) - elif admin not in list(res.keys()): - print(('-- Admin should be part of all teams!', 'Exiting.')) - sys.exit(1) - elif userA in list(res.keys()) or userB in list(res.keys()): - print(('-- Users ', userA, ' and ', userB, ' should not be part of team ', teamA, '!', 'Exiting.')) - sys.exit(1) - - ok, res = sdclient.list_memberships(teamB) - if not ok: - print(('-- Unable to fetch team memberships:', res, '. Exiting.')) - sys.exit(1) - elif admin not in list(res.keys()): - print(('-- Admin should be part of all teams!', 'Exiting.')) - sys.exit(1) - elif userA in list(res.keys()) or userB in list(res.keys()): - print(('-- Users ', userA, ' and ', userB, ' should not be part of team ', teamB, '!', 'Exiting.')) - sys.exit(1) - - # - # Create team memberships - # - - print('-- Create team memberships') - - # Manipulate with teamA - - ok, res = sdclient.save_memberships(teamA, {userA: 'ROLE_TEAM_EDIT'}) - if not ok: - print(('-- Unable to add ', userA, ' to ', teamA, ' due to: ', res, '. 
Exiting.')) - sys.exit(1) - - ok, res = sdclient.list_memberships(teamA) - if not ok: - print(('-- Unable to fetch team memberships:', res, '. Exiting.')) - sys.exit(1) - elif userA not in list(res.keys()) or admin not in list(res.keys()): - print(('-- Users ', userA, ' and ', admin, ' should be part of team ', teamA, '!', 'Exiting.')) - sys.exit(1) - - # Manipulate with teamB - - ok, res = sdclient.save_memberships(teamB, {userA: 'ROLE_TEAM_MANAGER', userB: 'ROLE_TEAM_READ'}) - if not ok: - print(('-- Unable to add ', userA, ' and ', userB, ' to ', teamB, ' due to: ', res, '. Exiting.')) - sys.exit(1) - - ok, res = sdclient.list_memberships(teamB) - if not ok: - print(('-- Unable to fetch team memberships:', res, '. Exiting.')) - sys.exit(1) - elif userA not in list(res.keys()) or userB not in list(res.keys()) or admin not in list(res.keys()): - print(('-- Users ', userA, ', ', userB, ' and ', admin, ' should be part of team ', teamB, '!', 'Exiting.')) - sys.exit(1) - - # Update team memberships - - print('-- Update team memberships') - - # Add new or update existing memberships - ok, res = sdclient.save_memberships(teamA, {userA: 'ROLE_TEAM_READ', userB: 'ROLE_TEAM_EDIT'}) - if not ok: - print(('-- Unable to modify membership for ', userA, ' and to add ', userB, ' to ', teamA, ' due to: ', res, - '. Exiting.')) - sys.exit(1) - - ok, res = sdclient.list_memberships(teamA) - if not ok: - print(('-- Unable to fetch team memberships:', res, '. 
Exiting.')) - sys.exit(1) - elif userA not in list(res.keys()) or userB not in list(res.keys()) or admin not in list(res.keys()): - print(('-- Users ', userA, ', ', userB, ' and ', admin, ' should be part of team ', teamA, '!', 'Exiting.')) - sys.exit(1) - elif res[userA] != 'ROLE_TEAM_READ' or res[userB] != 'ROLE_TEAM_EDIT': - print(('-- Users ', userA, ' and ', userB, ' should have appropriate roles assigned for team ', teamA, '!', - 'Exiting.')) - sys.exit(1) - - # Remove team memberships - - print('-- Remove team memberships') - - ok, res = sdclient.remove_memberships(teamA, [userB]) - if not ok: - print(('-- Unable to remove membership for ', userB, ' from team', teamA, ' due to: ', res, '. Exiting.')) - sys.exit(1) - - ok, res = sdclient.list_memberships(teamA) - if not ok: - print(('-- Unable to fetch team memberships:', res, '. Exiting.')) - sys.exit(1) - elif userB in list(res.keys()): - print(('-- User ', userB, ' should not be part of team ', teamA, '!', 'Exiting.')) - sys.exit(1) - - # Admin user cannot be removed from any team - ok, res = sdclient.remove_memberships(teamB, [admin, userA]) - if not ok: - print(('-- Unable to remove membership for ', userB, ' from team', teamA, ' due to: ', res, '. Exiting.')) - sys.exit(1) - - ok, res = sdclient.list_memberships(teamB) - if not ok: - print(('-- Unable to fetch team memberships:', res, '. 
Exiting.')) - sys.exit(1) - elif userA in list(res.keys()): - print(('-- User ', userA, ' should not be part of team ', teamB, '!', 'Exiting.')) - sys.exit(1) - elif admin not in list(res.keys()): - print(('-- User ', admin, ' should be always part of all teams!', 'Exiting.')) - sys.exit(1) - -finally: - # - # Clean-up - # - print('Cleaning up...') - - print('-- Deleting test teams.') - - try: - ok, res = sdclient.delete_team(teamA) - if not ok: - print(('-- Team \'', teamA, '\' deletion failed: ', res)) - except Exception as exception: - print(('-- Team \'', teamA, '\' deletion failed: ', exception)) - - try: - ok, res = sdclient.delete_team(teamB) - if not ok: - print(('-- Team \'', teamB, '\' deletion failed: ', res)) - except Exception as exception: - print(('-- Team \'', teamB, '\' deletion failed: ', exception)) - - print('-- Deleting test users.') - - try: - ok, res = sdclient.delete_user(admin) - if not ok: - print(('-- User \'', admin, '\' deletion failed: ', res)) - except Exception as exception: - print(('-- User \'', admin, '\' deletion failed: ', exception)) - - try: - ok, res = sdclient.delete_user(userA) - if not ok: - print(('-- User \'', userA, '\' deletion failed: ', res)) - except Exception as exception: - print(('-- User \'', userA, '\' deletion failed: ', exception)) - - try: - ok, res = sdclient.delete_user(userB) - if not ok: - print(('-- User \'', userB, '\' deletion failed: ', res)) - except Exception as exception: - print(('-- User \'', userB, '\' deletion failed: ', exception)) - -print('All done successfully!!!') - -sys.exit(0) diff --git a/index.md b/index.md new file mode 100644 index 00000000..bd9d7ddd --- /dev/null +++ b/index.md @@ -0,0 +1,74 @@ +--- +description: A Python client API for Sysdig Monitor/Sysdig Secure. +--- + +This module is a wrapper around the Sysdig Monitor/Sysdig Secure APIs. It +exposes most of the sysdig REST API functionality as an easy to use and easy to +install Python interface. 
+ +There are more details the [Sysdig SDK Python documentation](https://sysdig-sdk-python.readthedocs.io). + +## Installation + +### Automatic with PyPI + +```console +$ pip install sdcclient +``` + +### Manual (development only) + +This method requires [Poetry](https://python-poetry.org/) installed + +```console +$ git clone https://github.com/sysdiglabs/sysdig-sdk-python.git +$ cd python-sdc-client +$ poetry install +``` + +## Usage + +_Note:_ in order to use this API you must obtain a Sysdig Monitor/Secure API token. +You can get your user's token in the _Sysdig Monitor API_ section of the settings page +for [monitor](https://app.sysdigcloud.com/#/settings/user) or +[secure](https://secure.sysdig.com/#/settings/user). + +The library exports two classes, `SdMonitorClient` and `SdSecureClient` that +are used to connect to Sysdig Monitor/Secure and execute actions. + +They can be instantiated like this: + +``` python +from sdcclient import SdMonitorClient + +api_token = "MY_API_TOKEN" + +# +# Instantiate the Sysdig Monitor client +# +client = SdMonitorClient(api_token) +``` + +For backwards compatibility purposes, a third class `SdcClient` is exported which is an alias of `SdMonitorClient`. + +Once instantiated, all the methods documented below can be called on the object. + +## On-Premises Installs + +For [On-Premises Sysdig Monitor installs](https://support.sysdigcloud.com/hc/en-us/articles/206519903-On-Premises-Installation-Guide), +additional configuration is necessary to point to your API server rather than +the default SaaS-based one, and also to easily connect when using a self-signed +certificate for SSL. 
One way to handle this is by setting environment variables +before running your Python scripts: + +```console +export SDC_URL='https://' +export SDC_SSL_VERIFY='false' +``` + +Alternatively, you can specify the additional arguments in your Python scripts +as you instantiate the SDC client: + +``` +client = SdMonitorClient(api_token, sdc_url='https://', ssl_verify=False) +``` diff --git a/sdcclient/__init__.py b/sdcclient/__init__.py deleted file mode 100644 index f11bda4d..00000000 --- a/sdcclient/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -from sdcclient._monitor import SdcClient -from sdcclient._monitor import SdMonitorClient -from sdcclient._monitor_v1 import SdMonitorClientV1 -from sdcclient._secure import SdSecureClient -from sdcclient._secure_v1 import SdSecureClientV1 -from sdcclient._scanning import SdScanningClient -from sdcclient.ibm_auth_helper import IbmAuthHelper -import sdcclient.secure -import sdcclient.monitor \ No newline at end of file diff --git a/sdcclient/_common.py b/sdcclient/_common.py deleted file mode 100644 index 279c4c7c..00000000 --- a/sdcclient/_common.py +++ /dev/null @@ -1,1013 +0,0 @@ -import json -import os - -import requests - - -class _SdcCommon(object): - '''Interact with the Sysdig Monitor/Secure API. - - **Arguments** - - **token**: A Sysdig Monitor/Secure API token from the *Sysdig Cloud API* section of the Settings page for `monitor `_ or .`secure `_. - - **sdc_url**: URL for contacting the Sysdig API server. Set this in `On-Premises installs `__. - - **ssl_verify**: Whether to verify certificate. Set to False if using a self-signed certificate in an `On-Premises install `__. - - **custom_headers**: [dict] Pass in custom headers. Useful for authentication and will override the default headers. - - **Returns** - An object for further interactions with the Sysdig Monitor/Secure API. See methods below. 
- ''' - lasterr = None - - def __init__(self, token="", sdc_url='https://app.sysdigcloud.com', ssl_verify=True, custom_headers=None): - self.token = os.environ.get("SDC_TOKEN", token) - self.hdrs = self.__get_headers(custom_headers) - self.url = os.environ.get("SDC_URL", sdc_url).rstrip('/') - self.ssl_verify = os.environ.get("SDC_SSL_VERIFY", None) - if self.ssl_verify == None: - self.ssl_verify = ssl_verify - else: - if self.ssl_verify.lower() in ['true', 'false']: - self.ssl_verify = self.ssl_verify.lower() == 'true' - - def __get_headers(self, custom_headers): - headers = { - 'Content-Type': 'application/json', - 'Authorization': 'Bearer ' + self.token - } - if custom_headers: - headers.update(custom_headers) - return headers - - def _checkResponse(self, res): - if res.status_code >= 300: # FIXME: Should it be >=400? 301 = Moved Permanently, 302 = Found, 303 = See Other - errorcode = res.status_code - self.lasterr = None - - try: - j = res.json() - except Exception: - self.lasterr = 'status code ' + str(errorcode) - return False - - if 'errors' in j: - error_msgs = [] - for error in j['errors']: - error_msg = [] - if 'message' in error: - error_msg.append(error['message']) - - if 'reason' in error: - error_msg.append(error['reason']) - - error_msgs.append(': '.join(error_msg)) - - self.lasterr = '\n'.join(error_msgs) - elif 'message' in j: - self.lasterr = j['message'] - else: - self.lasterr = 'status code ' + str(errorcode) - return False - return True - - def get_user_info(self): - '''**Description** - Get details about the current user. - - **Success Return Value** - A dictionary containing information about the user, for example its email and the maximum number of agents it can install. 
- - **Example** - `examples/print_user_info.py `_ - ''' - res = requests.get(self.url + '/api/user/me', headers=self.hdrs, verify=self.ssl_verify) - return self._request_result(res) - - def get_user_token(self): - '''**Description** - Return the API token of the current user. - - **Success Return Value** - A string containing the user token. - ''' - res = requests.get(self.url + '/api/token', headers=self.hdrs, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - tkinfo = res.json() - - return [True, tkinfo['token']['key']] - - def get_connected_agents(self): - '''**Description** - Return the agents currently connected to Sysdig Monitor for the current user. - - **Success Return Value** - A list of the agents with all their attributes. - ''' - res = requests.get(self.url + '/api/agents/connected', headers=self.hdrs, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - data = res.json() - return [True, data['agents']] - - def get_n_connected_agents(self): - '''**Description** - Return the number of agents currently connected to Sysdig Monitor for the current user. - - **Success Return Value** - An integer number. - ''' - res = requests.get(self.url + '/api/agents/connected', headers=self.hdrs, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - data = res.json() - return [True, data['total']] - - def list_notification_channels(self): - '''**Description** - List all configured Notification Channels - - **Arguments** - none - - **Success Return Value** - A JSON representation of all the notification channels - ''' - res = requests.get(self.url + '/api/notificationChannels', headers=self.hdrs, verify=self.ssl_verify) - return self._request_result(res) - - def get_notification_ids(self, channels=None): - '''**Description** - Get an array of all configured Notification Channel IDs, or a filtered subset of them. 
- - **Arguments** - - **channels**: an optional array of dictionaries to limit the set of Notification Channel IDs returned. If not specified, IDs for all configured Notification Channels are returned. Each dictionary contains a ``type`` field that can be one of the available types of Notification Channel (``EMAIL``, ``SNS``, ``PAGER_DUTY``, ``SLACK``, ``OPSGENIE``, ``VICTOROPS``, ``WEBHOOK``) as well as additional elements specific to each channel type. - - **Success Return Value** - An array of Notification Channel IDs (integers). - - **Examples** - - `examples/create_alert.py `_ - - `examples/restore_alerts.py `_ - ''' - - res = requests.get(self.url + '/api/notificationChannels', headers=self.hdrs, verify=self.ssl_verify) - - if not self._checkResponse(res): - return False, self.lasterr - - ids = [] - - # If no array of channel types/names was provided to filter by, - # just return them all. - if channels is None: - for ch in res.json()["notificationChannels"]: - ids.append(ch['id']) - return [True, ids] - - # Return the filtered set of channels based on the provided types/names array. 
- # Should try and improve this M * N lookup - for c in channels: - found = False - for ch in res.json()["notificationChannels"]: - if c['type'] == ch['type']: - if c['type'] == 'SNS': - opt = ch['options'] - if set(opt['snsTopicARNs']) == set(c['snsTopicARNs']): - found = True - ids.append(ch['id']) - elif c['type'] == 'EMAIL': - opt = ch['options'] - if 'emailRecipients' in c: - if set(c['emailRecipients']) == set(opt['emailRecipients']): - found = True - ids.append(ch['id']) - elif 'name' in c: - if c['name'] == ch.get('name'): - found = True - ids.append(ch['id']) - elif c['type'] == 'PAGER_DUTY': - opt = ch['options'] - if opt['account'] == c['account'] and opt['serviceName'] == c['serviceName']: - found = True - ids.append(ch['id']) - elif c['type'] == 'SLACK': - opt = ch['options'] - if 'channel' in opt and opt['channel'] == c['channel']: - found = True - ids.append(ch['id']) - elif c['type'] == 'OPSGENIE': - if 'name' in c: - if c['name'] == ch.get('name'): - found = True - ids.append(ch['id']) - elif c['type'] == 'VICTOROPS': - if 'name' in c: - if c['name'] == ch.get('name'): - found = True - ids.append(ch['id']) - elif c['type'] == 'WEBHOOK': - if 'name' in c: - if c['name'] == ch.get('name'): - found = True - ids.append(ch['id']) - if not found: - return False, "Channel not found: " + str(c) - - return True, ids - - def create_email_notification_channel(self, channel_name, email_recipients): - channel_json = { - 'notificationChannel': { - 'type': 'EMAIL', - 'name': channel_name, - 'enabled': True, - 'options': { - 'emailRecipients': email_recipients - } - } - } - - res = requests.post(self.url + '/api/notificationChannels', headers=self.hdrs, data=json.dumps(channel_json), - verify=self.ssl_verify) - return self._request_result(res) - - def create_notification_channel(self, channel): - channel["id"] = None - channel["version"] = None - channel["createdOn"] = None - channel["modifiedOn"] = None - channel_json = { - 'notificationChannel': channel - } - - 
res = requests.post(self.url + '/api/notificationChannels', headers=self.hdrs, data=json.dumps(channel_json), - verify=self.ssl_verify) - return self._request_result(res) - - def get_notification_channel(self, id): - - res = requests.get(self.url + '/api/notificationChannels/' + str(id), headers=self.hdrs, verify=self.ssl_verify) - if not self._checkResponse(res): - return False, self.lasterr - - return True, res.json()['notificationChannel'] - - def update_notification_channel(self, channel): - if 'id' not in channel: - return [False, "Invalid channel format"] - - res = requests.put(self.url + '/api/notificationChannels/' + str(channel['id']), headers=self.hdrs, - data=json.dumps({"notificationChannel": channel}), verify=self.ssl_verify) - return self._request_result(res) - - def delete_notification_channel(self, channel): - if 'id' not in channel: - return [False, "Invalid channel format"] - - res = requests.delete(self.url + '/api/notificationChannels/' + str(channel['id']), headers=self.hdrs, - verify=self.ssl_verify) - if not self._checkResponse(res): - return False, self.lasterr - return True, None - - def get_data_retention_info(self): - '''**Description** - Return the list of data retention intervals, with beginning and end UTC time for each of them. Sysdig Monitor performs rollups of the data it stores. This means that data is stored at different time granularities depending on how far back in time it is. This call can be used to know what precision you can expect before you make a call to :func:`~SdcClient.get_data`. - - **Success Return Value** - A dictionary containing the list of available sampling intervals. 
- - **Example** - `examples/print_data_retention_info.py `_ - ''' - res = requests.get(self.url + '/api/history/timelines/', headers=self.hdrs, verify=self.ssl_verify) - return self._request_result(res) - - def get_topology_map(self, grouping_hierarchy, time_window_s, sampling_time_s): - # - # Craft the time interval section - # - tlines = self.get_data_retention_info() - - for tline in tlines[1]['agents']: - if tline['sampling'] == sampling_time_s * 1000000: - timeinfo = tline - - if timeinfo is None: - return [False, "sampling time " + str(sampling_time_s) + " not supported"] - - timeinfo['from'] = timeinfo['to'] - timeinfo['sampling'] - - # - # Create the grouping hierarchy - # - gby = [{'metric': g} for g in grouping_hierarchy] - - # - # Prepare the json - # - req_json = { - 'format': { - 'type': 'map', - 'exportProcess': True - }, - 'time': timeinfo, - # 'filter': { - # 'filters': [ - # { - # 'metric': 'agent.tag.Tag', - # 'op': '=', - # 'value': 'production-maintenance', - # 'filters': None - # } - # ], - # 'logic': 'and' - # }, - 'limit': { - 'hostGroups': 20, - 'hosts': 20, - 'containers': 20, - 'processes': 10 - }, - 'group': { - 'configuration': { - 'groups': [ - { - 'filters': [], - 'groupBy': gby - } - ] - } - }, - 'nodeMetrics': [ - { - 'id': 'cpu.used.percent', - 'aggregation': 'timeAvg', - 'groupAggregation': 'avg' - } - ], - 'linkMetrics': [ - { - 'id': 'net.bytes.total', - 'aggregation': 'timeAvg', - 'groupAggregation': 'sum' - } - ] - } - - # - # Fire the request - # - res = requests.post(self.url + '/api/data?format=map', headers=self.hdrs, - data=json.dumps(req_json), verify=self.ssl_verify) - return self._request_result(res) - - def get_data(self, metrics, start_ts, end_ts=0, sampling_s=0, - filter='', datasource_type='host', paging=None): - '''**Description** - Export metric data (both time-series and table-based). - - **Arguments** - - **metrics**: a list of dictionaries, specifying the metrics and grouping keys that the query will return. 
A metric is any of the entries that can be found in the *Metrics* section of the Explore page in Sysdig Monitor. Metric entries require an *aggregations* section specifying how to aggregate the metric across time and containers/hosts. A grouping key is any of the entries that can be found in the *Show* or *Segment By* sections of the Explore page in Sysdig Monitor. These entries are used to apply single or hierarchical segmentation to the returned data and don't require the aggregations section. Refer to the Example link below for ready-to-use code snippets. - - **start_ts**: the UTC time (in seconds) of the beginning of the data window. A negative value can be optionally used to indicate a relative time in the past from now. For example, -3600 means "one hour ago". - - **end_ts**: the UTC time (in seconds) of the end of the data window, or 0 to indicate "now". A negative value can also be optionally used to indicate a relative time in the past from now. For example, -3600 means "one hour ago". - - **sampling_s**: the duration of the samples that will be returned. 0 means that the whole data will be returned as a single sample. - - **filter**: a boolean expression combining Sysdig Monitor segmentation criteria that defines what the query will be applied to. For example: *kubernetes.namespace.name='production' and container.image='nginx'*. - - **datasource_type**: specify the metric source for the request, can be ``container`` or ``host``. Most metrics, for example ``cpu.used.percent`` or ``memory.bytes.used``, are reported by both hosts and containers. By default, host metrics are used, but if the request contains a container-specific grouping key in the metric list/filter (e.g. ``container.name``), then the container source is used. In cases where grouping keys are missing or apply to both hosts and containers (e.g. 
``tag.Name``), *datasource_type* can be explicitly set to avoid any ambiguity and allow the user to select precisely what kind of data should be used for the request. `examples/get_data_datasource.py `_ contains a few examples that should clarify the use of this argument. - - **paging**: if segmentation of the query generates values for several different entities (e.g. containers/hosts), this parameter specifies which to include in the returned result. It's specified as a dictionary of inclusive values for ``from`` and ``to`` with the default being ``{ "from": 0, "to": 9 }``, which will return values for the "top 10" entities. The meaning of "top" is query-dependent, based on points having been sorted via the specified group aggregation, with the results sorted in ascending order if the group aggregation is ``min`` or ``none``, and descending order otherwise. - - **Success Return Value** - A dictionary with the requested data. Data is organized in a list of time samples, each of which includes a UTC timestamp and a list of values, whose content and order reflect what was specified in the *metrics* argument. 
- - **Examples** - - `examples/get_data_simple.py `_ - - `examples/get_data_advanced.py `_ - - `examples/list_hosts.py `_ - - `examples/get_data_datasource.py `_ - ''' - reqbody = { - 'metrics': metrics, - 'dataSourceType': datasource_type, - } - - if start_ts < 0: - reqbody['last'] = -start_ts - elif start_ts == 0: - return [False, "start_ts cannot be 0"] - else: - reqbody['start'] = start_ts - reqbody['end'] = end_ts - - if filter != '': - reqbody['filter'] = filter - - if paging is not None: - reqbody['paging'] = paging - - if sampling_s != 0: - reqbody['sampling'] = sampling_s - - res = requests.post(self.url + '/api/data/', headers=self.hdrs, data=json.dumps(reqbody), - verify=self.ssl_verify) - return self._request_result(res) - - def get_sysdig_captures(self, from_sec=None, to_sec=None, scope_filter=None): - '''**Description** - Returns the list of sysdig captures for the user. - - **Arguments** - - from_sec: the start of the timerange for which to get the captures - - end_sec: the end of the timerange for which to get the captures - - scope_filter: this is a SysdigMonitor-like filter (e.g 'container.image=ubuntu'). When provided, events are filtered by their scope, so only a subset will be returned (e.g. 'container.image=ubuntu' will provide only events that have happened on an ubuntu container). - - **Success Return Value** - A dictionary containing the list of captures. - - **Example** - `examples/list_sysdig_captures.py `_ - ''' - url = '{url}/api/sysdig?source={source}{frm}{to}{scopeFilter}'.format( - url=self.url, - source=self.product, - frm="&from=%d" % (from_sec * 10 ** 6) if from_sec else "", - to="&to=%d" % (to_sec * 10 ** 6) if to_sec else "", - scopeFilter="&scopeFilter=%s" % scope_filter if scope_filter else "") - res = requests.get(url, headers=self.hdrs, verify=self.ssl_verify) - return self._request_result(res) - - def poll_sysdig_capture(self, capture): - '''**Description** - Fetch the updated state of a sysdig capture. 
Can be used to poll the status of a capture that has been previously created and started with :func:`~SdcClient.create_sysdig_capture`. - - **Arguments** - - **capture**: the capture object as returned by :func:`~SdcClient.get_sysdig_captures` or :func:`~SdcClient.create_sysdig_capture`. - - **Success Return Value** - A dictionary showing the updated details of the capture. Use the ``status`` field to check the progress of a capture. - - **Example** - `examples/create_sysdig_capture.py `_ - ''' - if 'id' not in capture: - return [False, 'Invalid capture format'] - - url = '{url}/api/sysdig/{id}?source={source}'.format( - url=self.url, id=capture['id'], source=self.product) - res = requests.get(url, headers=self.hdrs, verify=self.ssl_verify) - return self._request_result(res) - - def create_sysdig_capture(self, hostname, capture_name, duration, capture_filter='', folder='/'): - '''**Description** - Create a new sysdig capture. The capture will be immediately started. - - **Arguments** - - **hostname**: the hostname of the instrumented host where the capture will be taken. - - **capture_name**: the name of the capture. - - **duration**: the duration of the capture, in seconds. - - **capture_filter**: a sysdig filter expression. - - **folder**: directory in the S3 bucket where the capture will be saved. - - **Success Return Value** - A dictionary showing the details of the new capture. 
- - **Example** - `examples/create_sysdig_capture.py `_ - ''' - res = self.get_connected_agents() - if not res[0]: - return res - - capture_agent = None - - for agent in res[1]: - if hostname == agent['hostName']: - capture_agent = agent - break - - if capture_agent is None: - return [False, hostname + ' not found'] - - data = { - 'agent': capture_agent, - 'name': capture_name, - 'duration': duration, - 'folder': folder, - 'filters': capture_filter, - 'bucketName': '', - 'source': self.product - } - - res = requests.post(self.url + '/api/sysdig', headers=self.hdrs, data=json.dumps(data), verify=self.ssl_verify) - return self._request_result(res) - - def download_sysdig_capture(self, capture_id): - '''**Description** - Download a sysdig capture by id. - - **Arguments** - - **capture_id**: the capture id to download. - - **Success Return Value** - The bytes of the scap - ''' - url = '{url}/api/sysdig/{id}/download?_product={product}'.format( - url=self.url, id=capture_id, product=self.product) - res = requests.get(url, headers=self.hdrs, verify=self.ssl_verify) - if not self._checkResponse(res): - return False, self.lasterr - - return True, res.content - - def create_user_invite(self, user_email, first_name=None, last_name=None, system_role=None): - '''**Description** - Invites a new user to use Sysdig Monitor. This should result in an email notification to the specified address. - - **Arguments** - - **user_email**: the email address of the user that will be invited to use Sysdig Monitor - - **first_name**: the first name of the user being invited - - **last_name**: the last name of the user being invited - - **system_role**: system-wide privilege level for this user regardless of team. specify 'ROLE_CUSTOMER' to create an Admin. if not specified, default is a non-Admin ('ROLE_USER'). - - **Success Return Value** - The newly created user. 
- - **Examples** - - `examples/user_team_mgmt.py `_ - - `examples/user_team_mgmt_extended.py `_ - - ''' - # Look up the list of users to see if this exists, do not create if one exists - res = requests.get(self.url + '/api/users', headers=self.hdrs, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - data = res.json() - for user in data['users']: - if user['username'] == user_email: - return [False, 'user ' + user_email + ' already exists'] - - # Create the user - options = {'username': user_email, - 'firstName': first_name, - 'lastName': last_name, - 'systemRole': system_role} - user_json = {k: v for k, v in options.items() if v is not None} - - res = requests.post(self.url + '/api/users', headers=self.hdrs, data=json.dumps(user_json), - verify=self.ssl_verify) - return self._request_result(res) - - def delete_user(self, user_email): - '''**Description** - Deletes a user from Sysdig Monitor. - - **Arguments** - - **user_email**: the email address of the user that will be deleted from Sysdig Monitor - - **Example** - `examples/user_team_mgmt.py `_ - ''' - res = self.get_user_ids([user_email]) - if res[0] == False: - return res - userid = res[1][0] - res = requests.delete(self.url + '/api/users/' + str(userid), headers=self.hdrs, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - return [True, None] - - def get_user(self, user_email): - res = requests.get(self.url + '/api/users', headers=self.hdrs, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - for u in res.json()['users']: - if u['username'] == user_email: - return [True, u] - return [False, 'User not found'] - - def get_users(self): - '''**Description** - Return a list containing details about all users in the Sysdig Monitor environment. The API token must have Admin rights for this to succeed. 
- - **Success Return Value** - A list user objects - ''' - res = requests.get(self.url + '/api/users', headers=self.hdrs, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - return [True, res.json()['users']] - - def edit_user(self, user_email, firstName=None, lastName=None, systemRole=None): - res = self.get_user(user_email) - if res[0] == False: - return res - user = res[1] - reqbody = { - 'systemRole': systemRole if systemRole else user['systemRole'], - 'username': user_email, - 'enabled': user.get('enabled', False), - 'version': user['version'] - } - - if firstName == None: - reqbody['firstName'] = user['firstName'] if 'firstName' in list(user.keys()) else '' - else: - reqbody['firstName'] = firstName - - if lastName == None: - reqbody['lastName'] = user['lastName'] if 'lastName' in list(user.keys()) else '' - else: - reqbody['lastName'] = lastName - - res = requests.put(self.url + '/api/users/' + str(user['id']), headers=self.hdrs, data=json.dumps(reqbody), - verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - return [True, 'Successfully edited user'] - - def get_teams(self, team_filter=''): - '''**Description** - Return the set of teams that match the filter specified. The *team_filter* should be a substring of the names of the teams to be returned. - - **Arguments** - - **team_filter**: the team filter to match when returning the list of teams - - **Success Return Value** - The teams that match the filter. - ''' - res = requests.get(self.url + '/api/teams', headers=self.hdrs, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - ret = [t for t in res.json()['teams'] if team_filter in t['name']] - return [True, ret] - - def get_team(self, name): - '''**Description** - Return the team with the specified team name, if it is present. - - **Arguments** - - **name**: the name of the team to return - - **Success Return Value** - The requested team. 
- - **Example** - `examples/user_team_mgmt.py `_ - ''' - res = self.get_teams(name) - if res[0] == False: - return res - for t in res[1]: - if t['name'] == name: - return [True, t] - return [False, 'Could not find team'] - - def get_team_ids(self, teams): - res = requests.get(self.url + '/api/teams', headers=self.hdrs, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - u = [x for x in res.json()['teams'] if x['name'] in teams] - return [True, [x['id'] for x in u]] - - def _get_user_id_dict(self, users): - res = requests.get(self.url + '/api/users', headers=self.hdrs, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - u = [x for x in res.json()['users'] if x['username'] in users] - return [True, dict((user['username'], user['id']) for user in u)] - - def _get_id_user_dict(self, user_ids): - res = requests.get(self.url + '/api/users', headers=self.hdrs, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - u = [x for x in res.json()['users'] if x['id'] in user_ids] - return [True, dict((user['id'], user['username']) for user in u)] - - def get_user_ids(self, users): - res = self._get_user_id_dict(users) - if res[0] == False: - return res - else: - return [True, list(res[1].values())] - - def create_team(self, name, memberships=None, filter='', description='', show='host', theme='#7BB0B2', - perm_capture=False, perm_custom_events=False, perm_aws_data=False): - ''' - **Description** - Creates a new team - - **Arguments** - - **name**: the name of the team to create. - - **memberships**: dictionary of (user-name, team-role) pairs that should describe new memberships of the team. - - **filter**: the scope that this team is able to access within Sysdig Monitor. - - **description**: describes the team that will be created. - - **show**: possible values are *host*, *container*. 
- - **theme**: the color theme that Sysdig Monitor will use when displaying the team. - - **perm_capture**: if True, this team will be allowed to take sysdig captures. - - **perm_custom_events**: if True, this team will be allowed to view all custom events from every user and agent. - - **perm_aws_data**: if True, this team will have access to all AWS metrics and tags, regardless of the team's scope. - - **Success Return Value** - The newly created team. - - **Example** - `examples/user_team_mgmt.py `_ - ''' - reqbody = { - 'name': name, - 'description': description, - 'theme': theme, - 'show': show, - 'canUseSysdigCapture': perm_capture, - 'canUseCustomEvents': perm_custom_events, - 'canUseAwsMetrics': perm_aws_data, - } - - # Map user-names to IDs - if memberships != None and len(memberships) != 0: - res = self._get_user_id_dict(list(memberships.keys())) - if res[0] == False: - return [False, 'Could not fetch IDs for user names'] - reqbody['userRoles'] = [ - { - 'userId': user_id, - 'role': memberships[user_name] - } - for (user_name, user_id) in res[1].items() - ] - else: - reqbody['users'] = [] - - if filter != '': - reqbody['filter'] = filter - - res = requests.post(self.url + '/api/teams', headers=self.hdrs, data=json.dumps(reqbody), - verify=self.ssl_verify) - return self._request_result(res) - - def edit_team(self, name, memberships=None, filter=None, description=None, show=None, theme=None, - perm_capture=None, perm_custom_events=None, perm_aws_data=None): - ''' - **Description** - Edits an existing team. All arguments are optional. Team settings for any arguments unspecified will remain at their current settings. - - **Arguments** - - **name**: the name of the team to edit. - - **memberships**: dictionary of (user-name, team-role) pairs that should describe new memberships of the team. - - **filter**: the scope that this team is able to access within Sysdig Monitor. - - **description**: describes the team that will be created. 
- - **show**: possible values are *host*, *container*. - - **theme**: the color theme that Sysdig Monitor will use when displaying the team. - - **perm_capture**: if True, this team will be allowed to take sysdig captures. - - **perm_custom_events**: if True, this team will be allowed to view all custom events from every user and agent. - - **perm_aws_data**: if True, this team will have access to all AWS metrics and tags, regardless of the team's scope. - - **Success Return Value** - The edited team. - - **Example** - `examples/user_team_mgmt.py `_ - ''' - res = self.get_team(name) - if res[0] == False: - return res - - t = res[1] - reqbody = { - 'name': name, - 'theme': theme if theme else t['theme'], - 'show': show if show else t['show'], - 'canUseSysdigCapture': perm_capture if perm_capture else t['canUseSysdigCapture'], - 'canUseCustomEvents': perm_custom_events if perm_custom_events else t['canUseCustomEvents'], - 'canUseAwsMetrics': perm_aws_data if perm_aws_data else t['canUseAwsMetrics'], - 'id': t['id'], - 'version': t['version'] - } - - # Handling team description - if description is not None: - reqbody['description'] = description - elif 'description' in list(t.keys()): - reqbody['description'] = t['description'] - - # Handling for users to map (user-name, team-role) pairs to memberships - if memberships != None: - res = self._get_user_id_dict(list(memberships.keys())) - if res[0] == False: - return [False, 'Could not convert user names to IDs'] - reqbody['userRoles'] = [ - { - 'userId': user_id, - 'role': memberships[user_name] - } - for (user_name, user_id) in res[1].items() - ] - elif 'userRoles' in list(t.keys()): - reqbody['userRoles'] = t['userRoles'] - else: - reqbody['userRoles'] = [] - - # Special handling for filters since we don't support blank filters - if filter != None: - reqbody['filter'] = filter - elif 'filter' in list(t.keys()): - reqbody['filter'] = t['filter'] - - res = requests.put(self.url + '/api/teams/' + str(t['id']), 
headers=self.hdrs, data=json.dumps(reqbody), - verify=self.ssl_verify) - return self._request_result(res) - - def delete_team(self, name): - '''**Description** - Deletes a team from Sysdig Monitor. - - **Arguments** - - **name**: the name of the team that will be deleted from Sysdig Monitor - - **Example** - `examples/user_team_mgmt.py `_ - ''' - res = self.get_team(name) - if res[0] == False: - return res - - t = res[1] - res = requests.delete(self.url + '/api/teams/' + str(t['id']), headers=self.hdrs, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - return [True, None] - - def list_memberships(self, team): - ''' - **Description** - List all memberships for specified team. - - **Arguments** - - **team**: the name of the team for which we want to see memberships - - **Result** - Dictionary of (user-name, team-role) pairs that should describe memberships of the team. - - **Example** - `examples/user_team_mgmt_extended.py `_ - ''' - res = self.get_team(team) - if res[0] == False: - return res - - raw_memberships = res[1]['userRoles'] - user_ids = [m['userId'] for m in raw_memberships] - - res = self._get_id_user_dict(user_ids) - if res[0] == False: - return [False, 'Could not fetch IDs for user names'] - else: - id_user_dict = res[1] - - return [True, dict([(id_user_dict[m['userId']], m['role']) for m in raw_memberships])] - - def save_memberships(self, team, memberships): - ''' - **Description** - Create new user team memberships or update existing ones. 
- - **Arguments** - - **team**: the name of the team for which we are creating new memberships - - **memberships**: dictionary of (user-name, team-role) pairs that should describe new memberships - - **Example** - `examples/user_team_mgmt_extended.py `_ - ''' - - res = self.list_memberships(team) - - if res[0] is False: - return res - - full_memberships = res[1] - full_memberships.update(memberships) - - res = self.edit_team(team, full_memberships) - - if res[0] is False: - return res - else: - return [True, None] - - def remove_memberships(self, team, users): - ''' - **Description** - Remove user memberships from specified team. - - **Arguments** - - **team**: the name of the team from which user memberships are removed - - **users**: list of usernames which should be removed from team - - **Example** - `examples/user_team_mgmt_extended.py `_ - ''' - - res = self.list_memberships(team) - - if res[0] is False: - return res - - old_memberships = res[1] - new_memberships = {k: v for k, v in old_memberships.items() if k not in users} - - res = self.edit_team(team, new_memberships) - - if res[0] is False: - return res - else: - return [True, None] - - def list_access_keys(self): - ''' - **Description** - List all the access keys enabled and disabled for this instance of Sysdig Monitor/Secure - - **Reslut** - A list of access keys objects - - **Example** - `examples/list_access_keys.py `_ - ''' - res = requests.get(self.url + '/api/customer/accessKeys', headers=self.hdrs, verify=self.ssl_verify) - return self._request_result(res) - - def create_access_key(self): - ''' - **Description** - Create a new access key for Sysdig Monitor/Secure - - **Reslut** - The access keys object - ''' - res = requests.post(self.url + '/api/customer/accessKeys', headers=self.hdrs, verify=self.ssl_verify) - return self._request_result(res) - - def disable_access_key(self, access_key): - ''' - **Description** - Disable an existing access key - - **Arguments** - - **access_key**: the access 
key to be disabled - - **Reslut** - The access keys object - ''' - res = requests.post(self.url + '/api/customer/accessKeys/' + access_key + "/disable/", headers=self.hdrs, - verify=self.ssl_verify) - return self._request_result(res) - - def enable_access_key(self, access_key): - ''' - **Description** - Enable an existing access key - - **Arguments** - - **access_key**: the access key to be enabled - - **Reslut** - The access keys object - ''' - res = requests.post(self.url + '/api/customer/accessKeys/' + access_key + "/enable/", headers=self.hdrs, - verify=self.ssl_verify) - return self._request_result(res) - - def get_agents_config(self): - res = requests.get(self.url + '/api/agents/config', headers=self.hdrs, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - data = res.json() - return [True, data] - - def set_agents_config(self, config): - res = requests.put(self.url + '/api/agents/config', headers=self.hdrs, data=json.dumps(config), - verify=self.ssl_verify) - return self._request_result(res) - - def clear_agents_config(self): - data = {'files': []} - return self.set_agents_config(data) - - def get_user_api_token(self, username, teamname): - res = self.get_team(teamname) - if res[0] == False: - return res - - t = res[1] - - res = requests.get(self.url + '/api/token/%s/%d' % (username, t['id']), headers=self.hdrs, - verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - data = res.json() - return [True, data['token']['key']] - - def _request_result(self, res): - if not self._checkResponse(res): - return False, self.lasterr - - return True, res.json() diff --git a/sdcclient/_monitor.py b/sdcclient/_monitor.py deleted file mode 100644 index bd420e93..00000000 --- a/sdcclient/_monitor.py +++ /dev/null @@ -1,343 +0,0 @@ -import json -import re - -import requests - -from sdcclient._common import _SdcCommon -from sdcclient.monitor import EventsClientV2, DashboardsClientV3 - - -class 
SdMonitorClient(DashboardsClientV3, EventsClientV2, _SdcCommon): - - def __init__(self, token="", sdc_url='https://app.sysdigcloud.com', ssl_verify=True, custom_headers=None): - super(SdMonitorClient, self).__init__(token, sdc_url, ssl_verify, custom_headers) - self.product = "SDC" - - - def get_alerts(self): - '''**Description** - Retrieve the list of alerts configured by the user. - - **Success Return Value** - An array of alert dictionaries, with the format described at `this link `__ - - **Example** - `examples/list_alerts.py `_ - ''' - res = requests.get(self.url + '/api/alerts', headers=self.hdrs, verify=self.ssl_verify) - return self._request_result(res) - - def get_notifications(self, from_ts, to_ts, state=None, resolved=None): - '''**Description** - Returns the list of Sysdig Monitor alert notifications. - - **Arguments** - - **from_ts**: filter events by start time. Timestamp format is in UTC (seconds). - - **to_ts**: filter events by start time. Timestamp format is in UTC (seconds). - - **state**: filter events by alert state. Supported values are ``OK`` and ``ACTIVE``. - - **resolved**: filter events by resolution status. Supported values are ``True`` and ``False``. - - **Success Return Value** - A dictionary containing the list of notifications. - - **Example** - `examples/list_alert_notifications.py `_ - ''' - params = {} - - if from_ts is not None: - params['from'] = from_ts * 1000000 - - if to_ts is not None: - params['to'] = to_ts * 1000000 - - if state is not None: - params['state'] = state - - if resolved is not None: - params['resolved'] = resolved - - res = requests.get(self.url + '/api/notifications', headers=self.hdrs, params=params, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - return [True, res.json()] - - def update_notification_resolution(self, notification, resolved): - '''**Description** - Updates the resolution status of an alert notification. 
- - **Arguments** - - **notification**: notification object as returned by :func:`~SdcClient.get_notifications`. - - **resolved**: new resolution status. Supported values are ``True`` and ``False``. - - **Success Return Value** - The updated notification. - - **Example** - `examples/resolve_alert_notifications.py `_ - ''' - if 'id' not in notification: - return [False, 'Invalid notification format'] - - notification['resolved'] = resolved - data = {'notification': notification} - - res = requests.put(self.url + '/api/notifications/' + str(notification['id']), headers=self.hdrs, data=json.dumps(data), verify=self.ssl_verify) - return self._request_result(res) - - def create_alert(self, name=None, description=None, severity=None, for_atleast_s=None, condition=None, - segmentby=None, segment_condition='ANY', user_filter='', notify=None, enabled=True, - annotations=None, alert_obj=None, type="MANUAL"): - '''**Description** - Create a threshold-based alert. - - **Arguments** - - **name**: the alert name. This will appear in the Sysdig Monitor UI and in notification emails. - - **description**: the alert description. This will appear in the Sysdig Monitor UI and in notification emails. - - **severity**: syslog-encoded alert severity. This is a number from 0 to 7 where 0 means 'emergency' and 7 is 'debug'. - - **for_atleast_s**: the number of consecutive seconds the condition must be satisfied for the alert to fire. - - **condition**: the alert condition, as described here https://app.sysdigcloud.com/apidocs/#!/Alerts/post_api_alerts - - **segmentby**: a list of Sysdig Monitor segmentation criteria that can be used to apply the alert to multiple entities. For example, segmenting a CPU alert by ['host.mac', 'proc.name'] allows to apply it to any process in any machine. - - **segment_condition**: When *segmentby* is specified (and therefore the alert will cover multiple entities) this field is used to determine when it will fire. 
In particular, you have two options for *segment_condition*: **ANY** (the alert will fire when at least one of the monitored entities satisfies the condition) and **ALL** (the alert will fire when all of the monitored entities satisfy the condition). - - **user_filter**: a boolean expression combining Sysdig Monitor segmentation criteria that makes it possible to reduce the scope of the alert. For example: *kubernetes.namespace.name='production' and container.image='nginx'*. - - **notify**: the type of notification you want this alert to generate. Options are *EMAIL*, *SNS*, *PAGER_DUTY*, *SYSDIG_DUMP*. - - **enabled**: if True, the alert will be enabled when created. - - **annotations**: an optional dictionary of custom properties that you can associate to this alert for automation or management reasons - - **alert_obj**: an optional fully-formed Alert object of the format returned in an "alerts" list by :func:`~SdcClient.get_alerts` This is an alternative to creating the Alert using the individual parameters listed above. 
- - **Success Return Value** - A dictionary describing the just created alert, with the format described at `this link `__ - - **Example** - `examples/create_alert.py `_ - ''' - - if annotations is None: - annotations = {} - - if segmentby is None: - segmentby = [] - - # - # Get the list of alerts from the server - # - res = requests.get(self.url + '/api/alerts', headers=self.hdrs, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - res.json() - - if alert_obj is None: - if None in (name, description, severity, for_atleast_s, condition): - return [False, 'Must specify a full Alert object or all parameters: name, description, severity, for_atleast_s, condition'] - else: - # - # Populate the alert information - # - alert_json = { - 'alert': { - 'type': type, - 'name': name, - 'description': description, - 'enabled': enabled, - 'severity': severity, - 'timespan': for_atleast_s * 1000000, - 'condition': condition, - 'filter': user_filter - } - } - - if segmentby != None and segmentby != []: - alert_json['alert']['segmentBy'] = segmentby - alert_json['alert']['segmentCondition'] = {'type': segment_condition} - - if annotations != None and annotations != {}: - alert_json['alert']['annotations'] = annotations - - if notify != None: - alert_json['alert']['notificationChannelIds'] = notify - else: - # The REST API enforces "Alert ID and version must be null", so remove them if present, - # since these would have been there in a dump from the list_alerts.py example. - alert_obj.pop('id', None) - alert_obj.pop('version', None) - alert_json = { - 'alert': alert_obj - } - - # - # Create the new alert - # - res = requests.post(self.url + '/api/alerts', headers=self.hdrs, data=json.dumps(alert_json), verify=self.ssl_verify) - return self._request_result(res) - - def update_alert(self, alert): - '''**Description** - Update a modified threshold-based alert. 
- - **Arguments** - - **alert**: one modified alert object of the same format as those in the list returned by :func:`~SdcClient.get_alerts`. - - **Success Return Value** - The updated alert. - - **Example** - `examples/update_alert.py `_ - ''' - if 'id' not in alert: - return [False, "Invalid alert format"] - - res = requests.put(self.url + '/api/alerts/' + str(alert['id']), headers=self.hdrs, data=json.dumps({"alert": alert}), verify=self.ssl_verify) - return self._request_result(res) - - def delete_alert(self, alert): - '''**Description** - Deletes an alert. - - **Arguments** - - **alert**: the alert dictionary as returned by :func:`~SdcClient.get_alerts`. - - **Success Return Value** - ``None``. - - **Example** - `examples/delete_alert.py `_ - ''' - if 'id' not in alert: - return [False, 'Invalid alert format'] - - res = requests.delete(self.url + '/api/alerts/' + str(alert['id']), headers=self.hdrs, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - - return [True, None] - - def get_explore_grouping_hierarchy(self): - '''**Description** - Return the user's current grouping hierarchy as visible in the Explore tab of Sysdig Monitor. - - **Success Return Value** - A list containing the list of the user's Explore grouping criteria. 
- - **Example** - `examples/print_explore_grouping.py `_ - ''' - res = requests.get(self.url + '/api/groupConfigurations', headers=self.hdrs, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - - data = res.json() - - if 'groupConfigurations' not in data: - return [False, 'corrupted groupConfigurations API response'] - - gconfs = data['groupConfigurations'] - - for gconf in gconfs: - if gconf['id'] == 'explore': - res = [] - items = gconf['groups'][0]['groupBy'] - - for item in items: - res.append(item['metric']) - - return [True, res] - - return [False, 'corrupted groupConfigurations API response, missing "explore" entry'] - - def set_explore_grouping_hierarchy(self, new_hierarchy): - '''**Description** - Changes the grouping hierarchy in the Explore panel of the current user. - - **Arguments** - - **new_hierarchy**: a list of sysdig segmentation metrics indicating the new grouping hierarchy. - ''' - body = { - 'id': 'explore', - 'groups': [{'groupBy': []}] - } - - for item in new_hierarchy: - body['groups'][0]['groupBy'].append({'metric': item}) - - res = requests.put(self.url + '/api/groupConfigurations/explore', headers=self.hdrs, - data=json.dumps(body), verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - else: - return [True, None] - - - def get_metrics(self): - '''**Description** - Return the metric list that can be used for data requests/alerts/dashboards. - - **Success Return Value** - A dictionary containing the list of available metrics. - - **Example** - `examples/list_metrics.py `_ - ''' - res = requests.get(self.url + '/api/data/metrics', headers=self.hdrs, verify=self.ssl_verify) - return self._request_result(res) - - @staticmethod - def convert_scope_string_to_expression(scope): - '''**Description** - Internal function to convert a filter string to a filter object to be used with dashboards. 
- ''' - # - # NOTE: The supported grammar is not perfectly aligned with the grammar supported by the Sysdig backend. - # Proper grammar implementation will happen soon. - # For practical purposes, the parsing will have equivalent results. - # - - if scope is None or not scope: - return [True, []] - - expressions = [] - string_expressions = scope.strip(' \t\n\r').split(' and ') - expression_re = re.compile('^(?Pnot )?(?P[^ ]+) (?P=|!=|in|contains|starts with) (?P(:?"[^"]+"|\'[^\']+\'|\(.+\)|.+))$') - - for string_expression in string_expressions: - matches = expression_re.match(string_expression) - - if matches is None: - return [False, 'invalid scope format'] - - is_not_operator = matches.group('not') is not None - - if matches.group('operator') == 'in': - list_value = matches.group('value').strip(' ()') - value_matches = re.findall('(:?\'[^\',]+\')|(:?"[^",]+")|(:?[,]+)', list_value) - - if len(value_matches) == 0: - return [False, 'invalid scope value list format'] - - value_matches = map(lambda v: v[0] if v[0] else v[1], value_matches) - values = map(lambda v: v.strip(' "\''), value_matches) - else: - values = [matches.group('value').strip('"\'')] - - operator_parse_dict = { - 'in': 'in' if not is_not_operator else 'notIn', - '=': 'equals' if not is_not_operator else 'notEquals', - '!=': 'notEquals' if not is_not_operator else 'equals', - 'contains': 'contains' if not is_not_operator else 'notContains', - 'starts with': 'startsWith' - } - - operator = operator_parse_dict.get(matches.group('operator'), None) - if operator is None: - return [False, 'invalid scope operator'] - - expressions.append({ - 'operand': matches.group('operand'), - 'operator': operator, - 'value': values - }) - - return [True, expressions] - - -# For backwards compatibility -SdcClient = SdMonitorClient diff --git a/sdcclient/_monitor_v1.py b/sdcclient/_monitor_v1.py deleted file mode 100644 index ebf5ec24..00000000 --- a/sdcclient/_monitor_v1.py +++ /dev/null @@ -1,297 +0,0 @@ -import json 
-import copy -import requests -import re - -from sdcclient._monitor import SdMonitorClient - -try: - basestring -except NameError: - basestring = str - - -class SdMonitorClientV1(SdMonitorClient): - '''**Description** - Handles dashboards version 1 (ie. up to February 2019). For later Sysdig Monitor versions, please use :class:`~SdMonitorClient` instead. - ''' - - def __init__(self, token="", sdc_url='https://app.sysdigcloud.com', ssl_verify=True): - super(SdMonitorClientV1, self).__init__(token, sdc_url, ssl_verify) - self._dashboards_api_version = 'v1' - self._dashboards_api_endpoint = '/ui/dashboards' - self._default_dashboards_api_endpoint = '/api/defaultDashboards' - - def create_dashboard_from_template(self, dashboard_name, template, scope, shared=False, public=False, annotations={}): - if scope is not None: - if isinstance(scope, basestring) == False: - return [False, 'Invalid scope format: Expected a string'] - - # - # Clean up the dashboard we retireved so it's ready to be pushed - # - template['id'] = None - template['version'] = None - template['schema'] = 1 - template['name'] = dashboard_name - template['isShared'] = shared - template['isPublic'] = public - template['publicToken'] = None - - # set dashboard scope to the specific parameter - scopeExpression = self.convert_scope_string_to_expression(scope) - if scopeExpression[0] == False: - return scopeExpression - template['filterExpression'] = scope - template['scopeExpressionList'] = map(lambda ex: {'operand':ex['operand'], 'operator':ex['operator'],'value':ex['value'],'displayName':'', 'isVariable':False}, scopeExpression[1]) - - if 'widgets' in template and template['widgets'] is not None: - # Default dashboards (aka Explore views) specify panels with the property `widgets`, - # while custom dashboards use `items` - template['items'] = list(template['widgets']) - del template['widgets'] - - # NOTE: Individual panels might override the dashboard scope, the override will NOT be reset - if 'items' in 
template and template['items'] is not None: - for chart in template['items']: - if 'overrideFilter' not in chart: - chart['overrideFilter'] = False - - if chart['overrideFilter'] == False: - # patch frontend bug to hide scope override warning even when it's not really overridden - chart['scope'] = scope - - # if chart scope is equal to dashboard scope, set it as non override - chart_scope = chart['scope'] if 'scope' in chart else None - chart['overrideFilter'] = chart_scope != scope - - if 'annotations' in template: - template['annotations'].update(annotations) - else: - template['annotations'] = annotations - - template['annotations']['createdByEngine'] = True - - # - # Create the new dashboard - # - res = requests.post(self.url + self._dashboards_api_endpoint, headers=self.hdrs, data=json.dumps({'dashboard': template}), verify=self.ssl_verify) - return self._request_result(res) - - def create_dashboard(self, name): - ''' - **Description** - Creates an empty dashboard. You can then add panels by using ``add_dashboard_panel``. - - **Arguments** - - **name**: the name of the dashboard that will be created. - - **Success Return Value** - A dictionary showing the details of the new dashboard. - - **Example** - `examples/dashboard.py `_ - ''' - dashboard_configuration = { - 'name': name, - 'schema': 2, - 'items': [] - } - - # - # Create the new dashboard - # - res = requests.post(self.url + self._dashboards_api_endpoint, headers=self.hdrs, data=json.dumps({'dashboard': dashboard_configuration}), - verify=self.ssl_verify) - return self._request_result(res) - - def add_dashboard_panel(self, dashboard, name, panel_type, metrics, scope=None, sort_by=None, limit=None, layout=None): - """**Description** - Adds a panel to the dashboard. A panel can be a time series, or a top chart (i.e. bar chart), or a number panel. - - **Arguments** - - **dashboard**: dashboard to edit - - **name**: name of the new panel - - **panel_type**: type of the new panel. 
Valid values are: ``timeSeries``, ``top``, ``number`` - - **metrics**: a list of dictionaries, specifying the metrics to show in the panel, and optionally, if there is only one metric, a grouping key to segment that metric by. A metric is any of the entries that can be found in the *Metrics* section of the Explore page in Sysdig Monitor. Metric entries require an *aggregations* section specifying how to aggregate the metric across time and groups of containers/hosts. A grouping key is any of the entries that can be found in the *Show* or *Segment By* sections of the Explore page in Sysdig Monitor. Refer to the examples section below for ready to use code snippets. Note, certain panels allow certain combinations of metrics and grouping keys: - - ``timeSeries``: 1 or more metrics OR 1 metric + 1 grouping key - - ``top``: 1 or more metrics OR 1 metric + 1 grouping key - - ``number``: 1 metric only - - **scope**: filter to apply to the panel; must be based on metadata available in Sysdig Monitor; Example: *kubernetes.namespace.name='production' and container.image='nginx'*. - - **sort_by**: Data sorting; The parameter is optional and it's a dictionary of ``metric`` and ``mode`` (it can be ``desc`` or ``asc``) - - **limit**: This parameter sets the limit on the number of lines/bars shown in a ``timeSeries`` or ``top`` panel. In the case of more entities being available than the limit, the top entities according to the sort will be shown. The default value is 10 for ``top`` panels (for ``timeSeries`` the default is defined by Sysdig Monitor itself). Note that increasing the limit above 10 is not officially supported and may cause performance and rendering issues - - **layout**: Size and position of the panel. The dashboard layout is defined by a grid of 12 columns, each row height is equal to the column height. 
For example, say you want to show 2 panels at the top: one panel might be 6 x 3 (half the width, 3 rows height) located in row 1 and column 1 (top-left corner of the viewport), the second panel might be 6 x 3 located in row 1 and position 7. The location is specified by a dictionary of ``row`` (row position), ``col`` (column position), ``size_x`` (width), ``size_y`` (height). - - **Success Return Value** - A dictionary showing the details of the edited dashboard. - - **Example** - `examples/dashboard.py `_ - """ - panel_configuration = { - 'name': name, - 'showAs': None, - 'showAsType': None, - 'metrics': [], - 'gridConfiguration': { - 'col': 1, - 'row': 1, - 'size_x': 12, - 'size_y': 6 - } - } - - if panel_type == 'timeSeries': - # - # In case of a time series, the current dashboard implementation - # requires the timestamp to be explicitly specified as "key". - # However, this function uses the same abstraction of the data API - # that doesn't require to specify a timestamp key (you only need to - # specify time window and sampling) - # - metrics = copy.copy(metrics) - metrics.insert(0, {'id': 'timestamp'}) - - # - # Convert list of metrics to format used by Sysdig Monitor - # - property_names = {} - k_count = 0 - v_count = 0 - for i, metric in enumerate(metrics): - property_name = 'v' if 'aggregations' in metric else 'k' - - if property_name == 'k': - i = k_count - k_count += 1 - else: - i = v_count - v_count += 1 - property_names[metric['id']] = property_name + str(i) - - panel_configuration['metrics'].append({ - 'metricId': metric['id'], - 'aggregation': metric['aggregations']['time'] if 'aggregations' in metric else None, - 'groupAggregation': metric['aggregations']['group'] if 'aggregations' in metric else None, - 'propertyName': property_name + str(i) - }) - - panel_configuration['scope'] = scope - # if chart scope is equal to dashboard scope, set it as non override - panel_configuration['overrideFilter'] = ('scope' in dashboard and dashboard['scope'] != 
scope) or ('scope' not in dashboard and scope != None) - - # - # Configure panel type - # - if panel_type == 'timeSeries': - panel_configuration['showAs'] = 'timeSeries' - panel_configuration['showAsType'] = 'line' - - if limit != None: - panel_configuration['paging'] = { - 'from': 0, - 'to': limit - 1 - } - - elif panel_type == 'number': - panel_configuration['showAs'] = 'summary' - panel_configuration['showAsType'] = 'summary' - elif panel_type == 'top': - panel_configuration['showAs'] = 'top' - panel_configuration['showAsType'] = 'bars' - - if sort_by is None: - panel_configuration['sorting'] = [{ - 'id': 'v0', - 'mode': 'desc' - }] - else: - panel_configuration['sorting'] = [{ - 'id': property_names[sort_by['metric']], - 'mode': sort_by['mode'] - }] - - if limit is None: - panel_configuration['paging'] = { - 'from': 0, - 'to': 10 - } - else: - panel_configuration['paging'] = { - 'from': 0, - 'to': limit - 1 - } - - # - # Configure layout - # - if layout != None: - panel_configuration['gridConfiguration'] = layout - - # - # Clone existing dashboard... - # - dashboard_configuration = copy.deepcopy(dashboard) - dashboard_configuration['id'] = None - - # - # ... and add the new panel - # - dashboard_configuration['items'].append(panel_configuration) - - # - # Update dashboard - # - res = requests.put(self.url + self._dashboards_api_endpoint + '/' + str(dashboard['id']), headers=self.hdrs, data=json.dumps({'dashboard': dashboard_configuration}), - verify=self.ssl_verify) - return self._request_result(res) - - def remove_dashboard_panel(self, dashboard, panel_name): - '''**Description** - Removes a panel from the dashboard. The panel to remove is identified by the specified ``name``. - - **Arguments** - - **name**: name of the panel to find and remove - - **Success Return Value** - A dictionary showing the details of the edited dashboard. - - **Example** - `examples/dashboard.py `_ - ''' - # - # Clone existing dashboard... 
- # - dashboard_configuration = copy.deepcopy(dashboard) - dashboard_configuration['id'] = None - - # - # ... find the panel - # - def filter_fn(panel): - return panel['name'] == panel_name - - panels = list(filter(filter_fn, dashboard_configuration['items'])) - - if len(panels) > 0: - # - # ... and remove it - # - for panel in panels: - dashboard_configuration['items'].remove(panel) - - # - # Update dashboard - # - res = requests.put(self.url + self._dashboards_api_endpoint + '/' + str(dashboard['id']), headers=self.hdrs, data=json.dumps({'dashboard': dashboard_configuration}), - verify=self.ssl_verify) - return self._request_result(res) - else: - return [False, 'Not found'] - - def _get_dashboard_converters(self): - '''**Description** - Internal function to return dashboard converters from one version to another one. - ''' - # There's not really a previous version... - return {} diff --git a/sdcclient/_scanning.py b/sdcclient/_scanning.py deleted file mode 100644 index efe18f13..00000000 --- a/sdcclient/_scanning.py +++ /dev/null @@ -1,1182 +0,0 @@ -import base64 -import json -import re -import time - -import requests -from requests_toolbelt.multipart.encoder import MultipartEncoder - -try: - from urllib.parse import quote_plus, unquote_plus -except ImportError: - from urllib import quote_plus, unquote_plus - -from sdcclient._common import _SdcCommon - - -class SdScanningClient(_SdcCommon): - - def __init__(self, token="", sdc_url='https://secure.sysdig.com', ssl_verify=True, custom_headers=None): - super(SdScanningClient, self).__init__(token, sdc_url, ssl_verify, custom_headers) - self.product = "SDS" - - def add_image(self, image, force=False, dockerfile=None, annotations={}, autosubscribe=True): - '''**Description** - Add an image to the scanner - - **Arguments** - - image: Input image can be in the following formats: registry/repo:tag - - dockerfile: The contents of the dockerfile as a str. - - annotations: A dictionary of annotations {str: str}. 
- - autosubscribe: Should active the subscription to this image? - - **Success Return Value** - A JSON object representing the image that was added. - ''' - itype = self._discover_inputimage_format(image) - if itype != 'tag': - return [False, "can only add a tag"] - - payload = {} - if dockerfile: - payload['dockerfile'] = base64.b64encode(dockerfile.encode()).decode("utf-8") - payload['tag'] = image - if annotations: - payload['annotations'] = annotations - - url = "{base_url}/api/scanning/v1/anchore/images?autosubscribe={autosubscribe}{force}".format( - base_url=self.url, - autosubscribe=str(autosubscribe), - force="&force=true" if force else "") - - res = requests.post(url, data=json.dumps(payload), headers=self.hdrs, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - - return [True, res.json()] - - def get_image(self, image, show_history=False): - '''**Description** - Find the image with the tag and return its json description - - **Arguments** - - image: Input image can be in the following formats: registry/repo:tag - - **Success Return Value** - A JSON object representing the image. - ''' - itype = self._discover_inputimage_format(image) - if itype not in ['tag', 'imageid', 'imageDigest']: - return [False, "cannot use input image string: no discovered imageDigest"] - - params = {} - params['history'] = str(show_history and itype not in ['imageid', 'imageDigest']).lower() - if itype == 'tag': - params['fulltag'] = image - - url = self.url + "/api/scanning/v1/anchore/images" - url += { - 'imageid': '/by_id/{}'.format(image), - 'imageDigest': '/{}'.format(image) - }.get(itype, '') - - res = requests.get(url, params=params, headers=self.hdrs, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - - return [True, res.json()] - - def list_images(self): - '''**Description** - List the current set of images in the scanner. 
- - **Arguments** - - None - - **Success Return Value** - A JSON object containing all the images. - ''' - url = self.url + "/api/scanning/v1/anchore/images" - res = requests.get(url, headers=self.hdrs, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - - return [True, res.json()] - - def list_whitelisted_cves(self): - '''**Description** - List the whitelisted global CVEs. - - **Arguments** - - None - - **Success Return Value** - A JSON object containing all the whitelisted CVEs. - ''' - url = self.url + "/api/scanning/v1/whitelists/global?bundle=default" - res = requests.get(url, headers=self.hdrs, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - - return [True, res.json()] - - def query_image_content(self, image, content_type=""): - '''**Description** - Find the image with the tag and return its content. - - **Arguments** - - image: Input image can be in the following formats: registry/repo:tag - - content_type: The content type can be one of the following types: - - os: Operating System Packages - - npm: Node.JS NPM Module - - gem: Ruby GEM - - files: Files - - **Success Return Value** - A JSON object representing the image content. - ''' - return self._query_image(image, query_group='content', query_type=content_type) - - def query_image_metadata(self, image, metadata_type=""): - '''**Description** - Find the image with the tag and return its metadata. - - **Arguments** - - image: Input image can be in the following formats: registry/repo:tag - - metadata_type: The metadata type can be one of the types returned by running without a type specified - - **Success Return Value** - A JSON object representing the image metadata. - ''' - return self._query_image(image, query_group='metadata', query_type=metadata_type) - - def query_image_vuln(self, image, vuln_type="", vendor_only=True): - '''**Description** - Find the image with the tag and return its vulnerabilities. 
- - **Arguments** - - image: Input image can be in the following formats: registry/repo:tag - - vuln_type: Vulnerability type can be one of the following types: - - os: CVE/distro vulnerabilities against operating system packages - - **Success Return Value** - A JSON object representing the image vulnerabilities. - ''' - return self._query_image(image, query_group='vuln', query_type=vuln_type, vendor_only=vendor_only) - - def query_images_by_vulnerability(self, vulnerability_id, namespace=None, package=None, severity=None, - vendor_only=True): - '''**Description** - Search system for images with the given vulnerability ID present - - **Arguments** - - vulnerability_id: Search for images vulnerable to this vulnerability ID (e.g. CVE-1999-0001) - - namespace: Filter results to images with vulnerable packages in the given namespace (e.g. debian:9) - - package: Filter results to images with the given vulnerable package name (e.g. sed) - - severity: Filter results to images with the given vulnerability severity (e.g. Medium) - - vendor_only: Only show images with vulnerabilities explicitly deemed applicable by upstream OS vendor, if present - - **Success Return Value** - A JSON object representing the images. 
- ''' - url = "{base_url}/api/scanning/v1/anchore/query/images/by_vulnerability?vulnerability_id={vulnerability_id}{namespace}{package}{severity}&vendor_only={vendor_only}".format( - base_url=self.url, - vulnerability_id=vulnerability_id, - namespace="&namespace={}".format(namespace) if namespace else "", - package="&affected_package={}".format(package) if package else "", - severity="&severity={}".format(severity) if severity else "", - vendor_only=vendor_only) - - res = requests.get(url, headers=self.hdrs, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - - return [True, res.json()] - - def query_images_by_package(self, name, version=None, package_type=None): - '''**Description** - Search system for images with the given package installed - - **Arguments** - - name: Search for images with this package name (e.g. sed) - - version: Filter results to only packages with given version (e.g. 4.4-1) - - package-type: Filter results to only packages of given type (e.g. dpkg) - - **Success Return Value** - A JSON object representing the images. 
- ''' - url = "{base_url}/api/scanning/v1/anchore/query/images/by_package?name={name}{version}{package_type}".format( - base_url=self.url, - name=name, - version="&version={}".format(version) if version else "", - package_type="&package_type={}".format(package_type) if package_type else "") - - res = requests.get(url, headers=self.hdrs, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - - return [True, res.json()] - - def _query_image(self, image, query_group="", query_type="", vendor_only=True): - if not query_group: - raise Exception("need to specify a query group") - - _, _, image_digest = self._discover_inputimage(image) - if not image_digest: - return [False, "cannot use input image string (no discovered imageDigest)"] - - url = "{base_url}/api/scanning/v1/anchore/images/{image_digest}/{query_group}/{query_type}{vendor_only}".format( - base_url=self.url, - image_digest=image_digest, - query_group=query_group, - query_type=query_type if query_type else '', - vendor_only="?vendor_only={}".format(vendor_only) if query_group == 'vuln' else '') - - res = requests.get(url, headers=self.hdrs, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - - return [True, res.json()] - - def delete_image(self, image, force=False): - '''**Description** - Delete image from the scanner. 
- - **Arguments** - - None - ''' - _, _, image_digest = self._discover_inputimage(image) - if not image_digest: - return [False, "cannot use input image string: no discovered imageDigest"] - - url = self.url + "/api/scanning/v1/anchore/images/" + image_digest - res = requests.delete(url, headers=self.hdrs, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - - return [True, res.json()] - - def check_image_evaluation(self, image, show_history=False, detail=False, tag=None, policy=None): - '''**Description** - Check the latest policy evaluation for an image - - **Arguments** - - image: Input image can be in the following formats: registry/repo:tag - - show_history: Show all previous policy evaluations - - detail: Show detailed policy evaluation report - - tag: Specify which TAG is evaluated for a given image ID or Image Digest - - policy: Specify which POLICY to use for evaluate (defaults currently active policy) - - **Success Return Value** - A JSON object representing the evaluation status. 
- ''' - itype, _, image_digest = self._discover_inputimage(image) - if not image_digest: - return [False, "could not get image record from anchore"] - if not tag and itype != 'tag': - return [False, "input image name is not a tag, and no --tag is specified"] - - thetag = tag if tag else image - - url = "{base_url}/api/scanning/v1/anchore/images/{image_digest}/check?history={history}&detail={detail}&tag={tag}{policy_id}" - url = url.format( - base_url=self.url, - image_digest=image_digest, - history=str(show_history).lower(), - detail=str(detail).lower(), - tag=thetag, - policy_id=("&policyId=%s" % policy) if policy else "") - - res = requests.get(url, headers=self.hdrs, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - - return [True, res.json()] - - def get_pdf_report(self, image, tag=None, date=None): - '''**Description** - Get a pdf report of one image - - **Arguments** - - image: Input image can be in the following formats: registry/repo:tag - - tag: Specify which TAG is evaluated for a given image ID or Image Digest - - date: date for the report of interest, the format is 'YYYY-MM-DDTHH:MM:SSZ', - if not provided the latest report will be returned - - **Success Return Value** - The pdf content - ''' - image_type, _, image_digest = self._discover_inputimage(image) - if not image_digest: - return [False, "could not get image record from anchore"] - if not tag and image_type != 'tag': - return [False, "input image name is not a tag"] - image_tag = tag if tag else image - - url = "{base_url}/api/scanning/v1/images/{image_digest}/report?tag={tag}{at}".format( - base_url=self.url, - image_digest=image_digest, - tag=image_tag, - at=("&at=%s" % date) if date else "") - - res = requests.get(url, headers=self.hdrs, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - - return [True, res.content] - - def get_latest_pdf_report_by_digest(self, image_digest, full_tag=None): - '''**Description** 
- Get the latest pdf report of one image digest - - **Arguments** - - image_digest: Input image digest should be in the following formats: sha256:134dhgfd65765 - - tag: Specify which FULLTAG is evaluated for a given Image Digest: docker.io/alpine:latest - - **Success Return Value** - The pdf content - ''' - url = "{base_url}/api/scanning/v1/images/{image_digest}/report?tag={tag}".format( - base_url=self.url, - image_digest=image_digest, - tag=full_tag) - - res = requests.get(url, headers=self.hdrs, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - - return [True, res.content] - - def import_image(self, infile, image_id, digest_id, image_name, sync=False): - '''**Description** - Import an image archive - - **Arguments** - - infile: An image archive file - - sync: Whether the import should be synchronous or asynchronous - - **Success Return Value** - If synchronous, A JSON object representing the image that was imported. - - ''' - try: - m = MultipartEncoder( - fields={'archive_file': (infile, open(infile, 'rb'), 'text/plain')} - ) - if sync: - url = self.url + "/api/scanning/v1/anchore/import/images" - else: - url = self.url + "/api/scanning/v1/import/images" - - headers = {'Authorization': 'Bearer ' + self.token, 'Content-Type': m.content_type, - 'imageId': image_id, 'digestId': digest_id, 'imageName': image_name} - res = requests.post(url, data=m, headers=headers, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - - return [True, res.json() if sync else res.content] - - except Exception as err: - print(err) - - def get_anchore_users_account(self): - '''**Description** - Get the anchore user account. - - **Arguments** - - None - - **Success Return Value** - A JSON object containing user account information. 
- ''' - url = self.url + "/api/scanning/v1/account" - res = requests.get(url, headers=self.hdrs, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - - return [True, res.json()] - - def get_image_scan_result_by_id(self, image_id, full_tag_name, detail): - '''**Description** - Get the anchore image scan result for an image id. - - **Arguments** - - image_id: Docker image id of the image whose scan result is to be fetched. - - full_tag_name: The complete tag name of the image for e.g. docker.io/alpine:3.10. - - detail: Boolean to indicate whether full scan result API is needed. - - **Success Return Value** - A JSON object containing pass/fail status of image scan policy. - ''' - url = "{base_url}/api/scanning/v1/anchore/images/by_id/{image_id}/check?tag={full_tag_name}&detail={detail}".format( - base_url=self.url, - image_id=image_id, - full_tag_name=full_tag_name, - detail=detail) - res = requests.get(url, headers=self.hdrs, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - - return [True, res.json()] - - def add_registry(self, registry, registry_user, registry_pass, insecure=False, registry_type="docker_v2", - validate=True): - '''**Description** - Add image registry - - **Arguments** - - registry: Full hostname/port of registry. Eg. myrepo.example.com:5000 - - registry_user: Username - - registry_pass: Password - - insecure: Allow connection to registry without SSL cert checks (ex: if registry uses a self-signed SSL certificate) - - registry_type: Specify the registry type. 'docker_v2' and 'awsecr' are supported (default='docker_v2') - - validate: If set to 'False' will not attempt to validate registry/creds on registry add - - **Success Return Value** - A JSON object representing the registry. 
- ''' - registry_types = ['docker_v2', 'awsecr'] - if registry_type and registry_type not in registry_types: - return [False, "input registry type not supported (supported registry_types: " + str(registry_types)] - if self._registry_string_is_valid(registry): - return [False, - "input registry name cannot contain '/' characters - valid registry names are of the form : where : is optional"] - - if not registry_type: - registry_type = self._get_registry_type(registry) - - payload = { - 'registry': registry, - 'registry_user': registry_user, - 'registry_pass': registry_pass, - 'registry_type': registry_type, - 'registry_verify': not insecure} - url = "{base_url}/api/scanning/v1/anchore/registries?validate={validate}".format( - base_url=self.url, - validate=validate) - - res = requests.post(url, data=json.dumps(payload), headers=self.hdrs, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - - return [True, res.json()] - - def update_registry(self, registry, registry_user, registry_pass, insecure=False, registry_type="docker_v2", - validate=True): - '''**Description** - Update an existing image registry. - - **Arguments** - - registry: Full hostname/port of registry. Eg. myrepo.example.com:5000 - - registry_user: Username - - registry_pass: Password - - insecure: Allow connection to registry without SSL cert checks (ex: if registry uses a self-signed SSL certificate) - - registry_type: Specify the registry type. 'docker_v2' and 'awsecr' are supported (default='docker_v2') - - validate: If set to 'False' will not attempt to validate registry/creds on registry add - - **Success Return Value** - A JSON object representing the registry. 
- ''' - if self._registry_string_is_valid(registry): - return [False, - "input registry name cannot contain '/' characters - valid registry names are of the form : where : is optional"] - - payload = { - 'registry': registry, - 'registry_user': registry_user, - 'registry_pass': registry_pass, - 'registry_type': registry_type, - 'registry_verify': not insecure} - url = "{base_url}/api/scanning/v1/anchore/registries/{registry}?validate={validate}".format( - base_url=self.url, - registry=registry, - validate=validate) - - res = requests.put(url, data=json.dumps(payload), headers=self.hdrs, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - - return [True, res.json()] - - def delete_registry(self, registry): - '''**Description** - Delete an existing image registry - - **Arguments** - - registry: Full hostname/port of registry. Eg. myrepo.example.com:5000 - ''' - # do some input string checking - if re.match(".*\\/.*", registry): - return [False, - "input registry name cannot contain '/' characters - valid registry names are of the form : where : is optional"] - - url = self.url + "/api/scanning/v1/anchore/registries/" + registry - res = requests.delete(url, headers=self.hdrs, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - - return [True, res.json()] - - def list_registry(self): - '''**Description** - List all current image registries - - **Arguments** - - None - - **Success Return Value** - A JSON object representing the list of registries. - ''' - url = self.url + "/api/scanning/v1/anchore/registries" - res = requests.get(url, headers=self.hdrs, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - - return [True, res.json()] - - def get_registry(self, registry): - '''**Description** - Find the registry and return its json description - - **Arguments** - - registry: Full hostname/port of registry. Eg. 
myrepo.example.com:5000 - - **Success Return Value** - A JSON object representing the registry. - ''' - if self._registry_string_is_valid(registry): - return [False, - "input registry name cannot contain '/' characters - valid registry names are of the form : where : is optional"] - - url = self.url + "/api/scanning/v1/anchore/registries/" + registry - res = requests.get(url, headers=self.hdrs, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - - return [True, res.json()] - - def _get_registry_type(self, registry): - if re.match("[0-9]+\\.dkr\\.ecr\\..*\\.amazonaws\\.com", registry): - return "awsecr" - return "docker_v2" - - def _registry_string_is_valid(self, registry): - return re.match(".*\\/.*", registry) - - def add_repo(self, repo, autosubscribe=True, lookuptag=None): - '''**Description** - Add a repository - - **Arguments** - - repo: Input repository can be in the following formats: registry/repo - - autosubscribe: If unset, instruct the engine to disable subscriptions for any discovered tags. - - lookuptag: Specify a tag to use for repo tag scan if 'latest' tag does not exist in the repo. - - **Success Return Value** - A JSON object representing the repo. 
- ''' - url = "{base_url}/api/scanning/v1/anchore/repositories?repository={repo}&autosubscribe={autosubscribe}{lookuptag}".format( - base_url=self.url, - repo=repo, - autosubscribe=autosubscribe, - lookuptag="&lookuptag={}".format(lookuptag) if lookuptag else "") - - res = requests.post(url, headers=self.hdrs, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - - return [True, res.json()] - - def watch_repo(self, repo): - '''**Description** - Instruct engine to start automatically watching the repo for image updates - - **Arguments** - - repo: Input repository can be in the following formats: registry/repo - ''' - return self.activate_subscription('repo_update', repo) - - def unwatch_repo(self, repo): - '''**Description** - Instruct engine to stop automatically watching the repo for image updates - - **Arguments** - - repo: Input repository can be in the following formats: registry/repo - ''' - return self.deactivate_subscription('repo_update', repo) - - def delete_repo(self, repo): - '''**Description** - Delete a repository from the watch list (does not delete already analyzed images) - - **Arguments** - - repo: Input repository can be in the following formats: registry/repo - ''' - return self.delete_subscription('repo_update', repo) - - def list_repos(self): - '''**Description** - List added repositories - - **Arguments** - - None - - **Success Return Value** - A JSON object representing the list of repositories. - ''' - return self.get_subscriptions("repo_update") - - def get_repo(self, repo): - '''**Description** - Get a repository - - **Arguments** - - repo: Input repository can be in the following formats: registry/repo - - **Success Return Value** - A JSON object representing the registry. - ''' - return self.get_subscriptions("repo_update", repo) - - def add_policy(self, name, rules, comment="", bundleid=None): - '''**Description** - Create a new policy - - **Arguments** - - name: The name of the policy. 
- - rules: A list of Anchore PolicyRule elements (while creating/updating a policy, new rule IDs will be created backend side) - - comment: A human-readable description. - - bundleid: Target bundle. If not specified, the currently active bundle will be used. - - **Success Return Value** - A JSON object containing the policy description. - ''' - policy = { - 'name': name, - 'comment': comment, - 'rules': rules, - 'version': '1_0' - } - if bundleid: - policy['policyBundleId'] = bundleid - - url = self.url + '/api/scanning/v1/policies' - data = json.dumps(policy) - res = requests.post(url, headers=self.hdrs, data=data, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - - return [True, res.json()] - - def list_policy_bundles(self, detail=False): - url = "{base_url}/api/scanning/v1/anchore/policies?detail={detail}".format( - base_url=self.url, - detail=str(detail)) - res = requests.get(url, headers=self.hdrs, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - - return [True, res.json()] - - def list_policies(self, bundleid=None): - '''**Description** - List the current set of scanning policies. - - **Arguments** - - bundleid: Target bundle. If not specified, the currently active bundle will be used. - - **Success Return Value** - A JSON object containing the list of policies. - ''' - url = self.url + '/api/scanning/v1/policies' - if bundleid: - url += '?bundleId=' + bundleid - - res = requests.get(url, headers=self.hdrs, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - - return [True, res.json()] - - def get_policy(self, policyid, bundleid=None): - '''**Description** - Retrieve the policy with the given id in the targeted policy bundle - - **Arguments** - - policyid: Unique identifier associated with this policy. - - bundleid: Target bundle. If not specified, the currently active bundle will be used. 
- - **Success Return Value** - A JSON object containing the policy description. - ''' - ok, policies = self.list_policies(bundleid) - if not ok: - return [ok, policies] - - for policy in policies: - if policy["id"] == policyid: - return [True, policy] - - return [False, "Policy not found"] - - def update_policy(self, policyid, policy_description): - '''**Description** - Update the policy with the given id - - **Arguments** - - policyid: Unique identifier associated with this policy. - - policy_description: A dictionary with the policy description. - - **Success Return Value** - A JSON object containing the policy description. - ''' - url = self.url + '/api/scanning/v1/policies/' + policyid - data = json.dumps(policy_description) - res = requests.put(url, headers=self.hdrs, data=data, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - - return [True, res.json()] - - def delete_policy(self, policyid, bundleid=None): - '''**Description** - Delete the policy with the given id in the targeted policy Bundle - - **Arguments** - - policyid: Unique identifier associated with this policy. - - policy_description: A dictionary with the policy description. - ''' - url = self.url + '/api/scanning/v1/policies/' + policyid - if bundleid: - url += '?bundleId=' + bundleid - - res = requests.delete(url, headers=self.hdrs, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - - return [True, res.text] - - def add_alert(self, name, description=None, scope="", triggers={'failed': True, 'unscanned': True}, - enabled=False, notification_channels=[]): - '''**Description** - Create a new alert - - **Arguments** - - name: The name of the alert. - - description: The descprition of the alert. - - scope: An AND-composed string of predicates that selects the scope in which the alert will be applied. 
(like: 'host.domain = "example.com" and container.image != "alpine:latest"') - - tiggers: A dict {str: bool} indicating wich triggers should be enabled/disabled. (default: {'failed': True, 'unscanned': True}) - - enabled: Whether this alert should actually be applied. - - notification_channels: A list of notification channel ids. - - **Success Return Value** - A JSON object containing the alert description. - ''' - alert = { - 'name': name, - 'description': description, - 'triggers': triggers, - 'scope': scope, - 'enabled': enabled, - 'autoscan': True, - 'notificationChannelIds': notification_channels, - } - - url = self.url + '/api/scanning/v1/alerts' - data = json.dumps(alert) - res = requests.post(url, headers=self.hdrs, data=data, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - - return [True, res.json()] - - def list_alerts(self, limit=None, cursor=None): - '''**Description** - List the current set of scanning alerts. - - **Arguments** - - limit: Maximum number of alerts in the response. - - cursor: An opaque string representing the current position in the list of alerts. It's provided in the 'responseMetadata' of the list_alerts response. - - **Success Return Value** - A JSON object containing the list of alerts. - ''' - url = self.url + '/api/scanning/v1/alerts' - if limit: - url += '?limit=' + str(limit) - if cursor: - url += '&cursor=' + cursor - - res = requests.get(url, headers=self.hdrs, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - - return [True, res.json()] - - def get_alert(self, alertid): - '''**Description** - Retrieve the scanning alert with the given id - - **Arguments** - - alertid: Unique identifier associated with this alert. - - **Success Return Value** - A JSON object containing the alert description. 
- ''' - url = self.url + '/api/scanning/v1/alerts/' + alertid - res = requests.get(url, headers=self.hdrs, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - - return [True, res.json()] - - def update_alert(self, alertid, alert_description): - '''**Description** - Update the alert with the given id - - **Arguments** - - alertid: Unique identifier associated with this alert. - - alert_description: A dictionary with the alert description. - - **Success Return Value** - A JSON object containing the alert description. - ''' - url = self.url + '/api/scanning/v1/alerts/' + alertid - data = json.dumps(alert_description) - res = requests.put(url, headers=self.hdrs, data=data, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - - return [True, res.json()] - - def delete_alert(self, policyid): - '''**Description** - Delete the alert with the given id - - **Arguments** - - alertid: Unique identifier associated with this alert. - ''' - url = self.url + '/api/scanning/v1/alerts/' + policyid - res = requests.delete(url, headers=self.hdrs, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - - return [True, res.text] - - def get_subscriptions(self, subscription_type=None, subscription_key=None): - '''**Description** - Get the list of subscriptions - - **Arguments** - - subscription_type: Type of subscription. Valid options: - - 'tag_update': Receive notification when new image is pushed - - 'policy_eval': Receive notification when image policy status changes - - 'vuln_update': Receive notification when vulnerabilities are added, removed or modified - - 'repo_update': Receive notification when a repo is updated - - subscription_key: Fully qualified name of tag to subscribe to. Eg. docker.io/library/alpine:latest - ''' - url = self.url + "/api/scanning/v1/anchore/subscriptions/" - if subscription_key or subscription_type: - url += "?" 
- if subscription_key: - url += "subscription_key={}&".format(subscription_key) - if subscription_type: - url += "subscription_type={}".format(subscription_type) - res = requests.get(url, headers=self.hdrs, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - - return [True, res.json()] - - def activate_subscription(self, subscription_type, subscription_key): - '''**Description** - Activate a subscription - - **Arguments** - - subscription_type: Type of subscription. Valid options: - - 'tag_update': Receive notification when new image is pushed - - 'policy_eval': Receive notification when image policy status changes - - 'vuln_update': Receive notification when vulnerabilities are added, removed or modified - - 'repo_update': Receive notification when a repo is updated - - subscription_key: Fully qualified name of tag to subscribe to. Eg. docker.io/library/alpine:latest - ''' - return self._update_subscription(subscription_type, subscription_key, True) - - def deactivate_subscription(self, subscription_type, subscription_key): - '''**Description** - Deactivate a subscription - - **Arguments** - - subscription_type: Type of subscription. Valid options: - - 'tag_update': Receive notification when new image is pushed - - 'policy_eval': Receive notification when image policy status changes - - 'vuln_update': Receive notification when vulnerabilities are added, removed or modified - - 'repo_update': Receive notification when a repo is updated - - subscription_key: Fully qualified name of tag to subscribe to. Eg. docker.io/library/alpine:latest - ''' - return self._update_subscription(subscription_type, subscription_key, False) - - def delete_subscription(self, subscription_type, subscription_key): - '''**Description** - Delete a subscription - - **Arguments** - - subscription_type: Type of subscription. 
Valid options: - - 'tag_update': Receive notification when new image is pushed - - 'policy_eval': Receive notification when image policy status changes - - 'vuln_update': Receive notification when vulnerabilities are added, removed or modified - - 'repo_update': Receive notification when a repo is updated - - subscription_key: Fully qualified name of tag to subscribe to. Eg. docker.io/library/alpine:latest - ''' - try: - url = self._subscription_url(subscription_type, subscription_key) - except Exception as err: - return [False, err] - - res = requests.delete(url, headers=self.hdrs, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - - return [True, res.json()] - - def _update_subscription(self, subscription_type, subscription_key, activate): - try: - url = self._subscription_url(subscription_type, subscription_key) - except Exception as err: - return [False, err] - - payload = {'active': activate, 'subscription_key': subscription_key, 'subscription_type': subscription_type} - res = requests.put(url, data=json.dumps(payload), headers=self.hdrs, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - - return [True, res.json()] - - def _subscription_url(self, subscription_type, subscription_key): - ok, res = self.get_subscriptions(subscription_type, subscription_key) - if not ok: - raise Exception(res) - - if len(res) != 1: - raise Exception("Subscription {} doesn't exist".format(subscription_key)) - id = res[0].get("subscription_id", None) - if not id: - raise Exception("Subscription malformed") - - return self.url + "/api/scanning/v1/anchore/subscriptions/" + id - - def list_subscription(self): - '''**Description** - List all subscriptions - - **Arguments** - - None - - **Success Return Value** - A JSON object representing the list of subscriptions. 
- ''' - return self.get_subscriptions() - - def list_runtime(self, scope="", skip_policy_evaluation=True, start_time=None, end_time=None): - '''**Description** - List runtime containers - - **Arguments** - - scope: An AND-composed string of predicates that selects the scope in which the alert will be applied. (like: 'host.domain = "example.com" and container.image != "alpine:latest"') - - skip_policy_evaluation: If true, no policy evaluations will be triggered for the images. - - start_time: Start of the time range (integer of unix time). - - end_time: End of the time range (integer of unix time). - - **Success Return Value** - A JSON object representing the list of runtime containers. - ''' - containers = { - 'scope': scope, - 'skipPolicyEvaluation': skip_policy_evaluation - } - if start_time or end_time: - containers['time'] = {} - containers['time']['from'] = int(start_time * 100000) if start_time else 0 - end_time = end_time if end_time else time.time() - containers['time']['to'] = int(end_time * 1000000) - - url = self.url + '/api/scanning/v1/query/containers' - data = json.dumps(containers) - res = requests.post(url, headers=self.hdrs, data=data, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - - return [True, res.json()] - - def _discover_inputimage_format(self, input_string): - itype = None - - if re.match("^sha256:[0-9a-fA-F]{64}", input_string): - itype = 'imageDigest' - elif re.match("[0-9a-fA-F]{64}", input_string): - itype = 'imageid' - else: - itype = 'tag' - - return itype - - def _discover_inputimage(self, input_string): - patt = re.match(".*(sha256:.*)", input_string) - if patt: - urldigest = quote_plus(patt.group(1)) - return "digest", input_string, urldigest - - try: - digest = unquote_plus(str(input_string)) - for tpe in ["sha256", "local"]: - patt = re.match(".*({}:.*)".format(tpe), digest) - if patt: - return "imageDigest", input_string, input_string - except Exception: - pass - - urldigest = None - 
ret_type = "tag" - ok, ret = self.get_image(input_string) - if ok: - image_record = ret[0] - urldigest = image_record.get('imageDigest', None) - for image_detail in image_record.get('image_detail', []): - if input_string == image_detail.get('imageId', ''): - ret_type = "imageid" - break - - return ret_type, input_string, urldigest - - def get_vulnerability_details(self, id): - if id is None: - return [False, "No vulnerability ID provided"] - - url = self.url + f"/api/scanning/v1/anchore/query/vulnerabilities" - - params = { - "id": id, - } - - res = requests.get(url, params=params, headers=self.hdrs, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - - json_res = res.json() - if "vulnerabilities" not in json_res or not json_res["vulnerabilities"]: - return [False, f"Vulnerability {id} was not found"] - - return [True, json_res["vulnerabilities"][0]] - - def add_vulnerability_exception_bundle(self, name, comment=""): - if not name: - return [False, "A name is required for the exception bundle"] - - url = self.url + f"/api/scanning/v1/vulnexceptions" - params = { - "version": "1_0", - "name": name, - "comment": comment, - } - - data = json.dumps(params) - res = requests.post(url, headers=self.hdrs, data=data, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - - return [True, res.json()] - - def delete_vulnerability_exception_bundle(self, id): - - url = self.url + f"/api/scanning/v1/vulnexceptions/{id}" - - res = requests.delete(url, headers=self.hdrs, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - - return [True, None] - - def list_vulnerability_exception_bundles(self): - url = self.url + f"/api/scanning/v1/vulnexceptions" - - params = { - "bundleId": "default", - } - - res = requests.get(url, params=params, headers=self.hdrs, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - - return [True, 
res.json()] - - def get_vulnerability_exception_bundle(self, bundle): - url = f"{self.url}/api/scanning/v1/vulnexceptions/{bundle}" - - params = { - "bundleId": "default", - } - - res = requests.get(url, params=params, headers=self.hdrs, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - - res_json = res.json() - for item in res_json["items"]: - item["trigger_id"] = str(item["trigger_id"]).rstrip("+*") - return [True, res_json] - - def add_vulnerability_exception(self, bundle, cve, note=None, expiration_date=None): - url = f"{self.url}/api/scanning/v1/vulnexceptions/{bundle}/vulnerabilities" - - params = { - "gate": "vulnerabilities", - "is_busy": False, - "trigger_id": f"{cve}+*", - "expiration_date": int(expiration_date) if expiration_date else None, - "notes": note, - } - - data = json.dumps(params) - res = requests.post(url, headers=self.hdrs, data=data, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - - res_json = res.json() - res_json["trigger_id"] = str(res_json["trigger_id"]).rstrip("+*") - return [True, res_json] - - def delete_vulnerability_exception(self, bundle, id): - url = f"{self.url}/api/scanning/v1/vulnexceptions/{bundle}/vulnerabilities/{id}" - - params = { - "bundleId": "default", - } - - res = requests.delete(url, params=params, headers=self.hdrs, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - - return [True, None] - - def update_vulnerability_exception(self, bundle, id, cve, enabled, note, expiration_date): - url = f"{self.url}/api/scanning/v1/vulnexceptions/{bundle}/vulnerabilities/{id}" - - data = { - "id": id, - "gate": "vulnerabilities", - "trigger_id": f"{cve}+*", - "enabled": enabled, - "notes": note, - "expiration_date": int(expiration_date) if expiration_date else None, - } - params = { - "bundleId": "default", - } - - res = requests.put(url, data=json.dumps(data), params=params, headers=self.hdrs, 
verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - - res_json = res.json() - res_json["trigger_id"] = str(res_json["trigger_id"]).rstrip("+*") - return [True, res_json] \ No newline at end of file diff --git a/sdcclient/_secure.py b/sdcclient/_secure.py deleted file mode 100644 index c1066c12..00000000 --- a/sdcclient/_secure.py +++ /dev/null @@ -1,1343 +0,0 @@ -import json -import os -import shutil -import time - -import requests -import yaml - -from sdcclient._common import _SdcCommon -from sdcclient.secure import PolicyEventsClientV1, PolicyEventsClientOld - - -class SdSecureClient(PolicyEventsClientV1, PolicyEventsClientOld, _SdcCommon): - def __init__(self, token="", sdc_url='https://secure.sysdig.com', ssl_verify=True, custom_headers=None): - super(SdSecureClient, self).__init__(token, sdc_url, ssl_verify, custom_headers) - - self.customer_id = None - self.product = "SDS" - self._policy_v2 = None - - @property - def policy_v2(self): - '''**Description** - True if policy V2 API is available - ''' - if self._policy_v2 is None: - res = requests.get(self.url + '/api/v2/policies/default', headers=self.hdrs, verify=self.ssl_verify) - self._policy_v2 = res.status_code != 404 - return self._policy_v2 - - def _get_falco_rules(self, kind): - res = requests.get(self.url + '/api/settings/falco/{}RulesFile'.format(kind), headers=self.hdrs, - verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - data = res.json() - return [True, data] - - def get_system_falco_rules(self): - '''**Description** - Get the system falco rules file in use for this customer. See the `Falco wiki `_ for documentation on the falco rules format. - - **Arguments** - - None - - **Success Return Value** - The contents of the system falco rules file. 
- - **Example** - `examples/get_secure_system_falco_rules.py `_ - ''' - - return self._get_falco_rules("system") - - def get_user_falco_rules(self): - '''**Description** - Get the user falco rules file in use for this customer. See the `Falco wiki `_ for documentation on the falco rules format. - - **Arguments** - - None - - **Success Return Value** - The contents of the user falco rules file. - - **Example** - `examples/get_secure_user_falco_rules.py `_ - ''' - return self._get_falco_rules("user") - - def _set_falco_rules(self, kind, rules_content): - payload = self._get_falco_rules(kind) - - if not payload[0]: - return payload - - payload[1]["{}RulesFile".format(kind)]["content"] = rules_content # pylint: disable=unsubscriptable-object - - res = requests.put(self.url + '/api/settings/falco/{}RulesFile'.format(kind), headers=self.hdrs, - data=json.dumps(payload[1]), verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - return [True, res.json()] - - def set_system_falco_rules(self, rules_content): - '''**Description** - Set the system falco rules file in use for this customer. NOTE: This API endpoint can *only* be used in on-premise deployments. Generally the system falco rules file is only modified in conjunction with Sysdig support. See the `Falco wiki `_ for documentation on the falco rules format. - - **Arguments** - - A string containing the system falco rules. - - **Success Return Value** - The contents of the system falco rules file that were just updated. - - **Example** - `examples/set_secure_system_falco_rules.py `_ - - ''' - return self._set_falco_rules("system", rules_content) - - def set_user_falco_rules(self, rules_content): - '''**Description** - Set the user falco rules file in use for this customer. See the `Falco wiki `_ for documentation on the falco rules format. - - **Arguments** - - A string containing the user falco rules. 
- - **Success Return Value** - The contents of the user falco rules file that were just updated. - - **Example** - `examples/set_secure_user_falco_rules.py `_ - - ''' - return self._set_falco_rules("user", rules_content) - - # Only one kind for now called "default", but might add a "custom" kind later. - def _get_falco_rules_files(self, kind): - - res = requests.get(self.url + '/api/settings/falco/{}RulesFiles'.format(kind), headers=self.hdrs, - verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - data = res.json() - - return [True, data] - - def get_default_falco_rules_files(self): - '''**Description** - Get the set of falco rules files from the backend. The _files programs and endpoints are a - replacement for the system_file endpoints and allow for publishing multiple files instead - of a single file as well as publishing multiple variants of a given file that are compatible - with different agent versions. - - **Arguments** - - None - - **Success Return Value** - A dict with the following keys: - - tag: A string used to uniquely identify this set of rules. It is recommended that this tag change every time the set of rules is updated. - - files: An array of dicts. 
Each dict has the following keys: - - name: the name of the file - - variants: An array of dicts with the following keys: - - requiredEngineVersion: the minimum falco engine version that can read this file - - content: the falco rules content - An example would be: - {'tag': 'v1.5.9', - 'files': [ - { - 'name': 'falco_rules.yaml', - 'variants': [ - { - 'content': '- required_engine_version: 29\n\n- list: foo\n', - 'requiredEngineVersion': 29 - }, - { - 'content': '- required_engine_version: 1\n\n- list: foo\n', - 'requiredEngineVersion': 1 - } - ] - }, - { - 'name': 'k8s_audit_rules.yaml', - 'variants': [ - { - 'content': '# some comment\n', - 'requiredEngineVersion': 0 - } - ] - } - ] - } - - **Example** - `examples/get_default_falco_rules_files.py `_ - ''' - - res = self._get_falco_rules_files("default") - - if not res[0]: - return res - else: - res_obj = res[1]["defaultFalcoRulesFiles"] - - # Copy only the tag and files over - ret = {} - - if "tag" in res_obj: - ret["tag"] = res_obj["tag"] - - if "files" in res_obj: - ret["files"] = res_obj["files"] - - if "defaultPolicies" in res_obj: - ret["defaultPolicies"] = res_obj["defaultPolicies"] - - return [True, ret] - - def save_default_falco_rules_files(self, fsobj, save_dir): - '''**Description** - Given a dict returned from get_default_falco_rules_files, save those files to a set of files below save_dir. - The first level below save_dir is a directory with the tag name and an optional default_policies.yaml file, - which groups rules into recommended default policies. The second level is a directory per file. - The third level is a directory per variant. Finally the files are at the lowest level, in a file called "content". 
- For example, using the example dict in get_default_falco_rules_files(), the directory layout would look like: - save_dir/ - default_policies.yaml - v1.5.9/ - falco_rules.yaml/ - 29/ - content: a file containing "- required_engine_version: 29\n\n- list: foo\n" - 1/ - content: a file containing "- required_engine_version: 1\n\n- list: foo\n" - k8s_audit_rules.yaml/ - 0/ - content: a file containing "# some comment" - **Arguments** - - fsobj: a python dict matching the structure returned by get_default_falco_rules_files() - - save_dir: a directory path under which to save the files. If the path already exists, it will be removed first. - - **Success Return Value** - - None - - **Example** - `examples/get_default_falco_rules_files.py `_ - ''' - if os.path.exists(save_dir): - try: - if os.path.isdir(save_dir): - shutil.rmtree(save_dir) - else: - os.unlink(save_dir) - except Exception as e: - return [False, "Could not remove existing save dir {}: {}".format(save_dir, str(e))] - - prefix = os.path.join(save_dir, fsobj["tag"]) - try: - os.makedirs(prefix) - except Exception as e: - return [False, "Could not create tag directory {}: {}".format(prefix, str(e))] - - if "defaultPolicies" in fsobj: - with open(os.path.join(save_dir, "default_policies.yaml"), 'w') as outfile: - yaml.safe_dump(fsobj["defaultPolicies"], outfile) - - if "files" in fsobj: - for fobj in fsobj["files"]: - fprefix = os.path.join(prefix, fobj["name"]) - try: - os.makedirs(fprefix) - except Exception as e: - return [False, "Could not create file directory {}: {}".format(fprefix, str(e))] - for variant in fobj["variants"]: - vprefix = os.path.join(fprefix, str(variant["requiredEngineVersion"])) - try: - os.makedirs(vprefix) - except Exception as e: - return [False, "Could not create variant directory {}: {}".format(vprefix, str(e))] - cpath = os.path.join(vprefix, "content") - try: - with open(cpath, "w") as cfile: - cfile.write(variant["content"]) - except Exception as e: - return [False, "Could not 
write content to {}: {}".format(cfile, str(e))] - - return [True, None] - - # Only One kind for now, but might add a "custom" kind later. - def _set_falco_rules_files(self, kind, rules_files): - - payload = self._get_falco_rules_files(kind) - - if not payload[0]: - return payload - - obj = payload[1]["{}FalcoRulesFiles".format(kind)] # pylint: disable=unsubscriptable-object - - obj["tag"] = rules_files["tag"] - obj["files"] = rules_files["files"] - if "defaultPolicies" in rules_files: - obj["defaultPolicies"] = rules_files["defaultPolicies"] - - res = requests.put(self.url + '/api/settings/falco/{}RulesFiles'.format(kind), headers=self.hdrs, - data=json.dumps(payload[1]), verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - return [True, res.json()] - - def set_default_falco_rules_files(self, rules_files): - '''**Description** - Update the set of falco rules files to the provided set of files. See the `Falco wiki `_ for documentation on the falco rules format. - The _files programs and endpoints are a replacement for the system_file endpoints and - allow for publishing multiple files instead of a single file as well as publishing - multiple variants of a given file that are compatible with different agent versions. - - **Arguments** - - rules_files: a dict with the same structure as returned by get_default_falco_rules_files. - - **Success Return Value** - The contents of the default falco rules files that were just updated. - - **Example** - `examples/set_default_falco_rules_files.py `_ - - ''' - - return self._set_falco_rules_files("default", rules_files) - - def load_default_falco_rules_files(self, save_dir): - '''**Description** - Given a file and directory layout as described in save_default_falco_rules_files(), load those files and - return a dict representing the contents. This dict is suitable for passing to set_default_falco_rules_files(). - - **Arguments** - - save_dir: a directory path from which to load the files. 
- - **Success Return Value** - - A dict matching the format described in get_default_falco_rules_files. - - **Example** - `examples/set_default_falco_rules_files.py `_ - ''' - - tags = os.listdir(save_dir) - - try: - tags.remove("default_policies.yaml") - except ValueError: - # Do nothing, it wasn't in the list of files - pass - - if len(tags) != 1: - return [False, "Directory {} did not contain exactly 1 entry".format(save_dir)] - - tpath = os.path.join(save_dir, tags[0]) - - if not os.path.isdir(tpath): - return [False, "Tag path {} is not a directory".format(tpath)] - - defjson = [] - defpath = os.path.join(save_dir, "default_policies.yaml") - if os.path.exists(defpath): - try: - with open(defpath, "r") as infile: - defjson = yaml.safe_load(infile) - except Exception as exc: - return [False, "Could not load default_policies.yaml: " + exc] - - ret = {"tag": os.path.basename(tpath), "files": [], "defaultPolicies": defjson} - - for fdir in os.listdir(tpath): - fpath = os.path.join(tpath, fdir) - if not os.path.isdir(fpath): - return [False, "File path {} is not a directory".format(fpath)] - fobj = {"name": os.path.basename(fpath), "variants": []} - for vdir in os.listdir(fpath): - vpath = os.path.join(fpath, vdir) - if not os.path.isdir(vpath): - return [False, "Variant path {} is not a directory".format(vpath)] - cpath = os.path.join(vpath, "content") - try: - with open(cpath, 'r') as content_file: - try: - required_engine_version = int(os.path.basename(vpath)) - if int(os.path.basename(vpath)) < 0: - return [False, "Variant directory {} must be a positive number".format(vpath)] - fobj["variants"].append({ - "requiredEngineVersion": required_engine_version, - "content": content_file.read() - }) - except ValueError: - return [False, "Variant directory {} must be a number".format(vpath)] - except Exception as e: - return [False, "Could not read content at {}: {}".format(cpath, str(e))] - - ret["files"].append(fobj) - - return [True, ret] - - def 
create_default_policies(self): - '''**Description** - Create new policies based on the currently available set of rules. For now, this only covers Falco rules, but we might extend - the endpoint later. The backend should use the defaultPolicies property of a previously provided FalcoRulesFiles model as - guidance on the set of policies to create. The backend should only create new policies (not delete or modify), and should only - create new policies if there is not an existing policy with the same name. - - **Arguments** - - None - - **Success Return Value** - JSON containing details on any new policies that were added. - - **Example** - `examples/create_default_policies.py `_ - - ''' - res = requests.post(self.url + '/api/v2/policies/default', headers=self.hdrs, verify=self.ssl_verify) - return self._request_result(res) - - def delete_all_policies(self): - '''**Description** - Delete all existing policies. The falco rules file is unchanged. - - **Arguments** - - None - - **Success Return Value** - The string "Policies Deleted" - - **Example** - `examples/delete_all_policies.py `_ - - ''' - ok, res = self.list_policies() - if not ok: - return False, res - - for policy in res: - ok, res = self.delete_policy_id(policy["id"]) - if not ok: - return False, res - - return True, "Policies Deleted" - - def list_policies(self): - '''**Description** - List the current set of policies. - - **Arguments** - - None - - **Success Return Value** - A JSON object containing the number and details of each policy. - - **Example** - `examples/list_policies.py `_ - - ''' - res = requests.get(self.url + '/api/v2/policies', headers=self.hdrs, verify=self.ssl_verify) - return self._request_result(res) - - def get_policy(self, name): - '''**Description** - Find the policy with name and return its json description. - - **Arguments** - - name: the name of the policy to fetch - - **Success Return Value** - A JSON object containing the description of the policy. 
If there is no policy with - the given name, returns False. - - **Example** - `examples/get_policy.py `_ - - ''' - ok, res = self.list_policies() - if not ok: - return [False, res] - policies = res - - # Find the policy with the given name and return it. - for policy in policies: - if policy["name"] == name: - return [True, policy] - - return [False, "No policy with name {}".format(name)] - - def get_policy_id(self, id): - '''**Description** - Find the policy with id and return its json description. - - **Arguments** - - id: the id of the policy to fetch - - **Success Return Value** - A JSON object containing the description of the policy. If there is no policy with - the given name, returns False. - ''' - res = requests.get(self.url + '/api/v2/policies/{}'.format(id), headers=self.hdrs, verify=self.ssl_verify) - return self._request_result(res) - - def add_policy(self, name, description, rule_names=[], actions=[], scope=None, severity=0, enabled=True, - notification_channels=[]): - '''**Description** - Add a new policy. - - **Arguments** - - name: A short name for the policy - - description: Description of policy - - rule_names: Array of rule names. (They must be names instead of ids, as the rules list view is by name, to account for multiple rules having the same name). - - actions: It can be a stop, pause and/or capture action - - scope: Where the policy is being applied- Container, Host etc.. (example: "container.image.repository = sysdig/agent") - - enabled: True if the policy should be considered - - severity: How severe is this policy when violated. Range from 0 to 7 included. 
- - notification_channels: ids of the notification channels to subscribe to the policy - - **Success Return Value** - The string "OK" - ''' - policy = { - "name": name, - "description": description, - "ruleNames": rule_names, - "actions": actions, - "scope": scope, - "severity": severity, - "enabled": enabled, - "notificationChannelIds": notification_channels - } - res = requests.post(self.url + '/api/v2/policies', headers=self.hdrs, data=json.dumps(policy), - verify=self.ssl_verify) - return self._request_result(res) - - def add_policy_json(self, policy_json): - '''**Description** - Add a new policy using the provided json. - - **Arguments** - - policy_json: a description of the new policy - - **Success Return Value** - The string "OK" - - **Example** - `examples/add_policy.py `_ - - ''' - - try: - policy_obj = json.loads(policy_json) - if "origin" in policy_obj: - del policy_obj["origin"] - except Exception as e: - return [False, "policy json is not valid json: {}".format(str(e))] - - res = requests.post(self.url + '/api/v2/policies', headers=self.hdrs, data=json.dumps(policy_obj), - verify=self.ssl_verify) - return self._request_result(res) - - def update_policy(self, id, name=None, description=None, rule_names=None, actions=None, scope=None, - severity=None, enabled=None, notification_channels=None): - '''**Description** - Update policy with the provided values. - - **Arguments** - - id: the id of the policy to update - - name: A short name for the policy - - description: Description of policy - - rule_names: Array of rule names. (They must be names instead of ids, as the rules list view is by name, to account for multiple rules having the same name). - - actions: It can be a stop, pause and/or capture action - - scope: Where the policy is being applied- Container, Host etc.. (example: "container.image.repository = sysdig/agent") - - enabled: True if the policy should be considered - - severity: How severe is this policy when violated. 
Range from 0 to 7 included. - - notification_channels: ids of the notification channels to subscribe to the policy - - **Success Return Value** - The string "OK" - ''' - ok, res = self.get_policy_id(id) - if not ok: - return [False, res] - policy = res - - if name is not None: - policy["name"] = name - if description is not None: - policy["description"] = description - if rule_names is not None: - policy["ruleNames"] = rule_names - if actions is not None: - policy["actions"] = actions - if scope is not None: - policy["scope"] = scope - if severity is not None: - policy["severity"] = severity - if enabled is not None: - policy["enabled"] = enabled - if notification_channels is not None: - policy["notificationChannelIds"] = notification_channels - - res = requests.put(self.url + '/api/v2/policies/{}'.format(id), headers=self.hdrs, data=json.dumps(policy), - verify=self.ssl_verify) - return self._request_result(res) - - def update_policy_json(self, policy_json): - '''**Description** - Update an existing policy using the provided json. The 'id' field from the policy is - used to determine which policy to update. - - **Arguments** - - policy_json: a description of the new policy - - **Success Return Value** - The string "OK" - - **Example** - `examples/update_policy.py `_ - - ''' - try: - policy_obj = json.loads(policy_json) - if "origin" in policy_obj: - del policy_obj["origin"] - except Exception as e: - return [False, "policy json is not valid json: {}".format(str(e))] - - if "id" not in policy_obj: - return [False, "Policy Json does not have an 'id' field"] - - res = requests.put(self.url + '/api/v2/policies/{}'.format(policy_obj["id"]), headers=self.hdrs, - data=json.dumps(policy_obj), verify=self.ssl_verify) - return self._request_result(res) - - def delete_policy_name(self, name): - '''**Description** - Delete the policy with the given name. 
- - **Arguments** - - name: the name of the policy to delete - - **Success Return Value** - The JSON object representing the now-deleted policy. - - **Example** - `examples/delete_policy.py `_ - - ''' - ok, res = self.list_policies() - if not ok: - return [False, res] - - # Find the policy with the given name and delete it - for policy in res: - if policy["name"] == name: - return self.delete_policy_id(policy["id"]) - - return [False, "No policy with name {}".format(name)] - - def delete_policy_id(self, id): - '''**Description** - Delete the policy with the given id - - **Arguments** - - id: the id of the policy to delete - - **Success Return Value** - The JSON object representing the now-deleted policy. - - **Example** - `examples/delete_policy.py `_ - - ''' - res = requests.delete(self.url + '/api/v2/policies/{}'.format(id), headers=self.hdrs, verify=self.ssl_verify) - return self._request_result(res) - - def list_rules(self): - '''**Description** - Returns the list of rules in the system. These are grouped by name - and do not necessarily represent individual rule objects, as multiple - rules can have the same name. - - **Arguments** - - None - - **Success Return Value** - A JSON object representing the list of rules. - ''' - res = requests.get(self.url + '/api/secure/rules/summaries', headers=self.hdrs, verify=self.ssl_verify) - return self._request_result(res) - - def get_rules_group(self, name): - '''**Description** - Retrieve a group of all rules having the given name. This is used to - show how a base rule is modified by later rules that override/append - to the rule. - - **Arguments** - - name: the name of the rule group - - **Success Return Value** - A JSON object representing the list of rules. 
- ''' - res = requests.get(self.url + '/api/secure/rules/groups?name={}'.format(name), headers=self.hdrs, - verify=self.ssl_verify) - return self._request_result(res) - - def get_rule_id(self, id): - '''**Description** - Retrieve info about a single rule - - **Arguments** - - id: the id of the rule - - **Success Return Value** - A JSON object representing the rule. - ''' - res = requests.get(self.url + '/api/secure/rules/{}'.format(id), headers=self.hdrs, verify=self.ssl_verify) - return self._request_result(res) - - def add_rule(self, name, details={}, description="", tags=[]): - '''**Description** - Create a new rule - - **Arguments** - - name: A name for this object. Should exactly be the value of the "rule" property of the yaml object. - - details: The rule description as a python dictionary. - - description: A description of this rule. No newlines/formatting. - - tags: The set of tags. - - **Success Return Value** - A JSON object representing the rule. - ''' - rule = { - "name": name, - "description": description, - "details": details, - "tags": tags - } - res = requests.post(self.url + '/api/secure/rules', data=json.dumps(rule), headers=self.hdrs, - verify=self.ssl_verify) - return self._request_result(res) - - def update_rule(self, id, details={}, description="", tags=[]): - '''**Description** - Update info associated with a rule - - **Arguments** - - id: The rule id - - details: The rule description as a python dictionary. - - description: A description of this rule. No newlines/formatting. - - tags: The set of tags. - - **Success Return Value** - A JSON object representing the rule. 
- ''' - ok, res = self.get_rule_id(id) - if not ok: - return [False, res] - rule = res - - if details: - rule['details'] = details - if description: - rule['description'] = description - if tags: - rule['tags'] = tags - res = requests.put(self.url + '/api/secure/rules/{}'.format(id), data=json.dumps(rule), headers=self.hdrs, - verify=self.ssl_verify) - return self._request_result(res) - - def delete_rule(self, id): - '''**Description** - Delete the rule with given id. - - **Arguments** - - id: The rule id - - **Success Return Value** - A JSON object representing the rule. - ''' - res = requests.delete(self.url + '/api/secure/rules/{}'.format(id), headers=self.hdrs, verify=self.ssl_verify) - return self._request_result(res) - - def list_falco_macros(self): - '''**Description** - Returns the list of macros in the system. These are grouped by name - and do not necessarily represent individual macro objects, as multiple - macros can have the same name. - - **Arguments** - - None - - **Success Return Value** - A JSON object representing the list of falco macros. - ''' - res = requests.get(self.url + '/api/secure/falco/macros/summaries', headers=self.hdrs, verify=self.ssl_verify) - return self._request_result(res) - - def get_falco_macros_group(self, name): - '''**Description** - Retrieve a group of all falco groups having the given name. This is used - to show how a base macro is modified by later macrosthat override/append - to the macro. - - **Arguments** - - name: the name of the falco macros group - - **Success Return Value** - A JSON object representing the list of falco macros. 
- ''' - res = requests.get(self.url + '/api/secure/falco/macros/groups?name={}'.format(name), headers=self.hdrs, - verify=self.ssl_verify) - return self._request_result(res) - - def get_falco_macro_id(self, id): - '''**Description** - Retrieve info about a single falco macro - - **Arguments** - - id: the id of the falco macro - - **Success Return Value** - A JSON object representing the falco macro. - ''' - res = requests.get(self.url + '/api/secure/falco/macros/{}'.format(id), headers=self.hdrs, - verify=self.ssl_verify) - return self._request_result(res) - - def add_falco_macro(self, name, condition, append=False): - '''**Description** - Create a new macro - - **Arguments** - - name: A name for this object. Should exactly be the value of the "macro" property of the yaml object. - - condition: the full condition text exactly as represented in the yaml file. - - **Success Return Value** - A JSON object representing the falco macro. - ''' - macro = { - "name": name, - "condition": { - "components": [], - "condition": condition - }, - "append": append - } - res = requests.post(self.url + '/api/secure/falco/macros', data=json.dumps(macro), headers=self.hdrs, - verify=self.ssl_verify) - return self._request_result(res) - - def update_falco_macro(self, id, condition): - '''**Description** - Update info associated with a macro - - **Arguments** - - id: The rule id - - condition: the full condition text exactly as represented in the yaml file. - - **Success Return Value** - A JSON object representing the macro. - ''' - ok, res = self.get_falco_macro_id(id) - if not ok: - return [False, res] - macro = res - macro['condition']['condition'] = condition - - res = requests.put(self.url + '/api/secure/falco/macros/{}'.format(id), data=json.dumps(macro), - headers=self.hdrs, verify=self.ssl_verify) - return self._request_result(res) - - def delete_falco_macro(self, id): - '''**Description** - Delete the macro with given id. 
- - **Arguments** - - id: The macro id - - **Success Return Value** - A JSON object representing the macro. - ''' - res = requests.delete(self.url + '/api/secure/falco/macros/{}'.format(id), headers=self.hdrs, - verify=self.ssl_verify) - return self._request_result(res) - - def list_falco_lists(self): - '''**Description** - Returns the list of falco lists in the system. These are grouped by - name and do not necessarily represent individual falco list objects, - as multiple falco lists can have the same name. - - **Arguments** - - None - - **Success Return Value** - A JSON object representing the list of falco lists. - ''' - res = requests.get(self.url + '/api/secure/falco/lists/summaries', headers=self.hdrs, verify=self.ssl_verify) - return self._request_result(res) - - def get_falco_lists_group(self, name): - '''**Description** - Retrieve a group of all falco lists having the given name. This is used - to show how a base list is modified by later lists that override/append - to the list. - - **Arguments** - - name: the name of the falco lists group - - **Success Return Value** - A JSON object representing the list of falco lists. - ''' - res = requests.get(self.url + '/api/secure/falco/lists/groups?name={}'.format(name), headers=self.hdrs, - verify=self.ssl_verify) - return self._request_result(res) - - def get_falco_list_id(self, id): - '''**Description** - Retrieve info about a single falco list - - **Arguments** - - id: the id of the falco list - - **Success Return Value** - A JSON object representing the falco list. - ''' - res = requests.get(self.url + '/api/secure/falco/lists/{}'.format(id), headers=self.hdrs, - verify=self.ssl_verify) - return self._request_result(res) - - def add_falco_list(self, name, items, append=False): - '''**Description** - Create a new list - - **Arguments** - - name: A name for this object. Should exactly be the value of the "list" property of the yaml object. - - items: the array of items as represented in the yaml List. 
- - **Success Return Value** - A JSON object representing the falco list. - ''' - flist = { - "name": name, - "items": { - "items": items - }, - "append": append - } - res = requests.post(self.url + '/api/secure/falco/lists', data=json.dumps(flist), headers=self.hdrs, - verify=self.ssl_verify) - return self._request_result(res) - - def update_falco_list(self, id, items): - '''**Description** - Update info associated with a list - - **Arguments** - - id: The rule id - - items: the array of items as represented in the yaml List. - - **Success Return Value** - A JSON object representing the list. - ''' - ok, res = self.get_falco_list_id(id) - if not ok: - return [False, res] - flist = res - flist['items']['items'] = items - - res = requests.put(self.url + '/api/secure/falco/lists/{}'.format(id), data=json.dumps(flist), - headers=self.hdrs, verify=self.ssl_verify) - return self._request_result(res) - - def delete_falco_list(self, id): - '''**Description** - Delete the list with given id. - - **Arguments** - - id: The list id - - **Success Return Value** - A JSON object representing the list. - ''' - res = requests.delete(self.url + '/api/secure/falco/lists/{}'.format(id), headers=self.hdrs, - verify=self.ssl_verify) - return self._request_result(res) - - def add_compliance_task(self, name, module_name='docker-bench-security', schedule='06:00:00Z/PT12H', scope=None, - enabled=True): - '''**Description** - Add a new compliance task. - - **Arguments** - - name: The name of the task e.g. 'Check Docker Compliance'. - - module_name: The name of the module that implements this task. Separate from task name in case you want to use the same module to run separate tasks with different scopes or schedules. [ 'docker-bench-security', 'kube-bench' ] - - schedule: The frequency at which this task should run. Expressed as an `ISO 8601 Duration `_ - - scope: The agent will only run the task on hosts matching this scope or on hosts where containers match this scope. 
- - enabled: Whether this task should actually run as defined by its schedule. - - **Success Return Value** - A JSON representation of the compliance task. - ''' - task = { - "id": None, - "name": name, - "moduleName": module_name, - "enabled": enabled, - "scope": scope, - "schedule": schedule - } - res = requests.post(self.url + '/api/complianceTasks', data=json.dumps(task), headers=self.hdrs, - verify=self.ssl_verify) - return self._request_result(res) - - def list_compliance_tasks(self): - '''**Description** - Get the list of all compliance tasks. - - **Arguments** - - None - - **Success Return Value** - A JSON list with the representation of each compliance task. - ''' - res = requests.get(self.url + '/api/complianceTasks', headers=self.hdrs, verify=self.ssl_verify) - return self._request_result(res) - - def get_compliance_task(self, id): - '''**Description** - Get a compliance task. - - **Arguments** - - id: the id of the compliance task to get. - - **Success Return Value** - A JSON representation of the compliance task. - ''' - res = requests.get(self.url + '/api/complianceTasks/{}'.format(id), headers=self.hdrs, verify=self.ssl_verify) - return self._request_result(res) - - def update_compliance_task(self, id, name=None, module_name=None, schedule=None, scope=None, enabled=None): - '''**Description** - Update an existing compliance task. - - **Arguments** - - id: the id of the compliance task to be updated. - - name: The name of the task e.g. 'Check Docker Compliance'. - - module_name: The name of the module that implements this task. Separate from task name in case you want to use the same module to run separate tasks with different scopes or schedules. [ 'docker-bench-security', 'kube-bench' ] - - schedule: The frequency at which this task should run. Expressed as an `ISO 8601 Duration `_ - - scope: The agent will only run the task on hosts matching this scope or on hosts where containers match this scope. 
- - enabled: Whether this task should actually run as defined by its schedule. - - **Success Return Value** - A JSON representation of the compliance task. - ''' - ok, res = self.get_compliance_task(id) - if not ok: - return ok, res - - task = res - options = { - 'name': name, - 'moduleName': module_name, - 'schedule': schedule, - 'scope': scope, - 'enabled': enabled - } - task.update({k: v for k, v in options.items() if v is not None}) - res = requests.put(self.url + '/api/complianceTasks/{}'.format(id), data=json.dumps(task), headers=self.hdrs, - verify=self.ssl_verify) - return self._request_result(res) - - def delete_compliance_task(self, id): - '''**Description** - Delete the compliance task with the given id - - **Arguments** - - id: the id of the compliance task to delete - ''' - res = requests.delete(self.url + '/api/complianceTasks/{}'.format(id), headers=self.hdrs, - verify=self.ssl_verify) - if not self._checkResponse(res): - return False, self.lasterr - - return True, None - - def list_compliance_results(self, limit=50, direction=None, cursor=None, filter=""): - '''**Description** - Get the list of all compliance tasks runs. - - **Arguments** - - limit: Maximum number of alerts in the response. - - direction: the direction (PREV or NEXT) that determines which results to return in relation to cursor. - - cursor: An opaque string representing the current position in the list of alerts. It's provided in the 'responseMetadata' of the list_alerts response. - - filter: an optional case insensitive filter used to match against the completed task name and return matching results. - - **Success Return Value** - A JSON list with the representation of each compliance task run. 
- ''' - url = "{url}/api/complianceResults?cursor{cursor}&filter={filter}&limit={limit}{direction}".format( - url=self.url, - limit=limit, - direction="&direction=%s" % direction if direction else "", - cursor="=%d" % cursor if cursor is not None else "", - filter=filter) - res = requests.get(url, headers=self.hdrs, verify=self.ssl_verify) - return self._request_result(res) - - def get_compliance_results(self, id): - '''**Description** - Retrieve the details for a specific compliance task run result. - - **Arguments** - - id: the id of the compliance task run to get. - - **Success Return Value** - A JSON representation of the compliance task run result. - ''' - res = requests.get(self.url + '/api/complianceResults/{}'.format(id), headers=self.hdrs, verify=self.ssl_verify) - return self._request_result(res) - - def get_compliance_results_csv(self, id): - '''**Description** - Retrieve the details for a specific compliance task run result in csv. - - **Arguments** - - id: the id of the compliance task run to get. - - **Success Return Value** - A CSV representation of the compliance task run result. - ''' - res = requests.get(self.url + '/api/complianceResults/{}/csv'.format(id), headers=self.hdrs, - verify=self.ssl_verify) - if not self._checkResponse(res): - return False, self.lasterr - - return True, res.text - - def list_commands_audit(self, from_sec=None, to_sec=None, scope_filter=None, command_filter=None, limit=100, - offset=0, metrics=[]): - '''**Description** - List the commands audit. - - **Arguments** - - from_sec: the start of the timerange for which to get commands audit. - - end_sec: the end of the timerange for which to get commands audit. - - scope_filter: this is a SysdigMonitor-like filter (e.g 'container.image=ubuntu'). When provided, commands are filtered by their scope, so only a subset will be returned (e.g. 'container.image=ubuntu' will provide only commands that have happened on an ubuntu container). 
- - command_filter: this is a SysdigMonitor-like filter (e.g. command.comm="touch"). When provided, commands are filtered by some of their properties. Currently the supported set of filters is command.comm, command.cwd, command.pid, command.ppid, command.uid, command.loginshell.id, command.loginshell.distance - - limit: Maximum number of commands in the response. - - metrics: A list of metric values to include in the return. - - **Success Return Value** - A JSON representation of the commands audit. - ''' - if to_sec is None: - to_sec = time.time() - if from_sec is None: - from_sec = to_sec - (24 * 60 * 60) # 1 day - - url = "{url}/api/commands?from={frm}&to={to}&offset={offset}&limit={limit}{scope}{commandFilter}{metrics}".format( - url=self.url, - offset=offset, - limit=limit, - frm=int(from_sec * 10 ** 6), - to=int(to_sec * 10 ** 6), - scope="&scopeFilter=" + scope_filter if scope_filter else "", - commandFilter="&commandFilter=" + command_filter if command_filter else "", - metrics="&metrics=" + json.dumps(metrics) if metrics else "") - res = requests.get(url, headers=self.hdrs, verify=self.ssl_verify) - return self._request_result(res) - - def get_command_audit(self, id, metrics=[]): - '''**Description** - Get a command audit. - - **Arguments** - - id: the id of the command audit to get. - - **Success Return Value** - A JSON representation of the command audit. - ''' - url = "{url}/api/commands/{id}?from=0&to={to}{metrics}".format( - url=self.url, - id=id, - to=int(time.time() * 10 ** 6), - metrics="&metrics=" + json.dumps(metrics) if metrics else "") - res = requests.get(url, headers=self.hdrs, verify=self.ssl_verify) - return self._request_result(res) - - def list_image_profiles(self): - '''**Description** - List the current set of image profiles. - - **Arguments** - - None - - **Success Return Value** - A JSON object containing the details of each profile. 
- - ''' - url = "{url}/api/v1/profiling/profileGroups/0/profiles".format( - url=self.url - ) - - res = requests.get(url, headers=self.hdrs, verify=self.ssl_verify) - return self._request_result(res) - - def get_image_profile(self, profileId): - '''**Description** - Find the image profile with a (partial) profile ID and return its json description. - - **Arguments** - - name: the name of the image profile to fetch - - **Success Return Value** - A JSON object containing the description of the image profile. If there is no image profile with - the given name, returns False. Moreover, it could happen that more than one profile IDs have a collision. - It is due to the fact that a partial profile ID can be passed and interpreted; in this case a set of - collision profiles is returned, and the full complete ID string is printed. In this case, it returns - false. - - ''' - - # RETRIEVE ALL THE IMAGE PROFILES - ok, image_profiles = self.list_image_profiles() - - if not ok: - return [False, self.lasterr] - - ''' - The content of the json stored in the image_profiles dictionary: - - { - "offset": 0, - "limit": 99, - "canLoadMore": false, - "profiles": [ - ... - ] - } - ''' - - matched_profiles = self.__get_matched_profileIDs(profileId, image_profiles['profiles']) - - # Profile ID not found - if len(matched_profiles) == 0: - return [False, "No profile with ID {}".format(profileId)] - - # Principal workflow. Profile ID found - elif len(matched_profiles) == 1: - # Matched id. Return information - url = "{url}/api/v1/profiling/profiles/{profileId}".format( - url=self.url, - profileId=matched_profiles[0]['profileId'] - ) - - res = requests.get(url, headers=self.hdrs, verify=self.ssl_verify) - return self._request_result(res) - - # Collision detected. 
The full profile IDs are returned - elif len(matched_profiles) >= 2: - return [False, matched_profiles] - - def __get_matched_profileIDs(self, requested_profile, profile_list): - ''' - **Description** - Helper function for retrieving the list of matching profile - - **Arguments** - - the requested profile Id (string) - - List of dictionary, where each dictionary contains the profile information - - **Success Return Value** - List of dictionary, where each dictionary represents a profile with the ID prefix substring - matching the requested one - - **Content structure of the profile_list parameter** - This array of profiles contains all the relevant information. For the purposes of this function, only - the profileId field is relevant. - - [ - { - "profileGroupId": 0, - "profileId": "00000000000000000000000000000000000000000000", - "profileVersion": 0, - "profileName": "AAA/BBB:XYZ@0000000000000000000000", - "imageId": "00000000000000000000000000000000000000000000", - "imageName": "AAA/BBB:XYZ", - "processesProposal": { - "subcategories": [ - { - "name": "process", - "ruleName": "process - 00000000000000000000000000000000000000000000", - "ruleType": "PROCESS", - "score": 000 - } - ], - "score": 000 - }, - "fileSystemProposal": { - "subcategories": [ - { - "name": "filesystem", - "ruleName": "filesystem - 00000000000000000000000000000000000000000000", - "ruleType": "FILESYSTEM", - "score": 000 - } - ], - "score": 000 - }, - "syscallProposal": { - "subcategories": [ - { - "name": "syscalls", - "ruleName": "syscalls - 00000000000000000000000000000000000000000000", - "ruleType": "SYSCALL", - "score": 000 - } - ], - "score": 000 - }, - "networkProposal": { - "subcategories": [ - { - "name": "network", - "ruleName": "network - 00000000000000000000000000000000000000000000", - "ruleType": "NETWORK", - "score": 000 - } - ], - "score": 000 - }, - "containerImagesProposal": { - "subcategories": [ - { - "name": "container image", - "ruleName": "container image - 
00000000000000000000000000000000000000000000", - "ruleType": "CONTAINER", - "score": 0 - } - ], - "score": 0 - }, - "status": "STATUS_VALUE", - "score": 000 - }, - ... - ] - ''' - - matched_profiles = [] - - request_len = len(requested_profile) - for profile in profile_list: - - # get the length of the substring to match - str_len_match = min(len(profile), request_len) - - if profile['profileId'][0:str_len_match] == requested_profile[0:str_len_match]: - matched_profiles.append(profile) - - return matched_profiles diff --git a/sdcclient/_secure_v1.py b/sdcclient/_secure_v1.py deleted file mode 100644 index 9fad6c74..00000000 --- a/sdcclient/_secure_v1.py +++ /dev/null @@ -1,204 +0,0 @@ -import json -import requests - -from sdcclient._secure import SdSecureClient - - -class SdSecureClientV1(SdSecureClient): - '''**Description** - Handles policies version 1 (ie. up to August 2019). For later Sysdig Secure versions, please use :class:`~SdSecureClient` instead. - ''' - - def create_default_policies(self): - '''**Description** - Create a set of default policies using the current system falco rules file as a reference. For every falco rule in the system - falco rules file, one policy will be created. The policy will take the name and description from the name and description of - the corresponding falco rule. If a policy already exists with the same name, no policy is added or modified. Existing - policies will be unchanged. - - **Arguments** - - None - - **Success Return Value** - JSON containing details on any new policies that were added. - ''' - res = requests.post(self.url + '/api/policies/createDefault', headers=self.hdrs, verify=self.ssl_verify) - return self._request_result(res) - - def delete_all_policies(self): - '''**Description** - Delete all existing policies. The falco rules file is unchanged. 
- - **Arguments** - - None - - **Success Return Value** - The string "Policies Deleted" - ''' - res = requests.post(self.url + '/api/policies/deleteAll', headers=self.hdrs, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - - return [True, "Policies Deleted"] - - def list_policies(self): - '''**Description** - List the current set of policies. - - **Arguments** - - None - - **Success Return Value** - A JSON object containing the number and details of each policy. - ''' - res = requests.get(self.url + '/api/policies', headers=self.hdrs, verify=self.ssl_verify) - return self._request_result(res) - - def get_policy_priorities(self): - '''**Description** - Get a list of policy ids in the order they will be evaluated. - - **Arguments** - - None - - **Success Return Value** - A JSON object representing the list of policy ids. - ''' - - res = requests.get(self.url + '/api/policies/priorities', headers=self.hdrs, verify=self.ssl_verify) - return self._request_result(res) - - def set_policy_priorities(self, priorities_json): - '''**Description** - Change the policy evaluation order - - **Arguments** - - priorities_json: a description of the new policy order. - - **Success Return Value** - A JSON object representing the updated list of policy ids. - ''' - - try: - json.loads(priorities_json) - except Exception as e: - return [False, "priorities json is not valid json: {}".format(str(e))] - - res = requests.put(self.url + '/api/policies/priorities', headers=self.hdrs, data=priorities_json, verify=self.ssl_verify) - return self._request_result(res) - - def get_policy(self, name): - '''**Description** - Find the policy with name and return its json description. - - **Arguments** - - name: the name of the policy to fetch - - **Success Return Value** - A JSON object containing the description of the policy. If there is no policy with - the given name, returns False. 
- ''' - ok, res = self.list_policies() - if not ok: - return [False, res] - - policies = res["policies"] - - # Find the policy with the given name and return it. - for policy in policies: - if policy["name"] == name: - return [True, policy] - - return [False, "No policy with name {}".format(name)] - - def get_policy_id(self, id): - '''**Description** - Find the policy with id and return its json description. - - **Arguments** - - id: the id of the policy to fetch - - **Success Return Value** - A JSON object containing the description of the policy. If there is no policy with - the given name, returns False. - ''' - res = requests.get(self.url + '/api/policies/{}'.format(id), headers=self.hdrs, verify=self.ssl_verify) - return self._request_result(res) - - def add_policy(self, policy_json): - '''**Description** - Add a new policy using the provided json. - - **Arguments** - - policy_json: a description of the new policy - - **Success Return Value** - The string "OK" - ''' - try: - policy_obj = json.loads(policy_json) - except Exception as e: - return [False, "policy json is not valid json: {}".format(str(e))] - - body = {"policy": policy_obj} - res = requests.post(self.url + '/api/policies', headers=self.hdrs, data=json.dumps(body), verify=self.ssl_verify) - return self._request_result(res) - - def update_policy(self, policy_json): - '''**Description** - Update an existing policy using the provided json. The 'id' field from the policy is - used to determine which policy to update. 
- - **Arguments** - - policy_json: a description of the new policy - - **Success Return Value** - The string "OK" - ''' - - try: - policy_obj = json.loads(policy_json) - except Exception as e: - return [False, "policy json is not valid json: {}".format(str(e))] - - if "id" not in policy_obj: - return [False, "Policy Json does not have an 'id' field"] - - body = {"policy": policy_obj} - - res = requests.put(self.url + '/api/policies/{}'.format(policy_obj["id"]), headers=self.hdrs, data=json.dumps(body), verify=self.ssl_verify) - return self._request_result(res) - - def delete_policy_name(self, name): - '''**Description** - Delete the policy with the given name. - - **Arguments** - - name: the name of the policy to delete - - **Success Return Value** - The JSON object representing the now-deleted policy. - ''' - ok, res = self.list_policies() - if not ok: - return [False, res] - - # Find the policy with the given name and delete it - for policy in res["policies"]: - if policy["name"] == name: - return self.delete_policy_id(policy["id"]) - - return [False, "No policy with name {}".format(name)] - - def delete_policy_id(self, id): - '''**Description** - Delete the policy with the given id - - **Arguments** - - id: the id of the policy to delete - - **Success Return Value** - The JSON object representing the now-deleted policy. - ''' - res = requests.delete(self.url + '/api/policies/{}'.format(id), headers=self.hdrs, verify=self.ssl_verify) - return self._request_result(res) diff --git a/sdcclient/ibm_auth_helper.py b/sdcclient/ibm_auth_helper.py deleted file mode 100644 index 71af793d..00000000 --- a/sdcclient/ibm_auth_helper.py +++ /dev/null @@ -1,50 +0,0 @@ -import requests - -class IbmAuthHelper(): - '''Authenticate with IBM Cloud IAM. 
- - **Arguments** - **url**: Sysdig endpoint URL that should point to IBM Cloud - **apikey**: IBM Cloud IAM apikey that will be used to retrieve an access token - **guid**: GUID of an IBM Cloud Monitoring with Sysdig instance - - **Returns** - A dictionary that will authenticate you with the IBM Cloud IAM API. - ''' - - @staticmethod - def get_headers(url, apikey, guid): - iam_token = IbmAuthHelper.__get_iam_token(url, apikey) - return { - 'Authorization': 'Bearer ' + iam_token, - 'IBMInstanceID': guid - } - - @staticmethod - def __get_iam_endpoint(url): - IAM_ENDPOINT = { - 'stage': 'iam.test.cloud.ibm.com', - 'prod': 'iam.cloud.ibm.com' - } - if '.test.' in url: - return IAM_ENDPOINT['stage'] - else: - return IAM_ENDPOINT['prod'] - - @staticmethod - def __get_iam_token(url, apikey): - env_url = IbmAuthHelper.__get_iam_endpoint(url) - response = requests.post( - 'https://' + env_url + '/identity/token', - data={ - 'grant_type': 'urn:ibm:params:oauth:grant-type:apikey', - 'response_type': 'cloud_iam', - 'apikey': apikey - }, - headers={ - 'Accept': 'application/json' - }) - if response.status_code == 200: - return response.json()['access_token'] - else: - response.raise_for_status() diff --git a/sdcclient/monitor/__init__.py b/sdcclient/monitor/__init__.py deleted file mode 100644 index a4ff11e0..00000000 --- a/sdcclient/monitor/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -from ._dashboards_v3 import DashboardsClientV3 -from ._dashboards_v2 import DashboardsClientV2 -from ._events_v1 import EventsClientV1 -from ._events_v2 import EventsClientV2 diff --git a/sdcclient/monitor/_dashboards_v2.py b/sdcclient/monitor/_dashboards_v2.py deleted file mode 100644 index 7ef9fcb4..00000000 --- a/sdcclient/monitor/_dashboards_v2.py +++ /dev/null @@ -1,589 +0,0 @@ -import copy -import json - -import requests - -from sdcclient._common import _SdcCommon -from sdcclient.monitor.dashboard_converters import convert_dashboard_between_versions -from 
sdcclient.monitor.dashboard_converters._dashboard_scope import convert_scope_string_to_expression - - -class DashboardsClientV2(_SdcCommon): - def __init__(self, token="", sdc_url='https://app.sysdigcloud.com', ssl_verify=True, custom_headers=None): - super(DashboardsClientV2, self).__init__(token, sdc_url, ssl_verify, custom_headers) - self.product = "SDC" - self._dashboards_api_version = 'v2' - self._dashboards_api_endpoint = '/api/{}/dashboards'.format(self._dashboards_api_version) - self._default_dashboards_api_endpoint = '/api/{}/defaultDashboards'.format(self._dashboards_api_version) - - def get_views_list(self): - res = requests.get(self.url + self._default_dashboards_api_endpoint, headers=self.hdrs, - verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - return [True, res.json()] - - def get_view(self, name): - gvres = self.get_views_list() - if gvres[0] is False: - return gvres - - vlist = gvres[1]['defaultDashboards'] - - id = None - - for v in vlist: - if v['name'] == name: - id = v['id'] - break - - if not id: - return [False, 'view ' + name + ' not found'] - - res = requests.get(self.url + self._default_dashboards_api_endpoint + '/' + id, headers=self.hdrs, - verify=self.ssl_verify) - return self._request_result(res) - - def get_dashboards(self): - '''**Description** - Return the list of dashboards available under the given user account. This includes the dashboards created by the user and the ones shared with her by other users. - - **Success Return Value** - A dictionary containing the list of available sampling intervals. - - **Example** - `examples/list_dashboards.py `_ - ''' - res = requests.get(self.url + self._dashboards_api_endpoint, headers=self.hdrs, verify=self.ssl_verify) - return self._request_result(res) - - def update_dashboard(self, dashboard_data): - '''**Description** - Updates dashboard with provided in data. 
Please note that the dictionary will require a valid ID and version field to work as expected. - - **Success Return Value** - A dictionary containing the updated dashboard data. - - **Example** - `examples/dashboard_basic_crud.py `_ - ''' - res = requests.put(self.url + self._dashboards_api_endpoint + "/" + str(dashboard_data['id']), - headers=self.hdrs, verify=self.ssl_verify, data=json.dumps({'dashboard': dashboard_data})) - return self._request_result(res) - - def find_dashboard_by(self, name=None): - '''**Description** - Finds dashboards with the specified name. You can then delete the dashboard (with :func:`~SdcClient.delete_dashboard`) or edit panels (with :func:`~SdcClient.add_dashboard_panel` and :func:`~SdcClient.remove_dashboard_panel`) - - **Arguments** - - **name**: the name of the dashboards to find. - - **Success Return Value** - A list of dictionaries of dashboards matching the specified name. - - **Example** - `examples/dashboard.py `_ - ''' - res = self.get_dashboards() - if res[0] is False: - return res - else: - def filter_fn(configuration): - return configuration['name'] == name - - def create_item(configuration): - return {'dashboard': configuration} - - dashboards = list(map(create_item, list(filter(filter_fn, res[1]['dashboards'])))) - return [True, dashboards] - - def create_dashboard_with_configuration(self, configuration): - # Remove id and version properties if already set - configuration_clone = copy.deepcopy(configuration) - if 'id' in configuration_clone: - del configuration_clone['id'] - if 'version' in configuration_clone: - del configuration_clone['version'] - - res = requests.post(self.url + self._dashboards_api_endpoint, headers=self.hdrs, - data=json.dumps({'dashboard': configuration_clone}), - verify=self.ssl_verify) - return self._request_result(res) - - def create_dashboard(self, name): - ''' - **Description** - Creates an empty dashboard. You can then add panels by using ``add_dashboard_panel``. 
- - **Arguments** - - **name**: the name of the dashboard that will be created. - - **Success Return Value** - A dictionary showing the details of the new dashboard. - - **Example** - `examples/dashboard.py `_ - ''' - dashboard_configuration = { - 'name': name, - 'schema': 2, - 'widgets': [], - 'eventsOverlaySettings': { - 'filterNotificationsUserInputFilter': '' - } - } - - # - # Create the new dashboard - # - res = requests.post(self.url + self._dashboards_api_endpoint, headers=self.hdrs, - data=json.dumps({'dashboard': dashboard_configuration}), - verify=self.ssl_verify) - return self._request_result(res) - - # TODO COVER - def add_dashboard_panel(self, dashboard, name, panel_type, metrics, scope=None, sort_direction='desc', limit=None, - layout=None): - """**Description** - Adds a panel to the dashboard. A panel can be a time series, or a top chart (i.e. bar chart), or a number panel. - - **Arguments** - - **dashboard**: dashboard to edit - - **name**: name of the new panel - - **panel_type**: type of the new panel. Valid values are: ``timeSeries``, ``top``, ``number`` - - **metrics**: a list of dictionaries, specifying the metrics to show in the panel, and optionally, if there is only one metric, a grouping key to segment that metric by. A metric is any of the entries that can be found in the *Metrics* section of the Explore page in Sysdig Monitor. Metric entries require an *aggregations* section specifying how to aggregate the metric across time and groups of containers/hosts. A grouping key is any of the entries that can be found in the *Show* or *Segment By* sections of the Explore page in Sysdig Monitor. Refer to the examples section below for ready to use code snippets. 
Note, certain panels allow certain combinations of metrics and grouping keys: - - ``timeSeries``: 1 or more metrics OR 1 metric + 1 grouping key - - ``top``: 1 or more metrics OR 1 metric + 1 grouping key - - ``number``: 1 metric only - - **scope**: filter to apply to the panel; must be based on metadata available in Sysdig Monitor; Example: *kubernetes.namespace.name='production' and container.image='nginx'*. - - **sort_direction**: Data sorting; The parameter is optional and it's a string identifying the sorting direction (it can be ``desc`` or ``asc``) - - **limit**: This parameter sets the limit on the number of lines/bars shown in a ``timeSeries`` or ``top`` panel. In the case of more entities being available than the limit, the top entities according to the sort will be shown. The default value is 10 for ``top`` panels (for ``timeSeries`` the default is defined by Sysdig Monitor itself). Note that increasing the limit above 10 is not officially supported and may cause performance and rendering issues - - **layout**: Size and position of the panel. The dashboard layout is defined by a grid of 12 columns, each row height is equal to the column height. For example, say you want to show 2 panels at the top: one panel might be 6 x 3 (half the width, 3 rows height) located in row 1 and column 1 (top-left corner of the viewport), the second panel might be 6 x 3 located in row 1 and position 7. The location is specified by a dictionary of ``row`` (row position), ``col`` (column position), ``size_x`` (width), ``size_y`` (height). - - **Success Return Value** - A dictionary showing the details of the edited dashboard. 
- - **Example** - `examples/dashboard.py `_ - """ - panel_configuration = { - 'name': name, - 'showAs': None, - 'metrics': [], - 'gridConfiguration': { - 'col': 1, - 'row': 1, - 'size_x': 12, - 'size_y': 6 - }, - 'customDisplayOptions': {} - } - - if panel_type == 'timeSeries': - # - # In case of a time series, the current dashboard implementation - # requires the timestamp to be explicitly specified as "key". - # However, this function uses the same abstraction of the data API - # that doesn't require to specify a timestamp key (you only need to - # specify time window and sampling) - # - metrics = copy.copy(metrics) - metrics.insert(0, {'id': 'timestamp'}) - - # - # Convert list of metrics to format used by Sysdig Monitor - # - property_names = {} - k_count = 0 - v_count = 0 - for i, metric in enumerate(metrics): - property_name = 'v' if 'aggregations' in metric else 'k' - - if property_name == 'k': - i = k_count - k_count += 1 - else: - i = v_count - v_count += 1 - property_names[metric['id']] = property_name + str(i) - - panel_configuration['metrics'].append({ - 'id': metric['id'], - 'timeAggregation': metric['aggregations']['time'] if 'aggregations' in metric else None, - 'groupAggregation': metric['aggregations']['group'] if 'aggregations' in metric else None, - 'propertyName': property_name + str(i) - }) - - panel_configuration['scope'] = scope - # if chart scope is equal to dashboard scope, set it as non override - panel_configuration['overrideScope'] = ('scope' in dashboard and dashboard['scope'] != scope) or ( - 'scope' not in dashboard and scope != None) - - if 'custom_display_options' not in panel_configuration: - panel_configuration['custom_display_options'] = { - 'valueLimit': { - 'count': 10, - 'direction': 'desc' - }, - 'histogram': { - 'numberOfBuckets': 10 - }, - 'yAxisScale': 'linear', - 'yAxisLeftDomain': { - 'from': 0, - 'to': None - }, - 'yAxisRightDomain': { - 'from': 0, - 'to': None - }, - 'xAxis': { - 'from': 0, - 'to': None - } - } - # - # 
Configure panel type - # - if panel_type == 'timeSeries': - panel_configuration['showAs'] = 'timeSeries' - - if limit != None: - panel_configuration['custom_display_options']['valueLimit'] = { - 'count': limit, - 'direction': 'desc' - } - - elif panel_type == 'number': - panel_configuration['showAs'] = 'summary' - elif panel_type == 'top': - panel_configuration['showAs'] = 'top' - - if limit != None: - panel_configuration['custom_display_options']['valueLimit'] = { - 'count': limit, - 'direction': sort_direction - } - - # - # Configure layout - # - if layout != None: - panel_configuration['gridConfiguration'] = layout - - # - # Clone existing dashboard... - # - dashboard_configuration = copy.deepcopy(dashboard) - - # - # ... and add the new panel - # - dashboard_configuration['widgets'].append(panel_configuration) - - # - # Update dashboard - # - res = requests.put(self.url + self._dashboards_api_endpoint + '/' + str(dashboard['id']), headers=self.hdrs, - data=json.dumps({'dashboard': dashboard_configuration}), - verify=self.ssl_verify) - return self._request_result(res) - - # TODO COVER - def remove_dashboard_panel(self, dashboard, panel_name): - '''**Description** - Removes a panel from the dashboard. The panel to remove is identified by the specified ``name``. - - **Arguments** - - **name**: name of the panel to find and remove - - **Success Return Value** - A dictionary showing the details of the edited dashboard. - - **Example** - `examples/dashboard.py `_ - ''' - # - # Clone existing dashboard... - # - dashboard_configuration = copy.deepcopy(dashboard) - - # - # ... find the panel - # - def filter_fn(panel): - return panel['name'] == panel_name - - panels = list(filter(filter_fn, dashboard_configuration['widgets'])) - - if len(panels) > 0: - # - # ... 
and remove it - # - for panel in panels: - dashboard_configuration['widgets'].remove(panel) - - # - # Update dashboard - # - res = requests.put(self.url + self._dashboards_api_endpoint + '/' + str(dashboard['id']), headers=self.hdrs, - data=json.dumps({'dashboard': dashboard_configuration}), - verify=self.ssl_verify) - return self._request_result(res) - else: - return [False, 'Not found'] - - def create_dashboard_from_template(self, dashboard_name, template, scope, shared=False, public=False): - if scope is not None: - if not isinstance(scope, str): - return [False, 'Invalid scope format: Expected a string'] - - # - # Clean up the dashboard we retireved so it's ready to be pushed - # - template['id'] = None - template['version'] = None - template['schema'] = 2 - template['name'] = dashboard_name - template['shared'] = shared - template['public'] = public - template['publicToken'] = None - - # default dashboards don't have eventsOverlaySettings property - # make sure to add the default set if the template doesn't include it - if 'eventsOverlaySettings' not in template or not template['eventsOverlaySettings']: - template['eventsOverlaySettings'] = { - 'filterNotificationsUserInputFilter': '' - } - - # set dashboard scope to the specific parameter - scopeOk, scopeRes = convert_scope_string_to_expression(scope) - if not scopeOk: - return scopeOk, scopeRes - if scopeRes: - template['scopeExpressionList'] = list(map( - lambda ex: {'operand': ex['operand'], 'operator': ex['operator'], 'value': ex['value'], - 'displayName': '', 'variable': False}, scopeRes)) - else: - template['scopeExpressionList'] = None - - # NOTE: Individual panels might override the dashboard scope, the override will NOT be reset - if 'widgets' in template and template['widgets'] is not None: - for chart in template['widgets']: - if 'overrideScope' not in chart: - chart['overrideScope'] = False - - if chart['overrideScope'] == False: - # patch frontend bug to hide scope override warning even when it's 
not really overridden - chart['scope'] = scope - - if chart['showAs'] != 'map': - # if chart scope is equal to dashboard scope, set it as non override - chart_scope = chart['scope'] if 'scope' in chart else None - chart['overrideScope'] = chart_scope != scope - else: - # topology panels must override the scope - chart['overrideScope'] = True - - # - # Create the new dashboard - # - res = requests.post(self.url + self._dashboards_api_endpoint, headers=self.hdrs, - data=json.dumps({'dashboard': template}), verify=self.ssl_verify) - - return self._request_result(res) - - def create_dashboard_from_view(self, newdashname, viewname, filter, shared=False, public=False): - '''**Description** - Create a new dasboard using one of the Sysdig Monitor views as a template. You will be able to define the scope of the new dashboard. - - **Arguments** - - **newdashname**: the name of the dashboard that will be created. - - **viewname**: the name of the view to use as the template for the new dashboard. This corresponds to the name that the view has in the Explore page. - - **filter**: a boolean expression combining Sysdig Monitor segmentation criteria that defines what the new dasboard will be applied to. For example: *kubernetes.namespace.name='production' and container.image='nginx'*. - - **shared**: if set to True, the new dashboard will be a shared one. - - **public**: if set to True, the new dashboard will be shared with public token. - - **Success Return Value** - A dictionary showing the details of the new dashboard. 
- - **Example** - `examples/create_dashboard.py `_ - ''' - # - # Find our template view - # - gvres = self.get_view(viewname) - if gvres[0] is False: - return gvres - - view = gvres[1]['defaultDashboard'] - - view['timeMode'] = {'mode': 1} - view['time'] = {'last': 2 * 60 * 60 * 1000000, 'sampling': 2 * 60 * 60 * 1000000} - - # - # Create the new dashboard - # - return self.create_dashboard_from_template(newdashname, view, filter, shared, public) - - def get_dashboard(self, dashboard_id): - '''**Description** - Return a dashboard with the pased in ID. This includes the dashboards created by the user and the ones shared with them by other users. - - **Success Return Value** - A dictionary containing the requested dashboard data. - - **Example** - `examples/dashboard_basic_crud.py `_ - ''' - res = requests.get(self.url + self._dashboards_api_endpoint + "/" + str(dashboard_id), headers=self.hdrs, - verify=self.ssl_verify) - return self._request_result(res) - - def create_dashboard_from_dashboard(self, newdashname, templatename, filter, shared=False, public=False): - '''**Description** - Create a new dasboard using one of the existing dashboards as a template. You will be able to define the scope of the new dasboard. - - **Arguments** - - **newdashname**: the name of the dashboard that will be created. - - **viewname**: the name of the dasboard to use as the template, as it appears in the Sysdig Monitor dashboard page. - - **filter**: a boolean expression combining Sysdig Monitor segmentation criteria defines what the new dasboard will be applied to. For example: *kubernetes.namespace.name='production' and container.image='nginx'*. - - **shared**: if set to True, the new dashboard will be a shared one. - - **public**: if set to True, the new dashboard will be shared with public token. - - **Success Return Value** - A dictionary showing the details of the new dashboard. 
- - **Example** - `examples/create_dashboard.py `_ - ''' - # - # Get the list of dashboards from the server - # - res = requests.get(self.url + self._dashboards_api_endpoint, headers=self.hdrs, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - - j = res.json() - - # - # Find our template dashboard - # - dboard = None - - for db in j['dashboards']: - if db['name'] == templatename: - dboard = db - break - - if dboard is None: - self.lasterr = 'can\'t find dashboard ' + templatename + ' to use as a template' - return [False, self.lasterr] - - # - # Create the dashboard - # - return self.create_dashboard_from_template(newdashname, dboard, filter, shared, public) - - def create_dashboard_from_file(self, dashboard_name, filename, filter, shared=False, public=False): - ''' - **Description** - Create a new dasboard using a dashboard template saved to disk. See :func:`~SdcClient.save_dashboard_to_file` to use the file to create a dashboard (usefl to create and restore backups). - - The file can contain the following JSON formats: - 1. dashboard object in the format of an array element returned by :func:`~SdcClient.get_dashboards` - 2. JSON object with the following properties: - * version: dashboards API version (e.g. 'v2') - * dashboard: dashboard object in the format of an array element returned by :func:`~SdcClient.get_dashboards` - - **Arguments** - - **dashboard_name**: the name of the dashboard that will be created. - - **filename**: name of a file containing a JSON object - - **filter**: a boolean expression combining Sysdig Monitor segmentation criteria defines what the new dasboard will be applied to. For example: *kubernetes.namespace.name='production' and container.image='nginx'*. - - **shared**: if set to True, the new dashboard will be a shared one. - - **public**: if set to True, the new dashboard will be shared with public token. - - **Success Return Value** - A dictionary showing the details of the new dashboard. 
- - **Example** - `examples/dashboard_save_load.py `_ - ''' - # - # Load the Dashboard - # - with open(filename) as data_file: - loaded_object = json.load(data_file) - - # - # Handle old files - # - if 'dashboard' not in loaded_object: - loaded_object = { - 'version': 'v1', - 'dashboard': loaded_object - } - - dashboard = loaded_object['dashboard'] - - if loaded_object['version'] != self._dashboards_api_version: - # - # Convert the dashboard (if possible) - # - conversion_result, dashboard = convert_dashboard_between_versions(dashboard, - loaded_object['version'], - self._dashboards_api_version) - - if not conversion_result: - return conversion_result, dashboard - - # - # Create the new dashboard - # - return self.create_dashboard_from_template(dashboard_name, dashboard, filter, shared, public) - - def save_dashboard_to_file(self, dashboard, filename): - ''' - **Description** - Save a dashboard to disk. See :func:`~SdcClient.create_dashboard_from_file` to use the file to create a dashboard (usefl to create and restore backups). - - The file will contain a JSON object with the following properties: - * version: dashboards API version (e.g. 'v2') - * dashboard: dashboard object in the format of an array element returned by :func:`~SdcClient.get_dashboards` - - **Arguments** - - **dashboard**: dashboard object in the format of an array element returned by :func:`~SdcClient.get_dashboards` - - **filename**: name of a file that will contain a JSON object - - **Example** - `examples/dashboard_save_load.py `_ - ''' - with open(filename, 'w') as outf: - json.dump({ - 'version': self._dashboards_api_version, - 'dashboard': dashboard - }, outf) - - def delete_dashboard(self, dashboard): - '''**Description** - Deletes a dashboard. - - **Arguments** - - **dashboard**: the dashboard object as returned by :func:`~SdcClient.get_dashboards`. - - **Success Return Value** - `None`. 
- - **Example** - `examples/delete_dashboard.py `_ - ''' - if 'id' not in dashboard: - return [False, "Invalid dashboard format"] - - res = requests.delete(self.url + self._dashboards_api_endpoint + '/' + str(dashboard['id']), headers=self.hdrs, - verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - - return [True, None] diff --git a/sdcclient/monitor/_dashboards_v3.py b/sdcclient/monitor/_dashboards_v3.py deleted file mode 100644 index 2c8f3570..00000000 --- a/sdcclient/monitor/_dashboards_v3.py +++ /dev/null @@ -1,554 +0,0 @@ -import copy -import json - -import requests - -from sdcclient._common import _SdcCommon -from sdcclient.monitor.dashboard_converters import convert_dashboard_between_versions, \ - convert_scope_string_to_expression - -PANEL_VISUALIZATION_TIMECHART = "advancedTimechart" -PANEL_VISUALIZATION_NUMBER = "advancedNumber" - - -class DashboardsClientV3(_SdcCommon): - def __init__(self, token="", sdc_url='https://app.sysdigcloud.com', ssl_verify=True, custom_headers=None): - super(DashboardsClientV3, self).__init__(token, sdc_url, ssl_verify, custom_headers) - self.product = "SDC" - self._dashboards_api_version = 'v3' - self._dashboards_api_endpoint = '/api/{}/dashboards'.format(self._dashboards_api_version) - self._default_dashboards_api_endpoint = '/api/{}/dashboards/templates'.format(self._dashboards_api_version) - - def get_views_list(self): - res = requests.get(self.url + self._default_dashboards_api_endpoint, headers=self.hdrs, - verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - return [True, res.json()] - - def get_view(self, name): - gvres = self.get_views_list() - if gvres[0] is False: - return gvres - - vlist = gvres[1]['dashboardTemplates'] - - id = None - - for v in vlist: - if v['name'] == name: - id = v['dashboardId'] - break - - if not id: - return [False, 'view ' + name + ' not found'] - - res = requests.get(self.url + 
self._default_dashboards_api_endpoint + '/' + id, headers=self.hdrs, - verify=self.ssl_verify) - return self._request_result(res) - - def get_dashboards(self): - '''**Description** - Return the list of dashboards available under the given user account. This includes the dashboards created by the user and the ones shared with her by other users. - - **Success Return Value** - A dictionary containing the list of available sampling intervals. - - **Example** - `examples/list_dashboards.py `_ - ''' - res = requests.get(self.url + self._dashboards_api_endpoint, params={"light": "true"}, headers=self.hdrs, - verify=self.ssl_verify) - return self._request_result(res) - - def update_dashboard(self, dashboard_data): - '''**Description** - Updates dashboard with provided in data. Please note that the dictionary will require a valid ID and version field to work as expected. - - **Success Return Value** - A dictionary containing the updated dashboard data. - - **Example** - `examples/dashboard_basic_crud.py `_ - ''' - res = requests.put(self.url + self._dashboards_api_endpoint + "/" + str(dashboard_data['id']), - headers=self.hdrs, verify=self.ssl_verify, data=json.dumps({'dashboard': dashboard_data})) - return self._request_result(res) - - def find_dashboard_by(self, name=None): - '''**Description** - Finds dashboards with the specified name. You can then delete the dashboard (with :func:`~SdcClient.delete_dashboard`) or edit panels (with :func:`~SdcClient.add_dashboard_panel` and :func:`~SdcClient.remove_dashboard_panel`) - - **Arguments** - - **name**: the name of the dashboards to find. - - **Success Return Value** - A list of dictionaries of dashboards matching the specified name. 
- - **Example** - `examples/dashboard.py `_ - ''' - res = self.get_dashboards() - if res[0] is False: - return res - else: - def filter_fn(configuration): - return configuration['name'] == name - - def create_item(configuration): - return {'dashboard': configuration} - - dashboards = list(map(create_item, list(filter(filter_fn, res[1]['dashboards'])))) - return [True, dashboards] - - def create_dashboard_with_configuration(self, configuration): - # Remove id and version properties if already set - configuration_clone = copy.deepcopy(configuration) - if 'id' in configuration_clone: - del configuration_clone['id'] - if 'version' in configuration_clone: - del configuration_clone['version'] - - res = requests.post(self.url + self._dashboards_api_endpoint, headers=self.hdrs, - data=json.dumps({'dashboard': configuration_clone}), - verify=self.ssl_verify) - return self._request_result(res) - - def create_dashboard(self, name): - ''' - **Description** - Creates an empty dashboard. You can then add panels by using ``add_dashboard_panel``. - - **Arguments** - - **name**: the name of the dashboard that will be created. - - **Success Return Value** - A dictionary showing the details of the new dashboard. 
- - **Example** - `examples/dashboard.py `_ - ''' - dashboard_configuration = { - 'name': name, - 'schema': 3, - 'widgets': [], - 'eventsOverlaySettings': { - 'filterNotificationsUserInputFilter': '' - }, - 'layout': [], - 'panels': [], - } - - # - # Create the new dashboard - # - res = requests.post(self.url + self._dashboards_api_endpoint, headers=self.hdrs, - data=json.dumps({'dashboard': dashboard_configuration}), - verify=self.ssl_verify) - return self._request_result(res) - - # TODO COVER - def add_dashboard_panel(self, dashboard, panel_name, visualization, query): - dboard = copy.deepcopy(dashboard) - new_panel_id = dboard["panels"][-1]["id"] + 1 - new_panel = { - "id": new_panel_id, - "type": visualization, - "name": panel_name, - "description": "", - "advancedQueries": [ - { - "enabled": True, - "displayInfo": { - "displayName": "", - "timeSeriesDisplayNameTemplate": "", - "type": "lines" - }, - "format": { - "unit": "%", - "inputFormat": "0-100", - "displayFormat": "auto", - "decimals": None, - "yAxis": "auto" - }, - "query": query - } - ] - } - new_layout = { - "panelId": new_panel_id, - "x": 0, - # Hackish way to position a panel, the API doesn't provide auto-position - "y": len(dboard["panels"]) * 12 + 12, - "w": 12, - "h": 6, - } - - if visualization == PANEL_VISUALIZATION_TIMECHART: - new_panel["axesConfiguration"] = { - "bottom": { - "enabled": True - }, - "left": { - "enabled": True, - "displayName": None, - "unit": "auto", - "displayFormat": "auto", - "decimals": None, - "minValue": 0, - "maxValue": None, - "minInputFormat": "0-100", - "maxInputFormat": "0-100", - "scale": "linear" - }, - "right": { - "enabled": True, - "displayName": None, - "unit": "auto", - "displayFormat": "auto", - "decimals": None, - "minValue": 0, - "maxValue": None, - "minInputFormat": "1", - "maxInputFormat": "1", - "scale": "linear" - } - } - new_panel["legendConfiguration"] = { - "enabled": True, - "position": "right", - "layout": "table", - "showCurrent": True - } - if 
visualization == PANEL_VISUALIZATION_NUMBER: - new_panel["numberThresholds"] = { - "values": [], - "base": { - "severity": "none", - "displayText": "", - } - } - - dboard["panels"].append(new_panel) - dboard["layout"].append(new_layout) - - return self.update_dashboard(dboard) - - # TODO COVER - def remove_dashboard_panel(self, dashboard, panel_id): - dboard = copy.deepcopy(dashboard) - dboard["panels"] = [panel for panel in dboard["panels"] if panel["id"] != panel_id] - dboard["layout"] = [layout for layout in dboard["layout"] if layout["panelId"] != panel_id] - - return self.update_dashboard(dboard) - - def create_dashboard_from_template(self, dashboard_name, template, scope=None, shared=False, public=False): - if scope is not None: - if not isinstance(scope, list) and not isinstance(scope, str): - return [False, 'Invalid scope format: Expected a list, a string or None'] - else: - scope = [] - - # - # Clean up the dashboard we retireved so it's ready to be pushed - # - template['id'] = None - template['version'] = None - template['schema'] = 3 - template['name'] = dashboard_name - template['shared'] = shared - template['public'] = public - template['publicToken'] = None - - # default dashboards don't have eventsOverlaySettings property - # make sure to add the default set if the template doesn't include it - if 'eventsOverlaySettings' not in template or not template['eventsOverlaySettings']: - template['eventsOverlaySettings'] = { - 'filterNotificationsUserInputFilter': '' - } - - # set dashboard scope to the specific parameter - template['scopeExpressionList'] = [] - if isinstance(scope, list): - for s in scope: - ok, converted_scope = convert_scope_string_to_expression(s) - if not ok: - return ok, converted_scope - template['scopeExpressionList'].append(converted_scope[0]) - elif isinstance(scope, str): - ok, converted_scope = convert_scope_string_to_expression(scope) - if not ok: - return ok, converted_scope - template['scopeExpressionList'] = converted_scope 
- - - # NOTE: Individual panels might override the dashboard scope, the override will NOT be reset - if 'widgets' in template and template['widgets'] is not None: - for chart in template['widgets']: - if 'overrideScope' not in chart: - chart['overrideScope'] = False - - if chart['overrideScope'] == False: - # patch frontend bug to hide scope override warning even when it's not really overridden - chart['scope'] = scope - - if chart['showAs'] != 'map': - # if chart scope is equal to dashboard scope, set it as non override - chart_scope = chart['scope'] if 'scope' in chart else None - chart['overrideScope'] = chart_scope != scope - else: - # topology panels must override the scope - chart['overrideScope'] = True - - # - # Create the new dashboard - # - res = requests.post(self.url + self._dashboards_api_endpoint, headers=self.hdrs, - data=json.dumps({'dashboard': template}), verify=self.ssl_verify) - - return self._request_result(res) - - def create_dashboard_from_file(self, dashboard_name, filename, filter=None, shared=False, public=False): - ''' - **Description** - Create a new dasboard using a dashboard template saved to disk. See :func:`~SdcClient.save_dashboard_to_file` to use the file to create a dashboard (usefl to create and restore backups). - - The file can contain the following JSON formats: - 1. dashboard object in the format of an array element returned by :func:`~SdcClient.get_dashboards` - 2. JSON object with the following properties: - * version: dashboards API version (e.g. 'v2') - * dashboard: dashboard object in the format of an array element returned by :func:`~SdcClient.get_dashboards` - - **Arguments** - - **dashboard_name**: the name of the dashboard that will be created. - - **filename**: name of a file containing a JSON object - - **filter**: a boolean expression combining Sysdig Monitor segmentation criteria defines what the new dasboard will be applied to. For example: *kubernetes.namespace.name='production' and container.image='nginx'*. 
- - **shared**: if set to True, the new dashboard will be a shared one. - - **public**: if set to True, the new dashboard will be shared with public token. - - **Success Return Value** - A dictionary showing the details of the new dashboard. - - **Example** - `examples/dashboard_save_load.py `_ - ''' - # - # Load the Dashboard - # - with open(filename) as data_file: - loaded_object = json.load(data_file) - - # - # Handle old files - # - if 'dashboard' not in loaded_object: - loaded_object = { - 'version': f'v{loaded_object["schema"]}', - 'dashboard': loaded_object - } - - dashboard = loaded_object['dashboard'] - - if loaded_object['version'] != self._dashboards_api_version: - # - # Convert the dashboard (if possible) - # - conversion_result, dashboard = convert_dashboard_between_versions(dashboard, - loaded_object['version'], - self._dashboards_api_version) - - if not conversion_result: - return conversion_result, dashboard - - # - # Create the new dashboard - # - return self.create_dashboard_from_template(dashboard_name, dashboard, filter, shared, public) - - def get_dashboard(self, dashboard_id): - '''**Description** - Return a dashboard with the pased in ID. This includes the dashboards created by the user and the ones shared with them by other users. - - **Success Return Value** - A dictionary containing the requested dashboard data. - - **Example** - `examples/dashboard_basic_crud.py `_ - ''' - res = requests.get(self.url + self._dashboards_api_endpoint + "/" + str(dashboard_id), headers=self.hdrs, - verify=self.ssl_verify) - return self._request_result(res) - - def create_dashboard_from_dashboard(self, newdashname, templatename, filter=None, shared=False, public=False): - '''**Description** - Create a new dasboard using one of the existing dashboards as a template. You will be able to define the scope of the new dasboard. - - **Arguments** - - **newdashname**: the name of the dashboard that will be created. 
- - **viewname**: the name of the dasboard to use as the template, as it appears in the Sysdig Monitor dashboard page. - - **filter**: a boolean expression combining Sysdig Monitor segmentation criteria defines what the new dasboard will be applied to. For example: *kubernetes.namespace.name='production' and container.image='nginx'*. - - **shared**: if set to True, the new dashboard will be a shared one. - - **public**: if set to True, the new dashboard will be shared with public token. - - **Success Return Value** - A dictionary showing the details of the new dashboard. - - **Example** - `examples/create_dashboard.py `_ - ''' - # - # Get the list of dashboards from the server - # - dashboard = requests.get(self.url + self._dashboards_api_endpoint, params={"light": "true"}, headers=self.hdrs, - verify=self.ssl_verify) - if not self._checkResponse(dashboard): - return [False, self.lasterr] - - j = dashboard.json() - - # - # Find our template dashboard - # - dboard = None - - for db in j['dashboards']: - if db['name'] == templatename: - dboard = db - break - - if dboard is None: - self.lasterr = 'can\'t find dashboard ' + templatename + ' to use as a template' - return [False, self.lasterr] - - ok, dboard = self.get_dashboard(dboard["id"]) - if not ok: - return ok, dboard - # - # Create the dashboard - # - return self.create_dashboard_from_template(newdashname, dboard["dashboard"], filter, shared, public) - - def favorite_dashboard(self, dashboard_id, favorite): - data = {"dashboard": {"favorite": favorite}} - res = requests.patch(self.url + self._dashboards_api_endpoint + "/" + str(dashboard_id), json=data, - headers=self.hdrs, verify=self.ssl_verify) - return self._request_result(res) - - def share_dashboard_with_all_teams(self, dashboard, mode="r"): - role = "ROLE_RESOURCE_READ" if mode == "r" else "ROLE_RESOURCE_EDIT" - dboard = copy.deepcopy(dashboard) - dboard["sharingSettings"] = [ - { - "member": { - "type": "USER_TEAMS", - }, - "role": role, - } - ] - 
dboard["shared"] = True - - return self.update_dashboard(dboard) - - def unshare_dashboard(self, dashboard): - dboard = copy.deepcopy(dashboard) - dboard["sharingSettings"] = [] - dboard["shared"] = False - - return self.update_dashboard(dboard) - - def share_dashboard_with_team(self, dashboard, team_id, mode="r"): - role = "ROLE_RESOURCE_READ" if mode == "r" else "ROLE_RESOURCE_EDIT" - dboard = copy.deepcopy(dashboard) - - if dboard["sharingSettings"] is None: - dboard["sharingSettings"] = [] - - dboard["sharingSettings"].append({ - "member": { - "type": "TEAM", - "id": team_id, - }, - "role": role, - }) - dboard["shared"] = True - - return self.update_dashboard(dboard) - - def create_dashboard_from_view(self, newdashname, viewname, filter, shared=False, public=False): - '''**Description** - Create a new dasboard using one of the Sysdig Monitor views as a template. You will be able to define the scope of the new dashboard. - - **Arguments** - - **newdashname**: the name of the dashboard that will be created. - - **viewname**: the name of the view to use as the template for the new dashboard. This corresponds to the name that the view has in the Explore page. - - **filter**: a boolean expression combining Sysdig Monitor segmentation criteria that defines what the new dasboard will be applied to. For example: *kubernetes.namespace.name='production' and container.image='nginx'*. - - **shared**: if set to True, the new dashboard will be a shared one. - - **public**: if set to True, the new dashboard will be shared with public token. - - **Success Return Value** - A dictionary showing the details of the new dashboard. 
- - **Example** - `examples/create_dashboard.py `_ - ''' - # - # Find our template view - # - gvres = self.get_view(viewname) - if gvres[0] is False: - return gvres - - view = gvres[1]['dashboard'] - - view['timeMode'] = {'mode': 1} - view['time'] = {'last': 2 * 60 * 60 * 1000000, 'sampling': 2 * 60 * 60 * 1000000} - - # - # Create the new dashboard - # - return self.create_dashboard_from_template(newdashname, view, filter, shared, public) - - def save_dashboard_to_file(self, dashboard, filename): - ''' - **Description** - Save a dashboard to disk. See :func:`~SdcClient.create_dashboard_from_file` to use the file to create a dashboard (usefl to create and restore backups). - - The file will contain a JSON object with the following properties: - * version: dashboards API version (e.g. 'v2') - * dashboard: dashboard object in the format of an array element returned by :func:`~SdcClient.get_dashboards` - - **Arguments** - - **dashboard**: dashboard object in the format of an array element returned by :func:`~SdcClient.get_dashboards` - - **filename**: name of a file that will contain a JSON object - - **Example** - `examples/dashboard_save_load.py `_ - ''' - with open(filename, 'w') as outf: - json.dump({ - 'version': self._dashboards_api_version, - 'dashboard': dashboard - }, outf) - - def delete_dashboard(self, dashboard): - '''**Description** - Deletes a dashboard. - - **Arguments** - - **dashboard**: the dashboard object as returned by :func:`~SdcClient.get_dashboards`. - - **Success Return Value** - `None`. 
- - **Example** - `examples/delete_dashboard.py `_ - ''' - if 'id' not in dashboard: - return [False, "Invalid dashboard format"] - - res = requests.delete(self.url + self._dashboards_api_endpoint + '/' + str(dashboard['id']), headers=self.hdrs, - verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - - return [True, None] diff --git a/sdcclient/monitor/_events_v1.py b/sdcclient/monitor/_events_v1.py deleted file mode 100644 index d7587534..00000000 --- a/sdcclient/monitor/_events_v1.py +++ /dev/null @@ -1,92 +0,0 @@ -import json - -import requests - -from sdcclient._common import _SdcCommon - - -class EventsClientV1(_SdcCommon): - def __init__(self, token="", sdc_url='https://app.sysdigcloud.com', ssl_verify=True, custom_headers=None): - super().__init__(token, sdc_url, ssl_verify, custom_headers) - self.product = "SDC" - - def get_events(self, from_s=None, to_s=None, last_s=None): - '''**Description** - Returns the list of Sysdig Monitor events. - - **Arguments** - - **name**: filter events by name. Default: None. - - **category**: filter events by category. Default: ['alert', 'custom', 'docker', 'containerd', 'kubernetes']. - - **direction**: orders the list of events. Valid values: "before", "after". Default: "before". - - **status**: status of the event as list. Default: ['triggered', 'resolved', 'acknowledged', 'unacknowledged'] - - **limit**: max number of events to retrieve. Default: 100. - - **pivot**: event id to use as pivot. Default: None. - - **Success Return Value** - A dictionary containing the list of events. 
- - **Example** - `examples/list_events.py `_ - ''' - - options = { - "from": from_s, - "to": to_s, - "last": last_s, - } - params = {k: v for k, v in options.items() if v is not None} - res = requests.get(self.url + '/api/events/', headers=self.hdrs, params=params, verify=self.ssl_verify) - return self._request_result(res) - - def post_event(self, name, description=None, severity=None, event_filter=None, tags=None): - '''**Description** - Send an event to Sysdig Monitor. The events you post are available in the Events tab in the Sysdig Monitor UI and can be overlied to charts. - - **Arguments** - - **name**: the name of the new event. - - **description**: a longer description offering detailed information about the event. - - **severity**: syslog style from 0 (high) to 7 (low). - - **event_filter**: metadata, in Sysdig Monitor format, of nodes to associate with the event, e.g. ``host.hostName = 'ip-10-1-1-1' and container.name = 'foo'``. - - **tags**: a list of key-value dictionaries that can be used to tag the event. Can be used for filtering/segmenting purposes in Sysdig Monitor. - - **Success Return Value** - A dictionary describing the new event. - - **Examples** - - `examples/post_event_simple.py `_ - - `examples/post_event.py `_ - ''' - options = { - 'name': name, - 'description': description, - 'severity': severity, - 'filter': event_filter, - 'tags': tags - } - edata = { - 'event': {k: v for k, v in options.items() if v is not None} - } - res = requests.post(self.url + '/api/events/', headers=self.hdrs, data=json.dumps(edata), - verify=self.ssl_verify) - return self._request_result(res) - - def delete_event(self, event): - '''**Description** - Deletes an event. - - **Arguments** - - **event**: the event object as returned by :func:`~SdcClient.get_events`. - - **Success Return Value** - `None`. 
- - **Example** - `examples/delete_event.py `_ - ''' - if 'id' not in event: - return [False, "Invalid event format"] - - res = requests.delete(self.url + '/api/events/' + str(event['id']), headers=self.hdrs, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - return [True, None] diff --git a/sdcclient/monitor/_events_v2.py b/sdcclient/monitor/_events_v2.py deleted file mode 100644 index 9dab3ae9..00000000 --- a/sdcclient/monitor/_events_v2.py +++ /dev/null @@ -1,118 +0,0 @@ -import json - -import requests - -from sdcclient._common import _SdcCommon - - -class EventsClientV2(_SdcCommon): - def __init__(self, token="", sdc_url='https://app.sysdigcloud.com', ssl_verify=True, custom_headers=None): - super().__init__(token, sdc_url, ssl_verify, custom_headers) - self.product = "SDC" - - def get_events(self, name=None, category=None, direction='before', status=None, limit=100, pivot=None): - '''**Description** - Returns the list of Sysdig Monitor events. - - **Arguments** - - **name**: filter events by name. Default: None. - - **category**: filter events by category. Default: ['alert', 'custom', 'docker', 'containerd', 'kubernetes']. - - **direction**: orders the list of events. Valid values: "before", "after". Default: "before". - - **status**: status of the event as list. Default: ['triggered', 'resolved', 'acknowledged', 'unacknowledged'] - - **limit**: max number of events to retrieve. Default: 100. - - **pivot**: event id to use as pivot. Default: None. - - **Success Return Value** - A dictionary containing the list of events. 
- - **Example** - `examples/list_events.py `_ - ''' - valid_categories = ['alert', 'custom', 'docker', 'containerd', 'kubernetes'] - - if category is None: - category = valid_categories - - for c in category: - if c not in valid_categories: - return False, "Invalid category '{}'".format(c) - - valid_status = ["triggered", "resolved", "acknowledged", "unacknowledged"] - if status is None: - status = valid_status - - for s in status: - if s not in valid_status: - return False, "Invalid status '{}'".format(s) - - if direction not in ["before", "after"]: - return False, "Invalid direction '{}', must be either 'before' or 'after'".format(direction) - - options = { - 'alertStatus': status, - 'category': ','.join(category), - 'dir': direction, - 'feed': 'true', - 'include_pivot': 'true', - 'include_total': 'true', - 'limit': str(limit), - 'pivot': pivot, - 'filter': name, - } - params = {k: v for k, v in options.items() if v is not None} - res = requests.get(self.url + '/api/v2/events/', headers=self.hdrs, params=params, verify=self.ssl_verify) - return self._request_result(res) - - def delete_event(self, event): - '''**Description** - Deletes an event. - - **Arguments** - - **event**: the event object as returned by :func:`~SdcClient.get_events`. - - **Success Return Value** - `None`. - - **Example** - `examples/delete_event.py `_ - ''' - if 'id' not in event: - return [False, "Invalid event format"] - - res = requests.delete(self.url + '/api/v2/events/' + str(event['id']), headers=self.hdrs, - verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - return [True, None] - - def post_event(self, name, description=None, severity=None, event_filter=None, tags=None): - '''**Description** - Send an event to Sysdig Monitor. The events you post are available in the Events tab in the Sysdig Monitor UI and can be overlied to charts. - - **Arguments** - - **name**: the name of the new event. 
import tatsu


def convert_scope_string_to_expression(scope=None):
    """Convert a Sysdig scope string into a v2 dashboard scope-expression list.

    **Arguments**
        - **scope**: a scope string such as
          ``host.hostName is 'foo' and container.name in [a, b]``.
          ``None`` or an empty string means "no scope".

    **Returns**
        ``[True, expression_list]`` on success (``expression_list`` may be
        empty), or ``[False, error_message]`` when the scope cannot be parsed.
    """
    if scope is None or not scope:
        return [True, []]

    # Raw string so regex escapes like \w reach the tatsu compiler unmodified.
    _SCOPE_GRAMMAR = r"""
    @@grammar::CALC

    start = expression $ ;

    expression
        =
        | operand simple_operator word
        | operand multiple_operator multiple_value
        ;

    simple_operator
        =
        | 'is not'
        | 'is'
        | 'contains'
        | 'does not contain'
        | 'starts with'
        | '='
        ;

    multiple_operator
        =
        | 'not in'
        | 'in'
        ;

    operand = /[\w\.]+/ ;

    multiple_value
        =
        | '[' word_array ']'
        | word
        ;

    word_array
        =
        | word ',' word_array
        | word
        ;

    word =
        | /[\w\.]+/
        | '"' /[\w\.]+/ '"'
        | "'" /[\w\.]+/ "'"
        ;
    """

    def flatten(S):
        # Recursively flatten the nested lists/tuples produced by the parser.
        if S == [] or S == ():
            return list(S)
        if isinstance(S[0], (list, tuple)):
            return flatten(S[0]) + flatten(S[1:])
        return list(S[:1]) + flatten(S[1:])

    # Map from scope-string operators to the v2 dashboard operator names.
    operator_match = {
        "is": "equals",
        "=": "equals",
        "is not": "notEquals",
        "in": "in",
        "not in": "notIn",
        "contains": "contains",
        "does not contain": "notContains",
        "starts with": "startsWith",
    }

    expression = None  # current sub-expression, reported on parse failure
    try:
        grammar = tatsu.compile(_SCOPE_GRAMMAR)
        scope_list = []

        # The grammar recognizes a single comparison, so split on 'and' first.
        for expression in scope.strip(' \t\n\r').split(' and '):
            operand, parsed_operator, value = grammar.parse(expression)

            if isinstance(value, tuple):
                value = flatten(value)
                if len(value) > 1:
                    value = list(value[1:-1])  # Remove '[' and ']'
                    value = [elem for elem in value if elem != ',']  # Remove ','
            else:
                value = [value]

            operator = operator_match.get(parsed_operator, "")

            scope_list.append({
                'displayName': "",
                "isVariable": False,
                'operand': operand,
                'operator': operator,
                'value': value
            })
        return [True, scope_list]
    except Exception as ex:
        # BUG FIX: Python 3 exceptions have no `.message` attribute, so the
        # original `ex.message` raised AttributeError and hid the real error.
        # Formatting the exception itself preserves its message text.
        return [False, f"invalid scope: {expression or scope}, {ex}"]
def _convert_dashboard_v1_to_v2(dashboard):
    """Convert a v1 dashboard definition into the v2 format.

    **Arguments**
        - **dashboard**: the v1 dashboard configuration (a dict).

    **Returns**
        ``(True, migrated_dashboard)``.

    **Raises**
        SyntaxError when the v1 scope string cannot be parsed.
    """
    #
    # Migrations
    #
    # Each converter function takes:
    #   1. name of the v1 dashboard property
    #   2. v1 dashboard configuration
    #   3. v2 dashboard configuration
    # and applies changes to the v2 configuration according to the v1 one.
    # Converter return values are ignored; they work by side effect.
    #
    def when_set(converter):
        # Run `converter` only when the property is present and not None.
        def fn(prop_name, old_obj, new_obj):
            if prop_name in old_obj and old_obj[prop_name] is not None:
                converter(prop_name, old_obj, new_obj)

        return fn

    def with_default(converter, default=None):
        # Seed the property with `default` when missing, then convert.
        def fn(prop_name, old_obj, new_obj):
            if prop_name not in old_obj:
                old_obj[prop_name] = default

            converter(prop_name, old_obj, new_obj)

        return fn

    def keep_as_is(prop_name, old_obj, new_obj):
        # Copy the property unchanged.
        new_obj[prop_name] = old_obj[prop_name]

    def drop_it(prop_name=None, old_obj=None, new_obj=None):
        # The property has no v2 equivalent.
        pass

    def ignore(prop_name=None, old_obj=None, new_obj=None):
        # The property is handled by another converter.
        pass

    def rename_to(new_prop_name):
        # Copy the property under a new name.
        def rename(prop_name, old_obj, new_obj):
            new_obj[new_prop_name] = old_obj[prop_name]

        return rename

    def convert_schema(prop_name, old_dashboard, new_dashboard):
        new_dashboard[prop_name] = 2

    def convert_scope(prop_name, old_dashboard, new_dashboard):
        # Translate the v1 scope string into a v2 expression list.
        scope = old_dashboard[prop_name]
        ok, parsed = convert_scope_string_to_expression(scope)

        if not ok:
            raise SyntaxError('scope not supported by the current grammar')

        # The v2 property must be either `null` or a non-empty array.
        new_dashboard['scopeExpressionList'] = parsed if parsed else None

    def convert_events_filter(prop_name, old_dashboard, new_dashboard):
        rename_to('eventsOverlaySettings')(prop_name, old_dashboard, new_dashboard)

        # These v1-only flags have no v2 equivalent.
        new_dashboard['eventsOverlaySettings'].pop('showNotificationsDoNotFilterSameMetrics', None)
        new_dashboard['eventsOverlaySettings'].pop('showNotificationsDoNotFilterSameScope', None)

    def convert_items(prop_name, old_dashboard, new_dashboard):
        def convert_color_coding(prop_name, old_widget, new_widget):
            best_value = None
            worst_value = None
            for item in old_widget[prop_name]['thresholds']:
                # NOTE(review): the max/min fallback below looks inverted
                # (a falsy 'max' keeps 'max', otherwise 'min' is used), but
                # it is kept as-is to preserve the original conversion.
                if item['color'] == 'best':
                    best_value = item['max'] if not item['max'] else item['min']
                elif item['color'] == 'worst':
                    worst_value = item['min'] if not item['min'] else item['max']

            if best_value is not None and worst_value is not None:
                new_widget[prop_name] = {
                    'best': best_value,
                    'worst': worst_value
                }

        def convert_display_options(prop_name, old_widget, new_widget):
            keep_as_is(prop_name, old_widget, new_widget)

            # v2 has no yAxisScaleFactor concept.
            if 'yAxisScaleFactor' in new_widget[prop_name]:
                del new_widget[prop_name]['yAxisScaleFactor']

        def convert_group(prop_name, old_widget, new_widget):
            # Flatten the single v1 group configuration into label ids.
            group_by_metrics = old_widget[prop_name]['configuration']['groups'][0]['groupBy']

            new_widget['groupingLabelIds'] = [{'id': metric['metric']} for metric in group_by_metrics]

        def convert_override_filter(prop_name, old_widget, new_widget):
            if old_widget['showAs'] == 'map':
                # Map widgets always override the scope when one is set.
                new_widget['overrideScope'] = True
            else:
                new_widget['overrideScope'] = old_widget[prop_name]

        def convert_name(prop_name, old_widget, new_widget):
            # Enforce a unique panel name (on the old dashboard, before
            # migration): panels earlier in the list with the same name
            # force a "name (N)" suffix on this one.
            unique_id = 1
            name = old_widget[prop_name]

            for widget in old_dashboard['items']:
                if widget == old_widget:
                    break

                if old_widget[prop_name] == widget[prop_name]:
                    old_widget[prop_name] = '{} ({})'.format(name, unique_id)
                    unique_id += 1

            keep_as_is(prop_name, old_widget, new_widget)

        def convert_metrics(prop_name, old_widget, new_widget):
            def convert_property_name(prop_name, old_metric, new_metric):
                keep_as_is(prop_name, old_metric, new_metric)

                if old_metric['metricId'] == 'timestamp':
                    return 'k0'

            metric_migrations = {
                'metricId': rename_to('id'),
                'aggregation': rename_to('timeAggregation'),
                'groupAggregation': rename_to('groupAggregation'),
                'propertyName': convert_property_name
            }

            migrated_metrics = []
            for old_metric in old_widget[prop_name]:
                migrated_metric = {}

                for key in metric_migrations.keys():
                    if key in old_metric:
                        metric_migrations[key](key, old_metric, migrated_metric)

                migrated_metrics.append(migrated_metric)

            # Property name convention:
            #   timestamp: k0 (if present)
            #   other keys: k* (from 0 or 1, depending on timestamp)
            #   values: v* (from 0)
            def is_key(metric):
                # A "key" metric is one without a time aggregation applied.
                return 'timeAggregation' not in metric or metric['timeAggregation'] is None

            # BUG FIX: the original comprehensions mixed `and`/`or` without
            # parentheses, so `m['timeAggregation']` could be evaluated on
            # metrics that lack that property, raising KeyError.
            sorted_metrics = []
            timestamp_key = [m for m in migrated_metrics
                             if m['id'] == 'timestamp' and is_key(m)]
            no_timestamp_keys = [m for m in migrated_metrics
                                 if m['id'] != 'timestamp' and is_key(m)]
            values = [m for m in migrated_metrics if not is_key(m)]
            if timestamp_key:
                timestamp_key[0]['propertyName'] = 'k0'
                sorted_metrics.append(timestamp_key[0])
            k_offset = 1 if timestamp_key else 0
            for i, metric in enumerate(no_timestamp_keys):
                metric['propertyName'] = 'k{}'.format(i + k_offset)
                sorted_metrics.append(metric)
            for i, metric in enumerate(values):
                metric['propertyName'] = 'v{}'.format(i)
                sorted_metrics.append(metric)

            new_widget['metrics'] = sorted_metrics

        widget_migrations = {
            'colorCoding': when_set(convert_color_coding),
            'compareToConfig': when_set(keep_as_is),
            'customDisplayOptions': with_default(convert_display_options, {}),
            'gridConfiguration': keep_as_is,
            'group': when_set(convert_group),
            'hasTransparentBackground': when_set(rename_to('transparentBackground')),
            'limitToScope': when_set(keep_as_is),
            'isPanelTitleVisible': when_set(rename_to('panelTitleVisible')),
            'markdownSource': when_set(keep_as_is),
            'metrics': with_default(convert_metrics, []),
            'name': with_default(convert_name, 'Panel'),
            'overrideFilter': convert_override_filter,
            'paging': drop_it,
            'scope': with_default(keep_as_is, None),
            'showAs': keep_as_is,
            'showAsType': drop_it,
            'sorting': drop_it,
            'textpanelTooltip': when_set(keep_as_is),
        }

        migrated_widgets = []
        for old_widget in old_dashboard[prop_name]:
            migrated_widget = {}

            for key in widget_migrations.keys():
                widget_migrations[key](key, old_widget, migrated_widget)

            migrated_widgets.append(migrated_widget)

        new_dashboard['widgets'] = migrated_widgets
        # BUG FIX: the original returned the outer, partially built `migrated`
        # dict from here; converter return values are ignored, so no return.

    migrations = {
        'autoCreated': keep_as_is,
        'createdOn': keep_as_is,
        'eventsFilter': with_default(convert_events_filter, {
            'filterNotificationsUserInputFilter': ''
        }),
        'filterExpression': convert_scope,
        'scopeExpressionList': ignore,  # scope is generated from 'filterExpression'
        'id': keep_as_is,
        'isPublic': rename_to('public'),
        'isShared': rename_to('shared'),
        'items': convert_items,
        'layout': drop_it,
        'modifiedOn': keep_as_is,
        'name': keep_as_is,
        'publicToken': drop_it,
        'schema': convert_schema,
        'teamId': keep_as_is,
        'username': keep_as_is,
        'version': keep_as_is,
    }

    #
    # Apply migrations: each converter gets its own deep copy of the source
    # dashboard so converters cannot interfere with one another.
    #
    migrated = {}
    for key in migrations.keys():
        migrations[key](key, copy.deepcopy(dashboard), migrated)

    return True, migrated


# Registry of supported conversions: target version -> source version -> converter
_DASHBOARD_CONVERTERS = {
    'v2': {
        'v1': _convert_dashboard_v1_to_v2
    }
}


def convert_dashboard_between_versions(dashboard, version_from, version_to):
    '''
    **Description**
        Converts a dashboard from a version to another version.
        Current conversions supported:
        - v1 -> v2

    **Arguments**
        - **dashboard**: the dashboard object to convert
        - **version_from**: the version of the original dashboard to convert from
        - **version_to**: the version of the wanted dashboard

    **Success Return Value**
        A tuple ``(True, converted_dashboard)``; on failure,
        ``(False, error_message)``.
    '''
    converters_to = _DASHBOARD_CONVERTERS.get(version_to, None)
    if converters_to is None:
        return False, f'unexpected error: no dashboard converters from version {version_to} are supported'

    converter = converters_to.get(version_from, None)

    if converter is None:
        return False, 'dashboard version {} cannot be converted to {}'.format(version_from, version_to)

    try:
        return converter(dashboard)
    except Exception as err:
        return False, str(err)
    def __init__(self, token="", sdc_url='https://secure.sysdig.com', ssl_verify=True, custom_headers=None):
        """Client for the old (pre-v1) Sysdig Secure policy-events API.

        All arguments are forwarded to ``_SdcCommon``: API token, Secure
        backend URL, TLS-verification flag and extra HTTP headers.
        """
        super(PolicyEventsClientOld, self).__init__(token, sdc_url, ssl_verify, custom_headers)

        self.customer_id = None
        self.product = "SDS"  # product code used by the common request layer
        self._policy_v2 = None

    def _get_policy_events_int(self, ctx):
        # Shared worker for the public get_policy_events_* methods: builds the
        # query string from the paging context and performs one page fetch.
        warn("The PolicyEventsClientOld class is deprecated in favour of PolicyEventsClientV1; use it only if you have "
             "an old on-premises installation", DeprecationWarning, 3)
        policy_events_url = self.url + '/api/policyEvents{id}?from={frm:d}&to={to:d}&offset={offset}&limit={limit}{sampling}{aggregations}{scope}{filter}'.format(
            id="/%s" % ctx["id"] if "id" in ctx else "",
            frm=int(ctx['from']),
            to=int(ctx['to']),
            offset=ctx['offset'],
            limit=ctx['limit'],
            sampling='&sampling=%d' % int(ctx['sampling']) if "sampling" in ctx else "",
            aggregations='&aggregations=%s' % json.dumps(ctx['aggregations']) if "aggregations" in ctx else "",
            scope='&scopeFilter=%s' % ctx['scopeFilter'] if "scopeFilter" in ctx else "",
            filter='&eventFilter=%s' % ctx['eventFilter'] if "eventFilter" in ctx else "")

        res = requests.get(policy_events_url, headers=self.hdrs, verify=self.ssl_verify)
        if not self._checkResponse(res):
            return [False, self.lasterr]

        # Increment the offset by limit so the returned context points at the next page
        ctx['offset'] += ctx['limit']

        return [True, {"ctx": ctx, "data": res.json()}]

    def get_policy_events_range(self, from_sec, to_sec, sampling=None, aggregations=None, scope_filter=None,
                                event_filter=None):
        '''**Description**
            Fetch all policy events that occurred in the time range [from_sec:to_sec]. This method is used in
            conjunction with :func:`~sdcclient.SdSecureClient.get_more_policy_events` to provide paginated access
            to policy events.

        **Arguments**
            - from_sec: the start of the time range for which to get events (seconds since the epoch)
            - to_sec: the end of the time range for which to get events (seconds since the epoch)
            - sampling: sample all policy events using *sampling* interval.
            - aggregations: when present, specifies how to aggregate events (*sampling* does not need to be
              specified, because its presence automatically means events will be aggregated). This field can
              either be a list of scope metrics or a list of policy-event fields, but (currently) not a mix of
              the two. When policy-event fields are specified, only these can be used: severity, agentId,
              containerId, policyId, ruleType.
            - scope_filter: a Sysdig Monitor-like filter (e.g. 'container.image=ubuntu'). When provided, events
              are filtered by their scope, so only a subset will be returned (e.g. 'container.image=ubuntu' will
              provide only events that have happened on an ubuntu container).
            - event_filter: a Sysdig Monitor-like filter (e.g. policyEvent.policyId=3). When provided, events are
              filtered by some of their properties. Currently the supported set of filters is policyEvent.all
              (which can be used just with matches), policyEvent.policyId, policyEvent.id, policyEvent.severity,
              policyEvent.ruleType, policyEvent.ruleSubtype.

        **Success Return Value**
            An array containing:
            - A context object that should be passed to later calls to get_more_policy_events.
            - An array of policy events, in JSON format. See
              :func:`~sdcclient.SdSecureClient.get_more_policy_events` for details on the contents of policy
              events.

        **Example**
            `examples/get_secure_policy_events.py `_

        '''
        # Timestamps are sent in microseconds; None-valued options are dropped.
        options = {"from": int(from_sec) * 1000000,
                   "to": int(to_sec) * 1000000,
                   "offset": 0,
                   "limit": 1000,
                   "sampling": sampling,
                   "aggregations": aggregations,
                   "scopeFilter": scope_filter,
                   "eventFilter": event_filter}
        ctx = {k: v for k, v in options.items() if v is not None}
        return self._get_policy_events_int(ctx)

    def get_policy_events_duration(self, duration_sec, sampling=None, aggregations=None, scope_filter=None,
                                   event_filter=None):
        '''**Description**
            Fetch all policy events that occurred in the last duration_sec seconds. This method is used in
            conjunction with :func:`~sdcclient.SdSecureClient.get_more_policy_events` to provide paginated access
            to policy events.

        **Arguments**
            - duration_sec: fetch all policy events that have occurred in the last *duration_sec* seconds.
            - sampling, aggregations, scope_filter, event_filter: see
              :func:`~sdcclient.secure.PolicyEventsClientOld.get_policy_events_range` for the meaning of these
              filtering arguments.

        **Success Return Value**
            An array containing:
            - A context object that should be passed to later calls to get_more_policy_events.
            - An array of policy events, in JSON format. See
              :func:`~sdcclient.SdSecureClient.get_more_policy_events` for details on the contents of policy
              events.

        **Example**
            `examples/get_secure_policy_events.py `_
        '''
        # Compute the [now - duration_sec, now] window in microseconds since the epoch.
        epoch = datetime.datetime.utcfromtimestamp(0)
        to_ts = (datetime.datetime.utcnow() - epoch).total_seconds() * 1000 * 1000
        from_ts = to_ts - (int(duration_sec) * 1000 * 1000)

        options = {"to": to_ts,
                   "from": from_ts,
                   "offset": 0,
                   "limit": 1000,
                   "sampling": sampling,
                   "aggregations": aggregations,
                   "scopeFilter": scope_filter,
                   "eventFilter": event_filter}
        ctx = {k: v for k, v in options.items() if v is not None}
        return self._get_policy_events_int(ctx)

    def get_policy_events_id_range(self, id, from_sec, to_sec, sampling=None, aggregations=None, scope_filter=None,
                                   event_filter=None):
        '''**Description**
            Fetch all policy events with the given id that occurred in the time range [from_sec:to_sec]. This
            method is used in conjunction with :func:`~sdcclient.SdSecureClient.get_more_policy_events` to
            provide paginated access to policy events.

        **Arguments**
            - id: the id of the policy events to fetch.
            - from_sec: the start of the time range for which to get events (seconds since the epoch)
            - to_sec: the end of the time range for which to get events (seconds since the epoch)
            - sampling, aggregations, scope_filter, event_filter: see
              :func:`~sdcclient.secure.PolicyEventsClientOld.get_policy_events_range` for the meaning of these
              filtering arguments.

        **Success Return Value**
            An array containing:
            - A context object that should be passed to later calls to get_more_policy_events.
            - An array of policy events, in JSON format. See
              :func:`~sdcclient.SdSecureClient.get_more_policy_events` for details on the contents of policy
              events.

        **Example**
            `examples/get_secure_policy_events.py `_
        '''

        # Timestamps are sent in microseconds; None-valued options are dropped.
        options = {"id": id,
                   "from": int(from_sec) * 1000000,
                   "to": int(to_sec) * 1000000,
                   "offset": 0,
                   "limit": 1000,
                   "sampling": sampling,
                   "aggregations": aggregations,
                   "scopeFilter": scope_filter,
                   "eventFilter": event_filter}
        ctx = {k: v for k, v in options.items() if v is not None}
        return self._get_policy_events_int(ctx)

    def get_policy_events_id_duration(self, id, duration_sec, sampling=None, aggregations=None, scope_filter=None,
                                      event_filter=None):
        '''**Description**
            Fetch all policy events with the given id that occurred in the last duration_sec seconds. This method
            is used in conjunction with :func:`~sdcclient.SdSecureClient.get_more_policy_events` to provide
            paginated access to policy events.

        **Arguments**
            - id: the id of the policy events to fetch.
            - duration_sec: fetch all policy events that have occurred in the last *duration_sec* seconds.
            - sampling, aggregations, scope_filter, event_filter: see
              :func:`~sdcclient.secure.PolicyEventsClientOld.get_policy_events_range` for the meaning of these
              filtering arguments.

        **Success Return Value**
            An array containing:
            - A context object that should be passed to later calls to get_more_policy_events.
            - An array of policy events, in JSON format. See
              :func:`~sdcclient.SdSecureClient.get_more_policy_events` for details on the contents of policy
              events.

        **Example**
            `examples/get_secure_policy_events.py `_
        '''
        # Compute the [now - duration_sec, now] window in microseconds since the epoch.
        epoch = datetime.datetime.utcfromtimestamp(0)
        to_ts = (datetime.datetime.utcnow() - epoch).total_seconds() * 1000 * 1000
        from_ts = to_ts - (int(duration_sec) * 1000 * 1000)

        options = {"id": id,
                   "to": to_ts,
                   "from": from_ts,
                   "offset": 0,
                   "limit": 1000,
                   "sampling": sampling,
                   "aggregations": aggregations,
                   "scopeFilter": scope_filter,
                   "eventFilter": event_filter}
        ctx = {k: v for k, v in options.items() if v is not None}
        return self._get_policy_events_int(ctx)

    def get_more_policy_events(self, ctx):
        '''**Description**
            Fetch additional policy events after an initial call to
            :func:`~sdcclient.SdSecureClient.get_policy_events_range` /
            :func:`~sdcclient.SdSecureClient.get_policy_events_duration` or a prior call to
            get_more_policy_events.

        **Arguments**
            - ctx: a context object returned from an initial call to
              :func:`~sdcclient.SdSecureClient.get_policy_events_range` /
              :func:`~sdcclient.SdSecureClient.get_policy_events_duration` or a prior call to
              get_more_policy_events.

        **Success Return Value**
            An array containing:
            - A context object that should be passed to later calls to get_more_policy_events()
            - An array of policy events, in JSON format. Each policy event contains the following:
                - hostMac: the mac address of the machine where the event occurred
                - severity: a severity level from 1-7
                - timestamp: when the event occurred (ns since the epoch)
                - version: a version number for this message (currently 1)
                - policyId: a reference to the policy that generated this policy event
                - output: a string describing the event that occurred
                - id: a unique identifier for this policy event
                - isAggregated: if true, this is a combination of multiple policy events
                - containerId: the container in which the policy event occurred

            When the number of policy events returned is 0, there are no remaining events and you can stop
            calling get_more_policy_events().

        **Example**
            `examples/get_secure_policy_events.py `_
        '''
        return self._get_policy_events_int(ctx)
import datetime

import requests

from sdcclient._common import _SdcCommon


class PolicyEventsClientV1(_SdcCommon):
    """Client for the v1 Sysdig Secure policy-events API (/api/v1/secureEvents)."""

    def __init__(self, token="", sdc_url='https://secure.sysdig.com', ssl_verify=True, custom_headers=None):
        """All arguments are forwarded to ``_SdcCommon``: API token, Secure
        backend URL, TLS-verification flag and extra HTTP headers.
        """
        super(PolicyEventsClientV1, self).__init__(token, sdc_url, ssl_verify, custom_headers)

        self.customer_id = None
        self.product = "SDS"  # product code used by the common request layer
        self._policy_v2 = None

    def _get_policy_events_int(self, ctx):
        # Shared worker for the public methods: performs one page fetch and
        # returns a fresh context carrying the cursor for the next page.
        limit = ctx.get("limit", 50)
        policy_events_url = self.url + '/api/v1/secureEvents?limit={limit}{frm}{to}{filter}{cursor}'.format(
            limit=limit,
            frm=f"&from={int(ctx['from']):d}" if "from" in ctx else "",
            to=f"&to={int(ctx['to']):d}" if "to" in ctx else "",
            filter=f'&filter={ctx["filter"]}' if "filter" in ctx else "",
            cursor=f'&cursor={ctx["cursor"]}' if "cursor" in ctx else "")

        res = requests.get(policy_events_url, headers=self.hdrs, verify=self.ssl_verify)
        if not self._checkResponse(res):
            return [False, self.lasterr]

        # Carry the backend-provided "prev" page cursor into the next context.
        ctx = {
            "limit": limit,
            "cursor": res.json()["page"].get("prev", None)
        }

        return [True, {"ctx": ctx, "data": res.json()["data"]}]

    def get_policy_events_range(self, from_sec, to_sec, filter=None):
        '''**Description**
            Fetch all policy events that occurred in the time range [from_sec:to_sec]. This method is used in
            conjunction with :func:`~sdcclient.SdSecureClient.get_more_policy_events` to provide paginated access
            to policy events.

        **Arguments**
            - from_sec: the start of the time range for which to get events (seconds since the epoch)
            - to_sec: the end of the time range for which to get events (seconds since the epoch)
            - filter: a Sysdig Monitor-like filter (e.g. filter: 'severity in ("4","5") and freeText in
              ("Suspicious")')

        **Success Return Value**
            An array containing:
            - A context object that should be passed to later calls to get_more_policy_events.
            - An array of policy events, in JSON format. See
              :func:`~sdcclient.SdSecureClient.get_more_policy_events` for details on the contents of policy
              events.

        **Example**
            `examples/get_secure_policy_events.py `_

        '''
        # The v1 API expects nanosecond timestamps; None-valued options are dropped.
        options = {"from": int(from_sec) * 1_000_000_000,
                   "to": int(to_sec) * 1_000_000_000,
                   "limit": 50,
                   "filter": filter}
        ctx = {k: v for k, v in options.items() if v is not None}
        return self._get_policy_events_int(ctx)

    def get_policy_events_duration(self, duration_sec, filter=None):
        '''**Description**
            Fetch all policy events that occurred in the last duration_sec seconds. This method is used in
            conjunction with :func:`~sdcclient.SdSecureClient.get_more_policy_events` to provide paginated access
            to policy events.

        **Arguments**
            - duration_sec: fetch all policy events that have occurred in the last *duration_sec* seconds.
            - filter: a Sysdig Monitor-like filter (e.g. filter: 'severity in ("4","5") and freeText in
              ("Suspicious")')

        **Success Return Value**
            An array containing:
            - A context object that should be passed to later calls to get_more_policy_events.
            - An array of policy events, in JSON format. See
              :func:`~sdcclient.SdSecureClient.get_more_policy_events` for details on the contents of policy
              events.

        **Example**
            `examples/get_secure_policy_events.py `_

        '''
        # Derive the [now - duration_sec, now] window and delegate to the range call.
        to_sec = int((datetime.datetime.utcnow() - datetime.datetime.utcfromtimestamp(0)).total_seconds())
        from_sec = to_sec - (int(duration_sec))

        return self.get_policy_events_range(from_sec, to_sec, filter)

    def get_more_policy_events(self, ctx):
        '''**Description**
            Fetch additional policy events after an initial call to
            :func:`~sdcclient.SdSecureClient.get_policy_events_range` /
            :func:`~sdcclient.SdSecureClient.get_policy_events_duration` or a prior call to
            get_more_policy_events.

        **Arguments**
            - ctx: a context object returned from an initial call to
              :func:`~sdcclient.SdSecureClient.get_policy_events_range` /
              :func:`~sdcclient.SdSecureClient.get_policy_events_duration` or a prior call to
              get_more_policy_events.

        **Success Return Value**
            An array containing:
            - A context object that should be passed to later calls to get_more_policy_events()
            - An array of policy events, in JSON format. Each policy event contains the following:
                - id: a unique identifier for this policy event
                - cursor: unique ID that can be used with get_more_policy_events context to retrieve paginated
                  policy events
                - timestamp: when the event occurred (ns since the epoch)
                - source: the source of the policy event. It can be "syscall" or "k8s_audit"
                - description: the description of the event
                - severity: a severity level from 1-7
                - agentId: the agent that reported this event
                - machineId: the MAC of the machine that reported this event
                - content: more information about what triggered the event
                - falsePositive: if the event is considered a false-positive
                - fields: raw information from the rule that fired this event
                - output: output from the rule that fired this event
                - policyId: the ID of the policy that fired this event
                - ruleName: name of the rule that fired this event
                - ruleTags: tags from the rule that fired this event
                - labels: more information from the scope of this event

            When the number of policy events returned is 0, there are no remaining events and you can stop
            calling get_more_policy_events().

        **Example**
            `examples/get_secure_policy_events.py `_
        '''
        return self._get_policy_events_int(ctx)
from expects.matchers import Matcher


class _be_successful_api_call(Matcher):
    """Expects matcher asserting that an ``(ok, result)`` API-call tuple succeeded."""

    def _match(self, expect):
        # `expect` is the (ok, result) pair returned by the SDK clients.
        ok, result = expect
        if ok:
            return True, [f"the api call was successful: {str(result)}"]
        return False, [f"the result is {str(result)}"]


# Singleton instance shared by all the specs.
be_successful_api_call = _be_successful_api_call()


import os
import time

from expects import expect, have_key, have_keys
from expects.matchers import _Or
from expects.matchers.built_in import be_empty, contain, be_above_or_equal
from mamba import before, it, description, after

from sdcclient import SdcClient
from specs import be_successful_api_call


def _mysql_app_check():
    # Agent app-check YAML snippet used to exercise set_agents_config.
    # NOTE(review): leading-space layout reconstructed from the diff — confirm
    # it matches the original fixture byte-for-byte.
    return """\
app_checks:
 - name: mysql
   pattern:
     comm: mysqld
   conf:
     server: 127.0.0.1
     user: sysdig-cloud
     pass: sysdig-cloud-password
"""


def _debug_enabled():
    # Agent log-level YAML snippet used to exercise set_agents_config.
    return """\
log:
  console_priority: debug
"""


# See https://docs.sysdig.com/en/agent-auto-config.html for more information
def _agent_configuration():
    # Auto-config payload: a MAC-scoped app check plus a global debug override.
    return {
        "files": [
            {
                "filter": "host.mac = \"08:00:27:de:5b:b9\"",
                "content": _mysql_app_check()
            },
            {
                "filter": "*",
                "content": _debug_enabled()
            }
        ]
    }


with description("Agent") as self:
    with before.all:
        # Client pointing at the monitor backend configured via environment variables.
        self.client = SdcClient(sdc_url=os.getenv("SDC_MONITOR_URL", "https://app.sysdigcloud.com"),
                                token=os.getenv("SDC_MONITOR_TOKEN"))

    with it("is able to retrieve the agent configuration"):
        ok, res = self.client.get_agents_config()

        expect((ok, res)).to(be_successful_api_call)
        expect(res).to(have_key("files", _Or(be_empty, contain(have_key("content")))))

    with it("is able to set up the agent configuration"):
        ok, res = self.client.set_agents_config(_agent_configuration())

        expect((ok, res)).to(be_successful_api_call)
        expect(res).to(have_key("files", contain(have_key("content", contain(_mysql_app_check())))))
        expect(res).to(have_key("files", contain(have_key("content", contain(_debug_enabled())))))

    with it("is able to clean up the agent configuration"):
        ok, res = self.client.clear_agents_config()

        expect((ok, res)).to(be_successful_api_call)
        expect(res).to(have_key("files", be_empty))

    with it("is able to retrieve the number of connected agents"):
        ok, res = self.client.get_n_connected_agents()

        expect((ok, res)).to(be_successful_api_call)
        expect(res).to(be_above_or_equal(1))

    with it("is able to retrieve the info from the connected agents"):
        ok, res = self.client.get_connected_agents()

        expect((ok, res)).to(be_successful_api_call)
        expect(res).to(contain(have_keys(
            "customer",
            "machineId",
            "hostName",
            connected=True,
            attributes=have_keys(
                "hidden",
                "version",
            )
        )))
# Spec for the v1 Alerts API of the Sysdig Monitor client.
import os

from expects import expect, have_key, have_keys, have_len
from expects.matchers.built_in import be_above_or_equal
from mamba import description, before, it, context, after

from sdcclient import SdMonitorClient
from specs import be_successful_api_call

_ALERT_NAME = "Test - Alert"
_ALERT_DESCRIPTION = "This alert was automatically created using the Sysdig SDK Python"

with description("Alerts v1") as self:
    with before.all:
        self.client = SdMonitorClient(sdc_url=os.getenv("SDC_MONITOR_URL", "https://app.sysdigcloud.com"),
                                      token=os.getenv("SDC_MONITOR_TOKEN"))

    # Purge leftover "Test -" alerts both before and after every example so
    # the suite stays idempotent even after a failed run.
    with before.each:
        self.cleanup_alerts()

    with after.each:
        self.cleanup_alerts()


    def cleanup_alerts(self):
        ok, res = self.client.get_alerts()
        expect((ok, res)).to(be_successful_api_call)

        leftovers = [alert for alert in res["alerts"]
                     if str(alert["name"]).startswith("Test -")]
        for alert in leftovers:
            expect(self.client.delete_alert(alert)).to(be_successful_api_call)


    def create_test_alert(self):
        # On success the created alert is stashed on self for later examples.
        ok, res = self.client.create_alert(
            name=_ALERT_NAME,
            description=_ALERT_DESCRIPTION,
            severity=6,
            for_atleast_s=60,
            condition='avg(cpu.used.percent) > 80',
            # We want to check this metric for every process on every machine.
            segmentby=['host.mac', 'proc.name'],
            segment_condition='ANY',
            # if there is more than one tomcat process, this alert will fire when a single one of them crosses the
            # 80% threshold.
            user_filter='proc.name = "tomcat"',
            enabled=False)

        if ok:
            self.test_alert = res["alert"]
        return ok, res


    with it("is able to create an alert"):
        ok, res = self.create_test_alert()
        expect((ok, res)).to(be_successful_api_call)
        expect(res).to(have_key("alert", have_keys("id", "name")))

    with it("is able to remove the alert"):
        self.create_test_alert()
        expect(self.client.delete_alert(self.test_alert)).to(be_successful_api_call)

    with context("with existing alerts"):
        with before.each:
            self.create_test_alert()

        with it("is able to list all existing alerts"):
            ok, res = self.client.get_alerts()
            expect((ok, res)).to(be_successful_api_call)
            expect(res).to(have_key("alerts", have_len(be_above_or_equal(1))))

        with it("is able to update an alert"):
            self.test_alert["enabled"] = not self.test_alert["enabled"]
            expect(self.client.update_alert(self.test_alert)).to(be_successful_api_call)
def randomword(length):
    """Return a random string of *length* lowercase letters and digits."""
    alphabet = string.ascii_lowercase + string.digits
    picked = []
    for _ in range(length):
        picked.append(random.choice(alphabet))
    return "".join(picked)
# Spec for sysdig capture (scap file) management in Sysdig Monitor.
import os
import random
import socket
import string
import time

from expects import expect, have_key, contain
from expects.matchers import _Or
from expects.matchers.built_in import have_keys, equal
from mamba import description, it, before

from sdcclient import SdMonitorClient
from specs import be_successful_api_call


def randomword(length):
    """Return a random string of *length* lowercase letters and digits."""
    letters = string.ascii_lowercase + string.digits
    return ''.join(random.choice(letters) for _ in range(length))


with description("Captures v1") as self:
    with before.all:
        self.client = SdMonitorClient(sdc_url=os.getenv("SDC_MONITOR_URL", "https://app.sysdigcloud.com"),
                                      token=os.getenv("SDC_MONITOR_TOKEN"))
        # Random suffix avoids collisions between concurrent CI runs.
        self.capture_name = f"apicapture-sdk-{randomword(10)}"
        self.hostname = socket.gethostname()

    with it("is able to create a capture"):
        ok, res = self.client.create_sysdig_capture(hostname=self.hostname,
                                                    capture_name=self.capture_name,
                                                    duration=10)
        expect((ok, res)).to(be_successful_api_call)

    with it("is able to retrieve the capture we have created"):
        ok, res = self.client.get_sysdig_captures()

        expect((ok, res)).to(be_successful_api_call)
        expect(res).to(have_key(
            "dumps", contain(
                have_keys(
                    "size",
                    "status",
                    "folder",
                    agent=have_key("hostName", equal(self.hostname)),
                    name=equal(f"{self.capture_name}.scap"),
                ))
        ))

    # DEACTIVATED: This test is not enabled because sometimes the agent does not trigger the capture
    # and therefore this test fails. As it is not our duty to verify that the agent is able to create the capture,
    # we assume this won't be covered by the library.
    # NOTE(review): `_it` is mamba's underscore-prefixed disabled-example marker;
    # it is presumably rewritten by mamba's module loader rather than resolved
    # at runtime, which is why it needs no import -- confirm against mamba docs.
    with _it("polls the status of the capture until it's done"):
        _, res = self.client.get_sysdig_captures()
        capture = [capture for capture in res["dumps"] if capture["name"] == f"{self.capture_name}.scap"][0]

        status = "undefined"
        for _ in range(300):
            ok, res = self.client.poll_sysdig_capture(capture)
            expect((ok, res)).to(be_successful_api_call)
            expect(res).to(have_key("dump", have_key("status")))

            status = res["dump"]["status"]
            # Terminal states: stop polling as soon as one is reached.
            if status in ["done", "uploaded", "error", "uploadingError"]:
                break

            time.sleep(1)

        expect(status).to(_Or(equal("done"), equal("uploaded")))

    # DEACTIVATED: This test is not enabled because sometimes the agent does not trigger the capture
    # and therefore this test fails. As it is not our duty to verify that the agent is able to create the capture,
    # we assume this won't be covered by the library.
    with _it("is able to download the capture"):
        _, res = self.client.get_sysdig_captures()
        capture = [capture for capture in res["dumps"] if capture["name"] == f"{self.capture_name}.scap"][0]

        call = self.client.download_sysdig_capture(capture["id"])
        expect(call).to(be_successful_api_call)
# Spec for the dashboard scope-string parser.
#
# Improvements over the previous version:
#  - replaced the star imports (`from expects import *`, `from mamba import *`)
#    with explicit imports, per PEP 8;
#  - factored the 17 near-identical expected-expression dict literals into two
#    small helpers (`_expression`, `_parsed_ok`), removing the copy-paste
#    duplication while keeping every example and expectation identical.
from expects import expect, equal, be_false, start_with
from mamba import description, it

from sdcclient.monitor.dashboard_converters import convert_scope_string_to_expression


def _expression(operand, operator, values):
    """Expected expression dict produced by the parser for one scope clause."""
    return {
        "displayName": "",
        "isVariable": False,
        "operand": operand,
        "operator": operator,
        "value": values,
    }


def _parsed_ok(*expressions):
    """Expected successful parse result: [True, [expression, ...]]."""
    return [True, list(expressions)]


with description("Dashboard Scopes"):
    with it("parses correctly: agent.id is foo"):
        res = convert_scope_string_to_expression("agent.id is foo")
        expect(res).to(equal(_parsed_ok(_expression("agent.id", "equals", ["foo"]))))

    with it("parses correctly: agent.id = foo"):
        res = convert_scope_string_to_expression("agent.id = foo")
        expect(res).to(equal(_parsed_ok(_expression("agent.id", "equals", ["foo"]))))

    with it('parses correctly: agent.id = "foo"'):
        res = convert_scope_string_to_expression('agent.id = "foo"')
        expect(res).to(equal(_parsed_ok(_expression("agent.id", "equals", ["foo"]))))

    with it("parses correctly: agent.id = 'foo'"):
        res = convert_scope_string_to_expression("agent.id = 'foo'")
        expect(res).to(equal(_parsed_ok(_expression("agent.id", "equals", ["foo"]))))

    with it("parses correctly: agent.id is not foo"):
        res = convert_scope_string_to_expression("agent.id is not foo")
        expect(res).to(equal(_parsed_ok(_expression("agent.id", "notEquals", ["foo"]))))

    with it("parses correctly: agent.id in foo"):
        res = convert_scope_string_to_expression("agent.id in foo")
        expect(res).to(equal(_parsed_ok(_expression("agent.id", "in", ["foo"]))))

    with it("parses correctly: agent.id in [foo]"):
        res = convert_scope_string_to_expression("agent.id in [foo]")
        expect(res).to(equal(_parsed_ok(_expression("agent.id", "in", ["foo"]))))

    with it("parses correctly: agent.id in [foo, bar]"):
        res = convert_scope_string_to_expression("agent.id in [foo, bar]")
        expect(res).to(equal(_parsed_ok(_expression("agent.id", "in", ["foo", "bar"]))))

    with it("parses correctly: agent.id in [foo, bar, baz]"):
        res = convert_scope_string_to_expression("agent.id in [foo, bar, baz]")
        expect(res).to(equal(_parsed_ok(_expression("agent.id", "in", ["foo", "bar", "baz"]))))

    with it("parses correctly: agent.id in [foo, bar, baz] and agent.name is 'foobar'"):
        res = convert_scope_string_to_expression("agent.id in [foo, bar, baz] and agent.name is 'foobar'")
        expect(res).to(equal(_parsed_ok(
            _expression("agent.id", "in", ["foo", "bar", "baz"]),
            _expression("agent.name", "equals", ["foobar"]),
        )))

    with it("parses correctly: agent.id not in foo"):
        res = convert_scope_string_to_expression("agent.id not in foo")
        expect(res).to(equal(_parsed_ok(_expression("agent.id", "notIn", ["foo"]))))

    with it("parses correctly: agent.id not in [foo, bar, baz]"):
        res = convert_scope_string_to_expression("agent.id not in [foo, bar, baz]")
        expect(res).to(equal(_parsed_ok(_expression("agent.id", "notIn", ["foo", "bar", "baz"]))))

    with it("parses correctly: agent.id contains foo"):
        res = convert_scope_string_to_expression("agent.id contains foo")
        expect(res).to(equal(_parsed_ok(_expression("agent.id", "contains", ["foo"]))))

    with it("parses correctly: agent.id does not contain foo"):
        res = convert_scope_string_to_expression("agent.id does not contain foo")
        expect(res).to(equal(_parsed_ok(_expression("agent.id", "notContains", ["foo"]))))

    with it("parses correctly: agent.id starts with foo"):
        res = convert_scope_string_to_expression("agent.id starts with foo")
        expect(res).to(equal(_parsed_ok(_expression("agent.id", "startsWith", ["foo"]))))

    with it("returns ok, but empty if scope is None"):
        res = convert_scope_string_to_expression(None)
        expect(res).to(equal([True, []]))

    with it("returns error when parsing incorrect: agent.id starts with [foo, bar]"):
        param = "agent.id starts with [foo, bar]"
        ok, res = convert_scope_string_to_expression(param)
        expect(ok).to(be_false)
        expect(res).to(start_with(f"invalid scope: {param}"))

    with it("returns error when parsing incorrect: agent.id is [foo, bar]"):
        param = "agent.id is [foo, bar]"
        ok, res = convert_scope_string_to_expression(param)
        expect(ok).to(be_false)
        expect(res).to(start_with(f"invalid scope: {param}"))

    with it("returns error when parsing incorrect: agent.id contains [foo, bar]"):
        param = "agent.id contains [foo, bar]"
        ok, res = convert_scope_string_to_expression(param)
        expect(ok).to(be_false)
        expect(res).to(start_with(f"invalid scope: {param}"))

    with it("returns error when parsing incorrect: agent.id"):
        param = "agent.id"
        ok, res = convert_scope_string_to_expression(param)
        expect(ok).to(be_false)
        expect(res).to(start_with(f"invalid scope: {param}"))

    with it("returns error when parsing incorrect: agent.id is"):
        param = "agent.id is"
        ok, res = convert_scope_string_to_expression(param)
        expect(ok).to(be_false)
        expect(res).to(start_with(f"invalid scope: {param}"))
# Spec for the v2 Dashboards API client.
#
# Fix: typo in a reported context description ("existing dashbords" ->
# "existing dashboards"); this string only affects mamba's spec output.
import json
import os
import tempfile

from expects import expect, have_key, have_keys, contain, equal, start_with
from expects.matchers.built_in import be_false, be_empty
from mamba import before, it, context, after, description

from sdcclient.monitor import DashboardsClientV2
from specs import be_successful_api_call

_DASHBOARD_NAME = "test_dashboard_ci"

with description("Dashboards v2") as self:
    with before.all:
        self.client = DashboardsClientV2(sdc_url=os.getenv("SDC_MONITOR_URL", "https://app.sysdigcloud.com"),
                                         token=os.getenv("SDC_MONITOR_TOKEN"))

    # Remove test dashboards before and after each example so runs are idempotent.
    with before.each:
        self.cleanup_test_dashboards()

    with after.each:
        self.cleanup_test_dashboards()


    def cleanup_test_dashboards(self):
        ok, res = self.client.get_dashboards()
        expect((ok, res)).to(be_successful_api_call)

        for dashboard in res["dashboards"]:
            if str(dashboard["name"]).startswith(_DASHBOARD_NAME):
                call = self.client.delete_dashboard(dashboard)
                expect(call).to(be_successful_api_call)


    def create_test_dashboard(self):
        # On success the created dashboard is stashed on self for later examples.
        ok, res = self.client.create_dashboard(name=_DASHBOARD_NAME)
        if ok:
            self.test_dashboard = res["dashboard"]

        return ok, res


    with it("is able to create a dashboard with just a name"):
        ok, res = self.client.create_dashboard(name=_DASHBOARD_NAME)
        expect((ok, res)).to(be_successful_api_call)
        expect(res).to(have_key("dashboard"))

    with it("is able to create a dashboard from a file"):
        self.create_test_dashboard()
        with tempfile.NamedTemporaryFile(mode="w+") as f:
            # Write the info to the temp file
            json.dump({"dashboard": self.test_dashboard, "version": "v2"}, f)
            f.flush()
            f.seek(0)

            ok, res = self.client.create_dashboard_from_file(dashboard_name=f"{_DASHBOARD_NAME}_2", filename=f.name,
                                                             filter=None)
            expect((ok, res)).to(be_successful_api_call)

    with it("is able to create a dashboard from a view"):
        _, res_view_list = self.client.get_views_list()

        call = self.client.create_dashboard_from_view(newdashname=f"{_DASHBOARD_NAME}_2",
                                                      viewname=res_view_list["defaultDashboards"][0]["name"],
                                                      filter=None)
        expect(call).to(be_successful_api_call)

    with context("when there are existing dashboards"):
        with before.each:
            self.create_test_dashboard()

        with it("is able to list all the dashboards"):
            ok, res = self.client.get_dashboards()
            expect((ok, res)).to(be_successful_api_call)
            expect(res).to(have_key("dashboards", contain(have_keys("name", "id"))))

        with it("is able to retrieve the test dashboard by its id"):
            ok, res = self.client.get_dashboard(dashboard_id=self.test_dashboard["id"])
            expect((ok, res)).to(be_successful_api_call)
            expect(res).to(have_key("dashboard", have_keys("name", id=equal(self.test_dashboard["id"]))))

        with context("when deleting a dashboard"):
            with it("is able to remove it if all the info provided is correct"):
                _, res = self.client.get_dashboards()
                dashboard_len_before = len(res["dashboards"])

                call = self.client.delete_dashboard(self.test_dashboard)

                _, res = self.client.get_dashboards()
                dashboard_len_after = len(res["dashboards"])

                expect(call).to(be_successful_api_call)
                expect(dashboard_len_after).to(equal(dashboard_len_before - 1))

            with it("fails to delete it if the info provided is not correct"):
                ok, res = self.client.delete_dashboard({"id": 0})
                expect(ok).to(be_false)
                expect(res).to(equal("status code 404"))

            with it("returns an error if there is not 'id' field in the provided object"):
                ok, res = self.client.delete_dashboard({})

                expect(ok).to(be_false)
                expect(res).to(equal("Invalid dashboard format"))

        with it("is able to dump the dashboard to a file"):
            with tempfile.NamedTemporaryFile(mode="w+") as f:
                self.client.save_dashboard_to_file(dashboard=self.test_dashboard, filename=f.name)
                f.flush()
                f.seek(0)

                data = json.load(f)
                expect(data).to(have_keys(version=equal("v2"), dashboard=equal(self.test_dashboard)))

        with it("is able to create a dashboard from template"):
            call = self.client.create_dashboard_from_template(dashboard_name=f"{_DASHBOARD_NAME}_2",
                                                              template=self.test_dashboard,
                                                              scope='agent.id = "foo"')
            expect(call).to(be_successful_api_call)

        with context("when it's created with an incorrect scope"):
            with it("fails if the scope is not a string"):
                ok, res = self.client.create_dashboard_from_template(dashboard_name=f"{_DASHBOARD_NAME}_2",
                                                                     template=self.test_dashboard,
                                                                     scope={})
                expect(ok).to(be_false)
                expect(res).to(equal("Invalid scope format: Expected a string"))

            with it("fails if the scope has incorrect format"):
                ok, res = self.client.create_dashboard_from_template(dashboard_name=f"{_DASHBOARD_NAME}_2",
                                                                     template=self.test_dashboard,
                                                                     scope="foobarbaz")
                expect(ok).to(be_false)
                expect(res).to(start_with("invalid scope: foobarbaz"))

        with it("is able to create a dashboard from a configuration"):
            self.test_dashboard["name"] = f"{_DASHBOARD_NAME}_2"
            call = self.client.create_dashboard_with_configuration(self.test_dashboard)

            expect(call).to(be_successful_api_call)

        with context("when creating a dashboard from other dashboard"):
            with it("creates the dashboard correctly if the template exists"):
                ok, res = self.client.create_dashboard_from_dashboard(newdashname=f"{_DASHBOARD_NAME}_2",
                                                                      templatename=_DASHBOARD_NAME, filter=None)
                expect((ok, res)).to(be_successful_api_call)

            with it("returns an error saying the dashboard does not exist"):
                ok, res = self.client.create_dashboard_from_dashboard(newdashname=f"{_DASHBOARD_NAME}_2",
                                                                      templatename="NonExistingDashboard", filter=None)
                expect(ok).to(be_false)
                expect(res).to(equal("can't find dashboard NonExistingDashboard to use as a template"))

        with it("is able to update a dashboard"):
            self.test_dashboard["name"] = f"{_DASHBOARD_NAME}_updated"
            call = self.client.update_dashboard(self.test_dashboard)

            expect(call).to(be_successful_api_call)

        with it("is able to retrieve the dashboard by it's name"):
            ok, res = self.client.find_dashboard_by(name=self.test_dashboard["name"])

            expect((ok, res)).to(be_successful_api_call)
            expect(res).to(contain(
                have_key("dashboard", have_keys(id=self.test_dashboard["id"], name=self.test_dashboard["name"])))
            )
# Spec for the v3 Dashboards API (default SdMonitorClient), including
# publishing, favoriting and team-sharing operations.
#
# Fix: typo in a reported context description ("existing dashbords" ->
# "existing dashboards"); this string only affects mamba's spec output.
import json
import os
import tempfile

from expects import expect, have_key, have_keys, contain, equal, start_with
from expects.matchers.built_in import be_false, have_len, be_empty
from mamba import before, it, context, after, description

from sdcclient import SdMonitorClient
from specs import be_successful_api_call

_DASHBOARD_NAME = "test_dashboard_ci"

with description("Dashboards v3") as self:
    with before.all:
        self.client = SdMonitorClient(sdc_url=os.getenv("SDC_MONITOR_URL", "https://app.sysdigcloud.com"),
                                      token=os.getenv("SDC_MONITOR_TOKEN"))

    # Remove test dashboards before and after each example so runs are idempotent.
    with before.each:
        self.cleanup_test_dashboards()

    with after.each:
        self.cleanup_test_dashboards()


    def cleanup_test_dashboards(self):
        ok, res = self.client.get_dashboards()
        expect((ok, res)).to(be_successful_api_call)

        for dashboard in res["dashboards"]:
            if str(dashboard["name"]).startswith(_DASHBOARD_NAME):
                call = self.client.delete_dashboard(dashboard)
                expect(call).to(be_successful_api_call)


    def create_test_dashboard(self):
        # On success the created dashboard is stashed on self for later examples.
        ok, res = self.client.create_dashboard(name=_DASHBOARD_NAME)
        if ok:
            self.test_dashboard = res["dashboard"]

        return ok, res


    with it("is able to create a dashboard with just a name"):
        ok, res = self.client.create_dashboard(name=_DASHBOARD_NAME)
        expect((ok, res)).to(be_successful_api_call)
        expect(res).to(have_key("dashboard"))

    with it("is able to create a dashboard from a file"):
        self.create_test_dashboard()
        with tempfile.NamedTemporaryFile(mode="w+") as f:
            # Write the info to the temp file
            json.dump({"dashboard": self.test_dashboard, "version": "v3"}, f)
            f.flush()
            f.seek(0)

            ok, res = self.client.create_dashboard_from_file(dashboard_name=f"{_DASHBOARD_NAME}_2", filename=f.name,
                                                             filter=None)
            expect((ok, res)).to(be_successful_api_call)

    with it("is able to create a dashboard from a view"):
        ok, res_view_list = self.client.get_views_list()
        expect((ok, res_view_list)).to(be_successful_api_call)

        call = self.client.create_dashboard_from_view(newdashname=f"{_DASHBOARD_NAME}_2",
                                                      viewname=res_view_list["dashboardTemplates"][0]["name"],
                                                      filter=None)
        expect(call).to(be_successful_api_call)

    with context("when there are existing dashboards"):
        with before.each:
            self.create_test_dashboard()

        with it("is able to list all the dashboards"):
            ok, res = self.client.get_dashboards()
            expect((ok, res)).to(be_successful_api_call)
            expect(res).to(have_key("dashboards", contain(have_keys("name", "id"))))

        with it("is able to retrieve the test dashboard by its id"):
            ok, res = self.client.get_dashboard(dashboard_id=self.test_dashboard["id"])
            expect((ok, res)).to(be_successful_api_call)
            expect(res).to(have_key("dashboard", have_keys("name", id=equal(self.test_dashboard["id"]))))

        with context("when deleting a dashboard"):
            with it("is able to remove it if all the info provided is correct"):
                _, res = self.client.get_dashboards()
                dashboard_len_before = len(res["dashboards"])

                call = self.client.delete_dashboard(self.test_dashboard)

                _, res = self.client.get_dashboards()
                dashboard_len_after = len(res["dashboards"])

                expect(call).to(be_successful_api_call)
                expect(dashboard_len_after).to(equal(dashboard_len_before - 1))

            with it("fails to delete it if the info provided is not correct"):
                ok, res = self.client.delete_dashboard({"id": 0})
                expect(ok).to(be_false)
                expect(res).to(equal("status code 404"))

            with it("returns an error if there is not 'id' field in the provided object"):
                ok, res = self.client.delete_dashboard({})

                expect(ok).to(be_false)
                expect(res).to(equal("Invalid dashboard format"))

        with it("is able to dump the dashboard to a file"):
            with tempfile.NamedTemporaryFile(mode="w+") as f:
                self.client.save_dashboard_to_file(dashboard=self.test_dashboard, filename=f.name)
                f.flush()
                f.seek(0)

                data = json.load(f)
                expect(data).to(have_keys(version=equal("v3"), dashboard=equal(self.test_dashboard)))

        with it("is able to create a dashboard from template"):
            call = self.client.create_dashboard_from_template(dashboard_name=f"{_DASHBOARD_NAME}_2",
                                                              template=self.test_dashboard,
                                                              scope='agent.id = "foo"')
            expect(call).to(be_successful_api_call)

        with it("is able to make it public"):
            modified_dashboard = self.test_dashboard
            modified_dashboard["public"] = True

            ok, res = self.client.update_dashboard(modified_dashboard)

            expect((ok, res)).to(be_successful_api_call)
            expect(res["dashboard"]).to(have_key("public", True))

        with it("is able to favorite it and unfavorite it"):
            ok, res = self.client.favorite_dashboard(self.test_dashboard["id"], True)

            expect((ok, res)).to(be_successful_api_call)
            expect(res["dashboard"]).to(have_key("favorite", True))

            ok, res = self.client.favorite_dashboard(self.test_dashboard["id"], False)

            expect((ok, res)).to(be_successful_api_call)
            expect(res["dashboard"]).to(have_key("favorite", False))

        with context("when it's created with an incorrect scope"):
            with it("fails if the scope is not a string"):
                ok, res = self.client.create_dashboard_from_template(dashboard_name=f"{_DASHBOARD_NAME}_2",
                                                                     template=self.test_dashboard,
                                                                     scope={})
                expect(ok).to(be_false)
                expect(res).to(equal("Invalid scope format: Expected a list, a string or None"))

            with it("fails if the scope has incorrect format"):
                ok, res = self.client.create_dashboard_from_template(dashboard_name=f"{_DASHBOARD_NAME}_2",
                                                                     template=self.test_dashboard,
                                                                     scope="foobarbaz")
                expect(ok).to(be_false)
                expect(res).to(start_with("invalid scope: foobarbaz"))

        with it("is able to create a dashboard from a configuration"):
            self.test_dashboard["name"] = f"{_DASHBOARD_NAME}_2"
            call = self.client.create_dashboard_with_configuration(self.test_dashboard)

            expect(call).to(be_successful_api_call)

        with context("when creating a dashboard from other dashboard"):
            with it("creates the dashboard correctly if the template exists"):
                ok, res = self.client.create_dashboard_from_dashboard(newdashname=f"{_DASHBOARD_NAME}_2",
                                                                      templatename=_DASHBOARD_NAME, filter=None)
                expect((ok, res)).to(be_successful_api_call)

            with it("returns an error saying the dashboard does not exist"):
                ok, res = self.client.create_dashboard_from_dashboard(newdashname=f"{_DASHBOARD_NAME}_2",
                                                                      templatename="NonExistingDashboard", filter=None)
                expect(ok).to(be_false)
                expect(res).to(equal("can't find dashboard NonExistingDashboard to use as a template"))

        with it("is able to update a dashboard"):
            self.test_dashboard["name"] = f"{_DASHBOARD_NAME}_updated"
            call = self.client.update_dashboard(self.test_dashboard)

            expect(call).to(be_successful_api_call)

        with it("is able to retrieve the dashboard by it's name"):
            ok, res = self.client.find_dashboard_by(name=self.test_dashboard["name"])

            expect((ok, res)).to(be_successful_api_call)
            expect(res).to(contain(
                have_key("dashboard", have_keys(id=self.test_dashboard["id"], name=self.test_dashboard["name"])))
            )

        with context("when we are sharing a dashboard with all teams"):
            with it("shares it with view only permissions"):
                ok, res = self.client.share_dashboard_with_all_teams(self.test_dashboard, "r")

                expect((ok, res)).to(be_successful_api_call)
                expect(res["dashboard"]).to(have_key("shared", True))
                expect(res["dashboard"]).to(have_key("sharingSettings"))
                expect(res["dashboard"]["sharingSettings"]).to(have_len(1))
                expect(res["dashboard"]["sharingSettings"][0]["role"]).to(equal("ROLE_RESOURCE_READ"))

            with it("shares it with read write permissions"):
                ok, res = self.client.share_dashboard_with_all_teams(self.test_dashboard, "w")

                expect((ok, res)).to(be_successful_api_call)
                expect(res["dashboard"]).to(have_key("shared", True))
                expect(res["dashboard"]).to(have_key("sharingSettings"))
                expect(res["dashboard"]["sharingSettings"]).to(have_len(1))
                expect(res["dashboard"]["sharingSettings"][0]["role"]).to(equal("ROLE_RESOURCE_EDIT"))

            with context("when there is a shared dashboard"):
                with it("unshares it"):
                    _, dboard = self.client.share_dashboard_with_all_teams(self.test_dashboard, "w")

                    ok, res = self.client.unshare_dashboard(dboard["dashboard"])

                    expect((ok, res)).to(be_successful_api_call)
                    expect(res["dashboard"]).to(have_key("shared", False))
                    expect(res["dashboard"]).to(have_key("sharingSettings"))
                    expect(res["dashboard"]["sharingSettings"]).to(be_empty)

        with context("when we are sharing a dashboard with a particular team"):
            with before.all:
                self.new_team_name = "NewMonitorTeam-sdc-cli"
                _, self.team = self.client.create_team(self.new_team_name)

            with after.all:
                self.client.delete_team(self.new_team_name)

            with it("shares it with view only permissions"):
                _, team = self.client.get_team("Monitor Operations")

                ok, res = self.client.share_dashboard_with_team(self.test_dashboard, team["id"], "r")

                expect((ok, res)).to(be_successful_api_call)
                expect(res["dashboard"]).to(have_key("shared", True))
                expect(res["dashboard"]).to(have_key("sharingSettings"))
                expect(res["dashboard"]["sharingSettings"]).to(have_len(1))
                expect(res["dashboard"]["sharingSettings"][0]["role"]).to(equal("ROLE_RESOURCE_READ"))

            with it("shares it with read write permissions"):
                _, team = self.client.get_team("Monitor Operations")

                ok, res = self.client.share_dashboard_with_team(self.test_dashboard, team["id"], "w")

                expect((ok, res)).to(be_successful_api_call)
                expect(res["dashboard"]).to(have_key("shared", True))
                expect(res["dashboard"]).to(have_key("sharingSettings"))
                expect(res["dashboard"]["sharingSettings"]).to(have_len(1))
                expect(res["dashboard"]["sharingSettings"][0]["role"]).to(equal("ROLE_RESOURCE_EDIT"))

            with it("shares it with two teams, one of those with write access"):
                _, team = self.client.get_team("Monitor Operations")

                ok_team, res_team = self.client.share_dashboard_with_team(self.test_dashboard, team["id"], "r")
                ok_team2, res_team2 = self.client.share_dashboard_with_team(res_team["dashboard"],
                                                                            self.team["team"]["id"], "w")

                expect((ok_team, res_team)).to(be_successful_api_call)
                expect((ok_team2, res_team2)).to(be_successful_api_call)

                expect(res_team2["dashboard"]).to(have_key("shared", True))
                expect(res_team2["dashboard"]).to(have_key("sharingSettings"))
                expect(res_team2["dashboard"]["sharingSettings"]).to(have_len(2))
                expect(res_team2["dashboard"]["sharingSettings"][0]["role"]).to(equal("ROLE_RESOURCE_READ"))
                expect(res_team2["dashboard"]["sharingSettings"][1]["role"]).to(equal("ROLE_RESOURCE_EDIT"))
# Spec for the v1 Events API client.
import os
import time

from expects import expect, have_key, contain, have_keys, be_empty
from mamba import it, before, description

from sdcclient.monitor import EventsClientV1
from specs import be_successful_api_call

with description("Events v1") as self:
    with before.all:
        self.client = EventsClientV1(sdc_url=os.getenv("SDC_MONITOR_URL", "https://app.sysdigcloud.com"),
                                     token=os.getenv("SDC_MONITOR_TOKEN"))
        self.event_name = "event_v1_test_ci"

    with it("is able to create a custom event"):
        call = self.client.post_event(name=self.event_name,
                                      description="This event was created in a CI pipeline for the Python SDK library")
        expect(call).to(be_successful_api_call)

    with it("is able to list the events happened without any filter"):
        time.sleep(3)  # Wait for the event to appear in the feed
        ok, res = self.client.get_events()

        expect((ok, res)).to(be_successful_api_call)
        expect(res).to(have_key("events"))

    with it("is able to list the events created by the tests"):
        time.sleep(3)  # Wait for the event to appear in the feed
        ok, res = self.client.get_events(last_s=60)

        expect((ok, res)).to(be_successful_api_call)
        expect(res).to(have_key("events", contain(have_keys(name=self.event_name))))

    with it("is able to remove the event from the feed"):
        time.sleep(3)
        _, res = self.client.get_events(last_s=60)

        created = [event for event in res["events"] if event["name"] == self.event_name]
        expect(created).to_not(be_empty)

        expect(self.client.delete_event(created[0])).to(be_successful_api_call)
self.client.get_events(status=['triggered']) - expect((ok, res)).to(be_successful_api_call) - expect(res).to(have_key("events", contain(have_keys(name=self.event_name)))) - - with it("fails to retrieve the events with an incorrect status"): - ok, res = self.client.get_events(status=['incorrect_status']) - - expect(ok).to(be_false) - expect(res).to(equal("Invalid status 'incorrect_status'")) - - with it("retrieves the events correctly specifying direction 'before'"): - ok, res = self.client.get_events(direction="before") - - expect((ok, res)).to(be_successful_api_call) - expect(res).to(have_keys('events', 'total', 'matched')) - - with it("retrieves the events correctly specifying direction 'after'"): - ok, res = self.client.get_events(direction="after") - - expect((ok, res)).to(be_successful_api_call) - expect(res).to(have_keys('events', 'total', 'matched')) - - with it("fails to retrieve the events with an incorrect direction"): - ok, res = self.client.get_events(direction="incorrect_direction") - - expect(ok).to(be_false) - expect(res).to(equal("Invalid direction 'incorrect_direction', must be either 'before' or 'after'")) - - with it("is able to retrieve events by name"): - ok, res = self.client.get_events(name=self.event_name) - - expect((ok, res)).to(be_successful_api_call) - expect(res).to(have_key("events", contain(have_key("name", equal(self.event_name))))) - - with it("retrieves an empty list when the name provided is not found"): - ok, res = self.client.get_events(name="RandomUnexistingEvent") - - expect((ok, res)).to(be_successful_api_call) - expect(res).to(have_key("events", be_empty)) - - with it("is able to retrieve the last event only"): - ok, res = self.client.get_events(limit=1) - expect((ok, res)).to(be_successful_api_call) - expect(res).to(have_key("events", have_len(1))) - - with it("is able to remove the event from the feed"): - time.sleep(3) # Wait for the event to appear in the feed - _, res = self.client.get_events(category=["custom"]) - - 
events = [event for event in res["events"] if event["name"] == self.event_name] - expect(events).to_not(be_empty) - - call = self.client.delete_event(events[0]) - expect(call).to(be_successful_api_call) diff --git a/specs/secure/policy_events_v1_spec.py b/specs/secure/policy_events_v1_spec.py deleted file mode 100644 index d9269e68..00000000 --- a/specs/secure/policy_events_v1_spec.py +++ /dev/null @@ -1,78 +0,0 @@ -import datetime -import os - -from expects import be_within, have_len, expect, contain, have_key, be_empty, have_keys -from mamba import before, context, description, it, _context - -from sdcclient.secure import PolicyEventsClientV1 -from specs import be_successful_api_call - -with description("Policy Events v1") as self: - with before.each: - self.client = PolicyEventsClientV1(sdc_url=os.getenv("SDC_SECURE_URL", "https://secure.sysdig.com"), - token=os.getenv("SDC_SECURE_TOKEN")) - with context("when we try to retrieve policy events from the last 7 days"): - with it("returns the list of all events happened"): - day_in_seconds = 7 * 24 * 60 * 60 - - ok, res = self.client.get_policy_events_duration(day_in_seconds) - - expect((ok, res)).to(be_successful_api_call) - expect(res).to(have_keys("ctx", "data")) - expect(res["data"]).to( - contain(have_keys("id", "timestamp", "customerId", "source", "name", "description", "cursor"))) - - with it("returns the list of all events from a range"): - to_sec = int((datetime.datetime.utcnow() - datetime.datetime.utcfromtimestamp(0)).total_seconds()) - from_sec = to_sec - (7 * 24 * 60 * 60) - - ok, res = self.client.get_policy_events_range(from_sec, to_sec) - - expect((ok, res)).to(be_successful_api_call) - expect(res).to(have_keys("ctx", "data")) - expect(res["data"]).to( - contain(have_keys("id", "timestamp", "customerId", "source", "name", "description", "cursor"))) - - with it("returns the list of all events from the last 7 days that match a filter"): - day_in_seconds = 7 * 24 * 60 * 60 - - ok, res = 
self.client.get_policy_events_duration(day_in_seconds, filter='severity in ("4","5")') - - expect((ok, res)).to(be_successful_api_call) - expect(res).to(have_keys("ctx", "data")) - expect(res["data"]).to(contain(have_key("severity", be_within(3, 6)))) - - with it("returns an empty list if the filter does not match"): - day_in_seconds = 7 * 24 * 60 * 60 - - ok, res = self.client.get_policy_events_duration(day_in_seconds, filter='severity in ("-1")') - - expect((ok, res)).to(be_successful_api_call) - expect(res).to(have_keys("ctx", "data")) - expect(res["data"]).to(be_empty) - - with _context("and from the first event we retrieve the rest of events"): - # Deactivated tests. There seems to be a bug in the API -- need confirmation - with it("returns the list of all events except the first"): - day_in_seconds = 7 * 24 * 60 * 60 - _, res = self.client.get_policy_events_duration(day_in_seconds) - ctx = {"cursor": res["data"][0]["cursor"]} - qty_before = len(res["data"]) - - ok, res = self.client.get_more_policy_events(ctx) - - expect((ok, res)).to(be_successful_api_call) - expect(res["data"]).to(have_len(qty_before - 1)) - - with context("when the parameters are wrong"): - with it("returns an error retrieving events"): - wrong_duration = -1 - ok, res = self.client.get_policy_events_duration(wrong_duration) - expect((ok, res)).to_not(be_successful_api_call) - - with it("returns an error with an incorrect context"): - wrong_context = { - "limit": -1, - } - call = self.client.get_more_policy_events(wrong_context) - expect(call).to_not(be_successful_api_call) diff --git a/specs/secure/policy_v1_spec.py b/specs/secure/policy_v1_spec.py deleted file mode 100644 index aad11fd2..00000000 --- a/specs/secure/policy_v1_spec.py +++ /dev/null @@ -1,121 +0,0 @@ -import json -import os -import random - -from expects import expect -from mamba import before, description, after, it - -from sdcclient import SdSecureClientV1 -from specs import be_successful_api_call - -_POLICY_NAME = "Test - 
Launch Suspicious Network Tool on Host" -_POLICY_DESCRIPTION = "Detect network tools launched on the host" -_POLICY_RULES_REGEX = "Launch Suspicious Network Tool on Host" -_POLICY_ACTIONS = [ - { - "type": "POLICY_ACTION_STOP", - "msg": "" - }, - { - "type": "POLICY_ACTION_PAUSE", - "msg": "" - }, - { - "type": "POLICY_ACTION_CAPTURE", - "beforeEventNs": 5000000000, - "afterEventNs": 18000000000, - "isLimitedToContainer": True - } -] -def policy_json(): - return """\ -{ - "name": "%s", - "description": "%s", - "notificationChannelIds": [], - "severity": 0, - "hostScope": true, - "enabled": true, - "actions": %s, - "falcoConfiguration": { - "fields": [], - "ruleNameRegEx": "%s", - "onDefault": "DEFAULT_MATCH_EFFECT_NEXT" - }, - "policyEventsCount": 0, - "isManual": true, - "isBuiltin": true, - "containerScope": true, - "modifiedOn": 1597646118000, - "createdOn": 1597646118000 -} -""" % (_POLICY_NAME, _POLICY_DESCRIPTION, json.dumps(_POLICY_ACTIONS), _POLICY_RULES_REGEX) - -with description("Policies v1") as self: - with before.all: - self.clientV1 = SdSecureClientV1(sdc_url=os.getenv("SDC_SECURE_URL", "https://secure.sysdig.com"), - token=os.getenv("SDC_SECURE_TOKEN")) - with after.each: - self.cleanup_policies() - - def cleanup_policies(self): - _, res = self.clientV1.list_policies() - for policy in res['policies']: - if str(policy["name"]).startswith("Test - "): - ok, res = self.clientV1.delete_policy_id(policy["id"]) - expect((ok, res)).to(be_successful_api_call) - - with it("is able to list all existing policies"): - ok, res = self.clientV1.list_policies() - expect((ok, res)).to(be_successful_api_call) - - with it("is able to list all policies priorities"): - ok, res = self.clientV1.get_policy_priorities() - expect((ok, res)).to(be_successful_api_call) - - with it("is able to change the evaluation order of policies"): - ok, res = self.clientV1.get_policy_priorities() - random.shuffle(res['priorities']['policyIds']) - ok, res = 
self.clientV1.set_policy_priorities(json.dumps(res)) - expect((ok, res)).to(be_successful_api_call) - - with it("is able to add a policy from JSON"): - call = self.clientV1.add_policy(policy_json()) - expect(call).to(be_successful_api_call) - - with it("is able to get a policy by id"): - ok, res = self.clientV1.list_policies() - id = res['policies'][0]['id'] - call = self.clientV1.get_policy_id(id) - expect(call).to(be_successful_api_call) - - with it("is able to get a policy by name"): - ok, res = self.clientV1.list_policies() - name = res['policies'][0]['name'] - call = self.clientV1.get_policy(name) - expect(call).to(be_successful_api_call) - - with it("is able to update a policy from JSON"): - ok, res = self.clientV1.list_policies() - policy_json = res['policies'][0] - policy_json['description'] = "Updated description" - call = self.clientV1.update_policy(json.dumps(policy_json)) - expect(call).to(be_successful_api_call) - - with it("is able to delete a single policy by id"): - ok, res = self.clientV1.list_policies() - ok, res = self.clientV1.delete_policy_id(res['policies'][0]['id']) - expect((ok, res)).to(be_successful_api_call) - - with it("is able to delete a single policy by name"): - ok, res = self.clientV1.list_policies() - ok, res = self.clientV1.delete_policy_name(res['policies'][1]['name']) - expect((ok, res)).to(be_successful_api_call) - - with it("is able to delete all policies at once"): - ok, res = self.clientV1.delete_all_policies() - expect((ok, res)).to(be_successful_api_call) - - with it("is able to create the default policies"): - ok, res = self.clientV1.create_default_policies() - expect((ok, res)).to(be_successful_api_call) diff --git a/specs/secure/policy_v2_spec.py b/specs/secure/policy_v2_spec.py deleted file mode 100644 index bcb620dd..00000000 --- a/specs/secure/policy_v2_spec.py +++ /dev/null @@ -1,84 +0,0 @@ -import json -import os - -from expects import expect -from mamba import before, description, after, it - -from sdcclient 
import SdSecureClient -from specs import be_successful_api_call - -_POLICY_NAME = "Test - Terminal shell in container" -_POLICY_DESCRIPTION = "A shell was spawned by a program in a container with an attached terminal." -_POLICY_RULES = ["Terminal shell in container"] -_POLICY_ACTIONS = [{ - "type": "POLICY_ACTION_CAPTURE", - "name": "Terminal shell in container", - "filter": "", - "storageType": "S3", - "bucketName": "", - "isLimitedToContainer": False, - "beforeEventNs": 10000000000, - "afterEventNs": 20000000000 -}] - - -def policy_json(): - return """\ -{ - "name": "%s", - "description": "%s", - "origin": "Secure UI", - "versionId": "0.0.0", - "severity": 0, - "enabled": true, - "ruleNames": %s, - "notificationChannelIds": [], - "actions": %s, - "createdOn": 1596902934000, - "modifiedOn": 1597138586000 -} -""" % (_POLICY_NAME, _POLICY_DESCRIPTION, json.dumps(_POLICY_RULES), json.dumps(_POLICY_ACTIONS)) - - -with description("Policies v2") as self: - with before.all: - self.client = SdSecureClient(sdc_url=os.getenv("SDC_SECURE_URL", "https://secure.sysdig.com"), - token=os.getenv("SDC_SECURE_TOKEN")) - with after.each: - self.cleanup_policies() - - - def cleanup_policies(self): - _, res = self.client.list_policies() - for policy in res: - if str(policy["name"]).startswith("Test - "): - ok, res = self.client.delete_policy_id(policy["id"]) - expect((ok, res)).to(be_successful_api_call) - - - with it("is able to list all existing policies"): - ok, res = self.client.list_policies() - expect((ok, res)).to(be_successful_api_call) - - with it("is able to add a policy from JSON"): - call = self.client.add_policy_json(policy_json()) - expect(call).to(be_successful_api_call) - - with it("is able to create a policy with parameters"): - ok, res = self.client.add_policy(name=_POLICY_NAME, - description=_POLICY_DESCRIPTION, - rule_names=_POLICY_RULES, - actions=_POLICY_ACTIONS) - - expect((ok, res)).to(be_successful_api_call) - - with it("is able to delete all policies"): - _, 
policies = self.client.list_policies() - - for policy in policies: - ok, res = self.client.delete_policy_id(policy['id']) - expect((ok, res)).to(be_successful_api_call) - - with it("is able to create the default policies"): - ok, res = self.client.create_default_policies() - expect((ok, res)).to(be_successful_api_call) diff --git a/specs/secure/scanning_vulnerability_exceptions_spec.py b/specs/secure/scanning_vulnerability_exceptions_spec.py deleted file mode 100644 index c2cc4eab..00000000 --- a/specs/secure/scanning_vulnerability_exceptions_spec.py +++ /dev/null @@ -1,178 +0,0 @@ -import datetime -import os -import uuid - -from expects import equal, expect, contain, be_empty, have_key, be_true, have_keys, not_, be_false, be_above -from mamba import before, context, description, after, it - -from sdcclient import SdScanningClient -from specs import be_successful_api_call - -with description("Scanning vulnerability exceptions") as self: - with before.each: - self.client = SdScanningClient(sdc_url=os.getenv("SDC_SECURE_URL", "https://secure.sysdig.com"), - token=os.getenv("SDC_SECURE_TOKEN")) - - with after.each: - self.clean_bundles() - - - def clean_bundles(self): - _, res = self.client.list_vulnerability_exception_bundles() - for bundle in res: - if str(bundle["name"]).startswith("test_exception_bundle_"): - call = self.client.delete_vulnerability_exception_bundle(id=bundle["id"]) - expect(call).to(be_successful_api_call) - - - with context("when we are creating a new vulnerability exception bundle"): - with it("creates the bundle correctly"): - exception_bundle = f"test_exception_bundle_{uuid.uuid4()}" - exception_comment = "This is an example of an exception bundle" - ok, res = self.client.add_vulnerability_exception_bundle(name=exception_bundle, comment=exception_comment) - - expect((ok, res)).to(be_successful_api_call) - expect(res).to( - have_keys("id", items=be_empty, policyBundleId=equal("default"), version="1_0", - comment=equal(exception_comment), 
name=equal(exception_bundle)) - ) - - with it("creates the bundle correctly with name only and removes it correctly"): - exception_bundle = f"test_exception_bundle_{uuid.uuid4()}" - ok, res = self.client.add_vulnerability_exception_bundle(name=exception_bundle) - - expect((ok, res)).to(be_successful_api_call) - expect(res).to( - have_keys("id", items=be_empty, policyBundleId=equal("default"), version="1_0", - comment=be_empty, name=equal(exception_bundle)) - ) - - with context("when we are listing the vulnerability exception bundles"): - with before.each: - self.exception_bundle = f"test_exception_bundle_{uuid.uuid4()}" - ok, res = self.client.add_vulnerability_exception_bundle(name=self.exception_bundle) - expect((ok, res)).to(be_successful_api_call) - self.created_exception_bundle = res["id"] - - with it("retrieves the list of bundles"): - ok, res = self.client.list_vulnerability_exception_bundles() - - expect((ok, res)).to(be_successful_api_call) - expect(res).to(contain( - have_keys(id=self.created_exception_bundle, items=None, policyBundleId=equal("default"), - version=equal("1_0"), comment=be_empty, name=equal(self.exception_bundle)) - )) - - with context("when we are working with vulnerability exceptions in a bundle"): - with before.each: - ok, res = self.client.add_vulnerability_exception_bundle(name=f"test_exception_bundle_{uuid.uuid4()}") - expect((ok, res)).to(be_successful_api_call) - self.created_exception_bundle = res["id"] - - with it("is able to add a vulnerability exception to a bundle"): - exception_notes = "Microsoft Vulnerability" - exception_cve = "CVE-2020-1234" - ok, res = self.client.add_vulnerability_exception(bundle=self.created_exception_bundle, - cve=exception_cve, - note=exception_notes, - expiration_date=datetime.datetime(2030, 12, 31) - .timestamp()) - - expect((ok, res)).to(be_successful_api_call) - expect(res).to( - have_keys("id", "description", gate=equal("vulnerabilities"), trigger_id=equal(exception_cve), - 
notes=equal(exception_notes), enabled=be_true) - ) - - with context("and there are existing vulnerability exceptions"): - with before.each: - self.created_exception_cve = "CVE-2020-1234" - ok, res = self.client.add_vulnerability_exception(bundle=self.created_exception_bundle, - cve=self.created_exception_cve) - expect((ok, res)).to(be_successful_api_call) - self.created_exception = res["id"] - - with it("is able to list all the vulnerability exceptions from a bundle"): - ok, res = self.client.get_vulnerability_exception_bundle(bundle=self.created_exception_bundle) - - expect((ok, res)).to(be_successful_api_call) - expect(res).to( - have_keys(id=equal(self.created_exception_bundle), - items=contain( - have_keys( - id=equal(self.created_exception), - gate=equal("vulnerabilities"), - trigger_id=equal(self.created_exception_cve), - enabled=be_true, - ) - )) - ) - - with it("is able to remove them"): - _, ex_before = self.client.get_vulnerability_exception_bundle(bundle=self.created_exception_bundle) - ok, res = self.client.delete_vulnerability_exception(bundle=self.created_exception_bundle, - id=self.created_exception) - _, ex_after = self.client.get_vulnerability_exception_bundle(bundle=self.created_exception_bundle) - - expect((ok, res)).to(be_successful_api_call) - expect(ex_before).to( - have_key("items", contain( - have_keys( - id=equal(self.created_exception), - gate=equal("vulnerabilities"), - trigger_id=equal(self.created_exception_cve), - enabled=be_true, - ) - )) - ) - expect(ex_after).to( - have_key("items", not_(contain( - have_keys( - id=equal(self.created_exception), - gate=equal("vulnerabilities"), - trigger_id=equal(self.created_exception_cve), - enabled=be_true, - ) - ))) - ) - - with it("is able to update them"): - _, ex_before = self.client.get_vulnerability_exception_bundle(bundle=self.created_exception_bundle) - - ok, res = self.client.update_vulnerability_exception(bundle=self.created_exception_bundle, - id=self.created_exception, - 
cve="CVE-2020-1235", - enabled=False, - note="Dummy note", - expiration_date=datetime.datetime(2030, 12, 31) - .timestamp()) - - _, ex_after = self.client.get_vulnerability_exception_bundle(bundle=self.created_exception_bundle) - - expect((ok, res)).to(be_successful_api_call) - - expect(ex_before).to( - have_key("items", contain( - have_keys( - id=equal(self.created_exception), - gate=equal("vulnerabilities"), - trigger_id=equal(self.created_exception_cve), - notes=equal(None), - expiration_date=equal(None), - enabled=be_true, - ) - )) - ) - - expect(ex_after).to( - have_key("items", contain( - have_keys( - id=equal(self.created_exception), - gate=equal("vulnerabilities"), - trigger_id=equal("CVE-2020-1235"), - notes=equal("Dummy note"), - expiration_date=be_above(0), - enabled=be_false, - ) - )) - ) diff --git a/specs/secure/scanning_vulnerability_spec.py b/specs/secure/scanning_vulnerability_spec.py deleted file mode 100644 index d7a3fe54..00000000 --- a/specs/secure/scanning_vulnerability_spec.py +++ /dev/null @@ -1,37 +0,0 @@ -import os - -from expects import equal, expect, have_keys -from mamba import before, context, description, it - -from sdcclient import SdScanningClient -from specs import be_successful_api_call - -with description("Scanning vulnerability details") as self: - with before.each: - self.client = SdScanningClient(sdc_url=os.getenv("SDC_SECURE_URL", "https://secure.sysdig.com"), - token=os.getenv("SDC_SECURE_TOKEN")) - - with context("when retrieving a simple vulnerability"): - with it("retrieves the vulnerability details correctly if exists"): - vuln_id = "VULNDB-140292" - ok, res = self.client.get_vulnerability_details(id=vuln_id) - - expect((ok, res)).to(be_successful_api_call) - expect(res).to( - have_keys("description", "severity", "vendor_data", "nvd_data", "references", - "affected_packages", id=equal(vuln_id)) - ) - - with it("fails if it does not exist"): - non_existing_vuln_id = "VULNDB-NOEXISTS" - ok, res = 
self.client.get_vulnerability_details(id=non_existing_vuln_id) - - expect((ok, res)).to_not(be_successful_api_call) - expect(res).to(equal(f"Vulnerability {non_existing_vuln_id} was not found")) - - with it("fails if no id was provided"): - non_existing_vuln_id = None - ok, res = self.client.get_vulnerability_details(id=non_existing_vuln_id) - - expect((ok, res)).to_not(be_successful_api_call) - expect(res).to(equal(f"No vulnerability ID provided")) diff --git a/test/sample-falco-rules.yaml b/test/sample-falco-rules.yaml deleted file mode 100644 index 16df4a6d..00000000 --- a/test/sample-falco-rules.yaml +++ /dev/null @@ -1,5 +0,0 @@ -- rule: My Rule - desc: My Description - condition: evt.type=open and fd.name="/tmp/some-file.txt" - output: Impossible file opened - priority: DEBUG diff --git a/test/start_agent.sh b/test/start_agent.sh deleted file mode 100755 index 09cf19d5..00000000 --- a/test/start_agent.sh +++ /dev/null @@ -1,9 +0,0 @@ -#!/bin/bash - -set -euxo pipefail - -# Start an agent using the testing account API key to send some data -docker run -d -it --rm --name sysdig-agent --privileged --net host --pid host -e COLLECTOR=collector-staging.sysdigcloud.com -e ACCESS_KEY=$PYTHON_SDC_TEST_ACCESS_KEY -v /var/run/docker.sock:/host/var/run/docker.sock -v /dev:/host/dev -v /proc:/host/proc:ro -v /boot:/host/boot:ro -v /lib/modules:/host/lib/modules:ro -v /usr:/host/usr:ro --shm-size=350m sysdig/agent - -# make sure the agent starts sending data and the backend makes it available via API -sleep 60 diff --git a/test/stop_agent.sh b/test/stop_agent.sh deleted file mode 100755 index 359435fe..00000000 --- a/test/stop_agent.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/bash - -set -euxo pipefail - -docker logs sysdig-agent -docker stop sysdig-agent diff --git a/test/test_monitor_apis.sh b/test/test_monitor_apis.sh deleted file mode 100755 index f8ba0c61..00000000 --- a/test/test_monitor_apis.sh +++ /dev/null @@ -1,50 +0,0 @@ -#!/bin/bash - -set -euxo pipefail - 
-SCRIPT=$(readlink -f $0) -SCRIPTDIR=$(dirname $SCRIPT) - -export SDC_URL=https://app-staging.sysdigcloud.com - -AGENT_HOSTNAME=$(hostname -s) -SESSION_UUID=$(head -c 32 /dev/urandom | tr -dc 'a-zA-Z0-9') -ALERT_NAME=python-test-alert-$SESSION_UUID -DASHBOARD_1_NAME=prod-dashboard-$SESSION_UUID -DASHBOARD_2_NAME=dev-dashboard-$SESSION_UUID -EVENT_NAME=event-$SESSION_UUID -CAPTURE_NAME=apicapture-$SESSION_UUID -CHANNEL_NAME=channel-$SESSION_UUID -TEAM_NAME=team-$SESSION_UUID - -date; $SCRIPTDIR/../examples/create_alert.py -a $ALERT_NAME $PYTHON_SDC_TEST_MONITOR_API_TOKEN -date; $SCRIPTDIR/../examples/update_alert.py -a $ALERT_NAME $PYTHON_SDC_TEST_MONITOR_API_TOKEN -date; $SCRIPTDIR/../examples/delete_alert.py -a $ALERT_NAME $PYTHON_SDC_TEST_MONITOR_API_TOKEN -date; $SCRIPTDIR/../examples/dashboard.py -d $DASHBOARD_1_NAME $PYTHON_SDC_TEST_MONITOR_API_TOKEN -date; $SCRIPTDIR/../examples/dashboard_basic_crud.py $PYTHON_SDC_TEST_MONITOR_API_TOKEN -date; $SCRIPTDIR/../examples/dashboard_scope.py -date; $SCRIPTDIR/../examples/create_dashboard.py -d $DASHBOARD_2_NAME $PYTHON_SDC_TEST_MONITOR_API_TOKEN -date; $SCRIPTDIR/../examples/delete_dashboard.py -p $SESSION_UUID $PYTHON_SDC_TEST_MONITOR_API_TOKEN -date; $SCRIPTDIR/../examples/get_data_advanced.py $PYTHON_SDC_TEST_MONITOR_API_TOKEN $AGENT_HOSTNAME -date; $SCRIPTDIR/../examples/get_data_datasource.py $PYTHON_SDC_TEST_MONITOR_API_TOKEN -date; $SCRIPTDIR/../examples/get_data_simple.py $PYTHON_SDC_TEST_MONITOR_API_TOKEN -date; $SCRIPTDIR/../examples/list_alerts.py $PYTHON_SDC_TEST_MONITOR_API_TOKEN -date; $SCRIPTDIR/../examples/list_alert_notifications.py $PYTHON_SDC_TEST_MONITOR_API_TOKEN -date; $SCRIPTDIR/../examples/resolve_alert_notifications.py $PYTHON_SDC_TEST_MONITOR_API_TOKEN 1 -date; $SCRIPTDIR/../examples/list_dashboards.py $PYTHON_SDC_TEST_MONITOR_API_TOKEN -date; $SCRIPTDIR/../examples/list_hosts.py $PYTHON_SDC_TEST_MONITOR_API_TOKEN -date; $SCRIPTDIR/../examples/list_metrics.py 
$PYTHON_SDC_TEST_MONITOR_API_TOKEN -date; $SCRIPTDIR/../examples/post_event.py $PYTHON_SDC_TEST_MONITOR_API_TOKEN $EVENT_NAME -d "test event description" -date; $SCRIPTDIR/../examples/post_event_simple.py $PYTHON_SDC_TEST_MONITOR_API_TOKEN $EVENT_NAME "test event description" -date; $SCRIPTDIR/../examples/list_events.py $PYTHON_SDC_TEST_MONITOR_API_TOKEN -date; $SCRIPTDIR/../examples/delete_event.py -e $EVENT_NAME $PYTHON_SDC_TEST_MONITOR_API_TOKEN -date; $SCRIPTDIR/../examples/print_data_retention_info.py $PYTHON_SDC_TEST_MONITOR_API_TOKEN -date; $SCRIPTDIR/../examples/print_explore_grouping.py $PYTHON_SDC_TEST_MONITOR_API_TOKEN -date; $SCRIPTDIR/../examples/print_user_info.py $PYTHON_SDC_TEST_MONITOR_API_TOKEN -date; $SCRIPTDIR/../examples/list_users.py $PYTHON_SDC_TEST_MONITOR_API_TOKEN -date; $SCRIPTDIR/../examples/list_sysdig_captures.py $PYTHON_SDC_TEST_MONITOR_API_TOKEN -date; $SCRIPTDIR/../examples/create_sysdig_capture.py $PYTHON_SDC_TEST_MONITOR_API_TOKEN $AGENT_HOSTNAME $CAPTURE_NAME 10 -date; $SCRIPTDIR/../examples/notification_channels.py -c $CHANNEL_NAME $PYTHON_SDC_TEST_MONITOR_API_TOKEN -date; $SCRIPTDIR/../examples/list_notification_channels.py $PYTHON_SDC_TEST_MONITOR_API_TOKEN -date; $SCRIPTDIR/../examples/user_team_mgmt.py $PYTHON_SDC_TEST_MONITOR_API_TOKEN $TEAM_NAME example-user@example-domain.com -date; $SCRIPTDIR/../examples/user_team_mgmt_extended.py $PYTHON_SDC_TEST_MONITOR_API_TOKEN $TEAM_NAME example-user@example-domain.com diff --git a/test/test_secure_apis.sh b/test/test_secure_apis.sh deleted file mode 100755 index 7a91c451..00000000 --- a/test/test_secure_apis.sh +++ /dev/null @@ -1,235 +0,0 @@ -#!/bin/bash - -set -euxo pipefail - -SCRIPT=$(readlink -f $0) -SCRIPTDIR=$(dirname $SCRIPT) - -export SDC_URL=https://secure-staging.sysdig.com - -# we expect this to fail with 405. It only works for on-premise accounts. 
-set +e -OUT=`$SCRIPTDIR/../examples/set_secure_system_falco_rules.py $PYTHON_SDC_TEST_API_TOKEN $SCRIPTDIR/sample-falco-rules.yaml` -if [[ $? != 1 ]]; then - echo "set_secure_system_falco_rules.py succeeded when it should have failed" - exit 1 -fi - -if [[ "$OUT" != "Access is denied: Not enough privileges to complete the action" ]]; then - echo "Unexpected output from set_secure_system_falco_rules.py: $OUT" - exit 1 -fi -set -e - -# Get the system falco rules file. Don't validate it, just verify that it can be fetched. -$SCRIPTDIR/../examples/get_secure_system_falco_rules.py $PYTHON_SDC_TEST_API_TOKEN | tee /tmp/falco_rules.yaml - -NOW=$(date) -cat < /tmp/test_apis_user_rules.yaml -- rule: My Rule as of $NOW - desc: My Description - condition: evt.type=open and fd.name="/tmp/some-file.txt" - output: Impossible file opened - priority: INFO -EOF - -$SCRIPTDIR/../examples/set_secure_user_falco_rules.py $PYTHON_SDC_TEST_API_TOKEN /tmp/test_apis_user_rules.yaml -$SCRIPTDIR/../examples/get_secure_user_falco_rules.py $PYTHON_SDC_TEST_API_TOKEN > /tmp/falco_rules.yaml -diff /tmp/falco_rules.yaml /tmp/test_apis_user_rules.yaml - - -# Delete all policies and then get them. There should be none. -$SCRIPTDIR/../examples/delete_all_policies.py $PYTHON_SDC_TEST_API_TOKEN -OUT=`$SCRIPTDIR/../examples/list_policies.py $PYTHON_SDC_TEST_API_TOKEN` -if [[ $OUT != *"[]"* ]]; then - echo "Unexpected output after deleting all policies" - exit 1 -fi - -# Create the default set of policies and then fetch them. There should -# be 1, corresponding to the system falco rule. -$SCRIPTDIR/../examples/create_default_policies.py $PYTHON_SDC_TEST_API_TOKEN -OUT=`$SCRIPTDIR/../examples/list_policies.py $PYTHON_SDC_TEST_API_TOKEN` -if [[ $OUT != *"\"Suspicious Filesystem Changes\""* ]]; then - echo "Unexpected output after creating default policies" - exit 1 -fi - -# Get that policy, change the name, and create a new duplicate policy. 
-OUT=`$SCRIPTDIR/../examples/get_policy.py $PYTHON_SDC_TEST_API_TOKEN "Suspicious Filesystem Changes"` -MY_POLICY=$OUT -if [[ $OUT != *"\"Suspicious Filesystem Changes\""* ]]; then - echo "Could not fetch policy with name \"Suspicious Filesystem Changes\"" - exit 1 -fi - -NEW_POLICY=`echo $MY_POLICY | sed -e "s/Suspicious Filesystem Changes/Suspicious Filesystem Changes 2/g" | sed -e 's/"id": [0-9]*,//' | sed -e 's/"version": [0-9]*/"version": null/'` -OUT=`echo $NEW_POLICY | $SCRIPTDIR/../examples/add_policy.py $PYTHON_SDC_TEST_API_TOKEN` -if [[ $OUT != *"\"Suspicious Filesystem Changes 2\""* ]]; then - echo "Could not create new policy" - exit 1 -fi - -# Change the description of the new policy and update it. -ID=`echo $OUT | grep -E -o '"id": [^,]+,' | awk '{print $2}' | awk -F, '{print $1}'` -MODIFIED_POLICY=`echo $MY_POLICY | sed -e "s/Suspicious Filesystem Changes/Suspicious Filesystem Changes 2/g" | sed -e "s,Identified suspicious filesystem activity that might change sensitive/important files,My New Description,g" | sed -e "s/\"id\": [0-9]*,/\"id\": $ID,/"` -OUT=`echo $MODIFIED_POLICY | $SCRIPTDIR/../examples/update_policy.py $PYTHON_SDC_TEST_API_TOKEN` -if [[ $OUT != *"\"description\": \"My New Description\""* ]]; then - echo "Could not update policy \"Suspicious Filesystem Changes 2\"" - exit 1 -fi - -# Delete the new policy. -OUT=`$SCRIPTDIR/../examples/delete_policy.py --name "Suspicious Filesystem Changes 2" $PYTHON_SDC_TEST_API_TOKEN` -if [[ $OUT != *"\"Suspicious Filesystem Changes 2\""* ]]; then - echo "Could not delete policy \"Suspicious Filesystem Changes 2\"" - exit 1 -fi - -OUT=`$SCRIPTDIR/../examples/list_policies.py $PYTHON_SDC_TEST_API_TOKEN` -if [[ $OUT = *"\"Suspicious Filesystem Changes 2\""* ]]; then - echo "After deleting policy Suspicious Filesystem Changes 2, policy was still present?" 
- exit 1 -fi - -# Make a copy again, but this time delete by id -NEW_POLICY=`echo $MY_POLICY | sed -e "s/Suspicious Filesystem Changes/Another Copy Of Suspicious Filesystem Changes/g" | sed -e 's/"id": [0-9]*,//' | sed -e 's/"version": [0-9]*/"version": null/'` -OUT=`echo $NEW_POLICY | $SCRIPTDIR/../examples/add_policy.py $PYTHON_SDC_TEST_API_TOKEN` -if [[ $OUT != *"\"Another Copy Of Suspicious Filesystem Changes\""* ]]; then - echo "Could not create new policy" - exit 1 -fi - -ID=`echo $OUT | grep -E -o '"id": [^,]+,' | awk '{print $2}' | awk -F, '{print $1}'` - -OUT=`$SCRIPTDIR/../examples/delete_policy.py --id $ID $PYTHON_SDC_TEST_API_TOKEN` -if [[ $OUT != *"\"Another Copy Of Suspicious Filesystem Changes\""* ]]; then - echo "Could not delete policy \"Another Copy Of Suspicious Filesystem Changes\"" - exit 1 -fi - -OUT=`$SCRIPTDIR/../examples/list_policies.py $PYTHON_SDC_TEST_API_TOKEN` -if [[ $OUT = *"\"Another Copy Of Write below binary dir\""* ]]; then - echo "After deleting policy Another Copy Of Suspicious Filesystem Changes, policy was still present?" - exit 1 -fi - -# Trigger some events -FOUND=0 - -for i in $(seq 10); do - sudo cat /etc/shadow - sleep 10 - - EVTS=`$SCRIPTDIR/../examples/get_secure_policy_events.py $PYTHON_SDC_TEST_API_TOKEN 60` - - if [[ "$EVTS" != "" ]]; then - FOUND=1 - break; - fi -done - -if [[ $FOUND == 0 ]]; then - echo "Did not find any policy events after 10 attempts..." - exit 1 -fi - - -# -# Test it again with policy API V1 -# - -# Delete all policies and then get them. There should be none. -$SCRIPTDIR/../examples/delete_all_policies_v1.py $PYTHON_SDC_TEST_API_TOKEN -OUT=`$SCRIPTDIR/../examples/list_policies_v1.py $PYTHON_SDC_TEST_API_TOKEN` -if [[ $OUT != *"\"policies\": []"* ]]; then - echo "Unexpected output after deleting all policies V1" - exit 1 -fi - -# Create the default set of policies and then get them. There should -# be 1, corresponding to the system falco rule. 
-$SCRIPTDIR/../examples/create_default_policies_v1.py $PYTHON_SDC_TEST_API_TOKEN -OUT=`$SCRIPTDIR/../examples/list_policies_v1.py $PYTHON_SDC_TEST_API_TOKEN` -if [[ $OUT != *"\"name\": \"Write below binary dir\""* ]]; then - echo "Unexpected output after creating default policies V1" - exit 1 -fi - -# Get that policy, change the name, and create a new duplicate policy. -OUT=`$SCRIPTDIR/../examples/get_policy_v1.py $PYTHON_SDC_TEST_API_TOKEN "Write below binary dir"` -MY_POLICY=$OUT -if [[ $OUT != *"\"name\": \"Write below binary dir\""* ]]; then - echo "Could not fetch policy V1 with name \"Write below binary dir\"" - exit 1 -fi - -NEW_POLICY=`echo $MY_POLICY | sed -e "s/Write below binary dir/Copy Of Write below binary dir/g" | sed -e 's/"id": [0-9]*,//' | sed -e 's/"version": [0-9]*/"version": null/'` -OUT=`echo $NEW_POLICY | $SCRIPTDIR/../examples/add_policy_v1.py $PYTHON_SDC_TEST_API_TOKEN` -if [[ $OUT != *"\"name\": \"Copy Of Write below binary dir\""* ]]; then - echo "Could not create new policy V1" - exit 1 -fi - -# Change the description of the new policy and update it. -MODIFIED_POLICY=`echo $MY_POLICY | sed -e "s/an attempt to write to any file below a set of binary directories/My New Description/g"` -OUT=`echo $MODIFIED_POLICY | $SCRIPTDIR/../examples/update_policy_v1.py $PYTHON_SDC_TEST_API_TOKEN` -if [[ $OUT != *"\"description\": \"My New Description\""* ]]; then - echo "Could not update policy V1 \"Copy Of Write below binary dir\"" - exit 1 -fi - -# Delete the new policy. 
-OUT=`$SCRIPTDIR/../examples/delete_policy_v1.py --name "Copy Of Write below binary dir" $PYTHON_SDC_TEST_API_TOKEN` -if [[ $OUT != *"\"name\": \"Copy Of Write below binary dir\""* ]]; then - echo "Could not delete policy V1 \"Copy Of Write below binary dir\"" - exit 1 -fi - -OUT=`$SCRIPTDIR/../examples/list_policies_v1.py $PYTHON_SDC_TEST_API_TOKEN` -if [[ $OUT = *"\"name\": \"Copy Of Write below binary dir\""* ]]; then - echo "After deleting policy V1 Copy Of Write below binary dir, policy was still present?" - exit 1 -fi - -# Make a copy again, but this time delete by id -NEW_POLICY=`echo $MY_POLICY | sed -e "s/Write below binary dir/Another Copy Of Write below binary dir/g" | sed -e 's/"id": [0-9]*,//' | sed -e 's/"version": [0-9]*/"version": null/'` -OUT=`echo $NEW_POLICY | $SCRIPTDIR/../examples/add_policy_v1.py $PYTHON_SDC_TEST_API_TOKEN` -if [[ $OUT != *"\"name\": \"Another Copy Of Write below binary dir\""* ]]; then - echo "Could not create new policy V1" - exit 1 -fi - -ID=`echo $OUT | grep -E -o '"id": [^,]+,' | awk '{print $2}' | awk -F, '{print $1}'` - -OUT=`$SCRIPTDIR/../examples/delete_policy_v1.py --id $ID $PYTHON_SDC_TEST_API_TOKEN` -if [[ $OUT != *"\"name\": \"Another Copy Of Write below binary dir\""* ]]; then - echo "Could not delete policy V1 \"Copy Of Write below binary dir\"" - exit 1 -fi - -OUT=`$SCRIPTDIR/../examples/list_policies_v1.py $PYTHON_SDC_TEST_API_TOKEN` -if [[ $OUT = *"\"name\": \"Another Copy Of Write below binary dir\""* ]]; then - echo "After deleting policy V1 Another Copy Of Write below binary dir, policy was still present?" 
- exit 1 -fi - - -WRITE_BELOW_BINARY_POS=`$SCRIPTDIR/../examples/list_policies_v1.py $PYTHON_SDC_TEST_API_TOKEN | grep -b "\"name\": \"Write below binary dir" | awk -F: '{print $1}'` - -# Get the list of policy ids only, reverse the list, and set the order -OUT=`$SCRIPTDIR/../examples/list_policies_v1.py -o $PYTHON_SDC_TEST_API_TOKEN | jq reverse | $SCRIPTDIR/../examples/set_policy_order_v1.py $PYTHON_SDC_TEST_API_TOKEN` - -if [ $? != 0 ]; then - echo "Could not set policy order?" - exit 1 -fi - -NEW_WRITE_BELOW_BINARY_POS=`$SCRIPTDIR/../examples/list_policies_v1.py $PYTHON_SDC_TEST_API_TOKEN | grep -b "\"name\": \"Write below binary dir" | awk -F: '{print $1}'` - -if [[ $NEW_WRITE_BELOW_BINARY_POS -lt $WRITE_BELOW_BINARY_POS ]]; then - echo "After reordering policies, Write Below Binary Dir policy did not move to the end?" - exit 1 -fi - -echo $OUT diff --git a/utils/sync_pagerduty_policies.py b/utils/sync_pagerduty_policies.py deleted file mode 100644 index 669d94ff..00000000 --- a/utils/sync_pagerduty_policies.py +++ /dev/null @@ -1,486 +0,0 @@ -#!/usr/bin/env python -# -# Synchronize list of escalation policies with notification channels in Sysdig -# -import argparse -import copy -import json -import os -import sys -from functools import reduce - -import requests - -sys.path.insert( - 0, os.path.join(os.path.dirname(os.path.realpath(sys.argv[0])), '..')) - -from sdcclient import SdMonitorClient - - -# -# Parse arguments -# -parser = argparse.ArgumentParser( - description='Synchronize PagerDuty escalation policies with Sysdig, to make sure each escalation policy has a notification channel enabled in Sysdig') -parser.add_argument('sysdig-token', nargs=1, help='Sysdig API token') -parser.add_argument( - 'pagerduty-account-id', nargs=1, help='PagerDuty account ID') -parser.add_argument( - 'pagerduty-access-key', nargs=1, help='PagerDuty API access key') -parser.add_argument( - '--link', - action='store_true', - help='Set to creat notification channels in Sysdig and 
services in PagerDuty for all escalation policies' -) -parser.add_argument( - '--unlink', - action='store_true', - help='Set to remove notification channels connected to PagerDuty escalation policies' -) -parser.add_argument( - '--dry-run', - action='store_true', - help='Set to get a report of changes, without actually apply them') - -args = vars(parser.parse_args()) - - -def run(sysdig_token, pager_duty_id, pager_duty_token, link, unlink, dry_run): - if not link and not unlink: - # by default, you're going to link accounts - link = True - - sysdig = SdMonitorClient(sysdig_token) - pager_duty = PagerDutyAPI(pager_duty_token) - actions_factory = ActionFactory(sysdig, pager_duty, pager_duty_id) - - # - # Get list of Sysdig notification channels - # - ok, res = sysdig.list_notification_channels() - if not ok: - print('\nUnable to fetch Sysdig notification channels') - print(res) - sys.exit(1) - - # - # Find PagerDuty notification channels - # - pager_duty_channels = [channel for channel in res['notificationChannels'] if channel['type'] == 'PAGER_DUTY'] - print('Found {} PagerDuty notification {} configured in Sysdig'.format( - len(pager_duty_channels), pluralize('channel', len(pager_duty_channels)))) - # print(json.dumps(pager_duty_channels, sort_keys=True, indent=4)) - - # Build map of notification channel -> integration key - def get_integration_map(acc, channel): - acc[channel['options']['serviceKey']] = channel - return acc - - integration_keys = reduce(get_integration_map, pager_duty_channels, {}) - - # - # Get list of PagerDuty escalation policies - # - escalation_policies = pager_duty.get( - '/escalation_policies')['escalation_policies'] - print('Found {} PagerDuty escalation {}'.format( - len(escalation_policies), - pluralize('policy', len(escalation_policies), 'policies'))) - escalation_policies_map = {} - for escalation_policy in escalation_policies: - escalation_policies_map[escalation_policy['id']] = escalation_policy - # 
print(json.dumps(escalation_policies, sort_keys=True, indent=4)) - - # - # Get list of PagerDuty services - # - services = pager_duty.get('/services', {'include[]': ['integrations']})['services'] - print('Found {} PagerDuty {}'.format( - len(services), pluralize('service', len(services)))) - # print(json.dumps(services, sort_keys=True, indent=4)) - - # - # Get Sysdig vendor configuration - # - sysdig_vendor = pager_duty.get('/vendors', {'query': 'sysdig', 'limit': 1, - 'offset': 0, 'total': 'false'})['vendors'][0] - - # - # Get integration details - # - for service in services: - for integration in service['integrations']: - integration['details'] = pager_duty.get( - '/services/{}/integrations/{}'.format(service['id'], integration['id']))['integration'] - - # - # Find integrations with Sysdig - # - service_integration_keys = {} - for service in services: - service['sysdig_integrations'] = [integration for integration in service['integrations'] - if 'vendor' in integration and integration['vendor'] and integration['vendor']['id'] == sysdig_vendor['id']] - - for integration in service['sysdig_integrations']: - service_integration_keys[integration['integration_key']] = { - 'service': service, - 'integration': integration - } - - # - # Get actions - # - actions = [] - - if unlink: - # - # delete all PagerDuty notification channels in Sysdig - # - for channel in pager_duty_channels: - actions.append({ - 'info': 'Sysdig: Delete channel "{}" ({})'.format(channel['name'], channel['id']), - 'fn': actions_factory.delete_notification_channel(channel) - }) - - # - # delete integration with Sysdig - # - for service in services: - if service['sysdig_integrations']: - if len(service['sysdig_integrations']) == len(service['integrations']): - # - # service connected to Sysdig only: delete service - # - actions.append({ - 'info': 'PagerDuty: Delete service "{}" ({})'.format(service['name'], service['id']), - 'fn': actions_factory.delete_service(service['id']) - }) - else: - # - # 
service with some integrations with Sysdig: delete individual integrations - # - for integration in service['sysdig_integrations']: - actions.append( - { - 'info': 'PagerDuty: Delete integration "{}" ({}) in service "{}" ({})'.format( - integration['name'], - integration['id'], - service['name'], - service['id']), - 'fn': actions_factory.delete_integration( - service['id'], - integration['id'])}) - - if link: - # - # delete all PagerDuty notification channels in Sysdig that do NOT have an integration in PagerDuty - # - for channel in pager_duty_channels: - if not channel['options']['serviceKey'] in service_integration_keys: - actions.append({ - 'info': 'Remove notification channel "{}" not connected to any integration'.format(channel['name']), - 'fn': actions_factory.delete_notification_channel(channel) - }) - - for policy in escalation_policies: - service_name = '{} (Sysdig)'.format(policy['name']) - - policy_services = [service for service in services if service['escalation_policy']['id'] == policy['id']] - sysdig_services = [service for service in policy_services if service['sysdig_integrations']] - disconnected_services = [] - for service in sysdig_services: - for integration in service['integrations']: - if integration['vendor'] and integration['vendor']['id'] == sysdig_vendor['id'] and integration['integration_key'] not in integration_keys: - disconnected_services.append({ - 'service': service, - 'integration': integration - }) - - if not sysdig_services: - # - # create service and integration in PagerDuty, and notification channel in Sysdig - # - actions.append({'info': 'Create service, integration, and notification channel for policy "{}"'.format( - policy['name']), 'fn': actions_factory.create_all(policy, sysdig_vendor)}) - elif disconnected_services: - # - # create notification channel to disconnected integration - # - actions.append( - { - 'info': 'Restore notification channel for disconnected service "{}" for policy "{}"'.format( - 
disconnected_services[0]['service']['name'], - policy['name']), - 'fn': actions_factory.create_notification_channel( - policy, - disconnected_services[0]['service'], - disconnected_services[0]['integration'])}) - else: - for service in sysdig_services: - for integration in service['integrations']: - if integration['vendor'] and integration['vendor']['id'] == sysdig_vendor['id'] and integration['integration_key'] in integration_keys: - channel = integration_keys[integration['integration_key']] - if channel['name'] != policy['name']: - # - # rename channel to match new policy name - # - actions.append({ - 'info': 'Rename notification channel "{}" to policy name "{}"'.format(channel['name'], policy['name']), - 'fn': actions_factory.rename_notification_channel(channel, policy['name'], service_name) - }) - elif channel['options']['serviceName'] != service_name: - # - # rename channel service to service name - # - actions.append({ - 'info': 'Rename channel service "{}" to service name "{}"'.format(service['name'], service_name), - 'fn': actions_factory.rename_notification_channel(channel, policy['name'], service_name) - }) - - if len(service['integrations']) == 1 and service['name'] != service_name: - # - # rename service to match new policy name - # - actions.append({ - 'info': 'Rename service "{}" to "{}"'.format(service['name'], service_name), - 'fn': actions_factory.rename_service(service, service_name) - }) - - if actions: - # - # Run action, or just print the task in dry mode - # - print('') - print('Action items:') - for action in actions: - if dry_run: - print('\t* {}'.format(action['info'])) - else: - print('\t* {}...'.format(action['info'])) - action['fn']() - print('\t Done!') - - if dry_run: - print('\nTo apply changes, execute the same command without "--dry-run" parameter:\npython {}'.format( - ' '.join([arg for arg in sys.argv if arg != '--dry-run']))) - - else: - if unlink: - print('All escalation policies have been disconnected from Sysdig!') - if link: 
- print('All escalation policies are already connected to Sysdig!') - - -class PagerDutyAPI(): - def __init__(self, token): - self._base_url = 'https://api.pagerduty.com' - self._token = token - - def get(self, endpoint, params=None): - return self._base_request('get', endpoint, params=params) - - def post(self, endpoint, data=None): - return self._base_request('post', endpoint, data=data) - - def put(self, endpoint, data=None): - return self._base_request('put', endpoint, data=data) - - def delete(self, endpoint, params=None): - return self._base_request('delete', endpoint, params=params) - - def _base_request(self, method, endpoint, params=None, data=None): - url = self._get_url(endpoint) - request_data = json.dumps(data) if data else None - response = getattr(requests, method)(url, params=params, data=request_data, headers=self._get_headers()) - - return self._handle_response(response, url) - - def _handle_response(self, response, url): - if response.status_code >= 300: - error = 'PagerDuty API request {} {} failed: {}, {}'.format( - response.request.method, url, response.status_code, response.content) - - print(error) - raise Exception(error) - elif response.status_code == 204: - return None - else: - return self._parse_response(response) - - def _parse_response(self, response): - return response.json() - - def _get_url(self, endpoint): - return '{}{}'.format(self._base_url, endpoint) - - def _get_headers(self): - return { - 'Accept': 'application/vnd.pagerduty+json;version=2', - 'Content-Type': 'application/json', - 'Authorization': 'Token token={}'.format(self._token) - } - - -class ActionFactory(): - def __init__(self, sysdig, pager_duty, pager_duty_id): - self._sysdig = sysdig - self._pager_duty = pager_duty - self._pager_duty_id = pager_duty_id - - def delete_service(self, service_id): - def fn(): - self._pager_duty.delete('/services/{}'.format(service_id)) - - return fn - - def delete_integration(self, service_id, integration_id): - def fn(): - 
self._pager_duty.delete('/services/{}/integrations/{}'.format(service_id, integration_id)) - - return fn - - def delete_notification_channel(self, channel): - def fn(): - self._sysdig.delete_notification_channel(channel) - - return fn - - def create_all(self, policy, sysdig_vendor): - def fn(): - new_service = self._pager_duty.post('/services', { - 'service': { - 'type': 'service', - 'name': '{} (Sysdig)'.format(policy['name']), - 'auto_resolve_timeout': None, - 'acknowledgement_timeout': None, - 'status': 'active', - 'escalation_policy': { - 'id': policy['id'], - 'type': 'escalation_policy_reference' - }, - 'incident_urgency_rule': { - 'type': 'use_support_hours', - 'during_support_hours': { - 'type': 'constant', - 'urgency': 'high' - }, - 'outside_support_hours': { - 'type': 'constant', - 'urgency': 'low' - } - }, - 'support_hours': { - 'type': 'fixed_time_per_day', - 'time_zone': 'America/Lima', - 'start_time': '09:00:00', - 'end_time': '17:00:00', - 'days_of_week': [ - 1, - 2, - 3, - 4, - 5 - ] - }, - 'scheduled_actions': [ - { - 'type': 'urgency_change', - 'at': { - 'type': 'named_time', - 'name': 'support_hours_start' - }, - 'to_urgency': 'high' - } - ], - 'alert_creation': 'create_alerts_and_incidents', - 'alert_grouping': 'time', - 'alert_grouping_timeout': 2 - } - })['service'] - - new_integration = self._pager_duty.post('/services/{}/integrations'.format(new_service['id']), { - 'integration': { - 'type': 'integration_inbound_integration', - 'name': 'Sysdig', - 'vendor': { - 'id': sysdig_vendor['id'], - 'type': 'vendor' - }, - 'service': { - 'id': new_service['id'], - 'summary': new_service['summary'], - 'type': new_service['type'], - 'self': new_service['self'], - 'html_url': new_service['html_url'], - } - } - })['integration'] - - self._sysdig.create_notification_channel({ - 'type': 'PAGER_DUTY', - 'enabled': True, - 'sendTestNotification': False, - 'name': policy['name'], - 'options': { - 'account': self._pager_duty_id, - 'serviceKey': 
new_integration['integration_key'], - 'serviceName': new_service['name'], - 'notifyOnOk': True, - 'notifyOnResolve': True - } - }) - - return fn - - def create_notification_channel(self, policy, service, integration): - def fn(): - self._sysdig.create_notification_channel({ - "type": "PAGER_DUTY", - "enabled": True, - "sendTestNotification": False, - "name": policy['name'], - "options": { - "account": self._pager_duty_id, - "serviceKey": integration['integration_key'], - "serviceName": service['name'], - "notifyOnOk": True, - "notifyOnResolve": True - } - }) - - return fn - - def rename_notification_channel(self, channel, channel_name, service_name): - def fn(): - new_channel = copy.deepcopy(channel) - new_channel['name'] = channel_name - new_channel['options']['serviceName'] = service_name - self._sysdig.update_notification_channel(new_channel) - - return fn - - def rename_service(self, service, service_name): - def fn(): - new_service = copy.deepcopy(service) - new_service['name'] = service_name - self._pager_duty.put('/services/{}'.format(service['id']), new_service) - - return fn - - -def pluralize(term, count, plural=None): - if count == 1: - return term - else: - if plural is None: - return '{}s'.format(term) - else: - return plural - - -# let's get started! -print('') - - -run(args['sysdig-token'][0], args['pagerduty-account-id'][0], - args['pagerduty-access-key'][0], args['link'], args['unlink'], args['dry_run'])