diff --git a/.editorconfig b/.editorconfig new file mode 100644 index 000000000..87efeb1e6 --- /dev/null +++ b/.editorconfig @@ -0,0 +1,14 @@ +# https://editorconfig.org/ + +root = true + +[*] +indent_style = space +indent_size = 4 +insert_final_newline = true +trim_trailing_whitespace = false +end_of_line = lf +charset = utf-8 + +[*.py] +max_line_length = 100 diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md new file mode 100644 index 000000000..96507c893 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -0,0 +1,27 @@ +--- +name: Bug report +about: Report a problem +title: "[Bug] " +labels: 'bug' +assignees: '' + +--- +### Description + +### Steps to Reproduce + +1. +2. +3. + +### Expected Behavior + + +### Actual Behavior + + +### Basic Information + +- code42cli version: +- python version: +- operating system: diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md new file mode 100644 index 000000000..3368e9398 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -0,0 +1,19 @@ +--- +name: Feature request +about: Suggest an idea for code42cli +title: "[Enhancement] YOUR IDEA!" +labels: enhancement +assignees: '' + +--- + +## Summary + + +## Proposed API + + + +## Intended Use Case + diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml new file mode 100644 index 000000000..a8dc027d0 --- /dev/null +++ b/.github/workflows/build.yml @@ -0,0 +1,59 @@ +name: build + +on: + push: + branches: + - main + tags: + - v* + pull_request: + +jobs: + build: + + runs-on: ubuntu-latest + strategy: + matrix: + python: ["3.9", "3.10", "3.11", "3.12"] + + steps: + - uses: actions/checkout@v2 + - name: Setup Python + uses: actions/setup-python@v1 + with: + python-version: ${{ matrix.python }} + - name: Install tox + run: | + pip install tox==3.17.1 + pip install . 
+ - name: Run Unit tests + run: tox -e py # Run tox using the version of Python in `PATH` + - name: Submit coverage report + uses: codecov/codecov-action@v1.0.7 + with: + file: code42cli/coverage.xml + - name: Checkout mock servers + uses: actions/checkout@v2 + with: + repository: code42/code42-mock-servers + path: code42-mock-servers + - name: Add mock servers host addresses + run: | + sudo tee -a /etc/hosts <> ~/.ssh/known_hosts + ssh-agent -a $SSH_AUTH_SOCK > /dev/null + ssh-add - <<< "${{ secrets.C42_EVENT_EXTRACTOR_PRIVATE_DEPLOY_KEY }}" + - name: Install tox + run: | + pip install tox==3.17.1 + pip install . + - name: Run Unit tests + env: + SSH_AUTH_SOCK: /tmp/ssh_agent.sock + run: tox -e nightly # Run tox using latest main branch from py42/c42eventextractor + - name: Notify Slack Action + uses: 8398a7/action-slack@v3 + with: + status: ${{ job.status }} + fields: repo,message,commit,author,action,workflow + env: + SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} + if: failure() diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml new file mode 100644 index 000000000..557793893 --- /dev/null +++ b/.github/workflows/publish.yml @@ -0,0 +1,69 @@ +name: publish + +on: + release: + types: [published] + +jobs: + deploy: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - name: Set up Python + uses: actions/setup-python@v1 + with: + python-version: '3.9' + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install setuptools wheel twine + - name: Build Release + run: | + python setup.py sdist bdist_wheel + - name: Set File Names and Release IDs + run: | + src_file=( ./dist/*.tar.gz ) + wheel_file=( ./dist/*.whl ) + echo "RELEASE_ID=$(jq --raw-output '.release.id' $GITHUB_EVENT_PATH)" >> $GITHUB_ENV + echo "SOURCE_DIST_FILE=$(basename $src_file)" >> $GITHUB_ENV + echo "WHEEL_FILE=$(basename $wheel_file)" >> $GITHUB_ENV + - name: Set Upload Url + run: | + echo 
"UPLOAD_URL=https://uploads.github.com/repos/${GITHUB_REPOSITORY}/releases/${RELEASE_ID}/assets{?name,label}" >> $GITHUB_ENV + - name: Output Variables For Uploading + id: get_upload_vars + run: | + echo "Release ID: $RELEASE_ID" + echo "Source Dist File: $SOURCE_DIST_FILE" + echo "Source Dist Upload Url: $SOURCE_DIST_URL" + echo "Wheel File: $WHEEL_FILE" + echo "Upload Url: $UPLOAD_URL" + echo "::set-output name=source_dist_path::./dist/${SOURCE_DIST_FILE}" + echo "::set-output name=source_dist_name::${SOURCE_DIST_FILE}" + echo "::set-output name=wheel_path::./dist/${WHEEL_FILE}" + echo "::set-output name=wheel_name::./dist/${WHEEL_FILE}" + echo "::set-output name=upload_url::${UPLOAD_URL}" + - name: Upload Source Distribution to GitHub release + uses: actions/upload-release-asset@v1.0.1 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + with: + upload_url: ${{ steps.get_upload_vars.outputs.upload_url }} + asset_path: ${{ steps.get_upload_vars.outputs.source_dist_path }} + asset_name: ${{ steps.get_upload_vars.outputs.source_dist_name }} + asset_content_type: application/x-gzip + - name: Upload Wheel to GitHub Release + uses: actions/upload-release-asset@v1.0.1 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + with: + upload_url: ${{ steps.get_upload_vars.outputs.upload_url }} + asset_path: ${{ steps.get_upload_vars.outputs.wheel_path }} + asset_name: ${{ steps.get_upload_vars.outputs.wheel_name }} + asset_content_type: application/zip + - name: Publish Build to PyPI + env: + TWINE_USERNAME: '__token__' + TWINE_PASSWORD: ${{ secrets.PYPI_ACCESS_TOKEN }} + run: | + twine upload dist/* diff --git a/.github/workflows/style.yml b/.github/workflows/style.yml new file mode 100644 index 000000000..383e31961 --- /dev/null +++ b/.github/workflows/style.yml @@ -0,0 +1,27 @@ +name: style + +on: + push: + branches: + - main + tags: + - v* + pull_request: + +jobs: + + style: + + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - name: Setup Python + uses: 
actions/setup-python@v1 + with: + python-version: '3.11' + - name: Install tox + run: | + pip install tox==3.17.1 + pip install . + - name: Run style checks + run: tox -e style diff --git a/.gitignore b/.gitignore index 4c964cf03..a1db36551 100644 --- a/.gitignore +++ b/.gitignore @@ -1,11 +1,6 @@ -# Test config file -*config.cfg +*.csv -# IDE files -.idea/ - -# Database files -*.db +.DS_Store # Byte-compiled / optimized / DLL files __pycache__/ @@ -15,6 +10,9 @@ __pycache__/ # C extensions *.so +# IDE files +.idea + # Distribution / packaging .Python build/ diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index d9e8b6632..c2871db6b 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,6 +1,28 @@ repos: -- repo: https://github.com/ambv/black - rev: stable - hooks: - - id: black - language_version: python3.6 \ No newline at end of file + - repo: https://github.com/asottile/pyupgrade + rev: v2.7.1 + hooks: + - id: pyupgrade + args: ["--py36-plus"] + - repo: https://github.com/asottile/reorder_python_imports + rev: v2.3.0 + hooks: + - id: reorder-python-imports + args: ["--application-directories", "src"] + - repo: https://github.com/psf/black + rev: 22.3.0 + hooks: + - id: black + - repo: https://github.com/pycqa/flake8 + rev: 6.1.0 + hooks: + - id: flake8 + additional_dependencies: + - flake8-bugbear + - flake8-implicit-str-concat + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v3.1.0 + hooks: + - id: check-byte-order-marker + - id: trailing-whitespace + - id: end-of-file-fixer diff --git a/.readthedocs.yaml b/.readthedocs.yaml new file mode 100644 index 000000000..186d4e545 --- /dev/null +++ b/.readthedocs.yaml @@ -0,0 +1,32 @@ +# .readthedocs.yml +# Read the Docs configuration file +# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details + +# Required +version: 2 + +# Set the OS, Python version and other tools you might need +build: + os: ubuntu-22.04 + tools: + python: "3.11" + # You can also 
specify other tool versions: + # nodejs: "20" + # rust: "1.70" + # golang: "1.20" + + +# Build documentation in the docs/ directory with Sphinx +sphinx: + configuration: docs/conf.py + +# Optionally build your docs in additional formats such as PDF and ePub +formats: all + +python: + install: + - method: pip + path: . + extra_requirements: + - dev + - docs diff --git a/.vscode/settings.json b/.vscode/settings.json new file mode 100644 index 000000000..3914b9b6d --- /dev/null +++ b/.vscode/settings.json @@ -0,0 +1,11 @@ +{ + "editor.rulers": [88], + "python.testing.pytestArgs": [ + "tests" + ], + "python.testing.unittestEnabled": false, + "python.testing.nosetestsEnabled": false, + "python.testing.pytestEnabled": true, + "python.linting.flake8Enabled": true, + "python.linting.enabled": true +} diff --git a/CHANGELOG.md b/CHANGELOG.md index 2761367ce..f24b5e5e2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,5 @@ # Changelog + All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), @@ -7,11 +8,850 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 The intended audience of this file is for py42 consumers -- as such, changes that don't affect how a consumer would use the library (e.g. adding unit tests, updating documentation, etc) are not captured here. +## 1.19.0 - 2025-03-21 + +### Deprecated + +- All Incydr functionality is deprecated in Code42CLI. Use the Incydr SDK instead: https://developer.code42.com/ + +## 1.18.1 - 2025-01-08 + +## Changed + +- Updated the user-agent prefix for compatibility with Incydr conventions. + +## Removed + +- Removed support for end-of-life python versions 3.6, 3.7, 3.8. + +## 1.18.0 - 2023-11-30 + +### Added + +- Support for Python 3.12, includes various dependency version requirement updates. 
+ +## 1.17.0 - 2023-08-04 + +### Removed + +- Removed the following command groups following deprecation: + - `detection-lists` + - `departing-employee` + - `high-risk-employee` +- APIs were replaced by the `watchlists` commands + +## 1.16.6 - 2023-04-12 + +### Fixed + +- Vulnerability in `ipython` dependency for installs on Python 3.8+ + +## 1.16.5 - 2023-02-01 + +### Fixed + +- Bug in `security-data search|send-to` where using `--format json` and a checkpoint raised an error when configured for V2 file events. +- Bug in `devices list` command when using `--include-legal-hold-membership` option with an API client auth profile. + +## 1.16.2 - 2022-11-07 + +### Fixed + +- Updated setup requirements to allow for install with any `click` version `>=7.1.1` + +## 1.16.1 - 2022-10-10 + +### Added + +- Support for `click` version `>=8.0.0`. + +## 1.16.0 - 2022-10-06 + +### Added + +- Support for Code42 API clients. + - You can create a new profile with API client authentication using `code42 profile create-api-client` + - Or, update your existing profile to use API clients with `code42 update --api-client-id --secret ` +- New commands to view details for user risk profiles: + - `code42 users list-risk-profiles` + - `code42 users show-risk-profile` +- Proxy support via `HTTPS_PROXY` environment variable. + +### Changed + +- **When using API client authentication**, changes to the following `legal-hold` commands: + - `code42 legal-hold list` - Change in response shape. + - `code42 legal-hold show` - Change in response shape. + - `code42 legal-hold search-events` - **Not available.** + +## 1.15.0 - 2022-08-23 + +### Added + +- Support for the V2 file event data model. + - V1 file event APIs were marked deprecated in May 2022 and will be no longer be supported after May 2023. + - Use the `--use-v2-file-events True` option with the `code42 profile create` or `code42 profile update` commands to enable your code42 CLI profile to use the latest V2 file event data model. 
+ - See the [V2 File Events User Guide](https://clidocs.code42.com/en/latest/userguides/siemexample.html) for more information. + +### Changed + +- The `--disable-ssl-errors` options for the `code42 profile create` and `code42 profile update` commands is no longer a flag and now takes a boolean `True/False` arg. + +## 1.14.5 - 2022-08-01 + +### Added + +- `code42 devices list` and `code42 devices list-backup-sets` now accept a `--page-size ` option to enable manually configuring optimal page size. + +## 1.14.4 - 2022-07-21 + +### Changed + +- Reduced the `page_size` in Device API calls from 500 to 100 to reduce possibility of timeouts when including backup usage in `code42 devices list`. + +## 1.14.3 - 2022-07-06 + +### Fixed + +- Bug where the `code42 security-data search` command using a checkpoint and only the `--include-non-exposure` filter resulted in invalid page tokens. +- Bug where `code42 security-data search` would only return 10,000 events on the first search when using a new checkpoint. + +## 1.14.2 - 2022-06-17 + +### Fixed + +- Bug where the `code42 security-data search` command using a checkpoint and only the `--include-non-exposure` filter constructed an invalid search query. + +## 1.14.1 - 2022-06-13 + +### Fixed +- `watchlists bulk` commands now accept CSVs with extra headers + +## 1.14.0 - 2022-05-19 + +### Added + +- `watchlists` command group for interacting with watchlists. + - `watchlists add` for adding users to a watchlist + - `watchlists remove` for removing users from a watchlist + - `watchlists list` for listing existing watchlists + - `watchlists list-members` for listing users who are members of a given watchlist + - `watchlist bulk add|remove` for adding/removing multiple users via CSV file + +- `users update-start-date` command to add/modify the "start date" property of a User's risk profile. +- `users update-departure-date` command to add/modify the "end date" property of a User's risk profile. 
+- `users update-risk-profile-notes` command to add/modify the "notes" property of a User's risk profile. + +### Deprecated + +- `departing-employee` and `high-risk-employee` command groups. These actions have been replaced by the `watchlists` command group. + +## 1.13.0 - 2022-04-04 + +### Added + +- `departing-employee bulk remove` and `high-risk-employee bulk remove` commands now accept CSVs with an optional header, as well as extraneous columns if a header is provided. +- Added `devices rename` and `devices bulk rename` commands to rename devices. + - *Note: Incydr devices cannot be renamed.* +- Added the following commands for managing users' cloud aliases: + - `users add-alias` + - `users remove-alias` + - `users list-aliases` + - `users bulk add-alias` + - `users bulk remove-alias` + +## 1.12.1 - 2022-01-21 + +### Fixed + +- Vulnerability in `ipython` dependency. See [CVE-2022-21699](https://nvd.nist.gov/vuln/detail/CVE-2022-21699). + +## 1.12.0 - 2021-12-13 + +### Fixed +- Bug where device settings were unable to be serialized to json. + +### Added +- `--columns` option to `security-data search` and `security-data send-to` commands which reduces output to only the specified colums/json keys. Accepts a comma-separated list of column names (case-insensitive). + +### Changed +- Improved accuracy of checkpointing for `security-data search` (checkpoints every row as it is printed to stdout instead of just the last event of the search response). + +## 1.11.1 - 2021-11-09 + +### Changed +- Updated minimum version of py42 to `1.19.3` to provide access to updated URI paths for new standardized versioning scheme. + +## 1.11.0 - 2021-10-22 + +### Fixed + +- Incorrect column title on `code42 trusted-activities bulk create` command help text. +- `code42 devices list` will now process `--exclude-most-recently-connected` prior to `--last-connected-before` instead of after. +- The minimum required version of Python for code42cli is now correctly set as 3.6.2. 
+ +### Added + +- New bulk commands to manage user roles + - `code42 users bulk add-roles` + - `code42 users bulk remove-roles` + +- New option `--include-roles` on `code42 users list` that includes the roles for all users. + +- New command `code42 users show ` that prints all the details of that user. + +- New commands to view orgs + - `code42 users orgs list` + - `code42 users orgs show ` + +## 1.10.0 - 2021-10-05 + +### Added + +- New option `--include-legal-hold-membership` on command `code42 users list` that includes the legal hold matter name and ID for any user on legal hold. + +- New commands for deactivating/reactivating Code42 user accounts: + - `code42 users deactivate` + - `code42 users reactivate` + - `code42 users bulk deactivate` + - `code42 users bulk reactivate` + +- `code42 profile use` now prompts you to select a profile when not given a profile name argument. + +- New `trusted-activities` commands for managing trusted activities and resources: + - `code42 trusted-activities create` to create a trusted activity. + - `code42 trusted-activities update` to update a trusted activity. + - `code42 trusted-activities remove` to remove a trusted activity. + - `code42 trusted-activities list` to print the details of all trusted activities. + - `code42 trusted-activities bulk create` to bulk create trusted activities from a CSV file. + - `code42 trusted-activities bulk update` to bulk update trusted activities from a CSV file. + - `code42 trusted-activities bulk remove` to bulk remove trusted activities from a CSV file. + +### Fixed + +- Bug where `audit-logs search` with `--use-checkpoint` option was causing output formatting problems. 
+- Improve error message for `code42 users list`, `code42 devices list`, `code42 devices list-backup-sets` + +## 1.9.0 - 2021-08-19 + +### Added + +- `code42 profile` commands that validate passwords (`create`, `update`, `reset-pw`) now have the `--debug` option available, and `create` and `update` can now also pass in `--totp` as an option. + +- New command options for `code42 security-data search` + - `--risk-indicator` to filter events by risk indicators. + - `--risk-severity` to filter events by risk severity. + +### Changed + +- A TOTP token is now required on `code42 profile` commands that check for password validity when a user has MFA enabled. + +- Updated minimum version of py42 to `1.18.0` to provide access to `FIRST_DESTINATION_USE` and `RARE_DESTINATION_USE` search filters. + +### Fixed + +- `code42 profile delete` command now prints a clear error message when deletion target doesn't exist. + +## 1.8.1 - 2021-07-14 + +### Fixed + +- The `chardet` library is now an explicit dependency, resolving dependency issues for fresh installations using latest `requests` v.2.26.0 + +## 1.8.0 - 2021-07-08 + +### Fixed + +- Issue where `code42 devices bulk deactivate` and `code42 devices bulk reactivate` would + output incorrect Successes and Failures at the end of the process. + +- Bug where `code42 audit-logs search` would fail to store checkpoints when timestamps included + nanoseconds. + +- Issue where if an error occurred during `code42 audit-logs search` or `code42 audit-logs send-to`, + the user would get a stored checkpoint without having handled events. + +### Added + +- New command `code42 users update` to update a single user. + +- New command `code42 users bulk update` to update users in bulk. + +- New command `code42 users move` to move a single user to a different organization. + +- New command `code42 users bulk move` to move users in bulk. 
+ +### Changed + +- Now when a user is not found, the error message suggests that it might be because you don't + have the necessary permissions. + +## 1.7.0 - 2021-06-17 + +### Added + +- New command `code42 users add-role` to add a user role to a single user. + +- New command `code42 users remove-role` to remove a user role from a single user. + +- New command `code42 shell` that opens an IPython console with a pre-initialized py42 sdk. + +## 1.6.1 - 2021-05-27 + +### Fixed + +- Issue where `profile` commands that required connecting to an authority failed to respect the `--disable-ssl-errors` flag when set. + +## 1.6.0 - 2021-05-20 + +### Added + +- Support for users that require multi-factor authentication. + +## 1.5.1 - 2021-05-12 + +### Fixed + +- Issue where some error messages stopped displaying in the same way that they did in prior versions. + +- Issue where the `--role-name` option on the command `code42 users list` caused the + CLI to call a deprecated method. + +## 1.5.0 - 2021-05-05 + +### Added + +- New command `code42 alerts show` that displays information about a single alert. + +- New command `code42 alerts update` that can update an alert's state or note. + +- New command `code42 alerts bulk generate-template` for generating CSV templates for bulk + commands. + +- New command `code42 alerts bulk update` for bulk updating alerts. + +- New command `code42 cases file-events bulk generate-template` creates the template CSV + file for the given command arg. + +- New command `code42 cases file-events bulk add` that takes a CSV file with case number + and event ID. + +- New command `code42 cases file-events bulk remove` that takes a CSV file with case + number and event ID. + +### Changed + +- `code42 alerts search` now includes the alert ID in its table output. + +- `code42 alerts search` table output now refers to the alert state as `state` instead of + `status`. 
+ +## 1.4.2 - 2021-04-22 + +### Added + +- New command `code42 users list` with options: + - `--org-uid` filters on org membership. + - `--role-name` filters on users having a particular role. + - `--active` and `--inactive` filter on user status. + +### Fixed + +- Bug where some CSV outputs on Windows would have an extra newline between the rows. + +- Issue where outputting or sending an alert or file-event with a timestamp without + decimals would error. + +- A performance issue with the `code42 departing-employee bulk add` command. + +### Changed + +- `code42 alert-rules list` now outputs via a pager when results contain more than 10 rules. + +- `code42 cases list` now outputs via a pager when results contain more than 10 cases. + +## 1.4.1 - 2021-04-15 + +### Added + +- `code42 legal-hold search-events` command: + - `--matter-id` filters based on a legal hold uid. + - `--begin` filters based on a beginning timestamp. + - `--end` filters based on an end timestamp. + - `--event-type` filters based on a list of event types. + +### Fixed + +- Arguments/options that read data from files now attempt to autodetect file encodings. + Resolving a bug where CSVs written on Windows with Powershell would fail to be read properly. + +## 1.4.0 - 2021-03-09 + +### Added + +- `code42cli.extensions` module exposes `sdk_options` decorator and `script` group for writing custom extension scripts + using the Code42 CLI. + +- `code42 devices list` options: + - `--include-legal-hold-membership` prints the legal hold matter name and ID for any active device on legal hold + - `--include-total-storage` prints the backup archive count and total storage + +## 1.3.1 - 2021-02-25 + +### Changed + +- Command options for `profile update`: + - `-n` `--name` is not required, and if omitted will use the default profile. + - `-s` `--server` and `-u` `--username` are not required and can be updated independently now. 
+ - Example: `code42 profile update -s 1.2.3.4:1234` + +## 1.3.0 - 2021-02-11 + +### Fixed + +- Issue where `code42 alert-rules bulk add` would show as successful when adding users to a non-existent alert rule. + +### Added + +- New choice `TLS-TCP` for `--protocol` option used by `send-to` commands: + - `code42 security-data send-to` + - `code42 alerts send-to` + - `code42 audit-logs send-to` + for more securely transporting data. Included are new flags: + - `--certs` + - `--ignore-cert-validation` + +### Changed + +- The error text in cases command when: + - `cases create` sets a name that already exists in the system. + - `cases create` sets a description that has more than 250 characters. + - `cases update` sets a description that has more than 250 characters. + - `cases file-events add` is performed on an already closed case. + - `cases file-events add` sets an event id that is already added to the case. + - `cases file-events remove` is performed on an already closed case. + +## 1.2.0 - 2021-01-25 + +### Added + +- The `devices` command is added. Included are: + - `devices deactivate` to deactivate a single device. + - `devices reactivate` to reactivate a single device. + - `devices show` to retrieve detailed information about a device. + - `devices list` to retrieve info about many devices, including device settings. + - `devices list-backup-sets` to retrieve detailed info about device backup sets. + - `devices bulk deactivate` to deactivate a list of devices. + - `devices bulk reactivate` to reactivate a list of devices. + - `devices bulk generate-template` to create a blank CSV file for bulk commands. + +- `code42 departing-employee list` command. + +- `code42 high-risk-employee list` command. + +- `code42 cases` commands: + - `create` to create a new case. + - `update` to update case details. + - `export` to download a case summary as a PDF file. + - `list` to view all cases. + - `show` to view the details of a particular case. 
+ +- `code42 cases file-events` commands: + - `add` to add an event to a case. + - `remove` to remove an event from a case. + - `list` to view all events associated with a case. + +### Changed + +- The error text when removing an employee from a detection list now references the employee + by ID rather than the username. + +- Improved help text for date option arguments. + +## 1.1.0 - 2020-12-18 + +### Fixed + +- Issue where `code42 profile delete` was allowed without giving a `profile_name` even + though deleting the default profile is not allowed. + +### Added + +- `code42 audit-logs` commands: + - `search` to search for audit-logs. + - `send-to` to send audit-logs to server. + +### Changed + +- `profile_name` argument is now required for `code42 profile delete`, as it was meant to be. + +- The `--advanced-query` option on `alerts search` and `security-data (search|send-to)` commands has been updated: + - It can now accept the query as a JSON string or as the path to a file containing the JSON query. + - It can be used with the `--use-checkpoint/-c` option. + +- Now, when adding a cloud alias to a detection list user, such as during `departing-employee add`, it will remove the existing cloud alias if one exists. + - Before, it would error and the cloud alias would not get added. + +## 1.0.0 - 2020-08-31 + +### Fixed + +- Bug where `code42 legal-hold show` would error when terminal was too small. + +- Fixed bug in `departing_employee bulk add` command that allowed invalid dates to be passed without validation. + +### Changed + +- The follow commands now print a nicer error message when trying to remove a user who is not on the list: + - `code42 departing-employee remove` + - `code42 high-risk-employee remove` + - `code42 alert-rules remove-user` + +- `-i` (`--incremental`) has been removed, use `-c` (`--use-checkpoint`) with a string name for the checkpoint instead. + +- The code42cli has been migrated to the [click](https://click.palletsprojects.com) framework. 
This brings: + - BREAKING CHANGE: Commands that accept multiple values for the same option now must have the option flag provided before each value: + use `--option value1 --option value2` instead of `--option value1 value2` (which was previously possible). + - Cosmetic changes to error messages, progress bars, and help message formatting. + +- The `print` command on the `security-data` and `alerts` command groups has been replaced with the `search` command. + This was a name change only, all other functionality remains the same. + +- A profile created with the `--disable-ssl-errors` flag will now correctly not verify SSL certs when making requests. A warning message is printed + each time the CLI is run with a profile configured this way, as it is not recommended. + +- The `path` positional argument for bulk `generate-template` commands is now an option (`--p/-p`). + +- Below `search` subcommands accept argument `--format/-f` to display result in formats `csv`, `table`, `json`, `raw-json`: + - Default output format is changed to `table` format from `raw-json`, returns a paginated response. + All properties would be displayed by default except when using `-f table`. + Pass `--include-all` when using `table` to view all non-nested top-level properties. + - `code42 alerts search` + - `code42 security-data search` + - `code42 security-data saved-search list` + - `code42 legal-hold list` + - `code42 alert-rules list` + +### Added + +- `--or-query` option added to `security-data search` and `alerts search` commands which combines the provided filter arguments into an 'OR' query instead of the default 'AND' query. + +- `--password` option added to `profile create` and `profile update` commands, enabling creating profiles while bypassing the interactive password prompt. + +- Profiles can now save multiple alert and file event checkpoints. The name of the checkpoint to be used for a given query should be passed to `-c` (`--use-checkpoint`). 
+ +- `-y/--assume-yes` option added to `profile delete` and `profile delete-all` commands to not require interactive prompt. + +- Below subcommands accept argument `--format/-f` to display result in formats `csv`, `table`, `json`, `formatted-json`: + - `code42 alert-rules list` + - `code42 legal-hold list` + - `code42 legal-hold show` + - `code42 security-data saved-search list` + +### Removed + +- The `write-to` command for `security-data` and `alerts` command groups. + +## 0.7.3 - 2020-06-23 + +### Fixed + +- Fixed bug that caused the last few entries in csv files to sometimes not be processed when performing bulk processing actions. + +## 0.7.2 - 2020-06-11 + +### Fixed + +- Fixed bug that caused `alert-rules list` to error due to page size restrictions on backing service. + +## 0.7.1 - 2020-06-10 + +### Fixed + +- Issue that prevented alerts from being retrieved successfully via `code42 alerts` commands due to a change in its backing API. + +## 0.7.0 - 2020-06-08 + +### Changed + +- `code42cli` no longer supports python 2.7. + +- `code42 profile create` now uses required `--name`, `--server` and `--username` flags instead of positional arguments. + +- `code42 high-risk-employee add-risk-tags` now uses required `--username` and `--tag` flags instead of positional arguments. + +- `code42 high-risk-employee remove-risk-tags` now uses required `--username` and `--tag` flags instead of positional arguments. + +### Added + +- Extraction subcommands of `code42 security-data`, `print/write-to/send-to` accepts argument `--saved-search` to + return saved search results. + +- `code42 security-data saved-search` commands: + - `list` prints out existing saved searches' id and name + - `show` takes a search id + +- `code42 high-risk-employee bulk` supports `add-risk-tags` and `remove-risk-tags`. + - `code42 high-risk-employee bulk generate-template ` options `add-risk-tags` and `remove-risk-tags`. 
+ - `add-risk-tags` that takes a csv file with username and space separated risk tags. + - `remove-risk-tags` that takes a csv file with username and space separated risk tags. + +- Display, `Fuzzy suggestions`, valid keywords matching mistyped commands or arguments. + +- `code42 alerts`: + - Ability to search/poll for alerts with checkpointing using one of the following commands: + - `print` to output to stdout. + - `write-to` to output to a file. + - `send-to` to output to server via UDP or TCP. + +- `code42 alert-rules` commands: + - `add-user` with parameters `--rule-id` and `--username`. + - `remove-user` that takes a rule ID and optionally `--username`. + - `list`. + - `show` takes a rule ID. + - `bulk` with subcommands: + - `add`: that takes a csv file with rule IDs and usernames. + - `generate-template`: that creates the file template. And parameters: + - `cmd`: with options `add` and `remove`. + - `path` + - `remove`: that takes a csv file with rule IDs and usernames. + +- `code42 legal-hold` commands: + - `add-user` with parameters `--matter-id/-m` and `--username/-u`. + - `remove-user` with parameters `--matter-id/-m` and `--username/-u`. + - `list` prints out existing active legal hold matters. + - `show` takes a `matter_id` and prints details of the matter. + - optional argument `--include-inactive` additionally prints matter memberships that are no longer active. + - optional argument `--include-policy` additionally prints out the matter's backup preservation policy in json form. + - `bulk` with subcommands: + - `add-user`: that takes a csv file with matter IDs and usernames. + - `remove-user`: that takes a csv file with matter IDs and usernames. + - `generate-template`: that creates the file templates. + - `cmd`: with options `add` and `remove`. + - `path` + +- Success messages for `profile delete` and `profile update`. + +- Additional information in the error log file: + - The full command path for the command that errored. 
+ - User-facing error messages you see during adhoc sessions. + +- A custom error in the error log when you try adding unknown risk tags to user. + +- A custom error in the error log when you try adding a user to a detection list who is already added. +- Graceful handling of keyboard interrupts (ctrl-c) so stack traces aren't printed to console. +- Warning message printed when ctrl-c is encountered in the middle of an operation that could cause incorrect checkpoint + state, a second ctrl-c is required to quit while that operation is ongoing. + +- A progress bar that displays during bulk commands. + +- Short option `-u` added for `code42 high-risk-employee add-risk-tags` and `remove-risk-tags`. + +- Tab completion for bash and zsh for Unix based machines. + +### Fixed + +- Fixed bug in bulk commands where value-less fields in csv files were treated as empty strings instead of None. +- Fixed anomaly where the path to the error log on Windows contained mixed slashes. + +### 0.5.3 - 2020-05-04 + +### Fixed + +- Issue introduced in py42 v1.1.0 that prevented `high-risk-employee` and `departing-employee` commands from working properly. + +## 0.5.2 - 2020-04-29 + +### Fixed + +- Issue that prevented bulk csv loading. + +## 0.5.1 - 2020-04-27 + +### Fixed + +- Issue that prevented version 0.5.0 from updating its dependencies properly. + +- Issue that prevented the `add` and `bulk add` functionality of `departing-employee` and `high-risk-employee` from successfully adding users to lists when specifying optional fields. + +## 0.5.0 - 2020-04-24 + +### Changed + +- `securitydata` renamed to `security-data`. +- From `security-data` related subcommands (such as `print`): + - `--c42username` flag renamed to `--c42-username`. + - `--filename` flag renamed to `--file-name`. + - `--filepath` flag renamed to `--file-path`. + - `--processOwner` flag renamed to `--process-owner`. 
+- `-b|--begin` and `-e|--end` arguments now accept shorthand date-range strings for days, hours, and minute intervals going back from the current time (e.g. `30d`, `24h`, `15m`). +- Default profile validation logic added to prevent confusing error states. + +### Added + +- `code42 profile update` command. +- `code42 profile create` command. +- `code42 profile delete` command. +- `code42 profile delete-all` command. +- `code42 high-risk-employee` commands: + - `bulk` with subcommands: + - `add`: that takes a csv file of users. + - `generate-template`: that creates the file template. And parameters: + - `cmd`: with options `add` and `remove`. + - `path` + - `remove`: that takes a list of users in a file. + - `add` that takes parameters: `--username`, `--cloud-alias`, `--risk-factor`, and `--notes`. + - `remove` that takes a username. + - `add-risk-tags` that takes a username and risk tags. + - `remove-risk-tags` that takes a username and risk tags. +- `code42 departing-employee` commands: + - `bulk` with subcommands: + - `add`: that takes a csv file of users. + - `generate-template`: that creates the file template. And parameters: + - `cmd`: with options `add` and `remove`. + - `path` + - `remove`: that takes a list of users in a file. + - `add` that takes parameters: `--username`, `--cloud-alias`, `--departure-date`, and `--notes`. + - `remove` that takes a username. + +### Removed + +- `code42 profile set` command. Use `code42 profile create` instead. + +## 0.4.4 - 2020-04-01 + +### Added + +- Added message to STDERR when no results are found + +### Fixed + +- Add milliseconds to end timestamp, to represent end of day with milliseconds precision. + +## 0.4.3 - 2020-03-17 + +### Added + +- Support for storing passwords when keying is not available. + +### Fixed + +- Bug where keyring caused errors on certain operating systems when not supported. + +### Changed + +- Updated help texts to be more descriptive. 
+ +## 0.4.2 - 2020-03-13 + +### Fixed + +- Bug where encoding would cause an error when opening files on python2. + +## 0.4.1 - 2020-03-13 + +### Fixed + +- Bug where `profile reset-pw` did not work with the default profile. +- Bug where `profile show` indicated a password was set for a different profile. +- We now validate credentials when setting a password. + +### Changed + +- Date inputs are now required to be in quotes when they include a time. + +## 0.4.0 - 2020-03-12 + +### Added + +- Support for multiple profiles: + - Optional `--profile` flag for: + - `securitydata write-to`, `print`, and `send-to`, + - `profile show`, `set`, and `reset-pw`. + - `code42 profile use` command for changing the default profile. + - `code42 profile list` command for listing all the available profiles. +- The following search args can now take multiple values: + - `--c42username`, + - `--actor`, + - `--md5`, + - `--sha256`, + - `--filename`, + - `--filepath`, + - `--processOwner`, + - `--tabURL` + +### Fixed + +- Fixed bug where port attached to `securitydata send-to` command was not properly applied. + +### Changed + +- Begin dates are no longer required for subsequent interactive `securitydata` commands. +- When provided, begin dates are now ignored on subsequent interactive `securitydata` commands. +- `--profile` arg is now required the first time setting up a profile. + +## 0.3.0 - 2020-03-04 + +### Added + +- Begin and end date now support specifying time: `code42 securitydata print -b 2020-02-02 12:00:00`. +- If running interactively and errors occur, you will be told them at the end of `code42 securitydata` commands. 
+- New search arguments for `print`, `write-to`, and `send-to`: + - `--c42username` + - `--actor` + - `--md5` + - `--sha256` + - `--source` + - `--filename` + - `--filepath` + - `--processOwner` + - `--tabURL` + - `--include-non-exposure` + +### Changed + +- It is no longer required to store your password in your profile, + and you will be prompted to enter your password at runtime if you don't. +- You will be asked if you would like to set a password after using `code42cli profile set`. +- Begin date is now required for `securitydata` `print`, `write-to`, and `send-to` commands. + +### Removed + +- Removed `--show` flag from `code42 profile set` command. Just use `code42 profile show`. + +## 0.2.0 - 2020-02-25 + +### Removed + +- Removed config file settings and `-c` CLI arg. Use `code42 profile set`. +- Removed `--clear-password` CLI argument. Use `code42 profile set -p`. You will be prompted. +- Removed top-level destination args. Use subcommands `write-to`. `send-to`, `print` off of `code42 security data`. + +### Added + +- Added ability to view your profile: `code42 profile show`. +- Added `securitydata` subcommands: + - Use `code42 securitydata write-to` to output to a file. + - Use `code42 securitydata send-to` to output to a server. + - Use `code42 securitydata print` to outputs to stdout. + - Use `code42 securitydata clear-cursor` to remove the stored cursor for 'incremental' mode. +- Added support for raw JSON queries via `code42 securitydata [subcommand] --advanced-query [JSON]`. + +### Changed + +- Renamed base command `c42aed` to `code42`. +- Moved CLI arguments `-s`, `-u`, and `--ignore-ssl-errors` to `code42 profile set` command. +- Renamed and moved top-level `-r` flag. + - Use `-i` on one of these `securitydata` subcommands `write-to`. `send-to`, `print`. +- Moved search arguments to individual `securitydata` subcommands `write-to`. `send-to`, `print`. 
+ ## 0.1.1 - 2019-10-29 ### Fixed + - Issue where IOError message was inaccurate when using the wrong port for server destinations. ### Added + - Error handling for all socket errors. - Error handling for IOError 'connection refused'. diff --git a/CODECLASSIFICATION b/CODECLASSIFICATION new file mode 100644 index 000000000..a2edc7299 --- /dev/null +++ b/CODECLASSIFICATION @@ -0,0 +1,7 @@ +# CODECLASSIFICATION for code42cli + +# Specify all repository branches as non-production (catch all) +/refs/heads/* non-prod + +# Specify the 'main' branch as the only production branch +/refs/heads/main production diff --git a/CODEOWNERS b/CODEOWNERS new file mode 100644 index 000000000..539130336 --- /dev/null +++ b/CODEOWNERS @@ -0,0 +1 @@ +* @code42/literally-skynet diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index e69de29bb..6ff27506e 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -0,0 +1,205 @@ +- [Set up your Development environment](#set-up-your-development-environment) + - [macOS](#macos) + - [Windows/Linux](#windowslinux) +- [Installation](#installation) +- [Run a full build](#run-a-full-build) +- [Coding Style](#coding-style) + - [Style linter](#style-linter) +- [Tests](#tests) + - [Writing tests](#writing-tests) +- [Documentation](#documentation) + - [Generating documentation](#generating-documentation) + - [Performing a test build](#performing-a-test-build) + - [Running the docs locally](#running-the-docs-locally) +- [Changes](#changes) +- [Opening a PR](#opening-a-pr) + +## Set up your Development environment + +The very first thing to do is to fork the code42cli repo, clone it, and make it your working directory! + +```bash +git clone https://github.com/myaccount/code42cli +cd code42cli +``` + +To set up your development environment, create a python virtual environment and activate it. This keeps your dependencies sandboxed so that they are unaffected by (and do not affect) other python packages you may have installed. 
+ +### macOS + +There are many ways to do this (you can also use the method outlined for Windows/Linux below), but we recommend using [pyenv](https://github.com/pyenv/pyenv). + +Install `pyenv` and `pyenv-virtualenv` via [homebrew](https://brew.sh/): + +```bash +brew install pyenv pyenv-virtualenv +``` + +After installing `pyenv` and `pyenv-virtualenv`, be sure to add the following entries to your `.zshrc` (or `.bashrc` if you are using bash) and restart your shell: + +```bash +eval "$(pyenv init -)" +eval "$(pyenv virtualenv-init -)" +``` + +Then, create your virtual environment. + +```bash +pyenv install 3.9.10 +pyenv virtualenv 3.9.10 code42cli +pyenv activate code42cli +``` + +**Note**: The CLI supports pythons versions 3.9 through 3.12 for end users. Use `pyenv --versions` to see all versions available for install. + +Use `source deactivate` to exit the virtual environment and `pyenv activate code42cli` to reactivate it. + +### Windows/Linux + +Install a version of python 3.9 or higher from [python.org](https://python.org). +Next, in a directory somewhere outside the project, create and activate your virtual environment: + +```bash +python -m venv code42cli +# macOS/Linux +source code42cli/bin/activate +# Windows +.\code42cli\Scripts\Activate +``` + +To leave the virtual environment, simply use: +```bash +deactivate +``` + +## Installation + +Next, with your virtual environment activated, install code42cli and its development dependencies. The `-e` option installs code42cli in +["editable mode"](https://pip.pypa.io/en/stable/reference/pip_install/#editable-installs). + +```bash +pip install -e .'[dev]' +``` + +Open the project in your IDE of choice and change the python environment to +point to your virtual environment, and you should be ready to go! + +## Run a full build + +We use [tox](https://tox.readthedocs.io/en/latest/#) to run our build against Python 3.9, 3.10, 3.11 and 3.12. 
When run locally, `tox` will run only against the version of python that your virtual environment is running, but all versions will be validated against when you [open a PR](#opening-a-pr).
+
+To run all the unit tests, do a test build of the documentation, and check that the code meets all style requirements, simply run:
+
+```bash
+tox
+```
+If the full process runs without any errors, your environment is set up correctly! You can also use `tox` to run sub-parts of the build, as explained below.
+
+## Coding Style
+
+Use syntax and built-in modules that are compatible with Python 3.9+.
+
+### Style linter
+
+When you open a PR, after all of the unit tests successfully pass, a series
+of style checks will run. See the [pre-commit-config.yaml](.pre-commit-config.yaml) file to see a list of the projects involved in this automation. If your code does not pass the style checks, the PR will not be allowed to merge. Many of the style rules can be corrected automatically by running a simple command once you are satisfied with your change:
+
+```bash
+tox -e style
+```
+
+This will output a diff of the files that were changed as well as a list of files / line numbers / error descriptions for any style problems that need to be corrected manually. Once these have been corrected and re-pushed, the PR checks should pass.
+
+You can optionally also choose to have these checks / automatic adjustments
+occur automatically on each git commit that you make (instead of only when running `tox`.) To do so, install `pre-commit` and install the pre-commit hooks:
+
+```bash
+pip install pre-commit
+pre-commit install
+```
+
+## Tests
+
+This will also test that the documentation build passes and run the style checks.
If you want to _only_ run the unit tests, you can use:
+
+```bash
+$ tox -e py
+```
+
+If you want to run the integration tests in your current python environment, you can do:
+
+```bash
+pytest -m "integration"
+```
+
+Integration tests have a dependency on the `nmap` module to test `send-to` commands.
+
+### Writing tests
+
+Put actual before expected values in assert statements. Pytest assumes this order.
+
+```python
+a = 4
+assert a % 2 == 0
+```
+
+Use the following naming convention with test methods:
+
+test\_\[unit_under_test\]\_\[variables_for_the_test\]\_\[expected_state\]
+
+Example:
+
+```python
+def test_add_one_and_one_equals_two():
+```
+
+## Documentation
+
+Command functions should have accompanying documentation. Documentation is written in markdown and managed in the `docs` folder of this repo.
+
+### Generating documentation
+
+code42cli uses [Sphinx](http://www.sphinx-doc.org/) to generate documentation.
+
+#### Performing a test build
+
+To simply test that the documentation builds without errors, you can run:
+
+```bash
+tox -e docs
+```
+
+#### Running the docs locally
+
+To build and run the documentation locally, run the following from the `docs` directory:
+
+```bash
+pip install sphinx myst-parser sphinx_rtd_theme
+make html
+```
+
+To view the resulting documentation, open `docs/_build/html/index.html`.
+
+For the best viewing experience, run a local server to view the documentation.
+You can do this by running the below from the `docs` directory using python 3:
+
+```bash
+python -m http.server --directory "_build/html" 1337
+```
+
+and then pointing your browser to `localhost:1337`.
+
+## Changes
+
+Document all notable consumer-affecting changes in CHANGELOG.md per principles and guidelines at
+[Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
+
+## Opening a PR
+
+When you're satisfied with your changes, open a PR and fill out the pull request template file.
We recommend prefixing the name of your branch and/or PR title with `bugfix`, `chore`, or `feature` to help quickly categorize your change. Your unit tests and other checks will run against all supported python versions when you do this. + +For contributions from non-Code42 employees, we require you to agree to our [Contributor License Agreement](https://code42.github.io/code42-cla/Code42_Individual_Contributor_License_Agreement). + +On submission of your first PR, a GitHub action will run requiring you to reply in a comment with your affirmation of the CLA before the PR will be able to be merged. + +A team member should get in contact with you shortly to help merge your PR to completion and get it ready for a release! diff --git a/MANIFEST.in b/MANIFEST.in new file mode 100644 index 000000000..5b3dd6809 --- /dev/null +++ b/MANIFEST.in @@ -0,0 +1,4 @@ +include CHANGELOG.md +include README.md +include LICENSE.md +include tox.ini diff --git a/README.md b/README.md index 88d628a19..c0c485cb4 100644 --- a/README.md +++ b/README.md @@ -1,181 +1,253 @@ -# c42seceventcli - AED +# The Code42 CLI -The c42seceventcli AED module contains a CLI tool for extracting AED events as well as an optional state manager -for recording timestamps. The state manager records timestamps so that on future runs, -you only extract events you did not previously extract. 
+![Build status](https://github.com/code42/code42cli/workflows/build/badge.svg) +[![codecov.io](https://codecov.io/github/code42/code42cli/coverage.svg?branch=main)](https://codecov.io/github/code42/code42cli?branch=master) +[![versions](https://img.shields.io/pypi/pyversions/code42cli.svg)](https://pypi.org/project/code42cli/) +[![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black) +[![Documentation Status](https://readthedocs.org/projects/code42cli/badge/?version=latest)](https://clidocs.code42.com/en/latest/?badge=latest) + +## Code42CLI end-of-life +Code42CLI is now deprecated. It has been replaced by the [Incydr CLI](https://support.code42.com/hc/en-us/articles/14827667072279-Introduction-to-the-Incydr-command-line-interface). +- Code42CLI will reach **end-of-support on January 1, 2026**, and **end-of-life on January 1, 2027**. +- To ensure uninterrupted functionality and access to the latest features, migrate your integrations to the Incydr CLI as soon as possible. + +For more details, [see our FAQ](https://support.code42.com/hc/en-us/articles/32154640298263-Code42-CLI-end-of-life-FAQ). + +Use the `code42` command to interact with your Code42 environment. + +* `code42 security-data` is a CLI tool for extracting AED events. + Additionally, you can choose to only get events that Code42 previously did not observe since you last recorded a + checkpoint (provided you do not change your query). +* `code42 watchlists` is a collection of tools for managing your employee watchlists. ## Requirements -- Python 2.7.x or 3.5.0+ -- Code42 Server 6.8.x+ +- Python 3.6.2+ ## Installation -Until we are able to put `py42` and `c42secevents` on PyPI, you will need to first install them manually. -`py42` is available for download [here](https://confluence.corp.code42.com/pages/viewpage.action?pageId=61767969#py42%E2%80%93Code42PythonSDK-Downloads). 
-For py42 installation instructions, see its [README](https://stash.corp.code42.com/projects/SH/repos/lib_c42_python_sdk/browse/README.md). +Install the `code42` CLI using: -`c42secevents` is available [here](https://confluence.corp.code42.com/display/LS/Security+Event+Extractor+-+Python). -For `c42secevents` installation instructions, see its [README](https://stash.corp.code42.com/projects/INT/repos/security-event-extractor/browse/README.md). +```bash +$ python3 -m pip install code42cli +``` -Once you've done that, install `c42seceventcli` using: +## Usage +First, create your profile: ```bash -$ python setup.py install +code42 profile create --name MY_FIRST_PROFILE --server example.authority.com --username security.admin@example.com ``` -## Usage +Your profile contains the necessary properties for logging into Code42 servers. After running `code42 profile create`, +the program prompts you about storing a password. If you agree, you are then prompted to input your password. + +Your password is not shown when you do `code42 profile show`. However, `code42 profile show` will confirm that a +password exists for your profile. If you do not set a password, you will be securely prompted to enter a password each +time you run a command. -A simple usage requires you to pass in your Code42 authority URL and username as arguments: +For development purposes, you may need to ignore ssl errors. If you need to do this, use the `--disable-ssl-errors` +option when creating your profile: ```bash -c42aed -s https://example.authority.com -u security.admin@example.com +code42 profile create -n MY_FIRST_PROFILE -s https://example.authority.com -u security.admin@example.com --disable-ssl-errors ``` - -Another option is to put your Code42 authority URL and username (and other arguments) in a config file. -Use `default.config.cfg` as an example to make your own config file; it has all the supported arguments. -The arguments in `default.config.cfg` mirror the CLI arguments. 
-```buildoutcfg -[Code42] -c42_authority_url=https://example.authority.com -c42_username=user@code42.com +You can add multiple profiles with different names and the change the default profile with the `use` command: + +```bash +code42 profile use MY_SECOND_PROFILE ``` -Then, run the script as follows: +When the `--profile` flag is available on other commands, such as those in `security-data`, it will use that profile +instead of the default one. For example, ```bash -c42aed -c path/to/config +code42 security-data search -b 2020-02-02 --profile MY_SECOND_PROFILE ``` -To use the state management service, simply provide the `-r` to the command line. -`-r` is particularly useful if you wish to run this script on a recurring job: +To see all your profiles, do: ```bash -c42aed -s https://example.authority.com -u security.admin@example.com -r +code42 profile list ``` -If you are using a config file with `-c`, set `record_cursor` to True: +## Security Data and Alerts + +Using the CLI, you can query for security events and alerts just like in the admin console, but the results are output +to stdout so they can be written to a file or piped out to another process (for sending to an external syslog server, for +example). + + +The following examples pertain to security events, but can also be used for alerts by replacing `security-data` with +`alerts`: -```buildoutcfg -[Code42] -c42_authority_url=https://example.authority.com -c42_username=user@code42.com -record_cursor=True +To print events to stdout, do: + +```bash +code42 security-data search -b ``` -By excluding `-r`, future runs will not know about previous events you got, and -you will get all the events in the given time range (or default time range of 60 days back). -To clear the cursor: +Note that `-b` or `--begin` is usually required. + +And end date can also be given with `-e` or `--end` to query for a specific date range (if end is not passed, it will get all events up to the present time). 
+ +To specify a begin/end time, you can pass a date or a date w/ time as a string: ```bash -c42aed -s https://example.authority.com -u security.admin@example.com -r --clear-cursor +code42 security-data search -b '2020-02-02 12:51:00' ``` -There are two possible output formats. -* CEF -* JSON +```bash +code42 security-data search -b '2020-02-02 12:30' +``` -JSON is the default. To use CEF, use `-o CEF`: +```bash +code42 security-data search -b '2020-02-02 12' +``` ```bash -c42aed -s https://example.authority.com -u security.admin@example.com -o CEF +code42 security-data search -b 2020-02-02 ``` -Or if you are using a config file with `-c`: +or a shorthand string specifying either days, hours, or minutes back from the current time: -```buildoutcfg -[Code42] -c42_authority_url=https://example.authority.com -c42_username=user@code42.com -output_format=CEF +```bash +code42 security-data search -b 30d ``` -There are three possible destination types to use: +```bash +code42 security-data search -b 10d -e 12h +``` -* stdout -* file - writing to a file -* server - transmitting to a server, such as syslog +Begin date will be ignored if provided on subsequent queries using `-c/--use-checkpoint`. -The program defaults to `stdout`. To use a file, use `--dest-type` and `--dest` this way: +Use other formats with `-f`: ```bash -c42aed -s https://example.authority.com -u security.admin@example.com --dest-type file --dest name-of-file.txt +code42 security-data search -b 2020-02-02 -f CEF ``` -To use a server destination (like syslog): +The available formats are CEF, JSON, and RAW-JSON. +Currently, CEF format is only supported for security events. + +To write events to a file, just redirect your output: ```bash -c42aed -s https://example.authority.com -u security.admin@example.com --dest-type server --dest https://syslog.example.com +code42 security-data search -b 2020-02-02 > filename.txt ``` -Both `destination_type` and `destination` are possible fields in the config file as well. 
+To send events to an external server, use the `send-to` command, which behaves the same as `search` except for defaulting +to `RAW-JSON` output and sending results to an external server instead of to stdout: -You can also use CLI arguments with config file arguments, but the program will favor the CLI arguments. +The default port (if none is specified on the address) is the standard syslog port 514, and default protocol is UDP: -If this is your first time running, you will be prompted for your Code42 password. +```bash +code42 security-data send-to 10.10.10.42 -b 1d +``` -If you get a keychain error when running this script, you may have to add a code signature: +Results can also be sent over TCP to any port by using the `-p/--protocol` flag and adding a port to the address argument: ```bash -codesign -f -s - $(which python) +code42 security-data send-to 10.10.10.42:8080 -p TCP -b 1d ``` -All errors are sent to an error log file named `c42seceventcli_aed_errors.log` -located in your user directory under `.c42seceventcli/log`. +Note: For more complex requirements when sending to an external server (SSL, special formatting, etc.), use a dedicated +syslog forwarding tool like `rsyslog` or connection tunneling tool like `stunnel`. + +If you want to periodically run the same query, but only retrieve the new events each time, use the +`-c/--use-checkpoint` option with a name for your checkpoint. This stores the timestamp of the query's last event to a +file on disk and uses that as the "begin date" timestamp filter on the next query that uses the same checkpoint name. +Checkpoints are stored per profile. 
-Full usage: +Initial run requires a begin date: +```bash +code42 security-data search -b 30d --use-checkpoint my_checkpoint +``` +Subsequent runs do not: +```bash +code42 security-data search --use-checkpoint my_checkpoint ``` -usage: c42aed [-h] [--clear-cursor] [--reset-password] [-c CONFIG_FILE] - [-s C42_AUTHORITY_URL] [-u C42_USERNAME] [-b BEGIN_DATE] [-i] - [-o {CEF,JSON}] - [-t [{SharedViaLink,SharedToDomain,ApplicationRead,CloudStorage,RemovableMedia,IsPublic} [{SharedViaLink,SharedToDomain,ApplicationRead,CloudStorage,RemovableMedia,IsPublic} ...]]] - [-d--debug] [--dest-type {stdout,file,server}] - [--dest DESTINATION] [--dest-port DESTINATION_PORT] - [--dest-protocol {TCP,UDP}] [-e END_DATE | -r] -optional arguments: - -h, --help show this help message and exit - --clear-cursor Resets the stored cursor. - --reset-password Clears stored password and prompts user for password. - -c CONFIG_FILE, --config-file CONFIG_FILE - The path to the config file to use for the rest of the - arguments. - -s C42_AUTHORITY_URL, --server C42_AUTHORITY_URL - The full scheme, url and port of the Code42 server. - -u C42_USERNAME, --username C42_USERNAME - The username of the Code42 API user. - -b BEGIN_DATE, --begin BEGIN_DATE - The beginning of the date range in which to look for - events, in YYYY-MM-DD UTC format OR a number (number - of minutes ago). - -i, --ignore-ssl-errors - Do not validate the SSL certificates of Code42 - servers. - -o {CEF,JSON}, --output-format {CEF,JSON} - The format used for outputting events. - -t [{SharedViaLink,SharedToDomain,ApplicationRead,CloudStorage,RemovableMedia,IsPublic} [{SharedViaLink,SharedToDomain,ApplicationRead,CloudStorage,RemovableMedia,IsPublic} ...]], --types [{SharedViaLink,SharedToDomain,ApplicationRead,CloudStorage,RemovableMedia,IsPublic} [{SharedViaLink,SharedToDomain,ApplicationRead,CloudStorage,RemovableMedia,IsPublic} ...]] - To limit extracted events to those with given exposure - types. 
- -d--debug Turn on debug logging. - --dest-type {stdout,file,server} - The type of destination to send output to. - --dest DESTINATION Either a name of a local file or syslog host address. - Ignored if destination type is 'stdout'. - --dest-port DESTINATION_PORT - Port used when sending logs to server. Ignored if - destination type is not 'server'. - --dest-protocol {TCP,UDP} - Protocol used to send logs to server. Ignored if - destination type is not 'server'. - -e END_DATE, --end END_DATE - The end of the date range in which to look for events, - in YYYY-MM-DD UTC format OR a number (number of - minutes ago). - -r, --record-cursor Only get events that were not previously retrieved. +You can also use wildcard for queries, but note, if they are not in quotes, you may get unexpected behavior. + +```bash +code42 security-data search --actor "*" ``` -# Known Issues +The search query parameters are as follows: + +- `-t/--type` (exposure types) +- `-b/--begin` (begin date) +- `-e/--end` (end date) +- `--c42-username` +- `--actor` +- `--md5` +- `--sha256` +- `--source` +- `--file-name` +- `--file-path` +- `--process-owner` +- `--tab-url` +- `--include-non-exposure` (does not work with `-t`) +- `--advanced-query` (raw JSON query) + +You cannot use other query parameters if you use `--advanced-query`. +To learn more about acceptable arguments, add the `-h` flag to `code42 security-data` + +Saved Searches: + +The CLI can also access "saved searches" that are stored in the admin console, and run them via their saved search ID. 
+ +Use the `saved-search list` subcommand to list existing searches with their IDs: + +```bash +code42 security-data saved-search list +``` + +The `show` subcommand will give details about the search with the provided ID: + +```bash +code42 security-data saved-search show +``` + +To get the results of a saved search, use the `--saved-search` option with your search ID on the `search` subcommand: + +```bash +code42 security-data search --saved-search +``` + +## Troubleshooting + +If you keep getting prompted for your password, try resetting with `code42 profile reset-pw`. +If that doesn't work, delete your credentials file located at ~/.code42cli or the entry in keychain. + +## Shell tab completion + +To enable shell autocomplete when you hit `tab` after the first few characters of a command name, do the following: + +For Bash, add this to ~/.bashrc: + +``` +eval "$(_CODE42_COMPLETE=source_bash code42)" +``` + +For Zsh, add this to ~/.zshrc: + +``` +eval "$(_CODE42_COMPLETE=source_zsh code42)" +``` + +For Fish, add this to ~/.config/fish/completions/code42.fish: + +``` +eval (env _CODE42_COMPLETE=source_fish code42) +``` + +Open a new shell to enable completion. Or run the eval command directly in your current shell to enable it temporarily. + + +## Writing Extensions -Only the first 10,000 of each set of events containing the exact same insertion timestamp is reported. +The CLI exposes a few helpers for writing custom extension scripts powered by the CLI. Read the user-guide [here](https://clidocs.code42.com/en/feature-extension_scripts/userguides/extensions.html). diff --git a/aed_config.default.cfg b/aed_config.default.cfg deleted file mode 100644 index 2570443a3..000000000 --- a/aed_config.default.cfg +++ /dev/null @@ -1,21 +0,0 @@ -; OPTIONAL CONFIG FILE -; Use this file as an example for which arguments the program accepts. 
-; Make a copy of this file, edit it, and use with the `-c` flag: -; c42aed -c path/to/config -; Some args may not apply, and you can remove what you do not need. -; You can use this file together with CLI args if you want. - -[Code42] -c42_authority_url=https://example.authority.com -c42_username=user@code42.com -begin_date=2019-01-01 -end_date=2019-02-02 -ignore_ssl_errors=False -output_format=JSON -record_cursor=False -exposure_types=SharedViaLink, SharedToDomain, ApplicationRead, CloudStorage, RemovableMedia, IsPublic -debug_mode=False -destination_type=syslog -destination=https://log.example.com -destination_port=514 -destination_protocol=TCP diff --git a/c42seceventcli/aed/args.py b/c42seceventcli/aed/args.py deleted file mode 100644 index 4f6b7a0ab..000000000 --- a/c42seceventcli/aed/args.py +++ /dev/null @@ -1,154 +0,0 @@ -from datetime import datetime, timedelta -from argparse import ArgumentParser - -from c42seceventcli.common.cli_args import ( - add_clear_cursor_arg, - add_reset_password_arg, - add_config_file_path_arg, - add_authority_host_address_arg, - add_username_arg, - add_begin_date_arg, - add_end_date_arg, - add_ignore_ssl_errors_arg, - add_output_format_arg, - add_record_cursor_arg, - add_exposure_types_arg, - add_debug_arg, - add_destination_type_arg, - add_destination_arg, - add_destination_port_arg, - add_destination_protocol_arg, -) -import c42seceventcli.common.util as common - - -def get_args(): - parser = _get_arg_parser() - cli_args = vars(parser.parse_args()) - args = _union_cli_args_with_config_file_args(cli_args) - args.cli_parser = parser - args.initialize_args() - args.verify_authority_arg() - args.verify_username_arg() - args.verify_destination_args() - return args - - -def _get_arg_parser(): - parser = ArgumentParser() - - add_clear_cursor_arg(parser) - add_reset_password_arg(parser) - add_config_file_path_arg(parser) - add_authority_host_address_arg(parser) - add_username_arg(parser) - add_begin_date_arg(parser) - 
add_ignore_ssl_errors_arg(parser) - add_output_format_arg(parser) - add_exposure_types_arg(parser) - add_debug_arg(parser) - add_destination_type_arg(parser) - add_destination_arg(parser) - add_destination_port_arg(parser) - add_destination_protocol_arg(parser) - - # Makes sure that you can't give both an end_timestamp and record_cursor - mutually_exclusive_timestamp_group = parser.add_mutually_exclusive_group() - add_end_date_arg(mutually_exclusive_timestamp_group) - add_record_cursor_arg(mutually_exclusive_timestamp_group) - - return parser - - -def _union_cli_args_with_config_file_args(cli_args): - config_args = _get_config_args(cli_args.get("config_file")) - args = AEDArgs() - keys = cli_args.keys() - for key in keys: - args.try_set(key, cli_args.get(key), config_args.get(key)) - - return args - - -def _get_config_args(config_file_path): - try: - return common.get_config_args(config_file_path) - except IOError: - print("Path to config file {0} not found".format(config_file_path)) - exit(1) - - -class AEDArgs(common.SecArgs): - cli_parser = None - c42_authority_url = None - c42_username = None - begin_date = None - end_date = None - ignore_ssl_errors = False - output_format = "JSON" - record_cursor = False - exposure_types = None - debug_mode = False - destination_type = "stdout" - destination = None - destination_port = 514 - destination_protocol = "TCP" - reset_password = False - clear_cursor = False - - def __init__(self): - self.begin_date = AEDArgs._get_default_begin_date() - self.end_date = AEDArgs._get_default_end_date() - - def initialize_args(self): - self.destination_type = self.destination_type.lower() - try: - self.destination_port = int(self.destination_port) - except ValueError: - msg = "Destination port '{0}' not a base 10 integer.".format(self.destination_port) - self._raise_value_error(msg) - - @staticmethod - def _get_default_begin_date(): - default_begin_date = datetime.now() - timedelta(days=60) - return 
default_begin_date.strftime("%Y-%m-%d") - - @staticmethod - def _get_default_end_date(): - default_end_date = datetime.now() - return default_end_date.strftime("%Y-%m-%d") - - def verify_authority_arg(self): - if self.c42_authority_url is None: - self._raise_value_error("Code42 authority host address not provided.") - - def verify_username_arg(self): - if self.c42_username is None: - self._raise_value_error("Code42 username not provided.") - - def verify_destination_args(self): - self._verify_stdout_destination() - self._verify_server_destination() - - def _verify_stdout_destination(self): - if self.destination_type == "stdout" and self.destination is not None: - msg = ( - "Destination '{0}' not applicable for stdout. " - "Try removing '--dest' arg or change '--dest-type' to 'file' or 'server'." - ) - msg = msg.format(self.destination) - self._raise_value_error(msg) - - def _verify_file_destination(self): - if self.destination_type == "file" and self.destination is None: - msg = "Missing file name. Try: '--dest path/to/file'." - self._raise_value_error(msg) - - def _verify_server_destination(self): - if self.destination_type == "server" and self.destination is None: - msg = "Missing server URL. Try: '--dest https://syslog.example.com'." 
- self._raise_value_error(msg) - - def _raise_value_error(self, msg): - self.cli_parser.print_usage() - raise ValueError(msg) diff --git a/c42seceventcli/aed/cursor_store.py b/c42seceventcli/aed/cursor_store.py deleted file mode 100644 index 4d0659f53..000000000 --- a/c42seceventcli/aed/cursor_store.py +++ /dev/null @@ -1,36 +0,0 @@ -from c42seceventcli.common.cursor_store import SecurityEventCursorStore - -_INSERTION_TIMESTAMP_FIELD_NAME = u"insertionTimestamp" - - -class AEDCursorStore(SecurityEventCursorStore): - _PRIMARY_KEY = 1 - - def __init__(self, db_file_path=None): - super(AEDCursorStore, self).__init__("aed_checkpoint", db_file_path) - if self._is_empty(): - self._init_table() - - def get_stored_insertion_timestamp(self): - rows = self._get(_INSERTION_TIMESTAMP_FIELD_NAME, self._PRIMARY_KEY) - if rows and rows[0]: - return rows[0][0] - - def replace_stored_insertion_timestamp(self, new_insertion_timestamp): - self._set( - column_name=_INSERTION_TIMESTAMP_FIELD_NAME, - new_value=new_insertion_timestamp, - primary_key=self._PRIMARY_KEY, - ) - - def reset(self): - self._drop_table() - self._init_table() - - def _init_table(self): - columns = "{0}, {1}".format(self._PRIMARY_KEY_COLUMN_NAME, _INSERTION_TIMESTAMP_FIELD_NAME) - create_table_query = "CREATE TABLE {0} ({1})".format(self._table_name, columns) - insert_query = "INSERT INTO {0} VALUES(?, null)".format(self._table_name) - with self._connection as conn: - conn.execute(create_table_query) - conn.execute(insert_query, (self._PRIMARY_KEY,)) diff --git a/c42seceventcli/aed/main.py b/c42seceventcli/aed/main.py deleted file mode 100644 index 53dad1244..000000000 --- a/c42seceventcli/aed/main.py +++ /dev/null @@ -1,171 +0,0 @@ -import json -from socket import gaierror, herror, timeout -from urllib3 import disable_warnings -from urllib3.exceptions import InsecureRequestWarning -from datetime import datetime, timedelta - -from py42 import debug_level -from py42 import settings -from py42.sdk import SDK -from 
c42secevents.extractors import AEDEventExtractor -from c42secevents.common import FileEventHandlers, convert_datetime_to_timestamp -from c42secevents.logging.formatters import AEDDictToCEFFormatter, AEDDictToJSONFormatter - -import c42seceventcli.common.util as common -import c42seceventcli.aed.args as aed_args -from c42seceventcli.aed.cursor_store import AEDCursorStore - -_SERVICE_NAME = u"c42seceventcli_aed" - - -def main(): - args = _get_args() - if args.reset_password: - common.delete_stored_password(_SERVICE_NAME, args.c42_username) - - handlers = _create_handlers(args) - _set_up_cursor_store( - record_cursor=args.record_cursor, clear_cursor=args.clear_cursor, handlers=handlers - ) - sdk = _create_sdk_from_args(args, handlers) - - if bool(args.ignore_ssl_errors): - _ignore_ssl_errors() - - if bool(args.debug_mode): - settings.debug_level = debug_level.DEBUG - - _extract(args=args, sdk=sdk, handlers=handlers) - - -def _get_args(): - try: - return aed_args.get_args() - except ValueError as ex: - print(repr(ex)) - exit(1) - - -def _ignore_ssl_errors(): - settings.verify_ssl_certs = False - disable_warnings(InsecureRequestWarning) - - -def _create_handlers(args): - handlers = FileEventHandlers() - error_logger = common.get_error_logger(_SERVICE_NAME) - settings.global_exception_message_receiver = error_logger.error - handlers.handle_error = error_logger.error - output_format = args.output_format - logger_formatter = _get_log_formatter(output_format) - destination_args = _create_destination_args(args) - logger = _get_logger( - formatter=logger_formatter, service_name=_SERVICE_NAME, destination_args=destination_args - ) - handlers.handle_response = _get_response_handler(logger) - return handlers - - -def _create_destination_args(args): - destination_args = common.DestinationArgs() - destination_args.destination_type = args.destination_type - destination_args.destination = args.destination - destination_args.destination_port = args.destination_port - 
destination_args.destination_protocol = args.destination_protocol - return destination_args - - -def _get_logger(formatter, service_name, destination_args): - try: - return common.get_logger( - formatter=formatter, service_name=service_name, destination_args=destination_args - ) - except (herror, gaierror, timeout) as ex: - print(repr(ex)) - _print_server_args(destination_args) - exit(1) - except IOError as ex: - print(repr(ex)) - if ex.errno == 61: - _print_server_args(destination_args) - exit(1) - - print("File path: {0}.".format(destination_args.destination)) - exit(1) - - -def _print_server_args(server_args): - print( - "Hostname={0}, port={1}, protocol={2}.".format( - server_args.destination, server_args.destination_port, server_args.destination_protocol - ) - ) - - -def _set_up_cursor_store(record_cursor, clear_cursor, handlers): - if record_cursor or clear_cursor: - store = AEDCursorStore() - if clear_cursor: - store.reset() - - if record_cursor: - handlers.record_cursor_position = store.replace_stored_insertion_timestamp - handlers.get_cursor_position = store.get_stored_insertion_timestamp - return store - - -def _get_log_formatter(output_format): - if output_format == "JSON": - return AEDDictToJSONFormatter() - elif output_format == "CEF": - return AEDDictToCEFFormatter() - else: - print("Unsupported output format {0}".format(output_format)) - exit(1) - - -def _get_response_handler(logger): - def handle_response(response): - response_dict = json.loads(response.text) - file_events_key = u"fileEvents" - if file_events_key in response_dict: - events = response_dict[file_events_key] - for event in events: - logger.info(event) - - return handle_response - - -def _create_sdk_from_args(args, handlers): - password = common.get_stored_password(_SERVICE_NAME, args.c42_username) - try: - sdk = SDK.create_using_local_account( - host_address=args.c42_authority_url, username=args.c42_username, password=password - ) - return sdk - except Exception as ex: - 
handlers.handle_error(ex) - print("Incorrect username or password.") - exit(1) - - -def _extract(args, sdk, handlers): - min_timestamp = _parse_min_timestamp(args.begin_date) - max_timestamp = common.parse_timestamp(args.end_date) - extractor = AEDEventExtractor(sdk, handlers) - extractor.extract(min_timestamp, max_timestamp, args.exposure_types) - - -def _parse_min_timestamp(begin_date): - min_timestamp = common.parse_timestamp(begin_date) - boundary_date = datetime.utcnow() - timedelta(days=90) - boundary = convert_datetime_to_timestamp(boundary_date) - if min_timestamp < boundary: - print("Argument '--begin' must be within 90 days.") - exit(1) - - return min_timestamp - - -if __name__ == "__main__": - main() diff --git a/c42seceventcli/common/cli_args.py b/c42seceventcli/common/cli_args.py deleted file mode 100644 index d25032523..000000000 --- a/c42seceventcli/common/cli_args.py +++ /dev/null @@ -1,173 +0,0 @@ -from argparse import SUPPRESS - - -def add_config_file_path_arg(arg_group): - arg_group.add_argument( - "-c", - "--config-file", - dest="config_file", - action="store", - help="The path to the config file to use for the rest of the arguments.", - ) - - -def add_clear_cursor_arg(arg_group): - arg_group.add_argument( - "--clear-cursor", - dest="clear_cursor", - action="store_true", - help="Resets the stored cursor.", - default=False, - ) - - -def add_reset_password_arg(arg_group): - arg_group.add_argument( - "--reset-password", - dest="reset_password", - action="store_true", - help="Clears stored password and prompts user for password.", - default=False, - ) - - -def add_authority_host_address_arg(arg_group): - arg_group.add_argument( - "-s", - "--server", - dest="c42_authority_url", - action="store", - help="The full scheme, url and port of the Code42 server.", - ) - - -def add_username_arg(arg_group): - arg_group.add_argument( - "-u", - "--username", - action="store", - dest="c42_username", - help="The username of the Code42 API user.", - ) - - -def 
add_begin_date_arg(arg_group): - arg_group.add_argument( - "-b", - "--begin", - action="store", - dest="begin_date", - help="The beginning of the date range in which to look for events, " - "in YYYY-MM-DD UTC format OR a number (number of minutes ago).", - ) - - -def add_end_date_arg(arg_group): - arg_group.add_argument( - "-e", - "--end", - action="store", - dest="end_date", - help="The end of the date range in which to look for events, " - "in YYYY-MM-DD UTC format OR a number (number of minutes ago).", - ) - - -def add_ignore_ssl_errors_arg(arg_group): - arg_group.add_argument( - "-i", - "--ignore-ssl-errors", - action="store_true", - dest="ignore_ssl_errors", - help="Do not validate the SSL certificates of Code42 servers.", - ) - - -def add_output_format_arg(arg_group): - arg_group.add_argument( - "-o", - "--output-format", - dest="output_format", - action="store", - choices=["CEF", "JSON"], - help="The format used for outputting events.", - ) - - -def add_record_cursor_arg(arg_group): - arg_group.add_argument( - "-r", - "--record-cursor", - dest="record_cursor", - action="store_true", - help="Only get events that were not previously retrieved.", - ) - - -def add_exposure_types_arg(arg_group): - arg_group.add_argument( - "-t", - "--types", - nargs="*", - action="store", - dest="exposure_types", - choices=[ - u"SharedViaLink", - u"SharedToDomain", - u"ApplicationRead", - u"CloudStorage", - u"RemovableMedia", - u"IsPublic", - ], - help="To limit extracted events to those with given exposure types.", - ) - - -def add_debug_arg(arg_group): - arg_group.add_argument( - "-d" "--debug", action="store_true", dest="debug_mode", help="Turn on debug logging." 
- ) - - -def add_destination_type_arg(arg_group): - arg_group.add_argument( - "--dest-type", - action="store", - dest="destination_type", - choices=["stdout", "file", "server"], - help="The type of destination to send output to.", - ) - - -def add_destination_arg(arg_group): - arg_group.add_argument( - "--dest", - action="store", - dest="destination", - help="Either a name of a local file or syslog host address. Ignored if destination type is 'stdout'.", - ) - - -def add_destination_port_arg(arg_group): - arg_group.add_argument( - "--dest-port", - action="store", - dest="destination_port", - help="Port used when sending logs to server. Ignored if destination type is not 'server'.", - ) - - -def add_destination_protocol_arg(arg_group): - arg_group.add_argument( - "--dest-protocol", - action="store", - dest="destination_protocol", - choices=["TCP", "UDP"], - help="Protocol used to send logs to server. Ignored if destination type is not 'server'.", - ) - - -def add_help_arg(arg_group): - arg_group.add_argument( - "-h", "--help", action="help", default=SUPPRESS, help="Show this help message and exit." - ) diff --git a/c42seceventcli/common/cursor_store.py b/c42seceventcli/common/cursor_store.py deleted file mode 100644 index 1962d245d..000000000 --- a/c42seceventcli/common/cursor_store.py +++ /dev/null @@ -1,50 +0,0 @@ -import sqlite3 -from c42seceventcli.common.util import get_user_project_path - - -class SecurityEventCursorStore(object): - _PRIMARY_KEY_COLUMN_NAME = "cursor_id" - - def __init__(self, db_table_name, db_file_path=None): - # type: (str, str) -> None - self._table_name = db_table_name - if db_file_path is None: - db_path = get_user_project_path("db") - db_file_path = "{0}/{1}.db".format(db_path, self._table_name) - - self._connection = sqlite3.connect(db_file_path) - - def _get(self, columns, primary_key): - # type: (str, any) -> list - query = "SELECT {0} FROM {1} WHERE {2}=?" 
- query = query.format(columns, self._table_name, self._PRIMARY_KEY_COLUMN_NAME) - with self._connection as conn: - cursor = conn.cursor() - cursor.execute(query, (primary_key,)) - return cursor.fetchall() - - def _set(self, column_name, new_value, primary_key): - # type: (str, any, any) -> None - query = "UPDATE {0} SET {1}=? WHERE {2}=?".format( - self._table_name, column_name, self._PRIMARY_KEY_COLUMN_NAME - ) - with self._connection as conn: - conn.execute(query, (new_value, primary_key)) - - def _drop_table(self): - drop_query = "DROP TABLE {0}".format(self._table_name) - with self._connection as conn: - conn.execute(drop_query) - - def _is_empty(self): - table_count_query = """ - SELECT COUNT(name) - FROM sqlite_master - WHERE type='table' AND name=? - """ - with self._connection as conn: - cursor = conn.cursor() - cursor.execute(table_count_query, (self._table_name,)) - query_result = cursor.fetchone() - if query_result: - return int(query_result[0]) <= 0 diff --git a/c42seceventcli/common/util.py b/c42seceventcli/common/util.py deleted file mode 100644 index 7a9a665be..000000000 --- a/c42seceventcli/common/util.py +++ /dev/null @@ -1,144 +0,0 @@ -import sys -import keyring -import getpass -import logging -from os import path, makedirs -from keyring.errors import PasswordDeleteError -from datetime import datetime, timedelta -from configparser import ConfigParser -from logging.handlers import RotatingFileHandler - -from c42secevents.logging.handlers import NoPrioritySysLogHandler -from c42secevents.common import convert_datetime_to_timestamp - - -def get_user_project_path(subdir=None): - """The path on your user dir to /.c42seceventcli/[subdir]""" - package_name = __name__.split(".")[0] - home = path.expanduser("~") - user_project_path = path.join(home, ".{0}".format(package_name), subdir) - - if not path.exists(user_project_path): - makedirs(user_project_path) - - return user_project_path - - -def get_config_args(config_file_path): - args = {} - parser = 
ConfigParser() - if config_file_path: - if not parser.read(path.expanduser(config_file_path)): - raise IOError("Supplied an empty config file {0}".format(config_file_path)) - - if not parser.sections(): - return args - - items = parser.items("Code42") - for item in items: - args[item[0]] = item[1] - - return args - - -def parse_timestamp(input_string): - try: - time = datetime.strptime(input_string, "%Y-%m-%d") - except ValueError: - if input_string and input_string.isdigit(): - now = datetime.utcnow() - time = now - timedelta(minutes=int(input_string)) - else: - raise ValueError("input must be a positive integer or a date in YYYY-MM-DD format.") - - return convert_datetime_to_timestamp(time) - - -def get_error_logger(service_name): - log_path = get_user_project_path("log") - log_path = "{0}/{1}_errors.log".format(log_path, service_name) - logger = logging.getLogger("{0}_error_logger".format(service_name)) - formatter = logging.Formatter("%(asctime)s %(message)s") - handler = RotatingFileHandler(log_path, maxBytes=250000000) - handler.setFormatter(formatter) - logger.addHandler(handler) - return logger - - -class DestinationArgs(object): - destination_type = None - destination = None - destination_port = None - destination_protocol = None - - -def get_logger(formatter, service_name, destination_args): - """Args: - formatter: The formatter for logger. - service_name: The name of the script getting the logger. - Necessary for distinguishing multiple loggers. - destination_args: DTO holding the destination_type, destination, destination_port, and destination_protocol. - Returns: - A logger with the correct handler per destination_type. - For destination_type == stdout, it uses a StreamHandler. - For destination_type == file, it uses a FileHandler. - For destination_type == server, it uses a NoPrioritySyslogHandler. 
- """ - - logger = logging.getLogger("{0}_logger".format(service_name)) - handler = _get_log_handler(destination_args) - handler.setFormatter(formatter) - logger.addHandler(handler) - logger.setLevel(logging.INFO) - return logger - - -def _get_log_handler(destination_args): - if destination_args.destination_type == "stdout": - return logging.StreamHandler(sys.stdout) - elif destination_args.destination_type == "server": - return NoPrioritySysLogHandler( - hostname=destination_args.destination, - port=destination_args.destination_port, - protocol=destination_args.destination_protocol, - ) - elif destination_args.destination_type == "file": - return logging.FileHandler(filename=destination_args.destination) - - -def get_stored_password(service_name, username): - password = keyring.get_password(service_name, username) - if password is None: - try: - password = getpass.getpass(prompt="Code42 password: ") - save_password = _get_input("Save password to keychain? (y/n): ") - if save_password.lower()[0] == "y": - keyring.set_password(service_name, username, password) - - except KeyboardInterrupt: - print() - exit(1) - - return password - - -def _get_input(prompt): - if sys.version_info >= (3, 0): - return input(prompt) - else: - return raw_input(prompt) - - -def delete_stored_password(service_name, username): - try: - keyring.delete_password(service_name, username) - except PasswordDeleteError: - return - - -class SecArgs(object): - def try_set(self, arg_name, cli_arg=None, config_arg=None): - if cli_arg is not None: - setattr(self, arg_name, cli_arg) - elif config_arg is not None: - setattr(self, arg_name, config_arg) diff --git a/docs/Makefile b/docs/Makefile new file mode 100644 index 000000000..51285967a --- /dev/null +++ b/docs/Makefile @@ -0,0 +1,19 @@ +# Minimal makefile for Sphinx documentation +# + +# You can set these variables from the command line. +SPHINXOPTS = +SPHINXBUILD = sphinx-build +SOURCEDIR = . 
+BUILDDIR = _build + +# Put it first so that "make" without argument is like "make help". +help: + @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) + +.PHONY: help Makefile + +# Catch-all target: route all unknown targets to Sphinx using the new +# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). +%: Makefile + @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/docs/_static/custom.css b/docs/_static/custom.css new file mode 100644 index 000000000..550a683db --- /dev/null +++ b/docs/_static/custom.css @@ -0,0 +1,3 @@ +.wy-side-nav-search>div.version { + color: #404040; +} diff --git a/docs/commands.md b/docs/commands.md new file mode 100644 index 000000000..79c54b172 --- /dev/null +++ b/docs/commands.md @@ -0,0 +1,32 @@ +# Commands + +```{eval-rst} +.. toctree:: + :hidden: + :maxdepth: 2 + :glob: + + Alert Rules + Alerts + Audit Logs + Cases + Devices + Legal Hold + Profile + Security Data + Trusted Activities + Users + Watchlists +``` + +* [Alert Rules](commands/alertrules.rst) +* [Alerts](commands/alerts.rst) +* [Audit Logs](commands/auditlogs.rst) +* [Cases](commands/cases.rst) +* [Devices](commands/devices.rst) +* [Legal Hold](commands/legalhold.rst) +* [Profile](commands/profile.rst) +* [Security Data](commands/securitydata.rst) +* [Trusted Activities](commands/trustedactivities.rst) +* [Users](commands/users.rst) +* [Watchlists](commands/watchlists.rst) diff --git a/docs/commands/alertrules.rst b/docs/commands/alertrules.rst new file mode 100644 index 000000000..cb0d90500 --- /dev/null +++ b/docs/commands/alertrules.rst @@ -0,0 +1,5 @@ +.. warning:: Incydr functionality is **deprecated**. Use the Incydr CLI instead. + +.. click:: code42cli.cmds.alert_rules:alert_rules + :prog: alert-rules + :nested: full diff --git a/docs/commands/alerts.rst b/docs/commands/alerts.rst new file mode 100644 index 000000000..96c7eb826 --- /dev/null +++ b/docs/commands/alerts.rst @@ -0,0 +1,5 @@ +.. 
warning:: Incydr functionality is **deprecated**. Use the Incydr CLI instead. + +.. click:: code42cli.cmds.alerts:alerts + :prog: alerts + :nested: full diff --git a/docs/commands/auditlogs.rst b/docs/commands/auditlogs.rst new file mode 100644 index 000000000..d2d70f436 --- /dev/null +++ b/docs/commands/auditlogs.rst @@ -0,0 +1,5 @@ +.. warning:: Incydr functionality is **deprecated**. Use the Incydr CLI instead. + +.. click:: code42cli.cmds.auditlogs:audit_logs + :prog: audit-logs + :nested: full diff --git a/docs/commands/cases.rst b/docs/commands/cases.rst new file mode 100644 index 000000000..b2e5665ab --- /dev/null +++ b/docs/commands/cases.rst @@ -0,0 +1,5 @@ +.. warning:: Incydr functionality is **deprecated**. Use the Incydr CLI instead. + +.. click:: code42cli.cmds.cases:cases + :prog: cases + :nested: full diff --git a/docs/commands/devices.rst b/docs/commands/devices.rst new file mode 100644 index 000000000..79d477237 --- /dev/null +++ b/docs/commands/devices.rst @@ -0,0 +1,3 @@ +.. click:: code42cli.cmds.devices:devices + :prog: devices + :nested: full diff --git a/docs/commands/legalhold.rst b/docs/commands/legalhold.rst new file mode 100644 index 000000000..e6c1598a0 --- /dev/null +++ b/docs/commands/legalhold.rst @@ -0,0 +1,3 @@ +.. click:: code42cli.cmds.legal_hold:legal_hold + :prog: legal-hold + :nested: full diff --git a/docs/commands/profile.rst b/docs/commands/profile.rst new file mode 100644 index 000000000..a8f7d1675 --- /dev/null +++ b/docs/commands/profile.rst @@ -0,0 +1,3 @@ +.. click:: code42cli.cmds.profile:profile + :prog: profile + :nested: full diff --git a/docs/commands/securitydata.rst b/docs/commands/securitydata.rst new file mode 100644 index 000000000..15c37a73b --- /dev/null +++ b/docs/commands/securitydata.rst @@ -0,0 +1,9 @@ +************* +Security Data +************* + +.. warning:: Incydr functionality is **deprecated**. Use the Incydr CLI instead. + +.. 
click:: code42cli.cmds.securitydata:security_data + :prog: security-data + :nested: full diff --git a/docs/commands/trustedactivities.rst b/docs/commands/trustedactivities.rst new file mode 100644 index 000000000..ff218d34e --- /dev/null +++ b/docs/commands/trustedactivities.rst @@ -0,0 +1,5 @@ +.. warning:: Incydr functionality is **deprecated**. Use the Incydr CLI instead. + +.. click:: code42cli.cmds.trustedactivities:trusted_activities + :prog: trusted-activities + :nested: full diff --git a/docs/commands/users.rst b/docs/commands/users.rst new file mode 100644 index 000000000..59dc0c20c --- /dev/null +++ b/docs/commands/users.rst @@ -0,0 +1,3 @@ +.. click:: code42cli.cmds.users:users + :prog: users + :nested: full diff --git a/docs/commands/watchlists.rst b/docs/commands/watchlists.rst new file mode 100644 index 000000000..b52b462b0 --- /dev/null +++ b/docs/commands/watchlists.rst @@ -0,0 +1,5 @@ +.. warning:: Incydr functionality is **deprecated**. Use the Incydr CLI instead. + +.. click:: code42cli.cmds.watchlists:watchlists + :prog: watchlists + :nested: full diff --git a/docs/conf.py b/docs/conf.py new file mode 100644 index 000000000..87a5ab36f --- /dev/null +++ b/docs/conf.py @@ -0,0 +1,127 @@ +# +# Configuration file for the Sphinx documentation builder. +# +# This file does only contain a selection of the most common options. For a +# full list see the documentation: +# http://www.sphinx-doc.org/en/master/config +# -- Path setup -------------------------------------------------------------- +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. 
+# +import os +import sys + +import code42cli.__version__ as meta + +# -- Project information ----------------------------------------------------- + +project = "code42cli" +copyright = "2022, Code42 Software" +author = "Code42 Software" + +# The short X.Y version +version = f"code42cli v{meta.__version__}" +# The full version, including alpha/beta/rc tags +release = f"code42cli v{meta.__version__}" + + +# -- General configuration --------------------------------------------------- + +# If your documentation needs a minimal Sphinx version, state it here. +# +needs_sphinx = "4.4.0" + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = [ + "sphinx.ext.autodoc", + "sphinx.ext.napoleon", + "myst_parser", + "sphinx_click", +] + +# Add myst_parser types to suppress warnings +suppress_warnings = ["myst.header", "myst.xref_missing"] + +# Add any paths that contain templates here, relative to this directory. +templates_path = ["_templates"] + +# The suffix(es) of source filenames. +# You can specify multiple suffix as a list of string: +# +source_suffix = [".rst", ".md"] + +# The master toctree document. +master_doc = "index" + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +# +# This is also used if you do content translation via gettext catalogs. +# Usually you set "language" from the command line for these cases. +# language = None + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +# This pattern also affects html_static_path and html_extra_path. +exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"] + +# The name of the Pygments (syntax highlighting) style to use. 
+pygments_style = None + +# generate header anchors +myst_heading_anchors = 4 + +# -- Options for HTML output ------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +# +html_theme = "sphinx_rtd_theme" + +html_favicon = "favicon.ico" + +html_logo = "logo.png" + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. + +html_theme_options = { + "style_nav_header_background": "#f0f0f0", + "logo_only": True, + # TOC options + "navigation_depth": 4, + "titles_only": True, + "collapse_navigation": False, +} + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ["_static"] + +# These paths are either relative to html_static_path +# or fully qualified paths (eg. https://...) +html_css_files = ["custom.css"] + +# Custom sidebar templates, must be a dictionary that maps document names +# to template names. +# +# The default sidebars (for documents that don't match any pattern) are +# defined by theme itself. Builtin themes are using these templates by +# default: ``['localtoc.html', 'relations.html', 'sourcelink.html', +# 'searchbox.html']``. +# +# html_sidebars = {} + +# At the bottom of conf.py + + +def setup(app): + pass + + +sys.path.insert(0, os.path.abspath("..")) diff --git a/docs/favicon.ico b/docs/favicon.ico new file mode 100644 index 000000000..9a23a8e7d Binary files /dev/null and b/docs/favicon.ico differ diff --git a/docs/guides.md b/docs/guides.md new file mode 100644 index 000000000..bbf07f09e --- /dev/null +++ b/docs/guides.md @@ -0,0 +1,24 @@ +# User Guides + +```{eval-rst} +.. 
toctree:: + :hidden: + :maxdepth: 2 + :glob: + + Get started with the Code42 command-line interface (CLI) + Configure a profile + Manage legal hold users + Clean up your environment by deactivating devices + Write custom extension scripts using the Code42 CLI and Py42 + Manage users + Perform bulk actions +``` + +* [Get started with the Code42 command-line interface (CLI)](userguides/gettingstarted.md) +* [Configure a profile](userguides/profile.md) +* [Manage legal hold users](userguides/legalhold.md) +* [Clean up your environment by deactivating devices](userguides/deactivatedevices.md) +* [Write custom extension scripts using the Code42 CLI and Py42](userguides/extensions.md) +* [Manage users](userguides/users.md) +* [Perform bulk actions](userguides/bulkcommands.md) diff --git a/docs/index.md b/docs/index.md new file mode 100644 index 000000000..c2899507d --- /dev/null +++ b/docs/index.md @@ -0,0 +1,41 @@ +# Code42 command-line interface (CLI) + +```{eval-rst} +.. toctree:: + :hidden: + :maxdepth: 2 + + guides +``` + +```{eval-rst} +.. toctree:: + :hidden: + :maxdepth: 2 + + commands +``` + +```{eval-rst} +.. warning:: Incydr functionality in the code42cli is **deprecated**. Use the resources at https://developer.code42.com/ instead. +``` + +[![license](https://img.shields.io/pypi/l/code42cli.svg)](https://pypi.org/project/code42cli/) +[![versions](https://img.shields.io/pypi/pyversions/code42cli.svg)](https://pypi.org/project/code42cli/) + +The Code42 command-line interface (CLI) tool offers a way to interact with your Code42 environment without using the +Code42 console or making API calls directly. For example, you can use it to extract Code42 data for use in a security +information and event management (SIEM) tool or manage users on the High Risk Employees list or Departing Employees +list. 
+ +## Requirements +To use the Code42 CLI, you must have: + +* A [Code42 product plan](https://code42.com/r/support/product-plans) that supports the feature or functionality for your use case +* Endpoint monitoring enabled in the Code42 console +* Python version 3.6 and later installed + +## Content + +* [User Guides](guides.md) +* [Commands](commands.md) diff --git a/docs/logo.png b/docs/logo.png new file mode 100644 index 000000000..8443612d9 Binary files /dev/null and b/docs/logo.png differ diff --git a/docs/make.bat b/docs/make.bat new file mode 100644 index 000000000..27f573b87 --- /dev/null +++ b/docs/make.bat @@ -0,0 +1,35 @@ +@ECHO OFF + +pushd %~dp0 + +REM Command file for Sphinx documentation + +if "%SPHINXBUILD%" == "" ( + set SPHINXBUILD=sphinx-build +) +set SOURCEDIR=. +set BUILDDIR=_build + +if "%1" == "" goto help + +%SPHINXBUILD% >NUL 2>NUL +if errorlevel 9009 ( + echo. + echo.The 'sphinx-build' command was not found. Make sure you have Sphinx + echo.installed, then set the SPHINXBUILD environment variable to point + echo.to the full path of the 'sphinx-build' executable. Alternatively you + echo.may add the Sphinx directory to PATH. + echo. 
+ echo.If you don't have Sphinx installed, grab it from + echo.http://sphinx-doc.org/ + exit /b 1 +) + +%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% +goto end + +:help +%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% + +:end +popd diff --git a/docs/requirements.txt b/docs/requirements.txt new file mode 100644 index 000000000..4fffce759 --- /dev/null +++ b/docs/requirements.txt @@ -0,0 +1,2 @@ +click==8.0.0 +sphinx-click==2.5.0 diff --git a/docs/userguides/bulkcommands.md b/docs/userguides/bulkcommands.md new file mode 100644 index 000000000..09712a3cd --- /dev/null +++ b/docs/userguides/bulkcommands.md @@ -0,0 +1,23 @@ +# Using Bulk Commands + +Bulk functionality is available for many Code42 CLI methods, more details on which commands have bulk capabilities can be found in the [Commands Documentation](../commands.md). + +All bulk methods take a CSV file as input. + +The `generate-template` command can be used to create a CSV file with the necessary headers for a particular command. + +For instance, the following command will create a file named `devices_bulk_deactivate.csv` with a single column header row of `guid`. +```bash +code42 devices bulk generate-template deactivate +``` + +The CSV file can contain more columns than are necessary for the command, however then the header row is **required**. + +If the CSV file contains the *exact* number of columns that are necessary for the command then the header row is **optional**, but columns are expected to be in the same order as the template. + +To run a bulk method, simply pass the CSV file path to the desired command. 
For example, you would use the following command to deactivate multiple devices within your organization at once: + + +```bash +code42 devices bulk deactivate devices_bulk_deactivate.csv +``` diff --git a/docs/userguides/deactivatedevices.md b/docs/userguides/deactivatedevices.md new file mode 100644 index 000000000..994df083b --- /dev/null +++ b/docs/userguides/deactivatedevices.md @@ -0,0 +1,99 @@ +# Clean up your environment by deactivating devices + +Your Code42 environment may contain many old devices that are no +longer active computers and that have not connected to Code42 in +quite some time. In order to clean up your environment, you can +use the CLI to deactivate these devices in bulk. + +## Generate a list of devices + +You can generate a list of devices using `code42 devices list`. By +default, it will display the list of devices at the command line, +but you can also output it in a number of file formats. For +example, to generate a CSV of active devices in your environment, use +this command: + +``` +code42 devices list --active -f CSV +``` + +To save to a file, redirect the output to a file in your shell: + +``` +code42 devices list --active -f CSV > output.csv +``` + +### Filter the list + +You can filter or edit the list of devices in your spreadsheet or +text editor of choice, but the CLI has some parameters built in +that can help you to filter the list of devices to just the ones +you want to deactivate. To see a full list of available +parameters, run `code42 devices list -h`. + +Here are some useful parameters you may wish to leverage when +curating a list of devices to deactivate: + +* `--last-connected-before DATE|TIMESTAMP|SHORT_TIME` - allows you to only see devices that have not connected since a particular date. You can also use a timestamp or short time format, for example `30d`. +* `--exclude-most-recently-connected INTEGER` - allows you to exclude the most recently connected device (per user) from the results. 
This allows you to ensure that every user is left with at least N device(s), regardless of how recently they have connected. +* `--created-before DATE|TIMESTAMP|SHORT_TIME` - allows you to only see devices created before a particular date. + +## Deactivate devices + +Once you have a list of devices that you want to remove, you can +run the `code42 devices bulk deactivate` command: + +``` +code42 devices bulk deactivate list_of_devices.csv +``` + +The device list must be a file in CSV format containing a `guid` +column with the unique identifier of the devices to be +deactivated. The deactivate command can also accept some optional +parameters: + +* `--change-device-name` - prepends `deactivated_` to the beginning of the device name, allowing you to have a record of which devices were deactivated by the CLI and when. +* `--purge-date yyyy-MM-dd` - allows you to change the date on which the deactivated devices' archives will be purged from cold storage. + +To see a full list of available options, run `code42 devices bulk deactivate -h`. + +The `code42 devices bulk deactivate` command will output the guid +of the device to be deactivated, plus a column indicating the +success or failure of the deactivation. To change the format of +this output, use the `-f` or `--format` option. + +You can also redirect the output to a file, for example: + +``` +code42 devices bulk deactivate devices_to_deactivate.csv -f CSV > deactivation_results.csv +``` + +Deactivation will fail if the user running the command does not +have permission to deactivate the device, or if the user owning +the device is on legal hold. + + +### Generate the list and deactivate in a single command + +You can also pipe the output of `code42 devices list` directly to +`code42 devices bulk deactivate`. When using a pipe, make sure to +use `-` as the input argument for `code42 devices bulk deactivate` +to indicate that it should read from standard input. 
+ +Here is an example: + +``` +code42 devices list --active \ +--last-connected-before 365d \ +--exclude-most-recently-connected 1 \ +-f CSV \ +| code42 devices bulk deactivate - \ +-f CSV \ +> deactivation_results.csv +``` + +This lists all devices that have not connected within a year _and_ +are not a user's most-recently-connected device, and then attempts +to deactivate them. + +Learn more about [Managing Devices](../commands/devices.md). diff --git a/docs/userguides/extensions.md b/docs/userguides/extensions.md new file mode 100644 index 000000000..defadeea1 --- /dev/null +++ b/docs/userguides/extensions.md @@ -0,0 +1,101 @@ +# Write custom extension scripts using the Code42 CLI and py42 + +While the Code42 CLI aims to provide an easy way to automate many common Code42 tasks, there will likely be times when +you need to script something the CLI doesn't have out-of-the-box. + +To accommodate for those scenarios, the Code42 CLI exposes a few helper objects in the `code42cli.extensions` module +that make it easy to write custom scripts with `py42` that use features of the CLI (like profiles) to reduce the amount +of boilerplate needed to be productive. + +## Before you begin + +The Code42 CLI is a python application written using the [click framework](https://click.palletsprojects.com/en/7.x/), +and the exposed extension objects are custom `click` classes. A basic knowledge of how to define `click` commands, +arguments, and options is required. + +### The `sdk_options` decorator + +The most important extension object is the `sdk_options` decorator. When you decorate a command you've defined in your +script with `@sdk_options`, it will automatically add `--profile` and `--debug` options to your command. These work the +same as in the main CLI commands. + +Decorating a command with `@sdk_options` also causes the first argument to your command function to be the `state` +object, which contains the initialized py42 sdk. 
There's no need to handle user credentials or login, the `sdk_options` +does all that for you using the CLI profiles. + +### The `script` group + +The `script` object exposed in the extensions module is a `click.Group` subclass, which allows you to add multiple +sub-commands and group functionality together. While not explicitly required when writing custom scripts, the `script` +group has logic to help handle and log any uncaught exceptions to the `~/.code42cli/log/code42_errors.log` file. + +If only a single command is added to the `script` group, the group will default to that command, so you don't need to +explicitly provide the sub-command name. + +An example command that just prints the username and ID that the sdk is authenticated with: + +```python +import click +from code42cli.extensions import script, sdk_options + +@click.command() +@sdk_options +def my_command(state): + user = state.sdk.users.get_current() + print(user["username"], user["userId"]) + +if __name__ == "__main__": + script.add_command(my_command) + script() +``` + +## Ensuring your script runs in the Code42 CLI python environment + +The above example works as a standalone script, if it were named `my_script.py` you could execute it by running: + +```bash +python3 my_script.py +``` + +However, if the Code42 CLI is installed in a different python environment than your `python3` command, it might fail to +import the extensions. + +To workaround environment and path issues, the CLI has a `--python` option that prints out the path to the python +executable the CLI uses, so you can execute your script with`$(code42 --python) script.py` on Mac/Linux or +`&$(code42 --python) script.py` on Windows to ensure it always uses the correct python path for the extension script to +work. + +## Installing your extension script as a Code42 CLI plugin + +The above example works as a standalone script, but it's also possible to install that same script as a plugin into the +main CLI itself. 
+ +Assuming the above example code is in a file called `my_script.py`, just add a file `setup.py` in the same directory +with the following: + +```python +from distutils.core import setup + +setup( + name="my_script", + version="0.1", + py_modules=["my_script"], + install_requires=["code42cli"], + entry_points=""" + [code42cli.plugins] + my_command=my_script:my_command + """, +) +``` + +The `entry_points` section tells the Code42 CLI where to look for the commands to add to its main group. If you have +multiple commands defined in your script you can add one per line in the `entry_points` and they'll all get installed +into the Code42 CLI. + +Once your `setup.py` is ready, install it with pip while in the directory of `setup.py`: + +``` +$(code42 --python) -m pip install . +``` + +Then running `code42 -h` should show `my-command` as one of the available commands to run! diff --git a/docs/userguides/gettingstarted.md b/docs/userguides/gettingstarted.md new file mode 100644 index 000000000..abfcf366a --- /dev/null +++ b/docs/userguides/gettingstarted.md @@ -0,0 +1,192 @@ +# Get started with the Code42 command-line interface (CLI) + +* [Licensing](#licensing) +* [Installation](#installation) +* [Authentication](#authentication) +* [Troubleshooting and Support](#troubleshooting-and-support) + +## Licensing + +This project uses the [MIT License](https://github.com/code42/code42cli/blob/main/LICENSE.md). + +## Installation + +You can install the Code42 CLI from PyPI, from source, or from distribution. + +### From PyPI + +The easiest and most common way is to use `pip`: + +```bash +python3 -m pip install code42cli +``` + +To install a previous version of the Code42 CLI via `pip`, add the version number. For example, to install version +0.5.3, enter: + +```bash +python3 -m pip install code42cli==0.5.3 +``` + +Visit the [project history](https://pypi.org/project/code42cli/#history) on PyPI to see all published versions. 
+ +### From source + +Alternatively, you can install the Code42 CLI directly from [source code](https://github.com/code42/code42cli): + +```bash +git clone https://github.com/code42/code42cli.git +``` + +When it finishes downloading, from the root project directory, run: + +```bash +python setup.py install +``` + +### From distribution + +If you want to create a `.tar` ball for installing elsewhere, run the following command from the project's root directory: + +```bash +python setup.py sdist +``` + +After it finishes building, the `.tar` ball will be located in the newly created `dist` directory. To install it, enter: + +```bash +python3 -m pip install code42cli-[VERSION].tar.gz +``` + +## Updates + +To update the CLI, use the pip `--upgrade` flag. + +```bash +python3 -m pip install code42cli --upgrade +``` + +## Authentication + +```{eval-rst} +.. important:: The Code42 CLI currently only supports token-based authentication. +``` + +Create a user in Code42 to authenticate (basic authentication) and access data via the CLI. The CLI returns data based +on the roles assigned to this user. To ensure that the user's rights are not too permissive, create a user with the lowest +level of privilege necessary. See our [Role assignment use cases](https://support.code42.com/Administrator/Cloud/Monitoring_and_managing/Role_assignment_use_cases) +for information on recommended roles. We recommend you test to confirm that the user can access the right data. + +If you choose not to store your password in the CLI, you must enter it for each command that requires a connection. + +The Code42 CLI supports local accounts with MFA (multi-factor authentication) enabled. The Time-based One-Time +Password (TOTP) must be provided at every invocation of the CLI, either via the `--totp` option or when prompted. + +The Code42 CLI currently does **not** support SSO login providers or any other identity providers such as Active +Directory or Okta. + +## Proxy Support + +```{eval-rst} +.. 
note:: Proxy support was added in code42cli version 1.16.0 +``` + +The Code42 CLI will attempt to connect through a proxy if the `https_proxy`/`HTTPS_PROXY` environment variable is set. + +### Windows and Mac + +For Windows and Mac systems, the CLI uses Keyring when storing passwords. + +### Red Hat Enterprise Linux + +To use Keyring to store the credentials you enter in the Code42 CLI, enter the following commands before installing. +```bash +yum -y install python-pip python3 dbus-python gnome-keyring libsecret dbus-x11 +pip3 install code42cli +``` +If the following directories do not already exist, create them: +```bash +mkdir -p ~/.cache +mkdir -p ~/.local/share/keyring +``` +In the following commands, replace the example value `\n` with the Keyring password (if the default Keyring already exists). +```bash +eval "$(dbus-launch --sh-syntax)" +eval "$(printf '\n' | gnome-keyring-daemon --unlock)" +eval "$(printf '\n' | /usr/bin/gnome-keyring-daemon --start)" +``` +Close out your D-bus session and GNOME Keyring: +```bash +pkill gnome +pkill dbus +``` +If you do not use Keyring to store your credentials, the Code42 CLI will ask permission to store your credentials in a local flat file with read/write permissions for only the operating system user who set the password. Alternatively, you can enter your password with each command you enter. + +### Ubuntu +If Keyring doesn't support your Ubuntu system, the Code42 CLI will ask permission to store your credentials in a local flat file with read/write permissions for only the operating system user who set the password. Alternatively, you can enter your password with each command you enter. + + + +To learn more about authenticating in the CLI, follow the [Configure profile guide](profile.md). + +## Troubleshooting and support + +### Code42 command not found + +If your python installation has added itself to your environment's PATH variable, then running `code42` _should_ just work. 
+ +However, if after installation the `code42` command is not found, the CLI has some helpers for this (added in version 1.10): + +You can execute the CLI by calling the python module directly: + +```bash +python3 -m code42cli +``` + +And the base `code42` command now has a `--script-dir` option that will print out the directory the `code42` script was +installed into, so you can manually add it to your PATH, enabling the `code42` command to work. + +#### On Mac/Linux: + +Run the following to make `code42` visible in your shell's PATH (to persist the change, add it to your shell's configuration file): + +```bash +export PATH=$PATH:$(python3 -m code42cli --script-dir) +``` + +#### On Windows: + +```powershell +$env:Path += ";$(python -m code42cli --script-dir)" +``` + +To persist the change, add the updated PATH to your registry: + +```powershell +Set-ItemProperty -Path 'Registry::HKEY_LOCAL_MACHINE\System\CurrentControlSet\Control\Session Manager\Environment' -Name PATH -Value $env:Path +``` + +### Debug mode + +Debug mode may be useful if you are trying to determine if you are experiencing permissions issues. When debug mode is +on, the CLI logs HTTP request data to the console. Use the `-d` flag to enable debug mode for a particular command. +`-d` can appear anywhere in the command chain: + +```bash +code42 -d +``` + +### File an issue on GitHub + +If you are experiencing an issue with the Code42 CLI, select *New issue* at the +[project repository](https://github.com/code42/code42cli/issues) to create an issue. See the Github +[guide on creating an issue](https://help.github.com/en/github/managing-your-work-on-github/creating-an-issue) for more information. + +### Contact Code42 Support + +If you don't have a GitHub account and are experiencing issues, contact +[Code42 support](https://support.code42.com/). + +## What's next? + +Learn how to [Set up a profile](profile.md). 
diff --git a/docs/userguides/legalhold.md b/docs/userguides/legalhold.md new file mode 100644 index 000000000..f01f46b71 --- /dev/null +++ b/docs/userguides/legalhold.md @@ -0,0 +1,107 @@ +# Manage legal hold custodians + +Once you [create a legal hold matter in the Code42 console](https://support.code42.com/Administrator/Cloud/Configuring/Create_a_legal_hold_matter#Step_1:_Create_a_matter), you can use the Code42 CLI to add or release custodians from the matter. + +To see a list of all the users currently in your organization: +- Export a list from the [Users action menu](https://support.code42.com/Administrator/Cloud/Code42_console_reference/Users_reference#Action_menu). +- Use the [CLI users commands](./users.md). + +Use the `legal-hold` commands to manage legal hold custodians. + - To view a list of legal hold matters for your organization, including the matter ID, use the following command: + `code42 legal-hold list` + - To see a list of all the custodians currently associated with a legal hold matter, enter `code42 legal-hold show `. + + +## Get CSV template + +To add multiple custodians to a legal hold matter: + +1. Generate a CSV template. Below is an example command that generates a template to use when bulk adding custodians to legal hold matter. Once generated, the CSV file is saved to your current working directory. + `code42 legal-hold bulk generate-template add` + + To generate a template to use when bulk releasing custodians from a legal hold matter: + + `code42 legal-hold bulk generate-template remove` + + The CSV templates for `add` and `remove` have the same columns, but the commands generate different default filenames. + +2. Use the CSV template to enter the matter ID(s) and Code42 usernames for the custodians you want to add to the matters. +To get the ID for a matter, enter `code42 legal-hold list`. +3. Save the CSV file. 
+ +## Add custodians to a legal hold matter + +You can add one or more custodians to a legal hold matter using the Code42 CLI. + +### Add multiple custodians +Once you have entered the matter ID and user information in the CSV file, use the `bulk add` command with the CSV file path to add multiple custodians at once. For example: + +`code42 legal-hold bulk add /Users/admin/add_users_to_legal_hold.csv` + +### Add a single custodian + +To add a single custodian to a legal hold matter, use the following command as an example: + +`code42 legal-hold add-user --matter-id 123456789123456789 --username user@example.com` + +#### Options + + - `--matter-id` (required): The identification number of the legal hold matter. To get the ID for a matter, run the command `code42 legal-hold list`. + - `--username` (required): The Code42 username of the custodian to add to the matter. + - `--profile` (optional): The profile to use to execute the command. If not specified, the default profile is used. + +## Release custodians +You can [release one or more custodians](https://support.code42.com/Administrator/Cloud/Configuring/Create_a_legal_hold_matter#Release_or_reactivate_custodians) from a legal hold matter using the Code42 CLI. + +### Release multiple custodians + +To release multiple custodians at once: + +1. Enter the matter ID(s) and Code42 usernames to the [CSV file template you generated](#get-csv-template). +2. Save the file to your current working directory. +3. Use the `bulk remove` command with the file path of the CSV you created. For example: + `code42 legal-hold bulk remove /Users/admin/remove_users_from_legal_hold.csv` + +### Release a single custodian + +Use `remove-user` to release a single custodian. For example: + +`code42 legal-hold remove-user --matter-id 123456789123456789 --username user@example.com` + +Options are the same as `add-user` shown above. 
+ +## View matters and custodians + +You can use the Code42 CLI to get a list of all the [legal hold matters](https://support.code42.com/Administrator/Cloud/Code42_console_reference/Legal_Hold_reference#All_Matters) for your organization, or get full details for a matter. + +### List legal hold matters + +To view a list of legal hold matters for your organization, use the following command: + +`code42 legal-hold list` + +This command produces the matter ID, name, description, creator, and creation date for the legal hold matters. + +### View matter details + +To view active custodians for a legal hold matter, enter `code42 legal-hold show` with the matter ID, for example: + +`code42 legal-hold show 123456789123456789` + +To view active custodians for a legal hold matter, as well as the details of the preservation policy, enter + +`code42 legal-hold show --include-policy` + +To view all custodians (including inactive) for a legal hold matter, enter + +`code42 legal-hold show --include-inactive` + +### List legal hold events + +To view a list of legal hold administrative events, use the following command: + +`code42 legal-hold search-events` + +This command takes the optional filters of a specific matter uid, beginning timestamp, end timestamp, and event type. + +Learn more about the [Legal Hold](../commands/legalhold.md) commands. diff --git a/docs/userguides/profile.md b/docs/userguides/profile.md new file mode 100644 index 000000000..99995c95f --- /dev/null +++ b/docs/userguides/profile.md @@ -0,0 +1,55 @@ +# Configure profile + +Use the [code42 profile](../commands/profile.md) set of commands to establish the Code42 environment you're working +within and your user information. 
+ +## User token authentication + +Use the following command to create your profile with user token authentication: +```bash +code42 profile create --name MY_FIRST_PROFILE --server example.authority.com --username security.admin@example.com +``` + +Your profile contains the necessary properties for authenticating with Code42. After running `code42 profile create`, +the program prompts you about storing a password. If you agree, you are then prompted to enter your password. + +Your password is not shown when you do `code42 profile show`. However, `code42 profile show` will confirm that a +password exists for your profile. If you do not set a password, you will be securely prompted to enter a password each +time you run a command. + +## API client authentication + +Once you've generated an API Client in your Code42 console, use the following command to create your profile with API client authentication: +```bash +code42 profile create-api-client --name MY_API_CLIENT_PROFILE --server example.authority.com --api-client-id 'key-42' --secret 'code42%api%client%secret' +``` + +```{eval-rst} +.. note:: Remember to wrap your API client secret with single quotes to avoid issues with bash expansion and special characters. +``` + +## View profiles + +You can add multiple profiles with different names and then change the default profile with the `use` command: + +```bash +code42 profile use MY_SECOND_PROFILE +``` + +When you use the `--profile` flag with other commands, such as those in `security-data`, that profile is used +instead of the default profile. For example, + +```bash +code42 security-data search -b 2020-02-02 --profile MY_SECOND_PROFILE +``` + +To see all your profiles, do: + +```bash +code42 profile list +``` + +## Profiles with Multi-Factor Authentication + +If your Code42 user account requires multi-factor authentication, the MFA token can either be passed in with the `--totp` +option, or if not passed you will be prompted to enter it before the command executes. 
diff --git a/docs/userguides/users.md b/docs/userguides/users.md new file mode 100644 index 000000000..f090471f1 --- /dev/null +++ b/docs/userguides/users.md @@ -0,0 +1,118 @@ +# Manage Users + +You can use the CLI to manage user information, update user roles, and move users between organizations. + +To view all the users currently in your organization, you can export a list from the [Users list in the Code42 console](https://support.code42.com/Administrator/Cloud/Code42_console_reference/Users_reference) or you can use the `list` command. + +You can use optional flags to filter the users you want to view. The following command will print all active users with the `Desktop User` role who belong to the organization with UID `1234567890`: +```bash +code42 users list --org-uid 1234567890 --role-name "Desktop User" --active +``` + +To change the information for one or more users, provide the user UID and updated information with the `update` or `bulk update` commands. + +## Manage User Roles + +Apply [Code42's user roles](https://support.code42.com/Administrator/Cloud/Monitoring_and_managing/Roles_resources/Roles_reference#Standard_roles) to user accounts to provide administrators with the desired set of permissions. Each role has associated permissions, limitations, and recommended use cases. + +#### View User Roles +View a user's current roles and other details with the `show` command: +```bash +code42 users show "sean.cassidy@example.com" +``` +Alternatively, pass the `--include-roles` flag to the `list ` command. The following command will print a list of all active users and their current roles: +```bash +code42 users list --active --include-roles +``` + +#### Update User Roles + +Use the following command to add a role to a user: +```bash +code42 users add-role --username "sean.cassidy@example.com" --role-name "Desktop User" +``` + +Similarly, use the `remove-role` command to remove a role from a user. 
+ +## Manage User Risk Profile info + +To set a start or end/departure date on a User's profile (useful for users on the "New Hire" and "Departing" Watchlists): + +```bash +code42 users update-start-date 2020-03-10 user@example.com + +code42 users update-departure-date 2022-06-20 user@example.com +``` + +To clear the value of start_date/end_date on a User's profile, use the `--clear` option to the above commands: + +```bash +code42 users update-departure-date --clear user@example.com +``` + +To update a User's Risk Profile notes field: + +```bash +code42 users update-risk-profile-notes user@example.com "New note text" +``` + +By default, the note text will overwrite notes that are already on the profile. To keep existing note data, use the `--append` option: + +```bash +code42 users update-risk-profile-notes user@example.com "Additional note text" --append +``` + +## Deactivate a User + +You can deactivate a user with the following command: +```bash +code42 users deactivate sean.cassidy@example.com +``` + +To deactivate multiple users at once, enter each username on a new line in a CSV file, then use the `bulk deactivate` command with the CSV file path. For example: +```bash +code42 users bulk deactivate users_to_deactivate.csv +``` + +Similarly, use the `reactivate` and `bulk reactivate` commands to reactivate a user. + +## Assign an Organization + +Use [Organizations](https://support.code42.com/Administrator/Cloud/Code42_console_reference/Organizations_reference) to group users together in the Code42 environment. + +You'll need an organization's unique identifier number (UID) to move a user into it. You can use the `list` command to view a list of all current user organizations, including UIDs: +```bash +code42 users orgs list +``` + +Use the `show` command to view all the details of a user organization. 
+As an example, to print the details of an organization associated with the UID `123456789` in JSON format: +```bash +code42 users show 123456789 -f JSON +``` + +Once you've identified your organization's UID number, use the `move` command to move a user into that organization. In the following example a user is moved into the organization associated with the UID `1234567890`: +```bash +code42 users move --username sean.cassidy@example.com --org-id 1234567890 +``` + +Alternatively, to move multiple users between organizations, fill out the `move` CSV file template, then use the `bulk move` command with the CSV file path. +```bash +code42 users bulk move bulk-command.csv +``` + +## Get CSV Template for bulk commands + +The following command generates a CSV template for each of the available bulk user commands. The CSV file is saved to the current working directory. +```bash +code42 users bulk generate-template [update|move|add-alias|remove-alias|update-risk-profile] +``` + +Once generated, fill out and use each of the CSV templates with their respective bulk commands. +```bash +code42 users bulk [update|move|deactivate|reactivate|add-alias|remove-alias|update-risk-profile] bulk-command.csv +``` + +A CSV with a `username` column and a single username on each new line is used for the `reactivate` and `deactivate` bulk commands. These commands are not available as options for `generate-template`. + +Learn more about [Managing Users](../commands/users.md). 
diff --git a/pyproject.toml b/pyproject.toml deleted file mode 100644 index 3a5bff861..000000000 --- a/pyproject.toml +++ /dev/null @@ -1,17 +0,0 @@ -[tool.black] -line-length = 100 -include = '\.pyi?$' -exclude = ''' -/( - \.eggs - | \.git - | \.hg - | \.mypy_cache - | \.tox - | \.venv - | _build - | buck-out - | build - | dist -)/ -''' \ No newline at end of file diff --git a/setup.cfg b/setup.cfg new file mode 100644 index 000000000..f104e210b --- /dev/null +++ b/setup.cfg @@ -0,0 +1,37 @@ +[metadata] +license_file = LICENSE.md + +[bdist_wheel] +universal = 1 + +[tool:pytest] +testpaths = tests +filterwarnings = + error + +[flake8] +# B = bugbear +# E = pycodestyle errors +# F = flake8 pyflakes +# W = pycodestyle warnings +# B9 = bugbear opinions, +# ISC = implicit str concat +select = B, E, F, W, B9, ISC +ignore = + # slice notation whitespace, different opinion from black + E203 + # line length, handled by black + B950 + E501 + # bare except, handled by bugbear B001 + E722 + # binary operation line break, different opinion from black + W503 + # exception chaining + B904 + # manual quoting + B907 + # assertRaises-type + B908 +# up to 88 allowed by bugbear B950 +max-line-length = 80 diff --git a/setup.py b/setup.py index 7bf0d18ff..1fd443269 100644 --- a/setup.py +++ b/setup.py @@ -1,15 +1,75 @@ -from setuptools import find_packages, setup +from codecs import open +from os import path + +from setuptools import find_packages +from setuptools import setup + +here = path.abspath(path.dirname(__file__)) + +about = {} +with open(path.join(here, "src", "code42cli", "__version__.py"), encoding="utf8") as fh: + exec(fh.read(), about) + +with open(path.join(here, "README.md"), "r", "utf-8") as f: + readme = f.read() setup( - name="c42seceventcli", - version="0.1.1", - description="CLI for retrieving Code42 Exfiltration Detection events", - packages=find_packages(include=["c42seceventcli", "c42seceventcli.*"]), - python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, 
!=3.4.*, <4", - install_requires=["c42secevents", "urllib3", "keyring==18.0.1"], - license="MIT", + name="code42cli", + version=about["__version__"], + url="https://github.com/code42/py42", + project_urls={ + "Issue Tracker": "https://github.com/code42/code42cli/issues", + "Documentation": "https://clidocs.code42.com/", + "Source Code": "https://github.com/code42/code42cli", + }, + description="The official command line tool for interacting with Code42", + long_description=readme, + long_description_content_type="text/markdown", + packages=find_packages("src"), + package_dir={"": "src"}, include_package_data=True, zip_safe=False, - extras_require={"dev": ["pre-commit==1.18.3", "pytest==4.6.5", "pytest-mock==1.10.4"]}, - entry_points={"console_scripts": ["c42aed=c42seceventcli.aed.main:main"]}, + python_requires=">=3.9, <4", + install_requires=[ + "chardet", + "click>=7.1.1,<8.2", + "click_plugins>=1.1.1", + "colorama>=0.4.3", + "keyring==18.0.1", + "keyrings.alt==3.2.0", + "ipython>=7.16.3;python_version<'3.8'", + "ipython>=8.10.0;python_version>='3.8'", + "pandas>=1.1.3", + "py42>=1.28.0", + "setuptools>=66.0.0", + ], + extras_require={ + "dev": [ + "flake8>=4.0.0", + "pytest==4.6.11", + "pytest-cov==2.10.0", + "pytest-mock==2.0.0", + "tox>=3.17.1", + "importlib-metadata<5.0", + ], + "docs": [ + "sphinx==8.1.3", + "myst-parser==4.0.0", + "sphinx_rtd_theme==3.0.2", + "sphinx-click", + ], + }, + classifiers=[ + "Intended Audience :: Developers", + "Natural Language :: English", + "License :: OSI Approved :: MIT License", + "Programming Language :: Python", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", + "Programming Language :: Python :: Implementation :: CPython", + ], + entry_points={"console_scripts": ["code42=code42cli.main:cli"]}, ) diff --git a/src/code42cli/__init__.py 
b/src/code42cli/__init__.py new file mode 100644 index 000000000..a0a7d79b2 --- /dev/null +++ b/src/code42cli/__init__.py @@ -0,0 +1,15 @@ +from py42.__version__ import __version__ as py42version + +from code42cli.__version__ import __version__ as cliversion + + +PRODUCT_NAME = "code42cli" +MAIN_COMMAND = "code42" +BANNER = f"""\b + dP""b8 dP"Yb 8888b. 888888 dP88 oP"Yb. +dP `" dP Yb 8I Yb 88__ dP 88 "' dP' +Yb Yb dP 8I dY 88"" d888888 dP' + YboodP YbodP 8888Y" 888888 88 .d8888 + +code42cli version {cliversion}, by Code42 Software. +powered by py42 version {py42version}.""" diff --git a/src/code42cli/__main__.py b/src/code42cli/__main__.py new file mode 100644 index 000000000..9dc21ac74 --- /dev/null +++ b/src/code42cli/__main__.py @@ -0,0 +1,3 @@ +from code42cli.main import cli + +cli(prog_name="code42") diff --git a/src/code42cli/__version__.py b/src/code42cli/__version__.py new file mode 100644 index 000000000..d84d79d43 --- /dev/null +++ b/src/code42cli/__version__.py @@ -0,0 +1 @@ +__version__ = "1.19.0" diff --git a/src/code42cli/bulk.py b/src/code42cli/bulk.py new file mode 100644 index 000000000..72cbe4754 --- /dev/null +++ b/src/code42cli/bulk.py @@ -0,0 +1,164 @@ +import os + +import click + +from code42cli.errors import LoggedCLIError +from code42cli.logger import get_main_cli_logger +from code42cli.worker import Worker + +_logger = get_main_cli_logger() + + +class BulkCommandType: + ADD = "add" + REMOVE = "remove" + + def __iter__(self): + return iter([self.ADD, self.REMOVE]) + + +def write_template_file(path, columns): + with open(path, "w", encoding="utf8") as new_file: + new_file.write(",".join(columns)) + + +def generate_template_cmd_factory(group_name, commands_dict, help_message=None): + """Helper function that creates a `generate-template` click command that can be added to `bulk` + sub-command groups. + + Args: + `group_name`: a str representing the parent command group this is generating templates for. 
+ `commands_dict`: a dict of the commands with their column names. Keys are the cmd + names that will become the `cmd` argument, and values are the list of column names for + the csv. + + If a cmd takes a flat file, value should be a string indicating what item the flat file + rows should contain. + """ + help_message = ( + help_message + or "Generate the CSV template needed for bulk adding/removing users." + ) + + @click.command(help=help_message) + @click.argument("cmd", type=click.Choice(list(commands_dict))) + @click.option( + "--path", + "-p", + type=click.Path(dir_okay=False, resolve_path=True, writable=True), + help="Write template file to specific file path/name.", + ) + def generate_template(cmd, path): + columns = commands_dict[cmd] + if not path: + filename = f"{group_name}_bulk_{cmd.replace('-', '_')}.csv" + path = os.path.join(os.getcwd(), filename) + write_template_file(path, columns) + + return generate_template + + +def run_bulk_process( + row_handler, rows, progress_label=None, stats=None, raise_global_error=True +): + """Runs a bulk process. + + Args: + row_handler (callable): A callable that you define to process values from the row as + either *args or **kwargs. + rows (iterable): the rows to process. + progress_label: a label that prints with the progress bar. + stats (WorkerStats): Pass in WorkerStats if doing error handling outside of the worker. + raise_global_error (bool): Set to False to *NOT* raise a CLI error if any rows fail. + This is useful if doing error handling outside of the worker class. + + Returns: + :class:`WorkerStats`: A class containing the successes and failures count. 
+ """ + processor = _create_bulk_processor( + row_handler, + rows, + progress_label, + stats=stats, + raise_global_error=raise_global_error, + ) + return processor.run() + + +def _create_bulk_processor( + row_handler, rows, progress_label, stats=None, raise_global_error=True +): + """A factory method to create the bulk processor, useful for testing purposes.""" + return BulkProcessor( + row_handler, + rows, + progress_label=progress_label, + stats=stats, + raise_global_error=raise_global_error, + ) + + +class BulkProcessor: + """A class for bulk processing a file. + + Args: + row_handler (callable): A callable that you define to process values from the row as + either *args or **kwargs. For example, if it's a csv file with header `prop_a,prop_b` + and first row `1,test`, then `row_handler` should receive kwargs + `prop_a: '1', prop_b: 'test'` when processing the first row. If it's a flat file, then + `row_handler` only needs to take an extra arg. + """ + + def __init__( + self, + row_handler, + rows, + worker=None, + progress_label=None, + stats=None, + raise_global_error=True, + ): + total = len(rows) + self._rows = rows + self._row_handler = row_handler + self._progress_bar = click.progressbar( + length=len(self._rows), + item_show_func=self._show_stats, + label=progress_label, + ) + self._raise_global_error = raise_global_error + self.__worker = worker or Worker(5, total, bar=self._progress_bar, stats=stats) + self._stats = self.__worker.stats + + def run(self): + """Processes the csv rows specified in the ctor, calling `self.row_handler` on each row.""" + self._stats.reset_results() + for row in self._rows: + self._process_row(row) + self.__worker.wait() + self._handle_if_errors() + return self._stats._results + + def _process_row(self, row): + self._process_csv_row(row) + + def _process_csv_row(self, row): + # Removes problems from including extra columns. Error messages from out of order args + # are more indicative this way too. 
+ row.pop(None, None) + + row_values = {key: val if val != "" else None for key, val in row.items()} + self.__worker.do_async( + lambda *args, **kwargs: self._handle_row(*args, **kwargs), **row_values + ) + + def _handle_row(self, *args, **kwargs): + return self._row_handler(*args, **kwargs) + + def _show_stats(self, _): + return str(self._stats) + + def _handle_if_errors(self): + click.echo("") + if self._stats.total_errors and self._raise_global_error: + raise LoggedCLIError("Some problems occurred during bulk processing.") diff --git a/c42seceventcli/__init__.py b/src/code42cli/click_ext/__init__.py similarity index 100% rename from c42seceventcli/__init__.py rename to src/code42cli/click_ext/__init__.py diff --git a/src/code42cli/click_ext/groups.py b/src/code42cli/click_ext/groups.py new file mode 100644 index 000000000..b22e6a4ed --- /dev/null +++ b/src/code42cli/click_ext/groups.py @@ -0,0 +1,182 @@ +import difflib +import platform +import re +from collections import OrderedDict + +import click +from py42.exceptions import Py42ActiveLegalHoldError +from py42.exceptions import Py42CaseAlreadyHasEventError +from py42.exceptions import Py42CaseNameExistsError +from py42.exceptions import Py42CloudAliasCharacterLimitExceededError +from py42.exceptions import Py42CloudAliasLimitExceededError +from py42.exceptions import Py42DescriptionLimitExceededError +from py42.exceptions import Py42ForbiddenError +from py42.exceptions import Py42HTTPError +from py42.exceptions import Py42InvalidEmailError +from py42.exceptions import Py42InvalidPageTokenError +from py42.exceptions import Py42InvalidPasswordError +from py42.exceptions import Py42InvalidRuleOperationError +from py42.exceptions import Py42InvalidUsernameError +from py42.exceptions import Py42LegalHoldNotFoundOrPermissionDeniedError +from py42.exceptions import Py42NotFoundError +from py42.exceptions import Py42OrgNotFoundError +from py42.exceptions import Py42TrustedActivityConflictError +from py42.exceptions 
import Py42TrustedActivityIdNotFound +from py42.exceptions import Py42TrustedActivityInvalidCharacterError +from py42.exceptions import Py42UpdateClosedCaseError +from py42.exceptions import Py42UserAlreadyAddedError +from py42.exceptions import Py42UsernameMustBeEmailError +from py42.exceptions import Py42UserNotOnListError +from py42.exceptions import Py42UserRiskProfileNotFound +from py42.exceptions import Py42WatchlistNotFound + +from code42cli.errors import Code42CLIError +from code42cli.errors import LoggedCLIError +from code42cli.errors import UserDoesNotExistError +from code42cli.logger import get_main_cli_logger +from code42cli.logger.handlers import SyslogServerNetworkConnectionError + +_DIFFLIB_CUT_OFF = 0.6 + + +class ExceptionHandlingGroup(click.Group): + """A `click.Group` subclass to add custom exception handling.""" + + logger = get_main_cli_logger() + _original_args = None + + def make_context(self, info_name, args, parent=None, **extra): + + # grab the original command line arguments for logging purposes + self._original_args = " ".join(args) + + return super().make_context(info_name, args, parent=parent, **extra) + + def invoke(self, ctx): + try: + return super().invoke(ctx) + + except click.UsageError as err: + self._suggest_cmd(err) + + except LoggedCLIError: + raise + + except Code42CLIError as err: + self.logger.log_error(str(err)) + raise + + except click.ClickException: + raise + + except click.exceptions.Exit: + raise + + except ( + UserDoesNotExistError, + Py42UserAlreadyAddedError, + Py42UserNotOnListError, + Py42InvalidRuleOperationError, + Py42LegalHoldNotFoundOrPermissionDeniedError, + SyslogServerNetworkConnectionError, + Py42CaseNameExistsError, + Py42DescriptionLimitExceededError, + Py42CaseAlreadyHasEventError, + Py42UpdateClosedCaseError, + Py42UsernameMustBeEmailError, + Py42InvalidEmailError, + Py42InvalidPageTokenError, + Py42InvalidPasswordError, + Py42InvalidUsernameError, + Py42ActiveLegalHoldError, + Py42OrgNotFoundError, 
+ Py42TrustedActivityConflictError, + Py42TrustedActivityInvalidCharacterError, + Py42TrustedActivityIdNotFound, + Py42CloudAliasLimitExceededError, + Py42CloudAliasCharacterLimitExceededError, + Py42UserRiskProfileNotFound, + Py42WatchlistNotFound, + Py42NotFoundError, + ) as err: + msg = err.args[0] + self.logger.log_error(msg) + raise Code42CLIError(msg) + + except Py42ForbiddenError as err: + self.logger.log_verbose_error(self._original_args, err.response.request) + raise LoggedCLIError( + "You do not have the necessary permissions to perform this task. " + "Try using or creating a different profile." + ) + + except Py42HTTPError as err: + self.logger.log_verbose_error(self._original_args, err.response.request) + raise LoggedCLIError("Problem making request to server.") + + except UnicodeEncodeError: + if platform.system() == "Windows": + cmd = 'if using powershell: $ENV:PYTHONIOENCODING="utf-16"\nif using cmd.exe: SET PYTHONIOENCODING="utf-16"' + else: + cmd = 'export PYTHONIOENCODING="utf-8"' + raise Code42CLIError( + f"Failed to handle unicode character using environment's detected encoding, try running the following:\n\n{cmd}\n\nand then re-run your `code42` command." + ) + + except OSError: + raise + + except Exception: + self.logger.log_verbose_error() + raise LoggedCLIError("Unknown problem occurred.") + + @staticmethod + def _suggest_cmd(usage_err): + """Handles fuzzy suggestion of commands that are close to the bad command entered.""" + if usage_err.message is not None: + match = re.match("No such command '(.*)'.", usage_err.message) + if match: + bad_arg = match.groups()[0] + available_commands = list(usage_err.ctx.command.commands.keys()) + suggested_commands = difflib.get_close_matches( + bad_arg, available_commands, cutoff=_DIFFLIB_CUT_OFF + ) + if not suggested_commands: + raise usage_err + usage_err.message = ( + f"No such command '{bad_arg}'. " + f"Did you mean {' or '.join(suggested_commands)}?" 
+ ) + raise usage_err + + +class OrderedGroup(click.Group): + """A `click.Group` subclass that uses an `OrderedDict` to store commands so the help text lists + them in the order they were defined/added to the group. + """ + + def __init__(self, name=None, commands=None, **attrs): + super().__init__(name, commands, **attrs) + # the registered subcommands by their exported names. + self.commands = commands or OrderedDict() + + def list_commands(self, ctx): + return self.commands + + +class ExtensionGroup(ExceptionHandlingGroup): + """A helper click.Group for extension scripts. If only a single command is added to this group, + that command will be the "default" and won't need to be explicitly passed as the first argument + to the extension script. + """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + def parse_args(self, ctx, args): + if len(self.commands) == 1: + cmd_name, cmd = next(iter(self.commands.items())) + if not args or args[0] not in self.commands: + self.commands = {"": cmd} + args.insert(0, "") + super().parse_args(ctx, args) diff --git a/src/code42cli/click_ext/options.py b/src/code42cli/click_ext/options.py new file mode 100644 index 000000000..bbbdc8ea0 --- /dev/null +++ b/src/code42cli/click_ext/options.py @@ -0,0 +1,34 @@ +import click + + +def incompatible_with(incompatible_opts): + """Factory for creating custom `click.Option` subclasses that enforce incompatibility with the + option strings passed to this function. 
+ """ + + if isinstance(incompatible_opts, str): + incompatible_opts = [incompatible_opts] + + class IncompatibleOption(click.Option): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + def handle_parse_result(self, ctx, opts, args): + # if None it means we're in autocomplete mode and don't want to validate + if ctx.obj is not None: + found_incompatible = ", ".join( + [ + f"--{opt.replace('_', '-')}" + for opt in opts + if opt in incompatible_opts + ] + ) + if self.name in opts and found_incompatible: + name = self.name.replace("_", "-") + raise click.BadOptionUsage( + option_name=self.name, + message=f"--{name} can't be used with: {found_incompatible}", + ) + return super().handle_parse_result(ctx, opts, args) + + return IncompatibleOption diff --git a/src/code42cli/click_ext/types.py b/src/code42cli/click_ext/types.py new file mode 100644 index 000000000..87c8b9bf4 --- /dev/null +++ b/src/code42cli/click_ext/types.py @@ -0,0 +1,184 @@ +import re +from datetime import datetime +from datetime import timedelta +from datetime import timezone + +import chardet +import click +from click.exceptions import BadParameter + +from code42cli.logger import CliLogger +from code42cli.util import print_numbered_list + + +class AutoDecodedFile(click.File): + """Attempts to autodetect file's encoding prior to normal click.File processing.""" + + def convert(self, value, param, ctx): + try: + with open(value, "rb") as file: + self.encoding = chardet.detect(file.read())["encoding"] + if self.encoding is None: + CliLogger().log_error(f"Failed to detect encoding of file: {value}") + except Exception: + pass # we'll let click.File do it's own exception handling for the filepath + + return super().convert(value, param, ctx) + + +class FileOrString(AutoDecodedFile): + """Declares a parameter to be a file (if the argument begins with `@`), otherwise accepts it as + a string. 
+ """ + + def __init__(self): + super().__init__("r") + + def convert(self, value, param, ctx): + if value.startswith("@") or value == "-": + value = value.lstrip("@") + file = super().convert(value, param, ctx) + return file.read() + else: + return value + + +class MagicDate(click.ParamType): + """Declares a parameter to be a 'magic' date string. Accepts an optional `round` argument + which can be a function that takes a datetime and returns it rounded appropriately. This allows + imprecise "day" input values (2020-01-01, 3d) to be rounded to the start or end of the day + if needed. Accepts the following values as user input: + + timestamp formats: + yyyy-MM-dd + yyyy-MM-dd HH + yyyy-MM-dd HH:MM + yyyy-MM-dd HH:MM:SS + + short-string (day, hour, min) formats: + 30d + 24h + 15m + + and converts them to datetime objects. + """ + + TIMESTAMP_REGEX = re.compile(r"(\d{4}-\d{2}-\d{2})(?:$|T|\s+)([0-9:]+)?") + MAGIC_TIME_REGEX = re.compile(r"(\d+)([dhmDHM])$") + HELP_TEXT = ( + "Accepts a date/time in yyyy-MM-dd (UTC) or yyyy-MM-dd HH:MM:SS " + "(UTC+24-hr time) format where the 'time' portion of the string " + "can be partial (e.g. '2020-01-01 12' or '2020-01-01 01:15') or " + "a 'short time' value representing days (30d), hours (24h) or " + "minutes (15m) from the current time." 
+ ) + + name = "magicdate" + + def __init__(self, rounding_func=None): + self.round = rounding_func + + def get_metavar(self, param): + return "[DATE|TIMESTAMP|SHORT_TIME]" + + def __repr__(self): + return "MagicDate" + + def convert(self, value, param, ctx): + timestamp_match = self.TIMESTAMP_REGEX.match(value) + magic_match = self.MAGIC_TIME_REGEX.match(value) + + if timestamp_match: + date, time = timestamp_match.groups() + dt = self._get_dt_from_date_time_pair(date, time) + if not time and callable(self.round): + dt = self.round(dt) + + elif magic_match: + num, period = magic_match.groups() + dt = self._get_dt_from_magic_time_pair(num, period) + if period == "d" and callable(self.round): + dt = self.round(dt) + + else: + self.fail(self.HELP_TEXT, param=param) + + return dt.replace(tzinfo=timezone.utc) + + @staticmethod + def _get_dt_from_magic_time_pair(num, period): + num = int(num) + period = period.lower() + if period == "d": + delta = timedelta(days=num) + elif period == "h": + delta = timedelta(hours=num) + elif period == "m": + delta = timedelta(minutes=num) + else: + raise BadParameter(f"Couldn't parse magic time string: {num}{period}") + return datetime.utcnow() - delta + + @staticmethod + def _get_dt_from_date_time_pair(date, time): + date_format = "%Y-%m-%d %H:%M:%S" + if time: + time = "{}:{}:{}".format(*time.split(":") + ["00", "00"]) + else: + time = "00:00:00" + date_string = f"{date} {time}" + try: + dt = datetime.strptime(date_string, date_format) + except ValueError: + raise BadParameter(f"Unable to parse date string: {date_string}.") + else: + return dt + + +class MapChoice(click.Choice): + """Choice subclass that takes an extra map of additional 'valid' keys to map to correct + choices list, allowing backward compatible choice changes. The extra keys don't show up + in help text, but work when passed as a choice. 
+ """ + + def __init__(self, choices, extras_map, **kwargs): + self.extras_map = extras_map + super().__init__(choices, **kwargs) + + def convert(self, value, param, ctx): + if value in self.extras_map: + value = self.extras_map[value] + + return super().convert(value, param, ctx) + + +class PromptChoice(click.ParamType): + def __init__(self, choices): + self.choices = choices + + def print_choices(self): + print_numbered_list(self.choices) + + def convert(self, value, param, ctx): + try: + choice_index = int(value) - 1 + return self.choices[choice_index] + except Exception: + self.fail("Invalid choice", param=param) + + +class TOTP(click.ParamType): + """Validates param to be a 6-digit integer, which is what all Code42 TOTP tokens will be.""" + + def get_metavar(self, param): + return "TEXT" + + def convert(self, value, param, ctx): + try: + int(value) + assert len(value) == 6 + return value + except Exception: + raise BadParameter( + f"TOTP tokens should be a 6-digit integer. '{value}' was provided." 
+ ) diff --git a/c42seceventcli/aed/__init__.py b/src/code42cli/cmds/__init__.py similarity index 100% rename from c42seceventcli/aed/__init__.py rename to src/code42cli/cmds/__init__.py diff --git a/src/code42cli/cmds/alert_rules.py b/src/code42cli/cmds/alert_rules.py new file mode 100644 index 000000000..294bdf614 --- /dev/null +++ b/src/code42cli/cmds/alert_rules.py @@ -0,0 +1,204 @@ +from collections import OrderedDict + +import click +from click import echo +from py42.exceptions import Py42BadRequestError +from py42.util import format_json + +from code42cli import PRODUCT_NAME +from code42cli.bulk import generate_template_cmd_factory +from code42cli.bulk import run_bulk_process +from code42cli.click_ext.groups import OrderedGroup +from code42cli.cmds.shared import get_user_id +from code42cli.errors import Code42CLIError +from code42cli.file_readers import read_csv_arg +from code42cli.options import format_option +from code42cli.options import sdk_options +from code42cli.output_formats import OutputFormatter +from code42cli.util import deprecation_warning + +DEPRECATION_TEXT = "Incydr functionality is deprecated. Use the Incydr CLI instead." + + +class AlertRuleTypes: + EXFILTRATION = "FED_ENDPOINT_EXFILTRATION" + CLOUD_SHARE = "FED_CLOUD_SHARE_PERMISSIONS" + FILE_TYPE_MISMATCH = "FED_FILE_TYPE_MISMATCH" + + +_HEADER_KEYS_MAP = OrderedDict() +_HEADER_KEYS_MAP["observerRuleId"] = "RuleId" +_HEADER_KEYS_MAP["name"] = "Name" +_HEADER_KEYS_MAP["severity"] = "Severity" +_HEADER_KEYS_MAP["type"] = "Type" +_HEADER_KEYS_MAP["ruleSource"] = "Source" +_HEADER_KEYS_MAP["isEnabled"] = "Enabled" + + +@click.group(cls=OrderedGroup) +@sdk_options(hidden=True) +def alert_rules(state): + """DEPRECATED - Manage users associated with alert rules.""" + deprecation_warning(DEPRECATION_TEXT) + pass + + +rule_id_option = click.option( + "--rule-id", required=True, help="Identification number of the alert rule." 
+) + + +@alert_rules.command() +@rule_id_option +@click.option( + "-u", + "--username", + required=True, + help="The username of the user to add to the alert rule.", +) +@sdk_options() +def add_user(state, rule_id, username): + """Add a user to an alert rule.""" + _add_user(state.sdk, rule_id, username) + + +@alert_rules.command() +@rule_id_option +@click.option( + "-u", + "--username", + required=True, + help="The username of the user to remove from the alert rule.", +) +@sdk_options() +def remove_user(state, rule_id, username): + """Remove a user from an alert rule.""" + try: + _remove_user(state.sdk, rule_id, username) + except Py42BadRequestError: + raise Code42CLIError( + f"User {username} is not currently assigned to rule-id {rule_id}." + ) + + +@alert_rules.command("list") +@format_option +@sdk_options() +def list_alert_rules(state, format): + """Fetch existing alert rules.""" + formatter = OutputFormatter(format, _HEADER_KEYS_MAP) + selected_rules = _get_all_rules_metadata(state.sdk) + + if selected_rules: + formatter.echo_formatted_list(selected_rules) + + +@alert_rules.command() +@click.argument("rule_id") +@sdk_options() +def show(state, rule_id): + """Print out detailed alert rule criteria.""" + selected_rule = _get_rule_metadata(state.sdk, rule_id) + if selected_rule: + get = _get_rule_type_func(state.sdk, selected_rule[0]["type"]) + rule_detail = get(rule_id) + echo(format_json(rule_detail.text)) + + +@alert_rules.group(cls=OrderedGroup) +@sdk_options(hidden=True) +def bulk(state): + """Tools for executing bulk alert rule actions.""" + pass + + +ALERT_RULES_CSV_HEADERS = ["rule_id", "username"] + +alert_rules_generate_template = generate_template_cmd_factory( + group_name="alert_rules", + commands_dict={"add": ALERT_RULES_CSV_HEADERS, "remove": ALERT_RULES_CSV_HEADERS}, +) +bulk.add_command(alert_rules_generate_template) + + +@bulk.command( + help=f"Bulk add users to alert rules from a CSV file. 
" + f"CSV file format: {','.join(ALERT_RULES_CSV_HEADERS)}" +) +@read_csv_arg(headers=ALERT_RULES_CSV_HEADERS) +@sdk_options() +def add(state, csv_rows): + sdk = state.sdk + + def handle_row(rule_id, username): + _add_user(sdk, rule_id, username) + + run_bulk_process( + handle_row, csv_rows, progress_label="Adding users to alert-rules:" + ) + + +@bulk.command( + help="Bulk remove users from alert rules using a CSV file. " + "CSV file format: {','.join(ALERT_RULES_CSV_HEADERS)}" +) +@read_csv_arg(headers=ALERT_RULES_CSV_HEADERS) +@sdk_options() +def remove(state, csv_rows): + sdk = state.sdk + + def handle_row(rule_id, username): + _remove_user(sdk, rule_id, username) + + run_bulk_process( + handle_row, csv_rows, progress_label="Removing users from alert-rules:" + ) + + +def _add_user(sdk, rule_id, username): + user_id = get_user_id(sdk, username) + rules = _get_rule_metadata(sdk, rule_id) + if rules: + sdk.alerts.rules.add_user(rule_id, user_id) + + +def _remove_user(sdk, rule_id, username): + user_id = get_user_id(sdk, username) + rules = _get_rule_metadata(sdk, rule_id) + if rules: + sdk.alerts.rules.remove_user(rule_id, user_id) + + +def _get_all_rules_metadata(sdk): + rules_generator = sdk.alerts.rules.get_all() + selected_rules = [ + rule for rules in rules_generator for rule in rules["ruleMetadata"] + ] + return _handle_rules_results(selected_rules) + + +def _get_rule_metadata(sdk, rule_id): + rules = sdk.alerts.rules.get_by_observer_id(rule_id)["ruleMetadata"] + return _handle_rules_results(rules, rule_id) + + +def _handle_rules_results(rules, rule_id=None): + if not rules: + id_msg = f"with RuleId {rule_id} " if rule_id else "" + msg = f"No alert rules {id_msg}found." 
+ raise Code42CLIError(msg) + return rules + + +def _get_rule_type_func(sdk, rule_type): + if rule_type == AlertRuleTypes.EXFILTRATION: + return sdk.alerts.rules.exfiltration.get + elif rule_type == AlertRuleTypes.CLOUD_SHARE: + return sdk.alerts.rules.cloudshare.get + elif rule_type == AlertRuleTypes.FILE_TYPE_MISMATCH: + return sdk.alerts.rules.filetypemismatch.get + else: + raise Code42CLIError( + "Received an unknown rule type from server. You might need to update " + f"to a newer version of {PRODUCT_NAME}" + ) diff --git a/src/code42cli/cmds/alerts.py b/src/code42cli/cmds/alerts.py new file mode 100644 index 000000000..314e9d764 --- /dev/null +++ b/src/code42cli/cmds/alerts.py @@ -0,0 +1,449 @@ +import click +import py42.sdk.queries.alerts.filters as f +from py42.exceptions import Py42NotFoundError +from py42.sdk.queries.alerts.alert_query import AlertQuery +from py42.sdk.queries.alerts.filters import AlertState +from py42.sdk.queries.alerts.filters import RuleType +from py42.sdk.queries.alerts.filters import Severity +from py42.util import format_dict + +import code42cli.cmds.search.options as searchopt +import code42cli.errors as errors +import code42cli.options as opt +from code42cli.bulk import generate_template_cmd_factory +from code42cli.bulk import run_bulk_process +from code42cli.click_ext.groups import OrderedGroup +from code42cli.cmds.search import SendToCommand +from code42cli.cmds.search.cursor_store import AlertCursorStore +from code42cli.cmds.search.options import server_options +from code42cli.cmds.util import convert_to_or_query +from code42cli.cmds.util import create_time_range_filter +from code42cli.cmds.util import try_get_default_header +from code42cli.date_helper import convert_datetime_to_timestamp +from code42cli.date_helper import limit_date_range +from code42cli.enums import JsonOutputFormat +from code42cli.enums import OutputFormat +from code42cli.file_readers import read_csv_arg +from code42cli.options import format_option +from 
code42cli.output_formats import OutputFormatter +from code42cli.util import deprecation_warning +from code42cli.util import hash_event +from code42cli.util import parse_timestamp +from code42cli.util import warn_interrupt + +DEPRECATION_TEXT = "Incydr functionality is deprecated. Use the Incydr CLI instead (https://developer.code42.com/)." + +ALERTS_KEYWORD = "alerts" +ALERT_PAGE_SIZE = 25 + +begin = opt.begin_option( + ALERTS_KEYWORD, + callback=lambda ctx, param, arg: convert_datetime_to_timestamp( + limit_date_range(arg, max_days_back=90) + ), +) +end = opt.end_option(ALERTS_KEYWORD) +checkpoint = opt.checkpoint_option(ALERTS_KEYWORD) +advanced_query = searchopt.advanced_query_option(ALERTS_KEYWORD) +severity_option = click.option( + "--severity", + multiple=True, + type=click.Choice(Severity.choices()), + cls=searchopt.AdvancedQueryAndSavedSearchIncompatible, + callback=searchopt.is_in_filter(f.Severity), + help="Filter alerts by severity. Defaults to returning all severities.", +) +filter_state_option = click.option( + "--state", + multiple=True, + type=click.Choice(AlertState.choices()), + cls=searchopt.AdvancedQueryAndSavedSearchIncompatible, + callback=searchopt.is_in_filter(f.AlertState), + help="Filter alerts by status. Defaults to returning all statuses.", +) +actor_option = click.option( + "--actor", + multiple=True, + cls=searchopt.AdvancedQueryAndSavedSearchIncompatible, + callback=searchopt.is_in_filter(f.Actor), + help="Filter alerts by including the given actor(s) who triggered the alert. 
" + "Arguments must match the actor's cloud alias exactly.", +) +actor_contains_option = click.option( + "--actor-contains", + multiple=True, + cls=searchopt.AdvancedQueryAndSavedSearchIncompatible, + callback=searchopt.contains_filter(f.Actor), + help="Filter alerts by including actor(s) whose cloud alias contains the given string.", +) +exclude_actor_option = click.option( + "--exclude-actor", + multiple=True, + cls=searchopt.AdvancedQueryAndSavedSearchIncompatible, + callback=searchopt.not_in_filter(f.Actor), + help="Filter alerts by excluding the given actor(s) who triggered the alert. " + "Arguments must match actor's cloud alias exactly.", +) +exclude_actor_contains_option = click.option( + "--exclude-actor-contains", + multiple=True, + cls=searchopt.AdvancedQueryAndSavedSearchIncompatible, + callback=searchopt.not_contains_filter(f.Actor), + help="Filter alerts by excluding actor(s) whose cloud alias contains the given string.", +) +rule_name_option = click.option( + "--rule-name", + multiple=True, + cls=searchopt.AdvancedQueryAndSavedSearchIncompatible, + callback=searchopt.is_in_filter(f.RuleName), + help="Filter alerts by including the given rule name(s).", +) +exclude_rule_name_option = click.option( + "--exclude-rule-name", + multiple=True, + cls=searchopt.AdvancedQueryAndSavedSearchIncompatible, + callback=searchopt.not_in_filter(f.RuleName), + help="Filter alerts by excluding the given rule name(s).", +) +rule_id_option = click.option( + "--rule-id", + multiple=True, + cls=searchopt.AdvancedQueryAndSavedSearchIncompatible, + callback=searchopt.is_in_filter(f.RuleId), + help="Filter alerts by including the given rule id(s).", +) +exclude_rule_id_option = click.option( + "--exclude-rule-id", + multiple=True, + cls=searchopt.AdvancedQueryAndSavedSearchIncompatible, + callback=searchopt.not_in_filter(f.RuleId), + help="Filter alerts by excluding the given rule id(s).", +) +rule_type_option = click.option( + "--rule-type", + multiple=True, + 
type=click.Choice(RuleType.choices()), + cls=searchopt.AdvancedQueryAndSavedSearchIncompatible, + callback=searchopt.is_in_filter(f.RuleType), + help="Filter alerts by including the given rule type(s).", +) +exclude_rule_type_option = click.option( + "--exclude-rule-type", + multiple=True, + cls=searchopt.AdvancedQueryAndSavedSearchIncompatible, + callback=searchopt.not_in_filter(f.RuleType), + help="Filter alerts by excluding the given rule type(s).", +) +description_option = click.option( + "--description", + multiple=True, + cls=searchopt.AdvancedQueryAndSavedSearchIncompatible, + callback=searchopt.contains_filter(f.Description), + help="Filter alerts by description. Does fuzzy search by default.", +) +send_to_format_options = click.option( + "-f", + "--format", + type=click.Choice(JsonOutputFormat(), case_sensitive=False), + help="The output format of the result. Defaults to json format.", + default=JsonOutputFormat.RAW, +) +alert_id_arg = click.argument("alert-id") +note_option = click.option("--note", help="A note to attach to the alert.") +update_state_option = click.option( + "--state", + help="The state to give to the alert.", + type=click.Choice(AlertState.choices()), +) + + +def _get_default_output_header(): + return { + "id": "Id", + "name": "RuleName", + "actor": "Username", + "createdAt": "ObservedDate", + "state": "State", + "severity": "Severity", + "description": "Description", + } + + +def search_options(f): + f = checkpoint(f) + f = advanced_query(f) + f = end(f) + f = begin(f) + return f + + +def filter_options(f): + f = actor_option(f) + f = actor_contains_option(f) + f = exclude_actor_option(f) + f = exclude_actor_contains_option(f) + f = rule_name_option(f) + f = exclude_rule_name_option(f) + f = rule_id_option(f) + f = exclude_rule_id_option(f) + f = rule_type_option(f) + f = exclude_rule_type_option(f) + f = description_option(f) + f = severity_option(f) + f = filter_state_option(f) + return f + + +@click.group(cls=OrderedGroup) 
@opt.sdk_options(hidden=True)
def alerts(state):
    """DEPRECATED - Get and send alert data."""
    deprecation_warning(DEPRECATION_TEXT)
    # store cursor getter on the group state so shared --begin option can use it in validation
    state.cursor_getter = _get_alert_cursor_store


@alerts.command()
@click.argument("checkpoint-name")
@opt.sdk_options()
def clear_checkpoint(state, checkpoint_name):
    """Remove the saved alert checkpoint from `--use-checkpoint/-c` mode."""
    _get_alert_cursor_store(state.profile.name).delete(checkpoint_name)


@alerts.command()
@filter_options
@search_options
@click.option(
    "--or-query", is_flag=True, cls=searchopt.AdvancedQueryAndSavedSearchIncompatible
)
@opt.sdk_options()
@click.option(
    "--include-all",
    default=False,
    is_flag=True,
    help="Display simple properties of the primary level of the nested response.",
)
@format_option
def search(
    cli_state,
    format,
    begin,
    end,
    advanced_query,
    use_checkpoint,
    or_query,
    include_all,
    **kwargs,
):
    """Search for alerts.

    When `--use-checkpoint` is given, the stored checkpoint timestamp (if any)
    overrides `--begin`, and the result stream is de-duplicated and the
    checkpoint advanced as results are consumed.
    """
    output_header = try_get_default_header(
        include_all, _get_default_output_header(), format
    )
    formatter = OutputFormatter(format, output_header)
    cursor = _get_alert_cursor_store(cli_state.profile.name) if use_checkpoint else None
    if use_checkpoint:
        checkpoint_name = use_checkpoint
        checkpoint = cursor.get(checkpoint_name)
        if checkpoint is not None:
            # resume from the last stored timestamp instead of the supplied --begin
            begin = checkpoint

    query = _construct_query(cli_state, begin, end, advanced_query, or_query)
    alerts_gen = cli_state.sdk.alerts.get_all_alert_details(query)

    if use_checkpoint:
        checkpoint_name = use_checkpoint
        # update checkpoint to alertId of last event retrieved
        alerts_gen = _dedupe_checkpointed_events_and_store_updated_checkpoint(
            cursor, checkpoint_name, alerts_gen
        )
    # Materialize the generator so we can detect the empty case before printing.
    # (Idiomatic list() replaces the former manual append loop; behavior unchanged.)
    alerts_list = list(alerts_gen)
    if not alerts_list:
        click.echo("No results found.")
        return
    formatter.echo_formatted_list(alerts_list)
def _construct_query(state, begin, end, advanced_query, or_query):
    """Build the AlertQuery used by both `search` and `send-to`.

    NOTE: mutates `state.search_filters` in place (replaces it entirely when an
    advanced query or --or-query is given) — callers rely on state being shared.
    """
    if advanced_query:
        # A raw advanced query supersedes any individually collected filters.
        state.search_filters = advanced_query
    else:
        if begin or end:
            state.search_filters.append(
                create_time_range_filter(f.DateObserved, begin, end)
            )
    if or_query:
        # Convert the implicit AND of collected filters into an OR query.
        state.search_filters = convert_to_or_query(state.search_filters)
    query = AlertQuery(*state.search_filters)
    query.page_size = ALERT_PAGE_SIZE
    # Ascending CreatedAt order so checkpointing can track the newest timestamp seen.
    query.sort_direction = "asc"
    query.sort_key = "CreatedAt"
    return query


def _dedupe_checkpointed_events_and_store_updated_checkpoint(
    cursor, checkpoint_name, alerts_gen
):
    """De-duplicates events across checkpointed runs. Since using the timestamp of the last event
    processed as the `--begin` time of the next run causes the last event to show up again in the
    next results, we hash the last event(s) of each run and store those hashes in the cursor to
    filter out on the next run. It's also possible that two events have the exact same timestamp, so
    `checkpoint_events` needs to be a list of hashes so we can filter out everything that's actually
    been processed.
    """

    checkpoint_alerts = cursor.get_alerts(checkpoint_name)
    new_timestamp = None
    new_alerts = []
    for alert in alerts_gen:
        event_hash = hash_event(alert)
        if event_hash not in checkpoint_alerts:
            if alert[f.DateObserved._term] != new_timestamp:
                # Timestamp advanced: reset the hash list for the new timestamp bucket.
                new_timestamp = alert[f.DateObserved._term]
                new_alerts.clear()
            new_alerts.append(event_hash)
            yield alert
            # Persist the checkpoint after every yielded alert so an interrupt
            # between yields does not lose progress.
            ts = parse_timestamp(new_timestamp)
            cursor.replace(checkpoint_name, ts)
            cursor.replace_alerts(checkpoint_name, new_alerts)


@alerts.command(cls=SendToCommand)
@filter_options
@search_options
@click.option(
    "--or-query", is_flag=True, cls=searchopt.AdvancedQueryAndSavedSearchIncompatible
)
@opt.sdk_options()
@server_options
@click.option(
    "--include-all",
    default=False,
    is_flag=True,
    help="Display simple properties of the primary level of the nested response.",
)
@send_to_format_options
def send_to(cli_state, begin, end, advanced_query, use_checkpoint, or_query, **kwargs):
    """Send alerts to the given server address.

    HOSTNAME format: address:port where port is optional and defaults to 514.
    """
    cursor = _get_cursor(cli_state, use_checkpoint)

    if use_checkpoint:
        checkpoint_name = use_checkpoint
        checkpoint = cursor.get(checkpoint_name)
        if checkpoint is not None:
            # resume from the stored checkpoint timestamp instead of --begin
            begin = checkpoint

    query = _construct_query(cli_state, begin, end, advanced_query, or_query)
    alerts_gen = cli_state.sdk.alerts.get_all_alert_details(query)

    if use_checkpoint:
        checkpoint_name = use_checkpoint
        alerts_gen = _dedupe_checkpointed_events_and_store_updated_checkpoint(
            cursor, checkpoint_name, alerts_gen
        )
    with warn_interrupt():
        alert = None
        for alert in alerts_gen:
            # Each alert is emitted through the configured logger (server target).
            cli_state.logger.info(alert)
        if alert is None:  # generator was empty
            click.echo("No results found.")


def _get_cursor(state, use_checkpoint):
    # Only build a cursor store when checkpointing was requested; None otherwise.
    return _get_alert_cursor_store(state.profile.name) if use_checkpoint else None


def _get_alert_cursor_store(profile_name):
    """Return the per-profile cursor store used for alert checkpoints."""
    return AlertCursorStore(profile_name)


@alerts.command()
@opt.sdk_options()
@alert_id_arg
@click.option(
    "--include-observations", is_flag=True, help="View observations of the alert."
)
def show(state, alert_id, include_observations):
    """Display the details of a single alert."""
    formatter = OutputFormatter(OutputFormat.TABLE, _get_default_output_header())

    try:
        response = state.sdk.alerts.get_details(alert_id)
    except Py42NotFoundError:
        raise errors.Code42CLIError(f"No alert found with ID '{alert_id}'.")

    # get_details returns a list; we requested one ID so take the first entry.
    alert = response["alerts"][0]
    formatter.echo_formatted_list([alert])

    # Show note details
    note = alert.get("note")
    if note:
        click.echo("\nNote:\n")
        click.echo(format_dict(note))

    if include_observations:
        observations = alert.get("observations")
        if observations:
            click.echo("\nObservations:\n")
            click.echo(format_dict(observations))
        else:
            click.echo("\nNo observations found.")


@alerts.command()
@opt.sdk_options()
@alert_id_arg
@update_state_option
@note_option
def update(cli_state, alert_id, state, note):
    """Update alert information."""
    # `state` here is the --state option value, not the CLI state (that is cli_state).
    _update_alert(cli_state.sdk, alert_id, state, note)


@alerts.group(cls=OrderedGroup)
@opt.sdk_options(hidden=True)
def bulk(state):
    """Tools for executing bulk alert actions."""
    pass


# CSV column layout shared by the bulk template generator and `bulk update`.
UPDATE_ALERT_CSV_HEADERS = ["id", "state", "note"]
update_alerts_generate_template = generate_template_cmd_factory(
    group_name=ALERTS_KEYWORD,
    commands_dict={"update": UPDATE_ALERT_CSV_HEADERS},
    help_message="Generate the CSV template needed for bulk alert commands.",
)
bulk.add_command(update_alerts_generate_template)


@bulk.command(
    name="update",
    help=f"Bulk update alerts using a CSV file with format: {','.join(UPDATE_ALERT_CSV_HEADERS)}",
)
@opt.sdk_options()
@read_csv_arg(headers=UPDATE_ALERT_CSV_HEADERS)
def bulk_update(cli_state, csv_rows):
    """Bulk update alerts."""
    sdk = cli_state.sdk

    def handle_row(id, state, note):
        # One row = one alert update; parameter names must match the CSV headers.
        _update_alert(sdk, id, state, note)

    run_bulk_process(
        handle_row,
        csv_rows,
        progress_label="Updating alerts:",
    )


def _update_alert(sdk, alert_id, alert_state, note):
    # A state change carries the note with it; a note alone is updated separately.
    if alert_state:
sdk.alerts.update_state(alert_state, [alert_id], note=note) + elif note: + sdk.alerts.update_note(alert_id, note) diff --git a/src/code42cli/cmds/auditlogs.py b/src/code42cli/cmds/auditlogs.py new file mode 100644 index 000000000..0671cde74 --- /dev/null +++ b/src/code42cli/cmds/auditlogs.py @@ -0,0 +1,264 @@ +import click + +import code42cli.options as opt +from code42cli.click_ext.groups import OrderedGroup +from code42cli.cmds.search import SendToCommand +from code42cli.cmds.search.cursor_store import AuditLogCursorStore +from code42cli.cmds.search.options import server_options +from code42cli.date_helper import convert_datetime_to_timestamp +from code42cli.options import checkpoint_option +from code42cli.options import format_option +from code42cli.options import sdk_options +from code42cli.output_formats import OutputFormatter +from code42cli.util import deprecation_warning +from code42cli.util import hash_event +from code42cli.util import parse_timestamp +from code42cli.util import warn_interrupt + +DEPRECATION_TEXT = "Incydr functionality is deprecated. Use the Incydr CLI instead (https://developer.code42.com/)." 
+ +EVENT_KEY = "events" +AUDIT_LOGS_KEYWORD = "audit-logs" + + +def _get_audit_logs_default_header(): + return { + "timestamp": "Timestamp", + "type$": "Type", + "actorName": "ActorName", + "actorIpAddress": "ActorIpAddress", + "userName": "AffectedUser", + "userId": "AffectedUserUID", + } + + +begin_option = opt.begin_option( + AUDIT_LOGS_KEYWORD, + callback=lambda ctx, param, arg: convert_datetime_to_timestamp(arg), +) +end_option = opt.end_option( + AUDIT_LOGS_KEYWORD, + callback=lambda ctx, param, arg: convert_datetime_to_timestamp(arg), +) +filter_option_usernames = click.option( + "--actor-username", + required=False, + help="Filter results by actor usernames.", + multiple=True, +) +filter_option_user_ids = click.option( + "--actor-user-id", + required=False, + help="Filter results by actor user IDs.", + multiple=True, +) +filter_option_user_ip_addresses = click.option( + "--actor-ip", + required=False, + help="Filter results by user IP addresses.", + multiple=True, +) +filter_option_affected_user_ids = click.option( + "--affected-user-id", + required=False, + help="Filter results by affected user IDs.", + multiple=True, +) +filter_option_affected_usernames = click.option( + "--affected-username", + required=False, + help="Filter results by affected usernames.", + multiple=True, +) +filter_option_event_types = click.option( + "--event-type", + required=False, + help="Filter results by event types (e.g. 
search_issued, user_registered, user_deactivated).", + multiple=True, +) + + +def filter_options(f): + f = filter_option_event_types(f) + f = filter_option_usernames(f) + f = filter_option_user_ids(f) + f = filter_option_user_ip_addresses(f) + f = filter_option_affected_user_ids(f) + f = filter_option_affected_usernames(f) + f = end_option(f) + f = begin_option(f) + return f + + +@click.group(cls=OrderedGroup) +@sdk_options(hidden=True) +def audit_logs(state): + """DEPRECATED - Get and send audit log event data.""" + deprecation_warning(DEPRECATION_TEXT) + # store cursor getter on the group state so shared --begin option can use it in validation + state.cursor_getter = _get_audit_log_cursor_store + + +@audit_logs.command() +@click.argument("checkpoint-name") +@sdk_options() +def clear_checkpoint(state, checkpoint_name): + """Remove the saved audit log checkpoint from `--use-checkpoint/-c` mode.""" + _get_audit_log_cursor_store(state.profile.name).delete(checkpoint_name) + + +@audit_logs.command() +@filter_options +@format_option +@checkpoint_option(AUDIT_LOGS_KEYWORD) +@sdk_options() +def search( + state, + begin, + end, + event_type, + actor_username, + actor_user_id, + actor_ip, + affected_user_id, + affected_username, + format, + use_checkpoint, +): + """Search audit log events.""" + formatter = OutputFormatter(format, _get_audit_logs_default_header()) + cursor = _get_audit_log_cursor_store(state.profile.name) + if use_checkpoint: + checkpoint_name = use_checkpoint + checkpoint = cursor.get(checkpoint_name) + if checkpoint is not None: + begin = checkpoint + + events = _get_all_audit_log_events( + state.sdk, + begin_time=begin, + end_time=end, + event_types=event_type, + usernames=actor_username, + user_ids=actor_user_id, + user_ip_addresses=actor_ip, + affected_user_ids=affected_user_id, + affected_usernames=affected_username, + ) + + if use_checkpoint: + checkpoint_name = use_checkpoint + events = list( + 
_dedupe_checkpointed_events_and_store_updated_checkpoint( + cursor, checkpoint_name, events + ) + ) + + if not events: + click.echo("No results found.", err=True) + return + + formatter.echo_formatted_list(events) + + +@audit_logs.command(cls=SendToCommand) +@filter_options +@checkpoint_option(AUDIT_LOGS_KEYWORD) +@server_options +@sdk_options() +def send_to( + state, + begin, + end, + event_type, + actor_username, + actor_user_id, + actor_ip, + affected_user_id, + affected_username, + use_checkpoint, + **kwargs, +): + """Send audit log events to the given server address in JSON format. + + HOSTNAME format: address:port where port is optional and defaults to 514. + """ + cursor = _get_audit_log_cursor_store(state.profile.name) + if use_checkpoint: + checkpoint_name = use_checkpoint + checkpoint = cursor.get(checkpoint_name) + if checkpoint is not None: + begin = checkpoint + + events = _get_all_audit_log_events( + state.sdk, + begin_time=begin, + end_time=end, + event_types=event_type, + usernames=actor_username, + user_ids=actor_user_id, + user_ip_addresses=actor_ip, + affected_user_ids=affected_user_id, + affected_usernames=affected_username, + ) + if use_checkpoint: + checkpoint_name = use_checkpoint + events = _dedupe_checkpointed_events_and_store_updated_checkpoint( + cursor, checkpoint_name, events + ) + with warn_interrupt(): + event = None + for event in events: + state.logger.info(event) + if event is None: # generator was empty + click.echo("No results found.") + + +def _get_all_audit_log_events(sdk, **filter_args): + response_gen = sdk.auditlogs.get_all(**filter_args) + events = [] + try: + responses = list(response_gen) + except KeyError: + # API endpoint (get_page) returns a response without events key when no records are found + # e.g {"paginationRangeStartIndex": 10000, "paginationRangeEndIndex": 10000, "totalResultCount": 1593} + # we can remove this check once PL-93211 is resolved and deployed. 
+ return events + + for response in responses: + if EVENT_KEY in response.data: + response_events = response.data.get(EVENT_KEY) + events.extend(response_events) + + return sorted(events, key=lambda x: x.get("timestamp")) + + +def _dedupe_checkpointed_events_and_store_updated_checkpoint( + cursor, checkpoint_name, events +): + """De-duplicates events across checkpointed runs. Since using the timestamp of the last event + processed as the `--begin` time of the next run causes the last event to show up again in the + next results, we hash the last event(s) of each run and store those hashes in the cursor to + filter out on the next run. It's also possible that two events have the exact same timestamp, so + `checkpoint_events` needs to be a list of hashes so we can filter out everything that's actually + been processed. + """ + + checkpoint_events = cursor.get_events(checkpoint_name) + new_timestamp = None + new_events = [] + for event in events: + event_hash = hash_event(event) + if event_hash not in checkpoint_events: + if event["timestamp"] != new_timestamp: + new_timestamp = event["timestamp"] + new_events.clear() + new_events.append(event_hash) + yield event + ts = parse_timestamp(new_timestamp) + cursor.replace(checkpoint_name, ts) + cursor.replace_events(checkpoint_name, new_events) + + +def _get_audit_log_cursor_store(profile_name): + return AuditLogCursorStore(profile_name) diff --git a/src/code42cli/cmds/cases.py b/src/code42cli/cmds/cases.py new file mode 100644 index 000000000..199cb7d18 --- /dev/null +++ b/src/code42cli/cmds/cases.py @@ -0,0 +1,333 @@ +import json +import os + +import click +from py42.clients.cases import CaseStatus +from py42.exceptions import Py42BadRequestError +from py42.exceptions import Py42CaseAlreadyHasEventError +from py42.exceptions import Py42NotFoundError +from py42.exceptions import Py42UpdateClosedCaseError + +from code42cli.bulk import generate_template_cmd_factory +from code42cli.bulk import run_bulk_process +from 
code42cli.click_ext.groups import OrderedGroup +from code42cli.errors import Code42CLIError +from code42cli.file_readers import read_csv_arg +from code42cli.options import format_option +from code42cli.options import sdk_options +from code42cli.options import set_begin_default_dict +from code42cli.options import set_end_default_dict +from code42cli.output_formats import OutputFormatter +from code42cli.util import deprecation_warning + +DEPRECATION_TEXT = "Incydr functionality is deprecated. Use the Incydr CLI instead (https://developer.code42.com/)." + + +case_number_arg = click.argument("case-number", type=int) +case_number_option = click.option( + "--case-number", type=int, help="The number assigned to the case.", required=True +) +name_option = click.option( + "--name", + help="The name of the case.", +) +assignee_option = click.option( + "--assignee", help="The UID of the user to assign to the case." +) +description_option = click.option("--description", help="The description of the case.") +findings_option = click.option("--findings", help="Any findings for the case.") +subject_option = click.option( + "--subject", help="The user UID of the subject of the case." +) +status_option = click.option( + "--status", + help="Status of the case. `OPEN` or `CLOSED`.", + type=click.Choice(CaseStatus.choices()), +) +file_event_id_option = click.option( + "--event-id", required=True, help="The file event ID associated with the case." 
+) +CASES_KEYWORD = "cases" +BEGIN_DATE_DICT = set_begin_default_dict(CASES_KEYWORD) +END_DATE_DICT = set_end_default_dict(CASES_KEYWORD) + + +def _get_cases_header(): + return { + "number": "Number", + "name": "Name", + "assignee": "Assignee", + "status": "Status", + "subject": "Subject", + "createdAt": "Creation Time", + "updatedAt": "Last Update Time", + } + + +def _get_events_header(): + return { + "eventId": "Event Id", + "eventTimestamp": "Timestamp", + "filePath": "Path", + "fileName": "File", + "exposure": "Exposure", + } + + +@click.group(cls=OrderedGroup) +@sdk_options(hidden=True) +def cases(state): + """DEPRECATED - Manage cases and events associated with cases.""" + deprecation_warning(DEPRECATION_TEXT) + pass + + +@cases.command() +@click.argument("name") +@assignee_option +@description_option +@findings_option +@subject_option +@sdk_options() +def create(state, name, subject, assignee, description, findings): + """Create a new case.""" + state.sdk.cases.create( + name, + subject=subject, + assignee=assignee, + description=description, + findings=findings, + ) + + +@cases.command() +@case_number_arg +@name_option +@assignee_option +@description_option +@findings_option +@subject_option +@status_option +@sdk_options() +def update(state, case_number, name, subject, assignee, description, findings, status): + """Update case details for the given case.""" + state.sdk.cases.update( + case_number, + name=name, + subject=subject, + assignee=assignee, + description=description, + findings=findings, + status=status, + ) + + +@cases.command("list") +@click.option( + "--name", + help="Filter by name of a case. 
Supports partial name matches.", +) +@click.option("--subject", help="Filter by the user UID of the subject of a case.") +@click.option("--assignee", help="Filter by the user UID of an assignee.") +@click.option("--begin-create-time", **BEGIN_DATE_DICT) +@click.option("--end-create-time", **END_DATE_DICT) +@click.option("--begin-update-time", **BEGIN_DATE_DICT) +@click.option("--end-update-time", **END_DATE_DICT) +@click.option("--status", help="Filter cases by case status.") +@format_option +@sdk_options() +def _list( + state, + name, + assignee, + subject, + begin_create_time, + end_create_time, + begin_update_time, + end_update_time, + status, + format, +): + """List all the cases.""" + pages = state.sdk.cases.get_all( + name=name, + assignee=assignee, + subject=subject, + min_create_time=begin_create_time, + max_create_time=end_create_time, + min_update_time=begin_update_time, + max_update_time=end_update_time, + status=status, + ) + formatter = OutputFormatter(format, _get_cases_header()) + cases = [case for page in pages for case in page["cases"]] + if cases: + formatter.echo_formatted_list(cases) + else: + click.echo("No cases found.") + + +def _get_file_events(sdk, case_number): + response = sdk.cases.file_events.get_all(case_number) + if not response["events"]: + return None + return json.loads(response.text) + + +def _display_file_events(events): + if events: + click.echo("\nFile Events:\n") + click.echo(json.dumps(events, indent=4)) + else: + click.echo("\nNo events found.") + + +@cases.command() +@case_number_arg +@click.option( + "--include-file-events", + is_flag=True, + help="View file events associated to the case.", +) +@sdk_options() +@format_option +def show(state, case_number, format, include_file_events): + """Show case details.""" + formatter = OutputFormatter(format) + try: + response = state.sdk.cases.get(case_number) + formatter.echo_formatted_list([response.data]) + if include_file_events: + events = _get_file_events(state.sdk, 
case_number)
            _display_file_events(events)
    except Py42NotFoundError:
        raise Code42CLIError(f"Invalid case-number {case_number}.")


@cases.command()
@case_number_arg
@click.option(
    "--path",
    help="The file path where to save the PDF. Defaults to the current directory.",
    # NOTE(review): os.getcwd() is evaluated once at import time, so the default
    # is the directory the CLI process started in — confirm this is intended.
    default=os.getcwd(),
)
@sdk_options()
def export(state, case_number, path):
    """Download a case detail summary as a PDF file saved at the given path, named `<case-number>_case_summary.pdf`."""
    response = state.sdk.cases.export_summary(case_number)
    file = os.path.join(path, f"{case_number}_case_summary.pdf")
    with open(file, "wb") as f:
        f.write(response.content)


@cases.group(cls=OrderedGroup)
@sdk_options()
def file_events(state):
    """Fetch file events associated with the case."""
    pass


@file_events.command("list")
@case_number_arg
@sdk_options()
@format_option
def file_events_list(state, case_number, format):
    """List all the file events associated with the case."""
    formatter = OutputFormatter(format, _get_events_header())
    try:
        response = state.sdk.cases.file_events.get_all(case_number)
    except Py42NotFoundError:
        raise Code42CLIError("Invalid case-number.")

    if not response["events"]:
        click.echo("No events found.")
    else:
        events = [event for event in response["events"]]
        formatter.echo_formatted_list(events)


@file_events.command()
@case_number_option
@file_event_id_option
@sdk_options()
def add(state, case_number, event_id):
    """Associate a file event to a case, by event ID."""
    try:
        state.sdk.cases.file_events.add(case_number, event_id)
    # Closed-case and duplicate-event errors are re-raised unchanged so their
    # specific py42 messages reach the user; only the generic bad-request is mapped.
    except Py42UpdateClosedCaseError:
        raise
    except Py42CaseAlreadyHasEventError:
        raise
    except Py42BadRequestError:
        raise Code42CLIError("Invalid case-number or event-id.")


@file_events.command()
@case_number_option
@file_event_id_option
@sdk_options()
def remove(state, case_number, event_id):
    """Remove the associated file event from the case, by event ID."""
    try:
state.sdk.cases.file_events.delete(case_number, event_id) + except Py42NotFoundError: + raise Code42CLIError("Invalid case-number or event-id.") + + +@file_events.group(cls=OrderedGroup) +@sdk_options(hidden=True) +def bulk(state): + """Tools for executing bulk case file-event actions.""" + pass + + +FILE_EVENTS_HEADERS = [ + "number", + "event_id", +] + +case_file_events_generate_template = generate_template_cmd_factory( + group_name="file_events", + commands_dict={"add": FILE_EVENTS_HEADERS, "remove": FILE_EVENTS_HEADERS}, +) +bulk.add_command(case_file_events_generate_template) + + +@bulk.command( + name="add", + help="Bulk associate file events to cases using a CSV file with " + f"format: {','.join(FILE_EVENTS_HEADERS)}.", +) +@read_csv_arg(headers=FILE_EVENTS_HEADERS) +@sdk_options() +def bulk_add(state, csv_rows): + sdk = state.sdk + + def handle_row(number, event_id): + sdk.cases.file_events.add(number, event_id) + + run_bulk_process( + handle_row, + csv_rows, + progress_label="Associating file events to cases:", + ) + + +@bulk.command( + name="remove", + help="Bulk remove the file event association from cases using a CSV file with " + f"format: {','.join(FILE_EVENTS_HEADERS)}.", +) +@read_csv_arg(headers=FILE_EVENTS_HEADERS) +@sdk_options() +def bulk_remove(state, csv_rows): + sdk = state.sdk + + def handle_row(number, event_id): + sdk.cases.file_events.delete(number, event_id) + + run_bulk_process( + handle_row, + csv_rows, + progress_label="Removing the file event association from cases:", + ) diff --git a/src/code42cli/cmds/devices.py b/src/code42cli/cmds/devices.py new file mode 100644 index 000000000..d635d1785 --- /dev/null +++ b/src/code42cli/cmds/devices.py @@ -0,0 +1,734 @@ +from datetime import date + +import click +import numpy as np +from pandas import concat +from pandas import DataFrame +from pandas import json_normalize +from pandas import Series +from pandas import to_datetime +from py42 import exceptions +from 
py42.clients.settings.device_settings import IncydrDeviceSettings +from py42.exceptions import Py42NotFoundError + +from code42cli.bulk import generate_template_cmd_factory +from code42cli.bulk import run_bulk_process +from code42cli.click_ext.groups import OrderedGroup +from code42cli.click_ext.options import incompatible_with +from code42cli.click_ext.types import MagicDate +from code42cli.date_helper import round_datetime_to_day_end +from code42cli.date_helper import round_datetime_to_day_start +from code42cli.errors import Code42CLIError +from code42cli.file_readers import read_csv_arg +from code42cli.options import format_option +from code42cli.options import sdk_options +from code42cli.output_formats import DataFrameOutputFormatter +from code42cli.output_formats import OutputFormat +from code42cli.output_formats import OutputFormatter +from code42cli.worker import create_worker_stats + + +@click.group(cls=OrderedGroup) +@sdk_options(hidden=True) +def devices(state): + """Manage devices within your Code42 environment.""" + pass + + +device_guid_argument = click.argument( + "device-guid", + type=str, + callback=lambda ctx, param, arg: _verify_guid_type(arg), +) + +new_device_name_option = click.option( + "-n", "--new-device-name", help="The new name for the device.", required=True +) + + +def change_device_name_option(help_msg): + return click.option( + "--change-device-name", + required=False, + is_flag=True, + default=False, + help=help_msg, + ) + + +DATE_FORMAT = "%Y-%m-%d" +purge_date_option = click.option( + "--purge-date", + required=False, + type=click.DateTime(formats=[DATE_FORMAT]), + default=None, + help="The date on which the archive should be purged from cold storage in yyyy-MM-dd format. 
"
    "If not provided, the date will be set according to the appropriate organization settings.",
)


@devices.command()
@device_guid_argument
@new_device_name_option
@sdk_options()
def rename(state, device_guid, new_device_name):
    """Rename a device with Code42. Requires the device GUID to rename."""
    _change_device_name(state.sdk, device_guid, new_device_name)


@devices.command()
@device_guid_argument
@change_device_name_option(
    "Prepend 'deactivated_' to the name of the device if deactivation is successful."
)
@purge_date_option
@sdk_options()
def deactivate(state, device_guid, change_device_name, purge_date):
    """Deactivate a device within Code42. Requires the device GUID to deactivate."""
    _deactivate_device(state.sdk, device_guid, change_device_name, purge_date)


@devices.command()
@device_guid_argument
@sdk_options()
def reactivate(state, device_guid):
    """Reactivate a device within Code42. Requires the device GUID to reactivate."""
    _reactivate_device(state.sdk, device_guid)


def _deactivate_device(sdk, device_guid, change_device_name, purge_date):
    """Deactivate a device, optionally setting a purge date and renaming it."""
    try:
        device = _change_device_activation(sdk, device_guid, "deactivate")
    # NOTE(review): a 400 here is surfaced as "in legal hold" — presumably the
    # server rejects deactivation of held devices with a bad request; confirm.
    except exceptions.Py42BadRequestError:
        raise Code42CLIError(f"The device with GUID '{device_guid}' is in legal hold.")
    if purge_date:
        _update_cold_storage_purge_date(sdk, device_guid, purge_date)
    # Skip the rename when the device was already renamed by a previous deactivation.
    if change_device_name and not device.data["name"].startswith("deactivated_"):
        _change_device_name(
            sdk,
            device_guid,
            "deactivated_"
            + date.today().strftime("%Y-%m-%d")
            + "_"
            + device.data["name"],
        )


def _reactivate_device(sdk, device_guid):
    _change_device_activation(sdk, device_guid, "reactivate")


def _change_device_activation(sdk, device_guid, cmd_str):
    """Activate or deactivate a device; `cmd_str` is "reactivate" or "deactivate"."""
    try:
        device = sdk.devices.get_by_guid(device_guid)
        # The activation endpoints take the numeric computer ID, not the GUID.
        device_id = device.data["computerId"]
        if cmd_str == "reactivate":
            sdk.devices.reactivate(device_id)
        elif cmd_str == "deactivate":
            sdk.devices.deactivate(device_id)
        return device
    except exceptions.Py42NotFoundError:
        raise Code42CLIError(f"The device with GUID '{device_guid}' was not found.")
    except exceptions.Py42ForbiddenError:
        raise Code42CLIError(
            f"Unable to {cmd_str} the device with GUID '{device_guid}'."
        )


def _verify_guid_type(device_guid):
    """Validate that the GUID argument is numeric; returns it unchanged when valid."""
    if device_guid is None:
        return
    try:
        int(device_guid)
        return device_guid
    except ValueError:
        raise Code42CLIError("Not a valid GUID.")


def _update_cold_storage_purge_date(sdk, guid, purge_date):
    """Apply the purge date to all of the device's non-V2 archives."""
    archives_response = sdk.archive.get_all_by_device_guid(guid)
    archive_guid_list = [
        archive["archiveGuid"]
        for page in archives_response
        for archive in page["archives"]
        # ARCHIVE_V2 archives are excluded from cold-storage purge-date updates.
        if archive["format"] != "ARCHIVE_V2"
    ]
    for archive_guid in archive_guid_list:
        sdk.archive.update_cold_storage_purge_date(
            archive_guid, purge_date.strftime("%Y-%m-%d")
        )


def _change_device_name(sdk, guid, name):
    """Rename a device, mapping py42/settings failures to CLI errors."""
    try:
        device_settings = sdk.devices.get_settings(guid)
        # Incydr devices expose a different settings object and cannot be renamed.
        if isinstance(device_settings, IncydrDeviceSettings):
            raise Code42CLIError(
                "Failed to rename device. Incydr devices cannot be renamed."
            )
        device_settings.name = name
        sdk.devices.update_settings(device_settings)
    except KeyError:
        raise Code42CLIError(
            "Failed to rename device. This device is missing expected settings fields."
        )
    except exceptions.Py42ForbiddenError:
        raise Code42CLIError(
            f"You don't have the necessary permissions to rename the device with GUID '{guid}'."
        )
    except exceptions.Py42NotFoundError:
        raise Code42CLIError(f"The device with GUID '{guid}' was not found.")


@devices.command()
@device_guid_argument
@sdk_options()
def show(state, device_guid):
    """Print individual device details.
Requires device GUID.""" + + formatter = OutputFormatter(OutputFormat.TABLE, _device_info_keys_map()) + backup_set_formatter = OutputFormatter(OutputFormat.TABLE, _backup_set_keys_map()) + device_info = _get_device_info(state.sdk, device_guid) + formatter.echo_formatted_list([device_info]) + backup_usage = device_info.get("backupUsage") + if backup_usage: + click.echo() + backup_set_formatter.echo_formatted_list(backup_usage) + + +def _device_info_keys_map(): + return { + "name": "Name", + "osHostname": "Hostname", + "guid": "GUID", + "status": "Status", + "lastConnected": "Last Connected Date", + "productVersion": "Code42 Version", + "osName": "Operating System", + "osVersion": "Operating System Version", + } + + +def _backup_set_keys_map(): + return { + "targetComputerName": "Destination", + "lastBackup": "Last Backup Activity", + "lastCompleted": "Last Completed Backup", + "archiveBytes": "Archive Size in Bytes", + "archiveGuid": "Archive GUID", + } + + +def _get_device_info(sdk, device_guid): + return sdk.devices.get_by_guid(device_guid, include_backup_usage=True).data + + +active_option = click.option( + "--active", + is_flag=True, + help="Limits results to only active devices.", + default=None, +) +inactive_option = click.option( + "--inactive", + is_flag=True, + help="Limits results to only deactivated devices.", + cls=incompatible_with("active"), +) +org_uid_option = click.option( + "--org-uid", + required=False, + type=str, + default=None, + help="Limit devices to only those in the organization you specify. " + "Note that child organizations will be included.", +) +page_size_option = click.option( + "--page-size", + required=False, + type=int, + default=100, + help="Number of devices to retrieve per API call. " + "Lower this value if you are getting timeouts when retrieving devices with backup info. 
Default: 100", +) + +include_usernames_option = click.option( + "--include-usernames", + required=False, + type=bool, + default=False, + is_flag=True, + help="Add the username associated with a device to the output.", +) + + +@devices.command(name="list") +@active_option +@inactive_option +@org_uid_option +@click.option( + "--include-backup-usage", + required=False, + type=bool, + default=False, + is_flag=True, + help="Return backup usage information for each device (may significantly lengthen the size " + "of the return).", +) +@include_usernames_option +@click.option( + "--include-settings", + required=False, + type=bool, + default=False, + is_flag=True, + help="Include device settings in output.", +) +@click.option( + "--include-legal-hold-membership", + required=False, + type=bool, + default=False, + is_flag=True, + help="Include legal hold membership in output.", +) +@click.option( + "--include-total-storage", + required=False, + type=bool, + default=False, + is_flag=True, + help="Include backup archive count and total storage in output.", +) +@click.option( + "--exclude-most-recently-connected", + type=int, + help="Filter out the N most recently connected devices per user. " + "Useful for identifying duplicate and/or replaced devices that are no longer needed across " + "an environment. If a user has 2 devices and N=1, the one device with the most recent " + "'lastConnected' date will not show up in the result list.", +) +@click.option( + "--last-connected-before", + type=MagicDate(rounding_func=round_datetime_to_day_start), + help=f"Include devices only when the 'lastConnected' field is after the provided value. {MagicDate.HELP_TEXT}", +) +@click.option( + "--last-connected-after", + type=MagicDate(rounding_func=round_datetime_to_day_end), + help="Include devices only when 'lastConnected' field is after the provided value. 
" + "Argument format options are the same as --last-connected-before.", +) +@click.option( + "--created-before", + type=MagicDate(rounding_func=round_datetime_to_day_start), + help="Include devices only when 'creationDate' field is less than the provided value. " + "Argument format options are the same as --last-connected-before.", +) +@click.option( + "--created-after", + type=MagicDate(rounding_func=round_datetime_to_day_end), + help="Include devices only when 'creationDate' field is greater than the provided value. " + "Argument format options are the same as --last-connected-before.", +) +@page_size_option +@format_option +@sdk_options() +def list_devices( + state, + active, + inactive, + org_uid, + include_backup_usage, + include_usernames, + include_settings, + include_legal_hold_membership, + include_total_storage, + exclude_most_recently_connected, + last_connected_after, + last_connected_before, + created_after, + created_before, + page_size, + format, +): + """Get information about many devices.""" + if inactive: + active = False + columns = [ + "computerId", + "guid", + "name", + "osHostname", + "status", + "lastConnected", + "creationDate", + "productVersion", + "osName", + "osVersion", + "userUid", + ] + df = _get_device_dataframe( + sdk=state.sdk, + columns=columns, + page_size=page_size, + active=active, + org_uid=org_uid, + include_backup_usage=(include_backup_usage or include_total_storage), + ) + if exclude_most_recently_connected: + most_recent = ( + df.sort_values(["userUid", "lastConnected"], ascending=False) + .groupby("userUid") + .head(exclude_most_recently_connected) + ) + df = df.drop(most_recent.index) + if last_connected_after: + df = df.loc[to_datetime(df.lastConnected) > last_connected_after] + if last_connected_before: + df = df.loc[to_datetime(df.lastConnected) < last_connected_before] + if created_after: + df = df.loc[to_datetime(df.creationDate) > created_after] + if created_before: + df = df.loc[to_datetime(df.creationDate) < 
created_before] + if include_total_storage: + df = _add_storage_totals_to_dataframe(df, include_backup_usage) + if include_settings: + df = _add_settings_to_dataframe(state.sdk, df) + if include_usernames: + df = _add_usernames_to_device_dataframe(state.sdk, df) + if include_legal_hold_membership: + df = _add_legal_hold_membership_to_device_dataframe(state.sdk, df) + formatter = DataFrameOutputFormatter(format) + formatter.echo_formatted_dataframes(df) + + +def _add_legal_hold_membership_to_device_dataframe(sdk, df): + columns = ["legalHold.legalHoldUid", "legalHold.name", "user.userUid"] + + legal_hold_member_dataframe = ( + json_normalize(list(_get_all_active_hold_memberships(sdk)))[columns] + .groupby(["user.userUid"]) + .agg(",".join) + .rename( + { + "legalHold.legalHoldUid": "legalHoldUid", + "legalHold.name": "legalHoldName", + }, + axis=1, + ) + ) + df = df.merge( + legal_hold_member_dataframe, + how="left", + left_on="userUid", + right_on="user.userUid", + ) + + df.loc[df["status"] == "Deactivated", ["legalHoldUid", "legalHoldName"]] = np.nan + + return df + + +def _get_all_active_hold_memberships(sdk): + for page in sdk.legalhold.get_all_matters(active=True): + if sdk._auth_flag == 1: # noqa: api client endpoint returns a list directly + matters = page.data + else: + matters = page["legalHolds"] + for matter in matters: + if sdk._auth_flag == 1: # noqa: api client endpoint returns a list directly + for _page in sdk.legalhold.get_all_matter_custodians( + legal_hold_matter_uid=matter["legalHoldUid"], active=True + ): + yield from _page.data + else: + for _page in sdk.legalhold.get_all_matter_custodians( + legal_hold_uid=matter["legalHoldUid"], active=True + ): + yield from _page["legalHoldMemberships"] + + +def _get_device_dataframe( + sdk, columns, page_size, active=None, org_uid=None, include_backup_usage=False +): + devices_generator = sdk.devices.get_all( + active=active, + include_backup_usage=include_backup_usage, + org_uid=org_uid, + 
page_size=page_size, + ) + devices_list = [] + if include_backup_usage: + columns.append("backupUsage") + for page in devices_generator: + devices_list.extend(page["computers"]) + return DataFrame.from_records(devices_list, columns=columns) + + +def _add_settings_to_dataframe(sdk, device_dataframe): + macos_guids = [ + {"guid": value} + for value in device_dataframe.loc[ + device_dataframe["osName"] == "mac", "guid" + ].values + ] + + def handle_row(guid): + try: + full_disk_access_status = sdk.devices.get_agent_full_disk_access_state( + guid + ).data[ + "value" + ] # returns 404 error if device isn't a Mac or doesn't have full disk access + except Py42NotFoundError: + full_disk_access_status = False + return { + "guid": guid, + "full disk access status": full_disk_access_status, + } + + result_list = DataFrame.from_records( + run_bulk_process( + handle_row, macos_guids, progress_label="Getting device settings" + ) + ) + try: + return device_dataframe.merge(result_list, how="left", on="guid") + except KeyError: + return device_dataframe + + +def _add_usernames_to_device_dataframe(sdk, device_dataframe): + users_generator = sdk.users.get_all() + users_list = [] + for page in users_generator: + users_list.extend(page["users"]) + users_dataframe = DataFrame.from_records( + users_list, columns=["username", "userUid"] + ) + return device_dataframe.merge(users_dataframe, how="left", on="userUid") + + +def _add_storage_totals_to_dataframe(df, include_backup_usage): + df[["archiveCount", "totalStorageBytes"]] = df["backupUsage"].apply( + _break_backup_usage_into_total_storage + ) + + if not include_backup_usage: + df = df.drop("backupUsage", axis=1) + return df + + +def _break_backup_usage_into_total_storage(backup_usage): + total_storage = 0 + archive_count = 0 + for archive in backup_usage: + if archive["archiveFormat"] != "ARCHIVE_V2": + archive_count += 1 + total_storage += archive["archiveBytes"] + return Series([archive_count, total_storage]) + + +@devices.command() 
+@active_option +@inactive_option +@org_uid_option +@include_usernames_option +@page_size_option +@format_option +@sdk_options() +def list_backup_sets( + state, + active, + inactive, + org_uid, + include_usernames, + page_size, + format, +): + """Get information about many devices and their backup sets.""" + if inactive: + active = False + columns = ["guid", "userUid"] + df = _get_device_dataframe(state.sdk, columns, active, org_uid) + if include_usernames: + df = _add_usernames_to_device_dataframe(state.sdk, df) + df = _add_backup_set_settings_to_dataframe(state.sdk, df) + formatter = DataFrameOutputFormatter(format) + formatter.echo_formatted_dataframes(df) + + +def _add_backup_set_settings_to_dataframe(sdk, devices_dataframe): + rows = [{"guid": guid} for guid in devices_dataframe["guid"].values] + + def handle_row(guid): + try: + current_device_settings = sdk.devices.get_settings(guid) + except Exception as err: + return DataFrame.from_records( + [ + { + "guid": guid, + "ERROR": f"Unable to retrieve device settings for {guid}: {err}", + } + ] + ) + current_result_dataframe = DataFrame.from_records( + [ + { + "guid": current_device_settings.guid, + "backup set name": backup_set["name"], + "destinations": [ + destination for destination in backup_set.destinations.values() + ], + "included files": list(backup_set.included_files), + "excluded files": list(backup_set.excluded_files), + "filename exclusions": list(backup_set.filename_exclusions), + "locked": backup_set.locked, + } + for backup_set in current_device_settings.backup_sets + ] + ) + return current_result_dataframe + + result_list = run_bulk_process( + handle_row, rows, progress_label="Getting device settings" + ) + try: + return devices_dataframe.merge(concat(result_list), how="left", on="guid") + except KeyError: + return devices_dataframe + + +@devices.group(cls=OrderedGroup) +@sdk_options(hidden=True) +def bulk(state): + """Tools for managing devices in bulk.""" + pass + + 
+_bulk_device_activation_headers = ["guid"] +_bulk_device_rename_headers = ["guid", "name"] + +devices_generate_template = generate_template_cmd_factory( + group_name="devices", + commands_dict={ + "reactivate": _bulk_device_activation_headers, + "deactivate": _bulk_device_activation_headers, + "rename": _bulk_device_rename_headers, + }, + help_message="Generate the CSV template needed for bulk device commands.", +) +bulk.add_command(devices_generate_template) + + +@bulk.command(name="deactivate") +@read_csv_arg(headers=_bulk_device_activation_headers) +@change_device_name_option( + "Prepend 'deactivated_' to the name of any successfully deactivated devices." +) +@purge_date_option +@format_option +@sdk_options() +def bulk_deactivate(state, csv_rows, change_device_name, purge_date, format): + """Deactivate all devices from the provided CSV containing a 'guid' column.""" + + # Initialize the SDK before starting any bulk processes + # to prevent multiple instances and having to enter 2fa multiple times. 
+ sdk = state.sdk + + csv_rows[0]["deactivated"] = "False" + formatter = OutputFormatter(format, {key: key for key in csv_rows[0].keys()}) + stats = create_worker_stats(len(csv_rows)) + for row in csv_rows: + row["change_device_name"] = change_device_name + row["purge_date"] = purge_date + + def handle_row(**row): + try: + _deactivate_device( + sdk, row["guid"], row["change_device_name"], row["purge_date"] + ) + row["deactivated"] = "True" + except Exception as err: + row["deactivated"] = f"False: {err}" + stats.increment_total_errors() + return row + + result_rows = run_bulk_process( + handle_row, + csv_rows, + progress_label="Deactivating devices:", + stats=stats, + raise_global_error=False, + ) + formatter.echo_formatted_list(result_rows) + + +@bulk.command(name="reactivate") +@read_csv_arg(headers=_bulk_device_activation_headers) +@format_option +@sdk_options() +def bulk_reactivate(state, csv_rows, format): + """Reactivate all devices from the provided CSV containing a 'guid' column.""" + + # Initialize the SDK before starting any bulk processes + # to prevent multiple instances and having to enter 2fa multiple times. 
+ sdk = state.sdk + + csv_rows[0]["reactivated"] = "False" + formatter = OutputFormatter(format, {key: key for key in csv_rows[0].keys()}) + stats = create_worker_stats(len(csv_rows)) + + def handle_row(**row): + try: + _reactivate_device(sdk, row["guid"]) + row["reactivated"] = "True" + except Exception as err: + row["reactivated"] = f"False: {err}" + stats.increment_total_errors() + return row + + result_rows = run_bulk_process( + handle_row, + csv_rows, + progress_label="Reactivating devices:", + stats=stats, + raise_global_error=False, + ) + formatter.echo_formatted_list(result_rows) + + +@bulk.command(name="rename") +@read_csv_arg(headers=_bulk_device_rename_headers) +@format_option +@sdk_options() +def bulk_rename(state, csv_rows, format): + """Rename all devices from the provided CSV containing a 'guid' and a 'name' column.""" + + # Initialize the SDK before starting any bulk processes + # to prevent multiple instances and having to enter 2fa multiple times. + sdk = state.sdk + + csv_rows[0]["renamed"] = "False" + formatter = OutputFormatter(format, {key: key for key in csv_rows[0].keys()}) + stats = create_worker_stats(len(csv_rows)) + + def handle_row(**row): + try: + _change_device_name(sdk, row["guid"], row["name"]) + row["renamed"] = "True" + except Exception as err: + row["renamed"] = f"False: {err}" + stats.increment_total_errors() + return row + + result_rows = run_bulk_process( + handle_row, + csv_rows, + progress_label="Renaming devices:", + stats=stats, + raise_global_error=False, + ) + formatter.echo_formatted_list(result_rows) diff --git a/src/code42cli/cmds/legal_hold.py b/src/code42cli/cmds/legal_hold.py new file mode 100644 index 000000000..df17d5c6e --- /dev/null +++ b/src/code42cli/cmds/legal_hold.py @@ -0,0 +1,326 @@ +import json +from functools import lru_cache +from pprint import pformat + +import click +from click import echo +from click import style + +from code42cli.bulk import generate_template_cmd_factory +from code42cli.bulk import 
run_bulk_process +from code42cli.click_ext.groups import OrderedGroup +from code42cli.cmds.shared import get_user_id +from code42cli.errors import UserNotInLegalHoldError +from code42cli.file_readers import read_csv_arg +from code42cli.options import format_option +from code42cli.options import sdk_options +from code42cli.options import set_begin_default_dict +from code42cli.options import set_end_default_dict +from code42cli.output_formats import OutputFormat +from code42cli.output_formats import OutputFormatter +from code42cli.util import format_string_list_to_columns + + +_MATTER_KEYS_MAP = { + "legalHoldUid": "Matter ID", + "name": "Name", + "description": "Description", + "creator_username": "Creator", + "creationDate": "Creation Date", +} +_EVENT_KEYS_MAP = { + "eventUid": "Event ID", + "eventType": "Event Type", + "eventDate": "Event Date", + "legalHoldUid": "Legal Hold ID", + "actorUsername": "Actor Username", + "custodianUsername": "Custodian Username", +} +LEGAL_HOLD_KEYWORD = "legal hold events" +LEGAL_HOLD_EVENT_TYPES = [ + "MembershipCreated", + "MembershipReactivated", + "MembershipDeactivated", + "HoldCreated", + "HoldDeactivated", + "HoldReactivated", + "Restore", +] +BEGIN_DATE_DICT = set_begin_default_dict(LEGAL_HOLD_KEYWORD) +END_DATE_DICT = set_end_default_dict(LEGAL_HOLD_KEYWORD) + + +@click.group(cls=OrderedGroup) +@sdk_options(hidden=True) +def legal_hold(state): + """Add and remove custodians from legal hold matters.""" + + +def matter_id_option(required, help): + return click.option("-m", "--matter-id", required=required, type=str, help=help) + + +user_id_option = click.option( + "-u", + "--username", + required=True, + type=str, + help="The username of the custodian to add to the matter.", +) + + +@legal_hold.command() +@matter_id_option( + True, + "Identification number of the legal hold matter the custodian will be added to.", +) +@user_id_option +@sdk_options() +def add_user(state, matter_id, username): + """Add a custodian to a legal 
hold matter.""" + _add_user_to_legal_hold(state.sdk, matter_id, username) + + +@legal_hold.command() +@matter_id_option( + True, + "Identification number of the legal hold matter the custodian will be removed from.", +) +@user_id_option +@sdk_options() +def remove_user(state, matter_id, username): + """Release a custodian from a legal hold matter.""" + _remove_user_from_legal_hold(state, state.sdk, matter_id, username) + + +@legal_hold.command("list") +@format_option +@sdk_options() +def _list(state, format=None): + """Fetch existing legal hold matters.""" + formatter = OutputFormatter(format, _MATTER_KEYS_MAP) + matters = _get_all_active_matters(state) + if matters: + formatter.echo_formatted_list(matters) + + +@legal_hold.command() +@click.argument("matter-id") +@click.option( + "--include-inactive", + is_flag=True, + help="View all custodians associated with the legal hold matter, " + "including inactive custodians.", +) +@click.option( + "--include-policy", + is_flag=True, + help="View details of the preservation policy associated with the legal hold matter.", +) +@sdk_options() +def show(state, matter_id, include_inactive=False, include_policy=False): + """Display details of a given legal hold matter.""" + matter = _check_matter_is_accessible(state.sdk, matter_id) + + if state.profile.api_client_auth == "True": + try: + matter["creator_username"] = matter["creator"]["user"]["email"] + except KeyError: + pass + else: + matter["creator_username"] = matter["creator"]["username"] + matter = json.loads(matter.text) + + # if `active` is None then all matters (whether active or inactive) are returned. True returns + # only those that are active. 
+ active = None if include_inactive else True + memberships = _get_legal_hold_memberships_for_matter( + state, state.sdk, matter_id, active=active + ) + active_usernames = [ + member["user"]["username"] for member in memberships if member["active"] + ] + inactive_usernames = [ + member["user"]["username"] for member in memberships if not member["active"] + ] + + formatter = OutputFormatter(OutputFormat.TABLE, _MATTER_KEYS_MAP) + formatter.echo_formatted_list([matter]) + _print_matter_members(active_usernames, member_type="active") + + if include_inactive: + _print_matter_members(inactive_usernames, member_type="inactive") + + if include_policy: + _get_and_print_preservation_policy(state.sdk, matter["holdPolicyUid"]) + echo("") + + +@legal_hold.command() +@matter_id_option(False, "Filter results by legal hold UID.") +@click.option( + "--event-type", + type=click.Choice(LEGAL_HOLD_EVENT_TYPES), + help="Filter results by event types.", +) +@click.option("--begin", **BEGIN_DATE_DICT) +@click.option("--end", **END_DATE_DICT) +@format_option +@sdk_options() +def search_events(state, matter_id, event_type, begin, end, format): + """Tools for getting legal hold event data.""" + if state.profile.api_client_auth == "True": + echo( + style( + "WARNING: This method is unavailable with API Client Authentication.", + fg="red", + ), + err=True, + ) + + formatter = OutputFormatter(format, _EVENT_KEYS_MAP) + events = _get_all_events(state.sdk, matter_id, begin, end) + if event_type: + events = [event for event in events if event["eventType"] == event_type] + if events: + formatter.echo_formatted_list(events) + else: + click.echo("No results found.") + + +@legal_hold.group(cls=OrderedGroup) +@sdk_options(hidden=True) +def bulk(state): + """Tools for executing bulk legal hold actions.""" + pass + + +LEGAL_HOLD_CSV_HEADERS = ["matter_id", "username"] + + +legal_hold_generate_template = generate_template_cmd_factory( + group_name="legal_hold", + commands_dict={"add": 
LEGAL_HOLD_CSV_HEADERS, "remove": LEGAL_HOLD_CSV_HEADERS}, +) +bulk.add_command(legal_hold_generate_template) + + +@bulk.command( + name="add", + help=f"Bulk add custodians to legal hold matters using a CSV file. " + f"CSV file format: {','.join(LEGAL_HOLD_CSV_HEADERS)}", +) +@read_csv_arg(headers=LEGAL_HOLD_CSV_HEADERS) +@sdk_options() +def bulk_add(state, csv_rows): + sdk = state.sdk + + def handle_row(matter_id, username): + _add_user_to_legal_hold(sdk, matter_id, username) + + run_bulk_process(handle_row, csv_rows, progress_label="Adding users to legal hold:") + + +@bulk.command( + help=f"Bulk release custodians from legal hold matters using a CSV file. " + f"CSV file format: {','.join(LEGAL_HOLD_CSV_HEADERS)}" +) +@read_csv_arg(headers=LEGAL_HOLD_CSV_HEADERS) +@sdk_options() +def remove(state, csv_rows): + sdk = state.sdk + + def handle_row(matter_id, username): + _remove_user_from_legal_hold(state, sdk, matter_id, username) + + run_bulk_process( + handle_row, csv_rows, progress_label="Removing users from legal hold:" + ) + + +def _add_user_to_legal_hold(sdk, matter_id, username): + user_id = get_user_id(sdk, username) + _check_matter_is_accessible(sdk, matter_id) + sdk.legalhold.add_to_matter(user_id, matter_id) + + +def _remove_user_from_legal_hold(state, sdk, matter_id, username): + _check_matter_is_accessible(sdk, matter_id) + + user_id = get_user_id(sdk, username) + memberships = _get_legal_hold_memberships_for_matter( + state, sdk, matter_id, active=True + ) + membership_id = None + for member in memberships: + if member["user"]["userUid"] == user_id: + membership_id = member["legalHoldMembershipUid"] + if not membership_id: + raise UserNotInLegalHoldError(username, matter_id) + + sdk.legalhold.remove_from_matter(membership_id) + + +def _get_and_print_preservation_policy(sdk, policy_uid): + preservation_policy = sdk.legalhold.get_policy_by_uid(policy_uid) + echo("\nPreservation Policy:\n") + echo(pformat(json.loads(preservation_policy.text))) + + +def 
_get_legal_hold_memberships_for_matter(state, sdk, matter_id, active=True): + memberships_generator = sdk.legalhold.get_all_matter_custodians( + matter_id, active=active + ) + if state.profile.api_client_auth == "True": + memberships = [member for page in memberships_generator for member in page] + else: + memberships = [ + member + for page in memberships_generator + for member in page["legalHoldMemberships"] + ] + return memberships + + +def _get_all_active_matters(state): + matters_generator = state.sdk.legalhold.get_all_matters() + if state.profile.api_client_auth == "True": + matters = [ + matter for page in matters_generator for matter in page if matter["active"] + ] + for matter in matters: + try: + matter["creator_username"] = matter["creator"]["user"]["email"] + except KeyError: + pass + else: + matters = [ + matter + for page in matters_generator + for matter in page["legalHolds"] + if matter["active"] + ] + for matter in matters: + matter["creator_username"] = matter["creator"]["username"] + return matters + + +def _get_all_events(sdk, legal_hold_uid, begin_date, end_date): + events_generator = sdk.legalhold.get_all_events( + legal_hold_uid, begin_date, end_date + ) + events = [event for page in events_generator for event in page["legalHoldEvents"]] + return events + + +def _print_matter_members(username_list, member_type="active"): + if username_list: + echo(f"\n{member_type.capitalize()} matter members:\n") + format_string_list_to_columns(username_list) + else: + echo(f"No {member_type} matter members.\n") + + +@lru_cache(maxsize=None) +def _check_matter_is_accessible(sdk, matter_id): + return sdk.legalhold.get_matter_by_uid(matter_id) diff --git a/src/code42cli/cmds/profile.py b/src/code42cli/cmds/profile.py new file mode 100644 index 000000000..8c0b1567f --- /dev/null +++ b/src/code42cli/cmds/profile.py @@ -0,0 +1,446 @@ +from getpass import getpass + +import click +from click import echo +from click import secho + +import code42cli.profile as 
cliprofile +from code42cli.click_ext.options import incompatible_with +from code42cli.click_ext.types import PromptChoice +from code42cli.click_ext.types import TOTP +from code42cli.errors import Code42CLIError +from code42cli.options import yes_option +from code42cli.profile import CREATE_PROFILE_HELP +from code42cli.sdk_client import create_sdk +from code42cli.util import does_user_agree + + +@click.group() +def profile(): + """Manage Code42 connection settings.""" + pass + + +debug_option = click.option( + "-d", + "--debug", + is_flag=True, + help="Turn on debug logging.", +) +totp_option = click.option( + "--totp", help="TOTP token for multi-factor authentication.", type=TOTP() +) + + +def profile_name_arg(required=False): + return click.argument("profile_name", required=required) + + +def name_option(required=False): + return click.option( + "-n", + "--name", + required=required, + help="The name of the Code42 CLI profile to use when executing this command.", + ) + + +def server_option(required=False): + return click.option( + "-s", + "--server", + required=required, + help="The URL you use to sign into Code42.", + ) + + +def username_option(required=False): + return click.option( + "-u", + "--username", + required=required, + cls=incompatible_with(["api_client_id", "secret"]), + help="The username of the Code42 API user.", + ) + + +password_option = click.option( + "--password", + cls=incompatible_with(["api_client_id", "secret"]), + help="The password for the Code42 API user. If this option is omitted, interactive prompts " + "will be used to obtain the password.", +) + +disable_ssl_option = click.option( + "--disable-ssl-errors", + type=click.types.BOOL, + help="For development purposes, do not validate the SSL certificates of Code42 servers. " + "This is not recommended, except for specific scenarios like testing. 
Attach this flag to the update command to toggle the setting.", + default=None, +) + +use_v2_file_events_option = click.option( + "--use-v2-file-events", + type=click.types.BOOL, + help="Opts to use the V2 file event data model. Attach this flag to the update command to toggle the setting", + default=None, +) + + +def api_client_id_option(required=False): + return click.option( + "--api-client-id", + required=required, + cls=incompatible_with(["username", "password", "totp"]), + help="The API client key for API client authentication. Used with the `--secret` option.", + ) + + +def secret_option(required=False): + return click.option( + "--secret", + required=required, + cls=incompatible_with(["username", "password", "totp"]), + help="The API secret for API client authentication. Used with the `--api-client` option.", + ) + + +@profile.command() +@profile_name_arg() +def show(profile_name): + """Print the details of a profile.""" + c42profile = cliprofile.get_profile(profile_name) + echo(f"\n{c42profile.name}:") + if c42profile.api_client_auth == "True": + echo(f"\t* api-client-id = {c42profile.username}") + else: + echo(f"\t* username = {c42profile.username}") + echo(f"\t* authority url = {c42profile.authority_url}") + echo(f"\t* ignore-ssl-errors = {c42profile.ignore_ssl_errors}") + echo(f"\t* use-v2-file-events = {c42profile.use_v2_file_events}") + echo(f"\t* api-client-auth-profile = {c42profile.api_client_auth}") + if c42profile.api_client_auth == "True": + if cliprofile.get_stored_password(c42profile.name) is not None: + echo("\t* The API client secret is set.") + else: + if cliprofile.get_stored_password(c42profile.name) is not None: + echo("\t* A password is set.") + echo("") + echo("") + + +@profile.command() +@name_option(required=True) +@server_option(required=True) +@username_option(required=True) +@password_option +@totp_option +@yes_option(hidden=True) +@disable_ssl_option +@use_v2_file_events_option +@debug_option +def create( + name, + server, + 
username, + password, + disable_ssl_errors, + use_v2_file_events, + debug, + totp, +): + """ + Create a profile with username/password authentication. + The first profile created will be the default. + """ + cliprofile.create_profile( + name, + server, + username, + disable_ssl_errors, + use_v2_file_events, + api_client_auth=False, + ) + password = password or _prompt_for_password() + if password: + _set_pw(name, password, debug, totp=totp, api_client=False) + echo(f"Successfully created profile '{name}'.") + + +@profile.command() +@name_option(required=True) +@server_option(required=True) +@api_client_id_option(required=True) +@secret_option(required=True) +@yes_option(hidden=True) +@disable_ssl_option +@use_v2_file_events_option +@debug_option +def create_api_client( + name, + server, + api_client_id, + secret, + disable_ssl_errors, + use_v2_file_events, + debug, +): + """ + Create a profile with Code42 API client authentication. + The first profile created will be the default. + """ + cliprofile.create_profile( + name, + server, + api_client_id, + disable_ssl_errors, + use_v2_file_events, + api_client_auth=True, + ) + _set_pw(name, secret, debug, totp=False, api_client=True) + echo(f"Successfully created profile '{name}'.") + + +@profile.command() +@name_option() +@server_option() +@api_client_id_option() +@secret_option() +@username_option() +@password_option +@totp_option +@disable_ssl_option +@use_v2_file_events_option +@yes_option(hidden=True) +@debug_option +def update( + name, + server, + api_client_id, + secret, + username, + password, + disable_ssl_errors, + use_v2_file_events, + debug, + totp, +): + """Update an existing profile.""" + c42profile = cliprofile.get_profile(name) + + if not any( + [ + server, + api_client_id, + secret, + username, + password, + disable_ssl_errors is not None, + use_v2_file_events is not None, + ] + ): + if c42profile.api_client_auth == "True": + raise click.UsageError( + "Must provide at least one of `--server`, 
@profile.command()
@name_option()
@server_option()
@api_client_id_option()
@secret_option()
@username_option()
@password_option
@totp_option
@disable_ssl_option
@use_v2_file_events_option
@yes_option(hidden=True)
@debug_option
def update(
    name,
    server,
    api_client_id,
    secret,
    username,
    password,
    disable_ssl_errors,
    use_v2_file_events,
    debug,
    totp,
):
    """Update an existing profile."""
    c42profile = cliprofile.get_profile(name)

    # Require at least one updatable field; which combination is valid depends
    # on the profile's current auth mode, so the error message is tailored.
    if not any(
        [
            server,
            api_client_id,
            secret,
            username,
            password,
            disable_ssl_errors is not None,
            use_v2_file_events is not None,
        ]
    ):
        if c42profile.api_client_auth == "True":
            raise click.UsageError(
                "Must provide at least one of `--server`, `--api-client-id`, `--secret`, `--use-v2-file-events` or "
                "`--disable-ssl-errors` when updating an API client profile. "
                "Provide both `--username` and `--password` options to switch this profile to username/password authentication."
            )
        else:
            raise click.UsageError(
                "Must provide at least one of `--server`, `--username`, `--password`, `--use-v2-file-events` or "
                "`--disable-ssl-errors` when updating a username/password authenticated profile. "
                "Provide both `--api-client-id` and `--secret` options to switch this profile to Code42 API client authentication."
            )

    if c42profile.api_client_auth == "True":
        # Currently API-client-authenticated.  Supplying exactly one of
        # username/password is ambiguous, so reject it outright.
        if (username and not password) or (password and not username):
            raise click.UsageError(
                "This profile currently uses API client authentication. "
                "Please provide both the `--username` and `--password` options to update this profile to use username/password authentication."
            )
        elif username and password:
            # Both supplied: switch to username/password auth, but only after
            # explicit confirmation since existing credentials are overwritten.
            if does_user_agree(
                "You passed the `--username` and `--password options for a profile currently using Code42 API client authentication. "
                "Are you sure you would like to update this profile to use username/password authentication? This will overwrite existing credentials. (y/n): "
            ):
                cliprofile.update_profile(
                    c42profile.name,
                    server,
                    username,
                    disable_ssl_errors,
                    use_v2_file_events,
                    api_client_auth=False,
                )
                _set_pw(c42profile.name, password, debug, api_client=False)
            else:
                echo(f"Profile '{c42profile.name}` was not updated.")
                return
        else:
            # Plain update of the existing API-client profile.
            cliprofile.update_profile(
                c42profile.name,
                server,
                api_client_id,
                disable_ssl_errors,
                use_v2_file_events,
            )
            if secret:
                _set_pw(c42profile.name, secret, debug, api_client=True)

    else:
        # Currently username/password-authenticated.
        # BUGFIX: the second disjunct previously repeated
        # `api_client_id and not secret`, so passing `--secret` without
        # `--api-client-id` slipped past this guard (compare the symmetric
        # username/password check in the branch above).
        if (api_client_id and not secret) or (secret and not api_client_id):
            raise click.UsageError(
                "This profile currently uses username/password authentication. "
                "Please provide both the `--api-client-id` and `--secret` options to update this profile to use Code42 API client authentication."
            )
        elif api_client_id and secret:
            # Both supplied: switch to API-client auth after confirmation.
            if does_user_agree(
                "You passed the `--api-client-id` and `--secret options for a profile currently using username/password authentication. "
                "Are you sure you would like to update this profile to use Code42 API client authentication? This will overwrite existing credentials. (y/n): "
            ):
                cliprofile.update_profile(
                    c42profile.name,
                    server,
                    api_client_id,
                    disable_ssl_errors,
                    use_v2_file_events,
                    api_client_auth=True,
                )
                _set_pw(c42profile.name, secret, debug, api_client=True)
            else:
                echo(f"Profile '{name}` was not updated.")
                return
        else:
            # Plain update of the existing username/password profile.
            cliprofile.update_profile(
                c42profile.name,
                server,
                username,
                disable_ssl_errors,
                use_v2_file_events,
            )
            # Prompt for a password only when none was given and none is stored.
            if not password and not c42profile.has_stored_password:
                password = _prompt_for_password()

            if password:
                _set_pw(c42profile.name, password, debug, totp=totp)

    echo(f"Profile '{c42profile.name}' has been updated.")
@profile.command()
@yes_option()
def delete_all():
    """Deletes all profiles and saved passwords (if any)."""
    existing_profiles = cliprofile.get_all_profiles()
    # Nothing to do when no profiles are stored.
    if not existing_profiles:
        echo("\nNo profiles exist. Nothing to delete.")
        return
    profile_str_list = "\n\t".join(
        [c42profile.name for c42profile in existing_profiles]
    )
    confirmation = (
        f"\nAre you sure you want to delete the following profiles?\n\t{profile_str_list}"
        "\n\nThis will also delete any stored passwords and checkpoints. (y/n): "
    )
    # One confirmation covers the whole batch; each deletion is then reported.
    if does_user_agree(confirmation):
        for profile_obj in existing_profiles:
            cliprofile.delete_profile(profile_obj.name)
            echo(f"Profile '{profile_obj.name}' has been deleted.")
def _set_pw(profile_name, password, debug, totp=None, api_client=False):
    """Validate credentials by building an SDK connection, then persist them.

    On authentication failure the error is surfaced loudly and re-raised so the
    caller can abort; on success the password is stored for the profile and the
    profile's canonical name is returned.
    """
    stored_profile = cliprofile.get_profile(profile_name)
    try:
        create_sdk(
            stored_profile,
            is_debug_mode=debug,
            password=password,
            totp=totp,
            api_client=api_client,
        )
    except Exception:
        # Warn before propagating: the password is only saved after a
        # successful connection.
        secho("Password not stored!", bold=True)
        raise
    cliprofile.set_password(password, stored_profile.name)
    return stored_profile.name
class Cursor:
    """A single named checkpoint backed by a file on disk."""

    def __init__(self, location):
        # Keep the full path for reads; the file's basename doubles as the
        # cursor's display name.
        self._location = location
        self._name = path.basename(location)

    @property
    def name(self):
        """The cursor's name (the checkpoint file's basename)."""
        return self._name

    @property
    def value(self):
        """The checkpoint file's current contents, read fresh on each access."""
        with open(self._location) as handle:
            return handle.read()
class FileEventCursorStore(BaseCursorStore):
    """Checkpoint store for file-event searches.

    Unlike the base store's float-timestamp cursors, file-event checkpoints are
    opaque strings (event IDs), so ``get`` returns them unparsed.
    """

    def __init__(self, profile_name):
        # Each profile gets its own checkpoint directory.
        super().__init__(get_user_project_path("file_event_checkpoints", profile_name))

    def get(self, cursor_name):
        """Return the stored checkpoint string, or None when missing or empty."""
        location = path.join(self._dir_path, cursor_name)
        try:
            with open(location) as checkpoint:
                contents = checkpoint.read()
        except FileNotFoundError:
            return None
        return str(contents) if contents else None
class AuditLogCursorStore(BaseCursorStore):
    """Checkpoint store for audit-log searches.

    In addition to the base timestamp cursor, it persists a JSON list of
    recently seen event IDs in a sibling file suffixed ``_events``.
    """

    def __init__(self, profile_name):
        super().__init__(get_user_project_path("audit_log_checkpoints", profile_name))

    def _events_path(self, cursor_name):
        # The event list lives alongside the timestamp cursor file.
        return path.join(self._dir_path, cursor_name) + "_events"

    def get_events(self, cursor_name):
        """Return the stored event ID list; [] when missing or unparsable."""
        try:
            with open(self._events_path(cursor_name)) as checkpoint:
                raw = checkpoint.read()
        except FileNotFoundError:
            return []
        try:
            return json.loads(raw)
        except json.JSONDecodeError:
            return []

    def replace_events(self, cursor_name, new_events):
        """Overwrite the stored event ID list with ``new_events``."""
        with open(self._events_path(cursor_name), "w") as checkpoint:
            checkpoint.write(json.dumps(new_events))
def not_in_filter(filter_cls, filter_cls_v2=None):
    """Build a click callback that appends a NOT_IN filter for the given values.

    When ``filter_cls_v2`` is provided and the active profile is configured for
    the V2 file-event data model, the V2 filter class is used instead of
    ``filter_cls``.  The raw option value is always returned unchanged.
    """

    def callback(ctx, param, arg):
        if not arg:
            return arg
        wants_v2 = (
            filter_cls_v2 is not None
            and ctx.obj.profile.use_v2_file_events == "True"
        )
        chosen = filter_cls_v2 if wants_v2 else filter_cls
        ctx.obj.search_filters.append(chosen.not_in(arg))
        return arg

    return callback
def _parse_query_from_json(ctx, param, arg):
    """Click callback: parse an advanced-query JSON document into FilterGroups.

    Returns None when no value was supplied.  Raises click.BadParameter for
    malformed JSON or for a document missing the expected "groups" key.
    """
    if arg is None:
        return
    try:
        parsed = json.loads(arg)
        raw_groups = parsed["groups"]
        return [FilterGroup.from_dict(raw_group) for raw_group in raw_groups]
    except json.JSONDecodeError as json_error:
        raise click.BadParameter(f"Unable to parse JSON: {json_error}")
    except KeyError as key_error:
        raise click.BadParameter(f"Unable to build query from input JSON: {key_error}")
def server_options(f):
    """Attach the shared send-to server argument and options to a command.

    Applies, in order: the HOSTNAME argument, --protocol, --certs, and
    --ignore-cert-validation (the latter is incompatible with --certs).
    """
    decorators = (
        click.argument("hostname"),
        click.option(
            "-p",
            "--protocol",
            type=click.Choice(ServerProtocol(), case_sensitive=False),
            default=ServerProtocol.UDP,
            help="Protocol used to send logs to server. "
            "Use TCP-TLS for additional security. Defaults to UDP.",
        ),
        click.option(
            "--certs",
            type=str,
            help="A CA certificates-chain file for the TCP-TLS protocol.",
        ),
        click.option(
            "--ignore-cert-validation",
            help="Set to skip CA certificate validation. "
            "Incompatible with the 'certs' option.",
            is_flag=True,
            default=None,
            cls=incompatible_with(["certs"]),
        ),
    )
    # Apply in the same order as the original chained assignments.
    for decorate in decorators:
        f = decorate(f)
    return f
Defaults to RAW-JSON format.", + default=SendToFileEventsOutputFormat.RAW, +) diff --git a/src/code42cli/cmds/securitydata.py b/src/code42cli/cmds/securitydata.py new file mode 100644 index 000000000..eae94d3f7 --- /dev/null +++ b/src/code42cli/cmds/securitydata.py @@ -0,0 +1,668 @@ +from pprint import pformat + +import click +import py42.sdk.queries.fileevents.filters as f +from click import echo +from pandas import DataFrame +from pandas import json_normalize +from py42.exceptions import Py42InvalidPageTokenError +from py42.sdk.queries.fileevents.file_event_query import FileEventQuery +from py42.sdk.queries.fileevents.filters import InsertionTimestamp +from py42.sdk.queries.fileevents.filters.exposure_filter import ExposureType +from py42.sdk.queries.fileevents.filters.file_filter import FileCategory +from py42.sdk.queries.fileevents.filters.risk_filter import RiskIndicator +from py42.sdk.queries.fileevents.filters.risk_filter import RiskSeverity +from py42.sdk.queries.fileevents.v2 import FileEventQuery as FileEventQueryV2 +from py42.sdk.queries.fileevents.v2 import filters as v2_filters + +import code42cli.cmds.search.options as searchopt +import code42cli.options as opt +from code42cli.click_ext.groups import OrderedGroup +from code42cli.click_ext.options import incompatible_with +from code42cli.click_ext.types import MapChoice +from code42cli.cmds.search import SendToCommand +from code42cli.cmds.search.cursor_store import FileEventCursorStore +from code42cli.cmds.util import convert_to_or_query +from code42cli.cmds.util import create_time_range_filter +from code42cli.date_helper import convert_datetime_to_timestamp +from code42cli.date_helper import limit_date_range +from code42cli.enums import OutputFormat +from code42cli.errors import Code42CLIError +from code42cli.logger import get_main_cli_logger +from code42cli.options import column_option +from code42cli.options import format_option +from code42cli.options import sdk_options +from 
def exposure_type_callback():
    """Build a click callback for the V1-only exposure-type (--type/-t) filter.

    Rejects the option outright when the profile uses the V2 file-event model;
    otherwise appends an ExposureType IS_IN filter for the given values.
    """

    def callback(ctx, param, arg):
        if not arg:
            return arg
        if ctx.obj.profile.use_v2_file_events == "True":
            raise Code42CLIError(
                "Exposure type (--type/-t) filter is incompatible with V2 file events. Use the event action (--event-action) filter instead."
            )
        ctx.obj.search_filters.append(ExposureType.is_in(arg))
        return arg

    return callback
Defaults to table format.", + default=FileEventsOutputFormat.TABLE, +) +exposure_type_option = click.option( + "-t", + "--type", + multiple=True, + type=click.Choice(list(ExposureType.choices())), + cls=searchopt.ExposureTypeIncompatible, + callback=exposure_type_callback(), + help="Limits events to those with given exposure types. Only compatible with V1 file events.", +) +event_action_option = click.option( + "--event-action", + multiple=True, + type=click.Choice(list(v2_filters.event.Action.choices())), + cls=searchopt.EventActionIncompatible, + callback=event_action_callback(), + help="Limits events to those with given event action. Only compatible with V2 file events.", +) +username_option = click.option( + "--c42-username", + multiple=True, + callback=searchopt.is_in_filter(f.DeviceUsername, v2_filters.user.Email), + cls=searchopt.AdvancedQueryAndSavedSearchIncompatible, + help="Limits events to endpoint events for these Code42 users.", +) +actor_option = click.option( + "--actor", + multiple=True, + callback=searchopt.is_in_filter(f.Actor, v2_filters.user.Email), + cls=searchopt.AdvancedQueryAndSavedSearchIncompatible, + help="Limits events to only those enacted by the cloud service user " + "of the person who caused the event.", +) +md5_option = click.option( + "--md5", + multiple=True, + callback=searchopt.is_in_filter(f.MD5, v2_filters.file.MD5), + cls=searchopt.AdvancedQueryAndSavedSearchIncompatible, + help="Limits events to file events where the file has one of these MD5 hashes.", +) +sha256_option = click.option( + "--sha256", + multiple=True, + callback=searchopt.is_in_filter(f.SHA256, v2_filters.file.SHA256), + cls=searchopt.AdvancedQueryAndSavedSearchIncompatible, + help="Limits events to file events where the file has one of these SHA256 hashes.", +) +source_option = click.option( + "--source", + multiple=True, + callback=searchopt.is_in_filter(f.Source, v2_filters.source.Name), + cls=searchopt.AdvancedQueryAndSavedSearchIncompatible, + 
help="Limits events to only those from one of these sources. For example, Gmail, Box, or Endpoint.", +) +file_name_option = click.option( + "--file-name", + multiple=True, + callback=searchopt.is_in_filter(f.FileName, v2_filters.file.Name), + cls=searchopt.AdvancedQueryAndSavedSearchIncompatible, + help="Limits events to file events where the file has one of these names.", +) +file_path_option = click.option( + "--file-path", + multiple=True, + callback=searchopt.is_in_filter(f.FilePath, v2_filters.file.Directory), + cls=searchopt.AdvancedQueryAndSavedSearchIncompatible, + help="Limits events to file events where the file is located at one of these paths. Applies to endpoint file events only.", +) +file_category_option = click.option( + "--file-category", + multiple=True, + type=MapChoice( + choices=list(FileCategory.choices()), + extras_map={ + "AUDIO": FileCategory.AUDIO, + "DOCUMENT": FileCategory.DOCUMENT, + "EXECUTABLE": FileCategory.EXECUTABLE, + "IMAGE": FileCategory.IMAGE, + "PDF": FileCategory.PDF, + "PRESENTATION": FileCategory.PRESENTATION, + "SCRIPT": FileCategory.SCRIPT, + "SOURCE_CODE": FileCategory.SOURCE_CODE, + "SPREADSHEET": FileCategory.SPREADSHEET, + "VIDEO": FileCategory.VIDEO, + "VIRTUAL_DISK_IMAGE": FileCategory.VIRTUAL_DISK_IMAGE, + "ARCHIVE": FileCategory.ZIP, + "ZIP": FileCategory.ZIP, + "Zip": FileCategory.ZIP, + }, + ), + callback=searchopt.is_in_filter(f.FileCategory, v2_filters.file.Category), + cls=searchopt.AdvancedQueryAndSavedSearchIncompatible, + help="Limits events to file events where the file can be classified by one of these categories.", +) +process_owner_option = click.option( + "--process-owner", + multiple=True, + callback=searchopt.is_in_filter(f.ProcessOwner, v2_filters.process.Owner), + cls=searchopt.AdvancedQueryAndSavedSearchIncompatible, + help="Limits exposure events by process owner, as reported by the device’s operating system. 
" + "Applies only to `Printed` and `Browser or app read` events.", +) +tab_url_option = click.option( + "--tab-url", + multiple=True, + callback=searchopt.is_in_filter(f.TabURL, v2_filters.destination.TabUrls), + cls=searchopt.AdvancedQueryAndSavedSearchIncompatible, + help="Limits events to be exposure events with one of the specified destination tab URLs.", +) + + +include_non_exposure_option = click.option( + "--include-non-exposure", + is_flag=True, + callback=get_all_events_callback(), + cls=incompatible_with(["advanced_query", "type", "saved_search"]), + help="Get all events including non-exposure events.", +) +risk_indicator_map = { + "PUBLIC_CORPORATE_BOX": RiskIndicator.CloudDataExposures.PUBLIC_CORPORATE_BOX, + "PUBLIC_CORPORATE_GOOGLE": RiskIndicator.CloudDataExposures.PUBLIC_CORPORATE_GOOGLE_DRIVE, + "PUBLIC_CORPORATE_ONEDRIVE": RiskIndicator.CloudDataExposures.PUBLIC_CORPORATE_ONEDRIVE, + "SENT_CORPORATE_GMAIL": RiskIndicator.CloudDataExposures.SENT_CORPORATE_GMAIL, + "SHARED_CORPORATE_BOX": RiskIndicator.CloudDataExposures.SHARED_CORPORATE_BOX, + "SHARED_CORPORATE_GOOGLE_DRIVE": RiskIndicator.CloudDataExposures.SHARED_CORPORATE_GOOGLE_DRIVE, + "SHARED_CORPORATE_ONEDRIVE": RiskIndicator.CloudDataExposures.SHARED_CORPORATE_ONEDRIVE, + "AMAZON_DRIVE": RiskIndicator.CloudStorageUploads.AMAZON_DRIVE, + "BOX": RiskIndicator.CloudStorageUploads.BOX, + "DROPBOX": RiskIndicator.CloudStorageUploads.DROPBOX, + "GOOGLE_DRIVE": RiskIndicator.CloudStorageUploads.GOOGLE_DRIVE, + "ICLOUD": RiskIndicator.CloudStorageUploads.ICLOUD, + "MEGA": RiskIndicator.CloudStorageUploads.MEGA, + "ONEDRIVE": RiskIndicator.CloudStorageUploads.ONEDRIVE, + "ZOHO": RiskIndicator.CloudStorageUploads.ZOHO, + "BITBUCKET": RiskIndicator.CodeRepositoryUploads.BITBUCKET, + "GITHUB": RiskIndicator.CodeRepositoryUploads.GITHUB, + "GITLAB": RiskIndicator.CodeRepositoryUploads.GITLAB, + "SOURCEFORGE": RiskIndicator.CodeRepositoryUploads.SOURCEFORGE, + "STASH": 
RiskIndicator.CodeRepositoryUploads.STASH, + "163.COM": RiskIndicator.EmailServiceUploads.ONESIXTHREE_DOT_COM, + "126.COM": RiskIndicator.EmailServiceUploads.ONETWOSIX_DOT_COM, + "AOL": RiskIndicator.EmailServiceUploads.AOL, + "COMCAST": RiskIndicator.EmailServiceUploads.COMCAST, + "GMAIL": RiskIndicator.EmailServiceUploads.GMAIL, + "ICLOUD_MAIL": RiskIndicator.EmailServiceUploads.ICLOUD, + "MAIL.COM": RiskIndicator.EmailServiceUploads.MAIL_DOT_COM, + "OUTLOOK": RiskIndicator.EmailServiceUploads.OUTLOOK, + "PROTONMAIL": RiskIndicator.EmailServiceUploads.PROTONMAIL, + "QQMAIL": RiskIndicator.EmailServiceUploads.QQMAIL, + "SINA_MAIL": RiskIndicator.EmailServiceUploads.SINA_MAIL, + "SOHU_MAIL": RiskIndicator.EmailServiceUploads.SOHU_MAIL, + "YAHOO": RiskIndicator.EmailServiceUploads.YAHOO, + "ZOHO_MAIL": RiskIndicator.EmailServiceUploads.ZOHO_MAIL, + "AIRDROP": RiskIndicator.ExternalDevices.AIRDROP, + "REMOVABLE_MEDIA": RiskIndicator.ExternalDevices.REMOVABLE_MEDIA, + "AUDIO": RiskIndicator.FileCategories.AUDIO, + "DOCUMENT": RiskIndicator.FileCategories.DOCUMENT, + "EXECUTABLE": RiskIndicator.FileCategories.EXECUTABLE, + "IMAGE": RiskIndicator.FileCategories.IMAGE, + "PDF": RiskIndicator.FileCategories.PDF, + "PRESENTATION": RiskIndicator.FileCategories.PRESENTATION, + "SCRIPT": RiskIndicator.FileCategories.SCRIPT, + "SOURCE_CODE": RiskIndicator.FileCategories.SOURCE_CODE, + "SPREADSHEET": RiskIndicator.FileCategories.SPREADSHEET, + "VIDEO": RiskIndicator.FileCategories.VIDEO, + "VIRTUAL_DISK_IMAGE": RiskIndicator.FileCategories.VIRTUAL_DISK_IMAGE, + "ZIP": RiskIndicator.FileCategories.ZIP, + "FACEBOOK_MESSENGER": RiskIndicator.MessagingServiceUploads.FACEBOOK_MESSENGER, + "MICROSOFT_TEAMS": RiskIndicator.MessagingServiceUploads.MICROSOFT_TEAMS, + "SLACK": RiskIndicator.MessagingServiceUploads.SLACK, + "WHATSAPP": RiskIndicator.MessagingServiceUploads.WHATSAPP, + "OTHER": RiskIndicator.Other.OTHER, + "UNKNOWN": RiskIndicator.Other.UNKNOWN, + "FACEBOOK": 
def risk_indicator_callback():
    """Build a click callback that maps friendly risk-indicator choices to the
    py42 filter values and appends an IS_IN filter for them.

    Picks the V2 risk filter class when the profile uses the V2 file-event
    model.  Note: when no values are passed the callback returns None (not the
    raw arg), matching the original behavior.
    """

    def callback(ctx, param, arg):
        if arg:
            indicator_filter = f.RiskIndicator
            if ctx.obj.profile.use_v2_file_events == "True":
                indicator_filter = v2_filters.risk.Indicators
            # Translate CLI choice names into py42 indicator constants.
            translated = tuple(risk_indicator_map[choice] for choice in arg)
            return searchopt.is_in_filter(indicator_filter)(ctx, param, translated)

    return callback
def _get_saved_search_option():
    """Build the --saved-search option, whose callback resolves the given saved
    search ID into a query via the SDK (using the profile's file-event version).
    """

    def _get_saved_search_query(ctx, param, arg):
        if arg is None:
            return
        query = ctx.obj.sdk.securitydata.savedsearches.get_query(
            arg, use_v2=ctx.obj.profile.use_v2_file_events == "True"
        )
        return query

    return click.option(
        "--saved-search",
        # BUGFIX: the two implicitly concatenated literals had no separator,
        # so --help rendered "...given ID.WARNING: ...".
        help="Get events from a saved search filter with the given ID. "
        "WARNING: Using a saved search is incompatible with other query-building arguments.",
        callback=_get_saved_search_query,
        cls=incompatible_with("advanced_query"),
    )
+@file_event_options +@search_options +@sdk_options() +@column_option +@searchopt.include_all_option +@file_events_format_option +def search( + state, + format, + begin, + end, + advanced_query, + use_checkpoint, + saved_search, + or_query, + columns, + include_all, + **kwargs, +): + """Search for file events.""" + + if format == FileEventsOutputFormat.CEF and columns: + raise click.BadOptionUsage( + "columns", "--columns option can't be used with CEF format." + ) + + # cef format unsupported for v2 file events + if ( + format == FileEventsOutputFormat.CEF + and state.profile.use_v2_file_events == "True" + ): + raise click.BadOptionUsage( + "format", "--format CEF is unsupported for v2 file events." + ) + + # set default table columns + if format == OutputFormat.TABLE: + if not columns and not include_all: + if state.profile.use_v2_file_events == "True": + columns = [ + "@timestamp", + "file.name", + "file.directory", + "event.action", + "file.category", + "file.sizeInBytes", + "file.owner", + "file.hash.md5", + "file.hash.sha256", + "risk.indicators", + "risk.severity", + ] + else: + columns = [ + "fileName", + "filePath", + "eventType", + "eventTimestamp", + "fileCategory", + "fileSize", + "fileOwner", + "md5Checksum", + "sha256Checksum", + "riskIndicators", + "riskSeverity", + ] + + flatten = format in (OutputFormat.TABLE, OutputFormat.CSV) + + if use_checkpoint: + cursor = _get_file_event_cursor_store(state.profile.name) + checkpoint = _handle_timestamp_checkpoint(cursor.get(use_checkpoint), state) + + if state.profile.use_v2_file_events == "True": + + def checkpoint_func(event): + event_id = event["event.id"] if flatten else event["event"]["id"] + cursor.replace(use_checkpoint, event_id) + + else: + + def checkpoint_func(event): + cursor.replace(use_checkpoint, event["eventId"]) + + else: + checkpoint = checkpoint_func = None + + query = _construct_query(state, begin, end, saved_search, advanced_query, or_query) + dfs = _get_all_file_events(state, query, 
@security_data.command(cls=SendToCommand)
@file_event_options
@search_options
@sdk_options()
@searchopt.server_options
@searchopt.send_to_format_options
def send_to(
    state,
    begin,
    end,
    advanced_query,
    use_checkpoint,
    saved_search,
    or_query,
    columns,
    **kwargs,
):
    """Send events to the given server address.

    HOSTNAME format: address:port where port is optional and defaults to 514.
    """
    if state.profile.use_v2_file_events != "True":
        deprecation_warning(DEPRECATION_TEXT)

    # BUG FIX: the original computed `flatten = format in (OutputFormat.TABLE,
    # OutputFormat.CSV)`, but `send_to` has no `format` parameter, so that
    # expression tested the *builtin* `format` function and was always False.
    # Events are never flattened in send-to mode (`_get_all_file_events` is
    # called without the flatten flag below), so access the nested event shape
    # directly in the checkpoint callbacks.
    if use_checkpoint:
        cursor = _get_file_event_cursor_store(state.profile.name)
        checkpoint = _handle_timestamp_checkpoint(cursor.get(use_checkpoint), state)

        if state.profile.use_v2_file_events == "True":

            def checkpoint_func(event):
                # v2 events are nested (never flattened) in send-to mode.
                cursor.replace(use_checkpoint, event["event"]["id"])

        else:

            def checkpoint_func(event):
                cursor.replace(use_checkpoint, event["eventId"])

    else:
        checkpoint = checkpoint_func = None

    query = _construct_query(state, begin, end, saved_search, advanced_query, or_query)
    dfs = _get_all_file_events(state, query, checkpoint)
    formatter = FileEventsOutputFormatter(None, checkpoint_func=checkpoint_func)

    with warn_interrupt():
        event = None
        for event in formatter.iter_rows(dfs, columns=columns):
            state.logger.info(event)
        if event is None:  # generator was empty
            click.echo("No results found.")
@saved_search.command()
@click.argument("search-id")
@sdk_options()
def show(state, search_id):
    """Get the details of a saved search."""
    use_v2 = state.profile.use_v2_file_events == "True"
    if not use_v2:
        deprecation_warning(DEPRECATION_TEXT)

    response = state.sdk.securitydata.savedsearches.get_by_id(
        search_id, use_v2=use_v2
    )
    echo(pformat(response["searches"]))
@lru_cache(maxsize=None)
def get_user_id(sdk, username):
    """Return the UID of the user with the given username (cached per call args).

    Raises `UserDoesNotExistError` if the user doesn't exist in the Code42 server.

    Args:
        sdk (py42.sdk.SDKClient): The py42 sdk.
        username (str or unicode): The username of the user to get an ID for.

    Returns:
        str: The user ID for the user with the given username.
    """
    matches = sdk.users.get_by_username(username)["users"]
    if matches:
        return matches[0]["userUid"]
    raise UserDoesNotExistError(username)
Valid types include {', '.join(TrustedActivityType.choices())}.", + type=click.Choice(TrustedActivityType.choices()), +) +value_option = click.option( + "--value", + help="The value of the trusted activity, such as the domain or Slack workspace name.", +) +description_option = click.option( + "--description", help="The description of the trusted activity." +) + + +def _get_trust_header(): + return { + "resourceId": "Resource Id", + "type": "Type", + "value": "Value", + "description": "Description", + "updatedAt": "Last Update Time", + "updatedByUsername": "Last Updated By (Username)", + "updatedByUserUid": "Last updated By (UserUID)", + } + + +@click.group(cls=OrderedGroup) +@sdk_options(hidden=True) +def trusted_activities(state): + """DEPRECATED - Manage trusted activities and resources.""" + deprecation_warning(DEPRECATION_TEXT) + pass + + +@trusted_activities.command() +@click.argument("type", type=click.Choice(TrustedActivityType.choices())) +@click.argument("value") +@description_option +@sdk_options() +def create(state, type, value, description): + """Create a trusted activity. + + VALUE is the name of the domain or Slack workspace. + """ + state.sdk.trustedactivities.create( + type, + value, + description=description, + ) + + +@trusted_activities.command() +@resource_id_arg +@value_option +@description_option +@sdk_options() +def update(state, resource_id, value, description): + """Update a trusted activity. Requires the activity's resource ID.""" + state.sdk.trustedactivities.update( + resource_id, + value=value, + description=description, + ) + + +@trusted_activities.command() +@resource_id_arg +@sdk_options() +def remove(state, resource_id): + """Remove a trusted activity. 
@trusted_activities.command("list")
@click.option("--type", type=click.Choice(TrustedActivityType.choices()))
@format_option
@sdk_options()
def _list(state, type, format):
    """List all trusted activities."""
    formatter = OutputFormatter(format, _get_trust_header())
    resources = []
    for page in state.sdk.trustedactivities.get_all(type=type):
        resources.extend(page["trustResources"])
    if not resources:
        click.echo("No trusted activities found.")
        return
    formatter.echo_formatted_list(resources)
+ raise Code42CLIError(message) + if type is None: + message = "'type' is a required field to create a trusted activity." + raise Code42CLIError(message) + if value is None: + message = "'value' is a required field to create a trusted activity." + raise Code42CLIError(message) + sdk.trustedactivities.create(type, value, description) + + run_bulk_process( + handle_row, + csv_rows, + progress_label="Creating trusting activities:", + ) + + +@bulk.command( + name="update", + help="Bulk update trusted activities using a CSV file with " + f"format: {','.join(TRUST_UPDATE_HEADERS)}.", +) +@read_csv_arg(headers=TRUST_UPDATE_HEADERS) +@sdk_options() +def bulk_update(state, csv_rows): + """Bulk update trusted activities.""" + sdk = state.sdk + + def handle_row(resource_id, value, description): + if resource_id is None: + message = "'resource_id' is a required field to update a trusted activity." + raise Code42CLIError(message) + _check_resource_id_type(resource_id) + sdk.trustedactivities.update(resource_id, value, description) + + run_bulk_process( + handle_row, csv_rows, progress_label="Updating trusted activities:" + ) + + +@bulk.command( + name="remove", + help="Bulk remove trusted activities using a CSV file with " + f"format: {','.join(TRUST_REMOVE_HEADERS)}.", +) +@read_csv_arg(headers=TRUST_REMOVE_HEADERS) +@sdk_options() +def bulk_remove(state, csv_rows): + """Bulk remove trusted activities.""" + sdk = state.sdk + + def handle_row(resource_id): + if resource_id is None: + message = "'resource_id' is a required field to remove a trusted activity." + raise Code42CLIError(message) + _check_resource_id_type(resource_id) + sdk.trustedactivities.delete(resource_id) + + run_bulk_process( + handle_row, + csv_rows, + progress_label="Removing trusted activities:", + ) + + +def _check_resource_id_type(resource_id): + def raise_error(resource_id): + message = f"Invalid resource ID {resource_id}. Must be an integer." 
+ raise Code42CLIError(message) + + try: + if not float(resource_id).is_integer(): + raise_error(resource_id) + except ValueError: + raise_error(resource_id) diff --git a/src/code42cli/cmds/users.py b/src/code42cli/cmds/users.py new file mode 100644 index 000000000..284d7b228 --- /dev/null +++ b/src/code42cli/cmds/users.py @@ -0,0 +1,1008 @@ +import functools + +import click +from pandas import DataFrame +from pandas import json_normalize +from py42.exceptions import Py42NotFoundError +from py42.exceptions import Py42UserRiskProfileNotFound + +from code42cli.bulk import generate_template_cmd_factory +from code42cli.bulk import run_bulk_process +from code42cli.click_ext.groups import OrderedGroup +from code42cli.click_ext.options import incompatible_with +from code42cli.errors import Code42CLIError +from code42cli.errors import UserDoesNotExistError +from code42cli.file_readers import read_csv_arg +from code42cli.options import format_option +from code42cli.options import sdk_options +from code42cli.output_formats import DataFrameOutputFormatter +from code42cli.output_formats import OutputFormat +from code42cli.output_formats import OutputFormatter +from code42cli.worker import create_worker_stats + + +username_arg = click.argument("username") + +org_uid_option = click.option( + "--org-uid", + help="Limit users to only those in the organization you specify. 
def role_name_option(help):
    """Return a click ``--role-name`` option with the given help text.

    Args:
        help (str): Help text displayed for the option.
    """
    return click.option("--role-name", help=help)
@users.command("show")
@username_arg
@include_legal_hold_option
@format_option
@sdk_options()
def show_user(state, username, include_legal_hold_membership, format):
    """Show user details."""
    if format == OutputFormat.TABLE:
        columns = ["userUid", "status", "username", "orgUid", "roles"]
    else:
        columns = None
    response = state.sdk.users.get_by_username(username, incRoles=True)
    user_df = DataFrame.from_records(response["users"], columns=columns)
    if include_legal_hold_membership and not user_df.empty:
        user_df = _add_legal_hold_membership_to_user_dataframe(state.sdk, user_df)
    DataFrameOutputFormatter(format).echo_formatted_dataframes(user_df)
@users.command()
@role_name_option("Name of role to remove.")
@username_option("Username of the target user.")
@sdk_options()
def remove_role(state, username, role_name):
    """Remove the specified role from the user with the specified username."""
    # NOTE(review): argument order here is (sdk, role_name, username), while
    # add_role passes (sdk, username, role_name) to its helper — confirm this
    # matches _remove_user_role's signature (defined elsewhere in this module).
    _remove_user_role(state.sdk, role_name, username)
@users.command(name="move")
@username_option("The username of the user to move.", required=True)
@org_id_option
@sdk_options()
def change_organization(state, username, org_id):
    """Change the organization of the user with the given username
    to the org with the given org UID."""
    # Delegates to the module-level helper shared with the bulk `move` command.
    _change_organization(state.sdk, username, org_id)
@users.command()
@click.argument("username")
@click.argument(
    "date", type=click.DateTime(formats=["%Y-%m-%d"]), required=False, metavar="DATE"
)
@click.option("--clear", is_flag=True, help="Clears the current `start_date` value.")
@sdk_options()
def update_start_date(state, username, date, clear):
    """Sets the `start_date` on a User's risk profile (useful for users on the New Hire Watchlist).
    Date format: %Y-%m-%d"""
    if clear:
        date = ""
    elif not date:
        raise Code42CLIError("Must supply DATE argument if --clear is not used.")
    profile_id = _get_user(state.sdk, username)["userId"]
    state.sdk.userriskprofile.update(profile_id, start_date=date)
@users.command()
@click.argument("username")
@click.argument("note", required=False)
@click.option("--clear", is_flag=True, help="Clears the current `notes` value.")
@click.option(
    "--append",
    is_flag=True,
    help="Appends provided note to existing note text as a new line.",
)
@sdk_options()
def update_risk_profile_notes(state, username, note, clear, append):
    """Sets the `notes` value of a User's risk profile.

    WARNING: Overwrites any existing note value."""
    if not note and not clear:
        raise Code42CLIError("Must supply NOTE argument if --clear is not used.")
    user = _get_user(state.sdk, username)
    user_id = user["userId"]
    # With --append, prefix the new note with the existing text (separated by
    # a blank line) instead of replacing it outright.
    if append and user["notes"]:
        note = user["notes"] + f"\n\n{note}"
    # --clear wins over any supplied NOTE and --append: the stored note is
    # emptied regardless of what was computed above.
    if clear:
        note = ""
    state.sdk.userriskprofile.update(user_id, notes=note)
+ + Each user has a default cloud alias of their Code42 username with up to one additional alias.""" + user = _get_user(state.sdk, username) + aliases = user["cloudAliases"] + if aliases: + click.echo(aliases) + else: + click.echo(f"No cloud aliases for user '{username}' found.") + + +@users.group(cls=OrderedGroup) +@sdk_options(hidden=True) +def orgs(state): + """Tools for viewing user orgs.""" + pass + + +def _get_orgs_header(): + return { + "orgId": "ID", + "orgUid": "UID", + "orgName": "Name", + "status": "Status", + "parentOrgId": "Parent ID", + "parentOrgUid": "Parent UID", + "type": "Type", + "classification": "Classification", + "creationDate": "Creation Date", + "settings": "Settings", + } + + +@orgs.command(name="list") +@format_option +@sdk_options() +def list_orgs( + state, + format, +): + """List all orgs.""" + pages = state.sdk.orgs.get_all() + formatter = OutputFormatter(format, _get_orgs_header()) + orgs = [org for page in pages for org in page["orgs"]] + if orgs: + formatter.echo_formatted_list(orgs) + else: + click.echo("No orgs found.") + + +@orgs.command(name="show") +@click.argument("org-uid") +@format_option +@sdk_options() +def show_org( + state, + org_uid, + format, +): + """Show org details.""" + formatter = OutputFormatter(format) + try: + response = state.sdk.orgs.get_by_uid(org_uid) + formatter.echo_formatted_list([response.data]) + except Py42NotFoundError: + raise Code42CLIError(f"Invalid org UID {org_uid}.") + + +@users.group(cls=OrderedGroup) +@sdk_options(hidden=True) +def bulk(state): + """Tools for managing users in bulk.""" + pass + + +users_generate_template = generate_template_cmd_factory( + group_name="users", + commands_dict={ + "update": _bulk_user_update_headers, + "move": _bulk_user_move_headers, + "add-alias": _bulk_user_alias_headers, + "remove-alias": _bulk_user_alias_headers, + "update-risk-profile": _bulk_user_risk_profile_headers, + }, + help_message="Generate the CSV template needed for bulk user commands.", +) 
@bulk.command(
    name="update",
    help="Update a list of users from the provided CSV in format: "
    f"{','.join(_bulk_user_update_headers)}",
)
@read_csv_arg(headers=_bulk_user_update_headers)
@format_option
@sdk_options()
def bulk_update(state, csv_rows, format):
    """Update a list of users from the provided CSV."""

    # Initialize the SDK before starting any bulk processes
    # to prevent multiple instances and having to enter 2fa multiple times.
    sdk = state.sdk

    # Seed an "updated" status column on the first row so the formatter's
    # header (built from csv_rows[0]'s keys) includes it.
    # NOTE(review): assumes csv_rows is non-empty — presumably read_csv_arg
    # rejects empty files; confirm.
    csv_rows[0]["updated"] = "False"
    formatter = OutputFormatter(format, {key: key for key in csv_rows[0].keys()})
    stats = create_worker_stats(len(csv_rows))

    def handle_row(**row):
        # Worker: apply the update and record success/failure in the row's
        # "updated" column so the result can be echoed back to the user.
        try:
            _update_user(
                sdk, **{key: row[key] for key in row.keys() if key != "updated"}
            )
            row["updated"] = "True"
        except Exception as err:
            row["updated"] = f"False: {err}"
            stats.increment_total_errors()
        return row

    result_rows = run_bulk_process(
        handle_row,
        csv_rows,
        progress_label="Updating users:",
        stats=stats,
        raise_global_error=False,
    )
    formatter.echo_formatted_list(result_rows)
@bulk.command(
    name="deactivate",
    help=f"Deactivate a list of users from the provided CSV in format: {','.join(_bulk_user_activation_headers)}",
)
@read_csv_arg(headers=_bulk_user_activation_headers)
@format_option
@sdk_options()
def bulk_deactivate(state, csv_rows, format):
    """Deactivate a list of users."""

    # Initialize the SDK before starting any bulk processes
    # to prevent multiple instances and having to enter 2fa multiple times.
    sdk = state.sdk

    # Seed a "deactivated" status column on the first row so it appears in
    # the output header. NOTE(review): assumes csv_rows is non-empty.
    csv_rows[0]["deactivated"] = "False"
    formatter = OutputFormatter(format, {key: key for key in csv_rows[0].keys()})
    stats = create_worker_stats(len(csv_rows))

    def handle_row(**row):
        # Worker: deactivate one user and record the outcome in the row.
        try:
            _deactivate_user(
                sdk, **{key: row[key] for key in row.keys() if key != "deactivated"}
            )
            row["deactivated"] = "True"
        except Exception as err:
            row["deactivated"] = f"False: {err}"
            stats.increment_total_errors()
        return row

    result_rows = run_bulk_process(
        handle_row,
        csv_rows,
        progress_label="Deactivating users:",
        stats=stats,
        raise_global_error=False,
    )
    formatter.echo_formatted_list(result_rows)
+ sdk = state.sdk + + csv_rows[0]["reactivated"] = "False" + formatter = OutputFormatter(format, {key: key for key in csv_rows[0].keys()}) + stats = create_worker_stats(len(csv_rows)) + + def handle_row(**row): + try: + _reactivate_user( + sdk, **{key: row[key] for key in row.keys() if key != "reactivated"} + ) + row["reactivated"] = "True" + except Exception as err: + row["reactivated"] = f"False: {err}" + stats.increment_total_errors() + return row + + result_rows = run_bulk_process( + handle_row, + csv_rows, + progress_label="Reactivating users:", + stats=stats, + raise_global_error=False, + ) + formatter.echo_formatted_list(result_rows) + + +@bulk.command( + name="add-roles", + help=f"Add roles to a list of users from the provided CSV in format: {','.join(_bulk_user_roles_headers)}", +) +@read_csv_arg(headers=_bulk_user_roles_headers) +@format_option +@sdk_options() +def bulk_add_roles(state, csv_rows, format): + """Bulk add roles to a list of users.""" + + # Initialize the SDK before starting any bulk processes + # to prevent multiple instances and having to enter 2fa multiple times. 
+ sdk = state.sdk + status_header = "role added" + + csv_rows[0][status_header] = "False" + formatter = OutputFormatter(format, {key: key for key in csv_rows[0].keys()}) + stats = create_worker_stats(len(csv_rows)) + + def handle_row(**row): + try: + _add_user_role( + sdk, **{key: row[key] for key in row.keys() if key != status_header} + ) + row[status_header] = "True" + except Exception as err: + row[status_header] = f"False: {err}" + stats.increment_total_errors() + return row + + result_rows = run_bulk_process( + handle_row, + csv_rows, + progress_label="Adding roles to users:", + stats=stats, + raise_global_error=False, + ) + formatter.echo_formatted_list(result_rows) + + +@bulk.command( + name="remove-roles", + help=f"Remove roles from a list of users from the provided CSV in format: {','.join(_bulk_user_roles_headers)}", +) +@read_csv_arg(headers=_bulk_user_roles_headers) +@format_option +@sdk_options() +def bulk_remove_roles(state, csv_rows, format): + """Bulk remove roles from a list of users.""" + + # Initialize the SDK before starting any bulk processes + # to prevent multiple instances and having to enter 2fa multiple times. 
+ sdk = state.sdk + success_header = "role removed" + + csv_rows[0][success_header] = "False" + formatter = OutputFormatter(format, {key: key for key in csv_rows[0].keys()}) + stats = create_worker_stats(len(csv_rows)) + + def handle_row(**row): + try: + _remove_user_role( + sdk, **{key: row[key] for key in row.keys() if key != success_header} + ) + row[success_header] = "True" + except Exception as err: + row[success_header] = f"False: {err}" + stats.increment_total_errors() + return row + + result_rows = run_bulk_process( + handle_row, + csv_rows, + progress_label="Removing roles from users:", + stats=stats, + raise_global_error=False, + ) + formatter.echo_formatted_list(result_rows) + + +@bulk.command( + name="add-alias", + help=f"Add aliases to a list of users from the provided CSV in format: {','.join(_bulk_user_alias_headers)}.\n\nA cloud alias is the username an employee uses to access cloud services such as Google Drive or Box. Adding a cloud alias allows Incydr to link a user's cloud activity with their Code42 username. Each user has a default cloud alias of their Code42 username. You can add one additional alias.", +) +@read_csv_arg(headers=_bulk_user_alias_headers) +@format_option +@sdk_options() +def bulk_add_alias(state, csv_rows, format): + """Bulk add aliases to users""" + + # Initialize the SDK before starting any bulk processes + # to prevent multiple instances and having to enter 2fa multiple times. 
+ sdk = state.sdk + success_header = "alias added" + + csv_rows[0][success_header] = "False" + formatter = OutputFormatter(format, {key: key for key in csv_rows[0].keys()}) + stats = create_worker_stats(len(csv_rows)) + + def handle_row(**row): + try: + _add_cloud_alias( + sdk, **{key: row[key] for key in row.keys() if key != success_header} + ) + row[success_header] = "True" + except Exception as err: + row[success_header] = f"False: {err}" + stats.increment_total_errors() + return row + + result_rows = run_bulk_process( + handle_row, + csv_rows, + progress_label="Adding aliases to users:", + stats=stats, + raise_global_error=False, + ) + formatter.echo_formatted_list(result_rows) + + +@bulk.command( + name="remove-alias", + help=f"Remove aliases from a list of users from the provided CSV in format: {','.join(_bulk_user_alias_headers)}", +) +@read_csv_arg(headers=_bulk_user_alias_headers) +@format_option +@sdk_options() +def bulk_remove_alias(state, csv_rows, format): + """Bulk remove aliases from users""" + + # Initialize the SDK before starting any bulk processes + # to prevent multiple instances and having to enter 2fa multiple times. 
+ sdk = state.sdk + success_header = "alias removed" + + csv_rows[0][success_header] = "False" + formatter = OutputFormatter(format, {key: key for key in csv_rows[0].keys()}) + stats = create_worker_stats(len(csv_rows)) + + def handle_row(**row): + try: + _remove_cloud_alias( + sdk, **{key: row[key] for key in row.keys() if key != success_header} + ) + row[success_header] = "True" + except Exception as err: + row[success_header] = f"False: {err}" + stats.increment_total_errors() + return row + + result_rows = run_bulk_process( + handle_row, + csv_rows, + progress_label="Removing aliases from users:", + stats=stats, + raise_global_error=False, + ) + formatter.echo_formatted_list(result_rows) + + +@bulk.command( + name="update-risk-profile", + help=f"Update user risk profile data from the provided CSV in format: {','.join(_bulk_user_risk_profile_headers)}" + "\n\nTo clear a value, set column item to the string: 'null'.", +) +@format_option +@read_csv_arg(headers=_bulk_user_risk_profile_headers) +@click.option( + "--append-notes", + is_flag=True, + help="Append provided note value to already existing note on a new line. 
Defaults to overwrite.", +) +@sdk_options() +def bulk_update_risk_profile(state, csv_rows, format, append_notes): + """Bulk update User Risk Profile data.""" + sdk = state.sdk + + success_header = "updated_user" + formatter = OutputFormatter( + format, {key: key for key in [*csv_rows[0].keys(), success_header]} + ) + stats = create_worker_stats(len(csv_rows)) + + def handle_row(**row): + try: + updated_user = _update_userriskprofile( + sdk, append_notes=append_notes, **row + ) + row[success_header] = updated_user + except Exception as err: + row[success_header] = f"Error: {err}" + stats.increment_total_errors() + return row + + result_rows = run_bulk_process( + handle_row, + csv_rows, + progress_label="Updating user risk profile data:", + stats=stats, + raise_global_error=False, + ) + formatter.echo_formatted_list(result_rows) + + +def _add_user_role(sdk, username, role_name): + user_id = _get_legacy_user_id(sdk, username) + _get_role_id(sdk, role_name) # function provides role name validation + sdk.users.add_role(user_id, role_name) + + +def _remove_user_role(sdk, role_name, username): + user_id = _get_legacy_user_id(sdk, username) + _get_role_id(sdk, role_name) # function provides role name validation + sdk.users.remove_role(user_id, role_name) + + +def _get_legacy_user_id(sdk, username): + if not username: + # py42 returns all users when passing `None` to `get_by_username()`. 
        raise click.BadParameter("Username is required.")
    user = sdk.users.get_by_username(username)["users"]
    if len(user) == 0:
        raise UserDoesNotExistError(username)
    user_id = user[0]["userId"]
    return user_id


@functools.lru_cache()
def _get_role_id(sdk, role_name):
    """Return the role ID (as a string) for the given role name.

    Results are memoized via ``lru_cache`` so repeated lookups during a bulk
    run hit the API only once per (sdk, role_name) pair.

    Raises:
        Code42CLIError: If no role with ``role_name`` exists.
    """
    try:
        roles_dataframe = DataFrame.from_records(
            sdk.users.get_available_roles().data, index="roleName"
        )
        role_result = roles_dataframe.at[role_name, "roleId"]
        return str(role_result)  # extract the role ID from the series
    except KeyError:
        raise Code42CLIError(f"Role with name '{role_name}' not found.")


def _get_users_dataframe(sdk, columns, org_uid, role_id, active, include_roles):
    """Collect all pages of users matching the filters into one DataFrame.

    ``columns`` restricts the resulting DataFrame to the named keys; the other
    arguments are passed straight through to ``sdk.users.get_all``.
    """
    users_generator = sdk.users.get_all(
        active=active, org_uid=org_uid, role_id=role_id, incRoles=include_roles
    )
    users_list = []
    for page in users_generator:
        users_list.extend(page["users"])

    return DataFrame.from_records(users_list, columns=columns)


def _add_legal_hold_membership_to_user_dataframe(sdk, df):
    """Left-join active legal-hold membership info onto a users DataFrame.

    Users on multiple holds get their hold UIDs/names comma-joined into a
    single cell. If there are no active custodians, ``df`` is returned as-is.
    """
    columns = ["legalHold.legalHoldUid", "legalHold.name", "user.userUid"]

    custodians = list(_get_all_active_hold_memberships(sdk))

    if len(custodians) == 0:
        return df

    # One row per user: flatten nested membership dicts, then aggregate the
    # (possibly multiple) holds per user into comma-separated strings.
    legal_hold_member_dataframe = (
        json_normalize(custodians)[columns]
        .groupby(["user.userUid"])
        .agg(",".join)
        .rename(
            {
                "legalHold.legalHoldUid": "legalHoldUid",
                "legalHold.name": "legalHoldName",
            },
            axis=1,
        )
    )
    df = df.merge(
        legal_hold_member_dataframe,
        how="left",
        left_on="userUid",
        right_on="user.userUid",
    )

    return df


def _get_all_active_hold_memberships(sdk):
    """Yield every membership record across all active legal-hold matters."""
    for page in sdk.legalhold.get_all_matters(active=True):
        for matter in page["legalHolds"]:
            for _page in sdk.legalhold.get_all_matter_custodians(
                legal_hold_uid=matter["legalHoldUid"], active=True
            ):
                yield from _page["legalHoldMemberships"]


def _update_user(
    sdk,
    user_id,
    username,
    email,
    password,
    first_name,
    last_name,
    notes,
    archive_size_quota,
):
    """Update a user's profile fields via the legacy users API."""
    return sdk.users.update_user(
        user_id,
        username=username,
        email=email,
        password=password,
        first_name=first_name,
        last_name=last_name,
        notes=notes,
        archive_size_quota_bytes=archive_size_quota,
    )


def _change_organization(sdk, username, org_id):
    """Move the named user into the organization identified by ``org_id`` (an org UID)."""
    user_id = _get_legacy_user_id(sdk, username)
    org_id = _get_org_id(sdk, org_id)
    return sdk.users.change_org_assignment(user_id=int(user_id), org_id=int(org_id))


def _get_org_id(sdk, org_id):
    # Translate an org UID into the numeric org ID the assignment API expects.
    org = sdk.orgs.get_by_uid(org_id)
    return org["orgId"]


def _deactivate_user(sdk, username):
    """Deactivate the user with the given username."""
    user_id = _get_legacy_user_id(sdk, username)
    sdk.users.deactivate(user_id)


def _reactivate_user(sdk, username):
    """Reactivate the user with the given username."""
    user_id = _get_legacy_user_id(sdk, username)
    sdk.users.reactivate(user_id)


def _get_user(sdk, username):
    # use when retrieving the user risk profile information
    try:
        return sdk.userriskprofile.get_by_username(username)
    except Py42UserRiskProfileNotFound:
        raise UserDoesNotExistError(username)


def _add_cloud_alias(sdk, username, alias):
    """Add a cloud alias to the user's risk profile."""
    user = _get_user(sdk, username)
    sdk.userriskprofile.add_cloud_aliases(user["userId"], alias)


def _remove_cloud_alias(sdk, username, alias):
    """Remove a cloud alias from the user's risk profile."""
    user = _get_user(sdk, username)
    sdk.userriskprofile.delete_cloud_aliases(user["userId"], alias)


def _update_userriskprofile(
    sdk, append_notes=False, username=None, start_date=None, end_date=None, notes=None
):
    """Update a user's risk profile start/end dates and notes.

    When ``append_notes`` is set, the new note text is appended (on a blank
    line) to the existing note instead of replacing it.
    """
    user = _get_user(sdk, username)
    user_id = user["userId"]
    if append_notes and notes != "null":
        # NOTE(review): assumes user["notes"] is a string; if the profile has
        # no existing note this concatenation may fail — confirm API behavior.
        notes = user["notes"] + f"\n\n{notes}"

    # py42 interprets empty string as "clear this value" for kwarg values.
Since empty CSV columns + # get parsed as "" we want to have user provide explicit 'null' string to indicate desire to + # clear instead of just not update value + start_date = ( + None if start_date == "" else ("" if start_date == "null" else start_date) + ) + end_date = None if end_date == "" else ("" if end_date == "null" else end_date) + notes = None if notes == "" else ("" if notes == "null" else notes) + + updated_user = sdk.userriskprofile.update( + user_id, start_date=start_date, end_date=end_date, notes=notes + ) + return { + k: v + for k, v in updated_user.data.items() + if k in ["username", "userId", "startDate", "endDate", "notes"] + } diff --git a/src/code42cli/cmds/util.py b/src/code42cli/cmds/util.py new file mode 100644 index 000000000..6841f5c36 --- /dev/null +++ b/src/code42cli/cmds/util.py @@ -0,0 +1,96 @@ +import itertools + +from py42.sdk.queries.alerts.filters import DateObserved +from py42.sdk.queries.fileevents.filters import EventTimestamp +from py42.sdk.queries.fileevents.filters import ExposureType +from py42.sdk.queries.fileevents.filters import InsertionTimestamp +from py42.sdk.queries.fileevents.v2.filters.event import Inserted +from py42.sdk.queries.fileevents.v2.filters.risk import Severity +from py42.sdk.queries.fileevents.v2.filters.timestamp import Timestamp +from py42.sdk.queries.query_filter import FilterGroup +from py42.sdk.queries.query_filter import QueryFilterTimestampField + +from code42cli import errors +from code42cli.date_helper import verify_timestamp_order +from code42cli.logger import get_main_cli_logger +from code42cli.output_formats import OutputFormat + +logger = get_main_cli_logger() + + +def convert_to_or_query(filter_groups): + and_group = FilterGroup([], "AND") + or_group = FilterGroup([], "OR") + filters = itertools.chain.from_iterable([f.filter_list for f in filter_groups]) + for _filter in filters: + if _is_exempt_filter(_filter): + and_group.filter_list.append(_filter) + else: + 
            or_group.filter_list.append(_filter)
    if and_group.filter_list:
        return [and_group, or_group]
    else:
        return [or_group]


def _is_exempt_filter(f):
    # exclude timestamp filters by default from "OR" queries
    # if other filters need to be exempt when building a query, append them to this list
    # can either be a `QueryFilter` subclass, or a composed `FilterGroup` if more precision
    # is needed for which filters should be "AND"ed
    or_query_exempt_filters = [
        InsertionTimestamp,
        EventTimestamp,
        DateObserved,
        ExposureType.exists(),
        # V2 Filters
        Timestamp,
        Inserted,
        Severity.not_eq(Severity.NO_RISK_INDICATED),
    ]

    for exempt in or_query_exempt_filters:
        if isinstance(exempt, FilterGroup):
            # Composed FilterGroup exemptions match by membership.
            if f in exempt:
                return True
            else:
                continue
        # Filter-class exemptions match on the filter's term name.
        elif f.term == exempt._term:
            return True
    return False


def try_get_default_header(include_all, default_header, output_format):
    """Returns appropriate header based on include-all and output format. If returns None,
    the CLI format option will figure out the header based on the data keys.

    Raises:
        Code42CLIError: If ``--include-all`` is used with a non-table format.
    """
    output_header = None if include_all else default_header
    if output_format != OutputFormat.TABLE and include_all:
        err_text = "--include-all only allowed for Table output format."
        logger.log_error(err_text)
        raise errors.Code42CLIError(err_text)
    return output_header


def create_time_range_filter(filter_cls, begin_date=None, end_date=None):
    """Creates a filter using the given filter class (must be a subclass of
    :class:`py42.sdk.queries.query_filter.QueryFilterTimestampField`) and date args. Returns
    `None` if both begin_date and end_date args are `None`.

    Args:
        filter_cls: The class of filter to create. (must be a subclass of
            :class:`py42.sdk.queries.query_filter.QueryFilterTimestampField`)
        begin_date: The begin date for the range.
        end_date: The end date for the range.
+ """ + if not issubclass(filter_cls, QueryFilterTimestampField): + raise Exception("filter_cls must be a subclass of QueryFilterTimestampField") + + if begin_date and end_date: + verify_timestamp_order(begin_date, end_date) + return filter_cls.in_range(begin_date, end_date) + + elif begin_date and not end_date: + return filter_cls.on_or_after(begin_date) + + elif end_date and not begin_date: + return filter_cls.on_or_before(end_date) diff --git a/src/code42cli/cmds/watchlists.py b/src/code42cli/cmds/watchlists.py new file mode 100644 index 000000000..4c6835e99 --- /dev/null +++ b/src/code42cli/cmds/watchlists.py @@ -0,0 +1,270 @@ +import csv + +import click +from pandas import DataFrame +from py42.constants import WatchlistType +from py42.exceptions import Py42NotFoundError +from py42.exceptions import Py42WatchlistNotFound + +from code42cli.bulk import generate_template_cmd_factory +from code42cli.bulk import run_bulk_process +from code42cli.click_ext.groups import OrderedGroup +from code42cli.click_ext.options import incompatible_with +from code42cli.click_ext.types import AutoDecodedFile +from code42cli.errors import Code42CLIError +from code42cli.options import format_option +from code42cli.options import sdk_options +from code42cli.output_formats import DataFrameOutputFormatter +from code42cli.util import deprecation_warning + +DEPRECATION_TEXT = "Incydr functionality is deprecated. Use the Incydr CLI instead (https://developer.code42.com/)." 
+ + +@click.group(cls=OrderedGroup) +@sdk_options(hidden=True) +def watchlists(state): + """DEPRECATED - Manage watchlist user memberships.""" + deprecation_warning(DEPRECATION_TEXT) + pass + + +@watchlists.command("list") +@format_option +@sdk_options() +def _list(state, format): + """List all watchlists.""" + pages = state.sdk.watchlists.get_all() + dfs = (DataFrame(page["watchlists"]) for page in pages) + formatter = DataFrameOutputFormatter(format) + formatter.echo_formatted_dataframes(dfs) + + +@watchlists.command() +@click.option( + "--watchlist-id", + help="ID of the watchlist.", +) +@click.option( + "--watchlist-type", + type=click.Choice(WatchlistType.choices()), + help="Type of watchlist to list.", + cls=incompatible_with("watchlist_id"), +) +@click.option( + "--only-included-users", + help="Restrict results to users explicitly added to watchlist via API or Console. " + "Users added implicitly via group membership or other dynamic rule will not be listed.", + is_flag=True, +) +@format_option +@sdk_options() +def list_members(state, watchlist_type, watchlist_id, only_included_users, format): + """List all members on a given watchlist.""" + if not watchlist_id and not watchlist_type: + raise click.ClickException("--watchlist-id OR --watchlist-type is required.") + if watchlist_type: + watchlist_id = state.sdk.watchlists._watchlists_service.watchlist_type_id_map[ + watchlist_type + ] + if only_included_users: + pages = state.sdk.watchlists.get_all_included_users(watchlist_id) + dfs = (DataFrame(page["includedUsers"]) for page in pages) + else: + pages = state.sdk.watchlists.get_all_watchlist_members(watchlist_id) + dfs = (DataFrame(page["watchlistMembers"]) for page in pages) + formatter = DataFrameOutputFormatter(format) + formatter.echo_formatted_dataframes(dfs) + + +@watchlists.command() +@click.option( + "--watchlist-id", + help="ID of the watchlist.", +) +@click.option( + "--watchlist-type", + type=click.Choice(WatchlistType.choices()), + help="Type of 
watchlist to add user to.", + cls=incompatible_with("watchlist_id"), +) +@click.argument("user", metavar="[USER_ID|USERNAME]") +@sdk_options() +def add(state, watchlist_id, watchlist_type, user): + """Add a user to a watchlist.""" + if not watchlist_id and not watchlist_type: + raise click.ClickException("--watchlist-id OR --watchlist-type is required.") + try: + user = int(user) + except ValueError: + # assume username if `user` is not an int + user = state.sdk.userriskprofile.get_by_username(user)["userId"] + try: + if watchlist_id: + state.sdk.watchlists.add_included_users_by_watchlist_id(user, watchlist_id) + elif watchlist_type: + state.sdk.watchlists.add_included_users_by_watchlist_type( + user, watchlist_type + ) + except Py42WatchlistNotFound: + raise + except Py42NotFoundError: + raise Code42CLIError(f"User ID {user} not found.") + + +@watchlists.command() +@click.option("--watchlist-id", help="ID of the watchlist.") +@click.option( + "--watchlist-type", + type=click.Choice(WatchlistType.choices()), + help="Type of watchlist to remove user from.", + cls=incompatible_with("watchlist_id"), +) +@click.argument("user", metavar="[USER_ID|USERNAME]") +@sdk_options() +def remove(state, watchlist_id, watchlist_type, user): + """Remove a user from a watchlist.""" + if not watchlist_id and not watchlist_type: + raise click.ClickException("--watchlist-id OR --watchlist-type is required.") + try: + user = int(user) + except ValueError: + # assume username if `user` is not an int + user = state.sdk.userriskprofile.get_by_username(user)["userId"] + try: + if watchlist_id: + state.sdk.watchlists.remove_included_users_by_watchlist_id( + user, watchlist_id + ) + elif watchlist_type: + state.sdk.watchlists.remove_included_users_by_watchlist_type( + user, watchlist_type + ) + except Py42WatchlistNotFound: + raise + except Py42NotFoundError: + raise Code42CLIError(f"User ID {user} not found.") + + +@watchlists.group(cls=OrderedGroup) +@sdk_options(hidden=True) +def 
bulk(state): + """Tools for executing bulk watchlist actions.""" + pass + + +watchlists_generate_template = generate_template_cmd_factory( + group_name="watchlists", + commands_dict={ + "add": ["watchlist_id", "watchlist_type", "user_id", "username"], + "remove": ["watchlist_id", "watchlist_type", "user_id", "username"], + }, +) +bulk.add_command(watchlists_generate_template) + + +@bulk.command( + name="add", + help="Bulk add users to watchlists using a CSV file. Requires either a `watchlist_id` or " + "`watchlist_type` column header to identify the watchlist, and either a `user_id` or " + "`username` column header to identify the user to add.", +) +@click.argument( + "csv_rows", + metavar="CSV_FILE", + type=AutoDecodedFile("r"), + callback=lambda ctx, param, arg: csv.DictReader(arg), +) +@sdk_options() +def bulk_add(state, csv_rows): + headers = csv_rows.fieldnames + if "user_id" not in headers and "username" not in headers: + raise Code42CLIError( + "CSV requires either a `username` or `user_id` " + "column to identify which users to add to watchlist." + ) + if "watchlist_id" not in headers and "watchlist_type" not in headers: + raise Code42CLIError( + "CSV requires either a `watchlist_id` or `watchlist_type` " + "column to identify which watchlist to add user to." + ) + + sdk = state.sdk + + def handle_row( + watchlist_id=None, watchlist_type=None, user_id=None, username=None, **kwargs + ): + if username and not user_id: + user_id = sdk.userriskprofile.get_by_username(username)["userId"] + if watchlist_id: + sdk.watchlists.add_included_users_by_watchlist_id(user_id, watchlist_id) + elif watchlist_type: + choices = WatchlistType.choices() + if watchlist_type not in choices: + raise Code42CLIError( + f"Provided watchlist_type `{watchlist_type}` for username={username}, " + f"user_id={user_id} row is invalid. 
Must be one of: {','.join(choices)}" + ) + sdk.watchlists.add_included_users_by_watchlist_type(user_id, watchlist_type) + else: + raise Code42CLIError( + f"Row for username={username}, user_id={user_id} " + "missing value for `watchlist_id` or `watchlist_type` columns." + ) + + run_bulk_process( + handle_row, + list(csv_rows), + progress_label="Adding users to Watchlists:", + ) + + +@bulk.command( + name="remove", + help="Bulk remove users from watchlists using a CSV file. Requires either a `watchlist_id` or " + "`watchlist_type` column header to identify the watchlist, and either a `user_id` or " + "`username` header to identify the user to remove.", +) +@click.argument( + "csv_rows", + metavar="CSV_FILE", + type=AutoDecodedFile("r"), + callback=lambda ctx, param, arg: csv.DictReader(arg), +) +@sdk_options() +def bulk_remove(state, csv_rows): + headers = csv_rows.fieldnames + if "user_id" not in headers and "username" not in headers: + raise Code42CLIError( + "CSV requires either a `username` or `user_id` " + "column to identify which users to remove from watchlist." + ) + if "watchlist_id" not in headers and "watchlist_type" not in headers: + raise Code42CLIError( + "CSV requires either a `watchlist_id` or `watchlist_type` " + "column to identify which watchlist to remove user from." + ) + + sdk = state.sdk + + def handle_row( + watchlist_id=None, watchlist_type=None, user_id=None, username=None, **kwargs + ): + if username and not user_id: + user_id = sdk.userriskprofile.get_by_username(username)["userId"] + if watchlist_id: + sdk.watchlists.remove_included_users_by_watchlist_id(user_id, watchlist_id) + elif watchlist_type: + sdk.watchlists.remove_included_users_by_watchlist_type( + user_id, watchlist_type + ) + else: + raise Code42CLIError( + f"Row for username={username}, user_id={user_id} " + "missing value for `watchlist_id` or `watchlist_type` columns." 
+ ) + + run_bulk_process( + handle_row, + list(csv_rows), + progress_label="Adding users to Watchlists:", + ) diff --git a/src/code42cli/config.py b/src/code42cli/config.py new file mode 100644 index 000000000..f989d8f27 --- /dev/null +++ b/src/code42cli/config.py @@ -0,0 +1,171 @@ +import os +from configparser import ConfigParser + +import code42cli.util as util + + +class NoConfigProfileError(Exception): + def __init__(self, profile_arg_name=None): + message = ( + f"Profile '{profile_arg_name}' does not exist." + if profile_arg_name + else "Profile does not exist." + ) + super().__init__(message) + + +class ConfigAccessor: + DEFAULT_VALUE = "__DEFAULT__" + AUTHORITY_KEY = "c42_authority_url" + USERNAME_KEY = "c42_username" + IGNORE_SSL_ERRORS_KEY = "ignore-ssl-errors" + USE_V2_FILE_EVENTS_KEY = "use-v2-file-events" + API_CLIENT_AUTH_KEY = "api-client-auth" + DEFAULT_PROFILE = "default_profile" + _INTERNAL_SECTION = "Internal" + + def __init__(self, parser): + self.parser = parser + file_name = "config.cfg" + self.path = os.path.join(util.get_user_project_path(), file_name) + if not os.path.exists(self.path): + self._create_internal_section() + self._save() + else: + self.parser.read(self.path) + + def get_profile(self, name=None): + """Returns the profile with the given name. + If name is None, returns the default profile. + If the name does not exist or there is no existing profile, it will throw an exception. 
+ """ + name = name or self._default_profile_name + if name not in self.parser.sections() or name == self.DEFAULT_VALUE: + name = name if name != self.DEFAULT_VALUE else None + raise NoConfigProfileError(name) + return self.parser[name] + + def get_all_profiles(self): + """Returns all the available profiles.""" + profiles = [] + names = self._get_profile_names() + for name in names: + profiles.append(self.get_profile(name)) + return profiles + + def create_profile( + self, + name, + server, + username, + ignore_ssl_errors, + use_v2_file_events, + api_client_auth, + ): + """Creates a new profile if one does not already exist for that name.""" + try: + self.get_profile(name) + except NoConfigProfileError as ex: + if name is not None and name != self.DEFAULT_VALUE: + self._create_profile_section(name) + else: + raise ex + + profile = self.get_profile(name) + self.update_profile( + profile.name, + server, + username, + ignore_ssl_errors, + use_v2_file_events, + api_client_auth, + ) + self._try_complete_setup(profile) + + def update_profile( + self, + name, + server=None, + username=None, + ignore_ssl_errors=None, + use_v2_file_events=None, + api_client_auth=None, + ): + profile = self.get_profile(name) + if server: + profile[self.AUTHORITY_KEY] = server.strip() + if username: + profile[self.USERNAME_KEY] = username.strip() + if ignore_ssl_errors is not None: + profile[self.IGNORE_SSL_ERRORS_KEY] = str(ignore_ssl_errors) + if use_v2_file_events is not None: + profile[self.USE_V2_FILE_EVENTS_KEY] = str(use_v2_file_events) + if api_client_auth is not None: + profile[self.API_CLIENT_AUTH_KEY] = str(api_client_auth) + self._save() + + def switch_default_profile(self, new_default_name): + """Changes what is marked as the default profile in the internal section.""" + if self.get_profile(new_default_name) is None: + raise NoConfigProfileError(new_default_name) + self._internal[self.DEFAULT_PROFILE] = new_default_name + self._save() + + def delete_profile(self, name): + 
"""Deletes a profile.""" + if self.get_profile(name) is None: + raise NoConfigProfileError(name) + self.parser.remove_section(name) + if name == self._default_profile_name: + self._internal[self.DEFAULT_PROFILE] = self.DEFAULT_VALUE + self._save() + + @property + def _internal(self): + return self.parser[self._INTERNAL_SECTION] + + @property + def _default_profile_name(self): + return self._internal[self.DEFAULT_PROFILE] + + def _get_profile_names(self): + names = list(self.parser.sections()) + names.remove(self._INTERNAL_SECTION) + return names + + def _create_internal_section(self): + self.parser.add_section(self._INTERNAL_SECTION) + self.parser[self._INTERNAL_SECTION] = {} + self.parser[self._INTERNAL_SECTION][self.DEFAULT_PROFILE] = self.DEFAULT_VALUE + + def _create_profile_section(self, name): + self.parser.add_section(name) + self.parser[name] = {} + self.parser[name][self.AUTHORITY_KEY] = self.DEFAULT_VALUE + self.parser[name][self.USERNAME_KEY] = self.DEFAULT_VALUE + self.parser[name][self.IGNORE_SSL_ERRORS_KEY] = str(False) + self.parser[name][self.USE_V2_FILE_EVENTS_KEY] = str(False) + self.parser[name][self.API_CLIENT_AUTH_KEY] = str(False) + + def _save(self): + with open(self.path, "w+", encoding="utf-8") as file: + self.parser.write(file) + + def _try_complete_setup(self, profile): + authority = profile.get(self.AUTHORITY_KEY) + username = profile.get(self.USERNAME_KEY) + + authority_valid = authority and authority != self.DEFAULT_VALUE + username_valid = username and username != self.DEFAULT_VALUE + + if not authority_valid or not username_valid: + return + + self._save() + + default_profile = self._internal.get(self.DEFAULT_PROFILE) + if default_profile is None or default_profile == self.DEFAULT_VALUE: + self.switch_default_profile(profile.name) + + +config_accessor = ConfigAccessor(ConfigParser()) diff --git a/src/code42cli/date_helper.py b/src/code42cli/date_helper.py new file mode 100644 index 000000000..95510afe8 --- /dev/null +++ 
b/src/code42cli/date_helper.py @@ -0,0 +1,51 @@ +import re +from datetime import datetime +from datetime import timedelta +from datetime import timezone + +import click + +TIMESTAMP_REGEX = re.compile(r"(\d{4}-\d{2}-\d{2})\s*(.*)?") +MAGIC_TIME_REGEX = re.compile(r"(\d+)([dhm])$") + +_FORMAT_VALUE_ERROR_MESSAGE = ( + "input must be a date/time string (e.g. 'yyyy-MM-dd', " + "'yy-MM-dd HH:MM', 'yy-MM-dd HH:MM:SS'), or a short value in days, " + "hours, or minutes (e.g. 30d, 24h, 15m)" +) + + +def convert_datetime_to_timestamp(dt): + if dt is None: + return + return dt.replace(tzinfo=timezone.utc).timestamp() + + +def verify_timestamp_order( + min_timestamp, max_timestamp, min_param=("-b", "--begin"), max_param="--end" +): + if min_timestamp is None or max_timestamp is None: + return + if min_timestamp >= max_timestamp: + raise click.BadParameter( + param_hint=min_param, message=f"cannot be after {max_param} date." + ) + + +def limit_date_range(dt, max_days_back=90, param=None): + if dt is None: + return + now = datetime.utcnow().replace(tzinfo=timezone.utc) + if now - dt > timedelta(days=max_days_back): + raise click.BadParameter( + message=f"must be within {max_days_back} days.", param=param + ) + return dt + + +def round_datetime_to_day_start(dt): + return dt.replace(hour=0, minute=0, second=0, microsecond=0) + + +def round_datetime_to_day_end(dt): + return dt.replace(hour=23, minute=59, second=59, microsecond=999999) diff --git a/src/code42cli/enums.py b/src/code42cli/enums.py new file mode 100644 index 000000000..a1c44a861 --- /dev/null +++ b/src/code42cli/enums.py @@ -0,0 +1,31 @@ +from py42.choices import Choices + + +class JsonOutputFormat(Choices): + JSON = "JSON" + RAW = "RAW-JSON" + + def __iter__(self): + return iter([self.JSON, self.RAW]) + + +class OutputFormat(JsonOutputFormat): + TABLE = "TABLE" + CSV = "CSV" + + def __iter__(self): + return iter([self.TABLE, self.CSV, self.JSON, self.RAW]) + + +class SendToFileEventsOutputFormat(JsonOutputFormat): + 
CEF = "CEF" + + def __iter__(self): + return iter([self.CEF, self.JSON, self.RAW]) + + +class FileEventsOutputFormat(OutputFormat): + CEF = "CEF" + + def __iter__(self): + return iter([self.TABLE, self.CSV, self.JSON, self.RAW, self.CEF]) diff --git a/src/code42cli/errors.py b/src/code42cli/errors.py new file mode 100644 index 000000000..b66b588c2 --- /dev/null +++ b/src/code42cli/errors.py @@ -0,0 +1,63 @@ +import click +from click._compat import get_text_stderr + +from code42cli.logger import get_view_error_details_message + +ERRORED = False + + +class Code42CLIError(click.ClickException): + """Base CLI exception. The `message` param automatically gets logged to error file and printed + to stderr in red text. If `help` param is provided, it will also be printed to stderr after the + message but not logged to file. + """ + + def __init__(self, message, help=None): + self.help = help + super().__init__(message) + + def show(self, file=None): + """Override default `show` to print CLI errors in red text.""" + if file is None: + file = get_text_stderr() + click.secho(f"Error: {self.format_message()}", file=file, fg="red") + if self.help: + click.echo(self.help, err=True) + + +class LoggedCLIError(Code42CLIError): + """Exception to be raised when wanting to point users to error logs for error details. + + If `message` param is provided it will be printed to screen along with message on where to + find error details in the log. + """ + + def __init__(self, message=None): + self.message = message + super().__init__(message) + + def format_message(self): + locations_message = get_view_error_details_message() + return ( + f"{self.message}\n{locations_message}" + if self.message + else locations_message + ) + + +class UserDoesNotExistError(Code42CLIError): + """An error to represent a username that is not in our system. The CLI shows this error when + the user tries to add or remove a user that does not exist. 
This error is not shown during + bulk add or remove.""" + + def __init__(self, username): + super().__init__( + f"User '{username}' does not exist or you do not have permission to view them." + ) + + +class UserNotInLegalHoldError(Code42CLIError): + def __init__(self, username, matter_id): + super().__init__( + f"User '{username}' is not an active member of legal hold matter '{matter_id}'." + ) diff --git a/src/code42cli/extensions/__init__.py b/src/code42cli/extensions/__init__.py new file mode 100644 index 000000000..063a13217 --- /dev/null +++ b/src/code42cli/extensions/__init__.py @@ -0,0 +1,49 @@ +from code42cli.click_ext.groups import ExtensionGroup +from code42cli.main import CONTEXT_SETTINGS +from code42cli.options import debug_option +from code42cli.options import pass_state +from code42cli.options import profile_option + + +def sdk_options(f): + """Decorator that adds two `click.option`s (--profile, --debug) to wrapped command, as well as + passing the `code42cli.options.CLIState` object using the [click.make_pass_decorator](https://click.palletsprojects.com/en/7.x/api/#click.make_pass_decorator), + which automatically instantiates the `py42` sdk using the Code42 profile provided from the `--profile` + option. The `py42` sdk can be accessed from the `state.sdk` attribute. + + Example: + + @click.command() + @sdk_options + def get_current_user_command(state): + my_user = state.sdk.users.get_current() + print(my_user) + """ + f = profile_option()(f) + f = debug_option()(f) + f = pass_state(f) + return f + + +script = ExtensionGroup(context_settings=CONTEXT_SETTINGS) +"""A `click.Group` subclass that enables the Code42 CLI's custom error handling/logging to be used +in extension scripts. If only a single command is added to the `script` group it also uses that +command as the default, so the command name doesn't need to be called explicitly. 
# src/code42cli/file_readers.py
import csv

import click

from code42cli.click_ext.types import AutoDecodedFile
from code42cli.errors import Code42CLIError


def read_csv_arg(headers):
    """Helper for defining arguments that read from a csv file. Automatically converts
    the file name provided on command line to a list of csv rows (passed to command
    function as `csv_rows` param).
    """
    return click.argument(
        "csv_rows",
        metavar="CSV_FILE",
        type=AutoDecodedFile("r"),
        callback=lambda ctx, param, arg: read_csv(arg, headers=headers),
    )


def read_csv(file, headers):
    """Helper to read a csv file object into a list of dict rows.

    If CSV has a header row, all items in `headers` arg must be present in CSV or an
    error is raised. Any extra columns get filtered out of the resulting dicts.

    If no header row is present, the column count must match `headers` exactly or an
    error is raised.

    Raises:
        Code42CLIError: when the file is empty, has no data rows, has ambiguous
            header-less columns, or is missing required columns.
    """
    lines = file.readlines()

    # check if header is commented for flat-file backwards compatability
    if lines and lines[0].startswith("#"):
        # strip comment line
        lines.pop(0)

    # BUG FIX: an empty (or comment-only) file previously crashed with IndexError
    # on `lines[0]`; fail with the same clear error used for data-less CSVs.
    if not lines:
        raise Code42CLIError("CSV contains no data rows.")

    first_line = lines[0].strip().split(",")

    # handle when first row has all of our expected headers
    if all(field in first_line for field in headers):
        reader = csv.DictReader(lines[1:], fieldnames=first_line)
        csv_rows = [{key: row[key] for key in headers} for row in reader]
        if not csv_rows:
            raise Code42CLIError("CSV contains no data rows.")
        return csv_rows

    # handle when first row has no expected headers
    elif all(field not in first_line for field in headers):
        # only process header-less CSVs if we get exact expected column count
        if len(first_line) == len(headers):
            return list(csv.DictReader(lines, fieldnames=headers))
        else:
            raise Code42CLIError(
                "CSV data is ambiguous. Column count must match expected columns exactly when no "
                f"header row is present. Expected columns: {headers}"
            )
    # handle when first row has some expected headers but not all
    else:
        missing = [field for field in headers if field not in first_line]
        raise Code42CLIError(f"Missing required columns in csv: {missing}")


# src/code42cli/logger/__init__.py
import logging
import os
import traceback
from logging.handlers import RotatingFileHandler
from threading import Lock

from code42cli.enums import FileEventsOutputFormat
from code42cli.logger.formatters import FileEventDictToCEFFormatter
from code42cli.logger.formatters import FileEventDictToJSONFormatter
from code42cli.logger.formatters import FileEventDictToRawJSONFormatter
from code42cli.logger.handlers import NoPrioritySysLogHandler
from code42cli.util import get_url_parts
from code42cli.util import get_user_project_path
# prevent loggers from printing stacks to stderr if a pipe is broken
logging.raiseExceptions = False

# Guards one-time handler setup when loggers are built from multiple threads.
logger_deps_lock = Lock()
ERROR_LOG_FILE_NAME = "code42_errors.log"


def _get_formatter(output_format):
    """Map an output format name to the matching file-event log formatter."""
    if output_format == FileEventsOutputFormat.JSON:
        return FileEventDictToJSONFormatter()
    elif output_format == FileEventsOutputFormat.CEF:
        return FileEventDictToCEFFormatter()
    else:
        return FileEventDictToRawJSONFormatter()


def _init_logger(logger, handler, output_format):
    """Attach `handler` (formatted per `output_format`) to `logger` at INFO level."""
    formatter = _get_formatter(output_format)
    logger.setLevel(logging.INFO)
    return add_handler_to_logger(logger, handler, formatter)


def get_logger_for_server(hostname, protocol, output_format, certs):
    """Gets the logger that sends logs to a server for the given format.

    Args:
        hostname: The hostname of the server. It may include the port.
        protocol: The transfer protocol for sending logs.
        output_format: CEF, JSON, or RAW_JSON. Each type results in a different logger instance.
        certs: Use for passing SSL/TLS certificates when connecting to the server.
    """
    logger = logging.getLogger(f"code42_syslog_{output_format.lower()}")
    if logger_has_handlers(logger):
        return logger

    with logger_deps_lock:
        url_parts = get_url_parts(hostname)
        hostname = url_parts[0]
        port = url_parts[1] or 514  # default syslog port when none given
        # double-checked locking: another thread may have initialized meanwhile
        if not logger_has_handlers(logger):
            handler = NoPrioritySysLogHandler(hostname, port, protocol, certs)
            handler.connect_socket()
            return _init_logger(logger, handler, output_format)
    return logger


def _get_standard_formatter():
    """Formatter that emits the bare message with no metadata."""
    return logging.Formatter("%(message)s")


def _get_error_log_path():
    log_path = get_user_project_path("log")
    return os.path.join(log_path, ERROR_LOG_FILE_NAME)


def _create_error_file_handler():
    # delay=True: don't open/create the file until the first record is emitted.
    log_path = _get_error_log_path()
    return RotatingFileHandler(
        log_path, maxBytes=250000000, encoding="utf-8", delay=True
    )


def add_handler_to_logger(logger, handler, formatter):
    """Attach `handler` with `formatter` to `logger` and return the logger."""
    handler.setFormatter(formatter)
    logger.addHandler(handler)
    return logger


def logger_has_handlers(logger):
    # truthy when at least one handler is attached (used as a boolean)
    return len(logger.handlers)


def _get_error_file_logger():
    """Gets the logger where raw exceptions are logged."""
    logger = logging.getLogger("code42_error_logger")
    if logger_has_handlers(logger):
        return logger

    with logger_deps_lock:
        if not logger_has_handlers(logger):
            formatter = _create_formatter_for_error_file()
            handler = _create_error_file_handler()
            return add_handler_to_logger(logger, handler, formatter)
    return logger


def get_view_error_details_message():
    """Returns the error message that is printed when errors occur."""
    path = _get_error_log_path()
    return f"View details in {path}"


def _create_formatter_for_error_file():
    return logging.Formatter("%(asctime)s %(message)s")


class CliLogger:
    """Writes CLI error details to the rotating error log file."""

    def __init__(self):
        self._logger = _get_error_file_logger()

    def log_error(self, err):
        """Log `err` (any stringable object); silently skips falsy values."""
        message = str(err) if err else None
        if message:
            self._logger.error(message)

    def log_verbose_error(self, invocation_str=None, http_request=None):
        """For logging traces, invocation strs, and request parameters during exceptions to the
        error log file."""
        prefix = (
            "Exception occurred."
            if not invocation_str
            else f"Exception occurred from input: '{invocation_str}'."
        )
        # BUG FIX: `prefix` already ends with a period; the old
        # f"{prefix}. See error below." produced a doubled "..".
        message = f"{prefix} See error below."
        self.log_error(message)
        self.log_error(traceback.format_exc())
        if http_request:
            self.log_error(f"Request parameters: {http_request.body}")
def get_main_cli_logger():
    """Return a `CliLogger` wired to the rotating error log file."""
    return CliLogger()


# src/code42cli/logger/enums.py
class ServerProtocol:
    """Transport protocols for sending logs to a syslog server."""

    TCP = "TCP"
    UDP = "UDP"
    TLS_TCP = "TLS-TCP"

    def __iter__(self):
        # enables `list(ServerProtocol())` for click.Choice
        return iter([self.TCP, self.UDP, self.TLS_TCP])


# src/code42cli/logger/formatters.py
import json
from datetime import datetime
from logging import Formatter

from code42cli.maps import CEF_CUSTOM_FIELD_NAME_MAP
from code42cli.maps import FILE_EVENT_TO_SIGNATURE_ID_MAP
from code42cli.maps import JSON_TO_CEF_MAP

CEF_TEMPLATE = (
    "CEF:0|Code42|{productName}|1|{signatureID}|{eventName}|{severity}|{extension}"
)
# CEF extension keys whose values are timestamps needing ms-since-epoch conversion.
CEF_TIMESTAMP_FIELDS = ["end", "fileCreateTime", "fileModificationTime", "rt"]


class FileEventDictToCEFFormatter(Formatter):
    """Formats file event dicts into CEF format. Attach to a logger via `setFormatter` to use.

    Args:
        default_product_name: The default value to use in the product name segment of the
            CEF message.
        default_severity_level: The default integer between 1 and 10 to assign to the
            severity segment of the CEF message.
    """

    def __init__(
        self,
        default_product_name="Advanced Exfiltration Detection",
        default_severity_level="5",
    ):
        super().__init__()
        self._default_product_name = default_product_name
        self._default_severity_level = default_severity_level

    def format(self, record):
        """
        Args:
            record (LogRecord): `record.msg` must be a `dict`.
        """
        file_event_dict = record.msg
        # security events must convert to file event dict format before calling this.
        ext, evt, sig_id = map_event_to_cef(file_event_dict)
        cef_log = CEF_TEMPLATE.format(
            productName=self._default_product_name,
            signatureID=sig_id,
            eventName=evt,
            severity=self._default_severity_level,
            extension=ext,
        )
        return cef_log


class FileEventDictToJSONFormatter(Formatter):
    """Formats file event dicts into JSON format. Attach to a logger via `setFormatter` to use.
    Items in the dictionary whose values are `None`, empty string, or empty lists will be
    excluded from the JSON conversion (falsy values are dropped, but 0 is kept).
    """

    def format(self, record):
        """
        Args:
            record (LogRecord): `record.msg` must be a `dict`.
        """
        file_event_dict = record.msg
        file_event_dict = {
            key: file_event_dict[key]
            for key in file_event_dict
            if file_event_dict[key] or file_event_dict[key] == 0
        }
        return json.dumps(file_event_dict)


class FileEventDictToRawJSONFormatter(Formatter):
    """Formats file event dicts into JSON format. Attach to a logger via `setFormatter` to use."""

    def format(self, record):
        return json.dumps(record.msg)


def _format_cef_kvp(cef_field_key, cef_field_value):
    """Render one `key=value` CEF extension pair, handling custom fields, lists,
    and timestamp conversion."""
    if cef_field_key + "Label" in CEF_CUSTOM_FIELD_NAME_MAP:
        return _format_custom_cef_kvp(cef_field_key, cef_field_value)

    cef_field_value = _handle_nested_json_fields(cef_field_key, cef_field_value)
    if isinstance(cef_field_value, list):
        cef_field_value = _convert_list_to_csv(cef_field_value)
    elif cef_field_key in CEF_TIMESTAMP_FIELDS:
        cef_field_value = convert_file_event_timestamp_to_cef_timestamp(cef_field_value)
    return f"{cef_field_key}={cef_field_value}"


def _handle_nested_json_fields(cef_field_key, cef_field_value):
    """Flatten nested 'duser' recipient dicts to their cloudUsername values."""
    result = []
    if cef_field_key == "duser":
        # isinstance instead of `type(...) is dict` (idiomatic, accepts dict subclasses
        # the same way a duck-typed consumer would expect)
        result = [
            item["cloudUsername"] for item in cef_field_value if isinstance(item, dict)
        ]

    return result or cef_field_value


def _format_custom_cef_kvp(custom_cef_field_key, custom_cef_field_value):
    """Emit a custom field together with its required companion `...Label` pair."""
    custom_cef_label_key = f"{custom_cef_field_key}Label"
    custom_cef_label_value = CEF_CUSTOM_FIELD_NAME_MAP[custom_cef_label_key]
    return (
        f"{custom_cef_field_key}={custom_cef_field_value} "
        f"{custom_cef_label_key}={custom_cef_label_value}"
    )


def _convert_list_to_csv(_list):
    # values are expected to already be strings; join raises TypeError otherwise,
    # same as the previous per-element comprehension did
    return ",".join(_list)


def convert_file_event_timestamp_to_cef_timestamp(timestamp_value):
    """Convert an ISO-8601 UTC timestamp string to milliseconds-since-epoch (string)."""
    try:
        _datetime = datetime.strptime(timestamp_value, "%Y-%m-%dT%H:%M:%S.%fZ")
    except ValueError:
        # some events omit the fractional-seconds component
        _datetime = datetime.strptime(timestamp_value, "%Y-%m-%dT%H:%M:%SZ")
    value = f"{_datetime_to_ms_since_epoch(_datetime):.0f}"
    return value


def _datetime_to_ms_since_epoch(_datetime):
    # naive UTC epoch; datetime.utcfromtimestamp(0) is deprecated (Python 3.12+)
    # and evaluated to exactly this value
    epoch = datetime(1970, 1, 1)
    total_seconds = (_datetime - epoch).total_seconds()
    # total_seconds will be in decimals (millisecond precision)
    return total_seconds * 1000


def map_event_to_cef(event):
    """Return (extension, event_name, signature_id) CEF segments for a file event dict."""
    kvp_list = {
        JSON_TO_CEF_MAP[key]: event[key]
        for key in event
        if key in JSON_TO_CEF_MAP and (event[key] is not None and event[key] != [])
    }
    extension = " ".join(_format_cef_kvp(key, kvp_list[key]) for key in kvp_list)
    event_name = event.get("eventType", "UNKNOWN")
    signature_id = FILE_EVENT_TO_SIGNATURE_ID_MAP.get(event_name, "C42000")
    return extension, event_name, signature_id
# src/code42cli/logger/handlers.py
import logging
import socket
import ssl
import sys
from logging.handlers import SysLogHandler

from code42cli.logger.enums import ServerProtocol


class SyslogServerNetworkConnectionError(Exception):
    """An error raised when the connection is disrupted during logging."""

    def __init__(self):
        super().__init__(
            "The network connection broke while sending results. "
            "This might happen if your connection requires TLS and you are attempting "
            "unencrypted TCP communication."
        )


class NoPrioritySysLogHandler(SysLogHandler):
    """
    Overrides the default implementation of SysLogHandler to not send a `<PRI>` at the
    beginning of the message. Most CEF consumers seem to not expect the `<PRI>` to be
    present in CEF messages. Attach to a logger via `.addHandler` to use.

    `self.socket` is lazily loaded for testing purposes, so the connection does not get
    made for TCP/TLS until the first log record is about to be transmitted.

    Args:
        hostname: The hostname of the syslog server to send log messages to.
        port: The port of the syslog server to send log messages to.
        protocol: The protocol over which to submit syslog messages. Accepts TCP, UDP, or TLS.
        certs: Certs to specify when using TLS-TCP for the `protocol` argument. Use "ignore"
            for ssl.CERT_NONE (ignoring certificate validation).
    """

    def __init__(self, hostname, port, protocol, certs):
        self._hostname = hostname
        self._port = port
        self._protocol = protocol
        self._certs = certs
        self.address = (hostname, port)
        # deliberately bypass SysLogHandler.__init__ so no connection is made eagerly
        logging.Handler.__init__(self)
        self.socktype = _try_get_socket_type_from_protocol(protocol)
        self.socket = None

    @property
    def _wrap_socket(self):
        # True when the transport must be wrapped with TLS
        return self._protocol == ServerProtocol.TLS_TCP

    def connect_socket(self):
        """Call to initialize the socket. If using TCP/TLS, it will also establish the
        connection."""
        if not self.socket:
            self.socket = self._create_socket(self._hostname, self._port, self._certs)

    def _create_socket(self, hostname, port, certs):
        socket_info = self._get_socket_address_info(hostname, port)
        address_family, sock_type, proto, _, sa = socket_info
        sock = None
        try:
            sock = socket.socket(address_family, sock_type, proto)
            if self._wrap_socket:
                sock = _wrap_socket_for_ssl(sock, certs, hostname)
            if sock_type == socket.SOCK_STREAM:
                sock = _connect_socket(sock, sa)
            return sock
        except Exception as exc:
            # don't leak a half-constructed socket on failure
            if sock is not None:
                sock.close()
            raise exc

    def _get_socket_address_info(self, hostname, port):
        info = socket.getaddrinfo(hostname, port, 0, self.socktype)
        if not info:
            raise OSError("getaddrinfo() returns an empty list")
        return info[0]

    def emit(self, record):
        try:
            self._send_record(record)
        except Exception:
            self.handleError(record)

    def handleError(self, record):
        """Override logger's `handleError` method to exit if an exception is raised while
        trying to log, otherwise it would continue to gather and process events if the
        connection breaks but send them nowhere.
        """
        t, _, _ = sys.exc_info()
        if issubclass(t, ConnectionError):
            raise SyslogServerNetworkConnectionError()
        super().handleError(record)

    def _send_record(self, record):
        formatted_record = self.format(record)
        msg = formatted_record + "\n"
        msg = msg.encode("utf-8")
        if self.socktype == socket.SOCK_DGRAM:
            self.socket.sendto(msg, self.address)
        else:
            self.socket.sendall(msg)

    def close(self):
        # BUG FIX: the socket is created lazily, so it may still be None when the
        # handler is closed without `connect_socket()` ever having been called;
        # previously that raised AttributeError here.
        if self.socket is not None:
            if self._wrap_socket:
                self.socket.unwrap()
            self.socket.close()
        logging.Handler.close(self)


def _wrap_socket_for_ssl(sock, certs, hostname):
    do_ignore_certs = certs and certs.lower() == "ignore"
    if do_ignore_certs:
        certs = None
    context = ssl.create_default_context(cafile=certs)
    if do_ignore_certs:
        # certificate validation explicitly disabled by the user via "ignore"
        context.check_hostname = False
        context.verify_mode = ssl.CERT_NONE
    return context.wrap_socket(sock, server_hostname=hostname)


def _connect_socket(sock, sa):
    sock.settimeout(10)
    sock.connect(sa)
    # Set timeout back to None for 'blocking' mode, required for `sendall()`.
    sock.settimeout(None)
    return sock


def _try_get_socket_type_from_protocol(protocol):
    """Resolve a socket type for `protocol`, raising ValueError for unknown protocols."""
    socket_type = _get_socket_type_from_protocol(protocol)
    if socket_type is None:
        _raise_socket_type_error(protocol)
    return socket_type


def _get_socket_type_from_protocol(protocol):
    # returns None for unknown protocols; caller decides whether that is an error
    if protocol in [ServerProtocol.TCP, ServerProtocol.TLS_TCP]:
        return socket.SOCK_STREAM
    elif protocol == ServerProtocol.UDP:
        return socket.SOCK_DGRAM


def _raise_socket_type_error(protocol):
    msg = (
        "Could not determine socket type. "
        f"Expected one of {list(ServerProtocol())}, but got {protocol}."
    )
    raise ValueError(msg)
# src/code42cli/main.py
import os
import signal
import site
import sys
import warnings

import click
from click_plugins import with_plugins
from pkg_resources import iter_entry_points
from py42.settings import set_user_agent_prefix

from code42cli import BANNER
from code42cli import PRODUCT_NAME
from code42cli.__version__ import __version__
from code42cli.click_ext.groups import ExceptionHandlingGroup
from code42cli.cmds.alert_rules import alert_rules
from code42cli.cmds.alerts import alerts
from code42cli.cmds.auditlogs import audit_logs
from code42cli.cmds.cases import cases
from code42cli.cmds.devices import devices
from code42cli.cmds.legal_hold import legal_hold
from code42cli.cmds.profile import profile
from code42cli.cmds.securitydata import security_data
from code42cli.cmds.shell import shell
from code42cli.cmds.trustedactivities import trusted_activities
from code42cli.cmds.users import users
from code42cli.cmds.watchlists import watchlists
from code42cli.options import sdk_options

warnings.simplefilter("ignore", DeprecationWarning)


# Handle KeyboardInterrupts by just exiting instead of printing out a stack
def exit_on_interrupt(signal, frame):
    click.echo(err=True)
    sys.exit(1)


signal.signal(signal.SIGINT, exit_on_interrupt)

# Sets part of the user agent string that py42 attaches to requests for the purposes of
# identifying CLI users.
# NOTE(review): the space before the closing paren looks unintentional — confirm
# before changing, as it is part of the transmitted user agent.
set_user_agent_prefix(f"{PRODUCT_NAME}/{__version__} (Code42; code42.com )")

CONTEXT_SETTINGS = {
    "help_option_names": ["-h", "--help"],
    "max_content_width": 200,
}


def _echo_script_dir():
    """Print the directory containing the installed `code42` entry-point script.

    Searches the interpreter prefix first, then the per-user base, and exits on the
    first hit; falls through (no output, no exit) when nothing is found.
    """
    for base in (site.PREFIXES[0], site.USER_BASE):
        for root, _dirs, files in os.walk(base):
            if "code42" in files or "code42.exe" in files:
                # click.echo for consistency with the rest of the CLI output
                click.echo(root)
                sys.exit(0)


@with_plugins(iter_entry_points("code42cli.plugins"))
@click.group(
    cls=ExceptionHandlingGroup,
    context_settings=CONTEXT_SETTINGS,
    help=BANNER,
    invoke_without_command=True,
    no_args_is_help=True,
)
@click.option(
    "--python",
    is_flag=True,
    help="Print path to the python interpreter env that `code42cli` is installed in.",
)
@click.option(
    "--script-dir",
    is_flag=True,
    help="Print the directory the `code42` script was installed in (for adding to your PATH if needed).",
)
@sdk_options(hidden=True)
def cli(state, python, script_dir):
    if python:
        click.echo(sys.executable)
        sys.exit(0)
    if script_dir:
        _echo_script_dir()


cli.add_command(alerts)
cli.add_command(alert_rules)
cli.add_command(audit_logs)
cli.add_command(cases)
cli.add_command(devices)
cli.add_command(legal_hold)
cli.add_command(profile)
cli.add_command(security_data)
cli.add_command(shell)
cli.add_command(users)
cli.add_command(trusted_activities)
cli.add_command(watchlists)
"fileSize": "fsize", + "insertionTimestamp": "rt", + "md5Checksum": "fileHash", + "modifyTimestamp": "fileModificationTime", + "osHostName": "shost", + "processName": "sproc", + "processOwner": "spriv", + "publicIpAddress": "src", + "removableMediaBusType": "cs1", + "removableMediaCapacity": "cn1", + "removableMediaName": "cs3", + "removableMediaSerialNumber": "cs4", + "removableMediaVendor": "cs2", + "sharedWith": "duser", + "source": "sourceServiceName", + "syncDestination": "destinationServiceName", + "tabUrl": "request", + "url": "filePath", + "userUid": "suid", + "windowTitle": "requestClientApplication", +} + +CEF_CUSTOM_FIELD_NAME_MAP = { + "cn1Label": "Code42AEDRemovableMediaCapacity", + "cs1Label": "Code42AEDRemovableMediaBusType", + "cs2Label": "Code42AEDRemovableMediaVendor", + "cs3Label": "Code42AEDRemovableMediaName", + "cs4Label": "Code42AEDRemovableMediaSerialNumber", +} + +FILE_EVENT_TO_SIGNATURE_ID_MAP = { + "CREATED": "C42200", + "MODIFIED": "C42201", + "DELETED": "C42202", + "READ_BY_APP": "C42203", + "EMAILED": "C42204", +} diff --git a/src/code42cli/options.py b/src/code42cli/options.py new file mode 100644 index 000000000..7247d9b17 --- /dev/null +++ b/src/code42cli/options.py @@ -0,0 +1,225 @@ +import click + +from code42cli.click_ext.types import MagicDate +from code42cli.click_ext.types import TOTP +from code42cli.cmds.search.options import AdvancedQueryAndSavedSearchIncompatible +from code42cli.cmds.search.options import BeginOption +from code42cli.date_helper import convert_datetime_to_timestamp +from code42cli.date_helper import round_datetime_to_day_end +from code42cli.date_helper import round_datetime_to_day_start +from code42cli.enums import OutputFormat +from code42cli.enums import SendToFileEventsOutputFormat +from code42cli.errors import Code42CLIError +from code42cli.logger.enums import ServerProtocol +from code42cli.profile import get_profile +from code42cli.sdk_client import create_sdk + + +def yes_option(hidden=False): + return 
def yes_option(hidden=False):
    """Flag option that pre-answers every confirmation prompt with "yes"."""
    return click.option(
        "-y",
        "--assume-yes",
        is_flag=True,
        expose_value=False,
        callback=lambda ctx, param, value: ctx.obj.set_assume_yes(value),
        help='Assume "yes" as the answer to all prompts and run non-interactively.',
        hidden=hidden,
    )


format_option = click.option(
    "-f",
    "--format",
    type=click.Choice(OutputFormat(), case_sensitive=False),
    help="The output format of the result. Defaults to table format.",
    default=OutputFormat.TABLE,
)


class CLIState:
    """Per-invocation state shared across commands: profile, sdk, and flags."""

    def __init__(self):
        try:
            self._profile = get_profile()
        except Code42CLIError:
            # no default profile configured yet; resolved lazily on first access
            self._profile = None
        self.totp = None
        self.debug = False
        self._sdk = None
        self.search_filters = []
        self.assume_yes = False

    @property
    def profile(self):
        if self._profile is None:
            self._profile = get_profile()
        return self._profile

    @profile.setter
    def profile(self, value):
        self._profile = value

    @property
    def sdk(self):
        # lazily create the py42 sdk so commands that never touch it don't authenticate
        if self._sdk is None:
            self._sdk = create_sdk(
                self.profile,
                self.debug,
                totp=self.totp,
                # profile stores this flag as the string "True"/"False" — TODO confirm
                api_client=self.profile.api_client_auth == "True",
            )
        return self._sdk

    def set_assume_yes(self, param):
        self.assume_yes = param


def set_profile(ctx, param, value):
    """Sets the profile on the global state object when --profile is passed to commands
    decorated with @sdk_options."""
    if value:
        ctx.ensure_object(CLIState).profile = get_profile(value)


def set_debug(ctx, param, value):
    """Sets debug to True on global state object when --debug/-d is passed to commands
    decorated with @sdk_options.
    """
    if value:
        ctx.ensure_object(CLIState).debug = value


def set_totp(ctx, param, value):
    """Sets TOTP token on global state object for multi-factor authentication."""
    if value:
        ctx.ensure_object(CLIState).totp = value


def profile_option(hidden=False):
    opt = click.option(
        "--profile",
        expose_value=False,
        callback=set_profile,
        hidden=hidden,
        help="The name of the Code42 CLI profile to use when executing this command.",
    )
    return opt


def debug_option(hidden=False):
    opt = click.option(
        "-d",
        "--debug",
        is_flag=True,
        expose_value=False,
        callback=set_debug,
        hidden=hidden,
        help="Turn on debug logging.",
    )
    return opt


def totp_option(hidden=False):
    opt = click.option(
        "--totp",
        type=TOTP(),
        expose_value=False,
        callback=set_totp,
        hidden=hidden,
        help="TOTP token for multi-factor authentication.",
    )
    return opt


pass_state = click.make_pass_decorator(CLIState, ensure=True)


def sdk_options(hidden=False):
    """Decorator factory adding --profile/--totp/--debug plus state passing."""
    def decorator(f):
        f = profile_option(hidden)(f)
        f = totp_option(hidden)(f)
        f = debug_option(hidden)(f)
        f = pass_state(f)
        return f

    return decorator


def server_options(f):
    """Adds the HOSTNAME argument and -p/--protocol option for send-to commands."""
    hostname_arg = click.argument("hostname")
    protocol_option = click.option(
        "-p",
        "--protocol",
        type=click.Choice(ServerProtocol(), case_sensitive=False),
        default=ServerProtocol.UDP,
        help="Protocol used to send logs to server. Defaults to UDP.",
    )
    f = hostname_arg(f)
    f = protocol_option(f)
    return f


send_to_format_options = click.option(
    "-f",
    "--format",
    type=click.Choice(SendToFileEventsOutputFormat(), case_sensitive=False),
    help="The output format of the result. Defaults to json format.",
    default=SendToFileEventsOutputFormat.RAW,
)


def begin_option(term, **kwargs):
    """-b/--begin option rounded to day start; overridable via kwargs."""
    defaults = dict(
        type=MagicDate(rounding_func=round_datetime_to_day_start),
        help=f"The beginning of the date range in which to look for {term}. "
        f"{MagicDate.HELP_TEXT} [required unless --use-checkpoint option used]",
        cls=BeginOption,
        callback=lambda ctx, param, arg: convert_datetime_to_timestamp(arg),
    )
    defaults.update(kwargs)
    return click.option("-b", "--begin", **defaults)


def end_option(term, **kwargs):
    """-e/--end option rounded to day end; overridable via kwargs."""
    defaults = dict(
        type=MagicDate(rounding_func=round_datetime_to_day_end),
        cls=AdvancedQueryAndSavedSearchIncompatible,
        help=f"The end of the date range in which to look for {term}, argument format options are "
        "the same as `--begin`.",
        callback=lambda ctx, param, arg: convert_datetime_to_timestamp(arg),
    )
    defaults.update(kwargs)
    return click.option("-e", "--end", **defaults)


def checkpoint_option(term, **kwargs):
    """-c/--use-checkpoint option for incremental retrieval."""
    # BUG FIX: the concatenated help text was missing the spaces between
    # sentences ("retrieved.If", "run.Subsequent") and the final period.
    defaults = dict(
        help=f"Use a checkpoint with the given name to only get {term} that were not previously retrieved. "
        f"If a checkpoint for {term} with the given name doesn't exist, it will be created on the first run. "
        "Subsequent CLI runs with this flag and the same name will use the stored checkpoint to modify the "
        "search query and then update the stored checkpoint."
    )
    defaults.update(kwargs)
    return click.option("-c", "--use-checkpoint", **defaults)


def set_begin_default_dict(term):
    """Defaults dict for a begin option without the checkpoint-specific help text."""
    return dict(
        type=MagicDate(rounding_func=round_datetime_to_day_start),
        help=f"The beginning of the date range in which to look for {term}. {MagicDate.HELP_TEXT}",
        callback=lambda ctx, param, arg: convert_datetime_to_timestamp(arg),
    )


def set_end_default_dict(term):
    """Defaults dict for an end option mirroring `set_begin_default_dict`."""
    return dict(
        type=MagicDate(rounding_func=round_datetime_to_day_end),
        help=f"The end of the date range in which to look for {term}, argument format options are "
        "the same as `--begin`.",
        callback=lambda ctx, param, arg: convert_datetime_to_timestamp(arg),
    )


column_option = click.option(
    "--columns",
    default=None,
    callback=lambda ctx, param, value: value.split(",") if value is not None else None,
    help="Filter output to include only specified columns. Accepts comma-separated list "
    "of column names (case-insensitive).",
)


# src/code42cli/output_formats.py
import csv
import io
import json
from itertools import chain
from typing import Generator

import click
from pandas import concat
from pandas import notnull

from code42cli.enums import FileEventsOutputFormat
from code42cli.enums import OutputFormat
from code42cli.errors import Code42CLIError
from code42cli.logger.formatters import CEF_TEMPLATE
from code42cli.logger.formatters import map_event_to_cef
from code42cli.util import find_format_width
from code42cli.util import format_to_table

CEF_DEFAULT_PRODUCT_NAME = "Advanced Exfiltration Detection"
CEF_DEFAULT_SEVERITY_LEVEL = "5"

# Uses method `echo_via_pager()` when 10 or more records.
OUTPUT_VIA_PAGER_THRESHOLD = 10


class OutputFormatter:
    """Formats list/dict results in TABLE, CSV, JSON, or RAW-JSON form."""

    def __init__(self, output_format, header=None):
        output_format = output_format.upper() if output_format else OutputFormat.TABLE
        self.output_format = output_format
        self._format_func = to_table  # TABLE is the fallback for unknown values
        self.header = header

        if output_format == OutputFormat.CSV:
            self._format_func = to_csv
        elif output_format == OutputFormat.RAW:
            self._format_func = to_json
        elif output_format == OutputFormat.TABLE:
            self._format_func = self._to_table
        elif output_format == OutputFormat.JSON:
            self._format_func = to_formatted_json

    def _format_output(self, output, *args, **kwargs):
        return self._format_func(output, *args, **kwargs)

    def _to_table(self, output, include_header=True):
        return to_table(output, self.header, include_header=include_header)

    def get_formatted_output(self, output):
        """Yield formatted chunks; list-shaped formats emit one chunk for all rows."""
        if self._requires_list_output:
            yield self._format_output(output)
        else:
            for item in output:
                yield self._format_output(item)

    def echo_formatted_list(self, output_list, force_pager=False):
        """Echo results, paging when the list is large or paging is forced."""
        formatted_output = self.get_formatted_output(output_list)
        if len(output_list) > OUTPUT_VIA_PAGER_THRESHOLD or force_pager:
            click.echo_via_pager(formatted_output)
        else:
            for output in formatted_output:
                click.echo(output, nl=False)
            if self.output_format in [OutputFormat.TABLE]:
                # tables need a trailing newline after the final row
                click.echo()

    @property
    def _requires_list_output(self):
        # TABLE and CSV must see all rows at once to size columns/headers
        return self.output_format in (OutputFormat.TABLE, OutputFormat.CSV)
class DataFrameOutputFormatter:
    """Formats pandas DataFrames (or lists/generators of them) for CLI output.

    After each emitted row, `checkpoint_func(full_event)` is called so incremental
    runs can resume where they left off.
    """

    def __init__(self, output_format, checkpoint_func=None):
        self.output_format = (
            output_format.upper() if output_format else OutputFormat.TABLE
        )
        if self.output_format not in OutputFormat.choices():
            raise Code42CLIError(
                f"DataFrameOutputFormatter received an invalid format: {self.output_format}"
            )
        # no-op checkpoint when the caller doesn't track one
        self.checkpoint_func = checkpoint_func or (lambda x: None)

    def _ensure_iterable(self, dfs):
        # allow a single DataFrame anywhere a sequence of frames is accepted
        if not isinstance(dfs, (Generator, list, tuple)):
            return [dfs]
        return dfs

    def _iter_table(self, dfs, columns=None, **kwargs):
        dfs = self._ensure_iterable(dfs)
        df = concat(dfs)
        if df.empty:
            return
        # convert everything to strings so we can left-justify format
        # applymap() is deprecated in favor of map() for pandas 2.0+ (method renamed)
        df = df.fillna("").applymap(str)
        # set overrideable default kwargs
        kwargs = {
            "index": False,
            "justify": "left",
            # NOTE: make_left_aligned_formatter is defined elsewhere in this module
            "formatters": make_left_aligned_formatter(df),
            **kwargs,
        }
        target = self._select_columns(df, columns) if columns else df
        formatted_rows = target.to_string(**kwargs).splitlines(keepends=True)
        # don't checkpoint the header row
        if kwargs.get("header") is not False:
            yield formatted_rows.pop(0)

        yield from self._checkpoint_and_iter_formatted_events(df, formatted_rows)

    def _iter_csv(self, dfs, columns=None, **kwargs):
        dfs = self._ensure_iterable(dfs)
        no_header = kwargs.get("header") is False

        for i, df in enumerate(dfs):
            if df.empty:
                continue
            # BUG FIX: use the non-mutating form so the caller's DataFrame is
            # not modified as a side effect of formatting.
            df = df.fillna("")
            # only add header on first df and if header=False was not passed in kwargs
            header = False if no_header else (i == 0)
            # BUG FIX: build per-call kwargs instead of rebinding `kwargs`;
            # previously the first chunk's header=True leaked back in via
            # **kwargs, making every later chunk emit (and mis-checkpoint)
            # an extra header row.
            call_kwargs = {"index": False, **kwargs, "header": header}
            target = self._select_columns(df, columns) if columns else df
            formatted_rows = target.to_csv(**call_kwargs).splitlines(keepends=True)
            if header:
                yield formatted_rows.pop(0)

            yield from self._checkpoint_and_iter_formatted_events(df, formatted_rows)

    def _iter_json(self, dfs, columns=None, **kwargs):
        kwargs = {"ensure_ascii": False, **kwargs}
        for event in self.iter_rows(dfs, columns=columns):
            json_string = json.dumps(event, **kwargs)
            yield f"{json_string}\n"

    def _checkpoint_and_iter_formatted_events(self, df, formatted_rows):
        # checkpoint only after the row has actually been yielded downstream
        for event, row in zip(df.to_dict("records"), formatted_rows):  # noqa: B905
            yield row
            self.checkpoint_func(event)

    def _echo_via_pager_if_over_threshold(self, gen):
        first_rows = []
        try:
            for _ in range(OUTPUT_VIA_PAGER_THRESHOLD):
                first_rows.append(next(gen))
        except StopIteration:
            # fewer rows than the threshold: print directly, no pager
            click.echo("".join(first_rows))
            return

        click.echo_via_pager(chain(first_rows, gen))

    def _select_columns(self, df, columns):
        if df.empty:
            return df
        if not isinstance(columns, (list, tuple)):
            raise Code42CLIError(
                "'columns' parameter must be a list or tuple of column names."
            )
        # enable case-insensitive column selection
        normalized_map = {c.lower(): c for c in df.columns}
        try:
            columns = [normalized_map[c.lower()] for c in columns]
            return df[columns]
        except KeyError as e:
            key = e.args[0]
            raise click.BadArgumentUsage(
                f"'{key}' is not a valid column. Valid columns are: {list(df.columns)}"
            )

    def iter_rows(self, dfs, columns=None):
        """
        Accepts a pandas DataFrame or list/generator of DataFrames and yields each
        'row' of the DataFrame as a dict, calling the `checkpoint_func` on each row
        after it has been yielded.

        Accepts an optional list of column names that filter
        columns in the yielded results.
        """
        dfs = self._ensure_iterable(dfs)
        for df in dfs:
            # convert pandas' default null (numpy.NaN) to None
            df = df.astype(object).where(notnull, None)
            if columns:
                filtered = self._select_columns(df, columns)
            else:
                filtered = df
            for full_event, filtered_event in zip(  # noqa: B905
                df.to_dict("records"), filtered.to_dict("records")
            ):
                yield filtered_event
                # checkpoint the unfiltered event so no data is lost on resume
                self.checkpoint_func(full_event)
+ """ + if self.output_format == OutputFormat.TABLE: + yield from self._iter_table(dfs, columns=columns, **kwargs) + + elif self.output_format == OutputFormat.CSV: + yield from self._iter_csv(dfs, columns=columns, **kwargs) + + elif self.output_format == OutputFormat.JSON: + kwargs = {"indent": 4, **kwargs} + yield from self._iter_json(dfs, columns=columns, **kwargs) + + elif self.output_format == OutputFormat.RAW: + yield from self._iter_json(dfs, columns=columns, **kwargs) + + else: + raise Code42CLIError( + f"DataFrameOutputFormatter received an invalid format: {self.output_format}" + ) + + def echo_formatted_dataframes( + self, dfs, columns=None, force_pager=False, force_no_pager=False, **kwargs + ): + """ + Accepts a pandas DataFrame or list/generator of DataFrames and formats and echos the + result to stdout. If total lines > 10, results will be sent to pager. `force_pager` + and `force_no_pager` can be set to override the pager logic based on line count. + + Accepts an optional list of column names that filter + columns in the echoed results. + + Any additional kwargs provided will be passed to the underlying format method + if customizations are required. 
+ """ + lines = self.get_formatted_output(dfs, columns=columns, **kwargs) + try: + # check for empty generator + first = next(lines) + lines = chain([first], lines) + except StopIteration: + click.echo("No results found.") + return + if force_pager and force_no_pager: + raise Code42CLIError("force_pager cannot be used with force_no_pager.") + if force_pager: + click.echo_via_pager(lines) + elif force_no_pager: + for line in lines: + click.echo(line) + else: + self._echo_via_pager_if_over_threshold(lines) + + +class FileEventsOutputFormatter(DataFrameOutputFormatter): + """Class that adds CEF format output option to base DataFrameOutputFormatter.""" + + def __init__(self, output_format, checkpoint_func=None): + self.output_format = ( + output_format.upper() if output_format else OutputFormat.RAW + ) + if self.output_format not in FileEventsOutputFormat.choices(): + raise Code42CLIError( + f"FileEventsOutputFormatter received an invalid format: {self.output_format}" + ) + self.checkpoint_func = checkpoint_func or (lambda x: None) + + def _iter_cef(self, dfs, **kwargs): + dfs = self._ensure_iterable(dfs) + for df in dfs: + df = df.mask(df.isna(), other=None) + for _i, row in df.iterrows(): + event = dict(row) + yield f"{_convert_event_to_cef(event)}\n" + self.checkpoint_func(event) + + def get_formatted_output(self, dfs, columns=None, **kwargs): + if self.output_format == FileEventsOutputFormat.CEF: + yield from self._iter_cef(dfs, **kwargs) + else: + yield from super().get_formatted_output(dfs, columns=columns, **kwargs) + + +def to_csv(output): + """Output is a list of records""" + + if not output: + return + string_io = io.StringIO(newline=None) + fieldnames = list({k for d in output for k in d.keys()}) + writer = csv.DictWriter(string_io, fieldnames=fieldnames) + writer.writeheader() + writer.writerows(output) + return string_io.getvalue() + + +def to_table(output, header, include_header=True): + """Output is a list of records""" + if not output: + return + + 
rows, column_size = find_format_width(output, header, include_header=include_header) + return format_to_table(rows, column_size) + + +def to_json(output): + """Output is a single record""" + return f"{json.dumps(output)}\n" + + +def to_formatted_json(output): + """Output is a single record""" + return f"{json.dumps(output, indent=4)}\n" + + +def to_cef(output): + """Output is a single record""" + return f"{_convert_event_to_cef(output)}\n" + + +def _convert_event_to_cef(event): + ext, evt, sig_id = map_event_to_cef(event) + cef_log = CEF_TEMPLATE.format( + productName=CEF_DEFAULT_PRODUCT_NAME, + signatureID=sig_id, + eventName=evt, + severity=CEF_DEFAULT_SEVERITY_LEVEL, + extension=ext, + ) + return cef_log + + +def make_left_aligned_formatter(df): + return {c: f"{{:<{df[c].str.len().max()}s}}".format for c in df.columns} diff --git a/src/code42cli/password.py b/src/code42cli/password.py new file mode 100644 index 000000000..4f7fd1908 --- /dev/null +++ b/src/code42cli/password.py @@ -0,0 +1,44 @@ +from getpass import getpass + +import keyring + +from code42cli import PRODUCT_NAME +from code42cli.util import does_user_agree + + +def get_stored_password(profile): + """Gets your currently stored password for the given profile.""" + service_name = _get_keyring_service_name(profile.name) + return keyring.get_password(service_name, profile.username) + + +def get_password_from_prompt(): + """Prompts you and returns what you input.""" + return getpass() + + +def set_password(profile, new_password): + """Sets your password for the given profile.""" + service_name = _get_keyring_service_name(profile.name) + uses_file_storage = keyring.get_keyring().priority < 1 + if uses_file_storage and not _prompt_for_alternative_store(): + return + + keyring.set_password(service_name, profile.username, new_password) + + +def delete_password(profile): + """Deletes password for the given profile.""" + service_name = _get_keyring_service_name(profile.name) + 
keyring.delete_password(service_name, profile.username) + + +def _get_keyring_service_name(profile_name): + return f"{PRODUCT_NAME}::{profile_name}" + + +def _prompt_for_alternative_store(): + prompt = ( + "keyring is unavailable. Would you like to store in secure flat file? (y/n): " + ) + return does_user_agree(prompt) diff --git a/src/code42cli/profile.py b/src/code42cli/profile.py new file mode 100644 index 000000000..8a191e669 --- /dev/null +++ b/src/code42cli/profile.py @@ -0,0 +1,192 @@ +from click import style + +import code42cli.password as password +from code42cli.cmds.search.cursor_store import get_all_cursor_stores_for_profile +from code42cli.config import config_accessor +from code42cli.config import ConfigAccessor +from code42cli.config import NoConfigProfileError +from code42cli.errors import Code42CLIError + + +class Code42Profile: + def __init__(self, profile): + self._profile = profile + + @property + def name(self): + return self._profile.name + + @property + def authority_url(self): + return self._profile[ConfigAccessor.AUTHORITY_KEY] + + @property + def username(self): + return self._profile[ConfigAccessor.USERNAME_KEY] + + @property + def ignore_ssl_errors(self): + return self._profile[ConfigAccessor.IGNORE_SSL_ERRORS_KEY] + + @property + def use_v2_file_events(self): + return self._profile.get(ConfigAccessor.USE_V2_FILE_EVENTS_KEY) + + @property + def api_client_auth(self): + return self._profile.get(ConfigAccessor.API_CLIENT_AUTH_KEY) + + @property + def has_stored_password(self): + stored_password = password.get_stored_password(self) + return stored_password is not None and stored_password != "" + + def get_password(self): + pwd = password.get_stored_password(self) + if not pwd: + pwd = password.get_password_from_prompt() + return pwd + + def __str__(self): + return ( + f"{self.name}: Username={self.username}, Authority URL={self.authority_url}" + ) + + +def _get_profile(profile_name=None): + """Returns the profile for the given name.""" + 
config_profile = config_accessor.get_profile(profile_name) + return Code42Profile(config_profile) + + +def get_profile(profile_name=None): + if profile_name is None: + validate_default_profile() + try: + return _get_profile(profile_name) + except NoConfigProfileError as ex: + raise Code42CLIError(str(ex), help=CREATE_PROFILE_HELP) + + +def default_profile_exists(): + try: + profile = _get_profile() + return profile.name and profile.name != ConfigAccessor.DEFAULT_VALUE + except NoConfigProfileError: + return False + + +def is_default_profile(name): + if default_profile_exists(): + default = get_profile() + return name == default.name + + +def validate_default_profile(): + if not default_profile_exists(): + existing_profiles = get_all_profiles() + if not existing_profiles: + raise Code42CLIError("No existing profile.", help=CREATE_PROFILE_HELP) + else: + raise Code42CLIError( + "No default profile set.", + help=_get_set_default_profile_help(existing_profiles), + ) + + +def profile_exists(profile_name=None): + try: + _get_profile(profile_name) + return True + except NoConfigProfileError: + return False + + +def switch_default_profile(profile_name): + profile = get_profile(profile_name) # Handles if profile does not exist. 
+ config_accessor.switch_default_profile(profile.name) + + +def create_profile( + name, server, username, ignore_ssl_errors, use_v2_file_events, api_client_auth +): + if profile_exists(name): + raise Code42CLIError(f"A profile named '{name}' already exists.") + config_accessor.create_profile( + name, server, username, ignore_ssl_errors, use_v2_file_events, api_client_auth + ) + + +def delete_profile(profile_name): + profile = _get_profile(profile_name) + profile_name = profile.name + if password.get_stored_password(profile) is not None: + password.delete_password(profile) + cursor_stores = get_all_cursor_stores_for_profile(profile_name) + for store in cursor_stores: + store.clean() + config_accessor.delete_profile(profile_name) + + +def update_profile( + name, server, username, ignore_ssl_errors, use_v2_file_events, api_client_auth=None +): + config_accessor.update_profile( + name, server, username, ignore_ssl_errors, use_v2_file_events, api_client_auth + ) + + +def get_all_profiles(): + profiles = [ + Code42Profile(profile) for profile in config_accessor.get_all_profiles() + ] + return profiles + + +def get_stored_password(profile_name=None): + profile = get_profile(profile_name) + return password.get_stored_password(profile) + + +def set_password(new_password, profile_name=None): + profile = get_profile(profile_name) + password.set_password(profile, new_password) + + +CREATE_PROFILE_HELP = ( + "\nTo add a profile with username/password authentication, use:\n{}".format( + style( + "\tcode42 profile create " + "--name " + "--server " + "--username \n", + bold=True, + ) + ) + + "\nOr to add a profile with API client authentication, use:\n{}".format( + style( + "\tcode42 profile create-api-client " + "--name " + "--server " + "--api-client-id " + "--secret \n", + bold=True, + ) + ) +) + + +def _get_set_default_profile_help(existing_profiles): + existing_profiles = [str(profile) for profile in existing_profiles] + help_msg = """ +Use the --profile flag to specify 
which profile to use. + +To set the default profile (used whenever --profile argument is not provided), use: + {} + +Existing profiles: +\t{}""".format( + style("code42 profile use ", bold=True), + "\n\t".join(existing_profiles), + ) + return help_msg diff --git a/src/code42cli/sdk_client.py b/src/code42cli/sdk_client.py new file mode 100644 index 000000000..7fd2b5799 --- /dev/null +++ b/src/code42cli/sdk_client.py @@ -0,0 +1,81 @@ +from os import environ + +import py42.sdk +import py42.settings +import py42.settings.debug as debug +import requests +from click import prompt +from click import secho +from py42.exceptions import Py42UnauthorizedError +from requests.exceptions import ConnectionError +from requests.exceptions import SSLError + +from code42cli.click_ext.types import TOTP +from code42cli.errors import Code42CLIError +from code42cli.errors import LoggedCLIError +from code42cli.logger import get_main_cli_logger + +py42.settings.items_per_page = 500 + +logger = get_main_cli_logger() + + +def create_sdk(profile, is_debug_mode, password=None, totp=None, api_client=False): + proxy = environ.get("HTTPS_PROXY") or environ.get("https_proxy") + if proxy: + py42.settings.proxies = {"https": proxy} + if is_debug_mode: + py42.settings.debug.level = debug.DEBUG + if profile.ignore_ssl_errors == "True": + secho( + f"Warning: Profile '{profile.name}' has SSL verification disabled. 
" + "Adding certificate verification is strongly advised.", + fg="red", + err=True, + ) + requests.packages.urllib3.disable_warnings( + requests.packages.urllib3.exceptions.InsecureRequestWarning + ) + py42.settings.verify_ssl_certs = False + password = password or profile.get_password() + return _validate_connection( + profile.authority_url, profile.username, password, totp, api_client + ) + + +def _validate_connection( + authority_url, username, password, totp=None, api_client=False +): + try: + if api_client: + return py42.sdk.from_api_client(authority_url, username, password) + return py42.sdk.from_local_account(authority_url, username, password, totp=totp) + except SSLError as err: + logger.log_error(err) + raise LoggedCLIError( + f"Problem connecting to {authority_url}, SSL certificate verification failed.\nUpdate profile with --disable-ssl-errors to bypass certificate checks (not recommended!)." + ) + except ConnectionError as err: + logger.log_error(err) + if "ProxyError" in str(err): + raise LoggedCLIError( + f"Unable to connect to proxy! Proxy configuration set by environment variable: HTTPS_PROXY={environ.get('HTTPS_PROXY')}" + ) + raise LoggedCLIError(f"Problem connecting to {authority_url}.") + except Py42UnauthorizedError as err: + logger.log_error(err) + if "LoginConfig: LOCAL_2FA" in str(err): + if totp is None: + totp = prompt( + "Multi-factor authentication required. Enter TOTP", type=TOTP() + ) + return _validate_connection(authority_url, username, password, totp) + else: + raise Code42CLIError( + f"Invalid credentials or TOTP token for user {username}." 
def find_format_width(records, header, include_header=True):
    """Select the displayed values and compute per-column padding widths.

    Args:
        records (list or dict): The data to be formatted; a single dict is
            treated as a one-record list.
        header (dict): Maps record keys to the column titles shown on the
            CLI. When falsy (and a header is included), a default header is
            derived from the records' own keys.
        include_header (bool): Whether the header row is included in the
            returned rows. Defaults to True.

    Returns:
        tuple (list of dict, dict): The filtered rows, and the width (in
        characters) of the longest value seen per column.
    """
    if isinstance(records, dict):
        records = [records]
    if include_header and not header:
        header = _get_default_header(records)
    rows = [header] if include_header else []
    # Track the longest rendered string seen in each column, seeded with the
    # column titles themselves so headers are never truncated.
    longest = dict(header.items())
    for record in records:
        row = {key: record.get(key) for key in header}
        for key, value in row.items():
            text = str(value)
            if len(text) > len(longest[key]):
                longest[key] = text
        rows.append(row)
    column_sizes = {key: len(text) for key, text in longest.items()}
    return rows, column_sizes
+ + Usage: + + @warn_interrupt(warning="example message") + def my_important_func(): + pass + """ + + def __init__(self, warning="Cancelling operation cleanly, one moment... "): + self.warning = warning + self.old_int_handler = None + self.interrupted = False + self.exit_instructions = style("Hit CTRL-C again to force quit.", fg="red") + + def __enter__(self): + self.old_int_handler = getsignal(SIGINT) + signal(SIGINT, self._handle_interrupts) + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + if self.interrupted: + exit(1) + signal(SIGINT, self.old_int_handler) + + return False + + def _handle_interrupts(self, sig, frame): + if not self.interrupted: + self.interrupted = True + echo(f"\n{self.warning}\n{self.exit_instructions}", err=True) + else: + exit() + + def __call__(self, func): + @wraps(func) + def inner(*args, **kwargs): + with self: + return func(*args, **kwargs) + + return inner + + +def get_url_parts(url_str): + parts = url_str.split(":") + port = None + if len(parts) > 1 and parts[1] != "": + port = int(parts[1]) + + return parts[0], port + + +def _get_default_header(header_items): + if not header_items: + return + + # Creates dict where keys and values are the same for `find_format_width()`. + header = {} + for item in header_items: + keys = item.keys() + for key in keys: + if key not in header and isinstance(key, str): + header[key] = key + + return header + + +def hash_event(event): + if isinstance(event, dict): + event = json.dumps(event, sort_keys=True) + return md5(event.encode()).hexdigest() + + +def print_numbered_list(items): + """Outputs a numbered list of items to the user. + For example, provide ["test", "foo"] to print "1. test\n2. foo". + """ + + choices = dict(enumerate(items, 1)) + for num in choices: + echo(f"{num}. 
{choices[num]}") + echo() + + +def parse_timestamp(date_str): + # example: {"property": "bar", "timestamp": "2020-11-23T17:13:26.239647Z"} + ts = date_str[:-1] + date = dateutil.parser.parse(ts).replace(tzinfo=timezone.utc) + return date.timestamp() + + +def deprecation_warning(text): + echo(style(text, fg="red"), err=True) diff --git a/src/code42cli/worker.py b/src/code42cli/worker.py new file mode 100644 index 000000000..3a0983056 --- /dev/null +++ b/src/code42cli/worker.py @@ -0,0 +1,147 @@ +import queue +from threading import Lock +from threading import Thread +from time import sleep + +from py42.exceptions import Py42ForbiddenError +from py42.exceptions import Py42HTTPError + +from code42cli.errors import Code42CLIError +from code42cli.logger import get_main_cli_logger + + +def create_worker_stats(total): + return WorkerStats(total) + + +class WorkerStats: + """Stats about the tasks that have run.""" + + def __init__(self, total): + self.total = total + + _total_processed = 0 + _total_errors = 0 + _results = [] + __total_processed_lock = Lock() + __total_errors_lock = Lock() + __results_lock = Lock() + + @property + def total_processed(self): + """The total number of tasks executed.""" + return self._total_processed + + @property + def total_errors(self): + """The amount of errors that occurred.""" + return self._total_errors + + @property + def total_successes(self): + val = self._total_processed - self._total_errors + return val if val >= 0 else 0 + + @property + def results(self): + return self._results + + def __str__(self): + return f"{self.total_successes} succeeded, {self._total_errors} failed out of {self.total}" + + def increment_total_processed(self): + """+1 to self.total_processed""" + with self.__total_processed_lock: + self._total_processed += 1 + + def increment_total_errors(self): + """+1 to self.total_errors""" + with self.__total_errors_lock: + self._total_errors += 1 + + def add_result(self, result): + """add a result to the list""" + with 
self.__results_lock: + self._results.append(result) + + def reset_results(self): + with self.__results_lock: + self._results = [] + + +class Worker: + def __init__(self, thread_count, expected_total, bar=None, stats=None): + self._queue = queue.Queue() + self._thread_count = thread_count + self._bar = bar + self._stats = stats or WorkerStats(expected_total) + self._tasks = 0 + self.__started = False + self.__start_lock = Lock() + self._logger = get_main_cli_logger() + + def do_async(self, func, *args, **kwargs): + """Execute the given func asynchronously given *args and **kwargs. + + Args: + func (callable): The function to execute asynchronously. + *args (iter): Positional args to pass to the function. + **kwargs (dict): Key-value args to pass to the function. + """ + if not self.__started: + with self.__start_lock: + if not self.__started: + self.__start() + self.__started = True + self._queue.put({"func": func, "args": args, "kwargs": kwargs}) + self._tasks += 1 + + @property + def stats(self): + """Stats about the tasks that have been executed, such as the total errors that occurred.""" + return self._stats + + def wait(self): + """Wait for the tasks in the queue to complete. This should usually be called before + program termination.""" + while self._stats.total_processed < self._tasks: + sleep(0.5) + + def _process_queue(self): + while True: + try: + task = self._queue.get() + func = task["func"] + args = task["args"] + kwargs = task["kwargs"] + self._stats.add_result(func(*args, **kwargs)) + except Code42CLIError as err: + self._increment_total_errors() + self._logger.log_error(err) + except Py42ForbiddenError as err: + self._increment_total_errors() + self._logger.log_verbose_error(http_request=err.response.request) + self._logger.log_error( + "You do not have the necessary permissions to perform this task. " + "Try using or creating a different profile." 
+ ) + except Py42HTTPError as err: + self._increment_total_errors() + self._logger.log_verbose_error(http_request=err.response.request) + except Exception: + self._increment_total_errors() + self._logger.log_verbose_error() + finally: + self._stats.increment_total_processed() + if self._bar: + self._bar.update(1) + self._queue.task_done() + + def __start(self): + for _ in range(0, self._thread_count): + t = Thread(target=self._process_queue) + t.daemon = True + t.start() + + def _increment_total_errors(self): + self._stats.increment_total_errors() diff --git a/tests/aed/test_args.py b/tests/aed/test_args.py deleted file mode 100644 index d5640c185..000000000 --- a/tests/aed/test_args.py +++ /dev/null @@ -1,109 +0,0 @@ -import pytest -from argparse import Namespace - -from c42seceventcli.aed.args import get_args - - -@pytest.fixture -def patches(mocker, mock_cli_arg_parser, mock_cli_args, mock_config_arg_parser, mock_config_args): - mock = mocker.MagicMock() - mock.cli_args = mock_cli_args - mock.config_args = mock_config_args - return mock - - -@pytest.fixture -def patches_with_mocked_args_verifications( - mocker, - mock_cli_arg_parser, - mock_cli_args, - mock_config_arg_parser, - mock_config_args, - mock_authority_verification, - mock_username_verification, - mock_destination_args_verification, -): - mock = mocker.MagicMock() - mock.cli_args = mock_cli_args - mock.config_args = mock_config_args - mock.verify_authority = mock_authority_verification - mock.verify_username = mock_username_verification - mock.verify_destination_args = mock_destination_args_verification - return mock - - -@pytest.fixture -def mock_cli_args(): - return Namespace() - - -@pytest.fixture -def mock_config_args(): - return {} - - -@pytest.fixture -def mock_config_arg_parser(mocker, mock_config_args): - mock_parser = mocker.patch("c42seceventcli.common.util.get_config_args") - mock_parser.return_value = mock_config_args - return mock_parser - - -@pytest.fixture -def 
mock_cli_arg_parser(mocker, mock_cli_args): - mock_parser = mocker.patch("argparse.ArgumentParser.parse_args") - mock_parser.return_value = mock_cli_args - return mock_parser - - -@pytest.fixture -def mock_authority_verification(mocker): - return mocker.patch("c42seceventcli.aed.args.AEDArgs.verify_authority_arg") - - -@pytest.fixture -def mock_username_verification(mocker): - return mocker.patch("c42seceventcli.aed.args.AEDArgs.verify_username_arg") - - -@pytest.fixture -def mock_destination_args_verification(mocker): - return mocker.patch("c42seceventcli.aed.args.AEDArgs.verify_destination_args") - - -def test_get_args_calls_sec_args_try_set_with_expected_args( - mocker, patches_with_mocked_args_verifications -): - mock_setter = mocker.patch("c42seceventcli.common.util.SecArgs.try_set") - key = "c42_authority_url" - expected_cli_val = "URL1" - expected_config_val = "URL2" - patches_with_mocked_args_verifications.cli_args.c42_authority_url = expected_cli_val - patches_with_mocked_args_verifications.config_args[key] = expected_config_val - get_args() - mock_setter.assert_called_once_with(key, expected_cli_val, expected_config_val) - - -def test_get_args_when_destination_is_not_none_and_destination_type_is_stdout_raises_value_error( - patches -): - patches.cli_args.destination_type = "stdout" - patches.cli_args.destination = "Delaware" - with pytest.raises(ValueError): - get_args() - - -def test_get_args_when_destination_is_none_and_destination_type_is_server_raises_value_error( - patches -): - patches.cli_args.destination_type = "server" - patches.cli_args.destination = None - with pytest.raises(ValueError): - get_args() - - -def test_get_args_when_destination_is_none_and_destination_type_is_file_raises_value_error(patches): - patches.cli_args.destination_type = "file" - patches.cli_args.destination = None - with pytest.raises(ValueError): - get_args() diff --git a/tests/aed/test_cursor_store.py b/tests/aed/test_cursor_store.py deleted file mode 100644 index 
ab7a086ca..000000000 --- a/tests/aed/test_cursor_store.py +++ /dev/null @@ -1,81 +0,0 @@ -from c42secevents.extractors import INSERTION_TIMESTAMP_FIELD_NAME -from c42seceventcli.aed.cursor_store import AEDCursorStore -from tests.conftest import MOCK_TEST_DB_PATH - - -class TestAEDCursorStore(object): - def test_reset_executes_expected_drop_table_query(self, sqlite_connection): - store = AEDCursorStore(MOCK_TEST_DB_PATH) - store.reset() - with store._connection as conn: - actual = conn.execute.call_args_list[0][0][0] - expected = "DROP TABLE aed_checkpoint" - assert actual == expected - - def test_reset_executes_expected_create_table_query(self, sqlite_connection): - store = AEDCursorStore(MOCK_TEST_DB_PATH) - store.reset() - with store._connection as conn: - actual = conn.execute.call_args_list[1][0][0] - expected = "CREATE TABLE aed_checkpoint (cursor_id, insertionTimestamp)" - assert actual == expected - - def test_reset_executes_expected_insert_query(self, sqlite_connection): - store = AEDCursorStore(MOCK_TEST_DB_PATH) - store._connection = sqlite_connection - store.reset() - with store._connection as conn: - actual = conn.execute.call_args[0][0] - expected = "INSERT INTO aed_checkpoint VALUES(?, null)" - assert actual == expected - - def test_reset_executes_query_with_expected_primary_key(self, sqlite_connection): - store = AEDCursorStore(MOCK_TEST_DB_PATH) - store._connection = sqlite_connection - store.reset() - with store._connection as conn: - actual = conn.execute.call_args[0][1][0] - expected = store._PRIMARY_KEY - assert actual == expected - - def test_get_stored_insertion_timestamp_executes_expected_select_query(self, sqlite_connection): - store = AEDCursorStore(MOCK_TEST_DB_PATH) - store.get_stored_insertion_timestamp() - with store._connection as conn: - expected = "SELECT {0} FROM aed_checkpoint WHERE cursor_id=?".format( - INSERTION_TIMESTAMP_FIELD_NAME - ) - actual = conn.cursor().execute.call_args[0][0] - assert actual == expected - - def 
test_get_stored_insertion_timestamp_executes_query_with_expected_primary_key( - self, sqlite_connection - ): - store = AEDCursorStore(MOCK_TEST_DB_PATH) - store.get_stored_insertion_timestamp() - with store._connection as conn: - actual = conn.cursor().execute.call_args[0][1][0] - expected = store._PRIMARY_KEY - assert actual == expected - - def test_replace_stored_insertion_timestamp_executes_expected_update_query( - self, sqlite_connection - ): - store = AEDCursorStore(MOCK_TEST_DB_PATH) - store.replace_stored_insertion_timestamp(123) - with store._connection as conn: - expected = "UPDATE aed_checkpoint SET {0}=? WHERE cursor_id=?".format( - INSERTION_TIMESTAMP_FIELD_NAME - ) - actual = conn.execute.call_args[0][0] - assert actual == expected - - def test_replace_stored_insertion_timestamp_executes_query_with_expected_primary_key( - self, sqlite_connection - ): - store = AEDCursorStore(MOCK_TEST_DB_PATH) - new_insertion_timestamp = 123 - store.replace_stored_insertion_timestamp(new_insertion_timestamp) - with store._connection as conn: - actual = conn.execute.call_args[0][1][0] - assert actual == new_insertion_timestamp diff --git a/tests/aed/test_main.py b/tests/aed/test_main.py deleted file mode 100644 index 95c0a59b9..000000000 --- a/tests/aed/test_main.py +++ /dev/null @@ -1,315 +0,0 @@ -import pytest -from datetime import datetime, timedelta -from socket import herror, gaierror, timeout - -from py42 import settings -import py42.debug_level as debug_level -from c42secevents.logging.formatters import AEDDictToCEFFormatter, AEDDictToJSONFormatter -from c42seceventcli.aed.cursor_store import AEDCursorStore -from c42seceventcli.aed.args import AEDArgs - -from c42seceventcli.aed import main - - -@pytest.fixture -def patches( - mocker, - mock_aed_extractor_constructor, - mock_aed_extractor, - mock_store, - mock_42, - mock_args, - mock_args_getter, - mock_password_getter, - mock_password_deleter, - mock_logger, - mock_error_logger, - mock_cursor_reset_function, -): 
- mock = mocker.MagicMock() - mock.aed_extractor_constructor = mock_aed_extractor_constructor - mock.aed_extractor = mock_aed_extractor - mock.store = mock_store - mock.py42 = mock_42 - mock.aed_args = mock_args - mock.args_getter = mock_args_getter - mock.get_password = mock_password_getter - mock.delete_password = mock_password_deleter - mock.get_logger = mock_logger - mock.error_logger = mock_error_logger - mock.reset_cursor = mock_cursor_reset_function - return mock - - -@pytest.fixture -def mock_aed_extractor_constructor(mocker): - mock = mocker.patch("c42secevents.extractors.AEDEventExtractor.__init__") - mock.return_value = None - return mock - - -@pytest.fixture -def mock_aed_extractor(mocker): - return mocker.patch("c42secevents.extractors.AEDEventExtractor.extract") - - -@pytest.fixture -def mock_store(mocker): - store = mocker.patch("c42seceventcli.aed.main.AEDCursorStore.__init__") - store.return_value = None - return store - - -@pytest.fixture -def mock_42(mocker): - settings.verify_ssl_certs = True - settings.debug_level = debug_level.NONE - return mocker.patch("py42.sdk.SDK.create_using_local_account") - - -@pytest.fixture -def mock_args(mocker, mock_args_getter): - args = AEDArgs() - args.cli_parser = mocker.MagicMock() - args.c42_authority_url = "https://example.com" - args.c42_username = "test.testerson@example.com" - mock_args_getter.return_value = args - return args - - -@pytest.fixture -def mock_args_getter(mocker): - return mocker.patch("c42seceventcli.aed.args.get_args") - - -@pytest.fixture -def mock_password_getter(mocker): - mock = mocker.patch("c42seceventcli.common.util.get_stored_password") - mock.get_password.return_value = "PASSWORD" - return mock - - -@pytest.fixture -def mock_password_deleter(mocker): - return mocker.patch("c42seceventcli.common.util.delete_stored_password") - - -@pytest.fixture -def mock_logger(mocker): - return mocker.patch("c42seceventcli.common.util.get_logger") - - -@pytest.fixture -def 
mock_error_logger(mocker): - return mocker.patch("c42seceventcli.common.util.get_error_logger") - - -@pytest.fixture -def mock_cursor_reset_function(mocker): - return mocker.patch("c42seceventcli.aed.main.AEDCursorStore.reset") - - -def test_main_when_get_args_raises_value_error_causes_system_exit(patches): - patches.args_getter.side_effect = ValueError - with pytest.raises(SystemExit): - main.main() - - -def test_main_when_ignore_ssl_errors_is_true_that_py42_settings_verify_ssl_certs_is_false(patches): - patches.aed_args.ignore_ssl_errors = True - main.main() - assert not settings.verify_ssl_certs - - -def test_main_when_ignore_ssl_errors_is_false_that_py42_settings_verify_ssl_certs_is_true(patches): - patches.args.ignore_ssl_errors = False - main.main() - assert settings.verify_ssl_certs - - -def test_main_when_reset_password_is_true_calls_delete_password(patches): - expected_username = "Bob" - patches.aed_args.c42_username = expected_username - patches.aed_args.reset_password = True - main.main() - patches.delete_password.assert_called_once_with(main._SERVICE_NAME, expected_username) - - -def test_main_when_reset_password_is_false_does_not_call_delete_password(patches): - patches.aed_args.reset_password = False - main.main() - assert not patches.delete_password.call_count - - -def test_main_when_clear_cursor_is_true_calls_aed_cursor_store_reset(patches): - patches.aed_args.record_cursor = True - patches.aed_args.clear_cursor = True - main.main() - assert patches.reset_cursor.call_count == 1 - - -def test_main_when_clear_cursor_is_false_does_not_call_aed_cursor_store_reset(patches): - patches.aed_args.record_cursor = True - patches.aed_args.clear_cursor = False - main.main() - assert not patches.reset_cursor.call_count - - -def test_main_when_debug_mode_is_true_that_py42_settings_debug_mode_is_debug(patches): - patches.aed_args.debug_mode = True - main.main() - assert settings.debug_level == debug_level.DEBUG - - -def 
test_main_uses_min_timestamp_from_sixty_days_ago(patches): - main.main() - expected = ( - (datetime.now() - timedelta(days=60)) - datetime.utcfromtimestamp(0) - ).total_seconds() - actual = patches.aed_extractor.call_args[0][0] - assert pytest.approx(expected, actual) - - -def test_main_uses_max_timestamp_from_now(patches): - main.main() - expected = (datetime.now() - datetime.utcfromtimestamp(0)).total_seconds() - actual = patches.aed_extractor.call_args[0][1] - assert pytest.approx(expected, actual) - - -def test_main_when_create_sdk_raises_exception_causes_exit(patches): - patches.py42.side_effect = Exception - with pytest.raises(SystemExit): - main.main() - - -def test_main_creates_sdk_with_args_and_password_from_get_password(patches): - expected_authority = "https://user.authority.com" - expected_username = "user.userson@userson.solutions" - expected_password = "querty" - patches.aed_args.c42_authority_url = expected_authority - patches.aed_args.c42_username = expected_username - patches.get_password.return_value = expected_password - main.main() - patches.py42.assert_called_once_with( - host_address=expected_authority, username=expected_username, password=expected_password - ) - - -def test_main_when_output_format_not_supported_causes_exit(patches): - patches.aed_args.output_format = "EAS3" - with pytest.raises(SystemExit): - main.main() - - -def test_main_when_output_format_is_json_creates_json_formatter(patches): - patches.aed_args.output_format = "JSON" - main.main() - expected = AEDDictToJSONFormatter - actual = type(patches.get_logger.call_args[1]["formatter"]) - assert actual == expected - - -def test_main_when_output_format_is_cef_creates_cef_formatter(patches): - patches.aed_args.output_format = "CEF" - main.main() - expected = AEDDictToCEFFormatter - actual = type(patches.get_logger.call_args[1]["formatter"]) - assert actual == expected - - -def test_main_when_destination_port_is_set_passes_port_to_get_logger(patches): - expected = 1000 - 
patches.aed_args.destination_port = expected - main.main() - actual = patches.get_logger.call_args[1]["destination_args"].destination_port - assert actual == expected - - -def test_main_when_given_destination_protocol_via_cli_passes_port_to_get_logger(patches): - expected = "SOME PROTOCOL" - patches.aed_args.destination_protocol = expected - main.main() - actual = patches.get_logger.call_args[1]["destination_args"].destination_protocol - assert actual == expected - - -def test_main_when_get_logger_raises_io_error_without_errno_61_print_error_about_file_path( - patches, capsys -): - patches.get_logger.side_effect = IOError - with pytest.raises(SystemExit): - main.main() - - assert "file path" in capsys.readouterr().out.lower() - - -def test_main_when_get_logger_raises_io_error_with_errno_61_prints_error_about_hostname( - patches, capsys -): - err = IOError() - err.errno = 61 - patches.get_logger.side_effect = err - - with pytest.raises(SystemExit): - main.main() - - assert "hostname" in capsys.readouterr().out.lower() - - -def test_main_when_get_logger_raises_h_error_causes_exit(patches): - patches.get_logger.side_effect = gaierror - with pytest.raises(SystemExit): - main.main() - - -def test_main_when_get_logger_raises_gai_error_causes_exit(patches): - patches.get_logger.side_effect = herror - with pytest.raises(SystemExit): - main.main() - - -def test_main_when_get_logger_raises_timeout_causes_exit(patches): - patches.get_logger.side_effect = timeout - with pytest.raises(SystemExit): - main.main() - - -def test_main_when_record_cursor_is_true_overrides_handlers_record_cursor_position(mocker, patches): - expected = mocker.MagicMock() - AEDCursorStore.replace_stored_insertion_timestamp = expected - patches.aed_args.record_cursor = True - main.main() - actual = patches.aed_extractor_constructor.call_args[0][1].record_cursor_position - assert actual is expected - - -def test_main_when_record_cursor_is_false_does_not_override_handlers_record_cursor_position( - mocker, 
patches -): - unexpected = mocker.MagicMock() - AEDCursorStore.replace_stored_insertion_timestamp = unexpected - patches.aed_args.record_cursor = False - main.main() - actual = patches.aed_extractor_constructor.call_args[0][1].record_cursor_position - assert actual is not unexpected - - -def test_main_when_record_cursor_is_true_overrides_handlers_get_cursor_position(mocker, patches): - expected = mocker.MagicMock() - AEDCursorStore.get_stored_insertion_timestamp = expected - patches.aed_args.record_cursor = True - main.main() - actual = patches.aed_extractor_constructor.call_args[0][1].get_cursor_position - assert actual is expected - - -def test_main_when_record_cursor_is_false_does_not_override_handlers_get_cursor_position( - mocker, patches -): - unexpected = mocker.MagicMock() - AEDCursorStore.get_stored_insertion_timestamp = unexpected - patches.aed_args.record_cursor = False - main.main() - actual = patches.aed_extractor_constructor.call_args[0][1].get_cursor_position - assert actual is not unexpected diff --git a/c42seceventcli/common/__init__.py b/tests/click_ext/__init__.py similarity index 100% rename from c42seceventcli/common/__init__.py rename to tests/click_ext/__init__.py diff --git a/tests/click_ext/test_types.py b/tests/click_ext/test_types.py new file mode 100644 index 000000000..d74b9ed80 --- /dev/null +++ b/tests/click_ext/test_types.py @@ -0,0 +1,10 @@ +from code42cli.click_ext.types import PromptChoice + + +class TestPromptChoice: + def test_convert_returns_expected_item(self): + choices = ["foo", "bar", "test"] + prompt_choice = PromptChoice(choices) + assert prompt_choice.convert("1", None, None) == "foo" + assert prompt_choice.convert("2", None, None) == "bar" + assert prompt_choice.convert("3", None, None) == "test" diff --git a/c42seceventcli/fed/__init__.py b/tests/cmds/__init__.py similarity index 100% rename from c42seceventcli/fed/__init__.py rename to tests/cmds/__init__.py diff --git a/tests/cmds/conftest.py b/tests/cmds/conftest.py 
new file mode 100644 index 000000000..c60fdbf3f --- /dev/null +++ b/tests/cmds/conftest.py @@ -0,0 +1,125 @@ +import json +import json as json_module +import threading + +import pytest +from py42.exceptions import Py42UserNotOnListError +from py42.sdk import SDKClient +from requests import HTTPError +from requests import Response +from tests.conftest import convert_str_to_date +from tests.conftest import create_mock_http_error +from tests.conftest import create_mock_response +from tests.conftest import TEST_ID + +from code42cli.cmds.search.cursor_store import FileEventCursorStore +from code42cli.logger import CliLogger + + +TEST_EMPLOYEE = "risky employee" + + +def get_user_not_on_list_side_effect(mocker, list_name): + def side_effect(*args, **kwargs): + mock_http_error = create_mock_http_error(mocker, data="TEST_ERR") + raise Py42UserNotOnListError(mock_http_error, TEST_ID, list_name) + + return side_effect + + +@pytest.fixture +def sdk(mocker): + return mocker.MagicMock(spec=SDKClient) + + +@pytest.fixture +def mock_42(mocker): + return mocker.patch("py42.sdk.from_local_account") + + +@pytest.fixture +def logger(mocker): + mock = mocker.MagicMock() + return mock + + +@pytest.fixture +def cli_logger(mocker): + mock = mocker.MagicMock(spec=CliLogger) + return mock + + +@pytest.fixture +def cli_state_with_user(sdk_with_user, cli_state): + cli_state.sdk = sdk_with_user + return cli_state + + +@pytest.fixture +def cli_state_without_user(sdk_without_user, cli_state): + cli_state.sdk = sdk_without_user + return cli_state + + +@pytest.fixture +def mock_file_event_checkpoint(mocker): + mock_file_event_checkpointer = mocker.MagicMock(spec=FileEventCursorStore) + mocker.patch( + "code42cli.cmds.securitydata._get_file_event_cursor_store", + return_value=mock_file_event_checkpointer, + ) + return mock_file_event_checkpointer + + +@pytest.fixture +def custom_error(mocker): + err = mocker.MagicMock(spec=HTTPError) + resp = mocker.MagicMock(spec=Response) + resp.text = 
"TEST_ERR" + err.response = resp + return err + + +def get_filter_value_from_json(json, filter_index): + return json_module.loads(str(json))["filters"][filter_index]["value"] + + +def filter_term_is_in_call_args(filter_group, term): + for f in filter_group: + if term in str(f): + return True + return False + + +def parse_date_from_filter_value(json, filter_index): + date_str = get_filter_value_from_json(json, filter_index) + return convert_str_to_date(date_str) + + +def thread_safe_side_effect(): + def f(*args): + with threading.Lock(): + f.call_count += 1 + f.call_args_list.append(args) + + f.call_count = 0 + f.call_args_list = [] + return f + + +def get_generator_for_get_all(mocker, mock_return_items): + if not mock_return_items: + mock_return_items = [] + elif not isinstance(mock_return_items, dict): + mock_return_items = [json.loads(mock_return_items)] + + def gen(*args, **kwargs): + yield create_mock_response(mocker, data={"items": mock_return_items}) + + return gen + + +def get_mark_for_search_and_send_to(command_group): + search_cmd = [command_group, "search"] + send_to_cmd = [command_group, "send-to", "0.0.0.0"] + return pytest.mark.parametrize("command", (search_cmd, send_to_cmd)) diff --git a/tests/aed/__init__.py b/tests/cmds/search/__init__.py similarity index 100% rename from tests/aed/__init__.py rename to tests/cmds/search/__init__.py diff --git a/tests/cmds/search/test_cursor_store.py b/tests/cmds/search/test_cursor_store.py new file mode 100644 index 000000000..09a4d65da --- /dev/null +++ b/tests/cmds/search/test_cursor_store.py @@ -0,0 +1,348 @@ +from os import path + +import pytest + +from code42cli.cmds.search.cursor_store import AlertCursorStore +from code42cli.cmds.search.cursor_store import AuditLogCursorStore +from code42cli.cmds.search.cursor_store import Cursor +from code42cli.cmds.search.cursor_store import FileEventCursorStore +from code42cli.errors import Code42CLIError + +PROFILE_NAME = "testprofile" +CURSOR_NAME = "testcursor" + 
+_NAMESPACE = "code42cli.cmds.search.cursor_store" + +ALERT_CHECKPOINT_FOLDER_NAME = "alert_checkpoints" +FILE_EVENT_CHECKPOINT_FOLDER_NAME = "file_event_checkpoints" +AUDIT_LOG_CHECKPOINT_FOLDER_NAME = "audit_log_checkpoints" + + +@pytest.fixture +def mock_open(mocker): + mock = mocker.patch("builtins.open", mocker.mock_open(read_data="123456789")) + return mock + + +@pytest.fixture +def mock_empty_checkpoint(mocker): + mock = mocker.patch("builtins.open", mocker.mock_open(read_data="")) + return mock + + +AUDIT_LOG_EVENT_HASH_1 = "bc8f70ff821cadcc3e717d534d14737d" +AUDIT_LOG_EVENT_HASH_2 = "66ad12c0a0dba2b41520fb69aeefd84d" + + +@pytest.fixture +def mock_open_events(mocker): + mock = mocker.patch( + "builtins.open", + mocker.mock_open( + read_data=f'["{AUDIT_LOG_EVENT_HASH_1}", "{AUDIT_LOG_EVENT_HASH_2}"]' + ), + ) + return mock + + +@pytest.fixture +def mock_isfile(mocker): + mock = mocker.patch(f"{_NAMESPACE}.os.path.isfile") + mock.return_value = True + return mock + + +class TestCursor: + def test_name_returns_expected_name(self): + cursor = Cursor("bogus/path") + assert cursor.name == "path" + + def test_value_returns_expected_value(self, mock_open): + cursor = Cursor("bogus/path") + assert cursor.value == "123456789" + + def test_value_reads_expected_file(self, mock_open): + cursor = Cursor("bogus/path") + _ = cursor.value + mock_open.assert_called_once_with("bogus/path") + + +class TestAlertCursorStore: + def test_get_returns_expected_timestamp(self, mock_open): + store = AlertCursorStore(PROFILE_NAME) + checkpoint = store.get(CURSOR_NAME) + assert checkpoint == 123456789 + + def test_get_when_profile_does_not_exist_returns_none(self, mocker): + store = AlertCursorStore(PROFILE_NAME) + checkpoint = store.get(CURSOR_NAME) + mock_open = mocker.patch(f"{_NAMESPACE}.open") + mock_open.side_effect = FileNotFoundError + assert checkpoint is None + + def test_get_reads_expected_file(self, mock_open): + store = AlertCursorStore(PROFILE_NAME) + 
store.get(CURSOR_NAME) + user_path = path.join(path.expanduser("~"), ".code42cli") + expected_path = path.join( + user_path, ALERT_CHECKPOINT_FOLDER_NAME, PROFILE_NAME, CURSOR_NAME + ) + mock_open.assert_called_once_with(expected_path) + + def test_replace_writes_to_expected_file(self, mock_open): + store = AlertCursorStore(PROFILE_NAME) + store.replace("checkpointname", 123) + user_path = path.join(path.expanduser("~"), ".code42cli") + expected_path = path.join( + user_path, ALERT_CHECKPOINT_FOLDER_NAME, PROFILE_NAME, "checkpointname" + ) + mock_open.assert_called_once_with(expected_path, "w") + + def test_replace_writes_expected_content(self, mock_open): + store = AlertCursorStore(PROFILE_NAME) + store.replace("checkpointname", 123) + user_path = path.join(path.expanduser("~"), ".code42cli") + path.join( + user_path, ALERT_CHECKPOINT_FOLDER_NAME, PROFILE_NAME, "checkpointname" + ) + mock_open.return_value.write.assert_called_once_with("123") + + def test_delete_calls_remove_on_expected_file(self, mock_open, mock_remove): + store = AlertCursorStore(PROFILE_NAME) + store.delete("deleteme") + user_path = path.join(path.expanduser("~"), ".code42cli") + expected_path = path.join( + user_path, ALERT_CHECKPOINT_FOLDER_NAME, PROFILE_NAME, "deleteme" + ) + mock_remove.assert_called_once_with(expected_path) + + def test_delete_when_checkpoint_does_not_exist_raises_cli_error( + self, mock_open, mock_remove + ): + store = AlertCursorStore(PROFILE_NAME) + mock_remove.side_effect = FileNotFoundError + with pytest.raises(Code42CLIError): + store.delete("deleteme") + + def test_clean_calls_remove_on_each_checkpoint( + self, mock_open, mock_remove, mock_listdir, mock_isfile + ): + mock_listdir.return_value = ["fileone", "filetwo", "filethree"] + store = AlertCursorStore(PROFILE_NAME) + store.clean() + assert mock_remove.call_count == 3 + + def test_get_all_cursors_returns_all_checkpoints( + self, mock_open, mock_listdir, mock_isfile + ): + mock_listdir.return_value = ["fileone", 
"filetwo", "filethree"] + store = AlertCursorStore(PROFILE_NAME) + cursors = store.get_all_cursors() + assert len(cursors) == 3 + assert cursors[0].name == "fileone" + assert cursors[1].name == "filetwo" + assert cursors[2].name == "filethree" + + +class TestFileEventCursorStore: + def test_get_returns_expected_timestamp(self, mock_open): + store = FileEventCursorStore(PROFILE_NAME) + checkpoint = store.get(CURSOR_NAME) + assert checkpoint == "123456789" + + def test_get_reads_expected_file(self, mock_open): + store = FileEventCursorStore(PROFILE_NAME) + store.get(CURSOR_NAME) + user_path = path.join(path.expanduser("~"), ".code42cli") + expected_path = path.join( + user_path, FILE_EVENT_CHECKPOINT_FOLDER_NAME, PROFILE_NAME, CURSOR_NAME + ) + mock_open.assert_called_once_with(expected_path) + + def test_get_when_profile_does_not_exist_returns_none(self, mocker): + store = FileEventCursorStore(PROFILE_NAME) + checkpoint = store.get(CURSOR_NAME) + mock_open = mocker.patch(f"{_NAMESPACE}.open") + mock_open.side_effect = FileNotFoundError + assert checkpoint is None + + def test_replace_writes_to_expected_file(self, mock_open): + store = FileEventCursorStore(PROFILE_NAME) + store.replace("checkpointname", 123) + user_path = path.join(path.expanduser("~"), ".code42cli") + expected_path = path.join( + user_path, FILE_EVENT_CHECKPOINT_FOLDER_NAME, PROFILE_NAME, "checkpointname" + ) + mock_open.assert_called_once_with(expected_path, "w") + + def test_replace_writes_expected_content(self, mock_open): + store = FileEventCursorStore(PROFILE_NAME) + store.replace("checkpointname", 123) + user_path = path.join(path.expanduser("~"), ".code42cli") + path.join( + user_path, FILE_EVENT_CHECKPOINT_FOLDER_NAME, PROFILE_NAME, "checkpointname" + ) + mock_open.return_value.write.assert_called_once_with("123") + + def test_delete_calls_remove_on_expected_file(self, mock_open, mock_remove): + store = FileEventCursorStore(PROFILE_NAME) + store.delete("deleteme") + user_path = 
path.join(path.expanduser("~"), ".code42cli") + expected_path = path.join( + user_path, FILE_EVENT_CHECKPOINT_FOLDER_NAME, PROFILE_NAME, "deleteme" + ) + mock_remove.assert_called_once_with(expected_path) + + def test_delete_when_checkpoint_does_not_exist_raises_cli_error( + self, mock_open, mock_remove + ): + store = FileEventCursorStore(PROFILE_NAME) + mock_remove.side_effect = FileNotFoundError + with pytest.raises(Code42CLIError): + store.delete("deleteme") + + def test_clean_calls_remove_on_each_checkpoint( + self, mock_open, mock_remove, mock_listdir, mock_isfile + ): + mock_listdir.return_value = ["fileone", "filetwo", "filethree"] + store = FileEventCursorStore(PROFILE_NAME) + store.clean() + assert mock_remove.call_count == 3 + + def test_get_all_cursors_returns_all_checkpoints(self, mock_listdir, mock_isfile): + mock_listdir.return_value = ["fileone", "filetwo", "filethree"] + store = FileEventCursorStore(PROFILE_NAME) + cursors = store.get_all_cursors() + assert len(cursors) == 3 + assert cursors[0].name == "fileone" + assert cursors[1].name == "filetwo" + assert cursors[2].name == "filethree" + + +class TestAuditLogCursorStore: + def test_get_returns_expected_timestamp(self, mock_open): + store = AuditLogCursorStore(PROFILE_NAME) + checkpoint = store.get(CURSOR_NAME) + assert checkpoint == 123456789 + + def test_get_reads_expected_file(self, mock_open): + store = AuditLogCursorStore(PROFILE_NAME) + store.get(CURSOR_NAME) + user_path = path.join(path.expanduser("~"), ".code42cli") + expected_path = path.join( + user_path, AUDIT_LOG_CHECKPOINT_FOLDER_NAME, PROFILE_NAME, CURSOR_NAME + ) + mock_open.assert_called_once_with(expected_path) + + def test_get_when_profile_does_not_exist_returns_none(self, mocker): + store = AuditLogCursorStore(PROFILE_NAME) + checkpoint = store.get(CURSOR_NAME) + mock_open = mocker.patch(f"{_NAMESPACE}.open") + mock_open.side_effect = FileNotFoundError + assert checkpoint is None + + def 
test_replace_writes_to_expected_file(self, mock_open): + store = AuditLogCursorStore(PROFILE_NAME) + store.replace("checkpointname", 123) + user_path = path.join(path.expanduser("~"), ".code42cli") + expected_path = path.join( + user_path, AUDIT_LOG_CHECKPOINT_FOLDER_NAME, PROFILE_NAME, "checkpointname" + ) + mock_open.assert_called_once_with(expected_path, "w") + + def test_replace_writes_expected_content(self, mock_open): + store = AuditLogCursorStore(PROFILE_NAME) + store.replace("checkpointname", 123) + user_path = path.join(path.expanduser("~"), ".code42cli") + path.join( + user_path, AUDIT_LOG_CHECKPOINT_FOLDER_NAME, PROFILE_NAME, "checkpointname" + ) + mock_open.return_value.write.assert_called_once_with("123") + + def test_delete_calls_remove_on_expected_file(self, mock_open, mock_remove): + store = AuditLogCursorStore(PROFILE_NAME) + store.delete("deleteme") + user_path = path.join(path.expanduser("~"), ".code42cli") + expected_path = path.join( + user_path, AUDIT_LOG_CHECKPOINT_FOLDER_NAME, PROFILE_NAME, "deleteme" + ) + mock_remove.assert_called_once_with(expected_path) + + def test_delete_when_checkpoint_does_not_exist_raises_cli_error( + self, mock_open, mock_remove + ): + store = AuditLogCursorStore(PROFILE_NAME) + mock_remove.side_effect = FileNotFoundError + with pytest.raises(Code42CLIError): + store.delete("deleteme") + + def test_clean_calls_remove_on_each_checkpoint( + self, mock_open, mock_remove, mock_listdir, mock_isfile + ): + mock_listdir.return_value = ["fileone", "filetwo", "filethree"] + store = AuditLogCursorStore(PROFILE_NAME) + store.clean() + assert mock_remove.call_count == 3 + + def test_get_all_cursors_returns_all_checkpoints(self, mock_listdir, mock_isfile): + mock_listdir.return_value = ["fileone", "filetwo", "filethree"] + store = AuditLogCursorStore(PROFILE_NAME) + cursors = store.get_all_cursors() + assert len(cursors) == 3 + assert cursors[0].name == "fileone" + assert cursors[1].name == "filetwo" + assert cursors[2].name == 
"filethree" + + def test_get_events_returns_expected_list(self, mock_open_events): + store = AuditLogCursorStore(PROFILE_NAME) + event_list = store.get_events(CURSOR_NAME) + assert event_list == [AUDIT_LOG_EVENT_HASH_1, AUDIT_LOG_EVENT_HASH_2] + + def test_get_events_reads_expected_file(self, mock_open): + store = AuditLogCursorStore(PROFILE_NAME) + store.get_events(CURSOR_NAME) + user_path = path.join(path.expanduser("~"), ".code42cli") + expected_filename = CURSOR_NAME + "_events" + expected_path = path.join( + user_path, AUDIT_LOG_CHECKPOINT_FOLDER_NAME, PROFILE_NAME, expected_filename + ) + mock_open.assert_called_once_with(expected_path) + + def test_get_events_when_profile_does_not_exist_returns_empty_list(self, mocker): + store = AuditLogCursorStore(PROFILE_NAME) + event_list = store.get_events(CURSOR_NAME) + mock_open = mocker.patch(f"{_NAMESPACE}.open") + mock_open.side_effect = FileNotFoundError + assert event_list == [] + + def test_get_events_when_checkpoint_not_valid_json_returns_empty_list(self, mocker): + mocker.patch("builtins.open", mocker.mock_open(read_data="invalid_json")) + store = AuditLogCursorStore(PROFILE_NAME) + event_list = store.get_events(CURSOR_NAME) + assert event_list == [] + + def test_replace_events_writes_to_expected_file(self, mock_open): + store = AuditLogCursorStore(PROFILE_NAME) + store.replace_events("checkpointname", ["hash1", "hash2"]) + user_path = path.join(path.expanduser("~"), ".code42cli") + expected_path = path.join( + user_path, + AUDIT_LOG_CHECKPOINT_FOLDER_NAME, + PROFILE_NAME, + "checkpointname_events", + ) + mock_open.assert_called_once_with(expected_path, "w") + + def test_replace_events_writes_expected_content(self, mock_open_events): + store = AuditLogCursorStore(PROFILE_NAME) + store.replace_events("checkpointname", ["hash1", "hash2"]) + user_path = path.join(path.expanduser("~"), ".code42cli") + path.join( + user_path, + AUDIT_LOG_CHECKPOINT_FOLDER_NAME, + PROFILE_NAME, + "checkpointname_events", + ) + 
mock_open_events.return_value.write.assert_called_once_with( + '["hash1", "hash2"]' + ) diff --git a/tests/cmds/search/test_init.py b/tests/cmds/search/test_init.py new file mode 100644 index 000000000..1d7327e28 --- /dev/null +++ b/tests/cmds/search/test_init.py @@ -0,0 +1,55 @@ +import pytest + +from code42cli.cmds.search import _try_get_logger_for_server +from code42cli.enums import SendToFileEventsOutputFormat +from code42cli.errors import Code42CLIError +from code42cli.logger.enums import ServerProtocol + + +_TEST_ERROR_MESSAGE = "TEST ERROR MESSAGE" +_TEST_HOST = "example.com" +_TEST_CERTS = "./certs.pem" + + +@pytest.fixture +def patched_get_logger_method(mocker): + return mocker.patch("code42cli.cmds.search.get_logger_for_server") + + +@pytest.fixture +def errored_logger(patched_get_logger_method): + patched_get_logger_method.side_effect = Exception(_TEST_ERROR_MESSAGE) + + +def test_try_get_logger_for_server_calls_get_logger_for_server( + patched_get_logger_method, +): + _try_get_logger_for_server( + _TEST_HOST, + ServerProtocol.TLS_TCP, + SendToFileEventsOutputFormat.CEF, + _TEST_CERTS, + ) + patched_get_logger_method.assert_called_once_with( + _TEST_HOST, + ServerProtocol.TLS_TCP, + SendToFileEventsOutputFormat.CEF, + _TEST_CERTS, + ) + + +def test_try_get_logger_for_server_when_exception_raised_raises_code42_cli_error( + errored_logger, +): + with pytest.raises(Code42CLIError) as err: + _try_get_logger_for_server( + _TEST_HOST, + ServerProtocol.TCP, + SendToFileEventsOutputFormat.RAW, + _TEST_CERTS, + ) + + assert ( + str(err.value) + == f"Unable to connect to example.com. Failed with error: {_TEST_ERROR_MESSAGE}." 
+ ) diff --git a/tests/cmds/test_alert_rules.py b/tests/cmds/test_alert_rules.py new file mode 100644 index 000000000..fa41357fc --- /dev/null +++ b/tests/cmds/test_alert_rules.py @@ -0,0 +1,365 @@ +import logging + +import pytest +from py42.exceptions import Py42BadRequestError +from py42.exceptions import Py42InternalServerError +from py42.exceptions import Py42InvalidRuleOperationError +from requests import HTTPError +from requests import Request +from requests import Response +from tests.conftest import create_mock_http_error +from tests.conftest import create_mock_response + +from code42cli.main import cli + +TEST_RULE_ID = "rule-id" +TEST_USER_ID = "test-user-id" +TEST_USERNAME = "test@example.com" +TEST_SOURCE = "rule source" +TEST_EMPTY_RULE_RESPONSE = {"ruleMetadata": []} +ALERT_RULES_COMMAND = "alert-rules" +TEST_RULE_RESPONSE = { + "ruleMetadata": [ + { + "observerRuleId": TEST_RULE_ID, + "type": "FED_FILE_TYPE_MISMATCH", + "isEnabled": True, + "ruleSource": "NOTVALID", + "name": "Test", + "severity": "high", + } + ] +} +TEST_GET_ALL_RESPONSE_EXFILTRATION = { + "ruleMetadata": [ + {"observerRuleId": TEST_RULE_ID, "type": "FED_ENDPOINT_EXFILTRATION"} + ] +} +TEST_GET_ALL_RESPONSE_CLOUD_SHARE = { + "ruleMetadata": [ + {"observerRuleId": TEST_RULE_ID, "type": "FED_CLOUD_SHARE_PERMISSIONS"} + ] +} +TEST_GET_ALL_RESPONSE_FILE_TYPE_MISMATCH = { + "ruleMetadata": [{"observerRuleId": TEST_RULE_ID, "type": "FED_FILE_TYPE_MISMATCH"}] +} + + +def get_rule_not_found_side_effect(mocker): + def side_effect(*args, **kwargs): + return create_mock_response(mocker, data=TEST_EMPTY_RULE_RESPONSE) + + return side_effect + + +def get_user_not_on_alert_rule_side_effect(mocker): + def side_effect(*args, **kwargs): + err = create_mock_http_error(mocker, data="TEST_ERR", status=400) + raise Py42BadRequestError(err) + + return side_effect + + +def create_invalid_rule_type_side_effect(mocker): + def side_effect(*args, **kwargs): + err = create_mock_http_error(mocker, 
data="TEST_ERR", status=400) + raise Py42InvalidRuleOperationError(err, TEST_RULE_ID, TEST_SOURCE) + + return side_effect + + +@pytest.fixture +def get_user_id(mocker): + return mocker.patch("code42cli.cmds.alert_rules.get_user_id") + + +@pytest.fixture +def mock_server_error(mocker): + base_err = _get_error_base(mocker) + return Py42InternalServerError(base_err) + + +def _get_error_base(mocker): + base_err = HTTPError() + mock_response = mocker.MagicMock(spec=Response) + base_err.response = mock_response + request = mocker.MagicMock(spec=Request) + request.body = '{"test":"body"}' + base_err.response.request = request + return base_err + + +@pytest.fixture +def alert_rules_sdk(sdk): + sdk.alerts.rules.add_user.return_value = {} + sdk.alerts.rules.remove_user.return_value = {} + sdk.alerts.rules.remove_all_users.return_value = {} + sdk.alerts.rules.get_all.return_value = {} + sdk.alerts.rules.exfiltration.get.return_value = {} + sdk.alerts.rules.cloudshare.get.return_value = {} + sdk.alerts.rules.filetypemismatch.get.return_value = {} + return sdk + + +def test_add_user_adds_user_list_to_alert_rules(runner, cli_state): + cli_state.sdk.users.get_by_username.return_value = { + "users": [{"userUid": TEST_USER_ID}] + } + runner.invoke( + cli, + ["alert-rules", "add-user", "--rule-id", TEST_RULE_ID, "-u", TEST_USERNAME], + obj=cli_state, + ) + cli_state.sdk.alerts.rules.add_user.assert_called_once_with( + TEST_RULE_ID, TEST_USER_ID + ) + + +def test_add_user_when_returns_invalid_rule_type_error_and_system_rule_exits( + mocker, runner, cli_state +): + cli_state.sdk.alerts.rules.add_user.side_effect = ( + create_invalid_rule_type_side_effect(mocker) + ) + result = runner.invoke( + cli, + ["alert-rules", "add-user", "--rule-id", TEST_RULE_ID, "-u", TEST_USERNAME], + obj=cli_state, + ) + assert result.exit_code == 1 + assert ( + "Only alert rules with a source of 'Alerting' can be targeted by this command." 
+ in result.output + ) + assert "Rule rule-id has a source of 'rule source'." in result.output + + +def test_add_user_when_returns_500_and_not_system_rule_raises_Py42InternalServerError( + runner, cli_state, mock_server_error, caplog +): + cli_state.sdk.alerts.rules.add_user.side_effect = mock_server_error + with caplog.at_level(logging.ERROR): + result = runner.invoke( + cli, + ["alert-rules", "add-user", "--rule-id", TEST_RULE_ID, "-u", TEST_USERNAME], + obj=cli_state, + ) + assert result.exit_code == 1 + assert "Py42InternalServerError" in caplog.text + + +def test_add_user_when_rule_not_found_prints_expected_output(mocker, runner, cli_state): + cli_state.sdk.alerts.rules.get_by_observer_id.side_effect = ( + get_rule_not_found_side_effect(mocker) + ) + result = runner.invoke( + cli, + ["alert-rules", "add-user", "--rule-id", TEST_RULE_ID, "-u", TEST_USERNAME], + obj=cli_state, + ) + assert "Error: No alert rules with RuleId rule-id found." in result.output + + +def test_remove_user_removes_user_list_from_alert_rules(runner, cli_state): + cli_state.sdk.users.get_by_username.return_value = { + "users": [{"userUid": TEST_USER_ID}] + } + runner.invoke( + cli, + ["alert-rules", "remove-user", "--rule-id", TEST_RULE_ID, "-u", TEST_USERNAME], + obj=cli_state, + ) + cli_state.sdk.alerts.rules.remove_user.assert_called_once_with( + TEST_RULE_ID, TEST_USER_ID + ) + + +def test_remove_user_when_raise_invalid_rule_type_error_and_system_rule_raises_InvalidRuleTypeError( + mocker, runner, cli_state +): + cli_state.sdk.alerts.rules.remove_user.side_effect = ( + create_invalid_rule_type_side_effect(mocker) + ) + result = runner.invoke( + cli, + ["alert-rules", "remove-user", "--rule-id", TEST_RULE_ID, "-u", TEST_USERNAME], + obj=cli_state, + ) + assert result.exit_code == 1 + assert ( + "Only alert rules with a source of 'Alerting' can be targeted by this command." + in result.output + ) + assert "Rule rule-id has a source of 'rule source'." 
in result.output + + +def test_remove_user_when_rule_not_found_prints_expected_output( + mocker, runner, cli_state +): + cli_state.sdk.alerts.rules.get_by_observer_id.side_effect = ( + get_rule_not_found_side_effect(mocker) + ) + result = runner.invoke( + cli, + ["alert-rules", "remove-user", "--rule-id", TEST_RULE_ID, "-u", TEST_USERNAME], + obj=cli_state, + ) + assert "No alert rules with RuleId rule-id found." in result.output + + +def test_remove_user_when_raises_invalid_rule_type_side_effect_and_not_system_rule_raises_Py42InternalServerError( + mocker, runner, cli_state +): + cli_state.sdk.alerts.rules.remove_user.side_effect = ( + create_invalid_rule_type_side_effect(mocker) + ) + result = runner.invoke( + cli, + ["alert-rules", "remove-user", "--rule-id", TEST_RULE_ID, "-u", TEST_USERNAME], + obj=cli_state, + ) + + assert result.exit_code == 1 + assert ( + "Only alert rules with a source of 'Alerting' can be targeted by this command." + in result.output + ) + assert "Rule rule-id has a source of 'rule source'." 
in result.output + + +def test_list_gets_alert_rules(runner, cli_state): + runner.invoke(cli, ["alert-rules", "list"], obj=cli_state) + assert cli_state.sdk.alerts.rules.get_all.call_count == 1 + + +def test_list_when_no_rules_prints_no_rules_message(runner, cli_state): + cli_state.sdk.alerts.rules.get_all.return_value = [TEST_EMPTY_RULE_RESPONSE] + result = runner.invoke(cli, ["alert-rules", "list"], obj=cli_state) + assert "No alert rules found" in result.output + + +def test_show_rule_calls_correct_rule_property(runner, cli_state): + cli_state.sdk.alerts.rules.get_by_observer_id.return_value = ( + TEST_GET_ALL_RESPONSE_EXFILTRATION + ) + runner.invoke(cli, ["alert-rules", "show", TEST_RULE_ID], obj=cli_state) + cli_state.sdk.alerts.rules.exfiltration.get.assert_called_once_with(TEST_RULE_ID) + + +def test_show_rule_calls_correct_rule_property_cloud_share(runner, cli_state): + cli_state.sdk.alerts.rules.get_by_observer_id.return_value = ( + TEST_GET_ALL_RESPONSE_CLOUD_SHARE + ) + runner.invoke(cli, ["alert-rules", "show", TEST_RULE_ID], obj=cli_state) + cli_state.sdk.alerts.rules.cloudshare.get.assert_called_once_with(TEST_RULE_ID) + + +def test_show_rule_calls_correct_rule_property_file_type_mismatch(runner, cli_state): + cli_state.sdk.alerts.rules.get_by_observer_id.return_value = ( + TEST_GET_ALL_RESPONSE_FILE_TYPE_MISMATCH + ) + runner.invoke(cli, ["alert-rules", "show", TEST_RULE_ID], obj=cli_state) + cli_state.sdk.alerts.rules.filetypemismatch.get.assert_called_once_with( + TEST_RULE_ID + ) + + +def test_show_rule_when_no_matching_rule_prints_no_rule_message(runner, cli_state): + cli_state.sdk.alerts.rules.get_by_observer_id.return_value = ( + TEST_EMPTY_RULE_RESPONSE + ) + result = runner.invoke(cli, ["alert-rules", "show", TEST_RULE_ID], obj=cli_state) + msg = f"No alert rules with RuleId {TEST_RULE_ID} found" + assert msg in result.output + + +def test_add_bulk_users_uses_expected_arguments(runner, mocker, cli_state): + bulk_processor = 
mocker.patch("code42cli.cmds.alert_rules.run_bulk_process") + with runner.isolated_filesystem(): + with open("test_add.csv", "w") as csv: + csv.writelines(["rule_id,username\n", "test,value\n"]) + runner.invoke( + cli, ["alert-rules", "bulk", "add", "test_add.csv"], obj=cli_state + ) + assert bulk_processor.call_args[0][1] == [{"rule_id": "test", "username": "value"}] + + +def test_remove_bulk_users_uses_expected_arguments(runner, mocker, cli_state): + bulk_processor = mocker.patch("code42cli.cmds.alert_rules.run_bulk_process") + with runner.isolated_filesystem(): + with open("test_remove.csv", "w") as csv: + csv.writelines(["rule_id,username\n", "test,value\n"]) + runner.invoke( + cli, ["alert-rules", "bulk", "add", "test_remove.csv"], obj=cli_state + ) + assert bulk_processor.call_args[0][1] == [{"rule_id": "test", "username": "value"}] + + +def test_list_cmd_prints_no_rules_found_when_f_is_passed_and_response_is_empty( + runner, cli_state +): + cli_state.sdk.alerts.rules.get_all.return_value = [TEST_EMPTY_RULE_RESPONSE] + result = runner.invoke(cli, ["alert-rules", "list", "-f", "csv"], obj=cli_state) + assert cli_state.sdk.alerts.rules.get_all.call_count == 1 + assert "No alert rules found" in result.output + + +def test_list_cmd_formats_to_csv_when_format_is_passed(runner, cli_state): + cli_state.sdk.alerts.rules.get_all.return_value = [TEST_RULE_RESPONSE] + result = runner.invoke(cli, ["alert-rules", "list", "-f", "csv"], obj=cli_state) + assert cli_state.sdk.alerts.rules.get_all.call_count == 1 + assert "observerRuleId" in result.output + assert "type" in result.output + assert "isEnabled" in result.output + assert "ruleSource" in result.output + assert "name" in result.output + assert "severity" in result.output + + +def test_remove_when_user_not_on_rule_raises_expected_error(runner, cli_state, mocker): + cli_state.sdk.alerts.rules.remove_user.side_effect = ( + get_user_not_on_alert_rule_side_effect(mocker) + ) + test_username = "test@example.com" + 
test_rule_id = "101010" + result = runner.invoke( + cli, + ["alert-rules", "remove-user", "-u", test_username, "--rule-id", test_rule_id], + obj=cli_state, + ) + assert ( + f"User {test_username} is not currently assigned to rule-id {test_rule_id}." + in result.output + ) + + +@pytest.mark.parametrize( + "command, error_msg", + [ + ( + f"{ALERT_RULES_COMMAND} add-user --rule-id test-rule-id", + "Missing option '-u' / '--username'.", + ), + ( + f"{ALERT_RULES_COMMAND} remove-user --rule-id test-rule-id", + "Missing option '-u' / '--username'.", + ), + (f"{ALERT_RULES_COMMAND} add-user", "Missing option '--rule-id'."), + (f"{ALERT_RULES_COMMAND} remove-user", "Missing option '--rule-id'."), + (f"{ALERT_RULES_COMMAND} show", "Missing argument 'RULE_ID'."), + ( + f"{ALERT_RULES_COMMAND} bulk add", + "Error: Missing argument 'CSV_FILE'.", + ), + ( + f"{ALERT_RULES_COMMAND} bulk remove", + "Error: Missing argument 'CSV_FILE'.", + ), + ], +) +def test_alert_rules_command_when_missing_required_parameters_errors( + command, error_msg, runner, cli_state +): + result = runner.invoke(cli, command.split(" "), obj=cli_state) + assert result.exit_code == 2 + assert error_msg in "".join(result.output) diff --git a/tests/cmds/test_alerts.py b/tests/cmds/test_alerts.py new file mode 100644 index 000000000..1cce32ebd --- /dev/null +++ b/tests/cmds/test_alerts.py @@ -0,0 +1,1235 @@ +import logging + +import py42.sdk.queries.alerts.filters as f +import pytest +from py42.exceptions import Py42NotFoundError +from py42.sdk.queries.alerts.alert_query import AlertQuery +from py42.sdk.queries.alerts.filters import AlertState +from tests.cmds.conftest import filter_term_is_in_call_args +from tests.cmds.conftest import get_mark_for_search_and_send_to +from tests.conftest import create_mock_response +from tests.conftest import get_test_date_str + +from code42cli import PRODUCT_NAME +from code42cli.cmds.search.cursor_store import AlertCursorStore +from code42cli.logger.enums import 
ServerProtocol +from code42cli.main import cli + + +BEGIN_TIMESTAMP = 1577858400.0 +END_TIMESTAMP = 1580450400.0 +CURSOR_TIMESTAMP = 1579500000.0 +ALERT_DETAIL_RESULT = [ + { + "alerts": [ + {"id": 1, "createdAt": "2020-01-17"}, + {"id": 11, "createdAt": "2020-01-18"}, + ] + }, + { + "alerts": [ + {"id": 2, "createdAt": "2020-01-19"}, + {"id": 12, "createdAt": "2020-01-20"}, + ] + }, + { + "alerts": [ + {"id": 3, "createdAt": "2020-01-01"}, + {"id": 13, "createdAt": "2020-01-02"}, + ] + }, + { + "alerts": [ + {"id": 4, "createdAt": "2020-01-03"}, + {"id": 14, "createdAt": "2020-01-04"}, + ] + }, + { + "alerts": [ + {"id": 5, "createdAt": "2020-01-05"}, + {"id": 15, "createdAt": "2020-01-06"}, + ] + }, + { + "alerts": [ + {"id": 6, "createdAt": "2020-01-07"}, + {"id": 16, "createdAt": "2020-01-08"}, + ] + }, + { + "alerts": [ + {"id": 7, "createdAt": "2020-01-09"}, + {"id": 17, "createdAt": "2020-01-10"}, + ] + }, + { + "alerts": [ + {"id": 8, "createdAt": "2020-01-11"}, + {"id": 18, "createdAt": "2020-01-12"}, + ] + }, + { + "alerts": [ + {"id": 9, "createdAt": "2020-01-13"}, + {"id": 19, "createdAt": "2020-01-14"}, + ] + }, + { + "alerts": [ + {"id": 10, "createdAt": "2020-01-15"}, + {"id": 20, "createdAt": "2020-01-16"}, + ] + }, +] +SORTED_ALERT_DETAILS = [ + {"id": 12, "createdAt": "2020-01-20"}, + {"id": 2, "createdAt": "2020-01-19"}, + {"id": 11, "createdAt": "2020-01-18"}, + {"id": 1, "createdAt": "2020-01-17"}, + {"id": 20, "createdAt": "2020-01-16"}, + {"id": 10, "createdAt": "2020-01-15"}, + {"id": 19, "createdAt": "2020-01-14"}, + {"id": 9, "createdAt": "2020-01-13"}, + {"id": 18, "createdAt": "2020-01-12"}, + {"id": 8, "createdAt": "2020-01-11"}, + {"id": 17, "createdAt": "2020-01-10"}, + {"id": 7, "createdAt": "2020-01-09"}, + {"id": 16, "createdAt": "2020-01-08"}, + {"id": 6, "createdAt": "2020-01-07"}, + {"id": 15, "createdAt": "2020-01-06"}, + {"id": 5, "createdAt": "2020-01-05"}, + {"id": 14, "createdAt": "2020-01-04"}, + {"id": 4, "createdAt": 
"2020-01-03"}, + {"id": 13, "createdAt": "2020-01-02"}, + {"id": 3, "createdAt": "2020-01-01"}, +] +ADVANCED_QUERY_VALUES = { + "state_1": "OPEN", + "state_2": "PENDING", + "state_3": "IN_PROGRESS", + "actor": "test@example.com", + "on_or_after": "2020-01-01T06:00:00.000000Z", + "on_or_after_timestamp": 1577858400.0, + "on_or_before": "2020-02-01T06:00:00.000000Z", + "on_or_before_timestamp": 1580536800.0, + "rule_id": "xyz123", +} +ADVANCED_QUERY_JSON = """ +{{ + "srtDirection": "DESC", + "pgNum": 0, + "pgSize": 100, + "srtKey": "CreatedAt", + "groups": [ + {{ + "filterClause": "OR", + "filters": [ + {{ + "value": "{state_1}", + "term": "state", + "operator": "IS" + }}, + {{ + "value": "{state_2}", + "term": "state", + "operator": "IS" + }}, + {{ + "value": "{state_3}", + "term": "state", + "operator": "IS" + }} + ] + }}, + {{ + "filterClause": "OR", + "filters": [ + {{ + "value": "{actor}", + "term": "actor", + "operator": "CONTAINS" + }} + ] + }}, + {{ + "filterClause": "AND", + "filters": [ + {{ + "value": "{on_or_after}", + "term": "createdAt", + "operator": "ON_OR_AFTER" + }}, + {{ + "value": "{on_or_before}", + "term": "createdAt", + "operator": "ON_OR_BEFORE" + }} + ] + }}, + {{ + "filterClause": "OR", + "filters": [ + {{ + "value": "{rule_id}", + "term": "ruleId", + "operator": "IS" + }} + ] + }} + ], + "groupClause": "AND" +}}""".format( + **ADVANCED_QUERY_VALUES +) +advanced_query_incompat_test_params = pytest.mark.parametrize( + "arg", + [ + ("--begin", "1d"), + ("--end", "1d"), + ("--severity", "HIGH"), + ("--actor", "test"), + ("--actor-contains", "test"), + ("--exclude-actor", "test"), + ("--exclude-actor-contains", "test"), + ("--rule-name", "test"), + ("--exclude-rule-name", "test"), + ("--rule-id", "test"), + ("--exclude-rule-id", "test"), + ("--rule-type", "FedEndpointExfiltration"), + ("--exclude-rule-type", "FedEndpointExfiltration"), + ("--description", "test"), + ("--state", "OPEN"), + ], +) +ALERT_DETAILS_FULL_RESPONSE = { + "type$": 
"ALERT_DETAILS_RESPONSE", + "alerts": [ + { + "type$": "ALERT_DETAILS", + "tenantId": "11111111-2222-3333-4444-55559a126666", + "type": "FED_ENDPOINT_EXFILTRATION", + "name": "Some Burp Suite Test Rule", + "description": "Some Burp Rule", + "actor": "neilwin0415@code42.com", + "actorId": "1002844444570300000", + "target": "N/A", + "severity": "HIGH", + "ruleId": "e9bfa082-4541-4432-aacd-d8b2ca074762", + "ruleSource": "Alerting", + "id": "TEST-ALERT-ID-123", + "createdAt": "2021-04-23T21:18:59.2032940Z", + "state": "PENDING", + "stateLastModifiedBy": "test@example.com", + "stateLastModifiedAt": "2021-04-26T12:37:30.4605390Z", + "observations": [ + { + "type$": "OBSERVATION", + "id": "f561e556-a746-4db0-b99b-71546adf57c4", + "observedAt": "2021-04-23T21:10:00.0000000Z", + "type": "FedEndpointExfiltration", + "data": { + "type$": "OBSERVED_ENDPOINT_ACTIVITY", + "id": "f561e556-a746-4db0-b99b-71546adf57c4", + "sources": ["Endpoint"], + "exposureTypes": ["ApplicationRead"], + "firstActivityAt": "2021-04-23T21:10:00.0000000Z", + "lastActivityAt": "2021-04-23T21:15:00.0000000Z", + "fileCount": 1, + "totalFileSize": 8326, + "fileCategories": [ + { + "type$": "OBSERVED_FILE_CATEGORY", + "category": "Image", + "fileCount": 1, + "totalFileSize": 8326, + "isSignificant": False, + } + ], + "files": [ + { + "type$": "OBSERVED_FILE", + "eventId": "0_c4e43418-07d9-4a9f-a138-29f39a124d33_1002847122023325984_4b6d298c-8660-4cb8-b6d1-61d09a5c69ba_0", + "path": "C:\\Users\\Test Testerson\\Downloads", + "name": "mad cat - Copy.jpg", + "category": "Image", + "size": 8326, + } + ], + "syncToServices": [], + "sendingIpAddresses": ["174.20.92.47"], + "appReadDetails": [ + { + "type$": "APP_READ_DETAILS", + "tabTitles": [ + "file.example.com - Super simple file sharing - Google Chrome" + ], + "tabUrl": "https://www.file.example.com/", + "tabInfos": [ + { + "type$": "TAB_INFO", + "tabUrl": "https://www.file.example.com/", + "tabTitle": "example - Super simple file sharing - Google Chrome", + 
} + ], + "destinationCategory": "Uncategorized", + "destinationName": "Uncategorized", + "processName": "\\Device\\HarddiskVolume3\\Program Files\\Google\\Chrome\\Application\\chrome.exe", + } + ], + }, + } + ], + "note": { + "type$": "NOTE", + "id": "72f8cd62-5cb8-4896-947d-f07e17053eaf", + "lastModifiedAt": "2021-04-26T12:37:30.4987600Z", + "lastModifiedBy": "test@example.com", + "message": "TEST-NOTE-CLI-UNIT-TESTS", + }, + } + ], +} +search_and_send_to_test = get_mark_for_search_and_send_to("alerts") + + +@pytest.fixture +def alert_cursor_with_checkpoint(mocker): + mock = mocker.patch(f"{PRODUCT_NAME}.cmds.alerts._get_alert_cursor_store") + mock_cursor = mocker.MagicMock(spec=AlertCursorStore) + mock_cursor.get.return_value = CURSOR_TIMESTAMP + mock.return_value = mock_cursor + mock.expected_datetime = "2020-01-20 06:00:00" + return mock + + +@pytest.fixture +def alert_cursor_without_checkpoint(mocker): + mock = mocker.patch(f"{PRODUCT_NAME}.cmds.alerts._get_alert_cursor_store") + mock_cursor = mocker.MagicMock(spec=AlertCursorStore) + mock_cursor.get.return_value = None + mock.return_value = mock_cursor + return mock + + +@pytest.fixture +def begin_option(mocker): + mock = mocker.patch(f"{PRODUCT_NAME}.cmds.alerts.convert_datetime_to_timestamp") + mock.return_value = BEGIN_TIMESTAMP + mock.expected_timestamp = "2020-01-01T06:00:00.000000Z" + return mock + + +@pytest.fixture +def send_to_logger_factory(mocker): + return mocker.patch("code42cli.cmds.search._try_get_logger_for_server") + + +@pytest.fixture +def full_alert_details_response(mocker): + return create_mock_response(mocker, data=ALERT_DETAILS_FULL_RESPONSE) + + +@pytest.fixture +def mock_alert_search_response(mocker): + + data = { + "type$": "ALERT_DETAILS", + "tenantId": "1d71796f-af5b-4231-9d8e-df6434da4663", + "type": "FED_COMPOSITE", + "name": "File Upload Alert", + "description": "Alert on any file upload events", + "actor": "test.user@code42.com", + "actorId": "1018651385932568954", + "target": 
"N/A", + "severity": "MEDIUM", + "ruleId": "962a6a1c-54f6-4477-90bd-a08cc74cbf71", + "ruleSource": "Alerting", + "id": "c209fa6c-c3c7-4242-b6de-207c0ff13e38", + "createdAt": "2021-09-01T07:43:06.7831980Z", + "state": "OPEN", + "observations": [ + { + "type$": "OBSERVATION", + "id": "3af2494d-3981-46b5-a14b-6087edc48c5c", + "observedAt": "2021-09-01T07:15:00.0000000Z", + "type": "FedEndpointExfiltration", + "data": { + "type$": "OBSERVED_ENDPOINT_ACTIVITY", + "id": "3af2494d-3981-46b5-a14b-6087edc48c5c", + "sources": ["Endpoint"], + "exposureTypes": ["ApplicationRead"], + "exposureTypeIsSignificant": True, + "firstActivityAt": "2021-09-01T07:15:00.0000000Z", + "lastActivityAt": "2021-09-01T07:20:00.0000000Z", + "fileCount": 1, + "totalFileSize": 7842255, + "fileCategories": [ + { + "type$": "OBSERVED_FILE_CATEGORY", + "category": "Archive", + "fileCount": 1, + "totalFileSize": 7842255, + } + ], + "fileCategoryIsSignificant": False, + "files": [ + { + "type$": "OBSERVED_FILE", + "eventId": "0_1d71796f-af5b-4231-9d8e-df6434da4663_1019043250830767116_1022963810130583921_3_EPS", + "path": "C:/Users/qa/Downloads/", + "name": "TA-code42-insider-threats-add-on.tar.gz", + "category": "Archive", + "size": 7842255, + "riskSeverityInfo": { + "type$": "RISK_SEVERITY_INFO", + "score": 3, + "severity": "LOW", + "matchedRiskIndicators": [ + { + "type$": "RISK_INDICATOR", + "name": "Zip", + "weight": 3, + }, + { + "type$": "RISK_INDICATOR", + "name": "Other destination", + "weight": 0, + }, + ], + }, + "observedAt": "2021-09-01T07:19:18.5860000Z", + } + ], + "riskSeverityIsSignificant": False, + "riskSeveritySummary": [ + { + "type$": "RISK_SEVERITY_SUMMARY", + "severity": "LOW", + "numEvents": 1, + "summarizedRiskIndicators": [ + { + "type$": "SUMMARIZED_RISK_INDICATOR", + "name": "Zip", + "numEvents": 1, + }, + { + "type$": "SUMMARIZED_RISK_INDICATOR", + "name": "Other destination", + "numEvents": 1, + }, + ], + } + ], + "syncToServices": [], + "sendingIpAddresses": 
["162.222.47.183"], + "appReadDetails": [ + { + "type$": "APP_READ_DETAILS", + "tabInfos": [ + { + "type$": "TAB_INFO", + "tabUrl": "http://127.0.0.1:8000/en-US/manager/appinstall/_upload", + "tabTitle": "Settings | Splunk - Google Chrome", + } + ], + "destinationCategory": "Uncategorized", + "destinationName": "Uncategorized", + "processName": "\\Device\\HarddiskVolume2\\Program Files (x86)\\Google\\Chrome\\Application\\chrome.exe", + } + ], + "destinationIsSignificant": False, + }, + } + ], + } + + def response_gen(): + yield data + + return response_gen() + + +@pytest.fixture +def search_all_alerts_success(cli_state, mock_alert_search_response): + cli_state.sdk.alerts.get_all_alert_details.return_value = mock_alert_search_response + + +@search_and_send_to_test +def test_search_and_send_to_passes_query_object_when_searching_file_events( + runner, cli_state, command, search_all_alerts_success +): + runner.invoke( + cli, [*command, "--advanced-query", ADVANCED_QUERY_JSON], obj=cli_state + ) + + query = cli_state.sdk.alerts.get_all_alert_details.call_args[0][0] + assert isinstance(query, AlertQuery) + + +@search_and_send_to_test +def test_search_and_send_to_when_advanced_query_passed_as_json_string_builds_expected_query( + cli_state, runner, command, search_all_alerts_success +): + runner.invoke( + cli, + [*command, "--advanced-query", ADVANCED_QUERY_JSON], + obj=cli_state, + ) + query = cli_state.sdk.alerts.get_all_alert_details.call_args[0][0] + passed_filter_groups = query._filter_group_list + expected_actor_filter = f.Actor.contains(ADVANCED_QUERY_VALUES["actor"]) + expected_actor_filter.filter_clause = "OR" + expected_timestamp_filter = f.DateObserved.in_range( + ADVANCED_QUERY_VALUES["on_or_after_timestamp"], + ADVANCED_QUERY_VALUES["on_or_before_timestamp"], + ) + expected_state_filter = f.AlertState.is_in( + [ + ADVANCED_QUERY_VALUES["state_1"], + ADVANCED_QUERY_VALUES["state_2"], + ADVANCED_QUERY_VALUES["state_3"], + ] + ) + expected_rule_id_filter = 
f.RuleId.eq(ADVANCED_QUERY_VALUES["rule_id"]) + expected_rule_id_filter.filter_clause = "OR" + assert expected_actor_filter in passed_filter_groups + assert expected_timestamp_filter in passed_filter_groups + assert expected_state_filter in passed_filter_groups + assert expected_rule_id_filter in passed_filter_groups + + +@advanced_query_incompat_test_params +def test_search_with_advanced_query_and_incompatible_argument_errors( + arg, cli_state, runner +): + + result = runner.invoke( + cli, + ["alerts", "search", "--advanced-query", ADVANCED_QUERY_JSON, *arg], + obj=cli_state, + ) + assert result.exit_code == 2 + assert f"{arg[0]} can't be used with: --advanced-query" in result.output + + +@advanced_query_incompat_test_params +def test_send_to_with_advanced_query_and_incompatible_argument_errors( + arg, cli_state, runner +): + + result = runner.invoke( + cli, + ["alerts", "send-to", "0.0.0.0", "--advanced-query", ADVANCED_QUERY_JSON, *arg], + obj=cli_state, + ) + assert result.exit_code == 2 + assert f"{arg[0]} can't be used with: --advanced-query" in result.output + + +@search_and_send_to_test +def test_search_and_send_to_when_given_begin_and_end_dates_uses_expected_query( + cli_state, runner, command, search_all_alerts_success +): + begin_date = get_test_date_str(days_ago=89) + end_date = get_test_date_str(days_ago=1) + + runner.invoke( + cli, + [*command, "--begin", begin_date, "--end", end_date], + obj=cli_state, + ) + query = cli_state.sdk.alerts.get_all_alert_details.call_args[0][0] + query_dict = dict(query) + + actual_begin = query_dict["groups"][0]["filters"][0]["value"] + expected_begin = f"{begin_date}T00:00:00.000000Z" + + actual_end = query_dict["groups"][0]["filters"][1]["value"] + expected_end = f"{end_date}T23:59:59.999999Z" + + assert actual_begin == expected_begin + assert actual_end == expected_end + + +@search_and_send_to_test +def test_search_when_given_begin_and_end_date_and_times_uses_expected_query( + cli_state, runner, command, 
search_all_alerts_success +): + begin_date = get_test_date_str(days_ago=89) + end_date = get_test_date_str(days_ago=1) + time = "15:33:02" + runner.invoke( + cli, + [*command, "--begin", f"{begin_date} {time}", "--end", f"{end_date} {time}"], + obj=cli_state, + ) + query = cli_state.sdk.alerts.get_all_alert_details.call_args[0][0] + query_dict = dict(query) + + actual_begin = query_dict["groups"][0]["filters"][0]["value"] + expected_begin = f"{begin_date}T{time}.000000Z" + + actual_end = query_dict["groups"][0]["filters"][1]["value"] + expected_end = f"{end_date}T{time}.000000Z" + + assert actual_begin == expected_begin + assert actual_end == expected_end + + +@search_and_send_to_test +def test_search_when_given_begin_date_and_time_without_seconds_uses_expected_query( + cli_state, runner, command, search_all_alerts_success +): + date = get_test_date_str(days_ago=89) + time = "15:33" + runner.invoke(cli, [*command, "--begin", f"{date} {time}"], obj=cli_state) + query = cli_state.sdk.alerts.get_all_alert_details.call_args[0][0] + query_dict = dict(query) + actual = query_dict["groups"][0]["filters"][0]["value"] + expected = f"{date}T{time}:00.000000Z" + assert actual == expected + + +@search_and_send_to_test +def test_search_and_send_to_when_given_end_date_and_time_uses_expected_query( + cli_state, runner, command, search_all_alerts_success +): + begin_date = get_test_date_str(days_ago=10) + end_date = get_test_date_str(days_ago=1) + time = "15:33" + runner.invoke( + cli, + [*command, "--begin", begin_date, "--end", f"{end_date} {time}"], + obj=cli_state, + ) + query = cli_state.sdk.alerts.get_all_alert_details.call_args[0][0] + query_dict = dict(query) + actual = query_dict["groups"][0]["filters"][1]["value"] + expected = f"{end_date}T{time}:00.000000Z" + assert actual == expected + + +@search_and_send_to_test +def test_search_and_send_to_when_given_begin_date_more_than_ninety_days_back_errors( + cli_state, runner, command +): + begin_date = 
get_test_date_str(days_ago=91) + " 12:51:00" + result = runner.invoke(cli, [*command, "--begin", begin_date], obj=cli_state) + assert "must be within 90 days" in result.output + assert result.exit_code == 2 + + +@search_and_send_to_test +def test_search_and_send_to_when_given_begin_date_and_not_use_checkpoint_and_cursor_exists_uses_begin_date( + cli_state, runner, command, search_all_alerts_success +): + begin_date = get_test_date_str(days_ago=1) + runner.invoke(cli, [*command, "--begin", begin_date], obj=cli_state) + query = cli_state.sdk.alerts.get_all_alert_details.call_args[0][0] + query_dict = dict(query) + actual_ts = query_dict["groups"][0]["filters"][0]["value"] + expected_ts = f"{begin_date}T00:00:00.000000Z" + assert actual_ts == expected_ts + assert filter_term_is_in_call_args(query, f.DateObserved._term) + + +@search_and_send_to_test +def test_search_and_send_to_when_end_date_is_before_begin_date_causes_exit( + cli_state, runner, command +): + begin_date = get_test_date_str(days_ago=1) + end_date = get_test_date_str(days_ago=3) + result = runner.invoke( + cli, + [*command, "--begin", begin_date, "--end", end_date], + obj=cli_state, + ) + assert result.exit_code == 2 + assert "'--begin': cannot be after --end date" in result.output + + +@search_and_send_to_test +def test_search_and_send_to_with_only_begin_calls_search_all_alerts_with_expected_filters( + cli_state, begin_option, runner, command, search_all_alerts_success +): + res = runner.invoke(cli, [*command, "--begin", "1d"], obj=cli_state) + query = cli_state.sdk.alerts.get_all_alert_details.call_args[0][0] + query_dict = dict(query) + expected_filter_groups = [ + { + "filterClause": "AND", + "filters": [ + { + "operator": "ON_OR_AFTER", + "term": "createdAt", + "value": begin_option.expected_timestamp, + } + ], + } + ] + assert res.exit_code == 0 + assert query_dict["groups"] == expected_filter_groups + + +@search_and_send_to_test +def 
test_search_and_send_to_with_use_checkpoint_and_without_begin_and_without_stored_checkpoint_causes_expected_error( + cli_state, alert_cursor_without_checkpoint, runner, command +): + result = runner.invoke(cli, [*command, "--use-checkpoint", "test"], obj=cli_state) + assert result.exit_code == 2 + assert ( + "--begin date is required for --use-checkpoint when no checkpoint exists yet." + in result.output + ) + + +@search_and_send_to_test +def test_search_and_send_to_with_use_checkpoint_and_with_begin_and_without_checkpoint_calls_search_all_alerts_with_begin_date( + cli_state, + begin_option, + alert_cursor_without_checkpoint, + runner, + command, + search_all_alerts_success, +): + res = runner.invoke( + cli, + [*command, "--use-checkpoint", "test", "--begin", "1d"], + obj=cli_state, + ) + query = cli_state.sdk.alerts.get_all_alert_details.call_args[0][0] + query_dict = dict(query) + actual_begin = query_dict["groups"][0]["filters"][0]["value"] + + assert res.exit_code == 0 + assert len(query._filter_group_list) == 1 + assert begin_option.expected_timestamp == actual_begin + + +@search_and_send_to_test +def test_search_and_send_to_with_use_checkpoint_and_with_begin_and_with_stored_checkpoint_calls_search_all_alerts_with_checkpoint_and_ignores_begin_arg( + cli_state, alert_cursor_with_checkpoint, runner, command, search_all_alerts_success +): + result = runner.invoke( + cli, + [*command, "--use-checkpoint", "test", "--begin", "1h"], + obj=cli_state, + ) + query = cli_state.sdk.alerts.get_all_alert_details.call_args[0][0] + assert result.exit_code == 0 + assert len(query._filter_group_list) == 1 + assert ( + f"checkpoint of {alert_cursor_with_checkpoint.expected_datetime} exists" + in result.output + ) + + +@search_and_send_to_test +def test_search_and_send_to_when_given_actor_is_uses_username_filter( + cli_state, runner, command, search_all_alerts_success +): + actor_name = "test.testerson" + runner.invoke( + cli, [*command, "--begin", "1h", "--actor", actor_name], 
obj=cli_state + ) + query = cli_state.sdk.alerts.get_all_alert_details.call_args[0][0] + assert f.Actor.is_in([actor_name]) in query._filter_group_list + + +@search_and_send_to_test +def test_search_and_send_to_when_given_exclude_actor_uses_actor_filter( + cli_state, runner, command, search_all_alerts_success +): + actor_name = "test.testerson" + runner.invoke( + cli, + [*command, "--begin", "1h", "--exclude-actor", actor_name], + obj=cli_state, + ) + query = cli_state.sdk.alerts.get_all_alert_details.call_args[0][0] + assert f.Actor.not_in([actor_name]) in query._filter_group_list + + +@search_and_send_to_test +def test_search_and_send_to_when_given_rule_name_uses_rule_name_filter( + cli_state, runner, command, search_all_alerts_success +): + rule_name = "departing employee" + runner.invoke( + cli, + [*command, "--begin", "1h", "--rule-name", rule_name], + obj=cli_state, + ) + query = cli_state.sdk.alerts.get_all_alert_details.call_args[0][0] + assert f.RuleName.is_in([rule_name]) in query._filter_group_list + + +@search_and_send_to_test +def test_search_and_send_to_when_given_exclude_rule_name_uses_rule_name_not_filter( + cli_state, runner, command, search_all_alerts_success +): + rule_name = "departing employee" + runner.invoke( + cli, + [*command, "--begin", "1h", "--exclude-rule-name", rule_name], + obj=cli_state, + ) + query = cli_state.sdk.alerts.get_all_alert_details.call_args[0][0] + assert f.RuleName.not_in([rule_name]) in query._filter_group_list + + +@search_and_send_to_test +def test_search_and_send_to_when_given_rule_type_uses_rule_name_filter( + cli_state, runner, command, search_all_alerts_success +): + rule_type = "FedEndpointExfiltration" + runner.invoke( + cli, + [*command, "--begin", "1h", "--rule-type", rule_type], + obj=cli_state, + ) + query = cli_state.sdk.alerts.get_all_alert_details.call_args[0][0] + assert f.RuleType.is_in([rule_type]) in query._filter_group_list + + +@search_and_send_to_test +def 
test_search_and_send_to_when_given_exclude_rule_type_uses_rule_name_not_filter( + cli_state, runner, command, search_all_alerts_success +): + rule_type = "FedEndpointExfiltration" + runner.invoke( + cli, + [*command, "--begin", "1h", "--exclude-rule-type", rule_type], + obj=cli_state, + ) + query = cli_state.sdk.alerts.get_all_alert_details.call_args[0][0] + assert f.RuleType.not_in([rule_type]) in query._filter_group_list + + +@search_and_send_to_test +def test_search_and_send_to_when_given_rule_id_uses_rule_name_filter( + cli_state, runner, command, search_all_alerts_success +): + rule_id = "departing employee" + runner.invoke(cli, [*command, "--begin", "1h", "--rule-id", rule_id], obj=cli_state) + query = cli_state.sdk.alerts.get_all_alert_details.call_args[0][0] + assert f.RuleId.is_in([rule_id]) in query._filter_group_list + + +@search_and_send_to_test +def test_search_and_send_to_when_given_exclude_rule_id_uses_rule_name_not_filter( + cli_state, runner, command, search_all_alerts_success +): + rule_id = "departing employee" + runner.invoke( + cli, + [*command, "--begin", "1h", "--exclude-rule-id", rule_id], + obj=cli_state, + ) + query = cli_state.sdk.alerts.get_all_alert_details.call_args[0][0] + assert f.RuleId.not_in([rule_id]) in query._filter_group_list + + +@search_and_send_to_test +def test_search_and_send_to_when_given_description_uses_description_filter( + cli_state, runner, command, search_all_alerts_success +): + description = "test description" + runner.invoke( + cli, + [*command, "--begin", "1h", "--description", description], + obj=cli_state, + ) + query = cli_state.sdk.alerts.get_all_alert_details.call_args[0][0] + assert f.Description.contains(description) in query._filter_group_list + + +@search_and_send_to_test +def test_search_and_send_to_when_given_multiple_search_args_uses_expected_filters( + cli_state, runner, command, search_all_alerts_success +): + actor = "test.testerson@example.com" + exclude_actor = "flag.flagerson@example.com" + 
rule_name = "departing employee" + + runner.invoke( + cli, + [ + *command, + "--begin", + "1h", + "--actor", + actor, + "--exclude-actor", + exclude_actor, + "--rule-name", + rule_name, + ], + obj=cli_state, + ) + query = cli_state.sdk.alerts.get_all_alert_details.call_args[0][0] + assert f.Actor.is_in([actor]) in query._filter_group_list + assert f.Actor.not_in([exclude_actor]) in query._filter_group_list + assert f.RuleName.is_in([rule_name]) in query._filter_group_list + + +@search_and_send_to_test +def test_search_and_send_to_with_or_query_flag_produces_expected_query( + runner, cli_state, command, search_all_alerts_success +): + begin_date = get_test_date_str(days_ago=10) + test_actor = "test@example.com" + test_rule_type = "FedEndpointExfiltration" + runner.invoke( + cli, + [ + *command, + "--or-query", + "--begin", + begin_date, + "--actor", + test_actor, + "--rule-type", + test_rule_type, + ], + obj=cli_state, + ) + expected_query = { + "tenantId": None, + "groupClause": "AND", + "groups": [ + { + "filterClause": "AND", + "filters": [ + { + "operator": "ON_OR_AFTER", + "term": "createdAt", + "value": f"{begin_date}T00:00:00.000000Z", + } + ], + }, + { + "filterClause": "OR", + "filters": [ + {"operator": "IS", "term": "actor", "value": "test@example.com"}, + { + "operator": "IS", + "term": "type", + "value": "FedEndpointExfiltration", + }, + ], + }, + ], + "pgNum": 0, + "pgSize": 25, + "srtDirection": "asc", + "srtKey": "CreatedAt", + } + query = cli_state.sdk.alerts.get_all_alert_details.call_args[0][0] + actual_query = dict(query) + assert actual_query == expected_query + + +@search_and_send_to_test +def test_search_and_send_to_handles_error_expected_message_logged_and_printed( + runner, cli_state, caplog, command +): + exception_msg = "Test Exception" + expected_msg = "Unknown problem occurred" + cli_state.sdk.alerts.get_all_alert_details.side_effect = Exception(exception_msg) + with caplog.at_level(logging.ERROR): + result = runner.invoke(cli, 
@pytest.mark.parametrize(
    "protocol", (ServerProtocol.TLS_TCP, ServerProtocol.TCP, ServerProtocol.UDP)
)
def test_send_to_allows_protocol_arg(cli_state, runner, protocol):
    """`alerts send-to` accepts each supported --protocol value.

    Bug fix: the parametrize tuple previously listed TLS_TCP twice, so plain
    TCP was never exercised.
    """
    res = runner.invoke(
        cli,
        ["alerts", "send-to", "0.0.0.0", "--begin", "1d", "--protocol", protocol],
        obj=cli_state,
    )
    assert res.exit_code == 0


def test_send_to_when_given_unknown_protocol_fails(cli_state, runner):
    """An unsupported --protocol value causes a non-zero exit code."""
    res = runner.invoke(
        cli,
        ["alerts", "send-to", "0.0.0.0", "--begin", "1d", "--protocol", "ATM"],
        obj=cli_state,
    )
    assert res.exit_code


def test_send_to_certs_and_ignore_cert_validation_args_are_incompatible(
    cli_state, runner
):
    """--certs and --ignore-cert-validation cannot be used together."""
    res = runner.invoke(
        cli,
        [
            "alerts",
            "send-to",
            "0.0.0.0",
            "--begin",
            "1d",
            "--protocol",
            "TLS-TCP",
            "--certs",
            "certs/file",
            "--ignore-cert-validation",
        ],
        obj=cli_state,
    )
    assert "Error: --ignore-cert-validation can't be used with: --certs" in res.output


def test_send_to_creates_expected_logger(cli_state, runner, send_to_logger_factory):
    """The server logger is created with hostname, protocol, format, and certs."""
    runner.invoke(
        cli,
        [
            "alerts",
            "send-to",
            "0.0.0.0",
            "--begin",
            "1d",
            "--protocol",
            "TLS-TCP",
            "--certs",
            "certs/file",
        ],
        obj=cli_state,
    )
    send_to_logger_factory.assert_called_once_with(
        "0.0.0.0", "TLS-TCP", "RAW-JSON", "certs/file"
    )


def test_send_to_when_given_ignore_cert_validation_uses_certs_equal_to_ignore_str(
    cli_state, runner, send_to_logger_factory
):
    """--ignore-cert-validation passes the sentinel string "ignore" as the certs arg."""
    runner.invoke(
        cli,
        [
            "alerts",
            "send-to",
            "0.0.0.0",
            "--begin",
            "1d",
            "--protocol",
            "TLS-TCP",
            "--ignore-cert-validation",
        ],
        obj=cli_state,
    )
    send_to_logger_factory.assert_called_once_with(
        "0.0.0.0", "TLS-TCP", "RAW-JSON", "ignore"
    )
@pytest.mark.parametrize("protocol", (ServerProtocol.UDP, ServerProtocol.TCP))
def test_send_to_when_given_certs_with_non_tls_protocol_fails_expectedly(
    cli_state, runner, protocol
):
    """Passing --certs alongside a non-TLS protocol is rejected with an error."""
    cmd = [
        "alerts",
        "send-to",
        "0.0.0.0",
        "--begin",
        "1d",
        "--protocol",
        protocol,
        "--certs",
        "certs.pem",
    ]
    result = runner.invoke(cli, cmd, obj=cli_state)
    assert "'--certs' can only be used with '--protocol TLS-TCP'" in result.output


def test_show_outputs_expected_headers(cli_state, runner, full_alert_details_response):
    """`alerts show` renders every expected column header."""
    cli_state.sdk.alerts.get_details.return_value = full_alert_details_response
    result = runner.invoke(cli, ["alerts", "show", "TEST-ALERT-ID"], obj=cli_state)
    expected_headers = (
        "Id",
        "RuleName",
        "Username",
        "ObservedDate",
        "State",
        "Severity",
        "Description",
    )
    for header in expected_headers:
        assert header in result.output
def test_show_when_alert_has_note_includes_note(
    cli_state, runner, full_alert_details_response
):
    """`alerts show` prints the Note section when the alert carries a note."""
    cli_state.sdk.alerts.get_details.return_value = full_alert_details_response
    result = runner.invoke(cli, ["alerts", "show", "TEST-ALERT-ID"], obj=cli_state)
    # Note is included in `full_alert_details_response` initially.
    assert "Note" in result.output
    assert "TEST-NOTE-CLI-UNIT-TESTS" in result.output


def test_show_when_alert_has_no_note_excludes_note(
    mocker, cli_state, runner, full_alert_details_response
):
    """`alerts show` omits the Note section when the alert's note is None."""
    from copy import deepcopy

    # Bug fix: `dict()` only makes a shallow copy, so assigning into the nested
    # alert dict mutated the shared ALERT_DETAILS_FULL_RESPONSE module constant
    # and could leak into other tests. Deep-copy before mutating.
    response_data = deepcopy(ALERT_DETAILS_FULL_RESPONSE)
    response_data["alerts"][0]["note"] = None
    cli_state.sdk.alerts.get_details.return_value = create_mock_response(
        mocker, data=response_data
    )
    result = runner.invoke(cli, ["alerts", "show", "TEST-ALERT-ID"], obj=cli_state)
    assert "Note" not in result.output
def test_show_when_alert_has_observations_and_includes_observations_outputs_observations(
    cli_state, runner, full_alert_details_response
):
    """--include-observations renders the observations section and its fields."""
    cli_state.sdk.alerts.get_details.return_value = full_alert_details_response
    result = runner.invoke(
        cli,
        ["alerts", "show", "TEST-ALERT-ID", "--include-observations"],
        obj=cli_state,
    )
    assert "Observations:" in result.output
    assert "OBSERVATION" in result.output
    assert "f561e556-a746-4db0-b99b-71546adf57c4" in result.output
    assert "observedAt" in result.output
    assert "FedEndpointExfiltration" in result.output


def test_show_when_alert_has_observations_and_excludes_observations_does_not_output_observations(
    cli_state, runner, full_alert_details_response
):
    """Without --include-observations no observations section is printed."""
    cli_state.sdk.alerts.get_details.return_value = full_alert_details_response
    result = runner.invoke(cli, ["alerts", "show", "TEST-ALERT-ID"], obj=cli_state)
    assert "Observations:" not in result.output


def test_show_when_alert_does_not_have_observations_and_includes_observations_outputs_no_observations(
    mocker, cli_state, runner
):
    """--include-observations on an alert without observations reports none found."""
    from copy import deepcopy

    # Bug fix: deep-copy instead of `dict()`; the shallow copy mutated the
    # nested alert dict inside the shared ALERT_DETAILS_FULL_RESPONSE constant,
    # polluting later tests that rely on observations being present.
    response_data = deepcopy(ALERT_DETAILS_FULL_RESPONSE)
    response_data["alerts"][0]["observations"] = None
    cli_state.sdk.alerts.get_details.return_value = create_mock_response(
        mocker, data=response_data
    )
    result = runner.invoke(
        cli,
        ["alerts", "show", "TEST-ALERT-ID", "--include-observations"],
        obj=cli_state,
    )
    assert "No observations found" in result.output
    assert "Observations:" not in result.output
    assert "FedEndpointExfiltration" not in result.output


def test_update_when_given_state_calls_py42_update_state(cli_state, runner):
    """`alerts update --state` delegates to sdk.alerts.update_state with no note."""
    runner.invoke(
        cli,
        ["alerts", "update", "TEST-ALERT-ID", "--state", AlertState.PENDING],
        obj=cli_state,
    )
    cli_state.sdk.alerts.update_state.assert_called_once_with(
        AlertState.PENDING, ["TEST-ALERT-ID"], note=None
    )
def test_update_when_given_note_and_not_state_calls_py42_update_note(cli_state, runner):
    """`alerts update --note` without --state delegates to sdk.alerts.update_note."""
    update_cmd = ["alerts", "update", "TEST-ALERT-ID", "--note", "test-note"]
    runner.invoke(cli, update_cmd, obj=cli_state)
    cli_state.sdk.alerts.update_note.assert_called_once_with(
        "TEST-ALERT-ID", "test-note"
    )


def test_bulk_update_uses_expected_arguments(runner, mocker, cli_state_with_user):
    """`alerts bulk update` parses the CSV into one kwargs dict per row."""
    bulk_processor = mocker.patch("code42cli.cmds.alerts.run_bulk_process")
    csv_rows = ["id,state,note\n", "1,PENDING,note1\n", "2,IN_PROGRESS,note2\n"]
    with runner.isolated_filesystem():
        with open("test_update.csv", "w") as csv:
            csv.writelines(csv_rows)
        runner.invoke(
            cli,
            ["alerts", "bulk", "update", "test_update.csv"],
            obj=cli_state_with_user,
        )
        assert bulk_processor.call_args[0][1] == [
            {"id": "1", "state": "PENDING", "note": "note1"},
            {"id": "2", "state": "IN_PROGRESS", "note": "note2"},
        ]
cli +from code42cli.util import hash_event +from code42cli.util import parse_timestamp + +TEST_AUDIT_LOG_TIMESTAMP_1 = "2020-01-01T12:00:00.000Z" +TEST_AUDIT_LOG_TIMESTAMP_2 = "2020-02-01T12:01:00.000111Z" +TEST_AUDIT_LOG_TIMESTAMP_3 = "2020-03-01T02:00:00.123456Z" +CURSOR_TIMESTAMP = parse_timestamp(TEST_AUDIT_LOG_TIMESTAMP_3) +TEST_EVENTS_WITH_SAME_TIMESTAMP = [ + { + "type$": "audit_log::logged_in/1", + "actorId": "42", + "actorName": "42@example.com", + "actorAgent": "py42 python code42cli", + "actorIpAddress": "200.100.300.42", + "timestamp": TEST_AUDIT_LOG_TIMESTAMP_1, + }, + { + "type$": "audit_log::logged_in/1", + "actorId": "43", + "actorName": "43@example.com", + "actorAgent": "py42 python code42cli", + "actorIpAddress": "200.100.300.42", + "timestamp": TEST_AUDIT_LOG_TIMESTAMP_1, + }, +] +TEST_HIGHEST_TIMESTAMP = "2020-03-01T02:00:00.123456Z" +TEST_EVENTS_WITH_DIFFERENT_TIMESTAMPS = [ + { + "type$": "audit_log::logged_in/1", + "actorId": "44", + "actorName": "44@example.com", + "actorAgent": "py42 python code42cli", + "actorIpAddress": "200.100.300.42", + "timestamp": TEST_AUDIT_LOG_TIMESTAMP_2, + }, + { + "type$": "audit_log::logged_in/1", + "actorId": "45", + "actorName": "45@example.com", + "actorAgent": "py42 python code42cli", + "actorIpAddress": "200.100.300.42", + "timestamp": TEST_AUDIT_LOG_TIMESTAMP_3, + }, +] +search_and_send_to_test = get_mark_for_search_and_send_to("audit-logs") + + +@pytest.fixture +def audit_log_cursor_with_checkpoint(mocker): + mock_cursor = mocker.MagicMock(spec=AuditLogCursorStore) + mock_cursor.get.return_value = CURSOR_TIMESTAMP + mocker.patch( + "code42cli.cmds.auditlogs._get_audit_log_cursor_store", + return_value=mock_cursor, + ) + return mock_cursor + + +@pytest.fixture +def audit_log_cursor_with_checkpoint_and_events(mocker): + mock_cursor = mocker.MagicMock(spec=AuditLogCursorStore) + mock_cursor.get.return_value = CURSOR_TIMESTAMP + mock_cursor.get_events.return_value = [ + 
@pytest.fixture
def date_str():
    """A timestamp string from ten days ago in `%Y-%m-%d %H:%M:%S` format."""
    ten_days_ago = datetime.utcnow() - timedelta(days=10)
    return ten_days_ago.strftime("%Y-%m-%d %H:%M:%S")


@pytest.fixture
def send_to_logger_factory(mocker):
    """Patch over the server-logger factory used by the send-to commands."""
    return mocker.patch("code42cli.cmds.search._try_get_logger_for_server")


@pytest.fixture
def send_to_logger(mocker, send_to_logger_factory):
    """A mock Logger returned by the patched server-logger factory."""
    logger = mocker.MagicMock(spec=Logger)
    send_to_logger_factory.return_value = logger
    return logger


@pytest.fixture
def mock_audit_log_response(mocker):
    """Two pages of audit-log events: a same-timestamp pair, then a differing pair."""
    pages = (
        create_mock_response(mocker, data={"events": TEST_EVENTS_WITH_SAME_TIMESTAMP}),
        create_mock_response(
            mocker, data={"events": TEST_EVENTS_WITH_DIFFERENT_TIMESTAMPS}
        ),
    )

    def response_gen():
        yield from pages

    return response_gen()


@pytest.fixture
def mock_audit_log_response_with_10_records(mocker):
    """Ten identical pages of same-timestamp events, serialized as JSON text."""
    data = json.dumps({"events": TEST_EVENTS_WITH_SAME_TIMESTAMP})
    pages = [create_mock_response(mocker, data=data) for _ in range(10)]

    def response_gen():
        yield from pages

    return response_gen()


@pytest.fixture
def mock_audit_log_response_with_only_same_timestamps(mocker):
    """A single page whose two events share one timestamp."""
    page_data = {"events": TEST_EVENTS_WITH_SAME_TIMESTAMP}

    def response_gen():
        yield create_mock_response(mocker, data=page_data)

    return response_gen()


@pytest.fixture
def mock_audit_log_response_with_missing_ms_timestamp(mocker):
    """A single event whose timestamp lacks fractional seconds."""
    event = dict(TEST_EVENTS_WITH_SAME_TIMESTAMP[0])
    event["timestamp"] = "2020-01-01T12:00:00Z"

    def response_gen():
        yield create_mock_response(mocker, data={"events": [event]})

    return response_gen()
event["timestamp"] = "2021-07-01T14:47:13.093616Z" + + def response_gen(): + yield create_mock_response(mocker, data={"events": [event]}) + + return response_gen() + + +@pytest.fixture +def mock_audit_log_response_with_nano_seconds(mocker): + event = dict(TEST_EVENTS_WITH_SAME_TIMESTAMP[0]) + event["timestamp"] = "2021-07-01T14:47:13.093616500Z" + + def response_gen(): + yield create_mock_response(mocker, data={"events": [event]}) + + return response_gen() + + +@pytest.fixture +def mock_audit_log_response_with_error_causing_timestamp(mocker): + good_event = dict(TEST_EVENTS_WITH_SAME_TIMESTAMP[0]) + bad_event = dict(TEST_EVENTS_WITH_SAME_TIMESTAMP[0]) + bad_event["timestamp"] = "I AM NOT A TIMESTAMP" # Will cause a ValueError. + + # good_event should still get processed. + response_data = {"events": [good_event, bad_event]} + + def response_gen(): + yield create_mock_response(mocker, data=response_data) + + return response_gen() + + +@search_and_send_to_test +def test_search_and_send_to_handles_json_format(runner, cli_state, date_str, command): + runner.invoke(cli, [*command, "-b", date_str], obj=cli_state) + assert cli_state.sdk.auditlogs.get_all.call_count == 1 + + +@search_and_send_to_test +def test_search_and_send_to_handles_filter_parameters( + runner, cli_state, date_str, command +): + expected_begin_timestamp = convert_datetime_to_timestamp( + MagicDate(rounding_func=round_datetime_to_day_start).convert( + date_str, None, None + ) + ) + runner.invoke( + cli, + [ + *command, + "--actor-username", + "test@example.com", + "--actor-username", + "test2@test.example.com", + "--begin", + date_str, + ], + obj=cli_state, + ) + cli_state.sdk.auditlogs.get_all.assert_called_once_with( + usernames=("test@example.com", "test2@test.example.com"), + affected_user_ids=(), + affected_usernames=(), + begin_time=expected_begin_timestamp, + end_time=None, + event_types=(), + user_ids=(), + user_ip_addresses=(), + ) + + +@search_and_send_to_test +def 
test_search_and_send_to_handles_all_filter_parameters( + runner, cli_state, date_str, command +): + end_time = datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S") + expected_begin_timestamp = convert_datetime_to_timestamp( + MagicDate(rounding_func=round_datetime_to_day_start).convert( + date_str, None, None + ) + ) + expected_end_timestamp = convert_datetime_to_timestamp( + MagicDate(rounding_func=round_datetime_to_day_end).convert(end_time, None, None) + ) + runner.invoke( + cli, + [ + *command, + "--actor-username", + "test@example.com", + "--actor-username", + "test2@test.example.com", + "--event-type", + "saved-search", + "--actor-ip", + "0.0.0.0", + "--affected-username", + "test@test.example.com", + "--affected-user-id", + "123", + "--affected-user-id", + "456", + "--actor-user-id", + "userid", + "-b", + date_str, + "--end", + end_time, + ], + obj=cli_state, + ) + cli_state.sdk.auditlogs.get_all.assert_called_once_with( + usernames=("test@example.com", "test2@test.example.com"), + affected_user_ids=("123", "456"), + affected_usernames=("test@test.example.com",), + begin_time=expected_begin_timestamp, + end_time=expected_end_timestamp, + event_types=("saved-search",), + user_ids=("userid",), + user_ip_addresses=("0.0.0.0",), + ) + + +def test_send_to_makes_expected_call_count_to_the_logger_method( + cli_state, runner, send_to_logger, mock_audit_log_response +): + cli_state.sdk.auditlogs.get_all.return_value = mock_audit_log_response + runner.invoke( + cli, ["audit-logs", "send-to", "localhost", "--begin", "1d"], obj=cli_state + ) + assert send_to_logger.info.call_count == 4 + + +def test_send_to_creates_expected_logger(cli_state, runner, send_to_logger_factory): + runner.invoke( + cli, + [ + "audit-logs", + "send-to", + "0.0.0.0", + "--begin", + "1d", + "--protocol", + "TLS-TCP", + "--certs", + "certs/file", + ], + obj=cli_state, + ) + send_to_logger_factory.assert_called_once_with( + "0.0.0.0", "TLS-TCP", "RAW-JSON", "certs/file" + ) + + +def 
def test_send_to_emits_events_in_chronological_order(
    cli_state, runner, send_to_logger, mock_audit_log_response
):
    """Events from multiple pages are logged sorted by ascending timestamp."""
    cli_state.sdk.auditlogs.get_all.return_value = mock_audit_log_response
    runner.invoke(
        cli, ["audit-logs", "send-to", "localhost", "--begin", "1d"], obj=cli_state
    )
    expected_order = (
        TEST_AUDIT_LOG_TIMESTAMP_1,
        TEST_AUDIT_LOG_TIMESTAMP_1,
        TEST_AUDIT_LOG_TIMESTAMP_2,
        TEST_AUDIT_LOG_TIMESTAMP_3,
    )
    for position, expected_timestamp in enumerate(expected_order):
        logged_event = send_to_logger.info.call_args_list[position][0][0]
        assert logged_event["timestamp"] == expected_timestamp


@pytest.mark.parametrize("protocol", (ServerProtocol.UDP, ServerProtocol.TCP))
def test_send_to_when_given_ignore_cert_validation_with_non_tls_protocol_fails_expectedly(
    cli_state, runner, protocol
):
    """--ignore-cert-validation is rejected for non-TLS protocols."""
    cmd = [
        "audit-logs",
        "send-to",
        "0.0.0.0",
        "--begin",
        "1d",
        "--protocol",
        protocol,
        "--ignore-cert-validation",
    ]
    result = runner.invoke(cli, cmd, obj=cli_state)
    assert (
        "'--ignore-cert-validation' can only be used with '--protocol TLS-TCP'"
        in result.output
    )
@search_and_send_to_test
def test_search_and_send_to_with_checkpoint_saves_expected_cursor_timestamp(
    cli_state,
    runner,
    send_to_logger,
    mock_audit_log_response,
    audit_log_cursor_with_checkpoint,
    command,
):
    """Each emitted event updates the cursor; the final value is the max timestamp."""
    cli_state.sdk.auditlogs.get_all.return_value = mock_audit_log_response
    runner.invoke(
        cli,
        [*command, "--begin", "1d", "--use-checkpoint", "test"],
        obj=cli_state,
    )
    replace_calls = audit_log_cursor_with_checkpoint.replace.call_args_list
    assert len(replace_calls) == 4
    assert replace_calls[3][0] == ("test", CURSOR_TIMESTAMP)


@search_and_send_to_test
def test_search_and_send_to_with_existing_checkpoint_replaces_begin_arg_if_passed(
    cli_state,
    runner,
    send_to_logger,
    mock_audit_log_response,
    audit_log_cursor_with_checkpoint,
    command,
):
    """A stored checkpoint timestamp wins over an explicit --begin value."""
    runner.invoke(
        cli,
        [*command, "--begin", "1d", "--use-checkpoint", "test"],
        obj=cli_state,
    )
    begin_time = cli_state.sdk.auditlogs.get_all.call_args[1]["begin_time"]
    assert begin_time == CURSOR_TIMESTAMP


def test_search_with_existing_checkpoint_events_skips_duplicate_events(
    cli_state,
    runner,
    mock_audit_log_response,
    audit_log_cursor_with_checkpoint_and_events,
):
    """Events whose hashes are already in the checkpoint are not re-emitted."""
    cli_state.sdk.auditlogs.get_all.return_value = mock_audit_log_response
    result = runner.invoke(
        cli,
        ["audit-logs", "search", "--begin", "1d", "--use-checkpoint", "test"],
        obj=cli_state,
    )
    assert "42@example.com" not in result.stdout
    assert "43@example.com" in result.stdout
@pytest.mark.parametrize(
    "protocol", (ServerProtocol.TLS_TCP, ServerProtocol.TCP, ServerProtocol.UDP)
)
def test_send_to_allows_protocol_arg(cli_state, runner, protocol):
    """`audit-logs send-to` accepts each supported --protocol value.

    Bug fix: the parametrize tuple previously listed TLS_TCP twice, so plain
    TCP was never exercised.
    """
    res = runner.invoke(
        cli,
        ["audit-logs", "send-to", "0.0.0.0", "--begin", "1d", "--protocol", protocol],
        obj=cli_state,
    )
    assert res.exit_code == 0


def test_send_when_given_unknown_protocol_fails(cli_state, runner):
    """An unsupported --protocol value causes a non-zero exit code."""
    res = runner.invoke(
        cli,
        ["audit-logs", "send-to", "0.0.0.0", "--begin", "1d", "--protocol", "ATM"],
        obj=cli_state,
    )
    assert res.exit_code


def test_send_to_certs_and_ignore_cert_validation_args_are_incompatible(
    cli_state, runner
):
    """--certs and --ignore-cert-validation cannot be combined."""
    res = runner.invoke(
        cli,
        [
            "audit-logs",
            "send-to",
            "0.0.0.0",
            "--begin",
            "1d",
            "--protocol",
            "TLS-TCP",
            "--certs",
            "certs/file",
            "--ignore-cert-validation",
        ],
        obj=cli_state,
    )
    assert "Error: --ignore-cert-validation can't be used with: --certs" in res.output


@search_and_send_to_test
def test_search_and_send_when_timestamps_missing_milliseconds_saves_checkpoint(
    cli_state,
    runner,
    send_to_logger,
    mock_audit_log_response_with_missing_ms_timestamp,
    audit_log_cursor_with_checkpoint,
    command,
):
    """Timestamps without fractional seconds still parse and update the cursor."""
    cli_state.sdk.auditlogs.get_all.return_value = (
        mock_audit_log_response_with_missing_ms_timestamp
    )
    runner.invoke(
        cli,
        [*command, "--begin", "1d", "--use-checkpoint", "test"],
        obj=cli_state,
    )
    audit_log_cursor_with_checkpoint.replace.assert_called_once_with(
        "test", 1577880000.0
    )
mock_audit_log_response_with_micro_seconds, + audit_log_cursor_with_checkpoint, + command, +): + cli_state.sdk.auditlogs.get_all.return_value = ( + mock_audit_log_response_with_micro_seconds + ) + runner.invoke( + cli, + [*command, "--begin", "1d", "--use-checkpoint", "test"], + obj=cli_state, + ) + audit_log_cursor_with_checkpoint.replace.assert_called_once_with( + "test", 1625150833.093616 + ) + + +@search_and_send_to_test +def test_search_and_send_when_timestamps_have_nanoseconds_saves_checkpoint( + cli_state, + runner, + send_to_logger, + mock_audit_log_response_with_nano_seconds, + audit_log_cursor_with_checkpoint, + command, +): + cli_state.sdk.auditlogs.get_all.return_value = ( + mock_audit_log_response_with_nano_seconds + ) + runner.invoke( + cli, + [*command, "--begin", "1d", "--use-checkpoint", "test"], + obj=cli_state, + ) + call_args = audit_log_cursor_with_checkpoint.replace.call_args + assert call_args[0][0] == "test" + assert call_args[0][1] == 1625150833.093616 + + +def test_search_if_error_occurs_when_processing_event_timestamp_does_not_store_error_timestamp( + cli_state, + runner, + mock_audit_log_response_with_error_causing_timestamp, + audit_log_cursor_with_checkpoint, +): + cli_state.sdk.auditlogs.get_all.return_value = ( + mock_audit_log_response_with_error_causing_timestamp + ) + runner.invoke( + cli, + ["audit-logs", "search", "--use-checkpoint", "test"], + obj=cli_state, + ) + + # Saved the timestamp from the good event but not the bad event + audit_log_cursor_with_checkpoint.replace.assert_called_once_with( + "test", 1577880000.0 + ) + + +# def test_search_when_table_format_and_using_output_via_pager_only_includes_header_keys_once( +# cli_state, +# runner, +# mock_audit_log_response_with_10_records, +# audit_log_cursor_with_checkpoint, +# ): +# cli_state.sdk.auditlogs.get_all.return_value = ( +# mock_audit_log_response_with_10_records +# ) +# result = runner.invoke( +# cli, +# ["audit-logs", "search", "--use-checkpoint", "test"], +# 
obj=cli_state, +# ) +# output = result.output +# output = output.split(" ") +# output = [s for s in output if s] +# assert ( +# output.count("Timestamp") +# == output.count("ActorName") +# == output.count("ActorIpAddress") +# == output.count("AffectedUserUID") +# == 1 +# ) + + +def test_send_to_if_error_occurs_still_processes_events( + cli_state, + runner, + mock_audit_log_response_with_error_causing_timestamp, + audit_log_cursor_with_checkpoint, + send_to_logger, +): + cli_state.sdk.auditlogs.get_all.return_value = ( + mock_audit_log_response_with_error_causing_timestamp + ) + runner.invoke( + cli, + [ + "audit-logs", + "send-to", + "0.0.0.0", + "--begin", + "1d", + "--use-checkpoint", + "test", + ], + obj=cli_state, + ) + assert ( + send_to_logger.info.call_args_list[0][0][0]["timestamp"] + == TEST_AUDIT_LOG_TIMESTAMP_1 + ) + assert ( + send_to_logger.info.call_args_list[1][0][0]["timestamp"] + == "I AM NOT A TIMESTAMP" + ) + + +def test_send_to_if_error_occurs_when_processing_event_timestamp_does_not_store_error_timestamp( + cli_state, + runner, + mock_audit_log_response_with_error_causing_timestamp, + audit_log_cursor_with_checkpoint, + send_to_logger, +): + cli_state.sdk.auditlogs.get_all.return_value = ( + mock_audit_log_response_with_error_causing_timestamp + ) + runner.invoke( + cli, + [ + "audit-logs", + "send-to", + "0.0.0.0", + "--begin", + "1d", + "--use-checkpoint", + "test", + ], + obj=cli_state, + ) + + # Saved the timestamp from the good event but not the bad event + audit_log_cursor_with_checkpoint.replace.assert_called_once_with( + "test", 1577880000.0 + ) diff --git a/tests/cmds/test_cases.py b/tests/cmds/test_cases.py new file mode 100644 index 000000000..3b9a288f9 --- /dev/null +++ b/tests/cmds/test_cases.py @@ -0,0 +1,538 @@ +import json +import os +from unittest import mock +from unittest.mock import mock_open + +import pytest +from py42.exceptions import Py42BadRequestError +from py42.exceptions import Py42CaseAlreadyHasEventError +from 
py42.exceptions import Py42CaseNameExistsError +from py42.exceptions import Py42DescriptionLimitExceededError +from py42.exceptions import Py42NotFoundError +from py42.exceptions import Py42UpdateClosedCaseError +from py42.response import Py42Response + +from code42cli.main import cli + + +ALL_EVENTS = """{ + "events": [ + { + "eventId": "0_147e9445-2f30-4a91-8b2a-9455332e880a_973435567569502913_986467523038446097_163", + "eventTimestamp": "2020-12-23T14:24:44.593Z", + "exposure": [ + "OutsideTrustedDomains", + "IsPublic" + ], + "fileName": "example.docx", + "filePath": "/Users/casey/Documents/" + } + ], + "totalCount": 42 +}""" +ALL_CASES = """{ + "cases": [ + { + "assignee": 273411254592236320, + "assigneeUsername": "test@example.com", + "createdAt": "2020-10-27T15:16:05.369203Z", + "createdByUserUid": 806150685834341100, + "createdByUsername": "adrian@example.com", + "lastModifiedByUserUid": 806150685834341100, + "lastModifiedByUsername": "adrian@example.com", + "name": "Sample case name", + "number": 942897, + "status": "OPEN", + "subject": 421380797518239200, + "subjectUsername": "casey@example.com", + "updatedAt": "2021-01-24T11:00:04.217878Z" + } + ], + "totalCount": 42 +}""" +CASE_DETAILS = """{ + "assignee": 273411254592236320, + "assigneeUsername": "test-single@example.com", + "createdAt": "2020-10-27T15:16:05.369203Z", + "createdByUserUid": 806150685834341100, + "createdByUsername": "adrian@example.com", + "lastModifiedByUserUid": 806150685834341100, + "lastModifiedByUsername": "adrian@example.com", + "name": "Sample case name", + "number": 123456, + "status": "OPEN", + "subject": 421380797518239200, + "subjectUsername": "casey@example.com", + "updatedAt": "2021-01-24T11:00:04.217878Z" +} +""" +MISSING_ARGUMENT_ERROR = "Missing argument '{}'." +MISSING_NAME = MISSING_ARGUMENT_ERROR.format("NAME") +MISSING_CASE_NUMBER_ARG = MISSING_ARGUMENT_ERROR.format("CASE_NUMBER") +MISSING_OPTION_ERROR = "Missing option '--{}'." 
# Expected CLI error strings for missing required options.
MISSING_EVENT_ID = MISSING_OPTION_ERROR.format("event-id")
MISSING_CASE_NUMBER_OPTION = MISSING_OPTION_ERROR.format("case-number")


@pytest.fixture
def py42_response(mocker):
    """A bare mock of py42's Py42Response."""
    return mocker.MagicMock(spec=Py42Response)


@pytest.fixture
def case_already_exists_error(custom_error):
    """The py42 error raised when a case name already exists."""
    return Py42CaseNameExistsError(custom_error, "test case")


@pytest.fixture
def case_description_limit_exceeded_error(custom_error):
    """The py42 error raised when a case description is too long."""
    return Py42DescriptionLimitExceededError(custom_error)


@pytest.fixture
def case_already_has_event_error(custom_error):
    """The py42 error raised when an event is already attached to a case."""
    return Py42CaseAlreadyHasEventError(custom_error)


@pytest.fixture
def update_on_a_closed_case_error(custom_error):
    """The py42 error raised when updating a closed case."""
    return Py42UpdateClosedCaseError(custom_error)


def test_create_calls_create_with_expected_params(runner, cli_state):
    """`cases create NAME` forwards the name with every optional field defaulted."""
    runner.invoke(cli, ["cases", "create", "TEST_CASE"], obj=cli_state)
    cli_state.sdk.cases.create.assert_called_once_with(
        "TEST_CASE", assignee=None, description=None, findings=None, subject=None
    )


def test_create_with_optional_fields_calls_create_with_expected_params(
    runner, cli_state
):
    """`cases create` forwards every optional flag to sdk.cases.create."""
    create_cmd = [
        "cases",
        "create",
        "TEST_CASE",
        "--assignee",
        "a",
        "--description",
        "d",
        "--findings",
        "n",
        "--subject",
        "s",
    ]
    runner.invoke(cli, create_cmd, obj=cli_state)
    cli_state.sdk.cases.create.assert_called_once_with(
        "TEST_CASE", assignee="a", description="d", findings="n", subject="s"
    )


def test_create_when_missing_name_prints_error(runner, cli_state):
    """Omitting the NAME argument exits with code 2 and a usage error."""
    result = runner.invoke(
        cli, ["cases", "create", "--description", "d"], obj=cli_state
    )
    assert result.exit_code == 2
    assert MISSING_NAME in result.output
def test_update_calls_update_with_expected_params(runner, cli_state):
    """`cases update` with only --name leaves every other field as None."""
    runner.invoke(cli, ["cases", "update", "1", "--name", "TEST_CASE2"], obj=cli_state)
    cli_state.sdk.cases.update.assert_called_once_with(
        1,
        name="TEST_CASE2",
        status=None,
        assignee=None,
        description=None,
        findings=None,
        subject=None,
    )


def test_update_when_missing_case_number_prints_error(runner, cli_state):
    """Omitting CASE_NUMBER exits with code 2 and a usage error."""
    result = runner.invoke(
        cli, ["cases", "update", "--description", "d"], obj=cli_state
    )
    assert result.exit_code == 2
    assert MISSING_CASE_NUMBER_ARG in result.output


def test_list_calls_get_all_with_expected_params(runner, cli_state):
    """`cases list` queries sdk.cases.get_all exactly once."""
    runner.invoke(cli, ["cases", "list"], obj=cli_state)
    assert cli_state.sdk.cases.get_all.call_count == 1


def test_list_prints_expected_data(runner, cli_state, py42_response):
    """`cases list` renders fields from each page returned by get_all."""
    py42_response.data = json.loads(ALL_CASES)

    def pages():
        yield py42_response.data

    cli_state.sdk.cases.get_all.return_value = pages()
    result = runner.invoke(cli, ["cases", "list"], obj=cli_state)
    assert "2021-01-24T11:00:04.217878Z" in result.output
    assert "942897" in result.output


def test_show_calls_get_case_with_expected_params(runner, cli_state):
    """`cases show N` fetches the case via sdk.cases.get."""
    runner.invoke(cli, ["cases", "show", "1"], obj=cli_state)
    cli_state.sdk.cases.get.assert_called_once_with(1)


def test_show_with_include_file_events_calls_file_events_get_all_with_expected_params(
    runner, cli_state
):
    """--include-file-events additionally fetches the case's file events."""
    runner.invoke(cli, ["cases", "show", "1", "--include-file-events"], obj=cli_state)
    cli_state.sdk.cases.get.assert_called_once_with(1)
    cli_state.sdk.cases.file_events.get_all.assert_called_once_with(1)
custom_error +): + cli_state.sdk.cases.file_events.get_all.side_effect = Py42NotFoundError( + custom_error + ) + result = runner.invoke( + cli, + ["cases", "show", "1", "--include-file-events"], + obj=cli_state, + ) + cli_state.sdk.cases.file_events.get_all.assert_called_once_with(1) + assert "Invalid case-number 1." in result.output + + +def test_show_prints_expected_data(runner, cli_state, py42_response): + py42_response.data = json.loads(CASE_DETAILS) + cli_state.sdk.cases.get.return_value = py42_response + result = runner.invoke( + cli, + ["cases", "show", "1"], + obj=cli_state, + ) + assert "test-single@example.com" in result.output + assert "2021-01-24T11:00:04.217878Z" in result.output + assert "123456" in result.output + + +def test_show_prints_expected_data_with_include_file_events_option( + runner, cli_state, py42_response, mocker +): + py42_response.text = ALL_EVENTS + get_case_response = mocker.MagicMock(spec=Py42Response) + get_case_response.data = json.loads(CASE_DETAILS) + cli_state.sdk.cases.get.return_value = get_case_response + cli_state.sdk.cases.file_events.get_all.return_value = py42_response + result = runner.invoke( + cli, + ["cases", "show", "1", "--include-file-events"], + obj=cli_state, + ) + assert ( + "0_147e9445-2f30-4a91-8b2a-9455332e880a_973435567569502913_986467523038446097_163" + in result.output + ) + assert "test-single@example.com" in result.output + assert "2021-01-24T11:00:04.217878Z" in result.output + assert "123456" in result.output + + +def test_show_case_when_missing_case_number_prints_error(runner, cli_state): + command = ["cases", "show"] + result = runner.invoke(cli, command, obj=cli_state) + assert result.exit_code == 2 + assert MISSING_CASE_NUMBER_ARG in result.output + + +def test_export_calls_export_summary_with_expected_params(runner, cli_state, mocker): + with mock.patch("builtins.open", mock_open()) as mf: + runner.invoke( + cli, + ["cases", "export", "1"], + obj=cli_state, + ) + 
cli_state.sdk.cases.export_summary.assert_called_once_with(1) + expected = os.path.join(os.getcwd(), "1_case_summary.pdf") + mf.assert_called_once_with(expected, "wb") + + +def test_export_when_missing_case_number_prints_error(runner, cli_state): + command = ["cases", "export"] + result = runner.invoke(cli, command, obj=cli_state) + assert result.exit_code == 2 + assert MISSING_CASE_NUMBER_ARG in result.output + + +def test_file_events_add_calls_add_event_with_expected_params(runner, cli_state): + runner.invoke( + cli, + ["cases", "file-events", "add", "--case-number", "1", "--event-id", "1"], + obj=cli_state, + ) + cli_state.sdk.cases.file_events.add.assert_called_once_with(1, "1") + + +def test_file_events_add_when_py42_raises_exception_prints_error_message( + runner, cli_state, custom_error +): + cli_state.sdk.cases.file_events.add.side_effect = Py42BadRequestError(custom_error) + result = runner.invoke( + cli, + ["cases", "file-events", "add", "--case-number", "1", "--event-id", "1"], + obj=cli_state, + ) + cli_state.sdk.cases.file_events.add.assert_called_once_with(1, "1") + assert "Invalid case-number or event-id." 
in result.output + + +def test_file_events_add_when_missing_event_id_prints_error(runner, cli_state): + command = ["cases", "file-events", "remove", "--case-number", "4"] + result = runner.invoke(cli, command, obj=cli_state) + assert result.exit_code == 2 + assert MISSING_EVENT_ID in result.output + + +def test_file_events_add_when_missing_case_number_prints_error(runner, cli_state): + command = ["cases", "file-events", "add"] + result = runner.invoke(cli, command, obj=cli_state) + assert result.exit_code == 2 + assert MISSING_CASE_NUMBER_OPTION in result.output + + +def test_file_events_remove_calls_delete_event_with_expected_params(runner, cli_state): + runner.invoke( + cli, + ["cases", "file-events", "remove", "--case-number", "1", "--event-id", "1"], + obj=cli_state, + ) + cli_state.sdk.cases.file_events.delete.assert_called_once_with(1, "1") + + +def test_file_events_remove_when_py42_raises_exception_prints_error_message( + runner, cli_state, custom_error +): + cli_state.sdk.cases.file_events.delete.side_effect = Py42NotFoundError(custom_error) + result = runner.invoke( + cli, + ["cases", "file-events", "remove", "--case-number", "1", "--event-id", "1"], + obj=cli_state, + ) + cli_state.sdk.cases.file_events.delete.assert_called_once_with(1, "1") + assert "Invalid case-number or event-id." 
in result.output + + +def test_file_events_remove_when_missing_event_id_prints_error(runner, cli_state): + command = ["cases", "file-events", "remove", "--case-number", "4"] + result = runner.invoke(cli, command, obj=cli_state) + assert result.exit_code == 2 + assert MISSING_EVENT_ID in result.output + + +def test_file_events_remove_when_missing_case_number_prints_error(runner, cli_state): + command = ["cases", "file-events", "add"] + result = runner.invoke(cli, command, obj=cli_state) + assert result.exit_code == 2 + assert MISSING_CASE_NUMBER_OPTION in result.output + + +def test_file_events_list_calls_get_all_with_expected_params(runner, cli_state): + runner.invoke( + cli, + ["cases", "file-events", "list", "1"], + obj=cli_state, + ) + cli_state.sdk.cases.file_events.get_all.assert_called_once_with(1) + + +def test_file_events_list_prints_expected_data(runner, cli_state): + cli_state.sdk.cases.file_events.get_all.return_value = json.loads(ALL_EVENTS) + result = runner.invoke( + cli, + ["cases", "file-events", "list", "1"], + obj=cli_state, + ) + assert ( + "0_147e9445-2f30-4a91-8b2a-9455332e880a_973435567569502913_986467523038446097_163" + in result.output + ) + assert "2020-12-23T14:24:44.593Z" in result.output + + +def test_file_events_list_when_missing_case_number_prints_error(runner, cli_state): + command = ["cases", "file-events", "list"] + result = runner.invoke(cli, command, obj=cli_state) + assert result.exit_code == 2 + assert MISSING_CASE_NUMBER_ARG in result.output + + +def test_cases_create_when_case_name_already_exists_raises_exception_prints_error_message( + runner, cli_state, case_already_exists_error +): + cli_state.sdk.cases.create.side_effect = case_already_exists_error + result = runner.invoke( + cli, + ["cases", "create", "test case"], + obj=cli_state, + ) + assert ( + "Case name 'test case' already exists, please set another name" in result.output + ) + + +def 
test_cases_create_when_description_length_limit_exceeds_raises_exception_prints_error_message( + runner, cli_state, case_description_limit_exceeded_error +): + cli_state.sdk.cases.create.side_effect = case_description_limit_exceeded_error + result = runner.invoke( + cli, + ["cases", "create", "test case", "--description", "too long"], + obj=cli_state, + ) + assert "Description limit exceeded, max 250 characters allowed." in result.output + + +def test_cases_update_when_description_length_limit_exceeds_raises_exception_prints_error_message( + runner, cli_state, case_description_limit_exceeded_error +): + cli_state.sdk.cases.update.side_effect = case_description_limit_exceeded_error + result = runner.invoke( + cli, + ["cases", "update", "1", "--description", "too long"], + obj=cli_state, + ) + assert "Description limit exceeded, max 250 characters allowed." in result.output + + +def test_fileevents_add_on_closed_case_when_py42_raises_exception_prints_error_message( + runner, cli_state, update_on_a_closed_case_error +): + cli_state.sdk.cases.file_events.add.side_effect = update_on_a_closed_case_error + result = runner.invoke( + cli, + ["cases", "file-events", "add", "--case-number", "1", "--event-id", "1"], + obj=cli_state, + ) + cli_state.sdk.cases.file_events.add.assert_called_once_with(1, "1") + assert "Cannot update a closed case." in result.output + + +def test_fileevents_remove_on_closed_case_when_py42_raises_exception_prints_error_message( + runner, cli_state, update_on_a_closed_case_error +): + cli_state.sdk.cases.file_events.delete.side_effect = update_on_a_closed_case_error + result = runner.invoke( + cli, + ["cases", "file-events", "remove", "--case-number", "1", "--event-id", "1"], + obj=cli_state, + ) + cli_state.sdk.cases.file_events.delete.assert_called_once_with(1, "1") + assert "Cannot update a closed case." 
in result.output + + +def test_fileevents_when_event_id_is_already_associated_with_case_py42_raises_exception_prints_error_message( + runner, cli_state, case_already_has_event_error +): + cli_state.sdk.cases.file_events.add.side_effect = case_already_has_event_error + result = runner.invoke( + cli, + ["cases", "file-events", "add", "--case-number", "1", "--event-id", "1"], + obj=cli_state, + ) + cli_state.sdk.cases.file_events.add.assert_called_once_with(1, "1") + assert "Event is already associated to the case." in result.output + + +def test_add_bulk_file_events_to_cases_uses_expected_arguments( + runner, mocker, cli_state_with_user +): + bulk_processor = mocker.patch("code42cli.cmds.cases.run_bulk_process") + with runner.isolated_filesystem(): + with open("test_add.csv", "w") as csv: + csv.writelines(["number,event_id\n", "1,abc\n", "2,pqr\n"]) + runner.invoke( + cli, + ["cases", "file-events", "bulk", "add", "test_add.csv"], + obj=cli_state_with_user, + ) + assert bulk_processor.call_args[0][1] == [ + {"number": "1", "event_id": "abc"}, + {"number": "2", "event_id": "pqr"}, + ] + + +def test_remove_bulk_file_events_from_cases_uses_expected_arguments( + runner, mocker, cli_state_with_user +): + bulk_processor = mocker.patch("code42cli.cmds.cases.run_bulk_process") + with runner.isolated_filesystem(): + with open("test_remove.csv", "w") as csv: + csv.writelines(["number,event_id\n", "1,abc\n", "2,pqr\n"]) + runner.invoke( + cli, + ["cases", "file-events", "bulk", "remove", "test_remove.csv"], + obj=cli_state_with_user, + ) + assert bulk_processor.call_args[0][1] == [ + {"number": "1", "event_id": "abc"}, + {"number": "2", "event_id": "pqr"}, + ] diff --git a/tests/cmds/test_devices.py b/tests/cmds/test_devices.py new file mode 100644 index 000000000..203b3b1f8 --- /dev/null +++ b/tests/cmds/test_devices.py @@ -0,0 +1,1263 @@ +import json +from datetime import date + +import numpy as np +import pytest +from pandas import DataFrame +from pandas import Series +from 
pandas._testing import assert_frame_equal +from pandas._testing import assert_series_equal +from py42.exceptions import Py42BadRequestError +from py42.exceptions import Py42ForbiddenError +from py42.exceptions import Py42NotFoundError +from py42.exceptions import Py42OrgNotFoundError +from tests.conftest import create_mock_response + +from code42cli.cmds.devices import _add_backup_set_settings_to_dataframe +from code42cli.cmds.devices import _add_legal_hold_membership_to_device_dataframe +from code42cli.cmds.devices import _add_usernames_to_device_dataframe +from code42cli.cmds.devices import _break_backup_usage_into_total_storage +from code42cli.cmds.devices import _get_device_dataframe +from code42cli.main import cli +from code42cli.worker import WorkerStats + +_NAMESPACE = "code42cli.cmds.devices" +TEST_NEW_DEVICE_NAME = "test-new-device-name-123" +TEST_DATE_OLDER = "2020-01-01T12:00:00.774Z" +TEST_DATE_NEWER = "2021-01-01T12:00:00.774Z" +TEST_DATE_MIDDLE = "2020-06-01T12:00:00" +TEST_DEVICE_GUID = "954143368874689941" +TEST_DEVICE_ID = 139527 +TEST_ARCHIVE_GUID = "954143426849296547" +TEST_PURGE_DATE = "2020-10-12" +TEST_ARCHIVES_RESPONSE = { + "archives": [ + { + "archiveGuid": "954143426849296547", + "userId": None, + "userUid": None, + "archiveBytes": 1745757673, + "targetGuid": "632540230984925185", + "lastCompletedBackup": "2020-10-12T20:17:52.084Z", + "isColdStorage": False, + "lastMaintained": "2020-10-10T19:31:05.811Z", + "maintenanceDuration": 455, + "compactBytesRemoved": 0, + "storePointId": 1000, + "selectedBytes": 1658317953, + "selectedFiles": 596, + "todoBytes": 0, + "format": "ARCHIVE_V1", + } + ] +} +TEST_DEVICE_RESPONSE = """{"data":{"computerId":139527,"name":"testname","osHostname": +"testhostname","guid":"954143368874689941","type":"COMPUTER","status":"Active","active":true, +"blocked":false,"alertState":0,"alertStates":["OK"],"userId":203988,"userUid":"938960273869958201", 
+"orgId":3099,"orgUid":"915323705751579872","computerExtRef":null,"notes":null,"parentComputerId": +null,"parentComputerGuid":null,"lastConnected":"2020-10-12T16:55:40.632Z","osName":"win", +"osVersion":"10.0.18362","osArch":"amd64","address":"172.16.208.140:4242","remoteAddress": +"72.50.201.186","javaVersion":"11.0.4","modelInfo":null,"timeZone":"America/Chicago", +"version":1525200006822,"productVersion":"8.2.2","buildVersion":26,"creationDate": +"2020-05-14T13:03:20.302Z","modificationDate":"2020-10-12T16:55:40.632Z","loginDate": +"2020-10-12T12:54:45.132Z","service":"CrashPlan"}}""" +TEST_BACKUPUSAGE_RESPONSE = """{"metadata":{"timestamp":"2020-10-13T12:51:28.410Z", +"params":{"incBackupUsage":"True","idType":"guid"}},"data":{"computerId":1767,"name": +"SNWINTEST1","osHostname":"UNKNOWN","guid":"843290890230648046","type":"COMPUTER", +"status":"Active","active":true,"blocked":false,"alertState":2,"alertStates": +["CriticalConnectionAlert"],"userId":1934,"userUid":"843290130258496632","orgId":1067, +"orgUid":"843284512172838008","computerExtRef":null,"notes":null,"parentComputerId":null, +"parentComputerGuid":null,"lastConnected":"2018-04-13T20:57:12.496Z","osName":"win", +"osVersion":"10.0","osArch":"amd64","address":"10.0.1.23:4242","remoteAddress":"73.53.78.104", +"javaVersion":"1.8.0_144","modelInfo":null,"timeZone":"America/Los_Angeles","version": +1512021600671,"productVersion":"6.7.1","buildVersion":4615,"creationDate":"2018-04-10T19:23:23.564Z", +"modificationDate":"2018-06-29T17:41:12.616Z","loginDate":"2018-04-13T20:17:32.213Z","service": +"CrashPlan","backupUsage":[{"targetComputerParentId":null,"targetComputerParentGuid":null, +"targetComputerGuid":"632540230984925185","targetComputerName":"Code42 Cloud USA West", +"targetComputerOsName":null,"targetComputerType":"SERVER","selectedFiles":0,"selectedBytes":0, +"todoFiles":0,"todoBytes":0,"archiveBytes":119501,"billableBytes":119501,"sendRateAverage":0, 
+"completionRateAverage":0,"lastBackup":null,"lastCompletedBackup":null,"lastConnected": +"2018-04-11T16:23:35.776Z","lastMaintenanceDate":"2020-10-08T21:23:12.533Z","lastCompactDate": +"2020-10-08T21:23:12.411Z","modificationDate":"2020-10-12T16:19:01.267Z","creationDate": +"2018-04-10T19:48:29.903Z","using":true,"alertState":16,"alertStates":["CriticalBackupAlert"], +"percentComplete":0.0,"storePointId":1001,"storePointName":"cif-sea-2","serverId":1003,"serverGuid": +"836476656572622471","serverName":"cif-sea","serverHostName":"https://cif-sea.crashplan.com", +"isProvider":false,"archiveGuid":"843293524842941560","archiveFormat":"ARCHIVE_V1","activity": +{"connected":false,"backingUp":false,"restoring":false,"timeRemainingInMs":0, +"remainingFiles":0,"remainingBytes":0}},{"targetComputerParentId":null,"targetComputerParentGuid": +null,"targetComputerGuid":"43","targetComputerName":"PROe Cloud, US","targetComputerOsName":null, +"targetComputerType":"SERVER","selectedFiles":1599,"selectedBytes":1529420143,"todoFiles":0, +"todoBytes":0,"archiveBytes":56848550,"billableBytes":1529420143,"sendRateAverage":0, +"completionRateAverage":0,"lastBackup":"2019-12-02T09:34:28.364-06:00","lastCompletedBackup": +"2019-12-02T09:34:28.364-06:00","lastConnected":"2019-12-02T11:02:36.108-06:00","lastMaintenanceDate": +"2021-02-16T07:01:11.697-06:00","lastCompactDate":"2021-02-16T07:01:11.694-06:00","modificationDate": +"2021-02-17T04:57:27.222-06:00","creationDate":"2019-09-26T15:27:38.806-05:00","using":true, +"alertState":16,"alertStates":["CriticalBackupAlert"],"percentComplete":100.0,"storePointId":10989, +"storePointName":"fsa-iad-2","serverId":160024121,"serverGuid":"883282371081742804","serverName": +"fsa-iad","serverHostName":"https://web-fsa-iad.crashplan.com","isProvider":false,"archiveGuid": +"92077743916530001","archiveFormat":"ARCHIVE_V1","activity":{"connected":false,"backingUp":false, 
+"restoring":false,"timeRemainingInMs":0,"remainingFiles":0,"remainingBytes":0}}]}}""" +TEST_EMPTY_BACKUPUSAGE_RESPONSE = """{"metadata":{"timestamp":"2020-10-13T12:51:28.410Z","params": +{"incBackupUsage":"True","idType":"guid"}},"data":{"computerId":1767,"name":"SNWINTEST1", +"osHostname":"UNKNOWN","guid":"843290890230648046","type":"COMPUTER","status":"Active", +"active":true,"blocked":false,"alertState":2,"alertStates":["CriticalConnectionAlert"], +"userId":1934,"userUid":"843290130258496632","orgId":1067,"orgUid":"843284512172838008", +"computerExtRef":null,"notes":null,"parentComputerId":null,"parentComputerGuid":null,"lastConnected": +"2018-04-13T20:57:12.496Z","osName":"win","osVersion":"10.0","osArch":"amd64","address": +"10.0.1.23:4242","remoteAddress":"73.53.78.104","javaVersion":"1.8.0_144","modelInfo":null, +"timeZone":"America/Los_Angeles","version":1512021600671,"productVersion":"6.7.1","buildVersion": +4615,"creationDate":"2018-04-10T19:23:23.564Z","modificationDate":"2018-06-29T17:41:12.616Z", +"loginDate":"2018-04-13T20:17:32.213Z","service":"CrashPlan","backupUsage":[]}}""" +TEST_COMPUTER_PAGE = { + "computers": [ + { + "computerId": 1207, + "name": "ubuntu", + "osHostname": "UNKNOWN", + "guid": "839648314463407622", + "type": "COMPUTER", + "status": "Active, Deauthorized", + "active": True, + "blocked": False, + "alertState": 2, + "alertStates": ["CriticalConnectionAlert"], + "userId": 1320, + "userUid": "840103986007089121", + "orgId": 1017, + "orgUid": "836473214639515393", + "computerExtRef": None, + "notes": None, + "parentComputerId": None, + "parentComputerGuid": None, + "lastConnected": TEST_DATE_OLDER, + "osName": "linux", + "osVersion": "4.4.0-96-generic", + "osArch": "amd64", + "address": "172.16.132.193:4242", + "remoteAddress": "38.92.134.129", + "javaVersion": "1.8.0_144", + "modelInfo": None, + "timeZone": "America/Chicago", + "version": 1512021600671, + "productVersion": "6.7.1", + "buildVersion": 4589, + "creationDate": 
TEST_DATE_OLDER, + "modificationDate": "2020-09-03T13:32:02.383Z", + "loginDate": "2018-03-16T16:52:18.900Z", + "service": "CrashPlan", + }, + { + "computerId": 1281, + "name": "TOM-PC", + "osHostname": "UNKNOWN", + "guid": "840099921260026634", + "type": "COMPUTER", + "status": "Deactivated", + "active": False, + "blocked": False, + "alertState": 0, + "alertStates": ["OK"], + "userId": 1320, + "userUid": "840103986007089121", + "orgId": 1034, + "orgUid": "840098081282695137", + "computerExtRef": None, + "notes": None, + "parentComputerId": None, + "parentComputerGuid": None, + "lastConnected": TEST_DATE_NEWER, + "osName": "win", + "osVersion": "6.1", + "osArch": "amd64", + "address": "172.16.3.34:4242", + "remoteAddress": "38.92.134.129", + "javaVersion": "1.8.0_121", + "modelInfo": None, + "timeZone": "America/Chicago", + "version": 1508734800652, + "productVersion": "6.5.2", + "buildVersion": 32, + "creationDate": TEST_DATE_NEWER, + "modificationDate": "2020-09-08T15:43:45.875Z", + "loginDate": "2018-03-19T20:03:45.360Z", + "service": "CrashPlan", + }, + ] +} +TEST_USERS_LIST_PAGE = { + "totalCount": 2, + "users": [ + { + "userId": 1320, + "userUid": "840103986007089121", + "status": "Active", + "username": "ttranda_deactivated@ttrantest.com", + "email": "ttranda@ttrantest.com", + "firstName": "Thomas", + "lastName": "Tran", + "quotaInBytes": -1, + "orgId": 1034, + "orgUid": "840098081282695137", + "orgName": "Okta SSO", + "userExtRef": None, + "notes": None, + "active": True, + "blocked": False, + "emailPromo": True, + "invited": False, + "orgType": "ENTERPRISE", + "usernameIsAnEmail": True, + "creationDate": "2018-03-19T19:43:16.742Z", + "modificationDate": "2018-10-26T20:22:05.726Z", + "passwordReset": False, + "localAuthenticationOnly": False, + "licenses": ["admin.securityTools"], + }, + { + "userId": 1014, + "userUid": "836473273124890369", + "status": "Active", + "username": "test@example.com", + "email": "test@example.com", + "firstName": "Chad", + 
"lastName": "Valentine", + "quotaInBytes": -1, + "orgId": 1017, + "orgUid": "836473214639515393", + "orgName": "Holy SaaS-a-roli", + "userExtRef": None, + "notes": None, + "active": True, + "blocked": False, + "emailPromo": True, + "invited": False, + "orgType": "ENTERPRISE", + "usernameIsAnEmail": True, + "creationDate": "2018-02-22T18:35:23.217Z", + "modificationDate": "2018-04-25T11:12:11.504Z", + "passwordReset": False, + "localAuthenticationOnly": False, + "licenses": ["admin.securityTools"], + }, + ], +} +MATTER_RESPONSE = { + "legalHolds": [ + { + "legalHoldUid": "123456789", + "name": "Test legal hold matter", + "description": "", + "notes": None, + "holdExtRef": None, + "active": True, + "creationDate": "2020-08-05T10:49:58.353-05:00", + "lastModified": "2020-08-05T10:49:58.358-05:00", + "creator": { + "userUid": "12345", + "username": "user@code42.com", + "email": "user@code42.com", + "userExtRef": None, + }, + "holdPolicyUid": "966191295667423997", + }, + { + "legalHoldUid": "987654321", + "name": "Another Matter", + "description": "", + "notes": None, + "holdExtRef": None, + "active": True, + "creationDate": "2020-05-20T15:58:31.375-05:00", + "lastModified": "2020-05-28T13:49:16.098-05:00", + "creator": { + "userUid": "76543", + "username": "user2@code42.com", + "email": "user2@code42.com", + "userExtRef": None, + }, + "holdPolicyUid": "946178665645035826", + }, + ] +} +API_CLIENT_MATTER_RESPONSE = [ + { + "legalHoldUid": "123456789", + "name": "Test legal hold matter", + "description": "", + "notes": None, + "holdExtRef": None, + "active": True, + "creationDate": "2020-08-05T10:49:58.353-05:00", + "lastModified": "2020-08-05T10:49:58.358-05:00", + "creator": { + "userUid": "12345", + "username": "user@code42.com", + "email": "user@code42.com", + "userExtRef": None, + }, + "holdPolicyUid": "966191295667423997", + }, + { + "legalHoldUid": "987654321", + "name": "Another Matter", + "description": "", + "notes": None, + "holdExtRef": None, + "active": 
True, + "creationDate": "2020-05-20T15:58:31.375-05:00", + "lastModified": "2020-05-28T13:49:16.098-05:00", + "creator": { + "userUid": "76543", + "username": "user2@code42.com", + "email": "user2@code42.com", + "userExtRef": None, + }, + "holdPolicyUid": "946178665645035826", + }, +] +ALL_CUSTODIANS_RESPONSE = { + "legalHoldMemberships": [ + { + "legalHoldMembershipUid": "99999", + "active": True, + "creationDate": "2020-07-16T08:50:23.405Z", + "legalHold": { + "legalHoldUid": "123456789", + "name": "Test legal hold matter", + }, + "user": { + "userUid": "840103986007089121", + "username": "ttranda_deactivated@ttrantest.com", + "email": "ttranda_deactivated@ttrantest.com", + "userExtRef": None, + }, + }, + { + "legalHoldMembershipUid": "88888", + "active": True, + "creationDate": "2020-07-16T08:50:23.405Z", + "legalHold": {"legalHoldUid": "987654321", "name": "Another Matter"}, + "user": { + "userUid": "840103986007089121", + "username": "ttranda_deactivated@ttrantest.com", + "email": "ttranda_deactivated@ttrantest.com", + "userExtRef": None, + }, + }, + ] +} +API_CLIENT_ALL_CUSTODIANS_RESPONSE = [ + { + "legalHoldMembershipUid": "99999", + "active": True, + "creationDate": "2020-07-16T08:50:23.405Z", + "legalHold": { + "legalHoldUid": "123456789", + "name": "Test legal hold matter", + }, + "user": { + "userUid": "840103986007089121", + "username": "ttranda_deactivated@ttrantest.com", + "email": "ttranda_deactivated@ttrantest.com", + "userExtRef": None, + }, + }, + { + "legalHoldMembershipUid": "88888", + "active": True, + "creationDate": "2020-07-16T08:50:23.405Z", + "legalHold": {"legalHoldUid": "987654321", "name": "Another Matter"}, + "user": { + "userUid": "840103986007089121", + "username": "ttranda_deactivated@ttrantest.com", + "email": "ttranda_deactivated@ttrantest.com", + "userExtRef": None, + }, + }, +] + + +@pytest.fixture +def mock_device_settings(mocker, mock_backup_set): + device_settings = mocker.MagicMock() + device_settings.name = "testname" + 
device_settings.guid = "1234" + device_settings.backup_sets = [mock_backup_set, mock_backup_set] + return device_settings + + +@pytest.fixture +def mock_backup_set(mocker): + backup_set = mocker.MagicMock() + backup_set["name"] = "test_name" + backup_set.destinations = {"destination_guid": "destination_name"} + backup_set.excluded_files = ["/excluded/path"] + backup_set.included_files = ["/included/path"] + backup_set.filename_exclusions = [".*\\.excluded_filetype"] + backup_set.locked = True + return backup_set + + +@pytest.fixture +def empty_successful_response(mocker): + return create_mock_response(mocker) + + +@pytest.fixture +def device_info_response(mocker): + return create_mock_response(mocker, data=TEST_DEVICE_RESPONSE) + + +def archives_list_generator(): + yield TEST_ARCHIVES_RESPONSE + + +def devices_list_generator(): + yield TEST_COMPUTER_PAGE + + +def users_list_generator(): + yield TEST_USERS_LIST_PAGE + + +def matter_list_generator(mocker, api_client=False): + if api_client: + yield create_mock_response(mocker, data=API_CLIENT_MATTER_RESPONSE) + else: + yield create_mock_response(mocker, data=MATTER_RESPONSE) + + +def custodian_list_generator(mocker, api_client=False): + if api_client: + yield create_mock_response(mocker, data=API_CLIENT_ALL_CUSTODIANS_RESPONSE) + else: + yield create_mock_response(mocker, data=ALL_CUSTODIANS_RESPONSE) + + +@pytest.fixture +def backupusage_response(mocker): + return create_mock_response(mocker, data=TEST_BACKUPUSAGE_RESPONSE) + + +@pytest.fixture +def empty_backupusage_response(mocker): + return create_mock_response(mocker, data=TEST_EMPTY_BACKUPUSAGE_RESPONSE) + + +@pytest.fixture +def device_info_success(cli_state, device_info_response): + cli_state.sdk.devices.get_by_id.return_value = device_info_response + + +@pytest.fixture +def get_device_by_guid_success(cli_state, device_info_response): + cli_state.sdk.devices.get_by_guid.return_value = device_info_response + + +@pytest.fixture +def 
archives_list_success(cli_state): + cli_state.sdk.archive.get_all_by_device_guid.return_value = ( + archives_list_generator() + ) + + +@pytest.fixture +def deactivate_device_success(cli_state, empty_successful_response): + cli_state.sdk.devices.deactivate.return_value = empty_successful_response + + +@pytest.fixture +def reactivate_device_success(cli_state, empty_successful_response): + cli_state.sdk.devices.reactivate.return_value = empty_successful_response + + +@pytest.fixture +def deactivate_device_not_found_failure(cli_state, custom_error): + cli_state.sdk.devices.deactivate.side_effect = Py42NotFoundError(custom_error) + + +@pytest.fixture +def reactivate_device_not_found_failure(cli_state, custom_error): + cli_state.sdk.devices.reactivate.side_effect = Py42NotFoundError(custom_error) + + +@pytest.fixture +def deactivate_device_in_legal_hold_failure(cli_state, custom_error): + cli_state.sdk.devices.deactivate.side_effect = Py42BadRequestError(custom_error) + + +@pytest.fixture +def deactivate_device_not_allowed_failure(cli_state, custom_error): + cli_state.sdk.devices.deactivate.side_effect = Py42ForbiddenError(custom_error) + + +@pytest.fixture +def reactivate_device_not_allowed_failure(cli_state, custom_error): + cli_state.sdk.devices.reactivate.side_effect = Py42ForbiddenError(custom_error) + + +@pytest.fixture +def backupusage_success(cli_state, backupusage_response): + cli_state.sdk.devices.get_by_guid.return_value = backupusage_response + + +@pytest.fixture +def empty_backupusage_success(cli_state, empty_backupusage_response): + cli_state.sdk.devices.get_by_guid.return_value = empty_backupusage_response + + +@pytest.fixture +def get_all_devices_success(cli_state): + cli_state.sdk.devices.get_all.return_value = devices_list_generator() + + +@pytest.fixture +def get_all_users_success(cli_state): + cli_state.sdk.users.get_all.return_value = users_list_generator() + + +@pytest.fixture +def get_all_matter_success(mocker, cli_state): + 
cli_state.sdk.legalhold.get_all_matters.return_value = matter_list_generator(mocker) + + +@pytest.fixture +def get_api_client_all_matter_success(mocker, cli_state): + cli_state.sdk.legalhold.get_all_matters.return_value = matter_list_generator( + mocker, api_client=True + ) + + +@pytest.fixture +def get_all_custodian_success(mocker, cli_state): + cli_state.sdk.legalhold.get_all_matter_custodians.return_value = ( + custodian_list_generator(mocker) + ) + + +@pytest.fixture +def get_api_client_all_custodian_success(mocker, cli_state): + cli_state.sdk.legalhold.get_all_matter_custodians.return_value = ( + custodian_list_generator(mocker, api_client=True) + ) + + +@pytest.fixture +def worker_stats_factory(mocker): + return mocker.patch(f"{_NAMESPACE}.create_worker_stats") + + +@pytest.fixture +def worker_stats(mocker, worker_stats_factory): + stats = mocker.MagicMock(spec=WorkerStats) + worker_stats_factory.return_value = stats + return stats + + +def test_rename_calls_get_and_update_settings_with_expected_params(runner, cli_state): + cli_state.sdk.devices.get_settings.return_value = mock_device_settings + runner.invoke( + cli, + [ + "devices", + "rename", + TEST_DEVICE_GUID, + "--new-device-name", + TEST_NEW_DEVICE_NAME, + ], + obj=cli_state, + ) + cli_state.sdk.devices.get_settings.assert_called_once_with(TEST_DEVICE_GUID) + cli_state.sdk.devices.update_settings.assert_called_once_with(mock_device_settings) + + +def test_rename_when_missing_guid_prints_error(runner, cli_state): + result = runner.invoke( + cli, ["devices", "rename", "-n", TEST_NEW_DEVICE_NAME], obj=cli_state + ) + assert result.exit_code == 2 + assert "Missing argument 'DEVICE_GUID'" in result.output + + +def test_rename_when_missing_name_prints_error(runner, cli_state): + result = runner.invoke(cli, ["devices", "rename", TEST_DEVICE_GUID], obj=cli_state) + assert result.exit_code == 2 + assert "Missing option '-n' / '--new-device-name'" in result.output + + +def 
test_rename_when_guid_not_found_py42_raises_exception_prints_error( + runner, cli_state, custom_error +): + cli_state.sdk.devices.get_settings.side_effect = Py42NotFoundError(custom_error) + + result = runner.invoke( + cli, + [ + "devices", + "rename", + TEST_DEVICE_GUID, + "--new-device-name", + TEST_NEW_DEVICE_NAME, + ], + obj=cli_state, + ) + cli_state.sdk.devices.get_settings.assert_called_once_with(TEST_DEVICE_GUID) + assert result.exit_code == 1 + assert ( + f"Error: The device with GUID '{TEST_DEVICE_GUID}' was not found." + in result.output + ) + + +def test_deactivate_deactivates_device( + runner, cli_state, deactivate_device_success, get_device_by_guid_success +): + runner.invoke(cli, ["devices", "deactivate", TEST_DEVICE_GUID], obj=cli_state) + cli_state.sdk.devices.deactivate.assert_called_once_with(TEST_DEVICE_ID) + + +def test_deactivate_when_given_non_guid_raises_before_making_request(runner, cli_state): + result = runner.invoke(cli, ["devices", "deactivate", "not_a_guid"], obj=cli_state) + assert result.exit_code == 1 + assert "Not a valid GUID." 
in result.output + assert cli_state.sdk.devices.deactivate.call_count == 0 + + +def test_deactivate_when_given_flag_updates_purge_date( + runner, + cli_state, + deactivate_device_success, + get_device_by_guid_success, + device_info_success, + archives_list_success, +): + runner.invoke( + cli, + ["devices", "deactivate", TEST_DEVICE_GUID, "--purge-date", TEST_PURGE_DATE], + obj=cli_state, + ) + cli_state.sdk.archive.update_cold_storage_purge_date.assert_called_once_with( + TEST_ARCHIVE_GUID, TEST_PURGE_DATE + ) + + +def test_deactivate_when_given_flag_changes_device_name( + runner, + cli_state, + deactivate_device_success, + get_device_by_guid_success, + device_info_success, + mock_device_settings, +): + cli_state.sdk.devices.get_settings.return_value = mock_device_settings + runner.invoke( + cli, + ["devices", "deactivate", TEST_DEVICE_GUID, "--change-device-name"], + obj=cli_state, + ) + assert ( + mock_device_settings.name + == "deactivated_" + date.today().strftime("%Y-%m-%d") + "_testname" + ) + cli_state.sdk.devices.update_settings.assert_called_once_with(mock_device_settings) + + +def test_deactivate_does_not_change_device_name_when_not_given_flag( + runner, + cli_state, + deactivate_device_success, + device_info_success, + mock_device_settings, +): + cli_state.sdk.devices.get_settings.return_value = mock_device_settings + runner.invoke( + cli, + ["devices", "deactivate", TEST_DEVICE_GUID], + obj=cli_state, + ) + assert mock_device_settings.name == "testname" + cli_state.sdk.devices.update_settings.assert_not_called() + + +def test_deactivate_fails_if_device_does_not_exist( + runner, cli_state, deactivate_device_not_found_failure +): + result = runner.invoke( + cli, ["devices", "deactivate", TEST_DEVICE_GUID], obj=cli_state + ) + assert result.exit_code == 1 + assert f"The device with GUID '{TEST_DEVICE_GUID}' was not found." 
in result.output + + +def test_deactivate_fails_if_device_is_on_legal_hold( + runner, cli_state, deactivate_device_in_legal_hold_failure +): + result = runner.invoke( + cli, ["devices", "deactivate", TEST_DEVICE_GUID], obj=cli_state + ) + assert result.exit_code == 1 + assert ( + f"The device with GUID '{TEST_DEVICE_GUID}' is in legal hold." in result.output + ) + + +def test_deactivate_fails_if_device_deactivation_forbidden( + runner, cli_state, deactivate_device_not_allowed_failure +): + result = runner.invoke( + cli, ["devices", "deactivate", TEST_DEVICE_GUID], obj=cli_state + ) + assert result.exit_code == 1 + assert ( + f"Unable to deactivate the device with GUID '{TEST_DEVICE_GUID}'." + in result.output + ) + + +def test_reactivate_reactivates_device( + runner, cli_state, deactivate_device_success, get_device_by_guid_success +): + runner.invoke(cli, ["devices", "reactivate", TEST_DEVICE_GUID], obj=cli_state) + cli_state.sdk.devices.reactivate.assert_called_once_with(TEST_DEVICE_ID) + + +def test_reactivate_fails_if_device_does_not_exist( + runner, cli_state, reactivate_device_not_found_failure +): + result = runner.invoke( + cli, ["devices", "reactivate", TEST_DEVICE_GUID], obj=cli_state + ) + assert result.exit_code == 1 + assert f"The device with GUID '{TEST_DEVICE_GUID}' was not found." in result.output + + +def test_reactivate_fails_if_device_reactivation_forbidden( + runner, cli_state, reactivate_device_not_allowed_failure +): + result = runner.invoke( + cli, ["devices", "reactivate", TEST_DEVICE_GUID], obj=cli_state + ) + assert result.exit_code == 1 + assert ( + f"Unable to reactivate the device with GUID '{TEST_DEVICE_GUID}'." 
+ in result.output + ) + + +def test_show_prints_device_info(runner, cli_state, backupusage_success): + result = runner.invoke(cli, ["devices", "show", TEST_DEVICE_GUID], obj=cli_state) + assert "SNWINTEST1" in result.output + assert "843290890230648046" in result.output + assert "119501" in result.output + assert "2018-04-13T20:57:12.496Z" in result.output + assert "6.7.1" in result.output + + +def test_show_prints_backup_set_info(runner, cli_state, backupusage_success): + result = runner.invoke(cli, ["devices", "show", TEST_DEVICE_GUID], obj=cli_state) + assert "Code42 Cloud USA West" in result.output + assert "843293524842941560" in result.output + + +def test_get_device_dataframe_returns_correct_columns( + cli_state, get_all_devices_success +): + columns = [ + "computerId", + "guid", + "name", + "osHostname", + "status", + "lastConnected", + "creationDate", + "productVersion", + "osName", + "osVersion", + "userUid", + ] + result = _get_device_dataframe(cli_state.sdk, columns, page_size=100) + assert "computerId" in result.columns + assert "guid" in result.columns + assert "name" in result.columns + assert "osHostname" in result.columns + assert "guid" in result.columns + assert "status" in result.columns + assert "lastConnected" in result.columns + assert "creationDate" in result.columns + assert "productVersion" in result.columns + assert "osName" in result.columns + assert "osVersion" in result.columns + assert "modelInfo" not in result.columns + assert "address" not in result.columns + assert "buildVersion" not in result.columns + + +def test_device_dataframe_return_includes_backupusage_when_flag_passed( + cli_state, get_all_devices_success +): + result = _get_device_dataframe( + cli_state.sdk, columns=[], page_size=100, include_backup_usage=True + ) + assert "backupUsage" in result.columns + + +def test_add_usernames_to_device_dataframe_adds_usernames_to_dataframe( + cli_state, get_all_users_success +): + testdf = DataFrame.from_records( + [{"userUid": 
"840103986007089121"}, {"userUid": "836473273124890369"}] + ) + result = _add_usernames_to_device_dataframe(cli_state.sdk, testdf) + assert "username" in result.columns + + +def test_add_legal_hold_membership_to_device_dataframe_adds_legal_hold_columns_to_dataframe( + cli_state, get_all_matter_success, get_all_custodian_success +): + testdf = DataFrame.from_records( + [ + {"userUid": "840103986007089121", "status": "Active"}, + {"userUid": "836473273124890369", "status": "Active, Deauthorized"}, + ] + ) + result = _add_legal_hold_membership_to_device_dataframe(cli_state.sdk, testdf) + assert "legalHoldUid" in result.columns + assert "legalHoldName" in result.columns + + +def test_api_client_add_legal_hold_membership_to_device_dataframe_adds_legal_hold_columns_to_dataframe( + cli_state, get_api_client_all_matter_success, get_api_client_all_custodian_success +): + cli_state.sdk._auth_flag = 1 + testdf = DataFrame.from_records( + [ + {"userUid": "840103986007089121", "status": "Active"}, + {"userUid": "836473273124890369", "status": "Active, Deauthorized"}, + ] + ) + result = _add_legal_hold_membership_to_device_dataframe(cli_state.sdk, testdf) + assert "legalHoldUid" in result.columns + assert "legalHoldName" in result.columns + + +def test_list_without_page_size_option_defaults_to_100_results_per_page( + cli_state, runner +): + runner.invoke(cli, ["devices", "list"], obj=cli_state) + cli_state.sdk.devices.get_all.assert_called_once_with( + active=None, include_backup_usage=False, org_uid=None, page_size=100 + ) + + +def test_list_with_page_size_option_sets_expected_page_size_in_request( + cli_state, runner +): + runner.invoke(cli, ["devices", "list", "--page-size", "1000"], obj=cli_state) + cli_state.sdk.devices.get_all.assert_called_once_with( + active=None, include_backup_usage=False, org_uid=None, page_size=1000 + ) + + +def test_list_include_legal_hold_membership_pops_legal_hold_if_device_deactivated( + cli_state, get_all_matter_success, 
get_all_custodian_success +): + testdf = DataFrame.from_records( + [ + {"userUid": "840103986007089121", "status": "Deactivated"}, + {"userUid": "840103986007089121", "status": "Active"}, + ] + ) + + testdf_result = DataFrame.from_records( + [ + { + "userUid": "840103986007089121", + "status": "Deactivated", + "legalHoldUid": np.nan, + "legalHoldName": np.nan, + }, + { + "userUid": "840103986007089121", + "status": "Active", + "legalHoldUid": "123456789,987654321", + "legalHoldName": "Test legal hold matter,Another Matter", + }, + ] + ) + result = _add_legal_hold_membership_to_device_dataframe(cli_state.sdk, testdf) + + assert_frame_equal(result, testdf_result) + + +def test_list_include_legal_hold_membership_merges_in_and_concats_legal_hold_info( + runner, + cli_state, + get_all_devices_success, + get_all_custodian_success, + get_all_matter_success, +): + result = runner.invoke( + cli, ["devices", "list", "--include-legal-hold-membership"], obj=cli_state + ) + + assert "Test legal hold matter,Another Matter" in result.output + assert "123456789,987654321" in result.output + + +def test_list_invalid_org_uid_raises_error(runner, cli_state, custom_error): + custom_error.response.text = "Unable to find org" + invalid_org_uid = "invalid_org_uid" + cli_state.sdk.devices.get_all.side_effect = Py42OrgNotFoundError( + custom_error, invalid_org_uid + ) + result = runner.invoke( + cli, ["devices", "list", "--org-uid", invalid_org_uid], obj=cli_state + ) + assert result.exit_code == 1 + assert ( + f"Error: The organization with UID '{invalid_org_uid}' was not found." 
+ in result.output + ) + + +def test_list_excludes_recently_connected_devices_before_filtering_by_date( + runner, + cli_state, + get_all_devices_success, +): + result = runner.invoke( + cli, + [ + "devices", + "list", + "--exclude-most-recently-connected", + "1", + "--last-connected-before", + TEST_DATE_NEWER, + ], + obj=cli_state, + ) + assert "839648314463407622" in result.output + + +def test_list_backup_sets_invalid_org_uid_raises_error(runner, cli_state, custom_error): + custom_error.response.text = "Unable to find org" + invalid_org_uid = "invalid_org_uid" + cli_state.sdk.devices.get_all.side_effect = Py42OrgNotFoundError( + custom_error, invalid_org_uid + ) + result = runner.invoke( + cli, + ["devices", "list-backup-sets", "--org-uid", invalid_org_uid], + obj=cli_state, + ) + assert result.exit_code == 1 + assert ( + f"Error: The organization with UID '{invalid_org_uid}' was not found." + in result.output + ) + + +def test_break_backup_usage_into_total_storage_correctly_calculates_values(): + test_backupusage_cell = json.loads(TEST_BACKUPUSAGE_RESPONSE)["data"]["backupUsage"] + result = _break_backup_usage_into_total_storage(test_backupusage_cell) + + test_empty_backupusage_cell = json.loads(TEST_EMPTY_BACKUPUSAGE_RESPONSE)["data"][ + "backupUsage" + ] + empty_result = _break_backup_usage_into_total_storage(test_empty_backupusage_cell) + + assert_series_equal(result, Series([2, 56968051])) + assert_series_equal(empty_result, Series([0, 0])) + + +def test_last_connected_after_filters_appropriate_results( + cli_state, runner, get_all_devices_success +): + result = runner.invoke( + cli, + ["devices", "list", "--last-connected-after", TEST_DATE_MIDDLE], + obj=cli_state, + ) + assert TEST_DATE_NEWER in result.output + assert TEST_DATE_OLDER not in result.output + + +def test_last_connected_before_filters_appropriate_results( + cli_state, runner, get_all_devices_success +): + result = runner.invoke( + cli, + ["devices", "list", "--last-connected-before", 
TEST_DATE_MIDDLE], + obj=cli_state, + ) + assert TEST_DATE_NEWER not in result.output + assert TEST_DATE_OLDER in result.output + + +def test_created_after_filters_appropriate_results( + cli_state, runner, get_all_devices_success +): + result = runner.invoke( + cli, + ["devices", "list", "--created-after", TEST_DATE_MIDDLE], + obj=cli_state, + ) + assert TEST_DATE_NEWER in result.output + assert TEST_DATE_OLDER not in result.output + + +def test_created_before_filters_appropriate_results( + cli_state, runner, get_all_devices_success +): + result = runner.invoke( + cli, + ["devices", "list", "--created-before", TEST_DATE_MIDDLE], + obj=cli_state, + ) + assert TEST_DATE_NEWER not in result.output + assert TEST_DATE_OLDER in result.output + + +def test_exclude_most_recent_connected_filters_appropriate_results( + cli_state, runner, get_all_devices_success +): + older_connection_guid = TEST_COMPUTER_PAGE["computers"][0]["guid"] + newer_connection_guid = TEST_COMPUTER_PAGE["computers"][1]["guid"] + result_1 = runner.invoke( + cli, + ["devices", "list", "--exclude-most-recently-connected", "1"], + obj=cli_state, + ) + assert older_connection_guid in result_1.output + assert newer_connection_guid not in result_1.output + + result_2 = runner.invoke( + cli, + ["devices", "list", "--exclude-most-recently-connected", "2"], + obj=cli_state, + ) + assert older_connection_guid not in result_2.output + assert newer_connection_guid not in result_2.output + + +def test_add_backup_set_settings_to_dataframe_returns_one_line_per_backup_set( + cli_state, mock_device_settings +): + cli_state.sdk.devices.get_settings.return_value = mock_device_settings + testdf = DataFrame.from_records([{"guid": "1234"}]) + result = _add_backup_set_settings_to_dataframe(cli_state.sdk, testdf) + assert len(result) == 2 + + +def test_bulk_deactivate_uses_expected_arguments(runner, mocker, cli_state): + bulk_processor = mocker.patch(f"{_NAMESPACE}.run_bulk_process") + with runner.isolated_filesystem(): + 
with open("test_bulk_deactivate.csv", "w") as csv: + csv.writelines(["guid,username\n", "test,value\n"]) + runner.invoke( + cli, + ["devices", "bulk", "deactivate", "test_bulk_deactivate.csv"], + obj=cli_state, + ) + assert bulk_processor.call_args[0][1] == [ + { + "guid": "test", + "deactivated": "False", + "change_device_name": False, + "purge_date": None, + } + ] + + +def test_bulk_deactivate_uses_expected_arguments_when_no_header( + runner, mocker, cli_state +): + bulk_processor = mocker.patch(f"{_NAMESPACE}.run_bulk_process") + with runner.isolated_filesystem(): + with open("test_bulk_deactivate.csv", "w") as csv: + csv.writelines(["test_guid1\n"]) + runner.invoke( + cli, + ["devices", "bulk", "deactivate", "test_bulk_deactivate.csv"], + obj=cli_state, + ) + assert bulk_processor.call_args[0][1] == [ + { + "guid": "test_guid1", + "deactivated": "False", + "change_device_name": False, + "purge_date": None, + } + ] + + +def test_bulk_deactivate_ignores_blank_lines(runner, mocker, cli_state): + bulk_processor = mocker.patch(f"{_NAMESPACE}.run_bulk_process") + with runner.isolated_filesystem(): + with open("test_bulk_deactivate.csv", "w") as csv: + csv.writelines(["guid,username\n", "\n", "test,value\n\n"]) + runner.invoke( + cli, + ["devices", "bulk", "deactivate", "test_bulk_deactivate.csv"], + obj=cli_state, + ) + assert bulk_processor.call_args[0][1] == [ + { + "guid": "test", + "deactivated": "False", + "change_device_name": False, + "purge_date": None, + } + ] + + +def test_bulk_deactivate_uses_handler_that_when_encounters_error_increments_total_errors( + runner, mocker, cli_state, worker_stats +): + lines = ["guid\n", "1\n"] + + def _get(guid): + if guid == "test": + raise Exception("TEST") + return create_mock_response(mocker, data=TEST_DEVICE_RESPONSE) + + cli_state.sdk.devices.get_by_guid.side_effect = _get + bulk_processor = mocker.patch(f"{_NAMESPACE}.run_bulk_process") + with runner.isolated_filesystem(): + with open("test_bulk_deactivate.csv", "w") 
as csv: + csv.writelines(lines) + runner.invoke( + cli, + ["devices", "bulk", "deactivate", "test_bulk_deactivate.csv"], + obj=cli_state, + ) + handler = bulk_processor.call_args[0][0] + handler(guid="test", change_device_name="test", purge_date="test") + handler(guid="not test", change_device_name="test", purge_date="test") + assert worker_stats.increment_total_errors.call_count == 1 + + +def test_bulk_reactivate_uses_expected_arguments(runner, mocker, cli_state): + bulk_processor = mocker.patch(f"{_NAMESPACE}.run_bulk_process") + with runner.isolated_filesystem(): + with open("test_bulk_reactivate.csv", "w") as csv: + csv.writelines(["guid,username\n", "test,value\n"]) + runner.invoke( + cli, + ["devices", "bulk", "reactivate", "test_bulk_reactivate.csv"], + obj=cli_state, + ) + assert bulk_processor.call_args[0][1] == [{"guid": "test", "reactivated": "False"}] + + +def test_bulk_reactivate_uses_expected_arguments_when_no_header( + runner, mocker, cli_state +): + bulk_processor = mocker.patch(f"{_NAMESPACE}.run_bulk_process") + with runner.isolated_filesystem(): + with open("test_bulk_reactivate.csv", "w") as csv: + csv.writelines(["test_guid1\n"]) + runner.invoke( + cli, + ["devices", "bulk", "reactivate", "test_bulk_reactivate.csv"], + obj=cli_state, + ) + assert bulk_processor.call_args[0][1] == [ + {"guid": "test_guid1", "reactivated": "False"}, + ] + + +def test_bulk_reactivate_ignores_blank_lines(runner, mocker, cli_state): + bulk_processor = mocker.patch(f"{_NAMESPACE}.run_bulk_process") + with runner.isolated_filesystem(): + with open("test_bulk_reactivate.csv", "w") as csv: + csv.writelines(["guid,username\n", "\n", "test,value\n\n"]) + runner.invoke( + cli, + ["devices", "bulk", "reactivate", "test_bulk_reactivate.csv"], + obj=cli_state, + ) + assert bulk_processor.call_args[0][1] == [{"guid": "test", "reactivated": "False"}] + bulk_processor.assert_called_once() + + +def 
test_bulk_reactivate_uses_handler_that_when_encounters_error_increments_total_errors( + runner, mocker, cli_state, worker_stats +): + lines = ["guid\n", "1\n"] + + def _get(guid): + if guid == "test": + raise Exception("TEST") + return create_mock_response(mocker, data=TEST_DEVICE_RESPONSE) + + cli_state.sdk.devices.get_by_guid.side_effect = _get + bulk_processor = mocker.patch(f"{_NAMESPACE}.run_bulk_process") + with runner.isolated_filesystem(): + with open("test_bulk_reactivate.csv", "w") as csv: + csv.writelines(lines) + runner.invoke( + cli, + ["devices", "bulk", "reactivate", "test_bulk_reactivate.csv"], + obj=cli_state, + ) + handler = bulk_processor.call_args[0][0] + handler(guid="test") + handler(guid="not test") + assert worker_stats.increment_total_errors.call_count == 1 + + +def test_bulk_rename_uses_expected_arguments(runner, mocker, cli_state): + bulk_processor = mocker.patch(f"{_NAMESPACE}.run_bulk_process") + with runner.isolated_filesystem(): + with open("test_bulk_rename.csv", "w") as csv: + csv.writelines(["guid,name\n", "test-guid,test-name\n"]) + runner.invoke( + cli, + ["devices", "bulk", "rename", "test_bulk_rename.csv"], + obj=cli_state, + ) + assert bulk_processor.call_args[0][1] == [ + {"guid": "test-guid", "name": "test-name", "renamed": "False"} + ] + + +def test_bulk_rename_ignores_blank_lines(runner, mocker, cli_state): + bulk_processor = mocker.patch(f"{_NAMESPACE}.run_bulk_process") + with runner.isolated_filesystem(): + with open("test_bulk_rename.csv", "w") as csv: + csv.writelines(["guid,name\n", "\n", "test-guid,test-name\n\n"]) + runner.invoke( + cli, + ["devices", "bulk", "rename", "test_bulk_rename.csv"], + obj=cli_state, + ) + assert bulk_processor.call_args[0][1] == [ + {"guid": "test-guid", "name": "test-name", "renamed": "False"} + ] + bulk_processor.assert_called_once() + + +def test_bulk_rename_uses_handler_that_when_encounters_error_increments_total_errors( + runner, mocker, cli_state, worker_stats +): + def _get(guid): 
+ if guid == "test": + raise Exception("TEST") + return create_mock_response(mocker, data=TEST_DEVICE_RESPONSE) + + cli_state.sdk.devices.get_settings = _get + bulk_processor = mocker.patch(f"{_NAMESPACE}.run_bulk_process") + with runner.isolated_filesystem(): + with open("test_bulk_rename.csv", "w") as csv: + csv.writelines(["guid,name\n", "1,2\n"]) + runner.invoke( + cli, + ["devices", "bulk", "rename", "test_bulk_rename.csv"], + obj=cli_state, + ) + handler = bulk_processor.call_args[0][0] + handler(guid="test", name="test-name-1") + handler(guid="not test", name="test-name-2") + assert worker_stats.increment_total_errors.call_count == 1 diff --git a/tests/cmds/test_legal_hold.py b/tests/cmds/test_legal_hold.py new file mode 100644 index 000000000..b52d690fd --- /dev/null +++ b/tests/cmds/test_legal_hold.py @@ -0,0 +1,711 @@ +import datetime + +import pytest +from py42.exceptions import Py42BadRequestError +from py42.response import Py42Response +from requests import HTTPError +from requests import Response + +from code42cli.cmds.legal_hold import _check_matter_is_accessible +from code42cli.date_helper import convert_datetime_to_timestamp +from code42cli.main import cli + +_NAMESPACE = "code42cli.cmds.legal_hold" +TEST_MATTER_ID = "99999" +TEST_LEGAL_HOLD_MEMBERSHIP_UID = "88888" +TEST_LEGAL_HOLD_MEMBERSHIP_UID_2 = "77777" +ACTIVE_TEST_USERNAME = "user@example.com" +ACTIVE_TEST_USER_ID = "12345" +INACTIVE_TEST_USERNAME = "inactive@example.com" +INACTIVE_TEST_USER_ID = "54321" +TEST_POLICY_UID = "66666" +_CREATE_EVENT_ID = "564564654566" +_MEMBERSHIP_EVENT_ID = "74533457745" +TEST_PRESERVATION_POLICY_UID = "1010101010" +MATTER_RESPONSE = """ +{ + "legalHoldUid": "88888", + "name": "Test_Matter", + "description": "", + "notes": null, + "holdExtRef": null, + "active": true, + "creationDate": "2020-01-01T00:00:00.000-06:00", + "lastModified": "2019-12-19T20:32:10.781Z", + "creator": { + "userUid": "12345", + "username": "creator@example.com", + "email": 
"user@example.com", + "userExtRef": null + }, + "holdPolicyUid": "66666" +} +""" +POLICY_RESPONSE = """ +{ + "legalHoldPolicyUid": "1010101010", + "name": "Test", + "creatorUser": { + "userUid": "12345", + "userId": 12345, + "username": "user@example.com", + "email": "user@example.com", + "firstName": "User", + "lastName": "User" + }, + "policy": { + "backupOpenFiles": true, + "compression": "ON", + "dataDeDupAutoMaxFileSizeForLan": 1000000000, + "dataDeDupAutoMaxFileSizeForWan": 1000000000, + "dataDeDuplication": "FULL", + "encryptionEnabled": true, + "scanIntervalMillis": 86400000, + "scanTime": "03:00", + "watchFiles": true, + "destinations": [], + "backupRunWindow": { + "alwaysRun": true, + "startTimeOfDay": "01:00", + "endTimeOfDay": "06:00", + "days": ["SUNDAY", "MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY"] + }, + "backupPaths": { + "paths": [], + "excludePatterns": { + "windows": [], + "linux": [], + "macintosh": [], + "all": [] + } + }, + "retentionPolicy": { + "backupFrequencyMillis": 900000, + "keepDeleted": true, + "keepDeletedMinutes": 0, + "versionLastWeekIntervalMinutes": 15, + "versionLastNinetyDaysIntervalMinutes": 1440, + "versionLastYearIntervalMinutes": 10080, + "versionPrevYearsIntervalMinutes": 43200 + } + }, + "creationDate": "2019-05-14T16:19:09.930Z", + "modificationDate": "2019-05-14T16:19:09.930Z" +} +""" +EMPTY_CUSTODIANS_RESPONSE = """{"legalHoldMemberships": []}""" +ALL_ACTIVE_CUSTODIANS_RESPONSE = """ +{ + "legalHoldMemberships": [ + { + "legalHoldMembershipUid": "88888", + "active": true, + "creationDate": "2020-07-16T08:50:23.405Z", + "legalHold": { + "legalHoldUid": "99999", + "name": "test" + }, + "user": { + "userUid": "12345", + "username": "user@example.com", + "email": "user@example.com", + "userExtRef": null + } + } + ] +} +""" +ALL_INACTIVE_CUSTODIANS_RESPONSE = """ +{ + "legalHoldMemberships": [ + { + "legalHoldMembershipUid": "88888", + "active": false, + "creationDate": "2020-07-16T08:50:23.405Z", + 
"legalHold": { + "legalHoldUid": "99999", + "name": "test" + }, + "user": { + "userUid": "02345", + "username": "inactive@example.com", + "email": "user@example.com", + "userExtRef": null + } + } + ] +} +""" +ALL_ACTIVE_AND_INACTIVE_CUSTODIANS_RESPONSE = """ +{ + "legalHoldMemberships": [ + { + "legalHoldMembershipUid": "88888", + "active": true, + "creationDate": "2020-07-16T08:50:23.405Z", + "legalHold": { + "legalHoldUid": "99999", + "name": "test" + }, + "user": { + "userUid": "12345", + "username": "user@example.com", + "email": "user@example.com", + "userExtRef": null + } + }, + { + "legalHoldMembershipUid": "88888", + "active": false, + "creationDate": "2020-07-16T08:50:23.405Z", + "legalHold": { + "legalHoldUid": "99999", + "name": "test" + }, + "user": { + "userUid": "02345", + "username": "inactive@example.com", + "email": "user@example.com", + "userExtRef": null + } + } + ] +} +""" +TEST_EVENT_PAGE = { + "legalHoldEvents": [ + { + "eventUid": "564564654566", + "eventType": "HoldCreated", + "eventDate": "2015-05-16T15:07:44.820Z", + "legalHoldUid": "88888", + "actorUserUid": "12345", + "actorUsername": "holdcreator@example.com", + "actorFirstName": "john", + "actorLastName": "doe", + "actorUserExtRef": None, + "actorEmail": "holdcreatorr@example.com", + }, + { + "eventUid": "74533457745", + "eventType": "MembershipCreated", + "eventDate": "2019-05-17T15:07:44.820Z", + "legalHoldUid": "88888", + "legalHoldMembershipUid": "645576514441664433", + "custodianUserUid": "12345", + "custodianUsername": "kim.jones@code42.com", + "custodianFirstName": "kim", + "custodianLastName": "jones", + "custodianUserExtRef": None, + "custodianEmail": "user@example.com", + "actorUserUid": "1234512345", + "actorUsername": "creator@example.com", + "actorFirstName": "john", + "actorLastName": "doe", + "actorUserExtRef": None, + "actorEmail": "user@example.com", + }, + ] +} +EMPTY_EVENTS_RESPONSE = '{"legalHoldEvents": []}' +EMPTY_MATTERS_RESPONSE = '{"legalHolds": []}' 
+ALL_MATTERS_RESPONSE = f'{{"legalHolds": [{MATTER_RESPONSE}]}}' +LEGAL_HOLD_COMMAND = "legal-hold" + + +def _create_py42_response(mocker, text): + response = mocker.MagicMock(spec=Response) + response.text = text + response._content_consumed = mocker.MagicMock() + response.status_code = 200 + return Py42Response(response) + + +@pytest.fixture +def matter_response(mocker): + return _create_py42_response(mocker, MATTER_RESPONSE) + + +@pytest.fixture +def preservation_policy_response(mocker): + return _create_py42_response(mocker, POLICY_RESPONSE) + + +@pytest.fixture +def empty_legal_hold_memberships_response(mocker): + return [_create_py42_response(mocker, EMPTY_CUSTODIANS_RESPONSE)] + + +@pytest.fixture +def active_legal_hold_memberships_response(mocker): + return [_create_py42_response(mocker, ALL_ACTIVE_CUSTODIANS_RESPONSE)] + + +@pytest.fixture +def inactive_legal_hold_memberships_response(mocker): + return [_create_py42_response(mocker, ALL_INACTIVE_CUSTODIANS_RESPONSE)] + + +@pytest.fixture +def active_and_inactive_legal_hold_memberships_response(mocker): + return [_create_py42_response(mocker, ALL_ACTIVE_AND_INACTIVE_CUSTODIANS_RESPONSE)] + + +@pytest.fixture +def empty_events_response(mocker): + return _create_py42_response(mocker, EMPTY_EVENTS_RESPONSE) + + +def events_list_generator(): + yield TEST_EVENT_PAGE + + +@pytest.fixture +def get_user_id_success(cli_state): + cli_state.sdk.users.get_by_username.return_value = { + "users": [{"userUid": ACTIVE_TEST_USER_ID}] + } + + +@pytest.fixture +def empty_matters_response(mocker): + return [_create_py42_response(mocker, EMPTY_MATTERS_RESPONSE)] + + +@pytest.fixture +def all_matters_response(mocker): + return [_create_py42_response(mocker, ALL_MATTERS_RESPONSE)] + + +@pytest.fixture +def get_user_id_failure(cli_state): + cli_state.sdk.users.get_by_username.return_value = {"users": []} + + +@pytest.fixture +def check_matter_accessible_success(cli_state, matter_response): + 
cli_state.sdk.legalhold.get_matter_by_uid.return_value = matter_response + + +@pytest.fixture +def check_matter_accessible_failure(cli_state, custom_error): + cli_state.sdk.legalhold.get_matter_by_uid.side_effect = Py42BadRequestError( + custom_error + ) + + +@pytest.fixture +def get_all_events_success(cli_state): + cli_state.sdk.legalhold.get_all_events.return_value = events_list_generator() + + +@pytest.fixture +def user_already_added_response(mocker): + mock_response = mocker.MagicMock(spec=Response) + mock_response.text = "USER_ALREADY_IN_HOLD" + http_error = HTTPError() + http_error.response = mock_response + return Py42BadRequestError(http_error) + + +def test_add_user_raises_user_already_added_error_when_user_already_on_hold( + runner, cli_state, user_already_added_response +): + + cli_state.sdk.legalhold.add_to_matter.side_effect = user_already_added_response + result = runner.invoke( + cli, + [ + "legal-hold", + "add-user", + "--matter-id", + TEST_MATTER_ID, + "--username", + ACTIVE_TEST_USERNAME, + ], + obj=cli_state, + ) + assert result.exit_code == 1 + assert f"'{ACTIVE_TEST_USERNAME}' is already on the legal hold matter id={TEST_MATTER_ID}" + + +def test_add_user_raises_legalhold_not_found_error_if_matter_inaccessible( + runner, cli_state, check_matter_accessible_failure, get_user_id_success +): + result = runner.invoke( + cli, + [ + "legal-hold", + "add-user", + "--matter-id", + TEST_MATTER_ID, + "--username", + ACTIVE_TEST_USERNAME, + ], + obj=cli_state, + ) + assert result.exit_code == 1 + assert ( + f"Matter with id={TEST_MATTER_ID} either does not exist or your profile does not have " + f"permission to view it." 
+ ) + + +def test_add_user_adds_user_to_hold_if_user_and_matter_exist( + runner, cli_state, check_matter_accessible_success, get_user_id_success +): + runner.invoke( + cli, + [ + "legal-hold", + "add-user", + "--matter-id", + TEST_MATTER_ID, + "--username", + ACTIVE_TEST_USERNAME, + ], + obj=cli_state, + ) + cli_state.sdk.legalhold.add_to_matter.assert_called_once_with( + ACTIVE_TEST_USER_ID, TEST_MATTER_ID + ) + + +def test_remove_user_raises_legalhold_not_found_error_if_matter_inaccessible( + runner, cli_state, check_matter_accessible_failure, get_user_id_success +): + result = runner.invoke( + cli, + [ + "legal-hold", + "remove-user", + "--matter-id", + TEST_MATTER_ID, + "--username", + ACTIVE_TEST_USERNAME, + ], + obj=cli_state, + ) + assert result.exit_code == 1 + assert ( + f"Matter with id={TEST_MATTER_ID} either does not exist or your profile does not have " + "permission to view it." + ) + + +def test_remove_user_raises_user_not_in_matter_error_if_user_not_active_in_matter( + runner, + cli_state, + check_matter_accessible_success, + get_user_id_success, + empty_legal_hold_memberships_response, +): + cli_state.sdk.legalhold.get_all_matter_custodians.return_value = ( + empty_legal_hold_memberships_response + ) + result = runner.invoke( + cli, + [ + "legal-hold", + "remove-user", + "--matter-id", + TEST_MATTER_ID, + "--username", + ACTIVE_TEST_USERNAME, + ], + obj=cli_state, + ) + assert result.exit_code == 1 + assert ( + f"User '{ACTIVE_TEST_USERNAME}' is not an active member of legal hold matter " + f"'{TEST_MATTER_ID}'" + ) + + +def test_remove_user_removes_user_if_user_in_matter( + runner, + cli_state, + check_matter_accessible_success, + get_user_id_success, + active_legal_hold_memberships_response, +): + cli_state.sdk.legalhold.get_all_matter_custodians.return_value = ( + active_legal_hold_memberships_response + ) + membership_uid = "88888" + runner.invoke( + cli, + [ + "legal-hold", + "remove-user", + "--matter-id", + TEST_MATTER_ID, + "--username", + 
ACTIVE_TEST_USERNAME, + ], + obj=cli_state, + ) + cli_state.sdk.legalhold.remove_from_matter.assert_called_once_with(membership_uid) + + +def test_matter_accessible_check_only_makes_one_http_call_when_called_multiple_times_with_same_matter_id( + sdk, check_matter_accessible_success +): + _check_matter_is_accessible(sdk, TEST_MATTER_ID) + _check_matter_is_accessible(sdk, TEST_MATTER_ID) + _check_matter_is_accessible(sdk, TEST_MATTER_ID) + _check_matter_is_accessible(sdk, TEST_MATTER_ID) + assert sdk.legalhold.get_matter_by_uid.call_count == 1 + + +def test_show_matter_prints_active_and_inactive_results_when_include_inactive_flag_set( + runner, + cli_state, + check_matter_accessible_success, + active_and_inactive_legal_hold_memberships_response, +): + cli_state.sdk.legalhold.get_all_matter_custodians.return_value = ( + active_and_inactive_legal_hold_memberships_response + ) + result = runner.invoke( + cli, ["legal-hold", "show", TEST_MATTER_ID, "--include-inactive"], obj=cli_state + ) + assert ACTIVE_TEST_USERNAME in result.output + assert INACTIVE_TEST_USERNAME in result.output + + +def test_show_matter_prints_active_results_only( + runner, + cli_state, + check_matter_accessible_success, + active_and_inactive_legal_hold_memberships_response, +): + cli_state.sdk.legalhold.get_all_matter_custodians.return_value = ( + active_and_inactive_legal_hold_memberships_response + ) + result = runner.invoke(cli, ["legal-hold", "show", TEST_MATTER_ID], obj=cli_state) + assert ACTIVE_TEST_USERNAME in result.output + assert INACTIVE_TEST_USERNAME not in result.output + + +def test_show_matter_prints_no_active_members_when_no_membership( + runner, + cli_state, + check_matter_accessible_success, + empty_legal_hold_memberships_response, +): + cli_state.sdk.legalhold.get_all_matter_custodians.return_value = ( + empty_legal_hold_memberships_response + ) + result = runner.invoke(cli, ["legal-hold", "show", TEST_MATTER_ID], obj=cli_state) + assert ACTIVE_TEST_USERNAME not in result.output 
+ assert INACTIVE_TEST_USERNAME not in result.output + assert "No active matter members." in result.output + + +def test_show_matter_prints_no_inactive_members_when_no_inactive_membership( + runner, + cli_state, + check_matter_accessible_success, + active_legal_hold_memberships_response, +): + cli_state.sdk.legalhold.get_all_matter_custodians.return_value = ( + active_legal_hold_memberships_response + ) + result = runner.invoke( + cli, ["legal-hold", "show", TEST_MATTER_ID, "--include-inactive"], obj=cli_state + ) + assert ACTIVE_TEST_USERNAME in result.output + assert INACTIVE_TEST_USERNAME not in result.output + assert "No inactive matter members." in result.output + + +def test_show_matter_prints_no_active_members_when_no_active_membership( + runner, + cli_state, + check_matter_accessible_success, + inactive_legal_hold_memberships_response, +): + cli_state.sdk.legalhold.get_all_matter_custodians.return_value = ( + inactive_legal_hold_memberships_response + ) + result = runner.invoke( + cli, ["legal-hold", "show", TEST_MATTER_ID, "--include-inactive"], obj=cli_state + ) + assert ACTIVE_TEST_USERNAME not in result.output + assert INACTIVE_TEST_USERNAME in result.output + assert "No active matter members." in result.output + + +def test_show_matter_prints_no_active_members_when_no_active_membership_and_inactive_membership_included( + runner, + cli_state, + check_matter_accessible_success, + inactive_legal_hold_memberships_response, +): + cli_state.sdk.legalhold.get_all_matter_custodians.return_value = ( + inactive_legal_hold_memberships_response + ) + result = runner.invoke( + cli, ["legal-hold", "show", TEST_MATTER_ID, "--include-inactive"], obj=cli_state + ) + assert ACTIVE_TEST_USERNAME not in result.output + assert INACTIVE_TEST_USERNAME in result.output + assert "No active matter members." 
in result.output + + +def test_show_matter_prints_preservation_policy_when_include_policy_flag_set( + runner, cli_state, check_matter_accessible_success, preservation_policy_response +): + cli_state.sdk.legalhold.get_policy_by_uid.return_value = ( + preservation_policy_response + ) + result = runner.invoke( + cli, ["legal-hold", "show", TEST_MATTER_ID, "--include-policy"], obj=cli_state + ) + assert TEST_PRESERVATION_POLICY_UID in result.output + + +def test_show_matter_does_not_print_preservation_policy( + runner, cli_state, check_matter_accessible_success, preservation_policy_response +): + cli_state.sdk.legalhold.get_policy_by_uid.return_value = ( + preservation_policy_response + ) + result = runner.invoke(cli, ["legal-hold", "show", TEST_MATTER_ID], obj=cli_state) + assert TEST_PRESERVATION_POLICY_UID not in result.output + + +def test_add_bulk_users_uses_expected_arguments(runner, mocker, cli_state): + bulk_processor = mocker.patch(f"{_NAMESPACE}.run_bulk_process") + with runner.isolated_filesystem(): + with open("test_add.csv", "w") as csv: + csv.writelines(["matter_id,username\n", "test,value\n"]) + runner.invoke(cli, ["legal-hold", "bulk", "add", "test_add.csv"], obj=cli_state) + assert bulk_processor.call_args[0][1] == [ + {"matter_id": "test", "username": "value"} + ] + + +def test_remove_bulk_users_uses_expected_arguments(runner, mocker, cli_state): + bulk_processor = mocker.patch(f"{_NAMESPACE}.run_bulk_process") + with runner.isolated_filesystem(): + with open("test_remove.csv", "w") as csv: + csv.writelines(["matter_id,username\n", "test,value\n"]) + runner.invoke( + cli, ["legal-hold", "bulk", "remove", "test_remove.csv"], obj=cli_state + ) + assert bulk_processor.call_args[0][1] == [ + {"matter_id": "test", "username": "value"} + ] + + +def test_list_with_format_csv_returns_csv_format( + runner, cli_state, all_matters_response +): + cli_state.sdk.legalhold.get_all_matters.return_value = all_matters_response + result = runner.invoke(cli, 
["legal-hold", "list", "-f", "csv"], obj=cli_state) + assert "legalHoldUid" in result.output + assert "name" in result.output + assert "description" in result.output + assert "active" in result.output + assert "creationDate" in result.output + assert "lastModified" in result.output + assert "creator" in result.output + assert "holdPolicyUid" in result.output + assert "creator_username" in result.output + assert "88888" in result.output + assert "Test_Matter" in result.output + comma_count = [c for c in result.output if c == ","] + assert len(comma_count) >= 13 + + +def test_list_with_csv_format_returns_no_response_when_response_is_empty( + runner, cli_state, empty_legal_hold_memberships_response, empty_matters_response +): + cli_state.sdk.legalhold.get_all_matters.return_value = empty_matters_response + result = runner.invoke(cli, ["legal-hold", "list", "-f", "csv"], obj=cli_state) + assert "Matter ID,Name,Description,Creator,Creation Date" not in result.output + + +def test_search_events_shows_events_that_respect_type_filters( + runner, cli_state, get_all_events_success +): + + result = runner.invoke( + cli, + ["legal-hold", "search-events", "--event-type", "HoldCreated"], + obj=cli_state, + ) + + assert _CREATE_EVENT_ID in result.output + assert _MEMBERSHIP_EVENT_ID not in result.output + + +def test_search_events_with_csv_returns_no_events_when_response_is_empty( + runner, cli_state, get_all_events_success, empty_events_response +): + cli_state.sdk.legalhold.get_all_events.return_value = empty_events_response + result = runner.invoke(cli, ["legal-hold", "events", "-f", "csv"], obj=cli_state) + + assert ( + "actorEmail,actorUsername,actorLastName,actorUserUid,actorUserExtRef" + not in result.output + ) + + +def test_search_events_is_called_with_expected_begin_timestamp(runner, cli_state): + expected_timestamp = convert_datetime_to_timestamp( + datetime.datetime.strptime("2017-01-01", "%Y-%m-%d") + ) + command = ["legal-hold", "search-events", "--begin", 
"2017-01-01T00:00:00"]
+    runner.invoke(cli, command, obj=cli_state)
+
+    cli_state.sdk.legalhold.get_all_events.assert_called_once_with(
+        None, expected_timestamp, None
+    )
+
+
+def test_search_events_when_no_results_outputs_no_results(runner, cli_state, empty_matters_response):
+    cli_state.sdk.legalhold.get_all_matters.return_value = empty_matters_response
+    command = ["legal-hold", "search-events"]
+    result = runner.invoke(cli, command, obj=cli_state)
+    assert "No results found." in result.output
+
+
+@pytest.mark.parametrize(
+    "command, error_msg",
+    [
+        (
+            f"{LEGAL_HOLD_COMMAND} add-user --matter-id test-matter-id",
+            "Missing option '-u' / '--username'.",
+        ),
+        (
+            f"{LEGAL_HOLD_COMMAND} remove-user --matter-id test-matter-id",
+            "Missing option '-u' / '--username'.",
+        ),
+        (
+            f"{LEGAL_HOLD_COMMAND} add-user",
+            "Missing option '-m' / '--matter-id'.",
+        ),
+        (
+            f"{LEGAL_HOLD_COMMAND} remove-user",
+            "Missing option '-m' / '--matter-id'.",
+        ),
+        (f"{LEGAL_HOLD_COMMAND} show", "Missing argument 'MATTER_ID'."),
+        (
+            f"{LEGAL_HOLD_COMMAND} bulk add",
+            "Error: Missing argument 'CSV_FILE'.",
+        ),
+        (
+            f"{LEGAL_HOLD_COMMAND} bulk remove",
+            "Error: Missing argument 'CSV_FILE'.",
+        ),
+    ],
+)
+def test_legal_hold_command_when_missing_required_parameters_returns_error(
+    command, error_msg, runner, cli_state
+):
+    result = runner.invoke(cli, command.split(" "), obj=cli_state)
+    assert result.exit_code == 2
+    assert error_msg in "".join(result.output)
diff --git a/tests/cmds/test_profile.py b/tests/cmds/test_profile.py
new file mode 100644
index 000000000..859345d41
--- /dev/null
+++ b/tests/cmds/test_profile.py
@@ -0,0 +1,774 @@
+import pytest
+from py42.sdk import SDKClient
+
+from ..conftest import create_mock_profile
+from code42cli.errors import Code42CLIError
+from code42cli.errors import LoggedCLIError
+from code42cli.main import cli
+
+
+_SELECTED_PROFILE_NAME = "test_profile"
+
+
+@pytest.fixture
+def user_agreement(mocker):
+    mock = 
mocker.patch("code42cli.cmds.profile.does_user_agree") + mock.return_value = True + return mocker + + +@pytest.fixture +def user_disagreement(mocker): + mock = mocker.patch("code42cli.cmds.profile.does_user_agree") + mock.return_value = False + return mocker + + +@pytest.fixture +def mock_cliprofile_namespace(mocker): + return mocker.patch("code42cli.cmds.profile.cliprofile") + + +@pytest.fixture(autouse=True) +def mock_getpass(mocker): + mock = mocker.patch("code42cli.cmds.profile.getpass") + mock.return_value = "newpassword" + + +@pytest.fixture +def mock_verify(mocker): + return mocker.patch("code42cli.cmds.profile.create_sdk") + + +@pytest.fixture +def valid_connection(mocker, mock_verify): + mock_sdk = mocker.MagicMock(spec=SDKClient) + mock_verify.return_value = mock_sdk + return mock_verify + + +@pytest.fixture +def invalid_connection(mock_verify): + mock_verify.side_effect = LoggedCLIError("Problem connecting to server") + return mock_verify + + +@pytest.fixture +def profile_name_selector(mocker): + mock = mocker.patch("code42cli.cmds.profile.click.prompt") + mock.return_value = _SELECTED_PROFILE_NAME + return mock + + +def test_show_profile_outputs_profile_info(runner, mock_cliprofile_namespace, profile): + profile.name = "testname" + profile.authority_url = "example.com" + profile.username = "foo" + profile.disable_ssl_errors = True + mock_cliprofile_namespace.get_profile.return_value = profile + result = runner.invoke(cli, ["profile", "show"]) + assert "testname" in result.output + assert "example.com" in result.output + assert "foo" in result.output + assert "A password is set" in result.output + + +def test_show_profile_when_password_set_outputs_password_note( + runner, mock_cliprofile_namespace, profile +): + mock_cliprofile_namespace.get_profile.return_value = profile + mock_cliprofile_namespace.get_stored_password.return_value = None + result = runner.invoke(cli, ["profile", "show"]) + assert "A password is set" not in result.output + + +def 
test_create_profile_if_user_sets_password_is_created( + runner, user_agreement, mock_verify, mock_cliprofile_namespace +): + mock_cliprofile_namespace.profile_exists.return_value = False + runner.invoke( + cli, + [ + "profile", + "create", + "-n", + "foo", + "-s", + "bar", + "-u", + "baz", + "--disable-ssl-errors", + "True", + ], + ) + mock_cliprofile_namespace.create_profile.assert_called_once_with( + "foo", "bar", "baz", True, None, api_client_auth=False + ) + + +def test_create_profile_if_user_does_not_set_password_is_created( + runner, user_disagreement, mock_verify, mock_cliprofile_namespace +): + mock_cliprofile_namespace.profile_exists.return_value = False + runner.invoke( + cli, + [ + "profile", + "create", + "-n", + "foo", + "-s", + "bar", + "-u", + "baz", + "--disable-ssl-errors", + "True", + "--use-v2-file-events", + "True", + ], + ) + mock_cliprofile_namespace.create_profile.assert_called_once_with( + "foo", "bar", "baz", True, True, api_client_auth=False + ) + + +def test_create_profile_if_user_does_not_agree_does_not_save_password( + runner, user_disagreement, mock_verify, mock_cliprofile_namespace +): + mock_cliprofile_namespace.profile_exists.return_value = False + runner.invoke( + cli, + [ + "profile", + "create", + "-n", + "foo", + "-s", + "bar", + "-u", + "baz", + "--disable-ssl-errors", + "True", + ], + ) + assert not mock_cliprofile_namespace.set_password.call_count + + +def test_create_profile_if_credentials_invalid_password_not_saved( + runner, user_agreement, invalid_connection, mock_cliprofile_namespace +): + mock_cliprofile_namespace.profile_exists.return_value = False + result = runner.invoke( + cli, + ["profile", "create", "-n", "foo", "-s", "bar", "-u", "baz"], + ) + assert "Password not stored!" 
in result.output + assert not mock_cliprofile_namespace.set_password.call_count + + +def test_create_profile_with_password_option_if_credentials_invalid_password_not_saved( + runner, invalid_connection, mock_cliprofile_namespace +): + password = "test_pass" + mock_cliprofile_namespace.profile_exists.return_value = False + result = runner.invoke( + cli, + [ + "profile", + "create", + "-n", + "foo", + "-s", + "bar", + "-u", + "baz", + "--password", + password, + ], + ) + assert "Password not stored!" in result.output + assert not mock_cliprofile_namespace.set_password.call_count + assert "Would you like to set a password?" not in result.output + + +def test_create_profile_if_credentials_valid_password_saved( + runner, mocker, user_agreement, valid_connection, mock_cliprofile_namespace +): + mock_cliprofile_namespace.profile_exists.return_value = False + runner.invoke(cli, ["profile", "create", "-n", "foo", "-s", "bar", "-u", "baz"]) + mock_cliprofile_namespace.set_password.assert_called_once_with( + "newpassword", mocker.ANY + ) + + +def test_create_profile_with_password_option_if_credentials_valid_password_saved( + runner, mocker, valid_connection, mock_cliprofile_namespace +): + password = "test_pass" + mock_cliprofile_namespace.profile_exists.return_value = False + result = runner.invoke( + cli, + [ + "profile", + "create", + "-n", + "foo", + "-s", + "bar", + "-u", + "baz", + "--password", + password, + ], + ) + mock_cliprofile_namespace.set_password.assert_called_once_with(password, mocker.ANY) + assert "Would you like to set a password?" not in result.output + + +def test_create_profile_outputs_confirmation( + runner, user_agreement, valid_connection, mock_cliprofile_namespace +): + mock_cliprofile_namespace.profile_exists.return_value = False + result = runner.invoke( + cli, + [ + "profile", + "create", + "-n", + "foo", + "-s", + "bar", + "-u", + "baz", + "--disable-ssl-errors", + "True", + ], + ) + assert "Successfully created profile 'foo'." 
in result.output + + +def test_create_api_client_profile_with_api_client_id_and_secret_creates_profile( + runner, mock_cliprofile_namespace, valid_connection, profile +): + mock_cliprofile_namespace.profile_exists.return_value = False + mock_cliprofile_namespace.get_profile.return_value = profile + result = runner.invoke( + cli, + [ + "profile", + "create-api-client", + "-n", + "foo", + "-s", + "bar", + "--api-client-id", + "baz", + "--secret", + "fob", + "--disable-ssl-errors", + "True", + ], + ) + mock_cliprofile_namespace.create_profile.assert_called_once_with( + "foo", "bar", "baz", True, None, api_client_auth=True + ) + assert "Successfully created profile 'foo'." in result.output + + +def test_update_profile_updates_existing_profile( + runner, mock_cliprofile_namespace, user_agreement, valid_connection, profile +): + name = "foo" + profile.name = name + mock_cliprofile_namespace.get_profile.return_value = profile + runner.invoke( + cli, + [ + "profile", + "update", + "-n", + name, + "-s", + "bar", + "-u", + "baz", + "--disable-ssl-errors", + "True", + ], + ) + mock_cliprofile_namespace.update_profile.assert_called_once_with( + name, "bar", "baz", True, None + ) + + +def test_update_profile_updates_default_profile( + runner, mock_cliprofile_namespace, user_agreement, valid_connection, profile +): + name = "foo" + profile.name = name + mock_cliprofile_namespace.get_profile.return_value = profile + runner.invoke( + cli, + [ + "profile", + "update", + "-s", + "bar", + "-u", + "baz", + "--disable-ssl-errors", + "True", + "--use-v2-file-events", + "True", + ], + ) + mock_cliprofile_namespace.update_profile.assert_called_once_with( + name, "bar", "baz", True, True + ) + + +def test_update_profile_updates_name_alone( + runner, mock_cliprofile_namespace, user_agreement, valid_connection, profile +): + name = "foo" + profile.name = name + mock_cliprofile_namespace.get_profile.return_value = profile + runner.invoke( + cli, + ["profile", "update", "-u", "baz", 
"--disable-ssl-errors", "True"], + ) + mock_cliprofile_namespace.update_profile.assert_called_once_with( + name, None, "baz", True, None + ) + + +def test_update_profile_if_user_does_not_agree_does_not_save_password( + runner, mock_cliprofile_namespace, user_disagreement, invalid_connection, profile +): + name = "foo" + profile.name = name + mock_cliprofile_namespace.get_profile.return_value = profile + runner.invoke( + cli, + [ + "profile", + "update", + "-n", + name, + "-s", + "bar", + "-u", + "baz", + "--disable-ssl-errors", + "True", + ], + ) + assert not mock_cliprofile_namespace.set_password.call_count + + +def test_update_profile_if_credentials_invalid_password_not_saved( + runner, user_agreement, invalid_connection, mock_cliprofile_namespace, profile +): + name = "foo" + profile.name = name + profile.has_stored_password = False + mock_cliprofile_namespace.get_profile.return_value = profile + + result = runner.invoke( + cli, + [ + "profile", + "update", + "-n", + "foo", + "-s", + "bar", + "-u", + "baz", + "--disable-ssl-errors", + "True", + ], + ) + assert not mock_cliprofile_namespace.set_password.call_count + assert "Password not stored!" 
in result.output + + +def test_update_profile_if_user_agrees_and_valid_connection_sets_password( + runner, mocker, user_agreement, valid_connection, mock_cliprofile_namespace, profile +): + name = "foo" + profile.name = name + profile.has_stored_password = False + mock_cliprofile_namespace.get_profile.return_value = profile + runner.invoke( + cli, + [ + "profile", + "update", + "-n", + name, + "-s", + "bar", + "-u", + "baz", + "--disable-ssl-errors", + "True", + ], + ) + mock_cliprofile_namespace.set_password.assert_called_once_with( + "newpassword", mocker.ANY + ) + + +def test_update_profile_when_given_zero_args_prints_error_message( + runner, mock_cliprofile_namespace, profile +): + name = "foo" + profile.name = name + profile.ignore_ssl_errors = "False" + mock_cliprofile_namespace.get_profile.return_value = profile + result = runner.invoke(cli, ["profile", "update"]) + expected = ( + "Must provide at least one of `--server`, `--username`, `--password`, " + "`--use-v2-file-events` or `--disable-ssl-errors` when updating a username/password authenticated profile." + ) + assert "Profile 'foo' has been updated" not in result.output + assert expected in result.output + + +def test_update_profile_when_api_client_authentication_and_is_given_zero_args_prints_error_message( + runner, mock_cliprofile_namespace, profile +): + name = "foo" + profile.name = name + profile.api_client_auth = "True" + mock_cliprofile_namespace.get_profile.return_value = profile + result = runner.invoke(cli, ["profile", "update"]) + expected = ( + "Must provide at least one of `--server`, `--api-client-id`, `--secret`, `--use-v2-file-events` or " + "`--disable-ssl-errors` when updating an API client profile." 
+ ) + assert "Profile 'foo' has been updated" not in result.output + assert expected in result.output + + +def test_update_profile_when_api_client_authentication_updates_existing_profile( + runner, mock_cliprofile_namespace, profile +): + name = "foo" + profile.name = name + profile.api_client_auth = "True" + mock_cliprofile_namespace.get_profile.return_value = profile + result = runner.invoke( + cli, + [ + "profile", + "update", + "-n", + name, + "-s", + "bar", + "--api-client-id", + "baz", + "--use-v2-file-events", + "True", + ], + ) + mock_cliprofile_namespace.update_profile.assert_called_once_with( + name, "bar", "baz", None, True + ) + assert "Profile 'foo' has been updated" in result.output + + +def test_update_profile_when_updating_auth_profile_to_api_client_updates_existing_profile( + runner, valid_connection, mock_cliprofile_namespace, profile +): + name = "foo" + profile.name = name + profile.api_client_auth = "False" + mock_cliprofile_namespace.get_profile.return_value = profile + result = runner.invoke( + cli, + [ + "profile", + "update", + "-n", + name, + "-s", + "bar", + "--api-client-id", + "baz", + "--secret", + "fob", + "--use-v2-file-events", + "True", + "-y", + ], + ) + mock_cliprofile_namespace.update_profile.assert_called_once_with( + name, "bar", "baz", None, True, api_client_auth=True + ) + assert "Profile 'foo' has been updated" in result.output + + +def test_update_profile_when_updating_api_client_profile_to_user_credentails_updates_existing_profile( + runner, mock_cliprofile_namespace, profile, valid_connection +): + name = "foo" + profile.name = name + profile.api_client_auth = "True" + mock_cliprofile_namespace.get_profile.return_value = profile + result = runner.invoke( + cli, + [ + "profile", + "update", + "-n", + name, + "-s", + "bar", + "-u", + "baz", + "--password", + "fob", + "--use-v2-file-events", + "True", + "-y", + ], + ) + mock_cliprofile_namespace.update_profile.assert_called_once_with( + name, "bar", "baz", None, True, 
api_client_auth=False + ) + assert "Profile 'foo' has been updated" in result.output + + +def test_delete_profile_warns_if_deleting_default(runner, mock_cliprofile_namespace): + mock_cliprofile_namespace.is_default_profile.return_value = True + result = runner.invoke(cli, ["profile", "delete", "mockdefault"]) + assert "'mockdefault' is currently the default profile!" in result.output + + +def test_delete_profile_requires_profile_name_arg(runner, mock_cliprofile_namespace): + result = runner.invoke(cli, ["profile", "delete"]) + assert "Error: Missing argument 'PROFILE_NAME'." in result.output + assert mock_cliprofile_namespace.delete_profile.call_count == 0 + + +def test_delete_profile_raises_CLIError_when_profile_does_not_exist(runner): + result = runner.invoke(cli, ["profile", "delete", "not_a_real_profile"]) + assert result.output == "Error: Profile 'not_a_real_profile' does not exist.\n" + + +def test_delete_profile_does_nothing_if_user_doesnt_agree( + runner, user_disagreement, mock_cliprofile_namespace +): + runner.invoke(cli, ["profile", "delete", "mockdefault"]) + assert mock_cliprofile_namespace.delete_profile.call_count == 0 + + +def test_delete_profile_outputs_success( + runner, mock_cliprofile_namespace, user_agreement +): + result = runner.invoke(cli, ["profile", "delete", "mockdefault"]) + assert "Profile 'mockdefault' has been deleted." in result.output + + +def test_delete_all_warns_if_profiles_exist(runner, mock_cliprofile_namespace): + mock_cliprofile_namespace.get_all_profiles.return_value = [ + create_mock_profile("test1"), + create_mock_profile("test2"), + ] + result = runner.invoke(cli, ["profile", "delete-all"]) + assert "Are you sure you want to delete the following profiles?" 
in result.output + assert "test1" in result.output + assert "test2" in result.output + + +def test_delete_all_does_not_warn_if_assume_yes_flag(runner, mock_cliprofile_namespace): + mock_cliprofile_namespace.get_all_profiles.return_value = [ + create_mock_profile("test1"), + create_mock_profile("test2"), + ] + result = runner.invoke(cli, ["profile", "delete-all", "-y"]) + assert ( + "Are you sure you want to delete the following profiles?" not in result.output + ) + assert "Profile 'test1' has been deleted." in result.output + assert "Profile 'test2' has been deleted." in result.output + + +def test_delete_all_profiles_does_nothing_if_user_doesnt_agree( + runner, user_disagreement, mock_cliprofile_namespace +): + runner.invoke(cli, ["profile", "delete-all"]) + assert mock_cliprofile_namespace.delete_profile.call_count == 0 + + +def test_delete_all_deletes_all_existing_profiles( + runner, user_agreement, mock_cliprofile_namespace +): + mock_cliprofile_namespace.get_all_profiles.return_value = [ + create_mock_profile("test1"), + create_mock_profile("test2"), + ] + runner.invoke(cli, ["profile", "delete-all"]) + mock_cliprofile_namespace.delete_profile.assert_any_call("test1") + mock_cliprofile_namespace.delete_profile.assert_any_call("test2") + + +def test_reset_pw_if_credentials_valid_password_saved( + runner, mocker, user_agreement, mock_verify, mock_cliprofile_namespace +): + mock_verify.return_value = True + mock_cliprofile_namespace.profile_exists.return_value = False + runner.invoke(cli, ["profile", "reset-pw"]) + mock_cliprofile_namespace.set_password.assert_called_once_with( + "newpassword", mocker.ANY + ) + + +def test_reset_pw_if_credentials_invalid_password_not_saved( + runner, user_agreement, mock_verify, mock_cliprofile_namespace +): + mock_verify.side_effect = Code42CLIError("Invalid credentials for user") + mock_cliprofile_namespace.profile_exists.return_value = False + runner.invoke(cli, ["profile", "reset-pw"]) + assert not 
mock_cliprofile_namespace.set_password.call_count + + +def test_reset_pw_uses_default_profile_when_not_given_one( + runner, mocker, user_agreement, mock_verify, mock_cliprofile_namespace +): + mock_verify.return_value = True + mock_cliprofile_namespace.profile_exists.return_value = False + mock_profile = create_mock_profile("one") + mock_cliprofile_namespace.get_profile.return_value = mock_profile + res = runner.invoke(cli, ["profile", "reset-pw"]) + mock_cliprofile_namespace.set_password.assert_called_once_with( + "newpassword", mocker.ANY + ) + assert "Password updated for profile 'one'." in res.output + + +def test_list_profiles(runner, mock_cliprofile_namespace): + profiles = [ + create_mock_profile("one"), + create_mock_profile("two"), + create_mock_profile("three"), + ] + mock_cliprofile_namespace.get_all_profiles.return_value = profiles + result = runner.invoke(cli, ["profile", "list"]) + assert "one" in result.output + assert "two" in result.output + assert "three" in result.output + + +def test_list_profiles_when_no_profiles_outputs_no_profiles_message( + runner, mock_cliprofile_namespace +): + mock_cliprofile_namespace.get_all_profiles.return_value = [] + result = runner.invoke(cli, ["profile", "list"]) + assert "No existing profile." in result.output + + +def test_use_profile(runner, mock_cliprofile_namespace, profile): + result = runner.invoke(cli, ["profile", "use", profile.name]) + mock_cliprofile_namespace.switch_default_profile.assert_called_once_with( + profile.name + ) + assert f"{profile.name} has been set as the default profile." 
in result.output + + +def test_use_profile_when_not_given_profile_name_arg_sets_selected_profile_as_default( + runner, mock_cliprofile_namespace, profile_name_selector +): + runner.invoke(cli, ["profile", "use"]) + mock_cliprofile_namespace.switch_default_profile.assert_called_once_with( + _SELECTED_PROFILE_NAME + ) + + +def test_use_profile_when_not_given_profile_name_outputs_expected_text( + runner, mock_cliprofile_namespace, profile_name_selector +): + mock_cliprofile_namespace.get_all_profiles.return_value = [ + create_mock_profile("test1"), + create_mock_profile("test2"), + ] + result = runner.invoke(cli, ["profile", "use"]) + expected_prompt = "1. test1\n2. test2" + expected_result_message = "test_profile has been set as the default profile." + assert expected_prompt in result.output + assert expected_result_message in result.output + + +def test_totp_option_passes_token_to_sdk_on_profile_cmds_that_init_sdk( + runner, mocker, mock_cliprofile_namespace, cli_state +): + totp1 = "123456" + totp2 = "234567" + mock_create_sdk = mocker.patch("code42cli.cmds.profile.create_sdk") + runner.invoke( + cli, + [ + "profile", + "create", + "-n", + "foo", + "-s", + "bar", + "-u", + "baz", + "--password", + "testpass", + "--totp", + totp1, + ], + obj=cli_state, + ) + runner.invoke( + cli, + [ + "profile", + "update", + "-n", + "foo", + "--password", + "updatedpass", + "--totp", + totp2, + ], + obj=cli_state, + ) + assert mock_create_sdk.call_args_list[0][1]["totp"] == totp1 + assert mock_create_sdk.call_args_list[1][1]["totp"] == totp2 + + +def test_debug_option_passed_to_sdk_on_profile_cmds_that_init_sdk( + runner, mocker, mock_cliprofile_namespace, cli_state +): + mock_create_sdk = mocker.patch("code42cli.cmds.profile.create_sdk") + runner.invoke( + cli, + [ + "profile", + "create", + "-n", + "foo", + "-s", + "bar", + "-u", + "baz", + "--password", + "testpass", + "--debug", + ], + obj=cli_state, + ) + runner.invoke( + cli, + ["profile", "update", "-n", "foo", 
"--password", "updatedpass", "--debug"], + obj=cli_state, + ) + assert mock_create_sdk.call_args_list[0][1]["is_debug_mode"] is True + assert mock_create_sdk.call_args_list[1][1]["is_debug_mode"] is True diff --git a/tests/cmds/test_securitydata.py b/tests/cmds/test_securitydata.py new file mode 100644 index 000000000..a062be4d6 --- /dev/null +++ b/tests/cmds/test_securitydata.py @@ -0,0 +1,1540 @@ +import json +import logging + +import pandas +import py42.sdk.queries.fileevents.filters as f +import pytest +from py42.exceptions import Py42InvalidPageTokenError +from py42.sdk.queries.fileevents.file_event_query import FileEventQuery +from py42.sdk.queries.fileevents.filters import RiskIndicator +from py42.sdk.queries.fileevents.filters import RiskSeverity +from py42.sdk.queries.fileevents.filters.file_filter import FileCategory +from py42.sdk.queries.fileevents.v2 import filters as v2_filters +from tests.cmds.conftest import filter_term_is_in_call_args +from tests.cmds.conftest import get_mark_for_search_and_send_to +from tests.conftest import create_mock_response +from tests.conftest import get_test_date_str + +from code42cli.cmds.search.cursor_store import FileEventCursorStore +from code42cli.logger.enums import ServerProtocol +from code42cli.main import cli + +BEGIN_TIMESTAMP = 1577858400.0 +END_TIMESTAMP = 1580450400.0 +CURSOR_TIMESTAMP = 1579500000.0 +TEST_LIST_RESPONSE = { + "searches": [ + { + "id": "a083f08d-8f33-4cbd-81c4-8d1820b61185", + "name": "test-events", + "notes": "py42 is here", + }, + ] +} +TEST_EMPTY_LIST_RESPONSE = {"searches": []} +ADVANCED_QUERY_VALUES = { + "within_last_value": "P30D", + "hostname_1": "DESKTOP-H88BEKO", + "hostname_2": "W10E-X64-FALLCR", + "event_type": "CREATED", +} +ADVANCED_QUERY_JSON = """ +{{ + "purpose": "USER_EXECUTED_SEARCH", + "groups": [ + {{ + "filterClause": "AND", + "filters": [ + {{ + "value": "{within_last_value}", + "operator": "WITHIN_THE_LAST", + "term": "eventTimestamp" + }} + ] + }}, + {{ + "filterClause": 
"AND", + "filters": [ + {{ + "value": ".*", + "operator": "IS", + "term": "fileName" + }} + ] + }}, + {{ + "filterClause": "OR", + "filters": [ + {{ + "value": "{hostname_1}", + "operator": "IS", + "term": "osHostName" + }}, + {{ + "value": "{hostname_2}", + "operator": "IS", + "term": "osHostName" + }} + ] + }}, + {{ + "filterClause": "OR", + "filters": [ + {{ + "value": "{event_type}", + "operator": "IS", + "term": "eventType" + }} + ] + }} + ], + "pgSize": 100, + "pgNum": 1 +}}""".format( + **ADVANCED_QUERY_VALUES +) +advanced_query_incompat_test_params = pytest.mark.parametrize( + "arg", + [ + ("--begin", "1d"), + ("--end", "1d"), + ("--c42-username", "test@example.com"), + ("--actor", "test.testerson"), + ("--md5", "abcd1234"), + ("--sha256", "abcdefg12345678"), + ("--source", "Gmail"), + ("--file-name", "test.txt"), + ("--file-path", "C:\\Program Files"), + ("--file-category", "IMAGE"), + ("--process-owner", "root"), + ("--tab-url", "https://example.com"), + ("--type", "SharedViaLink"), + ("--include-non-exposure",), + ("--risk-indicator", "PUBLIC_CORPORATE_BOX"), + ("--risk-severity", "LOW"), + ], +) +saved_search_incompat_test_params = pytest.mark.parametrize( + "arg", + [ + ("--begin", "1d"), + ("--end", "1d"), + ("--c42-username", "test@example.com"), + ("--actor", "test.testerson"), + ("--md5", "abcd1234"), + ("--sha256", "abcdefg12345678"), + ("--source", "Gmail"), + ("--file-name", "test.txt"), + ("--file-path", "C:\\Program Files"), + ("--file-category", "IMAGE"), + ("--process-owner", "root"), + ("--tab-url", "https://example.com"), + ("--type", "SharedViaLink"), + ("--include-non-exposure",), + ("--use-checkpoint", "test"), + ("--risk-indicator", "PUBLIC_CORPORATE_BOX"), + ("--risk-severity", "LOW"), + ], +) + +TEST_FILE_EVENT_TIMESTAMP_1 = "2020-01-01T12:00:00.000Z" +TEST_FILE_EVENT_TIMESTAMP_2 = "2020-02-01T12:01:00.000111Z" +TEST_FILE_EVENT_ID_1 = "0_test1" +TEST_FILE_EVENT_ID_2 = "0_test2" +TEST_EVENTS = [ + { + "eventId": TEST_FILE_EVENT_ID_1, 
+ "eventType": "READ_BY_APP", + "eventTimestamp": TEST_FILE_EVENT_TIMESTAMP_1, + "insertionTimestamp": TEST_FILE_EVENT_TIMESTAMP_1, + "fileName": "test.txt", + "filePath": "/my/path", + "fileSize": 4242, + "fileOwner": "john.doe", + "fileType": "FILE", + "fileCategory": "Document", + "md5Checksum": "abcdef12345", + "sha256Checksum": "12345abcdef", + "destinationCategory": "Cloud Storage", + "destinationName": "Google Drive", + "riskScore": 5, + "riskSeverity": "MODERATE", + "riskIndicators": [ + {"name": "Google Drive upload", "weight": 5}, + {"name": "Document", "weight": 0}, + ], + }, + { + "eventId": TEST_FILE_EVENT_ID_2, + "eventType": "READ_BY_APP", + "eventTimestamp": TEST_FILE_EVENT_TIMESTAMP_2, + "insertionTimestamp": TEST_FILE_EVENT_TIMESTAMP_2, + "fileName": "test2.txt", + "filePath": "/my/path/2", + "fileSize": 4242, + "fileOwner": "john.doe", + "fileType": "FILE", + "fileCategory": "Document", + "md5Checksum": "abcdef1234567", + "sha256Checksum": "1234567abcdef", + "destinationCategory": "Cloud Storage", + "destinationName": "Google Drive", + "riskScore": 5, + "riskSeverity": "MODERATE", + "riskIndicators": [ + {"name": "Google Drive upload", "weight": 5}, + {"name": "Document", "weight": 0}, + ], + }, +] + +search_and_send_to_test = get_mark_for_search_and_send_to("security-data") + + +@pytest.fixture +def file_event_cursor_with_timestamp_checkpoint(mocker): + mock = mocker.patch("code42cli.cmds.securitydata._get_file_event_cursor_store") + mock_cursor = mocker.MagicMock(spec=FileEventCursorStore) + mock_cursor.get.return_value = CURSOR_TIMESTAMP + mock.return_value = mock_cursor + mock.expected_timestamp = "2020-01-20T06:00:00.000Z" + return mock + + +@pytest.fixture +def file_event_cursor_with_eventid_checkpoint(mocker): + mock = mocker.patch("code42cli.cmds.securitydata._get_file_event_cursor_store") + mock_cursor = mocker.MagicMock(spec=FileEventCursorStore) + mock_cursor.get.return_value = TEST_FILE_EVENT_ID_2 + mock.return_value = mock_cursor + 
mock.expected_eventid = "0_test2" + return mock + + +@pytest.fixture +def file_event_cursor_without_checkpoint(mocker): + mock = mocker.patch("code42cli.cmds.securitydata._get_file_event_cursor_store") + mock_cursor = mocker.MagicMock(spec=FileEventCursorStore) + mock_cursor.get.return_value = None + mock.return_value = mock_cursor + return mock + + +@pytest.fixture +def begin_option(mocker): + mock = mocker.patch("code42cli.cmds.securitydata.convert_datetime_to_timestamp") + mock.return_value = BEGIN_TIMESTAMP + mock.expected_timestamp = "2020-01-01T06:00:00.000Z" + return mock + + +@pytest.fixture +def send_to_logger_factory(mocker): + return mocker.patch("code42cli.cmds.search._try_get_logger_for_server") + + +@pytest.fixture +def mock_file_event_response(mocker): + data = json.dumps( + {"totalCount": 2, "fileEvents": TEST_EVENTS, "nextPgToken": "", "problems": ""} + ) + + response = create_mock_response(mocker, data=data) + + return response + + +@pytest.fixture +def search_all_file_events_success(cli_state, mock_file_event_response): + cli_state.sdk.securitydata.search_all_file_events.return_value = ( + mock_file_event_response + ) + + +@search_and_send_to_test +def test_search_and_send_to_passes_query_object_when_searching_file_events( + runner, cli_state, command, search_all_file_events_success +): + runner.invoke( + cli, [*command, "--advanced-query", ADVANCED_QUERY_JSON], obj=cli_state + ) + + query = cli_state.sdk.securitydata.search_all_file_events.call_args[0][0] + assert isinstance(query, FileEventQuery) + + +@search_and_send_to_test +def test_search_and_send_to_when_advanced_query_passed_as_json_string_builds_expected_query( + runner, cli_state, command, search_all_file_events_success +): + runner.invoke( + cli, [*command, "--advanced-query", ADVANCED_QUERY_JSON], obj=cli_state + ) + + query = cli_state.sdk.securitydata.search_all_file_events.call_args[0][0] + passed_filter_groups = query._filter_group_list + expected_event_filter = 
f.EventTimestamp.within_the_last( + ADVANCED_QUERY_VALUES["within_last_value"] + ) + expected_hostname_filter = f.OSHostname.is_in( + [ADVANCED_QUERY_VALUES["hostname_1"], ADVANCED_QUERY_VALUES["hostname_2"]] + ) + expected_event_type_filter = f.EventType.is_in( + [ADVANCED_QUERY_VALUES["event_type"]] + ) + expected_event_type_filter.filter_clause = "OR" + + assert expected_event_filter in passed_filter_groups + assert expected_hostname_filter in passed_filter_groups + assert expected_event_type_filter in passed_filter_groups + + +@search_and_send_to_test +def test_search_and_send_to_when_advanced_query_passed_as_filename_builds_expected_query( + runner, cli_state, command, search_all_file_events_success +): + + with runner.isolated_filesystem(): + with open("query.json", "w") as jsonfile: + jsonfile.write(ADVANCED_QUERY_JSON) + + runner.invoke(cli, [*command, "--advanced-query", "@query.json"], obj=cli_state) + + query = cli_state.sdk.securitydata.search_all_file_events.call_args[0][0] + passed_filter_groups = query._filter_group_list + expected_event_filter = f.EventTimestamp.within_the_last( + ADVANCED_QUERY_VALUES["within_last_value"] + ) + expected_hostname_filter = f.OSHostname.is_in( + [ADVANCED_QUERY_VALUES["hostname_1"], ADVANCED_QUERY_VALUES["hostname_2"]] + ) + expected_event_type_filter = f.EventType.is_in( + [ADVANCED_QUERY_VALUES["event_type"]] + ) + expected_event_type_filter.filter_clause = "OR" + assert expected_event_filter in passed_filter_groups + assert expected_hostname_filter in passed_filter_groups + assert expected_event_type_filter in passed_filter_groups + + +@search_and_send_to_test +def test_search_and_send_to_when_advanced_query_passed_non_existent_filename_raises_error( + runner, cli_state, command +): + with runner.isolated_filesystem(): + result = runner.invoke( + cli, [*command, "--advanced-query", "@not_a_file"], obj=cli_state + ) + assert result.exit_code == 2 + assert ( + " Invalid value for '--advanced-query': 'not_a_file': No 
such file or directory" + in result.stdout + ) or ("Could not open file: not_a_file" in result.stdout) + + +@search_and_send_to_test +def test_search_and_send_to_when_given_invalid_page_token_raises_error( + runner, cli_state, custom_error, file_event_cursor_with_eventid_checkpoint, command +): + cli_state.sdk.securitydata.search_all_file_events.side_effect = ( + Py42InvalidPageTokenError(custom_error, TEST_FILE_EVENT_ID_2) + ) + result = runner.invoke(cli, [*command, "--use-checkpoint", "test"], obj=cli_state) + assert f'Invalid page token: "{TEST_FILE_EVENT_ID_2}"' in result.output + + +@advanced_query_incompat_test_params +def test_search_with_advanced_query_and_incompatible_argument_errors( + runner, arg, cli_state +): + result = runner.invoke( + cli, + ["security-data", "search", "--advanced-query", ADVANCED_QUERY_JSON, *arg], + obj=cli_state, + ) + assert result.exit_code == 2 + assert f"{arg[0]} can't be used with: --advanced-query" in result.output + + +@advanced_query_incompat_test_params +def test_send_to_with_advanced_query_and_incompatible_argument_errors( + runner, arg, cli_state +): + result = runner.invoke( + cli, + [ + "security-data", + "send-to", + "0.0.0.0", + "--advanced-query", + ADVANCED_QUERY_JSON, + *arg, + ], + obj=cli_state, + ) + assert result.exit_code == 2 + assert f"{arg[0]} can't be used with: --advanced-query" in result.output + + +@saved_search_incompat_test_params +def test_search_with_saved_search_and_incompatible_argument_errors( + runner, arg, cli_state +): + result = runner.invoke( + cli, + ["security-data", "search", "--saved-search", "test_id", *arg], + obj=cli_state, + ) + assert result.exit_code == 2 + assert f"{arg[0]} can't be used with: --saved-search" in result.output + + +@saved_search_incompat_test_params +def test_send_to_with_saved_search_and_incompatible_argument_errors( + runner, arg, cli_state +): + result = runner.invoke( + cli, + ["security-data", "send-to", "0.0.0.0", "--saved-search", "test_id", *arg], + 
obj=cli_state, + ) + assert result.exit_code == 2 + assert f"{arg[0]} can't be used with: --saved-search" in result.output + + +@pytest.mark.parametrize("protocol", (ServerProtocol.UDP, ServerProtocol.TCP)) +def test_send_to_when_given_ignore_cert_validation_with_non_tls_protocol_fails_expectedly( + cli_state, runner, protocol +): + res = runner.invoke( + cli, + [ + "security-data", + "send-to", + "0.0.0.0", + "--begin", + "1d", + "--protocol", + protocol, + "--ignore-cert-validation", + ], + obj=cli_state, + ) + assert ( + "'--ignore-cert-validation' can only be used with '--protocol TLS-TCP'" + in res.output + ) + + +@pytest.mark.parametrize("protocol", (ServerProtocol.UDP, ServerProtocol.TCP)) +def test_send_to_when_given_certs_with_non_tls_protocol_fails_expectedly( + cli_state, runner, protocol +): + res = runner.invoke( + cli, + [ + "security-data", + "send-to", + "0.0.0.0", + "--begin", + "1d", + "--protocol", + protocol, + "--certs", + "certs.pem", + ], + obj=cli_state, + ) + assert "'--certs' can only be used with '--protocol TLS-TCP'" in res.output + + +@search_and_send_to_test +def test_search_and_send_to_when_given_begin_and_end_dates_uses_expected_query( + runner, cli_state, command, search_all_file_events_success +): + begin_date = get_test_date_str(days_ago=89) + end_date = get_test_date_str(days_ago=1) + + runner.invoke( + cli, + [ + *command, + "--begin", + get_test_date_str(days_ago=89), + "--end", + get_test_date_str(days_ago=1), + ], + obj=cli_state, + ) + query = cli_state.sdk.securitydata.search_all_file_events.call_args[0][0] + query_dict = dict(query) + + actual_begin = query_dict["groups"][1]["filters"][0]["value"] + expected_begin = f"{begin_date}T00:00:00.000Z" + + actual_end = query_dict["groups"][1]["filters"][1]["value"] + expected_end = f"{end_date}T23:59:59.999Z" + + assert actual_begin == expected_begin + assert actual_end == expected_end + + +@search_and_send_to_test +def 
test_search_and_send_to_when_given_begin_and_end_date_and_time_uses_expected_query( + runner, cli_state, command, search_all_file_events_success +): + begin_date = get_test_date_str(days_ago=89) + end_date = get_test_date_str(days_ago=1) + time = "15:33:02" + + runner.invoke( + cli, + [*command, "--begin", f"{begin_date} {time}", "--end", f"{end_date} {time}"], + obj=cli_state, + ) + query = cli_state.sdk.securitydata.search_all_file_events.call_args[0][0] + query_dict = dict(query) + + actual_begin = query_dict["groups"][1]["filters"][0]["value"] + expected_begin = f"{begin_date}T{time}.000Z" + + actual_end = query_dict["groups"][1]["filters"][1]["value"] + expected_end = f"{end_date}T{time}.000Z" + + assert actual_begin == expected_begin + assert actual_end == expected_end + + +@search_and_send_to_test +def test_search_and_send_to_when_given_begin_date_and_time_without_seconds_uses_expected_query( + runner, cli_state, command, search_all_file_events_success +): + date = get_test_date_str(days_ago=89) + time = "15:33" + + runner.invoke( + cli, + [*command, "--begin", f"{date} {time}"], + obj=cli_state, + ) + query = cli_state.sdk.securitydata.search_all_file_events.call_args[0][0] + query_dict = dict(query) + + actual = query_dict["groups"][1]["filters"][0]["value"] + expected = f"{date}T{time}:00.000Z" + assert actual == expected + + +@search_and_send_to_test +def test_search_and_send_to_when_given_end_date_and_time_uses_expected_query( + runner, cli_state, command, search_all_file_events_success +): + begin_date = get_test_date_str(days_ago=10) + end_date = get_test_date_str(days_ago=1) + time = "15:33" + + runner.invoke( + cli, + [*command, "--begin", begin_date, "--end", f"{end_date} {time}"], + obj=cli_state, + ) + query = cli_state.sdk.securitydata.search_all_file_events.call_args[0][0] + query_dict = dict(query) + + actual = query_dict["groups"][1]["filters"][1]["value"] + expected = f"{end_date}T{time}:00.000Z" + assert actual == expected + + 
+@search_and_send_to_test +def test_search_send_to_when_given_begin_date_more_than_ninety_days_back_errors( + runner, cli_state, command +): + result = runner.invoke( + cli, + [*command, "--begin", get_test_date_str(days_ago=91) + " 12:51:00"], + obj=cli_state, + ) + assert result.exit_code == 2 + assert "must be within 90 days" in result.output + + +@search_and_send_to_test +def test_search_and_send_to_when_given_begin_date_past_90_days_and_use_checkpoint_and_a_stored_cursor_exists_and_not_given_end_date_does_not_use_any_event_timestamp_filter( + runner, + cli_state, + file_event_cursor_with_eventid_checkpoint, + command, + search_all_file_events_success, +): + begin_date = get_test_date_str(days_ago=91) + " 12:51:00" + + runner.invoke( + cli, + [*command, "--begin", begin_date, "--use-checkpoint", "test"], + obj=cli_state, + ) + query = cli_state.sdk.securitydata.search_all_file_events.call_args[0][0] + assert not filter_term_is_in_call_args( + query._filter_group_list, f.InsertionTimestamp._term + ) + + +@search_and_send_to_test +def test_search_and_send_to_when_given_begin_date_and_not_use_checkpoint_and_cursor_exists_uses_begin_date( + runner, cli_state, command, search_all_file_events_success +): + begin_date = get_test_date_str(days_ago=1) + runner.invoke(cli, [*command, "--begin", begin_date], obj=cli_state) + query = cli_state.sdk.securitydata.search_all_file_events.call_args[0][0] + query_dict = dict(query) + actual_ts = query_dict["groups"][1]["filters"][0]["value"] + expected_ts = f"{begin_date}T00:00:00.000Z" + assert actual_ts == expected_ts + assert filter_term_is_in_call_args(query._filter_group_list, f.EventTimestamp._term) + + +@search_and_send_to_test +def test_search_and_send_to_when_end_date_is_before_begin_date_causes_exit( + runner, cli_state, command +): + begin_date = get_test_date_str(days_ago=1) + end_date = get_test_date_str(days_ago=3) + result = runner.invoke( + cli, + [*command, "--begin", begin_date, "--end", end_date], + 
obj=cli_state, + ) + assert result.exit_code == 2 + assert "'--begin': cannot be after --end date" in result.output + + +@search_and_send_to_test +def test_search_and_send_to_with_only_begin_calls_search_all_file_events_with_expected_args( + runner, cli_state, begin_option, command, search_all_file_events_success +): + result = runner.invoke(cli, [*command, "--begin", "1h"], obj=cli_state) + query = cli_state.sdk.securitydata.search_all_file_events.call_args[0][0] + query_dict = dict(query) + expected_filter_groups = [ + { + "filterClause": "AND", + "filters": [{"operator": "EXISTS", "term": "exposure", "value": None}], + }, + { + "filterClause": "AND", + "filters": [ + { + "operator": "ON_OR_AFTER", + "term": "eventTimestamp", + "value": begin_option.expected_timestamp, + } + ], + }, + ] + assert result.exit_code == 0 + assert query_dict["groups"] == expected_filter_groups + + +@search_and_send_to_test +def test_search_and_send_to_with_use_checkpoint_and_without_begin_and_without_checkpoint_causes_expected_error( + runner, cli_state, file_event_cursor_without_checkpoint, command +): + result = runner.invoke(cli, [*command, "--use-checkpoint", "test"], obj=cli_state) + assert result.exit_code == 2 + assert ( + "--begin date is required for --use-checkpoint when no checkpoint exists yet." 
+ in result.output + ) + + +@search_and_send_to_test +def test_search_and_send_to_with_use_checkpoint_and_with_begin_and_without_checkpoint_calls_search_all_file_events_with_begin_date( + runner, + cli_state, + begin_option, + file_event_cursor_without_checkpoint, + command, + search_all_file_events_success, +): + result = runner.invoke( + cli, + [*command, "--use-checkpoint", "test", "--begin", "1h"], + obj=cli_state, + ) + query = cli_state.sdk.securitydata.search_all_file_events.call_args[0][0] + query_dict = dict(query) + actual_begin = query_dict["groups"][1]["filters"][0]["value"] + + assert result.exit_code == 0 + assert len(query._filter_group_list) == 2 + assert begin_option.expected_timestamp == actual_begin + + +@search_and_send_to_test +def test_search_and_send_to_with_use_checkpoint_and_with_begin_and_with_stored_checkpoint_as_timestamp_calls_search_all_file_events_with_checkpoint_timestamp_and_ignores_begin_arg( + runner, + cli_state, + file_event_cursor_with_timestamp_checkpoint, + command, + search_all_file_events_success, +): + result = runner.invoke( + cli, + [*command, "--use-checkpoint", "test", "--begin", "1h"], + obj=cli_state, + ) + query = cli_state.sdk.securitydata.search_all_file_events.call_args[0][0] + query_dict = dict(query) + actual_query_timestamp = query_dict["groups"][1]["filters"][0]["value"] + assert result.exit_code == 0 + assert len(query._filter_group_list) == 2 + assert ( + file_event_cursor_with_timestamp_checkpoint.expected_timestamp + == actual_query_timestamp + ) + + +@search_and_send_to_test +def test_search_and_send_to_with_use_checkpoint_and_with_stored_checkpoint_as_eventid_calls_search_all_file_events_with_checkpoint_and_ignores_begin_arg( + runner, + cli_state, + file_event_cursor_with_eventid_checkpoint, + command, + search_all_file_events_success, +): + result = runner.invoke( + cli, + [*command, "--use-checkpoint", "test", "--begin", "1h"], + obj=cli_state, + ) + query = 
cli_state.sdk.securitydata.search_all_file_events.call_args[0][0] + assert result.exit_code == 0 + assert len(query._filter_group_list) == 1 + assert ( + f"checkpoint of {file_event_cursor_with_eventid_checkpoint.expected_eventid} exists" + in result.output + ) + + +@search_and_send_to_test +def test_search_and_send_to_when_given_invalid_exposure_type_causes_exit( + runner, cli_state, command +): + result = runner.invoke( + cli, + [*command, "--begin", "1d", "-t", "NotValid"], + obj=cli_state, + ) + assert result.exit_code == 2 + assert ( + "Invalid value" in result.output or "invalid choice: NotValid" in result.output + ) + + +@search_and_send_to_test +def test_search_and_send_to_when_given_username_uses_username_filter( + runner, cli_state, command, search_all_file_events_success +): + c42_username = "test@example.com" + command = [*command, "--begin", "1h", "--c42-username", c42_username] + + runner.invoke( + cli, + [*command], + obj=cli_state, + ) + query = cli_state.sdk.securitydata.search_all_file_events.call_args[0][0] + + filter_obj = f.DeviceUsername.is_in([c42_username]) + assert filter_obj in query._filter_group_list + + +@search_and_send_to_test +def test_search_and_send_to_when_given_actor_is_uses_username_filter( + runner, cli_state, command, search_all_file_events_success +): + actor_name = "test.testerson" + command = [*command, "--begin", "1h", "--actor", actor_name] + + runner.invoke( + cli, + [*command], + obj=cli_state, + ) + + query = cli_state.sdk.securitydata.search_all_file_events.call_args[0][0] + filter_obj = f.Actor.is_in([actor_name]) + assert filter_obj in query._filter_group_list + + +@search_and_send_to_test +def test_search_and_send_to_when_given_md5_uses_md5_filter( + runner, cli_state, command, search_all_file_events_success +): + md5 = "abcd12345" + command = [*command, "--begin", "1h", "--md5", md5] + runner.invoke(cli, [*command], obj=cli_state) + query = cli_state.sdk.securitydata.search_all_file_events.call_args[0][0] + 
filter_obj = f.MD5.is_in([md5]) + assert filter_obj in query._filter_group_list + + +@search_and_send_to_test +def test_search_and_send_to_when_given_sha256_uses_sha256_filter( + runner, cli_state, command, search_all_file_events_success +): + sha_256 = "abcd12345" + command = [*command, "--begin", "1h", "--sha256", sha_256] + runner.invoke( + cli, + command, + obj=cli_state, + ) + query = cli_state.sdk.securitydata.search_all_file_events.call_args[0][0] + filter_obj = f.SHA256.is_in([sha_256]) + assert filter_obj in query._filter_group_list + + +@search_and_send_to_test +def test_search_and_send_to_when_given_source_uses_source_filter( + runner, cli_state, command, search_all_file_events_success +): + source = "Gmail" + command = [*command, "--begin", "1h", "--source", source] + runner.invoke(cli, command, obj=cli_state) + query = cli_state.sdk.securitydata.search_all_file_events.call_args[0][0] + filter_obj = f.Source.is_in([source]) + assert filter_obj in query._filter_group_list + + +@search_and_send_to_test +def test_search_and_send_to_when_given_file_name_uses_file_name_filter( + runner, cli_state, command, search_all_file_events_success +): + filename = "test.txt" + command = [*command, "--begin", "1h", "--file-name", filename] + runner.invoke( + cli, + command, + obj=cli_state, + ) + query = cli_state.sdk.securitydata.search_all_file_events.call_args[0][0] + filter_obj = f.FileName.is_in([filename]) + assert filter_obj in query._filter_group_list + + +@search_and_send_to_test +def test_search_and_send_to_when_given_file_path_uses_file_path_filter( + runner, cli_state, command, search_all_file_events_success +): + filepath = "C:\\Program Files" + command = [*command, "--begin", "1h", "--file-path", filepath] + runner.invoke( + cli, + command, + obj=cli_state, + ) + query = cli_state.sdk.securitydata.search_all_file_events.call_args[0][0] + filter_obj = f.FilePath.is_in([filepath]) + assert filter_obj in query._filter_group_list + + +@search_and_send_to_test 
+def test_search_and_send_to_when_given_file_category_uses_file_category_filter( + runner, cli_state, command, search_all_file_events_success +): + file_category = FileCategory.IMAGE + command = [*command, "--begin", "1h", "--file-category", file_category] + runner.invoke( + cli, + command, + obj=cli_state, + ) + query = cli_state.sdk.securitydata.search_all_file_events.call_args[0][0] + filter_obj = f.FileCategory.is_in([file_category]) + assert filter_obj in query._filter_group_list + + +@pytest.mark.parametrize( + "category_choice", + [ + ("AUDIO", FileCategory.AUDIO), + ("DOCUMENT", FileCategory.DOCUMENT), + ("EXECUTABLE", FileCategory.EXECUTABLE), + ("IMAGE", FileCategory.IMAGE), + ("PDF", FileCategory.PDF), + ("PRESENTATION", FileCategory.PRESENTATION), + ("SCRIPT", FileCategory.SCRIPT), + ("SOURCE_CODE", FileCategory.SOURCE_CODE), + ("SPREADSHEET", FileCategory.SPREADSHEET), + ("VIDEO", FileCategory.VIDEO), + ("VIRTUAL_DISK_IMAGE", FileCategory.VIRTUAL_DISK_IMAGE), + ("ARCHIVE", FileCategory.ZIP), + ("ZIP", FileCategory.ZIP), + ("Zip", FileCategory.ZIP), + ], +) +def test_all_caps_file_category_choices_convert_to_filecategory_constant( + runner, cli_state, category_choice, search_all_file_events_success +): + ALL_CAPS_VALUE, camelCaseValue = category_choice + command = [ + "security-data", + "search", + "--begin", + "1h", + "--file-category", + ALL_CAPS_VALUE, + ] + runner.invoke( + cli, + command, + obj=cli_state, + ) + query = cli_state.sdk.securitydata.search_all_file_events.call_args[0][0] + filter_obj = f.FileCategory.is_in([camelCaseValue]) + assert filter_obj in query._filter_group_list + + +@search_and_send_to_test +def test_search_and_send_to_when_given_process_owner_uses_process_owner_filter( + runner, cli_state, command, search_all_file_events_success +): + process_owner = "root" + command = [*command, "-b", "1h", "--process-owner", process_owner] + runner.invoke( + cli, + command, + obj=cli_state, + ) + query = 
cli_state.sdk.securitydata.search_all_file_events.call_args[0][0] + filter_obj = f.ProcessOwner.is_in([process_owner]) + assert filter_obj in query._filter_group_list + + +@search_and_send_to_test +def test_search_and_send_to_when_given_tab_url_uses_process_tab_url_filter( + runner, cli_state, command, search_all_file_events_success +): + tab_url = "https://example.com" + command = [*command, "--begin", "1h", "--tab-url", tab_url] + runner.invoke( + cli, + command, + obj=cli_state, + ) + query = cli_state.sdk.securitydata.search_all_file_events.call_args[0][0] + filter_obj = f.TabURL.is_in([tab_url]) + assert filter_obj in query._filter_group_list + + +@search_and_send_to_test +def test_search_and_send_to_when_given_exposure_types_uses_exposure_type_is_in_filter( + runner, cli_state, command, search_all_file_events_success +): + exposure_type = "SharedViaLink" + command = [*command, "--begin", "1h", "--type", exposure_type] + runner.invoke( + cli, + command, + obj=cli_state, + ) + query = cli_state.sdk.securitydata.search_all_file_events.call_args[0][0] + filter_obj = f.ExposureType.is_in([exposure_type]) + assert filter_obj in query._filter_group_list + + +@search_and_send_to_test +def test_search_and_send_to_when_given_include_non_exposure_does_not_include_exposure_type_exists( + runner, cli_state, command, search_all_file_events_success +): + runner.invoke( + cli, + [*command, "--begin", "1h", "--include-non-exposure"], + obj=cli_state, + ) + query = cli_state.sdk.securitydata.search_all_file_events.call_args[0][0] + filter_obj = f.ExposureType.exists() + assert filter_obj not in query._filter_group_list + + +@search_and_send_to_test +def test_search_and_send_to_when_not_given_include_non_exposure_includes_exposure_type_exists( + runner, cli_state, command, search_all_file_events_success +): + runner.invoke( + cli, + [*command, "--begin", "1h"], + obj=cli_state, + ) + query = cli_state.sdk.securitydata.search_all_file_events.call_args[0][0] + filter_obj = 
f.ExposureType.exists() + assert filter_obj in query._filter_group_list + + +@search_and_send_to_test +def test_search_and_send_to_when_given_multiple_search_args_uses_expected_filters( + runner, cli_state, command, search_all_file_events_success +): + process_owner = "root" + c42_username = "test@example.com" + filename = "test.txt" + runner.invoke( + cli, + [ + *command, + "--begin", + "1h", + "--process-owner", + process_owner, + "--c42-username", + c42_username, + "--file-name", + filename, + ], + obj=cli_state, + ) + query = cli_state.sdk.securitydata.search_all_file_events.call_args[0][0] + assert f.ProcessOwner.is_in([process_owner]) in query._filter_group_list + assert f.FileName.is_in([filename]) in query._filter_group_list + assert f.DeviceUsername.is_in([c42_username]) in query._filter_group_list + + +@search_and_send_to_test +def test_search_and_send_to_when_given_include_non_exposure_and_exposure_types_causes_exit( + runner, cli_state, command +): + result = runner.invoke( + cli, + [ + *command, + "--begin", + "1h", + "--include-non-exposure", + "--type", + "SharedViaLink", + ], + obj=cli_state, + ) + assert result.exit_code == 2 + + +@search_and_send_to_test +def test_search_and_send_to_when_given_risk_indicator_uses_risk_indicator_filter( + runner, cli_state, command, search_all_file_events_success +): + risk_indicator = RiskIndicator.MessagingServiceUploads.SLACK + command = [*command, "--begin", "1h", "--risk-indicator", risk_indicator] + runner.invoke( + cli, + command, + obj=cli_state, + ) + query = cli_state.sdk.securitydata.search_all_file_events.call_args[0][0] + assert f.RiskIndicator.is_in([risk_indicator]) in query._filter_group_list + + +@pytest.mark.parametrize( + "indicator_choice", + [ + ("PUBLIC_CORPORATE_BOX", RiskIndicator.CloudDataExposures.PUBLIC_CORPORATE_BOX), + ( + "PUBLIC_CORPORATE_GOOGLE", + RiskIndicator.CloudDataExposures.PUBLIC_CORPORATE_GOOGLE_DRIVE, + ), + ( + "PUBLIC_CORPORATE_ONEDRIVE", + 
RiskIndicator.CloudDataExposures.PUBLIC_CORPORATE_ONEDRIVE, + ), + ("SENT_CORPORATE_GMAIL", RiskIndicator.CloudDataExposures.SENT_CORPORATE_GMAIL), + ("SHARED_CORPORATE_BOX", RiskIndicator.CloudDataExposures.SHARED_CORPORATE_BOX), + ( + "SHARED_CORPORATE_GOOGLE_DRIVE", + RiskIndicator.CloudDataExposures.SHARED_CORPORATE_GOOGLE_DRIVE, + ), + ( + "SHARED_CORPORATE_ONEDRIVE", + RiskIndicator.CloudDataExposures.SHARED_CORPORATE_ONEDRIVE, + ), + ("AMAZON_DRIVE", RiskIndicator.CloudStorageUploads.AMAZON_DRIVE), + ("BOX", RiskIndicator.CloudStorageUploads.BOX), + ("DROPBOX", RiskIndicator.CloudStorageUploads.DROPBOX), + ("GOOGLE_DRIVE", RiskIndicator.CloudStorageUploads.GOOGLE_DRIVE), + ("ICLOUD", RiskIndicator.CloudStorageUploads.ICLOUD), + ("MEGA", RiskIndicator.CloudStorageUploads.MEGA), + ("ONEDRIVE", RiskIndicator.CloudStorageUploads.ONEDRIVE), + ("ZOHO", RiskIndicator.CloudStorageUploads.ZOHO), + ("BITBUCKET", RiskIndicator.CodeRepositoryUploads.BITBUCKET), + ("GITHUB", RiskIndicator.CodeRepositoryUploads.GITHUB), + ("GITLAB", RiskIndicator.CodeRepositoryUploads.GITLAB), + ("SOURCEFORGE", RiskIndicator.CodeRepositoryUploads.SOURCEFORGE), + ("STASH", RiskIndicator.CodeRepositoryUploads.STASH), + ("163.COM", RiskIndicator.EmailServiceUploads.ONESIXTHREE_DOT_COM), + ("126.COM", RiskIndicator.EmailServiceUploads.ONETWOSIX_DOT_COM), + ("AOL", RiskIndicator.EmailServiceUploads.AOL), + ("COMCAST", RiskIndicator.EmailServiceUploads.COMCAST), + ("GMAIL", RiskIndicator.EmailServiceUploads.GMAIL), + ("ICLOUD_MAIL", RiskIndicator.EmailServiceUploads.ICLOUD), + ("MAIL.COM", RiskIndicator.EmailServiceUploads.MAIL_DOT_COM), + ("OUTLOOK", RiskIndicator.EmailServiceUploads.OUTLOOK), + ("PROTONMAIL", RiskIndicator.EmailServiceUploads.PROTONMAIL), + ("QQMAIL", RiskIndicator.EmailServiceUploads.QQMAIL), + ("SINA_MAIL", RiskIndicator.EmailServiceUploads.SINA_MAIL), + ("SOHU_MAIL", RiskIndicator.EmailServiceUploads.SOHU_MAIL), + ("YAHOO", RiskIndicator.EmailServiceUploads.YAHOO), + 
("ZOHO_MAIL", RiskIndicator.EmailServiceUploads.ZOHO_MAIL), + ("AIRDROP", RiskIndicator.ExternalDevices.AIRDROP), + ("REMOVABLE_MEDIA", RiskIndicator.ExternalDevices.REMOVABLE_MEDIA), + ("AUDIO", RiskIndicator.FileCategories.AUDIO), + ("DOCUMENT", RiskIndicator.FileCategories.DOCUMENT), + ("EXECUTABLE", RiskIndicator.FileCategories.EXECUTABLE), + ("IMAGE", RiskIndicator.FileCategories.IMAGE), + ("PDF", RiskIndicator.FileCategories.PDF), + ("PRESENTATION", RiskIndicator.FileCategories.PRESENTATION), + ("SCRIPT", RiskIndicator.FileCategories.SCRIPT), + ("SOURCE_CODE", RiskIndicator.FileCategories.SOURCE_CODE), + ("SPREADSHEET", RiskIndicator.FileCategories.SPREADSHEET), + ("VIDEO", RiskIndicator.FileCategories.VIDEO), + ("VIRTUAL_DISK_IMAGE", RiskIndicator.FileCategories.VIRTUAL_DISK_IMAGE), + ("ZIP", RiskIndicator.FileCategories.ZIP), + ( + "FACEBOOK_MESSENGER", + RiskIndicator.MessagingServiceUploads.FACEBOOK_MESSENGER, + ), + ("MICROSOFT_TEAMS", RiskIndicator.MessagingServiceUploads.MICROSOFT_TEAMS), + ("SLACK", RiskIndicator.MessagingServiceUploads.SLACK), + ("WHATSAPP", RiskIndicator.MessagingServiceUploads.WHATSAPP), + ("OTHER", RiskIndicator.Other.OTHER), + ("UNKNOWN", RiskIndicator.Other.UNKNOWN), + ("FACEBOOK", RiskIndicator.SocialMediaUploads.FACEBOOK), + ("LINKEDIN", RiskIndicator.SocialMediaUploads.LINKEDIN), + ("REDDIT", RiskIndicator.SocialMediaUploads.REDDIT), + ("TWITTER", RiskIndicator.SocialMediaUploads.TWITTER), + ("FILE_MISMATCH", RiskIndicator.UserBehavior.FILE_MISMATCH), + ("OFF_HOURS", RiskIndicator.UserBehavior.OFF_HOURS), + ("REMOTE", RiskIndicator.UserBehavior.REMOTE), + ("FIRST_DESTINATION_USE", RiskIndicator.UserBehavior.FIRST_DESTINATION_USE), + ("RARE_DESTINATION_USE", RiskIndicator.UserBehavior.RARE_DESTINATION_USE), + ], +) +def test_all_caps_risk_indicator_choices_convert_to_risk_indicator_string( + runner, cli_state, indicator_choice, search_all_file_events_success +): + ALL_CAPS_VALUE, string_value = indicator_choice + command = [ + 
"security-data", + "search", + "--begin", + "1h", + "--risk-indicator", + ALL_CAPS_VALUE, + ] + runner.invoke( + cli, + command, + obj=cli_state, + ) + query = cli_state.sdk.securitydata.search_all_file_events.call_args[0][0] + assert f.RiskIndicator.is_in([string_value]) in query._filter_group_list + + +@search_and_send_to_test +def test_search_and_send_to_when_given_risk_severity_uses_risk_severity_filter( + runner, cli_state, command, search_all_file_events_success +): + risk_severity = RiskSeverity.LOW + command = [*command, "--begin", "1h", "--risk-severity", risk_severity] + runner.invoke( + cli, + command, + obj=cli_state, + ) + query = cli_state.sdk.securitydata.search_all_file_events.call_args[0][0] + assert f.RiskSeverity.is_in([risk_severity]) in query._filter_group_list + + +@search_and_send_to_test +def test_search_and_send_to_handles_error_expected_message_logged_and_printed( + runner, cli_state, caplog, command +): + exception_msg = "Test Exception" + expected_msg = "Unknown problem occurred" + cli_state.sdk.securitydata.search_all_file_events.side_effect = Exception( + exception_msg + ) + with caplog.at_level(logging.ERROR): + result = runner.invoke(cli, [*command, "--begin", "1d"], obj=cli_state) + assert "Error:" in result.output + assert expected_msg in result.output + assert exception_msg in caplog.text + + +@search_and_send_to_test +def test_search_and_send_to_with_or_query_flag_produces_expected_query( + runner, cli_state, command, search_all_file_events_success +): + begin_date = get_test_date_str(days_ago=10) + test_username = "test@example.com" + test_filename = "test.txt" + runner.invoke( + cli, + [ + *command, + "--or-query", + "--begin", + begin_date, + "--c42-username", + test_username, + "--file-name", + test_filename, + ], + obj=cli_state, + ) + expected_query = { + "groupClause": "AND", + "groups": [ + { + "filterClause": "AND", + "filters": [ + {"operator": "EXISTS", "term": "exposure", "value": None}, + { + "operator": 
"ON_OR_AFTER", + "term": "eventTimestamp", + "value": f"{begin_date}T00:00:00.000Z", + }, + ], + }, + { + "filterClause": "OR", + "filters": [ + { + "operator": "IS", + "term": "deviceUserName", + "value": "test@example.com", + }, + {"operator": "IS", "term": "fileName", "value": "test.txt"}, + ], + }, + ], + "pgNum": 1, + "pgSize": 10000, + "srtDir": "asc", + "srtKey": "insertionTimestamp", + } + query = cli_state.sdk.securitydata.search_all_file_events.call_args[0][0] + actual_query = dict(query) + assert actual_query == expected_query + + +def test_saved_search_calls_search_all_file_events_and_saved_search_execute( + runner, cli_state, search_all_file_events_success +): + search_query = { + "groupClause": "AND", + "groups": [ + { + "filterClause": "AND", + "filters": [ + { + "operator": "ON_OR_AFTER", + "term": "eventTimestamp", + "value": "2020-05-01T00:00:00.000Z", + } + ], + }, + { + "filterClause": "OR", + "filters": [ + {"operator": "IS", "term": "eventType", "value": "DELETED"}, + {"operator": "IS", "term": "eventType", "value": "EMAILED"}, + {"operator": "IS", "term": "eventType", "value": "MODIFIED"}, + {"operator": "IS", "term": "eventType", "value": "READ_BY_AP"}, + {"operator": "IS", "term": "eventType", "value": "CREATED"}, + ], + }, + ], + "pgNum": 1, + "pgSize": 10000, + "srtDir": "asc", + "srtKey": "eventId", + } + saved_search_query = FileEventQuery.from_dict(search_query) + cli_state.sdk.securitydata.savedsearches.get_query.return_value = saved_search_query + runner.invoke( + cli, ["security-data", "search", "--saved-search", "test_id"], obj=cli_state + ) + query = cli_state.sdk.securitydata.search_all_file_events.call_args[0][0] + assert cli_state.sdk.securitydata.search_all_file_events.call_count == 1 + assert query._filter_group_list[0] in saved_search_query._filter_group_list + assert query._filter_group_list[1] in saved_search_query._filter_group_list + + +@pytest.mark.parametrize( + "protocol", (ServerProtocol.TLS_TCP, 
ServerProtocol.TLS_TCP, ServerProtocol.UDP) +) +def test_send_to_allows_protocol_arg( + cli_state, runner, protocol, search_all_file_events_success +): + res = runner.invoke( + cli, + [ + "security-data", + "send-to", + "0.0.0.0", + "--begin", + "1d", + "--protocol", + protocol, + ], + obj=cli_state, + ) + assert res.exit_code == 0 + + +def test_send_to_fails_when_given_unknown_protocol( + cli_state, runner, search_all_file_events_success +): + res = runner.invoke( + cli, + ["security-data", "send-to", "0.0.0.0", "--begin", "1d", "--protocol", "ATM"], + obj=cli_state, + ) + assert res.exit_code + + +def test_send_to_certs_and_ignore_cert_validation_args_are_incompatible( + cli_state, runner, search_all_file_events_success +): + res = runner.invoke( + cli, + [ + "security-data", + "send-to", + "0.0.0.0", + "--begin", + "1d", + "--protocol", + "TLS-TCP", + "--certs", + "certs/file", + "--ignore-cert-validation", + ], + obj=cli_state, + ) + assert "Error: --ignore-cert-validation can't be used with: --certs" in res.output + + +def test_send_to_creates_expected_logger( + cli_state, runner, send_to_logger_factory, search_all_file_events_success +): + runner.invoke( + cli, + [ + "security-data", + "send-to", + "0.0.0.0", + "--begin", + "1d", + "--protocol", + "TLS-TCP", + "--certs", + "certs/file", + ], + obj=cli_state, + ) + send_to_logger_factory.assert_called_once_with( + "0.0.0.0", "TLS-TCP", "RAW-JSON", "certs/file" + ) + + +def test_send_to_when_given_ignore_cert_validation_uses_certs_equal_to_ignore_str( + cli_state, runner, send_to_logger_factory, search_all_file_events_success +): + runner.invoke( + cli, + [ + "security-data", + "send-to", + "0.0.0.0", + "--begin", + "1d", + "--protocol", + "TLS-TCP", + "--ignore-cert-validation", + ], + obj=cli_state, + ) + send_to_logger_factory.assert_called_once_with( + "0.0.0.0", "TLS-TCP", "RAW-JSON", "ignore" + ) + + +def test_saved_search_list_calls_get_method(runner, cli_state): + runner.invoke(cli, ["security-data", 
"saved-search", "list"], obj=cli_state) + assert cli_state.sdk.securitydata.savedsearches.get.call_count == 1 + + +def test_saved_search_show_detail_calls_get_by_id_method(runner, cli_state): + test_id = "test_id" + runner.invoke( + cli, ["security-data", "saved-search", "show", test_id], obj=cli_state + ) + cli_state.sdk.securitydata.savedsearches.get_by_id.assert_called_once_with( + test_id, use_v2=False + ) + + +def test_saved_search_list_with_format_option_returns_csv_formatted_response( + runner, cli_state +): + cli_state.sdk.securitydata.savedsearches.get.return_value = TEST_LIST_RESPONSE + result = runner.invoke( + cli, ["security-data", "saved-search", "list", "-f", "CSV"], obj=cli_state + ) + assert "id" in result.output + assert "name" in result.output + assert "notes" in result.output + + assert "083f08d-8f33-4cbd-81c4-8d1820b61185" in result.output + assert "test-events" in result.output + assert "py42 is here" in result.output + + +def test_saved_search_list_with_format_option_does_not_return_when_response_is_empty( + runner, cli_state +): + cli_state.sdk.securitydata.savedsearches.get.return_value = TEST_EMPTY_LIST_RESPONSE + result = runner.invoke( + cli, ["security-data", "saved-search", "list", "-f", "csv"], obj=cli_state + ) + assert "Name,Id" not in result.output + + +def test_non_exposure_only_query_with_checkpoint_does_not_send_empty_filter_list( + runner, cli_state, mock_file_event_checkpoint, mocker +): + mock_file_event_checkpoint.get.return_value = "event_1234" + mock_get_all_file_events = mocker.patch( + "code42cli.cmds.securitydata._get_all_file_events" + ) + + def generator(): + yield pandas.DataFrame() + + mock_get_all_file_events.return_value = generator() + result = runner.invoke( + cli, + ["security-data", "search", "--include-non-exposure", "-c", "checkpoint"], + obj=cli_state, + ) + assert result.exit_code == 0 + assert len(mock_get_all_file_events.call_args[0][1]._filter_group_list) > 0 + + +def 
test_saved_search_get_by_id_uses_v2_flag_if_settings_enabled(runner, cli_state): + cli_state.profile.use_v2_file_events = "True" + test_saved_search_id = "123-test-saved-search" + runner.invoke( + cli, + ["security-data", "saved-search", "show", test_saved_search_id], + obj=cli_state, + ) + cli_state.profile.use_v2_file_events = "False" + cli_state.sdk.securitydata.savedsearches.get_by_id.assert_called_once_with( + test_saved_search_id, use_v2=True + ) + + +def test_saved_search_list_uses_v2_flag_if_settings_enabled(runner, cli_state): + cli_state.profile.use_v2_file_events = "True" + runner.invoke(cli, ["security-data", "saved-search", "list"], obj=cli_state) + cli_state.profile.use_v2_file_events = "False" + cli_state.sdk.securitydata.savedsearches.get.assert_called_once_with(use_v2=True) + + +def test_exposure_type_raises_exception_when_called_with_v2_settings_enabled( + runner, cli_state +): + cli_state.profile.use_v2_file_events = "True" + result = runner.invoke( + cli, + ["security-data", "search", "-b", "10d", "--type", "IsPublic"], + obj=cli_state, + ) + cli_state.profile.use_v2_file_events = "False" + assert result.exit_code == 1 + assert ( + "Exposure type (--type/-t) filter is incompatible with V2 file events. Use the event action (--event-action) filter instead." + in result.output + ) + + +def test_event_action_raises_exception_when_called_with_v2_settings_disabled( + runner, cli_state +): + cli_state.profile.use_v2_file_events = "False" + result = runner.invoke( + cli, + ["security-data", "search", "-b", "10d", "--event-action", "file-created"], + obj=cli_state, + ) + assert result.exit_code == 1 + assert ( + "Event action (--event-action) filter is incompatible with V1 file events. 
Upgrade your profile to use the V2 file event data model with `code42 profile update --use-v2-file-events True`" + in result.output + ) + + +@search_and_send_to_test +def test_search_and_send_to_builds_correct_query_when_v2_events_enabled( + runner, cli_state, command, search_all_file_events_success +): + cli_state.profile.use_v2_file_events = "True" + cmd = [ + *command, + "--begin", + "1d", + "--event-action", + "file-created", + "--c42-username", + "test-username", + "--md5", + "test-md5-hash", + "--sha256", + "test-sha256-hash", + "--source", + "Gmail", + "--file-name", + "my-test-file.txt", + "--file-path", + "my/test-directory/", + "--file-category", + "DOCUMENT", + "--process-owner", + "test-owner", + "--tab-url", + "google.com", + "--risk-indicator", + "SOURCE_CODE", + ] + runner.invoke(cli, cmd, obj=cli_state) + cli_state.profile.use_v2_file_events = "False" + query = cli_state.sdk.securitydata.search_all_file_events.call_args[0][0] + + filter_objs = [ + v2_filters.event.Action.is_in(["file-created"]), + v2_filters.user.Email.is_in(["test-username"]), + v2_filters.file.MD5.is_in(["test-md5-hash"]), + v2_filters.file.SHA256.is_in(["test-sha256-hash"]), + v2_filters.source.Name.is_in(["Gmail"]), + v2_filters.file.Name.is_in(["my-test-file.txt"]), + v2_filters.file.Directory.is_in(["my/test-directory/"]), + v2_filters.file.Category.is_in(["Document"]), + v2_filters.process.Owner.is_in(["test-owner"]), + v2_filters.destination.TabUrls.is_in(["google.com"]), + v2_filters.risk.Severity.not_eq(v2_filters.risk.Severity.NO_RISK_INDICATED), + v2_filters.risk.Indicators.is_in(["Source code"]), + ] + for filter_obj in filter_objs: + assert filter_obj in query._filter_group_list diff --git a/tests/cmds/test_shared.py b/tests/cmds/test_shared.py new file mode 100644 index 000000000..6d6ddcab0 --- /dev/null +++ b/tests/cmds/test_shared.py @@ -0,0 +1,9 @@ +import pytest + +from code42cli.cmds.shared import get_user_id +from code42cli.errors import UserDoesNotExistError + 
+
+def test_get_user_id_when_user_does_not_exist_raises_error(sdk_without_user):
+    with pytest.raises(UserDoesNotExistError):
+        get_user_id(sdk_without_user, "risky employee")
diff --git a/tests/cmds/test_trustedactivities.py b/tests/cmds/test_trustedactivities.py
new file mode 100644
index 000000000..4efe57666
--- /dev/null
+++ b/tests/cmds/test_trustedactivities.py
@@ -0,0 +1,401 @@
+import pytest
+from py42.exceptions import Py42DescriptionLimitExceededError
+from py42.exceptions import Py42TrustedActivityConflictError
+from py42.exceptions import Py42TrustedActivityIdNotFound
+from py42.exceptions import Py42TrustedActivityInvalidCharacterError
+from tests.conftest import create_mock_response
+
+from code42cli.main import cli
+
+TEST_RESOURCE_ID = 123
+ALL_TRUSTED_ACTIVITIES = """
+{
+    "trustResources": [
+        {
+            "description": "test description",
+            "resourceId": 456,
+            "type": "DOMAIN",
+            "updatedAt": "2021-09-22T15:46:35.088Z",
+            "updatedByUserUid": "user123",
+            "updatedByUsername": "username",
+            "value": "test"
+        }
+    ],
+    "totalCount": 10
+}
+"""
+
+TRUSTED_ACTIVITY_DETAILS = """
+{
+    "description": "test description",
+    "resourceId": 123,
+    "type": "DOMAIN",
+    "updatedAt": "2021-09-22T20:39:59.999Z",
+    "updatedByUserUid": "user123",
+    "updatedByUsername": "username",
+    "value": "test"
+}
+"""
+
+MISSING_ARGUMENT_ERROR = "Missing argument '{}'."
+MISSING_TYPE = MISSING_ARGUMENT_ERROR.format("{DOMAIN|SLACK}")
+MISSING_VALUE = MISSING_ARGUMENT_ERROR.format("VALUE")
+MISSING_RESOURCE_ID_ARG = MISSING_ARGUMENT_ERROR.format("RESOURCE_ID")
+RESOURCE_ID_NOT_FOUND_ERROR = "Resource ID '{}' not found."
+INVALID_CHARACTER_ERROR = "Invalid character in domain or Slack workspace name"
+CONFLICT_ERROR = (
+    "Duplicate URL or workspace name, '{}' already exists on your trusted list."
+)
+DESCRIPTION_LIMIT_ERROR = "Description limit exceeded, max 250 characters allowed."
+ + +@pytest.fixture +def get_all_activities_response(mocker): + def gen(): + yield create_mock_response(mocker, data=ALL_TRUSTED_ACTIVITIES) + + return gen() + + +@pytest.fixture +def trusted_activity_conflict_error(custom_error): + return Py42TrustedActivityConflictError(custom_error, "test-case") + + +@pytest.fixture +def trusted_activity_description_limit_exceeded_error(custom_error): + return Py42DescriptionLimitExceededError(custom_error) + + +@pytest.fixture +def trusted_activity_invalid_character_error(custom_error): + return Py42TrustedActivityInvalidCharacterError(custom_error) + + +@pytest.fixture +def trusted_activity_resource_id_not_found_error(custom_error): + return Py42TrustedActivityIdNotFound(custom_error, TEST_RESOURCE_ID) + + +def test_create_calls_create_with_expected_params(runner, cli_state): + command = ["trusted-activities", "create", "DOMAIN", "test-activity"] + runner.invoke( + cli, + command, + obj=cli_state, + ) + cli_state.sdk.trustedactivities.create.assert_called_once_with( + "DOMAIN", "test-activity", description=None + ) + + +def test_create_with_optional_fields_calls_create_with_expected_params( + runner, cli_state +): + command = [ + "trusted-activities", + "create", + "SLACK", + "test-activity", + "--description", + "description", + ] + runner.invoke( + cli, + command, + obj=cli_state, + ) + cli_state.sdk.trustedactivities.create.assert_called_once_with( + "SLACK", "test-activity", description="description" + ) + + +def test_create_when_missing_type_prints_error(runner, cli_state): + command = ["trusted-activities", "create", "--description", "description"] + result = runner.invoke(cli, command, obj=cli_state) + assert result.exit_code == 2 + assert ( + MISSING_TYPE in result.output + or MISSING_ARGUMENT_ERROR.format("[DOMAIN|SLACK]") in result.output + ) + + +def test_create_when_missing_value_prints_error(runner, cli_state): + command = ["trusted-activities", "create", "DOMAIN", "--description", "description"] + result = 
runner.invoke(cli, command, obj=cli_state) + assert result.exit_code == 2 + assert MISSING_VALUE in result.output + + +def test_create_when_invalid_character_py42_raises_exception_prints_error( + runner, cli_state, trusted_activity_invalid_character_error +): + cli_state.sdk.trustedactivities.create.side_effect = ( + trusted_activity_invalid_character_error + ) + command = ["trusted-activities", "create", "DOMAIN", "inv@lid-domain"] + result = runner.invoke(cli, command, obj=cli_state) + assert result.exit_code == 1 + assert INVALID_CHARACTER_ERROR in result.output + + +def test_create_when_duplicate_value_conflict_py42_raises_exception_prints_error( + runner, cli_state, trusted_activity_conflict_error +): + cli_state.sdk.trustedactivities.create.side_effect = trusted_activity_conflict_error + command = ["trusted-activities", "create", "DOMAIN", "test-case"] + result = runner.invoke(cli, command, obj=cli_state) + assert result.exit_code == 1 + assert CONFLICT_ERROR.format("test-case") in result.output + + +def test_create_when_description_limit_exceeded_py42_raises_exception_prints_error( + runner, cli_state, trusted_activity_description_limit_exceeded_error +): + cli_state.sdk.trustedactivities.create.side_effect = ( + trusted_activity_description_limit_exceeded_error + ) + command = [ + "trusted-activities", + "create", + "DOMAIN", + "test-domain", + "--description", + ">250 characters", + ] + result = runner.invoke(cli, command, obj=cli_state) + assert result.exit_code == 1 + assert DESCRIPTION_LIMIT_ERROR in result.output + + +def test_update_calls_update_with_expected_params(runner, cli_state): + command = [ + "trusted-activities", + "update", + f"{TEST_RESOURCE_ID}", + "--value", + "test-activity-update", + ] + runner.invoke( + cli, + command, + obj=cli_state, + ) + cli_state.sdk.trustedactivities.update.assert_called_once_with( + TEST_RESOURCE_ID, + value="test-activity-update", + description=None, + ) + + +def 
test_update_with_optional_fields_calls_update_with_expected_params( + runner, cli_state +): + command = [ + "trusted-activities", + "update", + f"{TEST_RESOURCE_ID}", + "--value", + "test-activity-update", + "--description", + "update description", + ] + runner.invoke( + cli, + command, + obj=cli_state, + ) + cli_state.sdk.trustedactivities.update.assert_called_once_with( + TEST_RESOURCE_ID, + value="test-activity-update", + description="update description", + ) + + +def test_update_when_missing_resource_id_prints_error(runner, cli_state): + command = ["trusted-activities", "update", "--value", "test-activity-update"] + result = runner.invoke(cli, command, obj=cli_state) + assert result.exit_code == 2 + assert MISSING_RESOURCE_ID_ARG in result.output + + +def test_update_when_resource_id_not_found_py42_raises_exception_prints_error( + runner, cli_state, trusted_activity_resource_id_not_found_error +): + cli_state.sdk.trustedactivities.update.side_effect = ( + trusted_activity_resource_id_not_found_error + ) + command = ["trusted-activities", "update", f"{TEST_RESOURCE_ID}"] + result = runner.invoke(cli, command, obj=cli_state) + assert result.exit_code == 1 + assert RESOURCE_ID_NOT_FOUND_ERROR.format(TEST_RESOURCE_ID) in result.output + + +def test_update_when_invalid_character_py42_raises_exception_prints_error( + runner, cli_state, trusted_activity_invalid_character_error +): + cli_state.sdk.trustedactivities.update.side_effect = ( + trusted_activity_invalid_character_error + ) + command = [ + "trusted-activities", + "update", + f"{TEST_RESOURCE_ID}", + "--value", + "inv@lid-domain", + ] + result = runner.invoke(cli, command, obj=cli_state) + assert result.exit_code == 1 + assert INVALID_CHARACTER_ERROR in result.output + + +def test_update_when_duplicate_value_conflict_py42_raises_exception_prints_error( + runner, cli_state, trusted_activity_conflict_error +): + cli_state.sdk.trustedactivities.update.side_effect = trusted_activity_conflict_error + command = [ + 
"trusted-activities", + "update", + f"{TEST_RESOURCE_ID}", + "--value", + "test-case", + ] + result = runner.invoke(cli, command, obj=cli_state) + assert result.exit_code == 1 + assert CONFLICT_ERROR.format("test-case") in result.output + + +def test_update_when_description_limit_exceeded_py42_raises_exception_prints_error( + runner, cli_state, trusted_activity_description_limit_exceeded_error +): + cli_state.sdk.trustedactivities.update.side_effect = ( + trusted_activity_description_limit_exceeded_error + ) + command = [ + "trusted-activities", + "update", + f"{TEST_RESOURCE_ID}", + "--description", + ">250 characters", + ] + result = runner.invoke(cli, command, obj=cli_state) + assert result.exit_code == 1 + assert DESCRIPTION_LIMIT_ERROR in result.output + + +def test_remove_calls_delete_with_expected_params(runner, cli_state): + command = ["trusted-activities", "remove", f"{TEST_RESOURCE_ID}"] + runner.invoke(cli, command, obj=cli_state) + cli_state.sdk.trustedactivities.delete.assert_called_once_with(TEST_RESOURCE_ID) + + +def test_remove_when_missing_resource_id_prints_error(runner, cli_state): + command = ["trusted-activities", "remove"] + result = runner.invoke(cli, command, obj=cli_state) + assert result.exit_code == 2 + assert MISSING_RESOURCE_ID_ARG in result.output + + +def test_remove_when_resource_id_not_found_py42_raises_exception_prints_error( + runner, cli_state, trusted_activity_resource_id_not_found_error +): + cli_state.sdk.trustedactivities.delete.side_effect = ( + trusted_activity_resource_id_not_found_error + ) + command = ["trusted-activities", "remove", f"{TEST_RESOURCE_ID}"] + result = runner.invoke(cli, command, obj=cli_state) + assert result.exit_code == 1 + assert RESOURCE_ID_NOT_FOUND_ERROR.format(TEST_RESOURCE_ID) in result.output + + +def test_list_calls_get_all_with_expected_params(runner, cli_state): + command = ["trusted-activities", "list"] + runner.invoke(cli, command, obj=cli_state) + assert 
cli_state.sdk.trustedactivities.get_all.call_count == 1 + + +def test_list_with_optional_fields_called_get_all_with_expected_params( + runner, cli_state +): + command = ["trusted-activities", "list", "--type", "DOMAIN"] + runner.invoke(cli, command, obj=cli_state) + cli_state.sdk.trustedactivities.get_all.assert_called_once_with(type="DOMAIN") + + +def test_list_prints_expected_data(runner, cli_state, get_all_activities_response): + cli_state.sdk.trustedactivities.get_all.return_value = get_all_activities_response + command = ["trusted-activities", "list"] + result = runner.invoke(cli, command, obj=cli_state) + assert "2021-09-22T15:46:35.088Z" in result.output + assert "456" in result.output + + +def test_bulk_add_trusted_activities_uses_expected_arguments( + runner, mocker, cli_state_with_user +): + bulk_processor = mocker.patch("code42cli.cmds.trustedactivities.run_bulk_process") + with runner.isolated_filesystem(): + with open("test_create.csv", "w") as csv: + csv.writelines( + [ + "type,value,description\n", + "DOMAIN,test-domain,\n", + "SLACK,test-slack,desc\n", + ] + ) + command = ["trusted-activities", "bulk", "create", "test_create.csv"] + runner.invoke( + cli, + command, + obj=cli_state_with_user, + ) + assert bulk_processor.call_args[0][1] == [ + {"type": "DOMAIN", "value": "test-domain", "description": ""}, + {"type": "SLACK", "value": "test-slack", "description": "desc"}, + ] + + +def test_bulk_update_trusted_activities_uses_expected_arguments( + runner, mocker, cli_state_with_user +): + bulk_processor = mocker.patch("code42cli.cmds.trustedactivities.run_bulk_process") + with runner.isolated_filesystem(): + with open("test_update.csv", "w") as csv: + csv.writelines( + [ + "resource_id,value,description\n", + "1,test-domain,\n", + "2,test-slack,desc\n", + "3,,desc\n", + ] + ) + command = ["trusted-activities", "bulk", "update", "test_update.csv"] + runner.invoke( + cli, + command, + obj=cli_state_with_user, + ) + assert bulk_processor.call_args[0][1] == 
[ + {"resource_id": "1", "value": "test-domain", "description": ""}, + {"resource_id": "2", "value": "test-slack", "description": "desc"}, + {"resource_id": "3", "value": "", "description": "desc"}, + ] + + +def test_bulk_remove_trusted_activities_uses_expected_arguments_when_no_header( + runner, mocker, cli_state_with_user +): + bulk_processor = mocker.patch("code42cli.cmds.trustedactivities.run_bulk_process") + with runner.isolated_filesystem(): + with open("test_remove.csv", "w") as csv: + csv.writelines(["1\n", "2\n"]) + command = ["trusted-activities", "bulk", "remove", "test_remove.csv"] + runner.invoke( + cli, + command, + obj=cli_state_with_user, + ) + assert bulk_processor.call_args[0][1] == [ + {"resource_id": "1"}, + {"resource_id": "2"}, + ] diff --git a/tests/cmds/test_users.py b/tests/cmds/test_users.py new file mode 100644 index 000000000..32bcc3be8 --- /dev/null +++ b/tests/cmds/test_users.py @@ -0,0 +1,1936 @@ +import datetime +import json + +import pytest +from py42.exceptions import Py42ActiveLegalHoldError +from py42.exceptions import Py42CloudAliasCharacterLimitExceededError +from py42.exceptions import Py42CloudAliasLimitExceededError +from py42.exceptions import Py42InvalidEmailError +from py42.exceptions import Py42InvalidPasswordError +from py42.exceptions import Py42InvalidUsernameError +from py42.exceptions import Py42NotFoundError +from py42.exceptions import Py42OrgNotFoundError +from py42.exceptions import Py42UserRiskProfileNotFound +from tests.conftest import create_mock_http_error +from tests.conftest import create_mock_response + +from code42cli.main import cli +from code42cli.worker import WorkerStats + +_NAMESPACE = "code42cli.cmds.users" +TEST_ROLE_RETURN_DATA = { + "data": [{"roleName": "Customer Cloud Admin", "roleId": "1234543"}] +} +TEST_USERS_RESPONSE = { + "users": [ + { + "firstName": "test", + "lastName": "username", + "orgId": 4321, + "orgUid": "44444444", + "orgName": "ORG_NAME", + "status": "Active", + "notes": "This 
is a note.", + "active": True, + "blocked": False, + "creationDate": "2021-03-12T20:07:40.898Z", + "modificationDate": "2021-03-12T20:07:40.938Z", + "roles": ["Desktop User"], + "userId": 1234, + "username": "test.username@example.com", + "userUid": "911162111513111325", + "invited": False, + "quotaInBytes": 55555, + } + ] +} +TEST_USER_RESPONSE = { + "tenantId": "SampleTenant1", + "userId": 12345, + "userName": "Sample.User1@samplecase.com", + "displayName": "Sample User1", + "notes": "This is an example of notes about Sample User1.", + "cloudAliases": ["Sample.User1@samplecase.com", "Sample.User1@gmail.com"], + "managerUid": 12345, + "managerUsername": "manager.user1@samplecase.com", + "managerDisplayName": "Manager Name", + "title": "Software Engineer", + "division": "Engineering", + "department": "Research and Development", + "employmentType": "Full-time", + "city": "Anytown", + "state": "MN", + "country": "US", + "riskFactors": ["FLIGHT_RISK", "HIGH_IMPACT_EMPLOYEE"], +} +TEST_PROFILE_RESPONSE = { + "userId": "12345-42", + "tenantId": "SampleTenant1", + "username": "foo@bar.com", + "displayName": "Foo Bar", + "notes": "", + "managerId": "123-42", + "managerUsername": "test@bar.com", + "managerDisplayName": "", + "title": "Engineer", + "division": "Engineering", + "department": "RDO", + "employmentType": "Remote", + "country": "USA", + "region": "Minnesota", + "locality": "Minneapolis", + "active": True, + "deleted": False, + "supportUser": False, + "startDate": {"year": 2020, "month": 8, "day": 10}, + "endDate": {"year": 2021, "month": 5, "day": 1}, + "cloudAliases": ["baz@bar.com", "foo@bar.com"], +} + +TEST_MATTER_RESPONSE = { + "legalHolds": [ + {"legalHoldUid": "123456789", "name": "Legal Hold #1", "active": True}, + {"legalHoldUid": "987654321", "name": "Legal Hold #2", "active": True}, + ] +} +TEST_CUSTODIANS_RESPONSE = { + "legalHoldMemberships": [ + { + "legalHoldMembershipUid": "99999", + "active": True, + "creationDate": "2020-07-16T08:50:23.405Z", + 
"legalHold": {"legalHoldUid": "123456789", "name": "Legal Hold #1"}, + "user": { + "userUid": "911162111513111325", + "username": "test.username@example.com", + "email": "test.username@example.com", + "userExtRef": None, + }, + }, + { + "legalHoldMembershipUid": "11111", + "active": True, + "creationDate": "2020-07-16T08:50:23.405Z", + "legalHold": {"legalHoldUid": "987654321", "name": "Legal Hold #2"}, + "user": { + "userUid": "911162111513111325", + "username": "test.username@example.com", + "email": "test.username@example.com", + "userExtRef": None, + }, + }, + ] +} +TEST_EMPTY_CUSTODIANS_RESPONSE = {"legalHoldMemberships": []} +TEST_EMPTY_MATTERS_RESPONSE = {"legalHolds": []} +TEST_EMPTY_USERS_RESPONSE = {"users": []} +TEST_USERNAME = TEST_USERS_RESPONSE["users"][0]["username"] +TEST_ALIAS = TEST_USER_RESPONSE["cloudAliases"][0] +TEST_USER_ID = TEST_USERS_RESPONSE["users"][0]["userId"] +TEST_USER_UID = TEST_USER_RESPONSE["userId"] +TEST_ROLE_NAME = TEST_ROLE_RETURN_DATA["data"][0]["roleName"] +TEST_GET_ORG_RESPONSE = { + "orgId": 9087, + "orgUid": "1007759454961904673", + "orgGuid": "a9578c3d-736b-4d96-80e5-71edd8a11ea3", + "orgName": "19may", + "orgExtRef": None, + "notes": None, + "status": "Active", + "active": True, + "blocked": False, + "parentOrgId": 2689, + "parentOrgUid": "890854247383106706", + "parentOrgGuid": "8c97h74umc2s8mmm", + "type": "ENTERPRISE", + "classification": "BASIC", + "externalId": "1007759454961904673", + "hierarchyCounts": {}, + "configInheritanceCounts": {}, + "creationDate": "2021-05-19T10:10:43.459Z", + "modificationDate": "2021-05-20T14:43:42.276Z", + "deactivationDate": None, + "settings": {"maxSeats": None, "maxBytes": None}, + "settingsInherited": {"maxSeats": "", "maxBytes": ""}, + "settingsSummary": {"maxSeats": "", "maxBytes": ""}, + "registrationKey": "72RU-8P9S-M7KK-RHCC", + "reporting": {"orgManagers": []}, + "customConfig": False, +} +TEST_EMPTY_ORGS_RESPONSE = {"totalCount": 0, "orgs": []} +TEST_GET_ALL_ORGS_RESPONSE = 
{"totalCount": 1, "orgs": [TEST_GET_ORG_RESPONSE]} +TEST_ORG_UID = "1007759454961904673" + + +@pytest.fixture +def update_user_response(mocker): + return create_mock_response(mocker) + + +@pytest.fixture +def get_available_roles_response(mocker): + return create_mock_response(mocker, data=TEST_ROLE_RETURN_DATA) + + +@pytest.fixture +def get_users_response(mocker): + return create_mock_response(mocker, data=TEST_USERS_RESPONSE) + + +@pytest.fixture +def get_user_response(mocker): + return create_mock_response(mocker, data=TEST_USER_RESPONSE) + + +@pytest.fixture +def get_user_failure(mocker): + return Py42UserRiskProfileNotFound( + create_mock_http_error(mocker, data=None, status=400), + "Failure in HTTP call 400 Client Error: Bad Request for url: https://ecm-east.us.code42.com/svc/api/v2/user/getbyusername.", + ) + + +@pytest.fixture +def change_org_response(mocker): + return create_mock_response(mocker) + + +@pytest.fixture +def get_org_response(mocker): + return create_mock_response(mocker, data=TEST_GET_ORG_RESPONSE) + + +@pytest.fixture +def get_org_success(cli_state, get_org_response): + cli_state.sdk.orgs.get_by_uid.return_value = get_org_response + + +@pytest.fixture +def get_all_orgs_empty_success(mocker, cli_state): + def get_all_orgs_empty_generator(): + yield create_mock_response(mocker, data=json.dumps(TEST_EMPTY_ORGS_RESPONSE)) + + cli_state.sdk.orgs.get_all.return_value = get_all_orgs_empty_generator() + + +@pytest.fixture +def get_all_orgs_success(mocker, cli_state): + def get_all_orgs_generator(): + yield create_mock_response(mocker, data=json.dumps(TEST_GET_ALL_ORGS_RESPONSE)) + + cli_state.sdk.orgs.get_all.return_value = get_all_orgs_generator() + + +@pytest.fixture +def get_all_users_success(mocker, cli_state): + def get_all_users_generator(): + yield create_mock_response(mocker, data=TEST_USERS_RESPONSE) + + cli_state.sdk.users.get_all.return_value = get_all_users_generator() + + +@pytest.fixture +def get_user_id_success(cli_state, 
get_users_response): + """Get by username returns a list of users""" + cli_state.sdk.users.get_by_username.return_value = get_users_response + + +@pytest.fixture +def get_user_uid_success(cli_state, get_user_response): + """userriskprofile.get_by_username returns a single user""" + cli_state.sdk.userriskprofile.get_by_username.return_value = get_user_response + + +@pytest.fixture +def get_user_uid_failure(cli_state, get_user_failure): + cli_state.sdk.userriskprofile.get_by_username.side_effect = get_user_failure + + +@pytest.fixture +def get_user_id_failure(mocker, cli_state): + cli_state.sdk.users.get_by_username.return_value = create_mock_response( + mocker, data=TEST_EMPTY_USERS_RESPONSE + ) + + +@pytest.fixture +def get_custodian_failure(mocker, cli_state): + def empty_custodian_list_generator(): + yield create_mock_response(mocker, data=TEST_EMPTY_CUSTODIANS_RESPONSE) + + cli_state.sdk.legalhold.get_all_matter_custodians.return_value = ( + empty_custodian_list_generator() + ) + + +@pytest.fixture +def get_matter_failure(mocker, cli_state): + def empty_matter_list_generator(): + yield create_mock_response(mocker, data=TEST_EMPTY_MATTERS_RESPONSE) + + cli_state.sdk.legalhold.get_all_matters.return_value = empty_matter_list_generator() + + +@pytest.fixture +def get_all_matter_success(mocker, cli_state): + def matter_list_generator(): + yield create_mock_response(mocker, data=TEST_MATTER_RESPONSE) + + cli_state.sdk.legalhold.get_all_matters.return_value = matter_list_generator() + + +@pytest.fixture +def get_all_custodian_success(mocker, cli_state): + def custodian_list_generator(): + yield create_mock_response(mocker, data=TEST_CUSTODIANS_RESPONSE) + + cli_state.sdk.legalhold.get_all_matter_custodians.return_value = ( + custodian_list_generator() + ) + + +@pytest.fixture +def get_available_roles_success(cli_state, get_available_roles_response): + cli_state.sdk.users.get_available_roles.return_value = get_available_roles_response + + +@pytest.fixture +def 
update_user_success(cli_state, update_user_response): + cli_state.sdk.users.update_user.return_value = update_user_response + + +@pytest.fixture +def deactivate_user_success(mocker, cli_state): + cli_state.sdk.users.deactivate.return_value = create_mock_response(mocker) + + +@pytest.fixture +def deactivate_user_legal_hold_failure(mocker, cli_state): + cli_state.sdk.users.deactivate.side_effect = Py42ActiveLegalHoldError( + create_mock_http_error(mocker, status=400), "user", TEST_USER_ID + ) + + +@pytest.fixture +def reactivate_user_success(mocker, cli_state): + cli_state.sdk.users.deactivate.return_value = create_mock_response(mocker) + + +@pytest.fixture +def change_org_success(cli_state, change_org_response): + cli_state.sdk.users.change_org_assignment.return_value = change_org_response + + +@pytest.fixture +def add_alias_limit_failure(mocker, cli_state): + cli_state.sdk.userriskprofile.add_cloud_aliases.side_effect = ( + Py42CloudAliasLimitExceededError(create_mock_http_error(mocker)) + ) + + +@pytest.fixture +def remove_alias_success(mocker, cli_state): + cli_state.sdk.userriskprofile.delete_cloud_aliases.return_value = ( + create_mock_response(mocker) + ) + + +@pytest.fixture +def worker_stats_factory(mocker): + return mocker.patch(f"{_NAMESPACE}.create_worker_stats") + + +@pytest.fixture +def worker_stats(mocker, worker_stats_factory): + stats = mocker.MagicMock(spec=WorkerStats) + worker_stats_factory.return_value = stats + return stats + + +def test_list_when_non_table_format_outputs_expected_columns( + runner, cli_state, get_all_users_success +): + result = runner.invoke(cli, ["users", "list", "-f", "CSV"], obj=cli_state) + assert "firstName" in result.output + assert "lastName" in result.output + assert "orgId" in result.output + assert "orgUid" in result.output + assert "orgName" in result.output + assert "status" in result.output + assert "notes" in result.output + assert "active" in result.output + assert "blocked" in result.output + assert 
"creationDate" in result.output + assert "modificationDate" in result.output + assert "userId" in result.output + assert "username" in result.output + assert "userUid" in result.output + assert "invited" in result.output + assert "quotaInBytes" in result.output + + +def test_list_when_table_format_outputs_expected_columns( + runner, cli_state, get_all_users_success +): + result = runner.invoke(cli, ["users", "list", "-f", "TABLE"], obj=cli_state) + assert "orgUid" in result.output + assert "status" in result.output + assert "username" in result.output + assert "userUid" in result.output + + assert "firstName" not in result.output + assert "lastName" not in result.output + assert "orgId" not in result.output + assert "orgName" not in result.output + assert "notes" not in result.output + assert "active" not in result.output + assert "blocked" not in result.output + assert "creationDate" not in result.output + assert "modificationDate" not in result.output + assert "userId" not in result.output + assert "invited" not in result.output + assert "quotaInBytes" not in result.output + + +def test_list_users_calls_users_get_all_with_expected_role_id( + runner, cli_state, get_available_roles_success, get_all_users_success +): + role_name = "Customer Cloud Admin" + runner.invoke(cli, ["users", "list", "--role-name", role_name], obj=cli_state) + cli_state.sdk.users.get_all.assert_called_once_with( + active=None, org_uid=None, role_id="1234543", incRoles=False + ) + + +def test_list_users_calls_get_all_users_with_correct_parameters( + runner, cli_state, get_all_users_success +): + org_uid = "TEST_ORG_UID" + runner.invoke( + cli, + ["users", "list", "--org-uid", org_uid, "--active", "--include-roles"], + obj=cli_state, + ) + cli_state.sdk.users.get_all.assert_called_once_with( + active=True, org_uid=org_uid, role_id=None, incRoles=True + ) + + +def test_list_users_when_given_inactive_uses_active_equals_false( + runner, cli_state, get_available_roles_success, 
get_all_users_success +): + runner.invoke(cli, ["users", "list", "--inactive"], obj=cli_state) + cli_state.sdk.users.get_all.assert_called_once_with( + active=False, org_uid=None, role_id=None, incRoles=False + ) + + +def test_list_users_when_given_active_and_inactive_raises_error( + runner, cli_state, get_available_roles_success, get_all_users_success +): + result = runner.invoke( + cli, ["users", "list", "--active", "--inactive"], obj=cli_state + ) + assert "Error: --inactive can't be used with: --active" in result.output + + +def test_list_users_when_given_excluding_active_and_inactive_uses_active_equals_none( + runner, cli_state, get_available_roles_success, get_all_users_success +): + runner.invoke(cli, ["users", "list"], obj=cli_state) + cli_state.sdk.users.get_all.assert_called_once_with( + active=None, org_uid=None, role_id=None, incRoles=False + ) + + +def test_list_users_when_given_invalid_org_uid_raises_error( + runner, cli_state, get_available_roles_success, custom_error +): + invalid_org_uid = "invalid_org_uid" + cli_state.sdk.users.get_all.side_effect = Py42OrgNotFoundError( + custom_error, invalid_org_uid + ) + result = runner.invoke( + cli, ["users", "list", "--org-uid", invalid_org_uid], obj=cli_state + ) + assert ( + f"Error: The organization with UID '{invalid_org_uid}' was not found." 
+ in result.output + ) + + +def test_list_legal_hold_flag_reports_none_for_users_not_on_legal_hold( + runner, + cli_state, + get_all_users_success, + get_custodian_failure, + get_all_matter_success, +): + result = runner.invoke( + cli, + ["users", "list", "--include-legal-hold-membership", "-f", "CSV"], + obj=cli_state, + ) + + assert "Legal Hold #1,Legal Hold #2" not in result.output + assert "123456789,987654321" not in result.output + assert "legalHoldUid" not in result.output + assert "test.username@example.com" in result.output + + +def test_list_legal_hold_flag_reports_none_if_no_matters_exist( + runner, cli_state, get_all_users_success, get_custodian_failure, get_matter_failure +): + result = runner.invoke( + cli, ["users", "list", "--include-legal-hold-membership"], obj=cli_state + ) + + assert "Legal Hold #1,Legal Hold #2" not in result.output + assert "123456789,987654321" not in result.output + assert "legalHoldUid" not in result.output + assert "test.username@example.com" in result.output + + +def test_list_legal_hold_values_not_included_for_legal_hold_user_if_legal_hold_flag_not_passed( + runner, + cli_state, + get_all_users_success, + get_all_custodian_success, + get_all_matter_success, +): + result = runner.invoke(cli, ["users", "list"], obj=cli_state) + assert "Legal Hold #1,Legal Hold #2" not in result.output + assert "123456789,987654321" not in result.output + assert "test.username@example.com" in result.output + + +def test_list_include_legal_hold_membership_merges_in_and_concats_legal_hold_info( + runner, + cli_state, + get_all_users_success, + get_all_custodian_success, + get_all_matter_success, +): + result = runner.invoke( + cli, ["users", "list", "--include-legal-hold-membership"], obj=cli_state + ) + + assert "Legal Hold #1,Legal Hold #2" in result.output + assert "123456789,987654321" in result.output + + +def test_list_prints_expected_data_if_include_roles( + runner, cli_state, get_all_users_success +): + result = runner.invoke(cli, 
["users", "list", "--include-roles"], obj=cli_state) + assert "roles" in result.output + assert "Desktop User" in result.output + + +def test_show_calls_get_by_username_with_expected_params(runner, cli_state): + runner.invoke( + cli, + ["users", "show", "test.username@example.com"], + obj=cli_state, + ) + cli_state.sdk.users.get_by_username.assert_called_once_with( + "test.username@example.com", incRoles=True + ) + + +def test_show_prints_expected_data(runner, cli_state, get_users_response): + cli_state.sdk.users.get_by_username.return_value = get_users_response + result = runner.invoke( + cli, + ["users", "show", "test.username@example.com"], + obj=cli_state, + ) + assert "test.username@example.com" in result.output + assert "911162111513111325" in result.output + assert "Active" in result.output + assert "44444444" in result.output + assert "Desktop User" in result.output + + +def test_show_legal_hold_flag_reports_none_for_users_not_on_legal_hold( + runner, + cli_state, + get_users_response, + get_custodian_failure, + get_all_matter_success, +): + cli_state.sdk.users.get_by_username.return_value = get_users_response + result = runner.invoke( + cli, + [ + "users", + "show", + "test.username@example.com", + "--include-legal-hold-membership", + "-f", + "CSV", + ], + obj=cli_state, + ) + + assert "Legal Hold #1,Legal Hold #2" not in result.output + assert "123456789,987654321" not in result.output + assert "legalHoldUid" not in result.output + assert "test.username@example.com" in result.output + + +def test_show_legal_hold_flag_reports_none_if_no_matters_exist( + runner, cli_state, get_users_response, get_custodian_failure, get_matter_failure +): + cli_state.sdk.users.get_by_username.return_value = get_users_response + result = runner.invoke( + cli, + [ + "users", + "show", + "test.username@example.com", + "--include-legal-hold-membership", + ], + obj=cli_state, + ) + + assert "Legal Hold #1,Legal Hold #2" not in result.output + assert "123456789,987654321" not in 
result.output + assert "legalHoldUid" not in result.output + assert "test.username@example.com" in result.output + + +def test_show_legal_hold_values_not_included_for_legal_hold_user_if_legal_hold_flag_not_passed( + runner, + cli_state, + get_users_response, + get_all_custodian_success, + get_all_matter_success, +): + cli_state.sdk.users.get_by_username.return_value = get_users_response + result = runner.invoke( + cli, ["users", "show", "test.username@example.com"], obj=cli_state + ) + assert "Legal Hold #1,Legal Hold #2" not in result.output + assert "123456789,987654321" not in result.output + assert "test.username@example.com" in result.output + + +def test_show_include_legal_hold_membership_merges_in_and_concats_legal_hold_info( + runner, + cli_state, + get_users_response, + get_all_custodian_success, + get_all_matter_success, +): + cli_state.sdk.users.get_by_username.return_value = get_users_response + result = runner.invoke( + cli, + [ + "users", + "show", + "test.username@example.com", + "--include-legal-hold-membership", + ], + obj=cli_state, + ) + + assert "Legal Hold #1,Legal Hold #2" in result.output + assert "123456789,987654321" in result.output + + +def test_list_risk_profiles_calls_get_all_user_risk_profiles_with_default_parameters( + runner, cli_state +): + runner.invoke( + cli, + ["users", "list-risk-profiles"], + obj=cli_state, + ) + cli_state.sdk.userriskprofile.get_all.assert_called_once_with( + active=None, manager_id=None, department=None, employment_type=None, region=None + ) + + +def test_list_risk_profiles_calls_get_all_user_risk_profiles_with_correct_parameters( + runner, cli_state +): + r = runner.invoke( + cli, + [ + "users", + "list-risk-profiles", + "--active", + "--manager-id", + "123-42", + "--department", + "Engineering", + "--employment-type", + "Remote", + "--region", + "Minnesota", + ], + obj=cli_state, + ) + print(r.output) + cli_state.sdk.userriskprofile.get_all.assert_called_once_with( + active=True, + manager_id="123-42", + 
department="Engineering", + employment_type="Remote", + region="Minnesota", + ) + + +def test_show_risk_profile_calls_user_risk_profile_get_by_username_with( + runner, cli_state, get_users_response +): + runner.invoke( + cli, + ["users", "show-risk-profile", "foo@bar.com"], + obj=cli_state, + ) + + cli_state.sdk.userriskprofile.get_by_username.assert_called_once_with("foo@bar.com") + + +def test_add_user_role_adds( + runner, cli_state, get_user_id_success, get_available_roles_success +): + command = [ + "users", + "add-role", + "--username", + "test.username@example.com", + "--role-name", + "Customer Cloud Admin", + ] + runner.invoke(cli, command, obj=cli_state) + cli_state.sdk.users.add_role.assert_called_once_with(TEST_USER_ID, TEST_ROLE_NAME) + + +def test_add_user_role_raises_error_when_role_does_not_exist( + runner, cli_state, get_user_id_success, get_available_roles_success +): + command = [ + "users", + "add-role", + "--username", + "test.username@example.com", + "--role-name", + "test", + ] + result = runner.invoke(cli, command, obj=cli_state) + assert result.exit_code == 1 + assert "Role with name 'test' not found." in result.output + + +def test_add_user_role_raises_error_when_username_does_not_exist( + runner, cli_state, get_user_id_failure, get_available_roles_success +): + command = [ + "users", + "add-role", + "--username", + "not_a_username@example.com", + "--role-name", + "Desktop User", + ] + result = runner.invoke(cli, command, obj=cli_state) + assert result.exit_code == 1 + assert ( + "User 'not_a_username@example.com' does not exist or you do not have permission to view them." 
+ in result.output + ) + + +def test_remove_user_role_removes( + runner, cli_state, get_user_id_success, get_available_roles_success +): + command = [ + "users", + "remove-role", + "--username", + "test.username@example.com", + "--role-name", + "Customer Cloud Admin", + ] + runner.invoke(cli, command, obj=cli_state) + cli_state.sdk.users.remove_role.assert_called_once_with( + TEST_USER_ID, TEST_ROLE_NAME + ) + + +def test_remove_user_role_raises_error_when_role_does_not_exist( + runner, cli_state, get_user_id_success, get_available_roles_success +): + command = [ + "users", + "remove-role", + "--username", + "test.username@example.com", + "--role-name", + "test", + ] + result = runner.invoke(cli, command, obj=cli_state) + assert result.exit_code == 1 + assert "Role with name 'test' not found." in result.output + + +def test_remove_user_role_raises_error_when_username_does_not_exist( + runner, cli_state, get_user_id_failure, get_available_roles_success +): + command = [ + "users", + "remove-role", + "--username", + "not_a_username@example.com", + "--role-name", + "Desktop User", + ] + result = runner.invoke(cli, command, obj=cli_state) + assert result.exit_code == 1 + assert ( + "User 'not_a_username@example.com' does not exist or you do not have permission to view them." 
+ in result.output + ) + + +def test_update_user_calls_update_user_with_correct_parameters_when_only_some_are_passed( + runner, cli_state, update_user_success +): + command = ["users", "update", "--user-id", "12345", "--email", "test_email"] + runner.invoke(cli, command, obj=cli_state) + cli_state.sdk.users.update_user.assert_called_once_with( + "12345", + username=None, + email="test_email", + password=None, + first_name=None, + last_name=None, + notes=None, + archive_size_quota_bytes=None, + ) + + +def test_update_user_calls_update_user_with_correct_parameters_when_all_are_passed( + runner, cli_state, update_user_success +): + command = [ + "users", + "update", + "--user-id", + "12345", + "--email", + "test_email", + "--username", + "test_username", + "--password", + "test_password", + "--first-name", + "test_fname", + "--last-name", + "test_lname", + "--notes", + "test notes", + "--archive-size-quota", + "123456", + ] + runner.invoke(cli, command, obj=cli_state) + cli_state.sdk.users.update_user.assert_called_once_with( + "12345", + username="test_username", + email="test_email", + password="test_password", + first_name="test_fname", + last_name="test_lname", + notes="test notes", + archive_size_quota_bytes="123456", + ) + + +def test_update_when_py42_raises_invalid_email_outputs_error_message( + mocker, runner, cli_state, update_user_success +): + test_email = "test_email" + mock_http_error = create_mock_http_error(mocker, status=500) + cli_state.sdk.users.update_user.side_effect = Py42InvalidEmailError( + test_email, mock_http_error + ) + command = ["users", "update", "--user-id", "12345", "--email", test_email] + result = runner.invoke(cli, command, obj=cli_state) + assert "Error: 'test_email' is not a valid email." 
in result.output + + +def test_update_when_py42_raises_invalid_username_outputs_error_message( + mocker, runner, cli_state, update_user_success +): + mock_http_error = create_mock_http_error(mocker, status=500) + cli_state.sdk.users.update_user.side_effect = Py42InvalidUsernameError( + mock_http_error + ) + command = ["users", "update", "--user-id", "12345", "--username", "test_username"] + result = runner.invoke(cli, command, obj=cli_state) + assert "Error: Invalid username." in result.output + + +def test_update_when_py42_raises_invalid_password_outputs_error_message( + mocker, runner, cli_state, update_user_success +): + mock_http_error = create_mock_http_error(mocker, status=500) + cli_state.sdk.users.update_user.side_effect = Py42InvalidPasswordError( + mock_http_error + ) + command = ["users", "update", "--user-id", "12345", "--password", "test_password"] + result = runner.invoke(cli, command, obj=cli_state) + assert "Error: Invalid password." in result.output + + +def test_deactivate_calls_deactivate_with_correct_parameters( + runner, cli_state, get_user_id_success, deactivate_user_success +): + command = ["users", "deactivate", "test@example.com"] + runner.invoke(cli, command, obj=cli_state) + cli_state.sdk.users.deactivate.assert_called_once_with(TEST_USER_ID) + + +def test_deactivate_when_user_on_legal_hold_outputs_expected_error_text( + runner, cli_state, get_user_id_success, deactivate_user_legal_hold_failure +): + command = ["users", "deactivate", "test@example.com"] + result = runner.invoke(cli, command, obj=cli_state) + assert ( + "Error: Cannot deactivate the user with ID 1234 as the user is involved in a legal hold matter." 
+ in result.output + ) + + +def test_reactivate_calls_reactivate_with_correct_parameters( + runner, cli_state, get_user_id_success, deactivate_user_success +): + command = ["users", "reactivate", "test@example.com"] + runner.invoke(cli, command, obj=cli_state) + cli_state.sdk.users.reactivate.assert_called_once_with(TEST_USER_ID) + + +def test_bulk_update_uses_expected_arguments_when_only_some_are_passed( + runner, mocker, cli_state +): + bulk_processor = mocker.patch(f"{_NAMESPACE}.run_bulk_process") + with runner.isolated_filesystem(): + with open("test_bulk_update.csv", "w") as csv: + csv.writelines( + [ + "user_id,username,email,password,first_name,last_name,notes,archive_size_quota\n", + "12345,,test_email,,,,,\n", + ] + ) + runner.invoke( + cli, ["users", "bulk", "update", "test_bulk_update.csv"], obj=cli_state + ) + assert bulk_processor.call_args[0][1] == [ + { + "user_id": "12345", + "username": "", + "email": "test_email", + "password": "", + "first_name": "", + "last_name": "", + "notes": "", + "archive_size_quota": "", + "updated": "False", + } + ] + + +def test_bulk_update_uses_expected_arguments_when_all_are_passed( + runner, mocker, cli_state +): + bulk_processor = mocker.patch(f"{_NAMESPACE}.run_bulk_process") + with runner.isolated_filesystem(): + with open("test_bulk_update.csv", "w") as csv: + csv.writelines( + [ + "user_id,username,email,password,first_name,last_name,notes,archive_size_quota\n", + "12345,test_username,test_email,test_pword,test_fname,test_lname,test notes,4321\n", + ] + ) + runner.invoke( + cli, ["users", "bulk", "update", "test_bulk_update.csv"], obj=cli_state + ) + assert bulk_processor.call_args[0][1] == [ + { + "user_id": "12345", + "username": "test_username", + "email": "test_email", + "password": "test_pword", + "first_name": "test_fname", + "last_name": "test_lname", + "notes": "test notes", + "archive_size_quota": "4321", + "updated": "False", + } + ] + + +def test_bulk_update_ignores_blank_lines(runner, mocker, 
cli_state): + bulk_processor = mocker.patch(f"{_NAMESPACE}.run_bulk_process") + with runner.isolated_filesystem(): + with open("test_bulk_update.csv", "w") as csv: + csv.writelines( + [ + "user_id,username,email,password,first_name,last_name,notes,archive_size_quota\n", + "\n", + "12345,test_username,test_email,test_pword,test_fname,test_lname,test notes,4321\n", + "\n", + ] + ) + runner.invoke( + cli, ["users", "bulk", "update", "test_bulk_update.csv"], obj=cli_state + ) + assert bulk_processor.call_args[0][1] == [ + { + "user_id": "12345", + "username": "test_username", + "email": "test_email", + "password": "test_pword", + "first_name": "test_fname", + "last_name": "test_lname", + "notes": "test notes", + "archive_size_quota": "4321", + "updated": "False", + } + ] + + +def test_bulk_update_uses_handler_that_when_encounters_error_increments_total_errors( + runner, mocker, cli_state, worker_stats +): + lines = [ + "user_id,username,email,password,first_name,last_name,notes,archive_size_quota\n", + "12345,test_username,test_email,test_pword,test_fname,test_lname,test notes,4321\n", + ] + + def _update(user_id, *args, **kwargs): + if user_id == "12345": + raise Exception("TEST") + return create_mock_response(mocker, data=TEST_USERS_RESPONSE) + + cli_state.sdk.users.update_user.side_effect = _update + bulk_processor = mocker.patch(f"{_NAMESPACE}.run_bulk_process") + with runner.isolated_filesystem(): + with open("test_bulk_update.csv", "w") as csv: + csv.writelines(lines) + runner.invoke( + cli, + ["users", "bulk", "update", "test_bulk_update.csv"], + obj=cli_state, + ) + handler = bulk_processor.call_args[0][0] + handler( + user_id="12345", + username="test", + email="test", + password="test", + first_name="test", + last_name="test", + notes="test", + archive_size_quota="test", + ) + handler( + user_id="not 12345", + username="test", + email="test", + password="test", + first_name="test", + last_name="test", + notes="test", + archive_size_quota="test", + ) + assert 
worker_stats.increment_total_errors.call_count == 1 + + +def test_move_calls_change_org_assignment_with_correct_parameters( + runner, cli_state, change_org_success, get_user_id_success, get_org_success +): + command = [ + "users", + "move", + "--username", + TEST_USERNAME, + "--org-id", + "1007744453331222111", + ] + runner.invoke(cli, command, obj=cli_state) + expected_org_id = TEST_GET_ORG_RESPONSE["orgId"] + cli_state.sdk.users.change_org_assignment.assert_called_once_with( + user_id=TEST_USER_ID, org_id=expected_org_id + ) + + +def test_bulk_move_uses_expected_arguments(runner, mocker, cli_state): + bulk_processor = mocker.patch(f"{_NAMESPACE}.run_bulk_process") + with runner.isolated_filesystem(): + with open("test_bulk_move.csv", "w") as csv: + csv.writelines(["username,org_id\n", f"{TEST_USERNAME},4321\n"]) + runner.invoke( + cli, ["users", "bulk", "move", "test_bulk_move.csv"], obj=cli_state + ) + assert bulk_processor.call_args[0][1] == [ + {"username": TEST_USERNAME, "org_id": "4321", "moved": "False"} + ] + bulk_processor.assert_called_once() + + +def test_bulk_move_ignores_blank_lines(runner, mocker, cli_state): + bulk_processor = mocker.patch(f"{_NAMESPACE}.run_bulk_process") + with runner.isolated_filesystem(): + with open("test_bulk_move.csv", "w") as csv: + csv.writelines(["username,org_id\n\n\n", f"{TEST_USERNAME},4321\n\n\n"]) + runner.invoke( + cli, ["users", "bulk", "move", "test_bulk_move.csv"], obj=cli_state + ) + assert bulk_processor.call_args[0][1] == [ + {"username": TEST_USERNAME, "org_id": "4321", "moved": "False"} + ] + bulk_processor.assert_called_once() + + +def test_bulk_move_uses_handler_that_when_encounters_error_increments_total_errors( + runner, mocker, cli_state, worker_stats, get_users_response +): + lines = ["username,org_id\n", f"{TEST_USERNAME},4321\n"] + + def _get(username, *args, **kwargs): + if username == "test@example.com": + raise Exception("TEST") + return get_users_response + + 
cli_state.sdk.users.get_by_username.side_effect = _get
+    bulk_processor = mocker.patch(f"{_NAMESPACE}.run_bulk_process")
+    with runner.isolated_filesystem():
+        with open("test_bulk_move.csv", "w") as csv:
+            csv.writelines(lines)
+        runner.invoke(
+            cli,
+            ["users", "bulk", "move", "test_bulk_move.csv"],
+            obj=cli_state,
+        )
+        handler = bulk_processor.call_args[0][0]
+        handler(username="test@example.com", org_id="test")
+        handler(username="not.test@example.com", org_id="test")
+        assert worker_stats.increment_total_errors.call_count == 1
+
+
+def test_bulk_move_uses_handler_that_when_called_and_row_has_missing_username_errors_at_row(
+    runner, mocker, cli_state, worker_stats
+):
+    bulk_processor = mocker.patch(f"{_NAMESPACE}.run_bulk_process")
+    lines = ["username,org_id\n", ",123\n"]  # Missing username
+    with runner.isolated_filesystem():
+        with open("test_bulk_move.csv", "w") as csv:
+            csv.writelines(lines)
+        runner.invoke(
+            cli, ["users", "bulk", "move", "test_bulk_move.csv"], obj=cli_state
+        )
+
+        handler = bulk_processor.call_args[0][0]
+        handler(username=None, org_id="123")
+        assert worker_stats.increment_total_errors.call_count == 1
+        # Ensure it does not try to get the username for the None user.
+ assert not cli_state.sdk.users.get_by_username.call_count + + +def test_bulk_deactivate_uses_expected_arguments(runner, mocker, cli_state): + bulk_processor = mocker.patch(f"{_NAMESPACE}.run_bulk_process") + with runner.isolated_filesystem(): + with open("test_bulk_deactivate.csv", "w") as csv: + csv.writelines(["username\n", f"{TEST_USERNAME}\n"]) + runner.invoke( + cli, + ["users", "bulk", "deactivate", "test_bulk_deactivate.csv"], + obj=cli_state, + ) + assert bulk_processor.call_args[0][1] == [ + {"username": TEST_USERNAME, "deactivated": "False"} + ] + bulk_processor.assert_called_once() + + +def test_bulk_deactivate_ignores_blank_lines(runner, mocker, cli_state): + bulk_processor = mocker.patch(f"{_NAMESPACE}.run_bulk_process") + with runner.isolated_filesystem(): + with open("test_bulk_deactivate.csv", "w") as csv: + csv.writelines(["username\n\n\n", f"{TEST_USERNAME}\n\n\n"]) + runner.invoke( + cli, + ["users", "bulk", "deactivate", "test_bulk_deactivate.csv"], + obj=cli_state, + ) + assert bulk_processor.call_args[0][1] == [ + {"username": TEST_USERNAME, "deactivated": "False"} + ] + bulk_processor.assert_called_once() + + +def test_bulk_deactivate_uses_handler_that_when_encounters_error_increments_total_errors( + runner, mocker, cli_state, worker_stats, get_users_response +): + lines = ["username\n", f"{TEST_USERNAME}\n"] + + def _get(username, *args, **kwargs): + if username == "test@example.com": + raise Exception("TEST") + return get_users_response + + cli_state.sdk.users.get_by_username.side_effect = _get + bulk_processor = mocker.patch(f"{_NAMESPACE}.run_bulk_process") + with runner.isolated_filesystem(): + with open("test_bulk_deactivate.csv", "w") as csv: + csv.writelines(lines) + runner.invoke( + cli, + ["users", "bulk", "deactivate", "test_bulk_deactivate.csv"], + obj=cli_state, + ) + handler = bulk_processor.call_args[0][0] + handler(username="test@example.com") + handler(username="not.test@example.com") + assert 
worker_stats.increment_total_errors.call_count == 1 + + +def test_bulk_reactivate_uses_expected_arguments(runner, mocker, cli_state): + bulk_processor = mocker.patch(f"{_NAMESPACE}.run_bulk_process") + with runner.isolated_filesystem(): + with open("test_bulk_reactivate.csv", "w") as csv: + csv.writelines(["username\n", f"{TEST_USERNAME}\n"]) + runner.invoke( + cli, + ["users", "bulk", "reactivate", "test_bulk_reactivate.csv"], + obj=cli_state, + ) + assert bulk_processor.call_args[0][1] == [ + {"username": TEST_USERNAME, "reactivated": "False"} + ] + bulk_processor.assert_called_once() + + +def test_bulk_reactivate_ignores_blank_lines(runner, mocker, cli_state): + bulk_processor = mocker.patch(f"{_NAMESPACE}.run_bulk_process") + with runner.isolated_filesystem(): + with open("test_bulk_reactivate.csv", "w") as csv: + csv.writelines(["username\n\n\n", f"{TEST_USERNAME}\n\n\n"]) + runner.invoke( + cli, + ["users", "bulk", "reactivate", "test_bulk_reactivate.csv"], + obj=cli_state, + ) + assert bulk_processor.call_args[0][1] == [ + {"username": TEST_USERNAME, "reactivated": "False"} + ] + bulk_processor.assert_called_once() + + +def test_bulk_reactivate_uses_handler_that_when_encounters_error_increments_total_errors( + runner, mocker, cli_state, worker_stats, get_users_response +): + lines = ["username\n", f"{TEST_USERNAME}\n"] + + def _get(username, *args, **kwargs): + if username == "test@example.com": + raise Exception("TEST") + + return get_users_response + + cli_state.sdk.users.get_by_username.side_effect = _get + bulk_processor = mocker.patch(f"{_NAMESPACE}.run_bulk_process") + with runner.isolated_filesystem(): + with open("test_bulk_reactivate.csv", "w") as csv: + csv.writelines(lines) + runner.invoke( + cli, + ["users", "bulk", "reactivate", "test_bulk_reactivate.csv"], + obj=cli_state, + ) + handler = bulk_processor.call_args[0][0] + handler(username="test@example.com") + handler(username="not.test@example.com") + assert 
worker_stats.increment_total_errors.call_count == 1 + + +def test_bulk_add_roles_uses_expected_arguments(runner, mocker, cli_state_with_user): + bulk_processor = mocker.patch(f"{_NAMESPACE}.run_bulk_process") + with runner.isolated_filesystem(): + with open("test_bulk_add_roles.csv", "w") as csv: + csv.writelines( + ["username,role_name\n", f"{TEST_USERNAME},{TEST_ROLE_NAME}\n"] + ) + command = ["users", "bulk", "add-roles", "test_bulk_add_roles.csv"] + runner.invoke( + cli, + command, + obj=cli_state_with_user, + ) + assert bulk_processor.call_args[0][1] == [ + {"username": TEST_USERNAME, "role_name": TEST_ROLE_NAME, "role added": "False"}, + ] + bulk_processor.assert_called_once() + + +def test_bulk_add_roles_ignores_blank_lines(runner, mocker, cli_state): + bulk_processor = mocker.patch(f"{_NAMESPACE}.run_bulk_process") + with runner.isolated_filesystem(): + with open("test_bulk_add_roles.csv", "w") as csv: + csv.writelines( + ["username,role_name\n\n\n", f"{TEST_USERNAME},{TEST_ROLE_NAME}\n\n\n"] + ) + runner.invoke( + cli, + ["users", "bulk", "add-roles", "test_bulk_add_roles.csv"], + obj=cli_state, + ) + assert bulk_processor.call_args[0][1] == [ + {"username": TEST_USERNAME, "role_name": TEST_ROLE_NAME, "role added": "False"}, + ] + bulk_processor.assert_called_once() + + +def test_bulk_add_roles_uses_handler_that_when_encounters_error_increments_total_errors( + runner, + mocker, + cli_state, + worker_stats, + get_users_response, + get_available_roles_success, +): + def _get(username, *args, **kwargs): + if username == "test@example.com": + raise Exception("TEST") + return get_users_response + + cli_state.sdk.users.get_by_username.side_effect = _get + bulk_processor = mocker.patch(f"{_NAMESPACE}.run_bulk_process") + with runner.isolated_filesystem(): + with open("test_bulk_add_roles.csv", "w") as csv: + csv.writelines( + ["username,role_name\n", f"{TEST_USERNAME},{TEST_ROLE_NAME}\n"] + ) + + runner.invoke( + cli, + ["users", "bulk", "add-roles", 
"test_bulk_add_roles.csv"], + obj=cli_state, + ) + handler = bulk_processor.call_args[0][0] + + handler( + username="test@example.com", + role_name=TEST_ROLE_NAME, + ) + handler(username="not.test@example.com", role_name=TEST_ROLE_NAME) + assert worker_stats.increment_total_errors.call_count == 1 + + +def test_bulk_remove_roles_uses_expected_arguments(runner, mocker, cli_state_with_user): + bulk_processor = mocker.patch(f"{_NAMESPACE}.run_bulk_process") + with runner.isolated_filesystem(): + with open("test_bulk_remove_roles.csv", "w") as csv: + csv.writelines( + ["username,role_name\n", f"{TEST_USERNAME},{TEST_ROLE_NAME}\n"] + ) + command = ["users", "bulk", "remove-roles", "test_bulk_remove_roles.csv"] + runner.invoke( + cli, + command, + obj=cli_state_with_user, + ) + assert bulk_processor.call_args[0][1] == [ + { + "username": TEST_USERNAME, + "role_name": TEST_ROLE_NAME, + "role removed": "False", + }, + ] + bulk_processor.assert_called_once() + + +def test_bulk_remove_roles_ignores_blank_lines(runner, mocker, cli_state): + bulk_processor = mocker.patch(f"{_NAMESPACE}.run_bulk_process") + with runner.isolated_filesystem(): + with open("test_bulk_remove_roles.csv", "w") as csv: + csv.writelines( + ["username,role_name\n\n\n", f"{TEST_USERNAME},{TEST_ROLE_NAME}\n\n\n"] + ) + runner.invoke( + cli, + ["users", "bulk", "remove-roles", "test_bulk_remove_roles.csv"], + obj=cli_state, + ) + assert bulk_processor.call_args[0][1] == [ + { + "username": TEST_USERNAME, + "role_name": TEST_ROLE_NAME, + "role removed": "False", + }, + ] + bulk_processor.assert_called_once() + + +def test_bulk_remove_roles_uses_handler_that_when_encounters_error_increments_total_errors( + runner, + mocker, + cli_state, + worker_stats, + get_users_response, + get_available_roles_success, +): + def _get(username, *args, **kwargs): + if username == "test@example.com": + raise Exception("TEST") + + return get_users_response + + cli_state.sdk.users.get_by_username.side_effect = _get + 
bulk_processor = mocker.patch(f"{_NAMESPACE}.run_bulk_process") + with runner.isolated_filesystem(): + with open("test_bulk_remove_roles.csv", "w") as csv: + csv.writelines( + ["username,role_name\n", f"{TEST_USERNAME},{TEST_ROLE_NAME}\n"] + ) + + runner.invoke( + cli, + ["users", "bulk", "remove-roles", "test_bulk_remove_roles.csv"], + obj=cli_state, + ) + handler = bulk_processor.call_args[0][0] + handler( + username="test@example.com", + role_name=TEST_ROLE_NAME, + ) + handler(username="not.test@example.com", role_name=TEST_ROLE_NAME) + assert worker_stats.increment_total_errors.call_count == 1 + + +def test_orgs_list_calls_orgs_get_all_with_expected_params(runner, cli_state): + runner.invoke(cli, ["users", "orgs", "list"], obj=cli_state) + assert cli_state.sdk.orgs.get_all.call_count == 1 + + +def test_orgs_list_prints_no_results_if_no_orgs_found( + runner, cli_state, get_all_orgs_empty_success +): + result = runner.invoke(cli, ["users", "orgs", "list"], obj=cli_state) + assert "No orgs found." 
in result.output
+
+
+def test_orgs_list_prints_expected_data(runner, cli_state, get_all_orgs_success):
+    result = runner.invoke(cli, ["users", "orgs", "list"], obj=cli_state)
+    assert "9087" in result.output
+    assert "1007759454961904673" in result.output
+    assert "19may" in result.output
+    assert "Active" in result.output
+    assert "2689" in result.output
+    assert "890854247383106706" in result.output
+    assert "ENTERPRISE" in result.output
+    assert "BASIC" in result.output
+    assert "2021-05-19T10:10:43.459Z" in result.output
+    assert "{'maxSeats': None, 'maxBytes': None}" in result.output
+
+
+def test_orgs_list_prints_all_data_fields_when_not_table_format(
+    runner, cli_state, get_all_orgs_success
+):
+    result = runner.invoke(cli, ["users", "orgs", "list", "-f", "JSON"], obj=cli_state)
+    for k, _v in TEST_GET_ORG_RESPONSE.items():
+        assert k in result.output
+    assert "9087" in result.output
+    assert "1007759454961904673" in result.output
+    assert "19may" in result.output
+    assert "Active" in result.output
+    assert "2689" in result.output
+    assert "890854247383106706" in result.output
+    assert "ENTERPRISE" in result.output
+    assert "BASIC" in result.output
+    assert "2021-05-19T10:10:43.459Z" in result.output
+    assert '"maxSeats": null' in result.output
+    assert '"maxBytes": null' in result.output
+
+
+def test_orgs_show_calls_orgs_get_by_uid_with_expected_params(
+    runner,
+    cli_state,
+):
+    runner.invoke(cli, ["users", "orgs", "show", TEST_ORG_UID], obj=cli_state)
+    cli_state.sdk.orgs.get_by_uid.assert_called_once_with(TEST_ORG_UID)
+
+
+def test_orgs_show_exits_and_returns_error_if_uid_arg_not_provided(runner, cli_state):
+    result = runner.invoke(cli, ["users", "orgs", "show"], obj=cli_state)
+    assert result.exit_code == 2
+    assert "Error: Missing argument 'ORG_UID'." 
in result.output
+
+
+def test_orgs_show_prints_expected_data(
+    runner,
+    cli_state,
+    get_org_success,
+):
+    result = runner.invoke(cli, ["users", "orgs", "show", TEST_ORG_UID], obj=cli_state)
+    assert "9087" in result.output
+    assert "1007759454961904673" in result.output
+    assert "19may" in result.output
+    assert "Active" in result.output
+    assert "2689" in result.output
+    assert "890854247383106706" in result.output
+    assert "ENTERPRISE" in result.output
+    assert "BASIC" in result.output
+    assert "2021-05-19T10:10:43.459Z" in result.output
+    assert "{'maxSeats': None, 'maxBytes': None}" in result.output
+
+
+def test_orgs_show_prints_all_data_fields_when_not_table_format(
+    runner,
+    cli_state,
+    get_org_success,
+):
+    result = runner.invoke(
+        cli, ["users", "orgs", "show", TEST_ORG_UID, "-f", "JSON"], obj=cli_state
+    )
+    for k, _v in TEST_GET_ORG_RESPONSE.items():
+        assert k in result.output
+    assert "9087" in result.output
+    assert "1007759454961904673" in result.output
+    assert "19may" in result.output
+    assert "Active" in result.output
+    assert "2689" in result.output
+    assert "890854247383106706" in result.output
+    assert "ENTERPRISE" in result.output
+    assert "BASIC" in result.output
+    assert "2021-05-19T10:10:43.459Z" in result.output
+    assert '"maxSeats": null' in result.output
+    assert '"maxBytes": null' in result.output
+
+
+def test_orgs_show_when_invalid_org_uid_raises_error(runner, cli_state, custom_error):
+    cli_state.sdk.orgs.get_by_uid.side_effect = Py42NotFoundError(custom_error)
+    result = runner.invoke(cli, ["users", "orgs", "show", TEST_ORG_UID], obj=cli_state)
+    assert result.exit_code == 1
+    assert f"Invalid org UID {TEST_ORG_UID}." 
in result.output + + +def test_list_aliases_calls_get_user_with_expected_parameters(runner, cli_state): + username = "alias@example" + command = ["users", "list-aliases", username] + runner.invoke(cli, command, obj=cli_state) + cli_state.sdk.userriskprofile.get_by_username.assert_called_once_with( + "alias@example" + ) + + +def test_list_aliases_prints_no_aliases_found_when_empty_list( + runner, cli_state, mocker +): + cli_state.sdk.userriskprofile.get_by_username.return_value = create_mock_response( + mocker, data={"cloudAliases": []} + ) + username = "alias@example" + command = ["users", "list-aliases", username] + result = runner.invoke(cli, command, obj=cli_state) + assert result.exit_code == 0 + assert f"No cloud aliases for user '{username}' found" in result.output + + +def test_list_aliases_prints_expected_data(runner, cli_state, get_user_uid_success): + result = runner.invoke( + cli, ["users", "list-aliases", "alias@example.com"], obj=cli_state + ) + assert result.exit_code == 0 + assert "['Sample.User1@samplecase.com', 'Sample.User1@gmail.com']" in result.output + + +def test_list_aliases_raises_error_when_user_does_not_exist( + runner, cli_state, get_user_uid_failure +): + username = "fake@notreal.com" + command = ["users", "list-aliases", username] + result = runner.invoke(cli, command, obj=cli_state) + assert result.exit_code == 1 + assert ( + f"User '{username}' does not exist or you do not have permission to view them." 
+ in result.output + ) + + +def test_add_cloud_alias_calls_add_cloud_alias_with_correct_parameters( + runner, cli_state, get_user_uid_success +): + command = ["users", "add-alias", "test@example.com", "alias@example.com"] + runner.invoke(cli, command, obj=cli_state) + cli_state.sdk.userriskprofile.add_cloud_aliases.assert_called_once_with( + TEST_USER_UID, "alias@example.com" + ) + + +def test_add_alias_raises_error_when_user_does_not_exist( + runner, cli_state, get_user_uid_failure +): + username = "fake@notreal.com" + command = ["users", "add-alias", username, "alias@example.com"] + result = runner.invoke(cli, command, obj=cli_state) + assert result.exit_code == 1 + assert ( + f"User '{username}' does not exist or you do not have permission to view them." + in result.output + ) + + +def test_add_alias_raises_error_when_alias_is_too_long(runner, cli_state): + command = [ + "users", + "add-alias", + "fake@notreal.com", + "alias-is-too-long-its-very-long-for-real-more-than-50-characters@example.com", + ] + cli_state.sdk.userriskprofile.add_cloud_aliases.side_effect = ( + Py42CloudAliasCharacterLimitExceededError + ) + result = runner.invoke(cli, command, obj=cli_state) + assert result.exit_code == 1 + assert "Cloud alias character limit exceeded" in result.output + + +def test_add_alias_raises_error_when_user_has_two_aliases( + runner, cli_state, add_alias_limit_failure +): + command = [ + "users", + "add-alias", + "fake@notreal.com", + "second_alias", + ] + result = runner.invoke(cli, command, obj=cli_state) + assert result.exit_code == 1 + assert ( + "Error: Cloud alias limit exceeded. A max of 2 cloud aliases are allowed." 
+ in result.output + ) + + +def test_remove_cloud_alias_calls_remove_cloud_alias_with_correct_parameters( + runner, cli_state, get_user_uid_success, remove_alias_success +): + command = ["users", "remove-alias", "test@example.com", "alias@example.com"] + runner.invoke(cli, command, obj=cli_state) + cli_state.sdk.userriskprofile.delete_cloud_aliases.assert_called_once_with( + TEST_USER_UID, "alias@example.com" + ) + + +def test_remove_alias_raises_error_when_user_does_not_exist( + runner, cli_state, get_user_uid_failure +): + username = "fake@notreal.com" + command = ["users", "remove-alias", username, "alias@example.com"] + result = runner.invoke(cli, command, obj=cli_state) + assert result.exit_code == 1 + assert ( + f"User '{username}' does not exist or you do not have permission to view them." + in result.output + ) + + +def test_bulk_add_alias_uses_expected_arguments(runner, mocker, cli_state): + bulk_processor = mocker.patch(f"{_NAMESPACE}.run_bulk_process") + with runner.isolated_filesystem(): + with open("test_add_alias.csv", "w") as csv: + csv.writelines(["username,alias\n", f"{TEST_USERNAME},{TEST_ALIAS}\n"]) + runner.invoke( + cli, + ["users", "bulk", "add-alias", "test_add_alias.csv"], + obj=cli_state, + ) + assert bulk_processor.call_args[0][1] == [ + {"username": TEST_USERNAME, "alias": TEST_ALIAS, "alias added": "False"} + ] + bulk_processor.assert_called_once() + + +def test_bulk_add_alias_ignores_blank_lines(runner, mocker, cli_state): + bulk_processor = mocker.patch(f"{_NAMESPACE}.run_bulk_process") + with runner.isolated_filesystem(): + with open("test_add_alias.csv", "w") as csv: + csv.writelines( + ["username,alias\n\n\n", f"{TEST_USERNAME},{TEST_ALIAS}\n\n\n"] + ) + runner.invoke( + cli, + ["users", "bulk", "add-alias", "test_add_alias.csv"], + obj=cli_state, + ) + assert bulk_processor.call_args[0][1] == [ + {"username": TEST_USERNAME, "alias": TEST_ALIAS, "alias added": "False"} + ] + bulk_processor.assert_called_once() + + +def 
test_bulk_add_alias_uses_handler_that_when_encounters_error_increments_total_errors( + runner, mocker, cli_state, worker_stats, get_user_response +): + lines = ["username,alias\n", f"{TEST_USERNAME},{TEST_ALIAS}\n"] + + def _get(username, *args, **kwargs): + if username == "test@example.com": + raise Exception("TEST") + return get_user_response + + cli_state.sdk.userriskprofile.get_by_username.side_effect = _get + bulk_processor = mocker.patch(f"{_NAMESPACE}.run_bulk_process") + with runner.isolated_filesystem(): + with open("test_add_alias.csv", "w") as csv: + csv.writelines(lines) + runner.invoke( + cli, + ["users", "bulk", "add-alias", "test_add_alias.csv"], + obj=cli_state, + ) + handler = bulk_processor.call_args[0][0] + handler(username="test@example.com", alias=TEST_ALIAS) + handler(username="not.test@example.com", alias=TEST_ALIAS) + assert worker_stats.increment_total_errors.call_count == 1 + + +def test_bulk_remove_alias_uses_expected_arguments(runner, mocker, cli_state): + bulk_processor = mocker.patch(f"{_NAMESPACE}.run_bulk_process") + with runner.isolated_filesystem(): + with open("test_remove_alias.csv", "w") as csv: + csv.writelines(["username,alias\n", f"{TEST_USERNAME},{TEST_ALIAS}\n"]) + runner.invoke( + cli, + ["users", "bulk", "remove-alias", "test_remove_alias.csv"], + obj=cli_state, + ) + assert bulk_processor.call_args[0][1] == [ + {"username": TEST_USERNAME, "alias": TEST_ALIAS, "alias removed": "False"} + ] + bulk_processor.assert_called_once() + + +def test_bulk_remove_alias_ignores_blank_lines(runner, mocker, cli_state): + bulk_processor = mocker.patch(f"{_NAMESPACE}.run_bulk_process") + with runner.isolated_filesystem(): + with open("test_remove_alias.csv", "w") as csv: + csv.writelines( + ["username,alias\n\n\n", f"{TEST_USERNAME},{TEST_ALIAS}\n\n\n"] + ) + runner.invoke( + cli, + ["users", "bulk", "remove-alias", "test_remove_alias.csv"], + obj=cli_state, + ) + assert bulk_processor.call_args[0][1] == [ + {"username": TEST_USERNAME, 
"alias": TEST_ALIAS, "alias removed": "False"} + ] + bulk_processor.assert_called_once() + + +def test_bulk_remove_alias_uses_handler_that_when_encounters_error_increments_total_errors( + runner, mocker, cli_state, worker_stats, get_user_response +): + lines = ["username,alias\n", f"{TEST_USERNAME},{TEST_ALIAS}\n"] + + def _get(username, *args, **kwargs): + if username == "test@example.com": + raise Exception("TEST") + return get_user_response + + cli_state.sdk.userriskprofile.get_by_username.side_effect = _get + bulk_processor = mocker.patch(f"{_NAMESPACE}.run_bulk_process") + with runner.isolated_filesystem(): + with open("test_remove_alias.csv", "w") as csv: + csv.writelines(lines) + runner.invoke( + cli, + ["users", "bulk", "remove-alias", "test_remove_alias.csv"], + obj=cli_state, + ) + handler = bulk_processor.call_args[0][0] + handler(username="test@example.com", alias=TEST_ALIAS) + handler(username="not.test@example.com", alias=TEST_ALIAS) + assert worker_stats.increment_total_errors.call_count == 1 + + +def test_update_start_date_without_date_arg_or_clear_option_raises_cli_error( + runner, cli_state +): + res = runner.invoke( + cli, ["users", "update-start-date", "test@example.com"], obj=cli_state + ) + assert res.exit_code == 1 + assert "Must supply DATE argument if --clear is not used." 
in res.output + + +def test_update_start_date_with_date_makes_expected_call(mocker, runner, cli_state): + cli_state.sdk.userriskprofile.get_by_username.return_value = create_mock_response( + mocker, data={"userId": 1234} + ) + res = runner.invoke( + cli, + ["users", "update-start-date", "test@example.com", "2020-10-10"], + obj=cli_state, + ) + assert res.exit_code == 0 + cli_state.sdk.userriskprofile.update.assert_called_once_with( + 1234, start_date=datetime.datetime(2020, 10, 10) + ) + + +def test_update_start_date_with_clear_option_clears_date(mocker, runner, cli_state): + cli_state.sdk.userriskprofile.get_by_username.return_value = create_mock_response( + mocker, data={"userId": 1234} + ) + res = runner.invoke( + cli, + ["users", "update-start-date", "test@example.com", "--clear"], + obj=cli_state, + ) + assert res.exit_code == 0 + cli_state.sdk.userriskprofile.update.assert_called_once_with(1234, start_date="") + + +def test_update_departure_date_without_date_arg_or_clear_option_raises_cli_error( + runner, cli_state +): + res = runner.invoke( + cli, ["users", "update-departure-date", "test@example.com"], obj=cli_state + ) + assert res.exit_code == 1 + assert "Must supply DATE argument if --clear is not used." 
in res.output + + +def test_update_departure_date_with_clear_option_clears_date(mocker, runner, cli_state): + cli_state.sdk.userriskprofile.get_by_username.return_value = create_mock_response( + mocker, data={"userId": 1234} + ) + res = runner.invoke( + cli, + ["users", "update-departure-date", "test@example.com", "--clear"], + obj=cli_state, + ) + assert res.exit_code == 0 + cli_state.sdk.userriskprofile.update.assert_called_once_with(1234, end_date="") + + +def test_update_departure_date_with_date_makes_expected_call(mocker, runner, cli_state): + cli_state.sdk.userriskprofile.get_by_username.return_value = create_mock_response( + mocker, data={"userId": 1234} + ) + res = runner.invoke( + cli, + ["users", "update-departure-date", "test@example.com", "2020-10-10"], + obj=cli_state, + ) + assert res.exit_code == 0 + cli_state.sdk.userriskprofile.update.assert_called_once_with( + 1234, end_date=datetime.datetime(2020, 10, 10) + ) + + +def test_update_notes_without_note_arg_or_clear_option_raises_cli_error( + runner, cli_state +): + res = runner.invoke( + cli, ["users", "update-risk-profile-notes", "test@example.com"], obj=cli_state + ) + assert res.exit_code == 1 + assert "Must supply NOTE argument if --clear is not used." 
in res.output + + +def test_update_notes_with_clear_option_clears_date(mocker, runner, cli_state): + cli_state.sdk.userriskprofile.get_by_username.return_value = create_mock_response( + mocker, data={"userId": 1234} + ) + res = runner.invoke( + cli, + ["users", "update-risk-profile-notes", "test@example.com", "--clear"], + obj=cli_state, + ) + assert res.exit_code == 0 + cli_state.sdk.userriskprofile.update.assert_called_once_with(1234, notes="") + + +def test_update_notes_with_note_makes_expected_call(mocker, runner, cli_state): + cli_state.sdk.userriskprofile.get_by_username.return_value = create_mock_response( + mocker, data={"userId": 1234} + ) + res = runner.invoke( + cli, + ["users", "update-risk-profile-notes", "test@example.com", "new note"], + obj=cli_state, + ) + assert res.exit_code == 0 + cli_state.sdk.userriskprofile.update.assert_called_once_with(1234, notes="new note") + + +def test_update_notes_with_append_option_appends_note_value(mocker, runner, cli_state): + cli_state.sdk.userriskprofile.get_by_username.return_value = create_mock_response( + mocker, data={"userId": 1234, "notes": "existing note"} + ) + res = runner.invoke( + cli, + [ + "users", + "update-risk-profile-notes", + "test@example.com", + "--append", + "new note", + ], + obj=cli_state, + ) + assert res.exit_code == 0 + cli_state.sdk.userriskprofile.update.assert_called_once_with( + 1234, notes="existing note\n\nnew note" + ) + + +def test_bulk_update_risk_profile_makes_expected_calls(mocker, runner, cli_state): + cli_state.sdk.userriskprofile.get_by_username.return_value = create_mock_response( + mocker, data={"userId": 1234} + ) + with runner.isolated_filesystem(): + with open("csv", "w") as file: + file.write( + "username,start_date,end_date,notes\ntest@example.com,2020-10-10,2022-10-10,new note\n" + ) + res = runner.invoke( + cli, ["users", "bulk", "update-risk-profile", "csv"], obj=cli_state + ) + assert res.exit_code == 0 + 
cli_state.sdk.userriskprofile.update.assert_called_once_with( + 1234, start_date="2020-10-10", end_date="2022-10-10", notes="new note" + ) + + +def test_bulk_update_risk_profile_with_append_note_option_appends_note( + mocker, runner, cli_state +): + cli_state.sdk.userriskprofile.get_by_username.return_value = create_mock_response( + mocker, data={"userId": 1234, "notes": "existing note"} + ) + with runner.isolated_filesystem(): + with open("csv", "w") as file: + file.write( + "username,start_date,end_date,notes\ntest@example.com,2020-10-10,2022-10-10,new note\n" + ) + res = runner.invoke( + cli, + ["users", "bulk", "update-risk-profile", "--append-notes", "csv"], + obj=cli_state, + ) + assert res.exit_code == 0 + cli_state.sdk.userriskprofile.update.assert_called_once_with( + 1234, + start_date="2020-10-10", + end_date="2022-10-10", + notes="existing note\n\nnew note", + ) diff --git a/tests/cmds/test_util.py b/tests/cmds/test_util.py new file mode 100644 index 000000000..fb3494197 --- /dev/null +++ b/tests/cmds/test_util.py @@ -0,0 +1,36 @@ +import pytest + +from code42cli import errors +from code42cli.cmds.util import try_get_default_header +from code42cli.output_formats import OutputFormat + +key = "events" + + +class TestQuery: + """""" + + pass + + +def search(*args, **kwargs): + pass + + +def test_try_get_default_header_raises_cli_error_when_using_include_all_with_none_table_format(): + with pytest.raises(errors.Code42CLIError) as err: + try_get_default_header(True, {}, OutputFormat.CSV) + + assert str(err.value) == "--include-all only allowed for Table output format." 
+ + +def test_try_get_default_header_uses_default_header_when_not_include_all(): + default_header = {"default": "header"} + actual = try_get_default_header(False, default_header, OutputFormat.TABLE) + assert actual is default_header + + +def test_try_get_default_header_returns_none_when_is_table_and_told_to_include_all(): + default_header = {"default": "header"} + actual = try_get_default_header(True, default_header, OutputFormat.TABLE) + assert actual is None diff --git a/tests/cmds/test_watchlists.py b/tests/cmds/test_watchlists.py new file mode 100644 index 000000000..fa76e3861 --- /dev/null +++ b/tests/cmds/test_watchlists.py @@ -0,0 +1,666 @@ +import pytest +from py42.exceptions import Py42NotFoundError +from py42.exceptions import Py42UserRiskProfileNotFound +from py42.exceptions import Py42WatchlistNotFound + +from .conftest import create_mock_response +from code42cli.main import cli + +WATCHLISTS_RESPONSE = { + "watchlists": [ + { + "listType": "DEPARTING_EMPLOYEE", + "watchlistId": "departing-id", + "tenantId": "tenant-123", + "stats": {"includedUsersCount": 50}, + }, + { + "listType": "HIGH_IMPACT_EMPLOYEE", + "watchlistId": "high-impact-id", + "tenantId": "tenant-123", + "stats": {"includedUsersCount": 3}, + }, + { + "listType": "POOR_SECURITY_PRACTICES", + "watchlistId": "poor-security-id", + "tenantId": "tenant-123", + "stats": {"includedUsersCount": 2}, + }, + { + "listType": "FLIGHT_RISK", + "watchlistId": "flight-risk-id", + "tenantId": "tenant-123", + "stats": {"includedUsersCount": 2}, + }, + { + "listType": "SUSPICIOUS_SYSTEM_ACTIVITY", + "watchlistId": "suspicious-id", + "tenantId": "tenant-123", + "stats": {"includedUsersCount": 2}, + }, + { + "listType": "CONTRACT_EMPLOYEE", + "watchlistId": "contract-id", + "tenantId": "tenant-123", + "stats": {"includedUsersCount": 2}, + }, + { + "listType": "NEW_EMPLOYEE", + "watchlistId": "new-employee-id", + "tenantId": "tenant-123", + "stats": {"includedUsersCount": 1}, + }, + { + "listType": 
"ELEVATED_ACCESS_PRIVILEGES", + "watchlistId": "elevated-id", + "tenantId": "tenant-123", + "stats": {}, + }, + { + "listType": "PERFORMANCE_CONCERNS", + "watchlistId": "performance-id", + "tenantId": "tenant-123", + "stats": {}, + }, + ], + "totalCount": 9, +} + +WATCHLISTS_MEMBERS_RESPONSE = { + "watchlistMembers": [ + { + "userId": "1234", + "username": "one@example.com", + "addedTime": "2022-04-10T23:05:48.096964", + }, + { + "userId": "2345", + "username": "two@example.com", + "addedTime": "2022-02-26T18:52:36.805807", + }, + { + "userId": "3456", + "username": "three@example.com", + "addedTime": "2022-02-26T18:52:36.805807", + }, + ], + "totalCount": 3, +} + +WATCHLISTS_INCLUDED_USERS_RESPONSE = { + "includedUsers": [ + { + "userId": "1234", + "username": "one@example.com", + "addedTime": "2022-04-10T23:05:48.096964", + }, + { + "userId": "2345", + "username": "two@example.com", + "addedTime": "2022-02-26T18:52:36.805807", + }, + { + "userId": "3456", + "username": "three@example.com", + "addedTime": "2022-02-26T18:52:36.805807", + }, + ], + "totalCount": 3, +} + + +@pytest.fixture() +def mock_watchlists_response(mocker): + return create_mock_response(mocker, data=WATCHLISTS_RESPONSE) + + +@pytest.fixture() +def mock_included_users_response(mocker): + return create_mock_response(mocker, data=WATCHLISTS_INCLUDED_USERS_RESPONSE) + + +@pytest.fixture() +def mock_members_response(mocker): + return create_mock_response(mocker, data=WATCHLISTS_MEMBERS_RESPONSE) + + +class TestWatchlistsListCmd: + def test_table_output_contains_expected_properties( + self, runner, cli_state, mock_watchlists_response + ): + cli_state.sdk.watchlists.get_all.return_value = iter([mock_watchlists_response]) + res = runner.invoke(cli, ["watchlists", "list"], obj=cli_state) + assert "listType" in res.output + assert "watchlistId" in res.output + assert "tenantId" in res.output + assert "stats" in res.output + assert "DEPARTING_EMPLOYEE" in res.output + assert "includedUsersCount" in 
res.output + assert "tenant-123" in res.output + + def test_json_output_contains_expected_properties( + self, runner, cli_state, mock_watchlists_response + ): + cli_state.sdk.watchlists.get_all.return_value = iter([mock_watchlists_response]) + res = runner.invoke(cli, ["watchlists", "list", "-f", "JSON"], obj=cli_state) + assert "listType" in res.output + assert "watchlistId" in res.output + assert "tenantId" in res.output + assert "stats" in res.output + assert "DEPARTING_EMPLOYEE" in res.output + assert "includedUsersCount" in res.output + assert "tenant-123" in res.output + + def test_csv_ouput_contains_expected_properties( + self, runner, cli_state, mock_watchlists_response + ): + cli_state.sdk.watchlists.get_all.return_value = iter([mock_watchlists_response]) + res = runner.invoke(cli, ["watchlists", "list", "-f", "CSV"], obj=cli_state) + assert "listType" in res.output + assert "watchlistId" in res.output + assert "tenantId" in res.output + assert "stats" in res.output + assert "DEPARTING_EMPLOYEE" in res.output + assert "includedUsersCount" in res.output + assert "tenant-123" in res.output + + +class TestWatchlistsListMembersCmd: + def test_table_output_contains_expected_properties( + self, runner, cli_state, mock_members_response, mock_included_users_response + ): + # all members: + cli_state.sdk.watchlists.get_all_watchlist_members.return_value = iter( + [mock_members_response] + ) + res = runner.invoke( + cli, + ["watchlists", "list-members", "--watchlist-id", "test-id"], + obj=cli_state, + ) + assert "userId" in res.output + assert "username" in res.output + assert "addedTime" in res.output + assert "1234" in res.output + assert "2345" in res.output + assert "3456" in res.output + assert "one@example.com" in res.output + assert "two@example.com" in res.output + assert "three@example.com" in res.output + assert "2022-04-10T23:05:48.096964" in res.output + + # only included users: + cli_state.sdk.watchlists.get_all_included_users.return_value = iter( + 
[mock_included_users_response] + ) + res = runner.invoke( + cli, + [ + "watchlists", + "list-members", + "--watchlist-id", + "test-id", + "--only-included-users", + ], + obj=cli_state, + ) + assert "userId" in res.output + assert "username" in res.output + assert "addedTime" in res.output + assert "1234" in res.output + assert "2345" in res.output + assert "3456" in res.output + assert "one@example.com" in res.output + assert "two@example.com" in res.output + assert "three@example.com" in res.output + assert "2022-04-10T23:05:48.096964" in res.output + + def test_json_output_contains_expected_properties( + self, runner, cli_state, mock_members_response, mock_included_users_response + ): + # all members: + cli_state.sdk.watchlists.get_all_watchlist_members.return_value = iter( + [mock_members_response] + ) + res = runner.invoke( + cli, + ["watchlists", "list-members", "--watchlist-id", "test-id", "-f", "JSON"], + obj=cli_state, + ) + assert "userId" in res.output + assert "username" in res.output + assert "addedTime" in res.output + assert "1234" in res.output + assert "2345" in res.output + assert "3456" in res.output + assert "one@example.com" in res.output + assert "two@example.com" in res.output + assert "three@example.com" in res.output + assert "2022-04-10T23:05:48.096964" in res.output + + # only included users: + cli_state.sdk.watchlists.get_all_included_users.return_value = iter( + [mock_included_users_response] + ) + res = runner.invoke( + cli, + [ + "watchlists", + "list-members", + "--watchlist-id", + "test-id", + "-f", + "JSON", + "--only-included-users", + ], + obj=cli_state, + ) + assert "userId" in res.output + assert "username" in res.output + assert "addedTime" in res.output + assert "1234" in res.output + assert "2345" in res.output + assert "3456" in res.output + assert "one@example.com" in res.output + assert "two@example.com" in res.output + assert "three@example.com" in res.output + assert "2022-04-10T23:05:48.096964" in res.output + + def 
test_csv_output_contains_expected_properties( + self, runner, cli_state, mock_members_response, mock_included_users_response + ): + # all members: + cli_state.sdk.watchlists.get_all_watchlist_members.return_value = iter( + [mock_members_response] + ) + res = runner.invoke( + cli, + ["watchlists", "list-members", "--watchlist-id", "test-id", "-f", "CSV"], + obj=cli_state, + ) + assert "userId" in res.output + assert "username" in res.output + assert "addedTime" in res.output + assert "1234" in res.output + assert "2345" in res.output + assert "3456" in res.output + assert "one@example.com" in res.output + assert "two@example.com" in res.output + assert "three@example.com" in res.output + assert "2022-04-10T23:05:48.096964" in res.output + + # only included users: + cli_state.sdk.watchlists.get_all_included_users.return_value = iter( + [mock_included_users_response] + ) + res = runner.invoke( + cli, + [ + "watchlists", + "list-members", + "--watchlist-id", + "test-id", + "-f", + "CSV", + "--only-included-users", + ], + obj=cli_state, + ) + assert "userId" in res.output + assert "username" in res.output + assert "addedTime" in res.output + assert "1234" in res.output + assert "2345" in res.output + assert "3456" in res.output + assert "one@example.com" in res.output + assert "two@example.com" in res.output + assert "three@example.com" in res.output + assert "2022-04-10T23:05:48.096964" in res.output + + def test_invalid_watchlist_type_raises_cli_error(self, runner, cli_state): + res = runner.invoke( + cli, + ["watchlists", "list-members", "--watchlist-type", "INVALID"], + obj=cli_state, + ) + assert res.exit_code == 2 + assert "Invalid value for '--watchlist-type'" in res.output + + def test_missing_watchlist_identifying_option_raises_cli_error( + self, runner, cli_state + ): + res = runner.invoke( + cli, + ["watchlists", "list-members"], + obj=cli_state, + ) + assert res.exit_code == 1 + assert "Error: --watchlist-id OR --watchlist-type is required" in res.output + + 
+class TestWatchlistsAddCmd: + def test_missing_watchlist_identifying_option_raises_cli_error( + self, runner, cli_state + ): + res = runner.invoke(cli, ["watchlists", "add", "1234"], obj=cli_state) + assert res.exit_code == 1 + assert "Error: --watchlist-id OR --watchlist-type is required" in res.output + + def test_invalid_watchlist_type_raises_cli_error(self, runner, cli_state): + res = runner.invoke( + cli, + ["watchlists", "add", "--watchlist-type", "INVALID", "1234"], + obj=cli_state, + ) + assert res.exit_code == 2 + assert "Invalid value for '--watchlist-type'" in res.output + + def test_non_int_user_arg_calls_get_by_username_and_uses_user_id( + self, mocker, runner, cli_state + ): + mock_user_response = create_mock_response(mocker, data={"userId": 1234}) + cli_state.sdk.userriskprofile.get_by_username.return_value = mock_user_response + runner.invoke( + cli, + [ + "watchlists", + "add", + "--watchlist-type", + "DEPARTING_EMPLOYEE", + "test@example.com", + ], + obj=cli_state, + ) + cli_state.sdk.userriskprofile.get_by_username.assert_called_once_with( + "test@example.com" + ) + cli_state.sdk.watchlists.add_included_users_by_watchlist_type.assert_called_once_with( + 1234, "DEPARTING_EMPLOYEE" + ) + + def test_invalid_username_raises_not_found_cli_error( + self, custom_error, runner, cli_state + ): + username = "test@example.com" + cli_state.sdk.userriskprofile.get_by_username.side_effect = ( + Py42UserRiskProfileNotFound(custom_error, username, identifier="username") + ) + res = runner.invoke( + cli, + ["watchlists", "add", "--watchlist-type", "DEPARTING_EMPLOYEE", username], + obj=cli_state, + ) + assert res.exit_code == 1 + assert ( + "Error: User risk profile for user with the username 'test@example.com' not found." 
+ in res.output + ) + + def test_invalid_user_id_raises_not_found_cli_error( + self, custom_error, runner, cli_state + ): + cli_state.sdk.watchlists.add_included_users_by_watchlist_type.side_effect = ( + Py42NotFoundError(custom_error) + ) + cli_state.sdk.watchlists.add_included_users_by_watchlist_id.side_effect = ( + Py42NotFoundError(custom_error) + ) + res = runner.invoke( + cli, + ["watchlists", "add", "--watchlist-type", "DEPARTING_EMPLOYEE", "1234"], + obj=cli_state, + ) + assert res.exit_code == 1 + assert "Error: User ID 1234 not found." in res.output + + res = runner.invoke( + cli, + ["watchlists", "add", "--watchlist-id", "id", "1234"], + obj=cli_state, + ) + assert res.exit_code == 1 + assert "Error: User ID 1234 not found." in res.output + + def test_invalid_watchlist_id_raises_not_found_cli_error( + self, custom_error, runner, cli_state + ): + invalid_watchlist_id = "INVALID" + cli_state.sdk.watchlists.add_included_users_by_watchlist_id.side_effect = ( + Py42WatchlistNotFound(custom_error, invalid_watchlist_id) + ) + res = runner.invoke( + cli, + ["watchlists", "add", "--watchlist-id", invalid_watchlist_id, "1234"], + obj=cli_state, + ) + assert res.exit_code == 1 + assert "Error: Watchlist ID 'INVALID' not found." 
in res.output + + +class TestWatchlistsRemoveCmd: + def test_missing_watchlist_identifying_option_raises_cli_error( + self, runner, cli_state + ): + res = runner.invoke(cli, ["watchlists", "remove", "1234"], obj=cli_state) + assert res.exit_code == 1 + assert "Error: --watchlist-id OR --watchlist-type is required" in res.output + + def test_invalid_watchlist_type_raises_cli_error(self, runner, cli_state): + res = runner.invoke( + cli, + ["watchlists", "remove", "--watchlist-type", "INVALID", "1234"], + obj=cli_state, + ) + assert res.exit_code == 2 + assert "Invalid value for '--watchlist-type'" in res.output + + def test_non_int_user_arg_calls_get_by_username_and_uses_user_id( + self, mocker, runner, cli_state + ): + mock_user_response = create_mock_response(mocker, data={"userId": 1234}) + cli_state.sdk.userriskprofile.get_by_username.return_value = mock_user_response + runner.invoke( + cli, + [ + "watchlists", + "remove", + "--watchlist-type", + "DEPARTING_EMPLOYEE", + "test@example.com", + ], + obj=cli_state, + ) + cli_state.sdk.userriskprofile.get_by_username.assert_called_once_with( + "test@example.com" + ) + cli_state.sdk.watchlists.remove_included_users_by_watchlist_type.assert_called_once_with( + 1234, "DEPARTING_EMPLOYEE" + ) + + def test_invalid_username_raises_not_found_cli_error( + self, custom_error, runner, cli_state + ): + username = "test@example.com" + cli_state.sdk.userriskprofile.get_by_username.side_effect = ( + Py42UserRiskProfileNotFound(custom_error, username, identifier="username") + ) + res = runner.invoke( + cli, + [ + "watchlists", + "remove", + "--watchlist-type", + "DEPARTING_EMPLOYEE", + username, + ], + obj=cli_state, + ) + assert res.exit_code == 1 + assert ( + "Error: User risk profile for user with the username 'test@example.com' not found." 
+ in res.output + ) + + def test_invalid_user_id_raises_not_found_cli_error( + self, custom_error, runner, cli_state + ): + cli_state.sdk.watchlists.remove_included_users_by_watchlist_type.side_effect = ( + Py42NotFoundError(custom_error) + ) + cli_state.sdk.watchlists.remove_included_users_by_watchlist_id.side_effect = ( + Py42NotFoundError(custom_error) + ) + res = runner.invoke( + cli, + ["watchlists", "remove", "--watchlist-type", "DEPARTING_EMPLOYEE", "1234"], + obj=cli_state, + ) + assert res.exit_code == 1 + assert "Error: User ID 1234 not found." in res.output + + res = runner.invoke( + cli, + ["watchlists", "remove", "--watchlist-id", "id", "1234"], + obj=cli_state, + ) + assert res.exit_code == 1 + assert "Error: User ID 1234 not found." in res.output + + def test_invalid_watchlist_id_raises_not_found_cli_error( + self, custom_error, runner, cli_state + ): + invalid_watchlist_id = "INVALID" + cli_state.sdk.watchlists.remove_included_users_by_watchlist_id.side_effect = ( + Py42WatchlistNotFound(custom_error, invalid_watchlist_id) + ) + res = runner.invoke( + cli, + ["watchlists", "remove", "--watchlist-id", invalid_watchlist_id, "1234"], + obj=cli_state, + ) + assert res.exit_code == 1 + assert "Error: Watchlist ID 'INVALID' not found." in res.output + + +class TestWatchlistBulkAddCmd: + def test_csv_without_either_username_or_user_id_raises_cli_error( + self, runner, cli_state + ): + with runner.isolated_filesystem(): + with open("csv", "w") as file: + file.write("watchlist_id,watchlist_type\n") + res = runner.invoke( + cli, ["watchlists", "bulk", "add", "csv"], obj=cli_state + ) + assert res.exit_code == 1 + assert ( + "Error: CSV requires either a `username` or `user_id` column to identify which users to add to watchlist." 
+ in res.output + ) + + def test_csv_without_either_watchlist_type_or_watchlist_id_raises_cli_error( + self, runner, cli_state + ): + with runner.isolated_filesystem(): + with open("csv", "w") as file: + file.write("username,user_id\n") + res = runner.invoke( + cli, ["watchlists", "bulk", "add", "csv"], obj=cli_state + ) + assert res.exit_code == 1 + assert ( + "Error: CSV requires either a `watchlist_id` or `watchlist_type` column to identify which watchlist to add user to." + in res.output + ) + + def test_handle_row_when_passed_all_headers_uses_user_id_and_watchlist_id( + self, runner, cli_state + ): + with runner.isolated_filesystem(): + with open("csv", "w") as file: + file.write( + "username,user_id,watchlist_id,watchlist_type,extra_header\ntest@example.com,1234,abcd,DEPARTING_EMPLOYEE,\n" + ) + runner.invoke(cli, ["watchlists", "bulk", "add", "csv"], obj=cli_state) + cli_state.sdk.watchlists.add_included_users_by_watchlist_id.assert_called_once_with( + "1234", "abcd" + ) + + def test_handle_row_when_passed_no_id_headers_uses_username_and_watchlist_type( + self, mocker, runner, cli_state + ): + cli_state.sdk.userriskprofile.get_by_username.return_value = ( + create_mock_response(mocker, data={"userId": 1234}) + ) + + with runner.isolated_filesystem(): + with open("csv", "w") as file: + file.write( + "username,watchlist_type\ntest@example.com,DEPARTING_EMPLOYEE\n" + ) + runner.invoke(cli, ["watchlists", "bulk", "add", "csv"], obj=cli_state) + cli_state.sdk.watchlists.add_included_users_by_watchlist_type.assert_called_once_with( + 1234, "DEPARTING_EMPLOYEE" + ) + + +class TestWatchlistBulkRemoveCmd: + def test_csv_without_either_username_or_user_id_raises_cli_error( + self, runner, cli_state + ): + with runner.isolated_filesystem(): + with open("csv", "w") as file: + file.write("watchlist_id,watchlist_type\n") + res = runner.invoke( + cli, ["watchlists", "bulk", "remove", "csv"], obj=cli_state + ) + assert res.exit_code == 1 + assert ( + "Error: CSV requires 
either a `username` or `user_id` column to identify which users to remove from watchlist." + in res.output + ) + + def test_csv_without_either_watchlist_type_or_watchlist_id_raises_cli_error( + self, runner, cli_state + ): + with runner.isolated_filesystem(): + with open("csv", "w") as file: + file.write("username,user_id\n") + res = runner.invoke( + cli, ["watchlists", "bulk", "remove", "csv"], obj=cli_state + ) + assert res.exit_code == 1 + assert ( + "Error: CSV requires either a `watchlist_id` or `watchlist_type` column to identify which watchlist to remove user from." + in res.output + ) + + def test_handle_row_when_passed_all_headers_uses_user_id_and_watchlist_id( + self, runner, cli_state + ): + with runner.isolated_filesystem(): + with open("csv", "w") as file: + file.write( + "username,user_id,watchlist_id,watchlist_type\ntest@example.com,1234,abcd,DEPARTING_EMPLOYEE\n" + ) + runner.invoke(cli, ["watchlists", "bulk", "remove", "csv"], obj=cli_state) + cli_state.sdk.watchlists.remove_included_users_by_watchlist_id.assert_called_once_with( + "1234", "abcd" + ) + + def test_handle_row_when_passed_no_id_headers_uses_username_and_watchlist_type( + self, mocker, runner, cli_state + ): + cli_state.sdk.userriskprofile.get_by_username.return_value = ( + create_mock_response(mocker, data={"userId": 1234}) + ) + + with runner.isolated_filesystem(): + with open("csv", "w") as file: + file.write( + "username,watchlist_type\ntest@example.com,DEPARTING_EMPLOYEE\n" + ) + runner.invoke(cli, ["watchlists", "bulk", "remove", "csv"], obj=cli_state) + cli_state.sdk.watchlists.remove_included_users_by_watchlist_type.assert_called_once_with( + 1234, "DEPARTING_EMPLOYEE" + ) diff --git a/tests/common/test_common.py b/tests/common/test_common.py deleted file mode 100644 index dc71c1064..000000000 --- a/tests/common/test_common.py +++ /dev/null @@ -1,322 +0,0 @@ -import pytest -from os import path -from datetime import datetime, timedelta -from logging import StreamHandler, 
FileHandler - -from c42secevents.logging.handlers import NoPrioritySysLogHandler - -from c42seceventcli.common.util import ( - get_config_args, - parse_timestamp, - get_logger, - get_error_logger, - SecArgs, - get_stored_password, - delete_stored_password, - get_user_project_path, - DestinationArgs, -) - - -_DUMMY_KEY = "Key" - - -@pytest.fixture -def mock_config_read(mocker): - return mocker.patch("configparser.ConfigParser.read") - - -@pytest.fixture -def mock_config_get_function(mocker): - return mocker.patch("configparser.ConfigParser.get") - - -@pytest.fixture -def mock_config_get_bool_function(mocker): - return mocker.patch("configparser.ConfigParser.getboolean") - - -@pytest.fixture -def mock_config_file_reader(mocker): - reader = mocker.patch("configparser.ConfigParser.read") - reader.return_value = ["NOT EMPTY LIST"] - return reader - - -@pytest.fixture -def mock_config_file_sections(mocker): - sections = mocker.patch("configparser.ConfigParser.sections") - sections.return_value = ["NOT EMPTY LIST"] - return sections - - -@pytest.fixture -def mock_get_logger(mocker): - return mocker.patch("logging.getLogger") - - -@pytest.fixture -def mock_no_priority_syslog_handler(mocker): - mock_handler_init = mocker.patch( - "c42secevents.logging.handlers.NoPrioritySysLogHandler.__init__" - ) - mock_handler_init.return_value = None - return mock_handler_init - - -@pytest.fixture -def mock_file_handler(mocker): - mock_handler_init = mocker.patch("logging.FileHandler.__init__") - mock_handler_init.return_value = None - return mock_handler_init - - -@pytest.fixture -def mock_password_getter(mocker): - return mocker.patch("keyring.get_password") - - -@pytest.fixture -def password_patches( - mocker, - mock_password_getter, - mock_password_setter, - mock_password_deleter, - mock_get_input, - mock_getpass_function, -): - mock = mocker.MagicMock() - mock.get_password = mock_password_getter - mock.set_password = mock_password_setter - mock.delete_password = 
mock_password_deleter - mock.getpass = mock_getpass_function - mock.get_input = mock_get_input - return mock - - -@pytest.fixture -def mock_password_setter(mocker): - return mocker.patch("keyring.set_password") - - -@pytest.fixture -def mock_password_deleter(mocker): - return mocker.patch("keyring.delete_password") - - -@pytest.fixture -def mock_getpass_function(mocker): - return mocker.patch("getpass.getpass") - - -@pytest.fixture -def mock_get_input(mocker): - return mocker.patch("c42seceventcli.common.util._get_input") - - -@pytest.fixture -def path_patches(mocker, mock_user_expansion, mock_dir_maker, mock_path_existence): - mock = mocker.MagicMock() - mock.expand_user = mock_user_expansion - mock.make_dirs = mock_dir_maker - mock.path_exists = mock_path_existence - return mock - - -@pytest.fixture -def mock_user_expansion(mocker): - return mocker.patch("os.path.expanduser") - - -@pytest.fixture -def mock_dir_maker(mocker): - return mocker.patch("c42seceventcli.common.util.makedirs") - - -@pytest.fixture -def mock_path_existence(mocker): - return mocker.patch("os.path.exists") - - -def test_get_user_project_path_returns_expected_path(path_patches): - expected_home = "/PATH/" - expected_subdir = "SUBDIR" - path_patches.expand_user.return_value = expected_home - expected = path.join(expected_home, ".c42seceventcli", expected_subdir) - actual = get_user_project_path(expected_subdir) - assert actual == expected - - -def test_get_user_project_path_calls_make_dirs_when_path_does_not_exist(path_patches): - expected_home = "/PATH/" - expected_subdir = "SUBDIR" - path_patches.expand_user.return_value = expected_home - expected_path = path.join(expected_home, ".c42seceventcli", expected_subdir) - path_patches.path_exists.return_value = False - get_user_project_path(expected_subdir) - path_patches.make_dirs.assert_called_once_with(expected_path) - - -def test_get_user_project_path_does_not_call_make_dirs_when_path_exists(path_patches): - expected_home = "/PATH/" - 
expected_subdir = "SUBDIR" - path_patches.expand_user.return_value = expected_home - path_patches.path_exists.return_value = True - get_user_project_path(expected_subdir) - assert not path_patches.make_dirs.call_count - - -def test_get_config_args_when_read_returns_empty_list_raises_io_error(mocker): - reader = mocker.patch("configparser.ConfigParser.read") - reader.return_value = [] - with pytest.raises(IOError): - get_config_args("Test") - - -def test_get_config_args_when_sections_returns_empty_list_returns_empty_dict( - mocker, mock_config_file_reader -): - sections = mocker.patch("configparser.ConfigParser.sections") - sections.return_value = [] - assert get_config_args("Test") == {} - - -def test_get_config_args_returns_dict_made_from_items( - mocker, mock_config_file_reader, mock_config_file_sections -): - mock_tuples = mocker.patch("configparser.ConfigParser.items") - mock_tuples.return_value = [("Hi", "Bye"), ("Pizza", "FrenchFries")] - arg_dict = get_config_args("Test") - assert arg_dict == {"Hi": "Bye", "Pizza": "FrenchFries"} - - -def test_parse_timestamp_when_given_date_format_returns_expected_timestamp(): - date_str = "2019-10-01" - date = datetime.strptime(date_str, "%Y-%m-%d") - expected = (date - date.utcfromtimestamp(0)).total_seconds() - actual = parse_timestamp(date_str) - assert actual == expected - - -def test_parse_timestamp_when_given_minutes_ago_format_returns_expected_timestamp(): - minutes_ago = 1000 - now = datetime.utcnow() - time = now - timedelta(minutes=minutes_ago) - expected = (time - datetime.utcfromtimestamp(0)).total_seconds() - actual = parse_timestamp("1000") - assert pytest.approx(actual, expected) - - -def test_parse_timestamp_when_given_bad_string_raises_value_error(): - with pytest.raises(ValueError): - parse_timestamp("BAD!") - - -def test_get_error_logger_uses_rotating_file_with_expected_args(mocker, mock_get_logger): - expected_service_name = "TEST_SERVICE" - mock_handler = 
mocker.patch("logging.handlers.RotatingFileHandler.__init__") - mock_handler.return_value = None - get_error_logger(expected_service_name) - expected_path = "{0}/{1}_errors.log".format(get_user_project_path("log"), expected_service_name) - mock_handler.assert_called_once_with(expected_path, maxBytes=250000000) - - -def test_get_logger_when_destination_type_is_stdout_adds_stream_handler_to_logger(mock_get_logger): - service = "TEST_SERVICE" - args = DestinationArgs() - args.destination_type = "stdout" - logger = get_logger(None, service, args) - actual = type(logger.addHandler.call_args[0][0]) - expected = StreamHandler - assert actual == expected - - -def test_get_logger_when_destination_type_is_file_adds_file_handler_to_logger( - mock_get_logger, mock_file_handler -): - service = "TEST_SERVICE" - args = DestinationArgs() - args.destination_type = "file" - logger = get_logger(None, service, args) - actual = type(logger.addHandler.call_args[0][0]) - expected = FileHandler - assert actual == expected - - -def test_get_logger_when_destination_type_is_server_adds_no_priority_syslog_handler_to_logger( - mock_get_logger, mock_no_priority_syslog_handler -): - service = "TEST_SERVICE" - args = DestinationArgs() - args.destination_type = "server" - logger = get_logger(None, service, args) - actual = type(logger.addHandler.call_args[0][0]) - expected = NoPrioritySysLogHandler - assert actual == expected - - -def test_get_stored_password_when_keyring_returns_none_uses_password_from_getpass(password_patches): - password_patches.get_password.return_value = None - expected = "super_secret_password" - password_patches.getpass.return_value = expected - actual = get_stored_password("TEST", "USER") - assert actual == expected - - -def test_get_stored_password_returns_same_value_from_keyring(password_patches): - expected = "super_secret_password" - password_patches.get_password.return_value = expected - actual = get_stored_password("TEST", "USER") - assert actual == expected - - -def 
test_get_stored_password_when_keyring_returns_none_and_get_input_returns_y_calls_set_password_with_password_from_getpass( - password_patches -): - expected_service_name = "SERVICE" - expected_username = "ME" - expected_password = "super_secret_password" - password_patches.get_password.return_value = None - password_patches.get_input.return_value = "y" - password_patches.getpass.return_value = expected_password - - get_stored_password(expected_service_name, expected_username) - password_patches.set_password.assert_called_once_with( - expected_service_name, expected_username, expected_password - ) - - -def test_get_stored_password_when_keyring_returns_none_and_get_input_returns_n_does_not_call_set_password( - password_patches -): - expected_service_name = "SERVICE" - expected_username = "ME" - expected_password = "super_secret_password" - password_patches.get_password.return_value = expected_username - password_patches.get_input.return_value = "n" - password_patches.getpass.return_value = expected_password - - get_stored_password(expected_service_name, expected_username) - assert not password_patches.set_password.call_count - - -def test_delete_stored_password_calls_keyring_delete_password(password_patches): - expected_service_name = "SERVICE" - expected_username = "ME" - delete_stored_password(expected_service_name, expected_username) - password_patches.delete_password.assert_called_once_with( - expected_service_name, expected_username - ) - - -def test_subclass_of_sec_args_try_set_favors_cli_arg_over_config_arg(): - class SubclassSecArgs(SecArgs): - test = None - - arg_name = "test" - cli_arg_value = 1 - config_arg_value = 2 - args = SubclassSecArgs() - args.try_set(arg_name, cli_arg_value, config_arg_value) - expected = cli_arg_value - assert args.test == expected diff --git a/tests/common/test_cursor_store.py b/tests/common/test_cursor_store.py deleted file mode 100644 index 624f04ab9..000000000 --- a/tests/common/test_cursor_store.py +++ /dev/null @@ -1,20 +0,0 
@@ -from os import path -from c42seceventcli.common.cursor_store import SecurityEventCursorStore - - -class TestSecurityEventCursorStore(object): - def test_init_cursor_store_when_not_given_db_file_path_uses_expected_path_with_db_table_name_as_db_file_name( - self, sqlite_connection - ): - home_dir = path.expanduser("~") - expected_path = path.join(home_dir, ".c42seceventcli/db") - expected_db_name = "TEST" - expected_db_file_path = "{0}/{1}.db".format(expected_path, expected_db_name) - SecurityEventCursorStore(expected_db_name) - sqlite_connection.assert_called_once_with(expected_db_file_path) - - def test_init_cursor_store_when_given_db_file_path_uses_given_path(self, mocker): - mock_connect_function = mocker.patch("sqlite3.connect") - expected_db_file_path = "Hey, look, I'm a file path..." - SecurityEventCursorStore("test", expected_db_file_path) - mock_connect_function.assert_called_once_with(expected_db_file_path) diff --git a/tests/conftest.py b/tests/conftest.py index 664988c12..d5bec08f6 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,8 +1,315 @@ +import json +from datetime import datetime +from datetime import timedelta + import pytest +from click.testing import CliRunner +from py42.response import Py42Response +from py42.sdk import SDKClient +from requests import HTTPError +from requests import Response + +import code42cli.errors as error_tracker +from code42cli.config import ConfigAccessor +from code42cli.options import CLIState +from code42cli.profile import Code42Profile + +TEST_ID = "TEST_ID" + + +@pytest.fixture(scope="session") +def runner(): + return CliRunner() + + +@pytest.fixture(autouse=True) +def io_prevention(monkeypatch): + monkeypatch.setattr("logging.FileHandler._open", lambda *args, **kwargs: None) + + +@pytest.fixture +def file_event_namespace(): + args = dict( + sdk=mock_42, + profile=create_mock_profile(), + incremental=None, + advanced_query=None, + begin=None, + end=None, + type=None, + c42_username=None, + actor=None, + 
md5=None, + sha256=None, + source=None, + file_name=None, + file_path=None, + process_owner=None, + tab_url=None, + include_non_exposure=None, + format=None, + output_file=None, + server=None, + protocol=None, + saved_search=None, + ) + return args + + +@pytest.fixture +def alert_namespace(): + args = dict( + sdk=mock_42, + profile=create_mock_profile(), + incremental=None, + advanced_query=None, + begin=None, + end=None, + severity=None, + state=None, + actor=None, + actor_contains=None, + exclude_actor=None, + exclude_actor_contains=None, + rule_name=None, + exclude_rule_name=None, + rule_id=None, + exclude_rule_id=None, + rule_type=None, + exclude_rule_type=None, + description=None, + format=None, + output_file=None, + server=None, + protocol=None, + ) + return args + + +def create_profile_values_dict( + authority=None, + username=None, + ignore_ssl=False, + use_v2_file_events=False, + api_client_auth="False", +): + return { + ConfigAccessor.AUTHORITY_KEY: "example.com", + ConfigAccessor.USERNAME_KEY: "foo", + ConfigAccessor.IGNORE_SSL_ERRORS_KEY: "True", + ConfigAccessor.USE_V2_FILE_EVENTS_KEY: "False", + ConfigAccessor.API_CLIENT_AUTH_KEY: "False", + } + + +@pytest.fixture +def sdk(mocker): + return mocker.MagicMock(spec=SDKClient) + + +@pytest.fixture +def sdk_with_user(sdk): + sdk.users.get_by_username.return_value = {"users": [{"userUid": TEST_ID}]} + return sdk + + +@pytest.fixture +def sdk_without_user(sdk): + sdk.users.get_by_username.return_value = {"users": []} + return sdk + + +@pytest.fixture +def mock_42(mocker): + return mocker.patch("py42.sdk.from_local_account") + + +@pytest.fixture +def cli_state(mocker, sdk, profile): + mock_state = mocker.MagicMock(spec=CLIState) + mock_state._sdk = sdk + mock_state.profile = profile + mock_state.search_filters = [] + mock_state.totp = None + mock_state.assume_yes = False + return mock_state + + +class MockSection: + def __init__(self, name=None, values_dict=None): + self.name = name + self.values_dict = 
values_dict or create_profile_values_dict() + + def __getitem__(self, item): + return self.values_dict[item] + + def __setitem__(self, key, value): + self.values_dict[key] = value + + def get(self, item): + return self.values_dict.get(item) + + +def create_mock_profile(name="Test Profile Name"): + profile_section = MockSection(name) + profile = Code42Profile(profile_section) + return profile + + +def setup_mock_accessor(mock_accessor, name=None, values_dict=None): + profile_section = MockSection(name, values_dict) + mock_accessor.get_profile.return_value = profile_section + return mock_accessor + + +@pytest.fixture +def profile(mocker): + mock = mocker.MagicMock(spec=Code42Profile) + mock.name = "testcliprofile" + return mock + + +@pytest.fixture(autouse=True) +def mock_makedirs(mocker): + return mocker.patch("os.makedirs") + + +@pytest.fixture(autouse=True) +def mock_remove(mocker): + return mocker.patch("os.remove") + + +@pytest.fixture(autouse=True) +def mock_listdir(mocker): + return mocker.patch("os.listdir") + -MOCK_TEST_DB_PATH = "test_path.db" +def func_keyword_args( + one=None, two=None, three=None, default="testdefault", nargstest=None +): + pass + + +def func_single_positional_arg(one): + pass + + +def func_single_positional_arg_many_optional_args(one, two=None, three=None, four=None): + pass + + +def func_positional_args(one, two, three): + pass + + +def func_mixed_args(one, two, three=None, four=None): + pass + + +def func_with_sdk(sdk, one, two, three=None, four=None): + pass + + +def func_single_positional_arg_with_sdk_and_profile( + sdk, profile, one, two=None, three=None, four=None +): + pass + + +def func_with_args(args): + pass + + +def convert_str_to_date(date_str): + return datetime.strptime(date_str, "%Y-%m-%dT%H:%M:%S.%fZ") + + +def get_test_date(days_ago=None, hours_ago=None, minutes_ago=None): + """Note: only pass in one parameter to get the right test date... 
this is just a test func.""" + now = datetime.utcnow() + if days_ago: + return now - timedelta(days=days_ago) + if hours_ago: + return now - timedelta(hours=hours_ago) + if minutes_ago: + return now - timedelta(minutes=minutes_ago) + + +def get_test_date_str(days_ago): + return get_test_date(days_ago).strftime("%Y-%m-%d") + + +begin_date_str = get_test_date_str(days_ago=89) +begin_date_str_with_time = f"{begin_date_str} 3:12:33" +begin_date_str_with_t_time = f"{begin_date_str}T3:12:33" +end_date_str = get_test_date_str(days_ago=10) +end_date_str_with_time = f"{end_date_str} 11:22:43" +begin_date_str = get_test_date_str(days_ago=89) +begin_date_with_time = [get_test_date_str(days_ago=89), "3:12:33"] +end_date_str = get_test_date_str(days_ago=10) +end_date_with_time = [get_test_date_str(days_ago=10), "11:22:43"] + + +class ErrorTrackerTestHelper: + def __enter__(self): + error_tracker.ERRORED = True + + def __exit__(self, exc_type, exc_val, exc_tb): + error_tracker.ERRORED = False + + +TEST_FILE_PATH = "some/path" + + +@pytest.fixture +def mock_to_table(mocker): + return mocker.patch("code42cli.output_formats.to_table") + + +@pytest.fixture +def mock_to_csv(mocker): + return mocker.patch("code42cli.output_formats.to_csv") + + +@pytest.fixture +def mock_to_json(mocker): + return mocker.patch("code42cli.output_formats.to_json") + + +@pytest.fixture +def mock_to_formatted_json(mocker): + return mocker.patch("code42cli.output_formats.to_formatted_json") + + +@pytest.fixture +def mock_dataframe_to_json(mocker): + return mocker.patch("pandas.DataFrame.to_json") + + +@pytest.fixture +def mock_dataframe_to_csv(mocker): + return mocker.patch("pandas.DataFrame.to_csv") @pytest.fixture -def sqlite_connection(mocker): - return mocker.patch("sqlite3.connect") +def mock_dataframe_to_string(mocker): + return mocker.patch("pandas.DataFrame.to_string") + + +def create_mock_response(mocker, data=None, status=200): + if isinstance(data, (dict, list)): + data = json.dumps(data) + elif 
not data: + data = "" + response = mocker.MagicMock(spec=Response) + response.text = data + response.status_code = status + response.encoding = None + response._content_consumed = "" + return Py42Response(response) + + +def create_mock_http_error(mocker, data=None, status=400): + mock_http_error = mocker.MagicMock(spec=HTTPError) + mock_http_error.response = create_mock_response(mocker, data=data, status=status) + return mock_http_error diff --git a/tests/common/__init__.py b/tests/integration/__init__.py similarity index 100% rename from tests/common/__init__.py rename to tests/integration/__init__.py diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py new file mode 100644 index 000000000..dffbe487b --- /dev/null +++ b/tests/integration/conftest.py @@ -0,0 +1,61 @@ +import os +from shlex import split as split_command + +import pytest +from tests.integration.util import DataServer + +from code42cli.errors import Code42CLIError +from code42cli.main import cli +from code42cli.profile import get_profile + + +TEST_PROFILE_NAME = "TEMP-INTEGRATION-TEST" +_LINE_FEED = b"\r\n" +_PASSWORD_PROMPT = b"Password: " +_ENCODING_TYPE = "utf-8" + + +@pytest.fixture(scope="session") +def integration_test_profile(runner): + """Creates a temporary profile to use for executing integration tests.""" + host = os.environ.get("C42_HOST") or "http://127.0.0.1:4200" + username = os.environ.get("C42_USER") or "test_username@example.com" + password = os.environ.get("C42_PW") or "test_password" + delete_test_profile = split_command(f"profile delete {TEST_PROFILE_NAME} -y") + create_test_profile = split_command( + f"profile create -n {TEST_PROFILE_NAME} -u {username} -s {host} --password {password} -y" + ) + runner.invoke(cli, delete_test_profile) + result = runner.invoke(cli, create_test_profile) + if result.exit_code != 0: + pytest.exit(result.output) + yield + runner.invoke(cli, delete_test_profile) + + +def _get_current_profile_name(): + try: + profile = get_profile() 
+ return profile.name + except Code42CLIError: + return None + + +def _encode_response(line, encoding_type=_ENCODING_TYPE): + return line.decode(encoding_type) + + +def append_profile(command): + return f"{command} --profile {TEST_PROFILE_NAME}" + + +@pytest.fixture(scope="session") +def udp_dataserver(): + with DataServer(protocol="UDP"): + yield + + +@pytest.fixture(scope="session") +def tcp_dataserver(): + with DataServer(protocol="TCP"): + yield diff --git a/tests/integration/test_alert_rules.py b/tests/integration/test_alert_rules.py new file mode 100644 index 000000000..f915baf07 --- /dev/null +++ b/tests/integration/test_alert_rules.py @@ -0,0 +1,19 @@ +import pytest +from tests.integration.conftest import append_profile +from tests.integration.util import assert_test_is_successful + + +@pytest.mark.integration +def test_alert_rules_list_command_returns_success_return_code( + runner, integration_test_profile +): + command = "alert-rules list" + assert_test_is_successful(runner, append_profile(command)) + + +@pytest.mark.integration +def test_alert_rules_show_command_returns_success_return_code( + runner, integration_test_profile +): + command = ("alert-rules show test-rule-id",) + assert_test_is_successful(runner, append_profile(command)) diff --git a/tests/integration/test_alerts.py b/tests/integration/test_alerts.py new file mode 100644 index 000000000..992998ced --- /dev/null +++ b/tests/integration/test_alerts.py @@ -0,0 +1,57 @@ +from datetime import datetime +from datetime import timedelta +from shlex import split as split_command + +import pytest +from tests.integration.conftest import append_profile +from tests.integration.util import assert_test_is_successful + +from code42cli.main import cli + +begin_date = datetime.utcnow() - timedelta(days=20) +end_date = datetime.utcnow() - timedelta(days=10) +begin_date_str = begin_date.strftime("%Y-%m-%d") +end_date_str = end_date.strftime("%Y-%m-%d") + + +@pytest.mark.integration +def 
test_alerts_search_command_returns_success_return_code( + runner, integration_test_profile +): + command = f"alerts search -b {begin_date_str} -e {end_date_str}" + assert_test_is_successful(runner, append_profile(command)) + + +@pytest.mark.integration +def test_alerts_send_to_tcp_returns_success_return_code( + runner, integration_test_profile, tcp_dataserver +): + command = append_profile( + f"alerts send-to localhost:5140 -p TCP -b '{begin_date_str}'" + ) + result = runner.invoke(cli, split_command(command)) + assert result.exit_code == 0 + + +@pytest.mark.integration +def test_alerts_send_to_udp_returns_success_return_code( + runner, integration_test_profile, udp_dataserver +): + command = append_profile( + f"alerts send-to localhost:5141 -p UDP -b '{begin_date_str}'" + ) + result = runner.invoke(cli, split_command(command)) + assert result.exit_code == 0 + + +@pytest.mark.integration +def test_alerts_advanced_query_returns_success_return_code( + runner, integration_test_profile +): + advanced_query = """{"groupClause":"AND", "groups":[{"filterClause":"AND", + "filters":[{"operator":"ON_OR_AFTER", "term":"eventTimestamp", "value":"2020-09-13T00:00:00.000Z"}, + {"operator":"ON_OR_BEFORE", "term":"eventTimestamp", "value":"2020-12-07T13:20:15.195Z"}]}], + "srtDir":"asc", "srtKey":"eventId", "pgNum":1, "pgSize":10000} + """ + command = f"alerts search --advanced-query '{advanced_query}'" + assert_test_is_successful(runner, append_profile(command)) diff --git a/tests/integration/test_auditlogs.py b/tests/integration/test_auditlogs.py new file mode 100644 index 000000000..c8a09d247 --- /dev/null +++ b/tests/integration/test_auditlogs.py @@ -0,0 +1,54 @@ +from datetime import datetime +from datetime import timedelta +from shlex import split as split_command + +import pytest +from tests.integration.conftest import append_profile +from tests.integration.util import assert_test_is_successful + +from code42cli.main import cli + + +begin_date = datetime.utcnow() - 
timedelta(days=2) +begin_date_str = begin_date.strftime("%Y-%m-%d %H:%M:%S") +end_date = datetime.utcnow() - timedelta(days=0) +end_date_str = end_date.strftime("%Y-%m-%d %H:%M:%S") + + +@pytest.mark.integration +def test_auditlogs_send_to_tcp_command_returns_success_return_code( + runner, integration_test_profile, tcp_dataserver +): + command = append_profile( + f"audit-logs send-to localhost:5140 -p TCP -b '{begin_date_str}'" + ) + result = runner.invoke(cli, split_command(command)) + assert result.exit_code == 0 + + +@pytest.mark.integration +def test_auditlogs_send_to_udp_command_returns_success_return_code( + runner, integration_test_profile, udp_dataserver +): + command = append_profile( + f"audit-logs send-to localhost:5141 -p UDP -b '{begin_date_str}'" + ) + result = runner.invoke(cli, split_command(command)) + assert result.exit_code == 0 + + +@pytest.mark.integration +def test_auditlogs_search_command_with_short_hand_begin_returns_success_return_code( + runner, integration_test_profile +): + command = f"audit-logs search -b '{begin_date_str}'" + assert_test_is_successful(runner, append_profile(command)) + + +@pytest.mark.integration +def test_auditlogs_search_command_with_full_begin_returns_success_return_code( + runner, + integration_test_profile, +): + command = f"audit-logs search --begin '{begin_date_str}'" + assert_test_is_successful(runner, append_profile(command)) diff --git a/tests/integration/test_cases.py b/tests/integration/test_cases.py new file mode 100644 index 000000000..6fab7e9d0 --- /dev/null +++ b/tests/integration/test_cases.py @@ -0,0 +1,11 @@ +import pytest +from tests.integration.conftest import append_profile +from tests.integration.util import assert_test_is_successful + + +@pytest.mark.integration +def test_cases_list_command_returns_success_return_code( + runner, integration_test_profile +): + command = "cases list" + assert_test_is_successful(runner, append_profile(command)) diff --git a/tests/integration/test_legal_hold.py 
b/tests/integration/test_legal_hold.py new file mode 100644 index 000000000..faa499efb --- /dev/null +++ b/tests/integration/test_legal_hold.py @@ -0,0 +1,19 @@ +import pytest +from tests.integration.conftest import append_profile +from tests.integration.util import assert_test_is_successful + + +@pytest.mark.integration +def test_legal_hold_list_command_returns_success_return_code( + runner, integration_test_profile +): + command = "legal-hold list" + assert_test_is_successful(runner, append_profile(command)) + + +@pytest.mark.integration +def test_legal_hold_show_command_returns_success_return_code( + runner, integration_test_profile +): + command = ("legal-hold show 984140047896012577",) + assert_test_is_successful(runner, append_profile(command)) diff --git a/tests/integration/test_securitydata.py b/tests/integration/test_securitydata.py new file mode 100644 index 000000000..d2e457908 --- /dev/null +++ b/tests/integration/test_securitydata.py @@ -0,0 +1,53 @@ +from datetime import datetime +from datetime import timedelta +from shlex import split as split_command + +import pytest +from tests.integration.conftest import append_profile +from tests.integration.util import assert_test_is_successful + +from code42cli.main import cli + +begin_date = datetime.utcnow() - timedelta(days=20) +end_date = datetime.utcnow() - timedelta(days=10) +begin_date_str = begin_date.strftime("%Y-%m-%d") +end_date_str = end_date.strftime("%Y-%m-%d") + + +@pytest.mark.integration +def test_security_data_send_to_tcp_return_success_return_code( + runner, integration_test_profile, tcp_dataserver +): + command = append_profile( + f"security-data send-to localhost:5140 -p TCP -b '{begin_date_str}'" + ) + result = runner.invoke(cli, split_command(command)) + assert result.exit_code == 0 + + +@pytest.mark.integration +def test_security_data_send_to_udp_return_success_return_code( + runner, integration_test_profile, udp_dataserver +): + command = append_profile( + f"security-data send-to 
localhost:5141 -p UDP -b '{begin_date_str}'" + ) + result = runner.invoke(cli, split_command(command)) + assert result.exit_code == 0 + + +@pytest.mark.integration +def test_security_data_advanced_query_returns_success_return_code( + runner, integration_test_profile +): + advanced_query = """{"groupClause":"AND", "groups":[{"filterClause":"AND","filters":[{"operator":"ON_OR_AFTER", "term":"eventTimestamp", "value":"2020-09-13T00:00:00.000Z"},{"operator":"ON_OR_BEFORE", "term":"eventTimestamp", "value":"2020-12-07T13:20:15.195Z"}]}],"srtDir":"asc", "srtKey":"eventId", "pgNum":1, "pgSize":10000}""" + command = f"security-data search --advanced-query '{advanced_query}'" + assert_test_is_successful(runner, append_profile(command)) + + +@pytest.mark.integration +def test_security_data_search_command_returns_success_return_code( + runner, integration_test_profile +): + command = f"security-data search -b {begin_date_str} -e {end_date_str}" + assert_test_is_successful(runner, append_profile(command)) diff --git a/tests/integration/test_trustedactivities.py b/tests/integration/test_trustedactivities.py new file mode 100644 index 000000000..d8d99cc1c --- /dev/null +++ b/tests/integration/test_trustedactivities.py @@ -0,0 +1,11 @@ +import pytest +from tests.integration.conftest import append_profile +from tests.integration.util import assert_test_is_successful + + +@pytest.mark.integration +def test_trusted_activities_list_command_returns_success_return_code( + runner, integration_test_profile +): + command = "trusted-activities list" + assert_test_is_successful(runner, append_profile(command)) diff --git a/tests/integration/test_users.py b/tests/integration/test_users.py new file mode 100644 index 000000000..24c3f4dcb --- /dev/null +++ b/tests/integration/test_users.py @@ -0,0 +1,19 @@ +import pytest +from tests.integration.conftest import append_profile +from tests.integration.util import assert_test_is_successful + + +@pytest.mark.integration +def 
test_users_list_command_returns_success_return_code( + runner, integration_test_profile +): + command = "users list" + assert_test_is_successful(runner, append_profile(command)) + + +@pytest.mark.integration +def test_users_orgs_list_command_returns_success_return_code( + runner, integration_test_profile +): + command = "users orgs list" + assert_test_is_successful(runner, append_profile(command)) diff --git a/tests/integration/util.py b/tests/integration/util.py new file mode 100644 index 000000000..d1646b015 --- /dev/null +++ b/tests/integration/util.py @@ -0,0 +1,61 @@ +import os +import subprocess +import time +from shlex import split as split_command + +from code42cli.main import cli + + +class cleanup: + def __init__(self, filename): + self.filename = filename + + def __enter__(self): + return open(self.filename) + + def __exit__(self, exc_type, exc_val, exc_tb): + os.remove(self.filename) + + +def cleanup_after_validation(filename): + """Decorator to read response from file for `write-to` commands and cleanup the file after test + execution. + + The decorated function should return validation function that takes the content of the file + as input. 
e.g + """ + + def wrap(test_function): + def wrapper(): + validate = test_function() + with cleanup(filename) as f: + response = f.read() + validate(response) + + return wrapper + + return wrap + + +class DataServer: + TCP_SERVER_COMMAND = "ncat -l 5140" + UDP_SERVER_COMMAND = "ncat -ul 5141" + + def __init__(self, protocol="TCP"): + if protocol.upper() == "UDP": + self.command = DataServer.UDP_SERVER_COMMAND + else: + self.command = DataServer.TCP_SERVER_COMMAND + self.process = None + + def __enter__(self): + self.process = subprocess.Popen(self.command, shell=True) + time.sleep(1) + + def __exit__(self, exc_type, exc_val, exc_tb): + self.process.kill() + + +def assert_test_is_successful(runner, command): + result = runner.invoke(cli, split_command(command)) + assert result.exit_code == 0 diff --git a/tests/logger/__init__.py b/tests/logger/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/logger/conftest.py b/tests/logger/conftest.py new file mode 100644 index 000000000..c50a04db9 --- /dev/null +++ b/tests/logger/conftest.py @@ -0,0 +1,119 @@ +import json +import logging + +import pytest + + +AED_CLOUD_ACTIVITY_EVENT_DICT = json.loads( + """{ + "url": "https://www.example.com", + "syncDestination": "TEST_SYNC_DESTINATION", + "sharedWith": [{"cloudUsername": "example1@example.com"}, {"cloudUsername": "example2@example.com"}], + "cloudDriveId": "TEST_CLOUD_DRIVE_ID", + "actor": "actor@example.com", + "tabUrl": "TEST_TAB_URL", + "windowTitle": "TEST_WINDOW_TITLE" + }""" +) +AED_REMOVABLE_MEDIA_EVENT_DICT = json.loads( + """{ + "removableMediaVendor": "TEST_VENDOR_NAME", + "removableMediaName": "TEST_NAME", + "removableMediaSerialNumber": "TEST_SERIAL_NUMBER", + "removableMediaCapacity": 5000000, + "removableMediaBusType": "TEST_BUS_TYPE" + }""" +) +AED_EMAIL_EVENT_DICT = json.loads( + """{ + "emailSender": "TEST_EMAIL_SENDER", + "emailRecipients": ["test.recipient1@example.com", "test.recipient2@example.com"] + }""" +) +AED_EVENT_DICT = 
json.loads(
    """{
        "eventId": "0_1d71796f-af5b-4231-9d8e-df6434da4663_912339407325443353_918253081700247636_16",
        "eventType": "READ_BY_APP",
        "eventTimestamp": "2019-09-09T02:42:23.851Z",
        "insertionTimestamp": "2019-09-09T22:47:42.724Z",
        "filePath": "/Users/testtesterson/Downloads/About Downloads.lpdf/Contents/Resources/English.lproj/",
        "fileName": "InfoPlist.strings",
        "fileType": "FILE",
        "fileCategory": "UNCATEGORIZED",
        "fileSize": 86,
        "fileOwner": "testtesterson",
        "md5Checksum": "19b92e63beb08c27ab4489fcfefbbe44",
        "sha256Checksum": "2e0677355c37fa18fd20d372c7420b8b34de150c5801910c3bbb1e8e04c727ef",
        "createTimestamp": "2012-07-22T02:19:29Z",
        "modifyTimestamp": "2012-12-19T03:00:08Z",
        "deviceUserName": "test.testerson+testair@example.com",
        "osHostName": "Test's MacBook Air",
        "domainName": "192.168.0.3",
        "publicIpAddress": "71.34.4.22",
        "privateIpAddresses": [
            "fe80:0:0:0:f053:a9bd:973:6c8c%utun1",
            "fe80:0:0:0:a254:cb31:8840:9d6b%utun0",
            "0:0:0:0:0:0:0:1%lo0",
            "192.168.0.3",
            "fe80:0:0:0:0:0:0:1%lo0",
            "fe80:0:0:0:8c28:1ac9:5745:a6e7%utun3",
            "fe80:0:0:0:2e4a:351c:bb9b:2f28%utun2",
            "fe80:0:0:0:6df:855c:9436:37f8%utun4",
            "fe80:0:0:0:ce:5072:e5f:7155%en0",
            "fe80:0:0:0:b867:afff:fefc:1a82%awdl0",
            "127.0.0.1"
        ],
        "deviceUid": "912339407325443353",
        "userUid": "912338501981077099",
        "actor": null,
        "directoryId": [],
        "source": "Endpoint",
        "url": null,
        "shared": null,
        "sharedWith": [],
        "sharingTypeAdded": [],
        "cloudDriveId": null,
        "detectionSourceAlias": null,
        "fileId": null,
        "exposure": [
            "ApplicationRead"
        ],
        "processOwner": "testtesterson",
        "processName": "/Applications/Google Chrome.app/Contents/MacOS/Google Chrome",
        "removableMediaVendor": null,
        "removableMediaName": null,
        "removableMediaSerialNumber": null,
        "removableMediaCapacity": null,
        "removableMediaBusType": null,
        "syncDestination": null
    }"""
)


# Base LogRecord mock; the fixtures below substitute different canned event
# payloads as the record's `msg` attribute.
@pytest.fixture()
def mock_log_record(mocker):
    return 
mocker.MagicMock(spec=logging.LogRecord)


# Each fixture below yields the shared LogRecord mock with a specific canned
# event dict installed as its `msg`.
@pytest.fixture
def mock_file_event_log_record(mock_log_record):
    mock_log_record.msg = AED_EVENT_DICT
    return mock_log_record


@pytest.fixture
def mock_file_event_removable_media_event_log_record(mock_log_record):
    mock_log_record.msg = AED_REMOVABLE_MEDIA_EVENT_DICT
    return mock_log_record


@pytest.fixture
def mock_file_event_cloud_activity_event_log_record(mock_log_record):
    mock_log_record.msg = AED_CLOUD_ACTIVITY_EVENT_DICT
    return mock_log_record


@pytest.fixture
def mock_file_event_email_event_log_record(mock_log_record):
    mock_log_record.msg = AED_EMAIL_EVENT_DICT
    return mock_log_record
diff --git a/tests/logger/test_formatters.py b/tests/logger/test_formatters.py new file mode 100644 index 000000000..6515ffa10 --- /dev/null +++ b/tests/logger/test_formatters.py @@ -0,0 +1,546 @@
import json

from code42cli.logger.formatters import FileEventDictToCEFFormatter
from code42cli.logger.formatters import FileEventDictToJSONFormatter
from code42cli.logger.formatters import FileEventDictToRawJSONFormatter
from code42cli.maps import FILE_EVENT_TO_SIGNATURE_ID_MAP


class TestFileEventDictToCEFFormatter:
    """Tests for the CEF formatter.

    CEF messages are pipe-delimited:
    CEF:0|vendor|product|version|signatureID|eventName|severity|extension
    """

    def test_format_returns_cef_tagged_string(self, mock_file_event_log_record):
        cef_out = FileEventDictToCEFFormatter().format(mock_file_event_log_record)
        cef_parts = get_cef_parts(cef_out)
        assert cef_parts[0] == "CEF:0"

    def test_format_uses_correct_vendor_name(self, mock_file_event_log_record):
        cef_out = FileEventDictToCEFFormatter().format(mock_file_event_log_record)
        cef_parts = get_cef_parts(cef_out)
        assert cef_parts[1] == "Code42"

    def test_format_uses_correct_default_product_name(self, mock_file_event_log_record):
        cef_out = FileEventDictToCEFFormatter().format(mock_file_event_log_record)
        cef_parts = get_cef_parts(cef_out)
        assert cef_parts[2] == "Advanced Exfiltration Detection"

    def test_format_uses_correct_product_name(self, mock_file_event_log_record):
        alternate_product_name = "Security Parser Formatter Extractor Thingamabob"
        cef_out = FileEventDictToCEFFormatter(
            default_product_name=alternate_product_name
        ).format(mock_file_event_log_record)
        cef_parts = get_cef_parts(cef_out)
        assert cef_parts[2] == alternate_product_name

    def test_format_uses_correct_default_severity(self, mock_file_event_log_record):
        cef_out = FileEventDictToCEFFormatter().format(mock_file_event_log_record)
        cef_parts = get_cef_parts(cef_out)
        assert cef_parts[6] == "5"

    def test_format_uses_correct_severity(self, mock_file_event_log_record):
        alternate_severity = "7"
        cef_out = FileEventDictToCEFFormatter(
            default_severity_level=alternate_severity
        ).format(mock_file_event_log_record)
        cef_parts = get_cef_parts(cef_out)
        assert cef_parts[6] == alternate_severity

    # Null/empty fields must be dropped entirely from the CEF extension.
    def test_format_excludes_none_values_from_output(self, mock_file_event_log_record):
        cef_out = FileEventDictToCEFFormatter().format(mock_file_event_log_record)
        cef_parts = get_cef_parts(cef_out)
        assert "=None " not in cef_parts[-1]

    def test_format_excludes_empty_values_from_output(self, mock_file_event_log_record):
        cef_out = FileEventDictToCEFFormatter().format(mock_file_event_log_record)
        cef_parts = get_cef_parts(cef_out)
        assert "= " not in cef_parts[-1]

    def test_format_excludes_file_event_fields_not_in_cef_map(
        self, mock_file_event_log_record
    ):
        test_value = "definitelyExcludedValue"
        mock_file_event_log_record.msg["unmappedFieldName"] = test_value
        cef_out = FileEventDictToCEFFormatter().format(mock_file_event_log_record)
        cef_parts = get_cef_parts(cef_out)
        # Remove the injected key so the shared payload dict is not polluted
        # for other tests.
        del mock_file_event_log_record.msg["unmappedFieldName"]
        assert test_value not in cef_parts[-1]

    # The remaining tests each check one file-event field -> CEF extension
    # key mapping.
    def test_format_includes_os_hostname_if_present(self, mock_file_event_log_record):
        expected_field_name = "shost"
        expected_value = "Test's MacBook Air"
        cef_out = FileEventDictToCEFFormatter().format(mock_file_event_log_record)
        assert key_value_pair_in_cef_extension(
            expected_field_name, expected_value, cef_out
        )

    def test_format_includes_public_ip_address_if_present(
        self, mock_file_event_log_record
    ):
        expected_field_name = "src"
        expected_value = "71.34.4.22"
        cef_out = FileEventDictToCEFFormatter().format(mock_file_event_log_record)
        assert key_value_pair_in_cef_extension(
            expected_field_name, expected_value, cef_out
        )

    def test_format_includes_user_uid_if_present(self, mock_file_event_log_record):
        expected_field_name = "suid"
        expected_value = "912338501981077099"
        cef_out = FileEventDictToCEFFormatter().format(mock_file_event_log_record)
        assert key_value_pair_in_cef_extension(
            expected_field_name, expected_value, cef_out
        )

    def test_format_includes_device_username_if_present(
        self, mock_file_event_log_record
    ):
        expected_field_name = "suser"
        expected_value = "test.testerson+testair@example.com"
        cef_out = FileEventDictToCEFFormatter().format(mock_file_event_log_record)
        assert key_value_pair_in_cef_extension(
            expected_field_name, expected_value, cef_out
        )

    def test_format_includes_removable_media_capacity_if_present(
        self, mock_file_event_removable_media_event_log_record
    ):
        expected_field_name = "cn1"
        expected_value = "5000000"
        cef_out = FileEventDictToCEFFormatter().format(
            mock_file_event_removable_media_event_log_record
        )
        assert key_value_pair_in_cef_extension(
            expected_field_name, expected_value, cef_out
        )

    def test_format_includes_removable_media_capacity_label_if_present(
        self, mock_file_event_removable_media_event_log_record
    ):
        expected_field_name = "cn1Label"
        expected_value = "Code42AEDRemovableMediaCapacity"
        cef_out = FileEventDictToCEFFormatter().format(
            mock_file_event_removable_media_event_log_record
        )
        assert key_value_pair_in_cef_extension(
            expected_field_name, expected_value, cef_out
        )

    def test_format_includes_removable_media_bus_type_if_present(
        self, mock_file_event_removable_media_event_log_record
    ):
        expected_field_name = "cs1"
        expected_value = "TEST_BUS_TYPE"
        cef_out = FileEventDictToCEFFormatter().format(
            mock_file_event_removable_media_event_log_record
        )
        assert key_value_pair_in_cef_extension(
            expected_field_name, expected_value, cef_out
        )

    def test_format_includes_removable_media_bus_type_label_if_present(
        self, mock_file_event_removable_media_event_log_record
    ):
        expected_field_name = "cs1Label"
        expected_value = "Code42AEDRemovableMediaBusType"
        cef_out = FileEventDictToCEFFormatter().format(
            mock_file_event_removable_media_event_log_record
        )
        assert key_value_pair_in_cef_extension(
            expected_field_name, expected_value, cef_out
        )

    def test_format_includes_removable_media_vendor_if_present(
        self, mock_file_event_removable_media_event_log_record
    ):
        expected_field_name = "cs2"
        expected_value = "TEST_VENDOR_NAME"
        cef_out = FileEventDictToCEFFormatter().format(
            mock_file_event_removable_media_event_log_record
        )
        assert key_value_pair_in_cef_extension(
            expected_field_name, expected_value, cef_out
        )

    def test_format_includes_removable_media_vendor_label_if_present(
        self, mock_file_event_removable_media_event_log_record
    ):
        expected_field_name = "cs2Label"
        expected_value = "Code42AEDRemovableMediaVendor"
        cef_out = FileEventDictToCEFFormatter().format(
            mock_file_event_removable_media_event_log_record
        )
        assert key_value_pair_in_cef_extension(
            expected_field_name, expected_value, cef_out
        )

    def test_format_includes_removable_media_name_if_present(
        self, mock_file_event_removable_media_event_log_record
    ):
        expected_field_name = "cs3"
        expected_value = "TEST_NAME"
        cef_out = FileEventDictToCEFFormatter().format(
            mock_file_event_removable_media_event_log_record
        )
        assert key_value_pair_in_cef_extension(
            expected_field_name, expected_value, cef_out
        )

    def test_format_includes_removable_media_name_label_if_present(
        self, mock_file_event_removable_media_event_log_record
    ):
        expected_field_name = "cs3Label"
        expected_value = "Code42AEDRemovableMediaName"
        cef_out = FileEventDictToCEFFormatter().format(
            mock_file_event_removable_media_event_log_record
        )
        assert key_value_pair_in_cef_extension(
            expected_field_name, expected_value, cef_out
        )

    def test_format_includes_removable_media_serial_number_if_present(
        self, mock_file_event_removable_media_event_log_record
    ):
        expected_field_name = "cs4"
        expected_value = "TEST_SERIAL_NUMBER"
        cef_out = FileEventDictToCEFFormatter().format(
            mock_file_event_removable_media_event_log_record
        )
        assert key_value_pair_in_cef_extension(
            expected_field_name, expected_value, cef_out
        )

    def test_format_includes_removable_media_serial_number_label_if_present(
        self, mock_file_event_removable_media_event_log_record
    ):
        expected_field_name = "cs4Label"
        expected_value = "Code42AEDRemovableMediaSerialNumber"
        cef_out = FileEventDictToCEFFormatter().format(
            mock_file_event_removable_media_event_log_record
        )
        assert key_value_pair_in_cef_extension(
            expected_field_name, expected_value, cef_out
        )

    # Cloud-activity events map `actor` (rather than deviceUserName) to suser.
    def test_format_includes_actor_if_present(
        self, mock_file_event_cloud_activity_event_log_record
    ):
        expected_field_name = "suser"
        expected_value = "actor@example.com"
        cef_out = FileEventDictToCEFFormatter().format(
            mock_file_event_cloud_activity_event_log_record
        )
        assert key_value_pair_in_cef_extension(
            expected_field_name, expected_value, cef_out
        )

    def test_format_includes_sync_destination_if_present(
        self, mock_file_event_cloud_activity_event_log_record
    ):
        expected_field_name = "destinationServiceName"
        expected_value = "TEST_SYNC_DESTINATION"
        cef_out = FileEventDictToCEFFormatter().format(
            mock_file_event_cloud_activity_event_log_record
        )
        assert key_value_pair_in_cef_extension(
            expected_field_name, expected_value, cef_out
        )

    # Timestamp fields are rendered as epoch milliseconds.
    def test_format_includes_event_timestamp_if_present(
        self, mock_file_event_log_record
    ):
        expected_field_name = "end"
        expected_value = "1567996943851"
        cef_out = FileEventDictToCEFFormatter().format(mock_file_event_log_record)
        assert key_value_pair_in_cef_extension(
            expected_field_name, expected_value, cef_out
        )

    def test_format_includes_create_timestamp_if_present(
        self, mock_file_event_log_record
    ):
        expected_field_name = "fileCreateTime"
        expected_value = "1342923569000"
        cef_out = FileEventDictToCEFFormatter().format(mock_file_event_log_record)
        assert key_value_pair_in_cef_extension(
            expected_field_name, expected_value, cef_out
        )

    def test_format_includes_md5_checksum_if_present(self, mock_file_event_log_record):
        expected_field_name = "fileHash"
        expected_value = "19b92e63beb08c27ab4489fcfefbbe44"
        cef_out = FileEventDictToCEFFormatter().format(mock_file_event_log_record)
        assert key_value_pair_in_cef_extension(
            expected_field_name, expected_value, cef_out
        )

    def test_format_includes_modify_timestamp_if_present(
        self, mock_file_event_log_record
    ):
        expected_field_name = "fileModificationTime"
        expected_value = "1355886008000"
        cef_out = FileEventDictToCEFFormatter().format(mock_file_event_log_record)
        assert key_value_pair_in_cef_extension(
            expected_field_name, expected_value, cef_out
        )

    def test_format_includes_file_path_if_present(self, mock_file_event_log_record):
        expected_field_name = "filePath"
        expected_value = "/Users/testtesterson/Downloads/About Downloads.lpdf/Contents/Resources/English.lproj/"
        cef_out = FileEventDictToCEFFormatter().format(mock_file_event_log_record)
        assert key_value_pair_in_cef_extension(
            expected_field_name, expected_value, cef_out
        )

    def test_format_includes_file_name_if_present(self, mock_file_event_log_record):
        expected_field_name = "fname"
        expected_value = "InfoPlist.strings"
        cef_out = FileEventDictToCEFFormatter().format(mock_file_event_log_record)
        assert key_value_pair_in_cef_extension(
            expected_field_name, expected_value, cef_out
        )

    def test_format_includes_file_size_if_present(self, mock_file_event_log_record):
        expected_field_name = "fsize"
        expected_value = "86"
        cef_out = FileEventDictToCEFFormatter().format(mock_file_event_log_record)
        assert key_value_pair_in_cef_extension(
            expected_field_name, expected_value, cef_out
        )

    def test_format_includes_file_category_if_present(self, mock_file_event_log_record):
        expected_field_name = "fileType"
        expected_value = "UNCATEGORIZED"
        cef_out = FileEventDictToCEFFormatter().format(mock_file_event_log_record)
        assert key_value_pair_in_cef_extension(
            expected_field_name, expected_value, cef_out
        )

    def test_format_includes_exposure_if_present(self, mock_file_event_log_record):
        expected_field_name = "reason"
        expected_value = "ApplicationRead"
        cef_out = FileEventDictToCEFFormatter().format(mock_file_event_log_record)
        assert key_value_pair_in_cef_extension(
            expected_field_name, expected_value, cef_out
        )

    def test_format_includes_url_if_present(
        self, mock_file_event_cloud_activity_event_log_record
    ):
        expected_field_name = "filePath"
        expected_value = "https://www.example.com"
        cef_out = FileEventDictToCEFFormatter().format(
            mock_file_event_cloud_activity_event_log_record
        )
        assert key_value_pair_in_cef_extension(
            expected_field_name, expected_value, cef_out
        )

    def test_format_includes_insertion_timestamp_if_present(
        self, mock_file_event_log_record
    ):
        expected_field_name = "rt"
        expected_value = "1568069262724"
        cef_out = FileEventDictToCEFFormatter().format(mock_file_event_log_record)
        assert key_value_pair_in_cef_extension(
            expected_field_name, expected_value, cef_out
        )

    def test_format_includes_process_name_if_present(self, mock_file_event_log_record):
        expected_field_name = "sproc"
        expected_value = "/Applications/Google Chrome.app/Contents/MacOS/Google Chrome"
        cef_out = 
FileEventDictToCEFFormatter().format(mock_file_event_log_record)
        assert key_value_pair_in_cef_extension(
            expected_field_name, expected_value, cef_out
        )

    def test_format_includes_event_id_if_present(self, mock_file_event_log_record):
        expected_field_name = "externalId"
        expected_value = "0_1d71796f-af5b-4231-9d8e-df6434da4663_912339407325443353_918253081700247636_16"
        cef_out = FileEventDictToCEFFormatter().format(mock_file_event_log_record)
        assert key_value_pair_in_cef_extension(
            expected_field_name, expected_value, cef_out
        )

    def test_format_includes_device_uid_if_present(self, mock_file_event_log_record):
        expected_field_name = "deviceExternalId"
        expected_value = "912339407325443353"
        cef_out = FileEventDictToCEFFormatter().format(mock_file_event_log_record)
        assert key_value_pair_in_cef_extension(
            expected_field_name, expected_value, cef_out
        )

    def test_format_includes_domain_name_if_present(self, mock_file_event_log_record):
        expected_field_name = "dvchost"
        expected_value = "192.168.0.3"
        cef_out = FileEventDictToCEFFormatter().format(mock_file_event_log_record)
        assert key_value_pair_in_cef_extension(
            expected_field_name, expected_value, cef_out
        )

    def test_format_includes_source_if_present(self, mock_file_event_log_record):
        expected_field_name = "sourceServiceName"
        expected_value = "Endpoint"
        cef_out = FileEventDictToCEFFormatter().format(mock_file_event_log_record)
        assert key_value_pair_in_cef_extension(
            expected_field_name, expected_value, cef_out
        )

    def test_format_includes_cloud_drive_id_if_present(
        self, mock_file_event_cloud_activity_event_log_record
    ):
        expected_field_name = "aid"
        expected_value = "TEST_CLOUD_DRIVE_ID"
        cef_out = FileEventDictToCEFFormatter().format(
            mock_file_event_cloud_activity_event_log_record
        )
        assert key_value_pair_in_cef_extension(
            expected_field_name, expected_value, cef_out
        )

    # List-valued fields are flattened into a comma-separated string.
    def test_format_includes_shared_with_if_present(
        self, mock_file_event_cloud_activity_event_log_record
    ):
        expected_field_name = "duser"
        expected_value = "example1@example.com,example2@example.com"
        cef_out = FileEventDictToCEFFormatter().format(
            mock_file_event_cloud_activity_event_log_record
        )
        assert key_value_pair_in_cef_extension(
            expected_field_name, expected_value, cef_out
        )

    def test_format_includes_tab_url_if_present(
        self, mock_file_event_cloud_activity_event_log_record
    ):
        expected_field_name = "request"
        expected_value = "TEST_TAB_URL"
        cef_out = FileEventDictToCEFFormatter().format(
            mock_file_event_cloud_activity_event_log_record
        )
        assert key_value_pair_in_cef_extension(
            expected_field_name, expected_value, cef_out
        )

    def test_format_includes_window_title_if_present(
        self, mock_file_event_cloud_activity_event_log_record
    ):
        expected_field_name = "requestClientApplication"
        expected_value = "TEST_WINDOW_TITLE"
        cef_out = FileEventDictToCEFFormatter().format(
            mock_file_event_cloud_activity_event_log_record
        )
        assert key_value_pair_in_cef_extension(
            expected_field_name, expected_value, cef_out
        )

    def test_format_includes_email_recipients_if_present(
        self, mock_file_event_email_event_log_record
    ):
        expected_field_name = "duser"
        expected_value = "test.recipient1@example.com,test.recipient2@example.com"
        cef_out = FileEventDictToCEFFormatter().format(
            mock_file_event_email_event_log_record
        )
        assert key_value_pair_in_cef_extension(
            expected_field_name, expected_value, cef_out
        )

    def test_format_includes_email_sender_if_present(
        self, mock_file_event_email_event_log_record
    ):
        expected_field_name = "suser"
        expected_value = "TEST_EMAIL_SENDER"
        cef_out = FileEventDictToCEFFormatter().format(
            mock_file_event_email_event_log_record
        )
        assert key_value_pair_in_cef_extension(
            expected_field_name, expected_value, cef_out
        )

    # Event-type -> CEF signature-ID mapping (C42200..C42204).
    def test_format_includes_correct_event_name_and_signature_id_for_created(
        self, mock_file_event_log_record
    ):
        event_type = "CREATED"
        mock_file_event_log_record.msg["eventType"] = event_type
        cef_out = FileEventDictToCEFFormatter().format(mock_file_event_log_record)
        assert event_name_assigned_correct_signature_id(event_type, "C42200", cef_out)

    def test_format_includes_correct_event_name_and_signature_id_for_modified(
        self, mock_file_event_log_record
    ):
        event_type = "MODIFIED"
        mock_file_event_log_record.msg["eventType"] = event_type
        cef_out = FileEventDictToCEFFormatter().format(mock_file_event_log_record)
        assert event_name_assigned_correct_signature_id(event_type, "C42201", cef_out)

    def test_format_includes_correct_event_name_and_signature_id_for_deleted(
        self, mock_file_event_log_record
    ):
        event_type = "DELETED"
        mock_file_event_log_record.msg["eventType"] = event_type
        cef_out = FileEventDictToCEFFormatter().format(mock_file_event_log_record)
        assert event_name_assigned_correct_signature_id(event_type, "C42202", cef_out)

    def test_format_includes_correct_event_name_and_signature_id_for_read_by_app(
        self, mock_file_event_log_record
    ):
        event_type = "READ_BY_APP"
        mock_file_event_log_record.msg["eventType"] = event_type
        cef_out = FileEventDictToCEFFormatter().format(mock_file_event_log_record)
        assert event_name_assigned_correct_signature_id(event_type, "C42203", cef_out)

    def test_format_includes_correct_event_name_and_signature_id_for_emailed(
        self, mock_file_event_email_event_log_record
    ):
        event_type = "EMAILED"
        mock_file_event_email_event_log_record.msg["eventType"] = event_type
        cef_out = FileEventDictToCEFFormatter().format(
            mock_file_event_email_event_log_record
        )
        assert event_name_assigned_correct_signature_id(event_type, "C42204", cef_out)


class TestFileEventDictToJSONFormatter:
    def test_format_returns_expected_number_of_fields(self, mock_file_event_log_record):
        json_out = FileEventDictToJSONFormatter().format(mock_file_event_log_record)
        file_event_dict = 
json.loads(json_out) + assert len(file_event_dict) == 25 # Fields that are not null or an empty list + + def test_format_returns_only_non_null_fields(self, mock_file_event_log_record): + json_out = FileEventDictToJSONFormatter().format(mock_file_event_log_record) + file_event_dict = json.loads(json_out) + for key in file_event_dict: + if not file_event_dict[key] and file_event_dict != 0: + raise AssertionError() + assert True + + +class TestFileEventDictToRawJSONFormatter: + def test_format_returns_expected_number_of_fields(self, mock_file_event_log_record): + json_out = FileEventDictToRawJSONFormatter().format(mock_file_event_log_record) + file_event_dict = json.loads(json_out) + assert len(file_event_dict) == 40 + + def test_format_is_okay_with_null_values(self, mock_file_event_log_record): + json_out = FileEventDictToRawJSONFormatter().format(mock_file_event_log_record) + file_event_dict = json.loads(json_out) + assert ( + file_event_dict["actor"] is None + ) # actor happens to be null in this case. + + +def get_cef_parts(cef_str): + return cef_str.split("|") + + +def key_value_pair_in_cef_extension(field_name, field_value, cef_str): + cef_parts = get_cef_parts(cef_str) + kvp = f"{field_name}={field_value}" + return kvp in cef_parts[-1] + + +def event_name_assigned_correct_signature_id(event_name, signature_id, cef_out): + if event_name in FILE_EVENT_TO_SIGNATURE_ID_MAP: + cef_parts = get_cef_parts(cef_out) + return cef_parts[4] == signature_id and cef_parts[5] == event_name + + # `assert False` can cause test call to be removed, according to flake8. 
    raise AssertionError()
diff --git a/tests/logger/test_handlers.py b/tests/logger/test_handlers.py new file mode 100644 index 000000000..bfc3ac445 --- /dev/null +++ b/tests/logger/test_handlers.py @@ -0,0 +1,257 @@
import ssl
from socket import IPPROTO_TCP
from socket import IPPROTO_UDP
from socket import SOCK_DGRAM
from socket import SOCK_STREAM
from socket import socket
from socket import SocketKind

import pytest

from code42cli.logger import FileEventDictToRawJSONFormatter
from code42cli.logger.enums import ServerProtocol
from code42cli.logger.handlers import NoPrioritySysLogHandler
from code42cli.logger.handlers import SyslogServerNetworkConnectionError

_TEST_HOST = "example.com"
_TEST_PORT = 5000
_TEST_CERTS = "path/to/cert.crt"
# Parametrization shorthands for protocol-dependent tests.
tls_and_tcp_test = pytest.mark.parametrize(
    "protocol", (ServerProtocol.TLS_TCP, ServerProtocol.TCP)
)
tcp_and_udp_test = pytest.mark.parametrize(
    "protocol", (ServerProtocol.TCP, ServerProtocol.UDP)
)


class SocketMocks:
    # Simple namespace bundling the socket mock and its initializer patch.
    mock_socket = None
    socket_initializer = None

    class SSLMocks:
        # SSL-context mock and the patched ssl.create_default_context.
        mock_ssl_context = None
        context_creator = None


@pytest.fixture(autouse=True)
def socket_mocks(mocker):
    # Autouse: every test in this module runs with socket creation and SSL
    # context creation patched out, so no real network activity occurs.
    mocks = SocketMocks()
    new_socket = mocker.MagicMock(spec=ssl.SSLSocket)
    mocks.mock_socket = new_socket
    mocks.socket_initializer = _get_normal_socket_initializer_mocks(mocker, new_socket)
    mocks.SSLMocks.mock_ssl_context = mocker.MagicMock(ssl.SSLContext)
    mocks.SSLMocks.mock_ssl_context.wrap_socket.return_value = new_socket
    mocks.SSLMocks.context_creator = mocker.patch(
        "code42cli.logger.handlers.ssl.create_default_context"
    )
    mocks.SSLMocks.context_creator.return_value = mocks.SSLMocks.mock_ssl_context
    return mocks


@pytest.fixture()
def system_exception_info(mocker):
    return mocker.patch("code42cli.logger.handlers.sys.exc_info")


@pytest.fixture()
def broken_pipe_error(system_exception_info):
    system_exception_info.return_value = (BrokenPipeError, None, None)
    return 
system_exception_info


@pytest.fixture()
def connection_reset_error(system_exception_info):
    system_exception_info.return_value = (ConnectionResetError, None, None)
    return system_exception_info


def _get_normal_socket_initializer_mocks(mocker, new_socket):
    # Patch socket.socket.__new__ so constructing a socket yields our mock.
    new_socket_magic_method = mocker.patch(
        "code42cli.logger.handlers.socket.socket.__new__"
    )
    new_socket_magic_method.return_value = new_socket
    return new_socket_magic_method


class TestNoPrioritySysLogHandler:
    """Tests for the syslog handler over UDP, TCP, and TLS-over-TCP."""

    def test_init_sets_expected_address(self):
        handler = NoPrioritySysLogHandler(
            _TEST_HOST, _TEST_PORT, ServerProtocol.UDP, None
        )
        assert handler.address == (_TEST_HOST, _TEST_PORT)

    @tls_and_tcp_test
    def test_init_when_stream_based_sets_expected_sock_type(self, protocol):
        handler = NoPrioritySysLogHandler(_TEST_HOST, _TEST_PORT, protocol, None)
        actual = handler.socktype
        assert actual == SocketKind.SOCK_STREAM

    def test_init_when_udp_sets_expected_sock_type(self):
        handler = NoPrioritySysLogHandler(
            _TEST_HOST, _TEST_PORT, ServerProtocol.UDP, None
        )
        actual = handler.socktype
        assert actual == SocketKind.SOCK_DGRAM

    def test_init_sets_socket_to_none(self):
        # The socket is created lazily in connect_socket(), not in __init__.
        handler = NoPrioritySysLogHandler(
            _TEST_HOST, _TEST_PORT, ServerProtocol.UDP, None
        )
        assert handler.socket is None

    @tcp_and_udp_test
    def test_init_when_not_tls_sets_wrap_socket_to_false(self, protocol):
        handler = NoPrioritySysLogHandler(_TEST_HOST, _TEST_PORT, protocol, None)
        assert not handler._wrap_socket

    def test_init_when_using_tls_sets_wrap_socket_to_true(self):
        handler = NoPrioritySysLogHandler(
            _TEST_HOST, _TEST_PORT, ServerProtocol.TLS_TCP, _TEST_CERTS
        )
        assert handler._wrap_socket
        assert handler._certs == _TEST_CERTS

    def test_connect_socket_only_connects_once(self, socket_mocks):
        handler = NoPrioritySysLogHandler(
            _TEST_HOST, _TEST_PORT, ServerProtocol.UDP, None
        )
        handler.connect_socket()
        handler.connect_socket()
        assert socket_mocks.socket_initializer.call_count == 1

    def test_connect_socket_when_udp_initializes_with_expected_properties(
        self, socket_mocks
    ):
        handler = NoPrioritySysLogHandler(
            _TEST_HOST, _TEST_PORT, ServerProtocol.UDP, None
        )
        handler.connect_socket()
        call_args = socket_mocks.socket_initializer.call_args[0]
        assert call_args[0] == socket
        assert call_args[2] == SOCK_DGRAM
        assert call_args[3] == IPPROTO_UDP

    @tls_and_tcp_test
    def test_connect_socket_when_tcp_initializes_with_expected_properties(
        self, socket_mocks, protocol
    ):
        handler = NoPrioritySysLogHandler(_TEST_HOST, _TEST_PORT, protocol, None)
        handler.connect_socket()
        call_args = socket_mocks.socket_initializer.call_args[0]
        assert call_args[0] == socket
        assert call_args[2] == SOCK_STREAM
        assert call_args[3] == IPPROTO_TCP
        assert socket_mocks.mock_socket.connect.call_count == 1

    def test_connect_when_tls_calls_create_default_context(self, socket_mocks):
        handler = NoPrioritySysLogHandler(
            _TEST_HOST, _TEST_PORT, ServerProtocol.TLS_TCP, "certs"
        )
        handler.connect_socket()
        call_args = socket_mocks.SSLMocks.context_creator.call_args
        assert call_args[1]["cafile"] == "certs"

    @pytest.mark.parametrize("ignore", ("ignore", "IGNORE"))
    def test_connect_when_tls_and_told_to_ignore_certs_sets_expected_context_properties(
        self, socket_mocks, ignore
    ):
        # Passing the literal "ignore" as the certs value (case-insensitive)
        # disables certificate verification.
        handler = NoPrioritySysLogHandler(
            _TEST_HOST, _TEST_PORT, ServerProtocol.TLS_TCP, ignore
        )
        handler.connect_socket()
        assert socket_mocks.SSLMocks.mock_ssl_context.verify_mode == ssl.CERT_NONE
        assert not socket_mocks.SSLMocks.mock_ssl_context.check_hostname

    @pytest.mark.parametrize("ignore", ("ignore", "IGNORE"))
    def test_connect_when_tls_and_told_to_ignore_certs_creates_context_with_none_certs(
        self, socket_mocks, ignore
    ):
        handler = NoPrioritySysLogHandler(
            _TEST_HOST, _TEST_PORT, ServerProtocol.TLS_TCP, ignore
        )
        handler.connect_socket()
        socket_mocks.SSLMocks.context_creator.assert_called_once_with(cafile=None)

    @tls_and_tcp_test
    def test_connect_socket_when_tcp_or_tls_sets_timeout_for_connection_and_resets(
        self, socket_mocks, protocol
    ):
        handler = NoPrioritySysLogHandler(_TEST_HOST, _TEST_PORT, protocol, None)
        handler.connect_socket()
        call_args = socket_mocks.mock_socket.settimeout.call_args_list
        # A 10-second timeout applies only while connecting; afterwards the
        # socket reverts to blocking (timeout=None).
        assert len(call_args) == 2
        assert call_args[0][0][0] == 10
        assert call_args[1][0][0] is None

    @tls_and_tcp_test
    def test_emit_when_tcp_calls_socket_sendall_with_expected_message(
        self, mock_file_event_log_record, protocol
    ):
        handler = NoPrioritySysLogHandler(_TEST_HOST, _TEST_PORT, protocol, None)
        handler.connect_socket()
        formatter = FileEventDictToRawJSONFormatter()
        handler.setFormatter(formatter)
        handler.emit(mock_file_event_log_record)
        expected_message = (formatter.format(mock_file_event_log_record) + "\n").encode(
            "utf-8"
        )
        handler.socket.sendall.assert_called_once_with(expected_message)

    def test_emit_when_udp_calls_socket_sendto_with_expected_message_and_address(
        self, mock_file_event_log_record
    ):
        handler = NoPrioritySysLogHandler(
            _TEST_HOST, _TEST_PORT, ServerProtocol.UDP, None
        )
        handler.connect_socket()
        formatter = FileEventDictToRawJSONFormatter()
        handler.setFormatter(formatter)
        handler.emit(mock_file_event_log_record)
        expected_message = (formatter.format(mock_file_event_log_record) + "\n").encode(
            "utf-8"
        )
        handler.socket.sendto.assert_called_once_with(
            expected_message, (_TEST_HOST, _TEST_PORT)
        )

    def test_handle_error_when_broken_pipe_error_occurs_raises_expected_error(
        self, mock_file_event_log_record, broken_pipe_error
    ):
        handler = NoPrioritySysLogHandler(
            _TEST_HOST, _TEST_PORT, ServerProtocol.UDP, None
        )
        with pytest.raises(SyslogServerNetworkConnectionError):
            handler.handleError(mock_file_event_log_record)

    def test_handle_error_when_connection_reset_error_occurs_raises_expected_error(
        self, mock_file_event_log_record, connection_reset_error
    ):
        handler = NoPrioritySysLogHandler(
            _TEST_HOST, _TEST_PORT, ServerProtocol.UDP, None
        )
        with pytest.raises(SyslogServerNetworkConnectionError):
            handler.handleError(mock_file_event_log_record)

    def test_close_when_using_tls_unwraps_socket(self):
        handler = NoPrioritySysLogHandler(
            _TEST_HOST, _TEST_PORT, ServerProtocol.TLS_TCP, None
        )
        handler.connect_socket()
        handler.close()
        assert handler.socket.unwrap.call_count == 1

    @tcp_and_udp_test
    def test_close_when_not_using_tls_does_not_unwrap_socket(self, protocol):
        handler = NoPrioritySysLogHandler(_TEST_HOST, _TEST_PORT, protocol, None)
        handler.connect_socket()
        handler.close()
        assert not handler.socket.unwrap.call_count

    def test_close_globally_closes(self, mocker):
        # close() must also invoke the base logging.Handler.close().
        global_close = mocker.patch("code42cli.logger.handlers.logging.Handler.close")
        handler = NoPrioritySysLogHandler(
            _TEST_HOST, _TEST_PORT, ServerProtocol.UDP, None
        )
        handler.connect_socket()
        handler.close()
        assert global_close.call_count == 1
diff --git a/tests/logger/test_init.py b/tests/logger/test_init.py new file mode 100644 index 000000000..640f67b94 --- /dev/null +++ b/tests/logger/test_init.py @@ -0,0 +1,172 @@
import logging
import os
from logging.handlers import RotatingFileHandler

import pytest
from requests import Request

from code42cli.enums import OutputFormat
from code42cli.enums import SendToFileEventsOutputFormat
from code42cli.logger import add_handler_to_logger
from code42cli.logger import CliLogger
from code42cli.logger import get_logger_for_server
from code42cli.logger import get_view_error_details_message
from code42cli.logger import logger_has_handlers
from code42cli.logger.enums import ServerProtocol
from code42cli.logger.formatters import FileEventDictToCEFFormatter
from code42cli.logger.formatters import FileEventDictToJSONFormatter
from code42cli.logger.formatters import FileEventDictToRawJSONFormatter
# --- tests/logger/test_init.py (continued) ---
from code42cli.logger.handlers import NoPrioritySysLogHandler
from code42cli.util import get_user_project_path


@pytest.fixture(autouse=True)
def init_socket_mock(mocker):
    # Prevent every test from opening a real socket to a syslog server.
    return mocker.patch("code42cli.logger.NoPrioritySysLogHandler.connect_socket")


@pytest.fixture(autouse=True)
def fresh_syslog_handler(init_socket_mock):
    # Set handlers to empty list so the cached server logger gets
    # re-initialized for each test.
    get_logger_for_server(
        "example.com",
        ServerProtocol.TCP,
        SendToFileEventsOutputFormat.CEF,
        None,
    ).handlers = []
    # Zero the counter polluted by the setup call above so tests can assert
    # on connect_socket calls that they trigger themselves.
    init_socket_mock.call_count = 0


def test_add_handler_to_logger_does_as_expected():
    logger = logging.getLogger("TEST_CODE42_CLI")
    formatter = logging.Formatter()
    handler = logging.Handler()
    add_handler_to_logger(logger, handler, formatter)
    assert handler in logger.handlers
    assert handler.formatter == formatter


def test_logger_has_handlers_when_logger_has_handlers_returns_true():
    logger = logging.getLogger("TEST_CODE42_CLI")
    handler = logging.Handler()
    logger.addHandler(handler)
    assert logger_has_handlers(logger)


def test_logger_has_handlers_when_logger_does_not_have_handlers_returns_false():
    logger = logging.getLogger("TEST_CODE42_CLI")
    logger.handlers = []
    assert not logger_has_handlers(logger)


def test_get_view_exceptions_location_message_returns_expected_message():
    actual = get_view_error_details_message()
    path = os.path.join(get_user_project_path("log"), "code42_errors.log")
    expected = f"View details in {path}"
    assert actual == expected


def test_get_logger_for_server_has_info_level():
    logger = get_logger_for_server(
        "example.com", ServerProtocol.TCP, SendToFileEventsOutputFormat.CEF, None
    )
    assert logger.level == logging.INFO


def test_get_logger_for_server_when_given_cef_format_uses_cef_formatter():
    logger = get_logger_for_server(
        "example.com", ServerProtocol.TCP, SendToFileEventsOutputFormat.CEF, None
    )
    assert isinstance(logger.handlers[0].formatter, FileEventDictToCEFFormatter)


def test_get_logger_for_server_when_given_json_format_uses_json_formatter():
    logger = get_logger_for_server(
        "example.com", ServerProtocol.TCP, OutputFormat.JSON, None
    )
    assert isinstance(logger.handlers[0].formatter, FileEventDictToJSONFormatter)


def test_get_logger_for_server_when_given_raw_json_format_uses_raw_json_formatter():
    logger = get_logger_for_server(
        "example.com", ServerProtocol.TCP, OutputFormat.RAW, None
    )
    assert isinstance(logger.handlers[0].formatter, FileEventDictToRawJSONFormatter)


def test_get_logger_for_server_when_called_twice_only_has_one_handler():
    get_logger_for_server("example.com", ServerProtocol.TCP, OutputFormat.JSON, None)
    logger = get_logger_for_server(
        "example.com", ServerProtocol.TCP, SendToFileEventsOutputFormat.CEF, None
    )
    assert len(logger.handlers) == 1


def test_get_logger_for_server_uses_no_priority_syslog_handler():
    logger = get_logger_for_server(
        "example.com", ServerProtocol.TCP, SendToFileEventsOutputFormat.CEF, None
    )
    assert isinstance(logger.handlers[0], NoPrioritySysLogHandler)


def test_get_logger_for_server_constructs_handler_with_expected_args(
    mocker, monkeypatch
):
    no_priority_syslog_handler = mocker.patch(
        "code42cli.logger.handlers.NoPrioritySysLogHandler.__init__"
    )
    no_priority_syslog_handler.return_value = None
    get_logger_for_server(
        "example.com", ServerProtocol.TCP, SendToFileEventsOutputFormat.CEF, "cert"
    )
    # 514 is the default syslog port applied when the hostname has none.
    no_priority_syslog_handler.assert_called_once_with(
        "example.com", 514, ServerProtocol.TCP, "cert"
    )


def test_get_logger_for_server_when_hostname_includes_port_constructs_handler_with_expected_args(
    mocker,
):
    no_priority_syslog_handler = mocker.patch(
        "code42cli.logger.handlers.NoPrioritySysLogHandler.__init__"
    )
    no_priority_syslog_handler.return_value = None
    get_logger_for_server(
        "example.com:999", ServerProtocol.TCP, SendToFileEventsOutputFormat.CEF, None
    )
    no_priority_syslog_handler.assert_called_once_with(
        "example.com",
        999,
        ServerProtocol.TCP,
        None,
    )


def test_get_logger_for_server_inits_socket(init_socket_mock):
    get_logger_for_server(
        "example.com", ServerProtocol.TCP, SendToFileEventsOutputFormat.CEF, None
    )
    assert init_socket_mock.call_count == 1


class TestCliLogger:
    def test_init_creates_user_error_logger_with_expected_handlers(self):
        logger = CliLogger()
        handler_types = [type(h) for h in logger._logger.handlers]
        assert RotatingFileHandler in handler_types

    def test_log_error_logs_expected_text_at_expected_level(self, caplog):
        with caplog.at_level(logging.ERROR):
            ex = Exception("TEST")
            CliLogger().log_error(ex)
            assert str(ex) in caplog.text

    def test_log_verbose_error_logs_expected_text_at_expected_level(
        self, mocker, caplog
    ):
        with caplog.at_level(logging.ERROR):
            # Fixed typo `sepc` -> `spec`: the misspelled kwarg only set a
            # stray `sepc` attribute instead of spec'ing the mock. Plain
            # `spec` (unlike `spec_set`) still allows assigning `.body` below.
            request = mocker.MagicMock(spec=Request)
            request.body = {"foo": "bar"}
            CliLogger().log_verbose_error("code42 dothing --flag YES", request)
            assert "'code42 dothing --flag YES'" in caplog.text
            assert "Request parameters: {'foo': 'bar'}" in caplog.text


# --- tests/test_bulk.py ---
from collections import OrderedDict

import pytest

from code42cli import errors
from code42cli import PRODUCT_NAME
from code42cli.bulk import BulkProcessor
from code42cli.bulk import generate_template_cmd_factory
from code42cli.bulk import run_bulk_process
from code42cli.logger import get_view_error_details_message

_NAMESPACE = f"{PRODUCT_NAME}.bulk"


@pytest.fixture
def bulk_processor(mocker):
    return mocker.MagicMock(spec=BulkProcessor)


@pytest.fixture
def bulk_processor_factory(mocker, bulk_processor):
    # Patch the processor factory so run_bulk_process uses the mock above.
    mock_factory = mocker.patch(f"{_NAMESPACE}._create_bulk_processor")
    mock_factory.return_value = bulk_processor
    return mock_factory


def func_with_multiple_args(sdk, profile, test1, test2):
    pass


def func_with_one_arg(sdk, profile, test1):
    pass


def test_generate_template_cmd_factory_returns_expected_command():
    add_headers = ["foo", "bar"]
    remove_headers = ["test"]
    help_message = "HELP!"
    template = generate_template_cmd_factory(
        group_name="cmd-group",
        commands_dict={"add": add_headers, "remove": remove_headers},
        help_message=help_message,
    )
    assert template.help == help_message
    assert template.name == "generate-template"
    assert len(template.params) == 2
    assert template.params[0].name == "cmd"
    assert "add" in template.params[0].type.choices
    assert "remove" in template.params[0].type.choices
    assert template.params[1].name == "path"


def test_generate_template_cmd_factory_when_using_defaults_returns_expected_command():
    add_headers = ["foo", "bar"]
    remove_headers = ["test"]
    template = generate_template_cmd_factory(
        group_name="cmd-group",
        commands_dict={"add": add_headers, "remove": remove_headers},
    )
    assert (
        template.help
        == "Generate the CSV template needed for bulk adding/removing users."
    )
    assert template.name == "generate-template"
    assert len(template.params) == 2
    assert template.params[0].name == "cmd"
    assert "add" in template.params[0].type.choices
    assert "remove" in template.params[0].type.choices
    assert template.params[1].name == "path"


def test_run_bulk_process_calls_run(bulk_processor, bulk_processor_factory):
    errors.ERRORED = False
    run_bulk_process(func_with_one_arg, None)
    assert bulk_processor.run.call_count


def test_run_bulk_process_creates_processor(bulk_processor_factory):
    errors.ERRORED = False
    rows = [1, 2]
    run_bulk_process(func_with_one_arg, rows)
    bulk_processor_factory.assert_called_once_with(
        func_with_one_arg, rows, None, stats=None, raise_global_error=True
    )


class TestBulkProcessor:
    def test_run_when_reader_returns_ordered_dict_process_kwargs(self):
        processed_rows = []

        def func_for_bulk(test1, test2):
            processed_rows.append((test1, test2))

        rows = [
            OrderedDict({"test1": 1, "test2": 2}),
            OrderedDict({"test1": 3, "test2": 4}),
            OrderedDict({"test1": 5, "test2": 6}),
        ]
        processor = BulkProcessor(func_for_bulk, rows)
        processor.run()
        assert (1, 2) in processed_rows
        assert (3, 4) in processed_rows
        assert (5, 6) in processed_rows

    def test_run_when_reader_returns_dict_process_kwargs(self):
        processed_rows = []

        def func_for_bulk(test1, test2):
            processed_rows.append((test1, test2))

        rows = [
            {"test1": 1, "test2": 2},
            {"test1": 3, "test2": 4},
            {"test1": 5, "test2": 6},
        ]
        processor = BulkProcessor(func_for_bulk, rows)
        processor.run()
        assert (1, 2) in processed_rows
        assert (3, 4) in processed_rows
        assert (5, 6) in processed_rows

    def test_run_when_dict_reader_has_none_for_key_ignores_key(self):
        processed_rows = []

        def func_for_bulk(test1):
            processed_rows.append(test1)

        # A `None` key appears when a CSV row has more columns than headers.
        rows = [{"test1": 1, None: 2}]
        processor = BulkProcessor(func_for_bulk, rows)
        processor.run()
        assert processed_rows == [1]

    def test_run_when_error_occurs_raises_expected_logged_cli_error(self):
        def func_for_bulk(test):
            if test == "row2":
                raise Exception()

        rows = [{"test": "row1"}, {"test": "row2"}, {"test": "row3"}]
        with pytest.raises(errors.LoggedCLIError) as err:
            processor = BulkProcessor(func_for_bulk, rows)
            processor.run()

        assert err.value.message == "Some problems occurred during bulk processing."

    def test_run_when_no_errors_occur_does_not_print_error_message(self, capsys):
        def func_for_bulk(test):
            pass

        rows = [{"test": "row1"}, {"test": "row2"}, {"test": "row3"}]
        processor = BulkProcessor(func_for_bulk, rows)

        processor.run()
        output = capsys.readouterr()
        assert get_view_error_details_message() not in output.out

    def test_run_when_row_is_endline_does_not_process_row(self):
        processed_rows = []

        def func_for_bulk(test):
            processed_rows.append(test)

        rows = [{"test": "row1"}, {"test": "row2"}, {"test": "\n"}]
        processor = BulkProcessor(func_for_bulk, rows)
        processor.run()

        assert "row1" in processed_rows
        assert "row2" in processed_rows
        # Fixed assertion: this previously checked `"row3" not in
        # processed_rows`, which was vacuously true because no "row3" row
        # exists above. Verify that the endline row itself was skipped.
        assert "\n" not in processed_rows
        assert len(processed_rows) == 2

    def test_run_when_reader_returns_dict_rows_containing_empty_strs_converts_them_to_none(
        self,
    ):
        processed_rows = []

        def func_for_bulk(test1, test2):
            processed_rows.append((test1, test2))

        rows = [{"test1": "", "test2": "foo"}, {"test1": "bar", "test2": ""}]
        processor = BulkProcessor(func_for_bulk, rows)
        processor.run()
        assert (None, "foo") in processed_rows
        assert ("bar", None) in processed_rows

    def test_processor_stores_results_in_stats(
        self,
    ):
        def func_for_bulk(test):
            return test

        rows = [{"test": "row1"}, {"test": "row2"}, {"test": "row3"}]
        processor = BulkProcessor(func_for_bulk, rows)
        processor.run()
        assert "row1" in processor._stats.results
        assert "row2" in processor._stats.results
        assert "row3" in processor._stats.results
# --- tests/test_config.py ---
from configparser import ConfigParser

import pytest

from .conftest import MockSection
from code42cli.config import ConfigAccessor
from code42cli.config import NoConfigProfileError

_TEST_PROFILE_NAME = "ProfileA"
_TEST_SECOND_PROFILE_NAME = "ProfileB"
_INTERNAL = "Internal"


@pytest.fixture(autouse=True)
def mock_saver(mocker):
    # Patch `open` in the config module so no test writes a real config file.
    return mocker.patch("code42cli.config.open")


@pytest.fixture
def mock_config_parser(mocker):
    # Fixed typo `sepc` -> `spec`: the misspelled kwarg only set a stray
    # `sepc` attribute instead of constraining the mock to ConfigParser's API.
    return mocker.MagicMock(spec=ConfigParser)


@pytest.fixture
def config_parser_for_multiple_profiles(mock_config_parser):
    mock_config_parser.sections.return_value = [
        _INTERNAL,
        _TEST_PROFILE_NAME,
        _TEST_SECOND_PROFILE_NAME,
    ]
    mock_profile_a = create_mock_profile_object(_TEST_PROFILE_NAME, "test", "test")
    mock_profile_b = create_mock_profile_object(
        _TEST_SECOND_PROFILE_NAME, "test", "test"
    )

    mock_internal = create_internal_object(True, _TEST_PROFILE_NAME)

    def side_effect(item):
        if item == _TEST_PROFILE_NAME:
            return mock_profile_a
        elif item == _TEST_SECOND_PROFILE_NAME:
            return mock_profile_b
        elif item == _INTERNAL:
            return mock_internal

    mock_config_parser.__getitem__.side_effect = side_effect
    return mock_config_parser


@pytest.fixture
def config_parser_for_create(mock_config_parser):
    # First sections() call sees only the Internal section (pre-create);
    # every later call also sees the newly created profile.
    values = [[_INTERNAL], [_INTERNAL, _TEST_PROFILE_NAME]]

    def side_effect():
        if len(values) == 2:
            return values.pop(0)
        return values[0]

    mock_config_parser.sections.side_effect = side_effect
    return mock_config_parser


def create_mock_profile_object(profile_name, authority_url=None, username=None):
    """Build a MockSection resembling a stored profile section."""
    mock_profile = MockSection(profile_name)
    mock_profile[ConfigAccessor.AUTHORITY_KEY] = authority_url
    mock_profile[ConfigAccessor.USERNAME_KEY] = username
    return mock_profile


def create_internal_object(is_complete, default_profile_name=None):
    """Build a MockSection resembling the Internal bookkeeping section."""
    default_profile_name = default_profile_name or ConfigAccessor.DEFAULT_VALUE
    internal_dict = {ConfigAccessor.DEFAULT_PROFILE: default_profile_name}
    internal_section = MockSection(_INTERNAL, internal_dict)

    def getboolean(*args):
        return is_complete

    internal_section.getboolean = getboolean
    return internal_section


def setup_parser_one_profile(profile, internal, parser):
    def side_effect(item):
        if item == _TEST_PROFILE_NAME:
            return profile
        elif item == _INTERNAL:
            return internal

    parser.__getitem__.side_effect = side_effect


class TestConfigAccessor:
    def test_get_profile_when_profile_does_not_exist_raises(self, mock_config_parser):
        mock_config_parser.sections.return_value = [_INTERNAL]
        accessor = ConfigAccessor(mock_config_parser)
        with pytest.raises(NoConfigProfileError):
            accessor.get_profile("Profile Name that does not exist")

    def test_get_profile_when_profile_has_default_name_raises(self, mock_config_parser):
        mock_config_parser.sections.return_value = [_INTERNAL]
        accessor = ConfigAccessor(mock_config_parser)
        with pytest.raises(NoConfigProfileError):
            accessor.get_profile(ConfigAccessor.DEFAULT_VALUE)

    def test_get_profile_returns_expected_profile(self, mock_config_parser):
        mock_config_parser.sections.return_value = [_INTERNAL, _TEST_PROFILE_NAME]
        accessor = ConfigAccessor(mock_config_parser)
        accessor.get_profile(_TEST_PROFILE_NAME)
        assert mock_config_parser.__getitem__.call_args[0][0] == _TEST_PROFILE_NAME

    def test_get_all_profiles_excludes_internal_section(self, mock_config_parser):
        mock_config_parser.sections.return_value = [
            _TEST_PROFILE_NAME,
            _INTERNAL,
            _TEST_SECOND_PROFILE_NAME,
        ]
        accessor = ConfigAccessor(mock_config_parser)
        profiles = accessor.get_all_profiles()
        for p in profiles:
            if p.name == _INTERNAL:
                raise AssertionError()

    def test_get_all_profiles_returns_profiles_with_expected_values(
        self, config_parser_for_multiple_profiles
    ):
        accessor = ConfigAccessor(config_parser_for_multiple_profiles)
        profiles = accessor.get_all_profiles()
        assert profiles[0].name == _TEST_PROFILE_NAME
        assert profiles[1].name == _TEST_SECOND_PROFILE_NAME

    def test_switch_default_profile_switches_internal_value(
        self, config_parser_for_multiple_profiles
    ):
        accessor = ConfigAccessor(config_parser_for_multiple_profiles)
        accessor.switch_default_profile(_TEST_SECOND_PROFILE_NAME)
        assert (
            config_parser_for_multiple_profiles[_INTERNAL][
                ConfigAccessor.DEFAULT_PROFILE
            ]
            == _TEST_SECOND_PROFILE_NAME
        )

    def test_switch_default_profile_saves(
        self, config_parser_for_multiple_profiles, mock_saver
    ):
        accessor = ConfigAccessor(config_parser_for_multiple_profiles)
        accessor.switch_default_profile(_TEST_SECOND_PROFILE_NAME)
        assert mock_saver.call_count

    def test_create_profile_when_given_default_name_does_not_create(
        self, config_parser_for_create
    ):
        accessor = ConfigAccessor(config_parser_for_create)
        with pytest.raises(NoConfigProfileError):
            accessor.create_profile(
                ConfigAccessor.DEFAULT_VALUE, "foo", "bar", False, False, False
            )

    def test_create_profile_when_no_default_profile_sets_default(
        self, mocker, config_parser_for_create, mock_saver
    ):
        # NOTE(review): return value discarded; this call looks vestigial —
        # confirm before removing.
        create_mock_profile_object(_TEST_PROFILE_NAME, None, None)
        mock_internal = create_internal_object(False)
        # NOTE(review): `mock_internal` is passed as both the profile and the
        # internal section here (and in the tests below) — verify intent.
        setup_parser_one_profile(mock_internal, mock_internal, config_parser_for_create)
        accessor = ConfigAccessor(config_parser_for_create)
        accessor.switch_default_profile = mocker.MagicMock()

        accessor.create_profile(
            _TEST_PROFILE_NAME, "example.com", "bar", None, None, None
        )
        assert accessor.switch_default_profile.call_count == 1

    def test_create_profile_when_has_default_profile_does_not_set_default(
        self, mocker, config_parser_for_create, mock_saver
    ):
        create_mock_profile_object(_TEST_PROFILE_NAME, None, None)
        mock_internal = create_internal_object(True, _TEST_PROFILE_NAME)
        setup_parser_one_profile(mock_internal, mock_internal, config_parser_for_create)
        accessor = ConfigAccessor(config_parser_for_create)
        accessor.switch_default_profile = mocker.MagicMock()

        accessor.create_profile(
            _TEST_PROFILE_NAME, "example.com", "bar", None, None, None
        )
        assert not accessor.switch_default_profile.call_count

    def test_create_profile_when_not_existing_saves(
        self, config_parser_for_create, mock_saver
    ):
        create_mock_profile_object(_TEST_PROFILE_NAME, None, None)
        mock_internal = create_internal_object(False)
        setup_parser_one_profile(mock_internal, mock_internal, config_parser_for_create)
        accessor = ConfigAccessor(config_parser_for_create)

        accessor.create_profile(
            _TEST_PROFILE_NAME, "example.com", "bar", None, None, None
        )
        assert mock_saver.call_count

    def test_update_profile_when_no_profile_exists_raises_exception(
        self, config_parser_for_multiple_profiles
    ):
        accessor = ConfigAccessor(config_parser_for_multiple_profiles)
        with pytest.raises(NoConfigProfileError):
            accessor.update_profile("Non-existent Profile")

    def test_update_profile_updates_profile(self, config_parser_for_multiple_profiles):
        accessor = ConfigAccessor(config_parser_for_multiple_profiles)
        address = "NEW ADDRESS"
        username = "NEW USERNAME"

        accessor.update_profile(_TEST_PROFILE_NAME, address, username, True, True)
        assert (
            accessor.get_profile(_TEST_PROFILE_NAME)[ConfigAccessor.AUTHORITY_KEY]
            == address
        )
        assert (
            accessor.get_profile(_TEST_PROFILE_NAME)[ConfigAccessor.USERNAME_KEY]
            == username
        )
        assert accessor.get_profile(_TEST_PROFILE_NAME)[
            ConfigAccessor.IGNORE_SSL_ERRORS_KEY
        ]
        assert accessor.get_profile(_TEST_PROFILE_NAME)[
            ConfigAccessor.USE_V2_FILE_EVENTS_KEY
        ]

    def test_update_profile_does_not_update_when_given_none(
        self, config_parser_for_multiple_profiles
    ):
        accessor = ConfigAccessor(config_parser_for_multiple_profiles)

        # First, make sure they're not None
        address = "NOT NONE"
        username = "NOT NONE"
        accessor.update_profile(_TEST_PROFILE_NAME, address, username, True, True)

        accessor.update_profile(_TEST_PROFILE_NAME, None, None, None, None)
        assert (
            accessor.get_profile(_TEST_PROFILE_NAME)[ConfigAccessor.AUTHORITY_KEY]
            == address
        )
        assert (
            accessor.get_profile(_TEST_PROFILE_NAME)[ConfigAccessor.USERNAME_KEY]
            == username
        )
        assert accessor.get_profile(_TEST_PROFILE_NAME)[
            ConfigAccessor.IGNORE_SSL_ERRORS_KEY
        ]
        assert accessor.get_profile(_TEST_PROFILE_NAME)[
            ConfigAccessor.USE_V2_FILE_EVENTS_KEY
        ]


# --- tests/test_file_readers.py ---
import click.exceptions
import pytest

from code42cli.click_ext.types import AutoDecodedFile
from code42cli.click_ext.types import FileOrString
from code42cli.errors import Code42CLIError
from code42cli.file_readers import read_csv

HEADERLESS_CSV = [
    "col1_val1,col2_val1,col3_val1\n",
    "col1_val2,col2_val2,col3_val2\n",
]
HEADERS = ["header1", "header2", "header3"]
# Fixed a missing comma that silently concatenated the header line with the
# first data row via implicit string concatenation. The writelines() output
# is byte-identical either way, but three elements reflect the intent.
HEADERED_CSV = [
    "header2,header1,header3,extra_column\n",
    "col2_val1,col1_val1,col3_val1,extra_value\n",
    "col2_val2,col1_val2,col3_val2,extra_value\n",
]


def test_read_csv_handles_headerless_columns_in_proper_number_and_order(runner):
    with runner.isolated_filesystem():
        with open("test_csv.csv", "w") as csv:
            csv.writelines(HEADERLESS_CSV)
        with open("test_csv.csv") as csv:
            result_list = read_csv(file=csv, headers=HEADERS)
    assert result_list[0]["header1"] == "col1_val1"
    assert result_list[1]["header3"] == "col3_val2"


def test_read_csv_handles_headered_columns_in_arbitrary_number_and_order(runner):
    with runner.isolated_filesystem():
        with open("test_csv.csv", "w") as csv:
            csv.writelines(HEADERED_CSV)
        with open("test_csv.csv") as csv:
            result_list = read_csv(file=csv, headers=HEADERS)
    assert result_list[0]["header1"] == "col1_val1"
    assert result_list[1]["header3"] == "col3_val2"


def test_read_csv_raises_when_no_header_detected_and_column_count_doesnt_match_expected_header(
    runner,
):
    with runner.isolated_filesystem():
        with open("test_csv.csv", "w") as csv:
            csv.writelines(HEADERLESS_CSV)
        with open("test_csv.csv") as csv:
            with pytest.raises(Code42CLIError):
                read_csv(csv, ["column1", "column2"])


def test_read_csv_when_all_expected_headers_present_filters_out_extra_columns(runner):
    with runner.isolated_filesystem():
        with open("test_csv.csv", "w") as csv:
            csv.writelines(HEADERED_CSV)
        with open("test_csv.csv") as csv:
            result_list = read_csv(file=csv, headers=HEADERS)
    assert "extra_column" not in result_list[0]


def test_read_csv_when_some_but_not_all_required_headers_present_raises(runner):
    with runner.isolated_filesystem():
        with open("test_csv.csv", "w") as csv:
            csv.writelines(HEADERED_CSV)
        with open("test_csv.csv") as csv:
            with pytest.raises(Code42CLIError):
                read_csv(file=csv, headers=HEADERS + ["extra_header"])


@pytest.mark.parametrize(
    "encoding",
    ["utf8", "utf16", "latin_1"],
)
def test_read_csv_reads_various_encodings_automatically(runner, encoding):
    with runner.isolated_filesystem():
        with open("test.csv", "w", encoding=encoding) as file:
            file.write("".join(HEADERED_CSV))

        csv = AutoDecodedFile("r").convert("test.csv", None, None)
        result_list = read_csv(csv, headers=HEADERS)

    assert result_list == [
        {"header1": "col1_val1", "header2": "col2_val1", "header3": "col3_val1"},
        {"header1": "col1_val2", "header2": "col2_val2", "header3": "col3_val2"},
    ]


def test_AutoDecodedFile_raises_expected_exception_when_file_not_exists(runner):
    with pytest.raises(click.exceptions.BadParameter):
        AutoDecodedFile("r").convert("not_a_file", None, None)


@pytest.mark.parametrize(
    "encoding",
    ["utf8", "utf16", "latin_1"],
)
def test_FileOrString_arg_handles_various_encodings_automatically(runner, encoding):
    test_data = '{"tést": "dåta"}'
    with runner.isolated_filesystem():
        with open("test1.json", "w", encoding=encoding) as file:
            file.write(test_data)

        result_data = FileOrString().convert("@test1.json", None, None)
    assert result_data == test_data


# --- tests/test_magic_date_type.py ---
from datetime import datetime
from datetime import timedelta
from datetime import timezone

import pytest
from click.exceptions import BadParameter

from .conftest import begin_date_str
from .conftest import begin_date_str_with_t_time
from .conftest import begin_date_str_with_time
from .conftest import end_date_str
from .conftest import end_date_str_with_time
from .conftest import get_test_date
from code42cli.click_ext.types import MagicDate
from code42cli.date_helper import round_datetime_to_day_end
from code42cli.date_helper import round_datetime_to_day_start

one_ms = timedelta(milliseconds=1)


def utc(dt):
    return dt.replace(tzinfo=timezone.utc)


class TestMagicDateNoRounding:
    md = MagicDate()

    def convert(self, val):
        return self.md.convert(val, ctx=None, param=None)

    def test_when_given_date_str_parses_successfully(self):
        actual = self.convert(begin_date_str)
        expected = utc(datetime.strptime(begin_date_str, "%Y-%m-%d"))
        assert actual == expected

    @pytest.mark.parametrize(
        "param",
        [begin_date_str_with_time, begin_date_str_with_t_time],
    )
    def test_when_given_date_str_with_time_parses_successfully(self, param):
        actual = self.convert(param)
        expected = utc(datetime.strptime(begin_date_str_with_time, "%Y-%m-%d %H:%M:%S"))
        assert actual == expected

    @pytest.mark.parametrize("param", ["20d", "20D"])
    def test_when_given_magic_days_parses_successfully(self, param):
        actual_date = self.convert(param)
        expected_date = utc(get_test_date(days_ago=20))
        # Fixed: compare the absolute difference. A signed timedelta
        # comparison passes vacuously whenever the difference is negative.
        assert abs(actual_date - expected_date) < one_ms

    @pytest.mark.parametrize("param", ["20h", "20H"])
    def test_when_given_magic_hours_parses_successfully(self, param):
        actual = self.convert(param)
        expected = utc(get_test_date(hours_ago=20))
        assert abs(expected - actual) < one_ms

    @pytest.mark.parametrize("param", ["20m", "20M"])
    def test_when_given_magic_minutes_parses_successfully(self, param):
        actual = self.convert(param)
        expected = utc(get_test_date(minutes_ago=20))
        assert abs(expected - actual) < one_ms

    @pytest.mark.parametrize(
        "badparam",
        [
            "20days",
            "20S",
            "d20",
            "01-01-2020",
            "2020-01-0110:10:10",
            "2020-01-01 30:30:30",
        ],
    )
    def test_when_given_bad_values_raises_exception(self, badparam):
        with pytest.raises(BadParameter):
            self.convert(badparam)


class TestMagicDateRoundingToStart:
    md = MagicDate(rounding_func=round_datetime_to_day_start)

    def convert(self, val):
        return self.md.convert(val, ctx=None, param=None)

    def test_when_given_date_str_parses_successfully(self):
        actual = self.convert(begin_date_str)
        expected = utc(datetime.strptime(begin_date_str, "%Y-%m-%d"))
        assert actual == expected

    def test_when_given_date_str_with_time_parses_successfully(
        self,
    ):
        actual = self.convert(begin_date_str_with_time)
        expected = utc(datetime.strptime(begin_date_str_with_time, "%Y-%m-%d %H:%M:%S"))
        assert actual == expected

    def test_when_given_magic_days_parses_successfully(self):
        actual_date = self.convert("20d")
        expected_date = utc(get_test_date(days_ago=20))
        assert abs(actual_date - expected_date) < one_ms

    def test_when_given_magic_hours_parses_successfully(self):
        actual = self.convert("20h")
        expected = utc(get_test_date(hours_ago=20))
        assert abs(expected - actual) < one_ms

    def test_when_given_magic_minutes_parses_successfully(self):
        actual = self.convert("20m")
        expected = utc(get_test_date(minutes_ago=20))
        assert abs(expected - actual) < one_ms


class TestMagicDateRoundingToEnd:
    md = MagicDate(rounding_func=round_datetime_to_day_end)

    def convert(self, val):
        return self.md.convert(val, ctx=None, param=None)

    def test_when_given_date_str_parses_successfully(self):
        actual = self.convert(end_date_str)
        expected = datetime.strptime(end_date_str, "%Y-%m-%d")
        expected = utc(
            expected.replace(hour=23, minute=59, second=59, microsecond=999999)
        )
        assert actual == expected

    def test_when_given_date_str_with_time_parses_successfully(self):
        actual = self.convert(end_date_str_with_time)
        expected = utc(datetime.strptime(end_date_str_with_time, "%Y-%m-%d %H:%M:%S"))
        assert actual == expected

    def test_when_given_magic_days_parses_successfully(self):
        actual_date = self.convert("20d")
        expected_date = get_test_date(days_ago=20)
        expected_date = utc(
            expected_date.replace(hour=23, minute=59, second=59, microsecond=999999)
        )
        assert actual_date == expected_date

    def test_when_given_magic_hours_parses_successfully(self):
        actual = self.convert("20h")
        expected = utc(get_test_date(hours_ago=20))
        assert abs(expected - actual) < one_ms

    def test_when_given_magic_minutes_parses_successfully(self):
        actual = self.convert("20m")
        expected = utc(get_test_date(minutes_ago=20))
        assert abs(expected - actual) < one_ms


# --- tests/test_output_formats.py ---
import json
from collections import OrderedDict

import pytest
from numpy import nan as NaN
from pandas import DataFrame

import code42cli.output_formats as output_formats_module
from code42cli.errors import Code42CLIError
from code42cli.maps import FILE_EVENT_TO_SIGNATURE_ID_MAP
from code42cli.output_formats import DataFrameOutputFormatter
from code42cli.output_formats import FileEventsOutputFormat
from code42cli.output_formats import FileEventsOutputFormatter
from code42cli.output_formats import OutputFormat
from code42cli.output_formats import to_cef

# Three alert-rule records used as the canonical input for formatter tests.
TEST_DATA = [
    {
        "type$": "RULE_METADATA",
        "modifiedBy": "test.user+partners@example.com",
        "modifiedAt": "2020-06-22T16:26:16.3875180Z",
        "name": "outside td",
        "description": "",
        "severity": "HIGH",
        "isSystem": False,
        "isEnabled": True,
        "ruleSource": "Alerting",
        "tenantId": "1d71796f-af5b-4231-9d8e-df6434da4663",
        "observerRuleId": "d12d54f0-5160-47a8-a48f-7d5fa5b051c5",
        "type": "FED_CLOUD_SHARE_PERMISSIONS",
        "id": "5157f1df-cb3e-4755-92a2-0f42c7841020",
        "createdBy": "test.user+partners@example.com",
        "createdAt": "2020-06-22T16:26:16.3875180Z",
    },
    {
        "type$": "RULE_METADATA",
        "modifiedBy": "testuser@example.com",
        "modifiedAt": "2020-07-16T08:09:44.4345110Z",
        "name": "Test different filters",
        "description": "Test different filters",
        "severity": "MEDIUM",
        "isSystem": False,
        "isEnabled": True,
        "ruleSource": "Alerting",
        "tenantId": "1d71796f-af5b-4231-9d8e-df6434da4663",
        "observerRuleId": "8b393324-c34c-44ac-9f79-4313601dd859",
        "type": "FED_ENDPOINT_EXFILTRATION",
        "id": "88354829-0958-4d60-a20d-69a53cf603b6",
        "createdBy": "test.user+partners@example.com",
        "createdAt": "2020-05-20T11:56:41.2324240Z",
    },
    {
        "type$": "RULE_METADATA",
        "modifiedBy": "testuser@example.com",
        "modifiedAt": "2020-05-28T16:19:19.5250970Z",
        "name": "Test Alerts using CLI",
        "description": "user",
        "severity": "HIGH",
        "isSystem": False,
        "isEnabled": True,
        "ruleSource": "Alerting",
        "tenantId": "1d71796f-af5b-4231-9d8e-df6434da4663",
        "observerRuleId": "5eabed1d-a406-4dfc-af81-f7485ee09b19",
        "type": "FED_ENDPOINT_EXFILTRATION",
        "id": "b2cb33e6-6683-4822-be1d-8de5ef87728e",
        "createdBy": "testuser@example.com",
        "createdAt": "2020-05-18T11:47:16.6109560Z",
    },
]
TEST_DATAFRAME = DataFrame.from_records(TEST_DATA)


# Maps record keys to display-column names, in display order.
TEST_HEADER = OrderedDict()
TEST_HEADER["observerRuleId"] = "RuleId"
TEST_HEADER["name"] = "Name"
TEST_HEADER["severity"] = "Severity"
TEST_HEADER["type"] = "Type"
TEST_HEADER["ruleSource"] = "Source"
TEST_HEADER["isEnabled"] = "Enabled"


# Expected table rendering of TEST_DATA using TEST_HEADER.
# NOTE(review): column alignment here is whitespace-significant — confirm the
# exact spacing against the table formatter's real output.
TABLE_OUTPUT = "\n".join(
    [
        """RuleId Name Severity Type Source Enabled """,
        """d12d54f0-5160-47a8-a48f-7d5fa5b051c5 outside td HIGH FED_CLOUD_SHARE_PERMISSIONS Alerting True """,
        """8b393324-c34c-44ac-9f79-4313601dd859 Test different filters MEDIUM FED_ENDPOINT_EXFILTRATION Alerting True """,
        """5eabed1d-a406-4dfc-af81-f7485ee09b19 Test Alerts using CLI HIGH FED_ENDPOINT_EXFILTRATION Alerting True """,
    ]
)


# Expected CSV rendering of TEST_DATA (CRLF row terminators per the csv module).
CSV_OUTPUT = """type$,modifiedBy,modifiedAt,name,description,severity,isSystem,isEnabled,ruleSource,tenantId,observerRuleId,type,id,createdBy,createdAt\r
RULE_METADATA,test.user+partners@example.com,2020-06-22T16:26:16.3875180Z,outside td,,HIGH,False,True,Alerting,1d71796f-af5b-4231-9d8e-df6434da4663,d12d54f0-5160-47a8-a48f-7d5fa5b051c5,FED_CLOUD_SHARE_PERMISSIONS,5157f1df-cb3e-4755-92a2-0f42c7841020,test.user+partners@example.com,2020-06-22T16:26:16.3875180Z\r
RULE_METADATA,testuser@example.com,2020-07-16T08:09:44.4345110Z,Test different filters,Test different filters,MEDIUM,False,True,Alerting,1d71796f-af5b-4231-9d8e-df6434da4663,8b393324-c34c-44ac-9f79-4313601dd859,FED_ENDPOINT_EXFILTRATION,88354829-0958-4d60-a20d-69a53cf603b6,test.user+partners@example.com,2020-05-20T11:56:41.2324240Z\r
RULE_METADATA,testuser@example.com,2020-05-28T16:19:19.5250970Z,Test Alerts using CLI,user,HIGH,False,True,Alerting,1d71796f-af5b-4231-9d8e-df6434da4663,5eabed1d-a406-4dfc-af81-f7485ee09b19,FED_ENDPOINT_EXFILTRATION,b2cb33e6-6683-4822-be1d-8de5ef87728e,testuser@example.com,2020-05-18T11:47:16.6109560Z\r
"""


# A record with nested list/dict values, for exercising flattening behavior.
TEST_NESTED_DATA = {
    "test": "TEST",
    "name": "outside td",
    "description": "",
    "severity": "HIGH",
    "isSystem": False,
    "isEnabled": True,
    "ruleSource": ["Alerting"],
    "tenantId": "1d71796f-af5b-4231-9d8e-df6434da4663",
    "observerRuleId": {"test": ["d12d54f0-5160-47a8-a48f-7d5fa5b051c5"]},
    "type": ["FED_CLOUD_SHARE_PERMISSIONS"],
    "id": "5157f1df-cb3e-4755-92a2-0f42c7841020",
}

# Sample file events parsed from JSON so null/list handling matches real
# server payloads (json.loads yields None for "null", not missing keys).
AED_CLOUD_ACTIVITY_EVENT_DICT = json.loads(
    """{
    "url": "https://www.example.com",
    "syncDestination": "TEST_SYNC_DESTINATION",
    "sharedWith": [{"cloudUsername": "example1@example.com"}, {"cloudUsername": "example2@example.com"}],
    "cloudDriveId": "TEST_CLOUD_DRIVE_ID",
    "actor": "actor@example.com",
    "tabUrl": "TEST_TAB_URL",
    "windowTitle": "TEST_WINDOW_TITLE"
    }"""
)


AED_REMOVABLE_MEDIA_EVENT_DICT = json.loads(
    """{
    "removableMediaVendor": "TEST_VENDOR_NAME",
    "removableMediaName": "TEST_NAME",
    "removableMediaSerialNumber": "TEST_SERIAL_NUMBER",
    "removableMediaCapacity": 5000000,
    "removableMediaBusType": "TEST_BUS_TYPE"
    }"""
)


AED_EMAIL_EVENT_DICT = json.loads(
    """{
    "emailSender": "TEST_EMAIL_SENDER",
    "emailRecipients": ["test.recipient1@example.com", "test.recipient2@example.com"]
    }"""
)


AED_EVENT_DICT = json.loads(
    """{
    "eventId": "0_1d71796f-af5b-4231-9d8e-df6434da4663_912339407325443353_918253081700247636_16",
    "eventType": "READ_BY_APP",
    "eventTimestamp": "2019-09-09T02:42:23.851Z",
    "insertionTimestamp": "2019-09-09T22:47:42.724Z",
    "filePath": "/Users/testtesterson/Downloads/About Downloads.lpdf/Contents/Resources/English.lproj/",
    "fileName": "InfoPlist.strings",
    "fileType": "FILE",
    "fileCategory": "UNCATEGORIZED",
    "fileSize": 86,
    "fileOwner": "testtesterson",
    "md5Checksum": "19b92e63beb08c27ab4489fcfefbbe44",
    "sha256Checksum": "2e0677355c37fa18fd20d372c7420b8b34de150c5801910c3bbb1e8e04c727ef",
    "createTimestamp": "2012-07-22T02:19:29Z",
    "modifyTimestamp": "2012-12-19T03:00:08Z",
    "deviceUserName": "test.testerson+testair@example.com",
    "osHostName": "Test's MacBook Air",
    "domainName": "192.168.0.3",
    "publicIpAddress": "71.34.4.22",
    "privateIpAddresses": [
        "fe80:0:0:0:f053:a9bd:973:6c8c%utun1",
        "fe80:0:0:0:a254:cb31:8840:9d6b%utun0",
        "0:0:0:0:0:0:0:1%lo0",
        "192.168.0.3",
        "fe80:0:0:0:0:0:0:1%lo0",
        "fe80:0:0:0:8c28:1ac9:5745:a6e7%utun3",
        "fe80:0:0:0:2e4a:351c:bb9b:2f28%utun2",
        "fe80:0:0:0:6df:855c:9436:37f8%utun4",
        "fe80:0:0:0:ce:5072:e5f:7155%en0",
        "fe80:0:0:0:b867:afff:fefc:1a82%awdl0",
        "127.0.0.1"
    ],
    "deviceUid": "912339407325443353",
    "userUid": "912338501981077099",
    "actor": null,
    "directoryId": [],
    "source": "Endpoint",
    "url": null,
    "shared": null,
    "sharedWith": [],
    "sharingTypeAdded": [],
    "cloudDriveId": null,
    "detectionSourceAlias": null,
    "fileId": null,
    "exposure": [
        "ApplicationRead"
    ],
    "processOwner": "testtesterson",
    "processName": "/Applications/Google Chrome.app/Contents/MacOS/Google Chrome",
    "removableMediaVendor": null,
    "removableMediaName": null,
    "removableMediaSerialNumber": null,
    "removableMediaCapacity": null,
    "removableMediaBusType": null,
    "syncDestination": null
    }"""
)


@pytest.fixture
def mock_file_event_removable_media_event():
    return AED_REMOVABLE_MEDIA_EVENT_DICT


@pytest.fixture
def mock_file_event_cloud_activity_event():
    return AED_CLOUD_ACTIVITY_EVENT_DICT


@pytest.fixture
def mock_file_event_email_event():
    return AED_EMAIL_EVENT_DICT


@pytest.fixture
def mock_file_event():
    return AED_EVENT_DICT


@pytest.fixture
def mock_to_cef(mocker):
    # Patch the CEF converter so formatter tests can assert on its inputs.
    return mocker.patch("code42cli.output_formats.to_cef")


def assert_csv_texts_are_equal(actual, expected):
    """Compare two CSV texts ignoring row and field ordering.

    Column order was not deterministic on older Pythons (dicts were unordered
    before 3.6/3.7), so both texts are reduced to a set of comma-separated
    tokens before comparison.
    """
    actual = actual.replace("\r", ",")
    actual = actual.replace("\n", ",")
    expected = expected.replace("\r", ",")
    expected = expected.replace("\n", ",")
    actual = set(actual.split(","))
    expected = set(expected.split(","))
    assert actual == expected


def test_to_csv_formats_data_to_csv_format():
    formatted_output = output_formats_module.to_csv(TEST_DATA)
    assert_csv_texts_are_equal(formatted_output, CSV_OUTPUT)
+ +def test_to_csv_when_given_no_output_returns_none(): + assert output_formats_module.to_csv(None) is None + + +def test_to_table_formats_data_to_table_format(): + formatted_output = output_formats_module.to_table(TEST_DATA, TEST_HEADER) + assert formatted_output == TABLE_OUTPUT + + +def test_to_table_formats_when_given_no_output_returns_none(): + assert output_formats_module.to_table(None, None) is None + + +def test_to_table_when_not_given_header_creates_header_dynamically(): + formatted_output = output_formats_module.to_table(TEST_DATA, None) + assert len(formatted_output) > len(TABLE_OUTPUT) + assert "test.user+partners@example.com" in formatted_output + + +def test_to_json(): + formatted_output = output_formats_module.to_json(TEST_DATA) + assert formatted_output == f"{json.dumps(TEST_DATA)}\n" + + +def test_to_formatted_json(): + formatted_output = output_formats_module.to_formatted_json(TEST_DATA) + assert formatted_output == f"{json.dumps(TEST_DATA, indent=4)}\n" + + +class TestOutputFormatter: + def test_init_sets_format_func_to_formatted_json_function_when_json_format_option_is_passed( + self, mock_to_json + ): + output_format = output_formats_module.OutputFormat.RAW + formatter = output_formats_module.OutputFormatter(output_format) + for _ in formatter.get_formatted_output([{"TEST": "FOOBAR"}]): + pass + mock_to_json.assert_called_once_with({"TEST": "FOOBAR"}) + + def test_init_sets_format_func_to_json_function_when_raw_json_format_option_is_passed( + self, mock_to_formatted_json + ): + output_format = output_formats_module.OutputFormat.JSON + formatter = output_formats_module.OutputFormatter(output_format) + for _ in formatter.get_formatted_output(["TEST"]): + pass + mock_to_formatted_json.assert_called_once_with("TEST") + + def test_init_sets_format_func_to_table_function_when_table_format_option_is_passed( + self, mock_to_table + ): + output_format = output_formats_module.OutputFormat.TABLE + formatter = 
output_formats_module.OutputFormatter(output_format) + for _ in formatter.get_formatted_output("TEST"): + pass + mock_to_table.assert_called_once_with("TEST", None, include_header=True) + + def test_init_sets_format_func_to_csv_function_when_csv_format_option_is_passed( + self, mock_to_csv + ): + output_format = output_formats_module.OutputFormat.CSV + formatter = output_formats_module.OutputFormatter(output_format) + for _ in formatter.get_formatted_output("TEST"): + pass + mock_to_csv.assert_called_once_with("TEST") + + def test_init_sets_format_func_to_table_function_when_no_format_option_is_passed( + self, mock_to_table + ): + formatter = output_formats_module.OutputFormatter(None) + for _ in formatter.get_formatted_output("TEST"): + pass + mock_to_table.assert_called_once_with("TEST", None, include_header=True) + + +def test_to_cef_returns_cef_tagged_string(mock_file_event): + cef_out = to_cef(mock_file_event) + cef_parts = get_cef_parts(cef_out) + assert cef_parts[0] == "CEF:0" + + +def test_to_cef_uses_correct_vendor_name(mock_file_event): + cef_out = to_cef(mock_file_event) + cef_parts = get_cef_parts(cef_out) + assert cef_parts[1] == "Code42" + + +def test_to_cef_uses_correct_default_product_name(mock_file_event): + cef_out = to_cef(mock_file_event) + cef_parts = get_cef_parts(cef_out) + assert cef_parts[2] == "Advanced Exfiltration Detection" + + +def test_to_cef_uses_correct_default_severity(mock_file_event): + cef_out = to_cef(mock_file_event) + cef_parts = get_cef_parts(cef_out) + assert cef_parts[6] == "5" + + +def test_to_cef_excludes_none_values_from_output(mock_file_event): + cef_out = to_cef(mock_file_event) + cef_parts = get_cef_parts(cef_out) + assert "=None " not in cef_parts[-1] + + +def test_to_cef_excludes_empty_values_from_output(mock_file_event): + cef_out = to_cef(mock_file_event) + cef_parts = get_cef_parts(cef_out) + assert "= " not in cef_parts[-1] + + +def test_to_cef_excludes_file_event_fields_not_in_cef_map(mock_file_event): + 
test_value = "definitelyExcludedValue" + mock_file_event["unmappedFieldName"] = test_value + cef_out = to_cef(mock_file_event) + cef_parts = get_cef_parts(cef_out) + del mock_file_event["unmappedFieldName"] + assert test_value not in cef_parts[-1] + + +def test_to_cef_includes_os_hostname_if_present(mock_file_event): + expected_field_name = "shost" + expected_value = "Test's MacBook Air" + cef_out = to_cef(mock_file_event) + assert key_value_pair_in_cef_extension(expected_field_name, expected_value, cef_out) + + +def test_to_cef_includes_public_ip_address_if_present(mock_file_event): + expected_field_name = "src" + expected_value = "71.34.4.22" + cef_out = to_cef(mock_file_event) + assert key_value_pair_in_cef_extension(expected_field_name, expected_value, cef_out) + + +def test_to_cef_includes_user_uid_if_present(mock_file_event): + expected_field_name = "suid" + expected_value = "912338501981077099" + cef_out = to_cef(mock_file_event) + assert key_value_pair_in_cef_extension(expected_field_name, expected_value, cef_out) + + +def test_to_cef_includes_device_username_if_present(mock_file_event): + expected_field_name = "suser" + expected_value = "test.testerson+testair@example.com" + cef_out = to_cef(mock_file_event) + assert key_value_pair_in_cef_extension(expected_field_name, expected_value, cef_out) + + +def test_to_cef_includes_removable_media_capacity_if_present( + mock_file_event_removable_media_event, +): + expected_field_name = "cn1" + expected_value = "5000000" + cef_out = to_cef(mock_file_event_removable_media_event) + assert key_value_pair_in_cef_extension(expected_field_name, expected_value, cef_out) + + +def test_to_cef_includes_removable_media_capacity_label_if_present( + mock_file_event_removable_media_event, +): + expected_field_name = "cn1Label" + expected_value = "Code42AEDRemovableMediaCapacity" + cef_out = to_cef(mock_file_event_removable_media_event) + assert key_value_pair_in_cef_extension(expected_field_name, expected_value, cef_out) + + +def 
test_to_cef_includes_removable_media_bus_type_if_present( + mock_file_event_removable_media_event, +): + expected_field_name = "cs1" + expected_value = "TEST_BUS_TYPE" + cef_out = to_cef(mock_file_event_removable_media_event) + assert key_value_pair_in_cef_extension(expected_field_name, expected_value, cef_out) + + +def test_to_cef_includes_removable_media_bus_type_label_if_present( + mock_file_event_removable_media_event, +): + expected_field_name = "cs1Label" + expected_value = "Code42AEDRemovableMediaBusType" + cef_out = to_cef(mock_file_event_removable_media_event) + assert key_value_pair_in_cef_extension(expected_field_name, expected_value, cef_out) + + +def test_to_cef_includes_removable_media_vendor_if_present( + mock_file_event_removable_media_event, +): + expected_field_name = "cs2" + expected_value = "TEST_VENDOR_NAME" + cef_out = to_cef(mock_file_event_removable_media_event) + assert key_value_pair_in_cef_extension(expected_field_name, expected_value, cef_out) + + +def test_to_cef_includes_removable_media_vendor_label_if_present( + mock_file_event_removable_media_event, +): + expected_field_name = "cs2Label" + expected_value = "Code42AEDRemovableMediaVendor" + cef_out = to_cef(mock_file_event_removable_media_event) + assert key_value_pair_in_cef_extension(expected_field_name, expected_value, cef_out) + + +def test_to_cef_includes_removable_media_name_if_present( + mock_file_event_removable_media_event, +): + expected_field_name = "cs3" + expected_value = "TEST_NAME" + cef_out = to_cef(mock_file_event_removable_media_event) + assert key_value_pair_in_cef_extension(expected_field_name, expected_value, cef_out) + + +def test_to_cef_includes_removable_media_name_label_if_present( + mock_file_event_removable_media_event, +): + expected_field_name = "cs3Label" + expected_value = "Code42AEDRemovableMediaName" + cef_out = to_cef(mock_file_event_removable_media_event) + assert key_value_pair_in_cef_extension(expected_field_name, expected_value, cef_out) + + +def 
test_to_cef_includes_removable_media_serial_number_if_present( + mock_file_event_removable_media_event, +): + expected_field_name = "cs4" + expected_value = "TEST_SERIAL_NUMBER" + cef_out = to_cef(mock_file_event_removable_media_event) + assert key_value_pair_in_cef_extension(expected_field_name, expected_value, cef_out) + + +def test_to_cef_includes_removable_media_serial_number_label_if_present( + mock_file_event_removable_media_event, +): + expected_field_name = "cs4Label" + expected_value = "Code42AEDRemovableMediaSerialNumber" + cef_out = to_cef(mock_file_event_removable_media_event) + assert key_value_pair_in_cef_extension(expected_field_name, expected_value, cef_out) + + +def test_to_cef_includes_actor_if_present(mock_file_event_cloud_activity_event): + expected_field_name = "suser" + expected_value = "actor@example.com" + cef_out = to_cef(mock_file_event_cloud_activity_event) + assert key_value_pair_in_cef_extension(expected_field_name, expected_value, cef_out) + + +def test_to_cef_includes_sync_destination_if_present( + mock_file_event_cloud_activity_event, +): + expected_field_name = "destinationServiceName" + expected_value = "TEST_SYNC_DESTINATION" + cef_out = to_cef(mock_file_event_cloud_activity_event) + assert key_value_pair_in_cef_extension(expected_field_name, expected_value, cef_out) + + +def test_to_cef_includes_event_timestamp_if_present(mock_file_event): + expected_field_name = "end" + expected_value = "1567996943851" + cef_out = to_cef(mock_file_event) + assert key_value_pair_in_cef_extension(expected_field_name, expected_value, cef_out) + + +def test_to_cef_includes_create_timestamp_if_present(mock_file_event): + expected_field_name = "fileCreateTime" + expected_value = "1342923569000" + cef_out = to_cef(mock_file_event) + assert key_value_pair_in_cef_extension(expected_field_name, expected_value, cef_out) + + +def test_to_cef_includes_md5_checksum_if_present(mock_file_event): + expected_field_name = "fileHash" + expected_value = 
"19b92e63beb08c27ab4489fcfefbbe44" + cef_out = to_cef(mock_file_event) + assert key_value_pair_in_cef_extension(expected_field_name, expected_value, cef_out) + + +def test_to_cef_includes_modify_timestamp_if_present(mock_file_event): + expected_field_name = "fileModificationTime" + expected_value = "1355886008000" + cef_out = to_cef(mock_file_event) + assert key_value_pair_in_cef_extension(expected_field_name, expected_value, cef_out) + + +def test_to_cef_includes_file_path_if_present(mock_file_event): + expected_field_name = "filePath" + expected_value = "/Users/testtesterson/Downloads/About Downloads.lpdf/Contents/Resources/English.lproj/" + cef_out = to_cef(mock_file_event) + assert key_value_pair_in_cef_extension(expected_field_name, expected_value, cef_out) + + +def test_to_cef_includes_file_name_if_present(mock_file_event): + expected_field_name = "fname" + expected_value = "InfoPlist.strings" + cef_out = to_cef(mock_file_event) + assert key_value_pair_in_cef_extension(expected_field_name, expected_value, cef_out) + + +def test_to_cef_includes_file_size_if_present(mock_file_event): + expected_field_name = "fsize" + expected_value = "86" + cef_out = to_cef(mock_file_event) + assert key_value_pair_in_cef_extension(expected_field_name, expected_value, cef_out) + + +def test_to_cef_includes_file_category_if_present(mock_file_event): + expected_field_name = "fileType" + expected_value = "UNCATEGORIZED" + cef_out = to_cef(mock_file_event) + assert key_value_pair_in_cef_extension(expected_field_name, expected_value, cef_out) + + +def test_to_cef_includes_exposure_if_present(mock_file_event): + expected_field_name = "reason" + expected_value = "ApplicationRead" + cef_out = to_cef(mock_file_event) + assert key_value_pair_in_cef_extension(expected_field_name, expected_value, cef_out) + + +def test_to_cef_includes_url_if_present(mock_file_event_cloud_activity_event): + expected_field_name = "filePath" + expected_value = "https://www.example.com" + cef_out = 
to_cef(mock_file_event_cloud_activity_event) + assert key_value_pair_in_cef_extension(expected_field_name, expected_value, cef_out) + + +def test_to_cef_includes_insertion_timestamp_if_present(mock_file_event): + expected_field_name = "rt" + expected_value = "1568069262724" + cef_out = to_cef(mock_file_event) + assert key_value_pair_in_cef_extension(expected_field_name, expected_value, cef_out) + + +def test_to_cef_includes_process_name_if_present(mock_file_event): + expected_field_name = "sproc" + expected_value = "/Applications/Google Chrome.app/Contents/MacOS/Google Chrome" + cef_out = to_cef(mock_file_event) + assert key_value_pair_in_cef_extension(expected_field_name, expected_value, cef_out) + + +def test_to_cef_includes_event_id_if_present(mock_file_event): + expected_field_name = "externalId" + expected_value = "0_1d71796f-af5b-4231-9d8e-df6434da4663_912339407325443353_918253081700247636_16" + cef_out = to_cef(mock_file_event) + assert key_value_pair_in_cef_extension(expected_field_name, expected_value, cef_out) + + +def test_to_cef_includes_device_uid_if_present(mock_file_event): + expected_field_name = "deviceExternalId" + expected_value = "912339407325443353" + cef_out = to_cef(mock_file_event) + assert key_value_pair_in_cef_extension(expected_field_name, expected_value, cef_out) + + +def test_to_cef_includes_domain_name_if_present(mock_file_event): + expected_field_name = "dvchost" + expected_value = "192.168.0.3" + cef_out = to_cef(mock_file_event) + assert key_value_pair_in_cef_extension(expected_field_name, expected_value, cef_out) + + +def test_to_cef_includes_source_if_present(mock_file_event): + expected_field_name = "sourceServiceName" + expected_value = "Endpoint" + cef_out = to_cef(mock_file_event) + assert key_value_pair_in_cef_extension(expected_field_name, expected_value, cef_out) + + +def test_to_cef_includes_cloud_drive_id_if_present( + mock_file_event_cloud_activity_event, +): + expected_field_name = "aid" + expected_value = 
"TEST_CLOUD_DRIVE_ID" + cef_out = to_cef(mock_file_event_cloud_activity_event) + assert key_value_pair_in_cef_extension(expected_field_name, expected_value, cef_out) + + +def test_to_cef_includes_shared_with_if_present(mock_file_event_cloud_activity_event): + expected_field_name = "duser" + expected_value = "example1@example.com,example2@example.com" + cef_out = to_cef(mock_file_event_cloud_activity_event) + assert key_value_pair_in_cef_extension(expected_field_name, expected_value, cef_out) + + +def test_to_cef_includes_tab_url_if_present(mock_file_event_cloud_activity_event): + expected_field_name = "request" + expected_value = "TEST_TAB_URL" + cef_out = to_cef(mock_file_event_cloud_activity_event) + assert key_value_pair_in_cef_extension(expected_field_name, expected_value, cef_out) + + +def test_to_cef_includes_window_title_if_present(mock_file_event_cloud_activity_event): + expected_field_name = "requestClientApplication" + expected_value = "TEST_WINDOW_TITLE" + cef_out = to_cef(mock_file_event_cloud_activity_event) + assert key_value_pair_in_cef_extension(expected_field_name, expected_value, cef_out) + + +def test_to_cef_includes_email_recipients_if_present(mock_file_event_email_event): + expected_field_name = "duser" + expected_value = "test.recipient1@example.com,test.recipient2@example.com" + cef_out = to_cef(mock_file_event_email_event) + assert key_value_pair_in_cef_extension(expected_field_name, expected_value, cef_out) + + +def test_to_cef_includes_email_sender_if_present(mock_file_event_email_event): + expected_field_name = "suser" + expected_value = "TEST_EMAIL_SENDER" + cef_out = to_cef(mock_file_event_email_event) + assert key_value_pair_in_cef_extension(expected_field_name, expected_value, cef_out) + + +def test_to_cef_includes_correct_event_name_and_signature_id_for_created( + mock_file_event, +): + event_type = "CREATED" + mock_file_event["eventType"] = event_type + cef_out = to_cef(mock_file_event) + assert 
event_name_assigned_correct_signature_id(event_type, "C42200", cef_out) + + +def test_to_cef_includes_correct_event_name_and_signature_id_for_modified( + mock_file_event, +): + event_type = "MODIFIED" + mock_file_event["eventType"] = event_type + cef_out = to_cef(mock_file_event) + assert event_name_assigned_correct_signature_id(event_type, "C42201", cef_out) + + +def test_to_cef_includes_correct_event_name_and_signature_id_for_deleted( + mock_file_event, +): + event_type = "DELETED" + mock_file_event["eventType"] = event_type + cef_out = to_cef(mock_file_event) + assert event_name_assigned_correct_signature_id(event_type, "C42202", cef_out) + + +def test_to_cef_includes_correct_event_name_and_signature_id_for_read_by_app( + mock_file_event, +): + event_type = "READ_BY_APP" + mock_file_event["eventType"] = event_type + cef_out = to_cef(mock_file_event) + assert event_name_assigned_correct_signature_id(event_type, "C42203", cef_out) + + +def test_to_cef_includes_correct_event_name_and_signature_id_for_emailed( + mock_file_event_email_event, +): + event_type = "EMAILED" + mock_file_event_email_event["eventType"] = event_type + cef_out = to_cef(mock_file_event_email_event) + assert event_name_assigned_correct_signature_id(event_type, "C42204", cef_out) + + +def get_cef_parts(cef_str): + return cef_str.split("|") + + +def key_value_pair_in_cef_extension(field_name, field_value, cef_str): + cef_parts = get_cef_parts(cef_str) + kvp = f"{field_name}={field_value}" + return kvp in cef_parts[-1] + + +def event_name_assigned_correct_signature_id(event_name, signature_id, cef_out): + if event_name in FILE_EVENT_TO_SIGNATURE_ID_MAP: + cef_parts = get_cef_parts(cef_out) + return cef_parts[4] == signature_id and cef_parts[5] == event_name + + return False + + +def test_security_data_output_format_has_expected_options(): + options = FileEventsOutputFormat() + actual = list(options) + expected = ["CEF", "CSV", "RAW-JSON", "JSON", "TABLE"] + assert set(actual) == set(expected) + + 
+class TestDataFrameOutputFormatter: + test_df = DataFrame( + [ + {"string_column": "string1", "int_column": 42, "null_column": None}, + {"string_column": "string2", "int_column": 43, "null_column": NaN}, + ] + ) + + def test_format_when_none_passed_defaults_to_table(self): + formatter = DataFrameOutputFormatter(output_format=None) + assert formatter.output_format == OutputFormat.TABLE + + def test_format_when_unknown_format_raises_value_error(self): + with pytest.raises(Code42CLIError): + DataFrameOutputFormatter("NOT_A_FORMAT") + + with pytest.raises(Code42CLIError): + formatter = DataFrameOutputFormatter("JSON") + formatter.output_format = "NOT_A_FORMAT" + list(formatter.get_formatted_output(self.test_df)) + + def test_json_formatter_converts_to_expected_string(self): + formatter = DataFrameOutputFormatter(OutputFormat.JSON) + output = formatter.get_formatted_output(self.test_df) + assert ( + "".join(output) + == '{\n "string_column": "string1",\n "int_column": 42,\n "null_column": null\n}\n{\n "string_column": "string2",\n "int_column": 43,\n "null_column": null\n}\n' + ) + + def test_raw_formatter_converts_to_expected_string(self): + formatter = DataFrameOutputFormatter(OutputFormat.RAW) + output = formatter.get_formatted_output(self.test_df) + assert ( + "".join(output) + == '{"string_column": "string1", "int_column": 42, "null_column": null}\n{"string_column": "string2", "int_column": 43, "null_column": null}\n' + ) + + def test_csv_formatter_converts_to_expected_string(self): + formatter = DataFrameOutputFormatter(OutputFormat.CSV) + output = formatter.get_formatted_output(self.test_df) + assert ( + "".join(output) + == "string_column,int_column,null_column\nstring1,42,\nstring2,43,\n" + ) + + def test_table_formatter_converts_to_expected_string(self): + formatter = DataFrameOutputFormatter(OutputFormat.TABLE) + output = list(formatter.get_formatted_output(self.test_df)) + assert "string_column" in output[0] + assert "int_column" in output[0] + assert 
"null_column" in output[0] + assert "string1" in output[1] + assert "42" in output[1] + assert "null" not in output[1] + assert "NaN" not in output[1] + assert "string2" in output[2] + assert "43" in output[2] + assert "null" not in output[2] + assert "NaN" not in output[2] + + def test_echo_formatted_dataframes_uses_pager_when_len_rows_gt_threshold_const( + self, mocker + ): + mock_echo = mocker.patch("click.echo") + mock_pager = mocker.patch("click.echo_via_pager") + formatter = DataFrameOutputFormatter(OutputFormat.TABLE) + rows_len = output_formats_module.OUTPUT_VIA_PAGER_THRESHOLD + 1 + big_df = DataFrame([{"column": val} for val in range(rows_len)]) + small_df = DataFrame([{"column": val} for val in range(5)]) + formatter.echo_formatted_dataframes(big_df) + formatter.echo_formatted_dataframes(small_df) + assert mock_echo.call_count == 1 + assert mock_pager.call_count == 1 + + @pytest.mark.parametrize("fmt", OutputFormat.choices()) + def test_get_formatted_ouput_calls_checkpoint_func_on_every_row_in_df(self, fmt): + checkpointed = [] + + def checkpoint(event): + checkpointed.append(event["string_column"]) + + formatter = DataFrameOutputFormatter(fmt, checkpoint_func=checkpoint) + list(formatter.get_formatted_output(self.test_df)) + assert checkpointed == list(self.test_df.string_column.values) + + @pytest.mark.parametrize("fmt", OutputFormat.choices()) + def test_get_formatted_ouput_calls_checkpoint_func_on_every_row_in_df_when_checkpoint_key_not_in_column_list( + self, fmt + ): + checkpointed = [] + + def checkpoint(event): + checkpointed.append(event["string_column"]) + + formatter = DataFrameOutputFormatter(fmt, checkpoint_func=checkpoint) + list( + formatter.get_formatted_output( + self.test_df, columns=["int_column", "null_column"] + ) + ) + assert checkpointed == list(self.test_df.string_column.values) + + def test_iter_rows_calls_checkpoint_func_on_every_row_in_df(self): + checkpointed = [] + + def checkpoint(event): + 
checkpointed.append(event["string_column"]) + + formatter = DataFrameOutputFormatter(None, checkpoint_func=checkpoint) + list(formatter.iter_rows(self.test_df)) + assert checkpointed == list(self.test_df.string_column.values) + + @pytest.mark.parametrize("fmt", OutputFormat.choices()) + def test_echo_formatted_dataframes_prints_no_results_found_when_dataframes_empty( + self, fmt, capsys + ): + formatter = DataFrameOutputFormatter(fmt) + + def empty_results(): + yield DataFrame() + + formatter.echo_formatted_dataframes(empty_results()) + captured = capsys.readouterr() + assert "No results found." in captured.out + + +class TestFileEventsOutputFormatter: + test_df = DataFrame([AED_EVENT_DICT]) + + def test_format_when_none_passed_defaults_to_raw_json(self): + formatter = FileEventsOutputFormatter(output_format=None) + assert formatter.output_format == FileEventsOutputFormat.RAW + + def test_format_when_unknown_format_raises_CLI_error(self): + with pytest.raises(Code42CLIError): + FileEventsOutputFormatter("NOT_A_FORMAT") + + with pytest.raises(Code42CLIError): + formatter = FileEventsOutputFormatter(FileEventsOutputFormat.JSON) + formatter.output_format = "NOT_A_FORMAT" + list(formatter.get_formatted_output(self.test_df)) + + def test_CEF_formatter_converts_to_expected_string(self): + formatter = FileEventsOutputFormatter(FileEventsOutputFormat.CEF) + output = formatter.get_formatted_output(self.test_df) + assert ( + next(output) + == "CEF:0|Code42|Advanced Exfiltration Detection|1|C42203|READ_BY_APP|5|externalId=0_1d71796f-af5b-4231-9d8e-df6434da4663_912339407325443353_918253081700247636_16 end=1567996943851 rt=1568069262724 filePath=/Users/testtesterson/Downloads/About Downloads.lpdf/Contents/Resources/English.lproj/ fname=InfoPlist.strings fileType=UNCATEGORIZED fsize=86 fileHash=19b92e63beb08c27ab4489fcfefbbe44 fileCreateTime=1342923569000 fileModificationTime=1355886008000 suser=test.testerson+testair@example.com shost=Test's MacBook Air dvchost=192.168.0.3 
src=71.34.4.22 deviceExternalId=912339407325443353 suid=912338501981077099 sourceServiceName=Endpoint reason=ApplicationRead spriv=testtesterson sproc=/Applications/Google Chrome.app/Contents/MacOS/Google Chrome\n" + ) diff --git a/tests/test_password.py b/tests/test_password.py new file mode 100644 index 000000000..203a0727f --- /dev/null +++ b/tests/test_password.py @@ -0,0 +1,104 @@ +import pytest + +import code42cli.password as password + +_USERNAME = "test.username" + + +@pytest.fixture +def keyring_password_getter(mocker): + return mocker.patch("keyring.get_password") + + +@pytest.fixture(autouse=True) +def keyring_password_setter(mocker): + return mocker.patch("keyring.set_password") + + +@pytest.fixture(autouse=True) +def get_keyring(mocker): + mock = mocker.patch("keyring.get_keyring") + mock.return_value.priority = 10 + return mock + + +@pytest.fixture +def getpass_function(mocker): + return mocker.patch("code42cli.password.getpass") + + +@pytest.fixture +def user_agreement(mocker): + mock = mocker.patch("code42cli.password.does_user_agree") + mock.return_value = True + return mocker + + +@pytest.fixture +def user_disagreement(mocker): + mock = mocker.patch("code42cli.password.does_user_agree") + mock.return_value = False + return mocker + + +def test_get_stored_password_when_given_profile_name_gets_profile_for_that_name( + profile, keyring_password_getter +): + profile.name = "foo" + profile.username = "bar" + service_name = f"code42cli::{profile.name}" + password.get_stored_password(profile) + keyring_password_getter.assert_called_once_with(service_name, profile.username) + + +def test_get_stored_password_returns_expected_password( + profile, keyring_password_getter, keyring_password_setter +): + keyring_password_getter.return_value = "already stored password 123" + assert password.get_stored_password(profile) == "already stored password 123" + + +def test_set_password_uses_expected_service_name_username_and_password( + profile, keyring_password_setter, 
keyring_password_getter +): + keyring_password_getter.return_value = "test_password" + profile.name = "profile_name" + profile.username = "test.username" + password.set_password(profile, "test_password") + expected_service_name = "code42cli::profile_name" + keyring_password_setter.assert_called_once_with( + expected_service_name, profile.username, "test_password" + ) + + +def test_set_password_when_using_file_fallback_and_user_accepts_saves_password( + profile, + keyring_password_setter, + keyring_password_getter, + get_keyring, + user_agreement, +): + keyring_password_getter.return_value = "test_password" + profile.name = "profile_name" + profile.username = "test.username" + password.set_password(profile, "test_password") + expected_service_name = "code42cli::profile_name" + keyring_password_setter.assert_called_once_with( + expected_service_name, profile.username, "test_password" + ) + + +def test_set_password_when_using_file_fallback_and_user_rejects_does_not_saves_password( + profile, keyring_password_setter, get_keyring, user_disagreement +): + get_keyring.return_value.priority = 0.5 + keyring_password_getter.return_value = "test_password" + profile.name = "profile_name" + profile.username = "test.username" + password.set_password(profile, "test_password") + assert not keyring_password_setter.call_count + + +def test_prompt_for_password_calls_getpass(getpass_function): + password.get_password_from_prompt() + assert getpass_function.call_count diff --git a/tests/test_profile.py b/tests/test_profile.py new file mode 100644 index 000000000..d84813918 --- /dev/null +++ b/tests/test_profile.py @@ -0,0 +1,271 @@ +import pytest + +import code42cli.profile as cliprofile +from .conftest import create_mock_profile +from .conftest import MockSection +from code42cli.cmds.search.cursor_store import AlertCursorStore +from code42cli.cmds.search.cursor_store import AuditLogCursorStore +from code42cli.cmds.search.cursor_store import FileEventCursorStore +from code42cli.config 
import ConfigAccessor +from code42cli.config import NoConfigProfileError +from code42cli.errors import Code42CLIError + + +@pytest.fixture +def config_accessor(mocker): + mock = mocker.MagicMock(spec=ConfigAccessor, name="Config Accessor") + attr = mocker.patch("code42cli.profile.config_accessor", mock) + return attr + + +@pytest.fixture +def password_setter(mocker): + return mocker.patch("code42cli.password.set_password") + + +@pytest.fixture +def password_getter(mocker): + return mocker.patch("code42cli.password.get_stored_password") + + +@pytest.fixture +def password_deleter(mocker): + return mocker.patch("code42cli.password.delete_password") + + +class TestCode42Profile: + def test_get_password_when_is_none_returns_password_from_getpass( + self, mocker, password_getter + ): + password_getter.return_value = None + mock_getpass = mocker.patch("code42cli.password.get_password_from_prompt") + mock_getpass.return_value = "Test Password" + actual = create_mock_profile().get_password() + assert actual == "Test Password" + + def test_get_password_return_password_from_password_get_password( + self, password_getter + ): + password_getter.return_value = "Test Password" + actual = create_mock_profile().get_password() + assert actual == "Test Password" + + def test_authority_url_returns_expected_value(self): + mock_profile = create_mock_profile() + assert mock_profile.authority_url == "example.com" + + def test_name_returns_expected_value(self): + mock_profile = create_mock_profile() + assert mock_profile.name == "Test Profile Name" + + def test_username_returns_expected_value(self): + mock_profile = create_mock_profile() + assert mock_profile.username == "foo" + + def test_ignore_ssl_errors_returns_expected_value(self): + mock_profile = create_mock_profile() + assert mock_profile.ignore_ssl_errors == "True" + + def test_use_v2_file_events_returns_expected_value(self): + mock_profile = create_mock_profile() + assert mock_profile.use_v2_file_events == "False" + + def 
test_api_client_auth_returns_expected_value(self): + mock_profile = create_mock_profile() + assert mock_profile.api_client_auth == "False" + + +def test_get_profile_returns_expected_profile(config_accessor): + mock_section = MockSection("testprofilename") + config_accessor.get_profile.return_value = mock_section + profile = cliprofile.get_profile("testprofilename") + assert profile.name == "testprofilename" + + +def test_get_profile_when_config_accessor_raises_cli_error(config_accessor): + config_accessor.get_profile.side_effect = NoConfigProfileError() + with pytest.raises(Code42CLIError): + cliprofile.get_profile("testprofilename") + + +def test_default_profile_exists_when_exists_returns_true(config_accessor): + mock_section = MockSection("testprofilename") + config_accessor.get_profile.return_value = mock_section + assert cliprofile.default_profile_exists() + + +def test_default_profile_exists_when_not_exists_returns_false(config_accessor): + mock_section = MockSection(ConfigAccessor.DEFAULT_VALUE) + config_accessor.get_profile.return_value = mock_section + assert not cliprofile.default_profile_exists() + + +def test_validate_default_profile_prints_set_default_help_when_no_valid_default_but_another_profile_exists( + capsys, config_accessor +): + config_accessor.get_profile.side_effect = NoConfigProfileError() + config_accessor.get_all_profiles.return_value = [MockSection("thisprofilexists")] + with pytest.raises(Code42CLIError): + cliprofile.validate_default_profile() + capture = capsys.readouterr() + assert "No default profile set." in capture.out + + +def test_validate_default_profile_prints_create_profile_help_when_no_valid_default_and_no_other_profiles_exists( + capsys, config_accessor +): + config_accessor.get_profile.side_effect = NoConfigProfileError() + config_accessor.get_all_profiles.return_value = [] + with pytest.raises(Code42CLIError): + cliprofile.validate_default_profile() + capture = capsys.readouterr() + assert "No existing profile." 
in capture.out + + +def test_profile_exists_when_exists_returns_true(config_accessor): + mock_section = MockSection("testprofilename") + config_accessor.get_profile.return_value = mock_section + assert cliprofile.profile_exists("testprofilename") + + +def test_profile_exists_when_not_exists_returns_false(config_accessor): + config_accessor.get_profile.side_effect = NoConfigProfileError() + assert not cliprofile.profile_exists("idontexist") + + +def test_switch_default_profile_switches_to_expected_profile(config_accessor): + mock_section = MockSection("switchtome") + config_accessor.get_profile.return_value = mock_section + cliprofile.switch_default_profile("switchtome") + config_accessor.switch_default_profile.assert_called_once_with("switchtome") + + +def test_create_profile_when_user_credentials_uses_expected_profile_values( + config_accessor, +): + config_accessor.get_profile.side_effect = NoConfigProfileError() + profile_name = "profilename" + server = "server" + username = "username" + ssl_errors_disabled = True + cliprofile.create_profile( + profile_name, server, username, ssl_errors_disabled, False, False + ) + config_accessor.create_profile.assert_called_once_with( + profile_name, server, username, ssl_errors_disabled, False, False + ) + + +def test_create_profile_when_api_client_uses_expected_profile_values(config_accessor): + config_accessor.get_profile.side_effect = NoConfigProfileError() + profile_name = "profilename" + server = "server" + api_client_id = "key-42" + ssl_errors_disabled = True + cliprofile.create_profile( + profile_name, server, api_client_id, ssl_errors_disabled, False, True + ) + config_accessor.create_profile.assert_called_once_with( + profile_name, server, api_client_id, ssl_errors_disabled, False, True + ) + + +def test_create_profile_if_profile_exists_exits( + mocker, cli_state, caplog, config_accessor +): + config_accessor.get_profile.return_value = mocker.MagicMock() + with pytest.raises(Code42CLIError): + 
cliprofile.create_profile("foo", "bar", "baz", True, False, False) + + +def test_get_all_profiles_returns_expected_profile_list(config_accessor): + config_accessor.get_all_profiles.return_value = [ + create_mock_profile("one"), + create_mock_profile("two"), + ] + profiles = cliprofile.get_all_profiles() + assert len(profiles) == 2 + assert profiles[0].name == "one" + assert profiles[1].name == "two" + + +def test_get_stored_password_returns_expected_password( + config_accessor, password_getter +): + mock_section = MockSection("testprofilename") + config_accessor.get_profile.return_value = mock_section + password_getter.return_value = "testpassword" + assert cliprofile.get_stored_password("testprofilename") == "testpassword" + + +def test_get_stored_password_uses_expected_profile_name( + config_accessor, password_getter +): + mock_section = MockSection("testprofilename") + config_accessor.get_profile.return_value = mock_section + test_profile = "testprofilename" + password_getter.return_value = "testpassword" + cliprofile.get_stored_password(test_profile) + assert password_getter.call_args[0][0].name == test_profile + + +def test_set_password_uses_expected_profile_name(config_accessor, password_setter): + mock_section = MockSection("testprofilename") + config_accessor.get_profile.return_value = mock_section + test_profile = "testprofilename" + cliprofile.set_password("newpassword", test_profile) + assert password_setter.call_args[0][0].name == test_profile + + +def test_set_password_uses_expected_password(config_accessor, password_setter): + mock_section = MockSection("testprofilename") + config_accessor.get_profile.return_value = mock_section + test_profile = "testprofilename" + cliprofile.set_password("newpassword", test_profile) + assert password_setter.call_args[0][1] == "newpassword" + + +def test_delete_profile_deletes_profile(config_accessor, mocker): + name = "deleteme" + profile = create_mock_profile(name) + mock_get_profile = 
mocker.patch("code42cli.profile._get_profile") + mock_get_profile.return_value = profile + cliprofile.delete_profile(name) + config_accessor.delete_profile.assert_called_once_with(name) + + +def test_delete_profile_deletes_profile_from_object_name(config_accessor, mocker): + expected = "deleteme - different name than the arg" + profile = create_mock_profile(expected) + mock_get_profile = mocker.patch("code42cli.profile._get_profile") + mock_get_profile.return_value = profile + cliprofile.delete_profile("deleteme") + config_accessor.delete_profile.assert_called_once_with(expected) + + +def test_delete_profile_deletes_password_if_exists( + config_accessor, mocker, password_getter, password_deleter +): + profile = create_mock_profile("deleteme") + mock_get_profile = mocker.patch("code42cli.profile._get_profile") + mock_get_profile.return_value = profile + password_getter.return_value = "i_exist" + cliprofile.delete_profile("deleteme") + password_deleter.assert_called_once_with(profile) + + +def test_delete_profile_clears_checkpoints(config_accessor, mocker): + profile = create_mock_profile("deleteme") + mock_get_profile = mocker.patch("code42cli.profile._get_profile") + mock_get_profile.return_value = profile + event_store = mocker.MagicMock(spec=FileEventCursorStore) + alert_store = mocker.MagicMock(spec=AlertCursorStore) + auditlog_store = mocker.MagicMock(spec=AuditLogCursorStore) + mock_get_cursor_store = mocker.patch( + "code42cli.profile.get_all_cursor_stores_for_profile" + ) + mock_get_cursor_store.return_value = [event_store, alert_store, auditlog_store] + cliprofile.delete_profile("deleteme") + assert event_store.clean.call_count == 1 + assert alert_store.clean.call_count == 1 + assert auditlog_store.clean.call_count == 1 diff --git a/tests/test_sdk_client.py b/tests/test_sdk_client.py new file mode 100644 index 000000000..07f795a1d --- /dev/null +++ b/tests/test_sdk_client.py @@ -0,0 +1,189 @@ +from io import StringIO + +import py42.sdk +import 
py42.settings.debug as debug +import pytest +from py42.exceptions import Py42UnauthorizedError +from requests import Response +from requests.exceptions import ConnectionError +from requests.exceptions import HTTPError +from requests.exceptions import RequestException + +from .conftest import create_mock_profile +from code42cli.errors import Code42CLIError +from code42cli.errors import LoggedCLIError +from code42cli.main import cli +from code42cli.options import CLIState +from code42cli.sdk_client import create_sdk + + +@pytest.fixture +def sdk_logger(mocker): + return mocker.patch("code42cli.sdk_client.logger") + + +@pytest.fixture +def mock_sdk_factory(mocker): + return mocker.patch("py42.sdk.from_local_account") + + +@pytest.fixture +def mock_api_client_sdk_factory(mocker): + return mocker.patch("py42.sdk.from_api_client") + + +@pytest.fixture +def mock_profile_with_password(): + profile = create_mock_profile() + + def mock_get_password(): + return "Test Password" + + profile.get_password = mock_get_password + return profile + + +@pytest.fixture +def requests_exception(mocker): + mock_response = mocker.MagicMock(spec=Response) + mock_exception = mocker.MagicMock(spec=RequestException) + mock_exception.response = mock_response + return mock_exception + + +def test_create_sdk_when_profile_has_ssl_errors_disabled_sets_py42_setting_and_prints_warning( + profile, mocker, capsys +): + mock_py42 = mocker.patch("code42cli.sdk_client.py42") + profile.ignore_ssl_errors = "True" + create_sdk(profile, False) + output = capsys.readouterr() + assert not mock_py42.settings.verify_ssl_certs + assert ( + f"Warning: Profile '{profile.name}' has SSL verification disabled. Adding certificate " + "verification is strongly advised." 
in output.err + ) + + +def test_create_sdk_when_py42_exception_occurs_raises_and_logs_cli_error( + sdk_logger, mock_sdk_factory, requests_exception, mock_profile_with_password +): + + mock_sdk_factory.side_effect = Py42UnauthorizedError(requests_exception) + + with pytest.raises(Code42CLIError) as err: + create_sdk(mock_profile_with_password, False) + + assert "Invalid credentials for user" in err.value.message + assert sdk_logger.log_error.call_count == 1 + assert "Failure in HTTP call" in str(sdk_logger.log_error.call_args[0][0]) + + +def test_create_sdk_when_connection_exception_occurs_raises_and_logs_cli_error( + sdk_logger, mock_sdk_factory, mock_profile_with_password +): + mock_sdk_factory.side_effect = ConnectionError("connection message") + + with pytest.raises(LoggedCLIError) as err: + create_sdk(mock_profile_with_password, False) + + assert "Problem connecting to" in err.value.message + assert sdk_logger.log_error.call_count == 1 + assert "connection message" in str(sdk_logger.log_error.call_args[0][0]) + + +def test_create_sdk_when_unknown_exception_occurs_raises_and_logs_cli_error( + sdk_logger, mock_sdk_factory, mock_profile_with_password +): + mock_sdk_factory.side_effect = Exception("test message") + + with pytest.raises(LoggedCLIError) as err: + create_sdk(mock_profile_with_password, False) + + assert "Unknown problem validating" in err.value.message + assert sdk_logger.log_error.call_count == 1 + assert "test message" in str(sdk_logger.log_error.call_args[0][0]) + + +def test_create_sdk_when_told_to_debug_turns_on_debug( + mock_sdk_factory, mock_profile_with_password +): + create_sdk(mock_profile_with_password, True) + assert py42.settings.debug.level == debug.DEBUG + + +def test_create_sdk_uses_given_credentials( + mock_sdk_factory, mock_profile_with_password +): + create_sdk(mock_profile_with_password, False) + mock_sdk_factory.assert_called_once_with( + "example.com", "foo", "Test Password", totp=None + ) + + 
+@pytest.mark.parametrize("proxy_env", ["HTTPS_PROXY", "https_proxy"]) +def test_create_sdk_uses_proxy_when_env_var_set( + mock_profile_with_password, monkeypatch, proxy_env +): + monkeypatch.setenv(proxy_env, "http://test.domain") + with pytest.raises(LoggedCLIError) as err: + create_sdk(mock_profile_with_password, False) + + assert "Unable to connect to proxy!" in str(err.value) + assert py42.settings.proxies["https"] == "http://test.domain" + + +def test_create_sdk_connection_when_2FA_login_config_detected_prompts_for_totp( + mocker, monkeypatch, mock_sdk_factory, capsys, mock_profile_with_password +): + monkeypatch.setattr("sys.stdin", StringIO("101010")) + response = mocker.MagicMock(spec=Response) + exception = Py42UnauthorizedError(HTTPError(response=response)) + exception.args = ("LoginConfig: LOCAL_2FA",) + mock_sdk_factory.side_effect = [exception, None] + create_sdk(mock_profile_with_password, False) + output = capsys.readouterr() + assert "Multi-factor authentication required. Enter TOTP:" in output.out + + +def test_create_sdk_connection_when_mfa_token_invalid_raises_expected_cli_error( + mocker, mock_sdk_factory, mock_profile_with_password +): + response = mocker.MagicMock(spec=Response) + exception = Py42UnauthorizedError(HTTPError(response=response)) + error_text = "SDK initialization failed, double-check username/password, and provide two-factor TOTP token if Multi-Factor Auth configured for your user. User LoginConfig: LOCAL_2FA" + exception.args = (error_text,) + mock_sdk_factory.side_effect = exception + with pytest.raises(Code42CLIError) as err: + create_sdk(mock_profile_with_password, False, totp="1234") + assert str(err.value) == "Invalid credentials or TOTP token for user foo." 
+ + +def test_create_sdk_connection_when_using_api_client_credentials_uses_api_client_function( + mock_api_client_sdk_factory, mock_profile_with_password +): + create_sdk( + mock_profile_with_password, + False, + password="api-client-secret-42", + api_client=True, + ) + mock_api_client_sdk_factory.assert_called_once_with( + "example.com", "foo", "api-client-secret-42" + ) + + +def test_totp_option_when_passed_is_passed_to_sdk_initialization( + mocker, profile, runner +): + mock_py42 = mocker.patch("code42cli.sdk_client.py42.sdk.from_local_account") + cli_state = CLIState() + totp = "123456" + profile.authority_url = "example.com" + profile.username = "user" + profile.get_password.return_value = "password" + cli_state._profile = profile + runner.invoke(cli, ["users", "list", "--totp", totp], obj=cli_state) + mock_py42.assert_called_once_with( + profile.authority_url, profile.username, "password", totp=totp + ) diff --git a/tests/test_util.py b/tests/test_util.py new file mode 100644 index 000000000..b2a8b2916 --- /dev/null +++ b/tests/test_util.py @@ -0,0 +1,171 @@ +import pytest + +from code42cli.util import _PADDING_SIZE +from code42cli.util import does_user_agree +from code42cli.util import find_format_width +from code42cli.util import format_string_list_to_columns +from code42cli.util import get_url_parts + +TEST_HEADER = {"key1": "Column 1", "key2": "Column 10", "key3": "Column 100"} + + +@pytest.fixture +def context_with_assume_yes(mocker, cli_state): + ctx = mocker.MagicMock() + ctx.obj = cli_state + cli_state.assume_yes = True + return mocker.patch("code42cli.util.get_current_context", return_value=ctx) + + +@pytest.fixture +def context_without_assume_yes(mocker, cli_state): + ctx = mocker.MagicMock() + ctx.obj = cli_state + cli_state.assume_yes = False + return mocker.patch("code42cli.util.get_current_context", return_value=ctx) + + +@pytest.fixture +def echo_output(mocker): + return mocker.patch("code42cli.util.echo") + + +_NAMESPACE = "code42cli.util" + + 
+def get_expected_row_width(max_col_len, max_width): + col_size = max_col_len + _PADDING_SIZE + num_cols = int(max_width / col_size) or 1 + return col_size * num_cols + + +def test_does_user_agree_when_user_says_y_returns_true( + mocker, context_without_assume_yes +): + mocker.patch("builtins.input", return_value="y") + assert does_user_agree("Test Prompt") + + +def test_does_user_agree_when_user_says_capital_y_returns_true( + mocker, context_without_assume_yes +): + mocker.patch("builtins.input", return_value="Y") + assert does_user_agree("Test Prompt") + + +def test_does_user_agree_when_user_says_n_returns_false( + mocker, context_without_assume_yes +): + mocker.patch("builtins.input", return_value="n") + assert not does_user_agree("Test Prompt") + + +def test_does_user_agree_when_assume_yes_argument_passed_returns_true_and_does_not_print_prompt( + context_with_assume_yes, capsys +): + result = does_user_agree("Test Prompt") + output = capsys.readouterr() + assert result + assert output.out == output.err == "" + + +def test_find_format_width_when_zero_records_sets_width_to_header_length(): + _, column_width = find_format_width([], TEST_HEADER) + assert column_width["key1"] == len(TEST_HEADER["key1"]) + assert column_width["key2"] == len(TEST_HEADER["key2"]) + assert column_width["key3"] == len(TEST_HEADER["key3"]) + + +def test_find_format_width_when_records_sets_width_to_greater_of_data_or_header_length(): + report = [ + {"key1": "test 1", "key2": "value xyz test", "key3": "test test test test"}, + {"key1": "1", "key2": "value xyz", "key3": "test test test test"}, + ] + _, column_width = find_format_width(report, TEST_HEADER) + assert column_width["key1"] == len(TEST_HEADER["key1"]) + assert column_width["key2"] == len(report[0]["key2"]) + assert column_width["key3"] == len(report[1]["key3"]) + + +def test_find_format_width_filters_keys_not_present_in_header(): + report = [ + {"key1": "test 1", "key2": "value xyz test", "key3": "test test test test"}, + {"key1": 
"1", "key2": "value xyz", "key3": "test test test test"}, + ] + header_with_subset_keys = {"key1": "Column 1", "key3": "Column 100"} + result, _ = find_format_width(report, header_with_subset_keys) + for item in result: + assert "key2" not in item.keys() + + +def test_format_string_list_to_columns_when_given_no_string_list_does_not_echo( + echo_output, +): + format_string_list_to_columns([], None) + format_string_list_to_columns(None, None) + assert not echo_output.call_count + + +def test_format_string_list_to_columns_when_not_given_max_uses_shell_size( + mocker, echo_output +): + terminal_size = mocker.patch("code42cli.util.shutil.get_terminal_size") + max_width = 30 + terminal_size.return_value = (max_width, None) # Cols, Rows + + columns = ["col1", "col2"] + format_string_list_to_columns(columns) + + printed_row = echo_output.call_args_list[0][0][0] + assert len(printed_row) == get_expected_row_width(4, max_width) + assert printed_row == "col1 col2 " + + +def test_format_string_list_to_columns_when_given_small_max_width_prints_one_column_per_row( + echo_output, +): + max_width = 5 + + columns = ["col1", "col2"] + format_string_list_to_columns(columns, max_width) + + expected_row_width = get_expected_row_width(4, max_width) + printed_row = echo_output.call_args_list[0][0][0] + assert len(printed_row) == expected_row_width + assert printed_row == "col1 " + + printed_row = echo_output.call_args_list[1][0][0] + assert len(printed_row) == expected_row_width + assert printed_row == "col2 " + + +def test_format_string_list_to_columns_uses_width_of_longest_string(echo_output): + max_width = 5 + + columns = ["col1", "col2_that_is_really_long"] + format_string_list_to_columns(columns, max_width) + + expected_row_width = get_expected_row_width( + len("col2_that_is_really_long"), max_width + ) + printed_row = echo_output.call_args_list[0][0][0] + assert len(printed_row) == expected_row_width + assert printed_row == "col1 " + + printed_row = 
echo_output.call_args_list[1][0][0] + assert len(printed_row) == expected_row_width + assert printed_row == "col2_that_is_really_long " + + +def test_url_parts(): + server, port = get_url_parts("localhost:3000") + assert server == "localhost" + assert port == 3000 + + server, port = get_url_parts("localhost") + assert server == "localhost" + assert port is None + + server, port = get_url_parts("127.0.0.1") + assert server == "127.0.0.1" + assert port is None diff --git a/tests/test_worker.py b/tests/test_worker.py new file mode 100644 index 000000000..4efe5b9b8 --- /dev/null +++ b/tests/test_worker.py @@ -0,0 +1,27 @@ +import time + +from code42cli.worker import Worker +from code42cli.worker import WorkerStats + + +class TestWorkerStats: + def test_successes_when_should_be_negative_returns_zero(self): + stats = WorkerStats(100) + stats._total_errors = 101 + assert not stats.total_successes + + +class TestWorker: + def test_is_async(self): + worker = Worker(5, 2) + demo_ls = [] + + def async_func(): + # Wait so that the line under `do_async` happens first, proving that it's async + time.sleep(0.01) + demo_ls.append(2) + + worker.do_async(async_func) + demo_ls.append(1) + worker.wait() + assert demo_ls == [1, 2] diff --git a/tox.ini b/tox.ini new file mode 100644 index 000000000..b69f3de95 --- /dev/null +++ b/tox.ini @@ -0,0 +1,56 @@ +[tox] +envlist = + py{312,311,310,39} + docs + style +skip_missing_interpreters = true + +[testenv] +deps = + pytest == 7.2.0 + pytest-mock == 3.10.0 + pytest-cov == 4.0.0 + pandas >= 1.1.3 + pexpect == 4.8.0 + setuptools >= 66.0.0 + +commands = + # -v: verbose + # -rsxX: show extra test summary info for (s)skipped, (x)failed, (X)passed + # -l: show locals in tracebacks + # --tb=short: short traceback print mode + # --strict: marks not registered in configuration file raise errors + # --ignore=tests/integration: exclude integration tests + pytest --cov=code42cli --cov-report xml -v -rsxX -l --tb=short --strict --ignore=tests/integration 
+ +[testenv:docs] +deps = + sphinx == 8.1.3 + myst-parser == 4.0.0 + sphinx_rtd_theme == 3.0.2 + sphinx-click +whitelist_externals = bash + +commands = + sphinx-build -W -b html -d "{envtmpdir}/doctrees" docs "{envtmpdir}/html" + bash -c "open {envtmpdir}/html/index.html || true" + +[testenv:style] +deps = pre-commit +skip_install = true +commands = pre-commit run --all-files --show-diff-on-failure + +[testenv:nightly] +deps = + pytest == 7.2.0 + pytest-mock == 3.10.0 + pytest-cov == 4.0.0 + git+https://github.com/code42/py42.git@main#egg=py42 + +[testenv:integration] +commands = + pytest -v -rsxX -l --tb=short --strict -m integration + +[pytest] +markers = + integration: mark test as a integration test.