From 46786ba579e71ec764f33015e3b46699ecc070f5 Mon Sep 17 00:00:00 2001
From: Harsh Mishra
Date: Thu, 10 Jul 2025 13:12:44 +0530
Subject: [PATCH 1/4] start work

---
 .github/workflows/docs-parity-updates.yml | 107 ++++++++++++++++++++++
 1 file changed, 107 insertions(+)
 create mode 100644 .github/workflows/docs-parity-updates.yml

diff --git a/.github/workflows/docs-parity-updates.yml b/.github/workflows/docs-parity-updates.yml
new file mode 100644
index 0000000..f1b637d
--- /dev/null
+++ b/.github/workflows/docs-parity-updates.yml
@@ -0,0 +1,107 @@
+name: Update Parity Docs
+
+on:
+  schedule:
+    - cron: '0 5 * * MON'
+  workflow_dispatch:
+    inputs:
+      targetBranch:
+        required: false
+        type: string
+        default: 'master'
+  pull_request:
+    branches:
+      - master
+
+jobs:
+  update-parity-coverage-docs:
+    name: Update Parity Docs
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout docs
+        uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+          path: docs
+          ref: ${{ github.event.inputs.targetBranch || 'master' }}
+
+      - name: Set up system-wide dependencies
+        run: |
+          sudo apt-get update && sudo apt-get install -y jq wget
+
+      - name: Set up Python 3.11
+        id: setup-python
+        uses: actions/setup-python@v5
+        with:
+          python-version: "3.11"
+
+      - name: Setup Node
+        uses: actions/setup-node@v4
+        with:
+          node-version: '20'
+
+      - name: Download scripts from meta repository
+        run: |
+          curl -o /tmp/get_latest_github_metrics.sh -L https://raw.githubusercontent.com/localstack/meta/main/scripts/get_latest_github_metrics.sh -H 'Accept: application/vnd.github.v3.raw'
+          chmod +x /tmp/get_latest_github_metrics.sh
+
+      - name: Download metrics data from Moto Integration test pipeline (GitHub)
+        working-directory: docs
+        run: /tmp/get_latest_github_metrics.sh ./target main
+        env:
+          GITHUB_TOKEN: ${{ secrets.PRO_ACCESS_TOKEN }}
+          REPOSITORY_NAME: localstack-moto-test-coverage
+          ARTIFACT_ID: test-metrics
+          WORKFLOW: moto-integration-tests
+          PREFIX_ARTIFACT: moto-integration-test
+          FILTER_SUCCESS: 0
+
+      - name: Download metrics data from Terraform Integration test pipeline (GitHub)
+        working-directory: docs
+        run: /tmp/get_latest_github_metrics.sh ./target main
+        env:
+          GITHUB_TOKEN: ${{ secrets.PRO_ACCESS_TOKEN }}
+          REPOSITORY_NAME: localstack-terraform-test
+          ARTIFACT_ID: test-metrics
+          WORKFLOW: "Terraform Tests"
+          FILTER_SUCCESS: 0
+
+      - name: Download metrics data from Pro pipeline (GitHub)
+        working-directory: docs
+        run: /tmp/get_latest_github_metrics.sh ./target master
+        env:
+          GITHUB_TOKEN: ${{ secrets.PRO_ACCESS_TOKEN }}
+          REPOSITORY_NAME: localstack-ext
+          ARTIFACT_ID: parity-metric-ext-raw-*
+          WORKFLOW: "AWS / Build, Test, Push"
+          PREFIX_ARTIFACT: pro-integration-test
+
+      - name: Download coverage (capture-notimplemented) data from Pro pipeline (GitHub)
+        working-directory: docs
+        run: /tmp/get_latest_github_metrics.sh ./target master
+        env:
+          GITHUB_TOKEN: ${{ secrets.PRO_ACCESS_TOKEN }}
+          REPOSITORY_NAME: localstack-ext
+          ARTIFACT_ID: capture-notimplemented-pro
+          WORKFLOW: "AWS / Build, Test, Push"
+          RESOURCE_FOLDER: "metrics-implementation-details"
+
+      - name: Download metrics data from Community pipeline (GitHub)
+        working-directory: docs
+        run: /tmp/get_latest_github_metrics.sh ./target master
+        env:
+          GITHUB_TOKEN: ${{ secrets.PRO_ACCESS_TOKEN }}
+          REPOSITORY_NAME: localstack
+          ARTIFACT_ID: parity-metric-raw-amd*
+          WORKFLOW: "AWS / Build, Test, Push"
+          PREFIX_ARTIFACT: community-integration-test
+
+      - name: Download coverage (capture-notimplemented) data from Community pipeline (GitHub)
+        working-directory: docs
+        run: /tmp/get_latest_github_metrics.sh ./target master
+        env:
+          GITHUB_TOKEN: ${{ secrets.PRO_ACCESS_TOKEN }}
+          REPOSITORY_NAME: localstack
+          ARTIFACT_ID: capture-notimplemented
+          WORKFLOW: "AWS / Build, Test, Push"
+          RESOURCE_FOLDER: "metrics-implementation-details/community"
\ No newline at end of file

From d970477a16669e0d1e7ad2f412b1ec0a70a93da8 Mon Sep 17 00:00:00 2001
From: Harsh Mishra
Date: Thu, 10 Jul 2025 13:33:38 +0530
Subject: [PATCH 2/4] try the script out

---
 .github/workflows/docs-parity-updates.yml |  14 +-
 .gitignore                                |   3 +
 scripts/create_data_coverage.py           | 464 ++++++++++++++++++++++
 3 files changed, 479 insertions(+), 2 deletions(-)
 create mode 100644 scripts/create_data_coverage.py

diff --git a/.github/workflows/docs-parity-updates.yml b/.github/workflows/docs-parity-updates.yml
index f1b637d..df28602 100644
--- a/.github/workflows/docs-parity-updates.yml
+++ b/.github/workflows/docs-parity-updates.yml
@@ -23,7 +23,7 @@ jobs:
         with:
           fetch-depth: 0
           path: docs
-          ref: ${{ github.event.inputs.targetBranch || 'master' }}
+          # ref: ${{ github.event.inputs.targetBranch || 'master' }}
 
       - name: Set up system-wide dependencies
         run: |
@@ -104,4 +104,14 @@ jobs:
           REPOSITORY_NAME: localstack
           ARTIFACT_ID: capture-notimplemented
           WORKFLOW: "AWS / Build, Test, Push"
-          RESOURCE_FOLDER: "metrics-implementation-details/community"
\ No newline at end of file
+          RESOURCE_FOLDER: "metrics-implementation-details/community"
+
+      - name: Create Parity Coverage Docs
+        working-directory: docs
+        run: |
+          ls -la
+          python3 scripts/create_data_coverage.py -i target/metrics-implementation-details -r target/metrics-raw -o target/updated_coverage -s src/data/coverage/service_display_name.json
+          # Move the resulting markdown file to the docs repo
+          # cp -r target/updated_coverage/md/* content/en/references/coverage && rm -R target/updated_coverage/md/
+          mv -f target/updated_coverage/data/*.json src/data/coverage
+          git status
diff --git a/.gitignore b/.gitignore
index a79a8f3..6412b33 100644
--- a/.gitignore
+++ b/.gitignore
@@ -33,3 +33,6 @@ __pycache__/
 
 # diff check
 resources/diff-check.log
+
+# Parity coverage
+target/
diff --git a/scripts/create_data_coverage.py b/scripts/create_data_coverage.py
new file mode 100644
index 0000000..78e0cf0
--- /dev/null
+++ b/scripts/create_data_coverage.py
@@ -0,0 +1,464 @@
+"""
+Script to generate coverage md-files for services and the related data-templates
+"""
+import csv
+from pathlib import Path
+import json
+from json import JSONDecodeError
+import shutil
+from operator import itemgetter
+
+# placeholder for the autogenerated md files (used for each service)
+DOCS_MD = """---
+title: "{service_name_title}"
+linkTitle: "{service}"
+description: >
+    {description}
+hide_readingtime: true
+---
+
+## Coverage Overview
+{{{{< localstack_coverage_table service="{service}" >}}}}
+
+## Testing Details
+{{{{< localstack_coverage_details service="{service}" >}}}}
+"""
+
+
+def create_markdown_files_for_services(
+    target_dir: str,
+    services: list[str],
+    service_lookup_details: str = None,
+    delete_if_exists: bool = False,
+):
+    """
+    Creates markdown files for each service, which in turn use shortcodes + templates to construct the site
+    :param target_dir: directory where the md-files should be stored
+    :param services: list of services
+    :param service_lookup_details: path to service_display_name.json that contains "long_name" and "short_name" for each service.
+        If set, the names will be used for the description of the md. Else we use the original "service" name.
+    :param delete_if_exists: checks if the target_dir exists and deletes it before creating new md-files. default: False
+    """
+    service_lookup = Path(service_lookup_details)
+    service_info = {}
+    if service_lookup.exists() and service_lookup.is_file():
+        with open(service_lookup, "r") as f:
+            service_info = json.load(f)
+
+    for service in services:
+        dirpath = Path(target_dir).joinpath(f"coverage_{service}")
+        if delete_if_exists:
+            if dirpath.exists() and dirpath.is_dir():
+                shutil.rmtree(dirpath)
+
+        dirpath.mkdir(parents=True, exist_ok=True)
+
+        # default names in case there is no service_name_details
+        service_name_title = service
+        description = f"Implementation details for API {service}"
+
+        if service_name_details := service_info.get(service, {}):
+            service_name_title = service_name_details.get("long_name", service)
+            if service_name_title and (short_name := service_name_details.get("short_name")):
+                service_name_title = f"{short_name} ({service_name_title})"
+
+
+        file_name = dirpath.joinpath("index.md")
+        with open(file_name, "w") as fd:
+            fd.write(DOCS_MD.format(service=service, description=description, service_name_title=service_name_title))
+
+
+def create_data_templates_for_service(
+    target_dir: str, metrics: dict, service: str, delete_if_exists: bool = False
+):
+    """
+    Creates the data-template for a service.
+    :param target_dir: the directory where the data-template will be stored
+    :param metrics: the collected metrics for the service
+    :param service: name of the service
+    :param delete_if_exists: checks if the target_dir exists and deletes it before creating the new data-template files. default: False
+    """
+    output = {}
+    details = metrics.pop("details", {})
+    operations = []
+
+    community_support = False
+    pro_support = False
+    for key, value in metrics.items():
+        operations.append({key: value})
+
+        # check if the service supports community and/or pro:
+        if not community_support and value.get("availability") == "community":
+            community_support = True
+        if not pro_support and value.get("availability") == "pro":
+            pro_support = True
+
+    output["service"] = service
+    if pro_support:
+        output["pro_support"] = True
+    if community_support:
+        output["community_support"] = True
+
+    output["operations"] = operations
+
+    # sort the details
+    for op_details, params in details.items():
+        # alphabetically by parameters
+        details[op_details] = dict(sorted(params.items()))
+        for param, test_suites in details[op_details].items():
+            # alphabetically by test-suite (ls-community/ls-pro)
+            details[op_details][param] = dict(sorted(test_suites.items()))
+            for test_suite, test_list in details[op_details][param].items():
+                # by test details, e.g. first response code, then node_id
+                details[op_details][param][test_suite] = sorted(
+                    test_list, key=itemgetter("response", "node_id")
+                )
+
+    # sort alphabetically by operation-name
+    output["details"] = dict(sorted(details.items()))
+
+    # write data-template file
+    dirpath = Path(target_dir)
+    if delete_if_exists:
+        if dirpath.exists() and dirpath.is_dir():
+            shutil.rmtree(dirpath)
+
+    dirpath.mkdir(parents=True, exist_ok=True)
+
+    file_name = dirpath.joinpath(f"{service}.json")
+    with open(file_name, "w") as fd:
+        json.dump(output, fd, indent=2)
+
+
+def main(
+    path_to_implementation_details: str,
+    path_to_raw_metrics: str,
+    target_dir: str,
+    service_lookup_details: str = None,
+):
+    impl_details = {}
+    # read the implementation-details for pro + community first and generate a dict
+    # with information about all services and operations, indicating whether they are implemented and whether they are available only in pro:
+    # {"service_name":
+    #     {
+    #       "operation_name": {"implemented": True, "pro": False}
+    #     }
+    # }
+    with open(
+        f"{path_to_implementation_details}/pro/implementation_coverage_full.csv",
+        mode="r",
+    ) as file:
+        # check pro implementation details first
+        csv_reader = csv.DictReader(file)
+        for row in csv_reader:
+            service_name = row["service"]
+            if service_name == "sqs-query":
+                # we currently have "sqs" + "sqs-query" endpoints because of different protocols
+                # the resulting coverage should not care about this though
+                continue
+            service = impl_details.setdefault(service_name, {})
+            service[row["operation"]] = {
+                "implemented": row["is_implemented"] == "True",
+                "pro": True,
+            }
+    with open(
+        f"{path_to_implementation_details}/community/implementation_coverage_full.csv",
+        mode="r",
+    ) as file:
+        csv_reader = csv.DictReader(file)
+        for row in csv_reader:
+            service_name = row["service"]
+            if service_name == "sqs-query":
+                # we currently have "sqs" + "sqs-query" endpoints because of different protocols
+                # the resulting coverage should not care about this though
+                continue
+            service = impl_details.setdefault(service_name, {})
+            # update all operations that are available in community
+            if row["is_implemented"] == "True":
+                service.setdefault(row["operation"], {"implemented": True})
+                service[row["operation"]]["pro"] = False
+
+    services = sorted(impl_details.keys())
+
+    # create the coverage-docs
+    services = sorted(impl_details.keys())
+    create_markdown_files_for_services(
+        target_dir=target_dir + "/md",
+        services=services,
+        service_lookup_details=service_lookup_details,
+    )
+
+    for service in services:
+        # special handling for rds/neptune/docdb: the services "neptune" + "docdb" are recognized as "rds" calls
+        check_service = service
+        if service in ["neptune", "docdb"]:
+            check_service = "rds"
+
+        services_of_interest = [check_service]
+        if service == "sqs":
+            # also collect all metrics for "sqs-query" and add them to the service
+            services_of_interest.append("sqs-query")
+
+        # now check the actual recorded test data and map the information
+        recorded_metrics = aggregate_recorded_raw_data(
+            base_dir=path_to_raw_metrics,
+            operations=impl_details.get(service),
+            services_of_interest=services_of_interest,
+        )
+
+        create_data_templates_for_service(
+            target_dir + "/data", recorded_metrics, service
+        )
+
+
+def _init_metric_recorder(operations_dict: dict):
+    """
+    creates the base structure to collect raw data from the operations_dict
+    :param operations_dict: dict of operations with their implementation details, as collected in main()
+    """
+    operations = {}
+
+    for operation, details in operations_dict.items():
+        availability = "pro" if details["pro"] else "community"
details["pro"] else "community" + + if not details["implemented"]: + availability = "" + op_attributes = { + "implemented": details["implemented"], + "availability": availability, + "internal_test_suite": False, + "external_test_suite": False, + "terraform_test_suite": False, + "aws_validated": False, + "snapshot_tested": False, + "snapshot_skipped": "", + } + operations[operation] = op_attributes + + return operations + + +def aggregate_recorded_raw_data( + base_dir: str, operations: dict, services_of_interest: list[str] +): + """ + collects all the raw metric data and maps them in a dict with information about the service, and a "details" + that includes details about any related test. + {"operation-name": + { + "implemented": true, + "availability": "community", + "internal_test_suite": false, + "external_test_suite": true, + "aws_validated": false, + "terraform_test_suite": false, + "snapshot_tested": false, + "snapshot_skipped": "" + }, + .... + "details": + {"operation-name": + {"parameters": { + "ls_community": [ + { + "node_id": "test-node-id", + "test": "short-display-name", + "response": "200", + "error": "", + "snapshot_skipped": "", + "aws_validated": True, + "snapshot_tested: True, + "origin": "external" + } + ], + "ls_pro": [...] + } + } + } + } + :param base_dir: directory where the raw-metrics csv-files are stored + :param operations: dict + :param service: service of interest + :returns: dict with details about invoked operations + """ + # contains internal + external calls + recorded_data = _init_metric_recorder(operations) + pathlist = Path(base_dir).rglob("*.csv") + for path in pathlist: + test_source = path.stem + # print(f"checking {str(path)}") + with open(path, "r") as csv_obj: + csv_dict_reader = csv.DictReader(csv_obj) + for metric in csv_dict_reader: + service = metric.get("service") + if service not in services_of_interest: + continue + + node_id = metric.get("node_id") or metric.get("test_node_id") + if not node_id: + # some records do not have a node-id -> relates to requests in the background between tests + continue + + # skip tests are marked as xfail + if str(metric.get("xfail", "")).lower() == "true": + continue + + op_name = metric.get("operation") + op_record = recorded_data.get(op_name) + if not op_record: + # some operations are only "phantoms" (e.g. 
+                    # and for docdb/neptune not all rds operations are available either -> we skip in that case
+                    #print(
+                    #    f"---> operation {metric.get('service')}.{metric.get('operation')} was not found"
+                    #)
+                    continue
+
+                internal_test = False
+                external_test = False
+
+                if test_source.startswith("community"):
+                    test_node_origin = "LocalStack Community"
+                    internal_test = True
+                    source = "ls_community"
+                elif test_source.startswith("pro"):
+                    test_node_origin = "LocalStack Pro"
+                    internal_test = True
+                    source = "ls_pro"
+                else:
+                    external_test = True
+
+                if external_test and metric.get("response_code") in ["500", "501"]:
+                    # some external tests (e.g. seen for terraform) seem to succeed even though single operation calls fail
+                    # we do not include those as "passed tests"
+                    print(f"skipping {service}.{op_name}: response_code {metric.get('response_code')} ({test_source})")
+                    continue
+
+                terraform_validated = test_source.startswith("terraform")
+                if internal_test and not op_record.get("internal_test_suite"):
+                    op_record["internal_test_suite"] = True
+                if external_test and not op_record.get("external_test_suite"):
+                    op_record["external_test_suite"] = True
+
+                aws_validated = (
+                    str(metric.get("aws_validated", "false")).lower() == "true"
+                )
+
+                # snapshot_tested is set if the test uses the snapshot-fixture + does not skip everything
+                # (pytest.marker.skip_snapshot_verify)
+                snapshot_tested = (
+                    str(metric.get("snapshot", "false")).lower() == "true"
+                    and metric.get("snapshot_skipped_paths", "") != "all"
+                )
+
+                if snapshot_tested and not aws_validated:
+                    # the test did not have the marker aws_validated, but as it is snapshot_tested we can assume aws-validation
+                    aws_validated = True
+
+                if not op_record.get("snapshot_tested") and snapshot_tested:
+                    op_record["snapshot_tested"] = True
+                    op_record["aws_validated"] = True
+
+                if not op_record.get("aws_validated") and aws_validated:
+                    op_record["aws_validated"] = True
+
+                if not op_record.get("terraform_test_suite") and terraform_validated:
+                    op_record["terraform_test_suite"] = True
+
+                if internal_test and not op_record["implemented"]:
+                    print(f"WARN: {service}.{op_name} classified as 'not implemented', but found a test calling it: ({source}) {node_id}")
+                    op_record["implemented"] = True
+                    op_record["availability"] = "pro" if source == "ls_pro" else "community"
+
+                # test details are currently only considered for the internal test suite
+                # TODO might change when we include terraform test results
+                if not internal_test:
+                    continue
+
+                # collect test details
+                details = recorded_data.setdefault("details", {})
+                # one dict for each operation
+                details_tests = details.setdefault(op_name, {})
+
+                # grouped by parameters
+                params = metric.get("parameters", "None").split(",")
+                params.sort()
+                parameters = ", ".join(params)
+                if not parameters:
+                    parameters = "- (without any parameters)"
+
+                param_test_details = details_tests.setdefault(parameters, {})
+
+                # separate lists for source ("ls_community" and "ls_pro")
+                test_list = param_test_details.setdefault(source, [])
+
+                if param_exception := metric.get("exception", ""):
+                    if param_exception == "CommonServiceException":
+                        # try to get more details about the CommonServiceException from the response
+                        try:
+                            data = json.loads(metric.get("response_data", "{}"))
+                            param_exception = data.get("__type", param_exception)
+                        except JSONDecodeError:
+                            # in this case we just keep the original "CommonServiceException" information
+                            pass
+
+                # get simple test name (will be shown on coverage page)
+                if node_id.endswith("]"):
+                    # workaround for tests that have a "::" as part of a parametrized test name
+                    # e.g. tests/integration/mytest.py::SomeTest::test_and_or_functions[Fn::Or-0-0-False]
+                    tmp = node_id[0 : node_id.rfind("[")].split("::")[-1]
+                    simple_test_name = tmp + node_id[node_id.rfind("[") :]
+                else:
+                    simple_test_name = node_id.split("::")[-1]
+                test_detail = {
+                    "node_id": f"{test_node_origin}: {node_id}",
+                    "test": simple_test_name,
+                    "response": metric.get("response_code", -1),
+                    "error": param_exception,
+                    "snapshot_skipped": metric.get("snapshot_skipped_paths", ""),
+                    "aws_validated": aws_validated,
+                    "snapshot_tested": snapshot_tested,
+                    "origin": metric.get("origin", ""),
+                }
+                if test_detail not in test_list:
+                    # avoid duplicates
+                    test_list.append(test_detail)
+
+    return recorded_data
+
+
+if __name__ == "__main__":
+    import argparse
+
+    argParser = argparse.ArgumentParser()
+    argParser.add_argument("-i", "--implementation-details", required=True, help="path to implementation details")
+    argParser.add_argument("-r", "--raw-metrics", required=True, help="path to raw metrics")
+    argParser.add_argument("-o", "--output-dir", required=True, help="directory where the generated files will be stored")
+    argParser.add_argument("-s", "--service-details-json", help="path to service_display_name.json")
+
+    args = argParser.parse_args()
+
+    main(
+        path_to_implementation_details=args.implementation_details,
+        path_to_raw_metrics=args.raw_metrics,
+        target_dir=args.output_dir,
+        service_lookup_details=args.service_details_json,
+    )

From 21326a42ba895c54e4bca3f34f97c33f8b17abd7 Mon Sep 17 00:00:00 2001
From: Harsh Mishra
Date: Thu, 10 Jul 2025 13:59:51 +0530
Subject: [PATCH 3/4] revamp script

---
 .github/workflows/docs-parity-updates.yml |  2 +-
 scripts/create_data_coverage.py           | 68 -----------------------
 2 files changed, 1 insertion(+), 69 deletions(-)

diff --git a/.github/workflows/docs-parity-updates.yml b/.github/workflows/docs-parity-updates.yml
index df28602..6374d0e 100644
--- a/.github/workflows/docs-parity-updates.yml
+++ b/.github/workflows/docs-parity-updates.yml
@@ -110,7 +110,7 @@ jobs:
         working-directory: docs
         run: |
           ls -la
-          python3 scripts/create_data_coverage.py -i target/metrics-implementation-details -r target/metrics-raw -o target/updated_coverage -s src/data/coverage/service_display_name.json
+          python3 -m scripts.create_data_coverage -i target/metrics-implementation-details -r target/metrics-raw -o target/updated_coverage -s src/data/coverage/service_display_name.json
           # Move the resulting markdown file to the docs repo
           # cp -r target/updated_coverage/md/* content/en/references/coverage && rm -R target/updated_coverage/md/
           mv -f target/updated_coverage/data/*.json src/data/coverage
           git status
diff --git a/scripts/create_data_coverage.py b/scripts/create_data_coverage.py
index 78e0cf0..99d24e3 100644
--- a/scripts/create_data_coverage.py
+++ b/scripts/create_data_coverage.py
@@ -11,66 +11,6 @@ import shutil
 from operator import itemgetter
 
-# placeholder for the autogenerated md files (used for each service)
-DOCS_MD = """---
-title: "{service_name_title}"
-linkTitle: "{service}"
-description: >
-    {description}
-hide_readingtime: true
----
-
-## Coverage Overview
-{{{{< localstack_coverage_table service="{service}" >}}}}
-
-## Testing Details
-{{{{< localstack_coverage_details service="{service}" >}}}}
-"""
-
-
-def create_markdown_files_for_services(
-    target_dir: str,
-    services: list[str],
-    service_lookup_details: str = None,
-    delete_if_exists: bool = False,
-):
-    """
-    Creates markdown files for each service, which in turn use shortcodes + templates to construct the site
-    :param target_dir: directory where the md-files should be stored
-    :param services: list of services
-    :param service_lookup_details: path to service_display_name.json that contains "long_name" and "short_name" for each service.
-        If set, the names will be used for the description of the md. Else we use the original "service" name.
-    :param delete_if_exists: checks if the target_dir exists and deletes it before creating new md-files. default: False
-    """
-    service_lookup = Path(service_lookup_details)
-    service_info = {}
-    if service_lookup.exists() and service_lookup.is_file():
-        with open(service_lookup, "r") as f:
-            service_info = json.load(f)
-
-    for service in services:
-        dirpath = Path(target_dir).joinpath(f"coverage_{service}")
-        if delete_if_exists:
-            if dirpath.exists() and dirpath.is_dir():
-                shutil.rmtree(dirpath)
-
-        dirpath.mkdir(parents=True, exist_ok=True)
-
-        # default names in case there is no service_name_details
-        service_name_title = service
-        description = f"Implementation details for API {service}"
-
-        if service_name_details := service_info.get(service, {}):
-            service_name_title = service_name_details.get("long_name", service)
-            if service_name_title and (short_name := service_name_details.get("short_name")):
-                service_name_title = f"{short_name} ({service_name_title})"
-
-
-        file_name = dirpath.joinpath("index.md")
-        with open(file_name, "w") as fd:
-            fd.write(DOCS_MD.format(service=service, description=description, service_name_title=service_name_title))
-
-
 def create_data_templates_for_service(
@@ -183,14 +123,6 @@ def main(
 
     services = sorted(impl_details.keys())
 
-    # create the coverage-docs
-    services = sorted(impl_details.keys())
-    create_markdown_files_for_services(
-        target_dir=target_dir + "/md",
-        services=services,
-        service_lookup_details=service_lookup_details,
-    )
-
     for service in services:

From 5158b2577dbc1efc6c2832edaf1e2933dcfc4a57 Mon Sep 17 00:00:00 2001
From: Harsh Mishra
Date: Thu, 10 Jul 2025 14:43:27 +0530
Subject: [PATCH 4/4] finish up

---
 .github/bot_templates/PARITY_COVERAGE_DOCS_PR.md | 18 +++++++++
 .github/workflows/docs-parity-updates.yml        | 40 +++++++++++++++----
 2 files changed, 50 insertions(+), 8 deletions(-)
 create mode 100644 .github/bot_templates/PARITY_COVERAGE_DOCS_PR.md

diff --git a/.github/bot_templates/PARITY_COVERAGE_DOCS_PR.md b/.github/bot_templates/PARITY_COVERAGE_DOCS_PR.md
new file mode 100644
index 0000000..8832539
--- /dev/null
+++ b/.github/bot_templates/PARITY_COVERAGE_DOCS_PR.md
@@ -0,0 +1,18 @@
+# 📖 Parity Metrics Docs Update Report 📖
+This PR has been automatically generated to update the AWS parity coverage docs.
+It aggregates the latest parity coverage test results from our [test pipeline on GitHub](https://github.com/localstack/localstack/actions/workflows/aws-main.yml?query=branch%3Amaster) as well as from our Pro integration tests.
+
+## 👷🏽 Handle this PR
+The following options describe how to interact with this PR / the auto-update:
+
+✔️ **Accept Changes**
+If the changes look good, just squash-merge the PR and delete the source branch.
+
+🚫 **Ignore Changes**
+If you want to ignore the changes in this PR, just close it and *do not delete* the source branch. The PR will not be reopened, and no new PR will be created, for as long as the generated content does not change (or until the branch is deleted). As soon as there are new changes, a new PR will be created.
+
+✏️ **Adapt Changes**
+*Don't do this.* The coverage docs are auto-generated. If you decide that they should look different, change the generation script instead.
+
+⏸️ **Pause Updates**
+Remove the cron-schedule trigger from the GitHub Action workflow that creates these PRs. The action can then still be triggered manually, but it will no longer run automatically.
diff --git a/.github/workflows/docs-parity-updates.yml b/.github/workflows/docs-parity-updates.yml
index 6374d0e..778363d 100644
--- a/.github/workflows/docs-parity-updates.yml
+++ b/.github/workflows/docs-parity-updates.yml
@@ -9,9 +9,6 @@ on:
         required: false
         type: string
         default: 'master'
-  pull_request:
-    branches:
-      - master
 
 jobs:
   update-parity-coverage-docs:
@@ -23,7 +20,7 @@ jobs:
         with:
           fetch-depth: 0
           path: docs
-          # ref: ${{ github.event.inputs.targetBranch || 'master' }}
+          ref: ${{ github.event.inputs.targetBranch || 'master' }}
 
       - name: Set up system-wide dependencies
         run: |
@@ -109,9 +106,36 @@
       - name: Create Parity Coverage Docs
         working-directory: docs
         run: |
-          ls -la
           python3 -m scripts.create_data_coverage -i target/metrics-implementation-details -r target/metrics-raw -o target/updated_coverage -s src/data/coverage/service_display_name.json
-          # Move the resulting markdown file to the docs repo
-          # cp -r target/updated_coverage/md/* content/en/references/coverage && rm -R target/updated_coverage/md/
           mv -f target/updated_coverage/data/*.json src/data/coverage
-          git status
+
+      - name: Check for changes
+        id: check-for-changes
+        working-directory: docs
+        run: |
+          # Check for changed coverage files, against the PR branch if it exists, otherwise against the target branch.
+          # Store the list of changed files in resources/diff-check.log and the diff count in the GitHub Action output "diff-count".
+          mkdir -p resources
+          (git diff --name-only origin/parity-coverage-auto-updates -- src/data/coverage/ 2>/dev/null || git diff --name-only origin/${{ github.event.inputs.targetBranch || 'master' }} -- src/data/coverage/ 2>/dev/null) | tee -a resources/diff-check.log
+          echo "diff-count=$(cat resources/diff-check.log | wc -l)" >> $GITHUB_OUTPUT
+
+      - name: Read PR markdown template
+        if: ${{ success() && steps.check-for-changes.outputs.diff-count != '0' && steps.check-for-changes.outputs.diff-count != '' }}
+        id: template
+        uses: juliangruber/read-file-action@v1
+        with:
+          path: docs/.github/bot_templates/PARITY_COVERAGE_DOCS_PR.md
+
+      - name: Create PR
+        uses: peter-evans/create-pull-request@v7
+        if: ${{ success() && steps.check-for-changes.outputs.diff-count != '0' && steps.check-for-changes.outputs.diff-count != '' }}
+        with:
+          path: docs
+          title: "Update Parity Coverage Docs"
+          body: "${{ steps.template.outputs.content }}"
+          branch: "parity-coverage-auto-updates"
"parity-coverage-auto-updates" + author: "LocalStack Bot " + committer: "LocalStack Bot " + commit-message: "update generated parity coverage docs" + token: ${{ secrets.PRO_ACCESS_TOKEN }}