From 78f322b5f63231146417618026c964683348f6f1 Mon Sep 17 00:00:00 2001 From: Artem Rys Date: Wed, 20 Sep 2023 14:53:16 +0200 Subject: [PATCH] fix: better error messages during the runs (#16) * fix: better error messages during the runs This commit improves the logging for this GitHub Action so developers can understand most of the things from the console output without a need to look into the HTML report. This commit also removes a couple of simple functions and embeds their functionality into the main function without losing readability. The HTML report will be downloaded and stored anyway for Splunk repositories. * chore: temporarily use Dockerfile in action.yml for testing * fix: make 15 retries for some bigger add-ons * fix: up to 20 retries for getting the results * refactor: combine 2 lines into 1 --- .coveragerc | 2 +- .github/workflows/build-test-release.yaml | 6 +- README.md | 15 +- action.yml | 2 +- main.py | 150 +++--- test/unit/test_main.py | 513 +++++++++++++++++++--- 6 files changed, 546 insertions(+), 142 deletions(-) diff --git a/.coveragerc b/.coveragerc index 52213b2..dae5284 100644 --- a/.coveragerc +++ b/.coveragerc @@ -1,2 +1,2 @@ [run] -plugins = covdefaults \ No newline at end of file +plugins = covdefaults diff --git a/.github/workflows/build-test-release.yaml b/.github/workflows/build-test-release.yaml index 54565c0..cd48527 100644 --- a/.github/workflows/build-test-release.yaml +++ b/.github/workflows/build-test-release.yaml @@ -30,8 +30,7 @@ jobs: - uses: actions/checkout@v3 with: persist-credentials: false - - name: Set up Python - uses: actions/setup-python@v4 + - uses: actions/setup-python@v4 with: python-version: "3.11" - name: Install dependencies @@ -41,7 +40,7 @@ jobs: pip install -r requirements-dev.txt - name: Test run: | - python -m pytest -v test/unit + python -m pytest -v test/unit --cov build_action: runs-on: ubuntu-latest @@ -90,6 +89,7 @@ jobs: git_committer_email: ${{ secrets.SA_GH_USER_EMAIL }} gpg_private_key: ${{ 
secrets.SA_GPG_PRIVATE_KEY }} passphrase: ${{ secrets.SA_GPG_PASSPHRASE }} + update-semver: if: startsWith(github.ref, 'refs/tags/v') needs: build_action diff --git a/README.md b/README.md index 6fd6bc0..ee96121 100644 --- a/README.md +++ b/README.md @@ -25,33 +25,32 @@ jobs: | Name | Description | Notes | Default | |-----------------|--------------------------------------------------------------------------------|--------------|---------| -| `username` | Splunk.com user used to login to the appinspect API | **required** | | -| `password` | Splunk.com password used to login to the appinspect API | **required** | | +| `username` | Splunk.com user used to login to the AppInspect API | **required** | | +| `password` | Splunk.com password used to login to the AppInspect API | **required** | | | `app_path` | Path to the directory where addon is located, without filename | **required** | | -| `included_tags` | Comma separated list of [tags](#reference-docs) to include in appinspect job | | None | -| `excluded_tags` | Comma separated list of [tags](#reference-docs) to exclude from appinspect job | | None | +| `included_tags` | Comma separated list of [tags](#reference-docs) to include in AppInspect job | | None | +| `excluded_tags` | Comma separated list of [tags](#reference-docs) to exclude from AppInspect job | | None | | `log_level` | Python logging level for action | | `INFO` | You can explicitly include and exclude tags from a validation by including additional options in your request. Specifically, using the included_tags and excluded_tags options includes and excludes the tags you specify from a validation. If no tags are specified all checks will be done and no tags are excluded from the validation. -Appinspect failures are handled via `.appinspect_api.expect.yaml` file. To make exceptions the file should look like that: +AppInspect failures are handled via `.appinspect_api.expect.yaml` file. 
To make exceptions the file should look like that: ```yaml name_of_the_failed_checks: comment: jira-123 ``` -If you are a Splunker please specify jira issue in the comment where reason for exception is granted and explained +If you are a Splunker please specify a JIRA issue in the comment where reason for exception is granted and explained. ### Reference Docs For more info on check criteria, tags and the API see the [Splunk AppInspect reference](https://dev.splunk.com/enterprise/reference/appinspect). - ### Differences between v2 Missing parameters: - + - `failOnError` - hardcoded to be true - `failOnWarning` - hardcoded to be false - `ignoredChecks` - hardcoded to be None diff --git a/action.yml b/action.yml index 56e9dcf..2a418cb 100644 --- a/action.yml +++ b/action.yml @@ -25,4 +25,4 @@ inputs: required: false runs: using: "docker" - image: docker://ghcr.io/splunk/appinspect-api-action/appinspect-api-action:v3.0.1 + image: Dockerfile diff --git a/main.py b/main.py index 22dda5c..d56ceec 100644 --- a/main.py +++ b/main.py @@ -11,6 +11,7 @@ from typing import Dict, Any, Tuple, Callable, Sequence, Optional, List NUM_RETRIES = 3 +APPINSPECT_EXPECT_FILENAME = ".appinspect_api.expect.yaml" class CouldNotAuthenticateException(Exception): @@ -21,12 +22,13 @@ class CouldNotRetryRequestException(Exception): pass -class AppinspectChecksFailuresException(Exception): - pass - - -class AppinspectFailures(Exception): - pass +logger = logging.getLogger("splunk_appinspect_api") +logger.setLevel(logging.INFO) +formatter = logging.Formatter("%(levelname)s: %(message)s") +stream_handler = logging.StreamHandler() +stream_handler.setLevel(logging.INFO) +stream_handler.setFormatter(formatter) +logger.addHandler(stream_handler) def _retry_request( @@ -42,17 +44,17 @@ def _retry_request( sleep: Callable[[float], Any] = time.sleep, rand: Callable[[], float] = random.random, validation_function: Callable[[requests.Response], bool] = lambda _: True, -): +) -> requests.Response: reason = "" 
for retry_num in range(num_retries): if retry_num > 0: sleep_time = rand() + retry_num - logging.info( + logger.info( f"Sleeping {sleep_time} seconds before retry " f"{retry_num} of {num_retries - 1}" ) if reason: - logging.info(reason) + logger.info(reason) sleep(sleep_time) response = requests.request( method, @@ -73,7 +75,6 @@ def _retry_request( reason = f"response status code: {response.status_code}, for message: {error_message}" continue if not validation_function(response): - logging.info("Response did not pass the validation, retrying...") continue return response raise CouldNotRetryRequestException() @@ -96,7 +97,7 @@ def _download_report( def login(username: str, password: str) -> requests.Response: - logging.debug("Sending request to retrieve login token") + logger.debug("Sending request to retrieve login token") try: return _retry_request( "GET", @@ -104,10 +105,10 @@ def login(username: str, password: str) -> requests.Response: auth=(username, password), ) except CouldNotAuthenticateException: - logging.error("Credentials are not correct, please check the configuration.") + logger.error("Credentials are not correct, please check the configuration.") sys.exit(1) except CouldNotRetryRequestException: - logging.error("Could not get response after all retries, exiting...") + logger.error("Could not get response after all retries, exiting...") sys.exit(1) @@ -118,7 +119,7 @@ def validate(token: str, build: Path, payload: Dict[str, str]) -> requests.Respo (build.name, open(build.as_posix(), "rb"), "application/octet-stream"), ) ] - logging.debug(f"Sending package `{build.name}` for validation") + logger.debug(f"Sending package `{build.name}` for validation") try: response = _retry_request( "POST", @@ -131,57 +132,58 @@ def validate(token: str, build: Path, payload: Dict[str, str]) -> requests.Respo ) return response except CouldNotAuthenticateException: - logging.error("Credentials are not correct, please check the configuration.") + 
logger.error("Credentials are not correct, please check the configuration.") sys.exit(1) except CouldNotRetryRequestException: - logging.error("Could not get response after all retries, exiting...") + logger.error("Could not get response after all retries, exiting...") sys.exit(1) -def submit(token: str, request_id: str) -> requests.Response: +def submit( + token: str, request_id: str, seconds_to_wait: float = 60.0 +) -> requests.Response: def _validate_validation_status(response: requests.Response) -> bool: - is_successful = response.json()["status"] == "SUCCESS" - if is_successful: - logging.debug( - f'Response status is `{response.json()["status"]}`, "SUCCESS" expected.' - ) + status = response.json()["status"] + is_successful = status == "SUCCESS" + if not is_successful: + logger.info(f'Response status is `{status}`, "SUCCESS" expected.') return is_successful - # appinspect api needs some time to process the request - # if the response status will be "PROCESSING" wait 60s and make another call + # Splunk AppInspect API needs some time to process the request. + # If the response status will be "PROCESSING" wait 60s and make another call. - # there is a problem with pycov marking this line as not covered - excluded from coverage + # There is a problem with pytest-cov marking this line as not covered - excluded from coverage. 
try: - logging.debug("Submitting package") + logger.debug("Submitting package") return _retry_request( # pragma: no cover "GET", f"https://appinspect.splunk.com/v1/app/validate/status/{request_id}", headers={ "Authorization": f"bearer {token}", }, - rand=lambda: 60.0, - num_retries=10, + rand=lambda: seconds_to_wait, + num_retries=20, validation_function=_validate_validation_status, ) except CouldNotAuthenticateException: - logging.error("Credentials are not correct, please check the configuration.") + logger.error("Credentials are not correct, please check the configuration.") sys.exit(1) except CouldNotRetryRequestException: - logging.error("Could not get response after all retries, exiting...") + logger.error("Could not get response after all retries, exiting...") sys.exit(1) def download_json_report( token: str, request_id: str, payload: Dict[str, Any] ) -> requests.Response: - logging.debug("Downloading response in json format") + logger.info("Downloading response in JSON format") return _download_report( token=token, request_id=request_id, payload=payload, response_type="json" ) def download_and_save_html_report(token: str, request_id: str, payload: Dict[str, Any]): - logging.debug("Downloading report in html format") + logger.info("Downloading report in HTML format") response = _download_report( token=token, request_id=request_id, payload=payload, response_type="html" ) @@ -191,17 +193,18 @@ def download_and_save_html_report(token: str, request_id: str, payload: Dict[str def get_appinspect_failures_list(response_dict: Dict[str, Any]) -> List[str]: - logging.debug("Parsing json response to find failed checks\n") + logger.debug("Parsing JSON response to find failed checks") reports = response_dict["reports"] groups = reports[0]["groups"] - failed_tests_list = [] - for group in groups: for check in group["checks"]: if check["result"] == "failure": failed_tests_list.append(check["name"]) - logging.debug(f"Failed appinspect check for name: {check['name']}\n") + 
logger.info(f"Failed AppInspect check for name: {check['name']}") + check_messages = check["messages"] + for check_message in check_messages: + logger.info(f"\t* {check_message['message']}") return failed_tests_list @@ -210,55 +213,41 @@ def read_yaml_as_dict(filename_path: Path) -> Dict[str, str]: try: out_dict = yaml.safe_load(file) except yaml.YAMLError as e: - logging.error(f"Can not read yaml file named {filename_path}") + logger.error(f"Can not read YAML file named {filename_path}") raise e return out_dict if out_dict else {} -def compare_failures(failures: List[str], expected: List[str]): - if sorted(failures) != sorted(expected): - logging.debug(f"Appinspect failures: {failures}") - logging.debug(f"Expected failures: {expected}") - raise AppinspectFailures - - -def parse_results(results: Dict[str, Any]): - print("\n======== AppInspect Api Results ========") - for metric, count in results["info"].items(): - print(f"{metric:>15} : {count: <4}") - if results["info"]["error"] > 0 or results["info"]["failure"] > 0: - logging.warning("Error or failures found in AppInspect Report") - raise AppinspectChecksFailuresException - - def build_payload(included_tags: str, excluded_tags: str) -> Dict[str, str]: payload = {} if included_tags != "": payload["included_tags"] = included_tags if excluded_tags != "": payload["excluded_tags"] = excluded_tags - return payload -def compare_against_known_failures(response_json: Dict[str, Any], exceptions_file_path): - logging.info( - f"Comparing AppInspect Failures with `{exceptions_file_path.name}` file" +def compare_against_known_failures( + response_json: Dict[str, Any], exceptions_file_path: Path +) -> None: + logger.info( + f"Comparing AppInspect failures with `{exceptions_file_path.name}` file" ) failures = get_appinspect_failures_list(response_json) if exceptions_file_path.exists(): expected_failures = list(read_yaml_as_dict(exceptions_file_path).keys()) - try: - compare_failures(failures, expected_failures) - except 
AppinspectFailures: - logging.error( - "Appinspect failures don't match appinspect.expect file, check for exceptions file" + if sorted(failures) != sorted(expected_failures): + logger.info(f"AppInspect failures: {failures}") + logger.info(f"Expected failures: {expected_failures}") + logger.error( + "AppInspect failures don't match appinspect.expect file, check for exceptions file" ) sys.exit(1) else: - logging.error( - f"File `{exceptions_file_path.name}` not found, please create `{exceptions_file_path.name}` file with exceptions\n" # noqa: E501 + logger.error( + f"File `{exceptions_file_path.name}` not found, " + f"please create `{exceptions_file_path.name}` file with exceptions" ) sys.exit(1) @@ -274,41 +263,42 @@ def main(argv: Optional[Sequence[str]] = None): parser.add_argument("excluded_tags") parser.add_argument("log_level") - appinspect_expect_filename = ".appinspect_api.expect.yaml" - args = parser.parse_args(argv) - logging.basicConfig(level=args.log_level) + logger.setLevel(args.log_level) - logging.info( - f"app_path={args.app_path}, included_tags={args.included_tags}, excluded_tags={args.excluded_tags}" + logger.info( + f"Running Splunk AppInspect API for app_path={args.app_path}, " + f"included_tags={args.included_tags}, excluded_tags={args.excluded_tags}" ) build = Path(args.app_path) login_response = login(args.username, args.password) token = login_response.json()["data"]["token"] - logging.debug("Successfully received token") + logger.info("Successfully received token after login") payload = build_payload(args.included_tags, args.excluded_tags) - logging.debug(f"Validation payload: {payload}") + logger.info(f"Validation payload {payload}") validate_response = validate(token, build, payload) - logging.debug(f"Successfully sent package for validation using {payload}") + logger.info(f"Successfully sent package for validation using {payload}") request_id = validate_response.json()["request_id"] submit_response = submit(token, request_id) - 
logging.info("Successfully submitted and validated package") - + logger.info("Successfully submitted and validated package") + submit_response_info = submit_response.json()["info"] + logger.info(f"Report info {submit_response_info}") download_and_save_html_report(token, request_id, payload) - # if this is true it compares the exceptions and results - try: - parse_results(submit_response.json()) - except AppinspectChecksFailuresException: + issues_in_response = False + if submit_response_info["error"] > 0 or submit_response_info["failure"] > 0: + issues_in_response = True + + if issues_in_response: + logger.info("Detected errors / failures in response") response_in_json = download_json_report(token, request_id, payload) response_json = json.loads(response_in_json.content.decode("utf-8")) - yaml_file_path = Path(appinspect_expect_filename).absolute() - + yaml_file_path = Path(APPINSPECT_EXPECT_FILENAME).absolute() compare_against_known_failures(response_json, yaml_file_path) diff --git a/test/unit/test_main.py b/test/unit/test_main.py index f72bdc1..227ac17 100644 --- a/test/unit/test_main.py +++ b/test/unit/test_main.py @@ -1,6 +1,7 @@ from unittest import mock import pytest +import requests import yaml import main @@ -147,6 +148,63 @@ def test_submit_success(mock_requests): assert response.json() == response_input_json +@mock.patch("main.requests") +def test_submit_check_retry_logic(mock_requests): + mock_response_1 = mock.create_autospec(requests.Response) + mock_response_1.status_code = 200 + mock_response_1.json.return_value = { + "request_id": "1234-1234-1234-1234-1234", + "links": [ + { + "href": "/v1/app/validate/status/1234-1234-1234-1234-1234", + "rel": "self", + }, + { + "href": "/v1/app/report/1234-1234-1234-1234-1234", + "rel": "report", + }, + ], + "status": "PROCESSING", + } + mock_response_2 = mock.create_autospec(requests.Response) + response_input_json_2 = { + "request_id": "1234-1234-1234-1234-1234", + "links": [ + { + "href": 
"/v1/app/validate/status/1234-1234-1234-1234-1234", + "rel": "self", + }, + { + "href": "/v1/app/report/1234-1234-1234-1234-1234", + "rel": "report", + }, + ], + "status": "SUCCESS", + "info": { + "error": 0, + "failure": 0, + "skipped": 0, + "manual_check": 8, + "not_applicable": 71, + "warning": 7, + "success": 137, + }, + } + mock_response_2.status_code = 200 + mock_response_2.json.return_value = response_input_json_2 + mock_requests.request.side_effect = [ + mock_response_1, + mock_response_2, + ] + + response = main.submit( + token="token", request_id="1234-1234-1234", seconds_to_wait=0.01 + ) + + assert response.status_code == 200 + assert response.json() == response_input_json_2 + + @mock.patch.object(main, "_retry_request") def test_submit_invalid_token(mock_retry_request, caplog): mock_retry_request.side_effect = main.CouldNotAuthenticateException @@ -183,24 +241,6 @@ def test_build_payload(included, excluded, payload): assert test_payload == payload -def test_parse_results_errors(): - results = {"info": {"error": 1, "failure": 1}} - with pytest.raises(main.AppinspectChecksFailuresException): - main.parse_results(results) - - -def test_parse_results_no_errors(capsys): - results = {"info": {"error": 0, "failure": 0}} - - main.parse_results(results) - - captured = capsys.readouterr() - assert ( - "\n======== AppInspect Api Results ========\n error : 0 \n failure : 0 \n" - in captured.out - ) - - @mock.patch("main.requests") def test_retry_request_always_400(mock_requests): mock_response = mock.MagicMock() @@ -391,7 +431,6 @@ def test_main_errors_in_except_file( @mock.patch("main.download_json_report") -@mock.patch("main.parse_results") @mock.patch("main.download_and_save_html_report") @mock.patch("main.submit") @mock.patch("main.validate") @@ -401,7 +440,6 @@ def test_main_failures_file_does_not_exist( mock_validate, mock_submit, mock_download_and_save_html_report, - mock_parse_results, mock_download_json_report, ): # mock login @@ -461,7 +499,7 @@ def 
test_main_failures_file_does_not_exist( "status": "SUCCESS", "info": { "error": 0, - "failure": 0, + "failure": 1, "skipped": 0, "manual_check": 8, "not_applicable": 71, @@ -490,9 +528,6 @@ def test_main_failures_file_does_not_exist( download_mock_response.status_code = 200 mock_download_and_save_html_report.request.return_value = download_mock_response - # mock parse_results - mock_parse_results.side_effect = main.AppinspectChecksFailuresException - # mock download_json_report mock_json_response = mock.MagicMock() mock_json_report = b'{"reports": [{"groups": [{"name": "check_viruses","checks": [{"name": "check_for_viruses", "result": "success"}]}]}]}' # noqa: E501 @@ -627,8 +662,116 @@ def test_get_appinspect_failures_list(): { "groups": [ { - "name": "check_viruses", - "checks": [{"name": "check_for_viruses", "result": "failure"}], + "description": "check_packaging_standards_description", + "name": "check_packaging_standards", + "checks": [ + { + "description": "description", + "name": "check_that_splunk_app_package_has_valid_static_dependencies", + "tags": [ + "splunk_appinspect", + "appapproval", + "cloud", + "packaging_standards", + "self-service", + "private_app", + "private_victoria", + "migration_victoria", + "private_classic", + ], + "result": "not_applicable", + "messages": [ + { + "result": "not_applicable", + "message": "message_1", + "message_filename": None, + "message_line": None, + } + ], + }, + { + "description": "description", + "name": "check_that_splunk_app_package_has_read_permission", + "tags": [ + "splunk_appinspect", + "appapproval", + "cloud", + "packaging_standards", + "self-service", + "private_app", + "private_victoria", + "migration_victoria", + "private_classic", + ], + "result": "success", + "messages": [], + }, + { + "description": "description", + "name": "check_that_splunk_app_package_extracts_to_visible_directory", + "tags": [ + "splunk_appinspect", + "appapproval", + "cloud", + "packaging_standards", + "self-service", + 
"private_app", + "private_victoria", + "migration_victoria", + "private_classic", + ], + "result": "success", + "messages": [], + }, + { + "description": "description", + "name": "check_that_splunk_app_package_does_not_contain_files_outside_of_app", + "tags": [ + "splunk_appinspect", + "appapproval", + "cloud", + "packaging_standards", + "self-service", + "private_app", + "private_victoria", + "migration_victoria", + "private_classic", + ], + "result": "failure", + "messages": [ + { + "result": "failure", + "message": "failure_message_1", + "message_filename": None, + "message_line": None, + } + ], + }, + { + "description": "description", + "name": "check_that_extracted_splunk_app_does_not_contain_prohibited_directories", + "tags": [ + "splunk_appinspect", + "appapproval", + "cloud", + "packaging_standards", + "self-service", + "private_app", + "private_victoria", + "migration_victoria", + "private_classic", + ], + "result": "failure", + "messages": [ + { + "result": "failure", + "message": "failure_message_2", + "message_filename": None, + "message_line": None, + } + ], + }, + ], } ] } @@ -637,7 +780,10 @@ def test_get_appinspect_failures_list(): failed = main.get_appinspect_failures_list(response_dict) - assert failed == ["check_for_viruses"] + assert failed == [ + "check_that_splunk_app_package_does_not_contain_files_outside_of_app", + "check_that_extracted_splunk_app_does_not_contain_prohibited_directories", + ] def test_get_appinspect_failures_list_no_fails(): @@ -646,8 +792,68 @@ def test_get_appinspect_failures_list_no_fails(): { "groups": [ { - "name": "check_viruses", - "checks": [{"name": "check_for_viruses", "result": "success"}], + "description": "check_packaging_standards_description", + "name": "check_packaging_standards", + "checks": [ + { + "description": "description", + "name": "check_that_splunk_app_package_has_valid_static_dependencies", + "tags": [ + "splunk_appinspect", + "appapproval", + "cloud", + "packaging_standards", + "self-service", + 
"private_app", + "private_victoria", + "migration_victoria", + "private_classic", + ], + "result": "not_applicable", + "messages": [ + { + "result": "not_applicable", + "message": "message_1", + "message_filename": None, + "message_line": None, + } + ], + }, + { + "description": "description", + "name": "check_that_splunk_app_package_has_read_permission", + "tags": [ + "splunk_appinspect", + "appapproval", + "cloud", + "packaging_standards", + "self-service", + "private_app", + "private_victoria", + "migration_victoria", + "private_classic", + ], + "result": "success", + "messages": [], + }, + { + "description": "description", + "name": "check_that_splunk_app_package_extracts_to_visible_directory", + "tags": [ + "splunk_appinspect", + "appapproval", + "cloud", + "packaging_standards", + "self-service", + "private_app", + "private_victoria", + "migration_victoria", + "private_classic", + ], + "result": "success", + "messages": [], + }, + ], } ] } @@ -659,15 +865,6 @@ def test_get_appinspect_failures_list_no_fails(): assert failed == [] -def test_compare_failures_no_fails(): - main.compare_failures(["1", "2"], ["2", "1"]) - - -def test_compare_failures_fails(): - with pytest.raises(main.AppinspectFailures): - main.compare_failures(["1"], ["1", "2"]) - - @mock.patch("yaml.safe_load") def test_read_yaml_as_dict_incorrect_yaml(mock_safe_load, caplog, tmp_path): mock_safe_load.side_effect = yaml.YAMLError @@ -677,17 +874,125 @@ def test_read_yaml_as_dict_incorrect_yaml(mock_safe_load, caplog, tmp_path): with pytest.raises(yaml.YAMLError): main.read_yaml_as_dict(file_path) - assert f"Can not read yaml file named {file_path}\n" in caplog.text + assert f"Can not read YAML file named {file_path}\n" in caplog.text def test_compare_known_failures_no_exceptions(tmp_path): - response_json = { + response_dict = { "reports": [ { "groups": [ { - "name": "check_viruses", - "checks": [{"name": "check_for_viruses", "result": "failure"}], + "description": 
"check_packaging_standards_description", + "name": "check_packaging_standards", + "checks": [ + { + "description": "description", + "name": "check_that_splunk_app_package_has_valid_static_dependencies", + "tags": [ + "splunk_appinspect", + "appapproval", + "cloud", + "packaging_standards", + "self-service", + "private_app", + "private_victoria", + "migration_victoria", + "private_classic", + ], + "result": "not_applicable", + "messages": [ + { + "result": "not_applicable", + "message": "message_1", + "message_filename": None, + "message_line": None, + } + ], + }, + { + "description": "description", + "name": "check_that_splunk_app_package_has_read_permission", + "tags": [ + "splunk_appinspect", + "appapproval", + "cloud", + "packaging_standards", + "self-service", + "private_app", + "private_victoria", + "migration_victoria", + "private_classic", + ], + "result": "success", + "messages": [], + }, + { + "description": "description", + "name": "check_that_splunk_app_package_extracts_to_visible_directory", + "tags": [ + "splunk_appinspect", + "appapproval", + "cloud", + "packaging_standards", + "self-service", + "private_app", + "private_victoria", + "migration_victoria", + "private_classic", + ], + "result": "success", + "messages": [], + }, + { + "description": "description", + "name": "check_that_splunk_app_package_does_not_contain_files_outside_of_app", + "tags": [ + "splunk_appinspect", + "appapproval", + "cloud", + "packaging_standards", + "self-service", + "private_app", + "private_victoria", + "migration_victoria", + "private_classic", + ], + "result": "failure", + "messages": [ + { + "result": "failure", + "message": "failure_message_1", + "message_filename": None, + "message_line": None, + } + ], + }, + { + "description": "description", + "name": "check_that_extracted_splunk_app_does_not_contain_prohibited_directories", + "tags": [ + "splunk_appinspect", + "appapproval", + "cloud", + "packaging_standards", + "self-service", + "private_app", + 
"private_victoria", + "migration_victoria", + "private_classic", + ], + "result": "failure", + "messages": [ + { + "result": "failure", + "message": "failure_message_2", + "message_filename": None, + "message_line": None, + } + ], + }, + ], } ] } @@ -700,17 +1005,125 @@ def test_compare_known_failures_no_exceptions(tmp_path): exceptions_file.write_text(exceptions_content) with pytest.raises(SystemExit): - main.compare_against_known_failures(response_json, exceptions_file) + main.compare_against_known_failures(response_dict, exceptions_file) def test_compare_known_failures_with_exceptions(tmp_path): - response_json = { + response_dict = { "reports": [ { "groups": [ { - "name": "check_viruses", - "checks": [{"name": "check_for_viruses", "result": "failure"}], + "description": "check_packaging_standards_description", + "name": "check_packaging_standards", + "checks": [ + { + "description": "description", + "name": "check_that_splunk_app_package_has_valid_static_dependencies", + "tags": [ + "splunk_appinspect", + "appapproval", + "cloud", + "packaging_standards", + "self-service", + "private_app", + "private_victoria", + "migration_victoria", + "private_classic", + ], + "result": "not_applicable", + "messages": [ + { + "result": "not_applicable", + "message": "message_1", + "message_filename": None, + "message_line": None, + } + ], + }, + { + "description": "description", + "name": "check_that_splunk_app_package_has_read_permission", + "tags": [ + "splunk_appinspect", + "appapproval", + "cloud", + "packaging_standards", + "self-service", + "private_app", + "private_victoria", + "migration_victoria", + "private_classic", + ], + "result": "success", + "messages": [], + }, + { + "description": "description", + "name": "check_that_splunk_app_package_extracts_to_visible_directory", + "tags": [ + "splunk_appinspect", + "appapproval", + "cloud", + "packaging_standards", + "self-service", + "private_app", + "private_victoria", + "migration_victoria", + "private_classic", + ], 
+ "result": "success", + "messages": [], + }, + { + "description": "description", + "name": "check_that_splunk_app_package_does_not_contain_files_outside_of_app", + "tags": [ + "splunk_appinspect", + "appapproval", + "cloud", + "packaging_standards", + "self-service", + "private_app", + "private_victoria", + "migration_victoria", + "private_classic", + ], + "result": "failure", + "messages": [ + { + "result": "failure", + "message": "failure_message_1", + "message_filename": None, + "message_line": None, + } + ], + }, + { + "description": "description", + "name": "check_that_extracted_splunk_app_does_not_contain_prohibited_directories", + "tags": [ + "splunk_appinspect", + "appapproval", + "cloud", + "packaging_standards", + "self-service", + "private_app", + "private_victoria", + "migration_victoria", + "private_classic", + ], + "result": "failure", + "messages": [ + { + "result": "failure", + "message": "failure_message_2", + "message_filename": None, + "message_line": None, + } + ], + }, + ], } ] } @@ -718,10 +1131,12 @@ def test_compare_known_failures_with_exceptions(tmp_path): } exceptions_content = """ - check_for_viruses: - comment: test + check_that_extracted_splunk_app_does_not_contain_prohibited_directories: + comment: exception granted + check_that_splunk_app_package_does_not_contain_files_outside_of_app: + comment: exception granted """ exceptions_file = tmp_path / "foo.yaml" exceptions_file.write_text(exceptions_content) - main.compare_against_known_failures(response_json, exceptions_file) + main.compare_against_known_failures(response_dict, exceptions_file)