diff --git a/pythonFiles/tests/unittestadapter/.data/test_subtest.py b/pythonFiles/tests/unittestadapter/.data/test_subtest.py
new file mode 100644
index 000000000000..b913b8773701
--- /dev/null
+++ b/pythonFiles/tests/unittestadapter/.data/test_subtest.py
@@ -0,0 +1,18 @@
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License.
+
+import unittest
+
+# Test class for the test_subtest_run test.
+# The test_subtest_run function should return a dictionary with a "success" status
+# and a "result" value that is a dict with 6 entries, one for each subtest.
+
+
+class NumbersTest(unittest.TestCase):
+    def test_even(self):
+        """
+        Test that numbers between 0 and 5 are all even.
+        """
+        for i in range(0, 6):
+            with self.subTest(i=i):
+                self.assertEqual(i % 2, 0)
diff --git a/pythonFiles/tests/unittestadapter/test_execution.py b/pythonFiles/tests/unittestadapter/test_execution.py
index 5353703c59f2..7f58049a56b7 100644
--- a/pythonFiles/tests/unittestadapter/test_execution.py
+++ b/pythonFiles/tests/unittestadapter/test_execution.py
@@ -94,6 +94,35 @@ def test_single_ids_run() -> None:
     assert id_result["outcome"] == "success"
 
 
+def test_subtest_run() -> None:
+    """This test runs on test_subtest.py, which has a single method, test_even,
+    that uses unittest's subTest.
+
+    The run should return a dict payload with 6 entries, one for each subtest.
+    """
+    id = "test_subtest.NumbersTest.test_even"
+    actual = run_tests(
+        os.fspath(TEST_DATA_PATH), [id], "test_subtest.py", None, "fake-uuid"
+    )
+    subtests_ids = [
+        "test_subtest.NumbersTest.test_even (i=0)",
+        "test_subtest.NumbersTest.test_even (i=1)",
+        "test_subtest.NumbersTest.test_even (i=2)",
+        "test_subtest.NumbersTest.test_even (i=3)",
+        "test_subtest.NumbersTest.test_even (i=4)",
+        "test_subtest.NumbersTest.test_even (i=5)",
+    ]
+    assert actual
+    assert all(item in actual for item in ("cwd", "status"))
+    assert actual["status"] == "success"
+    assert actual["cwd"] == os.fspath(TEST_DATA_PATH)
+    assert "result" in actual
+    result = actual["result"]
+    assert len(result) == 6
+    for id in subtests_ids:
+        assert id in result
+
+
 @pytest.mark.parametrize(
     "test_ids, pattern, cwd, expected_outcome",
     [
diff --git a/pythonFiles/unittestadapter/execution.py b/pythonFiles/unittestadapter/execution.py
index 5acb11b2609f..37288651f531 100644
--- a/pythonFiles/unittestadapter/execution.py
+++ b/pythonFiles/unittestadapter/execution.py
@@ -130,7 +130,10 @@ def formatResult(
         formatted = formatted[1:]
     tb = "".join(formatted)
 
-    test_id = test.id()
+    if subtest:
+        test_id = subtest.id()
+    else:
+        test_id = test.id()
 
     result = {
         "test": test.id(),
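Editor's note (not part of the diff): the `formatResult` change above works because `unittest` reports each subtest through `TestResult.addSubTest`, and only the subtest object's `id()` carries the `(i=...)` parameter suffix; `test.id()` stays the bare method ID. A minimal sketch of that behavior — the class name `SubtestIdDemoResult` is invented, and it assumes the `test_subtest.py` file above is importable from the current directory:

```python
import unittest


class SubtestIdDemoResult(unittest.TextTestResult):
    def addSubTest(self, test, subtest, err):
        super().addSubTest(test, subtest, err)
        # test.id()    -> "test_subtest.NumbersTest.test_even"
        # subtest.id() -> "test_subtest.NumbersTest.test_even (i=0)", etc.
        print(test.id(), "->", subtest.id())


if __name__ == "__main__":
    suite = unittest.defaultTestLoader.loadTestsFromName(
        "test_subtest.NumbersTest.test_even"
    )
    unittest.TextTestRunner(resultclass=SubtestIdDemoResult).run(suite)
```

Running this against `NumbersTest` prints six lines, one per subtest — exactly the six IDs the new `test_subtest_run` test asserts on.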
diff --git a/src/client/testing/testController/pytest/runner.ts b/src/client/testing/testController/pytest/runner.ts
index 2c6cff724398..83da819b861a 100644
--- a/src/client/testing/testController/pytest/runner.ts
+++ b/src/client/testing/testController/pytest/runner.ts
@@ -95,6 +95,10 @@ export class PytestRunner implements ITestsRunner {
             testArgs.push('--capture', 'no');
         }
 
+        if (options.debug && !testArgs.some((a) => a.startsWith('--no-cov'))) {
+            testArgs.push('--no-cov');
+        }
+
         // Positional arguments control the tests to be run.
         const rawData = idToRawData.get(testNode.id);
         if (!rawData) {
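Editor's note (not part of the diff): for orientation before the adapter change below, this is roughly the payload shape it consumes for one failed subtest — the keys of `result` are the subtest IDs from above. All field values here are invented; the real dict is built by `formatResult` in `execution.py`:

```python
# Illustrative only: one "subtest-failure" entry as the TypeScript adapter sees it.
payload = {
    "cwd": "/path/to/tests",
    "status": "success",
    "result": {
        "test_subtest.NumbersTest.test_even (i=1)": {
            "test": "test_subtest.NumbersTest.test_even",
            "outcome": "subtest-failure",
            "message": "1 != 0",
            "traceback": "Traceback (most recent call last): ...",
            "subtest": "test_subtest.NumbersTest.test_even (i=1)",
        },
    },
}
```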
diff --git a/src/client/testing/testController/workspaceTestAdapter.ts b/src/client/testing/testController/workspaceTestAdapter.ts
index dc9c65c431fd..8ce51e5dab56 100644
--- a/src/client/testing/testController/workspaceTestAdapter.ts
+++ b/src/client/testing/testController/workspaceTestAdapter.ts
@@ -22,6 +22,7 @@ import { sendTelemetryEvent } from '../../telemetry';
 import { EventName } from '../../telemetry/constants';
 import { TestProvider } from '../types';
 import {
+    clearAllChildren,
     createErrorTestItem,
     DebugTestTag,
     ErrorTestItemOptions,
@@ -135,8 +136,11 @@ export class WorkspaceTestAdapter {
         }
 
         if (rawTestExecData !== undefined && rawTestExecData.result !== undefined) {
+            // Map which holds the subtest information for each test item.
+            const subTestStats: Map<string, { failed: number; passed: number }> = new Map();
+
+            // iterate through payload and update the UI accordingly.
             for (const keyTemp of Object.keys(rawTestExecData.result)) {
-                // check for result and update the UI accordingly.
                 const testCases: TestItem[] = [];
 
                 // grab leaf level test items
@@ -147,7 +151,6 @@
 
                     if (
                         rawTestExecData.result[keyTemp].outcome === 'failure' ||
-                        rawTestExecData.result[keyTemp].outcome === 'subtest-failure' ||
                        rawTestExecData.result[keyTemp].outcome === 'passed-unexpected'
                     ) {
                         const rawTraceback = rawTestExecData.result[keyTemp].traceback ?? '';
@@ -175,8 +178,7 @@
                         });
                     } else if (
                         rawTestExecData.result[keyTemp].outcome === 'success' ||
-                        rawTestExecData.result[keyTemp].outcome === 'expected-failure' ||
-                        rawTestExecData.result[keyTemp].outcome === 'subtest-passed'
+                        rawTestExecData.result[keyTemp].outcome === 'expected-failure'
                     ) {
                         const grabTestItem = this.runIdToTestItem.get(keyTemp);
                         const grabVSid = this.runIdToVSid.get(keyTemp);
@@ -203,6 +205,73 @@
                                 }
                             });
                         }
+                    } else if (rawTestExecData.result[keyTemp].outcome === 'subtest-failure') {
+                        // split on " " since the subtest ID has the parent test ID in the first part of the ID.
+                        const parentTestCaseId = keyTemp.split(' ')[0];
+                        const parentTestItem = this.runIdToTestItem.get(parentTestCaseId);
+                        const data = rawTestExecData.result[keyTemp];
+                        // find the subtest's parent test item
+                        if (parentTestItem) {
+                            const subtestStats = subTestStats.get(parentTestCaseId);
+                            if (subtestStats) {
+                                subtestStats.failed += 1;
+                            } else {
+                                subTestStats.set(parentTestCaseId, { failed: 1, passed: 0 });
+                                runInstance.appendOutput(fixLogLines(`${parentTestCaseId} [subtests]:\r\n`));
+                                // clear since subtest items don't persist between runs
+                                clearAllChildren(parentTestItem);
+                            }
+                            const subtestId = keyTemp;
+                            const subTestItem = testController?.createTestItem(subtestId, subtestId);
+                            runInstance.appendOutput(fixLogLines(`${subtestId} Failed\r\n`));
+                            // create a new test item for the subtest
+                            if (subTestItem) {
+                                const traceback = data.traceback ?? '';
+                                const text = `${data.subtest} Failed: ${data.message ?? data.outcome}\r\n${traceback}\r\n`;
+                                runInstance.appendOutput(fixLogLines(text));
+                                parentTestItem.children.add(subTestItem);
+                                runInstance.started(subTestItem);
+                                const message = new TestMessage(rawTestExecData?.result[keyTemp].message ?? '');
+                                if (parentTestItem.uri && parentTestItem.range) {
+                                    message.location = new Location(parentTestItem.uri, parentTestItem.range);
+                                }
+                                runInstance.failed(subTestItem, message);
+                            } else {
+                                throw new Error('Unable to create new child node for subtest');
+                            }
+                        } else {
+                            throw new Error('Parent test item not found');
+                        }
+                    } else if (rawTestExecData.result[keyTemp].outcome === 'subtest-success') {
+                        // split on " " since the subtest ID has the parent test ID in the first part of the ID.
+                        const parentTestCaseId = keyTemp.split(' ')[0];
+                        const parentTestItem = this.runIdToTestItem.get(parentTestCaseId);
+
+                        // find the subtest's parent test item
+                        if (parentTestItem) {
+                            const subtestStats = subTestStats.get(parentTestCaseId);
+                            if (subtestStats) {
+                                subtestStats.passed += 1;
+                            } else {
+                                subTestStats.set(parentTestCaseId, { failed: 0, passed: 1 });
+                                runInstance.appendOutput(fixLogLines(`${parentTestCaseId} [subtests]:\r\n`));
+                                // clear since subtest items don't persist between runs
+                                clearAllChildren(parentTestItem);
+                            }
+                            const subtestId = keyTemp;
+                            const subTestItem = testController?.createTestItem(subtestId, subtestId);
+                            // create a new test item for the subtest
+                            if (subTestItem) {
+                                parentTestItem.children.add(subTestItem);
+                                runInstance.started(subTestItem);
+                                runInstance.passed(subTestItem);
+                                runInstance.appendOutput(fixLogLines(`${subtestId} Passed\r\n`));
+                            } else {
+                                throw new Error('Unable to create new child node for subtest');
+                            }
+                        } else {
+                            throw new Error('Parent test item not found');
+                        }
                     }
                 }
             }
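Editor's note (not part of the diff): the `subTestStats` map above only tallies pass/fail counts per parent test, keyed by the parent ID recovered from the subtest ID. A rough Python rendering of that bookkeeping — `tally_subtests` is a hypothetical helper, not part of the PR:

```python
from collections import defaultdict


def tally_subtests(result: dict) -> dict:
    """Tally per-parent subtest outcomes from a run payload's "result" dict."""
    stats = defaultdict(lambda: {"passed": 0, "failed": 0})
    for subtest_id, data in result.items():
        if data["outcome"] in ("subtest-success", "subtest-failure"):
            # Same trick as keyTemp.split(' ')[0] above: the parent test ID
            # is everything before the first space of the subtest ID.
            parent_id = subtest_id.split(" ")[0]
            key = "passed" if data["outcome"] == "subtest-success" else "failed"
            stats[parent_id][key] += 1
    return dict(stats)
```

Fed the six entries produced by `NumbersTest.test_even`, this would return `{"test_subtest.NumbersTest.test_even": {"passed": 3, "failed": 3}}`, since `i = 0, 2, 4` pass and `i = 1, 3, 5` fail.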