[dnm] testing benching all pallets #53

Closed · wants to merge 10 commits
47 changes: 31 additions & 16 deletions .github/scripts/cmd/cmd.py
@@ -15,19 +15,30 @@
runtimeNames = list(map(lambda x: x['name'], runtimesMatrix))

common_args = {
    '--continue-on-fail': {"action": "store_true", "help": "Won't exit(1) on a failed command; continues with the next steps."},
'--quiet': {"action": "store_true", "help": "Won't print start/end/failed messages in PR"},
'--clean': {"action": "store_true", "help": "Clean up the previous bot's & author's comments in PR"},
'--image': {"help": "Override docker image '--image docker.io/paritytech/ci-unified:latest'"},
}

def print_and_log(message, output_file='/tmp/cmd/command_output.log'):
print(message)
with open(output_file, 'a') as f:
f.write(message + '\n')

def setup_logging():
if not os.path.exists('/tmp/cmd'):
os.makedirs('/tmp/cmd')
open('/tmp/cmd/command_output.log', 'w')

parser = argparse.ArgumentParser(prog="/cmd ", description='A command runner for polkadot-sdk repo', add_help=False)
parser.add_argument('--help', action=_HelpAction, help='help for help if you need some help') # help for help
for arg, config in common_args.items():
parser.add_argument(arg, **config)

subparsers = parser.add_subparsers(help='a command to run', dest='command')

setup_logging()

"""
BENCH
"""
@@ -39,8 +50,8 @@
Runs benchmarks for pallet_balances and pallet_multisig for all runtimes which have these pallets. **--quiet** makes it output nothing to the PR except reactions
%(prog)s --pallet pallet_balances pallet_xcm_benchmarks::generic --quiet

Runs bench for all pallets for westend runtime and continues even if some benchmarks fail
%(prog)s --runtime westend --continue-on-fail
Runs bench for all pallets for westend runtime and fails fast on first failed benchmark
%(prog)s --runtime westend --fail-fast

Does not output anything and cleans up the previous bot's & author command triggering comments in PR
%(prog)s --runtime westend rococo --pallet pallet_balances pallet_multisig --quiet --clean
@@ -53,6 +64,7 @@

parser_bench.add_argument('--runtime', help='Runtime(s) space separated', choices=runtimeNames, nargs='*', default=runtimeNames)
parser_bench.add_argument('--pallet', help='Pallet(s) space separated', nargs='*', default=[])
parser_bench.add_argument('--fail-fast', help='Fail fast on first failed benchmark', action='store_true')

"""
FMT
@@ -156,7 +168,9 @@ def main():
manifest_path = os.popen(search_manifest_path).read()
if not manifest_path:
print(f'-- pallet {pallet} not found in dev runtime')
exit(1)
if args.fail_fast:
print_and_log(f'Error: {pallet} not found in dev runtime')
sys.exit(1)
package_dir = os.path.dirname(manifest_path)
print(f'-- package_dir: {package_dir}')
print(f'-- manifest_path: {manifest_path}')
@@ -186,8 +200,9 @@ def main():
f"{config['bench_flags']}"
print(f'-- Running: {cmd} \n')
status = os.system(cmd)
if status != 0 and not args.continue_on_fail:
print(f'Failed to benchmark {pallet} in {runtime}')

if status != 0 and args.fail_fast:
print_and_log(f'❌ Failed to benchmark {pallet} in {runtime}')
sys.exit(1)

# Otherwise collect failed benchmarks and print them at the end
@@ -198,39 +213,39 @@ def main():
successful_benchmarks[f'{runtime}'] = successful_benchmarks.get(f'{runtime}', []) + [pallet]

if failed_benchmarks:
print('❌ Failed benchmarks of runtimes/pallets:')
print_and_log('❌ Failed benchmarks of runtimes/pallets:')
for runtime, pallets in failed_benchmarks.items():
print(f'-- {runtime}: {pallets}')
print_and_log(f'-- {runtime}: {pallets}')

if successful_benchmarks:
print('✅ Successful benchmarks of runtimes/pallets:')
print_and_log('✅ Successful benchmarks of runtimes/pallets:')
for runtime, pallets in successful_benchmarks.items():
print(f'-- {runtime}: {pallets}')
print_and_log(f'-- {runtime}: {pallets}')

elif args.command == 'fmt':
command = f"cargo +nightly fmt"
print(f'Formatting with `{command}`')
nightly_status = os.system(f'{command}')
taplo_status = os.system('taplo format --config .config/taplo.toml')

if (nightly_status != 0 or taplo_status != 0) and not args.continue_on_fail:
print('❌ Failed to format code')
if (nightly_status != 0 or taplo_status != 0):
print_and_log('❌ Failed to format code')
sys.exit(1)

elif args.command == 'update-ui':
command = 'sh ./scripts/update-ui-tests.sh'
print(f'Updating ui with `{command}`')
status = os.system(f'{command}')

if status != 0 and not args.continue_on_fail:
print('❌ Failed to format code')
if status != 0:
print_and_log('❌ Failed to update ui')
sys.exit(1)

elif args.command == 'prdoc':
        # Call the main function from the .github/scripts/generate-prdoc.py module
exit_code = generate_prdoc.main(args)
if exit_code != 0 and not args.continue_on_fail:
print('❌ Failed to generate prdoc')
if exit_code != 0:
print_and_log('❌ Failed to generate prdoc')
sys.exit(exit_code)

print('🚀 Done')
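Taken together, the cmd.py changes invert the old failure-handling default: `--continue-on-fail` is removed, failed benchmarks are now collected and reported at the end by default, and the new `--fail-fast` flag aborts on the first failure. Meanwhile `print_and_log` tees every status line into `/tmp/cmd/command_output.log`, which the workflow changes below upload and post back to the PR. A condensed, runnable sketch of that control flow (names follow the diff; `run` is a hypothetical stand-in for the `os.system` benchmark invocation):

```python
import os
import sys

def print_and_log(message, output_file='/tmp/cmd/command_output.log'):
    # Tee: print for the CI log, and append to the file the workflow later
    # uploads as an artifact and echoes into the PR comment / step summary.
    print(message)
    os.makedirs(os.path.dirname(output_file), exist_ok=True)  # setup_logging() in the script
    with open(output_file, 'a') as f:
        f.write(message + '\n')

def run_benchmarks(runtimes, pallets, fail_fast, run):
    failed, successful = {}, {}
    for runtime in runtimes:
        for pallet in pallets:
            status = run(runtime, pallet)  # stand-in for os.system(cmd)
            if status != 0:
                if fail_fast:
                    # The old behaviour kept going only when --continue-on-fail
                    # was passed; now only --fail-fast aborts immediately.
                    print_and_log(f'❌ Failed to benchmark {pallet} in {runtime}')
                    sys.exit(1)
                failed.setdefault(runtime, []).append(pallet)
            else:
                successful.setdefault(runtime, []).append(pallet)
    if failed:
        print_and_log('❌ Failed benchmarks of runtimes/pallets:')
        for runtime, failed_pallets in failed.items():
            print_and_log(f'-- {runtime}: {failed_pallets}')
    return failed, successful
```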
20 changes: 10 additions & 10 deletions .github/scripts/cmd/test_cmd.py
@@ -96,7 +96,7 @@ def test_bench_command_normal_execution_all_runtimes(self):
command='bench',
runtime=list(map(lambda x: x['name'], mock_runtimes_matrix)),
pallet=['pallet_balances'],
continue_on_fail=False,
fail_fast=True,
quiet=False,
clean=False,
image=None
@@ -153,7 +153,7 @@ def test_bench_command_normal_execution(self):
command='bench',
runtime=['westend'],
pallet=['pallet_balances', 'pallet_staking'],
continue_on_fail=False,
fail_fast=True,
quiet=False,
clean=False,
image=None
@@ -196,7 +196,7 @@ def test_bench_command_normal_execution_xcm(self):
command='bench',
runtime=['westend'],
pallet=['pallet_xcm_benchmarks::generic'],
continue_on_fail=False,
fail_fast=True,
quiet=False,
clean=False,
image=None
@@ -232,7 +232,7 @@ def test_bench_command_two_runtimes_two_pallets(self):
command='bench',
runtime=['westend', 'rococo'],
pallet=['pallet_balances', 'pallet_staking'],
continue_on_fail=False,
fail_fast=True,
quiet=False,
clean=False,
image=None
@@ -290,7 +290,7 @@ def test_bench_command_one_dev_runtime(self):
command='bench',
runtime=['dev'],
pallet=['pallet_balances'],
continue_on_fail=False,
fail_fast=True,
quiet=False,
clean=False,
image=None
@@ -327,7 +327,7 @@ def test_bench_command_one_cumulus_runtime(self):
command='bench',
runtime=['asset-hub-westend'],
pallet=['pallet_assets'],
continue_on_fail=False,
fail_fast=True,
quiet=False,
clean=False,
image=None
@@ -362,7 +362,7 @@ def test_bench_command_one_cumulus_runtime_xcm(self):
command='bench',
runtime=['asset-hub-westend'],
pallet=['pallet_xcm_benchmarks::generic', 'pallet_assets'],
continue_on_fail=False,
fail_fast=True,
quiet=False,
clean=False,
image=None
@@ -400,7 +400,7 @@ def test_bench_command_one_cumulus_runtime_xcm(self):

self.mock_system.assert_has_calls(expected_calls, any_order=True)

@patch('argparse.ArgumentParser.parse_known_args', return_value=(argparse.Namespace(command='fmt', continue_on_fail=False), []))
@patch('argparse.ArgumentParser.parse_known_args', return_value=(argparse.Namespace(command='fmt'), []))
@patch('os.system', return_value=0)
def test_fmt_command(self, mock_system, mock_parse_args):
with patch('sys.exit') as mock_exit:
@@ -410,7 +410,7 @@ def test_fmt_command(self, mock_system, mock_parse_args):
mock_system.assert_any_call('cargo +nightly fmt')
mock_system.assert_any_call('taplo format --config .config/taplo.toml')

@patch('argparse.ArgumentParser.parse_known_args', return_value=(argparse.Namespace(command='update-ui', continue_on_fail=False), []))
@patch('argparse.ArgumentParser.parse_known_args', return_value=(argparse.Namespace(command='update-ui'), []))
@patch('os.system', return_value=0)
def test_update_ui_command(self, mock_system, mock_parse_args):
with patch('sys.exit') as mock_exit:
@@ -419,7 +419,7 @@ def test_update_ui_command(self, mock_system, mock_parse_args):
mock_exit.assert_not_called()
mock_system.assert_called_with('sh ./scripts/update-ui-tests.sh')

@patch('argparse.ArgumentParser.parse_known_args', return_value=(argparse.Namespace(command='prdoc', continue_on_fail=False), []))
@patch('argparse.ArgumentParser.parse_known_args', return_value=(argparse.Namespace(command='prdoc'), []))
@patch('os.system', return_value=0)
def test_prdoc_command(self, mock_system, mock_parse_args):
with patch('sys.exit') as mock_exit:
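The test updates mirror the flag change: every mocked `argparse.Namespace` for the bench command swaps `continue_on_fail=False` for `fail_fast=True`, and the fmt/update-ui/prdoc namespaces drop the attribute entirely. A minimal, self-contained illustration of the patching pattern the suite relies on (the `main` stub here is hypothetical; the real tests import it from cmd.py):

```python
import argparse
import unittest
from unittest.mock import patch

def main():
    # Hypothetical stand-in for cmd.py's main(): it re-parses args the same
    # way, so the patched parse_known_args hands it the injected Namespace.
    args, _ = argparse.ArgumentParser().parse_known_args()
    if args.command == 'bench' and args.fail_fast:
        pass  # the real main() would sys.exit(1) on the first failure

class TestFailFastFlag(unittest.TestCase):
    # No CLI strings are built anywhere: parse_known_args is patched to
    # return a ready-made Namespace, exactly as in test_cmd.py above.
    @patch('argparse.ArgumentParser.parse_known_args',
           return_value=(argparse.Namespace(command='bench', fail_fast=True), []))
    def test_namespace_is_injected(self, mock_parse_args):
        args, _ = argparse.ArgumentParser().parse_known_args()
        self.assertTrue(args.fail_fast)
        main()
        mock_parse_args.assert_called()

if __name__ == '__main__':
    unittest.main()
```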
34 changes: 31 additions & 3 deletions .github/workflows/cmd.yml
@@ -279,6 +279,7 @@ jobs:
echo "The repository is ${{ steps.get-pr.outputs.repo }}"

cmd:
timeout-minutes: 1800 # 30 hours as it could take a long time to run all the runtimes/pallets
needs: [set-image, get-pr-branch]
env:
JOB_NAME: "cmd"
@@ -368,6 +369,19 @@ jobs:
git status
git diff

if [ -f /tmp/cmd/command_output.log ]; then
  CMD_OUTPUT=$(cat /tmp/cmd/command_output.log)
  # Multi-line values must use the <<delimiter form; a plain name=value write breaks on the first newline
  echo "cmd_output<<CMD_OUTPUT_EOF" >> $GITHUB_OUTPUT
  echo "$CMD_OUTPUT" >> $GITHUB_OUTPUT
  echo "CMD_OUTPUT_EOF" >> $GITHUB_OUTPUT
  echo "$CMD_OUTPUT" >> $GITHUB_STEP_SUMMARY
fi

- name: Upload command output
if: ${{ always() }}
uses: actions/upload-artifact@v4
with:
name: command-output
path: /tmp/cmd/command_output.log

- name: Commit changes
run: |
if [ -n "$(git status --porcelain)" ]; then
@@ -413,35 +427,49 @@ jobs:
uses: actions/github-script@v7
env:
SUBWEIGHT: "${{ steps.subweight.outputs.result }}"
CMD_OUTPUT: "${{ steps.cmd.outputs.cmd_output }}"
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
script: |
let runUrl = ${{ steps.build-link.outputs.run_url }}
let subweight = process.env.SUBWEIGHT;
let cmdOutput = process.env.CMD_OUTPUT;

let subweightCollapsed = subweight
let subweightCollapsed = subweight.trim() !== ''
? `<details>\n\n<summary>Subweight results:</summary>\n\n${subweight}\n\n</details>`
: '';

let cmdOutputCollapsed = cmdOutput.trim() !== ''
? `<details>\n\n<summary>Command output:</summary>\n\n${cmdOutput}\n\n</details>`
: '';

github.rest.issues.createComment({
issue_number: context.issue.number,
owner: context.repo.owner,
repo: context.repo.repo,
body: `Command "${{ steps.get-pr-comment.outputs.group2 }}" has finished ✅ [See logs here](${runUrl})${subweightCollapsed}`
body: `Command "${{ steps.get-pr-comment.outputs.group2 }}" has finished ✅ [See logs here](${runUrl})${subweightCollapsed}${cmdOutputCollapsed}`
})

- name: Comment PR (Failure)
if: ${{ failure() && !contains(github.event.comment.body, '--quiet') }}
uses: actions/github-script@v7
env:
CMD_OUTPUT: "${{ steps.cmd.outputs.cmd_output }}"
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
script: |
let jobUrl = ${{ steps.build-link.outputs.job_url }}
let cmdOutput = process.env.CMD_OUTPUT;

let cmdOutputCollapsed = cmdOutput.trim() !== ''
? `<details>\n\n<summary>Command output:</summary>\n\n${cmdOutput}\n\n</details>`
: '';

github.rest.issues.createComment({
issue_number: context.issue.number,
owner: context.repo.owner,
repo: context.repo.repo,
body: `Command "${{ steps.get-pr-comment.outputs.group2 }}" has failed ❌! [See logs here](${jobUrl})`
body: `Command "${{ steps.get-pr-comment.outputs.group2 }}" has failed ❌! [See logs here](${jobUrl})${cmdOutputCollapsed}`
})

- name: Add 😕 reaction on failure
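On the reporting side, both the success and failure comment steps now append the collected command output as a collapsed section, alongside the existing subweight block. A Python rendering of that comment-body assembly (the workflow implements this in JavaScript via actions/github-script; the function and parameter names here are illustrative):

```python
def collapsed(summary, body):
    # Empty sections disappear entirely; non-empty ones fold into <details>
    # so long benchmark logs don't flood the PR thread.
    if body.strip() == '':
        return ''
    return f'<details>\n\n<summary>{summary}</summary>\n\n{body}\n\n</details>'

def build_comment(command, link, subweight='', cmd_output='', failed=False):
    status = 'has failed ❌! [See logs here]' if failed else 'has finished ✅ [See logs here]'
    return (f'Command "{command}" {status}({link})'
            + collapsed('Subweight results:', subweight)
            + collapsed('Command output:', cmd_output))

print(build_comment('bench --runtime westend', 'https://example.com/run/1',
                    cmd_output='❌ Failed benchmarks of runtimes/pallets:'))
```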
4 changes: 3 additions & 1 deletion .gitlab/pipeline/zombienet.yml
@@ -1,12 +1,14 @@
.zombienet-refs:
extends: .build-refs
variables:
ZOMBIENET_IMAGE: "docker.io/paritytech/zombienet:v1.3.113"
ZOMBIENET_IMAGE: "docker.io/paritytech/zombienet:v1.3.114"
PUSHGATEWAY_URL: "http://zombienet-prometheus-pushgateway.managed-monitoring:9091/metrics/job/zombie-metrics"
DEBUG: "zombie,zombie::network-node,zombie::kube::client::logs"
ZOMBIE_PROVIDER: "k8s"
RUST_LOG: "info,zombienet_orchestrator=debug"
RUN_IN_CI: "1"
KUBERNETES_CPU_REQUEST: "512m"
KUBERNETES_MEMORY_REQUEST: "512M"
timeout: 60m

include:
6 changes: 3 additions & 3 deletions .gitlab/pipeline/zombienet/polkadot.yml
@@ -94,7 +94,7 @@ zombienet-polkadot-functional-0002-parachains-disputes:
--local-dir="${LOCAL_DIR}/functional"
--test="0002-parachains-disputes.zndsl"

zombienet-polkadot-functional-0003-beefy-and-mmr:
.zombienet-polkadot-functional-0003-beefy-and-mmr:
extends:
- .zombienet-polkadot-common
script:
@@ -190,7 +190,7 @@ zombienet-polkadot-elastic-scaling-0002-elastic-scaling-doesnt-break-parachains:
--local-dir="${LOCAL_DIR}/elastic_scaling"
--test="0002-elastic-scaling-doesnt-break-parachains.zndsl"

zombienet-polkadot-functional-0012-spam-statement-distribution-requests:
.zombienet-polkadot-functional-0012-spam-statement-distribution-requests:
extends:
- .zombienet-polkadot-common
script:
@@ -277,7 +277,7 @@ zombienet-polkadot-smoke-0002-parachains-parachains-upgrade-smoke:
--local-dir="${LOCAL_DIR}/smoke"
--test="0002-parachains-upgrade-smoke-test.zndsl"

zombienet-polkadot-smoke-0003-deregister-register-validator:
.zombienet-polkadot-smoke-0003-deregister-register-validator:
extends:
- .zombienet-polkadot-common
script:
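(In GitLab CI, prefixing a job name with a dot turns it into a hidden template job that the pipeline never schedules, so the renames in this file and in the substrate pipeline below disable those zombienet tests without deleting their definitions.)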
2 changes: 1 addition & 1 deletion .gitlab/pipeline/zombienet/substrate.yml
@@ -70,7 +70,7 @@ zombienet-substrate-0001-basic-warp-sync:
--local-dir="${LOCAL_DIR}/0001-basic-warp-sync"
--test="test-warp-sync.zndsl"

zombienet-substrate-0002-validators-warp-sync:
.zombienet-substrate-0002-validators-warp-sync:
extends:
- .zombienet-substrate-warp-sync-common
before_script: