[ci] Make publish work #56

Open · wants to merge 38 commits into base: master

38 commits
0e1717e  [DNM] Update README.md (mordamax, Oct 8, 2024)
bcf5064  Update from mordamax running command 'prdoc --audience node_dev --bum… (actions-user, Oct 8, 2024)
75f511e  flip continue-on-fail to fail-fast, save output to file + artifacts (mordamax, Oct 8, 2024)
a4d5618  Update README.md (mordamax, Oct 8, 2024)
23c6552  Delete pr_5977.prdoc (mordamax, Oct 8, 2024)
6c5cb68  add timeout to cmd (mordamax, Oct 9, 2024)
c1169b0  fix examples tasks name by convention (mordamax, Oct 9, 2024)
6049fe4  Merge branch 'master' into mak-flip-cmd-flag (mordamax, Oct 10, 2024)
a1cb739  log output multiline in workflow (mordamax, Oct 10, 2024)
6ba0a88  Update cmd.yml (mordamax, Oct 10, 2024)
3fd4216  tmp unfix (mordamax, Oct 10, 2024)
b66b2e9  [ci] Make publish work (alvicsam, Oct 14, 2024)
f8d9db5  commit-push (alvicsam, Oct 14, 2024)
e033bba  fix files (alvicsam, Oct 14, 2024)
cf9b98b  change name (alvicsam, Oct 14, 2024)
3a8a96e  check (alvicsam, Oct 14, 2024)
d03ccdb  test manual push (alvicsam, Oct 14, 2024)
9109438  fix git (alvicsam, Oct 14, 2024)
bc3a42f  debug auth (alvicsam, Oct 14, 2024)
277a702  test app creds (alvicsam, Oct 14, 2024)
367199a  orig (alvicsam, Oct 14, 2024)
cf13001  rm http section (alvicsam, Oct 14, 2024)
48d4b9d  try another user (alvicsam, Oct 14, 2024)
c9491a8  try token only (alvicsam, Oct 14, 2024)
cb86379  fix docs (alvicsam, Oct 14, 2024)
ed2ffe8  disable test (alvicsam, Oct 14, 2024)
540d220  test (alvicsam, Oct 14, 2024)
e037f7b  checkout manually (alvicsam, Oct 14, 2024)
f42a247  checkout (alvicsam, Oct 14, 2024)
0e67cc6  add run-id (alvicsam, Oct 14, 2024)
d118374  add secret (alvicsam, Oct 14, 2024)
e357462  wc -l (alvicsam, Oct 14, 2024)
8d0c5c3  sleep (alvicsam, Oct 14, 2024)
0ee6512  install git (alvicsam, Oct 14, 2024)
3d66b8a  sudo make me sandwich (alvicsam, Oct 14, 2024)
657d9aa  slep (alvicsam, Oct 14, 2024)
e8c2d62  T_T (alvicsam, Oct 14, 2024)
8553e37  fix command (alvicsam, Oct 14, 2024)

47 changes: 31 additions & 16 deletions .github/scripts/cmd/cmd.py
@@ -15,19 +15,30 @@
runtimeNames = list(map(lambda x: x['name'], runtimesMatrix))

common_args = {
'--continue-on-fail': {"action": "store_true", "help": "Won't exit(1) on failed command and continue with next steps. "},
'--quiet': {"action": "store_true", "help": "Won't print start/end/failed messages in PR"},
'--clean': {"action": "store_true", "help": "Clean up the previous bot's & author's comments in PR"},
'--image': {"help": "Override docker image '--image docker.io/paritytech/ci-unified:latest'"},
}

def print_and_log(message, output_file='/tmp/cmd/command_output.log'):
print(message)
with open(output_file, 'a') as f:
f.write(message + '\n')

def setup_logging():
if not os.path.exists('/tmp/cmd'):
os.makedirs('/tmp/cmd')
open('/tmp/cmd/command_output.log', 'w')

parser = argparse.ArgumentParser(prog="/cmd ", description='A command runner for polkadot-sdk repo', add_help=False)
parser.add_argument('--help', action=_HelpAction, help='help for help if you need some help') # help for help
for arg, config in common_args.items():
parser.add_argument(arg, **config)

subparsers = parser.add_subparsers(help='a command to run', dest='command')

setup_logging()

"""
BENCH
"""
@@ -39,8 +50,8 @@
Runs benchmarks for pallet_balances and pallet_multisig for all runtimes which have these pallets. **--quiet** makes it output nothing to the PR except reactions
%(prog)s --pallet pallet_balances pallet_xcm_benchmarks::generic --quiet

Runs bench for all pallets for westend runtime and continues even if some benchmarks fail
%(prog)s --runtime westend --continue-on-fail
Runs bench for all pallets for westend runtime and fails fast on first failed benchmark
%(prog)s --runtime westend --fail-fast

Does not output anything and cleans up the previous bot's & author command triggering comments in PR
%(prog)s --runtime westend rococo --pallet pallet_balances pallet_multisig --quiet --clean
@@ -53,6 +64,7 @@

parser_bench.add_argument('--runtime', help='Runtime(s) space separated', choices=runtimeNames, nargs='*', default=runtimeNames)
parser_bench.add_argument('--pallet', help='Pallet(s) space separated', nargs='*', default=[])
parser_bench.add_argument('--fail-fast', help='Fail fast on first failed benchmark', action='store_true')
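The flag flip is worth spelling out: `--continue-on-fail` (opt out of stopping) is replaced by `--fail-fast` (opt in to stopping), so the default is now to collect failures and report them at the end. A small argparse sketch of the new semantics:

```python
import argparse

# A cut-down parser mirroring the bench subcommand above.
parser = argparse.ArgumentParser(prog='/cmd bench')
parser.add_argument('--fail-fast', help='Fail fast on first failed benchmark',
                    action='store_true')

print(parser.parse_args([]).fail_fast)               # False: collect failures, report at the end
print(parser.parse_args(['--fail-fast']).fail_fast)  # True: sys.exit(1) on the first failure
```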

"""
FMT
@@ -156,7 +168,9 @@ def main():
manifest_path = os.popen(search_manifest_path).read()
if not manifest_path:
print(f'-- pallet {pallet} not found in dev runtime')
exit(1)
if args.fail_fast:
print_and_log(f'Error: {pallet} not found in dev runtime')
sys.exit(1)
package_dir = os.path.dirname(manifest_path)
print(f'-- package_dir: {package_dir}')
print(f'-- manifest_path: {manifest_path}')
@@ -186,8 +200,9 @@ def main():
f"{config['bench_flags']}"
print(f'-- Running: {cmd} \n')
status = os.system(cmd)
if status != 0 and not args.continue_on_fail:
print(f'Failed to benchmark {pallet} in {runtime}')

if status != 0 and args.fail_fast:
print_and_log(f'❌ Failed to benchmark {pallet} in {runtime}')
sys.exit(1)

# Otherwise collect failed benchmarks and print them at the end
@@ -198,39 +213,39 @@
successful_benchmarks[f'{runtime}'] = successful_benchmarks.get(f'{runtime}', []) + [pallet]

if failed_benchmarks:
print('❌ Failed benchmarks of runtimes/pallets:')
print_and_log('❌ Failed benchmarks of runtimes/pallets:')
for runtime, pallets in failed_benchmarks.items():
print(f'-- {runtime}: {pallets}')
print_and_log(f'-- {runtime}: {pallets}')

if successful_benchmarks:
print('✅ Successful benchmarks of runtimes/pallets:')
print_and_log('✅ Successful benchmarks of runtimes/pallets:')
for runtime, pallets in successful_benchmarks.items():
print(f'-- {runtime}: {pallets}')
print_and_log(f'-- {runtime}: {pallets}')

elif args.command == 'fmt':
command = f"cargo +nightly fmt"
print(f'Formatting with `{command}`')
nightly_status = os.system(f'{command}')
taplo_status = os.system('taplo format --config .config/taplo.toml')

if (nightly_status != 0 or taplo_status != 0) and not args.continue_on_fail:
print('❌ Failed to format code')
if (nightly_status != 0 or taplo_status != 0):
print_and_log('❌ Failed to format code')
sys.exit(1)

elif args.command == 'update-ui':
command = 'sh ./scripts/update-ui-tests.sh'
print(f'Updating ui with `{command}`')
status = os.system(f'{command}')

if status != 0 and not args.continue_on_fail:
print('❌ Failed to format code')
if status != 0:
print_and_log('❌ Failed to update ui')
sys.exit(1)

elif args.command == 'prdoc':
# Call the main function from ./github/scripts/generate-prdoc.py module
exit_code = generate_prdoc.main(args)
if exit_code != 0 and not args.continue_on_fail:
print('❌ Failed to generate prdoc')
if exit_code != 0:
print_and_log('❌ Failed to generate prdoc')
sys.exit(exit_code)

print('🚀 Done')
20 changes: 10 additions & 10 deletions .github/scripts/cmd/test_cmd.py
@@ -96,7 +96,7 @@ def test_bench_command_normal_execution_all_runtimes(self):
command='bench',
runtime=list(map(lambda x: x['name'], mock_runtimes_matrix)),
pallet=['pallet_balances'],
continue_on_fail=False,
fail_fast=True,
quiet=False,
clean=False,
image=None
@@ -153,7 +153,7 @@ def test_bench_command_normal_execution(self):
command='bench',
runtime=['westend'],
pallet=['pallet_balances', 'pallet_staking'],
continue_on_fail=False,
fail_fast=True,
quiet=False,
clean=False,
image=None
@@ -196,7 +196,7 @@ def test_bench_command_normal_execution_xcm(self):
command='bench',
runtime=['westend'],
pallet=['pallet_xcm_benchmarks::generic'],
continue_on_fail=False,
fail_fast=True,
quiet=False,
clean=False,
image=None
@@ -232,7 +232,7 @@ def test_bench_command_two_runtimes_two_pallets(self):
command='bench',
runtime=['westend', 'rococo'],
pallet=['pallet_balances', 'pallet_staking'],
continue_on_fail=False,
fail_fast=True,
quiet=False,
clean=False,
image=None
@@ -290,7 +290,7 @@ def test_bench_command_one_dev_runtime(self):
command='bench',
runtime=['dev'],
pallet=['pallet_balances'],
continue_on_fail=False,
fail_fast=True,
quiet=False,
clean=False,
image=None
@@ -327,7 +327,7 @@ def test_bench_command_one_cumulus_runtime(self):
command='bench',
runtime=['asset-hub-westend'],
pallet=['pallet_assets'],
continue_on_fail=False,
fail_fast=True,
quiet=False,
clean=False,
image=None
@@ -362,7 +362,7 @@ def test_bench_command_one_cumulus_runtime_xcm(self):
command='bench',
runtime=['asset-hub-westend'],
pallet=['pallet_xcm_benchmarks::generic', 'pallet_assets'],
continue_on_fail=False,
fail_fast=True,
quiet=False,
clean=False,
image=None
@@ -400,7 +400,7 @@ def test_bench_command_one_cumulus_runtime_xcm(self):

self.mock_system.assert_has_calls(expected_calls, any_order=True)

@patch('argparse.ArgumentParser.parse_known_args', return_value=(argparse.Namespace(command='fmt', continue_on_fail=False), []))
@patch('argparse.ArgumentParser.parse_known_args', return_value=(argparse.Namespace(command='fmt'), []))
@patch('os.system', return_value=0)
def test_fmt_command(self, mock_system, mock_parse_args):
with patch('sys.exit') as mock_exit:
@@ -410,7 +410,7 @@ def test_fmt_command(self, mock_system, mock_parse_args):
mock_system.assert_any_call('cargo +nightly fmt')
mock_system.assert_any_call('taplo format --config .config/taplo.toml')

@patch('argparse.ArgumentParser.parse_known_args', return_value=(argparse.Namespace(command='update-ui', continue_on_fail=False), []))
@patch('argparse.ArgumentParser.parse_known_args', return_value=(argparse.Namespace(command='update-ui'), []))
@patch('os.system', return_value=0)
def test_update_ui_command(self, mock_system, mock_parse_args):
with patch('sys.exit') as mock_exit:
@@ -419,7 +419,7 @@ def test_update_ui_command(self, mock_system, mock_parse_args):
mock_exit.assert_not_called()
mock_system.assert_called_with('sh ./scripts/update-ui-tests.sh')

@patch('argparse.ArgumentParser.parse_known_args', return_value=(argparse.Namespace(command='prdoc', continue_on_fail=False), []))
@patch('argparse.ArgumentParser.parse_known_args', return_value=(argparse.Namespace(command='prdoc'), []))
@patch('os.system', return_value=0)
def test_prdoc_command(self, mock_system, mock_parse_args):
with patch('sys.exit') as mock_exit:
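To exercise the updated fixtures locally, the suite can be run with the standard unittest runner (a sketch; it assumes the repository root as the working directory and that test_cmd.py is discoverable there):

```python
import unittest

# Discover and run test_cmd.py; the tests patch argparse and os.system,
# so no real benchmarks or formatting runs are triggered.
suite = unittest.defaultTestLoader.discover(
    '.github/scripts/cmd', pattern='test_cmd.py'
)
unittest.TextTestRunner(verbosity=2).run(suite)
```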