# Also see documentation at https://github.com/EESSI/eessi-bot-software-layer/blob/main/README.md#step5.5
[github]
# replace '123456' with the ID of your GitHub App; see https://github.com/settings/apps
app_id = 123456
# a short (!) name for your app instance that can be used for example
# when adding/updating a comment to a PR
# (!) a short yet descriptive name is preferred because it appears in
# comments to the PR
# for example, the name could include the name of the cluster the bot
# runs on and the username which runs the bot
# NOTE avoid putting an actual username here as it will be visible on
# potentially publicly accessible GitHub pages.
app_name = MY-bot
# replace '12345678' with the ID of the installation of your GitHub App
# (can be derived by creating an event and then checking the list
# of sent events and their payloads, either via the Smee channel's web page
# or via the Advanced section of your GitHub App on github.com)
installation_id = 12345678
# path to the private key that was generated when the GitHub App was registered
private_key = PATH_TO_PRIVATE_KEY
[bot_control]
# which GH accounts have permission to send commands to the bot
# if the value is left empty, everyone can send commands
# value can be a space-delimited list of GH accounts
#
# NOTE, be careful not to list the bot's name (GH app name) or it may consider
# comments created/updated by itself as a bot command and thus enter an
# endless loop.
command_permission =
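# for example (hypothetical account names, shown only as an illustration):
# command_permission = alice bob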
# format of the response when processing bot commands. NOTE, make sure the
# placeholders {app_name}, {comment_response} and {comment_result} are included.
command_response_fmt =
<details><summary>Updates by the bot instance <code>{app_name}</code>
<em>(click for details)</em></summary>
{comment_response}
{comment_result}
</details>
[buildenv]
# name of the job script used for building an EESSI stack
build_job_script = PATH_TO_EESSI_BOT/scripts/bot-build.slurm
# path to directory on shared filesystem that can be used for sharing data across build jobs
# (for example source tarballs used by EasyBuild)
shared_fs_path = PATH_TO_SHARED_DIRECTORY
# path (directory) to which build logs of failing builds (only) should be copied by the bot/build.sh script
build_logs_dir = PATH_TO_BUILD_LOGS_DIR
# The container_cachedir may be used to reuse downloaded container image files
# across jobs, so that jobs can launch containers more quickly.
container_cachedir = PATH_TO_SHARED_DIRECTORY
# it may happen that we need to customize some CVMFS configuration
# the value of cvmfs_customizations is a dictionary which maps a file
# name to an entry that needs to be added to that file
# cvmfs_customizations = { "/etc/cvmfs/default.local": "CVMFS_HTTP_PROXY=\"http://PROXY_DNS_NAME:3128|http://PROXY_IP_ADDRESS:3128\"" }
# if compute nodes have no internet connection, we need to set http(s)_proxy
# or commands such as pip3 cannot download software from package repositories
# for example, the temporary EasyBuild is installed via pip3 first
# http_proxy = http://PROXY_DNS:3128/
# https_proxy = http://PROXY_DNS:3128/
# directory under which the bot prepares directories per job
# structure created is as follows: YYYY.MM/pr_PR_NUMBER/event_EVENT_ID/run_RUN_NUMBER/OS+SUBDIR
jobs_base_dir = $HOME/jobs
# configure environment
# list of comma-separated modules to be loaded by build_job_script
# useful/needed if some tool is not provided as a system-wide package
# (read by the bot and handed over to build_job_script via the parameter
# --load-modules)
load_modules =
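# for example (hypothetical module names, shown only as an illustration):
# load_modules = git,python3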
# path to a temporary directory on the build node; ends up being used,
# for example, as EESSI_TMPDIR --> /tmp/$USER/EESSI
# escaping variables with '\' delays expansion to the start of the
# build_job_script; this can be used for referencing environment
# variables that are only set inside a Slurm job
local_tmp = /tmp/$USER/EESSI
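# to use the escaping mentioned above (delay expansion of $USER until the
# build_job_script runs inside the Slurm job), one could write, as an illustration:
# local_tmp = /tmp/\$USER/EESSI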
# parameters to be added to all job submissions
# NOTE do not quote the parameter string. Quotes are retained when reading the config and
# then the whole 'string' is recognised as a single parameter.
# NOTE 2 '--get-user-env' may be needed on systems where the job's environment needs
# to be initialised as if it were for a login shell.
slurm_params = --hold
# full path to the job submission command
submit_command = /usr/bin/sbatch
# which GH accounts have permission to trigger a build by setting
# the label 'bot:build' (apparently this cannot be restricted on GitHub)
# if the value is left empty, everyone can trigger builds
# value can be a space-delimited list of GH accounts
build_permission =
# template for the comment posted when the user who set the label lacks permission to trigger build jobs
no_build_permission_comment = Label `bot:build` has been set by user `{build_labeler}`, but this person does not have permission to trigger builds
[deploycfg]
# script for uploading built software packages
tarball_upload_script = PATH_TO_EESSI_BOT/scripts/eessi-upload-to-staging
# URL to S3/minio bucket
# if attribute is set, bucket_base will be constructed as follows
# bucket_base=${endpoint_url}/${bucket_name}
# otherwise, bucket_base will be constructed as follows
# bucket_base=https://${bucket_name}.s3.amazonaws.com
# - The former variant is used for non-AWS S3 services, e.g. minio, or when
# the bucket name is not provided in the hostname (see latter case).
# - The latter variant is used for AWS S3 services.
endpoint_url = URL_TO_S3_SERVER
# bucket name
bucket_name = eessi-staging
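# illustration of how bucket_base is derived (hypothetical endpoint URL):
# with endpoint_url = https://minio.example.org:9000 and bucket_name = eessi-staging,
# bucket_base becomes https://minio.example.org:9000/eessi-staging;
# if endpoint_url is not set, bucket_base would be https://eessi-staging.s3.amazonaws.com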
# upload policy: defines what policy is used for uploading built artefacts
# to an S3 bucket
# 'all' ..: upload all artefacts (multiple uploads of the same artefact possible)
# 'latest': for each build target (eessi-VERSION-{software,init,compat}-OS-ARCH)
# only upload the latest built artefact
# 'once' : upload any built artefact for the build target only once
# 'none' : do not upload any built artefacts
upload_policy = once
# which GH accounts have permission to trigger a deployment by setting
# the label 'bot:deploy' (apparently this cannot be restricted on GitHub)
# if the value is left empty, everyone can trigger deployments
# value can be a space-delimited list of GH accounts
deploy_permission =
# template for the comment posted when the user who set the label lacks permission to trigger deployments
no_deploy_permission_comment = Label `bot:deploy` has been set by user `{deploy_labeler}`, but this person does not have permission to trigger deployments
[architecturetargets]
# defines both for which architectures the bot will build
# and what submission parameters shall be used
arch_target_map = { "linux/x86_64/generic" : "--constraint shape=c4.2xlarge", "linux/x86_64/amd/zen2": "--constraint shape=c5a.2xlarge" }
[repo_targets]
# defines for which repositories an arch_target should be built
#
# EESSI/2021.12 and NESSI/2022.11
repo_target_map = { "linux/x86_64/amd/zen2" : ["eessi-2021.12","nessi.no-2022.11"] }
# points to the definition of repositories (default: EESSI-pilot, defined by the build container)
repos_cfg_dir = PATH_TO_SHARED_DIRECTORY/cfg_bundles
# configuration for event handler which receives events from a GitHub repository.
[event_handler]
# path to the log file to log messages for event handler
log_path = /path/to/eessi_bot_event_handler.log
[job_manager]
# path to the log file to log messages for job manager
log_path = /path/to/eessi_bot_job_manager.log
# directory where the job manager stores information about jobs to be tracked,
# e.g. as a symbolic link JOBID -> job directory
job_ids_dir = $HOME/jobs/ids
# full path to the job status checking command
poll_command = /usr/bin/squeue
# polling interval in seconds
poll_interval = 60
# full path to the command for manipulating existing jobs
scontrol_command = /usr/bin/scontrol
# Note 1. The value of the setting 'initial_comment' in section
# '[submitted_job_comments]' should not be changed because the bot
# uses a regular expression pattern to identify a comment with this
# format.
# Note 2. Any name inside curly brackets is replaced by the bot with
# corresponding data. If the name is changed or the curly brackets
# are removed, the output (in PR comments) will lack important
# information.
[submitted_job_comments]
initial_comment = New job on instance `{app_name}` for architecture `{arch_name}` for repository `{repo_id}` in job dir `{symlink}`
awaits_release = job id `{job_id}` awaits release by job manager
[new_job_comments]
awaits_launch = job awaits launch by Slurm scheduler
[running_job_comments]
running_job = job `{job_id}` is running
[finished_job_comments]
success = :grin: SUCCESS tarball `{tarball_name}` ({tarball_size} GiB) in job dir
failure = :cry: FAILURE
no_slurm_out = No slurm output `{slurm_out}` in job dir
slurm_out = Found slurm output `{slurm_out}` in job dir
missing_modules = Slurm output lacks message "No missing modules!".
no_tarball_message = Slurm output lacks message about created tarball.
no_matching_tarball = No tarball matching `{tarball_pattern}` found in job dir.
multiple_tarballs = Found {num_tarballs} tarballs in job dir - only 1 matching `{tarball_pattern}` expected.
job_result_unknown_fmt = <details><summary>:shrug: UNKNOWN _(click triangle for detailed information)_</summary><ul><li>Job results file `{filename}` does not exist in job directory or reading it failed.</li><li>No artefacts were found/reported.</li></ul></details>