From 9ba69587ba54b1a0bf29446d646e9b3e5d2f800f Mon Sep 17 00:00:00 2001 From: mishaschwartz Date: Thu, 13 Feb 2020 09:24:05 -0500 Subject: [PATCH 01/46] permissions: Make subdirectories writeable in test workspace (#237) Also set all other permissions to 0. --- Changelog => Changelog.md | 5 +++-- server/autotest_server.py | 20 ++++++++++---------- 2 files changed, 13 insertions(+), 12 deletions(-) rename Changelog => Changelog.md (75%) diff --git a/Changelog b/Changelog.md similarity index 75% rename from Changelog rename to Changelog.md index 7d000510..89aa0834 100644 --- a/Changelog +++ b/Changelog.md @@ -1,10 +1,11 @@ # CHANGELOG - All notable changes to this project will be documented here. -_NOTE: This changelog starts from version 1.8.1 (changes prior to this version are not documented)_ +## [unreleased] +- allow tests to write to existing subdirectories but not overwrite existing test script files (#237) ## [1.8.1] +_NOTE: This changelog starts from version 1.8.1 (changes prior to this version are not documented)_ ### Added - changelog - for all changes prior to this version see https://github.com/MarkUsProject/markus-autotesting/pulls?utf8=%E2%9C%93&q=is%3Apr+created%3A%3C2019-12-19+ diff --git a/server/autotest_server.py b/server/autotest_server.py index 7b6b589a..cbf10b30 100755 --- a/server/autotest_server.py +++ b/server/autotest_server.py @@ -353,24 +353,24 @@ def setup_files(files_path, tests_path, markus_address, assignment_id): then make it the current working directory. The following permissions are also set: - tests_path directory: rwxrwx--T - - test subdirectories: rwxr-xr-x - - test files: rw-r--r-- - - student subdirectories: rwxrwxrwx - - student files: rw-rw-rw- + - test subdirectories: rwxrwx--T + - test files: rw-r----- + - student subdirectories: rwxrwx--- + - student files: rw-rw---- """ os.chmod(tests_path, 0o1770) student_files = move_tree(files_path, tests_path) for fd, file_or_dir in student_files: if fd == 'd': - os.chmod(file_or_dir, 0o777) + os.chmod(file_or_dir, 0o770) else: - os.chmod(file_or_dir, 0o666) + os.chmod(file_or_dir, 0o660) script_files = copy_test_script_files(markus_address, assignment_id, tests_path) for fd, file_or_dir in script_files: - permissions = 0o755 - if fd == 'f': - permissions -= 0o111 - os.chmod(file_or_dir, permissions) + if fd == 'd': + os.chmod(file_or_dir, 0o1770) + else: + os.chmod(file_or_dir, 0o640) return student_files, script_files def test_run_command(test_username=None): From ec51713d6d29efd4384d990750c7d2dea2d50482 Mon Sep 17 00:00:00 2001 From: mishaschwartz Date: Thu, 12 Dec 2019 14:12:00 -0500 Subject: [PATCH 02/46] start-stop: add script for starting and stopping the supervisor process more cleanly --- server/autotest_server.py | 7 ++-- server/bin/install.sh | 9 ++--- server/bin/start-stop.sh | 69 +++++++++++++++++++++++++++++++++++++++ server/config.py | 11 ++++--- 4 files changed, 83 insertions(+), 13 deletions(-) create mode 100755 server/bin/start-stop.sh diff --git a/server/autotest_server.py b/server/autotest_server.py index cbf10b30..28f8772d 100755 --- a/server/autotest_server.py +++ b/server/autotest_server.py @@ -127,14 +127,13 @@ def recursive_iglob(root_dir): def redis_connection(): """ Return the currently open redis connection object. 
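(For reference, the Redis connection pattern introduced in this hunk, reduced to a standalone sketch. The fallback URL 'redis://' mirrors the new config default; the function and queue names are illustrative, not the project's exact code, and the sketch assumes the redis and rq packages pinned later in this series are installed.)

    import os
    import redis
    import rq

    REDIS_URL = os.environ.get('REDIS_URL', 'redis://')  # same default as the new config

    def get_connection():
        # Reuse the connection rq already has open, if any.
        conn = rq.get_current_connection()
        if conn:
            return conn
        # Otherwise create one from the URL and register it with rq.
        rq.use_connection(redis=redis.Redis.from_url(REDIS_URL))
        return rq.get_current_connection()

    queue = rq.Queue('example', connection=get_connection())  # hypothetical queue name
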
If there is no - connection currently open, one is created using the keyword arguments - specified in config.REDIS_CONNECTION_KWARGS + connection currently open, one is created using the url specified in + config.REDIS_URL """ conn = rq.get_current_connection() if conn: return conn - kwargs = config.REDIS_CONNECTION_KWARGS - rq.use_connection(redis=redis.Redis(**kwargs)) + rq.use_connection(redis=redis.Redis.from_url(config.REDIS_URL)) return rq.get_current_connection() def copy_tree(src, dst, exclude=[]): diff --git a/server/bin/install.sh b/server/bin/install.sh index bde181fd..3d2d7465 100755 --- a/server/bin/install.sh +++ b/server/bin/install.sh @@ -4,7 +4,7 @@ set -e install_packages() { echo "[AUTOTEST-INSTALL] Installing system packages" - sudo apt-get install "python${PYTHONVERSION}" "python${PYTHONVERSION}-venv" redis-server jq postgresql + sudo apt-get install "python${PYTHONVERSION}" "python${PYTHONVERSION}-venv" redis-server jq postgresql iptables } create_server_user() { @@ -45,11 +45,11 @@ create_worker_dir() { sudo mkdir -p ${workerdir} sudo chown ${SERVERUSEREFFECTIVE}:${workeruser} ${workerdir} sudo chmod ug=rwx,o=,+t ${workerdir} - redis-cli HSET ${REDISWORKERS} ${workeruser} ${workerdir} + redis-cli -u ${REDIS_URL} HSET ${REDISWORKERS} ${workeruser} ${workerdir} } create_worker_and_reaper_users() { - redis-cli DEL ${REDISWORKERS} > /dev/null + redis-cli -u ${REDIS_URL} DEL ${REDISWORKERS} > /dev/null if [[ -z ${WORKERUSERS} ]]; then echo "[AUTOTEST-INSTALL] No dedicated worker user, using '${SERVERUSEREFFECTIVE}'" create_worker_dir ${SERVERUSEREFFECTIVE} @@ -205,7 +205,7 @@ suggest_next_steps() { if [[ -n ${SERVERUSER} ]]; then echo "[AUTOTEST-INSTALL] You must add MarkUs web server's public key to ${SERVERUSER}'s '~/.ssh/authorized_keys'" fi - echo "[AUTOTEST-INSTALL] You may want to add 'source ${SERVERDIR}/venv/bin/activate && cd ${WORKSPACEDIR} && supervisord -c ${SERVERDIR}/supervisord.conf && deactivate' to ${SERVERUSEREFFECTIVE}'s crontab with a @reboot time" + echo "[AUTOTEST-INSTALL] You may want to add '${BINDIR}/start-stop.sh start' to ${SERVERUSEREFFECTIVE}'s crontab with a @reboot time" echo "[AUTOTEST-INSTALL] You should install the individual testers you plan to use" } @@ -247,6 +247,7 @@ REDISPREFIX=$(get_config_param REDIS_PREFIX) REDISWORKERS=${REDISPREFIX}$(get_config_param REDIS_WORKERS_HASH) REAPERPREFIX=$(get_config_param REAPER_USER_PREFIX) POSTGRESPREFIX=$(get_config_param POSTGRES_PREFIX) +REDIS_URL=$(get_config_param REDIS_URL) # main create_server_user diff --git a/server/bin/start-stop.sh b/server/bin/start-stop.sh new file mode 100755 index 00000000..2dde203a --- /dev/null +++ b/server/bin/start-stop.sh @@ -0,0 +1,69 @@ +#!/usr/bin/env bash + +set -e + +start_supervisor() { + local pid_file=${LOGSDIR}/supervisord.pid + if [ -f ${pid_file} ]; then + echo "Supervisor appears to be running already (PID: $(cat ${pid_file}))" >&2 + exit 1 + fi + pushd ${LOGSDIR} > /dev/null + supervisord -c supervisord.conf + popd > /dev/null +} + +stop_supervisor() { + local pid_file=${LOGSDIR}/supervisord.pid + if [ ! 
-f ${pid_file} ]; then + echo 'Supervisor appears to be stopped already' >&2 + exit 1 + fi + kill $(cat ${pid_file}) +} + +get_config_param() { + echo $(cd ${SERVERDIR} && python3 -c "import config; print(config.$1)") +} + +# script starts here +THISSCRIPT=$(readlink -f ${BASH_SOURCE}) +THISDIR=$(dirname ${THISSCRIPT}) +SERVERDIR=$(dirname ${THISDIR}) +CONFIG=${SERVERDIR}/config.py + +source ${SERVERDIR}/venv/bin/activate + +SERVERUSER=$(get_config_param SERVERUSER) +WORKSPACEDIR=$(get_config_param WORKSPACE_DIR) +LOGSDIR=${WORKSPACEDIR}/$(get_config_param LOGS_DIR_NAME) + +if [[ -n ${SERVERUSER} ]]; then + SERVERUSEREFFECTIVE=${SERVERUSER} +else + SERVERUSEREFFECTIVE=$(whoami) +fi + +if [[ "$(whoami)" != "${SERVERUSEREFFECTIVE}" ]]; then + echo "Please run this script as user: ${SERVERUSEREFFECTIVE}" >&2 + exit 2 +fi + +case $1 in + start) + start_supervisor + ;; + stop) + stop_supervisor + ;; + restart) + stop_supervisor + start_supervisor + ;; + stat) + rq info ${@:2} + *) + echo "Usage: $0 [start | stop | restart | stat]" >&2 + exit 1 + ;; +esac diff --git a/server/config.py b/server/config.py index 52e79211..f8db0054 100755 --- a/server/config.py +++ b/server/config.py @@ -1,5 +1,7 @@ #!/usr/bin/env python3 +import os + #### CHANGE CONFIG PARAMETERS BELOW #### ## REDIS CONFIGS ## @@ -12,9 +14,8 @@ REDIS_WORKERS_HASH = 'workers' # name of redis integer used to access the next available port REDIS_PORT_INT = 'ports' -# dictionary containing keyword arguments to pass to rq.use_connection -# when connecting to a redis database (empty dictionary is default) -REDIS_CONNECTION_KWARGS = {} +# redis connection url +REDIS_URL = os.environ.get('REDIS_URL', 'redis://') # prefix to prepend to all redis keys generated by the autotester REDIS_PREFIX = 'autotest:' # prefix to prepend to all postgres databases created @@ -23,7 +24,7 @@ ## WORKING DIR CONFIGS ## # the main working directory -WORKSPACE_DIR = '/home/vagrant/markus-autotesting/server/workspace' +WORKSPACE_DIR = '/app/server/workspace' # name of the directory containing test scripts SCRIPTS_DIR_NAME = 'scripts' # name of the directory containing test results @@ -35,7 +36,7 @@ # name of the directory containing log files LOGS_DIR_NAME = 'logs' # name of the server user -SERVER_USER = '' +SERVER_USER = 'autotst' # names of the worker users WORKER_USERS = 'autotst0 autotst1 autotst2 autotst3 autotst4 autotst5 autotst6 autotst7' # prefix used to name reaper users From c15144f3084329b5a31f47471039aea89a7e6b75 Mon Sep 17 00:00:00 2001 From: mishaschwartz Date: Thu, 12 Dec 2019 14:52:33 -0500 Subject: [PATCH 03/46] reorganize: initial reorganization of all files (no edits, just movement and creation) --- .../autotester}/__init__.py | 0 .../autotester/cli.py | 0 .../autotester/server}/__init__.py | 0 .../autotester/server}/form_validation/__init__.py | 0 .../autotester/server/hooks_context}/__init__.py | 0 .../server}/hooks_context/builtin_hooks.py | 0 .../server}/hooks_context/hooks_context.py | 0 .../autotester/server}/hooks_context/utils.py | 0 .../autotester/server/server.py | 0 .../autotester/testers}/__init__.py | 0 .../autotester/testers/custom}/__init__.py | 0 .../autotester}/testers/custom/bin/default_hooks.py | 0 .../autotester}/testers/custom/bin/install.sh | 0 .../autotester}/testers/custom/bin/uninstall.sh | 0 .../testers/custom/markus_custom_tester.py | 0 .../custom/specs/default_install_settings.json | 0 .../testers/custom/specs/settings_schema.json | 0 .../custom/tests/script_files/autotest_01.sh | 0 
.../autotester}/testers/custom/tests/specs.json | 0 .../custom/tests/student_files/submission.py | 0 .../autotester/testers/haskell}/__init__.py | 0 .../autotester}/testers/haskell/bin/install.sh | 0 .../autotester}/testers/haskell/bin/uninstall.sh | 0 .../testers/haskell/markus_haskell_tester.py | 0 .../testers/haskell/specs/settings_schema.json | 0 .../testers/haskell/tests/script_files/Test.hs | 0 .../autotester}/testers/haskell/tests/specs.json | 0 .../haskell/tests/student_files/Submission.hs | 0 .../autotester/testers/java}/__init__.py | 0 .../autotester}/testers/java/bin/install.sh | 0 .../autotester}/testers/java/bin/uninstall.sh | 0 .../autotester}/testers/java/lib/build.gradle | 0 .../java/lib/gradle/wrapper/gradle-wrapper.jar | Bin .../lib/gradle/wrapper/gradle-wrapper.properties | 0 .../autotester}/testers/java/lib/gradlew | 0 .../autotester}/testers/java/lib/gradlew.bat | 0 .../autotester}/testers/java/lib/settings.gradle | 0 .../java/edu/toronto/cs/teach/MarkusJavaTester.java | 0 .../autotester}/testers/java/markus_java_tester.py | 0 .../testers/java/specs/settings_schema.json | 0 .../testers/java/tests/script_files/Test1.java | 0 .../testers/java/tests/script_files/Test2.java | 0 .../autotester}/testers/java/tests/specs.json | 0 .../java/tests/student_files/Submission.java | 0 .../autotester}/testers/markus_test_specs.py | 0 .../autotester}/testers/markus_tester.py | 0 .../autotester/testers/py}/__init__.py | 0 .../testers/py/bin/create_environment.sh | 0 .../autotester}/testers/py/bin/install.sh | 0 .../autotester}/testers/py/bin/requirements.txt | 0 .../autotester}/testers/py/bin/uninstall.sh | 0 .../autotester}/testers/py/lib/c_helper.py | 0 .../autotester}/testers/py/lib/sql_helper.py | 0 .../autotester}/testers/py/markus_python_tester.py | 0 .../testers/py/specs/settings_schema.json | 0 .../testers/py/tests/script_files/data1.sql | 0 .../testers/py/tests/script_files/data2.sql | 0 .../testers/py/tests/script_files/schema.ddl | 0 .../testers/py/tests/script_files/test.py | 0 .../testers/py/tests/script_files/test2.py | 0 .../testers/py/tests/script_files/test_sql.py | 0 .../autotester}/testers/py/tests/specs.json | 0 .../testers/py/tests/student_files/submission.py | 0 .../testers/py/tests/student_files/submission.sql | 0 .../autotester/testers/pyta}/__init__.py | 0 .../testers/pyta/bin/create_environment.sh | 0 .../autotester}/testers/pyta/bin/install.sh | 0 .../autotester}/testers/pyta/bin/uninstall.sh | 0 .../autotester}/testers/pyta/markus_pyta_tester.py | 0 .../testers/pyta/specs/settings_schema.json | 0 .../autotester}/testers/pyta/tests/specs.json | 0 .../testers/pyta/tests/student_files/submission.py | 0 .../autotester/testers/racket/__init__.py | 0 .../autotester}/testers/racket/bin/install.sh | 0 .../autotester}/testers/racket/bin/uninstall.sh | 0 .../autotester}/testers/racket/lib/markus.rkt | 0 .../testers/racket/markus_racket_tester.py | 0 .../testers/racket/specs/settings_schema.json | 0 .../testers/racket/tests/script_files/test.rkt | 0 .../autotester}/testers/racket/tests/specs.json | 0 .../racket/tests/student_files/submission.rkt | 0 .../autotester}/testers/tests/.gitkeep | 0 autotester/autotester/tests/__init__.py | 0 .../autotester}/tests/autotest_enqueuer_test.py | 0 .../autotester}/tests/config_default.py | 0 autotester/setup.py | 0 {server/bin => bin}/archive_workspace.sh | 0 {server => bin}/generate_supervisord_conf.py | 0 {server/bin => bin}/install.sh | 0 {server/bin => bin}/kill_worker_procs.c | 0 {server => bin}/rq_fail_queue_contents.py | 0 
{server/bin => bin}/start-stop.sh | 0 {server/bin => bin}/tester_schema_skeleton.json | 0 {server/bin => bin}/uninstall.sh | 0 {server => config}/config.py | 0 config/default_tester_requirements.txt | 0 {server/bin => config}/requirements.txt | 0 97 files changed, 0 insertions(+), 0 deletions(-) rename {server/hooks_context => autotester/autotester}/__init__.py (100%) rename server/autotest_enqueuer.py => autotester/autotester/cli.py (100%) rename {server/tests => autotester/autotester/server}/__init__.py (100%) rename {server => autotester/autotester/server}/form_validation/__init__.py (100%) rename {testers/testers => autotester/autotester/server/hooks_context}/__init__.py (100%) rename {server => autotester/autotester/server}/hooks_context/builtin_hooks.py (100%) rename {server => autotester/autotester/server}/hooks_context/hooks_context.py (100%) rename {server => autotester/autotester/server}/hooks_context/utils.py (100%) rename server/autotest_server.py => autotester/autotester/server/server.py (100%) rename {testers/testers/custom => autotester/autotester/testers}/__init__.py (100%) rename {testers/testers/haskell => autotester/autotester/testers/custom}/__init__.py (100%) rename {testers => autotester/autotester}/testers/custom/bin/default_hooks.py (100%) rename {testers => autotester/autotester}/testers/custom/bin/install.sh (100%) rename {testers => autotester/autotester}/testers/custom/bin/uninstall.sh (100%) rename {testers => autotester/autotester}/testers/custom/markus_custom_tester.py (100%) rename {testers => autotester/autotester}/testers/custom/specs/default_install_settings.json (100%) rename {testers => autotester/autotester}/testers/custom/specs/settings_schema.json (100%) rename {testers => autotester/autotester}/testers/custom/tests/script_files/autotest_01.sh (100%) rename {testers => autotester/autotester}/testers/custom/tests/specs.json (100%) rename {testers => autotester/autotester}/testers/custom/tests/student_files/submission.py (100%) rename {testers/testers/java => autotester/autotester/testers/haskell}/__init__.py (100%) rename {testers => autotester/autotester}/testers/haskell/bin/install.sh (100%) rename {testers => autotester/autotester}/testers/haskell/bin/uninstall.sh (100%) rename {testers => autotester/autotester}/testers/haskell/markus_haskell_tester.py (100%) rename {testers => autotester/autotester}/testers/haskell/specs/settings_schema.json (100%) rename {testers => autotester/autotester}/testers/haskell/tests/script_files/Test.hs (100%) rename {testers => autotester/autotester}/testers/haskell/tests/specs.json (100%) rename {testers => autotester/autotester}/testers/haskell/tests/student_files/Submission.hs (100%) rename {testers/testers/py => autotester/autotester/testers/java}/__init__.py (100%) rename {testers => autotester/autotester}/testers/java/bin/install.sh (100%) rename {testers => autotester/autotester}/testers/java/bin/uninstall.sh (100%) rename {testers => autotester/autotester}/testers/java/lib/build.gradle (100%) rename {testers => autotester/autotester}/testers/java/lib/gradle/wrapper/gradle-wrapper.jar (100%) rename {testers => autotester/autotester}/testers/java/lib/gradle/wrapper/gradle-wrapper.properties (100%) rename {testers => autotester/autotester}/testers/java/lib/gradlew (100%) rename {testers => autotester/autotester}/testers/java/lib/gradlew.bat (100%) rename {testers => autotester/autotester}/testers/java/lib/settings.gradle (100%) rename {testers => 
autotester/autotester}/testers/java/lib/src/main/java/edu/toronto/cs/teach/MarkusJavaTester.java (100%) rename {testers => autotester/autotester}/testers/java/markus_java_tester.py (100%) rename {testers => autotester/autotester}/testers/java/specs/settings_schema.json (100%) rename {testers => autotester/autotester}/testers/java/tests/script_files/Test1.java (100%) rename {testers => autotester/autotester}/testers/java/tests/script_files/Test2.java (100%) rename {testers => autotester/autotester}/testers/java/tests/specs.json (100%) rename {testers => autotester/autotester}/testers/java/tests/student_files/Submission.java (100%) rename {testers => autotester/autotester}/testers/markus_test_specs.py (100%) rename {testers => autotester/autotester}/testers/markus_tester.py (100%) rename {testers/testers/pyta => autotester/autotester/testers/py}/__init__.py (100%) rename {testers => autotester/autotester}/testers/py/bin/create_environment.sh (100%) rename {testers => autotester/autotester}/testers/py/bin/install.sh (100%) rename {testers => autotester/autotester}/testers/py/bin/requirements.txt (100%) rename {testers => autotester/autotester}/testers/py/bin/uninstall.sh (100%) rename {testers => autotester/autotester}/testers/py/lib/c_helper.py (100%) rename {testers => autotester/autotester}/testers/py/lib/sql_helper.py (100%) rename {testers => autotester/autotester}/testers/py/markus_python_tester.py (100%) rename {testers => autotester/autotester}/testers/py/specs/settings_schema.json (100%) rename {testers => autotester/autotester}/testers/py/tests/script_files/data1.sql (100%) rename {testers => autotester/autotester}/testers/py/tests/script_files/data2.sql (100%) rename {testers => autotester/autotester}/testers/py/tests/script_files/schema.ddl (100%) rename {testers => autotester/autotester}/testers/py/tests/script_files/test.py (100%) rename {testers => autotester/autotester}/testers/py/tests/script_files/test2.py (100%) rename {testers => autotester/autotester}/testers/py/tests/script_files/test_sql.py (100%) rename {testers => autotester/autotester}/testers/py/tests/specs.json (100%) rename {testers => autotester/autotester}/testers/py/tests/student_files/submission.py (100%) rename {testers => autotester/autotester}/testers/py/tests/student_files/submission.sql (100%) rename {testers/testers/racket => autotester/autotester/testers/pyta}/__init__.py (100%) rename {testers => autotester/autotester}/testers/pyta/bin/create_environment.sh (100%) rename {testers => autotester/autotester}/testers/pyta/bin/install.sh (100%) rename {testers => autotester/autotester}/testers/pyta/bin/uninstall.sh (100%) rename {testers => autotester/autotester}/testers/pyta/markus_pyta_tester.py (100%) rename {testers => autotester/autotester}/testers/pyta/specs/settings_schema.json (100%) rename {testers => autotester/autotester}/testers/pyta/tests/specs.json (100%) rename {testers => autotester/autotester}/testers/pyta/tests/student_files/submission.py (100%) rename server/bin/default_tester_requirements.txt => autotester/autotester/testers/racket/__init__.py (100%) rename {testers => autotester/autotester}/testers/racket/bin/install.sh (100%) rename {testers => autotester/autotester}/testers/racket/bin/uninstall.sh (100%) rename {testers => autotester/autotester}/testers/racket/lib/markus.rkt (100%) rename {testers => autotester/autotester}/testers/racket/markus_racket_tester.py (100%) rename {testers => autotester/autotester}/testers/racket/specs/settings_schema.json (100%) rename {testers => 
autotester/autotester}/testers/racket/tests/script_files/test.rkt (100%) rename {testers => autotester/autotester}/testers/racket/tests/specs.json (100%) rename {testers => autotester/autotester}/testers/racket/tests/student_files/submission.rkt (100%) rename {testers => autotester/autotester}/testers/tests/.gitkeep (100%) create mode 100644 autotester/autotester/tests/__init__.py rename {server => autotester/autotester}/tests/autotest_enqueuer_test.py (100%) rename {server => autotester/autotester}/tests/config_default.py (100%) create mode 100644 autotester/setup.py rename {server/bin => bin}/archive_workspace.sh (100%) rename {server => bin}/generate_supervisord_conf.py (100%) rename {server/bin => bin}/install.sh (100%) rename {server/bin => bin}/kill_worker_procs.c (100%) rename {server => bin}/rq_fail_queue_contents.py (100%) rename {server/bin => bin}/start-stop.sh (100%) rename {server/bin => bin}/tester_schema_skeleton.json (100%) rename {server/bin => bin}/uninstall.sh (100%) rename {server => config}/config.py (100%) create mode 100644 config/default_tester_requirements.txt rename {server/bin => config}/requirements.txt (100%) diff --git a/server/hooks_context/__init__.py b/autotester/autotester/__init__.py similarity index 100% rename from server/hooks_context/__init__.py rename to autotester/autotester/__init__.py diff --git a/server/autotest_enqueuer.py b/autotester/autotester/cli.py similarity index 100% rename from server/autotest_enqueuer.py rename to autotester/autotester/cli.py diff --git a/server/tests/__init__.py b/autotester/autotester/server/__init__.py similarity index 100% rename from server/tests/__init__.py rename to autotester/autotester/server/__init__.py diff --git a/server/form_validation/__init__.py b/autotester/autotester/server/form_validation/__init__.py similarity index 100% rename from server/form_validation/__init__.py rename to autotester/autotester/server/form_validation/__init__.py diff --git a/testers/testers/__init__.py b/autotester/autotester/server/hooks_context/__init__.py similarity index 100% rename from testers/testers/__init__.py rename to autotester/autotester/server/hooks_context/__init__.py diff --git a/server/hooks_context/builtin_hooks.py b/autotester/autotester/server/hooks_context/builtin_hooks.py similarity index 100% rename from server/hooks_context/builtin_hooks.py rename to autotester/autotester/server/hooks_context/builtin_hooks.py diff --git a/server/hooks_context/hooks_context.py b/autotester/autotester/server/hooks_context/hooks_context.py similarity index 100% rename from server/hooks_context/hooks_context.py rename to autotester/autotester/server/hooks_context/hooks_context.py diff --git a/server/hooks_context/utils.py b/autotester/autotester/server/hooks_context/utils.py similarity index 100% rename from server/hooks_context/utils.py rename to autotester/autotester/server/hooks_context/utils.py diff --git a/server/autotest_server.py b/autotester/autotester/server/server.py similarity index 100% rename from server/autotest_server.py rename to autotester/autotester/server/server.py diff --git a/testers/testers/custom/__init__.py b/autotester/autotester/testers/__init__.py similarity index 100% rename from testers/testers/custom/__init__.py rename to autotester/autotester/testers/__init__.py diff --git a/testers/testers/haskell/__init__.py b/autotester/autotester/testers/custom/__init__.py similarity index 100% rename from testers/testers/haskell/__init__.py rename to autotester/autotester/testers/custom/__init__.py diff 
--git a/testers/testers/custom/bin/default_hooks.py b/autotester/autotester/testers/custom/bin/default_hooks.py similarity index 100% rename from testers/testers/custom/bin/default_hooks.py rename to autotester/autotester/testers/custom/bin/default_hooks.py diff --git a/testers/testers/custom/bin/install.sh b/autotester/autotester/testers/custom/bin/install.sh similarity index 100% rename from testers/testers/custom/bin/install.sh rename to autotester/autotester/testers/custom/bin/install.sh diff --git a/testers/testers/custom/bin/uninstall.sh b/autotester/autotester/testers/custom/bin/uninstall.sh similarity index 100% rename from testers/testers/custom/bin/uninstall.sh rename to autotester/autotester/testers/custom/bin/uninstall.sh diff --git a/testers/testers/custom/markus_custom_tester.py b/autotester/autotester/testers/custom/markus_custom_tester.py similarity index 100% rename from testers/testers/custom/markus_custom_tester.py rename to autotester/autotester/testers/custom/markus_custom_tester.py diff --git a/testers/testers/custom/specs/default_install_settings.json b/autotester/autotester/testers/custom/specs/default_install_settings.json similarity index 100% rename from testers/testers/custom/specs/default_install_settings.json rename to autotester/autotester/testers/custom/specs/default_install_settings.json diff --git a/testers/testers/custom/specs/settings_schema.json b/autotester/autotester/testers/custom/specs/settings_schema.json similarity index 100% rename from testers/testers/custom/specs/settings_schema.json rename to autotester/autotester/testers/custom/specs/settings_schema.json diff --git a/testers/testers/custom/tests/script_files/autotest_01.sh b/autotester/autotester/testers/custom/tests/script_files/autotest_01.sh similarity index 100% rename from testers/testers/custom/tests/script_files/autotest_01.sh rename to autotester/autotester/testers/custom/tests/script_files/autotest_01.sh diff --git a/testers/testers/custom/tests/specs.json b/autotester/autotester/testers/custom/tests/specs.json similarity index 100% rename from testers/testers/custom/tests/specs.json rename to autotester/autotester/testers/custom/tests/specs.json diff --git a/testers/testers/custom/tests/student_files/submission.py b/autotester/autotester/testers/custom/tests/student_files/submission.py similarity index 100% rename from testers/testers/custom/tests/student_files/submission.py rename to autotester/autotester/testers/custom/tests/student_files/submission.py diff --git a/testers/testers/java/__init__.py b/autotester/autotester/testers/haskell/__init__.py similarity index 100% rename from testers/testers/java/__init__.py rename to autotester/autotester/testers/haskell/__init__.py diff --git a/testers/testers/haskell/bin/install.sh b/autotester/autotester/testers/haskell/bin/install.sh similarity index 100% rename from testers/testers/haskell/bin/install.sh rename to autotester/autotester/testers/haskell/bin/install.sh diff --git a/testers/testers/haskell/bin/uninstall.sh b/autotester/autotester/testers/haskell/bin/uninstall.sh similarity index 100% rename from testers/testers/haskell/bin/uninstall.sh rename to autotester/autotester/testers/haskell/bin/uninstall.sh diff --git a/testers/testers/haskell/markus_haskell_tester.py b/autotester/autotester/testers/haskell/markus_haskell_tester.py similarity index 100% rename from testers/testers/haskell/markus_haskell_tester.py rename to autotester/autotester/testers/haskell/markus_haskell_tester.py diff --git 
a/testers/testers/haskell/specs/settings_schema.json b/autotester/autotester/testers/haskell/specs/settings_schema.json similarity index 100% rename from testers/testers/haskell/specs/settings_schema.json rename to autotester/autotester/testers/haskell/specs/settings_schema.json diff --git a/testers/testers/haskell/tests/script_files/Test.hs b/autotester/autotester/testers/haskell/tests/script_files/Test.hs similarity index 100% rename from testers/testers/haskell/tests/script_files/Test.hs rename to autotester/autotester/testers/haskell/tests/script_files/Test.hs diff --git a/testers/testers/haskell/tests/specs.json b/autotester/autotester/testers/haskell/tests/specs.json similarity index 100% rename from testers/testers/haskell/tests/specs.json rename to autotester/autotester/testers/haskell/tests/specs.json diff --git a/testers/testers/haskell/tests/student_files/Submission.hs b/autotester/autotester/testers/haskell/tests/student_files/Submission.hs similarity index 100% rename from testers/testers/haskell/tests/student_files/Submission.hs rename to autotester/autotester/testers/haskell/tests/student_files/Submission.hs diff --git a/testers/testers/py/__init__.py b/autotester/autotester/testers/java/__init__.py similarity index 100% rename from testers/testers/py/__init__.py rename to autotester/autotester/testers/java/__init__.py diff --git a/testers/testers/java/bin/install.sh b/autotester/autotester/testers/java/bin/install.sh similarity index 100% rename from testers/testers/java/bin/install.sh rename to autotester/autotester/testers/java/bin/install.sh diff --git a/testers/testers/java/bin/uninstall.sh b/autotester/autotester/testers/java/bin/uninstall.sh similarity index 100% rename from testers/testers/java/bin/uninstall.sh rename to autotester/autotester/testers/java/bin/uninstall.sh diff --git a/testers/testers/java/lib/build.gradle b/autotester/autotester/testers/java/lib/build.gradle similarity index 100% rename from testers/testers/java/lib/build.gradle rename to autotester/autotester/testers/java/lib/build.gradle diff --git a/testers/testers/java/lib/gradle/wrapper/gradle-wrapper.jar b/autotester/autotester/testers/java/lib/gradle/wrapper/gradle-wrapper.jar similarity index 100% rename from testers/testers/java/lib/gradle/wrapper/gradle-wrapper.jar rename to autotester/autotester/testers/java/lib/gradle/wrapper/gradle-wrapper.jar diff --git a/testers/testers/java/lib/gradle/wrapper/gradle-wrapper.properties b/autotester/autotester/testers/java/lib/gradle/wrapper/gradle-wrapper.properties similarity index 100% rename from testers/testers/java/lib/gradle/wrapper/gradle-wrapper.properties rename to autotester/autotester/testers/java/lib/gradle/wrapper/gradle-wrapper.properties diff --git a/testers/testers/java/lib/gradlew b/autotester/autotester/testers/java/lib/gradlew similarity index 100% rename from testers/testers/java/lib/gradlew rename to autotester/autotester/testers/java/lib/gradlew diff --git a/testers/testers/java/lib/gradlew.bat b/autotester/autotester/testers/java/lib/gradlew.bat similarity index 100% rename from testers/testers/java/lib/gradlew.bat rename to autotester/autotester/testers/java/lib/gradlew.bat diff --git a/testers/testers/java/lib/settings.gradle b/autotester/autotester/testers/java/lib/settings.gradle similarity index 100% rename from testers/testers/java/lib/settings.gradle rename to autotester/autotester/testers/java/lib/settings.gradle diff --git a/testers/testers/java/lib/src/main/java/edu/toronto/cs/teach/MarkusJavaTester.java 
b/autotester/autotester/testers/java/lib/src/main/java/edu/toronto/cs/teach/MarkusJavaTester.java similarity index 100% rename from testers/testers/java/lib/src/main/java/edu/toronto/cs/teach/MarkusJavaTester.java rename to autotester/autotester/testers/java/lib/src/main/java/edu/toronto/cs/teach/MarkusJavaTester.java diff --git a/testers/testers/java/markus_java_tester.py b/autotester/autotester/testers/java/markus_java_tester.py similarity index 100% rename from testers/testers/java/markus_java_tester.py rename to autotester/autotester/testers/java/markus_java_tester.py diff --git a/testers/testers/java/specs/settings_schema.json b/autotester/autotester/testers/java/specs/settings_schema.json similarity index 100% rename from testers/testers/java/specs/settings_schema.json rename to autotester/autotester/testers/java/specs/settings_schema.json diff --git a/testers/testers/java/tests/script_files/Test1.java b/autotester/autotester/testers/java/tests/script_files/Test1.java similarity index 100% rename from testers/testers/java/tests/script_files/Test1.java rename to autotester/autotester/testers/java/tests/script_files/Test1.java diff --git a/testers/testers/java/tests/script_files/Test2.java b/autotester/autotester/testers/java/tests/script_files/Test2.java similarity index 100% rename from testers/testers/java/tests/script_files/Test2.java rename to autotester/autotester/testers/java/tests/script_files/Test2.java diff --git a/testers/testers/java/tests/specs.json b/autotester/autotester/testers/java/tests/specs.json similarity index 100% rename from testers/testers/java/tests/specs.json rename to autotester/autotester/testers/java/tests/specs.json diff --git a/testers/testers/java/tests/student_files/Submission.java b/autotester/autotester/testers/java/tests/student_files/Submission.java similarity index 100% rename from testers/testers/java/tests/student_files/Submission.java rename to autotester/autotester/testers/java/tests/student_files/Submission.java diff --git a/testers/testers/markus_test_specs.py b/autotester/autotester/testers/markus_test_specs.py similarity index 100% rename from testers/testers/markus_test_specs.py rename to autotester/autotester/testers/markus_test_specs.py diff --git a/testers/testers/markus_tester.py b/autotester/autotester/testers/markus_tester.py similarity index 100% rename from testers/testers/markus_tester.py rename to autotester/autotester/testers/markus_tester.py diff --git a/testers/testers/pyta/__init__.py b/autotester/autotester/testers/py/__init__.py similarity index 100% rename from testers/testers/pyta/__init__.py rename to autotester/autotester/testers/py/__init__.py diff --git a/testers/testers/py/bin/create_environment.sh b/autotester/autotester/testers/py/bin/create_environment.sh similarity index 100% rename from testers/testers/py/bin/create_environment.sh rename to autotester/autotester/testers/py/bin/create_environment.sh diff --git a/testers/testers/py/bin/install.sh b/autotester/autotester/testers/py/bin/install.sh similarity index 100% rename from testers/testers/py/bin/install.sh rename to autotester/autotester/testers/py/bin/install.sh diff --git a/testers/testers/py/bin/requirements.txt b/autotester/autotester/testers/py/bin/requirements.txt similarity index 100% rename from testers/testers/py/bin/requirements.txt rename to autotester/autotester/testers/py/bin/requirements.txt diff --git a/testers/testers/py/bin/uninstall.sh b/autotester/autotester/testers/py/bin/uninstall.sh similarity index 100% rename from 
testers/testers/py/bin/uninstall.sh rename to autotester/autotester/testers/py/bin/uninstall.sh diff --git a/testers/testers/py/lib/c_helper.py b/autotester/autotester/testers/py/lib/c_helper.py similarity index 100% rename from testers/testers/py/lib/c_helper.py rename to autotester/autotester/testers/py/lib/c_helper.py diff --git a/testers/testers/py/lib/sql_helper.py b/autotester/autotester/testers/py/lib/sql_helper.py similarity index 100% rename from testers/testers/py/lib/sql_helper.py rename to autotester/autotester/testers/py/lib/sql_helper.py diff --git a/testers/testers/py/markus_python_tester.py b/autotester/autotester/testers/py/markus_python_tester.py similarity index 100% rename from testers/testers/py/markus_python_tester.py rename to autotester/autotester/testers/py/markus_python_tester.py diff --git a/testers/testers/py/specs/settings_schema.json b/autotester/autotester/testers/py/specs/settings_schema.json similarity index 100% rename from testers/testers/py/specs/settings_schema.json rename to autotester/autotester/testers/py/specs/settings_schema.json diff --git a/testers/testers/py/tests/script_files/data1.sql b/autotester/autotester/testers/py/tests/script_files/data1.sql similarity index 100% rename from testers/testers/py/tests/script_files/data1.sql rename to autotester/autotester/testers/py/tests/script_files/data1.sql diff --git a/testers/testers/py/tests/script_files/data2.sql b/autotester/autotester/testers/py/tests/script_files/data2.sql similarity index 100% rename from testers/testers/py/tests/script_files/data2.sql rename to autotester/autotester/testers/py/tests/script_files/data2.sql diff --git a/testers/testers/py/tests/script_files/schema.ddl b/autotester/autotester/testers/py/tests/script_files/schema.ddl similarity index 100% rename from testers/testers/py/tests/script_files/schema.ddl rename to autotester/autotester/testers/py/tests/script_files/schema.ddl diff --git a/testers/testers/py/tests/script_files/test.py b/autotester/autotester/testers/py/tests/script_files/test.py similarity index 100% rename from testers/testers/py/tests/script_files/test.py rename to autotester/autotester/testers/py/tests/script_files/test.py diff --git a/testers/testers/py/tests/script_files/test2.py b/autotester/autotester/testers/py/tests/script_files/test2.py similarity index 100% rename from testers/testers/py/tests/script_files/test2.py rename to autotester/autotester/testers/py/tests/script_files/test2.py diff --git a/testers/testers/py/tests/script_files/test_sql.py b/autotester/autotester/testers/py/tests/script_files/test_sql.py similarity index 100% rename from testers/testers/py/tests/script_files/test_sql.py rename to autotester/autotester/testers/py/tests/script_files/test_sql.py diff --git a/testers/testers/py/tests/specs.json b/autotester/autotester/testers/py/tests/specs.json similarity index 100% rename from testers/testers/py/tests/specs.json rename to autotester/autotester/testers/py/tests/specs.json diff --git a/testers/testers/py/tests/student_files/submission.py b/autotester/autotester/testers/py/tests/student_files/submission.py similarity index 100% rename from testers/testers/py/tests/student_files/submission.py rename to autotester/autotester/testers/py/tests/student_files/submission.py diff --git a/testers/testers/py/tests/student_files/submission.sql b/autotester/autotester/testers/py/tests/student_files/submission.sql similarity index 100% rename from testers/testers/py/tests/student_files/submission.sql rename to 
autotester/autotester/testers/py/tests/student_files/submission.sql diff --git a/testers/testers/racket/__init__.py b/autotester/autotester/testers/pyta/__init__.py similarity index 100% rename from testers/testers/racket/__init__.py rename to autotester/autotester/testers/pyta/__init__.py diff --git a/testers/testers/pyta/bin/create_environment.sh b/autotester/autotester/testers/pyta/bin/create_environment.sh similarity index 100% rename from testers/testers/pyta/bin/create_environment.sh rename to autotester/autotester/testers/pyta/bin/create_environment.sh diff --git a/testers/testers/pyta/bin/install.sh b/autotester/autotester/testers/pyta/bin/install.sh similarity index 100% rename from testers/testers/pyta/bin/install.sh rename to autotester/autotester/testers/pyta/bin/install.sh diff --git a/testers/testers/pyta/bin/uninstall.sh b/autotester/autotester/testers/pyta/bin/uninstall.sh similarity index 100% rename from testers/testers/pyta/bin/uninstall.sh rename to autotester/autotester/testers/pyta/bin/uninstall.sh diff --git a/testers/testers/pyta/markus_pyta_tester.py b/autotester/autotester/testers/pyta/markus_pyta_tester.py similarity index 100% rename from testers/testers/pyta/markus_pyta_tester.py rename to autotester/autotester/testers/pyta/markus_pyta_tester.py diff --git a/testers/testers/pyta/specs/settings_schema.json b/autotester/autotester/testers/pyta/specs/settings_schema.json similarity index 100% rename from testers/testers/pyta/specs/settings_schema.json rename to autotester/autotester/testers/pyta/specs/settings_schema.json diff --git a/testers/testers/pyta/tests/specs.json b/autotester/autotester/testers/pyta/tests/specs.json similarity index 100% rename from testers/testers/pyta/tests/specs.json rename to autotester/autotester/testers/pyta/tests/specs.json diff --git a/testers/testers/pyta/tests/student_files/submission.py b/autotester/autotester/testers/pyta/tests/student_files/submission.py similarity index 100% rename from testers/testers/pyta/tests/student_files/submission.py rename to autotester/autotester/testers/pyta/tests/student_files/submission.py diff --git a/server/bin/default_tester_requirements.txt b/autotester/autotester/testers/racket/__init__.py similarity index 100% rename from server/bin/default_tester_requirements.txt rename to autotester/autotester/testers/racket/__init__.py diff --git a/testers/testers/racket/bin/install.sh b/autotester/autotester/testers/racket/bin/install.sh similarity index 100% rename from testers/testers/racket/bin/install.sh rename to autotester/autotester/testers/racket/bin/install.sh diff --git a/testers/testers/racket/bin/uninstall.sh b/autotester/autotester/testers/racket/bin/uninstall.sh similarity index 100% rename from testers/testers/racket/bin/uninstall.sh rename to autotester/autotester/testers/racket/bin/uninstall.sh diff --git a/testers/testers/racket/lib/markus.rkt b/autotester/autotester/testers/racket/lib/markus.rkt similarity index 100% rename from testers/testers/racket/lib/markus.rkt rename to autotester/autotester/testers/racket/lib/markus.rkt diff --git a/testers/testers/racket/markus_racket_tester.py b/autotester/autotester/testers/racket/markus_racket_tester.py similarity index 100% rename from testers/testers/racket/markus_racket_tester.py rename to autotester/autotester/testers/racket/markus_racket_tester.py diff --git a/testers/testers/racket/specs/settings_schema.json b/autotester/autotester/testers/racket/specs/settings_schema.json similarity index 100% rename from 
testers/testers/racket/specs/settings_schema.json rename to autotester/autotester/testers/racket/specs/settings_schema.json diff --git a/testers/testers/racket/tests/script_files/test.rkt b/autotester/autotester/testers/racket/tests/script_files/test.rkt similarity index 100% rename from testers/testers/racket/tests/script_files/test.rkt rename to autotester/autotester/testers/racket/tests/script_files/test.rkt diff --git a/testers/testers/racket/tests/specs.json b/autotester/autotester/testers/racket/tests/specs.json similarity index 100% rename from testers/testers/racket/tests/specs.json rename to autotester/autotester/testers/racket/tests/specs.json diff --git a/testers/testers/racket/tests/student_files/submission.rkt b/autotester/autotester/testers/racket/tests/student_files/submission.rkt similarity index 100% rename from testers/testers/racket/tests/student_files/submission.rkt rename to autotester/autotester/testers/racket/tests/student_files/submission.rkt diff --git a/testers/testers/tests/.gitkeep b/autotester/autotester/testers/tests/.gitkeep similarity index 100% rename from testers/testers/tests/.gitkeep rename to autotester/autotester/testers/tests/.gitkeep diff --git a/autotester/autotester/tests/__init__.py b/autotester/autotester/tests/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/server/tests/autotest_enqueuer_test.py b/autotester/autotester/tests/autotest_enqueuer_test.py similarity index 100% rename from server/tests/autotest_enqueuer_test.py rename to autotester/autotester/tests/autotest_enqueuer_test.py diff --git a/server/tests/config_default.py b/autotester/autotester/tests/config_default.py similarity index 100% rename from server/tests/config_default.py rename to autotester/autotester/tests/config_default.py diff --git a/autotester/setup.py b/autotester/setup.py new file mode 100644 index 00000000..e69de29b diff --git a/server/bin/archive_workspace.sh b/bin/archive_workspace.sh similarity index 100% rename from server/bin/archive_workspace.sh rename to bin/archive_workspace.sh diff --git a/server/generate_supervisord_conf.py b/bin/generate_supervisord_conf.py similarity index 100% rename from server/generate_supervisord_conf.py rename to bin/generate_supervisord_conf.py diff --git a/server/bin/install.sh b/bin/install.sh similarity index 100% rename from server/bin/install.sh rename to bin/install.sh diff --git a/server/bin/kill_worker_procs.c b/bin/kill_worker_procs.c similarity index 100% rename from server/bin/kill_worker_procs.c rename to bin/kill_worker_procs.c diff --git a/server/rq_fail_queue_contents.py b/bin/rq_fail_queue_contents.py similarity index 100% rename from server/rq_fail_queue_contents.py rename to bin/rq_fail_queue_contents.py diff --git a/server/bin/start-stop.sh b/bin/start-stop.sh similarity index 100% rename from server/bin/start-stop.sh rename to bin/start-stop.sh diff --git a/server/bin/tester_schema_skeleton.json b/bin/tester_schema_skeleton.json similarity index 100% rename from server/bin/tester_schema_skeleton.json rename to bin/tester_schema_skeleton.json diff --git a/server/bin/uninstall.sh b/bin/uninstall.sh similarity index 100% rename from server/bin/uninstall.sh rename to bin/uninstall.sh diff --git a/server/config.py b/config/config.py similarity index 100% rename from server/config.py rename to config/config.py diff --git a/config/default_tester_requirements.txt b/config/default_tester_requirements.txt new file mode 100644 index 00000000..e69de29b diff --git a/server/bin/requirements.txt 
b/config/requirements.txt similarity index 100% rename from server/bin/requirements.txt rename to config/requirements.txt From c82d918883cc7031f8ba20d6ee88dfb3171c979f Mon Sep 17 00:00:00 2001 From: mishaschwartz Date: Thu, 12 Dec 2019 15:09:36 -0500 Subject: [PATCH 04/46] autotester-package: move requirements to setup.py file --- autotester/setup.py | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/autotester/setup.py b/autotester/setup.py index e69de29b..a21b5776 100644 --- a/autotester/setup.py +++ b/autotester/setup.py @@ -0,0 +1,25 @@ +from setuptools import setup + +setup(name='MarkUs Autotester', + version='2.0', + description='Automatic tester for programming assignments', + url='https://github.com/MarkUsProject/markus-autotesting', + author='Misha Schwartz, Alessio Di Sandro', + author_email='mschwa@cs.toronto.edu', + license='MIT', + packages=['autotester'], + zip_safe=False, + install_requires=[ + 'redis==3.3.8', + 'requests==2.22.0', + 'rq==1.1.0', + 'supervisor==4.0.4', + 'PyYAML==5.1.2', + 'psycopg2-binary==2.8.3', + 'markusapi==0.0.1', + 'jsonschema==3.0.2', + 'fakeredis==1.1.0', + ], + entry_points={ + 'console_scripts': 'markus_autotester = autotester.cli:cli' + }) \ No newline at end of file From 70fb2e50717457919998a1676f09e33b1d323a30 Mon Sep 17 00:00:00 2001 From: mishaschwartz Date: Thu, 12 Dec 2019 15:51:36 -0500 Subject: [PATCH 05/46] config: introduce new yaml config format --- config/config.py | 57 -------------------------- config/config_default.yml | 16 ++++++++ config/default_tester_requirements.txt | 0 3 files changed, 16 insertions(+), 57 deletions(-) create mode 100644 config/config_default.yml delete mode 100644 config/default_tester_requirements.txt diff --git a/config/config.py b/config/config.py index f8db0054..998b6f9f 100755 --- a/config/config.py +++ b/config/config.py @@ -1,62 +1,5 @@ #!/usr/bin/env python3 -import os - -#### CHANGE CONFIG PARAMETERS BELOW #### - -## REDIS CONFIGS ## - -# name of redis hash used to store the locations of test script directories -REDIS_CURRENT_TEST_SCRIPT_HASH = 'curr_test_scripts' -# name of redis hash used to store pop interval data for each worker queue -REDIS_POP_HASH = 'pop_intervals' -# name of redis hash used to store workers data (username and worker directory) -REDIS_WORKERS_HASH = 'workers' -# name of redis integer used to access the next available port -REDIS_PORT_INT = 'ports' -# redis connection url -REDIS_URL = os.environ.get('REDIS_URL', 'redis://') -# prefix to prepend to all redis keys generated by the autotester -REDIS_PREFIX = 'autotest:' -# prefix to prepend to all postgres databases created -POSTGRES_PREFIX = 'autotest_' - -## WORKING DIR CONFIGS ## - -# the main working directory -WORKSPACE_DIR = '/app/server/workspace' -# name of the directory containing test scripts -SCRIPTS_DIR_NAME = 'scripts' -# name of the directory containing test results -RESULTS_DIR_NAME = 'results' -# name of the directory containing specs files -SPECS_DIR_NAME = 'specs' -# name of the directory containing workspaces for the workers -WORKERS_DIR_NAME = 'workers' -# name of the directory containing log files -LOGS_DIR_NAME = 'logs' -# name of the server user -SERVER_USER = 'autotst' -# names of the worker users -WORKER_USERS = 'autotst0 autotst1 autotst2 autotst3 autotst4 autotst5 autotst6 autotst7' -# prefix used to name reaper users -# (reapers not used to kill worker processes if set to the empty string) -REAPER_USER_PREFIX = '' -# default tester environment name -DEFAULT_ENV_NAME = 
'defaultenv' - -## RLIMIT SETTINGS FOR TESTER PROCESSES ## - -# values are: (soft limit, hard limit) -# see https://docs.python.org/3/library/resource.html for reference on limit options -# NOTE: these limits cannot be higher than the limits set for the tester user in -# /etc/security/limits.conf (or similar). These limits may be reduced in certain -# cases (see the docstring for get_test_preexec_fn and get_cleanup_preexec_fn in -# autotest_server.py) -RLIMIT_SETTINGS = { - 'RLIMIT_NPROC': (300, 300) -} - ### QUEUE CONFIGS ### # functions used to select which type of queue to use. They must accept any number diff --git a/config/config_default.yml b/config/config_default.yml new file mode 100644 index 00000000..ad16f6b2 --- /dev/null +++ b/config/config_default.yml @@ -0,0 +1,16 @@ +redis: + url: !ENV {REDIS_URL} + +workspace: '/app/server/workspace' + +users: + server: 'autotst' + workers: + - autotst0 + - autotst1 + reaper_user_prefix: null + +rlimit_settings: + nproc: + - 300 + - 300 \ No newline at end of file diff --git a/config/default_tester_requirements.txt b/config/default_tester_requirements.txt deleted file mode 100644 index e69de29b..00000000 From 0c37b4700d77c6ce7d1238443edc56acee8420f9 Mon Sep 17 00:00:00 2001 From: mishaschwartz Date: Thu, 12 Dec 2019 16:20:22 -0500 Subject: [PATCH 06/46] cli: enable the command line interface in cli.py --- autotester/autotester/cli.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/autotester/autotester/cli.py b/autotester/autotester/cli.py index 8af5c650..0c674ac6 100755 --- a/autotester/autotester/cli.py +++ b/autotester/autotester/cli.py @@ -231,7 +231,7 @@ def parse_arg_file(arg_file): 'cancel': cancel_test, 'schema': get_schema} -if __name__ == '__main__': +def cli(): parser = argparse.ArgumentParser() parser.add_argument('command', choices=COMMANDS) @@ -248,3 +248,6 @@ def parse_arg_file(arg_file): except MarkUsError as e: print(str(e)) sys.exit(1) + +if __name__ == '__main__': + cli() From d553acd9d360ef777a493c8bb2cfab1340f11798 Mon Sep 17 00:00:00 2001 From: mishaschwartz Date: Fri, 13 Dec 2019 10:35:24 -0500 Subject: [PATCH 07/46] autotester-package: reorganize entire package structure. 
WARNING: THIS BREAKS EVERYTHING --- autotester/__init__.py | 0 autotester/autotester/cli.py | 44 +- autotester/autotester/exceptions/__init__.py | 6 + .../server/hooks_context/builtin_hooks.py | 2 +- .../server/hooks_context/hooks_context.py | 4 +- .../autotester/server/resources/__init__.py | 0 .../server/resources/ports/__init__.py | 29 ++ .../server/resources/postgresql/__init__.py | 27 ++ autotester/autotester/server/server.py | 449 +----------------- .../autotester/server/utils/__init__.py | 0 autotester/autotester/server/utils/config.py | 0 .../autotester/server/utils/constants.py | 23 + .../server/utils/file_management.py | 131 +++++ .../server/utils/redis_management.py | 119 +++++ .../server/utils/resource_management.py | 43 ++ .../server/utils/string_management.py | 37 ++ .../server/utils/user_management.py | 25 + config/{config.py => queue_config_default.py} | 0 18 files changed, 473 insertions(+), 466 deletions(-) create mode 100644 autotester/__init__.py create mode 100644 autotester/autotester/exceptions/__init__.py create mode 100644 autotester/autotester/server/resources/__init__.py create mode 100644 autotester/autotester/server/resources/ports/__init__.py create mode 100644 autotester/autotester/server/resources/postgresql/__init__.py create mode 100644 autotester/autotester/server/utils/__init__.py create mode 100644 autotester/autotester/server/utils/config.py create mode 100644 autotester/autotester/server/utils/constants.py create mode 100644 autotester/autotester/server/utils/file_management.py create mode 100644 autotester/autotester/server/utils/redis_management.py create mode 100644 autotester/autotester/server/utils/resource_management.py create mode 100644 autotester/autotester/server/utils/string_management.py create mode 100644 autotester/autotester/server/utils/user_management.py rename config/{config.py => queue_config_default.py} (100%) diff --git a/autotester/__init__.py b/autotester/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/autotester/autotester/cli.py b/autotester/autotester/cli.py index 0c674ac6..4579978f 100755 --- a/autotester/autotester/cli.py +++ b/autotester/autotester/cli.py @@ -7,19 +7,19 @@ import json import inspect import glob -import autotest_server as ats import time -import config import shutil from functools import wraps -import form_validation - -### ERROR CLASSES ### +from .exceptions import MarkUsError +from .server.utils.redis_management import redis_connection, get_avg_pop_interval +from .server.utils.file_management import test_script_directory, ignore_missing_dir_error +from .server.utils.constants import TEST_SCRIPTS_SETTINGS_FILENAME +from .server.utils.config import WORKER_QUEUES +from .server.form_validation import validate_with_defaults, best_match +from .server.server import run_test, update_test_specs -class MarkUsError(Exception): - pass - +### ERROR CLASSES ### class JobArgumentError(MarkUsError): pass @@ -63,9 +63,9 @@ def _get_queue(**kw): Return a queue. The returned queue is one whose condition function returns True when called with the arguments in **kw. """ - for queue_type in config.WORKER_QUEUES: + for queue_type in WORKER_QUEUES: if queue_type['filter'](**kw): - return rq.Queue(queue_type['name'], connection=ats.redis_connection()) + return rq.Queue(queue_type['name'], connection=redis_connection()) raise InvalidQueueError('cannot enqueue job: unable to determine correct queue type') @@ -76,12 +76,12 @@ def _print_queue_info(queue): from the queue and the number of jobs in the queue. 
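    # Worked example of the wait-time estimate printed just below (numbers are
    # hypothetical): if one job leaves this queue every ~30 seconds on average
    # and 4 jobs are already waiting, a newly enqueued job waits roughly:
    avg_pop_interval = 30.0   # seconds, hypothetical
    count = 4                 # jobs currently in the queue, hypothetical
    print(avg_pop_interval * count)  # prints 120.0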
""" count = queue.count - avg_pop_interval = ats.get_avg_pop_interval(queue.name) or 0 + avg_pop_interval = get_avg_pop_interval(queue.name) or 0 print(avg_pop_interval * count) def _check_test_script_files_exist(markus_address, assignment_id, **kw): - if ats.test_script_directory(markus_address, assignment_id) is None: + if test_script_directory(markus_address, assignment_id) is None: raise TestScriptFilesError('cannot find test script files: please upload some before running tests') @@ -98,7 +98,7 @@ def wrapper(*args, **kwargs): except Exception: files_path = kwargs.get('files_path') if files_path: - shutil.rmtree(files_path, onerror=ats.ignore_missing_dir_error) + shutil.rmtree(files_path, onerror=ignore_missing_dir_error) raise return wrapper @@ -129,20 +129,20 @@ def _get_job_timeout(test_specs, test_categories, multiplier=1.5): @_clean_on_error -def run_test(user_type, batch_id, **kw): +def enqueue_test(user_type, batch_id, **kw): """ Enqueue a test run job with keyword arguments specified in **kw """ kw['enqueue_time'] = time.time() queue = _get_queue(user_type=user_type, batch_id=batch_id, **kw) - _check_args(ats.run_test, kwargs=kw) + _check_args(run_test, kwargs=kw) _check_test_script_files_exist(**kw) - test_files_dir = ats.test_script_directory(kw['markus_address'], kw['assignment_id']) - with open(os.path.join(test_files_dir, ats.TEST_SCRIPTS_SETTINGS_FILENAME)) as f: + test_files_dir = test_script_directory(kw['markus_address'], kw['assignment_id']) + with open(os.path.join(test_files_dir, TEST_SCRIPTS_SETTINGS_FILENAME)) as f: test_specs = json.load(f) _print_queue_info(queue) timeout = _get_job_timeout(test_specs, kw['test_categories']) - queue.enqueue_call(ats.run_test, kwargs=kw, job_id=_format_job_id(**kw), timeout=timeout) + queue.enqueue_call(run_test, kwargs=kw, job_id=_format_job_id(**kw), timeout=timeout) @_clean_on_error @@ -154,7 +154,7 @@ def update_specs(test_specs, schema=None, **kw): errors = list(form_validation.validate_with_defaults(schema, test_specs)) if errors: raise form_validation.best_match(errors) - ats.update_test_specs(test_specs=test_specs, **kw) + update_test_specs(test_specs=test_specs, **kw) def cancel_test(markus_address, run_ids, **kw): @@ -162,7 +162,7 @@ def cancel_test(markus_address, run_ids, **kw): Cancel a test run job with the job_id defined using markus_address and run_id. 
""" - with rq.Connection(ats.redis_connection()): + with rq.Connection(redis_connection()): for run_id in run_ids: job_id = _format_job_id(markus_address, run_id) try: @@ -172,7 +172,7 @@ def cancel_test(markus_address, run_ids, **kw): if job.is_queued(): files_path = job.kwargs['files_path'] if files_path: - shutil.rmtree(files_path, onerror=ats.ignore_missing_dir_error) + shutil.rmtree(files_path, onerror=ignore_missing_dir_error) job.cancel() @@ -226,7 +226,7 @@ def parse_arg_file(arg_file): return kwargs -COMMANDS = {'run': run_test, +COMMANDS = {'run': enqueue_test, 'specs': update_specs, 'cancel': cancel_test, 'schema': get_schema} diff --git a/autotester/autotester/exceptions/__init__.py b/autotester/autotester/exceptions/__init__.py new file mode 100644 index 00000000..4170fee3 --- /dev/null +++ b/autotester/autotester/exceptions/__init__.py @@ -0,0 +1,6 @@ +""" +Custom Exception Type for use in MarkUs +""" + +class MarkUsError(Exception): + pass \ No newline at end of file diff --git a/autotester/autotester/server/hooks_context/builtin_hooks.py b/autotester/autotester/server/hooks_context/builtin_hooks.py index 8b117989..e7e63aae 100644 --- a/autotester/autotester/server/hooks_context/builtin_hooks.py +++ b/autotester/autotester/server/hooks_context/builtin_hooks.py @@ -7,7 +7,7 @@ import json import glob from pathlib import Path -from hooks_context.utils import add_path +from .utils import add_path HOOKS = {'upload_feedback_file' : {'context': 'after_each'}, 'upload_feedback_to_repo' : {'requires': ['clear_feedback_file'], diff --git a/autotester/autotester/server/hooks_context/hooks_context.py b/autotester/autotester/server/hooks_context/hooks_context.py index 1169f530..631ad013 100644 --- a/autotester/autotester/server/hooks_context/hooks_context.py +++ b/autotester/autotester/server/hooks_context/hooks_context.py @@ -10,8 +10,8 @@ from collections import defaultdict, deque from collections.abc import Callable from contextlib import contextmanager -from hooks_context import builtin_hooks -from hooks_context.utils import current_directory, add_path +from . import builtin_hooks +from .utils import current_directory, add_path class Hooks: diff --git a/autotester/autotester/server/resources/__init__.py b/autotester/autotester/server/resources/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/autotester/autotester/server/resources/ports/__init__.py b/autotester/autotester/server/resources/ports/__init__.py new file mode 100644 index 00000000..5a3f7891 --- /dev/null +++ b/autotester/autotester/server/resources/ports/__init__.py @@ -0,0 +1,29 @@ +import socket +from ...utils.redis_management import REDIS_PREFIX, redis_connection + +PORT_MIN = 50000 +PORT_MAX = 65535 + +REDIS_PORT_INT = f'{REDIS_PREFIX}{ports}' + +def next_port(): + """ Return a port number that is greater than the last time this method was + called (by any process on this machine). + + This port number is not guaranteed to be free + """ + r = redis_connection() + return int(r.incr(REDIS_PORT_INT) or 0) % (PORT_MAX - PORT_MIN) + PORT_MIN + + +def get_available_port(): + """ Return the next available open port on localhost. 
""" + while True: + try: + with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: + s.bind(('localhost', next_port())) + s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + port = s.getsockname()[1] + return str(port) + except OSError: + continue \ No newline at end of file diff --git a/autotester/autotester/server/resources/postgresql/__init__.py b/autotester/autotester/server/resources/postgresql/__init__.py new file mode 100644 index 00000000..cd500bd4 --- /dev/null +++ b/autotester/autotester/server/resources/postgresql/__init__.py @@ -0,0 +1,27 @@ +import os +import getpass +import psycopg2 +import secrets +import string +from psycopg2.extensions import AsIs + +POSTGRES_PREFIX = 'autotest_' + +PGPASSFILE = os.path.join(config.WORKSPACE_DIR, config.LOGS_DIR_NAME, '.pgpass') + +def setup_database(test_username): + user = getpass.getuser() + database = f'{POSTGRES_PREFIX}{test_username}' + + with open(PGPASSFILE) as f: + password = f.read().strip() + + with psycopg2.connect(database=database, user=user, password=password, host='localhost') as conn: + with conn.cursor() as cursor: + cursor.execute("DROP OWNED BY CURRENT_USER;") + if test_username != user: + user = test_username + password = ''.join(secrets.choice(string.ascii_letters + string.digits) for _ in range(20)) + cursor.execute("ALTER USER %s WITH PASSWORD %s;", (AsIs(user), password)) + + return {'PGDATABASE': database, 'PGPASSWORD': password, 'PGUSER': user, 'AUTOTESTENV': 'true'} \ No newline at end of file diff --git a/autotester/autotester/server/server.py b/autotester/autotester/server/server.py index 28f8772d..efd35516 100755 --- a/autotester/autotester/server/server.py +++ b/autotester/autotester/server/server.py @@ -1,377 +1,29 @@ #!/usr/bin/env python3 import os -import fcntl import shutil -import sys import time import json import subprocess import signal -import redis import rq -import pwd -from contextlib import contextmanager -from functools import wraps -from itertools import zip_longest -from hooks_context.hooks_context import Hooks -import resource -import uuid import tempfile -import hashlib -import yaml -import getpass -import secrets -import string -import psycopg2 -import socket -from psycopg2.extensions import AsIs from markusapi import Markus -import config - - -CURRENT_TEST_SCRIPT_FORMAT = '{}_{}' -TEST_SCRIPT_DIR = os.path.join(config.WORKSPACE_DIR, config.SCRIPTS_DIR_NAME) -TEST_RESULT_DIR = os.path.join(config.WORKSPACE_DIR, config.RESULTS_DIR_NAME) -TEST_SPECS_DIR = os.path.join(config.WORKSPACE_DIR, config.SPECS_DIR_NAME) -REDIS_CURRENT_TEST_SCRIPT_HASH = '{}{}'.format(config.REDIS_PREFIX, config.REDIS_CURRENT_TEST_SCRIPT_HASH) -REDIS_WORKERS_HASH = '{}{}'.format(config.REDIS_PREFIX, config.REDIS_WORKERS_HASH) -REDIS_PORT_INT = '{}{}'.format(config.REDIS_PREFIX, config.REDIS_PORT_INT) -REDIS_POP_HASH = '{}{}'.format(config.REDIS_PREFIX, config.REDIS_POP_HASH) -DEFAULT_ENV_DIR = os.path.join(TEST_SPECS_DIR, config.DEFAULT_ENV_NAME) -PGPASSFILE = os.path.join(config.WORKSPACE_DIR, config.LOGS_DIR_NAME, '.pgpass') - -TEST_SCRIPTS_SETTINGS_FILENAME = 'settings.json' -TEST_SCRIPTS_FILES_DIRNAME = 'files' -HOOKS_FILENAME = 'hooks.py' - -PORT_MIN = 50000 -PORT_MAX = 65535 - -# For each rlimit limit (key), make sure that cleanup processes -# have at least n=(value) resources more than tester processes -RLIMIT_ADJUSTMENTS = {'RLIMIT_NPROC': 10} - -TESTER_IMPORT_LINE = {'custom' : 'from testers.custom.markus_custom_tester import MarkusCustomTester as Tester', - 'haskell' : 'from 
testers.haskell.markus_haskell_tester import MarkusHaskellTester as Tester', - 'java' : 'from testers.java.markus_java_tester import MarkusJavaTester as Tester', - 'py' : 'from testers.py.markus_python_tester import MarkusPythonTester as Tester', - 'pyta' : 'from testers.pyta.markus_pyta_tester import MarkusPyTATester as Tester', - 'racket' : 'from testers.racket.markus_racket_tester import MarkusRacketTester as Tester'} + +from ..exceptions import MarkUsError +from .utils import config, constants, string_management, file_management, resource_management +from .hooks_context.hooks_context import Hooks ### CUSTOM EXCEPTION CLASSES ### -class AutotestError(Exception): pass +class AutotestError(MarkUsError): pass ### HELPER FUNCTIONS ### -def stringify(*args): - for a in args: - yield str(a) - -def rlimit_str2int(rlimit_string): - return resource.__getattribute__(rlimit_string) - -def current_user(): - return pwd.getpwuid(os.getuid()).pw_name - -def get_reaper_username(test_username): - return '{}{}'.format(config.REAPER_USER_PREFIX, test_username) - -def decode_if_bytes(b, format='utf-8'): - return b.decode(format) if isinstance(b, bytes) else b - -def clean_dir_name(name): - """ Return name modified so that it can be used as a unix style directory name """ - return name.replace('/', '_') - -def random_tmpfile_name(): - return os.path.join(tempfile.gettempdir(), uuid.uuid4().hex) - -def get_test_script_key(markus_address, assignment_id): - """ - Return unique key for each assignment used for - storing the location of test scripts in Redis - """ - clean_markus_address = clean_dir_name(markus_address) - return CURRENT_TEST_SCRIPT_FORMAT.format(clean_markus_address, assignment_id) - -def test_script_directory(markus_address, assignment_id, set_to=None): - """ - Return the directory containing the test scripts for a specific assignment. - Optionally updates the location of the test script directory to the value - of the set_to keyword argument (if it is not None) - """ - key = get_test_script_key(markus_address, assignment_id) - r = redis_connection() - if set_to is not None: - r.hset(REDIS_CURRENT_TEST_SCRIPT_HASH, key, set_to) - out = r.hget(REDIS_CURRENT_TEST_SCRIPT_HASH, key) - return decode_if_bytes(out) - -def recursive_iglob(root_dir): - """ - Walk breadth first over a directory tree starting at root_dir and - yield the path to each directory or file encountered. - Yields a tuple containing a string indicating whether the path is to - a directory ("d") or a file ("f") and the path itself. Raise a - ValueError if the root_dir doesn't exist - """ - if os.path.isdir(root_dir): - for root, dirnames, filenames in os.walk(root_dir): - yield from (('d', os.path.join(root, d)) for d in dirnames) - yield from (('f', os.path.join(root, f)) for f in filenames) - else: - raise ValueError('directory does not exist: {}'.format(root_dir)) - -def redis_connection(): - """ - Return the currently open redis connection object. If there is no - connection currently open, one is created using the url specified in - config.REDIS_URL - """ - conn = rq.get_current_connection() - if conn: - return conn - rq.use_connection(redis=redis.Redis.from_url(config.REDIS_URL)) - return rq.get_current_connection() - -def copy_tree(src, dst, exclude=[]): - """ - Recursively copy all files and subdirectories in the path - indicated by src to the path indicated by dst. If directories - don't exist, they are created. Do not copy files or directories - in the exclude list. 
- """ - copied = [] - for fd, file_or_dir in recursive_iglob(src): - src_path = os.path.relpath(file_or_dir, src) - if src_path in exclude: - continue - target = os.path.join(dst, src_path) - if fd == 'd': - os.makedirs(target, exist_ok=True) - else: - os.makedirs(os.path.dirname(target), exist_ok=True) - shutil.copy2(file_or_dir, target) - copied.append((fd, target)) - return copied - -def ignore_missing_dir_error(_func, _path, excinfo): - """ Used by shutil.rmtree to ignore a FileNotFoundError """ - err_type, err_inst, traceback = excinfo - if err_type == FileNotFoundError: - return - raise err_inst - -def move_tree(src, dst): - """ - Recursively move all files and subdirectories in the path - indicated by src to the path indicated by dst. If directories - don't exist, they are created. - """ - os.makedirs(dst, exist_ok=True) - moved = copy_tree(src, dst) - shutil.rmtree(src, onerror=ignore_missing_dir_error) - return moved - -def loads_partial_json(json_string, expected_type=None): - """ - Return a list of objects loaded from a json string and a boolean - indicating whether the json_string was malformed. This will try - to load as many valid objects as possible from a (potentially - malformed) json string. If the optional expected_type keyword argument - is not None then only objects of the given type are returned, - if any objects of a different type are found, the string will - be treated as malfomed. - """ - i = 0 - decoder = json.JSONDecoder() - results = [] - malformed = False - json_string = json_string.strip() - while i < len(json_string): - try: - obj, ind = decoder.raw_decode(json_string[i:]) - if expected_type is None or isinstance(obj, expected_type): - results.append(obj) - elif json_string[i:i+ind].strip(): - malformed = True - i += ind - except json.JSONDecodeError: - if json_string[i].strip(): - malformed = True - i += 1 - return results, malformed - -@contextmanager -def fd_open(path, flags=os.O_RDONLY, *args, **kwargs): - """ - Open the file or directory at path, yield its - file descriptor, and close it when finished. - flags, *args and **kwargs are passed on to os.open. - """ - fd = os.open(path, flags, *args, **kwargs) - try: - yield fd - finally: - os.close(fd) - -@contextmanager -def fd_lock(file_descriptor, exclusive=True): - """ - Lock the object with the given file descriptor and unlock it - when finished. A lock can either be exclusive or shared by - setting the exclusive keyword argument to True or False. - """ - fcntl.flock(file_descriptor, fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH) - try: - yield - finally: - fcntl.flock(file_descriptor, fcntl.LOCK_UN) - -def tester_user(): - """ - Get the workspace for the tester user specified by the MARKUSWORKERUSER - environment variable, return the user_name and path to that user's workspace. - - Raises an AutotestError if a tester user is not specified or if a workspace - has not been setup for that user. - """ - r = redis_connection() - - user_name = os.environ.get('MARKUSWORKERUSER') - if user_name is None: - raise AutotestError('No worker users available to run this job') - - user_workspace = r.hget(REDIS_WORKERS_HASH, user_name) - if user_workspace is None: - raise AutotestError(f'No workspace directory for user: {user_name}') - - return user_name, decode_if_bytes(user_workspace) - ### MAINTENANCE FUNCTIONS ### -def update_pop_interval_stat(queue_name): - """ - Update the values contained in the redis hash named REDIS_POP_HASH for - the queue named queue_name. 
This should be called whenever a new job - is popped from a queue for which we want to keep track of the popping - rate. For more details about the data updated see get_pop_interval_stat. - """ - r = redis_connection() - now = time.time() - r.hsetnx(REDIS_POP_HASH, '{}_start'.format(queue_name), now) - r.hset(REDIS_POP_HASH, '{}_last'.format(queue_name), now) - r.hincrby(REDIS_POP_HASH, '{}_count'.format(queue_name), 1) - -def clear_pop_interval_stat(queue_name): - """ - Reset the values contained in the redis hash named REDIS_POP_HASH for - the queue named queue_name. This should be called whenever a queue becomes - empty. For more details about the data updated see get_pop_interval_stat. - """ - r = redis_connection() - r.hdel(REDIS_POP_HASH, '{}_start'.format(queue_name)) - r.hset(REDIS_POP_HASH, '{}_last'.format(queue_name), 0) - r.hset(REDIS_POP_HASH, '{}_count'.format(queue_name), 0) - -def get_pop_interval_stat(queue_name): - """ - Return the following data about the queue named queue_name: - - the time the first job was popped from the queue during the - current burst of jobs. - - the number of jobs popped from the queue during the current - burst of jobs. - - the time the most recent job was popped from the queue during - current burst of jobs. - """ - r = redis_connection() - start = r.hget(REDIS_POP_HASH, '{}_start'.format(queue_name)) - last = r.hget(REDIS_POP_HASH, '{}_count'.format(queue_name)) - count = r.hget(REDIS_POP_HASH, '{}_count'.format(queue_name)) - return start, last, count - -def get_avg_pop_interval(queue_name): - """ - Return the average interval between pops off of the end of the - queue named queue_name during the current burst of jobs. - Return None if there are no jobs in the queue, indicating that - there is no current burst. - """ - start, last, count = get_pop_interval_stat(queue_name) - try: - start = float(start) - last = float(last) - count = int(count) - except TypeError: - return None - count -= 1 - return (last-start) / count if count else 0 - -def clean_up(): - """ Reset the pop interval data for each empty queue """ - with rq.Connection(redis_connection()): - for q in rq.Queue.all(): - if q.is_empty(): - clear_pop_interval_stat(q.name) - - -def clean_after(func): - """ - Call the clean_up function after the - decorated function func is finished - """ - @wraps(func) - def wrapper(*args, **kwargs): - try: - return func(*args, **kwargs) - finally: - clean_up() - return wrapper - ### RUN TESTS ### -def copy_test_script_files(markus_address, assignment_id, tests_path): - """ - Copy test script files for a given assignment to the tests_path - directory if they exist. tests_path may already exist and contain - files and subdirectories. - """ - test_script_outer_dir = test_script_directory(markus_address, assignment_id) - test_script_dir = os.path.join(test_script_outer_dir, TEST_SCRIPTS_FILES_DIRNAME) - if os.path.isdir(test_script_dir): - with fd_open(test_script_dir) as fd: - with fd_lock(fd, exclusive=False): - return copy_tree(test_script_dir, tests_path) - return [] - -def setup_files(files_path, tests_path, markus_address, assignment_id): - """ - Copy test script files and student files to the working directory tests_path, - then make it the current working directory. 
- The following permissions are also set: - - tests_path directory: rwxrwx--T - - test subdirectories: rwxrwx--T - - test files: rw-r----- - - student subdirectories: rwxrwx--- - - student files: rw-rw---- - """ - os.chmod(tests_path, 0o1770) - student_files = move_tree(files_path, tests_path) - for fd, file_or_dir in student_files: - if fd == 'd': - os.chmod(file_or_dir, 0o770) - else: - os.chmod(file_or_dir, 0o660) - script_files = copy_test_script_files(markus_address, assignment_id, tests_path) - for fd, file_or_dir in script_files: - if fd == 'd': - os.chmod(file_or_dir, 0o1770) - else: - os.chmod(file_or_dir, 0o640) - return student_files, script_files - def test_run_command(test_username=None): """ Return a command used to run test scripts as a the test_username @@ -405,49 +57,6 @@ def create_test_group_result(stdout, stderr, run_time, extra_info, timeout=None) 'malformed' : stdout if malformed else None, 'extra_info': extra_info or {}} -def get_test_preexec_fn(): - """ - Return a function that sets rlimit settings specified in config file - This function ensures that for specific limits (defined in RLIMIT_ADJUSTMENTS), - there are at least n=RLIMIT_ADJUSTMENTS[limit] resources available for cleanup - processes that are not available for test processes. This ensures that cleanup - processes will always be able to run. - """ - def preexec_fn(): - for limit_str in config.RLIMIT_SETTINGS.keys() | RLIMIT_ADJUSTMENTS.keys(): - limit = rlimit_str2int(limit_str) - - values = config.RLIMIT_SETTINGS.get(limit_str, (resource.RLIM_INFINITY, resource.RLIM_INFINITY)) - curr_soft, curr_hard = resource.getrlimit(limit) - soft, hard = (min(vals) for vals in zip((curr_soft, curr_hard), values)) - # reduce the hard limit so that cleanup scripts will have at least - # adj more resources to use. - adj = RLIMIT_ADJUSTMENTS.get(limit_str, 0) - if (curr_hard - hard) < adj: - hard = curr_hard - adj - # make sure the soft limit doesn't exceed the hard limit - hard = max(hard, 0) - soft = max(min(hard, soft), 0) - - resource.setrlimit(limit, (soft, hard)) - - return preexec_fn - -def get_cleanup_preexec_fn(): - """ - Return a function that sets the rlimit settings specified in RLIMIT_ADJUSTMENTS - so that both the soft and hard limits are set as high as possible. This ensures - that cleanup processes will have as many resources as possible to run. 
- """ - def preexec_fn(): - for limit_str in RLIMIT_ADJUSTMENTS: - limit = rlimit_str2int(limit_str) - soft, hard = resource.getrlimit(limit) - soft = max(soft, hard) - resource.setrlimit(limit, (soft, hard)) - - return preexec_fn - def kill_with_reaper(test_username): """ Try to kill all processes currently being run by test_username using the method @@ -467,7 +76,7 @@ def kill_with_reaper(test_username): reaper_username = get_reaper_username(test_username) cwd = os.path.dirname(os.path.abspath(__file__)) kill_file_dst = random_tmpfile_name() - preexec_fn = get_cleanup_preexec_fn() + preexec_fn = set_rlimits_before_cleanup() copy_cmd = "sudo -u {0} -- bash -c 'cp kill_worker_procs {1} && chmod 4550 {1}'".format(test_username, kill_file_dst) copy_proc = subprocess.Popen(copy_cmd, shell=True, preexec_fn=preexec_fn, cwd=cwd) @@ -501,48 +110,6 @@ def create_test_script_command(env_dir, tester_type): venv_str = f'source {venv_activate}' return ' && '.join([venv_str, f'python -c "{python_str}"']) - -def setup_database(test_username): - user = getpass.getuser() - database = f'{config.POSTGRES_PREFIX}{test_username}' - - with open(PGPASSFILE) as f: - password = f.read().strip() - - with psycopg2.connect(database=database, user=user, password=password, host='localhost') as conn: - with conn.cursor() as cursor: - cursor.execute("DROP OWNED BY CURRENT_USER;") - if test_username != user: - user = test_username - password = ''.join(secrets.choice(string.ascii_letters + string.digits) for _ in range(20)) - cursor.execute("ALTER USER %s WITH PASSWORD %s;", (AsIs(user), password)) - - return {'PGDATABASE': database, 'PGPASSWORD': password, 'PGUSER': user, 'AUTOTESTENV': 'true'} - - -def next_port(): - """ Return a port number that is greater than the last time this method was - called (by any process on this machine). - - This port number is not guaranteed to be free - """ - r = redis_connection() - return int(r.incr(REDIS_PORT_INT) or 0) % (PORT_MAX - PORT_MIN) + PORT_MIN - - -def get_available_port(): - """ Return the next available open port on localhost. """ - while True: - try: - with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: - s.bind(('localhost', next_port())) - s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) - port = s.getsockname()[1] - return str(port) - except OSError: - continue - - def get_env_vars(test_username): """ Return a dictionary containing all environment variables to pass to the next test """ db_env_vars = setup_database(test_username) @@ -556,7 +123,7 @@ def run_test_specs(cmd, test_specs, test_categories, tests_path, test_username, command cmd. Return the results. 
""" results = [] - preexec_fn = get_test_preexec_fn() + preexec_fn = set_rlimits_before_test() with hooks.around('all'): for settings in test_specs['testers']: @@ -704,7 +271,7 @@ def run_test(markus_address, server_api_key, test_categories, files_path, assign store_results(results_data, markus_address, assignment_id, group_id, submission_id) report(results_data, api, assignment_id, group_id, run_id) -### UPDATE TEST SCRIPTS ### +### UPDATE TEST SCRIPTS ### def get_tester_root_dir(tester_type): """ diff --git a/autotester/autotester/server/utils/__init__.py b/autotester/autotester/server/utils/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/autotester/autotester/server/utils/config.py b/autotester/autotester/server/utils/config.py new file mode 100644 index 00000000..e69de29b diff --git a/autotester/autotester/server/utils/constants.py b/autotester/autotester/server/utils/constants.py new file mode 100644 index 00000000..a45cc4bf --- /dev/null +++ b/autotester/autotester/server/utils/constants.py @@ -0,0 +1,23 @@ +import os + +TEST_SCRIPT_DIR = os.path.join(config.WORKSPACE_DIR, config.SCRIPTS_DIR_NAME) +TEST_RESULT_DIR = os.path.join(config.WORKSPACE_DIR, config.RESULTS_DIR_NAME) +TEST_SPECS_DIR = os.path.join(config.WORKSPACE_DIR, config.SPECS_DIR_NAME) +REDIS_WORKERS_HASH = '{}{}'.format(config.REDIS_PREFIX, config.REDIS_WORKERS_HASH) +REDIS_POP_HASH = '{}{}'.format(config.REDIS_PREFIX, config.REDIS_POP_HASH) +DEFAULT_ENV_DIR = os.path.join(TEST_SPECS_DIR, config.DEFAULT_ENV_NAME) + +TEST_SCRIPTS_SETTINGS_FILENAME = 'settings.json' +TEST_SCRIPTS_FILES_DIRNAME = 'files' +HOOKS_FILENAME = 'hooks.py' + +# For each rlimit limit (key), make sure that cleanup processes +# have at least n=(value) resources more than tester processes +RLIMIT_ADJUSTMENTS = {'RLIMIT_NPROC': 10} + +TESTER_IMPORT_LINE = {'custom' : 'from testers.custom.markus_custom_tester import MarkusCustomTester as Tester', + 'haskell' : 'from testers.haskell.markus_haskell_tester import MarkusHaskellTester as Tester', + 'java' : 'from testers.java.markus_java_tester import MarkusJavaTester as Tester', + 'py' : 'from testers.py.markus_python_tester import MarkusPythonTester as Tester', + 'pyta' : 'from testers.pyta.markus_pyta_tester import MarkusPyTATester as Tester', + 'racket' : 'from testers.racket.markus_racket_tester import MarkusRacketTester as Tester'} \ No newline at end of file diff --git a/autotester/autotester/server/utils/file_management.py b/autotester/autotester/server/utils/file_management.py new file mode 100644 index 00000000..bdc4df17 --- /dev/null +++ b/autotester/autotester/server/utils/file_management.py @@ -0,0 +1,131 @@ +import os +import uuid +import tempfile +import shutil + +def clean_dir_name(name): + """ Return name modified so that it can be used as a unix style directory name """ + return name.replace('/', '_') + +def random_tmpfile_name(): + return os.path.join(tempfile.gettempdir(), uuid.uuid4().hex) + +def recursive_iglob(root_dir): + """ + Walk breadth first over a directory tree starting at root_dir and + yield the path to each directory or file encountered. + Yields a tuple containing a string indicating whether the path is to + a directory ("d") or a file ("f") and the path itself. 
Raise a + ValueError if the root_dir doesn't exist + """ + if os.path.isdir(root_dir): + for root, dirnames, filenames in os.walk(root_dir): + yield from (('d', os.path.join(root, d)) for d in dirnames) + yield from (('f', os.path.join(root, f)) for f in filenames) + else: + raise ValueError('directory does not exist: {}'.format(root_dir)) + +def copy_tree(src, dst, exclude=[]): + """ + Recursively copy all files and subdirectories in the path + indicated by src to the path indicated by dst. If directories + don't exist, they are created. Do not copy files or directories + in the exclude list. + """ + copied = [] + for fd, file_or_dir in recursive_iglob(src): + src_path = os.path.relpath(file_or_dir, src) + if src_path in exclude: + continue + target = os.path.join(dst, src_path) + if fd == 'd': + os.makedirs(target, exist_ok=True) + else: + os.makedirs(os.path.dirname(target), exist_ok=True) + shutil.copy2(file_or_dir, target) + copied.append((fd, target)) + return copied + +def ignore_missing_dir_error(_func, _path, excinfo): + """ Used by shutil.rmtree to ignore a FileNotFoundError """ + err_type, err_inst, traceback = excinfo + if err_type == FileNotFoundError: + return + raise err_inst + +def move_tree(src, dst): + """ + Recursively move all files and subdirectories in the path + indicated by src to the path indicated by dst. If directories + don't exist, they are created. + """ + os.makedirs(dst, exist_ok=True) + moved = copy_tree(src, dst) + shutil.rmtree(src, onerror=ignore_missing_dir_error) + return moved + +@contextmanager +def fd_open(path, flags=os.O_RDONLY, *args, **kwargs): + """ + Open the file or directory at path, yield its + file descriptor, and close it when finished. + flags, *args and **kwargs are passed on to os.open. + """ + fd = os.open(path, flags, *args, **kwargs) + try: + yield fd + finally: + os.close(fd) + +@contextmanager +def fd_lock(file_descriptor, exclusive=True): + """ + Lock the object with the given file descriptor and unlock it + when finished. A lock can either be exclusive or shared by + setting the exclusive keyword argument to True or False. + """ + fcntl.flock(file_descriptor, fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH) + try: + yield + finally: + fcntl.flock(file_descriptor, fcntl.LOCK_UN) + +def copy_test_script_files(markus_address, assignment_id, tests_path): + """ + Copy test script files for a given assignment to the tests_path + directory if they exist. tests_path may already exist and contain + files and subdirectories. + """ + test_script_outer_dir = test_script_directory(markus_address, assignment_id) + test_script_dir = os.path.join(test_script_outer_dir, TEST_SCRIPTS_FILES_DIRNAME) + if os.path.isdir(test_script_dir): + with fd_open(test_script_dir) as fd: + with fd_lock(fd, exclusive=False): + return copy_tree(test_script_dir, tests_path) + return [] + +def setup_files(files_path, tests_path, markus_address, assignment_id): + """ + Copy test script files and student files to the working directory tests_path, + then make it the current working directory. 
+ The following permissions are also set: + - tests_path directory: rwxrwx--T + - test subdirectories: rwxr-xr-x + - test files: rw-r--r-- + - student subdirectories: rwxrwxrwx + - student files: rw-rw-rw- + """ + os.chmod(tests_path, 0o1770) + student_files = move_tree(files_path, tests_path) + for fd, file_or_dir in student_files: + if fd == 'd': + os.chmod(file_or_dir, 0o777) + else: + os.chmod(file_or_dir, 0o666) + script_files = copy_test_script_files(markus_address, assignment_id, tests_path) + for fd, file_or_dir in script_files: + permissions = 0o755 + if fd == 'f': + permissions -= 0o111 + os.chmod(file_or_dir, permissions) + return student_files, script_files \ No newline at end of file diff --git a/autotester/autotester/server/utils/redis_management.py b/autotester/autotester/server/utils/redis_management.py new file mode 100644 index 00000000..9ca46c0f --- /dev/null +++ b/autotester/autotester/server/utils/redis_management.py @@ -0,0 +1,119 @@ +import redis +import rq +from . import file_management, config, string_management + +CURRENT_TEST_SCRIPT_FORMAT = '{}_{}' +REDIS_PREFIX='autotest:' +REDIS_CURRENT_TEST_SCRIPT_HASH = f'{REDIS_PREFIX}curr_test_scripts' + +def redis_connection(): + """ + Return the currently open redis connection object. If there is no + connection currently open, one is created using the url specified in + config.REDIS_URL + """ + conn = rq.get_current_connection() + if conn: + return conn + rq.use_connection(redis=redis.Redis.from_url(config.REDIS_URL)) + return rq.get_current_connection() + +def get_test_script_key(markus_address, assignment_id): + """ + Return unique key for each assignment used for + storing the location of test scripts in Redis + """ + clean_markus_address = file_management.clean_dir_name(markus_address) + return CURRENT_TEST_SCRIPT_FORMAT.format(clean_markus_address, assignment_id) + +def test_script_directory(markus_address, assignment_id, set_to=None): + """ + Return the directory containing the test scripts for a specific assignment. + Optionally updates the location of the test script directory to the value + of the set_to keyword argument (if it is not None) + """ + key = get_test_script_key(markus_address, assignment_id) + r = redis_connection() + if set_to is not None: + r.hset(REDIS_CURRENT_TEST_SCRIPT_HASH, key, set_to) + out = r.hget(REDIS_CURRENT_TEST_SCRIPT_HASH, key) + return string_management.decode_if_bytes(out) + + +def update_pop_interval_stat(queue_name): + """ + Update the values contained in the redis hash named REDIS_POP_HASH for + the queue named queue_name. This should be called whenever a new job + is popped from a queue for which we want to keep track of the popping + rate. For more details about the data updated see get_pop_interval_stat. + """ + r = redis_connection() + now = time.time() + r.hsetnx(REDIS_POP_HASH, '{}_start'.format(queue_name), now) + r.hset(REDIS_POP_HASH, '{}_last'.format(queue_name), now) + r.hincrby(REDIS_POP_HASH, '{}_count'.format(queue_name), 1) + +def clear_pop_interval_stat(queue_name): + """ + Reset the values contained in the redis hash named REDIS_POP_HASH for + the queue named queue_name. This should be called whenever a queue becomes + empty. For more details about the data updated see get_pop_interval_stat. 
+ """ + r = redis_connection() + r.hdel(REDIS_POP_HASH, '{}_start'.format(queue_name)) + r.hset(REDIS_POP_HASH, '{}_last'.format(queue_name), 0) + r.hset(REDIS_POP_HASH, '{}_count'.format(queue_name), 0) + +def get_pop_interval_stat(queue_name): + """ + Return the following data about the queue named queue_name: + - the time the first job was popped from the queue during the + current burst of jobs. + - the number of jobs popped from the queue during the current + burst of jobs. + - the time the most recent job was popped from the queue during + current burst of jobs. + """ + r = redis_connection() + start = r.hget(REDIS_POP_HASH, '{}_start'.format(queue_name)) + last = r.hget(REDIS_POP_HASH, '{}_count'.format(queue_name)) + count = r.hget(REDIS_POP_HASH, '{}_count'.format(queue_name)) + return start, last, count + +def get_avg_pop_interval(queue_name): + """ + Return the average interval between pops off of the end of the + queue named queue_name during the current burst of jobs. + Return None if there are no jobs in the queue, indicating that + there is no current burst. + """ + start, last, count = get_pop_interval_stat(queue_name) + try: + start = float(start) + last = float(last) + count = int(count) + except TypeError: + return None + count -= 1 + return (last-start) / count if count else 0 + +def clean_up(): + """ Reset the pop interval data for each empty queue """ + with rq.Connection(redis_connection()): + for q in rq.Queue.all(): + if q.is_empty(): + clear_pop_interval_stat(q.name) + + +def clean_after(func): + """ + Call the clean_up function after the + decorated function func is finished + """ + @wraps(func) + def wrapper(*args, **kwargs): + try: + return func(*args, **kwargs) + finally: + clean_up() + return wrapper \ No newline at end of file diff --git a/autotester/autotester/server/utils/resource_management.py b/autotester/autotester/server/utils/resource_management.py new file mode 100644 index 00000000..0c655963 --- /dev/null +++ b/autotester/autotester/server/utils/resource_management.py @@ -0,0 +1,43 @@ +import resource +from .config import RLIMIT_SETTINGS +from .constants import RLIMIT_ADJUSTMENTS + +def rlimit_str2int(rlimit_string): + return resource.__getattribute__(rlimit_string) + +def set_rlimits_before_test(): + """ + Sets rlimit settings specified in config file + This function ensures that for specific limits (defined in RLIMIT_ADJUSTMENTS), + there are at least n=RLIMIT_ADJUSTMENTS[limit] resources available for cleanup + processes that are not available for test processes. This ensures that cleanup + processes will always be able to run. + """ + for limit_str in config.RLIMIT_SETTINGS.keys() | RLIMIT_ADJUSTMENTS.keys(): + limit = rlimit_str2int(limit_str) + + values = config.RLIMIT_SETTINGS.get(limit_str, (resource.RLIM_INFINITY, resource.RLIM_INFINITY)) + curr_soft, curr_hard = resource.getrlimit(limit) + soft, hard = (min(vals) for vals in zip((curr_soft, curr_hard), values)) + # reduce the hard limit so that cleanup scripts will have at least + # adj more resources to use. + adj = RLIMIT_ADJUSTMENTS.get(limit_str, 0) + if (curr_hard - hard) < adj: + hard = curr_hard - adj + # make sure the soft limit doesn't exceed the hard limit + hard = max(hard, 0) + soft = max(min(hard, soft), 0) + + resource.setrlimit(limit, (soft, hard)) + +def set_rlimits_before_cleanup(): + """ + Sets the rlimit settings specified in RLIMIT_ADJUSTMENTS + so that both the soft and hard limits are set as high as possible. 
This ensures + that cleanup processes will have as many resources as possible to run. + """ + for limit_str in RLIMIT_ADJUSTMENTS: + limit = rlimit_str2int(limit_str) + soft, hard = resource.getrlimit(limit) + soft = max(soft, hard) + resource.setrlimit(limit, (soft, hard)) \ No newline at end of file diff --git a/autotester/autotester/server/utils/string_management.py b/autotester/autotester/server/utils/string_management.py new file mode 100644 index 00000000..0ef989c2 --- /dev/null +++ b/autotester/autotester/server/utils/string_management.py @@ -0,0 +1,37 @@ +from . import config + +def stringify(*args): + for a in args: + yield str(a) + +def decode_if_bytes(b, format='utf-8'): + return b.decode(format) if isinstance(b, bytes) else b + +def loads_partial_json(json_string, expected_type=None): + """ + Return a list of objects loaded from a json string and a boolean + indicating whether the json_string was malformed. This will try + to load as many valid objects as possible from a (potentially + malformed) json string. If the optional expected_type keyword argument + is not None then only objects of the given type are returned, + if any objects of a different type are found, the string will + be treated as malfomed. + """ + i = 0 + decoder = json.JSONDecoder() + results = [] + malformed = False + json_string = json_string.strip() + while i < len(json_string): + try: + obj, ind = decoder.raw_decode(json_string[i:]) + if expected_type is None or isinstance(obj, expected_type): + results.append(obj) + elif json_string[i:i+ind].strip(): + malformed = True + i += ind + except json.JSONDecodeError: + if json_string[i].strip(): + malformed = True + i += 1 + return results, malformed diff --git a/autotester/autotester/server/utils/user_management.py b/autotester/autotester/server/utils/user_management.py new file mode 100644 index 00000000..0162dc36 --- /dev/null +++ b/autotester/autotester/server/utils/user_management.py @@ -0,0 +1,25 @@ +def current_user(): + return pwd.getpwuid(os.getuid()).pw_name + +def tester_user(): + """ + Get the workspace for the tester user specified by the MARKUSWORKERUSER + environment variable, return the user_name and path to that user's workspace. + + Raises an AutotestError if a tester user is not specified or if a workspace + has not been setup for that user. 
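+    The workspace path is looked up in redis under the REDIS_WORKERS_HASH hash.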
+ """ + r = redis_connection() + + user_name = os.environ.get('MARKUSWORKERUSER') + if user_name is None: + raise AutotestError('No worker users available to run this job') + + user_workspace = r.hget(REDIS_WORKERS_HASH, user_name) + if user_workspace is None: + raise AutotestError(f'No workspace directory for user: {user_name}') + + return user_name, decode_if_bytes(user_workspace) + +def get_reaper_username(test_username): + return '{}{}'.format(config.REAPER_USER_PREFIX, test_username) \ No newline at end of file diff --git a/config/config.py b/config/queue_config_default.py similarity index 100% rename from config/config.py rename to config/queue_config_default.py From d508bafe8e4b505409ec79d528e9a4a9869d274d Mon Sep 17 00:00:00 2001 From: mishaschwartz Date: Mon, 16 Dec 2019 10:32:15 -0500 Subject: [PATCH 08/46] config: new configuration setup with environment variables and using json schema to select queues --- autotester/__init__.py | 0 autotester/autotester/__init__.py | 5 ++ autotester/autotester/config/__init__.py | 76 +++++++++++++++++++ .../config/defaults/config_default.yml | 40 ++++++++++ .../config/defaults/config_env_vars.yml | 10 +++ autotester/autotester/server/utils/config.py | 0 config/config_default.yml | 16 ---- config/config_local.yml | 40 ++++++++++ config/queue_config_default.py | 28 ------- 9 files changed, 171 insertions(+), 44 deletions(-) delete mode 100644 autotester/__init__.py create mode 100644 autotester/autotester/config/__init__.py create mode 100644 autotester/autotester/config/defaults/config_default.yml create mode 100644 autotester/autotester/config/defaults/config_env_vars.yml delete mode 100644 autotester/autotester/server/utils/config.py delete mode 100644 config/config_default.yml create mode 100644 config/config_local.yml delete mode 100755 config/queue_config_default.py diff --git a/autotester/__init__.py b/autotester/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/autotester/autotester/__init__.py b/autotester/autotester/__init__.py index e69de29b..770d42b3 100644 --- a/autotester/autotester/__init__.py +++ b/autotester/autotester/__init__.py @@ -0,0 +1,5 @@ +from os.path import abspath, dirname, join + +PROJECT_ROOT = dirname(abspath(__file__)) +AUTOTESTER_ROOT = dirname(PROJECT_ROOT) +CONFIG_ROOT = join(AUTOTESTER_ROOT, 'config') diff --git a/autotester/autotester/config/__init__.py b/autotester/autotester/config/__init__.py new file mode 100644 index 00000000..30bcd3e7 --- /dev/null +++ b/autotester/autotester/config/__init__.py @@ -0,0 +1,76 @@ +# Thanks to this blog post for how to load env vars with the yaml loader: +# https://medium.com/swlh/python-yaml-configuration-with-environment-variables-parsing-77930f4273ac + +import os +import re +from collections.abc import Mapping +import yaml +from autotester import CONFIG_ROOT + +DEFAULT_ROOT = os.path.join(os.path.dirname(__file__), 'defaults') + +class _Config: + + _local_config = os.path.join(CONFIG_ROOT, 'config_local.yml') + _default_config = os.path.join(DEFAULT_ROOT, 'config_default.yml') + _env_var_config = os.path.join(DEFAULT_ROOT, 'config_env_vars.yml') + _env_pattern = re.compile(r'.*?\${(\w+)}.*?') + _env_tag = '!ENV' + _env_not_found_key = '!ENVIRONMENT VARIABLE NOT FOUND!' 
+ + def __init__(self): + self._yaml_loader = yaml.SafeLoader + self._yaml_loader.add_implicit_resolver(self._env_tag, self._env_pattern, None) + self._yaml_loader.add_constructor(self._env_tag, self._env_var_constructor) + self._settings = self._load_from_yaml() + + def __getitem__(self, key): + try: + return self._settings[key] + except KeyError: + if isinstance(key, tuple): + d = self + for k in key: + d = d[k] + return d + raise + + @classmethod + def _merge_dicts(cls, dicts): + try: + _merged = dicts[0].copy() + except AttributeError: + _merged = dicts[0] + if all(isinstance(d, Mapping) for d in dicts): + for d in dicts[1:]: + for key, val in d.items(): + if key not in _merged or _merged[key] == cls._env_not_found_key: + _merged[key] = val + else: + _merged[key] = cls._merge_dicts([_merged[key], val]) + return _merged + + def _env_var_constructor(self, loader, node): + value = loader.construct_scalar(node) + match = self._env_pattern.findall(value) + if match: + full_value = value + for g in match: + full_value = full_value.replace(f'${{{g}}}', os.environ.get(g, self._env_not_found_key)) + return full_value + return value + + def _load_from_yaml(self): + config_dicts = [] + if os.path.isfile(self._local_config): + with open(self._local_config) as f: + local_config = yaml.load(f, Loader=self._yaml_loader) + if local_config is not None: + config_dicts.append(local_config) + with open(self._env_var_config) as f: + config_dicts.append(yaml.load(f, Loader=self._yaml_loader)) + with open(self._default_config) as f: + config_dicts.append(yaml.load(f, Loader=self._yaml_loader)) + return self._merge_dicts(config_dicts) + +config = _Config() diff --git a/autotester/autotester/config/defaults/config_default.yml b/autotester/autotester/config/defaults/config_default.yml new file mode 100644 index 00000000..2cd9e865 --- /dev/null +++ b/autotester/autotester/config/defaults/config_default.yml @@ -0,0 +1,40 @@ +workspace: null + +queues: + - name: batch + schema: {'type': 'object', 'properties': {'batch_id': {'type': 'number'}}} + - name: single + schema: {'type': 'object', 'properties': {'batch_id': {'type': 'null'}, 'user_type': {'const': 'Admin'}}} + - name: student + schema: {'type': 'object', 'properties': {'batch_id': {'type': 'null'}, 'user_type': {'const': 'Student'}}} + +workers: + - n: 1 + queues: + - student + - single + - batch + +users: + server: + name: null + workers: + name: null + reaper: null + +redis: + url: redis://127.0.0.1:6379/0 + +rlimit_settings: + nproc: + - 300 + - 300 + +resources: + port: + min: 50000 + max: 65535 + postgresql: + database: autotest + password: null + host: localhost \ No newline at end of file diff --git a/autotester/autotester/config/defaults/config_env_vars.yml b/autotester/autotester/config/defaults/config_env_vars.yml new file mode 100644 index 00000000..a3f7461a --- /dev/null +++ b/autotester/autotester/config/defaults/config_env_vars.yml @@ -0,0 +1,10 @@ +redis: + url: !ENV ${REDIS_URL} + +workspace: !ENV ${AUTOTESTER_WORKSPACE} + +resources: + postgresql: + database: !ENV ${PGDATABASE} + password: !ENV ${PGPASSWORD} + host: !ENV ${PGHOST} \ No newline at end of file diff --git a/autotester/autotester/server/utils/config.py b/autotester/autotester/server/utils/config.py deleted file mode 100644 index e69de29b..00000000 diff --git a/config/config_default.yml b/config/config_default.yml deleted file mode 100644 index ad16f6b2..00000000 --- a/config/config_default.yml +++ /dev/null @@ -1,16 +0,0 @@ -redis: - url: !ENV {REDIS_URL} - -workspace: 
'/app/server/workspace' - -users: - server: 'autotst' - workers: - - autotst0 - - autotst1 - reaper_user_prefix: null - -rlimit_settings: - nproc: - - 300 - - 300 \ No newline at end of file diff --git a/config/config_local.yml b/config/config_local.yml new file mode 100644 index 00000000..2cd9e865 --- /dev/null +++ b/config/config_local.yml @@ -0,0 +1,40 @@ +workspace: null + +queues: + - name: batch + schema: {'type': 'object', 'properties': {'batch_id': {'type': 'number'}}} + - name: single + schema: {'type': 'object', 'properties': {'batch_id': {'type': 'null'}, 'user_type': {'const': 'Admin'}}} + - name: student + schema: {'type': 'object', 'properties': {'batch_id': {'type': 'null'}, 'user_type': {'const': 'Student'}}} + +workers: + - n: 1 + queues: + - student + - single + - batch + +users: + server: + name: null + workers: + name: null + reaper: null + +redis: + url: redis://127.0.0.1:6379/0 + +rlimit_settings: + nproc: + - 300 + - 300 + +resources: + port: + min: 50000 + max: 65535 + postgresql: + database: autotest + password: null + host: localhost \ No newline at end of file diff --git a/config/queue_config_default.py b/config/queue_config_default.py deleted file mode 100755 index 998b6f9f..00000000 --- a/config/queue_config_default.py +++ /dev/null @@ -1,28 +0,0 @@ -#!/usr/bin/env python3 - -### QUEUE CONFIGS ### - -# functions used to select which type of queue to use. They must accept any number -# of keyword arguments and should only return a boolean (see autotest_enqueuer._get_queue) -def batch_filter(**kwargs): - return kwargs.get('batch_id') is not None - -def single_filter(**kwargs): - return kwargs.get('user_type') == 'Admin' and not batch_filter(**kwargs) - -def student_filter(**kwargs): - return kwargs.get('user_type') == 'Student' and not batch_filter(**kwargs) - -# list of worker queues. 
Values of each are a string indicating the queue name, -# and a function used to select whether or not to use this type of queue -# (see autotest_enqueuer._get_queue) -batch_queue = {'name': 'batch', 'filter': batch_filter} -single_queue = {'name': 'single', 'filter': single_filter} -student_queue = {'name': 'student', 'filter': student_filter} -WORKER_QUEUES = [batch_queue, single_queue, student_queue] - -### WORKER CONFIGS ### - -WORKERS = [(4, [student_queue['name'], single_queue['name'], batch_queue['name']]), - (2, [single_queue['name'], student_queue['name'], batch_queue['name']]), - (2, [batch_queue['name'], student_queue['name'], single_queue['name']])] From 725165cd31dc3d2b2637ee22eae86479a6ed2220 Mon Sep 17 00:00:00 2001 From: mishaschwartz Date: Mon, 16 Dec 2019 11:04:05 -0500 Subject: [PATCH 09/46] queue: select queues based on json schemas now --- autotester/autotester/cli.py | 16 ++++++++-------- .../server/form_validation/__init__.py | 17 +++++++++++++---- 2 files changed, 21 insertions(+), 12 deletions(-) diff --git a/autotester/autotester/cli.py b/autotester/autotester/cli.py index 4579978f..ddcc9260 100755 --- a/autotester/autotester/cli.py +++ b/autotester/autotester/cli.py @@ -14,8 +14,8 @@ from .server.utils.redis_management import redis_connection, get_avg_pop_interval from .server.utils.file_management import test_script_directory, ignore_missing_dir_error from .server.utils.constants import TEST_SCRIPTS_SETTINGS_FILENAME -from .server.utils.config import WORKER_QUEUES -from .server.form_validation import validate_with_defaults, best_match +from autotester.config import config +from .server import form_validation from .server.server import run_test, update_test_specs @@ -63,9 +63,9 @@ def _get_queue(**kw): Return a queue. The returned queue is one whose condition function returns True when called with the arguments in **kw. """ - for queue_type in WORKER_QUEUES: - if queue_type['filter'](**kw): - return rq.Queue(queue_type['name'], connection=redis_connection()) + for queue in config['queues']: + if form_validation.is_valid(kw, queue['schema']): + return rq.Queue(queue['name'], connection=redis_connection()) raise InvalidQueueError('cannot enqueue job: unable to determine correct queue type') @@ -151,9 +151,9 @@ def update_specs(test_specs, schema=None, **kw): Run test spec update function after validating the form data. 
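     If the data is invalid, the best-matching validation error is raised.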
""" if schema is not None: - errors = list(form_validation.validate_with_defaults(schema, test_specs)) - if errors: - raise form_validation.best_match(errors) + error = form_validation.validate_with_defaults(schema, test_specs, best_only=True) + if error: + raise error update_test_specs(test_specs=test_specs, **kw) diff --git a/autotester/autotester/server/form_validation/__init__.py b/autotester/autotester/server/form_validation/__init__.py index 6f4285bf..250cb749 100644 --- a/autotester/autotester/server/form_validation/__init__.py +++ b/autotester/autotester/server/form_validation/__init__.py @@ -1,4 +1,4 @@ -from jsonschema import Draft7Validator, validators, ValidationError +from jsonschema import Draft7Validator, validators, ValidationError, validate from jsonschema.exceptions import best_match from copy import deepcopy @@ -90,7 +90,7 @@ def set_oneOf_defaults(validator, properties, instance, schema): return validators.extend(validator_class, custom_validators) -def validate_with_defaults(schema, obj, validator_class=Draft7Validator): +def validate_with_defaults(schema, obj, validator_class=Draft7Validator, best_only=True): """ Return an iterator that yields errors from validating obj on schema after first filling in defaults on obj. @@ -99,5 +99,14 @@ def validate_with_defaults(schema, obj, validator_class=Draft7Validator): # first time to fill in defaults since validating 'required', 'minProperties', # etc. can't be done until the instance has been properly filled with defaults. list(validator.iter_errors(obj)) - return validator.iter_errors(obj) - \ No newline at end of file + errors = list(validator.iter_errors(obj)) + if best_only: + return best_match(errors) + return errors + +def is_valid(obj, schema, validator_class=Draft7Validator): + """ + Return True if is valid for schema using the + validator . + """ + return validator_class(schema).is_valid(obj) From dd3686faf7020d7ffb22eac04f46885127b7ce59 Mon Sep 17 00:00:00 2001 From: mishaschwartz Date: Mon, 16 Dec 2019 14:33:12 -0500 Subject: [PATCH 10/46] config: remove all required null values from default config --- autotester/autotester/config/__init__.py | 39 +++++---- .../config/defaults/config_default.yml | 9 --- .../config/defaults/config_env_vars.yml | 9 ++- config/config_local.yml | 80 +++++++++---------- 4 files changed, 70 insertions(+), 67 deletions(-) diff --git a/autotester/autotester/config/__init__.py b/autotester/autotester/config/__init__.py index 30bcd3e7..d06c97ea 100644 --- a/autotester/autotester/config/__init__.py +++ b/autotester/autotester/config/__init__.py @@ -5,23 +5,25 @@ import re from collections.abc import Mapping import yaml -from autotester import CONFIG_ROOT +import autotester DEFAULT_ROOT = os.path.join(os.path.dirname(__file__), 'defaults') class _Config: - _local_config = os.path.join(CONFIG_ROOT, 'config_local.yml') + _local_config = os.path.join(autotester.CONFIG_ROOT, 'config_local.yml') _default_config = os.path.join(DEFAULT_ROOT, 'config_default.yml') _env_var_config = os.path.join(DEFAULT_ROOT, 'config_env_vars.yml') - _env_pattern = re.compile(r'.*?\${(\w+)}.*?') - _env_tag = '!ENV' - _env_not_found_key = '!ENVIRONMENT VARIABLE NOT FOUND!' + _replacement_pattern = re.compile(r'.*?\${(\w+)}.*?') + _not_found_key = '!VARIABLE NOT FOUND!' 
def __init__(self): self._yaml_loader = yaml.SafeLoader - self._yaml_loader.add_implicit_resolver(self._env_tag, self._env_pattern, None) - self._yaml_loader.add_constructor(self._env_tag, self._env_var_constructor) + + self._yaml_loader.add_implicit_resolver('!ENV', self._replacement_pattern, None) + env_constructor = self._constructor_factory(lambda g: os.environ.get(g, self._not_found_key)) + self._yaml_loader.add_constructor('!ENV', env_constructor) + self._settings = self._load_from_yaml() def __getitem__(self, key): @@ -44,21 +46,24 @@ def _merge_dicts(cls, dicts): if all(isinstance(d, Mapping) for d in dicts): for d in dicts[1:]: for key, val in d.items(): - if key not in _merged or _merged[key] == cls._env_not_found_key: + if key not in _merged or _merged[key] == cls._not_found_key: _merged[key] = val else: _merged[key] = cls._merge_dicts([_merged[key], val]) return _merged - def _env_var_constructor(self, loader, node): - value = loader.construct_scalar(node) - match = self._env_pattern.findall(value) - if match: - full_value = value - for g in match: - full_value = full_value.replace(f'${{{g}}}', os.environ.get(g, self._env_not_found_key)) - return full_value - return value + def _constructor_factory(self, replacement_func): + def constructor(loader, node, pattern=self._replacement_pattern): + value = loader.construct_scalar(node) + match = pattern.findall(value) + if match: + full_value = value + for g in match: + full_value = full_value.replace(f'${{{g}}}', replacement_func(g)) + return full_value + return value + + return constructor def _load_from_yaml(self): config_dicts = [] diff --git a/autotester/autotester/config/defaults/config_default.yml b/autotester/autotester/config/defaults/config_default.yml index 2cd9e865..5fea210f 100644 --- a/autotester/autotester/config/defaults/config_default.yml +++ b/autotester/autotester/config/defaults/config_default.yml @@ -1,5 +1,3 @@ -workspace: null - queues: - name: batch schema: {'type': 'object', 'properties': {'batch_id': {'type': 'number'}}} @@ -15,13 +13,6 @@ workers: - single - batch -users: - server: - name: null - workers: - name: null - reaper: null - redis: url: redis://127.0.0.1:6379/0 diff --git a/autotester/autotester/config/defaults/config_env_vars.yml b/autotester/autotester/config/defaults/config_env_vars.yml index a3f7461a..d40fbb96 100644 --- a/autotester/autotester/config/defaults/config_env_vars.yml +++ b/autotester/autotester/config/defaults/config_env_vars.yml @@ -1,7 +1,14 @@ +workspace: !ENV ${HOME}/.markus-autotesting/ + redis: url: !ENV ${REDIS_URL} -workspace: !ENV ${AUTOTESTER_WORKSPACE} +users: + server: + name: !ENV ${USER} + workers: + - name: !ENV ${USER} + reaper: null resources: postgresql: diff --git a/config/config_local.yml b/config/config_local.yml index 2cd9e865..3c763f40 100644 --- a/config/config_local.yml +++ b/config/config_local.yml @@ -1,40 +1,40 @@ -workspace: null - -queues: - - name: batch - schema: {'type': 'object', 'properties': {'batch_id': {'type': 'number'}}} - - name: single - schema: {'type': 'object', 'properties': {'batch_id': {'type': 'null'}, 'user_type': {'const': 'Admin'}}} - - name: student - schema: {'type': 'object', 'properties': {'batch_id': {'type': 'null'}, 'user_type': {'const': 'Student'}}} - -workers: - - n: 1 - queues: - - student - - single - - batch - -users: - server: - name: null - workers: - name: null - reaper: null - -redis: - url: redis://127.0.0.1:6379/0 - -rlimit_settings: - nproc: - - 300 - - 300 - -resources: - port: - min: 50000 - max: 65535 - 
postgresql: - database: autotest - password: null - host: localhost \ No newline at end of file +#workspace: !ENV ${HOME}/.markus-autotesting/workspace +# +#queues: +# - name: batch +# schema: {'type': 'object', 'properties': {'batch_id': {'type': 'number'}}} +# - name: single +# schema: {'type': 'object', 'properties': {'batch_id': {'type': 'null'}, 'user_type': {'const': 'Admin'}}} +# - name: student +# schema: {'type': 'object', 'properties': {'batch_id': {'type': 'null'}, 'user_type': {'const': 'Student'}}} +# +#workers: +# - n: 1 +# queues: +# - student +# - single +# - batch +# +#users: +# server: +# name: !ENV ${USER} +# workers: +# - name: !ENV ${USER} +# reaper: null +# +#redis: +# url: redis://127.0.0.1:6379/0 +# +#rlimit_settings: +# nproc: +# - 300 +# - 300 +# +#resources: +# port: +# min: 50000 +# max: 65535 +# postgresql: +# database: autotest +# password: null +# host: localhost \ No newline at end of file From 67a888584f51fb192b238c5ba2ee1e2d6fd6a76d Mon Sep 17 00:00:00 2001 From: mishaschwartz Date: Tue, 17 Dec 2019 16:51:33 -0500 Subject: [PATCH 11/46] autotester_package: reorganize entire autotester package --- .gitignore | 3 +- autotester/autotester/cli.py | 44 ++++++---------- .../config/defaults/config_default.yml | 25 ++++++++- autotester/autotester/exceptions/__init__.py | 21 ++++++++ .../{server => }/resources/__init__.py | 0 .../{server => }/resources/ports/__init__.py | 17 +++--- .../resources/postgresql/__init__.py | 6 +-- .../server/form_validation/__init__.py | 3 +- .../server/hooks_context/builtin_hooks.py | 26 ++++------ .../server/hooks_context/hooks_context.py | 5 +- autotester/autotester/server/server.py | 52 ++++++++++++------- .../autotester/server/utils/constants.py | 23 -------- .../server/utils/file_management.py | 4 +- .../utils.py => utils/path_management.py} | 3 +- .../server/utils/redis_management.py | 38 +++++++------- .../server/utils/resource_management.py | 11 ++-- .../server/utils/string_management.py | 6 +-- .../server/utils/user_management.py | 20 +++++-- .../testers/custom/{bin => }/default_hooks.py | 2 +- .../testers/pyta/bin/requirements.txt | 0 config/requirements.txt | 9 ---- 21 files changed, 172 insertions(+), 146 deletions(-) rename autotester/autotester/{server => }/resources/__init__.py (100%) rename autotester/autotester/{server => }/resources/ports/__init__.py (57%) rename autotester/autotester/{server => }/resources/postgresql/__init__.py (81%) delete mode 100644 autotester/autotester/server/utils/constants.py rename autotester/autotester/server/{hooks_context/utils.py => utils/path_management.py} (99%) rename autotester/autotester/testers/custom/{bin => }/default_hooks.py (83%) rename {testers => autotester/autotester}/testers/pyta/bin/requirements.txt (100%) delete mode 100644 config/requirements.txt diff --git a/.gitignore b/.gitignore index 94c4171b..9789c4f2 100644 --- a/.gitignore +++ b/.gitignore @@ -4,7 +4,8 @@ __pycache__ .DS_Store .hypothesis/ .pytest_cache/ - +*.egg-info +venv # server server/venv diff --git a/autotester/autotester/cli.py b/autotester/autotester/cli.py index ddcc9260..92b181d0 100755 --- a/autotester/autotester/cli.py +++ b/autotester/autotester/cli.py @@ -9,37 +9,23 @@ import glob import time import shutil +from rq.exceptions import NoSuchJobError from functools import wraps -from .exceptions import MarkUsError -from .server.utils.redis_management import redis_connection, get_avg_pop_interval -from .server.utils.file_management import test_script_directory, ignore_missing_dir_error -from 
.server.utils.constants import TEST_SCRIPTS_SETTINGS_FILENAME +from autotester.exceptions import * +from autotester.server.utils.redis_management import redis_connection, get_avg_pop_interval, test_script_directory +from autotester.server.utils.file_management import ignore_missing_dir_error from autotester.config import config -from .server import form_validation -from .server.server import run_test, update_test_specs +from autotester.server import form_validation +from autotester.server.server import run_test, update_test_specs +SETTINGS_FILENAME = config['_workspace_contents', '_settings_file'] ### ERROR CLASSES ### -class JobArgumentError(MarkUsError): - pass - - -class InvalidQueueError(MarkUsError): - pass - - -class TestScriptFilesError(MarkUsError): - pass - - -class TestParameterError(MarkUsError): - pass - ### HELPER FUNCTIONS ### -def _format_job_id(markus_address, run_id, **kw): +def _format_job_id(markus_address, run_id, **_kw): """ Return a unique job id for each enqueued job based on the markus_address and the run_id @@ -47,11 +33,13 @@ def _format_job_id(markus_address, run_id, **kw): return '{}_{}'.format(markus_address, run_id) -def _check_args(func, args=[], kwargs={}): +def _check_args(func, args=None, kwargs=None): """ Raises an error if calling the function func with args and kwargs would raise an error. """ + args = args or [] + kwargs = kwargs or {} try: inspect.signature(func).bind(*args, **kwargs) except TypeError as e: @@ -80,7 +68,7 @@ def _print_queue_info(queue): print(avg_pop_interval * count) -def _check_test_script_files_exist(markus_address, assignment_id, **kw): +def _check_test_script_files_exist(markus_address, assignment_id, **_kw): if test_script_directory(markus_address, assignment_id) is None: raise TestScriptFilesError('cannot find test script files: please upload some before running tests') @@ -138,7 +126,7 @@ def enqueue_test(user_type, batch_id, **kw): _check_args(run_test, kwargs=kw) _check_test_script_files_exist(**kw) test_files_dir = test_script_directory(kw['markus_address'], kw['assignment_id']) - with open(os.path.join(test_files_dir, TEST_SCRIPTS_SETTINGS_FILENAME)) as f: + with open(os.path.join(test_files_dir, SETTINGS_FILENAME)) as f: test_specs = json.load(f) _print_queue_info(queue) timeout = _get_job_timeout(test_specs, kw['test_categories']) @@ -157,7 +145,7 @@ def update_specs(test_specs, schema=None, **kw): update_test_specs(test_specs=test_specs, **kw) -def cancel_test(markus_address, run_ids, **kw): +def cancel_test(markus_address, run_ids, **_kw): """ Cancel a test run job with the job_id defined using markus_address and run_id. @@ -167,7 +155,7 @@ def cancel_test(markus_address, run_ids, **kw): job_id = _format_job_id(markus_address, run_id) try: job = rq.job.Job.fetch(job_id) - except rq.exceptions.NoSuchJobError: + except NoSuchJobError: return if job.is_queued(): files_path = job.kwargs['files_path'] @@ -176,7 +164,7 @@ def cancel_test(markus_address, run_ids, **kw): job.cancel() -def get_schema(**kw): +def get_schema(**_kw): """ Print a json to stdout representing a json schema that indicates the required specs for each installed tester type. diff --git a/autotester/autotester/config/defaults/config_default.yml b/autotester/autotester/config/defaults/config_default.yml index 5fea210f..6a52b087 100644 --- a/autotester/autotester/config/defaults/config_default.yml +++ b/autotester/autotester/config/defaults/config_default.yml @@ -1,3 +1,7 @@ +# Default settings. 
+# Settings prefixed with an underscore are technically overwritable by +# a local settings file but it is not recommended. + queues: - name: batch schema: {'type': 'object', 'properties': {'batch_id': {'type': 'number'}}} @@ -14,7 +18,12 @@ workers: - batch redis: - url: redis://127.0.0.1:6379/0 + url: 'redis://127.0.0.1:6379/0' + _prefix: 'redis:' + _current_test_script_hash: current_test_scripts + _workers_hash: workers + _pop_interval_hash: pop_interval + rlimit_settings: nproc: @@ -23,9 +32,21 @@ rlimit_settings: resources: port: + _redis_int: port min: 50000 max: 65535 postgresql: + _prefix: autotest_ database: autotest password: null - host: localhost \ No newline at end of file + host: localhost + +_workspace_contents: + _scripts: scripts + _results: results + _specs: specs + _logs: logs + _default_venv_name: defaultvenv + _settings_file: settings.json + _files_dir: files + _hooks_file: hooks.py diff --git a/autotester/autotester/exceptions/__init__.py b/autotester/autotester/exceptions/__init__.py index 4170fee3..7dc90e70 100644 --- a/autotester/autotester/exceptions/__init__.py +++ b/autotester/autotester/exceptions/__init__.py @@ -3,4 +3,25 @@ """ class MarkUsError(Exception): + pass + +class TesterCreationError(MarkUsError): + pass + +class TesterUserError(MarkUsError): + pass + +class JobArgumentError(MarkUsError): + pass + + +class InvalidQueueError(MarkUsError): + pass + + +class TestScriptFilesError(MarkUsError): + pass + + +class TestParameterError(MarkUsError): pass \ No newline at end of file diff --git a/autotester/autotester/server/resources/__init__.py b/autotester/autotester/resources/__init__.py similarity index 100% rename from autotester/autotester/server/resources/__init__.py rename to autotester/autotester/resources/__init__.py diff --git a/autotester/autotester/server/resources/ports/__init__.py b/autotester/autotester/resources/ports/__init__.py similarity index 57% rename from autotester/autotester/server/resources/ports/__init__.py rename to autotester/autotester/resources/ports/__init__.py index 5a3f7891..6c1fc5d8 100644 --- a/autotester/autotester/server/resources/ports/__init__.py +++ b/autotester/autotester/resources/ports/__init__.py @@ -1,10 +1,11 @@ import socket -from ...utils.redis_management import REDIS_PREFIX, redis_connection +from autotester.server.utils.redis_management import redis_connection +from autotester.config import config -PORT_MIN = 50000 -PORT_MAX = 65535 - -REDIS_PORT_INT = f'{REDIS_PREFIX}{ports}' +PORT_MIN = config['resources', 'port', 'min'] +PORT_MAX = config['resources', 'port', 'max'] +REDIS_PREFIX = config['redis', '_prefix'] +REDIS_PORT_INT = f"{REDIS_PREFIX}{config['resources', 'port', '_redis_int']}" def next_port(): """ Return a port number that is greater than the last time this method was @@ -16,12 +17,12 @@ def next_port(): return int(r.incr(REDIS_PORT_INT) or 0) % (PORT_MAX - PORT_MIN) + PORT_MIN -def get_available_port(): - """ Return the next available open port on localhost. """ +def get_available_port(host='localhost'): + """ Return the next available open port on . 
""" while True: try: with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: - s.bind(('localhost', next_port())) + s.bind((host, next_port())) s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) port = s.getsockname()[1] return str(port) diff --git a/autotester/autotester/server/resources/postgresql/__init__.py b/autotester/autotester/resources/postgresql/__init__.py similarity index 81% rename from autotester/autotester/server/resources/postgresql/__init__.py rename to autotester/autotester/resources/postgresql/__init__.py index cd500bd4..2b4153a4 100644 --- a/autotester/autotester/server/resources/postgresql/__init__.py +++ b/autotester/autotester/resources/postgresql/__init__.py @@ -4,10 +4,10 @@ import secrets import string from psycopg2.extensions import AsIs +from autotester.config import config -POSTGRES_PREFIX = 'autotest_' - -PGPASSFILE = os.path.join(config.WORKSPACE_DIR, config.LOGS_DIR_NAME, '.pgpass') +POSTGRES_PREFIX = config['resources', 'postgresql', '_prefix'] +PGPASSFILE = os.path.join(config['workspace'], config['_workspace_contents', '_logs'], '.pgpass') def setup_database(test_username): user = getpass.getuser() diff --git a/autotester/autotester/server/form_validation/__init__.py b/autotester/autotester/server/form_validation/__init__.py index 250cb749..e6f6b207 100644 --- a/autotester/autotester/server/form_validation/__init__.py +++ b/autotester/autotester/server/form_validation/__init__.py @@ -1,4 +1,4 @@ -from jsonschema import Draft7Validator, validators, ValidationError, validate +from jsonschema import Draft7Validator, validators, ValidationError from jsonschema.exceptions import best_match from copy import deepcopy @@ -9,7 +9,6 @@ def extend_with_default(validator_class=Draft7Validator): """ validate_props = validator_class.VALIDATORS["properties"] validate_array = validator_class.VALIDATORS["items"] - validate_oneOf = validator_class.VALIDATORS["oneOf"] # must use draft 4+ def set_defaults(validator, properties, instance, schema): """ Set defaults within a "properties" context """ diff --git a/autotester/autotester/server/hooks_context/builtin_hooks.py b/autotester/autotester/server/hooks_context/builtin_hooks.py index e7e63aae..05fe463a 100644 --- a/autotester/autotester/server/hooks_context/builtin_hooks.py +++ b/autotester/autotester/server/hooks_context/builtin_hooks.py @@ -3,11 +3,10 @@ """ import os -import sys import json -import glob -from pathlib import Path -from .utils import add_path +import pkgutil +import importlib +from autotester import testers HOOKS = {'upload_feedback_file' : {'context': 'after_each'}, 'upload_feedback_to_repo' : {'requires': ['clear_feedback_file'], @@ -16,7 +15,7 @@ 'clear_feedback_file' : {'context': 'before_each'}} -def clear_feedback_file(test_data, **kwargs): +def clear_feedback_file(test_data, **_kwargs): """ Remove any previous feedback file before the tests run. """ @@ -25,7 +24,7 @@ def clear_feedback_file(test_data, **kwargs): os.remove(feedback_file) -def upload_feedback_to_repo(api, assignment_id, group_id, test_data, **kwargs): +def upload_feedback_to_repo(api, assignment_id, group_id, test_data, **_kwargs): """ Upload the feedback file to the group's repo. 
""" @@ -35,7 +34,7 @@ def upload_feedback_to_repo(api, assignment_id, group_id, test_data, **kwargs): api.upload_file_to_repo(assignment_id, group_id, feedback_file, feedback_open.read()) -def upload_feedback_file(api, assignment_id, group_id, test_data, **kwargs): +def upload_feedback_file(api, assignment_id, group_id, test_data, **_kwargs): """ Upload the feedback file using MarkUs' api. """ @@ -45,7 +44,7 @@ def upload_feedback_file(api, assignment_id, group_id, test_data, **kwargs): api.upload_feedback_file(assignment_id, group_id, feedback_file, feedback_open.read()) -def upload_annotations(api, assignment_id, group_id, test_data, **kwargs): +def upload_annotations(api, assignment_id, group_id, test_data, **_kwargs): """ Upload annotations using MarkUs' api. """ @@ -59,15 +58,12 @@ def upload_annotations(api, assignment_id, group_id, test_data, **kwargs): def _load_default_hooks(): """ - Return a dictionary containing all hooks loaded from any default_hooks.py - files in any of the bin/ directories for each tester. + Return a dictionary containing all hooks loaded from any default_hooks.py in the testers package. """ - glob_pat = os.path.join(Path(__file__).resolve().parents[2], 'testers', 'testers', '*', 'bin', 'default_hooks.py') defaults = {} - for hooks_file in glob.glob(glob_pat): - bin_dir = os.path.dirname(hooks_file) - with add_path(bin_dir): - default_hooks = __import__('default_hooks') + for _finder, name, _ispkg in pkgutil.walk_packages(testers.__path__, f'{testers.__name__}.'): + if name.endswith('default_hooks'): + default_hooks = importlib.import_module(name) for hook in default_hooks.HOOKS: defaults[hook.__name__] = hook return defaults diff --git a/autotester/autotester/server/hooks_context/hooks_context.py b/autotester/autotester/server/hooks_context/hooks_context.py index 631ad013..ef772556 100644 --- a/autotester/autotester/server/hooks_context/hooks_context.py +++ b/autotester/autotester/server/hooks_context/hooks_context.py @@ -5,13 +5,12 @@ """ import os -import sys import traceback from collections import defaultdict, deque from collections.abc import Callable from contextlib import contextmanager -from . 
import builtin_hooks -from .utils import current_directory, add_path +from autotester.server.hooks_context import builtin_hooks +from autotester.server.utils.path_management import current_directory, add_path class Hooks: diff --git a/autotester/autotester/server/server.py b/autotester/autotester/server/server.py index efd35516..1dd570b7 100755 --- a/autotester/autotester/server/server.py +++ b/autotester/autotester/server/server.py @@ -10,17 +10,31 @@ import tempfile from markusapi import Markus -from ..exceptions import MarkUsError -from .utils import config, constants, string_management, file_management, resource_management -from .hooks_context.hooks_context import Hooks - -### CUSTOM EXCEPTION CLASSES ### - -class AutotestError(MarkUsError): pass - -### HELPER FUNCTIONS ### - -### MAINTENANCE FUNCTIONS ### +from autotester.exceptions import TesterCreationError +from autotester.config import config +from autotester.server.hooks_context.hooks_context import Hooks +from autotester.server.utils.string_management import loads_partial_json, decode_if_bytes, stringify +from autotester.server.utils.user_management import get_reaper_username, current_user, tester_user +from autotester.server.utils.file_management import random_tmpfile_name, clean_dir_name, setup_files, ignore_missing_dir_error, fd_open, fd_lock, move_tree +from autotester.server.utils.resource_management import set_rlimits_before_cleanup, set_rlimits_before_test +from autotester.server.utils.redis_management import clean_after, test_script_directory, update_pop_interval_stat +from autotester.resources.ports import get_available_port +from autotester.resources.postgresql import setup_database + +DEFAULT_ENV_DIR = config['_workspace_contents', '_default_venv_name'] +TEST_RESULT_DIR = os.path.join(config['workspace'], config['_workspace_contents', '_results']) +HOOKS_FILENAME = config['_workspace_contents', '_hooks_file'] +SETTINGS_FILENAME = config['_workspace_contents', '_settings_file'] +FILES_DIRNAME = config['_workspace_contents', '_files_dir'] +TEST_SPECS_DIR = config['_workspace_contents', '_specs'] +TEST_SCRIPT_DIR = config['_workspace_contents', '_scripts'] + +TESTER_IMPORT_LINE = {'custom' : 'from testers.custom.markus_custom_tester import MarkusCustomTester as Tester', + 'haskell' : 'from testers.haskell.markus_haskell_tester import MarkusHaskellTester as Tester', + 'java' : 'from testers.java.markus_java_tester import MarkusJavaTester as Tester', + 'py' : 'from testers.py.markus_python_tester import MarkusPythonTester as Tester', + 'pyta' : 'from testers.pyta.markus_pyta_tester import MarkusPyTATester as Tester', + 'racket' : 'from testers.racket.markus_racket_tester import MarkusRacketTester as Tester'} ### RUN TESTS ### @@ -72,8 +86,8 @@ def kill_with_reaper(test_username): run by the test_username user, deletes itself and exits with a 0 exit code if sucessful. 
""" - if config.REAPER_USER_PREFIX: - reaper_username = get_reaper_username(test_username) + reaper_username = get_reaper_username(test_username) + if reaper_username is not None: cwd = os.path.dirname(os.path.abspath(__file__)) kill_file_dst = random_tmpfile_name() preexec_fn = set_rlimits_before_cleanup() @@ -237,7 +251,7 @@ def run_test(markus_address, server_api_key, test_categories, files_path, assign test_script_path = test_script_directory(markus_address, assignment_id) hooks_script_path = os.path.join(test_script_path, HOOKS_FILENAME) - test_specs_path = os.path.join(test_script_path, TEST_SCRIPTS_SETTINGS_FILENAME) + test_specs_path = os.path.join(test_script_path, SETTINGS_FILENAME) api = Markus(server_api_key, markus_address) with open(test_specs_path) as f: @@ -315,7 +329,7 @@ def create_tester_environments(files_path, test_specs): cmd = [f'{create_file}', json.dumps(settings), files_path] proc = subprocess.run(cmd, stderr=subprocess.PIPE) if proc.returncode != 0: - raise AutotestError(f'create tester environment failed with:\n{proc.stderr}') + raise TesterCreationError(f'create tester environment failed with:\n{proc.stderr}') else: settings['env_loc'] = DEFAULT_ENV_DIR test_specs['testers'][i] = settings @@ -323,7 +337,7 @@ def create_tester_environments(files_path, test_specs): return test_specs def destroy_tester_environments(old_test_script_dir): - test_specs_file = os.path.join(old_test_script_dir, TEST_SCRIPTS_SETTINGS_FILENAME) + test_specs_file = os.path.join(old_test_script_dir, SETTINGS_FILENAME) with open(test_specs_file) as f: test_specs = json.load(f) for settings in test_specs['testers']: @@ -336,7 +350,7 @@ def destroy_tester_environments(old_test_script_dir): cmd = [f'{destroy_file}', json.dumps(settings)] proc = subprocess.run(cmd, stderr=subprocess.PIPE) if proc.returncode != 0: - raise AutotestError(f'destroy tester environment failed with:\n{proc.stderr}') + raise TesterCreationError(f'destroy tester environment failed with:\n{proc.stderr}') shutil.rmtree(env_loc, onerror=ignore_missing_dir_error) @clean_after @@ -353,14 +367,14 @@ def update_test_specs(files_path, assignment_id, markus_address, test_specs): test_script_dir_name = "test_scripts_{}".format(int(time.time())) clean_markus_address = clean_dir_name(markus_address) new_dir = os.path.join(*stringify(TEST_SCRIPT_DIR, clean_markus_address, assignment_id, test_script_dir_name)) - new_files_dir = os.path.join(new_dir, TEST_SCRIPTS_FILES_DIRNAME) + new_files_dir = os.path.join(new_dir, FILES_DIRNAME) move_tree(files_path, new_files_dir) if 'hooks_file' in test_specs: src = os.path.isfile(os.path.join(new_files_dir, test_specs['hooks_file'])) if os.path.isfile(src): os.rename(src, os.path.join(new_dir, HOOKS_FILENAME)) test_specs = create_tester_environments(new_files_dir, test_specs) - settings_filename = os.path.join(new_dir, TEST_SCRIPTS_SETTINGS_FILENAME) + settings_filename = os.path.join(new_dir, SETTINGS_FILENAME) with open(settings_filename, 'w') as f: json.dump(test_specs, f) old_test_script_dir = test_script_directory(markus_address, assignment_id) diff --git a/autotester/autotester/server/utils/constants.py b/autotester/autotester/server/utils/constants.py deleted file mode 100644 index a45cc4bf..00000000 --- a/autotester/autotester/server/utils/constants.py +++ /dev/null @@ -1,23 +0,0 @@ -import os - -TEST_SCRIPT_DIR = os.path.join(config.WORKSPACE_DIR, config.SCRIPTS_DIR_NAME) -TEST_RESULT_DIR = os.path.join(config.WORKSPACE_DIR, config.RESULTS_DIR_NAME) -TEST_SPECS_DIR = 
os.path.join(config.WORKSPACE_DIR, config.SPECS_DIR_NAME) -REDIS_WORKERS_HASH = '{}{}'.format(config.REDIS_PREFIX, config.REDIS_WORKERS_HASH) -REDIS_POP_HASH = '{}{}'.format(config.REDIS_PREFIX, config.REDIS_POP_HASH) -DEFAULT_ENV_DIR = os.path.join(TEST_SPECS_DIR, config.DEFAULT_ENV_NAME) - -TEST_SCRIPTS_SETTINGS_FILENAME = 'settings.json' -TEST_SCRIPTS_FILES_DIRNAME = 'files' -HOOKS_FILENAME = 'hooks.py' - -# For each rlimit limit (key), make sure that cleanup processes -# have at least n=(value) resources more than tester processes -RLIMIT_ADJUSTMENTS = {'RLIMIT_NPROC': 10} - -TESTER_IMPORT_LINE = {'custom' : 'from testers.custom.markus_custom_tester import MarkusCustomTester as Tester', - 'haskell' : 'from testers.haskell.markus_haskell_tester import MarkusHaskellTester as Tester', - 'java' : 'from testers.java.markus_java_tester import MarkusJavaTester as Tester', - 'py' : 'from testers.py.markus_python_tester import MarkusPythonTester as Tester', - 'pyta' : 'from testers.pyta.markus_pyta_tester import MarkusPyTATester as Tester', - 'racket' : 'from testers.racket.markus_racket_tester import MarkusRacketTester as Tester'} \ No newline at end of file diff --git a/autotester/autotester/server/utils/file_management.py b/autotester/autotester/server/utils/file_management.py index bdc4df17..565f7fd1 100644 --- a/autotester/autotester/server/utils/file_management.py +++ b/autotester/autotester/server/utils/file_management.py @@ -2,6 +2,8 @@ import uuid import tempfile import shutil +import fcntl +from contextlib import contextmanager def clean_dir_name(name): """ Return name modified so that it can be used as a unix style directory name """ @@ -25,7 +27,7 @@ def recursive_iglob(root_dir): else: raise ValueError('directory does not exist: {}'.format(root_dir)) -def copy_tree(src, dst, exclude=[]): +def copy_tree(src, dst, exclude=tuple()): """ Recursively copy all files and subdirectories in the path indicated by src to the path indicated by dst. If directories diff --git a/autotester/autotester/server/hooks_context/utils.py b/autotester/autotester/server/utils/path_management.py similarity index 99% rename from autotester/autotester/server/hooks_context/utils.py rename to autotester/autotester/server/utils/path_management.py index e2a1d84f..8fa7eba1 100644 --- a/autotester/autotester/server/hooks_context/utils.py +++ b/autotester/autotester/server/utils/path_management.py @@ -1,6 +1,7 @@ -from contextlib import contextmanager import os import sys +from contextlib import contextmanager + @contextmanager def current_directory(path): diff --git a/autotester/autotester/server/utils/redis_management.py b/autotester/autotester/server/utils/redis_management.py index 9ca46c0f..349d1398 100644 --- a/autotester/autotester/server/utils/redis_management.py +++ b/autotester/autotester/server/utils/redis_management.py @@ -1,21 +1,23 @@ import redis import rq -from . import file_management, config, string_management +import time +from functools import wraps +from autotester.server.utils import file_management, string_management +from autotester.config import config -CURRENT_TEST_SCRIPT_FORMAT = '{}_{}' -REDIS_PREFIX='autotest:' -REDIS_CURRENT_TEST_SCRIPT_HASH = f'{REDIS_PREFIX}curr_test_scripts' +CURRENT_TEST_SCRIPT_HASH = config['redis', '_current_test_script_hash'] +POP_INTERVAL_HASH = config['redis', '_pop_interval_hash'] def redis_connection(): """ Return the currently open redis connection object. 
If there is no connection currently open, one is created using the url specified in - config.REDIS_URL + config['redis', 'url'] """ conn = rq.get_current_connection() if conn: return conn - rq.use_connection(redis=redis.Redis.from_url(config.REDIS_URL)) + rq.use_connection(redis=redis.Redis.from_url(config['redis', 'url'])) return rq.get_current_connection() def get_test_script_key(markus_address, assignment_id): @@ -24,7 +26,7 @@ def get_test_script_key(markus_address, assignment_id): storing the location of test scripts in Redis """ clean_markus_address = file_management.clean_dir_name(markus_address) - return CURRENT_TEST_SCRIPT_FORMAT.format(clean_markus_address, assignment_id) + return f'{clean_markus_address}_{assignment_id}' def test_script_directory(markus_address, assignment_id, set_to=None): """ @@ -35,8 +37,8 @@ def test_script_directory(markus_address, assignment_id, set_to=None): key = get_test_script_key(markus_address, assignment_id) r = redis_connection() if set_to is not None: - r.hset(REDIS_CURRENT_TEST_SCRIPT_HASH, key, set_to) - out = r.hget(REDIS_CURRENT_TEST_SCRIPT_HASH, key) + r.hset(CURRENT_TEST_SCRIPT_HASH, key, set_to) + out = r.hget(CURRENT_TEST_SCRIPT_HASH, key) return string_management.decode_if_bytes(out) @@ -49,9 +51,9 @@ def update_pop_interval_stat(queue_name): """ r = redis_connection() now = time.time() - r.hsetnx(REDIS_POP_HASH, '{}_start'.format(queue_name), now) - r.hset(REDIS_POP_HASH, '{}_last'.format(queue_name), now) - r.hincrby(REDIS_POP_HASH, '{}_count'.format(queue_name), 1) + r.hsetnx(POP_INTERVAL_HASH, '{}_start'.format(queue_name), now) + r.hset(POP_INTERVAL_HASH, '{}_last'.format(queue_name), now) + r.hincrby(POP_INTERVAL_HASH, '{}_count'.format(queue_name), 1) def clear_pop_interval_stat(queue_name): """ @@ -60,9 +62,9 @@ def clear_pop_interval_stat(queue_name): empty. For more details about the data updated see get_pop_interval_stat. """ r = redis_connection() - r.hdel(REDIS_POP_HASH, '{}_start'.format(queue_name)) - r.hset(REDIS_POP_HASH, '{}_last'.format(queue_name), 0) - r.hset(REDIS_POP_HASH, '{}_count'.format(queue_name), 0) + r.hdel(POP_INTERVAL_HASH, '{}_start'.format(queue_name)) + r.hset(POP_INTERVAL_HASH, '{}_last'.format(queue_name), 0) + r.hset(POP_INTERVAL_HASH, '{}_count'.format(queue_name), 0) def get_pop_interval_stat(queue_name): """ @@ -75,9 +77,9 @@ def get_pop_interval_stat(queue_name): current burst of jobs. 
""" r = redis_connection() - start = r.hget(REDIS_POP_HASH, '{}_start'.format(queue_name)) - last = r.hget(REDIS_POP_HASH, '{}_count'.format(queue_name)) - count = r.hget(REDIS_POP_HASH, '{}_count'.format(queue_name)) + start = r.hget(POP_INTERVAL_HASH, '{}_start'.format(queue_name)) + last = r.hget(POP_INTERVAL_HASH, '{}_count'.format(queue_name)) + count = r.hget(POP_INTERVAL_HASH, '{}_count'.format(queue_name)) return start, last, count def get_avg_pop_interval(queue_name): diff --git a/autotester/autotester/server/utils/resource_management.py b/autotester/autotester/server/utils/resource_management.py index 0c655963..b97f1072 100644 --- a/autotester/autotester/server/utils/resource_management.py +++ b/autotester/autotester/server/utils/resource_management.py @@ -1,9 +1,10 @@ import resource -from .config import RLIMIT_SETTINGS -from .constants import RLIMIT_ADJUSTMENTS +from autotester.config import config + +RLIMIT_ADJUSTMENTS = {'nproc': 10} def rlimit_str2int(rlimit_string): - return resource.__getattribute__(rlimit_string) + return getattr(resource, f'RLIMIT_{rlimit_string.upper()}') def set_rlimits_before_test(): """ @@ -13,10 +14,10 @@ def set_rlimits_before_test(): processes that are not available for test processes. This ensures that cleanup processes will always be able to run. """ - for limit_str in config.RLIMIT_SETTINGS.keys() | RLIMIT_ADJUSTMENTS.keys(): + for limit_str in config['rlimit_settings'].keys() | RLIMIT_ADJUSTMENTS.keys(): limit = rlimit_str2int(limit_str) - values = config.RLIMIT_SETTINGS.get(limit_str, (resource.RLIM_INFINITY, resource.RLIM_INFINITY)) + values = config['rlimit_settings'].get(limit_str, (resource.RLIM_INFINITY, resource.RLIM_INFINITY)) curr_soft, curr_hard = resource.getrlimit(limit) soft, hard = (min(vals) for vals in zip((curr_soft, curr_hard), values)) # reduce the hard limit so that cleanup scripts will have at least diff --git a/autotester/autotester/server/utils/string_management.py b/autotester/autotester/server/utils/string_management.py index 0ef989c2..360d7e85 100644 --- a/autotester/autotester/server/utils/string_management.py +++ b/autotester/autotester/server/utils/string_management.py @@ -1,11 +1,11 @@ -from . 
import config +import json def stringify(*args): for a in args: yield str(a) -def decode_if_bytes(b, format='utf-8'): - return b.decode(format) if isinstance(b, bytes) else b +def decode_if_bytes(b, format_='utf-8'): + return b.decode(format_) if isinstance(b, bytes) else b def loads_partial_json(json_string, expected_type=None): """ diff --git a/autotester/autotester/server/utils/user_management.py b/autotester/autotester/server/utils/user_management.py index 0162dc36..0758a361 100644 --- a/autotester/autotester/server/utils/user_management.py +++ b/autotester/autotester/server/utils/user_management.py @@ -1,3 +1,13 @@ +import os +import pwd +from autotester.exceptions import TesterUserError +from autotester.config import config +from autotester.server.utils.string_management import decode_if_bytes +from autotester.server.utils.redis_management import redis_connection + +WORKERS_HASH = config['redis', '_workers_hash'] +WORKERS = config['users', 'workers'] + def current_user(): return pwd.getpwuid(os.getuid()).pw_name @@ -13,13 +23,15 @@ def tester_user(): user_name = os.environ.get('MARKUSWORKERUSER') if user_name is None: - raise AutotestError('No worker users available to run this job') + raise TesterUserError('No worker users available to run this job') - user_workspace = r.hget(REDIS_WORKERS_HASH, user_name) + user_workspace = r.hget(WORKERS_HASH, user_name) if user_workspace is None: - raise AutotestError(f'No workspace directory for user: {user_name}') + raise TesterUserError(f'No workspace directory for user: {user_name}') return user_name, decode_if_bytes(user_workspace) def get_reaper_username(test_username): - return '{}{}'.format(config.REAPER_USER_PREFIX, test_username) \ No newline at end of file + for workers in WORKERS: + if workers['name'] == test_username: + return workers['reaper'] diff --git a/autotester/autotester/testers/custom/bin/default_hooks.py b/autotester/autotester/testers/custom/default_hooks.py similarity index 83% rename from autotester/autotester/testers/custom/bin/default_hooks.py rename to autotester/autotester/testers/custom/default_hooks.py index 21649236..6f35d9f8 100644 --- a/autotester/autotester/testers/custom/bin/default_hooks.py +++ b/autotester/autotester/testers/custom/default_hooks.py @@ -1,6 +1,6 @@ import os -def before_all_custom(settings, **kwargs): +def before_all_custom(settings, **_kwargs): """ Make script files executable """ for test_data in settings['test_data']: for script_file in test_data['script_files']: diff --git a/testers/testers/pyta/bin/requirements.txt b/autotester/autotester/testers/pyta/bin/requirements.txt similarity index 100% rename from testers/testers/pyta/bin/requirements.txt rename to autotester/autotester/testers/pyta/bin/requirements.txt diff --git a/config/requirements.txt b/config/requirements.txt deleted file mode 100644 index 77ac59b5..00000000 --- a/config/requirements.txt +++ /dev/null @@ -1,9 +0,0 @@ -redis>=3.3.11 -requests>=2.22.0 -rq>=1.1.0 -supervisor>=4.1.0 -PyYAML>=5.2 -psycopg2-binary>=2.8.4 -markusapi>=0.0.1 -jsonschema>=3.2.0 -fakeredis>=1.1.0 From 8a994c3d7bd8ecc2e250109b6bf90407931d9995 Mon Sep 17 00:00:00 2001 From: mishaschwartz Date: Thu, 19 Dec 2019 15:59:49 -0500 Subject: [PATCH 12/46] reorganization: intermediate step --- MANIFEST.in | 1 + autotester/autotester/__init__.py | 5 ----- .../specs/default_install_settings.json | 3 --- .../config_example.yml | 0 doc/{hooks.py => hooks_example.py} | 0 autotester/setup.py => setup.py | 17 +++++++++----- .../resources => src/autotester}/__init__.py 
| 0 {autotester => src}/autotester/cli.py | 2 +- .../__init__.py => src/autotester/config.py | 21 ++++++++++++++---- .../config_defaults}/config_default.yml | 14 ++++++++++++ .../config_defaults}/config_env_vars.yml | 0 .../autotester/exceptions.py | 3 --- .../autotester/form_validation.py | 0 .../autotester/resources}/__init__.py | 0 .../autotester/resources/ports/__init__.py | 2 +- .../resources/postgresql/__init__.py | 2 +- .../autotester/server}/__init__.py | 0 .../server/hooks_context}/__init__.py | 0 .../server/hooks_context/builtin_hooks.py | 0 .../server/hooks_context/hooks_context.py | 0 .../autotester/server/server.py | 2 +- .../autotester/server/utils}/__init__.py | 0 .../server/utils/file_management.py | 0 .../server/utils/path_management.py | 0 .../server/utils/redis_management.py | 2 +- .../server/utils/resource_management.py | 2 +- .../server/utils/string_management.py | 0 .../server/utils/user_management.py | 2 +- src/autotester/setup.py | 13 +++++++++++ .../autotester/testers}/__init__.py | 0 .../autotester/testers/custom}/__init__.py | 0 .../autotester/testers/custom/bin/install.sh | 0 .../testers/custom/bin/uninstall.sh | 0 .../testers/custom/default_hooks.py | 0 .../testers/custom/markus_custom_tester.py | 0 .../testers/custom/specs/settings_schema.json | 0 .../testers/custom/tests}/__init__.py | 0 .../custom/tests/script_files/autotest_01.sh | 0 .../testers/custom/tests/specs.json | 0 .../custom/tests/student_files}/__init__.py | 0 .../custom/tests/student_files/submission.py | 0 .../autotester/testers/haskell}/__init__.py | 0 .../autotester/testers/haskell/bin/install.sh | 0 .../testers/haskell/bin/uninstall.sh | 0 .../testers/haskell/markus_haskell_tester.py | 0 .../haskell/specs/settings_schema.json | 0 .../haskell/tests/script_files/Test.hs | 0 .../testers/haskell/tests/specs.json | 0 .../haskell/tests/student_files/Submission.hs | 0 .../autotester/testers/java}/__init__.py | 0 .../autotester/testers/java/bin/install.sh | 0 .../autotester/testers/java/bin/uninstall.sh | 0 .../autotester/testers/java/lib/build.gradle | 0 .../lib/gradle/wrapper/gradle-wrapper.jar | Bin .../gradle/wrapper/gradle-wrapper.properties | 0 .../autotester/testers/java/lib/gradlew | 0 .../autotester/testers/java/lib/gradlew.bat | 0 .../testers/java/lib/settings.gradle | 0 .../toronto/cs/teach/MarkusJavaTester.java | 0 .../testers/java/markus_java_tester.py | 0 .../testers/java/specs/settings_schema.json | 0 .../java/tests/script_files/Test1.java | 0 .../java/tests/script_files/Test2.java | 0 .../autotester/testers/java/tests/specs.json | 0 .../java/tests/student_files/Submission.java | 0 .../autotester/testers/markus_test_specs.py | 0 .../autotester/testers/markus_tester.py | 0 .../autotester/testers/py}/__init__.py | 0 .../testers/py/bin/create_environment.sh | 0 .../autotester/testers/py/bin/install.sh | 0 .../testers/py/bin/requirements.txt | 0 .../autotester/testers/py/bin/uninstall.sh | 0 .../autotester/testers/py/lib/__init__.py | 0 .../autotester/testers/py/lib/c_helper.py | 0 .../autotester/testers/py/lib/sql_helper.py | 0 .../testers/py/markus_python_tester.py | 0 .../testers/py/specs/settings_schema.json | 0 src/autotester/testers/py/tests/__init__.py | 0 .../testers/py/tests/script_files/__init__.py | 0 .../testers/py/tests/script_files/data1.sql | 0 .../testers/py/tests/script_files/data2.sql | 0 .../testers/py/tests/script_files/schema.ddl | 0 .../testers/py/tests/script_files/test.py | 0 .../testers/py/tests/script_files/test2.py | 0 
.../testers/py/tests/script_files/test_sql.py | 0 .../autotester/testers/py/tests/specs.json | 0 .../py/tests/student_files/__init__.py | 0 .../py/tests/student_files/submission.py | 0 .../py/tests/student_files/submission.sql | 0 src/autotester/testers/pyta/__init__.py | 0 .../testers/pyta/bin/create_environment.sh | 0 .../autotester/testers/pyta/bin/install.sh | 0 .../testers/pyta/bin/requirements.txt | 0 .../autotester/testers/pyta/bin/uninstall.sh | 0 .../testers/pyta/markus_pyta_tester.py | 0 .../testers/pyta/specs/settings_schema.json | 0 src/autotester/testers/pyta/tests/__init__.py | 0 .../autotester/testers/pyta/tests/specs.json | 0 .../pyta/tests/student_files/__init__.py | 0 .../pyta/tests/student_files/submission.py | 0 src/autotester/testers/racket/__init__.py | 0 .../autotester/testers/racket/bin/install.sh | 0 .../testers/racket/bin/uninstall.sh | 0 .../autotester/testers/racket/lib/markus.rkt | 0 .../testers/racket/markus_racket_tester.py | 0 .../testers/racket/specs/settings_schema.json | 0 .../racket/tests/script_files/test.rkt | 0 .../testers/racket/tests/specs.json | 0 .../racket/tests/student_files/submission.rkt | 0 src/autotester/testers/tests/.gitkeep | 0 src/autotester/tests/__init__.py | 0 .../tests/autotest_enqueuer_test.py | 0 .../autotester/tests/config_default.py | 0 113 files changed, 64 insertions(+), 27 deletions(-) create mode 100644 MANIFEST.in delete mode 100644 autotester/autotester/__init__.py delete mode 100644 autotester/autotester/testers/custom/specs/default_install_settings.json rename config/config_local.yml => doc/config_example.yml (100%) rename doc/{hooks.py => hooks_example.py} (100%) rename autotester/setup.py => setup.py (56%) rename {autotester/autotester/resources => src/autotester}/__init__.py (100%) rename {autotester => src}/autotester/cli.py (99%) rename autotester/autotester/config/__init__.py => src/autotester/config.py (79%) rename {autotester/autotester/config/defaults => src/autotester/config_defaults}/config_default.yml (87%) rename {autotester/autotester/config/defaults => src/autotester/config_defaults}/config_env_vars.yml (100%) rename autotester/autotester/exceptions/__init__.py => src/autotester/exceptions.py (99%) rename autotester/autotester/server/form_validation/__init__.py => src/autotester/form_validation.py (100%) rename {autotester/autotester/server => src/autotester/resources}/__init__.py (100%) rename {autotester => src}/autotester/resources/ports/__init__.py (96%) rename {autotester => src}/autotester/resources/postgresql/__init__.py (96%) rename {autotester/autotester/server/hooks_context => src/autotester/server}/__init__.py (100%) rename {autotester/autotester/server/utils => src/autotester/server/hooks_context}/__init__.py (100%) rename {autotester => src}/autotester/server/hooks_context/builtin_hooks.py (100%) rename {autotester => src}/autotester/server/hooks_context/hooks_context.py (100%) rename {autotester => src}/autotester/server/server.py (99%) rename {autotester/autotester/testers => src/autotester/server/utils}/__init__.py (100%) rename {autotester => src}/autotester/server/utils/file_management.py (100%) rename {autotester => src}/autotester/server/utils/path_management.py (100%) rename {autotester => src}/autotester/server/utils/redis_management.py (99%) rename {autotester => src}/autotester/server/utils/resource_management.py (98%) rename {autotester => src}/autotester/server/utils/string_management.py (100%) rename {autotester => src}/autotester/server/utils/user_management.py (97%) create mode 
100644 src/autotester/setup.py rename {autotester/autotester/testers/custom => src/autotester/testers}/__init__.py (100%) rename {autotester/autotester/testers/haskell => src/autotester/testers/custom}/__init__.py (100%) rename {autotester => src}/autotester/testers/custom/bin/install.sh (100%) rename {autotester => src}/autotester/testers/custom/bin/uninstall.sh (100%) rename {autotester => src}/autotester/testers/custom/default_hooks.py (100%) rename {autotester => src}/autotester/testers/custom/markus_custom_tester.py (100%) rename {autotester => src}/autotester/testers/custom/specs/settings_schema.json (100%) rename {autotester/autotester/testers/java => src/autotester/testers/custom/tests}/__init__.py (100%) rename {autotester => src}/autotester/testers/custom/tests/script_files/autotest_01.sh (100%) rename {autotester => src}/autotester/testers/custom/tests/specs.json (100%) rename {autotester/autotester/testers/py => src/autotester/testers/custom/tests/student_files}/__init__.py (100%) rename {autotester => src}/autotester/testers/custom/tests/student_files/submission.py (100%) rename {autotester/autotester/testers/pyta => src/autotester/testers/haskell}/__init__.py (100%) rename {autotester => src}/autotester/testers/haskell/bin/install.sh (100%) rename {autotester => src}/autotester/testers/haskell/bin/uninstall.sh (100%) rename {autotester => src}/autotester/testers/haskell/markus_haskell_tester.py (100%) rename {autotester => src}/autotester/testers/haskell/specs/settings_schema.json (100%) rename {autotester => src}/autotester/testers/haskell/tests/script_files/Test.hs (100%) rename {autotester => src}/autotester/testers/haskell/tests/specs.json (100%) rename {autotester => src}/autotester/testers/haskell/tests/student_files/Submission.hs (100%) rename {autotester/autotester/testers/racket => src/autotester/testers/java}/__init__.py (100%) rename {autotester => src}/autotester/testers/java/bin/install.sh (100%) rename {autotester => src}/autotester/testers/java/bin/uninstall.sh (100%) rename {autotester => src}/autotester/testers/java/lib/build.gradle (100%) rename {autotester => src}/autotester/testers/java/lib/gradle/wrapper/gradle-wrapper.jar (100%) rename {autotester => src}/autotester/testers/java/lib/gradle/wrapper/gradle-wrapper.properties (100%) rename {autotester => src}/autotester/testers/java/lib/gradlew (100%) rename {autotester => src}/autotester/testers/java/lib/gradlew.bat (100%) rename {autotester => src}/autotester/testers/java/lib/settings.gradle (100%) rename {autotester => src}/autotester/testers/java/lib/src/main/java/edu/toronto/cs/teach/MarkusJavaTester.java (100%) rename {autotester => src}/autotester/testers/java/markus_java_tester.py (100%) rename {autotester => src}/autotester/testers/java/specs/settings_schema.json (100%) rename {autotester => src}/autotester/testers/java/tests/script_files/Test1.java (100%) rename {autotester => src}/autotester/testers/java/tests/script_files/Test2.java (100%) rename {autotester => src}/autotester/testers/java/tests/specs.json (100%) rename {autotester => src}/autotester/testers/java/tests/student_files/Submission.java (100%) rename {autotester => src}/autotester/testers/markus_test_specs.py (100%) rename {autotester => src}/autotester/testers/markus_tester.py (100%) rename {autotester/autotester/tests => src/autotester/testers/py}/__init__.py (100%) rename {autotester => src}/autotester/testers/py/bin/create_environment.sh (100%) rename {autotester => src}/autotester/testers/py/bin/install.sh (100%) rename 
{autotester => src}/autotester/testers/py/bin/requirements.txt (100%) rename {autotester => src}/autotester/testers/py/bin/uninstall.sh (100%) rename autotester/autotester/testers/tests/.gitkeep => src/autotester/testers/py/lib/__init__.py (100%) rename {autotester => src}/autotester/testers/py/lib/c_helper.py (100%) rename {autotester => src}/autotester/testers/py/lib/sql_helper.py (100%) rename {autotester => src}/autotester/testers/py/markus_python_tester.py (100%) rename {autotester => src}/autotester/testers/py/specs/settings_schema.json (100%) create mode 100644 src/autotester/testers/py/tests/__init__.py create mode 100644 src/autotester/testers/py/tests/script_files/__init__.py rename {autotester => src}/autotester/testers/py/tests/script_files/data1.sql (100%) rename {autotester => src}/autotester/testers/py/tests/script_files/data2.sql (100%) rename {autotester => src}/autotester/testers/py/tests/script_files/schema.ddl (100%) rename {autotester => src}/autotester/testers/py/tests/script_files/test.py (100%) rename {autotester => src}/autotester/testers/py/tests/script_files/test2.py (100%) rename {autotester => src}/autotester/testers/py/tests/script_files/test_sql.py (100%) rename {autotester => src}/autotester/testers/py/tests/specs.json (100%) create mode 100644 src/autotester/testers/py/tests/student_files/__init__.py rename {autotester => src}/autotester/testers/py/tests/student_files/submission.py (100%) rename {autotester => src}/autotester/testers/py/tests/student_files/submission.sql (100%) create mode 100644 src/autotester/testers/pyta/__init__.py rename {autotester => src}/autotester/testers/pyta/bin/create_environment.sh (100%) rename {autotester => src}/autotester/testers/pyta/bin/install.sh (100%) rename {autotester => src}/autotester/testers/pyta/bin/requirements.txt (100%) rename {autotester => src}/autotester/testers/pyta/bin/uninstall.sh (100%) rename {autotester => src}/autotester/testers/pyta/markus_pyta_tester.py (100%) rename {autotester => src}/autotester/testers/pyta/specs/settings_schema.json (100%) create mode 100644 src/autotester/testers/pyta/tests/__init__.py rename {autotester => src}/autotester/testers/pyta/tests/specs.json (100%) create mode 100644 src/autotester/testers/pyta/tests/student_files/__init__.py rename {autotester => src}/autotester/testers/pyta/tests/student_files/submission.py (100%) create mode 100644 src/autotester/testers/racket/__init__.py rename {autotester => src}/autotester/testers/racket/bin/install.sh (100%) rename {autotester => src}/autotester/testers/racket/bin/uninstall.sh (100%) rename {autotester => src}/autotester/testers/racket/lib/markus.rkt (100%) rename {autotester => src}/autotester/testers/racket/markus_racket_tester.py (100%) rename {autotester => src}/autotester/testers/racket/specs/settings_schema.json (100%) rename {autotester => src}/autotester/testers/racket/tests/script_files/test.rkt (100%) rename {autotester => src}/autotester/testers/racket/tests/specs.json (100%) rename {autotester => src}/autotester/testers/racket/tests/student_files/submission.rkt (100%) create mode 100644 src/autotester/testers/tests/.gitkeep create mode 100644 src/autotester/tests/__init__.py rename {autotester => src}/autotester/tests/autotest_enqueuer_test.py (100%) rename {autotester => src}/autotester/tests/config_default.py (100%) diff --git a/MANIFEST.in b/MANIFEST.in new file mode 100644 index 00000000..bd02d27c --- /dev/null +++ b/MANIFEST.in @@ -0,0 +1 @@ +include src/autotester/config_defaults/* \ No newline at end of 
file diff --git a/autotester/autotester/__init__.py b/autotester/autotester/__init__.py deleted file mode 100644 index 770d42b3..00000000 --- a/autotester/autotester/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -from os.path import abspath, dirname, join - -PROJECT_ROOT = dirname(abspath(__file__)) -AUTOTESTER_ROOT = dirname(PROJECT_ROOT) -CONFIG_ROOT = join(AUTOTESTER_ROOT, 'config') diff --git a/autotester/autotester/testers/custom/specs/default_install_settings.json b/autotester/autotester/testers/custom/specs/default_install_settings.json deleted file mode 100644 index a2867fc3..00000000 --- a/autotester/autotester/testers/custom/specs/default_install_settings.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "executable_scripts": true -} diff --git a/config/config_local.yml b/doc/config_example.yml similarity index 100% rename from config/config_local.yml rename to doc/config_example.yml diff --git a/doc/hooks.py b/doc/hooks_example.py similarity index 100% rename from doc/hooks.py rename to doc/hooks_example.py diff --git a/autotester/setup.py b/setup.py similarity index 56% rename from autotester/setup.py rename to setup.py index a21b5776..4fbc4cd6 100644 --- a/autotester/setup.py +++ b/setup.py @@ -1,13 +1,16 @@ -from setuptools import setup +from setuptools import setup, find_packages -setup(name='MarkUs Autotester', +test_exclusions = ["*.tests", "*.tests.*", "tests.*", "tests"] + +setup(name='markus-autotester', version='2.0', description='Automatic tester for programming assignments', url='https://github.com/MarkUsProject/markus-autotesting', author='Misha Schwartz, Alessio Di Sandro', author_email='mschwa@cs.toronto.edu', license='MIT', - packages=['autotester'], + package_dir={'': 'src'}, + packages=find_packages(where='src', exclude=test_exclusions), zip_safe=False, install_requires=[ 'redis==3.3.8', @@ -17,9 +20,13 @@ 'PyYAML==5.1.2', 'psycopg2-binary==2.8.3', 'markusapi==0.0.1', - 'jsonschema==3.0.2', - 'fakeredis==1.1.0', + 'jsonschema==3.0.2' + ], + tests_require=[ + 'fakeredis==1.1.0' ], + include_package_data=True, + # data_files=[('config_defaults', ['config/config_default.yml', 'config/config_env_vars.yml'])], entry_points={ 'console_scripts': 'markus_autotester = autotester.cli:cli' }) \ No newline at end of file diff --git a/autotester/autotester/resources/__init__.py b/src/autotester/__init__.py similarity index 100% rename from autotester/autotester/resources/__init__.py rename to src/autotester/__init__.py diff --git a/autotester/autotester/cli.py b/src/autotester/cli.py similarity index 99% rename from autotester/autotester/cli.py rename to src/autotester/cli.py index 92b181d0..1f95e403 100755 --- a/autotester/autotester/cli.py +++ b/src/autotester/cli.py @@ -14,7 +14,7 @@ from autotester.exceptions import * from autotester.server.utils.redis_management import redis_connection, get_avg_pop_interval, test_script_directory from autotester.server.utils.file_management import ignore_missing_dir_error -from autotester.config import config +from autotester import config from autotester.server import form_validation from autotester.server.server import run_test, update_test_specs diff --git a/autotester/autotester/config/__init__.py b/src/autotester/config.py similarity index 79% rename from autotester/autotester/config/__init__.py rename to src/autotester/config.py index d06c97ea..810d22fc 100644 --- a/autotester/autotester/config/__init__.py +++ b/src/autotester/config.py @@ -5,13 +5,26 @@ import re from collections.abc import Mapping import yaml -import autotester -DEFAULT_ROOT 
= os.path.join(os.path.dirname(__file__), 'defaults') +DEFAULT_ROOT = os.path.join(os.path.dirname(__file__), 'config_defaults') +CONFIG_FILENAME = 'markus_autotester_config' +CONFIG_ENV_VAR = 'MARKUS_AUTOTESTER_CONFIG' + +def _find_local_config(): + system_config = os.path.join(os.path.sep, 'etc', CONFIG_FILENAME) + user_config = os.path.join(os.environ.get('HOME'), f'.{CONFIG_FILENAME}') + env_config = os.environ.get(CONFIG_ENV_VAR) + + if env_config is not None: + return env_config + if os.path.isfile(user_config): + return user_config + if os.path.isfile(system_config): + return system_config class _Config: - _local_config = os.path.join(autotester.CONFIG_ROOT, 'config_local.yml') + _local_config = _find_local_config() _default_config = os.path.join(DEFAULT_ROOT, 'config_default.yml') _env_var_config = os.path.join(DEFAULT_ROOT, 'config_env_vars.yml') _replacement_pattern = re.compile(r'.*?\${(\w+)}.*?') @@ -67,7 +80,7 @@ def constructor(loader, node, pattern=self._replacement_pattern): def _load_from_yaml(self): config_dicts = [] - if os.path.isfile(self._local_config): + if self._local_config is not None and os.path.isfile(self._local_config): with open(self._local_config) as f: local_config = yaml.load(f, Loader=self._yaml_loader) if local_config is not None: diff --git a/autotester/autotester/config/defaults/config_default.yml b/src/autotester/config_defaults/config_default.yml similarity index 87% rename from autotester/autotester/config/defaults/config_default.yml rename to src/autotester/config_defaults/config_default.yml index 6a52b087..20269d9f 100644 --- a/autotester/autotester/config/defaults/config_default.yml +++ b/src/autotester/config_defaults/config_default.yml @@ -30,6 +30,20 @@ rlimit_settings: - 300 - 300 +testers: + custom: + enable: true + haskell: + enable: true + java: + enable: true + py: + enable: true + pyta: + enable: true + racket: + enable: true + resources: port: _redis_int: port diff --git a/autotester/autotester/config/defaults/config_env_vars.yml b/src/autotester/config_defaults/config_env_vars.yml similarity index 100% rename from autotester/autotester/config/defaults/config_env_vars.yml rename to src/autotester/config_defaults/config_env_vars.yml diff --git a/autotester/autotester/exceptions/__init__.py b/src/autotester/exceptions.py similarity index 99% rename from autotester/autotester/exceptions/__init__.py rename to src/autotester/exceptions.py index 7dc90e70..cd5f1ae4 100644 --- a/autotester/autotester/exceptions/__init__.py +++ b/src/autotester/exceptions.py @@ -14,14 +14,11 @@ class TesterUserError(MarkUsError): class JobArgumentError(MarkUsError): pass - class InvalidQueueError(MarkUsError): pass - class TestScriptFilesError(MarkUsError): pass - class TestParameterError(MarkUsError): pass \ No newline at end of file diff --git a/autotester/autotester/server/form_validation/__init__.py b/src/autotester/form_validation.py similarity index 100% rename from autotester/autotester/server/form_validation/__init__.py rename to src/autotester/form_validation.py diff --git a/autotester/autotester/server/__init__.py b/src/autotester/resources/__init__.py similarity index 100% rename from autotester/autotester/server/__init__.py rename to src/autotester/resources/__init__.py diff --git a/autotester/autotester/resources/ports/__init__.py b/src/autotester/resources/ports/__init__.py similarity index 96% rename from autotester/autotester/resources/ports/__init__.py rename to src/autotester/resources/ports/__init__.py index 6c1fc5d8..ca7b5c21 100644 --- 
a/autotester/autotester/resources/ports/__init__.py +++ b/src/autotester/resources/ports/__init__.py @@ -1,6 +1,6 @@ import socket from autotester.server.utils.redis_management import redis_connection -from autotester.config import config +from autotester import config PORT_MIN = config['resources', 'port', 'min'] PORT_MAX = config['resources', 'port', 'max'] diff --git a/autotester/autotester/resources/postgresql/__init__.py b/src/autotester/resources/postgresql/__init__.py similarity index 96% rename from autotester/autotester/resources/postgresql/__init__.py rename to src/autotester/resources/postgresql/__init__.py index 2b4153a4..a923618d 100644 --- a/autotester/autotester/resources/postgresql/__init__.py +++ b/src/autotester/resources/postgresql/__init__.py @@ -4,7 +4,7 @@ import secrets import string from psycopg2.extensions import AsIs -from autotester.config import config +from autotester import config POSTGRES_PREFIX = config['resources', 'postgresql', '_prefix'] PGPASSFILE = os.path.join(config['workspace'], config['_workspace_contents', '_logs'], '.pgpass') diff --git a/autotester/autotester/server/hooks_context/__init__.py b/src/autotester/server/__init__.py similarity index 100% rename from autotester/autotester/server/hooks_context/__init__.py rename to src/autotester/server/__init__.py diff --git a/autotester/autotester/server/utils/__init__.py b/src/autotester/server/hooks_context/__init__.py similarity index 100% rename from autotester/autotester/server/utils/__init__.py rename to src/autotester/server/hooks_context/__init__.py diff --git a/autotester/autotester/server/hooks_context/builtin_hooks.py b/src/autotester/server/hooks_context/builtin_hooks.py similarity index 100% rename from autotester/autotester/server/hooks_context/builtin_hooks.py rename to src/autotester/server/hooks_context/builtin_hooks.py diff --git a/autotester/autotester/server/hooks_context/hooks_context.py b/src/autotester/server/hooks_context/hooks_context.py similarity index 100% rename from autotester/autotester/server/hooks_context/hooks_context.py rename to src/autotester/server/hooks_context/hooks_context.py diff --git a/autotester/autotester/server/server.py b/src/autotester/server/server.py similarity index 99% rename from autotester/autotester/server/server.py rename to src/autotester/server/server.py index 1dd570b7..ee36b5c5 100755 --- a/autotester/autotester/server/server.py +++ b/src/autotester/server/server.py @@ -11,7 +11,7 @@ from markusapi import Markus from autotester.exceptions import TesterCreationError -from autotester.config import config +from autotester import config from autotester.server.hooks_context.hooks_context import Hooks from autotester.server.utils.string_management import loads_partial_json, decode_if_bytes, stringify from autotester.server.utils.user_management import get_reaper_username, current_user, tester_user diff --git a/autotester/autotester/testers/__init__.py b/src/autotester/server/utils/__init__.py similarity index 100% rename from autotester/autotester/testers/__init__.py rename to src/autotester/server/utils/__init__.py diff --git a/autotester/autotester/server/utils/file_management.py b/src/autotester/server/utils/file_management.py similarity index 100% rename from autotester/autotester/server/utils/file_management.py rename to src/autotester/server/utils/file_management.py diff --git a/autotester/autotester/server/utils/path_management.py b/src/autotester/server/utils/path_management.py similarity index 100% rename from 
autotester/autotester/server/utils/path_management.py rename to src/autotester/server/utils/path_management.py diff --git a/autotester/autotester/server/utils/redis_management.py b/src/autotester/server/utils/redis_management.py similarity index 99% rename from autotester/autotester/server/utils/redis_management.py rename to src/autotester/server/utils/redis_management.py index 349d1398..ae759587 100644 --- a/autotester/autotester/server/utils/redis_management.py +++ b/src/autotester/server/utils/redis_management.py @@ -3,7 +3,7 @@ import time from functools import wraps from autotester.server.utils import file_management, string_management -from autotester.config import config +from autotester import config CURRENT_TEST_SCRIPT_HASH = config['redis', '_current_test_script_hash'] POP_INTERVAL_HASH = config['redis', '_pop_interval_hash'] diff --git a/autotester/autotester/server/utils/resource_management.py b/src/autotester/server/utils/resource_management.py similarity index 98% rename from autotester/autotester/server/utils/resource_management.py rename to src/autotester/server/utils/resource_management.py index b97f1072..64073357 100644 --- a/autotester/autotester/server/utils/resource_management.py +++ b/src/autotester/server/utils/resource_management.py @@ -1,5 +1,5 @@ import resource -from autotester.config import config +from autotester import config RLIMIT_ADJUSTMENTS = {'nproc': 10} diff --git a/autotester/autotester/server/utils/string_management.py b/src/autotester/server/utils/string_management.py similarity index 100% rename from autotester/autotester/server/utils/string_management.py rename to src/autotester/server/utils/string_management.py diff --git a/autotester/autotester/server/utils/user_management.py b/src/autotester/server/utils/user_management.py similarity index 97% rename from autotester/autotester/server/utils/user_management.py rename to src/autotester/server/utils/user_management.py index 0758a361..21355e19 100644 --- a/autotester/autotester/server/utils/user_management.py +++ b/src/autotester/server/utils/user_management.py @@ -1,7 +1,7 @@ import os import pwd from autotester.exceptions import TesterUserError -from autotester.config import config +from autotester import config from autotester.server.utils.string_management import decode_if_bytes from autotester.server.utils.redis_management import redis_connection diff --git a/src/autotester/setup.py b/src/autotester/setup.py new file mode 100644 index 00000000..10a18775 --- /dev/null +++ b/src/autotester/setup.py @@ -0,0 +1,13 @@ +from setuptools import setup, find_packages + +test_exclusions = ["*.tests", "*.tests.*", "tests.*", "tests"] + +setup(name='markus-autotester-testers', + version='2.0', + description='Testers for the automatic tester for programming assignments', + url='https://github.com/MarkUsProject/markus-autotesting', + author='Misha Schwartz, Alessio Di Sandro', + author_email='mschwa@cs.toronto.edu', + license='MIT', + packages=find_packages(where='testers', exclude=test_exclusions), + zip_safe=False) \ No newline at end of file diff --git a/autotester/autotester/testers/custom/__init__.py b/src/autotester/testers/__init__.py similarity index 100% rename from autotester/autotester/testers/custom/__init__.py rename to src/autotester/testers/__init__.py diff --git a/autotester/autotester/testers/haskell/__init__.py b/src/autotester/testers/custom/__init__.py similarity index 100% rename from autotester/autotester/testers/haskell/__init__.py rename to src/autotester/testers/custom/__init__.py diff 
--git a/autotester/autotester/testers/custom/bin/install.sh b/src/autotester/testers/custom/bin/install.sh similarity index 100% rename from autotester/autotester/testers/custom/bin/install.sh rename to src/autotester/testers/custom/bin/install.sh diff --git a/autotester/autotester/testers/custom/bin/uninstall.sh b/src/autotester/testers/custom/bin/uninstall.sh similarity index 100% rename from autotester/autotester/testers/custom/bin/uninstall.sh rename to src/autotester/testers/custom/bin/uninstall.sh diff --git a/autotester/autotester/testers/custom/default_hooks.py b/src/autotester/testers/custom/default_hooks.py similarity index 100% rename from autotester/autotester/testers/custom/default_hooks.py rename to src/autotester/testers/custom/default_hooks.py diff --git a/autotester/autotester/testers/custom/markus_custom_tester.py b/src/autotester/testers/custom/markus_custom_tester.py similarity index 100% rename from autotester/autotester/testers/custom/markus_custom_tester.py rename to src/autotester/testers/custom/markus_custom_tester.py diff --git a/autotester/autotester/testers/custom/specs/settings_schema.json b/src/autotester/testers/custom/specs/settings_schema.json similarity index 100% rename from autotester/autotester/testers/custom/specs/settings_schema.json rename to src/autotester/testers/custom/specs/settings_schema.json diff --git a/autotester/autotester/testers/java/__init__.py b/src/autotester/testers/custom/tests/__init__.py similarity index 100% rename from autotester/autotester/testers/java/__init__.py rename to src/autotester/testers/custom/tests/__init__.py diff --git a/autotester/autotester/testers/custom/tests/script_files/autotest_01.sh b/src/autotester/testers/custom/tests/script_files/autotest_01.sh similarity index 100% rename from autotester/autotester/testers/custom/tests/script_files/autotest_01.sh rename to src/autotester/testers/custom/tests/script_files/autotest_01.sh diff --git a/autotester/autotester/testers/custom/tests/specs.json b/src/autotester/testers/custom/tests/specs.json similarity index 100% rename from autotester/autotester/testers/custom/tests/specs.json rename to src/autotester/testers/custom/tests/specs.json diff --git a/autotester/autotester/testers/py/__init__.py b/src/autotester/testers/custom/tests/student_files/__init__.py similarity index 100% rename from autotester/autotester/testers/py/__init__.py rename to src/autotester/testers/custom/tests/student_files/__init__.py diff --git a/autotester/autotester/testers/custom/tests/student_files/submission.py b/src/autotester/testers/custom/tests/student_files/submission.py similarity index 100% rename from autotester/autotester/testers/custom/tests/student_files/submission.py rename to src/autotester/testers/custom/tests/student_files/submission.py diff --git a/autotester/autotester/testers/pyta/__init__.py b/src/autotester/testers/haskell/__init__.py similarity index 100% rename from autotester/autotester/testers/pyta/__init__.py rename to src/autotester/testers/haskell/__init__.py diff --git a/autotester/autotester/testers/haskell/bin/install.sh b/src/autotester/testers/haskell/bin/install.sh similarity index 100% rename from autotester/autotester/testers/haskell/bin/install.sh rename to src/autotester/testers/haskell/bin/install.sh diff --git a/autotester/autotester/testers/haskell/bin/uninstall.sh b/src/autotester/testers/haskell/bin/uninstall.sh similarity index 100% rename from autotester/autotester/testers/haskell/bin/uninstall.sh rename to 
src/autotester/testers/haskell/bin/uninstall.sh diff --git a/autotester/autotester/testers/haskell/markus_haskell_tester.py b/src/autotester/testers/haskell/markus_haskell_tester.py similarity index 100% rename from autotester/autotester/testers/haskell/markus_haskell_tester.py rename to src/autotester/testers/haskell/markus_haskell_tester.py diff --git a/autotester/autotester/testers/haskell/specs/settings_schema.json b/src/autotester/testers/haskell/specs/settings_schema.json similarity index 100% rename from autotester/autotester/testers/haskell/specs/settings_schema.json rename to src/autotester/testers/haskell/specs/settings_schema.json diff --git a/autotester/autotester/testers/haskell/tests/script_files/Test.hs b/src/autotester/testers/haskell/tests/script_files/Test.hs similarity index 100% rename from autotester/autotester/testers/haskell/tests/script_files/Test.hs rename to src/autotester/testers/haskell/tests/script_files/Test.hs diff --git a/autotester/autotester/testers/haskell/tests/specs.json b/src/autotester/testers/haskell/tests/specs.json similarity index 100% rename from autotester/autotester/testers/haskell/tests/specs.json rename to src/autotester/testers/haskell/tests/specs.json diff --git a/autotester/autotester/testers/haskell/tests/student_files/Submission.hs b/src/autotester/testers/haskell/tests/student_files/Submission.hs similarity index 100% rename from autotester/autotester/testers/haskell/tests/student_files/Submission.hs rename to src/autotester/testers/haskell/tests/student_files/Submission.hs diff --git a/autotester/autotester/testers/racket/__init__.py b/src/autotester/testers/java/__init__.py similarity index 100% rename from autotester/autotester/testers/racket/__init__.py rename to src/autotester/testers/java/__init__.py diff --git a/autotester/autotester/testers/java/bin/install.sh b/src/autotester/testers/java/bin/install.sh similarity index 100% rename from autotester/autotester/testers/java/bin/install.sh rename to src/autotester/testers/java/bin/install.sh diff --git a/autotester/autotester/testers/java/bin/uninstall.sh b/src/autotester/testers/java/bin/uninstall.sh similarity index 100% rename from autotester/autotester/testers/java/bin/uninstall.sh rename to src/autotester/testers/java/bin/uninstall.sh diff --git a/autotester/autotester/testers/java/lib/build.gradle b/src/autotester/testers/java/lib/build.gradle similarity index 100% rename from autotester/autotester/testers/java/lib/build.gradle rename to src/autotester/testers/java/lib/build.gradle diff --git a/autotester/autotester/testers/java/lib/gradle/wrapper/gradle-wrapper.jar b/src/autotester/testers/java/lib/gradle/wrapper/gradle-wrapper.jar similarity index 100% rename from autotester/autotester/testers/java/lib/gradle/wrapper/gradle-wrapper.jar rename to src/autotester/testers/java/lib/gradle/wrapper/gradle-wrapper.jar diff --git a/autotester/autotester/testers/java/lib/gradle/wrapper/gradle-wrapper.properties b/src/autotester/testers/java/lib/gradle/wrapper/gradle-wrapper.properties similarity index 100% rename from autotester/autotester/testers/java/lib/gradle/wrapper/gradle-wrapper.properties rename to src/autotester/testers/java/lib/gradle/wrapper/gradle-wrapper.properties diff --git a/autotester/autotester/testers/java/lib/gradlew b/src/autotester/testers/java/lib/gradlew similarity index 100% rename from autotester/autotester/testers/java/lib/gradlew rename to src/autotester/testers/java/lib/gradlew diff --git a/autotester/autotester/testers/java/lib/gradlew.bat 
b/src/autotester/testers/java/lib/gradlew.bat similarity index 100% rename from autotester/autotester/testers/java/lib/gradlew.bat rename to src/autotester/testers/java/lib/gradlew.bat diff --git a/autotester/autotester/testers/java/lib/settings.gradle b/src/autotester/testers/java/lib/settings.gradle similarity index 100% rename from autotester/autotester/testers/java/lib/settings.gradle rename to src/autotester/testers/java/lib/settings.gradle diff --git a/autotester/autotester/testers/java/lib/src/main/java/edu/toronto/cs/teach/MarkusJavaTester.java b/src/autotester/testers/java/lib/src/main/java/edu/toronto/cs/teach/MarkusJavaTester.java similarity index 100% rename from autotester/autotester/testers/java/lib/src/main/java/edu/toronto/cs/teach/MarkusJavaTester.java rename to src/autotester/testers/java/lib/src/main/java/edu/toronto/cs/teach/MarkusJavaTester.java diff --git a/autotester/autotester/testers/java/markus_java_tester.py b/src/autotester/testers/java/markus_java_tester.py similarity index 100% rename from autotester/autotester/testers/java/markus_java_tester.py rename to src/autotester/testers/java/markus_java_tester.py diff --git a/autotester/autotester/testers/java/specs/settings_schema.json b/src/autotester/testers/java/specs/settings_schema.json similarity index 100% rename from autotester/autotester/testers/java/specs/settings_schema.json rename to src/autotester/testers/java/specs/settings_schema.json diff --git a/autotester/autotester/testers/java/tests/script_files/Test1.java b/src/autotester/testers/java/tests/script_files/Test1.java similarity index 100% rename from autotester/autotester/testers/java/tests/script_files/Test1.java rename to src/autotester/testers/java/tests/script_files/Test1.java diff --git a/autotester/autotester/testers/java/tests/script_files/Test2.java b/src/autotester/testers/java/tests/script_files/Test2.java similarity index 100% rename from autotester/autotester/testers/java/tests/script_files/Test2.java rename to src/autotester/testers/java/tests/script_files/Test2.java diff --git a/autotester/autotester/testers/java/tests/specs.json b/src/autotester/testers/java/tests/specs.json similarity index 100% rename from autotester/autotester/testers/java/tests/specs.json rename to src/autotester/testers/java/tests/specs.json diff --git a/autotester/autotester/testers/java/tests/student_files/Submission.java b/src/autotester/testers/java/tests/student_files/Submission.java similarity index 100% rename from autotester/autotester/testers/java/tests/student_files/Submission.java rename to src/autotester/testers/java/tests/student_files/Submission.java diff --git a/autotester/autotester/testers/markus_test_specs.py b/src/autotester/testers/markus_test_specs.py similarity index 100% rename from autotester/autotester/testers/markus_test_specs.py rename to src/autotester/testers/markus_test_specs.py diff --git a/autotester/autotester/testers/markus_tester.py b/src/autotester/testers/markus_tester.py similarity index 100% rename from autotester/autotester/testers/markus_tester.py rename to src/autotester/testers/markus_tester.py diff --git a/autotester/autotester/tests/__init__.py b/src/autotester/testers/py/__init__.py similarity index 100% rename from autotester/autotester/tests/__init__.py rename to src/autotester/testers/py/__init__.py diff --git a/autotester/autotester/testers/py/bin/create_environment.sh b/src/autotester/testers/py/bin/create_environment.sh similarity index 100% rename from autotester/autotester/testers/py/bin/create_environment.sh 
rename to src/autotester/testers/py/bin/create_environment.sh diff --git a/autotester/autotester/testers/py/bin/install.sh b/src/autotester/testers/py/bin/install.sh similarity index 100% rename from autotester/autotester/testers/py/bin/install.sh rename to src/autotester/testers/py/bin/install.sh diff --git a/autotester/autotester/testers/py/bin/requirements.txt b/src/autotester/testers/py/bin/requirements.txt similarity index 100% rename from autotester/autotester/testers/py/bin/requirements.txt rename to src/autotester/testers/py/bin/requirements.txt diff --git a/autotester/autotester/testers/py/bin/uninstall.sh b/src/autotester/testers/py/bin/uninstall.sh similarity index 100% rename from autotester/autotester/testers/py/bin/uninstall.sh rename to src/autotester/testers/py/bin/uninstall.sh diff --git a/autotester/autotester/testers/tests/.gitkeep b/src/autotester/testers/py/lib/__init__.py similarity index 100% rename from autotester/autotester/testers/tests/.gitkeep rename to src/autotester/testers/py/lib/__init__.py diff --git a/autotester/autotester/testers/py/lib/c_helper.py b/src/autotester/testers/py/lib/c_helper.py similarity index 100% rename from autotester/autotester/testers/py/lib/c_helper.py rename to src/autotester/testers/py/lib/c_helper.py diff --git a/autotester/autotester/testers/py/lib/sql_helper.py b/src/autotester/testers/py/lib/sql_helper.py similarity index 100% rename from autotester/autotester/testers/py/lib/sql_helper.py rename to src/autotester/testers/py/lib/sql_helper.py diff --git a/autotester/autotester/testers/py/markus_python_tester.py b/src/autotester/testers/py/markus_python_tester.py similarity index 100% rename from autotester/autotester/testers/py/markus_python_tester.py rename to src/autotester/testers/py/markus_python_tester.py diff --git a/autotester/autotester/testers/py/specs/settings_schema.json b/src/autotester/testers/py/specs/settings_schema.json similarity index 100% rename from autotester/autotester/testers/py/specs/settings_schema.json rename to src/autotester/testers/py/specs/settings_schema.json diff --git a/src/autotester/testers/py/tests/__init__.py b/src/autotester/testers/py/tests/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/autotester/testers/py/tests/script_files/__init__.py b/src/autotester/testers/py/tests/script_files/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/autotester/autotester/testers/py/tests/script_files/data1.sql b/src/autotester/testers/py/tests/script_files/data1.sql similarity index 100% rename from autotester/autotester/testers/py/tests/script_files/data1.sql rename to src/autotester/testers/py/tests/script_files/data1.sql diff --git a/autotester/autotester/testers/py/tests/script_files/data2.sql b/src/autotester/testers/py/tests/script_files/data2.sql similarity index 100% rename from autotester/autotester/testers/py/tests/script_files/data2.sql rename to src/autotester/testers/py/tests/script_files/data2.sql diff --git a/autotester/autotester/testers/py/tests/script_files/schema.ddl b/src/autotester/testers/py/tests/script_files/schema.ddl similarity index 100% rename from autotester/autotester/testers/py/tests/script_files/schema.ddl rename to src/autotester/testers/py/tests/script_files/schema.ddl diff --git a/autotester/autotester/testers/py/tests/script_files/test.py b/src/autotester/testers/py/tests/script_files/test.py similarity index 100% rename from autotester/autotester/testers/py/tests/script_files/test.py rename to 
src/autotester/testers/py/tests/script_files/test.py diff --git a/autotester/autotester/testers/py/tests/script_files/test2.py b/src/autotester/testers/py/tests/script_files/test2.py similarity index 100% rename from autotester/autotester/testers/py/tests/script_files/test2.py rename to src/autotester/testers/py/tests/script_files/test2.py diff --git a/autotester/autotester/testers/py/tests/script_files/test_sql.py b/src/autotester/testers/py/tests/script_files/test_sql.py similarity index 100% rename from autotester/autotester/testers/py/tests/script_files/test_sql.py rename to src/autotester/testers/py/tests/script_files/test_sql.py diff --git a/autotester/autotester/testers/py/tests/specs.json b/src/autotester/testers/py/tests/specs.json similarity index 100% rename from autotester/autotester/testers/py/tests/specs.json rename to src/autotester/testers/py/tests/specs.json diff --git a/src/autotester/testers/py/tests/student_files/__init__.py b/src/autotester/testers/py/tests/student_files/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/autotester/autotester/testers/py/tests/student_files/submission.py b/src/autotester/testers/py/tests/student_files/submission.py similarity index 100% rename from autotester/autotester/testers/py/tests/student_files/submission.py rename to src/autotester/testers/py/tests/student_files/submission.py diff --git a/autotester/autotester/testers/py/tests/student_files/submission.sql b/src/autotester/testers/py/tests/student_files/submission.sql similarity index 100% rename from autotester/autotester/testers/py/tests/student_files/submission.sql rename to src/autotester/testers/py/tests/student_files/submission.sql diff --git a/src/autotester/testers/pyta/__init__.py b/src/autotester/testers/pyta/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/autotester/autotester/testers/pyta/bin/create_environment.sh b/src/autotester/testers/pyta/bin/create_environment.sh similarity index 100% rename from autotester/autotester/testers/pyta/bin/create_environment.sh rename to src/autotester/testers/pyta/bin/create_environment.sh diff --git a/autotester/autotester/testers/pyta/bin/install.sh b/src/autotester/testers/pyta/bin/install.sh similarity index 100% rename from autotester/autotester/testers/pyta/bin/install.sh rename to src/autotester/testers/pyta/bin/install.sh diff --git a/autotester/autotester/testers/pyta/bin/requirements.txt b/src/autotester/testers/pyta/bin/requirements.txt similarity index 100% rename from autotester/autotester/testers/pyta/bin/requirements.txt rename to src/autotester/testers/pyta/bin/requirements.txt diff --git a/autotester/autotester/testers/pyta/bin/uninstall.sh b/src/autotester/testers/pyta/bin/uninstall.sh similarity index 100% rename from autotester/autotester/testers/pyta/bin/uninstall.sh rename to src/autotester/testers/pyta/bin/uninstall.sh diff --git a/autotester/autotester/testers/pyta/markus_pyta_tester.py b/src/autotester/testers/pyta/markus_pyta_tester.py similarity index 100% rename from autotester/autotester/testers/pyta/markus_pyta_tester.py rename to src/autotester/testers/pyta/markus_pyta_tester.py diff --git a/autotester/autotester/testers/pyta/specs/settings_schema.json b/src/autotester/testers/pyta/specs/settings_schema.json similarity index 100% rename from autotester/autotester/testers/pyta/specs/settings_schema.json rename to src/autotester/testers/pyta/specs/settings_schema.json diff --git a/src/autotester/testers/pyta/tests/__init__.py 
b/src/autotester/testers/pyta/tests/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/autotester/autotester/testers/pyta/tests/specs.json b/src/autotester/testers/pyta/tests/specs.json similarity index 100% rename from autotester/autotester/testers/pyta/tests/specs.json rename to src/autotester/testers/pyta/tests/specs.json diff --git a/src/autotester/testers/pyta/tests/student_files/__init__.py b/src/autotester/testers/pyta/tests/student_files/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/autotester/autotester/testers/pyta/tests/student_files/submission.py b/src/autotester/testers/pyta/tests/student_files/submission.py similarity index 100% rename from autotester/autotester/testers/pyta/tests/student_files/submission.py rename to src/autotester/testers/pyta/tests/student_files/submission.py diff --git a/src/autotester/testers/racket/__init__.py b/src/autotester/testers/racket/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/autotester/autotester/testers/racket/bin/install.sh b/src/autotester/testers/racket/bin/install.sh similarity index 100% rename from autotester/autotester/testers/racket/bin/install.sh rename to src/autotester/testers/racket/bin/install.sh diff --git a/autotester/autotester/testers/racket/bin/uninstall.sh b/src/autotester/testers/racket/bin/uninstall.sh similarity index 100% rename from autotester/autotester/testers/racket/bin/uninstall.sh rename to src/autotester/testers/racket/bin/uninstall.sh diff --git a/autotester/autotester/testers/racket/lib/markus.rkt b/src/autotester/testers/racket/lib/markus.rkt similarity index 100% rename from autotester/autotester/testers/racket/lib/markus.rkt rename to src/autotester/testers/racket/lib/markus.rkt diff --git a/autotester/autotester/testers/racket/markus_racket_tester.py b/src/autotester/testers/racket/markus_racket_tester.py similarity index 100% rename from autotester/autotester/testers/racket/markus_racket_tester.py rename to src/autotester/testers/racket/markus_racket_tester.py diff --git a/autotester/autotester/testers/racket/specs/settings_schema.json b/src/autotester/testers/racket/specs/settings_schema.json similarity index 100% rename from autotester/autotester/testers/racket/specs/settings_schema.json rename to src/autotester/testers/racket/specs/settings_schema.json diff --git a/autotester/autotester/testers/racket/tests/script_files/test.rkt b/src/autotester/testers/racket/tests/script_files/test.rkt similarity index 100% rename from autotester/autotester/testers/racket/tests/script_files/test.rkt rename to src/autotester/testers/racket/tests/script_files/test.rkt diff --git a/autotester/autotester/testers/racket/tests/specs.json b/src/autotester/testers/racket/tests/specs.json similarity index 100% rename from autotester/autotester/testers/racket/tests/specs.json rename to src/autotester/testers/racket/tests/specs.json diff --git a/autotester/autotester/testers/racket/tests/student_files/submission.rkt b/src/autotester/testers/racket/tests/student_files/submission.rkt similarity index 100% rename from autotester/autotester/testers/racket/tests/student_files/submission.rkt rename to src/autotester/testers/racket/tests/student_files/submission.rkt diff --git a/src/autotester/testers/tests/.gitkeep b/src/autotester/testers/tests/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/src/autotester/tests/__init__.py b/src/autotester/tests/__init__.py new file mode 100644 index 00000000..e69de29b diff --git 
a/autotester/autotester/tests/autotest_enqueuer_test.py b/src/autotester/tests/autotest_enqueuer_test.py similarity index 100% rename from autotester/autotester/tests/autotest_enqueuer_test.py rename to src/autotester/tests/autotest_enqueuer_test.py diff --git a/autotester/autotester/tests/config_default.py b/src/autotester/tests/config_default.py similarity index 100% rename from autotester/autotester/tests/config_default.py rename to src/autotester/tests/config_default.py From d50dd2f69ebe836166a71205e23db0e6874a7b33 Mon Sep 17 00:00:00 2001 From: mishaschwartz Date: Tue, 7 Jan 2020 15:06:31 -0500 Subject: [PATCH 13/46] unused files: remove out of date error checker as well as use the new start-stop.sh file to do initial start up --- bin/install.sh | 3 +-- bin/rq_fail_queue_contents.py | 5 ----- 2 files changed, 1 insertion(+), 7 deletions(-) delete mode 100644 bin/rq_fail_queue_contents.py diff --git a/bin/install.sh b/bin/install.sh index 3d2d7465..c7090999 100755 --- a/bin/install.sh +++ b/bin/install.sh @@ -116,8 +116,7 @@ start_workers() { echo "[AUTOTEST-INSTALL] Generating supervisor config at '${supervisorconf}' and starting rq workers" sudo -u ${SERVERUSEREFFECTIVE} -- bash -c "source ${servervenv} && ${SERVERDIR}/generate_supervisord_conf.py ${supervisorconf} ${worker_users} && - cd ${LOGSDIR} && - supervisord -c ${supervisorconf} && + ${BINDIR}/start-stop.sh start deactivate" } diff --git a/bin/rq_fail_queue_contents.py b/bin/rq_fail_queue_contents.py deleted file mode 100644 index dc0af95b..00000000 --- a/bin/rq_fail_queue_contents.py +++ /dev/null @@ -1,5 +0,0 @@ -import autotest_server as ats - -with ats.rq.Connection(ats.redis_connection()): - for job in ats.rq.get_failed_queue().jobs: - print(job.exc_info) \ No newline at end of file From 66df8fcffba05f3fb8da932631203d25b6cd700c Mon Sep 17 00:00:00 2001 From: mishaschwartz Date: Fri, 24 Jan 2020 17:50:18 -0500 Subject: [PATCH 14/46] docker: WIP creating a docker development environment --- .dockerfiles/Dockerfile | 19 + .dockerfiles/docker-config.yml | 12 + .dockerfiles/entrypoint-dev.sh | 10 + .gitignore | 4 +- bin/default_tester_venv_requirements.txt | 0 bin/generate_supervisord_conf.py | 9 +- bin/install.sh | 490 ++++++++++-------- bin/start-stop.sh | 64 +-- doc/config_example.yml | 79 ++- docker-compose.yml | 54 ++ setup.py | 6 +- src/autotester/config.py | 4 + .../config_defaults/config_default.yml | 5 +- .../config_defaults/config_env_vars.yml | 3 +- .../resources/postgresql/__init__.py | 2 +- src/autotester/server/server.py | 2 +- .../server/utils/user_management.py | 8 +- 17 files changed, 472 insertions(+), 299 deletions(-) create mode 100644 .dockerfiles/Dockerfile create mode 100644 .dockerfiles/docker-config.yml create mode 100755 .dockerfiles/entrypoint-dev.sh create mode 100644 bin/default_tester_venv_requirements.txt create mode 100644 docker-compose.yml diff --git a/.dockerfiles/Dockerfile b/.dockerfiles/Dockerfile new file mode 100644 index 00000000..3763e5bd --- /dev/null +++ b/.dockerfiles/Dockerfile @@ -0,0 +1,19 @@ +ARG UBUNTU_VERSION + +FROM ubuntu:$UBUNTU_VERSION + +ARG LOGIN_USER + +RUN apt-get update && \ + apt-get -y install sudo + +# Create a directory for the app code (keep the name generic) +RUN mkdir -p /app + +RUN useradd -ms /bin/bash $LOGIN_USER && \ + usermod -aG sudo $LOGIN_USER && \ + echo "$LOGIN_USER ALL=(ALL) NOPASSWD:ALL" | sudo tee "/etc/sudoers.d/$LOGIN_USER" + +USER $LOGIN_USER + +WORKDIR /app diff --git a/.dockerfiles/docker-config.yml b/.dockerfiles/docker-config.yml new file
mode 100644 index 00000000..35f27642 --- /dev/null +++ b/.dockerfiles/docker-config.yml @@ -0,0 +1,12 @@ +workers: + - n: 1 + queues: + - student + - single + - batch +users: + workers: + - name: autotst0 + - name: autotst1 + - name: autotst2 + - name: autotst3 diff --git a/.dockerfiles/entrypoint-dev.sh b/.dockerfiles/entrypoint-dev.sh new file mode 100755 index 00000000..c2d0dfd5 --- /dev/null +++ b/.dockerfiles/entrypoint-dev.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash + +set -e + +if [ ! -f /.installed ]; then + /app/bin/install.sh -p '3.8' --docker + sudo touch /.installed +fi + +exec "$@" diff --git a/.gitignore b/.gitignore index 9789c4f2..07cd89f9 100644 --- a/.gitignore +++ b/.gitignore @@ -7,11 +7,13 @@ __pycache__ *.egg-info venv +# bin +bin/kill_worker_procs + # server server/venv server/workspace server/bin/kill_worker_procs -markus_config.rb # testers testers/testers/*/specs/.installed diff --git a/bin/default_tester_venv_requirements.txt b/bin/default_tester_venv_requirements.txt new file mode 100644 index 00000000..e69de29b diff --git a/bin/generate_supervisord_conf.py b/bin/generate_supervisord_conf.py index 141ddb63..3d5dabea 100755 --- a/bin/generate_supervisord_conf.py +++ b/bin/generate_supervisord_conf.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 -import config +from autotester.config import config import sys import os import shutil @@ -36,8 +36,7 @@ def write_conf_file(conf_filename, user_names): try: - rkw = config.REDIS_CONNECTION_KWARGS - redis_url = '--url redis://{}:{}/{}'.format(rkw['host'], rkw['port'], rkw['db']) + redis_url = f'--url {config["redis", "url"]}' except KeyError: redis_url = '' @@ -45,7 +44,9 @@ def write_conf_file(conf_filename, user_names): f.write(HEADER) user_name_set = set(user_names) enough_users = True - for numprocs, queues in config.WORKERS: + for worker_data in config["workers"]: + numprocs = worker_data['n'] + queues = worker_data['queues'] if enough_users: for _ in range(numprocs): try: diff --git a/bin/install.sh b/bin/install.sh index c7090999..e81bf3a8 100755 --- a/bin/install.sh +++ b/bin/install.sh @@ -1,262 +1,328 @@ -#!/usr/bin/env bash +#!/bin/bash set -e -install_packages() { - echo "[AUTOTEST-INSTALL] Installing system packages" - sudo apt-get install "python${PYTHONVERSION}" "python${PYTHONVERSION}-venv" redis-server jq postgresql iptables +THISSCRIPT=$(readlink -f "${BASH_SOURCE[0]}") +BINDIR=$(dirname "${THISSCRIPT}") +PROJECTROOT=$(dirname "${BINDIR}") +SERVER_VENV="${PROJECTROOT}/venv" + +_check_python_version() { + # check if the python3 is at least version 3.6 + if dpkg --compare-versions "$1" 'lt' '3.6'; then + echo "[AUTOTEST-INSTALL-ERROR] Python3 must be at least version 3.6. 
Found version $1" 1>&2 + exit 1 + fi } -create_server_user() { - if [[ -z ${SERVERUSER} ]]; then - echo "[AUTOTEST-INSTALL] No dedicated server user, using '${THISUSER}'" - mkdir -p ${WORKSPACEDIR} - else - if id ${SERVERUSER} &> /dev/null; then - echo "[AUTOTEST-INSTALL] Using existing server user '${SERVERUSER}'" - else - echo "[AUTOTEST-INSTALL] Creating server user '${SERVERUSER}'" - sudo adduser --disabled-password ${SERVERUSER} - fi - sudo mkdir -p ${WORKSPACEDIR} - sudo chown ${SERVERUSER}:${SERVERUSER} ${WORKSPACEDIR} - sudo chmod u=rwx,go=rx ${WORKSPACEDIR} +set_python_version() { + # get the python version from the argument passed to this script or use python3.8 by default + if [ -z "${PYTHON_VERSION}" ]; then + PYTHON_VERSION=3.8 + else + # check if both a major and minor version have been specified + if [[ ${PYTHON_VERSION} != $(echo "${PYTHON_VERSION}" | grep -ow '^[0-9].[0-9]$') ]]; then + echo "[AUTOTEST-INSTALL-ERROR] Please specify a major and minor python version only. Found ${PYTHON_VERSION}" 1>&2 + exit 1 fi + _check_python_version "${PYTHON_VERSION}" + fi } -create_unprivileged_user() { - local username=$1 - local usertype=$2 +install_packages() { + # install required system packages + echo "[AUTOTEST-INSTALL] Installing system packages" + local debian_frontend + local apt_opts + local apt_yes - if id ${username} &> /dev/null; then - echo "[AUTOTEST-INSTALL] Reusing existing ${usertype} user '${username}'" - else - echo "[AUTOTEST-INSTALL] Creating ${usertype} user '${username}'" - sudo adduser --disabled-login --no-create-home ${username} - fi - sudo iptables -I OUTPUT -p tcp --dport 6379 -m owner --uid-owner ${username} -j REJECT - echo "${SERVERUSEREFFECTIVE} ALL=(${username}) NOPASSWD:ALL" | sudo EDITOR="tee -a" visudo + if [ -n "${NON_INTERACTIVE}" ]; then + debian_frontend=noninteractive + apt_opts=(-o 'Dpkg::Options::=--force-confdef' -o 'Dpkg::Options::=--force-confold') + apt_yes='-y' + fi + + sudo DEBIAN_FRONTEND=${debian_frontend} apt-get ${apt_yes} "${apt_opts[@]}" update + sudo DEBIAN_FRONTEND=${debian_frontend} apt-get ${apt_yes} "${apt_opts[@]}" install software-properties-common + sudo add-apt-repository ${apt_yes} ppa:deadsnakes/ppa + sudo DEBIAN_FRONTEND=${debian_frontend} apt-get ${apt_yes} "${apt_opts[@]}" install "python${PYTHON_VERSION}" \ + "python${PYTHON_VERSION}-venv" \ + redis-server \ + jq \ + postgresql-client \ + libpq-dev \ + openssh-server \ + gcc + if [ -z "${DOCKER}" ]; then + sudo DEBIAN_FRONTEND=${debian_frontend} apt-get ${apt_yes} "${apt_opts[@]}" install iptables postgresql + fi + + _check_python_version "$(python3 --version | grep -oP '\s(\d).(\d)')" } -create_worker_dir() { - local workeruser=$1 - local workerdir=${WORKERSSDIR}/${workeruser} +create_venv() { + # create a virtual environment which will be used to run the autotester and install the + # autotester package (in editable mode). 
+ echo "[AUTOTEST-INSTALL] Installing server virtual environment at '${SERVER_VENV}'" + rm -rf "${SERVER_VENV}" + "python${PYTHON_VERSION}" -m venv "${SERVER_VENV}" + + PYTHON="${SERVER_VENV}/bin/python" + + echo "[AUTOTEST-INSTALL] Installing python packages into virtual environment" + local pip="${SERVER_VENV}/bin/pip" + ${pip} install --upgrade pip + ${pip} install wheel # must be installed before requirements + ${pip} install -e "${PROJECTROOT}" +} - sudo mkdir -p ${workerdir} - sudo chown ${SERVERUSEREFFECTIVE}:${workeruser} ${workerdir} - sudo chmod ug=rwx,o=,+t ${workerdir} - redis-cli -u ${REDIS_URL} HSET ${REDISWORKERS} ${workeruser} ${workerdir} +_create_server_user() { + # create a user to run the autotester server if they do not already exist + if id "${SERVER_USER}" &> /dev/null; then + echo "[AUTOTEST-INSTALL] Using existing server user '${SERVER_USER}'" + else + echo "[AUTOTEST-INSTALL] Creating server user '${SERVER_USER}'" + local gecos + gecos=() + if [ -n "${NON_INTERACTIVE}" ]; then + gecos=('--gecos' '') + fi + sudo adduser --disabled-password "${gecos[@]}" "${SERVER_USER}" + fi } -create_worker_and_reaper_users() { - redis-cli -u ${REDIS_URL} DEL ${REDISWORKERS} > /dev/null - if [[ -z ${WORKERUSERS} ]]; then - echo "[AUTOTEST-INSTALL] No dedicated worker user, using '${SERVERUSEREFFECTIVE}'" - create_worker_dir ${SERVERUSEREFFECTIVE} - else - for workeruser in ${WORKERUSERS}; do - create_unprivileged_user ${workeruser} worker - create_worker_dir ${workeruser} - if [[ -n ${REAPERPREFIX} ]]; then - local reaperuser="${REAPERPREFIX}${workeruser}" - create_unprivileged_user ${reaperuser} reaper - sudo usermod -g ${workeruser} ${reaperuser} - fi - done +_create_unprivileged_user() { + # create a user with limited permissions: + # - no home directory + # - no access to the port used by redis-server + # - the SERVER_USER will have sudo access to this unprivileged user + local username=$1 + + if id "${username}" &> /dev/null; then + echo "[AUTOTEST-INSTALL] Reusing existing user '${username}'" + else + echo "[AUTOTEST-INSTALL] Creating user '${username}'" + local gecos + gecos=() + if [ -n "${NON_INTERACTIVE}" ]; then + gecos=('--gecos' '') + fi + sudo adduser --disabled-login --no-create-home "${gecos[@]}" "${username}" + fi + if [ -z "${DOCKER}" ]; then + sudo iptables -I OUTPUT -p tcp --dport "${REDIS_PORT}" -m owner --uid-owner "${username}" -j REJECT + else + echo "[AUTOTEST-INSTALL] worker users are not restricted from accessing redis in a docker installation" + fi + echo "${SERVER_USER} ALL=(${username}) NOPASSWD:ALL" | sudo EDITOR="tee -a" visudo +} + +_create_worker_and_reaper_users() { + # create worker users and reapers users according to the configuration settings + # all user names for these users should be unique. 
+ local worker_user + local reaper_user + + while read -r worker_user; do + read -r reaper_user + if [[ "${SERVER_USER}" != "${worker_user}" ]]; then + _create_unprivileged_user "${worker_user}" + fi + if [[ "${reaper_user}" != 'null' ]]; then + _create_unprivileged_user "${reaper_user}" + sudo usermod -g "${worker_user}" "${reaper_user}" fi + done <<< "${WORKER_AND_REAPER_USERS}" } -create_workspace_dirs() { - echo "[AUTOTEST-INSTALL] Creating workspace directories at '${WORKSPACEDIR}'" - sudo mkdir -p ${RESULTSDIR} - sudo mkdir -p ${SCRIPTSDIR} - sudo mkdir -p ${SPECSDIR} - sudo mkdir -p ${WORKERSSDIR} - sudo mkdir -p ${LOGSDIR} - sudo chown ${SERVERUSEREFFECTIVE}:${SERVERUSEREFFECTIVE} ${RESULTSDIR} ${SCRIPTSDIR} ${SPECSDIR} ${WORKERSSDIR} ${LOGSDIR} - sudo chmod u=rwx,go= ${RESULTSDIR} ${SCRIPTSDIR} ${LOGSDIR} - sudo chmod u=rwx,go=rx ${SPECSDIR} ${WORKERSSDIR} +create_users() { + # create all users required to run the autotester + _create_server_user + _create_worker_and_reaper_users } -install_venv() { - local servervenv=${SERVERDIR}/venv +_create_workspace_subdir() { + local subdir + local permissions + subdir="$1" + permissions="$2" - echo "[AUTOTEST-INSTALL] Installing server virtual environment at '${servervenv}'" - rm -rf ${servervenv} - "python${PYTHONVERSION}" -m venv ${servervenv} - source ${servervenv}/bin/activate - pip install wheel # must be installed before requirements - pip install -r ${BINDIR}/requirements.txt - deactivate + sudo mkdir -p "${subdir}" + sudo chown "${SERVER_USER}:${SERVER_USER}" "${subdir}" + sudo chmod "${permissions}" "${subdir}" } -install_default_tester_venv() { - local defaultvenv=${SPECSDIR}/$(get_config_param DEFAULT_ENV_NAME)/venv - local pth_file=${defaultvenv}/lib/python${PYTHONVERSION}/site-packages/testers.pth - - echo "[AUTOTEST-INSTALL] Installing default tester virtual environment at '${defaultvenv}'" - rm -rf ${defaultvenv} - "python${PYTHONVERSION}" -m venv ${defaultvenv} - echo ${TESTERSDIR} >| ${pth_file} - source ${defaultvenv}/bin/activate - pip install wheel - pip install -r ${BINDIR}/default_tester_requirements.txt - deactivate +_create_worker_dirs() { + # create directories for each worker use to run tests in + local worker_dir + while read -r worker_user; do + worker_dir="${WORKSPACE_SUBDIRS[WORKERS]}/${worker_user}" + mkdir -p "${worker_dir}" + sudo chown "${SERVER_USER}:${worker_user}" "${worker_dir}" + sudo chmod "ug=rwx,o=,+t" "${worker_dir}" + done <<< "${WORKER_USERS}" } -start_workers() { - local servervenv=${SERVERDIR}/venv/bin/activate - local supervisorconf=${LOGSDIR}/supervisord.conf - if [[ -z ${WORKERUSERS} ]]; then - local worker_users=${SERVERUSEREFFECTIVE} - else - local worker_users=${WORKERUSERS} - fi +create_workspace() { + # create the workspace directory and populate it with the relevant directory structure + echo "[AUTOTEST-INSTALL] Creating workspace directories at '${WORKSPACE_DIR}'" + mkdir -p "${WORKSPACE_DIR}" + sudo chown "${SERVER_USER}:${SERVER_USER}" "${WORKSPACE_DIR}" - echo "[AUTOTEST-INSTALL] Generating supervisor config at '${supervisorconf}' and starting rq workers" - sudo -u ${SERVERUSEREFFECTIVE} -- bash -c "source ${servervenv} && - ${SERVERDIR}/generate_supervisord_conf.py ${supervisorconf} ${worker_users} && - ${BINDIR}/start-stop.sh start - deactivate" + _create_workspace_subdir "${WORKSPACE_SUBDIRS[SCRIPTS]}" 'u=rwx,go=' + _create_workspace_subdir "${WORKSPACE_SUBDIRS[RESULTS]}" 'u=rwx,go=' + _create_workspace_subdir "${WORKSPACE_SUBDIRS[LOGS]}" 'u=rwx,go=' + _create_workspace_subdir 
"${WORKSPACE_SUBDIRS[SPECS]}" 'u=rwx,go=rx' + _create_workspace_subdir "${WORKSPACE_SUBDIRS[WORKERS]}" 'u=rwx,go=rx' + + _create_worker_dirs } create_worker_dbs() { - echo "[AUTOTEST-INSTALL] Creating databases for worker users" - local serverpwd=$(openssl rand -base64 32 | tr -dc 'a-zA-Z0-9' | cut -c1-15) - local pgpassfile=${LOGSDIR}/.pgpass - local pgport=$(sudo -u postgres psql -t -P format=unaligned -c "select setting from pg_settings where name = 'port';") - sudo touch ${pgpassfile} - sudo chown ${SERVERUSEREFFECTIVE}:${SERVERUSEREFFECTIVE} ${pgpassfile} - sudo chmod 600 ${pgpassfile} - sudo -u postgres psql <<-EOF - DROP ROLE IF EXISTS ${SERVERUSEREFFECTIVE}; - CREATE ROLE ${SERVERUSEREFFECTIVE} LOGIN PASSWORD '${serverpwd}'; - ALTER ROLE ${SERVERUSEREFFECTIVE} CREATEROLE; - EOF - echo -e "${serverpwd}" | sudo -u ${SERVERUSEREFFECTIVE} tee -a ${pgpassfile} > /dev/null - if [[ -z ${WORKERUSERS} ]]; then - local database="${POSTGRESPREFIX}${SERVERUSEREFFECTIVE}" - sudo -u postgres psql <<-EOF - DROP DATABASE IF EXISTS ${database}; - CREATE DATABASE ${database} OWNER ${SERVERUSEREFFECTIVE}; - REVOKE CONNECT ON DATABASE ${database} FROM PUBLIC; - EOF - else - for workeruser in ${WORKERUSERS}; do - local database="${POSTGRESPREFIX}${workeruser}" - sudo -u postgres psql <<-EOF - DROP DATABASE IF EXISTS ${database}; - DROP ROLE IF EXISTS ${workeruser}; - CREATE ROLE ${workeruser} LOGIN PASSWORD null; - CREATE DATABASE ${database} OWNER ${SERVERUSEREFFECTIVE}; - REVOKE CONNECT ON DATABASE ${database} FROM PUBLIC; - GRANT CONNECT, CREATE ON DATABASE ${database} TO ${workeruser}; - EOF - done - fi -} + echo "[AUTOTEST-INSTALL] Creating databases for worker users" + local serverpwd + local pgpassfile + local psql_string + local psql + serverpwd=$(openssl rand -base64 32 | tr -dc 'a-zA-Z0-9' | cut -c1-15) + pgpassfile="${WORKSPACE_SUBDIRS[LOGS]}/.pgpass" -compile_reaper_script() { - local reaperexe="${BINDIR}/kill_worker_procs" + if [ -z "${DOCKER}" ]; then + psql=(sudo -u postgres psql -h "${POSTGRES_HOST}" -p "${POSTGRES_PORT}") + else + psql=(psql -h "${POSTGRES_HOST}" -p "${POSTGRES_PORT}" -U postgres) + fi + + sudo touch "${pgpassfile}" + sudo chown "${SERVER_USER}:${SERVER_USER}" "${pgpassfile}" + sudo chmod 'u=rw,go=' "${pgpassfile}" + echo -e "${serverpwd}" | sudo -u "${SERVER_USER}" tee -a "${pgpassfile}" > /dev/null + + psql_string="DROP ROLE IF EXISTS ${SERVER_USER}; + CREATE ROLE ${SERVER_USER} LOGIN PASSWORD '${serverpwd}'; + ALTER ROLE ${SERVER_USER} CREATEROLE;" + "${psql[@]}" <<< "${psql_string}" + + while read -r worker_user; do + local database="${POSTGRES_PREFIX}${worker_user}" + psql_string="DROP DATABASE IF EXISTS ${database}; + CREATE DATABASE ${database} OWNER ${SERVER_USER}; + REVOKE CONNECT ON DATABASE ${database} FROM PUBLIC;" - echo "[AUTOTEST-INSTALL] Compiling reaper script at '${reaperexe}'" - gcc "${reaperexe}.c" -o ${reaperexe} - chmod ugo=r ${reaperexe} + if [[ "${worker_user}" != "${SERVER_USER}" ]]; then + psql_string="${psql_string} + DROP ROLE IF EXISTS ${worker_user}; + CREATE ROLE ${worker_user} LOGIN PASSWORD null; + " + fi + psql_string="${psql_string} + GRANT CONNECT, CREATE ON DATABASE ${database} TO ${worker_user};" + + "${psql[@]}" <<< "${psql_string}" + done <<< "${WORKER_USERS}" } -create_enqueuer_wrapper() { - local enqueuer=/usr/local/bin/autotest_enqueuer +create_default_tester_venv() { + local default_tester_venv + default_tester_venv="${WORKSPACE_SUBDIRS[SCRIPTS]}/"$(echo "${config_json}" | \ + jq --raw-output 
'._workspace_contents._default_venv_name') + "python${PYTHON_VERSION}" -m venv "${default_tester_venv}" + local pip + pip="${default_tester_venv}/bin/pip" + ${pip} install --upgrade pip + ${pip} install wheel # must be installed before requirements + ${pip} install -r "${BINDIR}/default_tester_venv_requirements.txt" +} - echo "[AUTOTEST-INSTALL] Creating enqueuer wrapper at '${enqueuer}'" - # this heredoc requires actual tabs - cat <<-EOF | sudo tee ${enqueuer} > /dev/null - #!/usr/bin/env bash +compile_reaper_script() { + local reaperexe + reaperexe="${BINDIR}/kill_worker_procs" - source ${SERVERDIR}/venv/bin/activate - ${SERVERDIR}/autotest_enqueuer.py "\$@" - EOF - sudo chown ${SERVERUSEREFFECTIVE}:${SERVERUSEREFFECTIVE} ${enqueuer} - sudo chmod u=rwx,go=r ${enqueuer} + echo "[AUTOTEST-INSTALL] Compiling reaper script at '${reaperexe}'" + gcc "${reaperexe}.c" -o "${reaperexe}" + chmod ugo=r "${reaperexe}" } -create_markus_config() { - local serverconf="" - if [[ -n ${SERVERUSER} ]]; then - serverconf="'${SERVERUSER}'" - else - serverconf="nil" - fi +start_workers() { + local supervisorconf + local worker_users + local generate_script + + supervisorconf="${WORKSPACE_SUBDIRS[LOGS]}/supervisord.conf" + worker_users=$(echo "${WORKER_USERS}" | tr '\n' ' ') + generate_script="${BINDIR}/generate_supervisord_conf.py" - echo "[AUTOTEST-INSTALL] Creating Markus web server config snippet at 'markus_config.rb'" - echo " - AUTOTEST_ON = true - AUTOTEST_STUDENT_TESTS_ON = false - AUTOTEST_STUDENT_TESTS_BUFFER_TIME = 1.hour - AUTOTEST_CLIENT_DIR = 'TODO_markus_dir' - AUTOTEST_SERVER_HOST = '$(hostname).$(dnsdomainname)' - AUTOTEST_SERVER_USERNAME = ${serverconf} - AUTOTEST_SERVER_DIR = '${WORKSPACEDIR}' - AUTOTEST_SERVER_COMMAND = 'autotest_enqueuer' - " >| markus_config.rb + echo "[AUTOTEST-INSTALL] Generating supervisor config at '${supervisorconf}' and starting rq workers" + sudo -u "${SERVER_USER}" -- bash -c "${PYTHON} ${generate_script} ${supervisorconf} ${worker_users} && + ${BINDIR}/start-stop.sh start" } suggest_next_steps() { - if [[ -n ${SERVERUSER} ]]; then - echo "[AUTOTEST-INSTALL] You must add MarkUs web server's public key to ${SERVERUSER}'s '~/.ssh/authorized_keys'" - fi - echo "[AUTOTEST-INSTALL] You may want to add '${BINDIR}/start-stop.sh start' to ${SERVERUSEREFFECTIVE}'s crontab with a @reboot time" - echo "[AUTOTEST-INSTALL] You should install the individual testers you plan to use" + echo "[AUTOTEST-INSTALL] You must add MarkUs web server's public key to ${SERVER_USER}'s '~/.ssh/authorized_keys'" + echo "[AUTOTEST-INSTALL] You may want to add '${BINDIR}/start-stop.sh start' to ${SERVER_USER}'s crontab with a @reboot time" + echo "[AUTOTEST-INSTALL] You should install the individual testers you plan to use" } -get_config_param() { - echo $(cd ${SERVERDIR} && python3 -c "import config; print(config.$1)") +load_config_settings() { + # Get the configuration settings as a json string and load config settings needed for this + # installation script + local config_json + config_json=$("${PYTHON}" -c "from autotester.config import config; print(config.to_json())") + + SERVER_USER=$(echo "${config_json}" | jq --raw-output '.users.server.name') + WORKER_AND_REAPER_USERS=$(echo "${config_json}" | jq --raw-output '.users.workers | .[] | (.name, .reaper)') + REDIS_URL=$(echo "${config_json}" | jq --raw-output '.redis.url') + REDIS_PORT=$(redis-cli --raw -u "${REDIS_URL}" CONFIG GET port | tail -1) + WORKSPACE_DIR=$(echo "${config_json}" | jq --raw-output '.workspace') + 
POSTGRES_PREFIX=$(echo "${config_json}" | jq --raw-output '.resources.postgresql._prefix') + POSTGRES_PORT=$(echo "${config_json}" | jq --raw-output '.resources.postgresql.port') + POSTGRES_HOST=$(echo "${config_json}" | jq --raw-output '.resources.postgresql.host') + WORKER_USERS=$(echo "${WORKER_AND_REAPER_USERS}" | sed -n 'p;n') + + declare -gA WORKSPACE_SUBDIRS + WORKSPACE_SUBDIRS=( + ['SCRIPTS']="${WORKSPACE_DIR}"$(echo "${config_json}" | jq --raw-output '._workspace_contents._scripts') + ['RESULTS']="${WORKSPACE_DIR}"$(echo "${config_json}" | jq --raw-output '._workspace_contents._results') + ['LOGS']="${WORKSPACE_DIR}"$(echo "${config_json}" | jq --raw-output '._workspace_contents._logs') + ['SPECS']="${WORKSPACE_DIR}"$(echo "${config_json}" | jq --raw-output '._workspace_contents._specs') + ['WORKERS']="${WORKSPACE_DIR}"$(echo "${config_json}" | jq --raw-output '._workspace_contents._workers') + ) } -# script starts here -if [[ $# -gt 0 ]]; then - echo "Usage: $0" - exit 1 -fi +# PARSE ARGUMENTS -# vars -THISSCRIPT=$(readlink -f ${BASH_SOURCE}) -BINDIR=$(dirname ${THISSCRIPT}) -SERVERDIR=$(dirname ${BINDIR}) -TESTERSDIR=$(dirname ${SERVERDIR})/testers -THISUSER=$(whoami) -PYTHONVERSION="3.8" +while [[ $# -gt 0 ]]; do + key="$1" + case $key in + -p|--python_version) + PYTHON_VERSION="$2" + shift + shift + ;; + --non-interactive) + NON_INTERACTIVE=1 + shift + ;; + --docker) + NON_INTERACTIVE=1 + DOCKER=1 + shift + ;; + *) + echo "Usage: $0 [-p|--python_version python_version] [--non-interactive] [--docker]" 1>&2 + exit 1 + ;; + esac +done -# install python here so we can parse arguments from the config file more easily +set_python_version install_packages - -SERVERUSER=$(get_config_param SERVER_USER) -if [[ -n ${SERVERUSER} ]]; then - SERVERUSEREFFECTIVE=${SERVERUSER} -else - SERVERUSEREFFECTIVE=${THISUSER} -fi -WORKERUSERS=$(get_config_param WORKER_USERS) -WORKSPACEDIR=$(get_config_param WORKSPACE_DIR) -SPECSDIR=${WORKSPACEDIR}/$(get_config_param SPECS_DIR_NAME) -RESULTSDIR=${WORKSPACEDIR}/$(get_config_param RESULTS_DIR_NAME) -SCRIPTSDIR=${WORKSPACEDIR}/$(get_config_param SCRIPTS_DIR_NAME) -WORKERSSDIR=${WORKSPACEDIR}/$(get_config_param WORKERS_DIR_NAME) -LOGSDIR=${WORKSPACEDIR}/$(get_config_param LOGS_DIR_NAME) -REDISPREFIX=$(get_config_param REDIS_PREFIX) -REDISWORKERS=${REDISPREFIX}$(get_config_param REDIS_WORKERS_HASH) -REAPERPREFIX=$(get_config_param REAPER_USER_PREFIX) -POSTGRESPREFIX=$(get_config_param POSTGRES_PREFIX) -REDIS_URL=$(get_config_param REDIS_URL) - -# main -create_server_user -create_worker_and_reaper_users -create_workspace_dirs -create_worker_dbs -install_venv -install_default_tester_venv +create_venv +load_config_settings +create_users +create_workspace +create_default_tester_venv compile_reaper_script -create_enqueuer_wrapper -create_markus_config +create_worker_dbs start_workers suggest_next_steps diff --git a/bin/start-stop.sh b/bin/start-stop.sh index 2dde203a..5cfc08ae 100755 --- a/bin/start-stop.sh +++ b/bin/start-stop.sh @@ -2,50 +2,55 @@ set -e +THISSCRIPT=$(readlink -f "${BASH_SOURCE[0]}") +THISDIR=$(dirname "${THISSCRIPT}") +PROJECTROOT=$(dirname "${THISDIR}") +PYTHON="${PROJECTROOT}/venv/bin/python" +RQ="${PROJECTROOT}/venv/bin/rq" +SUPERVISORD="${PROJECTROOT}/venv/bin/supervisord" + start_supervisor() { - local pid_file=${LOGSDIR}/supervisord.pid - if [ -f ${pid_file} ]; then - echo "Supervisor appears to be running already (PID: $(cat ${pid_file}))" >&2 + local pid_file + pid_file="${LOGS_DIR}/supervisord.pid" + if [ -f "${pid_file}" ]; then + 
local supervisor_pid + supervisor_pid=$(cat "${pid_file}") + echo "Supervisor appears to be running already (PID: ${supervisor_pid})" >&2 exit 1 fi - pushd ${LOGSDIR} > /dev/null - supervisord -c supervisord.conf - popd > /dev/null + (cd "${LOGS_DIR}" && ${SUPERVISORD} -c supervisord.conf) } stop_supervisor() { - local pid_file=${LOGSDIR}/supervisord.pid - if [ ! -f ${pid_file} ]; then + local pid_file + pid_file="${LOGS_DIR}/supervisord.pid" + if [ ! -f "${pid_file}" ]; then echo 'Supervisor appears to be stopped already' >&2 exit 1 fi - kill $(cat ${pid_file}) + local supervisor_pid + supervisor_pid=$(cat "${pid_file}") + kill "${supervisor_pid}" } -get_config_param() { - echo $(cd ${SERVERDIR} && python3 -c "import config; print(config.$1)") +load_config_settings() { + # Get the configuration settings as a json string and load config settings needed for this + # installation script + local config_json + config_json=$("${PYTHON}" -c "from autotester.config import config; print(config.to_json())") + + SERVER_USER=$(echo "${config_json}" | jq --raw-output '.users.server.name') + WORKSPACE_DIR=$(echo "${config_json}" | jq --raw-output '.workspace') + LOGS_DIR="${WORKSPACE_DIR}/"$(echo "${config_json}" | jq --raw-output '._workspace_contents._logs') + REDIS_URL=$(echo "${config_json}" | jq --raw-output '.redis.url') } # script starts here -THISSCRIPT=$(readlink -f ${BASH_SOURCE}) -THISDIR=$(dirname ${THISSCRIPT}) -SERVERDIR=$(dirname ${THISDIR}) -CONFIG=${SERVERDIR}/config.py - -source ${SERVERDIR}/venv/bin/activate -SERVERUSER=$(get_config_param SERVERUSER) -WORKSPACEDIR=$(get_config_param WORKSPACE_DIR) -LOGSDIR=${WORKSPACEDIR}/$(get_config_param LOGS_DIR_NAME) +load_config_settings -if [[ -n ${SERVERUSER} ]]; then - SERVERUSEREFFECTIVE=${SERVERUSER} -else - SERVERUSEREFFECTIVE=$(whoami) -fi - -if [[ "$(whoami)" != "${SERVERUSEREFFECTIVE}" ]]; then - echo "Please run this script as user: ${SERVERUSEREFFECTIVE}" >&2 +if [[ "$(whoami)" != "${SERVER_USER}" ]]; then + echo "Please run this script as user: ${SERVER_USER}" >&2 exit 2 fi @@ -61,7 +66,8 @@ case $1 in start_supervisor ;; stat) - rq info ${@:2} + "${RQ}" info --url "${REDIS_URL}" "${@:2}" + ;; *) echo "Usage: $0 [start | stop | restart | stat]" >&2 exit 1 diff --git a/doc/config_example.yml b/doc/config_example.yml index 3c763f40..509196c6 100644 --- a/doc/config_example.yml +++ b/doc/config_example.yml @@ -1,40 +1,39 @@ -#workspace: !ENV ${HOME}/.markus-autotesting/workspace -# -#queues: -# - name: batch -# schema: {'type': 'object', 'properties': {'batch_id': {'type': 'number'}}} -# - name: single -# schema: {'type': 'object', 'properties': {'batch_id': {'type': 'null'}, 'user_type': {'const': 'Admin'}}} -# - name: student -# schema: {'type': 'object', 'properties': {'batch_id': {'type': 'null'}, 'user_type': {'const': 'Student'}}} -# -#workers: -# - n: 1 -# queues: -# - student -# - single -# - batch -# -#users: -# server: -# name: !ENV ${USER} -# workers: -# - name: !ENV ${USER} -# reaper: null -# -#redis: -# url: redis://127.0.0.1:6379/0 -# -#rlimit_settings: -# nproc: -# - 300 -# - 300 -# -#resources: -# port: -# min: 50000 -# max: 65535 -# postgresql: -# database: autotest -# password: null -# host: localhost \ No newline at end of file +workspace: !ENV ${HOME}/.markus-autotesting/workspace + +workers: + - n: 1 + queues: + - student + - single + - batch + +users: + server: + name: !ENV ${USER} + workers: + - name: !ENV ${USER} + reaper: null + +redis: + url: redis://127.0.0.1:6379/0 + +rlimit_settings: + nproc: + - 300 + - 
300 + +resources: + port: + min: 50000 + max: 65535 + postgresql: + port: 5432 + host: localhost + +queues: + - name: batch + schema: {'type': 'object', 'properties': {'batch_id': {'type': 'number'}}} + - name: single + schema: {'type': 'object', 'properties': {'batch_id': {'type': 'null'}, 'user_type': {'const': 'Admin'}}} + - name: student + schema: {'type': 'object', 'properties': {'batch_id': {'type': 'null'}, 'user_type': {'const': 'Student'}}} diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 00000000..6b71d895 --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,54 @@ +version: '3.7' + +services: + app: &app + build: + context: . + dockerfile: ./.dockerfiles/Dockerfile + args: + UBUNTU_VERSION: '18.04' + LOGIN_USER: 'docker' + image: markus-autotest-dev:1.0.0 + stdin_open: true + tty: true + user: docker + volumes: + - .:/app:cached + environment: + - USER=docker + - REDIS_URL=redis://redis_autotest:6379/ + - PGHOST=postgres_autotest + - PGPORT=5432 + - EDITOR=vi + - MARKUS_AUTOTESTER_CONFIG=/app/.dockerfiles/docker-config.yml + depends_on: + - postgres_autotest + - redis_autotest + + autotest: + <<: *app + entrypoint: .dockerfiles/entrypoint-dev.sh + command: '/bin/bash' + + postgres_autotest: + image: postgres:10 + volumes: + - .psqlrc:/root/.psqlrc:ro + - postgres_autotest:/var/lib/postgresql/data + - ./log:/root/log:cached + environment: + - PSQL_HISTFILE=/root/log/.psql_history + ports: + - '45432:5432' + + redis_autotest: + image: redis:3.2-alpine + volumes: + - redis_autotest:/data + ports: + - 6379 + +volumes: + postgres_autotest: + redis_autotest: + diff --git a/setup.py b/setup.py index 4fbc4cd6..6c25b359 100644 --- a/setup.py +++ b/setup.py @@ -16,9 +16,9 @@ 'redis==3.3.8', 'requests==2.22.0', 'rq==1.1.0', - 'supervisor==4.0.4', + 'supervisor==4.1.0', 'PyYAML==5.1.2', - 'psycopg2-binary==2.8.3', + 'psycopg2-binary==2.8.4', 'markusapi==0.0.1', 'jsonschema==3.0.2' ], @@ -29,4 +29,4 @@ # data_files=[('config_defaults', ['config/config_default.yml', 'config/config_env_vars.yml'])], entry_points={ 'console_scripts': 'markus_autotester = autotester.cli:cli' - }) \ No newline at end of file + }) diff --git a/src/autotester/config.py b/src/autotester/config.py index 810d22fc..f5d2b918 100644 --- a/src/autotester/config.py +++ b/src/autotester/config.py @@ -3,6 +3,7 @@ import os import re +import json from collections.abc import Mapping import yaml @@ -50,6 +51,9 @@ def __getitem__(self, key): return d raise + def to_json(self): + return json.dumps(self._settings) + @classmethod def _merge_dicts(cls, dicts): try: diff --git a/src/autotester/config_defaults/config_default.yml b/src/autotester/config_defaults/config_default.yml index 20269d9f..76210679 100644 --- a/src/autotester/config_defaults/config_default.yml +++ b/src/autotester/config_defaults/config_default.yml @@ -21,7 +21,6 @@ redis: url: 'redis://127.0.0.1:6379/0' _prefix: 'redis:' _current_test_script_hash: current_test_scripts - _workers_hash: workers _pop_interval_hash: pop_interval @@ -51,8 +50,7 @@ resources: max: 65535 postgresql: _prefix: autotest_ - database: autotest - password: null + port: 5432 host: localhost _workspace_contents: @@ -60,6 +58,7 @@ _workspace_contents: _results: results _specs: specs _logs: logs + _workers: workers _default_venv_name: defaultvenv _settings_file: settings.json _files_dir: files diff --git a/src/autotester/config_defaults/config_env_vars.yml b/src/autotester/config_defaults/config_env_vars.yml index d40fbb96..e0dab0fc 100644 --- 
a/src/autotester/config_defaults/config_env_vars.yml +++ b/src/autotester/config_defaults/config_env_vars.yml @@ -12,6 +12,5 @@ users: resources: postgresql: - database: !ENV ${PGDATABASE} - password: !ENV ${PGPASSWORD} + port: !ENV ${PGPORT} host: !ENV ${PGHOST} \ No newline at end of file diff --git a/src/autotester/resources/postgresql/__init__.py b/src/autotester/resources/postgresql/__init__.py index a923618d..2b4153a4 100644 --- a/src/autotester/resources/postgresql/__init__.py +++ b/src/autotester/resources/postgresql/__init__.py @@ -4,7 +4,7 @@ import secrets import string from psycopg2.extensions import AsIs -from autotester import config +from autotester.config import config POSTGRES_PREFIX = config['resources', 'postgresql', '_prefix'] PGPASSFILE = os.path.join(config['workspace'], config['_workspace_contents', '_logs'], '.pgpass') diff --git a/src/autotester/server/server.py b/src/autotester/server/server.py index ee36b5c5..1dd570b7 100755 --- a/src/autotester/server/server.py +++ b/src/autotester/server/server.py @@ -11,7 +11,7 @@ from markusapi import Markus from autotester.exceptions import TesterCreationError -from autotester import config +from autotester.config import config from autotester.server.hooks_context.hooks_context import Hooks from autotester.server.utils.string_management import loads_partial_json, decode_if_bytes, stringify from autotester.server.utils.user_management import get_reaper_username, current_user, tester_user diff --git a/src/autotester/server/utils/user_management.py b/src/autotester/server/utils/user_management.py index 21355e19..8179f91a 100644 --- a/src/autotester/server/utils/user_management.py +++ b/src/autotester/server/utils/user_management.py @@ -1,7 +1,7 @@ import os import pwd from autotester.exceptions import TesterUserError -from autotester import config +from autotester.config import config from autotester.server.utils.string_management import decode_if_bytes from autotester.server.utils.redis_management import redis_connection @@ -25,8 +25,10 @@ def tester_user(): if user_name is None: raise TesterUserError('No worker users available to run this job') - user_workspace = r.hget(WORKERS_HASH, user_name) - if user_workspace is None: + user_workspace = os.path.join(config['workspace'], + config['_workspace_contents', '_workers'], + user_name) + if not os.path.isdir(user_workspace): raise TesterUserError(f'No workspace directory for user: {user_name}') return user_name, decode_if_bytes(user_workspace) From 1571f13d0ff29dbcff2af6f7bb698624af0f2b9d Mon Sep 17 00:00:00 2001 From: mishaschwartz Date: Mon, 3 Feb 2020 11:33:18 -0500 Subject: [PATCH 15/46] cli: fix imports --- src/autotester/cli.py | 13 ++++--------- .../{ => server/utils}/form_validation.py | 0 2 files changed, 4 insertions(+), 9 deletions(-) rename src/autotester/{ => server/utils}/form_validation.py (100%) diff --git a/src/autotester/cli.py b/src/autotester/cli.py index 1f95e403..ba1d865d 100755 --- a/src/autotester/cli.py +++ b/src/autotester/cli.py @@ -14,16 +14,12 @@ from autotester.exceptions import * from autotester.server.utils.redis_management import redis_connection, get_avg_pop_interval, test_script_directory from autotester.server.utils.file_management import ignore_missing_dir_error -from autotester import config -from autotester.server import form_validation +from autotester.config import config +from autotester.server.utils import form_validation from autotester.server.server import run_test, update_test_specs SETTINGS_FILENAME = config['_workspace_contents', 
'_settings_file'] -### ERROR CLASSES ### - -### HELPER FUNCTIONS ### - def _format_job_id(markus_address, run_id, **_kw): """ @@ -113,9 +109,6 @@ def _get_job_timeout(test_specs, test_categories, multiplier=1.5): raise TestParameterError(f'there are no tests of the given categories: {test_categories}') -### COMMAND FUNCTIONS ### - - @_clean_on_error def enqueue_test(user_type, batch_id, **kw): """ @@ -219,6 +212,7 @@ def parse_arg_file(arg_file): 'cancel': cancel_test, 'schema': get_schema} + def cli(): parser = argparse.ArgumentParser() @@ -237,5 +231,6 @@ def cli(): print(str(e)) sys.exit(1) + if __name__ == '__main__': cli() diff --git a/src/autotester/form_validation.py b/src/autotester/server/utils/form_validation.py similarity index 100% rename from src/autotester/form_validation.py rename to src/autotester/server/utils/form_validation.py From 366531756cb9bd050ab2b402b9f8ceb3c589e597 Mon Sep 17 00:00:00 2001 From: mishaschwartz Date: Tue, 4 Feb 2020 16:01:52 -0500 Subject: [PATCH 16/46] general: fixes to make sure all libraries and config options are being set properly --- bin/generate_supervisord_conf.py | 8 +-- bin/install.sh | 38 ++++++++++--- bin/tester_schema_skeleton.json | 54 ------------------- src/autotester/cli.py | 2 +- src/autotester/resources/ports/__init__.py | 4 +- src/autotester/server/server.py | 6 +-- .../server/utils/file_management.py | 10 ++-- .../server/utils/redis_management.py | 4 +- .../server/utils/resource_management.py | 4 +- .../server/utils/user_management.py | 2 - 10 files changed, 52 insertions(+), 80 deletions(-) delete mode 100644 bin/tester_schema_skeleton.json diff --git a/bin/generate_supervisord_conf.py b/bin/generate_supervisord_conf.py index 3d5dabea..d0c51414 100755 --- a/bin/generate_supervisord_conf.py +++ b/bin/generate_supervisord_conf.py @@ -3,7 +3,6 @@ from autotester.config import config import sys import os -import shutil import argparse HEADER = """[supervisord] @@ -34,7 +33,7 @@ THIS_DIR = os.path.dirname(os.path.abspath(__file__)) -def write_conf_file(conf_filename, user_names): +def write_conf_file(rq, conf_filename, user_names): try: redis_url = f'--url {config["redis", "url"]}' except KeyError: @@ -58,7 +57,7 @@ def write_conf_file(conf_filename, user_names): break queue_str = ' '.join(queues) c = CONTENT.format(worker_user=worker_user, - rq=shutil.which('rq'), + rq=rq, worker_args=redis_url, queues=queue_str, numprocs=1, @@ -67,8 +66,9 @@ def write_conf_file(conf_filename, user_names): if __name__ == '__main__': parser = argparse.ArgumentParser() + parser.add_argument('rq') parser.add_argument('conf_filename') parser.add_argument('user_names', nargs='+') args = parser.parse_args() - write_conf_file(args.conf_filename, args.user_names) + write_conf_file(args.rq, args.conf_filename, args.user_names) diff --git a/bin/install.sh b/bin/install.sh index e81bf3a8..9cfdd6fa 100755 --- a/bin/install.sh +++ b/bin/install.sh @@ -188,7 +188,13 @@ create_worker_dbs() { pgpassfile="${WORKSPACE_SUBDIRS[LOGS]}/.pgpass" if [ -z "${DOCKER}" ]; then - psql=(sudo -u postgres psql -h "${POSTGRES_HOST}" -p "${POSTGRES_PORT}") + local pghost_args + if [[ "${POSTGRES_HOST}" == 'localhost' ]]; then + pghost_args='' # this allows for local peer authentication if it is configured + else + pghost_args="-h ${POSTGRES_HOST}" + fi + psql=(sudo -u postgres psql "${pghost_args}" -p "${POSTGRES_PORT}") else psql=(psql -h "${POSTGRES_HOST}" -p "${POSTGRES_PORT}" -U postgres) fi @@ -196,10 +202,11 @@ create_worker_dbs() { sudo touch "${pgpassfile}" sudo chown 
"${SERVER_USER}:${SERVER_USER}" "${pgpassfile}" sudo chmod 'u=rw,go=' "${pgpassfile}" - echo -e "${serverpwd}" | sudo -u "${SERVER_USER}" tee -a "${pgpassfile}" > /dev/null + echo -e "${serverpwd}" | sudo -u "${SERVER_USER}" tee "${pgpassfile}" > /dev/null psql_string="DROP ROLE IF EXISTS ${SERVER_USER}; - CREATE ROLE ${SERVER_USER} LOGIN PASSWORD '${serverpwd}'; + CREATE ROLE ${SERVER_USER}; + ALTER ROLE ${SERVER_USER} LOGIN PASSWORD '${serverpwd}'; ALTER ROLE ${SERVER_USER} CREATEROLE;" "${psql[@]}" <<< "${psql_string}" @@ -224,8 +231,8 @@ create_worker_dbs() { create_default_tester_venv() { local default_tester_venv - default_tester_venv="${WORKSPACE_SUBDIRS[SCRIPTS]}/"$(echo "${config_json}" | \ - jq --raw-output '._workspace_contents._default_venv_name') + default_tester_venv="${WORKSPACE_SUBDIRS[SPECS]}/${DEFAULT_VENV_NAME}" + "python${PYTHON_VERSION}" -m venv "${default_tester_venv}" local pip pip="${default_tester_venv}/bin/pip" @@ -243,17 +250,33 @@ compile_reaper_script() { chmod ugo=r "${reaperexe}" } +create_enqueuer_wrapper() { + local enqueuer + enqueuer=/usr/local/bin/autotest_enqueuer + + echo "[AUTOTEST-INSTALL] Creating enqueuer wrapper at '${enqueuer}'" + + echo "#!/usr/bin/env bash + ${SERVER_VENV}/bin/markus_autotester \"\$@\"" | sudo tee ${enqueuer} > /dev/null + sudo chown "${SERVER_USER}:${SERVERUSER}" "${enqueuer}" + sudo chmod u=rwx,go=r ${enqueuer} + +} + start_workers() { local supervisorconf local worker_users local generate_script + local rq supervisorconf="${WORKSPACE_SUBDIRS[LOGS]}/supervisord.conf" worker_users=$(echo "${WORKER_USERS}" | tr '\n' ' ') generate_script="${BINDIR}/generate_supervisord_conf.py" + rq="${SERVER_VENV}/bin/rq" + echo "[AUTOTEST-INSTALL] Generating supervisor config at '${supervisorconf}' and starting rq workers" - sudo -u "${SERVER_USER}" -- bash -c "${PYTHON} ${generate_script} ${supervisorconf} ${worker_users} && + sudo -u "${SERVER_USER}" -- bash -c "${PYTHON} ${generate_script} ${rq} ${supervisorconf} ${worker_users} && ${BINDIR}/start-stop.sh start" } @@ -278,7 +301,7 @@ load_config_settings() { POSTGRES_PORT=$(echo "${config_json}" | jq --raw-output '.resources.postgresql.port') POSTGRES_HOST=$(echo "${config_json}" | jq --raw-output '.resources.postgresql.host') WORKER_USERS=$(echo "${WORKER_AND_REAPER_USERS}" | sed -n 'p;n') - + DEFAULT_VENV_NAME=$(echo "${config_json}" | jq --raw-output '._workspace_contents._default_venv_name') declare -gA WORKSPACE_SUBDIRS WORKSPACE_SUBDIRS=( ['SCRIPTS']="${WORKSPACE_DIR}"$(echo "${config_json}" | jq --raw-output '._workspace_contents._scripts') @@ -323,6 +346,7 @@ create_users create_workspace create_default_tester_venv compile_reaper_script +create_enqueuer_wrapper create_worker_dbs start_workers suggest_next_steps diff --git a/bin/tester_schema_skeleton.json b/bin/tester_schema_skeleton.json deleted file mode 100644 index a949f2b3..00000000 --- a/bin/tester_schema_skeleton.json +++ /dev/null @@ -1,54 +0,0 @@ -{ - "definitions": { - "files_list": { - "type": "string", - "enum": [] - }, - "test_data_categories": { - "type": "string", - "enum": [], - "enumNames": [] - }, - "extra_group_data": {}, - "installed_testers": { - "type": "string", - "enum": [] - }, - "tester_schemas": { - "oneOf": [] - } - }, - "type": "object", - "required": [ - "testers" - ], - "properties": { - "testers": { - "title": "Testers", - "type": "array", - "minItems": 1, - "items": { - "type": "object", - "required": [ - "tester_type", - "test_data" - ], - "properties": { - "tester_type": { - "$ref": 
"#/definitions/installed_testers", - "title": "Tester type" - } - }, - "dependencies": { - "tester_type": { - "$ref": "#/definitions/tester_schemas" - } - } - } - }, - "hooks_file": { - "$ref": "#/definitions/files_list", - "title": "Custom hooks file" - } - } -} \ No newline at end of file diff --git a/src/autotester/cli.py b/src/autotester/cli.py index ba1d865d..5a5c9991 100755 --- a/src/autotester/cli.py +++ b/src/autotester/cli.py @@ -168,7 +168,7 @@ def get_schema(**_kw): this_dir = os.path.dirname(os.path.abspath(__file__)) root_dir = os.path.dirname(this_dir) - with open(os.path.join(this_dir, 'bin', 'tester_schema_skeleton.json')) as f: + with open(os.path.join(this_dir, 'lib', 'tester_schema_skeleton.json')) as f: schema_skeleton = json.load(f) glob_pattern = os.path.join(root_dir, 'testers', 'testers', '*', 'specs', '.installed') diff --git a/src/autotester/resources/ports/__init__.py b/src/autotester/resources/ports/__init__.py index ca7b5c21..90f74179 100644 --- a/src/autotester/resources/ports/__init__.py +++ b/src/autotester/resources/ports/__init__.py @@ -1,6 +1,6 @@ import socket from autotester.server.utils.redis_management import redis_connection -from autotester import config +from autotester.config import config PORT_MIN = config['resources', 'port', 'min'] PORT_MAX = config['resources', 'port', 'max'] @@ -27,4 +27,4 @@ def get_available_port(host='localhost'): port = s.getsockname()[1] return str(port) except OSError: - continue \ No newline at end of file + continue diff --git a/src/autotester/server/server.py b/src/autotester/server/server.py index 1dd570b7..f5267ea9 100755 --- a/src/autotester/server/server.py +++ b/src/autotester/server/server.py @@ -26,8 +26,8 @@ HOOKS_FILENAME = config['_workspace_contents', '_hooks_file'] SETTINGS_FILENAME = config['_workspace_contents', '_settings_file'] FILES_DIRNAME = config['_workspace_contents', '_files_dir'] -TEST_SPECS_DIR = config['_workspace_contents', '_specs'] -TEST_SCRIPT_DIR = config['_workspace_contents', '_scripts'] +TEST_SPECS_DIR = os.path.join(config['workspace'], config['_workspace_contents', '_specs']) +TEST_SCRIPT_DIR = os.path.join(config['workspace'], config['_workspace_contents', '_scripts']) TESTER_IMPORT_LINE = {'custom' : 'from testers.custom.markus_custom_tester import MarkusCustomTester as Tester', 'haskell' : 'from testers.haskell.markus_haskell_tester import MarkusHaskellTester as Tester', @@ -293,7 +293,7 @@ def get_tester_root_dir(tester_type): """ this_dir = os.path.dirname(os.path.abspath(__file__)) root_dir = os.path.dirname(this_dir) - tester_dir = os.path.join(root_dir, 'testers', 'testers', tester_type) + tester_dir = os.path.join(root_dir, 'testers', tester_type) if not os.path.isdir(tester_dir): raise FileNotFoundError(f'{tester_type} is not a valid tester name') return tester_dir diff --git a/src/autotester/server/utils/file_management.py b/src/autotester/server/utils/file_management.py index 565f7fd1..3a59974b 100644 --- a/src/autotester/server/utils/file_management.py +++ b/src/autotester/server/utils/file_management.py @@ -3,8 +3,12 @@ import tempfile import shutil import fcntl +from autotester.server.utils import redis_management +from autotester.config import config from contextlib import contextmanager +FILES_DIRNAME = config['_workspace_contents', '_files_dir'] + def clean_dir_name(name): """ Return name modified so that it can be used as a unix style directory name """ return name.replace('/', '_') @@ -98,8 +102,8 @@ def copy_test_script_files(markus_address, assignment_id, 
tests_path): directory if they exist. tests_path may already exist and contain files and subdirectories. """ - test_script_outer_dir = test_script_directory(markus_address, assignment_id) - test_script_dir = os.path.join(test_script_outer_dir, TEST_SCRIPTS_FILES_DIRNAME) + test_script_outer_dir = redis_management.test_script_directory(markus_address, assignment_id) + test_script_dir = os.path.join(test_script_outer_dir, FILES_DIRNAME) if os.path.isdir(test_script_dir): with fd_open(test_script_dir) as fd: with fd_lock(fd, exclusive=False): @@ -130,4 +134,4 @@ def setup_files(files_path, tests_path, markus_address, assignment_id): if fd == 'f': permissions -= 0o111 os.chmod(file_or_dir, permissions) - return student_files, script_files \ No newline at end of file + return student_files, script_files diff --git a/src/autotester/server/utils/redis_management.py b/src/autotester/server/utils/redis_management.py index ae759587..b16a345d 100644 --- a/src/autotester/server/utils/redis_management.py +++ b/src/autotester/server/utils/redis_management.py @@ -3,7 +3,7 @@ import time from functools import wraps from autotester.server.utils import file_management, string_management -from autotester import config +from autotester.config import config CURRENT_TEST_SCRIPT_HASH = config['redis', '_current_test_script_hash'] POP_INTERVAL_HASH = config['redis', '_pop_interval_hash'] @@ -118,4 +118,4 @@ def wrapper(*args, **kwargs): return func(*args, **kwargs) finally: clean_up() - return wrapper \ No newline at end of file + return wrapper diff --git a/src/autotester/server/utils/resource_management.py b/src/autotester/server/utils/resource_management.py index 64073357..aa929d36 100644 --- a/src/autotester/server/utils/resource_management.py +++ b/src/autotester/server/utils/resource_management.py @@ -1,5 +1,5 @@ import resource -from autotester import config +from autotester.config import config RLIMIT_ADJUSTMENTS = {'nproc': 10} @@ -41,4 +41,4 @@ def set_rlimits_before_cleanup(): limit = rlimit_str2int(limit_str) soft, hard = resource.getrlimit(limit) soft = max(soft, hard) - resource.setrlimit(limit, (soft, hard)) \ No newline at end of file + resource.setrlimit(limit, (soft, hard)) diff --git a/src/autotester/server/utils/user_management.py b/src/autotester/server/utils/user_management.py index 8179f91a..0cfd9383 100644 --- a/src/autotester/server/utils/user_management.py +++ b/src/autotester/server/utils/user_management.py @@ -5,8 +5,6 @@ from autotester.server.utils.string_management import decode_if_bytes from autotester.server.utils.redis_management import redis_connection -WORKERS_HASH = config['redis', '_workers_hash'] -WORKERS = config['users', 'workers'] def current_user(): return pwd.getpwuid(os.getuid()).pw_name From 07fb86eaba21d4198c435899c12f0ec122cf2d5b Mon Sep 17 00:00:00 2001 From: mishaschwartz Date: Tue, 4 Feb 2020 16:08:00 -0500 Subject: [PATCH 17/46] schema: move schema skeleton --- .../lib/tester_schema_skeleton.json | 54 +++++++++++++++++++ 1 file changed, 54 insertions(+) create mode 100644 src/autotester/lib/tester_schema_skeleton.json diff --git a/src/autotester/lib/tester_schema_skeleton.json b/src/autotester/lib/tester_schema_skeleton.json new file mode 100644 index 00000000..a949f2b3 --- /dev/null +++ b/src/autotester/lib/tester_schema_skeleton.json @@ -0,0 +1,54 @@ +{ + "definitions": { + "files_list": { + "type": "string", + "enum": [] + }, + "test_data_categories": { + "type": "string", + "enum": [], + "enumNames": [] + }, + "extra_group_data": {}, + "installed_testers": { 
+ "type": "string", + "enum": [] + }, + "tester_schemas": { + "oneOf": [] + } + }, + "type": "object", + "required": [ + "testers" + ], + "properties": { + "testers": { + "title": "Testers", + "type": "array", + "minItems": 1, + "items": { + "type": "object", + "required": [ + "tester_type", + "test_data" + ], + "properties": { + "tester_type": { + "$ref": "#/definitions/installed_testers", + "title": "Tester type" + } + }, + "dependencies": { + "tester_type": { + "$ref": "#/definitions/tester_schemas" + } + } + } + }, + "hooks_file": { + "$ref": "#/definitions/files_list", + "title": "Custom hooks file" + } + } +} \ No newline at end of file From 1b12053bac45b47ae61bbbfac2af93890be32ffb Mon Sep 17 00:00:00 2001 From: mishaschwartz Date: Wed, 5 Feb 2020 10:30:36 -0500 Subject: [PATCH 18/46] testers: set up tester default venv properly and use the python executable directly --- bin/install.sh | 2 +- src/autotester/server/server.py | 5 ++--- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/bin/install.sh b/bin/install.sh index 9cfdd6fa..556a9574 100755 --- a/bin/install.sh +++ b/bin/install.sh @@ -231,7 +231,7 @@ create_worker_dbs() { create_default_tester_venv() { local default_tester_venv - default_tester_venv="${WORKSPACE_SUBDIRS[SPECS]}/${DEFAULT_VENV_NAME}" + default_tester_venv="${WORKSPACE_SUBDIRS[SPECS]}/${DEFAULT_VENV_NAME}/venv" "python${PYTHON_VERSION}" -m venv "${default_tester_venv}" local pip diff --git a/src/autotester/server/server.py b/src/autotester/server/server.py index f5267ea9..92c8eecd 100755 --- a/src/autotester/server/server.py +++ b/src/autotester/server/server.py @@ -119,10 +119,9 @@ def create_test_script_command(env_dir, tester_type): import_line, 'from testers.markus_test_specs import MarkusTestSpecs', f'Tester(specs=MarkusTestSpecs.from_json(sys.stdin.read())).run()'] - venv_activate = os.path.join(os.path.abspath(env_dir), 'venv', 'bin', 'activate') + python_ex = os.path.join(os.path.join(TEST_SPECS_DIR, env_dir), 'venv', 'bin', 'python') python_str = '; '.join(python_lines) - venv_str = f'source {venv_activate}' - return ' && '.join([venv_str, f'python -c "{python_str}"']) + return f'{python_ex} -c "{python_str}"' def get_env_vars(test_username): """ Return a dictionary containing all environment variables to pass to the next test """ From 1fb4c29fe0d0aa158a5df725004bae53d3035536 Mon Sep 17 00:00:00 2001 From: mishaschwartz Date: Thu, 6 Feb 2020 13:48:49 -0500 Subject: [PATCH 19/46] install: install testers and server all at once --- .gitignore | 16 ++--- bin/default_tester_venv_requirements.txt | 0 bin/install.sh | 63 ++++++++++++++++--- src/autotester/MANIFEST.in | 3 + src/autotester/setup.py | 5 +- .../testers/py/bin/create_environment.sh | 2 +- .../testers/pyta/bin/create_environment.sh | 2 +- 7 files changed, 72 insertions(+), 19 deletions(-) delete mode 100644 bin/default_tester_venv_requirements.txt create mode 100644 src/autotester/MANIFEST.in diff --git a/.gitignore b/.gitignore index 07cd89f9..3d28497c 100644 --- a/.gitignore +++ b/.gitignore @@ -11,20 +11,20 @@ venv bin/kill_worker_procs # server -server/venv -server/workspace -server/bin/kill_worker_procs +src/autotester/server/venv +src/autotester/server/workspace +src/autotester/server/bin/kill_worker_procs # testers -testers/testers/*/specs/.installed -testers/testers/*/specs/install_settings.json +src/autotester/testers/*/specs/.installed +src/autotester/testers/*/specs/install_settings.json # java -testers/testers/java/lib/.gradle -testers/testers/java/lib/build 
+src/autotester/testers/java/lib/.gradle +src/autotester/testers/java/lib/build # racket -testers/testers/racket/**/compiled/ +src/autotester/testers/racket/**/compiled/ # haskell markus_cabal diff --git a/bin/default_tester_venv_requirements.txt b/bin/default_tester_venv_requirements.txt deleted file mode 100644 index e69de29b..00000000 diff --git a/bin/install.sh b/bin/install.sh index 556a9574..31b27d7f 100755 --- a/bin/install.sh +++ b/bin/install.sh @@ -5,7 +5,11 @@ set -e THISSCRIPT=$(readlink -f "${BASH_SOURCE[0]}") BINDIR=$(dirname "${THISSCRIPT}") PROJECTROOT=$(dirname "${BINDIR}") +TESTERSROOT="${PROJECTROOT}/src/autotester" SERVER_VENV="${PROJECTROOT}/venv" +INSTALLABLE_TESTERS=(custom haskell java py pyta racket) +TESTERS=() +USAGE_MESSAGE="Usage: $0 [-p|--python_version python_version] [--non-interactive] [--docker] [-t|--testers tester ...]" _check_python_version() { # check if the python3 is at least version 3.6 @@ -238,7 +242,7 @@ create_default_tester_venv() { pip="${default_tester_venv}/bin/pip" ${pip} install --upgrade pip ${pip} install wheel # must be installed before requirements - ${pip} install -r "${BINDIR}/default_tester_venv_requirements.txt" + ${pip} install "${TESTERSROOT}" } compile_reaper_script() { @@ -280,6 +284,20 @@ start_workers() { ${BINDIR}/start-stop.sh start" } +install_testers() { + local tester + local to_install + if [[ -n ${INSTALL_ALL_TESTERS} ]]; then + to_install=( "${INSTALLABLE_TESTERS[@]}" ) + else + to_install=( "${TESTERS[@]}" ) + fi + for tester in "${to_install[@]}"; do + echo "[AUTOTEST-INSTALL] installing tester: ${tester}" + "${TESTERSROOT}/testers/${tester}/bin/install.sh" + done +} + suggest_next_steps() { echo "[AUTOTEST-INSTALL] You must add MarkUs web server's public key to ${SERVER_USER}'s '~/.ssh/authorized_keys'" echo "[AUTOTEST-INSTALL] You may want to add '${BINDIR}/start-stop.sh start' to ${SERVER_USER}'s crontab with a @reboot time" @@ -312,38 +330,69 @@ load_config_settings() { ) } -# PARSE ARGUMENTS +_add_valid_tester() { + local tester + for tester in "${INSTALLABLE_TESTERS[@]}"; do + if [[ "$1" == "${tester}" ]]; then + TESTERS=( "${TESTERS[@]}" "${tester}" ) + return 0 + fi + done + + TESTER_MESSAGE="$1 is not an installable tester. 
Choose from: ${INSTALLABLE_TESTERS[*]}\n${TESTER_MESSAGE}" + return 1 +} while [[ $# -gt 0 ]]; do key="$1" case $key in -p|--python_version) + SELECTING_TESTERS= PYTHON_VERSION="$2" - shift - shift + shift 2 ;; --non-interactive) + SELECTING_TESTERS= NON_INTERACTIVE=1 shift ;; --docker) + SELECTING_TESTERS= NON_INTERACTIVE=1 DOCKER=1 shift ;; + -a|--all-testers) + INSTALL_ALL_TESTERS=1 + shift + ;; + -t|--testers) + shift + SELECTING_TESTERS=1 + while [[ -n "${1// }" && "-t --testers" != *"$1"* ]] && _add_valid_tester "$1"; do + shift + done + ;; *) - echo "Usage: $0 [-p|--python_version python_version] [--non-interactive] [--docker]" 1>&2 - exit 1 - ;; + BAD_USAGE=1 + shift + ;; esac done +if [[ -n ${BAD_USAGE} ]]; then + [[ -n "${SELECTING_TESTERS}" && -z ${INSTALL_ALL_TESTERS} ]] && echo -e "${TESTER_MESSAGE}" 1>&2 + echo "${USAGE_MESSAGE}" 1>&2 + exit 1 +fi + set_python_version install_packages create_venv load_config_settings create_users create_workspace +install_testers create_default_tester_venv compile_reaper_script create_enqueuer_wrapper diff --git a/src/autotester/MANIFEST.in b/src/autotester/MANIFEST.in new file mode 100644 index 00000000..fc768370 --- /dev/null +++ b/src/autotester/MANIFEST.in @@ -0,0 +1,3 @@ +include testers/racket/lib/markus.rkt +graft testers/java/lib +include testers/*/specs/install_settings.json diff --git a/src/autotester/setup.py b/src/autotester/setup.py index 10a18775..110d7046 100644 --- a/src/autotester/setup.py +++ b/src/autotester/setup.py @@ -9,5 +9,6 @@ author='Misha Schwartz, Alessio Di Sandro', author_email='mschwa@cs.toronto.edu', license='MIT', - packages=find_packages(where='testers', exclude=test_exclusions), - zip_safe=False) \ No newline at end of file + include_package_data=True, + packages=['testers'] + [f'testers.{pkg}' for pkg in find_packages(where='testers', exclude=test_exclusions)], + zip_safe=False) diff --git a/src/autotester/testers/py/bin/create_environment.sh b/src/autotester/testers/py/bin/create_environment.sh index 717fe81c..899592f0 100755 --- a/src/autotester/testers/py/bin/create_environment.sh +++ b/src/autotester/testers/py/bin/create_environment.sh @@ -8,11 +8,11 @@ create_venv() { source ${VENV_DIR}/bin/activate pip install --upgrade pip pip install wheel + pip install "${TESTERS_DIR}" pip install -r "${THIS_DIR}/requirements.txt" pip install -r <(echo ${PIP_REQUIREMENTS} | sed 's/\s\+/\n/g') # sub spaces for newlines local pth_file=${VENV_DIR}/lib/python${PY_VERSION}/site-packages/lib.pth echo ${LIB_DIR} >> ${pth_file} - echo ${TESTERS_DIR} >> ${pth_file} } # script starts here diff --git a/src/autotester/testers/pyta/bin/create_environment.sh b/src/autotester/testers/pyta/bin/create_environment.sh index d398daeb..14e001c9 100755 --- a/src/autotester/testers/pyta/bin/create_environment.sh +++ b/src/autotester/testers/pyta/bin/create_environment.sh @@ -8,11 +8,11 @@ create_venv() { source ${VENV_DIR}/bin/activate pip install --upgrade pip pip install wheel + pip install "${TESTERS_DIR}" pip install -r "${THIS_DIR}/requirements.txt" pip install -r <(echo ${PIP_REQUIREMENTS} | sed 's/\s\+/\n/g') # sub spaces for newlines local pth_file=${VENV_DIR}/lib/python${PY_VERSION}/site-packages/lib.pth echo ${LIB_DIR} >> ${pth_file} - echo ${TESTERS_DIR} >> ${pth_file} } # script starts here From 425433829c8349d00f123b3bde7ad4ec124fa864 Mon Sep 17 00:00:00 2001 From: mishaschwartz Date: Thu, 6 Feb 2020 14:00:29 -0500 Subject: [PATCH 20/46] uninstall: deprecate the uninstaller temporarily --- bin/uninstall.sh | 10 ++++++++++ 1 
file changed, 10 insertions(+) diff --git a/bin/uninstall.sh b/bin/uninstall.sh index 8f07909a..cfa2c36b 100755 --- a/bin/uninstall.sh +++ b/bin/uninstall.sh @@ -131,6 +131,16 @@ if [[ $# -gt 0 ]]; then exit 1 fi +# TODO: this uninstaller need to be updated +echo 'This uninstaller is broken, do not use until it has been updated. +To uninstall the autotester please run: + +$ bin/start-stop.sh stop + +and then optionally remove all tester users, server user, postgres databases, and remove any unneeded files in +the workspace directory' 1>&2 +exit 1 + # vars THISSCRIPT=$(readlink -f ${BASH_SOURCE}) BINDIR=$(dirname ${THISSCRIPT}) From 6e949bbb54e253661d93060d62a39d4402c69bf7 Mon Sep 17 00:00:00 2001 From: mishaschwartz Date: Thu, 6 Feb 2020 14:09:30 -0500 Subject: [PATCH 21/46] layerfile: update layerfile --- Layerfile | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/Layerfile b/Layerfile index 6271b980..cd557af6 100644 --- a/Layerfile +++ b/Layerfile @@ -8,11 +8,10 @@ CHECKPOINT RUN python3 -m venv /tmp/venv -RUN /tmp/venv/bin/pip install -U pip -COPY server/bin/requirements.txt /tmp/ -RUN /tmp/venv/bin/pip install -U pytest hypothesis attrs -r /tmp/requirements.txt CHECKPOINT WORKDIR /app COPY . . -RUN /tmp/venv/bin/pytest --ignore testers/testers/py/tests +RUN /tmp/venv/bin/pip install -U pytest hypothesis attrs . + +RUN /tmp/venv/bin/pytest --ignore src/autotester/testers/py/tests From df5ee2617dc9bdf09bc11ea31b2c4ec0d4436fb6 Mon Sep 17 00:00:00 2001 From: mishaschwartz Date: Fri, 7 Feb 2020 14:04:51 -0500 Subject: [PATCH 22/46] tests: update tests for new code structure --- src/autotester/cli.py | 3 +- ...{autotest_enqueuer_test.py => cli_test.py} | 191 ++++++++---------- src/autotester/tests/config_default.py | 88 -------- 3 files changed, 86 insertions(+), 196 deletions(-) rename src/autotester/tests/{autotest_enqueuer_test.py => cli_test.py} (63%) delete mode 100755 src/autotester/tests/config_default.py diff --git a/src/autotester/cli.py b/src/autotester/cli.py index 5a5c9991..9cd1edbf 100755 --- a/src/autotester/cli.py +++ b/src/autotester/cli.py @@ -166,12 +166,11 @@ def get_schema(**_kw): (https://github.com/mozilla-services/react-jsonschema-form) or similar. 
""" this_dir = os.path.dirname(os.path.abspath(__file__)) - root_dir = os.path.dirname(this_dir) with open(os.path.join(this_dir, 'lib', 'tester_schema_skeleton.json')) as f: schema_skeleton = json.load(f) - glob_pattern = os.path.join(root_dir, 'testers', 'testers', '*', 'specs', '.installed') + glob_pattern = os.path.join(this_dir, 'testers', '*', 'specs', '.installed') for path in sorted(glob.glob(glob_pattern)): tester_type = os.path.basename(os.path.dirname(os.path.dirname(path))) specs_dir = os.path.dirname(path) diff --git a/src/autotester/tests/autotest_enqueuer_test.py b/src/autotester/tests/cli_test.py similarity index 63% rename from src/autotester/tests/autotest_enqueuer_test.py rename to src/autotester/tests/cli_test.py index a91c783e..0c8d8f1b 100644 --- a/src/autotester/tests/autotest_enqueuer_test.py +++ b/src/autotester/tests/cli_test.py @@ -1,4 +1,3 @@ -import sys import os import json import re @@ -10,20 +9,15 @@ from unittest.mock import patch, ANY, Mock from contextlib import contextmanager from fakeredis import FakeStrictRedis -from tests import config_default - -sys.path.append('..') -import autotest_enqueuer as ate # noqa: E402 -import autotest_server as ats # noqa: E402 -ate.config = config_default -ats.config = config_default +from autotester import cli @pytest.fixture(autouse=True) def redis(): fake_redis = FakeStrictRedis() - with patch('autotest_server.redis_connection', return_value=fake_redis): - yield fake_redis + with patch('autotester.cli.redis_connection', return_value=fake_redis): + with patch('autotester.server.utils.redis_management.redis_connection', return_value=fake_redis): + yield fake_redis @contextmanager @@ -35,29 +29,26 @@ def tmp_script_dir(settings_dict): pass with open(os.path.join(tmp_dir, 'settings.json'), 'w') as f: json.dump(settings_dict, f) - with patch('autotest_server.test_script_directory', return_value=tmp_dir): + with patch('autotester.cli.test_script_directory', return_value=tmp_dir): yield tmp_dir @pytest.fixture(autouse=True) -def empty_test_script_dir(request): - if 'no_test_script_dir' in request.keywords: - yield - else: - empty_settings = {"testers": [{"test_data": []}]} - with tmp_script_dir(empty_settings) as tmp_dir: - yield tmp_dir +def empty_test_script_dir(request, redis): + empty_settings = {"testers": [{"test_data": []}]} + with tmp_script_dir(empty_settings) as tmp_dir: + yield tmp_dir @pytest.fixture def non_existant_test_script_dir(): - with patch('autotest_server.test_script_directory', return_value=None): + with patch('autotester.cli.test_script_directory', return_value=None): yield @pytest.fixture def pop_interval(): - with patch('autotest_server.get_avg_pop_interval', return_value=None): + with patch('autotester.server.utils.redis_management.get_avg_pop_interval', return_value=None): yield @@ -77,160 +68,153 @@ class DummyTestError(Exception): pass -class TestRunTest: +class TestEnqueueTest: def get_kwargs(self, **kw): - param_kwargs = {k: '' for k in inspect.signature(ats.run_test).parameters} + param_kwargs = {k: '' for k in inspect.signature(cli.run_test).parameters} return {**param_kwargs, **kw} def test_fails_missing_required_args(self): try: - ate.run_test('Admin', 1) - except ate.JobArgumentError: + cli.enqueue_test('Admin', 1) + except cli.JobArgumentError: return - except ate.MarkUsError as e: + except cli.MarkUsError as e: pytest.fail(f'should have failed because kwargs are missing but instead failed with: {e}') pytest.fail('should have failed because kwargs are missing') def 
test_accepts_same_kwargs_as_server_run_test_method(self): try: - ate.run_test('Admin', 1, **self.get_kwargs()) - except ate.JobArgumentError: + cli.enqueue_test('Admin', 1, **self.get_kwargs()) + except cli.JobArgumentError: pytest.fail('should not have failed because kwargs are not missing') - except ate.MarkUsError: + except cli.MarkUsError: pass def test_fails_if_cannot_find_valid_queue(self): try: - ate.run_test('Tim', None, **self.get_kwargs()) - except ate.InvalidQueueError: + cli.enqueue_test('Tim', None, **self.get_kwargs()) + except cli.InvalidQueueError: return - except ate.MarkUsError as e: + except cli.MarkUsError as e: pytest.fail(f'should have failed because a valid queue is not found but instead failed with: {e}') pytest.fail('should have failed because a valid queue is not found') def test_can_find_valid_queue(self): try: - ate.run_test('Admin', 1, **self.get_kwargs()) - except ate.InvalidQueueError: + cli.enqueue_test('Admin', 1, **self.get_kwargs()) + except cli.InvalidQueueError: pytest.fail('should not have failed because a valid queue is available') - except ate.MarkUsError: + except cli.MarkUsError: pass def test_fails_if_test_files_do_not_exist(self, non_existant_test_script_dir): try: - ate.run_test('Admin', 1, **self.get_kwargs()) - except ate.TestScriptFilesError: + cli.enqueue_test('Admin', 1, **self.get_kwargs()) + except cli.TestScriptFilesError: return - except ate.MarkUsError as e: + except cli.MarkUsError as e: pytest.fail(f'should have failed because no test scripts could be found but instead failed with: {e}') pytest.fail('should have failed because no test scripts could be found') def test_can_find_test_files(self): try: - ate.run_test('Admin', 1, **self.get_kwargs()) - except ate.TestScriptFilesError: + cli.enqueue_test('Admin', 1, **self.get_kwargs()) + except cli.TestScriptFilesError: pytest.fail('should not have failed because no test scripts could be found') - except ate.MarkUsError: + except cli.MarkUsError: pass def test_writes_queue_info_to_stdout(self, capfd, pop_interval): try: - ate.run_test('Admin', 1, **self.get_kwargs()) - except ate.MarkUsError: + cli.enqueue_test('Admin', 1, **self.get_kwargs()) + except cli.MarkUsError: pass out, _err = capfd.readouterr() assert re.search(r'^\d+$', out) def test_fails_if_no_tests_groups(self): try: - ate.run_test('Admin', 1, **self.get_kwargs()) - except ate.TestParameterError: + cli.enqueue_test('Admin', 1, **self.get_kwargs()) + except cli.TestParameterError: return - except ate.MarkUsError: + except cli.MarkUsError: pass - @pytest.mark.no_test_script_dir def test_fails_if_no_groups_in_category(self): settings = {"testers": [{"test_data": [{"category": ['admin']}]}]} with tmp_script_dir(settings): try: - ate.run_test('Admin', 1, **self.get_kwargs(test_categories=['student'])) - except ate.TestParameterError: + cli.enqueue_test('Admin', 1, **self.get_kwargs(test_categories=['student'])) + except cli.TestParameterError: return - except ate.MarkUsError: + except cli.MarkUsError: pass - @pytest.mark.no_test_script_dir def test_can_find_tests_in_given_category(self): settings = {"testers": [{"test_data": [{"category": ['admin'], "timeout": 30}]}]} with tmp_script_dir(settings): try: - ate.run_test('Admin', 1, **self.get_kwargs(test_categories=['admin'])) - except ate.TestParameterError: + cli.enqueue_test('Admin', 1, **self.get_kwargs(test_categories=['admin'])) + except cli.TestParameterError: pytest.fail('should not have failed to find an admin test') - except ate.MarkUsError: + except cli.MarkUsError: 
pass - @pytest.mark.no_test_script_dir def test_can_enqueue_test_with_timeout(self, mock_enqueue_call): settings = {"testers": [{"test_data": [{"category": ['admin'], "timeout": 10}]}]} with tmp_script_dir(settings): - ate.run_test('Admin', 1, **self.get_kwargs(test_categories=['admin'])) + cli.enqueue_test('Admin', 1, **self.get_kwargs(test_categories=['admin'])) mock_enqueue_call.assert_called_with(ANY, kwargs=ANY, job_id=ANY, timeout=15) def test_cleans_up_files_on_error(self, mock_rmtree): try: - ate.run_test('Admin', 1, **self.get_kwargs(files_path='something')) - except Exception: + cli.enqueue_test('Admin', 1, **self.get_kwargs(files_path='something')) + except Exception as e: mock_rmtree.assert_called_once() else: pytest.fail('This call to run_test should have failed. See other failures for details') -@pytest.fixture -def update_test_specs(): - with patch('autotest_server.update_test_specs') as mock_func: - yield mock_func - - class TestUpdateSpecs: def get_kwargs(self, **kw): - param_kwargs = {k: '' for k in inspect.signature(ats.update_test_specs).parameters} + param_kwargs = {k: '' for k in inspect.signature(cli.update_test_specs).parameters} return {**param_kwargs, **kw} - def test_fails_when_schema_is_invalid(self, update_test_specs): - with patch('form_validation.validate_with_defaults', return_value=['something']): - with patch('form_validation.best_match', return_value=DummyTestError('error')): + def test_fails_when_schema_is_invalid(self): + with patch('autotester.server.utils.form_validation.validate_with_defaults', return_value=DummyTestError('error')): + with patch('autotester.cli.update_test_specs'): try: - ate.update_specs({}, **self.get_kwargs(schema={})) + cli.update_specs('', **self.get_kwargs(schema={})) except DummyTestError: return pytest.fail('should have failed because the form is invalid') - def test_succeeds_when_schema_is_valid(self, update_test_specs): - with patch('form_validation.validate_with_defaults', return_value=[]): - with patch('form_validation.best_match', return_value=DummyTestError('error')): + def test_succeeds_when_schema_is_valid(self): + with patch('autotester.server.utils.form_validation.validate_with_defaults', return_value=[]): + with patch('autotester.cli.update_test_specs') as p: + print(cli.update_test_specs) try: - ate.update_specs({}, **self.get_kwargs(schema={})) + cli.update_specs('', **self.get_kwargs(schema={})) except DummyTestError: pytest.fail('should not have failed because the form is valid') - def test_calls_update_test_specs(self, update_test_specs): - with patch('form_validation.validate_with_defaults', return_value=[]): - with patch('form_validation.best_match', return_value=DummyTestError('error')): - ate.update_specs({}, **self.get_kwargs(schema={})) - update_test_specs.assert_called_once() + def test_calls_update_test_specs(self): + with patch('autotester.server.utils.form_validation.validate_with_defaults', return_value=[]): + with patch('autotester.cli.update_test_specs') as update_test_specs: + cli.update_specs('', **self.get_kwargs(schema={})) + update_test_specs.assert_called_once() def test_cleans_up_files_on_error(self, mock_rmtree): - with patch('form_validation.validate_with_defaults', side_effect=Exception): - try: - ate.update_specs({}, **self.get_kwargs(schema={}, files_path='something')) - except Exception: - mock_rmtree.assert_called_once() - else: - pytest.fail('This call to update_specs should have failed. 
See other failures for details') + with patch('autotester.server.utils.form_validation.validate_with_defaults', return_value=DummyTestError('error')): + with patch('autotester.cli.update_test_specs'): + try: + cli.update_specs(**self.get_kwargs(schema={}, files_path='test_files')) + except Exception: + mock_rmtree.assert_called_once() + else: + pytest.fail('This call to update_specs should have failed. See other failures for details') @pytest.fixture @@ -246,20 +230,20 @@ class TestCancelTest: def test_do_nothing_if_job_does_not_exist(self, mock_rq_job): Job, mock_job = mock_rq_job Job.fetch.side_effect = rq.exceptions.NoSuchJobError - ate.cancel_test('something', [1]) + cli.cancel_test('something', [1]) mock_job.cancel.assert_not_called() def test_do_nothing_if_job_not_enqueued(self, mock_rq_job): _, mock_job = mock_rq_job mock_job.is_queued.return_value = False - ate.cancel_test('something', [1]) + cli.cancel_test('something', [1]) mock_job.cancel.assert_not_called() def test_cancel_job(self, mock_rq_job): _, mock_job = mock_rq_job mock_job.is_queued.return_value = True mock_job.kwargs = {'files_path': None} - ate.cancel_test('something', [1]) + cli.cancel_test('something', [1]) mock_job.cancel.assert_called_once() def test_remove_files_when_cancelling(self, mock_rq_job, mock_rmtree): @@ -267,14 +251,14 @@ def test_remove_files_when_cancelling(self, mock_rq_job, mock_rmtree): mock_job.is_queued.return_value = True files_path = 'something' mock_job.kwargs = {'files_path': files_path} - ate.cancel_test('something', [1]) + cli.cancel_test('something', [1]) mock_rmtree.assert_called_once_with(files_path, onerror=ANY) def test_cancel_multiple_jobs(self, mock_rq_job): _, mock_job = mock_rq_job mock_job.is_queued.return_value = True mock_job.kwargs = {'files_path': None} - ate.cancel_test('something', [1, 2]) + cli.cancel_test('something', [1, 2]) assert mock_job.cancel.call_count == 2 def test_remove_files_when_cancelling_multiple_jobs(self, mock_rq_job, mock_rmtree): @@ -282,18 +266,17 @@ def test_remove_files_when_cancelling_multiple_jobs(self, mock_rq_job, mock_rmtr mock_job.is_queued.return_value = True files_path = 'something' mock_job.kwargs = {'files_path': files_path} - ate.cancel_test('something', [1, 2]) + cli.cancel_test('something', [1, 2]) assert mock_rmtree.call_count == 2 class TestGetSchema: def fake_installed_testers(self, installed): - server_dir = os.path.dirname(os.path.abspath(ate.__file__)) - root_dir = os.path.dirname(server_dir) + root_dir = os.path.dirname(os.path.abspath(cli.__file__)) paths = [] for tester in installed: - glob_pattern = os.path.join(root_dir, 'testers', 'testers', tester, 'specs') + glob_pattern = os.path.join(root_dir, 'testers', tester, 'specs') paths.append(os.path.join(glob.glob(glob_pattern)[0], '.installed')) return paths @@ -306,24 +289,24 @@ def assert_tester_in_schema(self, tester, schema): def test_prints_skeleton_when_none_installed(self, capfd): with patch('glob.glob', return_value=[]): - ate.get_schema() + cli.get_schema() out, _err = capfd.readouterr() schema = json.loads(out) - server_dir = os.path.dirname(os.path.abspath(ate.__file__)) - with open(os.path.join(server_dir, 'bin', 'tester_schema_skeleton.json')) as f: + root_dir = os.path.dirname(os.path.abspath(cli.__file__)) + with open(os.path.join(root_dir, 'lib', 'tester_schema_skeleton.json')) as f: skeleton = json.load(f) assert schema == skeleton def test_prints_test_schema_when_one_installed(self, capfd): with patch('glob.glob', 
return_value=self.fake_installed_testers(['custom'])): - ate.get_schema() + cli.get_schema() out, _err = capfd.readouterr() schema = json.loads(out) self.assert_tester_in_schema('custom', schema) def test_prints_test_schema_when_multiple_installed(self, capfd): with patch('glob.glob', return_value=self.fake_installed_testers(['custom', 'py'])): - ate.get_schema() + cli.get_schema() out, _err = capfd.readouterr() schema = json.loads(out) self.assert_tester_in_schema('custom', schema) @@ -332,40 +315,36 @@ def test_prints_test_schema_when_multiple_installed(self, capfd): class TestParseArgFile: - @pytest.mark.no_test_script_dir def test_loads_arg_file(self): settings = {'some': 'data'} with tmp_script_dir(settings) as tmp_dir: arg_file = os.path.join(tmp_dir, 'settings.json') - kwargs = ate.parse_arg_file(arg_file) + kwargs = cli.parse_arg_file(arg_file) try: kwargs.pop('files_path') except KeyError: pass assert settings == kwargs - @pytest.mark.no_test_script_dir def test_remove_arg_file(self): settings = {'some': 'data'} with tmp_script_dir(settings) as tmp_dir: arg_file = os.path.join(tmp_dir, 'settings.json') - ate.parse_arg_file(arg_file) + cli.parse_arg_file(arg_file) assert not os.path.isfile(arg_file) - @pytest.mark.no_test_script_dir def test_adds_file_path_if_not_present(self): settings = {'some': 'data'} with tmp_script_dir(settings) as tmp_dir: arg_file = os.path.join(tmp_dir, 'settings.json') - kwargs = ate.parse_arg_file(arg_file) + kwargs = cli.parse_arg_file(arg_file) assert 'files_path' in kwargs assert os.path.realpath(kwargs['files_path']) == os.path.realpath(tmp_dir) - @pytest.mark.no_test_script_dir def test_does_not_add_file_path_if_present(self): settings = {'some': 'data', 'files_path': 'something'} with tmp_script_dir(settings) as tmp_dir: arg_file = os.path.join(tmp_dir, 'settings.json') - kwargs = ate.parse_arg_file(arg_file) + kwargs = cli.parse_arg_file(arg_file) assert 'files_path' in kwargs assert kwargs['files_path'] == 'something' diff --git a/src/autotester/tests/config_default.py b/src/autotester/tests/config_default.py deleted file mode 100755 index f66a88db..00000000 --- a/src/autotester/tests/config_default.py +++ /dev/null @@ -1,88 +0,0 @@ -#!/usr/bin/env python3 - -#### CHANGE CONFIG PARAMETERS BELOW #### - -## REDIS CONFIGS ## - -# name of redis hash used to store the locations of test script directories -REDIS_CURRENT_TEST_SCRIPT_HASH = 'curr_test_scripts' -# name of redis hash used to store pop interval data for each worker queue -REDIS_POP_HASH = 'pop_intervals' -# name of redis hash used to store workers data (username and worker directory) -REDIS_WORKERS_HASH = 'workers' -# name of redis integer used to access the next available port -REDIS_PORT_INT = 'ports' -# dictionary containing keyword arguments to pass to rq.use_connection -# when connecting to a redis database (empty dictionary is default) -REDIS_CONNECTION_KWARGS = {} -# prefix to prepend to all redis keys generated by the autotester -REDIS_PREFIX = 'autotest:' -# prefix to prepend to all postgres databases created -POSTGRES_PREFIX = 'autotest_' - -## WORKING DIR CONFIGS ## - -# the main working directory -WORKSPACE_DIR = '/home/vagrant/markus-autotesting/server/workspace' -# name of the directory containing test scripts -SCRIPTS_DIR_NAME = 'scripts' -# name of the directory containing test results -RESULTS_DIR_NAME = 'results' -# name of the directory containing specs files -SPECS_DIR_NAME = 'specs' -# name of the directory containing workspaces for the workers -WORKERS_DIR_NAME = 
'workers' -# name of the directory containing log files -LOGS_DIR_NAME = 'logs' -# name of the server user -SERVER_USER = '' -# names of the worker users -WORKER_USERS = 'autotst0 autotst1 autotst2 autotst3 autotst4 autotst5 autotst6 autotst7' -# prefix used to name reaper users -# (reapers not used to kill worker processes if set to the empty string) -REAPER_USER_PREFIX = '' -# default tester environment name -DEFAULT_ENV_NAME = 'defaultenv' - -## RLIMIT SETTINGS FOR TESTER PROCESSES ## - -# values are: (soft limit, hard limit) -# see https://docs.python.org/3/library/resource.html for reference on limit options -# NOTE: these limits cannot be higher than the limits set for the tester user in -# /etc/security/limits.conf (or similar). These limits may be reduced in certain -# cases (see the docstring for get_test_preexec_fn and get_cleanup_preexec_fn in -# autotest_server.py) -RLIMIT_SETTINGS = { - 'RLIMIT_NPROC': (300, 300) -} - - -### QUEUE CONFIGS ### - -# functions used to select which type of queue to use. They must accept any number -# of keyword arguments and should only return a boolean (see autotest_enqueuer._get_queue) -def batch_filter(**kwargs): - return kwargs.get('batch_id') is not None - - -def single_filter(**kwargs): - return kwargs.get('user_type') == 'Admin' and not batch_filter(**kwargs) - - -def student_filter(**kwargs): - return kwargs.get('user_type') == 'Student' and not batch_filter(**kwargs) - - -# list of worker queues. Values of each are a string indicating the queue name, -# and a function used to select whether or not to use this type of queue -# (see autotest_enqueuer._get_queue) -batch_queue = {'name': 'batch', 'filter': batch_filter} -single_queue = {'name': 'single', 'filter': single_filter} -student_queue = {'name': 'student', 'filter': student_filter} -WORKER_QUEUES = [batch_queue, single_queue, student_queue] - -### WORKER CONFIGS ### - -WORKERS = [(4, [student_queue['name'], single_queue['name'], batch_queue['name']]), - (2, [single_queue['name'], student_queue['name'], batch_queue['name']]), - (2, [batch_queue['name'], student_queue['name'], single_queue['name']])] From 8937da35ddf995589db362a760320192644b0c9e Mon Sep 17 00:00:00 2001 From: mishaschwartz Date: Fri, 7 Feb 2020 14:05:09 -0500 Subject: [PATCH 23/46] travis: update travis --- .travis.yml | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/.travis.yml b/.travis.yml index e34b7ab7..36cad9e4 100644 --- a/.travis.yml +++ b/.travis.yml @@ -6,9 +6,7 @@ python: - "3.8" # command to install dependencies install: - - pip install pytest - - pip install hypothesis - - pip install -r server/bin/requirements.txt + - pip install -U pytest hypothesis fakeredis . 
# command to run tests script: - - pytest --ignore testers/testers/py/tests + - pytest --ignore src/autotester/testers/py/tests From 3dacee8821588984190592faf2b4e533a0d6ad22 Mon Sep 17 00:00:00 2001 From: mishaschwartz Date: Fri, 7 Feb 2020 14:05:57 -0500 Subject: [PATCH 24/46] manifest: include tester files and schema skeleton in package --- MANIFEST.in | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/MANIFEST.in b/MANIFEST.in index bd02d27c..d2a30e18 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1 +1,3 @@ -include src/autotester/config_defaults/* \ No newline at end of file +include src/autotester/config_defaults/* +include src/autotester/lib/* +include src/autotester/testers/*/specs/* \ No newline at end of file From e84b360f9e1be83ce834d72f21a2d7a948a9fe0a Mon Sep 17 00:00:00 2001 From: mishaschwartz Date: Fri, 7 Feb 2020 14:21:22 -0500 Subject: [PATCH 25/46] travis: update attrs package (required by hypothesis for python3.6 only) --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 36cad9e4..0e742793 100644 --- a/.travis.yml +++ b/.travis.yml @@ -6,7 +6,7 @@ python: - "3.8" # command to install dependencies install: - - pip install -U pytest hypothesis fakeredis . + - pip install --upgrade attrs && pip install -U pytest hypothesis fakeredis . # command to run tests script: - pytest --ignore src/autotester/testers/py/tests From 47458b50a0f0c87dd087337b180666af50da2cd0 Mon Sep 17 00:00:00 2001 From: mishaschwartz Date: Fri, 7 Feb 2020 15:31:03 -0500 Subject: [PATCH 26/46] archiver: deprecate archiver until it can be updated --- bin/archive_workspace.sh | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/bin/archive_workspace.sh b/bin/archive_workspace.sh index 8eb5b66b..4eac5f3a 100755 --- a/bin/archive_workspace.sh +++ b/bin/archive_workspace.sh @@ -22,6 +22,14 @@ if [[ $# -lt 1 ]]; then exit 1 fi +# TODO: this file needs to be updated +echo 'This archiver is broken, do not use until it has been updated. +To archive the workspace, run: + +$ tar cJf +' 1>&2 +exit 1 + # vars THISSCRIPT=$(readlink -f ${BASH_SOURCE}) BINDIR=$(dirname ${THISSCRIPT}) From 8cbe815d714e43d2b229bd7ca86d3c3c9d907466 Mon Sep 17 00:00:00 2001 From: mishaschwartz Date: Fri, 7 Feb 2020 16:22:51 -0500 Subject: [PATCH 27/46] tests: run tests through setup.py automatically --- .gitignore | 1 + .travis.yml | 5 +---- Layerfile | 4 +--- setup.cfg | 5 +++++ setup.py | 4 +++- 5 files changed, 11 insertions(+), 8 deletions(-) create mode 100644 setup.cfg diff --git a/.gitignore b/.gitignore index 3d28497c..b60fa3eb 100644 --- a/.gitignore +++ b/.gitignore @@ -5,6 +5,7 @@ __pycache__ .hypothesis/ .pytest_cache/ *.egg-info +.eggs venv # bin diff --git a/.travis.yml b/.travis.yml index 0e742793..14b06eaa 100644 --- a/.travis.yml +++ b/.travis.yml @@ -4,9 +4,6 @@ python: - "3.6" - "3.7" - "3.8" -# command to install dependencies -install: - - pip install --upgrade attrs && pip install -U pytest hypothesis fakeredis . # command to run tests script: - - pytest --ignore src/autotester/testers/py/tests + - python setup.py test diff --git a/Layerfile b/Layerfile index cd557af6..941d08dc 100644 --- a/Layerfile +++ b/Layerfile @@ -12,6 +12,4 @@ CHECKPOINT WORKDIR /app COPY . . -RUN /tmp/venv/bin/pip install -U pytest hypothesis attrs . 
- -RUN /tmp/venv/bin/pytest --ignore src/autotester/testers/py/tests +RUN /tmp/venv/bin/python setup.py test diff --git a/setup.cfg b/setup.cfg new file mode 100644 index 00000000..84050d65 --- /dev/null +++ b/setup.cfg @@ -0,0 +1,5 @@ +[aliases] +test=pytest + +[tool:pytest] +addopts = --ignore=src/autotester/testers/py/tests diff --git a/setup.py b/setup.py index 6c25b359..f3a552b8 100644 --- a/setup.py +++ b/setup.py @@ -23,10 +23,12 @@ 'jsonschema==3.0.2' ], tests_require=[ + 'pytest==5.3.1', + 'hypothesis==4.47.3', 'fakeredis==1.1.0' ], + setup_requires=['pytest-runner'], include_package_data=True, - # data_files=[('config_defaults', ['config/config_default.yml', 'config/config_env_vars.yml'])], entry_points={ 'console_scripts': 'markus_autotester = autotester.cli:cli' }) From 662a7dd999d666a4a0077925047f0df9586d6c74 Mon Sep 17 00:00:00 2001 From: mishaschwartz Date: Fri, 7 Feb 2020 17:51:14 -0500 Subject: [PATCH 28/46] style: various style fixes --- .flake8.ini | 2 +- .hound.yml | 1 - MANIFEST.in | 2 +- bin/install.sh | 2 +- doc/hooks_example.py | 6 +- docker-compose.yml | 1 - src/autotester/config.py | 3 + .../config_defaults/config_env_vars.yml | 2 +- src/autotester/exceptions.py | 9 +- .../lib/tester_schema_skeleton.json | 2 +- src/autotester/resources/ports/__init__.py | 1 + .../resources/postgresql/__init__.py | 3 +- .../server/hooks_context/builtin_hooks.py | 15 ++- .../server/hooks_context/hooks_context.py | 20 ++-- src/autotester/server/server.py | 104 ++++++++++-------- .../server/utils/file_management.py | 10 ++ .../server/utils/form_validation.py | 3 + .../server/utils/path_management.py | 3 +- .../server/utils/redis_management.py | 7 ++ .../server/utils/resource_management.py | 3 + .../server/utils/string_management.py | 3 + .../server/utils/user_management.py | 8 +- src/autotester/testers/custom/bin/install.sh | 8 +- .../testers/custom/bin/uninstall.sh | 8 +- .../testers/custom/default_hooks.py | 2 + .../testers/custom/specs/settings_schema.json | 2 +- src/autotester/testers/haskell/bin/install.sh | 8 +- .../testers/haskell/bin/uninstall.sh | 8 +- .../testers/haskell/markus_haskell_tester.py | 21 ++-- .../haskell/specs/settings_schema.json | 2 +- src/autotester/testers/java/bin/install.sh | 14 +-- src/autotester/testers/java/bin/uninstall.sh | 16 +-- .../testers/java/specs/settings_schema.json | 2 +- src/autotester/testers/markus_tester.py | 9 +- .../testers/py/bin/create_environment.sh | 37 ++++--- src/autotester/testers/py/bin/install.sh | 8 +- src/autotester/testers/py/bin/uninstall.sh | 8 +- .../testers/py/markus_python_tester.py | 13 +-- .../testers/pyta/bin/create_environment.sh | 41 +++---- src/autotester/testers/pyta/bin/install.sh | 8 +- src/autotester/testers/pyta/bin/uninstall.sh | 8 +- .../testers/pyta/markus_pyta_tester.py | 8 +- src/autotester/testers/racket/bin/install.sh | 8 +- .../testers/racket/bin/uninstall.sh | 8 +- src/autotester/testers/racket/lib/markus.rkt | 1 - .../testers/racket/specs/settings_schema.json | 2 +- src/autotester/tests/cli_test.py | 43 ++++---- 47 files changed, 280 insertions(+), 223 deletions(-) diff --git a/.flake8.ini b/.flake8.ini index 14951b5d..0e0f5037 100644 --- a/.flake8.ini +++ b/.flake8.ini @@ -1,3 +1,3 @@ [flake8] max-line-length = 120 -ignore = E266 \ No newline at end of file +ignore = E266 diff --git a/.hound.yml b/.hound.yml index b36fe97f..89747ff9 100644 --- a/.hound.yml +++ b/.hound.yml @@ -1,4 +1,3 @@ flake8: enabled: true config_file: .flake8.ini - diff --git a/MANIFEST.in b/MANIFEST.in index 
d2a30e18..29494940 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,3 +1,3 @@ include src/autotester/config_defaults/* include src/autotester/lib/* -include src/autotester/testers/*/specs/* \ No newline at end of file +include src/autotester/testers/*/specs/* diff --git a/bin/install.sh b/bin/install.sh index 31b27d7f..13c9705f 100755 --- a/bin/install.sh +++ b/bin/install.sh @@ -262,7 +262,7 @@ create_enqueuer_wrapper() { echo "#!/usr/bin/env bash ${SERVER_VENV}/bin/markus_autotester \"\$@\"" | sudo tee ${enqueuer} > /dev/null - sudo chown "${SERVER_USER}:${SERVERUSER}" "${enqueuer}" + sudo chown "${SERVER_USER}:${SERVER_USER}" "${enqueuer}" sudo chmod u=rwx,go=r ${enqueuer} } diff --git a/doc/hooks_example.py b/doc/hooks_example.py index 23c64c41..31630f14 100644 --- a/doc/hooks_example.py +++ b/doc/hooks_example.py @@ -22,18 +22,18 @@ def upload_svn_file(api, group_repo_name, assignment_name, file_name, svn_user, # Hooks -def before_all(api, assignment_id, group_id, group_repo_name): +def before_all(_api, _assignment_id, _group_id, _group_repo_name): # clean up unwanted files pattern = os.path.join('**', '*.o') for file_path in glob.glob(pattern, recursive=True): os.remove(file_path) -def before_each(api, assignment_id, group_id, group_repo_name): +def before_each(_api, _assignment_id, _group_id, _group_repo_name): pass -def after_each(api, assignment_id, group_id, group_repo_name): +def after_each(_api, _assignment_id, _group_id, _group_repo_name): pass diff --git a/docker-compose.yml b/docker-compose.yml index 6b71d895..f326e27d 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -51,4 +51,3 @@ services: volumes: postgres_autotest: redis_autotest: - diff --git a/src/autotester/config.py b/src/autotester/config.py index f5d2b918..85fdeb15 100644 --- a/src/autotester/config.py +++ b/src/autotester/config.py @@ -11,6 +11,7 @@ CONFIG_FILENAME = 'markus_autotester_config' CONFIG_ENV_VAR = 'MARKUS_AUTOTESTER_CONFIG' + def _find_local_config(): system_config = os.path.join(os.path.sep, 'etc', CONFIG_FILENAME) user_config = os.path.join(os.environ.get('HOME'), f'.{CONFIG_FILENAME}') @@ -23,6 +24,7 @@ def _find_local_config(): if os.path.isfile(system_config): return system_config + class _Config: _local_config = _find_local_config() @@ -95,4 +97,5 @@ def _load_from_yaml(self): config_dicts.append(yaml.load(f, Loader=self._yaml_loader)) return self._merge_dicts(config_dicts) + config = _Config() diff --git a/src/autotester/config_defaults/config_env_vars.yml b/src/autotester/config_defaults/config_env_vars.yml index e0dab0fc..27e30888 100644 --- a/src/autotester/config_defaults/config_env_vars.yml +++ b/src/autotester/config_defaults/config_env_vars.yml @@ -13,4 +13,4 @@ users: resources: postgresql: port: !ENV ${PGPORT} - host: !ENV ${PGHOST} \ No newline at end of file + host: !ENV ${PGHOST} diff --git a/src/autotester/exceptions.py b/src/autotester/exceptions.py index cd5f1ae4..dff31ce3 100644 --- a/src/autotester/exceptions.py +++ b/src/autotester/exceptions.py @@ -2,23 +2,30 @@ Custom Exception Type for use in MarkUs """ + class MarkUsError(Exception): pass + class TesterCreationError(MarkUsError): pass + class TesterUserError(MarkUsError): pass + class JobArgumentError(MarkUsError): pass + class InvalidQueueError(MarkUsError): pass + class TestScriptFilesError(MarkUsError): pass + class TestParameterError(MarkUsError): - pass \ No newline at end of file + pass diff --git a/src/autotester/lib/tester_schema_skeleton.json b/src/autotester/lib/tester_schema_skeleton.json index 
a949f2b3..47c7e85d 100644 --- a/src/autotester/lib/tester_schema_skeleton.json +++ b/src/autotester/lib/tester_schema_skeleton.json @@ -51,4 +51,4 @@ "title": "Custom hooks file" } } -} \ No newline at end of file +} diff --git a/src/autotester/resources/ports/__init__.py b/src/autotester/resources/ports/__init__.py index 90f74179..92f42bdf 100644 --- a/src/autotester/resources/ports/__init__.py +++ b/src/autotester/resources/ports/__init__.py @@ -7,6 +7,7 @@ REDIS_PREFIX = config['redis', '_prefix'] REDIS_PORT_INT = f"{REDIS_PREFIX}{config['resources', 'port', '_redis_int']}" + def next_port(): """ Return a port number that is greater than the last time this method was called (by any process on this machine). diff --git a/src/autotester/resources/postgresql/__init__.py b/src/autotester/resources/postgresql/__init__.py index 2b4153a4..18df17c8 100644 --- a/src/autotester/resources/postgresql/__init__.py +++ b/src/autotester/resources/postgresql/__init__.py @@ -9,6 +9,7 @@ POSTGRES_PREFIX = config['resources', 'postgresql', '_prefix'] PGPASSFILE = os.path.join(config['workspace'], config['_workspace_contents', '_logs'], '.pgpass') + def setup_database(test_username): user = getpass.getuser() database = f'{POSTGRES_PREFIX}{test_username}' @@ -24,4 +25,4 @@ def setup_database(test_username): password = ''.join(secrets.choice(string.ascii_letters + string.digits) for _ in range(20)) cursor.execute("ALTER USER %s WITH PASSWORD %s;", (AsIs(user), password)) - return {'PGDATABASE': database, 'PGPASSWORD': password, 'PGUSER': user, 'AUTOTESTENV': 'true'} \ No newline at end of file + return {'PGDATABASE': database, 'PGPASSWORD': password, 'PGUSER': user, 'AUTOTESTENV': 'true'} diff --git a/src/autotester/server/hooks_context/builtin_hooks.py b/src/autotester/server/hooks_context/builtin_hooks.py index 05fe463a..90c74721 100644 --- a/src/autotester/server/hooks_context/builtin_hooks.py +++ b/src/autotester/server/hooks_context/builtin_hooks.py @@ -8,11 +8,11 @@ import importlib from autotester import testers -HOOKS = {'upload_feedback_file' : {'context': 'after_each'}, - 'upload_feedback_to_repo' : {'requires': ['clear_feedback_file'], - 'context': 'after_each'}, - 'upload_annotations' : {'context': 'after_each'}, - 'clear_feedback_file' : {'context': 'before_each'}} +HOOKS = {'upload_feedback_file': {'context': 'after_each'}, + 'upload_feedback_to_repo': {'requires': ['clear_feedback_file'], + 'context': 'after_each'}, + 'upload_annotations': {'context': 'after_each'}, + 'clear_feedback_file': {'context': 'before_each'}} def clear_feedback_file(test_data, **_kwargs): @@ -43,7 +43,7 @@ def upload_feedback_file(api, assignment_id, group_id, test_data, **_kwargs): with open(feedback_file) as feedback_open: api.upload_feedback_file(assignment_id, group_id, feedback_file, feedback_open.read()) - + def upload_annotations(api, assignment_id, group_id, test_data, **_kwargs): """ Upload annotations using MarkUs' api. 
@@ -53,8 +53,6 @@ def upload_annotations(api, assignment_id, group_id, test_data, **_kwargs): with open(annotations_name) as annotations_open: api.upload_annotations(assignment_id, group_id, json.load(annotations_open)) -## DEFAULT TESTER HOOKS ## - def _load_default_hooks(): """ @@ -68,4 +66,5 @@ def _load_default_hooks(): defaults[hook.__name__] = hook return defaults + DEFAULT_HOOKS = _load_default_hooks() diff --git a/src/autotester/server/hooks_context/hooks_context.py b/src/autotester/server/hooks_context/hooks_context.py index ef772556..a782adff 100644 --- a/src/autotester/server/hooks_context/hooks_context.py +++ b/src/autotester/server/hooks_context/hooks_context.py @@ -116,15 +116,15 @@ def _merge_hook_dicts(*hook_dicts): order. """ merged = defaultdict(list) - sort_key = lambda x: (x.__name__ in Hooks.HOOK_BASENAMES, x.__name__) for d in hook_dicts: for key, hooks in d.items(): merged[key].extend(h for h in hooks if h) for key, hooks in merged.items(): - merged[key] = sorted((h for h in hooks if h), key=sort_key, reverse=(key=='after')) + merged[key] = sorted((h for h in hooks if h), + key=lambda x: (x.__name__ in Hooks.HOOK_BASENAMES, x.__name__), + reverse=(key == 'after')) return merged - def _load_all(self): """ Return a dictionary containing all hooks that may be run over the course of a test run. @@ -143,7 +143,7 @@ def _load_all(self): custom_hooks_module = self._load_module(self.custom_hooks_path) for hook_name in Hooks.HOOK_BASENAMES: - hook_type, hook_context = hook_name.split('_') # eg. "before_all" -> ("before", "all") + hook_type, hook_context = hook_name.split('_') # eg. "before_all" -> ("before", "all") custom_hook = self._load_hook(custom_hooks_module, hook_name) builtin_hook = builtin_hooks.DEFAULT_HOOKS.get(hook_name) hooks[None][hook_context][hook_type].extend([custom_hook, builtin_hook]) @@ -169,8 +169,8 @@ def _load_module(self, hooks_script_path): with add_path(dirpath): hooks_module = __import__(module_name) return hooks_module - except Exception: - self.load_errors.append((module_name, traceback.format_exc())) + except Exception as e: + self.load_errors.append((module_name, f'{traceback.format_exc()}\n{e}')) return None def _load_hook(self, module, function_name): @@ -198,7 +198,7 @@ def _run(self, func, extra_args=None, extra_kwargs=None): try: func(*args, **kwargs) except BaseException as e: - self.run_errors.append((func.__name__, args, kwargs, traceback.format_exc())) + self.run_errors.append((func.__name__, args, kwargs, f'{traceback.format_exc()}\n{e}')) def _get_hooks(self, tester_type, builtin_selector=None): """ @@ -224,7 +224,6 @@ def _get_hooks(self, tester_type, builtin_selector=None): hooks = Hooks._merge_hook_dicts(hooks, builtin_hook_dict.get('all', {})) return hooks.get('before', []), hooks.get('after', []) - @contextmanager def around(self, tester_type, builtin_selector=None, extra_args=None, extra_kwargs=None, cwd=None): """ @@ -261,7 +260,6 @@ def format_errors(self): for module_name, tb in self.load_errors: error_list.append(f'module_name: {module_name}\ntraceback:\n{tb}') for hook_name, args, kwargs, tb in self.run_errors: - error_list.append(f'function_name: {hook_name}\nargs: {self.args}\nkwargs: {self.kwargs},\ntraceback:\n{tb}') + error_list.append(f'function_name: {hook_name}\n' + f'args: {self.args}\nkwargs: {self.kwargs},\ntraceback:\n{tb}') return '\n\n'.join(error_list) - - diff --git a/src/autotester/server/server.py b/src/autotester/server/server.py index 92c8eecd..67206b95 100755 --- a/src/autotester/server/server.py +++ 
b/src/autotester/server/server.py @@ -3,7 +3,7 @@ import os import shutil import time -import json +import json import subprocess import signal import rq @@ -15,7 +15,8 @@ from autotester.server.hooks_context.hooks_context import Hooks from autotester.server.utils.string_management import loads_partial_json, decode_if_bytes, stringify from autotester.server.utils.user_management import get_reaper_username, current_user, tester_user -from autotester.server.utils.file_management import random_tmpfile_name, clean_dir_name, setup_files, ignore_missing_dir_error, fd_open, fd_lock, move_tree +from autotester.server.utils.file_management import random_tmpfile_name, clean_dir_name, setup_files, \ + ignore_missing_dir_error, fd_open, fd_lock, move_tree from autotester.server.utils.resource_management import set_rlimits_before_cleanup, set_rlimits_before_test from autotester.server.utils.redis_management import clean_after, test_script_directory, update_pop_interval_stat from autotester.resources.ports import get_available_port @@ -29,26 +30,25 @@ TEST_SPECS_DIR = os.path.join(config['workspace'], config['_workspace_contents', '_specs']) TEST_SCRIPT_DIR = os.path.join(config['workspace'], config['_workspace_contents', '_scripts']) -TESTER_IMPORT_LINE = {'custom' : 'from testers.custom.markus_custom_tester import MarkusCustomTester as Tester', - 'haskell' : 'from testers.haskell.markus_haskell_tester import MarkusHaskellTester as Tester', - 'java' : 'from testers.java.markus_java_tester import MarkusJavaTester as Tester', - 'py' : 'from testers.py.markus_python_tester import MarkusPythonTester as Tester', - 'pyta' : 'from testers.pyta.markus_pyta_tester import MarkusPyTATester as Tester', - 'racket' : 'from testers.racket.markus_racket_tester import MarkusRacketTester as Tester'} +TESTER_IMPORT_LINE = {'custom': 'from testers.custom.markus_custom_tester import MarkusCustomTester as Tester', + 'haskell': 'from testers.haskell.markus_haskell_tester import MarkusHaskellTester as Tester', + 'java': 'from testers.java.markus_java_tester import MarkusJavaTester as Tester', + 'py': 'from testers.py.markus_python_tester import MarkusPythonTester as Tester', + 'pyta': 'from testers.pyta.markus_pyta_tester import MarkusPyTATester as Tester', + 'racket': 'from testers.racket.markus_racket_tester import MarkusRacketTester as Tester'} -### RUN TESTS ### -def test_run_command(test_username=None): +def run_test_command(test_username=None): """ Return a command used to run test scripts as a the test_username user, with the correct arguments. Set test_username to None to run as the current user. >>> test_script = 'mysscript.py' - >>> test_run_command('f').format(test_script) + >>> run_test_command('f').format(test_script) 'sudo -u f -- bash -c "./myscript.py"' - >>> test_run_command().format(test_script) + >>> run_test_command().format(test_script) './myscript.py' """ cmd = '{}' @@ -58,19 +58,21 @@ def test_run_command(test_username=None): return cmd + def create_test_group_result(stdout, stderr, run_time, extra_info, timeout=None): """ Return the arguments passed to this function in a dictionary. If stderr is falsy, change it to None. Load the json string in stdout as a dictionary. 
""" test_results, malformed = loads_partial_json(stdout, dict) - return {'time' : run_time, - 'timeout' : timeout, - 'tests' : test_results, - 'stderr' : stderr or None, - 'malformed' : stdout if malformed else None, + return {'time': run_time, + 'timeout': timeout, + 'tests': test_results, + 'stderr': stderr or None, + 'malformed': stdout if malformed else None, 'extra_info': extra_info or {}} + def kill_with_reaper(test_username): """ Try to kill all processes currently being run by test_username using the method @@ -92,9 +94,10 @@ def kill_with_reaper(test_username): kill_file_dst = random_tmpfile_name() preexec_fn = set_rlimits_before_cleanup() - copy_cmd = "sudo -u {0} -- bash -c 'cp kill_worker_procs {1} && chmod 4550 {1}'".format(test_username, kill_file_dst) + copy_cmd = "sudo -u {0} -- bash -c 'cp kill_worker_procs {1} && chmod 4550 {1}'".format(test_username, + kill_file_dst) copy_proc = subprocess.Popen(copy_cmd, shell=True, preexec_fn=preexec_fn, cwd=cwd) - if copy_proc.wait() < 0: # wait returns the return code of the proc + if copy_proc.wait() < 0: # wait returns the return code of the proc return False kill_cmd = 'sudo -u {} -- bash -c {}'.format(reaper_username, kill_file_dst) @@ -102,6 +105,7 @@ def kill_with_reaper(test_username): return kill_proc.wait() == 0 return False + def kill_without_reaper(test_username): """ Kill all processes that test_username is able to kill @@ -109,20 +113,22 @@ def kill_without_reaper(test_username): kill_cmd = f"sudo -u {test_username} -- bash -c 'kill -KILL -1'" subprocess.run(kill_cmd, shell=True) + def create_test_script_command(env_dir, tester_type): """ Return string representing a command line command to run tests. """ import_line = TESTER_IMPORT_LINE[tester_type] - python_lines = [ 'import sys, json', - import_line, - 'from testers.markus_test_specs import MarkusTestSpecs', + python_lines = ['import sys, json', + import_line, + 'from testers.markus_test_specs import MarkusTestSpecs', f'Tester(specs=MarkusTestSpecs.from_json(sys.stdin.read())).run()'] python_ex = os.path.join(os.path.join(TEST_SPECS_DIR, env_dir), 'venv', 'bin', 'python') python_str = '; '.join(python_lines) return f'{python_ex} -c "{python_str}"' + def get_env_vars(test_username): """ Return a dictionary containing all environment variables to pass to the next test """ db_env_vars = setup_database(test_username) @@ -149,8 +155,9 @@ def run_test_specs(cmd, test_specs, test_categories, tests_path, test_username, args = cmd.format(cmd_str) for test_data in settings['test_data']: - test_category = test_data.get('category', []) - if set(test_category) & set(test_categories): #TODO: make sure test_categories is non-string collection type + test_category = test_data.get('category', []) + if set(test_category) & set( + test_categories): # TODO: make sure test_categories is non-string collection type extra_hook_kwargs = {'test_data': test_data} with hooks.around('each', builtin_selector=test_data, extra_kwargs=extra_hook_kwargs): start = time.time() @@ -159,8 +166,8 @@ def run_test_specs(cmd, test_specs, test_categories, tests_path, test_username, timeout = test_data.get('timeout') try: env_vars = get_env_vars(test_username) - proc = subprocess.Popen(args, start_new_session=True, cwd=tests_path, shell=True, - stdout=subprocess.PIPE, stderr=subprocess.PIPE, + proc = subprocess.Popen(args, start_new_session=True, cwd=tests_path, shell=True, + stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, preexec_fn=preexec_fn, env={**os.environ, **env_vars}) try: @@ 
-180,11 +187,13 @@ def run_test_specs(cmd, test_specs, test_categories, tests_path, test_username, finally: out = decode_if_bytes(out) err = decode_if_bytes(err) - duration = int(round(time.time()-start, 3) * 1000) + duration = int(round(time.time() - start, 3) * 1000) extra_info = test_data.get('extra_info', {}) - results.append(create_test_group_result(out, err, duration, extra_info, timeout_expired)) + results.append( + create_test_group_result(out, err, duration, extra_info, timeout_expired)) return results, hooks.format_errors() + def store_results(results_data, markus_address, assignment_id, group_id, submission_id): """ Write the results of multiple test script runs to an output file as a json string. @@ -193,11 +202,14 @@ def store_results(results_data, markus_address, assignment_id, group_id, submiss """ clean_markus_address = clean_dir_name(markus_address) run_time = "run_{}".format(int(time.time())) - destination = os.path.join(*stringify(TEST_RESULT_DIR, clean_markus_address, assignment_id, group_id, 's{}'.format(submission_id or ''), run_time)) + destination = os.path.join( + *stringify(TEST_RESULT_DIR, clean_markus_address, assignment_id, group_id, 's{}'.format(submission_id or ''), + run_time)) os.makedirs(destination, exist_ok=True) with open(os.path.join(destination, 'output.json'), 'w') as f: json.dump(results_data, f, indent=4) + def clear_working_directory(tests_path, test_username): """ Run commands that clear the tests_path working directory @@ -206,14 +218,15 @@ def clear_working_directory(tests_path, test_username): chmod_cmd = "sudo -u {} -- bash -c 'chmod -Rf ugo+rwX {}'".format(test_username, tests_path) else: chmod_cmd = 'chmod -Rf ugo+rwX {}'.format(tests_path) - + subprocess.run(chmod_cmd, shell=True) - + # be careful not to remove the tests_path dir itself since we have to # set the group ownership with sudo (and that is only done in ../install.sh) clean_cmd = 'rm -rf {0}/.[!.]* {0}/*'.format(tests_path) subprocess.run(clean_cmd, shell=True) + def stop_tester_processes(test_username): """ Run a command that kills all tester processes either by killing all @@ -224,20 +237,23 @@ def stop_tester_processes(test_username): if not kill_with_reaper(test_username): kill_without_reaper(test_username) + def finalize_results_data(results, error, all_hooks_error, time_to_service): """ Return a dictionary of test script results combined with test run info """ - return {'test_groups' : results, - 'error' : error, - 'hooks_error' : all_hooks_error, - 'time_to_service' : time_to_service} + return {'test_groups': results, + 'error': error, + 'hooks_error': all_hooks_error, + 'time_to_service': time_to_service} + def report(results_data, api, assignment_id, group_id, run_id): """ Post the results of running test scripts to the markus api """ api.upload_test_group_results(assignment_id, group_id, run_id, json.dumps(results_data)) + @clean_after -def run_test(markus_address, server_api_key, test_categories, files_path, assignment_id, - group_id, group_repo_name, submission_id, run_id, enqueue_time): +def run_test(markus_address, server_api_key, test_categories, files_path, assignment_id, + group_id, submission_id, run_id, enqueue_time): """ Run autotesting tests using the tests in the test_specs json file on the files in files_path. 
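For reference, here is a minimal sketch (values invented, and assuming the helpers defined above in server.py) of how one test group's output is folded into the payload that store_results and report receive:

group_result = create_test_group_result(
    stdout='{"name": "compiles", "marks_earned": 1, "marks_total": 1}',  # illustrative tester output
    stderr='',
    run_time=1250,   # milliseconds, as computed in run_test_specs
    extra_info={},
    timeout=None,
)
results_data = finalize_results_data(
    results=[group_result],
    error=None,
    all_hooks_error='',
    time_to_service=2,
)
# results_data is a dict with the keys 'test_groups', 'error', 'hooks_error' and
# 'time_to_service'; store_results writes it to output.json and report posts it to MarkUs.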
@@ -267,7 +283,7 @@ def run_test(markus_address, server_api_key, test_categories, files_path, assign hooks = Hooks(hooks_script_path, testers, cwd=tests_path, kwargs=hooks_kwargs) try: setup_files(files_path, tests_path, markus_address, assignment_id) - cmd = test_run_command(test_username=test_username) + cmd = run_test_command(test_username=test_username) results, hooks_error = run_test_specs(cmd, test_specs, test_categories, @@ -284,7 +300,6 @@ def run_test(markus_address, server_api_key, test_categories, files_path, assign store_results(results_data, markus_address, assignment_id, group_id, submission_id) report(results_data, api, assignment_id, group_id, run_id) -### UPDATE TEST SCRIPTS ### def get_tester_root_dir(tester_type): """ @@ -297,6 +312,7 @@ def get_tester_root_dir(tester_type): raise FileNotFoundError(f'{tester_type} is not a valid tester name') return tester_dir + def update_settings(settings, specs_dir): """ Return a dictionary containing all the default settings and the installation settings @@ -312,7 +328,8 @@ def update_settings(settings, specs_dir): full_settings.update(settings) return full_settings -def create_tester_environments(files_path, test_specs): + +def create_tester_environments(files_path, test_specs): for i, settings in enumerate(test_specs['testers']): tester_dir = get_tester_root_dir(settings["tester_type"]) specs_dir = os.path.join(tester_dir, 'specs') @@ -335,6 +352,7 @@ def create_tester_environments(files_path, test_specs): return test_specs + def destroy_tester_environments(old_test_script_dir): test_specs_file = os.path.join(old_test_script_dir, SETTINGS_FILENAME) with open(test_specs_file) as f: @@ -352,6 +370,7 @@ def destroy_tester_environments(old_test_script_dir): raise TesterCreationError(f'destroy tester environment failed with:\n{proc.stderr}') shutil.rmtree(env_loc, onerror=ignore_missing_dir_error) + @clean_after def update_test_specs(files_path, assignment_id, markus_address, test_specs): """ @@ -369,7 +388,7 @@ def update_test_specs(files_path, assignment_id, markus_address, test_specs): new_files_dir = os.path.join(new_dir, FILES_DIRNAME) move_tree(files_path, new_files_dir) if 'hooks_file' in test_specs: - src = os.path.isfile(os.path.join(new_files_dir, test_specs['hooks_file'])) + src = os.path.join(new_files_dir, test_specs['hooks_file']) if os.path.isfile(src): os.rename(src, os.path.join(new_dir, HOOKS_FILENAME)) test_specs = create_tester_environments(new_files_dir, test_specs) @@ -378,10 +397,9 @@ def update_test_specs(files_path, assignment_id, markus_address, test_specs): json.dump(test_specs, f) old_test_script_dir = test_script_directory(markus_address, assignment_id) test_script_directory(markus_address, assignment_id, set_to=new_dir) - + if old_test_script_dir is not None: with fd_open(old_test_script_dir) as fd: with fd_lock(fd, exclusive=True): destroy_tester_environments(old_test_script_dir) shutil.rmtree(old_test_script_dir, onerror=ignore_missing_dir_error) - diff --git a/src/autotester/server/utils/file_management.py b/src/autotester/server/utils/file_management.py index 3a59974b..45ca6a87 100644 --- a/src/autotester/server/utils/file_management.py +++ b/src/autotester/server/utils/file_management.py @@ -9,13 +9,16 @@ FILES_DIRNAME = config['_workspace_contents', '_files_dir'] + def clean_dir_name(name): """ Return name modified so that it can be used as a unix style directory name """ return name.replace('/', '_') + def random_tmpfile_name(): return os.path.join(tempfile.gettempdir(), uuid.uuid4().hex) + def 
recursive_iglob(root_dir): """ Walk breadth first over a directory tree starting at root_dir and @@ -31,6 +34,7 @@ def recursive_iglob(root_dir): else: raise ValueError('directory does not exist: {}'.format(root_dir)) + def copy_tree(src, dst, exclude=tuple()): """ Recursively copy all files and subdirectories in the path @@ -52,6 +56,7 @@ def copy_tree(src, dst, exclude=tuple()): copied.append((fd, target)) return copied + def ignore_missing_dir_error(_func, _path, excinfo): """ Used by shutil.rmtree to ignore a FileNotFoundError """ err_type, err_inst, traceback = excinfo @@ -59,6 +64,7 @@ def ignore_missing_dir_error(_func, _path, excinfo): return raise err_inst + def move_tree(src, dst): """ Recursively move all files and subdirectories in the path @@ -70,6 +76,7 @@ def move_tree(src, dst): shutil.rmtree(src, onerror=ignore_missing_dir_error) return moved + @contextmanager def fd_open(path, flags=os.O_RDONLY, *args, **kwargs): """ @@ -83,6 +90,7 @@ def fd_open(path, flags=os.O_RDONLY, *args, **kwargs): finally: os.close(fd) + @contextmanager def fd_lock(file_descriptor, exclusive=True): """ @@ -96,6 +104,7 @@ def fd_lock(file_descriptor, exclusive=True): finally: fcntl.flock(file_descriptor, fcntl.LOCK_UN) + def copy_test_script_files(markus_address, assignment_id, tests_path): """ Copy test script files for a given assignment to the tests_path @@ -110,6 +119,7 @@ def copy_test_script_files(markus_address, assignment_id, tests_path): return copy_tree(test_script_dir, tests_path) return [] + def setup_files(files_path, tests_path, markus_address, assignment_id): """ Copy test script files and student files to the working directory tests_path, diff --git a/src/autotester/server/utils/form_validation.py b/src/autotester/server/utils/form_validation.py index e6f6b207..96f51a3b 100644 --- a/src/autotester/server/utils/form_validation.py +++ b/src/autotester/server/utils/form_validation.py @@ -2,6 +2,7 @@ from jsonschema.exceptions import best_match from copy import deepcopy + def extend_with_default(validator_class=Draft7Validator): """ Extends a validator class to add defaults before validation. 
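The body of extend_with_default is not shown in this hunk; the well-known jsonschema recipe it builds on (before the custom set_oneOf_defaults handling in the next hunk) looks roughly like the following sketch, which is an illustration rather than the project's exact code:

from jsonschema import Draft7Validator, validators

def extend_with_default_sketch(validator_class=Draft7Validator):
    validate_properties = validator_class.VALIDATORS['properties']

    def set_defaults(validator, properties, instance, schema):
        # fill in any missing keys that declare a "default" before validating
        if isinstance(instance, dict):
            for prop, subschema in properties.items():
                if isinstance(subschema, dict) and 'default' in subschema:
                    instance.setdefault(prop, subschema['default'])
        yield from validate_properties(validator, properties, instance, schema)

    return validators.extend(validator_class, {'properties': set_defaults})

DefaultValidator = extend_with_default_sketch()
obj = {}
DefaultValidator({'properties': {'timeout': {'default': 30}}}).validate(obj)  # obj == {'timeout': 30}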
@@ -89,6 +90,7 @@ def set_oneOf_defaults(validator, properties, instance, schema): return validators.extend(validator_class, custom_validators) + def validate_with_defaults(schema, obj, validator_class=Draft7Validator, best_only=True): """ Return an iterator that yields errors from validating obj on schema @@ -103,6 +105,7 @@ def validate_with_defaults(schema, obj, validator_class=Draft7Validator, best_on return best_match(errors) return errors + def is_valid(obj, schema, validator_class=Draft7Validator): """ Return True if is valid for schema using the diff --git a/src/autotester/server/utils/path_management.py b/src/autotester/server/utils/path_management.py index 8fa7eba1..531869e8 100644 --- a/src/autotester/server/utils/path_management.py +++ b/src/autotester/server/utils/path_management.py @@ -19,6 +19,7 @@ def current_directory(path): else: yield + @contextmanager def add_path(path, prepend=True): """ @@ -37,4 +38,4 @@ def add_path(path, prepend=True): i = (sys.path if prepend else sys.path[::-1]).index(path) sys.path.pop(i) except ValueError: - pass \ No newline at end of file + pass diff --git a/src/autotester/server/utils/redis_management.py b/src/autotester/server/utils/redis_management.py index b16a345d..afabf5c5 100644 --- a/src/autotester/server/utils/redis_management.py +++ b/src/autotester/server/utils/redis_management.py @@ -8,6 +8,7 @@ CURRENT_TEST_SCRIPT_HASH = config['redis', '_current_test_script_hash'] POP_INTERVAL_HASH = config['redis', '_pop_interval_hash'] + def redis_connection(): """ Return the currently open redis connection object. If there is no @@ -20,6 +21,7 @@ def redis_connection(): rq.use_connection(redis=redis.Redis.from_url(config['redis', 'url'])) return rq.get_current_connection() + def get_test_script_key(markus_address, assignment_id): """ Return unique key for each assignment used for @@ -28,6 +30,7 @@ def get_test_script_key(markus_address, assignment_id): clean_markus_address = file_management.clean_dir_name(markus_address) return f'{clean_markus_address}_{assignment_id}' + def test_script_directory(markus_address, assignment_id, set_to=None): """ Return the directory containing the test scripts for a specific assignment. 
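test_script_directory presumably wraps a lookup in the CURRENT_TEST_SCRIPT_HASH redis hash keyed by get_test_script_key; a rough sketch of that pattern with redis-py (hash name, key and path below are invented for illustration):

import redis

r = redis.Redis.from_url('redis://127.0.0.1:6379/0')  # same pattern as redis_connection()
key = 'example.edu_1'                                  # cf. get_test_script_key(markus_address, assignment_id)
r.hset('autotest:current_test_scripts', key, '/workspace/scripts/example.edu_1/v2')
r.hget('autotest:current_test_scripts', key)           # b'/workspace/scripts/example.edu_1/v2'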
@@ -55,6 +58,7 @@ def update_pop_interval_stat(queue_name): r.hset(POP_INTERVAL_HASH, '{}_last'.format(queue_name), now) r.hincrby(POP_INTERVAL_HASH, '{}_count'.format(queue_name), 1) + def clear_pop_interval_stat(queue_name): """ Reset the values contained in the redis hash named REDIS_POP_HASH for @@ -66,6 +70,7 @@ def clear_pop_interval_stat(queue_name): r.hset(POP_INTERVAL_HASH, '{}_last'.format(queue_name), 0) r.hset(POP_INTERVAL_HASH, '{}_count'.format(queue_name), 0) + def get_pop_interval_stat(queue_name): """ Return the following data about the queue named queue_name: @@ -82,6 +87,7 @@ def get_pop_interval_stat(queue_name): count = r.hget(POP_INTERVAL_HASH, '{}_count'.format(queue_name)) return start, last, count + def get_avg_pop_interval(queue_name): """ Return the average interval between pops off of the end of the @@ -99,6 +105,7 @@ def get_avg_pop_interval(queue_name): count -= 1 return (last-start) / count if count else 0 + def clean_up(): """ Reset the pop interval data for each empty queue """ with rq.Connection(redis_connection()): diff --git a/src/autotester/server/utils/resource_management.py b/src/autotester/server/utils/resource_management.py index aa929d36..915d6a8b 100644 --- a/src/autotester/server/utils/resource_management.py +++ b/src/autotester/server/utils/resource_management.py @@ -3,9 +3,11 @@ RLIMIT_ADJUSTMENTS = {'nproc': 10} + def rlimit_str2int(rlimit_string): return getattr(resource, f'RLIMIT_{rlimit_string.upper()}') + def set_rlimits_before_test(): """ Sets rlimit settings specified in config file @@ -31,6 +33,7 @@ def set_rlimits_before_test(): resource.setrlimit(limit, (soft, hard)) + def set_rlimits_before_cleanup(): """ Sets the rlimit settings specified in RLIMIT_ADJUSTMENTS diff --git a/src/autotester/server/utils/string_management.py b/src/autotester/server/utils/string_management.py index 360d7e85..81a99afa 100644 --- a/src/autotester/server/utils/string_management.py +++ b/src/autotester/server/utils/string_management.py @@ -1,12 +1,15 @@ import json + def stringify(*args): for a in args: yield str(a) + def decode_if_bytes(b, format_='utf-8'): return b.decode(format_) if isinstance(b, bytes) else b + def loads_partial_json(json_string, expected_type=None): """ Return a list of objects loaded from a json string and a boolean diff --git a/src/autotester/server/utils/user_management.py b/src/autotester/server/utils/user_management.py index 0cfd9383..6f0b3f68 100644 --- a/src/autotester/server/utils/user_management.py +++ b/src/autotester/server/utils/user_management.py @@ -9,6 +9,7 @@ def current_user(): return pwd.getpwuid(os.getuid()).pw_name + def tester_user(): """ Get the workspace for the tester user specified by the MARKUSWORKERUSER @@ -31,7 +32,8 @@ def tester_user(): return user_name, decode_if_bytes(user_workspace) + def get_reaper_username(test_username): - for workers in WORKERS: - if workers['name'] == test_username: - return workers['reaper'] + for worker_name, reaper_name in config['users', 'workers']: + if worker_name == test_username: + return reaper_name diff --git a/src/autotester/testers/custom/bin/install.sh b/src/autotester/testers/custom/bin/install.sh index d667c5fb..cb7db56a 100755 --- a/src/autotester/testers/custom/bin/install.sh +++ b/src/autotester/testers/custom/bin/install.sh @@ -9,9 +9,9 @@ if [[ $# -ne 0 ]]; then fi # vars -THISSCRIPT=$(readlink -f ${BASH_SOURCE}) -TESTERDIR=$(dirname $(dirname ${THISSCRIPT})) -SPECSDIR=${TESTERDIR}/specs +THISSCRIPT=$(readlink -f "${BASH_SOURCE[0]}") +THISDIR=$(dirname 
"${THISSCRIPT}") +SPECSDIR=$(readlink -f "${THISDIR}/../specs") # main -touch ${SPECSDIR}/.installed +touch "${SPECSDIR}/.installed" diff --git a/src/autotester/testers/custom/bin/uninstall.sh b/src/autotester/testers/custom/bin/uninstall.sh index 123b84af..679a58f6 100755 --- a/src/autotester/testers/custom/bin/uninstall.sh +++ b/src/autotester/testers/custom/bin/uninstall.sh @@ -7,9 +7,9 @@ if [[ $# -ne 0 ]]; then fi # vars -THISSCRIPT=$(readlink -f ${BASH_SOURCE}) -TESTERDIR=$(dirname $(dirname ${THISSCRIPT})) -SPECSDIR=${TESTERDIR}/specs +THISSCRIPT=$(readlink -f "${BASH_SOURCE[0]}") +THISDIR=$(dirname "${THISSCRIPT}") +SPECSDIR=$(readlink -f "${THISDIR}/../specs") # main -rm -f ${SPECSDIR}/.installed +rm -f "${SPECSDIR}/.installed" diff --git a/src/autotester/testers/custom/default_hooks.py b/src/autotester/testers/custom/default_hooks.py index 6f35d9f8..c6800893 100644 --- a/src/autotester/testers/custom/default_hooks.py +++ b/src/autotester/testers/custom/default_hooks.py @@ -1,9 +1,11 @@ import os + def before_all_custom(settings, **_kwargs): """ Make script files executable """ for test_data in settings['test_data']: for script_file in test_data['script_files']: os.chmod(script_file, 0o755) + HOOKS = [before_all_custom] diff --git a/src/autotester/testers/custom/specs/settings_schema.json b/src/autotester/testers/custom/specs/settings_schema.json index 81db9977..7eb79e7d 100644 --- a/src/autotester/testers/custom/specs/settings_schema.json +++ b/src/autotester/testers/custom/specs/settings_schema.json @@ -67,4 +67,4 @@ } } } -} \ No newline at end of file +} diff --git a/src/autotester/testers/haskell/bin/install.sh b/src/autotester/testers/haskell/bin/install.sh index b3fb9953..275513fb 100755 --- a/src/autotester/testers/haskell/bin/install.sh +++ b/src/autotester/testers/haskell/bin/install.sh @@ -24,11 +24,11 @@ if [[ $# -ne 0 ]]; then fi # vars -THISSCRIPT=$(readlink -f ${BASH_SOURCE}) -TESTERDIR=$(dirname $(dirname ${THISSCRIPT})) -SPECSDIR=${TESTERDIR}/specs +THISSCRIPT=$(readlink -f "${BASH_SOURCE[0]}") +THISDIR=$(dirname "${THISSCRIPT}") +SPECSDIR=$(readlink -f "${THISDIR}/../specs") # main install_packages install_haskell_packages -touch ${SPECSDIR}/.installed +touch "${SPECSDIR}/.installed" diff --git a/src/autotester/testers/haskell/bin/uninstall.sh b/src/autotester/testers/haskell/bin/uninstall.sh index 74b0420a..880e64a9 100755 --- a/src/autotester/testers/haskell/bin/uninstall.sh +++ b/src/autotester/testers/haskell/bin/uninstall.sh @@ -7,11 +7,11 @@ if [[ $# -ne 0 ]]; then fi # vars -THISSCRIPT=$(readlink -f ${BASH_SOURCE}) -TESTERDIR=$(dirname $(dirname ${THISSCRIPT})) -SPECSDIR=${TESTERDIR}/specs +THISSCRIPT=$(readlink -f "${BASH_SOURCE[0]}") +THISDIR=$(dirname "${THISSCRIPT}") +SPECSDIR=$(readlink -f "${THISDIR}/../specs") # main echo "[HASKELL-UNINSTALL] The following system packages have not been uninstalled: ghc cabal-install python3. You may uninstall them if you wish." echo "[HASKELL-UNINSTALL] The following cabal packages have not been uninstalled: tasty-stats tasty-discover tasty-quickcheck. You may uninstall them if you can figure out how." 
-rm -f ${SPECSDIR}/.installed +rm -f "${SPECSDIR}/.installed" diff --git a/src/autotester/testers/haskell/markus_haskell_tester.py b/src/autotester/testers/haskell/markus_haskell_tester.py index 746e3000..c9af4f3b 100644 --- a/src/autotester/testers/haskell/markus_haskell_tester.py +++ b/src/autotester/testers/haskell/markus_haskell_tester.py @@ -5,6 +5,7 @@ from testers.markus_tester import MarkusTester, MarkusTest, MarkusTestError + class MarkusHaskellTest(MarkusTest): def __init__(self, tester, test_file, result, feedback_open=None): @@ -29,18 +30,18 @@ def run(self): else: return self.error(message=self.message) -class MarkusHaskellTester(MarkusTester): +class MarkusHaskellTester(MarkusTester): # column indexes of relevant data from tasty-stats csv # reference: http://hackage.haskell.org/package/tasty-stats - TASTYSTATS = {'name' : 1, - 'time' : 2, - 'result' : 3, - 'description' : -1} + TASTYSTATS = {'name': 1, + 'time': 2, + 'result': 3, + 'description': -1} def __init__(self, specs, test_class=MarkusHaskellTest): super().__init__(specs, test_class) - + def _test_run_flags(self, test_file): """ Return a list of additional arguments to the tasty-discover executable @@ -61,10 +62,10 @@ def _parse_test_results(self, reader): """ test_results = [] for line in reader: - result = {'status' : line[self.TASTYSTATS['result']], - 'name' : line[self.TASTYSTATS['name']], - 'description' : line[self.TASTYSTATS['description']], - 'time' : line[self.TASTYSTATS['time']]} + result = {'status': line[self.TASTYSTATS['result']], + 'name': line[self.TASTYSTATS['name']], + 'description': line[self.TASTYSTATS['description']], + 'time': line[self.TASTYSTATS['time']]} test_results.append(result) return test_results diff --git a/src/autotester/testers/haskell/specs/settings_schema.json b/src/autotester/testers/haskell/specs/settings_schema.json index 463b1d59..c7defbd3 100644 --- a/src/autotester/testers/haskell/specs/settings_schema.json +++ b/src/autotester/testers/haskell/specs/settings_schema.json @@ -79,4 +79,4 @@ } } } -} \ No newline at end of file +} diff --git a/src/autotester/testers/java/bin/install.sh b/src/autotester/testers/java/bin/install.sh index af6fe57b..a65cda86 100755 --- a/src/autotester/testers/java/bin/install.sh +++ b/src/autotester/testers/java/bin/install.sh @@ -9,14 +9,14 @@ install_packages() { compile_tester() { echo "[JAVA-INSTALL] Compiling tester" - pushd ${JAVADIR} > /dev/null + pushd "${JAVADIR}" > /dev/null ./gradlew installDist --no-daemon popd > /dev/null } update_specs() { echo "[JAVA-INSTALL] Updating specs" - echo '{}' | jq ".path_to_tester_jars = \"${JAVADIR}/build/install/MarkusJavaTester/lib\"" > ${TESTERDIR}/specs/install_settings.json + echo '{}' | jq ".path_to_tester_jars = \"${JAVADIR}/build/install/MarkusJavaTester/lib\"" > "${TESTERDIR}/specs/install_settings.json" } # script starts here @@ -26,13 +26,13 @@ if [[ $# -ne 0 ]]; then fi # vars -THISSCRIPT=$(readlink -f ${BASH_SOURCE}) -TESTERDIR=$(dirname $(dirname ${THISSCRIPT})) -SPECSDIR=${TESTERDIR}/specs -JAVADIR=${TESTERDIR}/lib +THISSCRIPT=$(readlink -f "${BASH_SOURCE[0]}") +THISDIR=$(dirname "${THISSCRIPT}") +SPECSDIR=$(readlink -f "${THISDIR}/../specs") +JAVADIR=$(readlink -f "${THISDIR}/../lib") # main install_packages compile_tester update_specs -touch ${SPECSDIR}/.installed +touch "${SPECSDIR}/.installed" diff --git a/src/autotester/testers/java/bin/uninstall.sh b/src/autotester/testers/java/bin/uninstall.sh index 148be4a6..9fad619d 100755 --- a/src/autotester/testers/java/bin/uninstall.sh +++ 
b/src/autotester/testers/java/bin/uninstall.sh @@ -2,13 +2,13 @@ remove_tester() { echo "[JAVA-UNINSTALL] Removing compiled tester" - rm -rf ${JAVADIR}/build - rm -rf ${JAVADIR}/.gradle + rm -rf "${JAVADIR}/build" + rm -rf "${JAVADIR}/.gradle" } reset_specs() { echo "[JAVA-UNINSTALL] Resetting specs" - rm -f ${TESTERDIR}/specs/install_settings.json + rm -f "${SPECSDIR}/install_settings.json" } # script starts here @@ -18,13 +18,13 @@ if [[ $# -ne 0 ]]; then fi # vars -THISSCRIPT=$(readlink -f ${BASH_SOURCE}) -TESTERDIR=$(dirname $(dirname ${THISSCRIPT})) -SPECSDIR=${TESTERDIR}/specs -JAVADIR=${TESTERDIR}/lib +THISSCRIPT=$(readlink -f "${BASH_SOURCE[0]}") +THISDIR=$(dirname "${THISSCRIPT}") +SPECSDIR=$(readlink -f "${THISDIR}/../specs") +JAVADIR=$(readlink -f "${THISDIR}/../lib") # main remove_tester reset_specs echo "[JAVA-UNINSTALL] The following system packages have not been uninstalled: python3 openjdk-12-jdk jq. You may uninstall them if you wish." -rm -f ${SPECSDIR}/.installed +rm -f "${SPECSDIR}/.installed" diff --git a/src/autotester/testers/java/specs/settings_schema.json b/src/autotester/testers/java/specs/settings_schema.json index deb98b43..a823c0b9 100644 --- a/src/autotester/testers/java/specs/settings_schema.json +++ b/src/autotester/testers/java/specs/settings_schema.json @@ -67,4 +67,4 @@ } } } -} \ No newline at end of file +} diff --git a/src/autotester/testers/markus_tester.py b/src/autotester/testers/markus_tester.py index 0f70ae0f..f08ad318 100644 --- a/src/autotester/testers/markus_tester.py +++ b/src/autotester/testers/markus_tester.py @@ -9,6 +9,7 @@ class MarkusTestError(Exception): pass + class MarkusTest(ABC): class Status(enum.Enum): @@ -224,8 +225,8 @@ def run_func_wrapper(self, *args, **kwargs): self.after_successful_test_run() except MarkusTestError as e: result_json = self.error(message=str(e)) - except Exception: - result_json = self.error(message=traceback.format_exc()) + except Exception as e: + result_json = self.error(message=f'{traceback.format_exc()}\n{e}') return result_json return run_func_wrapper @@ -287,8 +288,8 @@ def run_func_wrapper(self, *args, **kwargs): return run_func(self, *args, **kwargs) except MarkusTestError as e: print(MarkusTester.error_all(message=str(e), expected=True), flush=True) - except Exception: - print(MarkusTester.error_all(message=traceback.format_exc()), flush=True) + except Exception as e: + print(MarkusTester.error_all(message=f'{traceback.format_exc()}\n{e}'), flush=True) finally: self.after_tester_run() return run_func_wrapper diff --git a/src/autotester/testers/py/bin/create_environment.sh b/src/autotester/testers/py/bin/create_environment.sh index 899592f0..7d639b63 100755 --- a/src/autotester/testers/py/bin/create_environment.sh +++ b/src/autotester/testers/py/bin/create_environment.sh @@ -3,16 +3,17 @@ set -e create_venv() { - rm -rf ${VENV_DIR} # clean up existing venv if any - python${PY_VERSION} -m venv ${VENV_DIR} - source ${VENV_DIR}/bin/activate - pip install --upgrade pip - pip install wheel - pip install "${TESTERS_DIR}" - pip install -r "${THIS_DIR}/requirements.txt" - pip install -r <(echo ${PIP_REQUIREMENTS} | sed 's/\s\+/\n/g') # sub spaces for newlines - local pth_file=${VENV_DIR}/lib/python${PY_VERSION}/site-packages/lib.pth - echo ${LIB_DIR} >> ${pth_file} + rm -rf "${VENV_DIR}" # clean up existing venv if any + "python${PY_VERSION}" -m venv "${VENV_DIR}" + local pip + pip="${VENV_DIR}/bin/pip" + ${pip} install --upgrade pip + ${pip} install wheel + ${pip} install "${TESTERS_DIR}" + ${pip} install 
-r "${THIS_DIR}/requirements.txt" + ${pip} install "${PIP_REQUIREMENTS[@]}" + local pth_file="${VENV_DIR}/lib/python${PY_VERSION}/site-packages/lib.pth" + echo "${LIB_DIR}" >> "${pth_file}" } # script starts here @@ -23,15 +24,15 @@ fi # vars SETTINGS_JSON=$1 -ENV_DIR=$(echo ${SETTINGS_JSON} | jq --raw-output .env_loc) -PY_VERSION=$(echo ${SETTINGS_JSON} | jq --raw-output .env_data.python_version) -PIP_REQUIREMENTS=$(echo ${SETTINGS_JSON} | jq --raw-output .env_data.pip_requirements) +ENV_DIR=$(echo "${SETTINGS_JSON}" | jq --raw-output .env_loc) +PY_VERSION=$(echo "${SETTINGS_JSON}" | jq --raw-output .env_data.python_version) +read -r -a PIP_REQUIREMENTS <<< "$(echo "${SETTINGS_JSON}" | jq --raw-output .env_data.pip_requirements)" -VENV_DIR=${ENV_DIR}/venv -THIS_SCRIPT=$(readlink -f ${BASH_SOURCE}) -THIS_DIR=$(dirname ${THIS_SCRIPT}) -LIB_DIR=$(readlink -f ${THIS_DIR}/../lib) -TESTERS_DIR=$(readlink -f ${THIS_DIR}/../../../) +VENV_DIR="${ENV_DIR}/venv" +THIS_SCRIPT=$(readlink -f "${BASH_SOURCE[0]}") +THIS_DIR=$(dirname "${THIS_SCRIPT}") +LIB_DIR=$(readlink -f "${THIS_DIR}/../lib") +TESTERS_DIR=$(readlink -f "${THIS_DIR}/../../../") # main create_venv diff --git a/src/autotester/testers/py/bin/install.sh b/src/autotester/testers/py/bin/install.sh index 4f6d3ac9..17ace2f2 100755 --- a/src/autotester/testers/py/bin/install.sh +++ b/src/autotester/testers/py/bin/install.sh @@ -14,10 +14,10 @@ if [[ $# -ne 0 ]]; then fi # vars -THISSCRIPT=$(readlink -f ${BASH_SOURCE}) -TESTERDIR=$(dirname $(dirname ${THISSCRIPT})) -SPECSDIR=${TESTERDIR}/specs +THISSCRIPT=$(readlink -f "${BASH_SOURCE[0]}") +THISDIR=$(dirname "${THISSCRIPT}") +SPECSDIR=$(readlink -f "${THISDIR}/../specs") # main install_packages -touch ${SPECSDIR}/.installed +touch "${SPECSDIR}/.installed" diff --git a/src/autotester/testers/py/bin/uninstall.sh b/src/autotester/testers/py/bin/uninstall.sh index a984a04a..e5da7132 100755 --- a/src/autotester/testers/py/bin/uninstall.sh +++ b/src/autotester/testers/py/bin/uninstall.sh @@ -7,10 +7,10 @@ if [[ $# -ne 0 ]]; then fi # vars -THISSCRIPT=$(readlink -f ${BASH_SOURCE}) -TESTERDIR=$(dirname $(dirname ${THISSCRIPT})) -SPECSDIR=${TESTERDIR}/specs +THISSCRIPT=$(readlink -f "${BASH_SOURCE[0]}") +THISDIR=$(dirname "${THISSCRIPT}") +SPECSDIR=$(readlink -f "${THISDIR}/../specs") # main echo "[PYTHON-UNINSTALL] The following system packages have not been uninstalled: python3. You may uninstall them if you wish." 
-rm -f ${SPECSDIR}/.installed +rm -f "${SPECSDIR}/.installed" diff --git a/src/autotester/testers/py/markus_python_tester.py b/src/autotester/testers/py/markus_python_tester.py index eebab25f..8919d992 100644 --- a/src/autotester/testers/py/markus_python_tester.py +++ b/src/autotester/testers/py/markus_python_tester.py @@ -1,5 +1,4 @@ import os -import tempfile import unittest import pytest import sys @@ -20,7 +19,7 @@ def __init__(self, stream, descriptions, verbosity): def addSuccess(self, test): self.results.append({'status': 'success', - 'name' : test.id(), + 'name': test.id(), 'errors': '', 'description': test._testMethodDoc}) self.successes.append(test) @@ -28,14 +27,14 @@ def addSuccess(self, test): def addFailure(self, test, err): super().addFailure(test, err) self.results.append({'status': 'failure', - 'name' : test.id(), + 'name': test.id(), 'errors': self.failures[-1][-1], 'description': test._testMethodDoc}) def addError(self, test, err): super().addError(test, err) self.results.append({'status': 'error', - 'name' : test.id(), + 'name': test.id(), 'errors': self.errors[-1][-1], 'description': test._testMethodDoc}) @@ -99,7 +98,8 @@ class MarkusPythonTester(MarkusTester): def __init__(self, specs, test_class=MarkusPythonTest): super().__init__(specs, test_class) - def _load_unittest_tests(self, test_file): + @staticmethod + def _load_unittest_tests(test_file): """ Discover unittest tests in test_file and return a unittest.TestSuite that contains these tests @@ -129,14 +129,13 @@ def _run_pytest_tests(self, test_file): of these tests """ results = [] - this_dir = os.getcwd() with open(os.devnull, 'w') as null_out: try: sys.stdout = null_out verbosity = self.specs['test_data', 'output_verbosity'] plugin = MarkusPytestPlugin() pytest.main([test_file, f'--tb={verbosity}'], plugins=[plugin]) - results = list(plugin.results.values()) + results.extend(plugin.results.values()) finally: sys.stdout = sys.__stdout__ return results diff --git a/src/autotester/testers/pyta/bin/create_environment.sh b/src/autotester/testers/pyta/bin/create_environment.sh index 14e001c9..94136c03 100755 --- a/src/autotester/testers/pyta/bin/create_environment.sh +++ b/src/autotester/testers/pyta/bin/create_environment.sh @@ -3,34 +3,37 @@ set -e create_venv() { - rm -rf ${VENV_DIR} # clean up existing venv if any - python${PY_VERSION} -m venv ${VENV_DIR} - source ${VENV_DIR}/bin/activate - pip install --upgrade pip - pip install wheel - pip install "${TESTERS_DIR}" - pip install -r "${THIS_DIR}/requirements.txt" - pip install -r <(echo ${PIP_REQUIREMENTS} | sed 's/\s\+/\n/g') # sub spaces for newlines - local pth_file=${VENV_DIR}/lib/python${PY_VERSION}/site-packages/lib.pth - echo ${LIB_DIR} >> ${pth_file} + rm -rf "${VENV_DIR}" # clean up existing venv if any + "python${PY_VERSION}" -m venv "${VENV_DIR}" + local pip + pip="${VENV_DIR}/bin/pip" + ${pip} install --upgrade pip + ${pip} install wheel + ${pip} install "${TESTERS_DIR}" + ${pip} install -r "${THIS_DIR}/requirements.txt" + ${pip} install "${PIP_REQUIREMENTS[@]}" + local pth_file="${VENV_DIR}/lib/python${PY_VERSION}/site-packages/lib.pth" + echo "${LIB_DIR}" >> "${pth_file}" } # script starts here if [[ $# -lt 1 ]]; then echo "Usage: $0 settings_json" fi - + # vars SETTINGS_JSON=$1 -ENV_DIR=$(echo ${SETTINGS_JSON} | jq --raw-output .env_loc) -PY_VERSION=$(echo ${SETTINGS_JSON} | jq --raw-output .env_data.python_version) -PIP_REQUIREMENTS="$(echo ${SETTINGS_JSON} | jq --raw-output .env_data.pip_requirements)" +ENV_DIR=$(echo "${SETTINGS_JSON}" | jq 
--raw-output .env_loc) +PY_VERSION=$(echo "${SETTINGS_JSON}" | jq --raw-output .env_data.python_version) +read -r -a PIP_REQUIREMENTS <<< "$(echo "${SETTINGS_JSON}" | jq --raw-output .env_data.pip_requirements)" -VENV_DIR=${ENV_DIR}/venv -THIS_SCRIPT=$(readlink -f ${BASH_SOURCE}) -THIS_DIR=$(dirname ${THIS_SCRIPT}) -LIB_DIR=$(readlink -f ${THIS_DIR}/../lib) -TESTERS_DIR=$(readlink -f ${THIS_DIR}/../../../) +VENV_DIR="${ENV_DIR}/venv" +THIS_SCRIPT=$(readlink -f "${BASH_SOURCE[0]}") +THIS_DIR=$(dirname "${THIS_SCRIPT}") +LIB_DIR=$(readlink -f "${THIS_DIR}/../lib") +TESTERS_DIR=$(readlink -f "${THIS_DIR}/../../../") +# main create_venv + diff --git a/src/autotester/testers/pyta/bin/install.sh b/src/autotester/testers/pyta/bin/install.sh index da439f84..41a1b141 100755 --- a/src/autotester/testers/pyta/bin/install.sh +++ b/src/autotester/testers/pyta/bin/install.sh @@ -14,10 +14,10 @@ if [[ $# -ne 0 ]]; then fi # vars -THISSCRIPT=$(readlink -f ${BASH_SOURCE}) -TESTERDIR=$(dirname $(dirname ${THISSCRIPT})) -SPECSDIR=${TESTERDIR}/specs +THISSCRIPT=$(readlink -f "${BASH_SOURCE[0]}") +THISDIR=$(dirname "${THISSCRIPT}") +SPECSDIR=$(readlink -f "${THISDIR}/../specs") # main install_packages -touch ${SPECSDIR}/.installed +touch "${SPECSDIR}/.installed" diff --git a/src/autotester/testers/pyta/bin/uninstall.sh b/src/autotester/testers/pyta/bin/uninstall.sh index d794d018..8d898d35 100755 --- a/src/autotester/testers/pyta/bin/uninstall.sh +++ b/src/autotester/testers/pyta/bin/uninstall.sh @@ -7,10 +7,10 @@ if [[ $# -ne 0 ]]; then fi # vars -THISSCRIPT=$(readlink -f ${BASH_SOURCE}) -TESTERDIR=$(dirname $(dirname ${THISSCRIPT})) -SPECSDIR=${TESTERDIR}/specs +THISSCRIPT=$(readlink -f "${BASH_SOURCE[0]}") +THISDIR=$(dirname "${THISSCRIPT}") +SPECSDIR=$(readlink -f "${THISDIR}/../specs") # main echo "[PYTA-UNINSTALL] The following system packages have not been uninstalled: python3. You may uninstall them if you wish." 
-rm -f ${SPECSDIR}/.installed +rm -f "${SPECSDIR}/.installed" diff --git a/src/autotester/testers/pyta/markus_pyta_tester.py b/src/autotester/testers/pyta/markus_pyta_tester.py index c2302b02..2cad5b95 100644 --- a/src/autotester/testers/pyta/markus_pyta_tester.py +++ b/src/autotester/testers/pyta/markus_pyta_tester.py @@ -12,11 +12,13 @@ class MarkusPyTAReporter(PositionReporter): + def __init__(self, *args, **kwargs): + super().__init__(self, *args, **kwargs) + self._sorted_error_messages = defaultdict(list) + def print_messages(self, level='all'): # print to feedback file, then reset and generate data for annotations PlainReporter.print_messages(self, level) - self._sorted_error_messages = defaultdict(list) - self._sorted_style_messages = defaultdict(list) super().print_messages(level) def output_blob(self): @@ -124,5 +126,3 @@ def run(self): max_points = test_data.get('max_points', 10) test = self.test_class(self, student_file_path, max_points, feedback_open) print(test.run()) - - diff --git a/src/autotester/testers/racket/bin/install.sh b/src/autotester/testers/racket/bin/install.sh index a1e3412c..e21a6016 100755 --- a/src/autotester/testers/racket/bin/install.sh +++ b/src/autotester/testers/racket/bin/install.sh @@ -14,10 +14,10 @@ if [[ $# -ne 0 ]]; then fi # vars -THISSCRIPT=$(readlink -f ${BASH_SOURCE}) -TESTERDIR=$(dirname $(dirname ${THISSCRIPT})) -SPECSDIR=${TESTERDIR}/specs +THISSCRIPT=$(readlink -f "${BASH_SOURCE[0]}") +THISDIR=$(dirname "${THISSCRIPT}") +SPECSDIR=$(readlink -f "${THISDIR}/../specs") # main install_packages -touch ${SPECSDIR}/.installed +touch "${SPECSDIR}/.installed" diff --git a/src/autotester/testers/racket/bin/uninstall.sh b/src/autotester/testers/racket/bin/uninstall.sh index 80a53025..557ca258 100755 --- a/src/autotester/testers/racket/bin/uninstall.sh +++ b/src/autotester/testers/racket/bin/uninstall.sh @@ -7,10 +7,10 @@ if [[ $# -ne 0 ]]; then fi # vars -THISSCRIPT=$(readlink -f ${BASH_SOURCE}) -TESTERDIR=$(dirname $(dirname ${THISSCRIPT})) -SPECSDIR=${TESTERDIR}/specs +THISSCRIPT=$(readlink -f "${BASH_SOURCE[0]}") +THISDIR=$(dirname "${THISSCRIPT}") +SPECSDIR=$(readlink -f "${THISDIR}/../specs") # main echo "[RACKET-UNINSTALL] The following system packages have not been uninstalled: racket python3. You may uninstall them if you wish." -rm -f ${SPECSDIR}/.installed +rm -f "${SPECSDIR}/.installed" diff --git a/src/autotester/testers/racket/lib/markus.rkt b/src/autotester/testers/racket/lib/markus.rkt index abb4294e..d8482914 100755 --- a/src/autotester/testers/racket/lib/markus.rkt +++ b/src/autotester/testers/racket/lib/markus.rkt @@ -84,4 +84,3 @@ (set! 
test-results (run-test (dynamic-require/expose (string->path test-file) test-suite-sym))))) (write-json (map show-test-result test-results))) - diff --git a/src/autotester/testers/racket/specs/settings_schema.json b/src/autotester/testers/racket/specs/settings_schema.json index 49abc00c..53015457 100644 --- a/src/autotester/testers/racket/specs/settings_schema.json +++ b/src/autotester/testers/racket/specs/settings_schema.json @@ -78,4 +78,4 @@ } } } -} \ No newline at end of file +} diff --git a/src/autotester/tests/cli_test.py b/src/autotester/tests/cli_test.py index 0c8d8f1b..ff12deee 100644 --- a/src/autotester/tests/cli_test.py +++ b/src/autotester/tests/cli_test.py @@ -4,11 +4,11 @@ import pytest import inspect import tempfile -import rq import glob from unittest.mock import patch, ANY, Mock from contextlib import contextmanager from fakeredis import FakeStrictRedis +from rq.exceptions import NoSuchJobError from autotester import cli @@ -25,7 +25,7 @@ def tmp_script_dir(settings_dict): with tempfile.TemporaryDirectory() as tmp_dir: files_dir = os.path.join(tmp_dir, 'files') os.mkdir(files_dir) - with open(os.path.join(files_dir, '.gitkeep'), 'w') as f: + with open(os.path.join(files_dir, '.gitkeep'), 'w'): pass with open(os.path.join(tmp_dir, 'settings.json'), 'w') as f: json.dump(settings_dict, f) @@ -34,7 +34,7 @@ def tmp_script_dir(settings_dict): @pytest.fixture(autouse=True) -def empty_test_script_dir(request, redis): +def empty_test_script_dir(redis): empty_settings = {"testers": [{"test_data": []}]} with tmp_script_dir(empty_settings) as tmp_dir: yield tmp_dir @@ -70,7 +70,8 @@ class DummyTestError(Exception): class TestEnqueueTest: - def get_kwargs(self, **kw): + @staticmethod + def get_kwargs(**kw): param_kwargs = {k: '' for k in inspect.signature(cli.run_test).parameters} return {**param_kwargs, **kw} @@ -168,22 +169,20 @@ def test_can_enqueue_test_with_timeout(self, mock_enqueue_call): mock_enqueue_call.assert_called_with(ANY, kwargs=ANY, job_id=ANY, timeout=15) def test_cleans_up_files_on_error(self, mock_rmtree): - try: + with pytest.raises(Exception): cli.enqueue_test('Admin', 1, **self.get_kwargs(files_path='something')) - except Exception as e: - mock_rmtree.assert_called_once() - else: - pytest.fail('This call to run_test should have failed. 
See other failures for details') class TestUpdateSpecs: - def get_kwargs(self, **kw): + @staticmethod + def get_kwargs(**kw): param_kwargs = {k: '' for k in inspect.signature(cli.update_test_specs).parameters} return {**param_kwargs, **kw} def test_fails_when_schema_is_invalid(self): - with patch('autotester.server.utils.form_validation.validate_with_defaults', return_value=DummyTestError('error')): + with patch('autotester.server.utils.form_validation.validate_with_defaults', + return_value=DummyTestError('error')): with patch('autotester.cli.update_test_specs'): try: cli.update_specs('', **self.get_kwargs(schema={})) @@ -193,8 +192,7 @@ def test_fails_when_schema_is_invalid(self): def test_succeeds_when_schema_is_valid(self): with patch('autotester.server.utils.form_validation.validate_with_defaults', return_value=[]): - with patch('autotester.cli.update_test_specs') as p: - print(cli.update_test_specs) + with patch('autotester.cli.update_test_specs'): try: cli.update_specs('', **self.get_kwargs(schema={})) except DummyTestError: @@ -207,14 +205,11 @@ def test_calls_update_test_specs(self): update_test_specs.assert_called_once() def test_cleans_up_files_on_error(self, mock_rmtree): - with patch('autotester.server.utils.form_validation.validate_with_defaults', return_value=DummyTestError('error')): + with patch('autotester.server.utils.form_validation.validate_with_defaults', + return_value=DummyTestError('error')): with patch('autotester.cli.update_test_specs'): - try: + with pytest.raises(Exception): cli.update_specs(**self.get_kwargs(schema={}, files_path='test_files')) - except Exception: - mock_rmtree.assert_called_once() - else: - pytest.fail('This call to update_specs should have failed. See other failures for details') @pytest.fixture @@ -228,8 +223,8 @@ def mock_rq_job(): class TestCancelTest: def test_do_nothing_if_job_does_not_exist(self, mock_rq_job): - Job, mock_job = mock_rq_job - Job.fetch.side_effect = rq.exceptions.NoSuchJobError + job_class, mock_job = mock_rq_job + job_class.fetch.side_effect = NoSuchJobError cli.cancel_test('something', [1]) mock_job.cancel.assert_not_called() @@ -272,7 +267,8 @@ def test_remove_files_when_cancelling_multiple_jobs(self, mock_rq_job, mock_rmtr class TestGetSchema: - def fake_installed_testers(self, installed): + @staticmethod + def fake_installed_testers(installed): root_dir = os.path.dirname(os.path.abspath(cli.__file__)) paths = [] for tester in installed: @@ -280,7 +276,8 @@ def fake_installed_testers(self, installed): paths.append(os.path.join(glob.glob(glob_pattern)[0], '.installed')) return paths - def assert_tester_in_schema(self, tester, schema): + @staticmethod + def assert_tester_in_schema(tester, schema): assert tester in schema["definitions"]["installed_testers"]["enum"] installed = [] for option in schema['definitions']['tester_schemas']['oneOf']: From b9649bbdad3db44c01f08caef3452df67ac1b545 Mon Sep 17 00:00:00 2001 From: mishaschwartz Date: Fri, 7 Feb 2020 18:03:08 -0500 Subject: [PATCH 29/46] style: more style changes --- bin/generate_supervisord_conf.py | 1 + src/autotester/cli.py | 3 ++- src/autotester/exceptions.py | 2 +- src/autotester/server/server.py | 20 +++++++++---------- .../server/utils/redis_management.py | 2 +- .../server/utils/resource_management.py | 10 +++++----- .../server/utils/string_management.py | 2 +- .../server/utils/user_management.py | 3 --- 8 files changed, 21 insertions(+), 22 deletions(-) diff --git a/bin/generate_supervisord_conf.py b/bin/generate_supervisord_conf.py index d0c51414..f177a5f5 
100755 --- a/bin/generate_supervisord_conf.py +++ b/bin/generate_supervisord_conf.py @@ -64,6 +64,7 @@ def write_conf_file(rq, conf_filename, user_names): directory=THIS_DIR) f.write(c) + if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('rq') diff --git a/src/autotester/cli.py b/src/autotester/cli.py index 9cd1edbf..6446592d 100755 --- a/src/autotester/cli.py +++ b/src/autotester/cli.py @@ -11,7 +11,8 @@ import shutil from rq.exceptions import NoSuchJobError from functools import wraps -from autotester.exceptions import * +from autotester.exceptions import JobArgumentError, InvalidQueueError, \ + TestScriptFilesError, TestParameterError, MarkUsError from autotester.server.utils.redis_management import redis_connection, get_avg_pop_interval, test_script_directory from autotester.server.utils.file_management import ignore_missing_dir_error from autotester.config import config diff --git a/src/autotester/exceptions.py b/src/autotester/exceptions.py index dff31ce3..6e02f341 100644 --- a/src/autotester/exceptions.py +++ b/src/autotester/exceptions.py @@ -1,4 +1,4 @@ -""" +""" Custom Exception Type for use in MarkUs """ diff --git a/src/autotester/server/server.py b/src/autotester/server/server.py index 67206b95..9f9e81e5 100755 --- a/src/autotester/server/server.py +++ b/src/autotester/server/server.py @@ -41,7 +41,7 @@ def run_test_command(test_username=None): """ Return a command used to run test scripts as a the test_username - user, with the correct arguments. Set test_username to None to + user, with the correct arguments. Set test_username to None to run as the current user. >>> test_script = 'mysscript.py' @@ -61,7 +61,7 @@ def run_test_command(test_username=None): def create_test_group_result(stdout, stderr, run_time, extra_info, timeout=None): """ - Return the arguments passed to this function in a dictionary. If stderr is + Return the arguments passed to this function in a dictionary. If stderr is falsy, change it to None. Load the json string in stdout as a dictionary. """ test_results, malformed = loads_partial_json(stdout, dict) @@ -83,10 +83,10 @@ def kill_with_reaper(test_username): the permissions of this copied file so that it can be executed by the corresponding reaper user. Crucially, it sets the permissions to include the setuid bit so that the reaper user can manipulate the real uid and effective uid values of the process. - + The reaper user then runs this copied executable which kills all processes being run by the test_username user, deletes itself and exits with a 0 exit code if - sucessful. + sucessful. """ reaper_username = get_reaper_username(test_username) if reaper_username is not None: @@ -108,7 +108,7 @@ def kill_with_reaper(test_username): def kill_without_reaper(test_username): """ - Kill all processes that test_username is able to kill + Kill all processes that test_username is able to kill """ kill_cmd = f"sudo -u {test_username} -- bash -c 'kill -KILL -1'" subprocess.run(kill_cmd, shell=True) @@ -116,7 +116,7 @@ def kill_without_reaper(test_username): def create_test_script_command(env_dir, tester_type): """ - Return string representing a command line command to + Return string representing a command line command to run tests. """ import_line = TESTER_IMPORT_LINE[tester_type] @@ -138,8 +138,8 @@ def get_env_vars(test_username): def run_test_specs(cmd, test_specs, test_categories, tests_path, test_username, hooks): """ - Run each test script in test_scripts in the tests_path directory using the - command cmd. 
Return the results. + Run each test script in test_scripts in the tests_path directory using the + command cmd. Return the results. """ results = [] preexec_fn = set_rlimits_before_test() @@ -221,7 +221,7 @@ def clear_working_directory(tests_path, test_username): subprocess.run(chmod_cmd, shell=True) - # be careful not to remove the tests_path dir itself since we have to + # be careful not to remove the tests_path dir itself since we have to # set the group ownership with sudo (and that is only done in ../install.sh) clean_cmd = 'rm -rf {0}/.[!.]* {0}/*'.format(tests_path) subprocess.run(clean_cmd, shell=True) @@ -375,7 +375,7 @@ def destroy_tester_environments(old_test_script_dir): def update_test_specs(files_path, assignment_id, markus_address, test_specs): """ Copy new test scripts for a given assignment to from the files_path - to a new location. Indicate that these new test scripts should be used instead of + to a new location. Indicate that these new test scripts should be used instead of the old ones. And remove the old ones when it is safe to do so (they are not in the process of being copied to a working directory). diff --git a/src/autotester/server/utils/redis_management.py b/src/autotester/server/utils/redis_management.py index afabf5c5..509cea32 100644 --- a/src/autotester/server/utils/redis_management.py +++ b/src/autotester/server/utils/redis_management.py @@ -103,7 +103,7 @@ def get_avg_pop_interval(queue_name): except TypeError: return None count -= 1 - return (last-start) / count if count else 0 + return (last - start) / count if count else 0 def clean_up(): diff --git a/src/autotester/server/utils/resource_management.py b/src/autotester/server/utils/resource_management.py index 915d6a8b..c90f52b1 100644 --- a/src/autotester/server/utils/resource_management.py +++ b/src/autotester/server/utils/resource_management.py @@ -14,7 +14,7 @@ def set_rlimits_before_test(): This function ensures that for specific limits (defined in RLIMIT_ADJUSTMENTS), there are at least n=RLIMIT_ADJUSTMENTS[limit] resources available for cleanup processes that are not available for test processes. This ensures that cleanup - processes will always be able to run. + processes will always be able to run. """ for limit_str in config['rlimit_settings'].keys() | RLIMIT_ADJUSTMENTS.keys(): limit = rlimit_str2int(limit_str) @@ -22,15 +22,15 @@ def set_rlimits_before_test(): values = config['rlimit_settings'].get(limit_str, (resource.RLIM_INFINITY, resource.RLIM_INFINITY)) curr_soft, curr_hard = resource.getrlimit(limit) soft, hard = (min(vals) for vals in zip((curr_soft, curr_hard), values)) - # reduce the hard limit so that cleanup scripts will have at least - # adj more resources to use. + # reduce the hard limit so that cleanup scripts will have at least + # adj more resources to use. adj = RLIMIT_ADJUSTMENTS.get(limit_str, 0) if (curr_hard - hard) < adj: hard = curr_hard - adj # make sure the soft limit doesn't exceed the hard limit hard = max(hard, 0) soft = max(min(hard, soft), 0) - + resource.setrlimit(limit, (soft, hard)) @@ -38,7 +38,7 @@ def set_rlimits_before_cleanup(): """ Sets the rlimit settings specified in RLIMIT_ADJUSTMENTS so that both the soft and hard limits are set as high as possible. This ensures - that cleanup processes will have as many resources as possible to run. + that cleanup processes will have as many resources as possible to run. 
""" for limit_str in RLIMIT_ADJUSTMENTS: limit = rlimit_str2int(limit_str) diff --git a/src/autotester/server/utils/string_management.py b/src/autotester/server/utils/string_management.py index 81a99afa..6d4dc1c1 100644 --- a/src/autotester/server/utils/string_management.py +++ b/src/autotester/server/utils/string_management.py @@ -30,7 +30,7 @@ def loads_partial_json(json_string, expected_type=None): obj, ind = decoder.raw_decode(json_string[i:]) if expected_type is None or isinstance(obj, expected_type): results.append(obj) - elif json_string[i:i+ind].strip(): + elif json_string[i:i + ind].strip(): malformed = True i += ind except json.JSONDecodeError: diff --git a/src/autotester/server/utils/user_management.py b/src/autotester/server/utils/user_management.py index 6f0b3f68..cdbc822e 100644 --- a/src/autotester/server/utils/user_management.py +++ b/src/autotester/server/utils/user_management.py @@ -3,7 +3,6 @@ from autotester.exceptions import TesterUserError from autotester.config import config from autotester.server.utils.string_management import decode_if_bytes -from autotester.server.utils.redis_management import redis_connection def current_user(): @@ -18,8 +17,6 @@ def tester_user(): Raises an AutotestError if a tester user is not specified or if a workspace has not been setup for that user. """ - r = redis_connection() - user_name = os.environ.get('MARKUSWORKERUSER') if user_name is None: raise TesterUserError('No worker users available to run this job') From 3c5a74fe2072280231b5a1106fd7fac6eca9daa7 Mon Sep 17 00:00:00 2001 From: mishaschwartz Date: Fri, 7 Feb 2020 18:05:02 -0500 Subject: [PATCH 30/46] style: even more style changes --- bin/generate_supervisord_conf.py | 1 + src/autotester/server/server.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/bin/generate_supervisord_conf.py b/bin/generate_supervisord_conf.py index f177a5f5..dc60be86 100755 --- a/bin/generate_supervisord_conf.py +++ b/bin/generate_supervisord_conf.py @@ -33,6 +33,7 @@ THIS_DIR = os.path.dirname(os.path.abspath(__file__)) + def write_conf_file(rq, conf_filename, user_names): try: redis_url = f'--url {config["redis", "url"]}' diff --git a/src/autotester/server/server.py b/src/autotester/server/server.py index 9f9e81e5..5722fe38 100755 --- a/src/autotester/server/server.py +++ b/src/autotester/server/server.py @@ -85,7 +85,7 @@ def kill_with_reaper(test_username): the reaper user can manipulate the real uid and effective uid values of the process. The reaper user then runs this copied executable which kills all processes being - run by the test_username user, deletes itself and exits with a 0 exit code if + run by the test_username user, deletes itself and exits with a 0 exit code if sucessful. 
""" reaper_username = get_reaper_username(test_username) From 4a40a20902681ceb40a898415e15c945bb76b134 Mon Sep 17 00:00:00 2001 From: mishaschwartz Date: Mon, 10 Feb 2020 12:09:18 -0500 Subject: [PATCH 31/46] style: make style changes suggested by the black package --- bin/generate_supervisord_conf.py | 36 +- doc/hooks_example.py | 57 +++- setup.py | 56 ++- src/autotester/cli.py | 102 ++++-- src/autotester/config.py | 28 +- src/autotester/resources/ports/__init__.py | 8 +- .../resources/postgresql/__init__.py | 28 +- .../server/hooks_context/builtin_hooks.py | 36 +- .../server/hooks_context/hooks_context.py | 86 +++-- src/autotester/server/server.py | 318 +++++++++++------- .../server/utils/file_management.py | 20 +- .../server/utils/form_validation.py | 18 +- .../server/utils/redis_management.py | 28 +- .../server/utils/resource_management.py | 10 +- .../server/utils/string_management.py | 4 +- .../server/utils/user_management.py | 14 +- src/autotester/setup.py | 26 +- .../testers/custom/default_hooks.py | 4 +- .../testers/custom/markus_custom_tester.py | 5 +- .../custom/tests/student_files/submission.py | 12 +- .../testers/haskell/markus_haskell_tester.py | 58 ++-- .../testers/java/markus_java_tester.py | 54 +-- src/autotester/testers/markus_test_specs.py | 1 - src/autotester/testers/markus_tester.py | 142 +++++--- src/autotester/testers/py/lib/c_helper.py | 219 +++++++----- src/autotester/testers/py/lib/sql_helper.py | 74 ++-- .../testers/py/markus_python_tester.py | 85 +++-- .../testers/py/tests/script_files/test.py | 5 +- .../testers/py/tests/script_files/test2.py | 1 + .../testers/py/tests/script_files/test_sql.py | 16 +- .../testers/pyta/markus_pyta_tester.py | 82 +++-- .../pyta/tests/student_files/submission.py | 1 + .../testers/racket/markus_racket_tester.py | 39 ++- src/autotester/tests/cli_test.py | 229 +++++++------ 34 files changed, 1165 insertions(+), 737 deletions(-) diff --git a/bin/generate_supervisord_conf.py b/bin/generate_supervisord_conf.py index dc60be86..2ac23d3b 100755 --- a/bin/generate_supervisord_conf.py +++ b/bin/generate_supervisord_conf.py @@ -38,39 +38,41 @@ def write_conf_file(rq, conf_filename, user_names): try: redis_url = f'--url {config["redis", "url"]}' except KeyError: - redis_url = '' + redis_url = "" - with open(conf_filename, 'w') as f: + with open(conf_filename, "w") as f: f.write(HEADER) user_name_set = set(user_names) enough_users = True for worker_data in config["workers"]: - numprocs = worker_data['n'] - queues = worker_data['queues'] + numprocs = worker_data["n"] + queues = worker_data["queues"] if enough_users: for _ in range(numprocs): try: worker_user = user_name_set.pop() except KeyError: - msg = f'[AUTOTEST] Not enough worker users to create all rq workers.' - sys.stderr.write(f'{msg}\n') + msg = f"[AUTOTEST] Not enough worker users to create all rq workers." 
+ sys.stderr.write(f"{msg}\n") enough_users = False break - queue_str = ' '.join(queues) - c = CONTENT.format(worker_user=worker_user, - rq=rq, - worker_args=redis_url, - queues=queue_str, - numprocs=1, - directory=THIS_DIR) + queue_str = " ".join(queues) + c = CONTENT.format( + worker_user=worker_user, + rq=rq, + worker_args=redis_url, + queues=queue_str, + numprocs=1, + directory=THIS_DIR, + ) f.write(c) -if __name__ == '__main__': +if __name__ == "__main__": parser = argparse.ArgumentParser() - parser.add_argument('rq') - parser.add_argument('conf_filename') - parser.add_argument('user_names', nargs='+') + parser.add_argument("rq") + parser.add_argument("conf_filename") + parser.add_argument("user_names", nargs="+") args = parser.parse_args() write_conf_file(args.rq, args.conf_filename, args.user_names) diff --git a/doc/hooks_example.py b/doc/hooks_example.py index 31630f14..6ba8e782 100644 --- a/doc/hooks_example.py +++ b/doc/hooks_example.py @@ -6,25 +6,50 @@ # Helper functions -def upload_svn_file(api, group_repo_name, assignment_name, file_name, svn_user, svn_password, commit_message): - repo_url = f'{api.parsed_url.scheme}://{api.parsed_url.netloc}/svn{api.parsed_url.path}/{group_repo_name}' - svn_co_command = ['svn', 'co', '--username', svn_user, '--password', svn_password, repo_url] +def upload_svn_file( + api, + group_repo_name, + assignment_name, + file_name, + svn_user, + svn_password, + commit_message, +): + repo_url = f"{api.parsed_url.scheme}://{api.parsed_url.netloc}/svn{api.parsed_url.path}/{group_repo_name}" + svn_co_command = [ + "svn", + "co", + "--username", + svn_user, + "--password", + svn_password, + repo_url, + ] subprocess.run(svn_co_command, capture_output=True, check=True) repo_file_path = os.path.join(group_repo_name, assignment_name, file_name) previous_file = os.path.isfile(repo_file_path) shutil.copy2(file_name, repo_file_path) if not previous_file: - svn_add_command = ['svn', 'add', repo_file_path] + svn_add_command = ["svn", "add", repo_file_path] subprocess.run(svn_add_command, capture_output=True, check=True) - svn_ci_command = ['svn', 'ci', '--username', svn_user, '--password', svn_password, '-m', commit_message, - repo_file_path] + svn_ci_command = [ + "svn", + "ci", + "--username", + svn_user, + "--password", + svn_password, + "-m", + commit_message, + repo_file_path, + ] subprocess.run(svn_ci_command, capture_output=True, check=True) # Hooks def before_all(_api, _assignment_id, _group_id, _group_repo_name): # clean up unwanted files - pattern = os.path.join('**', '*.o') + pattern = os.path.join("**", "*.o") for file_path in glob.glob(pattern, recursive=True): os.remove(file_path) @@ -39,14 +64,24 @@ def after_each(_api, _assignment_id, _group_id, _group_repo_name): def after_all(api, assignment_id, group_id, group_repo_name): # upload feedback file - feedback_name = 'feedback_pyta.txt' + feedback_name = "feedback_pyta.txt" if os.path.isfile(feedback_name): with open(feedback_name) as feedback_open: - api.upload_feedback_file(assignment_id, group_id, feedback_name, feedback_open.read()) + api.upload_feedback_file( + assignment_id, group_id, feedback_name, feedback_open.read() + ) # upload in svn repo - upload_svn_file(api, group_repo_name, 'AX', feedback_name, 'svn_user', 'svn_password', 'Feedback file') + upload_svn_file( + api, + group_repo_name, + "AX", + feedback_name, + "svn_user", + "svn_password", + "Feedback file", + ) # upload annotations - annotations_name = 'feedback_pyta.json' + annotations_name = "feedback_pyta.json" if 
os.path.isfile(annotations_name): with open(annotations_name) as annotations_open: api.upload_annotations(assignment_id, group_id, json.load(annotations_open)) diff --git a/setup.py b/setup.py index f3a552b8..ac57e82a 100644 --- a/setup.py +++ b/setup.py @@ -2,33 +2,29 @@ test_exclusions = ["*.tests", "*.tests.*", "tests.*", "tests"] -setup(name='markus-autotester', - version='2.0', - description='Automatic tester for programming assignments', - url='https://github.com/MarkUsProject/markus-autotesting', - author='Misha Schwartz, Alessio Di Sandro', - author_email='mschwa@cs.toronto.edu', - license='MIT', - package_dir={'': 'src'}, - packages=find_packages(where='src', exclude=test_exclusions), - zip_safe=False, - install_requires=[ - 'redis==3.3.8', - 'requests==2.22.0', - 'rq==1.1.0', - 'supervisor==4.1.0', - 'PyYAML==5.1.2', - 'psycopg2-binary==2.8.4', - 'markusapi==0.0.1', - 'jsonschema==3.0.2' - ], - tests_require=[ - 'pytest==5.3.1', - 'hypothesis==4.47.3', - 'fakeredis==1.1.0' - ], - setup_requires=['pytest-runner'], - include_package_data=True, - entry_points={ - 'console_scripts': 'markus_autotester = autotester.cli:cli' - }) +setup( + name="markus-autotester", + version="2.0", + description="Automatic tester for programming assignments", + url="https://github.com/MarkUsProject/markus-autotesting", + author="Misha Schwartz, Alessio Di Sandro", + author_email="mschwa@cs.toronto.edu", + license="MIT", + package_dir={"": "src"}, + packages=find_packages(where="src", exclude=test_exclusions), + zip_safe=False, + install_requires=[ + "redis==3.3.8", + "requests==2.22.0", + "rq==1.1.0", + "supervisor==4.1.0", + "PyYAML==5.1.2", + "psycopg2-binary==2.8.4", + "markusapi==0.0.1", + "jsonschema==3.0.2", + ], + tests_require=["pytest==5.3.1", "hypothesis==4.47.3", "fakeredis==1.1.0"], + setup_requires=["pytest-runner"], + include_package_data=True, + entry_points={"console_scripts": "markus_autotester = autotester.cli:cli"}, +) diff --git a/src/autotester/cli.py b/src/autotester/cli.py index 6446592d..0fb47693 100755 --- a/src/autotester/cli.py +++ b/src/autotester/cli.py @@ -11,15 +11,24 @@ import shutil from rq.exceptions import NoSuchJobError from functools import wraps -from autotester.exceptions import JobArgumentError, InvalidQueueError, \ - TestScriptFilesError, TestParameterError, MarkUsError -from autotester.server.utils.redis_management import redis_connection, get_avg_pop_interval, test_script_directory +from autotester.exceptions import ( + JobArgumentError, + InvalidQueueError, + TestScriptFilesError, + TestParameterError, + MarkUsError, +) +from autotester.server.utils.redis_management import ( + redis_connection, + get_avg_pop_interval, + test_script_directory, +) from autotester.server.utils.file_management import ignore_missing_dir_error from autotester.config import config from autotester.server.utils import form_validation from autotester.server.server import run_test, update_test_specs -SETTINGS_FILENAME = config['_workspace_contents', '_settings_file'] +SETTINGS_FILENAME = config["_workspace_contents", "_settings_file"] def _format_job_id(markus_address, run_id, **_kw): @@ -27,7 +36,7 @@ def _format_job_id(markus_address, run_id, **_kw): Return a unique job id for each enqueued job based on the markus_address and the run_id """ - return '{}_{}'.format(markus_address, run_id) + return "{}_{}".format(markus_address, run_id) def _check_args(func, args=None, kwargs=None): @@ -40,7 +49,9 @@ def _check_args(func, args=None, kwargs=None): try: inspect.signature(func).bind(*args, 
**kwargs) except TypeError as e: - raise JobArgumentError('{}\nWith args: {}\nWith kwargs:{}'.format(e, args, tuple(kwargs))) + raise JobArgumentError( + "{}\nWith args: {}\nWith kwargs:{}".format(e, args, tuple(kwargs)) + ) def _get_queue(**kw): @@ -48,10 +59,12 @@ def _get_queue(**kw): Return a queue. The returned queue is one whose condition function returns True when called with the arguments in **kw. """ - for queue in config['queues']: - if form_validation.is_valid(kw, queue['schema']): - return rq.Queue(queue['name'], connection=redis_connection()) - raise InvalidQueueError('cannot enqueue job: unable to determine correct queue type') + for queue in config["queues"]: + if form_validation.is_valid(kw, queue["schema"]): + return rq.Queue(queue["name"], connection=redis_connection()) + raise InvalidQueueError( + "cannot enqueue job: unable to determine correct queue type" + ) def _print_queue_info(queue): @@ -67,7 +80,9 @@ def _print_queue_info(queue): def _check_test_script_files_exist(markus_address, assignment_id, **_kw): if test_script_directory(markus_address, assignment_id) is None: - raise TestScriptFilesError('cannot find test script files: please upload some before running tests') + raise TestScriptFilesError( + "cannot find test script files: please upload some before running tests" + ) def _clean_on_error(func): @@ -76,12 +91,13 @@ def _clean_on_error(func): Note: the files_path directory must be passed to the function as a keyword argument. """ + @wraps(func) def wrapper(*args, **kwargs): try: return func(*args, **kwargs) except Exception: - files_path = kwargs.get('files_path') + files_path = kwargs.get("files_path") if files_path: shutil.rmtree(files_path, onerror=ignore_missing_dir_error) raise @@ -99,15 +115,21 @@ def _get_job_timeout(test_specs, test_categories, multiplier=1.5): """ total_timeout = 0 test_data_count = 0 - for settings in test_specs['testers']: - for test_data in settings['test_data']: - test_category = test_data.get('category', []) - if set(test_category) & set(test_categories): # TODO: ensure test_categories is non-string collection type - total_timeout += test_data.get('timeout', 30) # TODO: don't hardcode default timeout + for settings in test_specs["testers"]: + for test_data in settings["test_data"]: + test_category = test_data.get("category", []) + if set(test_category) & set( + test_categories + ): # TODO: ensure test_categories is non-string collection type + total_timeout += test_data.get( + "timeout", 30 + ) # TODO: don't hardcode default timeout test_data_count += 1 if test_data_count: return int(total_timeout * multiplier) - raise TestParameterError(f'there are no tests of the given categories: {test_categories}') + raise TestParameterError( + f"there are no tests of the given categories: {test_categories}" + ) @_clean_on_error @@ -115,16 +137,18 @@ def enqueue_test(user_type, batch_id, **kw): """ Enqueue a test run job with keyword arguments specified in **kw """ - kw['enqueue_time'] = time.time() + kw["enqueue_time"] = time.time() queue = _get_queue(user_type=user_type, batch_id=batch_id, **kw) _check_args(run_test, kwargs=kw) _check_test_script_files_exist(**kw) - test_files_dir = test_script_directory(kw['markus_address'], kw['assignment_id']) + test_files_dir = test_script_directory(kw["markus_address"], kw["assignment_id"]) with open(os.path.join(test_files_dir, SETTINGS_FILENAME)) as f: test_specs = json.load(f) _print_queue_info(queue) - timeout = _get_job_timeout(test_specs, kw['test_categories']) - queue.enqueue_call(run_test, 
kwargs=kw, job_id=_format_job_id(**kw), timeout=timeout) + timeout = _get_job_timeout(test_specs, kw["test_categories"]) + queue.enqueue_call( + run_test, kwargs=kw, job_id=_format_job_id(**kw), timeout=timeout + ) @_clean_on_error @@ -133,7 +157,9 @@ def update_specs(test_specs, schema=None, **kw): Run test spec update function after validating the form data. """ if schema is not None: - error = form_validation.validate_with_defaults(schema, test_specs, best_only=True) + error = form_validation.validate_with_defaults( + schema, test_specs, best_only=True + ) if error: raise error update_test_specs(test_specs=test_specs, **kw) @@ -152,7 +178,7 @@ def cancel_test(markus_address, run_ids, **_kw): except NoSuchJobError: return if job.is_queued(): - files_path = job.kwargs['files_path'] + files_path = job.kwargs["files_path"] if files_path: shutil.rmtree(files_path, onerror=ignore_missing_dir_error) job.cancel() @@ -168,14 +194,14 @@ def get_schema(**_kw): """ this_dir = os.path.dirname(os.path.abspath(__file__)) - with open(os.path.join(this_dir, 'lib', 'tester_schema_skeleton.json')) as f: + with open(os.path.join(this_dir, "lib", "tester_schema_skeleton.json")) as f: schema_skeleton = json.load(f) - glob_pattern = os.path.join(this_dir, 'testers', '*', 'specs', '.installed') + glob_pattern = os.path.join(this_dir, "testers", "*", "specs", ".installed") for path in sorted(glob.glob(glob_pattern)): tester_type = os.path.basename(os.path.dirname(os.path.dirname(path))) specs_dir = os.path.dirname(path) - with open(os.path.join(specs_dir, 'settings_schema.json')) as f: + with open(os.path.join(specs_dir, "settings_schema.json")) as f: tester_schema = json.load(f) schema_skeleton["definitions"]["installed_testers"]["enum"].append(tester_type) @@ -201,25 +227,27 @@ def parse_arg_file(arg_file): with open(arg_file) as f: kwargs = json.load(f) - if 'files_path' not in kwargs: - kwargs['files_path'] = os.path.dirname(os.path.realpath(f.name)) + if "files_path" not in kwargs: + kwargs["files_path"] = os.path.dirname(os.path.realpath(f.name)) os.remove(arg_file) return kwargs -COMMANDS = {'run': enqueue_test, - 'specs': update_specs, - 'cancel': cancel_test, - 'schema': get_schema} +COMMANDS = { + "run": enqueue_test, + "specs": update_specs, + "cancel": cancel_test, + "schema": get_schema, +} def cli(): parser = argparse.ArgumentParser() - parser.add_argument('command', choices=COMMANDS) + parser.add_argument("command", choices=COMMANDS) group = parser.add_mutually_exclusive_group(required=False) - group.add_argument('-f', '--arg_file', type=parse_arg_file) - group.add_argument('-j', '--arg_json', type=json.loads) + group.add_argument("-f", "--arg_file", type=parse_arg_file) + group.add_argument("-j", "--arg_json", type=json.loads) args = parser.parse_args() @@ -232,5 +260,5 @@ def cli(): sys.exit(1) -if __name__ == '__main__': +if __name__ == "__main__": cli() diff --git a/src/autotester/config.py b/src/autotester/config.py index 85fdeb15..83afc95c 100644 --- a/src/autotester/config.py +++ b/src/autotester/config.py @@ -7,14 +7,14 @@ from collections.abc import Mapping import yaml -DEFAULT_ROOT = os.path.join(os.path.dirname(__file__), 'config_defaults') -CONFIG_FILENAME = 'markus_autotester_config' -CONFIG_ENV_VAR = 'MARKUS_AUTOTESTER_CONFIG' +DEFAULT_ROOT = os.path.join(os.path.dirname(__file__), "config_defaults") +CONFIG_FILENAME = "markus_autotester_config" +CONFIG_ENV_VAR = "MARKUS_AUTOTESTER_CONFIG" def _find_local_config(): - system_config = os.path.join(os.path.sep, 'etc', CONFIG_FILENAME) 
- user_config = os.path.join(os.environ.get('HOME'), f'.{CONFIG_FILENAME}') + system_config = os.path.join(os.path.sep, "etc", CONFIG_FILENAME) + user_config = os.path.join(os.environ.get("HOME"), f".{CONFIG_FILENAME}") env_config = os.environ.get(CONFIG_ENV_VAR) if env_config is not None: @@ -28,17 +28,19 @@ def _find_local_config(): class _Config: _local_config = _find_local_config() - _default_config = os.path.join(DEFAULT_ROOT, 'config_default.yml') - _env_var_config = os.path.join(DEFAULT_ROOT, 'config_env_vars.yml') - _replacement_pattern = re.compile(r'.*?\${(\w+)}.*?') - _not_found_key = '!VARIABLE NOT FOUND!' + _default_config = os.path.join(DEFAULT_ROOT, "config_default.yml") + _env_var_config = os.path.join(DEFAULT_ROOT, "config_env_vars.yml") + _replacement_pattern = re.compile(r".*?\${(\w+)}.*?") + _not_found_key = "!VARIABLE NOT FOUND!" def __init__(self): self._yaml_loader = yaml.SafeLoader - self._yaml_loader.add_implicit_resolver('!ENV', self._replacement_pattern, None) - env_constructor = self._constructor_factory(lambda g: os.environ.get(g, self._not_found_key)) - self._yaml_loader.add_constructor('!ENV', env_constructor) + self._yaml_loader.add_implicit_resolver("!ENV", self._replacement_pattern, None) + env_constructor = self._constructor_factory( + lambda g: os.environ.get(g, self._not_found_key) + ) + self._yaml_loader.add_constructor("!ENV", env_constructor) self._settings = self._load_from_yaml() @@ -78,7 +80,7 @@ def constructor(loader, node, pattern=self._replacement_pattern): if match: full_value = value for g in match: - full_value = full_value.replace(f'${{{g}}}', replacement_func(g)) + full_value = full_value.replace(f"${{{g}}}", replacement_func(g)) return full_value return value diff --git a/src/autotester/resources/ports/__init__.py b/src/autotester/resources/ports/__init__.py index 92f42bdf..eb5f8d36 100644 --- a/src/autotester/resources/ports/__init__.py +++ b/src/autotester/resources/ports/__init__.py @@ -2,9 +2,9 @@ from autotester.server.utils.redis_management import redis_connection from autotester.config import config -PORT_MIN = config['resources', 'port', 'min'] -PORT_MAX = config['resources', 'port', 'max'] -REDIS_PREFIX = config['redis', '_prefix'] +PORT_MIN = config["resources", "port", "min"] +PORT_MAX = config["resources", "port", "max"] +REDIS_PREFIX = config["redis", "_prefix"] REDIS_PORT_INT = f"{REDIS_PREFIX}{config['resources', 'port', '_redis_int']}" @@ -18,7 +18,7 @@ def next_port(): return int(r.incr(REDIS_PORT_INT) or 0) % (PORT_MAX - PORT_MIN) + PORT_MIN -def get_available_port(host='localhost'): +def get_available_port(host="localhost"): """ Return the next available open port on . 
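One common way to check whether a candidate port is usable is to try binding a throwaway socket to it. The standalone sketch below illustrates that idea only; it is not code from this module:

    import socket

    def port_is_free(port, host="localhost"):
        # Binding succeeds only if no other process currently holds the port.
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
            try:
                sock.bind((host, port))
                return True
            except OSError:
                return False

    print(port_is_free(54321))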
""" while True: try: diff --git a/src/autotester/resources/postgresql/__init__.py b/src/autotester/resources/postgresql/__init__.py index 18df17c8..15eadce5 100644 --- a/src/autotester/resources/postgresql/__init__.py +++ b/src/autotester/resources/postgresql/__init__.py @@ -6,23 +6,37 @@ from psycopg2.extensions import AsIs from autotester.config import config -POSTGRES_PREFIX = config['resources', 'postgresql', '_prefix'] -PGPASSFILE = os.path.join(config['workspace'], config['_workspace_contents', '_logs'], '.pgpass') +POSTGRES_PREFIX = config["resources", "postgresql", "_prefix"] +PGPASSFILE = os.path.join( + config["workspace"], config["_workspace_contents", "_logs"], ".pgpass" +) def setup_database(test_username): user = getpass.getuser() - database = f'{POSTGRES_PREFIX}{test_username}' + database = f"{POSTGRES_PREFIX}{test_username}" with open(PGPASSFILE) as f: password = f.read().strip() - with psycopg2.connect(database=database, user=user, password=password, host='localhost') as conn: + with psycopg2.connect( + database=database, user=user, password=password, host="localhost" + ) as conn: with conn.cursor() as cursor: cursor.execute("DROP OWNED BY CURRENT_USER;") if test_username != user: user = test_username - password = ''.join(secrets.choice(string.ascii_letters + string.digits) for _ in range(20)) - cursor.execute("ALTER USER %s WITH PASSWORD %s;", (AsIs(user), password)) + password = "".join( + secrets.choice(string.ascii_letters + string.digits) + for _ in range(20) + ) + cursor.execute( + "ALTER USER %s WITH PASSWORD %s;", (AsIs(user), password) + ) - return {'PGDATABASE': database, 'PGPASSWORD': password, 'PGUSER': user, 'AUTOTESTENV': 'true'} + return { + "PGDATABASE": database, + "PGPASSWORD": password, + "PGUSER": user, + "AUTOTESTENV": "true", + } diff --git a/src/autotester/server/hooks_context/builtin_hooks.py b/src/autotester/server/hooks_context/builtin_hooks.py index 90c74721..5929a0aa 100644 --- a/src/autotester/server/hooks_context/builtin_hooks.py +++ b/src/autotester/server/hooks_context/builtin_hooks.py @@ -8,18 +8,22 @@ import importlib from autotester import testers -HOOKS = {'upload_feedback_file': {'context': 'after_each'}, - 'upload_feedback_to_repo': {'requires': ['clear_feedback_file'], - 'context': 'after_each'}, - 'upload_annotations': {'context': 'after_each'}, - 'clear_feedback_file': {'context': 'before_each'}} +HOOKS = { + "upload_feedback_file": {"context": "after_each"}, + "upload_feedback_to_repo": { + "requires": ["clear_feedback_file"], + "context": "after_each", + }, + "upload_annotations": {"context": "after_each"}, + "clear_feedback_file": {"context": "before_each"}, +} def clear_feedback_file(test_data, **_kwargs): """ Remove any previous feedback file before the tests run. """ - feedback_file = test_data.get('feedback_file_name', '') + feedback_file = test_data.get("feedback_file_name", "") if os.path.isfile(feedback_file): os.remove(feedback_file) @@ -28,27 +32,31 @@ def upload_feedback_to_repo(api, assignment_id, group_id, test_data, **_kwargs): """ Upload the feedback file to the group's repo. 
""" - feedback_file = test_data.get('feedback_file_name', '') + feedback_file = test_data.get("feedback_file_name", "") if os.path.isfile(feedback_file): with open(feedback_file) as feedback_open: - api.upload_file_to_repo(assignment_id, group_id, feedback_file, feedback_open.read()) + api.upload_file_to_repo( + assignment_id, group_id, feedback_file, feedback_open.read() + ) def upload_feedback_file(api, assignment_id, group_id, test_data, **_kwargs): """ Upload the feedback file using MarkUs' api. """ - feedback_file = test_data.get('feedback_file_name', '') + feedback_file = test_data.get("feedback_file_name", "") if os.path.isfile(feedback_file): with open(feedback_file) as feedback_open: - api.upload_feedback_file(assignment_id, group_id, feedback_file, feedback_open.read()) + api.upload_feedback_file( + assignment_id, group_id, feedback_file, feedback_open.read() + ) def upload_annotations(api, assignment_id, group_id, test_data, **_kwargs): """ Upload annotations using MarkUs' api. """ - annotations_name = test_data.get('annotation_file', '') + annotations_name = test_data.get("annotation_file", "") if os.path.isfile(annotations_name): with open(annotations_name) as annotations_open: api.upload_annotations(assignment_id, group_id, json.load(annotations_open)) @@ -59,8 +67,10 @@ def _load_default_hooks(): Return a dictionary containing all hooks loaded from any default_hooks.py in the testers package. """ defaults = {} - for _finder, name, _ispkg in pkgutil.walk_packages(testers.__path__, f'{testers.__name__}.'): - if name.endswith('default_hooks'): + for _finder, name, _ispkg in pkgutil.walk_packages( + testers.__path__, f"{testers.__name__}." + ): + if name.endswith("default_hooks"): default_hooks = importlib.import_module(name) for hook in default_hooks.HOOKS: defaults[hook.__name__] = hook diff --git a/src/autotester/server/hooks_context/hooks_context.py b/src/autotester/server/hooks_context/hooks_context.py index a782adff..ee617024 100644 --- a/src/autotester/server/hooks_context/hooks_context.py +++ b/src/autotester/server/hooks_context/hooks_context.py @@ -45,9 +45,12 @@ class Hooks: Builtin hooks can have any name and when they are executed is instead determined by the values associated to their name in the builtin_hooks.HOOKS dictionary. 
""" - HOOK_BASENAMES = ['before_all', 'before_each', 'after_all', 'after_each'] - def __init__(self, custom_hooks_path=None, testers=None, cwd=None, args=None, kwargs=None): + HOOK_BASENAMES = ["before_all", "before_each", "after_all", "after_each"] + + def __init__( + self, custom_hooks_path=None, testers=None, cwd=None, args=None, kwargs=None + ): """ Create a new Hooks object instance with args: @@ -98,11 +101,11 @@ def _select_builtins(tester_info, _info=None): for func_name, data in builtin_hooks.HOOKS.items(): if tester_info.get(func_name): - hook_type, hook_context = data['context'].split('_') + hook_type, hook_context = data["context"].split("_") func = getattr(builtin_hooks, func_name) if func not in _info.get(hook_context, {}).get(hook_type, set()): _info[hook_context][hook_type].append(func) - for requires in data.get('requires', []): + for requires in data.get("requires", []): Hooks._select_builtins({requires: True}, _info) return _info @@ -120,9 +123,11 @@ def _merge_hook_dicts(*hook_dicts): for key, hooks in d.items(): merged[key].extend(h for h in hooks if h) for key, hooks in merged.items(): - merged[key] = sorted((h for h in hooks if h), - key=lambda x: (x.__name__ in Hooks.HOOK_BASENAMES, x.__name__), - reverse=(key == 'after')) + merged[key] = sorted( + (h for h in hooks if h), + key=lambda x: (x.__name__ in Hooks.HOOK_BASENAMES, x.__name__), + reverse=(key == "after"), + ) return merged def _load_all(self): @@ -141,17 +146,21 @@ def _load_all(self): hooks = defaultdict(lambda: defaultdict(lambda: defaultdict(list))) custom_hooks_module = self._load_module(self.custom_hooks_path) - + for hook_name in Hooks.HOOK_BASENAMES: - hook_type, hook_context = hook_name.split('_') # eg. "before_all" -> ("before", "all") + hook_type, hook_context = hook_name.split( + "_" + ) # eg. "before_all" -> ("before", "all") custom_hook = self._load_hook(custom_hooks_module, hook_name) builtin_hook = builtin_hooks.DEFAULT_HOOKS.get(hook_name) hooks[None][hook_context][hook_type].extend([custom_hook, builtin_hook]) for tester_type in self.testers: - tester_hook_name = f'{hook_name}_{tester_type}' + tester_hook_name = f"{hook_name}_{tester_type}" custom_hook = self._load_hook(custom_hooks_module, tester_hook_name) builtin_hook = builtin_hooks.DEFAULT_HOOKS.get(tester_hook_name) - hooks[tester_type][hook_context][hook_type].extend([custom_hook, builtin_hook]) + hooks[tester_type][hook_context][hook_type].extend( + [custom_hook, builtin_hook] + ) return hooks def _load_module(self, hooks_script_path): @@ -170,7 +179,7 @@ def _load_module(self, hooks_script_path): hooks_module = __import__(module_name) return hooks_module except Exception as e: - self.load_errors.append((module_name, f'{traceback.format_exc()}\n{e}')) + self.load_errors.append((module_name, f"{traceback.format_exc()}\n{e}")) return None def _load_hook(self, module, function_name): @@ -184,7 +193,9 @@ def _load_hook(self, module, function_name): if isinstance(func, Callable): return func else: - self.load_errors.append((module.__name__, f'hook function {function_name} is not callable')) + self.load_errors.append( + (module.__name__, f"hook function {function_name} is not callable") + ) except AttributeError: return @@ -193,12 +204,14 @@ def _run(self, func, extra_args=None, extra_kwargs=None): Run the function func with positional and keyword arguments obtained by merging self.args with extra_args and self.kwargs with extra_kwargs. 
""" - args = self.args+(extra_args or []) + args = self.args + (extra_args or []) kwargs = {**self.kwargs, **(extra_kwargs or {})} try: func(*args, **kwargs) except BaseException as e: - self.run_errors.append((func.__name__, args, kwargs, f'{traceback.format_exc()}\n{e}')) + self.run_errors.append( + (func.__name__, args, kwargs, f"{traceback.format_exc()}\n{e}") + ) def _get_hooks(self, tester_type, builtin_selector=None): """ @@ -210,22 +223,29 @@ def _get_hooks(self, tester_type, builtin_selector=None): if no builtin hooks are used. """ builtin_hook_dict = Hooks._select_builtins(builtin_selector or {}) - if tester_type == 'all': - hooks = self.hooks.get(None, {}).get('all', {}) - elif tester_type == 'each': - hooks = self.hooks.get(None, {}).get('each', {}) - other_hooks = [builtin_hook_dict.get('each', {})] + if tester_type == "all": + hooks = self.hooks.get(None, {}).get("all", {}) + elif tester_type == "each": + hooks = self.hooks.get(None, {}).get("each", {}) + other_hooks = [builtin_hook_dict.get("each", {})] for context in self._context: - context_hooks = self.hooks.get(context, {}).get('each', {}) + context_hooks = self.hooks.get(context, {}).get("each", {}) other_hooks.append(context_hooks) hooks = Hooks._merge_hook_dicts(hooks, *other_hooks) else: - hooks = self.hooks.get(tester_type, {}).get('all', {}) - hooks = Hooks._merge_hook_dicts(hooks, builtin_hook_dict.get('all', {})) - return hooks.get('before', []), hooks.get('after', []) + hooks = self.hooks.get(tester_type, {}).get("all", {}) + hooks = Hooks._merge_hook_dicts(hooks, builtin_hook_dict.get("all", {})) + return hooks.get("before", []), hooks.get("after", []) @contextmanager - def around(self, tester_type, builtin_selector=None, extra_args=None, extra_kwargs=None, cwd=None): + def around( + self, + tester_type, + builtin_selector=None, + extra_args=None, + extra_kwargs=None, + cwd=None, + ): """ Context manager used to run hooks around any block of code. Hooks are selected based on the tester type (one of 'all', 'each', or the name of a tester), a builtin_selector (usually the test settings for a given test @@ -233,7 +253,7 @@ def around(self, tester_type, builtin_selector=None, extra_args=None, extra_kwar and self.kwargs. If cwd is specified, each hook will be run as if the current working directory were cwd. 
""" before, after = self._get_hooks(tester_type, builtin_selector) - if tester_type not in {'all', 'each'}: + if tester_type not in {"all", "each"}: self._context.append(tester_type) try: if any(before) or any(after): @@ -249,7 +269,7 @@ def around(self, tester_type, builtin_selector=None, extra_args=None, extra_kwar else: yield finally: - if tester_type not in {'all', 'each'}: + if tester_type not in {"all", "each"}: self._context.pop() def format_errors(self): @@ -258,8 +278,10 @@ def format_errors(self): """ error_list = [] for module_name, tb in self.load_errors: - error_list.append(f'module_name: {module_name}\ntraceback:\n{tb}') - for hook_name, args, kwargs, tb in self.run_errors: - error_list.append(f'function_name: {hook_name}\n' - f'args: {self.args}\nkwargs: {self.kwargs},\ntraceback:\n{tb}') - return '\n\n'.join(error_list) + error_list.append(f"module_name: {module_name}\ntraceback:\n{tb}") + for hook_name, args, kwargs, tb in self.run_errors: + error_list.append( + f"function_name: {hook_name}\n" + f"args: {self.args}\nkwargs: {self.kwargs},\ntraceback:\n{tb}" + ) + return "\n\n".join(error_list) diff --git a/src/autotester/server/server.py b/src/autotester/server/server.py index 5722fe38..e4359202 100755 --- a/src/autotester/server/server.py +++ b/src/autotester/server/server.py @@ -13,29 +13,59 @@ from autotester.exceptions import TesterCreationError from autotester.config import config from autotester.server.hooks_context.hooks_context import Hooks -from autotester.server.utils.string_management import loads_partial_json, decode_if_bytes, stringify -from autotester.server.utils.user_management import get_reaper_username, current_user, tester_user -from autotester.server.utils.file_management import random_tmpfile_name, clean_dir_name, setup_files, \ - ignore_missing_dir_error, fd_open, fd_lock, move_tree -from autotester.server.utils.resource_management import set_rlimits_before_cleanup, set_rlimits_before_test -from autotester.server.utils.redis_management import clean_after, test_script_directory, update_pop_interval_stat +from autotester.server.utils.string_management import ( + loads_partial_json, + decode_if_bytes, + stringify, +) +from autotester.server.utils.user_management import ( + get_reaper_username, + current_user, + tester_user, +) +from autotester.server.utils.file_management import ( + random_tmpfile_name, + clean_dir_name, + setup_files, + ignore_missing_dir_error, + fd_open, + fd_lock, + move_tree, +) +from autotester.server.utils.resource_management import ( + set_rlimits_before_cleanup, + set_rlimits_before_test, +) +from autotester.server.utils.redis_management import ( + clean_after, + test_script_directory, + update_pop_interval_stat, +) from autotester.resources.ports import get_available_port from autotester.resources.postgresql import setup_database -DEFAULT_ENV_DIR = config['_workspace_contents', '_default_venv_name'] -TEST_RESULT_DIR = os.path.join(config['workspace'], config['_workspace_contents', '_results']) -HOOKS_FILENAME = config['_workspace_contents', '_hooks_file'] -SETTINGS_FILENAME = config['_workspace_contents', '_settings_file'] -FILES_DIRNAME = config['_workspace_contents', '_files_dir'] -TEST_SPECS_DIR = os.path.join(config['workspace'], config['_workspace_contents', '_specs']) -TEST_SCRIPT_DIR = os.path.join(config['workspace'], config['_workspace_contents', '_scripts']) - -TESTER_IMPORT_LINE = {'custom': 'from testers.custom.markus_custom_tester import MarkusCustomTester as Tester', - 'haskell': 'from 
testers.haskell.markus_haskell_tester import MarkusHaskellTester as Tester', - 'java': 'from testers.java.markus_java_tester import MarkusJavaTester as Tester', - 'py': 'from testers.py.markus_python_tester import MarkusPythonTester as Tester', - 'pyta': 'from testers.pyta.markus_pyta_tester import MarkusPyTATester as Tester', - 'racket': 'from testers.racket.markus_racket_tester import MarkusRacketTester as Tester'} +DEFAULT_ENV_DIR = config["_workspace_contents", "_default_venv_name"] +TEST_RESULT_DIR = os.path.join( + config["workspace"], config["_workspace_contents", "_results"] +) +HOOKS_FILENAME = config["_workspace_contents", "_hooks_file"] +SETTINGS_FILENAME = config["_workspace_contents", "_settings_file"] +FILES_DIRNAME = config["_workspace_contents", "_files_dir"] +TEST_SPECS_DIR = os.path.join( + config["workspace"], config["_workspace_contents", "_specs"] +) +TEST_SCRIPT_DIR = os.path.join( + config["workspace"], config["_workspace_contents", "_scripts"] +) + +TESTER_IMPORT_LINE = { + "custom": "from testers.custom.markus_custom_tester import MarkusCustomTester as Tester", + "haskell": "from testers.haskell.markus_haskell_tester import MarkusHaskellTester as Tester", + "java": "from testers.java.markus_java_tester import MarkusJavaTester as Tester", + "py": "from testers.py.markus_python_tester import MarkusPythonTester as Tester", + "pyta": "from testers.pyta.markus_pyta_tester import MarkusPyTATester as Tester", + "racket": "from testers.racket.markus_racket_tester import MarkusRacketTester as Tester", +} def run_test_command(test_username=None): @@ -51,10 +81,11 @@ def run_test_command(test_username=None): >>> run_test_command().format(test_script) './myscript.py' """ - cmd = '{}' + cmd = "{}" if test_username is not None: - cmd = ' '.join(('sudo', '-Eu', test_username, '--', 'bash', '-c', - "'{}'".format(cmd))) + cmd = " ".join( + ("sudo", "-Eu", test_username, "--", "bash", "-c", "'{}'".format(cmd)) + ) return cmd @@ -65,12 +96,14 @@ def create_test_group_result(stdout, stderr, run_time, extra_info, timeout=None) falsy, change it to None. Load the json string in stdout as a dictionary. 
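The stdout parsed here may contain several JSON objects back to back, and loads_partial_json in string_management handles that by applying json.JSONDecoder.raw_decode repeatedly. A stripped-down, runnable sketch of that technique (without the malformed-segment tracking done by the real function):

    import json

    def parse_concatenated_json(text):
        # Decode back-to-back JSON values one at a time; raw_decode does not
        # skip leading whitespace, so advance past it manually.
        decoder = json.JSONDecoder()
        values, i = [], 0
        while i < len(text):
            if text[i].isspace():
                i += 1
                continue
            value, consumed = decoder.raw_decode(text[i:])
            values.append(value)
            i += consumed
        return values

    print(parse_concatenated_json('{"test": "t1"} {"test": "t2"}'))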
""" test_results, malformed = loads_partial_json(stdout, dict) - return {'time': run_time, - 'timeout': timeout, - 'tests': test_results, - 'stderr': stderr or None, - 'malformed': stdout if malformed else None, - 'extra_info': extra_info or {}} + return { + "time": run_time, + "timeout": timeout, + "tests": test_results, + "stderr": stderr or None, + "malformed": stdout if malformed else None, + "extra_info": extra_info or {}, + } def kill_with_reaper(test_username): @@ -94,13 +127,16 @@ def kill_with_reaper(test_username): kill_file_dst = random_tmpfile_name() preexec_fn = set_rlimits_before_cleanup() - copy_cmd = "sudo -u {0} -- bash -c 'cp kill_worker_procs {1} && chmod 4550 {1}'".format(test_username, - kill_file_dst) - copy_proc = subprocess.Popen(copy_cmd, shell=True, preexec_fn=preexec_fn, cwd=cwd) + copy_cmd = "sudo -u {0} -- bash -c 'cp kill_worker_procs {1} && chmod 4550 {1}'".format( + test_username, kill_file_dst + ) + copy_proc = subprocess.Popen( + copy_cmd, shell=True, preexec_fn=preexec_fn, cwd=cwd + ) if copy_proc.wait() < 0: # wait returns the return code of the proc return False - kill_cmd = 'sudo -u {} -- bash -c {}'.format(reaper_username, kill_file_dst) + kill_cmd = "sudo -u {} -- bash -c {}".format(reaper_username, kill_file_dst) kill_proc = subprocess.Popen(kill_cmd, shell=True, preexec_fn=preexec_fn) return kill_proc.wait() == 0 return False @@ -120,12 +156,16 @@ def create_test_script_command(env_dir, tester_type): run tests. """ import_line = TESTER_IMPORT_LINE[tester_type] - python_lines = ['import sys, json', - import_line, - 'from testers.markus_test_specs import MarkusTestSpecs', - f'Tester(specs=MarkusTestSpecs.from_json(sys.stdin.read())).run()'] - python_ex = os.path.join(os.path.join(TEST_SPECS_DIR, env_dir), 'venv', 'bin', 'python') - python_str = '; '.join(python_lines) + python_lines = [ + "import sys, json", + import_line, + "from testers.markus_test_specs import MarkusTestSpecs", + f"Tester(specs=MarkusTestSpecs.from_json(sys.stdin.read())).run()", + ] + python_ex = os.path.join( + os.path.join(TEST_SPECS_DIR, env_dir), "venv", "bin", "python" + ) + python_str = "; ".join(python_lines) return f'{python_ex} -c "{python_str}"' @@ -133,7 +173,7 @@ def get_env_vars(test_username): """ Return a dictionary containing all environment variables to pass to the next test """ db_env_vars = setup_database(test_username) port_number = get_available_port() - return {'PORT': port_number, **db_env_vars} + return {"PORT": port_number, **db_env_vars} def run_test_specs(cmd, test_specs, test_categories, tests_path, test_username, hooks): @@ -144,35 +184,51 @@ def run_test_specs(cmd, test_specs, test_categories, tests_path, test_username, results = [] preexec_fn = set_rlimits_before_test() - with hooks.around('all'): - for settings in test_specs['testers']: - tester_type = settings['tester_type'] - extra_hook_kwargs = {'settings': settings} + with hooks.around("all"): + for settings in test_specs["testers"]: + tester_type = settings["tester_type"] + extra_hook_kwargs = {"settings": settings} with hooks.around(tester_type, extra_kwargs=extra_hook_kwargs): - env_dir = settings.get('env_loc', DEFAULT_ENV_DIR) + env_dir = settings.get("env_loc", DEFAULT_ENV_DIR) cmd_str = create_test_script_command(env_dir, tester_type) args = cmd.format(cmd_str) - for test_data in settings['test_data']: - test_category = test_data.get('category', []) + for test_data in settings["test_data"]: + test_category = test_data.get("category", []) if set(test_category) & set( - test_categories): # 
TODO: make sure test_categories is non-string collection type - extra_hook_kwargs = {'test_data': test_data} - with hooks.around('each', builtin_selector=test_data, extra_kwargs=extra_hook_kwargs): + test_categories + ): # TODO: make sure test_categories is non-string collection type + extra_hook_kwargs = {"test_data": test_data} + with hooks.around( + "each", + builtin_selector=test_data, + extra_kwargs=extra_hook_kwargs, + ): start = time.time() - out, err = '', '' + out, err = "", "" timeout_expired = None - timeout = test_data.get('timeout') + timeout = test_data.get("timeout") try: env_vars = get_env_vars(test_username) - proc = subprocess.Popen(args, start_new_session=True, cwd=tests_path, shell=True, - stdout=subprocess.PIPE, stderr=subprocess.PIPE, - stdin=subprocess.PIPE, preexec_fn=preexec_fn, - env={**os.environ, **env_vars}) + proc = subprocess.Popen( + args, + start_new_session=True, + cwd=tests_path, + shell=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + stdin=subprocess.PIPE, + preexec_fn=preexec_fn, + env={**os.environ, **env_vars}, + ) try: - settings_json = json.dumps({**settings, 'test_data': test_data}).encode('utf-8') - out, err = proc.communicate(input=settings_json, timeout=timeout) + settings_json = json.dumps( + {**settings, "test_data": test_data} + ).encode("utf-8") + out, err = proc.communicate( + input=settings_json, timeout=timeout + ) except subprocess.TimeoutExpired: if test_username == current_user(): pgrp = os.getpgid(proc.pid) @@ -183,14 +239,17 @@ def run_test_specs(cmd, test_specs, test_categories, tests_path, test_username, out, err = proc.communicate() timeout_expired = timeout except Exception as e: - err += '\n\n{}'.format(e) + err += "\n\n{}".format(e) finally: out = decode_if_bytes(out) err = decode_if_bytes(err) duration = int(round(time.time() - start, 3) * 1000) - extra_info = test_data.get('extra_info', {}) + extra_info = test_data.get("extra_info", {}) results.append( - create_test_group_result(out, err, duration, extra_info, timeout_expired)) + create_test_group_result( + out, err, duration, extra_info, timeout_expired + ) + ) return results, hooks.format_errors() @@ -203,10 +262,17 @@ def store_results(results_data, markus_address, assignment_id, group_id, submiss clean_markus_address = clean_dir_name(markus_address) run_time = "run_{}".format(int(time.time())) destination = os.path.join( - *stringify(TEST_RESULT_DIR, clean_markus_address, assignment_id, group_id, 's{}'.format(submission_id or ''), - run_time)) + *stringify( + TEST_RESULT_DIR, + clean_markus_address, + assignment_id, + group_id, + "s{}".format(submission_id or ""), + run_time, + ) + ) os.makedirs(destination, exist_ok=True) - with open(os.path.join(destination, 'output.json'), 'w') as f: + with open(os.path.join(destination, "output.json"), "w") as f: json.dump(results_data, f, indent=4) @@ -215,15 +281,17 @@ def clear_working_directory(tests_path, test_username): Run commands that clear the tests_path working directory """ if test_username != current_user(): - chmod_cmd = "sudo -u {} -- bash -c 'chmod -Rf ugo+rwX {}'".format(test_username, tests_path) + chmod_cmd = "sudo -u {} -- bash -c 'chmod -Rf ugo+rwX {}'".format( + test_username, tests_path + ) else: - chmod_cmd = 'chmod -Rf ugo+rwX {}'.format(tests_path) + chmod_cmd = "chmod -Rf ugo+rwX {}".format(tests_path) subprocess.run(chmod_cmd, shell=True) # be careful not to remove the tests_path dir itself since we have to # set the group ownership with sudo (and that is only done in ../install.sh) - clean_cmd 
= 'rm -rf {0}/.[!.]* {0}/*'.format(tests_path) + clean_cmd = "rm -rf {0}/.[!.]* {0}/*".format(tests_path) subprocess.run(clean_cmd, shell=True) @@ -240,20 +308,33 @@ def stop_tester_processes(test_username): def finalize_results_data(results, error, all_hooks_error, time_to_service): """ Return a dictionary of test script results combined with test run info """ - return {'test_groups': results, - 'error': error, - 'hooks_error': all_hooks_error, - 'time_to_service': time_to_service} + return { + "test_groups": results, + "error": error, + "hooks_error": all_hooks_error, + "time_to_service": time_to_service, + } def report(results_data, api, assignment_id, group_id, run_id): """ Post the results of running test scripts to the markus api """ - api.upload_test_group_results(assignment_id, group_id, run_id, json.dumps(results_data)) + api.upload_test_group_results( + assignment_id, group_id, run_id, json.dumps(results_data) + ) @clean_after -def run_test(markus_address, server_api_key, test_categories, files_path, assignment_id, - group_id, submission_id, run_id, enqueue_time): +def run_test( + markus_address, + server_api_key, + test_categories, + files_path, + assignment_id, + group_id, + submission_id, + run_id, + enqueue_time, +): """ Run autotesting tests using the tests in the test_specs json file on the files in files_path. @@ -276,28 +357,31 @@ def run_test(markus_address, server_api_key, test_categories, files_path, assign job = rq.get_current_job() update_pop_interval_stat(job.origin) test_username, tests_path = tester_user() - hooks_kwargs = {'api': api, - 'assignment_id': assignment_id, - 'group_id': group_id} - testers = {settings['tester_type'] for settings in test_specs['testers']} + hooks_kwargs = { + "api": api, + "assignment_id": assignment_id, + "group_id": group_id, + } + testers = {settings["tester_type"] for settings in test_specs["testers"]} hooks = Hooks(hooks_script_path, testers, cwd=tests_path, kwargs=hooks_kwargs) try: setup_files(files_path, tests_path, markus_address, assignment_id) cmd = run_test_command(test_username=test_username) - results, hooks_error = run_test_specs(cmd, - test_specs, - test_categories, - tests_path, - test_username, - hooks) + results, hooks_error = run_test_specs( + cmd, test_specs, test_categories, tests_path, test_username, hooks + ) finally: stop_tester_processes(test_username) clear_working_directory(tests_path, test_username) except Exception as e: error = str(e) finally: - results_data = finalize_results_data(results, error, hooks_error, time_to_service) - store_results(results_data, markus_address, assignment_id, group_id, submission_id) + results_data = finalize_results_data( + results, error, hooks_error, time_to_service + ) + store_results( + results_data, markus_address, assignment_id, group_id, submission_id + ) report(results_data, api, assignment_id, group_id, run_id) @@ -307,9 +391,9 @@ def get_tester_root_dir(tester_type): """ this_dir = os.path.dirname(os.path.abspath(__file__)) root_dir = os.path.dirname(this_dir) - tester_dir = os.path.join(root_dir, 'testers', tester_type) + tester_dir = os.path.join(root_dir, "testers", tester_type) if not os.path.isdir(tester_dir): - raise FileNotFoundError(f'{tester_type} is not a valid tester name') + raise FileNotFoundError(f"{tester_type} is not a valid tester name") return tester_dir @@ -319,36 +403,38 @@ def update_settings(settings, specs_dir): contained in the tester's specs directory as well as the settings. 
The settings will overwrite any duplicate keys in the default settings files. """ - full_settings = {'install_data': {}} - install_settings_files = [os.path.join(specs_dir, 'install_settings.json')] + full_settings = {"install_data": {}} + install_settings_files = [os.path.join(specs_dir, "install_settings.json")] for settings_file in install_settings_files: if os.path.isfile(settings_file): with open(settings_file) as f: - full_settings['install_data'].update(json.load(f)) + full_settings["install_data"].update(json.load(f)) full_settings.update(settings) return full_settings def create_tester_environments(files_path, test_specs): - for i, settings in enumerate(test_specs['testers']): + for i, settings in enumerate(test_specs["testers"]): tester_dir = get_tester_root_dir(settings["tester_type"]) - specs_dir = os.path.join(tester_dir, 'specs') - bin_dir = os.path.join(tester_dir, 'bin') + specs_dir = os.path.join(tester_dir, "specs") + bin_dir = os.path.join(tester_dir, "bin") settings = update_settings(settings, specs_dir) - if settings.get('env_data'): - new_env_dir = tempfile.mkdtemp(prefix='env', dir=TEST_SPECS_DIR) + if settings.get("env_data"): + new_env_dir = tempfile.mkdtemp(prefix="env", dir=TEST_SPECS_DIR) os.chmod(new_env_dir, 0o775) - settings['env_loc'] = new_env_dir + settings["env_loc"] = new_env_dir - create_file = os.path.join(bin_dir, 'create_environment.sh') + create_file = os.path.join(bin_dir, "create_environment.sh") if os.path.isfile(create_file): - cmd = [f'{create_file}', json.dumps(settings), files_path] + cmd = [f"{create_file}", json.dumps(settings), files_path] proc = subprocess.run(cmd, stderr=subprocess.PIPE) if proc.returncode != 0: - raise TesterCreationError(f'create tester environment failed with:\n{proc.stderr}') + raise TesterCreationError( + f"create tester environment failed with:\n{proc.stderr}" + ) else: - settings['env_loc'] = DEFAULT_ENV_DIR - test_specs['testers'][i] = settings + settings["env_loc"] = DEFAULT_ENV_DIR + test_specs["testers"][i] = settings return test_specs @@ -357,17 +443,19 @@ def destroy_tester_environments(old_test_script_dir): test_specs_file = os.path.join(old_test_script_dir, SETTINGS_FILENAME) with open(test_specs_file) as f: test_specs = json.load(f) - for settings in test_specs['testers']: - env_loc = settings.get('env_loc', DEFAULT_ENV_DIR) + for settings in test_specs["testers"]: + env_loc = settings.get("env_loc", DEFAULT_ENV_DIR) if env_loc != DEFAULT_ENV_DIR: - tester_dir = get_tester_root_dir(settings['tester_type']) - bin_dir = os.path.join(tester_dir, 'bin') - destroy_file = os.path.join(bin_dir, 'destroy_environment.sh') + tester_dir = get_tester_root_dir(settings["tester_type"]) + bin_dir = os.path.join(tester_dir, "bin") + destroy_file = os.path.join(bin_dir, "destroy_environment.sh") if os.path.isfile(destroy_file): - cmd = [f'{destroy_file}', json.dumps(settings)] + cmd = [f"{destroy_file}", json.dumps(settings)] proc = subprocess.run(cmd, stderr=subprocess.PIPE) if proc.returncode != 0: - raise TesterCreationError(f'destroy tester environment failed with:\n{proc.stderr}') + raise TesterCreationError( + f"destroy tester environment failed with:\n{proc.stderr}" + ) shutil.rmtree(env_loc, onerror=ignore_missing_dir_error) @@ -384,16 +472,20 @@ def update_test_specs(files_path, assignment_id, markus_address, test_specs): # TODO: catch and log errors test_script_dir_name = "test_scripts_{}".format(int(time.time())) clean_markus_address = clean_dir_name(markus_address) - new_dir = 
os.path.join(*stringify(TEST_SCRIPT_DIR, clean_markus_address, assignment_id, test_script_dir_name)) + new_dir = os.path.join( + *stringify( + TEST_SCRIPT_DIR, clean_markus_address, assignment_id, test_script_dir_name + ) + ) new_files_dir = os.path.join(new_dir, FILES_DIRNAME) move_tree(files_path, new_files_dir) - if 'hooks_file' in test_specs: - src = os.path.join(new_files_dir, test_specs['hooks_file']) + if "hooks_file" in test_specs: + src = os.path.join(new_files_dir, test_specs["hooks_file"]) if os.path.isfile(src): os.rename(src, os.path.join(new_dir, HOOKS_FILENAME)) test_specs = create_tester_environments(new_files_dir, test_specs) settings_filename = os.path.join(new_dir, SETTINGS_FILENAME) - with open(settings_filename, 'w') as f: + with open(settings_filename, "w") as f: json.dump(test_specs, f) old_test_script_dir = test_script_directory(markus_address, assignment_id) test_script_directory(markus_address, assignment_id, set_to=new_dir) diff --git a/src/autotester/server/utils/file_management.py b/src/autotester/server/utils/file_management.py index 45ca6a87..96a45ff9 100644 --- a/src/autotester/server/utils/file_management.py +++ b/src/autotester/server/utils/file_management.py @@ -7,12 +7,12 @@ from autotester.config import config from contextlib import contextmanager -FILES_DIRNAME = config['_workspace_contents', '_files_dir'] +FILES_DIRNAME = config["_workspace_contents", "_files_dir"] def clean_dir_name(name): """ Return name modified so that it can be used as a unix style directory name """ - return name.replace('/', '_') + return name.replace("/", "_") def random_tmpfile_name(): @@ -29,10 +29,10 @@ def recursive_iglob(root_dir): """ if os.path.isdir(root_dir): for root, dirnames, filenames in os.walk(root_dir): - yield from (('d', os.path.join(root, d)) for d in dirnames) - yield from (('f', os.path.join(root, f)) for f in filenames) + yield from (("d", os.path.join(root, d)) for d in dirnames) + yield from (("f", os.path.join(root, f)) for f in filenames) else: - raise ValueError('directory does not exist: {}'.format(root_dir)) + raise ValueError("directory does not exist: {}".format(root_dir)) def copy_tree(src, dst, exclude=tuple()): @@ -48,7 +48,7 @@ def copy_tree(src, dst, exclude=tuple()): if src_path in exclude: continue target = os.path.join(dst, src_path) - if fd == 'd': + if fd == "d": os.makedirs(target, exist_ok=True) else: os.makedirs(os.path.dirname(target), exist_ok=True) @@ -111,7 +111,9 @@ def copy_test_script_files(markus_address, assignment_id, tests_path): directory if they exist. tests_path may already exist and contain files and subdirectories. 
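The ("d", path) and ("f", path) pairs yielded by recursive_iglob, and collected by move_tree, are what setup_files below iterates over when assigning permissions. A minimal illustration of consuming such pairs (the mode values here are arbitrary examples, not the ones this module uses):

    import os

    def chmod_entries(entries, dir_mode=0o770, file_mode=0o660):
        # `entries` is an iterable of ("d" | "f", path) pairs, the shape
        # yielded by recursive_iglob.
        for fd, path in entries:
            os.chmod(path, dir_mode if fd == "d" else file_mode)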
""" - test_script_outer_dir = redis_management.test_script_directory(markus_address, assignment_id) + test_script_outer_dir = redis_management.test_script_directory( + markus_address, assignment_id + ) test_script_dir = os.path.join(test_script_outer_dir, FILES_DIRNAME) if os.path.isdir(test_script_dir): with fd_open(test_script_dir) as fd: @@ -134,14 +136,14 @@ def setup_files(files_path, tests_path, markus_address, assignment_id): os.chmod(tests_path, 0o1770) student_files = move_tree(files_path, tests_path) for fd, file_or_dir in student_files: - if fd == 'd': + if fd == "d": os.chmod(file_or_dir, 0o777) else: os.chmod(file_or_dir, 0o666) script_files = copy_test_script_files(markus_address, assignment_id, tests_path) for fd, file_or_dir in script_files: permissions = 0o755 - if fd == 'f': + if fd == "f": permissions -= 0o111 os.chmod(file_or_dir, permissions) return student_files, script_files diff --git a/src/autotester/server/utils/form_validation.py b/src/autotester/server/utils/form_validation.py index 96f51a3b..e6df2170 100644 --- a/src/autotester/server/utils/form_validation.py +++ b/src/autotester/server/utils/form_validation.py @@ -34,11 +34,11 @@ def set_array_defaults(validator, properties, instance, schema): """ Set defaults within an "array" context """ if not validator.is_type(instance, "array"): return - + if not instance: default_val = None if "default" in properties: - default_val = properties['default'] + default_val = properties["default"] elif properties.get("type") == "array": default_val = [] elif properties.get("type") == "object": @@ -75,7 +75,7 @@ def set_oneOf_defaults(validator, properties, instance, schema): good_instance = new_instance if len(good_properties) == 0: - msg = f'{instance} is not valid under any of the given schemas' + msg = f"{instance} is not valid under any of the given schemas" yield ValidationError(msg, context=all_errors) elif len(good_properties) > 1: msg = f'{instance} is valid under each of {", ".join(repr(p) for p in good_properties)}' @@ -84,14 +84,18 @@ def set_oneOf_defaults(validator, properties, instance, schema): instance.clear() instance.update(good_instance) - custom_validators = {"properties": set_defaults, - "items": set_array_defaults, - "oneOf": set_oneOf_defaults} + custom_validators = { + "properties": set_defaults, + "items": set_array_defaults, + "oneOf": set_oneOf_defaults, + } return validators.extend(validator_class, custom_validators) -def validate_with_defaults(schema, obj, validator_class=Draft7Validator, best_only=True): +def validate_with_defaults( + schema, obj, validator_class=Draft7Validator, best_only=True +): """ Return an iterator that yields errors from validating obj on schema after first filling in defaults on obj. 
diff --git a/src/autotester/server/utils/redis_management.py b/src/autotester/server/utils/redis_management.py
index 509cea32..e09f56bc 100644
--- a/src/autotester/server/utils/redis_management.py
+++ b/src/autotester/server/utils/redis_management.py
@@ -5,8 +5,8 @@
 from autotester.server.utils import file_management, string_management
 from autotester.config import config

-CURRENT_TEST_SCRIPT_HASH = config['redis', '_current_test_script_hash']
-POP_INTERVAL_HASH = config['redis', '_pop_interval_hash']
+CURRENT_TEST_SCRIPT_HASH = config["redis", "_current_test_script_hash"]
+POP_INTERVAL_HASH = config["redis", "_pop_interval_hash"]


 def redis_connection():
@@ -18,7 +18,7 @@ def redis_connection():
     conn = rq.get_current_connection()
     if conn:
         return conn
-    rq.use_connection(redis=redis.Redis.from_url(config['redis', 'url']))
+    rq.use_connection(redis=redis.Redis.from_url(config["redis", "url"]))
     return rq.get_current_connection()


@@ -28,7 +28,7 @@ def get_test_script_key(markus_address, assignment_id):
     storing the location of test scripts in Redis
     """
     clean_markus_address = file_management.clean_dir_name(markus_address)
-    return f'{clean_markus_address}_{assignment_id}'
+    return f"{clean_markus_address}_{assignment_id}"


 def test_script_directory(markus_address, assignment_id, set_to=None):
@@ -54,9 +54,9 @@ def update_pop_interval_stat(queue_name):
     """
     r = redis_connection()
     now = time.time()
-    r.hsetnx(POP_INTERVAL_HASH, '{}_start'.format(queue_name), now)
-    r.hset(POP_INTERVAL_HASH, '{}_last'.format(queue_name), now)
-    r.hincrby(POP_INTERVAL_HASH, '{}_count'.format(queue_name), 1)
+    r.hsetnx(POP_INTERVAL_HASH, "{}_start".format(queue_name), now)
+    r.hset(POP_INTERVAL_HASH, "{}_last".format(queue_name), now)
+    r.hincrby(POP_INTERVAL_HASH, "{}_count".format(queue_name), 1)


 def clear_pop_interval_stat(queue_name):
@@ -66,9 +66,9 @@ def clear_pop_interval_stat(queue_name):
     empty. For more details about the data updated see get_pop_interval_stat.
     """
     r = redis_connection()
-    r.hdel(POP_INTERVAL_HASH, '{}_start'.format(queue_name))
-    r.hset(POP_INTERVAL_HASH, '{}_last'.format(queue_name), 0)
-    r.hset(POP_INTERVAL_HASH, '{}_count'.format(queue_name), 0)
+    r.hdel(POP_INTERVAL_HASH, "{}_start".format(queue_name))
+    r.hset(POP_INTERVAL_HASH, "{}_last".format(queue_name), 0)
+    r.hset(POP_INTERVAL_HASH, "{}_count".format(queue_name), 0)


 def get_pop_interval_stat(queue_name):
@@ -82,9 +82,9 @@ def get_pop_interval_stat(queue_name):
     current burst of jobs.
""" r = redis_connection() - start = r.hget(POP_INTERVAL_HASH, '{}_start'.format(queue_name)) - last = r.hget(POP_INTERVAL_HASH, '{}_count'.format(queue_name)) - count = r.hget(POP_INTERVAL_HASH, '{}_count'.format(queue_name)) + start = r.hget(POP_INTERVAL_HASH, "{}_start".format(queue_name)) + last = r.hget(POP_INTERVAL_HASH, "{}_count".format(queue_name)) + count = r.hget(POP_INTERVAL_HASH, "{}_count".format(queue_name)) return start, last, count @@ -119,10 +119,12 @@ def clean_after(func): Call the clean_up function after the decorated function func is finished """ + @wraps(func) def wrapper(*args, **kwargs): try: return func(*args, **kwargs) finally: clean_up() + return wrapper diff --git a/src/autotester/server/utils/resource_management.py b/src/autotester/server/utils/resource_management.py index c90f52b1..6c45d082 100644 --- a/src/autotester/server/utils/resource_management.py +++ b/src/autotester/server/utils/resource_management.py @@ -1,11 +1,11 @@ import resource from autotester.config import config -RLIMIT_ADJUSTMENTS = {'nproc': 10} +RLIMIT_ADJUSTMENTS = {"nproc": 10} def rlimit_str2int(rlimit_string): - return getattr(resource, f'RLIMIT_{rlimit_string.upper()}') + return getattr(resource, f"RLIMIT_{rlimit_string.upper()}") def set_rlimits_before_test(): @@ -16,10 +16,12 @@ def set_rlimits_before_test(): processes that are not available for test processes. This ensures that cleanup processes will always be able to run. """ - for limit_str in config['rlimit_settings'].keys() | RLIMIT_ADJUSTMENTS.keys(): + for limit_str in config["rlimit_settings"].keys() | RLIMIT_ADJUSTMENTS.keys(): limit = rlimit_str2int(limit_str) - values = config['rlimit_settings'].get(limit_str, (resource.RLIM_INFINITY, resource.RLIM_INFINITY)) + values = config["rlimit_settings"].get( + limit_str, (resource.RLIM_INFINITY, resource.RLIM_INFINITY) + ) curr_soft, curr_hard = resource.getrlimit(limit) soft, hard = (min(vals) for vals in zip((curr_soft, curr_hard), values)) # reduce the hard limit so that cleanup scripts will have at least diff --git a/src/autotester/server/utils/string_management.py b/src/autotester/server/utils/string_management.py index 6d4dc1c1..4a3812a6 100644 --- a/src/autotester/server/utils/string_management.py +++ b/src/autotester/server/utils/string_management.py @@ -6,7 +6,7 @@ def stringify(*args): yield str(a) -def decode_if_bytes(b, format_='utf-8'): +def decode_if_bytes(b, format_="utf-8"): return b.decode(format_) if isinstance(b, bytes) else b @@ -30,7 +30,7 @@ def loads_partial_json(json_string, expected_type=None): obj, ind = decoder.raw_decode(json_string[i:]) if expected_type is None or isinstance(obj, expected_type): results.append(obj) - elif json_string[i:i + ind].strip(): + elif json_string[i : i + ind].strip(): malformed = True i += ind except json.JSONDecodeError: diff --git a/src/autotester/server/utils/user_management.py b/src/autotester/server/utils/user_management.py index cdbc822e..acc48bfd 100644 --- a/src/autotester/server/utils/user_management.py +++ b/src/autotester/server/utils/user_management.py @@ -17,20 +17,20 @@ def tester_user(): Raises an AutotestError if a tester user is not specified or if a workspace has not been setup for that user. 
""" - user_name = os.environ.get('MARKUSWORKERUSER') + user_name = os.environ.get("MARKUSWORKERUSER") if user_name is None: - raise TesterUserError('No worker users available to run this job') + raise TesterUserError("No worker users available to run this job") - user_workspace = os.path.join(config['workspace'], - config['_workspace_contents', '_workers'], - user_name) + user_workspace = os.path.join( + config["workspace"], config["_workspace_contents", "_workers"], user_name + ) if not os.path.isdir(user_workspace): - raise TesterUserError(f'No workspace directory for user: {user_name}') + raise TesterUserError(f"No workspace directory for user: {user_name}") return user_name, decode_if_bytes(user_workspace) def get_reaper_username(test_username): - for worker_name, reaper_name in config['users', 'workers']: + for worker_name, reaper_name in config["users", "workers"]: if worker_name == test_username: return reaper_name diff --git a/src/autotester/setup.py b/src/autotester/setup.py index 110d7046..9e464c21 100644 --- a/src/autotester/setup.py +++ b/src/autotester/setup.py @@ -2,13 +2,19 @@ test_exclusions = ["*.tests", "*.tests.*", "tests.*", "tests"] -setup(name='markus-autotester-testers', - version='2.0', - description='Testers for the automatic tester for programming assignments', - url='https://github.com/MarkUsProject/markus-autotesting', - author='Misha Schwartz, Alessio Di Sandro', - author_email='mschwa@cs.toronto.edu', - license='MIT', - include_package_data=True, - packages=['testers'] + [f'testers.{pkg}' for pkg in find_packages(where='testers', exclude=test_exclusions)], - zip_safe=False) +setup( + name="markus-autotester-testers", + version="2.0", + description="Testers for the automatic tester for programming assignments", + url="https://github.com/MarkUsProject/markus-autotesting", + author="Misha Schwartz, Alessio Di Sandro", + author_email="mschwa@cs.toronto.edu", + license="MIT", + include_package_data=True, + packages=["testers"] + + [ + f"testers.{pkg}" + for pkg in find_packages(where="testers", exclude=test_exclusions) + ], + zip_safe=False, +) diff --git a/src/autotester/testers/custom/default_hooks.py b/src/autotester/testers/custom/default_hooks.py index c6800893..f4247477 100644 --- a/src/autotester/testers/custom/default_hooks.py +++ b/src/autotester/testers/custom/default_hooks.py @@ -3,8 +3,8 @@ def before_all_custom(settings, **_kwargs): """ Make script files executable """ - for test_data in settings['test_data']: - for script_file in test_data['script_files']: + for test_data in settings["test_data"]: + for script_file in test_data["script_files"]: os.chmod(script_file, 0o755) diff --git a/src/autotester/testers/custom/markus_custom_tester.py b/src/autotester/testers/custom/markus_custom_tester.py index 29040c69..fedb673a 100644 --- a/src/autotester/testers/custom/markus_custom_tester.py +++ b/src/autotester/testers/custom/markus_custom_tester.py @@ -3,12 +3,11 @@ class MarkusCustomTester(MarkusTester): - def __init__(self, specs): super().__init__(specs, test_class=None) @MarkusTester.run_decorator def run(self): - file_paths = self.specs['test_data', 'script_files'] + file_paths = self.specs["test_data", "script_files"] for file_path in file_paths: - subprocess.run(f'./{file_path}') + subprocess.run(f"./{file_path}") diff --git a/src/autotester/testers/custom/tests/student_files/submission.py b/src/autotester/testers/custom/tests/student_files/submission.py index 91c9a826..6eead424 100644 --- 
a/src/autotester/testers/custom/tests/student_files/submission.py +++ b/src/autotester/testers/custom/tests/student_files/submission.py @@ -9,4 +9,14 @@ import json -print(json.dumps({'name': 'pass_test', 'output': 'NA', 'marks_earned': 2, 'marks_total': 2, 'status': 'pass'})) +print( + json.dumps( + { + "name": "pass_test", + "output": "NA", + "marks_earned": 2, + "marks_total": 2, + "status": "pass", + } + ) +) diff --git a/src/autotester/testers/haskell/markus_haskell_tester.py b/src/autotester/testers/haskell/markus_haskell_tester.py index c9af4f3b..86093b4e 100644 --- a/src/autotester/testers/haskell/markus_haskell_tester.py +++ b/src/autotester/testers/haskell/markus_haskell_tester.py @@ -7,18 +7,17 @@ class MarkusHaskellTest(MarkusTest): - def __init__(self, tester, test_file, result, feedback_open=None): - self._test_name = result.get('name') + self._test_name = result.get("name") self._file_name = test_file - self.status = result['status'] - self.message = result['description'] + self.status = result["status"] + self.message = result["description"] super().__init__(tester, feedback_open) @property def test_name(self): if self._test_name: - return '.'.join([self._file_name, self._test_name]) + return ".".join([self._file_name, self._test_name]) return self._file_name @MarkusTest.run_decorator @@ -34,10 +33,7 @@ def run(self): class MarkusHaskellTester(MarkusTester): # column indexes of relevant data from tasty-stats csv # reference: http://hackage.haskell.org/package/tasty-stats - TASTYSTATS = {'name': 1, - 'time': 2, - 'result': 3, - 'description': -1} + TASTYSTATS = {"name": 1, "time": 2, "result": 3, "description": -1} def __init__(self, specs, test_class=MarkusHaskellTest): super().__init__(specs, test_class) @@ -48,10 +44,12 @@ def _test_run_flags(self, test_file): """ module_flag = f"--modules={os.path.basename(test_file)}" stats_flag = "--ingredient=Test.Tasty.Stats.consoleStatsReporter" - flags = [module_flag, - stats_flag, - f"--timeout={self.specs['test_data', 'test_timeout']}s", - f"--quickcheck-tests={self.specs['test_data', 'test_cases']}"] + flags = [ + module_flag, + stats_flag, + f"--timeout={self.specs['test_data', 'test_timeout']}s", + f"--quickcheck-tests={self.specs['test_data', 'test_cases']}", + ] return flags def _parse_test_results(self, reader): @@ -62,10 +60,12 @@ def _parse_test_results(self, reader): """ test_results = [] for line in reader: - result = {'status': line[self.TASTYSTATS['result']], - 'name': line[self.TASTYSTATS['name']], - 'description': line[self.TASTYSTATS['description']], - 'time': line[self.TASTYSTATS['time']]} + result = { + "status": line[self.TASTYSTATS["result"]], + "name": line[self.TASTYSTATS["name"]], + "description": line[self.TASTYSTATS["description"]], + "time": line[self.TASTYSTATS["time"]], + } test_results.append(result) return test_results @@ -78,17 +78,23 @@ def run_haskell_tests(self): """ results = {} this_dir = os.getcwd() - for test_file in self.specs['test_data', 'script_files']: + for test_file in self.specs["test_data", "script_files"]: with tempfile.NamedTemporaryFile(dir=this_dir) as f: - cmd = ['tasty-discover', '.', '_', f.name] + self._test_run_flags(test_file) - subprocess.run(cmd, stdout=subprocess.DEVNULL, universal_newlines=True, check=True) + cmd = ["tasty-discover", ".", "_", f.name] + self._test_run_flags( + test_file + ) + subprocess.run( + cmd, stdout=subprocess.DEVNULL, universal_newlines=True, check=True + ) with tempfile.NamedTemporaryFile(mode="w+", dir=this_dir) as sf: - cmd = ['runghc', f.name, 
f"--stats={sf.name}"] - subprocess.run(cmd, - stdout=subprocess.DEVNULL, - stderr=subprocess.PIPE, - universal_newlines=True, - check=True) + cmd = ["runghc", f.name, f"--stats={sf.name}"] + subprocess.run( + cmd, + stdout=subprocess.DEVNULL, + stderr=subprocess.PIPE, + universal_newlines=True, + check=True, + ) results[test_file] = self._parse_test_results(csv.reader(sf)) return results diff --git a/src/autotester/testers/java/markus_java_tester.py b/src/autotester/testers/java/markus_java_tester.py index 09ba7db3..9d51d936 100644 --- a/src/autotester/testers/java/markus_java_tester.py +++ b/src/autotester/testers/java/markus_java_tester.py @@ -6,29 +6,28 @@ class MarkusJavaTest(MarkusTest): - class JUnitStatus(enum.Enum): SUCCESSFUL = 1 ABORTED = 2 FAILED = 3 ERRORS = { - 'bad_javac': 'Java compilation error: "{}"', - 'bad_java': 'Java runtime error: "{}"' + "bad_javac": 'Java compilation error: "{}"', + "bad_java": 'Java runtime error: "{}"', } def __init__(self, tester, result, feedback_open=None): - self.class_name, _sep, self.method_name = result['name'].partition('.') - self.description = result.get('description') - self.status = MarkusJavaTest.JUnitStatus[result['status']] - self.message = result.get('message') + self.class_name, _sep, self.method_name = result["name"].partition(".") + self.description = result.get("description") + self.status = MarkusJavaTest.JUnitStatus[result["status"]] + self.message = result.get("message") super().__init__(tester, feedback_open) @property def test_name(self): - name = f'{self.class_name}.{self.method_name}' + name = f"{self.class_name}.{self.method_name}" if self.description: - name += f' ({self.description})' + name += f" ({self.description})" return name @MarkusTest.run_decorator @@ -43,24 +42,39 @@ def run(self): class MarkusJavaTester(MarkusTester): - JAVA_TESTER_CLASS = 'edu.toronto.cs.teach.MarkusJavaTester' + JAVA_TESTER_CLASS = "edu.toronto.cs.teach.MarkusJavaTester" def __init__(self, specs, test_class=MarkusJavaTest): super().__init__(specs, test_class) self.java_classpath = f'.:{self.specs["install_data", "path_to_tester_jars"]}/*' def compile(self): - javac_command = ['javac', '-cp', self.java_classpath] - javac_command.extend(self.specs['test_data', 'script_files']) + javac_command = ["javac", "-cp", self.java_classpath] + javac_command.extend(self.specs["test_data", "script_files"]) # student files imported by tests will be compiled on cascade - subprocess.run(javac_command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True, - check=True) + subprocess.run( + javac_command, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + universal_newlines=True, + check=True, + ) def run_junit(self): - java_command = ['java', '-cp', self.java_classpath, MarkusJavaTester.JAVA_TESTER_CLASS] - java_command.extend(self.specs['test_data', 'script_files']) - java = subprocess.run(java_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, - check=True) + java_command = [ + "java", + "-cp", + self.java_classpath, + MarkusJavaTester.JAVA_TESTER_CLASS, + ] + java_command.extend(self.specs["test_data", "script_files"]) + java = subprocess.run( + java_command, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + universal_newlines=True, + check=True, + ) return java @MarkusTester.run_decorator @@ -69,7 +83,7 @@ def run(self): try: self.compile() except subprocess.CalledProcessError as e: - msg = MarkusJavaTest.ERRORS['bad_javac'].format(e.stdout) + msg = 
MarkusJavaTest.ERRORS["bad_javac"].format(e.stdout) raise MarkusTestError(msg) from e # run the tests with junit try: @@ -77,7 +91,7 @@ def run(self): if results.stderr: raise MarkusTestError(results.stderr) except subprocess.CalledProcessError as e: - msg = MarkusJavaTest.ERRORS['bad_java'].format(e.stdout + e.stderr) + msg = MarkusJavaTest.ERRORS["bad_java"].format(e.stdout + e.stderr) raise MarkusTestError(msg) from e with self.open_feedback() as feedback_open: for result in json.loads(results.stdout): diff --git a/src/autotester/testers/markus_test_specs.py b/src/autotester/testers/markus_test_specs.py index 0f1d6a66..dcddca78 100644 --- a/src/autotester/testers/markus_test_specs.py +++ b/src/autotester/testers/markus_test_specs.py @@ -3,7 +3,6 @@ class MarkusTestSpecs(Mapping): - def __init__(self, *args, **kwargs): self._specs = dict(*args, **kwargs) diff --git a/src/autotester/testers/markus_tester.py b/src/autotester/testers/markus_tester.py index f08ad318..d9d61cbe 100644 --- a/src/autotester/testers/markus_tester.py +++ b/src/autotester/testers/markus_tester.py @@ -11,20 +11,19 @@ class MarkusTestError(Exception): class MarkusTest(ABC): - class Status(enum.Enum): - PASS = 'pass' - PARTIAL = 'partial' - FAIL = 'fail' - ERROR = 'error' - ERROR_ALL = 'error_all' + PASS = "pass" + PARTIAL = "partial" + FAIL = "fail" + ERROR = "error" + ERROR_ALL = "error_all" @abstractmethod def __init__(self, tester, feedback_open=None): self.tester = tester self.points_total = self.get_total_points() if self.points_total <= 0: - raise ValueError('The test total points must be > 0') + raise ValueError("The test total points must be > 0") self.feedback_open = feedback_open @property @@ -36,10 +35,12 @@ def test_name(self): pass def get_total_points(self): - return self.tester.specs.get('points', default={}).get(self.test_name, 1) + return self.tester.specs.get("points", default={}).get(self.test_name, 1) @staticmethod - def format_result(test_name, status, output, points_earned, points_total, time=None): + def format_result( + test_name, status, output, points_earned, points_total, time=None + ): """ Formats a test result as expected by Markus. :param test_name: The test name @@ -52,19 +53,23 @@ def format_result(test_name, status, output, points_earned, points_total, time=N :return The formatted test result. """ if points_total < 0: - raise ValueError('The test total points must be >= 0') + raise ValueError("The test total points must be >= 0") if points_earned < 0: - raise ValueError('The test points earned must be >= 0') + raise ValueError("The test points earned must be >= 0") if time is not None: if not isinstance(time, int) or time < 0: - raise ValueError('The time must be a positive integer or None') - - result_json = json.dumps({'name': test_name, - 'output': output, - 'marks_earned': points_earned, - 'marks_total': points_total, - 'status': status.value, - 'time': time}) + raise ValueError("The time must be a positive integer or None") + + result_json = json.dumps( + { + "name": test_name, + "output": output, + "marks_earned": points_earned, + "marks_total": points_total, + "status": status.value, + "time": time, + } + ) return result_json def format(self, status, output, points_earned): @@ -76,9 +81,13 @@ def format(self, status, output, points_earned): points when assigning bonus points). :return The formatted test result. 
""" - return MarkusTest.format_result(self.test_name, status, output, points_earned, self.points_total) + return MarkusTest.format_result( + self.test_name, status, output, points_earned, self.points_total + ) - def add_feedback(self, status, feedback='', oracle_solution=None, test_solution=None): + def add_feedback( + self, status, feedback="", oracle_solution=None, test_solution=None + ): """ Adds the feedback of this test to the feedback file. :param status: A member of MarkusTest.Status. @@ -88,20 +97,24 @@ def add_feedback(self, status, feedback='', oracle_solution=None, test_solution= """ # TODO Reconcile with format: return both, or print both if self.feedback_open is None: - raise ValueError('No feedback file enabled') - self.feedback_open.write('========== {}: {} ==========\n\n'.format(self.test_name, status.value.upper())) + raise ValueError("No feedback file enabled") + self.feedback_open.write( + "========== {}: {} ==========\n\n".format( + self.test_name, status.value.upper() + ) + ) if feedback: - self.feedback_open.write('## Feedback: {}\n\n'.format(feedback)) + self.feedback_open.write("## Feedback: {}\n\n".format(feedback)) if status != self.Status.PASS: if oracle_solution: - self.feedback_open.write('## Expected Solution:\n\n') + self.feedback_open.write("## Expected Solution:\n\n") self.feedback_open.write(oracle_solution) if test_solution: - self.feedback_open.write('## Your Solution:\n\n') + self.feedback_open.write("## Your Solution:\n\n") self.feedback_open.write(test_solution) - self.feedback_open.write('\n') + self.feedback_open.write("\n") - def passed_with_bonus(self, points_bonus, message=''): + def passed_with_bonus(self, points_bonus, message=""): """ Passes this test earning bonus points in addition to the test total points. If a feedback file is enabled, adds feedback to it. @@ -110,24 +123,32 @@ def passed_with_bonus(self, points_bonus, message=''): :return The formatted passed test. """ if points_bonus < 0: - raise ValueError('The test bonus points must be >= 0') - result = self.format(status=self.Status.PASS, output=message, points_earned=self.points_total+points_bonus) + raise ValueError("The test bonus points must be >= 0") + result = self.format( + status=self.Status.PASS, + output=message, + points_earned=self.points_total + points_bonus, + ) if self.feedback_open: self.add_feedback(status=self.Status.PASS) return result - def passed(self, message=''): + def passed(self, message=""): """ Passes this test earning the test total points. If a feedback file is enabled, adds feedback to it. :param message: An optional message, will be shown as test output. :return The formatted passed test. """ - result = self.format(status=self.Status.PASS, output=message, points_earned=self.points_total) + result = self.format( + status=self.Status.PASS, output=message, points_earned=self.points_total + ) if self.feedback_open: self.add_feedback(status=self.Status.PASS) return result - def partially_passed(self, points_earned, message, oracle_solution=None, test_solution=None): + def partially_passed( + self, points_earned, message, oracle_solution=None, test_solution=None + ): """ Partially passes this test with some points earned. If a feedback file is enabled, adds feedback to it. :param points_earned: The points earned by the test, must be a float > 0 and < the test total points. @@ -137,13 +158,19 @@ def partially_passed(self, points_earned, message, oracle_solution=None, test_so :return The formatted partially passed test. 
""" if points_earned <= 0: - raise ValueError('The test points earned must be > 0') + raise ValueError("The test points earned must be > 0") if points_earned >= self.points_total: - raise ValueError('The test points earned must be < the test total points') - result = self.format(status=self.Status.PARTIAL, output=message, points_earned=points_earned) + raise ValueError("The test points earned must be < the test total points") + result = self.format( + status=self.Status.PARTIAL, output=message, points_earned=points_earned + ) if self.feedback_open: - self.add_feedback(status=self.Status.PARTIAL, feedback=message, oracle_solution=oracle_solution, - test_solution=test_solution) + self.add_feedback( + status=self.Status.PARTIAL, + feedback=message, + oracle_solution=oracle_solution, + test_solution=test_solution, + ) return result def failed(self, message, oracle_solution=None, test_solution=None): @@ -156,11 +183,15 @@ def failed(self, message, oracle_solution=None, test_solution=None): """ result = self.format(status=self.Status.FAIL, output=message, points_earned=0) if self.feedback_open: - self.add_feedback(status=self.Status.FAIL, feedback=message, oracle_solution=oracle_solution, - test_solution=test_solution) + self.add_feedback( + status=self.Status.FAIL, + feedback=message, + oracle_solution=oracle_solution, + test_solution=test_solution, + ) return result - def done(self, points_earned, message='', oracle_solution=None, test_solution=None): + def done(self, points_earned, message="", oracle_solution=None, test_solution=None): """ Passes, partially passes or fails this test depending on the points earned. If the points are <= 0 this test is failed with 0 points earned, if the points are >= test total points this test is passed earning the test total @@ -180,7 +211,9 @@ def done(self, points_earned, message='', oracle_solution=None, test_solution=No points_bonus = points_earned - self.points_total return self.passed_with_bonus(points_bonus, message) else: - return self.partially_passed(points_earned, message, oracle_solution, test_solution) + return self.partially_passed( + points_earned, message, oracle_solution, test_solution + ) def error(self, message): """ @@ -215,6 +248,7 @@ def run_decorator(run_func): only the error message is sent in the description, otherwise the whole traceback is sent. """ + @wraps(run_func) def run_func_wrapper(self, *args, **kwargs): try: @@ -226,8 +260,9 @@ def run_func_wrapper(self, *args, **kwargs): except MarkusTestError as e: result_json = self.error(message=str(e)) except Exception as e: - result_json = self.error(message=f'{traceback.format_exc()}\n{e}') + result_json = self.error(message=f"{traceback.format_exc()}\n{e}") return result_json + return run_func_wrapper @abstractmethod @@ -240,7 +275,6 @@ def run(self): class MarkusTester(ABC): - @abstractmethod def __init__(self, specs, test_class=MarkusTest): self.specs = specs @@ -256,8 +290,13 @@ def error_all(message, points_total=0, expected=False): :return The formatted erred tests. 
""" status = MarkusTest.Status.ERROR if expected else MarkusTest.Status.ERROR_ALL - return MarkusTest.format_result(test_name='All tests', status=status, output=message, - points_earned=0, points_total=points_total) + return MarkusTest.format_result( + test_name="All tests", + status=status, + output=message, + points_earned=0, + points_total=points_total, + ) def before_tester_run(self): """ @@ -281,6 +320,7 @@ def run_decorator(run_func): only the error message is sent in the description, otherwise the whole traceback is sent. """ + @wraps(run_func) def run_func_wrapper(self, *args, **kwargs): try: @@ -289,13 +329,17 @@ def run_func_wrapper(self, *args, **kwargs): except MarkusTestError as e: print(MarkusTester.error_all(message=str(e), expected=True), flush=True) except Exception as e: - print(MarkusTester.error_all(message=f'{traceback.format_exc()}\n{e}'), flush=True) + print( + MarkusTester.error_all(message=f"{traceback.format_exc()}\n{e}"), + flush=True, + ) finally: self.after_tester_run() + return run_func_wrapper @contextmanager - def open_feedback(self, filename=None, mode='w'): + def open_feedback(self, filename=None, mode="w"): """ Yields an open file object, opened in mode if it exists, otherwise it yields None. @@ -304,7 +348,7 @@ def open_feedback(self, filename=None, mode='w'): used. """ if filename is None: - filename = self.specs.get('test_data', 'feedback_file_name') + filename = self.specs.get("test_data", "feedback_file_name") if filename: feedback_open = open(filename, mode) try: diff --git a/src/autotester/testers/py/lib/c_helper.py b/src/autotester/testers/py/lib/c_helper.py index f888367c..a26ccd9b 100644 --- a/src/autotester/testers/py/lib/c_helper.py +++ b/src/autotester/testers/py/lib/c_helper.py @@ -10,17 +10,17 @@ import unittest -DEFAULT_LTRACE_LOG_FILE = 'ltrace_log.txt' -DEFAULT_GCC_FLAGS = ['-std=gnu99', '-Wall', '-g'] -DEFAULT_LTRACE_FLAGS = ['-f', '-n', '2', '-o', DEFAULT_LTRACE_LOG_FILE] +DEFAULT_LTRACE_LOG_FILE = "ltrace_log.txt" +DEFAULT_GCC_FLAGS = ["-std=gnu99", "-Wall", "-g"] +DEFAULT_LTRACE_FLAGS = ["-f", "-n", "2", "-o", DEFAULT_LTRACE_LOG_FILE] # Note that the keys of the dictionary correspond to the "type" of call it was regex_dict = OrderedDict( - resumed='([0-9]+)\s*<\.\.\. (.*) (?:resumed>(.*)=\s)(-?[0-9]+)$', - unfinished='([0-9]+)\s*(.*)\((.*) None: """Compile the program, storing stdout and stderr of compilation. @@ -47,10 +48,11 @@ def setUpClass(cls) -> None: """ if not cls.make and not cls.source_files: raise ValueError( - 'ERROR: TestExecutable subclasses must specify source_files or set make=True.') + "ERROR: TestExecutable subclasses must specify source_files or set make=True." + ) - cls.compile_out = '' - cls.compile_err = '' + cls.compile_out = "" + cls.compile_err = "" # Default executable name is based on the first source file. if not cls.make and not cls.executable_name: @@ -63,31 +65,33 @@ def setUpClass(cls) -> None: try: if cls.make: # Tuple (stdoutdata, stderrdata) is returned - cls.compile_out, cls.compile_err, _ = _make(cls.make_targets, cls.make_args) + cls.compile_out, cls.compile_err, _ = _make( + cls.make_targets, cls.make_args + ) else: - cls.compile_out, cls.compile_err, _ = _compile(cls.source_files, cls.executable_name) + cls.compile_out, cls.compile_err, _ = _compile( + cls.source_files, cls.executable_name + ) except subprocess.CalledProcessError: cls.compiled = False else: cls.compiled = True - def setUp(self) -> None: """If the compilation was not successful, automatically fail every test. 
""" if not self.compiled: - self.fail('Test did not run due to a compilation error.') + self.fail("Test did not run due to a compilation error.") def _check_compiler_warnings(self) -> None: """Assert that compilation occurred without errors or warnings. """ - self.assertEqual(self.compile_out, '') - self.assertEqual(self.compile_err, '') + self.assertEqual(self.compile_out, "") + self.assertEqual(self.compile_err, "") def _run_exec(self, args: Optional[List[str]] = None, **kwargs) -> None: """Run this test class' executable with the given arguments and options.""" - return _exec([os.path.join('.', self.executable_name)] + (args or []), - **kwargs) + return _exec([os.path.join(".", self.executable_name)] + (args or []), **kwargs) def simple_run(args: List[str], **kwargs): @@ -97,13 +101,26 @@ def simple_run(args: List[str], **kwargs): Returns a function which takes an object on which to call run_exec (hence this object must be a subclass of TestExecutable). """ - def _t(self: 'TestExecutable') -> None: + + def _t(self: "TestExecutable") -> None: self._run_exec(args=args, **kwargs) return _t -def simple_test(args: List[str], expected_stdout='', *, expected_stderr='', expected_status=0, input_=None, timeout=2, check=True, rstrip=False, doc='', stderr_relax=False): +def simple_test( + args: List[str], + expected_stdout="", + *, + expected_stderr="", + expected_status=0, + input_=None, + timeout=2, + check=True, + rstrip=False, + doc="", + stderr_relax=False +): """Create a unittest test for fixed command-line arguments, expected stdout and stderr, and exit status. If rstrip is True, ignore trailing whitespace when doing text comparison. @@ -116,8 +133,11 @@ def simple_test(args: List[str], expected_stdout='', *, expected_stderr='', expe (as a substring check) in addition to in stderr, passing the test if one of these succeeds. """ - def _t(self: 'TestExecutable') -> None: - stdout, stderr, returncode = self._run_exec(args=args, input_=input_, timeout=timeout, check=check) + + def _t(self: "TestExecutable") -> None: + stdout, stderr, returncode = self._run_exec( + args=args, input_=input_, timeout=timeout, check=check + ) nonlocal expected_stderr nonlocal expected_stdout @@ -148,7 +168,18 @@ def _t(self: 'TestExecutable') -> None: _t.__doc__ = doc return _t -def substr_test(args: List[str], expected_stdout='', *, expected_stderr='', expected_status=0, input_=None, timeout=2, check=True, doc=''): + +def substr_test( + args: List[str], + expected_stdout="", + *, + expected_stderr="", + expected_status=0, + input_=None, + timeout=2, + check=True, + doc="" +): """Create a unittest test for fixed command-line arguments, expected stdout and stderr, and exit status. This test is more lenient that simple_test because it looks for expected @@ -159,8 +190,11 @@ def substr_test(args: List[str], expected_stdout='', *, expected_stderr='', expe doc specifies the docstring of the test function. """ - def _t(self: 'TestExecutable') -> None: - stdout, stderr, returncode = self._run_exec(args=args, input_=input_, timeout=timeout, check=check) + + def _t(self: "TestExecutable") -> None: + stdout, stderr, returncode = self._run_exec( + args=args, input_=input_, timeout=timeout, check=check + ) nonlocal expected_stderr nonlocal expected_stdout @@ -177,6 +211,7 @@ def _t(self: 'TestExecutable') -> None: _t.__doc__ = doc return _t + class TestTrace(TestExecutable): """Test class to support checks with ltrace. 
@@ -188,19 +223,28 @@ class TestTrace(TestExecutable): a Trace object, since it helps parse any additional arguments to ltrace args is a list of string arguments """ - call_types = [] # The only call types to watch out for (see ltrace man page) + + call_types = [] # The only call types to watch out for (see ltrace man page) @classmethod - def _check_trace(cls, args: Optional[List[str]] = None, ltrace_flags=None, **kwargs): + def _check_trace( + cls, args: Optional[List[str]] = None, ltrace_flags=None, **kwargs + ): if ltrace_flags is None: ltrace_flags = DEFAULT_LTRACE_FLAGS else: ltrace_flags = DEFAULT_LTRACE_FLAGS + ltrace_flags if cls.call_types: - ltrace_flags = ltrace_flags + ['-e', '+'.join(['__libc_start_main'] + cls.call_types)] + ltrace_flags = ltrace_flags + [ + "-e", + "+".join(["__libc_start_main"] + cls.call_types), + ] - return Trace([os.path.join('.', cls.executable_name)] + (args or []), - ltrace_flags, **kwargs) + return Trace( + [os.path.join(".", cls.executable_name)] + (args or []), + ltrace_flags, + **kwargs + ) class Trace: @@ -230,16 +274,18 @@ class Trace: this can be confirmed examining the regex """ - def __init__(self, command: List[str], ltrace_flags: Optional[List[str]] = None, **kwargs): + def __init__( + self, command: List[str], ltrace_flags: Optional[List[str]] = None, **kwargs + ): ltrace_flags = ltrace_flags or [] try: - _exec(['ltrace'] + ltrace_flags + command, **kwargs) + _exec(["ltrace"] + ltrace_flags + command, **kwargs) except subprocess.TimeoutExpired: # allow for partial results to be reported pass - with open(DEFAULT_LTRACE_LOG_FILE, 'rb') as f: + with open(DEFAULT_LTRACE_LOG_FILE, "rb") as f: f_bytes = f.read() - self.raw = f_bytes.decode(errors='ignore') + self.raw = f_bytes.decode(errors="ignore") self.parent_first_process = None self.lines = [] @@ -247,7 +293,7 @@ def __init__(self, command: List[str], ltrace_flags: Optional[List[str]] = None, self.first_process = None self.split_lines = self.raw.splitlines() if len(self.split_lines) > 1: - parsed_line = parse_arbitrary(self.split_lines[0], r'([0-9]+)\s*.') + parsed_line = parse_arbitrary(self.split_lines[0], r"([0-9]+)\s*.") if parsed_line: self.first_process = parsed_line[0] else: @@ -267,10 +313,10 @@ def get_status(self, pid): return None for calls in self.process_log[pid]: - if 'exited' in calls[0]: + if "exited" in calls[0]: return int(calls[1].split()[-1]) - def lines_for_pid(self, pid, match=''): + def lines_for_pid(self, pid, match=""): """Return the lines in this trace for the given pid. 
If match is not-empty, only return the lines whose function names @@ -284,8 +330,7 @@ def lines_for_pid(self, pid, match=''): if not match: return self.process_log[pid] - return [call for call in self.process_log[pid] - if call[0] == match] + return [call for call in self.process_log[pid] if call[0] == match] def run_through_regexes(regexes, trace_line): @@ -305,21 +350,21 @@ def run_through_regexes(regexes, trace_line): # print("this is the len of final result " + str(len(final_result))) # print(final_result) # clean the line before putting it in - sep = '->' + sep = "->" rest = final_result[1].split(sep, 1) - if len(rest) > 1: #in case there were multiple + if len(rest) > 1: # in case there were multiple final_result[1] = rest[1] # print(final_result) else: raise ValueError("groups mismatch arity") while len(final_result) < 4: - final_result+=(None,) + final_result += (None,) - final_result += (key,) # append the type of the entry to the end - return final_result # stops as soon as a matching regex is encountered + final_result += (key,) # append the type of the entry to the end + return final_result # stops as soon as a matching regex is encountered # print("line did not have any mathces " + trace_line) - return ('','','','') # did not match with any of the regexes + return ("", "", "", "") # did not match with any of the regexes def parse_arbitrary(trace_line, regex): @@ -346,16 +391,19 @@ class TestGenerator: Note: silent failures can happen (e.g., if the executable is not found). """ + dict_of_tests = defaultdict(list) # TODO add support for command-line arguments - def __init__(self, - input_dir=None, - executable_path=None, - out_dir=None, - input_extension='txt', - output_extension='stdout', - error_extension='stderr'): + def __init__( + self, + input_dir=None, + executable_path=None, + out_dir=None, + input_extension="txt", + output_extension="stdout", + error_extension="stderr", + ): """ `input_dir` specifies where the input files are found The extensions specify a pattern to look for in target files @@ -371,30 +419,36 @@ def __init__(self, self.output_extension = output_extension self.error_extension = error_extension - def build_outputs(self, args=''): + def build_outputs(self, args=""): """Generate all output files. `arg`s is optionally a string containing the command-line arguments given to the executable. """ - print(os.path.join(self.input_dir, '*.' + self.input_extension)) - for file in glob.glob(os.path.join(self.input_dir, '*.' + self.input_extension)): + print(os.path.join(self.input_dir, "*." + self.input_extension)) + for file in glob.glob( + os.path.join(self.input_dir, "*." + self.input_extension) + ): print(file) name = os.path.splitext(os.path.basename(file))[0] - stdout_file = os.path.join(self.out_dir, name + '.' + self.output_extension) - stderr_file = os.path.join(self.out_dir, name + '.' + self.error_extension) - cmd = "{} {} < {} > {} 2> {}".format(self.executable_path, args, file, stdout_file, stderr_file) - print('Running:', cmd) + stdout_file = os.path.join(self.out_dir, name + "." + self.output_extension) + stderr_file = os.path.join(self.out_dir, name + "." + self.error_extension) + cmd = "{} {} < {} > {} 2> {}".format( + self.executable_path, args, file, stdout_file, stderr_file + ) + print("Running:", cmd) try: _exec_shell([cmd]) - except subprocess.TimeoutExpired: # TODO add handling for TimeoutExpired (error log file for example?) + except subprocess.TimeoutExpired: # TODO add handling for TimeoutExpired (error log file for example?) 
print("failed on {}".format(file)) def clean(self): """Remove generated test files.""" - for file in glob.glob(os.path.join(self.input_dir, '*.' + self.input_extension)): + for file in glob.glob( + os.path.join(self.input_dir, "*." + self.input_extension) + ): name = os.path.splitext(os.path.basename(file))[0] - stdout_file = os.path.join(self.out_dir, name + '.' + self.output_extension) - stderr_file = os.path.join(self.out_dir, name + '.' + self.error_extension) + stdout_file = os.path.join(self.out_dir, name + "." + self.output_extension) + stderr_file = os.path.join(self.out_dir, name + "." + self.error_extension) os.remove(stdout_file) os.remove(stderr_file) @@ -404,32 +458,37 @@ def populate_tests(self, test_klass, args=None): This must be called *after* build_outputs has been called. """ args = args or [] - for file in glob.glob(os.path.join(self.input_dir, '*.' + self.input_extension)): + for file in glob.glob( + os.path.join(self.input_dir, "*." + self.input_extension) + ): name = os.path.splitext(os.path.basename(file))[0] - stdout_file = os.path.join(self.out_dir, name + '.' + self.output_extension) - stderr_file = os.path.join(self.out_dir, name + '.' + self.error_extension) + stdout_file = os.path.join(self.out_dir, name + "." + self.output_extension) + stderr_file = os.path.join(self.out_dir, name + "." + self.error_extension) with open(file) as in_, open(stdout_file) as out, open(stderr_file) as err: test_in = in_.read() test_out = out.read() test_err = err.read() - setattr(test_klass, 'test_' + name, - simple_test(args, test_out, test_err, test_in)) + setattr( + test_klass, + "test_" + name, + simple_test(args, test_out, test_err, test_in), + ) def _compile(files, exec_name=None, gcc_flags=DEFAULT_GCC_FLAGS, **kwargs): """Run gcc with the given flags on the given files.""" if isinstance(files, str): files = [files] - args = ['gcc'] + gcc_flags + args = ["gcc"] + gcc_flags if exec_name: - args += ['-o', exec_name] + args += ["-o", exec_name] return _exec(args + files, **kwargs) -def _make(targets=None, make_args=['--silent'], **kwargs): +def _make(targets=None, make_args=["--silent"], **kwargs): """Run make on the given targets.""" - return _exec(['make'] + make_args + (targets or []), timeout=60, **kwargs) + return _exec(["make"] + make_args + (targets or []), timeout=60, **kwargs) def _exec(args, *, input_=None, timeout=10, check=True, shell=False): @@ -449,7 +508,8 @@ def _exec(args, *, input_=None, timeout=10, check=True, shell=False): stderr=subprocess.PIPE, encoding=locale.getpreferredencoding(False), preexec_fn=lambda: os.setsid(), - shell=shell) + shell=shell, + ) try: stdout, stderr = proc.communicate(timeout=timeout, input=input_) @@ -490,7 +550,7 @@ def ongoing_process(args, check_killed=True): raise proc.exception if check_killed: - assert proc.returncode == -9, 'server exited abnormally' + assert proc.returncode == -9, "server exited abnormally" def _exec_shell(args, *, input_=None, timeout=1, check=True): @@ -510,7 +570,8 @@ def _exec_shell(args, *, input_=None, timeout=1, check=True): stderr=subprocess.PIPE, encoding=locale.getpreferredencoding(False), preexec_fn=lambda: os.setsid(), - shell=True) + shell=True, + ) try: return proc.communicate(timeout=timeout, input=input_) except subprocess.TimeoutExpired as e: diff --git a/src/autotester/testers/py/lib/sql_helper.py b/src/autotester/testers/py/lib/sql_helper.py index b84a504f..55611b4d 100644 --- a/src/autotester/testers/py/lib/sql_helper.py +++ b/src/autotester/testers/py/lib/sql_helper.py @@ -20,7 +20,7 @@ 
def _in_autotest_env() -> bool: This function can be used to check whether the AUTOTESTENV environment variable has been set to 'true'. """ - return os.environ.get('AUTOTESTENV') == 'true' + return os.environ.get("AUTOTESTENV") == "true" def connection(*args, **kwargs): @@ -35,16 +35,18 @@ def connection(*args, **kwargs): will be used to call psycopg2.connect in order to connect to a database. """ if _in_autotest_env(): - kwargs = {**kwargs, - 'database': os.environ.get('PGDATABASE'), - 'password': os.environ.get('PGPASSWORD'), - 'user': os.environ.get('PGUSER'), - 'host': 'localhost'} + kwargs = { + **kwargs, + "database": os.environ.get("PGDATABASE"), + "password": os.environ.get("PGPASSWORD"), + "user": os.environ.get("PGUSER"), + "host": "localhost", + } return _unmockable_psycopg2_connect(*args, **kwargs) @contextmanager -def patch_connection(target: str = 'psycopg2.connect') -> ContextManager: +def patch_connection(target: str = "psycopg2.connect") -> ContextManager: """ Context manager that patches any call to the function decribed in the string with the connection function (in this module). @@ -71,7 +73,7 @@ def patch_connection(target: str = 'psycopg2.connect') -> ContextManager: yield -def patch_connection_class(target: str = 'psycopg2.connect') -> Callable: +def patch_connection_class(target: str = "psycopg2.connect") -> Callable: """ Class decorator that adds the patch_connection decorator to every method in the class. @@ -83,18 +85,22 @@ def patch_connection_class(target: str = 'psycopg2.connect') -> Callable: >>> def __init__(self): >>> self.conn = psycopg2.connect() # calls __main__._connection instead """ + def _connect(cls): for name, method in inspect.getmembers(cls, inspect.isroutine): setattr(cls, name, patch_connection(target)(method)) return cls + return _connect -def execute_psql_file(filename: str, - *args: str, - database: Optional[str] = None, - password: Optional[str] = None, - user: Optional[str] = None) -> subprocess.CompletedProcess: +def execute_psql_file( + filename: str, + *args: str, + database: Optional[str] = None, + password: Optional[str] = None, + user: Optional[str] = None +) -> subprocess.CompletedProcess: """ Return a CompletedProcess object returned after calling: @@ -131,12 +137,14 @@ def execute_psql_file(filename: str, env = os.environ else: db_vars = { - 'PGUSER': user or os.environ.get('PGUSER'), - 'PGPASSWORD': password or os.environ.get('PGPASSWORD'), - 'PGDATABASE': database or os.environ.get('PGDATABASE') + "PGUSER": user or os.environ.get("PGUSER"), + "PGPASSWORD": password or os.environ.get("PGPASSWORD"), + "PGDATABASE": database or os.environ.get("PGDATABASE"), } env = {**os.environ, **db_vars} - return subprocess.run(['psql', '-f', filename] + list(args), env=env, capture_output=True) + return subprocess.run( + ["psql", "-f", filename] + list(args), env=env, capture_output=True + ) class PSQLTest: @@ -219,16 +227,18 @@ def schema(cls, schema: str, persist: bool = False) -> ContextManager: with cls.cursor() as curr: curr.execute("SET SEARCH_PATH TO %s;", org_search_path) if not persist: - curr.execute("DROP SCHEMA IF EXISTS %s CASCADE;", - [AsIs(schema)]) - if schema.lower() == 'public': + curr.execute("DROP SCHEMA IF EXISTS %s CASCADE;", [AsIs(schema)]) + if schema.lower() == "public": curr.execute("CREATE SCHEMA IF NOT EXISTS public;") @classmethod - def copy_schema(cls, to_schema: str, - tables: Optional[List[str]] = None, - from_schema: str = 'public', - overwrite: bool = True) -> None: + def copy_schema( + cls, + to_schema: str, + 
tables: Optional[List[str]] = None, + from_schema: str = "public", + overwrite: bool = True, + ) -> None: """ Copies tables from to . is 'public' by default @@ -237,8 +247,7 @@ def copy_schema(cls, to_schema: str, names in will be copied. If is True, tables of the same name in will be overwritten. """ - strings = {'new': AsIs(to_schema), - 'old': AsIs(from_schema)} + strings = {"new": AsIs(to_schema), "old": AsIs(from_schema)} if tables is None: with cls.cursor() as curr: curr.execute(cls.GET_TABLES_STR, [from_schema]) @@ -247,14 +256,16 @@ def copy_schema(cls, to_schema: str, curr.execute("CREATE SCHEMA IF NOT EXISTS %s;", [AsIs(to_schema)]) for table in tables: if overwrite: - curr.execute("DROP TABLE IF EXISTS %s.%s;", - [AsIs(to_schema), AsIs(table)]) - strs = {**strings, 'table': AsIs(table)} + curr.execute( + "DROP TABLE IF EXISTS %s.%s;", [AsIs(to_schema), AsIs(table)] + ) + strs = {**strings, "table": AsIs(table)} curr.execute(cls.SCHEMA_COPY_STR, strs) @classmethod - def execute_files(cls, files: List[str], *args, - cursor: Optional[CursorType] = None, **kwargs) -> None: + def execute_files( + cls, files: List[str], *args, cursor: Optional[CursorType] = None, **kwargs + ) -> None: """ Execute each file in by passing the content of each to cursor.execute. @@ -263,6 +274,7 @@ def execute_files(cls, files: List[str], *args, the and ) or the cursor object passed as the argument is used if is not None. """ + def _execute_files(): for file in files: with open(file) as f: diff --git a/src/autotester/testers/py/markus_python_tester.py b/src/autotester/testers/py/markus_python_tester.py index 8919d992..5c16d6ab 100644 --- a/src/autotester/testers/py/markus_python_tester.py +++ b/src/autotester/testers/py/markus_python_tester.py @@ -18,25 +18,37 @@ def __init__(self, stream, descriptions, verbosity): self.successes = [] def addSuccess(self, test): - self.results.append({'status': 'success', - 'name': test.id(), - 'errors': '', - 'description': test._testMethodDoc}) + self.results.append( + { + "status": "success", + "name": test.id(), + "errors": "", + "description": test._testMethodDoc, + } + ) self.successes.append(test) def addFailure(self, test, err): super().addFailure(test, err) - self.results.append({'status': 'failure', - 'name': test.id(), - 'errors': self.failures[-1][-1], - 'description': test._testMethodDoc}) + self.results.append( + { + "status": "failure", + "name": test.id(), + "errors": self.failures[-1][-1], + "description": test._testMethodDoc, + } + ) def addError(self, test, err): super().addError(test, err) - self.results.append({'status': 'error', - 'name': test.id(), - 'errors': self.errors[-1][-1], - 'description': test._testMethodDoc}) + self.results.append( + { + "status": "error", + "name": test.id(), + "errors": self.errors[-1][-1], + "description": test._testMethodDoc, + } + ) class MarkusPytestPlugin: @@ -53,34 +65,37 @@ def pytest_runtest_makereport(self, item, call): outcome = yield rep = outcome.get_result() if rep.failed or item.nodeid not in self.results: - self.results[item.nodeid] = {'status': 'failure' if rep.failed else 'success', - 'name': item.nodeid, - 'errors': str(rep.longrepr) if rep.failed else '', - 'description': item.obj.__doc__} + self.results[item.nodeid] = { + "status": "failure" if rep.failed else "success", + "name": item.nodeid, + "errors": str(rep.longrepr) if rep.failed else "", + "description": item.obj.__doc__, + } return rep def pytest_collectreport(self, report): if report.failed: - self.results[report.nodeid] = {'status': 
'error', - 'name': report.nodeid, - 'errors': str(report.longrepr), - 'description': None} + self.results[report.nodeid] = { + "status": "error", + "name": report.nodeid, + "errors": str(report.longrepr), + "description": None, + } class MarkusPythonTest(MarkusTest): - def __init__(self, tester, test_file, result, feedback_open=None): - self._test_name = result['name'] + self._test_name = result["name"] self._file_name = test_file - self.description = result.get('description') - self.status = result['status'] - self.message = result['errors'] + self.description = result.get("description") + self.status = result["status"] + self.message = result["errors"] super().__init__(tester, feedback_open) @property def test_name(self): if self.description: - return f'{self._test_name} ({self.description})' + return f"{self._test_name} ({self.description})" return self._test_name @MarkusTest.run_decorator @@ -94,7 +109,6 @@ def run(self): class MarkusPythonTester(MarkusTester): - def __init__(self, specs, test_class=MarkusPythonTest): super().__init__(specs, test_class) @@ -115,11 +129,12 @@ def _run_unittest_tests(self, test_file): of these tests """ test_suite = self._load_unittest_tests(test_file) - with open(os.devnull, 'w') as nullstream: + with open(os.devnull, "w") as nullstream: test_runner = unittest.TextTestRunner( - verbosity=self.specs['test_data', 'output_verbosity'], + verbosity=self.specs["test_data", "output_verbosity"], stream=nullstream, - resultclass=MarkusTextTestResults) + resultclass=MarkusTextTestResults, + ) test_result = test_runner.run(test_suite) return test_result.results @@ -129,12 +144,12 @@ def _run_pytest_tests(self, test_file): of these tests """ results = [] - with open(os.devnull, 'w') as null_out: + with open(os.devnull, "w") as null_out: try: sys.stdout = null_out - verbosity = self.specs['test_data', 'output_verbosity'] + verbosity = self.specs["test_data", "output_verbosity"] plugin = MarkusPytestPlugin() - pytest.main([test_file, f'--tb={verbosity}'], plugins=[plugin]) + pytest.main([test_file, f"--tb={verbosity}"], plugins=[plugin]) results.extend(plugin.results.values()) finally: sys.stdout = sys.__stdout__ @@ -145,8 +160,8 @@ def run_python_tests(self): Return a dict mapping each filename to its results """ results = {} - for test_file in self.specs['test_data', 'script_files']: - if self.specs['test_data', 'tester'] == 'unittest': + for test_file in self.specs["test_data", "script_files"]: + if self.specs["test_data", "tester"] == "unittest": result = self._run_unittest_tests(test_file) else: result = self._run_pytest_tests(test_file) diff --git a/src/autotester/testers/py/tests/script_files/test.py b/src/autotester/testers/py/tests/script_files/test.py index a4945721..e26e3567 100644 --- a/src/autotester/testers/py/tests/script_files/test.py +++ b/src/autotester/testers/py/tests/script_files/test.py @@ -1,4 +1,5 @@ import unittest + try: import submission except ImportError: @@ -6,7 +7,6 @@ class Test1(unittest.TestCase): - def test_passes(self): """This test should pass""" self.assertTrue(submission.return_true()) @@ -17,11 +17,10 @@ def test_fails(self): class Test2(unittest.TestCase): - def test_fails_and_outputs_json(self): """This test should fail and print json""" self.fail(submission.return_json()) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/src/autotester/testers/py/tests/script_files/test2.py b/src/autotester/testers/py/tests/script_files/test2.py index 8e61e77f..512d699c 100644 --- 
a/src/autotester/testers/py/tests/script_files/test2.py +++ b/src/autotester/testers/py/tests/script_files/test2.py @@ -1,4 +1,5 @@ import pytest + try: import submission except ImportError: diff --git a/src/autotester/testers/py/tests/script_files/test_sql.py b/src/autotester/testers/py/tests/script_files/test_sql.py index 0aa44973..e53c3138 100644 --- a/src/autotester/testers/py/tests/script_files/test_sql.py +++ b/src/autotester/testers/py/tests/script_files/test_sql.py @@ -3,7 +3,7 @@ class TestDataset1(sh.PSQLTest): - data_file = 'data1.sql' + data_file = "data1.sql" query = """ SELECT table1.word, table2.number FROM table1 JOIN table2 @@ -16,19 +16,19 @@ def setup_class(cls): # this means you only have to create a connection once for the whole test class cls.create_connection() # create a new schema named 'solution_schema' and switch the search path to that schema. - with cls.schema('solution_schema'): + with cls.schema("solution_schema"): # execute your files in this schema, they will populate the schema with some tables - cls.execute_files(['schema.ddl', cls.data_file]) + cls.execute_files(["schema.ddl", cls.data_file]) # execute the solution query in this schema, get the results and store them in a class variable with cls.cursor() as curr: curr.execute(cls.query) cls.solution_data = curr.fetchall() # create a new schema named 'test_schema' and switch the search path to that schema. - with cls.schema('test_schema'): + with cls.schema("test_schema"): # copy all the tables in solution_schema to test_schema - cls.copy_schema('test_schema', from_schema='solution_schema') + cls.copy_schema("test_schema", from_schema="solution_schema") # execute the student's file, this will create a table called correct_no_order - cls.execute_files(['submission.sql']) + cls.execute_files(["submission.sql"]) # get the contents of the correct_no_order table and store it in a class variable with cls.cursor() as curr: curr.execute("SELECT * FROM correct_no_order;") @@ -63,9 +63,9 @@ def test_falsy_same_as_null(self): def test_schema_gone(self): """ Test that demonstrates that the test_schema schema created in the setup_class method has been deleted """ with self.cursor() as curr: - curr.execute(self.GET_TABLES_STR, ['test_schema']) + curr.execute(self.GET_TABLES_STR, ["test_schema"]) assert len(curr.fetchall()) == 0 class TestDataset2(TestDataset1): - data_file = 'data2.sql' + data_file = "data2.sql" diff --git a/src/autotester/testers/pyta/markus_pyta_tester.py b/src/autotester/testers/pyta/markus_pyta_tester.py index 2cad5b95..a3155f88 100644 --- a/src/autotester/testers/pyta/markus_pyta_tester.py +++ b/src/autotester/testers/pyta/markus_pyta_tester.py @@ -11,12 +11,11 @@ class MarkusPyTAReporter(PositionReporter): - def __init__(self, *args, **kwargs): super().__init__(self, *args, **kwargs) self._sorted_error_messages = defaultdict(list) - def print_messages(self, level='all'): + def print_messages(self, level="all"): # print to feedback file, then reset and generate data for annotations PlainReporter.print_messages(self, level) super().print_messages(level) @@ -27,9 +26,7 @@ def output_blob(self): class MarkusPyTATest(MarkusTest): - ERROR_MSGS = { - 'reported': "{} error(s)" - } + ERROR_MSGS = {"reported": "{} error(s)"} def __init__(self, tester, student_file_path, max_points, feedback_open=None): self.student_file = student_file_path @@ -39,23 +36,27 @@ def __init__(self, tester, student_file_path, max_points, feedback_open=None): @property def test_name(self): - return f'PyTA {self.student_file}' + 
return f"PyTA {self.student_file}" def add_annotations(self, reporter): - for result in reporter._output['results']: - if 'filename' not in result: + for result in reporter._output["results"]: + if "filename" not in result: continue - for msg_group in result.get('msg_errors', []) + result.get('msg_styles', []): - for msg in msg_group['occurrences']: - self.annotations.append({ - 'annotation_category_name': None, - 'filename': result['filename'], - 'content': msg['text'], - 'line_start': msg['lineno'], - 'line_end': msg['end_lineno'], - 'column_start': msg['col_offset'], - 'column_end': msg['end_col_offset'] - }) + for msg_group in result.get("msg_errors", []) + result.get( + "msg_styles", [] + ): + for msg in msg_group["occurrences"]: + self.annotations.append( + { + "annotation_category_name": None, + "filename": result["filename"], + "content": msg["text"], + "line_start": msg["lineno"], + "line_end": msg["end_lineno"], + "column_start": msg["col_offset"], + "column_end": msg["end_col_offset"], + } + ) def after_successful_test_run(self): self.tester.annotations.extend(self.annotations) @@ -64,9 +65,15 @@ def after_successful_test_run(self): def run(self): try: # run PyTA and collect annotations - sys.stdout = self.feedback_open if self.feedback_open is not None else self.tester.devnull + sys.stdout = ( + self.feedback_open + if self.feedback_open is not None + else self.tester.devnull + ) sys.stderr = self.tester.devnull - reporter = python_ta.check_all(self.student_file, config=self.tester.pyta_config) + reporter = python_ta.check_all( + self.student_file, config=self.tester.pyta_config + ) if reporter.current_file_linted is None: # No files were checked. The mark is set to 0. num_messages = 0 @@ -76,7 +83,11 @@ def run(self): # deduct 1 point per message occurrence (not type) num_messages = len(self.annotations) points_earned = max(0, self.points_total - num_messages) - message = self.ERROR_MSGS['reported'].format(num_messages) if num_messages > 0 else '' + message = ( + self.ERROR_MSGS["reported"].format(num_messages) + if num_messages > 0 + else "" + ) return self.done(points_earned, message) except Exception as e: self.annotations = [] @@ -87,33 +98,32 @@ def run(self): class MarkusPyTATester(MarkusTester): - def __init__(self, specs, test_class=MarkusPyTATest): super().__init__(specs, test_class) - self.feedback_file = self.specs.get('test_data', 'feedback_file_name') - self.annotation_file = self.specs.get('test_data', 'annotation_file') + self.feedback_file = self.specs.get("test_data", "feedback_file_name") + self.annotation_file = self.specs.get("test_data", "annotation_file") self.pyta_config = self.update_pyta_config() self.annotations = [] - self.devnull = open(os.devnull, 'w') + self.devnull = open(os.devnull, "w") VALIDATORS[MarkusPyTAReporter.__name__] = MarkusPyTAReporter def update_pyta_config(self): - config_file = self.specs.get('test_data', 'config_file_name') + config_file = self.specs.get("test_data", "config_file_name") if config_file: with open(config_file) as f: config_dict = json.load(f) - else: + else: config_dict = {} - config_dict['pyta-reporter'] = 'MarkusPyTAReporter' + config_dict["pyta-reporter"] = "MarkusPyTAReporter" if self.feedback_file: - config_dict['pyta-output-file'] = self.feedback_file + config_dict["pyta-output-file"] = self.feedback_file return config_dict def after_tester_run(self): if self.annotation_file and self.annotations: - with open(self.annotation_file, 'w') as annotations_open: + with open(self.annotation_file, "w") as 
annotations_open: json.dump(self.annotations, annotations_open) if self.devnull: self.devnull.close() @@ -121,8 +131,10 @@ def after_tester_run(self): @MarkusTester.run_decorator def run(self): with self.open_feedback(self.feedback_file) as feedback_open: - for test_data in self.specs.get('test_data', 'student_files', default=[]): - student_file_path = test_data['file_path'] - max_points = test_data.get('max_points', 10) - test = self.test_class(self, student_file_path, max_points, feedback_open) + for test_data in self.specs.get("test_data", "student_files", default=[]): + student_file_path = test_data["file_path"] + max_points = test_data.get("max_points", 10) + test = self.test_class( + self, student_file_path, max_points, feedback_open + ) print(test.run()) diff --git a/src/autotester/testers/pyta/tests/student_files/submission.py b/src/autotester/testers/pyta/tests/student_files/submission.py index 12e1e7dc..92820b24 100644 --- a/src/autotester/testers/pyta/tests/student_files/submission.py +++ b/src/autotester/testers/pyta/tests/student_files/submission.py @@ -10,5 +10,6 @@ def loop(): while True: pass + def return_json(): return ']}[{"\\' diff --git a/src/autotester/testers/racket/markus_racket_tester.py b/src/autotester/testers/racket/markus_racket_tester.py index 58c3d76d..91601b54 100644 --- a/src/autotester/testers/racket/markus_racket_tester.py +++ b/src/autotester/testers/racket/markus_racket_tester.py @@ -6,11 +6,10 @@ class MarkusRacketTest(MarkusTest): - def __init__(self, tester, feedback_open, result): - self._test_name = result['name'] - self.status = result['status'] - self.message = result['message'] + self._test_name = result["name"] + self.status = result["status"] + self.message = result["message"] super().__init__(tester, feedback_open) @property @@ -29,31 +28,35 @@ def run(self): class MarkusRacketTester(MarkusTester): - ERROR_MSGS = {'bad_json': 'Unable to parse test results: {}'} + ERROR_MSGS = {"bad_json": "Unable to parse test results: {}"} def __init__(self, specs, test_class=MarkusRacketTest): super().__init__(specs, test_class) - + def run_racket_test(self): """ Return the subprocess.CompletedProcess object for each test file run using the markus.rkt tester. 
""" results = {} - markus_rkt = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'lib', 'markus.rkt') - for group in self.specs['test_data', 'script_files']: - test_file = group.get('script_file') + markus_rkt = os.path.join( + os.path.dirname(os.path.realpath(__file__)), "lib", "markus.rkt" + ) + for group in self.specs["test_data", "script_files"]: + test_file = group.get("script_file") if test_file: - suite_name = group.get('test_suite_name', 'all-tests') - cmd = [markus_rkt, '--test-suite', suite_name, test_file] - rkt = subprocess.run(cmd, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - universal_newlines=True, - check=True) + suite_name = group.get("test_suite_name", "all-tests") + cmd = [markus_rkt, "--test-suite", suite_name, test_file] + rkt = subprocess.run( + cmd, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + universal_newlines=True, + check=True, + ) results[test_file] = rkt.stdout return results - + @MarkusTester.run_decorator def run(self): try: @@ -66,7 +69,7 @@ def run(self): try: test_results = json.loads(result) except json.JSONDecodeError as e: - msg = MarkusRacketTester.ERROR_MSGS['bad_json'].format(result) + msg = MarkusRacketTester.ERROR_MSGS["bad_json"].format(result) raise MarkusTestError(msg) from e for t_result in test_results: test = self.test_class(self, feedback_open, t_result) diff --git a/src/autotester/tests/cli_test.py b/src/autotester/tests/cli_test.py index ff12deee..74e12a8e 100644 --- a/src/autotester/tests/cli_test.py +++ b/src/autotester/tests/cli_test.py @@ -15,21 +15,24 @@ @pytest.fixture(autouse=True) def redis(): fake_redis = FakeStrictRedis() - with patch('autotester.cli.redis_connection', return_value=fake_redis): - with patch('autotester.server.utils.redis_management.redis_connection', return_value=fake_redis): + with patch("autotester.cli.redis_connection", return_value=fake_redis): + with patch( + "autotester.server.utils.redis_management.redis_connection", + return_value=fake_redis, + ): yield fake_redis @contextmanager def tmp_script_dir(settings_dict): with tempfile.TemporaryDirectory() as tmp_dir: - files_dir = os.path.join(tmp_dir, 'files') + files_dir = os.path.join(tmp_dir, "files") os.mkdir(files_dir) - with open(os.path.join(files_dir, '.gitkeep'), 'w'): + with open(os.path.join(files_dir, ".gitkeep"), "w"): pass - with open(os.path.join(tmp_dir, 'settings.json'), 'w') as f: + with open(os.path.join(tmp_dir, "settings.json"), "w") as f: json.dump(settings_dict, f) - with patch('autotester.cli.test_script_directory', return_value=tmp_dir): + with patch("autotester.cli.test_script_directory", return_value=tmp_dir): yield tmp_dir @@ -42,25 +45,28 @@ def empty_test_script_dir(redis): @pytest.fixture def non_existant_test_script_dir(): - with patch('autotester.cli.test_script_directory', return_value=None): + with patch("autotester.cli.test_script_directory", return_value=None): yield @pytest.fixture def pop_interval(): - with patch('autotester.server.utils.redis_management.get_avg_pop_interval', return_value=None): + with patch( + "autotester.server.utils.redis_management.get_avg_pop_interval", + return_value=None, + ): yield @pytest.fixture(autouse=True) def mock_rmtree(): - with patch('shutil.rmtree') as rm: + with patch("shutil.rmtree") as rm: yield rm @pytest.fixture(autouse=True) def mock_enqueue_call(): - with patch('rq.Queue.enqueue_call') as enqueue_func: + with patch("rq.Queue.enqueue_call") as enqueue_func: yield enqueue_func @@ -69,279 +75,308 @@ class DummyTestError(Exception): class TestEnqueueTest: - 
@staticmethod def get_kwargs(**kw): - param_kwargs = {k: '' for k in inspect.signature(cli.run_test).parameters} + param_kwargs = {k: "" for k in inspect.signature(cli.run_test).parameters} return {**param_kwargs, **kw} def test_fails_missing_required_args(self): try: - cli.enqueue_test('Admin', 1) + cli.enqueue_test("Admin", 1) except cli.JobArgumentError: return except cli.MarkUsError as e: - pytest.fail(f'should have failed because kwargs are missing but instead failed with: {e}') - pytest.fail('should have failed because kwargs are missing') + pytest.fail( + f"should have failed because kwargs are missing but instead failed with: {e}" + ) + pytest.fail("should have failed because kwargs are missing") def test_accepts_same_kwargs_as_server_run_test_method(self): try: - cli.enqueue_test('Admin', 1, **self.get_kwargs()) + cli.enqueue_test("Admin", 1, **self.get_kwargs()) except cli.JobArgumentError: - pytest.fail('should not have failed because kwargs are not missing') + pytest.fail("should not have failed because kwargs are not missing") except cli.MarkUsError: pass def test_fails_if_cannot_find_valid_queue(self): try: - cli.enqueue_test('Tim', None, **self.get_kwargs()) + cli.enqueue_test("Tim", None, **self.get_kwargs()) except cli.InvalidQueueError: return except cli.MarkUsError as e: - pytest.fail(f'should have failed because a valid queue is not found but instead failed with: {e}') - pytest.fail('should have failed because a valid queue is not found') + pytest.fail( + f"should have failed because a valid queue is not found but instead failed with: {e}" + ) + pytest.fail("should have failed because a valid queue is not found") def test_can_find_valid_queue(self): try: - cli.enqueue_test('Admin', 1, **self.get_kwargs()) + cli.enqueue_test("Admin", 1, **self.get_kwargs()) except cli.InvalidQueueError: - pytest.fail('should not have failed because a valid queue is available') + pytest.fail("should not have failed because a valid queue is available") except cli.MarkUsError: pass def test_fails_if_test_files_do_not_exist(self, non_existant_test_script_dir): try: - cli.enqueue_test('Admin', 1, **self.get_kwargs()) + cli.enqueue_test("Admin", 1, **self.get_kwargs()) except cli.TestScriptFilesError: return except cli.MarkUsError as e: - pytest.fail(f'should have failed because no test scripts could be found but instead failed with: {e}') - pytest.fail('should have failed because no test scripts could be found') + pytest.fail( + f"should have failed because no test scripts could be found but instead failed with: {e}" + ) + pytest.fail("should have failed because no test scripts could be found") def test_can_find_test_files(self): try: - cli.enqueue_test('Admin', 1, **self.get_kwargs()) + cli.enqueue_test("Admin", 1, **self.get_kwargs()) except cli.TestScriptFilesError: - pytest.fail('should not have failed because no test scripts could be found') + pytest.fail("should not have failed because no test scripts could be found") except cli.MarkUsError: pass def test_writes_queue_info_to_stdout(self, capfd, pop_interval): try: - cli.enqueue_test('Admin', 1, **self.get_kwargs()) + cli.enqueue_test("Admin", 1, **self.get_kwargs()) except cli.MarkUsError: pass out, _err = capfd.readouterr() - assert re.search(r'^\d+$', out) + assert re.search(r"^\d+$", out) def test_fails_if_no_tests_groups(self): try: - cli.enqueue_test('Admin', 1, **self.get_kwargs()) + cli.enqueue_test("Admin", 1, **self.get_kwargs()) except cli.TestParameterError: return except cli.MarkUsError: pass def 
test_fails_if_no_groups_in_category(self): - settings = {"testers": [{"test_data": [{"category": ['admin']}]}]} + settings = {"testers": [{"test_data": [{"category": ["admin"]}]}]} with tmp_script_dir(settings): try: - cli.enqueue_test('Admin', 1, **self.get_kwargs(test_categories=['student'])) + cli.enqueue_test( + "Admin", 1, **self.get_kwargs(test_categories=["student"]) + ) except cli.TestParameterError: return except cli.MarkUsError: pass def test_can_find_tests_in_given_category(self): - settings = {"testers": [{"test_data": [{"category": ['admin'], "timeout": 30}]}]} + settings = { + "testers": [{"test_data": [{"category": ["admin"], "timeout": 30}]}] + } with tmp_script_dir(settings): try: - cli.enqueue_test('Admin', 1, **self.get_kwargs(test_categories=['admin'])) + cli.enqueue_test( + "Admin", 1, **self.get_kwargs(test_categories=["admin"]) + ) except cli.TestParameterError: - pytest.fail('should not have failed to find an admin test') + pytest.fail("should not have failed to find an admin test") except cli.MarkUsError: pass def test_can_enqueue_test_with_timeout(self, mock_enqueue_call): - settings = {"testers": [{"test_data": [{"category": ['admin'], "timeout": 10}]}]} + settings = { + "testers": [{"test_data": [{"category": ["admin"], "timeout": 10}]}] + } with tmp_script_dir(settings): - cli.enqueue_test('Admin', 1, **self.get_kwargs(test_categories=['admin'])) - mock_enqueue_call.assert_called_with(ANY, kwargs=ANY, job_id=ANY, timeout=15) + cli.enqueue_test("Admin", 1, **self.get_kwargs(test_categories=["admin"])) + mock_enqueue_call.assert_called_with( + ANY, kwargs=ANY, job_id=ANY, timeout=15 + ) def test_cleans_up_files_on_error(self, mock_rmtree): with pytest.raises(Exception): - cli.enqueue_test('Admin', 1, **self.get_kwargs(files_path='something')) + cli.enqueue_test("Admin", 1, **self.get_kwargs(files_path="something")) class TestUpdateSpecs: - @staticmethod def get_kwargs(**kw): - param_kwargs = {k: '' for k in inspect.signature(cli.update_test_specs).parameters} + param_kwargs = { + k: "" for k in inspect.signature(cli.update_test_specs).parameters + } return {**param_kwargs, **kw} def test_fails_when_schema_is_invalid(self): - with patch('autotester.server.utils.form_validation.validate_with_defaults', - return_value=DummyTestError('error')): - with patch('autotester.cli.update_test_specs'): + with patch( + "autotester.server.utils.form_validation.validate_with_defaults", + return_value=DummyTestError("error"), + ): + with patch("autotester.cli.update_test_specs"): try: - cli.update_specs('', **self.get_kwargs(schema={})) + cli.update_specs("", **self.get_kwargs(schema={})) except DummyTestError: return - pytest.fail('should have failed because the form is invalid') + pytest.fail("should have failed because the form is invalid") def test_succeeds_when_schema_is_valid(self): - with patch('autotester.server.utils.form_validation.validate_with_defaults', return_value=[]): - with patch('autotester.cli.update_test_specs'): + with patch( + "autotester.server.utils.form_validation.validate_with_defaults", + return_value=[], + ): + with patch("autotester.cli.update_test_specs"): try: - cli.update_specs('', **self.get_kwargs(schema={})) + cli.update_specs("", **self.get_kwargs(schema={})) except DummyTestError: - pytest.fail('should not have failed because the form is valid') + pytest.fail("should not have failed because the form is valid") def test_calls_update_test_specs(self): - with patch('autotester.server.utils.form_validation.validate_with_defaults', 
return_value=[]): - with patch('autotester.cli.update_test_specs') as update_test_specs: - cli.update_specs('', **self.get_kwargs(schema={})) + with patch( + "autotester.server.utils.form_validation.validate_with_defaults", + return_value=[], + ): + with patch("autotester.cli.update_test_specs") as update_test_specs: + cli.update_specs("", **self.get_kwargs(schema={})) update_test_specs.assert_called_once() def test_cleans_up_files_on_error(self, mock_rmtree): - with patch('autotester.server.utils.form_validation.validate_with_defaults', - return_value=DummyTestError('error')): - with patch('autotester.cli.update_test_specs'): + with patch( + "autotester.server.utils.form_validation.validate_with_defaults", + return_value=DummyTestError("error"), + ): + with patch("autotester.cli.update_test_specs"): with pytest.raises(Exception): - cli.update_specs(**self.get_kwargs(schema={}, files_path='test_files')) + cli.update_specs( + **self.get_kwargs(schema={}, files_path="test_files") + ) @pytest.fixture def mock_rq_job(): - with patch('rq.job.Job') as job: + with patch("rq.job.Job") as job: enqueued_job = Mock() job.fetch.return_value = enqueued_job yield job, enqueued_job class TestCancelTest: - def test_do_nothing_if_job_does_not_exist(self, mock_rq_job): job_class, mock_job = mock_rq_job job_class.fetch.side_effect = NoSuchJobError - cli.cancel_test('something', [1]) + cli.cancel_test("something", [1]) mock_job.cancel.assert_not_called() def test_do_nothing_if_job_not_enqueued(self, mock_rq_job): _, mock_job = mock_rq_job mock_job.is_queued.return_value = False - cli.cancel_test('something', [1]) + cli.cancel_test("something", [1]) mock_job.cancel.assert_not_called() def test_cancel_job(self, mock_rq_job): _, mock_job = mock_rq_job mock_job.is_queued.return_value = True - mock_job.kwargs = {'files_path': None} - cli.cancel_test('something', [1]) + mock_job.kwargs = {"files_path": None} + cli.cancel_test("something", [1]) mock_job.cancel.assert_called_once() def test_remove_files_when_cancelling(self, mock_rq_job, mock_rmtree): _, mock_job = mock_rq_job mock_job.is_queued.return_value = True - files_path = 'something' - mock_job.kwargs = {'files_path': files_path} - cli.cancel_test('something', [1]) + files_path = "something" + mock_job.kwargs = {"files_path": files_path} + cli.cancel_test("something", [1]) mock_rmtree.assert_called_once_with(files_path, onerror=ANY) def test_cancel_multiple_jobs(self, mock_rq_job): _, mock_job = mock_rq_job mock_job.is_queued.return_value = True - mock_job.kwargs = {'files_path': None} - cli.cancel_test('something', [1, 2]) + mock_job.kwargs = {"files_path": None} + cli.cancel_test("something", [1, 2]) assert mock_job.cancel.call_count == 2 def test_remove_files_when_cancelling_multiple_jobs(self, mock_rq_job, mock_rmtree): _, mock_job = mock_rq_job mock_job.is_queued.return_value = True - files_path = 'something' - mock_job.kwargs = {'files_path': files_path} - cli.cancel_test('something', [1, 2]) + files_path = "something" + mock_job.kwargs = {"files_path": files_path} + cli.cancel_test("something", [1, 2]) assert mock_rmtree.call_count == 2 class TestGetSchema: - @staticmethod def fake_installed_testers(installed): root_dir = os.path.dirname(os.path.abspath(cli.__file__)) paths = [] for tester in installed: - glob_pattern = os.path.join(root_dir, 'testers', tester, 'specs') - paths.append(os.path.join(glob.glob(glob_pattern)[0], '.installed')) + glob_pattern = os.path.join(root_dir, "testers", tester, "specs") + 
paths.append(os.path.join(glob.glob(glob_pattern)[0], ".installed")) return paths @staticmethod def assert_tester_in_schema(tester, schema): assert tester in schema["definitions"]["installed_testers"]["enum"] installed = [] - for option in schema['definitions']['tester_schemas']['oneOf']: - installed.append(option['properties']['tester_type']['enum'][0]) + for option in schema["definitions"]["tester_schemas"]["oneOf"]: + installed.append(option["properties"]["tester_type"]["enum"][0]) assert tester in installed def test_prints_skeleton_when_none_installed(self, capfd): - with patch('glob.glob', return_value=[]): + with patch("glob.glob", return_value=[]): cli.get_schema() out, _err = capfd.readouterr() schema = json.loads(out) root_dir = os.path.dirname(os.path.abspath(cli.__file__)) - with open(os.path.join(root_dir, 'lib', 'tester_schema_skeleton.json')) as f: + with open( + os.path.join(root_dir, "lib", "tester_schema_skeleton.json") + ) as f: skeleton = json.load(f) assert schema == skeleton def test_prints_test_schema_when_one_installed(self, capfd): - with patch('glob.glob', return_value=self.fake_installed_testers(['custom'])): + with patch("glob.glob", return_value=self.fake_installed_testers(["custom"])): cli.get_schema() out, _err = capfd.readouterr() schema = json.loads(out) - self.assert_tester_in_schema('custom', schema) + self.assert_tester_in_schema("custom", schema) def test_prints_test_schema_when_multiple_installed(self, capfd): - with patch('glob.glob', return_value=self.fake_installed_testers(['custom', 'py'])): + with patch( + "glob.glob", return_value=self.fake_installed_testers(["custom", "py"]) + ): cli.get_schema() out, _err = capfd.readouterr() schema = json.loads(out) - self.assert_tester_in_schema('custom', schema) - self.assert_tester_in_schema('py', schema) + self.assert_tester_in_schema("custom", schema) + self.assert_tester_in_schema("py", schema) class TestParseArgFile: - def test_loads_arg_file(self): - settings = {'some': 'data'} + settings = {"some": "data"} with tmp_script_dir(settings) as tmp_dir: - arg_file = os.path.join(tmp_dir, 'settings.json') + arg_file = os.path.join(tmp_dir, "settings.json") kwargs = cli.parse_arg_file(arg_file) try: - kwargs.pop('files_path') + kwargs.pop("files_path") except KeyError: pass assert settings == kwargs def test_remove_arg_file(self): - settings = {'some': 'data'} + settings = {"some": "data"} with tmp_script_dir(settings) as tmp_dir: - arg_file = os.path.join(tmp_dir, 'settings.json') + arg_file = os.path.join(tmp_dir, "settings.json") cli.parse_arg_file(arg_file) assert not os.path.isfile(arg_file) def test_adds_file_path_if_not_present(self): - settings = {'some': 'data'} + settings = {"some": "data"} with tmp_script_dir(settings) as tmp_dir: - arg_file = os.path.join(tmp_dir, 'settings.json') + arg_file = os.path.join(tmp_dir, "settings.json") kwargs = cli.parse_arg_file(arg_file) - assert 'files_path' in kwargs - assert os.path.realpath(kwargs['files_path']) == os.path.realpath(tmp_dir) + assert "files_path" in kwargs + assert os.path.realpath(kwargs["files_path"]) == os.path.realpath(tmp_dir) def test_does_not_add_file_path_if_present(self): - settings = {'some': 'data', 'files_path': 'something'} + settings = {"some": "data", "files_path": "something"} with tmp_script_dir(settings) as tmp_dir: - arg_file = os.path.join(tmp_dir, 'settings.json') + arg_file = os.path.join(tmp_dir, "settings.json") kwargs = cli.parse_arg_file(arg_file) - assert 'files_path' in kwargs - assert kwargs['files_path'] == 
'something' + assert "files_path" in kwargs + assert kwargs["files_path"] == "something" From b2af2e0dc97b5865a013514faa368af148aa72b0 Mon Sep 17 00:00:00 2001 From: mishaschwartz Date: Mon, 10 Feb 2020 12:23:35 -0500 Subject: [PATCH 32/46] style: various style changes --- .../server/utils/string_management.py | 2 +- src/autotester/setup.py | 8 ++--- src/autotester/testers/py/lib/c_helper.py | 35 +++++++++++-------- 3 files changed, 24 insertions(+), 21 deletions(-) diff --git a/src/autotester/server/utils/string_management.py b/src/autotester/server/utils/string_management.py index 4a3812a6..3c55ab5b 100644 --- a/src/autotester/server/utils/string_management.py +++ b/src/autotester/server/utils/string_management.py @@ -30,7 +30,7 @@ def loads_partial_json(json_string, expected_type=None): obj, ind = decoder.raw_decode(json_string[i:]) if expected_type is None or isinstance(obj, expected_type): results.append(obj) - elif json_string[i : i + ind].strip(): + elif json_string[i:i + ind].strip(): malformed = True i += ind except json.JSONDecodeError: diff --git a/src/autotester/setup.py b/src/autotester/setup.py index 9e464c21..76c04793 100644 --- a/src/autotester/setup.py +++ b/src/autotester/setup.py @@ -2,6 +2,8 @@ test_exclusions = ["*.tests", "*.tests.*", "tests.*", "tests"] +packages = ["testers"] + [f"testers.{pkg}" for pkg in find_packages(where="testers", exclude=test_exclusions)] + setup( name="markus-autotester-testers", version="2.0", @@ -11,10 +13,6 @@ author_email="mschwa@cs.toronto.edu", license="MIT", include_package_data=True, - packages=["testers"] - + [ - f"testers.{pkg}" - for pkg in find_packages(where="testers", exclude=test_exclusions) - ], + packages=packages, zip_safe=False, ) diff --git a/src/autotester/testers/py/lib/c_helper.py b/src/autotester/testers/py/lib/c_helper.py index a26ccd9b..069b0802 100644 --- a/src/autotester/testers/py/lib/c_helper.py +++ b/src/autotester/testers/py/lib/c_helper.py @@ -16,11 +16,11 @@ # Note that the keys of the dictionary correspond to the "type" of call it was regex_dict = OrderedDict( - resumed="([0-9]+)\s*<\.\.\. (.*) (?:resumed>(.*)=\s)(-?[0-9]+)$", - unfinished="([0-9]+)\s*(.*)\((.*)(.*)=\s)(-?[0-9]+)$", + unfinished=r"([0-9]+)\s*(.*)\((.*) None: self.assertEqual(self.compile_out, "") self.assertEqual(self.compile_err, "") - def _run_exec(self, args: Optional[List[str]] = None, **kwargs) -> None: + def _run_exec(self, args: Optional[List[str]] = None, **kwargs): """Run this test class' executable with the given arguments and options.""" return _exec([os.path.join(".", self.executable_name)] + (args or []), **kwargs) @@ -257,9 +257,10 @@ class Trace: Note that we can also view the dictionary as being constructed from these arity 5-tuples, namely: (PID, func_name, args, ret_val, type) Note that args is "junk" and needs some postprocessing (for example, splitting on ,) This was done because - parsing is a better approach when dealing with variable-number capture groups, as there will be with arguments to a function. - Note that for those that do not have certain fields, like ret_val for unfinished, we pad with None. However, the last - element of the tuple (tuple[-1]) is always the "type" of the call, as determined by the regex that classified it. + parsing is a better approach when dealing with variable-number capture groups, as there will be with arguments to a + function. Note that for those that do not have certain fields, like ret_val for unfinished, we pad with None. 
+ However, the last element of the tuple (tuple[-1]) is always the "type" of the call, as determined by the regex that + classified it. Note 2: the "special" regex is a special case, corresponding to things like: --- SIGPIPE (Broken pipe) --- and @@ -364,7 +365,7 @@ def run_through_regexes(regexes, trace_line): final_result += (key,) # append the type of the entry to the end return final_result # stops as soon as a matching regex is encountered # print("line did not have any mathces " + trace_line) - return ("", "", "", "") # did not match with any of the regexes + return "", "", "", "" # did not match with any of the regexes def parse_arbitrary(trace_line, regex): @@ -472,12 +473,14 @@ def populate_tests(self, test_klass, args=None): setattr( test_klass, "test_" + name, - simple_test(args, test_out, test_err, test_in), + simple_test(args, expected_stdout=test_out, expected_stderr=test_err, input_=test_in), ) -def _compile(files, exec_name=None, gcc_flags=DEFAULT_GCC_FLAGS, **kwargs): +def _compile(files, exec_name=None, gcc_flags=None, **kwargs): """Run gcc with the given flags on the given files.""" + if gcc_flags is None: + gcc_flags = DEFAULT_GCC_FLAGS if isinstance(files, str): files = [files] args = ["gcc"] + gcc_flags @@ -486,12 +489,14 @@ def _compile(files, exec_name=None, gcc_flags=DEFAULT_GCC_FLAGS, **kwargs): return _exec(args + files, **kwargs) -def _make(targets=None, make_args=["--silent"], **kwargs): +def _make(targets=None, make_args=None, **kwargs): """Run make on the given targets.""" + if make_args is None: + make_args = ["--silent"] return _exec(["make"] + make_args + (targets or []), timeout=60, **kwargs) -def _exec(args, *, input_=None, timeout=10, check=True, shell=False): +def _exec(args, *, input_=None, timeout=10, shell=False): """Wrapper function that calls exec on the given args in a new subprocess. Return a triple (stdout, stderr, exit status) from the subprocess. @@ -553,7 +558,7 @@ def ongoing_process(args, check_killed=True): assert proc.returncode == -9, "server exited abnormally" -def _exec_shell(args, *, input_=None, timeout=1, check=True): +def _exec_shell(args, *, input_=None, timeout=1): """Wrapper function that calls exec on the given args in a new subprocess with a shell. Returns a communicate method (like a pipe) to the exec process. 
From fbf416d1850e969ff40ea56540806d36ce6f1cea Mon Sep 17 00:00:00 2001 From: mishaschwartz Date: Mon, 10 Feb 2020 12:43:38 -0500 Subject: [PATCH 33/46] style: more changes suggested by black package --- src/autotester/setup.py | 4 +++- src/autotester/testers/py/lib/c_helper.py | 7 ++++++- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/src/autotester/setup.py b/src/autotester/setup.py index 76c04793..2f389e79 100644 --- a/src/autotester/setup.py +++ b/src/autotester/setup.py @@ -2,7 +2,9 @@ test_exclusions = ["*.tests", "*.tests.*", "tests.*", "tests"] -packages = ["testers"] + [f"testers.{pkg}" for pkg in find_packages(where="testers", exclude=test_exclusions)] +packages = ["testers"] + [ + f"testers.{pkg}" for pkg in find_packages(where="testers", exclude=test_exclusions) +] setup( name="markus-autotester-testers", diff --git a/src/autotester/testers/py/lib/c_helper.py b/src/autotester/testers/py/lib/c_helper.py index 069b0802..08fe7be9 100644 --- a/src/autotester/testers/py/lib/c_helper.py +++ b/src/autotester/testers/py/lib/c_helper.py @@ -473,7 +473,12 @@ def populate_tests(self, test_klass, args=None): setattr( test_klass, "test_" + name, - simple_test(args, expected_stdout=test_out, expected_stderr=test_err, input_=test_in), + simple_test( + args, + expected_stdout=test_out, + expected_stderr=test_err, + input_=test_in, + ), ) From 23f1cabfd2799239664639f66c26294e878dafe2 Mon Sep 17 00:00:00 2001 From: mishaschwartz Date: Mon, 10 Feb 2020 12:47:48 -0500 Subject: [PATCH 34/46] error: report arg, kwargs of hook errors properly --- src/autotester/server/hooks_context/hooks_context.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/autotester/server/hooks_context/hooks_context.py b/src/autotester/server/hooks_context/hooks_context.py index ee617024..9a2dedb2 100644 --- a/src/autotester/server/hooks_context/hooks_context.py +++ b/src/autotester/server/hooks_context/hooks_context.py @@ -282,6 +282,6 @@ def format_errors(self): for hook_name, args, kwargs, tb in self.run_errors: error_list.append( f"function_name: {hook_name}\n" - f"args: {self.args}\nkwargs: {self.kwargs},\ntraceback:\n{tb}" + f"args: {args}\nkwargs: {kwargs},\ntraceback:\n{tb}" ) return "\n\n".join(error_list) From f9d52f64c16142de4363420e055b0515d646ac67 Mon Sep 17 00:00:00 2001 From: mishaschwartz Date: Wed, 12 Feb 2020 09:44:15 -0500 Subject: [PATCH 35/46] permissions: update docstring and set all other permissions to 0 --- .../server/utils/file_management.py | 22 +++++++++---------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/src/autotester/server/utils/file_management.py b/src/autotester/server/utils/file_management.py index 96a45ff9..fbd24e4d 100644 --- a/src/autotester/server/utils/file_management.py +++ b/src/autotester/server/utils/file_management.py @@ -128,22 +128,22 @@ def setup_files(files_path, tests_path, markus_address, assignment_id): then make it the current working directory. 
The following permissions are also set: - tests_path directory: rwxrwx--T - - test subdirectories: rwxr-xr-x - - test files: rw-r--r-- - - student subdirectories: rwxrwxrwx - - student files: rw-rw-rw- + - test subdirectories: rwxrwx--T + - test files: rw-r----- + - student subdirectories: rwxrwx--- + - student files: rw-rw---- """ os.chmod(tests_path, 0o1770) student_files = move_tree(files_path, tests_path) for fd, file_or_dir in student_files: - if fd == "d": - os.chmod(file_or_dir, 0o777) + if fd == 'd': + os.chmod(file_or_dir, 0o770) else: - os.chmod(file_or_dir, 0o666) + os.chmod(file_or_dir, 0o660) script_files = copy_test_script_files(markus_address, assignment_id, tests_path) for fd, file_or_dir in script_files: - permissions = 0o755 - if fd == "f": - permissions -= 0o111 - os.chmod(file_or_dir, permissions) + if fd == 'd': + os.chmod(file_or_dir, 0o1770) + else: + os.chmod(file_or_dir, 0o640) return student_files, script_files From 2b8e76f3a0f839a0c10bd2e7600d51a224924bd2 Mon Sep 17 00:00:00 2001 From: mishaschwartz Date: Wed, 12 Feb 2020 09:49:44 -0500 Subject: [PATCH 36/46] style: fix quote choices --- src/autotester/server/utils/file_management.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/autotester/server/utils/file_management.py b/src/autotester/server/utils/file_management.py index fbd24e4d..56b53b72 100644 --- a/src/autotester/server/utils/file_management.py +++ b/src/autotester/server/utils/file_management.py @@ -136,13 +136,13 @@ def setup_files(files_path, tests_path, markus_address, assignment_id): os.chmod(tests_path, 0o1770) student_files = move_tree(files_path, tests_path) for fd, file_or_dir in student_files: - if fd == 'd': + if fd == "d": os.chmod(file_or_dir, 0o770) else: os.chmod(file_or_dir, 0o660) script_files = copy_test_script_files(markus_address, assignment_id, tests_path) for fd, file_or_dir in script_files: - if fd == 'd': + if fd == "d": os.chmod(file_or_dir, 0o1770) else: os.chmod(file_or_dir, 0o640) From 831e47f5683dcac498a45639b2a7dc7ffe45a898 Mon Sep 17 00:00:00 2001 From: mishaschwartz Date: Thu, 13 Feb 2020 13:58:23 -0500 Subject: [PATCH 37/46] unused-variable: add unused parameter back in because MarkUs still expects it --- src/autotester/server/server.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/autotester/server/server.py b/src/autotester/server/server.py index e4359202..07bb4c51 100755 --- a/src/autotester/server/server.py +++ b/src/autotester/server/server.py @@ -331,6 +331,7 @@ def run_test( files_path, assignment_id, group_id, + group_repo_name, submission_id, run_id, enqueue_time, From c92cf338219427beb1b7b5edaf487a4e2c3c67c7 Mon Sep 17 00:00:00 2001 From: mishaschwartz Date: Fri, 14 Feb 2020 16:01:47 -0500 Subject: [PATCH 38/46] config: add/remove some configuration options --- bin/generate_supervisord_conf.py | 44 +++++++------------ bin/install.sh | 12 +++-- doc/config_example.yml | 16 +++---- .../config_defaults/config_default.yml | 20 +++------ .../config_defaults/config_env_vars.yml | 10 ++--- .../server/utils/user_management.py | 6 +-- 6 files changed, 41 insertions(+), 67 deletions(-) diff --git a/bin/generate_supervisord_conf.py b/bin/generate_supervisord_conf.py index 2ac23d3b..1e81ab8a 100755 --- a/bin/generate_supervisord_conf.py +++ b/bin/generate_supervisord_conf.py @@ -1,16 +1,15 @@ #!/usr/bin/env python3 from autotester.config import config -import sys import os import argparse -HEADER = """[supervisord] +HEADER = f"""[supervisord] [supervisorctl] [inet_http_server] -port = 
127.0.0.1:9001 +port = {config['supervisor', 'url']} [rpcinterface:supervisor] supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface @@ -34,7 +33,7 @@ THIS_DIR = os.path.dirname(os.path.abspath(__file__)) -def write_conf_file(rq, conf_filename, user_names): +def write_conf_file(rq, conf_filename): try: redis_url = f'--url {config["redis", "url"]}' except KeyError: @@ -42,37 +41,26 @@ def write_conf_file(rq, conf_filename, user_names): with open(conf_filename, "w") as f: f.write(HEADER) - user_name_set = set(user_names) - enough_users = True for worker_data in config["workers"]: - numprocs = worker_data["n"] queues = worker_data["queues"] - if enough_users: - for _ in range(numprocs): - try: - worker_user = user_name_set.pop() - except KeyError: - msg = f"[AUTOTEST] Not enough worker users to create all rq workers." - sys.stderr.write(f"{msg}\n") - enough_users = False - break - queue_str = " ".join(queues) - c = CONTENT.format( - worker_user=worker_user, - rq=rq, - worker_args=redis_url, - queues=queue_str, - numprocs=1, - directory=THIS_DIR, - ) - f.write(c) + queue_str = ' '.join(queues) + for users in worker_data["users"]: + worker_user = users['name'] + c = CONTENT.format( + worker_user=worker_user, + rq=rq, + worker_args=redis_url, + queues=queue_str, + numprocs=1, + directory=THIS_DIR, + ) + f.write(c) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("rq") parser.add_argument("conf_filename") - parser.add_argument("user_names", nargs="+") args = parser.parse_args() - write_conf_file(args.rq, args.conf_filename, args.user_names) + write_conf_file(args.rq, args.conf_filename) diff --git a/bin/install.sh b/bin/install.sh index 13c9705f..1997ab9d 100755 --- a/bin/install.sh +++ b/bin/install.sh @@ -9,7 +9,7 @@ TESTERSROOT="${PROJECTROOT}/src/autotester" SERVER_VENV="${PROJECTROOT}/venv" INSTALLABLE_TESTERS=(custom haskell java py pyta racket) TESTERS=() -USAGE_MESSAGE="Usage: $0 [-p|--python_version python_version] [--non-interactive] [--docker] [-t|--testers tester ...]" +USAGE_MESSAGE="Usage: $0 [-p|--python-version python-version] [--non-interactive] [--docker] [-a|--all-testers] [-t|--testers tester ...]" _check_python_version() { # check if the python3 is at least version 3.6 @@ -269,18 +269,16 @@ create_enqueuer_wrapper() { start_workers() { local supervisorconf - local worker_users local generate_script local rq supervisorconf="${WORKSPACE_SUBDIRS[LOGS]}/supervisord.conf" - worker_users=$(echo "${WORKER_USERS}" | tr '\n' ' ') generate_script="${BINDIR}/generate_supervisord_conf.py" rq="${SERVER_VENV}/bin/rq" echo "[AUTOTEST-INSTALL] Generating supervisor config at '${supervisorconf}' and starting rq workers" - sudo -u "${SERVER_USER}" -- bash -c "${PYTHON} ${generate_script} ${rq} ${supervisorconf} ${worker_users} && + sudo -u "${SERVER_USER}" -- bash -c "${PYTHON} ${generate_script} ${rq} ${supervisorconf} && ${BINDIR}/start-stop.sh start" } @@ -310,8 +308,8 @@ load_config_settings() { local config_json config_json=$("${PYTHON}" -c "from autotester.config import config; print(config.to_json())") - SERVER_USER=$(echo "${config_json}" | jq --raw-output '.users.server.name') - WORKER_AND_REAPER_USERS=$(echo "${config_json}" | jq --raw-output '.users.workers | .[] | (.name, .reaper)') + SERVER_USER=$(echo "${config_json}" | jq --raw-output '.server_user') + WORKER_AND_REAPER_USERS=$(echo "${config_json}" | jq --raw-output '.workers | .[] | .users | .[] | (.name, .reaper)') REDIS_URL=$(echo "${config_json}" | jq --raw-output 
'.redis.url') REDIS_PORT=$(redis-cli --raw -u "${REDIS_URL}" CONFIG GET port | tail -1) WORKSPACE_DIR=$(echo "${config_json}" | jq --raw-output '.workspace') @@ -346,7 +344,7 @@ _add_valid_tester() { while [[ $# -gt 0 ]]; do key="$1" case $key in - -p|--python_version) + -p|--python-version) SELECTING_TESTERS= PYTHON_VERSION="$2" shift 2 diff --git a/doc/config_example.yml b/doc/config_example.yml index 509196c6..1d4d0110 100644 --- a/doc/config_example.yml +++ b/doc/config_example.yml @@ -1,22 +1,22 @@ workspace: !ENV ${HOME}/.markus-autotesting/workspace +server_user: !ENV ${USER} + workers: - - n: 1 + - users: + - name: !ENV ${USER} + reaper: null queues: - student - single - batch -users: - server: - name: !ENV ${USER} - workers: - - name: !ENV ${USER} - reaper: null - redis: url: redis://127.0.0.1:6379/0 +supervisor: + url: '127.0.0.1:9001' + rlimit_settings: nproc: - 300 diff --git a/src/autotester/config_defaults/config_default.yml b/src/autotester/config_defaults/config_default.yml index 76210679..10bb0c69 100644 --- a/src/autotester/config_defaults/config_default.yml +++ b/src/autotester/config_defaults/config_default.yml @@ -11,7 +11,9 @@ queues: schema: {'type': 'object', 'properties': {'batch_id': {'type': 'null'}, 'user_type': {'const': 'Student'}}} workers: - - n: 1 + - users: + - name: !ENV ${USER} + reaper: null queues: - student - single @@ -23,26 +25,14 @@ redis: _current_test_script_hash: current_test_scripts _pop_interval_hash: pop_interval +supervisor: + url: '127.0.0.1:9001' rlimit_settings: nproc: - 300 - 300 -testers: - custom: - enable: true - haskell: - enable: true - java: - enable: true - py: - enable: true - pyta: - enable: true - racket: - enable: true - resources: port: _redis_int: port diff --git a/src/autotester/config_defaults/config_env_vars.yml b/src/autotester/config_defaults/config_env_vars.yml index 27e30888..6a34ac11 100644 --- a/src/autotester/config_defaults/config_env_vars.yml +++ b/src/autotester/config_defaults/config_env_vars.yml @@ -3,12 +3,10 @@ workspace: !ENV ${HOME}/.markus-autotesting/ redis: url: !ENV ${REDIS_URL} -users: - server: - name: !ENV ${USER} - workers: - - name: !ENV ${USER} - reaper: null +server_user: !ENV ${USER} + +supervisor: + url: !ENV ${SUPERVISOR_URL} resources: postgresql: diff --git a/src/autotester/server/utils/user_management.py b/src/autotester/server/utils/user_management.py index acc48bfd..f02be19e 100644 --- a/src/autotester/server/utils/user_management.py +++ b/src/autotester/server/utils/user_management.py @@ -31,6 +31,6 @@ def tester_user(): def get_reaper_username(test_username): - for worker_name, reaper_name in config["users", "workers"]: - if worker_name == test_username: - return reaper_name + for users in (users for conf in config['workers'] for users in conf['users']): + if users['name'] == test_username: + return users['reaper'] From 89bb272b0310c5cf254099a0676684172eff5271 Mon Sep 17 00:00:00 2001 From: mishaschwartz Date: Fri, 14 Feb 2020 16:24:25 -0500 Subject: [PATCH 39/46] style: change quotes used --- bin/generate_supervisord_conf.py | 4 ++-- src/autotester/server/utils/user_management.py | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/bin/generate_supervisord_conf.py b/bin/generate_supervisord_conf.py index 1e81ab8a..e8d3c066 100755 --- a/bin/generate_supervisord_conf.py +++ b/bin/generate_supervisord_conf.py @@ -43,9 +43,9 @@ def write_conf_file(rq, conf_filename): f.write(HEADER) for worker_data in config["workers"]: queues = worker_data["queues"] - queue_str = ' 
'.join(queues) + queue_str = " ".join(queues) for users in worker_data["users"]: - worker_user = users['name'] + worker_user = users["name"] c = CONTENT.format( worker_user=worker_user, rq=rq, diff --git a/src/autotester/server/utils/user_management.py b/src/autotester/server/utils/user_management.py index f02be19e..2d2db136 100644 --- a/src/autotester/server/utils/user_management.py +++ b/src/autotester/server/utils/user_management.py @@ -31,6 +31,6 @@ def tester_user(): def get_reaper_username(test_username): - for users in (users for conf in config['workers'] for users in conf['users']): - if users['name'] == test_username: - return users['reaper'] + for users in (users for conf in config["workers"] for users in conf["users"]): + if users["name"] == test_username: + return users["reaper"] From 0240439979cbf03c3d2cba300a7f7cb421b3cf38 Mon Sep 17 00:00:00 2001 From: mishaschwartz Date: Fri, 14 Feb 2020 16:28:59 -0500 Subject: [PATCH 40/46] java-tester: fix install location of installation setting --- src/autotester/testers/java/bin/install.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/autotester/testers/java/bin/install.sh b/src/autotester/testers/java/bin/install.sh index a65cda86..a2115285 100755 --- a/src/autotester/testers/java/bin/install.sh +++ b/src/autotester/testers/java/bin/install.sh @@ -16,7 +16,7 @@ compile_tester() { update_specs() { echo "[JAVA-INSTALL] Updating specs" - echo '{}' | jq ".path_to_tester_jars = \"${JAVADIR}/build/install/MarkusJavaTester/lib\"" > "${TESTERDIR}/specs/install_settings.json" + echo '{}' | jq ".path_to_tester_jars = \"${JAVADIR}/build/install/MarkusJavaTester/lib\"" > "${SPECSDIR}/install_settings.json" } # script starts here From d048138b9366cfc8526685e547dfb5ac7fdfb2f0 Mon Sep 17 00:00:00 2001 From: mishaschwartz Date: Fri, 14 Feb 2020 16:45:33 -0500 Subject: [PATCH 41/46] config: start-stop.sh should use new config --- bin/start-stop.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/start-stop.sh b/bin/start-stop.sh index 5cfc08ae..08e57c37 100755 --- a/bin/start-stop.sh +++ b/bin/start-stop.sh @@ -39,7 +39,7 @@ load_config_settings() { local config_json config_json=$("${PYTHON}" -c "from autotester.config import config; print(config.to_json())") - SERVER_USER=$(echo "${config_json}" | jq --raw-output '.users.server.name') + SERVER_USER=$(echo "${config_json}" | jq --raw-output '.server_user') WORKSPACE_DIR=$(echo "${config_json}" | jq --raw-output '.workspace') LOGS_DIR="${WORKSPACE_DIR}/"$(echo "${config_json}" | jq --raw-output '._workspace_contents._logs') REDIS_URL=$(echo "${config_json}" | jq --raw-output '.redis.url') From 92ab4a85d9b67a117d5fb6279869084df029f7a0 Mon Sep 17 00:00:00 2001 From: mishaschwartz Date: Tue, 18 Feb 2020 14:27:00 -0500 Subject: [PATCH 42/46] documentation: update to changelog and readme --- Changelog.md | 10 +- README.md | 311 ++++++++++++++++++++++++--------------------------- 2 files changed, 156 insertions(+), 165 deletions(-) diff --git a/Changelog.md b/Changelog.md index 89aa0834..a9a7ae37 100644 --- a/Changelog.md +++ b/Changelog.md @@ -2,7 +2,15 @@ All notable changes to this project will be documented here. ## [unreleased] -- allow tests to write to existing subdirectories but not overwrite existing test script files (#237) +- allow tests to write to existing subdirectories but not overwrite existing test script files (#237). +- add ability to create a docker container for the autotester in development mode (#236). 
+- major reorganization of the structure of this package (#236). + - additional usage options for the server installation script (bin/install.sh). + - testers can/should now be installed using the server installation script instead of individually. + - configuration files now use yaml format. + - configuration file defaults are now included in the source code so the autotester can be run with or without a + user specific configuration file. + - changed the default location for the workspace directory. ## [1.8.1] _NOTE: This changelog starts from version 1.8.1 (changes prior to this version are not documented)_ diff --git a/README.md b/README.md index 8ec357e7..5501af8a 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -[![Acceptance tests](https://layerci.com/github/MarkUsProject/markus-autotesting/badge)](https://layerci.com/github/MarkUsProject/markus-autotesting) +[![Acceptance tests](https://layerci.com/github/MarkUsProject/markus-autotesting/badge)](https://layerci.com/jobs/github/MarkUsProject/markus-autotesting) Autotesting with Markus ============================== @@ -17,54 +17,48 @@ The autotesting client component is already included in a MarkUs installation. S ### Server -To install the autotesting server, run the `install.sh` script from the `server/bin` directory as: +To install the autotesting server, run the `install.sh` script from the `bin` directory with options: ``` -$ server/bin/install.sh +$ bin/install.sh [-p|--python-version python-version] [--non-interactive] [--docker] [-a|--all-testers] [-t|--testers tester ...] ``` +options: + +- `--python-version` : version of python to install/use to run the autotester (default is 3.8). +- `--non-interactive` : run the installer in non-interactive mode (all confirmations will be accepted without prompting the user). +- `--docker` : run the installer for installing in docker. This installs in non-interactive mode and iptables, postgresql debian packages will not be installed. +- `--all-testers` : install all testers as well as the server. See [Testers](#testers). +- `--testers` : install the individual named testers (See [Testers](#testers)). This option will be ignored if the --all-testers flag is used. + The server can be uninstalled by running the `uninstall.sh` script in the same directory. #### Dependencies -Installing the server will also install the following packages: +Installing the server will also install the following debian packages: -- python3.X (the python version can be configured in the config file; see below) +- python3.X (the python3 minor version can be specified as an argument to the install script; see above) - python3.X-venv - redis-server - jq -- postgresql +- postgresql-client +- libpq-dev +- openssh-server +- gcc +- postgresql (if not running in a docker environment) +- iptables (if not running in a docker environment) -This script may also add new users and create new potgres databases. See the [configuration](#markus-autotesting-configuration-options) section for more details. +This script may also add new users and create new postgres databases. See the [configuration](#markus-autotesting-configuration-options) section for more details. ### Testers -After the server has been installed, one or more of the following testers should also be installed: - -- `haskell` -- `java` -- `py` -- `pyta` -- `racket` -- `custom` - -Each tester may be installed by running install scripts: - -``` -$ testers/testers/${tester_name}/bin/install.sh -``` - -where `tester_name` is one of the tester names listed above.
- -Each tester can be uninstalled by running the `uninstall.sh` script in the same directory. - -Each language specific tester can run test files written in the following frameworks: +The markus autotester currently supports testers for the following languages and testing frameworks: - `haskell` - [QuickCheck](http://hackage.haskell.org/package/QuickCheck) - `java` - [JUnit](https://junit.org/junit4/) -- `py` +- `py` (python3) - [Unittest](https://docs.python.org/3/library/unittest.html) - [Pytest](https://docs.pytest.org/en/latest/) - `pyta` @@ -85,7 +79,7 @@ Installing each tester will also install the following additional packages: - tasty-quickcheck (cabal package) - `java` - openjdk-8-jdk -- `py` (python) +- `py` (python3) - none - `pyta` - none @@ -96,173 +90,162 @@ Installing each tester will also install the following additional packages: ## Markus-autotesting configuration options -These settings can be set by editing the `server/config.py` file. If any changes are made to any of the options marked _restart required_, it is recommended that the server be uninstalled and reinstalled. - -##### REDIS_CURRENT_TEST_SCRIPT_HASH -_restart required_ -Name of redis hash used to store the locations of test script directories. -There is no need to change this unless it would conflict with another redis key. -Default: `'curr_test_scripts'` - -##### REDIS_POP_HASH -Name of redis hash used to store pop interval data for each worker queue. -There is no need to change this unless it would conflict with another redis key. -Default: `'pop_intervals'` - -##### REDIS_WORKERS_HASH -_restart required_ -Name of redis hash used to store workers data (username and worker directory). -There is no need to change this unless it would conflict with another redis key. -Default: `'workers'` - -##### REDIS_CONNECTION_KWARGS -Dictionary containing keyword arguments to pass to rq.use_connection when connecting to a redis database -Default: `{}` - -##### REDIS_PREFIX -Prefix to prepend to all redis keys generated by the autotester. -There is no need to change this unless it would cause conflicts with other redis keys. -Default: `'autotest:'` - -##### POSTGRES_PREFIX -Prefix to prepend to all postgres databases created. -There is no need to change this unless it would cause conflicts with other postgres databases. -Default: `'autotest_'` - -##### WORKSPACE_DIR -_restart required_ -Absolute path to the workspace directory which will contain all directories and files generated by the autotester. -If this directory does not exist before the server is installed it will be created. -Default: None (you should set this before you install the server) - -##### SCRIPTS_DIR_NAME -_restart required_ -Name of the directory containing test scripts (under `WORKSPACE_DIR`) -If this directory does not exist before the server is installed it will be created. -There is no need to change this assuming `WORKSPACE_DIR` is empty before installation. -Default: `'scripts'` - -##### RESULTS_DIR_NAME -_restart required_ -Name of the directory containing test results (under `WORKSPACE_DIR`) -If this directory does not exist before the server is installed it will be created. -There is no need to change this assuming `WORKSPACE_DIR` is empty before installation. -Default: `'results'` - -##### SPECS_DIR_NAME -_restart required_ -Name of the directory containing tester environment specs (under `WORKSPACE_DIR`) -If this directory does not exist before the server is installed it will be created. 
-There is no need to change this assuming `WORKSPACE_DIR` is empty before installation. -Default: `'specs'` - -##### WORKERS_DIR_NAME -_restart required_ -Name of the directory containing secure workspace directories for each worker (under `WORKSPACE_DIR`) -If this directory does not exist before the server is installed it will be created. -There is no need to change this assuming `WORKSPACE_DIR` is empty before installation. -Default: `'workers'` - -##### LOGS_DIR_NAME -_restart required_ -Name of the directory containing log files (under `WORKSPACE_DIR`) -If this directory does not exist before the server is installed it will be created. -There is no need to change this assuming `WORKSPACE_DIR` is empty before installation. -Default: `'logs'` - -##### SERVER_USER -_restart required_ -Name of the user that enqueues and schedules each test job. -If this user does not exist before the server is installed it will be created. -If this is the empty string, the server user is assumed to be whichever user runs the server installation script. -Default: `''` - -##### WORKER_USERS -_restart required_ -String containing whitespace separated names of the users that run the test scripts themselves and report the results. -If these users do not exist before the server is installed they will be created. -If this is the empty string, a single worker user will be used and that user is the same as the SERVER_USER. -Default: `'autotst0 autotst1 autotst2 autotst3 autotst4 autotst5 autotst6 autotst7'` - -##### REAPER_USER_PREFIX -_restart required_ -Prefix to prepend to each username in WORKER_USERS to create a new user whose sole job is to safely kill any processes still running after a test has completed. -If these users do not exist before the server is installed they will be created. -If this is the empty string, no new users will be created and tests will be terminated in a slightly less secure way (though probably still good enough for most cases). -Default: `''` - -##### DEFAULT_ENV_NAME -_restart required_ -Name of the environment used by default (if no custom environment is needed to run a given tester). -There is no need to change this. -Default: `'defaultenv'` - -##### WORKER_QUEUES -A list of dictionaries containing the following keys/value pairs: -- `'name'`: a string representing the unique name of this queue -- `'filter'`: a function which takes the same keyword arguments as the `run_test` function in `autotest_enqueuer.py` and returns `True` if this queue should be used to schedule the test job -See `config.py` for more details and to see defaults. - -##### WORKERS -A list of tuples indicating the priority in which order a worker user should pop jobs off the end of each queue. -Each tuple contains an integer indicating the number of worker users who should respect this priority order, followed by a list containing the names of queues in priority order. 
-For example, the following indicates that two worker users should take jobs from queue `'A'` first and queue `'B'` second, and one worker user should take jobs from queue `'B'` first and queue `'A'` second and queue `'C'` third: - -```python -WORKERS = [(2, ['A', 'B']), - (1, ['B', 'A', 'C'])] +These settings can be overridden or extended by including a configuration file in one of two locations: + +- `${HOME}/.markus_autotester_config` (where `${HOME}` is the home directory of the user running the markus server) +- `/etc/markus_autotester_config` (for a system wide configuration) + +An example configuration file can be found in `doc/config_example.yml`. Please see below for a description of all options and defaults: + +```yaml +workspace: # an absolute path to a directory containing all files/workspaces required to run the autotester default is + # ${HOME}/.markus-autotesting/workspace where ${HOME} is the home directory of the user running the autotester + +server_user: # the username of the user designated to run the autotester itself. Default is the current user + +workers: + - users: + - name: # the username of a user designated to run tests for the autotester + reaper: # the username of a user used to clean up test processes. This value can be null (see details below) + queues: # a list of queue names that these users will monitor and select test jobs from. + # The order of this list indicates which queues have priority when selecting tests to run + # default is ['student', 'single', 'batch'] (see the "queues:" setting option below) + +redis: + url: # url for the redis database. default is: redis://127.0.0.1:6379/0 + +supervisor: + url: # url used by the supervisor process. default is: '127.0.0.1:9001' + +rlimit_settings: # RLIMIT settings (see details below) + nproc: # for example, this setting sets the hard and soft limits for the number of processes available to 300 + - 300 + - 300 + +resources: + port: # set a range of ports available for use by the tests (see details below). + min: 50000 # For example, this sets the range of ports from 50000 to 65535 + max: 65535 + postgresql: + port: # port the postgres server is running on + host: # host the postgres server is running on + +queues: + - name: # the name of a queue used to enqueue test jobs (see details below) + schema: # a json schema used to validate the json representation of the arguments passed to the test_enqueuer script + # by MarkUs (see details below) +``` + +### Markus-autotesting configuration details + +#### reaper users + +Each reaper user is associated with a single worker user. The reaper user's sole job is to safely kill any processes +still running after a test has completed. If these users do not exist before the server is installed they will be created. +If no reaper username is given in the configuration file, no new users will be created and tests will be terminated in a +slightly less secure way (though probably still good enough for most cases). + +#### rlimit settings + +Rlimit settings allow the user to specify how many system resources should be allocated to each worker user when +running tests. These limits are enforced using python's [`resource`](https://docs.python.org/3/library/resource.html) +library. + +In the configuration file, limits can be set using the resource name as a key and a list of integers as a value. The +list of integers should contain two values, the first being the soft limit and the second being the hard limit. 
+
+#### allocated ports
+
+Some tests require the use of a dedicated port that is guaranteed not to be in use by another process. This setting
+allows the user to specify a range from which these ports can be selected. When a test starts, the `PORT` environment
+variable will be set to the port number selected for this test run. Available port numbers will be different from test
+to test.
+
+#### queue names and schemas
+
+When a test run is sent to the autotester from MarkUs, the test is not run immediately. Instead, it is put in a queue and
+run only when a worker user becomes available. You can choose to have just a single queue or multiple queues.
+
+If using multiple queues, you can set a priority order for each worker user (see the `workers:` setting). The workers
+will prioritize running tests from queues that appear earlier in the priority order.
+
+When MarkUs sends a test to the autotester, we decide which queue to put the test in by inspecting the json string
+passed as an argument to the `markus_autotester` command (using either the `-j` or `-f` flags). This inspection
+involves validating that json string against a [json schema](https://json-schema.org/) for each queue. If the
+json string passes the validation for a certain queue, the test is added to that queue.
+
+For example, the default queue settings in the configuration are:
+
+```yaml
+queues:
+  - name: batch
+    schema: {'type': 'object', 'properties': {'batch_id': {'type': 'number'}}}
+  - name: single
+    schema: {'type': 'object', 'properties': {'batch_id': {'type': 'null'}, 'user_type': {'const': 'Admin'}}}
+  - name: student
+    schema: {'type': 'object', 'properties': {'batch_id': {'type': 'null'}, 'user_type': {'const': 'Student'}}}
 ```
-The number of workers specified in this way should be equal to the number of worker users specified in the WORKER_USERS config option.
-See `config.py` for more details and to see defaults.
+Under this default setup:
+ - a test with a non-null `batch_id` will be put in the `batch` queue.
+ - a test with a null `batch_id` and where `user_type == 'Admin'` will be put in the `single` queue.
+ - a test with a null `batch_id` and where `user_type == 'Student'` will be put in the `student` queue.
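As a rough illustration of this schema matching (this is not the autotester's actual dispatch code, and its selection logic may differ), the default schemas above can be checked with the `jsonschema` package, routing a test to the first queue whose schema accepts its arguments:

```python
from jsonschema import ValidationError, validate

# The default queue schemas from the yaml above, written as Python dicts.
QUEUE_SCHEMAS = {
    "batch": {"type": "object", "properties": {"batch_id": {"type": "number"}}},
    "single": {"type": "object",
               "properties": {"batch_id": {"type": "null"}, "user_type": {"const": "Admin"}}},
    "student": {"type": "object",
                "properties": {"batch_id": {"type": "null"}, "user_type": {"const": "Student"}}},
}

def choose_queue(test_args):
    """Return the name of the first queue whose schema accepts the given test arguments."""
    for queue_name, schema in QUEUE_SCHEMAS.items():
        try:
            validate(instance=test_args, schema=schema)
            return queue_name
        except ValidationError:
            continue
    return None

print(choose_queue({"batch_id": 42, "user_type": "Admin"}))      # -> batch
print(choose_queue({"batch_id": None, "user_type": "Student"}))  # -> student
```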
 
 ## MarkUs configuration options
 
+After installing the autotester, the next step is to update the configuration settings for MarkUs. These settings are in the MarkUs configuration files typically found in the `config/environments` directory of your MarkUs installation:
 
-##### AUTOTEST_ON
+##### config.x.autotest.enable
 Enables autotesting.
+Should be set to `true`.
 
-##### AUTOTEST_STUDENT_TESTS_ON
-Allows the instructor to let students run tests on their own.
-
-##### AUTOTEST_STUDENT_TESTS_BUFFER_TIME
+##### config.x.autotest.student_test_buffer
 With student tests enabled, a student can't request a new test if they already have a test in execution, to prevent denial of service. If the test script fails unexpectedly and does not return a result, a student would effectively be locked out from further testing. This is the amount of time after which a student can request a new test anyway.
-(ignored if *AUTOTEST_STUDENT_TESTS_ON* is *false*)
-
-##### AUTOTEST_CLIENT_DIR
+##### config.x.autotest.client_dir
 The directory where the test files for assignments are stored.
 (the user running MarkUs must be able to write here)
 
-##### AUTOTEST_SERVER_HOST
+##### config.x.autotest.server_host
 The server host name that the markus-autotesting server is installed on.
-(use *localhost* if the server runs on the same machine)
+(use `localhost` if the server runs on the same machine)
 
-##### AUTOTEST_SERVER_FILES_USERNAME
+##### config.x.autotest.server_username
 The server user to copy the tester and student files over.
-This should be the same as the SERVER_USER in the markus-autotesting config file (see [above](#markus-autotesting-configuration-options)).
+This should be the same as the `server_user` in the markus-autotesting configuration file.
 (SSH passwordless login must be set up for the user running MarkUs to connect with this user on the server;
 multiple MarkUs instances can use the same user;
-can be *nil*, forcing *AUTOTEST_SERVER_HOST* to be *localhost* and local file system copy to be used)
+can be `nil`, forcing `config.x.autotest.server_host` to be `localhost` and local file system copy to be used)
 
-##### AUTOTEST_SERVER_DIR
-The directory on the server where temporary files are copied.
+##### config.x.autotest.server_dir
+The directory on the autotest server where temporary files are copied.
-This should be the same as the WORKSPACE_DIR in the markus-autotesting config file (see [above](#markus-autotesting-configuration-options)).
+This should be the same as the `workspace` directory in the markus-autotesting config file.
 (multiple MarkUs instances can use the same directory)
 
-##### AUTOTEST_SERVER_COMMAND
-The command to run on the markus-autotesting server that runs the script in `server/autotest_enqueuer.py` script.
+##### config.x.autotest.server_command
+The command to run on the markus-autotesting server that runs the wrapper script that calls `markus_autotester`.
 In most cases, this should be set to `'autotest_enqueuer'`

From fa621ce036fad19a2b0e6a3e6921873de4537462 Mon Sep 17 00:00:00 2001
From: mishaschwartz
Date: Tue, 18 Feb 2020 16:58:30 -0500
Subject: [PATCH 43/46] readme: update layerfile badge url

---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index 5501af8a..d6665704 100644
--- a/README.md
+++ b/README.md
@@ -1,4 +1,4 @@
-[![Acceptance tests](https://layerci.com/github/MarkUsProject/markus-autotesting/badge)](https://layerci.com/jobs/github/MarkUsProject/markus-autotesting)
+[![Acceptance tests](https://layerci.com/badge/github/MarkUsProject/markus-autotesting)](https://layerci.com/badge/github/MarkUsProject/markus-autotesting)
 
 Autotesting with Markus
 ==============================

From c3de683d855ed7d569ba3ecd5819a5c488ad35de Mon Sep 17 00:00:00 2001
From: mishaschwartz
Date: Tue, 18 Feb 2020 17:00:34 -0500
Subject: [PATCH 44/46] readme: update layerfile badge link

---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index d6665704..0c936c0a 100644
--- a/README.md
+++ b/README.md
@@ -1,4 +1,4 @@
-[![Acceptance tests](https://layerci.com/badge/github/MarkUsProject/markus-autotesting)](https://layerci.com/badge/github/MarkUsProject/markus-autotesting)
+[![Acceptance tests](https://layerci.com/badge/github/MarkUsProject/markus-autotesting)](https://layerci.com/jobs/github/MarkUsProject/markus-autotesting)
 
 Autotesting with Markus
 ==============================

From 2bc91362730e81332d4b0049c1cc0d8ae12eee92 Mon Sep 17 00:00:00 2001
From: mishaschwartz
Date: Tue, 18 Feb 2020 17:30:02 -0500
Subject: [PATCH 45/46] docker: update docker configuration default options

---
 .dockerfiles/docker-config.yml | 12 +++++-------
 bin/install.sh                 |  1 -
 2 files changed, 5 insertions(+), 8 deletions(-)

diff --git a/.dockerfiles/docker-config.yml b/.dockerfiles/docker-config.yml
index 35f27642..46acfe8c 100644
--- a/.dockerfiles/docker-config.yml
+++ b/.dockerfiles/docker-config.yml
@@ -1,12 +1,10 @@
 workers:
-  - n: 1
+  - users:
+      - name: autotst0
+      - name: autotst1
+      - name: autotst2
+      - name: autotst3
     queues:
       - student
       - single
       - batch
-users:
-  workers:
-    - name: autotst0
-    - name: autotst1
-    - name: autotst2
-    - name: autotst3
diff --git a/bin/install.sh b/bin/install.sh
index 1997ab9d..3167c300 100755
--- a/bin/install.sh
+++ b/bin/install.sh
@@ -299,7 +299,6 @@ install_testers() {
 suggest_next_steps() {
   echo "[AUTOTEST-INSTALL] You must add MarkUs web server's public key to ${SERVER_USER}'s '~/.ssh/authorized_keys'"
   echo "[AUTOTEST-INSTALL] You may want to add '${BINDIR}/start-stop.sh start' to ${SERVER_USER}'s crontab with a @reboot time"
-  echo "[AUTOTEST-INSTALL] You should install the individual testers you plan to use"
 }
 
 load_config_settings() {

From 7edbbabce15c6e0cbe4ecd4ebb63ee2a5adf8279 Mon Sep 17 00:00:00 2001
From: mishaschwartz
Date: Mon, 9 Mar 2020 12:52:26 -0400
Subject: [PATCH 46/46] changelog: update changelog for v1.9.0

---
 Changelog.md | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/Changelog.md b/Changelog.md
index a9a7ae37..a21c670f 100644
--- a/Changelog.md
+++ b/Changelog.md
@@ -2,6 +2,8 @@
 All notable changes to this project will be documented here.
 
 ## [unreleased]
+
+## [1.9.0]
 - allow tests to write to existing subdirectories but not overwrite existing test script files (#237).
 - add ability to create a docker container for the autotester in development mode (#236).
 - major reorganization of the structure of this package (#236).