diff --git a/.dockerfiles/Dockerfile b/.dockerfiles/Dockerfile new file mode 100644 index 00000000..3763e5bd --- /dev/null +++ b/.dockerfiles/Dockerfile @@ -0,0 +1,19 @@ +ARG UBUNTU_VERSION + +FROM ubuntu:$UBUNTU_VERSION + +ARG LOGIN_USER + +RUN apt-get update && \ + apt-get -y install sudo + +# Create a directory for the app code (keep the name generic) +RUN mkdir -p /app + +RUN useradd -ms /bin/bash $LOGIN_USER && \ + usermod -aG sudo $LOGIN_USER && \ + echo "$LOGIN_USER ALL=(ALL) NOPASSWD:ALL" | sudo tee "/etc/sudoers.d/$LOGIN_USER" + +USER $LOGIN_USER + +WORKDIR /app diff --git a/.dockerfiles/docker-config.yml b/.dockerfiles/docker-config.yml new file mode 100644 index 00000000..46acfe8c --- /dev/null +++ b/.dockerfiles/docker-config.yml @@ -0,0 +1,10 @@ +workers: + - users: + - name: autotst0 + - name: autotst1 + - name: autotst2 + - name: autotst3 + queues: + - student + - single + - batch diff --git a/.dockerfiles/entrypoint-dev.sh b/.dockerfiles/entrypoint-dev.sh new file mode 100755 index 00000000..c2d0dfd5 --- /dev/null +++ b/.dockerfiles/entrypoint-dev.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash + +set -e + +if [ ! -f /.installed ]; then + /app/bin/install.sh -p '3.8' --docker + sudo touch /.installed +fi + +exec "$@" diff --git a/.flake8.ini b/.flake8.ini index 14951b5d..0e0f5037 100644 --- a/.flake8.ini +++ b/.flake8.ini @@ -1,3 +1,3 @@ [flake8] max-line-length = 120 -ignore = E266 \ No newline at end of file +ignore = E266 diff --git a/.gitignore b/.gitignore index 94c4171b..b60fa3eb 100644 --- a/.gitignore +++ b/.gitignore @@ -4,24 +4,28 @@ __pycache__ .DS_Store .hypothesis/ .pytest_cache/ +*.egg-info +.eggs +venv +# bin +bin/kill_worker_procs # server -server/venv -server/workspace -server/bin/kill_worker_procs -markus_config.rb +src/autotester/server/venv +src/autotester/server/workspace +src/autotester/server/bin/kill_worker_procs # testers -testers/testers/*/specs/.installed -testers/testers/*/specs/install_settings.json +src/autotester/testers/*/specs/.installed +src/autotester/testers/*/specs/install_settings.json # java -testers/testers/java/lib/.gradle -testers/testers/java/lib/build +src/autotester/testers/java/lib/.gradle +src/autotester/testers/java/lib/build # racket -testers/testers/racket/**/compiled/ +src/autotester/testers/racket/**/compiled/ # haskell markus_cabal diff --git a/.hound.yml b/.hound.yml index b36fe97f..89747ff9 100644 --- a/.hound.yml +++ b/.hound.yml @@ -1,4 +1,3 @@ flake8: enabled: true config_file: .flake8.ini - diff --git a/.travis.yml b/.travis.yml index e34b7ab7..14b06eaa 100644 --- a/.travis.yml +++ b/.travis.yml @@ -4,11 +4,6 @@ python: - "3.6" - "3.7" - "3.8" -# command to install dependencies -install: - - pip install pytest - - pip install hypothesis - - pip install -r server/bin/requirements.txt # command to run tests script: - - pytest --ignore testers/testers/py/tests + - python setup.py test diff --git a/Changelog b/Changelog deleted file mode 100644 index 7d000510..00000000 --- a/Changelog +++ /dev/null @@ -1,10 +0,0 @@ -# CHANGELOG - -All notable changes to this project will be documented here. 
- -_NOTE: This changelog starts from version 1.8.1 (changes prior to this version are not documented)_ - -## [1.8.1] -### Added -- changelog -- for all changes prior to this version see https://github.com/MarkUsProject/markus-autotesting/pulls?utf8=%E2%9C%93&q=is%3Apr+created%3A%3C2019-12-19+ diff --git a/Changelog.md b/Changelog.md new file mode 100644 index 00000000..a21c670f --- /dev/null +++ b/Changelog.md @@ -0,0 +1,21 @@ +# CHANGELOG +All notable changes to this project will be documented here. + +## [unreleased] + +## [1.9.0] +- allow tests to write to existing subdirectories but not overwrite existing test script files (#237). +- add ability to create a docker container for the autotester in development mode (#236). +- major reorganization of the structure of this package (#236). + - additional usage options for the server installation script (bin/install.sh). + - testers can/should now be installed using the server installation script instead of individually. + - configuration files now use yaml format. + - configuration file defaults are now included in the source code so the autotester can be run with or without a + user specific configuration file. + - changed the default location for the workspace directory. + +## [1.8.1] +_NOTE: This changelog starts from version 1.8.1 (changes prior to this version are not documented)_ +### Added +- changelog +- for all changes prior to this version see https://github.com/MarkUsProject/markus-autotesting/pulls?utf8=%E2%9C%93&q=is%3Apr+created%3A%3C2019-12-19+ diff --git a/Layerfile b/Layerfile index 6271b980..941d08dc 100644 --- a/Layerfile +++ b/Layerfile @@ -8,11 +8,8 @@ CHECKPOINT RUN python3 -m venv /tmp/venv -RUN /tmp/venv/bin/pip install -U pip -COPY server/bin/requirements.txt /tmp/ -RUN /tmp/venv/bin/pip install -U pytest hypothesis attrs -r /tmp/requirements.txt CHECKPOINT WORKDIR /app COPY . . -RUN /tmp/venv/bin/pytest --ignore testers/testers/py/tests +RUN /tmp/venv/bin/python setup.py test diff --git a/MANIFEST.in b/MANIFEST.in new file mode 100644 index 00000000..29494940 --- /dev/null +++ b/MANIFEST.in @@ -0,0 +1,3 @@ +include src/autotester/config_defaults/* +include src/autotester/lib/* +include src/autotester/testers/*/specs/* diff --git a/README.md b/README.md index 8ec357e7..0c936c0a 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -[![Acceptance tests](https://layerci.com/github/MarkUsProject/markus-autotesting/badge)](https://layerci.com/github/MarkUsProject/markus-autotesting) +[![Acceptance tests](https://layerci.com/badge/github/MarkUsProject/markus-autotesting)](https://layerci.com/jobs/github/MarkUsProject/markus-autotesting) Autotesting with Markus ============================== @@ -17,54 +17,48 @@ The autotesting client component is already included in a MarkUs installation. S ### Server -To install the autotesting server, run the `install.sh` script from the `server/bin` directory as: +To install the autotesting server, run the `install.sh` script from the `bin` directory with options: ``` -$ server/bin/install.sh +$ bin/install.sh [-p|--python-version python-version] [--non-interactive] [--docker] [--a|--all-testers] [-t|--testers tester ...] ``` +options: + +- `--python_version` : version of python to install/use to run the autotester (default is 3.8). +- `--non-interactive` : run the installer in non-interactive mode (all confirmations will be accepted without prompting the user). +- `--docker` : run the installer for installing in docker. 
This installs in non-interactive mode and iptables, postgresql debian packages will not be installed. +- `--all-testers` : install all testers as well as the server. See [Testers](#testers). +- `--testers` : install the individual named testers (See [Testers](#testers)). This option will be ignored if the --all-testers flag is used. + The server can be uninstalled by running the `uninstall.sh` script in the same directory. #### Dependencies -Installing the server will also install the following packages: +Installing the server will also install the following debian packages: -- python3.X (the python version can be configured in the config file; see below) +- python3.X (the python3 minor version can specified as an argument to the install script; see above) - python3.X-venv - redis-server - jq -- postgresql +- postgresql-client +- libpq-dev +- openssh-server +- gcc +- postgresql (if not running in a docker environment) +- iptables (if not running in a docker environment) -This script may also add new users and create new potgres databases. See the [configuration](#markus-autotesting-configuration-options) section for more details. +This script may also add new users and create new postgres databases. See the [configuration](#markus-autotesting-configuration-options) section for more details. ### Testers -After the server has been installed, one or more of the following testers should also be installed: - -- `haskell` -- `java` -- `py` -- `pyta` -- `racket` -- `custom` - -Each tester may be installed by running install scripts: - -``` -$ testers/testers/${tester_name}/bin/install.sh -``` - -where `tester_name` is one of the tester names listed above. - -Each tester can be uninstalled by running the `uninstall.sh` script in the same directory. - -Each language specific tester can run test files written in the following frameworks: +The markus autotester currently supports testers for the following languages and testing frameworks: - `haskell` - [QuickCheck](http://hackage.haskell.org/package/QuickCheck) - `java` - [JUnit](https://junit.org/junit4/) -- `py` +- `py` (python3) - [Unittest](https://docs.python.org/3/library/unittest.html) - [Pytest](https://docs.pytest.org/en/latest/) - `pyta` @@ -85,7 +79,7 @@ Installing each tester will also install the following additional packages: - tasty-quickcheck (cabal package) - `java` - openjdk-8-jdk -- `py` (python) +- `py` (python3) - none - `pyta` - none @@ -96,173 +90,162 @@ Installing each tester will also install the following additional packages: ## Markus-autotesting configuration options -These settings can be set by editing the `server/config.py` file. If any changes are made to any of the options marked _restart required_, it is recommended that the server be uninstalled and reinstalled. - -##### REDIS_CURRENT_TEST_SCRIPT_HASH -_restart required_ -Name of redis hash used to store the locations of test script directories. -There is no need to change this unless it would conflict with another redis key. -Default: `'curr_test_scripts'` - -##### REDIS_POP_HASH -Name of redis hash used to store pop interval data for each worker queue. -There is no need to change this unless it would conflict with another redis key. -Default: `'pop_intervals'` - -##### REDIS_WORKERS_HASH -_restart required_ -Name of redis hash used to store workers data (username and worker directory). -There is no need to change this unless it would conflict with another redis key. 
-Default: `'workers'` - -##### REDIS_CONNECTION_KWARGS -Dictionary containing keyword arguments to pass to rq.use_connection when connecting to a redis database -Default: `{}` - -##### REDIS_PREFIX -Prefix to prepend to all redis keys generated by the autotester. -There is no need to change this unless it would cause conflicts with other redis keys. -Default: `'autotest:'` - -##### POSTGRES_PREFIX -Prefix to prepend to all postgres databases created. -There is no need to change this unless it would cause conflicts with other postgres databases. -Default: `'autotest_'` - -##### WORKSPACE_DIR -_restart required_ -Absolute path to the workspace directory which will contain all directories and files generated by the autotester. -If this directory does not exist before the server is installed it will be created. -Default: None (you should set this before you install the server) - -##### SCRIPTS_DIR_NAME -_restart required_ -Name of the directory containing test scripts (under `WORKSPACE_DIR`) -If this directory does not exist before the server is installed it will be created. -There is no need to change this assuming `WORKSPACE_DIR` is empty before installation. -Default: `'scripts'` - -##### RESULTS_DIR_NAME -_restart required_ -Name of the directory containing test results (under `WORKSPACE_DIR`) -If this directory does not exist before the server is installed it will be created. -There is no need to change this assuming `WORKSPACE_DIR` is empty before installation. -Default: `'results'` - -##### SPECS_DIR_NAME -_restart required_ -Name of the directory containing tester environment specs (under `WORKSPACE_DIR`) -If this directory does not exist before the server is installed it will be created. -There is no need to change this assuming `WORKSPACE_DIR` is empty before installation. -Default: `'specs'` - -##### WORKERS_DIR_NAME -_restart required_ -Name of the directory containing secure workspace directories for each worker (under `WORKSPACE_DIR`) -If this directory does not exist before the server is installed it will be created. -There is no need to change this assuming `WORKSPACE_DIR` is empty before installation. -Default: `'workers'` - -##### LOGS_DIR_NAME -_restart required_ -Name of the directory containing log files (under `WORKSPACE_DIR`) -If this directory does not exist before the server is installed it will be created. -There is no need to change this assuming `WORKSPACE_DIR` is empty before installation. -Default: `'logs'` - -##### SERVER_USER -_restart required_ -Name of the user that enqueues and schedules each test job. -If this user does not exist before the server is installed it will be created. -If this is the empty string, the server user is assumed to be whichever user runs the server installation script. -Default: `''` - -##### WORKER_USERS -_restart required_ -String containing whitespace separated names of the users that run the test scripts themselves and report the results. -If these users do not exist before the server is installed they will be created. -If this is the empty string, a single worker user will be used and that user is the same as the SERVER_USER. -Default: `'autotst0 autotst1 autotst2 autotst3 autotst4 autotst5 autotst6 autotst7'` - -##### REAPER_USER_PREFIX -_restart required_ -Prefix to prepend to each username in WORKER_USERS to create a new user whose sole job is to safely kill any processes still running after a test has completed. -If these users do not exist before the server is installed they will be created. 
-If this is the empty string, no new users will be created and tests will be terminated in a slightly less secure way (though probably still good enough for most cases). -Default: `''` - -##### DEFAULT_ENV_NAME -_restart required_ -Name of the environment used by default (if no custom environment is needed to run a given tester). -There is no need to change this. -Default: `'defaultenv'` - -##### WORKER_QUEUES -A list of dictionaries containing the following keys/value pairs: -- `'name'`: a string representing the unique name of this queue -- `'filter'`: a function which takes the same keyword arguments as the `run_test` function in `autotest_enqueuer.py` and returns `True` if this queue should be used to schedule the test job -See `config.py` for more details and to see defaults. - -##### WORKERS -A list of tuples indicating the priority in which order a worker user should pop jobs off the end of each queue. -Each tuple contains an integer indicating the number of worker users who should respect this priority order, followed by a list containing the names of queues in priority order. -For example, the following indicates that two worker users should take jobs from queue `'A'` first and queue `'B'` second, and one worker user should take jobs from queue `'B'` first and queue `'A'` second and queue `'C'` third: - -```python -WORKERS = [(2, ['A', 'B']), - (1, ['B', 'A', 'C'])] +These settings can be overridden or extended by including a configuration file in one of two locations: + +- `${HOME}/.markus_autotester_config` (where `${HOME}` is the home directory of the user running the markus server) +- `/etc/markus_autotester_config` (for a system wide configuration) + +An example configuration file can be found in `doc/config_example.yml`. Please see below for a description of all options and defaults: + +```yaml +workspace: # an absolute path to a directory containing all files/workspaces required to run the autotester default is + # ${HOME}/.markus-autotesting/workspace where ${HOME} is the home directory of the user running the autotester + +server_user: # the username of the user designated to run the autotester itself. Default is the current user + +workers: + - users: + - name: # the username of a user designated to run tests for the autotester + reaper: # the username of a user used to clean up test processes. This value can be null (see details below) + queues: # a list of queue names that these users will monitor and select test jobs from. + # The order of this list indicates which queues have priority when selecting tests to run + # default is ['student', 'single', 'batch'] (see the "queues:" setting option below) + +redis: + url: # url for the redis database. default is: redis://127.0.0.1:6379/0 + +supervisor: + url: # url used by the supervisor process. default is: '127.0.0.1:9001' + +rlimit_settings: # RLIMIT settings (see details below) + nproc: # for example, this setting sets the hard and soft limits for the number of processes available to 300 + - 300 + - 300 + +resources: + port: # set a range of ports available for use by the tests (see details below). 
+    min: 50000 # For example, this sets the range of ports from 50000 to 65535
+    max: 65535
+  postgresql:
+    port: # port the postgres server is running on
+    host: # host the postgres server is running on
+
+queues:
+  - name: # the name of a queue used to enqueue test jobs (see details below)
+    schema: # a json schema used to validate the json representation of the arguments passed to the test_enqueuer script
+            # by MarkUs (see details below)
+```
+
+### Markus-autotesting configuration details
+
+#### reaper users
+
+Each reaper user is associated with a single worker user. The reaper user's sole job is to safely kill any processes
+still running after a test has completed. If these users do not exist before the server is installed, they will be created.
+If no reaper username is given in the configuration file, no new users will be created and tests will be terminated in a
+slightly less secure way (though probably still good enough for most cases).
+
+#### rlimit settings
+
+Rlimit settings allow the user to specify how many system resources should be allocated to each worker user when
+running tests. These limits are enforced using python's [`resource`](https://docs.python.org/3/library/resource.html)
+library.
+
+In the configuration file, limits can be set using the resource name as a key and a list of integers as a value. The
+list of integers should contain two values, the first being the soft limit and the second being the hard limit. For
+example, if we wish to [limit the number of open file descriptors](https://docs.python.org/3/library/resource.html#resource.RLIMIT_NOFILE)
+with a soft limit of 10 and a hard limit of 20, our configuration file would include:
+
+```yaml
+rlimit_settings:
+  nofile:
+    - 10
+    - 20
+```
+
+See python's [`resource`](https://docs.python.org/3/library/resource.html) library for all rlimit options.
+
+#### allocated ports
+
+Some tests require the use of a dedicated port that is guaranteed not to be in use by another process. This setting
+allows the user to specify a range from which these ports can be selected. When a test starts, the `PORT` environment
+variable will be set to the port number selected for this test run. Available port numbers will be different from test
+to test.
+
+#### queue names and schemas
+
+When a test run is sent to the autotester from MarkUs, the test is not run immediately. Instead, it is put in a queue and
+run only when a worker user becomes available. You can choose to have just a single queue or several.
+
+If using multiple queues, you can set a priority order for each worker user (see the `workers:` setting). The workers
+will prioritize running tests from queues that appear earlier in the priority order.
+
+To decide which queue a test should be put in, the autotester inspects the json string passed as an argument to the
+`markus_autotester` command by MarkUs (using either the `-j` or `-f` flag). This inspection involves validating that
+json string against the [json schema](https://json-schema.org/) defined for each queue. If the json string passes the
+validation for a certain queue, the test is added to that queue.
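The sketch below is a minimal illustration of that dispatch step, not the autotester's actual code: it assumes the
[`jsonschema`](https://pypi.org/project/jsonschema/) package and a hypothetical `queues` list shaped like the
`queues:` entries from the configuration file.

```python
import json

import jsonschema


def select_queue(request_json: str, queues: list) -> str:
    """Return the name of the first queue whose schema accepts the request.

    `queues` is a list of {"name": ..., "schema": ...} dicts mirroring the
    `queues:` section of the configuration file.
    """
    data = json.loads(request_json)
    for queue in queues:
        try:
            jsonschema.validate(instance=data, schema=queue["schema"])
        except jsonschema.ValidationError:
            continue  # this queue's schema rejected the request; try the next one
        return queue["name"]
    raise ValueError("test request does not match any queue schema")
```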
+ +For example, the default queue settings in the configuration are: + +```yaml +queues: + - name: batch + schema: {'type': 'object', 'properties': {'batch_id': {'type': 'number'}}} + - name: single + schema: {'type': 'object', 'properties': {'batch_id': {'type': 'null'}, 'user_type': {'const': 'Admin'}}} + - name: student + schema: {'type': 'object', 'properties': {'batch_id': {'type': 'null'}, 'user_type': {'const': 'Student'}}} ``` -The number of workers specified in this way should be equal to the number of worker users specified in the WORKER_USERS config option. -See `config.py` for more details and to see defaults. +Under this default setup: + - a test with a non-null `batch_id` will be put in the `batch` queue. + - a test with a null `batch_id` and where `user_type == 'Admin'` will be put in the `single` queue + - a test with a null `batch_id` and where `user_type == 'Student'` will be put in the `student` queue ## MarkUs configuration options +After installing the autotester, the next step is to update the configuration settings for MarkUs. These settings are in the MarkUs configuration files typically found in the `config/environments` directory of your MarkUs installation: -##### AUTOTEST_ON +##### config.x.autotest.enable Enables autotesting. +Should be set to `true` -##### AUTOTEST_STUDENT_TESTS_ON -Allows the instructor to let students run tests on their own. - -##### AUTOTEST_STUDENT_TESTS_BUFFER_TIME +##### config.x.autotest.student_test_buffer With student tests enabled, a student can't request a new test if they already have a test in execution, to prevent denial of service. If the test script fails unexpectedly and does not return a result, a student would effectively be locked out from further testing. This is the amount of time after which a student can request a new test anyway. -(ignored if *AUTOTEST_STUDENT_TESTS_ON* is *false*) - -##### AUTOTEST_CLIENT_DIR +##### config.x.autotest.client_dir The directory where the test files for assignments are stored. (the user running MarkUs must be able to write here) -##### AUTOTEST_SERVER_HOST +##### config.x.autotest.server_host The server host name that the markus-autotesting server is installed on. -(use *localhost* if the server runs on the same machine) +(use `localhost` if the server runs on the same machine) -##### AUTOTEST_SERVER_FILES_USERNAME +##### config.x.autotest.server_username The server user to copy the tester and student files over. -This should be the same as the SERVER_USER in the markus-autotesting config file (see [above](#markus-autotesting-configuration-options)). +This should be the same as the `server_user` in the markus-autotesting configuration file. (SSH passwordless login must be set up for the user running MarkUs to connect with this user on the server; multiple MarkUs instances can use the same user; -can be *nil*, forcing *AUTOTEST_SERVER_HOST* to be *localhost* and local file system copy to be used) +can be `nil`, forcing `config.x.autotest.server_host` to be `localhost` and local file system copy to be used) -##### AUTOTEST_SERVER_DIR -The directory on the server where temporary files are copied. +##### config.x.autotest.server_dir +The directory on the autotest server where temporary files are copied. -This should be the same as the WORKSPACE_DIR in the markus-autotesting config file (see [above](#markus-autotesting-configuration-options)). +This should be the same as the `workspace` directory in the markus-autotesting config file. 
(multiple MarkUs instances can use the same directory) -##### AUTOTEST_SERVER_COMMAND -The command to run on the markus-autotesting server that runs the script in `server/autotest_enqueuer.py` script. +##### config.x.autotest.server_command +The command to run on the markus-autotesting server that runs the wrapper script that calls `markus_autotester`. In most cases, this should be set to `'autotest_enqueuer'` diff --git a/server/bin/archive_workspace.sh b/bin/archive_workspace.sh similarity index 77% rename from server/bin/archive_workspace.sh rename to bin/archive_workspace.sh index 8eb5b66b..4eac5f3a 100755 --- a/server/bin/archive_workspace.sh +++ b/bin/archive_workspace.sh @@ -22,6 +22,14 @@ if [[ $# -lt 1 ]]; then exit 1 fi +# TODO: this file needs to be updated +echo 'This archiver is broken, do not use until it has been updated. +To archive the workspace, run: + +$ tar cJf +' 1>&2 +exit 1 + # vars THISSCRIPT=$(readlink -f ${BASH_SOURCE}) BINDIR=$(dirname ${THISSCRIPT}) diff --git a/bin/generate_supervisord_conf.py b/bin/generate_supervisord_conf.py new file mode 100755 index 00000000..e8d3c066 --- /dev/null +++ b/bin/generate_supervisord_conf.py @@ -0,0 +1,66 @@ +#!/usr/bin/env python3 + +from autotester.config import config +import os +import argparse + +HEADER = f"""[supervisord] + +[supervisorctl] + +[inet_http_server] +port = {config['supervisor', 'url']} + +[rpcinterface:supervisor] +supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface + +""" + +CONTENT = """[program:rq_worker_{worker_user}] +environment=MARKUSWORKERUSER={worker_user} +command={rq} worker {worker_args} {queues} +process_name=rq_worker_{worker_user} +numprocs={numprocs} +directory={directory} +stopsignal=TERM +autostart=true +autorestart=true +stopasgroup=true +killasgroup=true + +""" + +THIS_DIR = os.path.dirname(os.path.abspath(__file__)) + + +def write_conf_file(rq, conf_filename): + try: + redis_url = f'--url {config["redis", "url"]}' + except KeyError: + redis_url = "" + + with open(conf_filename, "w") as f: + f.write(HEADER) + for worker_data in config["workers"]: + queues = worker_data["queues"] + queue_str = " ".join(queues) + for users in worker_data["users"]: + worker_user = users["name"] + c = CONTENT.format( + worker_user=worker_user, + rq=rq, + worker_args=redis_url, + queues=queue_str, + numprocs=1, + directory=THIS_DIR, + ) + f.write(c) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("rq") + parser.add_argument("conf_filename") + args = parser.parse_args() + + write_conf_file(args.rq, args.conf_filename) diff --git a/bin/install.sh b/bin/install.sh new file mode 100755 index 00000000..3167c300 --- /dev/null +++ b/bin/install.sh @@ -0,0 +1,398 @@ +#!/bin/bash + +set -e + +THISSCRIPT=$(readlink -f "${BASH_SOURCE[0]}") +BINDIR=$(dirname "${THISSCRIPT}") +PROJECTROOT=$(dirname "${BINDIR}") +TESTERSROOT="${PROJECTROOT}/src/autotester" +SERVER_VENV="${PROJECTROOT}/venv" +INSTALLABLE_TESTERS=(custom haskell java py pyta racket) +TESTERS=() +USAGE_MESSAGE="Usage: $0 [-p|--python-version python-version] [--non-interactive] [--docker] [-a|--all-testers] [-t|--testers tester ...]" + +_check_python_version() { + # check if the python3 is at least version 3.6 + if dpkg --compare-versions "$1" 'lt' '3.6'; then + echo "[AUTOTEST-INSTALL-ERROR] Python3 must be at least version 3.6. 
Found version $1" 1>&2 + exit 1 + fi +} + +set_python_version() { + # get the python version from the argument passed to this script or use python3.8 by default + if [ -z "${PYTHON_VERSION}" ]; then + PYTHON_VERSION=3.8 + else + # check if both a major and minor version have been specified + if [[ ${PYTHON_VERSION} != $(echo "${PYTHON_VERSION}" | grep -ow '^[0-9].[0-9]$') ]]; then + echo "[AUTOTEST-INSTALL-ERROR] Please specify a major and minor python version only. Found ${PYTHON_VERSION}" 1>&2 + exit 1 + fi + _check_python_version "${PYTHON_VERSION}" + fi +} + +install_packages() { + # install required system packages + echo "[AUTOTEST-INSTALL] Installing system packages" + local debian_frontend + local apt_opts + local apt_yes + + if [ -n "${NON_INTERACTIVE}" ]; then + debian_frontend=noninteractive + apt_opts=(-o 'Dpkg::Options::=--force-confdef' -o 'Dpkg::Options::=--force-confold') + apt_yes='-y' + fi + + sudo DEBIAN_FRONTEND=${debian_frontend} apt-get ${apt_yes} "${apt_opts[@]}" update + sudo DEBIAN_FRONTEND=${debian_frontend} apt-get ${apt_yes} "${apt_opts[@]}" install software-properties-common + sudo add-apt-repository ${apt_yes} ppa:deadsnakes/ppa + sudo DEBIAN_FRONTEND=${debian_frontend} apt-get ${apt_yes} "${apt_opts[@]}" install "python${PYTHON_VERSION}" \ + "python${PYTHON_VERSION}-venv" \ + redis-server \ + jq \ + postgresql-client \ + libpq-dev \ + openssh-server \ + gcc + if [ -z "${DOCKER}" ]; then + sudo DEBIAN_FRONTEND=${debian_frontend} apt-get ${apt_yes} "${apt_opts[@]}" install iptables postgresql + fi + + _check_python_version "$(python3 --version | grep -oP '\s(\d).(\d)')" +} + +create_venv() { + # create a virtual environment which will be used to run the autotester and install the + # autotester package (in editable mode). 
+ echo "[AUTOTEST-INSTALL] Installing server virtual environment at '${SERVER_VENV}'" + rm -rf "${SERVER_VENV}" + "python${PYTHON_VERSION}" -m venv "${SERVER_VENV}" + + PYTHON="${SERVER_VENV}/bin/python" + + echo "[AUTOTEST-INSTALL] Installing python packages into virtual environment" + local pip="${SERVER_VENV}/bin/pip" + ${pip} install --upgrade pip + ${pip} install wheel # must be installed before requirements + ${pip} install -e "${PROJECTROOT}" +} + +_create_server_user() { + # create a user to run the autotester server if they do not already exist + if id "${SERVER_USER}" &> /dev/null; then + echo "[AUTOTEST-INSTALL] Using existing server user '${SERVER_USER}'" + else + echo "[AUTOTEST-INSTALL] Creating server user '${SERVER_USER}'" + local gecos + gecos=() + if [ -n "${NON_INTERACTIVE}" ]; then + gecos=('--gecos' '') + fi + sudo adduser --disabled-password "${gecos[@]}" "${SERVER_USER}" + fi +} + +_create_unprivileged_user() { + # create a user with limited permissions: + # - no home directory + # - no access to the port used by redis-server + # - the SERVER_USER will have sudo access to this unprivileged user + local username=$1 + + if id "${username}" &> /dev/null; then + echo "[AUTOTEST-INSTALL] Reusing existing user '${username}'" + else + echo "[AUTOTEST-INSTALL] Creating user '${username}'" + local gecos + gecos=() + if [ -n "${NON_INTERACTIVE}" ]; then + gecos=('--gecos' '') + fi + sudo adduser --disabled-login --no-create-home "${gecos[@]}" "${username}" + fi + if [ -z "${DOCKER}" ]; then + sudo iptables -I OUTPUT -p tcp --dport "${REDIS_PORT}" -m owner --uid-owner "${username}" -j REJECT + else + echo "[AUTOTEST-INSTALL] worker users are not restricted from accessing redis in a docker installation" + fi + echo "${SERVER_USER} ALL=(${username}) NOPASSWD:ALL" | sudo EDITOR="tee -a" visudo +} + +_create_worker_and_reaper_users() { + # create worker users and reapers users according to the configuration settings + # all user names for these users should be unique. 
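+    # Note: ${WORKER_AND_REAPER_USERS} lists worker and reaper usernames on alternating lines,
+    # so each iteration of the loop below reads one worker/reaper pair.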
+ local worker_user + local reaper_user + + while read -r worker_user; do + read -r reaper_user + if [[ "${SERVER_USER}" != "${worker_user}" ]]; then + _create_unprivileged_user "${worker_user}" + fi + if [[ "${reaper_user}" != 'null' ]]; then + _create_unprivileged_user "${reaper_user}" + sudo usermod -g "${worker_user}" "${reaper_user}" + fi + done <<< "${WORKER_AND_REAPER_USERS}" +} + +create_users() { + # create all users required to run the autotester + _create_server_user + _create_worker_and_reaper_users +} + +_create_workspace_subdir() { + local subdir + local permissions + subdir="$1" + permissions="$2" + + sudo mkdir -p "${subdir}" + sudo chown "${SERVER_USER}:${SERVER_USER}" "${subdir}" + sudo chmod "${permissions}" "${subdir}" +} + +_create_worker_dirs() { + # create directories for each worker use to run tests in + local worker_dir + while read -r worker_user; do + worker_dir="${WORKSPACE_SUBDIRS[WORKERS]}/${worker_user}" + mkdir -p "${worker_dir}" + sudo chown "${SERVER_USER}:${worker_user}" "${worker_dir}" + sudo chmod "ug=rwx,o=,+t" "${worker_dir}" + done <<< "${WORKER_USERS}" +} + +create_workspace() { + # create the workspace directory and populate it with the relevant directory structure + echo "[AUTOTEST-INSTALL] Creating workspace directories at '${WORKSPACE_DIR}'" + mkdir -p "${WORKSPACE_DIR}" + sudo chown "${SERVER_USER}:${SERVER_USER}" "${WORKSPACE_DIR}" + + _create_workspace_subdir "${WORKSPACE_SUBDIRS[SCRIPTS]}" 'u=rwx,go=' + _create_workspace_subdir "${WORKSPACE_SUBDIRS[RESULTS]}" 'u=rwx,go=' + _create_workspace_subdir "${WORKSPACE_SUBDIRS[LOGS]}" 'u=rwx,go=' + _create_workspace_subdir "${WORKSPACE_SUBDIRS[SPECS]}" 'u=rwx,go=rx' + _create_workspace_subdir "${WORKSPACE_SUBDIRS[WORKERS]}" 'u=rwx,go=rx' + + _create_worker_dirs +} + +create_worker_dbs() { + echo "[AUTOTEST-INSTALL] Creating databases for worker users" + local serverpwd + local pgpassfile + local psql_string + local psql + serverpwd=$(openssl rand -base64 32 | tr -dc 'a-zA-Z0-9' | cut -c1-15) + pgpassfile="${WORKSPACE_SUBDIRS[LOGS]}/.pgpass" + + if [ -z "${DOCKER}" ]; then + local pghost_args + if [[ "${POSTGRES_HOST}" == 'localhost' ]]; then + pghost_args='' # this allows for local peer authentication if it is configured + else + pghost_args="-h ${POSTGRES_HOST}" + fi + psql=(sudo -u postgres psql "${pghost_args}" -p "${POSTGRES_PORT}") + else + psql=(psql -h "${POSTGRES_HOST}" -p "${POSTGRES_PORT}" -U postgres) + fi + + sudo touch "${pgpassfile}" + sudo chown "${SERVER_USER}:${SERVER_USER}" "${pgpassfile}" + sudo chmod 'u=rw,go=' "${pgpassfile}" + echo -e "${serverpwd}" | sudo -u "${SERVER_USER}" tee "${pgpassfile}" > /dev/null + + psql_string="DROP ROLE IF EXISTS ${SERVER_USER}; + CREATE ROLE ${SERVER_USER}; + ALTER ROLE ${SERVER_USER} LOGIN PASSWORD '${serverpwd}'; + ALTER ROLE ${SERVER_USER} CREATEROLE;" + "${psql[@]}" <<< "${psql_string}" + + while read -r worker_user; do + local database="${POSTGRES_PREFIX}${worker_user}" + psql_string="DROP DATABASE IF EXISTS ${database}; + CREATE DATABASE ${database} OWNER ${SERVER_USER}; + REVOKE CONNECT ON DATABASE ${database} FROM PUBLIC;" + + if [[ "${worker_user}" != "${SERVER_USER}" ]]; then + psql_string="${psql_string} + DROP ROLE IF EXISTS ${worker_user}; + CREATE ROLE ${worker_user} LOGIN PASSWORD null; + " + fi + psql_string="${psql_string} + GRANT CONNECT, CREATE ON DATABASE ${database} TO ${worker_user};" + + "${psql[@]}" <<< "${psql_string}" + done <<< "${WORKER_USERS}" +} + +create_default_tester_venv() { + local default_tester_venv + 
default_tester_venv="${WORKSPACE_SUBDIRS[SPECS]}/${DEFAULT_VENV_NAME}/venv" + + "python${PYTHON_VERSION}" -m venv "${default_tester_venv}" + local pip + pip="${default_tester_venv}/bin/pip" + ${pip} install --upgrade pip + ${pip} install wheel # must be installed before requirements + ${pip} install "${TESTERSROOT}" +} + +compile_reaper_script() { + local reaperexe + reaperexe="${BINDIR}/kill_worker_procs" + + echo "[AUTOTEST-INSTALL] Compiling reaper script at '${reaperexe}'" + gcc "${reaperexe}.c" -o "${reaperexe}" + chmod ugo=r "${reaperexe}" +} + +create_enqueuer_wrapper() { + local enqueuer + enqueuer=/usr/local/bin/autotest_enqueuer + + echo "[AUTOTEST-INSTALL] Creating enqueuer wrapper at '${enqueuer}'" + + echo "#!/usr/bin/env bash + ${SERVER_VENV}/bin/markus_autotester \"\$@\"" | sudo tee ${enqueuer} > /dev/null + sudo chown "${SERVER_USER}:${SERVER_USER}" "${enqueuer}" + sudo chmod u=rwx,go=r ${enqueuer} + +} + +start_workers() { + local supervisorconf + local generate_script + local rq + + supervisorconf="${WORKSPACE_SUBDIRS[LOGS]}/supervisord.conf" + generate_script="${BINDIR}/generate_supervisord_conf.py" + rq="${SERVER_VENV}/bin/rq" + + + echo "[AUTOTEST-INSTALL] Generating supervisor config at '${supervisorconf}' and starting rq workers" + sudo -u "${SERVER_USER}" -- bash -c "${PYTHON} ${generate_script} ${rq} ${supervisorconf} && + ${BINDIR}/start-stop.sh start" +} + +install_testers() { + local tester + local to_install + if [[ -n ${INSTALL_ALL_TESTERS} ]]; then + to_install=( "${INSTALLABLE_TESTERS[@]}" ) + else + to_install=( "${TESTERS[@]}" ) + fi + for tester in "${to_install[@]}"; do + echo "[AUTOTEST-INSTALL] installing tester: ${tester}" + "${TESTERSROOT}/testers/${tester}/bin/install.sh" + done +} + +suggest_next_steps() { + echo "[AUTOTEST-INSTALL] You must add MarkUs web server's public key to ${SERVER_USER}'s '~/.ssh/authorized_keys'" + echo "[AUTOTEST-INSTALL] You may want to add '${BINDIR}/start-stop.sh start' to ${SERVER_USER}'s crontab with a @reboot time" +} + +load_config_settings() { + # Get the configuration settings as a json string and load config settings needed for this + # installation script + local config_json + config_json=$("${PYTHON}" -c "from autotester.config import config; print(config.to_json())") + + SERVER_USER=$(echo "${config_json}" | jq --raw-output '.server_user') + WORKER_AND_REAPER_USERS=$(echo "${config_json}" | jq --raw-output '.workers | .[] | .users | .[] | (.name, .reaper)') + REDIS_URL=$(echo "${config_json}" | jq --raw-output '.redis.url') + REDIS_PORT=$(redis-cli --raw -u "${REDIS_URL}" CONFIG GET port | tail -1) + WORKSPACE_DIR=$(echo "${config_json}" | jq --raw-output '.workspace') + POSTGRES_PREFIX=$(echo "${config_json}" | jq --raw-output '.resources.postgresql._prefix') + POSTGRES_PORT=$(echo "${config_json}" | jq --raw-output '.resources.postgresql.port') + POSTGRES_HOST=$(echo "${config_json}" | jq --raw-output '.resources.postgresql.host') + WORKER_USERS=$(echo "${WORKER_AND_REAPER_USERS}" | sed -n 'p;n') + DEFAULT_VENV_NAME=$(echo "${config_json}" | jq --raw-output '._workspace_contents._default_venv_name') + declare -gA WORKSPACE_SUBDIRS + WORKSPACE_SUBDIRS=( + ['SCRIPTS']="${WORKSPACE_DIR}"$(echo "${config_json}" | jq --raw-output '._workspace_contents._scripts') + ['RESULTS']="${WORKSPACE_DIR}"$(echo "${config_json}" | jq --raw-output '._workspace_contents._results') + ['LOGS']="${WORKSPACE_DIR}"$(echo "${config_json}" | jq --raw-output '._workspace_contents._logs') + ['SPECS']="${WORKSPACE_DIR}"$(echo 
"${config_json}" | jq --raw-output '._workspace_contents._specs') + ['WORKERS']="${WORKSPACE_DIR}"$(echo "${config_json}" | jq --raw-output '._workspace_contents._workers') + ) +} + +_add_valid_tester() { + local tester + for tester in "${INSTALLABLE_TESTERS[@]}"; do + if [[ "$1" == "${tester}" ]]; then + TESTERS=( "${TESTERS[@]}" "${tester}" ) + return 0 + fi + done + + TESTER_MESSAGE="$1 is not an installable tester. Choose from: ${INSTALLABLE_TESTERS[*]}\n${TESTER_MESSAGE}" + return 1 +} + +while [[ $# -gt 0 ]]; do + key="$1" + case $key in + -p|--python-version) + SELECTING_TESTERS= + PYTHON_VERSION="$2" + shift 2 + ;; + --non-interactive) + SELECTING_TESTERS= + NON_INTERACTIVE=1 + shift + ;; + --docker) + SELECTING_TESTERS= + NON_INTERACTIVE=1 + DOCKER=1 + shift + ;; + -a|--all-testers) + INSTALL_ALL_TESTERS=1 + shift + ;; + -t|--testers) + shift + SELECTING_TESTERS=1 + while [[ -n "${1// }" && "-t --testers" != *"$1"* ]] && _add_valid_tester "$1"; do + shift + done + ;; + *) + BAD_USAGE=1 + shift + ;; + esac +done + +if [[ -n ${BAD_USAGE} ]]; then + [[ -n "${SELECTING_TESTERS}" && -z ${INSTALL_ALL_TESTERS} ]] && echo -e "${TESTER_MESSAGE}" 1>&2 + echo "${USAGE_MESSAGE}" 1>&2 + exit 1 +fi + +set_python_version +install_packages +create_venv +load_config_settings +create_users +create_workspace +install_testers +create_default_tester_venv +compile_reaper_script +create_enqueuer_wrapper +create_worker_dbs +start_workers +suggest_next_steps diff --git a/server/bin/kill_worker_procs.c b/bin/kill_worker_procs.c similarity index 100% rename from server/bin/kill_worker_procs.c rename to bin/kill_worker_procs.c diff --git a/bin/start-stop.sh b/bin/start-stop.sh new file mode 100755 index 00000000..08e57c37 --- /dev/null +++ b/bin/start-stop.sh @@ -0,0 +1,75 @@ +#!/usr/bin/env bash + +set -e + +THISSCRIPT=$(readlink -f "${BASH_SOURCE[0]}") +THISDIR=$(dirname "${THISSCRIPT}") +PROJECTROOT=$(dirname "${THISDIR}") +PYTHON="${PROJECTROOT}/venv/bin/python" +RQ="${PROJECTROOT}/venv/bin/rq" +SUPERVISORD="${PROJECTROOT}/venv/bin/supervisord" + +start_supervisor() { + local pid_file + pid_file="${LOGS_DIR}/supervisord.pid" + if [ -f "${pid_file}" ]; then + local supervisor_pid + supervisor_pid=$(cat "${pid_file}") + echo "Supervisor appears to be running already (PID: ${supervisor_pid})" >&2 + exit 1 + fi + (cd "${LOGS_DIR}" && ${SUPERVISORD} -c supervisord.conf) +} + +stop_supervisor() { + local pid_file + pid_file="${LOGS_DIR}/supervisord.pid" + if [ ! 
-f "${pid_file}" ]; then + echo 'Supervisor appears to be stopped already' >&2 + exit 1 + fi + local supervisor_pid + supervisor_pid=$(cat "${pid_file}") + kill "${supervisor_pid}" +} + +load_config_settings() { + # Get the configuration settings as a json string and load config settings needed for this + # installation script + local config_json + config_json=$("${PYTHON}" -c "from autotester.config import config; print(config.to_json())") + + SERVER_USER=$(echo "${config_json}" | jq --raw-output '.server_user') + WORKSPACE_DIR=$(echo "${config_json}" | jq --raw-output '.workspace') + LOGS_DIR="${WORKSPACE_DIR}/"$(echo "${config_json}" | jq --raw-output '._workspace_contents._logs') + REDIS_URL=$(echo "${config_json}" | jq --raw-output '.redis.url') +} + +# script starts here + +load_config_settings + +if [[ "$(whoami)" != "${SERVER_USER}" ]]; then + echo "Please run this script as user: ${SERVER_USER}" >&2 + exit 2 +fi + +case $1 in + start) + start_supervisor + ;; + stop) + stop_supervisor + ;; + restart) + stop_supervisor + start_supervisor + ;; + stat) + "${RQ}" info --url "${REDIS_URL}" "${@:2}" + ;; + *) + echo "Usage: $0 [start | stop | restart | stat]" >&2 + exit 1 + ;; +esac diff --git a/server/bin/uninstall.sh b/bin/uninstall.sh similarity index 94% rename from server/bin/uninstall.sh rename to bin/uninstall.sh index 8f07909a..cfa2c36b 100755 --- a/server/bin/uninstall.sh +++ b/bin/uninstall.sh @@ -131,6 +131,16 @@ if [[ $# -gt 0 ]]; then exit 1 fi +# TODO: this uninstaller need to be updated +echo 'This uninstaller is broken, do not use until it has been updated. +To uninstall the autotester please run: + +$ bin/start-stop.sh stop + +and then optionally remove all tester users, server user, postgres databases, and remove any unneeded files in +the workspace directory' 1>&2 +exit 1 + # vars THISSCRIPT=$(readlink -f ${BASH_SOURCE}) BINDIR=$(dirname ${THISSCRIPT}) diff --git a/doc/config_example.yml b/doc/config_example.yml new file mode 100644 index 00000000..1d4d0110 --- /dev/null +++ b/doc/config_example.yml @@ -0,0 +1,39 @@ +workspace: !ENV ${HOME}/.markus-autotesting/workspace + +server_user: !ENV ${USER} + +workers: + - users: + - name: !ENV ${USER} + reaper: null + queues: + - student + - single + - batch + +redis: + url: redis://127.0.0.1:6379/0 + +supervisor: + url: '127.0.0.1:9001' + +rlimit_settings: + nproc: + - 300 + - 300 + +resources: + port: + min: 50000 + max: 65535 + postgresql: + port: 5432 + host: localhost + +queues: + - name: batch + schema: {'type': 'object', 'properties': {'batch_id': {'type': 'number'}}} + - name: single + schema: {'type': 'object', 'properties': {'batch_id': {'type': 'null'}, 'user_type': {'const': 'Admin'}}} + - name: student + schema: {'type': 'object', 'properties': {'batch_id': {'type': 'null'}, 'user_type': {'const': 'Student'}}} diff --git a/doc/hooks.py b/doc/hooks.py deleted file mode 100644 index 23c64c41..00000000 --- a/doc/hooks.py +++ /dev/null @@ -1,52 +0,0 @@ -import glob -import os -import json -import shutil -import subprocess - - -# Helper functions -def upload_svn_file(api, group_repo_name, assignment_name, file_name, svn_user, svn_password, commit_message): - repo_url = f'{api.parsed_url.scheme}://{api.parsed_url.netloc}/svn{api.parsed_url.path}/{group_repo_name}' - svn_co_command = ['svn', 'co', '--username', svn_user, '--password', svn_password, repo_url] - subprocess.run(svn_co_command, capture_output=True, check=True) - repo_file_path = os.path.join(group_repo_name, assignment_name, file_name) - previous_file = 
os.path.isfile(repo_file_path) - shutil.copy2(file_name, repo_file_path) - if not previous_file: - svn_add_command = ['svn', 'add', repo_file_path] - subprocess.run(svn_add_command, capture_output=True, check=True) - svn_ci_command = ['svn', 'ci', '--username', svn_user, '--password', svn_password, '-m', commit_message, - repo_file_path] - subprocess.run(svn_ci_command, capture_output=True, check=True) - - -# Hooks -def before_all(api, assignment_id, group_id, group_repo_name): - # clean up unwanted files - pattern = os.path.join('**', '*.o') - for file_path in glob.glob(pattern, recursive=True): - os.remove(file_path) - - -def before_each(api, assignment_id, group_id, group_repo_name): - pass - - -def after_each(api, assignment_id, group_id, group_repo_name): - pass - - -def after_all(api, assignment_id, group_id, group_repo_name): - # upload feedback file - feedback_name = 'feedback_pyta.txt' - if os.path.isfile(feedback_name): - with open(feedback_name) as feedback_open: - api.upload_feedback_file(assignment_id, group_id, feedback_name, feedback_open.read()) - # upload in svn repo - upload_svn_file(api, group_repo_name, 'AX', feedback_name, 'svn_user', 'svn_password', 'Feedback file') - # upload annotations - annotations_name = 'feedback_pyta.json' - if os.path.isfile(annotations_name): - with open(annotations_name) as annotations_open: - api.upload_annotations(assignment_id, group_id, json.load(annotations_open)) diff --git a/doc/hooks_example.py b/doc/hooks_example.py new file mode 100644 index 00000000..6ba8e782 --- /dev/null +++ b/doc/hooks_example.py @@ -0,0 +1,87 @@ +import glob +import os +import json +import shutil +import subprocess + + +# Helper functions +def upload_svn_file( + api, + group_repo_name, + assignment_name, + file_name, + svn_user, + svn_password, + commit_message, +): + repo_url = f"{api.parsed_url.scheme}://{api.parsed_url.netloc}/svn{api.parsed_url.path}/{group_repo_name}" + svn_co_command = [ + "svn", + "co", + "--username", + svn_user, + "--password", + svn_password, + repo_url, + ] + subprocess.run(svn_co_command, capture_output=True, check=True) + repo_file_path = os.path.join(group_repo_name, assignment_name, file_name) + previous_file = os.path.isfile(repo_file_path) + shutil.copy2(file_name, repo_file_path) + if not previous_file: + svn_add_command = ["svn", "add", repo_file_path] + subprocess.run(svn_add_command, capture_output=True, check=True) + svn_ci_command = [ + "svn", + "ci", + "--username", + svn_user, + "--password", + svn_password, + "-m", + commit_message, + repo_file_path, + ] + subprocess.run(svn_ci_command, capture_output=True, check=True) + + +# Hooks +def before_all(_api, _assignment_id, _group_id, _group_repo_name): + # clean up unwanted files + pattern = os.path.join("**", "*.o") + for file_path in glob.glob(pattern, recursive=True): + os.remove(file_path) + + +def before_each(_api, _assignment_id, _group_id, _group_repo_name): + pass + + +def after_each(_api, _assignment_id, _group_id, _group_repo_name): + pass + + +def after_all(api, assignment_id, group_id, group_repo_name): + # upload feedback file + feedback_name = "feedback_pyta.txt" + if os.path.isfile(feedback_name): + with open(feedback_name) as feedback_open: + api.upload_feedback_file( + assignment_id, group_id, feedback_name, feedback_open.read() + ) + # upload in svn repo + upload_svn_file( + api, + group_repo_name, + "AX", + feedback_name, + "svn_user", + "svn_password", + "Feedback file", + ) + # upload annotations + annotations_name = "feedback_pyta.json" + if 
os.path.isfile(annotations_name): + with open(annotations_name) as annotations_open: + api.upload_annotations(assignment_id, group_id, json.load(annotations_open)) diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 00000000..f326e27d --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,53 @@ +version: '3.7' + +services: + app: &app + build: + context: . + dockerfile: ./.dockerfiles/Dockerfile + args: + UBUNTU_VERSION: '18.04' + LOGIN_USER: 'docker' + image: markus-autotest-dev:1.0.0 + stdin_open: true + tty: true + user: docker + volumes: + - .:/app:cached + environment: + - USER=docker + - REDIS_URL=redis://redis_autotest:6379/ + - PGHOST=postgres_autotest + - PGPORT=5432 + - EDITOR=vi + - MARKUS_AUTOTESTER_CONFIG=/app/.dockerfiles/docker-config.yml + depends_on: + - postgres_autotest + - redis_autotest + + autotest: + <<: *app + entrypoint: .dockerfiles/entrypoint-dev.sh + command: '/bin/bash' + + postgres_autotest: + image: postgres:10 + volumes: + - .psqlrc:/root/.psqlrc:ro + - postgres_autotest:/var/lib/postgresql/data + - ./log:/root/log:cached + environment: + - PSQL_HISTFILE=/root/log/.psql_history + ports: + - '45432:5432' + + redis_autotest: + image: redis:3.2-alpine + volumes: + - redis_autotest:/data + ports: + - 6379 + +volumes: + postgres_autotest: + redis_autotest: diff --git a/server/autotest_server.py b/server/autotest_server.py deleted file mode 100755 index 7b6b589a..00000000 --- a/server/autotest_server.py +++ /dev/null @@ -1,808 +0,0 @@ -#!/usr/bin/env python3 - -import os -import fcntl -import shutil -import sys -import time -import json -import subprocess -import signal -import redis -import rq -import pwd -from contextlib import contextmanager -from functools import wraps -from itertools import zip_longest -from hooks_context.hooks_context import Hooks -import resource -import uuid -import tempfile -import hashlib -import yaml -import getpass -import secrets -import string -import psycopg2 -import socket -from psycopg2.extensions import AsIs -from markusapi import Markus -import config - - -CURRENT_TEST_SCRIPT_FORMAT = '{}_{}' -TEST_SCRIPT_DIR = os.path.join(config.WORKSPACE_DIR, config.SCRIPTS_DIR_NAME) -TEST_RESULT_DIR = os.path.join(config.WORKSPACE_DIR, config.RESULTS_DIR_NAME) -TEST_SPECS_DIR = os.path.join(config.WORKSPACE_DIR, config.SPECS_DIR_NAME) -REDIS_CURRENT_TEST_SCRIPT_HASH = '{}{}'.format(config.REDIS_PREFIX, config.REDIS_CURRENT_TEST_SCRIPT_HASH) -REDIS_WORKERS_HASH = '{}{}'.format(config.REDIS_PREFIX, config.REDIS_WORKERS_HASH) -REDIS_PORT_INT = '{}{}'.format(config.REDIS_PREFIX, config.REDIS_PORT_INT) -REDIS_POP_HASH = '{}{}'.format(config.REDIS_PREFIX, config.REDIS_POP_HASH) -DEFAULT_ENV_DIR = os.path.join(TEST_SPECS_DIR, config.DEFAULT_ENV_NAME) -PGPASSFILE = os.path.join(config.WORKSPACE_DIR, config.LOGS_DIR_NAME, '.pgpass') - -TEST_SCRIPTS_SETTINGS_FILENAME = 'settings.json' -TEST_SCRIPTS_FILES_DIRNAME = 'files' -HOOKS_FILENAME = 'hooks.py' - -PORT_MIN = 50000 -PORT_MAX = 65535 - -# For each rlimit limit (key), make sure that cleanup processes -# have at least n=(value) resources more than tester processes -RLIMIT_ADJUSTMENTS = {'RLIMIT_NPROC': 10} - -TESTER_IMPORT_LINE = {'custom' : 'from testers.custom.markus_custom_tester import MarkusCustomTester as Tester', - 'haskell' : 'from testers.haskell.markus_haskell_tester import MarkusHaskellTester as Tester', - 'java' : 'from testers.java.markus_java_tester import MarkusJavaTester as Tester', - 'py' : 'from testers.py.markus_python_tester import MarkusPythonTester as 
Tester', - 'pyta' : 'from testers.pyta.markus_pyta_tester import MarkusPyTATester as Tester', - 'racket' : 'from testers.racket.markus_racket_tester import MarkusRacketTester as Tester'} - -### CUSTOM EXCEPTION CLASSES ### - -class AutotestError(Exception): pass - -### HELPER FUNCTIONS ### - -def stringify(*args): - for a in args: - yield str(a) - -def rlimit_str2int(rlimit_string): - return resource.__getattribute__(rlimit_string) - -def current_user(): - return pwd.getpwuid(os.getuid()).pw_name - -def get_reaper_username(test_username): - return '{}{}'.format(config.REAPER_USER_PREFIX, test_username) - -def decode_if_bytes(b, format='utf-8'): - return b.decode(format) if isinstance(b, bytes) else b - -def clean_dir_name(name): - """ Return name modified so that it can be used as a unix style directory name """ - return name.replace('/', '_') - -def random_tmpfile_name(): - return os.path.join(tempfile.gettempdir(), uuid.uuid4().hex) - -def get_test_script_key(markus_address, assignment_id): - """ - Return unique key for each assignment used for - storing the location of test scripts in Redis - """ - clean_markus_address = clean_dir_name(markus_address) - return CURRENT_TEST_SCRIPT_FORMAT.format(clean_markus_address, assignment_id) - -def test_script_directory(markus_address, assignment_id, set_to=None): - """ - Return the directory containing the test scripts for a specific assignment. - Optionally updates the location of the test script directory to the value - of the set_to keyword argument (if it is not None) - """ - key = get_test_script_key(markus_address, assignment_id) - r = redis_connection() - if set_to is not None: - r.hset(REDIS_CURRENT_TEST_SCRIPT_HASH, key, set_to) - out = r.hget(REDIS_CURRENT_TEST_SCRIPT_HASH, key) - return decode_if_bytes(out) - -def recursive_iglob(root_dir): - """ - Walk breadth first over a directory tree starting at root_dir and - yield the path to each directory or file encountered. - Yields a tuple containing a string indicating whether the path is to - a directory ("d") or a file ("f") and the path itself. Raise a - ValueError if the root_dir doesn't exist - """ - if os.path.isdir(root_dir): - for root, dirnames, filenames in os.walk(root_dir): - yield from (('d', os.path.join(root, d)) for d in dirnames) - yield from (('f', os.path.join(root, f)) for f in filenames) - else: - raise ValueError('directory does not exist: {}'.format(root_dir)) - -def redis_connection(): - """ - Return the currently open redis connection object. If there is no - connection currently open, one is created using the keyword arguments - specified in config.REDIS_CONNECTION_KWARGS - """ - conn = rq.get_current_connection() - if conn: - return conn - kwargs = config.REDIS_CONNECTION_KWARGS - rq.use_connection(redis=redis.Redis(**kwargs)) - return rq.get_current_connection() - -def copy_tree(src, dst, exclude=[]): - """ - Recursively copy all files and subdirectories in the path - indicated by src to the path indicated by dst. If directories - don't exist, they are created. Do not copy files or directories - in the exclude list. 
- """ - copied = [] - for fd, file_or_dir in recursive_iglob(src): - src_path = os.path.relpath(file_or_dir, src) - if src_path in exclude: - continue - target = os.path.join(dst, src_path) - if fd == 'd': - os.makedirs(target, exist_ok=True) - else: - os.makedirs(os.path.dirname(target), exist_ok=True) - shutil.copy2(file_or_dir, target) - copied.append((fd, target)) - return copied - -def ignore_missing_dir_error(_func, _path, excinfo): - """ Used by shutil.rmtree to ignore a FileNotFoundError """ - err_type, err_inst, traceback = excinfo - if err_type == FileNotFoundError: - return - raise err_inst - -def move_tree(src, dst): - """ - Recursively move all files and subdirectories in the path - indicated by src to the path indicated by dst. If directories - don't exist, they are created. - """ - os.makedirs(dst, exist_ok=True) - moved = copy_tree(src, dst) - shutil.rmtree(src, onerror=ignore_missing_dir_error) - return moved - -def loads_partial_json(json_string, expected_type=None): - """ - Return a list of objects loaded from a json string and a boolean - indicating whether the json_string was malformed. This will try - to load as many valid objects as possible from a (potentially - malformed) json string. If the optional expected_type keyword argument - is not None then only objects of the given type are returned, - if any objects of a different type are found, the string will - be treated as malfomed. - """ - i = 0 - decoder = json.JSONDecoder() - results = [] - malformed = False - json_string = json_string.strip() - while i < len(json_string): - try: - obj, ind = decoder.raw_decode(json_string[i:]) - if expected_type is None or isinstance(obj, expected_type): - results.append(obj) - elif json_string[i:i+ind].strip(): - malformed = True - i += ind - except json.JSONDecodeError: - if json_string[i].strip(): - malformed = True - i += 1 - return results, malformed - -@contextmanager -def fd_open(path, flags=os.O_RDONLY, *args, **kwargs): - """ - Open the file or directory at path, yield its - file descriptor, and close it when finished. - flags, *args and **kwargs are passed on to os.open. - """ - fd = os.open(path, flags, *args, **kwargs) - try: - yield fd - finally: - os.close(fd) - -@contextmanager -def fd_lock(file_descriptor, exclusive=True): - """ - Lock the object with the given file descriptor and unlock it - when finished. A lock can either be exclusive or shared by - setting the exclusive keyword argument to True or False. - """ - fcntl.flock(file_descriptor, fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH) - try: - yield - finally: - fcntl.flock(file_descriptor, fcntl.LOCK_UN) - -def tester_user(): - """ - Get the workspace for the tester user specified by the MARKUSWORKERUSER - environment variable, return the user_name and path to that user's workspace. - - Raises an AutotestError if a tester user is not specified or if a workspace - has not been setup for that user. - """ - r = redis_connection() - - user_name = os.environ.get('MARKUSWORKERUSER') - if user_name is None: - raise AutotestError('No worker users available to run this job') - - user_workspace = r.hget(REDIS_WORKERS_HASH, user_name) - if user_workspace is None: - raise AutotestError(f'No workspace directory for user: {user_name}') - - return user_name, decode_if_bytes(user_workspace) - -### MAINTENANCE FUNCTIONS ### - -def update_pop_interval_stat(queue_name): - """ - Update the values contained in the redis hash named REDIS_POP_HASH for - the queue named queue_name. 
This should be called whenever a new job - is popped from a queue for which we want to keep track of the popping - rate. For more details about the data updated see get_pop_interval_stat. - """ - r = redis_connection() - now = time.time() - r.hsetnx(REDIS_POP_HASH, '{}_start'.format(queue_name), now) - r.hset(REDIS_POP_HASH, '{}_last'.format(queue_name), now) - r.hincrby(REDIS_POP_HASH, '{}_count'.format(queue_name), 1) - -def clear_pop_interval_stat(queue_name): - """ - Reset the values contained in the redis hash named REDIS_POP_HASH for - the queue named queue_name. This should be called whenever a queue becomes - empty. For more details about the data updated see get_pop_interval_stat. - """ - r = redis_connection() - r.hdel(REDIS_POP_HASH, '{}_start'.format(queue_name)) - r.hset(REDIS_POP_HASH, '{}_last'.format(queue_name), 0) - r.hset(REDIS_POP_HASH, '{}_count'.format(queue_name), 0) - -def get_pop_interval_stat(queue_name): - """ - Return the following data about the queue named queue_name: - - the time the first job was popped from the queue during the - current burst of jobs. - - the number of jobs popped from the queue during the current - burst of jobs. - - the time the most recent job was popped from the queue during - current burst of jobs. - """ - r = redis_connection() - start = r.hget(REDIS_POP_HASH, '{}_start'.format(queue_name)) - last = r.hget(REDIS_POP_HASH, '{}_count'.format(queue_name)) - count = r.hget(REDIS_POP_HASH, '{}_count'.format(queue_name)) - return start, last, count - -def get_avg_pop_interval(queue_name): - """ - Return the average interval between pops off of the end of the - queue named queue_name during the current burst of jobs. - Return None if there are no jobs in the queue, indicating that - there is no current burst. - """ - start, last, count = get_pop_interval_stat(queue_name) - try: - start = float(start) - last = float(last) - count = int(count) - except TypeError: - return None - count -= 1 - return (last-start) / count if count else 0 - -def clean_up(): - """ Reset the pop interval data for each empty queue """ - with rq.Connection(redis_connection()): - for q in rq.Queue.all(): - if q.is_empty(): - clear_pop_interval_stat(q.name) - - -def clean_after(func): - """ - Call the clean_up function after the - decorated function func is finished - """ - @wraps(func) - def wrapper(*args, **kwargs): - try: - return func(*args, **kwargs) - finally: - clean_up() - return wrapper - -### RUN TESTS ### - -def copy_test_script_files(markus_address, assignment_id, tests_path): - """ - Copy test script files for a given assignment to the tests_path - directory if they exist. tests_path may already exist and contain - files and subdirectories. - """ - test_script_outer_dir = test_script_directory(markus_address, assignment_id) - test_script_dir = os.path.join(test_script_outer_dir, TEST_SCRIPTS_FILES_DIRNAME) - if os.path.isdir(test_script_dir): - with fd_open(test_script_dir) as fd: - with fd_lock(fd, exclusive=False): - return copy_tree(test_script_dir, tests_path) - return [] - -def setup_files(files_path, tests_path, markus_address, assignment_id): - """ - Copy test script files and student files to the working directory tests_path, - then make it the current working directory. 
- The following permissions are also set: - - tests_path directory: rwxrwx--T - - test subdirectories: rwxr-xr-x - - test files: rw-r--r-- - - student subdirectories: rwxrwxrwx - - student files: rw-rw-rw- - """ - os.chmod(tests_path, 0o1770) - student_files = move_tree(files_path, tests_path) - for fd, file_or_dir in student_files: - if fd == 'd': - os.chmod(file_or_dir, 0o777) - else: - os.chmod(file_or_dir, 0o666) - script_files = copy_test_script_files(markus_address, assignment_id, tests_path) - for fd, file_or_dir in script_files: - permissions = 0o755 - if fd == 'f': - permissions -= 0o111 - os.chmod(file_or_dir, permissions) - return student_files, script_files - -def test_run_command(test_username=None): - """ - Return a command used to run test scripts as a the test_username - user, with the correct arguments. Set test_username to None to - run as the current user. - - >>> test_script = 'mysscript.py' - >>> test_run_command('f').format(test_script) - 'sudo -u f -- bash -c "./myscript.py"' - - >>> test_run_command().format(test_script) - './myscript.py' - """ - cmd = '{}' - if test_username is not None: - cmd = ' '.join(('sudo', '-Eu', test_username, '--', 'bash', '-c', - "'{}'".format(cmd))) - - return cmd - -def create_test_group_result(stdout, stderr, run_time, extra_info, timeout=None): - """ - Return the arguments passed to this function in a dictionary. If stderr is - falsy, change it to None. Load the json string in stdout as a dictionary. - """ - test_results, malformed = loads_partial_json(stdout, dict) - return {'time' : run_time, - 'timeout' : timeout, - 'tests' : test_results, - 'stderr' : stderr or None, - 'malformed' : stdout if malformed else None, - 'extra_info': extra_info or {}} - -def get_test_preexec_fn(): - """ - Return a function that sets rlimit settings specified in config file - This function ensures that for specific limits (defined in RLIMIT_ADJUSTMENTS), - there are at least n=RLIMIT_ADJUSTMENTS[limit] resources available for cleanup - processes that are not available for test processes. This ensures that cleanup - processes will always be able to run. - """ - def preexec_fn(): - for limit_str in config.RLIMIT_SETTINGS.keys() | RLIMIT_ADJUSTMENTS.keys(): - limit = rlimit_str2int(limit_str) - - values = config.RLIMIT_SETTINGS.get(limit_str, (resource.RLIM_INFINITY, resource.RLIM_INFINITY)) - curr_soft, curr_hard = resource.getrlimit(limit) - soft, hard = (min(vals) for vals in zip((curr_soft, curr_hard), values)) - # reduce the hard limit so that cleanup scripts will have at least - # adj more resources to use. - adj = RLIMIT_ADJUSTMENTS.get(limit_str, 0) - if (curr_hard - hard) < adj: - hard = curr_hard - adj - # make sure the soft limit doesn't exceed the hard limit - hard = max(hard, 0) - soft = max(min(hard, soft), 0) - - resource.setrlimit(limit, (soft, hard)) - - return preexec_fn - -def get_cleanup_preexec_fn(): - """ - Return a function that sets the rlimit settings specified in RLIMIT_ADJUSTMENTS - so that both the soft and hard limits are set as high as possible. This ensures - that cleanup processes will have as many resources as possible to run. 
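To make the limit arithmetic in get_test_preexec_fn concrete, here is a hedged trace with invented numbers: the (300, 300) pair comes from config.RLIMIT_SETTINGS['RLIMIT_NPROC'], while the reserve value is only an assumption (the real adjustments live in RLIMIT_ADJUSTMENTS near the top of this module).
```
# Worked example of the reservation logic (illustrative values only).
curr_soft, curr_hard = 305, 305      # limits inherited from the worker process
conf_soft, conf_hard = 300, 300      # config.RLIMIT_SETTINGS['RLIMIT_NPROC']
adj = 10                             # assumed RLIMIT_ADJUSTMENTS reserve

soft, hard = min(curr_soft, conf_soft), min(curr_hard, conf_hard)  # 300, 300
if (curr_hard - hard) < adj:         # 305 - 300 = 5 < 10: not enough headroom
    hard = curr_hard - adj           # 295
hard = max(hard, 0)
soft = max(min(hard, soft), 0)       # 295
# The test process runs with (295, 295), leaving at least 10 process slots
# free for the cleanup/reaper commands that run afterwards.
```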
- """ - def preexec_fn(): - for limit_str in RLIMIT_ADJUSTMENTS: - limit = rlimit_str2int(limit_str) - soft, hard = resource.getrlimit(limit) - soft = max(soft, hard) - resource.setrlimit(limit, (soft, hard)) - - return preexec_fn - -def kill_with_reaper(test_username): - """ - Try to kill all processes currently being run by test_username using the method - described in this article: https://lwn.net/Articles/754980/. Return True if this - is method is attempted and is successful, otherwise return False. - - This copies the kill_worker_procs executable as the test_username user and sets - the permissions of this copied file so that it can be executed by the corresponding - reaper user. Crucially, it sets the permissions to include the setuid bit so that - the reaper user can manipulate the real uid and effective uid values of the process. - - The reaper user then runs this copied executable which kills all processes being - run by the test_username user, deletes itself and exits with a 0 exit code if - sucessful. - """ - if config.REAPER_USER_PREFIX: - reaper_username = get_reaper_username(test_username) - cwd = os.path.dirname(os.path.abspath(__file__)) - kill_file_dst = random_tmpfile_name() - preexec_fn = get_cleanup_preexec_fn() - - copy_cmd = "sudo -u {0} -- bash -c 'cp kill_worker_procs {1} && chmod 4550 {1}'".format(test_username, kill_file_dst) - copy_proc = subprocess.Popen(copy_cmd, shell=True, preexec_fn=preexec_fn, cwd=cwd) - if copy_proc.wait() < 0: # wait returns the return code of the proc - return False - - kill_cmd = 'sudo -u {} -- bash -c {}'.format(reaper_username, kill_file_dst) - kill_proc = subprocess.Popen(kill_cmd, shell=True, preexec_fn=preexec_fn) - return kill_proc.wait() == 0 - return False - -def kill_without_reaper(test_username): - """ - Kill all processes that test_username is able to kill - """ - kill_cmd = f"sudo -u {test_username} -- bash -c 'kill -KILL -1'" - subprocess.run(kill_cmd, shell=True) - -def create_test_script_command(env_dir, tester_type): - """ - Return string representing a command line command to - run tests. - """ - import_line = TESTER_IMPORT_LINE[tester_type] - python_lines = [ 'import sys, json', - import_line, - 'from testers.markus_test_specs import MarkusTestSpecs', - f'Tester(specs=MarkusTestSpecs.from_json(sys.stdin.read())).run()'] - venv_activate = os.path.join(os.path.abspath(env_dir), 'venv', 'bin', 'activate') - python_str = '; '.join(python_lines) - venv_str = f'source {venv_activate}' - return ' && '.join([venv_str, f'python -c "{python_str}"']) - - -def setup_database(test_username): - user = getpass.getuser() - database = f'{config.POSTGRES_PREFIX}{test_username}' - - with open(PGPASSFILE) as f: - password = f.read().strip() - - with psycopg2.connect(database=database, user=user, password=password, host='localhost') as conn: - with conn.cursor() as cursor: - cursor.execute("DROP OWNED BY CURRENT_USER;") - if test_username != user: - user = test_username - password = ''.join(secrets.choice(string.ascii_letters + string.digits) for _ in range(20)) - cursor.execute("ALTER USER %s WITH PASSWORD %s;", (AsIs(user), password)) - - return {'PGDATABASE': database, 'PGPASSWORD': password, 'PGUSER': user, 'AUTOTESTENV': 'true'} - - -def next_port(): - """ Return a port number that is greater than the last time this method was - called (by any process on this machine). 
- - This port number is not guaranteed to be free - """ - r = redis_connection() - return int(r.incr(REDIS_PORT_INT) or 0) % (PORT_MAX - PORT_MIN) + PORT_MIN - - -def get_available_port(): - """ Return the next available open port on localhost. """ - while True: - try: - with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: - s.bind(('localhost', next_port())) - s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) - port = s.getsockname()[1] - return str(port) - except OSError: - continue - - -def get_env_vars(test_username): - """ Return a dictionary containing all environment variables to pass to the next test """ - db_env_vars = setup_database(test_username) - port_number = get_available_port() - return {'PORT': port_number, **db_env_vars} - - -def run_test_specs(cmd, test_specs, test_categories, tests_path, test_username, hooks): - """ - Run each test script in test_scripts in the tests_path directory using the - command cmd. Return the results. - """ - results = [] - preexec_fn = get_test_preexec_fn() - - with hooks.around('all'): - for settings in test_specs['testers']: - tester_type = settings['tester_type'] - extra_hook_kwargs = {'settings': settings} - with hooks.around(tester_type, extra_kwargs=extra_hook_kwargs): - env_dir = settings.get('env_loc', DEFAULT_ENV_DIR) - - cmd_str = create_test_script_command(env_dir, tester_type) - args = cmd.format(cmd_str) - - for test_data in settings['test_data']: - test_category = test_data.get('category', []) - if set(test_category) & set(test_categories): #TODO: make sure test_categories is non-string collection type - extra_hook_kwargs = {'test_data': test_data} - with hooks.around('each', builtin_selector=test_data, extra_kwargs=extra_hook_kwargs): - start = time.time() - out, err = '', '' - timeout_expired = None - timeout = test_data.get('timeout') - try: - env_vars = get_env_vars(test_username) - proc = subprocess.Popen(args, start_new_session=True, cwd=tests_path, shell=True, - stdout=subprocess.PIPE, stderr=subprocess.PIPE, - stdin=subprocess.PIPE, preexec_fn=preexec_fn, - env={**os.environ, **env_vars}) - try: - settings_json = json.dumps({**settings, 'test_data': test_data}).encode('utf-8') - out, err = proc.communicate(input=settings_json, timeout=timeout) - except subprocess.TimeoutExpired: - if test_username == current_user(): - pgrp = os.getpgid(proc.pid) - os.killpg(pgrp, signal.SIGKILL) - else: - if not kill_with_reaper(test_username): - kill_without_reaper(test_username) - out, err = proc.communicate() - timeout_expired = timeout - except Exception as e: - err += '\n\n{}'.format(e) - finally: - out = decode_if_bytes(out) - err = decode_if_bytes(err) - duration = int(round(time.time()-start, 3) * 1000) - extra_info = test_data.get('extra_info', {}) - results.append(create_test_group_result(out, err, duration, extra_info, timeout_expired)) - return results, hooks.format_errors() - -def store_results(results_data, markus_address, assignment_id, group_id, submission_id): - """ - Write the results of multiple test script runs to an output file as a json string. 
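For reference, a hedged sketch of the structure that ends up in that file, pieced together from create_test_group_result above and finalize_results_data below; the dictionaries inside "tests" are whatever the tester printed to stdout, so those fields are invented.
```
# Illustrative shape only -- one entry per test group that was run.
results_data = {
    "test_groups": [
        {
            "time": 1234,            # group duration in milliseconds
            "timeout": None,         # the timeout value if the group timed out, else None
            "tests": [{"name": "test_1", "output": "..."}],   # invented tester output
            "stderr": None,
            "malformed": None,       # raw stdout if it could not be parsed as JSON
            "extra_info": {},
        }
    ],
    "error": None,
    "hooks_error": None,
    "time_to_service": 42,           # ms between enqueueing and servicing the job
}
```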
- The output file is located at: - {TEST_RESULT_DIR}/{markus_address}/{assignment_id}/{group_id}/{submission_id}/ouput.json - """ - clean_markus_address = clean_dir_name(markus_address) - run_time = "run_{}".format(int(time.time())) - destination = os.path.join(*stringify(TEST_RESULT_DIR, clean_markus_address, assignment_id, group_id, 's{}'.format(submission_id or ''), run_time)) - os.makedirs(destination, exist_ok=True) - with open(os.path.join(destination, 'output.json'), 'w') as f: - json.dump(results_data, f, indent=4) - -def clear_working_directory(tests_path, test_username): - """ - Run commands that clear the tests_path working directory - """ - if test_username != current_user(): - chmod_cmd = "sudo -u {} -- bash -c 'chmod -Rf ugo+rwX {}'".format(test_username, tests_path) - else: - chmod_cmd = 'chmod -Rf ugo+rwX {}'.format(tests_path) - - subprocess.run(chmod_cmd, shell=True) - - # be careful not to remove the tests_path dir itself since we have to - # set the group ownership with sudo (and that is only done in ../install.sh) - clean_cmd = 'rm -rf {0}/.[!.]* {0}/*'.format(tests_path) - subprocess.run(clean_cmd, shell=True) - -def stop_tester_processes(test_username): - """ - Run a command that kills all tester processes either by killing all - user processes or killing with a reaper user (see https://lwn.net/Articles/754980/ - for reference). - """ - if test_username != current_user(): - if not kill_with_reaper(test_username): - kill_without_reaper(test_username) - -def finalize_results_data(results, error, all_hooks_error, time_to_service): - """ Return a dictionary of test script results combined with test run info """ - return {'test_groups' : results, - 'error' : error, - 'hooks_error' : all_hooks_error, - 'time_to_service' : time_to_service} - -def report(results_data, api, assignment_id, group_id, run_id): - """ Post the results of running test scripts to the markus api """ - api.upload_test_group_results(assignment_id, group_id, run_id, json.dumps(results_data)) - -@clean_after -def run_test(markus_address, server_api_key, test_categories, files_path, assignment_id, - group_id, group_repo_name, submission_id, run_id, enqueue_time): - """ - Run autotesting tests using the tests in the test_specs json file on the files in files_path. - - This function should be used by an rq worker. 
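A minimal sketch of how such a job reaches a worker. The queue name, redis connection, and most keyword arguments are assumed or omitted here; the real enqueuer performs the same enqueue_call with full validation.
```
import redis
import rq

# Hedged sketch: enqueue run_test the way the enqueuer script does.
queue = rq.Queue("single", connection=redis.Redis())
queue.enqueue_call(
    run_test,                                    # the function defined above
    kwargs={
        "markus_address": "http://localhost:3000", "run_id": 1,
        "assignment_id": 1, "group_id": 1, "enqueue_time": 0,
        # ... remaining run_test kwargs omitted for brevity
    },
    job_id="http://localhost:3000_1",            # _format_job_id(markus_address, run_id)
    timeout=45,                                  # 30s default test timeout * 1.5 multiplier
)
# A worker started as `rq worker single` (see generate_supervisord_conf.py)
# pops the job and invokes run_test with these keyword arguments.
```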
- """ - results = [] - error = None - hooks_error = None - time_to_service = int(round(time.time() - enqueue_time, 3) * 1000) - - test_script_path = test_script_directory(markus_address, assignment_id) - hooks_script_path = os.path.join(test_script_path, HOOKS_FILENAME) - test_specs_path = os.path.join(test_script_path, TEST_SCRIPTS_SETTINGS_FILENAME) - api = Markus(server_api_key, markus_address) - - with open(test_specs_path) as f: - test_specs = json.load(f) - - try: - job = rq.get_current_job() - update_pop_interval_stat(job.origin) - test_username, tests_path = tester_user() - hooks_kwargs = {'api': api, - 'assignment_id': assignment_id, - 'group_id': group_id} - testers = {settings['tester_type'] for settings in test_specs['testers']} - hooks = Hooks(hooks_script_path, testers, cwd=tests_path, kwargs=hooks_kwargs) - try: - setup_files(files_path, tests_path, markus_address, assignment_id) - cmd = test_run_command(test_username=test_username) - results, hooks_error = run_test_specs(cmd, - test_specs, - test_categories, - tests_path, - test_username, - hooks) - finally: - stop_tester_processes(test_username) - clear_working_directory(tests_path, test_username) - except Exception as e: - error = str(e) - finally: - results_data = finalize_results_data(results, error, hooks_error, time_to_service) - store_results(results_data, markus_address, assignment_id, group_id, submission_id) - report(results_data, api, assignment_id, group_id, run_id) - -### UPDATE TEST SCRIPTS ### - -def get_tester_root_dir(tester_type): - """ - Return the root directory of the tester named tester_type - """ - this_dir = os.path.dirname(os.path.abspath(__file__)) - root_dir = os.path.dirname(this_dir) - tester_dir = os.path.join(root_dir, 'testers', 'testers', tester_type) - if not os.path.isdir(tester_dir): - raise FileNotFoundError(f'{tester_type} is not a valid tester name') - return tester_dir - -def update_settings(settings, specs_dir): - """ - Return a dictionary containing all the default settings and the installation settings - contained in the tester's specs directory as well as the settings. The settings - will overwrite any duplicate keys in the default settings files. 
- """ - full_settings = {'install_data': {}} - install_settings_files = [os.path.join(specs_dir, 'install_settings.json')] - for settings_file in install_settings_files: - if os.path.isfile(settings_file): - with open(settings_file) as f: - full_settings['install_data'].update(json.load(f)) - full_settings.update(settings) - return full_settings - -def create_tester_environments(files_path, test_specs): - for i, settings in enumerate(test_specs['testers']): - tester_dir = get_tester_root_dir(settings["tester_type"]) - specs_dir = os.path.join(tester_dir, 'specs') - bin_dir = os.path.join(tester_dir, 'bin') - settings = update_settings(settings, specs_dir) - if settings.get('env_data'): - new_env_dir = tempfile.mkdtemp(prefix='env', dir=TEST_SPECS_DIR) - os.chmod(new_env_dir, 0o775) - settings['env_loc'] = new_env_dir - - create_file = os.path.join(bin_dir, 'create_environment.sh') - if os.path.isfile(create_file): - cmd = [f'{create_file}', json.dumps(settings), files_path] - proc = subprocess.run(cmd, stderr=subprocess.PIPE) - if proc.returncode != 0: - raise AutotestError(f'create tester environment failed with:\n{proc.stderr}') - else: - settings['env_loc'] = DEFAULT_ENV_DIR - test_specs['testers'][i] = settings - - return test_specs - -def destroy_tester_environments(old_test_script_dir): - test_specs_file = os.path.join(old_test_script_dir, TEST_SCRIPTS_SETTINGS_FILENAME) - with open(test_specs_file) as f: - test_specs = json.load(f) - for settings in test_specs['testers']: - env_loc = settings.get('env_loc', DEFAULT_ENV_DIR) - if env_loc != DEFAULT_ENV_DIR: - tester_dir = get_tester_root_dir(settings['tester_type']) - bin_dir = os.path.join(tester_dir, 'bin') - destroy_file = os.path.join(bin_dir, 'destroy_environment.sh') - if os.path.isfile(destroy_file): - cmd = [f'{destroy_file}', json.dumps(settings)] - proc = subprocess.run(cmd, stderr=subprocess.PIPE) - if proc.returncode != 0: - raise AutotestError(f'destroy tester environment failed with:\n{proc.stderr}') - shutil.rmtree(env_loc, onerror=ignore_missing_dir_error) - -@clean_after -def update_test_specs(files_path, assignment_id, markus_address, test_specs): - """ - Copy new test scripts for a given assignment to from the files_path - to a new location. Indicate that these new test scripts should be used instead of - the old ones. And remove the old ones when it is safe to do so (they are not in the - process of being copied to a working directory). - - This function should be used by an rq worker. 
- """ - # TODO: catch and log errors - test_script_dir_name = "test_scripts_{}".format(int(time.time())) - clean_markus_address = clean_dir_name(markus_address) - new_dir = os.path.join(*stringify(TEST_SCRIPT_DIR, clean_markus_address, assignment_id, test_script_dir_name)) - new_files_dir = os.path.join(new_dir, TEST_SCRIPTS_FILES_DIRNAME) - move_tree(files_path, new_files_dir) - if 'hooks_file' in test_specs: - src = os.path.isfile(os.path.join(new_files_dir, test_specs['hooks_file'])) - if os.path.isfile(src): - os.rename(src, os.path.join(new_dir, HOOKS_FILENAME)) - test_specs = create_tester_environments(new_files_dir, test_specs) - settings_filename = os.path.join(new_dir, TEST_SCRIPTS_SETTINGS_FILENAME) - with open(settings_filename, 'w') as f: - json.dump(test_specs, f) - old_test_script_dir = test_script_directory(markus_address, assignment_id) - test_script_directory(markus_address, assignment_id, set_to=new_dir) - - if old_test_script_dir is not None: - with fd_open(old_test_script_dir) as fd: - with fd_lock(fd, exclusive=True): - destroy_tester_environments(old_test_script_dir) - shutil.rmtree(old_test_script_dir, onerror=ignore_missing_dir_error) - diff --git a/server/bin/install.sh b/server/bin/install.sh deleted file mode 100755 index bde181fd..00000000 --- a/server/bin/install.sh +++ /dev/null @@ -1,262 +0,0 @@ -#!/usr/bin/env bash - -set -e - -install_packages() { - echo "[AUTOTEST-INSTALL] Installing system packages" - sudo apt-get install "python${PYTHONVERSION}" "python${PYTHONVERSION}-venv" redis-server jq postgresql -} - -create_server_user() { - if [[ -z ${SERVERUSER} ]]; then - echo "[AUTOTEST-INSTALL] No dedicated server user, using '${THISUSER}'" - mkdir -p ${WORKSPACEDIR} - else - if id ${SERVERUSER} &> /dev/null; then - echo "[AUTOTEST-INSTALL] Using existing server user '${SERVERUSER}'" - else - echo "[AUTOTEST-INSTALL] Creating server user '${SERVERUSER}'" - sudo adduser --disabled-password ${SERVERUSER} - fi - sudo mkdir -p ${WORKSPACEDIR} - sudo chown ${SERVERUSER}:${SERVERUSER} ${WORKSPACEDIR} - sudo chmod u=rwx,go=rx ${WORKSPACEDIR} - fi -} - -create_unprivileged_user() { - local username=$1 - local usertype=$2 - - if id ${username} &> /dev/null; then - echo "[AUTOTEST-INSTALL] Reusing existing ${usertype} user '${username}'" - else - echo "[AUTOTEST-INSTALL] Creating ${usertype} user '${username}'" - sudo adduser --disabled-login --no-create-home ${username} - fi - sudo iptables -I OUTPUT -p tcp --dport 6379 -m owner --uid-owner ${username} -j REJECT - echo "${SERVERUSEREFFECTIVE} ALL=(${username}) NOPASSWD:ALL" | sudo EDITOR="tee -a" visudo -} - -create_worker_dir() { - local workeruser=$1 - local workerdir=${WORKERSSDIR}/${workeruser} - - sudo mkdir -p ${workerdir} - sudo chown ${SERVERUSEREFFECTIVE}:${workeruser} ${workerdir} - sudo chmod ug=rwx,o=,+t ${workerdir} - redis-cli HSET ${REDISWORKERS} ${workeruser} ${workerdir} -} - -create_worker_and_reaper_users() { - redis-cli DEL ${REDISWORKERS} > /dev/null - if [[ -z ${WORKERUSERS} ]]; then - echo "[AUTOTEST-INSTALL] No dedicated worker user, using '${SERVERUSEREFFECTIVE}'" - create_worker_dir ${SERVERUSEREFFECTIVE} - else - for workeruser in ${WORKERUSERS}; do - create_unprivileged_user ${workeruser} worker - create_worker_dir ${workeruser} - if [[ -n ${REAPERPREFIX} ]]; then - local reaperuser="${REAPERPREFIX}${workeruser}" - create_unprivileged_user ${reaperuser} reaper - sudo usermod -g ${workeruser} ${reaperuser} - fi - done - fi -} - -create_workspace_dirs() { - echo "[AUTOTEST-INSTALL] Creating 
workspace directories at '${WORKSPACEDIR}'" - sudo mkdir -p ${RESULTSDIR} - sudo mkdir -p ${SCRIPTSDIR} - sudo mkdir -p ${SPECSDIR} - sudo mkdir -p ${WORKERSSDIR} - sudo mkdir -p ${LOGSDIR} - sudo chown ${SERVERUSEREFFECTIVE}:${SERVERUSEREFFECTIVE} ${RESULTSDIR} ${SCRIPTSDIR} ${SPECSDIR} ${WORKERSSDIR} ${LOGSDIR} - sudo chmod u=rwx,go= ${RESULTSDIR} ${SCRIPTSDIR} ${LOGSDIR} - sudo chmod u=rwx,go=rx ${SPECSDIR} ${WORKERSSDIR} -} - -install_venv() { - local servervenv=${SERVERDIR}/venv - - echo "[AUTOTEST-INSTALL] Installing server virtual environment at '${servervenv}'" - rm -rf ${servervenv} - "python${PYTHONVERSION}" -m venv ${servervenv} - source ${servervenv}/bin/activate - pip install wheel # must be installed before requirements - pip install -r ${BINDIR}/requirements.txt - deactivate -} - -install_default_tester_venv() { - local defaultvenv=${SPECSDIR}/$(get_config_param DEFAULT_ENV_NAME)/venv - local pth_file=${defaultvenv}/lib/python${PYTHONVERSION}/site-packages/testers.pth - - echo "[AUTOTEST-INSTALL] Installing default tester virtual environment at '${defaultvenv}'" - rm -rf ${defaultvenv} - "python${PYTHONVERSION}" -m venv ${defaultvenv} - echo ${TESTERSDIR} >| ${pth_file} - source ${defaultvenv}/bin/activate - pip install wheel - pip install -r ${BINDIR}/default_tester_requirements.txt - deactivate -} - -start_workers() { - local servervenv=${SERVERDIR}/venv/bin/activate - local supervisorconf=${LOGSDIR}/supervisord.conf - if [[ -z ${WORKERUSERS} ]]; then - local worker_users=${SERVERUSEREFFECTIVE} - else - local worker_users=${WORKERUSERS} - fi - - echo "[AUTOTEST-INSTALL] Generating supervisor config at '${supervisorconf}' and starting rq workers" - sudo -u ${SERVERUSEREFFECTIVE} -- bash -c "source ${servervenv} && - ${SERVERDIR}/generate_supervisord_conf.py ${supervisorconf} ${worker_users} && - cd ${LOGSDIR} && - supervisord -c ${supervisorconf} && - deactivate" -} - -create_worker_dbs() { - echo "[AUTOTEST-INSTALL] Creating databases for worker users" - local serverpwd=$(openssl rand -base64 32 | tr -dc 'a-zA-Z0-9' | cut -c1-15) - local pgpassfile=${LOGSDIR}/.pgpass - local pgport=$(sudo -u postgres psql -t -P format=unaligned -c "select setting from pg_settings where name = 'port';") - sudo touch ${pgpassfile} - sudo chown ${SERVERUSEREFFECTIVE}:${SERVERUSEREFFECTIVE} ${pgpassfile} - sudo chmod 600 ${pgpassfile} - sudo -u postgres psql <<-EOF - DROP ROLE IF EXISTS ${SERVERUSEREFFECTIVE}; - CREATE ROLE ${SERVERUSEREFFECTIVE} LOGIN PASSWORD '${serverpwd}'; - ALTER ROLE ${SERVERUSEREFFECTIVE} CREATEROLE; - EOF - echo -e "${serverpwd}" | sudo -u ${SERVERUSEREFFECTIVE} tee -a ${pgpassfile} > /dev/null - if [[ -z ${WORKERUSERS} ]]; then - local database="${POSTGRESPREFIX}${SERVERUSEREFFECTIVE}" - sudo -u postgres psql <<-EOF - DROP DATABASE IF EXISTS ${database}; - CREATE DATABASE ${database} OWNER ${SERVERUSEREFFECTIVE}; - REVOKE CONNECT ON DATABASE ${database} FROM PUBLIC; - EOF - else - for workeruser in ${WORKERUSERS}; do - local database="${POSTGRESPREFIX}${workeruser}" - sudo -u postgres psql <<-EOF - DROP DATABASE IF EXISTS ${database}; - DROP ROLE IF EXISTS ${workeruser}; - CREATE ROLE ${workeruser} LOGIN PASSWORD null; - CREATE DATABASE ${database} OWNER ${SERVERUSEREFFECTIVE}; - REVOKE CONNECT ON DATABASE ${database} FROM PUBLIC; - GRANT CONNECT, CREATE ON DATABASE ${database} TO ${workeruser}; - EOF - done - fi -} - -compile_reaper_script() { - local reaperexe="${BINDIR}/kill_worker_procs" - - echo "[AUTOTEST-INSTALL] Compiling reaper script at '${reaperexe}'" - gcc 
"${reaperexe}.c" -o ${reaperexe} - chmod ugo=r ${reaperexe} -} - -create_enqueuer_wrapper() { - local enqueuer=/usr/local/bin/autotest_enqueuer - - echo "[AUTOTEST-INSTALL] Creating enqueuer wrapper at '${enqueuer}'" - # this heredoc requires actual tabs - cat <<-EOF | sudo tee ${enqueuer} > /dev/null - #!/usr/bin/env bash - - source ${SERVERDIR}/venv/bin/activate - ${SERVERDIR}/autotest_enqueuer.py "\$@" - EOF - sudo chown ${SERVERUSEREFFECTIVE}:${SERVERUSEREFFECTIVE} ${enqueuer} - sudo chmod u=rwx,go=r ${enqueuer} -} - -create_markus_config() { - local serverconf="" - if [[ -n ${SERVERUSER} ]]; then - serverconf="'${SERVERUSER}'" - else - serverconf="nil" - fi - - echo "[AUTOTEST-INSTALL] Creating Markus web server config snippet at 'markus_config.rb'" - echo " - AUTOTEST_ON = true - AUTOTEST_STUDENT_TESTS_ON = false - AUTOTEST_STUDENT_TESTS_BUFFER_TIME = 1.hour - AUTOTEST_CLIENT_DIR = 'TODO_markus_dir' - AUTOTEST_SERVER_HOST = '$(hostname).$(dnsdomainname)' - AUTOTEST_SERVER_USERNAME = ${serverconf} - AUTOTEST_SERVER_DIR = '${WORKSPACEDIR}' - AUTOTEST_SERVER_COMMAND = 'autotest_enqueuer' - " >| markus_config.rb -} - -suggest_next_steps() { - if [[ -n ${SERVERUSER} ]]; then - echo "[AUTOTEST-INSTALL] You must add MarkUs web server's public key to ${SERVERUSER}'s '~/.ssh/authorized_keys'" - fi - echo "[AUTOTEST-INSTALL] You may want to add 'source ${SERVERDIR}/venv/bin/activate && cd ${WORKSPACEDIR} && supervisord -c ${SERVERDIR}/supervisord.conf && deactivate' to ${SERVERUSEREFFECTIVE}'s crontab with a @reboot time" - echo "[AUTOTEST-INSTALL] You should install the individual testers you plan to use" -} - -get_config_param() { - echo $(cd ${SERVERDIR} && python3 -c "import config; print(config.$1)") -} - -# script starts here -if [[ $# -gt 0 ]]; then - echo "Usage: $0" - exit 1 -fi - -# vars -THISSCRIPT=$(readlink -f ${BASH_SOURCE}) -BINDIR=$(dirname ${THISSCRIPT}) -SERVERDIR=$(dirname ${BINDIR}) -TESTERSDIR=$(dirname ${SERVERDIR})/testers -THISUSER=$(whoami) -PYTHONVERSION="3.8" - -# install python here so we can parse arguments from the config file more easily -install_packages - -SERVERUSER=$(get_config_param SERVER_USER) -if [[ -n ${SERVERUSER} ]]; then - SERVERUSEREFFECTIVE=${SERVERUSER} -else - SERVERUSEREFFECTIVE=${THISUSER} -fi -WORKERUSERS=$(get_config_param WORKER_USERS) -WORKSPACEDIR=$(get_config_param WORKSPACE_DIR) -SPECSDIR=${WORKSPACEDIR}/$(get_config_param SPECS_DIR_NAME) -RESULTSDIR=${WORKSPACEDIR}/$(get_config_param RESULTS_DIR_NAME) -SCRIPTSDIR=${WORKSPACEDIR}/$(get_config_param SCRIPTS_DIR_NAME) -WORKERSSDIR=${WORKSPACEDIR}/$(get_config_param WORKERS_DIR_NAME) -LOGSDIR=${WORKSPACEDIR}/$(get_config_param LOGS_DIR_NAME) -REDISPREFIX=$(get_config_param REDIS_PREFIX) -REDISWORKERS=${REDISPREFIX}$(get_config_param REDIS_WORKERS_HASH) -REAPERPREFIX=$(get_config_param REAPER_USER_PREFIX) -POSTGRESPREFIX=$(get_config_param POSTGRES_PREFIX) - -# main -create_server_user -create_worker_and_reaper_users -create_workspace_dirs -create_worker_dbs -install_venv -install_default_tester_venv -compile_reaper_script -create_enqueuer_wrapper -create_markus_config -start_workers -suggest_next_steps diff --git a/server/bin/requirements.txt b/server/bin/requirements.txt deleted file mode 100644 index 77ac59b5..00000000 --- a/server/bin/requirements.txt +++ /dev/null @@ -1,9 +0,0 @@ -redis>=3.3.11 -requests>=2.22.0 -rq>=1.1.0 -supervisor>=4.1.0 -PyYAML>=5.2 -psycopg2-binary>=2.8.4 -markusapi>=0.0.1 -jsonschema>=3.2.0 -fakeredis>=1.1.0 diff --git a/server/config.py b/server/config.py deleted 
file mode 100755 index 52e79211..00000000 --- a/server/config.py +++ /dev/null @@ -1,84 +0,0 @@ -#!/usr/bin/env python3 - -#### CHANGE CONFIG PARAMETERS BELOW #### - -## REDIS CONFIGS ## - -# name of redis hash used to store the locations of test script directories -REDIS_CURRENT_TEST_SCRIPT_HASH = 'curr_test_scripts' -# name of redis hash used to store pop interval data for each worker queue -REDIS_POP_HASH = 'pop_intervals' -# name of redis hash used to store workers data (username and worker directory) -REDIS_WORKERS_HASH = 'workers' -# name of redis integer used to access the next available port -REDIS_PORT_INT = 'ports' -# dictionary containing keyword arguments to pass to rq.use_connection -# when connecting to a redis database (empty dictionary is default) -REDIS_CONNECTION_KWARGS = {} -# prefix to prepend to all redis keys generated by the autotester -REDIS_PREFIX = 'autotest:' -# prefix to prepend to all postgres databases created -POSTGRES_PREFIX = 'autotest_' - -## WORKING DIR CONFIGS ## - -# the main working directory -WORKSPACE_DIR = '/home/vagrant/markus-autotesting/server/workspace' -# name of the directory containing test scripts -SCRIPTS_DIR_NAME = 'scripts' -# name of the directory containing test results -RESULTS_DIR_NAME = 'results' -# name of the directory containing specs files -SPECS_DIR_NAME = 'specs' -# name of the directory containing workspaces for the workers -WORKERS_DIR_NAME = 'workers' -# name of the directory containing log files -LOGS_DIR_NAME = 'logs' -# name of the server user -SERVER_USER = '' -# names of the worker users -WORKER_USERS = 'autotst0 autotst1 autotst2 autotst3 autotst4 autotst5 autotst6 autotst7' -# prefix used to name reaper users -# (reapers not used to kill worker processes if set to the empty string) -REAPER_USER_PREFIX = '' -# default tester environment name -DEFAULT_ENV_NAME = 'defaultenv' - -## RLIMIT SETTINGS FOR TESTER PROCESSES ## - -# values are: (soft limit, hard limit) -# see https://docs.python.org/3/library/resource.html for reference on limit options -# NOTE: these limits cannot be higher than the limits set for the tester user in -# /etc/security/limits.conf (or similar). These limits may be reduced in certain -# cases (see the docstring for get_test_preexec_fn and get_cleanup_preexec_fn in -# autotest_server.py) -RLIMIT_SETTINGS = { - 'RLIMIT_NPROC': (300, 300) -} - -### QUEUE CONFIGS ### - -# functions used to select which type of queue to use. They must accept any number -# of keyword arguments and should only return a boolean (see autotest_enqueuer._get_queue) -def batch_filter(**kwargs): - return kwargs.get('batch_id') is not None - -def single_filter(**kwargs): - return kwargs.get('user_type') == 'Admin' and not batch_filter(**kwargs) - -def student_filter(**kwargs): - return kwargs.get('user_type') == 'Student' and not batch_filter(**kwargs) - -# list of worker queues. 
Values of each are a string indicating the queue name, -# and a function used to select whether or not to use this type of queue -# (see autotest_enqueuer._get_queue) -batch_queue = {'name': 'batch', 'filter': batch_filter} -single_queue = {'name': 'single', 'filter': single_filter} -student_queue = {'name': 'student', 'filter': student_filter} -WORKER_QUEUES = [batch_queue, single_queue, student_queue] - -### WORKER CONFIGS ### - -WORKERS = [(4, [student_queue['name'], single_queue['name'], batch_queue['name']]), - (2, [single_queue['name'], student_queue['name'], batch_queue['name']]), - (2, [batch_queue['name'], student_queue['name'], single_queue['name']])] diff --git a/server/generate_supervisord_conf.py b/server/generate_supervisord_conf.py deleted file mode 100755 index 141ddb63..00000000 --- a/server/generate_supervisord_conf.py +++ /dev/null @@ -1,73 +0,0 @@ -#!/usr/bin/env python3 - -import config -import sys -import os -import shutil -import argparse - -HEADER = """[supervisord] - -[supervisorctl] - -[inet_http_server] -port = 127.0.0.1:9001 - -[rpcinterface:supervisor] -supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface - -""" - -CONTENT = """[program:rq_worker_{worker_user}] -environment=MARKUSWORKERUSER={worker_user} -command={rq} worker {worker_args} {queues} -process_name=rq_worker_{worker_user} -numprocs={numprocs} -directory={directory} -stopsignal=TERM -autostart=true -autorestart=true -stopasgroup=true -killasgroup=true - -""" - -THIS_DIR = os.path.dirname(os.path.abspath(__file__)) - -def write_conf_file(conf_filename, user_names): - try: - rkw = config.REDIS_CONNECTION_KWARGS - redis_url = '--url redis://{}:{}/{}'.format(rkw['host'], rkw['port'], rkw['db']) - except KeyError: - redis_url = '' - - with open(conf_filename, 'w') as f: - f.write(HEADER) - user_name_set = set(user_names) - enough_users = True - for numprocs, queues in config.WORKERS: - if enough_users: - for _ in range(numprocs): - try: - worker_user = user_name_set.pop() - except KeyError: - msg = f'[AUTOTEST] Not enough worker users to create all rq workers.' - sys.stderr.write(f'{msg}\n') - enough_users = False - break - queue_str = ' '.join(queues) - c = CONTENT.format(worker_user=worker_user, - rq=shutil.which('rq'), - worker_args=redis_url, - queues=queue_str, - numprocs=1, - directory=THIS_DIR) - f.write(c) - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('conf_filename') - parser.add_argument('user_names', nargs='+') - args = parser.parse_args() - - write_conf_file(args.conf_filename, args.user_names) diff --git a/server/hooks_context/builtin_hooks.py b/server/hooks_context/builtin_hooks.py deleted file mode 100644 index 8b117989..00000000 --- a/server/hooks_context/builtin_hooks.py +++ /dev/null @@ -1,75 +0,0 @@ -""" -Builtin hooks used by hooks_context.Hooks -""" - -import os -import sys -import json -import glob -from pathlib import Path -from hooks_context.utils import add_path - -HOOKS = {'upload_feedback_file' : {'context': 'after_each'}, - 'upload_feedback_to_repo' : {'requires': ['clear_feedback_file'], - 'context': 'after_each'}, - 'upload_annotations' : {'context': 'after_each'}, - 'clear_feedback_file' : {'context': 'before_each'}} - - -def clear_feedback_file(test_data, **kwargs): - """ - Remove any previous feedback file before the tests run. 
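Together with the upload hooks defined below, the intended round trip for one test group looks roughly like this; the file name, ids, and api object are placeholders.
```
test_data = {"feedback_file_name": "feedback.txt"}   # from the test settings

clear_feedback_file(test_data)                       # before_each: remove a stale file
# ... the tester runs and may (re)create feedback.txt in the working directory ...
upload_feedback_file(api, assignment_id=1, group_id=1, test_data=test_data)
# after_each: if feedback.txt now exists, its contents are uploaded through
# the MarkUs API object passed in as `api`.
```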
- """ - feedback_file = test_data.get('feedback_file_name', '') - if os.path.isfile(feedback_file): - os.remove(feedback_file) - - -def upload_feedback_to_repo(api, assignment_id, group_id, test_data, **kwargs): - """ - Upload the feedback file to the group's repo. - """ - feedback_file = test_data.get('feedback_file_name', '') - if os.path.isfile(feedback_file): - with open(feedback_file) as feedback_open: - api.upload_file_to_repo(assignment_id, group_id, feedback_file, feedback_open.read()) - - -def upload_feedback_file(api, assignment_id, group_id, test_data, **kwargs): - """ - Upload the feedback file using MarkUs' api. - """ - feedback_file = test_data.get('feedback_file_name', '') - if os.path.isfile(feedback_file): - with open(feedback_file) as feedback_open: - api.upload_feedback_file(assignment_id, group_id, feedback_file, feedback_open.read()) - - -def upload_annotations(api, assignment_id, group_id, test_data, **kwargs): - """ - Upload annotations using MarkUs' api. - """ - annotations_name = test_data.get('annotation_file', '') - if os.path.isfile(annotations_name): - with open(annotations_name) as annotations_open: - api.upload_annotations(assignment_id, group_id, json.load(annotations_open)) - -## DEFAULT TESTER HOOKS ## - - -def _load_default_hooks(): - """ - Return a dictionary containing all hooks loaded from any default_hooks.py - files in any of the bin/ directories for each tester. - """ - glob_pat = os.path.join(Path(__file__).resolve().parents[2], 'testers', 'testers', '*', 'bin', 'default_hooks.py') - defaults = {} - for hooks_file in glob.glob(glob_pat): - bin_dir = os.path.dirname(hooks_file) - with add_path(bin_dir): - default_hooks = __import__('default_hooks') - for hook in default_hooks.HOOKS: - defaults[hook.__name__] = hook - return defaults - -DEFAULT_HOOKS = _load_default_hooks() diff --git a/server/rq_fail_queue_contents.py b/server/rq_fail_queue_contents.py deleted file mode 100644 index dc0af95b..00000000 --- a/server/rq_fail_queue_contents.py +++ /dev/null @@ -1,5 +0,0 @@ -import autotest_server as ats - -with ats.rq.Connection(ats.redis_connection()): - for job in ats.rq.get_failed_queue().jobs: - print(job.exc_info) \ No newline at end of file diff --git a/server/tests/autotest_enqueuer_test.py b/server/tests/autotest_enqueuer_test.py deleted file mode 100644 index a91c783e..00000000 --- a/server/tests/autotest_enqueuer_test.py +++ /dev/null @@ -1,371 +0,0 @@ -import sys -import os -import json -import re -import pytest -import inspect -import tempfile -import rq -import glob -from unittest.mock import patch, ANY, Mock -from contextlib import contextmanager -from fakeredis import FakeStrictRedis -from tests import config_default - -sys.path.append('..') -import autotest_enqueuer as ate # noqa: E402 -import autotest_server as ats # noqa: E402 -ate.config = config_default -ats.config = config_default - - -@pytest.fixture(autouse=True) -def redis(): - fake_redis = FakeStrictRedis() - with patch('autotest_server.redis_connection', return_value=fake_redis): - yield fake_redis - - -@contextmanager -def tmp_script_dir(settings_dict): - with tempfile.TemporaryDirectory() as tmp_dir: - files_dir = os.path.join(tmp_dir, 'files') - os.mkdir(files_dir) - with open(os.path.join(files_dir, '.gitkeep'), 'w') as f: - pass - with open(os.path.join(tmp_dir, 'settings.json'), 'w') as f: - json.dump(settings_dict, f) - with patch('autotest_server.test_script_directory', return_value=tmp_dir): - yield tmp_dir - - -@pytest.fixture(autouse=True) -def 
empty_test_script_dir(request): - if 'no_test_script_dir' in request.keywords: - yield - else: - empty_settings = {"testers": [{"test_data": []}]} - with tmp_script_dir(empty_settings) as tmp_dir: - yield tmp_dir - - -@pytest.fixture -def non_existant_test_script_dir(): - with patch('autotest_server.test_script_directory', return_value=None): - yield - - -@pytest.fixture -def pop_interval(): - with patch('autotest_server.get_avg_pop_interval', return_value=None): - yield - - -@pytest.fixture(autouse=True) -def mock_rmtree(): - with patch('shutil.rmtree') as rm: - yield rm - - -@pytest.fixture(autouse=True) -def mock_enqueue_call(): - with patch('rq.Queue.enqueue_call') as enqueue_func: - yield enqueue_func - - -class DummyTestError(Exception): - pass - - -class TestRunTest: - - def get_kwargs(self, **kw): - param_kwargs = {k: '' for k in inspect.signature(ats.run_test).parameters} - return {**param_kwargs, **kw} - - def test_fails_missing_required_args(self): - try: - ate.run_test('Admin', 1) - except ate.JobArgumentError: - return - except ate.MarkUsError as e: - pytest.fail(f'should have failed because kwargs are missing but instead failed with: {e}') - pytest.fail('should have failed because kwargs are missing') - - def test_accepts_same_kwargs_as_server_run_test_method(self): - try: - ate.run_test('Admin', 1, **self.get_kwargs()) - except ate.JobArgumentError: - pytest.fail('should not have failed because kwargs are not missing') - except ate.MarkUsError: - pass - - def test_fails_if_cannot_find_valid_queue(self): - try: - ate.run_test('Tim', None, **self.get_kwargs()) - except ate.InvalidQueueError: - return - except ate.MarkUsError as e: - pytest.fail(f'should have failed because a valid queue is not found but instead failed with: {e}') - pytest.fail('should have failed because a valid queue is not found') - - def test_can_find_valid_queue(self): - try: - ate.run_test('Admin', 1, **self.get_kwargs()) - except ate.InvalidQueueError: - pytest.fail('should not have failed because a valid queue is available') - except ate.MarkUsError: - pass - - def test_fails_if_test_files_do_not_exist(self, non_existant_test_script_dir): - try: - ate.run_test('Admin', 1, **self.get_kwargs()) - except ate.TestScriptFilesError: - return - except ate.MarkUsError as e: - pytest.fail(f'should have failed because no test scripts could be found but instead failed with: {e}') - pytest.fail('should have failed because no test scripts could be found') - - def test_can_find_test_files(self): - try: - ate.run_test('Admin', 1, **self.get_kwargs()) - except ate.TestScriptFilesError: - pytest.fail('should not have failed because no test scripts could be found') - except ate.MarkUsError: - pass - - def test_writes_queue_info_to_stdout(self, capfd, pop_interval): - try: - ate.run_test('Admin', 1, **self.get_kwargs()) - except ate.MarkUsError: - pass - out, _err = capfd.readouterr() - assert re.search(r'^\d+$', out) - - def test_fails_if_no_tests_groups(self): - try: - ate.run_test('Admin', 1, **self.get_kwargs()) - except ate.TestParameterError: - return - except ate.MarkUsError: - pass - - @pytest.mark.no_test_script_dir - def test_fails_if_no_groups_in_category(self): - settings = {"testers": [{"test_data": [{"category": ['admin']}]}]} - with tmp_script_dir(settings): - try: - ate.run_test('Admin', 1, **self.get_kwargs(test_categories=['student'])) - except ate.TestParameterError: - return - except ate.MarkUsError: - pass - - @pytest.mark.no_test_script_dir - def test_can_find_tests_in_given_category(self): - 
settings = {"testers": [{"test_data": [{"category": ['admin'], "timeout": 30}]}]} - with tmp_script_dir(settings): - try: - ate.run_test('Admin', 1, **self.get_kwargs(test_categories=['admin'])) - except ate.TestParameterError: - pytest.fail('should not have failed to find an admin test') - except ate.MarkUsError: - pass - - @pytest.mark.no_test_script_dir - def test_can_enqueue_test_with_timeout(self, mock_enqueue_call): - settings = {"testers": [{"test_data": [{"category": ['admin'], "timeout": 10}]}]} - with tmp_script_dir(settings): - ate.run_test('Admin', 1, **self.get_kwargs(test_categories=['admin'])) - mock_enqueue_call.assert_called_with(ANY, kwargs=ANY, job_id=ANY, timeout=15) - - def test_cleans_up_files_on_error(self, mock_rmtree): - try: - ate.run_test('Admin', 1, **self.get_kwargs(files_path='something')) - except Exception: - mock_rmtree.assert_called_once() - else: - pytest.fail('This call to run_test should have failed. See other failures for details') - - -@pytest.fixture -def update_test_specs(): - with patch('autotest_server.update_test_specs') as mock_func: - yield mock_func - - -class TestUpdateSpecs: - - def get_kwargs(self, **kw): - param_kwargs = {k: '' for k in inspect.signature(ats.update_test_specs).parameters} - return {**param_kwargs, **kw} - - def test_fails_when_schema_is_invalid(self, update_test_specs): - with patch('form_validation.validate_with_defaults', return_value=['something']): - with patch('form_validation.best_match', return_value=DummyTestError('error')): - try: - ate.update_specs({}, **self.get_kwargs(schema={})) - except DummyTestError: - return - pytest.fail('should have failed because the form is invalid') - - def test_succeeds_when_schema_is_valid(self, update_test_specs): - with patch('form_validation.validate_with_defaults', return_value=[]): - with patch('form_validation.best_match', return_value=DummyTestError('error')): - try: - ate.update_specs({}, **self.get_kwargs(schema={})) - except DummyTestError: - pytest.fail('should not have failed because the form is valid') - - def test_calls_update_test_specs(self, update_test_specs): - with patch('form_validation.validate_with_defaults', return_value=[]): - with patch('form_validation.best_match', return_value=DummyTestError('error')): - ate.update_specs({}, **self.get_kwargs(schema={})) - update_test_specs.assert_called_once() - - def test_cleans_up_files_on_error(self, mock_rmtree): - with patch('form_validation.validate_with_defaults', side_effect=Exception): - try: - ate.update_specs({}, **self.get_kwargs(schema={}, files_path='something')) - except Exception: - mock_rmtree.assert_called_once() - else: - pytest.fail('This call to update_specs should have failed. 
See other failures for details') - - -@pytest.fixture -def mock_rq_job(): - with patch('rq.job.Job') as job: - enqueued_job = Mock() - job.fetch.return_value = enqueued_job - yield job, enqueued_job - - -class TestCancelTest: - - def test_do_nothing_if_job_does_not_exist(self, mock_rq_job): - Job, mock_job = mock_rq_job - Job.fetch.side_effect = rq.exceptions.NoSuchJobError - ate.cancel_test('something', [1]) - mock_job.cancel.assert_not_called() - - def test_do_nothing_if_job_not_enqueued(self, mock_rq_job): - _, mock_job = mock_rq_job - mock_job.is_queued.return_value = False - ate.cancel_test('something', [1]) - mock_job.cancel.assert_not_called() - - def test_cancel_job(self, mock_rq_job): - _, mock_job = mock_rq_job - mock_job.is_queued.return_value = True - mock_job.kwargs = {'files_path': None} - ate.cancel_test('something', [1]) - mock_job.cancel.assert_called_once() - - def test_remove_files_when_cancelling(self, mock_rq_job, mock_rmtree): - _, mock_job = mock_rq_job - mock_job.is_queued.return_value = True - files_path = 'something' - mock_job.kwargs = {'files_path': files_path} - ate.cancel_test('something', [1]) - mock_rmtree.assert_called_once_with(files_path, onerror=ANY) - - def test_cancel_multiple_jobs(self, mock_rq_job): - _, mock_job = mock_rq_job - mock_job.is_queued.return_value = True - mock_job.kwargs = {'files_path': None} - ate.cancel_test('something', [1, 2]) - assert mock_job.cancel.call_count == 2 - - def test_remove_files_when_cancelling_multiple_jobs(self, mock_rq_job, mock_rmtree): - _, mock_job = mock_rq_job - mock_job.is_queued.return_value = True - files_path = 'something' - mock_job.kwargs = {'files_path': files_path} - ate.cancel_test('something', [1, 2]) - assert mock_rmtree.call_count == 2 - - -class TestGetSchema: - - def fake_installed_testers(self, installed): - server_dir = os.path.dirname(os.path.abspath(ate.__file__)) - root_dir = os.path.dirname(server_dir) - paths = [] - for tester in installed: - glob_pattern = os.path.join(root_dir, 'testers', 'testers', tester, 'specs') - paths.append(os.path.join(glob.glob(glob_pattern)[0], '.installed')) - return paths - - def assert_tester_in_schema(self, tester, schema): - assert tester in schema["definitions"]["installed_testers"]["enum"] - installed = [] - for option in schema['definitions']['tester_schemas']['oneOf']: - installed.append(option['properties']['tester_type']['enum'][0]) - assert tester in installed - - def test_prints_skeleton_when_none_installed(self, capfd): - with patch('glob.glob', return_value=[]): - ate.get_schema() - out, _err = capfd.readouterr() - schema = json.loads(out) - server_dir = os.path.dirname(os.path.abspath(ate.__file__)) - with open(os.path.join(server_dir, 'bin', 'tester_schema_skeleton.json')) as f: - skeleton = json.load(f) - assert schema == skeleton - - def test_prints_test_schema_when_one_installed(self, capfd): - with patch('glob.glob', return_value=self.fake_installed_testers(['custom'])): - ate.get_schema() - out, _err = capfd.readouterr() - schema = json.loads(out) - self.assert_tester_in_schema('custom', schema) - - def test_prints_test_schema_when_multiple_installed(self, capfd): - with patch('glob.glob', return_value=self.fake_installed_testers(['custom', 'py'])): - ate.get_schema() - out, _err = capfd.readouterr() - schema = json.loads(out) - self.assert_tester_in_schema('custom', schema) - self.assert_tester_in_schema('py', schema) - - -class TestParseArgFile: - - @pytest.mark.no_test_script_dir - def test_loads_arg_file(self): - settings = {'some': 
'data'} - with tmp_script_dir(settings) as tmp_dir: - arg_file = os.path.join(tmp_dir, 'settings.json') - kwargs = ate.parse_arg_file(arg_file) - try: - kwargs.pop('files_path') - except KeyError: - pass - assert settings == kwargs - - @pytest.mark.no_test_script_dir - def test_remove_arg_file(self): - settings = {'some': 'data'} - with tmp_script_dir(settings) as tmp_dir: - arg_file = os.path.join(tmp_dir, 'settings.json') - ate.parse_arg_file(arg_file) - assert not os.path.isfile(arg_file) - - @pytest.mark.no_test_script_dir - def test_adds_file_path_if_not_present(self): - settings = {'some': 'data'} - with tmp_script_dir(settings) as tmp_dir: - arg_file = os.path.join(tmp_dir, 'settings.json') - kwargs = ate.parse_arg_file(arg_file) - assert 'files_path' in kwargs - assert os.path.realpath(kwargs['files_path']) == os.path.realpath(tmp_dir) - - @pytest.mark.no_test_script_dir - def test_does_not_add_file_path_if_present(self): - settings = {'some': 'data', 'files_path': 'something'} - with tmp_script_dir(settings) as tmp_dir: - arg_file = os.path.join(tmp_dir, 'settings.json') - kwargs = ate.parse_arg_file(arg_file) - assert 'files_path' in kwargs - assert kwargs['files_path'] == 'something' diff --git a/server/tests/config_default.py b/server/tests/config_default.py deleted file mode 100755 index f66a88db..00000000 --- a/server/tests/config_default.py +++ /dev/null @@ -1,88 +0,0 @@ -#!/usr/bin/env python3 - -#### CHANGE CONFIG PARAMETERS BELOW #### - -## REDIS CONFIGS ## - -# name of redis hash used to store the locations of test script directories -REDIS_CURRENT_TEST_SCRIPT_HASH = 'curr_test_scripts' -# name of redis hash used to store pop interval data for each worker queue -REDIS_POP_HASH = 'pop_intervals' -# name of redis hash used to store workers data (username and worker directory) -REDIS_WORKERS_HASH = 'workers' -# name of redis integer used to access the next available port -REDIS_PORT_INT = 'ports' -# dictionary containing keyword arguments to pass to rq.use_connection -# when connecting to a redis database (empty dictionary is default) -REDIS_CONNECTION_KWARGS = {} -# prefix to prepend to all redis keys generated by the autotester -REDIS_PREFIX = 'autotest:' -# prefix to prepend to all postgres databases created -POSTGRES_PREFIX = 'autotest_' - -## WORKING DIR CONFIGS ## - -# the main working directory -WORKSPACE_DIR = '/home/vagrant/markus-autotesting/server/workspace' -# name of the directory containing test scripts -SCRIPTS_DIR_NAME = 'scripts' -# name of the directory containing test results -RESULTS_DIR_NAME = 'results' -# name of the directory containing specs files -SPECS_DIR_NAME = 'specs' -# name of the directory containing workspaces for the workers -WORKERS_DIR_NAME = 'workers' -# name of the directory containing log files -LOGS_DIR_NAME = 'logs' -# name of the server user -SERVER_USER = '' -# names of the worker users -WORKER_USERS = 'autotst0 autotst1 autotst2 autotst3 autotst4 autotst5 autotst6 autotst7' -# prefix used to name reaper users -# (reapers not used to kill worker processes if set to the empty string) -REAPER_USER_PREFIX = '' -# default tester environment name -DEFAULT_ENV_NAME = 'defaultenv' - -## RLIMIT SETTINGS FOR TESTER PROCESSES ## - -# values are: (soft limit, hard limit) -# see https://docs.python.org/3/library/resource.html for reference on limit options -# NOTE: these limits cannot be higher than the limits set for the tester user in -# /etc/security/limits.conf (or similar). 
These limits may be reduced in certain -# cases (see the docstring for get_test_preexec_fn and get_cleanup_preexec_fn in -# autotest_server.py) -RLIMIT_SETTINGS = { - 'RLIMIT_NPROC': (300, 300) -} - - -### QUEUE CONFIGS ### - -# functions used to select which type of queue to use. They must accept any number -# of keyword arguments and should only return a boolean (see autotest_enqueuer._get_queue) -def batch_filter(**kwargs): - return kwargs.get('batch_id') is not None - - -def single_filter(**kwargs): - return kwargs.get('user_type') == 'Admin' and not batch_filter(**kwargs) - - -def student_filter(**kwargs): - return kwargs.get('user_type') == 'Student' and not batch_filter(**kwargs) - - -# list of worker queues. Values of each are a string indicating the queue name, -# and a function used to select whether or not to use this type of queue -# (see autotest_enqueuer._get_queue) -batch_queue = {'name': 'batch', 'filter': batch_filter} -single_queue = {'name': 'single', 'filter': single_filter} -student_queue = {'name': 'student', 'filter': student_filter} -WORKER_QUEUES = [batch_queue, single_queue, student_queue] - -### WORKER CONFIGS ### - -WORKERS = [(4, [student_queue['name'], single_queue['name'], batch_queue['name']]), - (2, [single_queue['name'], student_queue['name'], batch_queue['name']]), - (2, [batch_queue['name'], student_queue['name'], single_queue['name']])] diff --git a/setup.cfg b/setup.cfg new file mode 100644 index 00000000..84050d65 --- /dev/null +++ b/setup.cfg @@ -0,0 +1,5 @@ +[aliases] +test=pytest + +[tool:pytest] +addopts = --ignore=src/autotester/testers/py/tests diff --git a/setup.py b/setup.py new file mode 100644 index 00000000..ac57e82a --- /dev/null +++ b/setup.py @@ -0,0 +1,30 @@ +from setuptools import setup, find_packages + +test_exclusions = ["*.tests", "*.tests.*", "tests.*", "tests"] + +setup( + name="markus-autotester", + version="2.0", + description="Automatic tester for programming assignments", + url="https://github.com/MarkUsProject/markus-autotesting", + author="Misha Schwartz, Alessio Di Sandro", + author_email="mschwa@cs.toronto.edu", + license="MIT", + package_dir={"": "src"}, + packages=find_packages(where="src", exclude=test_exclusions), + zip_safe=False, + install_requires=[ + "redis==3.3.8", + "requests==2.22.0", + "rq==1.1.0", + "supervisor==4.1.0", + "PyYAML==5.1.2", + "psycopg2-binary==2.8.4", + "markusapi==0.0.1", + "jsonschema==3.0.2", + ], + tests_require=["pytest==5.3.1", "hypothesis==4.47.3", "fakeredis==1.1.0"], + setup_requires=["pytest-runner"], + include_package_data=True, + entry_points={"console_scripts": "markus_autotester = autotester.cli:cli"}, +) diff --git a/src/autotester/MANIFEST.in b/src/autotester/MANIFEST.in new file mode 100644 index 00000000..fc768370 --- /dev/null +++ b/src/autotester/MANIFEST.in @@ -0,0 +1,3 @@ +include testers/racket/lib/markus.rkt +graft testers/java/lib +include testers/*/specs/install_settings.json diff --git a/server/hooks_context/__init__.py b/src/autotester/__init__.py similarity index 100% rename from server/hooks_context/__init__.py rename to src/autotester/__init__.py diff --git a/server/autotest_enqueuer.py b/src/autotester/cli.py similarity index 54% rename from server/autotest_enqueuer.py rename to src/autotester/cli.py index 8af5c650..0fb47693 100755 --- a/server/autotest_enqueuer.py +++ b/src/autotester/cli.py @@ -7,55 +7,51 @@ import json import inspect import glob -import autotest_server as ats import time -import config import shutil +from rq.exceptions import NoSuchJobError 
from functools import wraps -import form_validation - -### ERROR CLASSES ### - - -class MarkUsError(Exception): - pass - - -class JobArgumentError(MarkUsError): - pass - - -class InvalidQueueError(MarkUsError): - pass - - -class TestScriptFilesError(MarkUsError): - pass - - -class TestParameterError(MarkUsError): - pass - -### HELPER FUNCTIONS ### - - -def _format_job_id(markus_address, run_id, **kw): +from autotester.exceptions import ( + JobArgumentError, + InvalidQueueError, + TestScriptFilesError, + TestParameterError, + MarkUsError, +) +from autotester.server.utils.redis_management import ( + redis_connection, + get_avg_pop_interval, + test_script_directory, +) +from autotester.server.utils.file_management import ignore_missing_dir_error +from autotester.config import config +from autotester.server.utils import form_validation +from autotester.server.server import run_test, update_test_specs + +SETTINGS_FILENAME = config["_workspace_contents", "_settings_file"] + + +def _format_job_id(markus_address, run_id, **_kw): """ Return a unique job id for each enqueued job based on the markus_address and the run_id """ - return '{}_{}'.format(markus_address, run_id) + return "{}_{}".format(markus_address, run_id) -def _check_args(func, args=[], kwargs={}): +def _check_args(func, args=None, kwargs=None): """ Raises an error if calling the function func with args and kwargs would raise an error. """ + args = args or [] + kwargs = kwargs or {} try: inspect.signature(func).bind(*args, **kwargs) except TypeError as e: - raise JobArgumentError('{}\nWith args: {}\nWith kwargs:{}'.format(e, args, tuple(kwargs))) + raise JobArgumentError( + "{}\nWith args: {}\nWith kwargs:{}".format(e, args, tuple(kwargs)) + ) def _get_queue(**kw): @@ -63,10 +59,12 @@ def _get_queue(**kw): Return a queue. The returned queue is one whose condition function returns True when called with the arguments in **kw. """ - for queue_type in config.WORKER_QUEUES: - if queue_type['filter'](**kw): - return rq.Queue(queue_type['name'], connection=ats.redis_connection()) - raise InvalidQueueError('cannot enqueue job: unable to determine correct queue type') + for queue in config["queues"]: + if form_validation.is_valid(kw, queue["schema"]): + return rq.Queue(queue["name"], connection=redis_connection()) + raise InvalidQueueError( + "cannot enqueue job: unable to determine correct queue type" + ) def _print_queue_info(queue): @@ -76,13 +74,15 @@ def _print_queue_info(queue): from the queue and the number of jobs in the queue. """ count = queue.count - avg_pop_interval = ats.get_avg_pop_interval(queue.name) or 0 + avg_pop_interval = get_avg_pop_interval(queue.name) or 0 print(avg_pop_interval * count) -def _check_test_script_files_exist(markus_address, assignment_id, **kw): - if ats.test_script_directory(markus_address, assignment_id) is None: - raise TestScriptFilesError('cannot find test script files: please upload some before running tests') +def _check_test_script_files_exist(markus_address, assignment_id, **_kw): + if test_script_directory(markus_address, assignment_id) is None: + raise TestScriptFilesError( + "cannot find test script files: please upload some before running tests" + ) def _clean_on_error(func): @@ -91,14 +91,15 @@ def _clean_on_error(func): Note: the files_path directory must be passed to the function as a keyword argument. 
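For illustration only (not part of this patch): `_get_queue` above returns the first queue whose JSON schema (defined under `queues:` in `config_defaults/config_default.yml` later in this diff) validates the job's keyword arguments. A minimal sketch of that matching using the same `jsonschema` dependency; `pick_queue` and `QUEUE_SCHEMAS` are hypothetical names.

```python
# Sketch only: mirrors cli._get_queue + form_validation.is_valid with the default
# queue schemas from config_default.yml; pick_queue/QUEUE_SCHEMAS are hypothetical.
from jsonschema import Draft7Validator

QUEUE_SCHEMAS = [
    ("batch", {"type": "object", "properties": {"batch_id": {"type": "number"}}}),
    ("single", {"type": "object", "properties": {"batch_id": {"type": "null"},
                                                 "user_type": {"const": "Admin"}}}),
    ("student", {"type": "object", "properties": {"batch_id": {"type": "null"},
                                                  "user_type": {"const": "Student"}}}),
]


def pick_queue(**kw):
    # The first schema the job's kwargs satisfy wins, in config order.
    for name, schema in QUEUE_SCHEMAS:
        if Draft7Validator(schema).is_valid(kw):
            return name
    raise ValueError("cannot enqueue job: unable to determine correct queue type")


assert pick_queue(user_type="Admin", batch_id=7) == "batch"
assert pick_queue(user_type="Admin", batch_id=None) == "single"
assert pick_queue(user_type="Student", batch_id=None) == "student"
```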
""" + @wraps(func) def wrapper(*args, **kwargs): try: return func(*args, **kwargs) except Exception: - files_path = kwargs.get('files_path') + files_path = kwargs.get("files_path") if files_path: - shutil.rmtree(files_path, onerror=ats.ignore_missing_dir_error) + shutil.rmtree(files_path, onerror=ignore_missing_dir_error) raise return wrapper @@ -114,35 +115,40 @@ def _get_job_timeout(test_specs, test_categories, multiplier=1.5): """ total_timeout = 0 test_data_count = 0 - for settings in test_specs['testers']: - for test_data in settings['test_data']: - test_category = test_data.get('category', []) - if set(test_category) & set(test_categories): # TODO: ensure test_categories is non-string collection type - total_timeout += test_data.get('timeout', 30) # TODO: don't hardcode default timeout + for settings in test_specs["testers"]: + for test_data in settings["test_data"]: + test_category = test_data.get("category", []) + if set(test_category) & set( + test_categories + ): # TODO: ensure test_categories is non-string collection type + total_timeout += test_data.get( + "timeout", 30 + ) # TODO: don't hardcode default timeout test_data_count += 1 if test_data_count: return int(total_timeout * multiplier) - raise TestParameterError(f'there are no tests of the given categories: {test_categories}') - - -### COMMAND FUNCTIONS ### + raise TestParameterError( + f"there are no tests of the given categories: {test_categories}" + ) @_clean_on_error -def run_test(user_type, batch_id, **kw): +def enqueue_test(user_type, batch_id, **kw): """ Enqueue a test run job with keyword arguments specified in **kw """ - kw['enqueue_time'] = time.time() + kw["enqueue_time"] = time.time() queue = _get_queue(user_type=user_type, batch_id=batch_id, **kw) - _check_args(ats.run_test, kwargs=kw) + _check_args(run_test, kwargs=kw) _check_test_script_files_exist(**kw) - test_files_dir = ats.test_script_directory(kw['markus_address'], kw['assignment_id']) - with open(os.path.join(test_files_dir, ats.TEST_SCRIPTS_SETTINGS_FILENAME)) as f: + test_files_dir = test_script_directory(kw["markus_address"], kw["assignment_id"]) + with open(os.path.join(test_files_dir, SETTINGS_FILENAME)) as f: test_specs = json.load(f) _print_queue_info(queue) - timeout = _get_job_timeout(test_specs, kw['test_categories']) - queue.enqueue_call(ats.run_test, kwargs=kw, job_id=_format_job_id(**kw), timeout=timeout) + timeout = _get_job_timeout(test_specs, kw["test_categories"]) + queue.enqueue_call( + run_test, kwargs=kw, job_id=_format_job_id(**kw), timeout=timeout + ) @_clean_on_error @@ -151,32 +157,34 @@ def update_specs(test_specs, schema=None, **kw): Run test spec update function after validating the form data. """ if schema is not None: - errors = list(form_validation.validate_with_defaults(schema, test_specs)) - if errors: - raise form_validation.best_match(errors) - ats.update_test_specs(test_specs=test_specs, **kw) + error = form_validation.validate_with_defaults( + schema, test_specs, best_only=True + ) + if error: + raise error + update_test_specs(test_specs=test_specs, **kw) -def cancel_test(markus_address, run_ids, **kw): +def cancel_test(markus_address, run_ids, **_kw): """ Cancel a test run job with the job_id defined using markus_address and run_id. 
""" - with rq.Connection(ats.redis_connection()): + with rq.Connection(redis_connection()): for run_id in run_ids: job_id = _format_job_id(markus_address, run_id) try: job = rq.job.Job.fetch(job_id) - except rq.exceptions.NoSuchJobError: + except NoSuchJobError: return if job.is_queued(): - files_path = job.kwargs['files_path'] + files_path = job.kwargs["files_path"] if files_path: - shutil.rmtree(files_path, onerror=ats.ignore_missing_dir_error) + shutil.rmtree(files_path, onerror=ignore_missing_dir_error) job.cancel() -def get_schema(**kw): +def get_schema(**_kw): """ Print a json to stdout representing a json schema that indicates the required specs for each installed tester type. @@ -185,16 +193,15 @@ def get_schema(**kw): (https://github.com/mozilla-services/react-jsonschema-form) or similar. """ this_dir = os.path.dirname(os.path.abspath(__file__)) - root_dir = os.path.dirname(this_dir) - with open(os.path.join(this_dir, 'bin', 'tester_schema_skeleton.json')) as f: + with open(os.path.join(this_dir, "lib", "tester_schema_skeleton.json")) as f: schema_skeleton = json.load(f) - glob_pattern = os.path.join(root_dir, 'testers', 'testers', '*', 'specs', '.installed') + glob_pattern = os.path.join(this_dir, "testers", "*", "specs", ".installed") for path in sorted(glob.glob(glob_pattern)): tester_type = os.path.basename(os.path.dirname(os.path.dirname(path))) specs_dir = os.path.dirname(path) - with open(os.path.join(specs_dir, 'settings_schema.json')) as f: + with open(os.path.join(specs_dir, "settings_schema.json")) as f: tester_schema = json.load(f) schema_skeleton["definitions"]["installed_testers"]["enum"].append(tester_type) @@ -220,24 +227,27 @@ def parse_arg_file(arg_file): with open(arg_file) as f: kwargs = json.load(f) - if 'files_path' not in kwargs: - kwargs['files_path'] = os.path.dirname(os.path.realpath(f.name)) + if "files_path" not in kwargs: + kwargs["files_path"] = os.path.dirname(os.path.realpath(f.name)) os.remove(arg_file) return kwargs -COMMANDS = {'run': run_test, - 'specs': update_specs, - 'cancel': cancel_test, - 'schema': get_schema} +COMMANDS = { + "run": enqueue_test, + "specs": update_specs, + "cancel": cancel_test, + "schema": get_schema, +} + -if __name__ == '__main__': +def cli(): parser = argparse.ArgumentParser() - parser.add_argument('command', choices=COMMANDS) + parser.add_argument("command", choices=COMMANDS) group = parser.add_mutually_exclusive_group(required=False) - group.add_argument('-f', '--arg_file', type=parse_arg_file) - group.add_argument('-j', '--arg_json', type=json.loads) + group.add_argument("-f", "--arg_file", type=parse_arg_file) + group.add_argument("-j", "--arg_json", type=json.loads) args = parser.parse_args() @@ -248,3 +258,7 @@ def parse_arg_file(arg_file): except MarkUsError as e: print(str(e)) sys.exit(1) + + +if __name__ == "__main__": + cli() diff --git a/src/autotester/config.py b/src/autotester/config.py new file mode 100644 index 00000000..83afc95c --- /dev/null +++ b/src/autotester/config.py @@ -0,0 +1,103 @@ +# Thanks to this blog post for how to load env vars with the yaml loader: +# https://medium.com/swlh/python-yaml-configuration-with-environment-variables-parsing-77930f4273ac + +import os +import re +import json +from collections.abc import Mapping +import yaml + +DEFAULT_ROOT = os.path.join(os.path.dirname(__file__), "config_defaults") +CONFIG_FILENAME = "markus_autotester_config" +CONFIG_ENV_VAR = "MARKUS_AUTOTESTER_CONFIG" + + +def _find_local_config(): + system_config = os.path.join(os.path.sep, "etc", 
CONFIG_FILENAME) + user_config = os.path.join(os.environ.get("HOME"), f".{CONFIG_FILENAME}") + env_config = os.environ.get(CONFIG_ENV_VAR) + + if env_config is not None: + return env_config + if os.path.isfile(user_config): + return user_config + if os.path.isfile(system_config): + return system_config + + +class _Config: + + _local_config = _find_local_config() + _default_config = os.path.join(DEFAULT_ROOT, "config_default.yml") + _env_var_config = os.path.join(DEFAULT_ROOT, "config_env_vars.yml") + _replacement_pattern = re.compile(r".*?\${(\w+)}.*?") + _not_found_key = "!VARIABLE NOT FOUND!" + + def __init__(self): + self._yaml_loader = yaml.SafeLoader + + self._yaml_loader.add_implicit_resolver("!ENV", self._replacement_pattern, None) + env_constructor = self._constructor_factory( + lambda g: os.environ.get(g, self._not_found_key) + ) + self._yaml_loader.add_constructor("!ENV", env_constructor) + + self._settings = self._load_from_yaml() + + def __getitem__(self, key): + try: + return self._settings[key] + except KeyError: + if isinstance(key, tuple): + d = self + for k in key: + d = d[k] + return d + raise + + def to_json(self): + return json.dumps(self._settings) + + @classmethod + def _merge_dicts(cls, dicts): + try: + _merged = dicts[0].copy() + except AttributeError: + _merged = dicts[0] + if all(isinstance(d, Mapping) for d in dicts): + for d in dicts[1:]: + for key, val in d.items(): + if key not in _merged or _merged[key] == cls._not_found_key: + _merged[key] = val + else: + _merged[key] = cls._merge_dicts([_merged[key], val]) + return _merged + + def _constructor_factory(self, replacement_func): + def constructor(loader, node, pattern=self._replacement_pattern): + value = loader.construct_scalar(node) + match = pattern.findall(value) + if match: + full_value = value + for g in match: + full_value = full_value.replace(f"${{{g}}}", replacement_func(g)) + return full_value + return value + + return constructor + + def _load_from_yaml(self): + config_dicts = [] + if self._local_config is not None and os.path.isfile(self._local_config): + with open(self._local_config) as f: + local_config = yaml.load(f, Loader=self._yaml_loader) + if local_config is not None: + config_dicts.append(local_config) + with open(self._env_var_config) as f: + config_dicts.append(yaml.load(f, Loader=self._yaml_loader)) + with open(self._default_config) as f: + config_dicts.append(yaml.load(f, Loader=self._yaml_loader)) + return self._merge_dicts(config_dicts) + + +config = _Config() diff --git a/src/autotester/config_defaults/config_default.yml b/src/autotester/config_defaults/config_default.yml new file mode 100644 index 00000000..10bb0c69 --- /dev/null +++ b/src/autotester/config_defaults/config_default.yml @@ -0,0 +1,55 @@ +# Default settings. +# Settings prefixed with an underscore are technically overwritable by +# a local settings file but it is not recommended. 
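For illustration only (not part of this patch): `_Config` merges the user's local config file, the environment-variable defaults, and the packaged defaults in that order of precedence, and `__getitem__` accepts tuple keys that walk the nested mapping (e.g. `config["resources", "port", "min"]`). A minimal sketch of the tuple-key lookup; the `settings` literal and `lookup` helper are hypothetical stand-ins.

```python
# Sketch only: how a tuple key is resolved by walking nested dictionaries,
# mirroring the KeyError fallback in _Config.__getitem__ above.
settings = {
    "workspace": "/home/autotst/.markus-autotesting/",    # hypothetical values
    "resources": {"port": {"min": 50000, "max": 65535}},
}


def lookup(mapping, key):
    if isinstance(key, tuple):
        value = mapping
        for k in key:          # one nesting level per tuple element
            value = value[k]
        return value
    return mapping[key]


assert lookup(settings, ("resources", "port", "min")) == 50000
assert lookup(settings, "workspace") == "/home/autotst/.markus-autotesting/"
```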
+ +queues: + - name: batch + schema: {'type': 'object', 'properties': {'batch_id': {'type': 'number'}}} + - name: single + schema: {'type': 'object', 'properties': {'batch_id': {'type': 'null'}, 'user_type': {'const': 'Admin'}}} + - name: student + schema: {'type': 'object', 'properties': {'batch_id': {'type': 'null'}, 'user_type': {'const': 'Student'}}} + +workers: + - users: + - name: !ENV ${USER} + reaper: null + queues: + - student + - single + - batch + +redis: + url: 'redis://127.0.0.1:6379/0' + _prefix: 'redis:' + _current_test_script_hash: current_test_scripts + _pop_interval_hash: pop_interval + +supervisor: + url: '127.0.0.1:9001' + +rlimit_settings: + nproc: + - 300 + - 300 + +resources: + port: + _redis_int: port + min: 50000 + max: 65535 + postgresql: + _prefix: autotest_ + port: 5432 + host: localhost + +_workspace_contents: + _scripts: scripts + _results: results + _specs: specs + _logs: logs + _workers: workers + _default_venv_name: defaultvenv + _settings_file: settings.json + _files_dir: files + _hooks_file: hooks.py diff --git a/src/autotester/config_defaults/config_env_vars.yml b/src/autotester/config_defaults/config_env_vars.yml new file mode 100644 index 00000000..6a34ac11 --- /dev/null +++ b/src/autotester/config_defaults/config_env_vars.yml @@ -0,0 +1,14 @@ +workspace: !ENV ${HOME}/.markus-autotesting/ + +redis: + url: !ENV ${REDIS_URL} + +server_user: !ENV ${USER} + +supervisor: + url: !ENV ${SUPERVISOR_URL} + +resources: + postgresql: + port: !ENV ${PGPORT} + host: !ENV ${PGHOST} diff --git a/src/autotester/exceptions.py b/src/autotester/exceptions.py new file mode 100644 index 00000000..6e02f341 --- /dev/null +++ b/src/autotester/exceptions.py @@ -0,0 +1,31 @@ +""" +Custom Exception Type for use in MarkUs +""" + + +class MarkUsError(Exception): + pass + + +class TesterCreationError(MarkUsError): + pass + + +class TesterUserError(MarkUsError): + pass + + +class JobArgumentError(MarkUsError): + pass + + +class InvalidQueueError(MarkUsError): + pass + + +class TestScriptFilesError(MarkUsError): + pass + + +class TestParameterError(MarkUsError): + pass diff --git a/server/bin/tester_schema_skeleton.json b/src/autotester/lib/tester_schema_skeleton.json similarity index 99% rename from server/bin/tester_schema_skeleton.json rename to src/autotester/lib/tester_schema_skeleton.json index a949f2b3..47c7e85d 100644 --- a/server/bin/tester_schema_skeleton.json +++ b/src/autotester/lib/tester_schema_skeleton.json @@ -51,4 +51,4 @@ "title": "Custom hooks file" } } -} \ No newline at end of file +} diff --git a/server/tests/__init__.py b/src/autotester/resources/__init__.py similarity index 100% rename from server/tests/__init__.py rename to src/autotester/resources/__init__.py diff --git a/src/autotester/resources/ports/__init__.py b/src/autotester/resources/ports/__init__.py new file mode 100644 index 00000000..eb5f8d36 --- /dev/null +++ b/src/autotester/resources/ports/__init__.py @@ -0,0 +1,31 @@ +import socket +from autotester.server.utils.redis_management import redis_connection +from autotester.config import config + +PORT_MIN = config["resources", "port", "min"] +PORT_MAX = config["resources", "port", "max"] +REDIS_PREFIX = config["redis", "_prefix"] +REDIS_PORT_INT = f"{REDIS_PREFIX}{config['resources', 'port', '_redis_int']}" + + +def next_port(): + """ Return a port number that is greater than the last time this method was + called (by any process on this machine). 
+ + This port number is not guaranteed to be free + """ + r = redis_connection() + return int(r.incr(REDIS_PORT_INT) or 0) % (PORT_MAX - PORT_MIN) + PORT_MIN + + +def get_available_port(host="localhost"): + """ Return the next available open port on . """ + while True: + try: + with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: + s.bind((host, next_port())) + s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + port = s.getsockname()[1] + return str(port) + except OSError: + continue diff --git a/src/autotester/resources/postgresql/__init__.py b/src/autotester/resources/postgresql/__init__.py new file mode 100644 index 00000000..15eadce5 --- /dev/null +++ b/src/autotester/resources/postgresql/__init__.py @@ -0,0 +1,42 @@ +import os +import getpass +import psycopg2 +import secrets +import string +from psycopg2.extensions import AsIs +from autotester.config import config + +POSTGRES_PREFIX = config["resources", "postgresql", "_prefix"] +PGPASSFILE = os.path.join( + config["workspace"], config["_workspace_contents", "_logs"], ".pgpass" +) + + +def setup_database(test_username): + user = getpass.getuser() + database = f"{POSTGRES_PREFIX}{test_username}" + + with open(PGPASSFILE) as f: + password = f.read().strip() + + with psycopg2.connect( + database=database, user=user, password=password, host="localhost" + ) as conn: + with conn.cursor() as cursor: + cursor.execute("DROP OWNED BY CURRENT_USER;") + if test_username != user: + user = test_username + password = "".join( + secrets.choice(string.ascii_letters + string.digits) + for _ in range(20) + ) + cursor.execute( + "ALTER USER %s WITH PASSWORD %s;", (AsIs(user), password) + ) + + return { + "PGDATABASE": database, + "PGPASSWORD": password, + "PGUSER": user, + "AUTOTESTENV": "true", + } diff --git a/testers/testers/__init__.py b/src/autotester/server/__init__.py similarity index 100% rename from testers/testers/__init__.py rename to src/autotester/server/__init__.py diff --git a/testers/testers/custom/__init__.py b/src/autotester/server/hooks_context/__init__.py similarity index 100% rename from testers/testers/custom/__init__.py rename to src/autotester/server/hooks_context/__init__.py diff --git a/src/autotester/server/hooks_context/builtin_hooks.py b/src/autotester/server/hooks_context/builtin_hooks.py new file mode 100644 index 00000000..5929a0aa --- /dev/null +++ b/src/autotester/server/hooks_context/builtin_hooks.py @@ -0,0 +1,80 @@ +""" +Builtin hooks used by hooks_context.Hooks +""" + +import os +import json +import pkgutil +import importlib +from autotester import testers + +HOOKS = { + "upload_feedback_file": {"context": "after_each"}, + "upload_feedback_to_repo": { + "requires": ["clear_feedback_file"], + "context": "after_each", + }, + "upload_annotations": {"context": "after_each"}, + "clear_feedback_file": {"context": "before_each"}, +} + + +def clear_feedback_file(test_data, **_kwargs): + """ + Remove any previous feedback file before the tests run. + """ + feedback_file = test_data.get("feedback_file_name", "") + if os.path.isfile(feedback_file): + os.remove(feedback_file) + + +def upload_feedback_to_repo(api, assignment_id, group_id, test_data, **_kwargs): + """ + Upload the feedback file to the group's repo. 
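For illustration only (not part of this patch): these builtin hooks are switched on per test group. `Hooks._select_builtins` (further down in this diff) treats truthy keys in the per-test settings that match names in `HOOKS` as opt-ins, so a hypothetical `test_data` block like the following would run `upload_feedback_to_repo` after each test and, via its `requires` entry, `clear_feedback_file` before each test.

```python
# Hypothetical per-test settings block; the hook key names come from builtin_hooks.HOOKS,
# the other values are illustrative only.
test_data = {
    "category": ["admin"],                  # illustrative category name
    "timeout": 30,
    "feedback_file_name": "feedback.txt",   # file the test script is expected to write
    "upload_feedback_to_repo": True,        # after_each hook; pulls in clear_feedback_file
}
```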
+ """ + feedback_file = test_data.get("feedback_file_name", "") + if os.path.isfile(feedback_file): + with open(feedback_file) as feedback_open: + api.upload_file_to_repo( + assignment_id, group_id, feedback_file, feedback_open.read() + ) + + +def upload_feedback_file(api, assignment_id, group_id, test_data, **_kwargs): + """ + Upload the feedback file using MarkUs' api. + """ + feedback_file = test_data.get("feedback_file_name", "") + if os.path.isfile(feedback_file): + with open(feedback_file) as feedback_open: + api.upload_feedback_file( + assignment_id, group_id, feedback_file, feedback_open.read() + ) + + +def upload_annotations(api, assignment_id, group_id, test_data, **_kwargs): + """ + Upload annotations using MarkUs' api. + """ + annotations_name = test_data.get("annotation_file", "") + if os.path.isfile(annotations_name): + with open(annotations_name) as annotations_open: + api.upload_annotations(assignment_id, group_id, json.load(annotations_open)) + + +def _load_default_hooks(): + """ + Return a dictionary containing all hooks loaded from any default_hooks.py in the testers package. + """ + defaults = {} + for _finder, name, _ispkg in pkgutil.walk_packages( + testers.__path__, f"{testers.__name__}." + ): + if name.endswith("default_hooks"): + default_hooks = importlib.import_module(name) + for hook in default_hooks.HOOKS: + defaults[hook.__name__] = hook + return defaults + + +DEFAULT_HOOKS = _load_default_hooks() diff --git a/server/hooks_context/hooks_context.py b/src/autotester/server/hooks_context/hooks_context.py similarity index 81% rename from server/hooks_context/hooks_context.py rename to src/autotester/server/hooks_context/hooks_context.py index 1169f530..9a2dedb2 100644 --- a/server/hooks_context/hooks_context.py +++ b/src/autotester/server/hooks_context/hooks_context.py @@ -5,13 +5,12 @@ """ import os -import sys import traceback from collections import defaultdict, deque from collections.abc import Callable from contextlib import contextmanager -from hooks_context import builtin_hooks -from hooks_context.utils import current_directory, add_path +from autotester.server.hooks_context import builtin_hooks +from autotester.server.utils.path_management import current_directory, add_path class Hooks: @@ -46,9 +45,12 @@ class Hooks: Builtin hooks can have any name and when they are executed is instead determined by the values associated to their name in the builtin_hooks.HOOKS dictionary. """ - HOOK_BASENAMES = ['before_all', 'before_each', 'after_all', 'after_each'] - def __init__(self, custom_hooks_path=None, testers=None, cwd=None, args=None, kwargs=None): + HOOK_BASENAMES = ["before_all", "before_each", "after_all", "after_each"] + + def __init__( + self, custom_hooks_path=None, testers=None, cwd=None, args=None, kwargs=None + ): """ Create a new Hooks object instance with args: @@ -99,11 +101,11 @@ def _select_builtins(tester_info, _info=None): for func_name, data in builtin_hooks.HOOKS.items(): if tester_info.get(func_name): - hook_type, hook_context = data['context'].split('_') + hook_type, hook_context = data["context"].split("_") func = getattr(builtin_hooks, func_name) if func not in _info.get(hook_context, {}).get(hook_type, set()): _info[hook_context][hook_type].append(func) - for requires in data.get('requires', []): + for requires in data.get("requires", []): Hooks._select_builtins({requires: True}, _info) return _info @@ -117,15 +119,17 @@ def _merge_hook_dicts(*hook_dicts): order. 
""" merged = defaultdict(list) - sort_key = lambda x: (x.__name__ in Hooks.HOOK_BASENAMES, x.__name__) for d in hook_dicts: for key, hooks in d.items(): merged[key].extend(h for h in hooks if h) for key, hooks in merged.items(): - merged[key] = sorted((h for h in hooks if h), key=sort_key, reverse=(key=='after')) + merged[key] = sorted( + (h for h in hooks if h), + key=lambda x: (x.__name__ in Hooks.HOOK_BASENAMES, x.__name__), + reverse=(key == "after"), + ) return merged - def _load_all(self): """ Return a dictionary containing all hooks that may be run over the course of a test run. @@ -142,17 +146,21 @@ def _load_all(self): hooks = defaultdict(lambda: defaultdict(lambda: defaultdict(list))) custom_hooks_module = self._load_module(self.custom_hooks_path) - + for hook_name in Hooks.HOOK_BASENAMES: - hook_type, hook_context = hook_name.split('_') # eg. "before_all" -> ("before", "all") + hook_type, hook_context = hook_name.split( + "_" + ) # eg. "before_all" -> ("before", "all") custom_hook = self._load_hook(custom_hooks_module, hook_name) builtin_hook = builtin_hooks.DEFAULT_HOOKS.get(hook_name) hooks[None][hook_context][hook_type].extend([custom_hook, builtin_hook]) for tester_type in self.testers: - tester_hook_name = f'{hook_name}_{tester_type}' + tester_hook_name = f"{hook_name}_{tester_type}" custom_hook = self._load_hook(custom_hooks_module, tester_hook_name) builtin_hook = builtin_hooks.DEFAULT_HOOKS.get(tester_hook_name) - hooks[tester_type][hook_context][hook_type].extend([custom_hook, builtin_hook]) + hooks[tester_type][hook_context][hook_type].extend( + [custom_hook, builtin_hook] + ) return hooks def _load_module(self, hooks_script_path): @@ -170,8 +178,8 @@ def _load_module(self, hooks_script_path): with add_path(dirpath): hooks_module = __import__(module_name) return hooks_module - except Exception: - self.load_errors.append((module_name, traceback.format_exc())) + except Exception as e: + self.load_errors.append((module_name, f"{traceback.format_exc()}\n{e}")) return None def _load_hook(self, module, function_name): @@ -185,7 +193,9 @@ def _load_hook(self, module, function_name): if isinstance(func, Callable): return func else: - self.load_errors.append((module.__name__, f'hook function {function_name} is not callable')) + self.load_errors.append( + (module.__name__, f"hook function {function_name} is not callable") + ) except AttributeError: return @@ -194,12 +204,14 @@ def _run(self, func, extra_args=None, extra_kwargs=None): Run the function func with positional and keyword arguments obtained by merging self.args with extra_args and self.kwargs with extra_kwargs. """ - args = self.args+(extra_args or []) + args = self.args + (extra_args or []) kwargs = {**self.kwargs, **(extra_kwargs or {})} try: func(*args, **kwargs) except BaseException as e: - self.run_errors.append((func.__name__, args, kwargs, traceback.format_exc())) + self.run_errors.append( + (func.__name__, args, kwargs, f"{traceback.format_exc()}\n{e}") + ) def _get_hooks(self, tester_type, builtin_selector=None): """ @@ -211,23 +223,29 @@ def _get_hooks(self, tester_type, builtin_selector=None): if no builtin hooks are used. 
""" builtin_hook_dict = Hooks._select_builtins(builtin_selector or {}) - if tester_type == 'all': - hooks = self.hooks.get(None, {}).get('all', {}) - elif tester_type == 'each': - hooks = self.hooks.get(None, {}).get('each', {}) - other_hooks = [builtin_hook_dict.get('each', {})] + if tester_type == "all": + hooks = self.hooks.get(None, {}).get("all", {}) + elif tester_type == "each": + hooks = self.hooks.get(None, {}).get("each", {}) + other_hooks = [builtin_hook_dict.get("each", {})] for context in self._context: - context_hooks = self.hooks.get(context, {}).get('each', {}) + context_hooks = self.hooks.get(context, {}).get("each", {}) other_hooks.append(context_hooks) hooks = Hooks._merge_hook_dicts(hooks, *other_hooks) else: - hooks = self.hooks.get(tester_type, {}).get('all', {}) - hooks = Hooks._merge_hook_dicts(hooks, builtin_hook_dict.get('all', {})) - return hooks.get('before', []), hooks.get('after', []) - + hooks = self.hooks.get(tester_type, {}).get("all", {}) + hooks = Hooks._merge_hook_dicts(hooks, builtin_hook_dict.get("all", {})) + return hooks.get("before", []), hooks.get("after", []) @contextmanager - def around(self, tester_type, builtin_selector=None, extra_args=None, extra_kwargs=None, cwd=None): + def around( + self, + tester_type, + builtin_selector=None, + extra_args=None, + extra_kwargs=None, + cwd=None, + ): """ Context manager used to run hooks around any block of code. Hooks are selected based on the tester type (one of 'all', 'each', or the name of a tester), a builtin_selector (usually the test settings for a given test @@ -235,7 +253,7 @@ def around(self, tester_type, builtin_selector=None, extra_args=None, extra_kwar and self.kwargs. If cwd is specified, each hook will be run as if the current working directory were cwd. 
""" before, after = self._get_hooks(tester_type, builtin_selector) - if tester_type not in {'all', 'each'}: + if tester_type not in {"all", "each"}: self._context.append(tester_type) try: if any(before) or any(after): @@ -251,7 +269,7 @@ def around(self, tester_type, builtin_selector=None, extra_args=None, extra_kwar else: yield finally: - if tester_type not in {'all', 'each'}: + if tester_type not in {"all", "each"}: self._context.pop() def format_errors(self): @@ -260,9 +278,10 @@ def format_errors(self): """ error_list = [] for module_name, tb in self.load_errors: - error_list.append(f'module_name: {module_name}\ntraceback:\n{tb}') - for hook_name, args, kwargs, tb in self.run_errors: - error_list.append(f'function_name: {hook_name}\nargs: {self.args}\nkwargs: {self.kwargs},\ntraceback:\n{tb}') - return '\n\n'.join(error_list) - - + error_list.append(f"module_name: {module_name}\ntraceback:\n{tb}") + for hook_name, args, kwargs, tb in self.run_errors: + error_list.append( + f"function_name: {hook_name}\n" + f"args: {args}\nkwargs: {kwargs},\ntraceback:\n{tb}" + ) + return "\n\n".join(error_list) diff --git a/src/autotester/server/server.py b/src/autotester/server/server.py new file mode 100755 index 00000000..07bb4c51 --- /dev/null +++ b/src/autotester/server/server.py @@ -0,0 +1,498 @@ +#!/usr/bin/env python3 + +import os +import shutil +import time +import json +import subprocess +import signal +import rq +import tempfile +from markusapi import Markus + +from autotester.exceptions import TesterCreationError +from autotester.config import config +from autotester.server.hooks_context.hooks_context import Hooks +from autotester.server.utils.string_management import ( + loads_partial_json, + decode_if_bytes, + stringify, +) +from autotester.server.utils.user_management import ( + get_reaper_username, + current_user, + tester_user, +) +from autotester.server.utils.file_management import ( + random_tmpfile_name, + clean_dir_name, + setup_files, + ignore_missing_dir_error, + fd_open, + fd_lock, + move_tree, +) +from autotester.server.utils.resource_management import ( + set_rlimits_before_cleanup, + set_rlimits_before_test, +) +from autotester.server.utils.redis_management import ( + clean_after, + test_script_directory, + update_pop_interval_stat, +) +from autotester.resources.ports import get_available_port +from autotester.resources.postgresql import setup_database + +DEFAULT_ENV_DIR = config["_workspace_contents", "_default_venv_name"] +TEST_RESULT_DIR = os.path.join( + config["workspace"], config["_workspace_contents", "_results"] +) +HOOKS_FILENAME = config["_workspace_contents", "_hooks_file"] +SETTINGS_FILENAME = config["_workspace_contents", "_settings_file"] +FILES_DIRNAME = config["_workspace_contents", "_files_dir"] +TEST_SPECS_DIR = os.path.join( + config["workspace"], config["_workspace_contents", "_specs"] +) +TEST_SCRIPT_DIR = os.path.join( + config["workspace"], config["_workspace_contents", "_scripts"] +) + +TESTER_IMPORT_LINE = { + "custom": "from testers.custom.markus_custom_tester import MarkusCustomTester as Tester", + "haskell": "from testers.haskell.markus_haskell_tester import MarkusHaskellTester as Tester", + "java": "from testers.java.markus_java_tester import MarkusJavaTester as Tester", + "py": "from testers.py.markus_python_tester import MarkusPythonTester as Tester", + "pyta": "from testers.pyta.markus_pyta_tester import MarkusPyTATester as Tester", + "racket": "from testers.racket.markus_racket_tester import MarkusRacketTester as Tester", +} + + +def 
run_test_command(test_username=None): + """ + Return a command used to run test scripts as a the test_username + user, with the correct arguments. Set test_username to None to + run as the current user. + + >>> test_script = 'mysscript.py' + >>> run_test_command('f').format(test_script) + 'sudo -u f -- bash -c "./myscript.py"' + + >>> run_test_command().format(test_script) + './myscript.py' + """ + cmd = "{}" + if test_username is not None: + cmd = " ".join( + ("sudo", "-Eu", test_username, "--", "bash", "-c", "'{}'".format(cmd)) + ) + + return cmd + + +def create_test_group_result(stdout, stderr, run_time, extra_info, timeout=None): + """ + Return the arguments passed to this function in a dictionary. If stderr is + falsy, change it to None. Load the json string in stdout as a dictionary. + """ + test_results, malformed = loads_partial_json(stdout, dict) + return { + "time": run_time, + "timeout": timeout, + "tests": test_results, + "stderr": stderr or None, + "malformed": stdout if malformed else None, + "extra_info": extra_info or {}, + } + + +def kill_with_reaper(test_username): + """ + Try to kill all processes currently being run by test_username using the method + described in this article: https://lwn.net/Articles/754980/. Return True if this + is method is attempted and is successful, otherwise return False. + + This copies the kill_worker_procs executable as the test_username user and sets + the permissions of this copied file so that it can be executed by the corresponding + reaper user. Crucially, it sets the permissions to include the setuid bit so that + the reaper user can manipulate the real uid and effective uid values of the process. + + The reaper user then runs this copied executable which kills all processes being + run by the test_username user, deletes itself and exits with a 0 exit code if + sucessful. + """ + reaper_username = get_reaper_username(test_username) + if reaper_username is not None: + cwd = os.path.dirname(os.path.abspath(__file__)) + kill_file_dst = random_tmpfile_name() + preexec_fn = set_rlimits_before_cleanup() + + copy_cmd = "sudo -u {0} -- bash -c 'cp kill_worker_procs {1} && chmod 4550 {1}'".format( + test_username, kill_file_dst + ) + copy_proc = subprocess.Popen( + copy_cmd, shell=True, preexec_fn=preexec_fn, cwd=cwd + ) + if copy_proc.wait() < 0: # wait returns the return code of the proc + return False + + kill_cmd = "sudo -u {} -- bash -c {}".format(reaper_username, kill_file_dst) + kill_proc = subprocess.Popen(kill_cmd, shell=True, preexec_fn=preexec_fn) + return kill_proc.wait() == 0 + return False + + +def kill_without_reaper(test_username): + """ + Kill all processes that test_username is able to kill + """ + kill_cmd = f"sudo -u {test_username} -- bash -c 'kill -KILL -1'" + subprocess.run(kill_cmd, shell=True) + + +def create_test_script_command(env_dir, tester_type): + """ + Return string representing a command line command to + run tests. 
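For illustration only (not part of this patch): `run_test_specs` (below) combines these two helpers, substituting the tester command into the `sudo` wrapper before handing the string to `subprocess.Popen(..., shell=True)`. A sketch assuming the package is installed, using the `autotst0` worker name from the docker config; the `<workspace>` prefix is elided.

```python
# Sketch only: how run_test_command and create_test_script_command compose the
# shell command executed per tester in run_test_specs.
from autotester.server.server import create_test_script_command, run_test_command

cmd = run_test_command(test_username="autotst0")
# -> "sudo -Eu autotst0 -- bash -c '{}'"   (just "{}" when running as the current user)

cmd_str = create_test_script_command("defaultvenv", "py")
# -> '<workspace>/specs/defaultvenv/venv/bin/python -c "import sys, json; ...; Tester(...).run()"'

args = cmd.format(cmd_str)
# run_test_specs then pipes the per-test settings to this process as JSON on stdin.
```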
+ """ + import_line = TESTER_IMPORT_LINE[tester_type] + python_lines = [ + "import sys, json", + import_line, + "from testers.markus_test_specs import MarkusTestSpecs", + f"Tester(specs=MarkusTestSpecs.from_json(sys.stdin.read())).run()", + ] + python_ex = os.path.join( + os.path.join(TEST_SPECS_DIR, env_dir), "venv", "bin", "python" + ) + python_str = "; ".join(python_lines) + return f'{python_ex} -c "{python_str}"' + + +def get_env_vars(test_username): + """ Return a dictionary containing all environment variables to pass to the next test """ + db_env_vars = setup_database(test_username) + port_number = get_available_port() + return {"PORT": port_number, **db_env_vars} + + +def run_test_specs(cmd, test_specs, test_categories, tests_path, test_username, hooks): + """ + Run each test script in test_scripts in the tests_path directory using the + command cmd. Return the results. + """ + results = [] + preexec_fn = set_rlimits_before_test() + + with hooks.around("all"): + for settings in test_specs["testers"]: + tester_type = settings["tester_type"] + extra_hook_kwargs = {"settings": settings} + with hooks.around(tester_type, extra_kwargs=extra_hook_kwargs): + env_dir = settings.get("env_loc", DEFAULT_ENV_DIR) + + cmd_str = create_test_script_command(env_dir, tester_type) + args = cmd.format(cmd_str) + + for test_data in settings["test_data"]: + test_category = test_data.get("category", []) + if set(test_category) & set( + test_categories + ): # TODO: make sure test_categories is non-string collection type + extra_hook_kwargs = {"test_data": test_data} + with hooks.around( + "each", + builtin_selector=test_data, + extra_kwargs=extra_hook_kwargs, + ): + start = time.time() + out, err = "", "" + timeout_expired = None + timeout = test_data.get("timeout") + try: + env_vars = get_env_vars(test_username) + proc = subprocess.Popen( + args, + start_new_session=True, + cwd=tests_path, + shell=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + stdin=subprocess.PIPE, + preexec_fn=preexec_fn, + env={**os.environ, **env_vars}, + ) + try: + settings_json = json.dumps( + {**settings, "test_data": test_data} + ).encode("utf-8") + out, err = proc.communicate( + input=settings_json, timeout=timeout + ) + except subprocess.TimeoutExpired: + if test_username == current_user(): + pgrp = os.getpgid(proc.pid) + os.killpg(pgrp, signal.SIGKILL) + else: + if not kill_with_reaper(test_username): + kill_without_reaper(test_username) + out, err = proc.communicate() + timeout_expired = timeout + except Exception as e: + err += "\n\n{}".format(e) + finally: + out = decode_if_bytes(out) + err = decode_if_bytes(err) + duration = int(round(time.time() - start, 3) * 1000) + extra_info = test_data.get("extra_info", {}) + results.append( + create_test_group_result( + out, err, duration, extra_info, timeout_expired + ) + ) + return results, hooks.format_errors() + + +def store_results(results_data, markus_address, assignment_id, group_id, submission_id): + """ + Write the results of multiple test script runs to an output file as a json string. 
+ The output file is located at: + {TEST_RESULT_DIR}/{markus_address}/{assignment_id}/{group_id}/{submission_id}/ouput.json + """ + clean_markus_address = clean_dir_name(markus_address) + run_time = "run_{}".format(int(time.time())) + destination = os.path.join( + *stringify( + TEST_RESULT_DIR, + clean_markus_address, + assignment_id, + group_id, + "s{}".format(submission_id or ""), + run_time, + ) + ) + os.makedirs(destination, exist_ok=True) + with open(os.path.join(destination, "output.json"), "w") as f: + json.dump(results_data, f, indent=4) + + +def clear_working_directory(tests_path, test_username): + """ + Run commands that clear the tests_path working directory + """ + if test_username != current_user(): + chmod_cmd = "sudo -u {} -- bash -c 'chmod -Rf ugo+rwX {}'".format( + test_username, tests_path + ) + else: + chmod_cmd = "chmod -Rf ugo+rwX {}".format(tests_path) + + subprocess.run(chmod_cmd, shell=True) + + # be careful not to remove the tests_path dir itself since we have to + # set the group ownership with sudo (and that is only done in ../install.sh) + clean_cmd = "rm -rf {0}/.[!.]* {0}/*".format(tests_path) + subprocess.run(clean_cmd, shell=True) + + +def stop_tester_processes(test_username): + """ + Run a command that kills all tester processes either by killing all + user processes or killing with a reaper user (see https://lwn.net/Articles/754980/ + for reference). + """ + if test_username != current_user(): + if not kill_with_reaper(test_username): + kill_without_reaper(test_username) + + +def finalize_results_data(results, error, all_hooks_error, time_to_service): + """ Return a dictionary of test script results combined with test run info """ + return { + "test_groups": results, + "error": error, + "hooks_error": all_hooks_error, + "time_to_service": time_to_service, + } + + +def report(results_data, api, assignment_id, group_id, run_id): + """ Post the results of running test scripts to the markus api """ + api.upload_test_group_results( + assignment_id, group_id, run_id, json.dumps(results_data) + ) + + +@clean_after +def run_test( + markus_address, + server_api_key, + test_categories, + files_path, + assignment_id, + group_id, + group_repo_name, + submission_id, + run_id, + enqueue_time, +): + """ + Run autotesting tests using the tests in the test_specs json file on the files in files_path. + + This function should be used by an rq worker. 
+ """ + results = [] + error = None + hooks_error = None + time_to_service = int(round(time.time() - enqueue_time, 3) * 1000) + + test_script_path = test_script_directory(markus_address, assignment_id) + hooks_script_path = os.path.join(test_script_path, HOOKS_FILENAME) + test_specs_path = os.path.join(test_script_path, SETTINGS_FILENAME) + api = Markus(server_api_key, markus_address) + + with open(test_specs_path) as f: + test_specs = json.load(f) + + try: + job = rq.get_current_job() + update_pop_interval_stat(job.origin) + test_username, tests_path = tester_user() + hooks_kwargs = { + "api": api, + "assignment_id": assignment_id, + "group_id": group_id, + } + testers = {settings["tester_type"] for settings in test_specs["testers"]} + hooks = Hooks(hooks_script_path, testers, cwd=tests_path, kwargs=hooks_kwargs) + try: + setup_files(files_path, tests_path, markus_address, assignment_id) + cmd = run_test_command(test_username=test_username) + results, hooks_error = run_test_specs( + cmd, test_specs, test_categories, tests_path, test_username, hooks + ) + finally: + stop_tester_processes(test_username) + clear_working_directory(tests_path, test_username) + except Exception as e: + error = str(e) + finally: + results_data = finalize_results_data( + results, error, hooks_error, time_to_service + ) + store_results( + results_data, markus_address, assignment_id, group_id, submission_id + ) + report(results_data, api, assignment_id, group_id, run_id) + + +def get_tester_root_dir(tester_type): + """ + Return the root directory of the tester named tester_type + """ + this_dir = os.path.dirname(os.path.abspath(__file__)) + root_dir = os.path.dirname(this_dir) + tester_dir = os.path.join(root_dir, "testers", tester_type) + if not os.path.isdir(tester_dir): + raise FileNotFoundError(f"{tester_type} is not a valid tester name") + return tester_dir + + +def update_settings(settings, specs_dir): + """ + Return a dictionary containing all the default settings and the installation settings + contained in the tester's specs directory as well as the settings. The settings + will overwrite any duplicate keys in the default settings files. 
+ """ + full_settings = {"install_data": {}} + install_settings_files = [os.path.join(specs_dir, "install_settings.json")] + for settings_file in install_settings_files: + if os.path.isfile(settings_file): + with open(settings_file) as f: + full_settings["install_data"].update(json.load(f)) + full_settings.update(settings) + return full_settings + + +def create_tester_environments(files_path, test_specs): + for i, settings in enumerate(test_specs["testers"]): + tester_dir = get_tester_root_dir(settings["tester_type"]) + specs_dir = os.path.join(tester_dir, "specs") + bin_dir = os.path.join(tester_dir, "bin") + settings = update_settings(settings, specs_dir) + if settings.get("env_data"): + new_env_dir = tempfile.mkdtemp(prefix="env", dir=TEST_SPECS_DIR) + os.chmod(new_env_dir, 0o775) + settings["env_loc"] = new_env_dir + + create_file = os.path.join(bin_dir, "create_environment.sh") + if os.path.isfile(create_file): + cmd = [f"{create_file}", json.dumps(settings), files_path] + proc = subprocess.run(cmd, stderr=subprocess.PIPE) + if proc.returncode != 0: + raise TesterCreationError( + f"create tester environment failed with:\n{proc.stderr}" + ) + else: + settings["env_loc"] = DEFAULT_ENV_DIR + test_specs["testers"][i] = settings + + return test_specs + + +def destroy_tester_environments(old_test_script_dir): + test_specs_file = os.path.join(old_test_script_dir, SETTINGS_FILENAME) + with open(test_specs_file) as f: + test_specs = json.load(f) + for settings in test_specs["testers"]: + env_loc = settings.get("env_loc", DEFAULT_ENV_DIR) + if env_loc != DEFAULT_ENV_DIR: + tester_dir = get_tester_root_dir(settings["tester_type"]) + bin_dir = os.path.join(tester_dir, "bin") + destroy_file = os.path.join(bin_dir, "destroy_environment.sh") + if os.path.isfile(destroy_file): + cmd = [f"{destroy_file}", json.dumps(settings)] + proc = subprocess.run(cmd, stderr=subprocess.PIPE) + if proc.returncode != 0: + raise TesterCreationError( + f"destroy tester environment failed with:\n{proc.stderr}" + ) + shutil.rmtree(env_loc, onerror=ignore_missing_dir_error) + + +@clean_after +def update_test_specs(files_path, assignment_id, markus_address, test_specs): + """ + Copy new test scripts for a given assignment to from the files_path + to a new location. Indicate that these new test scripts should be used instead of + the old ones. And remove the old ones when it is safe to do so (they are not in the + process of being copied to a working directory). + + This function should be used by an rq worker. 
+ """ + # TODO: catch and log errors + test_script_dir_name = "test_scripts_{}".format(int(time.time())) + clean_markus_address = clean_dir_name(markus_address) + new_dir = os.path.join( + *stringify( + TEST_SCRIPT_DIR, clean_markus_address, assignment_id, test_script_dir_name + ) + ) + new_files_dir = os.path.join(new_dir, FILES_DIRNAME) + move_tree(files_path, new_files_dir) + if "hooks_file" in test_specs: + src = os.path.join(new_files_dir, test_specs["hooks_file"]) + if os.path.isfile(src): + os.rename(src, os.path.join(new_dir, HOOKS_FILENAME)) + test_specs = create_tester_environments(new_files_dir, test_specs) + settings_filename = os.path.join(new_dir, SETTINGS_FILENAME) + with open(settings_filename, "w") as f: + json.dump(test_specs, f) + old_test_script_dir = test_script_directory(markus_address, assignment_id) + test_script_directory(markus_address, assignment_id, set_to=new_dir) + + if old_test_script_dir is not None: + with fd_open(old_test_script_dir) as fd: + with fd_lock(fd, exclusive=True): + destroy_tester_environments(old_test_script_dir) + shutil.rmtree(old_test_script_dir, onerror=ignore_missing_dir_error) diff --git a/testers/testers/haskell/__init__.py b/src/autotester/server/utils/__init__.py similarity index 100% rename from testers/testers/haskell/__init__.py rename to src/autotester/server/utils/__init__.py diff --git a/src/autotester/server/utils/file_management.py b/src/autotester/server/utils/file_management.py new file mode 100644 index 00000000..56b53b72 --- /dev/null +++ b/src/autotester/server/utils/file_management.py @@ -0,0 +1,149 @@ +import os +import uuid +import tempfile +import shutil +import fcntl +from autotester.server.utils import redis_management +from autotester.config import config +from contextlib import contextmanager + +FILES_DIRNAME = config["_workspace_contents", "_files_dir"] + + +def clean_dir_name(name): + """ Return name modified so that it can be used as a unix style directory name """ + return name.replace("/", "_") + + +def random_tmpfile_name(): + return os.path.join(tempfile.gettempdir(), uuid.uuid4().hex) + + +def recursive_iglob(root_dir): + """ + Walk breadth first over a directory tree starting at root_dir and + yield the path to each directory or file encountered. + Yields a tuple containing a string indicating whether the path is to + a directory ("d") or a file ("f") and the path itself. Raise a + ValueError if the root_dir doesn't exist + """ + if os.path.isdir(root_dir): + for root, dirnames, filenames in os.walk(root_dir): + yield from (("d", os.path.join(root, d)) for d in dirnames) + yield from (("f", os.path.join(root, f)) for f in filenames) + else: + raise ValueError("directory does not exist: {}".format(root_dir)) + + +def copy_tree(src, dst, exclude=tuple()): + """ + Recursively copy all files and subdirectories in the path + indicated by src to the path indicated by dst. If directories + don't exist, they are created. Do not copy files or directories + in the exclude list. 
+ """ + copied = [] + for fd, file_or_dir in recursive_iglob(src): + src_path = os.path.relpath(file_or_dir, src) + if src_path in exclude: + continue + target = os.path.join(dst, src_path) + if fd == "d": + os.makedirs(target, exist_ok=True) + else: + os.makedirs(os.path.dirname(target), exist_ok=True) + shutil.copy2(file_or_dir, target) + copied.append((fd, target)) + return copied + + +def ignore_missing_dir_error(_func, _path, excinfo): + """ Used by shutil.rmtree to ignore a FileNotFoundError """ + err_type, err_inst, traceback = excinfo + if err_type == FileNotFoundError: + return + raise err_inst + + +def move_tree(src, dst): + """ + Recursively move all files and subdirectories in the path + indicated by src to the path indicated by dst. If directories + don't exist, they are created. + """ + os.makedirs(dst, exist_ok=True) + moved = copy_tree(src, dst) + shutil.rmtree(src, onerror=ignore_missing_dir_error) + return moved + + +@contextmanager +def fd_open(path, flags=os.O_RDONLY, *args, **kwargs): + """ + Open the file or directory at path, yield its + file descriptor, and close it when finished. + flags, *args and **kwargs are passed on to os.open. + """ + fd = os.open(path, flags, *args, **kwargs) + try: + yield fd + finally: + os.close(fd) + + +@contextmanager +def fd_lock(file_descriptor, exclusive=True): + """ + Lock the object with the given file descriptor and unlock it + when finished. A lock can either be exclusive or shared by + setting the exclusive keyword argument to True or False. + """ + fcntl.flock(file_descriptor, fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH) + try: + yield + finally: + fcntl.flock(file_descriptor, fcntl.LOCK_UN) + + +def copy_test_script_files(markus_address, assignment_id, tests_path): + """ + Copy test script files for a given assignment to the tests_path + directory if they exist. tests_path may already exist and contain + files and subdirectories. + """ + test_script_outer_dir = redis_management.test_script_directory( + markus_address, assignment_id + ) + test_script_dir = os.path.join(test_script_outer_dir, FILES_DIRNAME) + if os.path.isdir(test_script_dir): + with fd_open(test_script_dir) as fd: + with fd_lock(fd, exclusive=False): + return copy_tree(test_script_dir, tests_path) + return [] + + +def setup_files(files_path, tests_path, markus_address, assignment_id): + """ + Copy test script files and student files to the working directory tests_path, + then make it the current working directory. 
+ The following permissions are also set: + - tests_path directory: rwxrwx--T + - test subdirectories: rwxrwx--T + - test files: rw-r----- + - student subdirectories: rwxrwx--- + - student files: rw-rw---- + """ + os.chmod(tests_path, 0o1770) + student_files = move_tree(files_path, tests_path) + for fd, file_or_dir in student_files: + if fd == "d": + os.chmod(file_or_dir, 0o770) + else: + os.chmod(file_or_dir, 0o660) + script_files = copy_test_script_files(markus_address, assignment_id, tests_path) + for fd, file_or_dir in script_files: + if fd == "d": + os.chmod(file_or_dir, 0o1770) + else: + os.chmod(file_or_dir, 0o640) + return student_files, script_files diff --git a/server/form_validation/__init__.py b/src/autotester/server/utils/form_validation.py similarity index 84% rename from server/form_validation/__init__.py rename to src/autotester/server/utils/form_validation.py index 6f4285bf..e6df2170 100644 --- a/server/form_validation/__init__.py +++ b/src/autotester/server/utils/form_validation.py @@ -2,6 +2,7 @@ from jsonschema.exceptions import best_match from copy import deepcopy + def extend_with_default(validator_class=Draft7Validator): """ Extends a validator class to add defaults before validation. @@ -9,7 +10,6 @@ def extend_with_default(validator_class=Draft7Validator): """ validate_props = validator_class.VALIDATORS["properties"] validate_array = validator_class.VALIDATORS["items"] - validate_oneOf = validator_class.VALIDATORS["oneOf"] # must use draft 4+ def set_defaults(validator, properties, instance, schema): """ Set defaults within a "properties" context """ @@ -34,11 +34,11 @@ def set_array_defaults(validator, properties, instance, schema): """ Set defaults within an "array" context """ if not validator.is_type(instance, "array"): return - + if not instance: default_val = None if "default" in properties: - default_val = properties['default'] + default_val = properties["default"] elif properties.get("type") == "array": default_val = [] elif properties.get("type") == "object": @@ -75,7 +75,7 @@ def set_oneOf_defaults(validator, properties, instance, schema): good_instance = new_instance if len(good_properties) == 0: - msg = f'{instance} is not valid under any of the given schemas' + msg = f"{instance} is not valid under any of the given schemas" yield ValidationError(msg, context=all_errors) elif len(good_properties) > 1: msg = f'{instance} is valid under each of {", ".join(repr(p) for p in good_properties)}' @@ -84,13 +84,18 @@ def set_oneOf_defaults(validator, properties, instance, schema): instance.clear() instance.update(good_instance) - custom_validators = {"properties": set_defaults, - "items": set_array_defaults, - "oneOf": set_oneOf_defaults} + custom_validators = { + "properties": set_defaults, + "items": set_array_defaults, + "oneOf": set_oneOf_defaults, + } return validators.extend(validator_class, custom_validators) -def validate_with_defaults(schema, obj, validator_class=Draft7Validator): + +def validate_with_defaults( + schema, obj, validator_class=Draft7Validator, best_only=True +): """ Return an iterator that yields errors from validating obj on schema after first filling in defaults on obj. @@ -99,5 +104,15 @@ def validate_with_defaults(schema, obj, validator_class=Draft7Validator): # first time to fill in defaults since validating 'required', 'minProperties', # etc. can't be done until the instance has been properly filled with defaults. 
list(validator.iter_errors(obj)) - return validator.iter_errors(obj) - \ No newline at end of file + errors = list(validator.iter_errors(obj)) + if best_only: + return best_match(errors) + return errors + + +def is_valid(obj, schema, validator_class=Draft7Validator): + """ + Return True if is valid for schema using the + validator . + """ + return validator_class(schema).is_valid(obj) diff --git a/server/hooks_context/utils.py b/src/autotester/server/utils/path_management.py similarity index 97% rename from server/hooks_context/utils.py rename to src/autotester/server/utils/path_management.py index e2a1d84f..531869e8 100644 --- a/server/hooks_context/utils.py +++ b/src/autotester/server/utils/path_management.py @@ -1,6 +1,7 @@ -from contextlib import contextmanager import os import sys +from contextlib import contextmanager + @contextmanager def current_directory(path): @@ -18,6 +19,7 @@ def current_directory(path): else: yield + @contextmanager def add_path(path, prepend=True): """ @@ -36,4 +38,4 @@ def add_path(path, prepend=True): i = (sys.path if prepend else sys.path[::-1]).index(path) sys.path.pop(i) except ValueError: - pass \ No newline at end of file + pass diff --git a/src/autotester/server/utils/redis_management.py b/src/autotester/server/utils/redis_management.py new file mode 100644 index 00000000..e09f56bc --- /dev/null +++ b/src/autotester/server/utils/redis_management.py @@ -0,0 +1,130 @@ +import redis +import rq +import time +from functools import wraps +from autotester.server.utils import file_management, string_management +from autotester.config import config + +CURRENT_TEST_SCRIPT_HASH = config["redis", "_current_test_script_hash"] +POP_INTERVAL_HASH = config["redis", "_pop_interval_hash"] + + +def redis_connection(): + """ + Return the currently open redis connection object. If there is no + connection currently open, one is created using the url specified in + config['redis', 'url'] + """ + conn = rq.get_current_connection() + if conn: + return conn + rq.use_connection(redis=redis.Redis.from_url(config["redis", "url"])) + return rq.get_current_connection() + + +def get_test_script_key(markus_address, assignment_id): + """ + Return unique key for each assignment used for + storing the location of test scripts in Redis + """ + clean_markus_address = file_management.clean_dir_name(markus_address) + return f"{clean_markus_address}_{assignment_id}" + + +def test_script_directory(markus_address, assignment_id, set_to=None): + """ + Return the directory containing the test scripts for a specific assignment. + Optionally updates the location of the test script directory to the value + of the set_to keyword argument (if it is not None) + """ + key = get_test_script_key(markus_address, assignment_id) + r = redis_connection() + if set_to is not None: + r.hset(CURRENT_TEST_SCRIPT_HASH, key, set_to) + out = r.hget(CURRENT_TEST_SCRIPT_HASH, key) + return string_management.decode_if_bytes(out) + + +def update_pop_interval_stat(queue_name): + """ + Update the values contained in the redis hash named REDIS_POP_HASH for + the queue named queue_name. This should be called whenever a new job + is popped from a queue for which we want to keep track of the popping + rate. For more details about the data updated see get_pop_interval_stat. 
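For illustration only (not part of this patch): a worked example of the burst statistics kept in this hash and how `get_avg_pop_interval` (below) and `_print_queue_info` in the CLI use them; the numbers are made up.

```python
# Worked example only: <queue>_start, <queue>_last and <queue>_count during a burst.
start, last, count = 100.0, 130.0, 4              # 4 jobs popped over 30 seconds
avg_pop_interval = (last - start) / (count - 1)   # 10.0 s between pops (get_avg_pop_interval)
print(avg_pop_interval * 6)                       # estimated wait printed by _print_queue_info
                                                  # for a queue currently holding 6 jobs -> 60.0
```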
+ """ + r = redis_connection() + now = time.time() + r.hsetnx(POP_INTERVAL_HASH, "{}_start".format(queue_name), now) + r.hset(POP_INTERVAL_HASH, "{}_last".format(queue_name), now) + r.hincrby(POP_INTERVAL_HASH, "{}_count".format(queue_name), 1) + + +def clear_pop_interval_stat(queue_name): + """ + Reset the values contained in the redis hash named REDIS_POP_HASH for + the queue named queue_name. This should be called whenever a queue becomes + empty. For more details about the data updated see get_pop_interval_stat. + """ + r = redis_connection() + r.hdel(POP_INTERVAL_HASH, "{}_start".format(queue_name)) + r.hset(POP_INTERVAL_HASH, "{}_last".format(queue_name), 0) + r.hset(POP_INTERVAL_HASH, "{}_count".format(queue_name), 0) + + +def get_pop_interval_stat(queue_name): + """ + Return the following data about the queue named queue_name: + - the time the first job was popped from the queue during the + current burst of jobs. + - the number of jobs popped from the queue during the current + burst of jobs. + - the time the most recent job was popped from the queue during + current burst of jobs. + """ + r = redis_connection() + start = r.hget(POP_INTERVAL_HASH, "{}_start".format(queue_name)) + last = r.hget(POP_INTERVAL_HASH, "{}_count".format(queue_name)) + count = r.hget(POP_INTERVAL_HASH, "{}_count".format(queue_name)) + return start, last, count + + +def get_avg_pop_interval(queue_name): + """ + Return the average interval between pops off of the end of the + queue named queue_name during the current burst of jobs. + Return None if there are no jobs in the queue, indicating that + there is no current burst. + """ + start, last, count = get_pop_interval_stat(queue_name) + try: + start = float(start) + last = float(last) + count = int(count) + except TypeError: + return None + count -= 1 + return (last - start) / count if count else 0 + + +def clean_up(): + """ Reset the pop interval data for each empty queue """ + with rq.Connection(redis_connection()): + for q in rq.Queue.all(): + if q.is_empty(): + clear_pop_interval_stat(q.name) + + +def clean_after(func): + """ + Call the clean_up function after the + decorated function func is finished + """ + + @wraps(func) + def wrapper(*args, **kwargs): + try: + return func(*args, **kwargs) + finally: + clean_up() + + return wrapper diff --git a/src/autotester/server/utils/resource_management.py b/src/autotester/server/utils/resource_management.py new file mode 100644 index 00000000..6c45d082 --- /dev/null +++ b/src/autotester/server/utils/resource_management.py @@ -0,0 +1,49 @@ +import resource +from autotester.config import config + +RLIMIT_ADJUSTMENTS = {"nproc": 10} + + +def rlimit_str2int(rlimit_string): + return getattr(resource, f"RLIMIT_{rlimit_string.upper()}") + + +def set_rlimits_before_test(): + """ + Sets rlimit settings specified in config file + This function ensures that for specific limits (defined in RLIMIT_ADJUSTMENTS), + there are at least n=RLIMIT_ADJUSTMENTS[limit] resources available for cleanup + processes that are not available for test processes. This ensures that cleanup + processes will always be able to run. 
+ """ + for limit_str in config["rlimit_settings"].keys() | RLIMIT_ADJUSTMENTS.keys(): + limit = rlimit_str2int(limit_str) + + values = config["rlimit_settings"].get( + limit_str, (resource.RLIM_INFINITY, resource.RLIM_INFINITY) + ) + curr_soft, curr_hard = resource.getrlimit(limit) + soft, hard = (min(vals) for vals in zip((curr_soft, curr_hard), values)) + # reduce the hard limit so that cleanup scripts will have at least + # adj more resources to use. + adj = RLIMIT_ADJUSTMENTS.get(limit_str, 0) + if (curr_hard - hard) < adj: + hard = curr_hard - adj + # make sure the soft limit doesn't exceed the hard limit + hard = max(hard, 0) + soft = max(min(hard, soft), 0) + + resource.setrlimit(limit, (soft, hard)) + + +def set_rlimits_before_cleanup(): + """ + Sets the rlimit settings specified in RLIMIT_ADJUSTMENTS + so that both the soft and hard limits are set as high as possible. This ensures + that cleanup processes will have as many resources as possible to run. + """ + for limit_str in RLIMIT_ADJUSTMENTS: + limit = rlimit_str2int(limit_str) + soft, hard = resource.getrlimit(limit) + soft = max(soft, hard) + resource.setrlimit(limit, (soft, hard)) diff --git a/src/autotester/server/utils/string_management.py b/src/autotester/server/utils/string_management.py new file mode 100644 index 00000000..3c55ab5b --- /dev/null +++ b/src/autotester/server/utils/string_management.py @@ -0,0 +1,40 @@ +import json + + +def stringify(*args): + for a in args: + yield str(a) + + +def decode_if_bytes(b, format_="utf-8"): + return b.decode(format_) if isinstance(b, bytes) else b + + +def loads_partial_json(json_string, expected_type=None): + """ + Return a list of objects loaded from a json string and a boolean + indicating whether the json_string was malformed. This will try + to load as many valid objects as possible from a (potentially + malformed) json string. If the optional expected_type keyword argument + is not None then only objects of the given type are returned, + if any objects of a different type are found, the string will + be treated as malfomed. + """ + i = 0 + decoder = json.JSONDecoder() + results = [] + malformed = False + json_string = json_string.strip() + while i < len(json_string): + try: + obj, ind = decoder.raw_decode(json_string[i:]) + if expected_type is None or isinstance(obj, expected_type): + results.append(obj) + elif json_string[i:i + ind].strip(): + malformed = True + i += ind + except json.JSONDecodeError: + if json_string[i].strip(): + malformed = True + i += 1 + return results, malformed diff --git a/src/autotester/server/utils/user_management.py b/src/autotester/server/utils/user_management.py new file mode 100644 index 00000000..2d2db136 --- /dev/null +++ b/src/autotester/server/utils/user_management.py @@ -0,0 +1,36 @@ +import os +import pwd +from autotester.exceptions import TesterUserError +from autotester.config import config +from autotester.server.utils.string_management import decode_if_bytes + + +def current_user(): + return pwd.getpwuid(os.getuid()).pw_name + + +def tester_user(): + """ + Get the workspace for the tester user specified by the MARKUSWORKERUSER + environment variable, return the user_name and path to that user's workspace. + + Raises an AutotestError if a tester user is not specified or if a workspace + has not been setup for that user. 
+ """ + user_name = os.environ.get("MARKUSWORKERUSER") + if user_name is None: + raise TesterUserError("No worker users available to run this job") + + user_workspace = os.path.join( + config["workspace"], config["_workspace_contents", "_workers"], user_name + ) + if not os.path.isdir(user_workspace): + raise TesterUserError(f"No workspace directory for user: {user_name}") + + return user_name, decode_if_bytes(user_workspace) + + +def get_reaper_username(test_username): + for users in (users for conf in config["workers"] for users in conf["users"]): + if users["name"] == test_username: + return users["reaper"] diff --git a/src/autotester/setup.py b/src/autotester/setup.py new file mode 100644 index 00000000..2f389e79 --- /dev/null +++ b/src/autotester/setup.py @@ -0,0 +1,20 @@ +from setuptools import setup, find_packages + +test_exclusions = ["*.tests", "*.tests.*", "tests.*", "tests"] + +packages = ["testers"] + [ + f"testers.{pkg}" for pkg in find_packages(where="testers", exclude=test_exclusions) +] + +setup( + name="markus-autotester-testers", + version="2.0", + description="Testers for the automatic tester for programming assignments", + url="https://github.com/MarkUsProject/markus-autotesting", + author="Misha Schwartz, Alessio Di Sandro", + author_email="mschwa@cs.toronto.edu", + license="MIT", + include_package_data=True, + packages=packages, + zip_safe=False, +) diff --git a/testers/testers/java/__init__.py b/src/autotester/testers/__init__.py similarity index 100% rename from testers/testers/java/__init__.py rename to src/autotester/testers/__init__.py diff --git a/testers/testers/py/__init__.py b/src/autotester/testers/custom/__init__.py similarity index 100% rename from testers/testers/py/__init__.py rename to src/autotester/testers/custom/__init__.py diff --git a/src/autotester/testers/custom/bin/install.sh b/src/autotester/testers/custom/bin/install.sh new file mode 100755 index 00000000..cb7db56a --- /dev/null +++ b/src/autotester/testers/custom/bin/install.sh @@ -0,0 +1,17 @@ +#!/usr/bin/env bash + +set -e + +# script starts here +if [[ $# -ne 0 ]]; then + echo "Usage: $0" + exit 1 +fi + +# vars +THISSCRIPT=$(readlink -f "${BASH_SOURCE[0]}") +THISDIR=$(dirname "${THISSCRIPT}") +SPECSDIR=$(readlink -f "${THISDIR}/../specs") + +# main +touch "${SPECSDIR}/.installed" diff --git a/src/autotester/testers/custom/bin/uninstall.sh b/src/autotester/testers/custom/bin/uninstall.sh new file mode 100755 index 00000000..679a58f6 --- /dev/null +++ b/src/autotester/testers/custom/bin/uninstall.sh @@ -0,0 +1,15 @@ +#!/usr/bin/env bash + +# script starts here +if [[ $# -ne 0 ]]; then + echo "Usage: $0" + exit 1 +fi + +# vars +THISSCRIPT=$(readlink -f "${BASH_SOURCE[0]}") +THISDIR=$(dirname "${THISSCRIPT}") +SPECSDIR=$(readlink -f "${THISDIR}/../specs") + +# main +rm -f "${SPECSDIR}/.installed" diff --git a/src/autotester/testers/custom/default_hooks.py b/src/autotester/testers/custom/default_hooks.py new file mode 100644 index 00000000..f4247477 --- /dev/null +++ b/src/autotester/testers/custom/default_hooks.py @@ -0,0 +1,11 @@ +import os + + +def before_all_custom(settings, **_kwargs): + """ Make script files executable """ + for test_data in settings["test_data"]: + for script_file in test_data["script_files"]: + os.chmod(script_file, 0o755) + + +HOOKS = [before_all_custom] diff --git a/testers/testers/custom/markus_custom_tester.py b/src/autotester/testers/custom/markus_custom_tester.py similarity index 72% rename from testers/testers/custom/markus_custom_tester.py rename to 
src/autotester/testers/custom/markus_custom_tester.py index 29040c69..fedb673a 100644 --- a/testers/testers/custom/markus_custom_tester.py +++ b/src/autotester/testers/custom/markus_custom_tester.py @@ -3,12 +3,11 @@ class MarkusCustomTester(MarkusTester): - def __init__(self, specs): super().__init__(specs, test_class=None) @MarkusTester.run_decorator def run(self): - file_paths = self.specs['test_data', 'script_files'] + file_paths = self.specs["test_data", "script_files"] for file_path in file_paths: - subprocess.run(f'./{file_path}') + subprocess.run(f"./{file_path}") diff --git a/testers/testers/custom/specs/settings_schema.json b/src/autotester/testers/custom/specs/settings_schema.json similarity index 99% rename from testers/testers/custom/specs/settings_schema.json rename to src/autotester/testers/custom/specs/settings_schema.json index 81db9977..7eb79e7d 100644 --- a/testers/testers/custom/specs/settings_schema.json +++ b/src/autotester/testers/custom/specs/settings_schema.json @@ -67,4 +67,4 @@ } } } -} \ No newline at end of file +} diff --git a/testers/testers/pyta/__init__.py b/src/autotester/testers/custom/tests/__init__.py similarity index 100% rename from testers/testers/pyta/__init__.py rename to src/autotester/testers/custom/tests/__init__.py diff --git a/testers/testers/custom/tests/script_files/autotest_01.sh b/src/autotester/testers/custom/tests/script_files/autotest_01.sh similarity index 100% rename from testers/testers/custom/tests/script_files/autotest_01.sh rename to src/autotester/testers/custom/tests/script_files/autotest_01.sh diff --git a/testers/testers/custom/tests/specs.json b/src/autotester/testers/custom/tests/specs.json similarity index 100% rename from testers/testers/custom/tests/specs.json rename to src/autotester/testers/custom/tests/specs.json diff --git a/testers/testers/racket/__init__.py b/src/autotester/testers/custom/tests/student_files/__init__.py similarity index 100% rename from testers/testers/racket/__init__.py rename to src/autotester/testers/custom/tests/student_files/__init__.py diff --git a/src/autotester/testers/custom/tests/student_files/submission.py b/src/autotester/testers/custom/tests/student_files/submission.py new file mode 100644 index 00000000..6eead424 --- /dev/null +++ b/src/autotester/testers/custom/tests/student_files/submission.py @@ -0,0 +1,22 @@ +#!/usr/bin/env python + +""" +This student submission file is used to test the autotester +It represents the test case where: + + The submission passes with full marks +""" + +import json + +print( + json.dumps( + { + "name": "pass_test", + "output": "NA", + "marks_earned": 2, + "marks_total": 2, + "status": "pass", + } + ) +) diff --git a/server/bin/default_tester_requirements.txt b/src/autotester/testers/haskell/__init__.py similarity index 100% rename from server/bin/default_tester_requirements.txt rename to src/autotester/testers/haskell/__init__.py diff --git a/testers/testers/haskell/bin/install.sh b/src/autotester/testers/haskell/bin/install.sh similarity index 82% rename from testers/testers/haskell/bin/install.sh rename to src/autotester/testers/haskell/bin/install.sh index b3fb9953..275513fb 100755 --- a/testers/testers/haskell/bin/install.sh +++ b/src/autotester/testers/haskell/bin/install.sh @@ -24,11 +24,11 @@ if [[ $# -ne 0 ]]; then fi # vars -THISSCRIPT=$(readlink -f ${BASH_SOURCE}) -TESTERDIR=$(dirname $(dirname ${THISSCRIPT})) -SPECSDIR=${TESTERDIR}/specs +THISSCRIPT=$(readlink -f "${BASH_SOURCE[0]}") +THISDIR=$(dirname "${THISSCRIPT}") +SPECSDIR=$(readlink -f 
"${THISDIR}/../specs") # main install_packages install_haskell_packages -touch ${SPECSDIR}/.installed +touch "${SPECSDIR}/.installed" diff --git a/testers/testers/haskell/bin/uninstall.sh b/src/autotester/testers/haskell/bin/uninstall.sh similarity index 73% rename from testers/testers/haskell/bin/uninstall.sh rename to src/autotester/testers/haskell/bin/uninstall.sh index 74b0420a..880e64a9 100755 --- a/testers/testers/haskell/bin/uninstall.sh +++ b/src/autotester/testers/haskell/bin/uninstall.sh @@ -7,11 +7,11 @@ if [[ $# -ne 0 ]]; then fi # vars -THISSCRIPT=$(readlink -f ${BASH_SOURCE}) -TESTERDIR=$(dirname $(dirname ${THISSCRIPT})) -SPECSDIR=${TESTERDIR}/specs +THISSCRIPT=$(readlink -f "${BASH_SOURCE[0]}") +THISDIR=$(dirname "${THISSCRIPT}") +SPECSDIR=$(readlink -f "${THISDIR}/../specs") # main echo "[HASKELL-UNINSTALL] The following system packages have not been uninstalled: ghc cabal-install python3. You may uninstall them if you wish." echo "[HASKELL-UNINSTALL] The following cabal packages have not been uninstalled: tasty-stats tasty-discover tasty-quickcheck. You may uninstall them if you can figure out how." -rm -f ${SPECSDIR}/.installed +rm -f "${SPECSDIR}/.installed" diff --git a/testers/testers/haskell/markus_haskell_tester.py b/src/autotester/testers/haskell/markus_haskell_tester.py similarity index 66% rename from testers/testers/haskell/markus_haskell_tester.py rename to src/autotester/testers/haskell/markus_haskell_tester.py index 746e3000..86093b4e 100644 --- a/testers/testers/haskell/markus_haskell_tester.py +++ b/src/autotester/testers/haskell/markus_haskell_tester.py @@ -5,19 +5,19 @@ from testers.markus_tester import MarkusTester, MarkusTest, MarkusTestError -class MarkusHaskellTest(MarkusTest): +class MarkusHaskellTest(MarkusTest): def __init__(self, tester, test_file, result, feedback_open=None): - self._test_name = result.get('name') + self._test_name = result.get("name") self._file_name = test_file - self.status = result['status'] - self.message = result['description'] + self.status = result["status"] + self.message = result["description"] super().__init__(tester, feedback_open) @property def test_name(self): if self._test_name: - return '.'.join([self._file_name, self._test_name]) + return ".".join([self._file_name, self._test_name]) return self._file_name @MarkusTest.run_decorator @@ -29,28 +29,27 @@ def run(self): else: return self.error(message=self.message) -class MarkusHaskellTester(MarkusTester): +class MarkusHaskellTester(MarkusTester): # column indexes of relevant data from tasty-stats csv # reference: http://hackage.haskell.org/package/tasty-stats - TASTYSTATS = {'name' : 1, - 'time' : 2, - 'result' : 3, - 'description' : -1} + TASTYSTATS = {"name": 1, "time": 2, "result": 3, "description": -1} def __init__(self, specs, test_class=MarkusHaskellTest): super().__init__(specs, test_class) - + def _test_run_flags(self, test_file): """ Return a list of additional arguments to the tasty-discover executable """ module_flag = f"--modules={os.path.basename(test_file)}" stats_flag = "--ingredient=Test.Tasty.Stats.consoleStatsReporter" - flags = [module_flag, - stats_flag, - f"--timeout={self.specs['test_data', 'test_timeout']}s", - f"--quickcheck-tests={self.specs['test_data', 'test_cases']}"] + flags = [ + module_flag, + stats_flag, + f"--timeout={self.specs['test_data', 'test_timeout']}s", + f"--quickcheck-tests={self.specs['test_data', 'test_cases']}", + ] return flags def _parse_test_results(self, reader): @@ -61,10 +60,12 @@ def _parse_test_results(self, 
reader): """ test_results = [] for line in reader: - result = {'status' : line[self.TASTYSTATS['result']], - 'name' : line[self.TASTYSTATS['name']], - 'description' : line[self.TASTYSTATS['description']], - 'time' : line[self.TASTYSTATS['time']]} + result = { + "status": line[self.TASTYSTATS["result"]], + "name": line[self.TASTYSTATS["name"]], + "description": line[self.TASTYSTATS["description"]], + "time": line[self.TASTYSTATS["time"]], + } test_results.append(result) return test_results @@ -77,17 +78,23 @@ def run_haskell_tests(self): """ results = {} this_dir = os.getcwd() - for test_file in self.specs['test_data', 'script_files']: + for test_file in self.specs["test_data", "script_files"]: with tempfile.NamedTemporaryFile(dir=this_dir) as f: - cmd = ['tasty-discover', '.', '_', f.name] + self._test_run_flags(test_file) - subprocess.run(cmd, stdout=subprocess.DEVNULL, universal_newlines=True, check=True) + cmd = ["tasty-discover", ".", "_", f.name] + self._test_run_flags( + test_file + ) + subprocess.run( + cmd, stdout=subprocess.DEVNULL, universal_newlines=True, check=True + ) with tempfile.NamedTemporaryFile(mode="w+", dir=this_dir) as sf: - cmd = ['runghc', f.name, f"--stats={sf.name}"] - subprocess.run(cmd, - stdout=subprocess.DEVNULL, - stderr=subprocess.PIPE, - universal_newlines=True, - check=True) + cmd = ["runghc", f.name, f"--stats={sf.name}"] + subprocess.run( + cmd, + stdout=subprocess.DEVNULL, + stderr=subprocess.PIPE, + universal_newlines=True, + check=True, + ) results[test_file] = self._parse_test_results(csv.reader(sf)) return results diff --git a/testers/testers/haskell/specs/settings_schema.json b/src/autotester/testers/haskell/specs/settings_schema.json similarity index 99% rename from testers/testers/haskell/specs/settings_schema.json rename to src/autotester/testers/haskell/specs/settings_schema.json index 463b1d59..c7defbd3 100644 --- a/testers/testers/haskell/specs/settings_schema.json +++ b/src/autotester/testers/haskell/specs/settings_schema.json @@ -79,4 +79,4 @@ } } } -} \ No newline at end of file +} diff --git a/testers/testers/haskell/tests/script_files/Test.hs b/src/autotester/testers/haskell/tests/script_files/Test.hs similarity index 100% rename from testers/testers/haskell/tests/script_files/Test.hs rename to src/autotester/testers/haskell/tests/script_files/Test.hs diff --git a/testers/testers/haskell/tests/specs.json b/src/autotester/testers/haskell/tests/specs.json similarity index 100% rename from testers/testers/haskell/tests/specs.json rename to src/autotester/testers/haskell/tests/specs.json diff --git a/testers/testers/haskell/tests/student_files/Submission.hs b/src/autotester/testers/haskell/tests/student_files/Submission.hs similarity index 100% rename from testers/testers/haskell/tests/student_files/Submission.hs rename to src/autotester/testers/haskell/tests/student_files/Submission.hs diff --git a/testers/testers/tests/.gitkeep b/src/autotester/testers/java/__init__.py similarity index 100% rename from testers/testers/tests/.gitkeep rename to src/autotester/testers/java/__init__.py diff --git a/testers/testers/java/bin/install.sh b/src/autotester/testers/java/bin/install.sh similarity index 64% rename from testers/testers/java/bin/install.sh rename to src/autotester/testers/java/bin/install.sh index af6fe57b..a2115285 100755 --- a/testers/testers/java/bin/install.sh +++ b/src/autotester/testers/java/bin/install.sh @@ -9,14 +9,14 @@ install_packages() { compile_tester() { echo "[JAVA-INSTALL] Compiling tester" - pushd ${JAVADIR} > /dev/null 
+ pushd "${JAVADIR}" > /dev/null ./gradlew installDist --no-daemon popd > /dev/null } update_specs() { echo "[JAVA-INSTALL] Updating specs" - echo '{}' | jq ".path_to_tester_jars = \"${JAVADIR}/build/install/MarkusJavaTester/lib\"" > ${TESTERDIR}/specs/install_settings.json + echo '{}' | jq ".path_to_tester_jars = \"${JAVADIR}/build/install/MarkusJavaTester/lib\"" > "${SPECSDIR}/install_settings.json" } # script starts here @@ -26,13 +26,13 @@ if [[ $# -ne 0 ]]; then fi # vars -THISSCRIPT=$(readlink -f ${BASH_SOURCE}) -TESTERDIR=$(dirname $(dirname ${THISSCRIPT})) -SPECSDIR=${TESTERDIR}/specs -JAVADIR=${TESTERDIR}/lib +THISSCRIPT=$(readlink -f "${BASH_SOURCE[0]}") +THISDIR=$(dirname "${THISSCRIPT}") +SPECSDIR=$(readlink -f "${THISDIR}/../specs") +JAVADIR=$(readlink -f "${THISDIR}/../lib") # main install_packages compile_tester update_specs -touch ${SPECSDIR}/.installed +touch "${SPECSDIR}/.installed" diff --git a/testers/testers/java/bin/uninstall.sh b/src/autotester/testers/java/bin/uninstall.sh similarity index 57% rename from testers/testers/java/bin/uninstall.sh rename to src/autotester/testers/java/bin/uninstall.sh index 148be4a6..9fad619d 100755 --- a/testers/testers/java/bin/uninstall.sh +++ b/src/autotester/testers/java/bin/uninstall.sh @@ -2,13 +2,13 @@ remove_tester() { echo "[JAVA-UNINSTALL] Removing compiled tester" - rm -rf ${JAVADIR}/build - rm -rf ${JAVADIR}/.gradle + rm -rf "${JAVADIR}/build" + rm -rf "${JAVADIR}/.gradle" } reset_specs() { echo "[JAVA-UNINSTALL] Resetting specs" - rm -f ${TESTERDIR}/specs/install_settings.json + rm -f "${SPECSDIR}/install_settings.json" } # script starts here @@ -18,13 +18,13 @@ if [[ $# -ne 0 ]]; then fi # vars -THISSCRIPT=$(readlink -f ${BASH_SOURCE}) -TESTERDIR=$(dirname $(dirname ${THISSCRIPT})) -SPECSDIR=${TESTERDIR}/specs -JAVADIR=${TESTERDIR}/lib +THISSCRIPT=$(readlink -f "${BASH_SOURCE[0]}") +THISDIR=$(dirname "${THISSCRIPT}") +SPECSDIR=$(readlink -f "${THISDIR}/../specs") +JAVADIR=$(readlink -f "${THISDIR}/../lib") # main remove_tester reset_specs echo "[JAVA-UNINSTALL] The following system packages have not been uninstalled: python3 openjdk-12-jdk jq. You may uninstall them if you wish." 
-rm -f ${SPECSDIR}/.installed +rm -f "${SPECSDIR}/.installed" diff --git a/testers/testers/java/lib/build.gradle b/src/autotester/testers/java/lib/build.gradle similarity index 100% rename from testers/testers/java/lib/build.gradle rename to src/autotester/testers/java/lib/build.gradle diff --git a/testers/testers/java/lib/gradle/wrapper/gradle-wrapper.jar b/src/autotester/testers/java/lib/gradle/wrapper/gradle-wrapper.jar similarity index 100% rename from testers/testers/java/lib/gradle/wrapper/gradle-wrapper.jar rename to src/autotester/testers/java/lib/gradle/wrapper/gradle-wrapper.jar diff --git a/testers/testers/java/lib/gradle/wrapper/gradle-wrapper.properties b/src/autotester/testers/java/lib/gradle/wrapper/gradle-wrapper.properties similarity index 100% rename from testers/testers/java/lib/gradle/wrapper/gradle-wrapper.properties rename to src/autotester/testers/java/lib/gradle/wrapper/gradle-wrapper.properties diff --git a/testers/testers/java/lib/gradlew b/src/autotester/testers/java/lib/gradlew similarity index 100% rename from testers/testers/java/lib/gradlew rename to src/autotester/testers/java/lib/gradlew diff --git a/testers/testers/java/lib/gradlew.bat b/src/autotester/testers/java/lib/gradlew.bat similarity index 100% rename from testers/testers/java/lib/gradlew.bat rename to src/autotester/testers/java/lib/gradlew.bat diff --git a/testers/testers/java/lib/settings.gradle b/src/autotester/testers/java/lib/settings.gradle similarity index 100% rename from testers/testers/java/lib/settings.gradle rename to src/autotester/testers/java/lib/settings.gradle diff --git a/testers/testers/java/lib/src/main/java/edu/toronto/cs/teach/MarkusJavaTester.java b/src/autotester/testers/java/lib/src/main/java/edu/toronto/cs/teach/MarkusJavaTester.java similarity index 100% rename from testers/testers/java/lib/src/main/java/edu/toronto/cs/teach/MarkusJavaTester.java rename to src/autotester/testers/java/lib/src/main/java/edu/toronto/cs/teach/MarkusJavaTester.java diff --git a/testers/testers/java/markus_java_tester.py b/src/autotester/testers/java/markus_java_tester.py similarity index 58% rename from testers/testers/java/markus_java_tester.py rename to src/autotester/testers/java/markus_java_tester.py index 09ba7db3..9d51d936 100644 --- a/testers/testers/java/markus_java_tester.py +++ b/src/autotester/testers/java/markus_java_tester.py @@ -6,29 +6,28 @@ class MarkusJavaTest(MarkusTest): - class JUnitStatus(enum.Enum): SUCCESSFUL = 1 ABORTED = 2 FAILED = 3 ERRORS = { - 'bad_javac': 'Java compilation error: "{}"', - 'bad_java': 'Java runtime error: "{}"' + "bad_javac": 'Java compilation error: "{}"', + "bad_java": 'Java runtime error: "{}"', } def __init__(self, tester, result, feedback_open=None): - self.class_name, _sep, self.method_name = result['name'].partition('.') - self.description = result.get('description') - self.status = MarkusJavaTest.JUnitStatus[result['status']] - self.message = result.get('message') + self.class_name, _sep, self.method_name = result["name"].partition(".") + self.description = result.get("description") + self.status = MarkusJavaTest.JUnitStatus[result["status"]] + self.message = result.get("message") super().__init__(tester, feedback_open) @property def test_name(self): - name = f'{self.class_name}.{self.method_name}' + name = f"{self.class_name}.{self.method_name}" if self.description: - name += f' ({self.description})' + name += f" ({self.description})" return name @MarkusTest.run_decorator @@ -43,24 +42,39 @@ def run(self): class 
MarkusJavaTester(MarkusTester): - JAVA_TESTER_CLASS = 'edu.toronto.cs.teach.MarkusJavaTester' + JAVA_TESTER_CLASS = "edu.toronto.cs.teach.MarkusJavaTester" def __init__(self, specs, test_class=MarkusJavaTest): super().__init__(specs, test_class) self.java_classpath = f'.:{self.specs["install_data", "path_to_tester_jars"]}/*' def compile(self): - javac_command = ['javac', '-cp', self.java_classpath] - javac_command.extend(self.specs['test_data', 'script_files']) + javac_command = ["javac", "-cp", self.java_classpath] + javac_command.extend(self.specs["test_data", "script_files"]) # student files imported by tests will be compiled on cascade - subprocess.run(javac_command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True, - check=True) + subprocess.run( + javac_command, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + universal_newlines=True, + check=True, + ) def run_junit(self): - java_command = ['java', '-cp', self.java_classpath, MarkusJavaTester.JAVA_TESTER_CLASS] - java_command.extend(self.specs['test_data', 'script_files']) - java = subprocess.run(java_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, - check=True) + java_command = [ + "java", + "-cp", + self.java_classpath, + MarkusJavaTester.JAVA_TESTER_CLASS, + ] + java_command.extend(self.specs["test_data", "script_files"]) + java = subprocess.run( + java_command, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + universal_newlines=True, + check=True, + ) return java @MarkusTester.run_decorator @@ -69,7 +83,7 @@ def run(self): try: self.compile() except subprocess.CalledProcessError as e: - msg = MarkusJavaTest.ERRORS['bad_javac'].format(e.stdout) + msg = MarkusJavaTest.ERRORS["bad_javac"].format(e.stdout) raise MarkusTestError(msg) from e # run the tests with junit try: @@ -77,7 +91,7 @@ def run(self): if results.stderr: raise MarkusTestError(results.stderr) except subprocess.CalledProcessError as e: - msg = MarkusJavaTest.ERRORS['bad_java'].format(e.stdout + e.stderr) + msg = MarkusJavaTest.ERRORS["bad_java"].format(e.stdout + e.stderr) raise MarkusTestError(msg) from e with self.open_feedback() as feedback_open: for result in json.loads(results.stdout): diff --git a/testers/testers/java/specs/settings_schema.json b/src/autotester/testers/java/specs/settings_schema.json similarity index 99% rename from testers/testers/java/specs/settings_schema.json rename to src/autotester/testers/java/specs/settings_schema.json index deb98b43..a823c0b9 100644 --- a/testers/testers/java/specs/settings_schema.json +++ b/src/autotester/testers/java/specs/settings_schema.json @@ -67,4 +67,4 @@ } } } -} \ No newline at end of file +} diff --git a/testers/testers/java/tests/script_files/Test1.java b/src/autotester/testers/java/tests/script_files/Test1.java similarity index 100% rename from testers/testers/java/tests/script_files/Test1.java rename to src/autotester/testers/java/tests/script_files/Test1.java diff --git a/testers/testers/java/tests/script_files/Test2.java b/src/autotester/testers/java/tests/script_files/Test2.java similarity index 100% rename from testers/testers/java/tests/script_files/Test2.java rename to src/autotester/testers/java/tests/script_files/Test2.java diff --git a/testers/testers/java/tests/specs.json b/src/autotester/testers/java/tests/specs.json similarity index 100% rename from testers/testers/java/tests/specs.json rename to src/autotester/testers/java/tests/specs.json diff --git a/testers/testers/java/tests/student_files/Submission.java 
b/src/autotester/testers/java/tests/student_files/Submission.java similarity index 100% rename from testers/testers/java/tests/student_files/Submission.java rename to src/autotester/testers/java/tests/student_files/Submission.java diff --git a/testers/testers/markus_test_specs.py b/src/autotester/testers/markus_test_specs.py similarity index 99% rename from testers/testers/markus_test_specs.py rename to src/autotester/testers/markus_test_specs.py index 0f1d6a66..dcddca78 100644 --- a/testers/testers/markus_test_specs.py +++ b/src/autotester/testers/markus_test_specs.py @@ -3,7 +3,6 @@ class MarkusTestSpecs(Mapping): - def __init__(self, *args, **kwargs): self._specs = dict(*args, **kwargs) diff --git a/testers/testers/markus_tester.py b/src/autotester/testers/markus_tester.py similarity index 73% rename from testers/testers/markus_tester.py rename to src/autotester/testers/markus_tester.py index 0f70ae0f..d9d61cbe 100644 --- a/testers/testers/markus_tester.py +++ b/src/autotester/testers/markus_tester.py @@ -9,21 +9,21 @@ class MarkusTestError(Exception): pass -class MarkusTest(ABC): +class MarkusTest(ABC): class Status(enum.Enum): - PASS = 'pass' - PARTIAL = 'partial' - FAIL = 'fail' - ERROR = 'error' - ERROR_ALL = 'error_all' + PASS = "pass" + PARTIAL = "partial" + FAIL = "fail" + ERROR = "error" + ERROR_ALL = "error_all" @abstractmethod def __init__(self, tester, feedback_open=None): self.tester = tester self.points_total = self.get_total_points() if self.points_total <= 0: - raise ValueError('The test total points must be > 0') + raise ValueError("The test total points must be > 0") self.feedback_open = feedback_open @property @@ -35,10 +35,12 @@ def test_name(self): pass def get_total_points(self): - return self.tester.specs.get('points', default={}).get(self.test_name, 1) + return self.tester.specs.get("points", default={}).get(self.test_name, 1) @staticmethod - def format_result(test_name, status, output, points_earned, points_total, time=None): + def format_result( + test_name, status, output, points_earned, points_total, time=None + ): """ Formats a test result as expected by Markus. :param test_name: The test name @@ -51,19 +53,23 @@ def format_result(test_name, status, output, points_earned, points_total, time=N :return The formatted test result. """ if points_total < 0: - raise ValueError('The test total points must be >= 0') + raise ValueError("The test total points must be >= 0") if points_earned < 0: - raise ValueError('The test points earned must be >= 0') + raise ValueError("The test points earned must be >= 0") if time is not None: if not isinstance(time, int) or time < 0: - raise ValueError('The time must be a positive integer or None') - - result_json = json.dumps({'name': test_name, - 'output': output, - 'marks_earned': points_earned, - 'marks_total': points_total, - 'status': status.value, - 'time': time}) + raise ValueError("The time must be a positive integer or None") + + result_json = json.dumps( + { + "name": test_name, + "output": output, + "marks_earned": points_earned, + "marks_total": points_total, + "status": status.value, + "time": time, + } + ) return result_json def format(self, status, output, points_earned): @@ -75,9 +81,13 @@ def format(self, status, output, points_earned): points when assigning bonus points). :return The formatted test result. 
""" - return MarkusTest.format_result(self.test_name, status, output, points_earned, self.points_total) + return MarkusTest.format_result( + self.test_name, status, output, points_earned, self.points_total + ) - def add_feedback(self, status, feedback='', oracle_solution=None, test_solution=None): + def add_feedback( + self, status, feedback="", oracle_solution=None, test_solution=None + ): """ Adds the feedback of this test to the feedback file. :param status: A member of MarkusTest.Status. @@ -87,20 +97,24 @@ def add_feedback(self, status, feedback='', oracle_solution=None, test_solution= """ # TODO Reconcile with format: return both, or print both if self.feedback_open is None: - raise ValueError('No feedback file enabled') - self.feedback_open.write('========== {}: {} ==========\n\n'.format(self.test_name, status.value.upper())) + raise ValueError("No feedback file enabled") + self.feedback_open.write( + "========== {}: {} ==========\n\n".format( + self.test_name, status.value.upper() + ) + ) if feedback: - self.feedback_open.write('## Feedback: {}\n\n'.format(feedback)) + self.feedback_open.write("## Feedback: {}\n\n".format(feedback)) if status != self.Status.PASS: if oracle_solution: - self.feedback_open.write('## Expected Solution:\n\n') + self.feedback_open.write("## Expected Solution:\n\n") self.feedback_open.write(oracle_solution) if test_solution: - self.feedback_open.write('## Your Solution:\n\n') + self.feedback_open.write("## Your Solution:\n\n") self.feedback_open.write(test_solution) - self.feedback_open.write('\n') + self.feedback_open.write("\n") - def passed_with_bonus(self, points_bonus, message=''): + def passed_with_bonus(self, points_bonus, message=""): """ Passes this test earning bonus points in addition to the test total points. If a feedback file is enabled, adds feedback to it. @@ -109,24 +123,32 @@ def passed_with_bonus(self, points_bonus, message=''): :return The formatted passed test. """ if points_bonus < 0: - raise ValueError('The test bonus points must be >= 0') - result = self.format(status=self.Status.PASS, output=message, points_earned=self.points_total+points_bonus) + raise ValueError("The test bonus points must be >= 0") + result = self.format( + status=self.Status.PASS, + output=message, + points_earned=self.points_total + points_bonus, + ) if self.feedback_open: self.add_feedback(status=self.Status.PASS) return result - def passed(self, message=''): + def passed(self, message=""): """ Passes this test earning the test total points. If a feedback file is enabled, adds feedback to it. :param message: An optional message, will be shown as test output. :return The formatted passed test. """ - result = self.format(status=self.Status.PASS, output=message, points_earned=self.points_total) + result = self.format( + status=self.Status.PASS, output=message, points_earned=self.points_total + ) if self.feedback_open: self.add_feedback(status=self.Status.PASS) return result - def partially_passed(self, points_earned, message, oracle_solution=None, test_solution=None): + def partially_passed( + self, points_earned, message, oracle_solution=None, test_solution=None + ): """ Partially passes this test with some points earned. If a feedback file is enabled, adds feedback to it. :param points_earned: The points earned by the test, must be a float > 0 and < the test total points. @@ -136,13 +158,19 @@ def partially_passed(self, points_earned, message, oracle_solution=None, test_so :return The formatted partially passed test. 
""" if points_earned <= 0: - raise ValueError('The test points earned must be > 0') + raise ValueError("The test points earned must be > 0") if points_earned >= self.points_total: - raise ValueError('The test points earned must be < the test total points') - result = self.format(status=self.Status.PARTIAL, output=message, points_earned=points_earned) + raise ValueError("The test points earned must be < the test total points") + result = self.format( + status=self.Status.PARTIAL, output=message, points_earned=points_earned + ) if self.feedback_open: - self.add_feedback(status=self.Status.PARTIAL, feedback=message, oracle_solution=oracle_solution, - test_solution=test_solution) + self.add_feedback( + status=self.Status.PARTIAL, + feedback=message, + oracle_solution=oracle_solution, + test_solution=test_solution, + ) return result def failed(self, message, oracle_solution=None, test_solution=None): @@ -155,11 +183,15 @@ def failed(self, message, oracle_solution=None, test_solution=None): """ result = self.format(status=self.Status.FAIL, output=message, points_earned=0) if self.feedback_open: - self.add_feedback(status=self.Status.FAIL, feedback=message, oracle_solution=oracle_solution, - test_solution=test_solution) + self.add_feedback( + status=self.Status.FAIL, + feedback=message, + oracle_solution=oracle_solution, + test_solution=test_solution, + ) return result - def done(self, points_earned, message='', oracle_solution=None, test_solution=None): + def done(self, points_earned, message="", oracle_solution=None, test_solution=None): """ Passes, partially passes or fails this test depending on the points earned. If the points are <= 0 this test is failed with 0 points earned, if the points are >= test total points this test is passed earning the test total @@ -179,7 +211,9 @@ def done(self, points_earned, message='', oracle_solution=None, test_solution=No points_bonus = points_earned - self.points_total return self.passed_with_bonus(points_bonus, message) else: - return self.partially_passed(points_earned, message, oracle_solution, test_solution) + return self.partially_passed( + points_earned, message, oracle_solution, test_solution + ) def error(self, message): """ @@ -214,6 +248,7 @@ def run_decorator(run_func): only the error message is sent in the description, otherwise the whole traceback is sent. """ + @wraps(run_func) def run_func_wrapper(self, *args, **kwargs): try: @@ -224,9 +259,10 @@ def run_func_wrapper(self, *args, **kwargs): self.after_successful_test_run() except MarkusTestError as e: result_json = self.error(message=str(e)) - except Exception: - result_json = self.error(message=traceback.format_exc()) + except Exception as e: + result_json = self.error(message=f"{traceback.format_exc()}\n{e}") return result_json + return run_func_wrapper @abstractmethod @@ -239,7 +275,6 @@ def run(self): class MarkusTester(ABC): - @abstractmethod def __init__(self, specs, test_class=MarkusTest): self.specs = specs @@ -255,8 +290,13 @@ def error_all(message, points_total=0, expected=False): :return The formatted erred tests. 
""" status = MarkusTest.Status.ERROR if expected else MarkusTest.Status.ERROR_ALL - return MarkusTest.format_result(test_name='All tests', status=status, output=message, - points_earned=0, points_total=points_total) + return MarkusTest.format_result( + test_name="All tests", + status=status, + output=message, + points_earned=0, + points_total=points_total, + ) def before_tester_run(self): """ @@ -280,6 +320,7 @@ def run_decorator(run_func): only the error message is sent in the description, otherwise the whole traceback is sent. """ + @wraps(run_func) def run_func_wrapper(self, *args, **kwargs): try: @@ -287,14 +328,18 @@ def run_func_wrapper(self, *args, **kwargs): return run_func(self, *args, **kwargs) except MarkusTestError as e: print(MarkusTester.error_all(message=str(e), expected=True), flush=True) - except Exception: - print(MarkusTester.error_all(message=traceback.format_exc()), flush=True) + except Exception as e: + print( + MarkusTester.error_all(message=f"{traceback.format_exc()}\n{e}"), + flush=True, + ) finally: self.after_tester_run() + return run_func_wrapper @contextmanager - def open_feedback(self, filename=None, mode='w'): + def open_feedback(self, filename=None, mode="w"): """ Yields an open file object, opened in mode if it exists, otherwise it yields None. @@ -303,7 +348,7 @@ def open_feedback(self, filename=None, mode='w'): used. """ if filename is None: - filename = self.specs.get('test_data', 'feedback_file_name') + filename = self.specs.get("test_data", "feedback_file_name") if filename: feedback_open = open(filename, mode) try: diff --git a/src/autotester/testers/py/__init__.py b/src/autotester/testers/py/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/autotester/testers/py/bin/create_environment.sh b/src/autotester/testers/py/bin/create_environment.sh new file mode 100755 index 00000000..7d639b63 --- /dev/null +++ b/src/autotester/testers/py/bin/create_environment.sh @@ -0,0 +1,38 @@ +#!/usr/bin/env bash + +set -e + +create_venv() { + rm -rf "${VENV_DIR}" # clean up existing venv if any + "python${PY_VERSION}" -m venv "${VENV_DIR}" + local pip + pip="${VENV_DIR}/bin/pip" + ${pip} install --upgrade pip + ${pip} install wheel + ${pip} install "${TESTERS_DIR}" + ${pip} install -r "${THIS_DIR}/requirements.txt" + ${pip} install "${PIP_REQUIREMENTS[@]}" + local pth_file="${VENV_DIR}/lib/python${PY_VERSION}/site-packages/lib.pth" + echo "${LIB_DIR}" >> "${pth_file}" +} + +# script starts here +if [[ $# -lt 1 ]]; then + echo "Usage: $0 settings_json" +fi + +# vars +SETTINGS_JSON=$1 + +ENV_DIR=$(echo "${SETTINGS_JSON}" | jq --raw-output .env_loc) +PY_VERSION=$(echo "${SETTINGS_JSON}" | jq --raw-output .env_data.python_version) +read -r -a PIP_REQUIREMENTS <<< "$(echo "${SETTINGS_JSON}" | jq --raw-output .env_data.pip_requirements)" + +VENV_DIR="${ENV_DIR}/venv" +THIS_SCRIPT=$(readlink -f "${BASH_SOURCE[0]}") +THIS_DIR=$(dirname "${THIS_SCRIPT}") +LIB_DIR=$(readlink -f "${THIS_DIR}/../lib") +TESTERS_DIR=$(readlink -f "${THIS_DIR}/../../../") + +# main +create_venv diff --git a/testers/testers/py/bin/install.sh b/src/autotester/testers/py/bin/install.sh similarity index 61% rename from testers/testers/py/bin/install.sh rename to src/autotester/testers/py/bin/install.sh index 4f6d3ac9..17ace2f2 100755 --- a/testers/testers/py/bin/install.sh +++ b/src/autotester/testers/py/bin/install.sh @@ -14,10 +14,10 @@ if [[ $# -ne 0 ]]; then fi # vars -THISSCRIPT=$(readlink -f ${BASH_SOURCE}) -TESTERDIR=$(dirname $(dirname ${THISSCRIPT})) 
-SPECSDIR=${TESTERDIR}/specs +THISSCRIPT=$(readlink -f "${BASH_SOURCE[0]}") +THISDIR=$(dirname "${THISSCRIPT}") +SPECSDIR=$(readlink -f "${THISDIR}/../specs") # main install_packages -touch ${SPECSDIR}/.installed +touch "${SPECSDIR}/.installed" diff --git a/testers/testers/py/bin/requirements.txt b/src/autotester/testers/py/bin/requirements.txt similarity index 100% rename from testers/testers/py/bin/requirements.txt rename to src/autotester/testers/py/bin/requirements.txt diff --git a/testers/testers/py/bin/uninstall.sh b/src/autotester/testers/py/bin/uninstall.sh similarity index 60% rename from testers/testers/py/bin/uninstall.sh rename to src/autotester/testers/py/bin/uninstall.sh index a984a04a..e5da7132 100755 --- a/testers/testers/py/bin/uninstall.sh +++ b/src/autotester/testers/py/bin/uninstall.sh @@ -7,10 +7,10 @@ if [[ $# -ne 0 ]]; then fi # vars -THISSCRIPT=$(readlink -f ${BASH_SOURCE}) -TESTERDIR=$(dirname $(dirname ${THISSCRIPT})) -SPECSDIR=${TESTERDIR}/specs +THISSCRIPT=$(readlink -f "${BASH_SOURCE[0]}") +THISDIR=$(dirname "${THISSCRIPT}") +SPECSDIR=$(readlink -f "${THISDIR}/../specs") # main echo "[PYTHON-UNINSTALL] The following system packages have not been uninstalled: python3. You may uninstall them if you wish." -rm -f ${SPECSDIR}/.installed +rm -f "${SPECSDIR}/.installed" diff --git a/src/autotester/testers/py/lib/__init__.py b/src/autotester/testers/py/lib/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/testers/testers/py/lib/c_helper.py b/src/autotester/testers/py/lib/c_helper.py similarity index 72% rename from testers/testers/py/lib/c_helper.py rename to src/autotester/testers/py/lib/c_helper.py index f888367c..08fe7be9 100644 --- a/testers/testers/py/lib/c_helper.py +++ b/src/autotester/testers/py/lib/c_helper.py @@ -10,17 +10,17 @@ import unittest -DEFAULT_LTRACE_LOG_FILE = 'ltrace_log.txt' -DEFAULT_GCC_FLAGS = ['-std=gnu99', '-Wall', '-g'] -DEFAULT_LTRACE_FLAGS = ['-f', '-n', '2', '-o', DEFAULT_LTRACE_LOG_FILE] +DEFAULT_LTRACE_LOG_FILE = "ltrace_log.txt" +DEFAULT_GCC_FLAGS = ["-std=gnu99", "-Wall", "-g"] +DEFAULT_LTRACE_FLAGS = ["-f", "-n", "2", "-o", DEFAULT_LTRACE_LOG_FILE] # Note that the keys of the dictionary correspond to the "type" of call it was regex_dict = OrderedDict( - resumed='([0-9]+)\s*<\.\.\. (.*) (?:resumed>(.*)=\s)(-?[0-9]+)$', - unfinished='([0-9]+)\s*(.*)\((.*)(.*)=\s)(-?[0-9]+)$", + unfinished=r"([0-9]+)\s*(.*)\((.*) None: """Compile the program, storing stdout and stderr of compilation. @@ -47,10 +48,11 @@ def setUpClass(cls) -> None: """ if not cls.make and not cls.source_files: raise ValueError( - 'ERROR: TestExecutable subclasses must specify source_files or set make=True.') + "ERROR: TestExecutable subclasses must specify source_files or set make=True." + ) - cls.compile_out = '' - cls.compile_err = '' + cls.compile_out = "" + cls.compile_err = "" # Default executable name is based on the first source file. 
if not cls.make and not cls.executable_name: @@ -63,31 +65,33 @@ def setUpClass(cls) -> None: try: if cls.make: # Tuple (stdoutdata, stderrdata) is returned - cls.compile_out, cls.compile_err, _ = _make(cls.make_targets, cls.make_args) + cls.compile_out, cls.compile_err, _ = _make( + cls.make_targets, cls.make_args + ) else: - cls.compile_out, cls.compile_err, _ = _compile(cls.source_files, cls.executable_name) + cls.compile_out, cls.compile_err, _ = _compile( + cls.source_files, cls.executable_name + ) except subprocess.CalledProcessError: cls.compiled = False else: cls.compiled = True - def setUp(self) -> None: """If the compilation was not successful, automatically fail every test. """ if not self.compiled: - self.fail('Test did not run due to a compilation error.') + self.fail("Test did not run due to a compilation error.") def _check_compiler_warnings(self) -> None: """Assert that compilation occurred without errors or warnings. """ - self.assertEqual(self.compile_out, '') - self.assertEqual(self.compile_err, '') + self.assertEqual(self.compile_out, "") + self.assertEqual(self.compile_err, "") - def _run_exec(self, args: Optional[List[str]] = None, **kwargs) -> None: + def _run_exec(self, args: Optional[List[str]] = None, **kwargs): """Run this test class' executable with the given arguments and options.""" - return _exec([os.path.join('.', self.executable_name)] + (args or []), - **kwargs) + return _exec([os.path.join(".", self.executable_name)] + (args or []), **kwargs) def simple_run(args: List[str], **kwargs): @@ -97,13 +101,26 @@ def simple_run(args: List[str], **kwargs): Returns a function which takes an object on which to call run_exec (hence this object must be a subclass of TestExecutable). """ - def _t(self: 'TestExecutable') -> None: + + def _t(self: "TestExecutable") -> None: self._run_exec(args=args, **kwargs) return _t -def simple_test(args: List[str], expected_stdout='', *, expected_stderr='', expected_status=0, input_=None, timeout=2, check=True, rstrip=False, doc='', stderr_relax=False): +def simple_test( + args: List[str], + expected_stdout="", + *, + expected_stderr="", + expected_status=0, + input_=None, + timeout=2, + check=True, + rstrip=False, + doc="", + stderr_relax=False +): """Create a unittest test for fixed command-line arguments, expected stdout and stderr, and exit status. If rstrip is True, ignore trailing whitespace when doing text comparison. @@ -116,8 +133,11 @@ def simple_test(args: List[str], expected_stdout='', *, expected_stderr='', expe (as a substring check) in addition to in stderr, passing the test if one of these succeeds. """ - def _t(self: 'TestExecutable') -> None: - stdout, stderr, returncode = self._run_exec(args=args, input_=input_, timeout=timeout, check=check) + + def _t(self: "TestExecutable") -> None: + stdout, stderr, returncode = self._run_exec( + args=args, input_=input_, timeout=timeout, check=check + ) nonlocal expected_stderr nonlocal expected_stdout @@ -148,7 +168,18 @@ def _t(self: 'TestExecutable') -> None: _t.__doc__ = doc return _t -def substr_test(args: List[str], expected_stdout='', *, expected_stderr='', expected_status=0, input_=None, timeout=2, check=True, doc=''): + +def substr_test( + args: List[str], + expected_stdout="", + *, + expected_stderr="", + expected_status=0, + input_=None, + timeout=2, + check=True, + doc="" +): """Create a unittest test for fixed command-line arguments, expected stdout and stderr, and exit status. 
This test is more lenient that simple_test because it looks for expected @@ -159,8 +190,11 @@ def substr_test(args: List[str], expected_stdout='', *, expected_stderr='', expe doc specifies the docstring of the test function. """ - def _t(self: 'TestExecutable') -> None: - stdout, stderr, returncode = self._run_exec(args=args, input_=input_, timeout=timeout, check=check) + + def _t(self: "TestExecutable") -> None: + stdout, stderr, returncode = self._run_exec( + args=args, input_=input_, timeout=timeout, check=check + ) nonlocal expected_stderr nonlocal expected_stdout @@ -177,6 +211,7 @@ def _t(self: 'TestExecutable') -> None: _t.__doc__ = doc return _t + class TestTrace(TestExecutable): """Test class to support checks with ltrace. @@ -188,19 +223,28 @@ class TestTrace(TestExecutable): a Trace object, since it helps parse any additional arguments to ltrace args is a list of string arguments """ - call_types = [] # The only call types to watch out for (see ltrace man page) + + call_types = [] # The only call types to watch out for (see ltrace man page) @classmethod - def _check_trace(cls, args: Optional[List[str]] = None, ltrace_flags=None, **kwargs): + def _check_trace( + cls, args: Optional[List[str]] = None, ltrace_flags=None, **kwargs + ): if ltrace_flags is None: ltrace_flags = DEFAULT_LTRACE_FLAGS else: ltrace_flags = DEFAULT_LTRACE_FLAGS + ltrace_flags if cls.call_types: - ltrace_flags = ltrace_flags + ['-e', '+'.join(['__libc_start_main'] + cls.call_types)] + ltrace_flags = ltrace_flags + [ + "-e", + "+".join(["__libc_start_main"] + cls.call_types), + ] - return Trace([os.path.join('.', cls.executable_name)] + (args or []), - ltrace_flags, **kwargs) + return Trace( + [os.path.join(".", cls.executable_name)] + (args or []), + ltrace_flags, + **kwargs + ) class Trace: @@ -213,9 +257,10 @@ class Trace: Note that we can also view the dictionary as being constructed from these arity 5-tuples, namely: (PID, func_name, args, ret_val, type) Note that args is "junk" and needs some postprocessing (for example, splitting on ,) This was done because - parsing is a better approach when dealing with variable-number capture groups, as there will be with arguments to a function. - Note that for those that do not have certain fields, like ret_val for unfinished, we pad with None. However, the last - element of the tuple (tuple[-1]) is always the "type" of the call, as determined by the regex that classified it. + parsing is a better approach when dealing with variable-number capture groups, as there will be with arguments to a + function. Note that for those that do not have certain fields, like ret_val for unfinished, we pad with None. + However, the last element of the tuple (tuple[-1]) is always the "type" of the call, as determined by the regex that + classified it. 
Note 2: the "special" regex is a special case, corresponding to things like: --- SIGPIPE (Broken pipe) --- and @@ -230,16 +275,18 @@ class Trace: this can be confirmed examining the regex """ - def __init__(self, command: List[str], ltrace_flags: Optional[List[str]] = None, **kwargs): + def __init__( + self, command: List[str], ltrace_flags: Optional[List[str]] = None, **kwargs + ): ltrace_flags = ltrace_flags or [] try: - _exec(['ltrace'] + ltrace_flags + command, **kwargs) + _exec(["ltrace"] + ltrace_flags + command, **kwargs) except subprocess.TimeoutExpired: # allow for partial results to be reported pass - with open(DEFAULT_LTRACE_LOG_FILE, 'rb') as f: + with open(DEFAULT_LTRACE_LOG_FILE, "rb") as f: f_bytes = f.read() - self.raw = f_bytes.decode(errors='ignore') + self.raw = f_bytes.decode(errors="ignore") self.parent_first_process = None self.lines = [] @@ -247,7 +294,7 @@ def __init__(self, command: List[str], ltrace_flags: Optional[List[str]] = None, self.first_process = None self.split_lines = self.raw.splitlines() if len(self.split_lines) > 1: - parsed_line = parse_arbitrary(self.split_lines[0], r'([0-9]+)\s*.') + parsed_line = parse_arbitrary(self.split_lines[0], r"([0-9]+)\s*.") if parsed_line: self.first_process = parsed_line[0] else: @@ -267,10 +314,10 @@ def get_status(self, pid): return None for calls in self.process_log[pid]: - if 'exited' in calls[0]: + if "exited" in calls[0]: return int(calls[1].split()[-1]) - def lines_for_pid(self, pid, match=''): + def lines_for_pid(self, pid, match=""): """Return the lines in this trace for the given pid. If match is not-empty, only return the lines whose function names @@ -284,8 +331,7 @@ def lines_for_pid(self, pid, match=''): if not match: return self.process_log[pid] - return [call for call in self.process_log[pid] - if call[0] == match] + return [call for call in self.process_log[pid] if call[0] == match] def run_through_regexes(regexes, trace_line): @@ -305,21 +351,21 @@ def run_through_regexes(regexes, trace_line): # print("this is the len of final result " + str(len(final_result))) # print(final_result) # clean the line before putting it in - sep = '->' + sep = "->" rest = final_result[1].split(sep, 1) - if len(rest) > 1: #in case there were multiple + if len(rest) > 1: # in case there were multiple final_result[1] = rest[1] # print(final_result) else: raise ValueError("groups mismatch arity") while len(final_result) < 4: - final_result+=(None,) + final_result += (None,) - final_result += (key,) # append the type of the entry to the end - return final_result # stops as soon as a matching regex is encountered + final_result += (key,) # append the type of the entry to the end + return final_result # stops as soon as a matching regex is encountered # print("line did not have any mathces " + trace_line) - return ('','','','') # did not match with any of the regexes + return "", "", "", "" # did not match with any of the regexes def parse_arbitrary(trace_line, regex): @@ -346,16 +392,19 @@ class TestGenerator: Note: silent failures can happen (e.g., if the executable is not found). 
""" + dict_of_tests = defaultdict(list) # TODO add support for command-line arguments - def __init__(self, - input_dir=None, - executable_path=None, - out_dir=None, - input_extension='txt', - output_extension='stdout', - error_extension='stderr'): + def __init__( + self, + input_dir=None, + executable_path=None, + out_dir=None, + input_extension="txt", + output_extension="stdout", + error_extension="stderr", + ): """ `input_dir` specifies where the input files are found The extensions specify a pattern to look for in target files @@ -371,30 +420,36 @@ def __init__(self, self.output_extension = output_extension self.error_extension = error_extension - def build_outputs(self, args=''): + def build_outputs(self, args=""): """Generate all output files. `arg`s is optionally a string containing the command-line arguments given to the executable. """ - print(os.path.join(self.input_dir, '*.' + self.input_extension)) - for file in glob.glob(os.path.join(self.input_dir, '*.' + self.input_extension)): + print(os.path.join(self.input_dir, "*." + self.input_extension)) + for file in glob.glob( + os.path.join(self.input_dir, "*." + self.input_extension) + ): print(file) name = os.path.splitext(os.path.basename(file))[0] - stdout_file = os.path.join(self.out_dir, name + '.' + self.output_extension) - stderr_file = os.path.join(self.out_dir, name + '.' + self.error_extension) - cmd = "{} {} < {} > {} 2> {}".format(self.executable_path, args, file, stdout_file, stderr_file) - print('Running:', cmd) + stdout_file = os.path.join(self.out_dir, name + "." + self.output_extension) + stderr_file = os.path.join(self.out_dir, name + "." + self.error_extension) + cmd = "{} {} < {} > {} 2> {}".format( + self.executable_path, args, file, stdout_file, stderr_file + ) + print("Running:", cmd) try: _exec_shell([cmd]) - except subprocess.TimeoutExpired: # TODO add handling for TimeoutExpired (error log file for example?) + except subprocess.TimeoutExpired: # TODO add handling for TimeoutExpired (error log file for example?) print("failed on {}".format(file)) def clean(self): """Remove generated test files.""" - for file in glob.glob(os.path.join(self.input_dir, '*.' + self.input_extension)): + for file in glob.glob( + os.path.join(self.input_dir, "*." + self.input_extension) + ): name = os.path.splitext(os.path.basename(file))[0] - stdout_file = os.path.join(self.out_dir, name + '.' + self.output_extension) - stderr_file = os.path.join(self.out_dir, name + '.' + self.error_extension) + stdout_file = os.path.join(self.out_dir, name + "." + self.output_extension) + stderr_file = os.path.join(self.out_dir, name + "." + self.error_extension) os.remove(stdout_file) os.remove(stderr_file) @@ -404,35 +459,49 @@ def populate_tests(self, test_klass, args=None): This must be called *after* build_outputs has been called. """ args = args or [] - for file in glob.glob(os.path.join(self.input_dir, '*.' + self.input_extension)): + for file in glob.glob( + os.path.join(self.input_dir, "*." + self.input_extension) + ): name = os.path.splitext(os.path.basename(file))[0] - stdout_file = os.path.join(self.out_dir, name + '.' + self.output_extension) - stderr_file = os.path.join(self.out_dir, name + '.' + self.error_extension) + stdout_file = os.path.join(self.out_dir, name + "." + self.output_extension) + stderr_file = os.path.join(self.out_dir, name + "." 
+ self.error_extension) with open(file) as in_, open(stdout_file) as out, open(stderr_file) as err: test_in = in_.read() test_out = out.read() test_err = err.read() - setattr(test_klass, 'test_' + name, - simple_test(args, test_out, test_err, test_in)) + setattr( + test_klass, + "test_" + name, + simple_test( + args, + expected_stdout=test_out, + expected_stderr=test_err, + input_=test_in, + ), + ) -def _compile(files, exec_name=None, gcc_flags=DEFAULT_GCC_FLAGS, **kwargs): +def _compile(files, exec_name=None, gcc_flags=None, **kwargs): """Run gcc with the given flags on the given files.""" + if gcc_flags is None: + gcc_flags = DEFAULT_GCC_FLAGS if isinstance(files, str): files = [files] - args = ['gcc'] + gcc_flags + args = ["gcc"] + gcc_flags if exec_name: - args += ['-o', exec_name] + args += ["-o", exec_name] return _exec(args + files, **kwargs) -def _make(targets=None, make_args=['--silent'], **kwargs): +def _make(targets=None, make_args=None, **kwargs): """Run make on the given targets.""" - return _exec(['make'] + make_args + (targets or []), timeout=60, **kwargs) + if make_args is None: + make_args = ["--silent"] + return _exec(["make"] + make_args + (targets or []), timeout=60, **kwargs) -def _exec(args, *, input_=None, timeout=10, check=True, shell=False): +def _exec(args, *, input_=None, timeout=10, shell=False): """Wrapper function that calls exec on the given args in a new subprocess. Return a triple (stdout, stderr, exit status) from the subprocess. @@ -449,7 +518,8 @@ def _exec(args, *, input_=None, timeout=10, check=True, shell=False): stderr=subprocess.PIPE, encoding=locale.getpreferredencoding(False), preexec_fn=lambda: os.setsid(), - shell=shell) + shell=shell, + ) try: stdout, stderr = proc.communicate(timeout=timeout, input=input_) @@ -490,10 +560,10 @@ def ongoing_process(args, check_killed=True): raise proc.exception if check_killed: - assert proc.returncode == -9, 'server exited abnormally' + assert proc.returncode == -9, "server exited abnormally" -def _exec_shell(args, *, input_=None, timeout=1, check=True): +def _exec_shell(args, *, input_=None, timeout=1): """Wrapper function that calls exec on the given args in a new subprocess with a shell. Returns a communicate method (like a pipe) to the exec process. @@ -510,7 +580,8 @@ def _exec_shell(args, *, input_=None, timeout=1, check=True): stderr=subprocess.PIPE, encoding=locale.getpreferredencoding(False), preexec_fn=lambda: os.setsid(), - shell=True) + shell=True, + ) try: return proc.communicate(timeout=timeout, input=input_) except subprocess.TimeoutExpired as e: diff --git a/testers/testers/py/lib/sql_helper.py b/src/autotester/testers/py/lib/sql_helper.py similarity index 83% rename from testers/testers/py/lib/sql_helper.py rename to src/autotester/testers/py/lib/sql_helper.py index b84a504f..55611b4d 100644 --- a/testers/testers/py/lib/sql_helper.py +++ b/src/autotester/testers/py/lib/sql_helper.py @@ -20,7 +20,7 @@ def _in_autotest_env() -> bool: This function can be used to check whether the AUTOTESTENV environment variable has been set to 'true'. """ - return os.environ.get('AUTOTESTENV') == 'true' + return os.environ.get("AUTOTESTENV") == "true" def connection(*args, **kwargs): @@ -35,16 +35,18 @@ def connection(*args, **kwargs): will be used to call psycopg2.connect in order to connect to a database. 
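    A minimal sketch of the intended use (the query is an illustrative assumption):

    >>> conn = connection()  # in the autotest environment the PG* settings are filled in automatically
    >>> with conn.cursor() as curr:
    >>>     curr.execute("SELECT 1;")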
""" if _in_autotest_env(): - kwargs = {**kwargs, - 'database': os.environ.get('PGDATABASE'), - 'password': os.environ.get('PGPASSWORD'), - 'user': os.environ.get('PGUSER'), - 'host': 'localhost'} + kwargs = { + **kwargs, + "database": os.environ.get("PGDATABASE"), + "password": os.environ.get("PGPASSWORD"), + "user": os.environ.get("PGUSER"), + "host": "localhost", + } return _unmockable_psycopg2_connect(*args, **kwargs) @contextmanager -def patch_connection(target: str = 'psycopg2.connect') -> ContextManager: +def patch_connection(target: str = "psycopg2.connect") -> ContextManager: """ Context manager that patches any call to the function decribed in the string with the connection function (in this module). @@ -71,7 +73,7 @@ def patch_connection(target: str = 'psycopg2.connect') -> ContextManager: yield -def patch_connection_class(target: str = 'psycopg2.connect') -> Callable: +def patch_connection_class(target: str = "psycopg2.connect") -> Callable: """ Class decorator that adds the patch_connection decorator to every method in the class. @@ -83,18 +85,22 @@ def patch_connection_class(target: str = 'psycopg2.connect') -> Callable: >>> def __init__(self): >>> self.conn = psycopg2.connect() # calls __main__._connection instead """ + def _connect(cls): for name, method in inspect.getmembers(cls, inspect.isroutine): setattr(cls, name, patch_connection(target)(method)) return cls + return _connect -def execute_psql_file(filename: str, - *args: str, - database: Optional[str] = None, - password: Optional[str] = None, - user: Optional[str] = None) -> subprocess.CompletedProcess: +def execute_psql_file( + filename: str, + *args: str, + database: Optional[str] = None, + password: Optional[str] = None, + user: Optional[str] = None +) -> subprocess.CompletedProcess: """ Return a CompletedProcess object returned after calling: @@ -131,12 +137,14 @@ def execute_psql_file(filename: str, env = os.environ else: db_vars = { - 'PGUSER': user or os.environ.get('PGUSER'), - 'PGPASSWORD': password or os.environ.get('PGPASSWORD'), - 'PGDATABASE': database or os.environ.get('PGDATABASE') + "PGUSER": user or os.environ.get("PGUSER"), + "PGPASSWORD": password or os.environ.get("PGPASSWORD"), + "PGDATABASE": database or os.environ.get("PGDATABASE"), } env = {**os.environ, **db_vars} - return subprocess.run(['psql', '-f', filename] + list(args), env=env, capture_output=True) + return subprocess.run( + ["psql", "-f", filename] + list(args), env=env, capture_output=True + ) class PSQLTest: @@ -219,16 +227,18 @@ def schema(cls, schema: str, persist: bool = False) -> ContextManager: with cls.cursor() as curr: curr.execute("SET SEARCH_PATH TO %s;", org_search_path) if not persist: - curr.execute("DROP SCHEMA IF EXISTS %s CASCADE;", - [AsIs(schema)]) - if schema.lower() == 'public': + curr.execute("DROP SCHEMA IF EXISTS %s CASCADE;", [AsIs(schema)]) + if schema.lower() == "public": curr.execute("CREATE SCHEMA IF NOT EXISTS public;") @classmethod - def copy_schema(cls, to_schema: str, - tables: Optional[List[str]] = None, - from_schema: str = 'public', - overwrite: bool = True) -> None: + def copy_schema( + cls, + to_schema: str, + tables: Optional[List[str]] = None, + from_schema: str = "public", + overwrite: bool = True, + ) -> None: """ Copies tables from to . is 'public' by default @@ -237,8 +247,7 @@ def copy_schema(cls, to_schema: str, names in will be copied. If is True, tables of the same name in will be overwritten. 
""" - strings = {'new': AsIs(to_schema), - 'old': AsIs(from_schema)} + strings = {"new": AsIs(to_schema), "old": AsIs(from_schema)} if tables is None: with cls.cursor() as curr: curr.execute(cls.GET_TABLES_STR, [from_schema]) @@ -247,14 +256,16 @@ def copy_schema(cls, to_schema: str, curr.execute("CREATE SCHEMA IF NOT EXISTS %s;", [AsIs(to_schema)]) for table in tables: if overwrite: - curr.execute("DROP TABLE IF EXISTS %s.%s;", - [AsIs(to_schema), AsIs(table)]) - strs = {**strings, 'table': AsIs(table)} + curr.execute( + "DROP TABLE IF EXISTS %s.%s;", [AsIs(to_schema), AsIs(table)] + ) + strs = {**strings, "table": AsIs(table)} curr.execute(cls.SCHEMA_COPY_STR, strs) @classmethod - def execute_files(cls, files: List[str], *args, - cursor: Optional[CursorType] = None, **kwargs) -> None: + def execute_files( + cls, files: List[str], *args, cursor: Optional[CursorType] = None, **kwargs + ) -> None: """ Execute each file in by passing the content of each to cursor.execute. @@ -263,6 +274,7 @@ def execute_files(cls, files: List[str], *args, the and ) or the cursor object passed as the argument is used if is not None. """ + def _execute_files(): for file in files: with open(file) as f: diff --git a/testers/testers/py/markus_python_tester.py b/src/autotester/testers/py/markus_python_tester.py similarity index 64% rename from testers/testers/py/markus_python_tester.py rename to src/autotester/testers/py/markus_python_tester.py index eebab25f..5c16d6ab 100644 --- a/testers/testers/py/markus_python_tester.py +++ b/src/autotester/testers/py/markus_python_tester.py @@ -1,5 +1,4 @@ import os -import tempfile import unittest import pytest import sys @@ -19,25 +18,37 @@ def __init__(self, stream, descriptions, verbosity): self.successes = [] def addSuccess(self, test): - self.results.append({'status': 'success', - 'name' : test.id(), - 'errors': '', - 'description': test._testMethodDoc}) + self.results.append( + { + "status": "success", + "name": test.id(), + "errors": "", + "description": test._testMethodDoc, + } + ) self.successes.append(test) def addFailure(self, test, err): super().addFailure(test, err) - self.results.append({'status': 'failure', - 'name' : test.id(), - 'errors': self.failures[-1][-1], - 'description': test._testMethodDoc}) + self.results.append( + { + "status": "failure", + "name": test.id(), + "errors": self.failures[-1][-1], + "description": test._testMethodDoc, + } + ) def addError(self, test, err): super().addError(test, err) - self.results.append({'status': 'error', - 'name' : test.id(), - 'errors': self.errors[-1][-1], - 'description': test._testMethodDoc}) + self.results.append( + { + "status": "error", + "name": test.id(), + "errors": self.errors[-1][-1], + "description": test._testMethodDoc, + } + ) class MarkusPytestPlugin: @@ -54,34 +65,37 @@ def pytest_runtest_makereport(self, item, call): outcome = yield rep = outcome.get_result() if rep.failed or item.nodeid not in self.results: - self.results[item.nodeid] = {'status': 'failure' if rep.failed else 'success', - 'name': item.nodeid, - 'errors': str(rep.longrepr) if rep.failed else '', - 'description': item.obj.__doc__} + self.results[item.nodeid] = { + "status": "failure" if rep.failed else "success", + "name": item.nodeid, + "errors": str(rep.longrepr) if rep.failed else "", + "description": item.obj.__doc__, + } return rep def pytest_collectreport(self, report): if report.failed: - self.results[report.nodeid] = {'status': 'error', - 'name': report.nodeid, - 'errors': str(report.longrepr), - 'description': None} + 
self.results[report.nodeid] = { + "status": "error", + "name": report.nodeid, + "errors": str(report.longrepr), + "description": None, + } class MarkusPythonTest(MarkusTest): - def __init__(self, tester, test_file, result, feedback_open=None): - self._test_name = result['name'] + self._test_name = result["name"] self._file_name = test_file - self.description = result.get('description') - self.status = result['status'] - self.message = result['errors'] + self.description = result.get("description") + self.status = result["status"] + self.message = result["errors"] super().__init__(tester, feedback_open) @property def test_name(self): if self.description: - return f'{self._test_name} ({self.description})' + return f"{self._test_name} ({self.description})" return self._test_name @MarkusTest.run_decorator @@ -95,11 +109,11 @@ def run(self): class MarkusPythonTester(MarkusTester): - def __init__(self, specs, test_class=MarkusPythonTest): super().__init__(specs, test_class) - def _load_unittest_tests(self, test_file): + @staticmethod + def _load_unittest_tests(test_file): """ Discover unittest tests in test_file and return a unittest.TestSuite that contains these tests @@ -115,11 +129,12 @@ def _run_unittest_tests(self, test_file): of these tests """ test_suite = self._load_unittest_tests(test_file) - with open(os.devnull, 'w') as nullstream: + with open(os.devnull, "w") as nullstream: test_runner = unittest.TextTestRunner( - verbosity=self.specs['test_data', 'output_verbosity'], + verbosity=self.specs["test_data", "output_verbosity"], stream=nullstream, - resultclass=MarkusTextTestResults) + resultclass=MarkusTextTestResults, + ) test_result = test_runner.run(test_suite) return test_result.results @@ -129,14 +144,13 @@ def _run_pytest_tests(self, test_file): of these tests """ results = [] - this_dir = os.getcwd() - with open(os.devnull, 'w') as null_out: + with open(os.devnull, "w") as null_out: try: sys.stdout = null_out - verbosity = self.specs['test_data', 'output_verbosity'] + verbosity = self.specs["test_data", "output_verbosity"] plugin = MarkusPytestPlugin() - pytest.main([test_file, f'--tb={verbosity}'], plugins=[plugin]) - results = list(plugin.results.values()) + pytest.main([test_file, f"--tb={verbosity}"], plugins=[plugin]) + results.extend(plugin.results.values()) finally: sys.stdout = sys.__stdout__ return results @@ -146,8 +160,8 @@ def run_python_tests(self): Return a dict mapping each filename to its results """ results = {} - for test_file in self.specs['test_data', 'script_files']: - if self.specs['test_data', 'tester'] == 'unittest': + for test_file in self.specs["test_data", "script_files"]: + if self.specs["test_data", "tester"] == "unittest": result = self._run_unittest_tests(test_file) else: result = self._run_pytest_tests(test_file) diff --git a/testers/testers/py/specs/settings_schema.json b/src/autotester/testers/py/specs/settings_schema.json similarity index 100% rename from testers/testers/py/specs/settings_schema.json rename to src/autotester/testers/py/specs/settings_schema.json diff --git a/src/autotester/testers/py/tests/__init__.py b/src/autotester/testers/py/tests/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/autotester/testers/py/tests/script_files/__init__.py b/src/autotester/testers/py/tests/script_files/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/testers/testers/py/tests/script_files/data1.sql b/src/autotester/testers/py/tests/script_files/data1.sql similarity index 100% rename from 
testers/testers/py/tests/script_files/data1.sql rename to src/autotester/testers/py/tests/script_files/data1.sql diff --git a/testers/testers/py/tests/script_files/data2.sql b/src/autotester/testers/py/tests/script_files/data2.sql similarity index 100% rename from testers/testers/py/tests/script_files/data2.sql rename to src/autotester/testers/py/tests/script_files/data2.sql diff --git a/testers/testers/py/tests/script_files/schema.ddl b/src/autotester/testers/py/tests/script_files/schema.ddl similarity index 100% rename from testers/testers/py/tests/script_files/schema.ddl rename to src/autotester/testers/py/tests/script_files/schema.ddl diff --git a/testers/testers/py/tests/script_files/test.py b/src/autotester/testers/py/tests/script_files/test.py similarity index 94% rename from testers/testers/py/tests/script_files/test.py rename to src/autotester/testers/py/tests/script_files/test.py index a4945721..e26e3567 100644 --- a/testers/testers/py/tests/script_files/test.py +++ b/src/autotester/testers/py/tests/script_files/test.py @@ -1,4 +1,5 @@ import unittest + try: import submission except ImportError: @@ -6,7 +7,6 @@ class Test1(unittest.TestCase): - def test_passes(self): """This test should pass""" self.assertTrue(submission.return_true()) @@ -17,11 +17,10 @@ def test_fails(self): class Test2(unittest.TestCase): - def test_fails_and_outputs_json(self): """This test should fail and print json""" self.fail(submission.return_json()) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/testers/testers/py/tests/script_files/test2.py b/src/autotester/testers/py/tests/script_files/test2.py similarity index 99% rename from testers/testers/py/tests/script_files/test2.py rename to src/autotester/testers/py/tests/script_files/test2.py index 8e61e77f..512d699c 100644 --- a/testers/testers/py/tests/script_files/test2.py +++ b/src/autotester/testers/py/tests/script_files/test2.py @@ -1,4 +1,5 @@ import pytest + try: import submission except ImportError: diff --git a/testers/testers/py/tests/script_files/test_sql.py b/src/autotester/testers/py/tests/script_files/test_sql.py similarity index 88% rename from testers/testers/py/tests/script_files/test_sql.py rename to src/autotester/testers/py/tests/script_files/test_sql.py index 0aa44973..e53c3138 100644 --- a/testers/testers/py/tests/script_files/test_sql.py +++ b/src/autotester/testers/py/tests/script_files/test_sql.py @@ -3,7 +3,7 @@ class TestDataset1(sh.PSQLTest): - data_file = 'data1.sql' + data_file = "data1.sql" query = """ SELECT table1.word, table2.number FROM table1 JOIN table2 @@ -16,19 +16,19 @@ def setup_class(cls): # this means you only have to create a connection once for the whole test class cls.create_connection() # create a new schema named 'solution_schema' and switch the search path to that schema. - with cls.schema('solution_schema'): + with cls.schema("solution_schema"): # execute your files in this schema, they will populate the schema with some tables - cls.execute_files(['schema.ddl', cls.data_file]) + cls.execute_files(["schema.ddl", cls.data_file]) # execute the solution query in this schema, get the results and store them in a class variable with cls.cursor() as curr: curr.execute(cls.query) cls.solution_data = curr.fetchall() # create a new schema named 'test_schema' and switch the search path to that schema. 
- with cls.schema('test_schema'): + with cls.schema("test_schema"): # copy all the tables in solution_schema to test_schema - cls.copy_schema('test_schema', from_schema='solution_schema') + cls.copy_schema("test_schema", from_schema="solution_schema") # execute the student's file, this will create a table called correct_no_order - cls.execute_files(['submission.sql']) + cls.execute_files(["submission.sql"]) # get the contents of the correct_no_order table and store it in a class variable with cls.cursor() as curr: curr.execute("SELECT * FROM correct_no_order;") @@ -63,9 +63,9 @@ def test_falsy_same_as_null(self): def test_schema_gone(self): """ Test that demonstrates that the test_schema schema created in the setup_class method has been deleted """ with self.cursor() as curr: - curr.execute(self.GET_TABLES_STR, ['test_schema']) + curr.execute(self.GET_TABLES_STR, ["test_schema"]) assert len(curr.fetchall()) == 0 class TestDataset2(TestDataset1): - data_file = 'data2.sql' + data_file = "data2.sql" diff --git a/testers/testers/py/tests/specs.json b/src/autotester/testers/py/tests/specs.json similarity index 100% rename from testers/testers/py/tests/specs.json rename to src/autotester/testers/py/tests/specs.json diff --git a/src/autotester/testers/py/tests/student_files/__init__.py b/src/autotester/testers/py/tests/student_files/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/testers/testers/py/tests/student_files/submission.py b/src/autotester/testers/py/tests/student_files/submission.py similarity index 100% rename from testers/testers/py/tests/student_files/submission.py rename to src/autotester/testers/py/tests/student_files/submission.py diff --git a/testers/testers/py/tests/student_files/submission.sql b/src/autotester/testers/py/tests/student_files/submission.sql similarity index 100% rename from testers/testers/py/tests/student_files/submission.sql rename to src/autotester/testers/py/tests/student_files/submission.sql diff --git a/src/autotester/testers/pyta/__init__.py b/src/autotester/testers/pyta/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/autotester/testers/pyta/bin/create_environment.sh b/src/autotester/testers/pyta/bin/create_environment.sh new file mode 100755 index 00000000..94136c03 --- /dev/null +++ b/src/autotester/testers/pyta/bin/create_environment.sh @@ -0,0 +1,39 @@ +#!/usr/bin/env bash + +set -e + +create_venv() { + rm -rf "${VENV_DIR}" # clean up existing venv if any + "python${PY_VERSION}" -m venv "${VENV_DIR}" + local pip + pip="${VENV_DIR}/bin/pip" + ${pip} install --upgrade pip + ${pip} install wheel + ${pip} install "${TESTERS_DIR}" + ${pip} install -r "${THIS_DIR}/requirements.txt" + ${pip} install "${PIP_REQUIREMENTS[@]}" + local pth_file="${VENV_DIR}/lib/python${PY_VERSION}/site-packages/lib.pth" + echo "${LIB_DIR}" >> "${pth_file}" +} + +# script starts here +if [[ $# -lt 1 ]]; then + echo "Usage: $0 settings_json" +fi + +# vars +SETTINGS_JSON=$1 + +ENV_DIR=$(echo "${SETTINGS_JSON}" | jq --raw-output .env_loc) +PY_VERSION=$(echo "${SETTINGS_JSON}" | jq --raw-output .env_data.python_version) +read -r -a PIP_REQUIREMENTS <<< "$(echo "${SETTINGS_JSON}" | jq --raw-output .env_data.pip_requirements)" + +VENV_DIR="${ENV_DIR}/venv" +THIS_SCRIPT=$(readlink -f "${BASH_SOURCE[0]}") +THIS_DIR=$(dirname "${THIS_SCRIPT}") +LIB_DIR=$(readlink -f "${THIS_DIR}/../lib") +TESTERS_DIR=$(readlink -f "${THIS_DIR}/../../../") + +# main +create_venv + diff --git a/testers/testers/pyta/bin/install.sh 
b/src/autotester/testers/pyta/bin/install.sh similarity index 61% rename from testers/testers/pyta/bin/install.sh rename to src/autotester/testers/pyta/bin/install.sh index da439f84..41a1b141 100755 --- a/testers/testers/pyta/bin/install.sh +++ b/src/autotester/testers/pyta/bin/install.sh @@ -14,10 +14,10 @@ if [[ $# -ne 0 ]]; then fi # vars -THISSCRIPT=$(readlink -f ${BASH_SOURCE}) -TESTERDIR=$(dirname $(dirname ${THISSCRIPT})) -SPECSDIR=${TESTERDIR}/specs +THISSCRIPT=$(readlink -f "${BASH_SOURCE[0]}") +THISDIR=$(dirname "${THISSCRIPT}") +SPECSDIR=$(readlink -f "${THISDIR}/../specs") # main install_packages -touch ${SPECSDIR}/.installed +touch "${SPECSDIR}/.installed" diff --git a/testers/testers/pyta/bin/requirements.txt b/src/autotester/testers/pyta/bin/requirements.txt similarity index 100% rename from testers/testers/pyta/bin/requirements.txt rename to src/autotester/testers/pyta/bin/requirements.txt diff --git a/testers/testers/pyta/bin/uninstall.sh b/src/autotester/testers/pyta/bin/uninstall.sh similarity index 60% rename from testers/testers/pyta/bin/uninstall.sh rename to src/autotester/testers/pyta/bin/uninstall.sh index d794d018..8d898d35 100755 --- a/testers/testers/pyta/bin/uninstall.sh +++ b/src/autotester/testers/pyta/bin/uninstall.sh @@ -7,10 +7,10 @@ if [[ $# -ne 0 ]]; then fi # vars -THISSCRIPT=$(readlink -f ${BASH_SOURCE}) -TESTERDIR=$(dirname $(dirname ${THISSCRIPT})) -SPECSDIR=${TESTERDIR}/specs +THISSCRIPT=$(readlink -f "${BASH_SOURCE[0]}") +THISDIR=$(dirname "${THISSCRIPT}") +SPECSDIR=$(readlink -f "${THISDIR}/../specs") # main echo "[PYTA-UNINSTALL] The following system packages have not been uninstalled: python3. You may uninstall them if you wish." -rm -f ${SPECSDIR}/.installed +rm -f "${SPECSDIR}/.installed" diff --git a/testers/testers/pyta/markus_pyta_tester.py b/src/autotester/testers/pyta/markus_pyta_tester.py similarity index 55% rename from testers/testers/pyta/markus_pyta_tester.py rename to src/autotester/testers/pyta/markus_pyta_tester.py index c2302b02..a3155f88 100644 --- a/testers/testers/pyta/markus_pyta_tester.py +++ b/src/autotester/testers/pyta/markus_pyta_tester.py @@ -11,12 +11,13 @@ class MarkusPyTAReporter(PositionReporter): + def __init__(self, *args, **kwargs): + super().__init__(self, *args, **kwargs) + self._sorted_error_messages = defaultdict(list) - def print_messages(self, level='all'): + def print_messages(self, level="all"): # print to feedback file, then reset and generate data for annotations PlainReporter.print_messages(self, level) - self._sorted_error_messages = defaultdict(list) - self._sorted_style_messages = defaultdict(list) super().print_messages(level) def output_blob(self): @@ -25,9 +26,7 @@ def output_blob(self): class MarkusPyTATest(MarkusTest): - ERROR_MSGS = { - 'reported': "{} error(s)" - } + ERROR_MSGS = {"reported": "{} error(s)"} def __init__(self, tester, student_file_path, max_points, feedback_open=None): self.student_file = student_file_path @@ -37,23 +36,27 @@ def __init__(self, tester, student_file_path, max_points, feedback_open=None): @property def test_name(self): - return f'PyTA {self.student_file}' + return f"PyTA {self.student_file}" def add_annotations(self, reporter): - for result in reporter._output['results']: - if 'filename' not in result: + for result in reporter._output["results"]: + if "filename" not in result: continue - for msg_group in result.get('msg_errors', []) + result.get('msg_styles', []): - for msg in msg_group['occurrences']: - self.annotations.append({ - 'annotation_category_name': 
None, - 'filename': result['filename'], - 'content': msg['text'], - 'line_start': msg['lineno'], - 'line_end': msg['end_lineno'], - 'column_start': msg['col_offset'], - 'column_end': msg['end_col_offset'] - }) + for msg_group in result.get("msg_errors", []) + result.get( + "msg_styles", [] + ): + for msg in msg_group["occurrences"]: + self.annotations.append( + { + "annotation_category_name": None, + "filename": result["filename"], + "content": msg["text"], + "line_start": msg["lineno"], + "line_end": msg["end_lineno"], + "column_start": msg["col_offset"], + "column_end": msg["end_col_offset"], + } + ) def after_successful_test_run(self): self.tester.annotations.extend(self.annotations) @@ -62,9 +65,15 @@ def after_successful_test_run(self): def run(self): try: # run PyTA and collect annotations - sys.stdout = self.feedback_open if self.feedback_open is not None else self.tester.devnull + sys.stdout = ( + self.feedback_open + if self.feedback_open is not None + else self.tester.devnull + ) sys.stderr = self.tester.devnull - reporter = python_ta.check_all(self.student_file, config=self.tester.pyta_config) + reporter = python_ta.check_all( + self.student_file, config=self.tester.pyta_config + ) if reporter.current_file_linted is None: # No files were checked. The mark is set to 0. num_messages = 0 @@ -74,7 +83,11 @@ def run(self): # deduct 1 point per message occurrence (not type) num_messages = len(self.annotations) points_earned = max(0, self.points_total - num_messages) - message = self.ERROR_MSGS['reported'].format(num_messages) if num_messages > 0 else '' + message = ( + self.ERROR_MSGS["reported"].format(num_messages) + if num_messages > 0 + else "" + ) return self.done(points_earned, message) except Exception as e: self.annotations = [] @@ -85,33 +98,32 @@ def run(self): class MarkusPyTATester(MarkusTester): - def __init__(self, specs, test_class=MarkusPyTATest): super().__init__(specs, test_class) - self.feedback_file = self.specs.get('test_data', 'feedback_file_name') - self.annotation_file = self.specs.get('test_data', 'annotation_file') + self.feedback_file = self.specs.get("test_data", "feedback_file_name") + self.annotation_file = self.specs.get("test_data", "annotation_file") self.pyta_config = self.update_pyta_config() self.annotations = [] - self.devnull = open(os.devnull, 'w') + self.devnull = open(os.devnull, "w") VALIDATORS[MarkusPyTAReporter.__name__] = MarkusPyTAReporter def update_pyta_config(self): - config_file = self.specs.get('test_data', 'config_file_name') + config_file = self.specs.get("test_data", "config_file_name") if config_file: with open(config_file) as f: config_dict = json.load(f) - else: + else: config_dict = {} - config_dict['pyta-reporter'] = 'MarkusPyTAReporter' + config_dict["pyta-reporter"] = "MarkusPyTAReporter" if self.feedback_file: - config_dict['pyta-output-file'] = self.feedback_file + config_dict["pyta-output-file"] = self.feedback_file return config_dict def after_tester_run(self): if self.annotation_file and self.annotations: - with open(self.annotation_file, 'w') as annotations_open: + with open(self.annotation_file, "w") as annotations_open: json.dump(self.annotations, annotations_open) if self.devnull: self.devnull.close() @@ -119,10 +131,10 @@ def after_tester_run(self): @MarkusTester.run_decorator def run(self): with self.open_feedback(self.feedback_file) as feedback_open: - for test_data in self.specs.get('test_data', 'student_files', default=[]): - student_file_path = test_data['file_path'] - max_points = test_data.get('max_points', 
10) - test = self.test_class(self, student_file_path, max_points, feedback_open) + for test_data in self.specs.get("test_data", "student_files", default=[]): + student_file_path = test_data["file_path"] + max_points = test_data.get("max_points", 10) + test = self.test_class( + self, student_file_path, max_points, feedback_open + ) print(test.run()) - - diff --git a/testers/testers/pyta/specs/settings_schema.json b/src/autotester/testers/pyta/specs/settings_schema.json similarity index 100% rename from testers/testers/pyta/specs/settings_schema.json rename to src/autotester/testers/pyta/specs/settings_schema.json diff --git a/src/autotester/testers/pyta/tests/__init__.py b/src/autotester/testers/pyta/tests/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/testers/testers/pyta/tests/specs.json b/src/autotester/testers/pyta/tests/specs.json similarity index 100% rename from testers/testers/pyta/tests/specs.json rename to src/autotester/testers/pyta/tests/specs.json diff --git a/src/autotester/testers/pyta/tests/student_files/__init__.py b/src/autotester/testers/pyta/tests/student_files/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/testers/testers/pyta/tests/student_files/submission.py b/src/autotester/testers/pyta/tests/student_files/submission.py similarity index 99% rename from testers/testers/pyta/tests/student_files/submission.py rename to src/autotester/testers/pyta/tests/student_files/submission.py index 12e1e7dc..92820b24 100644 --- a/testers/testers/pyta/tests/student_files/submission.py +++ b/src/autotester/testers/pyta/tests/student_files/submission.py @@ -10,5 +10,6 @@ def loop(): while True: pass + def return_json(): return ']}[{"\\' diff --git a/src/autotester/testers/racket/__init__.py b/src/autotester/testers/racket/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/testers/testers/racket/bin/install.sh b/src/autotester/testers/racket/bin/install.sh similarity index 62% rename from testers/testers/racket/bin/install.sh rename to src/autotester/testers/racket/bin/install.sh index a1e3412c..e21a6016 100755 --- a/testers/testers/racket/bin/install.sh +++ b/src/autotester/testers/racket/bin/install.sh @@ -14,10 +14,10 @@ if [[ $# -ne 0 ]]; then fi # vars -THISSCRIPT=$(readlink -f ${BASH_SOURCE}) -TESTERDIR=$(dirname $(dirname ${THISSCRIPT})) -SPECSDIR=${TESTERDIR}/specs +THISSCRIPT=$(readlink -f "${BASH_SOURCE[0]}") +THISDIR=$(dirname "${THISSCRIPT}") +SPECSDIR=$(readlink -f "${THISDIR}/../specs") # main install_packages -touch ${SPECSDIR}/.installed +touch "${SPECSDIR}/.installed" diff --git a/testers/testers/racket/bin/uninstall.sh b/src/autotester/testers/racket/bin/uninstall.sh similarity index 61% rename from testers/testers/racket/bin/uninstall.sh rename to src/autotester/testers/racket/bin/uninstall.sh index 80a53025..557ca258 100755 --- a/testers/testers/racket/bin/uninstall.sh +++ b/src/autotester/testers/racket/bin/uninstall.sh @@ -7,10 +7,10 @@ if [[ $# -ne 0 ]]; then fi # vars -THISSCRIPT=$(readlink -f ${BASH_SOURCE}) -TESTERDIR=$(dirname $(dirname ${THISSCRIPT})) -SPECSDIR=${TESTERDIR}/specs +THISSCRIPT=$(readlink -f "${BASH_SOURCE[0]}") +THISDIR=$(dirname "${THISSCRIPT}") +SPECSDIR=$(readlink -f "${THISDIR}/../specs") # main echo "[RACKET-UNINSTALL] The following system packages have not been uninstalled: racket python3. You may uninstall them if you wish." 
-rm -f ${SPECSDIR}/.installed +rm -f "${SPECSDIR}/.installed" diff --git a/testers/testers/racket/lib/markus.rkt b/src/autotester/testers/racket/lib/markus.rkt similarity index 99% rename from testers/testers/racket/lib/markus.rkt rename to src/autotester/testers/racket/lib/markus.rkt index abb4294e..d8482914 100755 --- a/testers/testers/racket/lib/markus.rkt +++ b/src/autotester/testers/racket/lib/markus.rkt @@ -84,4 +84,3 @@ (set! test-results (run-test (dynamic-require/expose (string->path test-file) test-suite-sym))))) (write-json (map show-test-result test-results))) - diff --git a/testers/testers/racket/markus_racket_tester.py b/src/autotester/testers/racket/markus_racket_tester.py similarity index 66% rename from testers/testers/racket/markus_racket_tester.py rename to src/autotester/testers/racket/markus_racket_tester.py index 58c3d76d..91601b54 100644 --- a/testers/testers/racket/markus_racket_tester.py +++ b/src/autotester/testers/racket/markus_racket_tester.py @@ -6,11 +6,10 @@ class MarkusRacketTest(MarkusTest): - def __init__(self, tester, feedback_open, result): - self._test_name = result['name'] - self.status = result['status'] - self.message = result['message'] + self._test_name = result["name"] + self.status = result["status"] + self.message = result["message"] super().__init__(tester, feedback_open) @property @@ -29,31 +28,35 @@ def run(self): class MarkusRacketTester(MarkusTester): - ERROR_MSGS = {'bad_json': 'Unable to parse test results: {}'} + ERROR_MSGS = {"bad_json": "Unable to parse test results: {}"} def __init__(self, specs, test_class=MarkusRacketTest): super().__init__(specs, test_class) - + def run_racket_test(self): """ Return the subprocess.CompletedProcess object for each test file run using the markus.rkt tester. """ results = {} - markus_rkt = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'lib', 'markus.rkt') - for group in self.specs['test_data', 'script_files']: - test_file = group.get('script_file') + markus_rkt = os.path.join( + os.path.dirname(os.path.realpath(__file__)), "lib", "markus.rkt" + ) + for group in self.specs["test_data", "script_files"]: + test_file = group.get("script_file") if test_file: - suite_name = group.get('test_suite_name', 'all-tests') - cmd = [markus_rkt, '--test-suite', suite_name, test_file] - rkt = subprocess.run(cmd, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - universal_newlines=True, - check=True) + suite_name = group.get("test_suite_name", "all-tests") + cmd = [markus_rkt, "--test-suite", suite_name, test_file] + rkt = subprocess.run( + cmd, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + universal_newlines=True, + check=True, + ) results[test_file] = rkt.stdout return results - + @MarkusTester.run_decorator def run(self): try: @@ -66,7 +69,7 @@ def run(self): try: test_results = json.loads(result) except json.JSONDecodeError as e: - msg = MarkusRacketTester.ERROR_MSGS['bad_json'].format(result) + msg = MarkusRacketTester.ERROR_MSGS["bad_json"].format(result) raise MarkusTestError(msg) from e for t_result in test_results: test = self.test_class(self, feedback_open, t_result) diff --git a/testers/testers/racket/specs/settings_schema.json b/src/autotester/testers/racket/specs/settings_schema.json similarity index 99% rename from testers/testers/racket/specs/settings_schema.json rename to src/autotester/testers/racket/specs/settings_schema.json index 49abc00c..53015457 100644 --- a/testers/testers/racket/specs/settings_schema.json +++ b/src/autotester/testers/racket/specs/settings_schema.json @@ 
-78,4 +78,4 @@ } } } -} \ No newline at end of file +} diff --git a/testers/testers/racket/tests/script_files/test.rkt b/src/autotester/testers/racket/tests/script_files/test.rkt similarity index 100% rename from testers/testers/racket/tests/script_files/test.rkt rename to src/autotester/testers/racket/tests/script_files/test.rkt diff --git a/testers/testers/racket/tests/specs.json b/src/autotester/testers/racket/tests/specs.json similarity index 100% rename from testers/testers/racket/tests/specs.json rename to src/autotester/testers/racket/tests/specs.json diff --git a/testers/testers/racket/tests/student_files/submission.rkt b/src/autotester/testers/racket/tests/student_files/submission.rkt similarity index 100% rename from testers/testers/racket/tests/student_files/submission.rkt rename to src/autotester/testers/racket/tests/student_files/submission.rkt diff --git a/src/autotester/testers/tests/.gitkeep b/src/autotester/testers/tests/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/src/autotester/tests/__init__.py b/src/autotester/tests/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/autotester/tests/cli_test.py b/src/autotester/tests/cli_test.py new file mode 100644 index 00000000..74e12a8e --- /dev/null +++ b/src/autotester/tests/cli_test.py @@ -0,0 +1,382 @@ +import os +import json +import re +import pytest +import inspect +import tempfile +import glob +from unittest.mock import patch, ANY, Mock +from contextlib import contextmanager +from fakeredis import FakeStrictRedis +from rq.exceptions import NoSuchJobError +from autotester import cli + + +@pytest.fixture(autouse=True) +def redis(): + fake_redis = FakeStrictRedis() + with patch("autotester.cli.redis_connection", return_value=fake_redis): + with patch( + "autotester.server.utils.redis_management.redis_connection", + return_value=fake_redis, + ): + yield fake_redis + + +@contextmanager +def tmp_script_dir(settings_dict): + with tempfile.TemporaryDirectory() as tmp_dir: + files_dir = os.path.join(tmp_dir, "files") + os.mkdir(files_dir) + with open(os.path.join(files_dir, ".gitkeep"), "w"): + pass + with open(os.path.join(tmp_dir, "settings.json"), "w") as f: + json.dump(settings_dict, f) + with patch("autotester.cli.test_script_directory", return_value=tmp_dir): + yield tmp_dir + + +@pytest.fixture(autouse=True) +def empty_test_script_dir(redis): + empty_settings = {"testers": [{"test_data": []}]} + with tmp_script_dir(empty_settings) as tmp_dir: + yield tmp_dir + + +@pytest.fixture +def non_existant_test_script_dir(): + with patch("autotester.cli.test_script_directory", return_value=None): + yield + + +@pytest.fixture +def pop_interval(): + with patch( + "autotester.server.utils.redis_management.get_avg_pop_interval", + return_value=None, + ): + yield + + +@pytest.fixture(autouse=True) +def mock_rmtree(): + with patch("shutil.rmtree") as rm: + yield rm + + +@pytest.fixture(autouse=True) +def mock_enqueue_call(): + with patch("rq.Queue.enqueue_call") as enqueue_func: + yield enqueue_func + + +class DummyTestError(Exception): + pass + + +class TestEnqueueTest: + @staticmethod + def get_kwargs(**kw): + param_kwargs = {k: "" for k in inspect.signature(cli.run_test).parameters} + return {**param_kwargs, **kw} + + def test_fails_missing_required_args(self): + try: + cli.enqueue_test("Admin", 1) + except cli.JobArgumentError: + return + except cli.MarkUsError as e: + pytest.fail( + f"should have failed because kwargs are missing but instead failed with: {e}" + ) + pytest.fail("should have 
failed because kwargs are missing") + + def test_accepts_same_kwargs_as_server_run_test_method(self): + try: + cli.enqueue_test("Admin", 1, **self.get_kwargs()) + except cli.JobArgumentError: + pytest.fail("should not have failed because kwargs are not missing") + except cli.MarkUsError: + pass + + def test_fails_if_cannot_find_valid_queue(self): + try: + cli.enqueue_test("Tim", None, **self.get_kwargs()) + except cli.InvalidQueueError: + return + except cli.MarkUsError as e: + pytest.fail( + f"should have failed because a valid queue is not found but instead failed with: {e}" + ) + pytest.fail("should have failed because a valid queue is not found") + + def test_can_find_valid_queue(self): + try: + cli.enqueue_test("Admin", 1, **self.get_kwargs()) + except cli.InvalidQueueError: + pytest.fail("should not have failed because a valid queue is available") + except cli.MarkUsError: + pass + + def test_fails_if_test_files_do_not_exist(self, non_existant_test_script_dir): + try: + cli.enqueue_test("Admin", 1, **self.get_kwargs()) + except cli.TestScriptFilesError: + return + except cli.MarkUsError as e: + pytest.fail( + f"should have failed because no test scripts could be found but instead failed with: {e}" + ) + pytest.fail("should have failed because no test scripts could be found") + + def test_can_find_test_files(self): + try: + cli.enqueue_test("Admin", 1, **self.get_kwargs()) + except cli.TestScriptFilesError: + pytest.fail("should not have failed because no test scripts could be found") + except cli.MarkUsError: + pass + + def test_writes_queue_info_to_stdout(self, capfd, pop_interval): + try: + cli.enqueue_test("Admin", 1, **self.get_kwargs()) + except cli.MarkUsError: + pass + out, _err = capfd.readouterr() + assert re.search(r"^\d+$", out) + + def test_fails_if_no_tests_groups(self): + try: + cli.enqueue_test("Admin", 1, **self.get_kwargs()) + except cli.TestParameterError: + return + except cli.MarkUsError: + pass + + def test_fails_if_no_groups_in_category(self): + settings = {"testers": [{"test_data": [{"category": ["admin"]}]}]} + with tmp_script_dir(settings): + try: + cli.enqueue_test( + "Admin", 1, **self.get_kwargs(test_categories=["student"]) + ) + except cli.TestParameterError: + return + except cli.MarkUsError: + pass + + def test_can_find_tests_in_given_category(self): + settings = { + "testers": [{"test_data": [{"category": ["admin"], "timeout": 30}]}] + } + with tmp_script_dir(settings): + try: + cli.enqueue_test( + "Admin", 1, **self.get_kwargs(test_categories=["admin"]) + ) + except cli.TestParameterError: + pytest.fail("should not have failed to find an admin test") + except cli.MarkUsError: + pass + + def test_can_enqueue_test_with_timeout(self, mock_enqueue_call): + settings = { + "testers": [{"test_data": [{"category": ["admin"], "timeout": 10}]}] + } + with tmp_script_dir(settings): + cli.enqueue_test("Admin", 1, **self.get_kwargs(test_categories=["admin"])) + mock_enqueue_call.assert_called_with( + ANY, kwargs=ANY, job_id=ANY, timeout=15 + ) + + def test_cleans_up_files_on_error(self, mock_rmtree): + with pytest.raises(Exception): + cli.enqueue_test("Admin", 1, **self.get_kwargs(files_path="something")) + + +class TestUpdateSpecs: + @staticmethod + def get_kwargs(**kw): + param_kwargs = { + k: "" for k in inspect.signature(cli.update_test_specs).parameters + } + return {**param_kwargs, **kw} + + def test_fails_when_schema_is_invalid(self): + with patch( + "autotester.server.utils.form_validation.validate_with_defaults", + return_value=DummyTestError("error"), + 
): + with patch("autotester.cli.update_test_specs"): + try: + cli.update_specs("", **self.get_kwargs(schema={})) + except DummyTestError: + return + pytest.fail("should have failed because the form is invalid") + + def test_succeeds_when_schema_is_valid(self): + with patch( + "autotester.server.utils.form_validation.validate_with_defaults", + return_value=[], + ): + with patch("autotester.cli.update_test_specs"): + try: + cli.update_specs("", **self.get_kwargs(schema={})) + except DummyTestError: + pytest.fail("should not have failed because the form is valid") + + def test_calls_update_test_specs(self): + with patch( + "autotester.server.utils.form_validation.validate_with_defaults", + return_value=[], + ): + with patch("autotester.cli.update_test_specs") as update_test_specs: + cli.update_specs("", **self.get_kwargs(schema={})) + update_test_specs.assert_called_once() + + def test_cleans_up_files_on_error(self, mock_rmtree): + with patch( + "autotester.server.utils.form_validation.validate_with_defaults", + return_value=DummyTestError("error"), + ): + with patch("autotester.cli.update_test_specs"): + with pytest.raises(Exception): + cli.update_specs( + **self.get_kwargs(schema={}, files_path="test_files") + ) + + +@pytest.fixture +def mock_rq_job(): + with patch("rq.job.Job") as job: + enqueued_job = Mock() + job.fetch.return_value = enqueued_job + yield job, enqueued_job + + +class TestCancelTest: + def test_do_nothing_if_job_does_not_exist(self, mock_rq_job): + job_class, mock_job = mock_rq_job + job_class.fetch.side_effect = NoSuchJobError + cli.cancel_test("something", [1]) + mock_job.cancel.assert_not_called() + + def test_do_nothing_if_job_not_enqueued(self, mock_rq_job): + _, mock_job = mock_rq_job + mock_job.is_queued.return_value = False + cli.cancel_test("something", [1]) + mock_job.cancel.assert_not_called() + + def test_cancel_job(self, mock_rq_job): + _, mock_job = mock_rq_job + mock_job.is_queued.return_value = True + mock_job.kwargs = {"files_path": None} + cli.cancel_test("something", [1]) + mock_job.cancel.assert_called_once() + + def test_remove_files_when_cancelling(self, mock_rq_job, mock_rmtree): + _, mock_job = mock_rq_job + mock_job.is_queued.return_value = True + files_path = "something" + mock_job.kwargs = {"files_path": files_path} + cli.cancel_test("something", [1]) + mock_rmtree.assert_called_once_with(files_path, onerror=ANY) + + def test_cancel_multiple_jobs(self, mock_rq_job): + _, mock_job = mock_rq_job + mock_job.is_queued.return_value = True + mock_job.kwargs = {"files_path": None} + cli.cancel_test("something", [1, 2]) + assert mock_job.cancel.call_count == 2 + + def test_remove_files_when_cancelling_multiple_jobs(self, mock_rq_job, mock_rmtree): + _, mock_job = mock_rq_job + mock_job.is_queued.return_value = True + files_path = "something" + mock_job.kwargs = {"files_path": files_path} + cli.cancel_test("something", [1, 2]) + assert mock_rmtree.call_count == 2 + + +class TestGetSchema: + @staticmethod + def fake_installed_testers(installed): + root_dir = os.path.dirname(os.path.abspath(cli.__file__)) + paths = [] + for tester in installed: + glob_pattern = os.path.join(root_dir, "testers", tester, "specs") + paths.append(os.path.join(glob.glob(glob_pattern)[0], ".installed")) + return paths + + @staticmethod + def assert_tester_in_schema(tester, schema): + assert tester in schema["definitions"]["installed_testers"]["enum"] + installed = [] + for option in schema["definitions"]["tester_schemas"]["oneOf"]: + 
installed.append(option["properties"]["tester_type"]["enum"][0]) + assert tester in installed + + def test_prints_skeleton_when_none_installed(self, capfd): + with patch("glob.glob", return_value=[]): + cli.get_schema() + out, _err = capfd.readouterr() + schema = json.loads(out) + root_dir = os.path.dirname(os.path.abspath(cli.__file__)) + with open( + os.path.join(root_dir, "lib", "tester_schema_skeleton.json") + ) as f: + skeleton = json.load(f) + assert schema == skeleton + + def test_prints_test_schema_when_one_installed(self, capfd): + with patch("glob.glob", return_value=self.fake_installed_testers(["custom"])): + cli.get_schema() + out, _err = capfd.readouterr() + schema = json.loads(out) + self.assert_tester_in_schema("custom", schema) + + def test_prints_test_schema_when_multiple_installed(self, capfd): + with patch( + "glob.glob", return_value=self.fake_installed_testers(["custom", "py"]) + ): + cli.get_schema() + out, _err = capfd.readouterr() + schema = json.loads(out) + self.assert_tester_in_schema("custom", schema) + self.assert_tester_in_schema("py", schema) + + +class TestParseArgFile: + def test_loads_arg_file(self): + settings = {"some": "data"} + with tmp_script_dir(settings) as tmp_dir: + arg_file = os.path.join(tmp_dir, "settings.json") + kwargs = cli.parse_arg_file(arg_file) + try: + kwargs.pop("files_path") + except KeyError: + pass + assert settings == kwargs + + def test_remove_arg_file(self): + settings = {"some": "data"} + with tmp_script_dir(settings) as tmp_dir: + arg_file = os.path.join(tmp_dir, "settings.json") + cli.parse_arg_file(arg_file) + assert not os.path.isfile(arg_file) + + def test_adds_file_path_if_not_present(self): + settings = {"some": "data"} + with tmp_script_dir(settings) as tmp_dir: + arg_file = os.path.join(tmp_dir, "settings.json") + kwargs = cli.parse_arg_file(arg_file) + assert "files_path" in kwargs + assert os.path.realpath(kwargs["files_path"]) == os.path.realpath(tmp_dir) + + def test_does_not_add_file_path_if_present(self): + settings = {"some": "data", "files_path": "something"} + with tmp_script_dir(settings) as tmp_dir: + arg_file = os.path.join(tmp_dir, "settings.json") + kwargs = cli.parse_arg_file(arg_file) + assert "files_path" in kwargs + assert kwargs["files_path"] == "something" diff --git a/testers/testers/custom/bin/default_hooks.py b/testers/testers/custom/bin/default_hooks.py deleted file mode 100644 index 21649236..00000000 --- a/testers/testers/custom/bin/default_hooks.py +++ /dev/null @@ -1,9 +0,0 @@ -import os - -def before_all_custom(settings, **kwargs): - """ Make script files executable """ - for test_data in settings['test_data']: - for script_file in test_data['script_files']: - os.chmod(script_file, 0o755) - -HOOKS = [before_all_custom] diff --git a/testers/testers/custom/bin/install.sh b/testers/testers/custom/bin/install.sh deleted file mode 100755 index d667c5fb..00000000 --- a/testers/testers/custom/bin/install.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/usr/bin/env bash - -set -e - -# script starts here -if [[ $# -ne 0 ]]; then - echo "Usage: $0" - exit 1 -fi - -# vars -THISSCRIPT=$(readlink -f ${BASH_SOURCE}) -TESTERDIR=$(dirname $(dirname ${THISSCRIPT})) -SPECSDIR=${TESTERDIR}/specs - -# main -touch ${SPECSDIR}/.installed diff --git a/testers/testers/custom/bin/uninstall.sh b/testers/testers/custom/bin/uninstall.sh deleted file mode 100755 index 123b84af..00000000 --- a/testers/testers/custom/bin/uninstall.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/usr/bin/env bash - -# script starts here -if [[ $# -ne 0 ]]; then 
- echo "Usage: $0" - exit 1 -fi - -# vars -THISSCRIPT=$(readlink -f ${BASH_SOURCE}) -TESTERDIR=$(dirname $(dirname ${THISSCRIPT})) -SPECSDIR=${TESTERDIR}/specs - -# main -rm -f ${SPECSDIR}/.installed diff --git a/testers/testers/custom/specs/default_install_settings.json b/testers/testers/custom/specs/default_install_settings.json deleted file mode 100644 index a2867fc3..00000000 --- a/testers/testers/custom/specs/default_install_settings.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "executable_scripts": true -} diff --git a/testers/testers/custom/tests/student_files/submission.py b/testers/testers/custom/tests/student_files/submission.py deleted file mode 100644 index 91c9a826..00000000 --- a/testers/testers/custom/tests/student_files/submission.py +++ /dev/null @@ -1,12 +0,0 @@ -#!/usr/bin/env python - -""" -This student submission file is used to test the autotester -It represents the test case where: - - The submission passes with full marks -""" - -import json - -print(json.dumps({'name': 'pass_test', 'output': 'NA', 'marks_earned': 2, 'marks_total': 2, 'status': 'pass'})) diff --git a/testers/testers/py/bin/create_environment.sh b/testers/testers/py/bin/create_environment.sh deleted file mode 100755 index 717fe81c..00000000 --- a/testers/testers/py/bin/create_environment.sh +++ /dev/null @@ -1,37 +0,0 @@ -#!/usr/bin/env bash - -set -e - -create_venv() { - rm -rf ${VENV_DIR} # clean up existing venv if any - python${PY_VERSION} -m venv ${VENV_DIR} - source ${VENV_DIR}/bin/activate - pip install --upgrade pip - pip install wheel - pip install -r "${THIS_DIR}/requirements.txt" - pip install -r <(echo ${PIP_REQUIREMENTS} | sed 's/\s\+/\n/g') # sub spaces for newlines - local pth_file=${VENV_DIR}/lib/python${PY_VERSION}/site-packages/lib.pth - echo ${LIB_DIR} >> ${pth_file} - echo ${TESTERS_DIR} >> ${pth_file} -} - -# script starts here -if [[ $# -lt 1 ]]; then - echo "Usage: $0 settings_json" -fi - -# vars -SETTINGS_JSON=$1 - -ENV_DIR=$(echo ${SETTINGS_JSON} | jq --raw-output .env_loc) -PY_VERSION=$(echo ${SETTINGS_JSON} | jq --raw-output .env_data.python_version) -PIP_REQUIREMENTS=$(echo ${SETTINGS_JSON} | jq --raw-output .env_data.pip_requirements) - -VENV_DIR=${ENV_DIR}/venv -THIS_SCRIPT=$(readlink -f ${BASH_SOURCE}) -THIS_DIR=$(dirname ${THIS_SCRIPT}) -LIB_DIR=$(readlink -f ${THIS_DIR}/../lib) -TESTERS_DIR=$(readlink -f ${THIS_DIR}/../../../) - -# main -create_venv diff --git a/testers/testers/pyta/bin/create_environment.sh b/testers/testers/pyta/bin/create_environment.sh deleted file mode 100755 index d398daeb..00000000 --- a/testers/testers/pyta/bin/create_environment.sh +++ /dev/null @@ -1,36 +0,0 @@ -#!/usr/bin/env bash - -set -e - -create_venv() { - rm -rf ${VENV_DIR} # clean up existing venv if any - python${PY_VERSION} -m venv ${VENV_DIR} - source ${VENV_DIR}/bin/activate - pip install --upgrade pip - pip install wheel - pip install -r "${THIS_DIR}/requirements.txt" - pip install -r <(echo ${PIP_REQUIREMENTS} | sed 's/\s\+/\n/g') # sub spaces for newlines - local pth_file=${VENV_DIR}/lib/python${PY_VERSION}/site-packages/lib.pth - echo ${LIB_DIR} >> ${pth_file} - echo ${TESTERS_DIR} >> ${pth_file} -} - -# script starts here -if [[ $# -lt 1 ]]; then - echo "Usage: $0 settings_json" -fi - -# vars -SETTINGS_JSON=$1 - -ENV_DIR=$(echo ${SETTINGS_JSON} | jq --raw-output .env_loc) -PY_VERSION=$(echo ${SETTINGS_JSON} | jq --raw-output .env_data.python_version) -PIP_REQUIREMENTS="$(echo ${SETTINGS_JSON} | jq --raw-output .env_data.pip_requirements)" - -VENV_DIR=${ENV_DIR}/venv 
-THIS_SCRIPT=$(readlink -f ${BASH_SOURCE}) -THIS_DIR=$(dirname ${THIS_SCRIPT}) -LIB_DIR=$(readlink -f ${THIS_DIR}/../lib) -TESTERS_DIR=$(readlink -f ${THIS_DIR}/../../../) - -create_venv