diff --git a/.github/workflows/client.yaml b/.github/workflows/client.yaml
index a7b157d1c3..42edc9ecba 100644
--- a/.github/workflows/client.yaml
+++ b/.github/workflows/client.yaml
@@ -184,4 +184,4 @@ jobs:
         env:
           GITHUB_ACTION: 1
           PYTHON_VERSION: ${{matrix.python-version}}
-        run: scripts/run_demo.sh
+        run: python3 scripts/client_test/cli_test.py mnist
diff --git a/client/starwhale/api/_impl/data_store.py b/client/starwhale/api/_impl/data_store.py
index 8e64f7ff16..3b71eb5408 100644
--- a/client/starwhale/api/_impl/data_store.py
+++ b/client/starwhale/api/_impl/data_store.py
@@ -1332,7 +1332,7 @@ def run(self) -> None:
                     self.table_name, self.schema, self._updating_records
                 )
             except Exception as e:
-                logger.warning(f"{self} run-update-table raise exception: {e}")
+                logger.exception(e)
                 self._queue_run_exceptions.append(e)
                 if len(self._queue_run_exceptions) > self._run_exceptions_limits:
                     break
diff --git a/client/starwhale/api/_impl/dataset/loader.py b/client/starwhale/api/_impl/dataset/loader.py
index 7545c2f7ea..650d110a18 100644
--- a/client/starwhale/api/_impl/dataset/loader.py
+++ b/client/starwhale/api/_impl/dataset/loader.py
@@ -172,7 +172,7 @@ def get_sharding_data_loader(
     return get_data_loader(
         dataset_uri=_uri,
         start=start,
-        end=end + 1,
+        end=end,
         logger=logger,
     )
 
diff --git a/client/starwhale/api/_impl/model.py b/client/starwhale/api/_impl/model.py
index a365495f4d..08d42106fd 100644
--- a/client/starwhale/api/_impl/model.py
+++ b/client/starwhale/api/_impl/model.py
@@ -47,6 +47,9 @@ def save(self, data_id: t.Union[int, str], result: t.Any, **kwargs: t.Any) -> No
             data_id=data_id, result=result, **kwargs, serialize=True
         )
 
+    def flush(self) -> None:
+        self.evaluation.flush_result()
+
     def __exit__(self) -> None:
         self.evaluation.close()
 
@@ -70,12 +73,14 @@ def __init__(
         self,
         ignore_annotations: bool = False,
         ignore_error: bool = False,
+        flush_result: bool = False,
     ) -> None:
         self.context: Context = context_holder.context
         # TODO: add args for compare result and label directly
         self.ignore_annotations = ignore_annotations
         self.ignore_error = ignore_error
+        self.flush_result = flush_result
 
         _logdir = EvaluationStorage.local_run_dir(
             self.context.project, self.context.version
@@ -100,6 +105,7 @@ def __init__(
             eval_id=self.context.version, project=self.context.project
         )
         self._monkey_patch()
+        self._update_status(STATUS.START)
 
     def _init_logger(
         self, log_dir: Path, rotation: str = "500MB"
     ) ->
@@ -188,7 +194,6 @@ def _wrapper(*args: t.Any, **kwargs: t.Any) -> None:
 
     @_record_status  # type: ignore
     def _starwhale_internal_run_cmp(self) -> None:
-        self._update_status(STATUS.START)
         now = now_str()
         try:
             ppl_result_loader = PPLResultIterator(self.context)
@@ -204,8 +209,6 @@ def _starwhale_internal_run_ppl(self) -> None:
-        self._update_status(STATUS.START)
-
         result_storage = PPLResultStorage(self.context)
 
         if not self.context.dataset_uris:
@@ -245,7 +248,8 @@ def _starwhale_internal_run_ppl(self) -> None:
                 result=result,
                 annotations={} if self.ignore_annotations else _annotations,
             )
-            self._update_status(STATUS.RUNNING)
+        if self.flush_result:
+            result_storage.flush()
 
     def _update_status(self, status: str) -> None:
         fpath = self.status_dir / CURRENT_FNAME
diff --git a/client/tests/sdk/test_model.py b/client/tests/sdk/test_model.py
index db8fbf9b56..d4272247a0 100644
--- a/client/tests/sdk/test_model.py
+++ b/client/tests/sdk/test_model.py
@@ -291,7 +291,7 @@ def cmp(self, _data_loader: t.Any) -> t.Any:
         )
         context_holder.context = context
         # mock
-        with Dummy() as _handler:
+        with Dummy(flush_result=True) as _handler:
             _handler._starwhale_internal_run_ppl()
 
         context = Context(
diff --git a/scripts/client_test/cli_test.py b/scripts/client_test/cli_test.py
index a1e1a8f194..03733a2ab2 100644
--- a/scripts/client_test/cli_test.py
+++ b/scripts/client_test/cli_test.py
@@ -2,7 +2,7 @@
 import sys
 import tempfile
 from time import sleep
-from typing import Any
+from typing import Any, Optional
 
 from cmds.eval_cmd import Evaluation
 from cmds.base.common import EnvironmentPrepare
@@ -78,7 +78,10 @@ def standard_workflow(
         _eval_list = self.evaluation.list()
         assert len(_eval_list) == 1
-        assert self.evaluation.info(_eval_list[0]["manifest"]["version"])
+        eval_info = self.evaluation.info(_eval_list[0]["manifest"]["version"])
+        assert eval_info
+        assert eval_info["manifest"]["status"] == "success"
+
         if mode != RunMode.CLOUD:
             return
 
         # 5.login to cloud
@@ -146,7 +149,7 @@ def standard_workflow(
                 print("job status api occur some error!now will exit")
                 break
 
-        assert _job_status == "SUCCESS"
+        # assert _job_status == "SUCCESS"
 
         # 10.reset instance to local
         self.instance.select("local")
@@ -157,48 +160,55 @@ def get_job_status(self, cloud_uri: str, cloud_project: str, job_id: str) -> Any
         )
         return _remote_job["manifest"]["jobStatus"] if _remote_job else "API ERROR"
 
-    def test_mnist(self, mode: str) -> None:
+    def test_mnist(self, cloud_url: Optional[str]) -> None:
+        invoke(["cp", "-rf", f"{ROOT_DIR}/example", f"{self._work_dir}/example"])
         _environment_prepare = EnvironmentPrepare(work_dir=self._work_dir)
         _environment_prepare.prepare_mnist_data()
         _environment_prepare.prepare_mnist_requirements()
         self.standard_workflow(
-            mode=mode,
+            mode=RunMode.CLOUD if cloud_url else RunMode.STANDALONE,
             model_name="mnist",
             model_workdir=f"{self._work_dir}/example/mnist",
             ds_name="mnist",
             ds_workdir=f"{self._work_dir}/example/mnist",
-            rt_name="pytorch-mnist",
-            rt_workdir=f"{self._work_dir}/example/mnist",
-            cloud_uri=os.environ.get("CONTROLLER_URL") or "http://127.0.0.1:8082",
+            rt_name="pytorch",
+            rt_workdir=f"{self._work_dir}/example/runtime/pytorch",
+            cloud_uri=cloud_url if cloud_url else "http://127.0.0.1:8082",
+            cloud_project="starwhale",
+        )
+
+    def test_simple(self, cloud_url: Optional[str]) -> None:
+        self.standard_workflow(
+            mode=RunMode.CLOUD if cloud_url else RunMode.STANDALONE,
+            model_name="simple-test",
+            model_workdir=f"{self._work_dir}/scripts/example",
+            ds_name="simple-test",
+            ds_workdir=f"{self._work_dir}/scripts/example",
+            rt_name="simple-test",
+            rt_workdir=f"{self._work_dir}/scripts/example",
+            cloud_uri=cloud_url if cloud_url else "http://127.0.0.1:8082",
             cloud_project="starwhale",
         )
 
     # TODO add more example
 
 
-def init_run_environment() -> str:
+def init_run_environment(work_dir: str) -> None:
     # prepare environment
-    _work_dir = os.environ.get("SW_WORK_DIR")
-    print(f"work-dir is:{_work_dir}")
-    _tmp = None
-    if not _work_dir:
-        _tmp = tempfile.TemporaryDirectory()
-        _work_dir = _tmp.name
-        print(f"use work-dir is:{_work_dir}")
+    print(f"work-dir is:{work_dir}")
 
-    os.environ["SW_CLI_CONFIG"] = f"{_work_dir}/config.yaml"
-    os.environ["SW_LOCAL_STORAGE"] = f"{_work_dir}/data"
+    os.environ["SW_CLI_CONFIG"] = f"{work_dir}/config.yaml"
+    os.environ["SW_LOCAL_STORAGE"] = f"{work_dir}/data"
 
-    invoke(["cp", "-rf", f"{ROOT_DIR}/example", f"{_work_dir}/example"])
-    invoke(["cp", "-rf", f"{ROOT_DIR}/client", f"{_work_dir}/client"])
-    invoke(["cp", "-rf", f"{ROOT_DIR}/README.md", f"{_work_dir}/README.md"])
+    invoke(["cp", "-rf", f"{ROOT_DIR}/client", f"{work_dir}/client"])
+    invoke(["cp", "-rf", f"{ROOT_DIR}/scripts", f"{work_dir}/scripts"])
+    invoke(["cp", "-rf", f"{ROOT_DIR}/README.md", f"{work_dir}/README.md"])
 
     # install sw at current session
     print(f"env PYPI_RELEASE_VERSION is:{os.environ.get('PYPI_RELEASE_VERSION')}")
-    invoke(["pip", "install", "-e", f"{_work_dir}/client"])
+    invoke(["python3", "-m", "pip", "install", "-e", f"{work_dir}/client"])
     _res, _err = invoke(["swcli", "--version"])
     print(f"pytest use swcli version is:{_res}")
-    return _work_dir
 
 
 class RunMode:
@@ -207,11 +217,15 @@
 
 
 if __name__ == "__main__":
-    # start test
-    test_cli = TestCli(work_dir=init_run_environment())
-    example = sys.argv[1]
-    _mode = RunMode.CLOUD if os.environ.get("CONTROLLER_URL") else RunMode.STANDALONE
-    if example == "mnist":
-        test_cli.test_mnist(_mode)
-    else:
-        print("there is nothing to run!")
+    with tempfile.TemporaryDirectory() as workdir:
+        init_run_environment(workdir)
+        # start test
+        test_cli = TestCli(work_dir=workdir)
+        example = sys.argv[1]
+        _cloud_url = os.environ.get("CONTROLLER_URL")
+        if example == "mnist":
+            test_cli.test_mnist(_cloud_url)
+        elif example == "simple":
+            test_cli.test_simple(_cloud_url)
+        else:
+            print("there is nothing to run!")
diff --git a/scripts/client_test/cmds/base/common.py b/scripts/client_test/cmds/base/common.py
index 0c4720ab0b..a93bf4836d 100644
--- a/scripts/client_test/cmds/base/common.py
+++ b/scripts/client_test/cmds/base/common.py
@@ -1,4 +1,4 @@
-from .invoke import invoke
+from .invoke import invoke, invoke_with_react
 
 
 class EnvironmentPrepare:
@@ -52,28 +52,28 @@ def prepare_mnist_data(self) -> None:
                 "http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz",
             ]
         )
-        invoke(
+        invoke_with_react(
            [
                 "gzip",
                 "-d",
                 f"{self.work_dir}/example/mnist/data/train-images-idx3-ubyte.gz",
             ]
         )
-        invoke(
+        invoke_with_react(
            [
                 "gzip",
                 "-d",
                 f"{self.work_dir}/example/mnist/data/train-labels-idx1-ubyte.gz",
             ]
         )
-        invoke(
+        invoke_with_react(
            [
                 "gzip",
                 "-d",
                 f"{self.work_dir}/example/mnist/data/t10k-images-idx3-ubyte.gz",
             ]
         )
-        invoke(
+        invoke_with_react(
            [
                 "gzip",
                 "-d",
diff --git a/scripts/client_test/cmds/eval_cmd.py b/scripts/client_test/cmds/eval_cmd.py
index ce4e890e4f..e271307e54 100644
--- a/scripts/client_test/cmds/eval_cmd.py
+++ b/scripts/client_test/cmds/eval_cmd.py
@@ -61,7 +61,7 @@ def run(
             _args.extend(["--project", project])
 
         _res, _err = invoke(_args)
-        return not _err and _valid_str in _res
+        return _valid_str in _res
 
     def info(self, version: str) -> Any:
         """
diff --git a/scripts/client_test/step_spec.yaml b/scripts/client_test/step_spec.yaml
index 3071543826..cb25a52ccd 100644
--- a/scripts/client_test/step_spec.yaml
+++ b/scripts/client_test/step_spec.yaml
@@ -6,8 +6,8 @@ default:
     overwriteable: true
     resources:
       - type: cpu
-        request: 0.2
-        limit: 0.2
+        request: 0.4
+        limit: 0.4
     step_name: ppl
     task_num: 2
   - cls_name: ''
@@ -18,7 +18,7 @@ default:
     overwriteable: false
     resources:
      - type: cpu
-        request: 0.2
-        limit: 0.2
+        request: 0.4
+        limit: 0.4
     step_name: cmp
     task_num: 1
diff --git a/scripts/e2e_test/check_controller_port.sh b/scripts/e2e_test/check_controller_port.sh
new file mode 100755
index 0000000000..71e7033443
--- /dev/null
+++ b/scripts/e2e_test/check_controller_port.sh
@@ -0,0 +1,12 @@
+#!/usr/bin/env bash
+
+var=$(lsof -i tcp:8082)
+if [ ! -z "$var" ]
+then
+    sleep 10
+else
+    if kill -9 `ps -ef|grep port-forward | grep -v grep | awk '{print $2}'` ; then echo "kill success and restart port-forward"; fi
+    nohup kubectl port-forward --namespace starwhale-e2e svc/starwhale-e2e-controller 8082:8082 &
+    sleep 10
+fi
+
diff --git a/scripts/e2e_test/check_job.sh b/scripts/e2e_test/check_job.sh
new file mode 100755
index 0000000000..3edf3a8794
--- /dev/null
+++ b/scripts/e2e_test/check_job.sh
@@ -0,0 +1,58 @@
+#!/usr/bin/env bash
+
+set -e
+
+if [[ ! -z ${DEBUG} ]]; then
+    set -x
+fi
+echo "login"
+curl -D - --location --request POST "http://$1/api/v1/login" \
+--header 'Accept: application/json' \
+--form 'userName="starwhale"' \
+--form 'userPwd="abcd1234"' -o /dev/null | while read line
+do
+    if [[ "${line}" =~ ^Authorization.* ]] ; then
+        echo "${line}" > auth_header.h
+    fi
+done
+
+auth_header=`cat auth_header.h`
+
+sudo apt-get install jq
+
+echo "get task"
+OUT=`curl -X 'GET' \
+  "http://$1/api/v1/project/starwhale/job/1/task?pageNum=1&pageSize=10" \
+  -H 'accept: application/json' \
+  -H "$auth_header" | jq '.data.list'| jq -r '.[]|.id'`
+echo "taskids: $OUT..."
+read -a task_ids <<< $OUT
+task_ids=($OUT)
+
+SAVEIFS=$IFS   # Save current IFS (Internal Field Separator)
+IFS=$'\n'      # Change IFS to newline char
+task_ids=($OUT) # split the `names` string into an array by the same name
+IFS=$SAVEIFS   # Restore original IFS
+echo "get logs..."
+for (( i=0; i<${#task_ids[@]}; i++ ))
+do
+    task_id=${task_ids[$i]}
+    log_file=`curl -X 'GET' "http://$1/api/v1/log/offline/$task_id" -H 'accept: application/json' -H "$auth_header" | jq -r '.data[0]'`
+    echo $log_file
+
+    echo "task log is:"
+    curl -X 'GET' "http://$1/api/v1/log/offline/$task_id/$log_file" -H 'accept: plain/text' -H "$auth_header"
+done
+
+echo "get job status"
+
+curl -X 'GET' \
+  "http://$1/api/v1/project/starwhale/job/1" \
+  -H 'accept: application/json' \
+  -H "$auth_header" | jq -r '.data.jobStatus' > jobStatus
+job_status=`cat jobStatus`
+
+echo "job status is $job_status"
+if [[ "$job_status" != "SUCCESS" ]] ; then
+    exit 1
+fi
diff --git a/scripts/e2e_test/service_wait.sh b/scripts/e2e_test/service_wait.sh
old mode 100644
new mode 100755
diff --git a/scripts/e2e_test/start_test.sh b/scripts/e2e_test/start_test.sh
index aded97de48..9707c82f19 100755
--- a/scripts/e2e_test/start_test.sh
+++ b/scripts/e2e_test/start_test.sh
@@ -202,6 +202,8 @@ check_controller_service() {
     sleep 15
   done
   nohup kubectl port-forward --namespace $SWNS svc/$SWNAME-controller 8082:$PORT_CONTROLLER &
+  pwd
+  check_controller_port.sh &
 }
 
 client_test() {
@@ -211,7 +213,8 @@ client_test() {
   rm -rf .pytest_cache
   rm -rf venv*
   pushd ../
-  scripts/run_demo.sh
+  python3 scripts/client_test/cli_test.py simple
+  scripts/e2e_test/check_job.sh 127.0.0.1:$PORT_CONTROLLER
   popd
   popd
diff --git a/scripts/e2e_test/test_job_run.sh b/scripts/e2e_test/test_job_run.sh
deleted file mode 100755
index e6a6c5ea0b..0000000000
--- a/scripts/e2e_test/test_job_run.sh
+++ /dev/null
@@ -1,99 +0,0 @@
-#!/usr/bin/env bash
-
-set -e
-
-if [[ ! -z ${DEBUG} ]]; then
-    set -x
-fi
-
-curl -D - --location --request POST "http://$1/api/v1/login" \
---header 'Accept: application/json' \
---form 'userName="starwhale"' \
---form 'userPwd="abcd1234"' -o /dev/null | while read line
-do
-    if [[ "${line}" =~ ^Authorization.* ]] ; then
-        echo "${line}" > auth_header.h
-    fi
-done
-
-auth_header=`cat auth_header.h`
-sudo apt-get install jq
-job_id=`curl -X 'POST' \
-  "http://$1/api/v1/project/1/job" \
-  -H 'accept: */*' \
-  -H "$auth_header" \
-  -H 'Content-Type: application/json' \
-  -d '{
-  "modelVersionUrl": "1",
-  "datasetVersionUrls": "1",
-  "runtimeVersionUrl": "1",
-  "device": "1",
-  "deviceAmount": 0.4,
-  "comment": "string"
-}' | jq -r '.data'`
-
-if [ "$job_id" == "null" ] ; then
-    echo "Error! job id is null" 1>&2
-    exit 1
-fi
-
-while true
-do
-    if curl -X 'GET' \
-    "http://$1/api/v1/project/1/job/$job_id" \
-    -H 'accept: application/json' \
-    -H "$auth_header" | jq -r '.data.jobStatus' > jobStatus ; then echo "8082 well"; else kubectl logs --tail=10 -l starwhale.ai/role=controller -n $SWNS; continue; fi
-    job_status=`cat jobStatus`
-    if [ "$job_status" == "null" ] ; then
-        echo "Error! job_status id is null" 1>&2
-        exit 1
-    fi
-    if [[ "$job_status" = "SUCCESS" ]] ; then
-        echo "job success done"
-        break
-    elif [[ "$job_status" = "FAIL" ]] ; then
-        echo "job FAIL"
-        break
-    elif [[ -z "$job_status" ]] ; then
-        if kill -9 `ps -ef|grep port-forward | grep -v grep | awk '{print $2}'` ; then echo "kill success"; fi
-        nohup kubectl port-forward --namespace $SWNS svc/$SWNAME-controller 8082:8082 &
-        sleep 20
-    else
-        echo "job status for " "$job_id" "is" "$job_status"
-#        kubectl logs --tail=10 -l job-name=1 -n starwhale -c data-provider
-#        kubectl logs --tail=10 -l job-name=1 -n starwhale -c untar
-#        kubectl logs --tail=10 -l job-name=1 -n starwhale -c worker
-#        kubectl logs --tail=10 -l job-name=1 -n starwhale -c result-uploader
-
-#        kubectl logs -f -l starwhale.ai/role=controller -n starwhale
-#        kubectl describe pod -l "job-name in (1,2,3,4,5,6,7,8,9,10)" -n starwhale
-#        kubectl describe node
-        sleep 10
-    fi
-done
-
-task_id=`curl -X 'GET' \
-  "http://$1/api/v1/project/1/job/$job_id/task?pageNum=1&pageSize=10" \
-  -H 'accept: application/json' \
-  -H "$auth_header" | jq -r '.data.list[1].id'`
-
-echo $task_id
-curl -X 'GET' "http://$1/api/v1/log/offline/$task_id" -H 'accept: application/json' -H "$auth_header" | jq -r '.data[0]'
-log_file=`curl -X 'GET' "http://$1/api/v1/log/offline/$task_id" -H 'accept: application/json' -H "$auth_header" | jq -r '.data[0]'`
-echo $log_file
-
-echo "task log is:"
-
-curl -X 'GET' \
-  "http://$1/api/v1/log/offline/$task_id/$log_file" \
-  -H 'accept: plain/text' \
-  -H "$auth_header"
-
-#echo "agent log is:"
-#docker logs compose_agent_1
-
-job_status=`cat jobStatus`
-if [[ "$job_status" == "FAIL" ]] ; then
-    exit 1
-fi
-
diff --git a/scripts/example/.swignore b/scripts/example/.swignore
new file mode 100644
index 0000000000..92c3d73c44
--- /dev/null
+++ b/scripts/example/.swignore
@@ -0,0 +1,5 @@
+ venv
+.git
+.history
+.vscode
+.venv
diff --git a/scripts/example/code/__init__.py b/scripts/example/code/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/scripts/example/code/dataset.py b/scripts/example/code/dataset.py
new file mode 100644
index 0000000000..39a2f8e303
--- /dev/null
+++ b/scripts/example/code/dataset.py
@@ -0,0 +1,11 @@
+import typing as t
+
+from starwhale import Text, BuildExecutor
+
+
+class SimpleTextDatasetBuildExecutor(BuildExecutor):
+    def iter_item(self) -> t.Generator[t.Tuple[t.Any, t.Any], None, None]:
+        for idx in range(0, 100):
+            data = Text(f"data-{idx}", encoding="utf-8")
+            annotations = {"label": f"label-{idx}"}
+            yield data, annotations
diff --git a/scripts/example/code/evaluator.py b/scripts/example/code/evaluator.py
new file mode 100644
index 0000000000..64bf69a995
--- /dev/null
+++ b/scripts/example/code/evaluator.py
@@ -0,0 +1,31 @@
+import os
+import random
+
+import numpy
+
+from starwhale import Text, PipelineHandler, PPLResultIterator, multi_classification
+
+_ROOT_DIR = os.path.dirname(os.path.dirname(__file__))
+
+
+class SimplePipeline(PipelineHandler):
+    def ppl(self, text: Text, **kw):
+        return (
+            text.content,
+            numpy.exp([[random.uniform(-10, 1) for i in range(0, 100)]]).tolist(),
+        )
+
+    @multi_classification(
+        confusion_matrix_normalize="all",
+        show_hamming_loss=True,
+        show_cohen_kappa_score=True,
+        show_roc_auc=True,
+        all_labels=[f"label-{i}" for i in range(0, 100)],
+    )
+    def cmp(self, ppl_result: PPLResultIterator):
+        result, label, pr = [], [], []
+        for _data in ppl_result:
+            label.append(_data["annotations"]["label"])
+            result.append(_data["result"][0])
+            pr.extend(_data["result"][1])
+        return label, result, pr
diff --git a/scripts/example/dataset.yaml b/scripts/example/dataset.yaml
new file mode 100644
index 0000000000..ad628aaddd
--- /dev/null
+++ b/scripts/example/dataset.yaml
@@ -0,0 +1,9 @@
+name: simple-test
+
+handler: code.dataset:SimpleTextDatasetBuildExecutor
+
+desc: simple dataset
+
+attr:
+  alignment_size: 128
+  volume_size: 10M
diff --git a/scripts/example/model.yaml b/scripts/example/model.yaml
new file mode 100644
index 0000000000..76f4da8c72
--- /dev/null
+++ b/scripts/example/model.yaml
@@ -0,0 +1,10 @@
+version: 1.0
+name: simple-test
+
+model:
+  - model/empty.pt
+
+run:
+  handler: code.evaluator:SimplePipeline
+
+desc: simple model
diff --git a/scripts/example/model/empty.pt b/scripts/example/model/empty.pt
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/scripts/example/requirements-sw-lock.txt b/scripts/example/requirements-sw-lock.txt
new file mode 100644
index 0000000000..24ce15ab7e
--- /dev/null
+++ b/scripts/example/requirements-sw-lock.txt
@@ -0,0 +1 @@
+numpy
diff --git a/scripts/example/runtime.yaml b/scripts/example/runtime.yaml
new file mode 100644
index 0000000000..f66609111b
--- /dev/null
+++ b/scripts/example/runtime.yaml
@@ -0,0 +1,22 @@
+api_version: 1.1
+configs:
+  conda:
+    channels:
+      - conda-forge
+  docker:
+    image: ghcr.io/star-whale/runtime/pytorch
+  pip:
+    extra_index_url: https://mirrors.bfsu.edu.cn/pypi/web/simple
+    index_url: https://pypi.tuna.tsinghua.edu.cn/simple
+    trusted_host:
+      - pypi.tuna.tsinghua.edu.cn
+      - mirrors.bfsu.edu.cn
+dependencies:
+  - pip:
+      - numpy
+  - requirements-sw-lock.txt
+environment:
+  arch: noarch
+  os: ubuntu:20.04
+mode: venv
+name: simple-test
diff --git a/scripts/run_demo.sh b/scripts/run_demo.sh
deleted file mode 100755
index eb3ec343cc..0000000000
--- a/scripts/run_demo.sh
+++ /dev/null
@@ -1,77 +0,0 @@
-#! /bin/bash
-
-set -e
-
-PYTHON_VERSION="${PYTHON_VERSION:=3.7}"
-
-python3 -V
-
-in_github_action() {
-  [ -n "$GITHUB_ACTION" ]
-}
-
-# macos do not support realpath
-# use ad-hoc method from https://stackoverflow.com/questions/3572030/bash-script-absolute-path-with-os-x
-realpath() {
-    [[ $1 = /* ]] && echo "$1" || echo "$PWD/${1#./}"
-}
-
-script_dir="$(dirname -- "$(realpath "${BASH_SOURCE[0]}")")"
-cd "$script_dir"/..
-if in_github_action; then
-    WORK_DIR="$(pwd)"
-else
-    WORK_DIR=$(mktemp -d)
-    echo "use $(pwd) as source"
-fi
-
-export SW_WORK_DIR=$WORK_DIR
-echo $WORK_DIR > WORK_DIR
-
-finish() {
-    if ! in_github_action && test -z "$PARENT_CLEAN" ; then
-        echo 'cleanup work dir '"$WORK_DIR"
-        rm -rf "$WORK_DIR"
-    fi
-    echo 'cleanup'
-}
-
-trap finish EXIT
-
-
-if ! in_github_action; then
-    cp -rf ./client "$WORK_DIR"
-    cp -rf ./example "$WORK_DIR"
-    cp -rf ./scripts "$WORK_DIR"
-    cp -rf ./README.md "$WORK_DIR"
-    rm -rf "$WORK_DIR/.venv"
-    rm -rf "$WORK_DIR/example/mnist/.venv"
-    rm -f "$WORK_DIR/example/mnist/runtime.yaml"
-
-    # use a separate data & config dir
-    LOCAL_DATA_DIR=$(mktemp -d -p $WORK_DIR)
-    export SW_CLI_CONFIG="$LOCAL_DATA_DIR/config.yaml"
-    export SW_LOCAL_STORAGE=$LOCAL_DATA_DIR/data
-    echo $LOCAL_DATA_DIR > LOCAL_DATA_DIR
-fi
-
-echo "start test in $WORK_DIR"
-cd "$WORK_DIR" || exit
-# shellcheck source=/dev/null
-python3 -m venv .venv && . .venv/bin/activate && pip install --upgrade pip
-cd "$WORK_DIR/client" || exit
-
-echo "install swcli"
-make install-sw
-
-cd "$WORK_DIR/example/mnist" || exit
-swcli runtime quickstart shell . --python-env=venv --create-env --name pytorch-mnist
-# shellcheck source=/dev/null
-. .venv/bin/activate
-
-echo "execute test for mnist example"
-pushd ../../scripts/client_test
-python3 cli_test.py mnist
-popd
-
-echo "done"