Check for new model inference #39

name: Check for new model inference

# Re-run inference whenever an ONNX model is pushed, or on manual dispatch.
on:
  push:
    paths:
      - "**/**/*.onnx"
  workflow_dispatch:

permissions:
  contents: write
  pull-requests: write

jobs:
  # Use the project's self-hosted runner when triggered from the upstream
  # repository; forks fall back to a GitHub-hosted runner.
  select-runner:
    runs-on: ubuntu-22.04
    outputs:
      runner: ${{ steps.select_runner.outputs.runner }}
    steps:
      - uses: actions/checkout@v4
      - name: Select runner
        id: select_runner
        run: |
          if [ "${{ github.repository }}" = "NeuroDesk/cvpr-sam-on-laptop-2024" ]; then
            echo "runner=self-hosted" >> $GITHUB_OUTPUT
          else
            echo "runner=ubuntu-22.04" >> $GITHUB_OUTPUT
          fi

  infer_new_model:
    needs: [select-runner]
    runs-on: ${{ needs.select-runner.outputs.runner }}
    steps:
      # Clear leftovers from previous runs on the self-hosted runner before checkout.
      - name: Cleanup
        shell: bash
        run: |
          pwd
          ls -l
          rm -rf test_demo/runs
      - uses: actions/checkout@v4
      - uses: actions/setup-python@v5
        with:
          python-version: '3.10'
      - name: Set up environment and data from OSF
        run: |
          python -m pip install --upgrade pip
          python -m pip install osfclient numpy nibabel pandas tqdm matplotlib scipy
          osf --project u8tny fetch test_demo.tar.xz
          osf --project u8tny fetch checkpoints/LiteMedSAM/litemedsam_encoder.opt.quant.onnx
          osf --project u8tny fetch checkpoints/LiteMedSAM/lite_medsam_decoder_optimized.onnx
          tar -xvf test_demo.tar.xz
          rm test_demo.tar.xz
          mv litemedsam_encoder.opt.quant.onnx lite_medsam_decoder_optimized.onnx work_dir/
          ls
      # Docker needs sudo on the self-hosted runner; the USERPWD secret feeds sudo -S non-interactively.
      - name: Run inference
        shell: bash
        run: |
          mkdir segs && chmod -R 777 ./*
          export DOCKER_IMAGE_NAME=hawken50
          echo ${{ secrets.USERPWD }} | sudo -S docker build -f Dockerfile -t "$DOCKER_IMAGE_NAME" .
          echo ${{ secrets.USERPWD }} | sudo -S docker container run -m 8G --name "$DOCKER_IMAGE_NAME" --rm "$DOCKER_IMAGE_NAME":latest /bin/bash -c "ls"
          echo ${{ secrets.USERPWD }} | sudo -S docker container run -m 8G --name "$DOCKER_IMAGE_NAME" --rm "$DOCKER_IMAGE_NAME":latest /bin/bash -c "ls work_dir"
          ./evaluate.sh .
      - name: Display accuracy, efficiency, and running time
        run: |
          cat ./metrics.csv >> $GITHUB_STEP_SUMMARY
          cat ./running_time.csv >> $GITHUB_STEP_SUMMARY
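
For debugging outside CI, the data-setup and inference steps above can be mirrored on a local machine. The script below is only a sketch built from the workflow's own commands: it assumes a clean checkout containing Dockerfile and evaluate.sh, and that Docker is usable by the current user without the sudo -S password pipe used on the self-hosted runner.

#!/usr/bin/env bash
# Local reproduction sketch of the infer_new_model job (not part of the workflow itself).
set -euo pipefail

python -m pip install --upgrade pip
python -m pip install osfclient numpy nibabel pandas tqdm matplotlib scipy

# Fetch the demo data and both ONNX checkpoints from the OSF project u8tny,
# exactly as the "Set up environment and data from OSF" step does.
osf --project u8tny fetch test_demo.tar.xz
osf --project u8tny fetch checkpoints/LiteMedSAM/litemedsam_encoder.opt.quant.onnx
osf --project u8tny fetch checkpoints/LiteMedSAM/lite_medsam_decoder_optimized.onnx
tar -xvf test_demo.tar.xz && rm test_demo.tar.xz
mv litemedsam_encoder.opt.quant.onnx lite_medsam_decoder_optimized.onnx work_dir/

# Build the image and run the evaluation, as the "Run inference" step does
# (without the sudo password pipe used in CI).
mkdir -p segs
export DOCKER_IMAGE_NAME=hawken50
docker build -f Dockerfile -t "$DOCKER_IMAGE_NAME" .
./evaluate.sh .

# The workflow appends these files to the job summary; locally, just print them.
cat ./metrics.csv ./running_time.csv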