diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index 2d5d14f1601..3885e3ec9cf 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -1,4 +1,4 @@ -name: Build and upload to internal PyPI +name: Build and upload to PyPI on: workflow_dispatch: # run on request (no need for PR) @@ -40,15 +40,9 @@ jobs: name: Publish package needs: [build_wheels, build_sdist] environment: pypi - runs-on: [self-hosted, linux, x64, dev] + runs-on: ubuntu-latest permissions: write-all steps: - - name: Set up Python - uses: actions/setup-python@v4 - with: - python-version: "3.10" - - name: Install dependencies - run: python -m pip install twine - name: Download artifacts uses: actions/download-artifact@v3 with: @@ -56,6 +50,7 @@ jobs: # if `name: artifact` is omitted, the action will create extra parent dir name: artifact path: dist + # to determine where to publish the source distribution to PyPI or TestPyPI - name: Check tag id: check-tag uses: actions-ecosystem/action-regex-match@v2 @@ -71,18 +66,15 @@ jobs: tag: ${{ github.ref }} overwrite: true file_glob: true - - name: Check dist contents - run: twine check dist/* - - name: Publish package dist to internal PyPI - run: | - export no_proxy=${{ secrets.PYPI_HOST }} - export REPOSITORY_URL=http://${{ secrets.PYPI_HOST }}:${{ secrets.PYPI_PORT }} - twine upload --verbose --repository-url $REPOSITORY_URL dist/* -u ${{ secrets.PYPI_USER }} -p ${{ secrets.PYPI_PASSWORD }} - - name: Clean up dist - if: ${{ always() }} - run: | - if OUTPUT=$(ls | grep -c dist) - then - echo "Cleaning up dist directory" - rm -r dist - fi + - name: Publish package distributions to PyPI + if: ${{ steps.check-tag.outputs.match != '' }} + uses: pypa/gh-action-pypi-publish@v1.7.1 + with: + password: ${{ secrets.PYPI_API_TOKEN }} + - name: Publish package distributions to TestPyPI + if: ${{ steps.check-tag.outputs.match == '' }} + uses: pypa/gh-action-pypi-publish@v1.7.1 + with: + password: ${{ 
secrets.TESTPYPI_API_TOKEN }} + repository-url: https://test.pypi.org/legacy/ + verbose: true diff --git a/.github/workflows/publish_internal.yml b/.github/workflows/publish_internal.yml new file mode 100644 index 00000000000..800cc2c60ac --- /dev/null +++ b/.github/workflows/publish_internal.yml @@ -0,0 +1,83 @@ +name: Build and upload to internal PyPI + +on: + workflow_dispatch: # run on request (no need for PR) + +jobs: + build_wheels: + name: Build wheels + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v3 + - name: Build wheels + uses: pypa/cibuildwheel@v2.13.1 + - uses: actions/upload-artifact@v3 + with: + path: ./wheelhouse/*.whl + + build_sdist: + name: Build source distribution + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v3 + - name: Set up Python 3.10 + uses: actions/setup-python@v3 + with: + python-version: "3.10" + - name: Install pypa/build + run: python -m pip install build + - name: Build sdist + run: python -m build --sdist + - uses: actions/upload-artifact@v3 + with: + path: dist/*.tar.gz + + publish_package: + name: Publish package + needs: [build_wheels, build_sdist] + environment: pypi + runs-on: [self-hosted, linux, x64, dev] + permissions: write-all + steps: + - name: Set up Python + uses: actions/setup-python@v4 + with: + python-version: "3.10" + - name: Install dependencies + run: python -m pip install twine + - name: Download artifacts + uses: actions/download-artifact@v3 + with: + # unpacks default artifact into dist/ + # if `name: artifact` is omitted, the action will create extra parent dir + name: artifact + path: dist + - name: Check tag + id: check-tag + uses: actions-ecosystem/action-regex-match@v2 + with: + text: ${{ github.ref }} + regex: '^refs/heads/releases/[0-9]+\.[0-9]+\.[0-9]+(\.[0-9]+rc[0-9]+|rc[0-9]+)?$' + - name: Check dist contents + run: twine check dist/* + - name: Publish package dist to internal PyPI + if: ${{ steps.check-tag.outputs.match != '' }} + run: 
| + export no_proxy=${{ secrets.PYPI_HOST }} + export REPOSITORY_URL=http://${{ secrets.PYPI_HOST }}:${{ secrets.PYPI_PORT }} + twine upload --verbose --repository-url $REPOSITORY_URL dist/* -u ${{ secrets.PYPI_USER }} -p ${{ secrets.PYPI_PASSWORD }} + - name: Publish package distributions to TestPyPI + if: ${{ steps.check-tag.outputs.match == '' }} + run: | + export REPOSITORY_URL=https://test.pypi.org/legacy/ + twine upload --verbose --repository-url $REPOSITORY_URL dist/* -u __token__ -p ${{ secrets.TESTPYPI_API_TOKEN }} + - name: Clean up dist + if: ${{ always() }} + run: | + if OUTPUT=$(ls | grep -c dist) + then + echo "Cleaning up dist directory" + rm -r dist + fi diff --git a/CHANGELOG.md b/CHANGELOG.md index c15ccc7979b..7c8a8dc855d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,18 +2,18 @@ All notable changes to this project will be documented in this file. -## \[v1.5.0 - unreleased\] +## \[v1.5.0\] ### New features -- Enable configurable confidence threshold for otx eval and export() +- Enable configurable confidence threshold for otx eval and export () - Add YOLOX variants as new object detector models () -- Enable FeatureVectorHook to support action tasks() +- Enable FeatureVectorHook to support action tasks () - Add ONNX metadata to detection, instance segmantation, and segmentation models () -- Add a new feature to configure input size() +- Add a new feature to configure input size () - Introduce the OTXSampler and AdaptiveRepeatDataHook to achieve faster training at the small data regime () -- Add a new object detector Lite-DINO() -- Add Semi-SL Mean Teacher algorithm for Instance Segmentation task() +- Add a new object detector Lite-DINO () +- Add Semi-SL Mean Teacher algorithm for Instance Segmentation task () - Official supports for YOLOX-X, YOLOX-L, YOLOX-S, ResNeXt101-ATSS () - Add new argument to track resource usage in train command () - Add Self-SL for semantic segmentation of SegNext families () diff --git a/README.md b/README.md index 
55d4fbc6ec6..c78b75cb508 100644 --- a/README.md +++ b/README.md @@ -5,8 +5,8 @@ --- [Key Features](#key-features) • -[Installation](https://openvinotoolkit.github.io/training_extensions/stable/guide/get_started/installation.html) • -[Documentation](https://openvinotoolkit.github.io/training_extensions/stable/index.html) • +[Installation](https://openvinotoolkit.github.io/training_extensions/1.5.0/guide/get_started/installation.html) • +[Documentation](https://openvinotoolkit.github.io/training_extensions/1.5.0/index.html) • [License](#license) [![PyPI](https://img.shields.io/pypi/v/otx)](https://pypi.org/project/otx) @@ -42,7 +42,7 @@ If you are an experienced user, you can configure your own model based on [torch Furthermore, OpenVINO™ Training Extensions provides automatic configuration for ease of use. The framework will analyze your dataset and identify the most suitable model and figure out the best input size setting and other hyper-parameters. -The development team is continuously extending this [Auto-configuration](https://openvinotoolkit.github.io/training_extensions/latest/guide/explanation/additional_features/auto_configuration.html) functionalities to make training as simple as possible so that single CLI command can obtain accurate, efficient and robust models ready to be integrated into your project. +The development team is continuously extending this [Auto-configuration](https://openvinotoolkit.github.io/training_extensions/stable/guide/explanation/additional_features/auto_configuration.html) functionalities to make training as simple as possible so that single CLI command can obtain accurate, efficient and robust models ready to be integrated into your project. 
### Key Features @@ -63,11 +63,11 @@ OpenVINO™ Training Extensions supports the [following learning methods](https: OpenVINO™ Training Extensions provides the following usability features: -- [Auto-configuration](https://openvinotoolkit.github.io/training_extensions/latest/guide/explanation/additional_features/auto_configuration.html). OpenVINO™ Training Extensions analyzes provided dataset and selects the proper task and model with appropriate input size to provide the best accuracy/speed trade-off. It will also make a random auto-split of your dataset if there is no validation set provided. +- [Auto-configuration](https://openvinotoolkit.github.io/training_extensions/stable/guide/explanation/additional_features/auto_configuration.html). OpenVINO™ Training Extensions analyzes provided dataset and selects the proper task and model with appropriate input size to provide the best accuracy/speed trade-off. It will also make a random auto-split of your dataset if there is no validation set provided. - [Datumaro](https://openvinotoolkit.github.io/datumaro/stable/index.html) data frontend: OpenVINO™ Training Extensions supports the most common academic field dataset formats for each task. We are constantly working to extend supported formats to give more freedom of datasets format choice. - **Distributed training** to accelerate the training process when you have multiple GPUs - **Mixed-precision training** to save GPUs memory and use larger batch sizes -- Integrated, efficient [hyper-parameter optimization module (HPO)](https://openvinotoolkit.github.io/training_extensions/latest/guide/explanation/additional_features/hpo.html). Through dataset proxy and built-in hyper-parameter optimizer, you can get much faster hyper-parameter optimization compared to other off-the-shelf tools. The hyperparameter optimization is dynamically scheduled based on your resource budget. 
+- Integrated, efficient [hyper-parameter optimization module (HPO)](https://openvinotoolkit.github.io/training_extensions/stable/guide/explanation/additional_features/hpo.html). Through dataset proxy and built-in hyper-parameter optimizer, you can get much faster hyper-parameter optimization compared to other off-the-shelf tools. The hyperparameter optimization is dynamically scheduled based on your resource budget. --- @@ -75,7 +75,7 @@ OpenVINO™ Training Extensions provides the following usability features: ### Installation -Please refer to the [installation guide](https://openvinotoolkit.github.io/training_extensions/stable/guide/get_started/installation.html). +Please refer to the [installation guide](https://openvinotoolkit.github.io/training_extensions/1.5.0/guide/get_started/installation.html). Note: Python 3.8, 3.9 and 3.10 were tested, along with Ubuntu 18.04, 20.04 and 22.04. @@ -91,22 +91,26 @@ Note: Python 3.8, 3.9 and 3.10 were tested, along with Ubuntu 18.04, 20.04 and 2 - `otx demo` allows one to apply a trained model on the custom data or the online footage from a web camera and see how it will work in a real-life scenario. - `otx explain` runs explain algorithm on the provided data and outputs images with the saliency maps to show how your model makes predictions. -You can find more details with examples in the [CLI command intro](https://openvinotoolkit.github.io/training_extensions/stable/guide/get_started/cli_commands.html). +You can find more details with examples in the [CLI command intro](https://openvinotoolkit.github.io/training_extensions/1.5.0/guide/get_started/cli_commands.html). 
--- ## Updates -### v1.4.0 (3Q23) - -- Support encrypted dataset training () -- Add custom max iou assigner to prevent CPU OOM when large annotations are used () -- Auto train type detection for Semi-SL, Self-SL and Incremental: "--train-type" now is optional () -- Add per-class XAI saliency maps for Mask R-CNN model () -- Add new object detector Deformable DETR () -- Add new object detector DINO () -- Add new visual prompting task (, , , , ) -- Add new object detector ResNeXt101-ATSS () +### v1.5.0 (4Q23) + +- Enable configurable confidence threshold for otx eval and export () +- Add YOLOX variants as new object detector models () +- Enable FeatureVectorHook to support action tasks () +- Add ONNX metadata to detection, instance segmentation, and segmentation models () +- Add a new feature to configure input size () +- Introduce the OTXSampler and AdaptiveRepeatDataHook to achieve faster training at the small data regime () +- Add a new object detector Lite-DINO () +- Add Semi-SL Mean Teacher algorithm for Instance Segmentation task () +- Official supports for YOLOX-X, YOLOX-L, YOLOX-S, ResNeXt101-ATSS () +- Add new argument to track resource usage in train command () +- Add Self-SL for semantic segmentation of SegNext families () +- Adapt input size automatically based on dataset statistics () ### Release History diff --git a/docs/source/guide/release_notes/index.rst b/docs/source/guide/release_notes/index.rst index 2699992177b..133b7350c9e 100644 --- a/docs/source/guide/release_notes/index.rst +++ b/docs/source/guide/release_notes/index.rst @@ -4,6 +4,35 @@ Releases ..
toctree:: :maxdepth: 1 +v1.5.0 (4Q23) +------------- + +- Enable configurable confidence threshold for otx eval and export +- Add YOLOX variants as new object detector models +- Enable FeatureVectorHook to support action tasks +- Add ONNX metadata to detection, instance segmentation, and segmentation models +- Add a new feature to configure input size +- Introduce the OTXSampler and AdaptiveRepeatDataHook to achieve faster training at the small data regime +- Add a new object detector Lite-DINO +- Add Semi-SL Mean Teacher algorithm for Instance Segmentation task +- Official supports for YOLOX-X, YOLOX-L, YOLOX-S, ResNeXt101-ATSS +- Add new argument to track resource usage in train command +- Add Self-SL for semantic segmentation of SegNext families +- Adapt input size automatically based on dataset statistics +- Refine input data in-memory caching +- Adapt timeout value of initialization for distributed training +- Optimize data loading by merging load & resize operations w/ caching support for cls/det/iseg/sseg +- Support torch==2.0.1 +- Set "Auto" as default input size mode + + +v1.4.4 (4Q23) +------------- + +- Update ModelAPI configuration +- Add Anomaly modelAPI changes +- Update Image numpy access + v1.4.3 (4Q23) ------------- diff --git a/requirements/base.txt b/requirements/base.txt index 7203f9422a2..f1f7314df86 100644 --- a/requirements/base.txt +++ b/requirements/base.txt @@ -1,10 +1,10 @@ # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # -# Base Algo Requirements. # +# Base Algo Requirements. # natsort==8.1.* prettytable==3.9.* protobuf==3.20.* pyyaml -datumaro==1.5.1rc3 +datumaro~=1.5.1rc4 psutil==5.9.* scipy==1.10.* bayesian-optimization==1.4.*