diff --git a/.github/ISSUE_TEMPLATE/0-build-issue-report.yml b/.github/ISSUE_TEMPLATE/0-build-issue-report.yml index d758111404a..a6f60d138d0 100644 --- a/.github/ISSUE_TEMPLATE/0-build-issue-report.yml +++ b/.github/ISSUE_TEMPLATE/0-build-issue-report.yml @@ -13,7 +13,7 @@ body: required: true - label: "For Python issues, I have tested with the [latest development wheel](http://www.open3d.org/docs/latest/getting_started.html#development-version-pip)." required: true - - label: "I have checked the [release documentation](http://www.open3d.org/docs/release/) and the [latest documentation](http://www.open3d.org/docs/latest/) (for `master` branch)." + - label: "I have checked the [release documentation](http://www.open3d.org/docs/release/) and the [latest documentation](http://www.open3d.org/docs/latest/) (for `main` branch)." required: true - type: textarea diff --git a/.github/ISSUE_TEMPLATE/1-bug-report.yml b/.github/ISSUE_TEMPLATE/1-bug-report.yml index a08052c3870..849ff752d3e 100644 --- a/.github/ISSUE_TEMPLATE/1-bug-report.yml +++ b/.github/ISSUE_TEMPLATE/1-bug-report.yml @@ -13,7 +13,7 @@ body: required: true - label: "For Python issues, I have tested with the [latest development wheel](http://www.open3d.org/docs/latest/getting_started.html#development-version-pip)." required: true - - label: "I have checked the [release documentation](http://www.open3d.org/docs/release/) and the [latest documentation](http://www.open3d.org/docs/latest/) (for `master` branch)." + - label: "I have checked the [release documentation](http://www.open3d.org/docs/release/) and the [latest documentation](http://www.open3d.org/docs/latest/) (for `main` branch)." required: true - type: textarea diff --git a/.github/ISSUE_TEMPLATE/2-questions.yml b/.github/ISSUE_TEMPLATE/2-questions.yml index 751ea81ff22..0a345ef8d8c 100644 --- a/.github/ISSUE_TEMPLATE/2-questions.yml +++ b/.github/ISSUE_TEMPLATE/2-questions.yml @@ -13,7 +13,7 @@ body: required: true - label: "For Python issues, I have tested with the [latest development wheel](http://www.open3d.org/docs/latest/getting_started.html#development-version-pip)." required: true - - label: "I have checked the [release documentation](http://www.open3d.org/docs/release/) and the [latest documentation](http://www.open3d.org/docs/latest/) (for `master` branch)." + - label: "I have checked the [release documentation](http://www.open3d.org/docs/release/) and the [latest documentation](http://www.open3d.org/docs/latest/) (for `main` branch)." required: true - type: textarea diff --git a/.github/ISSUE_TEMPLATE/3-feature-request.yml b/.github/ISSUE_TEMPLATE/3-feature-request.yml index 8bfd10b3a6b..6cfc80765c7 100644 --- a/.github/ISSUE_TEMPLATE/3-feature-request.yml +++ b/.github/ISSUE_TEMPLATE/3-feature-request.yml @@ -13,7 +13,7 @@ body: required: true - label: "For Python issues, I have tested with the [latest development wheel](http://www.open3d.org/docs/latest/getting_started.html#development-version-pip)." required: true - - label: "I have checked the [release documentation](http://www.open3d.org/docs/release/) and the [latest documentation](http://www.open3d.org/docs/latest/) (for `master` branch)." + - label: "I have checked the [release documentation](http://www.open3d.org/docs/release/) and the [latest documentation](http://www.open3d.org/docs/latest/) (for `main` branch)." 
required: true - type: textarea diff --git a/.github/workflows/README.md b/.github/workflows/README.md index 7062e08561e..97e6e17b878 100644 --- a/.github/workflows/README.md +++ b/.github/workflows/README.md @@ -14,7 +14,7 @@ actions updates. On macOS and Windows, using Ubuntu reduces CI cost. - `.github/workflows/documentation.yml`: Github Actions workflow file to create and deploy documentation. Documentation is created for every branch - as a CI test, but deployed only for `master`. + as a CI test, but deployed only for `main`. - `util/ci_utils.sh:build_docs()`: Called by GitHub Actions to build documentation. - `unpack_docs.sh`: Called by the documentation server to deploy the docs into the website. @@ -43,9 +43,9 @@ actions updates. On macOS and Windows, using Ubuntu reduces CI cost. gsutil lifecycle set gcs.lifecycle.json gs://open3d-docs/ ``` - Objects will be stored in the bucket for one week. Currently, the - documentation server fetches the latest docs from `master` branch every hour. - If the documentation server fails to fetch the docs matching the `master` + Objects will be stored in the bucket for 30 days. Currently, the + documentation server fetches the latest docs from `main` branch every hour. + If the documentation server fails to fetch the docs matching the `main` commit id, the last successfully fetched docs will be displayed. 3. Create service account ```bash @@ -163,14 +163,14 @@ used for running CI. - ARM64 cache (limit 1.5GB) is stored on Google cloud bucket (`open3d-ci-cache` in the `isl-buckets` project). The bucket is world readable, but needs the `open3d-ci-sa` service account for writing. Every - ARM64 build downloads the cache contents before build. Only `master` branch + ARM64 build downloads the cache contents before build. Only `main` branch builds use `gsutil rsync` to update the cache in GCS. Cache transfer only takes a few minutes, but reduces ARM64 CI time to about 1:15 hours. ## Development wheels and binary archives for user testing -`master` branch Python wheels and binary archives are uploaded to a world -readable GCS bucket in `open3d-releases-master/{python-wheels,devel}` for users +`main` branch Python wheels and binary archives are uploaded to a world +readable GCS bucket in `open3d-releases/{python-wheels,devel}` for users to try out development wheels. 
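As a quick illustration of how a user might consume these artifacts (a minimal sketch, not part of the CI scripts — it assumes `gsutil` is set up locally, and `<wheel-file>` is a placeholder for an actual object name, which encodes the version, commit, Python version, and platform):

```bash
# List the development wheels in the world-readable bucket.
gsutil ls gs://open3d-releases/python-wheels/

# Install one directly over HTTPS; substitute a real filename from the
# listing above for the <wheel-file> placeholder.
pip install "https://storage.googleapis.com/open3d-releases/python-wheels/<wheel-file>.whl"
```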
### Google Cloud storage @@ -185,10 +185,10 @@ bucket with: - One month (30 days) object lifecycle ```bash -gsutil mb -p open3d-dev -c STANDARD -l US -b on gs://open3d-releases-master -gsutil acl ch -u AllUsers:R gs://open3d-releases-master -gsutil lifecycle set gcs.lifecycle.json gs:/open3d-releases-master +gsutil mb -p open3d-dev -c STANDARD -l US -b on gs://open3d-releases +gsutil acl ch -u AllUsers:R gs://open3d-releases +gsutil lifecycle set gcs.lifecycle.json gs://open3d-releases gsutil iam ch \ serviceAccount:open3d-ci-sa-gpu@open3d-dev.iam.gserviceaccount.com:objectAdmin \ - gs://open3d-releases-master + gs://open3d-releases ``` diff --git a/.github/workflows/clean-gcloud-profiles.yml b/.github/workflows/clean-gcloud-profiles.yml index d4318d9bfec..5b5398156fd 100644 --- a/.github/workflows/clean-gcloud-profiles.yml +++ b/.github/workflows/clean-gcloud-profiles.yml @@ -21,7 +21,7 @@ on: workflow_dispatch: # push: # branches: # - master # - main # pull_request: # types: [opened, reopened, synchronize] diff --git a/.github/workflows/documentation.yml b/.github/workflows/documentation.yml index 6ee88b1abc1..85368dd8a45 100644 --- a/.github/workflows/documentation.yml +++ b/.github/workflows/documentation.yml @@ -9,7 +9,7 @@ on: default: 'ON' push: branches: - - master + - main pull_request: types: [opened, reopened, synchronize] @@ -64,10 +64,10 @@ jobs: env: DEBIAN_FRONTEND: noninteractive run: | - # the build system of the main repo expects a master branch. make sure - # master exists + # the build system of the main repo expects a main branch. make sure + # main exists pushd "${OPEN3D_ML_ROOT}" - git checkout -b master || true + git checkout -b main || true popd source util/ci_utils.sh install_docs_dependencies "${OPEN3D_ML_ROOT}" @@ -88,22 +88,22 @@ jobs: if-no-files-found: error - name: GCloud CLI auth - if: ${{ github.ref == 'refs/heads/master' }} + if: ${{ github.ref == 'refs/heads/main' }} uses: 'google-github-actions/auth@v1' with: project_id: ${{ secrets.GCE_PROJECT }} credentials_json: '${{ secrets.GCE_SA_KEY_GPU_CI }}' - name: GCloud CLI setup - if: ${{ github.ref == 'refs/heads/master' }} + if: ${{ github.ref == 'refs/heads/main' }} uses: google-github-actions/setup-gcloud@v1 with: version: ${{ env.GCE_CLI_GHA_VERSION }} project_id: ${{ secrets.GCE_PROJECT }} - name: Deploy docs - if: ${{ github.ref == 'refs/heads/master' }} + if: ${{ github.ref == 'refs/heads/main' }} run: | - # Compress and upload the docs, only for master branch + # Compress and upload the docs, only for main branch docs_out_dir="docs/_out" # Docs in ${docs_out_dir}/html tar_file="${{ github.sha }}_ready.tar.gz" rm -rf ${tar_file} @@ -113,7 +113,7 @@ jobs: echo "https://storage.googleapis.com/open3d-docs/${tar_file}" - name: Check wheels and ready documentation archive - if: ${{ github.ref == 'refs/heads/master' }} + if: ${{ github.ref == 'refs/heads/main' }} run: | if [ $(gsutil ls gs://open3d-docs/${{ github.sha }}_ready* | wc -l)\ -eq 4 ]; then @@ -124,9 +124,9 @@ jobs: gsutil mv gs://open3d-docs/${{ github.sha }}_ready.tar.gz \ gs://open3d-docs/${{ github.sha }}.tar.gz # Set holds on new artifacts, release on old - gsutil retention temp release gs://open3d-releases-master/* - gsutil retention temp set gs://open3d-releases-master/python-wheels/*${GITHUB_SHA:0:7}*.whl - gsutil retention temp set gs://open3d-releases-master/devel/*${GITHUB_SHA:0:7}* + gsutil retention temp release gs://open3d-releases/* + gsutil retention temp set gs://open3d-releases/python-wheels/*${GITHUB_SHA:0:7}*.whl + gsutil
retention temp set gs://open3d-releases/devel/*${GITHUB_SHA:0:7}* else echo "All wheels / docs not available yet." fi diff --git a/.github/workflows/gcs.lifecycle.json b/.github/workflows/gcs.lifecycle.json index 86ce47f5bc4..1170ec3ec4b 100644 --- a/.github/workflows/gcs.lifecycle.json +++ b/.github/workflows/gcs.lifecycle.json @@ -5,7 +5,11 @@ "type": "Delete" }, "condition": { - "age": 7 + "age": 30, + "matchesPrefix": [ + "devel/", + "python-wheels/" + ] } } ] diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index e24f231f2d6..00bd9636130 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -10,7 +10,7 @@ on: push: branches: - - master + - main pull_request: types: [opened, reopened, synchronize] # Rebuild on new pushes to PR @@ -94,23 +94,23 @@ jobs: if-no-files-found: error - name: GCloud CLI auth - if: ${{ github.ref == 'refs/heads/master' && env.BUILD_SHARED_LIBS == 'ON' }} + if: ${{ github.ref == 'refs/heads/main' && env.BUILD_SHARED_LIBS == 'ON' }} uses: 'google-github-actions/auth@v1' with: project_id: ${{ secrets.GCE_PROJECT }} credentials_json: '${{ secrets.GCE_SA_KEY_GPU_CI }}' - name: GCloud CLI setup - if: ${{ github.ref == 'refs/heads/master' && env.BUILD_SHARED_LIBS == 'ON' }} + if: ${{ github.ref == 'refs/heads/main' && env.BUILD_SHARED_LIBS == 'ON' }} uses: google-github-actions/setup-gcloud@v1 with: version: ${{ env.GCE_CLI_GHA_VERSION }} project_id: ${{ secrets.GCE_PROJECT }} - name: Upload package to GCS bucket - if: ${{ github.ref == 'refs/heads/master' && env.BUILD_SHARED_LIBS == 'ON' }} + if: ${{ github.ref == 'refs/heads/main' && env.BUILD_SHARED_LIBS == 'ON' }} run: | - gsutil cp build/package/${{ env.DEVEL_PKG_NAME }} gs://open3d-releases-master/devel/ - echo "Download devel package at: https://storage.googleapis.com/open3d-releases-master/devel/${{ env.DEVEL_PKG_NAME }}" + gsutil cp build/package/${{ env.DEVEL_PKG_NAME }} gs://open3d-releases/devel/ + echo "Download devel package at: https://storage.googleapis.com/open3d-releases/devel/${{ env.DEVEL_PKG_NAME }}" - name: Upload Open3D viewer app uses: actions/upload-artifact@v3 @@ -128,14 +128,14 @@ jobs: # https://github.community/t/how-to-conditionally-include-exclude-items-in-matrix-eg-based-on-branch/16853/6 matrix: python_version: ['3.8', '3.9', '3.10', '3.11'] - is_master: - - ${{ github.ref == 'refs/heads/master' }} + is_main: + - ${{ github.ref == 'refs/heads/main' }} exclude: - - is_master: false + - is_main: false python_version: '3.8' - - is_master: false + - is_main: false python_version: '3.9' - - is_master: false + - is_main: false python_version: '3.10' env: @@ -206,26 +206,26 @@ jobs: if-no-files-found: error - name: GCloud CLI auth - if: ${{ github.ref == 'refs/heads/master' }} + if: ${{ github.ref == 'refs/heads/main' }} uses: 'google-github-actions/auth@v1' with: project_id: ${{ secrets.GCE_PROJECT }} credentials_json: '${{ secrets.GCE_SA_KEY_GPU_CI }}' - name: GCloud CLI setup - if: ${{ github.ref == 'refs/heads/master' }} + if: ${{ github.ref == 'refs/heads/main' }} uses: google-github-actions/setup-gcloud@v1 with: version: ${{ env.GCE_CLI_GHA_VERSION }} project_id: ${{ secrets.GCE_PROJECT }} - name: Upload wheel to GCS bucket - if: ${{ github.ref == 'refs/heads/master' }} + if: ${{ github.ref == 'refs/heads/main' }} env: python_version: ${{ matrix.python_version }} run: | PYTAG="-cp$(echo ${{ env.python_version }} | tr -d '.')" - gsutil cp build/lib/python_package/pip_package/${{ env.PIP_PKG_NAME }} gs://open3d-releases-master/python-wheels/ - 
echo "Download pip package at: https://storage.googleapis.com/open3d-releases-master/python-wheels/${{ env.PIP_PKG_NAME }}" + gsutil cp build/lib/python_package/pip_package/${{ env.PIP_PKG_NAME }} gs://open3d-releases/python-wheels/ + echo "Download pip package at: https://storage.googleapis.com/open3d-releases/python-wheels/${{ env.PIP_PKG_NAME }}" test-wheel: name: Test wheel @@ -235,14 +235,14 @@ jobs: fail-fast: false matrix: python_version: ['3.8', '3.9', '3.10', '3.11'] - is_master: - - ${{ github.ref == 'refs/heads/master' }} + is_main: + - ${{ github.ref == 'refs/heads/main' }} exclude: - - is_master: false + - is_main: false python_version: '3.8' - - is_master: false + - is_main: false python_version: '3.9' - - is_master: false + - is_main: false python_version: '3.10' env: @@ -286,7 +286,7 @@ jobs: name: Ready docs archive # no need to run on macOS runs-on: ubuntu-latest - if: ${{ github.ref == 'refs/heads/master' }} + if: ${{ github.ref == 'refs/heads/main' }} needs: [build-wheel, MacOS] steps: - name: GCloud CLI auth @@ -312,9 +312,9 @@ jobs: gsutil mv gs://open3d-docs/${{ github.sha }}_ready.tar.gz \ gs://open3d-docs/${{ github.sha }}.tar.gz # Set holds on new artifacts, release on old - gsutil retention temp release gs://open3d-releases-master/* - gsutil retention temp set gs://open3d-releases-master/python-wheels/*${GITHUB_SHA:0:7}*.whl - gsutil retention temp set gs://open3d-releases-master/devel/*${GITHUB_SHA:0:7}* + gsutil retention temp release gs://open3d-releases/* + gsutil retention temp set gs://open3d-releases/python-wheels/*${GITHUB_SHA:0:7}*.whl + gsutil retention temp set gs://open3d-releases/devel/*${GITHUB_SHA:0:7}* else echo "All wheels / docs not available yet. Docs not ready." fi diff --git a/.github/workflows/style.yml b/.github/workflows/style.yml index f6dbcac232a..744b997e2e7 100644 --- a/.github/workflows/style.yml +++ b/.github/workflows/style.yml @@ -4,7 +4,7 @@ on: workflow_dispatch: push: branches: - - master + - main pull_request: types: [opened, reopened, synchronize] # Rebuild on new pushes to PR diff --git a/.github/workflows/ubuntu-cuda.yml b/.github/workflows/ubuntu-cuda.yml index 5680c0e89ef..9691ab0ed30 100644 --- a/.github/workflows/ubuntu-cuda.yml +++ b/.github/workflows/ubuntu-cuda.yml @@ -9,7 +9,7 @@ on: default: 'ON' push: branches: - - master + - main pull_request: types: [opened, reopened, synchronize] @@ -156,13 +156,13 @@ jobs: if-no-files-found: error - name: Upload package to GCS bucket - if: ${{ github.ref == 'refs/heads/master' && env.BUILD_PACKAGE == 'true' }} + if: ${{ github.ref == 'refs/heads/main' && env.BUILD_PACKAGE == 'true' }} run: | gcloud compute ssh "${INSTANCE_NAME}" \ --zone="${GCE_ZONE}" \ --command="ls -alh \ && gsutil cp open3d-devel-linux-*.tar.xz \ - gs://open3d-releases-master/devel/" + gs://open3d-releases/devel/" - name: VM run docker run: | @@ -171,7 +171,7 @@ jobs: --command="sudo Open3D/docker/docker_test.sh ${CI_CONFIG}" - name: VM ccache upload - if: ${{ github.ref == 'refs/heads/master' }} + if: ${{ github.ref == 'refs/heads/main' }} run: | gcloud compute ssh "${INSTANCE_NAME}" \ --zone="${GCE_ZONE}" \ diff --git a/.github/workflows/ubuntu-openblas.yml b/.github/workflows/ubuntu-openblas.yml index 7219539f1d8..eb11202a3ab 100644 --- a/.github/workflows/ubuntu-openblas.yml +++ b/.github/workflows/ubuntu-openblas.yml @@ -4,7 +4,7 @@ on: workflow_dispatch: push: branches: - - master + - main pull_request: types: [opened, reopened, synchronize] diff --git a/.github/workflows/ubuntu-sycl.yml 
b/.github/workflows/ubuntu-sycl.yml index fc648de4026..1c1ac21697d 100644 --- a/.github/workflows/ubuntu-sycl.yml +++ b/.github/workflows/ubuntu-sycl.yml @@ -3,7 +3,7 @@ name: Ubuntu SYCL on: push: branches: - - master + - main pull_request: types: [opened, reopened, synchronize] @@ -45,18 +45,18 @@ jobs: fi - name: GCloud CLI auth - if: ${{ github.ref == 'refs/heads/master' }} + if: ${{ github.ref == 'refs/heads/main' }} uses: 'google-github-actions/auth@v1' with: project_id: ${{ secrets.GCE_PROJECT }} credentials_json: '${{ secrets.GCE_SA_KEY_GPU_CI }}' - name: GCloud CLI setup - if: ${{ github.ref == 'refs/heads/master' }} + if: ${{ github.ref == 'refs/heads/main' }} uses: google-github-actions/setup-gcloud@v1 with: version: ${{ env.GCE_CLI_GHA_VERSION }} project_id: ${{ secrets.GCE_PROJECT }} - name: Upload ccache to GCS - if: ${{ github.ref == 'refs/heads/master' }} + if: ${{ github.ref == 'refs/heads/main' }} run: | gsutil cp ${GITHUB_WORKSPACE}/open3d-ci-sycl.tar.gz gs://open3d-ci-cache/ || true diff --git a/.github/workflows/ubuntu-wheel.yml b/.github/workflows/ubuntu-wheel.yml index 218b8052ea8..a798762c965 100644 --- a/.github/workflows/ubuntu-wheel.yml +++ b/.github/workflows/ubuntu-wheel.yml @@ -9,7 +9,7 @@ on: default: 'ON' push: branches: - - master + - main pull_request: types: [opened, reopened, synchronize] @@ -31,14 +31,14 @@ jobs: fail-fast: false matrix: python_version: ['3.8', '3.9', '3.10', '3.11'] - is_master: - - ${{ github.ref == 'refs/heads/master' }} + is_main: + - ${{ github.ref == 'refs/heads/main' }} exclude: - - is_master: false + - is_main: false python_version: '3.8' - - is_master: false + - is_main: false python_version: '3.9' - - is_master: false + - is_main: false python_version: '3.10' env: DEVELOPER_BUILD: ${{ github.event.inputs.developer_build || 'ON' }} @@ -86,29 +86,29 @@ jobs: ${{ env.PIP_CPU_PKG_NAME }} if-no-files-found: error - name: GCloud CLI auth - if: ${{ github.ref == 'refs/heads/master' }} + if: ${{ github.ref == 'refs/heads/main' }} uses: 'google-github-actions/auth@v1' with: project_id: ${{ secrets.GCE_PROJECT }} credentials_json: '${{ secrets.GCE_SA_KEY_GPU_CI }}' - name: GCloud CLI setup - if: ${{ github.ref == 'refs/heads/master' }} + if: ${{ github.ref == 'refs/heads/main' }} uses: google-github-actions/setup-gcloud@v1 with: version: ${{ env.GCE_CLI_GHA_VERSION }} project_id: ${{ secrets.GCE_PROJECT }} - name: Upload ccache to GCS - if: ${{ github.ref == 'refs/heads/master' }} + if: ${{ github.ref == 'refs/heads/main' }} run: | gsutil cp ${GITHUB_WORKSPACE}/${{ env.CCACHE_TAR_NAME }}.tar.gz gs://open3d-ci-cache/ - name: Upload wheel to GCS - if: ${{ github.ref == 'refs/heads/master' }} + if: ${{ github.ref == 'refs/heads/main' }} run: | gsutil cp ${GITHUB_WORKSPACE}/${{ env.PIP_PKG_NAME }} \ - ${GITHUB_WORKSPACE}/${{ env.PIP_CPU_PKG_NAME }} gs://open3d-releases-master/python-wheels/ + ${GITHUB_WORKSPACE}/${{ env.PIP_CPU_PKG_NAME }} gs://open3d-releases/python-wheels/ echo "Download pip package at: - https://storage.googleapis.com/open3d-releases-master/python-wheels/${{ env.PIP_PKG_NAME }} - https://storage.googleapis.com/open3d-releases-master/python-wheels/${{ env.PIP_CPU_PKG_NAME }}" + https://storage.googleapis.com/open3d-releases/python-wheels/${{ env.PIP_PKG_NAME }} + https://storage.googleapis.com/open3d-releases/python-wheels/${{ env.PIP_CPU_PKG_NAME }}" test-wheel-cpu: name: Test wheel CPU @@ -118,14 +118,14 @@ jobs: fail-fast: false matrix: python_version: ['3.8', '3.9', '3.10', '3.11'] - is_master: - - ${{ github.ref == 
'refs/heads/master' }} + is_main: + - ${{ github.ref == 'refs/heads/main' }} exclude: - - is_master: false + - is_main: false python_version: '3.8' - - is_master: false + - is_main: false python_version: '3.9' - - is_master: false + - is_main: false python_version: '3.10' env: OPEN3D_ML_ROOT: ${{ github.workspace }}/Open3D-ML @@ -177,7 +177,7 @@ jobs: ready-docs: name: Ready docs archive runs-on: ubuntu-latest - if: ${{ github.ref == 'refs/heads/master' }} + if: ${{ github.ref == 'refs/heads/main' }} needs: [build-wheel] steps: - name: GCloud CLI auth @@ -203,9 +203,9 @@ jobs: gsutil mv gs://open3d-docs/${{ github.sha }}_ready.tar.gz \ gs://open3d-docs/${{ github.sha }}.tar.gz # Set holds on new artifacts, release on old - gsutil retention temp release gs://open3d-releases-master/* - gsutil retention temp set gs://open3d-releases-master/python-wheels/*${GITHUB_SHA:0:7}*.whl - gsutil retention temp set gs://open3d-releases-master/devel/*${GITHUB_SHA:0:7}* + gsutil retention temp release gs://open3d-releases/* + gsutil retention temp set gs://open3d-releases/python-wheels/*${GITHUB_SHA:0:7}*.whl + gsutil retention temp set gs://open3d-releases/devel/*${GITHUB_SHA:0:7}* else echo "All wheels / docs not available yet." fi diff --git a/.github/workflows/ubuntu.yml b/.github/workflows/ubuntu.yml index e1c4d0b8b44..1a69ec4da2d 100644 --- a/.github/workflows/ubuntu.yml +++ b/.github/workflows/ubuntu.yml @@ -9,7 +9,7 @@ on: default: 'ON' push: branches: - - master + - main pull_request: types: [opened, reopened, synchronize] @@ -85,19 +85,19 @@ jobs: path: open3d-viewer-*-Linux.deb if-no-files-found: error - name: GCloud CLI auth - if: ${{ github.ref == 'refs/heads/master' }} + if: ${{ github.ref == 'refs/heads/main' }} uses: 'google-github-actions/auth@v1' with: project_id: ${{ secrets.GCE_PROJECT }} credentials_json: '${{ secrets.GCE_SA_KEY_GPU_CI }}' - name: GCloud CLI setup - if: ${{ github.ref == 'refs/heads/master' }} + if: ${{ github.ref == 'refs/heads/main' }} uses: google-github-actions/setup-gcloud@v1 with: version: ${{ env.GCE_CLI_GHA_VERSION }} project_id: ${{ secrets.GCE_PROJECT }} - name: Upload package to GCS bucket - if: ${{ github.ref == 'refs/heads/master' && env.BUILD_SHARED_LIBS == 'ON' }} + if: ${{ github.ref == 'refs/heads/main' && env.BUILD_SHARED_LIBS == 'ON' }} run: | - gsutil cp open3d-devel-*.tar.xz gs://open3d-releases-master/devel/ - echo "Download devel package at: https://storage.googleapis.com/open3d-releases-master/devel/${{ env.DEVEL_PKG_NAME }}" + gsutil cp open3d-devel-*.tar.xz gs://open3d-releases/devel/ + echo "Download devel package at: https://storage.googleapis.com/open3d-releases/devel/${{ env.DEVEL_PKG_NAME }}" diff --git a/.github/workflows/vtk_packages.yml b/.github/workflows/vtk_packages.yml index 30347cc2941..8ae7d812187 100644 --- a/.github/workflows/vtk_packages.yml +++ b/.github/workflows/vtk_packages.yml @@ -2,7 +2,7 @@ name: VTK Packages on: # pull_request: - # branches: [ master ] + # branches: [ main ] # Allows you to run this workflow manually from the Actions tab workflow_dispatch: diff --git a/.github/workflows/webrtc.yml b/.github/workflows/webrtc.yml index f8e29ea7e32..4602379d00f 100644 --- a/.github/workflows/webrtc.yml +++ b/.github/workflows/webrtc.yml @@ -105,7 +105,7 @@ jobs: $ErrorActionPreference = 'Stop' echo "Get depot_tools" # Checkout to a specific version - # Ref: https://chromium.googlesource.com/chromium/src/+/master/docs/building_old_revisions.md + # Ref: 
https://chromium.googlesource.com/chromium/src/+/main/docs/building_old_revisions.md git clone https://chromium.googlesource.com/chromium/tools/depot_tools.git git -C depot_tools checkout $env:DEPOT_TOOLS_COMMIT $env:Path = (Get-Item depot_tools).FullName + ";" + $env:Path diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index 430ebd73c60..d687f6b7e33 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -10,7 +10,7 @@ on: push: branches: - - master + - main pull_request: types: [opened, reopened, synchronize] # Rebuild on new pushes to PR @@ -173,25 +173,25 @@ jobs: if-no-files-found: error - name: GCloud CLI auth - if: ${{ github.ref == 'refs/heads/master' && matrix.BUILD_SHARED_LIBS == 'ON' && matrix.BUILD_CUDA_MODULE == 'OFF' }} + if: ${{ github.ref == 'refs/heads/main' && matrix.BUILD_SHARED_LIBS == 'ON' && matrix.BUILD_CUDA_MODULE == 'OFF' }} uses: google-github-actions/auth@v1 with: project_id: ${{ secrets.GCE_PROJECT }} credentials_json: '${{ secrets.GCE_SA_KEY_GPU_CI }}' - name: GCloud CLI setup - if: ${{ github.ref == 'refs/heads/master' && matrix.BUILD_SHARED_LIBS == 'ON' && matrix.BUILD_CUDA_MODULE == 'OFF' }} + if: ${{ github.ref == 'refs/heads/main' && matrix.BUILD_SHARED_LIBS == 'ON' && matrix.BUILD_CUDA_MODULE == 'OFF' }} uses: google-github-actions/setup-gcloud@v1 with: version: ${{ env.GCE_CLI_GHA_VERSION }} project_id: ${{ secrets.GCE_DOCS_PROJECT }} - name: Upload package to GCS bucket - if: ${{ github.ref == 'refs/heads/master' && matrix.BUILD_SHARED_LIBS == 'ON' && matrix.BUILD_CUDA_MODULE == 'OFF' }} + if: ${{ github.ref == 'refs/heads/main' && matrix.BUILD_SHARED_LIBS == 'ON' && matrix.BUILD_CUDA_MODULE == 'OFF' }} run: | gsutil cp ${{ env.BUILD_DIR }}/package/${{ env.DEVEL_PKG_NAME }} ` - gs://open3d-releases-master/devel/ + gs://open3d-releases/devel/ if ($LastExitCode -eq 0) { - echo "Download devel package at: https://storage.googleapis.com/open3d-releases-master/devel/${{ env.DEVEL_PKG_NAME }}" + echo "Download devel package at: https://storage.googleapis.com/open3d-releases/devel/${{ env.DEVEL_PKG_NAME }}" } else { throw "Devel package upload failed" } @@ -243,14 +243,14 @@ jobs: # https://github.community/t/how-to-conditionally-include-exclude-items-in-matrix-eg-based-on-branch/16853/6 matrix: python_version: ['3.8', '3.9', '3.10', '3.11'] - is_master: - - ${{ github.ref == 'refs/heads/master' }} + is_main: + - ${{ github.ref == 'refs/heads/main' }} exclude: - - is_master: false + - is_main: false python_version: '3.8' - - is_master: false + - is_main: false python_version: '3.9' - - is_master: false + - is_main: false python_version: '3.10' steps: @@ -314,28 +314,28 @@ jobs: if-no-files-found: error - name: GCloud CLI auth - if: ${{ github.ref == 'refs/heads/master' }} + if: ${{ github.ref == 'refs/heads/main' }} uses: google-github-actions/auth@v1 with: project_id: ${{ secrets.GCE_PROJECT }} credentials_json: '${{ secrets.GCE_SA_KEY_GPU_CI }}' - name: GCloud CLI setup - if: ${{ github.ref == 'refs/heads/master' }} + if: ${{ github.ref == 'refs/heads/main' }} uses: google-github-actions/setup-gcloud@v1 with: version: ${{ env.GCE_CLI_GHA_VERSION }} project_id: ${{ secrets.GCE_DOCS_PROJECT }} - name: Upload wheel to GCS bucket - if: ${{ github.ref == 'refs/heads/master' }} + if: ${{ github.ref == 'refs/heads/main' }} env: python_version: ${{ matrix.python_version }} run: | $ErrorActionPreference = 'Stop' $PYTAG="-cp$(${{ env.python_version }} -replace '\.', '')" - gsutil cp ${{ env.BUILD_DIR 
}}/lib/python_package/pip_package/${{ env.PIP_PKG_NAME }} gs://open3d-releases-master/python-wheels/ + gsutil cp ${{ env.BUILD_DIR }}/lib/python_package/pip_package/${{ env.PIP_PKG_NAME }} gs://open3d-releases/python-wheels/ if ($LastExitCode -eq 0) { - echo "Download pip package at: https://storage.googleapis.com/open3d-releases-master/python-wheels/${{ env.PIP_PKG_NAME }}" + echo "Download pip package at: https://storage.googleapis.com/open3d-releases/python-wheels/${{ env.PIP_PKG_NAME }}" } else { throw "Wheel upload failed" } @@ -348,14 +348,14 @@ fail-fast: false matrix: python_version: ['3.8', '3.9', '3.10', '3.11'] - is_master: - - ${{ github.ref == 'refs/heads/master' }} + is_main: + - ${{ github.ref == 'refs/heads/main' }} exclude: - - is_master: false + - is_main: false python_version: '3.8' - - is_master: false + - is_main: false python_version: '3.9' - - is_master: false + - is_main: false python_version: '3.10' steps: @@ -412,7 +412,7 @@ name: Ready docs archive # no need to run on Windows runs-on: ubuntu-latest - if: ${{ github.ref == 'refs/heads/master' }} + if: ${{ github.ref == 'refs/heads/main' }} # temp workaround for Windows CUDA Debug CI out of space. Still update docs. # needs: [build-wheel, windows] needs: [build-wheel] @@ -440,9 +440,9 @@ gsutil mv gs://open3d-docs/${{ github.sha }}_ready.tar.gz \ gs://open3d-docs/${{ github.sha }}.tar.gz # Set holds on new artifacts, release on old - gsutil retention temp release gs://open3d-releases-master/* - gsutil retention temp set gs://open3d-releases-master/python-wheels/*${GITHUB_SHA:0:7}*.whl - gsutil retention temp set gs://open3d-releases-master/devel/*${GITHUB_SHA:0:7}* + gsutil retention temp release gs://open3d-releases/* + gsutil retention temp set gs://open3d-releases/python-wheels/*${GITHUB_SHA:0:7}*.whl + gsutil retention temp set gs://open3d-releases/devel/*${GITHUB_SHA:0:7}* else echo "All wheels / docs not available yet." fi diff --git a/CHANGELOG.md b/CHANGELOG.md index 0ad0b678ef9..5e3d8dc1af3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,75 +1,77 @@ -## Master +## Main + +- Fix tensor based TSDF integration example. +- Use GLIBCXX_USE_CXX11_ABI=ON by default +- Python 3.9 support. Tensorflow bump 2.4.1 -> 2.5.0. PyTorch bump 1.7.1 -> 1.8.1 (LTS) +- Fix undefined names: docstr and VisibleDeprecationWarning (PR #3844) +- Corrected documentation for Tensor based PointCloud, LineSet, TriangleMesh (PR #4685) +- Corrected documentation for KDTree (typo in Notebook) (PR #4744) +- Corrected documentation for visualisation tutorial +- Remove `setuptools` and `wheel` from requirements for end users (PR #5020) +- Fix various typos (PR #5070) +- Exposed more functionality in SLAM and odometry pipelines +- Fix for depth estimation for VoxelBlockGrid +- Reserve fragment buffer for VoxelBlockGrid operations +- Fix raycasting scene: Allow setting of number of threads that are used for building a raycasting scene +- Fix Python bindings for CUDA device synchronization, voxel grid saving (PR #5425) +- Support msgpack versions without cmake +- Changed TriangleMesh to store materials in a list so they can be accessed by the material index (PR #5938) +- Support multi-threading in the RayCastingScene function to commit scene (PR #6051). +- Fix some bad triangle generation in TriangleMesh::SimplifyQuadricDecimation +- Fix printing of tensor in gpu and add validation check for bounds of axis-aligned bounding box (PR #6444) +- Python 3.11 support.
Bump pybind11 v2.6.2 -> v2.11.1 +- Check for support of CUDA Memory Pools at runtime (#4679) +- Fix `toString`, `CreateFromPoints` methods and improve docs in `AxisAlignedBoundingBox`. 🐛📝 +- Migrate Open3D documentation to furo theme ✨ (#6470) +- Expose Near Clip + Far Clip parameters to setup_camera in OffscreenRenderer (#6520) +- Add Doppler ICP in tensor registration pipeline (PR #5237) +- Rename master branch to main. -* Fix tensor based TSDF integration example. -* Use GLIBCXX_USE_CXX11_ABI=ON by default -* Python 3.9 support. Tensorflow bump 2.4.1 -> 2.5.0. PyTorch bump 1.7.1 -> 1.8.1 (LTS) -* Fix undefined names: docstr and VisibleDeprecationWarning (PR #3844) -* Corrected documentation for Tensor based PointClound, LineSet, TriangleMesh (PR #4685) -* Corrected documentation for KDTree (typo in Notebook) (PR #4744) -* Corrected documentation for visualisation tutorial -* Remove `setuptools` and `wheel` from requirements for end users (PR #5020) -* Fix various typos (PR #5070) -* Exposed more functionality in SLAM and odometry pipelines -* Fix for depth estimation for VoxelBlockGrid -* Reserve fragment buffer for VoxelBlockGrid operations -* Fix raycasting scene: Allow setting of number of threads that are used for building a raycasting scene -* Fix Python bindings for CUDA device synchronization, voxel grid saving (PR #5425) -* Support msgpack versions without cmake -* Changed TriangleMesh to store materials in a list so they can be accessed by the material index (PR #5938) -* Support multi-threading in the RayCastingScene function to commit scene (PR #6051). -* Fix some bad triangle generation in TriangleMesh::SimplifyQuadricDecimation -* Fix printing of tensor in gpu and add validation check for bounds of axis-aligned bounding box (PR #6444) -* Python 3.11 support. bump pybind11 v2.6.2 -> v2.11.1 -* Check for support of CUDA Memory Pools at runtime (#4679) -* Fix `toString`, `CreateFromPoints` methods and improve docs in `AxisAlignedBoundingBox`. 🐛📝 -* Migrate Open3d documentation to furo theme ✨ (#6470) -* Expose Near Clip + Far Clip parameters to setup_camera in OffscreenRenderer (#6520) -* Add Doppler ICP in tensor registration pipeline (PR #5237) ## 0.13 -* CUDA support 10.1 -> 11.0. Tensorflow 2.3.1 -> 2.4.1. PyTorch 1.6.0 -> 1.7.1 (PR #3049). This requires a custom PyTorch wheel from https://github.com/isl-org/open3d_downloads/releases/tag/torch1.7.1 due to PyTorch issue #52663 +- CUDA support 10.1 -> 11.0. Tensorflow 2.3.1 -> 2.4.1. PyTorch 1.6.0 -> 1.7.1 (PR #3049). This requires a custom PyTorch wheel from https://github.com/isl-org/open3d_downloads/releases/tag/torch1.7.1 due to PyTorch issue #52663 ## 0.12 -* RealSense SDK v2 integrated for reading RS bag files (PR #2646) -* Tensor based RGBDImage class, Python bindings for Image and RGBDImage -* RealSense sensor configuration, live capture and recording (with example and tutorial) (PR #2748) -* Add mouselook for the legacy visualizer (PR #2551) -* Add function to randomly downsample pointcloud (PR #3050) -* Allow TriangleMesh with textures to be added (PR #3170) -* Python property of open3d.visualization.rendering.Open3DScene `get_view` has been renamed to `view`.
-* Added LineSet::CreateCameraVisualization() for creating a simple camera visualization from intrinsic and extrinsic matrices (PR #3255) +- RealSense SDK v2 integrated for reading RS bag files (PR #2646) +- Tensor based RGBDImage class, Python bindings for Image and RGBDImage +- RealSense sensor configuration, live capture and recording (with example and tutorial) (PR #2748) +- Add mouselook for the legacy visualizer (PR #2551) +- Add function to randomly downsample pointcloud (PR #3050) +- Allow TriangleMesh with textures to be added (PR #3170) +- Python property of open3d.visualization.rendering.Open3DScene `get_view` has been renamed to `view`. +- Added LineSet::CreateCameraVisualization() for creating a simple camera visualization from intrinsic and extrinsic matrices (PR #3255) ## 0.11 -* Fixes bug for preloading libc++ and libc++abi in Python -* Added GUI widgets and model-viewing app -* Fixes travis for race-condition on macOS -* Fixes appveyor configuration and to build all branches -* Updated travis.yml to support Ubuntu 18.04, gcc-7, and clang-7.0 -* Contributors guidelines updated -* Avoid cstdlib random generators in ransac registration, use C++11 random instead. -* Fixed a bug in open3d::geometry::TriangleMesh::ClusterConnectedTriangles. -* Added option BUILD_BENCHMARKS for building microbenchmarks -* Extend Python API of UniformTSDFVolume to allow setting the origin -* Corrected documentation of PointCloud.h -* Added ISS Keypoint Detector -* Added an RPC interface for external visualizers running in a separate process -* Added `maximum_error` and `boundary_weight` parameter to `simplify_quadric_decimation` -* Remove support for Python 3.5 -* Development wheels are available for user testing. See [Getting Started](http://www.open3d.org/docs/latest/getting_started.html) page for installation. -* PointCloud File IO support for new tensor data types. -* New PointCloud format support: XYZI (ASCII). -* Fast compression mode for PNG writing. (Issue #846) -* Ubuntu 20.04 (Focal) support. -* Added Line3D/Ray3D/Segment3D classes with plane, point, closest-distance, and AABB tests -* Updated Open3D.h.in to add certain missing header files -* Add Open3D-ML to Open3D wheel -* Fix a bug in PointCloud file format, use `float` instead of `float_t` -* Add per-point covariance member for geometry::PointCloud class. -* Add Generalized ICP implementation. +- Fixes bug for preloading libc++ and libc++abi in Python +- Added GUI widgets and model-viewing app +- Fixes travis for race-condition on macOS +- Fixes appveyor configuration and to build all branches +- Updated travis.yml to support Ubuntu 18.04, gcc-7, and clang-7.0 +- Contributors guidelines updated +- Avoid cstdlib random generators in ransac registration, use C++11 random instead. +- Fixed a bug in open3d::geometry::TriangleMesh::ClusterConnectedTriangles. +- Added option BUILD_BENCHMARKS for building microbenchmarks +- Extend Python API of UniformTSDFVolume to allow setting the origin +- Corrected documentation of PointCloud.h +- Added ISS Keypoint Detector +- Added an RPC interface for external visualizers running in a separate process +- Added `maximum_error` and `boundary_weight` parameter to `simplify_quadric_decimation` +- Remove support for Python 3.5 +- Development wheels are available for user testing. See [Getting Started](http://www.open3d.org/docs/latest/getting_started.html) page for installation. +- PointCloud File IO support for new tensor data types. +- New PointCloud format support: XYZI (ASCII). 
+- Fast compression mode for PNG writing. (Issue #846) +- Ubuntu 20.04 (Focal) support. +- Added Line3D/Ray3D/Segment3D classes with plane, point, closest-distance, and AABB tests +- Updated Open3D.h.in to add certain missing header files +- Add Open3D-ML to Open3D wheel +- Fix a bug in PointCloud file format, use `float` instead of `float_t` +- Add per-point covariance member for geometry::PointCloud class. +- Add Generalized ICP implementation. ## 0.9.0 -* Version bump to 0.9.0 +- Version bump to 0.9.0 diff --git a/README.md b/README.md index 09f94e0101b..b4b62858e75 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,5 @@

-[image link with master branch URL] +[image link with main branch URL]

# Open3D: A Modern Library for 3D Data Processing @@ -90,7 +90,7 @@ To use Open3D in your C++ project, checkout the following examples ## Open3D-Viewer app -[image link with master branch URL] +[image link with main branch URL] Open3D-Viewer is a standalone 3D viewer app available on Debian (Ubuntu), macOS and Windows. Download Open3D Viewer from the @@ -98,7 +98,7 @@ and Windows. Download Open3D Viewer from the ## Open3D-ML -[image link with master branch URL] +[image link with main branch URL] Open3D-ML is an extension of Open3D for 3D machine learning tasks. It builds on top of the Open3D core library and extends it with machine learning tools for
Indexer::GetPerOutputIndexer(int64_t output_idx) const { if (IsReductionDim(i)) { output_shape[i] = 1; } else { - output_shape[i] = master_shape_[i]; + output_shape[i] = primary_shape_[i]; } } int64_t stride = 1; @@ -351,10 +351,10 @@ Indexer Indexer::GetPerOutputIndexer(int64_t output_idx) const { } } if (!sub_indexer.IsReductionDim(dim)) { - sub_indexer.GetMasterShape()[dim] = 1; + sub_indexer.GetPrimaryShape()[dim] = 1; } } - sub_indexer.UpdateMasterStrides(); + sub_indexer.UpdatePrimaryStrides(); sub_indexer.UpdateContiguousFlags(); @@ -382,8 +382,8 @@ void Indexer::ShrinkDim(int64_t dim, int64_t start, int64_t size) { outputs_[i].byte_strides_[dim] * start; } - master_shape_[dim] = size; - UpdateMasterStrides(); + primary_shape_[dim] = size; + UpdatePrimaryStrides(); UpdateContiguousFlags(); @@ -406,7 +406,7 @@ int64_t Indexer::NumReductionDims() const { int64_t Indexer::NumWorkloads() const { int64_t num_workloads = 1; for (int64_t i = 0; i < ndims_; ++i) { - num_workloads *= master_shape_[i]; + num_workloads *= primary_shape_[i]; } return num_workloads; } @@ -415,8 +415,8 @@ int64_t Indexer::NumOutputElements() const { // All outputs have the same shape, so it's okay to use outputs_[0]. int64_t num_output_elements = 1; for (int64_t i = 0; i < ndims_; ++i) { - if (outputs_[0].byte_strides_[i] != 0 || master_shape_[i] == 0) { - num_output_elements *= master_shape_[i]; + if (outputs_[0].byte_strides_[i] != 0 || primary_shape_[i] == 0) { + num_output_elements *= primary_shape_[i]; } } return num_output_elements; @@ -428,8 +428,8 @@ void Indexer::CoalesceDimensions() { } auto can_coalesce = [&](int64_t dim0, int64_t dim1) { - auto shape0 = master_shape_[dim0]; - auto shape1 = master_shape_[dim1]; + auto shape0 = primary_shape_[dim0]; + auto shape1 = primary_shape_[dim1]; if (shape0 == 1 || shape1 == 1) { return true; } @@ -462,15 +462,15 @@ void Indexer::CoalesceDimensions() { int64_t prev_dim = 0; for (int64_t dim = 1; dim < ndims_; dim++) { if (can_coalesce(prev_dim, dim)) { - if (master_shape_[prev_dim] == 1) { + if (primary_shape_[prev_dim] == 1) { replace_stride(prev_dim, dim); } - master_shape_[prev_dim] *= master_shape_[dim]; + primary_shape_[prev_dim] *= primary_shape_[dim]; } else { prev_dim++; if (prev_dim != dim) { replace_stride(prev_dim, dim); - master_shape_[prev_dim] = master_shape_[dim]; + primary_shape_[prev_dim] = primary_shape_[dim]; } } } @@ -483,7 +483,7 @@ void Indexer::CoalesceDimensions() { outputs_[i].ndims_ = ndims_; } - UpdateMasterStrides(); + UpdatePrimaryStrides(); UpdateContiguousFlags(); } @@ -553,12 +553,12 @@ void Indexer::ReorderDimensions(const SizeVector& reduction_dims) { } } -void Indexer::UpdateMasterStrides() { +void Indexer::UpdatePrimaryStrides() { int64_t stride = 1; for (int64_t i = ndims_ - 1; i >= 0; --i) { - master_strides_[i] = stride; + primary_strides_[i] = stride; // Handles 0-sized dimensions - stride = master_shape_[i] > 1 ? stride * master_shape_[i] : stride; + stride = primary_shape_[i] > 1 ? 
stride * primary_shape_[i] : stride; } } @@ -629,8 +629,8 @@ ispc::Indexer Indexer::ToISPC() const { ispc_indexer.outputs_contiguous_[i] = GetOutput(i).IsContiguous(); } for (int64_t i = 0; i < NumDims(); ++i) { - ispc_indexer.master_shape_[i] = GetMasterShape()[i]; - ispc_indexer.master_strides_[i] = GetMasterStrides()[i]; + ispc_indexer.primary_shape_[i] = GetPrimaryShape()[i]; + ispc_indexer.primary_strides_[i] = GetPrimaryStrides()[i]; } ispc_indexer.ndims_ = NumDims(); diff --git a/cpp/open3d/core/Indexer.h b/cpp/open3d/core/Indexer.h index 6020fcda08d..7b0eaf7bda8 100644 --- a/cpp/open3d/core/Indexer.h +++ b/cpp/open3d/core/Indexer.h @@ -310,14 +310,14 @@ class Indexer { /// Returns number of dimensions of the Indexer. int64_t NumDims() const { return ndims_; } - /// Returns Indexer's master shape, one can iterate the Indexer with this + /// Returns Indexer's primary shape, one can iterate the Indexer with this /// shape. - const int64_t* GetMasterShape() const { return master_shape_; } - int64_t* GetMasterShape() { return master_shape_; } + const int64_t* GetPrimaryShape() const { return primary_shape_; } + int64_t* GetPrimaryShape() { return primary_shape_; } - /// Returns Indexer's master strides, one can iterate the Indexer with this - /// strides. It is always set to be the default strides from master_shape_. - const int64_t* GetMasterStrides() const { return master_strides_; } + /// Returns Indexer's primary strides, one can iterate the Indexer with this + /// strides. It is always set to be the default strides from primary_shape_. + const int64_t* GetPrimaryStrides() const { return primary_strides_; } /// Returns the total number of workloads (e.g. computations) needed for /// the op. The scheduler schedules these workloads to run on parallel @@ -394,7 +394,7 @@ class Indexer { // All outputs have the same shape and reduction dims. Even if they // don't have the same initial strides, the reduced strides are always // set to 0. Thus it is okay to use outputs_[0]. - return outputs_[0].byte_strides_[dim] == 0 && master_shape_[dim] > 1; + return outputs_[0].byte_strides_[dim] == 0 && primary_shape_[dim] > 1; } /// Get input Tensor data pointer based on \p workload_idx. @@ -492,8 +492,8 @@ class Indexer { // thread coalescing. void ReorderDimensions(const SizeVector& reduction_dims); - /// Update master_strides_ based on master_shape_. - void UpdateMasterStrides(); + /// Update primary_strides_ based on primary_shape_. + void UpdatePrimaryStrides(); /// Update input_contiguous_ and output_contiguous_. void UpdateContiguousFlags(); @@ -552,9 +552,9 @@ class Indexer { } else { int64_t offset = 0; for (int64_t i = 0; i < ndims_; ++i) { - offset += - workload_idx / master_strides_[i] * tr.byte_strides_[i]; - workload_idx = workload_idx % master_strides_[i]; + offset += workload_idx / primary_strides_[i] * + tr.byte_strides_[i]; + workload_idx = workload_idx % primary_strides_[i]; } return static_cast(tr.data_ptr_) + offset; } @@ -580,9 +580,9 @@ class Indexer { } else { int64_t offset = 0; for (int64_t i = 0; i < ndims_; ++i) { - offset += - workload_idx / master_strides_[i] * tr.byte_strides_[i]; - workload_idx = workload_idx % master_strides_[i]; + offset += workload_idx / primary_strides_[i] * + tr.byte_strides_[i]; + workload_idx = workload_idx % primary_strides_[i]; } return static_cast(static_cast( static_cast(tr.data_ptr_) + offset)); @@ -607,20 +607,20 @@ class Indexer { /// Indexer's global shape. 
The shape's number of elements is the /// same as GetNumWorkloads() for the Indexer. - /// - For broadcasting, master_shape_ is the same as the output shape. - /// - For reduction, master_shape_ is the same as the input shape. + /// - For broadcasting, primary_shape_ is the same as the output shape. + /// - For reduction, primary_shape_ is the same as the input shape. /// - Currently we don't allow broadcasting mixed with reduction. But if - /// broadcasting mixed with reduction is allowed, master_shape_ is a mix + /// broadcasting mixed with reduction is allowed, primary_shape_ is a mix /// of input shape and output shape. First, fill in all omitted dimensions /// (in inputs for broadcasting) and reduction dimensions (as if - /// keepdim=true always) with size 1. For each axis, the master dimension - /// is the non-1 dimension (if both are 1, then the master dimension is 1 + /// keepdim=true always) with size 1. For each axis, the primary dimension + /// is the non-1 dimension (if both are 1, then the primary dimension is 1 /// in that axis). - int64_t master_shape_[MAX_DIMS]; + int64_t primary_shape_[MAX_DIMS]; - /// The default strides for master_shape_ for internal use only. Used to + /// The default strides for primary_shape_ for internal use only. Used to /// compute the actual strides and ultimately the index offsets. - int64_t master_strides_[MAX_DIMS]; + int64_t primary_strides_[MAX_DIMS]; /// Indexer's global number of dimensions. int64_t ndims_ = 0; diff --git a/cpp/open3d/core/Indexer.isph b/cpp/open3d/core/Indexer.isph index f6ac55b0002..517b7219d7c 100644 --- a/cpp/open3d/core/Indexer.isph +++ b/cpp/open3d/core/Indexer.isph @@ -56,20 +56,20 @@ struct Indexer { /// Indexer's global shape. The shape's number of elements is the /// same as GetNumWorkloads() for the Indexer. - /// - For broadcasting, master_shape_ is the same as the output shape. - /// - For reduction, master_shape_ is the same as the input shape. + /// - For broadcasting, primary_shape_ is the same as the output shape. + /// - For reduction, primary_shape_ is the same as the input shape. /// - Currently we don't allow broadcasting mixed with reduction. But if - /// broadcasting mixed with reduction is allowed, master_shape_ is a mix + /// broadcasting mixed with reduction is allowed, primary_shape_ is a mix /// of input shape and output shape. First, fill in all omitted dimensions /// (in inputs for broadcasting) and reduction dimensions (as if - /// keepdim=true always) with size 1. For each axis, the master dimension - /// is the non-1 dimension (if both are 1, then the master dimension is 1 + /// keepdim=true always) with size 1. For each axis, the primary dimension + /// is the non-1 dimension (if both are 1, then the primary dimension is 1 /// in that axis). - int64_t master_shape_[MAX_DIMS]; + int64_t primary_shape_[MAX_DIMS]; - /// The default strides for master_shape_ for internal use only. Used to + /// The default strides for primary_shape_ for internal use only. Used to /// compute the actual strides and ultimately the index offsets. - int64_t master_strides_[MAX_DIMS]; + int64_t primary_strides_[MAX_DIMS]; /// Indexer's global number of dimensions. 
int64_t ndims_; @@ -93,11 +93,11 @@ static inline uint8_t* Indexer_GetWorkloadDataPtr( for (uniform int64_t i = 0; i < self->ndims_; ++i) { offset += #pragma ignore warning(perf) - workload_idx / self->master_strides_[i] * + workload_idx / self->primary_strides_[i] * tr->byte_strides_[i]; #pragma ignore warning(perf) - workload_idx = workload_idx % self->master_strides_[i]; + workload_idx = workload_idx % self->primary_strides_[i]; } return (uint8_t*)(tr->data_ptr_) + offset; } @@ -106,23 +106,23 @@ static inline uint8_t* Indexer_GetWorkloadDataPtr( /// Get data pointer from a TensorRef with \p workload_idx. /// Note: can be optimized by computing all input ptrs and output ptr /// together. -#define TEMPLATE(T) \ - static inline T* OPEN3D_SPECIALIZED(T, Indexer_GetWorkloadDataPtr)( \ - const uniform Indexer* const uniform self, \ - const uniform TensorRef* const uniform tr, \ - uniform bool tr_contiguous, int64_t workload_idx) { \ - cif(workload_idx < 0) { return NULL; } \ - if (tr_contiguous) { \ - return (T*)(tr->data_ptr_) + workload_idx; \ - } else { \ - int64_t offset = 0; \ - for (uniform int64_t i = 0; i < self->ndims_; ++i) { \ - offset += workload_idx / self->master_strides_[i] * \ - tr->byte_strides_[i]; \ - workload_idx = workload_idx % self->master_strides_[i]; \ - } \ - return (T*)((uint8_t*)(tr->data_ptr_) + offset); \ - } \ +#define TEMPLATE(T) \ + static inline T* OPEN3D_SPECIALIZED(T, Indexer_GetWorkloadDataPtr)( \ + const uniform Indexer* const uniform self, \ + const uniform TensorRef* const uniform tr, \ + uniform bool tr_contiguous, int64_t workload_idx) { \ + cif(workload_idx < 0) { return NULL; } \ + if (tr_contiguous) { \ + return (T*)(tr->data_ptr_) + workload_idx; \ + } else { \ + int64_t offset = 0; \ + for (uniform int64_t i = 0; i < self->ndims_; ++i) { \ + offset += workload_idx / self->primary_strides_[i] * \ + tr->byte_strides_[i]; \ + workload_idx = workload_idx % self->primary_strides_[i]; \ + } \ + return (T*)((uint8_t*)(tr->data_ptr_) + offset); \ + } \ } #pragma ignore warning(perf) OPEN3D_INSTANTIATE_TEMPLATE_WITH_BOOL() diff --git a/cpp/open3d/core/kernel/IndexReductionCPU.cpp b/cpp/open3d/core/kernel/IndexReductionCPU.cpp index 4458832ec96..4488b3bb6a8 100644 --- a/cpp/open3d/core/kernel/IndexReductionCPU.cpp +++ b/cpp/open3d/core/kernel/IndexReductionCPU.cpp @@ -22,7 +22,7 @@ void LaunchIndexReductionKernel(int64_t dim, Tensor& dst, const func_t& element_kernel) { // index: [N,], src: [N, D], dst: [M, D] - // In Indexer, output shape defines the actual master strides. + // In Indexer, output shape defines the actual primary strides. // However, in IndexAdd_, input dominates the iterations. // So put dst (output) at indexer's input, and src (input) at output. Indexer indexer({dst}, src, DtypePolicy::NONE); diff --git a/cpp/open3d/core/kernel/IndexReductionCUDA.cu b/cpp/open3d/core/kernel/IndexReductionCUDA.cu index 24c913a46af..922bd0a84c9 100644 --- a/cpp/open3d/core/kernel/IndexReductionCUDA.cu +++ b/cpp/open3d/core/kernel/IndexReductionCUDA.cu @@ -27,7 +27,7 @@ void LaunchIndexReductionKernel(int64_t dim, OPEN3D_ASSERT_HOST_DEVICE_LAMBDA(func_t); // index: [N,], src: [N, D], dst: [M, D] - // In Indexer, output shape defines the actual master strides. + // In Indexer, output shape defines the actual primary strides. // However, in IndexAdd_, input dominates the iterations. // So put dst (output) at indexer's input, and src (input) at output. 
Indexer indexer({dst}, src, DtypePolicy::NONE); diff --git a/cpp/open3d/core/kernel/ReductionCPU.cpp b/cpp/open3d/core/kernel/ReductionCPU.cpp index 441872168a7..7caa60b34f7 100644 --- a/cpp/open3d/core/kernel/ReductionCPU.cpp +++ b/cpp/open3d/core/kernel/ReductionCPU.cpp @@ -140,7 +140,7 @@ class CPUReductionEngine { static void LaunchReductionParallelDim(const Indexer& indexer, func_t element_kernel) { // Prefers outer dimension >= num_threads. - const int64_t* indexer_shape = indexer.GetMasterShape(); + const int64_t* indexer_shape = indexer.GetPrimaryShape(); const int64_t num_dims = indexer.NumDims(); int64_t num_threads = utility::EstimateMaxThreads(); diff --git a/cpp/open3d/core/kernel/ReductionCUDA.cu b/cpp/open3d/core/kernel/ReductionCUDA.cu index 46b0d519da2..f209b4448d9 100644 --- a/cpp/open3d/core/kernel/ReductionCUDA.cu +++ b/cpp/open3d/core/kernel/ReductionCUDA.cu @@ -141,13 +141,13 @@ public: // Map block.x to the fastest reducing dimension. It implies: // 1. BlockXReduce is required. // 2. block.y now max out to num_outputs. - dim0 = indexer.GetMasterShape()[0]; + dim0 = indexer.GetPrimaryShape()[0]; dim1 = num_outputs_; } else { // Map block.x to the fastest non reducing dimension. It implies: // 1. BlockXReduce is turned off. // 2. block.y now max out to num_inputs_per_output_. - dim0 = indexer.GetMasterShape()[indexer.NumReductionDims()]; + dim0 = indexer.GetPrimaryShape()[indexer.NumReductionDims()]; dim1 = num_inputs_per_output_; } @@ -352,7 +352,7 @@ static OffsetCalculator<2, index_t> MakeOutputCalculator( indexer.GetOutput().byte_strides_ + num_reduction_dims, indexer.GetInput(0).byte_strides_ + num_reduction_dims, }; - const int64_t* shape = indexer.GetMasterShape() + num_reduction_dims; + const int64_t* shape = indexer.GetPrimaryShape() + num_reduction_dims; return OffsetCalculator<2, index_t>(num_output_dims, shape, strides.data()); } @@ -364,7 +364,7 @@ static OffsetCalculator<1, index_t> MakeInputCalculator( indexer.GetInput(0).byte_strides_, }; return OffsetCalculator<1, index_t>( - num_reduction_dims, indexer.GetMasterShape(), strides.data()); + num_reduction_dims, indexer.GetPrimaryShape(), strides.data()); } template @@ -923,7 +923,7 @@ private: for (int dim = 0; dim < indexer.NumDims(); dim++) { output_memory_size = std::max( output_memory_size, - indexer.GetMasterShape()[dim] * + indexer.GetPrimaryShape()[dim] * indexer.GetOutput().byte_strides_[dim]); } owned_buf_ptr.reset(new AccumulationBuffer( diff --git a/cpp/pybind/CMakeLists.txt b/cpp/pybind/CMakeLists.txt index 6d7991d44e6..2072615e6c9 100644 --- a/cpp/pybind/CMakeLists.txt +++ b/cpp/pybind/CMakeLists.txt @@ -171,6 +171,8 @@ if (BUNDLE_OPEN3D_ML) open3d_ml PREFIX "${CMAKE_BINARY_DIR}/open3d_ml" GIT_REPOSITORY "${OPEN3D_ML_ROOT}" + GIT_TAG origin/main + GIT_SHALLOW BUILD_IN_SOURCE ON # do not configure CONFIGURE_COMMAND "" diff --git a/cpp/tests/core/Indexer.cpp b/cpp/tests/core/Indexer.cpp index 798200ba45a..bc53c4f3c59 100644 --- a/cpp/tests/core/Indexer.cpp +++ b/cpp/tests/core/Indexer.cpp @@ -67,9 +67,10 @@ TEST_P(IndexerPermuteDevices, IndexerCopyConstructor) { EXPECT_EQ(indexer_a.GetOutput(), indexer_b.GetOutput()); EXPECT_EQ(indexer_a.NumDims(), indexer_b.NumDims()); for (int64_t i = 0; i < indexer_a.NumDims(); i++) { - EXPECT_EQ(indexer_a.GetMasterShape()[i], indexer_b.GetMasterShape()[i]); - EXPECT_EQ(indexer_a.GetMasterStrides()[i], - indexer_b.GetMasterStrides()[i]); + EXPECT_EQ(indexer_a.GetPrimaryShape()[i], + indexer_b.GetPrimaryShape()[i]); + 
EXPECT_EQ(indexer_a.GetPrimaryStrides()[i], + indexer_b.GetPrimaryStrides()[i]); EXPECT_EQ(indexer_a.IsReductionDim(i), indexer_b.IsReductionDim(i)); } } @@ -93,11 +94,11 @@ TEST_P(IndexerPermuteDevices, BroadcastRestride) { // Check core::Indexer's global info EXPECT_EQ(indexer.NumInputs(), 2); EXPECT_EQ(indexer.NumWorkloads(), 24); - EXPECT_EQ(core::SizeVector(indexer.GetMasterShape(), - indexer.GetMasterShape() + indexer.NumDims()), + EXPECT_EQ(core::SizeVector(indexer.GetPrimaryShape(), + indexer.GetPrimaryShape() + indexer.NumDims()), core::SizeVector({2, 2, 2, 1, 3})); - EXPECT_EQ(core::SizeVector(indexer.GetMasterStrides(), - indexer.GetMasterStrides() + indexer.NumDims()), + EXPECT_EQ(core::SizeVector(indexer.GetPrimaryStrides(), + indexer.GetPrimaryStrides() + indexer.NumDims()), core::SizeVector({12, 6, 3, 3, 1})); // Check tensor shape diff --git a/docker/Dockerfile.ci b/docker/Dockerfile.ci index dc6b1af2245..99e359df0e0 100644 --- a/docker/Dockerfile.ci +++ b/docker/Dockerfile.ci @@ -135,7 +135,7 @@ RUN CCACHE_DIR=$(ccache -p | grep cache_dir | grep -oE "[^ ]+$") \ && ccache -M 5G \ && ccache -s -# Checkout Open3D-ML master branch +# Checkout Open3D-ML main branch # TODO: We may add support for local Open3D-ML repo or pinned ML repo tag ENV OPEN3D_ML_ROOT=/root/Open3D-ML RUN git clone --depth 1 https://github.com/isl-org/Open3D-ML.git ${OPEN3D_ML_ROOT} diff --git a/docker/Dockerfile.wheel b/docker/Dockerfile.wheel index 9b9fac01935..6a7d7080aed 100644 --- a/docker/Dockerfile.wheel +++ b/docker/Dockerfile.wheel @@ -94,7 +94,7 @@ RUN conda create -y -n open3d python=${PYTHON_VERSION} \ RUN which python \ && python --version -# Checkout Open3D-ML master branch +# Checkout Open3D-ML main branch # TODO: We may add support for local Open3D-ML repo or pinned ML repo tag ENV OPEN3D_ML_ROOT=/root/Open3D-ML RUN git clone https://github.com/isl-org/Open3D-ML.git ${OPEN3D_ML_ROOT} diff --git a/docker/docker_build.sh b/docker/docker_build.sh index b0b405d9de2..97f8cc86c5a 100755 --- a/docker/docker_build.sh +++ b/docker/docker_build.sh @@ -432,7 +432,7 @@ sycl-shared_export_env() { export DOCKER_TAG=open3d-ci:sycl-shared # https://hub.docker.com/r/intel/oneapi-basekit - # https://github.com/intel/oneapi-containers/blob/master/images/docker/basekit/Dockerfile.ubuntu-18.04 + # https://github.com/intel/oneapi-containers/blob/main/images/docker/basekit/Dockerfile.ubuntu-18.04 export BASE_IMAGE=intel/oneapi-basekit:2022.2-devel-ubuntu20.04 export DEVELOPER_BUILD=ON export CCACHE_TAR_NAME=open3d-ci-sycl @@ -449,7 +449,7 @@ sycl-static_export_env() { export DOCKER_TAG=open3d-ci:sycl-static # https://hub.docker.com/r/intel/oneapi-basekit - # https://github.com/intel/oneapi-containers/blob/master/images/docker/basekit/Dockerfile.ubuntu-18.04 + # https://github.com/intel/oneapi-containers/blob/main/images/docker/basekit/Dockerfile.ubuntu-18.04 export BASE_IMAGE=intel/oneapi-basekit:2022.2-devel-ubuntu20.04 export DEVELOPER_BUILD=ON export CCACHE_TAR_NAME=open3d-ci-sycl diff --git a/docs/conf.py b/docs/conf.py index a02dfcf53e5..9ec55efcaf1 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -89,8 +89,8 @@ def get_git_short_hash(): # source_suffix = ['.rst', '.md'] -# The master toctree document. -master_doc = "index" +# The primary toctree document. +primary_doc = "index" # General information about the project. project = u"Open3D" @@ -105,7 +105,7 @@ def get_git_short_hash(): # Usually, the `version` value is set to the current git commit hash. 
# At Open3D releases, the `version` value is set to Open3D version number. current_hash = get_git_short_hash() -version = "master ({})".format(current_hash) +version = "main ({})".format(current_hash) release = version # The language for content autogenerated by Sphinx. Refer to documentation @@ -189,7 +189,7 @@ def get_git_short_hash(): # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [( - master_doc, + primary_doc, "Open3D.tex", u"Open3D Documentation", u"Qianyi Zhou", @@ -200,7 +200,7 @@ def get_git_short_hash(): # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). -man_pages = [(master_doc, "open3d", u"Open3D Documentation", [author], 1)] +man_pages = [(primary_doc, "open3d", u"Open3D Documentation", [author], 1)] # -- Options for Texinfo output ------------------------------------------- @@ -208,7 +208,7 @@ def get_git_short_hash(): # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [( - master_doc, + primary_doc, "Open3D", u"Open3D Documentation", author, diff --git a/docs/contribute/contribute.rst b/docs/contribute/contribute.rst index 4e74fefb00b..e3be248bc4b 100644 --- a/docs/contribute/contribute.rst +++ b/docs/contribute/contribute.rst @@ -35,7 +35,7 @@ We have defined a simple recipe for contributing to Open3D: 2. Read the :ref:`style_guide` guidelines and install the required tools 3. Check existing classes, examples, and related code 4. Create new features in your fork. Do not forget unit tests and documentation. Double-check the :ref:`style_guide` -5. Make a pull request to the `master branch `_ +5. Make a pull request to the `main branch `_ 6. Make sure your PR passes the CI tests. If it doesn’t, fix the code until it builds and passes the CI tests 7. Your PR will be assigned to reviewers 8. Engage with your reviewers during the review process. Address issues or concerns raised during the review. Don’t let the review die. diff --git a/docs/contribute/contribution_recipes.rst b/docs/contribute/contribution_recipes.rst index 03102849214..f03434c461b 100644 --- a/docs/contribute/contribution_recipes.rst +++ b/docs/contribute/contribution_recipes.rst @@ -16,7 +16,7 @@ Recommended procedure 3. Check existing classes, examples, and related code. 4. Fork Open3D on `GitHub `_. 5. Create new features in your fork. Do not forget unit tests and documentation. Double-check the :ref:`style_guide`. -6. Make a pull request to the `master branch `_. +6. Make a pull request to the `main branch `_. 7. Make sure your PR passes the CI tests. If it doesn’t, fix the code until it builds and passes the CI tests. 8. Your PR will be assigned to reviewers. 9. Engage with your reviewers during the review process. Address issues or concerns raised during the review. Don’t let the review die. diff --git a/docs/getting_started.in.rst b/docs/getting_started.in.rst index 018db7bcd6d..c5709ce4ae0 100644 --- a/docs/getting_started.in.rst +++ b/docs/getting_started.in.rst @@ -63,35 +63,35 @@ Development version (pip) ------------------------- To test the latest features in Open3D, download and install the development -version (``HEAD`` of ``master`` branch): +version (``HEAD`` of ``main`` branch): ..
list-table:: :stub-columns: 1 :widths: auto * - Linux - - `Python 3.8 `__ - - `Python 3.9 `__ - - `Python 3.10 `__ - - `Python 3.11 `__ + - `Python 3.8 `__ + - `Python 3.9 `__ + - `Python 3.10 `__ + - `Python 3.11 `__ * - Linux (CPU) - - `Python 3.8 `__ - - `Python 3.9 `__ - - `Python 3.10 `__ - - `Python 3.11 `__ + - `Python 3.8 `__ + - `Python 3.9 `__ + - `Python 3.10 `__ + - `Python 3.11 `__ * - MacOS - - `Python 3.8 `__ - - `Python 3.9 `__ - - `Python 3.10 `__ - - `Python 3.11 `__ + - `Python 3.8 `__ + - `Python 3.9 `__ + - `Python 3.10 `__ + - `Python 3.11 `__ * - Windows - - `Python 3.8 `__ - - `Python 3.9 `__ - - `Python 3.10 `__ - - `Python 3.11 `__ + - `Python 3.8 `__ + - `Python 3.9 `__ + - `Python 3.10 `__ + - `Python 3.11 `__ Please use these links from the `latest version of this page `__ only. You can also @@ -155,30 +155,30 @@ binary package archive from `Github releases `__ (since `v0.15`). These binary package archives contain the Open3D shared library built with all supported features and are available for the main supported platforms. Also, the latest -development version (``HEAD`` of ``master`` branch) binary package archives are +development version (``HEAD`` of ``main`` branch) binary package archives are provided here [#]_: :Linux (Ubuntu 18.04+ or glibc 2.27+ [#]_): .. hlist:: :columns: 2 - * `x86_64 (CXX11 ABI) `__ - * `x86_64 (CXX11 ABI) with CUDA 11.x `__ - * `x86_64 (pre CXX11 ABI) `__ - * `x86_64 (pre CXX11 ABI) with CUDA 11.x `__ + * `x86_64 (CXX11 ABI) `__ + * `x86_64 (CXX11 ABI) with CUDA 11.x `__ + * `x86_64 (pre CXX11 ABI) `__ + * `x86_64 (pre CXX11 ABI) with CUDA 11.x `__ :MacOSX v10.15+: .. hlist:: :columns: 2 - * `x86_64 `__ + * `x86_64 `__ :Windows 10+: .. hlist:: :columns: 2 - * `x86_64 Release `__ - * `x86_64 Debug `__ + * `x86_64 Release `__ + * `x86_64 Debug `__ .. [#] Please use these links from the `latest version of this page `__ only. diff --git a/docs/index.rst b/docs/index.rst index 6be002a714b..09dda4f1dee 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -1,4 +1,4 @@ -.. Open3D documentation master file, created by +.. Open3D documentation primary file, created by sphinx-quickstart on Mon Apr 3 14:18:28 2017. You can adapt this file completely to your liking, but it should at least contain the root `toctree` directive. diff --git a/docs/jupyter/core/hashmap.ipynb b/docs/jupyter/core/hashmap.ipynb index 6d05755c4f9..6f16b61b3ec 100644 --- a/docs/jupyter/core/hashmap.ipynb +++ b/docs/jupyter/core/hashmap.ipynb @@ -1,754 +1,754 @@ { - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Hash map\n", - "A hash map is a data structure that maps keys to values with amortized O(1) insertion, find, and deletion time. The map is unordered.\n", - "\n", - "Open3D allows parallel hashing on CPU and GPU with keys and values organized as Tensors, where we take a batch of keys and/or values as input.\n", - "\n", - "- Keys: The Open3D hash map supports multi-dimensional keys. Due to precision issue, floating-point is not recommended to be regarded as keys. By default we support up to 6D coordinates in integer. For higher dimensions, you may modify the macros and compile from source in [this file](https://github.com/isl-org/Open3D/blob/master/cpp/open3d/core/hashmap/Dispatch.h) within this snippet:\n", - "\n", - "```cpp\n", - "#define DIM_SWITCHER(DTYPE, DIM, ...) 
\\\n", - " if (DIM == 1) { \\\n", - " INSTANTIATE_TYPES(DTYPE, 1) \\\n", - " return __VA_ARGS__(); \\\n", - " } else if (DIM == 2) { \\\n", - " INSTANTIATE_TYPES(DTYPE, 2) \\\n", - " return __VA_ARGS__(); \\\n", - " } else if (DIM == 3) { \\\n", - " INSTANTIATE_TYPES(DTYPE, 3) \\\n", - " return __VA_ARGS__(); \\\n", - " } else if (DIM == 4) { \\\n", - " INSTANTIATE_TYPES(DTYPE, 4) \\\n", - " return __VA_ARGS__(); \\\n", - " } else if (DIM == 5) { \\\n", - " INSTANTIATE_TYPES(DTYPE, 5) \\\n", - " return __VA_ARGS__(); \\\n", - " } else if (DIM == 6) { \\\n", - " INSTANTIATE_TYPES(DTYPE, 6) \\\n", - " return __VA_ARGS__(); \\\n", - " } else { \\\n", - " utility::LogError( \\\n", - " \"Unsupported dim {}, please modify {} and compile from \" \\\n", - " \"source\", \\\n", - " DIM, __FILE__); \\\n", - " }\n", - "```\n", - "\n", - "- Values: The Open3D hash map supports values in arbitrary dimensions and data types.\n", - "- Devices: Both CPU and CUDA are supported. The CPU hashmap is based on [TBB](https://github.com/oneapi-src/oneTBB), while the CUDA hash map is based upon [stdgpu](https://github.com/stotko/stdgpu)." - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [], - "source": [ - "import open3d.core as o3c\n", - "import numpy as np\n", - "\n", - "capacity = 10\n", - "device = o3c.Device('cpu:0')" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## A simple example\n", - "We first create a simple hash map from integers to integers.\n", - "\n", - "We specify an initial estimated capacity. This capacity will be automatically adjusted when insertion occurs. Then we specify the element shape of keys and values, corresponding to the shape of each individual element." - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [], - "source": [ - "hashmap = o3c.HashMap(capacity,\n", - " key_dtype=o3c.int64,\n", - " key_element_shape=(1,),\n", - " value_dtype=o3c.int64,\n", - " value_element_shape=(1,),\n", - " device=device)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Insertion\n", - "Next we show how to insert a batch of (key, value) pairs. You'll need to prepare two tensors:\n", - "\n", - "The `keys` tensor contains all keys. \n", - "\n", - "- The `keys` tensor must be on the same device as the hash map. \n", - "- The shape of the `keys` tensor is `key_elment_shape` with `N` prefixed to the front. \n", - "\n", - "For example \n", - " \n", - "1. if `key_element_shape == ()`, `keys.shape == (N,)`; \n", - "2. if `key_element_shape == (3,)`, `keys.shape == (N, 3).`; \n", - "3. if `key_element_shape == (8, 8, 8)`, `keys.shape == (N, 8, 8, 8).`\n", - " \n", - "The `vals` tensor contains all values. \n", - " \n", - "- The `vals` tensor must be on the same device as the hash map. \n", - "- The shape of the `vals` tensor is `val_elment_shape` with `N` prefixed to the front. \n", - "\n", - "For example \n", - "\n", - "1. if `val_elment_shape == ()`, `vals.shape == (N,)`; \n", - "2. if `val_elment_shape == (3,)`, `vals.shape == (N, 3).`;\n", - "3. 
if `val_elment_shape == (8, 8, 8)`, `vals.shape == (N, 8, 8, 8).`" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [], - "source": [ - "# Prepare a batch of 7 key/values, each a int64 element\n", - "keys = o3c.Tensor([[100], [200], [400], [800], [300], [200], [100]],\n", - " dtype=o3c.int64,\n", - " device=device)\n", - "vals = o3c.Tensor([[1], [2], [4], [8], [3], [2], [1]],\n", - " dtype=o3c.int64,\n", - " device=device)\n", - "buf_indices, masks = hashmap.insert(keys, vals)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Here, `masks` indicates whether a (key, value) pair is successfully inserted. \n", - "A mask of value `True` means the insertion is successful and `False` if the insertion is skipped.\n", - "\n", - "Unsuccessful insertion only happens when there are duplicated keys. \n", - "\n", - "If there are duplicated keys, it is guaranteed that only **one** of the duplicated keys and its corresponding value will be inserted. That is, for a set of duplicated keys, one and only one will get a `True` mask. \n", - "\n", - "Since the insertion runs in parallel, there is no guarantee **which one** of the duplicated keys will be inserted. That is, for a set of duplicated keys, we don't know which key gets the `True` mask.\n", - "\n", - "Using advanced indexing, we can obtain which keys are inserted:" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": { - "scrolled": true - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "masks: \n", - " [True True True True True False False]\n", - "Tensor[shape={7}, stride={1}, Bool, CPU:0, 0x5571293a85a0]\n", - "inserted keys: \n", - " [[100],\n", - " [200],\n", - " [400],\n", - " [800],\n", - " [300]]\n", - "Tensor[shape={5, 1}, stride={1, 1}, Int64, CPU:0, 0x557128c4f760]\n" - ] - } - ], - "source": [ - "print('masks: \\n', masks)\n", - "print('inserted keys: \\n', keys[masks])" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Next, let's look at the usage of `buf_indices`. In our hash map, keys and values are stored in contiguous buffer tensors that could be conveniently accessed by indices. Instead of returning iterators that are less friendly to vectorized programming, we return such buffer indices. \n", - "\n", - "
\n", - "These indices are not necessarily the same as input indices due to concurrency. Also, the indices are by default stored in int32 due to the underlying implementations. A conversion to int64 is required for advanced indexing.\n", - "
" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "buffer indices: \n", - " [0 1 3 4 2]\n", - "Tensor[shape={5}, stride={1}, Int64, CPU:0, 0x5571293ae6d0]\n", - "inserted keys: \n", - " [[100],\n", - " [200],\n", - " [400],\n", - " [800],\n", - " [300]]\n", - "Tensor[shape={5, 1}, stride={1, 1}, Int64, CPU:0, 0x5571284a73b0]\n", - "inserted values: \n", - " [[1],\n", - " [2],\n", - " [4],\n", - " [8],\n", - " [3]]\n", - "Tensor[shape={5, 1}, stride={1, 1}, Int64, CPU:0, 0x5571284a6f50]\n" - ] - } - ], - "source": [ - "buf_keys = hashmap.key_tensor()\n", - "buf_vals = hashmap.value_tensor()\n", - "buf_indices = buf_indices[masks].to(o3c.int64)\n", - "print('buffer indices: \\n', buf_indices)\n", - "\n", - "inserted_keys = buf_keys[buf_indices]\n", - "inserted_vals = buf_vals[buf_indices]\n", - "print('inserted keys: \\n', inserted_keys)\n", - "print('inserted values: \\n', inserted_vals)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Query\n", - "The query operation follows the similar convention. Note as the operation is read-only, duplicate keys are allowed and will be returned properly." - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "found valid keys: \n", - " [[100],\n", - " [300],\n", - " [200],\n", - " [100]]\n", - "Tensor[shape={4, 1}, stride={1, 1}, Int64, CPU:0, 0x5571293a3ae0]\n", - "found valid values: \n", - " [[1],\n", - " [3],\n", - " [2],\n", - " [1]]\n", - "Tensor[shape={4, 1}, stride={1, 1}, Int64, CPU:0, 0x5571293ae6d0]\n" - ] - } - ], - "source": [ - "query_keys = o3c.Tensor([[1000], [100], [300], [200], [100], [0]],\n", - " dtype=o3c.int64,\n", - " device=device)\n", - "buf_indices, masks = hashmap.find(query_keys)\n", - "valid_keys = query_keys[masks]\n", - "buf_indices = buf_indices[masks].to(o3c.int64)\n", - "valid_vals = hashmap.value_tensor()[buf_indices]\n", - "print('found valid keys: \\n', valid_keys)\n", - "print('found valid values: \\n', valid_vals)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Active entries in the hash map\n", - "Sometimes we are interested in all the active entries. This can be achieved by:" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [], - "source": [ - "def print_active_entries(hashmap):\n", - " active_buf_indices = hashmap.active_buf_indices().to(o3c.int64)\n", - "\n", - " active_keys = hashmap.key_tensor()[active_buf_indices]\n", - " print('all active keys:\\n', active_keys)\n", - "\n", - " active_vals = hashmap.value_tensor()[active_buf_indices]\n", - " print('all active values:\\n', active_vals)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Again, due to concurrency, the order is not guaranteed, but the key-value correspondence will be of course preserved." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Erase\n", - "We can similarly erase keys. 
The behavior is similar to insert:" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "erase masks:\n", - " [True False False]\n", - "Tensor[shape={3}, stride={1}, Bool, CPU:0, 0x5571292dd430]\n", - "erased keys:\n", - " [[100]]\n", - "Tensor[shape={1, 1}, stride={1, 1}, Int64, CPU:0, 0x5571284a6a10]\n" - ] - } - ], - "source": [ - "erase_keys = o3c.Tensor([[100], [1000], [100]], dtype=o3c.int64, device=device)\n", - "masks = hashmap.erase(erase_keys)\n", - "print('erase masks:\\n', masks)\n", - "print('erased keys:\\n', erase_keys[masks])" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now we can see that active entries have been changed:" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "all active keys:\n", - " [[300],\n", - " [200],\n", - " [400],\n", - " [800]]\n", - "Tensor[shape={4, 1}, stride={1, 1}, Int64, CPU:0, 0x5571293a9ad0]\n", - "all active values:\n", - " [[3],\n", - " [2],\n", - " [4],\n", - " [8]]\n", - "Tensor[shape={4, 1}, stride={1, 1}, Int64, CPU:0, 0x5571293b1d10]\n" - ] - } - ], - "source": [ - "print_active_entries(hashmap)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Activate\n", - "In some cases, we know a key is occupied, but do not know the associated value - we prefer to compute and modify it in-place afterwards. This can be achieved by a chain of operations:" - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "all active keys:\n", - " [[300],\n", - " [1000],\n", - " [200],\n", - " [400],\n", - " [0],\n", - " [800]]\n", - "Tensor[shape={6, 1}, stride={1, 1}, Int64, CPU:0, 0x5571284a6b00]\n", - "all active values:\n", - " [[3],\n", - " [10],\n", - " [2],\n", - " [4],\n", - " [0],\n", - " [8]]\n", - "Tensor[shape={6, 1}, stride={1, 1}, Int64, CPU:0, 0x5571284a6a60]\n" - ] - } - ], - "source": [ - "activate_keys = o3c.Tensor([[1000], [0]], dtype=o3c.int64, device=device)\n", - "buf_indices, masks = hashmap.activate(activate_keys)\n", - "\n", - "buf_vals = hashmap.value_tensor()\n", - "# Note the assigned tensor has to be strictly in the shape of (N, 1) due to broadcasting\n", - "buf_vals[buf_indices[masks].to(o3c.int64)] \\\n", - " = o3c.Tensor([[10], [0]],\n", - " dtype=o3c.int64,\n", - " device=device)\n", - "\n", - "print_active_entries(hashmap)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Rehashing and reserve\n", - "Rehashing will be automatically triggered when the initial capacity is exceeded after multiple insertions, where the capacity of the hash map is doubled. Rehashing will change the location (i.e. buffer indices) of the inserted key-value pairs, so an update of the buffer indices in the downstream applications is required." 
- ] - }, - { - "cell_type": "code", - "execution_count": 11, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "size: 6\n", - "capacity: 10\n", - "size: 9\n", - "capacity: 10\n", - "all active keys:\n", - " [[300],\n", - " [1500],\n", - " [700],\n", - " [1000],\n", - " [200],\n", - " [400],\n", - " [1200],\n", - " [0],\n", - " [800]]\n", - "Tensor[shape={9, 1}, stride={1, 1}, Int64, CPU:0, 0x5571293b4d80]\n", - "all active values:\n", - " [[3],\n", - " [-1],\n", - " [7],\n", - " [10],\n", - " [2],\n", - " [4],\n", - " [12],\n", - " [0],\n", - " [8]]\n", - "Tensor[shape={9, 1}, stride={1, 1}, Int64, CPU:0, 0x5571293b64d0]\n", - "size: 12\n", - "capacity: 20\n", - "all active keys:\n", - " [[1700],\n", - " [300],\n", - " [1500],\n", - " [700],\n", - " [1000],\n", - " [200],\n", - " [1800],\n", - " [400],\n", - " [1200],\n", - " [1600],\n", - " [0],\n", - " [800]]\n", - "Tensor[shape={12, 1}, stride={1, 1}, Int64, CPU:0, 0x5571293b6410]\n", - "all active values:\n", - " [[17],\n", - " [3],\n", - " [-1],\n", - " [7],\n", - " [10],\n", - " [2],\n", - " [18],\n", - " [4],\n", - " [12],\n", - " [16],\n", - " [0],\n", - " [8]]\n", - "Tensor[shape={12, 1}, stride={1, 1}, Int64, CPU:0, 0x5571293b8250]\n" - ] - } - ], - "source": [ - "print('size:', hashmap.size())\n", - "print('capacity:', hashmap.capacity())\n", - "\n", - "keys = o3c.Tensor([[700], [1200], [1500]], dtype=o3c.int64, device=device)\n", - "vals = o3c.Tensor([[7], [12], [-1]], dtype=o3c.int64, device=device)\n", - "buf_indices, masks = hashmap.insert(keys, vals)\n", - "print('size:', hashmap.size())\n", - "print('capacity:', hashmap.capacity())\n", - "print_active_entries(hashmap)\n", - "\n", - "keys = o3c.Tensor([[1600], [1700], [1800]], dtype=o3c.int64, device=device)\n", - "vals = o3c.Tensor([[16], [17], [18]], dtype=o3c.int64, device=device)\n", - "buf_indices, masks = hashmap.insert(keys, vals)\n", - "print('size:', hashmap.size())\n", - "print('capacity:', hashmap.capacity())\n", - "print_active_entries(hashmap)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Rehashing is slow, as it increases the hash map capacity, collects all the active entries, and insert them back to the hash map. If we know the capacity beforehand, we can pre-allocate a chunk of memory and avoid rehashing:" - ] - }, - { - "cell_type": "code", - "execution_count": 12, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "size: 12\n", - "capacity: 100\n" - ] - } - ], - "source": [ - "hashmap.reserve(100)\n", - "print('size:', hashmap.size())\n", - "print('capacity:', hashmap.capacity())" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Multi-valued hash map" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "In real-world applications, we want to map coordinates to complex data structures, e.g. a voxel position to its color and weight. This can be achieved by a multi-valued hash map. \n", - "\n", - "
\n", - "This is not a multimap and does not allow duplicate keys. A multi-valued hash map can be constructed by\n", - "
" - ] - }, - { - "cell_type": "code", - "execution_count": 13, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "([1 0 2]\n", - " Tensor[shape={3}, stride={1}, Int32, CPU:0, 0x5571293b83f0],\n", - " [True True True]\n", - " Tensor[shape={3}, stride={1}, Bool, CPU:0, 0x5571293bab60])" - ] - }, - "execution_count": 13, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "mhashmap = o3c.HashMap(capacity,\n", - " key_dtype=o3c.int32,\n", - " key_element_shape=(3,),\n", - " value_dtypes=(o3c.uint8, o3c.float32),\n", - " value_element_shapes=((3,), (1,)),\n", - " device=device)\n", - "voxel_coords = o3c.Tensor([[0, 1, 0], [-1, 2, 3], [3, 4, 1]],\n", - " dtype=o3c.int32,\n", - " device=device)\n", - "voxel_colors = o3c.Tensor([[0, 255, 0], [255, 255, 0], [255, 0, 0]],\n", - " dtype=o3c.uint8,\n", - " device=device)\n", - "voxel_weights = o3c.Tensor([[0.9], [0.1], [0.3]],\n", - " dtype=o3c.float32,\n", - " device=device)\n", - "mhashmap.insert(voxel_coords, (voxel_colors, voxel_weights))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We can then query and access by indices with a slightly different routine:" - ] - }, - { - "cell_type": "code", - "execution_count": 14, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "found coordinates:\n", - " [[0 1 0]]\n", - "Tensor[shape={1, 3}, stride={3, 1}, Int32, CPU:0, 0x5571293c47c0]\n", - "found colors:\n", - " [[0 255 0]]\n", - "Tensor[shape={1, 3}, stride={3, 1}, UInt8, CPU:0, 0x5571293c47a0]\n", - "found weights:\n", - " [[0.9]]\n", - "Tensor[shape={1, 1}, stride={1, 1}, Float32, CPU:0, 0x5571284a6540]\n" - ] - } - ], - "source": [ - "query_coords = o3c.Tensor([[0, 1, 0]], dtype=o3c.int32, device=device)\n", - "buf_indices, masks = mhashmap.find(query_coords)\n", - "\n", - "valid_keys = query_coords[masks]\n", - "buf_indices = buf_indices[masks].to(o3c.int64)\n", - "valid_colors = mhashmap.value_tensor(0)[buf_indices]\n", - "valid_weights = mhashmap.value_tensor(1)[buf_indices]\n", - "print('found coordinates:\\n', valid_keys)\n", - "print('found colors:\\n', valid_colors)\n", - "print('found weights:\\n', valid_weights)" - ] - }, - { - "cell_type": "code", - "execution_count": 15, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "all active keys:\n", - " [[0 1 0],\n", - " [3 4 1],\n", - " [-1 2 3]]\n", - "Tensor[shape={3, 3}, stride={3, 1}, Int32, CPU:0, 0x5571293b1d40]\n", - "active value 0\n", - ": [[0 255 0],\n", - " [255 0 0],\n", - " [255 255 0]]\n", - "Tensor[shape={3, 3}, stride={3, 1}, UInt8, CPU:0, 0x5571293c5610]\n", - "active value 1\n", - ": [[0.9],\n", - " [0.3],\n", - " [0.1]]\n", - "Tensor[shape={3, 1}, stride={1, 1}, Float32, CPU:0, 0x5571293c56d0]\n" - ] + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Hash map\n", + "A hash map is a data structure that maps keys to values with amortized O(1) insertion, find, and deletion time. The map is unordered.\n", + "\n", + "Open3D allows parallel hashing on CPU and GPU with keys and values organized as Tensors, where we take a batch of keys and/or values as input.\n", + "\n", + "- Keys: The Open3D hash map supports multi-dimensional keys. Due to precision issue, floating-point is not recommended to be regarded as keys. By default we support up to 6D coordinates in integer. 
For higher dimensions, you may modify the macros and compile from source in [this file](https://github.com/isl-org/Open3D/blob/main/cpp/open3d/core/hashmap/Dispatch.h) within this snippet:\n", + "\n", + "```cpp\n", + "#define DIM_SWITCHER(DTYPE, DIM, ...) \\\n", + "    if (DIM == 1) { \\\n", + "        INSTANTIATE_TYPES(DTYPE, 1) \\\n", + "        return __VA_ARGS__(); \\\n", + "    } else if (DIM == 2) { \\\n", + "        INSTANTIATE_TYPES(DTYPE, 2) \\\n", + "        return __VA_ARGS__(); \\\n", + "    } else if (DIM == 3) { \\\n", + "        INSTANTIATE_TYPES(DTYPE, 3) \\\n", + "        return __VA_ARGS__(); \\\n", + "    } else if (DIM == 4) { \\\n", + "        INSTANTIATE_TYPES(DTYPE, 4) \\\n", + "        return __VA_ARGS__(); \\\n", + "    } else if (DIM == 5) { \\\n", + "        INSTANTIATE_TYPES(DTYPE, 5) \\\n", + "        return __VA_ARGS__(); \\\n", + "    } else if (DIM == 6) { \\\n", + "        INSTANTIATE_TYPES(DTYPE, 6) \\\n", + "        return __VA_ARGS__(); \\\n", + "    } else { \\\n", + "        utility::LogError( \\\n", + "                \"Unsupported dim {}, please modify {} and compile from \" \\\n", + "                \"source\", \\\n", + "                DIM, __FILE__); \\\n", + "    }\n", + "```\n", + "\n", + "- Values: The Open3D hash map supports values in arbitrary dimensions and data types.\n", + "- Devices: Both CPU and CUDA are supported. The CPU hashmap is based on [TBB](https://github.com/oneapi-src/oneTBB), while the CUDA hash map is based upon [stdgpu](https://github.com/stotko/stdgpu)." + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import open3d.core as o3c\n", + "import numpy as np\n", + "\n", + "capacity = 10\n", + "device = o3c.Device('cpu:0')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## A simple example\n", + "We first create a simple hash map from integers to integers.\n", + "\n", + "We specify an initial estimated capacity. This capacity will be automatically adjusted when insertion occurs. Then we specify the element shape of keys and values, corresponding to the shape of each individual element." + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "hashmap = o3c.HashMap(capacity,\n", + "                      key_dtype=o3c.int64,\n", + "                      key_element_shape=(1,),\n", + "                      value_dtype=o3c.int64,\n", + "                      value_element_shape=(1,),\n", + "                      device=device)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Insertion\n", + "Next we show how to insert a batch of (key, value) pairs. You'll need to prepare two tensors:\n", + "\n", + "The `keys` tensor contains all keys. \n", + "\n", + "- The `keys` tensor must be on the same device as the hash map. \n", + "- The shape of the `keys` tensor is `key_element_shape` with `N` prefixed to the front. \n", + "\n", + "For example: \n", + " \n", + "1. if `key_element_shape == ()`, `keys.shape == (N,)`; \n", + "2. if `key_element_shape == (3,)`, `keys.shape == (N, 3)`; \n", + "3. if `key_element_shape == (8, 8, 8)`, `keys.shape == (N, 8, 8, 8)`.\n", + " \n", + "The `vals` tensor contains all values. \n", + " \n", + "- The `vals` tensor must be on the same device as the hash map. \n", + "- The shape of the `vals` tensor is `val_element_shape` with `N` prefixed to the front. \n", + "\n", + "For example: \n", + "\n", + "1. if `val_element_shape == ()`, `vals.shape == (N,)`; \n", + "2. if `val_element_shape == (3,)`, `vals.shape == (N, 3)`;\n", + "3. 
if `val_element_shape == (8, 8, 8)`, `vals.shape == (N, 8, 8, 8)`." + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "# Prepare a batch of 7 key/values, each an int64 element\n", + "keys = o3c.Tensor([[100], [200], [400], [800], [300], [200], [100]],\n", + "                  dtype=o3c.int64,\n", + "                  device=device)\n", + "vals = o3c.Tensor([[1], [2], [4], [8], [3], [2], [1]],\n", + "                  dtype=o3c.int64,\n", + "                  device=device)\n", + "buf_indices, masks = hashmap.insert(keys, vals)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Here, `masks` indicates whether a (key, value) pair is successfully inserted. \n", + "A mask of value `True` means the insertion was successful, and `False` means the insertion was skipped.\n", + "\n", + "Unsuccessful insertion only happens when there are duplicated keys. \n", + "\n", + "If there are duplicated keys, it is guaranteed that only **one** of the duplicated keys and its corresponding value will be inserted. That is, for a set of duplicated keys, one and only one will get a `True` mask. \n", + "\n", + "Since the insertion runs in parallel, there is no guarantee **which one** of the duplicated keys will be inserted. That is, for a set of duplicated keys, we don't know which key gets the `True` mask.\n", + "\n", + "Using advanced indexing, we can obtain which keys are inserted:" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "masks: \n", + " [True True True True True False False]\n", + "Tensor[shape={7}, stride={1}, Bool, CPU:0, 0x5571293a85a0]\n", + "inserted keys: \n", + " [[100],\n", + " [200],\n", + " [400],\n", + " [800],\n", + " [300]]\n", + "Tensor[shape={5, 1}, stride={1, 1}, Int64, CPU:0, 0x557128c4f760]\n" + ] + } + ], + "source": [ + "print('masks: \\n', masks)\n", + "print('inserted keys: \\n', keys[masks])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Next, let's look at the usage of `buf_indices`. In our hash map, keys and values are stored in contiguous buffer tensors that can be conveniently accessed by indices. Instead of returning iterators that are less friendly to vectorized programming, we return such buffer indices. \n", + "\n", + "
\n", + "These indices are not necessarily the same as input indices due to concurrency. Also, the indices are by default stored in int32 due to the underlying implementations. A conversion to int64 is required for advanced indexing.\n", + "
" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "buffer indices: \n", + " [0 1 3 4 2]\n", + "Tensor[shape={5}, stride={1}, Int64, CPU:0, 0x5571293ae6d0]\n", + "inserted keys: \n", + " [[100],\n", + " [200],\n", + " [400],\n", + " [800],\n", + " [300]]\n", + "Tensor[shape={5, 1}, stride={1, 1}, Int64, CPU:0, 0x5571284a73b0]\n", + "inserted values: \n", + " [[1],\n", + " [2],\n", + " [4],\n", + " [8],\n", + " [3]]\n", + "Tensor[shape={5, 1}, stride={1, 1}, Int64, CPU:0, 0x5571284a6f50]\n" + ] + } + ], + "source": [ + "buf_keys = hashmap.key_tensor()\n", + "buf_vals = hashmap.value_tensor()\n", + "buf_indices = buf_indices[masks].to(o3c.int64)\n", + "print('buffer indices: \\n', buf_indices)\n", + "\n", + "inserted_keys = buf_keys[buf_indices]\n", + "inserted_vals = buf_vals[buf_indices]\n", + "print('inserted keys: \\n', inserted_keys)\n", + "print('inserted values: \\n', inserted_vals)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Query\n", + "The query operation follows the similar convention. Note as the operation is read-only, duplicate keys are allowed and will be returned properly." + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "found valid keys: \n", + " [[100],\n", + " [300],\n", + " [200],\n", + " [100]]\n", + "Tensor[shape={4, 1}, stride={1, 1}, Int64, CPU:0, 0x5571293a3ae0]\n", + "found valid values: \n", + " [[1],\n", + " [3],\n", + " [2],\n", + " [1]]\n", + "Tensor[shape={4, 1}, stride={1, 1}, Int64, CPU:0, 0x5571293ae6d0]\n" + ] + } + ], + "source": [ + "query_keys = o3c.Tensor([[1000], [100], [300], [200], [100], [0]],\n", + " dtype=o3c.int64,\n", + " device=device)\n", + "buf_indices, masks = hashmap.find(query_keys)\n", + "valid_keys = query_keys[masks]\n", + "buf_indices = buf_indices[masks].to(o3c.int64)\n", + "valid_vals = hashmap.value_tensor()[buf_indices]\n", + "print('found valid keys: \\n', valid_keys)\n", + "print('found valid values: \\n', valid_vals)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Active entries in the hash map\n", + "Sometimes we are interested in all the active entries. This can be achieved by:" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [], + "source": [ + "def print_active_entries(hashmap):\n", + " active_buf_indices = hashmap.active_buf_indices().to(o3c.int64)\n", + "\n", + " active_keys = hashmap.key_tensor()[active_buf_indices]\n", + " print('all active keys:\\n', active_keys)\n", + "\n", + " active_vals = hashmap.value_tensor()[active_buf_indices]\n", + " print('all active values:\\n', active_vals)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Again, due to concurrency, the order is not guaranteed, but the key-value correspondence will be of course preserved." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Erase\n", + "We can similarly erase keys. 
The behavior is similar to insert:" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "erase masks:\n", + " [True False False]\n", + "Tensor[shape={3}, stride={1}, Bool, CPU:0, 0x5571292dd430]\n", + "erased keys:\n", + " [[100]]\n", + "Tensor[shape={1, 1}, stride={1, 1}, Int64, CPU:0, 0x5571284a6a10]\n" + ] + } + ], + "source": [ + "erase_keys = o3c.Tensor([[100], [1000], [100]], dtype=o3c.int64, device=device)\n", + "masks = hashmap.erase(erase_keys)\n", + "print('erase masks:\\n', masks)\n", + "print('erased keys:\\n', erase_keys[masks])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now we can see that active entries have been changed:" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "all active keys:\n", + " [[300],\n", + " [200],\n", + " [400],\n", + " [800]]\n", + "Tensor[shape={4, 1}, stride={1, 1}, Int64, CPU:0, 0x5571293a9ad0]\n", + "all active values:\n", + " [[3],\n", + " [2],\n", + " [4],\n", + " [8]]\n", + "Tensor[shape={4, 1}, stride={1, 1}, Int64, CPU:0, 0x5571293b1d10]\n" + ] + } + ], + "source": [ + "print_active_entries(hashmap)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Activate\n", + "In some cases, we know a key is occupied, but do not know the associated value - we prefer to compute and modify it in-place afterwards. This can be achieved by a chain of operations:" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "all active keys:\n", + " [[300],\n", + " [1000],\n", + " [200],\n", + " [400],\n", + " [0],\n", + " [800]]\n", + "Tensor[shape={6, 1}, stride={1, 1}, Int64, CPU:0, 0x5571284a6b00]\n", + "all active values:\n", + " [[3],\n", + " [10],\n", + " [2],\n", + " [4],\n", + " [0],\n", + " [8]]\n", + "Tensor[shape={6, 1}, stride={1, 1}, Int64, CPU:0, 0x5571284a6a60]\n" + ] + } + ], + "source": [ + "activate_keys = o3c.Tensor([[1000], [0]], dtype=o3c.int64, device=device)\n", + "buf_indices, masks = hashmap.activate(activate_keys)\n", + "\n", + "buf_vals = hashmap.value_tensor()\n", + "# Note the assigned tensor has to be strictly in the shape of (N, 1) due to broadcasting\n", + "buf_vals[buf_indices[masks].to(o3c.int64)] \\\n", + " = o3c.Tensor([[10], [0]],\n", + " dtype=o3c.int64,\n", + " device=device)\n", + "\n", + "print_active_entries(hashmap)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Rehashing and reserve\n", + "Rehashing will be automatically triggered when the initial capacity is exceeded after multiple insertions, where the capacity of the hash map is doubled. Rehashing will change the location (i.e. buffer indices) of the inserted key-value pairs, so an update of the buffer indices in the downstream applications is required." 
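Since rehashing moves entries, a sketch of the refresh pattern this implies (reusing `hashmap`, `keys`, and `o3c` from the cells above; illustrative only, not part of the notebook):

```python
buf_indices, masks = hashmap.find(keys)   # indices are valid right now
# ... further insertions may double the capacity and move entries ...
buf_indices, masks = hashmap.find(keys)   # re-query before touching buffers
fresh_vals = hashmap.value_tensor()[buf_indices[masks].to(o3c.int64)]
```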
+ ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "size: 6\n", + "capacity: 10\n", + "size: 9\n", + "capacity: 10\n", + "all active keys:\n", + " [[300],\n", + " [1500],\n", + " [700],\n", + " [1000],\n", + " [200],\n", + " [400],\n", + " [1200],\n", + " [0],\n", + " [800]]\n", + "Tensor[shape={9, 1}, stride={1, 1}, Int64, CPU:0, 0x5571293b4d80]\n", + "all active values:\n", + " [[3],\n", + " [-1],\n", + " [7],\n", + " [10],\n", + " [2],\n", + " [4],\n", + " [12],\n", + " [0],\n", + " [8]]\n", + "Tensor[shape={9, 1}, stride={1, 1}, Int64, CPU:0, 0x5571293b64d0]\n", + "size: 12\n", + "capacity: 20\n", + "all active keys:\n", + " [[1700],\n", + " [300],\n", + " [1500],\n", + " [700],\n", + " [1000],\n", + " [200],\n", + " [1800],\n", + " [400],\n", + " [1200],\n", + " [1600],\n", + " [0],\n", + " [800]]\n", + "Tensor[shape={12, 1}, stride={1, 1}, Int64, CPU:0, 0x5571293b6410]\n", + "all active values:\n", + " [[17],\n", + " [3],\n", + " [-1],\n", + " [7],\n", + " [10],\n", + " [2],\n", + " [18],\n", + " [4],\n", + " [12],\n", + " [16],\n", + " [0],\n", + " [8]]\n", + "Tensor[shape={12, 1}, stride={1, 1}, Int64, CPU:0, 0x5571293b8250]\n" + ] + } + ], + "source": [ + "print('size:', hashmap.size())\n", + "print('capacity:', hashmap.capacity())\n", + "\n", + "keys = o3c.Tensor([[700], [1200], [1500]], dtype=o3c.int64, device=device)\n", + "vals = o3c.Tensor([[7], [12], [-1]], dtype=o3c.int64, device=device)\n", + "buf_indices, masks = hashmap.insert(keys, vals)\n", + "print('size:', hashmap.size())\n", + "print('capacity:', hashmap.capacity())\n", + "print_active_entries(hashmap)\n", + "\n", + "keys = o3c.Tensor([[1600], [1700], [1800]], dtype=o3c.int64, device=device)\n", + "vals = o3c.Tensor([[16], [17], [18]], dtype=o3c.int64, device=device)\n", + "buf_indices, masks = hashmap.insert(keys, vals)\n", + "print('size:', hashmap.size())\n", + "print('capacity:', hashmap.capacity())\n", + "print_active_entries(hashmap)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Rehashing is slow, as it increases the hash map capacity, collects all the active entries, and inserts them back into the hash map. If we know the capacity beforehand, we can pre-allocate a chunk of memory and avoid rehashing:" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "size: 12\n", + "capacity: 100\n" + ] + } + ], + "source": [ + "hashmap.reserve(100)\n", + "print('size:', hashmap.size())\n", + "print('capacity:', hashmap.capacity())" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Multi-valued hash map" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In real-world applications, we want to map coordinates to complex data structures, e.g. a voxel position to its color and weight. This can be achieved by a multi-valued hash map. \n", + "\n", + "
\n", + "This is not a multimap and does not allow duplicate keys. A multi-valued hash map can be constructed by\n", + "
" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "([1 0 2]\n", + " Tensor[shape={3}, stride={1}, Int32, CPU:0, 0x5571293b83f0],\n", + " [True True True]\n", + " Tensor[shape={3}, stride={1}, Bool, CPU:0, 0x5571293bab60])" + ] + }, + "execution_count": 13, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "mhashmap = o3c.HashMap(capacity,\n", + " key_dtype=o3c.int32,\n", + " key_element_shape=(3,),\n", + " value_dtypes=(o3c.uint8, o3c.float32),\n", + " value_element_shapes=((3,), (1,)),\n", + " device=device)\n", + "voxel_coords = o3c.Tensor([[0, 1, 0], [-1, 2, 3], [3, 4, 1]],\n", + " dtype=o3c.int32,\n", + " device=device)\n", + "voxel_colors = o3c.Tensor([[0, 255, 0], [255, 255, 0], [255, 0, 0]],\n", + " dtype=o3c.uint8,\n", + " device=device)\n", + "voxel_weights = o3c.Tensor([[0.9], [0.1], [0.3]],\n", + " dtype=o3c.float32,\n", + " device=device)\n", + "mhashmap.insert(voxel_coords, (voxel_colors, voxel_weights))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can then query and access by indices with a slightly different routine:" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "found coordinates:\n", + " [[0 1 0]]\n", + "Tensor[shape={1, 3}, stride={3, 1}, Int32, CPU:0, 0x5571293c47c0]\n", + "found colors:\n", + " [[0 255 0]]\n", + "Tensor[shape={1, 3}, stride={3, 1}, UInt8, CPU:0, 0x5571293c47a0]\n", + "found weights:\n", + " [[0.9]]\n", + "Tensor[shape={1, 1}, stride={1, 1}, Float32, CPU:0, 0x5571284a6540]\n" + ] + } + ], + "source": [ + "query_coords = o3c.Tensor([[0, 1, 0]], dtype=o3c.int32, device=device)\n", + "buf_indices, masks = mhashmap.find(query_coords)\n", + "\n", + "valid_keys = query_coords[masks]\n", + "buf_indices = buf_indices[masks].to(o3c.int64)\n", + "valid_colors = mhashmap.value_tensor(0)[buf_indices]\n", + "valid_weights = mhashmap.value_tensor(1)[buf_indices]\n", + "print('found coordinates:\\n', valid_keys)\n", + "print('found colors:\\n', valid_colors)\n", + "print('found weights:\\n', valid_weights)" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "all active keys:\n", + " [[0 1 0],\n", + " [3 4 1],\n", + " [-1 2 3]]\n", + "Tensor[shape={3, 3}, stride={3, 1}, Int32, CPU:0, 0x5571293b1d40]\n", + "active value 0\n", + ": [[0 255 0],\n", + " [255 0 0],\n", + " [255 255 0]]\n", + "Tensor[shape={3, 3}, stride={3, 1}, UInt8, CPU:0, 0x5571293c5610]\n", + "active value 1\n", + ": [[0.9],\n", + " [0.3],\n", + " [0.1]]\n", + "Tensor[shape={3, 1}, stride={1, 1}, Float32, CPU:0, 0x5571293c56d0]\n" + ] + } + ], + "source": [ + "def print_active_multivalue_entries(mhashmap):\n", + " active_buf_indices = mhashmap.active_buf_indices().to(o3c.int64)\n", + "\n", + " active_keys = mhashmap.key_tensor()[active_buf_indices]\n", + " print('all active keys:\\n', active_keys)\n", + "\n", + " n_buffers = len(mhashmap.value_tensors())\n", + " for i in range(n_buffers):\n", + " active_val_i = mhashmap.value_tensor(i)[active_buf_indices]\n", + " print('active value {}\\n:'.format(i), active_val_i)\n", + "\n", + "\n", + "print_active_multivalue_entries(mhashmap)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Hash set\n", + "Hash set is a simplified hash map where we do not care about the values. 
It preserves most of the operations in a hash map, and could be useful for removing duplicates." + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "active keys:\n", + " [[5],\n", + " [9],\n", + " [1],\n", + " [3],\n", + " [11],\n", + " [7]]\n", + "Tensor[shape={6, 1}, stride={1, 1}, Int64, CPU:0, 0x5571284a6b00]\n" + ] + } + ], + "source": [ + "hashset = o3c.HashSet(capacity,\n", + " key_dtype=o3c.int64,\n", + " key_element_shape=(1,),\n", + " device=device)\n", + "keys = o3c.Tensor([1, 3, 5, 7, 5, 3, 1], dtype=o3c.int64,\n", + " device=device).reshape((-1, 1))\n", + "hashset.insert(keys)\n", + "\n", + "keys = o3c.Tensor([5, 7, 9, 11], dtype=o3c.int64, device=device).reshape(\n", + " (-1, 1))\n", + "hashset.insert(keys)\n", + "\n", + "\n", + "def print_active_keys(hashset):\n", + " active_buf_indices = hashset.active_buf_indices().to(o3c.int64)\n", + " active_keys = hashset.key_tensor()[active_buf_indices]\n", + " print('active keys:\\n', active_keys)\n", + "\n", + "\n", + "print_active_keys(hashset)" + ] } - ], - "source": [ - "def print_active_multivalue_entries(mhashmap):\n", - " active_buf_indices = mhashmap.active_buf_indices().to(o3c.int64)\n", - "\n", - " active_keys = mhashmap.key_tensor()[active_buf_indices]\n", - " print('all active keys:\\n', active_keys)\n", - "\n", - " n_buffers = len(mhashmap.value_tensors())\n", - " for i in range(n_buffers):\n", - " active_val_i = mhashmap.value_tensor(i)[active_buf_indices]\n", - " print('active value {}\\n:'.format(i), active_val_i)\n", - "\n", - "\n", - "print_active_multivalue_entries(mhashmap)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Hash set\n", - "Hash set is a simplified hash map where we do not care about the values. It preserves most of the operations in a hash map, and could be useful for removing duplicates." 
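To make the deduplication use case concrete, a small sketch (assuming `HashSet.insert` returns `(buf_indices, masks)` the way `HashMap.insert` does; `o3c` and `device` as above):

```python
coords = o3c.Tensor([[0, 0, 0], [1, 2, 3], [0, 0, 0]],
                    dtype=o3c.int32, device=device)
dedup = o3c.HashSet(coords.shape[0],
                    key_dtype=o3c.int32,
                    key_element_shape=(3,),
                    device=device)
buf_indices, masks = dedup.insert(coords)
unique_coords = coords[masks]  # keeps exactly one copy of [0, 0, 0]
```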
- ] - }, - { - "cell_type": "code", - "execution_count": 16, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "active keys:\n", - " [[5],\n", - " [9],\n", - " [1],\n", - " [3],\n", - " [11],\n", - " [7]]\n", - "Tensor[shape={6, 1}, stride={1, 1}, Int64, CPU:0, 0x5571284a6b00]\n" - ] + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.9" } - ], - "source": [ - "hashset = o3c.HashSet(capacity,\n", - " key_dtype=o3c.int64,\n", - " key_element_shape=(1,),\n", - " device=device)\n", - "keys = o3c.Tensor([1, 3, 5, 7, 5, 3, 1], dtype=o3c.int64,\n", - " device=device).reshape((-1, 1))\n", - "hashset.insert(keys)\n", - "\n", - "keys = o3c.Tensor([5, 7, 9, 11], dtype=o3c.int64, device=device).reshape(\n", - " (-1, 1))\n", - "hashset.insert(keys)\n", - "\n", - "\n", - "def print_active_keys(hashset):\n", - " active_buf_indices = hashset.active_buf_indices().to(o3c.int64)\n", - " active_keys = hashset.key_tensor()[active_buf_indices]\n", - " print('active keys:\\n', active_keys)\n", - "\n", - "\n", - "print_active_keys(hashset)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.7.9" - } - }, - "nbformat": 4, - "nbformat_minor": 4 + "nbformat": 4, + "nbformat_minor": 4 } diff --git a/docs/open3d_ml.rst b/docs/open3d_ml.rst index c8a8a5d68ba..f99d9495417 100644 --- a/docs/open3d_ml.rst +++ b/docs/open3d_ml.rst @@ -3,7 +3,7 @@ Open3D-ML ========= -.. image:: https://raw.githubusercontent.com/isl-org/Open3D-ML/master/docs/images/getting_started_ml_visualizer.gif +.. image:: https://raw.githubusercontent.com/isl-org/Open3D-ML/main/docs/images/getting_started_ml_visualizer.gif :width: 480px :align: center diff --git a/docs/tutorial/visualization/web_visualizer.rst b/docs/tutorial/visualization/web_visualizer.rst index f0bf7d64a93..036b5e1e869 100644 --- a/docs/tutorial/visualization/web_visualizer.rst +++ b/docs/tutorial/visualization/web_visualizer.rst @@ -85,7 +85,7 @@ Example: ./DrawWebRTC google-chrome http://localhost:8888 # Or, open the address in your browser -Checkout `examples/cpp/DrawWebRTC.cpp `_ +Checkout `examples/cpp/DrawWebRTC.cpp `_ for the complete source code. Python server @@ -105,7 +105,7 @@ Example: python examples/python/visualization/draw_webrtc.py google-chrome http://localhost:8888 # Or, open the address in your browser -Checkout `examples/python/visualization/draw_webrtc.py `_ +Checkout `examples/python/visualization/draw_webrtc.py `_ for the complete source code. IP/port binding @@ -162,7 +162,7 @@ or, you may also install JupyterLab instead: jupyter-lab Then, run the example notebook -`docs/jupyter/visualization/jupyter_visualization.ipynb `_. +`docs/jupyter/visualization/jupyter_visualization.ipynb `_. Unlike standalone mode, ``WEBRTC_IP`` and ``WEBRTC_PORT`` are not used in Jupyter mode. 
However, you might want to diff --git a/docs/versions.js b/docs/versions.js index 1522323da11..a27d4fb65a9 100644 --- a/docs/versions.js +++ b/docs/versions.js @@ -14,8 +14,8 @@ document.write('\ [table markup stripped in extraction; the two changed rows relabel the "master" and "master C++" documentation links as "main" and "main C++", while the surrounding rows, including "0.17.0 (release)", are unchanged] diff --git a/util/ci_utils.sh b/util/ci_utils.sh index e490b0d2282..637cac44d32 100644 --- a/util/ci_utils.sh +++ b/util/ci_utils.sh @@ -144,8 +144,8 @@ build_pip_package() { set +u if [ -f "${OPEN3D_ML_ROOT}/set_open3d_ml_root.sh" ]; then echo "Open3D-ML available at ${OPEN3D_ML_ROOT}. Bundling Open3D-ML in wheel." - # the build system of the main repo expects a master branch. make sure master exists - git -C "${OPEN3D_ML_ROOT}" checkout -b master || true + # the build system of the main repo expects a main branch. make sure main exists + git -C "${OPEN3D_ML_ROOT}" checkout -b main || true BUNDLE_OPEN3D_ML=ON else echo "Open3D-ML not available." @@ -256,7 +256,7 @@ test_wheel() { # find "$DLL_PATH"/cpu/ -type f -execdir otool -L {} \; # fi echo - # FIXME: Needed because Open3D-ML master TF and PyTorch is older than dev. + # FIXME: Needed because Open3D-ML main TF and PyTorch are older than dev. if [ $BUILD_CUDA_MODULE == ON ]; then install_python_dependencies with-cuda else @@ -445,7 +445,7 @@ build_docs() { } maximize_ubuntu_github_actions_build_space() { - # https://github.com/easimon/maximize-build-space/blob/master/action.yml + # https://github.com/easimon/maximize-build-space/blob/main/action.yml df -h . # => 26GB $SUDO rm -rf /usr/share/dotnet # ~17GB $SUDO rm -rf /usr/local/lib/android # ~11GB