diff --git a/.github/workflows/aocc-auto.yml b/.github/workflows/aocc-auto.yml index 50eb1d29f32..df6b4592bc7 100644 --- a/.github/workflows/aocc-auto.yml +++ b/.github/workflows/aocc-auto.yml @@ -28,33 +28,33 @@ jobs: sudo apt install -y zlib1g-dev libcurl4-openssl-dev libjpeg-dev wget curl bzip2 sudo apt install -y m4 flex bison cmake libzip-dev openssl build-essential - - name: Install AOCC 4.1.0 + - name: Install AOCC 4.2.0 shell: bash run: | - wget https://download.amd.com/developer/eula/aocc/aocc-4-1/aocc-compiler-4.1.0.tar - tar -xvf aocc-compiler-4.1.0.tar - cd aocc-compiler-4.1.0 + wget https://download.amd.com/developer/eula/aocc/aocc-4-2/aocc-compiler-4.2.0.tar + tar -xvf aocc-compiler-4.2.0.tar + cd aocc-compiler-4.2.0 bash install.sh source /home/runner/work/hdf5/hdf5/setenv_AOCC.sh which clang which flang clang -v - - name: Cache OpenMPI 4.1.5 installation - id: cache-openmpi-4_1_5 + - name: Cache OpenMPI 4.1.6 installation + id: cache-openmpi-4_1_6 uses: actions/cache@v4 with: - path: /home/runner/work/hdf5/hdf5/openmpi-4.1.5-install - key: ${{ runner.os }}-${{ runner.arch }}-openmpi-4_1_5-cache + path: /home/runner/work/hdf5/hdf5/openmpi-4.1.6-install + key: ${{ runner.os }}-${{ runner.arch }}-openmpi-4_1_6-cache - - name: Install OpenMPI 4.1.5 - if: ${{ steps.cache-openmpi-4_1_5.outputs.cache-hit != 'true' }} + - name: Install OpenMPI 4.1.6 + if: ${{ steps.cache-openmpi-4_1_6.outputs.cache-hit != 'true' }} run: | - export LD_LIBRARY_PATH=/home/runner/work/hdf5/hdf5/aocc-compiler-4.1.0/lib:/usr/local/lib - wget https://download.open-mpi.org/release/open-mpi/v4.1/openmpi-4.1.5.tar.gz - tar zxvf openmpi-4.1.5.tar.gz - cd openmpi-4.1.5 - ./configure CC=/home/runner/work/hdf5/hdf5/aocc-compiler-4.1.0/bin/clang FC=/home/runner/work/hdf5/hdf5/aocc-compiler-4.1.0/bin/flang --prefix=/home/runner/work/hdf5/hdf5/openmpi-4.1.5-install + export LD_LIBRARY_PATH=/home/runner/work/hdf5/hdf5/aocc-compiler-4.2.0/lib:/usr/local/lib + wget https://download.open-mpi.org/release/open-mpi/v4.1/openmpi-4.1.6.tar.gz + tar zxvf openmpi-4.1.6.tar.gz + cd openmpi-4.1.6 + ./configure CC=/home/runner/work/hdf5/hdf5/aocc-compiler-4.2.0/bin/clang FC=/home/runner/work/hdf5/hdf5/aocc-compiler-4.2.0/bin/flang --prefix=/home/runner/work/hdf5/hdf5/openmpi-4.1.6-install make make install @@ -62,9 +62,9 @@ jobs: env: NPROCS: 2 run: | - export LD_LIBRARY_PATH=/home/runner/work/hdf5/hdf5/aocc-compiler-4.1.0/lib:/home/runner/work/hdf5/hdf5/openmpi-4.1.5-install/lib:/usr/local/lib - export LD_RUN_PATH=/home/runner/work/hdf5/hdf5/aocc-compiler-4.1.0/lib:/home/runner/work/hdf5/hdf5/openmpi-4.1.5-install/lib:/usr/local/lib - export PATH=/home/runner/work/hdf5/hdf5/openmpi-4.1.5-install/bin:/usr/local/bin:$PATH + export LD_LIBRARY_PATH=/home/runner/work/hdf5/hdf5/aocc-compiler-4.2.0/lib:/home/runner/work/hdf5/hdf5/openmpi-4.1.6-install/lib:/usr/local/lib + export LD_RUN_PATH=/home/runner/work/hdf5/hdf5/aocc-compiler-4.2.0/lib:/home/runner/work/hdf5/hdf5/openmpi-4.1.6-install/lib:/usr/local/lib + export PATH=/home/runner/work/hdf5/hdf5/openmpi-4.1.6-install/bin:/usr/local/bin:$PATH sh ./autogen.sh mkdir "${{ runner.workspace }}/build" cd "${{ runner.workspace }}/build" @@ -73,15 +73,15 @@ jobs: --enable-build-mode=${{ inputs.build_mode }} \ --enable-shared \ --enable-parallel \ - LDFLAGS="-L/home/runner/work/hdf5/hdf5/aocc-compiler-4.1.0/lib \ - -L/home/runner/work/hdf5/hdf5/openmpi-4.1.5-install/lib" + LDFLAGS="-L/home/runner/work/hdf5/hdf5/aocc-compiler-4.2.0/lib \ + 
-L/home/runner/work/hdf5/hdf5/openmpi-4.1.6-install/lib" - name: Autotools Build shell: bash env: NPROCS: 2 run: | - export PATH=/home/runner/work/hdf5/hdf5/openmpi-4.1.5-install/bin:/usr/local/bin:$PATH + export PATH=/home/runner/work/hdf5/hdf5/openmpi-4.1.6-install/bin:/usr/local/bin:$PATH make -j3 working-directory: ${{ runner.workspace }}/build @@ -89,7 +89,7 @@ jobs: env: NPROCS: 2 run: | - export PATH=/home/runner/work/hdf5/hdf5/openmpi-4.1.5-install/bin:/usr/local/bin:$PATH + export PATH=/home/runner/work/hdf5/hdf5/openmpi-4.1.6-install/bin:/usr/local/bin:$PATH make check -j working-directory: ${{ runner.workspace }}/build @@ -97,6 +97,6 @@ jobs: env: NPROCS: 2 run: | - export PATH=/home/runner/work/hdf5/hdf5/openmpi-4.1.5-install/bin:/usr/local/bin:$PATH + export PATH=/home/runner/work/hdf5/hdf5/openmpi-4.1.6-install/bin:/usr/local/bin:$PATH make install working-directory: ${{ runner.workspace }}/build diff --git a/.github/workflows/aocc-cmake.yml b/.github/workflows/aocc-cmake.yml index 20ab7899057..d4796f67f1d 100644 --- a/.github/workflows/aocc-cmake.yml +++ b/.github/workflows/aocc-cmake.yml @@ -28,42 +28,42 @@ jobs: sudo apt install -y zlib1g-dev libcurl4-openssl-dev libjpeg-dev wget curl bzip2 sudo apt install -y m4 flex bison cmake libzip-dev openssl build-essential - - name: Install AOCC 4.1.0 + - name: Install AOCC 4.2.0 shell: bash run: | - wget https://download.amd.com/developer/eula/aocc/aocc-4-1/aocc-compiler-4.1.0.tar - tar -xvf aocc-compiler-4.1.0.tar - cd aocc-compiler-4.1.0 + wget https://download.amd.com/developer/eula/aocc/aocc-4-2/aocc-compiler-4.2.0.tar + tar -xvf aocc-compiler-4.2.0.tar + cd aocc-compiler-4.2.0 bash install.sh source /home/runner/work/hdf5/hdf5/setenv_AOCC.sh which clang which flang clang -v - - name: Cache OpenMPI 4.1.5 installation - id: cache-openmpi-4_1_5 + - name: Cache OpenMPI 4.1.6 installation + id: cache-openmpi-4_1_6 uses: actions/cache@v4 with: - path: /home/runner/work/hdf5/hdf5/openmpi-4.1.5-install - key: ${{ runner.os }}-${{ runner.arch }}-openmpi-4_1_5-cache + path: /home/runner/work/hdf5/hdf5/openmpi-4.1.6-install + key: ${{ runner.os }}-${{ runner.arch }}-openmpi-4_1_6-cache - - name: Install OpenMPI 4.1.5 - if: ${{ steps.cache-openmpi-4_1_5.outputs.cache-hit != 'true' }} + - name: Install OpenMPI 4.1.6 + if: ${{ steps.cache-openmpi-4_1_6.outputs.cache-hit != 'true' }} run: | - export LD_LIBRARY_PATH=/home/runner/work/hdf5/hdf5/aocc-compiler-4.1.0/lib:/usr/local/lib - wget https://download.open-mpi.org/release/open-mpi/v4.1/openmpi-4.1.5.tar.gz - tar zxvf openmpi-4.1.5.tar.gz - cd openmpi-4.1.5 - ./configure CC=/home/runner/work/hdf5/hdf5/aocc-compiler-4.1.0/bin/clang FC=/home/runner/work/hdf5/hdf5/aocc-compiler-4.1.0/bin/flang --prefix=/home/runner/work/hdf5/hdf5/openmpi-4.1.5-install + export LD_LIBRARY_PATH=/home/runner/work/hdf5/hdf5/aocc-compiler-4.2.0/lib:/usr/local/lib + wget https://download.open-mpi.org/release/open-mpi/v4.1/openmpi-4.1.6.tar.gz + tar zxvf openmpi-4.1.6.tar.gz + cd openmpi-4.1.6 + ./configure CC=/home/runner/work/hdf5/hdf5/aocc-compiler-4.2.0/bin/clang FC=/home/runner/work/hdf5/hdf5/aocc-compiler-4.2.0/bin/flang --prefix=/home/runner/work/hdf5/hdf5/openmpi-4.1.6-install make make install - name: CMake Configure shell: bash run: | - export LD_LIBRARY_PATH=/home/runner/work/hdf5/hdf5/aocc-compiler-4.1.0/lib:/home/runner/work/hdf5/hdf5/openmpi-4.1.5-install/lib:/usr/local/lib - export 
LD_RUN_PATH=/home/runner/work/hdf5/hdf5/aocc-compiler-4.1.0/lib:/home/runner/work/hdf5/hdf5/openmpi-4.1.5-install/lib:/usr/local/lib - export PATH=/home/runner/work/hdf5/hdf5/openmpi-4.1.5-install/bin:/usr/local/bin:$PATH + export LD_LIBRARY_PATH=/home/runner/work/hdf5/hdf5/aocc-compiler-4.2.0/lib:/home/runner/work/hdf5/hdf5/openmpi-4.1.6-install/lib:/usr/local/lib + export LD_RUN_PATH=/home/runner/work/hdf5/hdf5/aocc-compiler-4.2.0/lib:/home/runner/work/hdf5/hdf5/openmpi-4.1.6-install/lib:/usr/local/lib + export PATH=/home/runner/work/hdf5/hdf5/openmpi-4.1.6-install/bin:/usr/local/bin:$PATH mkdir "${{ runner.workspace }}/build" cd "${{ runner.workspace }}/build" CC=mpicc cmake -C $GITHUB_WORKSPACE/config/cmake/cacheinit.cmake -G Ninja \ diff --git a/.github/workflows/cmake-bintest.yml b/.github/workflows/cmake-bintest.yml index f7c01b71ae7..fd6f84bad69 100644 --- a/.github/workflows/cmake-bintest.yml +++ b/.github/workflows/cmake-bintest.yml @@ -98,7 +98,9 @@ jobs: runs-on: ubuntu-latest steps: - name: Install CMake Dependencies (Linux) - run: sudo apt-get install ninja-build doxygen graphviz + run: | + sudo apt-get update + sudo apt-get install ninja-build doxygen graphviz - name: Set up JDK 19 uses: actions/setup-java@v4 diff --git a/.github/workflows/cmake-ctest.yml b/.github/workflows/cmake-ctest.yml index a35f9e7367b..b6720a24553 100644 --- a/.github/workflows/cmake-ctest.yml +++ b/.github/workflows/cmake-ctest.yml @@ -122,7 +122,9 @@ jobs: runs-on: ubuntu-latest steps: - name: Install CMake Dependencies (Linux) - run: sudo apt-get install ninja-build graphviz + run: | + sudo apt-get update + sudo apt-get install ninja-build graphviz - name: Install Dependencies uses: ssciwr/doxygen-install@v1 @@ -330,8 +332,9 @@ jobs: steps: - name: Install CMake Dependencies (Linux S3) run: | - sudo apt-get install ninja-build doxygen graphviz - sudo apt install libssl3 libssl-dev libcurl4 libcurl4-openssl-dev + sudo apt-get update + sudo apt-get install ninja-build doxygen graphviz + sudo apt install libssl3 libssl-dev libcurl4 libcurl4-openssl-dev - name: Set file base name (Linux S3) id: set-file-base @@ -493,7 +496,9 @@ jobs: runs-on: ubuntu-latest steps: - name: Install CMake Dependencies (Linux_intel) - run: sudo apt-get install ninja-build graphviz + run: | + sudo apt-get update + sudo apt-get install ninja-build doxygen graphviz - name: Install Dependencies uses: ssciwr/doxygen-install@v1 diff --git a/.github/workflows/intel-cmake.yml b/.github/workflows/intel-cmake.yml index b2b01f5dcdd..b1338444dde 100644 --- a/.github/workflows/intel-cmake.yml +++ b/.github/workflows/intel-cmake.yml @@ -22,7 +22,7 @@ jobs: - name: Install Dependencies (Linux) shell: bash run: | - sudo apt update + sudo apt-get update sudo apt-get install ninja-build doxygen graphviz sudo apt install libssl3 libssl-dev libcurl4 libcurl4-openssl-dev diff --git a/.github/workflows/julia-cmake.yml b/.github/workflows/julia-cmake.yml index 96170b3aec6..1972deefd8f 100644 --- a/.github/workflows/julia-cmake.yml +++ b/.github/workflows/julia-cmake.yml @@ -22,7 +22,7 @@ jobs: - name: Install Dependencies shell: bash run: | - sudo apt update + sudo apt-get update sudo apt-get install ninja-build doxygen graphviz sudo apt install libssl3 libssl-dev libcurl4 libcurl4-openssl-dev sudo apt install -y libaec-dev zlib1g-dev wget curl bzip2 flex bison cmake libzip-dev openssl build-essential diff --git a/.github/workflows/main-auto-par-spc.yml b/.github/workflows/main-auto-par-spc.yml index 5047685338d..01dab1f4834 100644 --- 
a/.github/workflows/main-auto-par-spc.yml +++ b/.github/workflows/main-auto-par-spc.yml @@ -28,7 +28,7 @@ jobs: # libssl, etc. are needed for the ros3 VFD - name: Install Linux Dependencies run: | - sudo apt update + sudo apt-get update sudo apt-get install ninja-build doxygen graphviz sudo apt install libssl3 libssl-dev libcurl4 libcurl4-openssl-dev sudo apt install gcc-12 g++-12 gfortran-12 @@ -86,7 +86,7 @@ jobs: # libssl, etc. are needed for the ros3 VFD - name: Install Linux Dependencies run: | - sudo apt update + sudo apt-get update sudo apt-get install ninja-build doxygen graphviz sudo apt install libssl3 libssl-dev libcurl4 libcurl4-openssl-dev sudo apt install gcc-12 g++-12 gfortran-12 diff --git a/.github/workflows/main-auto-par.yml b/.github/workflows/main-auto-par.yml index 913ff5232c6..53058a8c551 100644 --- a/.github/workflows/main-auto-par.yml +++ b/.github/workflows/main-auto-par.yml @@ -36,7 +36,7 @@ jobs: # SETUP - name: Install Linux Dependencies run: | - sudo apt update + sudo apt-get update sudo apt-get install ninja-build doxygen graphviz sudo apt install libssl3 libssl-dev libcurl4 libcurl4-openssl-dev sudo apt install gcc-12 g++-12 gfortran-12 diff --git a/.github/workflows/main-auto-spc.yml b/.github/workflows/main-auto-spc.yml index 5a129be5ee4..dc8f702c3a3 100644 --- a/.github/workflows/main-auto-spc.yml +++ b/.github/workflows/main-auto-spc.yml @@ -28,7 +28,7 @@ jobs: # libssl, etc. are needed for the ros3 VFD - name: Install Linux Dependencies run: | - sudo apt update + sudo apt-get update sudo apt-get install ninja-build doxygen graphviz sudo apt install libssl3 libssl-dev libcurl4 libcurl4-openssl-dev sudo apt install gcc-12 g++-12 gfortran-12 @@ -89,7 +89,7 @@ jobs: # libssl, etc. are needed for the ros3 VFD - name: Install Linux Dependencies run: | - sudo apt update + sudo apt-get update sudo apt-get install ninja-build doxygen graphviz sudo apt install libssl3 libssl-dev libcurl4 libcurl4-openssl-dev sudo apt install gcc-12 g++-12 gfortran-12 @@ -150,7 +150,7 @@ jobs: # libssl, etc. are needed for the ros3 VFD - name: Install Linux Dependencies run: | - sudo apt update + sudo apt-get update sudo apt-get install ninja-build doxygen graphviz sudo apt install libssl3 libssl-dev libcurl4 libcurl4-openssl-dev sudo apt install gcc-12 g++-12 gfortran-12 @@ -211,7 +211,7 @@ jobs: # libssl, etc. are needed for the ros3 VFD - name: Install Linux Dependencies run: | - sudo apt update + sudo apt-get update sudo apt-get install ninja-build doxygen graphviz sudo apt install libssl3 libssl-dev libcurl4 libcurl4-openssl-dev sudo apt install gcc-12 g++-12 gfortran-12 @@ -272,7 +272,7 @@ jobs: # libssl, etc. are needed for the ros3 VFD - name: Install Linux Dependencies run: | - sudo apt update + sudo apt-get update sudo apt-get install ninja-build doxygen graphviz sudo apt install libssl3 libssl-dev libcurl4 libcurl4-openssl-dev sudo apt install gcc-12 g++-12 gfortran-12 @@ -333,7 +333,7 @@ jobs: # libssl, etc. are needed for the ros3 VFD - name: Install Linux Dependencies run: | - sudo apt update + sudo apt-get update sudo apt-get install ninja-build doxygen graphviz sudo apt install libssl3 libssl-dev libcurl4 libcurl4-openssl-dev sudo apt install gcc-12 g++-12 gfortran-12 @@ -394,7 +394,7 @@ jobs: # libssl, etc. 
are needed for the ros3 VFD - name: Install Linux Dependencies run: | - sudo apt update + sudo apt-get update sudo apt-get install ninja-build doxygen graphviz sudo apt install libssl3 libssl-dev libcurl4 libcurl4-openssl-dev sudo apt install gcc-12 g++-12 gfortran-12 @@ -457,7 +457,7 @@ jobs: # libssl, etc. are needed for the ros3 VFD - name: Install Linux Dependencies run: | - sudo apt update + sudo apt-get update sudo apt-get install ninja-build doxygen graphviz sudo apt install libssl3 libssl-dev libcurl4 libcurl4-openssl-dev sudo apt install gcc-12 g++-12 gfortran-12 diff --git a/.github/workflows/main-auto.yml b/.github/workflows/main-auto.yml index cad8decd49a..4197099d2b7 100644 --- a/.github/workflows/main-auto.yml +++ b/.github/workflows/main-auto.yml @@ -37,7 +37,7 @@ jobs: # SETUP - name: Install Linux Dependencies run: | - sudo apt update + sudo apt-get update sudo apt-get install ninja-build doxygen graphviz sudo apt install libssl3 libssl-dev libcurl4 libcurl4-openssl-dev sudo apt install gcc-12 g++-12 gfortran-12 diff --git a/.github/workflows/main-cmake-par.yml b/.github/workflows/main-cmake-par.yml index 0db234242f3..d6f9a08a636 100644 --- a/.github/workflows/main-cmake-par.yml +++ b/.github/workflows/main-cmake-par.yml @@ -27,7 +27,7 @@ jobs: - name: Install Linux Dependencies run: | - sudo apt update + sudo apt-get update sudo apt-get install ninja-build doxygen graphviz sudo apt install libssl3 libssl-dev libcurl4 libcurl4-openssl-dev sudo apt install gcc-12 g++-12 gfortran-12 diff --git a/.github/workflows/main-cmake-spc.yml b/.github/workflows/main-cmake-spc.yml index f969723a8a8..b8f87cf26be 100644 --- a/.github/workflows/main-cmake-spc.yml +++ b/.github/workflows/main-cmake-spc.yml @@ -26,7 +26,7 @@ jobs: # SETUP - name: Install Linux Dependencies run: | - sudo apt update + sudo apt-get update sudo apt-get install ninja-build doxygen graphviz sudo apt install libssl3 libssl-dev libcurl4 libcurl4-openssl-dev sudo apt install gcc-12 g++-12 gfortran-12 @@ -86,7 +86,7 @@ jobs: # SETUP - name: Install Linux Dependencies run: | - sudo apt update + sudo apt-get update sudo apt-get install ninja-build doxygen graphviz sudo apt install libssl3 libssl-dev libcurl4 libcurl4-openssl-dev sudo apt install gcc-12 g++-12 gfortran-12 @@ -146,7 +146,7 @@ jobs: # SETUP - name: Install Linux Dependencies run: | - sudo apt update + sudo apt-get update sudo apt-get install ninja-build doxygen graphviz sudo apt install libssl3 libssl-dev libcurl4 libcurl4-openssl-dev sudo apt install gcc-12 g++-12 gfortran-12 diff --git a/.github/workflows/main-cmake.yml b/.github/workflows/main-cmake.yml index aac5bc4a240..0a0842f7b94 100644 --- a/.github/workflows/main-cmake.yml +++ b/.github/workflows/main-cmake.yml @@ -124,7 +124,7 @@ jobs: - name: Install CMake Dependencies (Linux) run: | - sudo apt update + sudo apt-get update sudo apt-get install ninja-build graphviz sudo apt install libssl3 libssl-dev libcurl4 libcurl4-openssl-dev sudo apt install gcc-12 g++-12 gfortran-12 diff --git a/.github/workflows/nvhpc-cmake.yml b/.github/workflows/nvhpc-cmake.yml index 5d014de2a54..0473f572df7 100644 --- a/.github/workflows/nvhpc-cmake.yml +++ b/.github/workflows/nvhpc-cmake.yml @@ -22,7 +22,7 @@ jobs: - name: Install Dependencies shell: bash run: | - sudo apt update + sudo apt-get update sudo apt-get install ninja-build doxygen graphviz sudo apt install libssl3 libssl-dev libcurl4 libcurl4-openssl-dev sudo apt install -y libaec-dev zlib1g-dev wget curl bzip2 flex bison cmake libzip-dev openssl 
build-essential diff --git a/config/cmake/ConfigureChecks.cmake b/config/cmake/ConfigureChecks.cmake index aea3fd62443..9f707dfca05 100644 --- a/config/cmake/ConfigureChecks.cmake +++ b/config/cmake/ConfigureChecks.cmake @@ -872,93 +872,113 @@ H5ConversionTests (${HDF_PREFIX}_LLONG_TO_LDOUBLE_CORRECT TRUE "Checking IF corr H5ConversionTests (${HDF_PREFIX}_DISABLE_SOME_LDOUBLE_CONV FALSE "Checking IF the cpu is power9 and cannot correctly converting long double values") #----------------------------------------------------------------------------- -# Check if _Float16 type is available +# Options for enabling/disabling support for non-standard features, datatypes, +# etc. These features should still be checked for at configure time, but these +# options allow disabling of support for these features when compiler support +# is incomplete or broken. In this case, configure time checks may not be +# enough to properly enable/disable a feature and can cause library build +# problems. #----------------------------------------------------------------------------- -message (STATUS "Checking if _Float16 support is available") -set (${HDF_PREFIX}_HAVE__FLOAT16 0) -HDF_CHECK_TYPE_SIZE (_Float16 ${HDF_PREFIX}_SIZEOF__FLOAT16) -if (${HDF_PREFIX}_SIZEOF__FLOAT16) - # Request _Float16 support - set (CMAKE_REQUIRED_DEFINITIONS ${CMAKE_REQUIRED_DEFINITIONS} "-D__STDC_WANT_IEC_60559_TYPES_EXT__") - - # Some compilers expose the _Float16 datatype, but not the macros and - # functions used with the datatype. We need the macros for proper - # datatype conversion support. Check for these here. - CHECK_SYMBOL_EXISTS (FLT16_EPSILON "float.h" h5_have_flt16_epsilon) - CHECK_SYMBOL_EXISTS (FLT16_MIN "float.h" h5_have_flt16_min) - CHECK_SYMBOL_EXISTS (FLT16_MAX "float.h" h5_have_flt16_max) - CHECK_SYMBOL_EXISTS (FLT16_MIN_10_EXP "float.h" h5_have_flt16_min_10_exp) - CHECK_SYMBOL_EXISTS (FLT16_MAX_10_EXP "float.h" h5_have_flt16_max_10_exp) - CHECK_SYMBOL_EXISTS (FLT16_MANT_DIG "float.h" h5_have_flt16_mant_dig) - - if (h5_have_flt16_epsilon AND h5_have_flt16_min AND - h5_have_flt16_max AND h5_have_flt16_min_10_exp AND - h5_have_flt16_max_10_exp AND h5_have_flt16_mant_dig) - # Some compilers like OneAPI on Windows appear to detect _Float16 support - # properly up to this point, and, in the absence of any architecture-specific - # tuning compiler flags, will generate code for H5Tconv.c that performs - # software conversions on _Float16 variables with compiler-internal functions - # such as __extendhfsf2, __truncsfhf2, or __truncdfhf2. However, these - # compilers will fail to link these functions into the build for currently - # unknown reasons and cause the build to fail. Since these are compiler-internal - # functions that we don't appear to have much control over, let's try to - # compile a program that will generate these functions to check for _Float16 - # support. If we fail to compile this program, we will simply disable - # _Float16 support for the time being. - - # Some compilers, notably AppleClang on MacOS 12, will succeed in the - # configure check below when optimization flags like -O3 are manually - # passed in CMAKE_C_FLAGS. However, the build will then fail when it - # reaches compilation of H5Tconv.c because of the issue mentioned above. - # MacOS 13 appears to have fixed this, but, just to be sure, backup and - # clear CMAKE_C_FLAGS before performing these configure checks. 
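As the comment above notes, some toolchains define the _Float16 type but ship a float.h without the FLT16_* limits, which is why the CHECK_SYMBOL_EXISTS probes run before the conversion test. A minimal stand-alone probe in the same spirit (this is only a sketch, not the project's actual configure check) might look like:

```c
/* Sketch of a FLT16_* availability probe; exits non-zero if the limits
 * needed for datatype conversion support are missing. */
#define __STDC_WANT_IEC_60559_TYPES_EXT__
#include <float.h>
#include <stdio.h>

int main(void)
{
#if defined(FLT16_EPSILON) && defined(FLT16_MIN) && defined(FLT16_MAX) && \
    defined(FLT16_MIN_10_EXP) && defined(FLT16_MAX_10_EXP) && defined(FLT16_MANT_DIG)
    /* All of the limits the library needs are exposed by <float.h> */
    printf("FLT16_MANT_DIG = %d\n", (int)FLT16_MANT_DIG);
    return 0;
#else
    /* Macros missing: treat _Float16 support as unavailable */
    return 1;
#endif
}
```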
- set (cmake_c_flags_backup "${CMAKE_C_FLAGS}") - set (CMAKE_C_FLAGS "") - - H5ConversionTests ( - ${HDF_PREFIX}_FLOAT16_CONVERSION_FUNCS_LINK - FALSE - "Checking if compiler can convert _Float16 type with casts" - ) - - set (CMAKE_C_FLAGS "${cmake_c_flags_backup}") - - if (${${HDF_PREFIX}_FLOAT16_CONVERSION_FUNCS_LINK}) - # Finally, MacOS 13 appears to have a bug specifically when converting - # long double values to _Float16. Release builds of the dt_arith test - # would cause any assignments to a _Float16 variable to be elided, - # whereas Debug builds would perform incorrect hardware conversions by - # simply chopping off all the bytes of the value except for the first 2. - # These tests pass on MacOS 14, so let's perform a quick test to check - # if the hardware conversion is done correctly. +# Option to enable or disable all non-standard features. Specific features can +# be enabled or disabled with their respective options below +option (HDF5_ENABLE_NONSTANDARD_FEATURES "Enable support for non-standard programming language features" ON) +# Options for enabling or disabling individual features +option (HDF5_ENABLE_NONSTANDARD_FEATURE_FLOAT16 "Enable support for _Float16 C datatype" ${HDF5_ENABLE_NONSTANDARD_FEATURES}) - # Backup and clear CMAKE_C_FLAGS before performing configure checks +#----------------------------------------------------------------------------- +# Check if _Float16 type is available +#----------------------------------------------------------------------------- +if (HDF5_ENABLE_NONSTANDARD_FEATURE_FLOAT16) + message (STATUS "Checking if _Float16 support is available") + HDF_CHECK_TYPE_SIZE (_Float16 ${HDF_PREFIX}_SIZEOF__FLOAT16) + + if (${HDF_PREFIX}_SIZEOF__FLOAT16) + # Request _Float16 support + set (CMAKE_REQUIRED_DEFINITIONS ${CMAKE_REQUIRED_DEFINITIONS} "-D__STDC_WANT_IEC_60559_TYPES_EXT__") + + # Some compilers expose the _Float16 datatype, but not the macros and + # functions used with the datatype. We need the macros for proper + # datatype conversion support. Check for these here. + CHECK_SYMBOL_EXISTS (FLT16_EPSILON "float.h" h5_have_flt16_epsilon) + CHECK_SYMBOL_EXISTS (FLT16_MIN "float.h" h5_have_flt16_min) + CHECK_SYMBOL_EXISTS (FLT16_MAX "float.h" h5_have_flt16_max) + CHECK_SYMBOL_EXISTS (FLT16_MIN_10_EXP "float.h" h5_have_flt16_min_10_exp) + CHECK_SYMBOL_EXISTS (FLT16_MAX_10_EXP "float.h" h5_have_flt16_max_10_exp) + CHECK_SYMBOL_EXISTS (FLT16_MANT_DIG "float.h" h5_have_flt16_mant_dig) + + if (h5_have_flt16_epsilon AND h5_have_flt16_min AND + h5_have_flt16_max AND h5_have_flt16_min_10_exp AND + h5_have_flt16_max_10_exp AND h5_have_flt16_mant_dig) + # Some compilers like OneAPI on Windows appear to detect _Float16 support + # properly up to this point, and, in the absence of any architecture-specific + # tuning compiler flags, will generate code for H5Tconv.c that performs + # software conversions on _Float16 variables with compiler-internal functions + # such as __extendhfsf2, __truncsfhf2, or __truncdfhf2. However, these + # compilers will fail to link these functions into the build for currently + # unknown reasons and cause the build to fail. Since these are compiler-internal + # functions that we don't appear to have much control over, let's try to + # compile a program that will generate these functions to check for _Float16 + # support. If we fail to compile this program, we will simply disable + # _Float16 support for the time being. 
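To make the comment above concrete: the link check compiles and runs a small program whose casts force the compiler to emit and link the internal helpers (__extendhfsf2, __truncsfhf2, __truncdfhf2) mentioned earlier. The following is only a rough sketch of that kind of probe, not the actual H5_FLOAT16_CONVERSION_FUNCS_LINK_TEST source in config/cmake/ConversionTests.c, and it is only meaningful on compilers that already expose _Float16:

```c
/* Sketch of a cast-conversion probe for _Float16 support */
#define __STDC_WANT_IEC_60559_TYPES_EXT__
#include <float.h>

int main(void)
{
    _Float16    h = (_Float16)1.5f;
    float       f = (float)h;        /* may be lowered to __extendhfsf2 */
    double      d = (double)h;
    long double l = (long double)h;

    h = (_Float16)f;                 /* may be lowered to __truncsfhf2 */
    h = (_Float16)d;                 /* may be lowered to __truncdfhf2 */
    h = (_Float16)l;

    /* Use the results so the casts cannot be optimized away entirely */
    return (f + d + (double)l + (double)h) > 0.0 ? 0 : 1;
}
```

If a program of this shape fails to compile or link, the configure logic above simply disables _Float16 support rather than letting the library build fail later in H5Tconv.c.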
+ + # Some compilers, notably AppleClang on MacOS 12, will succeed in the + # configure check below when optimization flags like -O3 are manually + # passed in CMAKE_C_FLAGS. However, the build will then fail when it + # reaches compilation of H5Tconv.c because of the issue mentioned above. + # MacOS 13 appears to have fixed this, but, just to be sure, backup and + # clear CMAKE_C_FLAGS before performing these configure checks. set (cmake_c_flags_backup "${CMAKE_C_FLAGS}") set (CMAKE_C_FLAGS "") H5ConversionTests ( - ${HDF_PREFIX}_LDOUBLE_TO_FLOAT16_CORRECT - TRUE - "Checking if correctly converting long double to _Float16 values" + ${HDF_PREFIX}_FLOAT16_CONVERSION_FUNCS_LINK + FALSE + "Checking if compiler can convert _Float16 type with casts" ) set (CMAKE_C_FLAGS "${cmake_c_flags_backup}") - if (NOT ${${HDF_PREFIX}_LDOUBLE_TO_FLOAT16_CORRECT}) - message (VERBOSE "Conversions from long double to _Float16 appear to be incorrect. These will be emulated through a soft conversion function.") - endif () + if (${${HDF_PREFIX}_FLOAT16_CONVERSION_FUNCS_LINK}) + # Finally, MacOS 13 appears to have a bug specifically when converting + # long double values to _Float16. Release builds of the dt_arith test + # would cause any assignments to a _Float16 variable to be elided, + # whereas Debug builds would perform incorrect hardware conversions by + # simply chopping off all the bytes of the value except for the first 2. + # These tests pass on MacOS 14, so let's perform a quick test to check + # if the hardware conversion is done correctly. + + # Backup and clear CMAKE_C_FLAGS before performing configure checks + set (cmake_c_flags_backup "${CMAKE_C_FLAGS}") + set (CMAKE_C_FLAGS "") + + H5ConversionTests ( + ${HDF_PREFIX}_LDOUBLE_TO_FLOAT16_CORRECT + TRUE + "Checking if correctly converting long double to _Float16 values" + ) - set (${HDF_PREFIX}_HAVE__FLOAT16 1) + set (CMAKE_C_FLAGS "${cmake_c_flags_backup}") + + if (NOT ${${HDF_PREFIX}_LDOUBLE_TO_FLOAT16_CORRECT}) + message (VERBOSE "Conversions from long double to _Float16 appear to be incorrect. These will be emulated through a soft conversion function.") + endif () - # Check if we can use fabsf16 - CHECK_FUNCTION_EXISTS (fabsf16 ${HDF_PREFIX}_HAVE_FABSF16) + set (${HDF_PREFIX}_HAVE__FLOAT16 1) + + # Check if we can use fabsf16 + CHECK_FUNCTION_EXISTS (fabsf16 ${HDF_PREFIX}_HAVE_FABSF16) + else () + message (STATUS "_Float16 support has been disabled because the compiler couldn't compile and run a test program for _Float16 conversions") + message (STATUS "Check ${CMAKE_BINARY_DIR}/CMakeFiles/CMakeError.log for information on why the test program couldn't be compiled/run") + endif () else () - message (STATUS "_Float16 support has been disabled because the compiler couldn't compile and run a test program for _Float16 conversions") - message (STATUS "Check ${CMAKE_BINARY_DIR}/CMakeFiles/CMakeError.log for information on why the test program couldn't be compiled/run") + message (STATUS "_Float16 support has been disabled since the required macros (FLT16_MAX, FLT16_EPSILON, etc. were not found)") endif () else () - message (STATUS "_Float16 support has been disabled since the required macros (FLT16_MAX, FLT16_EPSILON, etc. 
were not found)") + message (STATUS "_Float16 support has been disabled since the _Float16 type was not found") endif () else () - message (STATUS "_Float16 support has been disabled since the _Float16 type was not found") + set (${HDF_PREFIX}_SIZEOF__FLOAT16 0 CACHE INTERNAL "SizeOf for ${HDF_PREFIX}_SIZEOF__FLOAT16") + unset (${HDF_PREFIX}_HAVE__FLOAT16 CACHE) + unset (${HDF_PREFIX}_LDOUBLE_TO_FLOAT16_CORRECT CACHE) endif () diff --git a/config/cmake/FindMFU.cmake b/config/cmake/FindMFU.cmake index 2a4278a1af6..8bbc870f5f8 100644 --- a/config/cmake/FindMFU.cmake +++ b/config/cmake/FindMFU.cmake @@ -92,7 +92,7 @@ if (NOT MFU_FOUND) message (VERBOSE "${MFU_DIR_MESSAGE}") else () if (MFU_FIND_REQUIRED) - message (FATAL_ERROR "Mfu was NOT found and is Required by this project") + message (FATAL_ERROR "MFU was NOT found and is required.") endif () endif () endif () diff --git a/config/cmake/HDF5UseFortran.cmake b/config/cmake/HDF5UseFortran.cmake index b4172eace4b..98fa75ecfc9 100644 --- a/config/cmake/HDF5UseFortran.cmake +++ b/config/cmake/HDF5UseFortran.cmake @@ -219,7 +219,7 @@ if (${HAVE_ISO_FORTRAN_ENV}) set(CMAKE_REQUIRED_QUIET TRUE) set(save_CMAKE_Fortran_FLAGS ${CMAKE_Fortran_FLAGS}) - if (CMAKE_Fortran_COMPILER_ID STREQUAL "Intel") + if (CMAKE_Fortran_COMPILER_ID MATCHES "Intel") set(CMAKE_Fortran_FLAGS "-warn error") endif () diff --git a/config/cmake/hdf5-config.cmake.in b/config/cmake/hdf5-config.cmake.in index 987411533af..ad1c30bcd80 100644 --- a/config/cmake/hdf5-config.cmake.in +++ b/config/cmake/hdf5-config.cmake.in @@ -54,6 +54,8 @@ set (${HDF5_PACKAGE_NAME}_BUILD_TOOLS @HDF5_BUILD_TOOLS@) set (${HDF5_PACKAGE_NAME}_BUILD_HL_GIF_TOOLS @HDF5_BUILD_HL_GIF_TOOLS@) set (${HDF5_PACKAGE_NAME}_BUILD_STATIC_TOOLS @HDF5_BUILD_STATIC_TOOLS@) #----------------------------------------------------------------------------- +set (${HDF5_PACKAGE_NAME}_ENABLE_NONSTANDARD_FEATURE_FLOAT16 @HDF5_ENABLE_NONSTANDARD_FEATURE_FLOAT16@) +#----------------------------------------------------------------------------- set (${HDF5_PACKAGE_NAME}_ENABLE_Z_LIB_SUPPORT @HDF5_ENABLE_Z_LIB_SUPPORT@) set (${HDF5_PACKAGE_NAME}_ENABLE_SZIP_SUPPORT @HDF5_ENABLE_SZIP_SUPPORT@) set (${HDF5_PACKAGE_NAME}_ENABLE_SZIP_ENCODING @HDF5_ENABLE_SZIP_ENCODING@) diff --git a/config/cmake/libhdf5.settings.cmake.in b/config/cmake/libhdf5.settings.cmake.in index abf33c734f2..bbe6674007d 100644 --- a/config/cmake/libhdf5.settings.cmake.in +++ b/config/cmake/libhdf5.settings.cmake.in @@ -78,6 +78,7 @@ Dimension scales w/ new references: @DIMENSION_SCALES_WITH_NEW_REF@ Default API mapping: @DEFAULT_API_VERSION@ With deprecated public symbols: @HDF5_ENABLE_DEPRECATED_SYMBOLS@ I/O filters (external): @EXTERNAL_FILTERS@ + _Float16 support: @HDF5_ENABLE_NONSTANDARD_FEATURE_FLOAT16@ Map (H5M) API: @H5_HAVE_MAP_API@ Direct VFD: @HDF5_ENABLE_DIRECT_VFD@ Mirror VFD: @H5_HAVE_MIRROR_VFD@ diff --git a/configure.ac b/configure.ac index 962405abcf3..4861bf9d976 100644 --- a/configure.ac +++ b/configure.ac @@ -574,104 +574,156 @@ AC_CHECK_SIZEOF([float]) AC_CHECK_SIZEOF([double]) AC_CHECK_SIZEOF([long double]) +#----------------------------------------------------------------------------- +# Option for enabling/disabling support for non-standard features, datatypes, +# etc. These features should still be checked for at configure time, but these +# options allow disabling of support for these features when compiler support +# is incomplete or broken. 
In this case, configure time checks may not be +# enough to properly enable/disable a feature and can cause library build +# problems. +#----------------------------------------------------------------------------- +AC_MSG_CHECKING([if non-standard feature support is enabled]) +AC_ARG_ENABLE([nonstandard-features], + [AS_HELP_STRING([--enable-nonstandard-features], + [Enable support for non-standard programming language features [default=yes]])], + [NONSTANDARD_FEATURES=$enableval]) + +## Set default +if test "X-$NONSTANDARD_FEATURES" = X- ; then + NONSTANDARD_FEATURES=yes +fi + +case "X-$NONSTANDARD_FEATURES" in + X-yes|X-no) + AC_MSG_RESULT([$NONSTANDARD_FEATURES]) + ;; + *) + AC_MSG_ERROR([Unrecognized value: $NONSTANDARD_FEATURES]) + ;; +esac + ## ---------------------------------------------------------------------- ## Check if _Float16 support is available ## -AC_MSG_NOTICE([checking if _Float16 support is available]) -HAVE__FLOAT16="no" -AC_CHECK_SIZEOF([_Float16]) -if test "$ac_cv_sizeof__Float16" != 0; then - # Some compilers expose the _Float16 datatype, but not the macros and - # functions used with the datatype. We need the macros for proper - # datatype conversion support. Check for these here. - AC_CHECK_DECL([FLT16_EPSILON], [], [], [[ - #define __STDC_WANT_IEC_60559_TYPES_EXT__ - #include ]]) - AC_CHECK_DECL([FLT16_MIN], [], [], [[ - #define __STDC_WANT_IEC_60559_TYPES_EXT__ - #include ]]) - AC_CHECK_DECL([FLT16_MAX], [], [], [[ - #define __STDC_WANT_IEC_60559_TYPES_EXT__ - #include ]]) - AC_CHECK_DECL([FLT16_MIN_10_EXP], [], [], [[ - #define __STDC_WANT_IEC_60559_TYPES_EXT__ - #include ]]) - AC_CHECK_DECL([FLT16_MAX_10_EXP], [], [], [[ - #define __STDC_WANT_IEC_60559_TYPES_EXT__ - #include ]]) - AC_CHECK_DECL([FLT16_MANT_DIG], [], [], [[ - #define __STDC_WANT_IEC_60559_TYPES_EXT__ - #include ]]) - - if test "X$ac_cv_have_decl_FLT16_EPSILON" = "Xyes" && - test "X$ac_cv_have_decl_FLT16_MIN" = "Xyes" && - test "X$ac_cv_have_decl_FLT16_MAX" = "Xyes" && - test "X$ac_cv_have_decl_FLT16_MIN_10_EXP" = "Xyes" && - test "X$ac_cv_have_decl_FLT16_MAX_10_EXP" = "Xyes" && - test "X$ac_cv_have_decl_FLT16_MANT_DIG" = "Xyes" ; then - # Some compilers like OneAPI on Windows appear to detect _Float16 support - # properly up to this point, and, in the absence of any architecture-specific - # tuning compiler flags, will generate code for H5Tconv.c that performs - # software conversions on _Float16 variables with compiler-internal functions - # such as __extendhfsf2, __truncsfhf2, or __truncdfhf2. However, these - # compilers will fail to link these functions into the build for currently - # unknown reasons and cause the build to fail. Since these are compiler-internal - # functions that we don't appear to have much control over, let's try to - # compile a program that will generate these functions to check for _Float16 - # support. If we fail to compile this program, we will simply disable - # _Float16 support for the time being. 
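For downstream code, the practical effect of these options is whether the installed headers advertise _Float16 support. The sketch below is a hedged illustration only: it assumes the generated public configuration header exposes H5_HAVE__FLOAT16 (the H5_-prefixed form of the HAVE__FLOAT16 define above) and that the native half-precision type id is named H5T_NATIVE_FLOAT16; neither name is spelled out in this diff.

```c
#include "hdf5.h"

int main(void)
{
    hsize_t dims[1] = {4};
    hid_t   file  = H5Fcreate("float16_example.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
    hid_t   space = H5Screate_simple(1, dims, NULL);
    hid_t   dset;

#ifdef H5_HAVE__FLOAT16
    /* Assumed names: H5_HAVE__FLOAT16 and H5T_NATIVE_FLOAT16 */
    _Float16 data[4] = {(_Float16)0.5f, (_Float16)1.5f, (_Float16)2.5f, (_Float16)3.5f};

    dset = H5Dcreate2(file, "half_data", H5T_NATIVE_FLOAT16, space, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
    H5Dwrite(dset, H5T_NATIVE_FLOAT16, H5S_ALL, H5S_ALL, H5P_DEFAULT, data);
#else
    /* Fallback when _Float16 support was not compiled into the library */
    float data[4] = {0.5f, 1.5f, 2.5f, 3.5f};

    dset = H5Dcreate2(file, "half_data", H5T_NATIVE_FLOAT, space, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
    H5Dwrite(dset, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, H5P_DEFAULT, data);
#endif

    /* Error handling omitted for brevity */
    H5Dclose(dset);
    H5Sclose(space);
    H5Fclose(file);
    return 0;
}
```

When the feature is switched off through the new option (no/OFF), only the fallback branch compiles.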
- AC_MSG_CHECKING([if compiler can correctly compile and run a test program which converts _Float16 to other types with casts]) - TEST_SRC="`(echo \"#define H5_FLOAT16_CONVERSION_FUNCS_LINK_TEST 1\"; cat $srcdir/config/cmake/ConversionTests.c)`" - AC_CACHE_VAL([hdf5_cv_float16_conversion_funcs_link], - [AC_RUN_IFELSE( - [AC_LANG_SOURCE([$TEST_SRC])], - [hdf5_cv_float16_conversion_funcs_link=yes], [hdf5_cv_float16_conversion_funcs_link=no], [hdf5_cv_float16_conversion_funcs_link=no])]) - - if test ${hdf5_cv_float16_conversion_funcs_link} = "yes"; then - AC_MSG_RESULT([yes]) +AC_MSG_CHECKING([if _Float16 support is enabled]) +AC_ARG_ENABLE([nonstandard-feature-float16], + [AS_HELP_STRING([--enable-nonstandard-feature-float16], + [Enable support for _Float16 C datatype [default=yes]])], + [ENABLE_FLOAT16=$enableval]) - # Finally, MacOS 13 appears to have a bug specifically when converting - # long double values to _Float16. Release builds of the dt_arith test - # would cause any assignments to a _Float16 variable to be elided, - # whereas Debug builds would perform incorrect hardware conversions by - # simply chopping off all the bytes of the value except for the first 2. - # These tests pass on MacOS 14, so let's perform a quick test to check - # if the hardware conversion is done correctly. - AC_MSG_CHECKING([if compiler can correctly convert long double values to _Float16]) - TEST_SRC="`(echo \"#define H5_LDOUBLE_TO_FLOAT16_CORRECT_TEST 1\"; cat $srcdir/config/cmake/ConversionTests.c)`" - if test ${ac_cv_sizeof_long_double} = 0; then - hdf5_cv_ldouble_to_float16_correct=${hdf5_cv_ldouble_to_float16_correct=no} - else - AC_CACHE_VAL([hdf5_cv_ldouble_to_float16_correct], - [AC_RUN_IFELSE( - [AC_LANG_SOURCE([$TEST_SRC])], - [hdf5_cv_ldouble_to_float16_correct=yes], [hdf5_cv_ldouble_to_float16_correct=no], [hdf5_cv_ldouble_to_float16_correct=yes])]) - fi +## Set default +if test "X-$ENABLE_FLOAT16" = X- ; then + ENABLE_FLOAT16=$NONSTANDARD_FEATURES +fi - if test ${hdf5_cv_ldouble_to_float16_correct} = "yes"; then - AC_DEFINE([LDOUBLE_TO_FLOAT16_CORRECT], [1], - [Define if your system can convert long double to _Float16 values correctly.]) +case "X-$ENABLE_FLOAT16" in + X-yes|X-no) + AC_MSG_RESULT([$ENABLE_FLOAT16]) + ;; + *) + AC_MSG_ERROR([Unrecognized value: $ENABLE_FLOAT16]) + ;; +esac + +HAVE__FLOAT16="no" +if test "X$ENABLE_FLOAT16" = "Xyes"; then + AC_MSG_NOTICE([checking if _Float16 support is available]) + AC_CHECK_SIZEOF([_Float16]) + if test "$ac_cv_sizeof__Float16" != 0; then + # Some compilers expose the _Float16 datatype, but not the macros and + # functions used with the datatype. We need the macros for proper + # datatype conversion support. Check for these here. 
+ AC_CHECK_DECL([FLT16_EPSILON], [], [], [[ + #define __STDC_WANT_IEC_60559_TYPES_EXT__ + #include ]]) + AC_CHECK_DECL([FLT16_MIN], [], [], [[ + #define __STDC_WANT_IEC_60559_TYPES_EXT__ + #include ]]) + AC_CHECK_DECL([FLT16_MAX], [], [], [[ + #define __STDC_WANT_IEC_60559_TYPES_EXT__ + #include ]]) + AC_CHECK_DECL([FLT16_MIN_10_EXP], [], [], [[ + #define __STDC_WANT_IEC_60559_TYPES_EXT__ + #include ]]) + AC_CHECK_DECL([FLT16_MAX_10_EXP], [], [], [[ + #define __STDC_WANT_IEC_60559_TYPES_EXT__ + #include ]]) + AC_CHECK_DECL([FLT16_MANT_DIG], [], [], [[ + #define __STDC_WANT_IEC_60559_TYPES_EXT__ + #include ]]) + + if test "X$ac_cv_have_decl_FLT16_EPSILON" = "Xyes" && + test "X$ac_cv_have_decl_FLT16_MIN" = "Xyes" && + test "X$ac_cv_have_decl_FLT16_MAX" = "Xyes" && + test "X$ac_cv_have_decl_FLT16_MIN_10_EXP" = "Xyes" && + test "X$ac_cv_have_decl_FLT16_MAX_10_EXP" = "Xyes" && + test "X$ac_cv_have_decl_FLT16_MANT_DIG" = "Xyes" ; then + # Some compilers like OneAPI on Windows appear to detect _Float16 support + # properly up to this point, and, in the absence of any architecture-specific + # tuning compiler flags, will generate code for H5Tconv.c that performs + # software conversions on _Float16 variables with compiler-internal functions + # such as __extendhfsf2, __truncsfhf2, or __truncdfhf2. However, these + # compilers will fail to link these functions into the build for currently + # unknown reasons and cause the build to fail. Since these are compiler-internal + # functions that we don't appear to have much control over, let's try to + # compile a program that will generate these functions to check for _Float16 + # support. If we fail to compile this program, we will simply disable + # _Float16 support for the time being. + AC_MSG_CHECKING([if compiler can correctly compile and run a test program which converts _Float16 to other types with casts]) + TEST_SRC="`(echo \"#define H5_FLOAT16_CONVERSION_FUNCS_LINK_TEST 1\"; cat $srcdir/config/cmake/ConversionTests.c)`" + AC_CACHE_VAL([hdf5_cv_float16_conversion_funcs_link], + [AC_RUN_IFELSE( + [AC_LANG_SOURCE([$TEST_SRC])], + [hdf5_cv_float16_conversion_funcs_link=yes], [hdf5_cv_float16_conversion_funcs_link=no], [hdf5_cv_float16_conversion_funcs_link=no])]) + + if test ${hdf5_cv_float16_conversion_funcs_link} = "yes"; then AC_MSG_RESULT([yes]) - else - AC_MSG_RESULT([no]) - AC_MSG_NOTICE([Conversions from long double to _Float16 appear to be incorrect. These will be emulated through a soft conversion function.]) - fi - HAVE__FLOAT16="yes" + # Finally, MacOS 13 appears to have a bug specifically when converting + # long double values to _Float16. Release builds of the dt_arith test + # would cause any assignments to a _Float16 variable to be elided, + # whereas Debug builds would perform incorrect hardware conversions by + # simply chopping off all the bytes of the value except for the first 2. + # These tests pass on MacOS 14, so let's perform a quick test to check + # if the hardware conversion is done correctly. 
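The long double check described above boils down to comparing a direct long double to _Float16 conversion against a known-good path. The following is only a sketch of that idea, not the H5_LDOUBLE_TO_FLOAT16_CORRECT_TEST program in config/cmake/ConversionTests.c:

```c
/* Sketch: a broken hardware path for long double -> _Float16 makes the two
 * conversions below disagree even for an exactly representable value. */
#define __STDC_WANT_IEC_60559_TYPES_EXT__
#include <float.h>

int main(void)
{
    long double ld     = 32.0L;                 /* exactly representable in _Float16 */
    _Float16    direct = (_Float16)ld;          /* possibly buggy direct conversion */
    _Float16    viaflt = (_Float16)(float)ld;   /* reference path through float */

    return (direct == viaflt) ? 0 : 1;          /* non-zero exit -> conversion broken */
}
```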
+ AC_MSG_CHECKING([if compiler can correctly convert long double values to _Float16]) + TEST_SRC="`(echo \"#define H5_LDOUBLE_TO_FLOAT16_CORRECT_TEST 1\"; cat $srcdir/config/cmake/ConversionTests.c)`" + if test ${ac_cv_sizeof_long_double} = 0; then + hdf5_cv_ldouble_to_float16_correct=${hdf5_cv_ldouble_to_float16_correct=no} + else + AC_CACHE_VAL([hdf5_cv_ldouble_to_float16_correct], + [AC_RUN_IFELSE( + [AC_LANG_SOURCE([$TEST_SRC])], + [hdf5_cv_ldouble_to_float16_correct=yes], [hdf5_cv_ldouble_to_float16_correct=no], [hdf5_cv_ldouble_to_float16_correct=yes])]) + fi + + if test ${hdf5_cv_ldouble_to_float16_correct} = "yes"; then + AC_DEFINE([LDOUBLE_TO_FLOAT16_CORRECT], [1], + [Define if your system can convert long double to _Float16 values correctly.]) + AC_MSG_RESULT([yes]) + else + AC_MSG_RESULT([no]) + AC_MSG_NOTICE([Conversions from long double to _Float16 appear to be incorrect. These will be emulated through a soft conversion function.]) + fi + + HAVE__FLOAT16="yes" - # Check if we can use fabsf16 - AC_CHECK_FUNC([fabsf16], [AC_DEFINE([HAVE_FABSF16], [1], - [Define if has fabsf16 function])], []) + # Check if we can use fabsf16 + AC_CHECK_FUNC([fabsf16], [AC_DEFINE([HAVE_FABSF16], [1], + [Define if has fabsf16 function])], []) - # Define HAVE__FLOAT16 macro for H5pubconf.h if _Float16 is available. - AC_DEFINE([HAVE__FLOAT16], [1], [Determine if _Float16 is available]) - else - AC_MSG_RESULT([no]) + # Define HAVE__FLOAT16 macro for H5pubconf.h if _Float16 is available. + AC_DEFINE([HAVE__FLOAT16], [1], [Determine if _Float16 is available]) + else + AC_MSG_RESULT([no]) + fi fi - fi - AC_MSG_CHECKING([if _Float16 support is enabled]) - AC_MSG_RESULT([$HAVE__FLOAT16]) + AC_MSG_CHECKING([if _Float16 support is enabled]) + AC_MSG_RESULT([$HAVE__FLOAT16]) + fi +else + AC_DEFINE([SIZEOF__FLOAT16], [0]) fi # Define HAVE__FLOAT16 value to substitute into other files for conditional testing diff --git a/release_docs/RELEASE.txt b/release_docs/RELEASE.txt index d10c6d7f0b3..1013a022c12 100644 --- a/release_docs/RELEASE.txt +++ b/release_docs/RELEASE.txt @@ -47,6 +47,36 @@ New Features Configuration: ------------- + - Added configure options for enabling/disabling non-standard programming + language features + + * Added a new configuration option that allows enabling or disabling of + support for features that are extensions to programming languages, such + as support for the _Float16 datatype: + + CMake: HDF5_ENABLE_NONSTANDARD_FEATURES (ON/OFF) (Default: ON) + Autotools: --enable-nonstandard-features (yes/no) (Default: yes) + + When this option is enabled, configure time checks are still performed + to ensure that a feature can be used properly, but these checks may not + be sufficient when compiler support for a feature is incomplete or broken, + resulting in library build failures. When set to OFF/no, this option + provides a way to disable support for all non-standard features to avoid + these issues. Individual features can still be re-enabled with their + respective configuration options. + + * Added a new configuration option that allows enabling or disabling of + support for the _Float16 C datatype: + + CMake: HDF5_ENABLE_NONSTANDARD_FEATURE_FLOAT16 (ON/OFF) (Default: ON) + Autotools: --enable-nonstandard-feature-float16 (yes/no) (Default: yes) + + While support for the _Float16 C datatype can generally be detected and + used properly, some compilers have incomplete support for the datatype + and will pass configure time checks while still failing to build HDF5. 
+ This option provides a way to disable support for the _Float16 datatype + when the compiler doesn't have the proper support for it. + - Deprecate bin/cmakehdf5 script With the improvements made in CMake since version 3.23 and the addition @@ -182,6 +212,29 @@ New Features Library: -------- + - Relaxed behavior of H5Pset_page_buffer_size() when opening files + + This API call sets the size of a file's page buffer cache. This call + was extremely strict about matching its parameters to the file strategy + and page size used to create the file, requiring a separate open of the + file to obtain these parameters. + + These requirements have been relaxed when using the fapl to open + a previously-created file: + + * When opening a file that does not use the H5F_FSPACE_STRATEGY_PAGE + strategy, the setting is ignored and the file will be opened, but + without a page buffer cache. This was previously an error. + + * When opening a file that has a page size larger than the desired + page buffer cache size, the page buffer cache size will be increased + to the file's page size. This was previously an error. + + The behavior when creating a file using H5Pset_page_buffer_size() is + unchanged. + + Fixes GitHub issue #3382 + - Added support for _Float16 16-bit half-precision floating-point datatype Support for the _Float16 C datatype has been added on platforms where: @@ -461,6 +514,14 @@ Bug Fixes since HDF5-1.14.3 release Library ------- + - Fixed a bug when using array datatypes with certain parent types + + Array datatype conversion would never use a background buffer, even if the + array's parent type (what the array is an array of) required a background + buffer for conversion. This resulted in crashes in some cases when using + an array of compound, variable length, or reference datatypes. Array types + now use a background buffer if needed by the parent type. 
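As a usage illustration of the relaxed H5Pset_page_buffer_size() behavior described in the release entry above (the file name and buffer size are arbitrary placeholders, not taken from the PR):

```c
#include "hdf5.h"

/* Open a pre-existing file through a fapl that requests a 4 KiB page buffer.
 * Under the relaxed rules, a file without the paged free-space strategy simply
 * opens without a page buffer instead of failing, and a request smaller than
 * the file's page size is bumped up to that page size. */
int main(void)
{
    hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);
    hid_t file = H5I_INVALID_HID;

    if (fapl < 0)
        return 1;

    /* 4096-byte page buffer, no minimum metadata/raw-data percentages */
    if (H5Pset_page_buffer_size(fapl, 4096, 0, 0) < 0)
        goto error;

    if ((file = H5Fopen("example.h5", H5F_ACC_RDONLY, fapl)) < 0)
        goto error;

    H5Fclose(file);
    H5Pclose(fapl);
    return 0;

error:
    H5Pclose(fapl);
    return 1;
}
```

Creating a file with a page buffer configuration is unchanged, so only the open path benefits from the relaxation.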
+ - Fixed potential buffer read overflows in H5PB_read H5PB_read previously did not account for the fact that the size of the diff --git a/src/H5Dchunk.c b/src/H5Dchunk.c index 310f774e8da..4727cc5d125 100644 --- a/src/H5Dchunk.c +++ b/src/H5Dchunk.c @@ -7028,9 +7028,6 @@ H5D__chunk_copy(H5F_t *f_src, H5O_storage_chunk_t *storage_src, H5O_layout_chunk bkg = udata.bkg; done: - /* Caller expects that source datatype will be freed */ - if (dt_src && (H5T_close(dt_src) < 0)) - HDONE_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, FAIL, "can't close temporary datatype"); if (dt_dst && (H5T_close(dt_dst) < 0)) HDONE_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, FAIL, "can't close temporary datatype"); if (dt_mem && (H5T_close(dt_mem) < 0)) diff --git a/src/H5Dcompact.c b/src/H5Dcompact.c index 864d4963a6e..de84034f638 100644 --- a/src/H5Dcompact.c +++ b/src/H5Dcompact.c @@ -58,6 +58,7 @@ typedef struct H5D_compact_iovv_memmanage_ud_t { /* Layout operation callbacks */ static herr_t H5D__compact_construct(H5F_t *f, H5D_t *dset); +static herr_t H5D__compact_init(H5F_t *f, const H5D_t *dset, hid_t dapl_id); static bool H5D__compact_is_space_alloc(const H5O_storage_t *storage); static herr_t H5D__compact_io_init(H5D_io_info_t *io_info, H5D_dset_io_info_t *dinfo); static herr_t H5D__compact_iovv_memmanage_cb(hsize_t dst_off, hsize_t src_off, size_t len, void *_udata); @@ -79,7 +80,7 @@ static herr_t H5D__compact_dest(H5D_t *dset); /* Compact storage layout I/O ops */ const H5D_layout_ops_t H5D_LOPS_COMPACT[1] = {{ H5D__compact_construct, /* construct */ - NULL, /* init */ + H5D__compact_init, /* init */ H5D__compact_is_space_alloc, /* is_space_alloc */ NULL, /* is_data_cached */ H5D__compact_io_init, /* io_init */ @@ -198,6 +199,63 @@ H5D__compact_construct(H5F_t *f, H5D_t *dset) FUNC_LEAVE_NOAPI(ret_value) } /* end H5D__compact_construct() */ +/*------------------------------------------------------------------------- + * Function: H5D__compact_init + * + * Purpose: Initialize the info for a compact dataset. This is + * called when the dataset is initialized. + * + * Return: Non-negative on success/Negative on failure + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__compact_init(H5F_t H5_ATTR_UNUSED *f, const H5D_t *dset, hid_t H5_ATTR_UNUSED dapl_id) +{ + hssize_t snelmts; /* Temporary holder for number of elements in dataspace */ + hsize_t nelmts; /* Number of elements in dataspace */ + size_t dt_size; /* Size of datatype */ + hsize_t data_size; /* Dataset size, in bytes */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_PACKAGE + + /* Sanity check */ + assert(dset); + assert(H5D_COMPACT == dset->shared->layout.storage.type); + + /* + * Now that we've read the dataset's datatype, dataspace and + * layout information, perform a quick check for compact datasets + * to ensure that the size of the internal buffer that was + * allocated for the dataset's raw data matches the size of + * the data. A corrupted file can cause a mismatch between the + * two, which might result in buffer overflows during future + * I/O to the dataset. 
+ */ + if (0 == (dt_size = H5T_GET_SIZE(dset->shared->type))) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get datatype size"); + if ((snelmts = H5S_GET_EXTENT_NPOINTS(dset->shared->space)) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get number of elements in dataset's dataspace"); + nelmts = (hsize_t)snelmts; + + /* Compute the size of the dataset's contiguous storage */ + data_size = nelmts * dt_size; + + /* Check for overflow during multiplication */ + if (nelmts != (data_size / dt_size)) + HGOTO_ERROR(H5E_DATASET, H5E_OVERFLOW, FAIL, "size of dataset's storage overflowed"); + + /* Check for mismatch */ + if (dset->shared->layout.storage.u.compact.size != data_size) + HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, + "bad value from dataset header - size of compact dataset's data buffer doesn't match " + "size of dataset data"); + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5D__compact_init() */ + /*------------------------------------------------------------------------- * Function: H5D__compact_is_space_alloc * @@ -606,9 +664,6 @@ H5D__compact_copy(H5F_t *f_src, H5O_storage_compact_t *_storage_src, H5F_t *f_ds storage_dst->dirty = true; done: - /* Caller expects that source datatype will be freed */ - if (dt_src && (H5T_close(dt_src) < 0)) - HDONE_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, FAIL, "can't close temporary datatype"); if (dt_dst && (H5T_close(dt_dst) < 0)) HDONE_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, FAIL, "can't close temporary datatype"); if (dt_mem && (H5T_close(dt_mem) < 0)) diff --git a/src/H5Dcontig.c b/src/H5Dcontig.c index d9137b9479a..11b0a8e4ef8 100644 --- a/src/H5Dcontig.c +++ b/src/H5Dcontig.c @@ -356,6 +356,65 @@ H5D__contig_delete(H5F_t *f, const H5O_storage_t *storage) FUNC_LEAVE_NOAPI(ret_value) } /* end H5D__contig_delete */ +/*------------------------------------------------------------------------- + * Function: H5D__contig_check + * + * Purpose: Sanity check the contiguous info for a dataset. 
+ * + * Return: Non-negative on success/Negative on failure + * + *------------------------------------------------------------------------- + */ +herr_t +H5D__contig_check(const H5F_t *f, const H5O_layout_t *layout, const H5S_extent_t *extent, const H5T_t *dt) +{ + hsize_t nelmts; /* Number of elements in dataspace */ + size_t dt_size; /* Size of datatype */ + hsize_t data_size; /* Raw data size */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_PACKAGE + + /* Sanity check */ + assert(f); + assert(layout); + assert(extent); + assert(dt); + + /* Retrieve the number of elements in the dataspace */ + nelmts = H5S_extent_nelem(extent); + + /* Get the datatype's size */ + if (0 == (dt_size = H5T_GET_SIZE(dt))) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "unable to retrieve size of datatype"); + + /* Compute the size of the dataset's contiguous storage */ + data_size = nelmts * dt_size; + + /* Check for overflow during multiplication */ + if (nelmts != (data_size / dt_size)) + HGOTO_ERROR(H5E_DATASET, H5E_OVERFLOW, FAIL, "size of dataset's storage overflowed"); + + /* Check for invalid (corrupted in the file, probably) dimensions */ + if (H5_addr_defined(layout->storage.u.contig.addr)) { + haddr_t rel_eoa; /* Relative end of file address */ + + if (HADDR_UNDEF == (rel_eoa = H5F_get_eoa(f, H5FD_MEM_DRAW))) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "unable to determine file size"); + + /* Check for invalid dataset size (from bad dimensions) putting the + * dataset elements off the end of the file + */ + if (H5_addr_le((layout->storage.u.contig.addr + data_size), layout->storage.u.contig.addr)) + HGOTO_ERROR(H5E_DATASET, H5E_OVERFLOW, FAIL, "invalid dataset size, likely file corruption"); + if (H5_addr_gt((layout->storage.u.contig.addr + data_size), rel_eoa)) + HGOTO_ERROR(H5E_DATASET, H5E_OVERFLOW, FAIL, "invalid dataset size, likely file corruption"); + } + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5D__contig_check() */ + /*------------------------------------------------------------------------- * Function: H5D__contig_construct * @@ -438,11 +497,10 @@ H5D__contig_construct(H5F_t *f, H5D_t *dset) *------------------------------------------------------------------------- */ static herr_t -H5D__contig_init(H5F_t H5_ATTR_UNUSED *f, const H5D_t *dset, hid_t H5_ATTR_UNUSED dapl_id) +H5D__contig_init(H5F_t *f, const H5D_t *dset, hid_t H5_ATTR_UNUSED dapl_id) { - hsize_t tmp_size; /* Temporary holder for raw data size */ - size_t tmp_sieve_buf_size; /* Temporary holder for sieve buffer size */ - herr_t ret_value = SUCCEED; /* Return value */ + size_t tmp_sieve_buf_size; /* Temporary holder for sieve buffer size */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_PACKAGE @@ -450,6 +508,11 @@ H5D__contig_init(H5F_t H5_ATTR_UNUSED *f, const H5D_t *dset, hid_t H5_ATTR_UNUSE assert(f); assert(dset); + /* Sanity check the dataset's info */ + if (H5D__contig_check(f, &dset->shared->layout, H5S_GET_EXTENT(dset->shared->space), dset->shared->type) < + 0) + HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, "invalid dataset info"); + /* Compute the size of the contiguous storage for versions of the * layout message less than version 3 because versions 1 & 2 would * truncate the dimension sizes to 32-bits of information. 
- QAK 5/26/04 @@ -469,25 +532,16 @@ H5D__contig_init(H5F_t H5_ATTR_UNUSED *f, const H5D_t *dset, hid_t H5_ATTR_UNUSE HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "unable to retrieve size of datatype"); /* Compute the size of the dataset's contiguous storage */ - tmp_size = nelmts * dt_size; - - /* Check for overflow during multiplication */ - if (nelmts != (tmp_size / dt_size)) - HGOTO_ERROR(H5E_DATASET, H5E_OVERFLOW, FAIL, "size of dataset's storage overflowed"); - - /* Assign the dataset's contiguous storage size */ - dset->shared->layout.storage.u.contig.size = tmp_size; - } /* end if */ - else - tmp_size = dset->shared->layout.storage.u.contig.size; + dset->shared->layout.storage.u.contig.size = nelmts * dt_size; + } /* Get the sieve buffer size for the file */ tmp_sieve_buf_size = H5F_SIEVE_BUF_SIZE(dset->oloc.file); /* Adjust the sieve buffer size to the smaller one between the dataset size and the buffer size * from the file access property. (SLU - 2012/3/30) */ - if (tmp_size < tmp_sieve_buf_size) - dset->shared->cache.contig.sieve_buf_size = tmp_size; + if (dset->shared->layout.storage.u.contig.size < tmp_sieve_buf_size) + dset->shared->cache.contig.sieve_buf_size = dset->shared->layout.storage.u.contig.size; else dset->shared->cache.contig.sieve_buf_size = tmp_sieve_buf_size; @@ -1810,9 +1864,6 @@ H5D__contig_copy(H5F_t *f_src, const H5O_storage_contig_t *storage_src, H5F_t *f } /* end while */ done: - /* Caller expects that source datatype will be freed */ - if (dt_src && (H5T_close(dt_src) < 0)) - HDONE_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, FAIL, "can't close temporary datatype"); if (dt_dst && (H5T_close(dt_dst) < 0)) HDONE_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, FAIL, "can't close temporary datatype"); if (dt_mem && (H5T_close(dt_mem) < 0)) diff --git a/src/H5Defl.c b/src/H5Defl.c index 05ca107faec..22348e33fcc 100644 --- a/src/H5Defl.c +++ b/src/H5Defl.c @@ -55,6 +55,7 @@ typedef struct H5D_efl_writevv_ud_t { /* Layout operation callbacks */ static herr_t H5D__efl_construct(H5F_t *f, H5D_t *dset); +static herr_t H5D__efl_init(H5F_t *f, const H5D_t *dset, hid_t dapl_id); static herr_t H5D__efl_io_init(H5D_io_info_t *io_info, H5D_dset_io_info_t *dinfo); static ssize_t H5D__efl_readvv(const H5D_io_info_t *io_info, const H5D_dset_io_info_t *dset_info, size_t dset_max_nseq, size_t *dset_curr_seq, size_t dset_len_arr[], @@ -77,7 +78,7 @@ static herr_t H5D__efl_write(const H5O_efl_t *efl, const H5D_t *dset, haddr_t ad /* External File List (EFL) storage layout I/O ops */ const H5D_layout_ops_t H5D_LOPS_EFL[1] = {{ H5D__efl_construct, /* construct */ - NULL, /* init */ + H5D__efl_init, /* init */ H5D__efl_is_space_alloc, /* is_space_alloc */ NULL, /* is_data_cached */ H5D__efl_io_init, /* io_init */ @@ -137,8 +138,9 @@ H5D__efl_construct(H5F_t *f, H5D_t *dset) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to determine datatype size"); /* Check for storage overflows */ - max_points = H5S_get_npoints_max(dset->shared->space); - max_storage = H5O_efl_total_size(&dset->shared->dcpl_cache.efl); + max_points = H5S_get_npoints_max(dset->shared->space); + if (H5O_efl_total_size(&dset->shared->dcpl_cache.efl, &max_storage) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "unable to retrieve size of external file"); if (H5S_UNLIMITED == max_points) { if (H5O_EFL_UNLIMITED != max_storage) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unlimited dataspace but finite storage"); @@ -149,8 +151,8 @@ H5D__efl_construct(H5F_t *f, H5D_t *dset) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, 
"dataspace size exceeds external storage size"); /* Compute the total size of dataset */ - stmp_size = H5S_GET_EXTENT_NPOINTS(dset->shared->space); - assert(stmp_size >= 0); + if ((stmp_size = H5S_GET_EXTENT_NPOINTS(dset->shared->space)) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "unable to retrieve number of elements in dataspace"); tmp_size = (hsize_t)stmp_size * dt_size; H5_CHECKED_ASSIGN(dset->shared->layout.storage.u.contig.size, hsize_t, tmp_size, hssize_t); @@ -161,6 +163,57 @@ H5D__efl_construct(H5F_t *f, H5D_t *dset) FUNC_LEAVE_NOAPI(ret_value) } /* end H5D__efl_construct() */ +/*------------------------------------------------------------------------- + * Function: H5D__efl_init + * + * Purpose: Initialize the info for a EFL dataset. This is + * called when the dataset is initialized. + * + * Return: Non-negative on success/Negative on failure + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__efl_init(H5F_t H5_ATTR_UNUSED *f, const H5D_t *dset, hid_t H5_ATTR_UNUSED dapl_id) +{ + size_t dt_size; /* Size of datatype */ + hssize_t snelmts; /* Temporary holder for number of elements in dataspace */ + hsize_t nelmts; /* Number of elements in dataspace */ + hsize_t data_size; /* Raw data size */ + hsize_t max_storage; /* Maximum storage size */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_PACKAGE + + /* Sanity check */ + assert(dset); + + /* Retrieve the size of the dataset's datatype */ + if (0 == (dt_size = H5T_get_size(dset->shared->type))) + HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to determine datatype size"); + + /* Retrieve the number of elements in the dataspace */ + if ((snelmts = H5S_GET_EXTENT_NPOINTS(dset->shared->space)) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "unable to retrieve number of elements in dataspace"); + nelmts = (hsize_t)snelmts; + + /* Compute the size of the dataset's contiguous storage */ + data_size = nelmts * dt_size; + + /* Check for overflow during multiplication */ + if (nelmts != (data_size / dt_size)) + HGOTO_ERROR(H5E_DATASET, H5E_OVERFLOW, FAIL, "size of dataset's storage overflowed"); + + /* Check for storage overflows */ + if (H5O_efl_total_size(&dset->shared->dcpl_cache.efl, &max_storage) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "unable to retrieve size of external file"); + if (H5O_EFL_UNLIMITED != max_storage && data_size > max_storage) + HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "dataspace size exceeds external storage size"); + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5D__efl_init() */ + /*------------------------------------------------------------------------- * Function: H5D__efl_is_space_alloc * diff --git a/src/H5Dint.c b/src/H5Dint.c index 3b9d000f523..71ddda387c7 100644 --- a/src/H5Dint.c +++ b/src/H5Dint.c @@ -1673,11 +1673,12 @@ static herr_t H5D__open_oid(H5D_t *dataset, hid_t dapl_id) { H5P_genplist_t *plist; /* Property list */ - H5O_fill_t *fill_prop; /* Pointer to dataset's fill value info */ + H5O_fill_t *fill_prop = NULL; /* Pointer to dataset's fill value info */ unsigned alloc_time_state; /* Allocation time state */ htri_t msg_exists; /* Whether a particular type of message exists */ bool layout_init = false; /* Flag to indicate that chunk information was initialized */ bool must_init_storage = false; + bool fill_init = false; /* Flag to indicate that fill information was initialized */ herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_PACKAGE_TAG(dataset->oloc.addr) @@ -1722,34 +1723,6 @@ 
H5D__open_oid(H5D_t *dataset, hid_t dapl_id) /* Indicate that the layout information was initialized */ layout_init = true; - /* - * Now that we've read the dataset's datatype, dataspace and - * layout information, perform a quick check for compact datasets - * to ensure that the size of the internal buffer that was - * allocated for the dataset's raw data matches the size of - * the data. A corrupted file can cause a mismatch between the - * two, which might result in buffer overflows during future - * I/O to the dataset. - */ - if (H5D_COMPACT == dataset->shared->layout.type) { - hssize_t dset_nelemts = 0; - size_t dset_type_size = H5T_GET_SIZE(dataset->shared->type); - size_t dset_data_size = 0; - - assert(H5D_COMPACT == dataset->shared->layout.storage.type); - - if ((dset_nelemts = H5S_GET_EXTENT_NPOINTS(dataset->shared->space)) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, - "can't get number of elements in dataset's dataspace"); - - dset_data_size = (size_t)dset_nelemts * dset_type_size; - - if (dataset->shared->layout.storage.u.compact.size != dset_data_size) - HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, - "bad value from dataset header - size of compact dataset's data buffer doesn't match " - "size of dataset data"); - } - /* Set up flush append property */ if (H5D__append_flush_setup(dataset, dapl_id)) HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "unable to set up flush append property"); @@ -1763,6 +1736,9 @@ H5D__open_oid(H5D_t *dataset, hid_t dapl_id) if (msg_exists) { if (NULL == H5O_msg_read(&(dataset->oloc), H5O_FILL_NEW_ID, fill_prop)) HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't retrieve message"); + + /* Indicate that the fill information was initialized */ + fill_init = true; } /* end if */ else { /* For backward compatibility, try to retrieve the old fill value message */ @@ -1771,6 +1747,9 @@ H5D__open_oid(H5D_t *dataset, hid_t dapl_id) if (msg_exists) { if (NULL == H5O_msg_read(&(dataset->oloc), H5O_FILL_ID, fill_prop)) HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't retrieve message"); + + /* Indicate that the fill information was initialized */ + fill_init = true; } /* end if */ else { /* Set the space allocation time appropriately, based on the type of dataset storage */ @@ -1809,6 +1788,33 @@ H5D__open_oid(H5D_t *dataset, hid_t dapl_id) (dataset->shared->layout.type == H5D_VIRTUAL && fill_prop->alloc_time == H5D_ALLOC_TIME_INCR)) alloc_time_state = 1; + /* Check if there is a fill value, but no type yet */ + if (fill_prop->buf != NULL && fill_prop->type == NULL) { + H5T_t *tmp_type; /* Temporary pointer to dataset's datatype */ + + /* Copy the dataset type into the fill value message */ + if (NULL == (tmp_type = H5T_copy(dataset->shared->type, H5T_COPY_TRANSIENT))) + HGOTO_ERROR(H5E_DATASET, H5E_CANTCOPY, FAIL, "unable to copy dataset datatype for fill value"); + + /* Check if conversion is necessary on fill buffer, and if fill-value + * buffer is too small to hold the result. 
+ */ + if (!H5T_noop_conv(dataset->shared->type, tmp_type)) { + size_t bkg_size = MAX(H5T_GET_SIZE(tmp_type), H5T_GET_SIZE(dataset->shared->type)); + + assert(fill_prop->size >= 0); + if ((size_t)fill_prop->size < bkg_size) { + if (H5T_close_real(tmp_type) < 0) + HDONE_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "Can't free temporary datatype"); + HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, + "fill value size doesn't match dataset's datatype size"); + } + } + + if (H5T_close_real(tmp_type) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "Can't free temporary datatype"); + } + /* Set revised fill value properties, if they are different from the defaults */ if (H5P_fill_value_cmp(&H5D_def_dset.dcpl_cache.fill, fill_prop, sizeof(H5O_fill_t))) { if (H5P_set(plist, H5D_CRT_FILL_VALUE_NAME, fill_prop) < 0) @@ -1845,6 +1851,8 @@ H5D__open_oid(H5D_t *dataset, hid_t dapl_id) if (H5_addr_defined(dataset->oloc.addr) && H5O_close(&(dataset->oloc), NULL) < 0) HDONE_ERROR(H5E_DATASET, H5E_CLOSEERROR, FAIL, "unable to release object header"); if (dataset->shared) { + if (fill_init) + H5O_msg_reset(H5O_FILL_ID, fill_prop); if (layout_init) if (dataset->shared->layout.ops->dest && (dataset->shared->layout.ops->dest)(dataset) < 0) HDONE_ERROR(H5E_DATASET, H5E_CANTRELEASE, FAIL, "unable to destroy layout info"); diff --git a/src/H5Dpkg.h b/src/H5Dpkg.h index 5a0b586594f..4214341b434 100644 --- a/src/H5Dpkg.h +++ b/src/H5Dpkg.h @@ -712,6 +712,8 @@ H5_DLL herr_t H5D__contig_write(H5D_io_info_t *io_info, H5D_dset_io_info_t *dinf H5_DLL herr_t H5D__contig_copy(H5F_t *f_src, const H5O_storage_contig_t *storage_src, H5F_t *f_dst, H5O_storage_contig_t *storage_dst, H5T_t *src_dtype, H5O_copy_t *cpy_info); H5_DLL herr_t H5D__contig_delete(H5F_t *f, const H5O_storage_t *store); +H5_DLL herr_t H5D__contig_check(const H5F_t *f, const H5O_layout_t *layout, const H5S_extent_t *extent, + const H5T_t *dt); /* Functions that operate on chunked dataset storage */ H5_DLL htri_t H5D__chunk_cacheable(const H5D_io_info_t *io_info, H5D_dset_io_info_t *dset_info, haddr_t caddr, diff --git a/src/H5FDdevelop.h b/src/H5FDdevelop.h index 75e63b14c07..27aa2444102 100644 --- a/src/H5FDdevelop.h +++ b/src/H5FDdevelop.h @@ -25,7 +25,9 @@ /* Public Macros */ /*****************/ -/* H5FD_class_t struct version */ +/** + * Version of the file driver struct, H5FD_class_t + */ #define H5FD_CLASS_VERSION 0x01 /* File driver struct version */ /* Map "fractal heap" header blocks to 'ohdr' type file memory, since its @@ -43,70 +45,92 @@ * * -QAK */ -#define H5FD_MEM_FHEAP_HDR H5FD_MEM_OHDR -#define H5FD_MEM_FHEAP_IBLOCK H5FD_MEM_OHDR -#define H5FD_MEM_FHEAP_DBLOCK H5FD_MEM_LHEAP + +/** + * Fractal heap header block; it is mapped to 'ohdr' type file memory to + * benefit from their similarity. + */ +#define H5FD_MEM_FHEAP_HDR H5FD_MEM_OHDR +/** + * Fractal heap indirect block; it is mapped to 'ohdr' type file memory + * because the indirect blocks are similar to fractal heap header blocks. + */ +#define H5FD_MEM_FHEAP_IBLOCK H5FD_MEM_OHDR +/** + * Fractal heap direct block; it is mapped to 'lheap' type file memory, + * because the fractal heap direct blocks will be replacing local heaps. + */ +#define H5FD_MEM_FHEAP_DBLOCK H5FD_MEM_LHEAP +/** + * Fractal heap 'huge' object; it is mapped to 'draw' type file memory because + * the fractal heap 'huge' objects represent large objects that are directly + * stored in the file. 
+ */ #define H5FD_MEM_FHEAP_HUGE_OBJ H5FD_MEM_DRAW -/* Map "free space" header blocks to 'ohdr' type file memory, since its - * a fair amount of work to add a new kind of file memory and they are similar - * enough to object headers and probably too minor to deserve their own type. - * - * Map "free space" serialized sections to 'lheap' type file memory, since they - * are similar enough to local heap info. - * - * -QAK +/** + * Free space header block; it is mapped to 'ohdr' type file memory to + * benefit from their similarity. + */ +#define H5FD_MEM_FSPACE_HDR H5FD_MEM_OHDR +/** + * Free space serialized section; it is mapped to 'lheap' type file memory + * because it is similar enough to local heap info. */ -#define H5FD_MEM_FSPACE_HDR H5FD_MEM_OHDR #define H5FD_MEM_FSPACE_SINFO H5FD_MEM_LHEAP -/* Map "shared object header message" master table to 'ohdr' type file memory, - * since its a fair amount of work to add a new kind of file memory and they are - * similar enough to object headers and probably too minor to deserve their own - * type. - * - * Map "shared object header message" indices to 'btree' type file memory, - * since they are similar enough to B-tree nodes. - * - * -QAK +/** + * Shared object header message master table; it is mapped to 'ohdr' type file + * memory to benefit from their similarity. */ #define H5FD_MEM_SOHM_TABLE H5FD_MEM_OHDR +/** + * Shared object header message index; it is mapped to 'btree' type file memory + * because the indices are similar enough to B-tree nodes. + */ #define H5FD_MEM_SOHM_INDEX H5FD_MEM_BTREE - -/* Map "extensible array" header blocks to 'ohdr' type file memory, since its - * a fair amount of work to add a new kind of file memory and they are similar - * enough to object headers and probably too minor to deserve their own type. - * - * Map "extensible array" index blocks to 'ohdr' type file memory, since they - * are similar to extensible array header blocks. - * - * Map "extensible array" super blocks to 'btree' type file memory, since they - * are similar enough to B-tree nodes. - * - * Map "extensible array" data blocks & pages to 'lheap' type file memory, since - * they are similar enough to local heap info. - * - * -QAK +/** + * Extensible array header block; it is mapped to 'ohdr' type file memory to + * benefit from their similarity. + */ +#define H5FD_MEM_EARRAY_HDR H5FD_MEM_OHDR +/** + * Extensible array index block; it is mapped to 'ohdr' type file memory because + * these index blocks are similar to extensible array header blocks. + */ +#define H5FD_MEM_EARRAY_IBLOCK H5FD_MEM_OHDR +/** + * Extensible array super block; it is mapped to 'btree' type file memory + * because it is similar enough to B-tree nodes. + */ +#define H5FD_MEM_EARRAY_SBLOCK H5FD_MEM_BTREE +/** + * Extensible array data block; it is mapped to 'lheap' type file memory + * because it is similar enough to local heap info. + */ +#define H5FD_MEM_EARRAY_DBLOCK H5FD_MEM_LHEAP +/** + * Extensible array data block & page; it is mapped to 'lheap' type file memory + * because it is similar enough to local heap info.
*/ -#define H5FD_MEM_EARRAY_HDR H5FD_MEM_OHDR -#define H5FD_MEM_EARRAY_IBLOCK H5FD_MEM_OHDR -#define H5FD_MEM_EARRAY_SBLOCK H5FD_MEM_BTREE -#define H5FD_MEM_EARRAY_DBLOCK H5FD_MEM_LHEAP #define H5FD_MEM_EARRAY_DBLK_PAGE H5FD_MEM_LHEAP - -/* Map "fixed array" header blocks to 'ohdr' type file memory, since its - * a fair amount of work to add a new kind of file memory and they are similar - * enough to object headers and probably too minor to deserve their own type. - * - * Map "fixed array" data blocks & pages to 'lheap' type file memory, since - * they are similar enough to local heap info. - * +/** + * Fixed array header block; it is mapped to 'ohdr' type file memory to + * benefit from their similarity. + */ +#define H5FD_MEM_FARRAY_HDR H5FD_MEM_OHDR +/** + * Fixed array data block; it is mapped to 'lheap' type file memory + * because it is similar enough to local heap info. + */ +#define H5FD_MEM_FARRAY_DBLOCK H5FD_MEM_LHEAP +/** + * Fixed array data block & page; it is mapped to 'lheap' type file memory + * because it is similar enough to local heap info. */ -#define H5FD_MEM_FARRAY_HDR H5FD_MEM_OHDR -#define H5FD_MEM_FARRAY_DBLOCK H5FD_MEM_LHEAP #define H5FD_MEM_FARRAY_DBLK_PAGE H5FD_MEM_LHEAP -/* +/** * A free-list map which maps all types of allocation requests to a single * free list. This is useful for drivers that don't really care about * keeping different requests segregated in the underlying file and which @@ -124,7 +148,7 @@ H5FD_MEM_SUPER /*ohdr*/ \ } -/* +/** * A free-list map which segregates requests into `raw' or `meta' data * pools. */ @@ -139,7 +163,7 @@ H5FD_MEM_SUPER /*ohdr*/ \ } -/* +/** * The default free list map which causes each request type to use it's own * free-list. */ @@ -161,80 +185,163 @@ /* Forward declaration */ typedef struct H5FD_t H5FD_t; -/* Class information for each file driver */ +/** + * Class information for each file driver + */ typedef struct H5FD_class_t { - unsigned version; /**< File driver class struct version number */ + unsigned version; + /**< File driver class struct version number */ + H5FD_class_value_t value; - const char *name; - haddr_t maxaddr; + /**< File driver identifier */ + + const char *name; + /**< File driver name, must be unique */ + + haddr_t maxaddr; + /**< Maximum address for file */ + H5F_close_degree_t fc_degree; + /**< File close behavior degree */ + herr_t (*terminate)(void); + /**< Shuts down this driver */ + hsize_t (*sb_size)(H5FD_t *file); + /**< Gets the size of the private information to be stored in the superblock */ + herr_t (*sb_encode)(H5FD_t *file, char *name /*out*/, unsigned char *p /*out*/); + /**< Encodes driver information for storage in the superblock */ + herr_t (*sb_decode)(H5FD_t *f, const char *name, const unsigned char *p); - size_t fapl_size; + /**< Decodes the superblock information for this driver */ + + size_t fapl_size; /**< Size of driver-specific file access properties */ void *(*fapl_get)(H5FD_t *file); + /**< Returns the file access property list */ + void *(*fapl_copy)(const void *fapl); + /**< Copies the file access property list */ + herr_t (*fapl_free)(void *fapl); + /**< Frees the driver-specific file access property list */ + size_t dxpl_size; + /**< Size of the transfer property list */ + void *(*dxpl_copy)(const void *dxpl); + /**< Copies the transfer property list */ + herr_t (*dxpl_free)(void *dxpl); + /**< Frees the transfer property list */ + H5FD_t *(*open)(const char *name, unsigned flags, hid_t fapl, haddr_t maxaddr); + /**< Create or open an HDF5 file of this driver */ + herr_t
(*close)(H5FD_t *file); + /**< Close an HDF5 file of this driver */ + int (*cmp)(const H5FD_t *f1, const H5FD_t *f2); + /**< Compares two files belonging to this driver */ + herr_t (*query)(const H5FD_t *f1, unsigned long *flags); + /**< Sets the flags that this driver is capable of supporting */ + herr_t (*get_type_map)(const H5FD_t *file, H5FD_mem_t *type_map); + /**< Retrieves the memory type mapping for this file */ + haddr_t (*alloc)(H5FD_t *file, H5FD_mem_t type, hid_t dxpl_id, hsize_t size); + /**< Allocates file memory */ + herr_t (*free)(H5FD_t *file, H5FD_mem_t type, hid_t dxpl_id, haddr_t addr, hsize_t size); + /**< Frees the resources for this driver */ + haddr_t (*get_eoa)(const H5FD_t *file, H5FD_mem_t type); + /**< Gets the address of first byte past the addressed space */ + herr_t (*set_eoa)(H5FD_t *file, H5FD_mem_t type, haddr_t addr); + /**< Sets the end-of-address marker for the file */ + haddr_t (*get_eof)(const H5FD_t *file, H5FD_mem_t type); + /**< Gets the address of first byte past the file-end */ + herr_t (*get_handle)(H5FD_t *file, hid_t fapl, void **file_handle); + /**< Returns the file handle of this file driver */ + herr_t (*read)(H5FD_t *file, H5FD_mem_t type, hid_t dxpl, haddr_t addr, size_t size, void *buffer); + /**< Reads the specified number of bytes of data from the file beginning at the specified + * address into the provided buffer, according to the specified data transfer properties */ + herr_t (*write)(H5FD_t *file, H5FD_mem_t type, hid_t dxpl, haddr_t addr, size_t size, const void *buffer); + /**< Writes the specified number of bytes of data to the file beginning at the specified + * address from the provided buffer, according to the specified data transfer properties */ + herr_t (*read_vector)(H5FD_t *file, hid_t dxpl, uint32_t count, H5FD_mem_t types[], haddr_t addrs[], size_t sizes[], void *bufs[]); + /**< Reads the specified length of data from the file into the provided array */ + herr_t (*write_vector)(H5FD_t *file, hid_t dxpl, uint32_t count, H5FD_mem_t types[], haddr_t addrs[], size_t sizes[], const void *bufs[]); + /**< Writes the specified length of data in the provided array to the file at the specified offsets */ + herr_t (*read_selection)(H5FD_t *file, H5FD_mem_t type, hid_t dxpl_id, size_t count, hid_t mem_spaces[], hid_t file_spaces[], haddr_t offsets[], size_t element_sizes[], void *bufs[] /*out*/); + /**< */ + herr_t (*write_selection)(H5FD_t *file, H5FD_mem_t type, hid_t dxpl_id, size_t count, hid_t mem_spaces[], hid_t file_spaces[], haddr_t offsets[], size_t element_sizes[], const void *bufs[] /*in*/); + /**< */ + herr_t (*flush)(H5FD_t *file, hid_t dxpl_id, hbool_t closing); + /**< Flushes all data to disk */ + herr_t (*truncate)(H5FD_t *file, hid_t dxpl_id, hbool_t closing); + /**< Truncates a file */ + herr_t (*lock)(H5FD_t *file, hbool_t rw); + /**< Places an advisory lock on a file */ + herr_t (*unlock)(H5FD_t *file); + /**< Removes the existing lock on a file */ + herr_t (*del)(const char *name, hid_t fapl); + /**< Deletes a file */ + herr_t (*ctl)(H5FD_t *file, uint64_t op_code, uint64_t flags, const void *input, void **output); + /**< Performs a CTL operation */ + H5FD_mem_t fl_map[H5FD_MEM_NTYPES]; + /**< Free-list map */ } H5FD_class_t; -/* A free list is a singly-linked list of address/size pairs. */ +/** + * A free list is a singly-linked list of address/size pairs. + */ typedef struct H5FD_free_t { haddr_t addr; hsize_t size; struct H5FD_free_t *next; } H5FD_free_t; -/* +/** * The main datatype for each driver. 
Public fields common to all drivers * are declared here and the driver appends private fields in memory. */ struct H5FD_t { - hid_t driver_id; /*driver ID for this file */ - const H5FD_class_t *cls; /*constant class info */ - unsigned long fileno; /* File 'serial' number */ - unsigned access_flags; /* File access flags (from create or open) */ - unsigned long feature_flags; /* VFL Driver feature Flags */ - haddr_t maxaddr; /* For this file, overrides class */ - haddr_t base_addr; /* Base address for HDF5 data w/in file */ + hid_t driver_id; /**< Driver ID for this file */ + const H5FD_class_t *cls; /**< Constant class info */ + unsigned long fileno; /**< File 'serial' number */ + unsigned access_flags; /**< File access flags (from create or open) */ + unsigned long feature_flags; /**< VFL Driver feature Flags */ + haddr_t maxaddr; /**< For this file, overrides class */ + haddr_t base_addr; /**< Base address for HDF5 data w/in file */ /* Space allocation management fields */ - hsize_t threshold; /* Threshold for alignment */ - hsize_t alignment; /* Allocation alignment */ - hbool_t paged_aggr; /* Paged aggregation for file space is enabled or not */ + hsize_t threshold; /**< Threshold for alignment */ + hsize_t alignment; /**< Allocation alignment */ + hbool_t paged_aggr; /**< Paged aggregation for file space is enabled or not */ }; /* VFD initialization function */ diff --git a/src/H5Fint.c b/src/H5Fint.c index 3f5a1379834..45ded824a7e 100644 --- a/src/H5Fint.c +++ b/src/H5Fint.c @@ -2073,7 +2073,21 @@ H5F_open(const char *name, unsigned flags, hid_t fcpl_id, hid_t fapl_id) if (H5F__super_read(file, a_plist, true) < 0) HGOTO_ERROR(H5E_FILE, H5E_READERROR, NULL, "unable to read superblock"); - /* Create the page buffer before initializing the superblock */ + /* Skip trying to create a page buffer if the file space strategy + * stored in the superblock isn't paged. + */ + if (shared->fs_strategy != H5F_FSPACE_STRATEGY_PAGE) + page_buf_size = 0; + + /* If the page buffer is enabled, the strategy is paged, and the size in + * the fapl is smaller than the file's page size, bump the page buffer + * size up to the file's page size. 
+ */ + if (page_buf_size > 0 && shared->fs_strategy == H5F_FSPACE_STRATEGY_PAGE && + shared->fs_page_size > page_buf_size) + page_buf_size = shared->fs_page_size; + + /* Create the page buffer *after* reading the superblock */ if (page_buf_size) if (H5PB_create(shared, page_buf_size, page_buf_min_meta_perc, page_buf_min_raw_perc) < 0) HGOTO_ERROR(H5E_FILE, H5E_CANTINIT, NULL, "unable to create page buffer"); diff --git a/src/H5Gent.c b/src/H5Gent.c index 5b7c064ecfa..40872a041cc 100644 --- a/src/H5Gent.c +++ b/src/H5Gent.c @@ -342,139 +342,77 @@ H5G__ent_reset(H5G_entry_t *ent) } /* end H5G__ent_reset() */ /*------------------------------------------------------------------------- - * Function: H5G__ent_convert + * Function: H5G__ent_to_link * - * Purpose: Convert a link to a symbol table entry + * Purpose: Convert a symbol table entry to a link * - * Return: Success: Non-negative - * Failure: Negative + * Return: Non-negative on success/Negative on failure * *------------------------------------------------------------------------- */ herr_t -H5G__ent_convert(H5F_t *f, H5HL_t *heap, const char *name, const H5O_link_t *lnk, H5O_type_t obj_type, - const void *crt_info, H5G_entry_t *ent) +H5G__ent_to_link(const H5G_entry_t *ent, const H5HL_t *heap, H5O_link_t *lnk) { - size_t name_offset; /* Offset of name in heap */ - herr_t ret_value = SUCCEED; /* Return value */ + const char *name; /* Pointer to link name in heap */ + size_t block_size; /* Size of the heap block */ + bool dup_soft = false; /* xstrdup the symbolic link name or not */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_PACKAGE /* check arguments */ - assert(f); + assert(ent); assert(heap); - assert(name); assert(lnk); - /* Reset the new entry */ - H5G__ent_reset(ent); - - /* Add the new name to the heap */ - if (H5HL_insert(f, heap, strlen(name) + 1, name, &name_offset) < 0) - HGOTO_ERROR(H5E_SYM, H5E_CANTINSERT, FAIL, "unable to insert symbol name into heap"); - ent->name_off = name_offset; - - /* Build correct information for symbol table entry based on link type */ - switch (lnk->type) { - case H5L_TYPE_HARD: - if (obj_type == H5O_TYPE_GROUP) { - const H5G_obj_create_t *gcrt_info = (const H5G_obj_create_t *)crt_info; - - ent->type = gcrt_info->cache_type; - if (ent->type != H5G_NOTHING_CACHED) - ent->cache = gcrt_info->cache; -#ifndef NDEBUG - else { - /* Make sure there is no stab message in the target object - */ - H5O_loc_t targ_oloc; /* Location of link target */ - htri_t stab_exists; /* Whether the target symbol table exists */ - - /* Build target object location */ - if (H5O_loc_reset(&targ_oloc) < 0) - HGOTO_ERROR(H5E_SYM, H5E_CANTRESET, FAIL, "unable to initialize target location"); - targ_oloc.file = f; - targ_oloc.addr = lnk->u.hard.addr; - - /* Check if a symbol table message exists */ - if ((stab_exists = H5O_msg_exists(&targ_oloc, H5O_STAB_ID)) < 0) - HGOTO_ERROR(H5E_SYM, H5E_NOTFOUND, FAIL, "unable to check for STAB message"); - - assert(!stab_exists); - } /* end else */ -#endif /* NDEBUG */ - } /* end if */ - else if (obj_type == H5O_TYPE_UNKNOWN) { - /* Try to retrieve symbol table information for caching */ - H5O_loc_t targ_oloc; /* Location of link target */ - H5O_t *oh; /* Link target object header */ - H5O_stab_t stab; /* Link target symbol table */ - htri_t stab_exists; /* Whether the target symbol table exists */ - - /* Build target object location */ - if (H5O_loc_reset(&targ_oloc) < 0) - HGOTO_ERROR(H5E_SYM, H5E_CANTRESET, FAIL, "unable to initialize target location"); - targ_oloc.file = 
f; - targ_oloc.addr = lnk->u.hard.addr; - - /* Get the object header */ - if (NULL == (oh = H5O_protect(&targ_oloc, H5AC__READ_ONLY_FLAG, false))) - HGOTO_ERROR(H5E_SYM, H5E_CANTPROTECT, FAIL, "unable to protect target object header"); - - /* Check if a symbol table message exists */ - if ((stab_exists = H5O_msg_exists_oh(oh, H5O_STAB_ID)) < 0) { - if (H5O_unprotect(&targ_oloc, oh, H5AC__NO_FLAGS_SET) < 0) - HERROR(H5E_SYM, H5E_CANTUNPROTECT, "unable to release object header"); - HGOTO_ERROR(H5E_SYM, H5E_NOTFOUND, FAIL, "unable to check for STAB message"); - } /* end if */ - - if (stab_exists) { - /* Read symbol table message */ - if (NULL == H5O_msg_read_oh(f, oh, H5O_STAB_ID, &stab)) { - if (H5O_unprotect(&targ_oloc, oh, H5AC__NO_FLAGS_SET) < 0) - HERROR(H5E_SYM, H5E_CANTUNPROTECT, "unable to release object header"); - HGOTO_ERROR(H5E_SYM, H5E_CANTGET, FAIL, "unable to read STAB message"); - } /* end if */ - - /* Cache symbol table message */ - ent->type = H5G_CACHED_STAB; - ent->cache.stab.btree_addr = stab.btree_addr; - ent->cache.stab.heap_addr = stab.heap_addr; - } /* end if */ - else - /* No symbol table message, don't cache anything */ - ent->type = H5G_NOTHING_CACHED; - - if (H5O_unprotect(&targ_oloc, oh, H5AC__NO_FLAGS_SET) < 0) - HGOTO_ERROR(H5E_SYM, H5E_CANTUNPROTECT, FAIL, "unable to release object header"); - } /* end else */ - else - ent->type = H5G_NOTHING_CACHED; + /* Get the size of the heap block */ + block_size = H5HL_heap_get_size(heap); - ent->header = lnk->u.hard.addr; - break; + /* Get pointer to link's name in the heap */ + if (NULL == (name = (const char *)H5HL_offset_into(heap, ent->name_off))) + HGOTO_ERROR(H5E_SYM, H5E_CANTGET, FAIL, "unable to get symbol table link name"); - case H5L_TYPE_SOFT: { - size_t lnk_offset; /* Offset to sym-link value */ + /* Set (default) common info for link */ + lnk->cset = H5F_DEFAULT_CSET; + lnk->corder = 0; + lnk->corder_valid = false; /* Creation order not valid for this link */ + if (NULL == (lnk->name = H5MM_strndup(name, (block_size - ent->name_off)))) + HGOTO_ERROR(H5E_SYM, H5E_CANTGET, FAIL, "unable to duplicate link name"); - /* Insert link value into local heap */ - if (H5HL_insert(f, heap, strlen(lnk->u.soft.name) + 1, lnk->u.soft.name, &lnk_offset) < 0) - HGOTO_ERROR(H5E_SYM, H5E_CANTINIT, FAIL, "unable to write link value to local heap"); + /* Object is a symbolic or hard link */ + if (ent->type == H5G_CACHED_SLINK) { + const char *s; /* Pointer to link value */ - ent->type = H5G_CACHED_SLINK; - ent->cache.slink.lval_offset = lnk_offset; - } break; + if (NULL == (s = (const char *)H5HL_offset_into(heap, ent->cache.slink.lval_offset))) + HGOTO_ERROR(H5E_SYM, H5E_CANTGET, FAIL, "unable to get symbolic link name"); - case H5L_TYPE_ERROR: - case H5L_TYPE_EXTERNAL: - case H5L_TYPE_MAX: - default: - HGOTO_ERROR(H5E_SYM, H5E_BADVALUE, FAIL, "unrecognized link type"); - } /* end switch */ + /* Copy the link value */ + if (NULL == (lnk->u.soft.name = H5MM_strndup(s, (block_size - ent->cache.slink.lval_offset)))) + HGOTO_ERROR(H5E_SYM, H5E_CANTGET, FAIL, "unable to duplicate symbolic link name"); + + dup_soft = true; + + /* Set link type */ + lnk->type = H5L_TYPE_SOFT; + } /* end if */ + else { + /* Set address of object */ + lnk->u.hard.addr = ent->header; + + /* Set link type */ + lnk->type = H5L_TYPE_HARD; + } /* end else */ done: + if (ret_value < 0) { + if (lnk->name) + H5MM_xfree(lnk->name); + if (ent->type == H5G_CACHED_SLINK && dup_soft) + H5MM_xfree(lnk->u.soft.name); + } + FUNC_LEAVE_NOAPI(ret_value) -} /* end 
H5G__ent_convert() */ +} /* end H5G__ent_to_link() */ /*------------------------------------------------------------------------- * Function: H5G__ent_debug diff --git a/src/H5Glink.c b/src/H5Glink.c index b862947f195..1982f54984c 100644 --- a/src/H5Glink.c +++ b/src/H5Glink.c @@ -175,70 +175,6 @@ H5G__link_cmp_corder_dec(const void *lnk1, const void *lnk2) FUNC_LEAVE_NOAPI(ret_value) } /* end H5G__link_cmp_corder_dec() */ -/*------------------------------------------------------------------------- - * Function: H5G__ent_to_link - * - * Purpose: Convert a symbol table entry to a link - * - * Return: Non-negative on success/Negative on failure - * - *------------------------------------------------------------------------- - */ -herr_t -H5G__ent_to_link(H5O_link_t *lnk, const H5HL_t *heap, const H5G_entry_t *ent, const char *name) -{ - bool dup_soft = false; /* xstrdup the symbolic link name or not */ - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_PACKAGE - - /* check arguments */ - assert(lnk); - assert(heap); - assert(ent); - assert(name); - - /* Set (default) common info for link */ - lnk->cset = H5F_DEFAULT_CSET; - lnk->corder = 0; - lnk->corder_valid = false; /* Creation order not valid for this link */ - if ((lnk->name = H5MM_xstrdup(name)) == NULL) - HGOTO_ERROR(H5E_LINK, H5E_CANTGET, FAIL, "unable to duplicate link name"); - - /* Object is a symbolic or hard link */ - if (ent->type == H5G_CACHED_SLINK) { - const char *s; /* Pointer to link value */ - - if ((s = (const char *)H5HL_offset_into(heap, ent->cache.slink.lval_offset)) == NULL) - HGOTO_ERROR(H5E_LINK, H5E_CANTGET, FAIL, "unable to get symbolic link name"); - - /* Copy the link value */ - if ((lnk->u.soft.name = H5MM_xstrdup(s)) == NULL) - HGOTO_ERROR(H5E_LINK, H5E_CANTGET, FAIL, "unable to duplicate symbolic link name"); - - dup_soft = true; - - /* Set link type */ - lnk->type = H5L_TYPE_SOFT; - } /* end if */ - else { - /* Set address of object */ - lnk->u.hard.addr = ent->header; - - /* Set link type */ - lnk->type = H5L_TYPE_HARD; - } /* end else */ - -done: - if (ret_value < 0) { - if (lnk->name) - H5MM_xfree(lnk->name); - if (ent->type == H5G_CACHED_SLINK && dup_soft) - H5MM_xfree(lnk->u.soft.name); - } - FUNC_LEAVE_NOAPI(ret_value) -} /* end H5G__ent_to_link() */ - /*------------------------------------------------------------------------- * Function: H5G_link_to_info * @@ -317,6 +253,140 @@ H5G_link_to_info(const H5O_loc_t *link_loc, const H5O_link_t *lnk, H5L_info2_t * FUNC_LEAVE_NOAPI(ret_value) } /* end H5G_link_to_info() */ +/*------------------------------------------------------------------------- + * Function: H5G__link_to_ent + * + * Purpose: Convert a link to a symbol table entry + * + * Return: Success: Non-negative + * Failure: Negative + * + *------------------------------------------------------------------------- + */ +herr_t +H5G__link_to_ent(H5F_t *f, H5HL_t *heap, const H5O_link_t *lnk, H5O_type_t obj_type, const void *crt_info, + H5G_entry_t *ent) +{ + size_t name_offset; /* Offset of name in heap */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_PACKAGE + + /* check arguments */ + assert(f); + assert(heap); + assert(lnk && lnk->name); + + /* Reset the new entry */ + H5G__ent_reset(ent); + + /* Add the new name to the heap */ + if (H5HL_insert(f, heap, strlen(lnk->name) + 1, lnk->name, &name_offset) < 0) + HGOTO_ERROR(H5E_SYM, H5E_CANTINSERT, FAIL, "unable to insert symbol name into heap"); + ent->name_off = name_offset; + + /* Build correct information for symbol 
table entry based on link type */ + switch (lnk->type) { + case H5L_TYPE_HARD: + if (obj_type == H5O_TYPE_GROUP) { + const H5G_obj_create_t *gcrt_info = (const H5G_obj_create_t *)crt_info; + + ent->type = gcrt_info->cache_type; + if (ent->type != H5G_NOTHING_CACHED) + ent->cache = gcrt_info->cache; +#ifndef NDEBUG + else { + /* Make sure there is no stab message in the target object + */ + H5O_loc_t targ_oloc; /* Location of link target */ + htri_t stab_exists; /* Whether the target symbol table exists */ + + /* Build target object location */ + if (H5O_loc_reset(&targ_oloc) < 0) + HGOTO_ERROR(H5E_SYM, H5E_CANTRESET, FAIL, "unable to initialize target location"); + targ_oloc.file = f; + targ_oloc.addr = lnk->u.hard.addr; + + /* Check if a symbol table message exists */ + if ((stab_exists = H5O_msg_exists(&targ_oloc, H5O_STAB_ID)) < 0) + HGOTO_ERROR(H5E_SYM, H5E_NOTFOUND, FAIL, "unable to check for STAB message"); + + assert(!stab_exists); + } /* end else */ +#endif /* NDEBUG */ + } /* end if */ + else if (obj_type == H5O_TYPE_UNKNOWN) { + /* Try to retrieve symbol table information for caching */ + H5O_loc_t targ_oloc; /* Location of link target */ + H5O_t *oh; /* Link target object header */ + H5O_stab_t stab; /* Link target symbol table */ + htri_t stab_exists; /* Whether the target symbol table exists */ + + /* Build target object location */ + if (H5O_loc_reset(&targ_oloc) < 0) + HGOTO_ERROR(H5E_SYM, H5E_CANTRESET, FAIL, "unable to initialize target location"); + targ_oloc.file = f; + targ_oloc.addr = lnk->u.hard.addr; + + /* Get the object header */ + if (NULL == (oh = H5O_protect(&targ_oloc, H5AC__READ_ONLY_FLAG, false))) + HGOTO_ERROR(H5E_SYM, H5E_CANTPROTECT, FAIL, "unable to protect target object header"); + + /* Check if a symbol table message exists */ + if ((stab_exists = H5O_msg_exists_oh(oh, H5O_STAB_ID)) < 0) { + if (H5O_unprotect(&targ_oloc, oh, H5AC__NO_FLAGS_SET) < 0) + HERROR(H5E_SYM, H5E_CANTUNPROTECT, "unable to release object header"); + HGOTO_ERROR(H5E_SYM, H5E_NOTFOUND, FAIL, "unable to check for STAB message"); + } /* end if */ + + if (stab_exists) { + /* Read symbol table message */ + if (NULL == H5O_msg_read_oh(f, oh, H5O_STAB_ID, &stab)) { + if (H5O_unprotect(&targ_oloc, oh, H5AC__NO_FLAGS_SET) < 0) + HERROR(H5E_SYM, H5E_CANTUNPROTECT, "unable to release object header"); + HGOTO_ERROR(H5E_SYM, H5E_CANTGET, FAIL, "unable to read STAB message"); + } /* end if */ + + /* Cache symbol table message */ + ent->type = H5G_CACHED_STAB; + ent->cache.stab.btree_addr = stab.btree_addr; + ent->cache.stab.heap_addr = stab.heap_addr; + } /* end if */ + else + /* No symbol table message, don't cache anything */ + ent->type = H5G_NOTHING_CACHED; + + if (H5O_unprotect(&targ_oloc, oh, H5AC__NO_FLAGS_SET) < 0) + HGOTO_ERROR(H5E_SYM, H5E_CANTUNPROTECT, FAIL, "unable to release object header"); + } /* end else */ + else + ent->type = H5G_NOTHING_CACHED; + + ent->header = lnk->u.hard.addr; + break; + + case H5L_TYPE_SOFT: { + size_t lnk_offset; /* Offset to sym-link value */ + + /* Insert link value into local heap */ + if (H5HL_insert(f, heap, strlen(lnk->u.soft.name) + 1, lnk->u.soft.name, &lnk_offset) < 0) + HGOTO_ERROR(H5E_SYM, H5E_CANTINIT, FAIL, "unable to write link value to local heap"); + + ent->type = H5G_CACHED_SLINK; + ent->cache.slink.lval_offset = lnk_offset; + } break; + + case H5L_TYPE_ERROR: + case H5L_TYPE_EXTERNAL: + case H5L_TYPE_MAX: + default: + HGOTO_ERROR(H5E_SYM, H5E_BADVALUE, FAIL, "unrecognized link type"); + } /* end switch */ + +done: + 
FUNC_LEAVE_NOAPI(ret_value) +} /* end H5G__link_to_ent() */ + /*------------------------------------------------------------------------- * Function: H5G__link_to_loc * diff --git a/src/H5Gloc.c b/src/H5Gloc.c index 1ba45dee223..d04c582d009 100644 --- a/src/H5Gloc.c +++ b/src/H5Gloc.c @@ -553,7 +553,7 @@ H5G__loc_insert(H5G_loc_t *grp_loc, char *name, H5G_loc_t *obj_loc, H5O_type_t o lnk.u.hard.addr = obj_loc->oloc->addr; /* Insert new group into current group's symbol table */ - if (H5G_obj_insert(grp_loc->oloc, name, &lnk, true, obj_type, crt_info) < 0) + if (H5G_obj_insert(grp_loc->oloc, &lnk, true, obj_type, crt_info) < 0) HGOTO_ERROR(H5E_SYM, H5E_CANTINSERT, FAIL, "unable to insert object"); /* Set the name of the object location */ @@ -644,24 +644,21 @@ H5G__loc_addr_cb(H5G_loc_t H5_ATTR_UNUSED *grp_loc /*in*/, const char H5_ATTR_UN const H5O_link_t H5_ATTR_UNUSED *lnk, H5G_loc_t *obj_loc, void *_udata /*in,out*/, H5G_own_loc_t *own_loc /*out*/) { - haddr_t *udata = (haddr_t *)_udata; /* User data passed in */ - herr_t ret_value = SUCCEED; /* Return value */ + haddr_t *udata = (haddr_t *)_udata; /* User data passed in */ - FUNC_ENTER_PACKAGE + FUNC_ENTER_PACKAGE_NOERR /* Check if the name in this group resolved to a valid link */ if (obj_loc == NULL) - HGOTO_ERROR(H5E_SYM, H5E_NOTFOUND, FAIL, "name doesn't exist"); - - /* Set address of object */ - *udata = obj_loc->oloc->addr; + *udata = HADDR_UNDEF; /* No object found */ + else + *udata = obj_loc->oloc->addr; /* Set address of object */ -done: /* Indicate that this callback didn't take ownership of the group * * location for the object */ *own_loc = H5G_OWN_NONE; - FUNC_LEAVE_NOAPI(ret_value) + FUNC_LEAVE_NOAPI(SUCCEED) } /* end H5G__loc_addr_cb() */ /*------------------------------------------------------------------------- diff --git a/src/H5Gmodule.h b/src/H5Gmodule.h index 7ae700a1a36..a06d44cea75 100644 --- a/src/H5Gmodule.h +++ b/src/H5Gmodule.h @@ -775,8 +775,8 @@ * \image html Groups_fig9_aa.gif "c) A link named dset2 to the same dataset is created in /group2." * * - * \image html Groups_fig9_bb.gif "d) The link from /group1 to dset1 is removed. The dataset is - * still in the file, but can be accessed only as /group2/dset2." + * \image html Groups_fig9_bb.gif "d) The link from /group1 to dset1 is removed." + * The dataset is still in the file, but can be accessed only as /group2/dset2. * * * @@ -811,8 +811,7 @@ * * * - * \image html Groups_fig10_c.gif "c) dset1 has three names: /group1/dset1, /group2/dset2, and - * /group1/GXX/dset2." + * \image html Groups_fig10_c.gif "c) dset1 has 3 names: /group1/dset1, /group2/dset2, and /group1/GXX/dset2." * * * \image html Groups_fig10_d.gif "d) dset1 has an infinite number of available path names." 
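As a brief illustrative sketch of the link behavior described in the H5Gmodule.h figures above (the file name example.h5 and the group/dataset names are hypothetical and not part of this patch), the same sequence can be expressed with the public HDF5 link API:

    #include "hdf5.h"

    /* Assumes example.h5 already contains the dataset /group1/dset1 and the group /group2 */
    hid_t file = H5Fopen("example.h5", H5F_ACC_RDWR, H5P_DEFAULT);

    /* Create a second hard link, /group2/dset2, to the same dataset */
    H5Lcreate_hard(file, "/group1/dset1", file, "/group2/dset2", H5P_DEFAULT, H5P_DEFAULT);

    /* Remove the original link; the dataset stays in the file but is now
     * reachable only as /group2/dset2 */
    H5Ldelete(file, "/group1/dset1", H5P_DEFAULT);

    H5Fclose(file);

Removing the last remaining hard link (here, /group2/dset2) would drop the object's reference count to zero and allow its file space to be reclaimed.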
diff --git a/src/H5Gnode.c b/src/H5Gnode.c index b4bd1214bf8..f211e74e041 100644 --- a/src/H5Gnode.c +++ b/src/H5Gnode.c @@ -344,6 +344,7 @@ H5G__node_cmp2(void *_lt_key, void *_udata, void *_rt_key) H5G_node_key_t *lt_key = (H5G_node_key_t *)_lt_key; H5G_node_key_t *rt_key = (H5G_node_key_t *)_rt_key; const char *s1, *s2; + size_t max_len; int ret_value = SUCCEED; /* Return value */ FUNC_ENTER_PACKAGE @@ -359,8 +360,14 @@ H5G__node_cmp2(void *_lt_key, void *_udata, void *_rt_key) if ((s2 = (const char *)H5HL_offset_into(udata->heap, rt_key->offset)) == NULL) HGOTO_ERROR(H5E_SYM, H5E_CANTGET, FAIL, "unable to get key name"); + /* Compute maximum length of string to compare */ + if (rt_key->offset > lt_key->offset) + max_len = udata->block_size - rt_key->offset; + else + max_len = udata->block_size - lt_key->offset; + /* Set return value */ - ret_value = strcmp(s1, s2); + ret_value = strncmp(s1, s2, max_len); done: FUNC_LEAVE_NOAPI(ret_value) @@ -406,13 +413,13 @@ H5G__node_cmp3(void *_lt_key, void *_udata, void *_rt_key) /* left side */ if ((s = (const char *)H5HL_offset_into(udata->heap, lt_key->offset)) == NULL) HGOTO_ERROR(H5E_SYM, H5E_CANTGET, FAIL, "unable to get key name"); - if (strcmp(udata->name, s) <= 0) + if (strncmp(udata->name, s, (udata->block_size - lt_key->offset)) <= 0) ret_value = (-1); else { /* right side */ if ((s = (const char *)H5HL_offset_into(udata->heap, rt_key->offset)) == NULL) HGOTO_ERROR(H5E_SYM, H5E_CANTGET, FAIL, "unable to get key name"); - if (strcmp(udata->name, s) > 0) + if (strncmp(udata->name, s, (udata->block_size - rt_key->offset)) > 0) ret_value = 1; } /* end else */ @@ -476,7 +483,7 @@ H5G__node_found(H5F_t *f, haddr_t addr, const void H5_ATTR_UNUSED *_lt_key, bool if ((s = (const char *)H5HL_offset_into(udata->common.heap, sn->entry[idx].name_off)) == NULL) HGOTO_ERROR(H5E_SYM, H5E_CANTGET, FAIL, "unable to get symbol table name"); - cmp = strcmp(udata->common.name, s); + cmp = strncmp(udata->common.name, s, (udata->common.block_size - sn->entry[idx].name_off)); if (cmp < 0) rt = idx; @@ -574,7 +581,7 @@ H5G__node_insert(H5F_t *f, haddr_t addr, void H5_ATTR_UNUSED *_lt_key, bool H5_A HGOTO_ERROR(H5E_SYM, H5E_CANTGET, H5B_INS_ERROR, "unable to get symbol table name"); /* Check if symbol is already present */ - if (0 == (cmp = strcmp(udata->common.name, s))) + if (0 == (cmp = strncmp(udata->common.name, s, (udata->common.block_size - sn->entry[idx].name_off)))) HGOTO_ERROR(H5E_SYM, H5E_CANTINSERT, H5B_INS_ERROR, "symbol is already present in symbol table"); if (cmp < 0) @@ -585,8 +592,7 @@ H5G__node_insert(H5F_t *f, haddr_t addr, void H5_ATTR_UNUSED *_lt_key, bool H5_A idx += cmp > 0 ? 
1 : 0; /* Convert link information & name to symbol table entry */ - if (H5G__ent_convert(f, udata->common.heap, udata->common.name, udata->lnk, udata->obj_type, - udata->crt_info, &ent) < 0) + if (H5G__link_to_ent(f, udata->common.heap, udata->lnk, udata->obj_type, udata->crt_info, &ent) < 0) HGOTO_ERROR(H5E_SYM, H5E_CANTCONVERT, H5B_INS_ERROR, "unable to convert link"); /* Determine where to place entry in node */ @@ -727,7 +733,7 @@ H5G__node_remove(H5F_t *f, haddr_t addr, void H5_ATTR_NDEBUG_UNUSED *_lt_key /*i idx = (lt + rt) / 2; if ((s = (const char *)H5HL_offset_into(udata->common.heap, sn->entry[idx].name_off)) == NULL) HGOTO_ERROR(H5E_SYM, H5E_CANTGET, H5B_INS_ERROR, "unable to get symbol table name"); - cmp = strcmp(udata->common.name, s); + cmp = strncmp(udata->common.name, s, (udata->common.block_size - sn->entry[idx].name_off)); if (cmp < 0) rt = idx; else @@ -740,7 +746,11 @@ H5G__node_remove(H5F_t *f, haddr_t addr, void H5_ATTR_NDEBUG_UNUSED *_lt_key /*i /* Get a pointer to the name of the link */ if (NULL == (lnk.name = (char *)H5HL_offset_into(udata->common.heap, sn->entry[idx].name_off))) HGOTO_ERROR(H5E_SYM, H5E_CANTGET, H5B_INS_ERROR, "unable to get link name"); - link_name_len = strlen(lnk.name) + 1; + + /* Compute the size of the link name in the heap, being defensive about corrupted data */ + link_name_len = strnlen(lnk.name, (udata->common.block_size - sn->entry[idx].name_off)) + 1; + if (link_name_len > (udata->common.block_size - sn->entry[idx].name_off)) + link_name_len = (udata->common.block_size - sn->entry[idx].name_off); /* Set up rest of link structure */ lnk.corder_valid = false; @@ -778,7 +788,13 @@ H5G__node_remove(H5F_t *f, haddr_t addr, void H5_ATTR_NDEBUG_UNUSED *_lt_key /*i if (lnk.u.soft.name) { size_t soft_link_len; /* Length of string in local heap */ - soft_link_len = strlen(lnk.u.soft.name) + 1; + /* Compute the size of the soft link name in the heap, being defensive about corrupted data */ + soft_link_len = strnlen(lnk.u.soft.name, + (udata->common.block_size - sn->entry[idx].cache.slink.lval_offset)) + + 1; + if (soft_link_len > (udata->common.block_size - sn->entry[idx].cache.slink.lval_offset)) + soft_link_len = (udata->common.block_size - sn->entry[idx].cache.slink.lval_offset); + if (H5HL_remove(f, udata->common.heap, sn->entry[idx].cache.slink.lval_offset, soft_link_len) < 0) HGOTO_ERROR(H5E_SYM, H5E_CANTDELETE, H5B_INS_ERROR, @@ -912,15 +928,10 @@ H5G__node_iterate(H5F_t *f, const void H5_ATTR_UNUSED *_lt_key, haddr_t addr, if (udata->skip > 0) --udata->skip; else { - H5O_link_t lnk; /* Link for entry */ - const char *name; /* Pointer to link name in heap */ - - /* Get the pointer to the name of the link in the heap */ - if ((name = (const char *)H5HL_offset_into(udata->heap, ents[u].name_off)) == NULL) - HGOTO_ERROR(H5E_SYM, H5E_CANTGET, H5_ITER_ERROR, "unable to get symbol table node name"); + H5O_link_t lnk; /* Link for entry */ /* Convert the entry to a link */ - if (H5G__ent_to_link(&lnk, udata->heap, &ents[u], name) < 0) + if (H5G__ent_to_link(&ents[u], udata->heap, &lnk) < 0) HGOTO_ERROR(H5E_SYM, H5E_CANTCONVERT, H5_ITER_ERROR, "unable to convert symbol table entry to link"); @@ -1130,7 +1141,6 @@ H5G__node_copy(H5F_t *f, const void H5_ATTR_UNUSED *_lt_key, haddr_t addr, const H5G_bt_it_cpy_t *udata = (H5G_bt_it_cpy_t *)_udata; const H5O_loc_t *src_oloc = udata->src_oloc; H5O_copy_t *cpy_info = udata->cpy_info; - H5HL_t *heap = NULL; H5G_node_t *sn = NULL; unsigned int i; /* Local index variable */ int ret_value = H5_ITER_CONT; @@ 
-1141,32 +1151,30 @@ H5G__node_copy(H5F_t *f, const void H5_ATTR_UNUSED *_lt_key, haddr_t addr, const assert(f); assert(H5_addr_defined(addr)); assert(udata); + assert(udata->src_heap); /* load the symbol table into memory from the source file */ if (NULL == (sn = (H5G_node_t *)H5AC_protect(f, H5AC_SNODE, addr, f, H5AC__READ_ONLY_FLAG))) HGOTO_ERROR(H5E_SYM, H5E_CANTLOAD, H5_ITER_ERROR, "unable to load symbol table node"); - /* get the base address of the heap */ - if (NULL == (heap = H5HL_protect(f, udata->src_heap_addr, H5AC__READ_ONLY_FLAG))) - HGOTO_ERROR(H5E_SYM, H5E_NOTFOUND, H5_ITER_ERROR, "unable to protect symbol name"); - /* copy object in this node one by one */ for (i = 0; i < sn->nsyms; i++) { H5G_entry_t *src_ent = &(sn->entry[i]); /* Convenience variable to refer to current source group entry */ H5O_link_t lnk; /* Link to insert */ - const char *name; /* Name of source object */ + char *name; /* Name of source object */ H5G_entry_t tmp_src_ent; /* Temporary copy. Change will not affect the cache */ H5O_type_t obj_type = H5O_TYPE_UNKNOWN; /* Target object type */ H5G_copy_file_ud_t *cpy_udata; /* Copy file udata */ H5G_obj_create_t gcrt_info; /* Group creation info */ + size_t max_link_len; /* Max. length of string in local heap */ /* expand soft link */ if (H5G_CACHED_SLINK == src_ent->type && cpy_info->expand_soft_link) { - haddr_t obj_addr; /* Address of object pointed to by soft link */ - H5G_loc_t grp_loc; /* Group location holding soft link */ - H5G_name_t grp_path; /* Path for group holding soft link */ - char *link_name; /* Pointer to value of soft link */ + haddr_t obj_addr = HADDR_UNDEF; /* Address of object pointed to by soft link */ + H5G_loc_t grp_loc; /* Group location holding soft link */ + H5G_name_t grp_path; /* Path for group holding soft link */ + char *link_name; /* Pointer to value of soft link */ /* Make a temporary copy, so that it will not change the info in the cache */ H5MM_memcpy(&tmp_src_ent, src_ent, sizeof(H5G_entry_t)); @@ -1179,17 +1187,24 @@ H5G__node_copy(H5F_t *f, const void H5_ATTR_UNUSED *_lt_key, haddr_t addr, const H5_GCC_CLANG_DIAG_ON("cast-qual") /* Get pointer to link value in local heap */ - if ((link_name = (char *)H5HL_offset_into(heap, tmp_src_ent.cache.slink.lval_offset)) == NULL) - HGOTO_ERROR(H5E_OHDR, H5E_CANTGET, H5_ITER_ERROR, "unable to get link name"); + if ((link_name = + (char *)H5HL_offset_into(udata->src_heap, tmp_src_ent.cache.slink.lval_offset)) == NULL) + HGOTO_ERROR(H5E_SYM, H5E_CANTGET, H5_ITER_ERROR, "unable to get link name"); + + /* Sanity check soft link name, to detect running off the end of the heap block */ + max_link_len = udata->src_block_size - tmp_src_ent.cache.slink.lval_offset; + if (strnlen(link_name, max_link_len) == max_link_len) + HGOTO_ERROR(H5E_SYM, H5E_BADVALUE, H5_ITER_ERROR, "invalid link name offset"); /* Check if the object pointed by the soft link exists in the source file */ - if (H5G__loc_addr(&grp_loc, link_name, &obj_addr) >= 0) { + if (H5G__loc_addr(&grp_loc, link_name, &obj_addr) < 0) + HGOTO_ERROR(H5E_SYM, H5E_CANTFIND, H5_ITER_ERROR, + "unable to check if soft link resolves to an object"); + if (H5_addr_defined(obj_addr)) { tmp_src_ent.header = obj_addr; src_ent = &tmp_src_ent; } /* end if */ - else - H5E_clear_stack(NULL); /* discard any errors from a dangling soft link */ - } /* if ((H5G_CACHED_SLINK == src_ent->type)... */ + } /* if ((H5G_CACHED_SLINK == src_ent->type)... 
*/ /* Check if object in source group is a hard link */ if (H5_addr_defined(src_ent->header)) { @@ -1230,28 +1245,39 @@ H5G__node_copy(H5F_t *f, const void H5_ATTR_UNUSED *_lt_key, haddr_t addr, const /* Construct link information for eventual insertion */ lnk.type = H5L_TYPE_SOFT; - if ((lnk.u.soft.name = (char *)H5HL_offset_into(heap, src_ent->cache.slink.lval_offset)) == NULL) + if ((lnk.u.soft.name = + (char *)H5HL_offset_into(udata->src_heap, src_ent->cache.slink.lval_offset)) == NULL) HGOTO_ERROR(H5E_OHDR, H5E_CANTGET, H5_ITER_ERROR, "unable to get link name"); + + /* Sanity check soft link name, to detect running off the end of the heap block */ + max_link_len = udata->src_block_size - src_ent->cache.slink.lval_offset; + if (strnlen(lnk.u.soft.name, max_link_len) == max_link_len) + HGOTO_ERROR(H5E_SYM, H5E_BADVALUE, H5_ITER_ERROR, "invalid link name offset"); } /* else if */ else assert(0 && "Unknown entry type"); + /* Determine name of source object */ + if ((name = (char *)H5HL_offset_into(udata->src_heap, src_ent->name_off)) == NULL) + HGOTO_ERROR(H5E_SYM, H5E_CANTGET, H5_ITER_ERROR, "unable to get source object name"); + + /* Sanity check soft link name, to detect running off the end of the heap block */ + max_link_len = udata->src_block_size - src_ent->name_off; + if (strnlen(name, max_link_len) == max_link_len) + HGOTO_ERROR(H5E_SYM, H5E_BADVALUE, H5_ITER_ERROR, "invalid link name offset"); + /* Set up common link data */ lnk.cset = H5F_DEFAULT_CSET; /* XXX: Allow user to set this */ lnk.corder = 0; /* Creation order is not tracked for old-style links */ lnk.corder_valid = false; /* Creation order is not valid */ - /* lnk.name = name; */ /* This will be set in callback */ - - /* Determine name of source object */ - if ((name = (const char *)H5HL_offset_into(heap, src_ent->name_off)) == NULL) - HGOTO_ERROR(H5E_OHDR, H5E_CANTGET, H5_ITER_ERROR, "unable to get source object name"); + lnk.name = name; /* Name of link */ /* Set copied metadata tag */ H5_BEGIN_TAG(H5AC__COPIED_TAG) /* Insert the new object in the destination file's group */ /* (Don't increment the link count - that's already done above for hard links) */ - if (H5G__stab_insert_real(udata->dst_file, udata->dst_stab, name, &lnk, obj_type, + if (H5G__stab_insert_real(udata->dst_file, udata->dst_stab, &lnk, obj_type, (obj_type == H5O_TYPE_GROUP ? 
&gcrt_info : NULL)) < 0) HGOTO_ERROR_TAG(H5E_DATATYPE, H5E_CANTINIT, H5_ITER_ERROR, "unable to insert the name"); @@ -1261,9 +1287,6 @@ H5G__node_copy(H5F_t *f, const void H5_ATTR_UNUSED *_lt_key, haddr_t addr, const } /* end of for (i=0; insyms; i++) */ done: - if (heap && H5HL_unprotect(heap) < 0) - HDONE_ERROR(H5E_SYM, H5E_PROTECT, H5_ITER_ERROR, "unable to unprotect symbol name"); - if (sn && H5AC_unprotect(f, H5AC_SNODE, addr, sn, H5AC__NO_FLAGS_SET) < 0) HDONE_ERROR(H5E_SYM, H5E_PROTECT, H5_ITER_ERROR, "unable to release object header"); @@ -1318,18 +1341,13 @@ H5G__node_build_table(H5F_t *f, const void H5_ATTR_UNUSED *_lt_key, haddr_t addr /* Iterate over the symbol table node entries, adding to link table */ for (u = 0; u < sn->nsyms; u++) { - const char *name; /* Pointer to link name in heap */ - size_t linkno; /* Link allocated */ - - /* Get pointer to link's name in the heap */ - if ((name = (const char *)H5HL_offset_into(udata->heap, sn->entry[u].name_off)) == NULL) - HGOTO_ERROR(H5E_SYM, H5E_CANTGET, H5_ITER_ERROR, "unable to get symbol table link name"); + size_t linkno; /* Link allocated */ /* Determine the link to operate on in the table */ linkno = udata->ltable->nlinks++; /* Convert the entry to a link */ - if (H5G__ent_to_link(&udata->ltable->lnks[linkno], udata->heap, &sn->entry[u], name) < 0) + if (H5G__ent_to_link(&sn->entry[u], udata->heap, &udata->ltable->lnks[linkno]) < 0) HGOTO_ERROR(H5E_SYM, H5E_CANTCONVERT, H5_ITER_ERROR, "unable to convert symbol table entry to link"); } /* end for */ @@ -1411,7 +1429,8 @@ H5G_node_debug(H5F_t *f, haddr_t addr, FILE *stream, int indent, int fwidth, had H5G_bt_common_t udata; /*data to pass through B-tree */ H5E_clear_stack(NULL); /* discard that error */ - udata.heap = heap; + udata.heap = heap; + udata.block_size = H5HL_heap_get_size(heap); if (H5B_debug(f, addr, stream, indent, fwidth, H5B_SNODE, &udata) < 0) HGOTO_ERROR(H5E_SYM, H5E_CANTLOAD, FAIL, "unable to debug B-tree node"); } /* end if */ diff --git a/src/H5Gobj.c b/src/H5Gobj.c index 2b73c2b5cd9..51feeaad56d 100644 --- a/src/H5Gobj.c +++ b/src/H5Gobj.c @@ -397,7 +397,7 @@ H5G__obj_stab_to_new_cb(const H5O_link_t *lnk, void *_udata) /* Insert link into group */ H5_GCC_CLANG_DIAG_OFF("cast-qual") - if (H5G_obj_insert(udata->grp_oloc, lnk->name, (H5O_link_t *)lnk, false, H5O_TYPE_UNKNOWN, NULL) < 0) + if (H5G_obj_insert(udata->grp_oloc, (H5O_link_t *)lnk, false, H5O_TYPE_UNKNOWN, NULL) < 0) HGOTO_ERROR(H5E_SYM, H5E_CANTINSERT, H5_ITER_ERROR, "can't insert link into group"); H5_GCC_CLANG_DIAG_ON("cast-qual") @@ -409,8 +409,8 @@ H5G__obj_stab_to_new_cb(const H5O_link_t *lnk, void *_udata) * Function: H5G_obj_insert * * Purpose: Insert a new symbol into the group described by GRP_OLOC. - * file F. The name of the new symbol is NAME and its symbol - * table entry is OBJ_LNK. Increment the reference + * file F. The name of the new symbol is OBJ_LNK->NAME and its + * symbol table entry is OBJ_LNK. Increment the reference * count for the object the link points if OBJ_LNK is a hard link * and ADJ_LINK is true. 
* @@ -419,8 +419,8 @@ H5G__obj_stab_to_new_cb(const H5O_link_t *lnk, void *_udata) *------------------------------------------------------------------------- */ herr_t -H5G_obj_insert(const H5O_loc_t *grp_oloc, const char *name, H5O_link_t *obj_lnk, bool adj_link, - H5O_type_t obj_type, const void *crt_info) +H5G_obj_insert(const H5O_loc_t *grp_oloc, H5O_link_t *obj_lnk, bool adj_link, H5O_type_t obj_type, + const void *crt_info) { H5O_pline_t tmp_pline; /* Pipeline message */ H5O_pline_t *pline = NULL; /* Pointer to pipeline message */ @@ -434,7 +434,6 @@ H5G_obj_insert(const H5O_loc_t *grp_oloc, const char *name, H5O_link_t *obj_lnk, /* check arguments */ assert(grp_oloc && grp_oloc->file); - assert(name && *name); assert(obj_lnk); /* Check if we have information about the number of objects in this group */ @@ -544,7 +543,7 @@ H5G_obj_insert(const H5O_loc_t *grp_oloc, const char *name, H5O_link_t *obj_lnk, * group is in the "new format" now and the link info should be * set up, etc. */ - if (H5G_obj_insert(grp_oloc, name, obj_lnk, adj_link, obj_type, crt_info) < 0) + if (H5G_obj_insert(grp_oloc, obj_lnk, adj_link, obj_type, crt_info) < 0) HGOTO_ERROR(H5E_SYM, H5E_CANTINSERT, FAIL, "unable to insert link into group"); /* Done with insertion now */ @@ -557,7 +556,7 @@ H5G_obj_insert(const H5O_loc_t *grp_oloc, const char *name, H5O_link_t *obj_lnk, /* Insert into symbol table or "dense" storage */ if (use_old_format) { /* Insert into symbol table */ - if (H5G__stab_insert(grp_oloc, name, obj_lnk, obj_type, crt_info) < 0) + if (H5G__stab_insert(grp_oloc, obj_lnk, obj_type, crt_info) < 0) HGOTO_ERROR(H5E_SYM, H5E_CANTINSERT, FAIL, "unable to insert entry into symbol table"); } /* end if */ else { diff --git a/src/H5Gpkg.h b/src/H5Gpkg.h index a90ada92ba9..f4f1749dc88 100644 --- a/src/H5Gpkg.h +++ b/src/H5Gpkg.h @@ -151,8 +151,9 @@ typedef struct { */ typedef struct H5G_bt_common_t { /* downward */ - const char *name; /*points to temporary memory */ - H5HL_t *heap; /*symbol table heap */ + const char *name; /* Points to temporary memory */ + H5HL_t *heap; /* Symbol table heap */ + size_t block_size; /* Size of the heap block */ } H5G_bt_common_t; /* @@ -210,11 +211,12 @@ typedef struct H5G_bt_it_it_t { /* Data passed through B-tree iteration for copying copy symbol table content */ typedef struct H5G_bt_it_cpy_t { - const H5O_loc_t *src_oloc; /* Source object location */ - haddr_t src_heap_addr; /* Heap address of the source symbol table */ - H5F_t *dst_file; /* File of destination group */ - const H5O_stab_t *dst_stab; /* Symbol table message for destination group */ - H5O_copy_t *cpy_info; /* Information for copy operation */ + const H5O_loc_t *src_oloc; /* Source object location */ + H5HL_t *src_heap; /* Heap for the source symbol table */ + size_t src_block_size; /* Size of the heap block */ + H5F_t *dst_file; /* File of destination group */ + const H5O_stab_t *dst_stab; /* Symbol table message for destination group */ + H5O_copy_t *cpy_info; /* Information for copy operation */ } H5G_bt_it_cpy_t; /* Common information for "by index" lookups in symbol tables */ @@ -345,9 +347,9 @@ H5_DLL const char *H5G__component(const char *name, size_t *size_p); */ H5_DLL herr_t H5G__stab_create(H5O_loc_t *grp_oloc, const H5O_ginfo_t *ginfo, H5O_stab_t *stab); H5_DLL herr_t H5G__stab_create_components(H5F_t *f, H5O_stab_t *stab, size_t size_hint); -H5_DLL herr_t H5G__stab_insert(const H5O_loc_t *grp_oloc, const char *name, H5O_link_t *obj_lnk, - H5O_type_t obj_type, const void *crt_info); -H5_DLL herr_t 
H5G__stab_insert_real(H5F_t *f, const H5O_stab_t *stab, const char *name, H5O_link_t *obj_lnk, +H5_DLL herr_t H5G__stab_insert(const H5O_loc_t *grp_oloc, H5O_link_t *obj_lnk, H5O_type_t obj_type, + const void *crt_info); +H5_DLL herr_t H5G__stab_insert_real(H5F_t *f, const H5O_stab_t *stab, H5O_link_t *obj_lnk, H5O_type_t obj_type, const void *crt_info); H5_DLL herr_t H5G__stab_delete(H5F_t *f, const H5O_stab_t *stab); H5_DLL herr_t H5G__stab_iterate(const H5O_loc_t *oloc, H5_iter_order_t order, hsize_t skip, hsize_t *last_lnk, @@ -374,8 +376,7 @@ H5_DLL void H5G__ent_reset(H5G_entry_t *ent); H5_DLL herr_t H5G__ent_decode_vec(const H5F_t *f, const uint8_t **pp, const uint8_t *p_end, H5G_entry_t *ent, unsigned n); H5_DLL herr_t H5G__ent_encode_vec(const H5F_t *f, uint8_t **pp, const H5G_entry_t *ent, unsigned n); -H5_DLL herr_t H5G__ent_convert(H5F_t *f, H5HL_t *heap, const char *name, const H5O_link_t *lnk, - H5O_type_t obj_type, const void *crt_info, H5G_entry_t *ent); +H5_DLL herr_t H5G__ent_to_link(const H5G_entry_t *ent, const H5HL_t *heap, H5O_link_t *lnk); H5_DLL herr_t H5G__ent_debug(const H5G_entry_t *ent, FILE *stream, int indent, int fwidth, const H5HL_t *heap); @@ -392,7 +393,8 @@ H5_DLL herr_t H5G__node_iterate_size(H5F_t *f, const void *_lt_key, haddr_t addr H5_DLL herr_t H5G__node_free(H5G_node_t *sym); /* Functions that understand links in groups */ -H5_DLL herr_t H5G__ent_to_link(H5O_link_t *lnk, const H5HL_t *heap, const H5G_entry_t *ent, const char *name); +H5_DLL herr_t H5G__link_to_ent(H5F_t *f, H5HL_t *heap, const H5O_link_t *lnk, H5O_type_t obj_type, + const void *crt_info, H5G_entry_t *ent); H5_DLL herr_t H5G__link_to_loc(const H5G_loc_t *grp_loc, const H5O_link_t *lnk, H5G_loc_t *obj_loc); H5_DLL herr_t H5G__link_sort_table(H5G_link_table_t *ltable, H5_index_t idx_type, H5_iter_order_t order); H5_DLL herr_t H5G__link_iterate_table(const H5G_link_table_t *ltable, hsize_t skip, hsize_t *last_lnk, diff --git a/src/H5Gprivate.h b/src/H5Gprivate.h index bb172eb2af7..a756a1c324f 100644 --- a/src/H5Gprivate.h +++ b/src/H5Gprivate.h @@ -228,8 +228,8 @@ H5_DLL herr_t H5G_link_to_info(const struct H5O_loc_t *link_loc, const struct H5 /* * Functions that understand group objects */ -H5_DLL herr_t H5G_obj_insert(const struct H5O_loc_t *grp_oloc, const char *name, struct H5O_link_t *obj_lnk, - bool adj_link, H5O_type_t obj_type, const void *crt_info); +H5_DLL herr_t H5G_obj_insert(const struct H5O_loc_t *grp_oloc, struct H5O_link_t *obj_lnk, bool adj_link, + H5O_type_t obj_type, const void *crt_info); H5_DLL herr_t H5G_obj_get_name_by_idx(const struct H5O_loc_t *oloc, H5_index_t idx_type, H5_iter_order_t order, hsize_t n, char *name, size_t name_size, size_t *name_len); diff --git a/src/H5Gstab.c b/src/H5Gstab.c index ef9773d7451..1ebd13681b1 100644 --- a/src/H5Gstab.c +++ b/src/H5Gstab.c @@ -227,8 +227,8 @@ H5G__stab_create(H5O_loc_t *grp_oloc, const H5O_ginfo_t *ginfo, H5O_stab_t *stab *------------------------------------------------------------------------- */ herr_t -H5G__stab_insert_real(H5F_t *f, const H5O_stab_t *stab, const char *name, H5O_link_t *obj_lnk, - H5O_type_t obj_type, const void *crt_info) +H5G__stab_insert_real(H5F_t *f, const H5O_stab_t *stab, H5O_link_t *obj_lnk, H5O_type_t obj_type, + const void *crt_info) { H5HL_t *heap = NULL; /* Pointer to local heap */ H5G_bt_ins_t udata; /* Data to pass through B-tree */ @@ -239,7 +239,6 @@ H5G__stab_insert_real(H5F_t *f, const H5O_stab_t *stab, const char *name, H5O_li /* check arguments */ assert(f); assert(stab); 
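(Editorial note, not part of the patch: the symbol-table plumbing here drops the separate name argument in favor of the name stored in the link itself, and starts carrying the local heap's block size in the shared B-tree user data (udata.common.block_size = H5HL_heap_get_size(heap), a few lines below). The callbacks use that size to bound reads of link names held in the heap rather than trusting a NUL terminator — see the H5MM_strndup(name, block_size - name_off) calls later in this file. A minimal standalone sketch of that bounding pattern, using a hypothetical helper rather than the HDF5 internals:

#include <stdlib.h>
#include <string.h>

/* Duplicate a name stored at offset off inside a heap block of block_size
 * bytes without reading past the end of the block. Returns NULL on a bad
 * offset or allocation failure. Hypothetical helper, illustration only. */
static char *
dup_name_bounded(const char *block, size_t block_size, size_t off)
{
    const char *name;
    const char *nul;
    size_t      len;
    char       *out;

    if (off >= block_size)
        return NULL;
    name = block + off;
    nul  = memchr(name, '\0', block_size - off); /* stop at NUL or end of block */
    len  = nul ? (size_t)(nul - name) : (block_size - off);
    if (NULL == (out = malloc(len + 1)))
        return NULL;
    memcpy(out, name, len);
    out[len] = '\0';
    return out;
}

End of editorial note; the patch continues below.)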
- assert(name && *name); assert(obj_lnk); /* Pin the heap down in memory */ @@ -247,11 +246,12 @@ H5G__stab_insert_real(H5F_t *f, const H5O_stab_t *stab, const char *name, H5O_li HGOTO_ERROR(H5E_SYM, H5E_PROTECT, FAIL, "unable to protect symbol table heap"); /* Initialize data to pass through B-tree */ - udata.common.name = name; - udata.common.heap = heap; - udata.lnk = obj_lnk; - udata.obj_type = obj_type; - udata.crt_info = crt_info; + udata.common.name = obj_lnk->name; + udata.common.heap = heap; + udata.common.block_size = H5HL_heap_get_size(heap); + udata.lnk = obj_lnk; + udata.obj_type = obj_type; + udata.crt_info = crt_info; /* Insert into symbol table */ if (H5B_insert(f, H5B_SNODE, stab->btree_addr, &udata) < 0) @@ -268,17 +268,14 @@ H5G__stab_insert_real(H5F_t *f, const H5O_stab_t *stab, const char *name, H5O_li /*------------------------------------------------------------------------- * Function: H5G__stab_insert * - * Purpose: Insert a new symbol into the table described by GRP_ENT in - * file F. The name of the new symbol is NAME and its symbol - * table entry is OBJ_ENT. + * Purpose: Insert a new link, OBJ_LNK, into the group, GRP_OLOC. * * Return: Non-negative on success/Negative on failure * *------------------------------------------------------------------------- */ herr_t -H5G__stab_insert(const H5O_loc_t *grp_oloc, const char *name, H5O_link_t *obj_lnk, H5O_type_t obj_type, - const void *crt_info) +H5G__stab_insert(const H5O_loc_t *grp_oloc, H5O_link_t *obj_lnk, H5O_type_t obj_type, const void *crt_info) { H5O_stab_t stab; /* Symbol table message */ herr_t ret_value = SUCCEED; /* Return value */ @@ -287,15 +284,14 @@ H5G__stab_insert(const H5O_loc_t *grp_oloc, const char *name, H5O_link_t *obj_ln /* check arguments */ assert(grp_oloc && grp_oloc->file); - assert(name && *name); assert(obj_lnk); /* Retrieve symbol table message */ if (NULL == H5O_msg_read(grp_oloc, H5O_STAB_ID, &stab)) HGOTO_ERROR(H5E_SYM, H5E_BADMESG, FAIL, "not a symbol table"); - if (H5G__stab_insert_real(grp_oloc->file, &stab, name, obj_lnk, obj_type, crt_info) < 0) - HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, H5_ITER_ERROR, "unable to insert the name"); + if (H5G__stab_insert_real(grp_oloc->file, &stab, obj_lnk, obj_type, crt_info) < 0) + HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, H5_ITER_ERROR, "unable to insert the link"); done: FUNC_LEAVE_NOAPI(ret_value) @@ -332,9 +328,10 @@ H5G__stab_remove(const H5O_loc_t *loc, H5RS_str_t *grp_full_path_r, const char * HGOTO_ERROR(H5E_SYM, H5E_PROTECT, FAIL, "unable to protect symbol table heap"); /* Initialize data to pass through B-tree */ - udata.common.name = name; - udata.common.heap = heap; - udata.grp_full_path_r = grp_full_path_r; + udata.common.name = name; + udata.common.heap = heap; + udata.common.block_size = H5HL_heap_get_size(heap); + udata.grp_full_path_r = grp_full_path_r; /* Remove from symbol table */ if (H5B_remove(loc->file, H5B_SNODE, stab.btree_addr, &udata) < 0) @@ -386,9 +383,10 @@ H5G__stab_remove_by_idx(const H5O_loc_t *grp_oloc, H5RS_str_t *grp_full_path_r, HGOTO_ERROR(H5E_SYM, H5E_PROTECT, FAIL, "unable to protect symbol table heap"); /* Initialize data to pass through B-tree */ - udata.common.name = obj_lnk.name; - udata.common.heap = heap; - udata.grp_full_path_r = grp_full_path_r; + udata.common.name = obj_lnk.name; + udata.common.heap = heap; + udata.common.block_size = H5HL_heap_get_size(heap); + udata.grp_full_path_r = grp_full_path_r; /* Remove link from symbol table */ if (H5B_remove(grp_oloc->file, H5B_SNODE, stab.btree_addr, 
&udata) < 0) @@ -640,6 +638,7 @@ H5G__stab_get_name_by_idx_cb(const H5G_entry_t *ent, void *_udata) H5G_bt_it_gnbi_t *udata = (H5G_bt_it_gnbi_t *)_udata; size_t name_off; /* Offset of name in heap */ const char *name; /* Pointer to name string in heap */ + size_t block_size; /* Size of the heap block */ herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_PACKAGE @@ -648,13 +647,16 @@ H5G__stab_get_name_by_idx_cb(const H5G_entry_t *ent, void *_udata) assert(ent); assert(udata && udata->heap); + /* Get the size of the heap block */ + block_size = H5HL_heap_get_size(udata->heap); + /* Get name offset in heap */ name_off = ent->name_off; if ((name = (const char *)H5HL_offset_into(udata->heap, name_off)) == NULL) HGOTO_ERROR(H5E_SYM, H5E_CANTGET, FAIL, "unable to get symbol table link name"); - if ((udata->name = H5MM_strdup(name)) == NULL) + if (NULL == (udata->name = H5MM_strndup(name, (block_size - name_off)))) HGOTO_ERROR(H5E_SYM, H5E_CANTGET, FAIL, "unable to duplicate symbol table link name"); done: @@ -768,7 +770,7 @@ H5G__stab_lookup_cb(const H5G_entry_t *ent, void *_udata) /* Check for setting link info */ if (udata->lnk) /* Convert the entry to a link */ - if (H5G__ent_to_link(udata->lnk, udata->heap, ent, udata->name) < 0) + if (H5G__ent_to_link(ent, udata->heap, udata->lnk) < 0) HGOTO_ERROR(H5E_SYM, H5E_CANTCONVERT, FAIL, "unable to convert symbol table entry to link"); done: @@ -815,10 +817,11 @@ H5G__stab_lookup(const H5O_loc_t *grp_oloc, const char *name, bool *found, H5O_l udata.heap = heap; /* Set up the user data for actual B-tree find operation */ - bt_udata.common.name = name; - bt_udata.common.heap = heap; - bt_udata.op = H5G__stab_lookup_cb; - bt_udata.op_data = &udata; + bt_udata.common.name = name; + bt_udata.common.heap = heap; + bt_udata.common.block_size = H5HL_heap_get_size(heap); + bt_udata.op = H5G__stab_lookup_cb; + bt_udata.op_data = &udata; /* Search the B-tree */ if (H5B_find(grp_oloc->file, H5B_SNODE, stab.btree_addr, found, &bt_udata) < 0) @@ -846,8 +849,7 @@ H5G__stab_lookup(const H5O_loc_t *grp_oloc, const char *name, bool *found, H5O_l static herr_t H5G__stab_lookup_by_idx_cb(const H5G_entry_t *ent, void *_udata) { - H5G_bt_it_lbi_t *udata = (H5G_bt_it_lbi_t *)_udata; - const char *name; /* Pointer to name string in heap */ + H5G_bt_it_lbi_t *udata = (H5G_bt_it_lbi_t *)_udata; herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_PACKAGE @@ -856,12 +858,8 @@ H5G__stab_lookup_by_idx_cb(const H5G_entry_t *ent, void *_udata) assert(ent); assert(udata && udata->heap); - /* Get a pointer to the link name */ - if ((name = (const char *)H5HL_offset_into(udata->heap, ent->name_off)) == NULL) - HGOTO_ERROR(H5E_SYM, H5E_CANTGET, FAIL, "unable to get symbol table link name"); - /* Convert the entry to a link */ - if (H5G__ent_to_link(udata->lnk, udata->heap, ent, name) < 0) + if (H5G__ent_to_link(ent, udata->heap, udata->lnk) < 0) HGOTO_ERROR(H5E_SYM, H5E_CANTCONVERT, FAIL, "unable to convert symbol table entry to link"); udata->found = true; diff --git a/src/H5Gtest.c b/src/H5Gtest.c index a6a64067979..0b0708119e1 100644 --- a/src/H5Gtest.c +++ b/src/H5Gtest.c @@ -796,10 +796,10 @@ H5G__verify_cached_stabs_test(hid_t gid) { H5G_t *grp = NULL; /* Group */ htri_t stab_exists; - H5O_stab_t stab; /* Symbol table message */ - H5G_bt_common_t udata = {NULL, NULL}; /* Dummy udata so H5B_iterate doesn't freak out */ - haddr_t prev_tag = HADDR_UNDEF; /* Previous metadata tag */ - herr_t ret_value = SUCCEED; /* Return value */ + H5O_stab_t stab; /* Symbol table 
message */ + H5G_bt_common_t udata = {NULL, NULL, 0}; /* Dummy udata so H5B_iterate doesn't freak out */ + haddr_t prev_tag = HADDR_UNDEF; /* Previous metadata tag */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_PACKAGE diff --git a/src/H5HG.c b/src/H5HG.c index 3709c705566..a859b40f100 100644 --- a/src/H5HG.c +++ b/src/H5HG.c @@ -556,6 +556,11 @@ H5HG_read(H5F_t *f, H5HG_t *hobj, void *object /*out*/, size_t *buf_size) assert(f); assert(hobj); + /* Heap object idx 0 is the free space in the heap and should never be given out */ + if (0 == hobj->idx) + HGOTO_ERROR(H5E_HEAP, H5E_BADVALUE, NULL, "bad heap index, heap object = {%" PRIxHADDR ", %zu}", + hobj->addr, hobj->idx); + /* Load the heap */ if (NULL == (heap = H5HG__protect(f, hobj->addr, H5AC__READ_ONLY_FLAG))) HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, NULL, "unable to protect global heap"); @@ -630,6 +635,11 @@ H5HG_link(H5F_t *f, const H5HG_t *hobj, int adjust) if (0 == (H5F_INTENT(f) & H5F_ACC_RDWR)) HGOTO_ERROR(H5E_HEAP, H5E_WRITEERROR, FAIL, "no write intent on file"); + /* Heap object idx 0 is the free space in the heap and should never be given out */ + if (0 == hobj->idx) + HGOTO_ERROR(H5E_HEAP, H5E_BADVALUE, FAIL, "bad heap index, heap object = {%" PRIxHADDR ", %zu}", + hobj->addr, hobj->idx); + /* Load the heap */ if (NULL == (heap = H5HG__protect(f, hobj->addr, H5AC__NO_FLAGS_SET))) HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, FAIL, "unable to protect global heap"); @@ -682,6 +692,11 @@ H5HG_get_obj_size(H5F_t *f, H5HG_t *hobj, size_t *obj_size) assert(hobj); assert(obj_size); + /* Heap object idx 0 is the free space in the heap and should never be given out */ + if (0 == hobj->idx) + HGOTO_ERROR(H5E_HEAP, H5E_BADVALUE, FAIL, "bad heap index, heap object = {%" PRIxHADDR ", %zu}", + hobj->addr, hobj->idx); + /* Load the heap */ if (NULL == (heap = H5HG__protect(f, hobj->addr, H5AC__READ_ONLY_FLAG))) HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, FAIL, "unable to protect global heap"); @@ -731,6 +746,11 @@ H5HG_remove(H5F_t *f, H5HG_t *hobj) if (0 == (H5F_INTENT(f) & H5F_ACC_RDWR)) HGOTO_ERROR(H5E_HEAP, H5E_WRITEERROR, FAIL, "no write intent on file"); + /* Heap object idx 0 is the free space in the heap and should never be given out */ + if (0 == hobj->idx) + HGOTO_ERROR(H5E_HEAP, H5E_BADVALUE, FAIL, "bad heap index, heap object = {%" PRIxHADDR ", %zu}", + hobj->addr, hobj->idx); + /* Load the heap */ if (NULL == (heap = H5HG__protect(f, hobj->addr, H5AC__NO_FLAGS_SET))) HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, FAIL, "unable to protect global heap"); diff --git a/src/H5HL.c b/src/H5HL.c index 680ef0a15db..f6e589b016d 100644 --- a/src/H5HL.c +++ b/src/H5HL.c @@ -295,13 +295,11 @@ H5HL__minimize_heap_space(H5F_t *f, H5HL_t *heap) H5HL_t * H5HL_protect(H5F_t *f, haddr_t addr, unsigned flags) { - H5HL_cache_prfx_ud_t prfx_udata; /* User data for protecting local heap prefix */ - H5HL_prfx_t *prfx = NULL; /* Local heap prefix */ - H5HL_dblk_t *dblk = NULL; /* Local heap data block */ - H5HL_t *heap = NULL; /* Heap data structure */ - unsigned prfx_cache_flags = H5AC__NO_FLAGS_SET; /* Cache flags for unprotecting prefix entry */ - unsigned dblk_cache_flags = H5AC__NO_FLAGS_SET; /* Cache flags for unprotecting data block entry */ - H5HL_t *ret_value = NULL; + H5HL_cache_prfx_ud_t prfx_udata; /* User data for protecting local heap prefix */ + H5HL_prfx_t *prfx = NULL; /* Local heap prefix */ + H5HL_dblk_t *dblk = NULL; /* Local heap data block */ + H5HL_t *heap = NULL; /* Heap data structure */ + H5HL_t *ret_value = NULL; 
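(Editorial note, not part of the patch: the H5HG.c hunks just above add the same guard to H5HG_read, H5HG_link, H5HG_get_obj_size, and H5HG_remove, because global-heap object index 0 describes the collection's free space and is never handed out, so a zero index can only come from a corrupt or forged heap ID. A short, hypothetical sketch of that check factored into one helper, for illustration only:

#include <stdbool.h>
#include <stddef.h>

/* Hypothetical global-heap object ID: collection address plus object index. */
typedef struct {
    unsigned long long addr; /* address of the heap collection */
    size_t             idx;  /* object index within the collection */
} heap_obj_id_t;

/* Index 0 is reserved for the heap's free-space object and is never given
 * out, so a zero index marks an invalid ID. Illustrative helper only. */
static bool
heap_obj_id_is_valid(const heap_obj_id_t *hobj)
{
    return hobj != NULL && hobj->idx != 0;
}

End of editorial note; the patch continues below.)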
FUNC_ENTER_NOAPI(NULL) @@ -328,19 +326,25 @@ H5HL_protect(H5F_t *f, haddr_t addr, unsigned flags) /* Check if the heap is already pinned in memory */ /* (for re-entrant situation) */ if (heap->prots == 0) { + void *pin_obj; /* Pointer to local heap object to pin */ + /* Check if heap has separate data block */ if (heap->single_cache_obj) - /* Set the flag for pinning the prefix when unprotecting it */ - prfx_cache_flags |= H5AC__PIN_ENTRY_FLAG; + /* Pin prefix */ + pin_obj = prfx; else { /* Protect the local heap data block */ if (NULL == (dblk = (H5HL_dblk_t *)H5AC_protect(f, H5AC_LHEAP_DBLK, heap->dblk_addr, heap, flags))) HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, NULL, "unable to load heap data block"); - /* Set the flag for pinning the data block when unprotecting it */ - dblk_cache_flags |= H5AC__PIN_ENTRY_FLAG; + /* Pin data block */ + pin_obj = dblk; } + + /* Pin local heap object */ + if (H5AC_pin_protected_entry(pin_obj) < 0) + HGOTO_ERROR(H5E_HEAP, H5E_CANTPIN, NULL, "unable to pin local heap object"); } /* Increment # of times heap is protected */ @@ -351,11 +355,11 @@ H5HL_protect(H5F_t *f, haddr_t addr, unsigned flags) done: /* Release the prefix from the cache, now pinned */ - if (prfx && heap && H5AC_unprotect(f, H5AC_LHEAP_PRFX, heap->prfx_addr, prfx, prfx_cache_flags) < 0) + if (prfx && heap && H5AC_unprotect(f, H5AC_LHEAP_PRFX, heap->prfx_addr, prfx, H5AC__NO_FLAGS_SET) < 0) HDONE_ERROR(H5E_HEAP, H5E_CANTUNPROTECT, NULL, "unable to release local heap prefix"); /* Release the data block from the cache, now pinned */ - if (dblk && heap && H5AC_unprotect(f, H5AC_LHEAP_DBLK, heap->dblk_addr, dblk, dblk_cache_flags) < 0) + if (dblk && heap && H5AC_unprotect(f, H5AC_LHEAP_DBLK, heap->dblk_addr, dblk, H5AC__NO_FLAGS_SET) < 0) HDONE_ERROR(H5E_HEAP, H5E_CANTUNPROTECT, NULL, "unable to release local heap data block"); FUNC_LEAVE_NOAPI(ret_value) @@ -888,6 +892,26 @@ H5HL_delete(H5F_t *f, haddr_t addr) FUNC_LEAVE_NOAPI(ret_value) } /* end H5HL_delete() */ +/*------------------------------------------------------------------------- + * Function: H5HL_heap_get_size + * + * Purpose: Retrieves the current size of a heap's block + * + * Return: SUCCEED/FAIL + * + *------------------------------------------------------------------------- + */ +size_t +H5HL_heap_get_size(const H5HL_t *heap) +{ + FUNC_ENTER_NOAPI_NOINIT_NOERR + + /* Check arguments */ + assert(heap); + + FUNC_LEAVE_NOAPI(heap->dblk_size) +} /* end H5HL_heap_get_size() */ + /*------------------------------------------------------------------------- * Function: H5HL_get_size * diff --git a/src/H5HLcache.c b/src/H5HLcache.c index b38ff7c2848..d0836fed4f7 100644 --- a/src/H5HLcache.c +++ b/src/H5HLcache.c @@ -232,7 +232,7 @@ H5HL__fl_deserialize(H5HL_t *heap) const uint8_t *image; /* Pointer into image buffer */ /* Sanity check */ - if (free_block >= heap->dblk_size) + if ((free_block + (2 * heap->sizeof_size)) > heap->dblk_size) HGOTO_ERROR(H5E_HEAP, H5E_BADRANGE, FAIL, "bad heap free list"); /* Allocate & initialize free list node */ diff --git a/src/H5HLprivate.h b/src/H5HLprivate.h index 243d7653a86..045c5d7ac1e 100644 --- a/src/H5HLprivate.h +++ b/src/H5HLprivate.h @@ -44,6 +44,7 @@ typedef struct H5HL_t H5HL_t; */ H5_DLL herr_t H5HL_create(H5F_t *f, size_t size_hint, haddr_t *addr /*out*/); H5_DLL herr_t H5HL_delete(H5F_t *f, haddr_t addr); +H5_DLL size_t H5HL_heap_get_size(const H5HL_t *heap); H5_DLL herr_t H5HL_get_size(H5F_t *f, haddr_t addr, size_t *size); H5_DLL herr_t H5HL_heapsize(H5F_t *f, haddr_t addr, hsize_t 
*heap_size); H5_DLL herr_t H5HL_insert(H5F_t *f, H5HL_t *heap, size_t size, const void *buf, size_t *offset); diff --git a/src/H5Lint.c b/src/H5Lint.c index d02e8c48a48..10b935ab09d 100644 --- a/src/H5Lint.c +++ b/src/H5Lint.c @@ -572,7 +572,7 @@ H5L__link_cb(H5G_loc_t *grp_loc /*in*/, const char *name, const H5O_link_t H5_AT H5_GCC_CLANG_DIAG_ON("cast-qual") /* Insert link into group */ - if (H5G_obj_insert(grp_loc->oloc, name, udata->lnk, true, + if (H5G_obj_insert(grp_loc->oloc, udata->lnk, true, udata->ocrt_info ? udata->ocrt_info->obj_type : H5O_TYPE_UNKNOWN, udata->ocrt_info ? udata->ocrt_info->crt_info : NULL) < 0) HGOTO_ERROR(H5E_LINK, H5E_CANTINIT, FAIL, "unable to create new link for object"); @@ -1315,7 +1315,7 @@ H5L__move_dest_cb(H5G_loc_t *grp_loc /*in*/, const char *name, const H5O_link_t H5_GCC_CLANG_DIAG_ON("cast-qual") /* Insert the link into the group */ - if (H5G_obj_insert(grp_loc->oloc, name, udata->lnk, true, H5O_TYPE_UNKNOWN, NULL) < 0) + if (H5G_obj_insert(grp_loc->oloc, udata->lnk, true, H5O_TYPE_UNKNOWN, NULL) < 0) HGOTO_ERROR(H5E_LINK, H5E_CANTINIT, FAIL, "unable to create new link to object"); /* If the link was a user-defined link, call its move callback if it has one */ diff --git a/src/H5Ocache.c b/src/H5Ocache.c index 53a7d0a7cfe..6916a9044c7 100644 --- a/src/H5Ocache.c +++ b/src/H5Ocache.c @@ -309,6 +309,10 @@ H5O__cache_deserialize(const void *image, size_t len, void *_udata, bool *dirty) &(udata->common), dirty) < 0) HGOTO_ERROR(H5E_OHDR, H5E_CANTINIT, NULL, "can't deserialize first object header chunk"); + /* Check for corruption in object header # of messages */ + if (oh->version == H5O_VERSION_1 && udata->v1_pfx_nmesgs < oh->nmesgs) + HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, NULL, "bad object header message count"); + /* Note that we've loaded the object header from the file */ udata->made_attempt = true; diff --git a/src/H5Ocopy.c b/src/H5Ocopy.c index f2f307e82db..d6002046269 100644 --- a/src/H5Ocopy.c +++ b/src/H5Ocopy.c @@ -359,6 +359,12 @@ H5O__copy_header_real(const H5O_loc_t *oloc_src, H5O_loc_t *oloc_dst /*out*/, H5 /* Get message class to operate on */ copy_type = mesg_src->type; + /* Sanity check message for possible corruption */ + if (H5O_UNKNOWN_ID != mesg_src->type->id && H5O_NULL_ID != mesg_src->type->id) + if (0 == mesg_src->raw_size) + HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, FAIL, "message of type '%s' has zero size", + mesg_src->type->name); + /* Check for continuation message; these are converted to NULL * messages because the destination OH will have only one chunk */ diff --git a/src/H5Odtype.c b/src/H5Odtype.c index 674d8d4ea1c..24671b02107 100644 --- a/src/H5Odtype.c +++ b/src/H5Odtype.c @@ -366,6 +366,7 @@ H5O__dtype_decode_helper(unsigned *ioflags /*in,out*/, const uint8_t **pp, H5T_t hsize_t dim[H5O_LAYOUT_NDIMS]; /* Dimensions of the array */ H5T_t *array_dt; /* Temporary pointer to the array datatype */ H5T_t *temp_type; /* Temporary pointer to the field's datatype */ + unsigned memb_idx; /* Local index counter */ /* Get the length of the field name */ if (!skip) { @@ -388,6 +389,13 @@ H5O__dtype_decode_helper(unsigned *ioflags /*in,out*/, const uint8_t **pp, H5T_t if (H5_IS_KNOWN_BUFFER_OVERFLOW(skip, *pp, actual_name_length, p_end)) HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding"); + /* Check for duplicated field name */ + for (memb_idx = 0; memb_idx < dt->shared->u.compnd.nmembs; memb_idx++) + if (0 == strcmp((const char *)*pp, dt->shared->u.compnd.memb[memb_idx].name)) + 
HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, FAIL, + "duplicated compound field name '%s', for fields %u and %u", + (const char *)*pp, memb_idx, dt->shared->u.compnd.nmembs); + /* Decode the field name */ if (NULL == (dt->shared->u.compnd.memb[dt->shared->u.compnd.nmembs].name = H5MM_xstrdup((const char *)*pp))) @@ -509,6 +517,18 @@ H5O__dtype_decode_helper(unsigned *ioflags /*in,out*/, const uint8_t **pp, H5T_t if (version == H5O_DTYPE_VERSION_1) { /* Check if this member is an array field */ if (ndims > 0) { + /* Validate decoded dims */ + for (unsigned u = 0; u < ndims; u++) + if (!(dim[u] > 0)) { + dt->shared->u.compnd.memb[dt->shared->u.compnd.nmembs].name = + H5MM_xfree(dt->shared->u.compnd.memb[dt->shared->u.compnd.nmembs].name); + if (H5T_close_real(temp_type) < 0) + HDONE_ERROR(H5E_DATATYPE, H5E_CANTRELEASE, FAIL, + "can't release datatype info"); + HGOTO_ERROR(H5E_DATATYPE, H5E_BADVALUE, FAIL, + "zero-sized dimension specified"); + } + /* Create the array datatype for the field */ if ((array_dt = H5T__array_create(temp_type, ndims, dim)) == NULL) { dt->shared->u.compnd.memb[dt->shared->u.compnd.nmembs].name = @@ -690,6 +710,8 @@ H5O__dtype_decode_helper(unsigned *ioflags /*in,out*/, const uint8_t **pp, H5T_t */ actual_name_length = strlen((const char *)*pp); } + if (0 == actual_name_length) + HGOTO_ERROR(H5E_OHDR, H5E_BADSIZE, FAIL, "0 length enum name"); if (H5_IS_KNOWN_BUFFER_OVERFLOW(skip, *pp, actual_name_length, p_end)) HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding"); diff --git a/src/H5Oefl.c b/src/H5Oefl.c index 57e5e6991df..351dbd30211 100644 --- a/src/H5Oefl.c +++ b/src/H5Oefl.c @@ -76,9 +76,10 @@ H5O__efl_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNUSED { H5O_efl_t *mesg = NULL; int version; - const uint8_t *p_end = p + p_size - 1; /* pointer to last byte in p */ - const char *s = NULL; - H5HL_t *heap = NULL; + const uint8_t *p_end = p + p_size - 1; /* pointer to last byte in p */ + const char *s = NULL; + H5HL_t *heap = NULL; + size_t block_size; /* Size of the heap block */ void *ret_value = NULL; /* Return value */ FUNC_ENTER_PACKAGE @@ -139,6 +140,9 @@ H5O__efl_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNUSED HGOTO_ERROR(H5E_OHDR, H5E_CANTGET, NULL, "entry at offset 0 in local heap not an empty string"); #endif + /* Get the size of the heap block */ + block_size = H5HL_heap_get_size(heap); + for (size_t u = 0; u < mesg->nused; u++) { hsize_t offset = 0; @@ -152,7 +156,7 @@ H5O__efl_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNUSED HGOTO_ERROR(H5E_OHDR, H5E_CANTGET, NULL, "unable to get external file name"); if (*s == '\0') HGOTO_ERROR(H5E_OHDR, H5E_CANTGET, NULL, "invalid external file name"); - mesg->slot[u].name = H5MM_xstrdup(s); + mesg->slot[u].name = H5MM_strndup(s, (block_size - mesg->slot[u].name_offset)); if (mesg->slot[u].name == NULL) HGOTO_ERROR(H5E_OHDR, H5E_NOSPACE, NULL, "string duplication failed"); @@ -386,33 +390,35 @@ H5O__efl_reset(void *_mesg) /*------------------------------------------------------------------------- * Function: H5O_efl_total_size * - * Purpose: Return the total size of the external file list by summing + * Purpose: Query the total size of the external file list by summing * the sizes of all of the files. * - * Return: Success: Total reserved size for external data. 
- * - * Failure: 0 + * Return: Non-negative on success/Negative on failure * *------------------------------------------------------------------------- */ -hsize_t -H5O_efl_total_size(H5O_efl_t *efl) +herr_t +H5O_efl_total_size(const H5O_efl_t *efl, hsize_t *size) { - hsize_t ret_value = 0, tmp; + hsize_t total_size = 0, tmp; + herr_t ret_value = SUCCEED; /* Return value */ - FUNC_ENTER_NOAPI(0) + FUNC_ENTER_NOAPI(FAIL) if (efl->nused > 0 && H5O_EFL_UNLIMITED == efl->slot[efl->nused - 1].size) - ret_value = H5O_EFL_UNLIMITED; + *size = H5O_EFL_UNLIMITED; else { size_t u; /* Local index variable */ - for (u = 0; u < efl->nused; u++, ret_value = tmp) { - tmp = ret_value + efl->slot[u].size; - if (tmp <= ret_value) - HGOTO_ERROR(H5E_EFL, H5E_OVERFLOW, 0, "total external storage size overflowed"); + for (u = 0; u < efl->nused; u++, total_size = tmp) { + tmp = total_size + efl->slot[u].size; + if (tmp < total_size) + HGOTO_ERROR(H5E_EFL, H5E_OVERFLOW, FAIL, "total external storage size overflowed"); } /* end for */ - } /* end else */ + + /* Set the size to return */ + *size = total_size; + } /* end else */ done: FUNC_LEAVE_NOAPI(ret_value) diff --git a/src/H5Olayout.c b/src/H5Olayout.c index d14e0009953..e5ffc382476 100644 --- a/src/H5Olayout.c +++ b/src/H5Olayout.c @@ -1226,8 +1226,7 @@ H5O__layout_copy_file(H5F_t *file_src, void *mesg_src, H5F_t *file_dst, bool H5_ H5D_copy_file_ud_t *udata = (H5D_copy_file_ud_t *)_udata; /* Dataset copying user data */ H5O_layout_t *layout_src = (H5O_layout_t *)mesg_src; H5O_layout_t *layout_dst = NULL; - bool copied = false; /* Whether the data was copied */ - void *ret_value = NULL; /* Return value */ + void *ret_value = NULL; /* Return value */ FUNC_ENTER_PACKAGE @@ -1248,18 +1247,28 @@ H5O__layout_copy_file(H5F_t *file_src, void *mesg_src, H5F_t *file_dst, bool H5_ if (H5D__compact_copy(file_src, &layout_src->storage.u.compact, file_dst, &layout_dst->storage.u.compact, udata->src_dtype, cpy_info) < 0) HGOTO_ERROR(H5E_OHDR, H5E_CANTCOPY, NULL, "unable to copy chunked storage"); - copied = true; } /* end if */ break; - case H5D_CONTIGUOUS: + case H5D_CONTIGUOUS: { + hsize_t nelmts; /* Number of elements in dataset's extent */ + size_t dt_size; /* Size of dataset's datatype in bytes */ + /* Sanity check the dataset's info */ + if (H5D__contig_check(file_src, layout_src, udata->src_space_extent, udata->src_dtype) < 0) + HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, NULL, "invalid layout / dataspace / datatype info"); + /* Compute the size of the contiguous storage for versions of the * layout message less than version 3 because versions 1 & 2 would * truncate the dimension sizes to 32-bits of information. 
- QAK 5/26/04 */ + nelmts = H5S_extent_nelem(udata->src_space_extent); + dt_size = H5T_get_size(udata->src_dtype); if (layout_src->version < H5O_LAYOUT_VERSION_3) - layout_dst->storage.u.contig.size = - H5S_extent_nelem(udata->src_space_extent) * H5T_get_size(udata->src_dtype); + layout_dst->storage.u.contig.size = nelmts * dt_size; + else + /* Sanity check layout storage size */ + if (layout_dst->storage.u.contig.size != (nelmts * dt_size)) + HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, NULL, "invalid layout storage size "); if (H5D__contig_is_space_alloc(&layout_src->storage) || (cpy_info->shared_fo && @@ -1268,9 +1277,8 @@ H5O__layout_copy_file(H5F_t *file_src, void *mesg_src, H5F_t *file_dst, bool H5_ if (H5D__contig_copy(file_src, &layout_src->storage.u.contig, file_dst, &layout_dst->storage.u.contig, udata->src_dtype, cpy_info) < 0) HGOTO_ERROR(H5E_OHDR, H5E_CANTCOPY, NULL, "unable to copy contiguous storage"); - copied = true; } /* end if */ - break; + } break; case H5D_CHUNKED: if (H5D__chunk_is_space_alloc(&layout_src->storage) || @@ -1281,7 +1289,6 @@ H5O__layout_copy_file(H5F_t *file_src, void *mesg_src, H5F_t *file_dst, bool H5_ &layout_dst->storage.u.chunk, udata->src_space_extent, udata->src_dtype, udata->common.src_pline, cpy_info) < 0) HGOTO_ERROR(H5E_OHDR, H5E_CANTCOPY, NULL, "unable to copy chunked storage"); - copied = true; } /* end if */ break; @@ -1298,10 +1305,6 @@ H5O__layout_copy_file(H5F_t *file_src, void *mesg_src, H5F_t *file_dst, bool H5_ HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, NULL, "Invalid layout class"); } /* end switch */ - /* Check if copy routine was invoked (which frees the source datatype) */ - if (copied) - udata->src_dtype = NULL; - /* Set return value */ ret_value = layout_dst; diff --git a/src/H5Opkg.h b/src/H5Opkg.h index 8e32f3ae13a..1803ae0daa7 100644 --- a/src/H5Opkg.h +++ b/src/H5Opkg.h @@ -600,8 +600,8 @@ H5_DLL herr_t H5O__condense_header(H5F_t *f, H5O_t *oh); H5_DLL herr_t H5O__release_mesg(H5F_t *f, H5O_t *oh, H5O_mesg_t *mesg, bool adj_link); /* Shared object operators */ -H5_DLL void *H5O__shared_decode(H5F_t *f, H5O_t *open_oh, unsigned *ioflags, const uint8_t *buf, - const H5O_msg_class_t *type); +H5_DLL void *H5O__shared_decode(H5F_t *f, H5O_t *open_oh, unsigned *ioflags, size_t buf_size, + const uint8_t *buf, const H5O_msg_class_t *type); H5_DLL herr_t H5O__shared_encode(const H5F_t *f, uint8_t *buf /*out*/, const H5O_shared_t *sh_mesg); H5_DLL size_t H5O__shared_size(const H5F_t *f, const H5O_shared_t *sh_mesg); H5_DLL herr_t H5O__shared_delete(H5F_t *f, H5O_t *open_oh, const H5O_msg_class_t *mesg_type, diff --git a/src/H5Oprivate.h b/src/H5Oprivate.h index 968a23caada..3f0ff07cd13 100644 --- a/src/H5Oprivate.h +++ b/src/H5Oprivate.h @@ -1004,7 +1004,7 @@ H5_DLL herr_t H5O_loc_free(H5O_loc_t *loc); H5_DLL H5O_loc_t *H5O_get_loc(hid_t id); /* EFL operators */ -H5_DLL hsize_t H5O_efl_total_size(H5O_efl_t *efl); +H5_DLL herr_t H5O_efl_total_size(const H5O_efl_t *efl, hsize_t *size); /* File space info routines */ H5_DLL herr_t H5O_fsinfo_set_version(H5F_libver_t low, H5F_libver_t high, H5O_fsinfo_t *fsinfo); diff --git a/src/H5Osdspace.c b/src/H5Osdspace.c index 1658fa719f9..365e3511243 100644 --- a/src/H5Osdspace.c +++ b/src/H5Osdspace.c @@ -175,29 +175,31 @@ H5O__sdspace_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UN /* Decode dimension sizes */ if (sdim->rank > 0) { - - /* Sizes */ - /* Check that we have space to decode sdim->rank values */ if (H5_IS_BUFFER_OVERFLOW(p, (H5F_sizeof_size(f) * sdim->rank), p_end)) 
HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); + /* Sizes */ if (NULL == (sdim->size = (hsize_t *)H5FL_ARR_MALLOC(hsize_t, (size_t)sdim->rank))) HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, NULL, "memory allocation failed"); for (i = 0; i < sdim->rank; i++) H5F_DECODE_LENGTH(f, p, sdim->size[i]); - /* Max sizes */ - if (flags & H5S_VALID_MAX) { - if (NULL == (sdim->max = (hsize_t *)H5FL_ARR_MALLOC(hsize_t, (size_t)sdim->rank))) - HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, NULL, "memory allocation failed"); - /* Check that we have space to decode sdim->rank values */ if (H5_IS_BUFFER_OVERFLOW(p, (H5F_sizeof_size(f) * sdim->rank), p_end)) HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); - for (i = 0; i < sdim->rank; i++) + + /* Max sizes */ + if (NULL == (sdim->max = (hsize_t *)H5FL_ARR_MALLOC(hsize_t, (size_t)sdim->rank))) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, NULL, "memory allocation failed"); + for (i = 0; i < sdim->rank; i++) { H5F_DECODE_LENGTH(f, p, sdim->max[i]); + if (sdim->size[i] > sdim->max[i]) + HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, NULL, + "dataspace dim %u size of %llu is greater than maxdim size of %llu", i, + (unsigned long long)sdim->size[i], (unsigned long long)sdim->max[i]); + } } /* NOTE: The version 1 permutation indexes were never implemented so diff --git a/src/H5Oshared.c b/src/H5Oshared.c index 9c32caf426b..d562d1764a8 100644 --- a/src/H5Oshared.c +++ b/src/H5Oshared.c @@ -286,12 +286,13 @@ H5O__shared_link_adj(H5F_t *f, H5O_t *open_oh, const H5O_msg_class_t *type, H5O_ *------------------------------------------------------------------------- */ void * -H5O__shared_decode(H5F_t *f, H5O_t *open_oh, unsigned *ioflags, const uint8_t *buf, +H5O__shared_decode(H5F_t *f, H5O_t *open_oh, unsigned *ioflags, size_t buf_size, const uint8_t *buf, const H5O_msg_class_t *type) { - H5O_shared_t sh_mesg; /* Shared message info */ - unsigned version; /* Shared message version */ - void *ret_value = NULL; /* Return value */ + const uint8_t *buf_end = buf + buf_size - 1; /* End of the buffer */ + H5O_shared_t sh_mesg; /* Shared message info */ + unsigned version; /* Shared message version */ + void *ret_value = NULL; /* Return value */ FUNC_ENTER_PACKAGE @@ -301,6 +302,8 @@ H5O__shared_decode(H5F_t *f, H5O_t *open_oh, unsigned *ioflags, const uint8_t *b assert(type); /* Version */ + if (H5_IS_BUFFER_OVERFLOW(buf, 1, buf_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); version = *buf++; if (version < H5O_SHARED_VERSION_1 || version > H5O_SHARED_VERSION_LATEST) HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, NULL, "bad version number for shared object message"); @@ -308,6 +311,8 @@ H5O__shared_decode(H5F_t *f, H5O_t *open_oh, unsigned *ioflags, const uint8_t *b /* Get the shared information type * Flags are unused before version 3. 
*/ + if (H5_IS_BUFFER_OVERFLOW(buf, 1, buf_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); if (version >= H5O_SHARED_VERSION_2) sh_mesg.type = *buf++; else { @@ -316,8 +321,11 @@ H5O__shared_decode(H5F_t *f, H5O_t *open_oh, unsigned *ioflags, const uint8_t *b } /* end else */ /* Skip reserved bytes (for version 1) */ - if (version == H5O_SHARED_VERSION_1) + if (version == H5O_SHARED_VERSION_1) { + if (H5_IS_BUFFER_OVERFLOW(buf, 6, buf_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); buf += 6; + } /* Body */ if (version == H5O_SHARED_VERSION_1) { @@ -325,7 +333,11 @@ H5O__shared_decode(H5F_t *f, H5O_t *open_oh, unsigned *ioflags, const uint8_t *b sh_mesg.u.loc.index = 0; /* Decode stored "symbol table entry" into message location */ + if (H5_IS_BUFFER_OVERFLOW(buf, H5F_SIZEOF_SIZE(f), buf_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); buf += H5F_SIZEOF_SIZE(f); /* Skip over local heap address */ + if (H5_IS_BUFFER_OVERFLOW(buf, H5F_sizeof_addr(f), buf_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); H5F_addr_decode(f, &buf, &(sh_mesg.u.loc.oh_addr)); } /* end if */ else if (version >= H5O_SHARED_VERSION_2) { @@ -334,6 +346,8 @@ H5O__shared_decode(H5F_t *f, H5O_t *open_oh, unsigned *ioflags, const uint8_t *b */ if (sh_mesg.type == H5O_SHARE_TYPE_SOHM) { assert(version >= H5O_SHARED_VERSION_3); + if (H5_IS_BUFFER_OVERFLOW(buf, sizeof(sh_mesg.u.heap_id), buf_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); H5MM_memcpy(&sh_mesg.u.heap_id, buf, sizeof(sh_mesg.u.heap_id)); } /* end if */ else { @@ -344,6 +358,8 @@ H5O__shared_decode(H5F_t *f, H5O_t *open_oh, unsigned *ioflags, const uint8_t *b sh_mesg.type = H5O_SHARE_TYPE_COMMITTED; sh_mesg.u.loc.index = 0; + if (H5_IS_BUFFER_OVERFLOW(buf, H5F_sizeof_addr(f), buf_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); H5F_addr_decode(f, &buf, &sh_mesg.u.loc.oh_addr); } /* end else */ } /* end else if */ diff --git a/src/H5Oshared.h b/src/H5Oshared.h index 2813badd339..d22e2e1b14c 100644 --- a/src/H5Oshared.h +++ b/src/H5Oshared.h @@ -57,7 +57,7 @@ H5O_SHARED_DECODE(H5F_t *f, H5O_t *open_oh, unsigned mesg_flags, unsigned *iofla /* Check for shared message */ if (mesg_flags & H5O_MSG_FLAG_SHARED) { /* Retrieve native message info indirectly through shared message */ - if (NULL == (ret_value = H5O__shared_decode(f, open_oh, ioflags, p, H5O_SHARED_TYPE))) + if (NULL == (ret_value = H5O__shared_decode(f, open_oh, ioflags, p_size, p, H5O_SHARED_TYPE))) HGOTO_ERROR(H5E_OHDR, H5E_CANTDECODE, NULL, "unable to decode shared message"); /* We currently do not support automatically fixing shared messages */ diff --git a/src/H5Ostab.c b/src/H5Ostab.c index 5000ca84d7d..c87034dc13f 100644 --- a/src/H5Ostab.c +++ b/src/H5Ostab.c @@ -331,6 +331,7 @@ H5O__stab_post_copy_file(const H5O_loc_t *src_oloc, const void *mesg_src, H5O_lo const H5O_stab_t *stab_src = (const H5O_stab_t *)mesg_src; H5O_stab_t *stab_dst = (H5O_stab_t *)mesg_dst; H5G_bt_it_cpy_t udata; /* B-tree user data */ + H5HL_t *heap = NULL; /* Pointer to source group's heap */ herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_PACKAGE @@ -346,18 +347,26 @@ H5O__stab_post_copy_file(const H5O_loc_t *src_oloc, const void *mesg_src, H5O_lo if (cpy_info->max_depth >= 0 && cpy_info->curr_depth >= cpy_info->max_depth) 
HGOTO_DONE(SUCCEED); + /* Get the heap for the copy*/ + if (NULL == (heap = H5HL_protect(src_oloc->file, stab_src->heap_addr, H5AC__READ_ONLY_FLAG))) + HGOTO_ERROR(H5E_SYM, H5E_PROTECT, FAIL, "unable to protect local heap"); + /* Set up B-tree iteration user data */ - udata.src_oloc = src_oloc; - udata.src_heap_addr = stab_src->heap_addr; - udata.dst_file = dst_oloc->file; - udata.dst_stab = stab_dst; - udata.cpy_info = cpy_info; + udata.src_oloc = src_oloc; + udata.src_heap = heap; + udata.src_block_size = H5HL_heap_get_size(heap); + udata.dst_file = dst_oloc->file; + udata.dst_stab = stab_dst; + udata.cpy_info = cpy_info; /* Iterate over objects in group, copying them */ if ((H5B_iterate(src_oloc->file, H5B_SNODE, stab_src->btree_addr, H5G__node_copy, &udata)) < 0) HGOTO_ERROR(H5E_SYM, H5E_CANTINIT, FAIL, "iteration operator failed"); done: + if (heap && H5HL_unprotect(heap) < 0) + HDONE_ERROR(H5E_SYM, H5E_CANTUNPROTECT, FAIL, "unable to unprotect local heap"); + FUNC_LEAVE_NOAPI(ret_value) } /* H5O__stab_post_copy_file() */ diff --git a/src/H5Ppublic.h b/src/H5Ppublic.h index 97f7ad13f7a..67425a9c02a 100644 --- a/src/H5Ppublic.h +++ b/src/H5Ppublic.h @@ -5748,7 +5748,16 @@ H5_DLL herr_t H5Pset_mdc_image_config(hid_t plist_id, H5AC_cache_image_config_t * If a non-zero page buffer size is set, and the file space strategy * is not set to paged or the page size for the file space strategy is * larger than the page buffer size, the subsequent call to H5Fcreate() - * or H5Fopen() using the \p plist_id will fail. + * using the \p plist_id will fail. + * + * \note As of HDF5 1.14.4, this property will be ignored when an existing + * file is being opened and the file space strategy stored in the + * file isn't paged. This was previously a failure. + * + * \note As of HDF5 1.14.4, if a file with a paged file space strategy is + * opened with a page size that is smaller than the file's page size, + * the page cache size will be rounded up to the file's page size. + * This was previously a failure. * * The function also allows setting the minimum percentage of pages for * metadata and raw data to prevent a certain type of data to evict hot diff --git a/src/H5S.c b/src/H5S.c index 8d64426edd8..ccae94b8fb9 100644 --- a/src/H5S.c +++ b/src/H5S.c @@ -1222,7 +1222,8 @@ H5S_set_extent_simple(H5S_t *space, unsigned rank, const hsize_t *dims, const hs FUNC_ENTER_NOAPI(FAIL) /* Check args */ - assert(rank <= H5S_MAX_RANK); + if (rank > H5S_MAX_RANK) + HGOTO_ERROR(H5E_DATASPACE, H5E_BADRANGE, FAIL, "dataspace rank too large: %u", rank); /* shift out of the previous state to a "simple" dataspace. */ if (H5S__extent_release(&space->extent) < 0) @@ -1594,6 +1595,30 @@ H5S_decode(const unsigned char **p) FUNC_LEAVE_NOAPI(ret_value) } /* end H5S_decode() */ +/*------------------------------------------------------------------------- + * Function: H5S_get_simple_extent + * + * Purpose: Internal function for retrieving the extent for a dataspace object + * + * Return: Success: Pointer to the extent for a dataspace (not copied) + * Failure: NULL + * + * Note: This routine participates in the "Inlining C function pointers" + * pattern, don't call it directly, use the appropriate macro + * defined in H5Sprivate.h. 
+ * + *------------------------------------------------------------------------- + */ +const H5S_extent_t * +H5S_get_simple_extent(const H5S_t *space) +{ + FUNC_ENTER_NOAPI_NOINIT_NOERR + + assert(space); + + FUNC_LEAVE_NOAPI(&space->extent) +} /* end H5S_get_simple_extent() */ + /*------------------------------------------------------------------------- * Function: H5S_get_simple_extent_type * diff --git a/src/H5Spoint.c b/src/H5Spoint.c index d642ea2bed0..331a9c685af 100644 --- a/src/H5Spoint.c +++ b/src/H5Spoint.c @@ -1414,6 +1414,9 @@ H5S__point_deserialize(H5S_t **space, const uint8_t **p, const size_t p_size, bo if (H5_IS_KNOWN_BUFFER_OVERFLOW(skip, pp, sizeof(uint32_t), p_end)) HGOTO_ERROR(H5E_DATASPACE, H5E_OVERFLOW, FAIL, "buffer overflow while decoding selection rank"); UINT32DECODE(pp, rank); + if (0 == rank || rank > H5S_MAX_RANK) + HGOTO_ERROR(H5E_DATASPACE, H5E_BADVALUE, FAIL, "invalid rank (%u) for serialized point selection", + rank); if (!*space) { /* Patch the rank of the allocated dataspace */ @@ -1455,10 +1458,6 @@ H5S__point_deserialize(H5S_t **space, const uint8_t **p, const size_t p_size, bo break; } /* end switch */ - /* Allocate space for the coordinates */ - if (NULL == (coord = (hsize_t *)H5MM_malloc(num_elem * rank * sizeof(hsize_t)))) - HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "can't allocate coordinate information"); - /* Determine necessary size of buffer for coordinates */ enc_type_size = 0; @@ -1479,10 +1478,19 @@ H5S__point_deserialize(H5S_t **space, const uint8_t **p, const size_t p_size, bo coordinate_buffer_requirement = num_elem * rank * enc_type_size; + /* Check for overflow during multiplication */ + if (num_elem != (coordinate_buffer_requirement / (rank * enc_type_size))) + HGOTO_ERROR(H5E_DATASPACE, H5E_OVERFLOW, FAIL, "size of point selection buffer overflowed"); + + /* Check for possible buffer overrun */ if (H5_IS_KNOWN_BUFFER_OVERFLOW(skip, pp, coordinate_buffer_requirement, p_end)) HGOTO_ERROR(H5E_DATASPACE, H5E_OVERFLOW, FAIL, "buffer overflow while decoding selection coordinates"); + /* Allocate space for the coordinates */ + if (NULL == (coord = (hsize_t *)H5MM_malloc(num_elem * rank * sizeof(hsize_t)))) + HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate coordinate information"); + /* Retrieve the coordinates from the buffer */ for (tcoord = coord, i = 0; i < num_elem; i++) for (j = 0; j < (unsigned)rank; j++, tcoord++) diff --git a/src/H5Sprivate.h b/src/H5Sprivate.h index 8fd73f54740..24a83353f70 100644 --- a/src/H5Sprivate.h +++ b/src/H5Sprivate.h @@ -130,6 +130,7 @@ typedef struct H5S_sel_iter_op_t { /* If the module using this macro is allowed access to the private variables, access them directly */ #ifdef H5S_MODULE +#define H5S_GET_EXTENT(S) (&(S)->extent) #define H5S_GET_EXTENT_TYPE(S) ((S)->extent.type) #define H5S_GET_EXTENT_NDIMS(S) ((S)->extent.rank) #define H5S_GET_EXTENT_NPOINTS(S) ((S)->extent.nelem) @@ -157,6 +158,7 @@ typedef struct H5S_sel_iter_op_t { ((*(ITER)->type->iter_get_seq_list)(ITER, MAXSEQ, MAXBYTES, NSEQ, NBYTES, OFF, LEN)) #define H5S_SELECT_ITER_RELEASE(ITER) ((*(ITER)->type->iter_release)(ITER)) #else /* H5S_MODULE */ +#define H5S_GET_EXTENT(S) (H5S_get_simple_extent(S)) #define H5S_GET_EXTENT_TYPE(S) (H5S_get_simple_extent_type(S)) #define H5S_GET_EXTENT_NDIMS(S) (H5S_get_simple_extent_ndims(S)) #define H5S_GET_EXTENT_NPOINTS(S) (H5S_get_simple_extent_npoints(S)) @@ -200,14 +202,15 @@ struct H5O_loc_t; typedef struct H5S_t H5S_t; /* Operations on dataspaces */ -H5_DLL herr_t 
H5S_init(void); -H5_DLL H5S_t *H5S_copy(const H5S_t *src, bool share_selection, bool copy_max); -H5_DLL herr_t H5S_close(H5S_t *ds); -H5_DLL H5S_class_t H5S_get_simple_extent_type(const H5S_t *ds); -H5_DLL hssize_t H5S_get_simple_extent_npoints(const H5S_t *ds); -H5_DLL hsize_t H5S_get_npoints_max(const H5S_t *ds); -H5_DLL bool H5S_has_extent(const H5S_t *ds); -H5_DLL int H5S_get_simple_extent_ndims(const H5S_t *ds); +H5_DLL herr_t H5S_init(void); +H5_DLL H5S_t *H5S_copy(const H5S_t *src, bool share_selection, bool copy_max); +H5_DLL herr_t H5S_close(H5S_t *ds); +H5_DLL const H5S_extent_t *H5S_get_simple_extent(const H5S_t *ds); +H5_DLL H5S_class_t H5S_get_simple_extent_type(const H5S_t *ds); +H5_DLL hssize_t H5S_get_simple_extent_npoints(const H5S_t *ds); +H5_DLL hsize_t H5S_get_npoints_max(const H5S_t *ds); +H5_DLL bool H5S_has_extent(const H5S_t *ds); +H5_DLL int H5S_get_simple_extent_ndims(const H5S_t *ds); H5_DLL int H5S_get_simple_extent_dims(const H5S_t *ds, hsize_t dims[] /*out*/, hsize_t max_dims[] /*out*/); H5_DLL herr_t H5S_write(H5F_t *f, struct H5O_t *oh, unsigned update_flags, H5S_t *ds); H5_DLL herr_t H5S_append(H5F_t *f, struct H5O_t *oh, H5S_t *ds); diff --git a/src/H5T.c b/src/H5T.c index 81a49e717f4..cc37d2be3c5 100644 --- a/src/H5T.c +++ b/src/H5T.c @@ -377,9 +377,10 @@ static herr_t H5T__register(H5T_pers_t pers, const char *name, H5T_t *src, H5T_t static htri_t H5T__compiler_conv(H5T_t *src, H5T_t *dst); static herr_t H5T__set_size(H5T_t *dt, size_t size); static herr_t H5T__close_cb(H5T_t *dt, void **request); +static herr_t H5T__init_path_table(void); +static bool H5T__path_table_search(const H5T_t *src, const H5T_t *dst, int *idx, int *last_cmp); static H5T_path_t *H5T__path_find_real(const H5T_t *src, const H5T_t *dst, const char *name, H5T_conv_func_t *conv); -static herr_t H5T__path_find_init_path_table(void); static herr_t H5T__path_find_init_new_path(H5T_path_t *path, const H5T_t *src, const H5T_t *dst, H5T_conv_func_t *conv, H5T_conv_ctx_t *conv_ctx); static herr_t H5T__path_free(H5T_path_t *path, H5T_conv_ctx_t *conv_ctx); @@ -4981,33 +4982,47 @@ H5T_cmp(const H5T_t *dt1, const H5T_t *dt2, bool superset) } /* end H5T_cmp() */ /*------------------------------------------------------------------------- - * Function: H5T__bsearch_path_table - * - * Purpose: Performs a binary search on the type conversion path table. - * If `last_cmp` is non-NULL, the value of the last comparison - * is returned through it. If `idx` is non-NULL, the idx into - * the path table where the matching path was found is returned - * through it. If no matching path is found, the value for - * `idx` will be the index into the path table where a path - * entry with source and destination datatypes matching src and - * dst should be inserted. In this case, the caller should be - * sure to increment the index value by 1 if the value of the - * last comparison is > 0. - * - * Return: Success: Pointer to the path in the path table - * Failure: NULL if no matching path is found in the table + * Function: H5T__path_table_search + * + * Purpose: Searches the global datatype conversion path table for a + * conversion path between two datatypes. + * + * Sets `idx` to be the index of the last table entry compared + * (which will be the index of the matching conversion path on + * success). If no matching path is found, the value for `idx` + * will be the index into the path table where a new path + * entry with source and destination datatypes matching src + * and dst should be inserted. 
In this case, the caller should + * be sure to increment the index value by 1 if the value of + * the last comparison is > 0. + * + * If `last_cmp` is non-NULL, the value of the last comparison + * (-1, 0, or 1) is returned through it. + * + * Return: Success: true (conversion path found, index in *idx) + * Failure: false (no conversion path between types) * *------------------------------------------------------------------------- */ -static void * -H5T__bsearch_path_table(const H5T_t *src, const H5T_t *dst, int *last_cmp, int *idx) +static bool +H5T__path_table_search(const H5T_t *src, const H5T_t *dst, int *idx, int *last_cmp) { - int cmp; - int lt, rt, md; - void *ret_value = NULL; + int lt, rt, md; /* Left, middle, and right edges */ + int cmp; /* Comparison result */ + bool ret_value = false; /* Return value */ FUNC_ENTER_PACKAGE_NOERR + /* Sanity check */ + assert(0 != H5T_g.npaths); + assert(src); + assert(src->shared); + assert(dst); + assert(dst->shared); + assert(idx); + + /* Find the conversion path in the table, using a binary search */ + /* NOTE: Doesn't match against entry 0, which is the no-op path */ lt = md = 1; rt = H5T_g.npaths; cmp = -1; @@ -5023,16 +5038,17 @@ H5T__bsearch_path_table(const H5T_t *src, const H5T_t *dst, int *last_cmp, int * else if (cmp > 0) lt = md + 1; else - ret_value = H5T_g.path[md]; + /* Match found */ + ret_value = true; } + /* Set middle index & comparison values */ + *idx = md; if (last_cmp) *last_cmp = cmp; - if (idx) - *idx = md; FUNC_LEAVE_NOAPI(ret_value) -} /* end H5T__bsearch_path_table() */ +} /* end H5T__path_table_search() */ /*------------------------------------------------------------------------- * Function: H5T_path_find @@ -5137,7 +5153,7 @@ H5T__path_find_real(const H5T_t *src, const H5T_t *dst, const char *name, H5T_co HGOTO_ERROR(H5E_DATATYPE, H5E_CANTGET, NULL, "unable to get conversion exception callback"); /* Make sure the path table is initialized */ - if ((0 == H5T_g.npaths) && (H5T__path_find_init_path_table() < 0)) + if ((0 == H5T_g.npaths) && (H5T__init_path_table() < 0)) HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, NULL, "unable to initialize type conversion path table"); /* Find the conversion path. If no "force conversion" flags are @@ -5149,8 +5165,11 @@ H5T__path_find_real(const H5T_t *src, const H5T_t *dst, const char *name, H5T_co src->shared->force_conv == false && dst->shared->force_conv == false && 0 == H5T_cmp(src, dst, true); if (noop_conv) matched_path = H5T_g.path[0]; - else - matched_path = H5T__bsearch_path_table(src, dst, &last_cmp, &path_idx); + else { + /* Search the table of conversion paths */ + if (H5T__path_table_search(src, dst, &path_idx, &last_cmp)) + matched_path = H5T_g.path[path_idx]; + } /* Keep a record of the number of paths in the table, in case one of the * initialization calls below (hard or soft) causes more entries to be @@ -5198,7 +5217,8 @@ H5T__path_find_real(const H5T_t *src, const H5T_t *dst, const char *name, H5T_co * and re-compute the correct location for this path if so. 
- QAK, 1/26/02 */ if (old_npaths != H5T_g.npaths) - matched_path = H5T__bsearch_path_table(src, dst, &last_cmp, &path_idx); + if (H5T__path_table_search(src, dst, &path_idx, &last_cmp)) + matched_path = H5T_g.path[path_idx]; /* Replace an existing table entry or add a new entry */ if (matched_path && new_path) { @@ -5253,7 +5273,7 @@ H5T__path_find_real(const H5T_t *src, const H5T_t *dst, const char *name, H5T_co } /* end H5T__path_find_real() */ /*------------------------------------------------------------------------- - * Function: H5T__path_find_init_path_table + * Function: H5T__init_path_table * * Purpose: Helper function to allocate and initialize the table holding * pointers to datatype conversion paths. Sets the no-op @@ -5264,26 +5284,28 @@ H5T__path_find_real(const H5T_t *src, const H5T_t *dst, const char *name, H5T_co *------------------------------------------------------------------------- */ static herr_t -H5T__path_find_init_path_table(void) +H5T__init_path_table(void) { herr_t ret_value = SUCCEED; FUNC_ENTER_PACKAGE - assert(0 == H5T_g.npaths); + /* Sanity check */ + if (0 != H5T_g.npaths) + HGOTO_ERROR(H5E_DATATYPE, H5E_ALREADYINIT, FAIL, + "datatype conversion path table is already initialized"); - if (NULL == (H5T_g.path = H5MM_malloc(H5T_DEF_CONV_TABLE_SLOTS * sizeof(H5T_path_t *)))) - HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, + if (NULL == (H5T_g.path = H5MM_calloc(H5T_DEF_CONV_TABLE_SLOTS * sizeof(H5T_path_t *)))) + HGOTO_ERROR(H5E_DATATYPE, H5E_CANTALLOC, FAIL, "memory allocation failed for type conversion path table"); - H5T_g.apaths = H5T_DEF_CONV_TABLE_SLOTS; - H5T_g.path[0] = NULL; + H5T_g.apaths = H5T_DEF_CONV_TABLE_SLOTS; /* * Allocate a path for the no-op conversion function * and set it as the first entry in the table */ if (NULL == (H5T_g.path[0] = H5FL_CALLOC(H5T_path_t))) - HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for no-op conversion path"); + HGOTO_ERROR(H5E_DATATYPE, H5E_CANTALLOC, FAIL, "memory allocation failed for no-op conversion path"); /* Initialize the no-op path */ snprintf(H5T_g.path[0]->name, sizeof(H5T_g.path[0]->name), "no-op"); @@ -5302,7 +5324,9 @@ H5T__path_find_init_path_table(void) } /* end if */ H5T_g.path[0]->is_noop = true; - H5T_g.npaths = 1; + + /* Set # of initial paths in the table */ + H5T_g.npaths = 1; done: if (ret_value < 0) { @@ -5312,7 +5336,7 @@ H5T__path_find_init_path_table(void) } FUNC_LEAVE_NOAPI(ret_value) -} +} /* end H5T__init_path_table() */ /*------------------------------------------------------------------------- * Function: H5T__path_find_init_new_path @@ -5673,6 +5697,47 @@ H5T_path_noop(const H5T_path_t *p) FUNC_LEAVE_NOAPI(p->is_noop || (p->is_hard && 0 == H5T_cmp(p->src, p->dst, false))) } /* end H5T_path_noop() */ +/*------------------------------------------------------------------------- + * Function: H5T_noop_conv + * + * Purpose: Check if a conversion between two dataypes will be a no-op + * + * Return: true / false (can't fail) + * + *------------------------------------------------------------------------- + */ +bool +H5T_noop_conv(const H5T_t *src, const H5T_t *dst) +{ + bool ret_value = false; /* Return value */ + + FUNC_ENTER_NOAPI_NOINIT_NOERR + + /* Sanity check */ + assert(src); + assert(src->shared); + assert(dst); + assert(dst->shared); + + /* Check the conversion path. If source and destination types are equal + * then its a no-op conversion, as long as neither type has a "force conversion" + * flag. Otherwise search over the conversion table entries. 
+ */ + if (src->shared->force_conv == false && dst->shared->force_conv == false && + 0 == H5T_cmp(src, dst, true)) { + ret_value = true; + } /* end if */ + else { + int idx = 0; /* Matching entry */ + + /* Search the table of conversion paths */ + if (H5T__path_table_search(src, dst, &idx, NULL)) + ret_value = H5T_path_noop(H5T_g.path[idx]); + } /* end else */ + + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5T_noop_conv() */ + /*------------------------------------------------------------------------- * Function: H5T_path_compound_subset * diff --git a/src/H5Tconv.c b/src/H5Tconv.c index 55b6d7d01da..fe207b0c89d 100644 --- a/src/H5Tconv.c +++ b/src/H5Tconv.c @@ -1186,6 +1186,11 @@ typedef struct H5T_conv_enum_t { int *src2dst; /*map from src to dst index */ } H5T_conv_enum_t; +/* Cnversion data for H5T__conv_array() */ +typedef struct H5T_conv_array_t { + H5T_path_t *tpath; /* Conversion path for parent types */ +} H5T_conv_array_t; + /* Conversion data for the hardware conversion functions */ typedef struct H5T_conv_hw_t { size_t s_aligned; /*number source elements aligned */ @@ -1221,9 +1226,6 @@ static herr_t H5T__reverse_order(uint8_t *rev, uint8_t *s, size_t size, H5T_orde /* Declare a free list to manage pieces of vlen data */ H5FL_BLK_DEFINE_STATIC(vlen_seq); -/* Declare a free list to manage pieces of array data */ -H5FL_BLK_DEFINE_STATIC(array_seq); - /* Declare a free list to manage pieces of reference data */ H5FL_BLK_DEFINE_STATIC(ref_seq); @@ -2521,7 +2523,7 @@ H5T__conv_struct(const H5T_t *src, const H5T_t *dst, H5T_cdata_t *cdata, const H } /* end if */ else offset -= dst_memb->size; - memmove(xbkg + dst_memb->offset, xbuf + offset, dst_memb->size); + memcpy(xbkg + dst_memb->offset, xbuf + offset, dst_memb->size); } /* end for */ tmp_conv_ctx.u.conv.recursive = false; @@ -2543,7 +2545,7 @@ H5T__conv_struct(const H5T_t *src, const H5T_t *dst, H5T_cdata_t *cdata, const H * buffer. */ for (xbuf = buf, xbkg = bkg, elmtno = 0; elmtno < nelmts; elmtno++) { - memmove(xbuf, xbkg, dst->shared->size); + memcpy(xbuf, xbkg, dst->shared->size); xbuf += buf_stride ? 
buf_stride : dst->shared->size;
             xbkg += bkg_delta;
         } /* end for */
@@ -2743,7 +2745,7 @@ H5T__conv_struct_opt(const H5T_t *src, const H5T_t *dst, H5T_cdata_t *cdata, con
             copy_size = priv->subset_info.copy_size;

             for (elmtno = 0; elmtno < nelmts; elmtno++) {
-                memmove(xbkg, xbuf, copy_size);
+                memcpy(xbkg, xbuf, copy_size);

                 /* Update pointers */
                 xbuf += buf_stride;
@@ -2781,7 +2783,7 @@ H5T__conv_struct_opt(const H5T_t *src, const H5T_t *dst, H5T_cdata_t *cdata, con
                                 "unable to convert compound datatype member");

                 for (elmtno = 0; elmtno < nelmts; elmtno++) {
-                    memmove(xbkg, xbuf, dst_memb->size);
+                    memcpy(xbkg, xbuf, dst_memb->size);
                     xbuf += buf_stride;
                     xbkg += bkg_stride;
                 } /* end for */
@@ -2826,7 +2828,7 @@ H5T__conv_struct_opt(const H5T_t *src, const H5T_t *dst, H5T_cdata_t *cdata, con
                     HGOTO_ERROR(H5E_DATATYPE, H5E_CANTCONVERT, FAIL,
                                 "unable to convert compound datatype member");
                 for (elmtno = 0; elmtno < nelmts; elmtno++) {
-                    memmove(xbkg, xbuf, dst_memb->size);
+                    memcpy(xbkg, xbuf, dst_memb->size);
                     xbuf += buf_stride;
                     xbkg += bkg_stride;
                 } /* end for */
@@ -2840,7 +2842,7 @@ H5T__conv_struct_opt(const H5T_t *src, const H5T_t *dst, H5T_cdata_t *cdata, con

         /* Move background buffer into result buffer */
         for (xbuf = buf, xbkg = bkg, elmtno = 0; elmtno < nelmts; elmtno++) {
-            memmove(xbuf, xbkg, dst->shared->size);
+            memcpy(xbuf, xbkg, dst->shared->size);
             xbuf += buf_stride;
             xbkg += bkg_stride;
         } /* end for */
@@ -3094,7 +3096,7 @@ H5T__conv_enum_init(const H5T_t *src, const H5T_t *dst, H5T_cdata_t *cdata, cons
     }

     FUNC_LEAVE_NOAPI(ret_value)
-}
+} /* end H5T__conv_enum_init() */

 /*-------------------------------------------------------------------------
  * Function:    H5T__conv_enum
@@ -3904,20 +3906,19 @@ H5T__conv_vlen(const H5T_t *src, const H5T_t *dst, H5T_cdata_t *cdata, const H5T
 herr_t
 H5T__conv_array(const H5T_t *src, const H5T_t *dst, H5T_cdata_t *cdata,
                 const H5T_conv_ctx_t H5_ATTR_UNUSED *conv_ctx, size_t nelmts, size_t buf_stride,
-                size_t bkg_stride, void *_buf, void H5_ATTR_UNUSED *_bkg)
-{
-    H5T_conv_ctx_t tmp_conv_ctx = {0};          /* Temporary conversion context */
-    H5T_path_t    *tpath;                       /* Type conversion path */
-    H5T_t         *tsrc_cpy = NULL;             /*temporary copy of source base datatype */
-    H5T_t         *tdst_cpy = NULL;             /*temporary copy of destination base datatype */
-    hid_t          tsrc_id = H5I_INVALID_HID;   /*temporary type atom */
-    hid_t          tdst_id = H5I_INVALID_HID;   /*temporary type atom */
-    uint8_t       *sp, *dp;                     /*source and dest traversal ptrs */
-    ssize_t        src_delta, dst_delta;        /*source & destination stride */
-    int            direction;                   /*direction of traversal */
-    bool           need_ids = false;            /*whether we need IDs for the datatypes */
-    void          *bkg_buf   = NULL;            /*temporary background buffer */
-    herr_t         ret_value = SUCCEED;         /* Return value */
+                size_t bkg_stride, void *_buf, void *_bkg)
+{
+    H5T_conv_array_t *priv = NULL;                     /* Private conversion data */
+    H5T_conv_ctx_t    tmp_conv_ctx = {0};              /* Temporary conversion context */
+    H5T_t            *tsrc_cpy = NULL;                 /* Temporary copy of source base datatype */
+    H5T_t            *tdst_cpy = NULL;                 /* Temporary copy of destination base datatype */
+    hid_t             tsrc_id = H5I_INVALID_HID;       /* Temporary type atom */
+    hid_t             tdst_id = H5I_INVALID_HID;       /* Temporary type atom */
+    uint8_t          *sp, *dp, *bp;                    /* Source, dest, and bkg traversal ptrs */
+    ssize_t           src_delta, dst_delta, bkg_delta; /* Source, dest, and bkg strides */
+    int               direction;                       /* Direction of traversal */
+    bool              need_ids = false;                /* Whether we need IDs for the datatypes */
+    herr_t            ret_value = SUCCEED;             /* Return value */

     FUNC_ENTER_PACKAGE

@@ -3944,13 +3945,35 @@ H5T__conv_array(const H5T_t *src, const H5T_t *dst, H5T_cdata_t *cdata,
                     HGOTO_ERROR(H5E_DATATYPE, H5E_UNSUPPORTED, FAIL,
                                 "array datatypes do not have the same sizes of dimensions");

-            /* Array datatypes don't need a background buffer */
-            cdata->need_bkg = H5T_BKG_NO;
+            /* Initialize parent type conversion if necessary. We need to do this here because we need to
+             * report whether we need a background buffer or not. */
+            if (!cdata->priv) {
+                /* Allocate private data */
+                if (NULL == (priv = (H5T_conv_array_t *)(cdata->priv = calloc(1, sizeof(*priv)))))
+                    HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed");
+
+                /* Find conversion path between parent types */
+                if (NULL == (priv->tpath = H5T_path_find(src->shared->parent, dst->shared->parent))) {
+                    free(priv);
+                    cdata->priv = NULL;
+                    HGOTO_ERROR(H5E_DATATYPE, H5E_UNSUPPORTED, FAIL,
+                                "unable to convert between src and dest datatype");
+                }
+
+                /* Array datatypes don't need a background buffer by themselves, but the parent type might.
+                 * Pass the need_bkg field through to the upper layer. */
+                cdata->need_bkg = priv->tpath->cdata.need_bkg;
+            }

             break;

         case H5T_CONV_FREE:
-            /* QAK - Nothing to do currently */
+            /*
+             * Free private data
+             */
+            free(cdata->priv);
+            cdata->priv = NULL;
+
             break;

         case H5T_CONV_CONV:
@@ -3961,6 +3984,7 @@ H5T__conv_array(const H5T_t *src, const H5T_t *dst, H5T_cdata_t *cdata,
                 HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a datatype");
             if (NULL == conv_ctx)
                 HGOTO_ERROR(H5E_DATATYPE, H5E_BADVALUE, FAIL, "invalid datatype conversion context pointer");
+            priv = (H5T_conv_array_t *)cdata->priv;

             /* Initialize temporary conversion context */
             tmp_conv_ctx = *conv_ctx;
@@ -3974,11 +3998,14 @@ H5T__conv_array(const H5T_t *src, const H5T_t *dst, H5T_cdata_t *cdata,
              */
            if (src->shared->size >= dst->shared->size || buf_stride > 0) {
                 sp = dp   = (uint8_t *)_buf;
+                bp        = _bkg;
                 direction = 1;
             }
             else {
-                sp = (uint8_t *)_buf + (nelmts - 1) * (buf_stride ? buf_stride : src->shared->size);
-                dp = (uint8_t *)_buf + (nelmts - 1) * (buf_stride ? buf_stride : dst->shared->size);
+                sp = (uint8_t *)_buf + (nelmts - 1) * (buf_stride ? buf_stride : src->shared->size);
+                dp = (uint8_t *)_buf + (nelmts - 1) * (buf_stride ? buf_stride : dst->shared->size);
+                bp = _bkg ? (uint8_t *)_bkg + (nelmts - 1) * (bkg_stride ? bkg_stride : dst->shared->size)
+                          : NULL;
                 direction = -1;
             }

@@ -3990,13 +4017,10 @@ H5T__conv_array(const H5T_t *src, const H5T_t *dst, H5T_cdata_t *cdata,
             H5_CHECK_OVERFLOW(dst->shared->size, size_t, ssize_t);
             src_delta = (ssize_t)direction * (ssize_t)(buf_stride ? buf_stride : src->shared->size);
             dst_delta = (ssize_t)direction * (ssize_t)(buf_stride ? buf_stride : dst->shared->size);
+            bkg_delta = (ssize_t)direction * (ssize_t)(bkg_stride ? bkg_stride : dst->shared->size);

             /* Set up conversion path for base elements */
-            if (NULL == (tpath = H5T_path_find(src->shared->parent, dst->shared->parent))) {
-                HGOTO_ERROR(H5E_DATATYPE, H5E_UNSUPPORTED, FAIL,
-                            "unable to convert between src and dest datatypes");
-            }
-            else if (!H5T_path_noop(tpath)) {
+            if (!H5T_path_noop(priv->tpath)) {
                 if (NULL == (tsrc_cpy = H5T_copy(src->shared->parent, H5T_COPY_ALL)))
                     HGOTO_ERROR(H5E_DATATYPE, H5E_CANTCOPY, FAIL,
                                 "unable to copy src base type for conversion");
@@ -4019,17 +4043,6 @@ H5T__conv_array(const H5T_t *src, const H5T_t *dst, H5T_cdata_t *cdata,
                 tmp_conv_ctx.u.conv.dst_type_id = tdst_id;
             }

-            /* Check if we need a background buffer for this conversion */
-            if (tpath->cdata.need_bkg) {
-                size_t bkg_buf_size; /*size of background buffer in bytes */
-
-                /* Allocate background buffer */
-                bkg_buf_size = src->shared->u.array.nelem * MAX(src->shared->size, dst->shared->size);
-                if (NULL == (bkg_buf = H5FL_BLK_CALLOC(array_seq, bkg_buf_size)))
-                    HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL,
-                                "memory allocation failed for type conversion");
-            } /* end if */
-
             /* Perform the actual conversion */
             tmp_conv_ctx.u.conv.recursive = true;
             for (size_t elmtno = 0; elmtno < nelmts; elmtno++) {
@@ -4037,13 +4050,15 @@ H5T__conv_array(const H5T_t *src, const H5T_t *dst, H5T_cdata_t *cdata,
                 memmove(dp, sp, src->shared->size);

                 /* Convert array */
-                if (H5T_convert_with_ctx(tpath, tsrc_cpy, tdst_cpy, &tmp_conv_ctx, src->shared->u.array.nelem,
-                                         (size_t)0, bkg_stride, dp, bkg_buf) < 0)
+                if (H5T_convert_with_ctx(priv->tpath, tsrc_cpy, tdst_cpy, &tmp_conv_ctx,
+                                         src->shared->u.array.nelem, (size_t)0, (size_t)0, dp, bp) < 0)
                     HGOTO_ERROR(H5E_DATATYPE, H5E_CANTCONVERT, FAIL, "datatype conversion failed");

-                /* Advance the source & destination pointers */
+                /* Advance the source, destination, and background pointers */
                 sp += src_delta;
                 dp += dst_delta;
+                if (bp)
+                    bp += bkg_delta;
             } /* end for */
             tmp_conv_ctx.u.conv.recursive = false;

@@ -4071,10 +4086,6 @@ H5T__conv_array(const H5T_t *src, const H5T_t *dst, H5T_cdata_t *cdata,
             HDONE_ERROR(H5E_DATATYPE, H5E_CANTCLOSEOBJ, FAIL, "can't close temporary datatype");
     }

-    /* Release the background buffer, if we have one */
-    if (bkg_buf)
-        bkg_buf = H5FL_BLK_FREE(array_seq, bkg_buf);
-
     FUNC_LEAVE_NOAPI(ret_value)
 } /* end H5T__conv_array() */

diff --git a/src/H5Tprivate.h b/src/H5Tprivate.h
index 46b2c92fa83..99ea256b27d 100644
--- a/src/H5Tprivate.h
+++ b/src/H5Tprivate.h
@@ -131,6 +131,7 @@ H5_DLL herr_t H5T_convert_committed_datatype(H5T_t *dt, H5F_t *f);
 H5_DLL htri_t             H5T_is_relocatable(const H5T_t *dt);
 H5_DLL H5T_path_t        *H5T_path_find(const H5T_t *src, const H5T_t *dst);
 H5_DLL bool               H5T_path_noop(const H5T_path_t *p);
+H5_DLL bool               H5T_noop_conv(const H5T_t *src, const H5T_t *dst);
 H5_DLL H5T_bkg_t          H5T_path_bkg(const H5T_path_t *p);
 H5_DLL H5T_subset_info_t *H5T_path_compound_subset(const H5T_path_t *p);
 H5_DLL herr_t             H5T_unregister(H5T_pers_t pers, const char *name, H5T_t *src, H5T_t *dst,
diff --git a/src/H5VLnative_blob.c b/src/H5VLnative_blob.c
index 718d5487561..890e82dc1b5 100644
--- a/src/H5VLnative_blob.c
+++ b/src/H5VLnative_blob.c
@@ -113,14 +113,17 @@ H5VL__native_blob_get(void *obj, const void *blob_id, void *buf, size_t size, vo
     UINT32DECODE(id, hobjid.idx);

     /* Check if this sequence actually has any data */
-    if (hobjid.addr > 0)
+    if (hobjid.addr > 0) {
+        /* Verify the size is correct */
+        if (H5HG_get_obj_size(f, &hobjid, &hobj_size) < 0)
+            HGOTO_ERROR(H5E_VOL, H5E_CANTGETSIZE, FAIL, "can't get object size");
+        if (hobj_size != size)
+            HGOTO_ERROR(H5E_VOL, H5E_BADSIZE, FAIL, "Expected global heap object size does not match");
+
         /* Read the VL information from disk */
         if (NULL == H5HG_read(f, &hobjid, buf, &hobj_size))
             HGOTO_ERROR(H5E_VOL, H5E_READERROR, FAIL, "unable to read VL information");
-
-    /* Verify the size is correct */
-    if (hobj_size != size)
-        HGOTO_ERROR(H5E_VOL, H5E_CANTDECODE, FAIL, "Expected global heap object size does not match");
+    }

 done:
     FUNC_LEAVE_NOAPI(ret_value)
diff --git a/src/H5Znbit.c b/src/H5Znbit.c
index fb5c5c51f8d..429394d56c8 100644
--- a/src/H5Znbit.c
+++ b/src/H5Znbit.c
@@ -947,8 +947,10 @@ H5Z__filter_nbit(unsigned flags, size_t cd_nelmts, const unsigned cd_values[], s
             HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, 0, "memory allocation failed for nbit decompression");

         /* decompress the buffer */
-        if (H5Z__nbit_decompress(outbuf, d_nelmts, (unsigned char *)*buf, cd_values) < 0)
+        if (H5Z__nbit_decompress(outbuf, d_nelmts, (unsigned char *)*buf, cd_values) < 0) {
+            H5MM_xfree(outbuf);
             HGOTO_ERROR(H5E_PLINE, H5E_CANTFILTER, 0, "can't decompress buffer");
+        }
     } /* end if */
     /* output; compress */
     else {
@@ -1180,7 +1182,7 @@ static herr_t
 H5Z__nbit_decompress_one_compound(unsigned char *data, size_t data_offset, unsigned char *buffer,
                                   size_t *j, size_t *buf_len, const unsigned parms[],
                                   unsigned *parms_index)
 {
-    unsigned     i, nmembers, member_offset, member_class, member_size, used_size = 0, size;
+    unsigned     i, nmembers, member_offset, member_class, member_size, used_size = 0, prev_used_size, size;
     parms_atomic p;
     herr_t       ret_value = SUCCEED; /* Return value */
@@ -1194,10 +1196,15 @@ H5Z__nbit_decompress_one_compound(unsigned char *data, size_t data_offset, unsig
         member_class  = parms[(*parms_index)++];

         /* Check for overflow */
-        member_size = parms[*parms_index];
+        member_size    = parms[*parms_index];
+        prev_used_size = used_size;
         used_size += member_size;
         if (used_size > size)
-            HGOTO_ERROR(H5E_PLINE, H5E_BADTYPE, FAIL, "compound member offset overflowed compound size");
+            HGOTO_ERROR(H5E_PLINE, H5E_BADVALUE, FAIL, "compound member size overflowed compound size");
+        if (used_size <= prev_used_size)
+            HGOTO_ERROR(H5E_PLINE, H5E_BADVALUE, FAIL, "compound member size overflowed compound size");
+        if ((member_offset + member_size) > size)
+            HGOTO_ERROR(H5E_PLINE, H5E_BADRANGE, FAIL, "compound member offset overflowed compound size");
         switch (member_class) {
             case H5Z_NBIT_ATOMIC:
                 p.size = member_size;
diff --git a/src/H5build_settings.autotools.c.in b/src/H5build_settings.autotools.c.in
index abdc53a9cd3..edde377b43a 100644
--- a/src/H5build_settings.autotools.c.in
+++ b/src/H5build_settings.autotools.c.in
@@ -99,6 +99,7 @@ const char H5build_settings[]=
     "            Default API mapping: @DEFAULT_API_VERSION@\n"
     " With deprecated public symbols: @DEPRECATED_SYMBOLS@\n"
     "         I/O filters (external): @EXTERNAL_FILTERS@\n"
+    "               _Float16 support: @HAVE__FLOAT16@\n"
    "                  Map (H5M) API: @MAP_API@\n"
     "                     Direct VFD: @DIRECT_VFD@\n"
     "                     Mirror VFD: @MIRROR_VFD@\n"
diff --git a/src/H5build_settings.cmake.c.in b/src/H5build_settings.cmake.c.in
index 67ebec7e45d..c1139b465e6 100644
--- a/src/H5build_settings.cmake.c.in
+++ b/src/H5build_settings.cmake.c.in
@@ -98,6 +98,7 @@ const char H5build_settings[]=
     "            Default API mapping: @DEFAULT_API_VERSION@\n"
     " With deprecated public symbols: @HDF5_ENABLE_DEPRECATED_SYMBOLS@\n"
     "         I/O filters (external): @EXTERNAL_FILTERS@\n"
+    "               _Float16 support: @HDF5_ENABLE_NONSTANDARD_FEATURE_FLOAT16@\n"
     "                  Map (H5M) API: @H5_HAVE_MAP_API@\n"
     "                     Direct VFD: @H5_HAVE_DIRECT@\n"
     "                     Mirror VFD: @H5_HAVE_MIRROR_VFD@\n"
diff --git a/src/libhdf5.settings.in b/src/libhdf5.settings.in
index f9e7928e7e1..87219969bf3 100644
--- a/src/libhdf5.settings.in
+++ b/src/libhdf5.settings.in
@@ -80,6 +80,7 @@ Dimension scales w/ new references: @DIMENSION_SCALES_WITH_NEW_REF@
             Default API mapping: @DEFAULT_API_VERSION@
  With deprecated public symbols: @DEPRECATED_SYMBOLS@
          I/O filters (external): @EXTERNAL_FILTERS@
+               _Float16 support: @HAVE__FLOAT16@
                   Map (H5M) API: @MAP_API@
                      Direct VFD: @DIRECT_VFD@
                      Mirror VFD: @MIRROR_VFD@
diff --git a/test/CMakePassthroughVOLTests.cmake b/test/CMakePassthroughVOLTests.cmake
index fbff51aebf0..55f112b90d2 100644
--- a/test/CMakePassthroughVOLTests.cmake
+++ b/test/CMakePassthroughVOLTests.cmake
@@ -182,6 +182,15 @@ add_custom_target(HDF5_VOLTEST_LIB_files ALL COMMENT "Copying files needed by HD
 macro (ADD_VOL_TEST volname volinfo resultcode)
   #message(STATUS "volname=${volname} volinfo=${volinfo}")

+  foreach (h5_test ${H5_EXPRESS_TESTS})
+    if (NOT h5_test IN_LIST H5_VOL_SKIP_TESTS)
+      if (WIN32)
+        CHECK_VOL_TEST (${h5_test} ${volname} "${volinfo}" ${resultcode})
+      else ()
+        DO_VOL_TEST (${h5_test} ${volname} "${volinfo}" ${resultcode})
+      endif ()
+    endif ()
+  endforeach ()
   foreach (h5_test ${H5_TESTS})
     if (NOT h5_test IN_LIST H5_VOL_SKIP_TESTS)
       if (WIN32)
diff --git a/test/CMakeVFDTests.cmake b/test/CMakeVFDTests.cmake
index a3b40a6107a..6086cbc9537 100644
--- a/test/CMakeVFDTests.cmake
+++ b/test/CMakeVFDTests.cmake
@@ -203,6 +203,15 @@ add_custom_target(HDF5_VFDTEST_LIB_files ALL COMMENT "Copying files needed by HD
 endmacro ()

 macro (ADD_VFD_TEST vfdname resultcode)
+  foreach (h5_test ${H5_EXPRESS_TESTS})
+    if (NOT h5_test IN_LIST H5_VFD_SKIP_TESTS)
+      if (WIN32)
+        CHECK_VFD_TEST (${h5_test} ${vfdname} ${resultcode})
+      else ()
+        DO_VFD_TEST (${h5_test} ${vfdname} ${resultcode})
+      endif ()
+    endif ()
+  endforeach ()
   foreach (h5_test ${H5_TESTS})
     if (NOT h5_test IN_LIST H5_VFD_SKIP_TESTS)
       if (WIN32)
diff --git a/test/chunk_info.c b/test/chunk_info.c
index fba429f2b2b..a5dde34c5a7 100644
--- a/test/chunk_info.c
+++ b/test/chunk_info.c
@@ -2367,9 +2367,9 @@ test_chunk_address_with_userblock(hid_t fapl_id)
         int data    = -1;
         int data_ub = -1;

-        if (HDlseek(fd, (off_t)(od.addresses[i]), SEEK_SET) < 0)
+        if (HDlseek(fd, (HDoff_t)(od.addresses[i]), SEEK_SET) < 0)
             TEST_ERROR;
-        if (HDlseek(fd_ub, (off_t)(od_ub.addresses[i]), SEEK_SET) < 0)
+        if (HDlseek(fd_ub, (HDoff_t)(od_ub.addresses[i]), SEEK_SET) < 0)
             TEST_ERROR;

         if (HDread(fd, &data, sizeof(int)) != sizeof(int))
diff --git a/test/dtypes.c b/test/dtypes.c
index 836d6d89eb0..ea589583e65 100644
--- a/test/dtypes.c
+++ b/test/dtypes.c
@@ -73,9 +73,9 @@
         }                                                                                                    \
     } while (0)

-static const char *FILENAME[] = {"dtypes0", "dtypes1", "dtypes2", "dtypes3", "dtypes4",
-                                 "dtypes5", "dtypes6", "dtypes7", "dtypes8", "dtypes9",
-                                 "dtypes10", "dtypes11", NULL};
+static const char *FILENAME[] = {"dtypes0",  "dtypes1",  "dtypes2",  "dtypes3", "dtypes4",
+                                 "dtypes5",  "dtypes6",  "dtypes7",  "dtypes8", "dtypes9",
+                                 "dtypes10", "dtypes11", "dtypes12", NULL};

 #define TESTFILE "bad_compound.h5"

@@ -6464,6 +6464,217 @@ test__Float16(void)
 #endif
 }

+/*-------------------------------------------------------------------------
+ * Function:    test_array_cmpd_vl
+ *
+ * Purpose:     Tests that conversion occurs correctly with an array of
+ *              arrays of compounds containing a variable length sequence.
+ *
+ * Return:      Success:        0
+ *
+ *              Failure:        number of errors
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+test_array_cmpd_vl(void)
+{
+    typedef struct cmpd_struct {
+        hvl_t vl;
+    } cmpd_struct;
+
+    int         int_wdata[2][3][2] = {{{0, 1}, {2, 3}, {4, 5}}, {{6, 7}, {8, 9}, {10, 11}}};
+    cmpd_struct wdata[2][3];
+    cmpd_struct rdata[2][3];
+    hid_t       file;
+    hid_t       vl_tid, cmpd_tid, inner_array_tid, outer_array_tid;
+    hid_t       space_id;
+    hid_t       dset_id;
+    hsize_t     dim1[1];
+    char        filename[1024];
+
+    TESTING("array of arrays of compounds with a vlen");
+
+    /* Create File */
+    h5_fixname(FILENAME[12], H5P_DEFAULT, filename, sizeof filename);
+    if ((file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+        H5_FAILED();
+        AT();
+        printf("Can't create file!\n");
+        goto error;
+    } /* end if */
+
+    /* Create VL of ints datatype */
+    if ((vl_tid = H5Tvlen_create(H5T_NATIVE_INT)) < 0) {
+        H5_FAILED();
+        AT();
+        printf("Can't create datatype!\n");
+        goto error;
+    } /* end if */
+
+    /* Create compound datatype */
+    if ((cmpd_tid = H5Tcreate(H5T_COMPOUND, sizeof(struct cmpd_struct))) < 0) {
+        H5_FAILED();
+        AT();
+        printf("Can't create datatype!\n");
+        goto error;
+    } /* end if */
+
+    if (H5Tinsert(cmpd_tid, "vl", HOFFSET(struct cmpd_struct, vl), vl_tid) < 0) {
+        H5_FAILED();
+        AT();
+        printf("Can't insert field 'vl'\n");
+        goto error;
+    } /* end if */
+
+    /* Create inner array type */
+    dim1[0] = 3;
+    if ((inner_array_tid = H5Tarray_create2(cmpd_tid, 1, dim1)) < 0) {
+        H5_FAILED();
+        AT();
+        printf("Can't create datatype!\n");
+        goto error;
+    } /* end if */
+
+    /* Create outer array type */
+    dim1[0] = 2;
+    if ((outer_array_tid = H5Tarray_create2(inner_array_tid, 1, dim1)) < 0) {
+        H5_FAILED();
+        AT();
+        printf("Can't create datatype!\n");
+        goto error;
+    } /* end if */
+
+    /* Create space, dataset */
+    dim1[0] = 1;
+    if ((space_id = H5Screate_simple(1, dim1, NULL)) < 0) {
+        H5_FAILED();
+        AT();
+        printf("Can't create space\n");
+        goto error;
+    } /* end if */
+
+    if ((dset_id = H5Dcreate2(file, "Dataset", outer_array_tid, space_id, H5P_DEFAULT, H5P_DEFAULT,
+                              H5P_DEFAULT)) < 0) {
+        H5_FAILED();
+        AT();
+        printf("Can't create dataset\n");
+        goto error;
+    } /* end if */
+
+    /* Initialize wdata */
+    for (int i = 0; i < 2; i++)
+        for (int j = 0; j < 3; j++) {
+            wdata[i][j].vl.len = 2;
+            wdata[i][j].vl.p   = int_wdata[i][j];
+        }
+
+    /* Write data */
+    if (H5Dwrite(dset_id, outer_array_tid, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata) < 0) {
+        H5_FAILED();
+        AT();
+        printf("Can't write data\n");
+        goto error;
+    } /* end if */
+
+    /* Initialize rdata */
+    (void)memset(rdata, 0, sizeof(rdata));
+
+    /* Read data */
+    if (H5Dread(dset_id, outer_array_tid, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata) < 0) {
+        H5_FAILED();
+        AT();
+        printf("Can't read data\n");
+        goto error;
+    } /* end if */
+
+    /* Check for correctness of read data */
+    for (int i = 0; i < 2; i++)
+        for (int j = 0; j < 3; j++)
+            if (rdata[i][j].vl.len != 2 || ((int *)rdata[i][j].vl.p)[0] != int_wdata[i][j][0] ||
+                ((int *)rdata[i][j].vl.p)[1] != int_wdata[i][j][1]) {
+                H5_FAILED();
+                AT();
+                printf("incorrect read data at [%d][%d]\n", i, j);
+                goto error;
+            }
+
+    /* Reclaim memory */
+    if (H5Treclaim(outer_array_tid, space_id, H5P_DEFAULT, rdata) < 0) {
+        H5_FAILED();
+        AT();
+        printf("Can't reclaim memory\n");
+        goto error;
+    } /* end if */
+
+    /* Adjust write buffer */
+    for (int i = 0; i < 2; i++)
+        for (int j = 0; j < 3; j++) {
+            int_wdata[i][j][0] += 100;
+            int_wdata[i][j][1] += 100;
+        }
+
+    /* Overwrite dataset with adjusted wdata */
+    if (H5Dwrite(dset_id, outer_array_tid, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata) < 0) {
+        H5_FAILED();
+        AT();
+        printf("Can't write data\n");
+        goto error;
+    } /* end if */
+
+    /* Initialize rdata */
+    (void)memset(rdata, 0, sizeof(rdata));
+
+    /* Read data */
+    if (H5Dread(dset_id, outer_array_tid, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata) < 0) {
+        H5_FAILED();
+        AT();
+        printf("Can't read data\n");
+        goto error;
+    } /* end if */
+
+    /* Check for correctness of read data */
+    for (int i = 0; i < 2; i++)
+        for (int j = 0; j < 3; j++)
+            if (rdata[i][j].vl.len != 2 || ((int *)rdata[i][j].vl.p)[0] != int_wdata[i][j][0] ||
+                ((int *)rdata[i][j].vl.p)[1] != int_wdata[i][j][1]) {
+                H5_FAILED();
+                AT();
+                printf("incorrect read data at [%d][%d]\n", i, j);
+                goto error;
+            }
+
+    /* Reclaim memory */
+    if (H5Treclaim(outer_array_tid, space_id, H5P_DEFAULT, rdata) < 0) {
+        H5_FAILED();
+        AT();
+        printf("Can't reclaim memory\n");
+        goto error;
+    } /* end if */
+
+    /* Close */
+    if (H5Dclose(dset_id) < 0)
+        goto error;
+    if (H5Tclose(outer_array_tid) < 0)
+        goto error;
+    if (H5Tclose(inner_array_tid) < 0)
+        goto error;
+    if (H5Tclose(cmpd_tid) < 0)
+        goto error;
+    if (H5Tclose(vl_tid) < 0)
+        goto error;
+    if (H5Sclose(space_id) < 0)
+        goto error;
+    if (H5Fclose(file) < 0)
+        goto error;
+
+    PASSED();
+    return 0;
+
+error:
+    return 1;
+} /* end test_array_cmpd_vl() */
+
 /*-------------------------------------------------------------------------
  * Function:    test_encode
@@ -9700,6 +9911,7 @@ main(void)
     nerrors += test_bitfield_funcs();
     nerrors += test_opaque();
     nerrors += test_set_order();
+    nerrors += test_array_cmpd_vl();
     nerrors += test__Float16();

diff --git a/test/page_buffer.c b/test/page_buffer.c
index 8c977fedf44..05fa148a152 100644
--- a/test/page_buffer.c
+++ b/test/page_buffer.c
@@ -1650,6 +1650,114 @@ test_min_threshold(hid_t orig_fapl, const char *driver_name)

 } /* test_min_threshold */

+/*-------------------------------------------------------------------------
+ * Function:    test_pb_fapl_tolerance_at_open()
+ *
+ * Purpose:     Tests if the library tolerates setting fapl page buffer
+ *              values via H5Pset_page_buffer_size() when opening a file
+ *              that does not use page buffering or has a size smaller
+ *              than the file's page size.
+ *
+ *              As of HDF5 1.14.4, these should succeed.
+ *
+ * Return:      0 if test is successful
+ *              1 if test fails
+ *
+ *-------------------------------------------------------------------------
+ */
+static unsigned
+test_pb_fapl_tolerance_at_open(void)
+{
+    const char *filename = "pb_fapl_tolerance.h5";
+    hid_t       fapl     = H5I_INVALID_HID;
+    hid_t       fcpl     = H5I_INVALID_HID;
+    hid_t       fid      = H5I_INVALID_HID;
+    H5F_t      *f        = NULL;
+
+    TESTING("if opening non-page-buffered files works w/ H5Pset_page_buffer_size()");
+
+    /* Create a file WITHOUT page buffering */
+    if ((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0)
+        TEST_ERROR;
+    if (H5Fclose(fid) < 0)
+        TEST_ERROR;
+
+    /* Set up page buffering values on a fapl */
+    if ((fapl = H5Pcreate(H5P_FILE_ACCESS)) < 0)
+        TEST_ERROR;
+    if (H5Pset_page_buffer_size(fapl, 512, 0, 0) < 0)
+        TEST_ERROR;
+
+    /* Attempt to open non-page-buf file w/ page buf fapl. Should succeed,
+     * but without a page buffer.
+     */
+    if ((fid = H5Fopen(filename, H5F_ACC_RDWR, fapl)) < 0)
+        TEST_ERROR;
+    if (NULL == (f = (H5F_t *)H5VL_object(fid)))
+        TEST_ERROR;
+    if (f->shared->fs_strategy == H5F_FSPACE_STRATEGY_PAGE)
+        TEST_ERROR;
+    if (f->shared->page_buf != NULL)
+        TEST_ERROR;
+    if (H5Fclose(fid) < 0)
+        TEST_ERROR;
+
+    /* Set up a fcpl with a page size that is larger than the fapl size */
+    if ((fcpl = H5Pcreate(H5P_FILE_CREATE)) < 0)
+        TEST_ERROR;
+    if (H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, false, 1) < 0)
+        TEST_ERROR;
+    if (H5Pset_file_space_page_size(fcpl, 4096) < 0)
+        TEST_ERROR;
+
+    /* Create a file that uses page buffering with a larger page size */
+    if ((fid = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, H5P_DEFAULT)) < 0)
+        TEST_ERROR;
+    if (H5Fclose(fid) < 0)
+        TEST_ERROR;
+
+    /* Attempt to open page-buf file w/ fapl page buf size that is too small.
+     * Should succeed with a page buffer size that matches the file's page size.
+     */
+    if ((fid = H5Fopen(filename, H5F_ACC_RDWR, fapl)) < 0)
+        TEST_ERROR;
+    if (NULL == (f = (H5F_t *)H5VL_object(fid)))
+        TEST_ERROR;
+    if (f->shared->fs_strategy != H5F_FSPACE_STRATEGY_PAGE)
+        TEST_ERROR;
+    if (f->shared->page_buf == NULL)
+        TEST_ERROR;
+    if (f->shared->fs_page_size != 4096)
+        TEST_ERROR;
+    if (H5Fclose(fid) < 0)
+        TEST_ERROR;
+
+    /* Shut down */
+    if (H5Pclose(fcpl) < 0)
+        TEST_ERROR;
+    if (H5Pclose(fapl) < 0)
+        TEST_ERROR;
+
+    HDremove(filename);
+
+    PASSED();
+
+    return 0;
+
+error:
+
+    H5E_BEGIN_TRY
+    {
+        H5Pclose(fapl);
+        H5Pclose(fcpl);
+        H5Fclose(fid);
+    }
+    H5E_END_TRY
+
+    return 1;
+
+} /* test_pb_fapl_tolerance_at_open */
+
 /*-------------------------------------------------------------------------
  * Function:    test_stats_collection()
  *
@@ -2083,12 +2191,12 @@ main(void)
         SKIPPED();
         puts("Skip page buffering test because paged aggregation is disabled for multi/split drivers");
         exit(EXIT_SUCCESS);
-    } /* end if */
+    }

     if ((fapl = h5_fileaccess()) < 0) {
         nerrors++;
         PUTS_ERROR("Can't get VFD-dependent fapl");
-    } /* end if */
+    }

     /* Push API context */
     if (H5CX_push() < 0)
@@ -2107,6 +2215,7 @@ main(void)
     nerrors += test_lru_processing(fapl, driver_name);
     nerrors += test_min_threshold(fapl, driver_name);
     nerrors += test_stats_collection(fapl, driver_name);
+    nerrors += test_pb_fapl_tolerance_at_open();

 #endif /* H5_HAVE_PARALLEL */

diff --git a/tools/lib/h5tools_utils.c b/tools/lib/h5tools_utils.c
index dfffac67385..d7e7ff574a9 100644
--- a/tools/lib/h5tools_utils.c
+++ b/tools/lib/h5tools_utils.c
@@ -589,12 +589,12 @@ dump_table(hid_t fid, char *tablename, table_t *table)
     PRINTSTREAM(rawoutstream, "%s: # of entries = %d\n", tablename, table->nobjs);

     for (u = 0; u < table->nobjs; u++) {
-        H5VLconnector_token_to_str(fid, table->objs[u].obj_token, &obj_tok_str);
+        H5Otoken_to_str(fid, &table->objs[u].obj_token, &obj_tok_str);

         PRINTSTREAM(rawoutstream, "%s %s %d %d\n", obj_tok_str, table->objs[u].objname,
                     table->objs[u].displayed, table->objs[u].recorded);

-        H5VLfree_token_str(fid, obj_tok_str);
+        H5free_memory(obj_tok_str);
     }
 }

diff --git a/tools/src/h5jam/h5jam.c b/tools/src/h5jam/h5jam.c
index f311257e361..ffef472c9ba 100644
--- a/tools/src/h5jam/h5jam.c
+++ b/tools/src/h5jam/h5jam.c
@@ -162,7 +162,7 @@ main(int argc, char *argv[])
     hsize_t   startub;
     hsize_t   where;
     hsize_t   newubsize;
-    off_t     fsize;
+    HDoff_t   fsize;
     h5_stat_t sbuf;
     h5_stat_t sbuf2;
     int       res;
@@ -253,7 +253,7 @@ main(int argc, char *argv[])
         goto done;
     }

-    fsize = (off_t)sbuf.st_size;
+    fsize = (HDoff_t)sbuf.st_size;

     h5fid = HDopen(input_file, O_RDONLY);
     if (h5fid < 0) {
diff --git a/tools/src/h5jam/h5unjam.c b/tools/src/h5jam/h5unjam.c
index a93e85120b6..0ea5d494662 100644
--- a/tools/src/h5jam/h5unjam.c
+++ b/tools/src/h5jam/h5unjam.c
@@ -173,7 +173,7 @@ main(int argc, char *argv[])
 {
     hid_t   ifile = H5I_INVALID_HID;
     hid_t   plist = H5I_INVALID_HID;
-    off_t   fsize;
+    HDoff_t fsize;
     hsize_t usize;
     htri_t  testval;
     herr_t  status;
@@ -306,9 +306,9 @@ copy_to_file(FILE *infid, FILE *ofid, ssize_t _where, ssize_t show_much)
 {
     static char buf[COPY_BUF_SIZE];
     size_t      how_much;
-    off_t       where = (off_t)_where;
-    off_t       to;
-    off_t       from;
+    HDoff_t     where = (HDoff_t)_where;
+    HDoff_t     to;
+    HDoff_t     from;
     herr_t      ret_value = 0;

     /* nothing to copy */
@@ -349,8 +349,8 @@ copy_to_file(FILE *infid, FILE *ofid, ssize_t _where, ssize_t show_much)

         /* Update positions/size */
         how_much -= bytes_read;
-        from += (off_t)bytes_read;
-        to += (off_t)bytes_read;
+        from += (HDoff_t)bytes_read;
+        to += (HDoff_t)bytes_read;

         /* Write nchars bytes to output file */
         bytes_wrote = fwrite(buf, (size_t)1, bytes_read, ofid);
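Editor's note on the off_t hunks above (not part of the patch): the chunk_info.c, h5jam.c, and h5unjam.c changes replace off_t with HDF5's HDoff_t wrapper. My understanding is that this is a portability fix: on Windows, off_t is a 32-bit long, so offsets and file sizes beyond 2 GiB can be truncated, while HDoff_t maps to a 64-bit type there. A minimal sketch of the pattern, assuming it is compiled inside the HDF5 tree where H5private.h and the HD* POSIX wrappers (HDopen, HDlseek, HDread, HDclose) are available; read_int_at is a hypothetical helper written for illustration only:

    #include "H5private.h" /* HDoff_t and the HDopen/HDlseek/HDread/HDclose wrappers */

    /* Read one int at a (possibly > 2 GiB) byte offset. Taking the offset as
     * HDoff_t instead of off_t avoids truncation on platforms where off_t is
     * only 32 bits wide. */
    static int
    read_int_at(const char *name, HDoff_t offset, int *value_out)
    {
        int fd = HDopen(name, O_RDONLY);

        if (fd < 0)
            return -1;
        if (HDlseek(fd, offset, SEEK_SET) < 0 || HDread(fd, value_out, sizeof(int)) != sizeof(int)) {
            HDclose(fd);
            return -1;
        }
        return HDclose(fd);
    }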