diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 0954bce45c6..12604288019 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -68,7 +68,7 @@ jobs: - uses: msys2/setup-msys2@v2 with: update: true - install: mingw-w64-x86_64-toolchain make mingw-w64-x86_64-cmake mingw-w64-x86_64-ccache mingw-w64-x86_64-boost mingw-w64-x86_64-openssl mingw-w64-x86_64-zeromq mingw-w64-x86_64-libsodium mingw-w64-x86_64-hidapi mingw-w64-x86_64-protobuf mingw-w64-x86_64-libusb mingw-w64-x86_64-unbound git pkg-config + install: mingw-w64-x86_64-toolchain make mingw-w64-x86_64-cmake mingw-w64-x86_64-ccache mingw-w64-x86_64-boost mingw-w64-x86_64-openssl mingw-w64-x86_64-zeromq mingw-w64-x86_64-libsodium mingw-w64-x86_64-hidapi mingw-w64-x86_64-protobuf mingw-w64-x86_64-libusb mingw-w64-x86_64-unbound mingw-w64-x86_64-rust git pkg-config - name: build run: | ${{env.CCACHE_SETTINGS}} diff --git a/.github/workflows/depends.yml b/.github/workflows/depends.yml index 3ee6067482b..5cfe1ad28c5 100644 --- a/.github/workflows/depends.yml +++ b/.github/workflows/depends.yml @@ -31,36 +31,43 @@ jobs: toolchain: - name: "RISCV 64bit" host: "riscv64-linux-gnu" + rust_host: "riscv64gc-unknown-linux-gnu" packages: "python3 gperf g++-riscv64-linux-gnu" - name: "ARM v7" host: "arm-linux-gnueabihf" + rust_host: "armv7-unknown-linux-gnueabihf" packages: "python3 gperf g++-arm-linux-gnueabihf" - name: "ARM v8" host: "aarch64-linux-gnu" + rust_host: "aarch64-unknown-linux-gnu" packages: "python3 gperf g++-aarch64-linux-gnu" - - name: "i686 Win" - host: "i686-w64-mingw32" - packages: "python3 g++-mingw-w64-i686" - name: "i686 Linux" host: "i686-pc-linux-gnu" + rust_host: "i686-unknown-linux-gnu" packages: "gperf cmake g++-multilib python3-zmq" - name: "Win64" host: "x86_64-w64-mingw32" + rust_host: "x86_64-pc-windows-gnu" packages: "cmake python3 g++-mingw-w64-x86-64" - name: "x86_64 Linux" host: "x86_64-unknown-linux-gnu" + rust_host: "x86_64-unknown-linux-gnu" packages: "gperf cmake python3-zmq libdbus-1-dev libharfbuzz-dev" - name: "Cross-Mac x86_64" host: "x86_64-apple-darwin" + rust_host: "x86_64-apple-darwin" packages: "cmake imagemagick libcap-dev librsvg2-bin libz-dev libbz2-dev libtiff-tools python-dev python3-setuptools-git" - name: "Cross-Mac aarch64" host: "aarch64-apple-darwin" + rust_host: "aarch64-apple-darwin" packages: "cmake imagemagick libcap-dev librsvg2-bin libz-dev libbz2-dev libtiff-tools python-dev python3-setuptools-git" - name: "x86_64 Freebsd" host: "x86_64-unknown-freebsd" + rust_host: "x86_64-unknown-freebsd" packages: "clang-8 gperf cmake python3-zmq libdbus-1-dev libharfbuzz-dev" - name: "ARMv8 Android" host: "aarch64-linux-android" + rust_host: "aarch64-linux-android" packages: "gperf cmake python3" name: ${{ matrix.toolchain.name }} steps: @@ -95,8 +102,13 @@ jobs: run: ${{env.APT_SET_CONF}} - name: install dependencies run: sudo apt update; sudo apt -y install build-essential libtool cmake autotools-dev automake pkg-config bsdmainutils curl git ca-certificates ccache ${{ matrix.toolchain.packages }} + - name: install rust target + # We can't use the latest Rust due to LLVM 17 not working with old `ld`s (such as in Ubuntu 20.04) for RISC-V + # We could update ld (a pain), update Ubuntu (requires a large amount of changes), or downgrade Rust + # We can't use Rust 1.70 due to LLVM 16 requiring ld >= 2.40 when building for Windows + run: rustup toolchain install 1.69; rustup default 1.69; rustup target add ${{ matrix.toolchain.rust_host }} - name: prepare 
w64-mingw32 - if: ${{ matrix.toolchain.host == 'x86_64-w64-mingw32' || matrix.toolchain.host == 'i686-w64-mingw32' }} + if: ${{ matrix.toolchain.host == 'x86_64-w64-mingw32' }} run: | sudo update-alternatives --set ${{ matrix.toolchain.host }}-g++ $(which ${{ matrix.toolchain.host }}-g++-posix) sudo update-alternatives --set ${{ matrix.toolchain.host }}-gcc $(which ${{ matrix.toolchain.host }}-gcc-posix) diff --git a/.github/workflows/gitian.yml b/.github/workflows/gitian.yml deleted file mode 100644 index db9735ac332..00000000000 --- a/.github/workflows/gitian.yml +++ /dev/null @@ -1,49 +0,0 @@ -name: ci/gh-actions/gitian - -on: - push: - tags: - - '*' - -jobs: - build-gitian: - runs-on: ubuntu-20.04 - strategy: - fail-fast: false - matrix: - operating-system: - - name: "Linux" - option: "l" - - name: "Windows" - option: "w" - - name: "Android" - option: "a" - - name: "FreeBSD" - option: "f" - - name: "macOS" - option: "m" - name: ${{ matrix.operating-system.name }} - steps: - - name: prepare - run: | - sudo apt update - curl -O https://raw.githubusercontent.com/monero-project/monero/${{ github.ref_name }}/contrib/gitian/gitian-build.py - chmod +x gitian-build.py - - name: setup - run: | - ./gitian-build.py --setup --docker github-actions ${{ github.ref_name }} - - name: build - run: | - ./gitian-build.py --docker --detach-sign --no-commit --build -j 3 -o ${{ matrix.operating-system.option }} github-actions ${{ github.ref_name }} - - name: post build - run: | - cd out/${{ github.ref_name }} - shasum -a256 * - echo \`\`\` >> $GITHUB_STEP_SUMMARY - shasum -a256 * >> $GITHUB_STEP_SUMMARY - echo \`\`\` >> $GITHUB_STEP_SUMMARY - - uses: actions/upload-artifact@v4 - with: - name: ${{ matrix.operating-system.name }} - path: | - out/${{ github.ref_name }}/* diff --git a/.github/workflows/guix.yml b/.github/workflows/guix.yml new file mode 100644 index 00000000000..e2eb3d9d096 --- /dev/null +++ b/.github/workflows/guix.yml @@ -0,0 +1,104 @@ +name: ci/gh-actions/guix + +on: + push: + paths: + - 'contrib/depends/**' + - 'contrib/guix/**' + - '!contrib/**.md' + - '.github/workflows/guix.yml' + pull_request: + paths: + - 'contrib/depends/**' + - 'contrib/guix/**' + - '!contrib/**.md' + - '.github/workflows/guix.yml' + +jobs: + cache-sources: + runs-on: ubuntu-24.04 + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + - name: depends sources cache + id: cache + uses: actions/cache@v4 + with: + path: contrib/depends/sources + key: sources-${{ hashFiles('contrib/depends/packages/*') }} + - name: download depends sources + if: steps.cache.outputs.cache-hit != 'true' + run: make -C contrib/depends download + + build-guix: + runs-on: ubuntu-24.04 + needs: [cache-sources] + strategy: + fail-fast: false + matrix: + toolchain: + - target: "x86_64-linux-gnu" + - target: "aarch64-linux-gnu" + - target: "arm-linux-gnueabihf" + - target: "riscv64-linux-gnu" + - target: "i686-linux-gnu" + - target: "x86_64-w64-mingw32" + - target: "x86_64-unknown-freebsd" + - target: "x86_64-apple-darwin" + - target: "aarch64-apple-darwin" + - target: "aarch64-linux-android" + - target: "arm-linux-androideabi" + + name: ${{ matrix.toolchain.target }} + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + submodules: recursive + - name: remove bundled packages + # This is faster than rm -rf + run: | + sudo mkdir /empty + sudo rsync -a --delete /empty/ /usr/local + - name: depends sources cache + uses: actions/cache/restore@v4 + with: + path: contrib/depends/sources + key: sources-${{ 
hashFiles('contrib/depends/packages/*') }} + - name: install dependencies + run: sudo apt update; sudo apt -y install guix git ca-certificates + - name: apparmor workaround + # https://bugs.launchpad.net/ubuntu/+source/guix/+bug/2064115 + run: | + sudo tee /etc/apparmor.d/guix << EOF + abi <abi/4.0>, + include <tunables/global> + profile guix /usr/bin/guix flags=(unconfined) { + userns, + include if exists <local/guix> + } + EOF + sudo /etc/init.d/apparmor reload + sudo aa-enforce guix || true + sudo apt purge apparmor + - name: build + run: ADDITIONAL_GUIX_TIMEMACHINE_FLAGS="--disable-authentication" SUBSTITUTE_URLS='http://bordeaux.guix.gnu.org' HOSTS="${{ matrix.toolchain.target }}" ./contrib/guix/guix-build + - uses: actions/upload-artifact@v4 + with: + name: ${{ matrix.toolchain.target }} + path: | + guix/guix-build-*/output/${{ matrix.toolchain.target }}/* + guix/guix-build-*/logs/${{ matrix.toolchain.target }}/* + + bundle-logs: + runs-on: ubuntu-24.04 + needs: [build-guix] + steps: + - uses: actions/download-artifact@v4 + with: + merge-multiple: true + - uses: actions/upload-artifact@v4 + with: + name: "logs" + path: '**/logs/**' diff --git a/.gitignore b/.gitignore index 9f62575e5ab..7da4837ccd2 100644 --- a/.gitignore +++ b/.gitignore @@ -25,10 +25,8 @@ miniupnpcstrings.h version/ ClangBuildAnalyzerSession.txt -# gitian -contrib/gitian/builder/ -contrib/gitian/docker/ -contrib/gitian/sigs/ +# guix +/guix # Created by https://www.gitignore.io diff --git a/CMakeLists.txt b/CMakeLists.txt index 60cda040ae5..35da5a3e0cb 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1129,7 +1129,7 @@ if(MINGW) elseif(APPLE OR OPENBSD OR ANDROID) set(EXTRA_LIBRARIES "") elseif(FREEBSD) - set(EXTRA_LIBRARIES execinfo) + set(EXTRA_LIBRARIES execinfo elf) elseif(DRAGONFLY) find_library(COMPAT compat) set(EXTRA_LIBRARIES execinfo ${COMPAT}) diff --git a/README.md b/README.md index 653a1a3893e..f6ee4be1847 100644 --- a/README.md +++ b/README.md @@ -20,7 +20,7 @@ Portions Copyright (c) 2012-2013 The Cryptonote developers. - [Release staging schedule and protocol](#release-staging-schedule-and-protocol) - [Compiling Monero from source](#compiling-monero-from-source) - [Dependencies](#dependencies) - - [Gitian builds](#gitian-builds) + - [Guix builds](#guix-builds) - [Internationalization](#Internationalization) - [Using Tor](#using-tor) - [Pruning](#Pruning) @@ -597,9 +597,9 @@ USE_DEVICE_TREZOR=OFF make release For more information, please check out Trezor [src/device_trezor/README.md](src/device_trezor/README.md). -### Gitian builds +### Guix builds -See [contrib/gitian/README.md](contrib/gitian/README.md). +See [contrib/guix/README.md](contrib/guix/README.md). 
## Installing Monero from a package diff --git a/contrib/depends/Makefile b/contrib/depends/Makefile index f612dbffbed..a86281a554d 100644 --- a/contrib/depends/Makefile +++ b/contrib/depends/Makefile @@ -85,6 +85,7 @@ include builders/$(build_os).mk include builders/default.mk include packages/packages.mk +ifeq ($(GUIX_ENVIRONMENT),) build_id_string:=$(BUILD_ID_SALT) build_id_string+=$(shell $(build_CC) --version 2>/dev/null) build_id_string+=$(shell $(build_AR) --version 2>/dev/null) @@ -98,6 +99,10 @@ $(host_arch)_$(host_os)_id_string+=$(shell $(host_AR) --version 2>/dev/null) $(host_arch)_$(host_os)_id_string+=$(shell $(host_CXX) --version 2>/dev/null) $(host_arch)_$(host_os)_id_string+=$(shell $(host_RANLIB) --version 2>/dev/null) $(host_arch)_$(host_os)_id_string+=$(shell $(host_STRIP) --version 2>/dev/null) +else +build_id_string:=$(realpath $(GUIX_ENVIRONMENT)) +$(host_arch)_$(host_os)_id_string:=$(realpath $(GUIX_ENVIRONMENT)) +endif packages += $($(host_arch)_$(host_os)_packages) $($(host_os)_packages) native_packages += $($(host_arch)_$(host_os)_native_packages) $($(host_os)_native_packages) @@ -111,7 +116,7 @@ $(host_arch)_$(host_os)_native_toolchain?=$($(host_os)_native_toolchain) include funcs.mk toolchain_path=$($($(host_arch)_$(host_os)_native_toolchain)_prefixbin) -final_build_id_long+=$(shell $(build_SHA256SUM) toolchain.cmake.in) +final_build_id_long+=:[sha256sum]:$(shell $(build_SHA256SUM) toolchain.cmake.in) final_build_id+=$(shell echo -n "$(final_build_id_long)" | $(build_SHA256SUM) | cut -c-$(HASH_LENGTH)) $(host_prefix)/.stamp_$(final_build_id): $(native_packages) $(packages) $(AT)rm -rf $(@D) @@ -124,8 +129,8 @@ $(host_prefix)/.stamp_$(final_build_id): $(native_packages) $(packages) $(host_prefix)/share/toolchain.cmake : toolchain.cmake.in $(host_prefix)/.stamp_$(final_build_id) $(AT)@mkdir -p $(@D) $(AT)sed -e 's|@HOST@|$(host)|' \ - -e 's|@CC@|$(toolchain_path)$(host_CC)|' \ - -e 's|@CXX@|$(toolchain_path)$(host_CXX)|' \ + -e 's|@CC@|$(host_CC)|' \ + -e 's|@CXX@|$(host_CXX)|' \ -e 's|@AR@|$(toolchain_path)$(host_AR)|' \ -e 's|@RANLIB@|$(toolchain_path)$(host_RANLIB)|' \ -e 's|@NM@|$(toolchain_path)$(host_NM)|' \ diff --git a/contrib/depends/funcs.mk b/contrib/depends/funcs.mk index e08669c981c..be928c192b8 100644 --- a/contrib/depends/funcs.mk +++ b/contrib/depends/funcs.mk @@ -1,18 +1,25 @@ define int_vars #Set defaults for vars which may be overridden per-package -$(1)_cc=$($($(1)_type)_CC) -$(1)_cxx=$($($(1)_type)_CXX) -$(1)_objc=$($($(1)_type)_OBJC) -$(1)_objcxx=$($($(1)_type)_OBJCXX) -$(1)_ar=$($($(1)_type)_AR) -$(1)_ranlib=$($($(1)_type)_RANLIB) -$(1)_libtool=$($($(1)_type)_LIBTOOL) -$(1)_nm=$($($(1)_type)_NM) -$(1)_cflags=$($($(1)_type)_CFLAGS) $($($(1)_type)_$(release_type)_CFLAGS) -$(1)_cxxflags=$($($(1)_type)_CXXFLAGS) $($($(1)_type)_$(release_type)_CXXFLAGS) -$(1)_arflags=$($($(1)_type)_ARFLAGS) $($($(1)_type)_$(release_type)_ARFLAGS) -$(1)_ldflags=$($($(1)_type)_LDFLAGS) $($($(1)_type)_$(release_type)_LDFLAGS) -L$($($(1)_type)_prefix)/lib -$(1)_cppflags=$($($(1)_type)_CPPFLAGS) $($($(1)_type)_$(release_type)_CPPFLAGS) -I$($($(1)_type)_prefix)/include +$(1)_cc=$$($$($(1)_type)_CC) +$(1)_cxx=$$($$($(1)_type)_CXX) +$(1)_objc=$$($$($(1)_type)_OBJC) +$(1)_objcxx=$$($$($(1)_type)_OBJCXX) +$(1)_ar=$$($$($(1)_type)_AR) +$(1)_ranlib=$$($$($(1)_type)_RANLIB) +$(1)_libtool=$$($$($(1)_type)_LIBTOOL) +$(1)_nm=$$($$($(1)_type)_NM) +$(1)_cflags=$$($$($(1)_type)_CFLAGS) \ + $$($$($(1)_type)_$$(release_type)_CFLAGS) +$(1)_cxxflags=$$($$($(1)_type)_CXXFLAGS) \ + 
$$($$($(1)_type)_$$(release_type)_CXXFLAGS) +$(1)_arflags=$$($$($(1)_type)_ARFLAGS) \ + $$($$($(1)_type)_$(release_type)_ARFLAGS) +$(1)_ldflags=$$($$($(1)_type)_LDFLAGS) \ + $$($$($(1)_type)_$$(release_type)_LDFLAGS) \ + -L$$($($(1)_type)_prefix)/lib +$(1)_cppflags=$$($$($(1)_type)_CPPFLAGS) \ + $$($$($(1)_type)_$$(release_type)_CPPFLAGS) \ + -I$$($$($(1)_type)_prefix)/include $(1)_recipe_hash:= endef @@ -37,6 +44,7 @@ endef define int_get_build_recipe_hash $(eval $(1)_all_file_checksums:=$(shell $(build_SHA256SUM) $(meta_depends) packages/$(1).mk $(addprefix $(PATCHES_PATH)/$(1)/,$($(1)_patches)) | cut -d" " -f1)) +final_build_id_long+=:[$(1)_all_file_checksums]$(foreach checksum,$($(1)_all_file_checksums),$(shell echo ":$(checksum)")): $(eval $(1)_recipe_hash:=$(shell echo -n "$($(1)_all_file_checksums)" | $(build_SHA256SUM) | cut -d" " -f1)) endef @@ -46,7 +54,7 @@ $(eval $(1)_all_dependencies:=$(call int_get_all_dependencies,$(1),$($($(1)_type $(foreach dep,$($(1)_all_dependencies),$(eval $(1)_build_id_deps+=$(dep)-$($(dep)_version)-$($(dep)_recipe_hash))) $(eval $(1)_build_id_long:=$(1)-$($(1)_version)-$($(1)_recipe_hash)-$(release_type) $($(1)_build_id_deps) $($($(1)_type)_id_string)) $(eval $(1)_build_id:=$(shell echo -n "$($(1)_build_id_long)" | $(build_SHA256SUM) | cut -c-$(HASH_LENGTH))) -final_build_id_long+=$($(package)_build_id_long) +final_build_id_long+=:[recipe]:$(1)-$($(1)_version)-$($(1)_recipe_hash)-$(release_type):[deps]$(foreach dep,$($(1)_build_id_deps),$(shell echo ":$(dep)")):[$($(1)_type)_id]:$($($(1)_type)_id_string): #compute package-specific paths $(1)_build_subdir?=. @@ -267,4 +275,4 @@ $(foreach package,$(all_packages),$(eval $(call int_config_attach_build_config,$ $(foreach package,$(all_packages),$(eval $(call int_add_cmds,$(package)))) #special exception: if a toolchain package exists, all non-native packages depend on it -$(foreach package,$(packages),$(eval $($(package)_unpacked): |$($($(host_arch)_$(host_os)_native_toolchain)_cached) )) +$(foreach package,$(packages),$(eval $($(package)_extracted): |$($($(host_arch)_$(host_os)_native_toolchain)_cached) )) diff --git a/contrib/depends/hosts/darwin.mk b/contrib/depends/hosts/darwin.mk index 9a96e26019b..58ff5dc32c3 100644 --- a/contrib/depends/hosts/darwin.mk +++ b/contrib/depends/hosts/darwin.mk @@ -1,23 +1,65 @@ OSX_MIN_VERSION=10.13 +OSX_SDK_VERSION=11.0 +XCODE_VERSION=12.2 +XCODE_BUILD_ID=12B45b LD64_VERSION=609 -ifeq (aarch64, $(host_arch)) -CC_target=arm64-apple-$(host_os) -else -CC_target=$(host) -endif -darwin_CC=clang -target $(CC_target) -mmacosx-version-min=$(OSX_MIN_VERSION) --sysroot $(host_prefix)/native/SDK/ -iwithsysroot/usr/include -iframeworkwithsysroot/System/Library/Frameworks -mlinker-version=$(LD64_VERSION) -B$(host_prefix)/native/bin/$(host)- -darwin_CXX=clang++ -target $(CC_target) -mmacosx-version-min=$(OSX_MIN_VERSION) --sysroot $(host_prefix)/native/SDK/ -iwithsysroot/usr/include/c++/v1 -iwithsysroot/usr/include -iframeworkwithsysroot/System/Library/Frameworks -mlinker-version=$(LD64_VERSION) -stdlib=libc++ -B$(host_prefix)/native/bin/$(host)- + +OSX_SDK=$(host_prefix)/native/SDK + +darwin_native_toolchain=darwin_sdk native_cctools + +clang_prog=$(shell $(SHELL) $(.SHELLFLAGS) "command -v clang") +clangxx_prog=$(shell $(SHELL) $(.SHELLFLAGS) "command -v clang++") + +# Flag explanations: +# +# -mlinker-version +# +# Ensures that modern linker features are enabled. See here for more +# details: https://github.com/bitcoin/bitcoin/pull/19407. 
+# +# -B$(build_prefix)/bin +# +# Explicitly point to our binaries (e.g. cctools) so that they are +# ensured to be found and preferred over other possibilities. +# +# -isysroot$(OSX_SDK) -nostdlibinc +# +# Disable default include paths built into the compiler as well as +# those normally included for libc and libc++. The only path that +# remains implicitly is the clang resource dir. +# +# -iwithsysroot / -iframeworkwithsysroot +# +# Adds the desired paths from the SDK +# + +darwin_CC=env -u C_INCLUDE_PATH -u CPLUS_INCLUDE_PATH \ + -u OBJC_INCLUDE_PATH -u OBJCPLUS_INCLUDE_PATH -u CPATH \ + -u LIBRARY_PATH \ + $(clang_prog) --target=$(host) -mmacosx-version-min=$(OSX_MIN_VERSION) \ + -B$(build_prefix)/bin -mlinker-version=$(LD64_VERSION) \ + -isysroot$(OSX_SDK) \ + -isysroot$(OSX_SDK) -nostdlibinc \ + -iwithsysroot/usr/include -iframeworkwithsysroot/System/Library/Frameworks + +darwin_CXX=env -u C_INCLUDE_PATH -u CPLUS_INCLUDE_PATH \ + -u OBJC_INCLUDE_PATH -u OBJCPLUS_INCLUDE_PATH -u CPATH \ + -u LIBRARY_PATH \ + $(clangxx_prog) --target=$(host) -mmacosx-version-min=$(OSX_MIN_VERSION) \ + -B$(build_prefix)/bin -mlinker-version=$(LD64_VERSION) \ + -isysroot$(OSX_SDK) -nostdlibinc \ + -iwithsysroot/usr/include/c++/v1 \ + -iwithsysroot/usr/include -iframeworkwithsysroot/System/Library/Frameworks darwin_CFLAGS=-pipe darwin_CXXFLAGS=$(darwin_CFLAGS) darwin_ARFLAGS=cr -darwin_release_CFLAGS=-O1 +darwin_release_CFLAGS=-O2 darwin_release_CXXFLAGS=$(darwin_release_CFLAGS) darwin_debug_CFLAGS=-O1 darwin_debug_CXXFLAGS=$(darwin_debug_CFLAGS) -darwin_native_toolchain=native_cctools darwin_sdk - darwin_cmake_system=Darwin diff --git a/contrib/depends/hosts/freebsd.mk b/contrib/depends/hosts/freebsd.mk index d3c6c6149b0..aeaccbf2730 100644 --- a/contrib/depends/hosts/freebsd.mk +++ b/contrib/depends/hosts/freebsd.mk @@ -1,5 +1,14 @@ -freebsd_CC=clang-8 -freebsd_CXX=clang++-8 +clang_prog=$(shell $(SHELL) $(.SHELLFLAGS) "command -v clang") +clangxx_prog=$(shell $(SHELL) $(.SHELLFLAGS) "command -v clang++") + +freebsd_CC=env -u C_INCLUDE_PATH -u CPLUS_INCLUDE_PATH \ + -u OBJC_INCLUDE_PATH -u OBJCPLUS_INCLUDE_PATH -u CPATH \ + -u LIBRARY_PATH $(clang_prog) --target=$(host) --sysroot=$(host_prefix)/native -iwithsysroot/usr/include +freebsd_CXX=env -u C_INCLUDE_PATH -u CPLUS_INCLUDE_PATH \ + -u OBJC_INCLUDE_PATH -u OBJCPLUS_INCLUDE_PATH -u CPATH \ + -u LIBRARY_PATH $(clangxx_prog) --target=$(host) -stdlib=libc++ --sysroot=$(host_prefix)/native \ + -iwithsysroot/usr/include/c++/v1 -iwithsysroot/usr/include + freebsd_AR=ar freebsd_RANLIB=ranlib freebsd_NM=nm diff --git a/contrib/depends/hosts/linux.mk b/contrib/depends/hosts/linux.mk index 0f9379f4ae6..ba0c4c3027c 100644 --- a/contrib/depends/hosts/linux.mk +++ b/contrib/depends/hosts/linux.mk @@ -10,6 +10,7 @@ linux_debug_CXXFLAGS=$(linux_debug_CFLAGS) linux_debug_CPPFLAGS=-D_GLIBCXX_DEBUG -D_GLIBCXX_DEBUG_PEDANTIC +ifeq ($(GUIX_ENVIRONMENT),) ifeq (86,$(findstring 86,$(build_arch))) i686_linux_CC=gcc -m32 i686_linux_CXX=g++ -m32 @@ -30,5 +31,6 @@ i686_linux_CXX=$(default_host_CXX) -m32 x86_64_linux_CC=$(default_host_CC) -m64 x86_64_linux_CXX=$(default_host_CXX) -m64 endif +endif linux_cmake_system=Linux diff --git a/contrib/depends/packages/android_ndk.mk b/contrib/depends/packages/android_ndk.mk index 65fa89de6cd..e32e47739b7 100644 --- a/contrib/depends/packages/android_ndk.mk +++ b/contrib/depends/packages/android_ndk.mk @@ -3,7 +3,7 @@ $(package)_version=18b $(package)_download_path=https://dl.google.com/android/repository/ 
$(package)_file_name=android-ndk-r$($(package)_version)-linux-x86_64.zip $(package)_sha256_hash=4f61cbe4bbf6406aa5ef2ae871def78010eed6271af72de83f8bd0b07a9fd3fd -$(package)_patches=api_definition.patch +$(package)_patches=api_definition.patch fix_env.patch define $(package)_set_vars $(package)_config_opts_arm=--arch arm @@ -17,11 +17,12 @@ endef define $(package)_preprocess_cmds cd android-ndk-r$($(package)_version) && \ - patch -p1 < $($(package)_patch_dir)/api_definition.patch + patch -p1 < $($(package)_patch_dir)/api_definition.patch && \ + patch -p1 < $($(package)_patch_dir)/fix_env.patch endef define $(package)_stage_cmds - android-ndk-r$($(package)_version)/build/tools/make_standalone_toolchain.py --api 21 \ + python3 android-ndk-r$($(package)_version)/build/tools/make_standalone_toolchain.py --api 21 \ --install-dir $(build_prefix) --stl=libc++ $($(package)_config_opts) &&\ mv $(build_prefix) $($(package)_staging_dir)/$(host_prefix) endef diff --git a/contrib/depends/packages/boost.mk b/contrib/depends/packages/boost.mk index d81bca973a4..3f7fba1f167 100644 --- a/contrib/depends/packages/boost.mk +++ b/contrib/depends/packages/boost.mk @@ -1,9 +1,9 @@ -package=boost +package=boost $(package)_version=1_64_0 $(package)_download_path=https://downloads.sourceforge.net/project/boost/boost/1.64.0/ $(package)_file_name=$(package)_$($(package)_version).tar.bz2 $(package)_sha256_hash=7bcc5caace97baa948931d712ea5f37038dbb1c5d89b43ad4def4ed7cb683332 -$(package)_patches=fix_aroptions.patch fix_arm_arch.patch +$(package)_patches=fix_aroptions.patch fix_arm_arch.patch fix_coalesce.patch define $(package)_set_vars $(package)_config_opts_release=variant=release @@ -23,14 +23,16 @@ $(package)_toolset_darwin=darwin $(package)_archiver_darwin=$($(package)_libtool) $(package)_config_libraries_$(host_os)="chrono,filesystem,program_options,system,thread,test,date_time,regex,serialization" $(package)_config_libraries_mingw32="chrono,filesystem,program_options,system,thread,test,date_time,regex,serialization,locale" -$(package)_cxxflags=-std=c++11 -$(package)_cxxflags_linux=-fPIC -$(package)_cxxflags_freebsd=-fPIC +$(package)_cxxflags+=-std=c++11 +$(package)_cxxflags_linux+=-fPIC +$(package)_cxxflags_freebsd+=-fPIC +$(package)_cxxflags_darwin+=-ffile-prefix-map=$($(package)_extract_dir)=/usr endef define $(package)_preprocess_cmds patch -p1 < $($(package)_patch_dir)/fix_aroptions.patch &&\ patch -p1 < $($(package)_patch_dir)/fix_arm_arch.patch &&\ + patch -p1 < $($(package)_patch_dir)/fix_coalesce.patch &&\ echo "using $(boost_toolset_$(host_os)) : : $($(package)_cxx) : \"$($(package)_cxxflags) $($(package)_cppflags)\" \"$($(package)_ldflags)\" \"$(boost_archiver_$(host_os))\" \"$($(package)_arflags)\" \"$(host_STRIP)\" \"$(host_RANLIB)\" \"$(host_WINDRES)\" : ;" > user-config.jam endef diff --git a/contrib/depends/packages/darwin_sdk.mk b/contrib/depends/packages/darwin_sdk.mk index 3355dcf3a66..388726220ef 100644 --- a/contrib/depends/packages/darwin_sdk.mk +++ b/contrib/depends/packages/darwin_sdk.mk @@ -4,7 +4,11 @@ $(package)_download_path=https://bitcoincore.org/depends-sources/sdks $(package)_file_name=Xcode-12.2-12B45b-extracted-SDK-with-libcxx-headers.tar.gz $(package)_sha256_hash=df75d30ecafc429e905134333aeae56ac65fac67cb4182622398fd717df77619 +# Prevent clang from including readline headers from the SDK. We statically link +# our own version of readline. 
+ define $(package)_stage_cmds mkdir -p $($(package)_staging_dir)/$(host_prefix)/native/SDK &&\ + rm -rf usr/include/readline && \ mv * $($(package)_staging_dir)/$(host_prefix)/native/SDK endef diff --git a/contrib/depends/packages/freebsd_base.mk b/contrib/depends/packages/freebsd_base.mk index ad9975f8d5a..16d4a1a73d5 100644 --- a/contrib/depends/packages/freebsd_base.mk +++ b/contrib/depends/packages/freebsd_base.mk @@ -1,23 +1,20 @@ package=freebsd_base -$(package)_version=11.3 -$(package)_download_path=https://download.freebsd.org/ftp/releases/amd64/$($(package)_version)-RELEASE/ +$(package)_version=12.3 +$(package)_download_path=https://archive.freebsd.org/old-releases/amd64/$($(package)_version)-RELEASE/ $(package)_download_file=base.txz $(package)_file_name=freebsd-base-$($(package)_version).txz -$(package)_sha256_hash=4599023ac136325b86f2fddeec64c1624daa83657e40b00b2ef944c81463a4ff +$(package)_sha256_hash=e85b256930a2fbc04b80334106afecba0f11e52e32ffa197a88d7319cf059840 define $(package)_extract_cmds echo $($(package)_sha256_hash) $($(1)_source_dir)/$($(package)_file_name) | sha256sum -c &&\ tar xf $($(1)_source_dir)/$($(package)_file_name) ./lib/ ./usr/lib/ ./usr/include/ endef -define $(package)_build_cmds - mkdir bin &&\ - echo "#!/bin/sh\n\nexec /usr/bin/clang-8 -target x86_64-unknown-freebsd$($(package)_version) --sysroot=$(host_prefix)/native $$$$""@" > bin/clang-8 &&\ - echo "#!/bin/sh\n\nexec /usr/bin/clang++-8 -target x86_64-unknown-freebsd$($(package)_version) --sysroot=$(host_prefix)/native $$$$""@" > bin/clang++-8 &&\ - chmod 755 bin/* -endef +# Prevent clang from including OpenSSL headers from the system base. We +# statically link our own version of OpenSSL. define $(package)_stage_cmds mkdir $($(package)_staging_dir)/$(host_prefix)/native &&\ - mv bin lib usr $($(package)_staging_dir)/$(host_prefix)/native + rm -rf usr/include/openssl &&\ + mv lib usr $($(package)_staging_dir)/$(host_prefix)/native endef diff --git a/contrib/depends/packages/native_cctools.mk b/contrib/depends/packages/native_cctools.mk index 8c1ea4c62a7..a019b1649ed 100644 --- a/contrib/depends/packages/native_cctools.mk +++ b/contrib/depends/packages/native_cctools.mk @@ -5,14 +5,18 @@ $(package)_download_file=$($(package)_version).tar.gz $(package)_file_name=$(package)-$($(package)_version).tar.gz $(package)_sha256_hash=70a7189418c2086d20c299c5d59250cf5940782c778892ccc899c66516ed240e $(package)_build_subdir=cctools -$(package)_dependencies=native_clang native_libtapi $(package)_patches=no-build-date.patch +$(package)_dependencies=native_libtapi define $(package)_set_vars $(package)_config_opts=--target=$(host) --disable-lto-support --with-libtapi=$(host_prefix) $(package)_ldflags+=-Wl,-rpath=\\$$$$$$$$\$$$$$$$$ORIGIN/../lib -$(package)_cc=$(host_prefix)/native/bin/clang -$(package)_cxx=$(host_prefix)/native/bin/clang++ +$(package)_cc=$(clang_prog) +$(package)_cxx=$(clangxx_prog) +endef + +define $(package)_preprocess_cmds + cp -f $(BASEDIR)/config.guess $(BASEDIR)/config.sub cctools endef define $(package)_preprocess_cmds diff --git a/contrib/depends/packages/native_clang.mk b/contrib/depends/packages/native_clang.mk deleted file mode 100644 index 4ff21ada0d6..00000000000 --- a/contrib/depends/packages/native_clang.mk +++ /dev/null @@ -1,28 +0,0 @@ -package=native_clang -$(package)_version=9.0.0 -$(package)_download_path=https://releases.llvm.org/$($(package)_version) -$(package)_download_file=clang+llvm-$($(package)_version)-x86_64-linux-gnu-ubuntu-18.04.tar.xz 
-$(package)_file_name=clang-llvm-$($(package)_version)-x86_64-linux-gnu-ubuntu-18.04.tar.xz -$(package)_sha256_hash=a23b082b30c128c9831dbdd96edad26b43f56624d0ad0ea9edec506f5385038d - -define $(package)_extract_cmds - echo $($(package)_sha256_hash) $($(package)_source) | sha256sum -c &&\ - mkdir -p toolchain/bin toolchain/lib/clang/3.5/include && \ - tar --strip-components=1 -C toolchain -xf $($(package)_source) && \ - rm -f toolchain/lib/libc++abi.so* && \ - echo "#!/bin/sh" > toolchain/bin/$(host)-dsymutil && \ - echo "exit 0" >> toolchain/bin/$(host)-dsymutil && \ - chmod +x toolchain/bin/$(host)-dsymutil -endef - -define $(package)_stage_cmds - cd $($(package)_extract_dir)/toolchain && \ - mkdir -p $($(package)_staging_prefix_dir)/lib/clang/$($(package)_version)/include && \ - mkdir -p $($(package)_staging_prefix_dir)/bin $($(package)_staging_prefix_dir)/include && \ - cp bin/clang $($(package)_staging_prefix_dir)/bin/ &&\ - cp -P bin/clang++ $($(package)_staging_prefix_dir)/bin/ &&\ - cp lib/libLTO.so $($(package)_staging_prefix_dir)/lib/ && \ - cp -rf lib/clang/$($(package)_version)/include/* $($(package)_staging_prefix_dir)/lib/clang/$($(package)_version)/include/ && \ - cp bin/dsymutil $($(package)_staging_prefix_dir)/bin/$(host)-dsymutil && \ - if `test -d lib/c++/`; then cp -rf lib/c++/ $($(package)_staging_prefix_dir)/lib/; fi -endef diff --git a/contrib/depends/packages/native_libtapi.mk b/contrib/depends/packages/native_libtapi.mk index c5625501a26..36234a001c4 100644 --- a/contrib/depends/packages/native_libtapi.mk +++ b/contrib/depends/packages/native_libtapi.mk @@ -4,30 +4,16 @@ $(package)_download_path=https://github.com/tpoechtrager/apple-libtapi/archive $(package)_download_file=$($(package)_version).tar.gz $(package)_file_name=$(package)-$($(package)_version).tar.gz $(package)_sha256_hash=62e419c12d1c9fad67cc1cd523132bc00db050998337c734c15bc8d73cc02b61 -$(package)_build_subdir=build -$(package)_dependencies=native_clang $(package)_patches=no_embed_git_rev.patch define $(package)_preprocess_cmds patch -p1 -i $($(package)_patch_dir)/no_embed_git_rev.patch endef -define $(package)_config_cmds - echo -n $(build_prefix) > INSTALLPREFIX; \ - CC=$(host_prefix)/native/bin/clang CXX=$(host_prefix)/native/bin/clang++ \ - cmake -DCMAKE_INSTALL_PREFIX=$(build_prefix) \ - -DLLVM_INCLUDE_TESTS=OFF \ - -DCMAKE_BUILD_TYPE=RELEASE \ - -DTAPI_REPOSITORY_STRING="1100.0.11" \ - -DTAPI_FULL_VERSION="11.0.0" \ - -DCMAKE_CXX_FLAGS="-I $($(package)_extract_dir)/src/llvm/projects/clang/include -I $($(package)_build_dir)/projects/clang/include" \ - $($(package)_extract_dir)/src/llvm -endef - define $(package)_build_cmds - $(MAKE) clangBasic && $(MAKE) libtapi + CC=$(clang_prog) CXX=$(clangxx_prog) INSTALLPREFIX=$($(package)_staging_prefix_dir) ./build.sh endef define $(package)_stage_cmds - $(MAKE) DESTDIR=$($(package)_staging_dir) install-libtapi install-tapi-headers + ./install.sh endef diff --git a/contrib/depends/packages/packages.mk b/contrib/depends/packages/packages.mk index 14561964f59..cb4e8e228a0 100644 --- a/contrib/depends/packages/packages.mk +++ b/contrib/depends/packages/packages.mk @@ -24,5 +24,5 @@ mingw32_packages = $(hardware_packages) mingw32_native_packages = $(hardware_native_packages) ifneq ($(build_os),darwin) -darwin_native_packages += darwin_sdk native_clang native_cctools native_libtapi +darwin_native_packages += darwin_sdk native_cctools native_libtapi endif diff --git a/contrib/depends/patches/android_ndk/fix_env.patch 
b/contrib/depends/patches/android_ndk/fix_env.patch new file mode 100644 index 00000000000..01928dd4857 --- /dev/null +++ b/contrib/depends/patches/android_ndk/fix_env.patch @@ -0,0 +1,30 @@ +diff --git a/build/tools/make_standalone_toolchain.py b/build/tools/make_standalone_toolchain.py +index b8172b2..19c0ad6 100755 +--- a/build/tools/make_standalone_toolchain.py ++++ b/build/tools/make_standalone_toolchain.py +@@ -224,10 +224,10 @@ def make_clang_scripts(install_dir, triple, api, windows): + clang.write(textwrap.dedent("""\ + #!/bin/bash + if [ "$1" != "-cc1" ]; then +- `dirname $0`/clang{version} {flags} "$@" ++ env -u C_INCLUDE_PATH -u CPLUS_INCLUDE_PATH -u OBJC_INCLUDE_PATH -u OBJCPLUS_INCLUDE_PATH -u CPATH -u LIBRARY_PATH `dirname $0`/clang{version} {flags} "$@" + else + # target/triple already spelled out. +- `dirname $0`/clang{version} "$@" ++ env -u C_INCLUDE_PATH -u CPLUS_INCLUDE_PATH -u OBJC_INCLUDE_PATH -u OBJCPLUS_INCLUDE_PATH -u CPATH -u LIBRARY_PATH `dirname $0`/clang{version} "$@" + fi + """.format(version=version_number, flags=unix_flags))) + +@@ -239,10 +239,10 @@ def make_clang_scripts(install_dir, triple, api, windows): + clangpp.write(textwrap.dedent("""\ + #!/bin/bash + if [ "$1" != "-cc1" ]; then +- `dirname $0`/clang{version}++ {flags} "$@" ++ env -u C_INCLUDE_PATH -u CPLUS_INCLUDE_PATH -u OBJC_INCLUDE_PATH -u OBJCPLUS_INCLUDE_PATH -u CPATH -u LIBRARY_PATH `dirname $0`/clang{version}++ {flags} "$@" + else + # target/triple already spelled out. +- `dirname $0`/clang{version}++ "$@" ++ env -u C_INCLUDE_PATH -u CPLUS_INCLUDE_PATH -u OBJC_INCLUDE_PATH -u OBJCPLUS_INCLUDE_PATH -u CPATH -u LIBRARY_PATH `dirname $0`/clang{version}++ "$@" + fi + """.format(version=version_number, flags=unix_flags))) + diff --git a/contrib/depends/patches/boost/fix_coalesce.patch b/contrib/depends/patches/boost/fix_coalesce.patch new file mode 100644 index 00000000000..c00aed3722b --- /dev/null +++ b/contrib/depends/patches/boost/fix_coalesce.patch @@ -0,0 +1,22 @@ +Boost 1.64.0 doesn't recognize that we're building with Clang and passes a +flag that results in an error. We don't support GCC < 4.0 at all, so +commenting out the lines here is fine. Patch can be dropped when we update to +Boost 1.84.0 + +--- boost_1_64_0/tools/build/src/tools/darwin.jam ++++ boost_1_64_0/tools/build/src/tools/darwin.jam +@@ -138,10 +138,10 @@ rule init ( version ? : command * : options * : requirement * ) + common.handle-options darwin : $(condition) : $(command) : $(options) ; + + # - GCC 4.0 and higher in Darwin does not have -fcoalesce-templates. +- if $(real-version) < "4.0.0" +- { +- flags darwin.compile.c++ OPTIONS $(condition) : -fcoalesce-templates ; +- } ++ #if $(real-version) < "4.0.0" ++ #{ ++ # flags darwin.compile.c++ OPTIONS $(condition) : -fcoalesce-templates ; ++ #} + # - GCC 4.2 and higher in Darwin does not have -Wno-long-double. 
+ if $(real-version) < "4.2.0" + { diff --git a/contrib/depends/toolchain.cmake.in b/contrib/depends/toolchain.cmake.in index 50eaaa59373..9c76490196c 100644 --- a/contrib/depends/toolchain.cmake.in +++ b/contrib/depends/toolchain.cmake.in @@ -78,12 +78,14 @@ if(CMAKE_SYSTEM_NAME STREQUAL "Darwin") SET(CMAKE_OSX_ARCHITECTURES "x86_64") endif() SET(_CMAKE_TOOLCHAIN_PREFIX @prefix@/native/bin/${CONF_TRIPLE}-) - SET(CMAKE_C_COMPILER @prefix@/native/bin/clang) + SET(CMAKE_C_COMPILER @CC@) SET(CMAKE_C_COMPILER_TARGET ${CLANG_TARGET}) SET(CMAKE_C_FLAGS_INIT -B${_CMAKE_TOOLCHAIN_PREFIX}) - SET(CMAKE_CXX_COMPILER @prefix@/native/bin/clang++ -stdlib=libc++) + SET(CMAKE_CXX_COMPILER @CXX@ -stdlib=libc++) SET(CMAKE_CXX_COMPILER_TARGET ${CLANG_TARGET}) SET(CMAKE_CXX_FLAGS_INIT -B${_CMAKE_TOOLCHAIN_PREFIX}) + SET(CMAKE_ASM_COMPILER clang) + SET(CMAKE_ASM-ATT_COMPILER as) SET(CMAKE_ASM_COMPILER_TARGET ${CLANG_TARGET}) SET(CMAKE_ASM-ATT_COMPILER_TARGET ${CLANG_TARGET}) SET(APPLE True) @@ -112,6 +114,11 @@ elseif(CMAKE_SYSTEM_NAME STREQUAL "Android") SET(CMAKE_C_COMPILER "${_CMAKE_TOOLCHAIN_PREFIX}clang") SET(CMAKE_CXX_COMPILER "${_CMAKE_TOOLCHAIN_PREFIX}clang++") else() + if(CMAKE_SYSTEM_NAME STREQUAL "FreeBSD") + SET(CMAKE_ASM_COMPILER clang) + SET(CMAKE_ASM-ATT_COMPILER as) + endif() + SET(CMAKE_C_COMPILER @CC@) SET(CMAKE_CXX_COMPILER @CXX@) endif() diff --git a/contrib/gitian/DOCKRUN.md b/contrib/gitian/DOCKRUN.md deleted file mode 100644 index 06e4894a243..00000000000 --- a/contrib/gitian/DOCKRUN.md +++ /dev/null @@ -1,114 +0,0 @@ -Quick Gitian building with docker -================================= - -*Setup instructions for a Gitian build of Monero using Docker.* - -Gitian supports other container mechanisms too but if you have a Debian or -Ubuntu-based host the steps can be greatly simplified. - -Preparing the Gitian builder host ---------------------------------- - -The procedure here will create a docker container for build preparation, as well as -for actually running the builds. The only items you must install on your own host -are docker and apt-cacher-ng. With docker installed, you should also give yourself -permission to use docker by adding yourself to the docker group. - -```bash -sudo apt-get install docker.io apt-cacher-ng -sudo usermod -aG docker $USER -su $USER -``` - -The final `su` command is needed to start a new shell with your new group membership, -since the `usermod` command doesn't affect any existing sessions. - -You'll also need to clone the monero repository and navigate to the `contrib/gitian` directory: - -```bash -git clone https://github.com/monero-project/monero.git -cd monero/contrib/gitian -``` - -Other User Preparation ----------------------- - -The final step will be to `gpg` sign the results of your build and upload them to GitHub. -Before you can do that, you'll need -* a GitHub account. -If your GitHub account name is different from your local account name, you must -set your GitHub account name for the script to use: - -```bash -export GH_USER= -``` - -* PGP keys - if you don't have one already, you can use `gpg --quick-gen-key` to generate it. -* a fork of the [gitian.sigs](https://github.com/monero-project/gitian.sigs/) repo on your GitHub account. -Please follow the directions there for uploading your key first. - -**Note:** Please ensure your gpg public key is available to check signatures by adding it to the [gitian.sigs/gitian-pubkeys/](https://github.com/monero-project/gitian.sigs/tree/master/gitian-pubkeys) directory in a pull request. 
- - -Building the Binaries ---------------------- - -The dockrun.sh script will do everything to build the binaries. Just specify the -version to build as its only argument, e.g. - -```bash -VERSION=v0.18.1.0 -./dockrun.sh $VERSION -``` - -The build should run to completion with no errors, and will display the SHA256 checksums -of the resulting binaries. You'll be prompted to check if the sums look good, and if so -then the results will be signed, and the signatures will be pushed to GitHub. - -***Note: In order to publish the signed assertions via this script, you need to have your SSH key uploaded to GitHub beforehand. See https://docs.github.com/articles/generating-an-ssh-key/ for more info.*** - -You can also look in the [gitian.sigs](https://github.com/monero-project/gitian.sigs/) repo and / or [getmonero.org release checksums](https://web.getmonero.org/downloads/hashes.txt) to see if others got the same checksum for the same version tag. If there is ever a mismatch -- **STOP! Something is wrong**. Contact others on IRC / GitHub to figure out what is going on. - - -Other Options -------------- - -This script just runs the [gitian-build.py](gitian-build.py) inside a container named `gitrun`. -You can set other options for that script by setting the OPT variable when running `dockrun.sh` -e.g. - -```bash -# Run build processes with 8 threads -OPT="-j 8" ./dockrun.sh $VERSION -``` - -Post-build ----------- - -You can examine the build and install logs by running a shell in the container, e.g. - -```bash -# Tail running logs -docker exec -it gitrun /bin/bash -tail -F builder/var/install.log -tail -F builder/var/build.log - -# Inspect logs, in format install-.log and build-.log -docker exec -it gitrun /bin/bash -more builder/var/install-linux.log -more builder/var/build-linux.log -``` - -You can find the compiled archives inside of the container at the following directory: - -```bash -docker exec -it gitrun /bin/bash -ls -la out/$VERSION/ -``` - -To copy the compiled archives to the local host out of the Docker container, you can run the following: - -```bash -mkdir out -docker cp gitrun:/home/ubuntu/out/$VERSION out -``` diff --git a/contrib/gitian/README.md b/contrib/gitian/README.md deleted file mode 100644 index 5211b84094e..00000000000 --- a/contrib/gitian/README.md +++ /dev/null @@ -1,272 +0,0 @@ -Gitian building -================ - -*Setup instructions for a Gitian build of Monero.* - -Gitian is the deterministic build process that is used to build the Monero CLI -executables. It provides a way to be reasonably sure that the -executables are really built from the git source. It also makes sure that -the same, tested dependencies are used and statically built into the executable. - -Multiple developers build the source code by following a specific descriptor -("recipe"), cryptographically sign the result, and upload the resulting signature. -These results are compared and only if they match, the build is accepted and provided -for download. - -Gitian runs compilation steps in an isolated container. It is flexible and gives you full -control over the build environment, while still ensuring reproducibility and consistent output -formats. - -More independent Gitian builders are needed, which is why this guide exists. -It is preferred you follow these steps yourself instead of using someone else's -VM image to avoid 'contaminating' the build. 
- -Preparing the Gitian builder host ---------------------------------- - -The first step is to prepare the host environment that will be used to perform the Gitian builds. -This guide explains how to set up the environment, and how to start the builds. - -* Gitian host OS should be Ubuntu 18.04 "Bionic Beaver". If you are on a mac or windows for example, you can run it in a VM but will be slower. - -* Gitian gives you the option of using any of 3 different virtualization tools: `kvm`, `docker` or `lxc`. This documentation will only show how to build with `lxc` and `docker` (documentation for `kvm` is welcome). Building with `lxc` is the default, but is more complicated, so we recommend docker your first time. - -* For a shortcut using `docker` follow the instructions in [DOCKRUN.md](DOCKRUN.md) instead -of following the rest of this document.. - -## Create the gitianuser account - -You need to create a new user called `gitianuser` and be logged in as that user. The user needs `sudo` access. - -```bash -sudo adduser gitianuser -sudo usermod -aG sudo gitianuser -``` - -LXC ---- - -LXC builds should be run on Ubuntu 18.04 "Bionic Beaver". - -Note that a version of `lxc-execute` higher or equal to 2.1.1 is required. -You can check the version with `lxc-execute --version`. - -First we need to set up dependencies. Type/paste the following in the terminal: - -```bash -sudo apt-get install git ruby apt-cacher-ng qemu-utils debootstrap lxc python-cheetah parted kpartx bridge-utils make ubuntu-archive-keyring curl firewalld -``` - -Then set up LXC and the rest with the following, which is a complex jumble of settings and workarounds: - -```bash -sudo -s -# the version of lxc-start in Debian needs to run as root, so make sure -# that the build script can execute it without providing a password -echo "%sudo ALL=NOPASSWD: /usr/bin/lxc-start" > /etc/sudoers.d/gitian-lxc -echo "%sudo ALL=NOPASSWD: /usr/bin/lxc-execute" >> /etc/sudoers.d/gitian-lxc -# make /etc/rc.local script that sets up bridge between guest and host -echo '#!/bin/sh -e' > /etc/rc.local -echo 'brctl addbr br0' >> /etc/rc.local -echo 'ip addr add 10.0.2.2/24 broadcast 10.0.2.255 dev br0' >> /etc/rc.local -echo 'ip link set br0 up' >> /etc/rc.local -echo 'firewall-cmd --zone=trusted --add-interface=br0' >> /etc/rc.local -echo 'exit 0' >> /etc/rc.local -chmod +x /etc/rc.local -# make sure that USE_LXC is always set when logging in as gitianuser, -# and configure LXC IP addresses -echo 'export USE_LXC=1' >> /home/gitianuser/.profile -echo 'export GITIAN_HOST_IP=10.0.2.2' >> /home/gitianuser/.profile -echo 'export LXC_GUEST_IP=10.0.2.5' >> /home/gitianuser/.profile -reboot -``` - -This setup is required to enable networking in the container. - -Docker ------- - -Prepare for building with docker: - -```bash -sudo bash -c 'apt-get update && apt-get upgrade -y && apt-get install git curl docker.io' -``` - -Consider adding `gitianuser` to the `docker` group after reading about [the security implications](https://docs.docker.com/v17.09/engine/installation/linux/linux-postinstall/): - -```bash -sudo groupadd docker -sudo usermod -aG docker gitianuser -``` - -Optionally add yourself to the docker group. Note that this will give docker root access to your system. - -```bash -sudo usermod -aG docker $USER -``` - -Manual Building -------------------- - -======= -The script automatically installs some packages with apt. If you are not running it on a debian-like system, pass `--no-apt` along with the other -arguments to it. 
It calls all available .yml descriptors, which in turn pass the build configurations for different platforms to gitian. -The instructions below use the automated script [gitian-build.py](gitian-build.py) which is tested to work on Ubuntu. - -It calls all available .yml descriptors, which in turn pass the build configurations for different platforms to gitian. -Help for the build steps taken can be accessed with `./gitian-build.py --help`. - -Initial Gitian Setup --------------------- - -The `gitian-build.py` script will checkout different release tags, so it's best to copy it to the top level directory: - -```bash -cp monero/contrib/gitian/gitian-build.py . -``` - -### Setup the required environment - -Common setup part: - -```bash -su - gitianuser - -GH_USER=YOUR_GITHUB_USER_NAME -VERSION=v0.18.0.0 -``` - -Where `GH_USER` is your GitHub user name and `VERSION` is the version tag you want to build. -The `gitian-build.py`'s `--setup` switch will also refresh the environment of any stale files and submodules. - -Setup for LXC: - -```bash -./gitian-build.py --setup $GH_USER $VERSION -``` - -Setup for docker: - -```bash -./gitian-build.py --setup --docker $GH_USER $VERSION -``` - -While gitian and this build script does provide a way for you to sign the build directly, it is recommended to sign in a separate step. This script is only there for convenience. Separate steps for building can still be taken. -In order to sign gitian builds on your host machine, which has your PGP key, -fork the [gitian.sigs repository](https://github.com/monero-project/gitian.sigs) and clone it on your host machine, -or pass the signed assert file back to your build machine. - -```bash -git clone https://github.com/monero-project/gitian.sigs/ -pushd gitian.sigs -git remote add $GH_USER https://github.com/$GH_USER/gitian.sigs -popd -``` - -Build the binaries ------------------- - -To build the most recent tag (pass in `--docker` if using docker): - -```bash -./gitian-build.py --detach-sign --no-commit --build $GH_USER $VERSION -``` - -To speed up the build, use `-j 5 --memory 10000` as the first arguments, where `5` is the number of CPU's you allocated to the VM plus one, and 10000 is a little bit less than then the MB's of RAM you allocated. If there is memory corruption on your machine, try to tweak these values. A good rule of thumb is, that Monero currently needs about 2 GB of RAM per core. - -A full example for `docker` would look like the following: - -```bash -./gitian-build.py -j 5 --memory 10000 --docker --detach-sign --no-commit --build $GH_USER $VERSION -``` - -If all went well, this produces a number of (uncommitted) `.assert` files in the gitian.sigs directory. - -Checking your work ------------------- - -Take a look in the assert files and note the SHA256 checksums listed there. - -You should verify that the checksum that is listed matches each of the binaries you actually built. -This may be done on Linux using the `sha256sum` command or on MacOS using `shasum --algorithm 256` for example. -An example script to verify the checksums would be: - -```bash -pushd out/${VERSION} - -for ASSERT in ../../sigs/${VERSION}-*/*/*.assert; do - if ! sha256sum --ignore-missing -c "${ASSERT}" ; then - echo "FAILED for ${ASSERT} ! Please inspect manually." - fi -done - -popd -``` - -Don't ignore the incorrect formatting of the found assert files. These files you'll have to compare manually (currently OSX and FreeBSD). 
- - -You can also look in the [gitian.sigs](https://github.com/monero-project/gitian.sigs/) repo and / or [getmonero.org release checksums](https://web.getmonero.org/downloads/hashes.txt) to see if others got the same checksum for the same version tag. If there is ever a mismatch -- **STOP! Something is wrong**. Contact others on IRC / github to figure out what is going on. - - -Signing assert files -------------------- - -If you chose to do detached signing using `--detach-sign` above (recommended), you need to copy these uncommitted changes to your host machine, then sign them using your gpg key like so: - -```bash -for ASSERT in sigs/${VERSION}-*/*/*.assert; do gpg --detach-sign ${ASSERT}; done -``` - -This will create a `.sig` file for each `.assert` file above (2 files for each platform). - - -Submitting your signed assert files ----------------------------------- - -Make a pull request (both the `.assert` and `.assert.sig` files) to the -[monero-project/gitian.sigs](https://github.com/monero-project/gitian.sigs/) repository: - -```bash -cd gitian.sigs -git checkout -b $VERSION -# add your assert and sig files... -git commit -S -a -m "Add $GH_USER $VERSION" -git push --set-upstream $GH_USER $VERSION -``` - -**Note:** Please ensure your gpg public key is available to check signatures by adding it to the [gitian.sigs/gitian-pubkeys/](https://github.com/monero-project/gitian.sigs/tree/master/gitian-pubkeys) directory in a pull request. - - -More Build Options ------------------- - -You can choose your own remote and commit hash by running for example: -```bash -./gitian-build.py --detach-sign --no-commit --url https://github.com/moneromooo-monero/bitmonero -b moneromooo 1f5680c8db8f4cc7acc04a04c724b832003440fd -``` - -Note that you won't be able to build commits authored before the gitian scripts -were added. Gitian clones the source files from the given url, be sure to push -to the remote you provide before building. -To get all build options run: -```bash -./gitian-build.py --help -``` - -Doing Successive Builds ----------------------- - -If you need to do multiple iterations (while developing/testing) you can use the -`--rebuild` option instead of `--build` on subsequent iterations. This skips the -initial check for the freshness of the depends tools. In particular, doing this -check all the time prevents rebuilding when you have no network access. - - -Local-Only Builds ----------------- - -If you need to run builds while disconnected from the internet, make sure you have -local up-to-date repos in advance. Then specify your local repo using the `--url` -option when building. This will avoid attempts to git pull across a network. - diff --git a/contrib/gitian/dockrun.sh b/contrib/gitian/dockrun.sh deleted file mode 100755 index 396db126b09..00000000000 --- a/contrib/gitian/dockrun.sh +++ /dev/null @@ -1,128 +0,0 @@ -#!/bin/sh - -if [ $# -ne 1 ]; then - echo "usage: $0 <version>" - exit 1 -fi -VERSION=$1 - -DOCKER=`command -v docker` -CACHER=`command -v apt-cacher-ng` - -if [ -z "$DOCKER" -o -z "$CACHER" ]; then - echo "$0: you must first install docker.io and apt-cacher-ng" - echo " e.g. 
sudo apt-get install docker.io apt-cacher-ng" - exit 1 -fi - -GH_USER=${GH_USER-$USER} - -TAG=gitrun-bionic -TAG2=base-bionic-amd64 -IMAGE=`docker images | grep $TAG` - -WORKDIR=/home/ubuntu - -if [ -z "$IMAGE" ]; then -GID=`getent group docker` -mkdir -p docker -cd docker - -# container for running gitian-build.py -cat <<EOF > ${TAG}.Dockerfile -FROM ubuntu:bionic - -ENV DEBIAN_FRONTEND=noninteractive -RUN echo 'Acquire::http { Proxy "http://172.17.0.1:3142"; };' > /etc/apt/apt.conf.d/50cacher -RUN echo "$GID" >> /etc/group -RUN apt-get update && apt-get --no-install-recommends -y install lsb-release ruby git make wget docker.io python3 curl - -RUN useradd -ms /bin/bash -U ubuntu -G docker -USER ubuntu:docker -WORKDIR $WORKDIR - -RUN git clone https://github.com/monero-project/gitian.sigs.git sigs; \ - git clone https://github.com/devrandom/gitian-builder.git builder; \ - cd builder; git checkout c0f77ca018cb5332bfd595e0aff0468f77542c23; mkdir -p inputs var; cd inputs; \ - git clone https://github.com/monero-project/monero - -CMD ["sleep", "infinity"] -EOF - -docker build --pull -f ${TAG}.Dockerfile -t $TAG . - -cd .. -docker run -v /var/run/docker.sock:/var/run/docker.sock -d --name gitrun $TAG - -fi - -IMAGE=`docker images | grep $TAG2` -if [ -z "$IMAGE" ]; then -mkdir -p docker -cd docker - -# container for actually running each build -cat <<EOF > ${TAG2}.Dockerfile -FROM ubuntu:bionic - -ENV DEBIAN_FRONTEND=noninteractive -RUN echo 'Acquire::http { Proxy "http://172.17.0.1:3142"; };' > /etc/apt/apt.conf.d/50cacher -RUN apt-get update && apt-get --no-install-recommends -y install build-essential git language-pack-en \ - wget lsb-release curl gcc-7 g++-7 gcc g++ binutils-gold pkg-config autoconf libtool automake faketime \ - bsdmainutils ca-certificates python cmake gperf - -RUN useradd -ms /bin/bash -U ubuntu -USER ubuntu:ubuntu -WORKDIR $WORKDIR - -CMD ["sleep", "infinity"] -EOF - -docker build --pull -f ${TAG2}.Dockerfile -t $TAG2 . - -cd .. - -fi - -RUNNING=`docker ps | grep gitrun` -if [ -z "$RUNNING" ]; then - BUILT=`docker ps -a | grep gitrun` - if [ -z "$BUILT" ]; then - docker run -v /var/run/docker.sock:/var/run/docker.sock -d --name gitrun $TAG - else - docker start gitrun - fi -fi -docker cp gitian-build.py gitrun:$WORKDIR/ -docker exec -t gitrun ./gitian-build.py -d -b -D -n $OPT $GH_USER $VERSION -RC=$? -if [ $RC != 0 ]; then - exit $RC -fi -echo "\nBuild Results:\n" -docker exec gitrun sh -c "sha256sum out/$VERSION/*" -echo "\nIf these results look correct, type \"sign\" to sign them, otherwise ^C to stop now." -read check -if [ "$check" != "sign" ]; then - echo "Not signing, bye." - exit 1 -fi - -if [ ! -d sigs ]; then - git clone https://github.com/monero-project/gitian.sigs.git sigs - cd sigs - git remote add $GH_USER git@github.com:$GH_USER/gitian.sigs.git - cd .. 
-fi - -DIRS=`docker exec gitrun sh -c "echo sigs/$VERSION-*"` -for i in $DIRS; do - docker cp gitrun:$WORKDIR/$i sigs - gpg --detach-sign $i/$GH_USER/*.assert -done - -cd sigs -git checkout -b $VERSION -git add $VERSION-* -git commit -S -m "Add $GH_USER $VERSION" -git push --set-upstream $GH_USER $VERSION diff --git a/contrib/gitian/gitian-android.yml b/contrib/gitian/gitian-android.yml deleted file mode 100644 index 7e9ca817828..00000000000 --- a/contrib/gitian/gitian-android.yml +++ /dev/null @@ -1,131 +0,0 @@ ---- -name: "monero-android-0.18" -enable_cache: true -suites: -- "bionic" -architectures: -- "amd64" -packages: -- "curl" -- "gperf" -- "gcc-7" -- "g++-7" -- "gcc" -- "g++" -- "binutils-gold" -- "git" -- "pkg-config" -- "build-essential" -- "autoconf" -- "libtool" -- "automake" -- "faketime" -- "bsdmainutils" -- "ca-certificates" -- "python" -- "cmake" -- "unzip" -remotes: -- "url": "https://github.com/monero-project/monero.git" - "dir": "monero" -files: [] -script: | - - WRAP_DIR=$HOME/wrapped - HOSTS="arm-linux-android aarch64-linux-android" - FAKETIME_HOST_PROGS="clang clang++ ar nm" - FAKETIME_PROGS="date" - HOST_CFLAGS="-O2 -g" - HOST_CXXFLAGS="-O2 -g" - HOST_LDFLAGS=-static-libstdc++ - - export GZIP="-9n" - export TZ="UTC" - export BUILD_DIR=`pwd` - mkdir -p ${WRAP_DIR} - if test -n "$GBUILD_CACHE_ENABLED"; then - export SOURCES_PATH=${GBUILD_COMMON_CACHE} - export BASE_CACHE=${GBUILD_PACKAGE_CACHE} - export GITIAN=1 - mkdir -p ${BASE_CACHE} ${SOURCES_PATH} - fi - - export ZERO_AR_DATE=1 - - function create_global_faketime_wrappers { - for prog in ${FAKETIME_PROGS}; do - echo '#!/usr/bin/env bash' > ${WRAP_DIR}/${prog} - echo "REAL=\`which -a ${prog} | grep -v ${WRAP_DIR}/${prog} | head -1\`" >> ${WRAP_DIR}/${prog} - echo 'export LD_PRELOAD=/usr/lib/x86_64-linux-gnu/faketime/libfaketime.so.1' >> ${WRAP_DIR}/${prog} - echo "export FAKETIME=\"$1\"" >> ${WRAP_DIR}/${prog} - echo "\$REAL \$@" >> $WRAP_DIR/${prog} - chmod +x ${WRAP_DIR}/${prog} - done - } - - function create_per-host_faketime_wrappers { - for i in $HOSTS; do - ABI=$i - if expr $i : arm- > /dev/null - then - ABI=$i"eabi" - fi - NDKDIR="${BUILD_DIR}/monero/contrib/depends/$i/native/bin" - for prog in ${FAKETIME_HOST_PROGS}; do - WRAPPER=${WRAP_DIR}/${ABI}-${prog} - echo '#!/usr/bin/env bash' > ${WRAPPER} - echo 'export LD_PRELOAD=/usr/lib/x86_64-linux-gnu/faketime/libfaketime.so.1' >> ${WRAPPER} - echo "export FAKETIME=\"$1\"" >> ${WRAPPER} - echo "$NDKDIR/${ABI}-$prog \$@" >> ${WRAPPER} - chmod +x ${WRAPPER} - done - done - } - - # Faketime for depends so intermediate results are comparable - export PATH_orig=${PATH} - create_global_faketime_wrappers "2000-01-01 12:00:00" - create_per-host_faketime_wrappers "2000-01-01 12:00:00" - export PATH=${WRAP_DIR}:${PATH} - - # gcc 7+ honors SOURCE_DATE_EPOCH, no faketime needed - export SOURCE_DATE_EPOCH=`date -d 2000-01-01T12:00:00 +%s` - - git config --global core.abbrev 9 - cd monero - # Set the version string that gets added to the tar archive name - version="`git describe`" - if [[ $version == *"-"*"-"* ]]; then - version="`git rev-parse --short=9 HEAD`" - version="`echo $version | head -c 9`" - fi - - BASEPREFIX=`pwd`/contrib/depends - # Build dependencies for each host - export TAR_OPTIONS=--mtime=2000-01-01T12:00:00 - for i in $HOSTS; do - make ${MAKEOPTS} -C ${BASEPREFIX} HOST="${i}" - done - - # Faketime for binaries - export PATH=${PATH_orig} - create_global_faketime_wrappers "${REFERENCE_DATETIME}" - create_per-host_faketime_wrappers "${REFERENCE_DATETIME}" - - 
# Build in a new dir for each host - export TAR_OPTIONS=--mtime=${REFERENCE_DATE}T${REFERENCE_TIME} - for i in ${HOSTS}; do - export PATH=${WRAP_DIR}:${BASEPREFIX}/${i}/native/bin:${PATH_orig} - mkdir build && cd build - cmake .. -DCMAKE_TOOLCHAIN_FILE=${BASEPREFIX}/${i}/share/toolchain.cmake -DCMAKE_BUILD_TYPE=Release - make ${MAKEOPTS} - chmod 755 bin/* - cp ../LICENSE ../README.md ../docs/ANONYMITY_NETWORKS.md bin - chmod 644 bin/LICENSE bin/*.md - DISTNAME=monero-${i}-${version} - mv bin ${DISTNAME} - find ${DISTNAME}/ | sort | tar --no-recursion --owner=0 --group=0 -c -T - | bzip2 -9 > ${OUTDIR}/${DISTNAME}.tar.bz2 - cd .. - rm -rf build - done - diff --git a/contrib/gitian/gitian-build.py b/contrib/gitian/gitian-build.py deleted file mode 100755 index 859f460a78b..00000000000 --- a/contrib/gitian/gitian-build.py +++ /dev/null @@ -1,203 +0,0 @@ -#!/usr/bin/env python3 - -import argparse -import os -import subprocess -import sys - -gsigs = 'https://github.com/monero-project/gitian.sigs.git' -gbrepo = 'https://github.com/devrandom/gitian-builder.git' - -platforms = {'l': ['Linux', 'linux', 'tar.bz2'], - 'a': ['Android', 'android', 'tar.bz2'], - 'f': ['FreeBSD', 'freebsd', 'tar.bz2'], - 'w': ['Windows', 'win', 'zip'], - 'm': ['MacOS', 'osx', 'tar.bz2'] } - -def setup(): - global args, workdir - programs = ['apt-cacher-ng', 'ruby', 'git', 'make', 'wget'] - if args.kvm: - programs += ['python-vm-builder', 'qemu-kvm', 'qemu-utils'] - else: - programs += ['lxc', 'debootstrap'] - if not args.no_apt: - subprocess.check_call(['sudo', 'apt-get', 'install', '-qq'] + programs) - if not os.path.isdir('sigs'): - subprocess.check_call(['git', 'clone', gsigs, 'sigs']) - if not os.path.isdir('builder'): - subprocess.check_call(['git', 'clone', gbrepo, 'builder']) - os.chdir('builder') - subprocess.check_call(['git', 'checkout', 'c0f77ca018cb5332bfd595e0aff0468f77542c23']) - os.makedirs('inputs', exist_ok=True) - os.chdir('inputs') - if os.path.isdir('monero'): - # Remove the potentially stale monero dir. Otherwise you might face submodule mismatches. - subprocess.check_call(['rm', 'monero', '-fR']) - subprocess.check_call(['git', 'clone', args.url, 'monero']) - os.chdir('..') - make_image_prog = ['bin/make-base-vm', '--suite', 'bionic', '--arch', 'amd64'] - if args.docker: - try: - subprocess.check_output(['docker', '--help']) - except: - print("ERROR: Could not find 'docker' command. Ensure this is in your PATH.") - sys.exit(1) - make_image_prog += ['--docker'] - elif not args.kvm: - make_image_prog += ['--lxc'] - subprocess.check_call(make_image_prog) - os.chdir(workdir) - if args.is_bionic and not args.kvm and not args.docker: - subprocess.check_call(['sudo', 'sed', '-i', 's/lxcbr0/br0/', '/etc/default/lxc-net']) - print('Reboot is required') - sys.exit(0) - -def rebuild(): - global args, workdir - - print('\nBuilding Dependencies\n') - os.makedirs('../out/' + args.version, exist_ok=True) - - - for i in args.os: - os_name = platforms[i][0] - tag_name = platforms[i][1] - suffix = platforms[i][2] - - print('\nCompiling ' + args.version + ' ' + os_name) - infile = 'inputs/monero/contrib/gitian/gitian-' + tag_name + '.yml' - subprocess.check_call(['bin/gbuild', '-j', args.jobs, '-m', args.memory, '--commit', 'monero='+args.commit, '--url', 'monero='+args.url, infile]) - subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-'+tag_name, '--destination', '../sigs/', infile]) - subprocess.check_call('mv build/out/monero-*.' 
+ suffix + ' ../out/'+args.version, shell=True) - print('Moving var/install.log to var/install-' + tag_name + '.log') - subprocess.check_call('mv var/install.log var/install-' + tag_name + '.log', shell=True) - print('Moving var/build.log to var/build-' + tag_name + '.log') - subprocess.check_call('mv var/build.log var/build-' + tag_name + '.log', shell=True) - - os.chdir(workdir) - - if args.commit_files: - print('\nCommitting '+args.version+' Unsigned Sigs\n') - os.chdir('sigs') - for i, v in platforms: - subprocess.check_call(['git', 'add', args.version+'-'+v[1]+'/'+args.signer]) - subprocess.check_call(['git', 'commit', '-m', 'Add '+args.version+' unsigned sigs for '+args.signer]) - os.chdir(workdir) - - -def build(): - global args, workdir - - print('\nChecking Depends Freshness\n') - os.chdir('builder') - os.makedirs('inputs', exist_ok=True) - - subprocess.check_call(['make', '-C', 'inputs/monero/contrib/depends', 'download', 'SOURCES_PATH=' + os.getcwd() + '/cache/common']) - - rebuild() - - -def verify(): - global args, workdir - os.chdir('builder') - - for i, v in platforms: - print('\nVerifying v'+args.version+' '+v[0]+'\n') - subprocess.check_call(['bin/gverify', '-v', '-d', '../sigs/', '-r', args.version+'-'+v[1], 'inputs/monero/contrib/gitian/gitian-'+v[1]+'.yml']) - os.chdir(workdir) - -def main(): - global args, workdir - - parser = argparse.ArgumentParser(description='Script for running full Gitian builds.', usage='%(prog)s [options] signer version') - parser.add_argument('-c', '--commit', action='store_true', dest='commit', help='Indicate that the version argument is for a commit or branch') - parser.add_argument('-p', '--pull', action='store_true', dest='pull', help='Indicate that the version argument is the number of a github repository pull request') - parser.add_argument('-u', '--url', dest='url', default='https://github.com/monero-project/monero', help='Specify the URL of the repository. Default is %(default)s') - parser.add_argument('-v', '--verify', action='store_true', dest='verify', help='Verify the Gitian build') - parser.add_argument('-b', '--build', action='store_true', dest='build', help='Do a Gitian build') - parser.add_argument('-B', '--buildsign', action='store_true', dest='buildsign', help='Build both signed and unsigned binaries') - parser.add_argument('-o', '--os', dest='os', default='lafwm', help='Specify which Operating Systems the build is for. Default is %(default)s. l for Linux, a for Android, f for FreeBSD, w for Windows, m for MacOS') - parser.add_argument('-r', '--rebuild', action='store_true', dest='rebuild', help='Redo a Gitian build') - parser.add_argument('-R', '--rebuildsign', action='store_true', dest='rebuildsign', help='Redo and sign a Gitian build') - parser.add_argument('-j', '--jobs', dest='jobs', default='2', help='Number of processes to use. Default %(default)s') - parser.add_argument('-m', '--memory', dest='memory', default='2000', help='Memory to allocate in MiB. Default %(default)s') - parser.add_argument('-k', '--kvm', action='store_true', dest='kvm', help='Use KVM instead of LXC') - parser.add_argument('-d', '--docker', action='store_true', dest='docker', help='Use Docker instead of LXC') - parser.add_argument('-S', '--setup', action='store_true', dest='setup', help='Set up the Gitian building environment. Uses LXC. If you want to use KVM, use the --kvm option. 
If you run this script on a non-debian based system, pass the --no-apt flag') - parser.add_argument('-D', '--detach-sign', action='store_true', dest='detach_sign', help='Create the assert file for detached signing. Will not commit anything.') - parser.add_argument('-n', '--no-commit', action='store_false', dest='commit_files', help='Do not commit anything to git') - parser.add_argument('signer', nargs='?', help='GPG signer to sign each build assert file') - parser.add_argument('version', nargs='?', help='Version number, commit, or branch to build.') - parser.add_argument('-a', '--no-apt', action='store_true', dest='no_apt', help='Indicate that apt is not installed on the system') - - args = parser.parse_args() - workdir = os.getcwd() - - args.is_bionic = b'bionic' in subprocess.check_output(['lsb_release', '-cs']) - - if args.buildsign: - args.build = True - args.sign = True - - if args.rebuildsign: - args.rebuild = True - args.sign = True - - if args.kvm and args.docker: - raise Exception('Error: cannot have both kvm and docker') - - args.sign_prog = 'true' if args.detach_sign else 'gpg --detach-sign' - - # Set enviroment variable USE_LXC or USE_DOCKER, let gitian-builder know that we use lxc or docker - if args.docker: - os.environ['USE_DOCKER'] = '1' - elif not args.kvm: - os.environ['USE_LXC'] = '1' - if not 'GITIAN_HOST_IP' in os.environ.keys(): - os.environ['GITIAN_HOST_IP'] = '10.0.2.2' - if not 'LXC_GUEST_IP' in os.environ.keys(): - os.environ['LXC_GUEST_IP'] = '10.0.2.5' - - script_name = os.path.basename(sys.argv[0]) - # Signer and version shouldn't be empty - if args.signer == '': - print(script_name+': Missing signer.') - print('Try '+script_name+' --help for more information') - sys.exit(1) - if args.version == '': - print(script_name+': Missing version.') - print('Try '+script_name+' --help for more information') - sys.exit(1) - - # Add leading 'v' for tags - if args.commit and args.pull: - raise Exception('Cannot have both commit and pull') - args.commit = args.commit if args.commit else args.version - - if args.setup: - setup() - - os.makedirs('builder/inputs/monero', exist_ok=True) - os.chdir('builder/inputs/monero') - if args.pull: - subprocess.check_call(['git', 'fetch', args.url, 'refs/pull/'+args.version+'/merge']) - args.commit = subprocess.check_output(['git', 'show', '-s', '--format=%H', 'FETCH_HEAD'], universal_newlines=True).strip() - args.version = 'pull-' + args.version - print(args.commit) - subprocess.check_call(['git', 'fetch']) - subprocess.check_call(['git', 'checkout', args.commit]) - os.chdir(workdir) - - if args.build: - build() - - if args.rebuild: - os.chdir('builder') - rebuild() - - if args.verify: - verify() - -if __name__ == '__main__': - main() diff --git a/contrib/gitian/gitian-freebsd.yml b/contrib/gitian/gitian-freebsd.yml deleted file mode 100644 index 7a17f0750c6..00000000000 --- a/contrib/gitian/gitian-freebsd.yml +++ /dev/null @@ -1,128 +0,0 @@ ---- -name: "monero-freebsd-0.18" -enable_cache: true -suites: -- "bionic" -architectures: -- "amd64" -packages: -- "curl" -- "clang-8" -- "gperf" -- "gcc-7" -- "g++-7" -- "gcc" -- "g++" -- "binutils-gold" -- "git" -- "pkg-config" -- "build-essential" -- "autoconf" -- "libtool" -- "automake" -- "faketime" -- "bsdmainutils" -- "ca-certificates" -- "python" -- "cmake" -remotes: -- "url": "https://github.com/monero-project/monero.git" - "dir": "monero" -files: [] -script: | - - WRAP_DIR=$HOME/wrapped - HOSTS="x86_64-unknown-freebsd" - FAKETIME_HOST_PROGS="" - FAKETIME_PROGS="clang-8 clang++-8 date" - 
HOST_CFLAGS="-O2 -g" - HOST_CXXFLAGS="-O2 -g" - HOST_LDFLAGS=-static-libstdc++ - - export GZIP="-9n" - export TZ="UTC" - export BUILD_DIR=`pwd` - mkdir -p ${WRAP_DIR} - if test -n "$GBUILD_CACHE_ENABLED"; then - export SOURCES_PATH=${GBUILD_COMMON_CACHE} - export BASE_CACHE=${GBUILD_PACKAGE_CACHE} - export GITIAN=1 - mkdir -p ${BASE_CACHE} ${SOURCES_PATH} - fi - - export ZERO_AR_DATE=1 - - function create_global_faketime_wrappers { - for prog in ${FAKETIME_PROGS}; do - echo '#!/usr/bin/env bash' > ${WRAP_DIR}/${prog} - echo "REAL=\`which -a ${prog} | grep -v ${WRAP_DIR}/${prog} | head -1\`" >> ${WRAP_DIR}/${prog} - echo 'export LD_PRELOAD=/usr/lib/x86_64-linux-gnu/faketime/libfaketime.so.1' >> ${WRAP_DIR}/${prog} - echo "export FAKETIME=\"$1\"" >> ${WRAP_DIR}/${prog} - echo "\$REAL \$@" >> $WRAP_DIR/${prog} - chmod +x ${WRAP_DIR}/${prog} - done - } - - function create_per-host_faketime_wrappers { - for i in $HOSTS; do - for prog in ${FAKETIME_HOST_PROGS}; do - WRAPPER=${WRAP_DIR}/${i}-${prog} - echo '#!/usr/bin/env bash' > ${WRAPPER} - echo "REAL=\`which -a ${i}-${prog} | grep -v ${WRAP_DIR}/${i}-${prog} | head -1\`" >> ${WRAPPER} - echo 'export LD_PRELOAD=/usr/lib/x86_64-linux-gnu/faketime/libfaketime.so.1' >> ${WRAPPER} - echo "export FAKETIME=\"$1\"" >> ${WRAPPER} - echo "$NDKDIR/${ABI}-$prog \$@" >> ${WRAPPER} - chmod +x ${WRAPPER} - done - done - } - - # Faketime for depends so intermediate results are comparable - export PATH_orig=${PATH} - create_global_faketime_wrappers "2000-01-01 12:00:00" - create_per-host_faketime_wrappers "2000-01-01 12:00:00" - export PATH=${WRAP_DIR}:${PATH} - - # gcc 7+ honors SOURCE_DATE_EPOCH, no faketime needed - export SOURCE_DATE_EPOCH=`date -d 2000-01-01T12:00:00 +%s` - - git config --global core.abbrev 9 - cd monero - # Set the version string that gets added to the tar archive name - version="`git describe`" - if [[ $version == *"-"*"-"* ]]; then - version="`git rev-parse --short=9 HEAD`" - version="`echo $version | head -c 9`" - fi - - BASEPREFIX=`pwd`/contrib/depends - # Build dependencies for each host - export TAR_OPTIONS=--mtime=2000-01-01T12:00:00 - for i in $HOSTS; do - make ${MAKEOPTS} -C ${BASEPREFIX} HOST="${i}" - done - - # Faketime for binaries - export PATH=${PATH_orig} - create_global_faketime_wrappers "${REFERENCE_DATETIME}" - create_per-host_faketime_wrappers "${REFERENCE_DATETIME}" - - ORIGPATH="$PATH" - # Build in a new dir for each host - export SOURCE_DATE_EPOCH=`date -d ${REFERENCE_DATE}T${REFERENCE_TIME} +%s` - export TAR_OPTIONS=--mtime=${REFERENCE_DATE}T${REFERENCE_TIME} - for i in ${HOSTS}; do - export PATH=${WRAP_DIR}:${BASEPREFIX}/${i}/native/bin:${ORIGPATH} - mkdir build && cd build - cmake .. -DCMAKE_TOOLCHAIN_FILE=${BASEPREFIX}/${i}/share/toolchain.cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_SKIP_RPATH=ON - make ${MAKEOPTS} - chmod 755 bin/* - cp ../LICENSE ../README.md ../docs/ANONYMITY_NETWORKS.md bin - chmod 644 bin/LICENSE bin/*.md - DISTNAME=monero-${i}-${version} - mv bin ${DISTNAME} - find ${DISTNAME}/ | sort | tar --no-recursion --owner=0 --group=0 -c -T - | bzip2 -9 > ${OUTDIR}/${DISTNAME}.tar.bz2 - cd .. 
- rm -rf build - done - diff --git a/contrib/gitian/gitian-linux.yml b/contrib/gitian/gitian-linux.yml deleted file mode 100644 index 41915deb9ee..00000000000 --- a/contrib/gitian/gitian-linux.yml +++ /dev/null @@ -1,180 +0,0 @@ ---- -name: "monero-linux-0.18" -enable_cache: true -suites: -- "bionic" -architectures: -- "amd64" -packages: -- "curl" -- "gperf" -- "gcc-7" -- "g++-7" -- "gcc" -- "g++" -- "gcc-7-aarch64-linux-gnu" -- "g++-7-aarch64-linux-gnu" -- "gcc-aarch64-linux-gnu" -- "g++-aarch64-linux-gnu" -- "binutils-aarch64-linux-gnu" -- "gcc-7-arm-linux-gnueabihf" -- "g++-7-arm-linux-gnueabihf" -- "gcc-arm-linux-gnueabihf" -- "g++-arm-linux-gnueabihf" -- "g++-riscv64-linux-gnu" -- "g++-7-multilib" -- "gcc-7-multilib" -- "binutils-arm-linux-gnueabihf" -- "binutils-gold" -- "git" -- "pkg-config" -- "build-essential" -- "autoconf" -- "libtool" -- "automake" -- "faketime" -- "bsdmainutils" -- "ca-certificates" -- "python" -- "cmake" -remotes: -- "url": "https://github.com/monero-project/monero.git" - "dir": "monero" -files: [] -script: | - - WRAP_DIR=$HOME/wrapped - HOSTS="x86_64-linux-gnu arm-linux-gnueabihf aarch64-linux-gnu i686-linux-gnu riscv64-linux-gnu" - FAKETIME_HOST_PROGS="" - FAKETIME_PROGS="date" - HOST_CFLAGS="-O2 -g" - HOST_CXXFLAGS="-O2 -g" - HOST_LDFLAGS=-static-libstdc++ - - export GZIP="-9n" - export TZ="UTC" - export BUILD_DIR=`pwd` - mkdir -p ${WRAP_DIR} - if test -n "$GBUILD_CACHE_ENABLED"; then - export SOURCES_PATH=${GBUILD_COMMON_CACHE} - export BASE_CACHE=${GBUILD_PACKAGE_CACHE} - export GITIAN=1 - mkdir -p ${BASE_CACHE} ${SOURCES_PATH} - fi - - function create_global_faketime_wrappers { - for prog in ${FAKETIME_PROGS}; do - echo '#!/usr/bin/env bash' > ${WRAP_DIR}/${prog} - echo "REAL=\`which -a ${prog} | grep -v ${WRAP_DIR}/${prog} | head -1\`" >> ${WRAP_DIR}/${prog} - echo 'export LD_PRELOAD=/usr/lib/x86_64-linux-gnu/faketime/libfaketime.so.1' >> ${WRAP_DIR}/${prog} - echo "export FAKETIME=\"$1\"" >> ${WRAP_DIR}/${prog} - echo "\$REAL \$@" >> $WRAP_DIR/${prog} - chmod +x ${WRAP_DIR}/${prog} - done - } - - function create_per-host_faketime_wrappers { - for i in $HOSTS; do - for prog in ${FAKETIME_HOST_PROGS}; do - if which ${i}-${prog}-7 - then - echo '#!/usr/bin/env bash' > ${WRAP_DIR}/${i}-${prog} - echo "REAL=\`which -a ${i}-${prog}-7 | grep -v ${WRAP_DIR}/${i}-${prog} | head -1\`" >> ${WRAP_DIR}/${i}-${prog} - echo 'export LD_PRELOAD=/usr/lib/x86_64-linux-gnu/faketime/libfaketime.so.1' >> ${WRAP_DIR}/${i}-${prog} - echo "export FAKETIME=\"$1\"" >> ${WRAP_DIR}/${i}-${prog} - echo "\$REAL \$@" >> $WRAP_DIR/${i}-${prog} - chmod +x ${WRAP_DIR}/${i}-${prog} - fi - done - done - } - - # Faketime for depends so intermediate results are comparable - export PATH_orig=${PATH} - create_global_faketime_wrappers "2000-01-01 12:00:00" - create_per-host_faketime_wrappers "2000-01-01 12:00:00" - export PATH=${WRAP_DIR}:${PATH} - - EXTRA_INCLUDES_BASE=$WRAP_DIR/extra_includes - mkdir -p $EXTRA_INCLUDES_BASE - - # x86 needs /usr/include/i386-linux-gnu/asm pointed to /usr/include/x86_64-linux-gnu/asm, - # but we can't write there. Instead, create a link here and force it to be included in the - # search paths. - # This problem goes away if linux-libc-dev:i386 pkg exists, but it's not in bionic. - - mkdir -p $EXTRA_INCLUDES_BASE/i686-linux-gnu - rm -f $WRAP_DIR/extra_includes/i686-linux-gnu/asm - ln -s /usr/include/x86_64-linux-gnu/asm $EXTRA_INCLUDES_BASE/i686-linux-gnu/asm - - # glibc 2.23 breaks compatibility with <=2.19 use of lgamma function. 
- # Hack the math header to restore the old behavior. - mkdir $EXTRA_INCLUDES_BASE/bits - sed -e '/__REDIRFROM .lgamma,/,+3s/_USE_/_DONTUSE_/g' /usr/include/x86_64-linux-gnu/bits/math-finite.h > $EXTRA_INCLUDES_BASE/bits/math-finite.h - - # gcc 7+ honors SOURCE_DATE_EPOCH, no faketime needed - export SOURCE_DATE_EPOCH=`date -d 2000-01-01T12:00:00 +%s` - - git config --global core.abbrev 9 - cd monero - # Set the version string that gets added to the tar archive name - version="`git describe`" - if [[ $version == *"-"*"-"* ]]; then - version="`git rev-parse --short=9 HEAD`" - version="`echo $version | head -c 9`" - fi - - BASEPREFIX=`pwd`/contrib/depends - # Build dependencies for each host - export TAR_OPTIONS=--mtime=2000-01-01T12:00:00 - for i in $HOSTS; do - ARCH_INCLUDES="$EXTRA_INCLUDES_BASE/$i" - if [ -d "$ARCH_INCLUDES" ]; then - EXTRA_INCLUDES="${EXTRA_INCLUDES_BASE}:${ARCH_INCLUDES}" - else - EXTRA_INCLUDES="${EXTRA_INCLUDES_BASE}" - fi - export C_INCLUDE_PATH="$EXTRA_INCLUDES" - export CPLUS_INCLUDE_PATH="$EXTRA_INCLUDES" - make ${MAKEOPTS} -C ${BASEPREFIX} HOST="${i}" V=1 - done - - # Faketime for binaries - export PATH=${PATH_orig} - create_global_faketime_wrappers "${REFERENCE_DATETIME}" - create_per-host_faketime_wrappers "${REFERENCE_DATETIME}" - export PATH=${WRAP_DIR}:${PATH} - - ORIGPATH="$PATH" - # Build in a new dir for each host - export SOURCE_DATE_EPOCH=`date -d ${REFERENCE_DATE}T${REFERENCE_TIME} +%s` - export TAR_OPTIONS=--mtime=${REFERENCE_DATE}T${REFERENCE_TIME} - for i in ${HOSTS}; do - export PATH=${BASEPREFIX}/${i}/native/bin:${ORIGPATH} - mkdir build && cd build - ARCH_INCLUDES="$EXTRA_INCLUDES_BASE/$i" - if [ -d "$ARCH_INCLUDES" ]; then - EXTRA_INCLUDES="${EXTRA_INCLUDES_BASE}:${ARCH_INCLUDES}" - else - EXTRA_INCLUDES="${EXTRA_INCLUDES_BASE}" - fi - export C_INCLUDE_PATH="$EXTRA_INCLUDES" - export CPLUS_INCLUDE_PATH="$EXTRA_INCLUDES" - # glibc only added riscv support in 2.27, disable backwards compatibility - if [ "$i" == "riscv64-linux-gnu" ]; then - BACKCOMPAT_OPTION=OFF - else - BACKCOMPAT_OPTION=ON - fi - cmake .. -DCMAKE_TOOLCHAIN_FILE=${BASEPREFIX}/${i}/share/toolchain.cmake -DBACKCOMPAT=${BACKCOMPAT_OPTION} -DCMAKE_SKIP_RPATH=ON - make ${MAKEOPTS} - chmod 755 bin/* - cp ../LICENSE ../README.md ../docs/ANONYMITY_NETWORKS.md bin - chmod 644 bin/LICENSE bin/*.md - DISTNAME=monero-${i}-${version} - mv bin ${DISTNAME} - find ${DISTNAME}/ | sort | tar --no-recursion --owner=0 --group=0 -c -T - | bzip2 -9 > ${OUTDIR}/${DISTNAME}.tar.bz2 - cd .. 
- rm -rf build - done - diff --git a/contrib/gitian/gitian-osx.yml b/contrib/gitian/gitian-osx.yml deleted file mode 100644 index 74ad21ea3af..00000000000 --- a/contrib/gitian/gitian-osx.yml +++ /dev/null @@ -1,119 +0,0 @@ ---- -name: "monero-osx-0.18" -enable_cache: true -suites: -- "bionic" -architectures: -- "amd64" -packages: -- "ca-certificates" -- "curl" -- "g++" -- "git" -- "pkg-config" -- "autoconf" -- "libtool" -- "automake" -- "faketime" -- "bsdmainutils" -- "cmake" -- "libcap-dev" -- "libz-dev" -- "libbz2-dev" -- "python" -- "python-dev" -- "python-setuptools" -remotes: -- "url": "https://github.com/monero-project/monero.git" - "dir": "monero" -files: [] -script: | - WRAP_DIR=$HOME/wrapped - HOSTS="x86_64-apple-darwin aarch64-apple-darwin" - FAKETIME_HOST_PROGS="" - FAKETIME_PROGS="ar ranlib date dmg genisoimage python" - - export GZIP="-9n" - export TZ="UTC" - export BUILD_DIR=`pwd` - mkdir -p ${WRAP_DIR} - if test -n "$GBUILD_CACHE_ENABLED"; then - export SOURCES_PATH=${GBUILD_COMMON_CACHE} - export BASE_CACHE=${GBUILD_PACKAGE_CACHE} - export GITIAN=1 - mkdir -p ${BASE_CACHE} ${SOURCES_PATH} - fi - - export ZERO_AR_DATE=1 - - function create_global_faketime_wrappers { - for prog in ${FAKETIME_PROGS}; do - echo '#!/usr/bin/env bash' > ${WRAP_DIR}/${prog} - echo "REAL=\`which -a ${prog} | grep -v ${WRAP_DIR}/${prog} | head -1\`" >> ${WRAP_DIR}/${prog} - echo 'export LD_PRELOAD=/usr/lib/x86_64-linux-gnu/faketime/libfaketime.so.1' >> ${WRAP_DIR}/${prog} - echo "export FAKETIME=\"$1\"" >> ${WRAP_DIR}/${prog} - echo "\$REAL \$@" >> $WRAP_DIR/${prog} - chmod +x ${WRAP_DIR}/${prog} - done - } - - function create_per-host_faketime_wrappers { - for i in $HOSTS; do - for prog in ${FAKETIME_HOST_PROGS}; do - echo '#!/usr/bin/env bash' > ${WRAP_DIR}/${i}-${prog} - echo "REAL=\`which -a ${i}-${prog} | grep -v ${WRAP_DIR}/${i}-${prog} | head -1\`" >> ${WRAP_DIR}/${i}-${prog} - echo 'export LD_PRELOAD=/usr/lib/x86_64-linux-gnu/faketime/libfaketime.so.1' >> ${WRAP_DIR}/${i}-${prog} - echo "export FAKETIME=\"$1\"" >> ${WRAP_DIR}/${i}-${prog} - echo "\$REAL \$@" >> $WRAP_DIR/${i}-${prog} - chmod +x ${WRAP_DIR}/${i}-${prog} - done - done - } - - # Faketime for depends so intermediate results are comparable - export PATH_orig=${PATH} - create_global_faketime_wrappers "2000-01-01 12:00:00" - create_per-host_faketime_wrappers "2000-01-01 12:00:00" - export PATH=${WRAP_DIR}:${PATH} - - git config --global core.abbrev 9 - cd monero - # Set the version string that gets added to the tar archive name - version="`git describe`" - if [[ $version == *"-"*"-"* ]]; then - version="`git rev-parse --short=9 HEAD`" - version="`echo $version | head -c 9`" - fi - - BASEPREFIX=`pwd`/contrib/depends - - # Build dependencies for each host - export TAR_OPTIONS=--mtime=2000-01-01T12:00:00 - for i in $HOSTS; do - make ${MAKEOPTS} -C ${BASEPREFIX} HOST="${i}" - done - - # Faketime for binaries - export PATH=${PATH_orig} - create_global_faketime_wrappers "${REFERENCE_DATETIME}" - create_per-host_faketime_wrappers "${REFERENCE_DATETIME}" - export PATH=${WRAP_DIR}:${PATH} - - ORIGPATH="$PATH" - # Build in a new dir for each host - export TAR_OPTIONS=--mtime=${REFERENCE_DATE}T${REFERENCE_TIME} - for i in ${HOSTS}; do - export PATH=${BASEPREFIX}/${i}/native/bin:${ORIGPATH} - mkdir build && cd build - cmake .. 
-DCMAKE_TOOLCHAIN_FILE=${BASEPREFIX}/${i}/share/toolchain.cmake - make ${MAKEOPTS} - chmod 755 bin/* - cp ../LICENSE ../README.md ../docs/ANONYMITY_NETWORKS.md bin - chmod 644 bin/LICENSE bin/*.md - DISTNAME=monero-${i}-${version} - mv bin ${DISTNAME} - find ${DISTNAME}/ | sort | tar --no-recursion --owner=0 --group=0 -c -T - | bzip2 -9 > ${OUTDIR}/${DISTNAME}.tar.bz2 - cd .. - rm -rf build - done - diff --git a/contrib/gitian/gitian-win.yml b/contrib/gitian/gitian-win.yml deleted file mode 100644 index 4c607898eb5..00000000000 --- a/contrib/gitian/gitian-win.yml +++ /dev/null @@ -1,136 +0,0 @@ ---- -name: "monero-win-0.18" -enable_cache: true -suites: -- "bionic" -architectures: -- "amd64" -packages: -- "curl" -- "g++" -- "git" -- "pkg-config" -- "autoconf" -- "libtool" -- "automake" -- "faketime" -- "bsdmainutils" -- "mingw-w64" -- "g++-mingw-w64" -- "zip" -- "ca-certificates" -- "python" -- "cmake" -alternatives: -- - package: "i686-w64-mingw32-g++" - path: "/usr/bin/i686-w64-mingw32-g++-posix" -- - package: "i686-w64-mingw32-gcc" - path: "/usr/bin/i686-w64-mingw32-gcc-posix" -- - package: "x86_64-w64-mingw32-g++" - path: "/usr/bin/x86_64-w64-mingw32-g++-posix" -- - package: "x86_64-w64-mingw32-gcc" - path: "/usr/bin/x86_64-w64-mingw32-gcc-posix" -remotes: -- "url": "https://github.com/monero-project/monero.git" - "dir": "monero" -files: [] -script: | - WRAP_DIR=$HOME/wrapped - HOSTS="i686-w64-mingw32 x86_64-w64-mingw32" - FAKETIME_HOST_PROGS="windres objcopy" - FAKETIME_PROGS="date zip" - HOST_CFLAGS="-O2 -g" - HOST_CXXFLAGS="-O2 -g" - - export GZIP="-9n" - export TZ="UTC" - export BUILD_DIR=`pwd` - mkdir -p ${WRAP_DIR} - if test -n "$GBUILD_CACHE_ENABLED"; then - export SOURCES_PATH=${GBUILD_COMMON_CACHE} - export BASE_CACHE=${GBUILD_PACKAGE_CACHE} - export GITIAN=1 - mkdir -p ${BASE_CACHE} ${SOURCES_PATH} - fi - - function create_global_faketime_wrappers { - for prog in ${FAKETIME_PROGS}; do - echo '#!/usr/bin/env bash' > ${WRAP_DIR}/${prog} - echo "REAL=\`which -a ${prog} | grep -v ${WRAP_DIR}/${prog} | head -1\`" >> ${WRAP_DIR}/${prog} - echo 'export LD_PRELOAD=/usr/lib/x86_64-linux-gnu/faketime/libfaketime.so.1' >> ${WRAP_DIR}/${prog} - echo "export FAKETIME=\"$1\"" >> ${WRAP_DIR}/${prog} - echo "\$REAL \$@" >> $WRAP_DIR/${prog} - chmod +x ${WRAP_DIR}/${prog} - done - } - - function create_per-host_faketime_wrappers { - for i in $HOSTS; do - for prog in ${FAKETIME_HOST_PROGS}; do - echo '#!/usr/bin/env bash' > ${WRAP_DIR}/${i}-${prog} - echo "REAL=\`which -a ${i}-${prog} | grep -v ${WRAP_DIR}/${i}-${prog} | head -1\`" >> ${WRAP_DIR}/${i}-${prog} - echo 'export LD_PRELOAD=/usr/lib/x86_64-linux-gnu/faketime/libfaketime.so.1' >> ${WRAP_DIR}/${i}-${prog} - echo "export FAKETIME=\"$1\"" >> ${WRAP_DIR}/${i}-${prog} - echo "\$REAL \$@" >> $WRAP_DIR/${i}-${prog} - chmod +x ${WRAP_DIR}/${i}-${prog} - done - done - } - - # Faketime for depends so intermediate results are comparable - export PATH_orig=${PATH} - create_global_faketime_wrappers "2000-01-01 12:00:00" - create_per-host_faketime_wrappers "2000-01-01 12:00:00" - export PATH=${WRAP_DIR}:${PATH} - - # gcc 7+ honors SOURCE_DATE_EPOCH, no faketime needed - export SOURCE_DATE_EPOCH=`date -d 2000-01-01T12:00:00 +%s` - - git config --global core.abbrev 9 - cd monero - # Set the version string that gets added to the tar archive name - version="`git describe`" - if [[ $version == *"-"*"-"* ]]; then - version="`git rev-parse --short=9 HEAD`" - version="`echo $version | head -c 9`" - fi - - BASEPREFIX=`pwd`/contrib/depends - # Build 
dependencies for each host - export TAR_OPTIONS=--mtime=2000-01-01T12:00:00 - for i in $HOSTS; do - EXTRA_INCLUDES="$EXTRA_INCLUDES_BASE/$i" - if [ -d "$EXTRA_INCLUDES" ]; then - export HOST_ID_SALT="$EXTRA_INCLUDES" - fi - make ${MAKEOPTS} -C ${BASEPREFIX} HOST="${i}" V=1 - unset HOST_ID_SALT - done - - # Faketime for binaries - export PATH=${PATH_orig} - create_global_faketime_wrappers "${REFERENCE_DATETIME}" - create_per-host_faketime_wrappers "${REFERENCE_DATETIME}" - export PATH=${WRAP_DIR}:${PATH} - - ORIGPATH="$PATH" - # Run cmake and make, for each create a new build/ directory, - # compile from there, archive, export and delete the archive again - export SOURCE_DATE_EPOCH=`date -d ${REFERENCE_DATE}T${REFERENCE_TIME} +%s` - export TAR_OPTIONS=--mtime=${REFERENCE_DATE}T${REFERENCE_TIME} - for i in ${HOSTS}; do - export PATH=${BASEPREFIX}/${i}/native/bin:${ORIGPATH} - mkdir build && cd build - cmake .. -DCMAKE_TOOLCHAIN_FILE=${BASEPREFIX}/${i}/share/toolchain.cmake - make ${MAKEOPTS} - cp ../LICENSE ../README.md ../docs/ANONYMITY_NETWORKS.md bin - DISTNAME=monero-${i}-${version} - mv bin ${DISTNAME} - find ${DISTNAME}/ | sort | zip -X@ ${OUTDIR}/${DISTNAME}.zip - cd .. && rm -rf build - done - diff --git a/contrib/guix/INSTALL.md b/contrib/guix/INSTALL.md new file mode 100644 index 00000000000..c6b6a3cb72a --- /dev/null +++ b/contrib/guix/INSTALL.md @@ -0,0 +1,798 @@ +# Guix Installation and Setup + +This only needs to be done once per machine. If you have already completed the +installation and setup, please proceed to [perform a build](./README.md). + +Otherwise, you may choose from one of the following options to install Guix: + +1. Using the official **shell installer script** [⤓ skip to section][install-script] + - Maintained by Guix developers + - Easiest (automatically performs *most* setup) + - Works on nearly all Linux distributions + - Only installs latest release + - Binary installation only, requires high level of trust + - Note: The script needs to be run as root, so it should be inspected before it's run +2. Using the official **binary tarball** [⤓ skip to section][install-bin-tarball] + - Maintained by Guix developers + - Normal difficulty (full manual setup required) + - Works on nearly all Linux distributions + - Installs any release + - Binary installation only, requires high level of trust +3. Using a **distribution-maintained package** [⤓ skip to section][install-distro-pkg] + - Maintained by distribution's Guix package maintainer + - Normal difficulty (manual setup required) + - Works only on distributions with Guix packaged, see: https://repology.org/project/guix/versions + - Installs a release decided on by package maintainer + - Source or binary installation depending on the distribution +4. Building **from source** [⤓ skip to section][install-source] + - Maintained by you + - Hard, but rewarding + - Can be made to work on most Linux distributions + - Installs any commit (more granular) + - Source installation, requires lower level of trust + +## Options 1 and 2: Using the official shell installer script or binary tarball + +The installation instructions for both the official shell installer script and +the binary tarballs can be found in the GNU Guix Manual's [Binary Installation +section](https://guix.gnu.org/manual/en/html_node/Binary-Installation.html). + +Note that running through the binary tarball installation steps is largely +equivalent to manually performing what the shell installer script does. 
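+
+For instance, the typical shell-installer flow looks roughly like the following
+(a sketch based on the Guix manual's Binary Installation section; as noted
+above, inspect the script before running it as root):
+
+```sh
+cd /tmp
+wget https://git.savannah.gnu.org/cgit/guix.git/plain/etc/guix-install.sh
+chmod +x guix-install.sh
+# Read the script first -- it runs with root privileges
+sudo ./guix-install.sh
+```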
+ +Note that at the time of writing (July 5th, 2021), the shell installer script +automatically creates an `/etc/profile.d` entry which the binary tarball +installation instructions do not ask you to create. However, you will likely +need this entry for better desktop integration. Please see [this +section](#add-an-etcprofiled-entry) for instructions on how to add a +`/etc/profile.d/guix.sh` entry. + +Regardless of which installation option you chose, the changes to +`/etc/profile.d` will not take effect until the next shell or desktop session, +so you should log out and log back in. + +## Option 3: Using a distribution-maintained package + +Note that this section is based on the distro packaging situation at the time of +writing (July 2021). Guix is expected to be more widely packaged over time. For +an up-to-date view on Guix's package status/version across distros, please see: +https://repology.org/project/guix/versions + +### Debian / Ubuntu + +Guix is available as a distribution package in [Debian +](https://packages.debian.org/search?keywords=guix) and [Ubuntu +](https://packages.ubuntu.com/search?keywords=guix). + +To install: +```sh +sudo apt install guix +``` + +### Arch Linux + +Guix is available in the AUR as +[`guix`](https://aur.archlinux.org/packages/guix/), please follow the +installation instructions in the Arch Linux Wiki ([live +link](https://wiki.archlinux.org/index.php/Guix#AUR_Package_Installation), +[2021/03/30 +permalink](https://wiki.archlinux.org/index.php?title=Guix&oldid=637559#AUR_Package_Installation)) +to install Guix. + +At the time of writing (2021/03/30), the `check` phase will fail if the path to +guix's build directory is longer than 36 characters due to an anachronistic +character limit on the shebang line. Since the `check` phase happens after the +`build` phase, which may take quite a long time, it is recommended that users +either: + +1. Skip the `check` phase + - For `makepkg`: `makepkg --nocheck ...` + - For `yay`: `yay --mflags="--nocheck" ...` + - For `paru`: `paru --nocheck ...` +2. Or, check their build directory's length beforehand + - For those building with `makepkg`: `pwd | wc -c` + +## Option 4: Building from source + +Building Guix from source is a rather involved process but a rewarding one for +those looking to minimize trust and maximize customizability (e.g. building a +particular commit of Guix). Previous experience with using autotools-style build +systems to build packages from source will be helpful. *hic sunt dracones.* + +I strongly urge you to at least skim through the entire section once before you +start issuing commands, as it will save you a lot of unnecessary pain and +anguish. + +### Installing common build tools + +There are a few basic build tools that are required for most things we'll build, +so let's install them now: + +Text transformation/i18n: +- `autopoint` (sometimes packaged in `gettext`) +- `help2man` +- `po4a` +- `texinfo` + +Build system tools: +- `g++` w/ C++11 support +- `libtool` +- `autoconf` +- `automake` +- `pkg-config` (sometimes packaged as `pkgconf`) +- `make` +- `cmake` + +Miscellaneous: +- `git` +- `gnupg` +- `python3` + +### Building and Installing Guix's dependencies + +In order to build Guix itself from source, we need to first make sure that the +necessary dependencies are installed and discoverable. The most up-to-date list +of Guix's dependencies is kept in the ["Requirements" +section](https://guix.gnu.org/manual/en/html_node/Requirements.html) of the Guix +Reference Manual. 
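+
+One quick way to check whether a given dependency is already installed *and*
+discoverable is to query `pkg-config` for it (a hedged example; module names
+such as `guile-3.0` may differ across distributions):
+
+```sh
+pkg-config --exists guile-3.0 && pkg-config --modversion guile-3.0
+```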
+
+Depending on your distribution, most or all of these dependencies may already be
+packaged and installable without manually building and installing.
+
+For reference, the graphic below outlines Guix v1.3.0's dependency graph:
+
+![bootstrap map](https://user-images.githubusercontent.com/6399679/125064185-a9a59880-e0b0-11eb-82c1-9b8e5dc9950d.png)
+
+If you do not care about building each dependency from source, and Guix is
+already packaged for your distribution, you can easily install only the build
+dependencies of Guix. For example, to enable deb-src and install the Guix build
+dependencies on Ubuntu/Debian:
+
+```sh
+sed -i 's|# deb-src|deb-src|g' /etc/apt/sources.list
+apt update
+apt-get build-dep -y guix
+```
+
+If this succeeded, you can likely skip to section
+["Building and Installing Guix itself"](#building-and-installing-guix-itself).
+
+#### Guile
+
+##### Corner case: Multiple versions of Guile on one system
+
+It is recommended to only install the required version of Guile, so that build
+systems do not get confused about which Guile to use.
+
+However, if you insist on having more versions of Guile installed on
+your system, then you need to **consistently** specify
+`GUILE_EFFECTIVE_VERSION=3.0` to all
+`./configure` invocations for Guix and its dependencies.
+
+##### Installing Guile
+
+If your distribution splits packages into `-dev`-suffixed and
+non-`-dev`-suffixed sub-packages (as is the case for Debian-derived
+distributions), please make sure to install both. For example, to install Guile
+v3.0 on Debian/Ubuntu:
+
+```sh
+apt install guile-3.0 guile-3.0-dev
+```
+
+#### Mixing distribution packages and source-built packages
+
+At the time of writing, most distributions have _some_ of Guix's dependencies
+packaged, but not all. This means that you may want to install the distribution
+package for some dependencies, and manually build-from-source for others.
+
+Distribution packages usually install to `/usr`, which is different from the
+default `./configure` prefix of source-built packages: `/usr/local`.
+
+This means that if you mix-and-match distribution packages and source-built
+packages and do not specify exactly `--prefix=/usr` to `./configure` for
+source-built packages, you will need to augment the `GUILE_LOAD_PATH` and
+`GUILE_LOAD_COMPILED_PATH` environment variables so that Guile will look
+under the right prefix and find your source-built packages.
+
+For example, if you are using Guile v3.0, and have Guile packages in the
+`/usr/local` prefix, either add the following lines to your `.profile` or
+`.bash_profile` so that the environment variables are properly set for all
+future shell logins, or paste the lines into a POSIX-style shell to temporarily
+modify the environment variables of your current shell session.
+
+```sh
+# Help Guile v3.0.x find packages in /usr/local
+export GUILE_LOAD_PATH="/usr/local/share/guile/site/3.0${GUILE_LOAD_PATH:+:}$GUILE_LOAD_PATH"
+export GUILE_LOAD_COMPILED_PATH="/usr/local/lib/guile/3.0/site-ccache${GUILE_LOAD_COMPILED_PATH:+:}$GUILE_LOAD_COMPILED_PATH"
+```
+
+Note that these environment variables are used to check for packages during
+`./configure`, so they should be set as soon as possible should you want to use
+a prefix other than `/usr`.
+
+#### Building and installing source-built packages
+
+***IMPORTANT**: A few dependencies have non-obvious quirks/errata which are
+documented in the sub-sections immediately below.
+Please read these sections before proceeding to build and install these
+packages.*
+
+Although you should always refer to the README or INSTALL files for the most
+accurate information, most of these dependencies use autoconf-style build
+systems (check if there's a `configure.ac` file), and will likely do the right
+thing with the following:
+
+Clone the repository and check out the latest release:
+```sh
+git clone <git-repo-of-dependency>/<dependency>.git
+cd <dependency>
+git tag -l # check for the latest release
+git checkout <latest-release>
+```
+
+For autoconf-based build systems (if `./autogen.sh` or `configure.ac` exists at
+the root of the repository):
+
+```sh
+./autogen.sh || autoreconf -vfi
+./configure --prefix=<prefix>
+make
+sudo make install
+```
+
+For CMake-based build systems (if `CMakeLists.txt` exists at the root of the
+repository):
+
+```sh
+mkdir build && cd build
+cmake .. -DCMAKE_INSTALL_PREFIX=<prefix>
+sudo cmake --build . --target install
+```
+
+If you choose not to specify exactly `--prefix=/usr` to `./configure`, please
+make sure you've carefully read the [previous
+section](#mixing-distribution-packages-and-source-built-packages) on mixing
+distribution packages and source-built packages.
+
+##### Binding packages require `-dev`-suffixed packages
+
+Relevant for:
+- Everyone
+
+When building bindings, the `-dev`-suffixed version of the original package
+needs to be installed. For example, building `Guile-zlib` on Debian-derived
+distributions requires that `zlib1g-dev` is installed.
+
+When using bindings, the `-dev`-suffixed version of the original package still
+needs to be installed. This is particularly problematic when distribution
+packages are mispackaged like `guile-sqlite3` is in Ubuntu Focal such that
+installing `guile-sqlite3` does not automatically install `libsqlite3-dev` as a
+dependency.
+
+Below is a list of relevant Guile bindings and their corresponding `-dev`
+packages in Debian at the time of writing.
+
+| Guile binding package | -dev Debian package |
+|-----------------------|---------------------|
+| guile-gcrypt          | libgcrypt-dev       |
+| guile-git             | libgit2-dev         |
+| guile-gnutls          | (none)              |
+| guile-json            | (none)              |
+| guile-lzlib           | liblz-dev           |
+| guile-ssh             | libssh-dev          |
+| guile-sqlite3         | libsqlite3-dev      |
+| guile-zlib            | zlib1g-dev          |
+
+##### `guile-git` actually depends on `libgit2 >= 1.1`
+
+Relevant for:
+- Those building `guile-git` from source against `libgit2 < 1.1`
+- Those installing `guile-git` from their distribution where `guile-git` is
+  built against `libgit2 < 1.1`
+
+As of v0.5.2, `guile-git` claims to only require `libgit2 >= 0.28.0`; however,
+it actually requires `libgit2 >= 1.1`. Otherwise, it will be confused by a
+reference of `origin/keyring`: instead of interpreting the reference as "the
+'keyring' branch of the 'origin' remote", the reference is interpreted as "the
+branch literally named 'origin/keyring'".
+
+This is especially notable because Ubuntu Focal packages `libgit2 v0.28.4`, and
+`guile-git` is built against it.
+
+Should you be in this situation, you need to build both `libgit2 v1.1.x` and
+`guile-git` from source.
+
+Source: https://logs.guix.gnu.org/guix/2020-11-12.log#232527
+
+### Building and Installing Guix itself
+
+Start by cloning Guix:
+
+```
+git clone https://git.savannah.gnu.org/git/guix.git
+cd guix
+```
+
+You will likely want to build the latest release.
+At the time of writing (November 2023), the latest release was `v1.4.0`.
+
+```
+git branch -a -l 'origin/version-*' # check for the latest release
+git checkout <latest-release-branch>
+```
+
+Bootstrap the build system:
+```
+./bootstrap
+```
+
+Configure with the recommended `--localstatedir` flag:
+```
+./configure --localstatedir=/var
+```
+
+Note: If you intend to hack on Guix in the future, you will need to supply the
+same `--localstatedir=` flag for all future Guix `./configure` invocations. See
+the last paragraph of this
+[section](https://guix.gnu.org/manual/en/html_node/Requirements.html) for more
+details.
+
+Build Guix (this will take a while):
+```
+make -j$(nproc)
+```
+
+Install Guix:
+
+```
+sudo make install
+```
+
+### Post-"build from source" Setup
+
+#### Creating and starting a `guix-daemon-original` service with a fixed `argv[0]`
+
+At this point, guix will be installed to `${bindir}`, which is likely
+`/usr/local/bin` if you did not override directory variables at
+`./configure`-time. More information on standard Automake directory variables
+can be found
+[here](https://www.gnu.org/software/automake/manual/html_node/Standard-Directory-Variables.html).
+
+However, the Guix init scripts and service configurations for Upstart, systemd,
+SysV, and OpenRC are installed (in `${libdir}`) to launch
+`${localstatedir}/guix/profiles/per-user/root/current-guix/bin/guix-daemon`,
+which does not yet exist, and will only exist after [`root` performs their first
+`guix pull`](#guix-pull-as-root).
+
+We need to create a `-original` version of these init scripts that's pointed to
+the binaries we just built and `make install`'ed in `${bindir}` (normally,
+`/usr/local/bin`).
+
+Example for `systemd`, run as `root`:
+
+```sh
+# Create guix-daemon-original.service by modifying guix-daemon.service
+libdir= # set according to your PREFIX (default is /usr/local/lib)
+bindir="$(dirname $(command -v guix-daemon))"
+sed -E -e "s|/\S*/guix/profiles/per-user/root/current-guix/bin/guix-daemon|${bindir}/guix-daemon|" "${libdir}"/systemd/system/guix-daemon.service > /etc/systemd/system/guix-daemon-original.service
+chmod 664 /etc/systemd/system/guix-daemon-original.service
+
+# Make systemd recognize the new service
+systemctl daemon-reload
+
+# Make sure that the non-working guix-daemon.service is stopped and disabled
+systemctl stop guix-daemon
+systemctl disable guix-daemon
+
+# Make sure that the working guix-daemon-original.service is started and enabled
+systemctl enable guix-daemon-original
+systemctl start guix-daemon-original
+```
+
+#### Creating `guix-daemon` users / groups
+
+Please see the [relevant
+section](https://guix.gnu.org/manual/en/html_node/Build-Environment-Setup.html)
+in the Guix Reference Manual for more details.
+
+## Optional setup
+
+At this point, you are set up to [use Guix to build
+Monero](./README.md#usage). However, if you want to polish your setup a bit and
+make it "what Guix intended", then read the next few subsections.
+
+### Add an `/etc/profile.d` entry
+
+This section definitely does not apply to you if you installed Guix using:
+1. The shell installer script
+2. fanquake's Docker image
+3. Debian's `guix` package
+
+#### Background
+
+Although Guix knows how to update itself and its packages, it does so in a
+non-invasive way (it does not modify `/usr/local/bin/guix`).
+
+Instead, it does the following:
+
+- After a `guix pull`, it updates
+  `/var/guix/profiles/per-user/$USER/current-guix`, and creates a symlink
+  targeting this directory at `$HOME/.config/guix/current`
+
+- After a `guix install`, it updates
+  `/var/guix/profiles/per-user/$USER/guix-profile`, and creates a symlink
+  targeting this directory at `$HOME/.guix-profile`
+
+Therefore, in order for these operations to affect your shell/desktop sessions
+(and for the principle of least astonishment to hold), their corresponding
+directories have to be added to well-known environment variables like `$PATH`,
+`$INFOPATH`, `$XDG_DATA_DIRS`, etc.
+
+In other words, if `$HOME/.config/guix/current/bin` does not exist in your
+`$PATH`, a `guix pull` will have no effect on what `guix` you are using. The
+same goes for `$HOME/.guix-profile/bin`, `guix install`, and installed packages.
+
+Helpfully, after a `guix pull` or `guix install`, a message will be printed like
+so:
+
+```
+hint: Consider setting the necessary environment variables by running:
+
+     GUIX_PROFILE="$HOME/.guix-profile"
+     . "$GUIX_PROFILE/etc/profile"
+
+Alternately, see `guix package --search-paths -p "$HOME/.guix-profile"'.
+```
+
+However, this is somewhat tedious to do for both `guix pull` and `guix install`
+for each user on the system that wants to properly use `guix`. I recommend that
+you add an entry to `/etc/profile.d` instead. This is done by default when
+installing the Debian package later than 1.2.0-4 and when using the shell
+script installer.
+
+#### Instructions
+
+Create `/etc/profile.d/guix.sh` with the following content:
+```sh
+# _GUIX_PROFILE: `guix pull` profile
+_GUIX_PROFILE="$HOME/.config/guix/current"
+if [ -L $_GUIX_PROFILE ]; then
+  export PATH="$_GUIX_PROFILE/bin${PATH:+:}$PATH"
+  # Export INFOPATH so that the updated info pages can be found
+  # and read by both /usr/bin/info and/or $GUIX_PROFILE/bin/info
+  # When INFOPATH is unset, add a trailing colon so that Emacs
+  # searches 'Info-default-directory-list'.
+  export INFOPATH="$_GUIX_PROFILE/share/info:$INFOPATH"
+fi
+
+# GUIX_PROFILE: User's default profile
+GUIX_PROFILE="$HOME/.guix-profile"
+[ -L $GUIX_PROFILE ] || return
+GUIX_LOCPATH="$GUIX_PROFILE/lib/locale"
+export GUIX_PROFILE GUIX_LOCPATH
+
+[ -f "$GUIX_PROFILE/etc/profile" ] && . "$GUIX_PROFILE/etc/profile"
+
+# set XDG_DATA_DIRS to include Guix installations
+export XDG_DATA_DIRS="$GUIX_PROFILE/share:${XDG_DATA_DIRS:-/usr/local/share/:/usr/share/}"
+```
+
+Please note that this will not take effect until the next shell or desktop
+session (log out and log back in).
+
+### `guix pull` as root
+
+Before you do this, you need to read the section on [choosing your security
+model][security-model] and adjust `guix` and `guix-daemon` flags according to
+your choice, as invoking `guix pull` may pull substitutes from substitute
+servers (which you may not want).
+
+As mentioned in a previous section, Guix expects
+`${localstatedir}/guix/profiles/per-user/root/current-guix` to be populated with
+`root`'s Guix profile, `guix pull`-ed and built by some former version of Guix.
+However, this is not the case when we build from source. Therefore, we need to
+perform a `guix pull` as `root`:
+
+```sh
+sudo --login guix pull --branch=version-<latest-version>
+# or
+sudo --login guix pull --commit=<commit>
+```
+
+`guix pull` is quite a long process (especially if you're using
+`--no-substitutes`). If you encounter build problems, please refer to the
+[troubleshooting section](#troubleshooting).
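+
+Once the pull completes, a quick sanity check can confirm it worked (a sketch;
+paths assume the `--localstatedir=/var` flag used at `./configure`-time):
+
+```sh
+# root's pulled profile should now contain a guix-daemon binary
+ls -l /var/guix/profiles/per-user/root/current-guix/bin/guix-daemon
+# and `guix describe` should report the branch/commit that was pulled
+sudo --login guix describe
+```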
+
+Note that running a bare `guix pull` with no commit or branch specified will
+pull the latest commit on Guix's master branch, which is likely fine, but not
+recommended.
+
+If you installed Guix from source, you may get an error like the following:
+```sh
+error: while creating symlink '/root/.config/guix/current' No such file or directory
+```
+To resolve this, simply:
+```
+sudo mkdir -p /root/.config/guix
+```
+Then try the `guix pull` command again.
+
+After the `guix pull` finishes successfully,
+`${localstatedir}/guix/profiles/per-user/root/current-guix` should be populated.
+
+#### Using the newly-pulled `guix` by restarting the daemon
+
+Depending on how you installed Guix, you should now make sure that your init
+scripts and service configurations point to the newly-pulled `guix-daemon`.
+
+##### If you built Guix from source
+
+If you followed the instructions for [fixing argv\[0\]][fix-argv0], you can now
+do the following:
+
+```sh
+systemctl stop guix-daemon-original
+systemctl disable guix-daemon-original
+
+systemctl enable guix-daemon
+systemctl start guix-daemon
+```
+
+Remember to set `--no-substitutes` in
+`$libdir/systemd/system/guix-daemon.service`, and to carry over any other
+customizations, if you used them for `guix-daemon-original.service`.
+
+##### If you installed Guix via the Debian/Ubuntu distribution packages
+
+You will need to create a `guix-daemon-latest` service which points to the new
+`guix` rather than a pinned one.
+
+```sh
+# Create guix-daemon-latest.service by modifying guix-daemon.service
+sed -E -e "s|/usr/bin/guix-daemon|/var/guix/profiles/per-user/root/current-guix/bin/guix-daemon|" /etc/systemd/system/guix-daemon.service > /lib/systemd/system/guix-daemon-latest.service
+chmod 664 /lib/systemd/system/guix-daemon-latest.service
+
+# Make systemd recognize the new service
+systemctl daemon-reload
+
+# Make sure that the old guix-daemon.service is stopped and disabled
+systemctl stop guix-daemon
+systemctl disable guix-daemon
+
+# Make sure that the new guix-daemon-latest.service is started and enabled
+systemctl enable guix-daemon-latest
+systemctl start guix-daemon-latest
+```
+
+##### If you installed Guix via lantw44's Arch Linux AUR package
+
+At the time of writing (July 5th, 2021) the systemd unit for "updated Guix" is
+`guix-daemon-latest.service`; therefore, you should do the following:
+
+```sh
+systemctl stop guix-daemon
+systemctl disable guix-daemon
+
+systemctl enable guix-daemon-latest
+systemctl start guix-daemon-latest
+```
+
+##### Otherwise...
+
+Simply do:
+
+```sh
+systemctl restart guix-daemon
+```
+
+### Checking everything
+
+If you followed all the steps above to make your Guix setup "prim and proper,"
+you can check that you did everything properly by running through this
+checklist.
+
+1. `/etc/profile.d/guix.sh` should exist and be sourced at each shell login
+
+2. `guix describe` should not print `guix describe: error: failed to determine
+   origin`, but rather something like:
+
+   ```
+   Generation 38   Feb 22 2021 16:39:31    (current)
+     guix f350df4
+       repository URL: https://git.savannah.gnu.org/git/guix.git
+       branch: version-1.2.0
+       commit: f350df405fbcd5b9e27e6b6aa500da7f101f41e7
+   ```
+
+3. `guix-daemon` should be running from `${localstatedir}/guix/profiles/per-user/root/current-guix`
+
+# Troubleshooting
+
+## Derivation failed to build
+
+When you see a build failure like below:
+
+```
+building /gnu/store/...-foo-3.6.12.drv...
+/ 'check' phasenote: keeping build directory `/tmp/guix-build-foo-3.6.12.drv-0'
+builder for `/gnu/store/...-foo-3.6.12.drv' failed with exit code 1
+build of /gnu/store/...-foo-3.6.12.drv failed
+View build log at '/var/log/guix/drvs/../...-foo-3.6.12.drv.bz2'.
+cannot build derivation `/gnu/store/...-qux-7.69.1.drv': 1 dependencies couldn't be built
+cannot build derivation `/gnu/store/...-bar-3.16.5.drv': 1 dependencies couldn't be built
+cannot build derivation `/gnu/store/...-baz-2.0.5.drv': 1 dependencies couldn't be built
+guix time-machine: error: build of `/gnu/store/...-baz-2.0.5.drv' failed
+```
+
+It means that `guix` failed to build a package named `foo`, which was a
+dependency of `qux`, `bar`, and `baz`. Importantly, note that the last "failed"
+line is not necessarily the root cause; the first "failed" line is.
+
+Most of the time, the build failure is due to a spurious test failure or the
+package's build system/test suite breaking when running multi-threaded. To
+rebuild _just_ this derivation in a single-threaded fashion (please don't forget
+to add other `guix` flags like `--no-substitutes` as appropriate):
+
+```sh
+$ guix build --cores=1 /gnu/store/...-foo-3.6.12.drv
+```
+
+If the single-threaded rebuild did not succeed, you may need to dig deeper.
+You may view `foo`'s build logs in `less` like so (please replace paths with the
+path you see in the build failure output):
+
+```sh
+$ bzcat /var/log/guix/drvs/../...-foo-3.6.12.drv.bz2 | less
+```
+
+`foo`'s build directory is also preserved and available at
+`/tmp/guix-build-foo-3.6.12.drv-0`. However, if you fail to build `foo` multiple
+times, it may be `/tmp/...drv-1` or `/tmp/...drv-2`. Always consult the build
+failure output for the most accurate, up-to-date information.
+
+### python(-minimal): [Errno 84] Invalid or incomplete multibyte or wide character
+
+This error occurs when your `$TMPDIR` (default: /tmp) exists on a filesystem
+which rejects characters not present in the UTF-8 character code set. An
+example is ZFS with the utf8only=on option set.
+
+More information: https://github.com/python/cpython/issues/81765
+
+### openssl-1.1.1l and openssl-1.1.1n
+
+OpenSSL includes tests that will fail once some certificate has expired.
+The workarounds from the GnuTLS section immediately below can be used.
+
+### GnuTLS: test-suite FAIL: status-request-revoked
+
+*The derivation is likely identified by: `/gnu/store/vhphki5sg9xkdhh2pbc8gi6vhpfzryf0-gnutls-3.6.12.drv`*
+
+This unfortunate error is most common for non-substitute builders who installed
+Guix v1.2.0. The problem stems from the fact that one of GnuTLS's tests uses a
+hardcoded certificate which expired on 2020-10-24.
+
+What's more unfortunate is that this GnuTLS derivation is somewhat special in
+Guix's dependency graph and is not affected by the package transformation flags
+like `--without-tests=`.
+
+The easiest solution for those encountering this problem is to install a newer
+version of Guix.
+However, there are ways to work around this issue:
+
+#### Workaround 1: Using substitutes for this single derivation
+
+If you've authorized the official Guix build farm's key (more info
+[here](./README.md#step-1-authorize-the-signing-keys)), then you can use
+substitutes just for this single derivation by invoking the following:
+
+```sh
+guix build --substitute-urls="https://ci.guix.gnu.org" /gnu/store/vhphki5sg9xkdhh2pbc8gi6vhpfzryf0-gnutls-3.6.12.drv
+```
+
+See [this section](./README.md#removing-authorized-keys) for instructions on how
+to remove authorized keys if you don't want to keep the build farm's key
+authorized.
+
+#### Workaround 2: Temporarily setting the system clock back
+
+This workaround was described [here](https://issues.guix.gnu.org/44559#5).
+
+Basically:
+1. Turn off NTP
+2. Set system time to 2020-10-01
+3. guix build --no-substitutes /gnu/store/vhphki5sg9xkdhh2pbc8gi6vhpfzryf0-gnutls-3.6.12.drv
+4. Set system time back to accurate current time
+5. Turn NTP back on
+
+For example,
+
+```sh
+sudo timedatectl set-ntp no
+sudo date --set "01 oct 2020 15:00:00"
+guix build /gnu/store/vhphki5sg9xkdhh2pbc8gi6vhpfzryf0-gnutls-3.6.12.drv
+sudo timedatectl set-ntp yes
+```
+
+#### Workaround 3: Disable the tests in the Guix source code for this single derivation
+
+If all of the above workarounds fail, you can also disable the `tests` phase of
+the derivation via the `arguments` option, as described in the official
+[`package`
+reference](https://guix.gnu.org/manual/en/html_node/package-Reference.html).
+
+For example, to disable the openssl-1.1 check phase:
+
+```diff
+diff --git a/gnu/packages/tls.scm b/gnu/packages/tls.scm
+index f1e844b..1077c4b 100644
+--- a/gnu/packages/tls.scm
++++ b/gnu/packages/tls.scm
+@@ -494,4 +494,5 @@ (define-public openssl-1.1
+     (arguments
+      `(#:parallel-tests? #f
++       #:tests? #f
+        #:test-target "test"
+```
+
+### coreutils: FAIL: tests/tail-2/inotify-dir-recreate
+
+The inotify-dir-create test fails on "remote" filesystems such as overlayfs
+(Docker's default filesystem) due to the filesystem being mistakenly recognized
+as non-remote.
+
+A relatively easy workaround to this is to make sure that a somewhat traditional
+filesystem is mounted at `/tmp` (where `guix-daemon` performs its builds). For
+Docker users, this might mean [using a volume][docker/volumes], [bind
+mounting][docker/bind-mnt] from host, or (for those with enough RAM and swap)
+[mounting a tmpfs][docker/tmpfs] using the `--tmpfs` flag.
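+
+For example, Docker users could start their build container with a tmpfs
+mounted at `/tmp` (a sketch; the image name is hypothetical, and the size
+should fit within your RAM/swap budget):
+
+```sh
+# `exec` is required because guix-daemon runs builders out of /tmp
+docker run -it --tmpfs /tmp:rw,exec,size=4g my-guix-image
+```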
+
+Please see the following links for more details:
+
+- An upstream coreutils bug has been filed: [debbugs#47940](https://debbugs.gnu.org/cgi/bugreport.cgi?bug=47940)
+- A Guix bug detailing the underlying problem has been filed: [guix-issues#47935](https://issues.guix.gnu.org/47935), [guix-issues#49985](https://issues.guix.gnu.org/49985#5)
+- A commit to skip this test in Guix has been merged into the core-updates branch:
+  [savannah/guix@6ba1058](https://git.savannah.gnu.org/cgit/guix.git/commit/?id=6ba1058df0c4ce5611c2367531ae5c3cdc729ab4)
+
+
+[install-script]: #options-1-and-2-using-the-official-shell-installer-script-or-binary-tarball
+[install-bin-tarball]: #options-1-and-2-using-the-official-shell-installer-script-or-binary-tarball
+[install-distro-pkg]: #option-3-using-a-distribution-maintained-package
+[install-source]: #option-4-building-from-source
+
+[fix-argv0]: #creating-and-starting-a-guix-daemon-original-service-with-a-fixed-argv0
+[security-model]: ./README.md#choosing-your-security-model
+
+[docker/volumes]: https://docs.docker.com/storage/volumes/
+[docker/bind-mnt]: https://docs.docker.com/storage/bind-mounts/
+[docker/tmpfs]: https://docs.docker.com/storage/tmpfs/
+
+# Purging/Uninstalling Guix
+
+In the extraordinarily rare case where you messed up your Guix installation in
+an irreversible way, you may want to completely purge Guix from your system and
+start over.
+
+1. Uninstall Guix itself according to the way you installed it (e.g. `sudo apt
+   purge guix` for Ubuntu packaging, `sudo make uninstall` for a build from source).
+2. Remove all build users and groups
+
+   You may check for relevant users and groups using:
+
+   ```
+   getent passwd | grep guix
+   getent group | grep guix
+   ```
+
+   Then, you may remove users and groups using:
+
+   ```
+   sudo userdel <user>
+   sudo groupdel <group>
+   ```
+
+3. Remove all possible Guix-related directories
+   - `/var/guix/`
+   - `/var/log/guix/`
+   - `/gnu/`
+   - `/etc/guix/`
+   - `/home/*/.config/guix/`
+   - `/home/*/.cache/guix/`
+   - `/home/*/.guix-profile/`
+   - `/root/.config/guix/`
+   - `/root/.cache/guix/`
+   - `/root/.guix-profile/`
diff --git a/contrib/guix/README.md b/contrib/guix/README.md
new file mode 100644
index 00000000000..83f42c1b616
--- /dev/null
+++ b/contrib/guix/README.md
@@ -0,0 +1,345 @@
+# Bootstrappable Monero Builds
+
+This directory contains the files necessary to perform bootstrappable Monero
+builds.
+
+[Bootstrappability][b17e] furthers our binary security guarantees by allowing us
+to _audit and reproduce_ our toolchain instead of blindly _trusting_ binary
+downloads.
+
+We achieve bootstrappability by using Guix as a functional package manager.
+
+# Requirements
+
+Conservatively, you will need an x86_64 machine with:
+
+- 16GB of free disk space on the partition that /gnu/store will reside in
+- 8GB of free disk space **per platform triple** you're planning on building
+  (see the `HOSTS` [environment variable description][env-vars-list])
+
+# Installation and Setup
+
+If you don't have Guix installed and set up, please follow the instructions in
+[INSTALL.md](./INSTALL.md).
+
+# Usage
+
+If you haven't considered your security model yet, please read [the relevant
+section](#choosing-your-security-model) before proceeding to perform a build.
+
+## Building
+
+*The author highly recommends at least reading over the [common usage patterns
+and examples](#common-guix-build-invocation-patterns-and-examples) section below
+before starting a build.
For a full list of customization options, see the
+[recognized environment variables][env-vars-list] section.*
+
+To build Monero reproducibly with all default options, invoke the
+following from the top of a clean repository:
+
+```sh
+./contrib/guix/guix-build
+```
+
+## Cleaning intermediate work directories
+
+By default, `guix-build` leaves all intermediate files or "work directories"
+(e.g. `depends/work`, `guix-build-*/distsrc-*`) intact at the end of a build so
+that they are available to the user (to aid in debugging, etc.). However, these
+directories usually take up a large amount of disk space. Therefore, a
+`guix-clean` convenience script is provided which cleans the current `git`
+worktree to save disk space:
+
+```
+./contrib/guix/guix-clean
+```
+
+## Attesting to build outputs
+
+Much like how Gitian build outputs are attested to in a `gitian.sigs`
+repository, Guix build outputs are attested to in the [`guix.sigs`
+repository](https://github.com/monero-project/guix.sigs).
+
+After you've cloned the `guix.sigs` repository, to attest to the current
+worktree's commit/tag:
+
+```
+env GUIX_SIGS_REPO=<path/to/guix.sigs> SIGNER=<gpg-key-name> ./contrib/guix/guix-attest
+```
+
+See `./contrib/guix/guix-attest --help` for more information on the various ways
+`guix-attest` can be invoked.
+
+## Verifying build output attestations
+
+After at least one other signer has uploaded their signatures to the `guix.sigs`
+repository:
+
+```
+git -C <path/to/guix.sigs> pull
+env GUIX_SIGS_REPO=<path/to/guix.sigs> ./contrib/guix/guix-verify
+```
+
+## Common `guix-build` invocation patterns and examples
+
+### Keeping caches outside of the worktree
+
+If you perform a lot of builds and have a bunch of worktrees, you may find it
+more efficient to keep the depends tree's download cache and build cache
+outside of the worktrees to avoid duplicate downloads and unnecessary builds. To
+help with this situation, the `guix-build` script honours the `SOURCES_PATH`
+and `BASE_CACHE` environment variables and will pass them on to the
+depends tree so that you can do something like:
+
+```sh
+env SOURCES_PATH="$HOME/depends-SOURCES_PATH" BASE_CACHE="$HOME/depends-BASE_CACHE" ./contrib/guix/guix-build
+```
+
+Note that the paths that these environment variables point to **must be
+directories**, and **NOT symlinks to directories**.
+
+See the [recognized environment variables][env-vars-list] section for more
+details.
+
+### Building a subset of platform triples
+
+Sometimes you only want to build a subset of the supported platform triples, in
+which case you can override the default list by setting the space-separated
+`HOSTS` environment variable:
+
+```sh
+env HOSTS='x86_64-w64-mingw32 x86_64-apple-darwin' ./contrib/guix/guix-build
+```
+
+See the [recognized environment variables][env-vars-list] section for more
+details.
+
+### Controlling the number of threads used by `guix` build commands
+
+Depending on your system's RAM capacity, you may want to decrease the number of
+threads used to decrease RAM usage or vice versa.
+
+By default, the scripts under `./contrib/guix` will invoke all `guix` build
+commands with `--cores="$JOBS"`. Note that `$JOBS` defaults to `$(nproc)` if not
+specified. However, astute manual readers will notice that `guix` build
+commands also accept a `--max-jobs=` flag (which defaults to 1 if unspecified).
+
+Here is the difference between `--cores=` and `--max-jobs=`:
+
+> Note: When I say "derivation," think "package"
+
+`--cores=`
+
+- controls the number of CPU cores to build each derivation.
This is the value
+  passed to `make`'s `--jobs=` flag.
+
+`--max-jobs=`
+
+- controls how many derivations can be built in parallel
+- defaults to 1
+
+Therefore, the default is for `guix` build commands to build one derivation at a
+time, utilizing `$JOBS` threads.
+
+Specifying the `$JOBS` environment variable will only modify `--cores=`, but you
+can also modify the value for `--max-jobs=` by specifying
+`$ADDITIONAL_GUIX_COMMON_FLAGS`. For example, if you have a LOT of memory, you
+may want to set:
+
+```sh
+export ADDITIONAL_GUIX_COMMON_FLAGS='--max-jobs=8'
+```
+
+This allows a maximum of 8 derivations to be built at the same time, each
+utilizing `$JOBS` threads.
+
+Or, if you'd like to avoid spurious build failures caused by issues with
+parallelism within a single package, but would still like to build multiple
+packages when the dependency graph allows for it, you may want to try:
+
+```sh
+export JOBS=1 ADDITIONAL_GUIX_COMMON_FLAGS='--max-jobs=8'
+```
+
+See the [recognized environment variables][env-vars-list] section for more
+details.
+
+## Recognized environment variables
+
+* _**HOSTS**_
+
+  Override the space-separated list of platform triples for which to perform a
+  bootstrappable build.
+
+  _(defaults to "x86\_64-linux-gnu aarch64-linux-gnu arm-linux-gnueabihf
+  riscv64-linux-gnu i686-linux-gnu x86\_64-w64-mingw32 x86\_64-unknown-freebsd
+  x86\_64-apple-darwin aarch64-apple-darwin aarch64-linux-android
+  arm-linux-androideabi")_
+
+* _**SOURCES_PATH**_
+
+  Set the depends tree download cache for sources. This is passed through to the
+  depends tree. Setting this to the same directory across multiple builds of the
+  depends tree can eliminate unnecessary redownloading of package sources.
+
+  The path that this environment variable points to **must be a directory**, and
+  **NOT a symlink to a directory**.
+
+* _**BASE_CACHE**_
+
+  Set the depends tree cache for built packages. This is passed through to the
+  depends tree. Setting this to the same directory across multiple builds of the
+  depends tree can eliminate unnecessary building of packages.
+
+  The path that this environment variable points to **must be a directory**, and
+  **NOT a symlink to a directory**.
+
+* _**JOBS**_
+
+  Override the number of jobs to run simultaneously; you might want to do so on
+  a memory-limited machine. This may be passed to:
+
+  - `guix` build commands as in `guix shell --cores="$JOBS"`
+  - `make` as in `make --jobs="$JOBS"`
+  - `xargs` as in `xargs -P"$JOBS"`
+
+  See [here](#controlling-the-number-of-threads-used-by-guix-build-commands) for
+  more details.
+
+  _(defaults to the value of `nproc` outside the container)_
+
+* _**SOURCE_DATE_EPOCH**_
+
+  Override the reference UNIX timestamp used for bit-for-bit reproducibility;
+  the variable name conforms to the [standard][r12e/source-date-epoch].
+
+  _(defaults to the output of `$(git log --format=%at -1)`)_
+
+* _**V**_
+
+  If non-empty, will pass `V=1` to all `make` invocations, making `make` output
+  verbose.
+
+  Note that any given value is ignored. The variable is only checked for
+  emptiness. More concretely, this means that `V=` (setting `V` to the empty
+  string) is interpreted the same way as not setting `V` at all, and that `V=0`
+  has the same effect as `V=1`.
+
+* _**SUBSTITUTE_URLS**_
+
+  A whitespace-delimited list of URLs from which to download pre-built packages.
+  A URL is only used if its signing key is authorized (refer to the [substitute
+  servers section](#option-1-building-with-substitutes) for more details).
+
+* _**ADDITIONAL_GUIX_COMMON_FLAGS**_
+
+  Additional flags to be passed to all `guix` commands.
+
+* _**ADDITIONAL_GUIX_TIMEMACHINE_FLAGS**_
+
+  Additional flags to be passed to `guix time-machine`.
+
+* _**ADDITIONAL_GUIX_ENVIRONMENT_FLAGS**_
+
+  Additional flags to be passed to the invocation of `guix shell` inside
+  `guix time-machine`.
+
+# Choosing your security model
+
+No matter how you installed Guix, you need to decide on your security model for
+building packages with Guix.
+
+Guix allows us to achieve better binary security by using our CPU time to build
+everything from scratch. However, it doesn't sacrifice user choice in pursuit of
+this: users can decide whether or not to use **substitutes** (pre-built
+packages).
+
+## Option 1: Building with substitutes
+
+### Step 1: Authorize the signing keys
+
+Depending on the installation procedure you followed, you may have already
+authorized the Guix build farm key. In particular, the official shell installer
+script asks you if you want the key installed, and the Debian distribution
+package authorizes the key during installation.
+
+You can check the current list of authorized keys at `/etc/guix/acl`.
+
+At the time of writing, a `/etc/guix/acl` with just the Guix build farm key
+authorized looks something like:
+
+```lisp
+(acl
+ (entry
+  (public-key
+   (ecc
+    (curve Ed25519)
+    (q #8D156F295D24B0D9A86FA5741A840FF2D24F60F7B6C4134814AD55625971B394#)
+    )
+   )
+  (tag
+   (guix import)
+   )
+  )
+ )
+```
+
+If you've determined that the official Guix build farm key hasn't been
+authorized, and you would like to authorize it, run the following as root:
+
+```
+guix archive --authorize < /var/guix/profiles/per-user/root/current-guix/share/guix/ci.guix.gnu.org.pub
+```
+
+If
+`/var/guix/profiles/per-user/root/current-guix/share/guix/ci.guix.gnu.org.pub`
+doesn't exist, try:
+
+```sh
+guix archive --authorize < <PREFIX>/share/guix/ci.guix.gnu.org.pub
+```
+
+Where `<PREFIX>` is likely:
+- `/usr` if you installed from a distribution package
+- `/usr/local` if you installed Guix from source and didn't supply any
+  prefix-modifying flags to Guix's `./configure`
+
+#### Removing authorized keys
+
+To remove previously authorized keys, simply edit `/etc/guix/acl` and remove the
+`(entry (public-key ...))` entry.
+
+### Step 2: Specify the substitute servers
+
+Once its key is authorized, the official Guix build farm at
+https://ci.guix.gnu.org is automatically used unless the `--no-substitutes` flag
+is supplied. This default list of substitute servers is overridable both on a
+`guix-daemon` level and when you invoke `guix` commands.
+
+## Option 2: Disabling substitutes on an ad-hoc basis
+
+If you prefer not to use any substitutes, make sure to supply `--no-substitutes`
+like in the following snippet. The first build will take a while, but the
+resulting packages will be cached for future builds.
+
+For direct invocations of `guix`:
+```sh
+guix <cmd> --no-substitutes
+```
+
+For the scripts under `./contrib/guix/`:
+```sh
+export ADDITIONAL_GUIX_COMMON_FLAGS='--no-substitutes'
+```
+
+## Option 3: Disabling substitutes by default
+
+`guix-daemon` accepts a `--no-substitutes` flag, which will make sure that,
+unless otherwise overridden by a command line invocation, no substitutes will be
+used.
+
+If you start `guix-daemon` using an init script, you can edit said script to
+supply this flag.
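+
+For example, on a systemd-based distribution, a hedged sketch of this edit
+(the unit name and daemon path are assumptions; they may differ on your system)
+might look like:
+
+```sh
+sudo systemctl edit guix-daemon
+# In the override that opens, clear and redefine ExecStart with the flag added:
+#   [Service]
+#   ExecStart=
+#   ExecStart=/var/guix/profiles/per-user/root/current-guix/bin/guix-daemon \
+#       --build-users-group=guixbuild --no-substitutes
+sudo systemctl daemon-reload
+sudo systemctl restart guix-daemon
+```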
+
+[b17e]: https://bootstrappable.org/
+[r12e/source-date-epoch]: https://reproducible-builds.org/docs/source-date-epoch/
+[env-vars-list]: #recognized-environment-variables
diff --git a/contrib/guix/guix-attest b/contrib/guix/guix-attest
new file mode 100755
index 00000000000..c7398fb45e0
--- /dev/null
+++ b/contrib/guix/guix-attest
@@ -0,0 +1,197 @@
+#!/usr/bin/env bash
+export LC_ALL=C
+set -e -o pipefail
+
+# Source the common prelude, which:
+#   1. Checks if we're at the top directory of the monero repository
+#   2. Defines a few common functions and variables
+#
+# shellcheck source=libexec/prelude.bash
+source "$(dirname "${BASH_SOURCE[0]}")/libexec/prelude.bash"
+
+
+###################
+## Sanity Checks ##
+###################
+
+################
+# Required non-builtin commands should be invokable
+################
+
+check_tools cat env basename mkdir diff sort xargs tee
+
+if [ -z "$NO_SIGN" ]; then
+    # make it possible to override the gpg binary
+    GPG=${GPG:-gpg}
+
+    # $GPG can contain extra arguments passed to the binary
+    # so let's check only the existence of arg[0]
+    # shellcheck disable=SC2206
+    GPG_ARRAY=($GPG)
+    check_tools "${GPG_ARRAY[0]}"
+fi
+
+################
+# Required env vars should be non-empty
+################
+
+cmd_usage() {
+cat <<EOF
+Usage:
+
+  env GUIX_SIGS_REPO=<path/to/guix.sigs> \\
+      SIGNER=GPG_KEY_NAME[=SIGNER_NAME] \\
+      [ NO_SIGN=1 ]
+    ./contrib/guix/guix-attest
+
+Example w/o overriding signing name:
+
+  env GUIX_SIGS_REPO=/home/user/guix.sigs \\
+      SIGNER=achow101 \\
+      ./contrib/guix/guix-attest
+
+Example overriding signing name:
+
+  env GUIX_SIGS_REPO=/home/user/guix.sigs \\
+      SIGNER=0x96AB007F1A7ED999=dongcarl \\
+      ./contrib/guix/guix-attest
+
+Example w/o signing, just creating SHA256SUMS:
+
+  env GUIX_SIGS_REPO=/home/user/guix.sigs \\
+      SIGNER=achow101 \\
+      NO_SIGN=1 \\
+      ./contrib/guix/guix-attest
+
+EOF
+}
+
+if [ -z "$GUIX_SIGS_REPO" ] || [ -z "$SIGNER" ]; then
+    cmd_usage
+    exit 1
+fi
+
+################
+# GUIX_SIGS_REPO should exist as a directory
+################
+
+if [ ! -d "$GUIX_SIGS_REPO" ]; then
+cat << EOF
+ERR: The specified GUIX_SIGS_REPO is not an existing directory:
+
+     '$GUIX_SIGS_REPO'
+
+Hint: Please clone the guix.sigs repository and point to it with the
+      GUIX_SIGS_REPO environment variable.
+
+EOF
+cmd_usage
+exit 1
+fi
+
+################
+# The key specified in SIGNER should be usable
+################
+
+IFS='=' read -r gpg_key_name signer_name <<< "$SIGNER"
+if [ -z "${signer_name}" ]; then
+    signer_name="$gpg_key_name"
+fi
+
+if [ -z "$NO_SIGN" ] && ! ${GPG} --dry-run --list-secret-keys "${gpg_key_name}" >/dev/null 2>&1; then
+    echo "ERR: GPG can't seem to find any key named '${gpg_key_name}'"
+    exit 1
+fi
+
+##############
+##  Attest  ##
+##############
+
+# Usage: out_name $logdir
+#
+#   $logdir: The log directory of the output being attested
+#
+out_name() {
+    basename "$(dirname "$1")"
+}
+
+shasum_already_exists() {
+cat <_FLAGS.
+#
+# This seems like a poor user experience. Thus we check for GUIX_BUILD_OPTIONS's
+# existence here and direct users of this script to use our (more flexible)
+# custom environment variables.
+if [ -n "$GUIX_BUILD_OPTIONS" ]; then
+cat << EOF
+Error: Environment variable GUIX_BUILD_OPTIONS is not empty:
+  '$GUIX_BUILD_OPTIONS'
+
+Unfortunately this script is incompatible with GUIX_BUILD_OPTIONS, please unset
+GUIX_BUILD_OPTIONS and use ADDITIONAL_GUIX_COMMON_FLAGS to set build options
+across guix commands or ADDITIONAL_GUIX_<CMD>_FLAGS to set build options for a
+specific guix command.
+
+See contrib/guix/README.md for more details.
+EOF
+exit 1
+fi
+
+################
+# Checkout git submodules if we haven't already
+################
+
+git submodule update --init --recursive --progress
+
+################
+# The git worktree should not be dirty
+################
+
+if ! git diff-index --quiet HEAD -- && [ -z "$FORCE_DIRTY_WORKTREE" ]; then
+cat << EOF
+ERR: The current git worktree is dirty, which may lead to broken builds.
+
+     Aborting...
+
+Hint: To make your git worktree clean, you may want to:
+      1. Commit your changes,
+      2. Stash your changes, or
+      3. Set the 'FORCE_DIRTY_WORKTREE' environment variable if you insist on
+         using a dirty worktree
+EOF
+exit 1
+fi
+
+mkdir -p "$VERSION_BASE"
+
+################
+# Build directories should not exist
+################
+
+# Default to building for all supported HOSTs (overridable by environment)
+export HOSTS="${HOSTS:-x86_64-linux-gnu
+                       aarch64-linux-gnu
+                       arm-linux-gnueabihf
+                       riscv64-linux-gnu
+                       i686-linux-gnu
+                       x86_64-w64-mingw32
+                       x86_64-unknown-freebsd
+                       x86_64-apple-darwin
+                       aarch64-apple-darwin
+                       aarch64-linux-android
+                       arm-linux-androideabi}"
+
+# Usage: distsrc_for_host HOST
+#
+#   HOST: The current platform triple we're building for
+#
+distsrc_for_host() {
+    echo "${DISTSRC_BASE}/build/distsrc-${VERSION}-${1}"
+}
+
+# Accumulate a list of build directories that already exist...
+hosts_distsrc_exists=""
+for host in $HOSTS; do
+    if [ -e "$(distsrc_for_host "$host")" ]; then
+        hosts_distsrc_exists+=" ${host}"
+    fi
+done
+
+if [ -n "$hosts_distsrc_exists" ]; then
+# ...so that we can print them out nicely in an error message
+cat << EOF
+ERR: Build directories for this commit already exist for the following platform
+     triples you're attempting to build, probably because of previous builds.
+     Please remove or otherwise deal with them prior to starting another build.
+
+     Aborting...
+
+Hint: To blow everything away, you may want to use:
+
+  $ ./contrib/guix/guix-clean
+
+Specifically, this will remove all files without an entry in the index,
+excluding the depends download cache, the depends built
+packages cache, the garbage collector roots for Guix environments, and the
+output directory.
+EOF
+for host in $hosts_distsrc_exists; do
+    echo "    ${host} '$(distsrc_for_host "$host")'"
+done
+exit 1
+else
+    mkdir -p "$DISTSRC_BASE"
+fi
+
+################
+# Check that we can connect to the guix-daemon
+################
+
+cat << EOF
+Checking that we can connect to the guix-daemon...
+
+Hint: If this hangs, you may want to try turning your guix-daemon off and on
+      again.
+
+EOF
+if ! guix gc --list-failures > /dev/null; then
+cat << EOF
+
+ERR: Failed to connect to the guix-daemon; please ensure that one is running and
+     reachable.
+EOF
+exit 1
+fi
+
+# Developer note: we could use `guix repl` for this check and run:
+#
+#     (import (guix store)) (close-connection (open-connection))
+#
+# However, the internal API is likely to change more than the CLI invocation
+
+################
+# Services database must have basic entries
+################
+
+if ! getent services http https ftp > /dev/null 2>&1; then
+cat << EOF
+ERR: Your system's C library cannot find service database entries for at least
+     one of the following services: http, https, ftp.
+
+Hint: Most likely, /etc/services does not exist yet (common for docker images
+      and minimal distros), or you don't have permissions to access it.
+
+      If /etc/services does not exist yet, you may want to install the
+      appropriate package for your distro which provides it.
+ + On Debian/Ubuntu: netbase + On Arch Linux: iana-etc + + For more information, see: getent(1), services(5) + +EOF + +fi + +######### +# SETUP # +######### + +# Determine the maximum number of jobs to run simultaneously (overridable by +# environment) +JOBS="${JOBS:-$(nproc)}" + +# Usage: host_to_commonname HOST +# +# HOST: The current platform triple we're building for +# +host_to_commonname() { + case "$1" in + *darwin*) echo osx ;; + *mingw*) echo win ;; + *android*) echo android ;; + *linux*) echo linux ;; + *freebsd*) echo freebsd ;; + *) exit 1 ;; + esac +} + +COMMIT_TIMESTAMP="$(git -c log.showSignature=false log --format=%at -1)" + +# Precious directories are those which should not be cleaned between successive +# guix builds +depends_precious_dir_names='SOURCES_PATH BASE_CACHE' +precious_dir_names="${depends_precious_dir_names} OUTDIR_BASE LOGDIR_BASE PROFILES_BASE" + +# Usage: contains IFS-SEPARATED-LIST ITEM +contains() { + for i in ${1}; do + if [ "$i" = "${2}" ]; then + return 0 # Found! + fi + done + return 1 +} + +# If the user explicitly specified a precious directory, create it so we +# can map it into the container +for precious_dir_name in $precious_dir_names; do + precious_dir_path="${!precious_dir_name}" + if [ -n "$precious_dir_path" ]; then + if [ ! -e "$precious_dir_path" ]; then + mkdir -p "$precious_dir_path" + elif [ -L "$precious_dir_path" ]; then + echo "ERR: ${precious_dir_name} cannot be a symbolic link" + exit 1 + elif [ ! -d "$precious_dir_path" ]; then + echo "ERR: ${precious_dir_name} must be a directory" + exit 1 + fi + fi +done + +mkdir -p "$VAR_BASE" + +# Record the _effective_ values of precious directories such that guix-clean can +# avoid clobbering them if appropriate. +# +# shellcheck disable=SC2046,SC2086 +{ + # Get precious dir definitions from depends and the environment + for precious_dir_name in $precious_dir_names; do + if contains "$depends_precious_dir_names" "$precious_dir_name"; then + precious_dir_path="$(make -C "${PWD}/contrib/depends" --no-print-directory print-${precious_dir_name})" + else + precious_dir_path="${!precious_dir_name}" + fi + echo "${precious_dir_name}=${precious_dir_path}" + done +} > "${VAR_BASE}/precious_dirs" + +# Make sure an output and logs directory exists for our builds +OUTDIR_BASE="${OUTDIR_BASE:-${VERSION_BASE}/output}" +mkdir -p "$OUTDIR_BASE" +LOGDIR_BASE="${LOGDIR_BASE:-${VERSION_BASE}/logs}" +mkdir -p "$LOGDIR_BASE" + +# Download and archive Rust dependencies. +# Version and Hash need to be updated when: +# - we bump the time-machine and Guix has a new version of Rust. +# - Cargo.lock in src/fcmp_pp/fcmp_pp_rust is updated. +RUST_DEPS_VERSION=0 +RUST_DEPS_HASH="hchq2aqq68vxp3hm1hhl74b90vsqb9xv" +RUST_DEPS_ARCHIVE="rust_deps-${RUST_DEPS_VERSION}.tar.gz" +RUST_DEPS_STORE_ITEM="/gnu/store/${RUST_DEPS_HASH}-${RUST_DEPS_ARCHIVE}" +if [ ! 
-f "${RUST_DEPS_STORE_ITEM}" ]; then + time-machine environment --manifest="${PWD}/contrib/guix/rust/cargo.scm" \ + --container \ + --pure \ + --network \ + --no-cwd \ + --user="user" \ + --share="$PWD"=/monero \ + -- env RUST_DEPS_ARCHIVE="$RUST_DEPS_ARCHIVE" \ + bash /monero/contrib/guix/rust/cargo.sh + + time-machine download ${RUST_DEPS_ARCHIVE} + rm ${RUST_DEPS_ARCHIVE} +fi + +# Download the depends sources now as we won't have internet access in the build +# container +for host in $HOSTS; do + make -C "${PWD}/contrib/depends" -j"$JOBS" download-"$(host_to_commonname "${host}")" ${V:+V=1} ${SOURCES_PATH:+SOURCES_PATH="$SOURCES_PATH"} +done + +# Usage: outdir_for_host HOST +# +# HOST: The current platform triple we're building for +# +outdir_for_host() { + echo "${OUTDIR_BASE}/${1}" +} + +# Usage: logdir_for_host HOST +# +# HOST: The current platform triple we're building for +# +logdir_for_host() { + echo "${LOGDIR_BASE}/${1}" +} + +# Usage: profiledir_for_host HOST +# +# HOST: The current platform triple we're building for +# +profiledir_for_host() { + echo "${PROFILES_BASE}/${1}" +} + + +######### +# BUILD # +######### + +# Function to be called when building for host ${1} and the user interrupts the +# build +int_trap() { +cat << EOF +** INT received while building ${1}, you may want to clean up the relevant + work directories (e.g. distsrc-*) before rebuilding + +Hint: To blow everything away, you may want to use: + + $ ./contrib/guix/guix-clean + +Specifically, this will remove all files without an entry in the index, +excluding the depends download cache, the depends built +packages cache, the garbage collector roots for Guix environments, and the +output directory. +EOF +} + +# Deterministically build Monero +# shellcheck disable=SC2153 +for host in $HOSTS; do + + # Display proper warning when the user interrupts the build + trap 'int_trap ${host}' INT + + ( + # Required for 'contrib/guix/manifest.scm' to output the right manifest + # for the particular $HOST we're building for + export HOST="$host" + + + # shellcheck disable=SC2030 +cat << EOF +INFO: Building ${VERSION:?not set} for platform triple ${HOST:?not set}: + ...using commit timestamp: ${COMMIT_TIMESTAMP:?not set} + ...running at most ${JOBS:?not set} jobs + ...from worktree directory: '${PWD}' + ...bind-mounted in container to: '/monero' + ...in build directory: '$(distsrc_for_host "$HOST")' + ...bind-mounted in container to: '$(DISTSRC_BASE=/distsrc-base && distsrc_for_host "$HOST")' + ...outputting in: '$(outdir_for_host "$HOST")' + ...bind-mounted in container to: '$(OUTDIR_BASE=/outdir-base && outdir_for_host "$HOST")' +EOF + + rm -f "$(profiledir_for_host "${HOST}")" + + # First run produces a different GUIX_ENVIRONMENT. + time-machine environment --manifest="${PWD}/contrib/guix/manifest.scm" \ + --container \ + --pure \ + --no-cwd \ + --cores="$JOBS" \ + --keep-failed \ + --fallback \ + ${SUBSTITUTE_URLS:+--substitute-urls="$SUBSTITUTE_URLS"} \ + ${ADDITIONAL_GUIX_COMMON_FLAGS} ${ADDITIONAL_GUIX_ENVIRONMENT_FLAGS} \ + -- echo "$HOST" + + # Useful for CI environments where you only want to build Guix packages so they + # can be cached. Currently unused. + if [[ -v DRY_RUN ]]; then + echo "Dry run, exiting.." + exit 0 + fi + + # Run the build script 'contrib/guix/libexec/build.sh' in the build + # container specified by 'contrib/guix/manifest.scm'. 
+    #
+    # Explanation of `guix environment` flags:
+    #
+    #   --container        run command within an isolated container
+    #
+    #     Running in an isolated container minimizes build-time differences
+    #     between machines and improves reproducibility
+    #
+    #   --pure             unset existing environment variables
+    #
+    #     Same rationale as --container
+    #
+    #   --no-cwd           do not share current working directory with an
+    #                      isolated container
+    #
+    #     When --container is specified, the default behavior is to share
+    #     the current working directory with the isolated container at the
+    #     same exact path (e.g. mapping '/home/user/monero/' to
+    #     '/home/user/monero/'). This means that the $PWD inside the
+    #     container becomes a source of irreproducibility. --no-cwd disables
+    #     this behaviour.
+    #
+    #   --share=SPEC       for containers, share writable host file system
+    #                      according to SPEC
+    #
+    #     --share="$PWD"=/monero
+    #
+    #                      maps our current working directory to /monero
+    #                      inside the isolated container, which we later cd
+    #                      into.
+    #
+    #     While we don't want to map our current working directory to the
+    #     same exact path (as this introduces irreproducibility), we do want
+    #     it to be at a _fixed_ path _somewhere_ inside the isolated
+    #     container so that we have something to build. '/monero' was
+    #     chosen arbitrarily.
+    #
+    #     ${SOURCES_PATH:+--share="$SOURCES_PATH"}
+    #
+    #                      make the downloaded depends sources path available
+    #                      inside the isolated container
+    #
+    #     The isolated container has no network access as it's in a
+    #     different network namespace from the main machine, so we have to
+    #     make the downloaded depends sources available to it. The sources
+    #     should have been downloaded prior to this invocation.
+    #
+    #   --keep-failed      keep build tree of failed builds
+    #
+    #     When builds of the Guix environment itself (not Monero)
+    #     fail, it is useful for the build tree to be kept for debugging
+    #     purposes.
+    #
+    #   ${SUBSTITUTE_URLS:+--substitute-urls="$SUBSTITUTE_URLS"}
+    #
+    #                      fetch substitutes from SUBSTITUTE_URLS if they are
+    #                      authorized
+    #
+    #     Depending on the user's security model, it may be desirable to use
+    #     substitutes (pre-built packages) from servers that the user trusts.
+    #     Please read the README.md in the same directory as this file for
+    #     more information.
+ # + # shellcheck disable=SC2086,SC2031 + time-machine environment --manifest="${PWD}/contrib/guix/manifest.scm" \ + --container \ + --pure \ + --no-cwd \ + --share="$PWD"=/monero \ + --share="$DISTSRC_BASE"=/distsrc-base \ + --share="$OUTDIR_BASE"=/outdir-base \ + --share="$LOGDIR_BASE"=/logdir-base \ + --share="$RUST_DEPS_STORE_ITEM"=/rust-deps \ + --expose="$(git rev-parse --git-common-dir)" \ + ${SOURCES_PATH:+--share="$SOURCES_PATH"} \ + ${BASE_CACHE:+--share="$BASE_CACHE"} \ + --cores="$JOBS" \ + --keep-failed \ + --fallback \ + --link-profile \ + --user="user" \ + --root="$(profiledir_for_host "${HOST}")" \ + ${SUBSTITUTE_URLS:+--substitute-urls="$SUBSTITUTE_URLS"} \ + ${ADDITIONAL_GUIX_COMMON_FLAGS} ${ADDITIONAL_GUIX_ENVIRONMENT_FLAGS} \ + -- env HOST="$HOST" \ + VERSION="$VERSION" \ + JOBS="$JOBS" \ + COMMIT_TIMESTAMP="${COMMIT_TIMESTAMP:?unable to determine value}" \ + ${V:+V=1} \ + ${DEPENDS_ONLY:+DEPENDS_ONLY=1} \ + ${SOURCES_PATH:+SOURCES_PATH="$SOURCES_PATH"} \ + ${BASE_CACHE:+BASE_CACHE="$BASE_CACHE"} \ + DISTSRC="$(DISTSRC_BASE=/distsrc-base && distsrc_for_host "$host")" \ + OUTDIR="$(OUTDIR_BASE=/outdir-base && outdir_for_host "$host")" \ + LOGDIR="$(LOGDIR_BASE=/logdir-base && logdir_for_host "$host")" \ + DIST_ARCHIVE_BASE=/outdir-base/dist-archive \ + bash -c "cd /monero && bash contrib/guix/libexec/build.sh" + ) + +done diff --git a/contrib/guix/guix-clean b/contrib/guix/guix-clean new file mode 100755 index 00000000000..b1537c853b0 --- /dev/null +++ b/contrib/guix/guix-clean @@ -0,0 +1,83 @@ +#!/usr/bin/env bash +export LC_ALL=C +set -e -o pipefail + +# Source the common prelude, which: +# 1. Checks if we're at the top directory of the Monero repository +# 2. Defines a few common functions and variables +# +# shellcheck source=libexec/prelude.bash +source "$(dirname "${BASH_SOURCE[0]}")/libexec/prelude.bash" + + +################### +## Sanity Checks ## +################### + +################ +# Required non-builtin commands should be invokable +################ + +check_tools git realpath + + +############# +## Clean ## +############# + +# Usage: under_dir MAYBE_PARENT MAYBE_CHILD +# +# If MAYBE_CHILD is a subdirectory of MAYBE_PARENT, print the relative path +# from MAYBE_PARENT to MAYBE_CHILD. Otherwise, return 1 as the error code. +# +# NOTE: This does not perform any symlink-resolving. +# +under_dir() { + local relpath + relpath="$(realpath --no-symlinks --relative-base="$1" "$2")" + if [ -z "$relpath" ] || [ "$relpath" = "$2" ] || [ "$relpath" = "." ]; then + return 1 + else + echo "$relpath" + fi +} + +# Usage: dir_under_git_root MAYBE_CHILD +# +# If MAYBE_CHILD is under the current git repository and exists, print the +# relative path from the git repository's top-level directory to MAYBE_CHILD, +# otherwise, exit with an error code. +# +dir_under_git_root() { + local rv + rv="$(under_dir "$(git_root)" "$1")" + [ -n "$rv" ] && echo "$rv" +} + +shopt -s nullglob +found_precious_dirs_files=( "${version_base_prefix}"*/"${var_base_basename}/precious_dirs" ) # This expands to an array of directories... +shopt -u nullglob + +exclude_flags=() + +for precious_dirs_file in "${found_precious_dirs_files[@]}"; do + # Make sure the precious directories (e.g. 
SOURCES_PATH, BASE_CACHE, SDK_PATH)
+    # are excluded from git-clean
+    echo "Found precious_dirs file: '${precious_dirs_file}'"
+
+    # Exclude the precious_dirs file itself
+    if dirs_file_exclude_fragment=$(dir_under_git_root "$(dirname "$precious_dirs_file")"); then
+        exclude_flags+=( --exclude="/${dirs_file_exclude_fragment}/precious_dirs" )
+    fi
+
+    # Read each 'name=dir' pair from the precious_dirs file
+    while IFS='=' read -r name dir; do
+        # Add an exclusion flag if the precious directory is under the git root.
+        if under=$(dir_under_git_root "$dir"); then
+            echo "Avoiding ${name}: ${under}"
+            exclude_flags+=( --exclude="/$under" )
+        fi
+    done < "$precious_dirs_file"
+done
+
+git clean -xdff "${exclude_flags[@]}"
diff --git a/contrib/guix/guix-verify b/contrib/guix/guix-verify
new file mode 100755
index 00000000000..580273c0cae
--- /dev/null
+++ b/contrib/guix/guix-verify
@@ -0,0 +1,124 @@
+#!/usr/bin/env bash
+export LC_ALL=C
+set -e -o pipefail
+
+# Source the common prelude, which:
+#   1. Checks if we're at the top directory of the monero repository
+#   2. Defines a few common functions and variables
+#
+# shellcheck source=libexec/prelude.bash
+source "$(dirname "${BASH_SOURCE[0]}")/libexec/prelude.bash"
+
+
+###################
+## Sanity Checks ##
+###################
+
+################
+# Required non-builtin commands should be invokable
+################
+
+check_tools cat diff gpg
+
+################
+# Required env vars should be non-empty
+################
+
+cmd_usage() {
+cat <<EOF
+Usage:
+
+  env GUIX_SIGS_REPO=<path/to/guix.sigs> [ SIGNER=<signer> ] ./contrib/guix/guix-verify
+
+Example overriding signer's manifest to use as base
+
+  env GUIX_SIGS_REPO=/home/user/guix.sigs SIGNER=achow101 ./contrib/guix/guix-verify
+
+EOF
+}
+
+if [ -z "$GUIX_SIGS_REPO" ]; then
+    cmd_usage
+    exit 1
+fi
+
+################
+# GUIX_SIGS_REPO should exist as a directory
+################
+
+if [ ! -d "$GUIX_SIGS_REPO" ]; then
+cat << EOF
+ERR: The specified GUIX_SIGS_REPO is not an existing directory:
+
+     '$GUIX_SIGS_REPO'
+
+Hint: Please clone the guix.sigs repository and point to it with the
+      GUIX_SIGS_REPO environment variable.
+
+EOF
+cmd_usage
+exit 1
+fi
+
+##############
+##  Verify  ##
+##############
+
+OUTSIGDIR_BASE="${GUIX_SIGS_REPO}/${VERSION}"
+echo "Looking for signature directories in '${OUTSIGDIR_BASE}'"
+echo ""
+
+# Usage: verify compare_manifest current_manifest
+verify() {
+    local compare_manifest="$1"
+    local current_manifest="$2"
+    if ! gpg --quiet --batch --verify "$current_manifest".asc "$current_manifest" 1>&2; then
+        echo "ERR: Failed to verify GPG signature in '${current_manifest}'"
+        echo ""
+        echo "Hint: Either the signature is invalid or the public key is missing"
+        echo ""
+        failure=1
+    elif !
diff --text --report-identical-files "$compare_manifest" "$current_manifest" 1>&2; then
+        echo "ERR: The SHA256SUMS attestation in these two directories differ:"
+        echo "     '${compare_manifest}'"
+        echo "     '${current_manifest}'"
+        echo ""
+        failure=1
+    else
+        echo "Verified: '${current_manifest}'"
+        echo ""
+    fi
+}
+
+shopt -s nullglob
+all_all=( "$OUTSIGDIR_BASE"/*/all.SHA256SUMS )
+shopt -u nullglob
+
+if (( ${#all_all[@]} )); then
+    compare_all="${all_all[0]}"
+    if [[ -n "$SIGNER" ]]; then
+        signer_all="$OUTSIGDIR_BASE/$SIGNER/all.SHA256SUMS"
+        if [[ -f "$signer_all" ]]; then
+            echo "Using $SIGNER's manifest as the base to compare against"
+            compare_all="$signer_all"
+        else
+            echo "Unable to find $SIGNER's manifest, using the first one found"
+        fi
+    else
+        echo "No SIGNER provided, using the first manifest found"
+    fi
+
+    for current_manifest in "${all_all[@]}"; do
+        verify "$compare_all" "$current_manifest"
+    done
+
+    echo "DONE: Checking output signatures for all.SHA256SUMS"
+    echo ""
+else
+    echo "ERR: No signature directories with all.SHA256SUMS found"
+    exit 1
+fi
+
+if [ -n "$failure" ]; then
+    exit 1
+fi
diff --git a/contrib/guix/libexec/build.sh b/contrib/guix/libexec/build.sh
new file mode 100644
index 00000000000..f1c3b4ad4e9
--- /dev/null
+++ b/contrib/guix/libexec/build.sh
@@ -0,0 +1,453 @@
+#!/usr/bin/env bash
+# Copyright (c) 2019-2021 The Bitcoin Core developers
+# Copyright (c) 2022-2024 The Monero Project
+# Distributed under the MIT software license, see the accompanying
+# file ../LICENSE.txt or http://www.opensource.org/licenses/mit-license.php.
+export LC_ALL=C
+set -e -o pipefail
+export TZ=UTC
+
+# shellcheck source=contrib/shell/git-utils.bash
+source contrib/shell/git-utils.bash
+
+# Although Guix _does_ set umask when building its own packages (in our case,
+# this is all packages in manifest.scm), it does not set it for `guix
+# environment`. It does make sense for at least `guix environment --container`
+# to set umask, so if that change gets merged upstream and we bump the
+# time-machine to a commit which includes the aforementioned change, we can
+# remove this line.
+#
+# This line should be placed before any commands which create files.
+umask 0022
+
+if [ -n "$V" ]; then
+    # Print both unexpanded (-v) and expanded (-x) forms of commands as they are
+    # read from this file.
+    set -vx
+    # Set VERBOSE for CMake-based builds
+    export VERBOSE="$V"
+fi
+
+# Check that required environment variables are set
+cat << EOF
+Required environment variables as seen inside the container:
+    DIST_ARCHIVE_BASE: ${DIST_ARCHIVE_BASE:?not set}
+    VERSION: ${VERSION:?not set}
+    HOST: ${HOST:?not set}
+    COMMIT_TIMESTAMP: ${COMMIT_TIMESTAMP:?not set}
+    JOBS: ${JOBS:?not set}
+    DISTSRC: ${DISTSRC:?not set}
+    OUTDIR: ${OUTDIR:?not set}
+    LOGDIR: ${LOGDIR:?not set}
+    OPTIONS: ${OPTIONS}
+EOF
+
+ACTUAL_OUTDIR="${OUTDIR}"
+OUTDIR="${DISTSRC}/output"
+DISTNAME="monero-${HOST}-${VERSION}"
+
+# Use a fixed timestamp for depends builds so hashes match across commits that
+# don't make changes to the build system. This timestamp is only used for depends
+# packages. Source archive and binary tarballs use the commit date.
+export SOURCE_DATE_EPOCH=1397818193
+
+#####################
+# Environment Setup #
+#####################
+
+# Collect some information about the build environment to help debug potential reproducibility issues
+mkdir -p "${LOGDIR}"
+ls -1 /gnu/store | sort > "${LOGDIR}/guix-hashes.txt"
+printenv | sort | grep -v '^\(BASE_CACHE=\|DISTNAME=\|DISTSRC=\|OUTDIR=\|LOGDIR=\|SOURCES_PATH=\|JOBS=\|OPTIONS=\|DEPENDS_ONLY=\)' > "${LOGDIR}/guix-env.txt"
+
+# The depends folder also serves as a base-prefix for depends packages for
+# $HOSTs after successfully building.
+BASEPREFIX="${PWD}/contrib/depends"
+
+# Given a package name and an output name, return the path of that output in our
+# current guix environment
+store_path() {
+    grep --extended-regexp "/[^-]{32}-${1}-[^-]+${2:+-${2}}" "${GUIX_ENVIRONMENT}/manifest" \
+        | head --lines=1 \
+        | sed --expression='s|\x29*$||' \
+              --expression='s|^[[:space:]]*"||' \
+              --expression='s|"[[:space:]]*$||'
+}
+
+# These environment variables are automatically set by Guix, but don't
+# necessarily point to the correct toolchain paths. This is fixed below.
+unset LIBRARY_PATH
+unset CPATH
+unset C_INCLUDE_PATH
+unset CPLUS_INCLUDE_PATH
+unset OBJC_INCLUDE_PATH
+unset OBJCPLUS_INCLUDE_PATH
+
+NATIVE_GCC="$(store_path gcc-toolchain)"
+
+export C_INCLUDE_PATH="${NATIVE_GCC}/include"
+export CPLUS_INCLUDE_PATH="${NATIVE_GCC}/include/c++:${NATIVE_GCC}/include"
+export OBJC_INCLUDE_PATH="${NATIVE_GCC}/include"
+export OBJCPLUS_INCLUDE_PATH="${NATIVE_GCC}/include/c++:${NATIVE_GCC}/include"
+
+case "$HOST" in
+    *darwin*) export LIBRARY_PATH="${NATIVE_GCC}/lib" ;;
+    *mingw*)  export LIBRARY_PATH="${NATIVE_GCC}/lib" ;;
+    *)
+        NATIVE_GCC_STATIC="$(store_path gcc-toolchain static)"
+        export LIBRARY_PATH="${NATIVE_GCC}/lib:${NATIVE_GCC_STATIC}/lib"
+        ;;
+esac
+
+prepend_to_search_env_var() {
+    export "${1}=${2}${!1:+:}${!1}"
+}
+
+# error: failed to run custom build command for `compiler_builtins`
+#
+# error while loading shared libraries: libgcc_s.so.1: cannot open shared object file: No such file or directory
+export LD_LIBRARY_PATH="${NATIVE_GCC}/lib"
+
+# Set environment variables to point the CROSS toolchain to the right
+# includes/libs for $HOST
+case "$HOST" in
+    *mingw*)
+        # Determine output paths to use in CROSS_* environment variables
+        case "$HOST" in
+            i686-*)   CROSS_GLIBC="$(store_path "mingw-w64-i686-winpthreads")" ;;
+            x86_64-*) CROSS_GLIBC="$(store_path "mingw-w64-x86_64-winpthreads")" ;;
+            *)        exit 1 ;;
+        esac
+
+        CROSS_GCC="$(store_path "gcc-cross-${HOST}")"
+        CROSS_GCC_LIB_STORE="$(store_path "gcc-cross-${HOST}" lib)"
+        CROSS_GCC_LIBS=( "${CROSS_GCC_LIB_STORE}/lib/gcc/${HOST}"/* ) # This expands to an array of directories...
+        CROSS_GCC_LIB="${CROSS_GCC_LIBS[0]}" # ...we just want the first one (there should only be one)
+
+        # The search path ordering is generally:
+        #    1. gcc-related search paths
+        #    2. libc-related search paths
+        #    3. kernel-header-related search paths (not applicable to mingw-w64 hosts)
+        export CROSS_C_INCLUDE_PATH="${CROSS_GCC_LIB}/include:${CROSS_GCC_LIB}/include-fixed:${CROSS_GLIBC}/include"
+        export CROSS_CPLUS_INCLUDE_PATH="${CROSS_GCC}/include/c++:${CROSS_GCC}/include/c++/${HOST}:${CROSS_GCC}/include/c++/backward:${CROSS_C_INCLUDE_PATH}"
+        export CROSS_LIBRARY_PATH="${CROSS_GCC_LIB_STORE}/lib:${CROSS_GCC_LIB}:${CROSS_GLIBC}/lib"
+        ;;
+    *darwin*)
+        # The CROSS toolchain for darwin uses the SDK and ignores environment variables.
+        # See depends/hosts/darwin.mk for more details.
+ ;; + *android*) + export LD_LIBRARY_PATH="${NATIVE_GCC}/lib:$(find /gnu/store -maxdepth 1 -name "*zlib*" | sort | head -n 1)/lib:$(find /gnu/store -maxdepth 1 -name "*gcc-11*-lib" | sort | head -n 1)/lib" + ;; + *linux-gnu*) + CROSS_GLIBC="$(store_path "glibc-cross-${HOST}")" + CROSS_GLIBC_STATIC="$(store_path "glibc-cross-${HOST}" static)" + CROSS_KERNEL="$(store_path "linux-libre-headers-cross-${HOST}")" + CROSS_GCC="$(store_path "gcc-cross-${HOST}")" + CROSS_GCC_LIB_STORE="$(store_path "gcc-cross-${HOST}" lib)" + CROSS_GCC_LIBS=( "${CROSS_GCC_LIB_STORE}/lib/gcc/${HOST}"/* ) # This expands to an array of directories... + CROSS_GCC_LIB="${CROSS_GCC_LIBS[0]}" # ...we just want the first one (there should only be one) + + export CROSS_C_INCLUDE_PATH="${CROSS_GCC_LIB}/include:${CROSS_GCC_LIB}/include-fixed:${CROSS_GLIBC}/include:${CROSS_KERNEL}/include" + export CROSS_CPLUS_INCLUDE_PATH="${CROSS_GCC}/include/c++:${CROSS_GCC}/include/c++/${HOST}:${CROSS_GCC}/include/c++/backward:${CROSS_C_INCLUDE_PATH}" + export CROSS_LIBRARY_PATH="${CROSS_GCC_LIB_STORE}/lib:${CROSS_GCC_LIB}:${CROSS_GLIBC}/lib:${CROSS_GLIBC_STATIC}/lib" + ;; + *freebsd*) + ;; + *) + exit 1 ;; +esac + + +# Sanity check CROSS_*_PATH directories +IFS=':' read -ra PATHS <<< "${CROSS_C_INCLUDE_PATH}:${CROSS_CPLUS_INCLUDE_PATH}:${CROSS_LIBRARY_PATH}" +for p in "${PATHS[@]}"; do + if [ -n "$p" ] && [ ! -d "$p" ]; then + echo "'$p' doesn't exist or isn't a directory... Aborting..." + exit 1 + fi +done + +# Disable Guix ld auto-rpath behavior +case "$HOST" in + *darwin*) + # The auto-rpath behavior is necessary for darwin builds as some native + # tools built by depends refer to and depend on Guix-built native + # libraries + # + # After the native packages in depends are built, the ld wrapper should + # no longer affect our build, as clang would instead reach for + # x86_64-apple-darwin-ld from cctools + ;; + *android*) + ;; + *) export GUIX_LD_WRAPPER_DISABLE_RPATH=yes ;; +esac + +# Make /usr/bin if it doesn't exist +[ -e /usr/bin ] || mkdir -p /usr/bin +[ -e /lib64 ] || mkdir /lib64 + +# Symlink file and env to a conventional path +[ -e /usr/bin/file ] || ln -s --no-dereference "$(command -v file)" /usr/bin/file +[ -e /usr/bin/env ] || ln -s --no-dereference "$(command -v env)" /usr/bin/env +[ -e /bin/bash ] || ln -s --no-dereference "$(command -v bash)" /bin/bash +[ -e /bin/sh ] || ln -s --no-dereference "$(command -v sh)" /bin/sh + +# The Android NDK toolchain cannot (yet) be bootstrapped. The compiler binaries +# included in the NDK have their dynamic interpreter set to the standard x86_64 +# interpreter path, which does not exist in this location in the Guix environment. +# The alternative was patchelf-ing all binaries included in the NDK, but this is +# more hacky and adds a dependency on patchelf for non-Guix builders. +[ -e /lib64/ld-linux-x86-64.so.2 ] || ln -s --no-dereference "${NATIVE_GCC}/lib/ld-linux-x86-64.so.2" /lib64/ld-linux-x86-64.so.2 + +# Determine the correct value for -Wl,--dynamic-linker for the current $HOST +# +# We need to do this because the dynamic linker does not exist at a standard path +# in the Guix container. Binaries wouldn't be able to start in other environments. 
+case "$HOST" in + *linux-gnu*) + glibc_dynamic_linker=$( + case "$HOST" in + x86_64-linux-gnu) echo /lib64/ld-linux-x86-64.so.2 ;; + arm-linux-gnueabihf) echo /lib/ld-linux-armhf.so.3 ;; + aarch64-linux-gnu) echo /lib/ld-linux-aarch64.so.1 ;; + riscv64-linux-gnu) echo /lib/ld-linux-riscv64-lp64d.so.1 ;; + i686-linux-gnu) echo /lib/ld-linux.so.2 ;; + *) exit 1 ;; + esac + ) + ;; +esac + +export GLIBC_DYNAMIC_LINKER=${glibc_dynamic_linker} + +# Environment variables for determinism +export TAR_OPTIONS="--owner=0 --group=0 --numeric-owner --mtime='@${SOURCE_DATE_EPOCH}' --sort=name" +export TZ="UTC" +case "$HOST" in + *darwin*) + # cctools AR, unlike GNU binutils AR, does not have a deterministic mode + # or a configure flag to enable determinism by default, it only + # understands if this env-var is set or not. See: + # + # https://github.com/tpoechtrager/cctools-port/blob/55562e4073dea0fbfd0b20e0bf69ffe6390c7f97/cctools/ar/archive.c#L334 + export ZERO_AR_DATE=yes + ;; +esac + +#################### +# Depends Building # +#################### + +mkdir -p "${OUTDIR}" + +# Log the depends build ids +make -C contrib/depends --no-print-directory HOST="$HOST" print-final_build_id_long | tr ':' '\n' > ${LOGDIR}/depends-hashes.txt + +# Build the depends tree, overriding variables that assume multilib gcc +make -C contrib/depends --jobs="$JOBS" HOST="$HOST" \ + ${V:+V=1} \ + ${SOURCES_PATH+SOURCES_PATH="$SOURCES_PATH"} \ + ${BASE_CACHE+BASE_CACHE="$BASE_CACHE"} \ + ${SDK_PATH+SDK_PATH="$SDK_PATH"} \ + OUTDIR="$OUTDIR" \ + LOGDIR="$LOGDIR" \ + x86_64_linux_CC=x86_64-linux-gnu-gcc \ + x86_64_linux_CXX=x86_64-linux-gnu-g++ \ + x86_64_linux_AR=x86_64-linux-gnu-gcc-ar \ + x86_64_linux_RANLIB=x86_64-linux-gnu-gcc-ranlib \ + x86_64_linux_NM=x86_64-linux-gnu-gcc-nm \ + x86_64_linux_STRIP=x86_64-linux-gnu-strip + +# Log the depends package hashes +DEPENDS_PACKAGES="$(make -C contrib/depends --no-print-directory HOST="$HOST" print-all_packages)" +DEPENDS_CACHE="$(make -C contrib/depends --no-print-directory ${BASE_CACHE+BASE_CACHE="$BASE_CACHE"} print-BASE_CACHE)" + +# Keep a record of the depends packages and their hashes that will be used for +# our build. If there is a reproducibility issue, comparing this log file could +# help narrow down which package is responsible for the defect. +{ + for package in ${DEPENDS_PACKAGES}; do + cat "${DEPENDS_CACHE}/${HOST}/${package}"/*.hash + done +} | sort -k2 > "${LOGDIR}/depends-packages.txt" + +# Stop here if we're only building depends packages. This is useful when +# debugging reproducibility issues in depends packages. Skips ahead to the next +# target, so we don't spend time building Monero binaries. +if [[ -n "$DEPENDS_ONLY" ]]; then + exit 0 +fi + +########################### +# Source Tarball Building # +########################### + +# Use COMMIT_TIMESTAMP for the source and release binary archives +export SOURCE_DATE_EPOCH=${COMMIT_TIMESTAMP} +export TAR_OPTIONS="--owner=0 --group=0 --numeric-owner --mtime='@${SOURCE_DATE_EPOCH}' --sort=name" + +GIT_ARCHIVE="${DIST_ARCHIVE_BASE}/monero-source-${VERSION}.tar.gz" + +# Create the source tarball if not already there +# This uses `git ls-files --recurse-submodules` instead of `git archive` to make +# sure submodules are included in the source archive. +if [ ! 
-e "$GIT_ARCHIVE" ]; then + mkdir -p "$(dirname "$GIT_ARCHIVE")" + git ls-files --recurse-submodules \ + | sort \ + | tar --create --transform "s,^,monero-source-${VERSION}/," --mode='u+rw,go+r-w,a+X' --files-from=- \ + | gzip -9n > ${GIT_ARCHIVE} + sha256sum "$GIT_ARCHIVE" +fi + +########################### +# Binary Tarball Building # +########################### + +# CFLAGS +case "$HOST" in + *linux-gnu*) + HOST_CFLAGS=$(find /gnu/store -maxdepth 1 -mindepth 1 -type d -exec echo -n " -ffile-prefix-map={}=/usr" \;) + HOST_CFLAGS+=" -ffile-prefix-map=${PWD}=." ;; +esac + +# CXXFLAGS +HOST_CXXFLAGS="$HOST_CFLAGS" +case "$HOST" in + arm-linux-gnueabihf) HOST_CXXFLAGS+=" -Wno-psabi" ;; +esac + +# LDFLAGS +case "$HOST" in + *linux-gnu*) HOST_LDFLAGS="-Wl,--as-needed -Wl,--dynamic-linker=$glibc_dynamic_linker -static-libstdc++" ;; + *mingw*) HOST_LDFLAGS="-Wl,--no-insert-timestamp" ;; +esac + +# error: "/gnu/store/<...>-rust-1.82.0/lib/rustlib/src/rust/library/Cargo.lock" does not exist, +# unable to build with the standard library +# +# The standard library does not exist at the location Cargo expects. +# +# We can override the path to the Rust source by setting the __CARGO_TESTS_ONLY_SRC_ROOT environment variable. +# See: https://github.com/rust-lang/cargo/blob/rust-1.82.0/src/cargo/core/compiler/standard_lib.rs#L183 +export __CARGO_TESTS_ONLY_SRC_ROOT=/rust/library + +# error: the `-Z` flag is only accepted on the nightly channel of Cargo, but this is the `stable` channel +# +# Since we don't have access to the nightly channel, we need to bypass the check with RUSTC_BOOTSTRAP. +# +# We could avoid using `-Z build-std` by cross-compiling the full standard library for each target. This approach +# adds hours to our build time and greatly increases the amount of foreign source code that is compiled as part of +# our build process. +export RUSTC_BOOTSTRAP=1 + +# See: https://rust-lang.github.io/rust-project-goals/2025h1/build-std.html +CARGO_OPTIONS="-Zbuild-std=std,panic_abort;" + +# TODO: add doc +CARGO_OPTIONS+="-Zbuild-std-features=panic_immediate_abort;" + +export GIT_DISCOVERY_ACROSS_FILESYSTEM=1 +# Force Trezor support for release binaries +export USE_DEVICE_TREZOR_MANDATORY=1 + +# Make $HOST-specific native binaries from depends available in $PATH +export PATH="${BASEPREFIX}/${HOST}/native/bin:${PATH}" +mkdir -p "$DISTSRC" +( + cd "$DISTSRC" + + # Extract the source tarball + tar --strip-components=1 -xf "${GIT_ARCHIVE}" + + # Setup the directory where our Monero build for HOST will be + # installed. This directory will also later serve as the input for our + # binary tarballs. + INSTALLPATH="${DISTSRC}/installed/${DISTNAME}" + mkdir -p "${INSTALLPATH}" + + # Ensure rpath in the resulting binaries is empty + CMAKEFLAGS="-DCMAKE_SKIP_RPATH=ON" + + # We can't check if submodules are checked out because we're building in an + # extracted source archive. The guix-build script makes sure submodules are + # checked out before starting a build. + CMAKEFLAGS+=" -DMANUAL_SUBMODULES=1" + + # Make sure cargo knows where to find our vendored sources. + mkdir -p /home/user/.cargo + cp contrib/guix/rust/config.toml /home/user/.cargo/ + sed -i "s/TARGET/${HOST}/g" /home/user/.cargo/config.toml + + # Unpack rust dependencies + mkdir -p /rust + tar xf /rust-deps -C /rust + + # Configure this DISTSRC for $HOST + # shellcheck disable=SC2086 + env CFLAGS="${HOST_CFLAGS}" CXXFLAGS="${HOST_CXXFLAGS}" \ + cmake --toolchain "${BASEPREFIX}/${HOST}/share/toolchain.cmake" -S . 
-B build \ + -DCMAKE_INSTALL_PREFIX="${INSTALLPATH}" \ + -DCMAKE_EXE_LINKER_FLAGS="${HOST_LDFLAGS}" \ + -DCMAKE_SHARED_LINKER_FLAGS="${HOST_LDFLAGS}" \ + -DCARGO_OPTIONS="${CARGO_OPTIONS}" \ + ${CMAKEFLAGS} + + make -C build --jobs="$JOBS" + + # Copy docs + cp README.md LICENSE docs/ANONYMITY_NETWORKS.md "${INSTALLPATH}" + + # Binaries should not contain references to the store path + for binary in "build/bin"/*; do + if strings "$binary" | grep -q "/gnu/store"; then + echo "ERR: ${binary} contains unexpected string: /gnu/store" + exit 1 + fi + done + + # Copy binaries + cp -a build/bin/* "${INSTALLPATH}" + + ( + cd installed + + # Finally, deterministically produce binary tarballs ready for release + case "$HOST" in + *mingw*) + find "${DISTNAME}/" -print0 \ + | xargs -0r touch --no-dereference --date="@${SOURCE_DATE_EPOCH}" + find "${DISTNAME}/" \ + | sort \ + | zip -X@ "${OUTDIR}/${DISTNAME}.zip" \ + || ( rm -f "${OUTDIR}/${DISTNAME}.zip" && exit 1 ) + ;; + *) + find "${DISTNAME}/" -print0 \ + | xargs -0r touch --no-dereference --date="@${SOURCE_DATE_EPOCH}" + find "${DISTNAME}/" \ + | sort \ + | tar --no-recursion --owner=0 --group=0 -c -T - \ + | bzip2 -9 > "${OUTDIR}/${DISTNAME}.tar.bz2" \ + || ( rm -f "${OUTDIR}/${DISTNAME}.tar.bz2" && exit 1 ) + ;; + esac + ) +) # $DISTSRC + +rm -rf "$ACTUAL_OUTDIR" +mv --no-target-directory "$OUTDIR" "$ACTUAL_OUTDIR" \ + || ( rm -rf "$ACTUAL_OUTDIR" && exit 1 ) + +( + cd /outdir-base + { + echo "$GIT_ARCHIVE" + find "$ACTUAL_OUTDIR" -type f + } | xargs realpath --relative-base="$PWD" \ + | xargs sha256sum \ + | sort -k2 \ + | sponge "$LOGDIR"/SHA256SUMS.part +) diff --git a/contrib/guix/libexec/prelude.bash b/contrib/guix/libexec/prelude.bash new file mode 100644 index 00000000000..112887762dc --- /dev/null +++ b/contrib/guix/libexec/prelude.bash @@ -0,0 +1,101 @@ +#!/usr/bin/env bash +export LC_ALL=C +set -e -o pipefail + +# shellcheck source=contrib/shell/git-utils.bash +source contrib/shell/git-utils.bash + +################ +# Required non-builtin commands should be invocable +################ + +check_tools() { + for cmd in "$@"; do + if ! command -v "$cmd" > /dev/null 2>&1; then + echo "ERR: This script requires that '$cmd' is installed and available in your \$PATH" + exit 1 + fi + done +} + +check_tools cat env git realpath + +################ +# We should be at the top directory of the repository +################ + +same_dir() { + local resolved1 resolved2 + resolved1="$(realpath -e "${1}")" + resolved2="$(realpath -e "${2}")" + [ "$resolved1" = "$resolved2" ] +} + +if ! same_dir "${PWD}" "$(git_root)"; then +cat << EOF +ERR: This script must be invoked from the top level of the git repository + +Hint: This may look something like: + env FOO=BAR ./contrib/guix/guix- + +EOF +exit 1 +fi + +################ +# Execute "$@" in a pinned, possibly older version of Guix, for reproducibility +# across time. +# +# For more information on guix time-machine, see: +# https://guix.gnu.org/manual/en/html_node/Invoking-guix-time_002dmachine.html +# +# Before updating the pinned hash: +# +# - Push new commits to monero-project/guix from upstream. Do not forget to update +# the keyring branch as well. Guix uses this branch to authenticate commits. +# +# The repository is set to monero-project/guix because fetching from the official +# repo at https://git.savannah.gnu.org/git/guix.git is unreliable in CI jobs. +# +# Do not attempt to push custom changes to monero-project/guix, it will not work! 
+# If a change is necessary to Guix, submit a patch to https://issues.guix.gnu.org/ +# New packages can be defined in manifest.scm until they are available upstream. +# +# - Make sure a bootstrapped build works with the new commit using a fresh Guix install: +# $ export ADDITIONAL_GUIX_COMMON_FLAGS='--no-substitutes' +# +# - Check how the update affects our build graph and which packages have been updated. +time-machine() { + # shellcheck disable=SC2086 + guix time-machine --url=https://github.com/monero-project/guix.git \ + --commit=9d09b0cf841fb657a1aec12e9bab68e00c2b493c \ + --cores="$JOBS" \ + --keep-failed \ + --fallback \ + ${SUBSTITUTE_URLS:+--substitute-urls="$SUBSTITUTE_URLS"} \ + ${ADDITIONAL_GUIX_COMMON_FLAGS} ${ADDITIONAL_GUIX_TIMEMACHINE_FLAGS} \ + -- "$@" +} + + +################ +# Set common variables +################ + +VERSION="${FORCE_VERSION:-$(git_head_version)}" + +VERSION_BASE_DIR="${VERSION_BASE_DIR:-${PWD}}" +version_base_prefix="${VERSION_BASE_DIR}/guix/guix-build-" +VERSION_BASE="${version_base_prefix}${VERSION}" # TOP + +DISTSRC_BASE="${DISTSRC_BASE:-${VERSION_BASE}}" + +OUTDIR_BASE="${OUTDIR_BASE:-${VERSION_BASE}/output}" + +LOGDIR_BASE="${LOGDIR_BASE:-${VERSION_BASE}/logs}" + +var_base_basename="var" +VAR_BASE="${VAR_BASE:-${VERSION_BASE}/${var_base_basename}}" + +profiles_base_basename="profiles" +PROFILES_BASE="${PROFILES_BASE:-${VAR_BASE}/${profiles_base_basename}}" diff --git a/contrib/guix/manifest.scm b/contrib/guix/manifest.scm new file mode 100644 index 00000000000..ec88019d294 --- /dev/null +++ b/contrib/guix/manifest.scm @@ -0,0 +1,316 @@ +(use-modules (gnu packages) + (gnu packages autotools) + (gnu packages bash) + ((gnu packages cmake) #:select (cmake-minimal)) + (gnu packages commencement) + (gnu packages compression) + (gnu packages cross-base) + ((gnu packages elf) #:select (patchelf)) + (gnu packages file) + (gnu packages gawk) + (gnu packages gcc) + (gnu packages gperf) + ((gnu packages libusb) #:select (libplist)) + ((gnu packages linux) #:select (linux-libre-headers-6.1 util-linux)) + (gnu packages llvm) + (gnu packages mingw) + (gnu packages moreutils) + (gnu packages perl) + (gnu packages pkg-config) + ((gnu packages python) #:select (python-minimal)) + (gnu packages rust) + ((gnu packages tls) #:select (openssl)) + ((gnu packages version-control) #:select (git-minimal)) + (guix build-system gnu) + (guix build-system trivial) + (guix download) + (guix gexp) + (guix git-download) + ((guix licenses) #:prefix license:) + (guix packages) + ((guix utils) #:select (substitute-keyword-arguments))) + +(define-syntax-rule (search-our-patches file-name ...) + "Return the list of absolute file names corresponding to each +FILE-NAME found in ./patches relative to the current file." + (parameterize + ((%patch-path (list (string-append (dirname (current-filename)) "/patches")))) + (list (search-patch file-name) ...))) + +(define building-on (string-append "--build=" (list-ref (string-split (%current-system) #\-) 0) "-guix-linux-gnu")) + +(define (make-cross-toolchain target + base-gcc-for-libc + base-kernel-headers + base-libc + base-gcc) + "Create a cross-compilation toolchain package for TARGET" + (let* ((xbinutils (cross-binutils target)) + ;; 1. Build a cross-compiling gcc without targeting any libc, derived + ;; from BASE-GCC-FOR-LIBC + (xgcc-sans-libc (cross-gcc target + #:xgcc base-gcc-for-libc + #:xbinutils xbinutils)) + ;; 2. 
Build cross-compiled kernel headers with XGCC-SANS-LIBC, derived + ;; from BASE-KERNEL-HEADERS + (xkernel (cross-kernel-headers target + #:linux-headers base-kernel-headers + #:xgcc xgcc-sans-libc + #:xbinutils xbinutils)) + ;; 3. Build a cross-compiled libc with XGCC-SANS-LIBC and XKERNEL, + ;; derived from BASE-LIBC + (xlibc (cross-libc target + #:libc base-libc + #:xgcc xgcc-sans-libc + #:xbinutils xbinutils + #:xheaders xkernel)) + ;; 4. Build a cross-compiling gcc targeting XLIBC, derived from + ;; BASE-GCC + (xgcc (cross-gcc target + #:xgcc base-gcc + #:xbinutils xbinutils + #:libc xlibc))) + ;; Define a meta-package that propagates the resulting XBINUTILS, XLIBC, and + ;; XGCC + (package + (name (string-append target "-toolchain")) + (version (package-version xgcc)) + (source #f) + (build-system trivial-build-system) + (arguments '(#:builder (begin (mkdir %output) #t))) + (propagated-inputs + (list xbinutils + xlibc + xgcc + `(,xlibc "static") + `(,xgcc "lib"))) + (synopsis (string-append "Complete GCC tool chain for " target)) + (description (string-append "This package provides a complete GCC tool +chain for " target " development.")) + (home-page (package-home-page xgcc)) + (license (package-license xgcc))))) + +(define base-gcc gcc-12) +(define base-linux-kernel-headers linux-libre-headers-6.1) + +(define* (make-monero-cross-toolchain target + #:key + (base-gcc-for-libc linux-base-gcc) + (base-kernel-headers base-linux-kernel-headers) + (base-libc glibc-2.27) + (base-gcc linux-base-gcc)) + "Convenience wrapper around MAKE-CROSS-TOOLCHAIN with default values +desirable for building Monero release binaries." + (make-cross-toolchain target + base-gcc-for-libc + base-kernel-headers + base-libc + base-gcc)) + +(define (gcc-mingw-patches gcc) + (package-with-extra-patches gcc + (search-our-patches "gcc-remap-guix-store.patch"))) + +(define (make-mingw-pthreads-cross-toolchain target) + "Create a cross-compilation toolchain package for TARGET" + (let* ((xbinutils (cross-binutils target)) + (pthreads-xlibc (package-with-extra-patches (cond ((string-prefix?
"i686-" target) + mingw-w64-i686-winpthreads) + (else mingw-w64-x86_64-winpthreads)) + (search-our-patches "winpthreads-remap-guix-store.patch"))) + (pthreads-xgcc (cross-gcc target + #:xgcc (gcc-mingw-patches mingw-w64-base-gcc) + #:xbinutils xbinutils + #:libc pthreads-xlibc))) + ;; Define a meta-package that propagates the resulting XBINUTILS, XLIBC, and + ;; XGCC + (package + (name (string-append target "-posix-toolchain")) + (version (package-version pthreads-xgcc)) + (source #f) + (build-system trivial-build-system) + (arguments '(#:builder (begin (mkdir %output) #t))) + (propagated-inputs + (list xbinutils + pthreads-xlibc + pthreads-xgcc + `(,pthreads-xgcc "lib"))) + (synopsis (string-append "Complete GCC tool chain for " target)) + (description (string-append "This package provides a complete GCC tool +chain for " target " development.")) + (home-page (package-home-page pthreads-xgcc)) + (license (package-license pthreads-xgcc))))) + +(define-public mingw-w64-base-gcc + (package + (inherit base-gcc) + (arguments + (substitute-keyword-arguments (package-arguments base-gcc) + ((#:configure-flags flags) + `(append ,flags + ;; https://gcc.gnu.org/install/configure.html + (list "--enable-threads=posix", + building-on))))))) + +(define-public linux-base-gcc + (package + (inherit (package-with-extra-patches base-gcc + (search-our-patches "gcc-remap-guix-store.patch"))) + (arguments + (substitute-keyword-arguments (package-arguments base-gcc) + ((#:configure-flags flags) + `(append ,flags + ;; https://gcc.gnu.org/install/configure.html + (list "--enable-initfini-array=yes", + "--enable-default-ssp=yes", + "--enable-default-pie=yes", + "--enable-standard-branch-protection=yes", + "--enable-cet=yes", + building-on))) + ((#:phases phases) + `(modify-phases ,phases + ;; Given an XGCC package, return a modified package that replaces each instance of + ;; -rpath in the default system spec that's inserted by Guix with -rpath-link + (add-after 'pre-configure 'replace-rpath-with-rpath-link + (lambda _ + (substitute* (cons "gcc/config/rs6000/sysv4.h" + (find-files "gcc/config" + "^gnu-user.*\\.h$")) + (("-rpath=") "-rpath-link=")) + #t)))))))) + +(define-public glibc-2.27 + (package + (inherit glibc-2.31) + (version "2.27") + (source (origin + (method git-fetch) + (uri (git-reference + (url "https://sourceware.org/git/glibc.git") + (commit "73886db6218e613bd6d4edf529f11e008a6c2fa6"))) + (file-name (git-file-name "glibc" "73886db6218e613bd6d4edf529f11e008a6c2fa6")) + (sha256 + (base32 + "0azpb9cvnbv25zg8019rqz48h8i2257ngyjg566dlnp74ivrs9vq")) + (patches (search-our-patches "glibc-2.27-riscv64-Use-__has_include-to-include-asm-syscalls.h.patch" + "glibc-2.27-guix-prefix.patch" + "glibc-2.27-no-librt.patch" + "glibc-2.27-riscv64-fix-incorrect-jal-with-HIDDEN_JUMPTARGET.patch")))) + (arguments + (substitute-keyword-arguments (package-arguments glibc) + ((#:configure-flags flags) + `(append ,flags + ;; https://www.gnu.org/software/libc/manual/html_node/Configuring-and-compiling.html + (list "--enable-stack-protector=all", + "--enable-bind-now", + "--disable-werror", + building-on))) + ((#:phases phases) + `(modify-phases ,phases + (add-before 'configure 'set-etc-rpc-installation-directory + (lambda* (#:key outputs #:allow-other-keys) + ;; Install the rpc data base file under `$out/etc/rpc'. + ;; Otherwise build will fail with "Permission denied."
+ (let ((out (assoc-ref outputs "out"))) + (substitute* "sunrpc/Makefile" + (("^\\$\\(inst_sysconfdir\\)/rpc(.*)$" _ suffix) + (string-append out "/etc/rpc" suffix "\n")) + (("^install-others =.*$") + (string-append "install-others = " out "/etc/rpc\n")))))))) + (native-inputs + (modify-inputs (package-native-inputs glibc-2.31) + (delete "make") + (append gnu-make-4.2))))) ;; make >= 4.4 causes an infinite loop (stdio-common) + + +; This list declares which packages are included in the container environment. It +; should reflect the minimal set of packages we need to build and debug the build +; process. Guix will also include the run-time dependencies for each package. +; +; If a package is target-specific, place it in the corresponding list at the end. +; Be mindful when adding new packages here. Some packages take a very long time +; to bootstrap. Prefer -minimal versions of packages, unless there is a good +; reason not to. +; +; To show run-time dependencies, run: +; $ guix time-machine --commit= -- graph --type=references | xdot - +; +; To show build-time dependencies (excluding bootstrap), run: +; $ guix time-machine --commit= -- graph | xdot - + +(packages->manifest + (append + (list ;; The Basics + bash + ; the build graph for bash-minimal is slightly smaller. + ; however, it does not include readline support which + ; makes debugging inside the guix container inconvenient + coreutils-minimal + ; includes basic shell utilities: cat, cp, echo, mkdir, etc + which + + ;; File(system) inspection + file + grep + diffutils ; provides diff + findutils ; provides find and xargs + + ;; File transformation + patch + gawk + sed + moreutils ; sponge is used to construct the SHA256SUMS.part file in libexec/build.sh + patchelf ; unused, occasionally useful for debugging + + ;; Compression and archiving + tar + bzip2 ; used to create release archives (non-windows) + gzip ; used to unpack most packages in depends + xz ; used to unpack freebsd_base + p7zip + zip ; used to create release archives (windows) + unzip ; used to unpack android_ndk + + ;; Build tools + gnu-make + libtool + autoconf-2.71 ; defaults to 2.69, which does not recognize the aarch64-apple-darwin target + automake + pkg-config + gperf ; required to build eudev in depends + cmake-minimal + rust + (list rust "cargo") + + ;; Scripting + perl ; required to build openssl in depends + python-minimal ; required to build monero (cmake/CheckTrezor.cmake) and in android_ndk + + ;; Git + git-minimal ; used to create the release source archive + ) + (let ((target (getenv "HOST"))) + (cond ((string-suffix?
"-mingw32" target) + (list + gcc-toolchain-12 + (make-mingw-pthreads-cross-toolchain target))) + ((string-contains target "-linux-gnu") + (list + gcc-toolchain-12 + (list gcc-toolchain-12 "static") + (make-monero-cross-toolchain target))) + ((string-contains target "freebsd") + (list + gcc-toolchain-12 + (list gcc-toolchain-12 "static") + clang-toolchain-11 binutils)) + ((string-contains target "android") + (list + gcc-toolchain-12 + (list gcc-toolchain-12 "static"))) + ((string-contains target "darwin") + (list + gcc-toolchain-11 + clang-toolchain-11 + binutils)) + (else '()))))) diff --git a/contrib/guix/patches/gcc-remap-guix-store.patch b/contrib/guix/patches/gcc-remap-guix-store.patch new file mode 100644 index 00000000000..a8b41d485b0 --- /dev/null +++ b/contrib/guix/patches/gcc-remap-guix-store.patch @@ -0,0 +1,20 @@ +Without ffile-prefix-map, the debug symbols will contain paths for the +guix store which will include the hashes of each package. However, the +hash for the same package will differ when on different architectures. +In order to be reproducible regardless of the architecture used to build +the package, map all guix store prefixes to something fixed, e.g. /usr. + +--- a/libgcc/Makefile.in ++++ b/libgcc/Makefile.in +@@ -854,7 +854,7 @@ endif + # libgcc_eh.a, only LIB2ADDEH matters. If we do, only LIB2ADDEHSTATIC and + # LIB2ADDEHSHARED matter. (Usually all three are identical.) + +-c_flags := -fexceptions ++c_flags := -fexceptions $(shell find /gnu/store -maxdepth 1 -mindepth 1 -type d -exec echo -n " -ffile-prefix-map={}=/usr" \;) + + ifeq ($(enable_shared),yes) + +-- +2.37.0 + diff --git a/contrib/guix/patches/glibc-2.27-guix-prefix.patch b/contrib/guix/patches/glibc-2.27-guix-prefix.patch new file mode 100644 index 00000000000..6648bc6c053 --- /dev/null +++ b/contrib/guix/patches/glibc-2.27-guix-prefix.patch @@ -0,0 +1,22 @@ +Without ffile-prefix-map, the debug symbols will contain paths for the +guix store which will include the hashes of each package. However, the +hash for the same package will differ when on different architectures. +In order to be reproducible regardless of the architecture used to build +the package, map all guix store prefixes to something fixed, e.g. /usr. + +We might be able to drop this in favour of using --with-nonshared-cflags +when we begin using newer versions of glibc. + +--- a/Makeconfig ++++ b/Makeconfig +@@ -992,6 +992,10 @@ object-suffixes := + CPPFLAGS-.o = $(pic-default) + # libc.a must be compiled with -fPIE/-fpie for static PIE. + CFLAGS-.o = $(filter %frame-pointer,$(+cflags)) $(pie-default) ++ ++# Map Guix store paths to /usr ++CFLAGS-.o += `find /gnu/store -maxdepth 1 -mindepth 1 -type d -exec echo -n " -ffile-prefix-map={}=/usr" \;` ++ + libtype.o := lib%.a + object-suffixes += .o + ifeq (yes,$(build-shared)) diff --git a/contrib/guix/patches/glibc-2.27-no-librt.patch b/contrib/guix/patches/glibc-2.27-no-librt.patch new file mode 100644 index 00000000000..4f2092ba7ec --- /dev/null +++ b/contrib/guix/patches/glibc-2.27-no-librt.patch @@ -0,0 +1,53 @@ +This patch can be dropped when we are building with glibc 2.30+.
+ +commit 6e41ef56c9baab719a02f1377b1e7ce7bff61e73 +Author: Florian Weimer +Date: Fri Feb 8 10:21:56 2019 +0100 + + rt: Turn forwards from librt to libc into compat symbols [BZ #24194] + + As the result of commit 6e6249d0b461b952d0f544792372663feb6d792a + ("BZ#14743: Move clock_* symbols from librt to libc."), in glibc 2.17, + clock_gettime, clock_getres, clock_settime, clock_getcpuclockid, + clock_nanosleep were added to libc, and the file rt/clock-compat.c + was added with forwarders to the actual implementations in libc. + These forwarders were wrapped in + + #if SHLIB_COMPAT (librt, GLIBC_2_2, GLIBC_2_17) + + so that they are not present for newer architectures (such as + powerpc64le) with a 2.17 or later ABI baseline. But the forwarders + were not marked as compatibility symbols. As a result, on older + architectures, historic configure checks such as + + AC_CHECK_LIB(rt, clock_gettime) + + still cause linking against librt, even though this is completely + unnecessary. It also creates a needless porting hazard because + architectures behave differently when it comes to symbol availability. + + Reviewed-by: Carlos O'Donell + +diff --git a/rt/clock-compat.c b/rt/clock-compat.c +index f816973c05..11e71aa890 100644 +--- a/rt/clock-compat.c ++++ b/rt/clock-compat.c +@@ -30,14 +30,16 @@ + #if HAVE_IFUNC + # undef INIT_ARCH + # define INIT_ARCH() +-# define COMPAT_REDIRECT(name, proto, arglist) libc_ifunc (name, &__##name) ++# define COMPAT_REDIRECT(name, proto, arglist) libc_ifunc (name, &__##name) \ ++ compat_symbol (librt, name, name, GLIBC_2_2); + #else + # define COMPAT_REDIRECT(name, proto, arglist) \ + int \ + name proto \ + { \ + return __##name arglist; \ +- } ++ } \ ++ compat_symbol (librt, name, name, GLIBC_2_2); + #endif + + COMPAT_REDIRECT (clock_getres, diff --git a/contrib/guix/patches/glibc-2.27-riscv64-Use-__has_include-to-include-asm-syscalls.h.patch b/contrib/guix/patches/glibc-2.27-riscv64-Use-__has_include-to-include-asm-syscalls.h.patch new file mode 100644 index 00000000000..3738719fa3f --- /dev/null +++ b/contrib/guix/patches/glibc-2.27-riscv64-Use-__has_include-to-include-asm-syscalls.h.patch @@ -0,0 +1,23 @@ +Fixes a missing include in glibc. It is needed for riscv64 targets. +This patch can be removed when we upgrade glibc to > 2.27. + +diff --git a/sysdeps/unix/sysv/linux/riscv/flush-icache.c b/sysdeps/unix/sysv/linux/riscv/flush-icache.c +index d612ef4c6c..0b2042620b 100644 +--- a/sysdeps/unix/sysv/linux/riscv/flush-icache.c ++++ b/sysdeps/unix/sysv/linux/riscv/flush-icache.c +@@ -21,7 +21,11 @@ + #include + #include + #include +-#include ++#if __has_include () ++# include ++#else ++# include ++#endif + + typedef int (*func_type) (void *, void *, unsigned long int); + +-- +2.31.1 + diff --git a/contrib/guix/patches/glibc-2.27-riscv64-fix-incorrect-jal-with-HIDDEN_JUMPTARGET.patch b/contrib/guix/patches/glibc-2.27-riscv64-fix-incorrect-jal-with-HIDDEN_JUMPTARGET.patch new file mode 100644 index 00000000000..a8dde070e78 --- /dev/null +++ b/contrib/guix/patches/glibc-2.27-riscv64-fix-incorrect-jal-with-HIDDEN_JUMPTARGET.patch @@ -0,0 +1,41 @@ +Backported from: https://sourceware.org/git/?p=glibc.git;a=commit;h=68389203832ab39dd0dbaabbc4059e7fff51c29b +Context: https://sourceware.org/bugzilla/show_bug.cgi?id=28509 + +Resolves a build failure with glibc 2.27 + binutils >=2.40. +Patch can be removed if we update glibc to >= 2.35. 
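The substance of the fix, visible in the hunks below: intra-libc jumps must target the hidden local alias rather than the exported symbol, because binutils >= 2.40 rejects a direct RISC-V jump relocation against a symbol that may bind externally (see the bug report linked above). Schematically:

    -  j __sigsetjmp
    +  j HIDDEN_JUMPTARGET (__sigsetjmp)

with a matching libc_hidden_def (__setcontext) added so the hidden alias also exists for __setcontext.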
+ +diff --git a/sysdeps/riscv/setjmp.S b/sysdeps/riscv/setjmp.S +index cfbd276fc3..e2f8088a6e 100644 +--- a/sysdeps/riscv/setjmp.S ++++ b/sysdeps/riscv/setjmp.S +@@ -21,7 +21,7 @@ + + ENTRY (_setjmp) + li a1, 0 +- j __sigsetjmp ++ j HIDDEN_JUMPTARGET (__sigsetjmp) + END (_setjmp) + ENTRY (setjmp) + li a1, 1 +diff --git a/sysdeps/unix/sysv/linux/riscv/setcontext.S b/sysdeps/unix/sysv/linux/riscv/setcontext.S +index 9f1c7b41fd..a0d9575a08 100644 +--- a/sysdeps/unix/sysv/linux/riscv/setcontext.S ++++ b/sysdeps/unix/sysv/linux/riscv/setcontext.S +@@ -95,6 +95,7 @@ LEAF (__setcontext) + 99: j __syscall_error + + PSEUDO_END (__setcontext) ++libc_hidden_def (__setcontext) + weak_alias (__setcontext, setcontext) + + LEAF (__start_context) +@@ -108,7 +109,7 @@ LEAF (__start_context) + /* Invoke subsequent context if present, else exit(0). */ + mv a0, s2 + beqz s2, 1f +- jal __setcontext +-1: j exit ++ jal HIDDEN_JUMPTARGET (__setcontext) ++1: j HIDDEN_JUMPTARGET (exit) + + PSEUDO_END (__start_context) diff --git a/contrib/guix/patches/winpthreads-remap-guix-store.patch b/contrib/guix/patches/winpthreads-remap-guix-store.patch new file mode 100644 index 00000000000..e1f1a6eba53 --- /dev/null +++ b/contrib/guix/patches/winpthreads-remap-guix-store.patch @@ -0,0 +1,17 @@ +Without ffile-prefix-map, the debug symbols will contain paths for the +guix store which will include the hashes of each package. However, the +hash for the same package will differ when on different architectures. +In order to be reproducible regardless of the architecture used to build +the package, map all guix store prefixes to something fixed, e.g. /usr. + +--- a/mingw-w64-libraries/winpthreads/Makefile.in ++++ b/mingw-w64-libraries/winpthreads/Makefile.in +@@ -478,7 +478,7 @@ top_build_prefix = @top_build_prefix@ + top_builddir = @top_builddir@ + top_srcdir = @top_srcdir@ + SUBDIRS = . tests +-AM_CFLAGS = -Wall -DWIN32_LEAN_AND_MEAN $(am__append_1) ++AM_CFLAGS = -Wall -DWIN32_LEAN_AND_MEAN $(am__append_1) $(shell find /gnu/store -maxdepth 1 -mindepth 1 -type d -exec echo -n " -ffile-prefix-map={}=/usr" \;) + ACLOCAL_AMFLAGS = -I m4 + lib_LTLIBRARIES = libwinpthread.la + include_HEADERS = include/pthread.h include/sched.h include/semaphore.h include/pthread_unistd.h include/pthread_time.h include/pthread_compat.h include/pthread_signal.h diff --git a/contrib/guix/rust/cargo.scm b/contrib/guix/rust/cargo.scm new file mode 100644 index 00000000000..29adfedee94 --- /dev/null +++ b/contrib/guix/rust/cargo.scm @@ -0,0 +1,75 @@ +(use-modules (gnu packages) + ((gnu packages bash) #:select (bash-minimal)) + ((gnu packages certs) #:select (nss-certs)) + (gnu packages compression) + (gnu packages curl) + (gnu packages moreutils) + (gnu packages rust) + ((gnu packages tls) #:select (openssl)) + ((gnu packages web) #:select (jq)) + (guix build-system trivial) + (guix download) + ((guix licenses) #:prefix license:) + (guix packages)) + +;; We don't use (list rust "rust-src") for two reasons: +;; +;; - Hashes in Cargo.lock are replaced, resulting in: +;; error: checksum for ` ` changed between lock files +;; +;; - It drags in a bunch of unnecessary deps, including python. +;; See: guix graph --type=references rust | xdot - + +;; Instead, we create a new package with the unaltered rust source +;; and vendor the standard library in cargo.sh + +;; TODO: can we use inherit here? 
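;; How the package below is consumed (see contrib/guix/rust/cargo.sh): the
;; unpacked source tree lands in the store, and its library/Cargo.toml is
;; handed to `cargo vendor --sync` so the standard library's dependencies
;; are vendored alongside fcmp_pp_rust's own:
;;
;;   CARGO_OPTIONS+=" --sync $(store_path rust-std)/library/Cargo.toml"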
+ +(define-public rust-std + (package + (name "rust-std") + (version (package-version rust)) + ;; You'd expect (source (package-source rust)) to work here, + ;; but it refers to the source store item and NOT the .tar.gz archive + (source (origin + (method url-fetch) + (uri (origin-uri (package-source rust))) + (sha256 + (content-hash-value (origin-hash (package-source rust)))))) + (build-system trivial-build-system) + (native-inputs (list tar gzip)) + (arguments + `(#:modules ((guix build utils)) + #:builder + (begin + (use-modules (guix build utils)) + (let ((out (assoc-ref %outputs "out")) + (source (assoc-ref %build-inputs "source")) + (tar (search-input-file %build-inputs "/bin/tar")) + (gzip (search-input-file %build-inputs "/bin/gzip")) + (gzip-path (string-append (assoc-ref %build-inputs "gzip") "/bin"))) + (setenv "PATH" gzip-path) + (mkdir out) + (invoke tar "xvf" source "-C" out "--strip-components=1"))))) + (synopsis (package-synopsis rust)) + (description (package-description rust)) + (home-page (package-home-page rust)) + (license (package-license rust)))) + +(packages->manifest + (append + (list + bash-minimal + coreutils-minimal + curl + findutils ;; find + grep + gzip + jq + moreutils + nss-certs + openssl + sed + tar + (list rust "cargo") + rust-std))) diff --git a/contrib/guix/rust/cargo.sh b/contrib/guix/rust/cargo.sh new file mode 100644 index 00000000000..7ff5dff037b --- /dev/null +++ b/contrib/guix/rust/cargo.sh @@ -0,0 +1,70 @@ +#!/usr/bin/env bash +set -e -o pipefail + +# Environment variables for determinism +export LC_ALL=C +export SOURCE_DATE_EPOCH=1397818193 +export TAR_OPTIONS="--owner=0 --group=0 --numeric-owner --mtime='@${SOURCE_DATE_EPOCH}' --sort=name" +export TZ="UTC" + +# Given a package name and an output name, return the path of that output in our +# current guix environment +store_path() { + grep --extended-regexp "/[^-]{32}-${1}-[^-]+${2:+-${2}}" "${GUIX_ENVIRONMENT}/manifest" \ + | head --lines=1 \ + | sed --expression='s|\x29*$||' \ + --expression='s|^[[:space:]]*"||' \ + --expression='s|"[[:space:]]*$||' +} + +echo "Fetching rust dependencies..." + +cd /monero/src/fcmp_pp/fcmp_pp_rust + +# error: the cargo feature `public-dependency` requires a nightly version of Cargo, but this is the `stable` channel +# +# https://doc.rust-lang.org/cargo/reference/unstable.html#public-dependency +export RUSTC_BOOTSTRAP=1 + +# Assert that `Cargo.lock` will remain unchanged +CARGO_OPTIONS="--locked" + +# https://github.com/rust-lang/wg-cargo-std-aware/issues/23#issuecomment-1445119470 +# If we don't vendor std, we'll run into: 'error: no matching package named `compiler_builtins` found' during build. +CARGO_OPTIONS+=" --sync $(store_path rust-std)/library/Cargo.toml" + +# Vendor fcmp_pp_rust + std library deps +cargo vendor ${CARGO_OPTIONS} /rust/vendor + +cp -r "$(store_path rust-std)/library" /rust/library + +cd /rust/vendor + +# `cargo vendor` includes dozens of packages that aren't needed to build the standard library. +# We can't simply remove these packages, because cargo expects them to be there. +# Instead, we replace the packages with a stub, which is sufficient to pass cargo's checks. +while IFS= read -r line; do + cd "$line" + find . -not -path "." -not -name "Cargo.toml" -not -name ".cargo-checksum.json" -delete + mkdir src + touch src/lib.rs + + # Cargo.toml must remain unaltered. + # src/lib.rs must exist, but may be empty.
+ # 'e3b0...b855' is equivalent to `echo -n "" | sha256sum -` + # We can't set 'package' to the empty hash, as this might conflict with fcmp_pp_rust's Cargo.lock, resulting + # in the same error. + cat .cargo-checksum.json \ + | jq '{files: {"Cargo.toml": .files["Cargo.toml"]}, package}' \ + | jq '.files += {"src/lib.rs": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"}' \ + | sponge .cargo-checksum.json + cd .. +done < "/monero/contrib/guix/rust/stubs" + +cd /rust + +# Create deterministic archive +find . -print0 \ + | sort --zero-terminated \ + | tar --create --no-recursion --mode='u+rw,go+r-w,a+X' --null --files-from=- \ + | gzip -9n > "/monero/$RUST_DEPS_ARCHIVE" diff --git a/contrib/guix/rust/config.toml b/contrib/guix/rust/config.toml new file mode 100644 index 00000000000..68cb79fb9b6 --- /dev/null +++ b/contrib/guix/rust/config.toml @@ -0,0 +1,14 @@ +[source.crates-io] +replace-with = "vendored-sources" + +[source."git+https://github.com/kayabaNerve/crypto-bigint?branch=c-repr"] +git = "https://github.com/kayabaNerve/crypto-bigint" +branch = "c-repr" +replace-with = "vendored-sources" + +[source."git+https://github.com/kayabaNerve/fcmp-plus-plus"] +git = "https://github.com/kayabaNerve/fcmp-plus-plus" +replace-with = "vendored-sources" + +[source.vendored-sources] +directory = "/rust/vendor" diff --git a/contrib/guix/rust/stubs b/contrib/guix/rust/stubs new file mode 100644 index 00000000000..4ae6318769c --- /dev/null +++ b/contrib/guix/rust/stubs @@ -0,0 +1,30 @@ +addr2line +adler +allocator-api2 +cc +dlmalloc +fortanix-sgx-abi +getopts +gimli-0.28.1 +gimli +hermit-abi +memchr +miniz_oxide +object +r-efi +r-efi-alloc +rand +rand_xorshift +unicode-width +unwinding +wasi +windows-sys +windows-targets +windows_aarch64_gnullvm +windows_aarch64_msvc +windows_i686_gnu +windows_i686_gnullvm +windows_i686_msvc +windows_x86_64_gnu +windows_x86_64_gnullvm +windows_x86_64_msvc diff --git a/contrib/shell/git-utils.bash b/contrib/shell/git-utils.bash new file mode 100644 index 00000000000..0bca2d24384 --- /dev/null +++ b/contrib/shell/git-utils.bash @@ -0,0 +1,14 @@ +#!/usr/bin/env bash + +git_root() { + git rev-parse --show-toplevel 2> /dev/null +} + +git_head_version() { + local recent_tag + if recent_tag="$(git describe --exact-match HEAD 2> /dev/null)"; then + echo "${recent_tag}" + else + git rev-parse --short=12 HEAD + fi +} diff --git a/docs/RELEASE_CHECKLIST.md b/docs/RELEASE_CHECKLIST.md index cfd04f98d02..b36fa660fc2 100644 --- a/docs/RELEASE_CHECKLIST.md +++ b/docs/RELEASE_CHECKLIST.md @@ -41,9 +41,7 @@ - [ ] https://miningpoolstats.stream/monero - [ ] Release branch created - [ ] Update src/version.cpp.in with new version AND new name (if necessary) - - [ ] Update Gitian YML files in contrib/gitian/ to the new version number - [ ] Update README.md with new fork table entry (or at least update the Recommended Monero version) - - [ ] Update contrib/gitian/README.md so that the instructions reflect the current version - [ ] Update src/checkpoints/checkpoints.cpp with a recent hardcoded checkpoint - [ ] Update src/blocks/checkpoints.dat with ./monero-blockchain-export --output-file checkpoints.dat --block-stop --blocksdat - [ ] Update expected_block_hashes_hash in src/cryptonote_core/blockchain.cpp with checkpoints.dat sha256 hash diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 6190b40f830..ddada45bf0b 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -89,6 +89,7 @@ add_subdirectory(ringct) add_subdirectory(checkpoints) 
add_subdirectory(cryptonote_basic) add_subdirectory(cryptonote_core) +add_subdirectory(fcmp_pp) add_subdirectory(lmdb) add_subdirectory(multisig) add_subdirectory(net) diff --git a/src/blockchain_db/CMakeLists.txt b/src/blockchain_db/CMakeLists.txt index e94705b221d..5bcb16bc879 100644 --- a/src/blockchain_db/CMakeLists.txt +++ b/src/blockchain_db/CMakeLists.txt @@ -45,6 +45,7 @@ target_link_libraries(blockchain_db PUBLIC common cncrypto + fcmp_pp ringct ${LMDB_LIBRARY} ${Boost_FILESYSTEM_LIBRARY} diff --git a/src/blockchain_db/blockchain_db.cpp b/src/blockchain_db/blockchain_db.cpp index 894eb15c7c5..2c0cb503318 100644 --- a/src/blockchain_db/blockchain_db.cpp +++ b/src/blockchain_db/blockchain_db.cpp @@ -41,6 +41,60 @@ using epee::string_tools::pod_to_hex; +//--------------------------------------------------------------- +// Helper function to group outputs by unlock block +static void get_outs_by_unlock_block(const cryptonote::transaction &tx, + const std::vector &output_ids, + const uint64_t tx_height, + const bool miner_tx, + fcmp_pp::curve_trees::OutputsByUnlockBlock &outs_by_unlock_block_inout) +{ + const uint64_t unlock_block = cryptonote::get_unlock_block_index(tx.unlock_time, tx_height); + + CHECK_AND_ASSERT_THROW_MES(tx.vout.size() == output_ids.size(), "unexpected size of output ids"); + + for (std::size_t i = 0; i < tx.vout.size(); ++i) + { + const auto &out = tx.vout[i]; + + crypto::public_key output_public_key; + if (!cryptonote::get_output_public_key(out, output_public_key)) + throw std::runtime_error("Could not get an output public key from a tx output."); + + static_assert(CURRENT_TRANSACTION_VERSION == 2, "This section of code was written with 2 tx versions in mind. " + "Revisit this section and update for the new tx version."); + CHECK_AND_ASSERT_THROW_MES(tx.version == 1 || tx.version == 2, "encountered unexpected tx version"); + + if (!miner_tx && tx.version == 2) + CHECK_AND_ASSERT_THROW_MES(tx.rct_signatures.outPk.size() > i, "unexpected size of outPk"); + + rct::key commitment = (miner_tx || tx.version != 2) + ? 
rct::zeroCommit(out.amount) + : tx.rct_signatures.outPk[i].mask; + + auto output_pair = fcmp_pp::curve_trees::OutputPair{ + .output_pubkey = std::move(output_public_key), + .commitment = std::move(commitment) + }; + + auto output_context = fcmp_pp::curve_trees::OutputContext{ + .output_id = output_ids[i], + .output_pair = std::move(output_pair) + }; + + if (outs_by_unlock_block_inout.find(unlock_block) == outs_by_unlock_block_inout.end()) + { + auto new_vec = std::vector{std::move(output_context)}; + outs_by_unlock_block_inout[unlock_block] = std::move(new_vec); + } + else + { + outs_by_unlock_block_inout[unlock_block].emplace_back(std::move(output_context)); + } + } +} +//--------------------------------------------------------------- + namespace cryptonote { @@ -179,7 +233,7 @@ void BlockchainDB::pop_block() pop_block(blk, txs); } -void BlockchainDB::add_transaction(const crypto::hash& blk_hash, const std::pair& txp, const crypto::hash* tx_hash_ptr, const crypto::hash* tx_prunable_hash_ptr) +std::vector BlockchainDB::add_transaction(const crypto::hash& blk_hash, const std::pair& txp, const crypto::hash* tx_hash_ptr, const crypto::hash* tx_prunable_hash_ptr) { const transaction &tx = txp.first; @@ -223,7 +277,7 @@ void BlockchainDB::add_transaction(const crypto::hash& blk_hash, const std::pair uint64_t tx_id = add_transaction_data(blk_hash, txp, tx_hash, tx_prunable_hash); - std::vector amount_output_indices(tx.vout.size()); + std::vector output_indices(tx.vout.size()); // iterate tx.vout using indices instead of C++11 foreach syntax because // we need the index @@ -231,21 +285,35 @@ void BlockchainDB::add_transaction(const crypto::hash& blk_hash, const std::pair { // miner v2 txes have their coinbase output in one single out to save space, // and we store them as rct outputs with an identity mask + // note: get_outs_by_unlock_block mirrors this logic if (miner_tx && tx.version == 2) { cryptonote::tx_out vout = tx.vout[i]; rct::key commitment = rct::zeroCommit(vout.amount); vout.amount = 0; - amount_output_indices[i] = add_output(tx_hash, vout, i, tx.unlock_time, + output_indices[i] = add_output(tx_hash, vout, i, tx.unlock_time, &commitment); } else { - amount_output_indices[i] = add_output(tx_hash, tx.vout[i], i, tx.unlock_time, + output_indices[i] = add_output(tx_hash, tx.vout[i], i, tx.unlock_time, tx.version > 1 ? 
&tx.rct_signatures.outPk[i].mask : NULL); } } + + std::vector amount_output_indices; + std::vector output_ids; + amount_output_indices.reserve(output_indices.size()); + output_ids.reserve(output_indices.size()); + for (const auto &o_idx : output_indices) + { + amount_output_indices.push_back(o_idx.amount_index); + output_ids.push_back(o_idx.output_id); + } + add_tx_amount_output_indices(tx_id, amount_output_indices); + + return output_ids; } uint64_t BlockchainDB::add_block( const std::pair& blck @@ -273,9 +341,12 @@ uint64_t BlockchainDB::add_block( const std::pair& blck time1 = epee::misc_utils::get_tick_count(); + std::vector> output_ids; + output_ids.reserve(txs.size()); + uint64_t num_rct_outs = 0; blobdata miner_bd = tx_to_blob(blk.miner_tx); - add_transaction(blk_hash, std::make_pair(blk.miner_tx, blobdata_ref(miner_bd))); + std::vector miner_output_ids = add_transaction(blk_hash, std::make_pair(blk.miner_tx, blobdata_ref(miner_bd))); if (blk.miner_tx.version == 2) num_rct_outs += blk.miner_tx.vout.size(); int tx_i = 0; @@ -283,7 +354,7 @@ uint64_t BlockchainDB::add_block( const std::pair& blck for (const std::pair& tx : txs) { tx_hash = blk.tx_hashes[tx_i]; - add_transaction(blk_hash, tx, &tx_hash); + output_ids.push_back(add_transaction(blk_hash, tx, &tx_hash)); for (const auto &vout: tx.first.vout) { if (vout.amount == 0) @@ -294,9 +365,32 @@ uint64_t BlockchainDB::add_block( const std::pair& blck TIME_MEASURE_FINISH(time1); time_add_transaction += time1; + // When adding a block, we also need to keep track of when outputs unlock, so + // we can use them to grow the merkle tree used in fcmp's at that point. + fcmp_pp::curve_trees::OutputsByUnlockBlock outs_by_unlock_block; + + // Get miner tx's leaf tuples + get_outs_by_unlock_block( + blk.miner_tx, + miner_output_ids, + prev_height, + true/*miner_tx*/, + outs_by_unlock_block); + + // Get all other txs' leaf tuples + for (std::size_t i = 0; i < txs.size(); ++i) + { + get_outs_by_unlock_block( + txs[i].first, + output_ids[i], + prev_height, + false/*miner_tx*/, + outs_by_unlock_block); + } + // call out to subclass implementation to add the block & metadata time1 = epee::misc_utils::get_tick_count(); - add_block(blk, block_weight, long_term_block_weight, cumulative_difficulty, coins_generated, num_rct_outs, blk_hash); + add_block(blk, block_weight, long_term_block_weight, cumulative_difficulty, coins_generated, num_rct_outs, blk_hash, outs_by_unlock_block); TIME_MEASURE_FINISH(time1); time_add_block1 += time1; diff --git a/src/blockchain_db/blockchain_db.h b/src/blockchain_db/blockchain_db.h index 3e953da30d0..68a1cfdec29 100644 --- a/src/blockchain_db/blockchain_db.h +++ b/src/blockchain_db/blockchain_db.h @@ -32,6 +32,8 @@ #include #include +#include +#include #include #include "common/command_line.h" #include "crypto/hash.h" @@ -40,6 +42,7 @@ #include "cryptonote_basic/difficulty.h" #include "cryptonote_basic/hardfork.h" #include "cryptonote_protocol/enums.h" +#include "fcmp_pp/curve_trees.h" /** \file * Cryptonote Blockchain Database Interface @@ -187,6 +190,14 @@ struct txpool_tx_meta_t } }; +/** + * @brief a struct containing output indexes for convenience + */ +struct output_indexes_t +{ + uint64_t amount_index; + uint64_t output_id; +}; #define DBF_SAFE 1 #define DBF_FAST 2 @@ -398,6 +409,7 @@ class BlockchainDB * @param cumulative_difficulty the accumulated difficulty after this block * @param coins_generated the number of coins generated total after this block * @param blk_hash the hash of the block + * @param 
outs_by_unlock_block the outputs from this block to add to the merkle tree */ virtual void add_block( const block& blk , size_t block_weight @@ -406,6 +418,7 @@ class BlockchainDB , const uint64_t& coins_generated , uint64_t num_rct_outs , const crypto::hash& blk_hash + , const fcmp_pp::curve_trees::OutputsByUnlockBlock& outs_by_unlock_block ) = 0; /** @@ -470,8 +483,9 @@ class BlockchainDB * future, this tracking (of the number, at least) should be moved to * this class, as it is necessary and the same among all BlockchainDB. * - * It returns an amount output index, which is the index of the output - * for its specified amount. + * It returns the output indexes, which contains an amount output index (the + * index of the output for its specified amount) and output id (the global + * index of the output among all outputs of any amount). * * This data should be stored in such a manner that the only thing needed to * reverse the process is the tx_out. @@ -484,9 +498,9 @@ class BlockchainDB * @param local_index index of the output in its transaction * @param unlock_time unlock time/height of the output * @param commitment the rct commitment to the output amount - * @return amount output index + * @return output indexes */ - virtual uint64_t add_output(const crypto::hash& tx_hash, const tx_out& tx_output, const uint64_t& local_index, const uint64_t unlock_time, const rct::key *commitment) = 0; + virtual output_indexes_t add_output(const crypto::hash& tx_hash, const tx_out& tx_output, const uint64_t& local_index, const uint64_t unlock_time, const rct::key *commitment) = 0; /** * @brief store amount output indices for a tx's outputs @@ -567,8 +581,10 @@ class BlockchainDB * @param tx the transaction to add * @param tx_hash_ptr the hash of the transaction, if already calculated * @param tx_prunable_hash_ptr the hash of the prunable part of the transaction, if already calculated + * + * @return the global output ids of all outputs inserted */ - void add_transaction(const crypto::hash& blk_hash, const std::pair& tx, const crypto::hash* tx_hash_ptr = NULL, const crypto::hash* tx_prunable_hash_ptr = NULL); + std::vector add_transaction(const crypto::hash& blk_hash, const std::pair& tx, const crypto::hash* tx_hash_ptr = NULL, const crypto::hash* tx_prunable_hash_ptr = NULL); mutable uint64_t time_tx_exists = 0; //!< a performance metric uint64_t time_commit1 = 0; //!< a performance metric @@ -576,12 +592,14 @@ class BlockchainDB HardFork* m_hardfork; + std::shared_ptr m_curve_trees; + public: /** * @brief An empty constructor. */ - BlockchainDB(): m_hardfork(NULL), m_open(false) { } + BlockchainDB(): m_hardfork(NULL), m_open(false), m_curve_trees() { } /** * @brief An empty destructor. 
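Taken together, the hunks above reshape the write path: add_transaction() now surfaces the global output IDs it inserted so that add_block() can group each new output by the block index at which it unlocks. A minimal sketch of that call sequence (hypothetical standalone form of the code in blockchain_db.cpp above; get_outs_by_unlock_block() is the file-local helper):

    // Returns the global output IDs of the tx's outputs, in vout order.
    std::vector<uint64_t> output_ids =
        add_transaction(blk_hash, std::make_pair(tx, blobdata_ref(tx_blob)));

    // Group the new outputs by unlock block index; add_block() stores them in
    // the locked_outputs table, and the merkle tree is grown with them only
    // once the chain reaches that block.
    fcmp_pp::curve_trees::OutputsByUnlockBlock outs_by_unlock_block;
    get_outs_by_unlock_block(tx, output_ids, prev_height, false/*miner_tx*/, outs_by_unlock_block);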
@@ -1685,7 +1703,7 @@ class BlockchainDB * * @return false if the function returns false for any key image, otherwise true */ - virtual bool for_all_key_images(std::function) const = 0; + virtual bool for_all_key_images(std::function) const = 0; /** * @brief runs a function over a range of blocks @@ -1764,6 +1782,15 @@ class BlockchainDB */ virtual bool for_all_alt_blocks(std::function f, bool include_blob = false) const = 0; + // TODO: description and make private + virtual void grow_tree(std::vector &&new_outputs) = 0; + + virtual void trim_tree(const uint64_t trim_n_leaf_tuples, const uint64_t trim_block_id) = 0; + + // TODO: description + virtual bool audit_tree(const uint64_t expected_n_leaf_tuples) const = 0; + virtual uint64_t get_num_leaf_tuples() const = 0; + virtual std::array get_tree_root() const = 0; // // Hard fork related storage diff --git a/src/blockchain_db/lmdb/db_lmdb.cpp b/src/blockchain_db/lmdb/db_lmdb.cpp index d01119249cc..0def1e70511 100644 --- a/src/blockchain_db/lmdb/db_lmdb.cpp +++ b/src/blockchain_db/lmdb/db_lmdb.cpp @@ -54,7 +54,7 @@ using epee::string_tools::pod_to_hex; using namespace crypto; // Increase when the DB structure changes -#define VERSION 5 +#define VERSION 6 namespace { @@ -199,6 +199,10 @@ namespace * * spent_keys input hash - * + * locked_outputs block ID [{output ID, output pubkey, commitment}...] + * leaves leaf_idx {output ID, output pubkey, commitment} + * layers layer_idx [{child_chunk_idx, child_chunk_hash}...] + * * txpool_meta txn hash txn metadata * txpool_blob txn hash txn blob * @@ -210,7 +214,8 @@ namespace * attached as a prefix on the Data to serve as the DUPSORT key. * (DUPFIXED saves 8 bytes per record.) * - * The output_amounts table doesn't use a dummy key, but uses DUPSORT. + * The output_amounts, locked_outputs, and layers tables don't use a + * dummy key, but use DUPSORT. 
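 * For example, locked_outputs keys each record by unlock block ID and keeps
 * its {output ID, output pubkey, commitment} tuples as sorted duplicates
 * under that key; a single tuple is then found by pairing the block ID key
 * with the output ID as the duplicate-data prefix via MDB_GET_BOTH.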
*/ const char* const LMDB_BLOCKS = "blocks"; const char* const LMDB_BLOCK_HEIGHTS = "block_heights"; @@ -228,6 +233,11 @@ const char* const LMDB_OUTPUT_TXS = "output_txs"; const char* const LMDB_OUTPUT_AMOUNTS = "output_amounts"; const char* const LMDB_SPENT_KEYS = "spent_keys"; +// Curve trees merkle tree tables +const char* const LMDB_LOCKED_OUTPUTS = "locked_outputs"; +const char* const LMDB_LEAVES = "leaves"; +const char* const LMDB_LAYERS = "layers"; + const char* const LMDB_TXPOOL_META = "txpool_meta"; const char* const LMDB_TXPOOL_BLOB = "txpool_blob"; @@ -326,7 +336,22 @@ typedef struct mdb_block_info_4 uint64_t bi_long_term_block_weight; } mdb_block_info_4; -typedef mdb_block_info_4 mdb_block_info; +typedef struct mdb_block_info_5 +{ + uint64_t bi_height; + uint64_t bi_timestamp; + uint64_t bi_coins; + uint64_t bi_weight; // a size_t really but we need 32-bit compat + uint64_t bi_diff_lo; + uint64_t bi_diff_hi; + crypto::hash bi_hash; + uint64_t bi_cum_rct; + uint64_t bi_long_term_block_weight; + uint64_t bi_n_leaf_tuples; + std::array bi_tree_root; +} mdb_block_info_5; + +typedef mdb_block_info_5 mdb_block_info; typedef struct blk_height { crypto::hash bh_hash; @@ -351,6 +376,16 @@ typedef struct outtx { uint64_t local_index; } outtx; +typedef struct mdb_leaf { + uint64_t leaf_idx; + fcmp_pp::curve_trees::OutputContext output_context; +} mdb_leaf; + +typedef struct layer_val { + uint64_t child_chunk_idx; + std::array child_chunk_hash; +} layer_val; + std::atomic mdb_txn_safe::num_active_txns{0}; std::atomic_flag mdb_txn_safe::creation_gate = ATOMIC_FLAG_INIT; @@ -769,7 +804,7 @@ uint64_t BlockchainLMDB::get_estimated_batch_size(uint64_t batch_num_blocks, uin } void BlockchainLMDB::add_block(const block& blk, size_t block_weight, uint64_t long_term_block_weight, const difficulty_type& cumulative_difficulty, const uint64_t& coins_generated, - uint64_t num_rct_outs, const crypto::hash& blk_hash) + uint64_t num_rct_outs, const crypto::hash& blk_hash, const fcmp_pp::curve_trees::OutputsByUnlockBlock& outs_by_unlock_block) { LOG_PRINT_L3("BlockchainLMDB::" << __func__); check_open(); @@ -797,6 +832,13 @@ void BlockchainLMDB::add_block(const block& blk, size_t block_weight, uint64_t l throw0(BLOCK_PARENT_DNE("Top block is not new block's parent")); } + // Grow the tree with outputs that unlock at this block height + auto unlocked_outputs = this->get_outs_at_unlock_block_id(m_height); + this->grow_tree(std::move(unlocked_outputs)); + + // Now that we've used the unlocked leaves to grow the tree, we can delete them from the locked outputs table + this->del_locked_outs_at_block_id(m_height); + int result = 0; MDB_val_set(key, m_height); @@ -830,6 +872,8 @@ void BlockchainLMDB::add_block(const block& blk, size_t block_weight, uint64_t l bi.bi_cum_rct += bi_prev->bi_cum_rct; } bi.bi_long_term_block_weight = long_term_block_weight; + bi.bi_n_leaf_tuples = this->get_num_leaf_tuples(); + bi.bi_tree_root = this->get_tree_root(); MDB_val_set(val, bi); result = mdb_cursor_put(m_cur_block_info, (MDB_val *)&zerokval, &val, MDB_APPENDDUP); @@ -840,6 +884,21 @@ void BlockchainLMDB::add_block(const block& blk, size_t block_weight, uint64_t l if (result) throw0(DB_ERROR(lmdb_error("Failed to add block height by hash to db transaction: ", result).c_str())); + CURSOR(locked_outputs) + + // Add the locked outputs from this block to the locked outputs table + for (const auto &unlock_block : outs_by_unlock_block) + { + MDB_val_set(k_block_id, unlock_block.first); + for (const auto &locked_output : 
unlock_block.second) + { + MDB_val_set(v_output, locked_output); + result = mdb_cursor_put(m_cur_locked_outputs, &k_block_id, &v_output, MDB_APPENDDUP); + if (result != MDB_SUCCESS) + throw0(DB_ERROR(lmdb_error("Failed to add locked output: ", result).c_str())); + } + } + // we use weight as a proxy for size, since we don't have size but weight is >= size // and often actually equal m_cum_size += block_weight; @@ -868,6 +927,8 @@ void BlockchainLMDB::remove_block() // must use h now; deleting from m_block_info will invalidate it mdb_block_info *bi = (mdb_block_info *)h.mv_data; + const uint64_t block_id = bi->bi_height; + const uint64_t old_n_leaf_tuples = bi->bi_n_leaf_tuples; blk_height bh = {bi->bi_hash, 0}; h.mv_data = (void *)&bh; h.mv_size = sizeof(bh); @@ -881,6 +942,13 @@ void BlockchainLMDB::remove_block() if ((result = mdb_cursor_del(m_cur_block_info, 0))) throw1(DB_ERROR(lmdb_error("Failed to add removal of block info to db transaction: ", result).c_str())); + + // Get n_leaf_tuples from the new tip so we can trim the curve trees tree to the new tip + const uint64_t new_n_leaf_tuples = get_top_block_n_leaf_tuples(); + if (new_n_leaf_tuples > old_n_leaf_tuples) + throw1(DB_ERROR("Unexpected: more leaf tuples are in prev block, tree is expected to only grow")); + const uint64_t trim_n_leaf_tuples = old_n_leaf_tuples - new_n_leaf_tuples; + this->trim_tree(trim_n_leaf_tuples, block_id/*trim_block_id*/); } uint64_t BlockchainLMDB::add_transaction_data(const crypto::hash& blk_hash, const std::pair& txp, const crypto::hash& tx_hash, const crypto::hash& tx_prunable_hash) @@ -1046,7 +1114,7 @@ void BlockchainLMDB::remove_transaction_data(const crypto::hash& tx_hash, const throw1(DB_ERROR("Failed to add removal of tx index to db transaction")); } -uint64_t BlockchainLMDB::add_output(const crypto::hash& tx_hash, +output_indexes_t BlockchainLMDB::add_output(const crypto::hash& tx_hash, const tx_out& tx_output, const uint64_t& local_index, const uint64_t unlock_time, @@ -1110,7 +1178,10 @@ uint64_t BlockchainLMDB::add_output(const crypto::hash& tx_hash, if ((result = mdb_cursor_put(m_cur_output_amounts, &val_amount, &data, MDB_APPENDDUP))) throw0(DB_ERROR(lmdb_error("Failed to add output pubkey to db transaction: ", result).c_str())); - return ok.amount_index; + return output_indexes_t{ + .amount_index = ok.amount_index, + .output_id = ok.output_id + }; } void BlockchainLMDB::add_tx_amount_output_indices(const uint64_t tx_id, @@ -1167,125 +1238,1172 @@ void BlockchainLMDB::remove_output(const uint64_t amount, const uint64_t& out_in CURSOR(output_amounts); CURSOR(output_txs); - MDB_val_set(k, amount); - MDB_val_set(v, out_index); + MDB_val_set(k, amount); + MDB_val_set(v, out_index); + + auto result = mdb_cursor_get(m_cur_output_amounts, &k, &v, MDB_GET_BOTH); + if (result == MDB_NOTFOUND) + throw1(OUTPUT_DNE("Attempting to get an output index by amount and amount index, but amount not found")); + else if (result) + throw0(DB_ERROR(lmdb_error("DB error attempting to get an output", result).c_str())); + + const pre_rct_outkey *ok = (const pre_rct_outkey *)v.mv_data; + MDB_val_set(otxk, ok->output_id); + result = mdb_cursor_get(m_cur_output_txs, (MDB_val *)&zerokval, &otxk, MDB_GET_BOTH); + if (result == MDB_NOTFOUND) + { + throw0(DB_ERROR("Unexpected: global output index not found in m_output_txs")); + } + else if (result) + { + throw1(DB_ERROR(lmdb_error("Error adding removal of output tx to db transaction", result).c_str())); + } + + // Remove output from locked outputs table if present. 
We expect all valid + // outputs to be in the locked outputs table because remove_output is called + // when removing the top block from the chain, and all outputs from the top + // block are expected to be locked until they are at least 10 blocks old (10 + // is the lower bound). An output might not be in the locked outputs table if + // it is invalid, then gets removed from the locked outputs table upon growing + // the tree. + // TODO: test case where we add an invalid output to the chain, grow the tree + // in the block in which that output unlocks, pop blocks to remove that output + // from the chain, then progress the chain again. + CURSOR(locked_outputs); + + const uint64_t unlock_block = cryptonote::get_unlock_block_index(ok->data.unlock_time, ok->data.height); + + MDB_val_set(k_block_id, unlock_block); + MDB_val_set(v_output, ok->output_id); + + result = mdb_cursor_get(m_cur_locked_outputs, &k_block_id, &v_output, MDB_GET_BOTH); + if (result == MDB_NOTFOUND) + { + // We expect this output is invalid + } + else if (result) + { + throw1(DB_ERROR(lmdb_error("Error adding removal of locked output to db transaction", result).c_str())); + } + else + { + result = mdb_cursor_del(m_cur_locked_outputs, 0); + if (result) + throw0(DB_ERROR(lmdb_error(std::string("Error deleting locked output index ").append(boost::lexical_cast(out_index).append(": ")).c_str(), result).c_str())); + } + + result = mdb_cursor_del(m_cur_output_txs, 0); + if (result) + throw0(DB_ERROR(lmdb_error(std::string("Error deleting output index ").append(boost::lexical_cast(out_index).append(": ")).c_str(), result).c_str())); + + // now delete the amount + result = mdb_cursor_del(m_cur_output_amounts, 0); + if (result) + throw0(DB_ERROR(lmdb_error(std::string("Error deleting amount for output index ").append(boost::lexical_cast(out_index).append(": ")).c_str(), result).c_str())); +} + +void BlockchainLMDB::prune_outputs(uint64_t amount) +{ + LOG_PRINT_L3("BlockchainLMDB::" << __func__); + check_open(); + mdb_txn_cursors *m_cursors = &m_wcursors; + CURSOR(output_amounts); + CURSOR(output_txs); + + MINFO("Pruning outputs for amount " << amount); + + MDB_val v; + MDB_val_set(k, amount); + int result = mdb_cursor_get(m_cur_output_amounts, &k, &v, MDB_SET); + if (result == MDB_NOTFOUND) + return; + if (result) + throw0(DB_ERROR(lmdb_error("Error looking up outputs: ", result).c_str())); + + // gather output ids + mdb_size_t num_elems; + mdb_cursor_count(m_cur_output_amounts, &num_elems); + MINFO(num_elems << " outputs found"); + std::vector output_ids; + output_ids.reserve(num_elems); + while (1) + { + const pre_rct_outkey *okp = (const pre_rct_outkey *)v.mv_data; + output_ids.push_back(okp->output_id); + MDEBUG("output id " << okp->output_id); + result = mdb_cursor_get(m_cur_output_amounts, &k, &v, MDB_NEXT_DUP); + if (result == MDB_NOTFOUND) + break; + if (result) + throw0(DB_ERROR(lmdb_error("Error counting outputs: ", result).c_str())); + } + if (output_ids.size() != num_elems) + throw0(DB_ERROR("Unexpected number of outputs")); + + result = mdb_cursor_del(m_cur_output_amounts, MDB_NODUPDATA); + if (result) + throw0(DB_ERROR(lmdb_error("Error deleting outputs: ", result).c_str())); + + for (uint64_t output_id: output_ids) + { + MDB_val_set(v, output_id); + result = mdb_cursor_get(m_cur_output_txs, (MDB_val *)&zerokval, &v, MDB_GET_BOTH); + if (result) + throw0(DB_ERROR(lmdb_error("Error looking up output: ", result).c_str())); + result = mdb_cursor_del(m_cur_output_txs, 0); + if (result) + throw0(DB_ERROR(lmdb_error("Error 
deleting output: ", result).c_str())); + } +} + +void BlockchainLMDB::add_spent_key(const crypto::key_image& k_image) +{ + LOG_PRINT_L3("BlockchainLMDB::" << __func__); + check_open(); + mdb_txn_cursors *m_cursors = &m_wcursors; + + CURSOR(spent_keys) + + crypto::key_image_y k_image_y; + crypto::key_image_to_y(k_image, k_image_y); + + MDB_val k = {sizeof(k_image_y), (void *)&k_image_y}; + if (auto result = mdb_cursor_put(m_cur_spent_keys, (MDB_val *)&zerokval, &k, MDB_NODUPDATA)) { + if (result == MDB_KEYEXIST) + throw1(KEY_IMAGE_EXISTS("Attempting to add spent key image that's already in the db")); + else + throw1(DB_ERROR(lmdb_error("Error adding spent key image to db transaction: ", result).c_str())); + } +} + +void BlockchainLMDB::remove_spent_key(const crypto::key_image& k_image) +{ + LOG_PRINT_L3("BlockchainLMDB::" << __func__); + check_open(); + mdb_txn_cursors *m_cursors = &m_wcursors; + + CURSOR(spent_keys) + + crypto::key_image_y k_image_y; + crypto::key_image_to_y(k_image, k_image_y); + + MDB_val k = {sizeof(k_image_y), (void *)&k_image_y}; + auto result = mdb_cursor_get(m_cur_spent_keys, (MDB_val *)&zerokval, &k, MDB_GET_BOTH); + if (result != 0 && result != MDB_NOTFOUND) + throw1(DB_ERROR(lmdb_error("Error finding spent key to remove", result).c_str())); + if (!result) + { + result = mdb_cursor_del(m_cur_spent_keys, 0); + if (result) + throw1(DB_ERROR(lmdb_error("Error adding removal of key image to db transaction", result).c_str())); + } +} + +void BlockchainLMDB::grow_tree(std::vector &&new_outputs) +{ + LOG_PRINT_L3("BlockchainLMDB::" << __func__); + if (new_outputs.empty()) + return; + + check_open(); + mdb_txn_cursors *m_cursors = &m_wcursors; + + CHECK_AND_ASSERT_THROW_MES(m_write_txn != nullptr, "Must have m_write_txn set to grow tree"); + + CURSOR(leaves) + + // Get the number of leaf tuples that exist in the tree + const uint64_t old_n_leaf_tuples = this->get_num_leaf_tuples(); + + // Read every layer's last hash + const auto last_hashes = this->get_tree_last_hashes(); + + // Use the number of leaf tuples and the existing last hashes to get a struct we can use to extend the tree + CHECK_AND_ASSERT_THROW_MES(m_curve_trees != nullptr, "curve trees must be set"); + auto tree_extension = m_curve_trees->get_tree_extension(old_n_leaf_tuples, last_hashes, std::move(new_outputs)); + + // Insert the leaves + // TODO: grow_leaves + auto &leaves = tree_extension.leaves; + for (uint64_t i = 0; i < leaves.tuples.size(); ++i) + { + const uint64_t leaf_idx = i + leaves.start_leaf_tuple_idx; + mdb_leaf val{.leaf_idx = leaf_idx, .output_context = std::move(leaves.tuples[i])}; + MDB_val_set(v, val); + + int result = mdb_cursor_put(m_cur_leaves, (MDB_val *)&zerokval, &v, MDB_APPENDDUP); + if (result != MDB_SUCCESS) + throw0(DB_ERROR(lmdb_error("Failed to add leaf: ", result).c_str())); + } + + // Grow the layers + // TODO: grow_layers + const auto &c2_extensions = tree_extension.c2_layer_extensions; + const auto &c1_extensions = tree_extension.c1_layer_extensions; + + bool use_c2 = true; + uint64_t c2_idx = 0; + uint64_t c1_idx = 0; + for (uint64_t i = 0; i < (c2_extensions.size() + c1_extensions.size()); ++i) + { + const uint64_t layer_idx = c2_idx + c1_idx; + MDEBUG("Growing layer " << layer_idx); + + if (use_c2) + { + if (layer_idx % 2 != 0) + throw0(DB_ERROR(("Growing odd c2 layer, expected even layer idx for c2: " + + std::to_string(layer_idx)).c_str())); + + this->grow_layer(m_curve_trees->m_c2, + c2_extensions, + c2_idx, + layer_idx); + + ++c2_idx; + } + else + { + if (layer_idx 
% 2 == 0) + throw0(DB_ERROR(("Growing even c1 layer, expected odd layer idx for c1: " + + std::to_string(layer_idx)).c_str())); + + this->grow_layer(m_curve_trees->m_c1, + c1_extensions, + c1_idx, + layer_idx); + + ++c1_idx; + } + + use_c2 = !use_c2; + } +} + +template +void BlockchainLMDB::grow_layer(const std::unique_ptr &curve, + const std::vector> &layer_extensions, + const uint64_t ext_idx, + const uint64_t layer_idx) +{ + LOG_PRINT_L3("BlockchainLMDB::" << __func__); + check_open(); + mdb_txn_cursors *m_cursors = &m_wcursors; + + CURSOR(layers) + + CHECK_AND_ASSERT_THROW_MES(ext_idx < layer_extensions.size(), "unexpected layer extension"); + const auto &ext = layer_extensions[ext_idx]; + + CHECK_AND_ASSERT_THROW_MES(!ext.hashes.empty(), "empty layer extension"); + + // TODO: make sure ext.start_idx lines up with the end of the layer + + MDB_val_copy k(layer_idx); + + if (ext.update_existing_last_hash) + { + // We updated the last hash, so update it + layer_val lv; + lv.child_chunk_idx = ext.start_idx; + lv.child_chunk_hash = curve->to_bytes(ext.hashes.front()); + MDB_val_set(v, lv); + + // We expect to overwrite the existing hash + // TODO: make sure the hash already exists and is the existing last hash + int result = mdb_cursor_put(m_cur_layers, &k, &v, 0); + if (result != MDB_SUCCESS) + throw0(DB_ERROR(lmdb_error("Failed to update chunk hash: ", result).c_str())); + } + + // Now add all the new hashes found in the extension + for (uint64_t i = ext.update_existing_last_hash ? 1 : 0; i < ext.hashes.size(); ++i) + { + layer_val lv; + lv.child_chunk_idx = i + ext.start_idx; + lv.child_chunk_hash = curve->to_bytes(ext.hashes[i]); + MDB_val_set(v, lv); + + int result = mdb_cursor_put(m_cur_layers, &k, &v, MDB_APPENDDUP); + if (result != MDB_SUCCESS) + throw0(DB_ERROR(lmdb_error("Failed to add hash: ", result).c_str())); + } +} + +void BlockchainLMDB::trim_tree(const uint64_t trim_n_leaf_tuples, const uint64_t trim_block_id) +{ + LOG_PRINT_L3("BlockchainLMDB::" << __func__); + if (trim_n_leaf_tuples == 0) + return; + + check_open(); + mdb_txn_cursors *m_cursors = &m_wcursors; + + CURSOR(leaves) + CURSOR(locked_outputs) + CURSOR(layers) + + CHECK_AND_ASSERT_THROW_MES(m_write_txn != nullptr, "Must have m_write_txn set to trim tree"); + + const uint64_t old_n_leaf_tuples = this->get_num_leaf_tuples(); + CHECK_AND_ASSERT_THROW_MES(old_n_leaf_tuples >= trim_n_leaf_tuples, "cannot trim more leaves than exist"); + + CHECK_AND_ASSERT_THROW_MES(m_curve_trees != nullptr, "curve trees must be set"); + const auto trim_instructions = m_curve_trees->get_trim_instructions(old_n_leaf_tuples, trim_n_leaf_tuples); + + // Do initial tree reads + const auto last_chunk_children_to_trim = this->get_last_chunk_children_to_trim(trim_instructions); + const auto last_hashes_to_trim = this->get_last_hashes_to_trim(trim_instructions); + + // Get the new hashes, wrapped in a simple struct we can use to trim the tree + const auto tree_reduction = m_curve_trees->get_tree_reduction( + trim_instructions, + last_chunk_children_to_trim, + last_hashes_to_trim); + + // Use tree reduction to trim tree + CHECK_AND_ASSERT_THROW_MES((tree_reduction.new_total_leaf_tuples + trim_n_leaf_tuples) == old_n_leaf_tuples, + "unexpected new total leaves"); + + // Trim the leaves + // TODO: trim_leaves + MDB_val_set(k_block_id, trim_block_id); + for (uint64_t i = 0; i < trim_n_leaf_tuples; ++i) + { + uint64_t leaf_tuple_idx = (old_n_leaf_tuples - trim_n_leaf_tuples + i); + + MDB_val_copy k(leaf_tuple_idx); + MDB_val v = k; + int result = 
mdb_cursor_get(m_cur_leaves, (MDB_val *)&zerokval, &v, MDB_GET_BOTH); + if (result == MDB_NOTFOUND) + throw0(DB_ERROR("leaf not found")); // TODO: specific error type instead of DB_ERROR + if (result != MDB_SUCCESS) + throw0(DB_ERROR(lmdb_error("Failed to get leaf: ", result).c_str())); + + // Re-add the output to the locked output table in order. The output should + // be in the outputs tables. + const auto *o = (mdb_leaf *)v.mv_data; + MDB_val_set(v_output, o->output_context); + MDEBUG("Re-adding locked output_id: " << o->output_context.output_id << " , unlock block: " << trim_block_id); + result = mdb_cursor_put(m_cur_locked_outputs, &k_block_id, &v_output, MDB_APPENDDUP); + if (result != MDB_SUCCESS) + throw0(DB_ERROR(lmdb_error("Failed to re-add locked output: ", result).c_str())); + + // Delete the leaf + result = mdb_cursor_del(m_cur_leaves, 0); + if (result != MDB_SUCCESS) + throw0(DB_ERROR(lmdb_error("Error removing leaf: ", result).c_str())); + + MDEBUG("Successfully removed leaf at leaf_tuple_idx: " << leaf_tuple_idx); + } + + // Trim the layers + // TODO: trim_layers + const auto &c2_layer_reductions = tree_reduction.c2_layer_reductions; + const auto &c1_layer_reductions = tree_reduction.c1_layer_reductions; + + const std::size_t n_layers = c2_layer_reductions.size() + c1_layer_reductions.size(); + + bool use_c2 = true; + uint64_t c2_idx = 0; + uint64_t c1_idx = 0; + for (uint64_t i = 0; i < n_layers; ++i) + { + if (use_c2) + { + CHECK_AND_ASSERT_THROW_MES(c2_idx < c2_layer_reductions.size(), "unexpected c2 layer reduction"); + const auto &c2_reduction = c2_layer_reductions[c2_idx]; + this->trim_layer(m_curve_trees->m_c2, c2_reduction, i); + ++c2_idx; + } + else + { + CHECK_AND_ASSERT_THROW_MES(c1_idx < c1_layer_reductions.size(), "unexpected c1 layer reduction"); + const auto &c1_reduction = c1_layer_reductions[c1_idx]; + this->trim_layer(m_curve_trees->m_c1, c1_reduction, i); + ++c1_idx; + } + + use_c2 = !use_c2; + } + + // Trim any remaining layers in layers after the root + // TODO: trim_leftovers_after_root + if (n_layers > 0) + { + const uint64_t expected_root_idx = n_layers - 1; + while (1) + { + MDB_val k, v; + int result = mdb_cursor_get(m_cur_layers, &k, &v, MDB_LAST); + if (result != MDB_SUCCESS) + throw0(DB_ERROR(lmdb_error("Failed to get last elem: ", result).c_str())); + + const uint64_t last_layer_idx = *(uint64_t *)k.mv_data; + if (last_layer_idx > expected_root_idx) + { + // Delete all elements in layers after the root + result = mdb_cursor_del(m_cur_layers, MDB_NODUPDATA); + if (result != MDB_SUCCESS) + throw0(DB_ERROR(lmdb_error("Error removing elems after root: ", result).c_str())); + } + else if (last_layer_idx < expected_root_idx) + { + throw0(DB_ERROR("Encountered unexpected last elem in tree before the root")); + } + else // last_layer_idx == expected_root_idx + { + // We've trimmed all layers past the root, we're done + break; + } + } + } + else // n_layers == 0 + { + // Empty the layers table, no elems should remain + int result = mdb_drop(*m_write_txn, m_layers, 0); + if (result != MDB_SUCCESS) + throw0(DB_ERROR(lmdb_error("Error emptying layers table: ", result).c_str())); + } +} + +template +void BlockchainLMDB::trim_layer(const std::unique_ptr &curve, + const fcmp_pp::curve_trees::LayerReduction &layer_reduction, + const uint64_t layer_idx) +{ + LOG_PRINT_L3("BlockchainLMDB::" << __func__); + check_open(); + mdb_txn_cursors *m_cursors = &m_wcursors; + + CURSOR(layers) + + MDEBUG("Trimming layer " << layer_idx); + MDB_val_copy k(layer_idx); + + // 
Get the number of existing elements in the layer + // TODO: get_num_elems_in_layer + uint64_t old_n_elems_in_layer = 0; + { + // Get the first record in a layer so we can then get the last record + MDB_val v; + int result = mdb_cursor_get(m_cur_layers, &k, &v, MDB_SET); + if (result != MDB_SUCCESS) + throw0(DB_ERROR(lmdb_error("Failed to get first record in layer: ", result).c_str())); + + result = mdb_cursor_get(m_cur_layers, &k, &v, MDB_LAST_DUP); + if (result != MDB_SUCCESS) + throw0(DB_ERROR(lmdb_error("Failed to get last elem: ", result).c_str())); + + const auto *lv = (layer_val *)v.mv_data; + old_n_elems_in_layer = (1 + lv->child_chunk_idx); + } + + CHECK_AND_ASSERT_THROW_MES(old_n_elems_in_layer >= layer_reduction.new_total_parents, + "unexpected old n elems in layer"); + const uint64_t trim_n_elems_in_layer = old_n_elems_in_layer - layer_reduction.new_total_parents; + + // Delete the elements + for (uint64_t i = 0; i < trim_n_elems_in_layer; ++i) + { + uint64_t last_elem_idx = (old_n_elems_in_layer - 1 - i); + MDB_val_set(v, last_elem_idx); + + int result = mdb_cursor_get(m_cur_layers, &k, &v, MDB_GET_BOTH); + if (result == MDB_NOTFOUND) + throw0(DB_ERROR("leaf not found")); // TODO: specific error type instead of DB_ERROR + if (result != MDB_SUCCESS) + throw0(DB_ERROR(lmdb_error("Failed to get elem: ", result).c_str())); + + result = mdb_cursor_del(m_cur_layers, 0); + if (result != MDB_SUCCESS) + throw0(DB_ERROR(lmdb_error("Error removing elem: ", result).c_str())); + + MDEBUG("Successfully removed elem at layer_idx: " << layer_idx << " , last_elem_idx: " << last_elem_idx); + } + + // Update the last element if needed + if (layer_reduction.update_existing_last_hash) + { + layer_val lv; + lv.child_chunk_idx = layer_reduction.new_total_parents - 1; + lv.child_chunk_hash = curve->to_bytes(layer_reduction.new_last_hash); + MDB_val_set(v, lv); + + // We expect to overwrite the existing hash + // TODO: make sure the hash already exists and is the existing last hash + int result = mdb_cursor_put(m_cur_layers, &k, &v, 0); + if (result != MDB_SUCCESS) + throw0(DB_ERROR(lmdb_error("Failed to update chunk hash: ", result).c_str())); + } +} + +uint64_t BlockchainLMDB::get_num_leaf_tuples() const +{ + LOG_PRINT_L3("BlockchainLMDB::" << __func__); + check_open(); + + TXN_PREFIX_RDONLY(); + RCURSOR(leaves) + + // Get the number of leaf tuples in the tree + std::uint64_t n_leaf_tuples = 0; + + { + MDB_val k, v; + int result = mdb_cursor_get(m_cur_leaves, &k, &v, MDB_LAST); + if (result == MDB_NOTFOUND) + n_leaf_tuples = 0; + else if (result == MDB_SUCCESS) + n_leaf_tuples = 1 + ((const mdb_leaf*)v.mv_data)->leaf_idx; + else + throw0(DB_ERROR(lmdb_error("Failed to get last leaf: ", result).c_str())); + } + + TXN_POSTFIX_RDONLY(); + + return n_leaf_tuples; +} + +uint64_t BlockchainLMDB::get_top_block_n_leaf_tuples() const +{ + LOG_PRINT_L3("BlockchainLMDB::" << __func__); + check_open(); + + TXN_PREFIX_RDONLY(); + RCURSOR(block_info); + + // if no blocks, return 0 + uint64_t m_height = height(); + if (m_height == 0) + { + return 0; + } + + MDB_val_copy k(m_height - 1); + MDB_val h = k; + int result = 0; + if ((result = mdb_cursor_get(m_cur_block_info, (MDB_val *)&zerokval, &h, MDB_GET_BOTH))) + throw1(BLOCK_DNE(lmdb_error("Failed to get top block: ", result).c_str())); + + const uint64_t n_leaf_tuples = ((mdb_block_info *)h.mv_data)->bi_n_leaf_tuples; + TXN_POSTFIX_RDONLY(); + return n_leaf_tuples; +} + +std::array BlockchainLMDB::get_tree_root() const +{ + LOG_PRINT_L3("BlockchainLMDB::" << 
+std::array BlockchainLMDB::get_tree_root() const +{ + LOG_PRINT_L3("BlockchainLMDB::" << __func__); + check_open(); + + TXN_PREFIX_RDONLY(); + RCURSOR(layers) + + std::array root; + + { + MDB_val k, v; + int result = mdb_cursor_get(m_cur_layers, &k, &v, MDB_LAST); + if (result == MDB_SUCCESS) + { + auto *lv = (layer_val *)v.mv_data; + root = std::move(lv->child_chunk_hash); + } + else if (result != MDB_NOTFOUND) + throw0(DB_ERROR(lmdb_error("Failed to get last layer elem: ", result).c_str())); + } + + TXN_POSTFIX_RDONLY(); + + return root; +} + +fcmp_pp::curve_trees::CurveTreesV1::LastHashes BlockchainLMDB::get_tree_last_hashes() const +{ + LOG_PRINT_L3("BlockchainLMDB::" << __func__); + check_open(); + + TXN_PREFIX_RDONLY(); + RCURSOR(layers) + + fcmp_pp::curve_trees::CurveTreesV1::LastHashes last_hashes; + auto &c1_last_hashes = last_hashes.c1_last_hashes; + auto &c2_last_hashes = last_hashes.c2_last_hashes; + + // Traverse the tree layer-by-layer starting at the layer closest to leaf layer + uint64_t layer_idx = 0; + while (1) + { + MDB_val_copy k(layer_idx); + MDB_val v; + + // Get the first record in a layer so we can then get the last record + int result = mdb_cursor_get(m_cur_layers, &k, &v, MDB_SET); + if (result == MDB_NOTFOUND) + break; + if (result != MDB_SUCCESS) + throw0(DB_ERROR(lmdb_error("Failed to get first record in layer: ", result).c_str())); + + // Get the last record in a layer + result = mdb_cursor_get(m_cur_layers, &k, &v, MDB_LAST_DUP); + if (result != MDB_SUCCESS) + throw0(DB_ERROR(lmdb_error("Failed to get last record in layer: ", result).c_str())); + + const auto *lv = (layer_val *)v.mv_data; + MDEBUG("Reading last hash at layer_idx: " << layer_idx << " , lv->child_chunk_idx: " << lv->child_chunk_idx); + + const bool use_c2 = (layer_idx % 2) == 0; + if (use_c2) + { + auto point = m_curve_trees->m_c2->from_bytes(lv->child_chunk_hash); + c2_last_hashes.emplace_back(std::move(point)); + } + else + { + auto point = m_curve_trees->m_c1->from_bytes(lv->child_chunk_hash); + c1_last_hashes.emplace_back(std::move(point)); + } + + ++layer_idx; + } + + TXN_POSTFIX_RDONLY(); + + return last_hashes; +} + +fcmp_pp::curve_trees::CurveTreesV1::LastChunkChildrenToTrim BlockchainLMDB::get_last_chunk_children_to_trim( + const std::vector &trim_instructions) const +{ + LOG_PRINT_L3("BlockchainLMDB::" << __func__); + + fcmp_pp::curve_trees::CurveTreesV1::LastChunkChildrenToTrim last_chunk_children_to_trim; + if (trim_instructions.empty()) + return last_chunk_children_to_trim; + + check_open(); + + CHECK_AND_ASSERT_THROW_MES(m_curve_trees != nullptr, "curve trees must be set"); + + TXN_PREFIX_RDONLY(); + RCURSOR(leaves) + RCURSOR(layers) + + auto &c1_last_children_out = last_chunk_children_to_trim.c1_children; + auto &c2_last_children_out = last_chunk_children_to_trim.c2_children; + + // Get the leaves to trim + // TODO: separate function for leaves + { + CHECK_AND_ASSERT_THROW_MES(!trim_instructions.empty(), "no instructions"); + const auto &trim_leaf_layer_instructions = trim_instructions[0]; + + std::vector leaves_to_trim; + + if (trim_leaf_layer_instructions.end_trim_idx > trim_leaf_layer_instructions.start_trim_idx) + { + leaves_to_trim.reserve(trim_leaf_layer_instructions.end_trim_idx - trim_leaf_layer_instructions.start_trim_idx); + + uint64_t idx = trim_leaf_layer_instructions.start_trim_idx; + CHECK_AND_ASSERT_THROW_MES(idx % fcmp_pp::curve_trees::CurveTreesV1::LEAF_TUPLE_SIZE == 0, + "expected divisible by leaf tuple size"); + + const uint64_t leaf_tuple_idx = idx / fcmp_pp::curve_trees::CurveTreesV1::LEAF_TUPLE_SIZE; + MDB_val k = zerokval; + MDB_val_copy v(leaf_tuple_idx); + +
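// Walk the leaves table from start_trim_idx up to end_trim_idx: position the + // dup cursor once with MDB_GET_BOTH on the first leaf tuple to trim, then + // advance with MDB_NEXT. Each stored output is re-derived into its + // (O.x, I.x, C.x) leaf tuple so the caller can re-hash the last chunk + // without the trimmed members. +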
MDB_cursor_op leaf_op = MDB_GET_BOTH; + do + { + int result = mdb_cursor_get(m_cur_leaves, &k, &v, leaf_op); + leaf_op = MDB_NEXT; + if (result == MDB_NOTFOUND) + throw0(DB_ERROR("leaf not found")); // TODO: specific error type instead of DB_ERROR + if (result != MDB_SUCCESS) + throw0(DB_ERROR(lmdb_error("Failed to get leaf: ", result).c_str())); + + const auto *db_leaf = (mdb_leaf *)v.mv_data; + + // TODO: parallelize calls to this function + auto leaf = m_curve_trees->leaf_tuple(db_leaf->output_context.output_pair); + + leaves_to_trim.emplace_back(std::move(leaf.O_x)); + leaves_to_trim.emplace_back(std::move(leaf.I_x)); + leaves_to_trim.emplace_back(std::move(leaf.C_x)); + + idx += fcmp_pp::curve_trees::CurveTreesV1::LEAF_TUPLE_SIZE; + } + while (idx < trim_leaf_layer_instructions.end_trim_idx); + } + + c2_last_children_out.emplace_back(std::move(leaves_to_trim)); + } + + // Traverse the tree layer-by-layer starting at the layer closest to leaf layer, getting children to trim + // TODO: separate function for layers + bool parent_is_c1 = true; + for (uint64_t i = 1; i < trim_instructions.size(); ++i) + { + const auto &trim_layer_instructions = trim_instructions[i]; + + std::vector c1_children; + std::vector c2_children; + + if (trim_layer_instructions.end_trim_idx > trim_layer_instructions.start_trim_idx) + { + const uint64_t layer_idx = (i - 1); + uint64_t idx = trim_layer_instructions.start_trim_idx; + + MDB_val_set(k, layer_idx); + MDB_val_set(v, idx); + MDB_cursor_op op = MDB_GET_BOTH; + do + { + MDEBUG("Getting child to trim at layer_idx: " << layer_idx << " , idx: " << idx); + + int result = mdb_cursor_get(m_cur_layers, &k, &v, op); + op = MDB_NEXT_DUP; + if (result == MDB_NOTFOUND) + throw0(DB_ERROR("layer elem not found")); // TODO: specific error type instead of DB_ERROR + if (result != MDB_SUCCESS) + throw0(DB_ERROR(lmdb_error("Failed to get layer elem: ", result).c_str())); + + const auto *lv = (layer_val *)v.mv_data; + if (parent_is_c1) + { + const auto point = m_curve_trees->m_c2->from_bytes(lv->child_chunk_hash); + auto child_scalar = m_curve_trees->m_c2->point_to_cycle_scalar(point); + c1_children.emplace_back(std::move(child_scalar)); + } + else + { + const auto point = m_curve_trees->m_c1->from_bytes(lv->child_chunk_hash); + auto child_scalar = m_curve_trees->m_c1->point_to_cycle_scalar(point); + c2_children.emplace_back(std::move(child_scalar)); + } + + ++idx; + } + while (idx < trim_layer_instructions.end_trim_idx); + } + + if (parent_is_c1) + c1_last_children_out.emplace_back(std::move(c1_children)); + else + c2_last_children_out.emplace_back(std::move(c2_children)); + + parent_is_c1 = !parent_is_c1; + } + + TXN_POSTFIX_RDONLY(); + + return last_chunk_children_to_trim; +} + +fcmp_pp::curve_trees::CurveTreesV1::LastHashes BlockchainLMDB::get_last_hashes_to_trim( + const std::vector &trim_instructions) const +{ + LOG_PRINT_L3("BlockchainLMDB::" << __func__); + + fcmp_pp::curve_trees::CurveTreesV1::LastHashes last_hashes_out; + if (trim_instructions.empty()) + return last_hashes_out; + + check_open(); + + TXN_PREFIX_RDONLY(); + RCURSOR(layers) + + + // Traverse the tree layer-by-layer starting at the layer closest to leaf layer + uint64_t layer_idx = 0; + for (const auto &trim_layer_instructions : trim_instructions) + { + const uint64_t new_last_idx = trim_layer_instructions.new_total_parents - 1; + + MDB_val_copy k(layer_idx); + MDB_val_set(v, new_last_idx); + + int result = mdb_cursor_get(m_cur_layers, &k, &v, MDB_GET_BOTH); + if (result == MDB_NOTFOUND) + 
throw0(DB_ERROR("layer elem not found")); // TODO: specific error type instead of DB_ERROR + if (result != MDB_SUCCESS) + throw0(DB_ERROR(lmdb_error("Failed to get layer elem: ", result).c_str())); + + const auto *lv = (layer_val *)v.mv_data; + if ((layer_idx % 2) == 0) + { + auto point = m_curve_trees->m_c2->from_bytes(lv->child_chunk_hash); + last_hashes_out.c2_last_hashes.emplace_back(std::move(point)); + } + else + { + auto point = m_curve_trees->m_c1->from_bytes(lv->child_chunk_hash); + last_hashes_out.c1_last_hashes.emplace_back(std::move(point)); + } + + ++layer_idx; + } + + TXN_POSTFIX_RDONLY(); + + return last_hashes_out; +} + +bool BlockchainLMDB::audit_tree(const uint64_t expected_n_leaf_tuples) const +{ + LOG_PRINT_L3("BlockchainLMDB::" << __func__); + check_open(); + + TXN_PREFIX_RDONLY(); + RCURSOR(leaves) + RCURSOR(layers) + + const uint64_t actual_n_leaf_tuples = this->get_num_leaf_tuples(); + CHECK_AND_ASSERT_MES(actual_n_leaf_tuples == expected_n_leaf_tuples, false, "unexpected num leaf tuples"); + + MDEBUG("Auditing tree with " << actual_n_leaf_tuples << " leaf tuples"); + + if (actual_n_leaf_tuples == 0) + { + // Make sure layers table is also empty + MDB_stat db_stats; + int result = mdb_stat(m_txn, m_layers, &db_stats); + if (result) + throw0(DB_ERROR(lmdb_error("Failed to query m_layers: ", result).c_str())); + CHECK_AND_ASSERT_MES(db_stats.ms_entries == 0, false, "unexpected num layer entries"); + return true; + } + + CHECK_AND_ASSERT_THROW_MES(m_curve_trees != nullptr, "curve trees must be set"); + + // Check chunks of leaves hash into first layer as expected + uint64_t layer_idx = 0; + uint64_t child_chunk_idx = 0; + MDB_cursor_op leaf_op = MDB_FIRST; + MDB_cursor_op parent_op = MDB_FIRST; + + MDB_val_copy k_parent(layer_idx); + MDB_val_set(v_parent, child_chunk_idx); + + while (1) + { + // Get next leaf chunk + std::vector leaf_tuples_chunk; + leaf_tuples_chunk.reserve(m_curve_trees->m_c2_width); + + if (child_chunk_idx && child_chunk_idx % 1000 == 0) + MINFO("Auditing layer " << layer_idx << ", child_chunk_idx " << child_chunk_idx); + + // Iterate until chunk is full or we get to the end of all leaves + MDB_val k_leaf, v_leaf; + while (1) + { + int result = mdb_cursor_get(m_cur_leaves, &k_leaf, &v_leaf, leaf_op); + leaf_op = MDB_NEXT; + if (result == MDB_NOTFOUND) + break; + if (result != MDB_SUCCESS) + throw0(DB_ERROR(lmdb_error("Failed to add leaf: ", result).c_str())); + + const auto *o = (mdb_leaf *)v_leaf.mv_data; + auto leaf = m_curve_trees->leaf_tuple(o->output_context.output_pair); + + leaf_tuples_chunk.emplace_back(std::move(leaf)); + + if (leaf_tuples_chunk.size() == m_curve_trees->m_c2_width) + break; + } + + // Get the actual leaf chunk hash from the db + MDEBUG("Getting leaf chunk hash starting at child_chunk_idx " << child_chunk_idx); + int result = mdb_cursor_get(m_cur_layers, &k_parent, &v_parent, parent_op); + parent_op = MDB_NEXT_DUP; + + // Check end condition: no more leaf tuples in the leaf layer + if (leaf_tuples_chunk.empty()) + { + // No more leaves, expect to be done with parent chunks as well + if (result != MDB_NOTFOUND) + throw0(DB_ERROR(lmdb_error("unexpected leaf chunk parent result found at child_chunk_idx " + + std::to_string(child_chunk_idx), result).c_str())); + + MDEBUG("Successfully audited leaf layer"); + break; + } + + if (result != MDB_SUCCESS) + throw0(DB_ERROR(lmdb_error("Failed to get parent in first layer: ", result).c_str())); + if (layer_idx != *(uint64_t*)k_parent.mv_data || child_chunk_idx != ((layer_val 
*)v_parent.mv_data)->child_chunk_idx) + throw0(DB_ERROR("unexpected parent encountered")); + + // Get the expected leaf chunk hash + const auto leaves = m_curve_trees->flatten_leaves(std::move(leaf_tuples_chunk)); + const fcmp_pp::curve_trees::Selene::Chunk chunk{leaves.data(), leaves.size()}; + + // Hash the chunk of leaves + for (uint64_t i = 0; i < leaves.size(); ++i) + MDEBUG("Hashing " << m_curve_trees->m_c2->to_string(leaves[i])); + + const auto chunk_hash = fcmp_pp::curve_trees::get_new_parent(m_curve_trees->m_c2, chunk); + MDEBUG("chunk_hash " << m_curve_trees->m_c2->to_string(chunk_hash) << " , hash init point: " + << m_curve_trees->m_c2->to_string(m_curve_trees->m_c2->hash_init_point()) << " (" << leaves.size() << " leaves)"); + + // Now compare to value from the db + const auto *lv = (layer_val *)v_parent.mv_data; + MDEBUG("Actual leaf chunk hash " << epee::string_tools::pod_to_hex(lv->child_chunk_hash)); + + const auto expected_bytes = m_curve_trees->m_c2->to_bytes(chunk_hash); + const auto actual_bytes = lv->child_chunk_hash; + CHECK_AND_ASSERT_MES(expected_bytes == actual_bytes, false, "unexpected leaf chunk hash"); + CHECK_AND_ASSERT_MES(lv->child_chunk_idx == child_chunk_idx, false, "unexpected child chunk idx"); + + ++child_chunk_idx; + } + + MDEBUG("Successfully audited leaf layer"); + + // Traverse up the tree auditing each layer until we've audited every layer in the tree + bool audit_complete = false; + while (!audit_complete) + { + MDEBUG("Auditing layer " << layer_idx); + + // Alternate starting with c1 as parent (we already audited c2 leaf parents), then c2 as parent, then c1, etc. + const bool parent_is_c1 = layer_idx % 2 == 0; + if (parent_is_c1) + { + audit_complete = this->audit_layer( + /*c_child*/ m_curve_trees->m_c2, + /*c_parent*/ m_curve_trees->m_c1, + layer_idx, + /*chunk_width*/ m_curve_trees->m_c1_width); + } + else + { + audit_complete = this->audit_layer( + /*c_child*/ m_curve_trees->m_c1, + /*c_parent*/ m_curve_trees->m_c2, + layer_idx, + /*chunk_width*/ m_curve_trees->m_c2_width); + } + + ++layer_idx; + } + + TXN_POSTFIX_RDONLY(); + + return true; +} + +template +bool BlockchainLMDB::audit_layer(const std::unique_ptr &c_child, + const std::unique_ptr &c_parent, + const uint64_t child_layer_idx, + const uint64_t chunk_width) const +{ + LOG_PRINT_L3("BlockchainLMDB::" << __func__); + check_open(); + + TXN_PREFIX_RDONLY(); + + // Open two separate cursors for child and parent layer + MDB_cursor *child_layer_cursor, *parent_layer_cursor; + + int c_result = mdb_cursor_open(m_txn, m_layers, &child_layer_cursor); + if (c_result) + throw0(DB_ERROR(lmdb_error("Failed to open child cursor: ", c_result).c_str())); + int p_result = mdb_cursor_open(m_txn, m_layers, &parent_layer_cursor); + if (p_result) + throw0(DB_ERROR(lmdb_error("Failed to open parent cursor: ", p_result).c_str())); + + // Set the cursors to the start of each layer + const uint64_t parent_layer_idx = child_layer_idx + 1; + + MDB_val_set(k_child, child_layer_idx); + MDB_val_set(k_parent, parent_layer_idx); + + MDB_val v_child, v_parent; + + c_result = mdb_cursor_get(child_layer_cursor, &k_child, &v_child, MDB_SET); + p_result = mdb_cursor_get(parent_layer_cursor, &k_parent, &v_parent, MDB_SET); + + if (c_result != MDB_SUCCESS) + throw0(DB_ERROR(lmdb_error("Failed to get child: ", c_result).c_str())); + if (p_result != MDB_SUCCESS && p_result != MDB_NOTFOUND) + throw0(DB_ERROR(lmdb_error("Failed to get parent: ", p_result).c_str())); + + // Begin to audit the layer + MDB_cursor_op op_child = 
MDB_FIRST_DUP; + MDB_cursor_op op_parent = MDB_FIRST_DUP; + bool audit_complete = false; + uint64_t child_chunk_idx = 0; + while (1) + { + if (child_chunk_idx && child_chunk_idx % 1000 == 0) + MINFO("Auditing layer " << parent_layer_idx << ", child_chunk_idx " << child_chunk_idx); + + // Get next child chunk + std::vector child_chunk; + child_chunk.reserve(chunk_width); + while (1) + { + int result = mdb_cursor_get(child_layer_cursor, &k_child, &v_child, op_child); + op_child = MDB_NEXT_DUP; + if (result == MDB_NOTFOUND) + break; + if (result != MDB_SUCCESS) + throw0(DB_ERROR(lmdb_error("Failed to get child: ", result).c_str())); + + const auto *lv = (layer_val *)v_child.mv_data; + auto child_point = c_child->from_bytes(lv->child_chunk_hash); + + child_chunk.emplace_back(std::move(child_point)); + + if (child_chunk.size() == chunk_width) + break; + } + + // Get the actual chunk hash from the db + int result = mdb_cursor_get(parent_layer_cursor, &k_parent, &v_parent, op_parent); + op_parent = MDB_NEXT_DUP; + + // Check for end conditions + // End condition A (audit_complete=false): finished auditing layer and ready to move up a layer + // End condition B (audit_complete=true ): finished auditing the tree, no more layers remaining + + // End condition A: check if finished auditing this layer + if (child_chunk.empty()) + { + // No more children, expect to be done auditing layer and ready to move up a layer + if (result != MDB_NOTFOUND) + throw0(DB_ERROR(lmdb_error("unexpected parent result at parent_layer_idx " + std::to_string(parent_layer_idx) + + " , child_chunk_idx " + std::to_string(child_chunk_idx) + " : ", result).c_str())); + + MDEBUG("Finished auditing layer " << child_layer_idx); + audit_complete = false; + break; + } + + // End condition B: check if finished auditing the tree + if (child_chunk_idx == 0 && child_chunk.size() == 1) + { + if (p_result != MDB_NOTFOUND) + throw0(DB_ERROR(lmdb_error("unexpected parent of root at parent_layer_idx " + std::to_string(parent_layer_idx) + + " , child_chunk_idx " + std::to_string(child_chunk_idx) + " : ", result).c_str())); + + MDEBUG("Encountered root at layer_idx " << child_layer_idx); + audit_complete = true; + break; + } + + if (result != MDB_SUCCESS) + throw0(DB_ERROR(lmdb_error("Failed to get parent: ", result).c_str())); + + if (child_layer_idx != *(uint64_t*)k_child.mv_data) + throw0(DB_ERROR("unexpected child encountered")); + if (parent_layer_idx != *(uint64_t*)k_parent.mv_data) + throw0(DB_ERROR("unexpected parent encountered")); - auto result = mdb_cursor_get(m_cur_output_amounts, &k, &v, MDB_GET_BOTH); - if (result == MDB_NOTFOUND) - throw1(OUTPUT_DNE("Attempting to get an output index by amount and amount index, but amount not found")); - else if (result) - throw0(DB_ERROR(lmdb_error("DB error attempting to get an output", result).c_str())); + // Get the expected chunk hash + std::vector child_scalars; + child_scalars.reserve(child_chunk.size()); + for (const auto &child : child_chunk) + child_scalars.emplace_back(c_child->point_to_cycle_scalar(child)); + const typename C_PARENT::Chunk chunk{child_scalars.data(), child_scalars.size()}; - const pre_rct_outkey *ok = (const pre_rct_outkey *)v.mv_data; - MDB_val_set(otxk, ok->output_id); - result = mdb_cursor_get(m_cur_output_txs, (MDB_val *)&zerokval, &otxk, MDB_GET_BOTH); - if (result == MDB_NOTFOUND) - { - throw0(DB_ERROR("Unexpected: global output index not found in m_output_txs")); - } - else if (result) - { - throw1(DB_ERROR(lmdb_error("Error adding removal of output tx to db 
transaction", result).c_str())); + for (uint64_t i = 0; i < child_scalars.size(); ++i) + MDEBUG("Hashing " << c_parent->to_string(child_scalars[i])); + + const auto chunk_hash = fcmp_pp::curve_trees::get_new_parent(c_parent, chunk); + MDEBUG("Expected chunk_hash " << c_parent->to_string(chunk_hash) << " (" << child_scalars.size() << " children)"); + + const auto *lv = (layer_val *)v_parent.mv_data; + MDEBUG("Actual chunk hash " << epee::string_tools::pod_to_hex(lv->child_chunk_hash)); + + const auto actual_bytes = lv->child_chunk_hash; + const auto expected_bytes = c_parent->to_bytes(chunk_hash); + if (actual_bytes != expected_bytes) + throw0(DB_ERROR(("unexpected hash at child_chunk_idx " + std::to_string(child_chunk_idx)).c_str())); + if (lv->child_chunk_idx != child_chunk_idx) + throw0(DB_ERROR(("unexpected child_chunk_idx, epxected " + std::to_string(child_chunk_idx)).c_str())); + + ++child_chunk_idx; } - result = mdb_cursor_del(m_cur_output_txs, 0); - if (result) - throw0(DB_ERROR(lmdb_error(std::string("Error deleting output index ").append(boost::lexical_cast(out_index).append(": ")).c_str(), result).c_str())); - // now delete the amount - result = mdb_cursor_del(m_cur_output_amounts, 0); - if (result) - throw0(DB_ERROR(lmdb_error(std::string("Error deleting amount for output index ").append(boost::lexical_cast(out_index).append(": ")).c_str(), result).c_str())); + TXN_POSTFIX_RDONLY(); + + return audit_complete; } -void BlockchainLMDB::prune_outputs(uint64_t amount) +std::vector BlockchainLMDB::get_outs_at_unlock_block_id( + uint64_t block_id) { LOG_PRINT_L3("BlockchainLMDB::" << __func__); check_open(); - mdb_txn_cursors *m_cursors = &m_wcursors; - CURSOR(output_amounts); - CURSOR(output_txs); - MINFO("Pruning outputs for amount " << amount); + TXN_PREFIX_RDONLY(); + RCURSOR(locked_outputs) - MDB_val v; - MDB_val_set(k, amount); - int result = mdb_cursor_get(m_cur_output_amounts, &k, &v, MDB_SET); - if (result == MDB_NOTFOUND) - return; - if (result) - throw0(DB_ERROR(lmdb_error("Error looking up outputs: ", result).c_str())); + MDB_val_set(k_block_id, block_id); + MDB_val v_output; - // gather output ids - mdb_size_t num_elems; - mdb_cursor_count(m_cur_output_amounts, &num_elems); - MINFO(num_elems << " outputs found"); - std::vector output_ids; - output_ids.reserve(num_elems); + // Get all the locked outputs at the provided block id + std::vector outs; + + MDB_cursor_op op = MDB_SET; while (1) { - const pre_rct_outkey *okp = (const pre_rct_outkey *)v.mv_data; - output_ids.push_back(okp->output_id); - MDEBUG("output id " << okp->output_id); - result = mdb_cursor_get(m_cur_output_amounts, &k, &v, MDB_NEXT_DUP); + int result = mdb_cursor_get(m_cur_locked_outputs, &k_block_id, &v_output, op); if (result == MDB_NOTFOUND) break; - if (result) - throw0(DB_ERROR(lmdb_error("Error counting outputs: ", result).c_str())); - } - if (output_ids.size() != num_elems) - throw0(DB_ERROR("Unexpected number of outputs")); + if (result != MDB_SUCCESS) + throw0(DB_ERROR(lmdb_error("Failed to get next locked outputs: ", result).c_str())); + op = MDB_NEXT_MULTIPLE; - result = mdb_cursor_del(m_cur_output_amounts, MDB_NODUPDATA); - if (result) - throw0(DB_ERROR(lmdb_error("Error deleting outputs: ", result).c_str())); + const uint64_t blk_id = *(const uint64_t*)k_block_id.mv_data; + if (blk_id != block_id) + throw0(DB_ERROR(("Blk id " + std::to_string(blk_id) + " not the expected" + std::to_string(block_id)).c_str())); - for (uint64_t output_id: output_ids) - { - MDB_val_set(v, output_id); - result = 
mdb_cursor_get(m_cur_output_txs, (MDB_val *)&zerokval, &v, MDB_GET_BOTH); - if (result) - throw0(DB_ERROR(lmdb_error("Error looking up output: ", result).c_str())); - result = mdb_cursor_del(m_cur_output_txs, 0); - if (result) - throw0(DB_ERROR(lmdb_error("Error deleting output: ", result).c_str())); - } -} + const auto range_begin = ((const fcmp_pp::curve_trees::OutputContext*)v_output.mv_data); + const auto range_end = range_begin + v_output.mv_size / sizeof(fcmp_pp::curve_trees::OutputContext); -void BlockchainLMDB::add_spent_key(const crypto::key_image& k_image) -{ - LOG_PRINT_L3("BlockchainLMDB::" << __func__); - check_open(); - mdb_txn_cursors *m_cursors = &m_wcursors; + auto it = range_begin; - CURSOR(spent_keys) + // The first MDB_NEXT_MULTIPLE includes the val from MDB_SET, so skip it + if (outs.size() == 1) + ++it; - MDB_val k = {sizeof(k_image), (void *)&k_image}; - if (auto result = mdb_cursor_put(m_cur_spent_keys, (MDB_val *)&zerokval, &k, MDB_NODUPDATA)) { - if (result == MDB_KEYEXIST) - throw1(KEY_IMAGE_EXISTS("Attempting to add spent key image that's already in the db")); - else - throw1(DB_ERROR(lmdb_error("Error adding spent key image to db transaction: ", result).c_str())); + while (it < range_end) + { + outs.push_back(*it); + ++it; + } } + + TXN_POSTFIX_RDONLY(); + + return outs; } -void BlockchainLMDB::remove_spent_key(const crypto::key_image& k_image) +void BlockchainLMDB::del_locked_outs_at_block_id(uint64_t block_id) { LOG_PRINT_L3("BlockchainLMDB::" << __func__); check_open(); mdb_txn_cursors *m_cursors = &m_wcursors; - CURSOR(spent_keys) + CURSOR(locked_outputs) - MDB_val k = {sizeof(k_image), (void *)&k_image}; - auto result = mdb_cursor_get(m_cur_spent_keys, (MDB_val *)&zerokval, &k, MDB_GET_BOTH); - if (result != 0 && result != MDB_NOTFOUND) - throw1(DB_ERROR(lmdb_error("Error finding spent key to remove", result).c_str())); - if (!result) - { - result = mdb_cursor_del(m_cur_spent_keys, 0); - if (result) - throw1(DB_ERROR(lmdb_error("Error adding removal of key image to db transaction", result).c_str())); - } + MDB_val_set(k_block_id, block_id); + + int result = mdb_cursor_get(m_cur_locked_outputs, &k_block_id, NULL, MDB_SET); + if (result == MDB_NOTFOUND) + return; + if (result != MDB_SUCCESS) + throw1(DB_ERROR(lmdb_error("Error finding locked outputs to remove: ", result).c_str())); + + result = mdb_cursor_del(m_cur_locked_outputs, MDB_NODUPDATA); + if (result) + throw1(DB_ERROR(lmdb_error("Error removing locked outputs: ", result).c_str())); } BlockchainLMDB::~BlockchainLMDB() @@ -1302,7 +2420,7 @@ BlockchainLMDB::~BlockchainLMDB() BlockchainLMDB::close(); } -BlockchainLMDB::BlockchainLMDB(bool batch_transactions): BlockchainDB() +BlockchainLMDB::BlockchainLMDB(bool batch_transactions, std::shared_ptr curve_trees): BlockchainDB() { LOG_PRINT_L3("BlockchainLMDB::" << __func__); // initialize folder to something "safe" just in case @@ -1319,6 +2437,8 @@ BlockchainLMDB::BlockchainLMDB(bool batch_transactions): BlockchainDB() // reset may also need changing when initialize things here m_hardfork = nullptr; + + m_curve_trees = curve_trees; } void BlockchainLMDB::open(const std::string& filename, const int db_flags) @@ -1331,6 +2451,9 @@ void BlockchainLMDB::open(const std::string& filename, const int db_flags) if (m_open) throw0(DB_OPEN_FAILURE("Attempted to open db, but it's already open")); + if (m_curve_trees == nullptr) + throw0(DB_OPEN_FAILURE("curve trees not set yet, must be set before opening db")); + boost::filesystem::path direc(filename); if 
(!boost::filesystem::exists(direc) && !boost::filesystem::create_directories(direc)) { @@ -1437,6 +2560,10 @@ void BlockchainLMDB::open(const std::string& filename, const int db_flags) lmdb_db_open(txn, LMDB_SPENT_KEYS, MDB_INTEGERKEY | MDB_CREATE | MDB_DUPSORT | MDB_DUPFIXED, m_spent_keys, "Failed to open db handle for m_spent_keys"); + lmdb_db_open(txn, LMDB_LOCKED_OUTPUTS, MDB_INTEGERKEY | MDB_DUPSORT | MDB_DUPFIXED | MDB_CREATE, m_locked_outputs, "Failed to open db handle for m_locked_outputs"); + lmdb_db_open(txn, LMDB_LEAVES, MDB_INTEGERKEY | MDB_DUPSORT | MDB_DUPFIXED | MDB_CREATE, m_leaves, "Failed to open db handle for m_leaves"); + lmdb_db_open(txn, LMDB_LAYERS, MDB_INTEGERKEY | MDB_DUPSORT | MDB_DUPFIXED | MDB_CREATE, m_layers, "Failed to open db handle for m_layers"); + lmdb_db_open(txn, LMDB_TXPOOL_META, MDB_CREATE, m_txpool_meta, "Failed to open db handle for m_txpool_meta"); lmdb_db_open(txn, LMDB_TXPOOL_BLOB, MDB_CREATE, m_txpool_blob, "Failed to open db handle for m_txpool_blob"); @@ -1456,6 +2583,9 @@ void BlockchainLMDB::open(const std::string& filename, const int db_flags) mdb_set_dupsort(txn, m_block_heights, compare_hash32); mdb_set_dupsort(txn, m_tx_indices, compare_hash32); mdb_set_dupsort(txn, m_output_amounts, compare_uint64); + mdb_set_dupsort(txn, m_locked_outputs, compare_uint64); + mdb_set_dupsort(txn, m_leaves, compare_uint64); + mdb_set_dupsort(txn, m_layers, compare_uint64); mdb_set_dupsort(txn, m_output_txs, compare_uint64); mdb_set_dupsort(txn, m_block_info, compare_uint64); if (!(mdb_flags & MDB_RDONLY)) @@ -1512,7 +2642,10 @@ void BlockchainLMDB::open(const std::string& filename, const int db_flags) // We don't handle the old format previous to that commit. txn.commit(); m_open = true; + // Decrement num active txs so db can resize if needed + mdb_txn_safe::increment_txns(-1); migrate(db_version); + mdb_txn_safe::increment_txns(1); return; } #endif @@ -1633,6 +2766,12 @@ void BlockchainLMDB::reset() throw0(DB_ERROR(lmdb_error("Failed to drop m_output_amounts: ", result).c_str())); if (auto result = mdb_drop(txn, m_spent_keys, 0)) throw0(DB_ERROR(lmdb_error("Failed to drop m_spent_keys: ", result).c_str())); + if (auto result = mdb_drop(txn, m_locked_outputs, 0)) + throw0(DB_ERROR(lmdb_error("Failed to drop m_locked_outputs: ", result).c_str())); + if (auto result = mdb_drop(txn, m_leaves, 0)) + throw0(DB_ERROR(lmdb_error("Failed to drop m_leaves: ", result).c_str())); + if (auto result = mdb_drop(txn, m_layers, 0)) + throw0(DB_ERROR(lmdb_error("Failed to drop m_layers: ", result).c_str())); (void)mdb_drop(txn, m_hf_starting_heights, 0); // this one is dropped in new code if (auto result = mdb_drop(txn, m_hf_versions, 0)) throw0(DB_ERROR(lmdb_error("Failed to drop m_hf_versions: ", result).c_str())); @@ -3543,14 +4682,17 @@ bool BlockchainLMDB::has_key_image(const crypto::key_image& img) const TXN_PREFIX_RDONLY(); RCURSOR(spent_keys); - MDB_val k = {sizeof(img), (void *)&img}; + crypto::key_image_y img_y; + crypto::key_image_to_y(img, img_y); + + MDB_val k = {sizeof(img_y), (void *)&img_y}; ret = (mdb_cursor_get(m_cur_spent_keys, (MDB_val *)&zerokval, &k, MDB_GET_BOTH) == 0); TXN_POSTFIX_RDONLY(); return ret; } -bool BlockchainLMDB::for_all_key_images(std::function f) const +bool BlockchainLMDB::for_all_key_images(std::function f) const { LOG_PRINT_L3("BlockchainLMDB::" << __func__); check_open(); @@ -3571,8 +4713,8 @@ bool BlockchainLMDB::for_all_key_images(std::functionm_txn; + + /* the spent_keys table name is the same but the old version and new 
version + * have different data. Create a new table. We want the name to be similar + * to the old name so that it will occupy the same location in the DB. + */ + lmdb_db_open(txn, "spent_keyr", MDB_INTEGERKEY | MDB_CREATE | MDB_DUPSORT | MDB_DUPFIXED, m_spent_keys, "Failed to open db handle for m_spent_keyr"); + mdb_set_dupsort(txn, m_spent_keys, compare_hash32); + + MDB_cursor *c_new_spent_keys, *c_old_spent_keys; + MDB_val k, v_img; + MDB_cursor_op op = MDB_FIRST; + + uint64_t n_old_key_images; + { + MDB_stat db_stats; + if ((result = mdb_stat(txn, o_spent_keys, &db_stats))) + throw0(DB_ERROR(lmdb_error("Failed to query m_spent_keys: ", result).c_str())); + n_old_key_images = db_stats.ms_entries; + } + + uint64_t n_new_key_images; + { + MDB_stat db_stats; + if ((result = mdb_stat(txn, m_spent_keys, &db_stats))) + throw0(DB_ERROR(lmdb_error("Failed to query m_spent_keys: ", result).c_str())); + n_new_key_images = db_stats.ms_entries; + } + + const uint64_t n_key_images = n_old_key_images + n_new_key_images; + + i = n_new_key_images; + while (i < n_key_images) + { + if (!(i % BATCH_SIZE)) + { + if (i) + { + LOGIF(el::Level::Info) + { + const uint64_t percent = std::min((i * 100) / n_key_images, (uint64_t)99); + std::cout << i << " / " << n_key_images << " key images (" << percent << "% of step 1/3) \r" << std::flush; + } + + // Start a new batch so resizing can occur as needed + batch_stop(); + batch_start(); + txn.m_txn = m_write_txn->m_txn; + } + + // Open all cursors + result = mdb_cursor_open(txn, m_spent_keys, &c_new_spent_keys); + if (result) + throw0(DB_ERROR(lmdb_error("Failed to open a cursor for spent_keyr: ", result).c_str())); + result = mdb_cursor_open(txn, o_spent_keys, &c_old_spent_keys); + if (result) + throw0(DB_ERROR(lmdb_error("Failed to open a cursor for spent_keys: ", result).c_str())); + op = MDB_FIRST; + } + + // Get old key image and use it to set the new key image y + result = mdb_cursor_get(c_old_spent_keys, &k, &v_img, op); + op = MDB_NEXT; + if (result) + throw0(DB_ERROR(lmdb_error("Failed to get a record from spent_keys: ", result).c_str())); + const crypto::key_image k_image = *(const crypto::key_image*)v_img.mv_data; + + crypto::key_image_y k_image_y; + crypto::key_image_to_y(k_image, k_image_y); + + MDB_val k_y = {sizeof(k_image_y), (void *)&k_image_y}; + if (auto result = mdb_cursor_put(c_new_spent_keys, (MDB_val *)&zerokval, &k_y, MDB_NODUPDATA)) { + if (result == MDB_KEYEXIST) + throw1(KEY_IMAGE_EXISTS("Attempting to add spent key image that's already in the db")); + else + throw1(DB_ERROR(lmdb_error("Error adding spent key image to db transaction: ", result).c_str())); + } + + /* we delete the old records immediately, so the overall DB and mapsize should not be + * larger than it needs to be. + * This is a little slower than just letting mdb_drop() delete it all at the end, but + * it saves a significant amount of disk space. + */ + result = mdb_cursor_del(c_old_spent_keys, 0); + if (result) + throw0(DB_ERROR(lmdb_error("Failed to delete a record from spent_keys: ", result).c_str())); + + ++i; + } + batch_stop(); + } + + // 2. Prepare all valid outputs to be inserted into the merkle tree and + // place them in a locked outputs table. The key to this new table is the + // block id in which the outputs unlock. 
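+ // Layout note: m_locked_outputs is an integer-keyed dup-fixed table whose + // key is the unlock block id; each duplicate value is an OutputContext + // (output_id plus output pubkey/commitment pair). The tmp_last_output table + // below records how far this step has progressed so the migration can + // resume from the last committed batch if the process is killed.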
+ { + MINFO("Setting up a locked outputs table (step 2/3 of full-chain membership proof migration)"); + + result = mdb_txn_begin(m_env, NULL, 0, txn); + if (result) + throw0(DB_ERROR(lmdb_error("Failed to create a transaction for the db: ", result).c_str())); + lmdb_db_open(txn, "tmp_last_output", MDB_INTEGERKEY | MDB_CREATE, m_tmp_last_output, "Failed to open db handle for m_tmp_last_output"); + txn.commit(); + + if (!m_batch_transactions) + set_batch_transactions(true); + const std::size_t BATCH_SIZE = 10000; + batch_start(); + txn.m_txn = m_write_txn->m_txn; + + // Use this cache to know how to restart the migration if the process is killed + struct tmp_output_cache { uint64_t n_outputs_read; uint64_t amount; outkey ok; }; + tmp_output_cache last_output; + + MDB_cursor *c_output_amounts, *c_locked_outputs, *c_tmp_last_output; + MDB_val k, v; + + i = 0; + const uint64_t n_outputs = this->num_outputs(); + MDB_cursor_op op = MDB_FIRST; + while (1) + { + if (!(i % BATCH_SIZE)) + { + if (i) + { + LOGIF(el::Level::Info) + { + const uint64_t percent = std::min((i * 100) / n_outputs, (uint64_t)99); + std::cout << i << " / " << n_outputs << " outputs (" << percent << "% of step 2/3) \r" << std::flush; + } + + // Update last output read + MDB_val_set(v_last_output, last_output); + result = mdb_cursor_put(c_tmp_last_output, (MDB_val*)&zerokval, &v_last_output, 0); + if (result) + throw0(DB_ERROR(lmdb_error("Failed to update max output id: ", result).c_str())); + + // Commit and start a new txn + batch_stop(); + batch_start(); + txn.m_txn = m_write_txn->m_txn; + + // Reset k and v so we continue migration from the last output + k = {sizeof(last_output.amount), (void *)&last_output.amount}; + + const std::size_t outkey_size = (last_output.amount == 0) ? sizeof(outkey) : sizeof(pre_rct_outkey); + v = {outkey_size, (void *)&last_output.ok}; + } + + // Open all cursors + result = mdb_cursor_open(txn, m_output_amounts, &c_output_amounts); + if (result) + throw0(DB_ERROR(lmdb_error("Failed to open a cursor for output amounts: ", result).c_str())); + result = mdb_cursor_open(txn, m_locked_outputs, &c_locked_outputs); + if (result) + throw0(DB_ERROR(lmdb_error("Failed to open a cursor for locked outputs: ", result).c_str())); + result = mdb_cursor_open(txn, m_tmp_last_output, &c_tmp_last_output); + if (result) + throw0(DB_ERROR(lmdb_error("Failed to open a cursor for temp last output: ", result).c_str())); + + // Get the cached last output from the db + bool found_cached_output = false; + tmp_output_cache cached_last_o; + if (i == 0) + { + MDB_val v_last_output; + result = mdb_cursor_get(c_tmp_last_output, (MDB_val*)&zerokval, &v_last_output, MDB_SET); + if (result != MDB_SUCCESS && result != MDB_NOTFOUND) + throw0(DB_ERROR(lmdb_error("Failed to get max output id: ", result).c_str())); + if (result != MDB_NOTFOUND) + { + cached_last_o = *(const tmp_output_cache*)v_last_output.mv_data; + + if (n_outputs < cached_last_o.n_outputs_read) + throw0(DB_ERROR("Unexpected n_outputs_read on cached last output")); + if (n_outputs == cached_last_o.n_outputs_read) + break; + + MDEBUG("Found cached output " << cached_last_o.ok.output_id + << ", migrated " << cached_last_o.n_outputs_read << " outputs already"); + found_cached_output = true; + + // Set k and v so we can continue the migration from that output + k = {sizeof(cached_last_o.amount), (void *)&cached_last_o.amount}; + + const std::size_t outkey_size = (cached_last_o.amount == 0) ? 
sizeof(outkey) : sizeof(pre_rct_outkey); + v = {outkey_size, (void *)&cached_last_o.ok}; + + i = cached_last_o.n_outputs_read; + op = MDB_NEXT; + } + } + + // Advance the output_amounts cursor to the last output read + if (i || found_cached_output) + { + result = mdb_cursor_get(c_output_amounts, &k, &v, MDB_GET_BOTH); + if (result) + throw0(DB_ERROR(lmdb_error("Failed to advance cursor for output amounts: ", result).c_str())); + } + } + + // Get the next output from the db + result = mdb_cursor_get(c_output_amounts, &k, &v, op); + op = MDB_NEXT; + if (result == MDB_NOTFOUND) + { + // Indicate we've read all outputs so we know the migration step is complete + last_output.n_outputs_read = n_outputs; + MDB_val_set(v_last_output, last_output); + result = mdb_cursor_put(c_tmp_last_output, (MDB_val*)&zerokval, &v_last_output, 0); + if (result) + throw0(DB_ERROR(lmdb_error("Failed to update max output id: ", result).c_str())); + + batch_stop(); + break; + } + if (result != MDB_SUCCESS) + throw0(DB_ERROR(lmdb_error("Failed to get a record from output amounts: ", result).c_str())); + + ++i; + const bool commit_next_iter = i && !(i % BATCH_SIZE); + + // Read the output data + uint64_t amount = *(const uint64_t*)k.mv_data; + output_data_t output_data; + uint64_t output_id; + if (amount == 0) + { + const outkey *okp = (const outkey *)v.mv_data; + output_data = okp->data; + output_id = okp->output_id; + if (commit_next_iter) + memcpy(&last_output.ok, okp, sizeof(outkey)); + } + else + { + const pre_rct_outkey *okp = (const pre_rct_outkey *)v.mv_data; + memcpy(&output_data, &okp->data, sizeof(pre_rct_output_data_t)); + output_data.commitment = rct::zeroCommit(amount); + output_id = okp->output_id; + if (commit_next_iter) + memcpy(&last_output.ok, okp, sizeof(pre_rct_outkey)); + } + + if (commit_next_iter) + { + // Set last output metadata + last_output.amount = amount; + last_output.n_outputs_read = i; + } + + // Prepare the output for insertion to the tree + auto output_pair = fcmp_pp::curve_trees::OutputPair{ + .output_pubkey = std::move(output_data.pubkey), + .commitment = std::move(output_data.commitment) + }; + + auto output_context = fcmp_pp::curve_trees::OutputContext{ + .output_id = output_id, + .output_pair = std::move(output_pair) + }; + + // Get the block in which the output will unlock + const uint64_t unlock_block = cryptonote::get_unlock_block_index(output_data.unlock_time, output_data.height); + + // Now add the output to the locked outputs table + MDB_val_set(k_block_id, unlock_block); + MDB_val_set(v_output, output_context); + + // MDB_NODUPDATA because all output id's should be unique + // Can't use MDB_APPENDDUP because outputs aren't inserted in order sorted by output_id + result = mdb_cursor_put(c_locked_outputs, &k_block_id, &v_output, MDB_NODUPDATA); + if (result != MDB_SUCCESS) + throw0(DB_ERROR(lmdb_error("Failed to add locked output: ", result).c_str())); + } + } + + // 3. Set up the curve trees merkle tree by growing the tree block by block, + // with leaves that unlock in each respective block + { + MINFO("Setting up a merkle tree using existing cryptonote outputs (step 3/3 of full-chain membership proof migration)"); + + if (!m_batch_transactions) + set_batch_transactions(true); + const std::size_t BATCH_SIZE = 50; + batch_start(); + txn.m_txn = m_write_txn->m_txn; + + /* the block_info table name is the same but the old version and new version + * have incompatible data. Create a new table. 
We want the name to be similar + * to the old name so that it will occupy the same location in the DB. + */ + MDB_dbi o_block_info = m_block_info; + lmdb_db_open(txn, "block_infn", MDB_INTEGERKEY | MDB_CREATE | MDB_DUPSORT | MDB_DUPFIXED, m_block_info, "Failed to open db handle for block_infn"); + mdb_set_dupsort(txn, m_block_info, compare_uint64); + + MDB_cursor *c_locked_outputs, *c_new_block_info, *c_old_block_info; + MDB_val k_blk, v_blk; + + i = 0; + const uint64_t n_blocks = height(); + while (i < n_blocks) + { + if (!(i % BATCH_SIZE)) + { + if (i) + { + LOGIF(el::Level::Info) + { + const uint64_t percent = std::min((i * 100) / n_blocks, (uint64_t)99); + std::cout << i << " / " << n_blocks << " blocks (" << percent << "% of step 3/3) \r" << std::flush; + } + + batch_stop(); + batch_start(); + txn.m_txn = m_write_txn->m_txn; + } + + // Open all cursors + result = mdb_cursor_open(txn, m_locked_outputs, &c_locked_outputs); + if (result) + throw0(DB_ERROR(lmdb_error("Failed to open a cursor for locked outputs: ", result).c_str())); + result = mdb_cursor_open(txn, m_block_info, &c_new_block_info); + if (result) + throw0(DB_ERROR(lmdb_error("Failed to open a cursor for block_infn: ", result).c_str())); + result = mdb_cursor_open(txn, o_block_info, &c_old_block_info); + if (result) + throw0(DB_ERROR(lmdb_error("Failed to open a cursor for block_info: ", result).c_str())); + + // See what the last block inserted into the new table was + if (i == 0) + { + MDB_stat db_stats; + result = mdb_stat(txn, m_block_info, &db_stats); + if (result) + throw0(DB_ERROR(lmdb_error("Failed to query m_block_info: ", result).c_str())); + i = db_stats.ms_entries; + if (i == n_blocks) + break; + } + } + + // Get the leaf tuples that unlock at the given block + auto unlocked_outputs = this->get_outs_at_unlock_block_id(i); + this->grow_tree(std::move(unlocked_outputs)); + + // Now that we've used the unlocked leaves to grow the tree, we delete them from the locked outputs table + this->del_locked_outs_at_block_id(i); + + // Get old block_info and use it to set the new one with new values + result = mdb_cursor_get(c_old_block_info, &k_blk, &v_blk, MDB_NEXT); + if (result) + throw0(DB_ERROR(lmdb_error("Failed to get a record from block_info: ", result).c_str())); + const mdb_block_info_4 *bi_old = (const mdb_block_info_4*)v_blk.mv_data; + if (i != bi_old->bi_height) + throw0(DB_ERROR(std::string("Unexpected block retrieved, retrieved: " + std::to_string(bi_old->bi_height) + " , expected: " + std::to_string(i)).c_str())); + mdb_block_info_5 bi; + bi.bi_height = bi_old->bi_height; + bi.bi_timestamp = bi_old->bi_timestamp; + bi.bi_coins = bi_old->bi_coins; + bi.bi_weight = bi_old->bi_weight; + bi.bi_diff_lo = bi_old->bi_diff_lo; + bi.bi_diff_hi = bi_old->bi_diff_hi; + bi.bi_hash = bi_old->bi_hash; + bi.bi_cum_rct = bi_old->bi_cum_rct; + bi.bi_long_term_block_weight = bi_old->bi_long_term_block_weight; + bi.bi_n_leaf_tuples = this->get_num_leaf_tuples(); + bi.bi_tree_root = this->get_tree_root(); + + LOGIF(el::Level::Info) + { + if ((bi.bi_height % 1000) == 0) + { + const std::string tree_root = epee::string_tools::pod_to_hex(bi.bi_tree_root); + MINFO("Height: " << i << ", block: " << bi.bi_hash << ", tree root: " << tree_root << ", leaves: " << bi.bi_n_leaf_tuples); + } + } + + MDB_val_set(nv, bi); + result = mdb_cursor_put(c_new_block_info, (MDB_val *)&zerokval, &nv, MDB_APPENDDUP); + if (result) + throw0(DB_ERROR(lmdb_error("Failed to put a record into block_infn: ", result).c_str())); + + /* we delete the old 
records immediately, so the overall DB and mapsize should not be + * larger than it needs to be. + * This is a little slower than just letting mdb_drop() delete it all at the end, but + * it saves a significant amount of disk space. + */ + result = mdb_cursor_del(c_old_block_info, 0); + if (result) + throw0(DB_ERROR(lmdb_error("Failed to delete a record from block_info: ", result).c_str())); + + ++i; + } + batch_stop(); + + result = mdb_txn_begin(m_env, NULL, 0, txn); + if (result) + throw0(DB_ERROR(lmdb_error("Failed to create a transaction for the db: ", result).c_str())); + /* Delete the old table */ + result = mdb_drop(txn, o_block_info, 1); + if (result) + throw0(DB_ERROR(lmdb_error("Failed to delete old block_info table: ", result).c_str())); + + MDB_cursor *c_cur; + result = mdb_cursor_open(txn, m_block_info, &c_cur); + if (result) + throw0(DB_ERROR(lmdb_error("Failed to open a cursor for block_infn: ", result).c_str())); + RENAME_DB("block_infn"); + mdb_dbi_close(m_env, m_block_info); + + lmdb_db_open(txn, "block_info", MDB_INTEGERKEY | MDB_CREATE | MDB_DUPSORT | MDB_DUPFIXED, m_block_info, "Failed to open db handle for m_block_info"); + mdb_set_dupsort(txn, m_block_info, compare_uint64); + + txn.commit(); + } + } while(0); + + // Update db version + uint32_t version = 6; + v.mv_data = (void *)&version; + v.mv_size = sizeof(version); + MDB_val_str(vk, "version"); + result = mdb_txn_begin(m_env, NULL, 0, txn); + if (result) + throw0(DB_ERROR(lmdb_error("Failed to create a transaction for the db: ", result).c_str())); + result = mdb_put(txn, m_properties, &vk, &v, 0); + if (result) + throw0(DB_ERROR(lmdb_error("Failed to update version for the db: ", result).c_str())); + + // Drop the old spent keys table. We keep it until here so we know if the key image migration is complete. 
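+ // (If the process dies before this point, the old table is still present on + // restart, and step 1 resumes from n_new_key_images instead of redoing the + // conversion; that is why the old table is only dropped at the very end.)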
+ result = mdb_drop(txn, o_spent_keys, 1); + if (result) + throw0(DB_ERROR(lmdb_error("Failed to delete old spent_keys table: ", result).c_str())); + + // Rename the spent keyr table to the new spent keys table + MDB_cursor *c_cur; + result = mdb_cursor_open(txn, m_spent_keys, &c_cur); + if (result) + throw0(DB_ERROR(lmdb_error("Failed to open a cursor for spent_keyr: ", result).c_str())); + RENAME_DB("spent_keyr"); + mdb_dbi_close(m_env, m_spent_keys); + + lmdb_db_open(txn, "spent_keys", MDB_INTEGERKEY | MDB_CREATE | MDB_DUPSORT | MDB_DUPFIXED, m_spent_keys, "Failed to open db handle for m_spent_keys"); + mdb_set_dupsort(txn, m_spent_keys, compare_hash32); + + // We only needed the temp last output table for this migration, drop it + result = mdb_drop(txn, m_tmp_last_output, 1); + if (result) + throw0(DB_ERROR(lmdb_error("Failed to drop temp last output table: ", result).c_str())); + + txn.commit(); +} + void BlockchainLMDB::migrate(const uint32_t oldversion) { if (oldversion < 1) @@ -5689,6 +7312,8 @@ void BlockchainLMDB::migrate(const uint32_t oldversion) migrate_3_4(); if (oldversion < 5) migrate_4_5(); + if (oldversion < 6) + migrate_5_6(); } } // namespace cryptonote diff --git a/src/blockchain_db/lmdb/db_lmdb.h b/src/blockchain_db/lmdb/db_lmdb.h index 6eeb942dc25..b8d087412ad 100644 --- a/src/blockchain_db/lmdb/db_lmdb.h +++ b/src/blockchain_db/lmdb/db_lmdb.h @@ -30,6 +30,7 @@ #include "blockchain_db/blockchain_db.h" #include "cryptonote_basic/blobdatatype.h" // for type blobdata +#include "fcmp_pp/curve_trees.h" #include "ringct/rctTypes.h" #include @@ -64,6 +65,10 @@ typedef struct mdb_txn_cursors MDB_cursor *m_txc_spent_keys; + MDB_cursor *m_txc_locked_outputs; + MDB_cursor *m_txc_leaves; + MDB_cursor *m_txc_layers; + MDB_cursor *m_txc_txpool_meta; MDB_cursor *m_txc_txpool_blob; @@ -87,6 +92,9 @@ typedef struct mdb_txn_cursors #define m_cur_tx_indices m_cursors->m_txc_tx_indices #define m_cur_tx_outputs m_cursors->m_txc_tx_outputs #define m_cur_spent_keys m_cursors->m_txc_spent_keys +#define m_cur_locked_outputs m_cursors->m_txc_locked_outputs +#define m_cur_leaves m_cursors->m_txc_leaves +#define m_cur_layers m_cursors->m_txc_layers #define m_cur_txpool_meta m_cursors->m_txc_txpool_meta #define m_cur_txpool_blob m_cursors->m_txc_txpool_blob #define m_cur_alt_blocks m_cursors->m_txc_alt_blocks @@ -109,6 +117,9 @@ typedef struct mdb_rflags bool m_rf_tx_indices; bool m_rf_tx_outputs; bool m_rf_spent_keys; + bool m_rf_locked_outputs; + bool m_rf_leaves; + bool m_rf_layers; bool m_rf_txpool_meta; bool m_rf_txpool_blob; bool m_rf_alt_blocks; @@ -183,7 +194,7 @@ struct mdb_txn_safe class BlockchainLMDB : public BlockchainDB { public: - BlockchainLMDB(bool batch_transactions=true); + BlockchainLMDB(bool batch_transactions=true, std::shared_ptr curve_trees = fcmp_pp::curve_trees::curve_trees_v1()); ~BlockchainLMDB(); virtual void open(const std::string& filename, const int mdb_flags=0); @@ -303,7 +314,7 @@ class BlockchainLMDB : public BlockchainDB virtual bool for_all_txpool_txes(std::function f, bool include_blob = false, relay_category category = relay_category::broadcasted) const; - virtual bool for_all_key_images(std::function) const; + virtual bool for_all_key_images(std::function) const; virtual bool for_blocks_range(const uint64_t& h1, const uint64_t& h2, std::function) const; virtual bool for_all_transactions(std::function, bool pruned) const; virtual bool for_all_outputs(std::function f) const; @@ -356,6 +367,13 @@ class BlockchainLMDB : public BlockchainDB static int 
compare_hash32(const MDB_val *a, const MDB_val *b); static int compare_string(const MDB_val *a, const MDB_val *b); + // make private + virtual void grow_tree(std::vector &&new_outputs); + + virtual void trim_tree(const uint64_t trim_n_leaf_tuples, const uint64_t trim_block_id); + + virtual bool audit_tree(const uint64_t expected_n_leaf_tuples) const; + private: void do_resize(uint64_t size_increase=0); @@ -370,6 +388,7 @@ class BlockchainLMDB : public BlockchainDB , const uint64_t& coins_generated , uint64_t num_rct_outs , const crypto::hash& block_hash + , const fcmp_pp::curve_trees::OutputsByUnlockBlock& outs_by_unlock_block ); virtual void remove_block(); @@ -378,7 +397,7 @@ class BlockchainLMDB : public BlockchainDB virtual void remove_transaction_data(const crypto::hash& tx_hash, const transaction& tx); - virtual uint64_t add_output(const crypto::hash& tx_hash, + virtual output_indexes_t add_output(const crypto::hash& tx_hash, const tx_out& tx_output, const uint64_t& local_index, const uint64_t unlock_time, @@ -399,6 +418,41 @@ class BlockchainLMDB : public BlockchainDB virtual void remove_spent_key(const crypto::key_image& k_image); + template + void grow_layer(const std::unique_ptr &curve, + const std::vector> &layer_extensions, + const uint64_t c_idx, + const uint64_t layer_idx); + + template + void trim_layer(const std::unique_ptr &curve, + const fcmp_pp::curve_trees::LayerReduction &layer_reduction, + const uint64_t layer_idx); + + virtual uint64_t get_num_leaf_tuples() const; + + uint64_t get_top_block_n_leaf_tuples() const; + + virtual std::array get_tree_root() const; + + fcmp_pp::curve_trees::CurveTreesV1::LastHashes get_tree_last_hashes() const; + + fcmp_pp::curve_trees::CurveTreesV1::LastChunkChildrenToTrim get_last_chunk_children_to_trim( + const std::vector &trim_instructions) const; + + fcmp_pp::curve_trees::CurveTreesV1::LastHashes get_last_hashes_to_trim( + const std::vector &trim_instructions) const; + + template + bool audit_layer(const std::unique_ptr &c_child, + const std::unique_ptr &c_parent, + const uint64_t child_layer_idx, + const uint64_t chunk_width) const; + + std::vector get_outs_at_unlock_block_id(uint64_t block_id); + + void del_locked_outs_at_block_id(uint64_t block_id); + uint64_t num_outputs() const; // Hard fork @@ -441,6 +495,9 @@ class BlockchainLMDB : public BlockchainDB // migrate from DB version 4 to 5 void migrate_4_5(); + // migrate from DB version 5 to 6 + void migrate_5_6(); + void cleanup_batch(); private: @@ -463,6 +520,10 @@ class BlockchainLMDB : public BlockchainDB MDB_dbi m_spent_keys; + MDB_dbi m_locked_outputs; + MDB_dbi m_leaves; + MDB_dbi m_layers; + MDB_dbi m_txpool_meta; MDB_dbi m_txpool_blob; diff --git a/src/blockchain_db/testdb.h b/src/blockchain_db/testdb.h index 308bdd4c24b..84d1d3a8016 100644 --- a/src/blockchain_db/testdb.h +++ b/src/blockchain_db/testdb.h @@ -112,12 +112,17 @@ class BaseTestDB: public cryptonote::BlockchainDB { virtual void remove_block() override { } virtual uint64_t add_transaction_data(const crypto::hash& blk_hash, const std::pair& tx, const crypto::hash& tx_hash, const crypto::hash& tx_prunable_hash) override {return 0;} virtual void remove_transaction_data(const crypto::hash& tx_hash, const cryptonote::transaction& tx) override {} - virtual uint64_t add_output(const crypto::hash& tx_hash, const cryptonote::tx_out& tx_output, const uint64_t& local_index, const uint64_t unlock_time, const rct::key *commitment) override {return 0;} + virtual output_indexes_t add_output(const crypto::hash& tx_hash, const 
cryptonote::tx_out& tx_output, const uint64_t& local_index, const uint64_t unlock_time, const rct::key *commitment) override {return {0, 0};} virtual void add_tx_amount_output_indices(const uint64_t tx_index, const std::vector& amount_output_indices) override {} virtual void add_spent_key(const crypto::key_image& k_image) override {} virtual void remove_spent_key(const crypto::key_image& k_image) override {} + virtual void grow_tree(std::vector &&new_outputs) override {}; + virtual void trim_tree(const uint64_t trim_n_leaf_tuples, const uint64_t trim_block_id) override {}; + virtual bool audit_tree(const uint64_t expected_n_leaf_tuples) const override { return false; }; + virtual std::array get_tree_root() const override { return {}; }; + virtual uint64_t get_num_leaf_tuples() const override { return 0; }; - virtual bool for_all_key_images(std::function) const override { return true; } + virtual bool for_all_key_images(std::function) const override { return true; } virtual bool for_blocks_range(const uint64_t&, const uint64_t&, std::function) const override { return true; } virtual bool for_all_transactions(std::function, bool pruned) const override { return true; } virtual bool for_all_outputs(std::function f) const override { return true; } @@ -144,6 +149,7 @@ class BaseTestDB: public cryptonote::BlockchainDB { , const uint64_t& coins_generated , uint64_t num_rct_outs , const crypto::hash& blk_hash + , const fcmp_pp::curve_trees::OutputsByUnlockBlock& outs_by_unlock_block ) override { } virtual cryptonote::block get_block_from_height(const uint64_t& height) const override { return cryptonote::block(); } virtual void set_hard_fork_version(uint64_t height, uint8_t version) override {} diff --git a/src/crypto/crypto-ops-data.c b/src/crypto/crypto-ops-data.c index edaa4644fd2..57304c41dac 100644 --- a/src/crypto/crypto-ops-data.c +++ b/src/crypto/crypto-ops-data.c @@ -870,6 +870,7 @@ const fe fe_fffb1 = {-31702527, -2466483, -26106795, -12203692, -12169197, -3210 const fe fe_fffb2 = {8166131, -6741800, -17040804, 3154616, 21461005, 1466302, -30876704, -6368709, 10503587, -13363080}; /* sqrt(2 * A * (A + 2)) */ const fe fe_fffb3 = {-13620103, 14639558, 4532995, 7679154, 16815101, -15883539, -22863840, -14813421, 13716513, -6477756}; /* sqrt(-sqrt(-1) * A * (A + 2)) */ const fe fe_fffb4 = {-21786234, -12173074, 21573800, 4524538, -4645904, 16204591, 8012863, -8444712, 3212926, 6885324}; /* sqrt(sqrt(-1) * A * (A + 2)) */ +const fe fe_a_inv_3 = {-22207407, 11184811, 22369621, -11184811, -22369621, 11184811, 22369621, -11184811, -22369621, 11184811}; /* A / 3*/ const ge_p3 ge_p3_identity = { {0}, {1, 0}, {1, 0}, {0} }; const ge_p3 ge_p3_H = { {7329926, -15101362, 31411471, 7614783, 27996851, -3197071, -11157635, -6878293, 466949, -7986503}, diff --git a/src/crypto/crypto-ops.c b/src/crypto/crypto-ops.c index 314fe448a20..9dd9ff7ddf6 100644 --- a/src/crypto/crypto-ops.c +++ b/src/crypto/crypto-ops.c @@ -30,6 +30,8 @@ #include #include +#include +#include #include "warnings.h" #include "crypto-ops.h" @@ -90,7 +92,7 @@ void fe_0(fe h) { h = 1 */ -static void fe_1(fe h) { +void fe_1(fe h) { h[0] = 1; h[1] = 0; h[2] = 0; @@ -313,6 +315,39 @@ void fe_invert(fe out, const fe z) { return; } +// Montgomery's trick +// https://iacr.org/archive/pkc2004/29470042/29470042.pdf 2.2 +int fe_batch_invert(fe *out, const fe *in, const int n) { + if (n == 0) { + return 0; + } + + // Step 1: collect initial muls + fe *init_muls = (fe *) malloc(n * sizeof(fe)); + if (!init_muls) { + return 1; + } + 
memcpy(&init_muls[0], &in[0], sizeof(fe)); + for (int i = 1; i < n; ++i) { + fe_mul(init_muls[i], init_muls[i-1], in[i]); + } + + // Step 2: get the inverse of all elems multiplied together + fe a; + fe_invert(a, init_muls[n-1]); + + // Step 3: get each inverse + for (int i = n; i > 1; --i) { + fe_mul(out[i-1], a, init_muls[i-2]); + fe_mul(a, a, in[i-1]); + } + memcpy(&out[0], &a, sizeof(fe)); + + free(init_muls); + + return 0; +} + /* From fe_isnegative.c */ /* @@ -958,7 +993,7 @@ Can overlap h with f or g. |h| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. */ -static void fe_sub(fe h, const fe f, const fe g) { +void fe_sub(fe h, const fe f, const fe g) { int32_t f0 = f[0]; int32_t f1 = f[1]; int32_t f2 = f[2]; @@ -1328,16 +1363,9 @@ void ge_double_scalarmult_base_vartime_p3(ge_p3 *r3, const unsigned char *a, con } } -/* From ge_frombytes.c, modified */ - -int ge_frombytes_vartime(ge_p3 *h, const unsigned char *s) { - fe u; - fe v; - fe vxx; - fe check; - - /* From fe_frombytes.c */ +/* From fe_frombytes.c */ +int fe_frombytes_vartime(fe y, const unsigned char *s) { int64_t h0 = load_4(s); int64_t h1 = load_3(s + 4) << 6; int64_t h2 = load_3(s + 7) << 5; @@ -1378,18 +1406,31 @@ int ge_frombytes_vartime(ge_p3 *h, const unsigned char *s) { carry6 = (h6 + (int64_t) (1<<25)) >> 26; h7 += carry6; h6 -= carry6 << 26; carry8 = (h8 + (int64_t) (1<<25)) >> 26; h9 += carry8; h8 -= carry8 << 26; - h->Y[0] = h0; - h->Y[1] = h1; - h->Y[2] = h2; - h->Y[3] = h3; - h->Y[4] = h4; - h->Y[5] = h5; - h->Y[6] = h6; - h->Y[7] = h7; - h->Y[8] = h8; - h->Y[9] = h9; + y[0] = h0; + y[1] = h1; + y[2] = h2; + y[3] = h3; + y[4] = h4; + y[5] = h5; + y[6] = h6; + y[7] = h7; + y[8] = h8; + y[9] = h9; - /* End fe_frombytes.c */ + return 0; +} + +/* From ge_frombytes.c, modified */ + +int ge_frombytes_vartime(ge_p3 *h, const unsigned char *s) { + fe u; + fe v; + fe vxx; + fe check; + + if (fe_frombytes_vartime(h->Y, s) != 0) { + return -1; + } fe_1(h->Z); fe_sq(u, h->Y); @@ -1606,7 +1647,7 @@ static void ge_precomp_cmov(ge_precomp *t, const ge_precomp *u, unsigned char b) fe_cmov(t->xy2d, u->xy2d, b); } -static void select(ge_precomp *t, int pos, signed char b) { +static void _select(ge_precomp *t, int pos, signed char b) { ge_precomp minust; unsigned char bnegative = negative(b); unsigned char babs = b - (((-bnegative) & b) << 1); @@ -1662,7 +1703,7 @@ void ge_scalarmult_base(ge_p3 *h, const unsigned char *a) { ge_p3_0(h); for (i = 1; i < 64; i += 2) { - select(&t, i / 2, e[i]); + _select(&t, i / 2, e[i]); ge_madd(&r, h, &t); ge_p1p1_to_p3(h, &r); } @@ -1672,7 +1713,7 @@ void ge_scalarmult_base(ge_p3 *h, const unsigned char *a) { ge_p2_dbl(&r, &s); ge_p1p1_to_p3(h, &r); for (i = 0; i < 64; i += 2) { - select(&t, i / 2, e[i]); + _select(&t, i / 2, e[i]); ge_madd(&r, h, &t); ge_p1p1_to_p3(h, &r); } } @@ -3877,3 +3918,16 @@ int ge_p3_is_point_at_infinity_vartime(const ge_p3 *p) { // Y/Z = 0/0 return 0; } + +// https://www.ietf.org/archive/id/draft-ietf-lwig-curve-representations-02.pdf E.2 +void fe_ed_y_derivatives_to_wei_x(unsigned char *wei_x, const fe inv_one_minus_y, const fe one_plus_y) +{ + // (1/(1-y))*(1+y) + fe inv_one_minus_y_mul_one_plus_y; + fe_mul(inv_one_minus_y_mul_one_plus_y, inv_one_minus_y, one_plus_y); + + // wei x = (1/(1-y))*(1+y) + (A/3) + fe wei_x_fe; + fe_add(wei_x_fe, inv_one_minus_y_mul_one_plus_y, fe_a_inv_3); + fe_tobytes(wei_x, wei_x_fe); +} diff --git a/src/crypto/crypto-ops.h b/src/crypto/crypto-ops.h index c103f1f789d..b5976c7621b 100644 --- a/src/crypto/crypto-ops.h +++ 
b/src/crypto/crypto-ops.h @@ -88,6 +88,7 @@ void ge_double_scalarmult_base_vartime_p3(ge_p3 *, const unsigned char *, const extern const fe fe_sqrtm1; extern const fe fe_d; +int fe_frombytes_vartime(fe, const unsigned char *); int ge_frombytes_vartime(ge_p3 *, const unsigned char *); /* From ge_p1p1_to_p2.c */ @@ -143,6 +144,7 @@ extern const fe fe_fffb1; extern const fe fe_fffb2; extern const fe fe_fffb3; extern const fe fe_fffb4; +extern const fe fe_a_inv_3; extern const ge_p3 ge_p3_identity; extern const ge_p3 ge_p3_H; void ge_fromfe_frombytes_vartime(ge_p2 *, const unsigned char *); @@ -163,7 +165,12 @@ void ge_sub(ge_p1p1 *r, const ge_p3 *p, const ge_cached *q); void fe_add(fe h, const fe f, const fe g); void fe_tobytes(unsigned char *, const fe); void fe_invert(fe out, const fe z); +int fe_batch_invert(fe *out, const fe *in, const int n); void fe_mul(fe out, const fe, const fe); +void fe_sub(fe h, const fe f, const fe g); void fe_0(fe h); +void fe_1(fe h); int ge_p3_is_point_at_infinity_vartime(const ge_p3 *p); + +void fe_ed_y_derivatives_to_wei_x(unsigned char *wei_x, const fe inv_one_minus_y, const fe one_plus_y); diff --git a/src/crypto/crypto.cpp b/src/crypto/crypto.cpp index f6c94fa0393..2145b06a603 100644 --- a/src/crypto/crypto.cpp +++ b/src/crypto/crypto.cpp @@ -618,6 +618,12 @@ namespace crypto { ge_p1p1_to_p3(&res, &point2); } + void crypto_ops::derive_key_image_generator(const public_key &pub, ec_point &ki_gen) { + ge_p3 point; + hash_to_ec(pub, point); + ge_p3_tobytes(&ki_gen, &point); + } + void crypto_ops::generate_key_image(const public_key &pub, const secret_key &sec, key_image &image) { ge_p3 point; ge_p2 point2; @@ -773,4 +779,21 @@ POP_WARNINGS static_assert(sizeof(crypto::view_tag) <= sizeof(view_tag_full), "view tag should not be larger than hash result"); memcpy(&view_tag, &view_tag_full, sizeof(crypto::view_tag)); } + + bool crypto_ops::key_image_to_y(const key_image &ki, key_image_y &ki_y) { + static_assert(sizeof(key_image) == 32 && sizeof(key_image_y) == 32, "unexpected size of key image"); + memcpy(&ki_y, &ki, 32); + // clear the sign bit, leaving us with the y coord + ki_y.data[31] &= 0x7F; + // return true if sign bit is set on the original key image + return (ki.data[31] & 0x80) > 0; + } + + void crypto_ops::key_image_from_y(const key_image_y &ki_y, const bool sign, key_image &ki) { + static_assert(sizeof(key_image) == 32 && sizeof(key_image_y) == 32, "unexpected size of key image"); + memcpy(&ki, &ki_y, 32); + if (sign) { + ki.data[31] ^= 0x80; + } + } } diff --git a/src/crypto/crypto.h b/src/crypto/crypto.h index 9252017823e..26da75d13a2 100644 --- a/src/crypto/crypto.h +++ b/src/crypto/crypto.h @@ -95,6 +95,10 @@ namespace crypto { friend class crypto_ops; }; + POD_CLASS key_image_y: ec_point { + friend class crypto_ops; + }; + POD_CLASS signature { ec_scalar c, r; friend class crypto_ops; @@ -110,7 +114,7 @@ namespace crypto { static_assert(sizeof(ec_point) == 32 && sizeof(ec_scalar) == 32 && sizeof(public_key) == 32 && sizeof(public_key_memsafe) == 32 && sizeof(secret_key) == 32 && - sizeof(key_derivation) == 32 && sizeof(key_image) == 32 && + sizeof(key_derivation) == 32 && sizeof(key_image) == 32 && sizeof(key_image_y) == 32 && sizeof(signature) == 64 && sizeof(view_tag) == 1, "Invalid structure size"); class crypto_ops { @@ -145,6 +149,8 @@ namespace crypto { friend void generate_tx_proof_v1(const hash &, const public_key &, const public_key &, const boost::optional &, const public_key &, const secret_key &, signature &); static bool 
check_tx_proof(const hash &, const public_key &, const public_key &, const boost::optional &, const public_key &, const signature &, const int); friend bool check_tx_proof(const hash &, const public_key &, const public_key &, const boost::optional &, const public_key &, const signature &, const int); + static void derive_key_image_generator(const public_key &, ec_point &); + friend void derive_key_image_generator(const public_key &, ec_point &); static void generate_key_image(const public_key &, const secret_key &, key_image &); friend void generate_key_image(const public_key &, const secret_key &, key_image &); static void generate_ring_signature(const hash &, const key_image &, @@ -157,6 +163,10 @@ namespace crypto { const public_key *const *, std::size_t, const signature *); static void derive_view_tag(const key_derivation &, std::size_t, view_tag &); friend void derive_view_tag(const key_derivation &, std::size_t, view_tag &); + static bool key_image_to_y(const key_image &, key_image_y &); + friend bool key_image_to_y(const key_image &, key_image_y &); + static void key_image_from_y(const key_image_y &, const bool, key_image &); + friend void key_image_from_y(const key_image_y &, const bool, key_image &); }; void generate_random_bytes_thread_safe(size_t N, uint8_t *bytes); @@ -270,6 +280,10 @@ namespace crypto { return crypto_ops::check_tx_proof(prefix_hash, R, A, B, D, sig, version); } + inline void derive_key_image_generator(const public_key &pub, ec_point &ki_gen) { + crypto_ops::derive_key_image_generator(pub, ki_gen); + } + /* To send money to a key: * * The sender generates an ephemeral key and includes it in transaction output. * * To spend the money, the receiver generates a key image from it. @@ -313,6 +327,21 @@ namespace crypto { crypto_ops::derive_view_tag(derivation, output_index, vt); } + /** Clear the sign bit on the key image (i.e. get just the y coordinate). + * Return true if the sign bit is set, false if not. + * Since fcmp's allow construction of key images with sign bit cleared, while + * the same key image with sign bit set may already exist in the chain, we + * prevent double spends by converting all existing key images in the chain to + * their y coordinate and preventing duplicate key image y's. 
+ */ + inline bool key_image_to_y(const key_image &ki, key_image_y &ki_y) { + return crypto_ops::key_image_to_y(ki, ki_y); + } + + inline void key_image_from_y(const key_image_y &ki_y, const bool sign, key_image &ki) { + return crypto_ops::key_image_from_y(ki_y, sign, ki); + } + inline std::ostream &operator <<(std::ostream &o, const crypto::public_key &v) { epee::to_hex::formatted(o, epee::as_byte_span(v)); return o; } @@ -331,6 +360,9 @@ namespace crypto { inline std::ostream &operator <<(std::ostream &o, const crypto::key_image &v) { epee::to_hex::formatted(o, epee::as_byte_span(v)); return o; } + inline std::ostream &operator <<(std::ostream &o, const crypto::key_image_y &v) { + epee::to_hex::formatted(o, epee::as_byte_span(v)); return o; + } inline std::ostream &operator <<(std::ostream &o, const crypto::signature &v) { epee::to_hex::formatted(o, epee::as_byte_span(v)); return o; } @@ -345,6 +377,8 @@ namespace crypto { inline bool operator>(const public_key &p1, const public_key &p2) { return p2 < p1; } inline bool operator<(const key_image &p1, const key_image &p2) { return memcmp(&p1, &p2, sizeof(key_image)) < 0; } inline bool operator>(const key_image &p1, const key_image &p2) { return p2 < p1; } + inline bool operator<(const key_image_y &p1, const key_image_y &p2) { return memcmp(&p1, &p2, sizeof(key_image_y)) < 0; } + inline bool operator>(const key_image_y &p1, const key_image_y &p2) { return p2 < p1; } } // type conversions for easier calls to sc_add(), sc_sub(), hash functions @@ -357,5 +391,6 @@ CRYPTO_MAKE_HASHABLE(public_key) CRYPTO_MAKE_HASHABLE_CONSTANT_TIME(secret_key) CRYPTO_MAKE_HASHABLE_CONSTANT_TIME(public_key_memsafe) CRYPTO_MAKE_HASHABLE(key_image) +CRYPTO_MAKE_HASHABLE(key_image_y) CRYPTO_MAKE_COMPARABLE(signature) CRYPTO_MAKE_COMPARABLE(view_tag) diff --git a/src/cryptonote_basic/cryptonote_basic.h b/src/cryptonote_basic/cryptonote_basic.h index a50ae9c32d8..94624760939 100644 --- a/src/cryptonote_basic/cryptonote_basic.h +++ b/src/cryptonote_basic/cryptonote_basic.h @@ -306,7 +306,8 @@ namespace cryptonote ar.tag("rctsig_prunable"); ar.begin_object(); r = rct_signatures.p.serialize_rctsig_prunable(ar, rct_signatures.type, vin.size(), vout.size(), - vin.size() > 0 && vin[0].type() == typeid(txin_to_key) ? boost::get(vin[0]).key_offsets.size() - 1 : 0); + (vin.empty() || vin[0].type() != typeid(txin_to_key) || rct_signatures.type == rct::RCTTypeFcmpPlusPlus) + ? 
0 : boost::get(vin[0]).key_offsets.size() - 1); if (!r || !ar.good()) return false; ar.end_object(); } diff --git a/src/cryptonote_basic/cryptonote_boost_serialization.h b/src/cryptonote_basic/cryptonote_boost_serialization.h index 8948c650cd4..cbdaf507bd0 100644 --- a/src/cryptonote_basic/cryptonote_boost_serialization.h +++ b/src/cryptonote_basic/cryptonote_boost_serialization.h @@ -330,7 +330,7 @@ namespace boost a & x.type; if (x.type == rct::RCTTypeNull) return; - if (x.type != rct::RCTTypeFull && x.type != rct::RCTTypeSimple && x.type != rct::RCTTypeBulletproof && x.type != rct::RCTTypeBulletproof2 && x.type != rct::RCTTypeCLSAG && x.type != rct::RCTTypeBulletproofPlus) + if (x.type != rct::RCTTypeFull && x.type != rct::RCTTypeSimple && x.type != rct::RCTTypeBulletproof && x.type != rct::RCTTypeBulletproof2 && x.type != rct::RCTTypeCLSAG && x.type != rct::RCTTypeBulletproofPlus && x.type != rct::RCTTypeFcmpPlusPlus) throw boost::archive::archive_exception(boost::archive::archive_exception::other_exception, "Unsupported rct type"); // a & x.message; message is not serialized, as it can be reconstructed from the tx data // a & x.mixRing; mixRing is not serialized, as it can be reconstructed from the offsets @@ -339,6 +339,8 @@ namespace boost a & x.ecdhInfo; serializeOutPk(a, x.outPk, ver); a & x.txnFee; + if (x.type == rct::RCTTypeFcmpPlusPlus) + a & x.referenceBlock; } template @@ -354,6 +356,11 @@ namespace boost a & x.MGs; if (ver >= 1u) a & x.CLSAGs; + if (ver >= 3u) + { + a & x.curve_trees_tree_depth; + a & x.fcmp_pp; + } if (x.rangeSigs.empty()) a & x.pseudoOuts; } @@ -364,7 +371,7 @@ namespace boost a & x.type; if (x.type == rct::RCTTypeNull) return; - if (x.type != rct::RCTTypeFull && x.type != rct::RCTTypeSimple && x.type != rct::RCTTypeBulletproof && x.type != rct::RCTTypeBulletproof2 && x.type != rct::RCTTypeCLSAG && x.type != rct::RCTTypeBulletproofPlus) + if (x.type != rct::RCTTypeFull && x.type != rct::RCTTypeSimple && x.type != rct::RCTTypeBulletproof && x.type != rct::RCTTypeBulletproof2 && x.type != rct::RCTTypeCLSAG && x.type != rct::RCTTypeBulletproofPlus && x.type != rct::RCTTypeFcmpPlusPlus) throw boost::archive::archive_exception(boost::archive::archive_exception::other_exception, "Unsupported rct type"); // a & x.message; message is not serialized, as it can be reconstructed from the tx data // a & x.mixRing; mixRing is not serialized, as it can be reconstructed from the offsets @@ -373,6 +380,8 @@ namespace boost a & x.ecdhInfo; serializeOutPk(a, x.outPk, ver); a & x.txnFee; + if (x.type == rct::RCTTypeFcmpPlusPlus) + a & x.referenceBlock; //-------------- a & x.p.rangeSigs; if (x.p.rangeSigs.empty()) @@ -384,7 +393,12 @@ namespace boost a & x.p.MGs; if (ver >= 1u) a & x.p.CLSAGs; - if (x.type == rct::RCTTypeBulletproof || x.type == rct::RCTTypeBulletproof2 || x.type == rct::RCTTypeCLSAG || x.type == rct::RCTTypeBulletproofPlus) + if (ver >= 3u) + { + a & x.p.curve_trees_tree_depth; + a & x.p.fcmp_pp; + } + if (x.type == rct::RCTTypeBulletproof || x.type == rct::RCTTypeBulletproof2 || x.type == rct::RCTTypeCLSAG || x.type == rct::RCTTypeBulletproofPlus || x.type == rct::RCTTypeFcmpPlusPlus) a & x.p.pseudoOuts; } @@ -425,6 +439,6 @@ namespace boost } } -BOOST_CLASS_VERSION(rct::rctSigPrunable, 2) -BOOST_CLASS_VERSION(rct::rctSig, 2) +BOOST_CLASS_VERSION(rct::rctSigPrunable, 3) +BOOST_CLASS_VERSION(rct::rctSig, 3) BOOST_CLASS_VERSION(rct::multisig_out, 1) diff --git a/src/cryptonote_basic/cryptonote_format_utils.cpp 
b/src/cryptonote_basic/cryptonote_format_utils.cpp index 62ddd86fb4b..e60f7e945d3 100644 --- a/src/cryptonote_basic/cryptonote_format_utils.cpp +++ b/src/cryptonote_basic/cryptonote_format_utils.cpp @@ -106,7 +106,7 @@ namespace cryptonote uint64_t get_transaction_weight_clawback(const transaction &tx, size_t n_padded_outputs) { const rct::rctSig &rv = tx.rct_signatures; - const bool plus = rv.type == rct::RCTTypeBulletproofPlus; + const bool plus = rv.type == rct::RCTTypeBulletproofPlus || rv.type == rct::RCTTypeFcmpPlusPlus; const uint64_t bp_base = (32 * ((plus ? 6 : 9) + 7 * 2)) / 2; // notional size of a 2 output proof, normalized to 1 proof (ie, divided by 2) const size_t n_outputs = tx.vout.size(); if (n_padded_outputs <= 2) @@ -484,6 +484,7 @@ namespace cryptonote weight += extra; // calculate deterministic CLSAG/MLSAG data size + // TODO: update for fcmp_pp const size_t ring_size = boost::get(tx.vin[0]).key_offsets.size(); if (rct::is_rct_clsag(tx.rct_signatures.type)) extra = tx.vin.size() * (ring_size + 2) * 32; @@ -1292,7 +1293,8 @@ namespace cryptonote binary_archive ba(ss); const size_t inputs = t.vin.size(); const size_t outputs = t.vout.size(); - const size_t mixin = t.vin.empty() ? 0 : t.vin[0].type() == typeid(txin_to_key) ? boost::get(t.vin[0]).key_offsets.size() - 1 : 0; + const size_t mixin = (t.vin.empty() || t.rct_signatures.type == rct::RCTTypeFcmpPlusPlus || t.vin[0].type() != typeid(txin_to_key)) + ? 0 : boost::get(t.vin[0]).key_offsets.size() - 1; bool r = tt.rct_signatures.p.serialize_rctsig_prunable(ba, t.rct_signatures.type, inputs, outputs, mixin); CHECK_AND_ASSERT_MES(r, false, "Failed to serialize rct signatures prunable"); cryptonote::get_blob_hash(ss.str(), res); @@ -1644,4 +1646,62 @@ namespace cryptonote sc_sub((unsigned char*)key.data, (const unsigned char*)key.data, (const unsigned char*)hash.data); return key; } + //--------------------------------------------------------------- + // TODO: write tests for this func that match with current daemon logic + uint64_t get_unlock_block_index(uint64_t unlock_time, uint64_t block_included_in_chain) + { + uint64_t unlock_block_index = 0; + + static_assert(CRYPTONOTE_DEFAULT_TX_SPENDABLE_AGE > 0, "unexpected default spendable age"); + const uint64_t default_block_index = block_included_in_chain + (CRYPTONOTE_DEFAULT_TX_SPENDABLE_AGE - 1); + + if (unlock_time == 0) + { + unlock_block_index = default_block_index; + } + else if (unlock_time < CRYPTONOTE_MAX_BLOCK_NUMBER) + { + // The unlock_time in this case is supposed to be the chain height at which the output unlocks + // The chain height is 1 higher than the highest block index, so we subtract 1 for this delta + unlock_block_index = unlock_time > 0 ? (unlock_time - 1) : 0; + } + else + { + // Interpret the unlock_time as time + // TODO: hardcode correct times for each network and take in nettype + const auto hf_v15_time = 1656629118; + const auto hf_v15_height = 2689608; + + // Use the last hard fork's time and block combo to convert the time-based timelock into an unlock block + // TODO: consider taking into account 60s block times when that was consensus + if (hf_v15_time > unlock_time) + { + const auto seconds_since_unlock = hf_v15_time - unlock_time; + const auto blocks_since_unlock = seconds_since_unlock / DIFFICULTY_TARGET_V2; + + unlock_block_index = hf_v15_height > blocks_since_unlock + ? 
(hf_v15_height - blocks_since_unlock) + : default_block_index; + } + else + { + const auto seconds_until_unlock = unlock_time - hf_v15_time; + const auto blocks_until_unlock = seconds_until_unlock / DIFFICULTY_TARGET_V2; + unlock_block_index = hf_v15_height + blocks_until_unlock; + } + + /* Note: since this function was introduced for the hf that included fcmp's, it's possible for an output to be + spent before it reaches the unlock_block_index going by the old rules; this is ok. It can't be spent again b/c + it'll have a duplicate key image. It's also possible for an output to unlock by old rules, and then re-lock + again at the fork. This is also ok, we just need to be sure that the new hf rules use this unlock_block_index + starting at the fork for fcmp's. + */ + + // TODO: double check the accuracy of this calculation + MDEBUG("unlock time: " << unlock_time << " , unlock_block_index: " << unlock_block_index); + } + + // Can't unlock earlier than the default unlock block + return std::max(unlock_block_index, default_block_index); + } } diff --git a/src/cryptonote_basic/cryptonote_format_utils.h b/src/cryptonote_basic/cryptonote_format_utils.h index fc7dfcd8590..e3a4644030c 100644 --- a/src/cryptonote_basic/cryptonote_format_utils.h +++ b/src/cryptonote_basic/cryptonote_format_utils.h @@ -37,6 +37,7 @@ #include "include_base_utils.h" #include "crypto/crypto.h" #include "crypto/hash.h" +#include "fcmp_pp/curve_trees.h" #include #include @@ -265,6 +266,10 @@ namespace cryptonote crypto::secret_key encrypt_key(crypto::secret_key key, const epee::wipeable_string &passphrase); crypto::secret_key decrypt_key(crypto::secret_key key, const epee::wipeable_string &passphrase); + + // Returns the block index in which the provided unlock_time unlocks + uint64_t get_unlock_block_index(uint64_t unlock_time, uint64_t block_included_in_chain); + #define CHECKED_GET_SPECIFIC_VARIANT(variant_var, specific_type, variable_name, fail_return_val) \ CHECK_AND_ASSERT_MES(variant_var.type() == typeid(specific_type), fail_return_val, "wrong variant type: " << variant_var.type().name() << ", expected " << typeid(specific_type).name()); \ specific_type& variable_name = boost::get(variant_var); diff --git a/src/cryptonote_core/blockchain.cpp b/src/cryptonote_core/blockchain.cpp index 13c470172a4..3015a6136c6 100644 --- a/src/cryptonote_core/blockchain.cpp +++ b/src/cryptonote_core/blockchain.cpp @@ -156,7 +156,9 @@ bool Blockchain::scan_outputkeys_for_indexes(size_t tx_version, const txin_to_ke auto it = m_scan_table.find(tx_prefix_hash); if (it != m_scan_table.end()) { - auto its = it->second.find(tx_in_to_key.k_image); + crypto::key_image_y ki_y; + crypto::key_image_to_y(tx_in_to_key.k_image, ki_y); + auto its = it->second.find(ki_y); if (its != it->second.end()) { outputs = its->second; @@ -2910,7 +2912,9 @@ bool Blockchain::check_for_double_spend(const transaction& tx, key_images_contai // if the insert into the block-wide spent keys container succeeds, // check the blockchain-wide spent keys container and make sure the // key wasn't used in another block already. 
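For concreteness, the time-based branch of get_unlock_block_index above works out as follows with the patch's hardcoded hf_v15 constants and Monero's 120-second DIFFICULTY_TARGET_V2 (the sample unlock_time values are made up):

// unlock_time = 1656989118, i.e. 360000 s after hf_v15_time = 1656629118
// seconds_until_unlock = 1656989118 - 1656629118 = 360000
// blocks_until_unlock  = 360000 / 120            = 3000
// unlock_block_index   = 2689608 + 3000          = 2692608  (hf_v15_height + blocks)

If instead the unlock_time falls 1200 s before hf_v15_time, then blocks_since_unlock = 1200 / 120 = 10 and the index becomes 2689608 - 10 = 2689598, which the final std::max clamps up to the default spendable-age index whenever that is higher.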
- auto r = m_spent_keys.insert(ki); + crypto::key_image_y ki_y; + crypto::key_image_to_y(ki, ki_y); + auto r = m_spent_keys.insert(ki_y); if(!r.second || m_db->has_key_image(ki)) { //double spend detected @@ -5151,7 +5155,7 @@ bool Blockchain::prepare_handle_incoming_blocks(const std::vector>()); + m_scan_table.emplace(tx_prefix_hash, std::unordered_map>()); its = m_scan_table.find(tx_prefix_hash); assert(its != m_scan_table.end()); @@ -5161,7 +5165,9 @@ bool Blockchain::prepare_handle_incoming_blocks(const std::vector (txin); // check for duplicate - auto it = its->second.find(in_to_key.k_image); + crypto::key_image_y ki_y; + crypto::key_image_to_y(in_to_key.k_image, ki_y); + auto it = its->second.find(ki_y); if (it != its->second.end()) SCAN_TABLE_QUIT("Duplicate key_image found from incoming blocks."); @@ -5278,7 +5284,9 @@ bool Blockchain::prepare_handle_incoming_blocks(const std::vectorsecond.emplace(in_to_key.k_image, outputs); + crypto::key_image_y ki_y; + crypto::key_image_to_y(in_to_key.k_image, ki_y); + its->second.emplace(ki_y, outputs); } } } @@ -5577,7 +5585,7 @@ void Blockchain::unlock() m_blockchain_lock.unlock(); } -bool Blockchain::for_all_key_images(std::function f) const +bool Blockchain::for_all_key_images(std::function f) const { return m_db->for_all_key_images(f); } diff --git a/src/cryptonote_core/blockchain.h b/src/cryptonote_core/blockchain.h index 2caad16a577..e86cdfc9f23 100644 --- a/src/cryptonote_core/blockchain.h +++ b/src/cryptonote_core/blockchain.h @@ -955,7 +955,7 @@ namespace cryptonote * * @return false if any key image fails the check, otherwise true */ - bool for_all_key_images(std::function) const; + bool for_all_key_images(std::function) const; /** * @brief perform a check on all blocks in the blockchain in the given range @@ -1125,7 +1125,7 @@ namespace cryptonote #endif // TODO: evaluate whether or not each of these typedefs are left over from blockchain_storage - typedef std::unordered_set key_images_container; + typedef std::unordered_set key_images_container; typedef std::vector blocks_container; @@ -1143,7 +1143,7 @@ namespace cryptonote size_t m_current_block_cumul_weight_median; // metadata containers - std::unordered_map>> m_scan_table; + std::unordered_map>> m_scan_table; std::unordered_map m_blocks_longhash_table; // Keccak hashes for each block and for fast pow checking diff --git a/src/cryptonote_core/cryptonote_core.cpp b/src/cryptonote_core/cryptonote_core.cpp index 954dc81e4c0..06598449edf 100644 --- a/src/cryptonote_core/cryptonote_core.cpp +++ b/src/cryptonote_core/cryptonote_core.cpp @@ -1292,11 +1292,13 @@ namespace cryptonote //----------------------------------------------------------------------------------------------- bool core::check_tx_inputs_keyimages_diff(const transaction& tx) const { - std::unordered_set ki; + std::unordered_set ki; for(const auto& in: tx.vin) { CHECKED_GET_SPECIFIC_VARIANT(in, const txin_to_key, tokey_in, false); - if(!ki.insert(tokey_in.k_image).second) + crypto::key_image_y ki_y; + crypto::key_image_to_y(tokey_in.k_image, ki_y); + if(!ki.insert(ki_y).second) return false; } return true; diff --git a/src/cryptonote_core/tx_pool.cpp b/src/cryptonote_core/tx_pool.cpp index 2d01b2bb287..fdbefcfadde 100644 --- a/src/cryptonote_core/tx_pool.cpp +++ b/src/cryptonote_core/tx_pool.cpp @@ -523,7 +523,9 @@ namespace cryptonote for(const auto& in: tx.vin) { CHECKED_GET_SPECIFIC_VARIANT(in, const txin_to_key, txin, false); - std::unordered_set& kei_image_set = m_spent_key_images[txin.k_image]; + 
crypto::key_image_y ki_y; + const bool ki_sign = crypto::key_image_to_y(txin.k_image, ki_y); + auto& kei_image_set = m_spent_key_images[ki_y]; // Only allow multiple txes per key-image if kept-by-block. Only allow // the same txid if going from local/stem->fluff. @@ -531,14 +533,14 @@ namespace cryptonote if (tx_relay != relay_method::block) { const bool one_txid = - (kei_image_set.empty() || (kei_image_set.size() == 1 && *(kei_image_set.cbegin()) == id)); + (kei_image_set.empty() || (kei_image_set.size() == 1 && (*(kei_image_set.cbegin())).tx_hash == id)); CHECK_AND_ASSERT_MES(one_txid, false, "internal error: tx_relay=" << unsigned(tx_relay) << ", kei_image_set.size()=" << kei_image_set.size() << ENDL << "txin.k_image=" << txin.k_image << ENDL << "tx_id=" << id); } const bool new_or_previously_private = - kei_image_set.insert(id).second || + kei_image_set.insert({id, ki_sign}).second || !m_blockchain.txpool_tx_matches_category(id, relay_category::legacy); CHECK_AND_ASSERT_MES(new_or_previously_private, false, "internal error: try to insert duplicate iterator in key_image set"); } @@ -557,14 +559,16 @@ namespace cryptonote for(const txin_v& vi: tx.vin) { CHECKED_GET_SPECIFIC_VARIANT(vi, const txin_to_key, txin, false); - auto it = m_spent_key_images.find(txin.k_image); + crypto::key_image_y ki_y; + const bool ki_sign = crypto::key_image_to_y(txin.k_image, ki_y); + auto it = m_spent_key_images.find(ki_y); CHECK_AND_ASSERT_MES(it != m_spent_key_images.end(), false, "failed to find transaction input in key images. img=" << txin.k_image << ENDL << "transaction id = " << actual_hash); - std::unordered_set& key_image_set = it->second; + auto& key_image_set = it->second; CHECK_AND_ASSERT_MES(key_image_set.size(), false, "empty key_image set, img=" << txin.k_image << ENDL << "transaction id = " << actual_hash); - auto it_in_set = key_image_set.find(actual_hash); + auto it_in_set = key_image_set.find({actual_hash, ki_sign}); CHECK_AND_ASSERT_MES(it_in_set != key_image_set.end(), false, "transaction id not found in key_image set, img=" << txin.k_image << ENDL << "transaction id = " << actual_hash); key_image_set.erase(it_in_set); @@ -1099,7 +1103,7 @@ namespace cryptonote backlog.clear(); uint64_t w = 0; - std::unordered_set k_images; + std::unordered_set k_images; for (const tx_block_template_backlog_entry& e : tmp) { @@ -1262,19 +1266,40 @@ namespace cryptonote }, true, category); for (const key_images_container::value_type& kee : m_spent_key_images) { - const crypto::key_image& k_image = kee.first; - const std::unordered_set& kei_image_set = kee.second; - spent_key_image_info ki; - ki.id_hash = epee::string_tools::pod_to_hex(k_image); - for (const crypto::hash& tx_id_hash : kei_image_set) - { - if (m_blockchain.txpool_tx_matches_category(tx_id_hash, category)) - ki.txs_hashes.push_back(epee::string_tools::pod_to_hex(tx_id_hash)); + // id_hash corresponds to key image as the daemon received it, so we need + // to derive key image from key_image_y and sign bit to prevent a breaking + // change to clients. After the fcmp fork, all key images should have sign + // bit cleared so this can be cleaned up further. 
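The derivation the comment above describes is simply the inverse of key_image_to_y. A minimal round-trip check, using only the helpers this patch declares in crypto.h:

#include <cassert>
#include <cstring>
#include "crypto/crypto.h"

// Decompose a key image into (y coordinate, sign bit), then rebuild it.
// key_image_to_y clears bit 7 of byte 31 and reports whether it was set;
// key_image_from_y restores that bit, giving back the original bytes.
void ki_round_trip(const crypto::key_image &ki)
{
    crypto::key_image_y ki_y;
    const bool sign = crypto::key_image_to_y(ki, ki_y);

    crypto::key_image rebuilt;
    crypto::key_image_from_y(ki_y, sign, rebuilt);

    assert(std::memcmp(&ki, &rebuilt, sizeof(ki)) == 0);
}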
+ const crypto::key_image_y& k_image_y = kee.first; + const auto& kei_image_set = kee.second; + spent_key_image_info ki_info_sign; + spent_key_image_info ki_info_no_sign; + for (const auto& ki_context : kei_image_set) + { + const crypto::hash &tx_hash = ki_context.tx_hash; + const bool sign = ki_context.sign; + if (m_blockchain.txpool_tx_matches_category(tx_hash, category)) + { + crypto::key_image ki; + crypto::key_image_from_y(k_image_y, sign, ki); + if (sign) + { + ki_info_sign.id_hash = epee::string_tools::pod_to_hex(ki); + ki_info_sign.txs_hashes.push_back(epee::string_tools::pod_to_hex(tx_hash)); + } + else + { + ki_info_no_sign.id_hash = epee::string_tools::pod_to_hex(ki); + ki_info_no_sign.txs_hashes.push_back(epee::string_tools::pod_to_hex(tx_hash)); + } + } } // Only return key images for which we have at least one tx that we can show for them - if (!ki.txs_hashes.empty()) - key_image_infos.push_back(std::move(ki)); + if (!ki_info_sign.txs_hashes.empty()) + key_image_infos.push_back(std::move(ki_info_sign)); + if (!ki_info_no_sign.txs_hashes.empty()) + key_image_infos.push_back(std::move(ki_info_no_sign)); } return true; } @@ -1314,11 +1339,11 @@ namespace cryptonote for (const key_images_container::value_type& kee : m_spent_key_images) { std::vector tx_hashes; - const std::unordered_set& kei_image_set = kee.second; - for (const crypto::hash& tx_id_hash : kei_image_set) + const auto& kei_image_set = kee.second; + for (const auto& ki_context : kei_image_set) { - if (m_blockchain.txpool_tx_matches_category(tx_id_hash, relay_category::broadcasted)) - tx_hashes.push_back(tx_id_hash); + if (m_blockchain.txpool_tx_matches_category(ki_context.tx_hash, relay_category::broadcasted)) + tx_hashes.push_back(ki_context.tx_hash); } if (!tx_hashes.empty()) @@ -1337,11 +1362,13 @@ namespace cryptonote for (const auto& image : key_images) { bool is_spent = false; - const auto found = m_spent_key_images.find(image); + crypto::key_image_y ki_y; + crypto::key_image_to_y(image, ki_y); + const auto found = m_spent_key_images.find(ki_y); if (found != m_spent_key_images.end()) { - for (const crypto::hash& tx_hash : found->second) - is_spent |= m_blockchain.txpool_tx_matches_category(tx_hash, relay_category::broadcasted); + for (const auto& ki_context : found->second) + is_spent |= m_blockchain.txpool_tx_matches_category(ki_context.tx_hash, relay_category::broadcasted); } spent.push_back(is_spent); } @@ -1402,12 +1429,14 @@ namespace cryptonote bool tx_memory_pool::have_tx_keyimg_as_spent(const crypto::key_image& key_im, const crypto::hash& txid) const { CRITICAL_REGION_LOCAL(m_transactions_lock); - const auto found = m_spent_key_images.find(key_im); + crypto::key_image_y ki_y; + crypto::key_image_to_y(key_im, ki_y); + const auto found = m_spent_key_images.find(ki_y); if (found != m_spent_key_images.end() && !found->second.empty()) { // If another tx is using the key image, always return as spent. // See `insert_key_images`. 
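The reason the pool keys its container by key_image_y: two serialized key images that differ only in the x-sign bit collapse to the same y coordinate, so a spend attempt under either encoding hits the same map entry. A hypothetical illustration (ki_a is any valid key image; direct byte access is assumed here purely for demonstration):

#include <cassert>
#include <cstring>
#include "crypto/crypto.h"

// Flipping only the sign bit (bit 7 of byte 31) yields a different key image
// encoding, but both encodings map to the same key_image_y, and therefore to
// the same m_spent_key_images bucket.
void same_bucket(const crypto::key_image &ki_a)
{
    crypto::key_image ki_b = ki_a;
    ki_b.data[31] ^= 0x80; // negate the x-sign bit

    crypto::key_image_y y_a, y_b;
    crypto::key_image_to_y(ki_a, y_a);
    crypto::key_image_to_y(ki_b, y_b);

    assert(std::memcmp(&y_a, &y_b, sizeof(y_a)) == 0);
}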
- if (1 < found->second.size() || *(found->second.cbegin()) != txid) + if (1 < found->second.size() || (*(found->second.cbegin())).tx_hash != txid) return true; return m_blockchain.txpool_tx_matches_category(txid, relay_category::legacy); } @@ -1515,23 +1544,27 @@ namespace cryptonote return is_transaction_ready_to_go(txd, txid, cryptonote::blobdata_ref{txblob.data(), txblob.size()}, tx); } //--------------------------------------------------------------------------------- - bool tx_memory_pool::have_key_images(const std::unordered_set& k_images, const transaction_prefix& tx) + bool tx_memory_pool::have_key_images(const std::unordered_set& k_images, const transaction_prefix& tx) { for(size_t i = 0; i!= tx.vin.size(); i++) { CHECKED_GET_SPECIFIC_VARIANT(tx.vin[i], const txin_to_key, itk, false); - if(k_images.count(itk.k_image)) + crypto::key_image_y ki_y; + crypto::key_image_to_y(itk.k_image, ki_y); + if(k_images.count(ki_y)) return true; } return false; } //--------------------------------------------------------------------------------- - bool tx_memory_pool::append_key_images(std::unordered_set& k_images, const transaction_prefix& tx) + bool tx_memory_pool::append_key_images(std::unordered_set& k_images, const transaction_prefix& tx) { for(size_t i = 0; i!= tx.vin.size(); i++) { CHECKED_GET_SPECIFIC_VARIANT(tx.vin[i], const txin_to_key, itk, false); - auto i_res = k_images.insert(itk.k_image); + crypto::key_image_y ki_y; + crypto::key_image_to_y(itk.k_image, ki_y); + auto i_res = k_images.insert(ki_y); CHECK_AND_ASSERT_MES(i_res.second, false, "internal error: key images pool cache - inserted duplicate image in set: " << itk.k_image); } return true; @@ -1546,11 +1579,14 @@ namespace cryptonote for(size_t i = 0; i!= tx.vin.size(); i++) { CHECKED_GET_SPECIFIC_VARIANT(tx.vin[i], const txin_to_key, itk, void()); - const key_images_container::const_iterator it = m_spent_key_images.find(itk.k_image); + crypto::key_image_y ki_y; + crypto::key_image_to_y(itk.k_image, ki_y); + const key_images_container::const_iterator it = m_spent_key_images.find(ki_y); if (it != m_spent_key_images.end()) { - for (const crypto::hash &txid: it->second) + for (const auto &ki_context: it->second) { + const auto &txid = ki_context.tx_hash; txpool_tx_meta_t meta; if (!m_blockchain.get_txpool_tx_meta(txid, meta)) { @@ -1634,7 +1670,7 @@ namespace cryptonote size_t max_total_weight_pre_v5 = (130 * median_weight) / 100 - CRYPTONOTE_COINBASE_BLOB_RESERVED_SIZE; size_t max_total_weight_v5 = 2 * median_weight - CRYPTONOTE_COINBASE_BLOB_RESERVED_SIZE; size_t max_total_weight = version >= 5 ? max_total_weight_v5 : max_total_weight_pre_v5; - std::unordered_set k_images; + std::unordered_set k_images; LOG_PRINT_L2("Filling block template, median weight " << median_weight << ", " << m_txs_by_fee_and_receive_time.size() << " txes in the pool"); diff --git a/src/cryptonote_core/tx_pool.h b/src/cryptonote_core/tx_pool.h index 69a123fc9e3..86ae947f4c5 100644 --- a/src/cryptonote_core/tx_pool.h +++ b/src/cryptonote_core/tx_pool.h @@ -52,6 +52,29 @@ #include "rpc/core_rpc_server_commands_defs.h" #include "rpc/message_data_structs.h" +namespace cryptonote +{ + //! 
key image's contextual data + struct ki_context_t + { + crypto::hash tx_hash; + bool sign; // original key image had sign bit set + bool operator==(const ki_context_t rhs) const { return rhs.tx_hash == tx_hash && rhs.sign == sign; }; + }; +}//cryptonote + +namespace std +{ + template<> struct hash + { + std::size_t operator()(const cryptonote::ki_context_t &_ki_context) const + { + const std::size_t h = reinterpret_cast(_ki_context.tx_hash); + return h + (_ki_context.sign ? 1 : 0); + } + }; +}//std + namespace cryptonote { class Blockchain; @@ -553,7 +576,7 @@ namespace cryptonote * * @return true if any key images present in the set, otherwise false */ - static bool have_key_images(const std::unordered_set& kic, const transaction_prefix& tx); + static bool have_key_images(const std::unordered_set& kic, const transaction_prefix& tx); /** * @brief append the key images from a transaction to the given set @@ -563,7 +586,7 @@ namespace cryptonote * * @return false if any append fails, otherwise true */ - static bool append_key_images(std::unordered_set& kic, const transaction_prefix& tx); + static bool append_key_images(std::unordered_set& kic, const transaction_prefix& tx); /** * @brief check if a transaction is a valid candidate for inclusion in a block @@ -602,8 +625,12 @@ namespace cryptonote * in the event of a reorg where someone creates a new/different * transaction on the assumption that the original will not be in a * block again. + *! we use key_image_y as the key since we need to prevent double spends of + * key image y coordinates (fcmp's enables constructing key images with + * sign bit cleared for key images which may already exist in the chain + * with sign bit set) */ - typedef std::unordered_map> key_images_container; + typedef std::unordered_map> key_images_container; #if defined(DEBUG_CREATE_BLOCK_TEMPLATE) public: diff --git a/src/fcmp_pp/CMakeLists.txt b/src/fcmp_pp/CMakeLists.txt new file mode 100644 index 00000000000..54ebad4a851 --- /dev/null +++ b/src/fcmp_pp/CMakeLists.txt @@ -0,0 +1,59 @@ +# Copyright (c) 2024, The Monero Project +# +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without modification, are +# permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this list of +# conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, this list +# of conditions and the following disclaimer in the documentation and/or other +# materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors may be +# used to endorse or promote products derived from this software without specific +# prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL +# THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF +# THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +set(fcmp_pp_sources + curve_trees.cpp + fcmp_pp_crypto.cpp + tower_cycle.cpp) + +monero_find_all_headers(fcmp_pp_headers "${CMAKE_CURRENT_SOURCE_DIR}") + +add_subdirectory(fcmp_pp_rust) + +monero_add_library_with_deps( + NAME fcmp_pp + DEPENDS fcmp_pp_rust + SOURCES + ${fcmp_pp_sources} + ${fcmp_pp_headers}) + +if(WIN32) + set(EXTRA_RUST_LIBRARIES ws2_32 ntdll userenv) +else() + set(EXTRA_RUST_LIBRARIES ) +endif() + +target_link_libraries(fcmp_pp + PUBLIC + cncrypto + common + epee + PRIVATE + ${CMAKE_CURRENT_BINARY_DIR}/fcmp_pp_rust/libfcmp_pp_rust.a + ${EXTRA_LIBRARIES} + ${EXTRA_RUST_LIBRARIES}) diff --git a/src/fcmp_pp/curve_trees.cpp b/src/fcmp_pp/curve_trees.cpp new file mode 100644 index 00000000000..f4701d6f97a --- /dev/null +++ b/src/fcmp_pp/curve_trees.cpp @@ -0,0 +1,1239 @@ +// Copyright (c) 2024, The Monero Project +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without modification, are +// permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of +// conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list +// of conditions and the following disclaimer in the documentation and/or other +// materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be +// used to endorse or promote products derived from this software without specific +// prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY +// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL +// THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF +// THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
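Throughout the tree logic that follows, layer sizes are computed with the expression 1 + ((n_children - 1) / chunk_width), which is ceil(n / w) for n > 0: every chunk of up to chunk_width children hashes down to one parent. A small self-contained check of that identity (illustrative only, not part of the patch):

#include <cassert>
#include <cstdint>

// A layer of n children, hashed in chunks of width w, yields ceil(n / w)
// parents; the patch writes this as 1 + ((n - 1) / w) for n > 0.
static uint64_t n_parents(uint64_t n_children, uint64_t chunk_width)
{
    return n_children == 0 ? 0 : 1 + ((n_children - 1) / chunk_width);
}

int main()
{
    assert(n_parents(1, 5)  == 1); // a single child still fills one chunk
    assert(n_parents(5, 5)  == 1); // exactly one full chunk
    assert(n_parents(6, 5)  == 2); // one full chunk plus a one-child chunk
    assert(n_parents(11, 5) == 3);
}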
+ +#include "curve_trees.h" + +#include "common/threadpool.h" +#include "ringct/rctOps.h" + +#include + +namespace fcmp_pp +{ +namespace curve_trees +{ +//---------------------------------------------------------------------------------------------------------------------- +//---------------------------------------------------------------------------------------------------------------------- +// Instantiate the tower cycle types +template class CurveTrees; +//---------------------------------------------------------------------------------------------------------------------- +//---------------------------------------------------------------------------------------------------------------------- +// Public helper functions +//---------------------------------------------------------------------------------------------------------------------- +template +typename C::Point get_new_parent(const std::unique_ptr &curve, const typename C::Chunk &new_children) +{ + return curve->hash_grow( + curve->hash_init_point(), + 0,/*offset*/ + curve->zero_scalar(), + new_children + ); +}; +template Helios::Point get_new_parent(const std::unique_ptr &curve, + const typename Helios::Chunk &new_children); +template Selene::Point get_new_parent(const std::unique_ptr &curve, + const typename Selene::Chunk &new_children); +//---------------------------------------------------------------------------------------------------------------------- +std::shared_ptr curve_trees_v1(const std::size_t helios_chunk_width, const std::size_t selene_chunk_width) +{ + std::unique_ptr helios(new Helios()); + std::unique_ptr selene(new Selene()); + return std::shared_ptr( + new CurveTreesV1( + std::move(helios), + std::move(selene), + helios_chunk_width, + selene_chunk_width + ) + ); +}; +//---------------------------------------------------------------------------------------------------------------------- +//---------------------------------------------------------------------------------------------------------------------- +// Static functions +//---------------------------------------------------------------------------------------------------------------------- +// After hashing a layer of children points, convert those children x-coordinates into their respective cycle +// scalars, and prepare them to be hashed for the next layer +template +static std::vector next_child_scalars_from_children(const std::unique_ptr &c_child, + const typename C_CHILD::Point *last_root, + const LayerExtension &children) +{ + std::vector child_scalars_out; + child_scalars_out.reserve(1 + children.hashes.size()); + + // If we're creating a *new* root at the existing root layer, we may need to include the *existing* root when + // hashing the *existing* root layer + if (last_root != nullptr) + { + // If the children don't already include the existing root, then we need to include it to be hashed + // - the children would include the existing root already if the existing root was updated in the child + // layer (the start_idx would be 0) + if (children.start_idx > 0) + { + MDEBUG("Updating root layer and including the existing root in next children"); + child_scalars_out.emplace_back(c_child->point_to_cycle_scalar(*last_root)); + } + } + + // Convert child points to scalars + tower_cycle::extend_scalars_from_cycle_points(c_child, children.hashes, child_scalars_out); + + return child_scalars_out; +}; +//---------------------------------------------------------------------------------------------------------------------- +template +static void 
hash_first_chunk(const std::unique_ptr &curve, + const typename C::Scalar *old_last_child, + const typename C::Point *old_last_parent, + const std::size_t start_offset, + const std::vector &new_child_scalars, + const std::size_t chunk_size, + typename C::Point &hash_out) +{ + // Prepare to hash + const auto &existing_hash = old_last_parent != nullptr + ? *old_last_parent + : curve->hash_init_point(); + + const auto &prior_child_after_offset = old_last_child != nullptr + ? *old_last_child + : curve->zero_scalar(); + + const auto chunk_start = new_child_scalars.data(); + const typename C::Chunk chunk{chunk_start, chunk_size}; + + MDEBUG("existing_hash: " << curve->to_string(existing_hash) << " , start_offset: " << start_offset + << " , prior_child_after_offset: " << curve->to_string(prior_child_after_offset)); + + for (std::size_t i = 0; i < chunk_size; ++i) + MDEBUG("Hashing child in first chunk " << curve->to_string(chunk_start[i])); + + // Do the hash + auto chunk_hash = curve->hash_grow( + existing_hash, + start_offset, + prior_child_after_offset, + chunk + ); + + MDEBUG("Child chunk_start_idx " << 0 << " result: " << curve->to_string(chunk_hash) + << " , chunk_size: " << chunk_size); + + // We've got our hash + hash_out = std::move(chunk_hash); +} +//---------------------------------------------------------------------------------------------------------------------- +template +static void hash_next_chunk(const std::unique_ptr &curve, + const std::size_t chunk_start_idx, + const std::vector &new_child_scalars, + const std::size_t chunk_size, + typename C::Point &hash_out) +{ + const auto chunk_start = new_child_scalars.data() + chunk_start_idx; + const typename C::Chunk chunk{chunk_start, chunk_size}; + + for (std::size_t i = 0; i < chunk_size; ++i) + MDEBUG("Child chunk_start_idx " << chunk_start_idx << " hashing child " << curve->to_string(chunk_start[i])); + + auto chunk_hash = get_new_parent(curve, chunk); + + MDEBUG("Child chunk_start_idx " << chunk_start_idx << " result: " << curve->to_string(chunk_hash) + << " , chunk_size: " << chunk_size); + + // We've got our hash + hash_out = std::move(chunk_hash); +} +//---------------------------------------------------------------------------------------------------------------------- +// Hash chunks of a layer of new children, outputting the next layer's parents +template +static LayerExtension hash_children_chunks(const std::unique_ptr &curve, + const typename C::Scalar *old_last_child, + const typename C::Point *old_last_parent, + const std::size_t start_offset, + const uint64_t next_parent_start_index, + const std::vector &new_child_scalars, + const std::size_t chunk_width) +{ + LayerExtension parents_out; + parents_out.start_idx = next_parent_start_index; + parents_out.update_existing_last_hash = old_last_parent != nullptr; + + CHECK_AND_ASSERT_THROW_MES(!new_child_scalars.empty(), "empty child scalars"); + CHECK_AND_ASSERT_THROW_MES(chunk_width > start_offset, "start_offset must be smaller than chunk_width"); + + // See how many children we need to fill up the existing last chunk + std::size_t chunk_size = std::min(new_child_scalars.size(), chunk_width - start_offset); + + CHECK_AND_ASSERT_THROW_MES(new_child_scalars.size() >= chunk_size, "unexpected first chunk size"); + + const std::size_t n_chunks = 1 // first chunk + + (new_child_scalars.size() - chunk_size) / chunk_width // middle chunks + + (((new_child_scalars.size() - chunk_size) % chunk_width > 0) ? 
1 : 0); // final chunk + + parents_out.hashes.resize(n_chunks); + + MDEBUG("First chunk_size: " << chunk_size << " , num new child scalars: " << new_child_scalars.size() + << " , start_offset: " << start_offset << " , parent layer start idx: " << parents_out.start_idx); + + // Hash all chunks in parallel + tools::threadpool& tpool = tools::threadpool::getInstanceForCompute(); + tools::threadpool::waiter waiter(tpool); + + // Hash the first chunk + tpool.submit(&waiter, + [ + &curve, + &old_last_child, + &old_last_parent, + &new_child_scalars, + &parents_out, + start_offset, + chunk_size + ]() + { + auto &hash_out = parents_out.hashes[0]; + hash_first_chunk(curve, + old_last_child, + old_last_parent, + start_offset, + new_child_scalars, + chunk_size, + hash_out); + }, + true + ); + + // Hash chunks of child scalars to create the parent hashes + std::size_t chunk_start_idx = chunk_size; + std::size_t chunk_idx = 1; + while (chunk_start_idx < new_child_scalars.size()) + { + // Fill a complete chunk, or add the remaining new children to the last chunk + chunk_size = std::min(chunk_width, new_child_scalars.size() - chunk_start_idx); + + CHECK_AND_ASSERT_THROW_MES(chunk_idx < parents_out.hashes.size(), "unexpected chunk_idx"); + + tpool.submit(&waiter, + [ + &curve, + &new_child_scalars, + &parents_out, + chunk_start_idx, + chunk_size, + chunk_idx + ]() + { + auto &hash_out = parents_out.hashes[chunk_idx]; + hash_next_chunk(curve, chunk_start_idx, new_child_scalars, chunk_size, hash_out); + }, + true + ); + + // Advance to the next chunk + chunk_start_idx += chunk_size; + + CHECK_AND_ASSERT_THROW_MES(chunk_start_idx <= new_child_scalars.size(), "unexpected chunk start idx"); + + ++chunk_idx; + } + + CHECK_AND_ASSERT_THROW_MES(chunk_idx == n_chunks, "unexpected n chunks"); + CHECK_AND_ASSERT_THROW_MES(waiter.wait(), "failed to hash chunks"); + + return parents_out; +}; +//---------------------------------------------------------------------------------------------------------------------- +static GrowLayerInstructions get_grow_layer_instructions(const uint64_t old_total_children, + const uint64_t new_total_children, + const std::size_t parent_chunk_width, + const bool last_child_will_change) +{ + // 1. Check pre-conditions on total number of children + // - If there's only 1 old child, it must be the old root, and we must be setting a new parent layer after old root + const bool setting_next_layer_after_old_root = old_total_children == 1; + if (setting_next_layer_after_old_root) + { + CHECK_AND_ASSERT_THROW_MES(new_total_children > old_total_children, + "new_total_children must be > old_total_children when setting next layer after old root"); + } + else + { + CHECK_AND_ASSERT_THROW_MES(new_total_children >= old_total_children, + "new_total_children must be >= old_total_children"); + } + + // 2. Calculate old and new total number of parents using totals for children + // If there's only 1 child, then it must be the old root and thus it would have no old parents + const uint64_t old_total_parents = old_total_children > 1 + ? (1 + ((old_total_children - 1) / parent_chunk_width)) + : 0; + const uint64_t new_total_parents = 1 + ((new_total_children - 1) / parent_chunk_width); + + // 3. 
Check pre-conditions on total number of parents + CHECK_AND_ASSERT_THROW_MES(new_total_parents >= old_total_parents, + "new_total_parents must be >= old_total_parents"); + CHECK_AND_ASSERT_THROW_MES(new_total_parents < new_total_children, + "new_total_parents must be < new_total_children"); + + if (setting_next_layer_after_old_root) + { + CHECK_AND_ASSERT_THROW_MES(old_total_parents == 0, + "old_total_parents expected to be 0 when setting next layer after old root"); + } + + // 4. Set the current offset in the last chunk + // - Note: this value starts at the last child in the last chunk, but it might need to be decremented by 1 if we're + // changing that last child + std::size_t offset = old_total_parents > 0 + ? (old_total_children % parent_chunk_width) + : 0; + + // 5. Check if the last chunk is full (keep in mind it's also possible it's empty) + const bool last_chunk_is_full = offset == 0; + + // 6. When the last child changes, we'll need to use its old value to update the parent + // - We only care if the child has a parent, otherwise we won't need the child's old value to update the parent + // (since there is no parent to update) + const bool need_old_last_child = old_total_parents > 0 && last_child_will_change; + + // 7. If we're changing the last child, we need to subtract the offset by 1 to account for that child + if (need_old_last_child) + { + CHECK_AND_ASSERT_THROW_MES(old_total_children > 0, "no old children but last child is supposed to change"); + + // If the chunk is full, must subtract the chunk width by 1 + offset = offset == 0 ? (parent_chunk_width - 1) : (offset - 1); + } + + // 8. When the last parent changes, we'll need to use its old value to update itself + const bool adding_members_to_existing_last_chunk = old_total_parents > 0 && !last_chunk_is_full + && new_total_children > old_total_children; + const bool need_old_last_parent = need_old_last_child || adding_members_to_existing_last_chunk; + + // 9. 
Set the next parent's start index + uint64_t next_parent_start_index = old_total_parents; + if (need_old_last_parent) + { + // If we're updating the last parent, we need to bring the starting parent index back 1 + CHECK_AND_ASSERT_THROW_MES(old_total_parents > 0, "no old parents but last parent is supposed to change1"); + --next_parent_start_index; + } + + // Done + MDEBUG("parent_chunk_width: " << parent_chunk_width + << " , old_total_children: " << old_total_children + << " , new_total_children: " << new_total_children + << " , old_total_parents: " << old_total_parents + << " , new_total_parents: " << new_total_parents + << " , setting_next_layer_after_old_root: " << setting_next_layer_after_old_root + << " , need_old_last_child: " << need_old_last_child + << " , need_old_last_parent: " << need_old_last_parent + << " , start_offset: " << offset + << " , next_parent_start_index: " << next_parent_start_index); + + return GrowLayerInstructions{ + .parent_chunk_width = parent_chunk_width, + .old_total_children = old_total_children, + .new_total_children = new_total_children, + .old_total_parents = old_total_parents, + .new_total_parents = new_total_parents, + .setting_next_layer_after_old_root = setting_next_layer_after_old_root, + .need_old_last_child = need_old_last_child, + .need_old_last_parent = need_old_last_parent, + .start_offset = offset, + .next_parent_start_index = next_parent_start_index, + }; + +}; +//---------------------------------------------------------------------------------------------------------------------- +static GrowLayerInstructions get_leaf_layer_grow_instructions(const uint64_t old_n_leaf_tuples, + const uint64_t new_n_leaf_tuples, + const std::size_t leaf_tuple_size, + const std::size_t leaf_layer_chunk_width) +{ + // The leaf layer can never be the root layer + const bool setting_next_layer_after_old_root = false; + + const uint64_t old_total_children = old_n_leaf_tuples * leaf_tuple_size; + const uint64_t new_total_children = (old_n_leaf_tuples + new_n_leaf_tuples) * leaf_tuple_size; + + const uint64_t old_total_parents = old_total_children > 0 + ? 
(1 + ((old_total_children - 1) / leaf_layer_chunk_width)) + : 0; + const uint64_t new_total_parents = 1 + ((new_total_children - 1) / leaf_layer_chunk_width); + + CHECK_AND_ASSERT_THROW_MES(new_total_children >= old_total_children, + "new_total_children must be >= old_total_children"); + CHECK_AND_ASSERT_THROW_MES(new_total_parents >= old_total_parents, + "new_total_parents must be >= old_total_parents"); + + // Since leaf layer is append-only, no leaf can ever change and we'll never need an old leaf + const bool need_old_last_child = false; + + const std::size_t offset = old_total_children % leaf_layer_chunk_width; + + const bool last_chunk_is_full = offset == 0; + const bool adding_members_to_existing_last_chunk = old_total_parents > 0 && !last_chunk_is_full + && new_total_children > old_total_children; + const bool need_old_last_parent = adding_members_to_existing_last_chunk; + + uint64_t next_parent_start_index = old_total_parents; + if (need_old_last_parent) + { + // If we're updating the last parent, we need to bring the starting parent index back 1 + CHECK_AND_ASSERT_THROW_MES(old_total_parents > 0, "no old parents but last parent is supposed to change2"); + --next_parent_start_index; + } + + MDEBUG("parent_chunk_width: " << leaf_layer_chunk_width + << " , old_total_children: " << old_total_children + << " , new_total_children: " << new_total_children + << " , old_total_parents: " << old_total_parents + << " , new_total_parents: " << new_total_parents + << " , setting_next_layer_after_old_root: " << setting_next_layer_after_old_root + << " , need_old_last_child: " << need_old_last_child + << " , need_old_last_parent: " << need_old_last_parent + << " , start_offset: " << offset + << " , next_parent_start_index: " << next_parent_start_index); + + return GrowLayerInstructions{ + .parent_chunk_width = leaf_layer_chunk_width, + .old_total_children = old_total_children, + .new_total_children = new_total_children, + .old_total_parents = old_total_parents, + .new_total_parents = new_total_parents, + .setting_next_layer_after_old_root = setting_next_layer_after_old_root, + .need_old_last_child = need_old_last_child, + .need_old_last_parent = need_old_last_parent, + .start_offset = offset, + .next_parent_start_index = next_parent_start_index, + }; +}; +//---------------------------------------------------------------------------------------------------------------------- +// Helper function used to get the next layer extension used to grow the next layer in the tree +// - for example, if we just grew the parent layer after the leaf layer, the "next layer" would be the grandparent +// layer of the leaf layer +template +static LayerExtension get_next_layer_extension(const std::unique_ptr &c_child, + const std::unique_ptr &c_parent, + const GrowLayerInstructions &grow_layer_instructions, + const std::vector &child_last_hashes, + const std::vector &parent_last_hashes, + const std::vector> child_layer_extensions, + const std::size_t last_updated_child_idx, + const std::size_t last_updated_parent_idx) +{ + // TODO: comments + const auto *child_last_hash = (last_updated_child_idx >= child_last_hashes.size()) + ? nullptr + : &child_last_hashes[last_updated_child_idx]; + + const auto *parent_last_hash = (last_updated_parent_idx >= parent_last_hashes.size()) + ? 
nullptr + : &parent_last_hashes[last_updated_parent_idx]; + + // Pre-conditions + CHECK_AND_ASSERT_THROW_MES(last_updated_child_idx < child_layer_extensions.size(), "missing child layer"); + const auto &child_extension = child_layer_extensions[last_updated_child_idx]; + + if (grow_layer_instructions.setting_next_layer_after_old_root) + { + CHECK_AND_ASSERT_THROW_MES((last_updated_child_idx + 1) == child_last_hashes.size(), + "unexpected last updated child idx"); + CHECK_AND_ASSERT_THROW_MES(child_last_hash != nullptr, "missing last child when setting layer after old root"); + } + + const auto child_scalars = next_child_scalars_from_children(c_child, + grow_layer_instructions.setting_next_layer_after_old_root ? child_last_hash : nullptr, + child_extension); + + if (grow_layer_instructions.need_old_last_parent) + CHECK_AND_ASSERT_THROW_MES(parent_last_hash != nullptr, "missing last parent"); + + typename C_PARENT::Scalar last_child_scalar; + if (grow_layer_instructions.need_old_last_child) + { + CHECK_AND_ASSERT_THROW_MES(child_last_hash != nullptr, "missing last child"); + last_child_scalar = c_child->point_to_cycle_scalar(*child_last_hash); + } + + // Do the hashing + LayerExtension layer_extension = hash_children_chunks( + c_parent, + grow_layer_instructions.need_old_last_child ? &last_child_scalar : nullptr, + grow_layer_instructions.need_old_last_parent ? parent_last_hash : nullptr, + grow_layer_instructions.start_offset, + grow_layer_instructions.next_parent_start_index, + child_scalars, + grow_layer_instructions.parent_chunk_width + ); + + CHECK_AND_ASSERT_THROW_MES((layer_extension.start_idx + layer_extension.hashes.size()) == + grow_layer_instructions.new_total_parents, + "unexpected num parents extended"); + + return layer_extension; +} +//---------------------------------------------------------------------------------------------------------------------- +static TrimLayerInstructions get_trim_layer_instructions( + const uint64_t old_total_children, + const uint64_t new_total_children, + const std::size_t parent_chunk_width, + const bool last_child_will_change) +{ + CHECK_AND_ASSERT_THROW_MES(new_total_children > 0, "new total children must be > 0"); + CHECK_AND_ASSERT_THROW_MES(old_total_children >= new_total_children, + "old_total_children must be >= new_total_children"); + + // Calculate old and new total number of parents using totals for children + const uint64_t old_total_parents = 1 + ((old_total_children - 1) / parent_chunk_width); + const uint64_t new_total_parents = 1 + ((new_total_children - 1) / parent_chunk_width); + + CHECK_AND_ASSERT_THROW_MES(old_total_parents >= new_total_parents, + "old_total_parents must be >= new_total_parents"); + CHECK_AND_ASSERT_THROW_MES(new_total_children > new_total_parents, + "new_total_children must be > new_total_parents"); + + const std::size_t old_offset = old_total_children % parent_chunk_width; + const std::size_t new_offset = new_total_children % parent_chunk_width; + + // Get the number of existing children in what will become the new last chunk after trimming + const uint64_t new_last_chunk_old_num_children = (old_total_parents > new_total_parents || old_offset == 0) + ? 
+//----------------------------------------------------------------------------------------------------------------------
+static TrimLayerInstructions get_trim_layer_instructions(
+    const uint64_t old_total_children,
+    const uint64_t new_total_children,
+    const std::size_t parent_chunk_width,
+    const bool last_child_will_change)
+{
+    CHECK_AND_ASSERT_THROW_MES(new_total_children > 0, "new total children must be > 0");
+    CHECK_AND_ASSERT_THROW_MES(old_total_children >= new_total_children,
+        "old_total_children must be >= new_total_children");
+
+    // Calculate old and new total number of parents using totals for children
+    const uint64_t old_total_parents = 1 + ((old_total_children - 1) / parent_chunk_width);
+    const uint64_t new_total_parents = 1 + ((new_total_children - 1) / parent_chunk_width);
+
+    CHECK_AND_ASSERT_THROW_MES(old_total_parents >= new_total_parents,
+        "old_total_parents must be >= new_total_parents");
+    CHECK_AND_ASSERT_THROW_MES(new_total_children > new_total_parents,
+        "new_total_children must be > new_total_parents");
+
+    const std::size_t old_offset = old_total_children % parent_chunk_width;
+    const std::size_t new_offset = new_total_children % parent_chunk_width;
+
+    // Get the number of existing children in what will become the new last chunk after trimming
+    const uint64_t new_last_chunk_old_num_children = (old_total_parents > new_total_parents || old_offset == 0)
+        ? parent_chunk_width
+        : old_offset;
+
+    MDEBUG("new_last_chunk_old_num_children: " << new_last_chunk_old_num_children << ", new_offset: " << new_offset);
+
+    CHECK_AND_ASSERT_THROW_MES(new_last_chunk_old_num_children >= new_offset,
+        "unexpected new_last_chunk_old_num_children");
+
+    // Get the number of children we'll be trimming from the new last chunk
+    const std::size_t trim_n_children = new_offset == 0
+        ? 0 // The last chunk will remain full when the new_offset == 0
+        : new_last_chunk_old_num_children - new_offset;
+
+    // We use hash_trim if we're trimming no more elems from the last chunk than the number of elems remaining in it
+    const bool need_last_chunk_children_to_trim = trim_n_children > 0 && trim_n_children <= new_offset;
+
+    // Otherwise we use hash_grow
+    const bool need_last_chunk_remaining_children = trim_n_children > 0 && trim_n_children > new_offset;
+
+    CHECK_AND_ASSERT_THROW_MES(!(need_last_chunk_children_to_trim && need_last_chunk_remaining_children),
+        "cannot both need last children to trim and need the remaining children");
+
+    // If we're trimming from the new last chunk OR an element in the new last chunk will change, then we're going to
+    // update the existing last hash, since its children are changing
+    const bool update_existing_last_hash = trim_n_children > 0 || last_child_will_change;
+
+    // If we're trimming using remaining children, then we're just going to call hash_grow as if the chunk is being
+    // hashed for the first time, and so we don't need the existing last hash in that case, even if the hash is updating
+    const bool need_existing_last_hash = update_existing_last_hash && !need_last_chunk_remaining_children;
+
+    // Set the hash_offset to use when calling hash_grow or hash_trim
+    std::size_t hash_offset = 0;
+    if (need_last_chunk_children_to_trim)
+    {
+        CHECK_AND_ASSERT_THROW_MES(new_offset > 0, "new_offset must be > 0 when trimming last chunk children");
+        hash_offset = new_offset;
+
+        if (last_child_will_change)
+        {
+            // We decrement the offset we use to hash the chunk if the last child is changing, since we're going to
+            // use the old value of the last child when trimming
+            --hash_offset;
+        }
+    }
+    else if (need_last_chunk_remaining_children)
+    {
+        // If we're trimming using remaining children, then we're just going to call hash_grow with offset 0
+        hash_offset = 0;
+    }
+    else if (last_child_will_change)
+    {
+        // We're not trimming at all in this case, we're only updating the existing last hash with hash_trim. We need
+        // hash_offset to equal the last child's position in the chunk, i.e. 1 less than the chunk's number of children
+        hash_offset = new_offset == 0
+            ? (parent_chunk_width - 1) // chunk is full, so decrement full width by 1
+            : (new_offset - 1);
+    }
+
+    // Set the child index range so the caller knows which children to read from the tree
+    uint64_t start_trim_idx = 0;
+    uint64_t end_trim_idx = 0;
+    if (need_last_chunk_children_to_trim)
+    {
+        // We'll call hash_trim to trim the children between [offset, last chunk end]
+        const uint64_t chunk_boundary_start = (new_total_parents - 1) * parent_chunk_width;
+        const uint64_t chunk_boundary_end = chunk_boundary_start + parent_chunk_width;
+
+        start_trim_idx = chunk_boundary_start + hash_offset;
+        end_trim_idx = std::min(chunk_boundary_end, old_total_children);
+    }
+    else if (need_last_chunk_remaining_children)
+    {
+        // We'll call hash_grow with the remaining children between [0, offset]
+        CHECK_AND_ASSERT_THROW_MES(new_total_children >= new_offset, "new_offset is unexpectedly high");
+        start_trim_idx = new_total_children - new_offset;
+        end_trim_idx = new_total_children;
+
+        if (last_child_will_change)
+        {
+            // We don't need the last old child if it's changing, we'll just use its new value. Decrement the
+            // end_trim_idx by 1 so we know not to read and use the last old child from the tree in this case.
+            CHECK_AND_ASSERT_THROW_MES(end_trim_idx > 0, "end_trim_idx cannot be 0");
+            --end_trim_idx;
+        }
+    }
+
+    MDEBUG("parent_chunk_width: " << parent_chunk_width
+        << " , old_total_children: " << old_total_children
+        << " , new_total_children: " << new_total_children
+        << " , old_total_parents: " << old_total_parents
+        << " , new_total_parents: " << new_total_parents
+        << " , need_last_chunk_children_to_trim: " << need_last_chunk_children_to_trim
+        << " , need_last_chunk_remaining_children: " << need_last_chunk_remaining_children
+        << " , need_existing_last_hash: " << need_existing_last_hash
+        << " , need_new_last_child: " << last_child_will_change
+        << " , update_existing_last_hash: " << update_existing_last_hash
+        << " , hash_offset: " << hash_offset
+        << " , start_trim_idx: " << start_trim_idx
+        << " , end_trim_idx: " << end_trim_idx);
+
+    return TrimLayerInstructions{
+            .parent_chunk_width                  = parent_chunk_width,
+            .old_total_children                  = old_total_children,
+            .new_total_children                  = new_total_children,
+            .old_total_parents                   = old_total_parents,
+            .new_total_parents                   = new_total_parents,
+            .update_existing_last_hash           = update_existing_last_hash,
+            .need_last_chunk_children_to_trim    = need_last_chunk_children_to_trim,
+            .need_last_chunk_remaining_children  = need_last_chunk_remaining_children,
+            .need_existing_last_hash             = need_existing_last_hash,
+            .need_new_last_child                 = last_child_will_change,
+            .hash_offset                         = hash_offset,
+            .start_trim_idx                      = start_trim_idx,
+            .end_trim_idx                        = end_trim_idx,
+        };
+}
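+// Added commentary (illustrative example, not part of the original patch): with parent_chunk_width = 18, trimming
+// old_total_children = 40 down to new_total_children = 29 gives old_total_parents = 3, new_total_parents = 2 and
+// new_offset = 11; the new last chunk currently holds a full 18 children, so trim_n_children = 18 - 11 = 7.
+// Since 7 <= 11, hash_trim is used over children [29, 36). Trimming to 22 instead gives new_offset = 4 and
+// trim_n_children = 14 > 4, so the chunk is re-hashed from scratch with hash_grow over the remaining children [18, 22).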
+//----------------------------------------------------------------------------------------------------------------------
+template<typename C_CHILD, typename C_PARENT>
+static typename fcmp_pp::curve_trees::LayerReduction<C_PARENT> get_next_layer_reduction(
+    const std::unique_ptr<C_CHILD> &c_child,
+    const std::unique_ptr<C_PARENT> &c_parent,
+    const TrimLayerInstructions &trim_layer_instructions,
+    const std::vector<typename C_PARENT::Point> &parent_last_hashes,
+    const std::vector<std::vector<typename C_PARENT::Scalar>> &children_to_trim,
+    const std::vector<typename C_CHILD::Point> &child_last_hashes,
+    const std::size_t parent_layer_idx,
+    const std::size_t child_layer_idx,
+    const std::vector<LayerReduction<C_CHILD>> &child_reductions)
+{
+    LayerReduction<C_PARENT> layer_reduction_out;
+
+    layer_reduction_out.new_total_parents = trim_layer_instructions.new_total_parents;
+    layer_reduction_out.update_existing_last_hash = trim_layer_instructions.update_existing_last_hash;
+
+    if (!trim_layer_instructions.need_last_chunk_children_to_trim &&
+        !trim_layer_instructions.need_last_chunk_remaining_children &&
+        !trim_layer_instructions.need_new_last_child)
+    {
+        // In this case we're just trimming to the boundary, and don't need to get a new hash
+        CHECK_AND_ASSERT_THROW_MES(!layer_reduction_out.update_existing_last_hash, "unexpected update last hash");
+        MDEBUG("Trimming to chunk boundary");
+        return layer_reduction_out;
+    }
+
+    if (trim_layer_instructions.need_existing_last_hash)
+        CHECK_AND_ASSERT_THROW_MES(parent_last_hashes.size() > parent_layer_idx, "missing last parent hash");
+
+    const typename C_PARENT::Point &existing_hash = trim_layer_instructions.need_existing_last_hash
+        ? parent_last_hashes[parent_layer_idx]
+        : c_parent->hash_init_point();
+
+    std::vector<typename C_PARENT::Scalar> child_scalars;
+    if (trim_layer_instructions.need_last_chunk_children_to_trim
+        || trim_layer_instructions.need_last_chunk_remaining_children)
+    {
+        CHECK_AND_ASSERT_THROW_MES(children_to_trim.size() > parent_layer_idx, "missing children to trim");
+        child_scalars = children_to_trim[parent_layer_idx];
+    }
+
+    typename C_PARENT::Scalar new_last_child_scalar = c_parent->zero_scalar();
+    if (trim_layer_instructions.need_new_last_child)
+    {
+        CHECK_AND_ASSERT_THROW_MES(child_layer_idx > 0, "child index cannot be 0 here");
+        CHECK_AND_ASSERT_THROW_MES(child_reductions.size() == child_layer_idx, "unexpected child layer idx");
+        CHECK_AND_ASSERT_THROW_MES(child_reductions.back().update_existing_last_hash, "expected new last child");
+
+        const typename C_CHILD::Point &new_last_child = child_reductions.back().new_last_hash;
+        new_last_child_scalar = c_child->point_to_cycle_scalar(new_last_child);
+
+        if (trim_layer_instructions.need_last_chunk_remaining_children)
+        {
+            child_scalars.emplace_back(std::move(new_last_child_scalar));
+        }
+        else if (!trim_layer_instructions.need_last_chunk_children_to_trim)
+        {
+            // Falling into this conditional means we're not trimming at all, just updating the old last child
+            const std::size_t last_child_layer_idx = child_layer_idx - 1;
+            CHECK_AND_ASSERT_THROW_MES(child_last_hashes.size() > last_child_layer_idx, "missing last child hash");
+
+            const typename C_CHILD::Point &old_last_child = child_last_hashes[last_child_layer_idx];
+            auto old_last_child_scalar = c_child->point_to_cycle_scalar(old_last_child);
+
+            child_scalars.emplace_back(std::move(old_last_child_scalar));
+        }
+    }
+
+    for (std::size_t i = 0; i < child_scalars.size(); ++i)
+        MDEBUG("Hashing child " << c_parent->to_string(child_scalars[i]));
+
+    if (trim_layer_instructions.need_last_chunk_remaining_children)
+    {
+        MDEBUG("hash_grow: existing_hash: " << c_parent->to_string(existing_hash)
+            << " , hash_offset: " << trim_layer_instructions.hash_offset);
+
+        layer_reduction_out.new_last_hash = c_parent->hash_grow(
+            existing_hash,
+            trim_layer_instructions.hash_offset,
+            c_parent->zero_scalar(),
+            typename C_PARENT::Chunk{child_scalars.data(), child_scalars.size()});
+    }
+    else
+    {
+        MDEBUG("hash_trim: existing_hash: " << c_parent->to_string(existing_hash)
+            << " , hash_offset: " << trim_layer_instructions.hash_offset
+            << " , child_to_grow_back: " << c_parent->to_string(new_last_child_scalar));
+
+        layer_reduction_out.new_last_hash = c_parent->hash_trim(
+            existing_hash,
+            trim_layer_instructions.hash_offset,
+            typename C_PARENT::Chunk{child_scalars.data(), child_scalars.size()},
+            new_last_child_scalar);
+    }
+
+    MDEBUG("Result hash: " << c_parent->to_string(layer_reduction_out.new_last_hash));
+
+    return layer_reduction_out;
+}
+//----------------------------------------------------------------------------------------------------------------------
+static PreLeafTuple output_to_pre_leaf_tuple(const OutputPair &output_pair)
+{
+    const crypto::public_key &output_pubkey = output_pair.output_pubkey;
+    const rct::key &commitment = output_pair.commitment;
+
+    rct::key O, C;
+    if (!fcmp_pp::clear_torsion(rct::pk2rct(output_pubkey), O))
+        throw std::runtime_error("output pubkey is invalid");
+    if (!fcmp_pp::clear_torsion(commitment, C))
+        throw std::runtime_error("commitment is invalid");
+
+    if (O == rct::I)
+        throw std::runtime_error("O cannot equal identity");
+    if (C == rct::I)
+        throw std::runtime_error("C cannot equal identity");
+
+    // Must use the original output pubkey to derive I to prevent double spends, since torsioned outputs yield a
+    // distinct I and key image from their respective torsion cleared output (and torsioned outputs are spendable
+    // before fcmp++)
+    crypto::ec_point I;
+    crypto::derive_key_image_generator(output_pubkey, I);
+
+    PreLeafTuple plt;
+    if (!fcmp_pp::point_to_ed_y_derivatives(O, plt.O_pre_x))
+        throw std::runtime_error("failed to get ed y derivatives from O");
+    if (!fcmp_pp::point_to_ed_y_derivatives(rct::pt2rct(I), plt.I_pre_x))
+        throw std::runtime_error("failed to get ed y derivatives from I");
+    if (!fcmp_pp::point_to_ed_y_derivatives(C, plt.C_pre_x))
+        throw std::runtime_error("failed to get ed y derivatives from C");
+
+    return plt;
+}
+//----------------------------------------------------------------------------------------------------------------------
+//----------------------------------------------------------------------------------------------------------------------
+// CurveTrees public member functions
+//----------------------------------------------------------------------------------------------------------------------
+template<>
+CurveTrees<Helios, Selene>::LeafTuple CurveTrees<Helios, Selene>::leaf_tuple(const OutputPair &output_pair) const
+{
+    const auto plt = output_to_pre_leaf_tuple(output_pair);
+
+    rct::key O_x, I_x, C_x;
+    fcmp_pp::ed_y_derivatives_to_wei_x(plt.O_pre_x, O_x);
+    fcmp_pp::ed_y_derivatives_to_wei_x(plt.I_pre_x, I_x);
+    fcmp_pp::ed_y_derivatives_to_wei_x(plt.C_pre_x, C_x);
+
+    return LeafTuple{
+        .O_x = tower_cycle::selene_scalar_from_bytes(O_x),
+        .I_x = tower_cycle::selene_scalar_from_bytes(I_x),
+        .C_x = tower_cycle::selene_scalar_from_bytes(C_x)
+    };
+};
+//----------------------------------------------------------------------------------------------------------------------
+template<typename C1, typename C2>
+std::vector<typename C2::Scalar> CurveTrees<C1, C2>::flatten_leaves(std::vector<LeafTuple> &&leaves) const
+{
+    std::vector<typename C2::Scalar> flattened_leaves;
+    flattened_leaves.reserve(leaves.size() * LEAF_TUPLE_SIZE);
+
+    for (auto &l : leaves)
+    {
+        flattened_leaves.emplace_back(std::move(l.O_x));
+        flattened_leaves.emplace_back(std::move(l.I_x));
+        flattened_leaves.emplace_back(std::move(l.C_x));
+    }
+
+    return flattened_leaves;
+};
+
+// Explicit instantiation
+template std::vector<Selene::Scalar> CurveTrees<Helios, Selene>::flatten_leaves(
+    std::vector<CurveTrees<Helios, Selene>::LeafTuple> &&leaves) const;
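+// Added commentary (illustrative, not part of the original patch): leaf tuple i occupies flattened indexes
+// [3*i, 3*i + 2] as [O.x, I.x, C.x], so two leaves flatten to [O0.x, I0.x, C0.x, O1.x, I1.x, C1.x]. The leaf
+// layer hasher then consumes this vector in chunks of m_leaf_layer_chunk_width = LEAF_TUPLE_SIZE * m_c2_width
+// scalars.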
+//----------------------------------------------------------------------------------------------------------------------
+template<typename C1, typename C2>
+typename CurveTrees<C1, C2>::TreeExtension CurveTrees<C1, C2>::get_tree_extension(
+    const uint64_t old_n_leaf_tuples,
+    const LastHashes &existing_last_hashes,
+    std::vector<OutputContext> &&new_outputs) const
+{
+    TreeExtension tree_extension;
+    tree_extension.leaves.start_leaf_tuple_idx = old_n_leaf_tuples;
+
+    if (new_outputs.empty())
+        return tree_extension;
+
+    // Sort the outputs by the order in which they appear in the chain
+    const auto sort_fn = [](const OutputContext &a, const OutputContext &b) { return a.output_id < b.output_id; };
+    std::sort(new_outputs.begin(), new_outputs.end(), sort_fn);
+
+    // Convert sorted outputs into leaf tuples, place each element of each leaf tuple in a flat vector to be hashed,
+    // and place the outputs in a tree extension struct for insertion into the db. We ignore invalid outputs, since
+    // they cannot be inserted into the tree.
+    std::vector<typename C2::Scalar> flattened_leaves;
+    this->set_valid_leaves(flattened_leaves, tree_extension.leaves.tuples, std::move(new_outputs));
+
+    if (flattened_leaves.empty())
+        return tree_extension;
+
+    auto grow_layer_instructions = get_leaf_layer_grow_instructions(
+        old_n_leaf_tuples,
+        tree_extension.leaves.tuples.size(),
+        LEAF_TUPLE_SIZE,
+        m_leaf_layer_chunk_width);
+
+    if (grow_layer_instructions.need_old_last_parent)
+        CHECK_AND_ASSERT_THROW_MES(!existing_last_hashes.c2_last_hashes.empty(), "missing last c2 parent");
+
+    // Hash the leaf layer
+    auto leaf_parents = hash_children_chunks(m_c2,
+        nullptr, // We never need the old last child from leaf layer because the leaf layer is always append-only
+        grow_layer_instructions.need_old_last_parent ? &existing_last_hashes.c2_last_hashes[0] : nullptr,
+        grow_layer_instructions.start_offset,
+        grow_layer_instructions.next_parent_start_index,
+        flattened_leaves,
+        m_leaf_layer_chunk_width
+    );
+
+    CHECK_AND_ASSERT_THROW_MES(
+        (leaf_parents.start_idx + leaf_parents.hashes.size()) == grow_layer_instructions.new_total_parents,
+        "unexpected num leaf parents extended");
+
+    tree_extension.c2_layer_extensions.emplace_back(std::move(leaf_parents));
+
+    // Alternate between hashing c2 children, c1 children, c2, c1, ...
+    bool parent_is_c1 = true;
+
+    std::size_t c1_last_idx = 0;
+    std::size_t c2_last_idx = 0;
+    while (grow_layer_instructions.new_total_parents > 1)
+    {
+        MDEBUG("Getting extension for layer " << (c1_last_idx + c2_last_idx + 1));
+
+        const uint64_t new_total_children = grow_layer_instructions.new_total_parents;
+
+        grow_layer_instructions = this->set_next_layer_extension(
+            grow_layer_instructions,
+            parent_is_c1,
+            existing_last_hashes,
+            c1_last_idx,
+            c2_last_idx,
+            tree_extension
+        );
+
+        // Sanity check to make sure we're making progress to exit the while loop
+        CHECK_AND_ASSERT_THROW_MES(grow_layer_instructions.new_total_parents < new_total_children,
+            "expect fewer parents than children in every layer");
+
+        parent_is_c1 = !parent_is_c1;
+    }
+
+    return tree_extension;
+};
+
+// Explicit instantiation
+template CurveTrees<Helios, Selene>::TreeExtension CurveTrees<Helios, Selene>::get_tree_extension(
+    const uint64_t old_n_leaf_tuples,
+    const CurveTrees<Helios, Selene>::LastHashes &existing_last_hashes,
+    std::vector<OutputContext> &&new_outputs) const;
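+// Added commentary (illustrative, not part of the original patch): get_tree_extension hashes the leaf layer with
+// the Selene curve (c2_layer_extensions[0]), then set_next_layer_extension alternates curves: Helios over the new
+// Selene hashes (c1_layer_extensions[0]), Selene again (c2_layer_extensions[1]), and so on, stopping once a layer
+// produces a single parent, the new root.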
+//----------------------------------------------------------------------------------------------------------------------
+template<typename C1, typename C2>
+std::vector<TrimLayerInstructions> CurveTrees<C1, C2>::get_trim_instructions(
+    const uint64_t old_n_leaf_tuples,
+    const uint64_t trim_n_leaf_tuples) const
+{
+    CHECK_AND_ASSERT_THROW_MES(old_n_leaf_tuples >= trim_n_leaf_tuples, "cannot trim more leaves than exist");
+    CHECK_AND_ASSERT_THROW_MES(trim_n_leaf_tuples > 0, "must be trimming some leaves");
+
+    std::vector<TrimLayerInstructions> trim_instructions;
+
+    if (old_n_leaf_tuples == trim_n_leaf_tuples)
+        return trim_instructions;
+
+    // Get trim instructions for the leaf layer
+    {
+        const uint64_t old_total_leaves = old_n_leaf_tuples * LEAF_TUPLE_SIZE;
+        const uint64_t new_total_leaves = (old_n_leaf_tuples - trim_n_leaf_tuples) * LEAF_TUPLE_SIZE;
+
+        const std::size_t parent_chunk_width = m_leaf_layer_chunk_width;
+
+        // Leaf layer's last child never changes since leaf layer is pop-/append-only
+        const bool last_child_will_change = false;
+
+        auto trim_leaf_layer_instructions = get_trim_layer_instructions(
+            old_total_leaves,
+            new_total_leaves,
+            parent_chunk_width,
+            last_child_will_change);
+
+        trim_instructions.emplace_back(std::move(trim_leaf_layer_instructions));
+    }
+
+    bool use_c2 = false;
+    while (trim_instructions.back().new_total_parents > 1)
+    {
+        auto trim_layer_instructions = get_trim_layer_instructions(
+            trim_instructions.back().old_total_parents,
+            trim_instructions.back().new_total_parents,
+            use_c2 ? m_c2_width : m_c1_width,
+            trim_instructions.back().update_existing_last_hash);
+
+        trim_instructions.emplace_back(std::move(trim_layer_instructions));
+        use_c2 = !use_c2;
+    }
+
+    return trim_instructions;
+}
+
+// Explicit instantiation
+template std::vector<TrimLayerInstructions> CurveTrees<Helios, Selene>::get_trim_instructions(
+    const uint64_t old_n_leaf_tuples,
+    const uint64_t trim_n_leaf_tuples) const;
+//----------------------------------------------------------------------------------------------------------------------
+template<typename C1, typename C2>
+typename CurveTrees<C1, C2>::TreeReduction CurveTrees<C1, C2>::get_tree_reduction(
+    const std::vector<TrimLayerInstructions> &trim_instructions,
+    const LastChunkChildrenToTrim &children_to_trim,
+    const LastHashes &last_hashes) const
+{
+    TreeReduction tree_reduction_out;
+
+    if (trim_instructions.empty())
+    {
+        tree_reduction_out.new_total_leaf_tuples = 0;
+        return tree_reduction_out;
+    }
+
+    CHECK_AND_ASSERT_THROW_MES((trim_instructions[0].new_total_children % LEAF_TUPLE_SIZE) == 0,
+        "unexpected new total leaves");
+    const uint64_t new_total_leaf_tuples = trim_instructions[0].new_total_children / LEAF_TUPLE_SIZE;
+    tree_reduction_out.new_total_leaf_tuples = new_total_leaf_tuples;
+
+    bool use_c2 = true;
+    std::size_t c1_idx = 0;
+    std::size_t c2_idx = 0;
+
+    for (const auto &trim_layer_instructions : trim_instructions)
+    {
+        MDEBUG("Trimming layer " << (c1_idx + c2_idx) << " (c1_idx: " << c1_idx << " , c2_idx: " << c2_idx << ")");
+
+        if (use_c2)
+        {
+            auto c2_layer_reduction_out = get_next_layer_reduction(
+                m_c1,
+                m_c2,
+                trim_layer_instructions,
+                last_hashes.c2_last_hashes,
+                children_to_trim.c2_children,
+                last_hashes.c1_last_hashes,
+                c2_idx,
+                c1_idx,
+                tree_reduction_out.c1_layer_reductions
+            );
+
+            tree_reduction_out.c2_layer_reductions.emplace_back(std::move(c2_layer_reduction_out));
+            ++c2_idx;
+        }
+        else
+        {
+            auto c1_layer_reduction_out = get_next_layer_reduction(
+                m_c2,
+                m_c1,
+                trim_layer_instructions,
+                last_hashes.c1_last_hashes,
+                children_to_trim.c1_children,
+                last_hashes.c2_last_hashes,
+                c1_idx,
+                c2_idx,
+                tree_reduction_out.c2_layer_reductions
+            );
+
+            tree_reduction_out.c1_layer_reductions.emplace_back(std::move(c1_layer_reduction_out));
+            ++c1_idx;
+        }
+
+        use_c2 = !use_c2;
+    }
+
+    return tree_reduction_out;
+};
+
+// Explicit instantiation
+template CurveTrees<Helios, Selene>::TreeReduction CurveTrees<Helios, Selene>::get_tree_reduction(
+    const std::vector<TrimLayerInstructions> &trim_instructions,
+    const CurveTrees<Helios, Selene>::LastChunkChildrenToTrim &children_to_trim,
+    const CurveTrees<Helios, Selene>::LastHashes &last_hashes) const;
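+// Added commentary (illustrative, not part of the original patch): a caller trims in three steps: get the
+// per-layer instructions via get_trim_instructions, read the children in [start_trim_idx, end_trim_idx) and the
+// existing last hashes from the db, then feed both into get_tree_reduction and write the resulting layer
+// reductions back to the tree.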
+//----------------------------------------------------------------------------------------------------------------------
+//----------------------------------------------------------------------------------------------------------------------
+// CurveTrees private member functions
+//----------------------------------------------------------------------------------------------------------------------
+template<typename C1, typename C2>
+void CurveTrees<C1, C2>::set_valid_leaves(
+    std::vector<typename C2::Scalar> &flattened_leaves_out,
+    std::vector<OutputContext> &tuples_out,
+    std::vector<OutputContext> &&new_outputs) const
+{
+    // Keep track of valid outputs to make sure we only use leaves from valid outputs. Can't use std::vector<bool>
+    // because std::vector<bool> concurrent access is not thread safe.
+    enum Boolean : uint8_t {
+        False = 0,
+        True = 1,
+    };
+    std::vector<Boolean> valid_outputs(new_outputs.size(), False);
+
+    tools::threadpool& tpool = tools::threadpool::getInstanceForCompute();
+    tools::threadpool::waiter waiter(tpool);
+
+    // Step 1. Multithreaded: convert valid outputs into Edwards y derivatives needed to get Wei x coordinates
+    // TODO: investigate batched threading (as opposed to small tasks)
+    std::vector<PreLeafTuple> pre_leaves;
+    pre_leaves.resize(new_outputs.size());
+    for (std::size_t i = 0; i < new_outputs.size(); ++i)
+    {
+        tpool.submit(&waiter,
+                [
+                    &new_outputs,
+                    &valid_outputs,
+                    &pre_leaves,
+                    i
+                ]()
+                {
+                    CHECK_AND_ASSERT_THROW_MES(valid_outputs.size() > i, "unexpected valid outputs size");
+                    CHECK_AND_ASSERT_THROW_MES(!valid_outputs[i], "unexpected valid output");
+                    CHECK_AND_ASSERT_THROW_MES(pre_leaves.size() > i, "unexpected pre_leaves size");
+
+                    const auto &output_pair = new_outputs[i].output_pair;
+
+                    try { pre_leaves[i] = output_to_pre_leaf_tuple(output_pair); }
+                    catch(...) { /* Invalid outputs can't be added to the tree */ return; }
+
+                    valid_outputs[i] = True;
+                },
+                true
+            );
+    }
+
+    CHECK_AND_ASSERT_THROW_MES(waiter.wait(), "failed to convert outputs to ed y derivatives");
+
+    // Step 2. Collect valid Edwards y derivatives
+    const std::size_t n_valid_outputs = std::count(valid_outputs.begin(), valid_outputs.end(), True);
+    const std::size_t n_valid_leaf_elems = n_valid_outputs * LEAF_TUPLE_SIZE;
+
+    // Collecting (1+y)'s
+    fe *one_plus_y_vec = (fe *) malloc(n_valid_leaf_elems * sizeof(fe));
+    CHECK_AND_ASSERT_THROW_MES(one_plus_y_vec, "failed malloc one_plus_y_vec");
+
+    // Collecting (1-y)'s
+    fe *one_minus_y_vec = (fe *) malloc(n_valid_leaf_elems * sizeof(fe));
+    CHECK_AND_ASSERT_THROW_MES(one_minus_y_vec, "failed malloc one_minus_y_vec");
+
+    std::size_t valid_i = 0;
+    for (std::size_t i = 0; i < valid_outputs.size(); ++i)
+    {
+        if (!valid_outputs[i])
+            continue;
+
+        CHECK_AND_ASSERT_THROW_MES(pre_leaves.size() > i, "unexpected size of pre_leaves");
+        CHECK_AND_ASSERT_THROW_MES(n_valid_leaf_elems > valid_i, "unexpected valid_i");
+
+        auto &pl = pre_leaves[i];
+
+        auto &O_pre_x = pl.O_pre_x;
+        auto &I_pre_x = pl.I_pre_x;
+        auto &C_pre_x = pl.C_pre_x;
+
+        static_assert(LEAF_TUPLE_SIZE == 3, "unexpected leaf tuple size");
+
+        // TODO: avoid copying underlying (tried using pointer to pointers, but wasn't clean)
+        memcpy(&one_plus_y_vec[valid_i],   &O_pre_x.one_plus_y, sizeof(fe));
+        memcpy(&one_plus_y_vec[valid_i+1], &I_pre_x.one_plus_y, sizeof(fe));
+        memcpy(&one_plus_y_vec[valid_i+2], &C_pre_x.one_plus_y, sizeof(fe));
+
+        memcpy(&one_minus_y_vec[valid_i],   &O_pre_x.one_minus_y, sizeof(fe));
+        memcpy(&one_minus_y_vec[valid_i+1], &I_pre_x.one_minus_y, sizeof(fe));
+        memcpy(&one_minus_y_vec[valid_i+2], &C_pre_x.one_minus_y, sizeof(fe));
+
+        valid_i += LEAF_TUPLE_SIZE;
+    }
+
+    CHECK_AND_ASSERT_THROW_MES(n_valid_leaf_elems == valid_i, "unexpected end valid_i");
+
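+    // Added commentary (illustrative, not part of the original patch): fe_batch_invert below is assumed to use
+    // Montgomery's trick: to invert a, b, c, form the running products a, ab, abc, invert only abc, then unwind:
+    // c^-1 = (abc)^-1 * ab, (ab)^-1 = (abc)^-1 * c, b^-1 = (ab)^-1 * a, a^-1 = (ab)^-1 * b. One field inversion
+    // plus ~3(n-1) multiplications replaces n costly inversions.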
+    // Step 3. Get batch inverse of all valid (1-y)'s
+    // - Batch inversion is significantly faster than inverting 1 at a time
+    fe *inv_one_minus_y_vec = (fe *) malloc(n_valid_leaf_elems * sizeof(fe));
+    CHECK_AND_ASSERT_THROW_MES(inv_one_minus_y_vec, "failed malloc inv_one_minus_y_vec");
+    CHECK_AND_ASSERT_THROW_MES(fe_batch_invert(inv_one_minus_y_vec, one_minus_y_vec, n_valid_leaf_elems) == 0,
+        "failed to batch invert");
+
+    // Step 4. Multithreaded: get Wei x's and convert to Selene scalars
+    // TODO: investigate batched threading (as opposed to small tasks)
+    flattened_leaves_out.resize(n_valid_leaf_elems);
+    for (std::size_t i = 0; i < n_valid_leaf_elems; ++i)
+    {
+        tpool.submit(&waiter,
+                [
+                    &inv_one_minus_y_vec,
+                    &one_plus_y_vec,
+                    &flattened_leaves_out,
+                    i
+                ]()
+                {
+                    rct::key wei_x;
+                    fe_ed_y_derivatives_to_wei_x(wei_x.bytes, inv_one_minus_y_vec[i], one_plus_y_vec[i]);
+                    flattened_leaves_out[i] = tower_cycle::selene_scalar_from_bytes(wei_x);
+                },
+                true
+            );
+    }
+
+    CHECK_AND_ASSERT_THROW_MES(waiter.wait(), "failed to convert outputs to wei x coords");
+
+    // Step 5. Set valid tuples to be stored in the db
+    tuples_out.clear();
+    tuples_out.reserve(n_valid_outputs);
+    for (std::size_t i = 0; i < valid_outputs.size(); ++i)
+    {
+        if (!valid_outputs[i])
+            continue;
+
+        CHECK_AND_ASSERT_THROW_MES(new_outputs.size() > i, "unexpected size of valid outputs");
+
+        // We can derive {O.x,I.x,C.x} from output pairs, so we store just the output context in the db to save 32 bytes
+        tuples_out.emplace_back(std::move(new_outputs[i]));
+    }
+
+    // Step 6. Clean up
+    free(one_plus_y_vec);
+    free(one_minus_y_vec);
+    free(inv_one_minus_y_vec);
+}
+//----------------------------------------------------------------------------------------------------------------------
+template<typename C1, typename C2>
+GrowLayerInstructions CurveTrees<C1, C2>::set_next_layer_extension(
+    const GrowLayerInstructions &prev_layer_instructions,
+    const bool parent_is_c1,
+    const LastHashes &last_hashes,
+    std::size_t &c1_last_idx_inout,
+    std::size_t &c2_last_idx_inout,
+    TreeExtension &tree_extension_inout) const
+{
+    const auto &c1_last_hashes = last_hashes.c1_last_hashes;
+    const auto &c2_last_hashes = last_hashes.c2_last_hashes;
+
+    auto &c1_layer_extensions_out = tree_extension_inout.c1_layer_extensions;
+    auto &c2_layer_extensions_out = tree_extension_inout.c2_layer_extensions;
+
+    const std::size_t parent_chunk_width = parent_is_c1 ? m_c1_width : m_c2_width;
+
+    const auto grow_layer_instructions = get_grow_layer_instructions(
+        prev_layer_instructions.old_total_parents,
+        prev_layer_instructions.new_total_parents,
+        parent_chunk_width,
+        prev_layer_instructions.need_old_last_parent
+    );
+
+    if (parent_is_c1)
+    {
+        auto c1_layer_extension = get_next_layer_extension<C2, C1>(
+            m_c2,
+            m_c1,
+            grow_layer_instructions,
+            c2_last_hashes,
+            c1_last_hashes,
+            c2_layer_extensions_out,
+            c2_last_idx_inout,
+            c1_last_idx_inout
+        );
+
+        c1_layer_extensions_out.emplace_back(std::move(c1_layer_extension));
+        ++c2_last_idx_inout;
+    }
+    else
+    {
+        auto c2_layer_extension = get_next_layer_extension<C1, C2>(
+            m_c1,
+            m_c2,
+            grow_layer_instructions,
+            c1_last_hashes,
+            c2_last_hashes,
+            c1_layer_extensions_out,
+            c1_last_idx_inout,
+            c2_last_idx_inout
+        );
+
+        c2_layer_extensions_out.emplace_back(std::move(c2_layer_extension));
+        ++c1_last_idx_inout;
+    }
+
+    return grow_layer_instructions;
+};
+//----------------------------------------------------------------------------------------------------------------------
+//----------------------------------------------------------------------------------------------------------------------
+} //namespace curve_trees
+} //namespace fcmp_pp
diff --git a/src/fcmp_pp/curve_trees.h b/src/fcmp_pp/curve_trees.h
new file mode 100644
index 00000000000..aca0c753fb5
--- /dev/null
+++ b/src/fcmp_pp/curve_trees.h
@@ -0,0 +1,350 @@
+// Copyright (c) 2024, The Monero Project
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without modification, are
+// permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice, this list of
+//    conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright notice, this list
+//    of conditions and the following disclaimer in the documentation and/or other
+//    materials provided with the distribution.
+//
+// 3. Neither the name of the copyright holder nor the names of its contributors may be
+//    used to endorse or promote products derived from this software without specific
+//    prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+// THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
+// THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#pragma once
+
+#include "crypto/crypto.h"
+#include "cryptonote_basic/cryptonote_basic.h"
+#include "fcmp_pp_crypto.h"
+#include "misc_log_ex.h"
+#include "tower_cycle.h"
+
+#include <memory>
+#include <unordered_map>
+#include <vector>
+
+
+namespace fcmp_pp
+{
+namespace curve_trees
+{
+//----------------------------------------------------------------------------------------------------------------------
+//----------------------------------------------------------------------------------------------------------------------
+// Hash a chunk of new children
+template<typename C>
+typename C::Point get_new_parent(const std::unique_ptr<C> &curve, const typename C::Chunk &new_children);
+//----------------------------------------------------------------------------------------------------------------------
+// A layer of contiguous hashes starting from a specific start_idx in the tree
+template<typename C>
+struct LayerExtension final
+{
+    uint64_t start_idx{0};
+    bool update_existing_last_hash;
+    std::vector<typename C::Point> hashes;
+};
+
+// A struct useful to trim a layer and update its last hash if necessary
+template<typename C>
+struct LayerReduction final
+{
+    uint64_t new_total_parents{0};
+    bool update_existing_last_hash;
+    typename C::Point new_last_hash;
+};
+
+// Useful metadata for growing a layer
+struct GrowLayerInstructions final
+{
+    // The max chunk width of children used to hash into a parent
+    std::size_t parent_chunk_width;
+
+    // Total children refers to the total number of elements in a layer
+    uint64_t old_total_children;
+    uint64_t new_total_children;
+
+    // Total parents refers to the total number of hashes of chunks of children
+    uint64_t old_total_parents;
+    uint64_t new_total_parents;
+
+    // When updating the tree, we use this boolean to know when we'll need to use the tree's existing old root in order
+    // to set a new layer after that root
+    // - We'll need to be sure the old root gets hashed when setting the next layer
+    bool setting_next_layer_after_old_root;
+    // When the last child in the child layer changes, we'll need to use its old value to update its parent hash
+    bool need_old_last_child;
+    // When the last parent in the layer changes, we'll need to use its old value to update itself
+    bool need_old_last_parent;
+
+    // The offset of the first new child within the first chunk that needs updating
+    std::size_t start_offset;
+    // The index in the parent layer of the first parent we'll write or update
+    uint64_t next_parent_start_index;
+};
+
+// Useful metadata for trimming a layer
+struct TrimLayerInstructions final
+{
+    // The max chunk width of children used to hash into a parent
+    std::size_t parent_chunk_width;
+
+    // Total children refers to the total number of elements in a layer
+    uint64_t old_total_children;
+    uint64_t new_total_children;
+
+    // Total parents refers to the total number of hashes of chunks of children
+    uint64_t old_total_parents;
+    uint64_t new_total_parents;
+
+    // True if the new last chunk's existing parent hash will need to be updated
+    bool update_existing_last_hash;
+
+    // Whether we need to explicitly trim children from the new last chunk
+    bool need_last_chunk_children_to_trim;
+    // Whether we need to trim by growing using the remaining children from the new last chunk
+    bool need_last_chunk_remaining_children;
+    // Whether we need the new last chunk's existing parent hash in order to complete the trim
+    bool need_existing_last_hash;
+    // Whether we need the new last child from the new last chunk in order to complete the trim
+    bool need_new_last_child;
+
+    // The offset to use when hashing the last chunk
+    std::size_t hash_offset;
+
+    // The starting and ending indexes of the children we're going to need to trim the last chunk
+    uint64_t start_trim_idx;
+    uint64_t end_trim_idx;
+};
+
+// Output pub key and commitment, ready to be converted to a leaf tuple
+// - From {output_pubkey,commitment} -> {O,C} -> {O.x,I.x,C.x}
+// - Output pairs do NOT necessarily have torsion cleared. We need the output pubkey as it exists in the chain in order
+//   to derive the correct I (when deriving {O.x, I.x, C.x}). Torsion clearing O before deriving I from O would enable
+//   spending a torsioned output once before the fcmp++ fork and again with a different key image via fcmp++.
+#pragma pack(push, 1)
+struct OutputPair final
+{
+    crypto::public_key output_pubkey;
+    rct::key commitment;
+};
+
+// Contextual wrapper for the output
+struct OutputContext final
+{
+    // Output's global id in the chain, used to insert the output in the tree in the order it entered the chain
+    uint64_t output_id;
+    OutputPair output_pair;
+};
+#pragma pack(pop)
+
+static_assert(sizeof(OutputPair) == (32+32), "db expects 64 bytes for output pairs");
+static_assert(sizeof(OutputContext) == (8+32+32), "db expects 72 bytes for output context");
+
+using OutputsByUnlockBlock = std::unordered_map<uint64_t, std::vector<OutputContext>>;
+
+// Ed25519 points (can go from OutputTuple -> LeafTuple)
+struct OutputTuple final
+{
+    rct::key O;
+    rct::key I;
+    rct::key C;
+};
+
+// Struct composed of ec elems needed to get a full-fledged leaf tuple
+struct PreLeafTuple final
+{
+    fcmp_pp::EdYDerivatives O_pre_x;
+    fcmp_pp::EdYDerivatives I_pre_x;
+    fcmp_pp::EdYDerivatives C_pre_x;
+};
+
+//----------------------------------------------------------------------------------------------------------------------
+//----------------------------------------------------------------------------------------------------------------------
+// This class is useful to help update the curve trees merkle tree without needing to keep the entire tree in memory
+// - It requires instantiation with the C1 and C2 curve classes and widths, hardening the tree structure
+// - It ties the C2 curve in the tree to the leaf layer (the leaf layer is composed of C2 scalars)
+template<typename C1, typename C2>
+class CurveTrees
+{
+public:
+    CurveTrees(std::unique_ptr<C1> &&c1,
+        std::unique_ptr<C2> &&c2,
+        const std::size_t c1_width,
+        const std::size_t c2_width):
+            m_c1{std::move(c1)},
+            m_c2{std::move(c2)},
+            m_c1_width{c1_width},
+            m_c2_width{c2_width},
+            m_leaf_layer_chunk_width{LEAF_TUPLE_SIZE * c2_width}
+    {
+        assert(c1_width > 0);
+        assert(c2_width > 0);
+    };
+
+//member structs
+public:
+    // Tuple that composes a single leaf in the tree
+    struct LeafTuple final
+    {
+        // Output ed25519 point x-coordinate
+        typename C2::Scalar O_x;
+        // Key image generator x-coordinate
+        typename C2::Scalar I_x;
+        // Commitment x-coordinate
+        typename C2::Scalar C_x;
+    };
+    static const std::size_t LEAF_TUPLE_SIZE = 3;
+    static_assert(sizeof(LeafTuple) == (sizeof(typename C2::Scalar) * LEAF_TUPLE_SIZE), "unexpected LeafTuple size");
+
+    // Contiguous leaves in the tree, starting at a specified start_idx in the leaf layer
+    struct Leaves final
+    {
+        // Starting leaf tuple index in the leaf layer
+        uint64_t start_leaf_tuple_idx{0};
+        // Contiguous leaves in a tree that start at the start_idx
+        std::vector<LeafTuple> tuples;
+    };
+
+    // A struct useful to extend an existing tree
+    // - layers alternate between C1 and C2
+    // - c2_layer_extensions[0] is first layer after leaves, then c1_layer_extensions[0], c2_layer_extensions[1], etc
+    struct TreeExtension final
+    {
+        Leaves leaves;
+        std::vector<LayerExtension<C1>> c1_layer_extensions;
+        std::vector<LayerExtension<C2>> c2_layer_extensions;
+    };
+
+    // A struct useful to reduce the number of leaves in an existing tree
+    // - layers alternate between C1 and C2
+    // - c2_layer_reductions[0] is first layer after leaves, then c1_layer_reductions[0], c2_layer_reductions[1], etc
+    struct TreeReduction final
+    {
+        uint64_t new_total_leaf_tuples;
+        std::vector<LayerReduction<C1>> c1_layer_reductions;
+        std::vector<LayerReduction<C2>> c2_layer_reductions;
+    };
+
+    // Last hashes from each layer in the tree
+    // - layers alternate between C1 and C2
+    // - c2_last_hashes[0] refers to the layer after leaves, then c1_last_hashes[0], then c2_last_hashes[1], etc
+    struct LastHashes final
+    {
+        std::vector<typename C1::Point> c1_last_hashes;
+        std::vector<typename C2::Point> c2_last_hashes;
+    };
+
+    // The children we'll trim from each last chunk in the tree
+    // - layers alternate between C1 and C2
+    // - c2_children[0] refers to the layer after leaves, then c1_children[0], then c2_children[1], etc
+    struct LastChunkChildrenToTrim final
+    {
+        std::vector<std::vector<typename C1::Scalar>> c1_children;
+        std::vector<std::vector<typename C2::Scalar>> c2_children;
+    };
+
+//member functions
+public:
+    // Convert output pairs into leaf tuples, from {output pubkey,commitment} -> {O,C} -> {O.x,I.x,C.x}
+    LeafTuple leaf_tuple(const OutputPair &output_pair) const;
+
+    // Flatten leaves [(O.x, I.x, C.x),(O.x, I.x, C.x),...] -> [O.x, I.x, C.x, O.x, I.x, C.x...]
+    std::vector<typename C2::Scalar> flatten_leaves(std::vector<LeafTuple> &&leaves) const;
+
+    // Take in the existing number of leaf tuples and the existing last hash in each layer in the tree, as well as new
+    // outputs to add to the tree, and return a tree extension struct that can be used to extend a tree
+    TreeExtension get_tree_extension(const uint64_t old_n_leaf_tuples,
+        const LastHashes &existing_last_hashes,
+        std::vector<OutputContext> &&new_leaf_tuples) const;
+
+    // Get instructions useful for trimming all existing layers in the tree
+    std::vector<TrimLayerInstructions> get_trim_instructions(
+        const uint64_t old_n_leaf_tuples,
+        const uint64_t trim_n_leaf_tuples) const;
+
+    // Take in the instructions useful for trimming all existing layers in the tree, all children to be trimmed from
+    // each last chunk, and the existing last hash in what will become the new last parent of each layer, and return
+    // a tree reduction struct that can be used to trim a tree
+    TreeReduction get_tree_reduction(
+        const std::vector<TrimLayerInstructions> &trim_instructions,
+        const LastChunkChildrenToTrim &children_to_trim,
+        const LastHashes &last_hashes) const;
+
+private:
+    // Multithreaded helper function to convert outputs to leaf tuples and set leaves on tree extension
+    void set_valid_leaves(
+        std::vector<typename C2::Scalar> &flattened_leaves_out,
+        std::vector<OutputContext> &tuples_out,
+        std::vector<OutputContext> &&new_outputs) const;
+
+    // Helper function used to set the next layer extension used to grow the next layer in the tree
+    // - for example, if we just grew the parent layer after the leaf layer, the "next layer" would be the grandparent
+    //   layer of the leaf layer
+    GrowLayerInstructions set_next_layer_extension(
+        const GrowLayerInstructions &prev_layer_instructions,
+        const bool parent_is_c1,
+        const LastHashes &last_hashes,
+        std::size_t &c1_last_idx_inout,
+        std::size_t &c2_last_idx_inout,
+        TreeExtension &tree_extension_inout) const;
+
+//public member variables
+public:
+    // The curve interfaces
+    const std::unique_ptr<C1> m_c1;
+    const std::unique_ptr<C2> m_c2;
+
+    // The leaf layer has a distinct chunk width from the other layers
+    const std::size_t m_leaf_layer_chunk_width;
+
+    // The chunk widths of the layers in the tree tied to each curve
+    const std::size_t m_c1_width;
+    const std::size_t m_c2_width;
+};
+//----------------------------------------------------------------------------------------------------------------------
+using Helios = tower_cycle::Helios;
+using Selene = tower_cycle::Selene;
+using CurveTreesV1 = CurveTrees<Helios, Selene>;
+
+// https://github.com/kayabaNerve/fcmp-plus-plus/blob
+// /b2742e86f3d18155fd34dd1ed69cb8f79b900fce/crypto/fcmps/src/tests.rs#L81-L82
+const std::size_t HELIOS_CHUNK_WIDTH = 38;
+const std::size_t SELENE_CHUNK_WIDTH = 18;
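+// Added commentary (illustrative, not part of the original patch): with these widths, one leaf chunk holds
+// 18 output tuples (54 Selene scalars). A tree with 1 layer above the leaves therefore commits to 18 outputs,
+// 2 layers to 18 * 38 = 684, 3 layers to 18 * 38 * 18 = 12,312, and so on, alternating the two widths.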
+
+std::shared_ptr<CurveTreesV1> curve_trees_v1(
+    const std::size_t helios_chunk_width = HELIOS_CHUNK_WIDTH,
+    const std::size_t selene_chunk_width = SELENE_CHUNK_WIDTH);
+
+// A path in the tree containing whole chunks at each layer
+// - leaves contain a complete chunk of leaves, encoded as compressed ed25519 points
+// - c2_layers[0] refers to the chunk of elems in the tree in the layer after leaves. The hash of the chunk of
+//   leaves is 1 member of the c2_layers[0] chunk. The rest of c2_layers[0] is the chunk of elems that hash is in.
+// - layers alternate between C1 and C2
+// - c1_layers[0] refers to the chunk of elems in the tree in the layer after c2_layers[0]. The hash of the chunk
+//   of c2_layers[0] is 1 member of the c1_layers[0] chunk. The rest of c1_layers[0] is the chunk of elems that hash
+//   is in.
+// - c2_layers[1] refers to the chunk of elems in the tree in the layer after c1_layers[0] etc.
+struct PathV1 final
+{
+    std::vector<OutputTuple> leaves;
+    std::vector<std::vector<Helios::Point>> c1_layers;
+    std::vector<std::vector<Selene::Point>> c2_layers;
+};
+//----------------------------------------------------------------------------------------------------------------------
+//----------------------------------------------------------------------------------------------------------------------
+} //namespace curve_trees
+} //namespace fcmp_pp
diff --git a/src/fcmp_pp/fcmp_pp_crypto.cpp b/src/fcmp_pp/fcmp_pp_crypto.cpp
new file mode 100644
index 00000000000..6d3f2507a7b
--- /dev/null
+++ b/src/fcmp_pp/fcmp_pp_crypto.cpp
@@ -0,0 +1,80 @@
+// Copyright (c) 2024, The Monero Project
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without modification, are
+// permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice, this list of
+//    conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright notice, this list
+//    of conditions and the following disclaimer in the documentation and/or other
+//    materials provided with the distribution.
+//
+// 3. Neither the name of the copyright holder nor the names of its contributors may be
+//    used to endorse or promote products derived from this software without specific
+//    prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+// THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
+// THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "fcmp_pp_crypto.h"
+
+#include "ringct/rctOps.h"
+
+namespace fcmp_pp
+{
+//----------------------------------------------------------------------------------------------------------------------
+bool clear_torsion(const rct::key &k, rct::key &k_out) {
+    ge_p3 point;
+    if (ge_frombytes_vartime(&point, k.bytes) != 0)
+        return false;
+    // mul by inv 8, then mul by 8
+    ge_p2 point_inv_8;
+    ge_scalarmult(&point_inv_8, rct::INV_EIGHT.bytes, &point);
+    ge_p1p1 point_inv_8_mul_8;
+    ge_mul8(&point_inv_8_mul_8, &point_inv_8);
+    ge_p3 torsion_cleared_point;
+    ge_p1p1_to_p3(&torsion_cleared_point, &point_inv_8_mul_8);
+    ge_p3_tobytes(k_out.bytes, &torsion_cleared_point);
+    return true;
+}
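+// Added commentary (illustrative, not part of the original patch): ed25519's full group order is 8 * l. INV_EIGHT
+// is 8^-1 mod l, so multiplying by it and then by 8 fixes the prime-order component of the point while sending any
+// small-torsion component T to (8 * inv_8) * T = 0, since 8 * inv_8 is a multiple of 8.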
+//----------------------------------------------------------------------------------------------------------------------
+bool point_to_ed_y_derivatives(const rct::key &pub, EdYDerivatives &ed_y_derivatives) {
+    if (pub == rct::I)
+        return false;
+    fe y;
+    if (fe_frombytes_vartime(y, pub.bytes) != 0)
+        return false;
+    fe one;
+    fe_1(one);
+    // (1+y),(1-y)
+    fe_add(ed_y_derivatives.one_plus_y, one, y);
+    fe_sub(ed_y_derivatives.one_minus_y, one, y);
+    return true;
+}
+//----------------------------------------------------------------------------------------------------------------------
+void ed_y_derivatives_to_wei_x(const EdYDerivatives &pre_wei_x, rct::key &wei_x) {
+    fe inv_one_minus_y;
+    fe_invert(inv_one_minus_y, pre_wei_x.one_minus_y);
+    fe_ed_y_derivatives_to_wei_x(wei_x.bytes, inv_one_minus_y, pre_wei_x.one_plus_y);
+}
+//----------------------------------------------------------------------------------------------------------------------
+bool point_to_wei_x(const rct::key &pub, rct::key &wei_x) {
+    EdYDerivatives ed_y_derivatives;
+    if (!point_to_ed_y_derivatives(pub, ed_y_derivatives))
+        return false;
+    ed_y_derivatives_to_wei_x(ed_y_derivatives, wei_x);
+    return true;
+}
+//----------------------------------------------------------------------------------------------------------------------
+//----------------------------------------------------------------------------------------------------------------------
+}//namespace fcmp_pp
diff --git a/src/fcmp_pp/fcmp_pp_crypto.h b/src/fcmp_pp/fcmp_pp_crypto.h
new file mode 100644
index 00000000000..2c63ff22455
--- /dev/null
+++ b/src/fcmp_pp/fcmp_pp_crypto.h
@@ -0,0 +1,54 @@
+// Copyright (c) 2024, The Monero Project
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without modification, are
+// permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice, this list of
+//    conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright notice, this list
+//    of conditions and the following disclaimer in the documentation and/or other
+//    materials provided with the distribution.
+//
+// 3. Neither the name of the copyright holder nor the names of its contributors may be
+//    used to endorse or promote products derived from this software without specific
+//    prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+// THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
+// THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#pragma once
+
+extern "C"
+{
+#include "crypto/crypto-ops.h"
+}
+#include "ringct/rctTypes.h"
+
+namespace fcmp_pp
+{
+//----------------------------------------------------------------------------------------------------------------------
+// Field elems needed to get wei x coord
+struct EdYDerivatives final
+{
+    fe one_plus_y;
+    fe one_minus_y;
+};
+//----------------------------------------------------------------------------------------------------------------------
+// TODO: tests for these functions
+bool clear_torsion(const rct::key &k, rct::key &k_out);
+bool point_to_ed_y_derivatives(const rct::key &pub, EdYDerivatives &ed_y_derivatives);
+void ed_y_derivatives_to_wei_x(const EdYDerivatives &ed_y_derivatives, rct::key &wei_x);
+bool point_to_wei_x(const rct::key &pub, rct::key &wei_x);
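+// Added commentary (illustrative, not part of the original patch): (1+y)/(1-y) is the birational map from an
+// ed25519 point's y coordinate to the Montgomery u coordinate; fe_ed_y_derivatives_to_wei_x is assumed to finish
+// the standard map to a short Weierstrass x coordinate (x = u + A/3 for curve25519's A = 486662). Splitting the
+// derivatives out lets set_valid_leaves batch-invert many (1-y)'s at once.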
+//----------------------------------------------------------------------------------------------------------------------
+//----------------------------------------------------------------------------------------------------------------------
+}//namespace fcmp_pp
diff --git a/src/fcmp_pp/fcmp_pp_rust/.gitignore b/src/fcmp_pp/fcmp_pp_rust/.gitignore
new file mode 100644
index 00000000000..5a07b8927f8
--- /dev/null
+++ b/src/fcmp_pp/fcmp_pp_rust/.gitignore
@@ -0,0 +1,4 @@
+# If a developer runs cargo build inside this sub-directory to only work with
+# the Rust side of things, they'll create this target directory which shouldn't
+# be committed
+target
diff --git a/src/fcmp_pp/fcmp_pp_rust/CMakeLists.txt b/src/fcmp_pp/fcmp_pp_rust/CMakeLists.txt
new file mode 100644
index 00000000000..9d2df657e97
--- /dev/null
+++ b/src/fcmp_pp/fcmp_pp_rust/CMakeLists.txt
@@ -0,0 +1,118 @@
+# Copyright (c) 2016-2024, The Monero Project
+#
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification, are
+# permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice, this list of
+#    conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice, this list
+#    of conditions and the following disclaimer in the documentation and/or other
+#    materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors may be
+#    used to endorse or promote products derived from this software without specific
+#    prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
+# THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+if(ARCH_ID MATCHES "x86-64")
+    set(RUST_ARCH "x86_64")
+elseif(ARCH_ID MATCHES "(arm64|armv8a|armv8-a)")
+    set(RUST_ARCH "aarch64")
+elseif(ARCH_ID MATCHES "armv7-a")
+    set(RUST_ARCH "armv7")
+elseif(ARCH_ID MATCHES "i386")
+    set(RUST_ARCH "i686")
+elseif(ARCH_ID MATCHES "riscv64")
+    set(RUST_ARCH "riscv64gc")
+else()
+    set(RUST_ARCH "${ARCH_ID}")
+endif()
+
+if(MINGW)
+    set(RUST_PLATFORM "pc-windows")
+    set(RUST_TOOLCHAIN "-gnu")
+elseif(MSVC)
+    set(RUST_PLATFORM "pc-windows")
+    set(RUST_TOOLCHAIN "-msvc")
+elseif(APPLE)
+    set(RUST_PLATFORM "apple-darwin")
+    set(RUST_TOOLCHAIN "")
+elseif(FREEBSD)
+    set(RUST_PLATFORM "unknown-freebsd")
+    set(RUST_TOOLCHAIN "")
+elseif(OPENBSD)
+    set(RUST_PLATFORM "unknown-openbsd")
+    set(RUST_TOOLCHAIN "")
+elseif(ANDROID)
+    set(RUST_PLATFORM "linux-android")
+    if(RUST_ARCH MATCHES "(arm|armv7)")
+        set(RUST_TOOLCHAIN "eabi")
+    else()
+        set(RUST_TOOLCHAIN "")
+    endif()
+elseif(DRAGONFLY)
+    set(RUST_PLATFORM "unknown-dragonfly")
+    set(RUST_TOOLCHAIN "")
+elseif(CMAKE_SYSTEM_NAME MATCHES "(SunOS|Solaris)")
+    if(RUST_ARCH MATCHES "x86_64")
+        set(RUST_PLATFORM "pc-solaris")
+        set(RUST_TOOLCHAIN "")
+    elseif(RUST_ARCH MATCHES "sparcv9")
+        set(RUST_PLATFORM "sun-solaris")
+        set(RUST_TOOLCHAIN "")
+    endif()
+else()
+    set(RUST_PLATFORM "unknown-linux")
+    if(RUST_ARCH MATCHES "armv7")
+        # Rust does support non-HF, yet Monero assumes HF for armv7
+        set(RUST_TOOLCHAIN "-gnueabihf")
+    else()
+        set(RUST_TOOLCHAIN "-gnu")
+    endif()
+endif()
+
+set(RUST_TARGET "${RUST_ARCH}-${RUST_PLATFORM}${RUST_TOOLCHAIN}")
+
+if (CMAKE_BUILD_TYPE STREQUAL "Debug")
+    set(CARGO_CMD cargo build --target "${RUST_TARGET}" ${CARGO_OPTIONS})
+    set(TARGET_DIR "debug")
+else ()
+    set(CARGO_CMD cargo build --target "${RUST_TARGET}" --release ${CARGO_OPTIONS})
+    set(TARGET_DIR "release")
+endif ()
+
+set(FCMP_PP_RUST_HEADER_DIR "${MONERO_GENERATED_HEADERS_DIR}/fcmp_pp_rust")
+set(FCMP_PP_RUST_HEADER "${FCMP_PP_RUST_HEADER_DIR}/fcmp++.h")
+set(FCMP_PP_RUST_LIB "${CMAKE_CURRENT_BINARY_DIR}/libfcmp_pp_rust.a")
+
+# Removing OUTPUT files makes sure custom command runs every time
+file(REMOVE_RECURSE "${FCMP_PP_RUST_HEADER_DIR}")
+file(MAKE_DIRECTORY "${FCMP_PP_RUST_HEADER_DIR}")
+
+file(REMOVE "${FCMP_PP_RUST_LIB}")
+
+add_custom_command(
+    COMMENT "Building fcmp++ rust lib"
+    OUTPUT ${FCMP_PP_RUST_HEADER}
+    OUTPUT ${FCMP_PP_RUST_LIB}
+    COMMAND CARGO_TARGET_DIR=${CMAKE_CURRENT_BINARY_DIR} ${CARGO_CMD}
+    COMMAND cp ${CMAKE_CURRENT_SOURCE_DIR}/fcmp++.h ${FCMP_PP_RUST_HEADER}
+    COMMAND cp ${CMAKE_CURRENT_BINARY_DIR}/${RUST_TARGET}/${TARGET_DIR}/libfcmp_pp_rust.a ${FCMP_PP_RUST_LIB}
+    COMMAND echo "Finished copying fcmp++ rust targets"
+    WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
+    VERBATIM
+)
+
+add_custom_target(fcmp_pp_rust DEPENDS ${FCMP_PP_RUST_LIB})
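+# Added commentary (illustrative, not part of the original patch): RUST_TARGET is assembled as
+# "${RUST_ARCH}-${RUST_PLATFORM}${RUST_TOOLCHAIN}"; e.g. a MinGW x86-64 build yields "x86_64-pc-windows-gnu" and an
+# Apple arm64 build "aarch64-apple-darwin", the same triples the workflow files install via rustup. Consumers are
+# expected to depend on the fcmp_pp_rust target and link the static ${FCMP_PP_RUST_LIB}.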
diff --git a/src/fcmp_pp/fcmp_pp_rust/Cargo.lock b/src/fcmp_pp/fcmp_pp_rust/Cargo.lock
new file mode 100644
index 00000000000..742b6570e8e
--- /dev/null
+++ b/src/fcmp_pp/fcmp_pp_rust/Cargo.lock
@@ -0,0 +1,804 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+version = 3
+
+[[package]]
+name = "ahash"
+version = "0.8.11"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011"
+dependencies = [
+ "cfg-if",
+ "once_cell",
+ "version_check",
+ "zerocopy",
+]
+
+[[package]]
+name = "base16ct"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4c7f02d4ea65f2c1853089ffd8d2787bdbc63de2f0d29dedbcf8ccdfa0ccd4cf"
+
+[[package]]
+name = "base64ct"
+version = "1.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b"
+
+[[package]]
+name = "bitvec"
+version = "1.0.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1bc2832c24239b0141d5674bb9174f9d68a8b5b3f2753311927c172ca46f7e9c"
+dependencies = [
+ "funty",
+ "radium",
+ "tap",
+ "wyz",
+]
+
+[[package]]
+name = "blake2"
+version = "0.10.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "46502ad458c9a52b69d4d4d32775c788b7a1b85e8bc9d482d92250fc0e3f8efe"
+dependencies = [
+ "digest",
+]
+
+[[package]]
+name = "block-buffer"
+version = "0.10.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71"
+dependencies = [
+ "generic-array 0.14.7",
+]
+
+[[package]]
+name = "byteorder"
+version = "1.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b"
+
+[[package]]
+name = "cfg-if"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
+
+[[package]]
+name = "ciphersuite"
+version = "0.4.1"
+source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#529a3fe9c54b7f701d7978b3dc781ea27ba7d7e5"
+dependencies = [
+ "blake2",
+ "dalek-ff-group",
+ "digest",
+ "elliptic-curve",
+ "ff",
+ "flexible-transcript",
+ "group",
+ "helioselene",
+ "k256",
+ "minimal-ed448",
+ "p256",
+ "rand_core",
+ "sha2",
+ "sha3",
+ "std-shims",
+ "subtle",
+ "zeroize",
+]
+
+[[package]]
+name = "const-oid"
+version = "0.9.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8"
+
+[[package]]
+name = "cpufeatures"
+version = "0.2.12"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "53fe5e26ff1b7aef8bca9c6080520cfb8d9333c7568e1829cef191a9723e5504"
+dependencies = [
+ "libc",
+]
+
+[[package]]
+name = "crypto-bigint"
+version = "0.5.5"
+source = "git+https://github.com/kayabaNerve/crypto-bigint?branch=c-repr#78352771313f1e9b8e48abe5ce30d50d6bdd291d"
+dependencies = [
+ "generic-array 0.14.7",
+ "rand_core",
+ "subtle",
+ "zeroize",
+]
+
+[[package]]
+name = "crypto-common"
+version = "0.1.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3"
+dependencies = [
+ "generic-array 0.14.7",
+ "typenum",
+]
+
+[[package]]
+name =
"curve25519-dalek" +version = "4.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97fb8b7c4503de7d6ae7b42ab72a5a59857b4c937ec27a3d4539dba95b5ab2be" +dependencies = [ + "cfg-if", + "cpufeatures", + "curve25519-dalek-derive", + "digest", + "fiat-crypto", + "group", + "rand_core", + "rustc_version", + "subtle", + "zeroize", +] + +[[package]] +name = "curve25519-dalek-derive" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "dalek-ff-group" +version = "0.4.1" +source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#529a3fe9c54b7f701d7978b3dc781ea27ba7d7e5" +dependencies = [ + "crypto-bigint", + "curve25519-dalek", + "digest", + "ff", + "group", + "rand_core", + "rustversion", + "subtle", + "zeroize", +] + +[[package]] +name = "der" +version = "0.7.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f55bf8e7b65898637379c1b74eb1551107c8294ed26d855ceb9fd1a09cfc9bc0" +dependencies = [ + "const-oid", + "zeroize", +] + +[[package]] +name = "digest" +version = "0.10.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" +dependencies = [ + "block-buffer", + "crypto-common", + "subtle", +] + +[[package]] +name = "ec-divisors" +version = "0.1.0" +source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#529a3fe9c54b7f701d7978b3dc781ea27ba7d7e5" +dependencies = [ + "dalek-ff-group", + "group", + "hex", + "rand_core", + "zeroize", +] + +[[package]] +name = "ecdsa" +version = "0.16.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee27f32b5c5292967d2d4a9d7f1e0b0aed2c15daded5a60300e4abb9d8020bca" +dependencies = [ + "der", + "elliptic-curve", + "signature", + "spki", +] + +[[package]] +name = "elliptic-curve" +version = "0.13.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b5e6043086bf7973472e0c7dff2142ea0b680d30e18d9cc40f267efbf222bd47" +dependencies = [ + "base16ct", + "crypto-bigint", + "digest", + "ff", + "generic-array 0.14.7", + "group", + "pkcs8", + "rand_core", + "sec1", + "subtle", + "tap", + "zeroize", +] + +[[package]] +name = "fcmp_pp_rust" +version = "0.0.0" +dependencies = [ + "ciphersuite", + "ec-divisors", + "full-chain-membership-proofs", + "generalized-bulletproofs", + "helioselene", + "monero-fcmp-plus-plus", + "std-shims", +] + +[[package]] +name = "ff" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ded41244b729663b1e574f1b4fb731469f69f79c17667b5d776b16cda0479449" +dependencies = [ + "bitvec", + "rand_core", + "subtle", +] + +[[package]] +name = "fiat-crypto" +version = "0.2.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28dea519a9695b9977216879a3ebfddf92f1c08c05d984f8996aecd6ecdc811d" + +[[package]] +name = "flexible-transcript" +version = "0.3.2" +source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#529a3fe9c54b7f701d7978b3dc781ea27ba7d7e5" +dependencies = [ + "blake2", + "digest", + "merlin", + "rustversion", + "subtle", + "zeroize", +] + +[[package]] +name = "full-chain-membership-proofs" +version = "0.1.0" +source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#529a3fe9c54b7f701d7978b3dc781ea27ba7d7e5" +dependencies = [ + "blake2", + "ciphersuite", + "ec-divisors", + 
"generalized-bulletproofs", + "generalized-bulletproofs-circuit-abstraction", + "generalized-bulletproofs-ec-gadgets", + "generic-array 1.1.0", + "multiexp", + "rand_core", + "zeroize", +] + +[[package]] +name = "funty" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" + +[[package]] +name = "generalized-bulletproofs" +version = "0.1.0" +source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#529a3fe9c54b7f701d7978b3dc781ea27ba7d7e5" +dependencies = [ + "blake2", + "ciphersuite", + "multiexp", + "rand_core", + "zeroize", +] + +[[package]] +name = "generalized-bulletproofs-circuit-abstraction" +version = "0.1.0" +source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#529a3fe9c54b7f701d7978b3dc781ea27ba7d7e5" +dependencies = [ + "ciphersuite", + "generalized-bulletproofs", + "zeroize", +] + +[[package]] +name = "generalized-bulletproofs-ec-gadgets" +version = "0.1.0" +source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#529a3fe9c54b7f701d7978b3dc781ea27ba7d7e5" +dependencies = [ + "ciphersuite", + "generalized-bulletproofs-circuit-abstraction", + "generic-array 1.1.0", +] + +[[package]] +name = "generalized-schnorr" +version = "0.1.0" +source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#529a3fe9c54b7f701d7978b3dc781ea27ba7d7e5" +dependencies = [ + "ciphersuite", + "flexible-transcript", + "multiexp", + "rand_core", + "std-shims", + "zeroize", +] + +[[package]] +name = "generic-array" +version = "0.14.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" +dependencies = [ + "typenum", + "version_check", + "zeroize", +] + +[[package]] +name = "generic-array" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96512db27971c2c3eece70a1e106fbe6c87760234e31e8f7e5634912fe52794a" +dependencies = [ + "typenum", +] + +[[package]] +name = "getrandom" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7" +dependencies = [ + "cfg-if", + "libc", + "wasi", +] + +[[package]] +name = "group" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63" +dependencies = [ + "ff", + "rand_core", + "subtle", +] + +[[package]] +name = "hashbrown" +version = "0.14.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" +dependencies = [ + "ahash", +] + +[[package]] +name = "helioselene" +version = "0.1.0" +source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#529a3fe9c54b7f701d7978b3dc781ea27ba7d7e5" +dependencies = [ + "crypto-bigint", + "dalek-ff-group", + "ec-divisors", + "ff", + "group", + "rand_core", + "rustversion", + "subtle", + "zeroize", +] + +[[package]] +name = "hex" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" + +[[package]] +name = "k256" +version = "0.13.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "956ff9b67e26e1a6a866cb758f12c6f8746208489e3e4a4b5580802f2f0a587b" +dependencies = [ + "cfg-if", + "ecdsa", + "elliptic-curve", + "once_cell", +] + +[[package]] +name = 
"keccak" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ecc2af9a1119c51f12a14607e783cb977bde58bc069ff0c3da1095e635d70654" +dependencies = [ + "cpufeatures", +] + +[[package]] +name = "libc" +version = "0.2.155" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97b3888a4aecf77e811145cadf6eef5901f4782c53886191b2f693f24761847c" + +[[package]] +name = "merlin" +version = "3.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "58c38e2799fc0978b65dfff8023ec7843e2330bb462f19198840b34b6582397d" +dependencies = [ + "byteorder", + "keccak", + "rand_core", + "zeroize", +] + +[[package]] +name = "minimal-ed448" +version = "0.4.0" +source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#529a3fe9c54b7f701d7978b3dc781ea27ba7d7e5" +dependencies = [ + "crypto-bigint", + "ff", + "generic-array 1.1.0", + "group", + "rand_core", + "rustversion", + "subtle", + "zeroize", +] + +[[package]] +name = "monero-fcmp-plus-plus" +version = "0.1.0" +source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#529a3fe9c54b7f701d7978b3dc781ea27ba7d7e5" +dependencies = [ + "ciphersuite", + "dalek-ff-group", + "ec-divisors", + "flexible-transcript", + "full-chain-membership-proofs", + "generalized-bulletproofs", + "generalized-bulletproofs-ec-gadgets", + "generalized-schnorr", + "generic-array 1.1.0", + "monero-generators", + "monero-io", + "monero-primitives", + "multiexp", + "rand_core", + "std-shims", + "zeroize", +] + +[[package]] +name = "monero-generators" +version = "0.4.0" +source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#529a3fe9c54b7f701d7978b3dc781ea27ba7d7e5" +dependencies = [ + "curve25519-dalek", + "dalek-ff-group", + "group", + "monero-io", + "sha3", + "std-shims", + "subtle", +] + +[[package]] +name = "monero-io" +version = "0.1.0" +source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#529a3fe9c54b7f701d7978b3dc781ea27ba7d7e5" +dependencies = [ + "curve25519-dalek", + "std-shims", +] + +[[package]] +name = "monero-primitives" +version = "0.1.0" +source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#529a3fe9c54b7f701d7978b3dc781ea27ba7d7e5" +dependencies = [ + "curve25519-dalek", + "monero-generators", + "monero-io", + "sha3", + "std-shims", + "zeroize", +] + +[[package]] +name = "multiexp" +version = "0.4.0" +source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#529a3fe9c54b7f701d7978b3dc781ea27ba7d7e5" +dependencies = [ + "ff", + "group", + "rand_core", + "rustversion", + "std-shims", + "zeroize", +] + +[[package]] +name = "once_cell" +version = "1.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" + +[[package]] +name = "p256" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c9863ad85fa8f4460f9c48cb909d38a0d689dba1f6f6988a5e3e0d31071bcd4b" +dependencies = [ + "ecdsa", + "elliptic-curve", + "primeorder", +] + +[[package]] +name = "pkcs8" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7" +dependencies = [ + "der", + "spki", +] + +[[package]] +name = "primeorder" +version = "0.13.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "353e1ca18966c16d9deb1c69278edbc5f194139612772bd9537af60ac231e1e6" +dependencies = [ + "elliptic-curve", +] + +[[package]] +name = "proc-macro2" 
+version = "1.0.86" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "quote" +version = "1.0.36" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "radium" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09" + +[[package]] +name = "rand_core" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" +dependencies = [ + "getrandom", +] + +[[package]] +name = "rustc_version" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" +dependencies = [ + "semver", +] + +[[package]] +name = "rustversion" +version = "1.0.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "955d28af4278de8121b7ebeb796b6a45735dc01436d898801014aced2773a3d6" + +[[package]] +name = "sec1" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3e97a565f76233a6003f9f5c54be1d9c5bdfa3eccfb189469f11ec4901c47dc" +dependencies = [ + "base16ct", + "der", + "generic-array 0.14.7", + "pkcs8", + "subtle", + "zeroize", +] + +[[package]] +name = "semver" +version = "1.0.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "61697e0a1c7e512e84a621326239844a24d8207b4669b41bc18b32ea5cbf988b" + +[[package]] +name = "sha2" +version = "0.10.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8" +dependencies = [ + "cfg-if", + "cpufeatures", + "digest", +] + +[[package]] +name = "sha3" +version = "0.10.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75872d278a8f37ef87fa0ddbda7802605cb18344497949862c0d4dcb291eba60" +dependencies = [ + "digest", + "keccak", +] + +[[package]] +name = "signature" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77549399552de45a898a580c1b41d445bf730df867cc44e6c0233bbc4b8329de" +dependencies = [ + "rand_core", +] + +[[package]] +name = "spin" +version = "0.9.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" + +[[package]] +name = "spki" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d91ed6c858b01f942cd56b37a94b3e0a1798290327d1236e4d9cf4eaca44d29d" +dependencies = [ + "base64ct", + "der", +] + +[[package]] +name = "std-shims" +version = "0.1.1" +source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#529a3fe9c54b7f701d7978b3dc781ea27ba7d7e5" +dependencies = [ + "hashbrown", + "spin", +] + +[[package]] +name = "subtle" +version = "2.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" + +[[package]] +name = "syn" +version = "2.0.72" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"dc4b9b9bf2add8093d3f2c0204471e951b2285580335de42f9d2534f3ae7a8af" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "tap" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" + +[[package]] +name = "typenum" +version = "1.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" + +[[package]] +name = "unicode-ident" +version = "1.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" + +[[package]] +name = "version_check" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" + +[[package]] +name = "wasi" +version = "0.11.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" + +[[package]] +name = "wyz" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05f360fc0b24296329c78fda852a1e9ae82de9cf7b27dae4b7f62f118f77b9ed" +dependencies = [ + "tap", +] + +[[package]] +name = "zerocopy" +version = "0.7.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0" +dependencies = [ + "zerocopy-derive", +] + +[[package]] +name = "zerocopy-derive" +version = "0.7.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "zeroize" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" +dependencies = [ + "zeroize_derive", +] + +[[package]] +name = "zeroize_derive" +version = "1.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] diff --git a/src/fcmp_pp/fcmp_pp_rust/Cargo.toml b/src/fcmp_pp/fcmp_pp_rust/Cargo.toml new file mode 100644 index 00000000000..65a487a0c72 --- /dev/null +++ b/src/fcmp_pp/fcmp_pp_rust/Cargo.toml @@ -0,0 +1,32 @@ +[package] +name = "fcmp_pp_rust" +version = "0.0.0" +edition = "2021" + +[lib] +name = "fcmp_pp_rust" +crate-type = ["staticlib"] + +[dependencies] +std-shims = { git = "https://github.com/kayabaNerve/fcmp-plus-plus" } + +helioselene = { git = "https://github.com/kayabaNerve/fcmp-plus-plus" } +ciphersuite = { git = "https://github.com/kayabaNerve/fcmp-plus-plus", features = ["ed25519", "helioselene"] } + +generalized-bulletproofs = { git = "https://github.com/kayabaNerve/fcmp-plus-plus", features = ["tests"] } + +ec-divisors = { git = "https://github.com/kayabaNerve/fcmp-plus-plus", features = ["ed25519"] } +full-chain-membership-proofs = { git = "https://github.com/kayabaNerve/fcmp-plus-plus" } + +monero-fcmp-plus-plus = { git = "https://github.com/kayabaNerve/fcmp-plus-plus" } + +[patch.crates-io] +crypto-bigint = { git = "https://github.com/kayabaNerve/crypto-bigint", branch = "c-repr" } + +[profile.dev] +lto = "off" +panic = "abort" + 
+[profile.release] +lto = "off" +panic = "abort" diff --git a/src/fcmp_pp/fcmp_pp_rust/fcmp++.h b/src/fcmp_pp/fcmp_pp_rust/fcmp++.h new file mode 100644 index 00000000000..81f7d02829f --- /dev/null +++ b/src/fcmp_pp/fcmp_pp_rust/fcmp++.h @@ -0,0 +1,142 @@ +#include <cstdarg> +#include <cstddef> +#include <cstdint> +#include <cstdlib> +#include <new> + + +namespace fcmp_pp_rust +{ +// ----- deps C bindings ----- + +/// Inner integer type that the [`Limb`] newtype wraps. +// TODO: test 32-bit platforms +using Word = uintptr_t; + +/// Big integers are represented as an array of smaller CPU word-size integers +/// called "limbs". +using Limb = Word; + + +/// Stack-allocated big unsigned integer. +/// +/// Generic over the given number of `LIMBS` +/// +/// # Encoding support +/// This type supports many different types of encodings, either via the +/// [`Encoding`][`crate::Encoding`] trait or various `const fn` decoding and +/// encoding functions that can be used with [`Uint`] constants. +/// +/// Optional crate features for encoding (off-by-default): +/// - `generic-array`: enables [`ArrayEncoding`][`crate::ArrayEncoding`] trait which can be used to +/// serialize/deserialize [`Uint`] as `GenericArray<u8, N>` and a [`ArrayDecoding`][`crate::ArrayDecoding`] trait which +/// can be used to deserialize `GenericArray<u8, N>` as [`Uint`]. +/// - `rlp`: support for [Recursive Length Prefix (RLP)][RLP] encoding. +/// +/// [RLP]: https://eth.wiki/fundamentals/rlp +template<uintptr_t LIMBS> +struct Uint { + /// Inner limb array. Stored from least significant to most significant. + Limb limbs[LIMBS]; +}; + + +/// A residue mod `MOD`, represented using `LIMBS` limbs. The modulus of this residue is constant, so it cannot be set at runtime. +/// Internally, the value is stored in Montgomery form (multiplied by MOD::R) until it is retrieved. +template<uintptr_t LIMBS> +struct Residue { + Uint<LIMBS> montgomery_form; +}; + + +/// A constant-time implementation of the Ed25519 field. +struct SeleneScalar { + Residue<32 / sizeof(uintptr_t)> _0; +}; +static_assert(sizeof(SeleneScalar) == 32, "unexpected size of selene scalar"); + + +/// The field novel to Helios/Selene.
+struct HeliosScalar { + Residue<32 / sizeof(uintptr_t)> _0; +}; +static_assert(sizeof(HeliosScalar) == 32, "unexpected size of helios scalar"); + +struct HeliosPoint { + SeleneScalar x; + SeleneScalar y; + SeleneScalar z; +}; + +struct SelenePoint { + HeliosScalar x; + HeliosScalar y; + HeliosScalar z; +}; + +// ----- End deps C bindings ----- + +struct CResult { + void* value; + void* err; +}; + +template<typename T> +struct Slice { + const T *buf; + uintptr_t len; +}; + +using HeliosScalarSlice = Slice<HeliosScalar>; + +using SeleneScalarSlice = Slice<SeleneScalar>; + +extern "C" { +HeliosPoint helios_hash_init_point(); + +SelenePoint selene_hash_init_point(); + +uint8_t *helios_scalar_to_bytes(HeliosScalar helios_scalar); + +uint8_t *selene_scalar_to_bytes(SeleneScalar selene_scalar); + +uint8_t *helios_point_to_bytes(HeliosPoint helios_point); + +uint8_t *selene_point_to_bytes(SelenePoint selene_point); + +HeliosPoint helios_point_from_bytes(const uint8_t *helios_point_bytes); + +SelenePoint selene_point_from_bytes(const uint8_t *selene_point_bytes); + +SeleneScalar selene_scalar_from_bytes(const uint8_t *selene_scalar_bytes); + +HeliosScalar selene_point_to_helios_scalar(SelenePoint selene_point); + +SeleneScalar helios_point_to_selene_scalar(HeliosPoint helios_point); + +HeliosScalar helios_zero_scalar(); + +SeleneScalar selene_zero_scalar(); + +CResult hash_grow_helios(HeliosPoint existing_hash, + uintptr_t offset, + HeliosScalar existing_child_at_offset, + HeliosScalarSlice new_children); + +CResult hash_trim_helios(HeliosPoint existing_hash, + uintptr_t offset, + HeliosScalarSlice children, + HeliosScalar child_to_grow_back); + +CResult hash_grow_selene(SelenePoint existing_hash, + uintptr_t offset, + SeleneScalar existing_child_at_offset, + SeleneScalarSlice new_children); + +CResult hash_trim_selene(SelenePoint existing_hash, + uintptr_t offset, + SeleneScalarSlice children, + SeleneScalar child_to_grow_back); + +} // extern "C" +}//namespace fcmp_pp_rust diff --git a/src/fcmp_pp/fcmp_pp_rust/src/lib.rs b/src/fcmp_pp/fcmp_pp_rust/src/lib.rs new file mode 100644 index 00000000000..4d9c37c75f1 --- /dev/null +++ b/src/fcmp_pp/fcmp_pp_rust/src/lib.rs @@ -0,0 +1,232 @@ +use ciphersuite::{ + group::{ + ff::{Field, PrimeField}, + GroupEncoding, + }, + Ciphersuite, Helios, Selene, +}; +use helioselene::{ + Field25519 as SeleneScalar, HeliosPoint, HelioseleneField as HeliosScalar, SelenePoint, +}; + +use ec_divisors::DivisorCurve; +use full_chain_membership_proofs::tree::{hash_grow, hash_trim}; + +use monero_fcmp_plus_plus::{HELIOS_HASH_INIT, SELENE_HASH_INIT, HELIOS_GENERATORS, SELENE_GENERATORS}; + +// TODO: Use a macro to de-duplicate some of this code + +#[no_mangle] +pub extern "C" fn helios_hash_init_point() -> HeliosPoint { + HELIOS_HASH_INIT() +} + +#[no_mangle] +pub extern "C" fn selene_hash_init_point() -> SelenePoint { + SELENE_HASH_INIT() +} + +fn c_u8_32(bytes: [u8; 32]) -> *const u8 { + let arr_ptr = Box::into_raw(Box::new(bytes)); + arr_ptr as *const u8 +} + +#[no_mangle] +pub extern "C" fn helios_scalar_to_bytes(helios_scalar: HeliosScalar) -> *const u8 { + c_u8_32(helios_scalar.to_repr()) +} + +#[no_mangle] +pub extern "C" fn selene_scalar_to_bytes(selene_scalar: SeleneScalar) -> *const u8 { + c_u8_32(selene_scalar.to_repr()) +} + +#[no_mangle] +pub extern "C" fn helios_point_to_bytes(helios_point: HeliosPoint) -> *const u8 { + c_u8_32(helios_point.to_bytes()) +} + +#[no_mangle] +pub extern "C" fn selene_point_to_bytes(selene_point: SelenePoint) -> *const u8 { + c_u8_32(selene_point.to_bytes()) +} + 
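+// NOTE: c_u8_32 and the *_to_bytes wrappers above intentionally leak their +// 32-byte buffer via Box::into_raw, handing ownership across the FFI boundary. +// The C++ side (src/fcmp_pp/tower_cycle.cpp) memcpys the bytes out and then +// calls free() on the pointer, which is only sound while Rust's global +// allocator is the system malloc.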
+#[allow(clippy::not_unsafe_ptr_arg_deref)] +#[no_mangle] +pub extern "C" fn helios_point_from_bytes(helios_point: *const u8) -> HeliosPoint { + let mut helios_point = unsafe { core::slice::from_raw_parts(helios_point, 32) }; + // TODO: Return an error here (instead of unwrapping) + <Helios as Ciphersuite>::read_G(&mut helios_point).unwrap() +} + +#[allow(clippy::not_unsafe_ptr_arg_deref)] +#[no_mangle] +pub extern "C" fn selene_point_from_bytes(selene_point: *const u8) -> SelenePoint { + let mut selene_point = unsafe { core::slice::from_raw_parts(selene_point, 32) }; + // TODO: Return an error here (instead of unwrapping) + <Selene as Ciphersuite>::read_G(&mut selene_point).unwrap() +} + +#[allow(clippy::not_unsafe_ptr_arg_deref)] +#[no_mangle] +pub extern "C" fn selene_scalar_from_bytes(selene_scalar: *const u8) -> SeleneScalar { + let mut selene_scalar = unsafe { core::slice::from_raw_parts(selene_scalar, 32) }; + // TODO: Return an error here (instead of unwrapping) + <Selene as Ciphersuite>::read_F(&mut selene_scalar).unwrap() +} + +#[no_mangle] +pub extern "C" fn selene_point_to_helios_scalar(selene_point: SelenePoint) -> HeliosScalar { + let xy_coords = SelenePoint::to_xy(selene_point); + // TODO: Return an error here (instead of unwrapping) + let x: HeliosScalar = xy_coords.unwrap().0; + x +} + +#[no_mangle] +pub extern "C" fn helios_point_to_selene_scalar(helios_point: HeliosPoint) -> SeleneScalar { + let xy_coords = HeliosPoint::to_xy(helios_point); + // TODO: Return an error here (instead of unwrapping) + let x: SeleneScalar = xy_coords.unwrap().0; + x +} + +#[no_mangle] +pub extern "C" fn helios_zero_scalar() -> HeliosScalar { + HeliosScalar::ZERO +} + +#[no_mangle] +pub extern "C" fn selene_zero_scalar() -> SeleneScalar { + SeleneScalar::ZERO +} + +#[repr(C)] +pub struct Slice<T> { + buf: *const T, + len: usize, +} +pub type HeliosScalarSlice = Slice<HeliosScalar>; +pub type SeleneScalarSlice = Slice<SeleneScalar>; +impl<'a, T> From<Slice<T>> for &'a [T] { + fn from(slice: Slice<T>) -> Self { + unsafe { core::slice::from_raw_parts(slice.buf, slice.len) } + } +} + +#[repr(C)] +pub struct CResult<T, E> { + value: *const T, + err: *const E, +} +impl<T, E> CResult<T, E> { + fn ok(value: T) -> Self { + CResult { + value: Box::into_raw(Box::new(value)), + err: core::ptr::null(), + } + } + fn err(err: E) -> Self { + CResult { + value: core::ptr::null(), + err: Box::into_raw(Box::new(err)), + } + } +} + +#[no_mangle] +pub extern "C" fn hash_grow_helios( + existing_hash: HeliosPoint, + offset: usize, + existing_child_at_offset: HeliosScalar, + new_children: HeliosScalarSlice, +) -> CResult<HeliosPoint, ()> { + let hash = hash_grow( + HELIOS_GENERATORS(), + existing_hash, + offset, + existing_child_at_offset, + new_children.into(), + ); + + if let Some(hash) = hash { + CResult::ok(hash) + } else { + // TODO: return defined error here: https://github.com/monero-project/monero/pull/9436#discussion_r1720477391 + CResult::err(()) + } +} + +#[no_mangle] +pub extern "C" fn hash_trim_helios( + existing_hash: HeliosPoint, + offset: usize, + children: HeliosScalarSlice, + child_to_grow_back: HeliosScalar, +) -> CResult<HeliosPoint, ()> { + let hash = hash_trim( + HELIOS_GENERATORS(), + existing_hash, + offset, + children.into(), + child_to_grow_back, + ); + + if let Some(hash) = hash { + CResult::ok(hash) + } else { + // TODO: return defined error here: https://github.com/monero-project/monero/pull/9436#discussion_r1720477391 + CResult::err(()) + } +} + +#[no_mangle] +pub extern "C" fn hash_grow_selene( + existing_hash: SelenePoint, + offset: usize, + existing_child_at_offset: SeleneScalar, + new_children: SeleneScalarSlice, +) -> CResult<SelenePoint, ()> { + let hash = 
hash_grow( + SELENE_GENERATORS(), + existing_hash, + offset, + existing_child_at_offset, + new_children.into(), + ); + + if let Some(hash) = hash { + CResult::ok(hash) + } else { + // TODO: return defined error here: https://github.com/monero-project/monero/pull/9436#discussion_r1720477391 + CResult::err(()) + } +} + +#[no_mangle] +pub extern "C" fn hash_trim_selene( + existing_hash: SelenePoint, + offset: usize, + children: SeleneScalarSlice, + child_to_grow_back: SeleneScalar, +) -> CResult<SelenePoint, ()> { + let hash = hash_trim( + SELENE_GENERATORS(), + existing_hash, + offset, + children.into(), + child_to_grow_back, + ); + + if let Some(hash) = hash { + CResult::ok(hash) + } else { + // TODO: return defined error here: https://github.com/monero-project/monero/pull/9436#discussion_r1720477391 + CResult::err(()) + } +} + +// https://github.com/rust-lang/rust/issues/79609 +#[cfg(all(target_os = "windows", target_arch = "x86"))] +#[no_mangle] +pub extern "C" fn _Unwind_Resume() {} diff --git a/src/fcmp_pp/proof.h b/src/fcmp_pp/proof.h new file mode 100644 index 00000000000..24f91fae9e7 --- /dev/null +++ b/src/fcmp_pp/proof.h @@ -0,0 +1,46 @@ +// Copyright (c) 2024, The Monero Project +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without modification, are +// permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of +// conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list +// of conditions and the following disclaimer in the documentation and/or other +// materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be +// used to endorse or promote products derived from this software without specific +// prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY +// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL +// THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF +// THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#pragma once + +#include <vector> + +namespace fcmp_pp +{ + +// Byte buffer containing the fcmp++ proof +using FcmpPpProof = std::vector<uint8_t>; + +static inline std::size_t proof_len(const std::size_t n_inputs, const uint8_t curve_trees_tree_depth) +{ + // TODO: implement + static_assert(sizeof(std::size_t) >= sizeof(uint8_t), "unexpected size of size_t"); + return n_inputs * (std::size_t)curve_trees_tree_depth * 2; +}; + +}//namespace fcmp_pp diff --git a/src/fcmp_pp/tower_cycle.cpp b/src/fcmp_pp/tower_cycle.cpp new file mode 100644 index 00000000000..9cb35af1f4b --- /dev/null +++ b/src/fcmp_pp/tower_cycle.cpp @@ -0,0 +1,283 @@ +// Copyright (c) 2024, The Monero Project +// +// All rights reserved.
+// +// Redistribution and use in source and binary forms, with or without modification, are +// permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of +// conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list +// of conditions and the following disclaimer in the documentation and/or other +// materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be +// used to endorse or promote products derived from this software without specific +// prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY +// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL +// THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF +// THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#include "string_tools.h" +#include "tower_cycle.h" + +namespace fcmp_pp +{ +namespace tower_cycle +{ +//---------------------------------------------------------------------------------------------------------------------- +//---------------------------------------------------------------------------------------------------------------------- +Helios::Point Helios::hash_init_point() const +{ + return fcmp_pp_rust::helios_hash_init_point(); +} +//---------------------------------------------------------------------------------------------------------------------- +Selene::Point Selene::hash_init_point() const +{ + return fcmp_pp_rust::selene_hash_init_point(); +} +//---------------------------------------------------------------------------------------------------------------------- +Helios::CycleScalar Helios::point_to_cycle_scalar(const Helios::Point &point) const +{ + return fcmp_pp_rust::helios_point_to_selene_scalar(point); +} +//---------------------------------------------------------------------------------------------------------------------- +Selene::CycleScalar Selene::point_to_cycle_scalar(const Selene::Point &point) const +{ + return fcmp_pp_rust::selene_point_to_helios_scalar(point); +} +//---------------------------------------------------------------------------------------------------------------------- +Helios::Point Helios::hash_grow( + const Helios::Point &existing_hash, + const std::size_t offset, + const Helios::Scalar &existing_child_at_offset, + const Helios::Chunk &new_children) const +{ + auto result = fcmp_pp_rust::hash_grow_helios( + existing_hash, + offset, + existing_child_at_offset, + new_children); + + if (result.err != nullptr) + { + free(result.err); + throw std::runtime_error("failed to hash grow"); + } + + typename Helios::Point res; + memcpy(&res, result.value, sizeof(typename Helios::Point)); + free(result.value); + return res; +} +//---------------------------------------------------------------------------------------------------------------------- 
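+// NOTE: CResult is a poor man's Result<T, E> over the FFI boundary: exactly one +// of value/err is non-null, and both are Box-allocated on the Rust side, so +// whichever pointer comes back must be copied out of and freed here, on the +// error path as well as the success path. hash_grow above and the hash_trim / +// Selene wrappers below all follow this same pattern.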
+Helios::Point Helios::hash_trim( + const Helios::Point &existing_hash, + const std::size_t offset, + const Helios::Chunk &children, + const Helios::Scalar &child_to_grow_back) const +{ + auto result = fcmp_pp_rust::hash_trim_helios( + existing_hash, + offset, + children, + child_to_grow_back); + + if (result.err != nullptr) + { + free(result.err); + throw std::runtime_error("failed to hash trim"); + } + + typename Helios::Point res; + memcpy(&res, result.value, sizeof(typename Helios::Point)); + free(result.value); + return res; +} +//---------------------------------------------------------------------------------------------------------------------- +Selene::Point Selene::hash_grow( + const Selene::Point &existing_hash, + const std::size_t offset, + const Selene::Scalar &existing_child_at_offset, + const Selene::Chunk &new_children) const +{ + auto result = fcmp_pp_rust::hash_grow_selene( + existing_hash, + offset, + existing_child_at_offset, + new_children); + + if (result.err != nullptr) + { + free(result.err); + throw std::runtime_error("failed to hash grow"); + } + + typename Selene::Point res; + memcpy(&res, result.value, sizeof(typename Selene::Point)); + free(result.value); + return res; +} +//---------------------------------------------------------------------------------------------------------------------- +Selene::Point Selene::hash_trim( + const Selene::Point &existing_hash, + const std::size_t offset, + const Selene::Chunk &children, + const Selene::Scalar &child_to_grow_back) const +{ + auto result = fcmp_pp_rust::hash_trim_selene( + existing_hash, + offset, + children, + child_to_grow_back); + + if (result.err != nullptr) + { + free(result.err); + throw std::runtime_error("failed to hash trim"); + } + + typename Selene::Point res; + memcpy(&res, result.value, sizeof(typename Selene::Point)); + free(result.value); + return res; +} +//---------------------------------------------------------------------------------------------------------------------- +Helios::Scalar Helios::zero_scalar() const +{ + return fcmp_pp_rust::helios_zero_scalar(); +} +//---------------------------------------------------------------------------------------------------------------------- +Selene::Scalar Selene::zero_scalar() const +{ + return fcmp_pp_rust::selene_zero_scalar(); +} +//---------------------------------------------------------------------------------------------------------------------- +std::array<uint8_t, 32> Helios::to_bytes(const Helios::Scalar &scalar) const +{ + auto bytes = fcmp_pp_rust::helios_scalar_to_bytes(scalar); + std::array<uint8_t, 32> res; + memcpy(&res, bytes, 32); + free(bytes); + return res; +} +//---------------------------------------------------------------------------------------------------------------------- +std::array<uint8_t, 32> Selene::to_bytes(const Selene::Scalar &scalar) const +{ + auto bytes = fcmp_pp_rust::selene_scalar_to_bytes(scalar); + std::array<uint8_t, 32> res; + memcpy(&res, bytes, 32); + free(bytes); + return res; +} +//---------------------------------------------------------------------------------------------------------------------- +std::array<uint8_t, 32> Helios::to_bytes(const Helios::Point &point) const +{ + auto bytes = fcmp_pp_rust::helios_point_to_bytes(point); + std::array<uint8_t, 32> res; + memcpy(&res, bytes, 32); + free(bytes); + return res; +} +//---------------------------------------------------------------------------------------------------------------------- +std::array<uint8_t, 32> Selene::to_bytes(const Selene::Point &point) const +{ + auto bytes = fcmp_pp_rust::selene_point_to_bytes(point); + std::array<uint8_t, 32> 
res; + memcpy(&res, bytes, 32); + free(bytes); + return res; +} +//---------------------------------------------------------------------------------------------------------------------- +Helios::Point Helios::from_bytes(const std::array<uint8_t, 32> &bytes) const +{ + return fcmp_pp_rust::helios_point_from_bytes(bytes.data()); +} +//---------------------------------------------------------------------------------------------------------------------- +Selene::Point Selene::from_bytes(const std::array<uint8_t, 32> &bytes) const +{ + return fcmp_pp_rust::selene_point_from_bytes(bytes.data()); +} +//---------------------------------------------------------------------------------------------------------------------- +std::string Helios::to_string(const typename Helios::Scalar &scalar) const +{ + return epee::string_tools::pod_to_hex(this->to_bytes(scalar)); +} +//---------------------------------------------------------------------------------------------------------------------- +std::string Selene::to_string(const typename Selene::Scalar &scalar) const +{ + return epee::string_tools::pod_to_hex(this->to_bytes(scalar)); +} +//---------------------------------------------------------------------------------------------------------------------- +std::string Helios::to_string(const typename Helios::Point &point) const +{ + return epee::string_tools::pod_to_hex(this->to_bytes(point)); +} +//---------------------------------------------------------------------------------------------------------------------- +std::string Selene::to_string(const typename Selene::Point &point) const +{ + return epee::string_tools::pod_to_hex(this->to_bytes(point)); +} +//---------------------------------------------------------------------------------------------------------------------- +//---------------------------------------------------------------------------------------------------------------------- +// Exposed helper functions +//---------------------------------------------------------------------------------------------------------------------- +SeleneScalar selene_scalar_from_bytes(const rct::key &scalar) +{ + return fcmp_pp_rust::selene_scalar_from_bytes(scalar.bytes); +} +//---------------------------------------------------------------------------------------------------------------------- +template<typename C> +void extend_zeroes(const std::unique_ptr<C> &curve, + const std::size_t num_zeroes, + std::vector<typename C::Scalar> &zeroes_inout) +{ + zeroes_inout.reserve(zeroes_inout.size() + num_zeroes); + + for (std::size_t i = 0; i < num_zeroes; ++i) + zeroes_inout.emplace_back(curve->zero_scalar()); +} + +// Explicit instantiations +template void extend_zeroes<Helios>(const std::unique_ptr<Helios> &curve, + const std::size_t num_zeroes, + std::vector<Helios::Scalar> &zeroes_inout); + +template void extend_zeroes<Selene>(const std::unique_ptr<Selene> &curve, + const std::size_t num_zeroes, + std::vector<Selene::Scalar> &zeroes_inout); +//---------------------------------------------------------------------------------------------------------------------- +template<typename C_POINTS, typename C_SCALARS> +void extend_scalars_from_cycle_points(const std::unique_ptr<C_POINTS> &curve, + const std::vector<typename C_POINTS::Point> &points, + std::vector<typename C_SCALARS::Scalar> &scalars_out) +{ + scalars_out.reserve(scalars_out.size() + points.size()); + + for (const auto &point : points) + { + typename C_SCALARS::Scalar scalar = curve->point_to_cycle_scalar(point); + scalars_out.push_back(std::move(scalar)); + } +} + +// Explicit instantiations +template void extend_scalars_from_cycle_points<Helios, Selene>(const std::unique_ptr<Helios> &curve, + const std::vector<Helios::Point> &points, + std::vector<Selene::Scalar> &scalars_out); + +template void 
extend_scalars_from_cycle_points<Selene, Helios>(const std::unique_ptr<Selene> &curve, + const std::vector<Selene::Point> &points, + std::vector<Helios::Scalar> &scalars_out); +//---------------------------------------------------------------------------------------------------------------------- +//---------------------------------------------------------------------------------------------------------------------- +} //namespace tower_cycle +} //namespace fcmp_pp diff --git a/src/fcmp_pp/tower_cycle.h b/src/fcmp_pp/tower_cycle.h new file mode 100644 index 00000000000..8ab69f902b9 --- /dev/null +++ b/src/fcmp_pp/tower_cycle.h @@ -0,0 +1,191 @@ +// Copyright (c) 2024, The Monero Project +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without modification, are +// permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of +// conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list +// of conditions and the following disclaimer in the documentation and/or other +// materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be +// used to endorse or promote products derived from this software without specific +// prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY +// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL +// THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF +// THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ +#pragma once + +#include "crypto/crypto.h" +#include "fcmp_pp_rust/fcmp++.h" +#include "ringct/rctTypes.h" + +#include <memory> + +namespace fcmp_pp +{ +namespace tower_cycle +{ +//---------------------------------------------------------------------------------------------------------------------- +//---------------------------------------------------------------------------------------------------------------------- +// Rust types +//---------------------------------------------------------------------------------------------------------------------- +// Need to forward declare Scalar types for point_to_cycle_scalar below +using SeleneScalar = fcmp_pp_rust::SeleneScalar; +using HeliosScalar = fcmp_pp_rust::HeliosScalar; +//---------------------------------------------------------------------------------------------------------------------- +struct HeliosT final +{ + using Scalar = HeliosScalar; + using Point = fcmp_pp_rust::HeliosPoint; + using Chunk = fcmp_pp_rust::HeliosScalarSlice; + using CycleScalar = SeleneScalar; +}; +//---------------------------------------------------------------------------------------------------------------------- +struct SeleneT final +{ + using Scalar = SeleneScalar; + using Point = fcmp_pp_rust::SelenePoint; + using Chunk = fcmp_pp_rust::SeleneScalarSlice; + using CycleScalar = HeliosScalar; +}; +//---------------------------------------------------------------------------------------------------------------------- +//---------------------------------------------------------------------------------------------------------------------- +// Abstract parent curve class that curves in a cycle must implement +template<typename C> +class Curve +{ +//member functions +public: + virtual typename C::Point hash_init_point() const = 0; + + // Read the x-coordinate from this curve's point to get this curve's cycle scalar + virtual typename C::CycleScalar point_to_cycle_scalar(const typename C::Point &point) const = 0; + + virtual typename C::Point hash_grow( + const typename C::Point &existing_hash, + const std::size_t offset, + const typename C::Scalar &existing_child_at_offset, + const typename C::Chunk &new_children) const = 0; + + virtual typename C::Point hash_trim( + const typename C::Point &existing_hash, + const std::size_t offset, + const typename C::Chunk &children, + const typename C::Scalar &child_to_grow_back) const = 0; + + virtual typename C::Scalar zero_scalar() const = 0; + + virtual std::array<uint8_t, 32> to_bytes(const typename C::Scalar &scalar) const = 0; + virtual std::array<uint8_t, 32> to_bytes(const typename C::Point &point) const = 0; + + virtual typename C::Point from_bytes(const std::array<uint8_t, 32> &bytes) const = 0; + + virtual std::string to_string(const typename C::Scalar &scalar) const = 0; + virtual std::string to_string(const typename C::Point &point) const = 0; +}; +//---------------------------------------------------------------------------------------------------------------------- +class Helios final : public Curve<HeliosT> +{ +//typedefs +public: + using Scalar = HeliosT::Scalar; + using Point = HeliosT::Point; + using Chunk = HeliosT::Chunk; + using CycleScalar = HeliosT::CycleScalar; + +//member functions +public: + Point hash_init_point() const override; + + CycleScalar point_to_cycle_scalar(const Point &point) const override; + + Point hash_grow( + const Point &existing_hash, + const std::size_t offset, + const Scalar &existing_child_at_offset, + const Chunk &new_children) const override; + + Point hash_trim( + const Point &existing_hash, + const std::size_t offset, + const Chunk 
&children, + const Scalar &child_to_grow_back) const override; + + Scalar zero_scalar() const override; + + std::array<uint8_t, 32> to_bytes(const Scalar &scalar) const override; + std::array<uint8_t, 32> to_bytes(const Point &point) const override; + + Point from_bytes(const std::array<uint8_t, 32> &bytes) const override; + + std::string to_string(const Scalar &scalar) const override; + std::string to_string(const Point &point) const override; +}; +//---------------------------------------------------------------------------------------------------------------------- +class Selene final : public Curve<SeleneT> +{ +//typedefs +public: + using Scalar = SeleneT::Scalar; + using Point = SeleneT::Point; + using Chunk = SeleneT::Chunk; + using CycleScalar = SeleneT::CycleScalar; + +//member functions +public: + Point hash_init_point() const override; + + CycleScalar point_to_cycle_scalar(const Point &point) const override; + + Point hash_grow( + const Point &existing_hash, + const std::size_t offset, + const Scalar &existing_child_at_offset, + const Chunk &new_children) const override; + + Point hash_trim( + const Point &existing_hash, + const std::size_t offset, + const Chunk &children, + const Scalar &child_to_grow_back) const override; + + Scalar zero_scalar() const override; + + std::array<uint8_t, 32> to_bytes(const Scalar &scalar) const override; + std::array<uint8_t, 32> to_bytes(const Point &point) const override; + + Point from_bytes(const std::array<uint8_t, 32> &bytes) const override; + + std::string to_string(const Scalar &scalar) const override; + std::string to_string(const Point &point) const override; +}; +//---------------------------------------------------------------------------------------------------------------------- +//---------------------------------------------------------------------------------------------------------------------- +SeleneScalar selene_scalar_from_bytes(const rct::key &scalar); +//---------------------------------------------------------------------------------------------------------------------- +template<typename C> +void extend_zeroes(const std::unique_ptr<C> &curve, + const std::size_t num_zeroes, + std::vector<typename C::Scalar> &zeroes_inout); +//---------------------------------------------------------------------------------------------------------------------- +template<typename C_POINTS, typename C_SCALARS> +void extend_scalars_from_cycle_points(const std::unique_ptr<C_POINTS> &curve, + const std::vector<typename C_POINTS::Point> &points, + std::vector<typename C_SCALARS::Scalar> &scalars_out); +//---------------------------------------------------------------------------------------------------------------------- +//---------------------------------------------------------------------------------------------------------------------- +}//namespace tower_cycle +}//namespace fcmp_pp diff --git a/src/ringct/rctSigs.cpp index 2d92ba05d4a..c96bc7bbaad 100644 --- a/src/ringct/rctSigs.cpp +++ b/src/ringct/rctSigs.cpp @@ -47,8 +47,7 @@ using namespace std; #define CHECK_AND_ASSERT_MES_L1(expr, ret, message) {if(!(expr)) {MCERROR("verify", message); return ret;}} -namespace -{ +namespace rct { rct::Bulletproof make_dummy_bulletproof(const std::vector<uint64_t> &outamounts, rct::keyV &C, rct::keyV &masks) { const size_t n_outs = outamounts.size(); @@ -117,9 +116,7 @@ namespace const size_t n_scalars = ring_size; return rct::clsag{rct::keyV(n_scalars, I), I, I, I}; } -} -namespace rct { Bulletproof proveRangeBulletproof(keyV &C, keyV &masks, const std::vector<uint64_t> &amounts, epee::span<const key> sk, hw::device &hwdev) { CHECK_AND_ASSERT_THROW_MES(amounts.size() == sk.size(), "Invalid amounts/sk sizes"); diff --git a/src/ringct/rctSigs.h index 
035d866d696..af533e49503 100644 --- a/src/ringct/rctSigs.h +++ b/src/ringct/rctSigs.h @@ -64,6 +64,10 @@ namespace hw { namespace rct { + // helpers for mock txs + Bulletproof make_dummy_bulletproof(const std::vector<uint64_t> &outamounts, keyV &C, keyV &masks); + BulletproofPlus make_dummy_bulletproof_plus(const std::vector<uint64_t> &outamounts, keyV &C, keyV &masks); + clsag make_dummy_clsag(size_t ring_size); boroSig genBorromean(const key64 x, const key64 P1, const key64 P2, const bits indices); bool verifyBorromean(const boroSig &bb, const key64 P1, const key64 P2); diff --git a/src/ringct/rctTypes.h index 247f25fffbb..946f520a2ca 100644 --- a/src/ringct/rctTypes.h +++ b/src/ringct/rctTypes.h @@ -45,7 +45,7 @@ extern "C" { } #include "crypto/generic-ops.h" #include "crypto/crypto.h" - +#include "fcmp_pp/proof.h" #include "hex.h" #include "span.h" #include "memwipe.h" @@ -84,6 +84,7 @@ namespace rct { return bytes[i]; } bool operator==(const key &k) const { return !crypto_verify_32(bytes, k.bytes); } + bool operator!=(const key &k) const { return crypto_verify_32(bytes, k.bytes); } unsigned char bytes[32]; }; typedef std::vector<key> keyV; //vector of keys @@ -303,6 +304,7 @@ namespace rct { RCTTypeBulletproof2 = 4, RCTTypeCLSAG = 5, RCTTypeBulletproofPlus = 6, + RCTTypeFcmpPlusPlus = 7, }; enum RangeProofType { RangeProofBorromean, RangeProofBulletproof, RangeProofMultiOutputBulletproof, RangeProofPaddedBulletproof }; struct RCTConfig { @@ -324,9 +326,10 @@ namespace rct { std::vector<ecdhTuple> ecdhInfo; ctkeyV outPk; xmr_amount txnFee; // contains b + crypto::hash referenceBlock; // block containing the merkle tree root used for fcmp++ rctSigBase() : - type(RCTTypeNull), message{}, mixRing{}, pseudoOuts{}, ecdhInfo{}, outPk{}, txnFee(0) + type(RCTTypeNull), message{}, mixRing{}, pseudoOuts{}, ecdhInfo{}, outPk{}, txnFee(0), referenceBlock{} {} template<bool W, template <bool> class Archive> @@ -335,7 +338,7 @@ namespace rct { FIELD(type) if (type == RCTTypeNull) return ar.good(); - if (type != RCTTypeFull && type != RCTTypeSimple && type != RCTTypeBulletproof && type != RCTTypeBulletproof2 && type != RCTTypeCLSAG && type != RCTTypeBulletproofPlus) + if (type != RCTTypeFull && type != RCTTypeSimple && type != RCTTypeBulletproof && type != RCTTypeBulletproof2 && type != RCTTypeCLSAG && type != RCTTypeBulletproofPlus && type != RCTTypeFcmpPlusPlus) return false; VARINT_FIELD(txnFee) // inputs/outputs not saved, only here for serialization help @@ -364,7 +367,7 @@ namespace rct { return false; for (size_t i = 0; i < outputs; ++i) { - if (type == RCTTypeBulletproof2 || type == RCTTypeCLSAG || type == RCTTypeBulletproofPlus) + if (type == RCTTypeBulletproof2 || type == RCTTypeCLSAG || type == RCTTypeBulletproofPlus || type == RCTTypeFcmpPlusPlus) { // Since RCTTypeBulletproof2 enote types, we don't serialize the blinding factor, and only serialize the // first 8 bytes of ecdhInfo[i].amount @@ -400,6 +403,8 @@ namespace rct { ar.delimit_array(); } ar.end_array(); + if (type == RCTTypeFcmpPlusPlus) + FIELD(referenceBlock) return ar.good(); } @@ -411,6 +416,7 @@ namespace rct { FIELD(ecdhInfo) FIELD(outPk) VARINT_FIELD(txnFee) + FIELD(referenceBlock) END_SERIALIZE() }; struct rctSigPrunable { @@ -420,6 +426,8 @@ namespace rct { std::vector<mgSig> MGs; // simple rct has N, full has 1 std::vector<clsag> CLSAGs; keyV pseudoOuts; //C - for simple rct + uint8_t curve_trees_tree_depth; // for fcmp++ + fcmp_pp::FcmpPpProof fcmp_pp; // when changing this function, update cryptonote::get_pruned_transaction_weight template<bool W, template <bool> class Archive> @@ -433,9 
+441,9 @@ namespace rct { return false; if (type == RCTTypeNull) return ar.good(); - if (type != RCTTypeFull && type != RCTTypeSimple && type != RCTTypeBulletproof && type != RCTTypeBulletproof2 && type != RCTTypeCLSAG && type != RCTTypeBulletproofPlus) + if (type != RCTTypeFull && type != RCTTypeSimple && type != RCTTypeBulletproof && type != RCTTypeBulletproof2 && type != RCTTypeCLSAG && type != RCTTypeBulletproofPlus && type != RCTTypeFcmpPlusPlus) return false; - if (type == RCTTypeBulletproofPlus) + if (type == RCTTypeBulletproofPlus || type == RCTTypeFcmpPlusPlus) { uint32_t nbp = bulletproofs_plus.size(); VARINT_FIELD(nbp) @@ -492,7 +500,22 @@ namespace rct { ar.end_array(); } - if (type == RCTTypeCLSAG || type == RCTTypeBulletproofPlus) + if (type == RCTTypeFcmpPlusPlus) + { + FIELD(curve_trees_tree_depth) + ar.tag("fcmp_pp"); + ar.begin_object(); + const std::size_t proof_len = fcmp_pp::proof_len(inputs, curve_trees_tree_depth); + if (!typename Archive<W>::is_saving()) + fcmp_pp.resize(proof_len); + if (fcmp_pp.size() != proof_len) + return false; + ar.serialize_blob(fcmp_pp.data(), proof_len); + if (!ar.good()) + return false; + ar.end_object(); + } + else if (type == RCTTypeCLSAG || type == RCTTypeBulletproofPlus) { ar.tag("CLSAGs"); ar.begin_array(); @@ -583,7 +606,7 @@ namespace rct { } ar.end_array(); } - if (type == RCTTypeBulletproof || type == RCTTypeBulletproof2 || type == RCTTypeCLSAG || type == RCTTypeBulletproofPlus) + if (type == RCTTypeBulletproof || type == RCTTypeBulletproof2 || type == RCTTypeCLSAG || type == RCTTypeBulletproofPlus || type == RCTTypeFcmpPlusPlus) { ar.tag("pseudoOuts"); ar.begin_array(); @@ -607,6 +630,8 @@ namespace rct { FIELD(bulletproofs_plus) FIELD(MGs) FIELD(CLSAGs) + FIELD(curve_trees_tree_depth) + FIELD(fcmp_pp) FIELD(pseudoOuts) END_SERIALIZE() }; @@ -615,12 +640,12 @@ namespace rct { keyV& get_pseudo_outs() { - return type == RCTTypeBulletproof || type == RCTTypeBulletproof2 || type == RCTTypeCLSAG || type == RCTTypeBulletproofPlus ? p.pseudoOuts : pseudoOuts; + return type == RCTTypeBulletproof || type == RCTTypeBulletproof2 || type == RCTTypeCLSAG || type == RCTTypeBulletproofPlus || type == RCTTypeFcmpPlusPlus ? p.pseudoOuts : pseudoOuts; } keyV const& get_pseudo_outs() const { - return type == RCTTypeBulletproof || type == RCTTypeBulletproof2 || type == RCTTypeCLSAG || type == RCTTypeBulletproofPlus ? p.pseudoOuts : pseudoOuts; + return type == RCTTypeBulletproof || type == RCTTypeBulletproof2 || type == RCTTypeCLSAG || type == RCTTypeBulletproofPlus || type == RCTTypeFcmpPlusPlus ? 
p.pseudoOuts : pseudoOuts; } BEGIN_SERIALIZE_OBJECT() @@ -740,6 +765,7 @@ namespace rct { static inline const rct::key &sk2rct(const crypto::secret_key &sk) { return (const rct::key&)sk; } static inline const rct::key &ki2rct(const crypto::key_image &ki) { return (const rct::key&)ki; } static inline const rct::key &hash2rct(const crypto::hash &h) { return (const rct::key&)h; } + static inline const rct::key &pt2rct(const crypto::ec_point &pt) { return (const rct::key&)pt; } static inline const crypto::public_key &rct2pk(const rct::key &k) { return (const crypto::public_key&)k; } static inline const crypto::secret_key &rct2sk(const rct::key &k) { return (const crypto::secret_key&)k; } static inline const crypto::key_image &rct2ki(const rct::key &k) { return (const crypto::key_image&)k; } diff --git a/src/rpc/core_rpc_server.cpp index f9afa4021f1..52aff5a5bc1 100644 --- a/src/rpc/core_rpc_server.cpp +++ b/src/rpc/core_rpc_server.cpp @@ -1288,6 +1288,16 @@ namespace cryptonote res.status = "Failed"; return true; } + + std::vector<crypto::key_image_y> key_images_y; + key_images_y.reserve(key_images.size()); + for (const auto &ki : key_images) + { + crypto::key_image_y ki_y; + crypto::key_image_to_y(ki, ki_y); + key_images_y.emplace_back(std::move(ki_y)); + } + for (std::vector<spent_key_image_info>::const_iterator i = ki.begin(); i != ki.end(); ++i) { crypto::hash hash; @@ -1295,11 +1305,13 @@ namespace cryptonote if (parse_hash256(i->id_hash, hash)) { memcpy(&spent_key_image, &hash, sizeof(hash)); // a bit dodgy, should be other parse functions somewhere + crypto::key_image_y spent_key_image_y; + crypto::key_image_to_y(spent_key_image, spent_key_image_y); for (size_t n = 0; n < res.spent_status.size(); ++n) { if (res.spent_status[n] == COMMAND_RPC_IS_KEY_IMAGE_SPENT::UNSPENT) { - if (key_images[n] == spent_key_image) + if (key_images_y[n] == spent_key_image_y) { res.spent_status[n] = COMMAND_RPC_IS_KEY_IMAGE_SPENT::SPENT_IN_POOL; break; diff --git a/src/rpc/message_data_structs.h index 9cf6c908cef..e2bbb1e0f19 100644 --- a/src/rpc/message_data_structs.h +++ b/src/rpc/message_data_structs.h @@ -104,7 +104,7 @@ namespace rpc bool double_spend_seen; }; - typedef std::unordered_map<crypto::key_image, std::vector<crypto::hash> > key_images_with_tx_hashes; + typedef std::unordered_map<crypto::key_image_y, std::vector<crypto::hash> > key_images_with_tx_hashes; struct output_amount_count { diff --git a/tests/block_weight/block_weight.cpp index 44ccf1e6462..31f5ad10462 100644 --- a/tests/block_weight/block_weight.cpp +++ b/tests/block_weight/block_weight.cpp @@ -32,6 +32,7 @@ #include #include "cryptonote_core/cryptonote_core.h" #include "blockchain_db/testdb.h" +#include "fcmp_pp/curve_trees.h" #define LONG_TERM_BLOCK_WEIGHT_WINDOW 5000 @@ -64,6 +65,7 @@ class TestDB: public cryptonote::BaseTestDB , const uint64_t& coins_generated , uint64_t num_rct_outs , const crypto::hash& blk_hash + , const fcmp_pp::curve_trees::OutputsByUnlockBlock& outs_by_unlock_block ) override { blocks.push_back({block_weight, long_term_block_weight}); } diff --git a/tests/core_tests/chaingen.cpp index 05a6ce1f90f..78c09271595 100644 --- a/tests/core_tests/chaingen.cpp +++ b/tests/core_tests/chaingen.cpp @@ -88,6 +88,7 @@ namespace , const uint64_t& coins_generated , uint64_t num_rct_outs , const crypto::hash& blk_hash + , const fcmp_pp::curve_trees::OutputsByUnlockBlock& outs_by_unlock_block ) override { blocks.push_back({blk, blk_hash}); @@ -171,7 +172,7 @@ static std::unique_ptr init_blockchain(const std: const block *blk = 
&boost::get<block>(ev); auto blk_hash = get_block_hash(*blk); - bdb->add_block(*blk, 1, 1, 1, 0, 0, blk_hash); + bdb->add_block(*blk, 1, 1, 1, 0, 0, blk_hash, {}); } bool r = bap->blockchain.init(bdb, nettype, true, test_options, 2, nullptr); diff --git a/tests/crypto/main.cpp index 668c04ea107..a2e3a96b58f 100644 --- a/tests/crypto/main.cpp +++ b/tests/crypto/main.cpp @@ -277,6 +277,24 @@ int main(int argc, char *argv[]) { if (expected != actual) { goto error; } + } else if (cmd == "key_image_to_y") { + key_image ki; + key_image_y expected_ki_y, actual_ki_y; + bool expected_sign, actual_sign; + get(input, ki, expected_ki_y, expected_sign); + actual_sign = key_image_to_y(ki, actual_ki_y); + if (expected_ki_y != actual_ki_y || expected_sign != actual_sign) { + goto error; + } + } else if (cmd == "key_image_from_y") { + key_image_y ki_y; + bool sign; + key_image expected_ki, actual_ki; + get(input, ki_y, sign, expected_ki); + key_image_from_y(ki_y, sign, actual_ki); + if (expected_ki != actual_ki) { + goto error; + } } else { throw ios_base::failure("Unknown function: " + cmd); } diff --git a/tests/crypto/tests.txt index 32e3b2d090b..9e48c8b28da 100644 --- a/tests/crypto/tests.txt +++ b/tests/crypto/tests.txt @@ -5543,3 +5543,203 @@ derive_view_tag 8edfabada2b24ef4d8d915826c9ff0245910e4b835b59c2cf8ed8fc991b2e1e8 derive_view_tag 8edfabada2b24ef4d8d915826c9ff0245910e4b835b59c2cf8ed8fc991b2e1e8 15 00 derive_view_tag 8edfabada2b24ef4d8d915826c9ff0245910e4b835b59c2cf8ed8fc991b2e1e8 127 a6 derive_view_tag 8edfabada2b24ef4d8d915826c9ff0245910e4b835b59c2cf8ed8fc991b2e1e8 128 0d +key_image_to_y fefdcf401bcf85b3b744e7c9f6af8ea0e181c799b1ec0f1c887cf77df085051d fefdcf401bcf85b3b744e7c9f6af8ea0e181c799b1ec0f1c887cf77df085051d false +key_image_to_y af6ce7761f2062d7f6f1f7158e4448989b459dfa1d6df35db12360e2322aab1b af6ce7761f2062d7f6f1f7158e4448989b459dfa1d6df35db12360e2322aab1b false +key_image_to_y f2e73a432004eeac96746e43885021ec1fc2a59d11a5cb17e0757aedc8bc2a2e f2e73a432004eeac96746e43885021ec1fc2a59d11a5cb17e0757aedc8bc2a2e false +key_image_to_y b6692eb3436a670837ce2ed2a580ed18a62eaa1a7c7c515882e8a6a6e3416867 b6692eb3436a670837ce2ed2a580ed18a62eaa1a7c7c515882e8a6a6e3416867 false +key_image_to_y b405de8162d9d3b2f89588e374aa0efde8bfd9b9f848cf2b9831258d776a3512 b405de8162d9d3b2f89588e374aa0efde8bfd9b9f848cf2b9831258d776a3512 false +key_image_to_y 0b5a7872f28930d1384dbf75c41c06ff9254d807507cbfacbba4e8ae71191f4b 0b5a7872f28930d1384dbf75c41c06ff9254d807507cbfacbba4e8ae71191f4b false +key_image_to_y d947f1c89ec5de075b3c987d69ccd54b5a91eb78d7708e876b6968537bd5f877 d947f1c89ec5de075b3c987d69ccd54b5a91eb78d7708e876b6968537bd5f877 false +key_image_to_y f995fb5b59cec97eef860b27c1cf8c30d5da21324ab351e3bb62a97712a22830 f995fb5b59cec97eef860b27c1cf8c30d5da21324ab351e3bb62a97712a22830 false +key_image_to_y 07c2fa33abba6add8c34b43bbef8d4cc5fb515f876c7d06440a676e9d68fee36 07c2fa33abba6add8c34b43bbef8d4cc5fb515f876c7d06440a676e9d68fee36 false +key_image_to_y 96b01ba882e094e90fb5ae63134da1c78be4e62c57f1eb121bef2789f5fe3923 96b01ba882e094e90fb5ae63134da1c78be4e62c57f1eb121bef2789f5fe3923 false +key_image_to_y b4796a8bc9801ed57e28d59b95c18371e8cf933e297551ec5bd1f47b270c9c7b b4796a8bc9801ed57e28d59b95c18371e8cf933e297551ec5bd1f47b270c9c7b false +key_image_to_y b964be6438b709aed6d79677c7c0cc6446f502b31a1af108409c3d3a80c84203 b964be6438b709aed6d79677c7c0cc6446f502b31a1af108409c3d3a80c84203 false +key_image_to_y ae926a5a753f3d7a7aaf4dd1ca171c45c0bb0aa75280e9b9e088a764b2d0ef55 
ae926a5a753f3d7a7aaf4dd1ca171c45c0bb0aa75280e9b9e088a764b2d0ef55 false +key_image_to_y 69463dbc77e0bd1363f46c05a0791bd84a9f34b4c6274654fa84ac74a7273331 69463dbc77e0bd1363f46c05a0791bd84a9f34b4c6274654fa84ac74a7273331 false +key_image_to_y 8ea9c60b287243a611316ff4a70ca667610c06a51570d65c836626fa2a81aa54 8ea9c60b287243a611316ff4a70ca667610c06a51570d65c836626fa2a81aa54 false +key_image_to_y ca55d6a70218adfe3e6e893c39888a01f7b297fdddaf8a48f333ed6bc5731d32 ca55d6a70218adfe3e6e893c39888a01f7b297fdddaf8a48f333ed6bc5731d32 false +key_image_to_y 5880cbd36de5697e73a8310972d13dd4c5a10ec091501abd63bc3dc21a736305 5880cbd36de5697e73a8310972d13dd4c5a10ec091501abd63bc3dc21a736305 false +key_image_to_y 70b620fd6ffec9720309dcc4d90fe37244ce62276c0fd910f782d72976909306 70b620fd6ffec9720309dcc4d90fe37244ce62276c0fd910f782d72976909306 false +key_image_to_y ddb3a9e3c57fbc7bf030b0155afe41563b0b89fdc50aed9a203319b65a3f960b ddb3a9e3c57fbc7bf030b0155afe41563b0b89fdc50aed9a203319b65a3f960b false +key_image_to_y 4eb11c28a0eb7ce30dcca67ae05c79181a8603ec1e55d83000cb72c3842da100 4eb11c28a0eb7ce30dcca67ae05c79181a8603ec1e55d83000cb72c3842da100 false +key_image_to_y 32df4f304bb1bed43f76d2315d139fe66aa79ede363ab5961e828f477dfbf772 32df4f304bb1bed43f76d2315d139fe66aa79ede363ab5961e828f477dfbf772 false +key_image_to_y b46bba83dbe888af05289c99911d251ab71621222311ea18cf5ca6cdcd74ed2b b46bba83dbe888af05289c99911d251ab71621222311ea18cf5ca6cdcd74ed2b false +key_image_to_y 582535660d8d8a8b11158b2cc72baab9824ca63b7b9ed99d19247d140cd4fb23 582535660d8d8a8b11158b2cc72baab9824ca63b7b9ed99d19247d140cd4fb23 false +key_image_to_y 4cfb08d96e162470e92651550ec06d6c693d428b8d85d43c53b67ff6dbc53030 4cfb08d96e162470e92651550ec06d6c693d428b8d85d43c53b67ff6dbc53030 false +key_image_to_y 1cd08eb20c60ba42f3eaeccce39ea185d588f61e3e51a38ff5cc48aa4458037e 1cd08eb20c60ba42f3eaeccce39ea185d588f61e3e51a38ff5cc48aa4458037e false +key_image_to_y 3c7e2a6795db3c1b70f8786e2d2d20116dd6478acfc374362b6ea106a1af2d42 3c7e2a6795db3c1b70f8786e2d2d20116dd6478acfc374362b6ea106a1af2d42 false +key_image_to_y e01f40decbc883a5daa126755e972e6427b052be9f3edec00d49041119a44f63 e01f40decbc883a5daa126755e972e6427b052be9f3edec00d49041119a44f63 false +key_image_to_y 048fbfa1d56a6d7c239321eb85aebf6839fc4ac830329aebce827f5140d7cb0c 048fbfa1d56a6d7c239321eb85aebf6839fc4ac830329aebce827f5140d7cb0c false +key_image_to_y 31630841a494d5a34d89a4709d8b36eee4ab3b6cf4914ff61a6b0eace1cdbd43 31630841a494d5a34d89a4709d8b36eee4ab3b6cf4914ff61a6b0eace1cdbd43 false +key_image_to_y 1350e9f4231fa7a7878172a08a63618dc710ca4bfa9a93a32dd0976ecbf67059 1350e9f4231fa7a7878172a08a63618dc710ca4bfa9a93a32dd0976ecbf67059 false +key_image_to_y 325eef1bb5a4d96a5ec074cc29fd4078a1aadc3f7435985d42c96cbc9526a002 325eef1bb5a4d96a5ec074cc29fd4078a1aadc3f7435985d42c96cbc9526a002 false +key_image_to_y 86e74b20ec60d8162c026206a61dfb8da300a0b563cb69c3f456c8a21f135d4f 86e74b20ec60d8162c026206a61dfb8da300a0b563cb69c3f456c8a21f135d4f false +key_image_to_y f3f3100cd90ce128b4c8d6c339d77249106c0a656fe651fe7a285a607e47a966 f3f3100cd90ce128b4c8d6c339d77249106c0a656fe651fe7a285a607e47a966 false +key_image_to_y 625fc0d4f4728f7a659c127026bbbc7c0c26a68b351a1656c8875e2d5ff1473f 625fc0d4f4728f7a659c127026bbbc7c0c26a68b351a1656c8875e2d5ff1473f false +key_image_to_y e10e93b9478bb561f4b08fc1d9d0b63f2f4b082ba49e5d0736bc5dac7551896a e10e93b9478bb561f4b08fc1d9d0b63f2f4b082ba49e5d0736bc5dac7551896a false +key_image_to_y 0a45ed914810e7a1c4e94d5d3466702790fe2882458092fcec8fd2ece7544e12 
0a45ed914810e7a1c4e94d5d3466702790fe2882458092fcec8fd2ece7544e12 false +key_image_to_y a83bb44ffaa27eb6c1f1bce66018e3fa96587d30f37a5338905616502b78da47 a83bb44ffaa27eb6c1f1bce66018e3fa96587d30f37a5338905616502b78da47 false +key_image_to_y 9d96f7f4617236da071a986deb7e3afdd3b96989c747384bc6d1b863ff72620c 9d96f7f4617236da071a986deb7e3afdd3b96989c747384bc6d1b863ff72620c false +key_image_to_y e4cc5a1a31184e706c2a8aaf510f16cd2f5d623037aae52a27b010319522bf10 e4cc5a1a31184e706c2a8aaf510f16cd2f5d623037aae52a27b010319522bf10 false +key_image_to_y 7baf7f85021d837208a600256a78684c5c2542f0cf085df7d75d4e5c148a4358 7baf7f85021d837208a600256a78684c5c2542f0cf085df7d75d4e5c148a4358 false +key_image_to_y b55f9018f08daea272b8726fa9a3fd5c0a97683654a694cf361bc534a0d74a54 b55f9018f08daea272b8726fa9a3fd5c0a97683654a694cf361bc534a0d74a54 false +key_image_to_y 218e58d7355d1534b5b633abc5caa16386ca3109519dd04086db30c6c7d1af06 218e58d7355d1534b5b633abc5caa16386ca3109519dd04086db30c6c7d1af06 false +key_image_to_y 0f274b89128658a5d1736638fef3ca7ce20a4d89ff23d7c79d9add00fa263b3b 0f274b89128658a5d1736638fef3ca7ce20a4d89ff23d7c79d9add00fa263b3b false +key_image_to_y dcfddcb87039b6a615df29491fd39b8decf62bc8b06cb85f170eeec1505c6001 dcfddcb87039b6a615df29491fd39b8decf62bc8b06cb85f170eeec1505c6001 false +key_image_to_y b75a2d1c3f7f262830619e5ed38cbb9656737d273180c4e7f4fac875d434fd18 b75a2d1c3f7f262830619e5ed38cbb9656737d273180c4e7f4fac875d434fd18 false +key_image_to_y fe5b354a174c97dc1742a71191f602395867efb961c817764171f39f50347264 fe5b354a174c97dc1742a71191f602395867efb961c817764171f39f50347264 false +key_image_to_y 16c2908507300ffbb4b346e19c15cd3b9f04459ee939a144866e1f02d19ef97f 16c2908507300ffbb4b346e19c15cd3b9f04459ee939a144866e1f02d19ef97f false +key_image_to_y 14c04cb0415df29bb918fd9e4b6a878ead2669668ba72d7d78c7c74068d50377 14c04cb0415df29bb918fd9e4b6a878ead2669668ba72d7d78c7c74068d50377 false +key_image_to_y 35e1f520f6cae77dc98e0bbd09129d86e82fb5ad23f44f676f5b56731e575a13 35e1f520f6cae77dc98e0bbd09129d86e82fb5ad23f44f676f5b56731e575a13 false +key_image_to_y 35747e2cba77c3103c205919180eeb55d614af69107d586e0b4946651b815a5b 35747e2cba77c3103c205919180eeb55d614af69107d586e0b4946651b815a5b false +key_image_to_y 977b0f71a082f9a73ce6343ba0f12e257477633b0ddda6ec79fa4efa2a1d2e29 977b0f71a082f9a73ce6343ba0f12e257477633b0ddda6ec79fa4efa2a1d2e29 false +key_image_to_y 2b9c90881584045c6b114d6c86be8901ce38162a2168ba1a485203d89a5c6c2f 2b9c90881584045c6b114d6c86be8901ce38162a2168ba1a485203d89a5c6c2f false +key_image_to_y 61debcd08a03cffec9745c95371f749d749b1f24dafd8f1b3016105f77408b0c 61debcd08a03cffec9745c95371f749d749b1f24dafd8f1b3016105f77408b0c false +key_image_to_y 8bc860d86aad2dd0be6af91f7e5185d56fa66d9e7ffb1339b0c5991663bcaa54 8bc860d86aad2dd0be6af91f7e5185d56fa66d9e7ffb1339b0c5991663bcaa54 false +key_image_to_y 25bf7f10ebd260a5dbae567dfce30525dfbc9af1b2521e5baeb7fd8cbc2ca93d 25bf7f10ebd260a5dbae567dfce30525dfbc9af1b2521e5baeb7fd8cbc2ca93d false +key_image_to_y b949beebe0ffe7b87bc1c9e4bce431d6d75d706b008797043607bf407a301e5e b949beebe0ffe7b87bc1c9e4bce431d6d75d706b008797043607bf407a301e5e false +key_image_to_y a896a41cd7622e38d0b43ee402f48886b3daa8f747dd96f8661243ee513d98de a896a41cd7622e38d0b43ee402f48886b3daa8f747dd96f8661243ee513d985e true +key_image_to_y 70491afad4c4739263dac2a94cadaffe95115553ed2252f784704867a05488fd 70491afad4c4739263dac2a94cadaffe95115553ed2252f784704867a054887d true +key_image_to_y b131e2745a54155a1d490e73e95294466740fbb5276f727a63e5f5eb182e13a1 
b131e2745a54155a1d490e73e95294466740fbb5276f727a63e5f5eb182e1321 true +key_image_to_y 5cb915be5aec2fb6986143f6f3df4563e9051ea96591f20f20f49f97a01055ea 5cb915be5aec2fb6986143f6f3df4563e9051ea96591f20f20f49f97a010556a true +key_image_to_y 3810186d871bdfe0e7269d1d26682a1761c91d6d934b370ea17b14dc7044c6c8 3810186d871bdfe0e7269d1d26682a1761c91d6d934b370ea17b14dc7044c648 true +key_image_to_y 00db468b9479beed003e34d38d439267d6e6acffde1e606f465dbce0fc0666d0 00db468b9479beed003e34d38d439267d6e6acffde1e606f465dbce0fc066650 true +key_image_to_y a6c29a734b7aa4d93d29657be4dacb4a0f3595e530d8eff1edb08f1eace15181 a6c29a734b7aa4d93d29657be4dacb4a0f3595e530d8eff1edb08f1eace15101 true +key_image_to_y 48d9a438ef0265d8936f58a14c5b786a47481098b6db206b61e8305cf0b780cc 48d9a438ef0265d8936f58a14c5b786a47481098b6db206b61e8305cf0b7804c true +key_image_to_y 93204f8620430334b844704de904ad1bc6c8622769360c679b77df1673264e90 93204f8620430334b844704de904ad1bc6c8622769360c679b77df1673264e10 true +key_image_to_y dfb14b5961117227711861b81778b283aeded0cdd6f9717a95701042f7b2ddcf dfb14b5961117227711861b81778b283aeded0cdd6f9717a95701042f7b2dd4f true +key_image_to_y 7c06d5309ceecd9f4e2dd11a28dfdc035096780f36d9a4c61d63ff54075e1ad3 7c06d5309ceecd9f4e2dd11a28dfdc035096780f36d9a4c61d63ff54075e1a53 true +key_image_to_y 70d3176a9b2ca21e60675ce4b2a097d8e9d4794a0e838cc598b21a6a7aef06cd 70d3176a9b2ca21e60675ce4b2a097d8e9d4794a0e838cc598b21a6a7aef064d true +key_image_to_y 8266564fa110f488aefb36048d46959708bdd839cb8b7ba58190c9c8ffd27cab 8266564fa110f488aefb36048d46959708bdd839cb8b7ba58190c9c8ffd27c2b true +key_image_to_y 06d94e0765b6b11ab9c1baf3e5ff1cddbaf7f2c0ac17cf0da55cde4b06bab0b2 06d94e0765b6b11ab9c1baf3e5ff1cddbaf7f2c0ac17cf0da55cde4b06bab032 true +key_image_to_y f7d0ac71751e9bea9e8bd8da43ddab22bcb3edacf019a99443b9068cd4474185 f7d0ac71751e9bea9e8bd8da43ddab22bcb3edacf019a99443b9068cd4474105 true +key_image_to_y ee029aa2269ce142e2e9d6e0502cf7ee23dedc847436c0eb935a1ded8701f382 ee029aa2269ce142e2e9d6e0502cf7ee23dedc847436c0eb935a1ded8701f302 true +key_image_to_y 370640fae7b49a03da1c99538d6afe52bf09afd160e35c9e971b41c2aba8e3cc 370640fae7b49a03da1c99538d6afe52bf09afd160e35c9e971b41c2aba8e34c true +key_image_to_y 02d781e9c56a25a924fd49f5f80eee90eb55f0d7269d7157f89403dfc58ad386 02d781e9c56a25a924fd49f5f80eee90eb55f0d7269d7157f89403dfc58ad306 true +key_image_to_y 7cb9d19798bc4bf5402326b2e9aa371b2b7a504f09e4cfc123d23ee0f05098fc 7cb9d19798bc4bf5402326b2e9aa371b2b7a504f09e4cfc123d23ee0f050987c true +key_image_to_y f01e93a0b1fb01890b162f7002a4425c35421e8a46cec1d9c84d0fa9263990e5 f01e93a0b1fb01890b162f7002a4425c35421e8a46cec1d9c84d0fa926399065 true +key_image_to_y 8018dc18a0bf007fde0bb5293ef247b9446f4f0b9c20d18194a216fc500bf4db 8018dc18a0bf007fde0bb5293ef247b9446f4f0b9c20d18194a216fc500bf45b true +key_image_to_y f8e3600327a95a90b8dea0659ac00110b45c410b97dad6660348892891ffb690 f8e3600327a95a90b8dea0659ac00110b45c410b97dad6660348892891ffb610 true +key_image_to_y 11ed169c028c854bd41f6d7ea583ec50c1568bfc8c784e4d04d30533e58496f3 11ed169c028c854bd41f6d7ea583ec50c1568bfc8c784e4d04d30533e5849673 true +key_image_to_y ddcac461f3c9d265cce797039bbfff3f3156d07c4e0231b096292434df5bcabb ddcac461f3c9d265cce797039bbfff3f3156d07c4e0231b096292434df5bca3b true +key_image_to_y b861f2dba6252d878029f417ac02555f9502c66d889de49683262d1b020f5adb b861f2dba6252d878029f417ac02555f9502c66d889de49683262d1b020f5a5b true +key_image_to_y cda6bd18b5dbe8705d7a7be1d5f4b96767bf03d901931b643ee138ba66c64dd3 cda6bd18b5dbe8705d7a7be1d5f4b96767bf03d901931b643ee138ba66c64d53 true 
+key_image_to_y b4aa5fefc0e81a37f3ac19482a32fc49141c79c013e8d9058a9d1c6ca347a79b b4aa5fefc0e81a37f3ac19482a32fc49141c79c013e8d9058a9d1c6ca347a71b true +key_image_to_y cc3b15a7feec558a73e12028b11cede86ff9f6956b014722872037b9ee652ebf cc3b15a7feec558a73e12028b11cede86ff9f6956b014722872037b9ee652e3f true +key_image_to_y 1e5b547e0e6da07390a74da76995118abc565c4e7f4acb24d90e5f85721d33d5 1e5b547e0e6da07390a74da76995118abc565c4e7f4acb24d90e5f85721d3355 true +key_image_to_y f3003c72bf5f87b97f34dc255dda2cb39d3e8e4045168631de8d2fecf5e76296 f3003c72bf5f87b97f34dc255dda2cb39d3e8e4045168631de8d2fecf5e76216 true +key_image_to_y 241452c33318416debb476707bcb7e52c9f3480768ac2c9bf394ce36df7923de 241452c33318416debb476707bcb7e52c9f3480768ac2c9bf394ce36df79235e true +key_image_to_y 8a4f7a0e19ad5af9315b0691f35506fc78e9e8fe7f5572e36d19d582526abdff 8a4f7a0e19ad5af9315b0691f35506fc78e9e8fe7f5572e36d19d582526abd7f true +key_image_to_y fdae81e5a3719b1ac05f27cd7bf83e01bd5026d91e99b6f8bc1672bc2711fb91 fdae81e5a3719b1ac05f27cd7bf83e01bd5026d91e99b6f8bc1672bc2711fb11 true +key_image_to_y 2649d2bc7f3e0d6b87e5d519d5aad9f8e22ff5e8f02466efc33be443e67d76f0 2649d2bc7f3e0d6b87e5d519d5aad9f8e22ff5e8f02466efc33be443e67d7670 true +key_image_to_y fc161a566fc014ed9e15e4cec7b2eb1c19a3220e518106982843861c9aac69e3 fc161a566fc014ed9e15e4cec7b2eb1c19a3220e518106982843861c9aac6963 true +key_image_to_y d246c119405dfd6de0ed83a04ca542caf73785b55671572a60ea5f665ec91296 d246c119405dfd6de0ed83a04ca542caf73785b55671572a60ea5f665ec91216 true +key_image_to_y f28722915db5acda96b5281f2a36625e9994d5b8eca68f3e250dd4c4e815b5c1 f28722915db5acda96b5281f2a36625e9994d5b8eca68f3e250dd4c4e815b541 true +key_image_to_y eb73cb1356f4114d01983d552301bb8f4927b41256d9c90d52024476d3d2e2cc eb73cb1356f4114d01983d552301bb8f4927b41256d9c90d52024476d3d2e24c true +key_image_to_y 5d3ea3b7c892e585008a220c51cbe42ae7e0c7e9e525a42ec492d3a7602a1cd5 5d3ea3b7c892e585008a220c51cbe42ae7e0c7e9e525a42ec492d3a7602a1c55 true +key_image_to_y 36a322f166933f550102d14e9c9daaeaa34bd06e9e20dc605a101a2d0ae69fbb 36a322f166933f550102d14e9c9daaeaa34bd06e9e20dc605a101a2d0ae69f3b true +key_image_to_y 2b31db2834f0e35ca15ebe00e73a583581476253f94b7f3b270546e58193b4a0 2b31db2834f0e35ca15ebe00e73a583581476253f94b7f3b270546e58193b420 true +key_image_to_y 3725e83d6e945fb0f8feb442cd12487f9e351d286ee89fa4dd68fb86b847bcb1 3725e83d6e945fb0f8feb442cd12487f9e351d286ee89fa4dd68fb86b847bc31 true +key_image_to_y 4cdcc458412ed752e804a0d4bc31bc5b4f47ff49a8771b0dc47d0388c10805f7 4cdcc458412ed752e804a0d4bc31bc5b4f47ff49a8771b0dc47d0388c1080577 true +key_image_to_y bb50dc83ae41cd9f1508073186087950c95a482bd780eccd70cd63388c7649f1 bb50dc83ae41cd9f1508073186087950c95a482bd780eccd70cd63388c764971 true +key_image_from_y b14939b9254f8df6d3e5c7b33a7dc0c6aa1ab8fe1293cb4795c9d92cf81d634f false b14939b9254f8df6d3e5c7b33a7dc0c6aa1ab8fe1293cb4795c9d92cf81d634f +key_image_from_y 6669a8eab861a2f4d4fdfd9fb8a9cb5fdd3a15e0facb8ff77c24727635af634e false 6669a8eab861a2f4d4fdfd9fb8a9cb5fdd3a15e0facb8ff77c24727635af634e +key_image_from_y c3134aa2143389e2d3b1a00fe661e2b82490956bbdf65ac2396d606f1a58b134 false c3134aa2143389e2d3b1a00fe661e2b82490956bbdf65ac2396d606f1a58b134 +key_image_from_y 8c27bc121f3fe85abb4c8084c5744960231d7b1b5861c30aa82749bf54018b53 false 8c27bc121f3fe85abb4c8084c5744960231d7b1b5861c30aa82749bf54018b53 +key_image_from_y 9e7be117a77921058748cba4fcfa043c026a884d969fd7b3a49ef99fdda3a772 false 9e7be117a77921058748cba4fcfa043c026a884d969fd7b3a49ef99fdda3a772 +key_image_from_y 
19c1820e3677f5b6c72db2c4ae804e6b93cbe802bf5de884d7d695253079da02 false 19c1820e3677f5b6c72db2c4ae804e6b93cbe802bf5de884d7d695253079da02 +key_image_from_y f37184d49ef88da56a1f37b3a4424c8c40a39b888c0c65817ce0cbfaeba17943 false f37184d49ef88da56a1f37b3a4424c8c40a39b888c0c65817ce0cbfaeba17943 +key_image_from_y a558636042e148d97f699bb55dd2c2fb6c6d64f54aa5e1c06f6d2e6e054c5261 false a558636042e148d97f699bb55dd2c2fb6c6d64f54aa5e1c06f6d2e6e054c5261 +key_image_from_y f2c6b50c496c5b4e0fa715d24e8a22727633d05b91b9d08232181741bcb36a3a false f2c6b50c496c5b4e0fa715d24e8a22727633d05b91b9d08232181741bcb36a3a +key_image_from_y 0543ccb07c6b9d2a2602107d0aa5ed6aa1398ec6543d9b9d7822bbf339ddbb09 false 0543ccb07c6b9d2a2602107d0aa5ed6aa1398ec6543d9b9d7822bbf339ddbb09 +key_image_from_y bf2acac9328c8538beec88fffee1ca49d9b28c70f9acc23f59dfbc8d21754654 false bf2acac9328c8538beec88fffee1ca49d9b28c70f9acc23f59dfbc8d21754654 +key_image_from_y 81e6611d33146dd5c3e402b4cb660b628175e074c1ccff093258a6f355655045 false 81e6611d33146dd5c3e402b4cb660b628175e074c1ccff093258a6f355655045 +key_image_from_y 4382e51caba64548432e6f0ddf3df5bb29eba0d55f46f806f8281b6b324ccf66 false 4382e51caba64548432e6f0ddf3df5bb29eba0d55f46f806f8281b6b324ccf66 +key_image_from_y 7d7185e987cbb9ee1608c7eef268764080906c9a7d5e91dfd1f6ea6538405f6e false 7d7185e987cbb9ee1608c7eef268764080906c9a7d5e91dfd1f6ea6538405f6e +key_image_from_y 8558c5ad0304b8b4fbf0ab12ed4f89295e7729a3ec4b05fffacdb9fbcc53f859 false 8558c5ad0304b8b4fbf0ab12ed4f89295e7729a3ec4b05fffacdb9fbcc53f859 +key_image_from_y 4c9ec93dbaf801eae69ea60ea6c5b970b06c9bd542ad3aba60d6d982abfcd653 false 4c9ec93dbaf801eae69ea60ea6c5b970b06c9bd542ad3aba60d6d982abfcd653 +key_image_from_y 361268ad395bc3162699092b95d138f023c41dd0e832d85c3f190440a2d0a87c false 361268ad395bc3162699092b95d138f023c41dd0e832d85c3f190440a2d0a87c +key_image_from_y f1ab05c1794fe907bbe657af5e046e2682312408ab267e24f6586f7fd52c306d false f1ab05c1794fe907bbe657af5e046e2682312408ab267e24f6586f7fd52c306d +key_image_from_y 9870dec355f5afcd193f7bbd803ad3038540cca12aa65ee0fc4108fe72657f1a false 9870dec355f5afcd193f7bbd803ad3038540cca12aa65ee0fc4108fe72657f1a +key_image_from_y 71c688eaef0dee7d48d803fa38fd7d20690e666594a4ce5ea505832e2e8c4666 false 71c688eaef0dee7d48d803fa38fd7d20690e666594a4ce5ea505832e2e8c4666 +key_image_from_y af57f563d8446a522666222c830f33f89ce0124280be5159388900a657ea9d12 false af57f563d8446a522666222c830f33f89ce0124280be5159388900a657ea9d12 +key_image_from_y 76d14b96961619765fc5b6f4e2e30166fa4c3e275c227bd275b5f4e6c0a91255 false 76d14b96961619765fc5b6f4e2e30166fa4c3e275c227bd275b5f4e6c0a91255 +key_image_from_y 59d7e8425798b6c6b2f7fa7ff6fe344eb5cf84511899dd39bd56e71beea5f960 false 59d7e8425798b6c6b2f7fa7ff6fe344eb5cf84511899dd39bd56e71beea5f960 +key_image_from_y d0db255ff4a1b619dc5e0fc9773659a19c75bd7a868e3fd45e83c92aa18c6e04 false d0db255ff4a1b619dc5e0fc9773659a19c75bd7a868e3fd45e83c92aa18c6e04 +key_image_from_y c03bf07443db65ce3b7bcd58c17b6266d81b8a6624deb081c65c14650b51d827 false c03bf07443db65ce3b7bcd58c17b6266d81b8a6624deb081c65c14650b51d827 +key_image_from_y 87102828ddeb3a31a266de1937b966658710264ad3c520bcc93abb07bc459849 false 87102828ddeb3a31a266de1937b966658710264ad3c520bcc93abb07bc459849 +key_image_from_y fc749c9fcc3300819ad312af6d235813975e6ce70bb904bad49930ce34b47201 false fc749c9fcc3300819ad312af6d235813975e6ce70bb904bad49930ce34b47201 +key_image_from_y e53657e4a0bbf098112777134885f65ea7abfc0639d28515bd00bd52a418b93e false e53657e4a0bbf098112777134885f65ea7abfc0639d28515bd00bd52a418b93e +key_image_from_y 
55c7fe59e8c41d0d0f77f2d993f10e638cf6d4678984a4b9422202105ad51349 false 55c7fe59e8c41d0d0f77f2d993f10e638cf6d4678984a4b9422202105ad51349 +key_image_from_y e4246f6bd27e9323e08107ac9fa911f3f6c27f64d0f03b2a265789f2f8718401 false e4246f6bd27e9323e08107ac9fa911f3f6c27f64d0f03b2a265789f2f8718401 +key_image_from_y f7fce49a5ff25d00f655942508e1a31e210a66fe03f22bd6c799575ea6b88b5b false f7fce49a5ff25d00f655942508e1a31e210a66fe03f22bd6c799575ea6b88b5b +key_image_from_y 20325307f450143797fc7b7969b3ad093fd6318d97c6dfbe09a04a50abc9ba42 false 20325307f450143797fc7b7969b3ad093fd6318d97c6dfbe09a04a50abc9ba42 +key_image_from_y 4a2e87eaade16f12c728bd0fee887488db0d9e03f940de2e1acd4d77123ede59 false 4a2e87eaade16f12c728bd0fee887488db0d9e03f940de2e1acd4d77123ede59 +key_image_from_y 9a4227ccd723624c7dd4d536a8476463bd767ebc55e1e4f27bbe84139245151b false 9a4227ccd723624c7dd4d536a8476463bd767ebc55e1e4f27bbe84139245151b +key_image_from_y c64c6c2505ccfbe929fe6e93a8376c9377a05cb9df5547a203d3e9247e5dfa75 false c64c6c2505ccfbe929fe6e93a8376c9377a05cb9df5547a203d3e9247e5dfa75 +key_image_from_y f32193f4a45a9ee531f4e54b6a8cbae179048cd3e93d24cc21229ba67d3c886f false f32193f4a45a9ee531f4e54b6a8cbae179048cd3e93d24cc21229ba67d3c886f +key_image_from_y e480ed1ecdbf1e10dd7e347862e153b35f457bb2dac5bce766cb831265a0122a false e480ed1ecdbf1e10dd7e347862e153b35f457bb2dac5bce766cb831265a0122a +key_image_from_y e3283fa4f9eae1a612ac40a3a9f7ceaf472d4ad0fc7dba0c2bc6387f4f170753 false e3283fa4f9eae1a612ac40a3a9f7ceaf472d4ad0fc7dba0c2bc6387f4f170753 +key_image_from_y 208220ab9fb01a76f92df80d367c9b8187bd647e2df67143d315107c24c19870 false 208220ab9fb01a76f92df80d367c9b8187bd647e2df67143d315107c24c19870 +key_image_from_y 4ec772fe0cd753a573838454fa5d3764c84466bf1d8c7b051b0499c56c8ccf58 false 4ec772fe0cd753a573838454fa5d3764c84466bf1d8c7b051b0499c56c8ccf58 +key_image_from_y 350f46cace1e8cf8e82352a72070d3131d9fd7f7b71bec1781a93ddfb82a7601 false 350f46cace1e8cf8e82352a72070d3131d9fd7f7b71bec1781a93ddfb82a7601 +key_image_from_y 91624ed82640d2f131b996db59c64564be1342725a7de6ced3776d19f15b4367 false 91624ed82640d2f131b996db59c64564be1342725a7de6ced3776d19f15b4367 +key_image_from_y 700abb5038344ed6561a2e25f5296f785cdf6f359b360cb3be69eaf535df6671 false 700abb5038344ed6561a2e25f5296f785cdf6f359b360cb3be69eaf535df6671 +key_image_from_y 4d93904090a5c37cadb4c8b911955bd6374ab302f142d918c722eb8252bace0c true 4d93904090a5c37cadb4c8b911955bd6374ab302f142d918c722eb8252bace8c +key_image_from_y d375ac0223b138a9d0a0d3adf3a7a62c0a7207bc87a30bed0e582912aa4fb656 true d375ac0223b138a9d0a0d3adf3a7a62c0a7207bc87a30bed0e582912aa4fb6d6 +key_image_from_y ece17c47a92da9b0ef4218c19fa799ec04673c8843f65f20a14d492ced296542 true ece17c47a92da9b0ef4218c19fa799ec04673c8843f65f20a14d492ced2965c2 +key_image_from_y c721614309a89ac2ef41570662ce244da418476cfbd87331cd8e44ce6dd24973 true c721614309a89ac2ef41570662ce244da418476cfbd87331cd8e44ce6dd249f3 +key_image_from_y 00b5b85871c39384b359b2d2e89773c619ea546512d9e78ef43b6d8ad4f55408 true 00b5b85871c39384b359b2d2e89773c619ea546512d9e78ef43b6d8ad4f55488 +key_image_from_y fa32508215245c07dc980bbddf4483e597ed8ceb2747f559bcb4950e7706a43b true fa32508215245c07dc980bbddf4483e597ed8ceb2747f559bcb4950e7706a4bb +key_image_from_y 5c02d3bc62f0fcd55c264e8919f4a7cd84f1646a5f467df8e0cc70a0a2a0c717 true 5c02d3bc62f0fcd55c264e8919f4a7cd84f1646a5f467df8e0cc70a0a2a0c797 +key_image_from_y b96033b13a7007d716200bc739001fcf9a062dbdc4c2583270cd1cf8fda38f5b true b96033b13a7007d716200bc739001fcf9a062dbdc4c2583270cd1cf8fda38fdb +key_image_from_y 
52c650e2e938e87f72f40bfa534f454c5b6339a3fbfd3059afb939c2d9ab683a true 52c650e2e938e87f72f40bfa534f454c5b6339a3fbfd3059afb939c2d9ab68ba +key_image_from_y 5afee29bbf0ffbf1feec56d43f624f429565fdea27d9544d6c7dcb9d2d43d11f true 5afee29bbf0ffbf1feec56d43f624f429565fdea27d9544d6c7dcb9d2d43d19f +key_image_from_y a4c5b1932e4dba9666641782a4f95f8bb5a617633a17fb4bc10cfccde634276b true a4c5b1932e4dba9666641782a4f95f8bb5a617633a17fb4bc10cfccde63427eb +key_image_from_y 7a63fafdcf359db81604b14bbe51e15adf8d28ba9394d306aa665a258aef2609 true 7a63fafdcf359db81604b14bbe51e15adf8d28ba9394d306aa665a258aef2689 +key_image_from_y 696183751af706b468e221b207ba4aa5a3f97902afa4ab825bf235e85e13dc16 true 696183751af706b468e221b207ba4aa5a3f97902afa4ab825bf235e85e13dc96 +key_image_from_y 28b328e40365cd780fb0637d3870dcf755976ec5c088e97d8a1e8a04db54bd45 true 28b328e40365cd780fb0637d3870dcf755976ec5c088e97d8a1e8a04db54bdc5 +key_image_from_y c4f938652ade2f8996addca457c82876205b207ea470c4231e3a7f5ca3472d4d true c4f938652ade2f8996addca457c82876205b207ea470c4231e3a7f5ca3472dcd +key_image_from_y b993e32601093bf0e63c708501c7f91afe9fa4298d287f3f55bb493569f6b26b true b993e32601093bf0e63c708501c7f91afe9fa4298d287f3f55bb493569f6b2eb +key_image_from_y 6246cfaa394da87a45edf395472ad3594d8b3b6f39550078cfbf39066aeea91b true 6246cfaa394da87a45edf395472ad3594d8b3b6f39550078cfbf39066aeea99b +key_image_from_y 5f0590a3b37df89f27caee54afc6101a3cf0b896a0f1997098bace1bf3d9b954 true 5f0590a3b37df89f27caee54afc6101a3cf0b896a0f1997098bace1bf3d9b9d4 +key_image_from_y e3955bd20dc37d5ae620ee5bffa1b1cfdc05a062826df39197e6d191f23c031b true e3955bd20dc37d5ae620ee5bffa1b1cfdc05a062826df39197e6d191f23c039b +key_image_from_y 53aa7307b2ef3c5d5f51e73b90891b1a597d5ddfcbb41bcd79a0f199f7b20d54 true 53aa7307b2ef3c5d5f51e73b90891b1a597d5ddfcbb41bcd79a0f199f7b20dd4 +key_image_from_y 3474abfa456935af7ca56f5bdf3751ff8437e30de6b5f830329ec2cdd8aa1846 true 3474abfa456935af7ca56f5bdf3751ff8437e30de6b5f830329ec2cdd8aa18c6 +key_image_from_y 2533d58ebfa13c3175be9f05235c1730c93033a35fa002577e44c6675b817e15 true 2533d58ebfa13c3175be9f05235c1730c93033a35fa002577e44c6675b817e95 +key_image_from_y ae848420273035bd516728bd9c2f9b421736a46c3806a77fa64acd680357d733 true ae848420273035bd516728bd9c2f9b421736a46c3806a77fa64acd680357d7b3 +key_image_from_y af96b48d7a704a507a9b0eee52b19edf1ddaa00ed84ff1f04202113dbb79634d true af96b48d7a704a507a9b0eee52b19edf1ddaa00ed84ff1f04202113dbb7963cd +key_image_from_y 1305c030bf02efd242f7d826d53fefdba57546228f911d2a6b2e32bd02952577 true 1305c030bf02efd242f7d826d53fefdba57546228f911d2a6b2e32bd029525f7 +key_image_from_y 6dd4392fb42c478bfbb1555276a79bdb8558cfa0f207787c6b700f40f464042f true 6dd4392fb42c478bfbb1555276a79bdb8558cfa0f207787c6b700f40f46404af +key_image_from_y a7e51a48f006bcff53fbf9f2a5dbc5c8b2cb5251147fa4bd10e5f9bb00db2f7d true a7e51a48f006bcff53fbf9f2a5dbc5c8b2cb5251147fa4bd10e5f9bb00db2ffd +key_image_from_y dc4713b4709e1bf6df4f72a728328816d25ba9b013e4183f1802cc1bbf6c9149 true dc4713b4709e1bf6df4f72a728328816d25ba9b013e4183f1802cc1bbf6c91c9 +key_image_from_y 393b89cb3a994e60cdec7c004be50262c36b7171c22cc8b9ed93d217b3dd1b20 true 393b89cb3a994e60cdec7c004be50262c36b7171c22cc8b9ed93d217b3dd1ba0 +key_image_from_y dde48284f2512d01fe5e6eb0dc7bed77d9f0cd4a520d7e4f48fd98d8be871a47 true dde48284f2512d01fe5e6eb0dc7bed77d9f0cd4a520d7e4f48fd98d8be871ac7 +key_image_from_y 168c123f48f0e06f8dd2fb656a4418e8c8118f94c1b4fb4dd875ce66f79f0108 true 168c123f48f0e06f8dd2fb656a4418e8c8118f94c1b4fb4dd875ce66f79f0188 +key_image_from_y 
638317b0f09425de7b63a3e349e706b0a51ee5872a1bfe5b5c6bbc7bf7dee201 true 638317b0f09425de7b63a3e349e706b0a51ee5872a1bfe5b5c6bbc7bf7dee281 +key_image_from_y 261c14e9b16d50f500e60e9d1f66d33a8466fe8bd8d025418d80602d5caff465 true 261c14e9b16d50f500e60e9d1f66d33a8466fe8bd8d025418d80602d5caff4e5 +key_image_from_y c6aa35885209ab7f49ce3635f1b2c16b70c2bd8c8b0ea9dd22210fc5a8d5c852 true c6aa35885209ab7f49ce3635f1b2c16b70c2bd8c8b0ea9dd22210fc5a8d5c8d2 +key_image_from_y f8222184ed7629b3e994b43fe9d787aa34f33a784a9985deaa1f9dcfb709be73 true f8222184ed7629b3e994b43fe9d787aa34f33a784a9985deaa1f9dcfb709bef3 +key_image_from_y 33a015c73192d8dbc67bd25d28ba2c4cbb4fb8bc92fa5c680d4179d54b7dfe6c true 33a015c73192d8dbc67bd25d28ba2c4cbb4fb8bc92fa5c680d4179d54b7dfeec +key_image_from_y 6b9a54af748eca68552c36464b32344583444a76456cfeab8badf753d2919663 true 6b9a54af748eca68552c36464b32344583444a76456cfeab8badf753d29196e3 +key_image_from_y b8a1892a9174bd24bc5c4560f2116d64ef9985eb39f7c56ae6dcf9112e0d3b40 true b8a1892a9174bd24bc5c4560f2116d64ef9985eb39f7c56ae6dcf9112e0d3bc0 +key_image_from_y 55075478f2d9a2f93c2c8c40e32a2e79b157b16ae619c7f4492e9e5aee450b37 true 55075478f2d9a2f93c2c8c40e32a2e79b157b16ae619c7f4492e9e5aee450bb7 +key_image_from_y bbb54e6c3500b90a73fd9df8273a8146dd182af9350867671f6b3335c340625c true bbb54e6c3500b90a73fd9df8273a8146dd182af9350867671f6b3335c34062dc +key_image_from_y 2f1602dbf3381f51d8d33d56becffec3f5cdef6230032e27a719525b4b38c941 true 2f1602dbf3381f51d8d33d56becffec3f5cdef6230032e27a719525b4b38c9c1 +key_image_from_y af046aaf25bf374dd22baa4fae78c982f800f1b7c2731f97f97e882688856034 true af046aaf25bf374dd22baa4fae78c982f800f1b7c2731f97f97e8826888560b4 +key_image_from_y 6a9640d8f15984358ce8acf46cb39dec56933cf13e335d6a3bd0e01dd64e7b57 true 6a9640d8f15984358ce8acf46cb39dec56933cf13e335d6a3bd0e01dd64e7bd7 +key_image_from_y 49ac34e8e8089177108c9c764feeba8f7bc67bb1715a9f937b400d5726dd2810 true 49ac34e8e8089177108c9c764feeba8f7bc67bb1715a9f937b400d5726dd2890 +key_image_from_y 874e851d37c89dbe2cddd1848d0f4fa991e59455deb73f754cf2615c2f21595a true 874e851d37c89dbe2cddd1848d0f4fa991e59455deb73f754cf2615c2f2159da +key_image_from_y 7b0ee4271536ba40e0e3bfacbd6a9f04f24ba6cb2e79b96070bec36480973113 true 7b0ee4271536ba40e0e3bfacbd6a9f04f24ba6cb2e79b96070bec36480973193 +key_image_from_y bbebfd909dffa248f85390d3860bf5f2123369be07ea3dee5f13e99e25e49359 true bbebfd909dffa248f85390d3860bf5f2123369be07ea3dee5f13e99e25e493d9 +key_image_from_y df5ca9835c856ee38141ff5d10a4985f958e7f986391ecf639263f7319bc6b36 true df5ca9835c856ee38141ff5d10a4985f958e7f986391ecf639263f7319bc6bb6 +key_image_from_y 7a4fbe8236800caeaea9d726a15b21ba515e7414ed74790717d464b2d8c70e39 true 7a4fbe8236800caeaea9d726a15b21ba515e7414ed74790717d464b2d8c70eb9 +key_image_from_y a338ae983e77870095e9f7cc6f9c13d8603796741553483071a4971c8de4bc7d true a338ae983e77870095e9f7cc6f9c13d8603796741553483071a4971c8de4bcfd +key_image_from_y 7bda514ff46aeae2c62b19fe1be1cb11ccd7405cbf089088863d12d97e718324 true 7bda514ff46aeae2c62b19fe1be1cb11ccd7405cbf089088863d12d97e7183a4 +key_image_from_y c3ce63428cfda4dc85a1dae4c3c6b051087a82f49776a546cff6b47484ff3961 true c3ce63428cfda4dc85a1dae4c3c6b051087a82f49776a546cff6b47484ff39e1 +key_image_from_y 3dfc1d9d714860c14540568d7da56e31b1a290db1023ad5bd10862ade6d4ae74 true 3dfc1d9d714860c14540568d7da56e31b1a290db1023ad5bd10862ade6d4aef4 +key_image_from_y 3cecd238630915a9f142a9c561461a7f321824ae726e03290fe70e2cbb17e955 true 3cecd238630915a9f142a9c561461a7f321824ae726e03290fe70e2cbb17e9d5 +key_image_from_y 
683c108ea462e200e6e774f6b6ec75bd6a1041f4c0c3ac392f79c2ae66bc1f4d true 683c108ea462e200e6e774f6b6ec75bd6a1041f4c0c3ac392f79c2ae66bc1fcd +key_image_from_y 6428539f6949cb005e1dfa470718c6f2eddee6ad4579e876c909b92a6561c178 true 6428539f6949cb005e1dfa470718c6f2eddee6ad4579e876c909b92a6561c1f8 +key_image_from_y 6658cab76b1481b2023873a57c06d69097d9bfa96c05a995e84731cfe65a384b true 6658cab76b1481b2023873a57c06d69097d9bfa96c05a995e84731cfe65a38cb diff --git a/tests/performance_tests/CMakeLists.txt b/tests/performance_tests/CMakeLists.txt index a1158fcecd6..4f259643a2c 100644 --- a/tests/performance_tests/CMakeLists.txt +++ b/tests/performance_tests/CMakeLists.txt @@ -36,6 +36,7 @@ set(performance_tests_headers construct_tx.h derive_public_key.h derive_secret_key.h + fe_batch_invert.h ge_frombytes_vartime.h generate_key_derivation.h generate_key_image.h diff --git a/tests/performance_tests/fe_batch_invert.h b/tests/performance_tests/fe_batch_invert.h new file mode 100644 index 00000000000..2aed96f9a0e --- /dev/null +++ b/tests/performance_tests/fe_batch_invert.h @@ -0,0 +1,79 @@ +// Copyright (c) 2024, The Monero Project +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without modification, are +// permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of +// conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list +// of conditions and the following disclaimer in the documentation and/or other +// materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be +// used to endorse or promote products derived from this software without specific +// prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY +// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL +// THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF +// THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+// +// Parts of this file are originally copyright (c) 2012-2013 The Cryptonote developers + +#pragma once + +#include "crypto/crypto.h" + +template <bool batched> +class test_fe_batch_invert +{ +public: + static const size_t loop_count = 50; + static const size_t n_elems = 1000; + + bool init() + { + m_fes = (fe *) malloc(n_elems * sizeof(fe)); + + for (std::size_t i = 0; i < n_elems; ++i) + { + crypto::secret_key r; + crypto::random32_unbiased((unsigned char*)r.data); + + ge_p3 point; + ge_scalarmult_base(&point, (unsigned char*)r.data); + + memcpy(m_fes[i], &point.Y, sizeof(fe)); + } + + return true; + } + + bool test() + { + fe *inv_fes = (fe *) malloc(n_elems * sizeof(fe)); + + if (batched) + fe_batch_invert(inv_fes, m_fes, n_elems); + else + { + for (std::size_t i = 0; i < n_elems; ++i) + fe_invert(inv_fes[i], m_fes[i]); + } + + free(inv_fes); + + return true; + } + +private: + fe *m_fes; +}; diff --git a/tests/performance_tests/main.cpp b/tests/performance_tests/main.cpp index 929eec590d6..02770d65a8e 100644 --- a/tests/performance_tests/main.cpp +++ b/tests/performance_tests/main.cpp @@ -43,6 +43,7 @@ #include "derive_public_key.h" #include "derive_secret_key.h" #include "derive_view_tag.h" +#include "fe_batch_invert.h" #include "ge_frombytes_vartime.h" #include "ge_tobytes.h" #include "generate_key_derivation.h" @@ -206,6 +207,8 @@ int main(int argc, char** argv) TEST_PERFORMANCE0(filter, p, test_generate_key_image); TEST_PERFORMANCE0(filter, p, test_derive_public_key); TEST_PERFORMANCE0(filter, p, test_derive_secret_key); + TEST_PERFORMANCE1(filter, p, test_fe_batch_invert, true); // batched + TEST_PERFORMANCE1(filter, p, test_fe_batch_invert, false); // individual inversions TEST_PERFORMANCE0(filter, p, test_ge_frombytes_vartime); TEST_PERFORMANCE0(filter, p, test_ge_tobytes); TEST_PERFORMANCE0(filter, p, test_generate_keypair); diff --git a/tests/unit_tests/CMakeLists.txt b/tests/unit_tests/CMakeLists.txt index 8659b0ed07a..eb48e281cb1 100644 --- a/tests/unit_tests/CMakeLists.txt +++ b/tests/unit_tests/CMakeLists.txt @@ -41,6 +41,7 @@ set(unit_tests_sources chacha.cpp checkpoints.cpp command_line.cpp + curve_trees.cpp crypto.cpp decompose_amount_into_digits.cpp device.cpp @@ -51,6 +52,7 @@ set(unit_tests_sources epee_serialization.cpp epee_utils.cpp expect.cpp + fcmp_pp.cpp json_serialization.cpp get_xtype_from_string.cpp hashchain.cpp @@ -113,11 +115,13 @@ monero_add_minimal_executable(unit_tests target_link_libraries(unit_tests PRIVATE ringct + cncrypto cryptonote_protocol cryptonote_core daemon_messages daemon_rpc_server blockchain_db + fcmp_pp lmdb_lib rpc net diff --git a/tests/unit_tests/crypto.cpp b/tests/unit_tests/crypto.cpp index 1c4841bb7a3..51931cdda2c 100644 --- a/tests/unit_tests/crypto.cpp +++ b/tests/unit_tests/crypto.cpp @@ -345,3 +345,63 @@ TEST(Crypto, generator_consistency) // ringct/rctTypes.h ASSERT_TRUE(memcmp(H.data, rct::H.bytes, 32) == 0); } + +TEST(Crypto, key_image_y) +{ + const cryptonote::keypair kp = cryptonote::keypair::generate(hw::get_device("default")); + crypto::key_image ki; + crypto::generate_key_image(kp.pub, kp.sec, ki); + + crypto::key_image_y ki_y; + bool sign = crypto::key_image_to_y(ki, ki_y); + + static_assert(sizeof(crypto::key_image) == sizeof(crypto::key_image_y), "unequal key image <> key image y size"); + if (memcmp(ki.data, ki_y.data, sizeof(crypto::key_image)) == 0) + ASSERT_FALSE(sign); + else + ASSERT_TRUE(sign); + + // decoded y coordinate should be the same + fe y_from_ki; + fe y_from_ki_y; + ASSERT_EQ(fe_frombytes_vartime(y_from_ki,
(unsigned char*)ki.data), 0); + ASSERT_EQ(fe_frombytes_vartime(y_from_ki_y, (unsigned char*)ki_y.data), 0); + + ASSERT_EQ(memcmp(y_from_ki, y_from_ki_y, sizeof(fe)), 0); +} + +TEST(Crypto, batch_inversion) +{ + const std::size_t MAX_TEST_ELEMS = 1000; + + // Memory allocator + auto alloc = [](const std::size_t n) -> fe* + { + fe *ptr = (fe *) malloc(n * sizeof(fe)); + if (!ptr) + throw std::runtime_error("failed to malloc fe *"); + return ptr; + }; + + // Init test elems and individual inversions + fe *init_elems = alloc(MAX_TEST_ELEMS); + fe *norm_inverted = alloc(MAX_TEST_ELEMS); + for (std::size_t i = 0; i < MAX_TEST_ELEMS; ++i) + { + const cryptonote::keypair kp = cryptonote::keypair::generate(hw::get_device("default")); + ASSERT_EQ(fe_frombytes_vartime(init_elems[i], (unsigned char*)kp.pub.data), 0); + fe_invert(norm_inverted[i], init_elems[i]); + } + + // Do batch inversions and compare to individual inversions + for (std::size_t n_elems = 1; n_elems <= MAX_TEST_ELEMS; ++n_elems) + { + fe *batch_inverted = alloc(n_elems); + ASSERT_EQ(fe_batch_invert(batch_inverted, init_elems, n_elems), 0); + ASSERT_EQ(memcmp(batch_inverted, norm_inverted, n_elems * sizeof(fe)), 0); + free(batch_inverted); + } + + free(init_elems); + free(norm_inverted); +} diff --git a/tests/unit_tests/curve_trees.cpp b/tests/unit_tests/curve_trees.cpp new file mode 100644 index 00000000000..491448b3a22 --- /dev/null +++ b/tests/unit_tests/curve_trees.cpp @@ -0,0 +1,1353 @@ +// Copyright (c) 2014, The Monero Project +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without modification, are +// permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of +// conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list +// of conditions and the following disclaimer in the documentation and/or other +// materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be +// used to endorse or promote products derived from this software without specific +// prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY +// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL +// THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF +// THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
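+// +// Unit tests for the FCMP++ curve trees: growing, trimming and auditing the Helios/Selene +// tower-cycle tree, both in memory (CurveTreesGlobalTree) and against LMDB (BlockchainLMDBTest).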
+ +#include "gtest/gtest.h" + +#include "cryptonote_basic/cryptonote_format_utils.h" +#include "curve_trees.h" +#include "fcmp_pp/fcmp_pp_crypto.h" +#include "misc_log_ex.h" +#include "ringct/rctOps.h" + +#include + + +//---------------------------------------------------------------------------------------------------------------------- +//---------------------------------------------------------------------------------------------------------------------- +// Test helpers +//---------------------------------------------------------------------------------------------------------------------- +static const std::vector generate_random_leaves(const CurveTreesV1 &curve_trees, + const std::size_t old_n_leaf_tuples, + const std::size_t new_n_leaf_tuples) +{ + std::vector outs; + outs.reserve(new_n_leaf_tuples); + + for (std::size_t i = 0; i < new_n_leaf_tuples; ++i) + { + const std::uint64_t output_id = old_n_leaf_tuples + i; + + // Generate random output tuple + crypto::secret_key o,c; + crypto::public_key O,C; + crypto::generate_keys(O, o, o, false); + crypto::generate_keys(C, c, c, false); + + rct::key C_key = rct::pk2rct(C); + auto output_pair = fcmp_pp::curve_trees::OutputPair{ + .output_pubkey = std::move(O), + .commitment = std::move(C_key) + }; + + auto output_context = fcmp_pp::curve_trees::OutputContext{ + .output_id = output_id, + .output_pair = std::move(output_pair) + }; + + outs.emplace_back(std::move(output_context)); + } + + return outs; +} +//---------------------------------------------------------------------------------------------------------------------- +static const Selene::Scalar generate_random_selene_scalar() +{ + crypto::secret_key s; + crypto::public_key S; + + crypto::generate_keys(S, s, s, false); + + rct::key S_x; + CHECK_AND_ASSERT_THROW_MES(fcmp_pp::point_to_wei_x(rct::pk2rct(S), S_x), "failed to convert to wei x"); + return fcmp_pp::tower_cycle::selene_scalar_from_bytes(S_x); +} +//---------------------------------------------------------------------------------------------------------------------- +static bool grow_tree_db(const std::size_t expected_old_n_leaf_tuples, + const std::size_t n_leaves, + std::shared_ptr curve_trees, + unit_test::BlockchainLMDBTest &test_db) +{ + cryptonote::db_wtxn_guard guard(test_db.m_db); + + CHECK_AND_ASSERT_MES(test_db.m_db->get_num_leaf_tuples() == (uint64_t)(expected_old_n_leaf_tuples), + false, "unexpected starting n leaf tuples in db"); + + auto leaves = generate_random_leaves(*curve_trees, 0, n_leaves); + + test_db.m_db->grow_tree(std::move(leaves)); + + return test_db.m_db->audit_tree(expected_old_n_leaf_tuples + n_leaves); +} +//---------------------------------------------------------------------------------------------------------------------- +static bool trim_tree_db(const std::size_t expected_old_n_leaf_tuples, + const std::size_t trim_leaves, + unit_test::BlockchainLMDBTest &test_db) +{ + cryptonote::db_wtxn_guard guard(test_db.m_db); + + CHECK_AND_ASSERT_THROW_MES(expected_old_n_leaf_tuples >= trim_leaves, "cannot trim more leaves than exist"); + CHECK_AND_ASSERT_THROW_MES(trim_leaves > 0, "must be trimming some leaves"); + + LOG_PRINT_L1("Trimming " << trim_leaves << " leaf tuples from tree with " + << expected_old_n_leaf_tuples << " leaves in db"); + + CHECK_AND_ASSERT_MES(test_db.m_db->get_num_leaf_tuples() == (uint64_t)(expected_old_n_leaf_tuples), + false, "trimming unexpected starting n leaf tuples in db"); + + // Can use 0 for trim_block_id since it's unused in tests + 
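// trim_tree removes the last trim_leaves leaf tuples and rehashes the affected ancestor chunks; + // audit_tree below then re-verifies what remains of the tree. + 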
test_db.m_db->trim_tree(trim_leaves, 0); + CHECK_AND_ASSERT_MES(test_db.m_db->audit_tree(expected_old_n_leaf_tuples - trim_leaves), false, + "failed to trim tree in db"); + + MDEBUG("Successfully trimmed tree in db by " << trim_leaves << " leaves"); + + return true; +} +//---------------------------------------------------------------------------------------------------------------------- +#define BEGIN_INIT_TREE_ITER(curve_trees) \ + for (std::size_t init_leaves = 1; init_leaves <= min_leaves_needed_for_tree_depth; ++init_leaves) \ + { \ + LOG_PRINT_L1("Initializing tree with " << init_leaves << " leaves"); \ + \ + /* Init tree in memory */ \ + CurveTreesGlobalTree global_tree(*curve_trees); \ + ASSERT_TRUE(global_tree.grow_tree(0, init_leaves)); \ + \ + /* Init tree in db */ \ + INIT_BLOCKCHAIN_LMDB_TEST_DB(test_db, curve_trees); \ + ASSERT_TRUE(grow_tree_db(0, init_leaves, curve_trees, test_db)); \ +//---------------------------------------------------------------------------------------------------------------------- +#define END_INIT_TREE_ITER(curve_trees) \ + }; \ +//---------------------------------------------------------------------------------------------------------------------- +//---------------------------------------------------------------------------------------------------------------------- +// CurveTreesGlobalTree helpers +//---------------------------------------------------------------------------------------------------------------------- +template <typename C> +static bool validate_layer(const std::unique_ptr<C> &curve, + const CurveTreesGlobalTree::Layer<C> &parents, + const std::vector<typename C::Scalar> &child_scalars, + const std::size_t max_chunk_size) +{ + // Hash chunk of children scalars, then see if the hash matches up to respective parent + std::size_t chunk_start_idx = 0; + for (std::size_t i = 0; i < parents.size(); ++i) + { + CHECK_AND_ASSERT_MES(child_scalars.size() > chunk_start_idx, false, "chunk start too high"); + const std::size_t chunk_size = std::min(child_scalars.size() - chunk_start_idx, max_chunk_size); + CHECK_AND_ASSERT_MES(child_scalars.size() >= (chunk_start_idx + chunk_size), false, "chunk size too large"); + + const typename C::Point &parent = parents[i]; + + const auto chunk_start = child_scalars.data() + chunk_start_idx; + const typename C::Chunk chunk{chunk_start, chunk_size}; + + for (std::size_t i = 0; i < chunk_size; ++i) + MDEBUG("Hashing " << curve->to_string(chunk_start[i])); + + const typename C::Point chunk_hash = fcmp_pp::curve_trees::get_new_parent(curve, chunk); + + MDEBUG("chunk_start_idx: " << chunk_start_idx << " , chunk_size: " << chunk_size << " , chunk_hash: " << curve->to_string(chunk_hash)); + + const auto actual_bytes = curve->to_bytes(parent); + const auto expected_bytes = curve->to_bytes(chunk_hash); + CHECK_AND_ASSERT_MES(actual_bytes == expected_bytes, false, "unexpected hash"); + + chunk_start_idx += chunk_size; + } + + CHECK_AND_ASSERT_THROW_MES(chunk_start_idx == child_scalars.size(), "unexpected ending chunk start idx"); + + return true; +} +//---------------------------------------------------------------------------------------------------------------------- +template <typename C_CHILD, typename C_PARENT> +static std::vector<typename C_PARENT::Scalar> get_last_chunk_children_to_trim(const std::unique_ptr<C_CHILD> &c_child, + const CurveTreesGlobalTree::Layer<C_CHILD> &child_layer, + const bool need_last_chunk_children_to_trim, + const bool need_last_chunk_remaining_children, + const std::size_t start_trim_idx, + const std::size_t end_trim_idx) +{ + std::vector<typename C_PARENT::Scalar> children_to_trim_out; + if (end_trim_idx > start_trim_idx) + {
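+        // Children in [start_trim_idx, end_trim_idx) are mapped to scalars on the parent curve via + // point_to_cycle_scalar, so the last parent chunk can be re-hashed over the children that remain + // after the trim.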
+ std::size_t idx = start_trim_idx; + MDEBUG("Start trim from idx: " << idx << " , ending trim at: " << end_trim_idx); + do + { + CHECK_AND_ASSERT_THROW_MES(child_layer.size() > idx, "idx too high"); + const auto &child_point = child_layer[idx]; + + auto child_scalar = c_child->point_to_cycle_scalar(child_point); + children_to_trim_out.push_back(std::move(child_scalar)); + + ++idx; + } + while (idx < end_trim_idx); + } + + return children_to_trim_out; +} +//---------------------------------------------------------------------------------------------------------------------- +//---------------------------------------------------------------------------------------------------------------------- +// CurveTreesGlobalTree public implementations +//---------------------------------------------------------------------------------------------------------------------- +std::size_t CurveTreesGlobalTree::get_num_leaf_tuples() const +{ + return m_tree.leaves.size(); +} +//---------------------------------------------------------------------------------------------------------------------- +bool CurveTreesGlobalTree::grow_tree(const std::size_t expected_old_n_leaf_tuples,const std::size_t new_n_leaf_tuples) +{ + // Do initial tree reads + const std::size_t old_n_leaf_tuples = this->get_num_leaf_tuples(); + CHECK_AND_ASSERT_MES(old_n_leaf_tuples == expected_old_n_leaf_tuples, false, "unexpected old_n_leaf_tuples"); + const CurveTreesV1::LastHashes last_hashes = this->get_last_hashes(); + + this->log_last_hashes(last_hashes); + + auto new_outputs = generate_random_leaves(m_curve_trees, old_n_leaf_tuples, new_n_leaf_tuples); + + // Get a tree extension object to the existing tree using randomly generated leaves + // - The tree extension includes all elements we'll need to add to the existing tree when adding the new leaves + const auto tree_extension = m_curve_trees.get_tree_extension(old_n_leaf_tuples, + last_hashes, + std::move(new_outputs)); + + this->log_tree_extension(tree_extension); + + // Use the tree extension to extend the existing tree + this->extend_tree(tree_extension); + + this->log_tree(); + + // Validate tree structure and all hashes + const std::size_t expected_n_leaf_tuples = old_n_leaf_tuples + new_n_leaf_tuples; + return this->audit_tree(expected_n_leaf_tuples); +} +//---------------------------------------------------------------------------------------------------------------------- +bool CurveTreesGlobalTree::trim_tree(const std::size_t expected_old_n_leaf_tuples, const std::size_t trim_n_leaf_tuples) +{ + const std::size_t old_n_leaf_tuples = this->get_num_leaf_tuples(); + CHECK_AND_ASSERT_MES(old_n_leaf_tuples == expected_old_n_leaf_tuples, false, "unexpected old_n_leaf_tuples"); + CHECK_AND_ASSERT_THROW_MES(old_n_leaf_tuples >= trim_n_leaf_tuples, "cannot trim more leaves than exist"); + CHECK_AND_ASSERT_THROW_MES(trim_n_leaf_tuples > 0, "must be trimming some leaves"); + + // Trim the global tree by `trim_n_leaf_tuples` + LOG_PRINT_L1("Trimming " << trim_n_leaf_tuples << " leaf tuples from tree with " + << old_n_leaf_tuples << " leaves in memory"); + + // Get trim instructions + const auto trim_instructions = m_curve_trees.get_trim_instructions(old_n_leaf_tuples, trim_n_leaf_tuples); + MDEBUG("Acquired trim instructions for " << trim_instructions.size() << " layers"); + + // Do initial tree reads + const auto last_chunk_children_to_trim = this->get_all_last_chunk_children_to_trim(trim_instructions); + const auto last_hashes_to_trim = 
this->get_last_hashes_to_trim(trim_instructions); + + // Get the new hashes, wrapped in a simple struct we can use to trim the tree + const auto tree_reduction = m_curve_trees.get_tree_reduction( + trim_instructions, + last_chunk_children_to_trim, + last_hashes_to_trim); + + // Use tree reduction to trim tree + this->reduce_tree(tree_reduction); + + const std::size_t new_n_leaf_tuples = this->get_num_leaf_tuples(); + CHECK_AND_ASSERT_THROW_MES((new_n_leaf_tuples + trim_n_leaf_tuples) == old_n_leaf_tuples, + "unexpected num leaves after trim"); + + MDEBUG("Finished trimming " << trim_n_leaf_tuples << " leaf tuples from tree"); + + this->log_tree(); + + const std::size_t expected_n_leaf_tuples = old_n_leaf_tuples - trim_n_leaf_tuples; + bool res = this->audit_tree(expected_n_leaf_tuples); + CHECK_AND_ASSERT_MES(res, false, "failed to trim tree in memory"); + + MDEBUG("Successfully trimmed " << trim_n_leaf_tuples << " leaves in memory"); + return true; +} +//---------------------------------------------------------------------------------------------------------------------- +bool CurveTreesGlobalTree::audit_tree(const std::size_t expected_n_leaf_tuples) const +{ + MDEBUG("Auditing global tree"); + + auto leaves = m_tree.leaves; + const auto &c1_layers = m_tree.c1_layers; + const auto &c2_layers = m_tree.c2_layers; + + CHECK_AND_ASSERT_MES(leaves.size() == expected_n_leaf_tuples, false, "unexpected num leaves"); + + if (leaves.empty()) + { + CHECK_AND_ASSERT_MES(c2_layers.empty() && c1_layers.empty(), false, "expected empty tree"); + return true; + } + + CHECK_AND_ASSERT_MES(!c2_layers.empty(), false, "must have at least 1 c2 layer in tree"); + CHECK_AND_ASSERT_MES(c2_layers.size() == c1_layers.size() || c2_layers.size() == (c1_layers.size() + 1), + false, "unexpected mismatch of c2 and c1 layers"); + + // Verify root has 1 member in it + const bool c2_is_root = c2_layers.size() > c1_layers.size(); + CHECK_AND_ASSERT_MES(c2_is_root ? c2_layers.back().size() == 1 : c1_layers.back().size() == 1, false, + "root must have 1 member in it"); + + // Iterate from root down to layer above leaves, and check hashes match up correctly + bool parent_is_c2 = c2_is_root; + std::size_t c2_idx = c2_layers.size() - 1; + std::size_t c1_idx = c1_layers.empty() ? 
0 : (c1_layers.size() - 1); + for (std::size_t i = 1; i < (c2_layers.size() + c1_layers.size()); ++i) + { + // TODO: implement templated function for below if statement + if (parent_is_c2) + { + MDEBUG("Validating parent c2 layer " << c2_idx << " , child c1 layer " << c1_idx); + + CHECK_AND_ASSERT_THROW_MES(c2_idx < c2_layers.size(), "unexpected c2_idx"); + CHECK_AND_ASSERT_THROW_MES(c1_idx < c1_layers.size(), "unexpected c1_idx"); + + const Layer<Selene> &parents = c2_layers[c2_idx]; + const Layer<Helios> &children = c1_layers[c1_idx]; + + CHECK_AND_ASSERT_MES(!parents.empty(), false, "no parents at c2_idx " + std::to_string(c2_idx)); + CHECK_AND_ASSERT_MES(!children.empty(), false, "no children at c1_idx " + std::to_string(c1_idx)); + + std::vector<Selene::Scalar> child_scalars; + fcmp_pp::tower_cycle::extend_scalars_from_cycle_points(m_curve_trees.m_c1, + children, + child_scalars); + + const bool valid = validate_layer(m_curve_trees.m_c2, + parents, + child_scalars, + m_curve_trees.m_c2_width); + + CHECK_AND_ASSERT_MES(valid, false, "failed to validate c2_idx " + std::to_string(c2_idx)); + + --c2_idx; + } + else + { + MDEBUG("Validating parent c1 layer " << c1_idx << " , child c2 layer " << c2_idx); + + CHECK_AND_ASSERT_THROW_MES(c1_idx < c1_layers.size(), "unexpected c1_idx"); + CHECK_AND_ASSERT_THROW_MES(c2_idx < c2_layers.size(), "unexpected c2_idx"); + + const Layer<Helios> &parents = c1_layers[c1_idx]; + const Layer<Selene> &children = c2_layers[c2_idx]; + + CHECK_AND_ASSERT_MES(!parents.empty(), false, "no parents at c1_idx " + std::to_string(c1_idx)); + CHECK_AND_ASSERT_MES(!children.empty(), false, "no children at c2_idx " + std::to_string(c2_idx)); + + std::vector<Helios::Scalar> child_scalars; + fcmp_pp::tower_cycle::extend_scalars_from_cycle_points(m_curve_trees.m_c2, + children, + child_scalars); + + const bool valid = validate_layer( + m_curve_trees.m_c1, + parents, + child_scalars, + m_curve_trees.m_c1_width); + + CHECK_AND_ASSERT_MES(valid, false, "failed to validate c1_idx " + std::to_string(c1_idx)); + + --c1_idx; + } + + parent_is_c2 = !parent_is_c2; + } + + MDEBUG("Validating leaves"); + + // Convert output pairs to leaf tuples + std::vector<CurveTreesV1::LeafTuple> leaf_tuples; + leaf_tuples.reserve(leaves.size()); + for (const auto &leaf : leaves) + { + auto leaf_tuple = m_curve_trees.leaf_tuple(leaf); + leaf_tuples.emplace_back(std::move(leaf_tuple)); + } + + // Now validate leaves + return validate_layer(m_curve_trees.m_c2, + c2_layers[0], + m_curve_trees.flatten_leaves(std::move(leaf_tuples)), + m_curve_trees.m_leaf_layer_chunk_width); +} +//---------------------------------------------------------------------------------------------------------------------- +fcmp_pp::curve_trees::PathV1 CurveTreesGlobalTree::get_path_at_leaf_idx(const std::size_t leaf_idx) const +{ + fcmp_pp::curve_trees::PathV1 path_out; + + const std::size_t n_leaf_tuples = get_num_leaf_tuples(); + CHECK_AND_ASSERT_THROW_MES(n_leaf_tuples > leaf_idx, "too high leaf idx"); + + // Get leaves + const std::size_t start_leaf_idx = (leaf_idx / m_curve_trees.m_c2_width) * m_curve_trees.m_c2_width; + const std::size_t end_leaf_idx = std::min(n_leaf_tuples, start_leaf_idx + m_curve_trees.m_c2_width); + for (std::size_t i = start_leaf_idx; i < end_leaf_idx; ++i) + { + const auto &output_pair = m_tree.leaves[i]; + + const crypto::public_key &output_pubkey = output_pair.output_pubkey; + const rct::key &commitment = output_pair.commitment; + + crypto::ec_point I; + crypto::derive_key_image_generator(output_pubkey, I); + + rct::key O = rct::pk2rct(output_pubkey); + rct::key C = commitment; + + 
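// Each leaf commits to the tuple (O, I, C): the output pubkey, the key image generator derived + // from that pubkey, and the amount commitment. + 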
auto output_tuple = fcmp_pp::curve_trees::OutputTuple{ + .O = std::move(O), + .I = std::move(rct::pt2rct(I)), + .C = std::move(C) + }; + + path_out.leaves.emplace_back(std::move(output_tuple)); + } + + // Get parents + const std::size_t n_layers = m_tree.c1_layers.size() + m_tree.c2_layers.size(); + std::size_t start_parent_idx = start_leaf_idx / m_curve_trees.m_c2_width; + std::size_t c1_idx = 0, c2_idx = 0; + bool use_c2 = true; + for (std::size_t i = 0; i < n_layers; ++i) + { + if (use_c2) + { + path_out.c2_layers.emplace_back(); + auto &layer_out = path_out.c2_layers.back(); + + CHECK_AND_ASSERT_THROW_MES(m_tree.c2_layers.size() > c2_idx, "too high c2_idx"); + const std::size_t n_layer_elems = m_tree.c2_layers[c2_idx].size(); + + CHECK_AND_ASSERT_THROW_MES(n_layer_elems > start_parent_idx, "too high parent idx"); + const std::size_t end_parent_idx = std::min(n_layer_elems, start_parent_idx + m_curve_trees.m_c2_width); + + for (std::size_t j = start_parent_idx; j < end_parent_idx; ++j) + { + layer_out.emplace_back(m_tree.c2_layers[c2_idx][j]); + } + + start_parent_idx /= m_curve_trees.m_c1_width; + ++c2_idx; + } + else + { + path_out.c1_layers.emplace_back(); + auto &layer_out = path_out.c1_layers.back(); + + CHECK_AND_ASSERT_THROW_MES(m_tree.c1_layers.size() > c1_idx, "too high c1_idx"); + const std::size_t n_layer_elems = m_tree.c1_layers[c1_idx].size(); + + CHECK_AND_ASSERT_THROW_MES(n_layer_elems > start_parent_idx, "too high parent idx"); + const std::size_t end_parent_idx = std::min(n_layer_elems, start_parent_idx + m_curve_trees.m_c1_width); + + for (std::size_t j = start_parent_idx; j < end_parent_idx; ++j) + { + layer_out.emplace_back(m_tree.c1_layers[c1_idx][j]); + } + + start_parent_idx /= m_curve_trees.m_c2_width; + ++c1_idx; + } + + use_c2 = !use_c2; + } + + return path_out; +} +//---------------------------------------------------------------------------------------------------------------------- +std::array<uint8_t, 32> CurveTreesGlobalTree::get_tree_root() const +{ + const std::size_t n_layers = m_tree.c1_layers.size() + m_tree.c2_layers.size(); + + if (n_layers == 0) + return std::array<uint8_t, 32>(); + + if ((n_layers % 2) == 0) + { + CHECK_AND_ASSERT_THROW_MES(!m_tree.c1_layers.empty(), "missing c1 layers"); + const auto &last_layer = m_tree.c1_layers.back(); + CHECK_AND_ASSERT_THROW_MES(!last_layer.empty(), "missing elems from last c1 layer"); + return m_curve_trees.m_c1->to_bytes(last_layer.back()); + } + else + { + CHECK_AND_ASSERT_THROW_MES(!m_tree.c2_layers.empty(), "missing c2 layers"); + const auto &last_layer = m_tree.c2_layers.back(); + CHECK_AND_ASSERT_THROW_MES(!last_layer.empty(), "missing elems from last c2 layer"); + return m_curve_trees.m_c2->to_bytes(last_layer.back()); + } +} +//---------------------------------------------------------------------------------------------------------------------- +//---------------------------------------------------------------------------------------------------------------------- +// CurveTreesGlobalTree private implementations +//---------------------------------------------------------------------------------------------------------------------- +void CurveTreesGlobalTree::extend_tree(const CurveTreesV1::TreeExtension &tree_extension) +{ + // Add the leaves + CHECK_AND_ASSERT_THROW_MES(m_tree.leaves.size() == tree_extension.leaves.start_leaf_tuple_idx, + "unexpected leaf start idx"); + + m_tree.leaves.reserve(m_tree.leaves.size() + tree_extension.leaves.tuples.size()); + for (const auto &o : tree_extension.leaves.tuples) + { + 
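// Only the output pair is stored per leaf in memory; audit_tree re-derives the full leaf + // tuples from it via m_curve_trees.leaf_tuple(). + 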
m_tree.leaves.emplace_back(o.output_pair); + } + + // Add the layers + const auto &c2_extensions = tree_extension.c2_layer_extensions; + const auto &c1_extensions = tree_extension.c1_layer_extensions; + CHECK_AND_ASSERT_THROW_MES(!c2_extensions.empty(), "empty c2 extensions"); + + bool use_c2 = true; + std::size_t c2_idx = 0; + std::size_t c1_idx = 0; + for (std::size_t i = 0; i < (c2_extensions.size() + c1_extensions.size()); ++i) + { + // TODO: template below if statement + if (use_c2) + { + CHECK_AND_ASSERT_THROW_MES(c2_idx < c2_extensions.size(), "unexpected c2 layer extension"); + const fcmp_pp::curve_trees::LayerExtension &c2_ext = c2_extensions[c2_idx]; + + CHECK_AND_ASSERT_THROW_MES(!c2_ext.hashes.empty(), "empty c2 layer extension"); + + CHECK_AND_ASSERT_THROW_MES(c2_idx <= m_tree.c2_layers.size(), "missing c2 layer"); + if (m_tree.c2_layers.size() == c2_idx) + m_tree.c2_layers.emplace_back(Layer{}); + + auto &c2_inout = m_tree.c2_layers[c2_idx]; + + const bool started_after_tip = (c2_inout.size() == c2_ext.start_idx); + const bool started_at_tip = (c2_inout.size() == (c2_ext.start_idx + 1)); + CHECK_AND_ASSERT_THROW_MES(started_after_tip || started_at_tip, "unexpected c2 layer start"); + + // We updated the last hash + if (started_at_tip) + { + CHECK_AND_ASSERT_THROW_MES(c2_ext.update_existing_last_hash, "expect to be updating last hash"); + c2_inout.back() = c2_ext.hashes.front(); + } + else + { + CHECK_AND_ASSERT_THROW_MES(!c2_ext.update_existing_last_hash, "unexpected last hash update"); + } + + for (std::size_t i = started_at_tip ? 1 : 0; i < c2_ext.hashes.size(); ++i) + c2_inout.emplace_back(c2_ext.hashes[i]); + + ++c2_idx; + } + else + { + CHECK_AND_ASSERT_THROW_MES(c1_idx < c1_extensions.size(), "unexpected c1 layer extension"); + const fcmp_pp::curve_trees::LayerExtension &c1_ext = c1_extensions[c1_idx]; + + CHECK_AND_ASSERT_THROW_MES(!c1_ext.hashes.empty(), "empty c1 layer extension"); + + CHECK_AND_ASSERT_THROW_MES(c1_idx <= m_tree.c1_layers.size(), "missing c1 layer"); + if (m_tree.c1_layers.size() == c1_idx) + m_tree.c1_layers.emplace_back(Layer{}); + + auto &c1_inout = m_tree.c1_layers[c1_idx]; + + const bool started_after_tip = (c1_inout.size() == c1_ext.start_idx); + const bool started_at_tip = (c1_inout.size() == (c1_ext.start_idx + 1)); + CHECK_AND_ASSERT_THROW_MES(started_after_tip || started_at_tip, "unexpected c1 layer start"); + + // We updated the last hash + if (started_at_tip) + { + CHECK_AND_ASSERT_THROW_MES(c1_ext.update_existing_last_hash, "expect to be updating last hash"); + c1_inout.back() = c1_ext.hashes.front(); + } + else + { + CHECK_AND_ASSERT_THROW_MES(!c1_ext.update_existing_last_hash, "unexpected last hash update"); + } + + for (std::size_t i = started_at_tip ? 
1 : 0; i < c1_ext.hashes.size(); ++i) + c1_inout.emplace_back(c1_ext.hashes[i]); + + ++c1_idx; + } + + use_c2 = !use_c2; + } +} +//---------------------------------------------------------------------------------------------------------------------- +void CurveTreesGlobalTree::reduce_tree(const CurveTreesV1::TreeReduction &tree_reduction) +{ + // Trim the leaves + CHECK_AND_ASSERT_THROW_MES(m_tree.leaves.size() > tree_reduction.new_total_leaf_tuples, + "expected fewer new total leaves"); + while (m_tree.leaves.size() > tree_reduction.new_total_leaf_tuples) + m_tree.leaves.pop_back(); + + // Trim the layers + const auto &c2_layer_reductions = tree_reduction.c2_layer_reductions; + const auto &c1_layer_reductions = tree_reduction.c1_layer_reductions; + CHECK_AND_ASSERT_THROW_MES(c2_layer_reductions.size() == c1_layer_reductions.size() + || c2_layer_reductions.size() == (c1_layer_reductions.size() + 1), + "unexpected mismatch of c2 and c1 layer reductions"); + + bool use_c2 = true; + std::size_t c2_idx = 0; + std::size_t c1_idx = 0; + for (std::size_t i = 0; i < (c2_layer_reductions.size() + c1_layer_reductions.size()); ++i) + { + // TODO: template below if statement + if (use_c2) + { + CHECK_AND_ASSERT_THROW_MES(c2_idx < c2_layer_reductions.size(), "unexpected c2 layer reduction"); + const auto &c2_reduction = c2_layer_reductions[c2_idx]; + + CHECK_AND_ASSERT_THROW_MES(c2_idx < m_tree.c2_layers.size(), "missing c2 layer"); + auto &c2_inout = m_tree.c2_layers[c2_idx]; + + CHECK_AND_ASSERT_THROW_MES(c2_reduction.new_total_parents <= c2_inout.size(), + "unexpected c2 new total parents"); + + c2_inout.resize(c2_reduction.new_total_parents); + c2_inout.shrink_to_fit(); + + // We updated the last hash + if (c2_reduction.update_existing_last_hash) + { + c2_inout.back() = c2_reduction.new_last_hash; + } + + ++c2_idx; + } + else + { + CHECK_AND_ASSERT_THROW_MES(c1_idx < c1_layer_reductions.size(), "unexpected c1 layer reduction"); + const auto &c1_reduction = c1_layer_reductions[c1_idx]; + + CHECK_AND_ASSERT_THROW_MES(c1_idx < m_tree.c1_layers.size(), "missing c1 layer"); + auto &c1_inout = m_tree.c1_layers[c1_idx]; + + CHECK_AND_ASSERT_THROW_MES(c1_reduction.new_total_parents <= c1_inout.size(), + "unexpected c1 new total parents"); + + c1_inout.resize(c1_reduction.new_total_parents); + c1_inout.shrink_to_fit(); + + // We updated the last hash + if (c1_reduction.update_existing_last_hash) + { + c1_inout.back() = c1_reduction.new_last_hash; + } + + ++c1_idx; + } + + use_c2 = !use_c2; + } + + // Delete remaining layers + m_tree.c1_layers.resize(c1_layer_reductions.size()); + m_tree.c2_layers.resize(c2_layer_reductions.size()); + + m_tree.c1_layers.shrink_to_fit(); + m_tree.c2_layers.shrink_to_fit(); +} +//---------------------------------------------------------------------------------------------------------------------- +CurveTreesV1::LastHashes CurveTreesGlobalTree::get_last_hashes() const +{ + CurveTreesV1::LastHashes last_hashes_out; + auto &c1_last_hashes_out = last_hashes_out.c1_last_hashes; + auto &c2_last_hashes_out = last_hashes_out.c2_last_hashes; + + const auto &c1_layers = m_tree.c1_layers; + const auto &c2_layers = m_tree.c2_layers; + + // We started with c2 and then alternated, so c2 is the same size or 1 higher than c1 + CHECK_AND_ASSERT_THROW_MES(c2_layers.size() == c1_layers.size() || c2_layers.size() == (c1_layers.size() + 1), + "unexpected number of curve layers"); + + c1_last_hashes_out.reserve(c1_layers.size()); + c2_last_hashes_out.reserve(c2_layers.size()); + + if 
(c2_layers.empty())
+        return last_hashes_out;
+
+    // Next parents will be c2
+    bool use_c2 = true;
+
+    // Then get last chunks up until the root
+    std::size_t c1_idx = 0;
+    std::size_t c2_idx = 0;
+    while (c1_last_hashes_out.size() < c1_layers.size() || c2_last_hashes_out.size() < c2_layers.size())
+    {
+        if (use_c2)
+        {
+            CHECK_AND_ASSERT_THROW_MES(c2_layers.size() > c2_idx, "missing c2 layer");
+            c2_last_hashes_out.push_back(c2_layers[c2_idx].back());
+            ++c2_idx;
+        }
+        else
+        {
+            CHECK_AND_ASSERT_THROW_MES(c1_layers.size() > c1_idx, "missing c1 layer");
+            c1_last_hashes_out.push_back(c1_layers[c1_idx].back());
+            ++c1_idx;
+        }
+
+        use_c2 = !use_c2;
+    }
+
+    return last_hashes_out;
+}
+//----------------------------------------------------------------------------------------------------------------------
+// TODO: template
+CurveTreesV1::LastChunkChildrenToTrim CurveTreesGlobalTree::get_all_last_chunk_children_to_trim(
+    const std::vector<fcmp_pp::curve_trees::TrimLayerInstructions> &trim_instructions)
+{
+    CurveTreesV1::LastChunkChildrenToTrim all_children_to_trim;
+
+    if (trim_instructions.empty())
+        return all_children_to_trim;
+
+    // Leaf layer
+    const auto &trim_leaf_layer_instructions = trim_instructions[0];
+
+    std::vector<Selene::Scalar> leaves_to_trim;
+
+    // TODO: separate function
+    if (trim_leaf_layer_instructions.end_trim_idx > trim_leaf_layer_instructions.start_trim_idx)
+    {
+        std::size_t idx = trim_leaf_layer_instructions.start_trim_idx;
+        MDEBUG("Start trim from idx: " << idx);
+        do
+        {
+            CHECK_AND_ASSERT_THROW_MES(idx % CurveTreesV1::LEAF_TUPLE_SIZE == 0, "expected divisible by leaf tuple size");
+            const std::size_t leaf_tuple_idx = idx / CurveTreesV1::LEAF_TUPLE_SIZE;
+
+            CHECK_AND_ASSERT_THROW_MES(m_tree.leaves.size() > leaf_tuple_idx, "leaf_tuple_idx too high");
+            const auto leaf_tuple = m_curve_trees.leaf_tuple(m_tree.leaves[leaf_tuple_idx]);
+
+            leaves_to_trim.push_back(leaf_tuple.O_x);
+            leaves_to_trim.push_back(leaf_tuple.I_x);
+            leaves_to_trim.push_back(leaf_tuple.C_x);
+
+            idx += CurveTreesV1::LEAF_TUPLE_SIZE;
+        }
+        while (idx < trim_leaf_layer_instructions.end_trim_idx);
+    }
+
+    all_children_to_trim.c2_children.emplace_back(std::move(leaves_to_trim));
+
+    bool parent_is_c2 = false;
+    std::size_t c1_idx = 0;
+    std::size_t c2_idx = 0;
+    for (std::size_t i = 1; i < trim_instructions.size(); ++i)
+    {
+        MDEBUG("Getting trim instructions for layer " << i);
+
+        const auto &trim_layer_instructions = trim_instructions[i];
+
+        const bool need_last_chunk_children_to_trim = trim_layer_instructions.need_last_chunk_children_to_trim;
+        const bool need_last_chunk_remaining_children = trim_layer_instructions.need_last_chunk_remaining_children;
+        const std::size_t start_trim_idx = trim_layer_instructions.start_trim_idx;
+        const std::size_t end_trim_idx = trim_layer_instructions.end_trim_idx;
+
+        if (parent_is_c2)
+        {
+            CHECK_AND_ASSERT_THROW_MES(m_tree.c1_layers.size() > c1_idx, "c1_idx too high");
+
+            auto children_to_trim = get_last_chunk_children_to_trim(
+                m_curve_trees.m_c1,
+                m_tree.c1_layers[c1_idx],
+                need_last_chunk_children_to_trim,
+                need_last_chunk_remaining_children,
+                start_trim_idx,
+                end_trim_idx);
+
+            all_children_to_trim.c2_children.emplace_back(std::move(children_to_trim));
+            ++c1_idx;
+        }
+        else
+        {
+            CHECK_AND_ASSERT_THROW_MES(m_tree.c2_layers.size() > c2_idx, "c2_idx too high");
+
+            auto children_to_trim = get_last_chunk_children_to_trim(
+                m_curve_trees.m_c2,
+                m_tree.c2_layers[c2_idx],
+                need_last_chunk_children_to_trim,
+                need_last_chunk_remaining_children,
+                start_trim_idx,
+                end_trim_idx);
+
+
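Several of the walks above and below repeat the same c1/c2 alternation, each tagged with a TODO to template it. One possible shape for such a helper (hypothetical; the individual walks differ in their starting parity and per-layer body):

#include <cstddef>

// Visit n_layers layers from the bottom up, alternating curves and tracking
// each curve's own layer index; per-layer work is supplied by the callbacks.
template <typename F2, typename F1>
void walk_layers_alternating(const std::size_t n_layers, F2 &&on_c2_layer, F1 &&on_c1_layer)
{
    std::size_t c1_idx = 0, c2_idx = 0;
    bool use_c2 = true; // the layer directly above the leaves is Selene (c2)
    for (std::size_t i = 0; i < n_layers; ++i)
    {
        if (use_c2)
            on_c2_layer(c2_idx++);
        else
            on_c1_layer(c1_idx++);
        use_c2 = !use_c2;
    }
}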
all_children_to_trim.c1_children.emplace_back(std::move(children_to_trim)); + ++c2_idx; + } + + parent_is_c2 = !parent_is_c2; + } + + return all_children_to_trim; +} +//---------------------------------------------------------------------------------------------------------------------- +CurveTreesV1::LastHashes CurveTreesGlobalTree::get_last_hashes_to_trim( + const std::vector &trim_instructions) const +{ + CurveTreesV1::LastHashes last_hashes; + + if (trim_instructions.empty()) + return last_hashes; + + bool parent_is_c2 = true; + std::size_t c1_idx = 0; + std::size_t c2_idx = 0; + for (const auto &trim_layer_instructions : trim_instructions) + { + const std::size_t new_total_parents = trim_layer_instructions.new_total_parents; + CHECK_AND_ASSERT_THROW_MES(new_total_parents > 0, "no new parents"); + + if (parent_is_c2) + { + CHECK_AND_ASSERT_THROW_MES(m_tree.c2_layers.size() > c2_idx, "c2_idx too high"); + const auto &c2_layer = m_tree.c2_layers[c2_idx]; + + CHECK_AND_ASSERT_THROW_MES(c2_layer.size() >= new_total_parents, "not enough c2 parents"); + + last_hashes.c2_last_hashes.push_back(c2_layer[new_total_parents - 1]); + ++c2_idx; + } + else + { + CHECK_AND_ASSERT_THROW_MES(m_tree.c1_layers.size() > c1_idx, "c1_idx too high"); + const auto &c1_layer = m_tree.c1_layers[c1_idx]; + + CHECK_AND_ASSERT_THROW_MES(c1_layer.size() >= new_total_parents, "not enough c1 parents"); + + last_hashes.c1_last_hashes.push_back(c1_layer[new_total_parents - 1]); + ++c1_idx; + } + + parent_is_c2 = !parent_is_c2; + } + + return last_hashes; +} +//---------------------------------------------------------------------------------------------------------------------- +// Logging helpers +//---------------------------------------------------------------------------------------------------------------------- +void CurveTreesGlobalTree::log_last_hashes(const CurveTreesV1::LastHashes &last_hashes) +{ + if (!el::Loggers::allowed(el::Level::Debug, "serialization")) + return; + + const auto &c1_last_hashes = last_hashes.c1_last_hashes; + const auto &c2_last_hashes = last_hashes.c2_last_hashes; + + MDEBUG("Total of " << c1_last_hashes.size() << " Helios layers and " << c2_last_hashes.size() << " Selene layers"); + + bool use_c2 = true; + std::size_t c1_idx = 0; + std::size_t c2_idx = 0; + for (std::size_t i = 0; i < (c1_last_hashes.size() + c2_last_hashes.size()); ++i) + { + if (use_c2) + { + CHECK_AND_ASSERT_THROW_MES(c2_idx < c2_last_hashes.size(), "unexpected c2 layer"); + + const auto &last_hash = c2_last_hashes[c2_idx]; + MDEBUG("c2_idx: " << c2_idx << " , last_hash: " << m_curve_trees.m_c2->to_string(last_hash)); + + ++c2_idx; + } + else + { + CHECK_AND_ASSERT_THROW_MES(c1_idx < c1_last_hashes.size(), "unexpected c1 layer"); + + const auto &last_hash = c1_last_hashes[c1_idx]; + MDEBUG("c1_idx: " << c1_idx << " , last_hash: " << m_curve_trees.m_c1->to_string(last_hash)); + + ++c1_idx; + } + + use_c2 = !use_c2; + } +} +//---------------------------------------------------------------------------------------------------------------------- +void CurveTreesGlobalTree::log_tree_extension(const CurveTreesV1::TreeExtension &tree_extension) +{ + if (!el::Loggers::allowed(el::Level::Debug, "serialization")) + return; + + const auto &c1_extensions = tree_extension.c1_layer_extensions; + const auto &c2_extensions = tree_extension.c2_layer_extensions; + + MDEBUG("Tree extension has " << tree_extension.leaves.tuples.size() << " leaves, " + << c1_extensions.size() << " helios layers, " << c2_extensions.size() << " selene 
layers"); + + MDEBUG("Leaf start idx: " << tree_extension.leaves.start_leaf_tuple_idx); + for (std::size_t i = 0; i < tree_extension.leaves.tuples.size(); ++i) + { + const auto &output_pair = tree_extension.leaves.tuples[i].output_pair; + const auto leaf = m_curve_trees.leaf_tuple(output_pair); + + const auto O_x = m_curve_trees.m_c2->to_string(leaf.O_x); + const auto I_x = m_curve_trees.m_c2->to_string(leaf.I_x); + const auto C_x = m_curve_trees.m_c2->to_string(leaf.C_x); + + MDEBUG("Leaf tuple idx " << (tree_extension.leaves.start_leaf_tuple_idx + (i * CurveTreesV1::LEAF_TUPLE_SIZE)) + << " : { O_x: " << O_x << " , I_x: " << I_x << " , C_x: " << C_x << " }"); + } + + bool use_c2 = true; + std::size_t c1_idx = 0; + std::size_t c2_idx = 0; + for (std::size_t i = 0; i < (c1_extensions.size() + c2_extensions.size()); ++i) + { + if (use_c2) + { + CHECK_AND_ASSERT_THROW_MES(c2_idx < c2_extensions.size(), "unexpected c2 layer"); + + const fcmp_pp::curve_trees::LayerExtension &c2_layer = c2_extensions[c2_idx]; + MDEBUG("Selene tree extension start idx: " << c2_layer.start_idx); + + for (std::size_t j = 0; j < c2_layer.hashes.size(); ++j) + MDEBUG("Child chunk start idx: " << (j + c2_layer.start_idx) << " , hash: " + << m_curve_trees.m_c2->to_string(c2_layer.hashes[j])); + + ++c2_idx; + } + else + { + CHECK_AND_ASSERT_THROW_MES(c1_idx < c1_extensions.size(), "unexpected c1 layer"); + + const fcmp_pp::curve_trees::LayerExtension &c1_layer = c1_extensions[c1_idx]; + MDEBUG("Helios tree extension start idx: " << c1_layer.start_idx); + + for (std::size_t j = 0; j < c1_layer.hashes.size(); ++j) + MDEBUG("Child chunk start idx: " << (j + c1_layer.start_idx) << " , hash: " + << m_curve_trees.m_c1->to_string(c1_layer.hashes[j])); + + ++c1_idx; + } + + use_c2 = !use_c2; + } +} +//---------------------------------------------------------------------------------------------------------------------- +void CurveTreesGlobalTree::log_tree() +{ + if (!el::Loggers::allowed(el::Level::Debug, "serialization")) + return; + + MDEBUG("Tree has " << m_tree.leaves.size() << " leaves, " + << m_tree.c1_layers.size() << " helios layers, " << m_tree.c2_layers.size() << " selene layers"); + + for (std::size_t i = 0; i < m_tree.leaves.size(); ++i) + { + const auto leaf = m_curve_trees.leaf_tuple(m_tree.leaves[i]); + + const auto O_x = m_curve_trees.m_c2->to_string(leaf.O_x); + const auto I_x = m_curve_trees.m_c2->to_string(leaf.I_x); + const auto C_x = m_curve_trees.m_c2->to_string(leaf.C_x); + + MDEBUG("Leaf idx " << i << " : { O_x: " << O_x << " , I_x: " << I_x << " , C_x: " << C_x << " }"); + } + + bool use_c2 = true; + std::size_t c1_idx = 0; + std::size_t c2_idx = 0; + for (std::size_t i = 0; i < (m_tree.c1_layers.size() + m_tree.c2_layers.size()); ++i) + { + if (use_c2) + { + CHECK_AND_ASSERT_THROW_MES(c2_idx < m_tree.c2_layers.size(), "unexpected c2 layer"); + + const CurveTreesGlobalTree::Layer &c2_layer = m_tree.c2_layers[c2_idx]; + MDEBUG("Selene layer size: " << c2_layer.size() << " , tree layer: " << i); + + for (std::size_t j = 0; j < c2_layer.size(); ++j) + MDEBUG("Child chunk start idx: " << j << " , hash: " << m_curve_trees.m_c2->to_string(c2_layer[j])); + + ++c2_idx; + } + else + { + CHECK_AND_ASSERT_THROW_MES(c1_idx < m_tree.c1_layers.size(), "unexpected c1 layer"); + + const CurveTreesGlobalTree::Layer &c1_layer = m_tree.c1_layers[c1_idx]; + MDEBUG("Helios layer size: " << c1_layer.size() << " , tree layer: " << i); + + for (std::size_t j = 0; j < c1_layer.size(); ++j) + MDEBUG("Child chunk start idx: " << 
j << " , hash: " << m_curve_trees.m_c1->to_string(c1_layer[j])); + + ++c1_idx; + } + + use_c2 = !use_c2; + } +} +//---------------------------------------------------------------------------------------------------------------------- +//---------------------------------------------------------------------------------------------------------------------- +// Test +//---------------------------------------------------------------------------------------------------------------------- +TEST(curve_trees, grow_tree) +{ + // Use lower values for chunk width than prod so that we can quickly test a many-layer deep tree + static const std::size_t helios_chunk_width = 3; + static const std::size_t selene_chunk_width = 2; + + static const std::size_t tree_depth = 4; + + LOG_PRINT_L1("Test grow tree with helios chunk width " << helios_chunk_width + << ", selene chunk width " << selene_chunk_width << ", tree depth " << tree_depth); + + INIT_CURVE_TREES_TEST(helios_chunk_width, selene_chunk_width, tree_depth); + + // First initialize the tree with init_leaves + BEGIN_INIT_TREE_ITER(curve_trees) + + // Then extend the tree with ext_leaves + for (std::size_t ext_leaves = 1; (init_leaves + ext_leaves) <= min_leaves_needed_for_tree_depth; ++ext_leaves) + { + // Tree in memory + // Copy the already existing global tree + CurveTreesGlobalTree tree_copy(global_tree); + ASSERT_TRUE(tree_copy.grow_tree(init_leaves, ext_leaves)); + + // Tree in db + // Copy the already existing db + unit_test::BlockchainLMDBTest copy_db = *test_db.copy_db(curve_trees); + INIT_BLOCKCHAIN_LMDB_TEST_DB(copy_db, nullptr); + ASSERT_TRUE(grow_tree_db(init_leaves, ext_leaves, curve_trees, copy_db)); + } + + END_INIT_TREE_ITER() +} +//---------------------------------------------------------------------------------------------------------------------- +TEST(curve_trees, trim_tree) +{ + // Use lower values for chunk width than prod so that we can quickly test a many-layer deep tree + static const std::size_t helios_chunk_width = 3; + static const std::size_t selene_chunk_width = 3; + + static const std::size_t tree_depth = 4; + + LOG_PRINT_L1("Test trim tree with helios chunk width " << helios_chunk_width + << ", selene chunk width " << selene_chunk_width << ", tree depth " << tree_depth); + + INIT_CURVE_TREES_TEST(helios_chunk_width, selene_chunk_width, tree_depth); + + // First initialize the tree with init_leaves + BEGIN_INIT_TREE_ITER(curve_trees) + + // Then trim by trim_leaves + for (std::size_t trim_leaves = 1; trim_leaves <= min_leaves_needed_for_tree_depth; ++trim_leaves) + { + if (trim_leaves > init_leaves) + continue; + + // Tree in memory + // Copy the already existing global tree + CurveTreesGlobalTree tree_copy(global_tree); + ASSERT_TRUE(tree_copy.trim_tree(init_leaves, trim_leaves)); + + // Tree in db + // Copy the already existing db + unit_test::BlockchainLMDBTest copy_db = *test_db.copy_db(curve_trees); + INIT_BLOCKCHAIN_LMDB_TEST_DB(copy_db, nullptr); + ASSERT_TRUE(trim_tree_db(init_leaves, trim_leaves, copy_db)); + } + + END_INIT_TREE_ITER() +} +//---------------------------------------------------------------------------------------------------------------------- +TEST(curve_trees, trim_tree_then_grow) +{ + // Use lower values for chunk width than prod so that we can quickly test a many-layer deep tree + static const std::size_t helios_chunk_width = 3; + static const std::size_t selene_chunk_width = 3; + + static const std::size_t tree_depth = 2; + + static const std::size_t grow_after_trim = 1; + + 
LOG_PRINT_L1("Test trim tree with helios chunk width " << helios_chunk_width + << ", selene chunk width " << selene_chunk_width << ", tree depth " << tree_depth + << ", then grow " << grow_after_trim << " leaf/leaves"); + + INIT_CURVE_TREES_TEST(helios_chunk_width, selene_chunk_width, tree_depth); + + // First initialize the tree with init_leaves + BEGIN_INIT_TREE_ITER(curve_trees) + + // Then trim by trim_leaves + for (std::size_t trim_leaves = 1; trim_leaves <= min_leaves_needed_for_tree_depth; ++trim_leaves) + { + if (trim_leaves > init_leaves) + continue; + + // Tree in memory + // Copy the already existing global tree + CurveTreesGlobalTree tree_copy(global_tree); + ASSERT_TRUE(tree_copy.trim_tree(init_leaves, trim_leaves)); + ASSERT_TRUE(tree_copy.grow_tree(init_leaves - trim_leaves, grow_after_trim)); + + // Tree in db + // Copy the already existing db + unit_test::BlockchainLMDBTest copy_db = *test_db.copy_db(curve_trees); + INIT_BLOCKCHAIN_LMDB_TEST_DB(copy_db, nullptr); + ASSERT_TRUE(trim_tree_db(init_leaves, trim_leaves, copy_db)); + ASSERT_TRUE(grow_tree_db(init_leaves - trim_leaves, grow_after_trim, curve_trees, copy_db)); + } + + END_INIT_TREE_ITER() +} +//---------------------------------------------------------------------------------------------------------------------- +// Make sure the result of hash_trim is the same as the equivalent hash_grow excluding the trimmed children +TEST(curve_trees, hash_trim) +{ + const auto curve_trees = fcmp_pp::curve_trees::curve_trees_v1(); + + // 1. Trim 1 + { + // Start by hashing: {selene_scalar_0, selene_scalar_1} + // Then trim to: {selene_scalar_0} + const auto selene_scalar_0 = generate_random_selene_scalar(); + const auto selene_scalar_1 = generate_random_selene_scalar(); + + // Get the initial hash of the 2 scalars + std::vector init_children{selene_scalar_0, selene_scalar_1}; + const auto init_hash = curve_trees->m_c2->hash_grow( + /*existing_hash*/ curve_trees->m_c2->hash_init_point(), + /*offset*/ 0, + /*existing_child_at_offset*/ curve_trees->m_c2->zero_scalar(), + /*children*/ Selene::Chunk{init_children.data(), init_children.size()}); + + // Trim selene_scalar_1 + const auto &trimmed_children = Selene::Chunk{init_children.data() + 1, 1}; + const auto trim_res = curve_trees->m_c2->hash_trim( + init_hash, + 1, + trimmed_children, + curve_trees->m_c2->zero_scalar()); + const auto trim_res_bytes = curve_trees->m_c2->to_bytes(trim_res); + + // Now compare to calling hash_grow{selene_scalar_0} + std::vector remaining_children{selene_scalar_0}; + const auto grow_res = curve_trees->m_c2->hash_grow( + /*existing_hash*/ curve_trees->m_c2->hash_init_point(), + /*offset*/ 0, + /*existing_child_at_offset*/ curve_trees->m_c2->zero_scalar(), + /*children*/ Selene::Chunk{remaining_children.data(), remaining_children.size()}); + const auto grow_res_bytes = curve_trees->m_c2->to_bytes(grow_res); + + ASSERT_EQ(trim_res_bytes, grow_res_bytes); + } + + // 2. 
Trim 2 + { + // Start by hashing: {selene_scalar_0, selene_scalar_1, selene_scalar_2} + // Then trim to: {selene_scalar_0} + const auto selene_scalar_0 = generate_random_selene_scalar(); + const auto selene_scalar_1 = generate_random_selene_scalar(); + const auto selene_scalar_2 = generate_random_selene_scalar(); + + // Get the initial hash of the 3 selene scalars + std::vector init_children{selene_scalar_0, selene_scalar_1, selene_scalar_2}; + const auto init_hash = curve_trees->m_c2->hash_grow( + /*existing_hash*/ curve_trees->m_c2->hash_init_point(), + /*offset*/ 0, + /*existing_child_at_offset*/ curve_trees->m_c2->zero_scalar(), + /*children*/ Selene::Chunk{init_children.data(), init_children.size()}); + + // Trim the initial result by 2 children + const auto &trimmed_children = Selene::Chunk{init_children.data() + 1, 2}; + const auto trim_res = curve_trees->m_c2->hash_trim( + init_hash, + 1, + trimmed_children, + curve_trees->m_c2->zero_scalar()); + const auto trim_res_bytes = curve_trees->m_c2->to_bytes(trim_res); + + // Now compare to calling hash_grow{selene_scalar_0} + std::vector remaining_children{selene_scalar_0}; + const auto grow_res = curve_trees->m_c2->hash_grow( + /*existing_hash*/ curve_trees->m_c2->hash_init_point(), + /*offset*/ 0, + /*existing_child_at_offset*/ curve_trees->m_c2->zero_scalar(), + /*children*/ Selene::Chunk{remaining_children.data(), remaining_children.size()}); + const auto grow_res_bytes = curve_trees->m_c2->to_bytes(grow_res); + + ASSERT_EQ(trim_res_bytes, grow_res_bytes); + } + + // 3. Change 1 + { + // Start by hashing: {selene_scalar_0, selene_scalar_1} + // Then change to: {selene_scalar_0, selene_scalar_2} + const auto selene_scalar_0 = generate_random_selene_scalar(); + const auto selene_scalar_1 = generate_random_selene_scalar(); + + // Get the initial hash of the 2 selene scalars + std::vector init_children{selene_scalar_0, selene_scalar_1}; + const auto init_hash = curve_trees->m_c2->hash_grow( + /*existing_hash*/ curve_trees->m_c2->hash_init_point(), + /*offset*/ 0, + /*existing_child_at_offset*/ curve_trees->m_c2->zero_scalar(), + /*children*/ Selene::Chunk{init_children.data(), init_children.size()}); + + const auto selene_scalar_2 = generate_random_selene_scalar(); + + // Trim the 2nd child and grow with new child + const auto &trimmed_children = Selene::Chunk{init_children.data() + 1, 1}; + const auto trim_res = curve_trees->m_c2->hash_trim( + init_hash, + 1, + trimmed_children, + selene_scalar_2); + const auto trim_res_bytes = curve_trees->m_c2->to_bytes(trim_res); + + // Now compare to calling hash_grow{selene_scalar_0, selene_scalar_2} + std::vector remaining_children{selene_scalar_0, selene_scalar_2}; + const auto grow_res = curve_trees->m_c2->hash_grow( + /*existing_hash*/ curve_trees->m_c2->hash_init_point(), + /*offset*/ 0, + /*existing_child_at_offset*/ curve_trees->m_c2->zero_scalar(), + /*children*/ Selene::Chunk{remaining_children.data(), remaining_children.size()}); + const auto grow_res_bytes = curve_trees->m_c2->to_bytes(grow_res); + + ASSERT_EQ(trim_res_bytes, grow_res_bytes); + } + + // 4. 
Trim 2 and grow back by 1 + { + // Start by hashing: {selene_scalar_0, selene_scalar_1, selene_scalar_2} + // Then trim+grow to: {selene_scalar_0, selene_scalar_3} + const auto selene_scalar_0 = generate_random_selene_scalar(); + const auto selene_scalar_1 = generate_random_selene_scalar(); + const auto selene_scalar_2 = generate_random_selene_scalar(); + + // Get the initial hash of the 3 selene scalars + std::vector init_children{selene_scalar_0, selene_scalar_1, selene_scalar_2}; + const auto init_hash = curve_trees->m_c2->hash_grow( + /*existing_hash*/ curve_trees->m_c2->hash_init_point(), + /*offset*/ 0, + /*existing_child_at_offset*/ curve_trees->m_c2->zero_scalar(), + /*children*/ Selene::Chunk{init_children.data(), init_children.size()}); + + const auto selene_scalar_3 = generate_random_selene_scalar(); + + // Trim the initial result by 2 children+grow by 1 + const auto &trimmed_children = Selene::Chunk{init_children.data() + 1, 2}; + const auto trim_res = curve_trees->m_c2->hash_trim( + init_hash, + 1, + trimmed_children, + selene_scalar_3); + const auto trim_res_bytes = curve_trees->m_c2->to_bytes(trim_res); + + // Now compare to calling hash_grow{selene_scalar_0, selene_scalar_3} + std::vector remaining_children{selene_scalar_0, selene_scalar_3}; + const auto grow_res = curve_trees->m_c2->hash_grow( + /*existing_hash*/ curve_trees->m_c2->hash_init_point(), + /*offset*/ 0, + /*existing_child_at_offset*/ curve_trees->m_c2->zero_scalar(), + /*children*/ Selene::Chunk{remaining_children.data(), remaining_children.size()}); + const auto grow_res_bytes = curve_trees->m_c2->to_bytes(grow_res); + + ASSERT_EQ(trim_res_bytes, grow_res_bytes); + } +} +//---------------------------------------------------------------------------------------------------------------------- +TEST(curve_trees, hash_grow) +{ + const auto curve_trees = fcmp_pp::curve_trees::curve_trees_v1(); + + // Start by hashing: {selene_scalar_0, selene_scalar_1} + // Then grow 1: {selene_scalar_0, selene_scalar_1, selene_scalar_2} + // Then grow 1: {selene_scalar_0, selene_scalar_1, selene_scalar_2, selene_scalar_3} + const auto selene_scalar_0 = generate_random_selene_scalar(); + const auto selene_scalar_1 = generate_random_selene_scalar(); + + // Get the initial hash of the 2 selene scalars + std::vector all_children{selene_scalar_0, selene_scalar_1}; + const auto init_hash = curve_trees->m_c2->hash_grow( + /*existing_hash*/ curve_trees->m_c2->hash_init_point(), + /*offset*/ 0, + /*existing_child_at_offset*/ curve_trees->m_c2->zero_scalar(), + /*children*/ Selene::Chunk{all_children.data(), all_children.size()}); + + // Extend with a new child + const auto selene_scalar_2 = generate_random_selene_scalar(); + std::vector new_children{selene_scalar_2}; + const auto ext_hash = curve_trees->m_c2->hash_grow( + init_hash, + all_children.size(), + curve_trees->m_c2->zero_scalar(), + Selene::Chunk{new_children.data(), new_children.size()}); + const auto ext_hash_bytes = curve_trees->m_c2->to_bytes(ext_hash); + + // Now compare to calling hash_grow{selene_scalar_0, selene_scalar_1, selene_scalar_2} + all_children.push_back(selene_scalar_2); + const auto grow_res = curve_trees->m_c2->hash_grow( + /*existing_hash*/ curve_trees->m_c2->hash_init_point(), + /*offset*/ 0, + /*existing_child_at_offset*/ curve_trees->m_c2->zero_scalar(), + /*children*/ Selene::Chunk{all_children.data(), all_children.size()}); + const auto grow_res_bytes = curve_trees->m_c2->to_bytes(grow_res); + + ASSERT_EQ(ext_hash_bytes, grow_res_bytes); + + // Extend 
again with a new child + const auto selene_scalar_3 = generate_random_selene_scalar(); + new_children.clear(); + new_children = {selene_scalar_3}; + const auto ext_hash2 = curve_trees->m_c2->hash_grow( + ext_hash, + all_children.size(), + curve_trees->m_c2->zero_scalar(), + Selene::Chunk{new_children.data(), new_children.size()}); + const auto ext_hash_bytes2 = curve_trees->m_c2->to_bytes(ext_hash2); + + // Now compare to calling hash_grow{selene_scalar_0, selene_scalar_1, selene_scalar_2, selene_scalar_3} + all_children.push_back(selene_scalar_3); + const auto grow_res2 = curve_trees->m_c2->hash_grow( + /*existing_hash*/ curve_trees->m_c2->hash_init_point(), + /*offset*/ 0, + /*existing_child_at_offset*/ curve_trees->m_c2->zero_scalar(), + /*children*/ Selene::Chunk{all_children.data(), all_children.size()}); + const auto grow_res_bytes2 = curve_trees->m_c2->to_bytes(grow_res2); + + ASSERT_EQ(ext_hash_bytes2, grow_res_bytes2); +} diff --git a/tests/unit_tests/curve_trees.h b/tests/unit_tests/curve_trees.h new file mode 100644 index 00000000000..4706a50581d --- /dev/null +++ b/tests/unit_tests/curve_trees.h @@ -0,0 +1,128 @@ +// Copyright (c) 2014, The Monero Project +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without modification, are +// permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of +// conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list +// of conditions and the following disclaimer in the documentation and/or other +// materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be +// used to endorse or promote products derived from this software without specific +// prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY +// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL +// THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF +// THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
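Each hash_trim case above must agree with re-growing from scratch because the chunk hash is additive in its children (a Pedersen-style multiscalar sum with one generator per child slot). A toy integer model of that identity, purely for illustration:

#include <cstddef>
#include <cstdint>
#include <vector>

// Stand-in for child_i * G_i, with the integer (offset + i + 1) playing the
// role of the per-slot generator.
std::uint64_t toy_grow(std::uint64_t h, const std::size_t offset, const std::vector<std::uint64_t> &kids)
{
    for (std::size_t i = 0; i < kids.size(); ++i)
        h += kids[i] * (offset + i + 1);
    return h;
}

std::uint64_t toy_trim(std::uint64_t h, const std::size_t offset, const std::vector<std::uint64_t> &kids)
{
    for (std::size_t i = 0; i < kids.size(); ++i)
        h -= kids[i] * (offset + i + 1); // removing a child subtracts its term
    return h;
}

// toy_trim(toy_grow(0, 0, {a, b, c}), 1, {b, c}) == toy_grow(0, 0, {a})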
+
+#pragma once
+
+#include "fcmp_pp/curve_trees.h"
+#include "fcmp_pp/tower_cycle.h"
+#include "unit_tests_utils.h"
+
+using Helios = fcmp_pp::curve_trees::Helios;
+using Selene = fcmp_pp::curve_trees::Selene;
+using CurveTreesV1 = fcmp_pp::curve_trees::CurveTreesV1;
+
+//----------------------------------------------------------------------------------------------------------------------
+#define INIT_CURVE_TREES_TEST(helios_chunk_width, selene_chunk_width, tree_depth) \
+    static_assert(helios_chunk_width > 1, "helios width must be > 1"); \
+    static_assert(selene_chunk_width > 1, "selene width must be > 1"); \
+    const auto curve_trees = fcmp_pp::curve_trees::curve_trees_v1(helios_chunk_width, selene_chunk_width); \
+    \
+    /* Number of leaves required for tree to reach given depth */ \
+    std::size_t min_leaves_needed_for_tree_depth = selene_chunk_width; \
+    for (std::size_t i = 1; i < tree_depth; ++i) \
+    { \
+        const std::size_t width = i % 2 == 0 ? selene_chunk_width : helios_chunk_width; \
+        min_leaves_needed_for_tree_depth *= width; \
+    } \
+    \
+    /* Increment to test for off-by-1 */ \
+    ++min_leaves_needed_for_tree_depth; \
+    \
+    unit_test::BlockchainLMDBTest test_db; \
+//----------------------------------------------------------------------------------------------------------------------
+
+// Helper class to read/write a global tree in memory. It's only used in testing because normally the tree isn't kept
+// in memory (it's stored in the db)
+class CurveTreesGlobalTree
+{
+public:
+    CurveTreesGlobalTree(CurveTreesV1 &curve_trees): m_curve_trees(curve_trees) {};
+
+//member structs
+public:
+    template <typename C>
+    using Layer = std::vector<typename C::Point>;
+
+    // A complete tree, useful for testing (don't want to keep the whole tree in memory during normal operation)
+    struct Tree final
+    {
+        std::vector<fcmp_pp::curve_trees::OutputPair> leaves;
+        std::vector<Layer<Helios>> c1_layers;
+        std::vector<Layer<Selene>> c2_layers;
+    };
+
+//public member functions
+public:
+    // Read the in-memory tree and get the number of leaf tuples
+    std::size_t get_num_leaf_tuples() const;
+
+    // Grow tree by provided new_n_leaf_tuples
+    bool grow_tree(const std::size_t expected_old_n_leaf_tuples, const std::size_t new_n_leaf_tuples);
+
+    // Trim the provided number of leaf tuples from the tree
+    bool trim_tree(const std::size_t expected_old_n_leaf_tuples, const std::size_t trim_n_leaf_tuples);
+
+    // Validate the in-memory tree by re-hashing every layer, starting from root and working down to leaf layer
+    bool audit_tree(const std::size_t expected_n_leaf_tuples) const;
+
+    // Get the path in the tree of the provided leaf idx
+    fcmp_pp::curve_trees::PathV1 get_path_at_leaf_idx(const std::size_t leaf_idx) const;
+
+    // Hint: use num leaf tuples in the tree to determine the type
+    std::array<uint8_t, 32UL> get_tree_root() const;
+
+private:
+    // Use the tree extension to extend the in-memory tree
+    void extend_tree(const CurveTreesV1::TreeExtension &tree_extension);
+
+    // Use the tree reduction to reduce the in-memory tree
+    void reduce_tree(const CurveTreesV1::TreeReduction &tree_reduction);
+
+    // Read the in-memory tree and get the last hashes from each layer in the tree
+    CurveTreesV1::LastHashes get_last_hashes() const;
+
+    // Read the in-memory tree and get data from what will be the last chunks after trimming the tree to the provided
+    // number of leaves
+    // - This function is useful to collect all tree data necessary to perform the actual trim operation
+    // - This function can return elems from each last chunk that will need to be trimmed
+    CurveTreesV1::LastHashes get_last_hashes_to_trim(
+
const std::vector &trim_instructions) const; + + CurveTreesV1::LastChunkChildrenToTrim get_all_last_chunk_children_to_trim( + const std::vector &trim_instructions); + + // logging helpers + void log_last_hashes(const CurveTreesV1::LastHashes &last_hashes); + void log_tree_extension(const CurveTreesV1::TreeExtension &tree_extension); + void log_tree(); + +private: + CurveTreesV1 &m_curve_trees; + Tree m_tree = Tree{}; +}; + diff --git a/tests/unit_tests/fcmp_pp.cpp b/tests/unit_tests/fcmp_pp.cpp new file mode 100644 index 00000000000..fc879a8248a --- /dev/null +++ b/tests/unit_tests/fcmp_pp.cpp @@ -0,0 +1,64 @@ +// Copyright (c) 2014, The Monero Project +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without modification, are +// permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of +// conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list +// of conditions and the following disclaimer in the documentation and/or other +// materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be +// used to endorse or promote products derived from this software without specific +// prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY +// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL +// THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF +// THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
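For reference, the leaf count computed by INIT_CURVE_TREES_TEST above, written out as a plain function (name hypothetical): widths alternate per layer, and one extra leaf probes the off-by-one boundary.

#include <cstddef>

std::size_t min_leaves_for_tree_depth(
    const std::size_t tree_depth, const std::size_t helios_width, const std::size_t selene_width)
{
    std::size_t leaves = selene_width; // depth 1: one full selene chunk of leaf tuples
    for (std::size_t i = 1; i < tree_depth; ++i)
        leaves *= (i % 2 == 0) ? selene_width : helios_width; // widths alternate up the tree
    return leaves + 1; // +1 tests the layer-boundary off-by-one
}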
+ +#include "gtest/gtest.h" + +#include "cryptonote_basic/cryptonote_format_utils.h" +#include "curve_trees.h" +#include "misc_log_ex.h" +#include "ringct/rctOps.h" + + +//---------------------------------------------------------------------------------------------------------------------- +TEST(fcmp_pp, prove) +{ + static const std::size_t helios_chunk_width = fcmp_pp::curve_trees::HELIOS_CHUNK_WIDTH; + static const std::size_t selene_chunk_width = fcmp_pp::curve_trees::SELENE_CHUNK_WIDTH; + + static const std::size_t tree_depth = 3; + + LOG_PRINT_L1("Test prove with helios chunk width " << helios_chunk_width + << ", selene chunk width " << selene_chunk_width << ", tree depth " << tree_depth); + + INIT_CURVE_TREES_TEST(helios_chunk_width, selene_chunk_width, tree_depth); + + LOG_PRINT_L1("Initializing tree with " << min_leaves_needed_for_tree_depth << " leaves"); + + // Init tree in memory + CurveTreesGlobalTree global_tree(*curve_trees); + ASSERT_TRUE(global_tree.grow_tree(0, min_leaves_needed_for_tree_depth)); + + LOG_PRINT_L1("Finished initializing tree with " << min_leaves_needed_for_tree_depth << " leaves"); + + // Create proof for every leaf in the tree + for (std::size_t leaf_idx = 0; leaf_idx < global_tree.get_num_leaf_tuples(); ++leaf_idx) + { + const auto path = global_tree.get_path_at_leaf_idx(leaf_idx); + } +} +//---------------------------------------------------------------------------------------------------------------------- diff --git a/tests/unit_tests/hardfork.cpp b/tests/unit_tests/hardfork.cpp index 56958a0d85f..bd97784aabc 100644 --- a/tests/unit_tests/hardfork.cpp +++ b/tests/unit_tests/hardfork.cpp @@ -35,6 +35,7 @@ #include "cryptonote_basic/cryptonote_format_utils.h" #include "cryptonote_basic/hardfork.h" #include "blockchain_db/testdb.h" +#include "fcmp_pp/curve_trees.h" using namespace cryptonote; @@ -54,6 +55,7 @@ class TestDB: public cryptonote::BaseTestDB { , const uint64_t& coins_generated , uint64_t num_rct_outs , const crypto::hash& blk_hash + , const fcmp_pp::curve_trees::OutputsByUnlockBlock& outs_by_unlock_block ) override { blocks.push_back(blk); } @@ -107,20 +109,20 @@ TEST(major, Only) ASSERT_FALSE(hf.add(mkblock(0, 2), 0)); ASSERT_FALSE(hf.add(mkblock(2, 2), 0)); ASSERT_TRUE(hf.add(mkblock(1, 2), 0)); - db.add_block(mkblock(1, 1), 0, 0, 0, 0, 0, crypto::hash()); + db.add_block(mkblock(1, 1), 0, 0, 0, 0, 0, crypto::hash(), {}); // block height 1, only version 1 is accepted ASSERT_FALSE(hf.add(mkblock(0, 2), 1)); ASSERT_FALSE(hf.add(mkblock(2, 2), 1)); ASSERT_TRUE(hf.add(mkblock(1, 2), 1)); - db.add_block(mkblock(1, 1), 0, 0, 0, 0, 0, crypto::hash()); + db.add_block(mkblock(1, 1), 0, 0, 0, 0, 0, crypto::hash(), {}); // block height 2, only version 2 is accepted ASSERT_FALSE(hf.add(mkblock(0, 2), 2)); ASSERT_FALSE(hf.add(mkblock(1, 2), 2)); ASSERT_FALSE(hf.add(mkblock(3, 2), 2)); ASSERT_TRUE(hf.add(mkblock(2, 2), 2)); - db.add_block(mkblock(2, 1), 0, 0, 0, 0, 0, crypto::hash()); + db.add_block(mkblock(2, 1), 0, 0, 0, 0, 0, crypto::hash(), {}); } TEST(empty_hardforks, Success) @@ -134,7 +136,7 @@ TEST(empty_hardforks, Success) ASSERT_TRUE(hf.get_state(time(NULL) + 3600*24*400) == HardFork::Ready); for (uint64_t h = 0; h <= 10; ++h) { - db.add_block(mkblock(hf, h, 1), 0, 0, 0, 0, 0, crypto::hash()); + db.add_block(mkblock(hf, h, 1), 0, 0, 0, 0, 0, crypto::hash(), {}); ASSERT_TRUE(hf.add(db.get_block_from_height(h), h)); } ASSERT_EQ(hf.get(0), 1); @@ -168,14 +170,14 @@ TEST(check_for_height, Success) for (uint64_t h = 0; h <= 4; ++h) { 
ASSERT_TRUE(hf.check_for_height(mkblock(1, 1), h)); ASSERT_FALSE(hf.check_for_height(mkblock(2, 2), h)); // block version is too high - db.add_block(mkblock(hf, h, 1), 0, 0, 0, 0, 0, crypto::hash()); + db.add_block(mkblock(hf, h, 1), 0, 0, 0, 0, 0, crypto::hash(), {}); ASSERT_TRUE(hf.add(db.get_block_from_height(h), h)); } for (uint64_t h = 5; h <= 10; ++h) { ASSERT_FALSE(hf.check_for_height(mkblock(1, 1), h)); // block version is too low ASSERT_TRUE(hf.check_for_height(mkblock(2, 2), h)); - db.add_block(mkblock(hf, h, 2), 0, 0, 0, 0, 0, crypto::hash()); + db.add_block(mkblock(hf, h, 2), 0, 0, 0, 0, 0, crypto::hash(), {}); ASSERT_TRUE(hf.add(db.get_block_from_height(h), h)); } } @@ -192,19 +194,19 @@ TEST(get, next_version) for (uint64_t h = 0; h <= 4; ++h) { ASSERT_EQ(2, hf.get_next_version()); - db.add_block(mkblock(hf, h, 1), 0, 0, 0, 0, 0, crypto::hash()); + db.add_block(mkblock(hf, h, 1), 0, 0, 0, 0, 0, crypto::hash(), {}); ASSERT_TRUE(hf.add(db.get_block_from_height(h), h)); } for (uint64_t h = 5; h <= 9; ++h) { ASSERT_EQ(4, hf.get_next_version()); - db.add_block(mkblock(hf, h, 2), 0, 0, 0, 0, 0, crypto::hash()); + db.add_block(mkblock(hf, h, 2), 0, 0, 0, 0, 0, crypto::hash(), {}); ASSERT_TRUE(hf.add(db.get_block_from_height(h), h)); } for (uint64_t h = 10; h <= 15; ++h) { ASSERT_EQ(4, hf.get_next_version()); - db.add_block(mkblock(hf, h, 4), 0, 0, 0, 0, 0, crypto::hash()); + db.add_block(mkblock(hf, h, 4), 0, 0, 0, 0, 0, crypto::hash(), {}); ASSERT_TRUE(hf.add(db.get_block_from_height(h), h)); } } @@ -245,7 +247,7 @@ TEST(steps_asap, Success) hf.init(); for (uint64_t h = 0; h < 10; ++h) { - db.add_block(mkblock(hf, h, 9), 0, 0, 0, 0, 0, crypto::hash()); + db.add_block(mkblock(hf, h, 9), 0, 0, 0, 0, 0, crypto::hash(), {}); ASSERT_TRUE(hf.add(db.get_block_from_height(h), h)); } @@ -272,7 +274,7 @@ TEST(steps_1, Success) hf.init(); for (uint64_t h = 0 ; h < 10; ++h) { - db.add_block(mkblock(hf, h, h+1), 0, 0, 0, 0, 0, crypto::hash()); + db.add_block(mkblock(hf, h, h+1), 0, 0, 0, 0, 0, crypto::hash(), {}); ASSERT_TRUE(hf.add(db.get_block_from_height(h), h)); } @@ -297,7 +299,7 @@ TEST(reorganize, Same) // index 0 1 2 3 4 5 6 7 8 9 static const uint8_t block_versions[] = { 1, 1, 4, 4, 7, 7, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9 }; for (uint64_t h = 0; h < 20; ++h) { - db.add_block(mkblock(hf, h, block_versions[h]), 0, 0, 0, 0, 0, crypto::hash()); + db.add_block(mkblock(hf, h, block_versions[h]), 0, 0, 0, 0, 0, crypto::hash(), {}); ASSERT_TRUE(hf.add(db.get_block_from_height(h), h)); } @@ -328,7 +330,7 @@ TEST(reorganize, Changed) static const uint8_t block_versions[] = { 1, 1, 4, 4, 7, 7, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9 }; static const uint8_t expected_versions[] = { 1, 1, 1, 1, 1, 1, 4, 4, 7, 7, 9, 9, 9, 9, 9, 9 }; for (uint64_t h = 0; h < 16; ++h) { - db.add_block(mkblock(hf, h, block_versions[h]), 0, 0, 0, 0, 0, crypto::hash()); + db.add_block(mkblock(hf, h, block_versions[h]), 0, 0, 0, 0, 0, crypto::hash(), {}); ASSERT_TRUE (hf.add(db.get_block_from_height(h), h)); } @@ -348,7 +350,7 @@ TEST(reorganize, Changed) ASSERT_EQ(db.height(), 3); hf.reorganize_from_block_height(2); for (uint64_t h = 3; h < 16; ++h) { - db.add_block(mkblock(hf, h, block_versions_new[h]), 0, 0, 0, 0, 0, crypto::hash()); + db.add_block(mkblock(hf, h, block_versions_new[h]), 0, 0, 0, 0, 0, crypto::hash(), {}); bool ret = hf.add(db.get_block_from_height(h), h); ASSERT_EQ (ret, h < 15); } @@ -372,7 +374,7 @@ TEST(voting, threshold) for (uint64_t h = 0; h <= 8; ++h) { uint8_t v = 1 + !!(h % 8); - 
db.add_block(mkblock(hf, h, v), 0, 0, 0, 0, 0, crypto::hash()); + db.add_block(mkblock(hf, h, v), 0, 0, 0, 0, 0, crypto::hash(), {}); bool ret = hf.add(db.get_block_from_height(h), h); if (h >= 8 && threshold == 87) { // for threshold 87, we reach the threshold at height 7, so from height 8, hard fork to version 2, but 8 tries to add 1 @@ -406,7 +408,7 @@ TEST(voting, different_thresholds) static const uint8_t expected_versions[] = { 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4 }; for (uint64_t h = 0; h < sizeof(block_versions) / sizeof(block_versions[0]); ++h) { - db.add_block(mkblock(hf, h, block_versions[h]), 0, 0, 0, 0, 0, crypto::hash()); + db.add_block(mkblock(hf, h, block_versions[h]), 0, 0, 0, 0, 0, crypto::hash(), {}); bool ret = hf.add(db.get_block_from_height(h), h); ASSERT_EQ(ret, true); } @@ -459,7 +461,7 @@ TEST(voting, info) ASSERT_EQ(expected_thresholds[h], threshold); ASSERT_EQ(4, voting); - db.add_block(mkblock(hf, h, block_versions[h]), 0, 0, 0, 0, 0, crypto::hash()); + db.add_block(mkblock(hf, h, block_versions[h]), 0, 0, 0, 0, 0, crypto::hash(), {}); ASSERT_TRUE(hf.add(db.get_block_from_height(h), h)); } } @@ -522,7 +524,7 @@ TEST(reorganize, changed) #define ADD(v, h, a) \ do { \ cryptonote::block b = mkblock(hf, h, v); \ - db.add_block(b, 0, 0, 0, 0, 0, crypto::hash()); \ + db.add_block(b, 0, 0, 0, 0, 0, crypto::hash(), {}); \ ASSERT_##a(hf.add(b, h)); \ } while(0) #define ADD_TRUE(v, h) ADD(v, h, TRUE) diff --git a/tests/unit_tests/long_term_block_weight.cpp b/tests/unit_tests/long_term_block_weight.cpp index f7ef262e61e..07d33fb7233 100644 --- a/tests/unit_tests/long_term_block_weight.cpp +++ b/tests/unit_tests/long_term_block_weight.cpp @@ -58,6 +58,7 @@ class TestDB: public cryptonote::BaseTestDB , const uint64_t& coins_generated , uint64_t num_rct_outs , const crypto::hash& blk_hash + , const fcmp_pp::curve_trees::OutputsByUnlockBlock& outs_by_unlock_block ) override { blocks.push_back({block_weight, long_term_block_weight}); } diff --git a/tests/unit_tests/serialization.cpp b/tests/unit_tests/serialization.cpp index 1ca9687a84d..c046d62b6b3 100644 --- a/tests/unit_tests/serialization.cpp +++ b/tests/unit_tests/serialization.cpp @@ -1304,3 +1304,185 @@ TEST(Serialization, tuple_many_tuples) EXPECT_EQ(tupler, tupler_recovered); } + +TEST(Serialization, tx_fcmp_pp) +{ + using namespace cryptonote; + + const std::size_t n_inputs = 2; + const std::size_t n_outputs = 3; + const uint8_t curve_trees_tree_depth = 3; + + const std::size_t proof_len = fcmp_pp::proof_len(n_inputs, curve_trees_tree_depth); + + const auto make_dummy_fcmp_pp_tx = [curve_trees_tree_depth, proof_len]() -> transaction + { + transaction tx; + + tx.invalidate_hashes(); + tx.set_null(); + + tx.version = 2; + tx.rct_signatures.type = rct::RCTTypeFcmpPlusPlus; + + // Set inputs + txin_to_key txin_to_key1; + txin_to_key1.amount = 1; + memset(&txin_to_key1.k_image, 0x42, sizeof(crypto::key_image)); + txin_to_key1.key_offsets.clear(); + tx.vin.clear(); + for (size_t i = 0; i < n_inputs; ++i) + tx.vin.push_back(txin_to_key1); + + // Set outputs + const uint64_t amount = 1; + std::vector out_amounts; + tx_out vout; + set_tx_out(amount, crypto::public_key{}, true, crypto::view_tag{}, vout); + for (size_t i = 0; i < n_outputs; ++i) + { + tx.vout.push_back(vout); + out_amounts.push_back(amount); + } + + // 1 ecdhTuple for each output + rct::ecdhTuple ecdhInfo; + memset(&ecdhInfo.mask, 0x01, sizeof(rct::key)); + memset(&ecdhInfo.amount, 0x02, sizeof(rct::key)); + for (size_t i = 0; i < 
n_outputs; ++i) + tx.rct_signatures.ecdhInfo.push_back(ecdhInfo); + + // 1 outPk for each output + rct::ctkey ctkey; + memset(&ctkey.dest, 0x01, sizeof(rct::key)); + memset(&ctkey.mask, 0x02, sizeof(rct::key)); + for (size_t i = 0; i < n_outputs; ++i) + tx.rct_signatures.outPk.push_back(ctkey); + + // 1 bp+ + rct::keyV C, masks; + tx.rct_signatures.p.bulletproofs_plus.push_back(rct::make_dummy_bulletproof_plus(out_amounts, C, masks)); + + // 1 pseudoOut for each input + const rct::key pseudoOut{0x01}; + for (size_t i = 0; i < n_inputs; ++i) + tx.rct_signatures.p.pseudoOuts.push_back(pseudoOut); + + // Set the reference block for fcmp++ + const crypto::hash referenceBlock{0x01}; + tx.rct_signatures.referenceBlock = referenceBlock; + + // Set the curve trees merkle tree depth + tx.rct_signatures.p.curve_trees_tree_depth = curve_trees_tree_depth; + + // 1 fcmp++ proof + fcmp_pp::FcmpPpProof fcmp_pp; + fcmp_pp.reserve(proof_len); + for (std::size_t i = 0; i < proof_len; ++i) + fcmp_pp.push_back(i); + tx.rct_signatures.p.fcmp_pp = std::move(fcmp_pp); + + return tx; + }; + + // 1. Set up a normal tx that includes an fcmp++ proof + { + transaction tx = make_dummy_fcmp_pp_tx(); + transaction tx1; + string blob; + + ASSERT_TRUE(serialization::dump_binary(tx, blob)); + ASSERT_TRUE(serialization::parse_binary(blob, tx1)); + ASSERT_EQ(tx, tx1); + ASSERT_EQ(tx.rct_signatures.referenceBlock, crypto::hash{0x01}); + ASSERT_EQ(tx.rct_signatures.referenceBlock, tx1.rct_signatures.referenceBlock); + ASSERT_EQ(tx.rct_signatures.p.fcmp_pp, tx1.rct_signatures.p.fcmp_pp); + } + + // 2. fcmp++ proof is longer than expected when serializing + { + transaction tx = make_dummy_fcmp_pp_tx(); + + // Extend fcmp++ proof + ASSERT_TRUE(tx.rct_signatures.p.fcmp_pp.size() == proof_len); + tx.rct_signatures.p.fcmp_pp.push_back(0x01); + + string blob; + ASSERT_FALSE(serialization::dump_binary(tx, blob)); + } + + // 3. fcmp++ proof is shorter than expected when serializing + { + transaction tx = make_dummy_fcmp_pp_tx(); + + // Shorten the fcmp++ proof + ASSERT_TRUE(tx.rct_signatures.p.fcmp_pp.size() == proof_len); + ASSERT_TRUE(tx.rct_signatures.p.fcmp_pp.size() > 1); + tx.rct_signatures.p.fcmp_pp.pop_back(); + + string blob; + ASSERT_FALSE(serialization::dump_binary(tx, blob)); + } + + const auto fcmp_pp_to_hex_str = [](const transaction &tx) + { + std::string fcmp_pp_str; + for (std::size_t i = 0; i < tx.rct_signatures.p.fcmp_pp.size(); ++i) + { + std::stringstream ss; + ss << std::hex << std::setfill('0') << std::setw(2) << (int)tx.rct_signatures.p.fcmp_pp[i]; + fcmp_pp_str += ss.str(); + } + return fcmp_pp_str; + }; + + // 4. fcmp++ proof is longer than expected when de-serializing + { + transaction tx = make_dummy_fcmp_pp_tx(); + transaction tx1; + string blob; + + ASSERT_TRUE(serialization::dump_binary(tx, blob)); + + std::string blob_str = epee::string_tools::buff_to_hex_nodelimer(blob); + + // Find the proof within the serialized tx blob + const std::string fcmp_pp_str = fcmp_pp_to_hex_str(tx); + ASSERT_TRUE(!fcmp_pp_str.empty()); + const std::size_t pos = blob_str.find(fcmp_pp_str); + ASSERT_TRUE(pos != std::string::npos); + ASSERT_TRUE(blob_str.find(fcmp_pp_str, pos + 1) == std::string::npos); + + // Insert an extra proof elem + blob_str.insert(pos, "2a"); + std::string larger_blob; + epee::string_tools::parse_hexstr_to_binbuff(blob_str, larger_blob); + + ASSERT_FALSE(serialization::parse_binary(larger_blob, tx1)); + } + + // 5. 
fcmp++ proof is shorter than expected when de-serializing + { + transaction tx = make_dummy_fcmp_pp_tx(); + transaction tx1; + string blob; + + ASSERT_TRUE(serialization::dump_binary(tx, blob)); + + std::string blob_str = epee::string_tools::buff_to_hex_nodelimer(blob); + + // Find the proof within the serialized tx blob + const std::string fcmp_pp_str = fcmp_pp_to_hex_str(tx); + ASSERT_TRUE(!fcmp_pp_str.empty()); + const std::size_t pos = blob_str.find(fcmp_pp_str); + ASSERT_TRUE(pos != std::string::npos); + ASSERT_TRUE(blob_str.find(fcmp_pp_str, pos + 1) == std::string::npos); + + // Delete a proof elem + blob_str.erase(pos, 2); + std::string smaller_blob; + epee::string_tools::parse_hexstr_to_binbuff(blob_str, smaller_blob); + + ASSERT_FALSE(serialization::parse_binary(smaller_blob, tx1)); + } +} diff --git a/tests/unit_tests/unit_tests_utils.h b/tests/unit_tests/unit_tests_utils.h index 65da7bf884d..ab3b77889f6 100644 --- a/tests/unit_tests/unit_tests_utils.h +++ b/tests/unit_tests/unit_tests_utils.h @@ -30,6 +30,13 @@ #pragma once +#include "gtest/gtest.h" + +#include "blockchain_db/blockchain_db.h" +#include "blockchain_db/lmdb/db_lmdb.h" +#include "fcmp_pp/curve_trees.h" +#include "misc_log_ex.h" + #include #include @@ -64,8 +71,92 @@ namespace unit_test private: std::atomic m_counter; }; + + class BlockchainLMDBTest + { + public: + BlockchainLMDBTest(bool is_copy = false) : + m_temp_db_dir(boost::filesystem::temp_directory_path().string() + "/monero-lmdb-tests/"), + m_is_copy{is_copy} + {} + + ~BlockchainLMDBTest() + { + delete m_db; + if (m_temp_db_dir.find("/monero-lmdb-tests/") == std::string::npos) + { + LOG_ERROR("unexpected temp db dir"); + return; + } + if (!m_is_copy) + boost::filesystem::remove_all(m_temp_db_dir); + } + + void init_new_db(std::shared_ptr curve_trees) + { + CHECK_AND_ASSERT_THROW_MES(this->m_db == nullptr, "expected nullptr m_db"); + this->m_db = new cryptonote::BlockchainLMDB(true/*batch_transactions*/, curve_trees); + + const auto temp_db_path = boost::filesystem::unique_path(); + const std::string dir_path = m_temp_db_dir + temp_db_path.string(); + + MDEBUG("Creating test db at path " << dir_path); + ASSERT_NO_THROW(this->m_db->open(dir_path)); + m_cur_dir_path = dir_path; + } + + void init_hardfork(cryptonote::HardFork *hardfork) + { + hardfork->init(); + this->m_db->set_hard_fork(hardfork); + } + + BlockchainLMDBTest *copy_db(std::shared_ptr curve_trees) + { + CHECK_AND_ASSERT_THROW_MES(this->m_db != nullptr, "expected non-null m_db"); + CHECK_AND_ASSERT_THROW_MES(this->m_cur_dir_path != "", "expected cur dir path set"); + + const boost::filesystem::path lmdb_data_path = boost::filesystem::path(m_cur_dir_path + "/data.mdb"); + CHECK_AND_ASSERT_THROW_MES(boost::filesystem::exists(lmdb_data_path), "did not find lmdb data file"); + + // Close db, copy db file, open copy, then reopen the db + this->m_db->close(); + const auto temp_db_path = boost::filesystem::unique_path(); + const std::string dest_path = m_temp_db_dir + temp_db_path.string(); + CHECK_AND_ASSERT_THROW_MES(boost::filesystem::create_directories(dest_path), + "failed to create new db dirs"); + boost::filesystem::copy_file(lmdb_data_path, dest_path + "/data.mdb"); + + // Open db copy + BlockchainLMDBTest *copy_db = new BlockchainLMDBTest(true/*is_copy*/); + copy_db->m_db = new cryptonote::BlockchainLMDB(true/*batch_transactions*/, curve_trees); + copy_db->m_db->open(dest_path); + copy_db->m_cur_dir_path = dest_path; + + // Reopen original db so it's ready for use + 
this->m_db->open(m_cur_dir_path); + + return copy_db; + } + + cryptonote::BlockchainDB* m_db{nullptr}; + const std::string m_temp_db_dir; + std::string m_cur_dir_path{""}; + const bool m_is_copy{false}; + }; } +#define INIT_BLOCKCHAIN_LMDB_TEST_DB(test_db, curve_trees) \ + if (curve_trees != nullptr) \ + test_db.init_new_db(curve_trees); \ + auto hardfork = cryptonote::HardFork(*test_db.m_db, 1, 0); \ + test_db.init_hardfork(&hardfork); \ + auto scope_exit_handler = epee::misc_utils::create_scope_leave_handler([&](){ \ + ASSERT_NO_THROW(test_db.m_db->close()); \ + delete test_db.m_db; \ + test_db.m_db = nullptr; \ + }) + # define ASSERT_EQ_MAP(val, map, key) \ do { \ auto found = map.find(key); \
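Taken together, the fixture and macro above give the tests their common pattern: open a fresh temp db, then fork a byte-for-byte copy of its data file for destructive checks. A usage sketch (inside a test body, where curve_trees is the std::shared_ptr<CurveTreesV1> produced by INIT_CURVE_TREES_TEST):

unit_test::BlockchainLMDBTest test_db;
INIT_BLOCKCHAIN_LMDB_TEST_DB(test_db, curve_trees); // non-null: create a fresh temp db

// ... populate test_db.m_db ...

// Fork an isolated copy; pass nullptr so the macro reuses the already-opened copy
unit_test::BlockchainLMDBTest copy_db = *test_db.copy_db(curve_trees);
INIT_BLOCKCHAIN_LMDB_TEST_DB(copy_db, nullptr);

// Destructive operations on copy_db.m_db leave test_db untouched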