chore: pass on docker_fast.sh (#9615)
- Should be faster; stable cache layers and an end-to-end image are now available.
- The aws CLI tool is no longer needed for cache downloads.

What's not done:
- Make the images smaller (they are still huge).
- Most of the yarn-project install could come from the cache.

fixes: #9503

---------

Co-authored-by: Maddiaa <47148561+Maddiaa0@users.noreply.github.com>
ludamad and Maddiaa0 authored Oct 31, 2024
1 parent 8ce6834 commit 1c53459
Showing 5 changed files with 97 additions and 71 deletions.
Dockerfile.end-to-end.fast: 25 additions, 0 deletions (new file)
@@ -0,0 +1,25 @@
# Use an ARG to define the architecture, defaulting to amd64
ARG ARCH=amd64
# aztec must be built from Dockerfile.fast
FROM aztecprotocol/aztec AS aztec
FROM aztecprotocol/build:1.0-${ARCH}

# Install additional dependencies
RUN apt-get update && apt-get install -y software-properties-common \
&& add-apt-repository ppa:xtradeb/apps -y && apt-get update \
&& apt-get install -y wget gnupg \
&& wget -q -O - https://dl-ssl.google.com/linux/linux_signing_key.pub | apt-key add - \
&& echo "deb [arch=$(dpkg --print-architecture)] http://dl.google.com/linux/chrome/deb/ stable main" >> /etc/apt/sources.list.d/google-chrome.list \
&& apt update && apt install -y curl chromium netcat-openbsd \
&& rm -rf /var/lib/apt/lists/*

ENV CHROME_BIN="/usr/bin/chromium"
ENV PATH=/opt/foundry/bin:$PATH
ENV HARDWARE_CONCURRENCY=""
ENV FAKE_PROOFS=""
ENV PROVER_AGENT_CONCURRENCY=8

COPY --from=aztec /usr/src/ /usr/src/
WORKDIR /usr/src/yarn-project/end-to-end

ENTRYPOINT ["yarn", "test"]
Dockerfile.fast: 25 additions, 41 deletions
@@ -20,99 +20,83 @@ RUN git init -b master \
&& git config user.email 'tech@aztecprotocol.com'

# ---------- EXTRACT BUILD-SYSTEM ----------
-COPY build-system.tar.gz .
-RUN tar -xzf build-system.tar.gz \
-    && rm build-system.tar.gz && git add . \
+COPY build-system build-system
+RUN git add . \
    && git commit -m "Update git metadata" >/dev/null

# ---------- BUILD BARRETENBERG ----------
-COPY barretenberg.tar.gz .
-RUN tar -xzf barretenberg.tar.gz \
-    && rm barretenberg.tar.gz && git add . \
+COPY barretenberg barretenberg
+RUN git add . \
    && git commit -m "Update git metadata" >/dev/null

# Bootstrap cache for barretenberg/cpp
RUN --mount=type=secret,id=aws_access_key_id \
    --mount=type=secret,id=aws_secret_access_key \
-    cd barretenberg/cpp \
+    bash -c 'cd barretenberg \
    && AWS_ACCESS_KEY_ID=$(cat /run/secrets/aws_access_key_id) \
    AWS_SECRET_ACCESS_KEY=$(cat /run/secrets/aws_secret_access_key) \
-    ./bootstrap_cache.sh \
-    && echo "barretenberg/cpp: Success"
-
-# Bootstrap cache for barretenberg/ts
-RUN --mount=type=secret,id=aws_access_key_id \
-    --mount=type=secret,id=aws_secret_access_key \
-    cd barretenberg/ts \
-    && AWS_ACCESS_KEY_ID=$(cat /run/secrets/aws_access_key_id) \
-    AWS_SECRET_ACCESS_KEY=$(cat /run/secrets/aws_secret_access_key) \
-    ./bootstrap_cache.sh \
-    && echo "barretenberg/ts: Success"
+    ./bootstrap_cache.sh' \
+    && echo "barretenberg: Success"

# ---------- BUILD NOIR ----------
-COPY noir.tar.gz .
-RUN tar -xzf noir.tar.gz \
-    && rm noir.tar.gz && git add . \
+ADD noir noir
+RUN git add . \
    && git commit -m "Update git metadata" >/dev/null

# Bootstrap cache for Noir
RUN --mount=type=secret,id=aws_access_key_id \
    --mount=type=secret,id=aws_secret_access_key \
-    cd noir \
+    bash -c 'cd noir \
    && AWS_ACCESS_KEY_ID=$(cat /run/secrets/aws_access_key_id) \
    AWS_SECRET_ACCESS_KEY=$(cat /run/secrets/aws_secret_access_key) \
-    ./bootstrap_cache.sh \
+    ./bootstrap_cache.sh' \
    && echo "noir: Success"

# ---------- BUILD L1 CONTRACTS ----------
-COPY l1-contracts.tar.gz .
-RUN tar -xzf l1-contracts.tar.gz \
-    && rm l1-contracts.tar.gz && git add . \
+ADD l1-contracts l1-contracts
+RUN git add . \
    && git commit -m "Update git metadata" >/dev/null

# Bootstrap cache for L1 Contracts
RUN --mount=type=secret,id=aws_access_key_id \
    --mount=type=secret,id=aws_secret_access_key \
-    cd l1-contracts \
+    bash -c 'cd l1-contracts \
    && AWS_ACCESS_KEY_ID=$(cat /run/secrets/aws_access_key_id) \
    AWS_SECRET_ACCESS_KEY=$(cat /run/secrets/aws_secret_access_key) \
-    ./bootstrap_cache.sh \
+    ./bootstrap_cache.sh' \
    && echo "l1-contracts: Success"

# ---------- BUILD AVM TRANSPILER ----------
-COPY avm-transpiler.tar.gz .
-RUN tar -xzf avm-transpiler.tar.gz \
-    && rm avm-transpiler.tar.gz && git add . \
+ADD avm-transpiler avm-transpiler
+RUN git add . \
    && git commit -m "Update git metadata" >/dev/null

# Bootstrap cache for AVM Transpiler
RUN --mount=type=secret,id=aws_access_key_id \
    --mount=type=secret,id=aws_secret_access_key \
-    cd avm-transpiler \
+    bash -c 'cd avm-transpiler \
    && AWS_ACCESS_KEY_ID=$(cat /run/secrets/aws_access_key_id) \
    AWS_SECRET_ACCESS_KEY=$(cat /run/secrets/aws_secret_access_key) \
-    ./bootstrap_cache.sh \
+    ./bootstrap_cache.sh' \
    && echo "avm-transpiler: Success"

# ---------- BUILD NOIR PROJECTS ----------
-COPY noir-projects.tar.gz .
-RUN tar -xzf noir-projects.tar.gz \
-    && rm noir-projects.tar.gz && git add . \
+ADD noir-projects noir-projects
+RUN git add . \
    && git commit -m "Update git metadata" >/dev/null

# Bootstrap cache for Noir Projects
RUN --mount=type=secret,id=aws_access_key_id \
    --mount=type=secret,id=aws_secret_access_key \
-    cd noir-projects \
+    bash -c 'cd noir-projects \
    && AWS_ACCESS_KEY_ID=$(cat /run/secrets/aws_access_key_id) \
    AWS_SECRET_ACCESS_KEY=$(cat /run/secrets/aws_secret_access_key) \
-    ./bootstrap_cache.sh \
+    ./bootstrap_cache.sh' \
    && echo "noir-projects: Success"

# ---------- BUILD YARN PROJECT ----------
-COPY yarn-project.tar.gz .
-RUN tar -xzf yarn-project.tar.gz \
-    && rm yarn-project.tar.gz && git add . \
+ADD yarn-project yarn-project
+RUN git add . \
    && git commit -m "Update git metadata" >/dev/null

# Build yarn-project directly (no cache script)
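
The RUN --mount=type=secret lines above are what keep the AWS credentials out of the image layers: BuildKit exposes each secret at /run/secrets/<id> only while that RUN executes, so nothing is baked into the cacheable layers. A sketch of supplying those secrets at build time, mirroring what docker_fast.sh below actually does:

    # Write credentials to throwaway files, then hand them to BuildKit by id.
    # The ids must match the --mount=type=secret,id=... names in the Dockerfile.
    echo "$AWS_ACCESS_KEY_ID" > /tmp/aws_access_key_id.txt
    echo "$AWS_SECRET_ACCESS_KEY" > /tmp/aws_secret_access_key.txt
    DOCKER_BUILDKIT=1 docker build -f Dockerfile.fast \
      --secret id=aws_access_key_id,src=/tmp/aws_access_key_id.txt \
      --secret id=aws_secret_access_key,src=/tmp/aws_secret_access_key.txt \
      .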
barretenberg/bootstrap_cache.sh: 17 additions, 0 deletions (new file)
@@ -0,0 +1,17 @@
#!/usr/bin/env bash
set -eu

cd "$(dirname "$0")"

# Run both tasks in the background
(cd cpp && ./bootstrap_cache.sh "$@") &
pid_cpp=$!
(cd ts && ./bootstrap_cache.sh "$@") &
pid_ts=$!

# Wait for both processes and capture any non-zero exit codes
wait $pid_cpp || exit_code=$?
wait $pid_ts || exit_code=$?

# Exit with the first non-zero exit code, if any
exit ${exit_code:-0}
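
The per-PID wait is deliberate: a bare `wait` would discard the children's exit statuses, whereas `wait <pid>` returns that child's status, so the script ends up non-zero if either cache bootstrap failed. A sketch of the same pattern generalized to an arbitrary list of subdirectories (the list here is hypothetical):

    #!/usr/bin/env bash
    set -eu

    # Run each subproject's cache bootstrap in parallel,
    # then fail if any of them failed.
    dirs=(cpp ts)
    pids=()
    for dir in "${dirs[@]}"; do
      (cd "$dir" && ./bootstrap_cache.sh "$@") &
      pids+=($!)
    done

    exit_code=0
    for pid in "${pids[@]}"; do
      wait "$pid" || exit_code=$?   # the || keeps set -e from aborting early
    done
    exit $exit_code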
build-system/s3-cache-scripts/cache-download.sh: 10 additions, 1 deletion
@@ -17,8 +17,17 @@ function on_exit() {
# Run on any exit
trap on_exit EXIT

+# Extract endpoint URL if S3_BUILD_CACHE_AWS_PARAMS is set
+if [[ -n "${S3_BUILD_CACHE_AWS_PARAMS:-}" ]]; then
+  # Extract URL from S3_BUILD_CACHE_AWS_PARAMS (assumes the format "--endpoint-url <URL>")
+  # TODO stop passing with endpoint url
+  S3_ENDPOINT=$(echo "$S3_BUILD_CACHE_AWS_PARAMS" | sed -n 's/--endpoint-url \([^ ]*\)/\1/p')
+else
+  # Default to AWS S3 URL if no custom endpoint is set
+  S3_ENDPOINT="http://aztec-ci-artifacts.s3.amazonaws.com"
+fi
# Attempt to download the cache file
-aws ${S3_BUILD_CACHE_AWS_PARAMS:-} s3 cp "s3://aztec-ci-artifacts/build-cache/$TAR_FILE" "$TAR_FILE" --quiet --no-sign-request || (echo "Cache download of $TAR_FILE failed." && exit 1)
+curl -s -f -O "${S3_ENDPOINT}/build-cache/$TAR_FILE" || (echo "Cache download of $TAR_FILE failed." && exit 1)

# Extract the cache file
mkdir -p "$OUT_DIR"
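
To make the endpoint extraction concrete (endpoint value hypothetical): with the parameter string below, the sed expression strips the flag and prints the bare URL, so the download hits the local MinIO server instead of the public S3 bucket.

    S3_BUILD_CACHE_AWS_PARAMS="--endpoint-url http://192.168.1.10:12000"
    echo "$S3_BUILD_CACHE_AWS_PARAMS" | sed -n 's/--endpoint-url \([^ ]*\)/\1/p'
    # prints: http://192.168.1.10:12000
    # so the script's download becomes:
    #   curl -s -f -O "http://192.168.1.10:12000/build-cache/$TAR_FILE"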
docker_fast.sh: 20 additions, 29 deletions
@@ -1,43 +1,24 @@
#!/usr/bin/env bash
# TODO eventually rename this docker.sh when we've moved to it entirely
-set -eux
+set -eu

-function start_minio() {
-  if nc -z 127.0.0.1 12000 2>/dev/null >/dev/null ; then
-    # Already started
-    return
-  fi
-  docker run -d -p 12000:9000 -p 12001:12001 -v minio-data:/data \
-    quay.io/minio/minio server /data --console-address ":12001"
-  # Make our cache bucket
-  AWS_ACCESS_KEY_ID="minioadmin" AWS_SECRET_ACCESS_KEY="minioadmin" aws --endpoint-url http://localhost:12000 s3 mb s3://aztec-ci-artifacts 2>/dev/null || true
-}
+MAKE_END_TO_END=${1:-false}

S3_BUILD_CACHE_UPLOAD=${S3_BUILD_CACHE_UPLOAD:-false}
S3_BUILD_CACHE_MINIO_URL="http://$(hostname -I | awk '{print $1}'):12000"

-# Start local file server for a quicker cache layer
-start_minio

if ! git diff-index --quiet HEAD --; then
echo "Warning: You have unstaged changes. Disabling S3 caching and local MinIO caching to avoid polluting cache (which uses Git data)." >&2
echo "Warning: You have unstaged changes. For now this is a fatal error as this script relies on git metadata." >&2
S3_BUILD_CACHE_UPLOAD=false
S3_BUILD_CACHE_DOWNLOAD=false
S3_BUILD_CACHE_MINIO_URL=""
echo "Fatal: For now, this is a fatal error as it would defeat the purpose of 'fast'." >&2
S3_BUILD_CACHE_MINIO_URL=""A
exit 1
elif [ ! -z "${AWS_ACCESS_KEY_ID:-}" ] ; then
S3_BUILD_CACHE_DOWNLOAD=true
elif [ -f ~/.aws/credentials ]; then
# Retrieve credentials if available in AWS config

-  # Do not trace this information
-  set +x
  AWS_ACCESS_KEY_ID=$(aws configure get default.aws_access_key_id)
  AWS_SECRET_ACCESS_KEY=$(aws configure get default.aws_secret_access_key)
-
-  # Resume tracing
-  set -x
S3_BUILD_CACHE_DOWNLOAD=true
else
S3_BUILD_CACHE_UPLOAD=false
@@ -52,11 +33,8 @@ function on_exit() {
trap on_exit EXIT

# Save each secret environment variable into a separate file in $TMP directory
-set +x
echo "${AWS_ACCESS_KEY_ID:-}" > "$TMP/aws_access_key_id.txt"
echo "${AWS_SECRET_ACCESS_KEY:-}" > "$TMP/aws_secret_access_key.txt"
-set -x

echo "${S3_BUILD_CACHE_MINIO_URL:-}" > "$TMP/s3_build_cache_minio_url.txt"
echo "${S3_BUILD_CACHE_UPLOAD:-}" > "$TMP/s3_build_cache_upload.txt"
echo "${S3_BUILD_CACHE_DOWNLOAD:-}" > "$TMP/s3_build_cache_download.txt"
@@ -73,10 +51,19 @@ PROJECTS=(
yarn-project
)

+function copy() {
+  local project=$1
+  git archive --format=tar.gz --mtime='1970-01-01T00:00Z' -o "$TMP/$project.tar.gz" $(git rev-parse HEAD) $project
+  cd "$TMP"
+  tar -xzf $project.tar.gz
+  rm $project.tar.gz
+}
+# Write the git archives in parallel
for project in "${PROJECTS[@]}"; do
-  # Archive Git-tracked files per project into a tar.gz file
-  git archive --format=tar.gz -o "$TMP/$project.tar.gz" HEAD $project
+  # Copy over JUST the git version of files over (bail if any fail)
+  copy $project || kill $0 &
done
+wait

# Run Docker build with secrets in the folder with our archive
DOCKER_BUILDKIT=1 docker build -t aztecprotocol/aztec -f Dockerfile.fast --progress=plain \
@@ -85,4 +72,8 @@ DOCKER_BUILDKIT=1 docker build -t aztecprotocol/aztec -f Dockerfile.fast --progr
--secret id=s3_build_cache_minio_url,src=$TMP/s3_build_cache_minio_url.txt \
--secret id=s3_build_cache_upload,src=$TMP/s3_build_cache_upload.txt \
--secret id=s3_build_cache_download,src=$TMP/s3_build_cache_download.txt \
"$TMP"
"$TMP"

if [ $MAKE_END_TO_END != "false" ] ; then
DOCKER_BUILDKIT=1 docker build -t aztecprotocol/end-to-end -f Dockerfile.end-to-end.fast --progress=plain "$TMP"
fi
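
Two notes on the rewritten script. First, copy() pins --mtime to the epoch so the files extracted into the build context carry identical timestamps on every run; together with archiving only git-tracked content, this is what keeps the Docker layer cache stable across checkouts (the "stable cache layers" from the commit message). Second, usage as implied by MAKE_END_TO_END=${1:-false}; any first argument other than the literal string "false" triggers the extra image build:

    # Build only aztecprotocol/aztec, using the S3/MinIO cache when credentials allow:
    ./docker_fast.sh

    # Also build aztecprotocol/end-to-end on top of it:
    ./docker_fast.sh end-to-end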
