fix #340, properly handle errors on S3 during Walk() and delete old backup

Slach committed Feb 3, 2022
1 parent b056412 commit 83b22c2
Showing 15 changed files with 130 additions and 26 deletions.
2 changes: 2 additions & 0 deletions .github/workflows/build.yaml
@@ -168,6 +168,8 @@ jobs:
           export COMPOSE_FILE=docker-compose.yml
         fi
         export CLICKHOUSE_BACKUP_BIN="$(pwd)/clickhouse-backup/clickhouse-backup-race"
+        docker-compose -f test/integration/${COMPOSE_FILE} up -d minio
+        sleep 3
         docker-compose -f test/integration/${COMPOSE_FILE} up -d clickhouse
         docker-compose -f test/integration/${COMPOSE_FILE} ps -a
         go test -timeout 30m -failfast -tags=integration -run "${RUN_TESTS:-.+}" -v test/integration/integration_test.go
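Note: the CI job now starts MinIO ahead of ClickHouse, with a 3-second grace period, so the auto-created test bucket is ready before the integration tests begin.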
1 change: 1 addition & 0 deletions ChangeLog.md
@@ -18,6 +18,7 @@ IMPROVEMENTS
 - Return `clean` cli command and API `POST /backup/clean` endpoint, fix [#379](https://github.com/AlexAkulov/clickhouse-backup/issues/379)
 
 BUG FIXES
+- fix [#340](https://github.com/AlexAkulov/clickhouse-backup/issues/340), properly handle errors on S3 during Walk() and delete old backup
 - fix [#331](https://github.com/AlexAkulov/clickhouse-backup/issues/331), properly restore tables where have table name with the same name as database name
 - fix [#311](https://github.com/AlexAkulov/clickhouse-backup/issues/311), properly run clickhouse-backup inside docker container via entrypoint
 - fix [#317](https://github.com/AlexAkulov/clickhouse-backup/issues/317), properly upload large files to Azure Blob Storage
7 changes: 4 additions & 3 deletions pkg/new_storage/gcs.go
@@ -92,12 +92,13 @@ func (gcs *GCS) Connect() error {
 
 func (gcs *GCS) Walk(gcsPath string, recursive bool, process func(r RemoteFile) error) error {
     ctx := context.Background()
+    rootPath := path.Join(gcs.Config.Path, gcsPath)
     delimiter := ""
     if !recursive {
         delimiter = "/"
     }
     it := gcs.client.Bucket(gcs.Config.Bucket).Objects(ctx, &storage.Query{
-        Prefix:    path.Join(gcs.Config.Path, gcsPath) + "/",
+        Prefix:    rootPath + "/",
         Delimiter: delimiter,
     })
     for {
@@ -106,7 +107,7 @@ func (gcs *GCS) Walk(gcsPath string, recursive bool, process func(r RemoteFile)
         case nil:
             if object.Prefix != "" {
                 if err := process(&gcsFile{
-                    name: strings.TrimPrefix(object.Prefix, path.Join(gcs.Config.Path, gcsPath)),
+                    name: strings.TrimPrefix(object.Prefix, rootPath),
                 }); err != nil {
                     return err
                 }
@@ -115,7 +116,7 @@ func (gcs *GCS) Walk(gcsPath string, recursive bool, process func(r RemoteFile)
             if err := process(&gcsFile{
                 size:         object.Size,
                 lastModified: object.Updated,
-                name:         strings.TrimPrefix(object.Name, path.Join(gcs.Config.Path, gcsPath)),
+                name:         strings.TrimPrefix(object.Name, rootPath),
             }); err != nil {
                 return err
             }
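The gcs.go change is a companion refactor rather than a separate fix: the joined `gcs.Config.Path`/`gcsPath` prefix is now computed once as `rootPath` and reused for the listing query and both `strings.TrimPrefix` call sites, so the prefix used to derive file names can no longer drift out of sync with the one used to query.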
5 changes: 3 additions & 2 deletions pkg/new_storage/s3.go
@@ -223,9 +223,10 @@ func (s *S3) Walk(s3Path string, recursive bool, process func(r RemoteFile) erro
         })
     })
     g.Go(func() error {
+        var err error
         for s3File := range s3Files {
-            if err := process(s3File); err != nil {
-                return err
+            if err == nil {
+                err = process(s3File)
             }
         }
         return nil
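This hunk is the heart of #340. `Walk` runs a producer goroutine that pages the S3 listing into the `s3Files` channel while a second goroutine feeds each entry to `process`. Previously the consumer returned on the first `process` error, abandoning the channel and leaving the producer blocked on its next send; now it records the first error and keeps draining the channel to the end. Below is a minimal, self-contained sketch of that drain pattern — illustrative only, not the project's code. It assumes the `golang.org/x/sync/errgroup` package that the `g.Go` calls suggest, and it returns the recorded error directly, whereas the hunk's visible context still ends in `return nil`, so the real code presumably handles the recorded error outside the lines shown.

    package main

    import (
        "errors"
        "fmt"

        "golang.org/x/sync/errgroup"
    )

    func process(f string) error {
        if f == "b" {
            return errors.New("process failed on b")
        }
        return nil
    }

    func main() {
        files := make(chan string) // unbuffered: an abandoned send blocks forever
        g := errgroup.Group{}
        g.Go(func() error { // producer: stands in for the paged S3 listing
            defer close(files)
            for _, f := range []string{"a", "b", "c", "d"} {
                files <- f // deadlocks if the consumer returns early
            }
            return nil
        })
        g.Go(func() error { // consumer: always drains, even after a failure
            var err error
            for f := range files {
                if err == nil { // after the first error, drain without processing
                    err = process(f)
                }
            }
            return err
        })
        fmt.Println("walk finished:", g.Wait()) // walk finished: process failed on b
    }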
2 changes: 2 additions & 0 deletions test/integration/config-azblob.yml
@@ -1,6 +1,8 @@
 general:
   disable_progress_bar: true
   remote_storage: azblob
+  upload_concurrency: 4
+  download_concurrency: 4
   restore_schema_on_cluster: "cluster"
 clickhouse:
   host: 127.0.0.1
3 changes: 2 additions & 1 deletion test/integration/config-gcs.yml
@@ -1,7 +1,8 @@
 general:
   disable_progress_bar: true
   remote_storage: gcs
-  upload_concurrency: 2
+  upload_concurrency: 4
+  download_concurrency: 4
   restore_schema_on_cluster: "cluster"
 clickhouse:
   host: 127.0.0.1
34 changes: 34 additions & 0 deletions test/integration/config-s3-nodelete.yml
@@ -0,0 +1,34 @@
+general:
+  disable_progress_bar: true
+  remote_storage: s3
+  upload_concurrency: 4
+  download_concurrency: 4
+  skip_tables:
+    - " system.*"
+    - "INFORMATION_SCHEMA.*"
+    - "information_schema.*"
+  restore_schema_on_cluster: "cluster"
+clickhouse:
+  host: 127.0.0.1
+  port: 9440
+  username: backup
+  password: meow=& 123?*%# МЯУ
+  secure: true
+  skip_verify: true
+  sync_replicated_tables: true
+  timeout: 1s
+  restart_command: bash -c 'echo "FAKE RESTART"'
+s3:
+  access_key: nodelete
+  secret_key: nodelete_password
+  bucket: clickhouse
+  endpoint: http://minio:9000
+  acl: private
+  force_path_style: true
+  path: backup
+  disable_ssl: true
+  compression_format: tar
+api:
+  listen: :7171
+  create_integration_tables: true
+  allow_parallel: true
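This profile is the client half of the failure-injection setup: its `nodelete`/`nodelete_password` credentials belong to the MinIO user that `minio_nodelete.sh` (added below) creates with a policy denying `s3:DeleteObject` on the `clickhouse` bucket, so every deletion attempted through this config fails and drives the error handling under test.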
2 changes: 2 additions & 0 deletions test/integration/config-sftp-auth-key.yaml
@@ -1,6 +1,8 @@
 general:
   disable_progress_bar: true
   remote_storage: sftp
+  upload_concurrency: 4
+  download_concurrency: 4
   restore_schema_on_cluster: "cluster"
 clickhouse:
   host: 127.0.0.1
2 changes: 2 additions & 0 deletions test/integration/config-sftp-auth-password.yaml
@@ -1,6 +1,8 @@
 general:
   disable_progress_bar: true
   remote_storage: sftp
+  upload_concurrency: 4
+  download_concurrency: 4
   restore_schema_on_cluster: "cluster"
 clickhouse:
   host: 127.0.0.1
18 changes: 11 additions & 7 deletions test/integration/docker-compose.yml
@@ -27,15 +27,14 @@ services:
       - clickhouse-backup
 
   minio:
-    image: docker.io/minio/minio:${MINIO_VERSION:-latest}
+    image: docker.io/bitnami/minio:${MINIO_VERSION:-latest}
     container_name: minio
     environment:
       MINIO_ACCESS_KEY: access-key
       MINIO_SECRET_KEY: it-is-my-super-secret-key
-    entrypoint: sh
-    command: -c 'mkdir -p doc_gen_minio/export/clickhouse && minio server doc_gen_minio/export'
-    ports:
-      - "9010:9000"
+      MINIO_DEFAULT_BUCKETS: 'clickhouse'
+    volumes:
+      - ./minio_nodelete.sh:/bin/minio_nodelete.sh
     networks:
       - clickhouse-backup
 
@@ -87,12 +86,17 @@ services:
       - ./dhparam.pem:/etc/clickhouse-server/dhparam.pem
       - ./ssl.xml:/etc/clickhouse-server/config.d/ssl.xml
       - ./cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
-      # uncomment only for local debug
-      # - ./clickhouse-server.log:/var/log/clickhouse-server/clickhouse-server.log
+      # uncomment only when you need clickhouse logs
+      # - ./clickhouse-server.log:/var/log/clickhouse-server/clickhouse-server.log
+      # - ./clickhouse-server.err.log:/var/log/clickhouse-server/clickhouse-server.err.log
+      # uncomment only for local debug
+      # - ./install_delve.sh:/tmp/install_delve.sh
     ports:
       - "8123:8123"
       - "9000:9000"
       - "7171:7171"
+      # uncomment for delve debugger
+      # - "40001:40001"
     networks:
       - clickhouse-backup
     depends_on:
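Two things change for MinIO here: the Bitnami image creates the `clickhouse` bucket itself via `MINIO_DEFAULT_BUCKETS`, replacing the hand-rolled `entrypoint`/`command` pair, and the new provisioning script is mounted into the container so the tests can invoke it with `docker-compose exec`. The 9010 host port mapping is dropped, presumably because everything reaches MinIO over the compose network. The same edit is applied to docker-compose_advanced.yml below.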
22 changes: 11 additions & 11 deletions test/integration/docker-compose_advanced.yml
@@ -38,15 +38,14 @@ services:
       - clickhouse-backup
 
   minio:
-    image: docker.io/minio/minio:${MINIO_VERSION:-latest}
+    image: docker.io/bitnami/minio:${MINIO_VERSION:-latest}
     container_name: minio
     environment:
       MINIO_ACCESS_KEY: access-key
       MINIO_SECRET_KEY: it-is-my-super-secret-key
-    entrypoint: sh
-    command: -c 'mkdir -p doc_gen_minio/export/clickhouse && minio server doc_gen_minio/export'
-    ports:
-      - "9010:9000"
+      MINIO_DEFAULT_BUCKETS: 'clickhouse'
+    volumes:
+      - ./minio_nodelete.sh:/bin/minio_nodelete.sh
     networks:
       - clickhouse-backup
 
@@ -112,16 +111,17 @@ services:
       - ./ssl.xml:/etc/clickhouse-server/config.d/ssl.xml
       - ./cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
      - ./storage_configuration.sh:/docker-entrypoint-initdb.d/storage_configuration.sh
-      # uncomment only for local debug
-      # - ./install_delve.sh:/tmp/install_delve.sh
-      # - ./clickhouse-server.log:/var/log/clickhouse-server/clickhouse-server.log
-      # - ./clickhouse-server.err.log:/var/log/clickhouse-server/clickhouse-server.err.log
+      # uncomment only when you need clickhouse logs
+      # - ./clickhouse-server.log:/var/log/clickhouse-server/clickhouse-server.log
+      # - ./clickhouse-server.err.log:/var/log/clickhouse-server/clickhouse-server.err.log
+      # uncomment only for local debug
+      # - ./install_delve.sh:/tmp/install_delve.sh
     ports:
       - "8123:8123"
       - "9000:9000"
       - "7171:7171"
-    # uncomment for delve debugger
-    # - "40001:40001"
+      # uncomment for delve debugger
+      # - "40001:40001"
     networks:
       - clickhouse-backup
     depends_on:
13 changes: 12 additions & 1 deletion test/integration/integration_test.go
@@ -936,6 +936,17 @@ func TestProjections(t *testing.T) {
 
 }
 
+func TestS3AdvancedIntegration(t *testing.T) {
+    if isTestShouldSkip("S3_ADVANCED_TESTS") {
+        t.Skip("Skipping S3 Advanced integration tests...")
+        return
+    }
+    r := require.New(t)
+    r.NoError(dockerExec("minio", "/bin/minio_nodelete.sh"))
+    r.NoError(dockerCP("config-s3-nodelete.yml", "clickhouse:/etc/clickhouse-backup/config.yml"))
+    runMainIntegrationScenario(t, "S3")
+}
+
 func runMainIntegrationScenario(t *testing.T, remoteStorageType string) {
     var out string
     var err error
@@ -1344,7 +1355,7 @@ func execCmd(cmd string, args ...string) error {
 }
 
 func execCmdOut(cmd string, args ...string) (string, error) {
-    ctx, cancel := context.WithTimeout(context.Background(), 300*time.Second)
+    ctx, cancel := context.WithTimeout(context.Background(), 180*time.Second)
     log.Infof("%s %s", cmd, strings.Join(args, " "))
     out, err := exec.CommandContext(ctx, cmd, args...).CombinedOutput()
     cancel()
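`TestS3AdvancedIntegration` is opt-in via `S3_ADVANCED_TESTS` (exported by run.sh below): it runs the provisioning script inside the minio container, swaps the delete-denied `config-s3-nodelete.yml` into the clickhouse container, and then replays the standard S3 scenario, so the error paths fixed in this commit are exercised against storage that refuses deletes. The `execCmdOut` timeout drops from 300 to 180 seconds so a hung helper command fails fast inside the 30-minute test budget.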
37 changes: 37 additions & 0 deletions test/integration/minio_nodelete.sh
@@ -0,0 +1,37 @@
+#!/bin/bash
+set -x
+set -e
+mc alias list
+
+mc admin user add local nodelete nodelete_password
+
+mc admin policy add local nodelete <( cat << EOF
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Deny",
+      "Action": [
+        "s3:DeleteObject"
+      ],
+      "Resource": [
+        "arn:aws:s3:::clickhouse/*"
+      ]
+    },
+    {
+      "Effect": "Allow",
+      "Action": [
+        "s3:*"
+      ],
+      "Resource": [
+        "arn:aws:s3:::clickhouse/*"
+      ]
+    }
+  ]
+}
+EOF
+)
+
+mc admin policy set local nodelete user=nodelete
+
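The policy pairs a blanket `Allow` on `s3:*` with an explicit `Deny` on `s3:DeleteObject`; in this IAM-style policy language an explicit deny always beats an allow, so the `nodelete` user can read and write the `clickhouse` bucket but never delete from it. A quick manual check might look like this (hypothetical session; `local` is the alias the script already relies on):

    mc alias set local http://minio:9000 nodelete nodelete_password
    echo probe | mc pipe local/clickhouse/probe.txt   # succeeds: covered by Allow s3:*
    mc rm local/clickhouse/probe.txt                  # refused: s3:DeleteObject is denied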
7 changes: 6 additions & 1 deletion test/integration/run.sh
@@ -8,6 +8,7 @@ export CLICKHOUSE_BACKUP_BIN="$(pwd)/clickhouse-backup/clickhouse-backup-race"
 export LOG_LEVEL=${LOG_LEVEL:-info}
 export GCS_TESTS=${GCS_TESTS:-}
 export AZURE_TESTS=${AZURE_TESTS:-}
+export S3_ADVANCED_TESTS=${S3_ADVANCED_TESTS:-}
 export S3_DEBUG=${S3_DEBUG:-false}
 export GCS_DEBUG=${GCS_DEBUG:-false}
 export FTP_DEBUG=${FTP_DEBUG:-false}
@@ -25,5 +26,9 @@ docker-compose -f test/integration/${COMPOSE_FILE} down --remove-orphans
 docker volume prune -f
 make clean
 make build-race
-docker-compose -f test/integration/${COMPOSE_FILE} up -d --force-recreate
+docker-compose -f test/integration/${COMPOSE_FILE} up -d minio mysql
+sleep 5
+docker-compose -f test/integration/${COMPOSE_FILE} exec minio mc alias list
+
+docker-compose -f test/integration/${COMPOSE_FILE} up -d
 go test -timeout 30m -failfast -tags=integration -run "${RUN_TESTS:-.+}" -v test/integration/integration_test.go
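The script now mirrors the CI ordering: MinIO and MySQL come up first, a 5-second grace period lets the Bitnami image finish provisioning the bucket, `mc alias list` doubles as a readiness probe, and only then does the rest of the stack start. To exercise only the new test, an invocation along these lines should work (example usage built from the variables the script exports):

    S3_ADVANCED_TESTS=true RUN_TESTS=TestS3AdvancedIntegration ./test/integration/run.sh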
1 change: 1 addition & 0 deletions test/integration/storage_configuration.sh
@@ -1,4 +1,5 @@
 #!/bin/bash
+date
 if [[ ! -d /hdd1_data ]]; then
   mkdir -pv /hdd1_data
 fi
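The lone `date` makes the init script stamp the server log, presumably to make container startup timing visible when debugging the new service ordering.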
