storage_transfer_manager_download_all_blobs.py
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START storage_transfer_manager_download_all_blobs]
def download_all_blobs_with_transfer_manager(
    bucket_name, destination_directory="", processes=8
):
    """Download all of the blobs in a bucket, concurrently in a process pool.

    The filename of each downloaded blob is derived from the blob name and
    the `destination_directory` parameter. For complete control of the
    filename of each blob, use transfer_manager.download_many() instead.

    Directories will be created automatically as needed, for instance to
    accommodate blob names that include slashes.
    """

    # The ID of your GCS bucket
    # bucket_name = "your-bucket-name"

    # The directory on your computer to which to download all of the files. This
    # string is prepended (with os.path.join()) to the name of each blob to form
    # the full path. Relative paths and absolute paths are both accepted. An
    # empty string means "the current working directory". Note that this
    # parameter allows directory traversal ("../" etc.) and is not intended for
    # unsanitized end user input.
    # destination_directory = ""

    # The maximum number of processes to use for the operation. The performance
    # impact of this value depends on the use case, but smaller files usually
    # benefit from a higher number of processes. Each additional process occupies
    # some CPU and memory resources until finished.
    # processes=8

    from google.cloud.storage import Client, transfer_manager

    storage_client = Client()
    bucket = storage_client.bucket(bucket_name)

    blob_names = [blob.name for blob in bucket.list_blobs()]

    results = transfer_manager.download_many_to_path(
        bucket, blob_names, destination_directory=destination_directory, max_workers=processes
    )

    for name, result in zip(blob_names, results):
        # Each result is either None (success) or the exception raised for
        # that blob, in the same order as the input list of blob names.
        if isinstance(result, Exception):
            print("Failed to download {} due to exception: {}".format(name, result))
        else:
            print("Downloaded {} to {}.".format(name, destination_directory + name))
# [END storage_transfer_manager_download_all_blobs]
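

# A minimal usage sketch, not part of the official sample above. The bucket
# name and destination directory are placeholders; substitute your own.
# Running this requires the google-cloud-storage package and credentials
# with access to the bucket.
#
# The docstring points to transfer_manager.download_many() for complete
# control over each downloaded filename; download_many_with_custom_names
# below is a hypothetical sketch of that approach, with placeholder blob
# names chosen for illustration.


def download_many_with_custom_names(bucket_name="your-bucket-name"):
    from google.cloud.storage import Client, transfer_manager

    storage_client = Client()
    bucket = storage_client.bucket(bucket_name)

    # Each pair maps a blob to the exact local filename to write it to.
    blob_file_pairs = [
        (bucket.blob("data/report.csv"), "local-report.csv"),
        (bucket.blob("images/logo.png"), "local-logo.png"),
    ]

    # As with download_many_to_path(), each result is either None (success)
    # or the exception raised for that blob, in input order.
    results = transfer_manager.download_many(blob_file_pairs)

    for (blob, path), result in zip(blob_file_pairs, results):
        if isinstance(result, Exception):
            print("Failed to download {} due to exception: {}".format(blob.name, result))
        else:
            print("Downloaded {} to {}.".format(blob.name, path))


if __name__ == "__main__":
    download_all_blobs_with_transfer_manager(
        "your-bucket-name", destination_directory="downloads/", processes=8
    )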