diff --git a/.changes/next-release/feature-maxbandwidth-2947.json b/.changes/next-release/feature-maxbandwidth-2947.json
new file mode 100644
index 000000000000..d86f5f77559d
--- /dev/null
+++ b/.changes/next-release/feature-maxbandwidth-2947.json
@@ -0,0 +1,5 @@
+{
+  "type": "feature",
+  "category": "``max_bandwidth``",
+  "description": "Add the ability to set maximum bandwidth consumption for ``s3`` commands. (`issue 1090 `__)"
+}
diff --git a/awscli/customizations/s3/transferconfig.py b/awscli/customizations/s3/transferconfig.py
index 575299308033..d65d21d5bca5 100644
--- a/awscli/customizations/s3/transferconfig.py
+++ b/awscli/customizations/s3/transferconfig.py
@@ -22,6 +22,7 @@
     'multipart_chunksize': 8 * (1024 ** 2),
     'max_concurrent_requests': 10,
     'max_queue_size': 1000,
+    'max_bandwidth': None
 }
 
 
@@ -32,8 +33,10 @@ class InvalidConfigError(Exception):
 class RuntimeConfig(object):
 
     POSITIVE_INTEGERS = ['multipart_chunksize', 'multipart_threshold',
-                         'max_concurrent_requests', 'max_queue_size']
+                         'max_concurrent_requests', 'max_queue_size',
+                         'max_bandwidth']
     HUMAN_READABLE_SIZES = ['multipart_chunksize', 'multipart_threshold']
+    HUMAN_READABLE_RATES = ['max_bandwidth']
 
     @staticmethod
     def defaults():
@@ -54,6 +57,7 @@ def build_config(self, **kwargs):
         if kwargs:
             runtime_config.update(kwargs)
         self._convert_human_readable_sizes(runtime_config)
+        self._convert_human_readable_rates(runtime_config)
         self._validate_config(runtime_config)
         return runtime_config
 
@@ -63,6 +67,17 @@ def _convert_human_readable_sizes(self, runtime_config):
             if value is not None and not isinstance(value, six.integer_types):
                 runtime_config[attr] = human_readable_to_bytes(value)
 
+    def _convert_human_readable_rates(self, runtime_config):
+        for attr in self.HUMAN_READABLE_RATES:
+            value = runtime_config.get(attr)
+            if value is not None and not isinstance(value, six.integer_types):
+                if not value.endswith('B/s'):
+                    raise InvalidConfigError(
+                        'Invalid rate: %s. The value must be expressed '
+                        'as a rate in terms of bytes per second '
+                        '(e.g. 10MB/s or 800KB/s)' % value)
+                runtime_config[attr] = human_readable_to_bytes(value[:-2])
+
     def _validate_config(self, runtime_config):
         for attr in self.POSITIVE_INTEGERS:
             value = runtime_config.get(attr)
@@ -94,6 +109,7 @@ def create_transfer_config_from_runtime_config(runtime_config):
         'max_queue_size': 'max_request_queue_size',
         'multipart_threshold': 'multipart_threshold',
         'multipart_chunksize': 'multipart_chunksize',
+        'max_bandwidth': 'max_bandwidth',
     }
     kwargs = {}
     for key, value in runtime_config.items():
diff --git a/awscli/topics/s3-config.rst b/awscli/topics/s3-config.rst
index edf3a90e938f..0c088d0047ee 100644
--- a/awscli/topics/s3-config.rst
+++ b/awscli/topics/s3-config.rst
@@ -28,6 +28,8 @@ command set:
   transfers of individual files.
 * ``multipart_chunksize`` - When using multipart transfers, this is the chunk
   size that the CLI uses for multipart transfers of individual files.
+* ``max_bandwidth`` - The maximum bandwidth that will be consumed for uploading
+  and downloading data to and from Amazon S3.
 
 These are the configuration values that can be set for both ``aws s3``
@@ -60,6 +62,7 @@ configuration::
     max_queue_size = 10000
     multipart_threshold = 64MB
     multipart_chunksize = 16MB
+    max_bandwidth = 50MB/s
     use_accelerate_endpoint = true
     addressing_style = path
 
@@ -75,6 +78,7 @@ could instead run these commands::
   $ aws configure set default.s3.max_queue_size 10000
   $ aws configure set default.s3.multipart_threshold 64MB
   $ aws configure set default.s3.multipart_chunksize 16MB
+  $ aws configure set default.s3.max_bandwidth 50MB/s
   $ aws configure set default.s3.use_accelerate_endpoint true
   $ aws configure set default.s3.addressing_style path
 
@@ -164,6 +168,33 @@ that is either as the number of bytes as an integer, or using a size suffix.
+max_bandwidth
+-------------
+
+**Default** - None
+
+This controls the maximum bandwidth that the S3 commands will
+use when streaming content data to and from S3. This value only
+applies to uploads and downloads; it does not apply to copies or deletes
+because those data transfers take place server side. The value is
+in terms of **bytes** per second and can be specified as:
+
+* An integer. For example, ``1048576`` would set the maximum bandwidth usage
+  to 1 megabyte per second.
+* A rate suffix. You can specify rate suffixes using ``KB/s``, ``MB/s``,
+  ``GB/s``, etc. For example: ``300KB/s`` or ``10MB/s``.
+
+In general, it is recommended to first use ``max_concurrent_requests`` to lower
+transfers to the desired bandwidth consumption. The ``max_bandwidth`` setting
+should then be used to further limit bandwidth consumption if lowering
+``max_concurrent_requests`` alone cannot reach the desired rate. This is
+recommended because ``max_concurrent_requests`` controls how many threads are
+running concurrently. If a high ``max_concurrent_requests`` value is set
+together with a low ``max_bandwidth`` value, threads may have to wait
+unnecessarily, which can lead to excess resource consumption and connection
+timeouts.
+
+
 use_accelerate_endpoint
 -----------------------
 
diff --git a/tests/unit/customizations/s3/test_transferconfig.py b/tests/unit/customizations/s3/test_transferconfig.py
index 2684af3e6de0..cad4bdc8bb36 100644
--- a/tests/unit/customizations/s3/test_transferconfig.py
+++ b/tests/unit/customizations/s3/test_transferconfig.py
@@ -70,6 +70,18 @@ def test_long_value(self):
             multipart_threshold=long_value)
         self.assertEqual(runtime_config['multipart_threshold'], long_value)
 
+    def test_converts_max_bandwidth_as_string(self):
+        runtime_config = self.build_config_with(max_bandwidth='1MB/s')
+        self.assertEqual(runtime_config['max_bandwidth'], 1024 * 1024)
+
+    def test_validates_max_bandwidth_no_seconds(self):
+        with self.assertRaises(transferconfig.InvalidConfigError):
+            self.build_config_with(max_bandwidth='1MB')
+
+    def test_validates_max_bandwidth_in_bits_per_second(self):
+        with self.assertRaises(transferconfig.InvalidConfigError):
+            self.build_config_with(max_bandwidth='1Mb/s')
+
 
 class TestConvertToS3TransferConfig(unittest.TestCase):
     def test_convert(self):
@@ -78,6 +90,7 @@ def test_convert(self):
             'multipart_chunksize': 2,
             'max_concurrent_requests': 3,
             'max_queue_size': 4,
+            'max_bandwidth': 1024 * 1024,
             'addressing_style': 'path',
             'use_accelerate_endpoint': True,
             # This is a TransferConfig only option, it should
@@ -90,4 +103,5 @@ def test_convert(self):
         self.assertEqual(result.multipart_chunksize, 2)
         self.assertEqual(result.max_request_concurrency, 3)
         self.assertEqual(result.max_request_queue_size, 4)
+        self.assertEqual(result.max_bandwidth, 1024 * 1024)
         self.assertNotEqual(result.max_in_memory_upload_chunks, 1000)
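
For a quick local sanity check of the conversion and validation paths touched by this
patch, a sketch along these lines could be used. It is not part of the change itself;
it assumes a CLI checkout with this patch applied and an s3transfer version whose
``TransferConfig`` accepts ``max_bandwidth`` (the assumption the updated unit test also
makes), and it only relies on names that appear in the diff (``RuntimeConfig``,
``InvalidConfigError``, ``create_transfer_config_from_runtime_config``)::

    # Sketch only; assumes this patch is applied to the awscli checkout in use.
    from awscli.customizations.s3.transferconfig import (
        RuntimeConfig,
        InvalidConfigError,
        create_transfer_config_from_runtime_config,
    )

    # Human-readable rates must end in 'B/s' and are converted to bytes per second.
    runtime_config = RuntimeConfig().build_config(max_bandwidth='10MB/s')
    print(runtime_config['max_bandwidth'])  # 10485760, i.e. 10 * 1024 ** 2

    # Plain integers are passed through unchanged and validated as positive integers.
    runtime_config = RuntimeConfig().build_config(max_bandwidth=1024 * 1024)

    # The new translation-map entry forwards the value to s3transfer's
    # TransferConfig under the same 'max_bandwidth' name.
    transfer_config = create_transfer_config_from_runtime_config(runtime_config)
    print(transfer_config.max_bandwidth)  # 1048576

    # Rates missing the '/s' suffix, or written in bits (e.g. '1Mb/s'), are rejected.
    try:
        RuntimeConfig().build_config(max_bandwidth='10MB')
    except InvalidConfigError as error:
        print(error)

The expected values mirror the unit tests above: ``1MB/s`` converts to ``1024 * 1024``
bytes per second, and a rate without the ``B/s`` suffix raises ``InvalidConfigError``.
The same conversion backs the ``aws configure set default.s3.max_bandwidth 50MB/s``
example added to ``s3-config.rst``.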