diff --git a/examples/readers/pytorch_data_api_tiledb_dense.ipynb b/examples/readers/pytorch_data_api_tiledb_dense.ipynb
index 522bdd04..b16442f9 100644
--- a/examples/readers/pytorch_data_api_tiledb_dense.ipynb
+++ b/examples/readers/pytorch_data_api_tiledb_dense.ipynb
@@ -454,7 +454,7 @@
     "    img = np.clip(img,0,1)\n",
     "    return img\n",
     "\n",
-    "ctx = tiledb.Ctx({'sm.memory_budget': 1024**2})\n",
+    "ctx = tiledb.Ctx({'sm.mem.total_budget': 1024**2})\n",
     "with tiledb.open(training_images, ctx=ctx) as x, tiledb.open(training_labels, ctx=ctx) as y:\n",
     "    # Because of this issue (https://github.com/pytorch/pytorch/issues/59451#issuecomment-854883855) we avoid using multiple workers on Jupyter.\n",
     "    train_loader = PyTorchTileDBDataLoader(\n",
diff --git a/examples/readers/tensorflow_data_api_tiledb_dense.ipynb b/examples/readers/tensorflow_data_api_tiledb_dense.ipynb
index b621c25b..79a722ea 100644
--- a/examples/readers/tensorflow_data_api_tiledb_dense.ipynb
+++ b/examples/readers/tensorflow_data_api_tiledb_dense.ipynb
@@ -336,7 +336,7 @@
     "\n",
     "model = create_model()\n",
     "\n",
-    "ctx = tiledb.Ctx({'sm.memory_budget': 1024**2})\n",
+    "ctx = tiledb.Ctx({'sm.mem.total_budget': 1024**2})\n",
     "with tiledb.open(training_images, ctx=ctx) as x, tiledb.open(training_labels, ctx=ctx) as y:\n",
     "    tiledb_dataset = TensorflowTileDBDataset(\n",
     "        ArrayParams(array=x, fields=['features']),\n",
@@ -406,4 +406,4 @@
  },
  "nbformat": 4,
  "nbformat_minor": 4
-}
+}
\ No newline at end of file
diff --git a/tests/readers/test_tensor_schema.py b/tests/readers/test_tensor_schema.py
index 1a2a6a47..ce9de9b6 100644
--- a/tests/readers/test_tensor_schema.py
+++ b/tests/readers/test_tensor_schema.py
@@ -117,7 +117,7 @@ def parametrize_fields(*fields, num=3):
 def test_max_partition_weight_dense(
     dense_uri, fields, key_dim, memory_budget, dim_selectors
 ):
-    config = {"py.max_incomplete_retries": 0, "sm.memory_budget": memory_budget}
+    config = {"py.max_incomplete_retries": 0, "sm.mem.total_budget": memory_budget}
     with tiledb.open(dense_uri, config=config) as array:
         _test_max_partition_weight(array, fields, key_dim, dim_selectors)
 
diff --git a/tiledb/ml/readers/_tensor_schema/base.py b/tiledb/ml/readers/_tensor_schema/base.py
index f22954fc..d862ea26 100644
--- a/tiledb/ml/readers/_tensor_schema/base.py
+++ b/tiledb/ml/readers/_tensor_schema/base.py
@@ -104,7 +104,7 @@ def max_partition_weight(self) -> int:
 
         What constitutes weight of a partition depends on the array type:
         - For dense arrays, it is the number of unique keys (= number of "rows").
-          It depends on the `sm.memory_budget` config parameter.
+          It depends on the `sm.mem.total_budget` config parameter.
         - For sparse arrays, it is the number of non-empty cells.
           It depends on the `py.init_buffer_bytes` config parameter.
         """
diff --git a/tiledb/ml/readers/_tensor_schema/dense.py b/tiledb/ml/readers/_tensor_schema/dense.py
index 08b42deb..0ad30e16 100644
--- a/tiledb/ml/readers/_tensor_schema/dense.py
+++ b/tiledb/ml/readers/_tensor_schema/dense.py
@@ -66,7 +66,7 @@ def iter_tensors(
 
     @property
     def max_partition_weight(self) -> int:
-        memory_budget = int(self._array._ctx_().config()["sm.memory_budget"])
+        memory_budget = int(self._array._ctx_().config()["sm.mem.total_budget"])
 
         # The memory budget should be large enough to read the cells of the largest field
         bytes_per_cell = max(dtype.itemsize for dtype in self.field_dtypes)
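
For reference, a minimal sketch of how the renamed `sm.mem.total_budget` config key is consumed end to end. The `Ctx` construction, `ArrayParams`, and `PyTorchTileDBDataLoader` usage mirror the notebook cells in this diff; the import paths are assumed from the `tiledb/ml/readers` package layout visible above, and the array URIs and batch size are illustrative placeholders, not values from the repository.

import tiledb
from tiledb.ml.readers.pytorch import PyTorchTileDBDataLoader
from tiledb.ml.readers.types import ArrayParams

# Cap TileDB's total memory budget at 1 MiB, matching the notebooks above.
# This budget is what DenseTensorSchema.max_partition_weight reads to size partitions.
ctx = tiledb.Ctx({"sm.mem.total_budget": 1024**2})

# "training_images" / "training_labels" are placeholder array URIs.
with tiledb.open("training_images", ctx=ctx) as x, tiledb.open(
    "training_labels", ctx=ctx
) as y:
    train_loader = PyTorchTileDBDataLoader(
        ArrayParams(array=x, fields=["features"]),
        ArrayParams(array=y),
        batch_size=64,  # assumed to be forwarded to torch.utils.data.DataLoader
    )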