diff --git a/kedro/io/csv_s3.py b/kedro/io/csv_s3.py
index cba4730606..aadcb6b077 100644
--- a/kedro/io/csv_s3.py
+++ b/kedro/io/csv_s3.py
@@ -130,7 +130,7 @@ def _load(self) -> pd.DataFrame:
         with self._s3.open(
             "{}/{}".format(self._bucket_name, load_key), mode="rb"
         ) as s3_file:
-            return pd.read_csv(s3_file)
+            return pd.read_csv(s3_file, **self._load_args)
 
     def _save(self, data: pd.DataFrame) -> None:
         save_key = self._get_save_path(
diff --git a/tests/io/test_csv_s3.py b/tests/io/test_csv_s3.py
index 123df0f031..95de65a24a 100644
--- a/tests/io/test_csv_s3.py
+++ b/tests/io/test_csv_s3.py
@@ -224,6 +224,14 @@ def test_str_representation(self, s3_data_set, save_args):
     def test_serializable(self, s3_data_set):
         ForkingPickler.dumps(s3_data_set)
 
+    # pylint: disable=unused-argument
+    def test_load_args_propagated(self, mocker, mocked_s3_object):
+        mock = mocker.patch("kedro.io.csv_s3.pd.read_csv")
+        CSVS3DataSet(
+            FILENAME, BUCKET_NAME, AWS_CREDENTIALS, load_args=dict(custom=42)
+        ).load()
+        assert mock.call_args_list[0][1] == {"custom": 42}
+
 
 @pytest.mark.usefixtures("mocked_s3_bucket")
 class TestCSVS3DataSetVersioned:
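
Note (not part of the patch): a minimal usage sketch of what this change enables. With the fix, whatever is passed as load_args should be forwarded verbatim to pandas.read_csv when the data set is loaded, mirroring the new test. The file path, bucket name and credentials below are placeholders.

from kedro.io.csv_s3 import CSVS3DataSet

# load_args are handed straight to pd.read_csv, e.g. to pick a separator
# or cap the number of rows read from the CSV stored in S3.
data_set = CSVS3DataSet(
    "data/example.csv",  # placeholder S3 key
    "my-bucket",         # placeholder bucket name
    {"aws_access_key_id": "...", "aws_secret_access_key": "..."},
    load_args={"sep": ";", "nrows": 100},
)
df = data_set.load()  # roughly pd.read_csv(<s3 file>, sep=";", nrows=100)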