
Commit

albert's comments
lhoestq committed Oct 8, 2021
1 parent 87e2ab7 commit 243ac56
Showing 2 changed files with 126 additions and 144 deletions.
20 changes: 12 additions & 8 deletions src/datasets/data_files.py

@@ -239,18 +239,20 @@ def __init__(self, data_files: List[Union[Path, Url]], origin_metadata: List[Tup
         super().__init__(data_files)
         self.origin_metadata = origin_metadata

-    @staticmethod
+    @classmethod
     def from_hf_repo(
+        cls,
         patterns: List[str],
         dataset_info: huggingface_hub.hf_api.DatasetInfo,
         allowed_extensions: Optional[List[str]] = None,
     ) -> "DataFilesList":
         data_files = resolve_patterns_in_dataset_repository(dataset_info, patterns, allowed_extensions)
         origin_metadata = [(dataset_info.id, dataset_info.sha) for _ in patterns]
-        return DataFilesList(data_files, origin_metadata)
+        return cls(data_files, origin_metadata)

-    @staticmethod
+    @classmethod
     def from_local_or_remote(
+        cls,
         patterns: List[str],
         base_path: Optional[str] = None,
         allowed_extensions: Optional[List[str]] = None,
@@ -259,7 +261,7 @@ def from_local_or_remote(
         base_path = base_path if base_path is not None else str(Path().resolve())
         data_files = resolve_patterns_locally_or_by_urls(base_path, patterns, allowed_extensions)
         origin_metadata = _get_origin_metadata_locally_or_by_urls(data_files, use_auth_token=use_auth_token)
-        return DataFilesList(data_files, origin_metadata)
+        return cls(data_files, origin_metadata)


 class DataFilesDict(Dict[str, DataFilesList]):
@@ -278,14 +280,15 @@ class DataFilesDict(Dict[str, DataFilesList]):
     Changing the order of the keys of this dictionary also doesn't change its hash.
     """

-    @staticmethod
+    @classmethod
     def from_local_or_remote(
+        cls,
         patterns: Dict[str, Union[List[str], DataFilesList]],
         base_path: Optional[str] = None,
         allowed_extensions: Optional[List[str]] = None,
         use_auth_token: Optional[Union[bool, str]] = None,
     ) -> "DataFilesDict":
-        out = DataFilesDict()
+        out = cls()
         for key, patterns_for_key in patterns.items():
             out[key] = (
                 DataFilesList.from_local_or_remote(
@@ -299,13 +302,14 @@ def from_local_or_remote(
             )
         return out

-    @staticmethod
+    @classmethod
     def from_hf_repo(
+        cls,
         patterns: Dict[str, Union[List[str], DataFilesList]],
         dataset_info: huggingface_hub.hf_api.DatasetInfo,
         allowed_extensions: Optional[List[str]] = None,
     ) -> "DataFilesDict":
-        out = DataFilesDict()
+        out = cls()
         for key, patterns_for_key in patterns.items():
             out[key] = (
                 DataFilesList.from_hf_repo(
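
The switch from @staticmethod to @classmethod above is the usual classmethod-factory pattern: returning cls(...) instead of a hard-coded DataFilesList or DataFilesDict lets any subclass reuse the inherited factory and get back an instance of itself. The sketch below is a minimal illustration of that pattern only; Base, Sub, and from_items are hypothetical names, not part of datasets.

```python
# Minimal sketch (not datasets code) of why the factories now return cls(...):
# subclasses inherit the factory and get instances of the subclass back.
from typing import List


class Base(list):
    """Stand-in for a list-like container such as DataFilesList."""

    @classmethod
    def from_items(cls, items: List[str]) -> "Base":
        # With the old @staticmethod version this would have been
        # `return Base(items)`, pinning the result to Base forever.
        return cls(items)


class Sub(Base):
    """A subclass that reuses the inherited factory unchanged."""


print(type(Base.from_items(["a"])).__name__)  # Base
print(type(Sub.from_items(["a"])).__name__)   # Sub
```

That subclass-friendliness is the practical effect of this commit: the from_* factories no longer pin their return type to the base container classes.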
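
For reference, a hedged usage sketch of the two factories touched here, based only on the signatures visible in the diff; the glob patterns, base path, and split names are illustrative assumptions, and matching files must exist for pattern resolution to succeed.

```python
# Illustrative only: paths, patterns, and split names are assumptions,
# and pattern resolution raises if nothing matches them on disk.
from datasets.data_files import DataFilesDict, DataFilesList

train_files = DataFilesList.from_local_or_remote(
    ["data/train-*.json"],          # patterns resolved locally or as URLs
    base_path=".",                  # defaults to the current working directory
    allowed_extensions=[".json"],
)

data_files = DataFilesDict.from_local_or_remote(
    {"train": ["data/train-*.json"], "test": ["data/test-*.json"]},
    base_path=".",
)
```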

1 comment on commit 243ac56

@github-actions

Show benchmarks

PyArrow==3.0.0


Benchmark: benchmark_array_xd.json

| metric | new / old (diff) |
|---|---|
| read_batch_formatted_as_numpy after write_array2d | 0.010668 / 0.011353 (-0.000685) |
| read_batch_formatted_as_numpy after write_flattened_sequence | 0.004198 / 0.011008 (-0.006810) |
| read_batch_formatted_as_numpy after write_nested_sequence | 0.038668 / 0.038508 (0.000160) |
| read_batch_unformated after write_array2d | 0.034448 / 0.023109 (0.011339) |
| read_batch_unformated after write_flattened_sequence | 0.342312 / 0.275898 (0.066414) |
| read_batch_unformated after write_nested_sequence | 0.354964 / 0.323480 (0.031484) |
| read_col_formatted_as_numpy after write_array2d | 0.008490 / 0.007986 (0.000505) |
| read_col_formatted_as_numpy after write_flattened_sequence | 0.005009 / 0.004328 (0.000680) |
| read_col_formatted_as_numpy after write_nested_sequence | 0.010337 / 0.004250 (0.006087) |
| read_col_unformated after write_array2d | 0.037787 / 0.037052 (0.000735) |
| read_col_unformated after write_flattened_sequence | 0.335486 / 0.258489 (0.076997) |
| read_col_unformated after write_nested_sequence | 0.348867 / 0.293841 (0.055026) |
| read_formatted_as_numpy after write_array2d | 0.033581 / 0.128546 (-0.094966) |
| read_formatted_as_numpy after write_flattened_sequence | 0.011648 / 0.075646 (-0.063998) |
| read_formatted_as_numpy after write_nested_sequence | 0.285493 / 0.419271 (-0.133778) |
| read_unformated after write_array2d | 0.055339 / 0.043533 (0.011807) |
| read_unformated after write_flattened_sequence | 0.318699 / 0.255139 (0.063560) |
| read_unformated after write_nested_sequence | 0.350183 / 0.283200 (0.066984) |
| write_array2d | 0.079773 / 0.141683 (-0.061910) |
| write_flattened_sequence | 1.901212 / 1.452155 (0.449058) |
| write_nested_sequence | 1.922782 / 1.492716 (0.430066) |

Benchmark: benchmark_getitem_100B.json

| metric | new / old (diff) |
|---|---|
| get_batch_of_1024_random_rows | 0.199182 / 0.018006 (0.181176) |
| get_batch_of_1024_rows | 0.494937 / 0.000490 (0.494447) |
| get_first_row | 0.004917 / 0.000200 (0.004717) |
| get_last_row | 0.000373 / 0.000054 (0.000319) |

Benchmark: benchmark_indices_mapping.json

| metric | new / old (diff) |
|---|---|
| select | 0.041907 / 0.037411 (0.004496) |
| shard | 0.030813 / 0.014526 (0.016287) |
| shuffle | 0.067130 / 0.176557 (-0.109427) |
| sort | 0.138227 / 0.737135 (-0.598908) |
| train_test_split | 0.069487 / 0.296338 (-0.226852) |

Benchmark: benchmark_iterating.json

| metric | new / old (diff) |
|---|---|
| read 5000 | 0.478864 / 0.215209 (0.263655) |
| read 50000 | 5.069333 / 2.077655 (2.991679) |
| read_batch 50000 10 | 2.340339 / 1.504120 (0.836219) |
| read_batch 50000 100 | 2.057725 / 1.541195 (0.516531) |
| read_batch 50000 1000 | 2.058675 / 1.468490 (0.590185) |
| read_formatted numpy 5000 | 0.530123 / 4.584777 (-4.054654) |
| read_formatted pandas 5000 | 6.264006 / 3.745712 (2.518294) |
| read_formatted tensorflow 5000 | 1.473559 / 5.269862 (-3.796302) |
| read_formatted torch 5000 | 1.363566 / 4.565676 (-3.202111) |
| read_formatted_batch numpy 5000 10 | 0.059683 / 0.424275 (-0.364593) |
| read_formatted_batch numpy 5000 1000 | 0.005353 / 0.007607 (-0.002255) |
| shuffled read 5000 | 0.693427 / 0.226044 (0.467382) |
| shuffled read 50000 | 6.314007 / 2.268929 (4.045079) |
| shuffled read_batch 50000 10 | 2.825931 / 55.444624 (-52.618693) |
| shuffled read_batch 50000 100 | 2.262740 / 6.876477 (-4.613737) |
| shuffled read_batch 50000 1000 | 2.219279 / 2.142072 (0.077207) |
| shuffled read_formatted numpy 5000 | 0.662098 / 4.805227 (-4.143129) |
| shuffled read_formatted_batch numpy 5000 10 | 0.139234 / 6.500664 (-6.361430) |
| shuffled read_formatted_batch numpy 5000 1000 | 0.055050 / 0.075469 (-0.020419) |

Benchmark: benchmark_map_filter.json

| metric | new / old (diff) |
|---|---|
| filter | 1.109807 / 1.841788 (-0.731981) |
| map fast-tokenizer batched | 15.253038 / 8.074308 (7.178730) |
| map identity | 34.947131 / 10.191392 (24.755739) |
| map identity batched | 0.914030 / 0.680424 (0.233606) |
| map no-op batched | 0.605410 / 0.534201 (0.071209) |
| map no-op batched numpy | 0.276643 / 0.579283 (-0.302640) |
| map no-op batched pandas | 0.697004 / 0.434364 (0.262640) |
| map no-op batched pytorch | 0.241630 / 0.540337 (-0.298707) |
| map no-op batched tensorflow | 0.262561 / 1.386936 (-1.124375) |

PyArrow==latest

Benchmark: benchmark_array_xd.json

| metric | new / old (diff) |
|---|---|
| read_batch_formatted_as_numpy after write_array2d | 0.009712 / 0.011353 (-0.001641) |
| read_batch_formatted_as_numpy after write_flattened_sequence | 0.005010 / 0.011008 (-0.005998) |
| read_batch_formatted_as_numpy after write_nested_sequence | 0.036772 / 0.038508 (-0.001736) |
| read_batch_unformated after write_array2d | 0.043249 / 0.023109 (0.020140) |
| read_batch_unformated after write_flattened_sequence | 0.329009 / 0.275898 (0.053111) |
| read_batch_unformated after write_nested_sequence | 0.368202 / 0.323480 (0.044722) |
| read_col_formatted_as_numpy after write_array2d | 0.017359 / 0.007986 (0.009373) |
| read_col_formatted_as_numpy after write_flattened_sequence | 0.004924 / 0.004328 (0.000595) |
| read_col_formatted_as_numpy after write_nested_sequence | 0.010066 / 0.004250 (0.005815) |
| read_col_unformated after write_array2d | 0.044442 / 0.037052 (0.007390) |
| read_col_unformated after write_flattened_sequence | 0.357126 / 0.258489 (0.098637) |
| read_col_unformated after write_nested_sequence | 0.405156 / 0.293841 (0.111315) |
| read_formatted_as_numpy after write_array2d | 0.031848 / 0.128546 (-0.096699) |
| read_formatted_as_numpy after write_flattened_sequence | 0.010536 / 0.075646 (-0.065110) |
| read_formatted_as_numpy after write_nested_sequence | 0.300088 / 0.419271 (-0.119183) |
| read_unformated after write_array2d | 0.052458 / 0.043533 (0.008925) |
| read_unformated after write_flattened_sequence | 0.333432 / 0.255139 (0.078293) |
| read_unformated after write_nested_sequence | 0.380751 / 0.283200 (0.097551) |
| write_array2d | 0.076698 / 0.141683 (-0.064985) |
| write_flattened_sequence | 1.853810 / 1.452155 (0.401655) |
| write_nested_sequence | 2.027241 / 1.492716 (0.534524) |

Benchmark: benchmark_getitem_100B.json

| metric | new / old (diff) |
|---|---|
| get_batch_of_1024_random_rows | 0.338054 / 0.018006 (0.320048) |
| get_batch_of_1024_rows | 0.492094 / 0.000490 (0.491604) |
| get_first_row | 0.065673 / 0.000200 (0.065473) |
| get_last_row | 0.000632 / 0.000054 (0.000577) |

Benchmark: benchmark_indices_mapping.json

| metric | new / old (diff) |
|---|---|
| select | 0.040865 / 0.037411 (0.003454) |
| shard | 0.026241 / 0.014526 (0.011716) |
| shuffle | 0.027581 / 0.176557 (-0.148976) |
| sort | 0.138770 / 0.737135 (-0.598365) |
| train_test_split | 0.033512 / 0.296338 (-0.262827) |

Benchmark: benchmark_iterating.json

| metric | new / old (diff) |
|---|---|
| read 5000 | 0.475166 / 0.215209 (0.259957) |
| read 50000 | 4.605515 / 2.077655 (2.527861) |
| read_batch 50000 10 | 2.107142 / 1.504120 (0.603022) |
| read_batch 50000 100 | 1.768971 / 1.541195 (0.227777) |
| read_batch 50000 1000 | 1.745706 / 1.468490 (0.277216) |
| read_formatted numpy 5000 | 0.512369 / 4.584777 (-4.072408) |
| read_formatted pandas 5000 | 6.243924 / 3.745712 (2.498212) |
| read_formatted tensorflow 5000 | 1.456871 / 5.269862 (-3.812990) |
| read_formatted torch 5000 | 1.322874 / 4.565676 (-3.242802) |
| read_formatted_batch numpy 5000 10 | 0.060104 / 0.424275 (-0.364171) |
| read_formatted_batch numpy 5000 1000 | 0.005240 / 0.007607 (-0.002367) |
| shuffled read 5000 | 0.664946 / 0.226044 (0.438902) |
| shuffled read 50000 | 6.047002 / 2.268929 (3.778073) |
| shuffled read_batch 50000 10 | 2.648040 / 55.444624 (-52.796584) |
| shuffled read_batch 50000 100 | 2.131231 / 6.876477 (-4.745246) |
| shuffled read_batch 50000 1000 | 2.070615 / 2.142072 (-0.071458) |
| shuffled read_formatted numpy 5000 | 0.667995 / 4.805227 (-4.137232) |
| shuffled read_formatted_batch numpy 5000 10 | 0.145106 / 6.500664 (-6.355558) |
| shuffled read_formatted_batch numpy 5000 1000 | 0.056642 / 0.075469 (-0.018827) |

Benchmark: benchmark_map_filter.json

| metric | new / old (diff) |
|---|---|
| filter | 1.062565 / 1.841788 (-0.779223) |
| map fast-tokenizer batched | 14.163580 / 8.074308 (6.089272) |
| map identity | 35.301967 / 10.191392 (25.110575) |
| map identity batched | 0.950004 / 0.680424 (0.269580) |
| map no-op batched | 0.593515 / 0.534201 (0.059314) |
| map no-op batched numpy | 0.267490 / 0.579283 (-0.311793) |
| map no-op batched pandas | 0.675074 / 0.434364 (0.240710) |
| map no-op batched pytorch | 0.226716 / 0.540337 (-0.313622) |
| map no-op batched tensorflow | 0.236530 / 1.386936 (-1.150406) |

