diff --git a/.flake8 b/.flake8 new file mode 100644 index 00000000..8fbad451 --- /dev/null +++ b/.flake8 @@ -0,0 +1,16 @@ +[flake8] +max-line-length = 80 +# TODO: keep just E203 +extend-ignore = E203, E266, F401, E501 +# TODO: fix excludes +exclude = + ./.git + __pycache__ + ./dist + ./setup.py + ./dpbench/benchmarks/l2_norm/l2_norm_numba_dpex_k.py + ./dpbench/benchmarks/pairwise_distance/pairwise_distance_numba_dpex_k.py + ./dpbench/benchmarks/pairwise_distance/pairwise_distance_numba_dpex_p.py + ./dpbench/benchmarks/pairwise_distance/pairwise_distance_numba_npr.py +# TODO: simplify to 10 +max-complexity = 14 diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index fff9a6e2..bd30d86c 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -37,5 +37,24 @@ repos: hooks: - id: reuse name: add SPDX headers - args: [annotate, --skip-existing, --skip-unrecognised, --year=2022 - 2023, --copyright=Intel Corporation, --license=Apache-2.0] + args: [ + annotate, + --skip-existing, + --skip-unrecognised, + --year=2022 - 2023, + --copyright=Intel Corporation, + --license=Apache-2.0] pass_filenames: true +- repo: https://github.com/pycqa/pydocstyle + rev: 6.3.0 + hooks: + - id: pydocstyle + # TODO: add packages one by one to enforce pydocstyle eventually + files: ^dpbench/config/ + args: ["--convention=google"] + # D417 does not work properly: + # https://github.com/PyCQA/pydocstyle/issues/459 +- repo: https://github.com/pycqa/flake8 + rev: 6.0.0 + hooks: + - id: flake8 diff --git a/dpbench/__init__.py b/dpbench/__init__.py index 89f2cf8b..9d6e3430 100644 --- a/dpbench/__init__.py +++ b/dpbench/__init__.py @@ -2,16 +2,9 @@ # # SPDX-License-Identifier: Apache-2.0 -from .runner import ( - list_available_benchmarks, - list_possible_implementations, - run_benchmark, - run_benchmarks, -) +from .runner import run_benchmark, run_benchmarks __all__ = [ "run_benchmark", "run_benchmarks", - "list_available_benchmarks", - "list_possible_implementations", ] diff 
--git a/dpbench/config/__init__.py b/dpbench/config/__init__.py index 4babe1d2..03b2110d 100644 --- a/dpbench/config/__init__.py +++ b/dpbench/config/__init__.py @@ -9,63 +9,21 @@ to provide a structured way to define and store benchmark data. """ -import json -import os -from .benchmark import Benchmark +from .benchmark import Benchmark, BenchmarkImplementation from .config import Config from .framework import Framework -from .implementaion_postfix import Implementation - - -def read_configs(dirname: str = os.path.dirname(__file__)) -> Config: - """Read all configuration files and populates those settings into Config.""" - C: Config = Config([], [], []) - - impl_postfix_file = os.path.join(dirname, "../configs/impl_postfix.json") - bench_info_dir = os.path.join(dirname, "../configs/bench_info") - framework_info_dir = os.path.join(dirname, "../configs/framework_info") - - for bench_info_file in os.listdir(bench_info_dir): - if not bench_info_file.endswith(".json"): - continue - - bench_info_file = os.path.join(bench_info_dir, bench_info_file) - - with open(bench_info_file) as file: - file_contents = file.read() - - bench_info = json.loads(file_contents) - benchmark = Benchmark.from_dict(bench_info.get("benchmark")) - C.benchmarks.append(benchmark) - - for framework_info_file in os.listdir(framework_info_dir): - if not framework_info_file.endswith(".json"): - continue - - framework_info_file = os.path.join( - framework_info_dir, framework_info_file - ) - - with open(framework_info_file) as file: - file_contents = file.read() - - framework_info = json.loads(file_contents) - framework_dict = framework_info.get("framework") - if framework_dict: - framework = Framework.from_dict(framework_dict) - C.frameworks.append(framework) - - with open(impl_postfix_file) as file: - file_contents = file.read() - - implementaion_postfixes = json.loads(file_contents) - for impl in implementaion_postfixes: - implementation = Implementation.from_dict(impl) - 
C.implementations.append(implementation) - - return C - +from .implementation_postfix import Implementation +from .reader import read_configs """Use this variable for reading configurations""" -C: Config = read_configs() +GLOBAL: Config = read_configs() + +__all__ = [ + "GLOBAL", + "Benchmark", + "BenchmarkImplementation", + "Config", + "Framework", + "Implementation", +] diff --git a/dpbench/config/benchmark.py b/dpbench/config/benchmark.py index 3f3c3b11..3391c426 100644 --- a/dpbench/config/benchmark.py +++ b/dpbench/config/benchmark.py @@ -4,7 +4,7 @@ """Benchmark related configuration classes.""" -from dataclasses import dataclass +from dataclasses import dataclass, field from typing import Any, List Parameters = dict[str, Any] @@ -16,56 +16,92 @@ class Init: """Configuration for benchmark initialization.""" - func_name: str - input_args: List[str] - output_args: List[str] + func_name: str = "" + package_path: str = "" + module_name: str = "" + input_args: List[str] = field(default_factory=list) + output_args: List[str] = field(default_factory=list) @staticmethod def from_dict(obj: Any) -> "Init": """Convert object into Init dataclass.""" - _func_name = str(obj.get("func_name")) + _func_name = str(obj.get("func_name") or "") + _package_path = str(obj.get("package_path") or "") + _module_name = str(obj.get("module_name") or "") _input_args = obj.get("input_args") _output_args = obj.get("output_args") - return Init(_func_name, _input_args, _output_args) + return Init( + _func_name, _package_path, _module_name, _input_args, _output_args + ) + + def __post_init__(self): + """Post initialization hook for dataclass. 
Not for direct use.""" + self.func_name = self.func_name or "initialize" + + +@dataclass +class BenchmarkImplementation: + """Configuration for a benchmark implementation.""" + + postfix: str + func_name: str + module_name: str + package_path: str + + @staticmethod + def from_dict(obj: Any) -> "BenchmarkImplementation": + """Convert object into BenchmarkImplementation dataclass.""" + _postfix = str(obj.get("postfix")) + _func_name = str(obj.get("func_name")) + _module_name = str(obj.get("module_name")) + _package_path = str(obj.get("package_path")) + return BenchmarkImplementation( + _postfix, _func_name, _module_name, _package_path + ) @dataclass class Benchmark: """Configuration with benchmark information.""" - name: str - short_name: str - relative_path: str - module_name: str - func_name: str - kind: str - domain: str - parameters: Presets - init: Init - input_args: List[str] - array_args: List[str] - output_args: List[str] + name: str = "" + short_name: str = "" + relative_path: str = "" + module_name: str = "" + package_path: str = "" + func_name: str = "" + kind: str = "" + domain: str = "" + parameters: Presets = field(default_factory=Presets) + init: Init = field(default_factory=Init) + input_args: List[str] = field(default_factory=list) + array_args: List[str] = field(default_factory=list) + output_args: List[str] = field(default_factory=list) + implementations: List[BenchmarkImplementation] = field(default_factory=list) @staticmethod def from_dict(obj: Any) -> "Benchmark": """Convert object into Benchamrk dataclass.""" - _name = str(obj.get("name")) - _short_name = str(obj.get("short_name")) - _relative_path = str(obj.get("relative_path")) - _module_name = str(obj.get("module_name")) - _func_name = str(obj.get("func_name")) - _kind = str(obj.get("kind")) - _domain = str(obj.get("domain")) + _name = str(obj.get("name") or "") + _short_name = str(obj.get("short_name") or "") + _relative_path = str(obj.get("relative_path") or "") + _module_name = str(obj.get("module_name") or 
"") + _package_path = str(obj.get("package_path") or "") + _func_name = str(obj.get("func_name") or "") + _kind = str(obj.get("kind") or "") + _domain = str(obj.get("domain") or "") _parameters = Presets(obj.get("parameters")) _init = Init.from_dict(obj.get("init")) - _input_args = obj.get("input_args") - _array_args = obj.get("input_args") - _output_args = obj.get("input_args") + _input_args = obj.get("input_args") or [] + _array_args = obj.get("array_args") or [] + _output_args = obj.get("output_args") or [] + _implementations = obj.get("implementations") or [] return Benchmark( _name, _short_name, _relative_path, _module_name, + _package_path, _func_name, _kind, _domain, @@ -74,4 +110,18 @@ def from_dict(obj: Any) -> "Benchmark": _input_args, _array_args, _output_args, + _implementations, ) + + def __post_init__(self): + """Post initialization hook for dataclass. Not for direct use.""" + if self.package_path == "": + self.package_path = f"dpbench.benchmarks.{self.module_name}" + + if self.init.module_name == "": + self.init.module_name = f"{self.module_name}_initialize" + + if self.init.package_path == "": + self.init.package_path = ( + f"{self.package_path}.{self.init.module_name}" + ) diff --git a/dpbench/config/config.py b/dpbench/config/config.py index a92ce3f1..5d198f2d 100644 --- a/dpbench/config/config.py +++ b/dpbench/config/config.py @@ -8,7 +8,7 @@ from .benchmark import Benchmark from .framework import Framework -from .implementaion_postfix import Implementation +from .implementation_postfix import Implementation @dataclass diff --git a/dpbench/config/framework.py b/dpbench/config/framework.py index 7de59116..1ff3bb74 100644 --- a/dpbench/config/framework.py +++ b/dpbench/config/framework.py @@ -17,13 +17,17 @@ class Framework: prefix: str class_: str arch: str + sycl_device: str @staticmethod def from_dict(obj: Any) -> "Framework": """Convert object into Framework dataclass.""" - _simple_name = str(obj.get("simple_name")) - _full_name = 
str(obj.get("full_name")) - _prefix = str(obj.get("prefix")) - _class = str(obj.get("class")) - _arch = str(obj.get("arch")) - return Framework(_simple_name, _full_name, _prefix, _class, _arch) + _simple_name = str(obj.get("simple_name") or "") + _full_name = str(obj.get("full_name") or "") + _prefix = str(obj.get("prefix") or "") + _class = str(obj.get("class") or "") + _arch = str(obj.get("arch") or "") + _sycl_device = str(obj.get("sycl_device") or "") + return Framework( + _simple_name, _full_name, _prefix, _class, _arch, _sycl_device + ) diff --git a/dpbench/config/implementaion_postfix.py b/dpbench/config/implementation_postfix.py similarity index 96% rename from dpbench/config/implementaion_postfix.py rename to dpbench/config/implementation_postfix.py index 669260ec..b5960f4b 100644 --- a/dpbench/config/implementaion_postfix.py +++ b/dpbench/config/implementation_postfix.py @@ -12,7 +12,7 @@ class Implementation: """Configuration with implementation information.""" - impl_postfix: str + postfix: str description: str @staticmethod diff --git a/dpbench/config/reader.py b/dpbench/config/reader.py new file mode 100644 index 00000000..405b5200 --- /dev/null +++ b/dpbench/config/reader.py @@ -0,0 +1,182 @@ +# SPDX-FileCopyrightText: 2022 - 2023 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 + +"""Set of functions to read configuration files.""" + +import importlib +import json +import os + +from .benchmark import Benchmark, BenchmarkImplementation +from .config import Config +from .framework import Framework +from .implementation_postfix import Implementation + + +def read_configs(dirname: str = os.path.dirname(__file__)) -> Config: + """Read all configuration files and populate those settings into Config. + + Args: + dirname: Path to the directory with configuration files. + + Returns: + Configuration object with populated configurations. 
+ """ + config: Config = Config([], [], []) + + impl_postfix_file = os.path.join(dirname, "../configs/impl_postfix.json") + bench_info_dir = os.path.join(dirname, "../configs/bench_info") + framework_info_dir = os.path.join(dirname, "../configs/framework_info") + + read_benchmarks(config, bench_info_dir) + read_frameworks(config, framework_info_dir) + read_implementation_postfixes(config, impl_postfix_file) + + for benchmark in config.benchmarks: + read_benchmark_implementations(benchmark, config.implementations) + + return config + + +def read_benchmarks(config: Config, bench_info_dir: str): + """Read and populate benchmark configuration files. + + Args: + config: Configuration object where settings should be populated. + bench_info_dir: Path to the directory with configuration files. + + Returns: nothing. + """ + for bench_info_file in os.listdir(bench_info_dir): + if not bench_info_file.endswith(".json"): + continue + + bench_info_file = os.path.join(bench_info_dir, bench_info_file) + + with open(bench_info_file) as file: + file_contents = file.read() + + bench_info = json.loads(file_contents) + benchmark = Benchmark.from_dict(bench_info.get("benchmark")) + config.benchmarks.append(benchmark) + + +def read_frameworks(config: Config, framework_info_dir: str) -> None: + """Read and populate framework configuration files. + + Args: + config: Configuration object where settings should be populated. + framework_info_dir: Path to the directory with configuration files. + + Returns: nothing. 
+ """ + for framework_info_file in os.listdir(framework_info_dir): + if not framework_info_file.endswith(".json"): + continue + + framework_info_file = os.path.join( + framework_info_dir, framework_info_file + ) + + with open(framework_info_file) as file: + file_contents = file.read() + + framework_info = json.loads(file_contents) + framework_dict = framework_info.get("framework") + if framework_dict: + framework = Framework.from_dict(framework_dict) + config.frameworks.append(framework) + + +def read_implementation_postfixes( + config: Config, impl_postfix_file: str +) -> None: + """Read and populate implementation postfix configuration file. + + Args: + config: Configuration object where settings should be populated. + impl_postfix_file: Path to the configuration file. + + Returns: nothing. + """ + with open(impl_postfix_file) as file: + file_contents = file.read() + + implementation_postfixes = json.loads(file_contents) + for impl in implementation_postfixes: + implementation = Implementation.from_dict(impl) + config.implementations.append(implementation) + + +def read_benchmark_implementations( + config: Benchmark, implementations: list[Implementation] +) -> None: + """Read and discover implementation modules and functions. + + Args: + config: Benchmark configuration object where settings should be + populated. + implementations: Prepopulated list of implementations. + + Returns: nothing. + + Raises: + RuntimeError: Implementation file does not match any known postfix. + """ + if config.implementations: + return + + mod = importlib.import_module("dpbench.benchmarks." 
+ config.module_name) + + modules: list[str] = [ + m + for m in mod.__loader__.get_resource_reader().contents() + if m.startswith(config.module_name) + ] + + for module in modules: + postfix = "" + module_name = "" + + if module.endswith(".py"): + module_name = module[:-3] + postfix = module_name[len(config.module_name) + 1 :] + elif module.endswith("sycl_native_ext"): + module_name = ( + f"{module}.{config.module_name}_sycl._{config.module_name}_sycl" + ) + postfix = "sycl" + + if config.init.module_name.endswith(module_name): + continue + + if postfix not in [impl.postfix for impl in implementations]: + raise RuntimeError( + f"Could not recognize postfix {postfix} as known postfix for" + + f" file {module} in {config.module_name}" + ) + + func_name: str = None + package_path: str = ( + f"dpbench.benchmarks.{config.module_name}.{module_name}" + ) + + try: + impl_mod = importlib.import_module(package_path) + + for func in [config.module_name, f"{config.module_name}_{postfix}"]: + if hasattr(impl_mod, func): + func_name = func + break + except Exception: + continue + + config.implementations.append( + BenchmarkImplementation( + postfix=postfix, + func_name=func_name, + module_name=module_name, + package_path=package_path, + ) + ) diff --git a/dpbench/infrastructure/benchmark.py b/dpbench/infrastructure/benchmark.py index 5cf2ec54..2e079a1a 100644 --- a/dpbench/infrastructure/benchmark.py +++ b/dpbench/infrastructure/benchmark.py @@ -19,6 +19,7 @@ import numpy as np +import dpbench.config as config from dpbench.infrastructure import timer from . import timeout_decorator as tout @@ -36,14 +37,8 @@ def _reset_output_args(bench, fmwrk, inputs, preset): - try: - output_args = bench.info["output_args"] - except KeyError: - logging.info( - "No output args to reset as benchmarks has no array output." 
- ) - return - array_args = bench.info["array_args"] + output_args = bench.info.output_args + array_args = bench.info.array_args for arg in inputs.keys(): if arg in output_args and arg in array_args: original_data = bench.get_data(preset=preset)[arg] @@ -99,15 +94,13 @@ def _exec( results_dict : A dictionary where timing and other results are stored. copy_output : A flag that controls copying output. """ - input_args = bench.info["input_args"] - array_args = bench.info["array_args"] + input_args = bench.info.input_args + array_args = bench.info.array_args impl_fn = bench.get_impl(impl_postfix) inputs = dict() with timer.timer() as t: - args = get_args( - bench.get_data(preset=preset), bench.info["array_args"], fmwrk - ) + args = get_args(bench.get_data(preset=preset), array_args, fmwrk) results_dict["setup_time"] = t.get_elapsed_time() @@ -155,12 +148,8 @@ def warmup(impl_fn, inputs): # Get the output data if copy_output: - try: - out_args = bench.info["output_args"] - except KeyError: - out_args = [] - - array_args = bench.info["array_args"] + out_args = bench.info.output_args + array_args = bench.info.array_args output_arrays = dict() with timer.timer() as t: for out_arg in out_args: @@ -633,16 +622,9 @@ def _check_if_valid_impl_postfix(self, impl_postfix: str) -> bool: def _set_implementation_fn_list( self, - bmod: str, - allowed_implementation_postfixes: list[str], ) -> list[BenchmarkImplFn]: - """Selects all the callables from the __all__ list for the module - excluding the initialize function that we know is not a benchmark - implementation. + """Loads all implementation functions into list. - Args: - bmod : A benchmark module - initialize_fname : Name of the initialization function Returns: A list of (name, value) pair that represents the name of an implementation function and a corresponding function object. 
@@ -650,86 +632,23 @@ def _set_implementation_fn_list( result: list[BenchmarkImplFn] = [] - for postfix, module_name in self._get_impl_names( - bmod, allowed_implementation_postfixes - ).items(): - module_name = f"dpbench.benchmarks.{self.bname}.{module_name}" + for impl in self.info.implementations: try: - mod = importlib.import_module(module_name) + mod = importlib.import_module(impl.package_path) + canonical_name = f"{self.info.module_name}_{impl.postfix}" + implfn = BenchmarkImplFn( + name=canonical_name, fn=getattr(mod, impl.func_name) + ) + result.append(implfn) except Exception: logging.exception( - f"Failed to import benchmark module: {module_name}" + f"Failed to import benchmark module: {impl.module_name}" ) continue - canonical_name = f"{self.bname}_{postfix}" - - func_name: str = None - for func in [self.bname, canonical_name]: - if hasattr(mod, func): - func_name = func - break - - if func_name: - implfn = BenchmarkImplFn( - name=canonical_name, fn=getattr(mod, func_name) - ) - result.append(implfn) - return result - def _load_benchmark_info(self, bconfig_path: str = None): - """Reads the benchmark configuration and loads into a member dict. - - Args: - bconfig_path (str, optional): _description_. Defaults to None. - """ - bench_filename = "{b}.json".format(b=self.bname) - bench_path = None - - if bconfig_path: - bench_path = bconfig_path.joinpath(bench_filename) - else: - parent_folder = pathlib.Path(__file__).parent.absolute() - bench_path = parent_folder.joinpath( - "..", "configs", "bench_info", bench_filename - ) - - try: - with open(bench_path) as json_file: - self.info: dict = json.load(json_file)["benchmark"] - except Exception: - logging.exception( - "Benchmark JSON file {b} could not be opened.".format( - b=bench_filename - ) - ) - raise - - def _set_data_initialization_fn(self): - """Sets the initialize function object to be used by the benchmark. - - Raises: - RuntimeError: If the module's initialize function could not be - loaded. 
- """ - - self.init_mod_path: str = ( - self.info.get("init", {}).get("package_path") - or f"dpbench.benchmarks.{self.bname}.{self.bname}_initialize" - ) - - self.init_fn_name: str = ( - self.info.get("init", {}).get("func_name") or "initialize" - ) - - self.initialize_fn = getattr( - importlib.import_module(self.init_mod_path), self.init_fn_name - ) - - def _set_reference_implementation( - self, impl_fnlist: list[BenchmarkImplFn] - ) -> BenchmarkImplFn: + def _set_reference_implementation(self) -> BenchmarkImplFn: """Sets the reference implementation for the benchmark. The reference implementation is either a pure Python implementation @@ -737,9 +656,6 @@ def _set_reference_implementation( the NumPy implementation over Python if both are present. If neither is found, then the reference implementation is set to None. - Args: - impl_fnlist : A list of (name, value) pair that represents the name - of an implementation function and a corresponding function object. Returns: BenchmarkImplFn: The reference benchmark implementation. @@ -747,8 +663,10 @@ def _set_reference_implementation( ref_impl = None - python_impl = [impl for impl in impl_fnlist if "python" in impl.name] - numpy_impl = [impl for impl in impl_fnlist if "numpy" in impl.name] + python_impl = [ + impl for impl in self.impl_fnlist if "python" in impl.name + ] + numpy_impl = [impl for impl in self.impl_fnlist if "numpy" in impl.name] if numpy_impl: ref_impl = numpy_impl[0] @@ -759,7 +677,7 @@ def _set_reference_implementation( return ref_impl - def _set_impl_to_framework_map(self, impl_fnlist): + def _set_impl_to_framework_map(self, impl_fnlist) -> dict[str, Framework]: """Create a dictionary mapping each implementation function name to a corresponding Framework object. 
@@ -852,70 +770,9 @@ def _validate_results(self, preset, frmwrk, frmwrk_out): except Exception: return False - def _get_impl_names( - self, - bmod, - allowed_implementation_postfixes: list[str], - ) -> dict[str, str]: - """Resolves bench_list into actual file paths. - - Returns: - map[str, str]: map of benchmark postfix to module name for - every implementation of the benchmark. - """ - - result: dict[str, str] = {} - - bench_list = self.info.get("bench_list") - if bench_list: - for bench in bench_list: - if isinstance(bench, tuple): - postfix = bench[0] - module_name = bench[1] - else: - postfix = bench - module_name = self.bname + "_" + postfix - - result[postfix] = module_name - - return result - - modules: list[str] = [ - m - for m in bmod.__loader__.get_resource_reader().contents() - if m.startswith(self.bname) - ] - - for module in modules: - module_name = module - postfix = "" - module_name = "" - - if module.endswith(".py"): - module_name = module[:-3] - postfix = module_name[len(self.bname) + 1 :] - elif module.endswith("sycl_native_ext"): - module_name = f"{module}.{self.bname}_sycl._{self.bname}_sycl" - postfix = "sycl" - - if self.init_mod_path.endswith(module_name): - continue - - if postfix not in allowed_implementation_postfixes: - raise RuntimeError( - f"Could not recognize postfix {postfix} as known postfix for" - + f" file {module} in {self.bname}" - ) - - result[postfix] = module_name - - return result - def __init__( self, - bmodule: object, - allowed_implementation_postfixes: list[str], - bconfig_path: str = None, + config: config.Benchmark, ): """Reads benchmark information. :param bname: The benchmark name. @@ -924,25 +781,21 @@ def __init__( package's bench_info directory is used. 
""" - self.bname = bmodule.__name__.split(".")[-1] self.bdata = dict() self.refdata = dict() - try: - self._load_benchmark_info(bconfig_path) - self._set_data_initialization_fn() - self.impl_fnlist = self._set_implementation_fn_list( - bmodule, - allowed_implementation_postfixes, - ) - self.ref_impl_fn = self._set_reference_implementation( - self.impl_fnlist - ) - self.impl_to_fw_map = self._set_impl_to_framework_map( - self.impl_fnlist - ) - except Exception: - raise + self.info: config.Benchmark = config + self.bname = self.info.module_name + self.init_mod_path = self.info.init.package_path + self.init_fn_name: str = self.info.init.func_name + + self.initialize_fn = getattr( + importlib.import_module(self.init_mod_path), self.init_fn_name + ) + + self.impl_fnlist = self._set_implementation_fn_list() + self.ref_impl_fn = self._set_reference_implementation() + self.impl_to_fw_map = self._set_impl_to_framework_map(self.impl_fnlist) def get_impl_fnlist(self) -> list[BenchmarkImplFn]: """Returns a list of function objects each for a single implementation @@ -1010,6 +863,7 @@ def get_data(self, preset: str = "L") -> Dict[str, Any]: :param preset: The data-size preset (S, M, L, paper). """ + # 0. Skip if preset is already loaded if preset in self.bdata.keys(): return self.bdata[preset] @@ -1018,20 +872,20 @@ def get_data(self, preset: str = "L") -> Dict[str, Any]: # 2. Check if the provided preset configuration is available in the # config file. - if preset not in self.info["parameters"].keys(): + if preset not in self.info.parameters.keys(): raise NotImplementedError( "{b} doesn't have a {p} preset.".format(b=self.bname, p=preset) ) # 3. Store the input preset args in the "data" dict. - parameters = self.info["parameters"][preset] + parameters = self.info.parameters[preset] for k, v in parameters.items(): data[k] = v # 4. Call the initialize_fn with the input args and store the results # in the "data" dict. 
- init_input_args_list = self.info["init"]["input_args"] + init_input_args_list = self.info.init.input_args init_input_args_val_list = [] for arg in init_input_args_list: init_input_args_val_list.append(data[arg]) @@ -1042,7 +896,7 @@ def get_data(self, preset: str = "L") -> Dict[str, Any]: # 5. Store the initialized output in the "data" dict. Note that the # implementation depends on Python dicts being ordered. Thus, the # code will not work with Python older than 3.7. - for idx, out in enumerate(self.info["init"]["output_args"]): + for idx, out in enumerate(self.info.init.output_args): data.update({out: initialized_output[idx]}) # 6. Update the benchmark data (self.bdata) with the generated data diff --git a/dpbench/infrastructure/dpcpp_framework.py b/dpbench/infrastructure/dpcpp_framework.py index 13df9efa..d796b0b4 100644 --- a/dpbench/infrastructure/dpcpp_framework.py +++ b/dpbench/infrastructure/dpcpp_framework.py @@ -7,28 +7,30 @@ import dpctl +import dpbench.config as config + from .framework import Framework class DpcppFramework(Framework): """A class for reading and processing framework information.""" - def __init__(self, fname: str, fconfig_path: str = None): + def __init__(self, fname: str = None, config: config.Framework = None): """Reads framework information. :param fname: The framework name. 
""" - super().__init__(fname, fconfig_path) + super().__init__(fname, config) try: - self.sycl_device = self.info["sycl_device"] + self.sycl_device = self.info.sycl_device dpctl.SyclDevice(self.sycl_device) except KeyError: pass except dpctl.SyclDeviceCreationError as sdce: logging.exception( "Could not create a Sycl device using filter {} string".format( - self.info["sycl_device"] + self.info.sycl_device ) ) raise sdce diff --git a/dpbench/infrastructure/dpnp_framework.py b/dpbench/infrastructure/dpnp_framework.py index aabbc7a0..6841a5db 100644 --- a/dpbench/infrastructure/dpnp_framework.py +++ b/dpbench/infrastructure/dpnp_framework.py @@ -7,28 +7,30 @@ import dpctl +import dpbench.config as cfg + from .framework import Framework class DpnpFramework(Framework): """A class for reading and processing framework information.""" - def __init__(self, fname: str, fconfig_path: str = None): + def __init__(self, fname: str = None, config: cfg.Framework = None): """Reads framework information. :param fname: The framework name. 
""" - super().__init__(fname, fconfig_path) + super().__init__(fname, config) try: - self.sycl_device = self.info["sycl_device"] + self.sycl_device = self.info.sycl_device dpctl.SyclDevice(self.sycl_device) except KeyError: pass except dpctl.SyclDeviceCreationError as sdce: logging.exception( "Could not create a Sycl device using filter {} string".format( - self.info["sycl_device"] + self.info.sycl_device ) ) raise sdce diff --git a/dpbench/infrastructure/framework.py b/dpbench/infrastructure/framework.py index a29400f1..1056a9f0 100644 --- a/dpbench/infrastructure/framework.py +++ b/dpbench/infrastructure/framework.py @@ -4,47 +4,43 @@ # SPDX-License-Identifier: Apache-2.0 # SPDX-License-Identifier: BSD-3-Clause -import json import logging -import pathlib from typing import Callable, Dict import pkg_resources +import dpbench.config as cfg from dpbench.infrastructure import utilities class Framework(object): """A class for reading and processing framework information.""" - def __init__(self, fname: str, fconfig_path: str = None): + def __init__( + self, + fname: str = None, + config: cfg.Framework = None, + ): """Reads framework information. - :param fname: The framework name. + :param fname: The framework name. It must be provided if no config was + provided. + :param config: The framework configuration. It must be provided if no + fname was provided. 
""" - self.fname = fname + if fname is None and config is None: + raise ValueError("At least one of fname or config must be provided") - frmwrk_filename = "{f}.json".format(f=fname) - frmwrk_path = None + if config is None: + config = [ + f for f in cfg.GLOBAL.frameworks if fname in f.simple_name + ] + if len(config) < 1: + raise ValueError(f"Configuration with name {fname} not found") + config = config[0] - if fconfig_path: - frmwrk_path = pathlib.Path(fconfig_path).joinpath(frmwrk_filename) - else: - parent_folder = pathlib.Path(__file__).parent.absolute() - frmwrk_path = parent_folder.joinpath( - "..", "configs", "framework_info", frmwrk_filename - ) - - try: - with open(frmwrk_path) as json_file: - self.info = json.load(json_file)["framework"] - except Exception as e: - logging.exception( - "Framework JSON file {f} could not be opened.".format( - f=frmwrk_filename - ) - ) - raise e + self.info = config + self.fname = self.info.simple_name def device_filter_string(self) -> str: """Returns the sycl device's filter string if the framework has an diff --git a/dpbench/infrastructure/numba_dpex_framework.py b/dpbench/infrastructure/numba_dpex_framework.py index 8ebb7bc1..2a1198f9 100644 --- a/dpbench/infrastructure/numba_dpex_framework.py +++ b/dpbench/infrastructure/numba_dpex_framework.py @@ -7,28 +7,28 @@ import dpctl +import dpbench.config as cfg + from .framework import Framework class NumbaDpexFramework(Framework): """A class for reading and processing framework information.""" - def __init__(self, fname: str, fconfig_path: str = None): + def __init__(self, fname: str = None, config: cfg.Framework = None): """Reads framework information. :param fname: The framework name. 
""" - super().__init__(fname, fconfig_path) + super().__init__(fname, config) try: - self.sycl_device = self.info["sycl_device"] + self.sycl_device = self.info.sycl_device dpctl.SyclDevice(self.sycl_device) - except KeyError: - pass except dpctl.SyclDeviceCreationError as sdce: logging.exception( "Could not create a Sycl device using filter {} string".format( - self.info["sycl_device"] + self.info.sycl_device ) ) raise sdce diff --git a/dpbench/infrastructure/numba_framework.py b/dpbench/infrastructure/numba_framework.py index fa0a3feb..12180d1d 100644 --- a/dpbench/infrastructure/numba_framework.py +++ b/dpbench/infrastructure/numba_framework.py @@ -4,6 +4,8 @@ # SPDX-License-Identifier: Apache-2.0 # SPDX-License-Identifier: BSD-3-Clause +import dpbench.config as cfg + from .framework import Framework _impl = { @@ -19,9 +21,9 @@ class NumbaFramework(Framework): """A class for reading and processing framework information.""" - def __init__(self, fname: str, fconfig_path: str = None): + def __init__(self, fname: str = None, config: cfg.Framework = None): """Reads framework information. :param fname: The framework name. """ - super().__init__(fname, fconfig_path) + super().__init__(fname, config) diff --git a/dpbench/runner.py b/dpbench/runner.py index c854f332..e6b2e7b7 100644 --- a/dpbench/runner.py +++ b/dpbench/runner.py @@ -5,11 +5,13 @@ import importlib import json import logging +import os import pathlib import pkgutil from datetime import datetime import dpbench.benchmarks as dp_bms +import dpbench.config as config import dpbench.infrastructure as dpbi from dpbench.infrastructure.enums import ErrorCodes @@ -40,44 +42,24 @@ def _print_results(result: dpbi.BenchmarkResults): print("error msg:", result.error_msg) -def list_available_benchmarks(): - """Return the list of available benchmarks that ae in the - dpbench.benchmarks module. 
- """ - - submods = [ - submod.name - for submod in pkgutil.iter_modules(dp_bms.__path__) - if submod.ispkg - ] - - return submods - +def get_benchmark( + benchmark: config.Benchmark = None, + benchmark_name: str = "", +) -> config.Benchmark: + """Returns benchmark config if it is not none, otherwise returns benchmark + config by name.""" + if benchmark is not None: + return benchmark -def list_possible_implementations() -> list[str]: - """Returns list of implementation postfixes, which are keys in - configs/impl_postfix.json. - """ - parent_folder = pathlib.Path(__file__).parent.absolute() - impl_postfix_json = parent_folder.joinpath("configs", "impl_postfix.json") - - try: - with open(impl_postfix_json) as json_file: - return [entry["impl_postfix"] for entry in json.load(json_file)] - except Exception: - logging.exception( - "impl postfix JSON file {b} could not be opened.".format( - b="impl_post_fix.json" - ) - ) - raise + return next( + b for b in config.GLOBAL.benchmarks if b.module_name == benchmark_name + ) def run_benchmark( - bname, + bname: str = "", + benchmark: config.Benchmark = None, implementation_postfix=None, - fconfig_path=None, - bconfig_path=None, preset="S", repeat=10, validate=True, @@ -86,20 +68,15 @@ def run_benchmark( print_results=True, run_id: int = None, ): + bench_cfg = get_benchmark(benchmark=benchmark, benchmark_name=bname) + bname = bench_cfg.name print("") print("================ Benchmark " + bname + " ========================") print("") bench = None - allowed_impl_postfixes = list_possible_implementations() - try: - benchmod = importlib.import_module("dpbench.benchmarks." 
+ bname) - bench = dpbi.Benchmark( - benchmod, - bconfig_path=bconfig_path, - allowed_implementation_postfixes=allowed_impl_postfixes, - ) + bench = dpbi.Benchmark(bench_cfg) except Exception: logging.exception( "Skipping the benchmark execution due to the following error: " @@ -128,8 +105,6 @@ def run_benchmark( def run_benchmarks( - fconfig_path=None, - bconfig_path=None, preset="S", repeat=10, validate=True, @@ -155,21 +130,16 @@ def run_benchmarks( if not dbfile: dbfile = "results.db" - # dpbi.create_results_table(conn) dpbi.create_results_table() conn = dpbi.create_connection(db_file=dbfile) if run_id is None: run_id = dpbi.create_run(conn) - impl_postfixes = list_possible_implementations() - - for b in list_available_benchmarks(): - for impl in impl_postfixes: + for b in config.GLOBAL.benchmarks: + for impl in config.GLOBAL.implementations: run_benchmark( - bname=b, - implementation_postfix=impl, - fconfig_path=fconfig_path, - bconfig_path=bconfig_path, + benchmark=b, + implementation_postfix=impl.postfix, preset=preset, repeat=repeat, validate=validate,