diff --git a/mlos_bench/mlos_bench/launcher.py b/mlos_bench/mlos_bench/launcher.py
index 3f2247146ed..d154d95b736 100644
--- a/mlos_bench/mlos_bench/launcher.py
+++ b/mlos_bench/mlos_bench/launcher.py
@@ -14,11 +14,11 @@
 import logging
 import sys
 
-from typing import Any, Dict, Iterable, List, Optional, Tuple, Type
+from typing import Any, Dict, Iterable, List, Optional, Tuple
 
 from mlos_bench.config.schemas import ConfigSchema
 from mlos_bench.dict_templater import DictTemplater
-from mlos_bench.util import BaseTypeVar, try_parse_val
+from mlos_bench.util import try_parse_val
 
 from mlos_bench.tunables.tunable import TunableValue
 from mlos_bench.tunables.tunable_groups import TunableGroups
@@ -338,7 +338,12 @@ def _load_optimizer(self, args_optimizer: Optional[str]) -> Optimizer:
             config = {key: val for key, val in self.global_config.items() if key in OneShotOptimizer.BASE_SUPPORTED_CONFIG_PROPS}
             return OneShotOptimizer(
                 self.tunables, config=config, service=self._parent_service)
-        optimizer = self._load(Optimizer, args_optimizer, ConfigSchema.OPTIMIZER)   # type: ignore[type-abstract]
+        class_config = self._config_loader.load_config(args_optimizer, ConfigSchema.OPTIMIZER)
+        assert isinstance(class_config, Dict)
+        optimizer = self._config_loader.build_optimizer(tunables=self.tunables,
+                                                        service=self._parent_service,
+                                                        config=class_config,
+                                                        global_config=self.global_config)
         return optimizer
 
     def _load_storage(self, args_storage: Optional[str]) -> Storage:
@@ -350,31 +355,15 @@ def _load_storage(self, args_storage: Optional[str]) -> Storage:
         if args_storage is None:
             # pylint: disable=import-outside-toplevel
             from mlos_bench.storage.sql.storage import SqlStorage
-            return SqlStorage(self.tunables, service=self._parent_service,
+            return SqlStorage(service=self._parent_service,
                               config={
                                   "drivername": "sqlite",
                                   "database": ":memory:",
                                   "lazy_schema_create": True,
                               })
-        storage = self._load(Storage, args_storage, ConfigSchema.STORAGE)   # type: ignore[type-abstract]
-        return storage
-
-    def _load(self, cls: Type[BaseTypeVar], json_file_name: str, schema_type: Optional[ConfigSchema]) -> BaseTypeVar:
-        """
-        Create a new instance of class `cls` from JSON configuration.
-
-        Note: For abstract types, mypy will complain at the call site.
-        Use "# type: ignore[type-abstract]" to suppress the warning.
-        See Also: https://github.com/python/mypy/issues/4717
-        """
-        class_config = self._config_loader.load_config(json_file_name, schema_type)
+        class_config = self._config_loader.load_config(args_storage, ConfigSchema.STORAGE)
         assert isinstance(class_config, Dict)
-        ret = self._config_loader.build_generic(
-            base_cls=cls,
-            tunables=self.tunables,
-            service=self._parent_service,
-            config=class_config,
-            global_config=self.global_config
-        )
-        assert isinstance(ret, cls)
-        return ret
+        storage = self._config_loader.build_storage(service=self._parent_service,
+                                                    config=class_config,
+                                                    global_config=self.global_config)
+        return storage
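
With the generic `_load()` helper removed, the launcher builds optimizers and storage through the loader's type-specific methods. A minimal sketch of the optimizer path, assuming a config search path and an optimizer config file name purely for illustration:

```python
from mlos_bench.config.schemas import ConfigSchema
from mlos_bench.services.config_persistence import ConfigPersistenceService
from mlos_bench.tunables.tunable_groups import TunableGroups

# Hypothetical search path and config file name, for illustration only.
config_loader = ConfigPersistenceService({"config_path": ["mlos_bench/config"]})
tunables = TunableGroups()

opt_config = config_loader.load_config("optimizers/mlos_core_opt.jsonc", ConfigSchema.OPTIMIZER)
assert isinstance(opt_config, dict)
optimizer = config_loader.build_optimizer(tunables=tunables,
                                          service=config_loader,
                                          config=opt_config,
                                          global_config={})
```
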
diff --git a/mlos_bench/mlos_bench/run.py b/mlos_bench/mlos_bench/run.py
index 22e6697b342..ee2675df680 100755
--- a/mlos_bench/mlos_bench/run.py
+++ b/mlos_bench/mlos_bench/run.py
@@ -94,6 +94,7 @@ def _optimize(*,
             trial_id=trial_id,
             root_env_config=root_env_config,
             description=env.name,
+            tunables=env.tunable_params,
             opt_target=opt.target,
             opt_direction=opt.direction,
          ) as exp:
diff --git a/mlos_bench/mlos_bench/services/config_persistence.py b/mlos_bench/mlos_bench/services/config_persistence.py
index 7ce4ecc3e0f..6c2dd19f7cf 100644
--- a/mlos_bench/mlos_bench/services/config_persistence.py
+++ b/mlos_bench/mlos_bench/services/config_persistence.py
@@ -14,24 +14,28 @@
 import json    # For logging only
 import logging
 
-from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Type, Union
+from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Union, TYPE_CHECKING
 
 import json5    # To read configs with comments and other JSON5 syntax features
 from jsonschema import ValidationError, SchemaError
 
 from mlos_bench.config.schemas import ConfigSchema
 from mlos_bench.environments.base_environment import Environment
+from mlos_bench.optimizers.base_optimizer import Optimizer
 from mlos_bench.services.base_service import Service
 from mlos_bench.services.types.config_loader_type import SupportsConfigLoading
 from mlos_bench.tunables.tunable import TunableValue
 from mlos_bench.tunables.tunable_groups import TunableGroups
-from mlos_bench.util import instantiate_from_config, merge_parameters, path_join, preprocess_dynamic_configs, BaseTypeVar
+from mlos_bench.util import instantiate_from_config, merge_parameters, path_join, preprocess_dynamic_configs
 
 if sys.version_info < (3, 10):
     from importlib_resources import files
 else:
     from importlib.resources import files
 
+if TYPE_CHECKING:
+    from mlos_bench.storage.base_storage import Storage
+
 
 _LOG = logging.getLogger(__name__)
 
@@ -228,14 +232,13 @@ def prepare_class_load(self, config: Dict[str, Any],
 
         return (class_name, class_config)
 
-    def build_generic(self, *,
-                      base_cls: Type[BaseTypeVar],
-                      tunables: TunableGroups,
-                      service: Service,
-                      config: Dict[str, Any],
-                      global_config: Optional[Dict[str, Any]] = None) -> BaseTypeVar:
+    def build_optimizer(self, *,
+                        tunables: TunableGroups,
+                        service: Service,
+                        config: Dict[str, Any],
+                        global_config: Optional[Dict[str, Any]] = None) -> Optimizer:
         """
-        Generic instantiation of mlos_bench objects like Storage and Optimizer
+        Instantiation of mlos_bench Optimizer objects
         that depend on Service and TunableGroups.
 
         A class *MUST* have a constructor that takes four named arguments:
@@ -243,8 +246,6 @@ def build_generic(self, *,
 
         Parameters
         ----------
-        base_cls : ClassType
-            A base class of the object to instantiate.
         tunables : TunableGroups
             Tunable parameters of the environment. We need them to validate the
             configurations of merged-in experiments and restored/pending trials.
@@ -257,19 +258,49 @@ def build_generic(self, *,
 
         Returns
         -------
-        inst : Any
-            A new instance of the `cls` class.
+        inst : Optimizer
+            A new instance of the `Optimizer` class.
         """
         tunables_path = config.get("include_tunables")
         if tunables_path is not None:
             tunables = self._load_tunables(tunables_path, tunables)
         (class_name, class_config) = self.prepare_class_load(config, global_config)
-        inst = instantiate_from_config(base_cls, class_name,
+        inst = instantiate_from_config(Optimizer, class_name,   # type: ignore[type-abstract]
                                        tunables=tunables,
                                        config=class_config,
                                        global_config=global_config,
                                        service=service)
-        _LOG.info("Created: %s %s", base_cls.__name__, inst)
+        _LOG.info("Created: Optimizer %s", inst)
+        return inst
+
+    def build_storage(self, *,
+                      service: Service,
+                      config: Dict[str, Any],
+                      global_config: Optional[Dict[str, Any]] = None) -> "Storage":
+        """
+        Instantiation of mlos_bench Storage objects.
+
+        Parameters
+        ----------
+        service : Service
+            A service object (e.g., providing methods to load config files, etc.)
+        config : dict
+            Configuration of the class to instantiate, as loaded from JSON.
+        global_config : dict
+            Global configuration parameters (optional).
+
+        Returns
+        -------
+        inst : Storage
+            A new instance of the Storage class.
+        """
+        (class_name, class_config) = self.prepare_class_load(config, global_config)
+        from mlos_bench.storage.base_storage import Storage     # pylint: disable=import-outside-toplevel
+        inst = instantiate_from_config(Storage, class_name,     # type: ignore[type-abstract]
+                                       config=class_config,
+                                       global_config=global_config,
+                                       service=service)
+        _LOG.info("Created: Storage %s", inst)
         return inst
 
     def build_environment(self,     # pylint: disable=too-many-arguments
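
A corresponding sketch for the new `build_storage()` entry point (the storage config file name is an assumption):

```python
from mlos_bench.config.schemas import ConfigSchema
from mlos_bench.services.config_persistence import ConfigPersistenceService

config_loader = ConfigPersistenceService({"config_path": ["mlos_bench/config"]})
# Hypothetical storage config file name.
storage_config = config_loader.load_config("storage/sqlite.jsonc", ConfigSchema.STORAGE)
assert isinstance(storage_config, dict)
storage = config_loader.build_storage(service=config_loader,
                                      config=storage_config,
                                      global_config={})
```
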
diff --git a/mlos_bench/mlos_bench/storage/__init__.py b/mlos_bench/mlos_bench/storage/__init__.py
index 615bc4a160f..9ae5c80f364 100644
--- a/mlos_bench/mlos_bench/storage/__init__.py
+++ b/mlos_bench/mlos_bench/storage/__init__.py
@@ -11,5 +11,5 @@
 
 __all__ = [
     'Storage',
-    'from_config'
+    'from_config',
 ]
diff --git a/mlos_bench/mlos_bench/storage/base_experiment_data.py b/mlos_bench/mlos_bench/storage/base_experiment_data.py
index 0f154a03182..62af726a90d 100644
--- a/mlos_bench/mlos_bench/storage/base_experiment_data.py
+++ b/mlos_bench/mlos_bench/storage/base_experiment_data.py
@@ -95,6 +95,7 @@ def results(self) -> pandas.DataFrame:
         results : pandas.DataFrame
             A DataFrame with configurations and results from all trials of the experiment.
             Has columns [trial_id, config_id, ts_start, ts_end, status]
-            followed by tunable config parameters and trial results. The latter can be NULLs
-            if the trial was not successful.
+            followed by tunable config parameters (prefixed with "config.") and
+            trial results (prefixed with "result."). The latter can be NULLs if the
+            trial was not successful.
         """
diff --git a/mlos_bench/mlos_bench/storage/base_storage.py b/mlos_bench/mlos_bench/storage/base_storage.py
index 1afd699f162..ac9a442c741 100644
--- a/mlos_bench/mlos_bench/storage/base_storage.py
+++ b/mlos_bench/mlos_bench/storage/base_storage.py
@@ -30,7 +30,6 @@ class Storage(metaclass=ABCMeta):
     """
 
     def __init__(self,
-                 tunables: TunableGroups,
                  config: Dict[str, Any],
                  global_config: Optional[dict] = None,
                  service: Optional[Service] = None):
@@ -39,15 +38,11 @@ def __init__(self,
 
         Parameters
         ----------
-        tunables : TunableGroups
-            Tunable parameters of the environment. We need them to validate the
-            configurations of merged-in experiments and restored/pending trials.
         config : dict
             Free-format key/value pairs of configuration parameters.
         """
         _LOG.debug("Storage config: %s", config)
         self._validate_json_config(config)
-        self._tunables = tunables.copy()
         self._service = service
         self._config = config.copy()
         self._global_config = global_config or {}
@@ -83,6 +78,7 @@ def experiment(self, *,
                    trial_id: int,
                    root_env_config: str,
                    description: str,
+                   tunables: TunableGroups,
                    opt_target: str,
                    opt_direction: Optional[str]) -> 'Storage.Experiment':
         """
@@ -102,6 +98,7 @@ def experiment(self, *,
             A path to the root JSON configuration file of the benchmarking environment.
         description : str
             Human-readable description of the experiment.
+        tunables : TunableGroups
+            Tunable parameters of the environment. We need them to validate the
+            configurations of merged-in experiments and restored/pending trials.
         opt_target : str
             Name of metric we're optimizing for.
         opt_direction: Optional[str]
@@ -204,6 +201,11 @@ def description(self) -> str:
             """Get the Experiment's description"""
             return self._description
 
+        @property
+        def tunables(self) -> TunableGroups:
+            """Get the Experiment's tunables"""
+            return self._tunables
+
         @property
         def opt_target(self) -> str:
             """Get the Experiment's optimization target"""
@@ -271,7 +273,7 @@ def new_trial(self, tunables: TunableGroups,
             Parameters
             ----------
             tunables : TunableGroups
-                Tunable parameters of the experiment.
+                Tunable parameters to use for the trial.
             config : dict
                 Key/value pairs of additional non-tunable parameters of the trial.
 
@@ -303,7 +305,7 @@ def __init__(self, *,
             self._config = config or {}
 
         def __repr__(self) -> str:
-            return f"{self._experiment_id}:{self._trial_id}"
+            return f"{self._experiment_id}:{self._trial_id}:{self._config_id}"
 
         @property
         def trial_id(self) -> int:
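
Since the tunables now travel with each experiment instead of the `Storage` constructor, the call pattern changes roughly as in this sketch (the SQLite config values and experiment metadata are placeholders):

```python
from mlos_bench.storage.sql.storage import SqlStorage
from mlos_bench.tunables.tunable_groups import TunableGroups

storage = SqlStorage(service=None,
                     config={"drivername": "sqlite", "database": ":memory:"})
tunables = TunableGroups()
with storage.experiment(experiment_id="Demo-001",        # placeholder metadata
                        trial_id=1,
                        root_env_config="environment.jsonc",
                        description="sketch",
                        tunables=tunables,                # now passed per experiment
                        opt_target="score",
                        opt_direction="min") as exp:
    trial = exp.new_trial(tunables)
    print(exp.tunables, trial.trial_id)
```
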
diff --git a/mlos_bench/mlos_bench/storage/base_trial_data.py b/mlos_bench/mlos_bench/storage/base_trial_data.py
index 71ad7e75a42..1ec7548a7d7 100644
--- a/mlos_bench/mlos_bench/storage/base_trial_data.py
+++ b/mlos_bench/mlos_bench/storage/base_trial_data.py
@@ -7,11 +7,13 @@
 """
 from abc import ABCMeta, abstractmethod
 from datetime import datetime
-from typing import Optional
+from typing import Dict, Optional
 
 import pandas
 
 from mlos_bench.environments.status import Status
+from mlos_bench.tunables.tunable import TunableValue
+from mlos_bench.util import try_parse_val
 
 
 class TrialData(metaclass=ABCMeta):
@@ -46,38 +48,53 @@ def exp_id(self) -> str:
     @property
     def trial_id(self) -> int:
         """
-        ID of the current trial.
+        ID of the trial.
         """
         return self._trial_id
 
     @property
     def config_id(self) -> int:
         """
-        ID of the configuration of the current trial.
+        ID of the configuration of the trial.
         """
         return self._config_id
 
     @property
     def ts_start(self) -> datetime:
         """
-        Start timestamp of the current trial (UTC).
+        Start timestamp of the trial (UTC).
         """
         return self._ts_start
 
     @property
     def ts_end(self) -> Optional[datetime]:
         """
-        End timestamp of the current trial (UTC).
+        End timestamp of the trial (UTC).
         """
         return self._ts_end
 
     @property
     def status(self) -> Status:
         """
-        Status of the current trial.
+        Status of the trial.
         """
         return self._status
 
+    def _df_to_dict(self, dataframe: pandas.DataFrame) -> Dict[str, Optional[TunableValue]]:
+        """Utility function to convert certain key-value dataframe formats to a dict."""
+        if dataframe.columns.tolist() == ['metric', 'value']:
+            dataframe = dataframe.copy()
+            dataframe.rename(columns={'metric': 'parameter'}, inplace=True)
+        assert dataframe.columns.tolist() == ['parameter', 'value']
+        data: Dict[str, Optional[TunableValue]] = {}
+        for _, row in dataframe.iterrows():
+            assert isinstance(row['parameter'], str)
+            assert row['value'] is None or isinstance(row['value'], (str, int, float))
+            if row['parameter'] in data:
+                raise ValueError(f"Duplicate parameter '{row['parameter']}' in dataframe for trial {self}")
+            data[row['parameter']] = try_parse_val(row['value']) if isinstance(row['value'], str) else row['value']
+        return data
+
     @property
     @abstractmethod
     def tunable_config(self) -> pandas.DataFrame:
@@ -89,10 +106,23 @@ def tunable_config(self) -> pandas.DataFrame:
         Returns
         -------
         config : pandas.DataFrame
-            A dataframe with the tunable configuration of the current trial.
+            A dataframe with the tunable configuration of the trial.
             It has two `str` columns, "parameter" and "value".
         """
 
+    @property
+    def tunable_config_dict(self) -> Dict[str, Optional[TunableValue]]:
+        """
+        Retrieve the trial's tunable configuration from the storage as a dict.
+
+        Note: this corresponds to the Trial object's "tunables" property.
+
+        Returns
+        -------
+        config : dict
+        """
+        return self._df_to_dict(self.tunable_config)
+
     @property
     @abstractmethod
     def results(self) -> pandas.DataFrame:
@@ -101,17 +131,28 @@ def results(self) -> pandas.DataFrame:
 
         Returns
         -------
-        config : pandas.DataFrame
+        results : pandas.DataFrame
             A dataframe with the trial results.
             It has two `str` columns, "metric" and "value".
             If the trial status is not SUCCEEDED, the dataframe is empty.
         """
 
+    @property
+    def results_dict(self) -> Dict[str, Optional[TunableValue]]:
+        """
+        Retrieve the trial's results from the storage as a dict.
+
+        Returns
+        -------
+        results : dict
+        """
+        return self._df_to_dict(self.results)
+
     @property
     @abstractmethod
     def telemetry(self) -> pandas.DataFrame:
         """
-        Retrieve the trials' telemetry from the storage.
+        Retrieve the trial's telemetry from the storage as a dataframe.
 
         Returns
         -------
@@ -126,7 +167,7 @@ def telemetry(self) -> pandas.DataFrame:
     @abstractmethod
     def metadata(self) -> pandas.DataFrame:
         """
-        Retrieve the trials' metadata parameters.
+        Retrieve the trial's metadata parameters as a dataframe.
 
         Note: this corresponds to the Trial object's "config" property.
 
@@ -137,3 +178,16 @@ def metadata(self) -> pandas.DataFrame:
             It has two `str` columns, "parameter" and "value".
             Returns an empty dataframe if there is no metadata.
         """
+
+    @property
+    def metadata_dict(self) -> Dict[str, Optional[TunableValue]]:
+        """
+        Retrieve the trial's metadata parameters as a dict.
+
+        Note: this corresponds to the Trial object's "config" property.
+
+        Returns
+        -------
+        metadata : dict
+        """
+        return self._df_to_dict(self.metadata)
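
To show what the new `*_dict` properties return, a standalone sketch of the same dataframe-to-dict conversion (the sample values are made up):

```python
import pandas

from mlos_bench.util import try_parse_val

# A results dataframe as described above: "metric" / "value" string columns.
results_df = pandas.DataFrame({"metric": ["score", "benchmark"],
                               "value": ["99.9", "test"]})
# Each row becomes one dict entry; string values are parsed back into
# typed values (int/float) where possible via try_parse_val().
results_dict = {row["metric"]: try_parse_val(row["value"])
                for _, row in results_df.iterrows()}
print(results_dict)   # e.g. {'score': 99.9, 'benchmark': 'test'}
```
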
diff --git a/mlos_bench/mlos_bench/storage/sql/experiment.py b/mlos_bench/mlos_bench/storage/sql/experiment.py
index 1bb89e2319e..f8b60e704d4 100644
--- a/mlos_bench/mlos_bench/storage/sql/experiment.py
+++ b/mlos_bench/mlos_bench/storage/sql/experiment.py
@@ -102,7 +102,7 @@ def _setup(self) -> None:
 
     def merge(self, experiment_ids: List[str]) -> None:
         _LOG.info("Merge: %s <- %s", self._experiment_id, experiment_ids)
-        raise NotImplementedError()
+        raise NotImplementedError("TODO")
 
     def load_tunable_config(self, config_id: int) -> Dict[str, Any]:
         with self._engine.connect() as conn:
diff --git a/mlos_bench/mlos_bench/storage/sql/experiment_data.py b/mlos_bench/mlos_bench/storage/sql/experiment_data.py
index 4aee29cbc14..69bf6a0a96e 100644
--- a/mlos_bench/mlos_bench/storage/sql/experiment_data.py
+++ b/mlos_bench/mlos_bench/storage/sql/experiment_data.py
@@ -88,6 +88,9 @@ def objectives(self) -> Dict[str, str]:
                              self, opt_target, objectives[opt_target])
         return objectives
 
+    # TODO: provide a way to get individual data to avoid repeated bulk fetches where only small amounts of data are accessed.
+    # Or else make the TrialData objects populate lazily.
+
     @property
     def trials(self) -> Dict[int, TrialData]:
         with self._engine.connect() as conn:
@@ -115,9 +118,7 @@ def trials(self) -> Dict[int, TrialData]:
 
     @property
     def results(self) -> pandas.DataFrame:
-
         with self._engine.connect() as conn:
-
             cur_trials = conn.execute(
                 self._schema.trial.select().where(
                     self._schema.trial.c.exp_id == self._exp_id,
diff --git a/mlos_bench/mlos_bench/storage/sql/storage.py b/mlos_bench/mlos_bench/storage/sql/storage.py
index 9c747f8885c..71e6cda3fdf 100644
--- a/mlos_bench/mlos_bench/storage/sql/storage.py
+++ b/mlos_bench/mlos_bench/storage/sql/storage.py
@@ -28,11 +28,10 @@ class SqlStorage(Storage):
     """
 
     def __init__(self,
-                 tunables: TunableGroups,
                  config: dict,
                  global_config: Optional[dict] = None,
                  service: Optional[Service] = None):
-        super().__init__(tunables, config, global_config, service)
+        super().__init__(config, global_config, service)
         lazy_schema_create = self._config.pop("lazy_schema_create", False)
         self._log_sql = self._config.pop("log_sql", False)
         self._url = URL.create(**self._config)
@@ -62,12 +61,13 @@ def experiment(self, *,
                    trial_id: int,
                    root_env_config: str,
                    description: str,
+                   tunables: TunableGroups,
                    opt_target: str,
                    opt_direction: Optional[str]) -> Storage.Experiment:
         return Experiment(
             engine=self._engine,
             schema=self._schema,
-            tunables=self._tunables,
+            tunables=tunables,
             experiment_id=experiment_id,
             trial_id=trial_id,
             root_env_config=root_env_config,
@@ -78,6 +78,8 @@ def experiment(self, *,
 
     @property
     def experiments(self) -> Dict[str, ExperimentData]:
+        # FIXME: this is somewhat expensive if only fetching a single Experiment.
+        # May need to expand the API or data structures to lazily fetch data and/or cache it.
         with self._engine.connect() as conn:
             cur_exp = conn.execute(
                 self._schema.experiment.select().order_by(
diff --git a/mlos_bench/mlos_bench/storage/sql/trial.py b/mlos_bench/mlos_bench/storage/sql/trial.py
index 4e13ef8e889..0677cf9cb35 100644
--- a/mlos_bench/mlos_bench/storage/sql/trial.py
+++ b/mlos_bench/mlos_bench/storage/sql/trial.py
@@ -78,7 +78,6 @@ def update(self, status: Status, timestamp: datetime,
             except Exception:
                 conn.rollback()
                 raise
-
         return metrics
 
     def update_telemetry(self, status: Status, metrics: List[Tuple[datetime, str, Any]]) -> None:
diff --git a/mlos_bench/mlos_bench/storage/storage_factory.py b/mlos_bench/mlos_bench/storage/storage_factory.py
index 04e493a2000..faa934b28f9 100644
--- a/mlos_bench/mlos_bench/storage/storage_factory.py
+++ b/mlos_bench/mlos_bench/storage/storage_factory.py
@@ -10,13 +10,11 @@
 
 from mlos_bench.config.schemas import ConfigSchema
 from mlos_bench.services.config_persistence import ConfigPersistenceService
-from mlos_bench.tunables.tunable_groups import TunableGroups
 from mlos_bench.storage.base_storage import Storage
 
 
 def from_config(config_file: str,
                 global_configs: Optional[List[str]] = None,
-                tunables: Optional[List[str]] = None,
                 **kwargs: Any) -> Storage:
     """
     Create a new storage object from JSON5 config file.
@@ -27,8 +25,6 @@ def from_config(config_file: str,
         JSON5 config file to load.
     global_configs : Optional[List[str]]
         An optional list of config files with global parameters.
-    tunables : Optional[List[str]]
-        An optional list of files containing JSON5 metadata of the tunables.
     kwargs : dict
         Additional configuration parameters.
 
@@ -47,17 +43,13 @@ def from_config(config_file: str,
         config_loader = ConfigPersistenceService({"config_path": config_path})
     global_config.update(kwargs)
 
-    # pylint: disable=protected-access
-    tunable_groups = config_loader._load_tunables(tunables or [], TunableGroups())
     class_config = config_loader.load_config(config_file, ConfigSchema.STORAGE)
     assert isinstance(class_config, Dict)
 
-    ret = config_loader.build_generic(
-        base_cls=Storage,  # type: ignore[type-abstract]
-        tunables=tunable_groups,
+    ret = config_loader.build_storage(
         service=config_loader,
         config=class_config,
-        global_config=global_config
+        global_config=global_config,
     )
     assert isinstance(ret, Storage)
     return ret
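
A hedged usage sketch of the simplified factory; the config file names below are placeholders:

```python
from mlos_bench.storage import from_config

# No tunables argument anymore; extra keyword args are merged into the global config.
storage = from_config(config_file="storage/sqlite.jsonc",        # placeholder path
                      global_configs=["global_config.jsonc"],    # placeholder path
                      lazy_schema_create=True)
print(list(storage.experiments))
```
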
diff --git a/mlos_bench/mlos_bench/tests/config/optimizers/test_load_optimizer_config_examples.py b/mlos_bench/mlos_bench/tests/config/optimizers/test_load_optimizer_config_examples.py
index f649f960bea..bd9099b6081 100644
--- a/mlos_bench/mlos_bench/tests/config/optimizers/test_load_optimizer_config_examples.py
+++ b/mlos_bench/mlos_bench/tests/config/optimizers/test_load_optimizer_config_examples.py
@@ -45,11 +45,10 @@ def test_load_optimizer_config_examples(config_loader_service: ConfigPersistence
     assert issubclass(cls, Optimizer)
     # Make an instance of the class based on the config.
     tunable_groups = TunableGroups()
-    storage_inst = config_loader_service.build_generic(
-        base_cls=Optimizer,     # type: ignore[type-abstract]
+    optimizer_inst = config_loader_service.build_optimizer(
         tunables=tunable_groups,
         config=config,
         service=config_loader_service,
     )
-    assert storage_inst is not None
-    assert isinstance(storage_inst, cls)
+    assert optimizer_inst is not None
+    assert isinstance(optimizer_inst, cls)
diff --git a/mlos_bench/mlos_bench/tests/config/storage/test_load_storage_config_examples.py b/mlos_bench/mlos_bench/tests/config/storage/test_load_storage_config_examples.py
index 18e714bcf8f..039d49948f2 100644
--- a/mlos_bench/mlos_bench/tests/config/storage/test_load_storage_config_examples.py
+++ b/mlos_bench/mlos_bench/tests/config/storage/test_load_storage_config_examples.py
@@ -15,7 +15,6 @@
 from mlos_bench.config.schemas.config_schemas import ConfigSchema
 from mlos_bench.services.config_persistence import ConfigPersistenceService
 from mlos_bench.storage.base_storage import Storage
-from mlos_bench.tunables.tunable_groups import TunableGroups
 from mlos_bench.util import get_class_from_name
 
 
@@ -46,9 +45,7 @@ def test_load_storage_config_examples(config_loader_service: ConfigPersistenceSe
     cls = get_class_from_name(config["class"])
     assert issubclass(cls, Storage)
     # Make an instance of the class based on the config.
-    storage_inst = config_loader_service.build_generic(
-        base_cls=Storage,   # type: ignore[type-abstract]
-        tunables=TunableGroups(),
+    storage_inst = config_loader_service.build_storage(
         config=config,
         service=config_loader_service,
     )
diff --git a/mlos_bench/mlos_bench/tests/conftest.py b/mlos_bench/mlos_bench/tests/conftest.py
index 9f646ca8f92..c8a73d36149 100644
--- a/mlos_bench/mlos_bench/tests/conftest.py
+++ b/mlos_bench/mlos_bench/tests/conftest.py
@@ -6,111 +6,28 @@
 Common fixtures for mock TunableGroups and Environment objects.
 """
 
-from typing import Any, Dict, Generator, List
+from typing import Any, Generator, List
 
 import os
 
 from fasteners import InterProcessLock, InterProcessReaderWriterLock
 from pytest_docker.plugin import get_docker_services, Services as DockerServices
 
-import json5 as json
 import pytest
 
-from mlos_bench.tests import SEED
-
-from mlos_bench.config.schemas import ConfigSchema
 from mlos_bench.environments.mock_env import MockEnv
-from mlos_bench.tunables.covariant_group import CovariantTunableGroup
 from mlos_bench.tunables.tunable_groups import TunableGroups
 
+from mlos_bench.tests import SEED, tunable_groups_fixtures
+
 # pylint: disable=redefined-outer-name
 # -- Ignore pylint complaints about pytest references to
 # `tunable_groups` fixture as both a function and a parameter.
 
-
-TUNABLE_GROUPS_JSON = """
-{
-    "provision": {
-        "cost": 1000,
-        "params": {
-            "vmSize": {
-                "description": "Azure VM size",
-                "type": "categorical",
-                "default": "Standard_B4ms",
-                "values": ["Standard_B2s", "Standard_B2ms", "Standard_B4ms"]
-            }
-        }
-    },
-    "boot": {
-        "cost": 300,
-        "params": {
-            "idle": {
-                "description": "Idling method",
-                "type": "categorical",
-                "default": "halt",
-                "values": ["halt", "mwait", "noidle"]
-            }
-        }
-    },
-    "kernel": {
-        "cost": 1,
-        "params": {
-            "kernel_sched_migration_cost_ns": {
-                "description": "Cost of migrating the thread to another core",
-                "type": "int",
-                "default": -1,
-                "range": [0, 500000],
-                "special": [-1, 0]
-            },
-            "kernel_sched_latency_ns": {
-                "description": "Initial value for the scheduler period",
-                "type": "int",
-                "default": 2000000,
-                "range": [0, 1000000000]
-            }
-        }
-    }
-}
-"""
-
-
-@pytest.fixture
-def tunable_groups_config() -> Dict[str, Any]:
-    """
-    Fixture to get the JSON string for the tunable groups.
-    """
-    conf = json.loads(TUNABLE_GROUPS_JSON)
-    assert isinstance(conf, dict)
-    ConfigSchema.TUNABLE_PARAMS.validate(conf)
-    return conf
-
-
-@pytest.fixture
-def tunable_groups(tunable_groups_config: dict) -> TunableGroups:
-    """
-    A test fixture that produces a mock TunableGroups.
-
-    Returns
-    -------
-    tunable_groups : TunableGroups
-        A new TunableGroups object for testing.
-    """
-    tunables = TunableGroups(tunable_groups_config)
-    tunables.reset()
-    return tunables
-
-
-@pytest.fixture
-def covariant_group(tunable_groups: TunableGroups) -> CovariantTunableGroup:
-    """
-    Text fixture to get a CovariantTunableGroup from tunable_groups.
-
-    Returns
-    -------
-    CovariantTunableGroup
-    """
-    (_, covariant_group) = next(iter(tunable_groups))
-    return covariant_group
+# Expose some of those as local names so they can be picked up as fixtures by pytest.
+tunable_groups_config = tunable_groups_fixtures.tunable_groups_config
+tunable_groups = tunable_groups_fixtures.tunable_groups
+covariant_group = tunable_groups_fixtures.covariant_group
 
 
 @pytest.fixture
diff --git a/mlos_bench/mlos_bench/tests/environments/__init__.py b/mlos_bench/mlos_bench/tests/environments/__init__.py
index 9fc0c1f9a82..5e71d49b762 100644
--- a/mlos_bench/mlos_bench/tests/environments/__init__.py
+++ b/mlos_bench/mlos_bench/tests/environments/__init__.py
@@ -7,7 +7,7 @@
 """
 
 from datetime import datetime
-from typing import Any, Dict, List, Optional, Tuple, Union
+from typing import Any, Dict, List, Optional, Tuple
 
 import pytest
 
@@ -19,7 +19,7 @@
 
 def check_env_success(env: Environment,
                       tunable_groups: TunableGroups,
-                      expected_results: Dict[str, Union[TunableValue]],
+                      expected_results: Dict[str, TunableValue],
                       expected_telemetry: List[Tuple[datetime, str, Any]],
                       global_config: Optional[dict] = None) -> None:
     """
diff --git a/mlos_bench/mlos_bench/tests/storage/__init__.py b/mlos_bench/mlos_bench/tests/storage/__init__.py
new file mode 100644
index 00000000000..edd7d86133d
--- /dev/null
+++ b/mlos_bench/mlos_bench/tests/storage/__init__.py
@@ -0,0 +1,7 @@
+#
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+#
+"""
+Tests for mlos_bench storage.
+"""
diff --git a/mlos_bench/mlos_bench/tests/storage/conftest.py b/mlos_bench/mlos_bench/tests/storage/conftest.py
index 613730cbed7..494690b675d 100644
--- a/mlos_bench/mlos_bench/tests/storage/conftest.py
+++ b/mlos_bench/mlos_bench/tests/storage/conftest.py
@@ -3,44 +3,17 @@
 # Licensed under the MIT License.
 #
 """
-Test fixtures for mlos_bench storage.
+Export test fixtures for mlos_bench storage.
 """
 
-import pytest
+import mlos_bench.tests.storage.sql.fixtures as sql_storage_fixtures
 
-from mlos_bench.tunables.tunable_groups import TunableGroups
-from mlos_bench.storage.base_storage import Storage
-from mlos_bench.storage.sql.storage import SqlStorage
+# NOTE: For future storage implementation additions, we can refactor this to use
+# lazy_fixture and parameterize the tests across fixtures but keep the test code the
+# same.
 
-# pylint: disable=redefined-outer-name
-
-
-@pytest.fixture
-def storage_memory_sql(tunable_groups: TunableGroups) -> SqlStorage:
-    """
-    Test fixture for in-memory SQLite3 storage.
-    """
-    return SqlStorage(
-        tunables=tunable_groups,
-        service=None,
-        config={
-            "drivername": "sqlite",
-            "database": ":memory:",
-        }
-    )
-
-
-@pytest.fixture
-def exp_storage_memory_sql(storage_memory_sql: Storage) -> SqlStorage.Experiment:
-    """
-    Test fixture for Experiment using in-memory SQLite3 storage.
-    """
-    # pylint: disable=unnecessary-dunder-call
-    return storage_memory_sql.experiment(
-        experiment_id="Test-001",
-        trial_id=1,
-        root_env_config="environment.jsonc",
-        description="pytest experiment",
-        opt_target="score",
-        opt_direction="min",
-    ).__enter__()
+# Expose some of those as local names so they can be picked up as fixtures by pytest.
+storage = sql_storage_fixtures.storage
+exp_storage = sql_storage_fixtures.exp_storage
+exp_storage_with_trials = sql_storage_fixtures.exp_storage_with_trials
+exp_data = sql_storage_fixtures.exp_data
diff --git a/mlos_bench/mlos_bench/tests/storage/exp_data_test.py b/mlos_bench/mlos_bench/tests/storage/exp_data_test.py
index fc459172b86..35d0d7177ce 100644
--- a/mlos_bench/mlos_bench/tests/storage/exp_data_test.py
+++ b/mlos_bench/mlos_bench/tests/storage/exp_data_test.py
@@ -10,49 +10,55 @@
 from mlos_bench.tunables.tunable_groups import TunableGroups
 
 
-def test_load_empty_exp_data(storage_memory_sql: Storage, exp_storage_memory_sql: Storage.Experiment) -> None:
+def test_load_empty_exp_data(storage: Storage, exp_storage: Storage.Experiment) -> None:
     """
     Try to retrieve old experimental data from the empty storage.
     """
-    exp = storage_memory_sql.experiments[exp_storage_memory_sql.experiment_id]
-    assert exp.exp_id == exp_storage_memory_sql.experiment_id
-    assert exp.description == exp_storage_memory_sql.description
+    exp = storage.experiments[exp_storage.experiment_id]
+    assert exp.exp_id == exp_storage.experiment_id
+    assert exp.description == exp_storage.description
     # Only support single objective for now.
-    assert exp.objectives == {exp_storage_memory_sql.opt_target: exp_storage_memory_sql.opt_direction}
+    assert exp.objectives == {exp_storage.opt_target: exp_storage.opt_direction}
 
 
-def test_exp_trial_data_objectives(storage_memory_sql: Storage,
-                                   exp_storage_memory_sql: Storage.Experiment,
+def test_exp_trial_data_objectives(storage: Storage,
+                                   exp_storage: Storage.Experiment,
                                    tunable_groups: TunableGroups) -> None:
     """
     Start a new trial and check the storage for the trial data.
     """
 
-    trial_opt_new = exp_storage_memory_sql.new_trial(tunable_groups, config={
+    trial_opt_new = exp_storage.new_trial(tunable_groups, config={
         "opt_target": "some-other-target",
         "opt_direction": "max",
     })
     assert trial_opt_new.config() == {
-        "experiment_id": exp_storage_memory_sql.experiment_id,
+        "experiment_id": exp_storage.experiment_id,
         "trial_id": trial_opt_new.trial_id,
         "opt_target": "some-other-target",
         "opt_direction": "max",
     }
 
-    trial_opt_old = exp_storage_memory_sql.new_trial(tunable_groups, config={
+    trial_opt_old = exp_storage.new_trial(tunable_groups, config={
         "opt_target": "back-compat",
         # "opt_direction": "max",   # missing
     })
     assert trial_opt_old.config() == {
-        "experiment_id": exp_storage_memory_sql.experiment_id,
+        "experiment_id": exp_storage.experiment_id,
         "trial_id": trial_opt_old.trial_id,
         "opt_target": "back-compat",
     }
 
-    exp = storage_memory_sql.experiments[exp_storage_memory_sql.experiment_id]
+    exp = storage.experiments[exp_storage.experiment_id]
     # objectives should be the combination of both the trial objectives and the experiment objectives
     assert exp.objectives == {
         "back-compat": None,
         "some-other-target": "max",
-        exp_storage_memory_sql.opt_target: exp_storage_memory_sql.opt_direction,
+        exp_storage.opt_target: exp_storage.opt_direction,
+    }
+
+    trial_data_opt_new = exp.trials[trial_opt_new.trial_id]
+    assert trial_data_opt_new.metadata_dict == {
+        "opt_target": "some-other-target",
+        "opt_direction": "max",
     }
diff --git a/mlos_bench/mlos_bench/tests/storage/exp_load_test.py b/mlos_bench/mlos_bench/tests/storage/exp_load_test.py
index 704715bd6bd..3763a3f1181 100644
--- a/mlos_bench/mlos_bench/tests/storage/exp_load_test.py
+++ b/mlos_bench/mlos_bench/tests/storage/exp_load_test.py
@@ -14,36 +14,36 @@
 from mlos_bench.storage.base_storage import Storage
 
 
-def test_exp_load_empty(exp_storage_memory_sql: Storage.Experiment) -> None:
+def test_exp_load_empty(exp_storage: Storage.Experiment) -> None:
     """
     Try to retrieve old experimental data from the empty storage.
     """
-    (configs, scores, status) = exp_storage_memory_sql.load()
+    (configs, scores, status) = exp_storage.load()
     assert not configs
     assert not scores
     assert not status
 
 
-def test_exp_pending_empty(exp_storage_memory_sql: Storage.Experiment) -> None:
+def test_exp_pending_empty(exp_storage: Storage.Experiment) -> None:
     """
     Try to retrieve pending experiments from the empty storage.
     """
-    trials = list(exp_storage_memory_sql.pending_trials())
+    trials = list(exp_storage.pending_trials())
     assert not trials
 
 
-def test_exp_trial_pending(exp_storage_memory_sql: Storage.Experiment,
+def test_exp_trial_pending(exp_storage: Storage.Experiment,
                            tunable_groups: TunableGroups) -> None:
     """
     Start a trial and check that it is pending.
     """
-    trial = exp_storage_memory_sql.new_trial(tunable_groups)
-    (pending,) = list(exp_storage_memory_sql.pending_trials())
+    trial = exp_storage.new_trial(tunable_groups)
+    (pending,) = list(exp_storage.pending_trials())
     assert pending.trial_id == trial.trial_id
     assert pending.tunables == tunable_groups
 
 
-def test_exp_trial_pending_many(exp_storage_memory_sql: Storage.Experiment,
+def test_exp_trial_pending_many(exp_storage: Storage.Experiment,
                                 tunable_groups: TunableGroups) -> None:
     """
     Start THREE trials and check that both are pending.
@@ -51,45 +51,45 @@ def test_exp_trial_pending_many(exp_storage_memory_sql: Storage.Experiment,
     config1 = tunable_groups.copy().assign({'idle': 'mwait'})
     config2 = tunable_groups.copy().assign({'idle': 'noidle'})
     trial_ids = {
-        exp_storage_memory_sql.new_trial(config1).trial_id,
-        exp_storage_memory_sql.new_trial(config2).trial_id,
-        exp_storage_memory_sql.new_trial(config2).trial_id,  # Submit same config twice
+        exp_storage.new_trial(config1).trial_id,
+        exp_storage.new_trial(config2).trial_id,
+        exp_storage.new_trial(config2).trial_id,  # Submit same config twice
     }
-    pending_ids = {pending.trial_id for pending in exp_storage_memory_sql.pending_trials()}
+    pending_ids = {pending.trial_id for pending in exp_storage.pending_trials()}
     assert len(pending_ids) == 3
     assert trial_ids == pending_ids
 
 
-def test_exp_trial_pending_fail(exp_storage_memory_sql: Storage.Experiment,
+def test_exp_trial_pending_fail(exp_storage: Storage.Experiment,
                                 tunable_groups: TunableGroups) -> None:
     """
     Start a trial, fail it, and and check that it is NOT pending.
     """
-    trial = exp_storage_memory_sql.new_trial(tunable_groups)
+    trial = exp_storage.new_trial(tunable_groups)
     trial.update(Status.FAILED, datetime.utcnow())
-    trials = list(exp_storage_memory_sql.pending_trials())
+    trials = list(exp_storage.pending_trials())
     assert not trials
 
 
-def test_exp_trial_success(exp_storage_memory_sql: Storage.Experiment,
+def test_exp_trial_success(exp_storage: Storage.Experiment,
                            tunable_groups: TunableGroups) -> None:
     """
     Start a trial, finish it successfully, and and check that it is NOT pending.
     """
-    trial = exp_storage_memory_sql.new_trial(tunable_groups)
+    trial = exp_storage.new_trial(tunable_groups)
     trial.update(Status.SUCCEEDED, datetime.utcnow(), 99.9)
-    trials = list(exp_storage_memory_sql.pending_trials())
+    trials = list(exp_storage.pending_trials())
     assert not trials
 
 
-def test_exp_trial_update_categ(exp_storage_memory_sql: Storage.Experiment,
+def test_exp_trial_update_categ(exp_storage: Storage.Experiment,
                                 tunable_groups: TunableGroups) -> None:
     """
     Update the trial with multiple metrics, some of which are categorical.
     """
-    trial = exp_storage_memory_sql.new_trial(tunable_groups)
+    trial = exp_storage.new_trial(tunable_groups)
     trial.update(Status.SUCCEEDED, datetime.utcnow(), {"score": 99.9, "benchmark": "test"})
-    assert exp_storage_memory_sql.load() == (
+    assert exp_storage.load() == (
         [{
             'idle': 'halt',
             'kernel_sched_latency_ns': '2000000',
@@ -101,18 +101,18 @@ def test_exp_trial_update_categ(exp_storage_memory_sql: Storage.Experiment,
     )
 
 
-def test_exp_trial_update_twice(exp_storage_memory_sql: Storage.Experiment,
+def test_exp_trial_update_twice(exp_storage: Storage.Experiment,
                                 tunable_groups: TunableGroups) -> None:
     """
     Update the trial status twice and receive an error.
     """
-    trial = exp_storage_memory_sql.new_trial(tunable_groups)
+    trial = exp_storage.new_trial(tunable_groups)
     trial.update(Status.FAILED, datetime.utcnow())
     with pytest.raises(RuntimeError):
         trial.update(Status.SUCCEEDED, datetime.utcnow(), 99.9)
 
 
-def test_exp_trial_pending_3(exp_storage_memory_sql: Storage.Experiment,
+def test_exp_trial_pending_3(exp_storage: Storage.Experiment,
                              tunable_groups: TunableGroups) -> None:
     """
     Start THREE trials, let one succeed, another one fail and keep one not updated.
@@ -120,17 +120,17 @@ def test_exp_trial_pending_3(exp_storage_memory_sql: Storage.Experiment,
     """
     score = 99.9
 
-    trial_fail = exp_storage_memory_sql.new_trial(tunable_groups)
-    trial_succ = exp_storage_memory_sql.new_trial(tunable_groups)
-    trial_pend = exp_storage_memory_sql.new_trial(tunable_groups)
+    trial_fail = exp_storage.new_trial(tunable_groups)
+    trial_succ = exp_storage.new_trial(tunable_groups)
+    trial_pend = exp_storage.new_trial(tunable_groups)
 
     trial_fail.update(Status.FAILED, datetime.utcnow())
     trial_succ.update(Status.SUCCEEDED, datetime.utcnow(), score)
 
-    (pending,) = list(exp_storage_memory_sql.pending_trials())
+    (pending,) = list(exp_storage.pending_trials())
     assert pending.trial_id == trial_pend.trial_id
 
-    (configs, scores, status) = exp_storage_memory_sql.load()
+    (configs, scores, status) = exp_storage.load()
     assert len(configs) == 2
     assert scores == [None, score]
     assert status == [Status.FAILED, Status.SUCCEEDED]
diff --git a/mlos_bench/mlos_bench/tests/storage/sql/__init__.py b/mlos_bench/mlos_bench/tests/storage/sql/__init__.py
new file mode 100644
index 00000000000..f3da42c6fec
--- /dev/null
+++ b/mlos_bench/mlos_bench/tests/storage/sql/__init__.py
@@ -0,0 +1,7 @@
+#
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+#
+"""
+Tests for mlos_bench SQL storage.
+"""
diff --git a/mlos_bench/mlos_bench/tests/storage/sql/fixtures.py b/mlos_bench/mlos_bench/tests/storage/sql/fixtures.py
new file mode 100644
index 00000000000..03be3d3d5f8
--- /dev/null
+++ b/mlos_bench/mlos_bench/tests/storage/sql/fixtures.py
@@ -0,0 +1,99 @@
+#
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+#
+"""
+Test fixtures for mlos_bench storage.
+"""
+
+from datetime import datetime
+from random import random, seed as rand_seed
+
+import pytest
+
+from mlos_bench.environments.status import Status
+from mlos_bench.storage.base_experiment_data import ExperimentData
+from mlos_bench.storage.sql.storage import SqlStorage
+from mlos_bench.optimizers.mock_optimizer import MockOptimizer
+from mlos_bench.tunables.tunable_groups import TunableGroups
+
+# pylint: disable=redefined-outer-name
+
+
+@pytest.fixture
+def storage() -> SqlStorage:
+    """
+    Test fixture for in-memory SQLite3 storage.
+    """
+    return SqlStorage(
+        service=None,
+        config={
+            "drivername": "sqlite",
+            "database": ":memory:",
+        }
+    )
+
+
+@pytest.fixture
+def exp_storage(storage: SqlStorage, tunable_groups: TunableGroups) -> SqlStorage.Experiment:
+    """
+    Test fixture for Experiment using in-memory SQLite3 storage.
+    Note: the experiment context has already been entered when this fixture returns.
+    """
+    opt_target = "score"
+    opt_direction = "min"
+    with storage.experiment(
+        experiment_id="Test-001",
+        trial_id=1,
+        root_env_config="environment.jsonc",
+        description="pytest experiment",
+        tunables=tunable_groups,
+        opt_target=opt_target,
+        opt_direction=opt_direction,
+    ) as exp:
+        return exp
+
+
+@pytest.fixture
+def exp_storage_with_trials(exp_storage: SqlStorage.Experiment) -> SqlStorage.Experiment:
+    """
+    Test fixture for an Experiment with pre-populated trials, using in-memory SQLite3 storage.
+    """
+    # Add some trials to that experiment.
+    # Note: we're just fabricating a made-up function for the ML libraries to try to learn.
+    base_score = 5.0
+    tunable_name = "kernel_sched_latency_ns"
+    tunable_default = exp_storage.tunables.get_tunable(tunable_name)[0].default
+    assert isinstance(tunable_default, int)
+    config_count = 10
+    repeat_count = 3
+    seed = 42
+    rand_seed(seed)
+    opt = MockOptimizer(tunables=exp_storage.tunables, config={"seed": seed})
+    assert opt.start_with_defaults
+    for config_i in range(config_count):
+        tunables = opt.suggest()
+        for repeat_j in range(repeat_count):
+            trial = exp_storage.new_trial(tunables=tunables.copy(), config={
+                "opt_target": exp_storage.opt_target,
+                "opt_direction": exp_storage.opt_direction,
+                "trial_number": config_i * repeat_count + repeat_j + 1,
+            })
+            trial.update_telemetry(status=Status.RUNNING, metrics=[
+                (datetime.utcnow(), "some-metric", base_score + random() / 10),
+            ])
+            tunable_value = float(tunables.get_tunable(tunable_name)[0].numerical_value)
+            trial.update(Status.SUCCEEDED, datetime.utcnow(), metrics={
+                # Give some variance on the score.
+                # And some influence from the tunable value.
+                "score": base_score + 10 * ((tunable_value / tunable_default) - 1) + random() / 10,
+            })
+    return exp_storage
+
+
+@pytest.fixture
+def exp_data(storage: SqlStorage, exp_storage_with_trials: SqlStorage.Experiment) -> ExperimentData:
+    """
+    Test fixture for ExperimentData.
+    """
+    return storage.experiments[exp_storage_with_trials.experiment_id]
diff --git a/mlos_bench/mlos_bench/tests/storage/trial_config_test.py b/mlos_bench/mlos_bench/tests/storage/trial_config_test.py
index cfdecbb67b0..7b07f6993ce 100644
--- a/mlos_bench/mlos_bench/tests/storage/trial_config_test.py
+++ b/mlos_bench/mlos_bench/tests/storage/trial_config_test.py
@@ -10,25 +10,25 @@
 from mlos_bench.tunables.tunable_groups import TunableGroups
 
 
-def test_exp_trial_pending(exp_storage_memory_sql: Storage.Experiment,
+def test_exp_trial_pending(exp_storage: Storage.Experiment,
                            tunable_groups: TunableGroups) -> None:
     """
     Schedule a trial and check that it is pending and has the right configuration.
     """
     config = {"location": "westus2", "num_repeats": 100}
-    trial = exp_storage_memory_sql.new_trial(tunable_groups, config)
-    (pending,) = list(exp_storage_memory_sql.pending_trials())
+    trial = exp_storage.new_trial(tunable_groups, config)
+    (pending,) = list(exp_storage.pending_trials())
     assert pending.trial_id == trial.trial_id
     assert pending.tunables == tunable_groups
     assert pending.config() == {
         "location": "westus2",
         "num_repeats": "100",
         "experiment_id": "Test-001",
-        "trial_id": 1,
+        "trial_id": trial.trial_id,
     }
 
 
-def test_exp_trial_configs(exp_storage_memory_sql: Storage.Experiment,
+def test_exp_trial_configs(exp_storage: Storage.Experiment,
                            tunable_groups: TunableGroups) -> None:
     """
     Start multiple trials with two different configs and check that
@@ -36,18 +36,18 @@ def test_exp_trial_configs(exp_storage_memory_sql: Storage.Experiment,
     """
     config1 = tunable_groups.copy().assign({'idle': 'mwait'})
     trials1 = [
-        exp_storage_memory_sql.new_trial(config1),
-        exp_storage_memory_sql.new_trial(config1),
-        exp_storage_memory_sql.new_trial(config1.copy()),  # Same values, different instance
+        exp_storage.new_trial(config1),
+        exp_storage.new_trial(config1),
+        exp_storage.new_trial(config1.copy()),  # Same values, different instance
     ]
     assert trials1[0].config_id == trials1[1].config_id
     assert trials1[0].config_id == trials1[2].config_id
 
     config2 = tunable_groups.copy().assign({'idle': 'halt'})
     trials2 = [
-        exp_storage_memory_sql.new_trial(config2),
-        exp_storage_memory_sql.new_trial(config2),
-        exp_storage_memory_sql.new_trial(config2.copy()),  # Same values, different instance
+        exp_storage.new_trial(config2),
+        exp_storage.new_trial(config2),
+        exp_storage.new_trial(config2.copy()),  # Same values, different instance
     ]
     assert trials2[0].config_id == trials2[1].config_id
     assert trials2[0].config_id == trials2[2].config_id
@@ -55,7 +55,7 @@ def test_exp_trial_configs(exp_storage_memory_sql: Storage.Experiment,
     assert trials1[0].config_id != trials2[0].config_id
 
     pending_ids = [
-        pending.config_id for pending in exp_storage_memory_sql.pending_trials()
+        pending.config_id for pending in exp_storage.pending_trials()
     ]
     assert len(pending_ids) == 6
     assert len(set(pending_ids)) == 2
diff --git a/mlos_bench/mlos_bench/tests/storage/trial_data_test.py b/mlos_bench/mlos_bench/tests/storage/trial_data_test.py
index 7775e732496..e6ae0b18d82 100644
--- a/mlos_bench/mlos_bench/tests/storage/trial_data_test.py
+++ b/mlos_bench/mlos_bench/tests/storage/trial_data_test.py
@@ -6,4 +6,22 @@
 Unit tests for loading the trial metadata.
 """
 
-# TODO
+import pytest
+
+from mlos_bench.environments.status import Status
+from mlos_bench.storage.base_experiment_data import ExperimentData
+from mlos_bench.tunables.tunable_groups import TunableGroups
+
+
+def test_exp_trial_data(exp_data: ExperimentData,
+                        tunable_groups: TunableGroups) -> None:
+    """
+    Start a new trial and check the storage for the trial data.
+    """
+    trial = exp_data.trials[1]
+    assert trial.status == Status.SUCCEEDED
+    assert trial.tunable_config_dict == tunable_groups.get_param_values()
+    assert trial.metadata_dict["trial_number"] == 1
+    assert list(trial.results_dict.keys()) == ["score"]
+    assert trial.results_dict["score"] == pytest.approx(5.0, rel=0.1)
+    # TODO: test telemetry data too
diff --git a/mlos_bench/mlos_bench/tests/storage/trial_telemetry_test.py b/mlos_bench/mlos_bench/tests/storage/trial_telemetry_test.py
index ecb29029032..3da137ac934 100644
--- a/mlos_bench/mlos_bench/tests/storage/trial_telemetry_test.py
+++ b/mlos_bench/mlos_bench/tests/storage/trial_telemetry_test.py
@@ -47,27 +47,27 @@ def _telemetry_str(data: List[Tuple[datetime, str, Any]]
     return [(ts, key, None if val is None else str(val)) for (ts, key, val) in data]
 
 
-def test_update_telemetry(exp_storage_memory_sql: Storage.Experiment,
+def test_update_telemetry(exp_storage: Storage.Experiment,
                           tunable_groups: TunableGroups,
                           telemetry_data: List[Tuple[datetime, str, Any]]) -> None:
     """
     Make sure update_telemetry() and load_telemetry() methods work.
     """
-    trial = exp_storage_memory_sql.new_trial(tunable_groups)
-    assert exp_storage_memory_sql.load_telemetry(trial.trial_id) == []
+    trial = exp_storage.new_trial(tunable_groups)
+    assert exp_storage.load_telemetry(trial.trial_id) == []
 
     trial.update_telemetry(Status.RUNNING, telemetry_data)
-    assert exp_storage_memory_sql.load_telemetry(trial.trial_id) == _telemetry_str(telemetry_data)
+    assert exp_storage.load_telemetry(trial.trial_id) == _telemetry_str(telemetry_data)
 
 
-def test_update_telemetry_twice(exp_storage_memory_sql: Storage.Experiment,
+def test_update_telemetry_twice(exp_storage: Storage.Experiment,
                                 tunable_groups: TunableGroups,
                                 telemetry_data: List[Tuple[datetime, str, Any]]) -> None:
     """
     Make sure update_telemetry() call is idempotent.
     """
-    trial = exp_storage_memory_sql.new_trial(tunable_groups)
+    trial = exp_storage.new_trial(tunable_groups)
     trial.update_telemetry(Status.RUNNING, telemetry_data)
     trial.update_telemetry(Status.RUNNING, telemetry_data)
     trial.update_telemetry(Status.RUNNING, telemetry_data)
-    assert exp_storage_memory_sql.load_telemetry(trial.trial_id) == _telemetry_str(telemetry_data)
+    assert exp_storage.load_telemetry(trial.trial_id) == _telemetry_str(telemetry_data)
diff --git a/mlos_bench/mlos_bench/tests/tunable_groups_fixtures.py b/mlos_bench/mlos_bench/tests/tunable_groups_fixtures.py
new file mode 100644
index 00000000000..2afed8ef17b
--- /dev/null
+++ b/mlos_bench/mlos_bench/tests/tunable_groups_fixtures.py
@@ -0,0 +1,103 @@
+#
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+#
+"""
+Common fixtures for mock TunableGroups.
+"""
+
+from typing import Any, Dict
+
+import pytest
+
+import json5 as json
+
+from mlos_bench.config.schemas import ConfigSchema
+from mlos_bench.tunables.covariant_group import CovariantTunableGroup
+from mlos_bench.tunables.tunable_groups import TunableGroups
+
+# pylint: disable=redefined-outer-name
+
+TUNABLE_GROUPS_JSON = """
+{
+    "provision": {
+        "cost": 1000,
+        "params": {
+            "vmSize": {
+                "description": "Azure VM size",
+                "type": "categorical",
+                "default": "Standard_B4ms",
+                "values": ["Standard_B2s", "Standard_B2ms", "Standard_B4ms"]
+            }
+        }
+    },
+    "boot": {
+        "cost": 300,
+        "params": {
+            "idle": {
+                "description": "Idling method",
+                "type": "categorical",
+                "default": "halt",
+                "values": ["halt", "mwait", "noidle"]
+            }
+        }
+    },
+    "kernel": {
+        "cost": 1,
+        "params": {
+            "kernel_sched_migration_cost_ns": {
+                "description": "Cost of migrating the thread to another core",
+                "type": "int",
+                "default": -1,
+                "range": [0, 500000],
+                "special": [-1, 0]
+            },
+            "kernel_sched_latency_ns": {
+                "description": "Initial value for the scheduler period",
+                "type": "int",
+                "default": 2000000,
+                "range": [0, 1000000000]
+            }
+        }
+    }
+}
+"""
+
+
+@pytest.fixture
+def tunable_groups_config() -> Dict[str, Any]:
+    """
+    Fixture to get the configuration dict for the tunable groups (parsed from JSON).
+    """
+    conf = json.loads(TUNABLE_GROUPS_JSON)
+    assert isinstance(conf, dict)
+    ConfigSchema.TUNABLE_PARAMS.validate(conf)
+    return conf
+
+
+@pytest.fixture
+def tunable_groups(tunable_groups_config: dict) -> TunableGroups:
+    """
+    A test fixture that produces a mock TunableGroups.
+
+    Returns
+    -------
+    tunable_groups : TunableGroups
+        A new TunableGroups object for testing.
+    """
+    tunables = TunableGroups(tunable_groups_config)
+    tunables.reset()
+    return tunables
+
+
+@pytest.fixture
+def covariant_group(tunable_groups: TunableGroups) -> CovariantTunableGroup:
+    """
+    Test fixture to get a CovariantTunableGroup from tunable_groups.
+
+    Returns
+    -------
+    CovariantTunableGroup
+    """
+    (_, covariant_group) = next(iter(tunable_groups))
+    return covariant_group
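
A hypothetical test illustrating how the re-exported fixtures get picked up by pytest via the conftest modules above:

```python
from mlos_bench.tunables.tunable_groups import TunableGroups


def test_tunable_defaults(tunable_groups: TunableGroups) -> None:
    """Pytest injects the fixture by name, re-exported through conftest.py."""
    assert tunable_groups.get_param_values()["idle"] == "halt"
```
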