Commit
add tests for exposure pipeline input/output
braingram committed Nov 20, 2024
1 parent 5b991ce commit 7d10663
Showing 3 changed files with 70 additions and 1 deletion.
14 changes: 13 additions & 1 deletion romancal/pipeline/exposure_pipeline.py
@@ -75,12 +75,16 @@ def process(self, input):
 
         # determine the input type
         file_type = filetype.check(input)
+        return_lib = True
         if file_type == "ModelLibrary":
             lib = input
         elif file_type == "asn":
             lib = ModelLibrary(input)
         else:
+            # for a non-asn non-library input process it as a library
             lib = ModelLibrary([input])
+            # but return it as a datamodel
+            return_lib = False
 
         # Flag to track if any of the input models are fully saturated
         any_saturated = False
@@ -138,7 +142,15 @@ def process(self, input):
 
         log.info("Roman exposure calibration pipeline ending...")
 
-        return lib
+        # return a ModelLibrary
+        if return_lib:
+            return lib
+
+        # or a DataModel (for non-asn non-lib inputs)
+        with lib:
+            model = lib.borrow(0)
+            lib.shelve(model)
+            return model
 
     def create_fully_saturated_zeroed_image(self, input_model):
         """
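For reference, a minimal usage sketch (not part of the commit) of the return-type behavior added above; `my_uncal.asdf` and `my_asn.json` are placeholder file names:

from romancal.pipeline import ExposurePipeline

pipeline = ExposurePipeline()

# A single non-asn, non-library input is processed as a one-element
# library internally but is returned as a single DataModel.
model = pipeline("my_uncal.asdf")

# An association file (or an existing ModelLibrary) is returned as a ModelLibrary.
library = pipeline("my_asn.json")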
Empty file.
57 changes: 57 additions & 0 deletions romancal/pipeline/tests/test_exposure_pipeline.py
@@ -0,0 +1,57 @@
import pytest
import roman_datamodels.datamodels as rdm
import roman_datamodels.maker_utils as mk

from romancal.associations.asn_from_list import asn_from_list
from romancal.datamodels.library import ModelLibrary
from romancal.pipeline import ExposurePipeline


@pytest.fixture(scope="function")
def input_value(request, tmp_path):
    match request.param:
        case "datamodel_fn":
            model = mk.mk_datamodel(rdm.RampModel)
            fn = tmp_path / "model.asdf"
            model.save(fn)
            return fn
        case "datamodel":
            return mk.mk_datamodel(rdm.RampModel)
        case "asn_fn":
            model = mk.mk_datamodel(rdm.RampModel)
            model.meta.filename = "foo.asdf"
            model.save(tmp_path / model.meta.filename)
            asn = asn_from_list([model.meta.filename], product_name="foo_out")
            base_fn, contents = asn.dump(format="json")
            asn_filename = tmp_path / base_fn
            with open(asn_filename, "w") as f:
                f.write(contents)
            return asn_filename
        case "library":
            return ModelLibrary([mk.mk_datamodel(rdm.RampModel)])
        case value:
            raise Exception(f"Invalid parametrization: {value}")


@pytest.mark.parametrize(
    "input_value, expected_output_type",
    [
        ("datamodel_fn", rdm.DataModel),
        ("datamodel", rdm.DataModel),
        ("asn_fn", ModelLibrary),
        ("library", ModelLibrary),
    ],
    indirect=["input_value"],
)
def test_input_to_output(input_value, expected_output_type):
    """
    Test that for a particular input_value (as parametrized indirectly
    through the input_value fixture) the output is the expected type.
    """
    pipeline = ExposurePipeline()
    # don't fetch references
    pipeline.prefetch_references = False
    # skip all steps
    [setattr(getattr(pipeline, k), "skip", True) for k in pipeline.step_defs]
    output_value = pipeline(input_value)
    assert isinstance(output_value, expected_output_type)
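As a follow-up note, models held by a returned ModelLibrary are accessed with the same borrow/shelve pattern the pipeline change above uses; a minimal sketch, assuming `library` is a ModelLibrary returned by the pipeline (e.g. for the asn_fn or library cases):

with library:
    model = library.borrow(0)  # check out the first model for use
    # ... inspect model.meta, model.data, etc. here ...
    library.shelve(model)  # hand it back before leaving the context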
