From 5c10f199b442a14805341c979a1df7dd67694981 Mon Sep 17 00:00:00 2001
From: Marwan Zouinkhi
Date: Wed, 7 Feb 2024 16:18:34 -0500
Subject: [PATCH] Update validate.py

---
 dacapo/validate.py | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)

diff --git a/dacapo/validate.py b/dacapo/validate.py
index 3458aadf7..a1cf9da7d 100644
--- a/dacapo/validate.py
+++ b/dacapo/validate.py
@@ -79,7 +79,6 @@ def validate_run(
     evaluator.set_best(run.validation_scores)
 
     for validation_dataset in run.datasplit.validate:
-        logger.warning("Validating on dataset %s", validation_dataset.name)
         assert (
             validation_dataset.gt is not None
         ), "We do not yet support validating on datasets without ground truth"
@@ -99,7 +98,7 @@
                 f"{input_gt_array_identifier.container}/{input_gt_array_identifier.dataset}"
             ).exists()
         ):
-            logger.warning("Copying validation inputs!")
+            logger.info("Copying validation inputs!")
             input_voxel_size = validation_dataset.raw.voxel_size
             output_voxel_size = run.model.scale(input_voxel_size)
             input_shape = run.model.eval_input_shape
@@ -137,12 +136,12 @@
             )
             input_gt[output_roi] = validation_dataset.gt[output_roi]
         else:
-            logger.warning("validation inputs already copied!")
+            logger.info("validation inputs already copied!")
 
         prediction_array_identifier = array_store.validation_prediction_array(
             run.name, iteration, validation_dataset
         )
-        logger.warning("Predicting on dataset %s", validation_dataset.name)
+        logger.info("Predicting on dataset %s", validation_dataset.name)
         predict(
             run.model,
             validation_dataset.raw,
@@ -150,7 +149,7 @@
             compute_context=compute_context,
             output_roi=validation_dataset.gt.roi,
         )
-        logger.warning("Predicted on dataset %s", validation_dataset.name)
+        logger.info("Predicted on dataset %s", validation_dataset.name)
 
         post_processor.set_prediction(prediction_array_identifier)