feat: Allow import calculation plan from batch result excel file #567

Merged
merged 11 commits
Apr 1, 2022
2 changes: 1 addition & 1 deletion .github/workflows/test_prereleases.yml
@@ -35,7 +35,7 @@ jobs:
strategy:
fail-fast: false
matrix:
-platform: [windows-2019, macos-10.15, ubuntu-20.04]
+platform: [windows-2019, macos-11, ubuntu-20.04]
python: [3.8]
steps:
- uses: actions/checkout@v2
4 changes: 2 additions & 2 deletions .github/workflows/tests.yml
@@ -41,7 +41,7 @@ jobs:
strategy:
fail-fast: false
matrix:
-os: [windows-2019, macos-10.15, ubuntu-20.04]
+os: [windows-2019, macos-11, ubuntu-20.04]
python_version: ['3.7', '3.8', '3.9']
steps:
- uses: actions/checkout@v2
@@ -92,7 +92,7 @@ jobs:
strategy:
fail-fast: false
matrix:
-os: [ windows-2019, macos-10.15, ubuntu-20.04 ]
+os: [ windows-2019, macos-11, ubuntu-20.04 ]
steps:
- uses: actions/checkout@v2
- uses: actions/setup-python@v1
1 change: 1 addition & 0 deletions MANIFEST.in
@@ -4,6 +4,7 @@ include package/tests/test_data/ome.xsd.xml
include package/tests/test_data/napari_measurements_profile.json
include package/tests/test_data/notebook/*.json
include package/tests/test_data/old_saves/*/*/*.json
+include package/tests/test_data/sample_batch_output.xlsx
include Readme.md
include changelog.md
include pyproject.toml
2 changes: 1 addition & 1 deletion azure-pipelines.yml
@@ -106,7 +106,7 @@ stages:
strategy:
matrix:
macos:
-imageName: 'macos-10.15'
+imageName: 'macos-11'
windows:
imageName: 'windows-2019'
pool: {vmImage: $(imageName)}
19 changes: 10 additions & 9 deletions package/PartSeg/_roi_analysis/advanced_window.py
@@ -34,6 +34,7 @@
from PartSegCore.analysis.algorithm_description import AnalysisAlgorithmSelection
from PartSegCore.analysis.measurement_base import AreaType, Leaf, MeasurementEntry, Node, PerComponent
from PartSegCore.analysis.measurement_calculation import MEASUREMENT_DICT, MeasurementProfile
+from PartSegCore.io_utils import LoadPlanJson
from PartSegCore.universal_const import UNIT_SCALE, Units
from PartSegData import icons_dir

@@ -257,14 +258,14 @@ def export_profile(self):

def import_profiles(self):
dial = PLoadDialog(
"Segment profile (*.json)",
LoadPlanJson,
settings=self._settings,
path=IO_SAVE_DIRECTORY,
caption="Import profile segment",
)
if dial.exec_():
-file_path = dial.selectedFiles()[0]
-profs, err = self._settings.load_part(file_path)
+res = dial.get_result()
+profs, err = res.load_class.load(res.load_location)
if err:
QMessageBox.warning(self, "Import error", "error during importing, part of data were filtered.")
profiles_dict = self._settings.roi_profiles
@@ -295,14 +296,14 @@ def export_pipeline(self):

def import_pipeline(self):
dial = PLoadDialog(
"Segment pipeline (*.json)",
LoadPlanJson,
settings=self._settings,
path=IO_SAVE_DIRECTORY,
caption="Import pipeline segment",
)
if dial.exec_():
-file_path = dial.selectedFiles()[0]
-profs, err = self._settings.load_part(file_path)
+res = dial.get_result()
+profs, err = res.load_class.load(res.load_location)
if err:
QMessageBox.warning(self, "Import error", "error during importing, part of data were filtered.")
profiles_dict = self._settings.roi_pipelines
@@ -776,15 +777,15 @@ def export_measurement_profiles(self):

def import_measurement_profiles(self):
dial = PLoadDialog(
"Measurement profile (*.json)",
LoadPlanJson,
settings=self.settings,
path="io.export_directory",
caption="Import settings profiles",
parent=self,
)
if dial.exec_():
-file_path = str(dial.selectedFiles()[0])
-stat, err = self.settings.load_part(file_path)
+res = dial.get_result()
+stat, err = res.load_class.load(res.load_location)
if err:
QMessageBox.warning(self, "Import error", "error during importing, part of data were filtered.")
measurement_dict = self.settings.measurement_profiles
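All three import dialogs above switch from the settings-based load_part call to the result-based flow of PLoadDialog. A minimal sketch of that shared flow follows; settings is a placeholder for the window's settings object, IO_SAVE_DIRECTORY is the path constant already used in advanced_window.py, and widget wiring is omitted.

# Sketch only: `settings` stands in for the settings instance the window holds.
from PartSeg.common_gui.custom_load_dialog import PLoadDialog
from PartSegCore.io_utils import LoadPlanJson

dial = PLoadDialog(LoadPlanJson, settings=settings, path=IO_SAVE_DIRECTORY, caption="Import profile segment")
if dial.exec_():
    res = dial.get_result()  # LoadProperty named tuple
    profiles, errors = res.load_class.load(res.load_location)
    if errors:
        print("Some entries were filtered during import:", errors)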
14 changes: 7 additions & 7 deletions package/PartSeg/_roi_analysis/prepare_plan_widget.py
@@ -56,7 +56,7 @@
)
from PartSegCore.analysis.measurement_calculation import MeasurementProfile
from PartSegCore.analysis.save_functions import save_dict
-from PartSegCore.io_utils import SaveBase
+from PartSegCore.io_utils import LoadPlanExcel, LoadPlanJson, SaveBase
from PartSegCore.universal_const import Units

from ..common_gui.custom_load_dialog import PLoadDialog
@@ -1147,16 +1147,16 @@ def export_plans(self):

def import_plans(self):
dial = PLoadDialog(
"Calculation plans (*.json)",
[LoadPlanJson, LoadPlanExcel],
settings=self.settings,
path="io.batch_plan_directory",
caption="Import calculation plans",
)
if dial.exec_():
-file_path = dial.selectedFiles()[0]
-plans, err = self.settings.load_part(file_path)
+res = dial.get_result()
+plans, err = res.load_class.load(res.load_location)
if err:
QMessageBox.warning(self, "Import error", "error during importing, part of data were filtered.")
QMessageBox.warning(self, "Import error", f"error during importing, part of data were filtered. {err}")
choose = ImportDialog(plans, self.settings.batch_plans, PlanPreview)
if choose.exec_():
for original_name, final_name in choose.get_import_list():
@@ -1186,9 +1186,9 @@ def plan_preview(self, text):
if self.protect:
return
text = str(text)
-if text.strip() == "":
+if not text.strip():
return
-plan = self.settings.batch_plans[str(text)] # type: CalculationPlan
+plan = self.settings.batch_plans[text]
self.plan_view.set_plan(plan)


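The plan import dialog above now registers both loaders, so JSON exports and batch result workbooks go through the same path. The loaders can also be called directly; a short usage sketch with hypothetical file names:

# Usage sketch; the file names are placeholders, not files from this PR.
from PartSegCore.io_utils import LoadPlanExcel, LoadPlanJson

excel_plans, excel_errors = LoadPlanExcel.load(["batch_result.xlsx"])
json_plans, json_errors = LoadPlanJson.load(["exported_plans.json"])
for name, plan in excel_plans.items():
    print(name, plan)
if excel_errors or json_errors:
    print("Filtered entries:", excel_errors + json_errors)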
41 changes: 16 additions & 25 deletions package/PartSeg/common_backend/base_settings.py
@@ -9,19 +9,7 @@
from contextlib import suppress
from datetime import datetime
from pathlib import Path
-from typing import (
-TYPE_CHECKING,
-Any,
-Callable,
-Dict,
-List,
-MutableMapping,
-NamedTuple,
-Optional,
-Sequence,
-Tuple,
-Union,
-)
+from typing import TYPE_CHECKING, Any, Callable, Dict, List, NamedTuple, Optional, Sequence, Tuple, Union

import napari.utils.theme
import numpy as np
@@ -35,12 +23,12 @@
from PartSegCore import register
from PartSegCore.color_image import default_colormap_dict, default_label_dict
from PartSegCore.color_image.base_colors import starting_colors
-from PartSegCore.io_utils import load_metadata_base
+from PartSegCore.io_utils import load_matadata_part, load_metadata_base
from PartSegCore.json_hooks import PartSegEncoder
from PartSegCore.project_info import AdditionalLayerDescription, HistoryElement, ProjectInfoBase
from PartSegCore.roi_info import ROIInfo
from PartSegCore.segmentation.algorithm_base import ROIExtractionResult
-from PartSegCore.utils import ProfileDict, check_loaded_dict
+from PartSegCore.utils import ProfileDict
from PartSegImage import Image

if hasattr(napari.utils.theme, "get_theme"):
@@ -698,16 +686,19 @@ def dump_part(self, file_path, path_in_dict, names=None):
json.dump(data, ff, cls=self.json_encoder_class, indent=2)

@classmethod
-def load_part(cls, file_path):
-data = cls.load_metadata(file_path)
-bad_key = []
-if isinstance(data, MutableMapping) and not check_loaded_dict(data):
-bad_key.extend(k for k, v in data.items() if not check_loaded_dict(v))
-for el in bad_key:
-del data[el]
-elif isinstance(data, ProfileDict) and not data.verify_data():
-bad_key = data.filter_data()
-return data, bad_key
+def load_part(cls, data: Union[Path, str]) -> Tuple[dict, List[str]]: # pragma: no cover
+"""
+Load serialized data. Get valid entries.
+
+:param data: path to file or string to be decoded.
+:return:
+"""
+warnings.warn(
+f"{cls.__name__}.load_part is deprecated. Please use PartSegCore.utils.load_matadata_part",
+stacklevel=2,
+category=FutureWarning,
+)
+return load_matadata_part(data)

def dump(self, folder_path: Union[Path, str, None] = None):
"""
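load_part stays only as a deprecated wrapper that warns and delegates to the helper now added in PartSegCore.io_utils. A minimal sketch of the replacement call, with a hypothetical file path:

# Sketch of the direct replacement for the deprecated load_part; the path is hypothetical.
from PartSegCore.io_utils import load_matadata_part

data, bad_keys = load_matadata_part("roi_profiles.json")
if bad_keys:
    print("Entries that failed validation were dropped:", bad_keys)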
8 changes: 5 additions & 3 deletions package/PartSeg/common_gui/custom_load_dialog.py
@@ -16,7 +16,7 @@ class LoadProperty(typing.NamedTuple):
load_class: typing.Type[LoadBase]


-IORegister = typing.Union[typing.Dict[str, type(LoadBase)], type(LoadBase), str]
+IORegister = typing.Union[typing.Dict[str, type(LoadBase)], type(LoadBase), str, typing.List[type(LoadBase)]]


class IOMethodMock:
@@ -66,6 +66,8 @@ def __init__(
):
if isinstance(io_register, str):
io_register = {io_register: IOMethodMock(io_register)}
+if isinstance(io_register, list):
+io_register = {x.get_name(): x for x in io_register}
if not isinstance(io_register, typing.MutableMapping):
io_register = {io_register.get_name(): io_register}
super().__init__(parent, caption)
@@ -109,8 +111,8 @@ def accept(self):
else:
super().accept()

-def get_result(self):
-chosen_class: LoadBase = self.io_register[self.selectedNameFilter()]
+def get_result(self) -> LoadProperty:
+chosen_class: typing.Type[LoadBase] = self.io_register[self.selectedNameFilter()]
return LoadProperty(self.files_list, self.selectedNameFilter(), chosen_class)


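With the widened IORegister union, a plain list of loader classes can be passed to the dialog; the constructor shown above normalizes it to a name-keyed mapping. A sketch of that normalization, using the two loaders added in this PR:

# Mirrors the list branch added to the dialog constructor above.
from PartSegCore.io_utils import LoadPlanExcel, LoadPlanJson

io_register = [LoadPlanJson, LoadPlanExcel]
normalized = {loader.get_name(): loader for loader in io_register}
# {'Calculation plans (*.json)': LoadPlanJson,
#  'Calculation plans from result (*.xlsx)': LoadPlanExcel}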
2 changes: 1 addition & 1 deletion package/PartSeg/common_gui/custom_save_dialog.py
@@ -151,7 +151,7 @@ def get_result(self) -> SaveProperty:
class PSaveDialog(CustomSaveDialog):
def __init__(
self,
-save_register: typing.Union[typing.Dict[str, type(SaveBase)], type(SaveBase)],
+save_register: IORegister,
*,
settings: "BaseSettings",
path: str,
16 changes: 7 additions & 9 deletions package/PartSegCore/analysis/batch_processing/batch_backend.py
@@ -72,6 +72,7 @@
from ...project_info import AdditionalLayerDescription, HistoryElement
from ...roi_info import ROIInfo
from ...segmentation import RestartableAlgorithm
+from ...utils import iterate_names
from .parallel_backend import BatchManager, SubprocessOrder


@@ -800,15 +801,12 @@ def write_to_excel(
@staticmethod
def write_calculation_plan(writer: pd.ExcelWriter, calculation_plan: CalculationPlan):
book: xlsxwriter.Workbook = writer.book
sheet_base_name = f"info {calculation_plan.name}"[:30]
sheet_name = sheet_base_name
if sheet_name in book.sheetnames: # pragma: no cover
for i in range(100):
sheet_name = f"{sheet_base_name[:26]} ({i})"
if sheet_name not in book.sheetnames:
break
else:
raise ValueError(f"Name collision in sheets with information about calculation plan: {sheet_name}")
sheet_name = iterate_names(f"info {calculation_plan.name}"[:30], book.sheetnames, 30)
if sheet_name is None: # pragma: no cover
raise ValueError(
"Name collision in sheets with information about calculation "
f"plan: {f'info {calculation_plan.name}'[:30]}"
)

sheet = book.add_worksheet(sheet_name)
cell_format = book.add_format({"bold": True})
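The hand-rolled collision loop is replaced by iterate_names from PartSegCore.utils. Its exact signature is not part of this diff; the sketch below only mirrors the two call sites visible in this PR and should be read as an assumption:

# Behaviour inferred from the call sites above, not from the helper's source.
from PartSegCore.utils import iterate_names

existing_sheets = ["info my plan", "info my plan (1)"]
sheet_name = iterate_names("info my plan", existing_sheets, 30)  # unique name, at most 30 chars
if sheet_name is None:  # None signals an unresolvable collision
    raise ValueError("Name collision in sheets with information about calculation plan")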
83 changes: 83 additions & 0 deletions package/PartSegCore/io_utils.py
@@ -13,13 +13,15 @@
import numpy as np
import pandas as pd
import tifffile
+from openpyxl import load_workbook

from PartSegCore.json_hooks import partseg_object_hook
from PartSegImage import ImageWriter
from PartSegImage.image import minimal_dtype

from .algorithm_describe_base import AlgorithmDescribeBase, AlgorithmProperty
from .project_info import ProjectInfoBase
+from .utils import ProfileDict, check_loaded_dict, iterate_names


class SegmentationType(Enum):
@@ -207,6 +209,25 @@ def load_metadata_base(data: typing.Union[str, Path]):
return decoded_data


def load_matadata_part(data: typing.Union[str, Path]) -> typing.Tuple[typing.Any, typing.List[str]]:
"""
Load serialized data. Get valid entries.

:param data: path to file or string to be decoded.
:return:
"""
# TODO extract to function
data = load_metadata_base(data)
bad_key = []
if isinstance(data, typing.MutableMapping) and not check_loaded_dict(data):
bad_key.extend(k for k, v in data.items() if not check_loaded_dict(v))
for el in bad_key:
del data[el]
elif isinstance(data, ProfileDict) and not data.verify_data():
bad_key = data.filter_data()
return data, bad_key


def proxy_callback(
range_changed: typing.Callable[[int, int], typing.Any],
step_changed: typing.Callable[[int], typing.Any],
@@ -385,3 +406,65 @@ def get_name(cls) -> str:
@classmethod
def get_fields(cls) -> typing.List[typing.Union[AlgorithmProperty, str]]:
return ["text"]


class LoadPlanJson(LoadBase):
@classmethod
def get_short_name(cls):
return "plan_json"

@classmethod
def load(
cls,
load_locations: typing.List[typing.Union[str, BytesIO, Path]],
range_changed: typing.Callable[[int, int], typing.Any] = None,
step_changed: typing.Callable[[int], typing.Any] = None,
metadata: typing.Optional[dict] = None,
):
return load_matadata_part(load_locations[0])

@classmethod
def get_name(cls) -> str:
return "Calculation plans (*.json)"


class LoadPlanExcel(LoadBase):
@classmethod
def get_short_name(cls):
return "plan_excel"

@classmethod
def load(
cls,
load_locations: typing.List[typing.Union[str, BytesIO, Path]],
range_changed: typing.Callable[[int, int], typing.Any] = None,
step_changed: typing.Callable[[int], typing.Any] = None,
metadata: typing.Optional[dict] = None,
):
data_list, error_list = [], []

xlsx = load_workbook(filename=load_locations[0], read_only=True)
try:
for sheet_name in xlsx.sheetnames:
if sheet_name.startswith("info"):
data = xlsx[sheet_name].cell(row=2, column=2).value
try:
data, err = load_matadata_part(data)
data_list.append(data)
error_list.extend(err)
except ValueError: # pragma: no cover
error_list.append(f"Cannot load data from: {sheet_name}")
finally:
xlsx.close()
data_dict = {}
for calc_plan in data_list:
new_name = iterate_names(calc_plan.name, data_dict)
if new_name is None: # pragma: no cover
error_list.append(f"Cannot determine proper name for {calc_plan.name}")
calc_plan.name = new_name
data_dict[new_name] = calc_plan
return data_dict, error_list

@classmethod
def get_name(cls) -> str:
return "Calculation plans from result (*.xlsx)"