Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

update readme #81

Merged
merged 3 commits into from
Jan 15, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion LICENSE
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
MIT License

Copyright (c) 2024 Potsdam-Institut für Klimafolgenforschung (PIK) e. V.
Copyright (c) 2025 Potsdam-Institut für Klimafolgenforschung (PIK) e. V.

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
Expand Down
4 changes: 2 additions & 2 deletions README.md
Original file line number Diff line number Diff line change
@@ -1,9 +1,9 @@
# flodym

The flodym package provides key functionality for material flow analysis, including
The flodym (Flexible Open Dynamic Material Systems Model) library provides key functionality for building material flow analysis models, including
- the class `MFASystem` acting as a template (parent class) for users to create their own material flow models
- the class `FlodymArray` handling mathematical operations between multi-dimensional arrays
- different classes like `DynamicStockModel` representing stocks accumulation, in- and outflows based on age cohort tracking and lifetime distributions. Those can be integrated in the `MFASystem`.
- different classes representing stock accumulation, in- and outflows based on age cohort tracking and lifetime distributions. Those can be integrated in the `MFASystem`.
- different options for data input and export, as well as visualization

# Thanks
Expand Down
14 changes: 8 additions & 6 deletions examples/example3.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -83,7 +83,7 @@
},
"outputs": [],
"source": [
"steel_consumption_file=os.path.join(\"input_data\", \"example3_steel_consumption.xlsx\")\n",
"steel_consumption_file = os.path.join(\"input_data\", \"example3_steel_consumption.xlsx\")\n",
"steel_consumption = pd.read_excel(steel_consumption_file)\n",
"steel_consumption = steel_consumption[[\"CS\", \"T\", \"V\"]]\n",
"steel_consumption = steel_consumption.rename(columns={\"CS\": \"Region\", \"T\": \"Time\", \"V\": \"values\"})\n",
Expand Down Expand Up @@ -112,14 +112,16 @@
"outputs": [],
"source": [
"years = sorted(list(steel_consumption[\"Time\"].unique()))\n",
"dimensions = DimensionSet(dim_list=[\n",
" Dimension(letter=\"t\", name=\"Time\", dtype=np.int64, items=years),\n",
" Dimension(letter=\"r\", name=\"Region\", dtype=str, items=list(country_lifetimes.keys())),\n",
"])\n",
"dimensions = DimensionSet(\n",
" dim_list=[\n",
" Dimension(letter=\"t\", name=\"Time\", dtype=np.int64, items=years),\n",
" Dimension(letter=\"r\", name=\"Region\", dtype=str, items=list(country_lifetimes.keys())),\n",
" ]\n",
")\n",
"\n",
"inflow = StockArray.from_df(dims=dimensions, df=steel_consumption)\n",
"lifetime_values = np.array(list(country_lifetimes.values()))\n",
"lifetime_mean = Parameter(dims=dimensions[('r',)], values=lifetime_values)\n",
"lifetime_mean = Parameter(dims=dimensions[(\"r\",)], values=lifetime_values)\n",
"lifetime_std = relative_std * lifetime_mean"
]
},
Expand Down
14 changes: 8 additions & 6 deletions examples/example3.py
Original file line number Diff line number Diff line change
Expand Up @@ -65,7 +65,7 @@
# In this example, we'd like to keep the data in the same format as it was, so we read it in as a pandas dataframe and then convert it to the flodym data format.

# %%
steel_consumption_file=os.path.join("input_data", "example3_steel_consumption.xlsx")
steel_consumption_file = os.path.join("input_data", "example3_steel_consumption.xlsx")
steel_consumption = pd.read_excel(steel_consumption_file)
steel_consumption = steel_consumption[["CS", "T", "V"]]
steel_consumption = steel_consumption.rename(columns={"CS": "Region", "T": "Time", "V": "values"})
Expand All @@ -86,14 +86,16 @@

# %%
years = sorted(list(steel_consumption["Time"].unique()))
dimensions = DimensionSet(dim_list=[
Dimension(letter="t", name="Time", dtype=np.int64, items=years),
Dimension(letter="r", name="Region", dtype=str, items=list(country_lifetimes.keys())),
])
dimensions = DimensionSet(
dim_list=[
Dimension(letter="t", name="Time", dtype=np.int64, items=years),
Dimension(letter="r", name="Region", dtype=str, items=list(country_lifetimes.keys())),
]
)

inflow = StockArray.from_df(dims=dimensions, df=steel_consumption)
lifetime_values = np.array(list(country_lifetimes.values()))
lifetime_mean = Parameter(dims=dimensions[('r',)], values=lifetime_values)
lifetime_mean = Parameter(dims=dimensions[("r",)], values=lifetime_values)
lifetime_std = relative_std * lifetime_mean


Expand Down
4 changes: 1 addition & 3 deletions examples/example5.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -272,9 +272,7 @@
" if parameter_name == \"eol recovery rate\":\n",
" # add rows with missing waste /material combinations\n",
" waste_material_combinations = [\n",
" (waste, material)\n",
" for waste in dims[\"w\"].items\n",
" for material in dims[\"m\"].items\n",
" (waste, material) for waste in dims[\"w\"].items for material in dims[\"m\"].items\n",
" ]\n",
" data = data.set_index([\"waste\", \"material\"])\n",
" data = data.reindex(waste_material_combinations).reset_index()\n",
Expand Down
4 changes: 1 addition & 3 deletions examples/example5.py
Original file line number Diff line number Diff line change
Expand Up @@ -236,9 +236,7 @@ def read_parameter_values(self, parameter_name: str, dims: DimensionSet) -> Para
if parameter_name == "eol recovery rate":
# add rows with missing waste /material combinations
waste_material_combinations = [
(waste, material)
for waste in dims["w"].items
for material in dims["m"].items
(waste, material) for waste in dims["w"].items for material in dims["m"].items
]
data = data.set_index(["waste", "material"])
data = data.reindex(waste_material_combinations).reset_index()
Expand Down
4 changes: 3 additions & 1 deletion flodym/export/data_writer.py
Original file line number Diff line number Diff line change
Expand Up @@ -103,5 +103,7 @@ def _convert_to_dict_by_func(mfa: MFASystem, convert_func: Callable) -> dict:
dict_out["stock_dimensions"] = {
s_name: s.stock.dims.letters for s_name, s in mfa.stocks.items()
}
dict_out["stock_processes"] = {s_name: s.process.name for s_name, s in mfa.stocks.items() if s.process is not None}
dict_out["stock_processes"] = {
s_name: s.process.name for s_name, s in mfa.stocks.items() if s.process is not None
}
return dict_out
16 changes: 3 additions & 13 deletions flodym/lifetime_models.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@
import scipy.stats
from pydantic import BaseModel as PydanticBaseModel, model_validator
from typing import Any

# from scipy.special import gammaln, logsumexp
# from scipy.optimize import root_scalar

Expand Down Expand Up @@ -207,29 +208,18 @@ class LogNormalLifetime(StandardDeviationLifetimeModel):
Same result as EXCEL function "=LOGNORM.VERT(x;LT_LN;SG_LN;TRUE)"
"""


def _survival_by_year_id(self, m):
# calculate parameter mu of underlying normal distribution:
lt_ln = np.log(
self.mean[m, ...]
/ np.sqrt(
1
+ (
self.mean[m, ...]
* self.mean[m, ...]
/ (self.std[m, ...] * self.std[m, ...])
)
1 + (self.mean[m, ...] * self.mean[m, ...] / (self.std[m, ...] * self.std[m, ...]))
)
)
# calculate parameter sigma of underlying normal distribution
sg_ln = np.sqrt(
np.log(
1
+ (
self.mean[m, ...]
* self.mean[m, ...]
/ (self.std[m, ...] * self.std[m, ...])
)
1 + (self.mean[m, ...] * self.mean[m, ...] / (self.std[m, ...] * self.std[m, ...]))
)
)
# compute survival function
Expand Down
9 changes: 7 additions & 2 deletions flodym/mfa_definition.py
Original file line number Diff line number Diff line change
Expand Up @@ -93,10 +93,15 @@ class StockDefinition(DefinitionWithDimLetters):

@model_validator(mode="after")
def check_lifetime_model(self):
if self.lifetime_model_class is not None and "lifetime_model" not in self.subclass.__fields__:
if (
self.lifetime_model_class is not None
and "lifetime_model" not in self.subclass.__fields__
):
raise ValueError(f"Lifetime model is given, but not used in subclass {self.subclass}.")
elif self.lifetime_model_class is None and "lifetime_model" in self.subclass.__fields__:
raise ValueError(f"Lifetime model class must be part of definition for given subclass {self.subclass}")
raise ValueError(
f"Lifetime model class must be part of definition for given subclass {self.subclass}"
)
return self


Expand Down
43 changes: 30 additions & 13 deletions flodym/stocks.py
Original file line number Diff line number Diff line change
Expand Up @@ -46,21 +46,29 @@ def validate_stock_arrays(self):
if self.stock is None:
self.stock = StockArray(dims=self.dims, name=f"{self.name}_stock")
elif self.stock.dims.letters != self.dims.letters:
raise ValueError(f"Stock dimensions {self.stock.dims.letters} do not match prescribed dims {self.dims.letters}.")
raise ValueError(
f"Stock dimensions {self.stock.dims.letters} do not match prescribed dims {self.dims.letters}."
)
if self.inflow is None:
self.inflow = StockArray(dims=self.dims, name=f"{self.name}_inflow")
elif self.inflow.dims.letters != self.dims.letters:
raise ValueError(f"Inflow dimensions {self.inflow.dims.letters} do not match prescribed dims {self.dims.letters}.")
raise ValueError(
f"Inflow dimensions {self.inflow.dims.letters} do not match prescribed dims {self.dims.letters}."
)
if self.outflow is None:
self.outflow = StockArray(dims=self.dims, name=f"{self.name}_outflow")
elif self.outflow.dims.letters != self.dims.letters:
raise ValueError(f"Outflow dimensions {self.outflow.dims.letters} do not match prescribed dims {self.dims.letters}.")
raise ValueError(
f"Outflow dimensions {self.outflow.dims.letters} do not match prescribed dims {self.dims.letters}."
)
return self

@model_validator(mode="after")
def validate_time_first_dim(self):
if self.dims.letters[0] != self.time_letter:
raise ValueError(f"Time dimension must be the first dimension, i.e. time_letter (now {self.time_letter}) must be the first letter in dims.letters (now {self.dims.letters[0]}).")
raise ValueError(
f"Time dimension must be the first dimension, i.e. time_letter (now {self.time_letter}) must be the first letter in dims.letters (now {self.dims.letters[0]})."
)
return self

@abstractmethod
Expand Down Expand Up @@ -111,7 +119,10 @@ class SimpleFlowDrivenStock(Stock):
"""Given inflows and outflows, the stock can be calculated."""

def _check_needed_arrays(self):
if np.max(np.abs(self.inflow.values)) < 1e-10 and np.max(np.abs(self.outflow.values)) < 1e-10:
if (
np.max(np.abs(self.inflow.values)) < 1e-10
and np.max(np.abs(self.outflow.values)) < 1e-10
):
logging.warning("Inflow and Outflow are zero. This will lead to a zero stock.")

def compute(self):
Expand Down Expand Up @@ -192,7 +203,9 @@ def compute_stock_by_cohort(self) -> np.ndarray:
from the perspective of the stock the inflow has the dimension age-cohort,
as each inflow(t) is added to the age-cohort c = t
"""
self._stock_by_cohort = np.einsum("c...,tc...->tc...", self.inflow.values, self.lifetime_model.sf)
self._stock_by_cohort = np.einsum(
"c...,tc...->tc...", self.inflow.values, self.lifetime_model.sf
)

def compute_outflow_by_cohort(self) -> np.ndarray:
"""Compute outflow by cohort from changes in the stock by cohort and the known inflow."""
Expand Down Expand Up @@ -225,10 +238,14 @@ def compute_inflow_and_outflow(self) -> tuple[np.ndarray]:
sf = self.lifetime_model.sf
# construct the sf of a product of cohort tc remaining in the stock in year t
# First year:
self.inflow.values[0, ...] = np.where(sf[0, 0, ...] != 0.0, self.stock.values[0] / sf[0, 0], 0.0)
self.inflow.values[0, ...] = np.where(
sf[0, 0, ...] != 0.0, self.stock.values[0] / sf[0, 0], 0.0
)
# Future decay of age-cohort of year 0.
self._stock_by_cohort[:, 0, ...] = self.inflow.values[0, ...] * sf[:, 0, ...]
self._outflow_by_cohort[0, 0, ...] = self.inflow.values[0, ...] - self._stock_by_cohort[0, 0, ...]
self._outflow_by_cohort[0, 0, ...] = (
self.inflow.values[0, ...] - self._stock_by_cohort[0, 0, ...]
)
# all other years:
for m in range(1, self._n_t): # for all years m, starting in second year
# 1) Compute outflow from previous age-cohorts up to m-1
Expand Down Expand Up @@ -257,11 +274,11 @@ def inflow_from_balance(self, m: int) -> np.ndarray:
class StockDrivenDSM_NIC(StockDrivenDSM):

def inflow_from_balance(self, m):
is_negative_inflow = self.check_negative_inflow(m)
if is_negative_inflow:
self.inflow_from_balance_correction(m)
else:
super().inflow_from_balance(m)
is_negative_inflow = self.check_negative_inflow(m)
if is_negative_inflow:
self.inflow_from_balance_correction(m)
else:
super().inflow_from_balance(m)

def check_negative_inflow(self, m: int) -> bool:
"""Check if inflow is negative."""
Expand Down
Loading