Adapt examples, changes to flow naming and df export #33

Merged
merged 7 commits on Sep 24, 2024
Changes from 5 commits
7 changes: 4 additions & 3 deletions examples/example1.ipynb
@@ -103,7 +103,9 @@
"metadata": {},
"source": [
"## 4. Define the MFA System equations\n",
"We define a class with our system equations in the compute method. Afterwards we create an instance of this class, using the input data defined above. The class (system equations) can then easily be reused with different input data."
"We define a class with our system equations in the compute method. Afterwards we create an instance of this class, using the input data defined above. The class (system equations) can then easily be reused with different input data.\n",
"\n",
"We just need to define the compute method with our system equations, as all the other things we need are inherited from the MFASystem class."
]
},
{
@@ -113,8 +115,7 @@
"outputs": [],
"source": [
"class SimpleMFA(MFASystem):\n",
" \"\"\"We just need to define the compute method with our system equations,\n",
" as all the other things we need are inherited from the MFASystem class.\"\"\"\n",
"\n",
" def compute(self):\n",
" self.flows['sysenv => process 1'][...] = self.parameters['D'] # the elipsis slice [...] ensures the dimensionality of the flow is not changed\n",
" self.flows['process 1 => process 2'][...] = 1 / (1 - self.parameters['alpha']) * self.parameters['D']\n",
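The markdown cell above says an instance of the class is then created from the input data defined earlier; that cell is not shown in this hunk. A minimal sketch of what it could look like, assuming the MFASystem constructor accepts the keyword arguments below (the field names are assumptions, not copied from the notebook):

# hypothetical sketch; keyword names are assumed, not taken from this diff
simple_mfa = SimpleMFA(
    dims=dims,              # DimensionSet defined earlier in the notebook
    parameters=parameters,  # must provide 'D' and 'alpha' used in compute()
    processes=processes,
    flows=flows,            # e.g. built with make_empty_flows(...)
)
simple_mfa.compute()        # fills the flows via the equations defined above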
4,526 changes: 138 additions & 4,388 deletions examples/example2.ipynb

Large diffs are not rendered by default.

656 changes: 262 additions & 394 deletions examples/example3.ipynb

Large diffs are not rendered by default.

2,624 changes: 2,264 additions & 360 deletions poetry.lock

Large diffs are not rendered by default.

7 changes: 7 additions & 0 deletions pyproject.toml
@@ -18,6 +18,9 @@ plotly = "^5.18.0"
pydantic = "^2.8.2"
ruff = "^0.5.6"
docformatter = "^1.7.5"
openpyxl = "^3.1.5"
nbformat = "^5.10.4"
jupyter = "^1.1.1"

[tool.poetry.group.test]
optional = true
@@ -35,6 +38,10 @@ autodoc-pydantic = "^2.2.0"
sphinx-rtd-theme = "^2.0.0"
myst-parser = "^4.0.0"


[tool.poetry.group.dev.dependencies]
ipykernel = "^6.29.5"

[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"
7 changes: 4 additions & 3 deletions sodym/export/data_writer.py
@@ -8,6 +8,7 @@
from pydantic import BaseModel as PydanticBaseModel

from ..mfa_system import MFASystem
from .helper import to_valid_file_name


class DataWriter(PydanticBaseModel):
@@ -28,16 +29,16 @@ def export_mfa_flows_to_csv(self, mfa: MFASystem, export_directory: str):
if not os.path.exists(export_directory):
os.makedirs(export_directory)
for flow_name, flow in mfa.flows.items():
path_out = os.path.join(export_directory, f'{flow_name.replace(" => ", "__2__")}.csv')
flow.to_df().to_csv(path_out, index=False)
path_out = os.path.join(export_directory, f'{to_valid_file_name(flow_name)}.csv')
flow.to_df().to_csv(path_out)
logging.info(f'Data saved in directory {export_directory}')

def export_mfa_stocks_to_csv(self, mfa: MFASystem, export_directory: str):
if not os.path.exists(export_directory):
os.makedirs(export_directory)
for stock_name, stock in mfa.stocks.items():
df = stock.stock.to_df()
path_out = os.path.join(export_directory, f'{stock_name}_stock.csv')
path_out = os.path.join(export_directory, f'{to_valid_file_name(stock_name)}_stock.csv')
df.to_csv(path_out, index=False)
logging.info(f'Data saved in directory {export_directory}')

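Illustration of the combined effect of the two changes above (not part of the diff), assuming `writer` is a DataWriter instance and `mfa` an MFASystem:

# illustration only
writer.export_mfa_flows_to_csv(mfa, export_directory="output/flows")
# a flow named "process 1 => process 2" was previously written to
# "process 1__2__process 2.csv"; it is now written to "process_1__process_2.csv",
# and the CSV keeps the dimension index (the old index=False is gone)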
17 changes: 16 additions & 1 deletion sodym/export/helper.py
@@ -1,6 +1,8 @@
from matplotlib import pyplot as plt
import numpy as np
from pydantic import BaseModel as PydanticBaseModel
import re
import unicodedata

from ..named_dim_arrays import NamedDimArray
from ..dimensions import DimensionSet
@@ -74,7 +76,7 @@ def fill_fig_ax(self):
n_subplots = self.array.dims[self.subplot_dim].len
nx = int(np.ceil(np.sqrt(n_subplots)))
ny = int(np.ceil(n_subplots / nx))
self.fig_ax = plt.subplots(nx, ny, figsize=(10, 9))
self.fig_ax = plt.subplots(nx, ny, figsize=(10, 9), squeeze=False)

def get_x_array_like_value_array(self):
if self.x_array is None:
@@ -134,3 +136,16 @@ def list_of_slices(array: NamedDimArray, dim_name_to_slice) -> tuple[list[NamedD
list_array = [array]
list_name = [None]
return list_array, list_name


def to_valid_file_name(value: str) -> str:
"""
Taken from https://github.com/django/django/blob/master/django/utils/text.py
Convert to ASCII. Convert spaces or repeated dashes to single dashes.
Remove characters that aren't alphanumerics, underscores, or hyphens.
Convert to lowercase. Also strip leading and trailing whitespace, dashes, and underscores.
"""
value = str(value)
value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode('ascii')
value = re.sub(r'[^\w\s-]', '', value.lower())
return re.sub(r'[-\s]', '_', value).strip('-_')
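A quick illustration of what this sanitisation produces (not part of the diff):

# illustration only
from sodym.export.helper import to_valid_file_name

to_valid_file_name("sysenv => process 1")   # -> "sysenv__process_1"
to_valid_file_name("Fabrication & Use")     # -> "fabrication__use"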
11 changes: 9 additions & 2 deletions sodym/flow_helper.py
@@ -1,11 +1,14 @@
from typing import Callable

from .named_dim_arrays import Process, Flow
from .dimensions import DimensionSet
from .mfa_definition import FlowDefinition
from .flow_naming import process_names_with_arrow


def make_empty_flows(
processes: dict[str, Process], flow_definitions: list[FlowDefinition],
dims: DimensionSet,
dims: DimensionSet, naming: Callable[[Process, Process], str] = process_names_with_arrow
) -> dict[str, Flow]:
"""Initialize all defined flows with zero values."""
flows = {}
@@ -15,7 +18,11 @@ def make_empty_flows(
to_process = processes[flow_definition.to_process_name]
except KeyError:
raise KeyError(f"Missing process required by flow definition {flow_definition}.")
if flow_definition.name_override is not None:
name = flow_definition.name_override
else:
name = naming(from_process, to_process)
dim_subset = dims.get_subset(flow_definition.dim_letters)
flow = Flow(from_process=from_process, to_process=to_process, dims=dim_subset)
flow = Flow(from_process=from_process, to_process=to_process, name=name, dims=dim_subset)
flows[flow.name] = flow
return flows
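A caller can now pick one of the conventions from flow_naming when building the flow dictionary; a sketch (not part of the diff), assuming processes, flow_definitions and dims already exist:

# sketch: the default naming stays process_names_with_arrow
from sodym.flow_helper import make_empty_flows
from sodym.flow_naming import process_names_no_spaces

flows = make_empty_flows(
    processes=processes,
    flow_definitions=flow_definitions,
    dims=dims,
    naming=process_names_no_spaces,  # keys become e.g. "process 1_to_process 2"
)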
13 changes: 13 additions & 0 deletions sodym/flow_naming.py
@@ -0,0 +1,13 @@
from .named_dim_arrays import Process


def process_names_with_arrow(from_process: Process, to_process: Process) -> str:
return f"{from_process.name} => {to_process.name}"


def process_names_no_spaces(from_process: Process, to_process: Process) -> str:
return f"{from_process.name}_to_{to_process.name}"


def process_ids(from_process: Process, to_process: Process) -> str:
return f"F{from_process.id}_{to_process.id}"
1 change: 1 addition & 0 deletions sodym/mfa_definition.py
@@ -53,6 +53,7 @@ class FlowDefinition(DefinitionWithDimLetters):

from_process_name: str = Field(validation_alias=AliasChoices("from_process_name", "from_process"))
to_process_name: str = Field(validation_alias=AliasChoices("to_process_name", "to_process"))
name_override: Optional[str] = None


class StockDefinition(DefinitionWithDimLetters):
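With this field, a flow definition can override the generated flow name; a hypothetical example (not part of the diff; the dim_letters value is an assumption):

# sketch only
from sodym.mfa_definition import FlowDefinition

FlowDefinition(
    from_process="sysenv",            # alias for from_process_name
    to_process="process 1",           # alias for to_process_name
    dim_letters=("t",),               # assumed dimension letters
    name_override="raw_material_input",
)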
20 changes: 11 additions & 9 deletions sodym/named_dim_arrays.py
@@ -82,6 +82,11 @@ def sub_array_handler(self, definition):
def shape(self):
return self.dims.shape()

def set_values(self, values: np.ndarray):
assert isinstance(values, np.ndarray), "Values must be a numpy array."
assert values.shape == self.shape, "Values must have the same shape as the DimensionSet."
self.values = values

def sum_values(self):
return np.sum(self.values)

@@ -219,10 +224,12 @@ def __setitem__(self, keys, item):
slice_obj = self.sub_array_handler(keys)
slice_obj.values_pointer[...] = item.sum_values_to(slice_obj.dim_letters)

def to_df(self):
index = pd.MultiIndex.from_product([d.items for d in self.dims], names=self.dims.names)
df = index.to_frame(index=False)
df["value"] = self.values.flatten()
def to_df(self, index: bool = True):
multiindex = pd.MultiIndex.from_product([d.items for d in self.dims], names=self.dims.names)
df = pd.DataFrame({'value': self.values.flatten()})
df = df.set_index(multiindex)
if not index:
df = df.reset_index()
return df


@@ -404,11 +411,6 @@ class Flow(NamedDimArray):
from_process: Process
to_process: Process

@model_validator(mode="after")
def flow_name_related_to_proccesses(self):
self.name = f"{self.from_process.name} => {self.to_process.name}"
return self

@property
def from_process_id(self):
return self.from_process.id
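For reference (not part of the diff): the reworked to_df keeps the dimension MultiIndex by default, index=False restores the previous flat layout, and set_values checks type and shape before assigning.

# illustration only; assumes `flow` is an existing Flow and numpy is imported as np
df = flow.to_df()                      # 'value' column indexed by the flow's dimensions
df_flat = flow.to_df(index=False)      # previous behaviour: dimensions as plain columns
flow.set_values(np.zeros(flow.shape))  # AssertionError on wrong type or shape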