From 0f85d600540f0dc5bf5d1afbbd44a97d1c7ff80a Mon Sep 17 00:00:00 2001 From: Ardt Klapwijk Date: Wed, 8 Nov 2023 15:39:28 +0100 Subject: [PATCH 01/35] chore: small refactoring/renaming --- .../analysis_config_data/analysis_config_data.py | 6 +++--- .../analysis_config_wrapper_base.py | 1 - .../analysis_config_wrapper_factory.py | 12 ++++++------ .../analysis_config_wrapper_with_network.py | 3 ++- .../common/configuration/config_wrapper_protocol.py | 5 ++++- ra2ce/configuration/config_factory.py | 10 +++++----- ra2ce/graph/network_config_wrapper.py | 4 ++-- 7 files changed, 22 insertions(+), 19 deletions(-) diff --git a/ra2ce/analyses/analysis_config_data/analysis_config_data.py b/ra2ce/analyses/analysis_config_data/analysis_config_data.py index b662d296c..cf4b5e2e6 100644 --- a/ra2ce/analyses/analysis_config_data/analysis_config_data.py +++ b/ra2ce/analyses/analysis_config_data/analysis_config_data.py @@ -28,9 +28,9 @@ class AnalysisConfigData(ConfigDataProtocol): @classmethod def from_dict(cls, dict_values: dict) -> AnalysisConfigData: - _new_analysis_ini_config_data = cls() - _new_analysis_ini_config_data.update(**dict_values) - return _new_analysis_ini_config_data + _analysis_config = cls() + _analysis_config.update(**dict_values) + return _analysis_config class AnalysisConfigDataWithNetwork(AnalysisConfigData): diff --git a/ra2ce/analyses/analysis_config_wrapper/analysis_config_wrapper_base.py b/ra2ce/analyses/analysis_config_wrapper/analysis_config_wrapper_base.py index 0c6e5838d..0ab456abe 100644 --- a/ra2ce/analyses/analysis_config_wrapper/analysis_config_wrapper_base.py +++ b/ra2ce/analyses/analysis_config_wrapper/analysis_config_wrapper_base.py @@ -31,7 +31,6 @@ class AnalysisConfigWrapperBase(ConfigWrapperProtocol): ini_file: Path - root_dir: Path config_data: Optional[AnalysisConfigData] = None @staticmethod diff --git a/ra2ce/analyses/analysis_config_wrapper/analysis_config_wrapper_factory.py b/ra2ce/analyses/analysis_config_wrapper/analysis_config_wrapper_factory.py index b73cf46a3..5a2ef3e8b 100644 --- a/ra2ce/analyses/analysis_config_wrapper/analysis_config_wrapper_factory.py +++ b/ra2ce/analyses/analysis_config_wrapper/analysis_config_wrapper_factory.py @@ -48,7 +48,7 @@ class AnalysisConfigWrapperFactory: @staticmethod def get_analysis_config( ini_file: Path, - analysis_ini_config: AnalysisConfigData, + analysis_config: AnalysisConfigData, network_config: Optional[NetworkConfigWrapper], ) -> AnalysisConfigWrapperBase: """ @@ -65,15 +65,15 @@ def get_analysis_config( Returns: AnalysisConfigWrapperBase: Concrete `AnalysisConfigWrapperBase` DataObjectModel for the given data. """ - if isinstance(analysis_ini_config, AnalysisConfigDataWithNetwork): + if isinstance(analysis_config, AnalysisConfigDataWithNetwork): return AnalysisConfigWrapperWithNetwork.from_data_with_network( - ini_file, analysis_ini_config, network_config + ini_file, analysis_config, network_config ) - elif isinstance(analysis_ini_config, AnalysisConfigDataWithoutNetwork): + elif isinstance(analysis_config, AnalysisConfigDataWithoutNetwork): return AnalysisConfigWrapperWithoutNetwork.from_data( - ini_file, analysis_ini_config + ini_file, analysis_config ) else: raise NotImplementedError( - f"Analysis type {type(analysis_ini_config)} not currently supported." + f"Analysis type {type(analysis_config)} not currently supported." 
) diff --git a/ra2ce/analyses/analysis_config_wrapper/analysis_config_wrapper_with_network.py b/ra2ce/analyses/analysis_config_wrapper/analysis_config_wrapper_with_network.py index 11de109af..525e17ee1 100644 --- a/ra2ce/analyses/analysis_config_wrapper/analysis_config_wrapper_with_network.py +++ b/ra2ce/analyses/analysis_config_wrapper/analysis_config_wrapper_with_network.py @@ -23,6 +23,7 @@ from __future__ import annotations from pathlib import Path +from typing import Optional from ra2ce.analyses.analysis_config_data.analysis_config_data import AnalysisConfigData from ra2ce.analyses.analysis_config_data.analysis_config_data_validator_with_network import ( @@ -70,7 +71,7 @@ def from_data_with_network( cls, ini_file: Path, config_data: AnalysisConfigData, - network_config: NetworkConfigWrapper, + network_config: Optional[NetworkConfigWrapper], ) -> AnalysisConfigWrapperWithNetwork: """ Initializes this class with a network_configuration. diff --git a/ra2ce/common/configuration/config_wrapper_protocol.py b/ra2ce/common/configuration/config_wrapper_protocol.py index 6d5ba5c38..d843eef56 100644 --- a/ra2ce/common/configuration/config_wrapper_protocol.py +++ b/ra2ce/common/configuration/config_wrapper_protocol.py @@ -31,10 +31,13 @@ @runtime_checkable class ConfigWrapperProtocol(Protocol): # pragma: no cover ini_file: Path - root_dir: Path config_data: Optional[ConfigDataProtocol] = None graphs: List[Any] = [] + @property + def root_dir(self) -> Path: + pass + @classmethod def from_data( cls, ini_file: Path, config_data: ConfigDataProtocol diff --git a/ra2ce/configuration/config_factory.py b/ra2ce/configuration/config_factory.py index d3da83cb7..47e3381d0 100644 --- a/ra2ce/configuration/config_factory.py +++ b/ra2ce/configuration/config_factory.py @@ -69,13 +69,13 @@ def get_config_wrapper( def get_network_config_data(network_ini: Path) -> Optional[NetworkConfigData]: if not network_ini: return None - _config_data = NetworkConfigDataReader().read(network_ini) + _network_config = NetworkConfigDataReader().read(network_ini) # Copy the network ini file to the output directory. 
- if not _config_data.output_path.exists(): - _config_data.output_path.mkdir(parents=True) - _output_ini = _config_data.output_path.joinpath(network_ini.name) + if not _network_config.output_path.exists(): + _network_config.output_path.mkdir(parents=True) + _output_ini = _network_config.output_path.joinpath(network_ini.name) shutil.copyfile(network_ini, _output_ini) - return NetworkConfigWrapper.from_data(network_ini, _config_data) + return NetworkConfigWrapper.from_data(network_ini, _network_config) @staticmethod def get_analysis_config_data( diff --git a/ra2ce/graph/network_config_wrapper.py b/ra2ce/graph/network_config_wrapper.py index 5d5ff3e36..76696a4c2 100644 --- a/ra2ce/graph/network_config_wrapper.py +++ b/ra2ce/graph/network_config_wrapper.py @@ -24,7 +24,7 @@ import logging from pathlib import Path -from typing import Dict, Optional +from typing import Optional from geopandas import gpd @@ -39,7 +39,7 @@ class NetworkConfigWrapper(ConfigWrapperProtocol): - files: Dict[str, Path] = {} + files: dict[str, Path] = {} config_data: NetworkConfigData def __init__(self) -> None: From 4e4736d16467b2cf420ec2f53c9718dace65a4e1 Mon Sep 17 00:00:00 2001 From: Ardt Klapwijk Date: Wed, 8 Nov 2023 16:21:38 +0100 Subject: [PATCH 02/35] chore: small refactoring/renaming --- .../analysis_config_reader_without_network.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/ra2ce/analyses/analysis_config_data/readers/analysis_config_reader_without_network.py b/ra2ce/analyses/analysis_config_data/readers/analysis_config_reader_without_network.py index aac67030e..c86ace7aa 100644 --- a/ra2ce/analyses/analysis_config_data/readers/analysis_config_reader_without_network.py +++ b/ra2ce/analyses/analysis_config_data/readers/analysis_config_reader_without_network.py @@ -41,18 +41,18 @@ class AnalysisConfigReaderWithoutNetwork(AnalysisConfigReaderBase): def read(self, ini_file: Path) -> AnalysisConfigDataWithoutNetwork: if not isinstance(ini_file, Path) or not ini_file.is_file(): raise ValueError("No analysis ini file 'Path' provided.") - _analisis_config_dict = self._get_analysis_config_data(ini_file) - _output_network_ini_file = _analisis_config_dict["output"] / "network.ini" + _analysis_config_dict = self._get_analysis_config_data(ini_file) + _output_network_ini_file = _analysis_config_dict["output"] / "network.ini" _network_config = NetworkConfigDataReader().read(_output_network_ini_file) - _analisis_config_dict.update(_network_config.to_dict()) - _network = _analisis_config_dict.get("network", None) + _analysis_config_dict.update(_network_config.to_dict()) + _network = _analysis_config_dict.get("network", None) if _network: - _analisis_config_dict["origins_destinations"] = _network.get( + _analysis_config_dict["origins_destinations"] = _network.get( "origins_destinations", None ) else: logging.warn(f"Not found network key for the Analysis {ini_file}") - return AnalysisConfigDataWithoutNetwork.from_dict(_analisis_config_dict) + return AnalysisConfigDataWithoutNetwork.from_dict(_analysis_config_dict) def _get_analysis_config_data(self, ini_file: Path) -> dict: _root_path = AnalysisConfigWrapperBase.get_network_root_dir(ini_file) From 366876ee42db240d6c61d4b8cf3accd5f87ac74d Mon Sep 17 00:00:00 2001 From: Ardt Klapwijk Date: Thu, 9 Nov 2023 11:53:56 +0100 Subject: [PATCH 03/35] chore: small refactorings/comments --- ra2ce/common/io/readers/ini_file_reader.py | 8 +++++--- .../network_config_data/network_config_data_reader.py | 6 +++--- tests/test_acceptance.py | 2 +- 3 files 
changed, 9 insertions(+), 7 deletions(-) diff --git a/ra2ce/common/io/readers/ini_file_reader.py b/ra2ce/common/io/readers/ini_file_reader.py index 4b21052dc..7aee44457 100644 --- a/ra2ce/common/io/readers/ini_file_reader.py +++ b/ra2ce/common/io/readers/ini_file_reader.py @@ -39,8 +39,9 @@ def read(self, ini_file: Path) -> dict: return self._parse_config(ini_file) def _parse_config(self, path: Path = None, opt_cli=None) -> dict: - """Ajusted from HydroMT - source: https://github.com/Deltares/hydromt/blob/af4e5d858b0ac0883719ca59e522053053c21b82/hydromt/cli/cli_utils.py""" + """Adjusted from HydroMT + source: https://github.com/Deltares/hydromt/blob/af4e5d858b0ac0883719ca59e522053053c21b82/hydromt/cli/cli_utils.py + """ opt = {} if path is not None and path.is_file(): opt = self._configread( @@ -81,7 +82,8 @@ def _configread( """read model configuration from file and parse to dictionary Ajusted from HydroMT - source: https://github.com/Deltares/hydromt/blob/af4e5d858b0ac0883719ca59e522053053c21b82/hydromt/config.py""" + source: https://github.com/Deltares/hydromt/blob/af4e5d858b0ac0883719ca59e522053053c21b82/hydromt/config.py + """ if cf is None: cf = ConfigParser(allow_no_value=True, inline_comment_prefixes=[";", "#"]) diff --git a/ra2ce/graph/network_config_data/network_config_data_reader.py b/ra2ce/graph/network_config_data/network_config_data_reader.py index d4f45f160..d5c0f201c 100644 --- a/ra2ce/graph/network_config_data/network_config_data_reader.py +++ b/ra2ce/graph/network_config_data/network_config_data_reader.py @@ -25,11 +25,11 @@ def __init__(self) -> None: converters={"list": lambda x: [x.strip() for x in x.split(",")]}, ) - def read(self, file_to_parse: Path) -> NetworkConfigData: - self._parser.read(file_to_parse) + def read(self, ini_file: Path) -> NetworkConfigData: + self._parser.read(ini_file) self._remove_none_values() - _parent_dir = file_to_parse.parent + _parent_dir = ini_file.parent _config_data = NetworkConfigData( input_path=_parent_dir.joinpath("input"), diff --git a/tests/test_acceptance.py b/tests/test_acceptance.py index c68533aae..a92025a13 100644 --- a/tests/test_acceptance.py +++ b/tests/test_acceptance.py @@ -89,7 +89,7 @@ def purge_output_dirs(): @pytest.mark.parametrize( "case_data_dir", [ - pytest.param("acceptance_test_data", id="Default test data."), + pytest.param("acceptance_test_data", id="Default test data"), ] + _external_test_cases, indirect=["case_data_dir"], From dd18d39a84cd8da2fbb90bb4ad7ee2dbb5cefd74 Mon Sep 17 00:00:00 2001 From: Ardt Klapwijk Date: Thu, 9 Nov 2023 16:26:50 +0100 Subject: [PATCH 04/35] chore: store analysis config in dataclasses --- .../analysis_config_data.py | 52 ++++-- .../readers/analysis_config_reader_base.py | 173 +++++++++++------- .../analysis_config_reader_with_network.py | 22 ++- .../analysis_config_reader_without_network.py | 41 +++-- .../analysis_config_wrapper_factory.py | 4 +- ra2ce/configuration/config_wrapper.py | 3 +- tests/configuration/test_config_wrapper.py | 4 +- 7 files changed, 198 insertions(+), 101 deletions(-) diff --git a/ra2ce/analyses/analysis_config_data/analysis_config_data.py b/ra2ce/analyses/analysis_config_data/analysis_config_data.py index cf4b5e2e6..b35a0baf5 100644 --- a/ra2ce/analyses/analysis_config_data/analysis_config_data.py +++ b/ra2ce/analyses/analysis_config_data/analysis_config_data.py @@ -22,24 +22,54 @@ from __future__ import annotations +import math +from dataclasses import dataclass, field +from pathlib import Path +from typing import Optional + from 
ra2ce.common.configuration.config_data_protocol import ConfigDataProtocol +@dataclass +class ProjectSection: + name: str = "" + + +@dataclass +class AnalysisSection: + name: str = "" + analysis: str = "" # should be enum + aggregate_wl: str = "" # should be enum + threshold: float = math.nan + weighing: str = "" # should be enum + calculate_route_without_disruption: Optional[bool] = False + buffer_meters: float = math.nan + category_field_name: str = "" + save_gpkg: bool = False + save_csv: bool = False + + +@dataclass class AnalysisConfigData(ConfigDataProtocol): - @classmethod - def from_dict(cls, dict_values: dict) -> AnalysisConfigData: - _analysis_config = cls() - _analysis_config.update(**dict_values) - return _analysis_config + root_path: Optional[Path] = None + input_path: Optional[Path] = None + output_path: Optional[Path] = None + static_path: Optional[Path] = None + project: ProjectSection = field(default_factory=lambda: ProjectSection()) + direct: list[AnalysisSection] = field(default_factory=list) + indirect: list[AnalysisSection] = field(default_factory=list) + + def to_dict(self) -> dict: + _dict = self.__dict__ + _dict["project"] = self.project.__dict__ + _dict["direct"] = [dv.__dict__ for dv in self.direct] + _dict["indirect"] = [dv.__dict__ for dv in self.indirect] + return _dict class AnalysisConfigDataWithNetwork(AnalysisConfigData): - @classmethod - def from_dict(cls, dict_values: dict) -> AnalysisConfigDataWithNetwork: - return super().from_dict(dict_values) + pass class AnalysisConfigDataWithoutNetwork(AnalysisConfigData): - @classmethod - def from_dict(cls, dict_values: dict) -> AnalysisConfigDataWithoutNetwork: - return super().from_dict(dict_values) + pass diff --git a/ra2ce/analyses/analysis_config_data/readers/analysis_config_reader_base.py b/ra2ce/analyses/analysis_config_data/readers/analysis_config_reader_base.py index 085a9b9ac..e8e0fca12 100644 --- a/ra2ce/analyses/analysis_config_data/readers/analysis_config_reader_base.py +++ b/ra2ce/analyses/analysis_config_data/readers/analysis_config_reader_base.py @@ -21,64 +21,129 @@ import logging +import re +from configparser import ConfigParser from pathlib import Path -from shutil import copyfile +from shutil import copy +from ra2ce.analyses.analysis_config_data.analysis_config_data import ( + AnalysisConfigData, + AnalysisSection, + ProjectSection, +) from ra2ce.analyses.analysis_config_data.analysis_config_data_validator_without_network import ( DirectAnalysisNameList, IndirectAnalysisNameList, ) +from ra2ce.analyses.analysis_config_wrapper.analysis_config_wrapper_base import ( + AnalysisConfigWrapperBase, +) from ra2ce.common.configuration.ini_configuration_reader_protocol import ( ConfigDataReaderProtocol, ) -from ra2ce.common.io.readers.ini_file_reader import IniFileReader class AnalysisConfigReaderBase(ConfigDataReaderProtocol): - def _convert_analysis_types(self, config: dict) -> dict: - def set_analysis_values(config_type: str): - if config_type in config: - (config[config_type]).append(config[a]) - else: - config[config_type] = [config[a]] - - analyses_names = [a for a in config.keys() if "analysis" in a] - for a in analyses_names: - if any(t in config[a]["analysis"] for t in DirectAnalysisNameList): - set_analysis_values("direct") - elif any(t in config[a]["analysis"] for t in IndirectAnalysisNameList): - set_analysis_values("indirect") - del config[a] - - return config - - def _import_configuration(self, root_path: Path, config_path: Path) -> dict: - # Read the configurations in network.ini and add the 
root path to the configuration dictionary. - if not config_path.is_file(): - config_path = root_path / config_path - _config = IniFileReader().read(config_path) - _config["project"]["name"] = config_path.parent.name - _config["root_path"] = root_path - - # Set the paths in the configuration Dict for ease of saving to those folders. - # The output path is set at a different step `IniConfigurationReaderBase::_copy_output_files` - _config["input"] = config_path.parent / "input" - _config["static"] = config_path.parent / "static" - return _config - - def _copy_output_files(self, from_path: Path, config_data: dict) -> None: - self._create_config_dir("output", config_data) + _parser: ConfigParser + + def __init__(self) -> None: + self._parser = ConfigParser(inline_comment_prefixes="#") + + def read(self, ini_file: Path) -> AnalysisConfigData: + if not isinstance(ini_file, Path) or not ini_file.is_file(): + raise ValueError("No analysis ini file 'Path' provided.") + self._parser.read(ini_file) + self._remove_none_values() + + _parent_dir = ini_file.parent + + _config_data = AnalysisConfigData( + input_path=_parent_dir.joinpath("input"), + static_path=_parent_dir.joinpath("static"), + output_path=_parent_dir.joinpath("output"), + **self._get_sections(), + ) + _config_data.root_path = AnalysisConfigWrapperBase.get_network_root_dir( + ini_file + ) + _config_data.project.name = _parent_dir.name + # TODO self._correct_paths(_config_data)?? + + return _config_data + + def _remove_none_values(self) -> None: + # Remove 'None' from values, replace them with empty strings + for _section in self._parser.sections(): + _keys_with_none = [ + k for k, v in self._parser[_section].items() if v == "None" + ] + for _key_with_none in _keys_with_none: + self._parser[_section].pop(_key_with_none) + + def _get_sections(self) -> dict: + return { + "project": self.get_project_section(), + "direct": self.get_analysis_sections("direct"), + "indirect": self.get_analysis_sections("indirect"), + } + + def get_project_section(self) -> ProjectSection: + return ProjectSection(**self._parser["project"]) + + def _get_analysis_section(self, section_name: str) -> AnalysisSection: + _section = AnalysisSection(**self._parser[section_name]) + _section.threshold = self._parser.getfloat( + section_name, + "threshold", + fallback=_section.threshold, + ) + _section.calculate_route_without_disruption = self._parser.getboolean( + section_name, + "calculate_route_without_disruption", + fallback=_section.calculate_route_without_disruption, + ) + _section.buffer_meters = self._parser.getfloat( + section_name, + "buffer_meters", + fallback=_section.buffer_meters, + ) + _section.save_gpkg = self._parser.getboolean( + section_name, "save_gpkg", fallback=_section.save_gpkg + ) + _section.save_csv = self._parser.getboolean( + section_name, "save_csv", fallback=_section.save_csv + ) + return _section + + def get_analysis_sections(self, analysis_type: str) -> list[AnalysisSection]: + _analysis_sections = [] + + _section_names = re.findall(r"(analysis\d)", " ".join(self._parser.keys())) + for _section_name in _section_names: + _analysis_name = self._parser.get(_section_name, "analysis") + if analysis_type == "direct" and _analysis_name in DirectAnalysisNameList: + _analysis_section = self._get_analysis_section(_section_name) + _analysis_sections.append(_analysis_section) + elif ( + analysis_type == "indirect" + and _analysis_name in IndirectAnalysisNameList + ): + _analysis_section = self._get_analysis_section(_section_name) + 
_analysis_sections.append(_analysis_section) + + return _analysis_sections + + def _copy_output_files( + self, from_path: Path, config_data: AnalysisConfigData + ) -> None: + _output_dir = config_data.output_path + if not _output_dir.exists(): + _output_dir.mkdir(parents=True) try: - copyfile(from_path, config_data["output"] / "{}.ini".format(from_path.stem)) + copy(from_path, _output_dir) except FileNotFoundError as e: logging.warning(e) - def _create_config_dir(self, dir_name: str, config_data: dict): - _dir = config_data["root_path"] / config_data["project"]["name"] / dir_name - if not _dir.exists(): - _dir.mkdir(parents=True) - config_data[dir_name] = _dir - def _parse_path_list( self, property_name: str, path_list: str, config_data: dict ) -> list[Path]: @@ -100,27 +165,3 @@ def _parse_path_list( _list_paths.append(abs_path) return _list_paths - - def _update_path_values(self, config_data: dict) -> None: - """ - TODO: Work in progress, for now it's happening during validation, which should not be the case. - - Args: - config_data (dict): _description_ - """ - _file_types = { - "polygon": "network", - "hazard_map": "hazard", - "origins": "network", - "destinations": "network", - "locations": "network", - } - for config_header, value_dict in config_data.items(): - if not isinstance(value_dict, dict): - continue - for header_prop, prop_value in value_dict.items(): - _prop_name = _file_types.get(header_prop, None) - if _prop_name and prop_value: - config_data[config_header][header_prop] = self._parse_path_list( - _prop_name, prop_value, config_data - ) diff --git a/ra2ce/analyses/analysis_config_data/readers/analysis_config_reader_with_network.py b/ra2ce/analyses/analysis_config_data/readers/analysis_config_reader_with_network.py index 48df087a7..3b4c3e8b5 100644 --- a/ra2ce/analyses/analysis_config_data/readers/analysis_config_reader_with_network.py +++ b/ra2ce/analyses/analysis_config_data/readers/analysis_config_reader_with_network.py @@ -20,9 +20,11 @@ """ +from configparser import ConfigParser from pathlib import Path from ra2ce.analyses.analysis_config_data.analysis_config_data import ( + AnalysisConfigData, AnalysisConfigDataWithNetwork, ) from ra2ce.analyses.analysis_config_data.readers.analysis_config_reader_base import ( @@ -35,7 +37,11 @@ class AnalysisConfigReaderWithNetwork(AnalysisConfigReaderBase): + _parser: ConfigParser + _network_data: NetworkConfigWrapper + def __init__(self, network_data: NetworkConfigWrapper) -> None: + super().__init__() self._network_data = network_data if not network_data: raise ValueError( @@ -43,10 +49,14 @@ def __init__(self, network_data: NetworkConfigWrapper) -> None: ) def read(self, ini_file: Path) -> AnalysisConfigDataWithNetwork: - if not isinstance(ini_file, Path) or not ini_file.is_file(): - raise ValueError("No analysis ini file 'Path' provided.") - _root_path = AnalysisConfigWrapperBase.get_network_root_dir(ini_file) - _config_data = self._import_configuration(_root_path, ini_file) - _config_data = self._convert_analysis_types(_config_data) + _config = super().read(ini_file) + _config_data = AnalysisConfigDataWithNetwork( + input_path=_config.input_path, + output_path=_config.output_path, + static_path=_config.static_path, + project=_config.project, + direct=_config.direct, + indirect=_config.indirect, + ) self._copy_output_files(ini_file, _config_data) - return AnalysisConfigDataWithNetwork.from_dict(_config_data) + return _config_data diff --git a/ra2ce/analyses/analysis_config_data/readers/analysis_config_reader_without_network.py 
b/ra2ce/analyses/analysis_config_data/readers/analysis_config_reader_without_network.py index c86ace7aa..f7a02119d 100644 --- a/ra2ce/analyses/analysis_config_data/readers/analysis_config_reader_without_network.py +++ b/ra2ce/analyses/analysis_config_data/readers/analysis_config_reader_without_network.py @@ -21,6 +21,7 @@ import logging +from configparser import ConfigParser from pathlib import Path from ra2ce.analyses.analysis_config_data.analysis_config_data import ( @@ -38,25 +39,39 @@ class AnalysisConfigReaderWithoutNetwork(AnalysisConfigReaderBase): + _parser: ConfigParser + def read(self, ini_file: Path) -> AnalysisConfigDataWithoutNetwork: - if not isinstance(ini_file, Path) or not ini_file.is_file(): - raise ValueError("No analysis ini file 'Path' provided.") - _analysis_config_dict = self._get_analysis_config_data(ini_file) - _output_network_ini_file = _analysis_config_dict["output"] / "network.ini" + """ + Read the configuration from analysis.ini and append with data from network.ini. + The file network.ini should be in the output folder. + + Args: + ini_file (Path): Path of analysis.ini + + Returns: + AnalysisConfigDataWithoutNetwork + """ + _config = super().read(ini_file) + _config_data = AnalysisConfigDataWithoutNetwork( + input_path=_config.input_path, + output_path=_config.output_path, + static_path=_config.static_path, + project=_config.project, + direct=_config.direct, + indirect=_config.indirect, + ) + self._copy_output_files(ini_file, _config_data) + + _output_network_ini_file = _config_data.output_path.joinpath("network.ini") _network_config = NetworkConfigDataReader().read(_output_network_ini_file) - _analysis_config_dict.update(_network_config.to_dict()) - _network = _analysis_config_dict.get("network", None) + _config_data.update(_network_config.to_dict()) + _network = _config_data.get("network", None) if _network: - _analysis_config_dict["origins_destinations"] = _network.get( + _config_data["origins_destinations"] = _network.get( "origins_destinations", None ) else: logging.warn(f"Not found network key for the Analysis {ini_file}") - return AnalysisConfigDataWithoutNetwork.from_dict(_analysis_config_dict) - def _get_analysis_config_data(self, ini_file: Path) -> dict: - _root_path = AnalysisConfigWrapperBase.get_network_root_dir(ini_file) - _config_data = self._import_configuration(_root_path, ini_file) - _config_data = self._convert_analysis_types(_config_data) - self._copy_output_files(ini_file, _config_data) return _config_data diff --git a/ra2ce/analyses/analysis_config_wrapper/analysis_config_wrapper_factory.py b/ra2ce/analyses/analysis_config_wrapper/analysis_config_wrapper_factory.py index 5a2ef3e8b..17adb800f 100644 --- a/ra2ce/analyses/analysis_config_wrapper/analysis_config_wrapper_factory.py +++ b/ra2ce/analyses/analysis_config_wrapper/analysis_config_wrapper_factory.py @@ -67,11 +67,11 @@ def get_analysis_config( """ if isinstance(analysis_config, AnalysisConfigDataWithNetwork): return AnalysisConfigWrapperWithNetwork.from_data_with_network( - ini_file, analysis_config, network_config + ini_file, analysis_config.to_dict(), network_config ) elif isinstance(analysis_config, AnalysisConfigDataWithoutNetwork): return AnalysisConfigWrapperWithoutNetwork.from_data( - ini_file, analysis_config + ini_file, analysis_config.to_dict() ) else: raise NotImplementedError( diff --git a/ra2ce/configuration/config_wrapper.py b/ra2ce/configuration/config_wrapper.py index 685b49d0f..079a74805 100644 --- a/ra2ce/configuration/config_wrapper.py +++ 
b/ra2ce/configuration/config_wrapper.py @@ -39,7 +39,8 @@ def __init__(self) -> None: self.network_config = None self.analysis_config = None - def get_root_dir(self) -> Path: + @property + def root_dir(self) -> Path: if self.network_config.ini_file: # TODO: What do we need this for? return self.network_config.root_dir diff --git a/tests/configuration/test_config_wrapper.py b/tests/configuration/test_config_wrapper.py index 42bedfdcd..dfd0e3761 100644 --- a/tests/configuration/test_config_wrapper.py +++ b/tests/configuration/test_config_wrapper.py @@ -79,9 +79,9 @@ def test_get_root_dir( # 2. Run test. if not network_ini and not analysis_ini: with pytest.raises(ValueError): - _input_config.get_root_dir() + _input_config.root_dir else: - _root_dir = _input_config.get_root_dir() + _root_dir = _input_config.root_dir # 3. Verify expectations. assert _root_dir == test_data From 3dd22818e497995f35d0e171d06598ab38de0b61 Mon Sep 17 00:00:00 2001 From: Ardt Klapwijk Date: Thu, 9 Nov 2023 16:27:57 +0100 Subject: [PATCH 05/35] chore: add _path to configs --- .../analysis_config_wrapper_base.py | 2 +- ...analysis_config_wrapper_without_network.py | 2 +- ra2ce/analyses/direct/analyses_direct.py | 18 ++++---- .../analyses/direct/cost_benefit_analysis.py | 18 +++++--- ra2ce/analyses/indirect/analyses_indirect.py | 44 +++++++++++-------- ra2ce/analyses/indirect/losses.py | 10 +++-- .../indirect/origin_closest_destination.py | 8 +++- .../test_analysis_config_wrapper_factory.py | 4 +- .../direct/test_cost_benefit_analysis.py | 6 +-- tests/analyses/direct/test_direct_damage.py | 8 ++-- 10 files changed, 70 insertions(+), 50 deletions(-) diff --git a/ra2ce/analyses/analysis_config_wrapper/analysis_config_wrapper_base.py b/ra2ce/analyses/analysis_config_wrapper/analysis_config_wrapper_base.py index 0ab456abe..7a7f03358 100644 --- a/ra2ce/analyses/analysis_config_wrapper/analysis_config_wrapper_base.py +++ b/ra2ce/analyses/analysis_config_wrapper/analysis_config_wrapper_base.py @@ -57,7 +57,7 @@ def _create_output_folders(analysis_type: str) -> None: if analysis_type not in self.config_data.keys(): return for a in self.config_data[analysis_type]: - output_path = self.config_data["output"] / a["analysis"] + output_path = self.config_data["output_path"] / a["analysis"] output_path.mkdir(parents=True, exist_ok=True) _create_output_folders("direct") diff --git a/ra2ce/analyses/analysis_config_wrapper/analysis_config_wrapper_without_network.py b/ra2ce/analyses/analysis_config_wrapper/analysis_config_wrapper_without_network.py index b16c9d4fb..c818ae0de 100644 --- a/ra2ce/analyses/analysis_config_wrapper/analysis_config_wrapper_without_network.py +++ b/ra2ce/analyses/analysis_config_wrapper/analysis_config_wrapper_without_network.py @@ -73,7 +73,7 @@ def from_data( def configure(self) -> None: self.graphs = NetworkConfigWrapper.read_graphs_from_config( - self.config_data["static"] / "output_graph" + self.config_data["static_path"] / "output_graph" ) self.initialize_output_dirs() diff --git a/ra2ce/analyses/direct/analyses_direct.py b/ra2ce/analyses/direct/analyses_direct.py index ecdad1654..09b6da39f 100644 --- a/ra2ce/analyses/direct/analyses_direct.py +++ b/ra2ce/analyses/direct/analyses_direct.py @@ -63,7 +63,6 @@ def execute(self): starttime = time.time() if analysis["analysis"] == "direct": - gdf = self.road_damage( analysis ) # calls the coordinator for road damage calculation @@ -74,7 +73,7 @@ def execute(self): else: gdf = [] - output_path = self.config["output"] / analysis["analysis"] + output_path = 
self.config["output_path"] / analysis["analysis"] if analysis["save_gpkg"]: shp_path = output_path / (analysis["name"].replace(" ", "_") + ".gpkg") save_gdf(gdf, shp_path) @@ -126,13 +125,12 @@ def road_damage(self, analysis: dict) -> gpd.GeoDataFrame: if analysis["damage_curve"] == "MAN": manual_damage_functions = ManualDamageFunctions() manual_damage_functions.find_damage_functions( - folder=(self.config["input"] / "damage_functions") + folder=(self.config["input_path"] / "damage_functions") ) manual_damage_functions.load_damage_functions() # Choose between event or return period based analysis if analysis["event_type"] == "event": - event_gdf = DamageNetworkEvents(road_gdf, val_cols) event_gdf.main( damage_function=damage_function, @@ -174,7 +172,7 @@ def effectiveness_measures(self, analysis): """ em = EffectivenessMeasures(self.config, analysis) effectiveness_dict = em.load_effectiveness_table( - self.config["input"] / "direct" + self.config["input_path"] / "direct" ) if self.graphs["base_network_hazard"] is None: @@ -182,11 +180,11 @@ def effectiveness_measures(self, analysis): if analysis["create_table"] is True: df = em.create_feature_table( - self.config["input"] / "direct" / analysis["file_name"] + self.config["input_path"] / "direct" / analysis["file_name"] ) else: df = em.load_table( - self.config["input"] / "direct", + self.config["input_path"] / "direct", analysis["file_name"].replace(".gpkg", ".csv"), ) @@ -194,7 +192,9 @@ def effectiveness_measures(self, analysis): df = em.knmi_correction(df) df_cba, costs_dict = em.cost_benefit_analysis(effectiveness_dict) df_cba.round(2).to_csv( - self.config["output"] / analysis["analysis"] / "cost_benefit_analysis.csv", + self.config["output_path"] + / analysis["analysis"] + / "cost_benefit_analysis.csv", decimal=",", sep=";", index=False, @@ -204,7 +204,7 @@ def effectiveness_measures(self, analysis): df_costs = em.calculate_strategy_costs(df, costs_dict) df_costs = df_costs.astype(float).round(2) df_costs.to_csv( - self.config["output"] / analysis["analysis"] / "output_analysis.csv", + self.config["output_path"] / analysis["analysis"] / "output_analysis.csv", decimal=",", sep=";", index=False, diff --git a/ra2ce/analyses/direct/cost_benefit_analysis.py b/ra2ce/analyses/direct/cost_benefit_analysis.py index bd6f9effa..3b3f19bb8 100644 --- a/ra2ce/analyses/direct/cost_benefit_analysis.py +++ b/ra2ce/analyses/direct/cost_benefit_analysis.py @@ -56,19 +56,23 @@ def _validate_input_params(self, analysis: dict, config: dict) -> None: ) logging.error(_error) raise ValueError(_error) - elif not (config["input"] / "direct" / analysis["file_name"]).exists(): + elif not (config["input_path"] / "direct" / analysis["file_name"]).exists(): _error = "Effectiveness of measures calculation: Input file doesn't exist please place file in the following folder: {}".format( - config["input"] / "direct" + config["input_path"] / "direct" ) logging.error(_error) - raise FileNotFoundError(config["input"] / "direct" / analysis["file_name"]) - elif not (config["input"] / "direct" / "effectiveness_measures.csv").exists(): + raise FileNotFoundError( + config["input_path"] / "direct" / analysis["file_name"] + ) + elif not ( + config["input_path"] / "direct" / "effectiveness_measures.csv" + ).exists(): _error = "Effectiveness of measures calculation: lookup table with effectiveness of measures doesnt exist. 
Please place the effectiveness_measures.csv file in the following folder: {}".format( - config["input"] / "direct" + config["input_path"] / "direct" ) logging.error(_error) raise FileNotFoundError( - config["input"] / "direct" / "effectiveness_measures.csv" + config["input_path"] / "direct" / "effectiveness_measures.csv" ) @staticmethod @@ -234,7 +238,7 @@ def calculate_strategy_effectiveness(self, df, effectiveness_dict): df_total = self.calculate_effectiveness(df, name="standard") df_blockage = pd.read_csv( - self.config["input"] / "direct" / "blockage_costs.csv" + self.config["input_path"] / "direct" / "blockage_costs.csv" ) df_total = df_total.merge(df_blockage, how="left", on="LinkNr") df_total["length"] = df_total[ diff --git a/ra2ce/analyses/indirect/analyses_indirect.py b/ra2ce/analyses/indirect/analyses_indirect.py index 663093c43..f50a5e86f 100644 --- a/ra2ce/analyses/indirect/analyses_indirect.py +++ b/ra2ce/analyses/indirect/analyses_indirect.py @@ -58,9 +58,9 @@ class IndirectAnalyses: def __init__(self, config, graphs): self.config = config self.graphs = graphs - if self.config["output"].joinpath("hazard_names.xlsx").is_file(): + if self.config["output_path"].joinpath("hazard_names.xlsx").is_file(): self.hazard_names = pd.read_excel( - self.config["output"].joinpath("hazard_names.xlsx") + self.config["output_path"].joinpath("hazard_names.xlsx") ) self.config["hazard_names"] = list( set(self.hazard_names[self._file_name_key]) @@ -139,7 +139,9 @@ def single_link_losses(self, gdf: gpd.GeoDataFrame, analysis: dict): gdf: The network in GeoDataFrame format. analysis: Dictionary of the configurations for the analysis. """ - losses_fn = self.config["static"] / "hazard" / analysis["loss_per_distance"] + losses_fn = ( + self.config["static_path"] / "hazard" / analysis["loss_per_distance"] + ) losses_df = pd.read_excel(losses_fn, sheet_name="Sheet1") if analysis["loss_type"] == "uniform": @@ -148,7 +150,9 @@ def single_link_losses(self, gdf: gpd.GeoDataFrame, analysis: dict): if analysis["loss_type"] == "categorized": _disruption_file = ( - self.config["static"] / "hazard" / analysis["disruption_per_category"] + self.config["static_path"] + / "hazard" + / analysis["disruption_per_category"] ) _disruption_df = pd.read_excel(_disruption_file, sheet_name="Sheet1") self._single_link_losses_categorized( @@ -387,12 +391,16 @@ def multi_link_losses(self, gdf, analysis): Returns: aggregated_results (GeoDataFrame): The results of the analysis aggregated into a table. 
""" - losses_fn = self.config["static"] / "hazard" / analysis["loss_per_distance"] + losses_fn = ( + self.config["static_path"] / "hazard" / analysis["loss_per_distance"] + ) losses_df = pd.read_excel(losses_fn, sheet_name="Sheet1") if analysis["loss_type"] == "categorized": disruption_fn = ( - self.config["static"] / "hazard" / analysis["disruption_per_category"] + self.config["static_path"] + / "hazard" + / analysis["disruption_per_category"] ) disruption_df = pd.read_excel(disruption_fn, sheet_name="Sheet1") road_classes = [x for x in disruption_df.columns if "class" in x] @@ -535,7 +543,7 @@ def extract_od_nodes_from_graph( def _get_origin_destination_pairs( self, graph: nx.classes.MultiGraph ) -> list[tuple[int, str], tuple[int, str]]: - od_path = self.config["static"].joinpath( + od_path = self.config["static_path"].joinpath( "output_graph", "origin_destination_table.feather" ) od = gpd.read_feather(od_path) @@ -778,7 +786,7 @@ def multi_link_origin_destination_regional_impact(self, gdf_ori): gdf_ori_ = gdf_ori.copy() # read origin points - origin_fn = Path(self.config["static"]).joinpath( + origin_fn = Path(self.config["static_path"]).joinpath( "output_graph", "origin_destination_table.gpkg" ) origin = gpd.read_file(origin_fn, engine="pyogrio") @@ -853,7 +861,7 @@ def multi_link_isolated_locations( # Load the point shapefile with the locations of which the isolated locations should be identified. locations = gpd.read_feather( - self.config["static"] / "output_graph" / "locations_hazard.feather" + self.config["static_path"] / "output_graph" / "locations_hazard.feather" ) # TODO PUT CRS IN DOCUMENTATION OR MAKE CHANGABLE # reproject the datasets to be able to make a buffer in meters @@ -912,7 +920,7 @@ def multi_link_isolated_locations( ).to_crs(crs=crs) # Save the output results_hz_roads.to_file( - self.config["output"] + self.config["output_path"] / analysis["analysis"] / f"flooded_and_isolated_roads_{hazard_name}.gpkg" ) @@ -1028,7 +1036,7 @@ def execute(self): starttime = time.time() gdf = pd.DataFrame() opt_routes = None - output_path = self.config["output"].joinpath(analysis["analysis"]) + output_path = self.config["output_path"].joinpath(analysis["analysis"]) def _save_gpkg_analysis( base_graph, @@ -1069,13 +1077,13 @@ def _save_gpkg_analysis( "origin_count" in self.config["origins_destinations"].keys() ): od_table = gpd.read_feather( - self.config["static"] + self.config["static_path"] / "output_graph" / "origin_destination_table.feather" ) _equity_weights_file = None if "equity_weight" in analysis.keys(): - _equity_weights_file = self.config["static"].joinpath( + _equity_weights_file = self.config["static_path"].joinpath( "network", analysis["equity_weight"] ) route_traffic_df = self.optimal_route_od_link( @@ -1086,7 +1094,7 @@ def _save_gpkg_analysis( ), ) impact_csv_path = ( - self.config["output"] + self.config["output_path"] / analysis["analysis"] / (analysis["name"].replace(" ", "_") + "_link_traffic.csv") ) @@ -1113,13 +1121,13 @@ def _save_gpkg_analysis( regional_impact_summary_df, ) = self.multi_link_origin_destination_regional_impact(gdf_ori) impact_csv_path = ( - self.config["output"] + self.config["output_path"] / analysis["analysis"] / (analysis["name"].replace(" ", "_") + "_regional_impact.csv") ) regional_impact_df.to_csv(impact_csv_path, index=False) impact_csv_path = ( - self.config["output"] + self.config["output_path"] / analysis["analysis"] / ( analysis["name"].replace(" ", "_") @@ -1130,14 +1138,14 @@ def _save_gpkg_analysis( except Exception: pass 
impact_csv_path = ( - self.config["output"] + self.config["output_path"] / analysis["analysis"] / (analysis["name"].replace(" ", "_") + "_impact.csv") ) del gdf_ori["geometry"] gdf_ori.to_csv(impact_csv_path, index=False) impact_csv_path = ( - self.config["output"] + self.config["output_path"] / analysis["analysis"] / (analysis["name"].replace(" ", "_") + "_impact_summary.csv") ) diff --git a/ra2ce/analyses/indirect/losses.py b/ra2ce/analyses/indirect/losses.py index 0b2078964..13426c1f6 100644 --- a/ra2ce/analyses/indirect/losses.py +++ b/ra2ce/analyses/indirect/losses.py @@ -180,7 +180,7 @@ def calculate_losses_from_table(self): """ traffic_data = self.load_df( - self.config["input"] / "losses", "traffic_intensities.csv" + self.config["input_path"] / "losses", "traffic_intensities.csv" ) dict1 = { "AS_VTG": "evening_total", @@ -199,7 +199,9 @@ def calculate_losses_from_table(self): } traffic_data.rename(columns=dict1, inplace=True) - detour_data = self.load_df(self.config["input"] / "losses", "detour_data.csv") + detour_data = self.load_df( + self.config["input_path"] / "losses", "detour_data.csv" + ) dict2 = { "VA_AV_HWN": "detour_time_evening", "VA_RD_HWN": "detour_time_remaining", @@ -208,6 +210,8 @@ def calculate_losses_from_table(self): } detour_data.rename(columns=dict2, inplace=True) - vehicle_loss_hours = self.vehicle_loss_hours(self.config["input"] / "losses") + vehicle_loss_hours = self.vehicle_loss_hours( + self.config["input_path"] / "losses" + ) vlh = self.calc_vlh(traffic_data, vehicle_loss_hours, detour_data) return vlh diff --git a/ra2ce/analyses/indirect/origin_closest_destination.py b/ra2ce/analyses/indirect/origin_closest_destination.py index 6fe2f31da..3f55a5e34 100644 --- a/ra2ce/analyses/indirect/origin_closest_destination.py +++ b/ra2ce/analyses/indirect/origin_closest_destination.py @@ -982,7 +982,9 @@ def calc_routes_closest_dest( def load_origins(self): od_path = ( - self.config["static"] / "output_graph" / "origin_destination_table.feather" + self.config["static_path"] + / "output_graph" + / "origin_destination_table.feather" ) od = gpd.read_feather(od_path) origin = od.loc[od["o_id"].notna()] @@ -991,7 +993,9 @@ def load_origins(self): def load_destinations(self): od_path = ( - self.config["static"] / "output_graph" / "origin_destination_table.feather" + self.config["static_path"] + / "output_graph" + / "origin_destination_table.feather" ) od = gpd.read_feather(od_path) destination = od.loc[od["d_id"].notna()] diff --git a/tests/analyses/analysis_config_wrapper/test_analysis_config_wrapper_factory.py b/tests/analyses/analysis_config_wrapper/test_analysis_config_wrapper_factory.py index d5ad4b13a..4f9477b7f 100644 --- a/tests/analyses/analysis_config_wrapper/test_analysis_config_wrapper_factory.py +++ b/tests/analyses/analysis_config_wrapper/test_analysis_config_wrapper_factory.py @@ -65,8 +65,8 @@ def test_given_known_without_network_config_get_analysis_config_returns_expected _ini_file_path = test_data / "simple_inputs" / "analysis.ini" _config_data = AnalysisConfigDataWithoutNetwork() - _config_data["static"] = test_data / "simple_inputs" / "static" - assert _config_data["static"].is_dir() + _config_data["static_path"] = test_data / "simple_inputs" / "static" + assert _config_data["static_path"].is_dir() _expected_type = AnalysisConfigWrapperWithoutNetwork assert isinstance(_config_data, AnalysisConfigData) diff --git a/tests/analyses/direct/test_cost_benefit_analysis.py b/tests/analyses/direct/test_cost_benefit_analysis.py index d871b4cd7..a57e962f6 100644 
--- a/tests/analyses/direct/test_cost_benefit_analysis.py +++ b/tests/analyses/direct/test_cost_benefit_analysis.py @@ -65,7 +65,7 @@ def test_init_raises_when_direct_shp_file_does_not_exist(self): with pytest.raises(FileNotFoundError) as exc_err: EffectivenessMeasures(_config, _analysis) assert str(exc_err.value) == str( - _config["input"] / "direct" / "filedoesnotexist.shp" + _config["input_path"] / "direct" / "filedoesnotexist.shp" ) def test_init_raises_when_effectiveness_measures_does_not_exist(self): @@ -79,11 +79,11 @@ def test_init_raises_when_effectiveness_measures_does_not_exist(self): "climate_period": 2.4, "file_name": "origins.shp", } - assert (_config["input"] / "direct" / "origins.shp").exists() + assert (_config["input_path"] / "direct" / "origins.shp").exists() with pytest.raises(FileNotFoundError) as exc_err: EffectivenessMeasures(_config, _analysis) assert str(exc_err.value) == str( - _config["input"] / "direct" / "effectiveness_measures.csv" + _config["input_path"] / "direct" / "effectiveness_measures.csv" ) @pytest.mark.parametrize( diff --git a/tests/analyses/direct/test_direct_damage.py b/tests/analyses/direct/test_direct_damage.py index c0ca31846..85979eb49 100644 --- a/tests/analyses/direct/test_direct_damage.py +++ b/tests/analyses/direct/test_direct_damage.py @@ -104,8 +104,8 @@ def test_event_based_damage_calculation_huizinga( damage_function = "HZ" # This test roughly follows the DirectDamage.road_damage() controller in analyses_direct.py - test_input = event_input_output["input"] - test_ref_output = event_input_output["output"] + test_input = event_input_output["input_path"] + test_ref_output = event_input_output["output_path"] road_gdf = test_input @@ -286,8 +286,8 @@ def test_old_event_based_damage_calculation_manualfunction( damage_function = "MAN" # This test roughly follows the DirectDamage.road_damage() controller in analyses_direct.py - test_input = event_input_output["input"] - test_ref_output = event_input_output["output"] + test_input = event_input_output["input_path"] + test_ref_output = event_input_output["output_path"] road_gdf = test_input From 322e3a8d82f6d7709ffd07d1c0158e5de86dfd72 Mon Sep 17 00:00:00 2001 From: Ardt Klapwijk Date: Thu, 9 Nov 2023 16:42:41 +0100 Subject: [PATCH 06/35] chore: add _path to configs --- .../analysis_config_wrapper_without_network.py | 2 +- .../test_analysis_config_data_validator_with_network.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/ra2ce/analyses/analysis_config_wrapper/analysis_config_wrapper_without_network.py b/ra2ce/analyses/analysis_config_wrapper/analysis_config_wrapper_without_network.py index c818ae0de..dfcf7ff99 100644 --- a/ra2ce/analyses/analysis_config_wrapper/analysis_config_wrapper_without_network.py +++ b/ra2ce/analyses/analysis_config_wrapper/analysis_config_wrapper_without_network.py @@ -61,7 +61,7 @@ def from_data( raise FileNotFoundError(ini_file) _new_analysis_config.ini_file = ini_file _new_analysis_config.config_data = config_data - _static_dir = config_data.get("static", None) + _static_dir = config_data.get("static_path", None) if _static_dir and _static_dir.is_dir(): config_data.files = NetworkConfigWrapper._get_existent_network_files( _static_dir / "output_graph" diff --git a/tests/analyses/analysis_config_data/test_analysis_config_data_validator_with_network.py b/tests/analyses/analysis_config_data/test_analysis_config_data_validator_with_network.py index a9ab3302f..937d15722 100644 --- 
a/tests/analyses/analysis_config_data/test_analysis_config_data_validator_with_network.py +++ b/tests/analyses/analysis_config_data/test_analysis_config_data_validator_with_network.py @@ -41,7 +41,7 @@ def test_validate_with_required_headers(self): ) def test_validate_without_output_reports_error(self, output_dict: dict): # 1. Define test data. - _output_dir = output_dict.get("output", None) + _output_dir = output_dict.get("output_path", None) _expected_error = f"The configuration file 'network.ini' is not found at {_output_dir}.Please make sure to name your network settings file 'network.ini'." _test_config_data = AnalysisConfigDataWithNetwork.from_dict( {"project": {}} | output_dict From e1cf15c97303b7c4213342181614c6d2eefa9b22 Mon Sep 17 00:00:00 2001 From: Ardt Klapwijk Date: Fri, 10 Nov 2023 15:21:01 +0100 Subject: [PATCH 07/35] chore: finish analysis config reader plus tests --- .../analysis_config_data.py | 32 +++++++- ...ysis_config_data_validator_with_network.py | 2 +- ...s_config_data_validator_without_network.py | 65 +++++++++------ .../readers/analysis_config_reader_base.py | 18 ++-- .../analysis_config_reader_with_network.py | 10 +-- .../analysis_config_reader_without_network.py | 12 +-- .../analysis_config_wrapper_base.py | 9 +- .../analysis_config_wrapper_factory.py | 4 +- .../analysis_config_wrapper_with_network.py | 10 +-- ...analysis_config_wrapper_without_network.py | 4 +- .../test_analysis_config_reader_base.py | 32 ++------ .../test_analysis_config_data.py | 16 ---- ...ysis_config_data_validator_with_network.py | 10 +-- ...s_config_data_validator_without_network.py | 82 +++++-------------- .../test_analysis_config_wrapper_factory.py | 4 +- ...st_analysis_config_wrapper_with_network.py | 15 ++-- ...analysis_config_wrapper_without_network.py | 15 ++-- tests/analyses/direct/test_analyses_direct.py | 21 ++--- .../direct/test_cost_benefit_analysis.py | 9 +- 19 files changed, 167 insertions(+), 203 deletions(-) diff --git a/ra2ce/analyses/analysis_config_data/analysis_config_data.py b/ra2ce/analyses/analysis_config_data/analysis_config_data.py index b35a0baf5..f4bb99f54 100644 --- a/ra2ce/analyses/analysis_config_data/analysis_config_data.py +++ b/ra2ce/analyses/analysis_config_data/analysis_config_data.py @@ -36,19 +36,41 @@ class ProjectSection: @dataclass -class AnalysisSection: +class AnalysisSectionIndirect: name: str = "" analysis: str = "" # should be enum aggregate_wl: str = "" # should be enum threshold: float = math.nan weighing: str = "" # should be enum - calculate_route_without_disruption: Optional[bool] = False + equity_weight: str = "" + calculate_route_without_disruption: bool = False buffer_meters: float = math.nan category_field_name: str = "" + file_name: Path = None save_gpkg: bool = False save_csv: bool = False +@dataclass +class AnalysisSectionDirect: + name: str = "" + analysis: str = "" # should be enum + damage_curve: str = "" + event_type: str = "" + risk_calculation: str = "" + loss_per_distance: str = "" + traffic_cols: str = "" + file_name: Path = None + save_shp: bool = False + save_gpkg: bool = False + save_csv: bool = False + + +@dataclass +class AnalysisSection(AnalysisSectionIndirect, AnalysisSectionDirect): + pass + + @dataclass class AnalysisConfigData(ConfigDataProtocol): root_path: Optional[Path] = None @@ -56,14 +78,16 @@ class AnalysisConfigData(ConfigDataProtocol): output_path: Optional[Path] = None static_path: Optional[Path] = None project: ProjectSection = field(default_factory=lambda: ProjectSection()) - direct: 
list[AnalysisSection] = field(default_factory=list) - indirect: list[AnalysisSection] = field(default_factory=list) + direct: list[AnalysisSectionDirect] = field(default_factory=list) + indirect: list[AnalysisSectionIndirect] = field(default_factory=list) + files: Optional[dict[str, Path]] = field(default_factory=dict) def to_dict(self) -> dict: _dict = self.__dict__ _dict["project"] = self.project.__dict__ _dict["direct"] = [dv.__dict__ for dv in self.direct] _dict["indirect"] = [dv.__dict__ for dv in self.indirect] + _dict["files"] = [dv.__dict__ for dv in self.files] return _dict diff --git a/ra2ce/analyses/analysis_config_data/analysis_config_data_validator_with_network.py b/ra2ce/analyses/analysis_config_data/analysis_config_data_validator_with_network.py index e8411dc1d..ccedcbfde 100644 --- a/ra2ce/analyses/analysis_config_data/analysis_config_data_validator_with_network.py +++ b/ra2ce/analyses/analysis_config_data/analysis_config_data_validator_with_network.py @@ -16,7 +16,7 @@ def validate(self) -> ValidationReport: _base_report = AnalysisConfigDataValidatorWithoutNetwork( self._config ).validate() - _output_network_dir = self._config.get("output", None) + _output_network_dir = self._config.output_path if ( not _output_network_dir or not (_output_network_dir / "network.ini").is_file() diff --git a/ra2ce/analyses/analysis_config_data/analysis_config_data_validator_without_network.py b/ra2ce/analyses/analysis_config_data/analysis_config_data_validator_without_network.py index f9c03d842..b9bcaadbe 100644 --- a/ra2ce/analyses/analysis_config_data/analysis_config_data_validator_without_network.py +++ b/ra2ce/analyses/analysis_config_data/analysis_config_data_validator_without_network.py @@ -20,7 +20,9 @@ """ +import math from pathlib import Path +from typing import Any from ra2ce.analyses.analysis_config_data.analysis_config_data import ( AnalysisConfigDataWithoutNetwork, @@ -84,27 +86,16 @@ def _validate_files( ) return _files_report - def _validate_headers(self, required_headers: list[str]) -> ValidationReport: + def _validate_header(self, header: Any) -> ValidationReport: _report = ValidationReport() - _available_keys = self._config.keys() - - def _check_header(header: str) -> None: - if header not in _available_keys: - _report.error( - f"Property [ {header} ] is not configured. Add property [ {header} ] to the *.ini file. " - ) - - list(map(_check_header, required_headers)) - if not _report.is_valid(): - return _report - - # check if properties have correct input - # TODO: Decide whether also the non-used properties must be checked or those are not checked - # TODO: Decide how to check for multiple analyses (analysis1, analysis2, etc) - for header in required_headers: - # Now check the parameters per configured item. 
- for key, value in self._config[header].items(): + if isinstance(header, list): + for _item in header: + _report.merge(self._validate_header(_item)) + else: + for key, value in header.__dict__.items(): + if not value: + continue if key not in AnalysisNetworkDictValues.keys(): continue _expected_values_list = AnalysisNetworkDictValues[key] @@ -123,12 +114,40 @@ def _check_header(header: str) -> None: f"Wrong input to property [ {key} ], has to be one of: {_expected_values_list}" ) + return _report + + def _validate_headers(self, required_headers: list[str]) -> ValidationReport: + _report = ValidationReport() + _available_keys = self._config.__dict__.keys() + + def _check_header(header: str) -> None: + if header not in _available_keys: + _report.error( + f"Property [ {header} ] is not configured. Add property [ {header} ] to the *.ini file. " + ) + + list(map(_check_header, required_headers)) + if not _report.is_valid(): + return _report + + # check if properties have correct input + # TODO: Decide whether also the non-used properties must be checked or those are not checked + # TODO: Decide how to check for multiple analyses (analysis1, analysis2, etc) + + for header in required_headers: + # Now check the parameters per configured item. + _attr = getattr(self._config, header) + if not _attr: + continue + else: + _report.merge(self._validate_header(_attr)) + if not _report.is_valid(): _report.error( "There are inconsistencies in the *.ini file. Please consult the log file for more information: {}".format( - self._config["root_path"] + self._config.root_path / "data" - / self._config["project"]["name"] + / self._config.project.name / "output" / "RA2CE.log" ) @@ -138,9 +157,7 @@ def _check_header(header: str) -> None: def validate(self) -> ValidationReport: _report = ValidationReport() - _required_headers = ["project"] - # Analysis are marked as [analysis1], [analysis2], ... 
- _required_headers.extend([a for a in self._config.keys() if "analysis" in a]) + _required_headers = ["project", "direct", "indirect"] _report.merge(self._validate_headers(_required_headers)) return _report diff --git a/ra2ce/analyses/analysis_config_data/readers/analysis_config_reader_base.py b/ra2ce/analyses/analysis_config_data/readers/analysis_config_reader_base.py index e8e0fca12..3185f042d 100644 --- a/ra2ce/analyses/analysis_config_data/readers/analysis_config_reader_base.py +++ b/ra2ce/analyses/analysis_config_data/readers/analysis_config_reader_base.py @@ -24,7 +24,7 @@ import re from configparser import ConfigParser from pathlib import Path -from shutil import copy +from shutil import copyfile from ra2ce.analyses.analysis_config_data.analysis_config_data import ( AnalysisConfigData, @@ -136,16 +136,22 @@ def get_analysis_sections(self, analysis_type: str) -> list[AnalysisSection]: def _copy_output_files( self, from_path: Path, config_data: AnalysisConfigData ) -> None: - _output_dir = config_data.output_path + _output_dir = config_data.root_path.joinpath(config_data.project.name).joinpath( + "output" + ) + config_data.output_path = _output_dir if not _output_dir.exists(): _output_dir.mkdir(parents=True) try: - copy(from_path, _output_dir) + copyfile( + from_path, + config_data.output_path.joinpath("{}.ini".format(from_path.stem)), + ) except FileNotFoundError as e: logging.warning(e) def _parse_path_list( - self, property_name: str, path_list: str, config_data: dict + self, property_name: str, path_list: str, config_data: AnalysisConfigData ) -> list[Path]: _list_paths = [] for path_value in path_list.split(","): @@ -154,9 +160,7 @@ def _parse_path_list( _list_paths.append(path_value) continue - _project_name_dir = ( - config_data["root_path"] / config_data["project"]["name"] - ) + _project_name_dir = config_data.root_path / config_data.project.name abs_path = _project_name_dir / "static" / property_name / path_value try: assert abs_path.is_file() diff --git a/ra2ce/analyses/analysis_config_data/readers/analysis_config_reader_with_network.py b/ra2ce/analyses/analysis_config_data/readers/analysis_config_reader_with_network.py index 3b4c3e8b5..e9cc6cd61 100644 --- a/ra2ce/analyses/analysis_config_data/readers/analysis_config_reader_with_network.py +++ b/ra2ce/analyses/analysis_config_data/readers/analysis_config_reader_with_network.py @@ -50,13 +50,7 @@ def __init__(self, network_data: NetworkConfigWrapper) -> None: def read(self, ini_file: Path) -> AnalysisConfigDataWithNetwork: _config = super().read(ini_file) - _config_data = AnalysisConfigDataWithNetwork( - input_path=_config.input_path, - output_path=_config.output_path, - static_path=_config.static_path, - project=_config.project, - direct=_config.direct, - indirect=_config.indirect, - ) + _config_data = AnalysisConfigDataWithNetwork(**_config.__dict__) + self._copy_output_files(ini_file, _config_data) return _config_data diff --git a/ra2ce/analyses/analysis_config_data/readers/analysis_config_reader_without_network.py b/ra2ce/analyses/analysis_config_data/readers/analysis_config_reader_without_network.py index f7a02119d..5a7940141 100644 --- a/ra2ce/analyses/analysis_config_data/readers/analysis_config_reader_without_network.py +++ b/ra2ce/analyses/analysis_config_data/readers/analysis_config_reader_without_network.py @@ -53,14 +53,8 @@ def read(self, ini_file: Path) -> AnalysisConfigDataWithoutNetwork: AnalysisConfigDataWithoutNetwork """ _config = super().read(ini_file) - _config_data = AnalysisConfigDataWithoutNetwork( - 
input_path=_config.input_path, - output_path=_config.output_path, - static_path=_config.static_path, - project=_config.project, - direct=_config.direct, - indirect=_config.indirect, - ) + _config_data = AnalysisConfigDataWithoutNetwork(**_config.__dict__) + self._copy_output_files(ini_file, _config_data) _output_network_ini_file = _config_data.output_path.joinpath("network.ini") @@ -68,7 +62,7 @@ def read(self, ini_file: Path) -> AnalysisConfigDataWithoutNetwork: _config_data.update(_network_config.to_dict()) _network = _config_data.get("network", None) if _network: - _config_data["origins_destinations"] = _network.get( + _config_data.origins_destinations = _network.get( "origins_destinations", None ) else: diff --git a/ra2ce/analyses/analysis_config_wrapper/analysis_config_wrapper_base.py b/ra2ce/analyses/analysis_config_wrapper/analysis_config_wrapper_base.py index 7a7f03358..743abaaaf 100644 --- a/ra2ce/analyses/analysis_config_wrapper/analysis_config_wrapper_base.py +++ b/ra2ce/analyses/analysis_config_wrapper/analysis_config_wrapper_base.py @@ -22,7 +22,7 @@ from abc import abstractclassmethod, abstractmethod from pathlib import Path -from typing import Optional +from typing import Any, Optional from ra2ce.analyses.analysis_config_data.analysis_config_data import AnalysisConfigData from ra2ce.common.configuration.config_data_protocol import ConfigDataProtocol @@ -32,6 +32,7 @@ class AnalysisConfigWrapperBase(ConfigWrapperProtocol): ini_file: Path config_data: Optional[AnalysisConfigData] = None + graphs: Optional[dict] = None @staticmethod def get_network_root_dir(filepath: Path) -> Path: @@ -54,10 +55,10 @@ def initialize_output_dirs(self) -> None: def _create_output_folders(analysis_type: str) -> None: # Create the output folders - if analysis_type not in self.config_data.keys(): + if not hasattr(self.config_data, analysis_type): return - for a in self.config_data[analysis_type]: - output_path = self.config_data["output_path"] / a["analysis"] + for a in getattr(self.config_data, analysis_type): + output_path = self.config_data.output_path / a.analysis output_path.mkdir(parents=True, exist_ok=True) _create_output_folders("direct") diff --git a/ra2ce/analyses/analysis_config_wrapper/analysis_config_wrapper_factory.py b/ra2ce/analyses/analysis_config_wrapper/analysis_config_wrapper_factory.py index 17adb800f..5a2ef3e8b 100644 --- a/ra2ce/analyses/analysis_config_wrapper/analysis_config_wrapper_factory.py +++ b/ra2ce/analyses/analysis_config_wrapper/analysis_config_wrapper_factory.py @@ -67,11 +67,11 @@ def get_analysis_config( """ if isinstance(analysis_config, AnalysisConfigDataWithNetwork): return AnalysisConfigWrapperWithNetwork.from_data_with_network( - ini_file, analysis_config.to_dict(), network_config + ini_file, analysis_config, network_config ) elif isinstance(analysis_config, AnalysisConfigDataWithoutNetwork): return AnalysisConfigWrapperWithoutNetwork.from_data( - ini_file, analysis_config.to_dict() + ini_file, analysis_config ) else: raise NotImplementedError( diff --git a/ra2ce/analyses/analysis_config_wrapper/analysis_config_wrapper_with_network.py b/ra2ce/analyses/analysis_config_wrapper/analysis_config_wrapper_with_network.py index 525e17ee1..8d3a93be6 100644 --- a/ra2ce/analyses/analysis_config_wrapper/analysis_config_wrapper_with_network.py +++ b/ra2ce/analyses/analysis_config_wrapper/analysis_config_wrapper_with_network.py @@ -89,11 +89,11 @@ def from_data_with_network( return _new_analysis def configure(self) -> None: - self.config_data["files"] = 
self._network_config.files - self.config_data["network"] = self._network_config.config_data.network.__dict__ - self.config_data[ - "origins_destinations" - ] = self._network_config.config_data.origins_destinations.__dict__ + self.config_data.files = self._network_config.files + self.config_data.network = self._network_config.config_data.network.__dict__ + self.config_data.origins_destinations = ( + self._network_config.config_data.origins_destinations.__dict__ + ) # When Network is present the graphs are retrieved from the already configured object. self.graphs = self._network_config.graphs diff --git a/ra2ce/analyses/analysis_config_wrapper/analysis_config_wrapper_without_network.py b/ra2ce/analyses/analysis_config_wrapper/analysis_config_wrapper_without_network.py index dfcf7ff99..5e9bd9be8 100644 --- a/ra2ce/analyses/analysis_config_wrapper/analysis_config_wrapper_without_network.py +++ b/ra2ce/analyses/analysis_config_wrapper/analysis_config_wrapper_without_network.py @@ -68,12 +68,12 @@ def from_data( ) else: logging.error(f"Static dir not found. Value provided: {_static_dir}") - _new_analysis_config.config_data["files"] = config_data.files + _new_analysis_config.config_data.files = config_data.files return _new_analysis_config def configure(self) -> None: self.graphs = NetworkConfigWrapper.read_graphs_from_config( - self.config_data["static_path"] / "output_graph" + self.config_data.static_path.joinpath("output_graph") ) self.initialize_output_dirs() diff --git a/tests/analyses/analysis_config_data/readers/test_analysis_config_reader_base.py b/tests/analyses/analysis_config_data/readers/test_analysis_config_reader_base.py index 21d20be79..d9708815b 100644 --- a/tests/analyses/analysis_config_data/readers/test_analysis_config_reader_base.py +++ b/tests/analyses/analysis_config_data/readers/test_analysis_config_reader_base.py @@ -2,6 +2,10 @@ import pytest +from ra2ce.analyses.analysis_config_data.analysis_config_data import ( + AnalysisConfigData, + ProjectSection, +) from ra2ce.analyses.analysis_config_data.readers.analysis_config_reader_base import ( AnalysisConfigReaderBase, ) @@ -24,10 +28,8 @@ def test_copy_output_files_no_file_doesnot_raise( ): # 1. Define test data. _dir_name = "output" - _config_data = { - "root_path": test_results, - "project": {"name": request.node.name}, - } + _config_data = AnalysisConfigData(root_path=test_results) + _config_data.project = ProjectSection(name=request.node.name) _expected_dir = test_results / request.node.name / _dir_name if _expected_dir.exists(): shutil.rmtree(_expected_dir) @@ -43,28 +45,6 @@ def test_copy_output_files_no_file_doesnot_raise( assert _expected_dir.exists() assert not _test_file.exists() - def test_create_config_dir_creates_dir_if_does_not_exist( - self, valid_reader: AnalysisConfigReaderBase, request: pytest.FixtureRequest - ): - # 1. Define test data. - _dir_name = "missing_dir" - _config_data = { - "root_path": test_results, - "project": {"name": request.node.name}, - } - _expected_dir = test_results / request.node.name / _dir_name - if _expected_dir.exists(): - shutil.rmtree(_expected_dir) - assert not _expected_dir.exists() - - # 2. Run test. - valid_reader._create_config_dir(_dir_name, _config_data) - - # 3. Verify expectations. 
- assert _dir_name in _config_data.keys() - assert _config_data[_dir_name].exists() - assert _expected_dir.exists() - def test_parse_path_list_non_existing_file( self, valid_reader: AnalysisConfigReaderBase, request: pytest.FixtureRequest ): diff --git a/tests/analyses/analysis_config_data/test_analysis_config_data.py b/tests/analyses/analysis_config_data/test_analysis_config_data.py index 741854ee8..3cd2e9f15 100644 --- a/tests/analyses/analysis_config_data/test_analysis_config_data.py +++ b/tests/analyses/analysis_config_data/test_analysis_config_data.py @@ -21,25 +21,9 @@ def test_initialize(self): assert isinstance(_config_data, AnalysisConfigDataWithNetwork) assert isinstance(_config_data, AnalysisConfigData) - def test_from_dict(self): - _dict_values = {"the answer": 42} - _config_data = AnalysisConfigDataWithNetwork.from_dict(_dict_values) - - assert isinstance(_config_data, AnalysisConfigDataWithNetwork) - assert isinstance(_config_data, AnalysisConfigDataWithNetwork) - assert _config_data == _dict_values - class TestAnalysisConfigDataWithoutNetwork: def test_initialize(self): _config_data = AnalysisConfigDataWithoutNetwork() assert isinstance(_config_data, AnalysisConfigDataWithoutNetwork) assert isinstance(_config_data, AnalysisConfigData) - - def test_from_dict(self): - _dict_values = {"the answer": 42} - _config_data = AnalysisConfigDataWithoutNetwork.from_dict(_dict_values) - - assert isinstance(_config_data, AnalysisConfigDataWithoutNetwork) - assert isinstance(_config_data, AnalysisConfigDataWithoutNetwork) - assert _config_data == _dict_values diff --git a/tests/analyses/analysis_config_data/test_analysis_config_data_validator_with_network.py b/tests/analyses/analysis_config_data/test_analysis_config_data_validator_with_network.py index 937d15722..12cf2394f 100644 --- a/tests/analyses/analysis_config_data/test_analysis_config_data_validator_with_network.py +++ b/tests/analyses/analysis_config_data/test_analysis_config_data_validator_with_network.py @@ -22,7 +22,7 @@ def test_init_validator(self): assert isinstance(_validator, Ra2ceIoValidator) def test_validate_with_required_headers(self): - _test_config_data = AnalysisConfigDataWithNetwork.from_dict({"project": {}}) + _test_config_data = AnalysisConfigDataWithNetwork(project=None) _validation_report = AnalysisConfigDataValidatorWithNetwork( _test_config_data ).validate() @@ -33,9 +33,9 @@ def test_validate_with_required_headers(self): "output_dict", [ pytest.param(dict(), id="No output given"), - pytest.param(dict(output=None), id="Output is none"), + pytest.param(dict(output_path=None), id="Output is none"), pytest.param( - dict(output=(test_data / "not_a_path.ini")), id="Not a valid path." + dict(output_path=(test_data / "not_a_path.ini")), id="Not a valid path." ), ], ) @@ -43,9 +43,7 @@ def test_validate_without_output_reports_error(self, output_dict: dict): # 1. Define test data. _output_dir = output_dict.get("output_path", None) _expected_error = f"The configuration file 'network.ini' is not found at {_output_dir}.Please make sure to name your network settings file 'network.ini'." - _test_config_data = AnalysisConfigDataWithNetwork.from_dict( - {"project": {}} | output_dict - ) + _test_config_data = AnalysisConfigDataWithNetwork(output_path=_output_dir) # 2. Run test. 
_validation_report = AnalysisConfigDataValidatorWithNetwork( diff --git a/tests/analyses/analysis_config_data/test_analysis_config_data_validator_without_network.py b/tests/analyses/analysis_config_data/test_analysis_config_data_validator_without_network.py index 2bb0a0120..6b47bab30 100644 --- a/tests/analyses/analysis_config_data/test_analysis_config_data_validator_without_network.py +++ b/tests/analyses/analysis_config_data/test_analysis_config_data_validator_without_network.py @@ -4,7 +4,10 @@ import pytest from ra2ce.analyses.analysis_config_data.analysis_config_data import ( + AnalysisConfigData, AnalysisConfigDataWithoutNetwork, + AnalysisSectionDirect, + ProjectSection, ) from ra2ce.analyses.analysis_config_data.analysis_config_data_validator_without_network import ( AnalysisConfigDataValidatorWithoutNetwork, @@ -14,19 +17,20 @@ class TestAnalysisConfigDataValidatorWithoutNetwork: - def _validate_from_dict(self, dict_values: dict) -> ValidationReport: - _test_config_data = AnalysisConfigDataWithoutNetwork.from_dict(dict_values) - _validator = AnalysisConfigDataValidatorWithoutNetwork(_test_config_data) + def _validate_config(self, config_data: AnalysisConfigData) -> ValidationReport: + _validator = AnalysisConfigDataValidatorWithoutNetwork(config_data) return _validator.validate() def test_validate_with_required_headers(self): # 1. Define test data. - _output_test_dir = test_data / "acceptance_test_data" + _output_test_dir = test_data.joinpath("acceptance_test_data") assert _output_test_dir.is_dir() # 2. Run test. - _test_config_data = {"project": {}, "output": _output_test_dir} - _report = self._validate_from_dict(_test_config_data) + _test_config_data = AnalysisConfigData( + project=None, output_path=_output_test_dir + ) + _report = self._validate_config(_test_config_data) # 3. Verify expectations. assert _report.is_valid() @@ -87,21 +91,20 @@ def test_validate_road_types_with_unexpected_road_type_reports_error(self): assert not _report.is_valid() assert len(_report._errors) == 1 - def _validate_headers_from_dict( - self, dict_values: dict, required_headers: list[str] + def _validate_headers( + self, config_data: AnalysisConfigData, required_headers: list[str] ) -> ValidationReport: - _test_config_data = AnalysisConfigDataWithoutNetwork.from_dict(dict_values) - _validator = AnalysisConfigDataValidatorWithoutNetwork(_test_config_data) + _validator = AnalysisConfigDataValidatorWithoutNetwork(config_data) return _validator._validate_headers(required_headers) def test_validate_headers_fails_when_missing_expected_header(self): # 1. Define test data. - _test_config_data = {} + _test_config_data = AnalysisConfigData() _missing_header = "Deltares" _expected_err = f"Property [ {_missing_header} ] is not configured. Add property [ {_missing_header} ] to the *.ini file. " # 2. Run test. - _report = self._validate_headers_from_dict( + _report = self._validate_headers( _test_config_data, required_headers=[_missing_header] ) @@ -109,61 +112,18 @@ def test_validate_headers_fails_when_missing_expected_header(self): assert not _report.is_valid() assert _expected_err in _report._errors - def test_validate_headers_fails_when_wrong_file_value( - self, request: pytest.FixtureRequest - ): - # 1. Define test data. - _required_header = "file_header" - _test_config_data = { - "root_path": test_results, - "project": {"name": request.node.name}, - _required_header: {"polygon": [Path("sth")]}, - } - - # 2. Run test. 
- _report = self._validate_headers_from_dict( - _test_config_data, required_headers=[_required_header] - ) - - # 3. Verify final expectations. - assert not _report.is_valid() - assert len(_report._errors) == 3 - - def test_validate_headers_fails_when_wrong_road_type( + def test_validate_headers_fails_when_invalid_value( self, request: pytest.FixtureRequest ): # 1. Define test data. - _required_header = "road_header" - _test_config_data = { - "root_path": test_results, - "project": {"name": request.node.name}, - _required_header: {"road_types": "not a valid road type"}, - } - - # 2. Run test. - _report = self._validate_headers_from_dict( - _test_config_data, required_headers=[_required_header] + _test_config_data = AnalysisConfigData( + root_path=test_results, + project=ProjectSection(name=request.node.name), + direct=[AnalysisSectionDirect(analysis="invalid_analysis_type")], ) - # 3. Verify final expectations. - assert not _report.is_valid() - assert len(_report._errors) == 2 - - def test_validate_headers_fails_when_unexpected_value( - self, request: pytest.FixtureRequest - ): - # 1. Define test data. - _required_header = "unexpected_value" - _test_config_data = { - "root_path": test_results, - "project": {"name": request.node.name}, - _required_header: {"network_type": "unmapped_value"}, - } - # 2. Run test. - _report = self._validate_headers_from_dict( - _test_config_data, required_headers=[_required_header] - ) + _report = self._validate_headers(_test_config_data, required_headers=["direct"]) # 3. Verify final expectations. assert not _report.is_valid() diff --git a/tests/analyses/analysis_config_wrapper/test_analysis_config_wrapper_factory.py b/tests/analyses/analysis_config_wrapper/test_analysis_config_wrapper_factory.py index 4f9477b7f..e10af99b1 100644 --- a/tests/analyses/analysis_config_wrapper/test_analysis_config_wrapper_factory.py +++ b/tests/analyses/analysis_config_wrapper/test_analysis_config_wrapper_factory.py @@ -65,8 +65,8 @@ def test_given_known_without_network_config_get_analysis_config_returns_expected _ini_file_path = test_data / "simple_inputs" / "analysis.ini" _config_data = AnalysisConfigDataWithoutNetwork() - _config_data["static_path"] = test_data / "simple_inputs" / "static" - assert _config_data["static_path"].is_dir() + _config_data.static_path = test_data / "simple_inputs" / "static" + assert _config_data.static_path.is_dir() _expected_type = AnalysisConfigWrapperWithoutNetwork assert isinstance(_config_data, AnalysisConfigData) diff --git a/tests/analyses/analysis_config_wrapper/test_analysis_config_wrapper_with_network.py b/tests/analyses/analysis_config_wrapper/test_analysis_config_wrapper_with_network.py index 4be363067..0773141ca 100644 --- a/tests/analyses/analysis_config_wrapper/test_analysis_config_wrapper_with_network.py +++ b/tests/analyses/analysis_config_wrapper/test_analysis_config_wrapper_with_network.py @@ -3,7 +3,10 @@ import pytest -from ra2ce.analyses.analysis_config_data.analysis_config_data import AnalysisConfigData +from ra2ce.analyses.analysis_config_data.analysis_config_data import ( + AnalysisConfigData, + AnalysisSectionDirect, +) from ra2ce.analyses.analysis_config_wrapper.analysis_config_wrapper_with_network import ( AnalysisConfigWrapperWithNetwork, ) @@ -74,11 +77,11 @@ def test_initialize_output_dirs_with_valid_data( # 1. 
Define test data _analysis = AnalysisConfigWrapperWithNetwork() _output_dir = test_results / request.node.name - _analysis.config_data = { - "direct": [{"analysis": "test_direct"}], - "indirect": [{"analysis": "test_indirect"}], - "output": _output_dir, - } + _analysis.config_data = AnalysisConfigData(output_path=_output_dir) + _analysis.config_data.direct = [AnalysisSectionDirect(analysis="test_direct")] + _analysis.config_data.indirect = [ + AnalysisSectionDirect(analysis="test_indirect") + ] if _output_dir.exists(): shutil.rmtree(_output_dir) diff --git a/tests/analyses/analysis_config_wrapper/test_analysis_config_wrapper_without_network.py b/tests/analyses/analysis_config_wrapper/test_analysis_config_wrapper_without_network.py index 5bcc22d41..2e2657b7d 100644 --- a/tests/analyses/analysis_config_wrapper/test_analysis_config_wrapper_without_network.py +++ b/tests/analyses/analysis_config_wrapper/test_analysis_config_wrapper_without_network.py @@ -2,7 +2,10 @@ import pytest -from ra2ce.analyses.analysis_config_data.analysis_config_data import AnalysisConfigData +from ra2ce.analyses.analysis_config_data.analysis_config_data import ( + AnalysisConfigData, + AnalysisSectionDirect, +) from ra2ce.analyses.analysis_config_wrapper.analysis_config_wrapper_without_network import ( AnalysisConfigWrapperWithoutNetwork, ) @@ -41,11 +44,11 @@ def test_initialize_output_dirs_with_valid_data( # 1. Define test data _analysis = AnalysisConfigWrapperWithoutNetwork() _output_dir = test_results / request.node.name - _analysis.config_data = { - "direct": [{"analysis": "test_direct"}], - "indirect": [{"analysis": "test_indirect"}], - "output": _output_dir, - } + _analysis.config_data = AnalysisConfigData(output_path=_output_dir) + _analysis.config_data.direct = [AnalysisSectionDirect(analysis="test_direct")] + _analysis.config_data.indirect = [ + AnalysisSectionDirect(analysis="test_indirect") + ] if _output_dir.exists(): shutil.rmtree(_output_dir) diff --git a/tests/analyses/direct/test_analyses_direct.py b/tests/analyses/direct/test_analyses_direct.py index 11a823c05..6a89db6d4 100644 --- a/tests/analyses/direct/test_analyses_direct.py +++ b/tests/analyses/direct/test_analyses_direct.py @@ -1,3 +1,7 @@ +from ra2ce.analyses.analysis_config_data.analysis_config_data import ( + AnalysisConfigData, + AnalysisSectionDirect, +) from ra2ce.analyses.direct.analyses_direct import DirectAnalyses from tests import test_data @@ -10,16 +14,13 @@ def test_init(self): assert isinstance(_analyses, DirectAnalyses) def test_execute(self): - _config = { - "direct": [ - { - "name": "DummyExecute", - "analysis": "", - "save_gpkg": False, - "save_csv": False, - } + _config = AnalysisConfigData( + direct=[ + AnalysisSectionDirect( + name="DummyExecute", analysis="", save_gpkg=False, save_csv=False + ) ], - "output": test_data, - } + output_path=test_data, + ).to_dict() _graphs = {} DirectAnalyses(_config, _graphs).execute() diff --git a/tests/analyses/direct/test_cost_benefit_analysis.py b/tests/analyses/direct/test_cost_benefit_analysis.py index a57e962f6..00a3a967a 100644 --- a/tests/analyses/direct/test_cost_benefit_analysis.py +++ b/tests/analyses/direct/test_cost_benefit_analysis.py @@ -2,6 +2,7 @@ import pandas as pd import pytest +from ra2ce.analyses.analysis_config_data.analysis_config_data import AnalysisConfigData from ra2ce.analyses.direct.cost_benefit_analysis import EffectivenessMeasures from tests import test_data @@ -16,7 +17,7 @@ def __init__(self, config, analysis): class TestCostBenefitAnalysis: def 
test_init_raises_when_file_name_not_defined(self): - _config = {"input": test_data} + _config = AnalysisConfigData(input_path=test_data).to_dict() _analysis = { "return_period": None, "repair_costs": None, @@ -34,7 +35,7 @@ def test_init_raises_when_file_name_not_defined(self): ) def test_init_raises_when_file_name_not_shp(self): - _config = {"input": test_data} + _config = AnalysisConfigData(input_path=test_data).to_dict() _analysis = { "return_period": None, "repair_costs": None, @@ -52,7 +53,7 @@ def test_init_raises_when_file_name_not_shp(self): ) def test_init_raises_when_direct_shp_file_does_not_exist(self): - _config = {"input": test_data} + _config = AnalysisConfigData(input_path=test_data).to_dict() _analysis = { "return_period": None, "repair_costs": None, @@ -69,7 +70,7 @@ def test_init_raises_when_direct_shp_file_does_not_exist(self): ) def test_init_raises_when_effectiveness_measures_does_not_exist(self): - _config = {"input": test_data} + _config = AnalysisConfigData(input_path=test_data).to_dict() _analysis = { "return_period": None, "repair_costs": None, From 07990146623112a442b2c89a7159855d7f73cc8b Mon Sep 17 00:00:00 2001 From: Ardt Klapwijk Date: Fri, 10 Nov 2023 16:12:39 +0100 Subject: [PATCH 08/35] chore: finish analysis config reader plus tests --- .../readers/analysis_config_reader_base.py | 8 +++++--- .../readers/test_analysis_config_reader_base.py | 12 ++++++------ 2 files changed, 11 insertions(+), 9 deletions(-) diff --git a/ra2ce/analyses/analysis_config_data/readers/analysis_config_reader_base.py b/ra2ce/analyses/analysis_config_data/readers/analysis_config_reader_base.py index 3185f042d..63a463074 100644 --- a/ra2ce/analyses/analysis_config_data/readers/analysis_config_reader_base.py +++ b/ra2ce/analyses/analysis_config_data/readers/analysis_config_reader_base.py @@ -160,12 +160,14 @@ def _parse_path_list( _list_paths.append(path_value) continue - _project_name_dir = config_data.root_path / config_data.project.name - abs_path = _project_name_dir / "static" / property_name / path_value + _project_name_dir = config_data.root_path.joinpath(config_data.project.name) + abs_path = _project_name_dir.joinpath("static", property_name, path_value) try: assert abs_path.is_file() except AssertionError: - abs_path = _project_name_dir / "input" / property_name / path_value + abs_path = _project_name_dir.joinpath( + "input", property_name, path_value + ) _list_paths.append(abs_path) return _list_paths diff --git a/tests/analyses/analysis_config_data/readers/test_analysis_config_reader_base.py b/tests/analyses/analysis_config_data/readers/test_analysis_config_reader_base.py index d9708815b..f0475386f 100644 --- a/tests/analyses/analysis_config_data/readers/test_analysis_config_reader_base.py +++ b/tests/analyses/analysis_config_data/readers/test_analysis_config_reader_base.py @@ -28,8 +28,9 @@ def test_copy_output_files_no_file_doesnot_raise( ): # 1. Define test data. _dir_name = "output" - _config_data = AnalysisConfigData(root_path=test_results) - _config_data.project = ProjectSection(name=request.node.name) + _config_data = AnalysisConfigData( + root_path=test_results, project=ProjectSection(name=request.node.name) + ) _expected_dir = test_results / request.node.name / _dir_name if _expected_dir.exists(): shutil.rmtree(_expected_dir) @@ -49,10 +50,9 @@ def test_parse_path_list_non_existing_file( self, valid_reader: AnalysisConfigReaderBase, request: pytest.FixtureRequest ): # 1. Define test data. 
- _config_data = { - "root_path": test_results, - "project": {"name": request.node.name}, - } + _config_data = AnalysisConfigData( + root_path=test_results, project=ProjectSection(name=request.node.name) + ) _prop_name = "a_property" _path_list = "a" _expected_path = test_results / request.node.name / "input" / _prop_name / "a" From 9e1e80948c37fc6287068848f56014228581cd77 Mon Sep 17 00:00:00 2001 From: Ardt Klapwijk Date: Fri, 10 Nov 2023 17:22:22 +0100 Subject: [PATCH 09/35] chore: finish analysis config reader plus tests --- .../analysis_config_data.py | 23 +++++++++++++++++++ .../analysis_config_reader_without_network.py | 6 ++--- ...analysis_config_wrapper_without_network.py | 2 +- 3 files changed, 26 insertions(+), 5 deletions(-) diff --git a/ra2ce/analyses/analysis_config_data/analysis_config_data.py b/ra2ce/analyses/analysis_config_data/analysis_config_data.py index f4bb99f54..8cc637945 100644 --- a/ra2ce/analyses/analysis_config_data/analysis_config_data.py +++ b/ra2ce/analyses/analysis_config_data/analysis_config_data.py @@ -28,6 +28,10 @@ from typing import Optional from ra2ce.common.configuration.config_data_protocol import ConfigDataProtocol +from ra2ce.graph.network_config_data.network_config_data import ( + NetworkSection, + OriginsDestinationsSection, +) @dataclass @@ -39,6 +43,14 @@ class ProjectSection: class AnalysisSectionIndirect: name: str = "" analysis: str = "" # should be enum + disruption_per_category: str = "" + duration_event: float = math.nan + duration_disruption: float = math.nan + fraction_detour: float = math.nan + fraction_drivethrough: float = math.nan + rest_capacity: float = math.nan + maximum_jam: float = math.nan + partofday: str = "" aggregate_wl: str = "" # should be enum threshold: float = math.nan weighing: str = "" # should be enum @@ -47,6 +59,7 @@ class AnalysisSectionIndirect: buffer_meters: float = math.nan category_field_name: str = "" file_name: Path = None + save_traffic: bool = False save_gpkg: bool = False save_csv: bool = False @@ -55,6 +68,12 @@ class AnalysisSectionIndirect: class AnalysisSectionDirect: name: str = "" analysis: str = "" # should be enum + return_period: float = math.nan + repair_costs: float = math.nan + evaluation_period: float = math.nan + interest_rate: float = math.nan + climate_factor: float = math.nan + climate_period: float = math.nan damage_curve: str = "" event_type: str = "" risk_calculation: str = "" @@ -81,6 +100,10 @@ class AnalysisConfigData(ConfigDataProtocol): direct: list[AnalysisSectionDirect] = field(default_factory=list) indirect: list[AnalysisSectionIndirect] = field(default_factory=list) files: Optional[dict[str, Path]] = field(default_factory=dict) + origins_destinations: Optional[OriginsDestinationsSection] = field( + default_factory=lambda: OriginsDestinationsSection() + ) + network: Optional[NetworkSection] = field(default_factory=lambda: NetworkSection()) def to_dict(self) -> dict: _dict = self.__dict__ diff --git a/ra2ce/analyses/analysis_config_data/readers/analysis_config_reader_without_network.py b/ra2ce/analyses/analysis_config_data/readers/analysis_config_reader_without_network.py index 5a7940141..5835e2d77 100644 --- a/ra2ce/analyses/analysis_config_data/readers/analysis_config_reader_without_network.py +++ b/ra2ce/analyses/analysis_config_data/readers/analysis_config_reader_without_network.py @@ -60,11 +60,9 @@ def read(self, ini_file: Path) -> AnalysisConfigDataWithoutNetwork: _output_network_ini_file = _config_data.output_path.joinpath("network.ini") _network_config = 
NetworkConfigDataReader().read(_output_network_ini_file) _config_data.update(_network_config.to_dict()) - _network = _config_data.get("network", None) + _network = _config_data.network if _network: - _config_data.origins_destinations = _network.get( - "origins_destinations", None - ) + _config_data.origins_destinations = _network.origins_destinations else: logging.warn(f"Not found network key for the Analysis {ini_file}") diff --git a/ra2ce/analyses/analysis_config_wrapper/analysis_config_wrapper_without_network.py b/ra2ce/analyses/analysis_config_wrapper/analysis_config_wrapper_without_network.py index 5e9bd9be8..a2d271698 100644 --- a/ra2ce/analyses/analysis_config_wrapper/analysis_config_wrapper_without_network.py +++ b/ra2ce/analyses/analysis_config_wrapper/analysis_config_wrapper_without_network.py @@ -61,7 +61,7 @@ def from_data( raise FileNotFoundError(ini_file) _new_analysis_config.ini_file = ini_file _new_analysis_config.config_data = config_data - _static_dir = config_data.get("static_path", None) + _static_dir = config_data.static_path if _static_dir and _static_dir.is_dir(): config_data.files = NetworkConfigWrapper._get_existent_network_files( _static_dir / "output_graph" From 267cbabefadf0d25f2e77f12862f381878e6e956 Mon Sep 17 00:00:00 2001 From: Ardt Klapwijk Date: Fri, 10 Nov 2023 17:22:57 +0100 Subject: [PATCH 10/35] chore: adjust analyses modules --- ra2ce/analyses/direct/analyses_direct.py | 81 +++-- .../analyses/direct/cost_benefit_analysis.py | 47 +-- ra2ce/analyses/direct/road_damage.py | 4 +- ra2ce/analyses/indirect/analyses_indirect.py | 338 +++++++++--------- ra2ce/analyses/indirect/losses.py | 31 +- .../indirect/origin_closest_destination.py | 49 +-- .../configuration/config_wrapper_protocol.py | 4 +- ra2ce/graph/hazard/hazard_overlay.py | 2 +- .../network_config_data.py | 1 + ra2ce/runners/direct_analysis_runner.py | 4 +- ra2ce/runners/indirect_analysis_runner.py | 4 +- tests/analyses/direct/test_analyses_direct.py | 2 +- .../direct/test_cost_benefit_analysis.py | 93 ++--- .../indirect/test_analyses_indirect.py | 5 +- tests/analyses/indirect/test_losses.py | 65 ++-- .../test_origin_closest_destination.py | 18 +- 16 files changed, 392 insertions(+), 356 deletions(-) diff --git a/ra2ce/analyses/direct/analyses_direct.py b/ra2ce/analyses/direct/analyses_direct.py index 09b6da39f..4c5c61720 100644 --- a/ra2ce/analyses/direct/analyses_direct.py +++ b/ra2ce/analyses/direct/analyses_direct.py @@ -22,9 +22,15 @@ import logging import time +from pathlib import Path +from typing import Any import geopandas as gpd +from ra2ce.analyses.analysis_config_data.analysis_config_data import ( + AnalysisConfigData, + AnalysisSectionDirect, +) from ra2ce.analyses.direct.cost_benefit_analysis import EffectivenessMeasures from ra2ce.analyses.direct.damage.manual_damage_functions import ManualDamageFunctions from ra2ce.analyses.direct.damage_calculation import ( @@ -43,7 +49,10 @@ class DirectAnalyses: ### THIS SHOULD ONLY DO COORDINATION """ - def __init__(self, config, graphs): + config: AnalysisConfigData + graphs: dict + + def __init__(self, config: AnalysisConfigData, graphs: dict): self.config = config self.graphs = graphs @@ -56,39 +65,43 @@ def execute(self): """ - for analysis in self.config["direct"]: + for analysis in self.config.direct: logging.info( - f"----------------------------- Started analyzing '{analysis['name']}' -----------------------------" + f"----------------------------- Started analyzing '{analysis.name}' -----------------------------" ) starttime = 
time.time() - if analysis["analysis"] == "direct": + if analysis.analysis == "direct": gdf = self.road_damage( analysis ) # calls the coordinator for road damage calculation - elif analysis["analysis"] == "effectiveness_measures": + elif analysis.analysis == "effectiveness_measures": gdf = self.effectiveness_measures(analysis) else: gdf = [] - output_path = self.config["output_path"] / analysis["analysis"] - if analysis["save_gpkg"]: - shp_path = output_path / (analysis["name"].replace(" ", "_") + ".gpkg") + _output_path = self.config.output_path.joinpath(analysis.analysis) + if analysis.save_gpkg: + shp_path = _output_path.joinpath( + analysis.name.replace(" ", "_") + ".gpkg" + ) save_gdf(gdf, shp_path) - if analysis["save_csv"]: - csv_path = output_path / (analysis["name"].replace(" ", "_") + ".csv") + if analysis.save_csv: + csv_path = _output_path.joinpath( + analysis.name.replace(" ", "_") + ".csv" + ) del gdf["geometry"] gdf.to_csv(csv_path, index=False) endtime = time.time() logging.info( - f"----------------------------- Analysis '{analysis['name']}' finished. " + f"----------------------------- Analysis '{analysis.name}' finished. " f"Time: {str(round(endtime - starttime, 2))}s -----------------------------" ) - def road_damage(self, analysis: dict) -> gpd.GeoDataFrame: + def road_damage(self, analysis: AnalysisSectionDirect) -> gpd.GeoDataFrame: """ ### CONTROLER FOR CALCULATING THE ROAD DAMAGE @@ -108,7 +121,7 @@ def road_damage(self, analysis: dict) -> gpd.GeoDataFrame: road_gdf = self.graphs["base_network_hazard"] if self.graphs["base_network_hazard"] is None: - road_gdf = gpd.read_feather(self.config["files"]["base_network_hazard"]) + road_gdf = gpd.read_feather(self.config.files["base_network_hazard"]) road_gdf.columns = rename_road_gdf_to_conventions(road_gdf.columns) @@ -118,19 +131,19 @@ def road_damage(self, analysis: dict) -> gpd.GeoDataFrame: ] # Read the desired damage function - damage_function = analysis["damage_curve"] + damage_function = analysis.damage_curve # If you want to use manual damage functions, these need to be loaded first manual_damage_functions = None - if analysis["damage_curve"] == "MAN": + if analysis.damage_curve == "MAN": manual_damage_functions = ManualDamageFunctions() manual_damage_functions.find_damage_functions( - folder=(self.config["input_path"] / "damage_functions") + folder=(self.config.input_path.joinpath("damage_functions")) ) manual_damage_functions.load_damage_functions() # Choose between event or return period based analysis - if analysis["event_type"] == "event": + if analysis.event_type == "event": event_gdf = DamageNetworkEvents(road_gdf, val_cols) event_gdf.main( damage_function=damage_function, @@ -139,7 +152,7 @@ def road_damage(self, analysis: dict) -> gpd.GeoDataFrame: return event_gdf.gdf - elif analysis["event_type"] == "return_period": + elif analysis.event_type == "return_period": return_period_gdf = DamageNetworkReturnPeriods(road_gdf, val_cols) return_period_gdf.main( damage_function=damage_function, @@ -147,9 +160,9 @@ def road_damage(self, analysis: dict) -> gpd.GeoDataFrame: ) if "risk_calculation" in analysis: # Check if risk_calculation is demanded - if analysis["risk_calculation"] != "none": + if analysis.risk_calculation != "none": return_period_gdf.control_risk_calculation( - mode=analysis["risk_calculation"] + mode=analysis.risk_calculation ) else: @@ -162,39 +175,39 @@ def road_damage(self, analysis: dict) -> gpd.GeoDataFrame: raise ValueError( "The hazard calculation does not know what to do if the analysis 
specifies {}".format( - analysis["event_type"] + analysis.event_type ) ) - def effectiveness_measures(self, analysis): + def effectiveness_measures(self, analysis: AnalysisSectionDirect): """This function calculated the efficiency of measures. Input is a csv file with efficiency and a list of different aspects you want to check. """ em = EffectivenessMeasures(self.config, analysis) effectiveness_dict = em.load_effectiveness_table( - self.config["input_path"] / "direct" + self.config.input_path.joinpath("direct") ) if self.graphs["base_network_hazard"] is None: - gdf_in = gpd.read_feather(self.config["files"]["base_network_hazard"]) + gdf_in = gpd.read_feather(self.config.files.base_network_hazard) - if analysis["create_table"] is True: + if analysis.create_table is True: df = em.create_feature_table( - self.config["input_path"] / "direct" / analysis["file_name"] + self.config.input_path.joinpath("direct", analysis.file_name) ) else: df = em.load_table( - self.config["input_path"] / "direct", - analysis["file_name"].replace(".gpkg", ".csv"), + self.config.input_path.joinpath("direct"), + analysis.file_name.replace(".gpkg", ".csv"), ) df = em.calculate_strategy_effectiveness(df, effectiveness_dict) df = em.knmi_correction(df) df_cba, costs_dict = em.cost_benefit_analysis(effectiveness_dict) df_cba.round(2).to_csv( - self.config["output_path"] - / analysis["analysis"] - / "cost_benefit_analysis.csv", + self.config.output_path.joinpath( + analysis.analysis, "cost_benefit_analysis.csv" + ), decimal=",", sep=";", index=False, @@ -204,7 +217,7 @@ def effectiveness_measures(self, analysis): df_costs = em.calculate_strategy_costs(df, costs_dict) df_costs = df_costs.astype(float).round(2) df_costs.to_csv( - self.config["output_path"] / analysis["analysis"] / "output_analysis.csv", + self.config.output_path.joinpath(analysis.analysis, "output_analysis.csv"), decimal=",", sep=";", index=False, @@ -214,7 +227,7 @@ def effectiveness_measures(self, analysis): return gdf -def save_gdf(gdf, save_path): +def save_gdf(gdf: gpd.GeoDataFrame, save_path: Path): """Takes in a geodataframe object and outputs shapefiles at the paths indicated by edge_shp and node_shp Arguments: @@ -235,7 +248,7 @@ def save_gdf(gdf, save_path): logging.info("Results saved to: {}".format(save_path)) -def rename_road_gdf_to_conventions(road_gdf_columns): +def rename_road_gdf_to_conventions(road_gdf_columns: Any) -> Any: """ Rename the columns in the road_gdf to the conventions of the ra2ce documentation diff --git a/ra2ce/analyses/direct/cost_benefit_analysis.py b/ra2ce/analyses/direct/cost_benefit_analysis.py index 3b3f19bb8..73ecf0cdd 100644 --- a/ra2ce/analyses/direct/cost_benefit_analysis.py +++ b/ra2ce/analyses/direct/cost_benefit_analysis.py @@ -28,51 +28,54 @@ import numpy as np import pandas as pd +from ra2ce.analyses.analysis_config_data.analysis_config_data import ( + AnalysisConfigData, + AnalysisSectionDirect, +) + class EffectivenessMeasures: """This is a namespace for methods to calculate effectiveness of measures""" - def __init__(self, config, analysis): + def __init__(self, config: AnalysisConfigData, analysis: AnalysisSectionDirect): self.analysis = analysis self.config = config - self.return_period = analysis["return_period"] # years - self.repair_costs = analysis["repair_costs"] # euro - self.evaluation_period = analysis["evaluation_period"] # years - self.interest_rate = analysis["interest_rate"] / 100 # interest rate - self.climate_factor = analysis["climate_factor"] / analysis["climate_period"] + 
self.return_period = analysis.return_period # years + self.repair_costs = analysis.repair_costs # euro + self.evaluation_period = analysis.evaluation_period # years + self.interest_rate = analysis.interest_rate / 100 # interest rate + self.climate_factor = analysis.climate_factor / analysis.climate_period self.btw = 1.21 # VAT multiplication factor to include taxes # perform checks on input while initializing class - self._validate_input_params(self.analysis, self.config) + self._validate_input_params(self.config, self.analysis) - def _validate_input_params(self, analysis: dict, config: dict) -> None: - if analysis["file_name"] is None: + def _validate_input_params( + self, config: AnalysisConfigData, analysis: AnalysisSectionDirect + ) -> None: + if analysis.file_name is None: _error = "Effectiveness of measures calculation: No input file configured. Please define an input file in the analysis.ini file." logging.error(_error) raise ValueError(_error) - elif analysis["file_name"].split(".")[1] != "shp": + elif analysis.file_name.split(".")[1] != "shp": _error = "Effectiveness of measures calculation: Wrong input file configured. Extension of input file is -{}-, needs to be -shp- (shapefile)".format( - analysis["file_name"].split(".")[1] + analysis.file_name.split(".")[1] ) logging.error(_error) raise ValueError(_error) - elif not (config["input_path"] / "direct" / analysis["file_name"]).exists(): + elif not (config.input_path / "direct" / analysis.file_name).exists(): _error = "Effectiveness of measures calculation: Input file doesn't exist please place file in the following folder: {}".format( - config["input_path"] / "direct" + config.input_path / "direct" ) logging.error(_error) - raise FileNotFoundError( - config["input_path"] / "direct" / analysis["file_name"] - ) - elif not ( - config["input_path"] / "direct" / "effectiveness_measures.csv" - ).exists(): + raise FileNotFoundError(config.input_path / "direct" / analysis.file_name) + elif not (config.input_path / "direct" / "effectiveness_measures.csv").exists(): _error = "Effectiveness of measures calculation: lookup table with effectiveness of measures doesnt exist. 
Please place the effectiveness_measures.csv file in the following folder: {}".format( - config["input_path"] / "direct" + config.input_path / "direct" ) logging.error(_error) raise FileNotFoundError( - config["input_path"] / "direct" / "effectiveness_measures.csv" + config.input_path / "direct" / "effectiveness_measures.csv" ) @staticmethod @@ -238,7 +241,7 @@ def calculate_strategy_effectiveness(self, df, effectiveness_dict): df_total = self.calculate_effectiveness(df, name="standard") df_blockage = pd.read_csv( - self.config["input_path"] / "direct" / "blockage_costs.csv" + self.config.input_path / "direct" / "blockage_costs.csv" ) df_total = df_total.merge(df_blockage, how="left", on="LinkNr") df_total["length"] = df_total[ diff --git a/ra2ce/analyses/direct/road_damage.py b/ra2ce/analyses/direct/road_damage.py index 4053c6479..ea9c3a389 100644 --- a/ra2ce/analyses/direct/road_damage.py +++ b/ra2ce/analyses/direct/road_damage.py @@ -22,9 +22,11 @@ import logging +from ra2ce.analyses.analysis_config_data.analysis_config_data import AnalysisConfigData + class DirectAnalyses: - def __init__(self, config): + def __init__(self, config: AnalysisConfigData): self.config = config def execute(self): diff --git a/ra2ce/analyses/indirect/analyses_indirect.py b/ra2ce/analyses/indirect/analyses_indirect.py index f50a5e86f..1e7530d0b 100644 --- a/ra2ce/analyses/indirect/analyses_indirect.py +++ b/ra2ce/analyses/indirect/analyses_indirect.py @@ -24,7 +24,7 @@ import logging import time from pathlib import Path -from typing import List, Tuple +from typing import Any, List, Tuple import geopandas as gpd import networkx as nx @@ -35,6 +35,10 @@ from shapely.geometry import LineString, MultiLineString from tqdm import tqdm +from ra2ce.analyses.analysis_config_data.analysis_config_data import ( + AnalysisConfigData, + AnalysisSectionIndirect, +) from ra2ce.analyses.indirect.losses import Losses from ra2ce.analyses.indirect.origin_closest_destination import OriginClosestDestination from ra2ce.analyses.indirect.traffic_analysis.traffic_analysis_factory import ( @@ -52,21 +56,23 @@ class IndirectAnalyses: graphs: A dictionary with one or multiple NetworkX graphs. """ + config: AnalysisConfigData + graphs: dict + _file_name_key = "File name" _ra2ce_name_key = "RA2CE name" - def __init__(self, config, graphs): + def __init__(self, config: AnalysisConfigData, graphs: list[Any]): self.config = config self.graphs = graphs - if self.config["output_path"].joinpath("hazard_names.xlsx").is_file(): + if self.config.output_path.joinpath("hazard_names.xlsx").is_file(): self.hazard_names = pd.read_excel( - self.config["output_path"].joinpath("hazard_names.xlsx") - ) - self.config["hazard_names"] = list( - set(self.hazard_names[self._file_name_key]) + self.config.output_path.joinpath("hazard_names.xlsx") ) + # TODO Ardt hazard_names + self.config.hazard_names = list(set(self.hazard_names[self._file_name_key])) - def single_link_redundancy(self, graph, analysis): + def single_link_redundancy(self, graph, analysis: AnalysisSectionIndirect): """This is the function to analyse roads with a single link disruption and an alternative route. 
Args: @@ -75,7 +81,7 @@ def single_link_redundancy(self, graph, analysis): """ # TODO adjust to the right names of the RA2CE tool # if 'road_usage_data_path' in InputDict: - # road_usage_data = pd.read_excel(InputDict['road_usage_data_path']) + # road_usage_data = pd.read_excel(InputDict.road_usage_data_path) # road_usage_data.dropna(axis=0, how='all', subset=['vehicle_type'], inplace=True) # aadt_names = [aadt_name for aadt_name in road_usage_data['attribute_name'] if aadt_name == aadt_name] # else: @@ -100,7 +106,7 @@ def single_link_redundancy(self, graph, analysis): if nx.has_path(graph, u, v): # calculate the alternative distance if that edge is unavailable alt_dist = nx.dijkstra_path_length( - graph, u, v, weight=analysis["weighing"] + graph, u, v, weight=analysis.weighing ) alt_dist_list.append(alt_dist) @@ -109,7 +115,7 @@ def single_link_redundancy(self, graph, analysis): alt_nodes_list.append(alt_nodes) # calculate the difference in distance - dif_dist_list.append(alt_dist - data[analysis["weighing"]]) + dif_dist_list.append(alt_dist - data[analysis.weighing]) detour_exist_list.append(1) else: @@ -132,27 +138,27 @@ def single_link_redundancy(self, graph, analysis): return gdf - def single_link_losses(self, gdf: gpd.GeoDataFrame, analysis: dict): + def single_link_losses( + self, gdf: gpd.GeoDataFrame, analysis: AnalysisSectionIndirect + ): """Calculates single link disruption losses. Args: gdf: The network in GeoDataFrame format. analysis: Dictionary of the configurations for the analysis. """ - losses_fn = ( - self.config["static_path"] / "hazard" / analysis["loss_per_distance"] + losses_fn = self.config.static_path.joinpath( + "hazard", analysis.loss_per_distance ) losses_df = pd.read_excel(losses_fn, sheet_name="Sheet1") - if analysis["loss_type"] == "uniform": + if analysis.loss_type == "uniform": # assume uniform threshold for disruption self._single_link_losses_uniform(gdf, analysis, losses_df) - if analysis["loss_type"] == "categorized": - _disruption_file = ( - self.config["static_path"] - / "hazard" - / analysis["disruption_per_category"] + if analysis.loss_type == "categorized": + _disruption_file = self.config.static_path.joinpath( + "hazard", analysis.disruption_per_category ) _disruption_df = pd.read_excel(_disruption_file, sheet_name="Sheet1") self._single_link_losses_categorized( @@ -162,10 +168,13 @@ def single_link_losses(self, gdf: gpd.GeoDataFrame, analysis: dict): return gdf def _single_link_losses_uniform( - self, gdf: gpd.GeoDataFrame, analysis: dict, losses_df: pd.DataFrame + self, + gdf: gpd.GeoDataFrame, + analysis: AnalysisSectionIndirect, + losses_df: pd.DataFrame, ): - for hz in self.config["hazard_names"]: - for col in analysis["traffic_cols"].split(","): + for hz in self.config.hazard_names: + for col in analysis.traffic_cols.split(","): try: assert gdf[col + "_detour_losses"] assert gdf[col + "_nodetour_losses"] @@ -175,31 +184,27 @@ def _single_link_losses_uniform( # detour_losses = traffic_per_day[veh/day] * detour_distance[meter] * cost_per_meter[USD/meter/vehicle] * duration_disruption[hour] / 24[hour/day] gdf.loc[ (gdf["detour"] == 1) - & ( - gdf[hz + "_" + analysis["aggregate_wl"]] > analysis["threshold"] - ), + & (gdf[hz + "_" + analysis.aggregate_wl] > analysis.threshold), col + "_detour_losses", ] += ( gdf[col] * gdf["diff_dist"] * losses_df.loc[losses_df["traffic_class"] == col, "cost"].values[0] - * analysis["uniform_duration"] + * analysis.uniform_duration / 24 ) # no_detour_losses = traffic_per_day[veh/day] * occupancy[person/veh] * 
gdp_percapita_per_day[USD/person] * duration_disruption[hour] / 24[hour/day] gdf.loc[ (gdf["detour"] == 0) - & ( - gdf[hz + "_" + analysis["aggregate_wl"]] > analysis["threshold"] - ), + & (gdf[hz + "_" + analysis.aggregate_wl] > analysis.threshold), col + "_nodetour_losses", ] += ( gdf[col] * losses_df.loc[ losses_df["traffic_class"] == col, "occupancy" ].values[0] - * analysis["gdp_percapita"] - * analysis["uniform_duration"] + * analysis.gdp_percapita + * analysis.uniform_duration / 24 ) gdf["total_losses_" + hz] = np.nansum( @@ -210,12 +215,12 @@ def _single_link_losses_uniform( def _single_link_losses_categorized( self, gdf: gpd.GeoDataFrame, - analysis: dict, + analysis: AnalysisSectionIndirect, losses_df: pd.DataFrame, disruption_df: pd.DataFrame, ): _road_classes = [x for x in disruption_df.columns if "class" in x] - for hz in self.config["hazard_names"]: + for hz in self.config.hazard_names: disruption_df["class_identifier"] = "" gdf["class_identifier"] = "" for i, road_class in enumerate(_road_classes): @@ -235,8 +240,8 @@ def _single_link_losses_categorized( ub = 1e10 for road_cat in _all_road_categories: gdf.loc[ - (gdf[hz + "_" + analysis["aggregate_wl"]] > lb) - & (gdf[hz + "_" + analysis["aggregate_wl"]] <= ub) + (gdf[hz + "_" + analysis.aggregate_wl] > lb) + & (gdf[hz + "_" + analysis.aggregate_wl] <= ub) & (gdf["class_identifier"] == road_cat), "duration_disruption", ] = disruption_df_.loc[ @@ -246,7 +251,7 @@ def _single_link_losses_categorized( 0 ] - for col in analysis["traffic_cols"].split(","): + for col in analysis.traffic_cols.split(","): try: assert gdf[col + "_detour_losses"] assert gdf[col + "_nodetour_losses"] @@ -267,7 +272,7 @@ def _single_link_losses_categorized( * losses_df.loc[ losses_df["traffic_class"] == col, "occupancy" ].values[0] - * analysis["gdp_percapita"] + * analysis.gdp_percapita * gdf["duration_disruption"] / 24 ) @@ -276,7 +281,7 @@ def _single_link_losses_categorized( axis=1, ) - def multi_link_redundancy(self, graph, analysis): + def multi_link_redundancy(self, graph: dict, analysis: AnalysisSectionIndirect): """Calculates the multi-link redundancy of a NetworkX graph. The function removes all links of a variable that have a minimum value @@ -292,7 +297,7 @@ def multi_link_redundancy(self, graph, analysis): """ results = [] master_graph = copy.deepcopy(graph) - for hazard in self.config["hazard_names"]: + for hazard in self.config.hazard_names: hazard_name = self.hazard_names.loc[ self.hazard_names[self._file_name_key] == hazard, self._ra2ce_name_key ].values[0] @@ -310,7 +315,7 @@ def multi_link_redundancy(self, graph, analysis): edges_remove = [ e for e in edges_remove - if (e[-1][hazard_name] > float(analysis["threshold"])) + if (e[-1][hazard_name] > float(analysis.threshold)) & ("bridge" not in e[-1]) ] @@ -328,7 +333,7 @@ def multi_link_redundancy(self, graph, analysis): if nx.has_path(graph, u, v): # calculate the alternative distance if that edge is unavailable alt_dist = nx.dijkstra_path_length( - graph, u, v, weight=analysis["weighing"] + graph, u, v, weight=analysis.weighing ) # save alternative route nodes @@ -367,7 +372,7 @@ def multi_link_redundancy(self, graph, analysis): # previously here you would find if dist == dist which is a critical bug. Replaced by just verifying dist is a value. 
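# Worked example of the detour-delta ("diff_dist") step that follows — a minimal
# sketch with hypothetical values, where None stands for "no alternative route":
import numpy as np

_alt_dist = [1500.0, None]   # shortest alternative route per disrupted edge
_orig_len = [1000.0, 800.0]  # original value of the weighing attribute (e.g. length)
_diff_dist = [
    dist - length if dist else np.NaN  # extra distance imposed by the detour
    for (dist, length) in zip(_alt_dist, _orig_len)
]
# _diff_dist == [500.0, nan]; NaN marks an edge whose endpoints become disconnected.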
gdf["diff_dist"] = [ dist - length if dist else np.NaN - for (dist, length) in zip(gdf["alt_dist"], gdf[analysis["weighing"]]) + for (dist, length) in zip(gdf["alt_dist"], gdf[analysis.weighing]) ] gdf["hazard"] = hazard_name @@ -377,7 +382,7 @@ def multi_link_redundancy(self, graph, analysis): aggregated_results = pd.concat(results, ignore_index=True) return aggregated_results - def multi_link_losses(self, gdf, analysis): + def multi_link_losses(self, gdf, analysis: AnalysisSectionIndirect): """Calculates the multi-link redundancy losses of a NetworkX graph. The function removes all links of a variable that have a minimum value @@ -391,31 +396,29 @@ def multi_link_losses(self, gdf, analysis): Returns: aggregated_results (GeoDataFrame): The results of the analysis aggregated into a table. """ - losses_fn = ( - self.config["static_path"] / "hazard" / analysis["loss_per_distance"] + losses_fn = self.config.static_path.joinpath( + "hazard", analysis.loss_per_distance ) losses_df = pd.read_excel(losses_fn, sheet_name="Sheet1") - if analysis["loss_type"] == "categorized": - disruption_fn = ( - self.config["static_path"] - / "hazard" - / analysis["disruption_per_category"] + if analysis.loss_type == "categorized": + disruption_fn = self.config.static_path.joinpath( + "hazard", analysis.disruption_per_category ) disruption_df = pd.read_excel(disruption_fn, sheet_name="Sheet1") road_classes = [x for x in disruption_df.columns if "class" in x] results = [] - for hazard in self.config["hazard_names"]: + for hazard in self.config.hazard_names: hazard_name = self.hazard_names.loc[ self.hazard_names[self._file_name_key] == hazard, self._ra2ce_name_key ].values[0] gdf_ = gdf.loc[gdf["hazard"] == hazard_name].copy() if ( - analysis["loss_type"] == "uniform" + analysis.loss_type == "uniform" ): # assume uniform threshold for disruption - for col in analysis["traffic_cols"].split(","): + for col in analysis.traffic_cols.split(","): # detour_losses = traffic_per_day[veh/day] * detour_distance[meter] * cost_per_meter[USD/meter/vehicle] * duration_disruption[hour] / 24[hour/day] gdf_.loc[gdf_["connected"] == 1, col + "_losses_detour"] = ( gdf_[col] @@ -423,7 +426,7 @@ def multi_link_losses(self, gdf, analysis): * losses_df.loc[ losses_df["traffic_class"] == col, "cost" ].values[0] - * analysis["uniform_duration"] + * analysis.uniform_duration / 24 ) # no_detour_losses = traffic_per_day[veh/day] * occupancy_per_vehicle[person/veh] * duration_disruption[hour] / 24[hour/day] * gdp_percapita_per_day [USD/person] @@ -432,8 +435,8 @@ def multi_link_losses(self, gdf, analysis): * losses_df.loc[ losses_df["traffic_class"] == col, "occupancy" ].values[0] - * analysis["gdp_percapita"] - * analysis["uniform_duration"] + * analysis.gdp_percapita + * analysis.uniform_duration / 24 ) gdf_["total_losses_" + hz] = np.nansum( @@ -448,7 +451,7 @@ def multi_link_losses(self, gdf, analysis): ) if ( - analysis["loss_type"] == "categorized" + analysis.loss_type == "categorized" ): # assume different disruption type depending on flood depth and road types disruption_df["class_identifier"] = "" gdf_["class_identifier"] = "" @@ -471,8 +474,8 @@ def multi_link_losses(self, gdf, analysis): ub = 1e10 for road_cat in all_road_categories: gdf_.loc[ - (gdf_[hz + "_" + analysis["aggregate_wl"]] > lb) - & (gdf_[hz + "_" + analysis["aggregate_wl"]] <= ub) + (gdf_[hz + "_" + analysis.aggregate_wl] > lb) + & (gdf_[hz + "_" + analysis.aggregate_wl] <= ub) & (gdf_["class_identifier"] == road_cat), "duration_disruption", ] = disruption_df_.loc[ @@ 
-482,7 +485,7 @@ def multi_link_losses(self, gdf, analysis): 0 ] - for col in analysis["traffic_cols"].split(","): + for col in analysis.traffic_cols.split(","): # detour_losses = traffic_per_day[veh/day] * detour_distance[meter] * cost_per_meter[USD/meter/vehicle] * duration_disruption[hour] / 24[hour/day] gdf_.loc[gdf_["connected"] == 1, col + "_losses_detour"] = ( gdf_[col] @@ -499,7 +502,7 @@ def multi_link_losses(self, gdf, analysis): * losses_df.loc[ losses_df["traffic_class"] == col, "occupancy" ].values[0] - * analysis["gdp_percapita"] + * analysis.gdp_percapita * gdf_["duration_disruption"] / 24 ) @@ -543,7 +546,7 @@ def extract_od_nodes_from_graph( def _get_origin_destination_pairs( self, graph: nx.classes.MultiGraph ) -> list[tuple[int, str], tuple[int, str]]: - od_path = self.config["static_path"].joinpath( + od_path = self.config.static_path.joinpath( "output_graph", "origin_destination_table.feather" ) od = gpd.read_feather(od_path) @@ -573,14 +576,14 @@ def _get_origin_destination_pairs( return od_nodes def optimal_route_origin_destination( - self, graph: nx.classes.MultiGraph, analysis: dict + self, graph: nx.classes.MultiGraph, analysis: AnalysisSectionIndirect ) -> gpd.GeoDataFrame: # create list of origin-destination pairs od_nodes = self._get_origin_destination_pairs(graph) - pref_routes = find_route_ods(graph, od_nodes, analysis["weighing"]) + pref_routes = find_route_ods(graph, od_nodes, analysis.weighing) # if shortest_route: - # pref_routes = pref_routes.loc[pref_routes.sort_values(analysis['weighing']).groupby('o_node').head(3).index] + # pref_routes = pref_routes.loc[pref_routes.sort_values(analysis.weighing).groupby('o_node').head(3).index] return pref_routes def optimal_route_od_link( @@ -592,16 +595,16 @@ def optimal_route_od_link( return TrafficAnalysisFactory.get_analysis( road_network, od_table, - self.config["origins_destinations"]["destinations_names"], + self.config.origins_destinations.destinations_names, equity, ).optimal_route_od_link() - def multi_link_origin_destination(self, graph, analysis): + def multi_link_origin_destination(self, graph, analysis: AnalysisSectionIndirect): """Calculates the connectivity between origins and destinations""" od_nodes = self._get_origin_destination_pairs(graph) all_results = [] - for hazard in self.config["hazard_names"]: + for hazard in self.config.hazard_names: hazard_name = self.hazard_names.loc[ self.hazard_names[self._file_name_key] == hazard, self._ra2ce_name_key ].values[0] @@ -616,7 +619,7 @@ def multi_link_origin_destination(self, graph, analysis): edges_remove = [ e for e in edges_remove - if (e[-1][hazard_name] > float(analysis["threshold"])) + if (e[-1][hazard_name] > float(analysis.threshold)) & ("bridge" not in e[-1]) ] graph_hz.remove_edges_from(edges_remove) @@ -625,7 +628,7 @@ def multi_link_origin_destination(self, graph, analysis): # igraph_hz = ig.Graph.from_networkx(igraph_hz) # Find the routes - od_routes = find_route_ods(graph_hz, od_nodes, analysis["weighing"]) + od_routes = find_route_ods(graph_hz, od_nodes, analysis.weighing) od_routes["hazard"] = hazard_name all_results.append(od_routes) @@ -786,7 +789,7 @@ def multi_link_origin_destination_regional_impact(self, gdf_ori): gdf_ori_ = gdf_ori.copy() # read origin points - origin_fn = Path(self.config["static_path"]).joinpath( + origin_fn = Path(self.config.static_path).joinpath( "output_graph", "origin_destination_table.gpkg" ) origin = gpd.read_file(origin_fn, engine="pyogrio") @@ -842,7 +845,7 @@ def 
multi_link_origin_destination_regional_impact(self, gdf_ori): return origin_impact_master, region_impact_master def multi_link_isolated_locations( - self, graph: nx.Graph, analysis: dict, crs=4326 + self, graph: nx.Graph, analysis: AnalysisSectionIndirect, crs=4326 ) -> Tuple[gpd.GeoDataFrame, pd.DataFrame]: """ This function identifies locations that are flooded or isolated due to the disruption of the network caused by a hazard. @@ -861,7 +864,7 @@ def multi_link_isolated_locations( # Load the point shapefile with the locations of which the isolated locations should be identified. locations = gpd.read_feather( - self.config["static_path"] / "output_graph" / "locations_hazard.feather" + self.config.static_path.joinpath("output_graph", "locations_hazard.feather") ) # TODO PUT CRS IN DOCUMENTATION OR MAKE CHANGABLE # reproject the datasets to be able to make a buffer in meters @@ -869,7 +872,7 @@ def multi_link_isolated_locations( # create an empty list to append the df_aggregation to aggregation = pd.DataFrame() - for i, hazard in enumerate(self.config["hazard_names"]): + for i, hazard in enumerate(self.config.hazard_names): # for each hazard event hazard_name = self.hazard_names.loc[ self.hazard_names[self._file_name_key] == hazard, self._ra2ce_name_key @@ -883,7 +886,7 @@ def multi_link_isolated_locations( edges_hz_direct = [ e for e in edges - if (e[-1][hazard_name] > float(analysis["threshold"])) + if (e[-1][hazard_name] > float(analysis.threshold)) & ("bridge" not in e[-1]) ] edges_hz_indirect = [e for e in edges if e not in edges_hz_direct] @@ -916,13 +919,13 @@ def multi_link_isolated_locations( pd.concat([network_hz_direct, network_hz_indirect]) ) results_hz_roads = buffer_geometry( - results_hz_roads, analysis["buffer_meters"] + results_hz_roads, analysis.buffer_meters ).to_crs(crs=crs) # Save the output results_hz_roads.to_file( - self.config["output_path"] - / analysis["analysis"] - / f"flooded_and_isolated_roads_{hazard_name}.gpkg" + self.config.output_path.joinpath( + analysis.analysis, f"flooded_and_isolated_roads_{hazard_name}.gpkg" + ) ) # relate the locations to network disruption due to hazard by spatial overlay @@ -936,12 +939,12 @@ def multi_link_isolated_locations( # TODO: Put in analyses.ini file a variable to set the threshold for locations that are not isolated when they are flooded. 
# Extract the flood depth of the locations - # intersect = intersect.loc[intersect[hazard_name] > analysis['threshold_locations']] + # intersect = intersect.loc[intersect[hazard_name] > analysis.threshold_locations] # get location stats df_aggregation = self._summarize_locations( locations_hz, - cat_col=analysis["category_field_name"], + cat_col=analysis.category_field_name, hazard_id=hazard_name[:-3], ) @@ -1029,14 +1032,14 @@ def _summarize_locations( def execute(self): """Executes the indirect analysis.""" _pickle_reader = GraphPickleReader() - for analysis in self.config["indirect"]: + for analysis in self.config.indirect: logging.info( - f"----------------------------- Started analyzing '{analysis['name']}' -----------------------------" + f"----------------------------- Started analyzing '{analysis.name}' -----------------------------" ) starttime = time.time() gdf = pd.DataFrame() opt_routes = None - output_path = self.config["output_path"].joinpath(analysis["analysis"]) + _output_path = self.config.output_path.joinpath(analysis.analysis) def _save_gpkg_analysis( base_graph, @@ -1045,46 +1048,46 @@ def _save_gpkg_analysis( ): for to_save, save_name in zip(to_save_gdf, to_save_gdf_names): if not to_save.empty: - gpkg_path = output_path.joinpath( - analysis["name"].replace(" ", "_") + f"_{save_name}.gpkg" + gpkg_path = _output_path.joinpath( + analysis.name.replace(" ", "_") + f"_{save_name}.gpkg" ) save_gdf(to_save, gpkg_path) # Save the Graph - gpkg_path_nodes = output_path.joinpath( - analysis["name"].replace(" ", "_") + "_results_nodes.gpkg" + gpkg_path_nodes = _output_path.joinpath( + analysis.name.replace(" ", "_") + "_results_nodes.gpkg" ) - gpkg_path_edges = output_path.joinpath( - analysis["name"].replace(" ", "_") + "_results_edges.gpkg" + gpkg_path_edges = _output_path.joinpath( + analysis.name.replace(" ", "_") + "_results_edges.gpkg" ) graph_to_gpkg(base_graph, gpkg_path_edges, gpkg_path_nodes) - if analysis.get("weighing", "") == "distance": + if analysis.weighing == "distance": # The name is different in the graph. 
- analysis["weighing"] = "length" - _config_files = self.config["files"] - if analysis["analysis"] == "single_link_redundancy": + analysis.weighing = "length" + _config_files = self.config.files + if analysis.analysis == "single_link_redundancy": g = _pickle_reader.read(_config_files["base_graph"]) gdf = self.single_link_redundancy(g, analysis) - elif analysis["analysis"] == "multi_link_redundancy": + elif analysis.analysis == "multi_link_redundancy": g = _pickle_reader.read(_config_files["base_graph_hazard"]) gdf = self.multi_link_redundancy(g, analysis) - elif analysis["analysis"] == "optimal_route_origin_destination": + elif analysis.analysis == "optimal_route_origin_destination": g = _pickle_reader.read(_config_files["origins_destinations_graph"]) gdf = self.optimal_route_origin_destination(g, analysis) - if analysis.get("save_traffic", False) and ( - "origin_count" in self.config["origins_destinations"].keys() + if analysis.save_traffic and hasattr( + self.config.origins_destinations, "origin_count" ): od_table = gpd.read_feather( - self.config["static_path"] - / "output_graph" - / "origin_destination_table.feather" + self.config.static_path.joinpath( + "output_graph", "origin_destination_table.feather" + ) ) _equity_weights_file = None - if "equity_weight" in analysis.keys(): - _equity_weights_file = self.config["static_path"].joinpath( - "network", analysis["equity_weight"] + if "equity_weight" in analysis.keys() and analysis.equity_weight: + _equity_weights_file = self.config.static_path.joinpath( + "network", analysis.equity_weight ) route_traffic_df = self.optimal_route_od_link( gdf, @@ -1093,19 +1096,17 @@ def _save_gpkg_analysis( _equity_weights_file ), ) - impact_csv_path = ( - self.config["output_path"] - / analysis["analysis"] - / (analysis["name"].replace(" ", "_") + "_link_traffic.csv") + impact_csv_path = _output_path.joinpath( + (analysis.name.replace(" ", "_") + "_link_traffic.csv"), ) route_traffic_df.to_csv(impact_csv_path, index=False) - elif analysis["analysis"] == "multi_link_origin_destination": + elif analysis.analysis == "multi_link_origin_destination": g = _pickle_reader.read( - self.config["files"]["origins_destinations_graph_hazard"] + self.config.files["origins_destinations_graph_hazard"] ) gdf = self.multi_link_origin_destination(g, analysis) g_not_disrupted = _pickle_reader.read( - self.config["files"]["origins_destinations_graph_hazard"] + self.config.files["origins_destinations_graph_hazard"] ) gdf_not_disrupted = self.optimal_route_origin_destination( g_not_disrupted, analysis @@ -1115,50 +1116,42 @@ def _save_gpkg_analysis( gdf_ori, ) = self.multi_link_origin_destination_impact(gdf, gdf_not_disrupted) try: - assert self.config["origins_destinations"]["region"] + assert self.config.origins_destinations.region ( regional_impact_df, regional_impact_summary_df, ) = self.multi_link_origin_destination_regional_impact(gdf_ori) - impact_csv_path = ( - self.config["output_path"] - / analysis["analysis"] - / (analysis["name"].replace(" ", "_") + "_regional_impact.csv") + impact_csv_path = _output_path.joinpath( + (analysis.name.replace(" ", "_") + "_regional_impact.csv"), ) regional_impact_df.to_csv(impact_csv_path, index=False) - impact_csv_path = ( - self.config["output_path"] - / analysis["analysis"] - / ( - analysis["name"].replace(" ", "_") + impact_csv_path = _output_path.joinpath( + ( + analysis.name.replace(" ", "_") + "_regional_impact_summary.csv" - ) + ), ) regional_impact_summary_df.to_csv(impact_csv_path) except Exception: pass - impact_csv_path = ( 
- self.config["output_path"] - / analysis["analysis"] - / (analysis["name"].replace(" ", "_") + "_impact.csv") + impact_csv_path = _output_path.joinpath( + (analysis.name.replace(" ", "_") + "_impact.csv"), ) del gdf_ori["geometry"] gdf_ori.to_csv(impact_csv_path, index=False) - impact_csv_path = ( - self.config["output_path"] - / analysis["analysis"] - / (analysis["name"].replace(" ", "_") + "_impact_summary.csv") + impact_csv_path = _output_path.joinpath( + (analysis.name.replace(" ", "_") + "_impact_summary.csv"), ) disruption_impact_df.to_csv(impact_csv_path, index=False) - elif analysis["analysis"] == "single_link_losses": - g = _pickle_reader.read(self.config["files"]["base_graph_hazard"]) + elif analysis.analysis == "single_link_losses": + g = _pickle_reader.read(self.config.files["base_graph_hazard"]) gdf = self.single_link_redundancy(g, analysis) gdf = self.single_link_losses(gdf, analysis) - elif analysis["analysis"] == "multi_link_losses": - g = _pickle_reader.read(self.config["files"]["base_graph_hazard"]) + elif analysis.analysis == "multi_link_losses": + g = _pickle_reader.read(self.config.files["base_graph_hazard"]) gdf = self.multi_link_redundancy(g, analysis) gdf = self.multi_link_losses(gdf, analysis) - elif analysis["analysis"] == "optimal_route_origin_closest_destination": + elif analysis.analysis == "optimal_route_origin_closest_destination": analyzer = OriginClosestDestination( self.config, analysis, self.hazard_names ) @@ -1167,30 +1160,30 @@ def _save_gpkg_analysis( opt_routes, destinations, ) = analyzer.optimal_route_origin_closest_destination() - if analysis["save_gpkg"]: + if analysis.save_gpkg: # Save the GeoDataFrames to_save_gdf = [destinations, opt_routes] to_save_gdf_names = ["destinations", "optimal_routes"] _save_gpkg_analysis(base_graph, to_save_gdf, to_save_gdf_names) - if analysis["save_csv"]: - csv_path = output_path / ( - analysis["name"].replace(" ", "_") + "_destinations.csv" + if analysis.save_csv: + csv_path = _output_path.joinpath( + analysis.name.replace(" ", "_") + "_destinations.csv" ) del destinations["geometry"] destinations.to_csv(csv_path, index=False) - csv_path = output_path / ( - analysis["name"].replace(" ", "_") + "_optimal_routes.csv" + csv_path = _output_path.joinpath( + analysis.name.replace(" ", "_") + "_optimal_routes.csv" ) del opt_routes["geometry"] opt_routes.to_csv(csv_path, index=False) - elif analysis["analysis"] == "multi_link_origin_closest_destination": + elif analysis.analysis == "multi_link_origin_closest_destination": analyzer = OriginClosestDestination( self.config, analysis, self.hazard_names ) - if analysis.get("calculate_route_without_disruption", False): + if analysis.calculate_route_without_disruption: ( base_graph, opt_routes_without_hazard, @@ -1221,7 +1214,7 @@ def _save_gpkg_analysis( ) = analyzer.multi_link_origin_closest_destination() opt_routes_without_hazard = gpd.GeoDataFrame() - if analysis["save_gpkg"]: + if analysis.save_gpkg: # Save the GeoDataFrames to_save_gdf = [ origins, @@ -1236,16 +1229,16 @@ def _save_gpkg_analysis( "optimal_routes_with_hazard", ] _save_gpkg_analysis(base_graph, to_save_gdf, to_save_gdf_names) - if analysis["save_csv"]: - csv_path = output_path / ( - analysis["name"].replace(" ", "_") + "_destinations.csv" + if analysis.save_csv: + csv_path = _output_path.joinpath( + analysis.name.replace(" ", "_") + "_destinations.csv" ) if "geometry" in destinations.columns: del destinations["geometry"] destinations.to_csv(csv_path, index=False) - csv_path = output_path / ( - 
analysis["name"].replace(" ", "_") + "_optimal_routes.csv" + csv_path = _output_path.joinpath( + analysis.name.replace(" ", "_") + "_optimal_routes.csv" ) if not opt_routes_without_hazard.empty: del opt_routes_without_hazard["geometry"] @@ -1255,58 +1248,57 @@ def _save_gpkg_analysis( opt_routes_with_hazard.to_csv(csv_path, index=False) agg_results.to_excel( - output_path - / (analysis["name"].replace(" ", "_") + "_results.xlsx"), + _output_path.joinpath( + analysis.name.replace(" ", "_") + "_results.xlsx" + ), index=False, ) - elif analysis["analysis"] == "losses": + elif analysis.analysis == "losses": if self.graphs["base_network_hazard"] is None: - gdf_in = gpd.read_feather( - self.config["files"]["base_network_hazard"] - ) + gdf_in = gpd.read_feather(self.config.files["base_network_hazard"]) losses = Losses(self.config, analysis) df = losses.calculate_losses_from_table() gdf = gdf_in.merge(df, how="left", on="LinkNr") - elif analysis["analysis"] == "multi_link_isolated_locations": - g = _pickle_reader.read(self.config["files"]["base_graph_hazard"]) + elif analysis.analysis == "multi_link_isolated_locations": + g = _pickle_reader.read(self.config.files["base_graph_hazard"]) gdf, df = self.multi_link_isolated_locations(g, analysis) - df_path = output_path / ( - analysis["name"].replace(" ", "_") + "_results.csv" + df_path = _output_path / ( + analysis.name.replace(" ", "_") + "_results.csv" ) df.to_csv(df_path, index=False) else: - _error = f"Analysis {analysis['analysis']} does not exist in RA2CE. Please choose an existing analysis." + _error = f"Analysis {analysis.analysis} does not exist in RA2CE. Please choose an existing analysis." logging.error(_error) raise ValueError(_error) if not gdf.empty: # Not for all analyses a gdf is created as output. - if analysis["save_gpkg"]: - gpkg_path = output_path.joinpath( - analysis["name"].replace(" ", "_") + ".gpkg" + if analysis.save_gpkg: + gpkg_path = _output_path.joinpath( + analysis.name.replace(" ", "_") + ".gpkg" ) save_gdf(gdf, gpkg_path) if opt_routes: - gpkg_path = output_path.joinpath( - analysis["name"].replace(" ", "_") + "_optimal_routes.gpkg" + gpkg_path = _output_path.joinpath( + analysis.name.replace(" ", "_") + "_optimal_routes.gpkg" ) save_gdf(gdf, gpkg_path) - if analysis["save_csv"]: - csv_path = output_path.joinpath( - analysis["name"].replace(" ", "_") + ".csv" + if analysis.save_csv: + csv_path = _output_path.joinpath( + analysis.name.replace(" ", "_") + ".csv" ) gdf.to_csv(csv_path, index=False) # Save the configuration for this analysis to the output folder. - with open(output_path / "settings.txt", "w") as f: - for key in analysis: - print(key + " = " + str(analysis[key]), file=f) + with open(_output_path / "settings.txt", "w") as f: + for key in analysis.__dict__: + print(key + " = " + str(getattr(analysis, key)), file=f) endtime = time.time() logging.info( - f"----------------------------- Analysis '{analysis['name']}' finished. Time: {str(round(endtime - starttime, 2))}s -----------------------------" + f"----------------------------- Analysis '{analysis.name}' finished. 
Time: {str(round(endtime - starttime, 2))}s -----------------------------" ) diff --git a/ra2ce/analyses/indirect/losses.py b/ra2ce/analyses/indirect/losses.py index 13426c1f6..aef281322 100644 --- a/ra2ce/analyses/indirect/losses.py +++ b/ra2ce/analyses/indirect/losses.py @@ -23,18 +23,23 @@ import numpy as np import pandas as pd +from ra2ce.analyses.analysis_config_data.analysis_config_data import ( + AnalysisConfigData, + AnalysisSectionIndirect, +) + class Losses: - def __init__(self, config, analysis): + def __init__(self, config: AnalysisConfigData, analysis: AnalysisSectionIndirect): self.config = config self.analysis = analysis - self.duration = analysis["duration_event"] - self.duration_disr = analysis["duration_disruption"] - self.detour_traffic = analysis["fraction_detour"] - self.traffic_throughput = analysis["fraction_drivethrough"] - self.rest_capacity = analysis["rest_capacity"] - self.maximum = analysis["maximum_jam"] - self.partofday = analysis["partofday"] + self.duration = analysis.duration_event + self.duration_disr = analysis.duration_disruption + self.detour_traffic = analysis.fraction_detour + self.traffic_throughput = analysis.fraction_drivethrough + self.rest_capacity = analysis.rest_capacity + self.maximum = analysis.maximum_jam + self.partofday = analysis.partofday @staticmethod def vehicle_loss_hours(path): @@ -180,7 +185,7 @@ def calculate_losses_from_table(self): """ traffic_data = self.load_df( - self.config["input_path"] / "losses", "traffic_intensities.csv" + self.config.input_path / "losses", "traffic_intensities.csv" ) dict1 = { "AS_VTG": "evening_total", @@ -199,9 +204,7 @@ def calculate_losses_from_table(self): } traffic_data.rename(columns=dict1, inplace=True) - detour_data = self.load_df( - self.config["input_path"] / "losses", "detour_data.csv" - ) + detour_data = self.load_df(self.config.input_path / "losses", "detour_data.csv") dict2 = { "VA_AV_HWN": "detour_time_evening", "VA_RD_HWN": "detour_time_remaining", @@ -210,8 +213,6 @@ def calculate_losses_from_table(self): } detour_data.rename(columns=dict2, inplace=True) - vehicle_loss_hours = self.vehicle_loss_hours( - self.config["input_path"] / "losses" - ) + vehicle_loss_hours = self.vehicle_loss_hours(self.config.input_path / "losses") vlh = self.calc_vlh(traffic_data, vehicle_loss_hours, detour_data) return vlh diff --git a/ra2ce/analyses/indirect/origin_closest_destination.py b/ra2ce/analyses/indirect/origin_closest_destination.py index 3f55a5e34..6288bcbaf 100644 --- a/ra2ce/analyses/indirect/origin_closest_destination.py +++ b/ra2ce/analyses/indirect/origin_closest_destination.py @@ -30,6 +30,10 @@ import pandas as pd from shapely.geometry import LineString, MultiLineString from tqdm import tqdm +from ra2ce.analyses.analysis_config_data.analysis_config_data import ( + AnalysisConfigData, + AnalysisSectionIndirect, +) from ra2ce.common.io.readers.graph_pickle_reader import GraphPickleReader @@ -42,23 +46,26 @@ class OriginClosestDestination: graphs: A dictionary with one or multiple NetworkX graphs. 
""" - def __init__(self, config: dict, analysis: dict, hazard_names: pd.DataFrame): + def __init__( + self, + config: AnalysisConfigData, + analysis: AnalysisSectionIndirect, + hazard_names: pd.DataFrame, + ): self.crs = 4326 # TODO PUT IN DOCUMENTATION OR MAKE CHANGABLE self.unit = "km" - if "threshold" in analysis: - self.network_threshold = analysis["threshold"] + if hasattr(analysis, "threshold"): + self.network_threshold = analysis.threshold self.threshold_destinations = 0 # TODO MAKE PARAMETER IN ANALYSES.INI - self.weighing = analysis["weighing"] - self.o_name = config["origins_destinations"]["origins_names"] - self.d_name = config["origins_destinations"]["destinations_names"] - self.od_id = config["origins_destinations"]["id_name_origin_destination"] - self.origin_out_fraction = config["origins_destinations"]["origin_out_fraction"] - self.origin_count = config["origins_destinations"]["origin_count"] + self.weighing = analysis.weighing + self.o_name = config.origins_destinations.origins_names + self.d_name = config.origins_destinations.destinations_names + self.od_id = config.origins_destinations.id_name_origin_destination + self.origin_out_fraction = config.origins_destinations.origin_out_fraction + self.origin_count = config.origins_destinations.origin_count self.od_key = "od_id" self.id_name = ( - config["network"]["file_id"] - if config["network"]["file_id"] is not None - else "rfid" + config.network.file_id if config.network.file_id is not None else "rfid" ) self.analysis = analysis self.config = config @@ -67,9 +74,9 @@ def __init__(self, config: dict, analysis: dict, hazard_names: pd.DataFrame): self.destination_names = None self.destination_key = None - if config["origins_destinations"].get("category", None): + if config.origins_destinations.category: self.destination_key = "category" - self.destination_key_value = config["origins_destinations"]["category"] + self.destination_key_value = config.origins_destinations.category self.results_dict = {} @@ -81,7 +88,7 @@ def read(graph_file): def optimal_route_origin_closest_destination(self): """Calculates per origin the location of its closest destination""" - graph = self.read(self.config["files"]["origins_destinations_graph"]) + graph = self.read(self.config.files["origins_destinations_graph"]) # Load the origins and destinations origins = self.load_origins() @@ -95,7 +102,7 @@ def optimal_route_origin_closest_destination(self): if self.destination_key: self.destination_names = list( - destinations[self.config["origins_destinations"]["category"]].unique() + destinations[self.config.origins_destinations.category].unique() ) self.destination_names_short = { dn: f"D{i+1}" for i, dn in enumerate(self.destination_names) @@ -141,7 +148,7 @@ def optimal_route_origin_closest_destination(self): def multi_link_origin_closest_destination(self): """Calculates per origin the location of its closest destination with hazard disruption""" - graph = self.read(self.config["files"]["origins_destinations_graph_hazard"]) + graph = self.read(self.config.files["origins_destinations_graph_hazard"]) # Load the origins and destinations origins = self.load_origins() @@ -152,7 +159,7 @@ def multi_link_origin_closest_destination(self): if self.destination_key: self.destination_names = list( - destinations[self.config["origins_destinations"]["category"]].unique() + destinations[self.config.origins_destinations.category].unique() ) self.destination_names_short = { dn: f"D{i+1}" for i, dn in enumerate(self.destination_names) @@ -167,7 +174,7 @@ def 
multi_link_origin_closest_destination(self): self.hazard_names.loc[ self.hazard_names["File name"] == hazard, "RA2CE name" ].values[0] - for hazard in self.config["hazard_names"] + for hazard in self.config.hazard_names ] hazards.sort() for hazard_name in hazards: @@ -982,7 +989,7 @@ def calc_routes_closest_dest( def load_origins(self): od_path = ( - self.config["static_path"] + self.config.static_path / "output_graph" / "origin_destination_table.feather" ) @@ -993,7 +1000,7 @@ def load_origins(self): def load_destinations(self): od_path = ( - self.config["static_path"] + self.config.static_path / "output_graph" / "origin_destination_table.feather" ) diff --git a/ra2ce/common/configuration/config_wrapper_protocol.py b/ra2ce/common/configuration/config_wrapper_protocol.py index d843eef56..a58b23629 100644 --- a/ra2ce/common/configuration/config_wrapper_protocol.py +++ b/ra2ce/common/configuration/config_wrapper_protocol.py @@ -23,7 +23,7 @@ from __future__ import annotations from pathlib import Path -from typing import Any, List, Optional, Protocol, runtime_checkable +from typing import Any, Optional, Protocol, runtime_checkable from ra2ce.common.configuration.config_data_protocol import ConfigDataProtocol @@ -32,7 +32,7 @@ class ConfigWrapperProtocol(Protocol): # pragma: no cover ini_file: Path config_data: Optional[ConfigDataProtocol] = None - graphs: List[Any] = [] + graphs: Optional[dict] = None @property def root_dir(self) -> Path: diff --git a/ra2ce/graph/hazard/hazard_overlay.py b/ra2ce/graph/hazard/hazard_overlay.py index 75573c840..34f295a03 100644 --- a/ra2ce/graph/hazard/hazard_overlay.py +++ b/ra2ce/graph/hazard/hazard_overlay.py @@ -506,7 +506,7 @@ def hazard_intersect_with_reprojection( ) -> gpd.GeoDataFrame: """Intersect geodataframe and hazard with reprojection""" # Check if the graph needs to be reprojected - hazard_crs = pyproj.CRS.from_user_input(self.config["hazard"]["hazard_crs"]) + hazard_crs = pyproj.CRS.from_user_input(self.config.hazard["hazard_crs"]) gdf_crs = pyproj.CRS.from_user_input(gdf.crs) if ( diff --git a/ra2ce/graph/network_config_data/network_config_data.py b/ra2ce/graph/network_config_data/network_config_data.py index 7036888a2..ed7de1494 100644 --- a/ra2ce/graph/network_config_data/network_config_data.py +++ b/ra2ce/graph/network_config_data/network_config_data.py @@ -96,6 +96,7 @@ class NetworkConfigData(ConfigDataProtocol): crs: CRS = field(default_factory=lambda: CRS.from_user_input(4326)) project: ProjectSection = field(default_factory=lambda: ProjectSection()) network: NetworkSection = field(default_factory=lambda: NetworkSection()) + graphs: Optional[dict] = None origins_destinations: OriginsDestinationsSection = field( default_factory=lambda: OriginsDestinationsSection() ) diff --git a/ra2ce/runners/direct_analysis_runner.py b/ra2ce/runners/direct_analysis_runner.py index 5a5eda488..e10298f04 100644 --- a/ra2ce/runners/direct_analysis_runner.py +++ b/ra2ce/runners/direct_analysis_runner.py @@ -37,8 +37,8 @@ def __str__(self) -> str: @staticmethod def can_run(ra2ce_input: ConfigWrapper) -> bool: if ( - not ra2ce_input.analysis_config - or "direct" not in ra2ce_input.analysis_config.config_data + not bool(ra2ce_input.analysis_config) + or not ra2ce_input.analysis_config.config_data.direct ): return False if not ra2ce_input.network_config: diff --git a/ra2ce/runners/indirect_analysis_runner.py b/ra2ce/runners/indirect_analysis_runner.py index b074934fe..c7a9962ec 100644 --- a/ra2ce/runners/indirect_analysis_runner.py +++ 
b/ra2ce/runners/indirect_analysis_runner.py @@ -35,8 +35,8 @@ def __str__(self) -> str: @staticmethod def can_run(ra2ce_input: ConfigWrapper) -> bool: return ( - not ra2ce_input.analysis_config - or "indirect" in ra2ce_input.analysis_config.config_data + bool(ra2ce_input.analysis_config) + and ra2ce_input.analysis_config.config_data.indirect ) def run(self, analysis_config: AnalysisConfigWrapperBase) -> None: diff --git a/tests/analyses/direct/test_analyses_direct.py b/tests/analyses/direct/test_analyses_direct.py index 6a89db6d4..515bb75a5 100644 --- a/tests/analyses/direct/test_analyses_direct.py +++ b/tests/analyses/direct/test_analyses_direct.py @@ -21,6 +21,6 @@ def test_execute(self): ) ], output_path=test_data, - ).to_dict() + ) _graphs = {} DirectAnalyses(_config, _graphs).execute() diff --git a/tests/analyses/direct/test_cost_benefit_analysis.py b/tests/analyses/direct/test_cost_benefit_analysis.py index 00a3a967a..d66229db9 100644 --- a/tests/analyses/direct/test_cost_benefit_analysis.py +++ b/tests/analyses/direct/test_cost_benefit_analysis.py @@ -2,13 +2,16 @@ import pandas as pd import pytest -from ra2ce.analyses.analysis_config_data.analysis_config_data import AnalysisConfigData +from ra2ce.analyses.analysis_config_data.analysis_config_data import ( + AnalysisConfigData, + AnalysisSectionDirect, +) from ra2ce.analyses.direct.cost_benefit_analysis import EffectivenessMeasures from tests import test_data class MockEffectivenessMeasures(EffectivenessMeasures): - def __init__(self, config, analysis): + def __init__(self, config: AnalysisConfigData, analysis: AnalysisSectionDirect): """ This class is only meant to inherit from `Effectiveness measures` and allow the partial testing of certain methods for pure code coverage reasons. """ @@ -17,16 +20,16 @@ def __init__(self, config, analysis): class TestCostBenefitAnalysis: def test_init_raises_when_file_name_not_defined(self): - _config = AnalysisConfigData(input_path=test_data).to_dict() - _analysis = { - "return_period": None, - "repair_costs": None, - "evaluation_period": None, - "interest_rate": 42, - "climate_factor": 24, - "climate_period": 2.4, - "file_name": None, - } + _config = AnalysisConfigData(input_path=test_data) + _analysis = AnalysisSectionDirect( + return_period=None, + repair_costs=None, + evaluation_period=None, + interest_rate=42, + climate_factor=24, + climate_period=2.4, + file_name=None, + ) with pytest.raises(ValueError) as exc_err: EffectivenessMeasures(_config, _analysis) assert ( @@ -35,16 +38,16 @@ def test_init_raises_when_file_name_not_defined(self): ) def test_init_raises_when_file_name_not_shp(self): - _config = AnalysisConfigData(input_path=test_data).to_dict() - _analysis = { - "return_period": None, - "repair_costs": None, - "evaluation_period": None, - "interest_rate": 42, - "climate_factor": 24, - "climate_period": 2.4, - "file_name": "just_a_file.txt", - } + _config = AnalysisConfigData(input_path=test_data) + _analysis = AnalysisSectionDirect( + return_period=None, + repair_costs=None, + evaluation_period=None, + interest_rate=42, + climate_factor=24, + climate_period=2.4, + file_name="just_a_file.txt", + ) with pytest.raises(ValueError) as exc_err: EffectivenessMeasures(_config, _analysis) assert ( @@ -53,38 +56,38 @@ def test_init_raises_when_file_name_not_shp(self): ) def test_init_raises_when_direct_shp_file_does_not_exist(self): - _config = AnalysisConfigData(input_path=test_data).to_dict() - _analysis = { - "return_period": None, - "repair_costs": None, - "evaluation_period": None, - 
"interest_rate": 42, - "climate_factor": 24, - "climate_period": 2.4, - "file_name": "filedoesnotexist.shp", - } + _config = AnalysisConfigData(input_path=test_data) + _analysis = AnalysisSectionDirect( + return_period=None, + repair_costs=None, + evaluation_period=None, + interest_rate=42, + climate_factor=24, + climate_period=2.4, + file_name="filedoesnotexist.shp", + ) with pytest.raises(FileNotFoundError) as exc_err: EffectivenessMeasures(_config, _analysis) assert str(exc_err.value) == str( - _config["input_path"] / "direct" / "filedoesnotexist.shp" + _config.input_path.joinpath("direct", "filedoesnotexist.shp") ) def test_init_raises_when_effectiveness_measures_does_not_exist(self): - _config = AnalysisConfigData(input_path=test_data).to_dict() - _analysis = { - "return_period": None, - "repair_costs": None, - "evaluation_period": None, - "interest_rate": 42, - "climate_factor": 24, - "climate_period": 2.4, - "file_name": "origins.shp", - } - assert (_config["input_path"] / "direct" / "origins.shp").exists() + _config = AnalysisConfigData(input_path=test_data) + _analysis = AnalysisSectionDirect( + return_period=None, + repair_costs=None, + evaluation_period=None, + interest_rate=42, + climate_factor=24, + climate_period=2.4, + file_name="origins.shp", + ) + assert (_config.input_path.joinpath("direct", "origins.shp")).exists() with pytest.raises(FileNotFoundError) as exc_err: EffectivenessMeasures(_config, _analysis) assert str(exc_err.value) == str( - _config["input_path"] / "direct" / "effectiveness_measures.csv" + _config.input_path.joinpath("direct", "effectiveness_measures.csv") ) @pytest.mark.parametrize( diff --git a/tests/analyses/indirect/test_analyses_indirect.py b/tests/analyses/indirect/test_analyses_indirect.py index 3378f2580..821cd393e 100644 --- a/tests/analyses/indirect/test_analyses_indirect.py +++ b/tests/analyses/indirect/test_analyses_indirect.py @@ -1,4 +1,5 @@ -from ra2ce.analyses.indirect.analyses_indirect import IndirectAnalyses, Losses +from ra2ce.analyses.analysis_config_data.analysis_config_data import AnalysisConfigData +from ra2ce.analyses.indirect.analyses_indirect import IndirectAnalyses from tests import test_data @@ -6,7 +7,7 @@ class TestIndirectAnalyses: def test_initialize(self): # 1. Define test data. _graphs = {} - _config = {"output": test_data} + _config = AnalysisConfigData(output_path=test_data) # 2. Run test. _indirect_analyses = IndirectAnalyses(_config, _graphs) diff --git a/tests/analyses/indirect/test_losses.py b/tests/analyses/indirect/test_losses.py index ba0eb01ae..8a3095d5a 100644 --- a/tests/analyses/indirect/test_losses.py +++ b/tests/analyses/indirect/test_losses.py @@ -3,6 +3,10 @@ import numpy as np import pandas as pd import pytest +from ra2ce.analyses.analysis_config_data.analysis_config_data import ( + AnalysisConfigData, + AnalysisSectionIndirect, +) from ra2ce.analyses.indirect.losses import Losses @@ -10,16 +14,16 @@ class TestLosses: def test_initialize(self): # 1. Define test data - _config = {} - _analyses = { - "duration_event": None, - "duration_disruption": None, - "fraction_detour": None, - "fraction_drivethrough": None, - "rest_capacity": None, - "maximum_jam": None, - "partofday": None, - } + _config = AnalysisConfigData + _analyses = AnalysisSectionIndirect( + duration_event=None, + duration_disruption=None, + fraction_detour=None, + fraction_drivethrough=None, + rest_capacity=None, + maximum_jam=None, + partofday=None, + ) # 2. Run test. 
_losses = Losses(_config, _analyses) @@ -29,16 +33,16 @@ def test_initialize(self): def test_traffic_shockwave(self): # 1. Define test data - _config = {} - _analyses = { - "duration_event": 60, - "duration_disruption": None, - "fraction_detour": None, - "fraction_drivethrough": 24, - "rest_capacity": 42, - "maximum_jam": None, - "partofday": None, - } + _config = AnalysisConfigData() + _analyses = AnalysisSectionIndirect( + duration_event=60, + duration_disruption=None, + fraction_detour=None, + fraction_drivethrough=24, + rest_capacity=42, + maximum_jam=None, + partofday=None, + ) _losses = Losses(_config, _analyses) _capacity = pd.Series([42, 24, 12]) _intensity = pd.Series([4.2, 2.4, 1.2]) @@ -60,16 +64,17 @@ def test_traffic_shockwave(self): def test_calc_vlh(self, part_of_day: str): # 1. Define test data # TODO: Not sure of the input format values float of series? - _config = {} - _analyses = { - "duration_event": 60, - "duration_disruption": 15, - "fraction_detour": 1.24, - "fraction_drivethrough": 24, - "rest_capacity": 42, - "maximum_jam": 100, - "partofday": part_of_day, - } + _config = AnalysisConfigData() + _analyses = AnalysisSectionIndirect( + duration_event=60, + duration_disruption=15, + fraction_detour=1.24, + fraction_drivethrough=24, + rest_capacity=42, + maximum_jam=100, + partofday=part_of_day, + ) + _losses = Losses(_config, _analyses) _traffic_data = pd.DataFrame( { diff --git a/tests/analyses/indirect/test_origin_closest_destination.py b/tests/analyses/indirect/test_origin_closest_destination.py index 7eb938ed0..a8808c21b 100644 --- a/tests/analyses/indirect/test_origin_closest_destination.py +++ b/tests/analyses/indirect/test_origin_closest_destination.py @@ -1,11 +1,19 @@ +from ra2ce.analyses.analysis_config_data.analysis_config_data import ( + AnalysisConfigData, + AnalysisSectionIndirect, +) from ra2ce.analyses.indirect.origin_closest_destination import OriginClosestDestination +from ra2ce.graph.network_config_data.network_config_data import ( + NetworkSection, + OriginsDestinationsSection, +) class TestOriginClosestDestination: def test_init_with_category(self): # 1. Define test data. - _config_dict = { - "origins_destinations": dict( + _config_dict = AnalysisConfigData( + origins_destinations=OriginsDestinationsSection( origins_names="", destinations_names="", id_name_origin_destination="", @@ -13,9 +21,9 @@ def test_init_with_category(self): origin_count="", category="dummy_value", ), - "network": dict(file_id=""), - } - _analysis = dict(threshold="", weighing="") + network=NetworkSection(file_id=""), + ) + _analysis = AnalysisSectionIndirect(threshold="", weighing="") _hazard_names = None # 2. Run test. 
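For reference, a minimal usage sketch of the dict-to-dataclass change exercised by the tests above: analysis options are now read as attributes of AnalysisSectionIndirect instead of dictionary keys. The field names follow the diff; the concrete values are illustrative only.

from ra2ce.analyses.analysis_config_data.analysis_config_data import (
    AnalysisConfigData,
    AnalysisSectionIndirect,
)
from ra2ce.analyses.indirect.losses import Losses

# Illustrative values, mirroring the refactored test fixtures above.
_analysis = AnalysisSectionIndirect(
    duration_event=60,
    duration_disruption=15,
    fraction_detour=1.24,
    fraction_drivethrough=24,
    rest_capacity=42,
    maximum_jam=100,
    partofday="daily",
)
_losses = Losses(AnalysisConfigData(), _analysis)

# Attribute access replaces the previous dictionary lookups,
# e.g. analysis.duration_event instead of analysis["duration_event"].
assert _losses.duration == _analysis.duration_event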
From 66cfbc4997e4b17a493e206d322be0242d35c750 Mon Sep 17 00:00:00 2001 From: Ardt Klapwijk Date: Fri, 10 Nov 2023 17:33:10 +0100 Subject: [PATCH 11/35] chore: fix code smell --- ra2ce/common/configuration/config_wrapper_protocol.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/ra2ce/common/configuration/config_wrapper_protocol.py b/ra2ce/common/configuration/config_wrapper_protocol.py index a58b23629..c71275c0c 100644 --- a/ra2ce/common/configuration/config_wrapper_protocol.py +++ b/ra2ce/common/configuration/config_wrapper_protocol.py @@ -36,6 +36,9 @@ class ConfigWrapperProtocol(Protocol): # pragma: no cover @property def root_dir(self) -> Path: + """ + To be implemented + """ pass @classmethod From a1e8c0c986a9013dbde43e9f51f98795adbcbeb0 Mon Sep 17 00:00:00 2001 From: Ardt Klapwijk Date: Fri, 10 Nov 2023 17:45:56 +0100 Subject: [PATCH 12/35] chore: have network config as object --- .../analysis_config_wrapper_with_network.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ra2ce/analyses/analysis_config_wrapper/analysis_config_wrapper_with_network.py b/ra2ce/analyses/analysis_config_wrapper/analysis_config_wrapper_with_network.py index 8d3a93be6..77e2e9c4d 100644 --- a/ra2ce/analyses/analysis_config_wrapper/analysis_config_wrapper_with_network.py +++ b/ra2ce/analyses/analysis_config_wrapper/analysis_config_wrapper_with_network.py @@ -90,9 +90,9 @@ def from_data_with_network( def configure(self) -> None: self.config_data.files = self._network_config.files - self.config_data.network = self._network_config.config_data.network.__dict__ + self.config_data.network = self._network_config.config_data.network self.config_data.origins_destinations = ( - self._network_config.config_data.origins_destinations.__dict__ + self._network_config.config_data.origins_destinations ) # When Network is present the graphs are retrieved from the already configured object. From 65fdce06cdb1c7c919c83200a8569edc11fa0a87 Mon Sep 17 00:00:00 2001 From: Ardt Klapwijk Date: Fri, 10 Nov 2023 19:49:56 +0100 Subject: [PATCH 13/35] chore: boy scout changes --- .../analysis_config_data_validator_with_network.py | 2 +- ra2ce/analyses/indirect/analyses_indirect.py | 8 ++++---- ra2ce/graph/exporters/network_exporter_base.py | 6 +++--- ra2ce/ra2ce_logging.py | 2 +- 4 files changed, 9 insertions(+), 9 deletions(-) diff --git a/ra2ce/analyses/analysis_config_data/analysis_config_data_validator_with_network.py b/ra2ce/analyses/analysis_config_data/analysis_config_data_validator_with_network.py index ccedcbfde..86be16878 100644 --- a/ra2ce/analyses/analysis_config_data/analysis_config_data_validator_with_network.py +++ b/ra2ce/analyses/analysis_config_data/analysis_config_data_validator_with_network.py @@ -19,7 +19,7 @@ def validate(self) -> ValidationReport: _output_network_dir = self._config.output_path if ( not _output_network_dir - or not (_output_network_dir / "network.ini").is_file() + or not (_output_network_dir.joinpath("network.ini")).is_file() ): _base_report.error( f"The configuration file 'network.ini' is not found at {_output_network_dir}." 
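The boy-scout change above swaps the `/` operator for `Path.joinpath`; both compose the same path, as this small sketch with a hypothetical directory shows.

from pathlib import Path

_output_network_dir = Path("static") / "output"  # hypothetical directory
assert (_output_network_dir / "network.ini") == _output_network_dir.joinpath("network.ini")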
diff --git a/ra2ce/analyses/indirect/analyses_indirect.py b/ra2ce/analyses/indirect/analyses_indirect.py index 1e7530d0b..9bd3d8474 100644 --- a/ra2ce/analyses/indirect/analyses_indirect.py +++ b/ra2ce/analyses/indirect/analyses_indirect.py @@ -24,7 +24,7 @@ import logging import time from pathlib import Path -from typing import Any, List, Tuple +from typing import Any import geopandas as gpd import networkx as nx @@ -846,7 +846,7 @@ def multi_link_origin_destination_regional_impact(self, gdf_ori): def multi_link_isolated_locations( self, graph: nx.Graph, analysis: AnalysisSectionIndirect, crs=4326 - ) -> Tuple[gpd.GeoDataFrame, pd.DataFrame]: + ) -> tuple[gpd.GeoDataFrame, pd.DataFrame]: """ This function identifies locations that are flooded or isolated due to the disruption of the network caused by a hazard. It iterates over multiple hazard scenarios, modifies the graph to represent direct and indirect impacts, and then @@ -1043,8 +1043,8 @@ def execute(self): def _save_gpkg_analysis( base_graph, - to_save_gdf: List[gpd.GeoDataFrame], - to_save_gdf_names: List[str], + to_save_gdf: list[gpd.GeoDataFrame], + to_save_gdf_names: list[str], ): for to_save, save_name in zip(to_save_gdf, to_save_gdf_names): if not to_save.empty: diff --git a/ra2ce/graph/exporters/network_exporter_base.py b/ra2ce/graph/exporters/network_exporter_base.py index c3986ebd8..604a3f22f 100644 --- a/ra2ce/graph/exporters/network_exporter_base.py +++ b/ra2ce/graph/exporters/network_exporter_base.py @@ -21,7 +21,7 @@ from pathlib import Path -from typing import Any, List, Union +from typing import Any, Union import geopandas as gpd import networkx as nx @@ -35,9 +35,9 @@ class NetworkExporterBase(Ra2ceExporterProtocol): - _export_types: List[str] = ["pickle"] + _export_types: list[str] = ["pickle"] - def __init__(self, basename: str, export_types: List[str]) -> None: + def __init__(self, basename: str, export_types: list[str]) -> None: self._basename = basename self._export_types = export_types self.pickle_path = None diff --git a/ra2ce/ra2ce_logging.py b/ra2ce/ra2ce_logging.py index 70280e14a..ced360bbc 100644 --- a/ra2ce/ra2ce_logging.py +++ b/ra2ce/ra2ce_logging.py @@ -31,7 +31,7 @@ class Ra2ceLogger: def __init__(self, logging_dir: Path, logger_name: str) -> None: if not logging_dir.is_dir(): logging_dir.mkdir(parents=True) - self.log_file = logging_dir / f"{logger_name}.log" + self.log_file = logging_dir.joinpath(f"{logger_name}.log") if not self.log_file.is_file(): self.log_file.touch() From c5af46fe7f691ffcbfdfe97b580977f4f393db78 Mon Sep 17 00:00:00 2001 From: Ardt Klapwijk Date: Sat, 11 Nov 2023 22:28:33 +0100 Subject: [PATCH 14/35] chore: expand config validation --- .../readers/analysis_config_reader_base.py | 7 +++++++ .../network_config_data/network_config_data_validator.py | 2 +- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/ra2ce/analyses/analysis_config_data/readers/analysis_config_reader_base.py b/ra2ce/analyses/analysis_config_data/readers/analysis_config_reader_base.py index 63a463074..a7f59f962 100644 --- a/ra2ce/analyses/analysis_config_data/readers/analysis_config_reader_base.py +++ b/ra2ce/analyses/analysis_config_data/readers/analysis_config_reader_base.py @@ -91,6 +91,7 @@ def get_project_section(self) -> ProjectSection: return ProjectSection(**self._parser["project"]) def _get_analysis_section(self, section_name: str) -> AnalysisSection: + # TODO expand _section = AnalysisSection(**self._parser[section_name]) _section.threshold = self._parser.getfloat( section_name, @@ -107,6 
+108,12 @@ def _get_analysis_section(self, section_name: str) -> AnalysisSection: "buffer_meters", fallback=_section.buffer_meters, ) + _section.save_traffic = self._parser.getboolean( + section_name, "save_traffic", fallback=_section.save_traffic + ) + _section.save_shp = self._parser.getboolean( + section_name, "save_shp", fallback=_section.save_shp + ) _section.save_gpkg = self._parser.getboolean( section_name, "save_gpkg", fallback=_section.save_gpkg ) diff --git a/ra2ce/graph/network_config_data/network_config_data_validator.py b/ra2ce/graph/network_config_data/network_config_data_validator.py index e881d015e..c816fea02 100644 --- a/ra2ce/graph/network_config_data/network_config_data_validator.py +++ b/ra2ce/graph/network_config_data/network_config_data_validator.py @@ -57,7 +57,7 @@ "save_csv": [True, False, None], "hazard_map": ["file", None], "aggregate_wl": ["max", "min", "mean", None], - "weighing": ["distance", "time", None], + "weighing": ["distance", "length", "time", None], "save_traffic": [True, False, None], "locations": ["file", None], } From a2d8a60a06e55163f6fb700117bb79dd734129b1 Mon Sep 17 00:00:00 2001 From: Ardt Klapwijk Date: Mon, 13 Nov 2023 08:08:26 +0100 Subject: [PATCH 15/35] chore: fix analysis test --- ra2ce/analyses/indirect/analyses_indirect.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ra2ce/analyses/indirect/analyses_indirect.py b/ra2ce/analyses/indirect/analyses_indirect.py index 9bd3d8474..dfa99c4b9 100644 --- a/ra2ce/analyses/indirect/analyses_indirect.py +++ b/ra2ce/analyses/indirect/analyses_indirect.py @@ -1085,7 +1085,7 @@ def _save_gpkg_analysis( ) ) _equity_weights_file = None - if "equity_weight" in analysis.keys() and analysis.equity_weight: + if hasattr(analysis, "equity_weight") and analysis.equity_weight: _equity_weights_file = self.config.static_path.joinpath( "network", analysis.equity_weight ) From 7ea3ef7100d0323ad1ba7be1b572a2be0ed813bd Mon Sep 17 00:00:00 2001 From: Ardt Klapwijk Date: Mon, 13 Nov 2023 08:09:02 +0100 Subject: [PATCH 16/35] chore: small boyscout changes --- ra2ce/common/validation/validation_report.py | 3 +++ ra2ce/graph/exporters/network_exporter_factory.py | 4 ++-- tests/test_main.py | 4 +--- tests/test_ra2ce_logging.py | 3 +-- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/ra2ce/common/validation/validation_report.py b/ra2ce/common/validation/validation_report.py index 983c49b37..889117af7 100644 --- a/ra2ce/common/validation/validation_report.py +++ b/ra2ce/common/validation/validation_report.py @@ -25,6 +25,9 @@ class ValidationReport: + _errors: list + _warns: list + def __init__(self) -> None: self._errors = [] self._warns = [] diff --git a/ra2ce/graph/exporters/network_exporter_factory.py b/ra2ce/graph/exporters/network_exporter_factory.py index a1f44ce05..6bab014fc 100644 --- a/ra2ce/graph/exporters/network_exporter_factory.py +++ b/ra2ce/graph/exporters/network_exporter_factory.py @@ -21,7 +21,7 @@ from pathlib import Path -from typing import List, Type +from typing import Type import geopandas as gpd import networkx as nx @@ -42,7 +42,7 @@ def export( network: NETWORK_TYPE, basename: str, output_dir: Path, - export_types: List[str], + export_types: list[str], ) -> None: _exporter_type = self.get_exporter(network) self._exporter = _exporter_type(basename, export_types) diff --git a/tests/test_main.py b/tests/test_main.py index d48b2ba3b..f8d088d04 100644 --- a/tests/test_main.py +++ b/tests/test_main.py @@ -1,5 +1,3 @@ -from typing import List - import pytest from 
click.testing import CliRunner @@ -36,7 +34,7 @@ class TestMainCli: ], ) def test_given_invalid_paths_raises_value_error( - self, arguments: List[str], expected_error: str + self, arguments: list[str], expected_error: str ): _run_result = CliRunner().invoke( main.run_analysis, diff --git a/tests/test_ra2ce_logging.py b/tests/test_ra2ce_logging.py index 46d357583..a29a12e96 100644 --- a/tests/test_ra2ce_logging.py +++ b/tests/test_ra2ce_logging.py @@ -1,7 +1,6 @@ import logging import shutil from pathlib import Path -from typing import List import pytest @@ -9,7 +8,7 @@ from tests import test_results -def get_logged_text_lines(log_file: Path) -> List[str]: +def get_logged_text_lines(log_file: Path) -> list[str]: _logged_text = log_file.read_text() assert _logged_text return _logged_text.splitlines(keepends=False) From 61e1e10cf5c562eeb4201d34231a6b5ee4675347 Mon Sep 17 00:00:00 2001 From: Ardt Klapwijk Date: Mon, 13 Nov 2023 08:19:59 +0100 Subject: [PATCH 17/35] chore: fix runner tests --- tests/runners/dummy_classes.py | 3 ++- tests/runners/test_analysis_runner_factory.py | 5 ++++- tests/runners/test_direct_analysis_runner.py | 6 +++--- tests/runners/test_indirect_analysis_runner.py | 2 +- 4 files changed, 10 insertions(+), 6 deletions(-) diff --git a/tests/runners/dummy_classes.py b/tests/runners/dummy_classes.py index b4ba87f86..1ade89746 100644 --- a/tests/runners/dummy_classes.py +++ b/tests/runners/dummy_classes.py @@ -1,3 +1,4 @@ +from ra2ce.analyses.analysis_config_data.analysis_config_data import AnalysisConfigData from ra2ce.analyses.analysis_config_wrapper.analysis_config_wrapper_base import ( AnalysisConfigWrapperBase, ) @@ -7,7 +8,7 @@ class DummyAnalysisConfig(AnalysisConfigWrapperBase): def __init__(self) -> None: - self.config_data = {} + self.config_data = AnalysisConfigData(direct=[], indirect=[]) @classmethod def from_data(cls, **kwargs): diff --git a/tests/runners/test_analysis_runner_factory.py b/tests/runners/test_analysis_runner_factory.py index 5adbb2c87..87952ec01 100644 --- a/tests/runners/test_analysis_runner_factory.py +++ b/tests/runners/test_analysis_runner_factory.py @@ -1,4 +1,5 @@ import pytest +from ra2ce.analyses.analysis_config_data.analysis_config_data import AnalysisConfigData from ra2ce.graph.network_config_data.network_config_data import NetworkConfigData from ra2ce.runners.analysis_runner_factory import AnalysisRunnerFactory @@ -22,7 +23,9 @@ def test_get_runner_with_manny_supported_runners_returns_analysis_runner_instanc ): # 1. Define test data. _config_wrapper = DummyRa2ceInput() - _config_wrapper.analysis_config.config_data = {"indirect": 42, "direct": 24} + _config_wrapper.analysis_config.config_data = AnalysisConfigData( + indirect=42, direct=24 + ) _config_wrapper.network_config.config_data = NetworkConfigData() _config_wrapper.network_config.config_data.hazard.hazard_map = 4224 diff --git a/tests/runners/test_direct_analysis_runner.py b/tests/runners/test_direct_analysis_runner.py index 00fc8c5de..10186c034 100644 --- a/tests/runners/test_direct_analysis_runner.py +++ b/tests/runners/test_direct_analysis_runner.py @@ -19,7 +19,7 @@ def dummy_ra2ce_input(self): def test_given_direct_configuration_can_run(self, dummy_ra2ce_input: ConfigWrapper): # 1. Define test data. - dummy_ra2ce_input.analysis_config.config_data["direct"] = None + dummy_ra2ce_input.analysis_config.config_data.direct = "sth" dummy_ra2ce_input.network_config.config_data.hazard.hazard_map = "A value" # 2. Run test. 
@@ -44,7 +44,7 @@ def test_given_wrong_network_hazard_configuration_cannot_run( self, dummy_ra2ce_input: ConfigWrapper ): # 1. Define test data. - dummy_ra2ce_input.analysis_config.config_data["direct"] = None + dummy_ra2ce_input.analysis_config.config_data.direct = "sth" # 2. Run test. _result = DirectAnalysisRunner.can_run(dummy_ra2ce_input) @@ -56,7 +56,7 @@ def test_given_no_network_config_returns_false( self, dummy_ra2ce_input: ConfigWrapper ): # 1. Define test data. - dummy_ra2ce_input.analysis_config.config_data["direct"] = "sth" + dummy_ra2ce_input.analysis_config.config_data.direct = "sth" dummy_ra2ce_input.network_config = None # 2. Run test. diff --git a/tests/runners/test_indirect_analysis_runner.py b/tests/runners/test_indirect_analysis_runner.py index 75ee7ccf7..e525bf2a0 100644 --- a/tests/runners/test_indirect_analysis_runner.py +++ b/tests/runners/test_indirect_analysis_runner.py @@ -11,7 +11,7 @@ def test_given_direct_configuration_can_run(self): # 1. Define test data. _input_config = DummyRa2ceInput() assert _input_config - _input_config.analysis_config.config_data["indirect"] = None + _input_config.analysis_config.config_data.indirect = "sth" # 2. Run test. _result = IndirectAnalysisRunner.can_run(_input_config) From f68517882c8ad51533432a5063cae6e0e2021418 Mon Sep 17 00:00:00 2001 From: Ardt Klapwijk Date: Mon, 13 Nov 2023 12:20:18 +0100 Subject: [PATCH 18/35] chore: refactor analyses in a single list --- .../analysis_config_data.py | 75 ++++++--- ...s_config_data_validator_without_network.py | 17 +- .../readers/analysis_config_reader_base.py | 152 +++++++++++++++--- ...s_config_data_validator_without_network.py | 6 +- ...st_analysis_config_wrapper_with_network.py | 11 +- ...analysis_config_wrapper_without_network.py | 11 +- tests/analyses/direct/test_analyses_direct.py | 2 +- tests/runners/dummy_classes.py | 2 +- tests/runners/test_analysis_runner_factory.py | 14 +- 9 files changed, 213 insertions(+), 77 deletions(-) diff --git a/ra2ce/analyses/analysis_config_data/analysis_config_data.py b/ra2ce/analyses/analysis_config_data/analysis_config_data.py index 8cc637945..1b7e9d692 100644 --- a/ra2ce/analyses/analysis_config_data/analysis_config_data.py +++ b/ra2ce/analyses/analysis_config_data/analysis_config_data.py @@ -33,6 +33,20 @@ OriginsDestinationsSection, ) +IndirectAnalysisNameList: list[str] = [ + "single_link_redundancy", + "multi_link_redundancy", + "optimal_route_origin_destination", + "multi_link_origin_destination", + "optimal_route_origin_closest_destination", + "multi_link_origin_closest_destination", + "losses", + "single_link_losses", + "multi_link_losses", + "multi_link_isolated_locations", +] +DirectAnalysisNameList: list[str] = ["direct", "effectiveness_measures"] + @dataclass class ProjectSection: @@ -40,10 +54,22 @@ class ProjectSection: @dataclass -class AnalysisSectionIndirect: +class AnalysisSection: name: str = "" analysis: str = "" # should be enum + save_gpkg: bool = False + save_csv: bool = False + + +@dataclass +class AnalysisSectionIndirect(AnalysisSection): + # general + weighing: str = "" # should be enum + loss_per_distance: str = "" + loss_type: str = "" # should be enum disruption_per_category: str = "" + traffic_cols: str = "" # should be list? 
+ # losses duration_event: float = math.nan duration_disruption: float = math.nan fraction_detour: float = math.nan @@ -51,43 +77,35 @@ class AnalysisSectionIndirect: rest_capacity: float = math.nan maximum_jam: float = math.nan partofday: str = "" + # accessiblity analyses aggregate_wl: str = "" # should be enum threshold: float = math.nan - weighing: str = "" # should be enum + threshold_destinations: float = math.nan + uniform_duration: float = math.nan + gdp_percapita: float = math.nan equity_weight: str = "" calculate_route_without_disruption: bool = False buffer_meters: float = math.nan + threshold_locations: float = math.nan category_field_name: str = "" - file_name: Path = None save_traffic: bool = False - save_gpkg: bool = False - save_csv: bool = False @dataclass -class AnalysisSectionDirect: - name: str = "" - analysis: str = "" # should be enum +class AnalysisSectionDirect(AnalysisSection): + # adaptation/effectiveness measures return_period: float = math.nan repair_costs: float = math.nan evaluation_period: float = math.nan interest_rate: float = math.nan climate_factor: float = math.nan climate_period: float = math.nan + # road damage damage_curve: str = "" event_type: str = "" risk_calculation: str = "" - loss_per_distance: str = "" - traffic_cols: str = "" - file_name: Path = None - save_shp: bool = False - save_gpkg: bool = False - save_csv: bool = False - - -@dataclass -class AnalysisSection(AnalysisSectionIndirect, AnalysisSectionDirect): - pass + create_table: bool = False + file_name: Optional[Path] = None @dataclass @@ -97,8 +115,7 @@ class AnalysisConfigData(ConfigDataProtocol): output_path: Optional[Path] = None static_path: Optional[Path] = None project: ProjectSection = field(default_factory=lambda: ProjectSection()) - direct: list[AnalysisSectionDirect] = field(default_factory=list) - indirect: list[AnalysisSectionIndirect] = field(default_factory=list) + analyses: list[AnalysisSection] = field(default_factory=list) files: Optional[dict[str, Path]] = field(default_factory=dict) origins_destinations: Optional[OriginsDestinationsSection] = field( default_factory=lambda: OriginsDestinationsSection() @@ -113,6 +130,22 @@ def to_dict(self) -> dict: _dict["files"] = [dv.__dict__ for dv in self.files] return _dict + @property + def direct(self): + return list( + analysis + for analysis in self.analyses + if analysis.analysis in DirectAnalysisNameList + ) + + @property + def indirect(self): + return list( + analysis + for analysis in self.analyses + if analysis.analysis in IndirectAnalysisNameList + ) + class AnalysisConfigDataWithNetwork(AnalysisConfigData): pass diff --git a/ra2ce/analyses/analysis_config_data/analysis_config_data_validator_without_network.py b/ra2ce/analyses/analysis_config_data/analysis_config_data_validator_without_network.py index b9bcaadbe..a73af158a 100644 --- a/ra2ce/analyses/analysis_config_data/analysis_config_data_validator_without_network.py +++ b/ra2ce/analyses/analysis_config_data/analysis_config_data_validator_without_network.py @@ -26,6 +26,8 @@ from ra2ce.analyses.analysis_config_data.analysis_config_data import ( AnalysisConfigDataWithoutNetwork, + IndirectAnalysisNameList, + DirectAnalysisNameList, ) from ra2ce.common.validation.ra2ce_validator_protocol import Ra2ceIoValidator from ra2ce.common.validation.validation_report import ValidationReport @@ -33,19 +35,6 @@ NetworkDictValues, ) -IndirectAnalysisNameList: list[str] = [ - "single_link_redundancy", - "multi_link_redundancy", - "optimal_route_origin_destination", - 
"multi_link_origin_destination", - "optimal_route_origin_closest_destination", - "multi_link_origin_closest_destination", - "losses", - "single_link_losses", - "multi_link_losses", - "multi_link_isolated_locations", -] -DirectAnalysisNameList: list[str] = ["direct", "effectiveness_measures"] AnalysisNetworkDictValues = NetworkDictValues | { "analysis": IndirectAnalysisNameList + DirectAnalysisNameList } @@ -157,7 +146,7 @@ def _check_header(header: str) -> None: def validate(self) -> ValidationReport: _report = ValidationReport() - _required_headers = ["project", "direct", "indirect"] + _required_headers = ["project", "analyses"] _report.merge(self._validate_headers(_required_headers)) return _report diff --git a/ra2ce/analyses/analysis_config_data/readers/analysis_config_reader_base.py b/ra2ce/analyses/analysis_config_data/readers/analysis_config_reader_base.py index a7f59f962..77022ea4a 100644 --- a/ra2ce/analyses/analysis_config_data/readers/analysis_config_reader_base.py +++ b/ra2ce/analyses/analysis_config_data/readers/analysis_config_reader_base.py @@ -29,11 +29,11 @@ from ra2ce.analyses.analysis_config_data.analysis_config_data import ( AnalysisConfigData, AnalysisSection, - ProjectSection, -) -from ra2ce.analyses.analysis_config_data.analysis_config_data_validator_without_network import ( + AnalysisSectionDirect, + AnalysisSectionIndirect, DirectAnalysisNameList, IndirectAnalysisNameList, + ProjectSection, ) from ra2ce.analyses.analysis_config_wrapper.analysis_config_wrapper_base import ( AnalysisConfigWrapperBase, @@ -67,7 +67,6 @@ def read(self, ini_file: Path) -> AnalysisConfigData: ini_file ) _config_data.project.name = _parent_dir.name - # TODO self._correct_paths(_config_data)?? return _config_data @@ -83,21 +82,74 @@ def _remove_none_values(self) -> None: def _get_sections(self) -> dict: return { "project": self.get_project_section(), - "direct": self.get_analysis_sections("direct"), - "indirect": self.get_analysis_sections("indirect"), + "analyses": self.get_analysis_sections(), } def get_project_section(self) -> ProjectSection: return ProjectSection(**self._parser["project"]) - def _get_analysis_section(self, section_name: str) -> AnalysisSection: - # TODO expand - _section = AnalysisSection(**self._parser[section_name]) + def _get_analysis_section_indirect( + self, section_name: str + ) -> AnalysisSectionIndirect: + _section = AnalysisSectionIndirect(**self._parser[section_name]) + _section.save_gpkg = self._parser.getboolean( + section_name, "save_gpkg", fallback=_section.save_gpkg + ) + _section.save_csv = self._parser.getboolean( + section_name, "save_csv", fallback=_section.save_csv + ) + # losses + _section.duration_event = self._parser.getfloat( + section_name, + "duration_event", + fallback=_section.duration_event, + ) + _section.duration_disruption = self._parser.getfloat( + section_name, + "duration_disruption", + fallback=_section.duration_disruption, + ) + _section.fraction_detour = self._parser.getfloat( + section_name, + "fraction_detour", + fallback=_section.fraction_detour, + ) + _section.fraction_drivethrough = self._parser.getfloat( + section_name, + "fraction_drivethrough", + fallback=_section.fraction_drivethrough, + ) + _section.rest_capacity = self._parser.getfloat( + section_name, + "rest_capacity", + fallback=_section.rest_capacity, + ) + _section.maximum_jam = self._parser.getfloat( + section_name, + "maximum_jam", + fallback=_section.maximum_jam, + ) + # accessiblity analyses _section.threshold = self._parser.getfloat( section_name, "threshold", 
            fallback=_section.threshold,
        )
+        _section.threshold_destinations = self._parser.getfloat(
+            section_name,
+            "threshold_destinations",
+            fallback=_section.threshold_destinations,
+        )
+        _section.uniform_duration = self._parser.getfloat(
+            section_name,
+            "uniform_duration",
+            fallback=_section.uniform_duration,
+        )
+        _section.gdp_percapita = self._parser.getfloat(
+            section_name,
+            "gdp_percapita",
+            fallback=_section.gdp_percapita,
+        )
         _section.calculate_route_without_disruption = self._parser.getboolean(
             section_name,
             "calculate_route_without_disruption",
@@ -108,35 +160,87 @@ def _get_analysis_section(self, section_name: str) -> AnalysisSection:
             "buffer_meters",
             fallback=_section.buffer_meters,
         )
+        _section.threshold_locations = self._parser.getfloat(
+            section_name,
+            "threshold_locations",
+            fallback=_section.threshold_locations,
+        )
         _section.save_traffic = self._parser.getboolean(
             section_name, "save_traffic", fallback=_section.save_traffic
         )
-        _section.save_shp = self._parser.getboolean(
-            section_name, "save_shp", fallback=_section.save_shp
-        )
+
+        return _section
+
+    def _get_analysis_section_direct(self, section_name: str) -> AnalysisSectionDirect:
+        _section = AnalysisSectionDirect(**self._parser[section_name])
         _section.save_gpkg = self._parser.getboolean(
             section_name, "save_gpkg", fallback=_section.save_gpkg
         )
         _section.save_csv = self._parser.getboolean(
             section_name, "save_csv", fallback=_section.save_csv
         )
+        # adaptation/effectiveness measures
+        _section.return_period = self._parser.getfloat(
+            section_name,
+            "return_period",
+            fallback=_section.return_period,
+        )
+        _section.repair_costs = self._parser.getfloat(
+            section_name,
+            "repair_costs",
+            fallback=_section.repair_costs,
+        )
+        _section.evaluation_period = self._parser.getfloat(
+            section_name,
+            "evaluation_period",
+            fallback=_section.evaluation_period,
+        )
+        _section.interest_rate = self._parser.getfloat(
+            section_name,
+            "interest_rate",
+            fallback=_section.interest_rate,
+        )
+        _section.climate_factor = self._parser.getfloat(
+            section_name,
+            "climate_factor",
+            fallback=_section.climate_factor,
+        )
+        _section.climate_period = self._parser.getfloat(
+            section_name,
+            "climate_period",
+            fallback=_section.climate_period,
+        )
+        # road damage
+        _section.create_table = self._parser.getboolean(
+            section_name,
+            "create_table",
+            fallback=_section.create_table,
+        )
         return _section
 
-    def get_analysis_sections(self, analysis_type: str) -> list[AnalysisSection]:
+    def get_analysis_sections(self) -> list[AnalysisSection]:
+        """
+        Extracts info from [analysis] sections
+
+        Returns:
+            list[AnalysisSection]: List of analyses (both direct and indirect)
+        """
         _analysis_sections = []
-        _section_names = re.findall(r"(analysis\d)", " ".join(self._parser.keys()))
+        _section_names = list(
+            section_name
+            for section_name in self._parser.sections()
+            if "analysis" in section_name
+        )
         for _section_name in _section_names:
-            _analysis_name = self._parser.get(_section_name, "analysis")
-            if analysis_type == "direct" and _analysis_name in DirectAnalysisNameList:
-                _analysis_section = self._get_analysis_section(_section_name)
-                _analysis_sections.append(_analysis_section)
-            elif (
-                analysis_type == "indirect"
-                and _analysis_name in IndirectAnalysisNameList
-            ):
-                _analysis_section = self._get_analysis_section(_section_name)
-                _analysis_sections.append(_analysis_section)
+            _analysis_type = self._parser.get(_section_name, "analysis")
+            if _analysis_type in DirectAnalysisNameList:
+                _analysis_section = 
self._get_analysis_section_direct(_section_name) + elif _analysis_type in IndirectAnalysisNameList: + _analysis_section = self._get_analysis_section_indirect(_section_name) + else: + raise ValueError(f"Analysis {_analysis_type} not supported.") + _analysis_sections.append(_analysis_section) return _analysis_sections diff --git a/tests/analyses/analysis_config_data/test_analysis_config_data_validator_without_network.py b/tests/analyses/analysis_config_data/test_analysis_config_data_validator_without_network.py index 6b47bab30..eb8aebf15 100644 --- a/tests/analyses/analysis_config_data/test_analysis_config_data_validator_without_network.py +++ b/tests/analyses/analysis_config_data/test_analysis_config_data_validator_without_network.py @@ -119,11 +119,13 @@ def test_validate_headers_fails_when_invalid_value( _test_config_data = AnalysisConfigData( root_path=test_results, project=ProjectSection(name=request.node.name), - direct=[AnalysisSectionDirect(analysis="invalid_analysis_type")], + analyses=[AnalysisSectionDirect(analysis="invalid_analysis_type")], ) # 2. Run test. - _report = self._validate_headers(_test_config_data, required_headers=["direct"]) + _report = self._validate_headers( + _test_config_data, required_headers=["analyses"] + ) # 3. Verify final expectations. assert not _report.is_valid() diff --git a/tests/analyses/analysis_config_wrapper/test_analysis_config_wrapper_with_network.py b/tests/analyses/analysis_config_wrapper/test_analysis_config_wrapper_with_network.py index 0773141ca..bf89e82f1 100644 --- a/tests/analyses/analysis_config_wrapper/test_analysis_config_wrapper_with_network.py +++ b/tests/analyses/analysis_config_wrapper/test_analysis_config_wrapper_with_network.py @@ -6,6 +6,7 @@ from ra2ce.analyses.analysis_config_data.analysis_config_data import ( AnalysisConfigData, AnalysisSectionDirect, + AnalysisSectionIndirect, ) from ra2ce.analyses.analysis_config_wrapper.analysis_config_wrapper_with_network import ( AnalysisConfigWrapperWithNetwork, @@ -78,9 +79,9 @@ def test_initialize_output_dirs_with_valid_data( _analysis = AnalysisConfigWrapperWithNetwork() _output_dir = test_results / request.node.name _analysis.config_data = AnalysisConfigData(output_path=_output_dir) - _analysis.config_data.direct = [AnalysisSectionDirect(analysis="test_direct")] - _analysis.config_data.indirect = [ - AnalysisSectionDirect(analysis="test_indirect") + _analysis.config_data.analyses = [ + AnalysisSectionDirect(analysis="effectiveness_measures"), + AnalysisSectionIndirect(analysis="single_link_redundancy"), ] if _output_dir.exists(): shutil.rmtree(_output_dir) @@ -90,5 +91,5 @@ def test_initialize_output_dirs_with_valid_data( # 3. Verify expectations. 
assert _output_dir.exists() - assert _output_dir.joinpath("test_direct").exists() - assert _output_dir.joinpath("test_indirect").exists() + assert _output_dir.joinpath("effectiveness_measures").exists() + assert _output_dir.joinpath("single_link_redundancy").exists() diff --git a/tests/analyses/analysis_config_wrapper/test_analysis_config_wrapper_without_network.py b/tests/analyses/analysis_config_wrapper/test_analysis_config_wrapper_without_network.py index 2e2657b7d..22b9ee1ca 100644 --- a/tests/analyses/analysis_config_wrapper/test_analysis_config_wrapper_without_network.py +++ b/tests/analyses/analysis_config_wrapper/test_analysis_config_wrapper_without_network.py @@ -5,6 +5,7 @@ from ra2ce.analyses.analysis_config_data.analysis_config_data import ( AnalysisConfigData, AnalysisSectionDirect, + AnalysisSectionIndirect, ) from ra2ce.analyses.analysis_config_wrapper.analysis_config_wrapper_without_network import ( AnalysisConfigWrapperWithoutNetwork, @@ -45,9 +46,9 @@ def test_initialize_output_dirs_with_valid_data( _analysis = AnalysisConfigWrapperWithoutNetwork() _output_dir = test_results / request.node.name _analysis.config_data = AnalysisConfigData(output_path=_output_dir) - _analysis.config_data.direct = [AnalysisSectionDirect(analysis="test_direct")] - _analysis.config_data.indirect = [ - AnalysisSectionDirect(analysis="test_indirect") + _analysis.config_data.analyses = [ + AnalysisSectionDirect(analysis="effectiveness_measures"), + AnalysisSectionIndirect(analysis="single_link_redundancy"), ] if _output_dir.exists(): shutil.rmtree(_output_dir) @@ -57,5 +58,5 @@ def test_initialize_output_dirs_with_valid_data( # 3. Verify expectations. assert _output_dir.exists() - assert _output_dir.joinpath("test_direct").exists() - assert _output_dir.joinpath("test_indirect").exists() + assert _output_dir.joinpath("effectiveness_measures").exists() + assert _output_dir.joinpath("single_link_redundancy").exists() diff --git a/tests/analyses/direct/test_analyses_direct.py b/tests/analyses/direct/test_analyses_direct.py index 515bb75a5..a1ce34e78 100644 --- a/tests/analyses/direct/test_analyses_direct.py +++ b/tests/analyses/direct/test_analyses_direct.py @@ -15,7 +15,7 @@ def test_init(self): def test_execute(self): _config = AnalysisConfigData( - direct=[ + analyses=[ AnalysisSectionDirect( name="DummyExecute", analysis="", save_gpkg=False, save_csv=False ) diff --git a/tests/runners/dummy_classes.py b/tests/runners/dummy_classes.py index 1ade89746..152617070 100644 --- a/tests/runners/dummy_classes.py +++ b/tests/runners/dummy_classes.py @@ -8,7 +8,7 @@ class DummyAnalysisConfig(AnalysisConfigWrapperBase): def __init__(self) -> None: - self.config_data = AnalysisConfigData(direct=[], indirect=[]) + self.config_data = AnalysisConfigData(analyses=[]) @classmethod def from_data(cls, **kwargs): diff --git a/tests/runners/test_analysis_runner_factory.py b/tests/runners/test_analysis_runner_factory.py index 87952ec01..6c79de1df 100644 --- a/tests/runners/test_analysis_runner_factory.py +++ b/tests/runners/test_analysis_runner_factory.py @@ -1,10 +1,13 @@ import pytest -from ra2ce.analyses.analysis_config_data.analysis_config_data import AnalysisConfigData +from ra2ce.analyses.analysis_config_data.analysis_config_data import ( + AnalysisConfigData, + AnalysisSectionDirect, + AnalysisSectionIndirect, +) from ra2ce.graph.network_config_data.network_config_data import NetworkConfigData from ra2ce.runners.analysis_runner_factory import AnalysisRunnerFactory from ra2ce.runners.analysis_runner_protocol 
import AnalysisRunner -from ra2ce.runners.indirect_analysis_runner import IndirectAnalysisRunner from tests.runners.dummy_classes import DummyRa2ceInput @@ -18,13 +21,16 @@ def test_get_runner_unknown_input_raises_error(self): == "No analysis runner found for the given configuration." ) - def test_get_runner_with_manny_supported_runners_returns_analysis_runner_instance( + def test_get_runner_with_many_supported_runners_returns_analysis_runner_instance( self, ): # 1. Define test data. _config_wrapper = DummyRa2ceInput() _config_wrapper.analysis_config.config_data = AnalysisConfigData( - indirect=42, direct=24 + analyses=[ + AnalysisSectionDirect(analysis="effectiveness_measures"), + AnalysisSectionIndirect(analysis="single_link_redundancy"), + ] ) _config_wrapper.network_config.config_data = NetworkConfigData() _config_wrapper.network_config.config_data.hazard.hazard_map = 4224 From e773c8008eae4e57e69c199ae09ae73412fc7183 Mon Sep 17 00:00:00 2001 From: Ardt Klapwijk Date: Mon, 13 Nov 2023 12:20:43 +0100 Subject: [PATCH 19/35] chore: remove superfluous checks on attr --- ra2ce/analyses/indirect/analyses_indirect.py | 2 +- ra2ce/analyses/indirect/origin_closest_destination.py | 5 ++--- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/ra2ce/analyses/indirect/analyses_indirect.py b/ra2ce/analyses/indirect/analyses_indirect.py index dfa99c4b9..57afa3387 100644 --- a/ra2ce/analyses/indirect/analyses_indirect.py +++ b/ra2ce/analyses/indirect/analyses_indirect.py @@ -1085,7 +1085,7 @@ def _save_gpkg_analysis( ) ) _equity_weights_file = None - if hasattr(analysis, "equity_weight") and analysis.equity_weight: + if analysis.equity_weight: _equity_weights_file = self.config.static_path.joinpath( "network", analysis.equity_weight ) diff --git a/ra2ce/analyses/indirect/origin_closest_destination.py b/ra2ce/analyses/indirect/origin_closest_destination.py index 6288bcbaf..f47cc0b64 100644 --- a/ra2ce/analyses/indirect/origin_closest_destination.py +++ b/ra2ce/analyses/indirect/origin_closest_destination.py @@ -54,9 +54,8 @@ def __init__( ): self.crs = 4326 # TODO PUT IN DOCUMENTATION OR MAKE CHANGABLE self.unit = "km" - if hasattr(analysis, "threshold"): - self.network_threshold = analysis.threshold - self.threshold_destinations = 0 # TODO MAKE PARAMETER IN ANALYSES.INI + self.network_threshold = analysis.threshold + self.threshold_destinations = analysis.threshold_destinations self.weighing = analysis.weighing self.o_name = config.origins_destinations.origins_names self.d_name = config.origins_destinations.destinations_names From 3a36c9ab162fec864f15696d657fe4c3cedb148d Mon Sep 17 00:00:00 2001 From: Ardt Klapwijk Date: Mon, 13 Nov 2023 16:11:17 +0100 Subject: [PATCH 20/35] chore: standardize/fix runner tests --- tests/runners/dummy_classes.py | 8 +++-- tests/runners/test_direct_analysis_runner.py | 16 +++++++--- .../runners/test_indirect_analysis_runner.py | 31 +++++++++++++------ 3 files changed, 39 insertions(+), 16 deletions(-) diff --git a/tests/runners/dummy_classes.py b/tests/runners/dummy_classes.py index 152617070..eff76e54a 100644 --- a/tests/runners/dummy_classes.py +++ b/tests/runners/dummy_classes.py @@ -1,4 +1,6 @@ -from ra2ce.analyses.analysis_config_data.analysis_config_data import AnalysisConfigData +from ra2ce.analyses.analysis_config_data.analysis_config_data import ( + AnalysisConfigData, +) from ra2ce.analyses.analysis_config_wrapper.analysis_config_wrapper_base import ( AnalysisConfigWrapperBase, ) @@ -6,7 +8,7 @@ from ra2ce.graph.network_config_wrapper import 
NetworkConfigWrapper -class DummyAnalysisConfig(AnalysisConfigWrapperBase): +class DummyAnalysisConfigWrapper(AnalysisConfigWrapperBase): def __init__(self) -> None: self.config_data = AnalysisConfigData(analyses=[]) @@ -23,5 +25,5 @@ def is_valid(self) -> bool: class DummyRa2ceInput(ConfigWrapper): def __init__(self) -> None: - self.analysis_config = DummyAnalysisConfig() + self.analysis_config = DummyAnalysisConfigWrapper() self.network_config = NetworkConfigWrapper() diff --git a/tests/runners/test_direct_analysis_runner.py b/tests/runners/test_direct_analysis_runner.py index 10186c034..29619b968 100644 --- a/tests/runners/test_direct_analysis_runner.py +++ b/tests/runners/test_direct_analysis_runner.py @@ -1,7 +1,9 @@ import pytest +from ra2ce.analyses.analysis_config_data.analysis_config_data import ( + AnalysisSectionDirect, +) from ra2ce.configuration.config_wrapper import ConfigWrapper -from ra2ce.graph.network_config_data.network_config_data import NetworkConfigData from ra2ce.runners.direct_analysis_runner import DirectAnalysisRunner from tests.runners.dummy_classes import DummyRa2ceInput @@ -19,7 +21,9 @@ def dummy_ra2ce_input(self): def test_given_direct_configuration_can_run(self, dummy_ra2ce_input: ConfigWrapper): # 1. Define test data. - dummy_ra2ce_input.analysis_config.config_data.direct = "sth" + dummy_ra2ce_input.analysis_config.config_data.analyses = [ + AnalysisSectionDirect(analysis="effectiveness_measures") + ] dummy_ra2ce_input.network_config.config_data.hazard.hazard_map = "A value" # 2. Run test. @@ -44,7 +48,9 @@ def test_given_wrong_network_hazard_configuration_cannot_run( self, dummy_ra2ce_input: ConfigWrapper ): # 1. Define test data. - dummy_ra2ce_input.analysis_config.config_data.direct = "sth" + dummy_ra2ce_input.analysis_config.config_data.analyses = [ + AnalysisSectionDirect(analysis="effectiveness_measures") + ] # 2. Run test. _result = DirectAnalysisRunner.can_run(dummy_ra2ce_input) @@ -56,7 +62,9 @@ def test_given_no_network_config_returns_false( self, dummy_ra2ce_input: ConfigWrapper ): # 1. Define test data. - dummy_ra2ce_input.analysis_config.config_data.direct = "sth" + dummy_ra2ce_input.analysis_config.config_data.analyses = [ + AnalysisSectionDirect(analysis="effectiveness_measures") + ] dummy_ra2ce_input.network_config = None # 2. Run test. diff --git a/tests/runners/test_indirect_analysis_runner.py b/tests/runners/test_indirect_analysis_runner.py index e525bf2a0..205d0d16b 100644 --- a/tests/runners/test_indirect_analysis_runner.py +++ b/tests/runners/test_indirect_analysis_runner.py @@ -1,3 +1,8 @@ +import pytest +from ra2ce.analyses.analysis_config_data.analysis_config_data import ( + AnalysisSectionIndirect, +) +from ra2ce.configuration.config_wrapper import ConfigWrapper from ra2ce.runners.indirect_analysis_runner import IndirectAnalysisRunner from tests.runners.dummy_classes import DummyRa2ceInput @@ -7,25 +12,33 @@ def test_init_direct_analysis_runner(self): _runner = IndirectAnalysisRunner() assert str(_runner) == "Indirect Analysis Runner" - def test_given_direct_configuration_can_run(self): + @pytest.fixture + def dummy_ra2ce_input(self): + _ra2ce_input = DummyRa2ceInput() + assert isinstance(_ra2ce_input, ConfigWrapper) + yield _ra2ce_input + + def test_given_indirect_configuration_can_run( + self, dummy_ra2ce_input: ConfigWrapper + ): # 1. Define test data. 
- _input_config = DummyRa2ceInput() - assert _input_config - _input_config.analysis_config.config_data.indirect = "sth" + dummy_ra2ce_input.analysis_config.config_data.analyses = [ + AnalysisSectionIndirect(analysis="single_link_redundancy") + ] # 2. Run test. - _result = IndirectAnalysisRunner.can_run(_input_config) + _result = IndirectAnalysisRunner.can_run(dummy_ra2ce_input) # 3. Verify expectations. assert _result - def test_given_wrong_analysis_configuration_cannot_run(self): + def test_given_wrong_analysis_configuration_cannot_run( + self, dummy_ra2ce_input: ConfigWrapper + ): # 1. Define test data. - _input_config = DummyRa2ceInput() - assert _input_config # 2. Run test. - _result = IndirectAnalysisRunner.can_run(_input_config) + _result = IndirectAnalysisRunner.can_run(dummy_ra2ce_input) # 3. Verify expectations. assert not _result From 0488f7a718437c37f3d6ea7e1d147f3ada83dea1 Mon Sep 17 00:00:00 2001 From: Ardt Klapwijk Date: Mon, 13 Nov 2023 16:11:34 +0100 Subject: [PATCH 21/35] chore: fix indirect analysis error --- ra2ce/analyses/indirect/analyses_indirect.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ra2ce/analyses/indirect/analyses_indirect.py b/ra2ce/analyses/indirect/analyses_indirect.py index d75c10c85..66a183847 100644 --- a/ra2ce/analyses/indirect/analyses_indirect.py +++ b/ra2ce/analyses/indirect/analyses_indirect.py @@ -1188,7 +1188,7 @@ def _save_gpkg_analysis( ) = analyzer.optimal_route_origin_closest_destination() if ( - analyzer.config["files"]["origins_destinations_graph_hazard"] + analyzer.config.files["origins_destinations_graph_hazard"] is None ): origins = analyzer.load_origins() From 563f2899116b9bc3586c6919ef0436aad83a0fbc Mon Sep 17 00:00:00 2001 From: Ardt Klapwijk Date: Mon, 13 Nov 2023 16:11:57 +0100 Subject: [PATCH 22/35] chore: correct network config types --- .../analysis_config_data/analysis_config_data.py | 14 ++++++++------ .../readers/analysis_config_reader_base.py | 5 ++--- .../analysis_config_wrapper_with_network.py | 3 +-- 3 files changed, 11 insertions(+), 11 deletions(-) diff --git a/ra2ce/analyses/analysis_config_data/analysis_config_data.py b/ra2ce/analyses/analysis_config_data/analysis_config_data.py index 1b7e9d692..339ecbe03 100644 --- a/ra2ce/analyses/analysis_config_data/analysis_config_data.py +++ b/ra2ce/analyses/analysis_config_data/analysis_config_data.py @@ -29,7 +29,7 @@ from ra2ce.common.configuration.config_data_protocol import ConfigDataProtocol from ra2ce.graph.network_config_data.network_config_data import ( - NetworkSection, + NetworkConfigData, OriginsDestinationsSection, ) @@ -54,7 +54,7 @@ class ProjectSection: @dataclass -class AnalysisSection: +class AnalysisSectionBase: name: str = "" analysis: str = "" # should be enum save_gpkg: bool = False @@ -62,7 +62,7 @@ class AnalysisSection: @dataclass -class AnalysisSectionIndirect(AnalysisSection): +class AnalysisSectionIndirect(AnalysisSectionBase): # general weighing: str = "" # should be enum loss_per_distance: str = "" @@ -92,7 +92,7 @@ class AnalysisSectionIndirect(AnalysisSection): @dataclass -class AnalysisSectionDirect(AnalysisSection): +class AnalysisSectionDirect(AnalysisSectionBase): # adaptation/effectiveness measures return_period: float = math.nan repair_costs: float = math.nan @@ -115,12 +115,14 @@ class AnalysisConfigData(ConfigDataProtocol): output_path: Optional[Path] = None static_path: Optional[Path] = None project: ProjectSection = field(default_factory=lambda: ProjectSection()) - analyses: list[AnalysisSection] = 
field(default_factory=list) + analyses: list[AnalysisSectionBase] = field(default_factory=list) files: Optional[dict[str, Path]] = field(default_factory=dict) origins_destinations: Optional[OriginsDestinationsSection] = field( default_factory=lambda: OriginsDestinationsSection() ) - network: Optional[NetworkSection] = field(default_factory=lambda: NetworkSection()) + network: Optional[NetworkConfigData] = field( + default_factory=lambda: NetworkConfigData() + ) def to_dict(self) -> dict: _dict = self.__dict__ diff --git a/ra2ce/analyses/analysis_config_data/readers/analysis_config_reader_base.py b/ra2ce/analyses/analysis_config_data/readers/analysis_config_reader_base.py index 77022ea4a..dc0959a9d 100644 --- a/ra2ce/analyses/analysis_config_data/readers/analysis_config_reader_base.py +++ b/ra2ce/analyses/analysis_config_data/readers/analysis_config_reader_base.py @@ -21,14 +21,13 @@ import logging -import re from configparser import ConfigParser from pathlib import Path from shutil import copyfile from ra2ce.analyses.analysis_config_data.analysis_config_data import ( AnalysisConfigData, - AnalysisSection, + AnalysisSectionBase, AnalysisSectionDirect, AnalysisSectionIndirect, DirectAnalysisNameList, @@ -218,7 +217,7 @@ def _get_analysis_section_direct(self, section_name: str) -> AnalysisSectionDire ) return _section - def get_analysis_sections(self) -> list[AnalysisSection]: + def get_analysis_sections(self) -> list[AnalysisSectionBase]: """ Extracts info from [analysis] sections diff --git a/ra2ce/analyses/analysis_config_wrapper/analysis_config_wrapper_with_network.py b/ra2ce/analyses/analysis_config_wrapper/analysis_config_wrapper_with_network.py index 77e2e9c4d..25e4b60e3 100644 --- a/ra2ce/analyses/analysis_config_wrapper/analysis_config_wrapper_with_network.py +++ b/ra2ce/analyses/analysis_config_wrapper/analysis_config_wrapper_with_network.py @@ -32,12 +32,11 @@ from ra2ce.analyses.analysis_config_wrapper.analysis_config_wrapper_base import ( AnalysisConfigWrapperBase, ) -from ra2ce.graph.network_config_data.network_config_data import NetworkConfigData from ra2ce.graph.network_config_wrapper import NetworkConfigWrapper class AnalysisConfigWrapperWithNetwork(AnalysisConfigWrapperBase): - _network_config: NetworkConfigData + _network_config: NetworkConfigWrapper def __init__(self) -> None: self.config_data = AnalysisConfigData() From 2d2ef2ae965a076a3b4b7e5176592124543fdc46 Mon Sep 17 00:00:00 2001 From: Ardt Klapwijk Date: Mon, 13 Nov 2023 16:22:34 +0100 Subject: [PATCH 23/35] chore: make hazard_names variable explicit --- .../analysis_config_data.py | 1 + ra2ce/analyses/indirect/analyses_indirect.py | 34 +++++++++++-------- .../indirect/origin_closest_destination.py | 4 +-- .../test_origin_closest_destination.py | 2 +- 4 files changed, 24 insertions(+), 17 deletions(-) diff --git a/ra2ce/analyses/analysis_config_data/analysis_config_data.py b/ra2ce/analyses/analysis_config_data/analysis_config_data.py index 339ecbe03..11083658d 100644 --- a/ra2ce/analyses/analysis_config_data/analysis_config_data.py +++ b/ra2ce/analyses/analysis_config_data/analysis_config_data.py @@ -123,6 +123,7 @@ class AnalysisConfigData(ConfigDataProtocol): network: Optional[NetworkConfigData] = field( default_factory=lambda: NetworkConfigData() ) + hazard_names: Optional[list[str]] = field(default_factory=list) def to_dict(self) -> dict: _dict = self.__dict__ diff --git a/ra2ce/analyses/indirect/analyses_indirect.py b/ra2ce/analyses/indirect/analyses_indirect.py index 66a183847..4d42bd10b 100644 --- 
a/ra2ce/analyses/indirect/analyses_indirect.py +++ b/ra2ce/analyses/indirect/analyses_indirect.py @@ -57,6 +57,7 @@ class IndirectAnalyses: config: AnalysisConfigData graphs: dict + hazard_names_df: pd.DataFrame _file_name_key = "File name" _ra2ce_name_key = "RA2CE name" @@ -65,13 +66,14 @@ def __init__(self, config: AnalysisConfigData, graphs: list[Any]): self.config = config self.graphs = graphs if self.config.output_path.joinpath("hazard_names.xlsx").is_file(): - self.hazard_names = pd.read_excel( + self.hazard_names_df = pd.read_excel( self.config.output_path.joinpath("hazard_names.xlsx") ) - # TODO Ardt hazard_names - self.config.hazard_names = list(set(self.hazard_names[self._file_name_key])) + self.config.hazard_names = list( + set(self.hazard_names_df[self._file_name_key]) + ) else: - self.hazard_names = pd.DataFrame(data=None) + self.hazard_names_df = pd.DataFrame(data=None) self.config.hazard_names = list() def single_link_redundancy(self, graph, analysis: AnalysisSectionIndirect): @@ -300,8 +302,9 @@ def multi_link_redundancy(self, graph: dict, analysis: AnalysisSectionIndirect): results = [] master_graph = copy.deepcopy(graph) for hazard in self.config.hazard_names: - hazard_name = self.hazard_names.loc[ - self.hazard_names[self._file_name_key] == hazard, self._ra2ce_name_key + hazard_name = self.hazard_names_df.loc[ + self.hazard_names_df[self._file_name_key] == hazard, + self._ra2ce_name_key, ].values[0] graph = copy.deepcopy(master_graph) @@ -412,8 +415,9 @@ def multi_link_losses(self, gdf, analysis: AnalysisSectionIndirect): results = [] for hazard in self.config.hazard_names: - hazard_name = self.hazard_names.loc[ - self.hazard_names[self._file_name_key] == hazard, self._ra2ce_name_key + hazard_name = self.hazard_names_df.loc[ + self.hazard_names_df[self._file_name_key] == hazard, + self._ra2ce_name_key, ].values[0] gdf_ = gdf.loc[gdf["hazard"] == hazard_name].copy() @@ -604,8 +608,9 @@ def multi_link_origin_destination(self, graph, analysis: AnalysisSectionIndirect all_results = [] for hazard in self.config.hazard_names: - hazard_name = self.hazard_names.loc[ - self.hazard_names[self._file_name_key] == hazard, self._ra2ce_name_key + hazard_name = self.hazard_names_df.loc[ + self.hazard_names_df[self._file_name_key] == hazard, + self._ra2ce_name_key, ].values[0] graph_hz = copy.deepcopy(graph) @@ -871,8 +876,9 @@ def multi_link_isolated_locations( aggregation = pd.DataFrame() for i, hazard in enumerate(self.config.hazard_names): # for each hazard event - hazard_name = self.hazard_names.loc[ - self.hazard_names[self._file_name_key] == hazard, self._ra2ce_name_key + hazard_name = self.hazard_names_df.loc[ + self.hazard_names_df[self._file_name_key] == hazard, + self._ra2ce_name_key, ].values[0] graph_hz_direct = copy.deepcopy(graph) @@ -1150,7 +1156,7 @@ def _save_gpkg_analysis( gdf = self.multi_link_losses(gdf, analysis) elif analysis.analysis == "optimal_route_origin_closest_destination": analyzer = OriginClosestDestination( - self.config, analysis, self.hazard_names + self.config, analysis, self.hazard_names_df ) ( base_graph, @@ -1177,7 +1183,7 @@ def _save_gpkg_analysis( opt_routes.to_csv(csv_path, index=False) elif analysis.analysis == "multi_link_origin_closest_destination": analyzer = OriginClosestDestination( - self.config, analysis, self.hazard_names + self.config, analysis, self.hazard_names_df ) if analysis.calculate_route_without_disruption: diff --git a/ra2ce/analyses/indirect/origin_closest_destination.py b/ra2ce/analyses/indirect/origin_closest_destination.py 
index 30cc679af..5c342c1d7 100644 --- a/ra2ce/analyses/indirect/origin_closest_destination.py +++ b/ra2ce/analyses/indirect/origin_closest_destination.py @@ -50,7 +50,7 @@ def __init__( self, config: AnalysisConfigData, analysis: AnalysisSectionIndirect, - hazard_names: pd.DataFrame, + hazard_names_df: pd.DataFrame, ): self.crs = 4326 # TODO PUT IN DOCUMENTATION OR MAKE CHANGEABLE self.unit = "km" @@ -69,7 +69,7 @@ def __init__( self.analysis = analysis self.config = config - self.hazard_names = hazard_names + self.hazard_names = hazard_names_df self.destination_names = None self.destination_key = None diff --git a/tests/analyses/indirect/test_origin_closest_destination.py b/tests/analyses/indirect/test_origin_closest_destination.py index a8808c21b..c17d87ced 100644 --- a/tests/analyses/indirect/test_origin_closest_destination.py +++ b/tests/analyses/indirect/test_origin_closest_destination.py @@ -28,7 +28,7 @@ def test_init_with_category(self): # 2. Run test. _ocd = OriginClosestDestination( - config=_config_dict, analysis=_analysis, hazard_names=_hazard_names + config=_config_dict, analysis=_analysis, hazard_names_df=_hazard_names ) # 3. Verify expectations. From 0462fb0b5751711736248ffd6d33d2502d6af38a Mon Sep 17 00:00:00 2001 From: Ardt Klapwijk Date: Tue, 14 Nov 2023 09:57:02 +0100 Subject: [PATCH 24/35] chore: have traffic_cols als list --- .../analyses/analysis_config_data/analysis_config_data.py | 2 +- .../readers/analysis_config_reader_base.py | 8 +++++++- ra2ce/analyses/indirect/analyses_indirect.py | 8 ++++---- 3 files changed, 12 insertions(+), 6 deletions(-) diff --git a/ra2ce/analyses/analysis_config_data/analysis_config_data.py b/ra2ce/analyses/analysis_config_data/analysis_config_data.py index 11083658d..849c79b13 100644 --- a/ra2ce/analyses/analysis_config_data/analysis_config_data.py +++ b/ra2ce/analyses/analysis_config_data/analysis_config_data.py @@ -68,7 +68,7 @@ class AnalysisSectionIndirect(AnalysisSectionBase): loss_per_distance: str = "" loss_type: str = "" # should be enum disruption_per_category: str = "" - traffic_cols: str = "" # should be list? 
+ traffic_cols: list[str] = field(default_factory=list) # losses duration_event: float = math.nan duration_disruption: float = math.nan diff --git a/ra2ce/analyses/analysis_config_data/readers/analysis_config_reader_base.py b/ra2ce/analyses/analysis_config_data/readers/analysis_config_reader_base.py index dc0959a9d..8e7f814d6 100644 --- a/ra2ce/analyses/analysis_config_data/readers/analysis_config_reader_base.py +++ b/ra2ce/analyses/analysis_config_data/readers/analysis_config_reader_base.py @@ -46,7 +46,10 @@ class AnalysisConfigReaderBase(ConfigDataReaderProtocol): _parser: ConfigParser def __init__(self) -> None: - self._parser = ConfigParser(inline_comment_prefixes="#") + self._parser = ConfigParser( + inline_comment_prefixes="#", + converters={"list": lambda x: [x.strip() for x in x.split(",")]}, + ) def read(self, ini_file: Path) -> AnalysisConfigData: if not isinstance(ini_file, Path) or not ini_file.is_file(): @@ -98,6 +101,9 @@ def _get_analysis_section_indirect( section_name, "save_csv", fallback=_section.save_csv ) # losses + _section.traffic_cols = self._parser.getlist( + section_name, "traffic_cols", fallback=_section.traffic_cols + ) _section.duration_event = self._parser.getfloat( section_name, "duration_event", diff --git a/ra2ce/analyses/indirect/analyses_indirect.py b/ra2ce/analyses/indirect/analyses_indirect.py index 4d42bd10b..f306ef4e4 100644 --- a/ra2ce/analyses/indirect/analyses_indirect.py +++ b/ra2ce/analyses/indirect/analyses_indirect.py @@ -178,7 +178,7 @@ def _single_link_losses_uniform( losses_df: pd.DataFrame, ): for hz in self.config.hazard_names: - for col in analysis.traffic_cols.split(","): + for col in analysis.traffic_cols: try: assert gdf[col + "_detour_losses"] assert gdf[col + "_nodetour_losses"] @@ -255,7 +255,7 @@ def _single_link_losses_categorized( 0 ] - for col in analysis.traffic_cols.split(","): + for col in analysis.traffic_cols: try: assert gdf[col + "_detour_losses"] assert gdf[col + "_nodetour_losses"] @@ -424,7 +424,7 @@ def multi_link_losses(self, gdf, analysis: AnalysisSectionIndirect): if ( analysis.loss_type == "uniform" ): # assume uniform threshold for disruption - for col in analysis.traffic_cols.split(","): + for col in analysis.traffic_cols: # detour_losses = traffic_per_day[veh/day] * detour_distance[meter] * cost_per_meter[USD/meter/vehicle] * duration_disruption[hour] / 24[hour/day] gdf_.loc[gdf_["connected"] == 1, col + "_losses_detour"] = ( gdf_[col] @@ -491,7 +491,7 @@ def multi_link_losses(self, gdf, analysis: AnalysisSectionIndirect): 0 ] - for col in analysis.traffic_cols.split(","): + for col in analysis.traffic_cols: # detour_losses = traffic_per_day[veh/day] * detour_distance[meter] * cost_per_meter[USD/meter/vehicle] * duration_disruption[hour] / 24[hour/day] gdf_.loc[gdf_["connected"] == 1, col + "_losses_detour"] = ( gdf_[col] From 713493ea3eaa80f61a8a8c18fafd2b2d882487d8 Mon Sep 17 00:00:00 2001 From: Ardt Klapwijk Date: Tue, 14 Nov 2023 09:57:14 +0100 Subject: [PATCH 25/35] chore: small boy scout changes --- ra2ce/analyses/indirect/origin_closest_destination.py | 3 ++- tests/test_acceptance.py | 4 ++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/ra2ce/analyses/indirect/origin_closest_destination.py b/ra2ce/analyses/indirect/origin_closest_destination.py index 5c342c1d7..c0abc6cfd 100644 --- a/ra2ce/analyses/indirect/origin_closest_destination.py +++ b/ra2ce/analyses/indirect/origin_closest_destination.py @@ -24,6 +24,7 @@ import copy import logging from typing import Optional, Union +from pathlib 
import Path import geopandas as gpd import networkx as nx @@ -80,7 +81,7 @@ def __init__( self.results_dict = {} @staticmethod - def read(graph_file): + def read(graph_file: Path): _pickle_reader = GraphPickleReader() g = _pickle_reader.read(graph_file) return g diff --git a/tests/test_acceptance.py b/tests/test_acceptance.py index a92025a13..ee786e96b 100644 --- a/tests/test_acceptance.py +++ b/tests/test_acceptance.py @@ -1,7 +1,7 @@ import shutil from itertools import chain from pathlib import Path -from typing import Dict, Iterator, Optional +from typing import Iterator, Optional import pytest from click.testing import CliRunner @@ -162,7 +162,7 @@ def test_indirect_analysis( self, case_data_dir: Path, expected_graph_files: list[str], - expected_analysis_files: Dict[str, list[str]], + expected_analysis_files: dict[str, list[str]], ): """To test the graph and network creation from a shapefile. Also applies line segmentation for the network.""" # 1. Given test data From ec954841a826ebf4dd9f44c31631d466ea335076 Mon Sep 17 00:00:00 2001 From: Ardt Klapwijk Date: Tue, 14 Nov 2023 11:40:17 +0100 Subject: [PATCH 26/35] chore: fix review comments --- .../analysis_config_data.py | 8 +- .../analysis_config_wrapper_base.py | 2 - ra2ce/analyses/direct/analyses_direct.py | 6 +- ra2ce/analyses/indirect/losses.py | 27 ++-- .../configuration/config_wrapper_protocol.py | 2 +- ra2ce/common/io/readers/ini_file_reader.py | 118 ------------------ ra2ce/common/validation/validation_report.py | 4 +- ra2ce/runners/direct_analysis_runner.py | 2 +- ra2ce/runners/indirect_analysis_runner.py | 2 +- .../test_analysis_config_data.py | 33 +++++ tests/analyses/indirect/test_losses.py | 7 +- .../common/io/readers/test_ini_file_reader.py | 28 ----- 12 files changed, 62 insertions(+), 177 deletions(-) delete mode 100644 ra2ce/common/io/readers/ini_file_reader.py delete mode 100644 tests/common/io/readers/test_ini_file_reader.py diff --git a/ra2ce/analyses/analysis_config_data/analysis_config_data.py b/ra2ce/analyses/analysis_config_data/analysis_config_data.py index 849c79b13..b37f0c7da 100644 --- a/ra2ce/analyses/analysis_config_data/analysis_config_data.py +++ b/ra2ce/analyses/analysis_config_data/analysis_config_data.py @@ -102,8 +102,8 @@ class AnalysisSectionDirect(AnalysisSectionBase): climate_period: float = math.nan # road damage damage_curve: str = "" - event_type: str = "" - risk_calculation: str = "" + event_type: str = "" # should be enum + risk_calculation: str = "" # should be enum create_table: bool = False file_name: Optional[Path] = None @@ -134,7 +134,7 @@ def to_dict(self) -> dict: return _dict @property - def direct(self): + def direct(self) -> list[DirectAnalysisNameList]: return list( analysis for analysis in self.analyses @@ -142,7 +142,7 @@ def direct(self): ) @property - def indirect(self): + def indirect(self) -> list[IndirectAnalysisNameList]: return list( analysis for analysis in self.analyses diff --git a/ra2ce/analyses/analysis_config_wrapper/analysis_config_wrapper_base.py b/ra2ce/analyses/analysis_config_wrapper/analysis_config_wrapper_base.py index 743abaaaf..605c3ed54 100644 --- a/ra2ce/analyses/analysis_config_wrapper/analysis_config_wrapper_base.py +++ b/ra2ce/analyses/analysis_config_wrapper/analysis_config_wrapper_base.py @@ -55,8 +55,6 @@ def initialize_output_dirs(self) -> None: def _create_output_folders(analysis_type: str) -> None: # Create the output folders - if not hasattr(self.config_data, analysis_type): - return for a in getattr(self.config_data, analysis_type): 
                output_path = self.config_data.output_path / a.analysis
                output_path.mkdir(parents=True, exist_ok=True)
diff --git a/ra2ce/analyses/direct/analyses_direct.py b/ra2ce/analyses/direct/analyses_direct.py
index 3bcdef3cd..8178fe3fe 100644
--- a/ra2ce/analyses/direct/analyses_direct.py
+++ b/ra2ce/analyses/direct/analyses_direct.py
@@ -159,8 +159,10 @@ def road_damage(self, analysis: AnalysisSectionDirect) -> gpd.GeoDataFrame:
             manual_damage_functions=manual_damage_functions,
         )
-        if "risk_calculation" in analysis:  # Check if risk_calculation is demanded
-            if analysis.risk_calculation != "none":
+        if analysis.risk_calculation:
+            if (
+                analysis.risk_calculation != "none"
+            ):  # Check if risk_calculation is demanded
                 return_period_gdf.control_risk_calculation(
                     mode=analysis.risk_calculation
                 )
diff --git a/ra2ce/analyses/indirect/losses.py b/ra2ce/analyses/indirect/losses.py
index aef281322..4c68bffbe 100644
--- a/ra2ce/analyses/indirect/losses.py
+++ b/ra2ce/analyses/indirect/losses.py
@@ -20,6 +20,7 @@
 """
+from pathlib import Path
 
 import numpy as np
 import pandas as pd
@@ -31,15 +32,14 @@ class Losses:
     def __init__(self, config: AnalysisConfigData, analysis: AnalysisSectionIndirect):
-        self.config = config
-        self.analysis = analysis
-        self.duration = analysis.duration_event
-        self.duration_disr = analysis.duration_disruption
-        self.detour_traffic = analysis.fraction_detour
-        self.traffic_throughput = analysis.fraction_drivethrough
-        self.rest_capacity = analysis.rest_capacity
-        self.maximum = analysis.maximum_jam
-        self.partofday = analysis.partofday
+        self.losses_input_path: Path = config.input_path.joinpath("losses")
+        self.duration: float = analysis.duration_event
+        self.duration_disr: float = analysis.duration_disruption
+        self.detour_traffic: float = analysis.fraction_detour
+        self.traffic_throughput: float = analysis.fraction_drivethrough
+        self.rest_capacity: float = analysis.rest_capacity
+        self.maximum: float = analysis.maximum_jam
+        self.partofday: str = analysis.partofday
 
     @staticmethod
     def vehicle_loss_hours(path):
@@ -183,10 +183,7 @@ def calculate_losses_from_table(self):
         #TODO: if yes: read gdf
         #TODO: koppelen van VVU aan de directe schade berekeningen
         """
-
-        traffic_data = self.load_df(
-            self.config.input_path / "losses", "traffic_intensities.csv"
-        )
+        traffic_data = self.load_df(self.losses_input_path, "traffic_intensities.csv")
         dict1 = {
             "AS_VTG": "evening_total",
             "AS_FRGT": "evening_freight",
@@ -204,7 +201,7 @@ def calculate_losses_from_table(self):
         }
         traffic_data.rename(columns=dict1, inplace=True)
 
-        detour_data = self.load_df(self.config.input_path / "losses", "detour_data.csv")
+        detour_data = self.load_df(self.losses_input_path, "detour_data.csv")
         dict2 = {
             "VA_AV_HWN": "detour_time_evening",
             "VA_RD_HWN": "detour_time_remaining",
@@ -213,6 +210,6 @@ def calculate_losses_from_table(self):
         }
         detour_data.rename(columns=dict2, inplace=True)
 
-        vehicle_loss_hours = self.vehicle_loss_hours(self.config.input_path / "losses")
+        vehicle_loss_hours = self.vehicle_loss_hours(self.losses_input_path)
         vlh = self.calc_vlh(traffic_data, vehicle_loss_hours, detour_data)
         return vlh
diff --git a/ra2ce/common/configuration/config_wrapper_protocol.py b/ra2ce/common/configuration/config_wrapper_protocol.py
index c71275c0c..bf3301390 100644
--- a/ra2ce/common/configuration/config_wrapper_protocol.py
+++ b/ra2ce/common/configuration/config_wrapper_protocol.py
@@ -23,7 +23,7 @@
 from __future__ import annotations
 
 from pathlib import Path
-from typing import Any, Optional, Protocol, runtime_checkable
+from 
typing import Optional, Protocol, runtime_checkable from ra2ce.common.configuration.config_data_protocol import ConfigDataProtocol diff --git a/ra2ce/common/io/readers/ini_file_reader.py b/ra2ce/common/io/readers/ini_file_reader.py deleted file mode 100644 index 7aee44457..000000000 --- a/ra2ce/common/io/readers/ini_file_reader.py +++ /dev/null @@ -1,118 +0,0 @@ -""" - GNU GENERAL PUBLIC LICENSE - Version 3, 29 June 2007 - - Risk Assessment and Adaptation for Critical Infrastructure (RA2CE). - Copyright (C) 2023 Stichting Deltares - - This program is free software: you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program. If not, see . -""" - - -import codecs -from ast import literal_eval -from configparser import ConfigParser -from pathlib import Path - -import numpy as np - -from ra2ce.common.io.readers.file_reader_protocol import FileReaderProtocol - - -class IniFileReader(FileReaderProtocol): - """ - DEPRECATED: Implement instead your own reader such as done for the networks (`ra2ce.graph.network_config_data.network_config_data_reader.py`) - """ - - def read(self, ini_file: Path) -> dict: - return self._parse_config(ini_file) - - def _parse_config(self, path: Path = None, opt_cli=None) -> dict: - """Adjusted from HydroMT - source: https://github.com/Deltares/hydromt/blob/af4e5d858b0ac0883719ca59e522053053c21b82/hydromt/cli/cli_utils.py - """ - opt = {} - if path is not None and path.is_file(): - opt = self._configread( - path, abs_path=False - ) # Set from True to False 29-7-2021 by Frederique - # make sure paths in config section are not abs paths - if ( - "setup_config" in opt - ): # BELOW IS CURRENTLY NOT USED IN RA2CE BUT COULD BE GOOD FOR FUTURE LINKAGE WITH HYDROMT - opt["setup_config"].update(self._configread(path).get("config", {})) - elif path is not None: - raise IOError(f"Config not found at {path}") - if ( - opt_cli is not None - ): # BELOW IS CURRENTLY NOT USED IN RA2CE BUT COULD BE GOOD FOR FUTURE LINKAGE WITH HYDROMT - for section in opt_cli: - if not isinstance(opt_cli[section], dict): - raise ValueError( - "No section found in --opt values: " - "use
.