Merge branch 'master' into chore/352-osdamage-analysis-of-the-damage-module-returns-a-tuple
sahand-asgarpour authored May 24, 2024
2 parents eb54017 + 989cf0f commit 76afa93
Showing 7 changed files with 127 additions and 102 deletions.
2 changes: 1 addition & 1 deletion examples/data/multi_link_losses/analysis.ini
@@ -4,7 +4,7 @@ name = beira
[analysis1]
name = beira_multi_link_losses
analysis = multi_link_losses
threshold = 0.5
threshold = 0
weighing = time
duration_event = 600
production_loss_per_capita_per_day = 1000
1 change: 1 addition & 0 deletions examples/data/single_link_losses/analysis.ini
@@ -5,6 +5,7 @@ name = beira
name = beira_single_losses
analysis = single_link_losses
weighing = time
threshold = 0
duration_event = 600
production_loss_per_capita_per_day = 1000
part_of_day = day
2 changes: 1 addition & 1 deletion ra2ce/analysis/analysis_config_data/analysis_config_data.py
@@ -108,7 +108,7 @@ class AnalysisSectionIndirect(AnalysisSectionBase):
# the redundancy analysis) and the intensities
# accessibility analyses
aggregate_wl: AggregateWlEnum = field(default_factory=lambda: AggregateWlEnum.NONE)
threshold: float = math.nan
threshold: float = 0.0
threshold_destinations: float = math.nan
uniform_duration: float = math.nan
gdp_percapita: float = math.nan
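The common thread in the three config-side changes above is the hazard threshold: the example .ini files and the AnalysisSectionIndirect dataclass now default threshold to 0 instead of 0.5 or math.nan, so any link with a recorded hazard intensity counts as disrupted. A minimal sketch of what that default implies for the filtering in losses.py, assuming a pandas DataFrame with the EV1_ma column named in the diff:

import pandas as pd

links = pd.DataFrame(
    {
        "link_id": [1, 2, 3],
        "EV1_ma": [0.0, 0.3, 1.2],  # aggregated water level per link
    }
)

threshold = 0  # new default: any non-zero intensity marks a link as disrupted

# With threshold = 0, links 2 and 3 are kept; with the old example value of
# 0.5, only link 3 would survive the filter.
disrupted = links[links["EV1_ma"] > threshold]
print(disrupted["link_id"].tolist())  # [2, 3]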
146 changes: 80 additions & 66 deletions ra2ce/analysis/indirect/losses.py
@@ -217,26 +217,23 @@ def _get_disrupted_criticality_analysis_results(
else:
criticality_analysis = criticality_analysis.drop_duplicates(["u", "v"])

if self.analysis.analysis == AnalysisIndirectEnum.SINGLE_LINK_LOSSES:
# filter out all links not affected by the hazard
if self.analysis.aggregate_wl == AggregateWlEnum.NONE:
self.criticality_analysis = criticality_analysis[
criticality_analysis["EV1_ma"] != 0
]
elif self.analysis.aggregate_wl == AggregateWlEnum.MAX:
self.criticality_analysis = criticality_analysis[
criticality_analysis["EV1_max"] != 0
]
elif self.analysis.aggregate_wl == AggregateWlEnum.MEAN:
self.criticality_analysis = criticality_analysis[
criticality_analysis["EV1_mean"] != 0
]
elif self.analysis.aggregate_wl == AggregateWlEnum.MIN:
self.criticality_analysis = criticality_analysis[
criticality_analysis["EV1_min"] != 0
]
else:
self.criticality_analysis = criticality_analysis
# filter out all links not affected by the hazard
if self.analysis.aggregate_wl == AggregateWlEnum.NONE:
self.criticality_analysis = criticality_analysis[
criticality_analysis["EV1_ma"] > self.analysis.threshold
]
elif self.analysis.aggregate_wl == AggregateWlEnum.MAX:
self.criticality_analysis = criticality_analysis[
criticality_analysis["EV1_max"] > self.analysis.threshold
]
elif self.analysis.aggregate_wl == AggregateWlEnum.MEAN:
self.criticality_analysis = criticality_analysis[
criticality_analysis["EV1_mean"] > self.analysis.threshold
]
elif self.analysis.aggregate_wl == AggregateWlEnum.MIN:
self.criticality_analysis = criticality_analysis[
criticality_analysis["EV1_min"] > self.analysis.threshold
]

self.criticality_analysis_non_disrupted = criticality_analysis[
~criticality_analysis.index.isin(self.criticality_analysis.index)
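The rewritten filter above now runs for both single- and multi-link losses and compares against the configured threshold instead of a hard-coded != 0, dispatching on aggregate_wl to select the matching EV1_* column. The same dispatch in compact form — a sketch in which the enum-to-suffix mapping ("ma" for NONE, otherwise the aggregation name) is inferred from the diff, not taken from the ra2ce enum itself:

from enum import Enum

import pandas as pd


class AggregateWl(Enum):  # reduced stand-in for ra2ce's AggregateWlEnum
    NONE = "ma"
    MAX = "max"
    MEAN = "mean"
    MIN = "min"


def filter_disrupted(
    criticality: pd.DataFrame, aggregate_wl: AggregateWl, threshold: float
) -> pd.DataFrame:
    # Select the aggregated water-level column and keep links above threshold.
    column = f"EV1_{aggregate_wl.value}"
    return criticality[criticality[column] > threshold]


links = pd.DataFrame({"EV1_max": [0.0, 0.8]})
print(filter_disrupted(links, AggregateWl.MAX, 0))  # keeps only the second row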
@@ -272,7 +269,7 @@ def _get_intensities_simplified_graph(self) -> pd.DataFrame:

row_data = max_intensities.squeeze()
else:
row_data = self.intensities.loc[index]
row_data = self.intensities.loc[int(index)]

_intensities_simplified_graph_list.append(row_data)
_intensities_simplified_graph = pd.DataFrame(
@@ -306,7 +303,9 @@ def _get_range(height: float) -> str:
return f"{x}-{y}"
raise ValueError(f"No matching range found for height {height}")

def _create_result(vlh: gpd.GeoDataFrame) -> gpd.GeoDataFrame:
def _create_result(
vlh: gpd.GeoDataFrame, connectivity_attribute: str
) -> gpd.GeoDataFrame:
"""
Args: vlh: calculated vehicle_loss_hours GeoDataFrame. For single_link_losses it only includes the
@@ -317,38 +316,38 @@ def _create_result(vlh: gpd.GeoDataFrame) -> gpd.GeoDataFrame:
Multi_link_losses this is not necessary because of the underlying multi_link_redundancy analysis.
"""
if self.analysis.analysis == AnalysisIndirectEnum.MULTI_LINK_LOSSES:
return vlh
columns_without_index = [
col
for col in self.criticality_analysis_non_disrupted.columns
if col not in ["level_0"]
]
# Get the vlh_columns from vehicle_loss_hours that vlh calculations are filled in.
vlh_columns = list(
set(vlh.columns)
- set(
self.criticality_analysis_non_disrupted[
columns_without_index
].columns
)
)
vlh[vlh_columns] = vlh[vlh_columns].fillna(0)

result = pd.concat(
[
vlh,
self.criticality_analysis_non_disrupted[
[
f"{self.link_id}",
f"{self.link_type_column}",
"geometry",
f"{self.performance_metric}",
"detour",
]
+ list(events.columns)
],
self.criticality_analysis_non_disrupted[columns_without_index],
]
)
result = result.reset_index()

# Get the columns from vehicle_loss_hours that are not in common
additional_columns = list(
set(vlh.columns) - set(self.criticality_analysis_non_disrupted.columns)
)
# Fill 0 for the vlh_columns of vlh and self.criticality_analysis_non_disrupted
result.loc[result.index.difference(vlh.index), vlh_columns] = result.loc[
result.index.difference(vlh.index), vlh_columns
].fillna(0)
for col in ["index", "level_0"]:
if col in result.columns:
result = result.drop(col, axis=1)

# Fill 0 for the additional columns of self.criticality_analysis_non_disrupted
result.loc[
result.index.difference(vlh.index), additional_columns
] = result.loc[
result.index.difference(vlh.index), additional_columns
].fillna(
0
)
return result

_check_validity_criticality_analysis()
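_create_result now builds the final frame by concatenating the disrupted rows (which carry the computed losses) with the non-disrupted rows from the redundancy analysis, then zero-filling the loss columns for rows that never entered the VLH calculation. The diff restricts the fill to the non-disrupted rows via index.difference; the sketch below, with hypothetical column names, fills every NaN in those columns, which amounts to the same thing once the disrupted rows are fully populated:

import pandas as pd

# Disrupted links with computed vehicle loss hours; non-disrupted links only
# carry the shared network columns (hypothetical, reduced example).
vlh = pd.DataFrame({"link_id": [1, 2], "vlh_total": [12.5, 3.0]})
non_disrupted = pd.DataFrame({"link_id": [3, 4]})

result = pd.concat([vlh, non_disrupted]).reset_index(drop=True)

# Loss columns exist only on the VLH side, so they are NaN for the
# non-disrupted links; fill them with 0 to mean "no loss".
vlh_columns = list(set(vlh.columns) - set(non_disrupted.columns))
result[vlh_columns] = result[vlh_columns].fillna(0)
print(result)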
@@ -383,7 +382,8 @@ def _create_result(vlh: gpd.GeoDataFrame) -> gpd.GeoDataFrame:
# Check if the index name exists in the columns
if vehicle_loss_hours_df.index.name in vehicle_loss_hours_df.columns:
vehicle_loss_hours_df.reset_index(drop=True, inplace=True)
vehicle_loss_hours_df.reset_index(inplace=True)
else:
vehicle_loss_hours_df.reset_index(inplace=True)

# find the link_type and the hazard intensity
connectivity_attribute = None
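The added else branch above prevents pandas from raising "cannot insert <name>, already exists" when the frame's index name collides with one of its columns: in that case the old index is dropped before a fresh integer index is exposed. For illustration:

import pandas as pd

df = pd.DataFrame({"link_id": [10, 11]})
df.index.name = "link_id"  # index name collides with an existing column

# A bare df.reset_index() would raise "cannot insert link_id, already
# exists", so drop the named index first, then expose a fresh one.
if df.index.name in df.columns:
    df.reset_index(drop=True, inplace=True)
    df.reset_index(inplace=True)
else:
    df.reset_index(inplace=True)

print(df.columns.tolist())  # ['index', 'link_id']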
@@ -395,18 +395,12 @@ def _create_result(vlh: gpd.GeoDataFrame) -> gpd.GeoDataFrame:
if "detour" in self.criticality_analysis.columns
else "connected"
)

vlh_additional_columns = self.criticality_analysis.columns.difference(
vehicle_loss_hours_df.columns
).tolist()
vehicle_loss_hours_df = pd.merge(
vehicle_loss_hours_df,
self.criticality_analysis[
[
f"{self.link_type_column}",
"geometry",
f"{self.performance_metric}",
connectivity_attribute,
]
+ list(events.columns)
],
self.criticality_analysis[vlh_additional_columns],
left_on=self.link_id,
right_index=True,
)
@@ -418,6 +412,7 @@ def _create_result(vlh: gpd.GeoDataFrame) -> gpd.GeoDataFrame:
for event in events.columns.tolist():
for _, vlh_row in vehicle_loss_hours.iterrows():
row_hazard_range = _get_range(vlh_row[event])
row_connectivity = vlh_row[connectivity_attribute]
row_performance_changes = performance_change.loc[
[vlh_row[self.link_id]]
]
@@ -445,11 +440,16 @@ def _create_result(vlh: gpd.GeoDataFrame) -> gpd.GeoDataFrame:
performance_row[-1]["v"],
performance_key,
)
if math.isnan(row_performance_change):
if (
math.isnan(row_performance_change) and row_connectivity == 0
) or row_performance_change == 0:
self._calculate_production_loss_per_capita(
vehicle_loss_hours, vlh_row, event
)
elif (u, v, k) == row_u_v_k:
elif not (
math.isnan(row_performance_change)
and math.isnan(row_connectivity)
) and ((u, v, k) == row_u_v_k):
self._populate_vehicle_loss_hour(
vehicle_loss_hours,
row_hazard_range,
@@ -458,7 +458,9 @@ def _create_result(vlh: gpd.GeoDataFrame) -> gpd.GeoDataFrame:
event,
)

vehicle_loss_hours_result = _create_result(vehicle_loss_hours)
vehicle_loss_hours_result = _create_result(
vehicle_loss_hours, connectivity_attribute
)
return vehicle_loss_hours_result

def _calculate_production_loss_per_capita(
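The widened condition above decides, per link and hazard event, between two outcomes: book a production loss when the link shows no usable performance change (the change is NaN while the link is disconnected, or the change is exactly 0), or compute vehicle loss hours from the detour otherwise. Reduced to plain floats — and leaving out the (u, v, k) alignment that the real loop also checks — the branch behaves like this sketch:

import math


def classify(performance_change: float, connectivity: float) -> str:
    # Production loss: a disrupted link with no usable detour effect.
    if (
        math.isnan(performance_change) and connectivity == 0
    ) or performance_change == 0:
        return "production_loss"
    # Otherwise compute VLH, unless both values are undefined.
    if not (math.isnan(performance_change) and math.isnan(connectivity)):
        return "vehicle_loss_hours"
    return "skip"


print(classify(math.nan, 0))         # production_loss (disconnected link)
print(classify(0.0, 1))              # production_loss (no change)
print(classify(150.0, 1))            # vehicle_loss_hours
print(classify(math.nan, math.nan))  # skip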
@@ -524,13 +526,13 @@ def _populate_vehicle_loss_hour(
self.resilience_curve["link_type_hazard_intensity"]
== link_type_hazard_range
]

disruption = (
(
row_relevant_curve["duration_steps"].apply(pd.Series)
* (row_relevant_curve["functionality_loss_ratio"]).apply(
pd.Series
)
/ 100
).sum(axis=1)
).squeeze()
if disruption > max_disruption:
@@ -548,6 +550,15 @@ def _create_result(vlh: gpd.GeoDataFrame) -> gpd.GeoDataFrame:
raise Exception(
f"""{link_type_hazard_range} was not found in the introduced resilience_curve"""
)

divisor = 100
if all(
ratio <= 1
for ratio_tuple in relevant_curve["functionality_loss_ratio"]
for ratio in ratio_tuple
):
divisor = 1

duration_steps: list = relevant_curve["duration_steps"].item()
functionality_loss_ratios: list = relevant_curve[
"functionality_loss_ratio"
@@ -564,11 +575,14 @@ def _create_result(vlh: gpd.GeoDataFrame) -> gpd.GeoDataFrame:
)

vlh_trip_type_event_series = sum(
intensity_trip_type
* duration
* loss_ratio
* performance_change
* vot_trip_type
(
intensity_trip_type
* duration
* loss_ratio
* performance_change
* vot_trip_type
)
/ divisor
for duration, loss_ratio in zip(
duration_steps, functionality_loss_ratios
)
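The new divisor makes the resilience-curve units explicit: when every functionality_loss_ratio in the matched curve is at most 1 the ratios are read as fractions (divisor 1), otherwise as percentages (divisor 100). The per-trip-type loss then accumulates over the recovery steps as sketched below, with made-up numbers rather than the project's real curve data:

# One resilience curve: three recovery steps and their loss ratios.
duration_steps = [24.0, 48.0, 96.0]              # hours per recovery step
functionality_loss_ratios = [100.0, 50.0, 10.0]  # expressed in percent here

# Percent vs. fraction detection, mirroring the diff.
divisor = 1 if all(r <= 1 for r in functionality_loss_ratios) else 100

intensity_trip_type = 120.0  # trips of this type over the link
performance_change = 0.25    # extra hours per trip due to the detour
vot_trip_type = 9.0          # value of time, currency per hour

vlh = sum(
    (intensity_trip_type * duration * loss_ratio * performance_change * vot_trip_type)
    / divisor
    for duration, loss_ratio in zip(duration_steps, functionality_loss_ratios)
)
print(vlh)  # 15552.0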
75 changes: 42 additions & 33 deletions ra2ce/analysis/indirect/multi_link_redundancy.py
@@ -6,7 +6,6 @@
import numpy as np
import osmnx
import pandas as pd
from geopandas import GeoDataFrame

from ra2ce.analysis.analysis_config_data.analysis_config_data import (
AnalysisSectionIndirect,
@@ -40,36 +39,47 @@ def __init__(
self.output_path = analysis_input.output_path
self.hazard_names = analysis_input.hazard_names

def _update_time(self, gdf_calculated: pd.DataFrame, gdf_graph: gpd.GeoDataFrame):
def _update_time(
self, df_calculated: pd.DataFrame, gdf_graph: gpd.GeoDataFrame
) -> tuple[pd.DataFrame, gpd.GeoDataFrame]:
"""
updates the time column with the calculated dataframe and updates the rest of the gdf_graph if time is None.
"""
if (
WeighingEnum.TIME.config_value in gdf_graph.columns
and WeighingEnum.TIME.config_value in df_calculated.columns
):
df_calculated = df_calculated.drop(columns=[WeighingEnum.TIME.config_value])
return df_calculated, gdf_graph

if (
WeighingEnum.TIME.config_value not in gdf_graph.columns
or WeighingEnum.TIME.config_value not in gdf_calculated.columns
and WeighingEnum.TIME.config_value not in df_calculated.columns
):
return gdf_graph
gdf_graph[WeighingEnum.TIME.config_value] = gdf_calculated[
WeighingEnum.TIME.config_value
]
for i, row in gdf_graph.iterrows():
row_avgspeed = row.get("avgspeed", None)
row_length = row.get("length", None)
if (
pd.isna(row[WeighingEnum.TIME.config_value])
and row_avgspeed
and row_length
):
gdf_graph.at[i, WeighingEnum.TIME.config_value] = (
row_length * 1e-3 / row_avgspeed
)
else:
gdf_graph.at[i, WeighingEnum.TIME.config_value] = row.get(
WeighingEnum.TIME.config_value, None
)
return gdf_graph
return df_calculated, gdf_graph

def execute(self) -> GeoDataFrame:
elif WeighingEnum.TIME.config_value in df_calculated.columns:
gdf_graph[WeighingEnum.TIME.config_value] = df_calculated[
WeighingEnum.TIME.config_value
]
for i, row in gdf_graph.iterrows():
row_avgspeed = row.get("avgspeed", None)
row_length = row.get("length", None)
if (
pd.isna(row[WeighingEnum.TIME.config_value])
and row_avgspeed
and row_length
):
gdf_graph.at[i, WeighingEnum.TIME.config_value] = (
row_length * 1e-3 / row_avgspeed
)
else:
gdf_graph.at[i, WeighingEnum.TIME.config_value] = row.get(
WeighingEnum.TIME.config_value, None
)
return df_calculated, gdf_graph

def execute(self) -> gpd.GeoDataFrame:
"""Calculates the multi-link redundancy of a NetworkX graph.
The function removes all links of a variable that have a minimum value
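Alongside the signature change, _update_time now returns both frames: it drops the duplicated time column from the calculated frame when the graph already carries one (so the later merge cannot produce time_x/time_y suffixes), and otherwise back-fills missing times from length and average speed. That fallback is the unit conversion below (length in metres, speed in km/h, time in hours):

import numpy as np
import pandas as pd

gdf = pd.DataFrame(
    {
        "time": [np.nan, 0.5],
        "length": [1500.0, 2000.0],  # metres
        "avgspeed": [60.0, 80.0],    # km/h
    }
)

# length * 1e-3 converts metres to kilometres; dividing by km/h gives hours.
missing = gdf["time"].isna() & gdf["avgspeed"].notna() & gdf["length"].notna()
gdf.loc[missing, "time"] = gdf["length"] * 1e-3 / gdf["avgspeed"]
print(gdf["time"].tolist())  # [0.025, 0.5]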
@@ -157,13 +167,12 @@ def _is_not_none(value):
alt_value = _weighing_analyser.calculate_distance()
alt_nodes, connected = np.NaN, 0

diff = round(
alt_value
- _weighing_analyser.weighing_data[
self.analysis.weighing.config_value
],
3,
)
current_value = _weighing_analyser.weighing_data[
self.analysis.weighing.config_value
]
if not current_value: # if None
current_value = np.nan
diff = round(alt_value - current_value, 3)

data = {
"u": [u],
@@ -186,14 +195,14 @@ def _is_not_none(value):
errors="coerce",
)

df_calculated, gdf = self._update_time(df_calculated, gdf)

# Merge the dataframes
if "rfid" in gdf:
gdf = gdf.merge(df_calculated, how="left", on=["u", "v", "rfid"])
else:
gdf = gdf.merge(df_calculated, how="left", on=["u", "v"])

gdf = self._update_time(df_calculated, gdf)

gdf["hazard"] = hazard_name

results.append(gdf)
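Two related fixes land in execute: the baseline weighing value is coerced to np.nan when the graph stores None (so the subtraction can no longer raise a TypeError), and _update_time now runs before the merge so only one time column survives. A reduced sketch of the None guard, with hypothetical values; note that the truthiness test also treats a stored 0 as missing, exactly like the diff's "if not current_value" check:

import numpy as np

alt_value = 0.75      # weighing of the alternative route, in hours
current_value = None  # graph edges may store None instead of a number

# None - float raises TypeError; np.nan propagates cleanly through round().
if not current_value:
    current_value = np.nan
diff = round(alt_value - current_value, 3)
print(diff)  # nan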
2 changes: 1 addition & 1 deletion ra2ce/analysis/indirect/single_link_redundancy.py
@@ -84,7 +84,7 @@ def execute(self) -> GeoDataFrame:
- _weighing_analyser.weighing_data[
self.analysis.weighing.config_value
],
2,
7,
)
)

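Raising the rounding precision from 2 to 7 decimals matters because the weighing is typically time in hours: small detours produce differences well below 0.005 h (18 seconds), which round(..., 2) silently collapses to 0.0. For example:

# A 10-second detour expressed in hours.
diff_hours = 10 / 3600

print(round(diff_hours, 2))  # 0.0        -> the detour disappears
print(round(diff_hours, 7))  # 0.0027778  -> preserved at 7 decimals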