Skip to content

Commit 8bbbe1e

Browse files
committed
Homogenize the cooling constraints implementation
1 parent 1609f0e commit 8bbbe1e

File tree

1 file changed

+180
-87
lines changed

1 file changed

+180
-87
lines changed

message_ix_models/model/water/data/water_for_ppl.py

+180-87
Original file line number · Diff line number · Diff line change
@@ -198,6 +198,57 @@ def hist_cap(x: pd.Series, context: "Context", hold_cost: pd.DataFrame) -> list:
198198
]
199199

200200

201+
def apply_act_cap_multiplier(
202+
df: pd.DataFrame,
203+
hold_cost: pd.DataFrame,
204+
cap_fact_parent: pd.DataFrame = None,
205+
param_name: str = "",
206+
) -> pd.DataFrame:
207+
"""
208+
Generalized function to apply hold_cost factors and optionally divide by cap factor.
209+
hold_cost contains the shares per cooling technology and their activity factors
210+
compared to the parent technologies.
211+
212+
Parameters
213+
----------
214+
df : pd.DataFrame
215+
The input dataframe in long format, containing 'node_loc', 'technology', and 'value'.
216+
hold_cost : pd.DataFrame
217+
DataFrame with 'utype', region-specific multipliers (wide format), and 'technology'.
218+
cap_fact_parent : pd.DataFrame, optional
219+
DataFrame with capacity factors, used only if 'capacity' is in param_name.
220+
param_name : str, optional
221+
The name of the parameter being processed.
222+
223+
Returns
224+
-------
225+
pd.DataFrame
226+
The modified dataframe.
227+
"""
228+
229+
# Melt hold_cost to long format
230+
hold_cost_long = hold_cost.melt(
231+
id_vars=["utype", "technology"], var_name="node_loc", value_name="multiplier"
232+
)
233+
234+
# Merge and apply hold_cost multipliers
235+
df = df.merge(hold_cost_long, how="left")
236+
df["value"] *= df["multiplier"]
237+
238+
# filter with value > 0
239+
df = df[df["value"] > 0]
240+
241+
# If parameter is capacity-related, divide by cap_fact
242+
if "capacity" in param_name and cap_fact_parent is not None:
243+
df = df.merge(cap_fact_parent, how="left")
244+
df["value"] /= df["cap_fact"]
245+
df.drop(columns="cap_fact", inplace=True)
246+
247+
df.drop(columns=["utype", "multiplier"], inplace=True)
248+
249+
return df
250+
251+
201252
def relax_growth_constraint(
202253
ref_hist: pd.DataFrame,
203254
scen,
@@ -836,98 +887,118 @@ def cool_tech(context: "Context") -> dict[str, pd.DataFrame]:
836887
search_cols=search_cols,
837888
)
838889

839-
changed_value_series = ref_hist_act.apply(
840-
hist_act, axis=1, context=context, hold_cost=hold_cost
841-
)
842-
changed_value_series_flat = [
843-
row for series in changed_value_series for row in series
844-
]
845-
columns_act = [
846-
"node_loc",
847-
"technology",
848-
"cooling_technology",
849-
"year_act",
850-
"value",
851-
"new_value",
852-
"unit",
853-
]
854-
# dataframe for historical activities of cooling techs
855-
act_value_df = pd.DataFrame(changed_value_series_flat, columns=columns_act)
856-
act_value_df = act_value_df[act_value_df["new_value"] > 0]
857-
858-
# now hist capacity
859-
changed_value_series = ref_hist_cap.apply(
860-
hist_cap, axis=1, context=context, hold_cost=hold_cost
861-
)
862-
changed_value_series_flat = [
863-
row for series in changed_value_series for row in series
864-
]
865-
columns_cap = [
866-
"node_loc",
867-
"technology",
868-
"cooling_technology",
869-
"year_vtg",
870-
"value",
871-
"new_value",
872-
"unit",
873-
]
874-
cap_value_df = pd.DataFrame(changed_value_series_flat, columns=columns_cap)
875-
cap_value_df = cap_value_df[cap_value_df["new_value"] > 0]
890+
# changed_value_series = ref_hist_act.apply(
891+
# hist_act, axis=1, context=context, hold_cost=hold_cost
892+
# )
893+
# changed_value_series_flat = [
894+
# row for series in changed_value_series for row in series
895+
# ]
896+
# columns_act = [
897+
# "node_loc",
898+
# "technology",
899+
# "cooling_technology",
900+
# "year_act",
901+
# "value",
902+
# "new_value",
903+
# "unit",
904+
# ]
905+
# # dataframe for historical activities of cooling techs
906+
# act_value_df = pd.DataFrame(changed_value_series_flat, columns=columns_act)
907+
# act_value_df = act_value_df[act_value_df["new_value"] > 0]
876908

877-
h_act = make_df(
878-
"historical_activity",
879-
node_loc=act_value_df["node_loc"],
880-
technology=act_value_df["cooling_technology"],
881-
year_act=act_value_df["year_act"],
882-
mode="M1",
883-
time="year",
884-
value=act_value_df["new_value"],
885-
# TODO finalize units
886-
unit="GWa",
887-
)
909+
# # now hist capacity
910+
# changed_value_series = ref_hist_cap.apply(
911+
# hist_cap, axis=1, context=context, hold_cost=hold_cost
912+
# )
913+
# changed_value_series_flat = [
914+
# row for series in changed_value_series for row in series
915+
# ]
916+
# columns_cap = [
917+
# "node_loc",
918+
# "technology",
919+
# "cooling_technology",
920+
# "year_vtg",
921+
# "value",
922+
# "new_value",
923+
# "unit",
924+
# ]
925+
# cap_value_df = pd.DataFrame(changed_value_series_flat, columns=columns_cap)
926+
# cap_value_df = cap_value_df[cap_value_df["new_value"] > 0]
927+
928+
# h_act = make_df(
929+
# "historical_activity",
930+
# node_loc=act_value_df["node_loc"],
931+
# technology=act_value_df["cooling_technology"],
932+
# year_act=act_value_df["year_act"],
933+
# mode="M1",
934+
# time="year",
935+
# value=act_value_df["new_value"],
936+
# # TODO finalize units
937+
# unit="GWa",
938+
# )
888939

889-
results["historical_activity"] = h_act
940+
# results["historical_activity"] = h_act
890941

891942
# hist cap to be divided by cap_factor of the parent tec
892943
cap_fact_parent = scen.par(
893-
"capacity_factor", {"technology": cap_value_df["technology"]}
944+
"capacity_factor", {"technology": cooling_df["parent_tech"]}
894945
)
895946
# cap_fact_parent = cap_fact_parent[
896947
# (cap_fact_parent["node_loc"] == "R12_NAM")
897-
# & (cap_fact_parent["technology"] == "nuc_lc")
948+
# & (cap_fact_parent["technology"] == "coal_ppl_u") # nuc_lc
898949
# ]
899950
# keep node_loc, technology , year_vtg and value
900-
cap_fact_parent = cap_fact_parent[
951+
cap_fact_parent1 = cap_fact_parent[
901952
["node_loc", "technology", "year_vtg", "value"]
902953
].drop_duplicates()
903-
# filter for values that have year_act < sc.firstmodelyear
904-
cap_fact_parent = cap_fact_parent[cap_fact_parent["year_vtg"] < scen.firstmodelyear]
954+
cap_fact_parent1 = cap_fact_parent1[
955+
cap_fact_parent1["year_vtg"] < scen.firstmodelyear
956+
]
905957
# group by "node_loc", "technology", "year_vtg" and get the minimum value
906-
cap_fact_parent = cap_fact_parent.groupby(
958+
cap_fact_parent1 = cap_fact_parent1.groupby(
907959
["node_loc", "technology", "year_vtg"], as_index=False
908960
).min()
909-
910-
# rename value to cap_fact
911-
cap_fact_parent.rename(columns={"value": "cap_fact"}, inplace=True)
961+
# filter for values that have year_act < sc.firstmodelyear
912962

913-
# merge cap_fact_parent with cap_value_df
914-
cap_value_df = pd.merge(cap_value_df, cap_fact_parent, how="left")
915-
# divide new_value by cap_fact
916-
cap_value_df["new_value"] = cap_value_df["new_value"] / cap_value_df["cap_fact"]
917-
# drop cap_fact
918-
cap_value_df.drop(columns="cap_fact", inplace=True)
963+
# in some cases the capacity parameters are used with year_all
964+
# (e.g. initial_new_capacity_up); need year_act for this
965+
cap_fact_parent2 = cap_fact_parent[
966+
["node_loc", "technology", "year_act", "value"]
967+
].drop_duplicates()
968+
cap_fact_parent2 = cap_fact_parent2[
969+
cap_fact_parent2["year_act"] >= scen.firstmodelyear
970+
]
971+
# group by "node_loc", "technology", "year_act" and get the minimum value
972+
cap_fact_parent2 = cap_fact_parent2.groupby(
973+
["node_loc", "technology", "year_act"], as_index=False
974+
).min()
975+
cap_fact_parent2.rename(columns={"year_act": "year_vtg"}, inplace=True)
919976

920-
# Make model compatible df for histroical new capacity
921-
h_cap = make_df(
922-
"historical_new_capacity",
923-
node_loc=cap_value_df["node_loc"],
924-
technology=cap_value_df["cooling_technology"],
925-
year_vtg=cap_value_df["year_vtg"],
926-
value=cap_value_df["new_value"],
927-
unit="GWa",
977+
cap_fact_parent = pd.concat([cap_fact_parent1, cap_fact_parent2])
978+
979+
# rename value to cap_fact
980+
cap_fact_parent.rename(
981+
columns={"value": "cap_fact", "technology": "utype"}, inplace=True
928982
)
929983

930-
results["historical_new_capacity"] = h_cap
984+
# # merge cap_fact_parent with cap_value_df
985+
# cap_value_df = pd.merge(cap_value_df, cap_fact_parent, how="left")
986+
# # divide new_value by cap_fact
987+
# cap_value_df["new_value"] = cap_value_df["new_value"] / cap_value_df["cap_fact"]
988+
# # drop cap_fact
989+
# cap_value_df.drop(columns="cap_fact", inplace=True)
990+
991+
# # Make model compatible df for historical new capacity
992+
# h_cap = make_df(
993+
# "historical_new_capacity",
994+
# node_loc=cap_value_df["node_loc"],
995+
# technology=cap_value_df["cooling_technology"],
996+
# year_vtg=cap_value_df["year_vtg"],
997+
# value=cap_value_df["new_value"],
998+
# unit="GWa",
999+
# )
1000+
1001+
# results["historical_new_capacity"] = h_cap
9311002

9321003
# Manually removing extra technologies not required
9331004
# TODO make it automatic to not include the names manually
@@ -1059,15 +1130,27 @@ def cool_tech(context: "Context") -> dict[str, pd.DataFrame]:
10591130
# Extract and expand some parameters from parent technologies
10601131
# Define parameter names to be extracted
10611132
param_names = [
1133+
"historical_activity",
1134+
"historical_new_capacity",
10621135
"initial_activity_up",
10631136
"initial_activity_lo",
10641137
"initial_new_capacity_up",
10651138
"soft_activity_up",
10661139
"soft_activity_lo",
1140+
"soft_new_capacity_up",
10671141
"level_cost_activity_soft_up",
10681142
"level_cost_activity_soft_lo",
10691143
"growth_activity_lo",
10701144
"growth_activity_up",
1145+
"growth_new_capacity_up",
1146+
]
1147+
1148+
multip_list = [
1149+
"historical_activity",
1150+
"historical_new_capacity",
1151+
"initial_activity_up",
1152+
"initial_activity_lo",
1153+
"initial_new_capacity_up",
10711154
]
10721155

10731156
# Extract parameters dynamically
@@ -1079,26 +1162,36 @@ def cool_tech(context: "Context") -> dict[str, pd.DataFrame]:
10791162
suffixes = ["__ot_fresh", "__cl_fresh", "__air", "__ot_saline"]
10801163

10811164
for df, param_name in list_params:
1165+
df_param = pd.DataFrame()
10821166
for suffix in suffixes:
10831167
df_add = df.copy()
10841168
df_add["technology"] = df_add["technology"] + suffix
1085-
results[param_name] = pd.concat(
1086-
[results.get(param_name, pd.DataFrame()), df_add], ignore_index=True
1087-
)
1169+
df_param = pd.concat([df_param, df_add])
10881170

1089-
# Function to rename and add transformed parameters
1090-
def rename_and_add(param_name, new_name, rename_dict):
1091-
if param_name in results:
1092-
df_transformed = (
1093-
results[param_name].drop(columns="time").rename(columns=rename_dict)
1171+
if param_name in multip_list:
1172+
df_param_share = apply_act_cap_multiplier(
1173+
df_param, hold_cost, cap_fact_parent, param_name
10941174
)
1095-
results[new_name] = df_transformed
1175+
else:
1176+
df_param_share = df_param
10961177

1097-
# Apply renaming for multiple parameters
1098-
rename_and_add("soft_activity_up", "soft_new_capacity_up", {"year_act": "year_vtg"})
1099-
rename_and_add(
1100-
"growth_activity_up", "growth_new_capacity_up", {"year_act": "year_vtg"}
1101-
)
1178+
results[param_name] = pd.concat(
1179+
[results.get(param_name, pd.DataFrame()), df_param_share], ignore_index=True
1180+
)
1181+
1182+
# # Function to rename and add transformed parameters
1183+
# def rename_and_add(param_name, new_name, rename_dict):
1184+
# if param_name in results:
1185+
# df_transformed = (
1186+
# results[param_name].drop(columns="time").rename(columns=rename_dict)
1187+
# )
1188+
# results[new_name] = df_transformed
1189+
1190+
# # Apply renaming for multiple parameters
1191+
# rename_and_add("soft_activity_up", "soft_new_capacity_up", {"year_act": "year_vtg"})
1192+
# rename_and_add(
1193+
# "growth_activity_up", "growth_new_capacity_up", {"year_act": "year_vtg"}
1194+
# )
11021195

11031196
# add share constraints for cooling technologies based on SSP assumptions
11041197
df_share = cooling_shares_SSP_from_yaml(context)

0 commit comments

Comments
 (0)