diff --git a/CaseStudy.py b/CaseStudy.py
index 4fba134..522e3c2 100644
--- a/CaseStudy.py
+++ b/CaseStudy.py
@@ -17,13 +17,12 @@ class CaseStudy:
# Lists of dataframes based on their dependencies - every table should only be present in one of these lists
rpk_dependent_dataframes: list[str] = ["dPower_Demand",
"dPower_Hindex",
- "dPower_ImpExpProfiles",
+ "dPower_ImportExport",
"dPower_Inflows",
"dPower_VRESProfiles"]
rp_only_dependent_dataframes: list[str] = ["dPower_WeightsRP"]
k_only_dependent_dataframes: list[str] = ["dPower_WeightsK"]
non_time_dependent_dataframes: list[str] = ["dPower_BusInfo",
- "dPower_ImpExpHubs",
"dPower_Network",
"dPower_Storage",
"dPower_ThermalGen",
@@ -55,10 +54,9 @@ def __init__(self,
power_weightsrp_file: str = "Power_WeightsRP.xlsx", dPower_WeightsRP: pd.DataFrame = None,
power_weightsk_file: str = "Power_WeightsK.xlsx", dPower_WeightsK: pd.DataFrame = None,
power_hindex_file: str = "Power_Hindex.xlsx", dPower_Hindex: pd.DataFrame = None,
- power_impexphubs_file: str = "Power_ImpExpHubs.xlsx", dPower_ImpExpHubs: pd.DataFrame = None,
- power_impexpprofiles_file: str = "Power_ImpExpProfiles.xlsx", dPower_ImpExpProfiles: pd.DataFrame = None,
+ power_importexport_file: str = "Power_ImportExport.xlsx", dPower_ImportExport: pd.DataFrame = None,
clip_method: str = "none", clip_value: float = 0):
- self.data_folder = data_folder if data_folder.endswith("/") else data_folder + "/"
+ self.data_folder = str(data_folder) if str(data_folder).endswith("/") else str(data_folder) + "/"
self.do_not_scale_units = do_not_scale_units
self.do_not_merge_single_node_buses = do_not_merge_single_node_buses
@@ -194,20 +192,13 @@ def __init__(self,
self.dPower_Inflows = ExcelReader.get_Power_Inflows(self.data_folder + self.power_inflows_file)
if self.dPower_Parameters["pEnablePowerImportExport"]:
- if dPower_ImpExpHubs is not None:
- self.dPower_ImpExpHubs = dPower_ImpExpHubs
+ if dPower_ImportExport is not None:
+ self.dPower_ImportExport = dPower_ImportExport
else:
- self.power_impexphubs_file = power_impexphubs_file
- self.dPower_ImpExpHubs = self.get_dPower_ImpExpHubs()
-
- if dPower_ImpExpProfiles is not None:
- self.dPower_ImpExpProfiles = dPower_ImpExpProfiles
- else:
- self.power_impexpprofiles_file = power_impexpprofiles_file
- self.dPower_ImpExpProfiles = self.get_dPower_ImpExpProfiles()
+ self.power_importexport_file = power_importexport_file
+ self.dPower_ImportExport = ExcelReader.get_Power_ImportExport(self.data_folder + self.power_importexport_file)
else:
- self.dPower_ImpExpHubs = None
- self.dPower_ImpExpProfiles = None
+ self.dPower_ImportExport = None
if not do_not_merge_single_node_buses:
self.merge_single_node_buses()
@@ -241,8 +232,7 @@ def scale_CaseStudy(self):
self.scale_dPower_Storage()
if self.dPower_Parameters["pEnablePowerImportExport"]:
- self.scale_dPower_ImpExpHubs()
- self.scale_dPower_ImpExpProfiles()
+ self.scale_dPower_ImportExport()
def remove_scaling(self):
self.power_scaling_factor = 1 / self.power_scaling_factor
@@ -331,13 +321,10 @@ def scale_dPower_Storage(self):
if self.dPower_Storage['DisEffic'].isna().any() or self.dPower_Storage['ChEffic'].isna().any():
raise ValueError("DisEffic and ChEffic in 'Power_Storage.xlsx' must not contain NaN values. Please check the data.")
- def scale_dPower_ImpExpHubs(self):
- self.dPower_ImpExpHubs["Pmax Import"] *= self.power_scaling_factor
- self.dPower_ImpExpHubs["Pmax Export"] *= self.power_scaling_factor
-
- def scale_dPower_ImpExpProfiles(self):
- self.dPower_ImpExpProfiles["ImpExp"] *= self.power_scaling_factor
- self.dPower_ImpExpProfiles["Price"] *= self.cost_scaling_factor / self.power_scaling_factor
+ def scale_dPower_ImportExport(self):
+ self.dPower_ImportExport["ImpExpMinimum"] *= self.power_scaling_factor
+ self.dPower_ImportExport["ImpExpMaximum"] *= self.power_scaling_factor
+ self.dPower_ImportExport["ImpExpPrice"] *= self.cost_scaling_factor / self.power_scaling_factor
def get_dGlobal_Parameters(self):
ExcelReader.check_LEGOExcel_version(self.data_folder + self.global_parameters_file, "v0.1.0", False)
@@ -380,76 +367,6 @@ def yesNo_to_bool(df: pd.DataFrame, columns_to_be_changed: list[str]):
raise ValueError(f"Value for {column} must be either 'Yes' or 'No'.")
return df
- def get_dPower_ImpExpHubs(self):
- dPower_ImpExpHubs = pd.read_excel(self.data_folder + self.power_impexphubs_file, skiprows=[0, 1, 3, 4, 5])
- dPower_ImpExpHubs = dPower_ImpExpHubs.drop(dPower_ImpExpHubs.columns[0], axis=1)
- dPower_ImpExpHubs = dPower_ImpExpHubs.set_index(['hub', 'i'])
-
- # Validate that all values for "Import Type" and "Export Type" == [Imp/ExpFix or Imp/ExpMax]
- errors = dPower_ImpExpHubs[~dPower_ImpExpHubs['Import Type'].isin(['ImpFix', 'ImpMax'])]
- if len(errors) > 0:
- raise ValueError(f"'Import Type' must be 'ImpFix' or 'ImpMax'. Please check: \n{errors}\n")
- errors = dPower_ImpExpHubs[~dPower_ImpExpHubs['Export Type'].isin(['ExpFix', 'ExpMax'])]
- if len(errors) > 0:
- raise ValueError(f"'Export Type' must be 'ExpFix' or 'ExpMax'. Please check: \n{errors}\n")
-
- # Validate that for each hub, all connections have the same Import Type and Export Type
- errors = dPower_ImpExpHubs.groupby('hub').agg({'Import Type': 'nunique', 'Export Type': 'nunique'})
- errors = errors[(errors['Import Type'] > 1) | (errors['Export Type'] > 1)]
- if len(errors) > 0:
- raise ValueError(f"Each hub must have the same Import Type (Fix or Max) and the same Export Type (Fix or Max) for each connection. Please check: \n{errors.index}\n")
-
- # If column 'scenario' is not present, add it
- if 'scenario' not in dPower_ImpExpHubs.columns:
- dPower_ImpExpHubs['scenario'] = 'ScenarioA' # TODO: Fill this dynamically, once the Excel file is updated
- return dPower_ImpExpHubs
-
- def get_dPower_ImpExpProfiles(self):
- with warnings.catch_warnings(action="ignore", category=UserWarning): # Otherwise there is a warning regarding data validation in the Excel-File (see https://stackoverflow.com/questions/53965596/python-3-openpyxl-userwarning-data-validation-extension-not-supported)
- dPower_ImpExpProfiles = pd.read_excel(self.data_folder + self.power_impexpprofiles_file, skiprows=[0, 1, 3, 4, 5], sheet_name='Power ImpExpProfiles')
- dPower_ImpExpProfiles = dPower_ImpExpProfiles.drop(dPower_ImpExpProfiles.columns[0], axis=1)
- dPower_ImpExpProfiles = dPower_ImpExpProfiles.melt(id_vars=['hub', 'rp', 'Type'], var_name='k', value_name='Value')
-
- # Validate that each multiindex is only present once
- dPower_ImpExpProfiles = dPower_ImpExpProfiles.set_index(['hub', 'rp', 'k', 'Type'])
- if not dPower_ImpExpProfiles.index.is_unique:
- raise ValueError(f"Indices for Imp-/Export values must be unique (i.e., no two entries for the same hub, rp, Type and k). Please check these indices: {dPower_ImpExpProfiles.index[dPower_ImpExpProfiles.index.duplicated(keep=False)]}")
-
- # Validate that all values for "Type" == [ImpExp, Price]
- dPower_ImpExpProfiles = dPower_ImpExpProfiles.reset_index().set_index(['hub', 'rp', 'k'])
- errors = dPower_ImpExpProfiles[~dPower_ImpExpProfiles['Type'].isin(['ImpExp', 'Price'])]
- if len(errors) > 0:
- raise ValueError(f"'Type' must be 'ImpExp' or 'Price'. Please check: \n{errors}\n")
-
- # Create combined table (with one row for each hub, rp and k)
- dPower_ImpExpProfiles = dPower_ImpExpProfiles.pivot(columns="Type", values="Value")
- dPower_ImpExpProfiles.columns.name = None # Fix name of columns/indices (which are altered through pivot)
-
- # Check that Pmax of ImpExpConnections can handle the maximum import and export (for those connections that are ImpFix or ExpFix)
- max_import = dPower_ImpExpProfiles[dPower_ImpExpProfiles["ImpExp"] >= 0]["ImpExp"].groupby("hub").max()
- max_export = -dPower_ImpExpProfiles[dPower_ImpExpProfiles["ImpExp"] <= 0]["ImpExp"].groupby("hub").min()
-
- pmax_sum_by_hub = self.dPower_ImpExpHubs.groupby('hub').agg({'Pmax Import': 'sum', 'Pmax Export': 'sum', 'Import Type': 'first', 'Export Type': 'first'})
- import_violations = max_import[(max_import > pmax_sum_by_hub['Pmax Import']) & (pmax_sum_by_hub['Import Type'] == 'ImpFix')]
- export_violations = max_export[(max_export > pmax_sum_by_hub['Pmax Export']) & (pmax_sum_by_hub['Export Type'] == 'ExpFix')]
-
- if not import_violations.empty:
- error_information = pd.concat([import_violations, pmax_sum_by_hub['Pmax Import']], axis=1) # Concat Pmax information and maximum import
- error_information = error_information[error_information["ImpExp"].notna()] # Only show rows where there is a violation
- error_information = error_information.rename(columns={"ImpExp": "Max Import from Profiles", "Pmax Import": "Sum of Pmax Import from Hub Definition"}) # Rename columns for readability
- raise ValueError(f"At least one hub has ImpFix imports which exceed the sum of Pmax of all connections. Please check: \n{error_information}\n")
-
- if not export_violations.empty:
- error_information = pd.concat([export_violations, pmax_sum_by_hub['Pmax Export']], axis=1) # Concat Pmax information and maximum export
- error_information = error_information[error_information["ImpExp"].notna()] # Only show rows where there is a violation
- error_information = error_information.rename(columns={"ImpExp": "Max Export from Profiles", "Pmax Export": "Sum of Pmax Export from Hub Definition"}) # Rename columns for readability
- raise ValueError(f"At least one hub has ExpFix exports which exceed the sum of Pmax of all connections. Please check: \n{error_information}\n")
-
- # If column 'scenario' is not present, add it
- if 'scenario' not in dPower_ImpExpProfiles.columns:
- dPower_ImpExpProfiles['scenario'] = "ScenarioA" # TODO: Fill this dynamically, once the Excel file is updated
- return dPower_ImpExpProfiles
-
@staticmethod
def get_connected_buses(connection_matrix, bus: str):
connected_buses = []
@@ -740,6 +657,8 @@ def filter_timesteps(self, start: str, end: str, inplace: bool = False) -> Optio
for df_name in CaseStudy.k_dependent_dataframes:
if hasattr(case_study, df_name) and getattr(case_study, df_name) is not None:
df = getattr(case_study, df_name)
+                if df is None:  # NOTE(review): redundant — the hasattr/getattr guard above already ensures df is not None; confirm before removing
+                    continue
index = df.index.names
df_reset = df.reset_index()
diff --git a/ExcelReader.py b/ExcelReader.py
index f9012ef..eb21bad 100644
--- a/ExcelReader.py
+++ b/ExcelReader.py
@@ -3,6 +3,7 @@
import openpyxl
import pandas as pd
from openpyxl import load_workbook
+from openpyxl.utils.cell import get_column_letter
from printer import Printer
@@ -198,6 +199,84 @@ def get_Power_Hindex(excel_file_path: str, keep_excluded_entries: bool = False,
return dPower_Hindex
+def get_Power_ImportExport(excel_file_path: str, keep_excluded_entries: bool = False, fail_on_wrong_version: bool = False) -> pd.DataFrame:
+ """
+ Read the dPower_ImportExport data from the Excel file.
+ :param excel_file_path: Path to the Excel file
+ :param keep_excluded_entries: Unused but kept for compatibility with other functions
+ :param fail_on_wrong_version: If True, raise an error if the version of the Excel file does not match the expected version
+ :return: dPower_ImportExport
+ """
+ if keep_excluded_entries:
+ printer.warning("'keep_excluded_entries' is set for 'get_Power_ImportExport', although nothing is excluded anyway - please check if this is intended.")
+
+ check_LEGOExcel_version(excel_file_path, "v0.0.1", fail_on_wrong_version)
+ xls = pd.ExcelFile(excel_file_path)
+ data = pd.DataFrame()
+
+ for scenario in xls.sheet_names: # Iterate through all sheets, i.e., through all scenarios
+ # Read row 3 (information about hubs and nodes)
+ hub_i_df = pd.read_excel(excel_file_path, skiprows=[0, 1, 3], nrows=2, sheet_name=scenario)
+ hub_i = []
+ hubs = []
+ i = 6 # Start checking from column 7 (index 6, zero-based)
+ while i < hub_i_df.shape[1]:
+ hubs.append(hub_i_df.columns[i])
+ hub_i.append((hub_i_df.columns[i], hub_i_df.columns[i + 1]))
+ if "Unnamed" not in hub_i_df.columns[i + 2]:
+ raise ValueError(f"Power_ImportExport: Expected pairs of columns for hub and i, but found an unexpected text '{hub_i_df.columns[i + 2]}' at column index {get_column_letter(i + 3)}. Please check the Excel file format.")
+ i += 3 # Move to the next pair (skip the "Unnamed" column)
+
+ if len(hubs) != len(set(hubs)):
+            raise ValueError("Power_ImportExport: Found duplicate hub names in the header row. Hubs must be unique. Please check the Excel file.")
+
+ df = pd.read_excel(excel_file_path, skiprows=[0, 1, 2, 4, 5, 6], sheet_name=scenario)
+ df = df.drop(df.columns[0], axis=1) # Drop the first column (which is empty)
+
+ for i, col in enumerate(df.columns):
+ if i < 5:
+ continue # Skip the first five columns
+ hub = hub_i[(i - 5) // 3][0]
+ node = hub_i[(i - 5) // 3][1]
+
+ match (i - 5) % 3:
+ case 0:
+ if "ImpExpMinimum" not in col:
+ raise ValueError(f"Power_ImportExport: Expected column 'ImpExpMinimum' at column index {get_column_letter(i + 2)}, but found '{col}'. Please check the Excel file format.")
+ col_name = "ImpExpMinimum"
+ case 1:
+ if "ImpExpMaximum" not in col:
+ raise ValueError(f"Power_ImportExport: Expected column 'ImpExpMaximum' at column index {get_column_letter(i + 2)}, but found '{col}'. Please check the Excel file format.")
+ col_name = "ImpExpMaximum"
+ case 2:
+ if "ImpExpPrice" not in col:
+ raise ValueError(f"Power_ImportExport: Expected column 'ImpExpPrice' at column index {get_column_letter(i + 2)}, but found '{col}'. Please check the Excel file format.")
+ col_name = "ImpExpPrice"
+ case _:
+ raise ValueError("This should never happen.")
+
+ if "@" in hub:
+ raise ValueError(f"Power_ImportExport: Found '@' in hub name {hub}, which is not allowed. Please rename it.")
+ elif "@" in node:
+ raise ValueError(f"Power_ImportExport: Found '@' in node name {node}, which is not allowed. Please rename it.")
+ df = df.rename(columns={col: f"{hub}@{node}@{col_name}"})
+
+ df = df.melt(id_vars=["id", "rp", "k", "dataPackage", "dataSource"])
+
+ df[["hub", "i", "valueType"]] = df["variable"].str.split("@", expand=True) # Split the variable column into hub, i and valueType
+
+ df = df.pivot(index=["id", "rp", "k", "dataPackage", "dataSource", "hub", "i"], columns="valueType", values="value")
+ df.columns.name = None # Fix name of columns/indices (which are altered through pivot)
+
+ df["scenario"] = scenario
+
+ df = df.reset_index().set_index(["hub", "i", "rp", "k"]) # Set multiindex
+
+ data = pd.concat([data, df], ignore_index=False) # Append the DataFrame to the main DataFrame
+
+ return data
+
+
def get_Power_Inflows(excel_file_path: str, keep_excluded_entries: bool = False, fail_on_wrong_version: bool = False) -> pd.DataFrame:
"""
Read the dPower_Inflows data from the Excel file.
diff --git a/ExcelWriter.py b/ExcelWriter.py
index 0807ffa..2342b56 100644
--- a/ExcelWriter.py
+++ b/ExcelWriter.py
@@ -44,7 +44,7 @@ def __init__(self, excel_definitions_path: str = None):
self.fonts = Font.dict_from_xml(self.xml_root.find("Fonts"), self.colors)
self.texts = Text.dict_from_xml(self.xml_root.find("Texts"))
self.cell_styles = CellStyle.dict_from_xml(self.xml_root.find("CellStyles"), self.fonts, self.colors, self.number_formats, self.alignments)
- self.columns = Column.dict_from_xml(self.xml_root.find("Columns"), self.cell_styles) | Column.dict_from_xml(self.xml_root.find("PivotColumns"), self.cell_styles)
+ self.columns = Column.dict_from_xml(self.xml_root.find("Columns"), self.cell_styles) | Column.dict_from_xml(self.xml_root.find("GroupedColumns"), self.cell_styles) | Column.dict_from_xml(self.xml_root.find("PivotColumns"), self.cell_styles)
self.excel_definitions = TableDefinition.dict_from_xml(self.xml_root.find("TableDefinitions"), self.columns, self.colors, self.cell_styles)
pass
@@ -85,8 +85,9 @@ def _write_Excel_from_definition(self, data: pd.DataFrame, folder_path: str, exc
data = data.copy() # Create a copy of the DataFrame to avoid modifying the original data
- # Prepare columns if data should be pivoted
+ # Prepare columns if data should be pivoted or grouped
pivot_columns = []
+ grouped_columns = []
target_column = None
target_column_index = None
for i, column in enumerate(excel_definition.columns):
@@ -95,6 +96,8 @@ def _write_Excel_from_definition(self, data: pd.DataFrame, folder_path: str, exc
raise ValueError(f"Excel definition '{excel_definition_id}' has (at least) two pivot columns defined: '{target_column.db_name}' and '{column.db_name}'. Only one pivot column is allowed.")
target_column = column
target_column_index = i
+ elif column.grouped:
+ grouped_columns.append(column)
else:
if column.db_name != "NOEXCL": # Skip first column if it is the (empty and thus unused) placeholder for the excl column
pivot_columns.append(column.db_name)
@@ -114,6 +117,19 @@ def _write_Excel_from_definition(self, data: pd.DataFrame, folder_path: str, exc
data.reset_index(inplace=True)
+ if len(grouped_columns) > 0:
+ matchingColumns = [col.matching_index for col in grouped_columns]
+ matchingColumnsWithoutNone = list(filter(lambda x: x is not None, matchingColumns))
+ matchingIndices = data.reset_index().set_index(matchingColumnsWithoutNone).index.unique()
+ for i in range(len(matchingIndices) - 1):
+ for col in grouped_columns:
+ column_templates.append(col)
+
+ # Restructure data to similar shape as Excel
+ data = data.reset_index().pivot(index=["id", "rp", "k", "dataPackage", "dataSource", "scenario"], columns=matchingColumnsWithoutNone, values=[col.db_name for col in grouped_columns])
+ data.columns.name = None # Fix name of columns/indices (which are altered through pivot)
+ data = data.reset_index().set_index(["id", "rp", "k", "dataPackage", "dataSource"])
+
if len(data) == 0:
printer.warning(f"No data found for Excel definition '{excel_definition_id}' - writing an empty file.")
data = pd.DataFrame(columns=[col.db_name for col in column_templates] + ["scenario"])
@@ -121,6 +137,7 @@ def _write_Excel_from_definition(self, data: pd.DataFrame, folder_path: str, exc
for scenario_index, scenario in enumerate(scenarios):
scenario_data = data[data["scenario"] == scenario]
+ no_wrap_description_set = False
if scenario_index == 0:
ws = wb.active
@@ -166,7 +183,12 @@ def _write_Excel_from_definition(self, data: pd.DataFrame, folder_path: str, exc
if column.db_name != "NOEXCL": # Skip first column if it is the (empty and thus unused) placeholder for the excl column
# Readable name
- ws.cell(row=3, column=i + 1, value=column.readable_name)
+ if not column.grouped:
+ ws.cell(row=3, column=i + 1, value=column.readable_name)
+ else:
+ group_number = (i - 6) // len(grouped_columns)
+ group_index = (i - 6) % len(grouped_columns)
+ ws.cell(row=3, column=i + 1, value=str(matchingIndices[group_number][group_index] if group_index < len(matchingIndices[group_number]) else ""))
ExcelWriter.__setCellStyle(self.cell_styles["readableName"], ws.cell(row=3, column=i + 1))
# Database name
@@ -174,13 +196,16 @@ def _write_Excel_from_definition(self, data: pd.DataFrame, folder_path: str, exc
ExcelWriter.__setCellStyle(self.cell_styles["dbName"], ws.cell(row=4, column=i + 1))
# Description
- ws.cell(row=5, column=i + 1, value=column.description)
- if i != target_column_index:
+ if not column.grouped or not no_wrap_description_set:
+ ws.cell(row=5, column=i + 1, value=column.description)
+ if column.grouped:
+ no_wrap_description_set = True
+ if i != target_column_index and not column.grouped:
ExcelWriter.__setCellStyle(self.cell_styles["description"], ws.cell(row=5, column=i + 1))
else: # If the column is a pivoted column, set the style without wrapping text
- cell_style_withou_wrap_text = deepcopy(self.cell_styles["description"])
- cell_style_withou_wrap_text.alignment.wrap_text = False
- ExcelWriter.__setCellStyle(cell_style_withou_wrap_text, ws.cell(row=5, column=i + 1))
+ cell_style_without_wrap_text = deepcopy(self.cell_styles["description"])
+ cell_style_without_wrap_text.alignment.wrap_text = False
+ ExcelWriter.__setCellStyle(cell_style_without_wrap_text, ws.cell(row=5, column=i + 1))
# Database behavior
if i != 0: # Skip db-behavior for the first column (excl)
@@ -198,8 +223,11 @@ def _write_Excel_from_definition(self, data: pd.DataFrame, folder_path: str, exc
if col.readable_name is None and j == 0: continue # Skip first column if it is empty, since it is the (unused) placeholder for the excl column
if col.db_name == "excl": # Excl. column is written by placing 'X' in lines which should be excluded
ws.cell(row=i + 8, column=j + 1, value='X' if isinstance(values[col.db_name], str) or not np.isnan(values[col.db_name]) else None)
+ elif col.grouped:
+ group_number = (j - 6) // len(grouped_columns)
+ ws.cell(row=i + 8, column=j + 1, value=values[col.db_name, *matchingIndices[group_number]])
else:
- ws.cell(row=i + 8, column=j + 1, value=values[col.db_name])
+                        ws.cell(row=i + 8, column=j + 1, value=values[col.db_name].iloc[-1] if isinstance(values[col.db_name], pd.Series) else values[col.db_name])  # NOTE(review): duplicate db_names collapse to the last value here — confirm this is intended
ExcelWriter.__setCellStyle(col.cell_style, ws.cell(row=i + 8, column=j + 1))
path = folder_path + ("/" if not folder_path.endswith("/") else "") + excel_definition.file_name + ".xlsx"
@@ -301,6 +329,15 @@ def write_Power_Hindex(self, dPower_Hindex: pd.DataFrame, folder_path: str) -> N
"""
self._write_Excel_from_definition(dPower_Hindex, folder_path, "Power_Hindex")
+ def write_Power_ImportExport(self, dPower_ImportExport: pd.DataFrame, folder_path: str) -> None:
+ """
+ Write the dPower_ImportExport DataFrame to an Excel file in LEGO format.
+ :param dPower_ImportExport: DataFrame containing the dPower_ImportExport data.
+ :param folder_path: Path to the folder where the Excel file will be saved.
+ :return: None
+ """
+ self._write_Excel_from_definition(dPower_ImportExport, folder_path, "Power_ImportExport")
+
def write_Power_Inflows(self, dPower_Inflows: pd.DataFrame, folder_path: str) -> None:
"""
Write the dPower_Inflows DataFrame to an Excel file in LEGO format.
@@ -494,6 +531,7 @@ def model_to_excel(model: pyomo.core.Model, target_path: str) -> None:
("Power_Demand", f"{args.caseStudyFolder}Power_Demand.xlsx", ExcelReader.get_Power_Demand, ew.write_Power_Demand),
("Power_Demand_KInRows", f"{args.caseStudyFolder}Power_Demand_KInRows.xlsx", ExcelReader.get_Power_Demand_KInRows, ew.write_Power_Demand_KInRows),
("Power_Hindex", f"{args.caseStudyFolder}Power_Hindex.xlsx", ExcelReader.get_Power_Hindex, ew.write_Power_Hindex),
+ ("Power_ImportExport", f"{args.caseStudyFolder}Power_ImportExport.xlsx", ExcelReader.get_Power_ImportExport, ew.write_Power_ImportExport),
("Power_Inflows", f"{args.caseStudyFolder}Power_Inflows.xlsx", ExcelReader.get_Power_Inflows, ew.write_Power_Inflows),
("Power_Inflows_KInRows", f"{args.caseStudyFolder}Power_Inflows_KInRows.xlsx", ExcelReader.get_Power_Inflows_KInRows, ew.write_Power_Inflows_KInRows),
("Power_Network", f"{args.caseStudyFolder}Power_Network.xlsx", ExcelReader.get_Power_Network, ew.write_Power_Network),
diff --git a/TableDefinition.py b/TableDefinition.py
index b2120de..f0076d9 100644
--- a/TableDefinition.py
+++ b/TableDefinition.py
@@ -154,7 +154,7 @@ def dict_from_xml(cls, cell_styles: xml.etree.ElementTree.Element, font_dict: Op
class Column:
- def __init__(self, readable_name: str, db_name: str, description: str, unit: str, column_width: float, cell_style: CellStyle, pivoted: bool, scenario_dependent: bool = False):
+ def __init__(self, readable_name: str, db_name: str, description: str, unit: str, column_width: float, cell_style: CellStyle, pivoted: bool, scenario_dependent: bool = False, grouped: bool = False, matching_index: Optional[str] = None):
self.readable_name = readable_name
self.db_name = db_name
self.description = description
@@ -171,6 +171,8 @@ def __init__(self, readable_name: str, db_name: str, description: str, unit: str
self.cell_style = cell_style
self.scenario_dependent = scenario_dependent
self.pivoted = pivoted
+ self.grouped = grouped
+ self.matching_index = matching_index
def get_copy_with_scenario_dependent(self, scenario_dependent: bool, color_dict: dict[str, Color]) -> Self:
"""
@@ -218,6 +220,7 @@ def dict_from_xml(cls, columns: xml.etree.ElementTree.Element, cell_style_dict:
unit = column.find("Unit").text
column_width = float(column.find("ColumnWidth").text)
cell_style = cell_style_dict[column.find("CellStyle").text] if column.find("CellStyle").text is not None else None
+ matching_index = column.find("MatchingIndex").text if column.tag == "GroupedColumn" else None
return_dict[column_id] = Column(readable_name=readable_name,
db_name=column_id if column.tag != "PivotColumn" else column.find("DatabaseName").text,
@@ -225,7 +228,9 @@ def dict_from_xml(cls, columns: xml.etree.ElementTree.Element, cell_style_dict:
unit=unit,
column_width=column_width,
cell_style=cell_style,
- pivoted=column.tag == "PivotColumn")
+ pivoted=column.tag == "PivotColumn",
+ grouped=column.tag == "GroupedColumn",
+ matching_index=matching_index)
except KeyError as e:
missing_styles = []
for column in columns:
diff --git a/TableDefinitions.xml b/TableDefinitions.xml
index 82a6c69..18771ac 100644
--- a/TableDefinitions.xml
+++ b/TableDefinitions.xml
@@ -105,6 +105,22 @@
+
+ v0.0.1
+ Power - Import/Export Hubs and Profiles
+ 45.0
+
+
+
+
+
+
+
+
+
+
+
+
v0.1.0
Power - Inflows
@@ -930,6 +946,34 @@
+
+
+ Maximum Imp-/Export
+
+ [MW]
+ 23
+ rightInt
+ i
+
+
+ Minimum Imp-/Export
+ Minimum/Maximum Import/Export (positive numbers are imports, going from hub to the node) and price at this hub. Always specify the hub above 'ImpExpMinimum' and the connected node above 'ImpExpMaximum'.
+
+ [MW]
+ 23
+ rightInt
+ hub
+
+
+ Imp-/Export Price
+
+ [€/MWh]
+ 23
+ rightFloat2
+
+
+
+
diff --git a/data/example/Power_ImportExport.xlsx b/data/example/Power_ImportExport.xlsx
new file mode 100644
index 0000000..bac6077
Binary files /dev/null and b/data/example/Power_ImportExport.xlsx differ
diff --git a/tests/test_ExcelReaderWriter.py b/tests/test_ExcelReaderWriter.py
index bd09443..e33f829 100644
--- a/tests/test_ExcelReaderWriter.py
+++ b/tests/test_ExcelReaderWriter.py
@@ -16,6 +16,7 @@
("Power_Demand", f"{case_study_folder}Power_Demand.xlsx", ExcelReader.get_Power_Demand, ew.write_Power_Demand),
("Power_Demand_KInRows", f"{case_study_folder}Power_Demand_KInRows.xlsx", ExcelReader.get_Power_Demand_KInRows, ew.write_Power_Demand_KInRows),
("Power_Hindex", f"{case_study_folder}Power_Hindex.xlsx", ExcelReader.get_Power_Hindex, ew.write_Power_Hindex),
+ ("Power_ImportExport", f"{case_study_folder}Power_ImportExport.xlsx", ExcelReader.get_Power_ImportExport, ew.write_Power_ImportExport),
("Power_Inflows", f"{case_study_folder}Power_Inflows.xlsx", ExcelReader.get_Power_Inflows, ew.write_Power_Inflows),
("Power_Inflows_KInRows", f"{case_study_folder}Power_Inflows_KInRows.xlsx", ExcelReader.get_Power_Inflows_KInRows, ew.write_Power_Inflows_KInRows),
("Power_Network", f"{case_study_folder}Power_Network.xlsx", ExcelReader.get_Power_Network, ew.write_Power_Network),