Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
111 changes: 15 additions & 96 deletions CaseStudy.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,13 +17,12 @@ class CaseStudy:
# Lists of dataframes based on their dependencies - every table should only be present in one of these lists
rpk_dependent_dataframes: list[str] = ["dPower_Demand",
"dPower_Hindex",
"dPower_ImpExpProfiles",
"dPower_ImportExport",
"dPower_Inflows",
"dPower_VRESProfiles"]
rp_only_dependent_dataframes: list[str] = ["dPower_WeightsRP"]
k_only_dependent_dataframes: list[str] = ["dPower_WeightsK"]
non_time_dependent_dataframes: list[str] = ["dPower_BusInfo",
"dPower_ImpExpHubs",
"dPower_Network",
"dPower_Storage",
"dPower_ThermalGen",
Expand Down Expand Up @@ -55,10 +54,9 @@ def __init__(self,
power_weightsrp_file: str = "Power_WeightsRP.xlsx", dPower_WeightsRP: pd.DataFrame = None,
power_weightsk_file: str = "Power_WeightsK.xlsx", dPower_WeightsK: pd.DataFrame = None,
power_hindex_file: str = "Power_Hindex.xlsx", dPower_Hindex: pd.DataFrame = None,
power_impexphubs_file: str = "Power_ImpExpHubs.xlsx", dPower_ImpExpHubs: pd.DataFrame = None,
power_impexpprofiles_file: str = "Power_ImpExpProfiles.xlsx", dPower_ImpExpProfiles: pd.DataFrame = None,
power_importexport_file: str = "Power_ImportExport.xlsx", dPower_ImportExport: pd.DataFrame = None,
clip_method: str = "none", clip_value: float = 0):
self.data_folder = data_folder if data_folder.endswith("/") else data_folder + "/"
self.data_folder = str(data_folder) if str(data_folder).endswith("/") else str(data_folder) + "/"
self.do_not_scale_units = do_not_scale_units
self.do_not_merge_single_node_buses = do_not_merge_single_node_buses

Expand Down Expand Up @@ -194,20 +192,13 @@ def __init__(self,
self.dPower_Inflows = ExcelReader.get_Power_Inflows(self.data_folder + self.power_inflows_file)

if self.dPower_Parameters["pEnablePowerImportExport"]:
if dPower_ImpExpHubs is not None:
self.dPower_ImpExpHubs = dPower_ImpExpHubs
if dPower_ImportExport is not None:
self.dPower_ImportExport = dPower_ImportExport
else:
self.power_impexphubs_file = power_impexphubs_file
self.dPower_ImpExpHubs = self.get_dPower_ImpExpHubs()

if dPower_ImpExpProfiles is not None:
self.dPower_ImpExpProfiles = dPower_ImpExpProfiles
else:
self.power_impexpprofiles_file = power_impexpprofiles_file
self.dPower_ImpExpProfiles = self.get_dPower_ImpExpProfiles()
self.power_importexport_file = power_importexport_file
self.dPower_ImportExport = ExcelReader.get_Power_ImportExport(self.data_folder + self.power_importexport_file)
else:
self.dPower_ImpExpHubs = None
self.dPower_ImpExpProfiles = None
self.dPower_ImportExport = None

if not do_not_merge_single_node_buses:
self.merge_single_node_buses()
Expand Down Expand Up @@ -241,8 +232,7 @@ def scale_CaseStudy(self):
self.scale_dPower_Storage()

if self.dPower_Parameters["pEnablePowerImportExport"]:
self.scale_dPower_ImpExpHubs()
self.scale_dPower_ImpExpProfiles()
self.scale_dPower_ImportExport()

def remove_scaling(self):
self.power_scaling_factor = 1 / self.power_scaling_factor
Expand Down Expand Up @@ -331,13 +321,10 @@ def scale_dPower_Storage(self):
if self.dPower_Storage['DisEffic'].isna().any() or self.dPower_Storage['ChEffic'].isna().any():
raise ValueError("DisEffic and ChEffic in 'Power_Storage.xlsx' must not contain NaN values. Please check the data.")

def scale_dPower_ImpExpHubs(self):
    """Apply the power scaling factor to the hub import/export capacity limits (in place)."""
    hubs = self.dPower_ImpExpHubs
    for limit_column in ("Pmax Import", "Pmax Export"):
        hubs[limit_column] = hubs[limit_column] * self.power_scaling_factor

def scale_dPower_ImpExpProfiles(self):
    """Apply unit scaling to the import/export profiles: power scaling for the
    'ImpExp' quantities, cost-per-power scaling for the 'Price' column (in place)."""
    profiles = self.dPower_ImpExpProfiles
    power_factor = self.power_scaling_factor
    price_factor = self.cost_scaling_factor / power_factor
    profiles["ImpExp"] = profiles["ImpExp"] * power_factor
    profiles["Price"] = profiles["Price"] * price_factor
def scale_dPower_ImportExport(self):
    """Apply unit scaling to the import/export table (in place): the min/max
    bounds scale with power, the price scales with cost per unit of power."""
    table = self.dPower_ImportExport
    scale_by_column = {
        "ImpExpMinimum": self.power_scaling_factor,
        "ImpExpMaximum": self.power_scaling_factor,
        "ImpExpPrice": self.cost_scaling_factor / self.power_scaling_factor,
    }
    for column, factor in scale_by_column.items():
        table[column] = table[column] * factor

def get_dGlobal_Parameters(self):
ExcelReader.check_LEGOExcel_version(self.data_folder + self.global_parameters_file, "v0.1.0", False)
Expand Down Expand Up @@ -380,76 +367,6 @@ def yesNo_to_bool(df: pd.DataFrame, columns_to_be_changed: list[str]):
raise ValueError(f"Value for {column} must be either 'Yes' or 'No'.")
return df

def get_dPower_ImpExpHubs(self):
    """
    Read and validate the import/export hub definitions from the Excel file
    named by self.power_impexphubs_file inside self.data_folder.

    :return: DataFrame indexed by ['hub', 'i'] with one row per hub-bus
             connection, containing 'Pmax Import', 'Pmax Export',
             'Import Type', 'Export Type' and a 'scenario' column.
    :raises ValueError: If an Import/Export Type value is not one of the
                        allowed literals, or if a hub mixes different
                        Import/Export Types across its connections.
    """
    # skiprows drops the header/metadata rows of the LEGO Excel layout so that
    # the remaining row becomes the column names — assumes the standard
    # Power_ImpExpHubs sheet layout; TODO confirm against the file template.
    dPower_ImpExpHubs = pd.read_excel(self.data_folder + self.power_impexphubs_file, skiprows=[0, 1, 3, 4, 5])
    dPower_ImpExpHubs = dPower_ImpExpHubs.drop(dPower_ImpExpHubs.columns[0], axis=1)  # first column is presumably a spacer — verify against the template
    dPower_ImpExpHubs = dPower_ImpExpHubs.set_index(['hub', 'i'])

    # Validate that all values for "Import Type" and "Export Type" == [Imp/ExpFix or Imp/ExpMax]
    errors = dPower_ImpExpHubs[~dPower_ImpExpHubs['Import Type'].isin(['ImpFix', 'ImpMax'])]
    if len(errors) > 0:
        raise ValueError(f"'Import Type' must be 'ImpFix' or 'ImpMax'. Please check: \n{errors}\n")
    errors = dPower_ImpExpHubs[~dPower_ImpExpHubs['Export Type'].isin(['ExpFix', 'ExpMax'])]
    if len(errors) > 0:
        raise ValueError(f"'Export Type' must be 'ExpFix' or 'ExpMax'. Please check: \n{errors}\n")

    # Validate that for each hub, all connections have the same Import Type and Export Type
    # (nunique > 1 means the hub's connections disagree on a type).
    errors = dPower_ImpExpHubs.groupby('hub').agg({'Import Type': 'nunique', 'Export Type': 'nunique'})
    errors = errors[(errors['Import Type'] > 1) | (errors['Export Type'] > 1)]
    if len(errors) > 0:
        raise ValueError(f"Each hub must have the same Import Type (Fix or Max) and the same Export Type (Fix or Max) for each connection. Please check: \n{errors.index}\n")

    # If column 'scenario' is not present, add it
    if 'scenario' not in dPower_ImpExpHubs.columns:
        dPower_ImpExpHubs['scenario'] = 'ScenarioA'  # TODO: Fill this dynamically, once the Excel file is updated
    return dPower_ImpExpHubs

def get_dPower_ImpExpProfiles(self):
    """
    Read, validate and reshape the import/export profiles from the Excel file
    named by self.power_impexpprofiles_file inside self.data_folder.

    Requires self.dPower_ImpExpHubs to be populated first (used for the
    Pmax feasibility check below).

    :return: DataFrame indexed by ['hub', 'rp', 'k'] with columns 'ImpExp',
             'Price' and 'scenario'.
    :raises ValueError: On duplicate (hub, rp, k, Type) entries, on invalid
                        'Type' values, or when an ImpFix/ExpFix profile
                        exceeds the summed Pmax of the hub's connections.
    """
    with warnings.catch_warnings(action="ignore", category=UserWarning):  # Otherwise there is a warning regarding data validation in the Excel-File (see https://stackoverflow.com/questions/53965596/python-3-openpyxl-userwarning-data-validation-extension-not-supported)
        dPower_ImpExpProfiles = pd.read_excel(self.data_folder + self.power_impexpprofiles_file, skiprows=[0, 1, 3, 4, 5], sheet_name='Power ImpExpProfiles')
    dPower_ImpExpProfiles = dPower_ImpExpProfiles.drop(dPower_ImpExpProfiles.columns[0], axis=1)  # first column is presumably a spacer — verify against the template
    # Wide-to-long: each remaining column is one timestep k; produce one row
    # per (hub, rp, Type, k) with its profile value.
    dPower_ImpExpProfiles = dPower_ImpExpProfiles.melt(id_vars=['hub', 'rp', 'Type'], var_name='k', value_name='Value')

    # Validate that each multiindex is only present once
    dPower_ImpExpProfiles = dPower_ImpExpProfiles.set_index(['hub', 'rp', 'k', 'Type'])
    if not dPower_ImpExpProfiles.index.is_unique:
        raise ValueError(f"Indices for Imp-/Export values must be unique (i.e., no two entries for the same hub, rp, Type and k). Please check these indices: {dPower_ImpExpProfiles.index[dPower_ImpExpProfiles.index.duplicated(keep=False)]}")

    # Validate that all values for "Type" == [ImpExp, Price]
    dPower_ImpExpProfiles = dPower_ImpExpProfiles.reset_index().set_index(['hub', 'rp', 'k'])
    errors = dPower_ImpExpProfiles[~dPower_ImpExpProfiles['Type'].isin(['ImpExp', 'Price'])]
    if len(errors) > 0:
        raise ValueError(f"'Type' must be 'ImpExp' or 'Price'. Please check: \n{errors}\n")

    # Create combined table (with one row for each hub, rp and k)
    dPower_ImpExpProfiles = dPower_ImpExpProfiles.pivot(columns="Type", values="Value")
    dPower_ImpExpProfiles.columns.name = None  # Fix name of columns/indices (which are altered through pivot)

    # Check that Pmax of ImpExpConnections can handle the maximum import and export (for those connections that are ImpFix or ExpFix)
    # Sign convention per the filters below: ImpExp >= 0 is import, <= 0 is export.
    max_import = dPower_ImpExpProfiles[dPower_ImpExpProfiles["ImpExp"] >= 0]["ImpExp"].groupby("hub").max()
    max_export = -dPower_ImpExpProfiles[dPower_ImpExpProfiles["ImpExp"] <= 0]["ImpExp"].groupby("hub").min()

    # 'first' is sufficient here because get_dPower_ImpExpHubs enforces one
    # uniform Import/Export Type per hub.
    pmax_sum_by_hub = self.dPower_ImpExpHubs.groupby('hub').agg({'Pmax Import': 'sum', 'Pmax Export': 'sum', 'Import Type': 'first', 'Export Type': 'first'})
    import_violations = max_import[(max_import > pmax_sum_by_hub['Pmax Import']) & (pmax_sum_by_hub['Import Type'] == 'ImpFix')]
    export_violations = max_export[(max_export > pmax_sum_by_hub['Pmax Export']) & (pmax_sum_by_hub['Export Type'] == 'ExpFix')]

    if not import_violations.empty:
        error_information = pd.concat([import_violations, pmax_sum_by_hub['Pmax Import']], axis=1)  # Concat Pmax information and maximum import
        error_information = error_information[error_information["ImpExp"].notna()]  # Only show rows where there is a violation
        error_information = error_information.rename(columns={"ImpExp": "Max Import from Profiles", "Pmax Import": "Sum of Pmax Import from Hub Definition"})  # Rename columns for readability
        raise ValueError(f"At least one hub has ImpFix imports which exceed the sum of Pmax of all connections. Please check: \n{error_information}\n")

    if not export_violations.empty:
        error_information = pd.concat([export_violations, pmax_sum_by_hub['Pmax Export']], axis=1)  # Concat Pmax information and maximum export
        error_information = error_information[error_information["ImpExp"].notna()]  # Only show rows where there is a violation
        error_information = error_information.rename(columns={"ImpExp": "Max Export from Profiles", "Pmax Export": "Sum of Pmax Export from Hub Definition"})  # Rename columns for readability
        raise ValueError(f"At least one hub has ExpFix exports which exceed the sum of Pmax of all connections. Please check: \n{error_information}\n")

    # If column 'scenario' is not present, add it
    if 'scenario' not in dPower_ImpExpProfiles.columns:
        dPower_ImpExpProfiles['scenario'] = "ScenarioA"  # TODO: Fill this dynamically, once the Excel file is updated
    return dPower_ImpExpProfiles

@staticmethod
def get_connected_buses(connection_matrix, bus: str):
connected_buses = []
Expand Down Expand Up @@ -740,6 +657,8 @@ def filter_timesteps(self, start: str, end: str, inplace: bool = False) -> Optio
for df_name in CaseStudy.k_dependent_dataframes:
if hasattr(case_study, df_name) and getattr(case_study, df_name) is not None:
df = getattr(case_study, df_name)
if df is None:
continue

index = df.index.names
df_reset = df.reset_index()
Expand Down
79 changes: 79 additions & 0 deletions ExcelReader.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@
import openpyxl
import pandas as pd
from openpyxl import load_workbook
from openpyxl.utils.cell import get_column_letter

from printer import Printer

Expand Down Expand Up @@ -198,6 +199,84 @@ def get_Power_Hindex(excel_file_path: str, keep_excluded_entries: bool = False,
return dPower_Hindex


def get_Power_ImportExport(excel_file_path: str, keep_excluded_entries: bool = False, fail_on_wrong_version: bool = False) -> pd.DataFrame:
    """
    Read the dPower_ImportExport data from the Excel file.

    Each sheet is one scenario. The header holds one (hub, i) pair of columns
    per connection, separated by an unnamed spacer column; the data rows hold
    one column triple (ImpExpMinimum, ImpExpMaximum, ImpExpPrice) per pair.
    The triples are melted and pivoted into a long table indexed by
    ['hub', 'i', 'rp', 'k'], with all scenarios concatenated.

    :param excel_file_path: Path to the Excel file
    :param keep_excluded_entries: Unused but kept for compatibility with other functions
    :param fail_on_wrong_version: If True, raise an error if the version of the Excel file does not match the expected version
    :return: dPower_ImportExport
    :raises ValueError: On malformed headers, duplicate hub names, unexpected
                        column names, or '@' in hub/node names.
    """
    if keep_excluded_entries:
        printer.warning("'keep_excluded_entries' is set for 'get_Power_ImportExport', although nothing is excluded anyway - please check if this is intended.")

    check_LEGOExcel_version(excel_file_path, "v0.0.1", fail_on_wrong_version)
    xls = pd.ExcelFile(excel_file_path)
    data = pd.DataFrame()

    for scenario in xls.sheet_names:  # Iterate through all sheets, i.e., through all scenarios
        # Read row 3 (information about hubs and nodes)
        hub_i_df = pd.read_excel(excel_file_path, skiprows=[0, 1, 3], nrows=2, sheet_name=scenario)
        hub_i = []
        hubs = []
        i = 6  # Start checking from column 7 (index 6, zero-based)
        while i < hub_i_df.shape[1]:
            hubs.append(hub_i_df.columns[i])
            hub_i.append((hub_i_df.columns[i], hub_i_df.columns[i + 1]))
            # NOTE(review): columns[i + 2] is read without a bounds check — if
            # the last (hub, i) pair is not followed by a spacer column this
            # raises IndexError rather than the intended ValueError; confirm the
            # template always ends with an "Unnamed" spacer column.
            if "Unnamed" not in hub_i_df.columns[i + 2]:
                raise ValueError(f"Power_ImportExport: Expected pairs of columns for hub and i, but found an unexpected text '{hub_i_df.columns[i + 2]}' at column index {get_column_letter(i + 3)}. Please check the Excel file format.")
            i += 3  # Move to the next pair (skip the "Unnamed" column)

        if len(hubs) != len(set(hubs)):
            raise ValueError(f"Power_ImportExport: Found duplicate hub names in the header row. Hubs must be unique. Please check the Excel file.")

        df = pd.read_excel(excel_file_path, skiprows=[0, 1, 2, 4, 5, 6], sheet_name=scenario)
        df = df.drop(df.columns[0], axis=1)  # Drop the first column (which is empty)

        # Rename each data column to "hub@node@valueType" so the melt below can
        # carry all three identifiers in a single column name.
        for i, col in enumerate(df.columns):
            if i < 5:
                continue  # Skip the first five columns
            hub = hub_i[(i - 5) // 3][0]
            node = hub_i[(i - 5) // 3][1]

            # NOTE(review): the offsets passed to get_column_letter differ
            # between the header check (i + 3) and these messages (i + 2) —
            # verify which offset maps to the true Excel column letter.
            match (i - 5) % 3:
                case 0:
                    if "ImpExpMinimum" not in col:
                        raise ValueError(f"Power_ImportExport: Expected column 'ImpExpMinimum' at column index {get_column_letter(i + 2)}, but found '{col}'. Please check the Excel file format.")
                    col_name = "ImpExpMinimum"
                case 1:
                    if "ImpExpMaximum" not in col:
                        raise ValueError(f"Power_ImportExport: Expected column 'ImpExpMaximum' at column index {get_column_letter(i + 2)}, but found '{col}'. Please check the Excel file format.")
                    col_name = "ImpExpMaximum"
                case 2:
                    if "ImpExpPrice" not in col:
                        raise ValueError(f"Power_ImportExport: Expected column 'ImpExpPrice' at column index {get_column_letter(i + 2)}, but found '{col}'. Please check the Excel file format.")
                    col_name = "ImpExpPrice"
                case _:
                    raise ValueError("This should never happen.")

            # '@' is the delimiter used by the str.split below, so it must not
            # occur inside hub or node names.
            if "@" in hub:
                raise ValueError(f"Power_ImportExport: Found '@' in hub name {hub}, which is not allowed. Please rename it.")
            elif "@" in node:
                raise ValueError(f"Power_ImportExport: Found '@' in node name {node}, which is not allowed. Please rename it.")
            df = df.rename(columns={col: f"{hub}@{node}@{col_name}"})

        df = df.melt(id_vars=["id", "rp", "k", "dataPackage", "dataSource"])

        df[["hub", "i", "valueType"]] = df["variable"].str.split("@", expand=True)  # Split the variable column into hub, i and valueType

        df = df.pivot(index=["id", "rp", "k", "dataPackage", "dataSource", "hub", "i"], columns="valueType", values="value")
        df.columns.name = None  # Fix name of columns/indices (which are altered through pivot)

        df["scenario"] = scenario

        df = df.reset_index().set_index(["hub", "i", "rp", "k"])  # Set multiindex

        data = pd.concat([data, df], ignore_index=False)  # Append the DataFrame to the main DataFrame

    return data


def get_Power_Inflows(excel_file_path: str, keep_excluded_entries: bool = False, fail_on_wrong_version: bool = False) -> pd.DataFrame:
"""
Read the dPower_Inflows data from the Excel file.
Expand Down
Loading
Loading