diff --git a/gtep/demo_pull_vars_for_bus.py b/gtep/demo_pull_vars_for_bus.py
new file mode 100644
index 0000000..3fc86ee
--- /dev/null
+++ b/gtep/demo_pull_vars_for_bus.py
@@ -0,0 +1,51 @@
+import pandas as pd
+from pathlib import Path
+
+# Net Capacity Factor || CAPEX || Construction Financing Cost || Overnight Capital Cost || fixed operation etc. || variable operation etc. || fuel costs ($/MWh) (fuel costs won't exist for all types, can be assumed 0 in that case)
+# Summary_CF || Summary_CAPEX || Scrape || Scrape || Scrape || Scrape || Summary_Fuel
+
+# pull the bus gen mappings
+bus_data_df = pd.read_csv("Bus_data_gen_mappings.csv")
+
+# read pricing data
+pricing_data_book_names = [
+    "Land-Based Wind",
+    "Solar - CSP",
+    "Natural Gas_FE",
+    "Coal_FE",
+    "Biopower",
+    "Nuclear",
+    "Geothermal",
+    "Solar - Utility PV",
+]
+pricing_data_sheet = "./gtep/2022 v3 Annual Technology Baseline Workbook Mid-year update 2-15-2023 Clean.xlsx"
+pricing_data_sheet_path = Path(pricing_data_sheet)
+pricing_dict = {}
+for this_book_name in pricing_data_book_names:
+    pricing_dict[this_book_name] = pd.read_excel(
+        pricing_data_sheet_path, this_book_name
+    )
+
+# now show a demo of how you can use the bus data df to pull data of interest
+
+# example: get Land-Based Wind CAPEX for all counties using the Moderate value for all years
+# CAPEX ($/kW)
+
+# define the gen type
+gen_type = "Land-Based Wind"
+var_of_interest = "CAPEX ($/kW)"
+subvar_of_interest = "Moderate"
+
+demo_df = pricing_dict[gen_type][
+    (pricing_dict[gen_type]["Key1"] == var_of_interest)
+    & (pricing_dict[gen_type]["Key3"] == subvar_of_interest)
+]
+
+# bus_data_df['Land-Based Wind']
+
+bus_cutdown_df = bus_data_df[["Bus Name", gen_type]]
+the_thing_we_want = bus_cutdown_df["Land-Based Wind"].iloc[0]
+the_row_that_has_the_prices = demo_df[demo_df["Key2"] == the_thing_we_want]
+bus_capex_df = pd.merge(bus_cutdown_df, demo_df, left_on=gen_type, right_on="Key2")
+
+pass
diff --git a/gtep/gtep_solution.py b/gtep/gtep_solution.py
index ccaade8..47d9b57 100644
--- a/gtep/gtep_solution.py
+++ b/gtep/gtep_solution.py
@@ -19,8 +19,9 @@
 import numpy as np
 import re
+from configparser import ConfigParser
 
-from matplotlib.patches import Rectangle, RegularPolygon, PathPatch
+from matplotlib.patches import Rectangle, RegularPolygon, PathPatch, Circle
 from matplotlib.collections import PatchCollection
 import matplotlib.cm as cm
 from matplotlib.transforms import Affine2D
@@ -29,11 +30,32 @@
 logger = logging.getLogger(__name__)
 
+
 # [TODO] inject units into plots
 class ExpansionPlanningSolution:
     def __init__(self):
         # PopPop the Power Optimization Possum says ౿ᓕ ̤Ꜥ·> --- "eat trash, heck __init__, hold me"
-        pass
+        self.set_config_defaults()
+
+    def set_config_defaults(self):
+        self.config = ConfigParser()
+        # set default values
+        self.config["model_specific_nomenclature"] = {
+            "bus_descriptor": "bus",
+        }
+        self.config["period_categories"] = {
+            "plot_investment": True,
+            "plot_representative": True,
+            "plot_commitment": True,
+            "plot_dispatch": True,
+        }
+        self.config["power_flow"] = {
+            "bus_extension": 0,
+            "network_flow_glyph_type": "custom",
+        }
+
+    def load_config(self, config_file):
+        self.config.read_file(open(config_file))
 
     def load_from_file(self):
         pass
@@ -58,7 +80,11 @@ def load_from_model(self, gtep_model):
         self.num_commit = gtep_model.num_commit  # int
         self.num_dispatch = gtep_model.num_dispatch  # int
 
-        self.expressions = {expr.name: value(expr) for expr in gtep_model.model.component_data_objects(Expression) if ("Commitment" in expr.name) or ("Investment" in expr.name)}
+        self.expressions = {
+            expr.name: value(expr)
+            for expr in gtep_model.model.component_data_objects(Expression)
+            if ("Commitment" in expr.name) or ("Investment" in expr.name)
+        }
 
     def import_data_object(self, data_obj):
         self.data = data_obj.md
@@ -111,12 +137,18 @@
 
                 # handle binary
                 if val[0].is_binary():
-                    results_dict["solution_loader"]["primals"][tmp_key]['is_binary'] = val[0].is_binary()
+                    results_dict["solution_loader"]["primals"][tmp_key]["is_binary"] = val[
+                        0
+                    ].is_binary()
 
                 # handle units, sometimes they don't have anything
                 if val[0].get_units() is not None:
-                    results_dict["solution_loader"]["primals"][tmp_key]["units"] = val[0].get_units().name
+                    results_dict["solution_loader"]["primals"][tmp_key]["units"] = (
+                        val[0].get_units().name
+                    )
                 else:
-                    results_dict["solution_loader"]["primals"][tmp_key]["units"] = val[0].get_units()
+                    results_dict["solution_loader"]["primals"][tmp_key]["units"] = val[
+                        0
+                    ].get_units()
 
         # renest "termination_condition" as a json-friendly dictionary
         # things are either vars (which have some sort of signifier in [] brackets) or are an attribute, which don't
@@ -137,19 +169,19 @@
 
                 # handle binary
                 if val[0].is_binary():
-                    tmp_dict['is_binary'] = val[0].is_binary()
-
+                    tmp_dict["is_binary"] = val[0].is_binary()
+
                 # handle units, sometimes they don't have anything
                 if val[0].get_units() is not None:
-                    tmp_dict['units'] = val[0].get_units().name
+                    tmp_dict["units"] = val[0].get_units().name
                 else:
-                    tmp_dict['units'] = val[0].get_units()
+                    tmp_dict["units"] = val[0].get_units()
 
         # allocate the nested dictionary
         def nested_set(this_dict, key, val):
             if len(key) > 1:
                 # check if it's a binary var and pull up one layer
-                if key[1] == 'binary_indicator_var':
+                if key[1] == "binary_indicator_var":
                     this_dict[key[0]] = val
                 else:
                     this_dict.setdefault(key[0], {})
@@ -183,7 +215,7 @@
         # split out expressions
         self.expressions_tree = results_dict["expressions_tree"]
 
-        # mint the final dictionary to save
+        # mint the final dictionary to save
         out_dict = {"data": self.data.data, "results": results_dict}
 
         self.primals_tree = results_dict["primals_tree"]
@@ -205,7 +237,9 @@
                     relationships_dict[primal_name].add(primal_category)
 
             except IndexError as iEx:
-                print(f"[WARNING] discover_level_relationships has encountered an error: Attempted to split out {this_key}, failed with error: \"{iEx}\". Assigning as axuilary.")
+                print(
+                    f'[WARNING] discover_level_relationships has encountered an error: Attempted to split out {this_key}, failed with error: "{iEx}". Assigning as auxiliary.'
+ ) # convert sets to frozensets to be hashable for this_key in relationships_dict: @@ -240,30 +274,70 @@ def _level_relationship_dict_to_df_workhorse( # check if this is a variable by checking if it has a "value" if "value" in period_dict["primals_by_name"][this_koi][this_voi]: # if its an integer, cast it as a boolean for now - if 'is_binary' in period_dict["primals_by_name"][this_koi][this_voi]: - if period_dict["primals_by_name"][this_koi][this_voi]['is_binary']: + if ( + "is_binary" + in period_dict["primals_by_name"][this_koi][this_voi] + ): + if period_dict["primals_by_name"][this_koi][this_voi][ + "is_binary" + ]: df_data_dict[f"{this_koi}_{this_voi}_value"].append( - bool(round(period_dict["primals_by_name"][this_koi][this_voi]["value"])) # have to cast to int because there are floating point errors + bool( + round( + period_dict["primals_by_name"][this_koi][ + this_voi + ]["value"] + ) + ) # have to cast to int because there are floating point errors + ) + units_dict.setdefault( + f"{this_koi}_{this_voi}_value", + period_dict["primals_by_name"][this_koi][this_voi][ + "units" + ], ) - units_dict.setdefault(f"{this_koi}_{this_voi}_value", period_dict["primals_by_name"][this_koi][this_voi]['units']) else: df_data_dict[f"{this_koi}_{this_voi}_value"].append( - period_dict["primals_by_name"][this_koi][this_voi]["value"] + period_dict["primals_by_name"][this_koi][this_voi][ + "value" + ] + ) + units_dict.setdefault( + f"{this_koi}_{this_voi}_value", + period_dict["primals_by_name"][this_koi][this_voi][ + "units" + ], ) - units_dict.setdefault(f"{this_koi}_{this_voi}_value", period_dict["primals_by_name"][this_koi][this_voi]['units']) else: df_data_dict[f"{this_koi}_{this_voi}_value"].append( - period_dict["primals_by_name"][this_koi][this_voi]["value"] - ) - units_dict.setdefault(f"{this_koi}_{this_voi}_value", period_dict["primals_by_name"][this_koi][this_voi]['units']) + period_dict["primals_by_name"][this_koi][this_voi][ + "value" + ] + ) + units_dict.setdefault( + f"{this_koi}_{this_voi}_value", + period_dict["primals_by_name"][this_koi][this_voi][ + "units" + ], + ) df_data_dict[f"{this_koi}_{this_voi}_lower_bound"].append( - period_dict["primals_by_name"][this_koi][this_voi]["bounds"][0] + period_dict["primals_by_name"][this_koi][this_voi][ + "bounds" + ][0] + ) + units_dict.setdefault( + f"{this_koi}_{this_voi}_value", + period_dict["primals_by_name"][this_koi][this_voi]["units"], ) - units_dict.setdefault(f"{this_koi}_{this_voi}_value", period_dict["primals_by_name"][this_koi][this_voi]['units']) df_data_dict[f"{this_koi}_{this_voi}_upper_bound"].append( - period_dict["primals_by_name"][this_koi][this_voi]["bounds"][1] + period_dict["primals_by_name"][this_koi][this_voi][ + "bounds" + ][1] + ) + units_dict.setdefault( + f"{this_koi}_{this_voi}_value", + period_dict["primals_by_name"][this_koi][this_voi]["units"], ) - units_dict.setdefault(f"{this_koi}_{this_voi}_value", period_dict["primals_by_name"][this_koi][this_voi]['units']) # try to make a DF, and if not just pass back an empty try: @@ -272,50 +346,58 @@ def _level_relationship_dict_to_df_workhorse( data_df = data_df.fillna(value=np.nan) return data_df, units_dict except ValueError as vEx: - print(f"[WARNING] _level_relationship_dict_to_df_workhorse attempted to create dataframe and failed: {vEx}") + print( + f"[WARNING] _level_relationship_dict_to_df_workhorse attempted to create dataframe and failed: {vEx}" + ) return pd.DataFrame(), {} - def _plot_workhorse_relational(self, - level_key, - df, - keys, - vars, - 
parent_key_string,
-                                  pretty_title="Selected Data",
-                                  plot_bounds=False,
-                                  save_dir=".",
-                                  aspect_ratio=1):
-
-
+    def _plot_workhorse_relational(
+        self,
+        level_key,
+        df,
+        keys,
+        vars,
+        parent_key_string,
+        pretty_title="Selected Data",
+        plot_bounds=False,
+        save_dir=".",
+        aspect_ratio=1,
+    ):
+
         # figure out how big the plot needs to be
-        gridspec_height = 2*max(len(keys), len(vars))
+        gridspec_height = 2 * max(len(keys), len(vars))
         gridspec_width = 2
         fig_width_padding = 0
         fig_height_padding = 0
         max_figheight = 48
         total_periods = len(df[level_key])
-        key_gridspec_div = floor(gridspec_height/len(keys)) # number of gridspec heights a key plot can be
-        var_gridspec_div = floor(gridspec_height/len(vars)) # number of gridspec heights a var plot can be
+        key_gridspec_div = floor(
+            gridspec_height / len(keys)
+        )  # number of gridspec heights a key plot can be
+        var_gridspec_div = floor(
+            gridspec_height / len(vars)
+        )  # number of gridspec heights a var plot can be
 
         # to make things look nice, we don't want height or width to be more than twice the other
-        fig_width = (total_periods*gridspec_width*4)+fig_width_padding
+        fig_width = (total_periods * gridspec_width * 4) + fig_width_padding
         fig_width = min(max_figheight, fig_width)
-        fig_height = (2*gridspec_height)+fig_height_padding
-        if fig_width/fig_height > aspect_ratio:
-            fig_height = floor(fig_width/aspect_ratio)
-        elif fig_height/fig_width > aspect_ratio:
-            fig_width = floor(fig_height/aspect_ratio)
+        fig_height = (2 * gridspec_height) + fig_height_padding
+        if fig_width / fig_height > aspect_ratio:
+            fig_height = floor(fig_width / aspect_ratio)
+        elif fig_height / fig_width > aspect_ratio:
+            fig_width = floor(fig_height / aspect_ratio)
 
         # set up plot
-        fig = plt.figure(figsize=(fig_width,
-                                  fig_height),
-                         tight_layout=False) # (32, 16) works will for 4 plots tall and about 6 periods wide per plot
+        fig = plt.figure(
+            figsize=(fig_width, fig_height), tight_layout=False
+        )  # (32, 16) works well for 4 plots tall and about 6 periods wide per plot
         gs = fig.add_gridspec(gridspec_height, gridspec_width)
         # plot out the keys of interest
         ax_koi_list = []
         for ix_koi, this_koi in enumerate(keys):
-            ax_koi = fig.add_subplot(gs[(ix_koi*key_gridspec_div):((ix_koi+1)*key_gridspec_div), 0])
+            ax_koi = fig.add_subplot(
+                gs[(ix_koi * key_gridspec_div) : ((ix_koi + 1) * key_gridspec_div), 0]
+            )
             ax_koi_list.append(ax_koi)
 
             for iy, this_voi in enumerate(vars):
@@ -345,7 +427,9 @@
         ax_voi_list = []
         # plot generations and curtailments against each other
         for ix_voi, this_voi in enumerate(vars):
-            ax_voi = fig.add_subplot(gs[(ix_voi*var_gridspec_div):((ix_voi+1)*var_gridspec_div), 1])
+            ax_voi = fig.add_subplot(
+                gs[(ix_voi * var_gridspec_div) : ((ix_voi + 1) * var_gridspec_div), 1]
+            )
             ax_voi_list.append(ax_voi)
             for this_koi in keys:
                 ax_voi.plot(
@@ -372,56 +456,91 @@
         fig.align_labels()
         fig.suptitle(f"{parent_key_string}")
-        fig.savefig(f"{save_dir}{parent_key_string}_{pretty_title.replace(' ', '_')}.png")
+        fig.savefig(
+            f"{save_dir}{parent_key_string}_{pretty_title.replace(' ', '_')}.png"
+        )
         plt.close()
-
-    def _plot_workhose_binaries(self,
-                                level_key,
-                                df,
-                                keys,
-                                vars,
-                                parent_key_string,
-                                pretty_title="Selected Data",
-                                save_dir=".",):
-
+    def _plot_workhorse_binaries(
+        self,
+        level_key,
+        df,
+        keys,
+        vars,
+        parent_key_string,
+        pretty_title="Selected Data",
+        save_dir=".",
+        show_off_indicator=True,
+    ):
+
         fig = plt.figure(figsize=(32, 16), tight_layout=False)
-        
gs = fig.add_gridspec(1, 1) # only need 1 plot for now
+        gs = fig.add_gridspec(1, 1)  # only need 1 plot for now
 
         # if all the variables are binaries, we can assume that the vars are all binaries and the keys are all categories
         total_height = len(vars)
-        interstate_height = 1./(len(keys)+2)
-        width = 1
+        interstate_height = 1.0 / (len(keys) + 2)
+        width = 1
         width_padding = 0.05
 
         ax_bins = fig.add_subplot(gs[:, :])
 
-        ax_bins.set_ylim([-0.5, total_height-0.5]) # set ylims to support bools
-        ax_bins.set_xlim([0.5, len(df[level_key])+0.5]) # set xlims to support bools
-        ax_bins.set_yticklabels([None]+list(vars))
+        ax_bins.set_ylim([-0.5, total_height - 0.5])  # set ylims to support bools
+        ax_bins.set_xlim([0.5, len(df[level_key]) + 0.5])  # set xlims to support bools
+        ax_bins.set_yticklabels([None] + list(vars))
         ax_bins.yaxis.set_major_locator(MaxNLocator(integer=True))
         ax_bins.xaxis.set_major_locator(MaxNLocator(integer=True))
         for axline_ix in range(total_height):
-            ax_bins.axhline(axline_ix+0.5, color='grey', linewidth=3,) # draw a seperator line between each level
+            ax_bins.axhline(
+                axline_ix + 0.5,
+                color="grey",
+                linewidth=3,
+            )  # draw a separator line between each level
         for axline_ix in range(len(df[level_key])):
-            ax_bins.axvline(axline_ix+0.5, color='grey', linewidth=3, linestyle='dotted', alpha=0.5) # draw a seperator line between each level
+            ax_bins.axvline(
+                axline_ix + 0.5,
+                color="grey",
+                linewidth=3,
+                linestyle="dotted",
+                alpha=0.5,
+            )  # draw a separator line between each level
 
         for ix_key, this_koi in enumerate(keys):
             # make a dummy line to steal the color cycler and make a single item for the legend
-            line, = ax_bins.plot(
+            (line,) = ax_bins.plot(
                 [None],
                 [None],
                 label=f"{this_koi}",
                 linewidth=5,
             )
             for ix_var, this_voi in enumerate(vars):
-                for tx, is_it_on in zip(df[level_key], df[f"{this_koi}_{this_voi}_value"]):
+                for tx, is_it_on in zip(
+                    df[level_key], df[f"{this_koi}_{this_voi}_value"]
+                ):
                     if is_it_on:
-                        tmp_rect = plt.Rectangle([tx-0.5+width_padding, ((ix_var)+(interstate_height*(ix_key+1)))-0.5],
-                                                 width-(width_padding*2),
-                                                 interstate_height,
-                                                 alpha=0.9,
-                                                 edgecolor='black',
-                                                 color=line.get_color())
+                        tmp_rect = plt.Rectangle(
+                            [
+                                tx - 0.5 + width_padding,
+                                ((ix_var) + (interstate_height * (ix_key + 1))) - 0.5,
+                            ],
+                            width - (width_padding * 2),
+                            interstate_height,
+                            alpha=0.9,
+                            edgecolor="black",
+                            color=line.get_color(),
+                        )
+                        ax_bins.add_patch(tmp_rect)
+                    elif show_off_indicator:
+
+                        tmp_rect = plt.Rectangle(
+                            [
+                                tx - 0.5 + width_padding,
+                                ((ix_var) + (interstate_height * (ix_key + 1))) - 0.5,
+                            ],
+                            width - (width_padding * 2),
+                            interstate_height,
+                            alpha=0.25,
+                            linewidth=5.0,
+                            edgecolor="black",
+                            color="grey",
+                        )
                         ax_bins.add_patch(tmp_rect)
-
-
         ax_bins.set_xlabel(f"{level_key} $[n]$")
         ax_bins.set_title("State Variable Time History")
@@ -430,11 +549,11 @@
         fig.align_labels()
         fig.suptitle(f"{parent_key_string}")
-        fig.savefig(f"{save_dir}{parent_key_string}_{pretty_title.replace(' ', '_')}.png")
+        fig.savefig(
+            f"{save_dir}{parent_key_string}_{pretty_title.replace(' ', '_')}.png"
+        )
         plt.close()
 
-
-
     def _level_relationship_df_to_plot(
         self,
         level_key,
@@ -449,45 +568,79 @@
     ):
 
         # [HACK] hard coding the generator state order, to be fixed later
-        config['order_gen_state'] = ['genOff', 'genShutdown', 'genStartup', 'genOn']
+        config["order_gen_state"] = ["genOff", "genShutdown", "genStartup", "genOn"]
+        config["order_gen_invest_state"] = [
+            "genDisabled",
+            "genRetired",
+            
"genExtended", + "genInstalled", + "genOperational", + ] # check if ALL the possible things to look at are binaries all_binaries = True for ix, this_voi in enumerate(vars): for iy, this_koi in enumerate(keys): - if not (df[f"{this_koi}_{this_voi}_value"].dtype == 'bool'): + if not (df[f"{this_koi}_{this_voi}_value"].dtype == "bool"): all_binaries = False break if all_binaries: # check the config to see if we have any overrides - if 'order_gen_state' in config: + for override_category in self.config["binary_order_overrides"]: # check that everything can be mapped over matched_config_override = True for item in vars: - if not item in config['order_gen_state']: + if not item in eval( + self.config["binary_order_overrides"][override_category] + ): matched_config_override = False break if matched_config_override: - vars = config['order_gen_state'] - - self._plot_workhose_binaries(level_key, - df, - keys, - vars, - parent_key_string, - pretty_title, - save_dir,) - + vars = eval( + self.config["binary_order_overrides"][override_category] + ) + + # if "order_gen_state" in config: + # # check that everything can be mapped over + # matched_config_override = True + # for item in vars: + # if not item in config["order_gen_state"]: + # matched_config_override = False + # break + # if matched_config_override: + # vars = config["order_gen_state"] + # if "order_gen_invest_state" in config: + # # check that everything can be mapped over + # matched_config_override = True + # for item in vars: + # if not item in config["order_gen_invest_state"]: + # matched_config_override = False + # break + # if matched_config_override: + # vars = config["order_gen_invest_state"] + + self._plot_workhose_binaries( + level_key, + df, + keys, + vars, + parent_key_string, + pretty_title, + save_dir, + ) + else: - self._plot_workhorse_relational(level_key, - df, - keys, - vars, - parent_key_string, - pretty_title, - plot_bounds, - save_dir) + self._plot_workhorse_relational( + level_key, + df, + keys, + vars, + parent_key_string, + pretty_title, + plot_bounds, + save_dir, + ) def _expressions_plot_workhorse( self, @@ -499,12 +652,14 @@ def _expressions_plot_workhorse( ): # go through a commitment period and parse out the dispatch periods # slice out all keys pertaining to dispatchPeriod - level_period_keys = [this_key for this_key in upper_level_dict.keys() if (level_key in this_key) ] + level_period_keys = [ + this_key for this_key in upper_level_dict.keys() if (level_key in this_key) + ] # scan level period for keys that have values associated keys_of_vals_of_interest = [] for this_key in upper_level_dict[level_period_keys[0]]: - if 'value' in upper_level_dict[level_period_keys[0]][this_key]: + if "value" in upper_level_dict[level_period_keys[0]][this_key]: keys_of_vals_of_interest.append(this_key) print(keys_of_vals_of_interest) @@ -514,21 +669,24 @@ def _expressions_plot_workhorse( if len(keys_of_vals_of_interest) > 0: # check all the periods and sort them out vals_dict = {} - + for this_key in upper_level_dict: - level_period_number = int(re.split('\[|\]', this_key.split(level_key)[1])[1]) + level_period_number = int( + re.split("\[|\]", this_key.split(level_key)[1])[1] + ) vals_dict.setdefault(level_period_number, {}) for this_val_key in keys_of_vals_of_interest: - vals_dict[level_period_number][this_val_key] = upper_level_dict[this_key][this_val_key]['value'] - + vals_dict[level_period_number][this_val_key] = upper_level_dict[ + this_key + ][this_val_key]["value"] print(vals_dict) # now pivot the dictionary to make a 
dataframe
             # make a dictionary where the keys are the top layer
-            df_dict = {key:[] for key in keys_of_vals_of_interest}
+            df_dict = {key: [] for key in keys_of_vals_of_interest}
             sorted_vals_period = sorted(vals_dict)
-            df_dict['period_number'] = sorted_vals_period
+            df_dict["period_number"] = sorted_vals_period
             for this_val_period in sorted_vals_period:
                 for this_key in keys_of_vals_of_interest:
                     df_dict[this_key].append(vals_dict[this_val_period][this_key])
@@ -539,40 +697,46 @@
 
             # plot the DF
             # figure out how big the plot needs to be
-            gridspec_height = 2*len(keys_of_vals_of_interest)
+            gridspec_height = 2 * len(keys_of_vals_of_interest)
             gridspec_width = 2
             fig_width_padding = 0
             fig_height_padding = 0
             max_figheight = 48
             total_periods = len(expression_level_df)
-            key_gridspec_div = floor(gridspec_height/len(keys_of_vals_of_interest)) # number of gridspec heights a key plot can be
+            key_gridspec_div = floor(
+                gridspec_height / len(keys_of_vals_of_interest)
+            )  # number of gridspec heights a key plot can be
 
             # to make things look nice, we don't want height or width to be more than twice the other
-            fig_width = (total_periods*gridspec_width*4)+fig_width_padding
+            fig_width = (total_periods * gridspec_width * 4) + fig_width_padding
             fig_width = min(max_figheight, fig_width)
-            fig_height = (2*gridspec_height)+fig_height_padding
-            if fig_width/fig_height > 2:
-                fig_height = floor(fig_width/2)
-            elif fig_height/fig_width > 2:
-                fig_width = floor(fig_height/2)
+            fig_height = (2 * gridspec_height) + fig_height_padding
+            if fig_width / fig_height > 2:
+                fig_height = floor(fig_width / 2)
+            elif fig_height / fig_width > 2:
+                fig_width = floor(fig_height / 2)
 
             # set up plot
-            fig = plt.figure(figsize=(fig_width,
-                                      fig_height),
-                             tight_layout=False) # (32, 16) works will for 4 plots tall and about 6 periods wide per plot
+            fig = plt.figure(
+                figsize=(fig_width, fig_height), tight_layout=False
+            )  # (32, 16) works well for 4 plots tall and about 6 periods wide per plot
             gs = fig.add_gridspec(gridspec_height, gridspec_width)
             # plot out the keys of interest
-
+
             pretty_title = "Expression"
-
-
+
             ax_koi_list = []
             for ix_koi, this_koi in enumerate(keys_of_vals_of_interest):
-                ax_koi = fig.add_subplot(gs[(ix_koi*key_gridspec_div):((ix_koi+1)*key_gridspec_div), :])
+                ax_koi = fig.add_subplot(
+                    gs[
+                        (ix_koi * key_gridspec_div) : ((ix_koi + 1) * key_gridspec_div),
+                        :,
+                    ]
+                )
                 ax_koi_list.append(ax_koi)
 
                 ax_koi.plot(
-                    expression_level_df['period_number'],
+                    expression_level_df["period_number"],
                     expression_level_df[this_koi],
                     label=f"{this_koi}",
                     marker="o",
@@ -585,14 +749,13 @@
             ax_koi_list[-1].set_xlabel(f"{level_key} $[n]$")
             ax_koi_list[0].set_title(f"{pretty_title} by Type")
 
-
             fig.align_labels()
             fig.suptitle(f"{parent_key_string}")
-            fig.savefig(f"{save_dir}{parent_key_string}_{pretty_title.replace(' ', '_')}.png")
+            fig.savefig(
+                f"{save_dir}{parent_key_string}_{pretty_title.replace(' ', '_')}.png"
+            )
             plt.close()
 
-
-
     def _level_plot_workhorse(
         self,
         level_key,
@@ -604,12 +767,16 @@
         # go through a commitment period and parse out the dispatch periods
         level_timeseries = []
         # slice out all keys pertaining to dispatchPeriod
-        level_period_keys = [this_key for this_key in upper_level_dict.keys() if (level_key in this_key) ]
+        level_period_keys = [
+            this_key for this_key in upper_level_dict.keys() if (level_key in this_key)
+        ]
         # aux_var_dict = {}
         for this_key in level_period_keys:
             level_period_dict = {}
 
             # cut out which 
dispatch period this is
-            level_period_number = int(re.split('\[|\]', this_key.split(level_key)[1])[1])
+            level_period_number = int(
+                re.split(r"\[|\]", this_key.split(level_key)[1])[1]
+            )
 
             # print(level_period_number)
             level_period_dict["period_number"] = level_period_number
@@ -631,20 +798,20 @@
                     #  - category: "commitmentPeriod"
                     #  - name: "1"
                     primal_category = this_primal.split("[")[0]
-                    primal_name = this_primal.split("[")[1].split("]")[0]
-
+                    primal_name = this_primal.split("[")[1].split("]")[0]
+
                     # create one view that shares the categories, and one that shares the names
                     primals_by_category.setdefault(primal_category, {})
                     primals_by_name.setdefault(primal_name, {})
 
-                    primals_by_category[primal_category][primal_name] = (
-                        tmp_save_primal
-                    )
+                    primals_by_category[primal_category][primal_name] = tmp_save_primal
                     primals_by_name[primal_name][primal_category] = tmp_save_primal
                 except IndexError as iEx:
-                    print(f"[WARNING] _level_plot_workhorse has encountered an error: Attempted to split out {this_primal} from {this_key}, failed with error {iEx}. Skipping.")
-                    # aux_var_dict.setdefault(this_primal, [])
+                    print(
+                        f"[WARNING] _level_plot_workhorse has encountered an error: Attempted to split out {this_primal} from {this_key}, failed with error {iEx}. Skipping."
+                    )
+                    # aux_var_dict.setdefault(this_primal, [])
                     # aux_var_dict[this_primal].append(this_key)
             level_period_dict["primals_by_category"] = primals_by_category
             level_period_dict["primals_by_name"] = primals_by_name
@@ -655,13 +822,13 @@
         #     print(aux_var_primal_name, aux_var_category_name)
 
         # sort by the dispatch period number
-        level_timeseries = sorted(
-            level_timeseries, key=lambda x: x["period_number"]
-        )
+        level_timeseries = sorted(level_timeseries, key=lambda x: x["period_number"])
 
         # discover the relationships at the dispatch level
         # ASSUMES that all the dispatch levels have the exact same underlying variables and relationships
-        level_relationships = self.discover_level_relationships(upper_level_dict[level_period_keys[0]])
+        level_relationships = self.discover_level_relationships(
+            upper_level_dict[level_period_keys[0]]
+        )
 
         # plot relationships
         for vars_of_interest, keys_of_interest in level_relationships.items():
@@ -670,11 +837,37 @@
             tmp_voi = sorted(vars_of_interest)
             tmp_koi = sorted(keys_of_interest)
 
+            # check the KOIs against the config for any overrides
+            for override_category in self.config["diagnostic_keys_of_interest"]:
+                # check that everything can be mapped over
+                override_set = set(
+                    eval(self.config["diagnostic_keys_of_interest"][override_category])
+                )
+                check_set = set()
+                # if there are SOME keys but not ALL keys, override the tmp_koi with the one in config
+                for item in tmp_koi:
+                    # if item in eval(
+                    #     self.config["diagnostic_keys_of_interest"][override_category]
+                    # ):
+                    if item in override_set:
+                        check_set.add(item)
+                # check if all the matched_keys are in the override_category
+                if override_set == check_set:
+                    tmp_koi = sorted(
+                        eval(
+                            self.config["diagnostic_keys_of_interest"][
+                                override_category
+                            ]
+                        )
+                    )
+
             # make a df for debug and also easy tabularness for plots
-            this_df_of_interest, this_df_units = self._level_relationship_dict_to_df_workhorse(
-                level_key, level_timeseries, tmp_koi, tmp_voi
+            this_df_of_interest, this_df_units = (
+                self._level_relationship_dict_to_df_workhorse(
+                    level_key, level_timeseries, tmp_koi, tmp_voi
+                )
             )
-
+
             # check if we got anything in the df
             if not this_df_of_interest.empty:
 
@@ -683,37 
+876,41 @@ def _level_plot_workhorse( # [HACK] # if we find powerflow, plot it as a network - if 'powerFlow' in tmp_voi: - self._plot_graph_workhorse(this_df_of_interest, - 'powerFlow', - parent_key_string, - units=this_df_units, - pretty_title=this_pretty_title, - save_dir=save_dir,) - - # [HACK] put this back one intent level when done - # plot it - self._level_relationship_df_to_plot( - level_key, + if "powerFlow" in tmp_voi: + self._plot_graph_workhorse( this_df_of_interest, tmp_koi, - tmp_voi, + "powerFlow", parent_key_string, + units=this_df_units, pretty_title=this_pretty_title, save_dir=save_dir, - plot_bounds=plot_bounds) - - - def _plot_graph_workhorse(self, - df, - value_key, - parent_key_string, - - what_is_a_bus_called='branch', #'dc_branch', - - units=None, - pretty_title="Selected Data", - save_dir=".",): + ) + + # [HACK] put this back one intent level when done + # plot it + self._level_relationship_df_to_plot( + level_key, + this_df_of_interest, + tmp_koi, + tmp_voi, + parent_key_string, + pretty_title=this_pretty_title, + save_dir=save_dir, + plot_bounds=plot_bounds, + ) + + def _plot_graph_workhorse( + self, + df, + key_of_interest, + value_key, + parent_key_string, + what_is_a_bus_called="branch", #'dc_branch', + units=None, + pretty_title="Selected Data", + save_dir=".", + ): # testing networkx plots # preslice out data of interest @@ -730,129 +927,296 @@ def _plot_graph_workhorse(self, G = nx.Graph() labels = {} # add nodes - for item in self.data.data['elements']['bus']: - G.add_node(item) - labels[item] = item + + tmp_buses_of_interest = self.data.data["elements"]["bus"] + # check the overrides to see if we need to grab specific buses + + for this_koi in key_of_interest: + from_bus = self.data.data["elements"]["branch"][this_koi]["from_bus"] + to_bus = self.data.data["elements"]["branch"][this_koi]["to_bus"] + + G.add_node(from_bus) + labels[from_bus] = from_bus + G.add_node(to_bus) + labels[to_bus] = to_bus + + # for item in tmp_buses_of_interest: + # # cycle through the branches of interest to find the buses of interest + # for this_branch_key in self.data.data["elements"]["branch"]: + # if item in [ + # self.data.data["elements"]["branch"][this_branch_key]["from_bus"], + # self.data.data["elements"]["branch"][this_branch_key]["to_bus"], + # ]: + # G.add_node(item) + # labels[item] = item + # break # do edges manually later # set up plot - fig = plt.figure(figsize=(16, 8), tight_layout=False) # (32, 16) works will for 4 plots tall and about 6 periods wide per plot + fig = plt.figure( + figsize=(16, 8), tight_layout=False + ) # (32, 16) works will for 4 plots tall and about 6 periods wide per plot ax_graph = fig.add_subplot() ax_graph.grid(False) # # add edges # for item in self.data.data['elements']['branch']: # G.add_edge(self.data.data['elements']['branch'][item]['from_bus'], self.data.data['elements']['branch'][item]['to_bus']) - + # G = nx.path_graph(5) graph_node_position_dict = nx.kamada_kawai_layout(G) # graph_node_position_dict = nx.planar_layout(G) # graph_node_position_dict = nx.spectral_layout(G) - nx.drawing.draw_networkx_nodes(G, graph_node_position_dict, node_size=1000, ax=ax_graph) - nx.draw_networkx_labels(G, graph_node_position_dict, labels, font_size=18, font_color="whitesmoke", ax=ax_graph) - - def draw_single_edge_flow(item, - glyph_values_slice, - ax_graph, - cmap=cm.rainbow, - norm=Normalize(vmin=None, vmax=None), - glyph_type='custom'): - - - def generate_flow_glyphs(num_glyphs, - spacing=0.05, - glyph_type='triangle', - glyph_rotation=0., - 
verts=3): - + nx.drawing.draw_networkx_nodes( + G, graph_node_position_dict, node_size=1000, ax=ax_graph + ) + nx.draw_networkx_labels( + G, + graph_node_position_dict, + labels, + font_size=18, + font_color="whitesmoke", + ax=ax_graph, + ) + + def draw_single_edge_flow( + item, + glyph_values_slice, + ax_graph, + cmap=cm.rainbow, + norm=Normalize(vmin=None, vmax=None), + glyph_type="custom", + ): + + def generate_flow_glyphs( + weights, + spacing=0.05, + glyph_type="triangle", + glyph_rotation=0.0, + verts=3, + ): + + num_glyphs = len(weights) + flow_glyphs = [] - for this_block_ix in range(num_glyphs): + # put down a start marker + glyph_anchor_coord = [(0.5) / float(num_glyphs), 0] + glyph_verts = 5 + glyph_radius = (1.0 / float(num_glyphs)) / 2.0 + # apply nudges + glyph_radius *= 1 - (spacing / 2.0) + flow_glyphs.append(Circle(glyph_anchor_coord, radius=glyph_radius)) + + yscale_transform = Affine2D().scale(sx=0.25, sy=0.25 / glyph_radius) + # rescale y to make it fit in a 1x1 box + flow_glyphs[-1].set_transform(yscale_transform) + + # # [XXX] make the start marker a thunderbird because thom has dumb ideas + # glyph_anchor_coord = [(0.5) / float(num_glyphs), 0] + # # height is y, width is x + # consistent_width = 1.0 / float(num_glyphs) + # # apply scaling + # x_nudge = consistent_width * (spacing) + # patch_width = consistent_width - x_nudge + # patch_height = 1 + # codes, verts = zip( + # *[ + # (mpath.Path.MOVETO, glyph_anchor_coord), + # ( + # mpath.Path.LINETO, + # [ + # glyph_anchor_coord[0], + # glyph_anchor_coord[1] + patch_height, + # ], + # ), + # ( + # mpath.Path.LINETO, + # [ + # glyph_anchor_coord[0] + patch_width * 0.7, + # glyph_anchor_coord[1] + patch_height, + # ], + # ), # go 70% of the width along the top + # ( + # mpath.Path.LINETO, + # [ + # glyph_anchor_coord[0] + patch_width, + # glyph_anchor_coord[1] + patch_height * 0.5, + # ], + # ), # go the rest of the width and meet in the center + # ( + # mpath.Path.LINETO, + # [ + # glyph_anchor_coord[0] + patch_width * 0.7, + # glyph_anchor_coord[1], + # ], + # ), # go back a bit and to the bottom to finish the wedge + # (mpath.Path.LINETO, glyph_anchor_coord), + # ] + + # now cycle through the periods and make the full line + for this_block_ix, this_block_weight in enumerate(weights): # normalizing this patch to 1 - #### + #### # rectangle version #### - if glyph_type == 'rectangle': + if glyph_type == "rectangle": # anchor for rectangles are set to bottom left - glyph_anchor_coord = [this_block_ix/float(num_glyphs), -.5] + glyph_anchor_coord = [this_block_ix / float(num_glyphs), -0.5] # height is y, width is x - consistent_width = 1./float(num_glyphs) + consistent_width = 1.0 / float(num_glyphs) # apply scaling - x_nudge = consistent_width*(spacing) + x_nudge = consistent_width * (spacing) # nudge the start forward a bit (by the nudge factor) - glyph_anchor_coord[0]+=x_nudge - patch_width = consistent_width-x_nudge + glyph_anchor_coord[0] += x_nudge + patch_width = consistent_width - x_nudge patch_height = 1 - flow_glyphs.append(Rectangle(glyph_anchor_coord, - patch_width, - patch_height)) + flow_glyphs.append( + Rectangle(glyph_anchor_coord, patch_width, patch_height) + ) - #### + #### # triangle version #### - if glyph_type == 'triangle': + if glyph_type == "triangle": # triangles need to be in the center and then given a size - glyph_anchor_coord = [(this_block_ix+.5)/float(num_glyphs), 0] + glyph_anchor_coord = [ + (this_block_ix + 0.5) / float(num_glyphs), + 0, + ] glyph_verts = 3 - glyph_radius = 
(1./float(num_glyphs))/2. + glyph_radius = (1.0 / float(num_glyphs)) / 2.0 # apply nudges - glyph_radius *= (1-(spacing/2.)) - flow_glyphs.append(RegularPolygon(glyph_anchor_coord, - glyph_verts, - radius=glyph_radius, - orientation=glyph_rotation)) - - yscale_transform = Affine2D().scale(sx=1, sy=0.5/glyph_radius) + glyph_radius *= 1 - (spacing / 2.0) + # flip direction if it's the other sign + if this_block_weight >= 0: + flow_glyphs.append( + RegularPolygon( + glyph_anchor_coord, + glyph_verts, + radius=glyph_radius, + orientation=glyph_rotation, + ) + ) + else: + flow_glyphs.append( + RegularPolygon( + glyph_anchor_coord, + glyph_verts, + radius=glyph_radius, + orientation=glyph_rotation + np.pi, + ) + ) + yscale_transform = Affine2D().scale(sx=1, sy=0.5 / glyph_radius) # rescale y to make it fit in a 1x1 box flow_glyphs[-1].set_transform(yscale_transform) - #### + #### # n-gon version #### - if glyph_type == 'n-gon': + if glyph_type == "n-gon": # triangles need to be in the center and then given a size - glyph_anchor_coord = [(this_block_ix+.5)/float(num_glyphs), 0] + glyph_anchor_coord = [ + (this_block_ix + 0.5) / float(num_glyphs), + 0, + ] glyph_verts = verts - glyph_radius = (1./float(num_glyphs))/2. + glyph_radius = (1.0 / float(num_glyphs)) / 2.0 # apply nudges - glyph_radius *= (1-(spacing)) - flow_glyphs.append(RegularPolygon(glyph_anchor_coord, - glyph_verts, - radius=glyph_radius, - orientation=glyph_rotation)) - - yscale_transform = Affine2D().scale(sx=1, sy=0.5/glyph_radius) + glyph_radius *= 1 - (spacing) + # flip direction if it's the other sign + if this_block_weight >= 0: + flow_glyphs.append( + RegularPolygon( + glyph_anchor_coord, + glyph_verts, + radius=glyph_radius, + orientation=glyph_rotation, + ) + ) + else: + flow_glyphs.append( + RegularPolygon( + glyph_anchor_coord, + glyph_verts, + radius=glyph_radius, + orientation=glyph_rotation + np.pi, + ) + ) + + yscale_transform = Affine2D().scale(sx=1, sy=0.5 / glyph_radius) # rescale y to make it fit in a 1x1 box flow_glyphs[-1].set_transform(yscale_transform) - #### + #### # custom_flow #### - if glyph_type == 'custom': + if glyph_type == "custom": # anchor for rectangles are set to bottom left - glyph_anchor_coord = [this_block_ix/float(num_glyphs), -.5] + glyph_anchor_coord = [this_block_ix / float(num_glyphs), -0.5] # height is y, width is x - consistent_width = 1./float(num_glyphs) + consistent_width = 1.0 / float(num_glyphs) # apply scaling - x_nudge = consistent_width*(spacing) - patch_width = consistent_width-x_nudge + x_nudge = consistent_width * (spacing) + patch_width = consistent_width - x_nudge patch_height = 1 - codes, verts = zip(*[ - (mpath.Path.MOVETO, glyph_anchor_coord), - (mpath.Path.LINETO, [glyph_anchor_coord[0], glyph_anchor_coord[1]+patch_height]), - (mpath.Path.LINETO, [glyph_anchor_coord[0]+patch_width*.7, glyph_anchor_coord[1]+patch_height]), # go 70% of the width along the top - (mpath.Path.LINETO, [glyph_anchor_coord[0]+patch_width, glyph_anchor_coord[1]+patch_height*.5]), # go the rest of the width and meet in the center - (mpath.Path.LINETO, [glyph_anchor_coord[0]+patch_width*.7, glyph_anchor_coord[1]]), # go back a bit and to the bottom to finish the wedge - (mpath.Path.LINETO, glyph_anchor_coord)]) # go to home - - flow_glyphs.append(PathPatch(mpath.Path(verts, codes), ec="none"),) - - rotation_transofrm = Affine2D().rotate_around(glyph_anchor_coord[0]+patch_width*.5, - glyph_anchor_coord[1]+patch_height*.5, - glyph_rotation) + codes, verts = zip( + *[ + (mpath.Path.MOVETO, 
glyph_anchor_coord), + ( + mpath.Path.LINETO, + [ + glyph_anchor_coord[0], + glyph_anchor_coord[1] + patch_height, + ], + ), + ( + mpath.Path.LINETO, + [ + glyph_anchor_coord[0] + patch_width * 0.7, + glyph_anchor_coord[1] + patch_height, + ], + ), # go 70% of the width along the top + ( + mpath.Path.LINETO, + [ + glyph_anchor_coord[0] + patch_width, + glyph_anchor_coord[1] + patch_height * 0.5, + ], + ), # go the rest of the width and meet in the center + ( + mpath.Path.LINETO, + [ + glyph_anchor_coord[0] + patch_width * 0.7, + glyph_anchor_coord[1], + ], + ), # go back a bit and to the bottom to finish the wedge + (mpath.Path.LINETO, glyph_anchor_coord), + ] + ) # go to home + + flow_glyphs.append( + PathPatch(mpath.Path(verts, codes), ec="none"), + ) + + # flip direction if it's the other sign + rotation_transform = None + + if this_block_weight >= 0: + rotation_transform = Affine2D().rotate_around( + glyph_anchor_coord[0] + patch_width * 0.5, + glyph_anchor_coord[1] + patch_height * 0.5, + glyph_rotation, + ) + else: + rotation_transform = Affine2D().rotate_around( + glyph_anchor_coord[0] + patch_width * 0.5, + glyph_anchor_coord[1] + patch_height * 0.5, + glyph_rotation + np.pi, + ) # rescale y to make it fit in a 1x1 box - flow_glyphs[-1].set_transform(rotation_transofrm) + flow_glyphs[-1].set_transform(rotation_transform) return flow_glyphs @@ -861,49 +1225,81 @@ def generate_flow_glyphs(num_glyphs, # weights_bot = (np.random.randn(4)+1)/2. # weights_top = np.array(range(num_blocks))/(num_blocks*2) # weights_bot = (np.array(range(num_blocks))+num_blocks)/(num_blocks*2) + weights_top = glyph_values_slice weights_bot = glyph_values_slice - top_flow_glyphs = generate_flow_glyphs(len(weights_top), glyph_type=glyph_type) - top_facecolors = cmap(norm(weights_top)) - top_flow_collection = PatchCollection(top_flow_glyphs, facecolors=top_facecolors, edgecolors='grey', alpha=0.5) + top_flow_glyphs = generate_flow_glyphs(weights_top, glyph_type=glyph_type) + # first weight is the start marker. 
Make that the same every time + top_facecolors = np.zeros((len(weights_top) + 1, 4)) + top_facecolors[0] = [0.0, 0.0, 0.0, 1] + top_facecolors[1:] = cmap(norm(abs(weights_top))) + top_flow_collection = PatchCollection( + top_flow_glyphs, facecolors=top_facecolors, edgecolors="grey", alpha=0.5 + ) # bot_flow_glyphs = generate_flow_glyphs(len(weights_bot), glyph_type=glyph_type, glyph_rotation=(np.pi/2.)) # for squares - bot_flow_glyphs = generate_flow_glyphs(len(weights_bot), glyph_type=glyph_type, glyph_rotation=(np.pi)) # for custom + bot_flow_glyphs = generate_flow_glyphs( + weights_bot, glyph_type=glyph_type, glyph_rotation=(np.pi) + ) # for custom bot_flow_glyphs = reversed(bot_flow_glyphs) bot_facecolors = cmap(norm(weights_bot)) # bot_flow_collection = PatchCollection(bot_flow_glyphs, facecolors=bot_facecolors, edgecolors='grey', alpha=0.5) # [HACK] # scale and move top and bottom collections # top_base_transform = Affine2D().scale(sx=1, sy=0.9) + Affine2D().translate(0, 0.5) #+ ax_graph.transData # [HACK] - top_base_transform = Affine2D().scale(sx=1, sy=1.0) + Affine2D().translate(0, 0.0) #+ ax_graph.transData + top_base_transform = Affine2D().scale(sx=1, sy=1.0) + Affine2D().translate( + 0, 0.0 + ) # + ax_graph.transData top_flow_collection.set_transform(top_base_transform) - bot_base_transform = Affine2D().scale(sx=1, sy=0.9) + Affine2D().translate(0, -0.5)# + ax_graph.transData + bot_base_transform = Affine2D().scale(sx=1, sy=0.9) + Affine2D().translate( + 0, -0.5 + ) # + ax_graph.transData # bot_base_transform = Affine2D().scale(sx=1, sy=0.9) + Affine2D().translate(0, -0.5) + ax_graph.transData # bot_flow_collection.set_transform(bot_base_transform) # [HACK] # combine collections and move to edge between nodes # attempt to rotate - start_key = self.data.data['elements'][what_is_a_bus_called][item]['from_bus'] - end_key = self.data.data['elements'][what_is_a_bus_called][item]['to_bus'] + start_key = self.data.data["elements"][what_is_a_bus_called][item][ + "from_bus" + ] + end_key = self.data.data["elements"][what_is_a_bus_called][item]["to_bus"] start_pos = graph_node_position_dict[start_key] end_pos = graph_node_position_dict[end_key] - node_distance = np.linalg.norm(end_pos-start_pos) - rot_angle_rad = np.arctan2((end_pos[1]-start_pos[1]),(end_pos[0]-start_pos[0])) + node_distance = np.linalg.norm(end_pos - start_pos) + rot_angle_rad = np.arctan2( + (end_pos[1] - start_pos[1]), (end_pos[0] - start_pos[0]) + ) along_edge_scale = 0.4 away_from_edge_scale = 0.1 # set up transformations # stretch to the distance between target nodes - length_transform = Affine2D().scale(sx=node_distance*along_edge_scale, sy=1) + length_transform = Affine2D().scale( + sx=node_distance * along_edge_scale, sy=1 + ) # squish scale_transform = Affine2D().scale(sx=1, sy=away_from_edge_scale) # rotate - rot_transform = Affine2D().rotate_deg(np.rad2deg(rot_angle_rad)) + rot_transform = Affine2D().rotate_deg(np.rad2deg(rot_angle_rad)) # translate to the node start, then push it along the edge until it's apprximately centered and scaled nicely - translate_transform = Affine2D().translate(start_pos[0]+(np.cos(rot_angle_rad)*node_distance*.5*(1-along_edge_scale)), - start_pos[1]+(np.sin(rot_angle_rad)*node_distance*.5*(1-along_edge_scale))) - t2 = length_transform + scale_transform + rot_transform + translate_transform + ax_graph.transData + translate_transform = Affine2D().translate( + start_pos[0] + + ( + np.cos(rot_angle_rad) * node_distance * 0.5 * (1 - along_edge_scale) + ), + start_pos[1] + + ( + 
np.sin(rot_angle_rad) * node_distance * 0.5 * (1 - along_edge_scale) + ), + ) + t2 = ( + length_transform + + scale_transform + + rot_transform + + translate_transform + + ax_graph.transData + ) top_flow_collection.set_transform(top_flow_collection.get_transform() + t2) # bot_flow_collection.set_transform(bot_flow_collection.get_transform() + t2) # [HACK] @@ -914,25 +1310,29 @@ def generate_flow_glyphs(num_glyphs, # add edges # define edge colorbar - cmap = cm.rainbow - normalize = Normalize(vmin=df_min, vmax=df_max) + # cmap = cm.rainbow + cmap = cm.cool + # normalize = Normalize(vmin=df_min, vmax=df_max) + normalize = Normalize(vmin=0, vmax=max([abs(df_min), df_max])) cmappable = cm.ScalarMappable(norm=normalize, cmap=cmap) - for item in self.data.data['elements'][what_is_a_bus_called]: - + # for item in self.data.data["elements"][what_is_a_bus_called]: + for item in key_of_interest: # grab the keys we care about - start_key = self.data.data['elements'][what_is_a_bus_called][item]['from_bus'] - end_key = self.data.data['elements'][what_is_a_bus_called][item]['to_bus'] + start_key = self.data.data["elements"][what_is_a_bus_called][item][ + "from_bus" + ] + end_key = self.data.data["elements"][what_is_a_bus_called][item]["to_bus"] start_pos = graph_node_position_dict[start_key] end_pos = graph_node_position_dict[end_key] # edge_key = f"branch_{start_key}_{end_key}_{value_key}_value" # alt_edge_key = f"branch_{end_key}_{start_key}_{value_key}_value" edge_key = f"{item}_{value_key}_value" alt_edge_key = f"{item}_{value_key}_value" - + # kind = 'triangle' # kind = 'rectangle' - kind = 'custom' + kind = "custom" glyph_values_slice = None try: glyph_values_slice = df[edge_key].values @@ -940,14 +1340,34 @@ def generate_flow_glyphs(num_glyphs, try: glyph_values_slice = df[alt_edge_key].values except KeyError as kex_second_attempt: - print(f"Attempted to slice DF in network twice using {edge_key} and {alt_edge_key}, failed both.") - draw_single_edge_flow(item, glyph_values_slice, ax_graph, cmap=cmap, norm=normalize, glyph_type=kind) + print( + f"Attempted to slice DF in network twice using {edge_key} and {alt_edge_key}, failed both." 
+                    )
+            draw_single_edge_flow(
+                item,
+                glyph_values_slice,
+                ax_graph,
+                cmap=cmap,
+                norm=normalize,
+                glyph_type=kind,
+            )
 
             # forward arrow
-            ax_graph.arrow(start_pos[0], start_pos[1], (end_pos[0]-start_pos[0]), (end_pos[1]-start_pos[1]), color='black')
+            ax_graph.arrow(
+                start_pos[0],
+                start_pos[1],
+                (end_pos[0] - start_pos[0]),
+                (end_pos[1] - start_pos[1]),
+                color="black",
+            )
             # backward arrow
-            ax_graph.arrow(end_pos[0], end_pos[1], (start_pos[0]-end_pos[0]), (start_pos[1]-end_pos[1]), color='black')
-
+            ax_graph.arrow(
+                end_pos[0],
+                end_pos[1],
+                (start_pos[0] - end_pos[0]),
+                (start_pos[1] - end_pos[1]),
+                color="black",
+            )
 
         # insert colorbar
         fig.colorbar(cmappable, ax=ax_graph, label=f"{value_key}{units_str}")
@@ -955,37 +1375,70 @@
         fig.suptitle(f"{parent_key_string}_{value_key}")
 
         # save
-        fig.savefig(f"{save_dir}{parent_key_string}_{pretty_title.replace(' ', '_')}_graph.png")
+        fig.savefig(
+            f"{save_dir}{parent_key_string}_{pretty_title.replace(' ', '_')}_graph.png"
+        )
 
         pass
 
     def plot_levels(self, save_dir="."):
 
-        # self._plot_graph_workhorse()
+        # pre-processing
+
+        # [XXX] TODO
+        # # special handling for buses and extensions
+        # # if we've specified that we have both a specific bus set of interest and want to do some number of extensions
+        # if self.config.has_option(
+        #     "power_flow", "bus_extension"
+        # ) and self.config.has_option(
+        #     "diagnostic_keys_of_interest", "buses_of_interest"
+        # ):
+        #     # for this_
+        #     # # go through each branch and find any that are "to" or "from" the bus we care about
+        #     # for this_branch in self.data.data["elements"]["branch"]
+        #     #     from_bus = self.data.data["elements"]["branch"][this_branch]["from_bus"]
+        #     #     to_bus = self.data.data["elements"]["branch"][this_branch]["to_bus"]
+        #     self._plot_graph_workhorse()
 
         # plot or represent primals trees
         for this_root_level_key in self.primals_tree:
             if "investmentStage" in this_root_level_key:
                 # run the toplevel keys
                 parent_key_string = f"{this_root_level_key}"
-                self._level_plot_workhorse(
-                    "investmentStage", self.primals_tree, this_root_level_key, save_dir
-                )
+                if eval(self.config["period_categories"]["plot_investment"]):
+                    self._level_plot_workhorse(
+                        "investmentStage",
+                        self.primals_tree,
+                        this_root_level_key,
+                        save_dir,
+                    )
 
                 # run the representative period subkeys
                 investment_level_cut = self.primals_tree[this_root_level_key]
                 parent_key_string = f"{this_root_level_key}"
-                self._level_plot_workhorse(
-                    "representativePeriod", investment_level_cut, parent_key_string, save_dir
-                )
+                if eval(self.config["period_categories"]["plot_representative"]):
+                    self._level_plot_workhorse(
+                        "representativePeriod",
+                        investment_level_cut,
+                        parent_key_string,
+                        save_dir,
+                    )
 
                 for this_inv_level_key in self.primals_tree[this_root_level_key].keys():
                     if "representativePeriod" in this_inv_level_key:
-                        representative_level_cut = self.primals_tree[this_root_level_key][this_inv_level_key]
-                        parent_key_string = f"{this_root_level_key}_{this_inv_level_key}"
-                        self._level_plot_workhorse(
-                            "commitmentPeriod", representative_level_cut, parent_key_string, save_dir
+                        representative_level_cut = self.primals_tree[
+                            this_root_level_key
+                        ][this_inv_level_key]
+                        parent_key_string = (
+                            f"{this_root_level_key}_{this_inv_level_key}"
                         )
+                        if eval(self.config["period_categories"]["plot_commitment"]):
+                            self._level_plot_workhorse(
+                                "commitmentPeriod",
+                                representative_level_cut,
+                                parent_key_string,
+                                save_dir,
+                            )
 
                         for this_rep_level_key in self.primals_tree[
                             this_root_level_key
@@ -997,9 
+1450,15 @@ def plot_levels(self, save_dir="."):
 
                                 parent_key_string = f"{this_root_level_key}_{this_inv_level_key}_{this_rep_level_key}"
 
-                                self._level_plot_workhorse(
-                                    "dispatchPeriod",commitment_level_cut, parent_key_string, save_dir
-                                )
+                                if eval(
+                                    self.config["period_categories"]["plot_dispatch"]
+                                ):
+                                    self._level_plot_workhorse(
+                                        "dispatchPeriod",
+                                        commitment_level_cut,
+                                        parent_key_string,
+                                        save_dir,
+                                    )
 
         # # plot or represent expressions
         # self._expressions_plot_workhorse(
@@ -1033,5 +1492,5 @@
 
         #     self._expressions_plot_workhorse(
         #         "dispatchPeriod",commitment_level_cut, parent_key_string, save_dir
-        #     )
+        #     )
         # pass
diff --git a/gtep/map_bus_to_vars.py b/gtep/map_bus_to_vars.py
index 7d1bd03..cd29984 100644
--- a/gtep/map_bus_to_vars.py
+++ b/gtep/map_bus_to_vars.py
@@ -6,35 +6,249 @@
 # read bus data
 bus_data_df = pd.read_csv("Bus_data.csv")
 
-# read pricing data
-pricing_data_book_names = [
-    "Land-Based Wind",
-    "Solar - CSP",
-    "Natural Gas_FE",
-    "Coal_FE",
-    "Biopower",
-    "Nuclear",
-    "Geothermal",
-    "Solar - Utility PV",
-]
-pricing_data_sheet = "./gtep/2022 v3 Annual Technology Baseline Workbook Mid-year update 2-15-2023 Clean.xlsx"
-pricing_data_sheet_path = Path(pricing_data_sheet)
-pricing_dict = {}
-for this_book_name in pricing_data_book_names:
-    pricing_dict[this_book_name] = pd.read_excel(
-        pricing_data_sheet_path, this_book_name
-    )
-
+# if you need to find the counties given the buses, run the block below; otherwise skip it, as it is slow
+# run below only once vvvvvvvvvvvv
 geolocator = Nominatim(user_agent="GetLoc")
 county_lookup = []
 for this_lat, this_lon in zip(
     bus_data_df["Bus latitude"], bus_data_df["Bus longitude"]
 ):
     tmp_county = geolocator.reverse([this_lat, this_lon])
-    county_lookup.append(tmp_county.raw["address"]["county"])
+    county_lookup.append(
+        tmp_county.raw["address"]["county"][:-7]
+    )  # shove in just the county name, not the " County" suffix
 
 bus_data_df["County"] = county_lookup
 bus_data_df.to_csv("Bus_data_extended.csv", index=False)
+# run above only once ^^^^^^^^^^^^
+
+bus_data_df = pd.read_csv("Bus_data_extended.csv")
+
+# read the mapping tables (C.1 and C.2)
+c1_data_sheet = (
+    "./gtep/LTSA_ResourceCitingMethodology_CountyRatingsBasedOnResourceType.xlsx"
+)
+c1_data_sheet_path = Path(c1_data_sheet)
+c2_data_sheet = (
+    "./gtep/LTSA_ResourceCitingMethodology_SuitabilityofCountyforTechnologyType.xlsx"
+)
+c2_data_sheet_path = Path(c2_data_sheet)
+
+c1_df = pd.read_excel(c1_data_sheet_path, keep_default_na=False, na_values=["_"])
+c2_df = pd.read_excel(c2_data_sheet_path, keep_default_na=False, na_values=["_"])
+
+# summary of mappings
+mappings_dict = {
+    "Land-Based Wind": {
+        "Table": "C.1",
+        "Column": "WindCond.",
+        1: "Land-Based Wind - Class 8",
+        2: "Land-Based Wind - Class 7",
+        3: "Land-Based Wind - Class 6",
+        4: "Land-Based Wind - Class 5",
+    },
+    "Solar - CSP": {
+        "Table": "C.1",
+        "Column": "SolarThermalCond.",
+        7: "CSP - Class 2",
+        6.75: "CSP - Class 2",
+        6.5: "CSP - Class 3",
+        6: "CSP - Class 7",
+        5.75: "CSP - Class 7",
+        5.5: None,
+    },
+    "Natural Gas_CT": {
+        "Table": "C.2",
+        "Column": "NatGasCT",
+        "Good": "NG F-Frame CT",
+        "Avg.": "NG F-Frame CT",
+        "NA": None,
+        "RO": "NG F-Frame CT",
+    },
+    "Natural Gas_FE": {
+        "Table": "C.2",
+        "Column": "NatGasCC",
+        "Good": "NG Combined Cycle (H-Frame) Max CCS",
+        "Avg.": "NG Combined Cycle (H-Frame) 95% CCS",
+        "NA": None,
+        "RO": None,
+    },
+    "Coal_FE": {
+        "Table": "C.2",
+        "Column": "Coal",
+        "Good": None,
+        "Avg.": None,
+        "NA": None,
+        "RO": None,
+    },
+    "Biopower": {
+        "Table": "C.2",
+        "Column": "Biomass",
+        "Good": "Biopower - Dedicated",
+        "Avg.": None,
+        "NA": None,
+        "RO": None,
+    },
+    "Nuclear": {
+        "Table": "C.2",
+        "Column": "Nuclear",
+        "Good": "Nuclear - Small Modular Reactor",
+        "Avg.": None,
+        "NA": None,
+        "RO": None,
+    },
+    "Geothermal": {
+        "Table": "C.1",
+        "Column": "Geothermal",
+        "H": "Geothermal - NF EGS / Binary",
+        "M": None,
+        "NA": None,
+    },
+    "Solar - Utility PV": {
+        "Table": "C.1",
+        "Column": "SolarPV",
+        "VH": "Utility PV - Class 1",
+        "H": "Utility PV - Class 2",
+        "M": "Utility PV - Class 4",
+    },
+}
+
+# allocate for the mappings
+allocation_dict = {
+    item: [
+        None for _ in range(bus_data_df["County"].shape[0])
+    ]  # preallocate a bunch of lists with Nones for some amount of safety
+    for item in mappings_dict
+}
+
+# allocate some debug stuff (mostly checking if we have all the counties)
+missing_counties = []
+
+# now run through and assign each bus the appropriate data
+for county_ix, this_county in enumerate(bus_data_df["County"]):
+    # clean up this county info by removing spaces
+    clean_this_county = this_county.replace(" ", "")
+
+    # check that the county is even in the tables
+    if (clean_this_county in c1_df["County"].values) and (
+        clean_this_county in c2_df["County"].values
+    ):
+        # cycle through the generation types
+        for this_gen, this_map in mappings_dict.items():
+            mapping_row = None
+            # pick the correct mapping dictionary
+            if this_map["Table"] == "C.1":
+                mapping_row = c1_df[c1_df["County"] == clean_this_county]
+            if this_map["Table"] == "C.2":
+                mapping_row = c2_df[c2_df["County"] == clean_this_county]
+
+            county_quality = mapping_row[this_map["Column"]].values[0]
+            workbook_map = this_map[county_quality]
+            allocation_dict[this_gen][county_ix] = workbook_map
+    else:
+        print(
+            f"[WARNING]: County {clean_this_county} not found in either C.1 or C.2. Skipping. Values will be 'None' in dataframe."
+        )
+        missing_counties.append(clean_this_county)
+
+# assign new data to the old df
+for gen_type, allocations in allocation_dict.items():
+    bus_data_df[gen_type] = allocations
+
+# save the new DF
+bus_data_df.to_csv("Bus_data_gen_mappings.csv", index=False)
+
+# print some diagnostic info
+print(f"Missing the following counties: {missing_counties}")
+
+pass
+
+# mappings template:
+# mappings_dict = {
+#     "Land-Based Wind": {"Source": None, "Good": None, "Avg": None, "NA": None, "RO": None},
+#     "Solar - CSP": {"Source": None, "Good": None, "Avg": None, "NA": None, "RO": None},
+#     "Natural Gas_FE": {"Source": None, "Good": None, "Avg": None, "NA": None, "RO": None},
+#     "Coal_FE": {"Source": None, "Good": None, "Avg": None, "NA": None, "RO": None},
+#     "Biopower": {"Source": None, "Good": None, "Avg": None, "NA": None, "RO": None},
+#     "Nuclear": {"Source": None, "Good": None, "Avg": None, "NA": None, "RO": None},
+#     "Geothermal": {"Source": None, "Good": None, "Avg": None, "NA": None, "RO": None},
+#     "Solar - Utility PV": {"Source": None, "Good": None, "Avg": None, "NA": None, "RO": None},
+# }
+
+# PopPop the Power Optimization Possum says ౿ᓕ ̤Ꜥ·> --- "when at first you don't succeed, scream." 
+# --- Land-based Wind ---
+# ***************************************************************************************************************************************************
+# -   C.1 Rating             Workbook Classification
+# -   Condition 1            Class 8
+# -   Condition 2            Class 7
+# -   Condition 3            Class 6
+# -   Condition 4            Class 5
+# ***************************************************************************************************************************************************
+
+# --- Solar-CSP ---
+# ***************************************************************************************************************************************************
+# -   C.1 Rating             Workbook Classification
+# -   Good (7)               Class 2
+# -   Good (6.75)            Class 2
+# -   Average (6.5)          Class 3
+# -   Below Average (6)      Class 7
+# -   Below Average (5.75)   Class 7
+# -   Poor (5.5)             None
+# ***************************************************************************************************************************************************
+
+# --- NatGasCT ---
+# ***************************************************************************************************************************************************
+# -   C.2 Rating             Workbook Classification
+# -   Good/Avg/RO            NG F-Frame CT
+# -   NA                     None
+# ***************************************************************************************************************************************************
+
+# --- NatGasCC ---
+# ***************************************************************************************************************************************************
+# -   C.2 Rating             Workbook Classification
+# -   Good                   NG Combined Cycle (H-Frame) Max CCS
+# -   Avg                    NG Combined Cycle (H-Frame) 95% CCS
+# -   RO                     None
+# -   NA                     None
+# ***************************************************************************************************************************************************
+
+# --- Coal_FE ---
+# ***************************************************************************************************************************************************
+# -   C.2 Rating             Workbook Classification
+# -   Good                   None (ignore new, newTo2ndGen, newToTT, CCS95, CCS95To2ndGen, CCS95ToTT, MaxCCS, MaxCCSTo2ndGen, MaxCCSToTT, IGCC)
+# -   NA                     None
+# ***************************************************************************************************************************************************
+
+# --- Biopower ---
+# ***************************************************************************************************************************************************
+# -   C.2 Rating             Workbook Classification
+# -   Good                   Biopower - Dedicated
+# -   NA                     None
+# ***************************************************************************************************************************************************
+
+# --- Nuclear ---
+# ***************************************************************************************************************************************************
+# -   C.2 Rating             Workbook Classification
+# -   Good                   Nuclear - Small Modular Reactor (pretend AP1000 isn't a thing)
+# -   NA                     None
+# ***************************************************************************************************************************************************
+
+# --- Geothermal ---
+# ***************************************************************************************************************************************************
+# -   C.1 Rating             Workbook Classification
+# -   High                   Geothermal - NF EGS / Binary (Ignore HydroFlash, HydroBinary, NFEGSFlash)
+# -   NA                     None
+# 
+# ***************************************************************************************************************************************************
+
+# --- Solar - Utility PV ---
+# ***************************************************************************************************************************************************
+# - C.1 Rating              Workbook Classification
+# - Very High               Class 1
+# - High                    Class 2
+# - Medium                  Class 4
+# ***************************************************************************************************************************************************
+
 # tech_baseline_to_suitability_map
@@ -74,13 +288,13 @@
 # Generation Type (Workbook)    Generation Type (Table C.1)    Generation Type (Table C.2)    Resource Classification (Workbook)    Resource Rating (Table C.1)    Technology Rating (Table C.2)
 # Land-based Wind    Wind Cond.    Wind    Class 1-10 (lower is better)    Poor (1) - Very Good (7)    Good, Avg., NA
 # Solar-CSP    Solar Thermal Condition    Solar Thermal    Class 2, 3, or 7 (lower is better)    5.5, 5.75, 6, 6.5, 6.75, 7    Good, Avg., NA
-# Natural Gas_FE    Gas Pipeline Density/Capacity    NatGasCT    F-FrameCT    H, M, L, VL    Good, Avg., RO, NA
-# Natural Gas_FE    Gas Pipeline Density/Capacity    NatGasCC    G-FrameCC, H-FrameCC, F-FrameCC95CCS, H-FrameCC95CCS, F-FrameCCMaxCCS, H-FrameCCMaxCCS    H, M, L, VL    Good, Avg., RO, NA
-# Natural Gas_FE    Surface Water    NatGasCC    G-FrameCC, H-FrameCC, F-FrameCC95CCS, H-FrameCC95CCS, F-FrameCCMaxCCS, H-FrameCCMaxCCS    H, M, L, VL    Good, Avg., RO, NA
+# Natural Gas_FE    Gas Pipeline Density/Capacity    NatGasCT    NG F-Frame CT    H, M, L, VL    Good, Avg., RO, NA
+# Natural Gas_FE    Gas Pipeline Density/Capacity    NatGasCC    G-FrameCC, H-FrameCC, F-FrameCC95CCS, H-FrameCC95CCS, F-FrameCCMaxCCS, NG Combined Cycle (H-Frame) Max CCS    H, M, L, VL    Good, Avg., RO, NA
+# Natural Gas_FE    Surface Water    NatGasCC    G-FrameCC, H-FrameCC, F-FrameCC95CCS, H-FrameCC95CCS, F-FrameCCMaxCCS, NG Combined Cycle (H-Frame) Max CCS    H, M, L, VL    Good, Avg., RO, NA
 # Coal_FE    Railroad Density    Coal    new, newTo2ndGen, newToTT, CCS95, CCS95To2ndGen, CCS95ToTT, MaxCCS, MaxCCSTo2ndGen, MaxCCSToTT, IGCC    H, M, L, VL    Good, NA
-# Biopower    Biomass    Biomass    Dedicated    H, M, L    Good, NA
-# Nuclear    Surface Water    Nuclear    AP1000, SmallModularReactor    H, M, L, VL    Good, NA
-# Geothermal    Geothermal    GeoThermal    HydroFlash, HydroBinary, NFEGSFlash, NFEGSBinary, DeepEGSFlash, DeepEGSBinary    H, M, NA    Good, NA
+# Biopower    Biomass    Biomass    Biopower - Dedicated    H, M, L    Good, NA
+# Nuclear    Surface Water    Nuclear    AP1000, Nuclear - Small Modular Reactor    H, M, L, VL    Good, NA
+# Geothermal    Geothermal    GeoThermal    HydroFlash, HydroBinary, NFEGSFlash, Geothermal - NF EGS / Binary, DeepEGSFlash, DeepEGSBinary    H, M, NA    Good, NA
 # Solar - Utility PV    Solar PV    SolarPV    Class 1-10 (lower is better)    VH, H, M    Good, Avg.
 
 # need to combine ratings to say whether the RESOURCE is AVAILABLE and the TECHNOLOGY is SUITABLE, and then map that to the PRICE of THINGS for that County
@@ -91,6 +305,24 @@
 # Resource Rating also has a class rating, BUT it's reversed: "zone 1" is the worst, "zone 7" is the best. Table C.1: Condition Rating (CR) 1-7
 # Mapping the counties together, we can align the Condition Rating with the Technology Rating and get NA == 1, "Avg" == 2, "Good" == 3 or 4.
 # For the sake of argument, let's just assume that CR1 --> Class10, CR2 --> Class9, etc.
+# Finally found a wind map: https://www.nrel.gov/grid/assets/images/wtk-100-north-america-50-nm-with-data-extents-01.jpg
+# Of course the bins don't match anything else.
+# East Texas is 6-6.9 m/s. That's either Class 9 or Class 8. It should map to "Wind Condition 1".
+# 7-7.9 maps to Wind Condition 2 (ish), which maps to Class 7 or Class 6
+# darkest blue on the map is 8.0-8.9, which is clearly Wind Condition 4 (there's no higher level on the map, really), which should be Class 5-2. OOF.
+# That leaves Wind Condition 3, so Class 6 goes there. Which means:
+# Condition 4 --> Good --> Class 5
+# Condition 3 --> Good --> Class 6
+# Condition 2 --> Avg --> Class 7
+# Condition 1 --> NA --> Class 8
+# ***************************************************************************************************************************************************
+# - C.1 Rating              Workbook Classification
+# - Condition 1             Class 8
+# - Condition 2             Class 7
+# - Condition 3             Class 6
+# - Condition 4             Class 5
+# ***************************************************************************************************************************************************
+
 # --- Solar-CSP ---
 # The Resource Rating here is actually decent, and is measured in kWh/m^2/day. This is actually useful. Praise be.
@@ -123,7 +355,7 @@
 # NatGasCT is easy: there is only one mapping you can do.
 # ***************************************************************************************************************************************************
 # - C.2 Rating              Workbook Classification
-# - Good/Avg/RO F-FrameCT
+# - Good/Avg/RO             NG F-Frame CT
 # - NA                      None
 # ***************************************************************************************************************************************************
@@ -134,10 +366,10 @@
 # CCS evolves upwards: either it's "normal", it's 95%, or it's "Max" (97%).
 # How any of that maps to either Surface Water or to Pipeline Density is completely lost on me.
 # Not solving that now. Placeholder mapping:
-# F-FrameCC, H-FrameCC, F-FrameCC95CCS, H-FrameCC95CCS, F-FrameCCMaxCCS, H-FrameCCMaxCCS
+# F-FrameCC, H-FrameCC, F-FrameCC95CCS, H-FrameCC95CCS, F-FrameCCMaxCCS, NG Combined Cycle (H-Frame) Max CCS
 # ***************************************************************************************************************************************************
 # - C.2 Rating              Workbook Classification
-# - Good H-FrameCCMaxCCS
+# - Good                    NG Combined Cycle (H-Frame) Max CCS
 # - Avg                     H-FrameCC95CCS
 # - RO                      None
 # - NA                      None
 # ***************************************************************************************************************************************************
@@ -158,7 +390,7 @@
 # Biopower is also easy, there's only one mapping. Either it's there or not.
 # ***************************************************************************************************************************************************
 # - C.2 Rating              Workbook Classification
-# - Good Dedicated
+# - Good                    Biopower - Dedicated
 # - NA                      None
 # ***************************************************************************************************************************************************
@@ -166,7 +398,7 @@
 # Nuclear also needs more information to be useful, but we can at least pick one.
 # ***************************************************************************************************************************************************
 # - C.2 Rating              Workbook Classification
-# - Good SmallModularReactor (pretend AP1000 isn't a thing)
+# - Good                    Nuclear - Small Modular Reactor (pretend AP1000 isn't a thing)
 # - NA                      None
 # ***************************************************************************************************************************************************
@@ -186,7 +418,7 @@
 # one. So let's just assign "Good" to something and map NA to None.
 # ***************************************************************************************************************************************************
 # - C.1 Rating              Workbook Classification
-# - High NFEGSBinary (Ignore HydroFlash, HydroBinary, NFEGSFlash)
+# - High                    Geothermal - NF EGS / Binary (Ignore HydroFlash, HydroBinary, NFEGSFlash)
 # - NA                      None
 # ***************************************************************************************************************************************************
diff --git a/gtep/post_processing_config.cfg b/gtep/post_processing_config.cfg
new file mode 100644
index 0000000..64e3aa2
--- /dev/null
+++ b/gtep/post_processing_config.cfg
@@ -0,0 +1,54 @@
+[model_specific_nomenclature]
+bus_descriptor = branch
+
+[period_categories]
+plot_investment = True
+plot_representative = True
+plot_commitment = True
+plot_dispatch = True
+
+[value_categories]
+plot_generators = True
+plot_buses = True
+
+[diagnostic_keys_of_interest]
+# these are the buses we want to plot
+# buses_of_interest = ["bus1", "bus2", "bus3", "bus4", "bus10"]
+# buses_of_interest = ["bus1", "bus2", "bus10"]
+buses_of_interest = ["bus1", "bus4", "bus10"]
+# these are the branches we want to plot
+# branches_of_interest = ["branch_1_10", "branch_1_2", "branch_1_4", "branch_2_3", "branch_3_4_0", "branch_3_4_1", "branch_4_10"]
+branches_of_interest = ["branch_1_10", "branch_3_4_0", "branch_3_4_1", "branch_4_10"]
+# nonrenewable_generators_of_interest = ["10_STEAM", "3_CT", "4_CC", "4_STEAM"]
+nonrenewable_generators_of_interest = ["3_CT", "4_CC", "10_STEAM"]
+# renewable_generators_of_interest = ['10_PV', '1_HYDRO', '2_RTPV', '4_WIND']
+renewable_generators_of_interest = ['1_HYDRO', '2_RTPV', '4_WIND']
+
+[power_flow]
+# bus_extension is the number of buses out from the buses of interest to also plot
+bus_extension = 0
+network_flow_glyph_type = custom
+
+[binary_order_overrides]
+# orders should be a parsable list, bottom up (i.e. the first element in the list is at the bottom of the plot, the last is on top)
+order_gen_state = ["genOff", "genShutdown", "genStartup", "genOn"]
+order_gen_invest_state = [
+    "genDisabled",
+    "genRetired",
+    "genExtended",
+    "genInstalled",
+    "genOperational",
+    ]
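+
+# NOTE: an illustrative sketch (an assumption about usage, not code that ships
+# with this file) of how these options might be read back. A plain ConfigParser
+# returns every value as a string, so the list-valued options above need an
+# extra parsing step, e.g. ast.literal_eval:
+#
+#   import ast
+#   from configparser import ConfigParser
+#
+#   cfg = ConfigParser()
+#   cfg.read("gtep/post_processing_config.cfg")
+#   buses = ast.literal_eval(cfg["diagnostic_keys_of_interest"]["buses_of_interest"])
+#   # buses == ["bus1", "bus4", "bus10"]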