From 7b907bbe6f942624a2244bc58d475e328103be6c Mon Sep 17 00:00:00 2001
From: romainsacchi
Date: Mon, 11 Mar 2024 06:31:12 +0000
Subject: [PATCH] Black reformating

---
 dev/timing.py        | 10 +++++++---
 pathways/pathways.py | 34 +++++++++++++++++++++-------------
 pathways/utils.py    | 15 ++++++++-------
 3 files changed, 36 insertions(+), 23 deletions(-)

diff --git a/dev/timing.py b/dev/timing.py
index 3515d64..9e3b7c5 100644
--- a/dev/timing.py
+++ b/dev/timing.py
@@ -9,11 +9,15 @@
 p.calculate(
     methods=[
         "RELICS - metals extraction - Lithium",
-        "RELICS - metals extraction - Molybdenum"
+        "RELICS - metals extraction - Molybdenum",
+    ],
+    regions=[
+        "WEU",
     ],
-    regions=["WEU",],
     scenarios=[scenario],
-    years=[2020, ],
+    years=[
+        2020,
+    ],
     variables=[
         v
         for v in p.scenarios.coords["variables"].values
diff --git a/pathways/pathways.py b/pathways/pathways.py
index 70bdf86..f236179 100644
--- a/pathways/pathways.py
+++ b/pathways/pathways.py
@@ -189,9 +189,7 @@ def fetch_inventories_locations(A_index: Dict[str, Tuple[str, str, str]]) -> Lis
 
     :return: List of locations.
     """
-    return list(set([
-        act[3] for act in A_index
-    ]))
+    return list(set([act[3] for act in A_index]))
 
 
 def generate_A_indices(A_index, reverse_classifications, lca_results_coords):
@@ -212,7 +210,6 @@ def generate_A_indices(A_index, reverse_classifications, lca_results_coords):
     return np.swapaxes(acts_idx, 0, 1)
 
 
-
 def process_region(data: Tuple) -> Union[None, Dict[str, Any]]:
     global impact_categories
     (
@@ -252,13 +249,22 @@ def process_region(data: Tuple) -> Union[None, Dict[str, Any]]:
     if lcia_matrix is not None:
         impact_categories = lca_results_coords["impact_category"].values
         target = np.zeros(
-            (len(act_categories), len(list(vars_idx)), len(locations), len(impact_categories))
+            (
+                len(act_categories),
+                len(list(vars_idx)),
+                len(locations),
+                len(impact_categories),
+            )
         )
     else:
         if flows is not None:
-            target = np.zeros((len(act_categories), len(list(vars_idx)), len(locations), len(flows)))
+            target = np.zeros(
+                (len(act_categories), len(list(vars_idx)), len(locations), len(flows))
+            )
         else:
-            target = np.zeros((len(act_categories), len(list(vars_idx)), len(locations), len(B_index)))
+            target = np.zeros(
+                (len(act_categories), len(list(vars_idx)), len(locations), len(B_index))
+            )
 
     for v, variable in enumerate(variables):
         idx, dataset = vars_idx[variable]["idx"], vars_idx[variable]["dataset"]
@@ -432,11 +438,13 @@ def create_dict_for_specific_model(row, model):
     # Create the dictionary structure for this row for the specific model
     dict_structure = {
         key: {
-            "dataset": [{
-                "name": row["dataset name"],
-                "reference product": row["dataset reference product"],
-                "unit": row["unit"],
-            }],
+            "dataset": [
+                {
+                    "name": row["dataset name"],
+                    "reference product": row["dataset reference product"],
+                    "unit": row["unit"],
+                }
+            ],
             "scenario variable": row[model],
         }
     }
@@ -654,7 +662,7 @@ def calculate(
         )
 
         # Remove contribution from activities in other activities
-        #A = remove_double_counting(A, vars_info)
+        # A = remove_double_counting(A, vars_info)
 
         # check unclassified activities
         check_unclassified_activities(A_index, self.classifications)
diff --git a/pathways/utils.py b/pathways/utils.py
index 323d475..ffc45ef 100644
--- a/pathways/utils.py
+++ b/pathways/utils.py
@@ -230,7 +230,9 @@ def create_lca_results_array(
 
 
 def display_results(
-    lca_results: Union[xr.DataArray, None], cutoff: float = 0.001, interpolate: bool = False
+    lca_results: Union[xr.DataArray, None],
+    cutoff: float = 0.001,
+    interpolate: bool = False,
 ) -> xr.DataArray:
     if lca_results is None:
         raise ValueError("No results to display")
@@ -246,22 +248,21 @@ def display_results(
 
     # Step 2: Aggregate values below the cutoff across the 'act_category' dimension
     # Summing all values below the cutoff for each combination of other dimensions
-    below_cutoff = lca_results.where(lca_results <= cutoff).sum(dim='act_category')
+    below_cutoff = lca_results.where(lca_results <= cutoff).sum(dim="act_category")
 
     # Since summing removes the 'act_category', we need to add it back
     # Create a new coordinate for 'act_category' that includes 'other'
-    new_act_category = np.append(lca_results.act_category.values, 'other')
+    new_act_category = np.append(lca_results.act_category.values, "other")
 
     # Create a new DataArray for below-cutoff values with 'act_category' as 'other'
     # This involves broadcasting below_cutoff to match the original array's dimensions but with 'act_category' replaced
-    other_data = below_cutoff.expand_dims({'act_category': ['other']}, axis=0)
+    other_data = below_cutoff.expand_dims({"act_category": ["other"]}, axis=0)
 
     # Step 3: Combine the above-cutoff data with the new 'other' data
     # Concatenate along the 'act_category' dimension
-    combined = xr.concat([above_cutoff, other_data], dim='act_category')
+    combined = xr.concat([above_cutoff, other_data], dim="act_category")
 
     # Ensure the 'act_category' coordinate is updated to include 'other'
-    combined = combined.assign_coords({'act_category': new_act_category})
+    combined = combined.assign_coords({"act_category": new_act_category})
 
     return combined
-