diff --git a/.github/workflows/black.yml b/.github/workflows/black.yml new file mode 100644 index 00000000..03d44783 --- /dev/null +++ b/.github/workflows/black.yml @@ -0,0 +1,24 @@ +name: Black Code Formatter + +on: [push, pull_request] + +jobs: + black: + runs-on: ubuntu-latest + + steps: + - name: Check out code + uses: actions/checkout@v4 + + - name: Set up Python + uses: actions/setup-python@v2 + with: + python-version: "3.12" + + - name: Install Black + run: | + python -m pip install --upgrade pip + pip install black + + - name: Check Black Formatting + run: black --check . \ No newline at end of file diff --git a/.github/workflows/pylint.yml b/.github/workflows/pylint.yml new file mode 100644 index 00000000..47abfc03 --- /dev/null +++ b/.github/workflows/pylint.yml @@ -0,0 +1,72 @@ +name: Pylint + +on: + push: + branches: + - main + pull_request: + branches: + - main + +jobs: + linting: + runs-on: ubuntu-latest + services: + docker: + image: qgis/qgis:latest + options: --user root + ports: + - 99:99 + + steps: + - name: Checkout qgis-plugin + uses: actions/checkout@v4 + + - name: Run QGIS Docker container + run: | + docker run -d --name qgis-testing-environment \ + -v ${GITHUB_WORKSPACE}:/tests_directory \ + -e DISPLAY=:99 qgis/qgis:latest tail -f /dev/null + + - name: Wait for Docker to be ready + run: | + until docker exec qgis-testing-environment sh -c "ls /"; do sleep 1; done + + - name: Install plugin manually via Copy + run: | + docker exec qgis-testing-environment sh -c ' + PLUGIN_SRC_PATH=/tests_directory/seat + PLUGIN_DEST_DIR=/usr/share/qgis/python/plugins/seat + mkdir -p $PLUGIN_DEST_DIR + cp -r $PLUGIN_SRC_PATH/* $PLUGIN_DEST_DIR/ + ' + + - name: Install Dependencies + run: | + docker exec qgis-testing-environment sh -c "apt-get update && apt-get install -y python3-pandas python3-netcdf4" + + - name: Create a Virtual Environment with system site packages + run: | + docker exec qgis-testing-environment sh -c "/usr/bin/python3.12 -m venv --system-site-packages /tests_directory/venv" + + - name: Add Plugin to PYTHONPATH in Virtual Environment + run: | + docker exec qgis-testing-environment sh -c "echo '/usr/share/qgis/python/plugins' > /tests_directory/venv/lib/python3.12/site-packages/seat.pth" + + - name: Install Pylint in Virtual Environment + run: | + docker exec qgis-testing-environment sh -c "/tests_directory/venv/bin/pip install pylint" + + - name: Run Pylint + run: | + docker exec qgis-testing-environment sh -c "/tests_directory/venv/bin/pylint /tests_directory/seat/modules/velocity_module.py" + docker exec qgis-testing-environment sh -c "/tests_directory/venv/bin/pylint /tests_directory/seat/modules/shear_stress_module.py" + docker exec qgis-testing-environment sh -c "/tests_directory/venv/bin/pylint /tests_directory/seat/modules/power_module.py" + docker exec qgis-testing-environment sh -c "/tests_directory/venv/bin/pylint /tests_directory/seat/modules/stressor_utils.py" + + + - name: Clean up Docker container + if: always() + run: | + docker stop qgis-testing-environment + docker rm qgis-testing-environment diff --git a/.github/workflows/run-tests.yml b/.github/workflows/run-tests.yml index bc227a7f..83aad1fc 100644 --- a/.github/workflows/run-tests.yml +++ b/.github/workflows/run-tests.yml @@ -7,7 +7,6 @@ on: push: branches: - main - - workflow jobs: test-seat-package: @@ -33,7 +32,7 @@ jobs: run: | until docker exec qgis-testing-environment sh -c "ls /"; do sleep 1; done - - name: Install plugin by manually via Copy + - name: Install plugin manually via Copy 
run: | docker exec qgis-testing-environment sh -c ' PLUGIN_SRC_PATH=/tests_directory/seat @@ -47,10 +46,6 @@ jobs: docker exec qgis-testing-environment sh -c "python3 /tests_directory/tests/install_dependencies.py" docker exec qgis-testing-environment sh -c "apt-get update && apt-get install -y python3-pandas python3-netcdf4 python3-pytest" - - name: List installed packages - run: | - docker exec qgis-testing-environment sh -c "pip list" - - name: Run Pytest run: | docker exec qgis-testing-environment pytest /tests_directory/tests diff --git a/docs/src/conf.py b/docs/src/conf.py index ecb59e3e..5d4dcb7c 100644 --- a/docs/src/conf.py +++ b/docs/src/conf.py @@ -19,12 +19,12 @@ # -- Project information ----------------------------------------------------- -project = 'SEAT' -copyright = '2023 Sandia National Laboratories' -author = 'Tim Nelson, Caleb Grant, Eben Pendleton, Sterling Olson' +project = "SEAT" +copyright = "2023 Sandia National Laboratories" +author = "Tim Nelson, Caleb Grant, Eben Pendleton, Sterling Olson" # The full version, including alpha/beta/rc tags -release = '[1.0.0-beta1]' +release = "[1.0.0-beta1]" # -- General configuration --------------------------------------------------- @@ -33,13 +33,13 @@ # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ - 'sphinx.ext.autodoc', - 'sphinx.ext.napoleon', - 'sphinx.ext.intersphinx', - 'sphinx_autodoc_typehints', - 'sphinx_rtd_theme', - 'sphinx_rtd_dark_mode', - 'sphinx_copybutton', + "sphinx.ext.autodoc", + "sphinx.ext.napoleon", + "sphinx.ext.intersphinx", + "sphinx_autodoc_typehints", + "sphinx_rtd_theme", + "sphinx_rtd_dark_mode", + "sphinx_copybutton", ] # Add any paths that contain templates here, relative to this directory. @@ -48,7 +48,7 @@ # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This pattern also affects html_static_path and html_extra_path. -exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] +exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"] # Napoleon settings diff --git a/docs/src/media/compress.py b/docs/src/media/compress.py index 53461259..fb921c54 100644 --- a/docs/src/media/compress.py +++ b/docs/src/media/compress.py @@ -12,38 +12,37 @@ def compress_image(image_path, output_path, quality=100): The quality parameter controls the quality of the compression. Lowering the quality value will decrease the file size. 
""" - image = Image.open(image_path).convert( - 'RGB') # Ensure the image is in RGB mode + image = Image.open(image_path).convert("RGB") # Ensure the image is in RGB mode max_width = 1000 - width_percent = (max_width / float(image.size[0])) + width_percent = max_width / float(image.size[0]) height_size = int((float(image.size[1]) * float(width_percent))) resized_img = image.resize((max_width, height_size), Image.ANTIALIAS) - resized_img.save(output_path, 'WEBP', quality=quality) + resized_img.save(output_path, "WEBP", quality=quality) def process_single_png(filename): - if not filename.endswith('.png'): + if not filename.endswith(".png"): print(f"{filename} is not a .png file.") return print(f"Processing {filename}...") file_path = filename - output_path = os.path.splitext(file_path)[0] + '.webp' + output_path = os.path.splitext(file_path)[0] + ".webp" # Convert PNG to WEBP without compressing, just for the format change - image = Image.open(file_path).convert('RGB') - image.save(output_path, 'WEBP') + image = Image.open(file_path).convert("RGB") + image.save(output_path, "WEBP") print(f"{filename} converted to {os.path.basename(output_path)}\n") def process_webp_files(): - for filename in os.listdir('.'): - if filename.endswith('.webp'): + for filename in os.listdir("."): + if filename.endswith(".webp"): print(f"Processing {filename}...") - file_path = os.path.join('.', filename) - output_path = os.path.splitext(file_path)[0] + '.webp' + file_path = os.path.join(".", filename) + output_path = os.path.splitext(file_path)[0] + ".webp" # Get initial file size in kB initial_file_size_kb = os.path.getsize(file_path) / 1024 @@ -55,7 +54,8 @@ def process_webp_files(): # Get final file size in kB final_file_size_kb = os.path.getsize(output_path) / 1024 print( - f"{os.path.basename(output_path)}: Final Size: {final_file_size_kb:.2f} kB\n") + f"{os.path.basename(output_path)}: Final Size: {final_file_size_kb:.2f} kB\n" + ) def main(): diff --git a/seat/modules/acoustics_module.py b/seat/modules/acoustics_module.py index 054ff9e9..943c80e9 100644 --- a/seat/modules/acoustics_module.py +++ b/seat/modules/acoustics_module.py @@ -32,11 +32,11 @@ calculate_cell_area, resample_structured_grid, bin_layer, - secondary_constraint_geotiff_to_numpy + secondary_constraint_geotiff_to_numpy, ) -def create_species_array(species_filename, x, y, variable='percent', latlon=False): +def create_species_array(species_filename, x, y, variable="percent", latlon=False): """ Interpolates or creates an array of percent or density of species @@ -66,29 +66,40 @@ def create_species_array(species_filename, x, y, variable='percent', latlon=Fals """ # if ((receptor_filename is not None) or (not receptor_filename == "")): if not ((species_filename is None) or (species_filename == "")): - if species_filename.endswith('.tif'): + if species_filename.endswith(".tif"): data = gdal.Open(species_filename) img = data.GetRasterBand(1) receptor_array = img.ReadAsArray() receptor_array[receptor_array < 0] = 0 - (upper_left_x, x_size, x_rotation, upper_left_y, - y_rotation, y_size) = data.GetGeoTransform() + (upper_left_x, x_size, x_rotation, upper_left_y, y_rotation, y_size) = ( + data.GetGeoTransform() + ) cols = data.RasterXSize rows = data.RasterYSize r_rows = np.arange(rows) * y_size + upper_left_y + (y_size / 2) r_cols = np.arange(cols) * x_size + upper_left_x + (x_size / 2) if latlon == True: - r_cols = np.where(r_cols < 0, r_cols+360, r_cols) + r_cols = np.where(r_cols < 0, r_cols + 360, r_cols) x_grid, y_grid = np.meshgrid(r_cols, 
r_rows) - variable_array = griddata((x_grid.flatten(), y_grid.flatten( - )), receptor_array.flatten(), (x, y), method='nearest', fill_value=0) - - elif species_filename.endswith('.csv'): + variable_array = griddata( + (x_grid.flatten(), y_grid.flatten()), + receptor_array.flatten(), + (x, y), + method="nearest", + fill_value=0, + ) + + elif species_filename.endswith(".csv"): df = pd.read_csv(species_filename) - columns_keep = ['latitude', 'longitude', variable] + columns_keep = ["latitude", "longitude", variable] df = df[columns_keep] - variable_array = griddata((df.longitude.to_numpy(), df.latitude.to_numpy( - )), df[variable].to_numpy(), (x, y), method='nearest', fill_value=0) + variable_array = griddata( + (df.longitude.to_numpy(), df.latitude.to_numpy()), + df[variable].to_numpy(), + (x, y), + method="nearest", + fill_value=0, + ) else: raise Exception("Invalid File Type. Must be of type .tif or .csv") else: @@ -96,13 +107,15 @@ def create_species_array(species_filename, x, y, variable='percent', latlon=Fals return variable_array -def calculate_acoustic_stressors(fpath_dev, - probabilities_file, - receptor_filename, - fpath_nodev=None, - species_folder=None, # secondary constraint - latlon=True, - Averaging = None): +def calculate_acoustic_stressors( + fpath_dev, + probabilities_file, + receptor_filename, + fpath_nodev=None, + species_folder=None, # secondary constraint + latlon=True, + Averaging=None, +): """ Calculates the stressor layers as arrays from model and parameter input. @@ -148,27 +161,34 @@ def calculate_acoustic_stressors(fpath_dev, if not os.path.exists(receptor_filename): raise FileNotFoundError(f"The file {receptor_filename} does not exist.") - paracousti_files = [os.path.join(fpath_dev, i) - for i in os.listdir(fpath_dev) if i.endswith('.nc')] - boundary_conditions = pd.read_csv( - probabilities_file).set_index('Paracousti File').fillna(0) - boundary_conditions['% of yr'] = 100 * \ - (boundary_conditions['% of yr'] / boundary_conditions['% of yr'].sum()) + paracousti_files = [ + os.path.join(fpath_dev, i) for i in os.listdir(fpath_dev) if i.endswith(".nc") + ] + boundary_conditions = ( + pd.read_csv(probabilities_file).set_index("Paracousti File").fillna(0) + ) + boundary_conditions["% of yr"] = 100 * ( + boundary_conditions["% of yr"] / boundary_conditions["% of yr"].sum() + ) receptor = pd.read_csv(receptor_filename, index_col=0, header=None).T - Threshold = receptor['Threshold (dB re 1uPa)'].astype( - float).to_numpy().item() - if not ((receptor['species file averaged area (km2)'].values is None) or (receptor['species file averaged area (km2)'].values == "")): - grid_res_species = receptor['species file averaged area (km2)'].astype( - float).to_numpy().item() * 1.0e6 # converted to m2 + Threshold = receptor["Threshold (dB re 1uPa)"].astype(float).to_numpy().item() + if not ( + (receptor["species file averaged area (km2)"].values is None) + or (receptor["species file averaged area (km2)"].values == "") + ): + grid_res_species = ( + receptor["species file averaged area (km2)"].astype(float).to_numpy().item() + * 1.0e6 + ) # converted to m2 else: grid_res_species = 0.0 # Averaging = receptor['Depth Averaging'].values.item() - variable = receptor['Paracousti Variable'].values.item() + variable = receptor["Paracousti Variable"].values.item() for ic, paracousti_file in enumerate(paracousti_files): with Dataset(paracousti_file) as ds: - # ds = Dataset(paracousti_file) + # ds = Dataset(paracousti_file) acoust_var = ds.variables[variable][:].data cords = 
ds.variables[variable].coordinates.split() X = ds.variables[cords[0]][:].data @@ -177,45 +197,62 @@ def calculate_acoustic_stressors(fpath_dev, acoust_var = np.transpose(acoust_var, (1, 2, 0)) if ic == 0: xunits = ds.variables[cords[0]].units - if 'degrees' in xunits: + if "degrees" in xunits: latlon = True - XCOR = np.where(X < 0, X+360, X) + XCOR = np.where(X < 0, X + 360, X) else: XCOR = X YCOR = Y - ACOUST_VAR = np.zeros((len(paracousti_files), np.shape(acoust_var)[ - 0], np.shape(acoust_var)[1], np.shape(acoust_var)[2])) + ACOUST_VAR = np.zeros( + ( + len(paracousti_files), + np.shape(acoust_var)[0], + np.shape(acoust_var)[1], + np.shape(acoust_var)[2], + ) + ) ACOUST_VAR[ic, :] = acoust_var - if not ((fpath_nodev is None) or (fpath_nodev == "")): # Assumes same grid as paracousti_files + if not ( + (fpath_nodev is None) or (fpath_nodev == "") + ): # Assumes same grid as paracousti_files if not os.path.exists(fpath_nodev): raise FileNotFoundError(f"The directory {fpath_nodev} does not exist.") - baseline_files = [os.path.join(fpath_nodev, i) for i in os.listdir( - fpath_nodev) if i.endswith('.nc')] + baseline_files = [ + os.path.join(fpath_nodev, i) + for i in os.listdir(fpath_nodev) + if i.endswith(".nc") + ] for ic, baseline_file in enumerate(baseline_files): with Dataset(baseline_file) as ds: - # ds = Dataset(baseline_file) + # ds = Dataset(baseline_file) baseline = ds.variables[variable][:].data cords = ds.variables[variable].coordinates.split() if ds.variables[cords[0]][:].data.shape[0] != baseline.shape[0]: baseline = np.transpose(baseline, (1, 2, 0)) if ic == 0: - Baseline = np.zeros((len(baseline_files), np.shape(baseline)[ - 0], np.shape(baseline)[1], np.shape(baseline)[2])) + Baseline = np.zeros( + ( + len(baseline_files), + np.shape(baseline)[0], + np.shape(baseline)[1], + np.shape(baseline)[2], + ) + ) Baseline[ic, :] = baseline else: Baseline = np.zeros(ACOUST_VAR.shape) - if Averaging == 'Depth Maximum': + if Averaging == "Depth Maximum": ACOUST_VAR = np.nanmax(ACOUST_VAR, axis=3) Baseline = np.nanmax(Baseline, axis=3) - elif Averaging == 'Depth Average': + elif Averaging == "Depth Average": ACOUST_VAR = np.nanmean(ACOUST_VAR, axis=3) Baseline = np.nanmean(Baseline, axis=3) - elif Averaging == 'Bottom Bin': + elif Averaging == "Bottom Bin": ACOUST_VAR = ACOUST_VAR[:, :, -1] Baseline = Baseline[:, :, -1] - elif Averaging == 'Top Bin': + elif Averaging == "Top Bin": ACOUST_VAR = ACOUST_VAR[:, :, 0] Baseline = Baseline[:, :, 0] else: @@ -224,10 +261,8 @@ def calculate_acoustic_stressors(fpath_dev, for ic, file in enumerate(paracousti_files): # paracousti files might not have regular grid spacing. 
- rx, ry, acoust_var = redefine_structured_grid( - XCOR, YCOR, ACOUST_VAR[ic, :]) - baseline = resample_structured_grid( - XCOR, YCOR, Baseline[ic, :], rx, ry) + rx, ry, acoust_var = redefine_structured_grid(XCOR, YCOR, ACOUST_VAR[ic, :]) + baseline = resample_structured_grid(XCOR, YCOR, Baseline[ic, :], rx, ry) if ic == 0: PARACOUSTI = np.zeros(rx.shape) @@ -236,25 +271,38 @@ def calculate_acoustic_stressors(fpath_dev, percent_scaled = np.zeros(rx.shape) density_scaled = np.zeros(rx.shape) - probability = boundary_conditions.loc[os.path.basename( - file)]['% of yr'] / 100 + probability = boundary_conditions.loc[os.path.basename(file)]["% of yr"] / 100 PARACOUSTI = PARACOUSTI + probability * acoust_var stressor = stressor + probability * (acoust_var - baseline) threshold_mask = acoust_var > Threshold - threshold_exceeded[threshold_mask] += probability*100 + threshold_exceeded[threshold_mask] += probability * 100 if not ((species_folder is None) or (species_folder == "")): if not os.path.exists(species_folder): - raise FileNotFoundError(f"The directory {species_folder} does not exist.") - species_percent_filename = boundary_conditions.loc[os.path.basename( - paracousti_file)]['Species Percent Occurance File'] - species_density_filename = boundary_conditions.loc[os.path.basename( - paracousti_file)]['Species Density File'] - parray = create_species_array(os.path.join( - species_folder, species_percent_filename), rx, ry, variable='percent', latlon=True) - darray = create_species_array(os.path.join( - species_folder, species_density_filename), rx, ry, variable='density', latlon=True) + raise FileNotFoundError( + f"The directory {species_folder} does not exist." + ) + species_percent_filename = boundary_conditions.loc[ + os.path.basename(paracousti_file) + ]["Species Percent Occurance File"] + species_density_filename = boundary_conditions.loc[ + os.path.basename(paracousti_file) + ]["Species Density File"] + parray = create_species_array( + os.path.join(species_folder, species_percent_filename), + rx, + ry, + variable="percent", + latlon=True, + ) + darray = create_species_array( + os.path.join(species_folder, species_density_filename), + rx, + ry, + variable="density", + latlon=True, + ) _, _, square_area = calculate_cell_area(rx, ry, latlon == True) # square area of each grid cell square_area = np.nanmean(square_area) @@ -265,17 +313,21 @@ def calculate_acoustic_stressors(fpath_dev, ratio = 1 parray_scaled = parray * ratio darray_scaled = darray * ratio - percent_scaled[threshold_mask] += probability * \ - parray_scaled[threshold_mask] - density_scaled[threshold_mask] += probability * \ - darray_scaled[threshold_mask] - - dict_of_arrays = {'paracousti_without_devices':baseline, - 'paracousti_with_devices':PARACOUSTI, - 'paracousti_stressor': stressor, - 'species_threshold_exceeded': threshold_exceeded, - 'species_percent': percent_scaled, - 'species_density': density_scaled} + percent_scaled[threshold_mask] += ( + probability * parray_scaled[threshold_mask] + ) + density_scaled[threshold_mask] += ( + probability * darray_scaled[threshold_mask] + ) + + dict_of_arrays = { + "paracousti_without_devices": baseline, + "paracousti_with_devices": PARACOUSTI, + "paracousti_stressor": stressor, + "species_threshold_exceeded": threshold_exceeded, + "species_percent": percent_scaled, + "species_density": density_scaled, + } dx = np.nanmean(np.diff(rx[0, :])) dy = np.nanmean(np.diff(ry[:, 0])) @@ -291,7 +343,8 @@ def run_acoustics_stressor( receptor_filename, species_folder=None, Averaging=None, - 
secondary_constraint_filename=None): + secondary_constraint_filename=None, +): """ @@ -326,59 +379,72 @@ def run_acoustics_stressor( """ - os.makedirs(output_path, exist_ok=True) # create output directory if it doesn't exist + os.makedirs( + output_path, exist_ok=True + ) # create output directory if it doesn't exist - dict_of_arrays, rx, ry, dx, dy = calculate_acoustic_stressors(fpath_dev=dev_present_file, - probabilities_file=probabilities_file, - receptor_filename=receptor_filename, - fpath_nodev=dev_notpresent_file, - species_folder=species_folder, - latlon=crs == 4326, - Averaging=Averaging) + dict_of_arrays, rx, ry, dx, dy = calculate_acoustic_stressors( + fpath_dev=dev_present_file, + probabilities_file=probabilities_file, + receptor_filename=receptor_filename, + fpath_nodev=dev_notpresent_file, + species_folder=species_folder, + latlon=crs == 4326, + Averaging=Averaging, + ) if not ((species_folder is None) or (species_folder == "")): - use_numpy_arrays = ['paracousti_without_devices', - 'paracousti_with_devices', - 'paracousti_stressor', - 'species_threshold_exceeded', - 'species_percent', - 'species_density'] + use_numpy_arrays = [ + "paracousti_without_devices", + "paracousti_with_devices", + "paracousti_stressor", + "species_threshold_exceeded", + "species_percent", + "species_density", + ] else: - use_numpy_arrays = ['paracousti_without_devices' - 'paracousti_with_devices', - 'paracousti_stressor', - 'species_threshold_exceeded'] - - if not ((secondary_constraint_filename is None) or (secondary_constraint_filename == "")): + use_numpy_arrays = [ + "paracousti_without_devices" "paracousti_with_devices", + "paracousti_stressor", + "species_threshold_exceeded", + ] + + if not ( + (secondary_constraint_filename is None) or (secondary_constraint_filename == "") + ): if not os.path.exists(secondary_constraint_filename): - raise FileNotFoundError(f"The file {secondary_constraint_filename} does not exist.") - rrx, rry, constraint = secondary_constraint_geotiff_to_numpy(secondary_constraint_filename) - dict_of_arrays['paracousti_risk_layer'] = resample_structured_grid(rrx, rry, constraint, rx, ry, interpmethod='nearest') - use_numpy_arrays.append('paracousti_risk_layer') - - numpy_array_names = [i + '.tif' for i in use_numpy_arrays] + raise FileNotFoundError( + f"The file {secondary_constraint_filename} does not exist." 
+ ) + rrx, rry, constraint = secondary_constraint_geotiff_to_numpy( + secondary_constraint_filename + ) + dict_of_arrays["paracousti_risk_layer"] = resample_structured_grid( + rrx, rry, constraint, rx, ry, interpmethod="nearest" + ) + use_numpy_arrays.append("paracousti_risk_layer") + + numpy_array_names = [i + ".tif" for i in use_numpy_arrays] output_rasters = [] for array_name, use_numpy_array in zip(numpy_array_names, use_numpy_arrays): numpy_array = np.flip(dict_of_arrays[use_numpy_array], axis=0) cell_resolution = [dx, dy] - # output_rasters = [] - # for array_name, use_numpy_array in zip(numpy_array_names, use_numpy_arrays): + # output_rasters = [] + # for array_name, use_numpy_array in zip(numpy_array_names, use_numpy_arrays): # numpy_array = np.flip(numpy_array, axis=0) # cell_resolution = [dx, dy] if crs == 4326: - rxx = np.where(rx > 180, rx-360, rx) - bounds = [rxx.min() - dx/2, ry.max() - dy/2] + rxx = np.where(rx > 180, rx - 360, rx) + bounds = [rxx.min() - dx / 2, ry.max() - dy / 2] else: - bounds = [rx.min() - dx/2, ry.max() - dy/2] + bounds = [rx.min() - dx / 2, ry.max() - dy / 2] rows, cols = numpy_array.shape # create an ouput raster given the stressor file path output_rasters.append(os.path.join(output_path, array_name)) output_raster = create_raster( - os.path.join(output_path, array_name), - cols, - rows, - nbands=1) + os.path.join(output_path, array_name), cols, rows, nbands=1 + ) # post processing of numpy array to output raster numpy_array_to_raster( @@ -387,64 +453,111 @@ def run_acoustics_stressor( bounds, cell_resolution, crs, - os.path.join(output_path, array_name)) + os.path.join(output_path, array_name), + ) output_raster = None # Area calculations # ParAcousti Area - bin_layer(os.path.join(output_path, "paracousti_without_devices.tif"), - latlon=crs == 4326).to_csv(os.path.join(output_path, "paracousti_without_devices.csv"), index=False) + bin_layer( + os.path.join(output_path, "paracousti_without_devices.tif"), latlon=crs == 4326 + ).to_csv(os.path.join(output_path, "paracousti_without_devices.csv"), index=False) - bin_layer(os.path.join(output_path, "paracousti_with_devices.tif"), - latlon=crs == 4326).to_csv(os.path.join(output_path, "paracousti_with_devices.csv"), index=False) + bin_layer( + os.path.join(output_path, "paracousti_with_devices.tif"), latlon=crs == 4326 + ).to_csv(os.path.join(output_path, "paracousti_with_devices.csv"), index=False) # Stressor Area - bin_layer(os.path.join(output_path, "paracousti_stressor.tif"), - latlon=crs == 4326).to_csv(os.path.join(output_path, "paracousti_stressor.csv"), index=False) + bin_layer( + os.path.join(output_path, "paracousti_stressor.tif"), latlon=crs == 4326 + ).to_csv(os.path.join(output_path, "paracousti_stressor.csv"), index=False) # threshold exeeded Area - bin_layer(os.path.join(output_path, "species_threshold_exceeded.tif"), - latlon=crs == 4326).to_csv(os.path.join(output_path, "species_threshold_exceeded.csv"), index=False) + bin_layer( + os.path.join(output_path, "species_threshold_exceeded.tif"), latlon=crs == 4326 + ).to_csv(os.path.join(output_path, "species_threshold_exceeded.csv"), index=False) + + if not ( + (secondary_constraint_filename is None) or (secondary_constraint_filename == "") + ): + bin_layer( + os.path.join(output_path, "paracousti_stressor.tif"), + receptor_filename=os.path.join(output_path, "paracousti_risk_layer.tif"), + receptor_names=None, + limit_receptor_range=[0, np.inf], + latlon=crs == 4326, + receptor_type="risk layer", + ).to_csv( + os.path.join( + output_path, 
"paracousti_stressor_at_paracousti_risk_layer.csv" + ), + index=False, + ) - if not ((secondary_constraint_filename is None) or (secondary_constraint_filename == "")): - bin_layer(os.path.join(output_path, 'paracousti_stressor.tif'), - receptor_filename=os.path.join(output_path, "paracousti_risk_layer.tif"), + if not ((species_folder is None) or (species_folder == "")): + bin_layer( + os.path.join(output_path, "species_percent.tif"), latlon=crs == 4326 + ).to_csv(os.path.join(output_path, "species_percent.csv"), index=False) + + bin_layer( + os.path.join(output_path, "species_density.tif"), latlon=crs == 4326 + ).to_csv(os.path.join(output_path, "species_density.csv"), index=False) + + if not ( + (secondary_constraint_filename is None) + or (secondary_constraint_filename == "") + ): + bin_layer( + os.path.join(output_path, "species_threshold_exceeded.tif"), + receptor_filename=os.path.join( + output_path, "paracousti_risk_layer.tif" + ), receptor_names=None, limit_receptor_range=[0, np.inf], latlon=crs == 4326, - receptor_type='risk layer').to_csv(os.path.join(output_path, "paracousti_stressor_at_paracousti_risk_layer.csv"), index=False) - - if not ((species_folder is None) or (species_folder == "")): - bin_layer(os.path.join(output_path, "species_percent.tif"), - latlon=crs == 4326).to_csv(os.path.join(output_path, "species_percent.csv"), index=False) - - bin_layer(os.path.join(output_path, "species_density.tif"), - latlon=crs == 4326).to_csv(os.path.join(output_path, "species_density.csv"), index=False) - - if not ((secondary_constraint_filename is None) or (secondary_constraint_filename == "")): - bin_layer(os.path.join(output_path, 'species_threshold_exceeded.tif'), - receptor_filename=os.path.join(output_path, "paracousti_risk_layer.tif"), - receptor_names=None, - limit_receptor_range=[0, np.inf], - latlon=crs == 4326, - receptor_type='risk layer').to_csv(os.path.join(output_path, "species_threshold_exceeded_at_paracousti_risk_layer.csv"), index=False) - - bin_layer(os.path.join(output_path, 'species_percent.tif'), - receptor_filename=os.path.join(output_path, "paracousti_risk_layer.tif"), - receptor_names=None, - limit_receptor_range=[0, np.inf], - latlon=crs == 4326, - receptor_type='risk layer').to_csv(os.path.join(output_path, "species_percent_at_paracousti_risk_layer.csv"), index=False) - - bin_layer(os.path.join(output_path, 'species_density.tif'), - receptor_filename=os.path.join(output_path, "paracousti_risk_layer.tif"), - receptor_names=None, - limit_receptor_range=[0, np.inf], - latlon=crs == 4326, - receptor_type='risk layer').to_csv(os.path.join(output_path, "species_density_at_paracousti_risk_layer.csv"), index=False) + receptor_type="risk layer", + ).to_csv( + os.path.join( + output_path, + "species_threshold_exceeded_at_paracousti_risk_layer.csv", + ), + index=False, + ) + + bin_layer( + os.path.join(output_path, "species_percent.tif"), + receptor_filename=os.path.join( + output_path, "paracousti_risk_layer.tif" + ), + receptor_names=None, + limit_receptor_range=[0, np.inf], + latlon=crs == 4326, + receptor_type="risk layer", + ).to_csv( + os.path.join( + output_path, "species_percent_at_paracousti_risk_layer.csv" + ), + index=False, + ) + + bin_layer( + os.path.join(output_path, "species_density.tif"), + receptor_filename=os.path.join( + output_path, "paracousti_risk_layer.tif" + ), + receptor_names=None, + limit_receptor_range=[0, np.inf], + latlon=crs == 4326, + receptor_type="risk layer", + ).to_csv( + os.path.join( + output_path, 
"species_density_at_paracousti_risk_layer.csv" + ), + index=False, + ) OUTPUT = {} for val in output_rasters: - OUTPUT[os.path.basename(os.path.normpath(val)).split('.')[0]] = val + OUTPUT[os.path.basename(os.path.normpath(val)).split(".")[0]] = val return OUTPUT diff --git a/seat/modules/power_module.py b/seat/modules/power_module.py index 502ca116..15f08264 100644 --- a/seat/modules/power_module.py +++ b/seat/modules/power_module.py @@ -1,28 +1,39 @@ # power_module.py -# -# AUTHORS (Authors to use initals in history) -# Timothy R. Nelson - tnelson@integral-corp.com -# NOTES (Data descriptions and any script specific notes) -# 1. .OUT files and .pol must be in the same folder -# 2. .OUT file is format sensitive -# -# Date Author Remarks -# ----------- --------------------- -------------------------------------------- -# 2022-08-01 TRN - File created -# 2023-08-05 TRN - Comments and slight edits -# =============================================================================== -import pandas as pd +# pylint: disable=too-many-statements +# pylint: disable=too-many-arguments +# pylint: disable=too-many-locals +# pylint: disable=too-many-branches + + +""" +This module provides functionalities related to power calculations. + +AUTHORS: + - Timothy R. Nelson (TRN) - tnelson@integral-corp.com + +NOTES: + 1. .OUT files and .pol files must be in the same folder. + 2. .OUT file is format sensitive. + +CHANGE HISTORY: + - 2022-08-01: TRN - File created. + - 2023-08-05: TRN - Comments and slight edits. +""" + + import io import re -import numpy as np import os +import numpy as np +import pandas as pd import matplotlib.pyplot as plt from matplotlib.patches import Rectangle from matplotlib.colors import ListedColormap from matplotlib.cm import ScalarMappable from matplotlib.ticker import FormatStrFormatter + # Obstacle Polygon and Device Positions def read_obstacle_polygon_file(power_device_configuration_file): """ @@ -35,7 +46,7 @@ def read_obstacle_polygon_file(power_device_configuration_file): Returns ------- - Obstacles : Dict + obstacles : Dict xy of each obstacle. """ @@ -43,59 +54,69 @@ def read_obstacle_polygon_file(power_device_configuration_file): with io.open(power_device_configuration_file, "r", encoding="utf-8") as inf: lines = inf.readlines() except FileNotFoundError as exc: - raise FileNotFoundError(f"File not found: {power_device_configuration_file}") from exc + raise FileNotFoundError( + f"File not found: {power_device_configuration_file}" + ) from exc ic = 0 - Obstacles = {} + obstacles = {} while ic < len(lines) - 1: - if 'Obstacle' in lines[ic]: + if "Obstacle" in lines[ic]: obstacle = lines[ic].strip() - Obstacles[obstacle] = {} + obstacles[obstacle] = {} ic += 1 # skip to next line - nrows, ncols = [int(i) for i in lines[ic].split()] + nrows = int(lines[ic].split()[0]) ic += 1 # skip to next line x = [] y = [] - for n in range(nrows): # read polygon + for _ in range(nrows): # read polygon xi, yi = [float(i) for i in lines[ic].split()] x = np.append(x, xi) y = np.append(y, yi) ic += 1 # skip to next line - Obstacles[obstacle] = np.vstack((x, y)).T + obstacles[obstacle] = np.vstack((x, y)).T else: ic += 1 - return Obstacles + return obstacles -def find_mean_point_of_obstacle_polygon(Obstacles): +def find_mean_point_of_obstacle_polygon(obstacles): """ Calculates the center of each obstacle. Parameters ---------- - Obstacles : Dict + obstacles : Dict x,y of each obstacle. Returns ------- - Centroids : array + centroids : array Centroid of each obstacle. 
""" - Centroids = np.empty((0, 3), dtype=int) - for ic, obstacle in enumerate(Obstacles.keys()): - Centroids = np.vstack((Centroids, [ic, np.nanmean( - Obstacles[obstacle][:, 0]), np.nanmean(Obstacles[obstacle][:, 1])])) - return Centroids - - -def plot_test_obstacle_locations(Obstacles): + centroids = np.empty((0, 3), dtype=int) + for ic, obstacle in enumerate(obstacles.keys()): + centroids = np.vstack( + ( + centroids, + [ + ic, + np.nanmean(obstacles[obstacle][:, 0]), + np.nanmean(obstacles[obstacle][:, 1]), + ], + ) + ) + return centroids + + +def plot_test_obstacle_locations(obstacles): """ Creates a plot of the spatial distribution and location of each obstacle. Parameters ---------- - Obstacles : Dict + obstacles : Dict xy of each obstacle. Returns @@ -105,24 +126,37 @@ def plot_test_obstacle_locations(Obstacles): """ fig, ax = plt.subplots(figsize=(10, 10)) - for ic, obstacle in enumerate(Obstacles.keys()): - ax.plot(Obstacles[obstacle][:, 0], Obstacles[obstacle] - [:, 1], '.', markersize=3, alpha=0) - ax.text(Obstacles[obstacle][0, 0], Obstacles[obstacle] - [0, 1], f'{obstacle}', fontsize=8) - ax.text(Obstacles[obstacle][1, 0], Obstacles[obstacle] - [1, 1], f'{obstacle}', fontsize=8) + for obstacle in obstacles.keys(): + ax.plot( + obstacles[obstacle][:, 0], + obstacles[obstacle][:, 1], + ".", + markersize=3, + alpha=0, + ) + ax.text( + obstacles[obstacle][0, 0], + obstacles[obstacle][0, 1], + f"{obstacle}", + fontsize=8, + ) + ax.text( + obstacles[obstacle][1, 0], + obstacles[obstacle][1, 1], + f"{obstacle}", + fontsize=8, + ) fig.tight_layout() return fig -def centroid_diffs(Centroids, centroid): +def centroid_diffs(centroids, centroid): """ Determines the closest centroid pair Parameters ---------- - Centroids : Dict + centroids : Dict dimensions M,N with each M [index, x , y] centroid : array single x,y. @@ -134,89 +168,89 @@ def centroid_diffs(Centroids, centroid): """ - diff = Centroids[:, 1:] - centroid[1:] - min_arg = np.nanargmin(np.abs(diff[:, -1]-diff[:, 0])) - pair = [int(centroid[0]), int(Centroids[min_arg, 0])] + diff = centroids[:, 1:] - centroid[1:] + min_arg = np.nanargmin(np.abs(diff[:, -1] - diff[:, 0])) + pair = [int(centroid[0]), int(centroids[min_arg, 0])] return pair -def extract_device_location(Obstacles, Device_index): +def extract_device_location(obstacles, device_index): """ Creates a dictionary summary of each device location Parameters ---------- - Obstacles : TYPE + obstacles : TYPE DESCRIPTION. - Device_index : TYPE + device_index : TYPE DESCRIPTION. Returns ------- - Devices_DF : TYPE + devices_df : TYPE DESCRIPTION. 
""" - Devices = {} - for device, [ix1, ix2] in enumerate(Device_index): - key = f'{device+1:03.0f}' - Devices[key] = {} - xy = Obstacles[f'Obstacle {ix1+1}'] - xy = np.vstack((xy, Obstacles[f'Obstacle {ix2+1}'])) + devices = {} + for device, [ix1, ix2] in enumerate(device_index): + key = f"{device+1:03.0f}" + devices[key] = {} + xy = obstacles[f"Obstacle {ix1+1}"] + xy = np.vstack((xy, obstacles[f"Obstacle {ix2+1}"])) # create polygon from bottom left to upper right assuming rectangular x = xy[:, 0] y = xy[:, 1] - Devices[key]['polyx'] = [ - np.nanmin(x), np.nanmin(x), np.nanmax(x), np.nanmax(x)] - Devices[key]['polyy'] = [ - np.nanmin(y), np.nanmax(y), np.nanmax(y), np.nanmin(y)] - Devices[key]['lower_left'] = [np.nanmin(x), np.nanmin(y)] - Devices[key]['centroid'] = [np.nanmean(x), np.nanmean(y)] - Devices[key]['width'] = (np.nanmax(x) - np.nanmin(x)) - Devices[key]['height'] = (np.nanmax(y) - np.nanmin(y)) - Devices_DF = pd.DataFrame.from_dict(Devices, orient='index') - return Devices_DF - - -def pair_devices(Centroids): + devices[key]["polyx"] = [np.nanmin(x), np.nanmin(x), np.nanmax(x), np.nanmax(x)] + devices[key]["polyy"] = [np.nanmin(y), np.nanmax(y), np.nanmax(y), np.nanmin(y)] + devices[key]["lower_left"] = [np.nanmin(x), np.nanmin(y)] + devices[key]["centroid"] = [np.nanmean(x), np.nanmean(y)] + devices[key]["width"] = np.nanmax(x) - np.nanmin(x) + devices[key]["height"] = np.nanmax(y) - np.nanmin(y) + devices_df = pd.DataFrame.from_dict(devices, orient="index") + return devices_df + + +def pair_devices(centroids): """ Determins the two intersecting obstacles to that create a device. Parameters ---------- - Centroids : TYPE + centroids : TYPE DESCRIPTION. Returns ------- - Devices : TYPE + devices : TYPE DESCRIPTION. """ - Devices = np.empty((0, 2), dtype=int) - while len(Centroids) > 0: - # print(Centroids) + devices = np.empty((0, 2), dtype=int) + while len(centroids) > 0: + # print(centroids) # must have dimensions M,N with each M [index, x , y] - pair = centroid_diffs(Centroids[1:, :], Centroids[0, :]) - Devices = np.vstack((Devices, pair)) - Centroids = Centroids[~np.isin(Centroids[:, 0], pair), :] - return Devices + pair = centroid_diffs(centroids[1:, :], centroids[0, :]) + devices = np.vstack((devices, pair)) + centroids = centroids[~np.isin(centroids[:, 0], pair), :] + return devices + # # use https://stackoverflow.com/questions/10550477/how-do-i-set-color-to-rectangle-in-matplotlib # # to create rectangles from the polygons above # # scale color based on power device power range from 0 to max of array -# # This way the plot is array and grid independent, only based on centroid and device size, could make size variable if necessary. +# # This way the plot is array and grid independent, only based on centroid and device size, +# could make size variable if necessary. # -def create_power_heatmap(DEVICE_POWER, crs=None): +def create_power_heatmap(device_power, crs=None): """ Creates a heatmap of device location and power as cvalue. Parameters ---------- - DEVICE_POWER : dataframe - DEVICE_POWER dataframe. + device_power : dataframe + device_power dataframe. 
crs : int Coordinate Reverence Systems EPSG number @@ -233,39 +267,47 @@ def create_power_heatmap(DEVICE_POWER, crs=None): lowery = np.inf upperx = -np.inf uppery = -np.inf - # cmap = ListedColormap(plt.get_cmap('Greens')(np.linspace(0.1, 1, 256))) # skip too light colors - cmap = ListedColormap(plt.get_cmap('turbo')( - np.linspace(0.1, 1, 256))) # skip too light colors - # norm = plt.Normalize(DEVICE_POWER['Power [W]'].min(), DEVICE_POWER['Power [W]'].max()) + # cmap = ListedColormap(plt.get_cmap('Greens')(np.linspace(0.1, 1, 256))) + # # skip too light colors + cmap = ListedColormap( + plt.get_cmap("turbo")(np.linspace(0.1, 1, 256)) + ) # skip too light colors + # norm = plt.Normalize(device_power['Power [W]'].min(), device_power['Power [W]'].max()) norm = plt.Normalize( - 0.9*DEVICE_POWER['Power [W]'].min()*1e-6, DEVICE_POWER['Power [W]'].max()*1e-6) - for device_number, device in DEVICE_POWER.iterrows(): + 0.9 * device_power["Power [W]"].min() * 1e-6, + device_power["Power [W]"].max() * 1e-6, + ) + for _, device in device_power.iterrows(): # print(device) ax.add_patch( - Rectangle((device.lower_left[0]+adjust_x, device.lower_left[1]), - np.nanmax([device.width, device.height]), - np.nanmax([device.width, device.height]), - color=cmap(norm(device['Power [W]']*1e-6)))) - lowerx = np.nanmin([lowerx, device.lower_left[0]+adjust_x]) + Rectangle( + (device.lower_left[0] + adjust_x, device.lower_left[1]), + np.nanmax([device.width, device.height]), + np.nanmax([device.width, device.height]), + color=cmap(norm(device["Power [W]"] * 1e-6)), + ) + ) + lowerx = np.nanmin([lowerx, device.lower_left[0] + adjust_x]) lowery = np.nanmin([lowery, device.lower_left[1]]) - upperx = np.nanmax( - [upperx, device.lower_left[0]+adjust_x + device.width]) + upperx = np.nanmax([upperx, device.lower_left[0] + adjust_x + device.width]) uppery = np.nanmax([uppery, device.lower_left[1] + device.height]) xr = np.abs(np.max([lowerx, upperx]) - np.min([lowerx, upperx])) yr = np.abs(np.max([lowery, uppery]) - np.min([lowery, uppery])) - ax.set_xlim([lowerx-.05*xr, upperx+.05*xr]) - ax.set_ylim([lowery-.05*yr, uppery+.05*yr]) + ax.set_xlim([lowerx - 0.05 * xr, upperx + 0.05 * xr]) + ax.set_ylim([lowery - 0.05 * yr, uppery + 0.05 * yr]) cb = plt.colorbar(ScalarMappable(cmap=cmap, norm=norm), ax=ax) - cb.set_label('MW') - ax.ticklabel_format(useOffset=False, style='plain') - ax.set_xlabel('Longitude [deg]') - ax.set_ylabel('Latitude [deg]') + cb.set_label("MW") + ax.ticklabel_format(useOffset=False, style="plain") + ax.set_xlabel("Longitude [deg]") + ax.set_ylabel("Latitude [deg]") ax.set_xticks(np.linspace(lowerx, upperx, 5)) ax.set_xticklabels(ax.get_xticklabels(), ha="right", rotation=45) - ax.xaxis.set_major_formatter(FormatStrFormatter('%0.4f')) + ax.xaxis.set_major_formatter(FormatStrFormatter("%0.4f")) fig.tight_layout() return fig + + # %% @@ -282,45 +324,110 @@ def read_power_file(datafile): ------- Power : 1D Numpy Array [m] Individual data files for each observation [m]. - Total_Power : Scalar + total_power : Scalar Total power from all observations. 
""" - with io.open(datafile, "r") as inf:# = io.open(datafile, "r") # Read datafile + with io.open(datafile, "r", encoding="utf-8") as inf: + # = io.open(datafile, "r") # Read datafile for line in inf: # iterate through each line - if re.match('Iteration:', line): - Power = [] # If a new iteration is found, initalize varialbe or overwrite existing iteration + if re.match("Iteration:", line): + power_array = [] + # If a new iteration is found, initalize varialbe or overwrite existing iteration else: # data # extract float variable from line - power = float(line.split('=')[-1].split('W')[0].strip()) - Power = np.append(Power, power) # append data for each observation - Total_Power = np.nansum(Power) # Total power from all observations - return Power, Total_Power + power = float(line.split("=")[-1].split("W")[0].strip()) + power_array = np.append( + power_array, power + ) # append data for each observation + total_power = np.nansum(power_array) # Total power from all observations + return power_array, total_power def sort_data_files_by_runnumber(bc_data, datafiles): + """ + Sorts the data files based on the run number specified in `bc_data`. + + Parameters + ---------- + bc_data : pd.DataFrame + DataFrame containing the 'run number' and other metadata. + datafiles : list + List of data file paths. + + Returns + ------- + List[str] + List of sorted data file paths based on the run number. + """ bc_data_sorted = sort_bc_data_by_runnumber(bc_data.copy()) return [datafiles[i] for i in bc_data_sorted.original_order.to_numpy()] def sort_bc_data_by_runnumber(bc_data): - bc_data['original_order'] = range(0, len(bc_data)) - return bc_data.sort_values(by='run number') + """ + Sorts the `bc_data` DataFrame by the 'run number' column. + + Parameters + ---------- + bc_data : pd.DataFrame + DataFrame containing the 'run number' and other metadata. + + Returns + ------- + pd.DataFrame + Sorted DataFrame with an added 'original_order' column to track original indices. + """ + bc_data["original_order"] = range(0, len(bc_data)) + return bc_data.sort_values(by="run number") def reset_bc_data_order(bc_data): - if np.isin('original_order', bc_data.columns): + """ + Resets the order of `bc_data` DataFrame to its original order if 'original_order' column exists. + + Parameters + ---------- + bc_data : pd.DataFrame + DataFrame containing the 'run number' and other metadata. + + Returns + ------- + pd.DataFrame or None + Sorted DataFrame if 'original_order' column exists, otherwise None. + """ + if np.isin("original_order", bc_data.columns): bc_data = bc_data.sort() + return bc_data def roundup(x, val=2): + """ + Rounds up the number `x` to the nearest multiple of `val`. + + Parameters + ---------- + x : float + The number to round up. + val : int, optional + The value to round to the nearest multiple of (default is 2). + + Returns + ------- + float + The rounded-up number. + """ return np.ceil(x / val) * val + def calculate_power(power_files, probabilities_file, save_path=None, crs=None): """ - Reads the power files and calculates the total annual power based on hydrdynamic probabilities in probabilities_file. Data are saved as a csv files. + Reads the power files and calculates the total annual power based on + hydrodynamic probabilities in probabilities_file. + Data are saved as a csv files. 
Three files are output: - 1) Total Power among all devices for each hydrodynamic conditions BC_probability_Annual_SETS_wPower.csv + 1) Total Power among all devices for each + hydrodynamic conditions BC_probability_Annual_SETS_wPower.csv 2) Power per device per hydordynamic scenario. Power_per_device_per_scenario.csv 3) Total power per device during a year, scaled by $ of year in probabilities_file @@ -335,9 +442,9 @@ def calculate_power(power_files, probabilities_file, save_path=None, crs=None): Returns ------- - Devices : Dataframe + devices : Dataframe Scaled power per device per condition. - Devices_total : Dataframe + devices_total : Dataframe Total annual power per device. """ @@ -347,7 +454,7 @@ def calculate_power(power_files, probabilities_file, save_path=None, crs=None): if not os.path.exists(probabilities_file): raise FileNotFoundError(f"The file {probabilities_file} does not exist.") - datafiles_o = [s for s in os.listdir(power_files) if s.endswith('.OUT')] + datafiles_o = [s for s in os.listdir(power_files) if s.endswith(".OUT")] bc_data = pd.read_csv(probabilities_file) datafiles = sort_data_files_by_runnumber(bc_data, datafiles_o) @@ -355,172 +462,213 @@ def calculate_power(power_files, probabilities_file, save_path=None, crs=None): assert save_path is not None, "Specify an output directory" os.makedirs(save_path, exist_ok=True) - Total_Power = [] + total_power = [] ic = 0 for datafile in datafiles: p, tp = read_power_file(os.path.join(power_files, datafile)) # print(p) if ic == 0: - Power = np.empty((len(p), 0), float) - Power = np.append(Power, p[:, np.newaxis], axis=1) - Total_Power = np.append(Total_Power, tp) + power_array = np.empty((len(p), 0), float) + power_array = np.append(power_array, p[:, np.newaxis], axis=1) + total_power = np.append(total_power, tp) ic += 1 - Power_Scaled = bc_data['% of yr'].to_numpy() * Power - Total_Power_Scaled = bc_data['% of yr'] * Total_Power + power_scaled = bc_data["% of yr"].to_numpy() * power_array + total_power_scaled = bc_data["% of yr"] * total_power # Summary of power given percent of year for each array - # need to reorder Total_Power and Power to run roder in - bc_data['Power_Run_Name'] = datafiles - # bc_data['% of yr'] * Total_Power - bc_data['Power [W]'] = Total_Power_Scaled - bc_data.to_csv(os.path.join( - save_path, 'BC_probability_wPower.csv'), index=False) + # need to reorder total_power and Power to run roder in + bc_data["Power_Run_Name"] = datafiles + # bc_data['% of yr'] * total_power + bc_data["Power [W]"] = total_power_scaled + bc_data.to_csv(os.path.join(save_path, "BC_probability_wPower.csv"), index=False) fig, ax = plt.subplots(figsize=(9, 6)) - ax.bar(np.arange(np.shape(Total_Power_Scaled)[ - 0])+1, np.log10(Total_Power_Scaled), width=1, edgecolor='black') - ax.set_xlabel('Run Scenario') - ax.set_ylabel('Power [$log_{10}(Watts)$]') - ax.set_title('Total Power Annual') + ax.bar( + np.arange(np.shape(total_power_scaled)[0]) + 1, + np.log10(total_power_scaled), + width=1, + edgecolor="black", + ) + ax.set_xlabel("Run Scenario") + ax.set_ylabel("Power [$log_{10}(Watts)$]") + ax.set_title("Total Power Annual") fig.tight_layout() - fig.savefig(os.path.join( - save_path, 'Total_Scaled_Power_Bars_per_Run.png')) - - foo = np.sqrt(np.shape(Power_Scaled)[1]) - fig, AX = plt.subplots(np.round(foo).astype(int), np.ceil( - foo).astype(int), sharex=True, sharey=True, figsize=(12, 10)) - nr, nc = AX.shape - AX = AX.flatten() - mxy = roundup(np.log10(Power_Scaled.max().max())) - ndx = np.ceil(Power_Scaled.shape[0]/6) - for ic 
in range(Power_Scaled.shape[1]): + fig.savefig(os.path.join(save_path, "Total_Scaled_Power_Bars_per_Run.png")) + + subplot_grid_size = np.sqrt(np.shape(power_scaled)[1]) + fig, axes_grid = plt.subplots( + np.round(subplot_grid_size).astype(int), + np.ceil(subplot_grid_size).astype(int), + sharex=True, + sharey=True, + figsize=(12, 10), + ) + nr, nc = axes_grid.shape + axes_grid = axes_grid.flatten() + mxy = roundup(np.log10(power_scaled.max().max())) + ndx = np.ceil(power_scaled.shape[0] / 6) + for ic in range(power_scaled.shape[1]): # fig,ax = plt.subplots() - AX[ic].bar(np.arange(np.shape(Power_Scaled)[0])+1, - np.log10(Power_Scaled[:, ic]), width=1, edgecolor='black') -# AX[ic].text(Power_Scaled.shape[0]/2, mxy-1, f'{datafiles[ic]}', fontsize=8, ha='center', va='top') - AX[ic].set_title(f'{datafiles[ic]}', fontsize=8) - AX[ic].set_ylim([0, mxy]) - AX[ic].set_xticks(np.arange(0, Power_Scaled.shape[0]+ndx, ndx)) - AX[ic].set_xlim([0, Power_Scaled.shape[0]+1]) - AX = AX.reshape(nr, nc) - for ax in AX[:, 0]: - ax. set_ylabel('Power [$log_{10}(Watts)$]') - for ax in AX[-1, :]: - ax.set_xlabel('Obstacle') + axes_grid[ic].bar( + np.arange(np.shape(power_scaled)[0]) + 1, + np.log10(power_scaled[:, ic]), + width=1, + edgecolor="black", + ) + # axes_grid[ic].text(power_scaled.shape[0]/2, mxy-1, + # f'{datafiles[ic]}', fontsize=8, ha='center', va='top') + axes_grid[ic].set_title(f"{datafiles[ic]}", fontsize=8) + axes_grid[ic].set_ylim([0, mxy]) + axes_grid[ic].set_xticks(np.arange(0, power_scaled.shape[0] + ndx, ndx)) + axes_grid[ic].set_xlim([0, power_scaled.shape[0] + 1]) + axes_grid = axes_grid.reshape(nr, nc) + for ax in axes_grid[:, 0]: + ax.set_ylabel("Power [$log_{10}(Watts)$]") + for ax in axes_grid[-1, :]: + ax.set_xlabel("Obstacle") fig.tight_layout() - fig.savefig(os.path.join( - save_path, 'Scaled_Power_Bars_per_run_obstacle.png')) + fig.savefig(os.path.join(save_path, "Scaled_Power_Bars_per_run_obstacle.png")) fig, ax = plt.subplots(figsize=(9, 6)) - ax.bar(np.arange(np.shape(Power_Scaled)[ - 0])+1, np.log10(np.sum(Power_Scaled, axis=1)), width=1, edgecolor='black') - ax.set_xlabel('Obstacle') - ax.set_ylabel('Power [$log_{10}(Watts)$]') - ax.set_title('Total Obstacle Power for all Runs') + ax.bar( + np.arange(np.shape(power_scaled)[0]) + 1, + np.log10(np.sum(power_scaled, axis=1)), + width=1, + edgecolor="black", + ) + ax.set_xlabel("Obstacle") + ax.set_ylabel("Power [$log_{10}(Watts)$]") + ax.set_title("Total Obstacle Power for all Runs") fig.tight_layout() - fig.savefig(os.path.join( - save_path, 'Total_Scaled_Power_Bars_per_obstacle.png')) + fig.savefig(os.path.join(save_path, "Total_Scaled_Power_Bars_per_obstacle.png")) - power_device_configuration_file = [s for s in os.listdir(power_files) if ( - s.endswith('.pol') | s.endswith('.Pol') | s.endswith('.POL'))] + power_device_configuration_file = [ + s + for s in os.listdir(power_files) + if (s.endswith(".pol") | s.endswith(".Pol") | s.endswith(".POL")) + ] if len(power_device_configuration_file) > 0: - assert len( - power_device_configuration_file) == 1, "More than 1 *.pol file found" - - # Group arrays to devices and calculate power proportionally for each scenario (datafile), such that the sum of each scenario for each device is the yearly totoal power for that device - Obstacles = read_obstacle_polygon_file(os.path.join( - power_files, power_device_configuration_file[0])) - fig = plot_test_obstacle_locations(Obstacles) - fig.savefig(os.path.join(save_path, 'Obstacle_Locations.png')) - - Centroids = 
find_mean_point_of_obstacle_polygon(Obstacles) - Centroids_DF = pd.DataFrame( - data=Centroids, columns=['obstacle', 'X', 'Y']) - Centroids_DF['obstacle'] = Centroids_DF['obstacle'].astype(int) - Centroids_DF = Centroids_DF.set_index(['obstacle']) - Device_index = pair_devices(Centroids) - DeviceindexDF = pd.DataFrame({'Device_Number': range(Device_index.shape[0]), - 'Index 1': Device_index[:, 0], - 'Index 2': Device_index[:, 1], - 'X': Centroids_DF.loc[Device_index[:, 0], 'X'], - 'Y': Centroids_DF.loc[Device_index[:, 0], 'Y']}) - DeviceindexDF['Device_Number'] = DeviceindexDF['Device_Number']+1 - DeviceindexDF = DeviceindexDF.set_index('Device_Number') - DeviceindexDF.to_csv(os.path.join(save_path, 'Obstacle_Matching.csv')) + assert len(power_device_configuration_file) == 1, "More than 1 *.pol file found" + + # Group arrays to devices and calculate power + # proportionally for each scenario (datafile), + # such that the sum of each scenario for each + # device is the yearly totoal power for that device + obstacles = read_obstacle_polygon_file( + os.path.join(power_files, power_device_configuration_file[0]) + ) + fig = plot_test_obstacle_locations(obstacles) + fig.savefig(os.path.join(save_path, "Obstacle_Locations.png")) + + centroids = find_mean_point_of_obstacle_polygon(obstacles) + centroids_df = pd.DataFrame(data=centroids, columns=["obstacle", "X", "Y"]) + centroids_df["obstacle"] = centroids_df["obstacle"].astype(int) + centroids_df = centroids_df.set_index(["obstacle"]) + device_index = pair_devices(centroids) + device_index_df = pd.DataFrame( + { + "Device_Number": range(device_index.shape[0]), + "Index 1": device_index[:, 0], + "Index 2": device_index[:, 1], + "X": centroids_df.loc[device_index[:, 0], "X"], + "Y": centroids_df.loc[device_index[:, 0], "Y"], + } + ) + device_index_df["Device_Number"] = device_index_df["Device_Number"] + 1 + device_index_df = device_index_df.set_index("Device_Number") + device_index_df.to_csv(os.path.join(save_path, "Obstacle_Matching.csv")) fig, ax = plt.subplots(figsize=(10, 10)) - for device in DeviceindexDF.index.values: - ax.plot(DeviceindexDF.loc[device, 'X'], - DeviceindexDF.loc[device, 'Y'], '.', alpha=0) - ax.text(DeviceindexDF.loc[device, 'X'], - DeviceindexDF.loc[device, 'Y'], device, fontsize=8) - fig.savefig(os.path.join(save_path, 'Device Number Location.png')) - - device_power = np.empty((0, np.shape(Power)[1]), dtype=float) - for ic0, ic1 in Device_index: + for device in device_index_df.index.values: + ax.plot( + device_index_df.loc[device, "X"], + device_index_df.loc[device, "Y"], + ".", + alpha=0, + ) + ax.text( + device_index_df.loc[device, "X"], + device_index_df.loc[device, "Y"], + device, + fontsize=8, + ) + fig.savefig(os.path.join(save_path, "Device Number Location.png")) + + device_power = np.empty((0, np.shape(power_array)[1]), dtype=float) + for ic0, ic1 in device_index: device_power = np.vstack( - (device_power, Power[ic0, :] + Power[ic1, :])) + (device_power, power_array[ic0, :] + power_array[ic1, :]) + ) - # device_power = Power[0::2, :] + Power[1::2, :] + # device_power = power_array[0::2, :] + power_array[1::2, :] - Devices = pd.DataFrame({}) - device_power_year = device_power * bc_data['% of yr'].to_numpy() + devices = pd.DataFrame({}) + device_power_year = device_power * bc_data["% of yr"].to_numpy() for ic, name in enumerate(datafiles): - Devices[name] = device_power_year[:, ic] - Devices['Device'] = np.arange(1, len(Devices)+1) - Devices = Devices.set_index('Device') - Devices.to_csv(os.path.join( - save_path, 
'Power_per_device_per_scenario.csv')) - - foo = np.sqrt(Devices.shape[1]) - fig, AX = plt.subplots(np.round(foo).astype(int), np.ceil( - foo).astype(int), sharex=True, sharey=True, figsize=(12, 10)) - nr, nc = AX.shape - AX = AX.flatten() - mxy = roundup(np.log10(Devices.max().max())) - ndx = np.ceil(Devices.shape[0]/6) - for ic, col in enumerate(Devices.columns): + devices[name] = device_power_year[:, ic] + devices["Device"] = np.arange(1, len(devices) + 1) + devices = devices.set_index("Device") + devices.to_csv(os.path.join(save_path, "Power_per_device_per_scenario.csv")) + + subplot_grid_size = np.sqrt(devices.shape[1]) + fig, axes_grid = plt.subplots( + np.round(subplot_grid_size).astype(int), + np.ceil(subplot_grid_size).astype(int), + sharex=True, + sharey=True, + figsize=(12, 10), + ) + nr, nc = axes_grid.shape + axes_grid = axes_grid.flatten() + mxy = roundup(np.log10(devices.max().max())) + ndx = np.ceil(devices.shape[0] / 6) + for ic, col in enumerate(devices.columns): # fig,ax = plt.subplots() - AX[ic].bar(np.arange(np.shape(Devices[col])[0])+1, - np.log10(Devices[col].to_numpy()), width=1.0, edgecolor='black') -# AX[ic].text(Devices.shape[0]/2, mxy-1, f'{col}', fontsize=8, ha='center', va='top') - AX[ic].set_title(f'{col}', fontsize=8) - AX[ic].set_ylim([0, mxy]) - AX[ic].set_xticks(np.arange(0, Devices.shape[0]+ndx, ndx)) - AX[ic].set_xlim([0, Devices.shape[0]+1]) - AX = AX.reshape(nr, nc) - for ax in AX[:, 0]: - ax. set_ylabel('Power [$log_{10}(Watts)$]') - for ax in AX[-1, :]: - ax.set_xlabel('Device') - AX = AX.flatten() + axes_grid[ic].bar( + np.arange(np.shape(devices[col])[0]) + 1, + np.log10(devices[col].to_numpy()), + width=1.0, + edgecolor="black", + ) + # axes_grid[ic].text(devices.shape[0]/2, mxy-1, f'{col}', + # fontsize=8, ha='center', va='top') + axes_grid[ic].set_title(f"{col}", fontsize=8) + axes_grid[ic].set_ylim([0, mxy]) + axes_grid[ic].set_xticks(np.arange(0, devices.shape[0] + ndx, ndx)) + axes_grid[ic].set_xlim([0, devices.shape[0] + 1]) + axes_grid = axes_grid.reshape(nr, nc) + for ax in axes_grid[:, 0]: + ax.set_ylabel("Power [$log_{10}(Watts)$]") + for ax in axes_grid[-1, :]: + ax.set_xlabel("Device") + axes_grid = axes_grid.flatten() fig.tight_layout() - fig.savefig(os.path.join( - save_path, 'Scaled_Power_per_device_per_scenario.png')) + fig.savefig(os.path.join(save_path, "Scaled_Power_per_device_per_scenario.png")) # power per scenario per device # Sum power for the entire years (all datafiles) for each device - Devices_total = pd.DataFrame({}) - Devices_total['Power [W]'] = device_power_year.sum(axis=1) - Devices_total['Device'] = np.arange(1, len(Devices_total)+1) - Devices_total = Devices_total.set_index('Device') - Devices_total.to_csv(os.path.join( - save_path, 'Power_per_device_annual.csv')) + devices_total = pd.DataFrame({}) + devices_total["Power [W]"] = device_power_year.sum(axis=1) + devices_total["Device"] = np.arange(1, len(devices_total) + 1) + devices_total = devices_total.set_index("Device") + devices_total.to_csv(os.path.join(save_path, "Power_per_device_annual.csv")) fig, ax = plt.subplots(figsize=(9, 6)) - ax.bar(Devices_total.index, np.log10( - Devices_total['Power [W]']), width=1, edgecolor='black') - ax. 
set_ylabel('Power [$log_{10}(Watts)$]') - ax.set_xlabel('Device') - fig.savefig(os.path.join( - save_path, 'Total_Scaled_Power_per_Device_.png')) - - DEVICE_POWER = extract_device_location(Obstacles, Device_index) - DEVICE_POWER['Power [W]'] = Devices_total['Power [W]'].values - fig = create_power_heatmap(DEVICE_POWER, crs=crs) - fig.savefig(os.path.join(save_path, 'Device_Power.png'), dpi=150) + ax.bar( + devices_total.index, + np.log10(devices_total["Power [W]"]), + width=1, + edgecolor="black", + ) + ax.set_ylabel("Power [$log_{10}(Watts)$]") + ax.set_xlabel("Device") + fig.savefig(os.path.join(save_path, "Total_Scaled_Power_per_Device_.png")) + + device_power = extract_device_location(obstacles, device_index) + device_power["Power [W]"] = devices_total["Power [W]"].values + fig = create_power_heatmap(device_power, crs=crs) + fig.savefig(os.path.join(save_path, "Device_Power.png"), dpi=150) # plt.close(fig) - return None diff --git a/seat/modules/shear_stress_module.py b/seat/modules/shear_stress_module.py index b1217ff0..2b38c326 100644 --- a/seat/modules/shear_stress_module.py +++ b/seat/modules/shear_stress_module.py @@ -1,10 +1,16 @@ +# pylint: disable=too-many-statements +# pylint: disable=too-many-arguments +# pylint: disable=too-many-locals +# pylint: disable=too-many-branches + """ /***************************************************************************. shear_stress_module.py Copyright 2023, Integral Consulting Inc. All rights reserved. - PURPOSE: module for calcualting shear stress (sediment mobility) change from a shear stress stressor + PURPOSE: module for calculating shear stress (sediment mobility) + change from a shear stress stressor PROJECT INFORMATION: Name: SEAT - Spatial and Environmental Assessment Toolkit @@ -20,12 +26,11 @@ """ import os - import numpy as np import pandas as pd -from netCDF4 import Dataset +from netCDF4 import Dataset # pylint: disable=no-name-in-module -from .stressor_utils import ( +from seat.modules.stressor_utils import ( estimate_grid_spacing, create_structured_array_from_unstructured, calc_receptor_array, @@ -34,19 +39,19 @@ numpy_array_to_raster, classify_layer_area, bin_layer, - classify_layer_area_2nd_Constraint, + classify_layer_area_2nd_constraint, resample_structured_grid, - secondary_constraint_geotiff_to_numpy + secondary_constraint_geotiff_to_numpy, ) -def critical_shear_stress(D_meters, rhow=1024, nu=1e-6, s=2.65, g=9.81): +def critical_shear_stress(d_meters, rhow=1024, nu=1e-6, s=2.65, g=9.81): """ Calculate critical shear stress from grain size. Parameters ---------- - D_meters : Array + d_meters : Array grain size in meters. rhow : scalar, optional density of water in kg/m3. The default is 1024. @@ -63,13 +68,13 @@ def critical_shear_stress(D_meters, rhow=1024, nu=1e-6, s=2.65, g=9.81): DESCRIPTION. """ - Dstar = ((g * (s-1)) / nu**2)**(1/3) * D_meters - SHcr = (0.3/(1+1.2*Dstar)) + 0.055*(1-np.exp(-0.02 * Dstar)) - taucrit = rhow * (s - 1) * g * D_meters * SHcr # in Pascals + d_star = ((g * (s - 1)) / nu**2) ** (1 / 3) * d_meters + sh_cr = (0.3 / (1 + 1.2 * d_star)) + 0.055 * (1 - np.exp(-0.02 * d_star)) + taucrit = rhow * (s - 1) * g * d_meters * sh_cr # in Pascals return taucrit -def classify_mobility(mobility_parameter_dev, mobility_parameter_nodev, nochange_threshold=0.1): +def classify_mobility(mobility_parameter_dev, mobility_parameter_nodev): """ classifies sediment mobility from device runs to no device runs.
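The reworked critical_shear_stress above is a Soulsby-style fit: a dimensionless grain size d_star, a critical Shields parameter sh_cr, and the dimensional critical stress taucrit in Pascals. As a quick sanity check of that arithmetic, a minimal standalone sketch (plain NumPy, using the default water and sediment constants from the signature):

    import numpy as np

    def critical_shear_stress(d_meters, rhow=1024, nu=1e-6, s=2.65, g=9.81):
        # dimensionless grain size
        d_star = ((g * (s - 1)) / nu**2) ** (1 / 3) * d_meters
        # critical Shields parameter
        sh_cr = (0.3 / (1 + 1.2 * d_star)) + 0.055 * (1 - np.exp(-0.02 * d_star))
        # critical shear stress in Pascals
        return rhow * (s - 1) * g * d_meters * sh_cr

    # typical 200 micron beach sand -> roughly 0.16 Pa
    print(critical_shear_stress(np.array([200e-6])))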
@@ -95,23 +100,65 @@ def classify_mobility(mobility_parameter_dev, mobility_parameter_nodev, nochange mobility_classification = np.zeros(mobility_parameter_dev.shape) # New Erosion = 3 - mobility_classification = np.where(((mobility_parameter_nodev < mobility_parameter_dev) & ( - mobility_parameter_nodev < 1) & (mobility_parameter_dev >= 1)), 3, mobility_classification) + mobility_classification = np.where( + ( + (mobility_parameter_nodev < mobility_parameter_dev) + & (mobility_parameter_nodev < 1) + & (mobility_parameter_dev >= 1) + ), + 3, + mobility_classification, + ) # Increased Erosion (Tw>Tb) & (Tw-Tb)>1 = 2 - mobility_classification = np.where(((mobility_parameter_dev > mobility_parameter_nodev) & ( - mobility_parameter_nodev >= 1) & (mobility_parameter_dev >= 1)), 2, mobility_classification) + mobility_classification = np.where( + ( + (mobility_parameter_dev > mobility_parameter_nodev) + & (mobility_parameter_nodev >= 1) + & (mobility_parameter_dev >= 1) + ), + 2, + mobility_classification, + ) # Reduced Erosion (Tw1 = 1 - mobility_classification = np.where(((mobility_parameter_dev < mobility_parameter_nodev) & ( - mobility_parameter_nodev >= 1) & (mobility_parameter_dev >= 1)), 1, mobility_classification) + mobility_classification = np.where( + ( + (mobility_parameter_dev < mobility_parameter_nodev) + & (mobility_parameter_nodev >= 1) + & (mobility_parameter_dev >= 1) + ), + 1, + mobility_classification, + ) # Reduced Deposition (Tw>Tb) & (Tw-Tb)<1 = -1 - mobility_classification = np.where(((mobility_parameter_dev > mobility_parameter_nodev) & ( - mobility_parameter_nodev < 1) & (mobility_parameter_dev < 1)), -1, mobility_classification) + mobility_classification = np.where( + ( + (mobility_parameter_dev > mobility_parameter_nodev) + & (mobility_parameter_nodev < 1) + & (mobility_parameter_dev < 1) + ), + -1, + mobility_classification, + ) # Increased Deposition (Tw>Tb) & (Tw-Tb)>1 = -2 - mobility_classification = np.where(((mobility_parameter_dev < mobility_parameter_nodev) & ( - mobility_parameter_nodev < 1) & (mobility_parameter_dev < 1)), -2, mobility_classification) + mobility_classification = np.where( + ( + (mobility_parameter_dev < mobility_parameter_nodev) + & (mobility_parameter_nodev < 1) + & (mobility_parameter_dev < 1) + ), + -2, + mobility_classification, + ) # New Deposition = -3 - mobility_classification = np.where(((mobility_parameter_dev < mobility_parameter_nodev) & ( - mobility_parameter_nodev >= 1) & (mobility_parameter_dev < 1)), -3, mobility_classification) + mobility_classification = np.where( + ( + (mobility_parameter_dev < mobility_parameter_nodev) + & (mobility_parameter_nodev >= 1) + & (mobility_parameter_dev < 1) + ), + -3, + mobility_classification, + ) # NoChange = 0 return mobility_classification @@ -137,24 +184,25 @@ def check_grid_define_vars(dataset): name of shear stress variable. 
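classify_mobility, reformatted above, compares the mobility parameter (shear stress divided by critical shear stress) with and without devices against a threshold of 1 and returns an integer class: 3, 2, 1 for new, increased, and reduced erosion; -1, -2, -3 for reduced, increased, and new deposition; and 0 for no change. A small sketch of that mapping on hand-picked values, assuming the SEAT package is importable:

    import numpy as np
    from seat.modules.shear_stress_module import classify_mobility

    nodev = np.array([0.5, 1.5, 1.5, 0.4, 0.5, 1.2])  # mobility without devices
    dev = np.array([1.2, 2.0, 1.1, 0.8, 0.3, 0.8])    # mobility with devices
    print(classify_mobility(dev, nodev))
    # classes 3, 2, 1, -1, -2, -3 for the six cells above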
""" - vars = list(dataset.variables) - if 'TAUMAX' in vars: - gridtype = 'structured' - tauvar = 'TAUMAX' + variable_names = list(dataset.variables) + if "TAUMAX" in variable_names: + gridtype = "structured" + tauvar = "TAUMAX" else: - gridtype = 'unstructured' - tauvar = 'taus' + gridtype = "unstructured" + tauvar = "taus" xvar, yvar = dataset.variables[tauvar].coordinates.split() return gridtype, xvar, yvar, tauvar -def calculate_shear_stress_stressors(fpath_nodev, - fpath_dev, - probabilities_file, - receptor_filename=None, - latlon=True, - value_selection=None - ): +def calculate_shear_stress_stressors( + fpath_nodev, + fpath_dev, + probabilities_file, + receptor_filename=None, + latlon=True, + value_selection=None, +): """ Calculates the stressor layers as arrays from model and parameter input. @@ -207,16 +255,21 @@ def calculate_shear_stress_stressors(fpath_nodev, if not os.path.exists(fpath_dev): raise FileNotFoundError(f"The file {fpath_dev} does not exist.") - files_nodev = [i for i in os.listdir(fpath_nodev) if i.endswith('.nc')] - files_dev = [i for i in os.listdir(fpath_dev) if i.endswith('.nc')] + xcor, ycor = None, None + + files_nodev = [i for i in os.listdir(fpath_nodev) if i.endswith(".nc")] + files_dev = [i for i in os.listdir(fpath_dev) if i.endswith(".nc")] # Load and sort files if len(files_nodev) == 1 & len(files_dev) == 1: # asumes a concatonated files with shape # [run_num, time, rows, cols] - with Dataset(os.path.join(fpath_dev, files_dev[0])) as file_dev_present, \ - Dataset(os.path.join(fpath_nodev, files_nodev[0])) as file_dev_notpresent: + with Dataset( + os.path.join(fpath_dev, files_dev[0]) + ) as file_dev_present, Dataset( + os.path.join(fpath_nodev, files_nodev[0]) + ) as file_dev_notpresent: gridtype, xvar, yvar, tauvar = check_grid_define_vars(file_dev_present) xcor = file_dev_present.variables[xvar][:].data ycor = file_dev_present.variables[yvar][:].data @@ -225,48 +278,55 @@ def calculate_shear_stress_stressors(fpath_nodev, # same number of files, file name must be formatted with either run number elif len(files_nodev) == len(files_dev): - # asumes each run is separate with the some_name_RunNum_map.nc, where run number comes at the last underscore before _map.nc + # asumes each run is separate with the some_name_RunNum_map.nc, + # where run number comes at the last underscore before _map.nc run_num_nodev = np.zeros((len(files_nodev))) for ic, file in enumerate(files_nodev): - run_num_nodev[ic] = int(file.split('.')[0].split('_')[-2]) + run_num_nodev[ic] = int(file.split(".")[0].split("_")[-2]) run_num_dev = np.zeros((len(files_dev))) for ic, file in enumerate(files_dev): - run_num_dev[ic] = int(file.split('.')[0].split('_')[-2]) + run_num_dev[ic] = int(file.split(".")[0].split("_")[-2]) # ensure run oder for nodev matches dev files if np.any(run_num_nodev != run_num_dev): adjust_dev_order = [] for ri in run_num_nodev: adjust_dev_order = np.append( - adjust_dev_order, np.flatnonzero(run_num_dev == ri)) + adjust_dev_order, np.flatnonzero(run_num_dev == ri) + ) files_dev = [files_dev[int(i)] for i in adjust_dev_order] run_num_dev = [run_num_dev[int(i)] for i in adjust_dev_order] - DF = pd.DataFrame({'files_nodev': files_nodev, - 'run_num_nodev': run_num_nodev, - 'files_dev': files_dev, - 'run_num_dev': run_num_dev}) - DF = DF.sort_values(by='run_num_dev') + df = pd.DataFrame( + { + "files_nodev": files_nodev, + "run_num_nodev": run_num_nodev, + "files_dev": files_dev, + "run_num_dev": run_num_dev, + } + ) + df = df.sort_values(by="run_num_dev") first_run = 
True ir = 0 - for _, row in DF.iterrows(): - with Dataset(os.path.join(fpath_nodev, row.files_nodev)) as file_dev_notpresent, \ - Dataset(os.path.join(fpath_dev, row.files_dev)) as file_dev_present: - - gridtype, xvar, yvar, tauvar = check_grid_define_vars( - file_dev_present) + for _, row in df.iterrows(): + with Dataset( + os.path.join(fpath_nodev, row.files_nodev) + ) as file_dev_notpresent, Dataset( + os.path.join(fpath_dev, row.files_dev) + ) as file_dev_present: + gridtype, xvar, yvar, tauvar = check_grid_define_vars(file_dev_present) if first_run: tmp = file_dev_notpresent.variables[tauvar][:].data - if gridtype == 'structured': + if gridtype == "structured": tau_nodev = np.zeros( - (DF.shape[0], tmp.shape[0], tmp.shape[1], tmp.shape[2])) + (df.shape[0], tmp.shape[0], tmp.shape[1], tmp.shape[2]) + ) tau_dev = np.zeros( - (DF.shape[0], tmp.shape[0], tmp.shape[1], tmp.shape[2])) + (df.shape[0], tmp.shape[0], tmp.shape[1], tmp.shape[2]) + ) else: - tau_nodev = np.zeros( - (DF.shape[0], tmp.shape[0], tmp.shape[1])) - tau_dev = np.zeros( - (DF.shape[0], tmp.shape[0], tmp.shape[1])) + tau_nodev = np.zeros((df.shape[0], tmp.shape[0], tmp.shape[1])) + tau_dev = np.zeros((df.shape[0], tmp.shape[0], tmp.shape[1])) xcor = file_dev_notpresent.variables[xvar][:].data ycor = file_dev_notpresent.variables[yvar][:].data first_run = False @@ -275,126 +335,152 @@ def calculate_shear_stress_stressors(fpath_nodev, ir += 1 else: - raise Exception( - f"Number of device runs ({len(files_dev)}) must be the same as no device runs ({len(files_nodev)}).") + raise ValueError( + f"Number of device runs ({len(files_dev)}) must be the same " + f"as no device runs ({len(files_nodev)})." + ) # Finished loading and sorting files - if (gridtype == 'structured'): + if gridtype == "structured": + if (xcor[0, 0] == 0) & (xcor[-1, 0] == 0): # at least for some runs the boundary has 0 coordinates. Check and fix. 
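The multi-file branch above assumes each run is stored in its own NetCDF named like some_name_RunNum_map.nc and recovers the run number from the second-to-last underscore-separated token, so that the no-device and device files can be paired run by run. A one-line illustration of that split (file names are hypothetical):

    files = ["scenario_2_map.nc", "scenario_10_map.nc", "scenario_1_map.nc"]
    run_nums = [int(f.split(".")[0].split("_")[-2]) for f in files]
    print(run_nums)  # [2, 10, 1]; the DataFrame is then sorted by run number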
- xcor, ycor, tau_nodev, tau_dev = trim_zeros( - xcor, ycor, tau_nodev, tau_dev) + xcor, ycor, tau_nodev, tau_dev = trim_zeros(xcor, ycor, tau_nodev, tau_dev) - if not (probabilities_file == ""): + if not probabilities_file == "": if not os.path.exists(probabilities_file): raise FileNotFoundError(f"The file {probabilities_file} does not exist.") # Load BC file with probabilities and find appropriate probability - BC_probability = pd.read_csv(probabilities_file, delimiter=",") - BC_probability['run_num'] = BC_probability['run number']-1 - BC_probability = BC_probability.sort_values(by='run number') - BC_probability["probability"] = BC_probability["% of yr"].values/100 - # BC_probability - if 'Exclude' in BC_probability.columns: - BC_probability = BC_probability[~( - (BC_probability['Exclude'] == 'x') | (BC_probability['Exclude'] == 'X'))] + bc_probability = pd.read_csv(probabilities_file, delimiter=",") + bc_probability["run_num"] = bc_probability["run number"] - 1 + bc_probability = bc_probability.sort_values(by="run number") + bc_probability["probability"] = bc_probability["% of yr"].values / 100 + # bc_probability + if "Exclude" in bc_probability.columns: + bc_probability = bc_probability[ + ~( + (bc_probability["Exclude"] == "x") + | (bc_probability["Exclude"] == "X") + ) + ] else: # assume run_num in file name is return interval - BC_probability = pd.DataFrame() + bc_probability = pd.DataFrame() # ignore number and start sequentially from zero - BC_probability['run_num'] = np.arange(0, tau_dev.shape[0]) + bc_probability["run_num"] = np.arange(0, tau_dev.shape[0]) # assumes run_num in name is the return interval - BC_probability["probability"] = 1/DF.run_num_dev.to_numpy() - BC_probability["probability"] = BC_probability["probability"] / \ - BC_probability["probability"].sum() # rescale to ensure = 1 + bc_probability["probability"] = 1 / df.run_num_dev.to_numpy() + bc_probability["probability"] = ( + bc_probability["probability"] / bc_probability["probability"].sum() + ) # rescale to ensure = 1 # Calculate Stressor and Receptors - if value_selection == 'Maximum': + if value_selection == "Maximum": tau_dev = np.nanmax(tau_dev, axis=1, keepdims=True) # max over time - tau_nodev = np.nanmax( - tau_nodev, axis=1, keepdims=True) # max over time - elif value_selection == 'Mean': + tau_nodev = np.nanmax(tau_nodev, axis=1, keepdims=True) # max over time + elif value_selection == "Mean": tau_dev = np.nanmean(tau_dev, axis=1, keepdims=True) # mean over time - tau_nodev = np.nanmean( - tau_nodev, axis=1, keepdims=True) # mean over time - elif value_selection == 'Final Timestep': + tau_nodev = np.nanmean(tau_nodev, axis=1, keepdims=True) # mean over time + elif value_selection == "Final Timestep": tau_dev = tau_dev[:, -2:-1, :] # final timestep tau_nodev = tau_nodev[:, -2:-1, :] # final timestep else: tau_dev = np.nanmax(tau_dev, axis=1, keepdims=True) # default to max over time - tau_nodev = np.nanmax(tau_nodev, axis=1, keepdims=True) # default to max over time + tau_nodev = np.nanmax( + tau_nodev, axis=1, keepdims=True + ) # default to max over time # initialize arrays - if gridtype == 'structured': + if gridtype == "structured": tau_combined_nodev = np.zeros(np.shape(tau_nodev[0, 0, :, :])) tau_combined_dev = np.zeros(np.shape(tau_dev[0, 0, :, :])) else: tau_combined_nodev = np.zeros(np.shape(tau_nodev)[-1]) tau_combined_dev = np.zeros(np.shape(tau_dev)[-1]) - for run_number, prob in zip(BC_probability['run_num'].values, - BC_probability["probability"].values): - - tau_combined_nodev = 
tau_combined_nodev + \ - prob * tau_nodev[run_number, -1, :] + for run_number, prob in zip( + bc_probability["run_num"].values, bc_probability["probability"].values + ): + tau_combined_nodev = tau_combined_nodev + prob * tau_nodev[run_number, -1, :] tau_combined_dev = tau_combined_dev + prob * tau_dev[run_number, -1, :] - receptor_array = calc_receptor_array( - receptor_filename, xcor, ycor, latlon=latlon) - taucrit = critical_shear_stress(D_meters=receptor_array * 1e-6, - rhow=1024, - nu=1e-6, - s=2.65, - g=9.81) # units N/m2 = Pa + receptor_array = calc_receptor_array(receptor_filename, xcor, ycor, latlon=latlon) + taucrit = critical_shear_stress( + d_meters=receptor_array * 1e-6, rhow=1024, nu=1e-6, s=2.65, g=9.81 + ) # units N/m2 = Pa tau_diff = tau_combined_dev - tau_combined_nodev mobility_parameter_nodev = tau_combined_nodev / taucrit mobility_parameter_nodev = np.where( - receptor_array == 0, 0, mobility_parameter_nodev) + receptor_array == 0, 0, mobility_parameter_nodev + ) mobility_parameter_dev = tau_combined_dev / taucrit - mobility_parameter_dev = np.where( - receptor_array == 0, 0, mobility_parameter_dev) + mobility_parameter_dev = np.where(receptor_array == 0, 0, mobility_parameter_dev) # Calculate risk metrics over all runs mobility_parameter_diff = mobility_parameter_dev - mobility_parameter_nodev - #EQ 7 in Jones et al. (2018) doi:10.3390/en11082036 - risk = np.round(mobility_parameter_dev * ((mobility_parameter_dev-mobility_parameter_nodev) / np.abs(mobility_parameter_dev - mobility_parameter_nodev))) + (mobility_parameter_dev - mobility_parameter_nodev) + # EQ 7 in Jones et al. (2018) doi:10.3390/en11082036 + risk = np.round( + mobility_parameter_dev + * ( + (mobility_parameter_dev - mobility_parameter_nodev) + / np.abs(mobility_parameter_dev - mobility_parameter_nodev) + ) + ) + (mobility_parameter_dev - mobility_parameter_nodev) - if gridtype == 'structured': + if gridtype == "structured": mobility_classification = classify_mobility( - mobility_parameter_dev, mobility_parameter_nodev) + mobility_parameter_dev, mobility_parameter_nodev + ) dx = np.nanmean(np.diff(xcor[:, 0])) dy = np.nanmean(np.diff(ycor[0, :])) rx = xcor ry = ycor - dict_of_arrays = {'shear_stress_without_devices':tau_combined_nodev, - 'shear_stress_with_devices': tau_combined_dev, - 'shear_stress_difference': tau_diff, - 'sediment_mobility_without_devices': mobility_parameter_nodev, - 'sediment_mobility_with_devices': mobility_parameter_dev, - 'sediment_mobility_difference': mobility_parameter_diff, - 'sediment_mobility_classified':mobility_classification, - 'sediment_grain_size':receptor_array, - 'shear_stress_risk_metric':risk} + dict_of_arrays = { + "shear_stress_without_devices": tau_combined_nodev, + "shear_stress_with_devices": tau_combined_dev, + "shear_stress_difference": tau_diff, + "sediment_mobility_without_devices": mobility_parameter_nodev, + "sediment_mobility_with_devices": mobility_parameter_dev, + "sediment_mobility_difference": mobility_parameter_diff, + "sediment_mobility_classified": mobility_classification, + "sediment_grain_size": receptor_array, + "shear_stress_risk_metric": risk, + } else: # unstructured dxdy = estimate_grid_spacing(xcor, ycor, nsamples=100) dx = dxdy dy = dxdy rx, ry, tau_diff_struct = create_structured_array_from_unstructured( - xcor, ycor, tau_diff, dxdy, flatness=0.2) + xcor, ycor, tau_diff, dxdy, flatness=0.2 + ) _, _, tau_combined_dev_struct = create_structured_array_from_unstructured( - xcor, ycor, tau_combined_dev, dxdy, flatness=0.2) + xcor, ycor, 
tau_combined_dev, dxdy, flatness=0.2 + ) _, _, tau_combined_nodev_struct = create_structured_array_from_unstructured( - xcor, ycor, tau_combined_nodev, dxdy, flatness=0.2) + xcor, ycor, tau_combined_nodev, dxdy, flatness=0.2 + ) if not ((receptor_filename is None) or (receptor_filename == "")): - _, _, mobility_parameter_nodev_struct = create_structured_array_from_unstructured( - xcor, ycor, mobility_parameter_nodev, dxdy, flatness=0.2) - _, _, mobility_parameter_dev_struct = create_structured_array_from_unstructured( - xcor, ycor, mobility_parameter_dev, dxdy, flatness=0.2) - _, _, mobility_parameter_diff_struct = create_structured_array_from_unstructured( - xcor, ycor, mobility_parameter_diff, dxdy, flatness=0.2) + _, _, mobility_parameter_nodev_struct = ( + create_structured_array_from_unstructured( + xcor, ycor, mobility_parameter_nodev, dxdy, flatness=0.2 + ) + ) + _, _, mobility_parameter_dev_struct = ( + create_structured_array_from_unstructured( + xcor, ycor, mobility_parameter_dev, dxdy, flatness=0.2 + ) + ) + _, _, mobility_parameter_diff_struct = ( + create_structured_array_from_unstructured( + xcor, ycor, mobility_parameter_diff, dxdy, flatness=0.2 + ) + ) _, _, receptor_array_struct = create_structured_array_from_unstructured( - xcor, ycor, receptor_array, dxdy, flatness=0.2) + xcor, ycor, receptor_array, dxdy, flatness=0.2 + ) _, _, risk_struct = create_structured_array_from_unstructured( - xcor, ycor, risk, dxdy, flatness=0.2) + xcor, ycor, risk, dxdy, flatness=0.2 + ) else: mobility_parameter_nodev_struct = np.nan * tau_diff_struct mobility_parameter_dev_struct = np.nan * tau_diff_struct @@ -402,19 +488,23 @@ def calculate_shear_stress_stressors(fpath_nodev, receptor_array_struct = np.nan * tau_diff_struct risk_struct = np.nan * tau_diff_struct mobility_classification = classify_mobility( - mobility_parameter_dev_struct, mobility_parameter_nodev_struct) + mobility_parameter_dev_struct, mobility_parameter_nodev_struct + ) mobility_classification = np.where( - np.isnan(tau_diff_struct), -100, mobility_classification) - - dict_of_arrays = {'shear_stress_without_devices':tau_combined_nodev_struct, - 'shear_stress_with_devices': tau_combined_dev_struct, - 'shear_stress_difference': tau_diff_struct, - 'sediment_mobility_without_devices': mobility_parameter_nodev_struct, - 'sediment_mobility_with_devices': mobility_parameter_dev_struct, - 'sediment_mobility_difference': mobility_parameter_diff_struct, - 'sediment_mobility_classified':mobility_classification, - 'sediment_grain_size':receptor_array_struct, - 'shear_stress_risk_metric':risk_struct} + np.isnan(tau_diff_struct), -100, mobility_classification + ) + + dict_of_arrays = { + "shear_stress_without_devices": tau_combined_nodev_struct, + "shear_stress_with_devices": tau_combined_dev_struct, + "shear_stress_difference": tau_diff_struct, + "sediment_mobility_without_devices": mobility_parameter_nodev_struct, + "sediment_mobility_with_devices": mobility_parameter_dev_struct, + "sediment_mobility_difference": mobility_parameter_diff_struct, + "sediment_mobility_classified": mobility_classification, + "sediment_grain_size": receptor_array_struct, + "shear_stress_risk_metric": risk_struct, + } return dict_of_arrays, rx, ry, dx, dy, gridtype @@ -427,7 +517,8 @@ def run_shear_stress_stressor( output_path, receptor_filename=None, secondary_constraint_filename=None, - value_selection=None): + value_selection=None, +): """ creates geotiffs and area change statistics files for shear stress change @@ -454,60 +545,74 @@ def 
run_shear_stress_stressor( key = names of output rasters, val = full path to raster: """ - os.makedirs(output_path, exist_ok=True) # create output directory if it doesn't exist + os.makedirs( + output_path, exist_ok=True + ) # create output directory if it doesn't exist - dict_of_arrays, rx, ry, dx, dy, gridtype = calculate_shear_stress_stressors(fpath_nodev=dev_notpresent_file, - fpath_dev=dev_present_file, - probabilities_file=probabilities_file, - receptor_filename=receptor_filename, - latlon=crs == 4326, - value_selection=value_selection) + dict_of_arrays, rx, ry, dx, dy, gridtype = calculate_shear_stress_stressors( + fpath_nodev=dev_notpresent_file, + fpath_dev=dev_present_file, + probabilities_file=probabilities_file, + receptor_filename=receptor_filename, + latlon=crs == 4326, + value_selection=value_selection, + ) if not ((receptor_filename is None) or (receptor_filename == "")): - use_numpy_arrays = ['shear_stress_without_devices', - 'shear_stress_with_devices', - 'shear_stress_difference', - 'sediment_mobility_without_devices', - 'sediment_mobility_with_devices', - 'sediment_mobility_difference', - 'sediment_mobility_classified', - 'sediment_grain_size', - 'shear_stress_risk_metric'] + use_numpy_arrays = [ + "shear_stress_without_devices", + "shear_stress_with_devices", + "shear_stress_difference", + "sediment_mobility_without_devices", + "sediment_mobility_with_devices", + "sediment_mobility_difference", + "sediment_mobility_classified", + "sediment_grain_size", + "shear_stress_risk_metric", + ] else: - use_numpy_arrays = ['shear_stress_without_devices', - 'shear_stress_with_devices', - 'shear_stress_difference'] - - if not ((secondary_constraint_filename is None) or (secondary_constraint_filename == "")): + use_numpy_arrays = [ + "shear_stress_without_devices", + "shear_stress_with_devices", + "shear_stress_difference", + ] + + if not ( + (secondary_constraint_filename is None) or (secondary_constraint_filename == "") + ): if not os.path.exists(secondary_constraint_filename): - raise FileNotFoundError(f"The file {secondary_constraint_filename} does not exist.") - rrx, rry, constraint = secondary_constraint_geotiff_to_numpy(secondary_constraint_filename) - dict_of_arrays['shear_stress_risk_layer'] = resample_structured_grid(rrx, rry, constraint, rx, ry, interpmethod='nearest') - use_numpy_arrays.append('shear_stress_risk_layer') - - numpy_array_names = [i + '.tif' for i in use_numpy_arrays] + raise FileNotFoundError( + f"The file {secondary_constraint_filename} does not exist." 
+ ) + rrx, rry, constraint = secondary_constraint_geotiff_to_numpy( + secondary_constraint_filename + ) + dict_of_arrays["shear_stress_risk_layer"] = resample_structured_grid( + rrx, rry, constraint, rx, ry, interpmethod="nearest" + ) + use_numpy_arrays.append("shear_stress_risk_layer") + + numpy_array_names = [i + ".tif" for i in use_numpy_arrays] output_rasters = [] for array_name, use_numpy_array in zip(numpy_array_names, use_numpy_arrays): - if gridtype == 'structured': + if gridtype == "structured": numpy_array = np.flip(np.transpose(dict_of_arrays[use_numpy_array]), axis=0) else: numpy_array = np.flip(dict_of_arrays[use_numpy_array], axis=0) cell_resolution = [dx, dy] if crs == 4326: - rxx = np.where(rx > 180, rx-360, rx) - bounds = [rxx.min() - dx/2, ry.max() - dy/2] + rxx = np.where(rx > 180, rx - 360, rx) + bounds = [rxx.min() - dx / 2, ry.max() - dy / 2] else: - bounds = [rx.min() - dx/2, ry.max() - dy/2] + bounds = [rx.min() - dx / 2, ry.max() - dy / 2] rows, cols = numpy_array.shape # create an ouput raster given the stressor file path output_rasters.append(os.path.join(output_path, array_name)) output_raster = create_raster( - os.path.join(output_path, array_name), - cols, - rows, - nbands=1) + os.path.join(output_path, array_name), cols, rows, nbands=1 + ) # post processing of numpy array to output raster numpy_array_to_raster( @@ -516,94 +621,200 @@ def run_shear_stress_stressor( bounds, cell_resolution, crs, - os.path.join(output_path, array_name)) + os.path.join(output_path, array_name), + ) output_raster = None # Area calculations pull form rasters to ensure uniformity - bin_layer(os.path.join(output_path, 'shear_stress_difference.tif'), - receptor_filename=None, - receptor_names=None, - latlon=crs == 4326).to_csv(os.path.join(output_path, "shear_stress_difference.csv"), index=False) - if not ((secondary_constraint_filename is None) or (secondary_constraint_filename == "")): - bin_layer(os.path.join(output_path, 'shear_stress_difference.tif'), - receptor_filename=os.path.join(output_path, "shear_stress_risk_layer.tif"), - receptor_names=None, - limit_receptor_range=[0, np.inf], - latlon=crs == 4326, - receptor_type='risk layer').to_csv(os.path.join(output_path, "shear_stress_difference_at_secondary_constraint.csv"), index=False) + bin_layer( + os.path.join(output_path, "shear_stress_difference.tif"), + receptor_filename=None, + receptor_names=None, + latlon=crs == 4326, + ).to_csv(os.path.join(output_path, "shear_stress_difference.csv"), index=False) + if not ( + (secondary_constraint_filename is None) or (secondary_constraint_filename == "") + ): + bin_layer( + os.path.join(output_path, "shear_stress_difference.tif"), + receptor_filename=os.path.join(output_path, "shear_stress_risk_layer.tif"), + receptor_names=None, + limit_receptor_range=[0, np.inf], + latlon=crs == 4326, + receptor_type="risk layer", + ).to_csv( + os.path.join( + output_path, "shear_stress_difference_at_secondary_constraint.csv" + ), + index=False, + ) if not ((receptor_filename is None) or (receptor_filename == "")): - bin_layer(os.path.join(output_path, 'shear_stress_difference.tif'), - receptor_filename=os.path.join(output_path, 'sediment_grain_size.tif'), - receptor_names=None, - limit_receptor_range=[0, np.inf], - latlon=crs == 4326, - receptor_type='grain size').to_csv(os.path.join(output_path, "shear_stress_difference_at_sediment_grain_size.csv"), index=False) - - bin_layer(os.path.join(output_path, 'sediment_mobility_difference.tif'), - receptor_filename=None, - receptor_names=None, - 
limit_receptor_range=[0, np.inf], - latlon=crs == 4326).to_csv(os.path.join(output_path, "sediment_mobility_difference.csv"), index=False) - - bin_layer(os.path.join(output_path, 'sediment_mobility_difference.tif'), - receptor_filename=os.path.join(output_path, 'sediment_grain_size.tif'), - receptor_names=None, - limit_receptor_range=[0, np.inf], - latlon=crs == 4326, - receptor_type='grain size').to_csv(os.path.join(output_path, "sediment_mobility_difference_at_sediment_grain_size.csv"), index=False) - - bin_layer(os.path.join(output_path, 'shear_stress_risk_metric.tif'), - receptor_filename=None, - receptor_names=None, - limit_receptor_range=[0, np.inf], - latlon=crs == 4326).to_csv(os.path.join(output_path, "shear_stress_risk_metric.csv"), index=False) - - bin_layer(os.path.join(output_path, 'shear_stress_risk_metric.tif'), - receptor_filename=os.path.join(output_path, 'sediment_grain_size.tif'), - receptor_names=None, - limit_receptor_range=[0, np.inf], - latlon=crs == 4326, - receptor_type='grain size').to_csv(os.path.join(output_path, "shear_stress_risk_metric_at_sediment_grain_size.csv"), index=False) - - classify_layer_area(os.path.join(output_path, "sediment_mobility_classified.tif"), - at_values=[-3, -2, -1, 0, 1, 2, 3], - value_names=['New Deposition', 'Increased Deposition', 'Reduced Deposition', - 'No Change', 'Reduced Erosion', 'Increased Erosion', 'New Erosion'], - latlon=crs == 4326).to_csv(os.path.join(output_path, "sediment_mobility_classified.csv"), index=False) - - classify_layer_area(os.path.join(output_path, "sediment_mobility_classified.tif"), - receptor_filename=os.path.join(output_path, 'sediment_grain_size.tif'), - at_values=[-3, -2, -1, 0, 1, 2, 3], - value_names=['New Deposition', 'Increased Deposition', 'Reduced Deposition', - 'No Change', 'Reduced Erosion', 'Increased Erosion', 'New Erosion'], - limit_receptor_range=[0, np.inf], - latlon=crs == 4326, - receptor_type='grain size').to_csv(os.path.join(output_path, "sediment_mobility_classified_at_sediment_grain_size.csv"), index=False) - - if not ((secondary_constraint_filename is None) or (secondary_constraint_filename == "")): - bin_layer(os.path.join(output_path, 'sediment_mobility_difference.tif'), - receptor_filename=os.path.join(output_path, "shear_stress_risk_layer.tif"), - receptor_names=None, - limit_receptor_range=[0, np.inf], - latlon=crs == 4326, - receptor_type='risk layer').to_csv(os.path.join(output_path, "sediment_mobility_difference_at_shear_stress_risk_layer.csv"), index=False) - - bin_layer(os.path.join(output_path, 'shear_stress_risk_metric.tif'), - receptor_filename=os.path.join(output_path, "shear_stress_risk_layer.tif"), - receptor_names=None, - limit_receptor_range=[0, np.inf], - latlon=crs == 4326, - receptor_type='risk layer').to_csv(os.path.join(output_path, "shear_stress_risk_metric_at_shear_stress_risk_layer.csv"), index=False) - - classify_layer_area_2nd_Constraint(raster_to_sample = os.path.join(output_path, "sediment_mobility_difference.tif"), - secondary_constraint_filename=os.path.join(output_path, "shear_stress_risk_layer.tif"), - at_raster_values=[-3, -2, -1, 0, 1, 2, 3], - at_raster_value_names=['New Deposition', 'Increased Deposition', 'Reduced Deposition', - 'No Change', 'Reduced Erosion', 'Increased Erosion', 'New Erosion'], - limit_constraint_range=[0, np.inf], - latlon=crs == 4326, - receptor_type='risk layer').to_csv(os.path.join(output_path, "sediment_mobility_difference_at_shear_stress_risk_layer.csv"), index=False) - OUTPUT = {} + bin_layer( + 
os.path.join(output_path, "shear_stress_difference.tif"), + receptor_filename=os.path.join(output_path, "sediment_grain_size.tif"), + receptor_names=None, + limit_receptor_range=[0, np.inf], + latlon=crs == 4326, + receptor_type="grain size", + ).to_csv( + os.path.join( + output_path, "shear_stress_difference_at_sediment_grain_size.csv" + ), + index=False, + ) + + bin_layer( + os.path.join(output_path, "sediment_mobility_difference.tif"), + receptor_filename=None, + receptor_names=None, + limit_receptor_range=[0, np.inf], + latlon=crs == 4326, + ).to_csv( + os.path.join(output_path, "sediment_mobility_difference.csv"), index=False + ) + + bin_layer( + os.path.join(output_path, "sediment_mobility_difference.tif"), + receptor_filename=os.path.join(output_path, "sediment_grain_size.tif"), + receptor_names=None, + limit_receptor_range=[0, np.inf], + latlon=crs == 4326, + receptor_type="grain size", + ).to_csv( + os.path.join( + output_path, "sediment_mobility_difference_at_sediment_grain_size.csv" + ), + index=False, + ) + + bin_layer( + os.path.join(output_path, "shear_stress_risk_metric.tif"), + receptor_filename=None, + receptor_names=None, + limit_receptor_range=[0, np.inf], + latlon=crs == 4326, + ).to_csv(os.path.join(output_path, "shear_stress_risk_metric.csv"), index=False) + + bin_layer( + os.path.join(output_path, "shear_stress_risk_metric.tif"), + receptor_filename=os.path.join(output_path, "sediment_grain_size.tif"), + receptor_names=None, + limit_receptor_range=[0, np.inf], + latlon=crs == 4326, + receptor_type="grain size", + ).to_csv( + os.path.join( + output_path, "shear_stress_risk_metric_at_sediment_grain_size.csv" + ), + index=False, + ) + + classify_layer_area( + os.path.join(output_path, "sediment_mobility_classified.tif"), + at_values=[-3, -2, -1, 0, 1, 2, 3], + value_names=[ + "New Deposition", + "Increased Deposition", + "Reduced Deposition", + "No Change", + "Reduced Erosion", + "Increased Erosion", + "New Erosion", + ], + latlon=crs == 4326, + ).to_csv( + os.path.join(output_path, "sediment_mobility_classified.csv"), index=False + ) + + classify_layer_area( + os.path.join(output_path, "sediment_mobility_classified.tif"), + receptor_filename=os.path.join(output_path, "sediment_grain_size.tif"), + at_values=[-3, -2, -1, 0, 1, 2, 3], + value_names=[ + "New Deposition", + "Increased Deposition", + "Reduced Deposition", + "No Change", + "Reduced Erosion", + "Increased Erosion", + "New Erosion", + ], + limit_receptor_range=[0, np.inf], + latlon=crs == 4326, + receptor_type="grain size", + ).to_csv( + os.path.join( + output_path, "sediment_mobility_classified_at_sediment_grain_size.csv" + ), + index=False, + ) + + if not ( + (secondary_constraint_filename is None) + or (secondary_constraint_filename == "") + ): + bin_layer( + os.path.join(output_path, "sediment_mobility_difference.tif"), + receptor_filename=os.path.join( + output_path, "shear_stress_risk_layer.tif" + ), + receptor_names=None, + limit_receptor_range=[0, np.inf], + latlon=crs == 4326, + receptor_type="risk layer", + ).to_csv( + os.path.join( + output_path, + "sediment_mobility_difference_at_shear_stress_risk_layer.csv", + ), + index=False, + ) + + bin_layer( + os.path.join(output_path, "shear_stress_risk_metric.tif"), + receptor_filename=os.path.join( + output_path, "shear_stress_risk_layer.tif" + ), + receptor_names=None, + limit_receptor_range=[0, np.inf], + latlon=crs == 4326, + receptor_type="risk layer", + ).to_csv( + os.path.join( + output_path, + 
"shear_stress_risk_metric_at_shear_stress_risk_layer.csv", + ), + index=False, + ) + + classify_layer_area_2nd_constraint( + raster_to_sample=os.path.join( + output_path, "sediment_mobility_difference.tif" + ), + secondary_constraint_filename=os.path.join( + output_path, "shear_stress_risk_layer.tif" + ), + at_raster_values=[-3, -2, -1, 0, 1, 2, 3], + at_raster_value_names=[ + "New Deposition", + "Increased Deposition", + "Reduced Deposition", + "No Change", + "Reduced Erosion", + "Increased Erosion", + "New Erosion", + ], + limit_constraint_range=[0, np.inf], + latlon=crs == 4326, + receptor_type="risk layer", + ).to_csv( + os.path.join( + output_path, + "sediment_mobility_difference_at_shear_stress_risk_layer.csv", + ), + index=False, + ) + output = {} for val in output_rasters: - OUTPUT[os.path.basename(os.path.normpath(val)).split('.')[0]] = val - return OUTPUT + output[os.path.basename(os.path.normpath(val)).split(".")[0]] = val + return output diff --git a/seat/modules/stressor_utils.py b/seat/modules/stressor_utils.py index 722322b8..7c5af38a 100644 --- a/seat/modules/stressor_utils.py +++ b/seat/modules/stressor_utils.py @@ -1,65 +1,61 @@ +# pylint: disable=too-many-statements +# pylint: disable=too-many-arguments +# pylint: disable=too-many-locals +# pylint: disable=too-many-branches """ -/***************************************************************************. +velocity_module.py: Definitions for stressor modules. - velocity_module.py +This module includes functions for estimating grid spacing, creating structured +arrays from unstructured data, redefining grids, resampling grids, and handling +raster data. - PURPOSE: definitions used by the stressor modules - - AUTHORS - Timothy Nelson (tnelson@integral-corp.com) - Sam McWilliams (smcwilliams@integral-corp.com) - Eben Pendelton - - NOTES (Data descriptions and any script specific notes) - 1. called by shear_stress_module.py, velocity_module.py, acoustics_module.py +Dependencies: +- pyproj, osgeo, numpy, pandas, matplotlib, scipy """ - +import os +import sys +import random import numpy as np +from pyproj import Geod import pandas as pd from matplotlib.tri import LinearTriInterpolator, TriAnalyzer, Triangulation from scipy.interpolate import griddata from osgeo import gdal, osr -import os def estimate_grid_spacing(x, y, nsamples=100): """ - Estimates the grid spacing of an unstructured grid to create a similar resolution structured grid + Estimate grid spacing for an unstructured grid to create a structured grid. Parameters ---------- - x : array - x-coordinates. - y : array - y-coordiantes. - nsamples : scalar, optional - number of random points to sample spacing to nearest cell. The default is 100. + x, y : array + Coordinates of the points. + nsamples : int, optional + Number of random points to sample. Default is 100. Returns ------- - dxdy : array - estimated median spacing between samples. - + float + Estimated median spacing between samples. 
""" - import random - import sys - random.seed(10) coords = list(set(zip(x, y))) if nsamples != len(x): - points = [random.choice(coords) - for i in range(nsamples)] # pick N random points + points = [ + random.choice(coords) for i in range(nsamples) + ] # pick N random points else: points = coords - MD = [] + md = [] for p0x, p0y in points: minimum_distance = sys.maxsize for px, py in coords: distance = np.sqrt((p0x - px) ** 2 + (p0y - py) ** 2) if (distance < minimum_distance) & (distance != 0): minimum_distance = distance - MD.append(minimum_distance) - dxdy = np.median(MD) + md.append(minimum_distance) + dxdy = np.median(md) return dxdy @@ -91,8 +87,8 @@ def create_structured_array_from_unstructured(x, y, z, dxdy, flatness=0.2): """ # flatness is from 0-.5 .5 is equilateral triangle - refx = np.arange(np.nanmin(x), np.nanmax(x)+dxdy, dxdy) - refy = np.arange(np.nanmin(y), np.nanmax(y)+dxdy, dxdy) + refx = np.arange(np.nanmin(x), np.nanmax(x) + dxdy, dxdy) + refy = np.arange(np.nanmin(y), np.nanmax(y) + dxdy, dxdy) refxg, refyg = np.meshgrid(refx, refy) tri = Triangulation(x, y) mask = TriAnalyzer(tri).get_flat_tri_mask(flatness) @@ -132,15 +128,22 @@ def redefine_structured_grid(x, y, z): xx = np.linspace(min_x, max_x, x.shape[1]) yy = np.linspace(min_y, max_y, y.shape[0]) dx = np.nanmin([np.nanmedian(np.diff(xx)), np.nanmedian(np.diff(yy))]) - xx = np.arange(min_x, max_x+dx, dx) - yy = np.arange(min_y, max_y+dx, dx) + xx = np.arange(min_x, max_x + dx, dx) + yy = np.arange(min_y, max_y + dx, dx) x_new, y_new = np.meshgrid(xx, yy) - z_new = griddata((x.flatten(), y.flatten()), z.flatten(), - (x_new, y_new), method='nearest', fill_value=0) + z_new = griddata( + (x.flatten(), y.flatten()), + z.flatten(), + (x_new, y_new), + method="nearest", + fill_value=0, + ) return x_new, y_new, z_new -def resample_structured_grid(x_grid, y_grid, z, X_grid_out, Y_grid_out, interpmethod='nearest'): +def resample_structured_grid( + x_grid, y_grid, z, x_grid_out, y_grid_out, interpmethod="nearest" +): """ interpolates a structured grid onto a new structured grid. @@ -152,9 +155,9 @@ def resample_structured_grid(x_grid, y_grid, z, X_grid_out, Y_grid_out, interpme y-coordinates. z : array input value. - X_grid_out : array + x_grid_out : array x-coordinates. - Y_grid_out : array + y_grid_out : array y-coordinates. interpmethod : str, optional interpolation method to use. The default is 'nearest'. @@ -165,7 +168,13 @@ def resample_structured_grid(x_grid, y_grid, z, X_grid_out, Y_grid_out, interpme interpolated z-value on X/Y _grid_out. 
""" - return griddata((x_grid.flatten(), y_grid.flatten()), z.flatten(), (X_grid_out, Y_grid_out), method=interpmethod, fill_value=0) + return griddata( + (x_grid.flatten(), y_grid.flatten()), + z.flatten(), + (x_grid_out, y_grid_out), + method=interpmethod, + fill_value=0, + ) def calc_receptor_array(receptor_filename, x, y, latlon=False, mask=None): @@ -196,33 +205,40 @@ def calc_receptor_array(receptor_filename, x, y, latlon=False, mask=None): """ # if ((receptor_filename is not None) or (not receptor_filename == "")): if not ((receptor_filename is None) or (receptor_filename == "")): - if receptor_filename.endswith('.tif'): + if receptor_filename.endswith(".tif"): data = gdal.Open(receptor_filename) img = data.GetRasterBand(1) receptor_array = img.ReadAsArray() receptor_array[receptor_array < 0] = 0 - (upper_left_x, x_size, x_rotation, upper_left_y, - y_rotation, y_size) = data.GetGeoTransform() + (upper_left_x, x_size, _, upper_left_y, _, y_size) = data.GetGeoTransform() cols = data.RasterXSize rows = data.RasterYSize r_rows = np.arange(rows) * y_size + upper_left_y + (y_size / 2) r_cols = np.arange(cols) * x_size + upper_left_x + (x_size / 2) - if latlon == True: - r_cols = np.where(r_cols < 0, r_cols+360, r_cols) + if latlon: + r_cols = np.where(r_cols < 0, r_cols + 360, r_cols) x_grid, y_grid = np.meshgrid(r_cols, r_rows) - receptor_array = griddata((x_grid.flatten(), y_grid.flatten( - )), receptor_array.flatten(), (x, y), method='nearest', fill_value=0) - - elif receptor_filename.endswith('.csv'): + receptor_array = griddata( + (x_grid.flatten(), y_grid.flatten()), + receptor_array.flatten(), + (x, y), + method="nearest", + fill_value=0, + ) + + elif receptor_filename.endswith(".csv"): receptor_array = pd.read_csv( - receptor_filename, header=None, index_col=0).to_numpy().item() * np.ones(x.shape) + receptor_filename, header=None, index_col=0 + ).to_numpy().item() * np.ones(x.shape) else: - raise Exception( - f"Invalid Receptor File {receptor_filename}. Must be of type .tif or .csv") + raise ValueError( + f"Invalid Receptor File {receptor_filename}. Must be of type .tif or .csv" + ) else: # taucrit without a receptor - # Assume the following grain sizes and conditions for typical beach sand (Nielsen, 1992 p.108) - receptor_array = 200*1e-6 * np.ones(x.shape) + # Assume the following grain sizes and conditions for + # typical beach sand (Nielsen, 1992 p.108) + receptor_array = 200 * 1e-6 * np.ones(x.shape) if mask is not None: receptor_array = np.where(mask, receptor_array, np.nan) return receptor_array @@ -264,7 +280,7 @@ def create_raster( cols, rows, nbands, - eType=gdal.GDT_Float32, + e_type=gdal.GDT_Float32, ): """ Create a gdal raster object. @@ -279,7 +295,7 @@ def create_raster( number of rows. nbands : scalar number of bads to write. - eType : gdal, optional + e_type : gdal, optional type of geotiff and precision. The default is gdal.GDT_Float32. Returns @@ -297,7 +313,7 @@ def create_raster( int(cols), int(rows), nbands, - eType=gdal.GDT_Float32, + e_type, ) # spatial_reference = osr.SpatialReference() @@ -315,7 +331,7 @@ def numpy_array_to_raster( cell_resolution, spatial_reference_system_wkid, output_path, - nodata_val=None + nodata_val=None, ): """ @@ -347,7 +363,6 @@ def numpy_array_to_raster( """ - """Create the output raster.""" # create output raster # (upper_left_x, x_resolution, x_skew 0, upper_left_y, y_skew 0, y_resolution). 
geotransform = ( @@ -368,17 +383,17 @@ def numpy_array_to_raster( output_raster.SetGeoTransform(geotransform) output_band = output_raster.GetRasterBand(1) if nodata_val is not None: - output_band.SetNoDataValue(nodata_val) #Not an issue, may be in other cases? + output_band.SetNoDataValue(nodata_val) # Not an issue, may be in other cases? output_band.WriteArray(numpy_array) output_band.FlushCache() output_band.ComputeStatistics( False, - ) # you want this false, true will make computed results, but is faster, could be a setting in the UI perhaps, esp for large rasters? - - if os.path.exists(output_path) == False: - raise Exception("Failed to create raster: %s" % output_path) + ) # you want this false, true will make computed results, but is faster, + # could be a setting in the UI perhaps, esp for large rasters? + if not os.path.exists(output_path): + raise RuntimeError(f"Failed to create raster: {output_path}") # this closes the file output_raster = None return output_path @@ -444,34 +459,51 @@ def read_raster(raster_name): data = gdal.Open(raster_name) img = data.GetRasterBand(1) raster_array = img.ReadAsArray() - (upper_left_x, x_size, x_rotation, upper_left_y, - y_rotation, y_size) = data.GetGeoTransform() + (upper_left_x, x_size, _, upper_left_y, _, y_size) = data.GetGeoTransform() cols = data.RasterXSize rows = data.RasterYSize r_rows = np.arange(rows) * y_size + upper_left_y + (y_size / 2) r_cols = np.arange(cols) * x_size + upper_left_x + (x_size / 2) rx, ry = np.meshgrid(r_cols, r_rows) data = None + return rx, ry, raster_array + def secondary_constraint_geotiff_to_numpy(filename): + """ + Converts a secondary constraint GeoTIFF file to a NumPy array. + + Parameters + ---------- + filename : str + The file path to the GeoTIFF file. + + Returns + ------- + tuple + A tuple containing: + - x_grid (ndarray): The X coordinates grid. + - y_grid (ndarray): The Y coordinates grid. + - array (ndarray): The data values from the GeoTIFF file as a 2D array. + """ data = gdal.Open(filename) img = data.GetRasterBand(1) array = img.ReadAsArray() - (upper_left_x, x_size, x_rotation, upper_left_y, - y_rotation, y_size) = data.GetGeoTransform() + (upper_left_x, x_size, _, upper_left_y, _, y_size) = data.GetGeoTransform() cols = data.RasterXSize rows = data.RasterYSize r_rows = np.arange(rows) * y_size + upper_left_y + (y_size / 2) r_cols = np.arange(cols) * x_size + upper_left_x + (x_size / 2) prj = data.GetProjection() srs = osr.SpatialReference(wkt=prj) - if srs.GetAttrValue('AUTHORITY',1) == '4326': - r_cols = np.where(r_cols < 0, r_cols+360, r_cols) + if srs.GetAttrValue("AUTHORITY", 1) == "4326": + r_cols = np.where(r_cols < 0, r_cols + 360, r_cols) x_grid, y_grid = np.meshgrid(r_cols, r_rows) return x_grid, y_grid, array + def calculate_cell_area(rx, ry, latlon=True): """ Calculates the area of each cell @@ -495,30 +527,30 @@ def calculate_cell_area(rx, ry, latlon=True): area of each cell. 
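create_raster and numpy_array_to_raster, both completed above, split GeoTIFF output into two steps: allocate the GDAL raster, then write the array with an upper-left origin, cell resolution, and EPSG code. A minimal sketch mirroring how the stressor modules call the pair (paths and coordinates are hypothetical):

    import os
    import numpy as np
    from seat.modules.stressor_utils import create_raster, numpy_array_to_raster

    array = np.random.rand(50, 80).astype(np.float32)  # rows x cols
    rows, cols = array.shape
    os.makedirs("output", exist_ok=True)
    out_path = "output/example_layer.tif"              # hypothetical output path
    raster = create_raster(out_path, cols, rows, nbands=1)
    numpy_array_to_raster(
        raster,
        array,
        [-124.5, 44.6],   # [upper-left x, upper-left y] origin, hypothetical
        [0.001, 0.001],   # cell resolution [dx, dy]
        4326,             # spatial reference EPSG code
        out_path,
    )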
""" - import numpy as np - from pyproj import Geod - if latlon == True: + if latlon: geod = Geod(ellps="WGS84") - lon2D, lat2D = np.where(rx > 180, rx-360, rx), ry - _, _, distEW = geod.inv( - lon2D[:, :-1], lat2D[:, 1:], lon2D[:, 1:], lat2D[:, 1:]) - _, _, distNS = geod.inv( - lon2D[1:, :], lat2D[1:, :], lon2D[1:, :], lat2D[:-1, :]) - square_area = distEW[1:, :] * distNS[:, 1:] + lon_2d, lat_2d = np.where(rx > 180, rx - 360, rx), ry + _, _, dist_ew = geod.inv( + lon_2d[:, :-1], lat_2d[:, 1:], lon_2d[:, 1:], lat_2d[:, 1:] + ) + _, _, dist_ns = geod.inv( + lon_2d[1:, :], lat_2d[1:, :], lon_2d[1:, :], lat_2d[:-1, :] + ) + square_area = dist_ew[1:, :] * dist_ns[:, 1:] else: - square_area = np.zeros((rx.shape[0]-1, ry.shape[1]-1)) - for row in range(rx.shape[0]-1): - for col in range(ry.shape[1]-1): - dx = rx[row, col+1] - rx[row, col] - dy = ry[row+1, col] - ry[row, col] - square_area[row, col] = dx*dy + square_area = np.zeros((rx.shape[0] - 1, ry.shape[1] - 1)) + for row in range(rx.shape[0] - 1): + for col in range(ry.shape[1] - 1): + dx = rx[row, col + 1] - rx[row, col] + dy = ry[row + 1, col] - ry[row, col] + square_area[row, col] = dx * dy rxm = np.zeros(square_area.shape) rym = np.zeros(square_area.shape) - for row in range(rx.shape[0]-1): - rxm[row, :] = (rx[row, :-1] + rx[row, 1:])/2 - for col in range(ry.shape[1]-1): - rym[:, col] = (ry[:-1, col] + ry[1:, col])/2 + for row in range(rx.shape[0] - 1): + rxm[row, :] = (rx[row, :-1] + rx[row, 1:]) / 2 + for col in range(ry.shape[1] - 1): + rym[:, col] = (ry[:-1, col] + ry[1:, col]) / 2 return rxm, rym, square_area @@ -538,7 +570,7 @@ def bin_data(zm, square_area, nbins=25): Returns ------- - DATA : Dictionary + data : Dictionary Dictionary for each bin contating bin start : the starting value of each bin bin end : the last value of each bin @@ -549,22 +581,24 @@ def bin_data(zm, square_area, nbins=25): """ hist, bins = np.histogram(zm, bins=nbins) center = (bins[:-1] + bins[1:]) / 2 - DATA = {} - DATA['bin start'] = bins[:-1] - DATA['bin end'] = bins[1:] - DATA['bin center'] = center - DATA['count'] = hist - DATA['Area'] = np.zeros(hist.shape) + data = {} + data["bin start"] = bins[:-1] + data["bin end"] = bins[1:] + data["bin center"] = center + data["count"] = hist + data["Area"] = np.zeros(hist.shape) for ic, (bin_start, bin_end) in enumerate(zip(bins[:-1], bins[1:])): - if ic < len(hist)-1: + if ic < len(hist) - 1: area_ix = np.flatnonzero((zm >= bin_start) & (zm < bin_end)) else: area_ix = np.flatnonzero((zm >= bin_start) & (zm <= bin_end)) - DATA['Area'][ic] = np.sum(square_area[area_ix]) - return DATA + data["Area"][ic] = np.sum(square_area[area_ix]) + return data -def bin_receptor(zm, receptor, square_area, nbins=25, receptor_names=None, receptor_type='receptor'): +def bin_receptor( + zm, receptor, square_area, nbins=25, receptor_names=None, receptor_type="receptor" +): """ Bins values into 25 bins and by unique values in the receptor. 
@@ -586,8 +620,10 @@ def bin_receptor(zm, receptor, square_area, nbins=25, receptor_names=None, recep Returns ------- - DATA : Dictionary - Dictionary with keys corresponding to each unique receptor value each containing for each bin + data : Dictionary + Dictionary with keys corresponding to each unique receptor value + each containing for each bin + bin start : the starting value of each bin bin end : the last value of each bin bin center: the center value of each bin @@ -598,28 +634,39 @@ def bin_receptor(zm, receptor, square_area, nbins=25, receptor_names=None, recep """ hist, bins = np.histogram(zm, bins=nbins) center = (bins[:-1] + bins[1:]) / 2 - DATA = {} - DATA['bin start'] = bins[:-1] - DATA['bin end'] = bins[1:] - DATA['bin center'] = center + data = {} + data["bin start"] = bins[:-1] + data["bin end"] = bins[1:] + data["bin center"] = center for ic, rval in enumerate(np.unique(receptor)): zz = zm[receptor == rval] sqa = square_area[receptor == rval] - rcolname = f'Area, {receptor_type} value {rval}' if receptor_names is None else receptor_names[ - ic] - DATA[rcolname] = np.zeros(hist.shape) + rcolname = ( + f"Area, {receptor_type} value {rval}" + if receptor_names is None + else receptor_names[ic] + ) + data[rcolname] = np.zeros(hist.shape) for ic, (bin_start, bin_end) in enumerate(zip(bins[:-1], bins[1:])): - if ic < len(hist)-1: + if ic < len(hist) - 1: area_ix = np.flatnonzero((zz >= bin_start) & (zz < bin_end)) else: area_ix = np.flatnonzero((zz >= bin_start) & (zz <= bin_end)) - DATA[rcolname][ic] = np.sum(sqa[area_ix]) - DATA[f'Area percent, {receptor_type} value {rval}'] = 100 * \ - DATA[rcolname]/DATA[rcolname].sum() - return DATA - - -def bin_layer(raster_filename, receptor_filename=None, receptor_names=None, limit_receptor_range=None, latlon=True, receptor_type='receptor'): + data[rcolname][ic] = np.sum(sqa[area_ix]) + data[f"Area percent, {receptor_type} value {rval}"] = ( + 100 * data[rcolname] / data[rcolname].sum() + ) + return data + + +def bin_layer( + raster_filename, + receptor_filename=None, + receptor_names=None, + limit_receptor_range=None, + latlon=True, + receptor_type="receptor", +): """ creates a dataframe of binned raster values and associtaed area and percent of array. @@ -637,7 +684,7 @@ def bin_layer(raster_filename, receptor_filename=None, receptor_names=None, limi True is coordinates are lat/lon. The default is True. receptor_type : str, optional name to display in output (eg. grain size, risk layer). the default is receptor. 
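bin_data, completed above, histograms the flattened raster values into nbins bins and accumulates the cell area that falls into each bin; bin_layer later appends the corresponding area percentages. A toy run with synthetic values and unit cell areas, assuming the package is importable:

    import numpy as np
    from seat.modules.stressor_utils import bin_data

    zm = np.linspace(-1.0, 1.0, 100)  # synthetic stressor values
    square_area = np.ones_like(zm)    # pretend every cell covers 1 square meter
    data = bin_data(zm, square_area, nbins=4)
    print(data["bin center"])         # four bin centers spanning [-1, 1]
    print(data["Area"])               # each of the four bins covers 25 square meters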
- + Returns ------- DataFrame @@ -646,30 +693,47 @@ def bin_layer(raster_filename, receptor_filename=None, receptor_names=None, limi """ rx, ry, z = read_raster(raster_filename) - rxm, rym, square_area = calculate_cell_area(rx, ry, latlon=True) + rxm, rym, square_area = calculate_cell_area(rx, ry, latlon) square_area = square_area.flatten() - zm = resample_structured_grid( - rx, ry, z, rxm, rym, interpmethod='linear').flatten() + zm = resample_structured_grid(rx, ry, z, rxm, rym, interpmethod="linear").flatten() if receptor_filename is None: - DATA = bin_data(zm[np.invert(np.isnan(zm))], - square_area[np.invert(np.isnan(zm))], nbins=25) - # DF = pd.DataFrame(DATA) - DATA['Area percent'] = 100 * DATA['Area']/DATA['Area'].sum() + data = bin_data( + zm[np.invert(np.isnan(zm))], square_area[np.invert(np.isnan(zm))], nbins=25 + ) + # DF = pd.DataFrame(data) + data["Area percent"] = 100 * data["Area"] / data["Area"].sum() else: rrx, rry, receptor = read_raster(receptor_filename) - receptor = resample_structured_grid( - rrx, rry, receptor, rxm, rym).flatten() + receptor = resample_structured_grid(rrx, rry, receptor, rxm, rym).flatten() if limit_receptor_range is not None: - receptor = np.where((receptor >= np.min(limit_receptor_range)) & ( - receptor <= np.max(limit_receptor_range)), receptor, 0) - DATA = bin_receptor(zm[np.invert(np.isnan(zm))], receptor[np.invert(np.isnan( - zm))], square_area[np.invert(np.isnan(zm))], receptor_names=receptor_names, receptor_type=receptor_type) - return pd.DataFrame(DATA) - - -def classify_layer_area(raster_filename, receptor_filename=None, at_values=None, value_names=None, limit_receptor_range=None, latlon=True, receptor_type='receptor'): + receptor = np.where( + (receptor >= np.min(limit_receptor_range)) + & (receptor <= np.max(limit_receptor_range)), + receptor, + 0, + ) + data = bin_receptor( + zm[np.invert(np.isnan(zm))], + receptor[np.invert(np.isnan(zm))], + square_area[np.invert(np.isnan(zm))], + receptor_names=receptor_names, + receptor_type=receptor_type, + ) + return pd.DataFrame(data) + + +def classify_layer_area( + raster_filename, + receptor_filename=None, + at_values=None, + value_names=None, + limit_receptor_range=None, + latlon=True, + receptor_type="receptor", +): """ - Creates a dataframe of raster values and associtaed area and percent of array at specified raster values. + Creates a dataframe of raster values and associtaed area and + percent of array at specified raster values. 
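bin_layer, completed in the hunk above, ties read_raster, calculate_cell_area, and the binning helpers together: given a stressor GeoTIFF (and optionally a receptor GeoTIFF), it returns a DataFrame of binned values with areas and area percentages, which the stressor modules write straight to CSV. A short usage sketch with placeholder paths, matching the calls made in shear_stress_module:

    from seat.modules.stressor_utils import bin_layer

    # hypothetical raster produced by one of the stressor modules
    bin_layer(
        "output/shear_stress/shear_stress_difference.tif",
        receptor_filename=None,
        receptor_names=None,
        latlon=True,  # EPSG:4326 rasters
    ).to_csv("output/shear_stress/shear_stress_difference.csv", index=False)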
Parameters ---------- @@ -704,40 +768,82 @@ def classify_layer_area(raster_filename, receptor_filename=None, at_values=None, at_values = np.unique(zm) else: at_values = np.atleast_1d(at_values) - DATA = {} - DATA['value'] = at_values + data = {} + data["value"] = at_values if value_names is not None: - DATA['value name'] = value_names + data["value name"] = value_names if receptor_filename is None: - DATA['Area'] = np.zeros(len(at_values)) + data["Area"] = np.zeros(len(at_values)) for ic, value in enumerate(at_values): area_ix = np.flatnonzero(zm == value) - DATA['Area'][ic] = np.sum(square_area[area_ix]) - DATA['Area percent'] = 100 * DATA['Area']/DATA['Area'].sum() + data["Area"][ic] = np.sum(square_area[area_ix]) + data["Area percent"] = 100 * data["Area"] / data["Area"].sum() else: rrx, rry, receptor = read_raster(receptor_filename) if limit_receptor_range is not None: - receptor = np.where((receptor >= np.min(limit_receptor_range)) & ( - receptor <= np.max(limit_receptor_range)), receptor, 0) - receptor = resample_structured_grid( - rrx, rry, receptor, rxm, rym).flatten() + receptor = np.where( + (receptor >= np.min(limit_receptor_range)) + & (receptor <= np.max(limit_receptor_range)), + receptor, + 0, + ) + receptor = resample_structured_grid(rrx, rry, receptor, rxm, rym).flatten() for ic, rval in enumerate(np.unique(receptor)): zz = zm[receptor == rval] sqa = square_area[receptor == rval] - rcolname = f'Area, {receptor_type} value {rval}' - ccolname = f'Count, {receptor_type} value {rval}' - DATA[rcolname] = np.zeros(len(at_values)) - DATA[ccolname] = np.zeros(len(at_values)) + rcolname = f"Area, {receptor_type} value {rval}" + ccolname = f"Count, {receptor_type} value {rval}" + data[rcolname] = np.zeros(len(at_values)) + data[ccolname] = np.zeros(len(at_values)) for iic, value in enumerate(at_values): area_ix = np.flatnonzero(zz == value) - DATA[ccolname][iic] = len(area_ix) - DATA[rcolname][iic] = np.sum(sqa[area_ix]) - DATA[f'Area percent, {receptor_type} value {rval}'] = 100 * \ - DATA[rcolname]/DATA[rcolname].sum() - return pd.DataFrame(DATA) + data[ccolname][iic] = len(area_ix) + data[rcolname][iic] = np.sum(sqa[area_ix]) + data[f"Area percent, {receptor_type} value {rval}"] = ( + 100 * data[rcolname] / data[rcolname].sum() + ) + return pd.DataFrame(data) + + +def classify_layer_area_2nd_constraint( + raster_to_sample, + secondary_constraint_filename, + at_raster_values, + at_raster_value_names, + limit_constraint_range=None, + latlon=True, + receptor_type="receptor", +): + """ + Classifies layer areas based on a secondary constraint raster. + This function calculates the area of different classifications in a raster + and applies an additional filter or constraint using a secondary raster file. -def classify_layer_area_2nd_Constraint(raster_to_sample, secondary_constraint_filename, at_raster_values, at_raster_value_names, limit_constraint_range=None, latlon=True, receptor_type='receptor'): + Parameters + ---------- + raster_to_sample : str + Path to the raster file to be sampled. + secondary_constraint_filename : str or None + Path to the secondary constraint raster file. If None, no secondary constraint is applied. + at_raster_values : list or None + List of values in the raster to classify. If None, all unique values are considered. + at_raster_value_names : list or None + List of names corresponding to the `at_raster_values` for more descriptive output. 
+ limit_constraint_range : tuple or None, optional + A tuple specifying the range (min, max) to limit the secondary constraint values. + Default is None. + latlon : bool, optional + Boolean to indicate if the coordinate system is latitude/longitude. Default is True. + receptor_type : str, optional + Type of receptor for naming purposes in the output. Default is "receptor". + + Returns + ------- + pd.DataFrame + A DataFrame with areas and their percentages calculated for each classification + and optionally for each classification within a constraint range. + """ rx, ry, z = read_raster(raster_to_sample) rxm, rym, square_area = calculate_cell_area(rx, ry, latlon=latlon) square_area = square_area.flatten() @@ -746,37 +852,44 @@ def classify_layer_area_2nd_Constraint(raster_to_sample, secondary_constraint_fi at_values = np.unique(zm) else: at_values = np.atleast_1d(at_raster_values) - DATA = {} - DATA['value'] = at_values + data = {} + data["value"] = at_values if at_raster_value_names is not None: - DATA['value name'] = at_raster_value_names + data["value name"] = at_raster_value_names if secondary_constraint_filename is None: - DATA['Area'] = np.zeros(len(at_values)) + data["Area"] = np.zeros(len(at_values)) for ic, value in enumerate(at_values): area_ix = np.flatnonzero(zm == value) - DATA['Area'][ic] = np.sum(square_area[area_ix]) - DATA['Area percent'] = 100 * DATA['Area']/DATA['Area'].sum() + data["Area"][ic] = np.sum(square_area[area_ix]) + data["Area percent"] = 100 * data["Area"] / data["Area"].sum() else: rrx, rry, constraint = read_raster(secondary_constraint_filename) - constraint = resample_structured_grid(rrx, rry, constraint, rxm, rym, interpmethod='nearest').flatten() + constraint = resample_structured_grid( + rrx, rry, constraint, rxm, rym, interpmethod="nearest" + ).flatten() if limit_constraint_range is not None: - constraint = np.where((constraint >= np.min(limit_constraint_range)) & ( - constraint <= np.max(limit_constraint_range)), constraint, np.nan) + constraint = np.where( + (constraint >= np.min(limit_constraint_range)) + & (constraint <= np.max(limit_constraint_range)), + constraint, + np.nan, + ) for ic, rval in enumerate(np.unique(constraint)): if ~np.isnan(rval): zz = zm[constraint == rval] sqa = square_area[constraint == rval] - rcolname = f'Area, {receptor_type} value {rval}' - ccolname = f'Count, {receptor_type} value {rval}' - DATA[rcolname] = np.zeros(len(at_values)) - DATA[ccolname] = np.zeros(len(at_values)) + rcolname = f"Area, {receptor_type} value {rval}" + ccolname = f"Count, {receptor_type} value {rval}" + data[rcolname] = np.zeros(len(at_values)) + data[ccolname] = np.zeros(len(at_values)) for iic, value in enumerate(at_values): area_ix = np.flatnonzero(zz == value) - DATA[ccolname][iic] = len(area_ix) - DATA[rcolname][iic] = np.sum(sqa[area_ix]) - DATA[f'Area percent, {receptor_type} value {rval}'] = 100 * \ - DATA[rcolname]/DATA[rcolname].sum() - return pd.DataFrame(DATA) + data[ccolname][iic] = len(area_ix) + data[rcolname][iic] = np.sum(sqa[area_ix]) + data[f"Area percent, {receptor_type} value {rval}"] = ( + 100 * data[rcolname] / data[rcolname].sum() + ) + return pd.DataFrame(data) # def calc_area_change(self, ofilename, crs, stylefile=None): # """Export the areas of the given file. 
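Conceptually, classify_layer_area and its secondary-constraint variant tally the area occupied by each discrete raster value, optionally split by the value of a second (receptor or risk) raster. Below is a hedged, self-contained sketch of that tallying on synthetic arrays; the column labels mimic the ones built above, but the data, sizes, and cell areas are invented.

import numpy as np
import pandas as pd

classified = np.random.default_rng(1).integers(-3, 4, size=500)  # e.g. motility_classified values
constraint = np.random.default_rng(2).integers(0, 3, size=500)   # e.g. a risk-layer raster
cell_area = np.full(classified.shape, 100.0)                     # m^2 per cell

at_values = np.arange(-3, 4)
data = {"value": at_values}
for rval in np.unique(constraint):
    in_zone = constraint == rval
    areas = np.array(
        [np.sum(cell_area[in_zone & (classified == v)]) for v in at_values]
    )
    data[f"Area, risk layer value {rval}"] = areas
    data[f"Area percent, risk layer value {rval}"] = 100 * areas / areas.sum()

print(pd.DataFrame(data))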
Find a UTM of the given crs and calculate in m2.""" @@ -808,7 +921,8 @@ def classify_layer_area_2nd_Constraint(raster_to_sample, secondary_constraint_fi # # create a temporary file for reprojection # outfile = tempfile.NamedTemporaryFile(suffix=".tif").name - # # cmd = f'gdalwarp -s_srs EPSG:{crs} -t_srs EPSG:{crs_found} -r near -of GTiff {ofilename} {outfile}' + # # cmd = f'gdalwarp -s_srs EPSG:{crs} -t_srs + # # EPSG:{crs_found} -r near -of GTiff {ofilename} {outfile}' # # os.system(cmd) # reproject_params = { @@ -876,4 +990,4 @@ def classify_layer_area_2nd_Constraint(raster_to_sample, secondary_constraint_fi # cfile, # na_rep="NULL", # index=False, - # ) \ No newline at end of file + # ) diff --git a/seat/modules/velocity_module.py b/seat/modules/velocity_module.py index 5561ab57..2404b7b4 100644 --- a/seat/modules/velocity_module.py +++ b/seat/modules/velocity_module.py @@ -1,4 +1,10 @@ #!/usr/bin/python + +# pylint: disable=too-many-statements +# pylint: disable=too-many-arguments +# pylint: disable=too-many-locals +# pylint: disable=too-many-branches + """ /***************************************************************************. @@ -20,13 +26,12 @@ 1. called by stressor_receptor_calc.py """ -import glob import os import numpy as np import pandas as pd -from netCDF4 import Dataset -from .stressor_utils import ( +from netCDF4 import Dataset # pylint: disable=no-name-in-module +from seat.modules.stressor_utils import ( estimate_grid_spacing, create_structured_array_from_unstructured, calc_receptor_array, @@ -35,9 +40,9 @@ numpy_array_to_raster, bin_layer, classify_layer_area, - classify_layer_area_2nd_Constraint, + classify_layer_area_2nd_constraint, resample_structured_grid, - secondary_constraint_geotiff_to_numpy + secondary_constraint_geotiff_to_numpy, ) @@ -65,17 +70,45 @@ def classify_motility(motility_parameter_dev, motility_parameter_nodev): motility_classification = np.zeros(motility_parameter_dev.shape) # Motility Stops - motility_classification = np.where(((motility_parameter_dev < motility_parameter_nodev) & ( - motility_parameter_nodev >= 1) & (motility_parameter_dev < 1)), -1, motility_classification) + motility_classification = np.where( + ( + (motility_parameter_dev < motility_parameter_nodev) + & (motility_parameter_nodev >= 1) + & (motility_parameter_dev < 1) + ), + -1, + motility_classification, + ) # Reduced Motility (Tw1 - motility_classification = np.where(((motility_parameter_dev < motility_parameter_nodev) & ( - motility_parameter_nodev >= 1) & (motility_parameter_dev >= 1)), 1, motility_classification) + motility_classification = np.where( + ( + (motility_parameter_dev < motility_parameter_nodev) + & (motility_parameter_nodev >= 1) + & (motility_parameter_dev >= 1) + ), + 1, + motility_classification, + ) # Increased Motility (Tw>Tb) & (Tw-Tb)>1 - motility_classification = np.where(((motility_parameter_dev > motility_parameter_nodev) & ( - motility_parameter_nodev >= 1) & (motility_parameter_dev >= 1)), 2, motility_classification) + motility_classification = np.where( + ( + (motility_parameter_dev > motility_parameter_nodev) + & (motility_parameter_nodev >= 1) + & (motility_parameter_dev >= 1) + ), + 2, + motility_classification, + ) # New Motility - motility_classification = np.where(((motility_parameter_dev > motility_parameter_nodev) & ( - motility_parameter_nodev < 1) & (motility_parameter_dev >= 1)), 3, motility_classification) + motility_classification = np.where( + ( + (motility_parameter_dev > motility_parameter_nodev) + & (motility_parameter_nodev < 
1) + & (motility_parameter_dev >= 1) + ), + 3, + motility_classification, + ) # NoChange or NoMotility = 0 return motility_classification @@ -102,30 +135,32 @@ def check_grid_define_vars(dataset): vvar : str name of y-coordinate velocity variable. """ - vars = list(dataset.variables) - if 'U1' in vars: - gridtype = 'structured' - uvar = 'U1' - vvar = 'V1' + data_vars = list(dataset.variables) + if "U1" in data_vars: + gridtype = "structured" + uvar = "U1" + vvar = "V1" try: xvar, yvar = dataset.variables[uvar].coordinates.split() - except: - xvar = 'XCOR' - yvar = 'YCOR' + except AttributeError: + xvar = "XCOR" + yvar = "YCOR" else: - gridtype = 'unstructured' - uvar = 'ucxa' - vvar = 'ucya' + gridtype = "unstructured" + uvar = "ucxa" + vvar = "ucya" xvar, yvar = dataset.variables[uvar].coordinates.split() return gridtype, xvar, yvar, uvar, vvar -def calculate_velocity_stressors(fpath_nodev, - fpath_dev, - probabilities_file, - receptor_filename=None, - latlon=True, - value_selection=None): +def calculate_velocity_stressors( + fpath_nodev, + fpath_dev, + probabilities_file, + receptor_filename=None, + latlon=True, + value_selection=None, +): """ @@ -176,18 +211,22 @@ def calculate_velocity_stressors(fpath_nodev, if not os.path.exists(fpath_dev): raise FileNotFoundError(f"The directory {fpath_dev} does not exist.") - files_nodev = [i for i in os.listdir(fpath_nodev) if i.endswith('.nc')] - files_dev = [i for i in os.listdir(fpath_dev) if i.endswith('.nc')] + files_nodev = [i for i in os.listdir(fpath_nodev) if i.endswith(".nc")] + files_dev = [i for i in os.listdir(fpath_dev) if i.endswith(".nc")] + + xcor = None + ycor = None # Load and sort files if len(files_nodev) == 1 & len(files_dev) == 1: # asumes a concatonated files with shape # [run_num, time, rows, cols] - with Dataset(os.path.join(fpath_dev, files_dev[0])) as file_dev_present,\ - Dataset(os.path.join(fpath_nodev, files_nodev[0])) as file_dev_notpresent: - - gridtype, xvar, yvar, uvar, vvar = check_grid_define_vars( - file_dev_present) + with Dataset( + os.path.join(fpath_dev, files_dev[0]) + ) as file_dev_present, Dataset( + os.path.join(fpath_nodev, files_nodev[0]) + ) as file_dev_notpresent: + gridtype, xvar, yvar, uvar, vvar = check_grid_define_vars(file_dev_present) xcor = file_dev_present.variables[xvar][:].data ycor = file_dev_present.variables[yvar][:].data u = file_dev_present.variables[uvar][:].data @@ -200,49 +239,74 @@ def calculate_velocity_stressors(fpath_nodev, # same number of files, file name must be formatted with either run number or return interval elif len(files_nodev) == len(files_dev): - # asumes each run is separate with the some_name_RunNum_map.nc, where run number comes at the last underscore before _map.nc + # asumes each run is separate with the some_name_RunNum_map.nc, + # where run number comes at the last underscore before _map.nc run_num_nodev = np.zeros((len(files_nodev))) for ic, file in enumerate(files_nodev): - run_num_nodev[ic] = int(file.split('.')[0].split('_')[-2]) + run_num_nodev[ic] = int(file.split(".")[0].split("_")[-2]) run_num_dev = np.zeros((len(files_dev))) for ic, file in enumerate(files_dev): - run_num_dev[ic] = int(file.split('.')[0].split('_')[-2]) + run_num_dev[ic] = int(file.split(".")[0].split("_")[-2]) # ensure run oder for nodev matches dev files if np.any(run_num_nodev != run_num_dev): adjust_dev_order = [] for ri in run_num_nodev: adjust_dev_order = np.append( - adjust_dev_order, np.flatnonzero(run_num_dev == ri)) + adjust_dev_order, np.flatnonzero(run_num_dev == ri) + ) 
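The filename convention assumed above is that the run number sits just before the trailing "_map" token, i.e. "<name>_<run>_map.nc", and that with-device and without-device runs must be paired by that number before probabilities are applied. A small sketch of that parsing and pairing, using invented file names, is:

import pandas as pd

files_dev = ["site_2_map.nc", "site_1_map.nc", "site_3_map.nc"]
files_nodev = ["base_1_map.nc", "base_3_map.nc", "base_2_map.nc"]

def run_number(fname: str) -> int:
    # "site_2_map.nc" -> "site_2_map" -> ["site", "2", "map"] -> 2
    return int(fname.split(".")[0].split("_")[-2])

frame = pd.DataFrame(
    {
        "files_nodev": sorted(files_nodev, key=run_number),
        "files_dev": sorted(files_dev, key=run_number),
    }
)
frame["run_num_dev"] = frame["files_dev"].map(run_number)
print(frame)  # rows are now paired run-for-run, as the loop over data_frame expects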
files_dev = [files_dev[int(i)] for i in adjust_dev_order] run_num_dev = [run_num_dev[int(i)] for i in adjust_dev_order] - DF = pd.DataFrame({'files_nodev': files_nodev, - 'run_num_nodev': run_num_nodev, - 'files_dev': files_dev, - 'run_num_dev': run_num_dev}) - DF = DF.sort_values(by='run_num_dev') + data_frame = pd.DataFrame( + { + "files_nodev": files_nodev, + "run_num_nodev": run_num_nodev, + "files_dev": files_dev, + "run_num_dev": run_num_dev, + } + ) + data_frame = data_frame.sort_values(by="run_num_dev") first_run = True ir = 0 - for _, row in DF.iterrows(): - with Dataset(os.path.join(fpath_nodev, row.files_nodev)) as file_dev_notpresent, \ - Dataset(os.path.join(fpath_dev, row.files_dev)) as file_dev_present: - + for _, row in data_frame.iterrows(): + with Dataset( + os.path.join(fpath_nodev, row.files_nodev) + ) as file_dev_notpresent, Dataset( + os.path.join(fpath_dev, row.files_dev) + ) as file_dev_present: gridtype, xvar, yvar, uvar, vvar = check_grid_define_vars( - file_dev_present) + file_dev_present + ) if first_run: tmp = file_dev_notpresent.variables[uvar][:].data - if gridtype == 'structured': + if gridtype == "structured": mag_nodev = np.zeros( - (DF.shape[0], tmp.shape[0], tmp.shape[1], tmp.shape[2], tmp.shape[3])) + ( + data_frame.shape[0], + tmp.shape[0], + tmp.shape[1], + tmp.shape[2], + tmp.shape[3], + ) + ) mag_dev = np.zeros( - (DF.shape[0], tmp.shape[0], tmp.shape[1], tmp.shape[2], tmp.shape)) + ( + data_frame.shape[0], + tmp.shape[0], + tmp.shape[1], + tmp.shape[2], + tmp.shape, + ) + ) else: mag_nodev = np.zeros( - (DF.shape[0], tmp.shape[0], tmp.shape[1])) + (data_frame.shape[0], tmp.shape[0], tmp.shape[1]) + ) mag_dev = np.zeros( - (DF.shape[0], tmp.shape[0], tmp.shape[1])) + (data_frame.shape[0], tmp.shape[0], tmp.shape[1]) + ) xcor = file_dev_notpresent.variables[xvar][:].data ycor = file_dev_notpresent.variables[yvar][:].data first_run = False @@ -254,50 +318,57 @@ def calculate_velocity_stressors(fpath_nodev, mag_dev[ir, :] = np.sqrt(u**2 + v**2) ir += 1 else: - raise Exception( - f"Number of device runs ({len(files_dev)}) must be the same as no device runs ({len(files_nodev)}).") + raise ValueError( + f"Number of device runs ({len(files_dev)}) must be the same \ + as no device runs ({len(files_nodev)})." + ) # Finished loading and sorting files - if (gridtype == 'structured'): + if gridtype == "structured": if (xcor[0, 0] == 0) & (xcor[-1, 0] == 0): # at least for some runs the boundary has 0 coordinates. Check and fix. 
- xcor, ycor, mag_nodev, mag_dev = trim_zeros( - xcor, ycor, mag_nodev, mag_dev) + xcor, ycor, mag_nodev, mag_dev = trim_zeros(xcor, ycor, mag_nodev, mag_dev) if probabilities_file != "": if not os.path.exists(probabilities_file): raise FileNotFoundError(f"The file {probabilities_file} does not exist.") # Load BC file with probabilities and find appropriate probability - BC_probability = pd.read_csv(probabilities_file, delimiter=",") - BC_probability['run_num'] = BC_probability['run number']-1 - BC_probability = BC_probability.sort_values(by='run number') - BC_probability["probability"] = BC_probability["% of yr"].values/100 - # BC_probability - if 'Exclude' in BC_probability.columns: - BC_probability = BC_probability[~( - (BC_probability['Exclude'] == 'x') | (BC_probability['Exclude'] == 'X'))] + bc_probability = pd.read_csv(probabilities_file, delimiter=",") + bc_probability["run_num"] = bc_probability["run number"] - 1 + bc_probability = bc_probability.sort_values(by="run number") + bc_probability["probability"] = bc_probability["% of yr"].values / 100 + # bc_probability + if "Exclude" in bc_probability.columns: + bc_probability = bc_probability[ + ~( + (bc_probability["Exclude"] == "x") + | (bc_probability["Exclude"] == "X") + ) + ] else: # assume run_num in file name is return interval - BC_probability = pd.DataFrame() + bc_probability = pd.DataFrame() # ignor number and start sequentially from zero - BC_probability['run_num'] = np.arange(0, mag_dev.shape[0]) + bc_probability["run_num"] = np.arange(0, mag_dev.shape[0]) # assumes run_num in name is the return interval - BC_probability["probability"] = 1/DF.run_num_dev.to_numpy() - BC_probability["probability"] = BC_probability["probability"] / \ - BC_probability["probability"].sum() # rescale to ensure = 1 + bc_probability["probability"] = 1 / data_frame.run_num_dev.to_numpy() + bc_probability["probability"] = ( + bc_probability["probability"] / bc_probability["probability"].sum() + ) # rescale to ensure = 1 - # ensure velocity is depth averaged for structured array [run_num, time, layer, x, y] and drop dimension + # ensure velocity is depth averaged for structured array [run_num, time, layer, x, y] + # and drop dimension if np.ndim(mag_nodev) == 5: mag_dev = np.nanmean(mag_dev, axis=2) mag_nodev = np.nanmean(mag_nodev, axis=2) # Calculate Stressor and Receptors - if value_selection == 'Maximum': + if value_selection == "Maximum": mag_dev = np.nanmax(mag_dev, axis=1) # max over time mag_nodev = np.nanmax(mag_nodev, axis=1) # max over time - elif value_selection == 'Mean': + elif value_selection == "Mean": mag_dev = np.nanmean(mag_dev, axis=1) # mean over time mag_nodev = np.nanmean(mag_nodev, axis=1) # mean over time - elif value_selection == 'Final Timestep': + elif value_selection == "Final Timestep": mag_dev = mag_dev[:, -1, :] # last time step mag_nodev = mag_nodev[:, -1, :] # last time step else: @@ -305,22 +376,23 @@ def calculate_velocity_stressors(fpath_nodev, mag_nodev = np.nanmax(mag_nodev, axis=1) # default to max over time # initialize arrays - if gridtype == 'structured': + if gridtype == "structured": mag_combined_nodev = np.zeros(np.shape(mag_nodev[0, :, :])) mag_combined_dev = np.zeros(np.shape(mag_dev[0, :, :])) else: mag_combined_nodev = np.zeros(np.shape(mag_nodev)[-1]) mag_combined_dev = np.zeros(np.shape(mag_dev)[-1]) - for run_number, prob in zip(BC_probability['run_num'].values, - BC_probability["probability"].values): - - mag_combined_nodev = mag_combined_nodev + \ - prob * mag_nodev[run_number, :] + for 
run_number, prob in zip( + bc_probability["run_num"].values, bc_probability["probability"].values + ): + mag_combined_nodev = mag_combined_nodev + prob * mag_nodev[run_number, :] mag_combined_dev = mag_combined_dev + prob * mag_dev[run_number, :] mag_diff = mag_combined_dev - mag_combined_nodev - velcrit = calc_receptor_array(receptor_filename, xcor, ycor, latlon=latlon, mask=~np.isnan(mag_diff)) + velcrit = calc_receptor_array( + receptor_filename, xcor, ycor, latlon=latlon, mask=~np.isnan(mag_diff) + ) motility_nodev = mag_combined_nodev / velcrit # motility_nodev = np.where(velcrit == 0, np.nan, motility_nodev) motility_dev = mag_combined_dev / velcrit @@ -329,40 +401,48 @@ def calculate_velocity_stressors(fpath_nodev, motility_diff = motility_dev - motility_nodev - if gridtype == 'structured': - motility_classification = classify_motility( - motility_dev, motility_nodev) + if gridtype == "structured": + motility_classification = classify_motility(motility_dev, motility_nodev) dx = np.nanmean(np.diff(xcor[:, 0])) dy = np.nanmean(np.diff(ycor[0, :])) rx = xcor ry = ycor - dict_of_arrays = {'velocity_magnitude_without_devices':mag_combined_nodev, - 'velocity_magnitude_with_devices': mag_combined_dev, - 'velocity_magnitude_difference': mag_diff, - 'motility_without_devices': motility_nodev, - 'motility_with_devices': motility_dev, - 'motility_difference': motility_diff, - 'motility_classified':motility_classification, - 'critical_velocity':velcrit} + dict_of_arrays = { + "velocity_magnitude_without_devices": mag_combined_nodev, + "velocity_magnitude_with_devices": mag_combined_dev, + "velocity_magnitude_difference": mag_diff, + "motility_without_devices": motility_nodev, + "motility_with_devices": motility_dev, + "motility_difference": motility_diff, + "motility_classified": motility_classification, + "critical_velocity": velcrit, + } else: # unstructured dxdy = estimate_grid_spacing(xcor, ycor, nsamples=100) dx = dxdy dy = dxdy rx, ry, mag_diff_struct = create_structured_array_from_unstructured( - xcor, ycor, mag_diff, dxdy, flatness=0.2) + xcor, ycor, mag_diff, dxdy, flatness=0.2 + ) _, _, mag_combined_dev_struct = create_structured_array_from_unstructured( - xcor, ycor, mag_combined_dev, dxdy, flatness=0.2) + xcor, ycor, mag_combined_dev, dxdy, flatness=0.2 + ) _, _, mag_combined_nodev_struct = create_structured_array_from_unstructured( - xcor, ycor, mag_combined_nodev, dxdy, flatness=0.2) + xcor, ycor, mag_combined_nodev, dxdy, flatness=0.2 + ) if not ((receptor_filename is None) or (receptor_filename == "")): _, _, motility_nodev_struct = create_structured_array_from_unstructured( - xcor, ycor, motility_nodev, dxdy, flatness=0.2) + xcor, ycor, motility_nodev, dxdy, flatness=0.2 + ) _, _, motility_dev_struct = create_structured_array_from_unstructured( - xcor, ycor, motility_dev, dxdy, flatness=0.2) + xcor, ycor, motility_dev, dxdy, flatness=0.2 + ) _, _, motility_diff_struct = create_structured_array_from_unstructured( - xcor, ycor, motility_diff, dxdy, flatness=0.2) + xcor, ycor, motility_diff, dxdy, flatness=0.2 + ) _, _, velcrit_struct = create_structured_array_from_unstructured( - xcor, ycor, velcrit, dxdy, flatness=0.2) + xcor, ycor, velcrit, dxdy, flatness=0.2 + ) else: motility_nodev_struct = np.nan * mag_diff_struct @@ -371,20 +451,23 @@ def calculate_velocity_stressors(fpath_nodev, velcrit_struct = np.nan * mag_diff_struct motility_classification = classify_motility( - motility_dev_struct, motility_nodev_struct) + motility_dev_struct, motility_nodev_struct + ) 
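The stressor calculation shown above is a probability-weighted average over runs, followed by a ratio against the critical velocity receptor and a thresholded classification. The sketch below reproduces those steps on synthetic arrays; the thresholds mirror classify_motility, but the array shapes, probabilities, and critical velocity are made up.

import numpy as np

n_runs, ny, nx = 4, 10, 12
rng = np.random.default_rng(5)
mag_nodev = rng.random((n_runs, ny, nx))       # per-run magnitude, no devices
mag_dev = rng.random((n_runs, ny, nx))         # per-run magnitude, with devices
probability = np.array([0.1, 0.2, 0.3, 0.4])   # e.g. "% of yr" / 100, sums to 1
velcrit = np.full((ny, nx), 0.5)               # critical velocity receptor

mag_combined_nodev = np.zeros((ny, nx))
mag_combined_dev = np.zeros((ny, nx))
for run_number, prob in enumerate(probability):
    mag_combined_nodev += prob * mag_nodev[run_number]
    mag_combined_dev += prob * mag_dev[run_number]

mag_diff = mag_combined_dev - mag_combined_nodev
motility_nodev = mag_combined_nodev / velcrit
motility_dev = mag_combined_dev / velcrit

# Thresholds as in classify_motility: -1 motility stops, 1 reduced,
# 2 increased, 3 new motility, 0 no change or no motility.
cls = np.zeros((ny, nx))
cls = np.where((motility_dev < motility_nodev) & (motility_nodev >= 1) & (motility_dev < 1), -1, cls)
cls = np.where((motility_dev < motility_nodev) & (motility_nodev >= 1) & (motility_dev >= 1), 1, cls)
cls = np.where((motility_dev > motility_nodev) & (motility_nodev >= 1) & (motility_dev >= 1), 2, cls)
cls = np.where((motility_dev > motility_nodev) & (motility_nodev < 1) & (motility_dev >= 1), 3, cls)
print(np.unique(cls, return_counts=True))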
motility_classification = np.where( - np.isnan(mag_diff_struct), -100, motility_classification) - # listOfFiles = [mag_combined_dev_struct, mag_combined_nodev_struct, mag_diff_struct, motility_nodev_struct, - # motility_dev_struct, motility_diff_struct, motility_classification, velcrit_struct] - dict_of_arrays = {'velocity_magnitude_without_devices':mag_combined_nodev_struct, - 'velocity_magnitude_with_devices': mag_combined_dev_struct, - 'velocity_magnitude_difference': mag_diff_struct, - 'motility_without_devices': motility_nodev_struct, - 'motility_with_devices': motility_dev_struct, - 'motility_difference': motility_diff_struct, - 'motility_classified':motility_classification, - 'critical_velocity':velcrit_struct} + np.isnan(mag_diff_struct), -100, motility_classification + ) + + dict_of_arrays = { + "velocity_magnitude_without_devices": mag_combined_nodev_struct, + "velocity_magnitude_with_devices": mag_combined_dev_struct, + "velocity_magnitude_difference": mag_diff_struct, + "motility_without_devices": motility_nodev_struct, + "motility_with_devices": motility_dev_struct, + "motility_difference": motility_diff_struct, + "motility_classified": motility_classification, + "critical_velocity": velcrit_struct, + } return dict_of_arrays, rx, ry, dx, dy, gridtype @@ -396,7 +479,8 @@ def run_velocity_stressor( output_path, receptor_filename=None, secondary_constraint_filename=None, - value_selection=None): + value_selection=None, +): """ creates geotiffs and area change statistics files for velocity change @@ -423,54 +507,67 @@ def run_velocity_stressor( key = names of output rasters, val = full path to raster: """ - os.makedirs(output_path, exist_ok=True) # create output directory if it doesn't exist - + os.makedirs( + output_path, exist_ok=True + ) # create output directory if it doesn't exist - - dict_of_arrays, rx, ry, dx, dy, gridtype = calculate_velocity_stressors(fpath_nodev=dev_notpresent_file, - fpath_dev=dev_present_file, - probabilities_file=probabilities_file, - receptor_filename=receptor_filename, - latlon=crs == 4326, - value_selection=value_selection) + dict_of_arrays, rx, ry, dx, dy, gridtype = calculate_velocity_stressors( + fpath_nodev=dev_notpresent_file, + fpath_dev=dev_present_file, + probabilities_file=probabilities_file, + receptor_filename=receptor_filename, + latlon=crs == 4326, + value_selection=value_selection, + ) if not ((receptor_filename is None) or (receptor_filename == "")): - use_numpy_arrays = ['velocity_magnitude_without_devices', - 'velocity_magnitude_with_devices', - 'velocity_magnitude_difference', - 'motility_without_devices', - 'motility_with_devices', - 'motility_difference', - 'motility_classified', - 'critical_velocity'] + use_numpy_arrays = [ + "velocity_magnitude_without_devices", + "velocity_magnitude_with_devices", + "velocity_magnitude_difference", + "motility_without_devices", + "motility_with_devices", + "motility_difference", + "motility_classified", + "critical_velocity", + ] else: - use_numpy_arrays = ['velocity_magnitude_without_devices', - 'velocity_magnitude_with_devices', - 'velocity_magnitude_difference'] - - if not ((secondary_constraint_filename is None) or (secondary_constraint_filename == "")): + use_numpy_arrays = [ + "velocity_magnitude_without_devices", + "velocity_magnitude_with_devices", + "velocity_magnitude_difference", + ] + + if not ( + (secondary_constraint_filename is None) or (secondary_constraint_filename == "") + ): if not os.path.exists(secondary_constraint_filename): - raise FileNotFoundError(f"The file 
{secondary_constraint_filename} does not exist.") - rrx, rry, constraint = secondary_constraint_geotiff_to_numpy(secondary_constraint_filename) - dict_of_arrays['velocity_risk_layer'] = resample_structured_grid(rrx, rry, constraint, rx, ry, interpmethod='nearest') - use_numpy_arrays.append('velocity_risk_layer') + raise FileNotFoundError( + f"The file {secondary_constraint_filename} does not exist." + ) + rrx, rry, constraint = secondary_constraint_geotiff_to_numpy( + secondary_constraint_filename + ) + dict_of_arrays["velocity_risk_layer"] = resample_structured_grid( + rrx, rry, constraint, rx, ry, interpmethod="nearest" + ) + use_numpy_arrays.append("velocity_risk_layer") - numpy_array_names = [i + '.tif' for i in use_numpy_arrays] + numpy_array_names = [i + ".tif" for i in use_numpy_arrays] output_rasters = [] for array_name, use_numpy_array in zip(numpy_array_names, use_numpy_arrays): - - if gridtype == 'structured': + if gridtype == "structured": numpy_array = np.flip(np.transpose(dict_of_arrays[use_numpy_array]), axis=0) else: numpy_array = np.flip(dict_of_arrays[use_numpy_array], axis=0) cell_resolution = [dx, dy] if crs == 4326: - rxx = np.where(rx > 180, rx-360, rx) - bounds = [rxx.min() - dx/2, ry.max() - dy/2] + rxx = np.where(rx > 180, rx - 360, rx) + bounds = [rxx.min() - dx / 2, ry.max() - dy / 2] else: - bounds = [rx.min() - dx/2, ry.max() - dy/2] + bounds = [rx.min() - dx / 2, ry.max() - dy / 2] rows, cols = numpy_array.shape # create an ouput raster given the stressor file path output_rasters.append(os.path.join(output_path, array_name)) @@ -493,72 +590,145 @@ def run_velocity_stressor( output_raster = None # Area calculations pull form rasters to ensure uniformity - bin_layer(os.path.join(output_path, 'velocity_magnitude_difference.tif'), - receptor_filename=None, - receptor_names=None, - latlon=crs == 4326).to_csv(os.path.join(output_path, "velocity_magnitude_difference.csv"), index=False) - if not ((secondary_constraint_filename is None) or (secondary_constraint_filename == "")): - bin_layer(os.path.join(output_path, 'velocity_magnitude_difference.tif'), - receptor_filename=os.path.join(output_path, "velocity_risk_layer.tif"), - receptor_names=None, - limit_receptor_range=[0, np.inf], - latlon=crs == 4326, - receptor_type='risk layer').to_csv(os.path.join(output_path, "velocity_magnitude_difference_at_velocity_risk_layer.csv"), index=False) + bin_layer( + os.path.join(output_path, "velocity_magnitude_difference.tif"), + receptor_filename=None, + receptor_names=None, + latlon=crs == 4326, + ).to_csv( + os.path.join(output_path, "velocity_magnitude_difference.csv"), index=False + ) + if not ( + (secondary_constraint_filename is None) or (secondary_constraint_filename == "") + ): + bin_layer( + os.path.join(output_path, "velocity_magnitude_difference.tif"), + receptor_filename=os.path.join(output_path, "velocity_risk_layer.tif"), + receptor_names=None, + limit_receptor_range=[0, np.inf], + latlon=crs == 4326, + receptor_type="risk layer", + ).to_csv( + os.path.join( + output_path, "velocity_magnitude_difference_at_velocity_risk_layer.csv" + ), + index=False, + ) if not ((receptor_filename is None) or (receptor_filename == "")): - bin_layer(os.path.join(output_path, 'velocity_magnitude_difference.tif'), - receptor_filename=os.path.join(output_path, 'critical_velocity.tif'), - receptor_names=None, - limit_receptor_range=[0, np.inf], - latlon=crs == 4326, - receptor_type='critical velocity').to_csv(os.path.join(output_path, 
"velocity_magnitude_difference_at_critical_velocity.csv"), index=False) - - bin_layer(os.path.join(output_path, 'motility_difference.tif'), - receptor_filename=None, - receptor_names=None, - limit_receptor_range=[0, np.inf], - latlon=crs == 4326).to_csv(os.path.join(output_path, "motility_difference.csv"), index=False) - - bin_layer(os.path.join(output_path, 'motility_difference.tif'), - receptor_filename=os.path.join(output_path, 'critical_velocity.tif'), - receptor_names=None, - limit_receptor_range=[0, np.inf], - latlon=crs == 4326, - receptor_type='critical velocity').to_csv(os.path.join(output_path, "motility_difference_at_critical_velocity.csv"), index=False) - - classify_layer_area(os.path.join(output_path, "motility_classified.tif"), - at_values=[-3, -2, -1, 0, 1, 2, 3], - value_names=['New Deposition', 'Increased Deposition', 'Reduced Deposition', - 'No Change', 'Reduced Erosion', 'Increased Erosion', 'New Erosion'], - latlon=crs == 4326).to_csv(os.path.join(output_path, "motility_classified.csv"), index=False) - - classify_layer_area(os.path.join(output_path, "motility_classified.tif"), - receptor_filename=os.path.join(output_path, 'critical_velocity.tif'), - at_values=[-3, -2, -1, 0, 1, 2, 3], - value_names=['New Deposition', 'Increased Deposition', 'Reduced Deposition', - 'No Change', 'Reduced Erosion', 'Increased Erosion', 'New Erosion'], - limit_receptor_range=[0, np.inf], - latlon=crs == 4326, - receptor_type='critical velocity').to_csv(os.path.join(output_path, "motility_classified_at_critical_velocity.csv"), index=False) - - if not ((secondary_constraint_filename is None) or (secondary_constraint_filename == "")): - bin_layer(os.path.join(output_path, 'motility_difference.tif'), - receptor_filename=os.path.join(output_path, "velocity_risk_layer.tif"), - receptor_names=None, - limit_receptor_range=[0, np.inf], - latlon=crs == 4326, - receptor_type='risk layer').to_csv(os.path.join(output_path, "motility_difference_at_velocity_risk_layer.csv"), index=False) - - classify_layer_area_2nd_Constraint(raster_to_sample = os.path.join(output_path, "motility_classified.tif"), - secondary_constraint_filename=os.path.join(output_path, "velocity_risk_layer.tif"), - at_raster_values=[-3, -2, -1, 0, 1, 2, 3], - at_raster_value_names=['New Deposition', 'Increased Deposition', 'Reduced Deposition', - 'No Change', 'Reduced Erosion', 'Increased Erosion', 'New Erosion'], - limit_constraint_range=[0, np.inf], - latlon=crs == 4326, - receptor_type='risk layer').to_csv(os.path.join(output_path, "motility_classified_at_velocity_risk_layer.csv"), index=False) - OUTPUT = {} + bin_layer( + os.path.join(output_path, "velocity_magnitude_difference.tif"), + receptor_filename=os.path.join(output_path, "critical_velocity.tif"), + receptor_names=None, + limit_receptor_range=[0, np.inf], + latlon=crs == 4326, + receptor_type="critical velocity", + ).to_csv( + os.path.join( + output_path, "velocity_magnitude_difference_at_critical_velocity.csv" + ), + index=False, + ) + + bin_layer( + os.path.join(output_path, "motility_difference.tif"), + receptor_filename=None, + receptor_names=None, + limit_receptor_range=[0, np.inf], + latlon=crs == 4326, + ).to_csv(os.path.join(output_path, "motility_difference.csv"), index=False) + + bin_layer( + os.path.join(output_path, "motility_difference.tif"), + receptor_filename=os.path.join(output_path, "critical_velocity.tif"), + receptor_names=None, + limit_receptor_range=[0, np.inf], + latlon=crs == 4326, + receptor_type="critical velocity", + ).to_csv( + 
os.path.join(output_path, "motility_difference_at_critical_velocity.csv"), + index=False, + ) + classify_layer_area( + os.path.join(output_path, "motility_classified.tif"), + at_values=[-3, -2, -1, 0, 1, 2, 3], + value_names=[ + "New Deposition", + "Increased Deposition", + "Reduced Deposition", + "No Change", + "Reduced Erosion", + "Increased Erosion", + "New Erosion", + ], + latlon=crs == 4326, + ).to_csv(os.path.join(output_path, "motility_classified.csv"), index=False) + + classify_layer_area( + os.path.join(output_path, "motility_classified.tif"), + receptor_filename=os.path.join(output_path, "critical_velocity.tif"), + at_values=[-3, -2, -1, 0, 1, 2, 3], + value_names=[ + "New Deposition", + "Increased Deposition", + "Reduced Deposition", + "No Change", + "Reduced Erosion", + "Increased Erosion", + "New Erosion", + ], + limit_receptor_range=[0, np.inf], + latlon=crs == 4326, + receptor_type="critical velocity", + ).to_csv( + os.path.join(output_path, "motility_classified_at_critical_velocity.csv"), + index=False, + ) + + if not ( + (secondary_constraint_filename is None) + or (secondary_constraint_filename == "") + ): + bin_layer( + os.path.join(output_path, "motility_difference.tif"), + receptor_filename=os.path.join(output_path, "velocity_risk_layer.tif"), + receptor_names=None, + limit_receptor_range=[0, np.inf], + latlon=crs == 4326, + receptor_type="risk layer", + ).to_csv( + os.path.join( + output_path, "motility_difference_at_velocity_risk_layer.csv" + ), + index=False, + ) + + classify_layer_area_2nd_constraint( + raster_to_sample=os.path.join(output_path, "motility_classified.tif"), + secondary_constraint_filename=os.path.join( + output_path, "velocity_risk_layer.tif" + ), + at_raster_values=[-3, -2, -1, 0, 1, 2, 3], + at_raster_value_names=[ + "New Deposition", + "Increased Deposition", + "Reduced Deposition", + "No Change", + "Reduced Erosion", + "Increased Erosion", + "New Erosion", + ], + limit_constraint_range=[0, np.inf], + latlon=crs == 4326, + receptor_type="risk layer", + ).to_csv( + os.path.join( + output_path, "motility_classified_at_velocity_risk_layer.csv" + ), + index=False, + ) + output = {} for val in output_rasters: - OUTPUT[os.path.basename(os.path.normpath(val)).split('.')[0]] = val - return OUTPUT + output[os.path.basename(os.path.normpath(val)).split(".")[0]] = val + return output diff --git a/seat/resources.py b/seat/resources.py index 15d1d7ee..7457ac39 100644 --- a/seat/resources.py +++ b/seat/resources.py @@ -249,7 +249,7 @@ \x00\x00\x01\x8c\x35\xb7\xd5\x15\ " -qt_version = [int(v) for v in QtCore.qVersion().split('.')] +qt_version = [int(v) for v in QtCore.qVersion().split(".")] if qt_version < [5, 8, 0]: rcc_version = 1 qt_resource_struct = qt_resource_struct_v1 @@ -257,10 +257,17 @@ rcc_version = 2 qt_resource_struct = qt_resource_struct_v2 + def qInitResources(): - QtCore.qRegisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data) + QtCore.qRegisterResourceData( + rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data + ) + def qCleanupResources(): - QtCore.qUnregisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data) + QtCore.qUnregisterResourceData( + rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data + ) + qInitResources() diff --git a/seat/stressor_receptor_calc.py b/seat/stressor_receptor_calc.py index 51c4a681..65507023 100644 --- a/seat/stressor_receptor_calc.py +++ b/seat/stressor_receptor_calc.py @@ -23,7 +23,7 @@ import numpy as np import pandas 
as pd -from qgis.core import (# type: ignore +from qgis.core import ( # type: ignore Qgis, QgsApplication, QgsCoordinateReferenceSystem, @@ -33,8 +33,8 @@ QgsRasterLayer, QgsVectorLayer, QgsLayerTreeGroup, - QgsMapLayerStyleManager -)# type: ignore + QgsMapLayerStyleManager, +) # type: ignore from qgis.gui import QgsProjectionSelectionDialog # ,QgsLayerTreeView # type: ignore from qgis.PyQt.QtCore import QCoreApplication, QSettings, QTranslator # type: ignore from qgis.PyQt.QtGui import QIcon # type: ignore @@ -91,7 +91,7 @@ def __init__(self, iface): if locale: locale = locale[0:2] else: - locale = 'en' + locale = "en" locale_path = os.path.join( self.plugin_dir, "i18n", @@ -230,15 +230,12 @@ def unload(self): # End mostly boilerplate code ------ def select_folder(self): - folder_name = QFileDialog.getExistingDirectory( - self.dlg, - "Select Folder" - ) + folder_name = QFileDialog.getExistingDirectory(self.dlg, "Select Folder") return folder_name def read_style_files(self, file): data = pd.read_csv(file) - data = data.set_index('Type') + data = data.set_index("Type") return data def select_file(self, filter=""): @@ -250,13 +247,17 @@ def select_file(self, filter=""): filter, ) return filename - + def copy_shear_input_to_velocity(self): self.dlg.velocity_device_present.setText(self.dlg.shear_device_present.text()) - self.dlg.velocity_device_not_present.setText(self.dlg.shear_device_not_present.text()) - self.dlg.velocity_probabilities_file.setText(self.dlg.shear_probabilities_file.text()) + self.dlg.velocity_device_not_present.setText( + self.dlg.shear_device_not_present.text() + ) + self.dlg.velocity_probabilities_file.setText( + self.dlg.shear_probabilities_file.text() + ) self.dlg.velocity_risk_file.setText(self.dlg.shear_risk_file.text()) - + def select_crs(self): """Input the crs using the QGIS widget box.""" @@ -276,10 +277,10 @@ def test_exists(self, line_edit, fin, fin_type): line_edit.setText(fin) line_edit.setStyleSheet("color: black;") else: - line_edit.setText(f'{fin_type} not Found') - line_edit.setStyleSheet("color: red;") + line_edit.setText(f"{fin_type} not Found") + line_edit.setStyleSheet("color: red;") else: - line_edit.setStyleSheet("color: black;") + line_edit.setStyleSheet("color: black;") def select_and_load_in(self): """Select and load an input file.""" @@ -296,58 +297,64 @@ def select_and_load_in(self): config.read(filename) fin = config.get("Input", "shear stress device present filepath") - self.test_exists(self.dlg.shear_device_present, fin, 'Directory') + self.test_exists(self.dlg.shear_device_present, fin, "Directory") fin = config.get("Input", "shear stress device not present filepath") - self.test_exists(self.dlg.shear_device_not_present, fin, 'Directory') + self.test_exists(self.dlg.shear_device_not_present, fin, "Directory") fin = config.get("Input", "shear stress probabilities file") - self.test_exists(self.dlg.shear_probabilities_file, fin, 'File') + self.test_exists(self.dlg.shear_probabilities_file, fin, "File") fin = config.get("Input", "shear stress grain size file") - self.test_exists(self.dlg.shear_grain_size_file, fin, 'File') + self.test_exists(self.dlg.shear_grain_size_file, fin, "File") fin = config.get("Input", "shear stress risk layer file") - self.test_exists(self.dlg.shear_risk_file, fin, 'File') - self.dlg.shear_averaging_combobox.setCurrentText(config.get("Input", "shear stress averaging")) - + self.test_exists(self.dlg.shear_risk_file, fin, "File") + self.dlg.shear_averaging_combobox.setCurrentText( + config.get("Input", "shear stress 
averaging") + ) + fin = config.get("Input", "velocity device present filepath") - self.test_exists(self.dlg.velocity_device_present, fin, 'Directory') + self.test_exists(self.dlg.velocity_device_present, fin, "Directory") fin = config.get("Input", "velocity device not present filepath") - self.test_exists(self.dlg.velocity_device_not_present, fin, 'Directory') - fin=config.get("Input", "velocity probabilities file") - self.test_exists(self.dlg.velocity_probabilities_file, fin, 'File') - fin=config.get("Input", "velocity threshold file") - self.test_exists(self.dlg.velocity_threshold_file, fin, 'File') - fin=config.get("Input", "velocity risk layer file") - self.test_exists(self.dlg.velocity_risk_file, fin, 'File') - self.dlg.velocity_averaging_combobox.setCurrentText(config.get("Input", "velocity Averaging")) - - fin=config.get("Input", "paracousti device present filepath") - self.test_exists(self.dlg.paracousti_device_present, fin, 'Directory') + self.test_exists(self.dlg.velocity_device_not_present, fin, "Directory") + fin = config.get("Input", "velocity probabilities file") + self.test_exists(self.dlg.velocity_probabilities_file, fin, "File") + fin = config.get("Input", "velocity threshold file") + self.test_exists(self.dlg.velocity_threshold_file, fin, "File") + fin = config.get("Input", "velocity risk layer file") + self.test_exists(self.dlg.velocity_risk_file, fin, "File") + self.dlg.velocity_averaging_combobox.setCurrentText( + config.get("Input", "velocity Averaging") + ) + + fin = config.get("Input", "paracousti device present filepath") + self.test_exists(self.dlg.paracousti_device_present, fin, "Directory") fin = config.get("Input", "paracousti device not present filepath") - self.test_exists(self.dlg.paracousti_device_not_present, fin, 'Directory') + self.test_exists(self.dlg.paracousti_device_not_present, fin, "Directory") fin = config.get("Input", "paracousti probabilities file") - self.test_exists(self.dlg.paracousti_probabilities_file, fin, 'File') + self.test_exists(self.dlg.paracousti_probabilities_file, fin, "File") fin = config.get("Input", "paracousti threshold file") - self.test_exists(self.dlg.paracousti_threshold_file, fin, 'File') + self.test_exists(self.dlg.paracousti_threshold_file, fin, "File") fin = config.get("Input", "paracousti risk layer file") - self.test_exists(self.dlg.paracousti_risk_file, fin, 'File') + self.test_exists(self.dlg.paracousti_risk_file, fin, "File") fin = config.get("Input", "paracousti species filepath") - self.test_exists(self.dlg.paracousti_species_directory, fin, 'Directory') - self.dlg.paracousti_averaging_combobox.setCurrentText(config.get("Input", "paracousti averaging")) - + self.test_exists(self.dlg.paracousti_species_directory, fin, "Directory") + self.dlg.paracousti_averaging_combobox.setCurrentText( + config.get("Input", "paracousti averaging") + ) + fin = config.get("Input", "power files filepath") - self.test_exists(self.dlg.power_files, fin, 'Directory') + self.test_exists(self.dlg.power_files, fin, "Directory") fin = config.get("Input", "power probabilities file") - self.test_exists(self.dlg.power_probabilities_file, fin, 'File') + self.test_exists(self.dlg.power_probabilities_file, fin, "File") self.dlg.crs.setText(config.get("Input", "coordinate reference system")) self.dlg.output_folder.setText(config.get("Output", "output filepath")) fin = config.get("Input", "output style files") - self.test_exists(self.dlg.output_stylefile, fin, 'File') + self.test_exists(self.dlg.output_stylefile, fin, "File") - if 'config' in 
locals(): #prevents error if window to closed without running + if "config" in locals(): # prevents error if window to closed without running config.clear() - + def save_in(self): """Select and save an input file.""" filename, _filter = QFileDialog.getSaveFileName( @@ -363,8 +370,8 @@ def save_in(self): config["Input"] = { "shear stress device present filepath": self.dlg.shear_device_present.text(), "shear stress device not present filepath": self.dlg.shear_device_not_present.text(), - "shear stress averaging": self.dlg.shear_averaging_combobox.currentText(), - "shear stress probabilities file": self.dlg.shear_probabilities_file.text(), + "shear stress averaging": self.dlg.shear_averaging_combobox.currentText(), + "shear stress probabilities file": self.dlg.shear_probabilities_file.text(), "shear stress grain size file": self.dlg.shear_grain_size_file.text(), "shear stress risk layer file": self.dlg.shear_risk_file.text(), "velocity device present filepath": self.dlg.velocity_device_present.text(), @@ -372,17 +379,16 @@ def save_in(self): "velocity averaging": self.dlg.velocity_averaging_combobox.currentText(), "velocity probabilities file": self.dlg.velocity_probabilities_file.text(), "velocity threshold file": self.dlg.velocity_threshold_file.text(), - "velocity risk layer file": self.dlg.velocity_risk_file.text(), + "velocity risk layer file": self.dlg.velocity_risk_file.text(), "paracousti device present filepath": self.dlg.paracousti_device_present.text(), - "paracousti device not present filepath": self.dlg.paracousti_device_not_present.text(), + "paracousti device not present filepath": self.dlg.paracousti_device_not_present.text(), "paracousti averaging": self.dlg.paracousti_averaging_combobox.currentText(), "paracousti probabilities file": self.dlg.paracousti_probabilities_file.text(), "paracousti threshold file": self.dlg.paracousti_threshold_file.text(), "paracousti risk layer file": self.dlg.paracousti_risk_file.text(), - "paracousti species filepath" : self.dlg.paracousti_species_directory.text(), + "paracousti species filepath": self.dlg.paracousti_species_directory.text(), "power files filepath": self.dlg.power_files.text(), "power probabilities file": self.dlg.power_probabilities_file.text(), - "coordinate reference system": self.dlg.crs.text(), "output style files": self.dlg.output_stylefile.text(), } @@ -391,8 +397,8 @@ def save_in(self): with open(filename, "w") as configfile: config.write(configfile) - - def add_layer(self, fpath, root=None, group=None): + + def add_layer(self, fpath, root=None, group=None): basename = os.path.splitext(os.path.basename(fpath))[0] if group is not None: vlayer = QgsRasterLayer(fpath, basename) @@ -402,9 +408,11 @@ def add_layer(self, fpath, root=None, group=None): group.insertChildNode(0, clone) root.removeChildNode(layer) else: - layer = QgsProject.instance().addMapLayer(QgsRasterLayer(fpath, basename)) + layer = QgsProject.instance().addMapLayer(QgsRasterLayer(fpath, basename)) - def style_layer(self, fpath, stylepath=None, root=None, group=None):#, ranges=True): + def style_layer( + self, fpath, stylepath=None, root=None, group=None + ): # , ranges=True): """Style and add the result layer to map.""" basename = os.path.splitext(os.path.basename(fpath))[0] if group is not None: @@ -414,102 +422,101 @@ def style_layer(self, fpath, stylepath=None, root=None, group=None):#, ranges=Tr if stylepath is not None: vlayer.loadNamedStyle(stylepath) vlayer.triggerRepaint() - vlayer.reload() + vlayer.reload() layer = root.findLayer(vlayer.id()) 
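The style_layer/add_layer logic above follows a common PyQGIS pattern: add the raster to the project, apply a .qml style, then move the resulting layer-tree node under a group. A hedged sketch of that pattern, assuming it runs inside QGIS with a project open (the group name and paths are placeholders), is:

import os
from qgis.core import QgsProject, QgsRasterLayer

def add_styled_raster_to_group(fpath, stylepath, group_name):
    project = QgsProject.instance()
    root = project.layerTreeRoot()
    group = root.findGroup(group_name) or root.addGroup(group_name)

    basename = os.path.splitext(os.path.basename(fpath))[0]
    vlayer = project.addMapLayer(QgsRasterLayer(fpath, basename))
    if stylepath:
        vlayer.loadNamedStyle(stylepath)
        vlayer.triggerRepaint()

    node = root.findLayer(vlayer.id())
    group.insertChildNode(0, node.clone())  # copy the node into the group...
    root.removeChildNode(node)              # ...and drop the original top-level node
    return vlayer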
clone = layer.clone() group.insertChildNode(0, clone) root.removeChildNode(layer) else: - layer = QgsProject.instance().addMapLayer(QgsRasterLayer(fpath, basename)) + layer = QgsProject.instance().addMapLayer(QgsRasterLayer(fpath, basename)) layer.loadNamedStyle(stylepath) layer.triggerRepaint() - layer.reload() - # refresh legend entries + layer.reload() + # refresh legend entries self.iface.layerTreeView().refreshLayerSymbology(layer.id()) def select_folder_module(self, module=None, option=None): directory = self.select_folder() - if module=='shear': - if option=='device_present': + if module == "shear": + if option == "device_present": self.dlg.shear_device_present.setText(directory) self.dlg.shear_device_present.setStyleSheet("color: black;") - if option=="device_not_present": + if option == "device_not_present": self.dlg.shear_device_not_present.setText(directory) self.dlg.shear_device_not_present.setStyleSheet("color: black;") - if module=='velocity': - if option=='device_present': + if module == "velocity": + if option == "device_present": self.dlg.velocity_device_present.setText(directory) self.dlg.velocity_device_present.setStyleSheet("color: black;") - if option=="device_not_present": + if option == "device_not_present": self.dlg.velocity_device_not_present.setText(directory) self.dlg.velocity_device_not_present.setStyleSheet("color: black;") - if module=='paracousti': - if option=='device_present': + if module == "paracousti": + if option == "device_present": self.dlg.paracousti_device_present.setText(directory) self.dlg.paracousti_device_present.setStyleSheet("color: black;") - if option=="device_not_present": - self.dlg.paracousti_device_not_present.setText(directory) - self.dlg.paracousti_device_not_present.setStyleSheet("color: black;") - if option=='species_directory': + if option == "device_not_present": + self.dlg.paracousti_device_not_present.setText(directory) + self.dlg.paracousti_device_not_present.setStyleSheet("color: black;") + if option == "species_directory": self.dlg.paracousti_species_directory.setText(directory) self.dlg.paracousti_species_directory.setStyleSheet("color: black;") - if module=='power': + if module == "power": self.dlg.power_files.setText(directory) self.dlg.power_files.setStyleSheet("color: black;") - if module=='output': - self.dlg.output_folder.setText(directory) + if module == "output": + self.dlg.output_folder.setText(directory) self.dlg.output_folder.setStyleSheet("color: black;") def select_files_module(self, module=None, option=None): - if module=='shear': - if option=='probabilities_file': + if module == "shear": + if option == "probabilities_file": file = self.select_file(filter="*.csv") - self.dlg.shear_probabilities_file.setText(file) + self.dlg.shear_probabilities_file.setText(file) self.dlg.shear_probabilities_file.setStyleSheet("color: black;") - if option=="grain_size_file": + if option == "grain_size_file": file = self.select_file(filter="*.tif; *.csv") - self.dlg.shear_grain_size_file.setText(file) + self.dlg.shear_grain_size_file.setText(file) self.dlg.shear_grain_size_file.setStyleSheet("color: black;") - if option=="risk_file": + if option == "risk_file": file = self.select_file(filter="*.tif") - self.dlg.shear_risk_file.setText(file) - self.dlg.shear_risk_file.setStyleSheet("color: black;") - if module=='velocity': - if option=='probabilities_file': + self.dlg.shear_risk_file.setText(file) + self.dlg.shear_risk_file.setStyleSheet("color: black;") + if module == "velocity": + if option == "probabilities_file": file = 
self.select_file(filter="*.csv") - self.dlg.velocity_probabilities_file.setText(file) + self.dlg.velocity_probabilities_file.setText(file) self.dlg.velocity_probabilities_file.setStyleSheet("color: black;") - if option=="thresholds": + if option == "thresholds": file = self.select_file(filter="*.tif; *.csv") - self.dlg.velocity_threshold_file.setText(file) + self.dlg.velocity_threshold_file.setText(file) self.dlg.velocity_threshold_file.setStyleSheet("color: black;") - if option=="risk_file": + if option == "risk_file": file = self.select_file(filter="*.tif") - self.dlg.velocity_risk_file.setText(file) - self.dlg.velocity_risk_file.setStyleSheet("color: black;") - if module=='paracousti': - if option=='probabilities_file': + self.dlg.velocity_risk_file.setText(file) + self.dlg.velocity_risk_file.setStyleSheet("color: black;") + if module == "paracousti": + if option == "probabilities_file": file = self.select_file(filter="*.csv") - self.dlg.paracousti_probabilities_file.setText(file) + self.dlg.paracousti_probabilities_file.setText(file) self.dlg.paracousti_probabilities_file.setStyleSheet("color: black;") - if option=="thresholds": + if option == "thresholds": file = self.select_file(filter="*.csv") - self.dlg.paracousti_threshold_file.setText(file) + self.dlg.paracousti_threshold_file.setText(file) self.dlg.paracousti_threshold_file.setStyleSheet("color: black;") - if option=="risk_file": + if option == "risk_file": file = self.select_file(filter="*.tif") - self.dlg.paracousti_risk_file.setText(file) + self.dlg.paracousti_risk_file.setText(file) self.dlg.paracousti_risk_file.setStyleSheet("color: black;") - if module=='power': + if module == "power": file = self.select_file(filter="*.csv") - self.dlg.power_probabilities_file.setText(file) + self.dlg.power_probabilities_file.setText(file) self.dlg.power_probabilities_file.setStyleSheet("color: black;") - if module=='style_files': + if module == "style_files": file = self.select_file(filter="*.csv") - self.dlg.output_stylefile.setText(file) + self.dlg.output_stylefile.setText(file) self.dlg.output_stylefile.setStyleSheet("color: black;") - - + def run(self): """Run method that performs all the real work.""" @@ -519,90 +526,148 @@ def run(self): self.first_start = False self.dlg = StressorReceptorCalcDialog() - shear_average_fields = [ - "Maximum", - "Mean", - "Final Timestep" - ] + shear_average_fields = ["Maximum", "Mean", "Final Timestep"] self.dlg.shear_averaging_combobox.addItems(shear_average_fields) - velocity_average_fields = [ - "Maximum", - "Mean", - "Final Timestep" - ] + velocity_average_fields = ["Maximum", "Mean", "Final Timestep"] self.dlg.velocity_averaging_combobox.addItems(velocity_average_fields) paracousti_average_fields = [ "Depth Maximum", "Depth Average", "Bottom Bin", - "Top Bin" + "Top Bin", ] self.dlg.paracousti_averaging_combobox.addItems(paracousti_average_fields) # this connects the input file chooser - self.dlg.load_input.clicked.connect( - lambda: self.select_and_load_in()) + self.dlg.load_input.clicked.connect(lambda: self.select_and_load_in()) # this connects the input file creator self.dlg.save_input.clicked.connect(lambda: self.save_in()) - # set the present and not present files. 
Either .nc files or .tif folders - - #directories - self.dlg.shear_device_pushButton.clicked.connect(lambda: self.select_folder_module(module="shear", option="device_present")) - self.dlg.shear_no_device_pushButton.clicked.connect(lambda: self.select_folder_module(module="shear", option="device_not_present")) - self.dlg.velocity_device_pushButton.clicked.connect(lambda: self.select_folder_module(module="velocity", option="device_present")) - self.dlg.velocity_no_device_pushButton.clicked.connect(lambda: self.select_folder_module(module="velocity", option="device_not_present")) - self.dlg.paracousti_device_pushButton.clicked.connect(lambda: self.select_folder_module(module="paracousti", option="device_present")) - self.dlg.paracousti_no_device_pushButton.clicked.connect(lambda: self.select_folder_module(module="paracousti", option="device_not_present")) - self.dlg.paracousti_species_directory_button.clicked.connect(lambda: self.select_folder_module(module="paracousti", option="species_directory")) - self.dlg.power_files_pushButton.clicked.connect(lambda: self.select_folder_module(module="power")) - self.dlg.output_pushButton.clicked.connect(lambda: self.select_folder_module(module="output")) - - #files - self.dlg.shear_probabilities_pushButton.clicked.connect(lambda: self.select_files_module(module='shear', option='probabilities_file')) - self.dlg.shear_grain_size_button.clicked.connect(lambda: self.select_files_module(module='shear', option='grain_size_file')) - self.dlg.shear_risk_pushButton.clicked.connect(lambda: self.select_files_module(module='shear', option='risk_file')) - self.dlg.velocity_probabilities_pushButton.clicked.connect(lambda: self.select_files_module(module='velocity', option='probabilities_file')) - self.dlg.velocity_threshold_button.clicked.connect(lambda: self.select_files_module(module='velocity', option='thresholds')) - self.dlg.velocity_risk_pushButton.clicked.connect(lambda: self.select_files_module(module='velocity', option='risk_file')) - self.dlg.paracousti_probabilities_pushButton.clicked.connect(lambda: self.select_files_module(module='paracousti', option='probabilities_file')) - self.dlg.paracousti_threshold_button.clicked.connect(lambda: self.select_files_module(module='paracousti', option='thresholds')) - self.dlg.paracousti_risk_pushButton.clicked.connect(lambda: self.select_files_module(module='paracousti', option='risk_file')) - self.dlg.power_probabilities_pushButton.clicked.connect(lambda: self.select_files_module(module='power')) - self.dlg.select_stylefile_button.clicked.connect(lambda: self.select_files_module(module='style_files')) - - self.dlg.copy_shear_to_velocity_button.clicked.connect(self.copy_shear_input_to_velocity) + + # directories + self.dlg.shear_device_pushButton.clicked.connect( + lambda: self.select_folder_module( + module="shear", option="device_present" + ) + ) + self.dlg.shear_no_device_pushButton.clicked.connect( + lambda: self.select_folder_module( + module="shear", option="device_not_present" + ) + ) + self.dlg.velocity_device_pushButton.clicked.connect( + lambda: self.select_folder_module( + module="velocity", option="device_present" + ) + ) + self.dlg.velocity_no_device_pushButton.clicked.connect( + lambda: self.select_folder_module( + module="velocity", option="device_not_present" + ) + ) + self.dlg.paracousti_device_pushButton.clicked.connect( + lambda: self.select_folder_module( + module="paracousti", option="device_present" + ) + ) + self.dlg.paracousti_no_device_pushButton.clicked.connect( + lambda: 
self.select_folder_module( + module="paracousti", option="device_not_present" + ) + ) + self.dlg.paracousti_species_directory_button.clicked.connect( + lambda: self.select_folder_module( + module="paracousti", option="species_directory" + ) + ) + self.dlg.power_files_pushButton.clicked.connect( + lambda: self.select_folder_module(module="power") + ) + self.dlg.output_pushButton.clicked.connect( + lambda: self.select_folder_module(module="output") + ) + + # files + self.dlg.shear_probabilities_pushButton.clicked.connect( + lambda: self.select_files_module( + module="shear", option="probabilities_file" + ) + ) + self.dlg.shear_grain_size_button.clicked.connect( + lambda: self.select_files_module( + module="shear", option="grain_size_file" + ) + ) + self.dlg.shear_risk_pushButton.clicked.connect( + lambda: self.select_files_module(module="shear", option="risk_file") + ) + self.dlg.velocity_probabilities_pushButton.clicked.connect( + lambda: self.select_files_module( + module="velocity", option="probabilities_file" + ) + ) + self.dlg.velocity_threshold_button.clicked.connect( + lambda: self.select_files_module(module="velocity", option="thresholds") + ) + self.dlg.velocity_risk_pushButton.clicked.connect( + lambda: self.select_files_module(module="velocity", option="risk_file") + ) + self.dlg.paracousti_probabilities_pushButton.clicked.connect( + lambda: self.select_files_module( + module="paracousti", option="probabilities_file" + ) + ) + self.dlg.paracousti_threshold_button.clicked.connect( + lambda: self.select_files_module( + module="paracousti", option="thresholds" + ) + ) + self.dlg.paracousti_risk_pushButton.clicked.connect( + lambda: self.select_files_module( + module="paracousti", option="risk_file" + ) + ) + self.dlg.power_probabilities_pushButton.clicked.connect( + lambda: self.select_files_module(module="power") + ) + self.dlg.select_stylefile_button.clicked.connect( + lambda: self.select_files_module(module="style_files") + ) + + self.dlg.copy_shear_to_velocity_button.clicked.connect( + self.copy_shear_input_to_velocity + ) self.dlg.crs_button.clicked.connect(self.select_crs) - + self.dlg.shear_device_present.clear() self.dlg.velocity_device_present.clear() self.dlg.paracousti_device_present.clear() self.dlg.power_files.clear() - + self.dlg.shear_device_not_present.clear() - self.dlg.velocity_device_not_present.clear() + self.dlg.velocity_device_not_present.clear() self.dlg.paracousti_device_not_present.clear() self.dlg.shear_probabilities_file.clear() self.dlg.velocity_probabilities_file.clear() - self.dlg.paracousti_probabilities_file.clear() - self.dlg.power_probabilities_file.clear() + self.dlg.paracousti_probabilities_file.clear() + self.dlg.power_probabilities_file.clear() self.dlg.shear_grain_size_file.clear() self.dlg.velocity_threshold_file.clear() self.dlg.paracousti_threshold_file.clear() - + self.dlg.shear_risk_file.clear() self.dlg.velocity_risk_file.clear() self.dlg.paracousti_risk_file.clear() self.dlg.paracousti_species_directory.clear() - - self.dlg.crs.clear() + + self.dlg.crs.clear() self.dlg.output_folder.clear() self.dlg.output_stylefile.clear() @@ -617,131 +682,237 @@ def run(self): # this grabs the files for input and output # TODO Remove these and just query the dlg directly when needed shear_stress_device_present_directory = self.dlg.shear_device_present.text() - if not ((shear_stress_device_present_directory is None) or (shear_stress_device_present_directory == "")): + if not ( + (shear_stress_device_present_directory is None) + or 
(shear_stress_device_present_directory == "") + ): if not os.path.exists(shear_stress_device_present_directory): - raise FileNotFoundError(f"The directory {shear_stress_device_present_directory} does not exist.") + raise FileNotFoundError( + f"The directory {shear_stress_device_present_directory} does not exist." + ) velocity_device_present_directory = self.dlg.velocity_device_present.text() - if not ((velocity_device_present_directory is None) or (velocity_device_present_directory == "")): + if not ( + (velocity_device_present_directory is None) + or (velocity_device_present_directory == "") + ): if not os.path.exists(velocity_device_present_directory): - raise FileNotFoundError(f"The directory {velocity_device_present_directory} does not exist.") - paracousti_device_present_directory = self.dlg.paracousti_device_present.text() - if not ((paracousti_device_present_directory is None) or (paracousti_device_present_directory == "")): + raise FileNotFoundError( + f"The directory {velocity_device_present_directory} does not exist." + ) + paracousti_device_present_directory = ( + self.dlg.paracousti_device_present.text() + ) + if not ( + (paracousti_device_present_directory is None) + or (paracousti_device_present_directory == "") + ): if not os.path.exists(paracousti_device_present_directory): - raise FileNotFoundError(f"The directory {paracousti_device_present_directory} does not exist.") + raise FileNotFoundError( + f"The directory {paracousti_device_present_directory} does not exist." + ) power_files_directory = self.dlg.power_files.text() if not ((power_files_directory is None) or (power_files_directory == "")): if not os.path.exists(power_files_directory): - raise FileNotFoundError(f"The directory {power_files_directory} does not exist.") - - shear_stress_device_not_present_directory = self.dlg.shear_device_not_present.text() - if not ((shear_stress_device_not_present_directory is None) or (shear_stress_device_not_present_directory == "")): + raise FileNotFoundError( + f"The directory {power_files_directory} does not exist." + ) + + shear_stress_device_not_present_directory = ( + self.dlg.shear_device_not_present.text() + ) + if not ( + (shear_stress_device_not_present_directory is None) + or (shear_stress_device_not_present_directory == "") + ): if not os.path.exists(shear_stress_device_not_present_directory): - raise FileNotFoundError(f"The directory {shear_stress_device_not_present_directory} does not exist.") - velocity_device_not_present_directory = self.dlg.velocity_device_not_present.text() - if not ((velocity_device_not_present_directory is None) or (velocity_device_not_present_directory == "")): + raise FileNotFoundError( + f"The directory {shear_stress_device_not_present_directory} does not exist." + ) + velocity_device_not_present_directory = ( + self.dlg.velocity_device_not_present.text() + ) + if not ( + (velocity_device_not_present_directory is None) + or (velocity_device_not_present_directory == "") + ): if not os.path.exists(velocity_device_not_present_directory): - raise FileNotFoundError(f"The directory {velocity_device_not_present_directory} does not exist.") - paracousti_device_not_present_directory = self.dlg.paracousti_device_not_present.text() - if not ((paracousti_device_not_present_directory is None) or (paracousti_device_not_present_directory == "")): + raise FileNotFoundError( + f"The directory {velocity_device_not_present_directory} does not exist." 
+ ) + paracousti_device_not_present_directory = ( + self.dlg.paracousti_device_not_present.text() + ) + if not ( + (paracousti_device_not_present_directory is None) + or (paracousti_device_not_present_directory == "") + ): if not os.path.exists(paracousti_device_not_present_directory): - raise FileNotFoundError(f"The directory {paracousti_device_not_present_directory} does not exist.") - + raise FileNotFoundError( + f"The directory {paracousti_device_not_present_directory} does not exist." + ) + shear_stress_probabilities_fname = self.dlg.shear_probabilities_file.text() - if not ((shear_stress_probabilities_fname is None) or (shear_stress_probabilities_fname == "")): + if not ( + (shear_stress_probabilities_fname is None) + or (shear_stress_probabilities_fname == "") + ): if not os.path.exists(shear_stress_probabilities_fname): - raise FileNotFoundError(f"The file {shear_stress_probabilities_fname} does not exist.") + raise FileNotFoundError( + f"The file {shear_stress_probabilities_fname} does not exist." + ) velocity_probabilities_fname = self.dlg.velocity_probabilities_file.text() - if not ((velocity_probabilities_fname is None) or (velocity_probabilities_fname == "")): + if not ( + (velocity_probabilities_fname is None) + or (velocity_probabilities_fname == "") + ): if not os.path.exists(velocity_probabilities_fname): - raise FileNotFoundError(f"The file {velocity_probabilities_fname} does not exist.") - paracousti_probabilities_fname = self.dlg.paracousti_probabilities_file.text() - if not ((paracousti_probabilities_fname is None) or (paracousti_probabilities_fname == "")): + raise FileNotFoundError( + f"The file {velocity_probabilities_fname} does not exist." + ) + paracousti_probabilities_fname = ( + self.dlg.paracousti_probabilities_file.text() + ) + if not ( + (paracousti_probabilities_fname is None) + or (paracousti_probabilities_fname == "") + ): if not os.path.exists(paracousti_probabilities_fname): - raise FileNotFoundError(f"The file {paracousti_probabilities_fname} does not exist.") - power_probabilities_fname = self.dlg.power_probabilities_file.text() - if not ((power_probabilities_fname is None) or (power_probabilities_fname == "")): + raise FileNotFoundError( + f"The file {paracousti_probabilities_fname} does not exist." + ) + power_probabilities_fname = self.dlg.power_probabilities_file.text() + if not ( + (power_probabilities_fname is None) or (power_probabilities_fname == "") + ): if not os.path.exists(power_probabilities_fname): - raise FileNotFoundError(f"The file {power_probabilities_fname} does not exist.") + raise FileNotFoundError( + f"The file {power_probabilities_fname} does not exist." + ) - shear_grain_size_file = self.dlg.shear_grain_size_file.text() + shear_grain_size_file = self.dlg.shear_grain_size_file.text() if not ((shear_grain_size_file is None) or (shear_grain_size_file == "")): if not os.path.exists(shear_grain_size_file): - raise FileNotFoundError(f"The file {shear_grain_size_file} does not exist.") - velocity_threshold_file = self.dlg.velocity_threshold_file.text() - if not ((velocity_threshold_file is None) or (velocity_threshold_file == "")): + raise FileNotFoundError( + f"The file {shear_grain_size_file} does not exist." 
+ ) + velocity_threshold_file = self.dlg.velocity_threshold_file.text() + if not ( + (velocity_threshold_file is None) or (velocity_threshold_file == "") + ): if not os.path.exists(velocity_threshold_file): - raise FileNotFoundError(f"The file {velocity_threshold_file} does not exist.") - paracousti_threshold_file = self.dlg.paracousti_threshold_file.text() - if not ((paracousti_threshold_file is None) or (paracousti_threshold_file == "")): + raise FileNotFoundError( + f"The file {velocity_threshold_file} does not exist." + ) + paracousti_threshold_file = self.dlg.paracousti_threshold_file.text() + if not ( + (paracousti_threshold_file is None) or (paracousti_threshold_file == "") + ): if not os.path.exists(paracousti_threshold_file): - raise FileNotFoundError(f"The file {paracousti_threshold_file} does not exist.") - - shear_risk_layer_file = self.dlg.shear_risk_file.text() + raise FileNotFoundError( + f"The file {paracousti_threshold_file} does not exist." + ) + + shear_risk_layer_file = self.dlg.shear_risk_file.text() if not ((shear_risk_layer_file is None) or (shear_risk_layer_file == "")): if not os.path.exists(shear_risk_layer_file): - raise FileNotFoundError(f"The file {shear_risk_layer_file} does not exist.") - velocity_risk_layer_file = self.dlg.velocity_risk_file.text() - if not ((velocity_risk_layer_file is None) or (velocity_risk_layer_file == "")): + raise FileNotFoundError( + f"The file {shear_risk_layer_file} does not exist." + ) + velocity_risk_layer_file = self.dlg.velocity_risk_file.text() + if not ( + (velocity_risk_layer_file is None) or (velocity_risk_layer_file == "") + ): if not os.path.exists(velocity_risk_layer_file): - raise FileNotFoundError(f"The file {velocity_risk_layer_file} does not exist.") - paracousti_risk_layer_file = self.dlg.paracousti_risk_file.text() - if not ((paracousti_risk_layer_file is None) or (paracousti_risk_layer_file == "")): + raise FileNotFoundError( + f"The file {velocity_risk_layer_file} does not exist." + ) + paracousti_risk_layer_file = self.dlg.paracousti_risk_file.text() + if not ( + (paracousti_risk_layer_file is None) + or (paracousti_risk_layer_file == "") + ): if not os.path.exists(paracousti_risk_layer_file): - raise FileNotFoundError(f"The file {paracousti_risk_layer_file} does not exist.") - - paracousti_species_directory = self.dlg.paracousti_species_directory.text() - if not ((paracousti_species_directory is None) or (paracousti_species_directory == "")): + raise FileNotFoundError( + f"The file {paracousti_risk_layer_file} does not exist." + ) + + paracousti_species_directory = self.dlg.paracousti_species_directory.text() + if not ( + (paracousti_species_directory is None) + or (paracousti_species_directory == "") + ): if not os.path.exists(paracousti_species_directory): - raise FileNotFoundError(f"The directory {paracousti_species_directory} does not exist.") - - shear_stress_averaging = self.dlg.shear_averaging_combobox.currentText() - velocity_averaging = self.dlg.velocity_averaging_combobox.currentText() - paracousti_averaging = self.dlg.paracousti_averaging_combobox.currentText() - + raise FileNotFoundError( + f"The directory {paracousti_species_directory} does not exist." 
+ ) + + shear_stress_averaging = self.dlg.shear_averaging_combobox.currentText() + velocity_averaging = self.dlg.velocity_averaging_combobox.currentText() + paracousti_averaging = self.dlg.paracousti_averaging_combobox.currentText() + output_folder_name = self.dlg.output_folder.text() - os.makedirs(output_folder_name, exist_ok=True) # create output directory if it doesn't exist + os.makedirs( + output_folder_name, exist_ok=True + ) # create output directory if it doesn't exist crs = int(self.dlg.crs.text()) - + # need to add check to leave empty if not present then apply default values - if not ((self.dlg.output_stylefile.text() is None) or (self.dlg.output_stylefile.text() == "")): + if not ( + (self.dlg.output_stylefile.text() is None) + or (self.dlg.output_stylefile.text() == "") + ): if not os.path.exists(self.dlg.output_stylefile.text()): - raise FileNotFoundError(f"The file {self.dlg.output_stylefile.text()} does not exist.") + raise FileNotFoundError( + f"The file {self.dlg.output_stylefile.text()} does not exist." + ) stylefiles_DF = self.read_style_files(self.dlg.output_stylefile.text()) else: stylefiles_DF = None - initialize_group = True - + initialize_group = True + # if the output file path is empty display a warning if output_folder_name == "": - QgsMessageLog.logMessage("Output file path not given.", level=Qgis.MessageLevel.Warnin) + QgsMessageLog.logMessage( + "Output file path not given.", level=Qgis.MessageLevel.Warning + ) # Run Power Module if power_files_directory != "": - if ((power_probabilities_fname is None) or (power_probabilities_fname == "")): - power_probabilities_fname = shear_stress_probabilities_fname #default to shear stress probabilities if none given - calculate_power(power_files_directory, - power_probabilities_fname, - save_path=os.path.join(output_folder_name, 'Power Module'), - crs=crs) - - # Run Shear Stress Module - if not ((shear_stress_device_present_directory is None) or (shear_stress_device_present_directory == "")): # svar == "Shear Stress": + if (power_probabilities_fname is None) or ( + power_probabilities_fname == "" + ): + power_probabilities_fname = shear_stress_probabilities_fname # default to shear stress probabilities if none given + calculate_power( + power_files_directory, + power_probabilities_fname, + save_path=os.path.join(output_folder_name, "Power Module"), + crs=crs, + ) + + # Run Shear Stress Module + if not ( + (shear_stress_device_present_directory is None) + or (shear_stress_device_present_directory == "") + ): # svar == "Shear Stress": sfilenames = run_shear_stress_stressor( dev_present_file=shear_stress_device_present_directory, dev_notpresent_file=shear_stress_device_not_present_directory, probabilities_file=shear_stress_probabilities_fname, crs=crs, - output_path=os.path.join(output_folder_name, 'Shear Stress Module'), + output_path=os.path.join(output_folder_name, "Shear Stress Module"), receptor_filename=shear_grain_size_file, secondary_constraint_filename=shear_risk_layer_file, - value_selection=shear_stress_averaging) + value_selection=shear_stress_averaging, + ) if initialize_group: root = QgsProject.instance().layerTreeRoot() group = root.addGroup("temporary") - self.add_layer(sfilenames[list(sfilenames.keys())[0]], root=root, group=group) + self.add_layer( + sfilenames[list(sfilenames.keys())[0]], root=root, group=group + ) initialize_group = False group_name = "Shear Stress Stressor" @@ -749,79 +920,103 @@ def run(self): group = root.findGroup(group_name) if group is None: group = root.addGroup(group_name) - for
key in sfilenames.keys(): #add styles files and/or display + for key in sfilenames.keys(): # add styles files and/or display if stylefiles_DF is None: self.add_layer(sfilenames[key], root=root, group=group) else: - self.style_layer(sfilenames[key], stylefiles_DF.loc[key].item(), root=root, group=group) + self.style_layer( + sfilenames[key], + stylefiles_DF.loc[key].item(), + root=root, + group=group, + ) # Run Velocity Module - if not ((velocity_device_present_directory is None) or (velocity_device_present_directory == "")): # svar == "Velocity": - + if not ( + (velocity_device_present_directory is None) + or (velocity_device_present_directory == "") + ): # svar == "Velocity": vfilenames = run_velocity_stressor( dev_present_file=velocity_device_present_directory, dev_notpresent_file=velocity_device_not_present_directory, probabilities_file=velocity_probabilities_fname, crs=crs, - output_path=os.path.join(output_folder_name, 'Velocity Module'), + output_path=os.path.join(output_folder_name, "Velocity Module"), receptor_filename=velocity_threshold_file, secondary_constraint_filename=velocity_risk_layer_file, - value_selection=velocity_averaging) + value_selection=velocity_averaging, + ) if initialize_group: root = QgsProject.instance().layerTreeRoot() group = root.addGroup("temporary") - self.add_layer(vfilenames[list(vfilenames.keys())[0]], root=root, group=group) - initialize_group = False - + self.add_layer( + vfilenames[list(vfilenames.keys())[0]], root=root, group=group + ) + initialize_group = False + group_name = "Velocity Stressor" root = QgsProject.instance().layerTreeRoot() group = root.findGroup(group_name) if group is None: group = root.addGroup(group_name) - for key in vfilenames.keys(): #add styles files and/or display + for key in vfilenames.keys(): # add styles files and/or display if stylefiles_DF is None: self.add_layer(vfilenames[key], root=root, group=group) else: - self.style_layer(vfilenames[key] , stylefiles_DF.loc[key].item(), root=root, group=group) + self.style_layer( + vfilenames[key], + stylefiles_DF.loc[key].item(), + root=root, + group=group, + ) # Run Acoustics Module - if not ((paracousti_device_present_directory is None) or (paracousti_device_present_directory == "")): # if svar == "Acoustics": - + if not ( + (paracousti_device_present_directory is None) + or (paracousti_device_present_directory == "") + ): # if svar == "Acoustics": pfilenames = run_acoustics_stressor( dev_present_file=paracousti_device_present_directory, dev_notpresent_file=paracousti_device_not_present_directory, probabilities_file=paracousti_probabilities_fname, crs=crs, - output_path=os.path.join(output_folder_name, 'Acoustics Module'), + output_path=os.path.join(output_folder_name, "Acoustics Module"), receptor_filename=paracousti_threshold_file, species_folder=paracousti_species_directory, - Averaging = paracousti_averaging, - secondary_constraint_filename=paracousti_risk_layer_file) - + Averaging=paracousti_averaging, + secondary_constraint_filename=paracousti_risk_layer_file, + ) + if initialize_group: root = QgsProject.instance().layerTreeRoot() group = root.addGroup("temporary") - self.add_layer(pfilenames[list(pfilenames.keys())[0]], root=root, group=group) + self.add_layer( + pfilenames[list(pfilenames.keys())[0]], root=root, group=group + ) initialize_group = False - + group_name = "Acoustic Stressor" root = QgsProject.instance().layerTreeRoot() group = root.findGroup(group_name) if group is None: group = root.addGroup(group_name) - for key in pfilenames.keys(): #add styles files 
and/or display - + for key in pfilenames.keys(): # add styles files and/or display if stylefiles_DF is None: self.add_layer(pfilenames[key], root=root, group=group) else: - self.style_layer(pfilenames[key] , stylefiles_DF.loc[key].item(), root=root, group=group) - - #remove temproary layer group + self.style_layer( + pfilenames[key], + stylefiles_DF.loc[key].item(), + root=root, + group=group, + ) + + # remove temporary layer group root = QgsProject.instance().layerTreeRoot() group_layer = root.findGroup("temporary") if group_layer is not None: root.removeChildNode(group_layer) - + # close and remove the filehandler - # fh.close() \ No newline at end of file + # fh.close() diff --git a/tests/install_dependencies.py b/tests/install_dependencies.py index bd6d3f90..07148cfe 100644 --- a/tests/install_dependencies.py +++ b/tests/install_dependencies.py @@ -6,11 +6,11 @@ # Use the provided path if given, otherwise use the default python_exe = sys.argv[1] if len(sys.argv) > 1 else default_python_exe -# # Upgrade pip +# Upgrade pip # subprocess.run([python_exe, "-m", "pip", "install", "--upgrade", "pip"], check=True) -# # List of packages to install -# packages = ["pandas", "netCDF4", "pytest"] +# List of packages to install +# packages = ["pandas", "netCDF4", "pytest", "pylint"] # # Install each package using pip # for package in packages: diff --git a/tests/qgis_tests.bat b/tests/qgis_tests.bat index 1dba516c..f500bd05 100644 --- a/tests/qgis_tests.bat +++ b/tests/qgis_tests.bat @@ -12,7 +12,8 @@ set "PYTHONHOME=%QGIS_ROOT%\apps\Python312" set "PATH=%QGIS_ROOT%\bin;%QGIS_ROOT%\apps\qgis-ltr\bin;%PATH%" :: Install dependencies -"%QGIS_ROOT%\bin\python.exe" tests\install_dependencies.py "C:\\Program Files\\QGIS 3.34.9\\bin\\python.exe" +@REM "%QGIS_ROOT%\bin\python.exe" tests\install_dependencies.py "C:\\Program Files\\QGIS 3.34.9\\bin\\python.exe" :: Run pytest on all test scripts in the directory -"%QGIS_ROOT%\bin\python.exe" -m pytest . +@REM "%QGIS_ROOT%\bin\python.exe" -m pytest . +"%QGIS_ROOT%\bin\python.exe" -m pylint \ No newline at end of file diff --git a/tests/test_stressor_utils.py b/tests/test_stressor_utils.py index ca2eac20..a29c2754 100644 --- a/tests/test_stressor_utils.py +++ b/tests/test_stressor_utils.py @@ -634,8 +634,8 @@ def setUp(self): ]) def test_classify_layer_area_2nd_constraint(self): - # Call the classify_layer_area_2nd_Constraint with hardcoded secondary constraint - result = su.classify_layer_area_2nd_Constraint( + # Call the classify_layer_area_2nd_constraint with hardcoded secondary constraint + result = su.classify_layer_area_2nd_constraint( self.risk_layer_file, None, # Not using a secondary constraint file, using hardcoded data instead at_raster_values=[0, 5, 7],