From 6e2ba2cc1b19ee42d9061e63c1b4985bdc267cf3 Mon Sep 17 00:00:00 2001
From: Mohamad Gharamti
Date: Thu, 15 Feb 2024 15:44:56 -0700
Subject: [PATCH 01/36] Adding PMO capability for WRF-Hydro

A new perfect model obs (PMO) experimental capability is added to
HydroDART. The python and shell scripts use a user-defined truth run to
sample pseudo-observations and run OSSEs. The following scripts have been
tested in regional hydroDART runs such as the DRB domain.

1- gen_truth.sh: Starting from a list of wrf-hydro restarts (truth),
strip out streamflow information at all available routelink gauges or at
a subset of gauges specified by the user. The resulting netCDF files are
stored in a new directory.

2- gauges.sh: A script to handle the gauges in the domain. It can be used
to identify a desired subset of gauges or to retrieve all gauges
available in wrf_hydro's Routelink file.

3- pmo_osse.py: Iterates over the truth files obtained by running
'gen_truth.sh' and builds a set of obs sequence files. These are then
used by the hydroDART framework to conduct an OSSE. Credits to Ben
Johnson, who built initial versions of this script.

PS. This could have been done by building a wrapper around DART's own
perfect_model_obs routine. The reasons why we went this route are unknown
at this time.
---
 models/wrf_hydro/pmo/gauges.sh    |  59 ++++++
 models/wrf_hydro/pmo/gen_truth.sh | 188 ++++++++++++++++++
 models/wrf_hydro/pmo/pmo_osse.py  | 304 ++++++++++++++++++++++++++++++
 3 files changed, 551 insertions(+)
 create mode 100755 models/wrf_hydro/pmo/gauges.sh
 create mode 100755 models/wrf_hydro/pmo/gen_truth.sh
 create mode 100644 models/wrf_hydro/pmo/pmo_osse.py

diff --git a/models/wrf_hydro/pmo/gauges.sh b/models/wrf_hydro/pmo/gauges.sh
new file mode 100755
index 0000000000..d91c46521c
--- /dev/null
+++ b/models/wrf_hydro/pmo/gauges.sh
@@ -0,0 +1,59 @@
+#!/bin/bash
+
+# Directories of the truth run
+# and the routelink file here:
+truth_d=/glade/work/gharamti/DART/DART_development/models/wrf_hydro/pmo/drb_mem0/
+route_l=/glade/work/gharamti/DART/DART_development/models/wrf_hydro/pmo/drb_mem0/RouteLink.nc
+gageind=/glade/work/gharamti/DART/DART_development/models/wrf_hydro/pmo/drb_mem0/gage_ind_small.txt
+gageids=/glade/work/gharamti/DART/DART_development/models/wrf_hydro/pmo/drb_mem0/gauge_ids.txt
+
+cd $truth_d
+
+echo ' '
+
+rm combo_IDs || true
+
+ncdump -f F -v gages RouteLink.nc | grep " 01" | cut -d, -f1 | tr '"' ' ' | sed 's/^[ \t]*//;s/[ \t]*$//' > gauges_rl
+ncdump -f F -v gages RouteLink.nc | grep " 01" | cut -d, -f3 | tr ")" " " > indxes_rl
+
+num_gauges_rl=`cat gauges_rl | wc -l`
+echo $num_gauges_rl
+
+k=1
+while read -r line; do
+    gauges[$k]=$line
+    let 'k+=1'
+done < "gauges_rl"
+
+k=1
+while read -r line; do
+    indxes[$k]=$line
+    let 'k+=1'
+done < "indxes_rl"
+
+# All gauges available in the RL file
+for k in `seq 1 $num_gauges_rl`; do
+    echo $k,${gauges[$k]},${indxes[$k]} >> combo_IDs
+    echo Gauge: $k, ID: ${gauges[$k]}, Feature: ${indxes[$k]}
+done
+
+
+# Fetch Gauges/Links/
+# If both link file and gauge file are given, priority
+# is for the gauge IDs:
+if [[ -f "${gageids}" ]]; then
+    # User provided set of gauges
+    while read -r line; do
+        grep $line combo_IDs | cut -d, -f3 >> tmp
+    done < "${gageids}"
+    echo -e "\nThe truth will be computed at user-provided gauge ID locations."
+
+elif [[ -f "${gageind}" ]]; then
+    # User provided set of links
+    cp ${gageind} ${truth_d}/tmp
+    echo -e "\nThe truth will be computed at user-provided index locations."
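+    # Editor's aside -- an illustrative note, not part of the original
+    # script: "ncdump -f F" annotates every value with its Fortran index,
+    # printing lines such as
+    #     "       01417000", // gages(1:15,7)
+    # so the first comma-separated field is the padded gauge ID and the
+    # third is the link index "7)", which tr/sed reduce to "7". The
+    # grep " 01" filter assumes right-justified IDs beginning with 01
+    # (DRB-style USGS identifiers).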
+ +else + # Couldn't find files that contain either gauge IDs or links + mv indxes_rl tmp + echo -e "\nThe gage locations from the RouteLink file are used to form the truth." +fi diff --git a/models/wrf_hydro/pmo/gen_truth.sh b/models/wrf_hydro/pmo/gen_truth.sh new file mode 100755 index 0000000000..467d7a47a5 --- /dev/null +++ b/models/wrf_hydro/pmo/gen_truth.sh @@ -0,0 +1,188 @@ +#!/bin/bash + +# Directories of the truth run, gauges indices, gauge IDs +# and the routelink file here: +ref_dir=/glade/work/gharamti/DART/DART_development/models/wrf_hydro/pmo/drb_mem0/ +route_l=/glade/work/gharamti/DART/DART_development/models/wrf_hydro/pmo/drb_mem0/RouteLink.nc +gageind=/glade/work/gharamti/DART/DART_development/models/wrf_hydro/pmo/drb_mem0/gage_ind.txt +gageids=/glade/work/gharamti/DART/DART_development/models/wrf_hydro/pmo/drb_mem0/gage_ids.txt + +cd ${ref_dir} + +echo ' ' + +# time stamps: OSSE start to OSSE end +osse_s=2019-06-01_00 +osse_e=2019-06-01_23 + +# hydro restart file +hydroL=HYDRO_RST. +hydroR=_DOMAIN1 + +# Truth file name +truth_x=pmo_drb_truth.nc +truth_d=pmo_drb_truth_all_gages.nc +truth_g=pmo_drb_truth_des_gages.nc + +rm -f $truth_x $truth_d $truth_g + +# Form a list of all needed files +ls -1 ${hydroL}*${hydroR} > majorlist.txt + +line1=`grep -n $osse_s majorlist.txt | head -n 1 | cut -d: -f1` +line2=`grep -n $osse_e majorlist.txt | head -n 1 | cut -d: -f1` + +sed -n "${line1},${line2}p" majorlist.txt > newlist.txt + +nhoura=`cat newlist.txt | wc -l` +nhourb=`echo "($nhoura - 1)" | bc -l` +timesq=`seq 0 $nhourb` + +# Remove all variables and keep qlink1 +f=0 + +for file in `cat newlist.txt`; do + + let "f+=1" + + echo $file, Cycle: $f + + ex=`printf '%05.f' $f` + + # Extract qlink1 only + ncks -O -v qlink1 $file member${ex}.nc + + # Rename variable and dimension + ncrename -O -d links,feature_id -v qlink1,streamflow member${ex}.nc member${ex}.nc + + # Add record time dimension + ncecat -O -u time member${ex}.nc member${ex}.nc +done + +# Concatenate all files +ncrcat -F -O -h -H member?????.nc $truth_x + +rm member*.nc || true + +ncap2 -O -s 'streamflow@units="m3 s-1";streamflow@long_name="River Flow";streamflow@grid_mapping="crs"; streamflow@missing_value=-9999.f' $truth_x $truth_x +ncatted -O -a cell_methods,streamflow,d,, $truth_x $truth_x + +# Add time variable +ncap2 -O -s 'time=array(0,1,$time)' $truth_x $truth_x +ncap2 -O -s 'time@long_name="valid output time";time@standard_name="time";time@units="hours since ${osse_s}";time@calendar="proleptic_gregorian"' $truth_x $truth_x +ncatted -O -a units,time,o,c,"hours since ${osse_s}:00" $truth_x + +# Bring in the feature IDs from RL +ncks -A -v link $route_l $truth_x +ncrename -O -v link,feature_ids $truth_x $truth_x + +# Clean-up some global attributes +ncatted -O -a Restart_Time,global,d,, $truth_x +ncatted -O -a Since_Date,global,d,, $truth_x +ncatted -O -a his_out_counts,global,d,, $truth_x +ncatted -O -a featureType,global,a,c,"timeSeries" $truth_x +ncatted -O -a station_dimension,global,a,c,"feature_id" $truth_x +ncatted -O -a NCO,global,d,, $truth_x +ncatted -O -a history,global,d,, $truth_x + +echo -e "\n** Created reference truth trajectory: ${ref_dir}${truth_x}\n" + + +# Still need to subset based on gages +ncdump -f F -v gages RouteLink.nc | grep " 01" | cut -d, -f1 | tr '"' ' ' | sed 's/^[ \t]*//;s/[ \t]*$//' > gauges_rl +ncdump -f F -v gages RouteLink.nc | grep " 01" | cut -d, -f3 | tr ")" " " > indxes_rl + +num_gauges_rl=`cat gauges_rl | wc -l` +echo -e "** The number of gauges in the Route Link file is: 
$num_gauges_rl\n" + +k=1 +while read -r line; do + gauges[$k]=$line + let 'k+=1' +done < "gauges_rl" + +k=1 +while read -r line; do + indxes[$k]=$line + let 'k+=1' +done < "indxes_rl" + +sleep 0.5 + +# All gauges available in the RL file +for k in `seq 1 $num_gauges_rl`; do + echo $k,${gauges[$k]},${indxes[$k]} >> combo_IDs + echo Gauge: $k, ID: ${gauges[$k]}, Index: ${indxes[$k]} +done + +# Fetch Gauges/Links/ +# If both link file and gauge file are given, priority +# is for the gauge IDs: +if [[ -f "${gageids}" ]]; then + # User provided set of gauges + while read -r line; do + grep $line combo_IDs | cut -d, -f3 >> tmp + done < "${gageids}" + + num_gauges_des=`cat ${gageids} | wc -l` + + echo -e "\n** The truth will be computed at user-provided gauge ID locations only." + +elif [[ -f "${gageind}" ]]; then + # User provided set of links + cp ${gageind} ${ref_dir}/tmp + + num_gauges_des=`cat ${gageind} | wc -l` + + echo -e "\n** The truth will be computed at user-provided index locations only." + +else + # Couldn't find files that contain either gauge IDs or links + cp indxes_rl tmp + + num_gauges_des=${num_gauges_rl} + + echo -e "\n** The gage locations from the RouteLink file are used to form the truth." +fi + +# Permutate the record to feature_id instead of time +ncpdq -O -a feature_id,time $truth_x new_pmo.nc + +echo -e "\n** Creating truth file at the desired gauges: ${ref_dir}$truth_g" + +# Create individual files at the gage locations +cc=0 +for i in `cat tmp`; do + let 'cc+=1' + + fid=$(($i-1)) + ncks -O -v feature_ids,time,streamflow -d feature_id,$fid new_pmo.nc test_${cc}.nc +done + +# Now, concatenate the resulting files +ncrcat -O test_*.nc $truth_g +ncpdq -O -a time,feature_id $truth_g $truth_g +ncatted -O -a history,global,d,, $truth_g + +# Check if we need to provide the truth for all gauges +if [[ $num_gauges_rl != $num_gauges_des ]]; then + cp indxes_rl tmp + + cc=0 + for i in `cat tmp`; do + let 'cc+=1' + + fid=$(($i-1)) + ncks -O -v feature_ids,time,streamflow -d feature_id,$fid new_pmo.nc test_a${cc}.nc + done + + ncrcat -O test_a*.nc $truth_d + ncpdq -O -a time,feature_id $truth_d $truth_d + ncatted -O -a history,global,d,, $truth_d +else + cp $truth_g $truth_d +fi + +rm test*.nc new_pmo.nc tmp combo_IDs indxes_rl gauges_rl || true + +echo -e "\n ##### Done #####" diff --git a/models/wrf_hydro/pmo/pmo_osse.py b/models/wrf_hydro/pmo/pmo_osse.py new file mode 100644 index 0000000000..ddc0fc1476 --- /dev/null +++ b/models/wrf_hydro/pmo/pmo_osse.py @@ -0,0 +1,304 @@ +#!/usr/bin/env python +# coding: utf-8 + +# When given a NetCDF file from a deterministic run of WRF-Hydro, this script +# creates daily obs_sequence files, denoted by YYYYMMDD strings appended to +# the end of the obs_seq string. + +# This script needs one third-party module: netcdf4-python. +# On CISL resources (Cheyenne and Casper) please run these two commands first: +# > module load python +# > ncar_pylib + +# Then run this script: +# > python pmo_osse.py + +# IMPORT STANDARD LIBRARIES + +from __future__ import print_function +from __future__ import division +import os +import time +import sys +import datetime +from math import pi + +# IMPORT THIRD PARTY MODULE(S) +import netCDF4 + +# PRINT SCRIPT INFORMATION +# Print script data in case output is redirected from standard output to file. 
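+# Editor's aside (a sketch, not part of the original script): before a long
+# run it can be worth confirming the one third-party dependency is
+# importable in the current environment, e.g.
+#
+#   python -c "import netCDF4; print(netCDF4.__version__)"
+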
+this_script = sys.argv[0]
+this_script = this_script[0:-3]
+# This prints the name of the script
+print('Running', this_script)
+# This prints the last time the script was modified
+print('Which was last modified at',
+      time.ctime(os.path.getmtime(os.path.realpath(__file__))))
+print('\n')
+
+# CONSTANTS
+# Get the name of the user running the script so we can output to their work
+# directory.
+user = os.environ['USER']
+# Frequency of output
+ntimes_per_day = 24
+# Following the observation error procedure from
+# create_identity_streamflow_obs.f90 we define a minimum error bound and an
+# observational error fraction (40%) and pick whichever is larger.
+min_err = 0.1
+max_err = 1000000.0
+obs_fraction_for_error = 0.4
+deg2rad = pi/180.0
+
+# STRINGS AND PATHS
+# Change the following strings to match the experiment names and locations of
+# the files.
+domain_name = 'drb'
+pmo_name = 'osse_id2'
+input_path = '/glade/work/gharamti/DART/DART_development/models/wrf_hydro/pmo/drb_mem0/'
+output_path = '/glade/work/' + user + '/wrfhydro_dart/'
+
+# Check to see if the output path exists.
+obs_seq_path = output_path + domain_name + '/obs_seqs/' + pmo_name + '/'
+if not os.path.exists(obs_seq_path):
+    # If not, create the path.
+    print('Making directory for obs_seq files:', obs_seq_path)
+    os.makedirs(obs_seq_path)
+
+# Perfect Model Output
+# This file, when created by DART's PMO, is typically called perfect_output.nc
+pmo_path = input_path + 'pmo_drb_truth_des_gages.nc'
+pmo_all_path = input_path + 'pmo_drb_truth_all_gages.nc'
+# Route Link File
+rl_path = input_path + 'RouteLink.nc'
+
+# PMO
+# Get the necessary data from the PMO file
+pmo_pointer = netCDF4.Dataset(pmo_path)
+ntimes = len(pmo_pointer.dimensions['time'])
+print('ntimes:', ntimes)
+
+pmo_all_pointer = netCDF4.Dataset(pmo_all_path)
+
+# TIMES
+# Use the units string on the time variable to assign integers for year, month,
+# day, etc.
+pmo_time = pmo_pointer.variables['time']
+start_year = int(pmo_time.units[12:16])
+start_month = int(pmo_time.units[17:19])
+start_day = int(pmo_time.units[20:22])
+start_hour = int(pmo_time.units[23:25])
+start_minute = 0
+start_second = 0
+
+print('Start date:', start_year, start_month, start_day, start_hour)
+
+# Create an integration start_time using the integers from the time units
+# string.
+integration_start_time = datetime.datetime(start_year, start_month, start_day,
+                                           start_hour, start_minute,
+                                           start_second)
+
+# If obs_seq files should only be output after a certain day, change it here.
+# The default behavior is that the output_start_day is the same as the
+# deterministic run start day.
+output_start_day = datetime.datetime(start_year, start_month, start_day)
+# output_start_day = datetime.datetime(2018, 9, 7)
+
+# This loop starts the observation sequence output only after the
+# output_start_day specified by the user.
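+
+# Editor's aside (a sketch, not part of the original script): the fixed
+# character offsets above assume the units attribute reads exactly
+# 'hours since YYYY-MM-DD_HH...'. Under that same assumption, a slightly
+# more defensive parse would be:
+#
+#   import re
+#   m = re.match(r'hours since (\d{4})-(\d{2})-(\d{2})[_ ](\d{2})',
+#                pmo_time.units)
+#   start_year, start_month, start_day, start_hour = map(int, m.groups())
+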
+start_time = 0 +nday = -1 +ndays = int(ntimes/ntimes_per_day) + +for iday in range(0, ndays): + if output_start_day > integration_start_time+datetime.timedelta(days=iday): + nday = nday + 1 + start_time = start_time + ntimes_per_day + +# DART uses time since 1601-01-01 00:00:00 +overall_start_time = datetime.datetime(1601, 1, 1, 0, 0, 0) +# Get the time since DART start time at the beginning of the file +file_start_time_delta = integration_start_time-overall_start_time +print('DART start time:', file_start_time_delta.seconds, file_start_time_delta.days) + +# Get feature information from the perfect obs file +nfeatures = len(pmo_all_pointer.dimensions['feature_id']) +nfeatures_des = len(pmo_pointer.dimensions['feature_id']) +print('RL gauges:', nfeatures, ', desired ones:', nfeatures_des) + +nobs = ntimes*nfeatures +nobs_day = ntimes_per_day*nfeatures + +pmo_reach_id = pmo_all_pointer.variables['feature_ids'] +pmo_time = pmo_all_pointer.variables['time'] +pmo_streamflow = pmo_all_pointer.variables['streamflow'] + +pmo_des_reach_id = pmo_pointer.variables['feature_ids'] + +# ROUTELINK +# Get the necessary data from the Route Link file. +rl_pointer = netCDF4.Dataset(rl_path) +# Get the variables from the Route Link file. +lat = rl_pointer.variables['lat'] +lon = rl_pointer.variables['lon'] +alt = rl_pointer.variables['alt'] +link = rl_pointer.variables['link'] + +# GAGE LISTS +# Build lists with the following: +# 1. Index of the link with the desired gage +ilinks = [] +# 2. Latitude of link +lats = [] +# 3. Longitude of link +lons = [] +# 4. Altitude of link +alts = [] + +# Loop through the reach ids to build the lists +print('\n') +print('Looping through the links in the Route Link file to get the location ' + 'data for each desired gage.') +print('Thank you', user, 'for your patience.') +print('\n') + +gg = 0 +for ipmo_reach, this_pmo_reach in enumerate(pmo_reach_id): + for ilink, this_link in enumerate(link): + if this_pmo_reach == this_link: + gg = gg + 1 + print('Gauge no:', gg) + print('Feature ID:', this_pmo_reach, 'and Link Index:', ilink+1) + print('Location: lat', lat[ilink], ', lon', lon[ilink], ', alt', alt[ilink], '\n') + ilinks.append(ilink) + this_lat = lat[ilink]*deg2rad + lats.append(this_lat) + this_lon = lon[ilink] + if this_lon < 0.0: + this_lon = this_lon+360.0 + this_lon = this_lon*deg2rad + lons.append(this_lon) + alts.append(alt[ilink]) + +# OBS SEQUENCE LOOP +# Loop through the times in the PMO file to build the obs_seq files + +for itime in range(start_time, ntimes): + # The commented line assumes that we want to make the observation time half + # an observation period ahead of when it actually occurs so that the window + # is centered on when the observation was taken. Do we want this? + # If so, uncomment the next line and comment the following one. + # this_time = file_start_time_delta+datetime.timedelta(hours=itime) - \ + # datetime.timedelta(hours=0.5) + this_time = file_start_time_delta+datetime.timedelta(hours=itime) + + # If the time index modulo the number of times per day equals zero, then + # that means we're at the start of a day and we should thus open a new + # obs_seq file designated with the proper YYYYMMDD string and write the + # appropriate header information to it. + if itime % ntimes_per_day == 0: + # Get the YYYYMMDD string of this new day so that we can name the + # obs_seq file appropriately. 
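+        # Editor's note (illustrative, not in the original script): a day
+        # starting 2019-06-01 yields time_string '20190601' and the file
+        # 'obs_seq.20190601'. The per-observation times written further
+        # below are integer (seconds, days) since 1601-01-01 00:00:00;
+        # for 2019-06-01 00Z,
+        #   datetime.datetime(2019, 6, 1) - datetime.datetime(1601, 1, 1)
+        # is timedelta(days=152822), so the time line reads '0 152822'.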
+        nday = nday+1
+
+        file_start_day = integration_start_time+datetime.timedelta(days=nday)
+        time_string = str(file_start_day.year) + \
+            str(file_start_day.month).zfill(2) + \
+            str(file_start_day.day).zfill(2)
+
+        # Append 'obs_seq.' with the YYYYMMDD string
+        obs_seq_file = obs_seq_path + 'obs_seq.' + time_string
+        print('Writing observation sequences for day', str(nday).zfill(2),
+              'to:', obs_seq_file)
+
+        # Create the file pointer for writing
+        obs_seq = open(obs_seq_file, 'w')
+
+        # Write the header strings 'obs_sequence', 'obs_kind_definitions',
+        # etc to the file
+        print(' obs_sequence', file=obs_seq)
+        print('obs_kind_definitions', file=obs_seq)
+        # There aren't any obs_kind_definitions because this file only contains
+        # identity obs.
+        print(' 0', file=obs_seq)
+        print('num_copies: 1 num_qc: 1', file=obs_seq)
+        print('num_obs: ', nobs_day, ' max_num_obs: ', nobs_day,
+              file=obs_seq)
+        print(' observation', file=obs_seq)
+        print('QC VALUE', file=obs_seq)
+        print('first:', '1'.rjust(8), 'last:', str(nobs_day).rjust(8),
+              file=obs_seq)
+        # Reset the obs counter so the OBS line is correct and the linked list
+        # strings are correct as well.
+        iobs = -1
+
+    for ifeature in range(0, nfeatures):
+        # Now we're looping through the actual gages and writing them, their
+        # linked list, state_vector strings and observation error variance
+        # to the obs_sequence files.
+        iobs = iobs + 1
+        this_obs = pmo_streamflow[itime, ifeature]
+
+        # The observation error standard deviation is specified as the larger
+        # of the observation magnitude times error fraction or the minimum
+        # error threshold.
+        if any(pmo_des_reach_id == pmo_reach_id[ifeature]):
+            obs_err = max(this_obs*obs_fraction_for_error, min_err)
+        else:
+            obs_err = max_err
+
+        # Square it to get the variance
+        obs_var = obs_err*obs_err
+
+        # The observations are 1 indexed, but python loops are 0 indexed so
+        # we add 1 to the observation index before writing to the file
+        print('OBS', str(iobs+1).rjust(10), file=obs_seq)
+        # Write the value of the observation
+        print(this_obs, file=obs_seq)
+        # Write the QC value, which is 1.0 here
+        print('1.000000000000000E+000', file=obs_seq)
+
+        # The linked list line has three configurations
+        if iobs == 0:
+            # If it's the first observation, the first integer is -1
+            print('-1'.rjust(4), str(iobs+2).rjust(4), '-1'.rjust(4),
+                  file=obs_seq)
+        elif iobs == nobs_day-1:
+            # If it's the last observation the second integer is -1
+            print(str(iobs).rjust(4), '-1'.rjust(4), '-1'.rjust(4),
+                  file=obs_seq)
+        else:
+            # If it's any other observation the first integer is the obs
+            # number minus 1 (assuming the observations are 1 indexed) and
+            # the second integer is obs number plus one (again assuming
+            # observations are 1 indexed).
+            print(str(iobs).rjust(4), str(iobs+2).rjust(4), '-1'.rjust(4),
+                  file=obs_seq)
+        # Then we have the obdef section of the observation.
+        print('obdef', file=obs_seq)
+        # This is a 3-D observation with....
+        print('loc3d', file=obs_seq)
+        # ...longitude, latitude, altitude, and a trailing 3 denoting that
+        # the vertical coordinate is a height, VERTISHEIGHT.
+        print(str(lons[ifeature]).rjust(12), str(lats[ifeature]).rjust(12),
+              str(alts[ifeature]).rjust(12), '3'.rjust(12), file=obs_seq)
+        print('kind', file=obs_seq)
+        # Since these are identity observations they're the negative of the
+        # position within the state vector.
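+        # Editor's note (illustrative): a gauge sitting on, say, state-vector
+        # element 1234 is written as kind '-1234'; DART interprets a negative
+        # kind as an identity observation, i.e. a direct sample of state
+        # element 1234 rather than a named observation type.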
+        print(' -'+str(ilinks[ifeature]+1), file=obs_seq)
+        # The time of the observation is days and seconds since 1601-01-01
+        # 00:00:00
+        print(this_time.seconds, this_time.days, file=obs_seq)
+        # Finally, write the observation error variance
+        print(obs_var, file=obs_seq)
+
+    # If the next time modulo the number of times per day equals zero, for
+    # example if we just wrote the observations for 11PM (23 hours), then we
+    # just wrote the last appropriate observations to this file and we need
+    # to close the file.
+    if (itime+1) % ntimes_per_day == 0:
+        obs_seq.close()
+
+

From fc7c8492828b05e5435c8f57c54ab0c9fc761333 Mon Sep 17 00:00:00 2001
From: Mohamad Gharamti
Date: Thu, 15 Feb 2024 16:01:58 -0700
Subject: [PATCH 02/36] Modifications to Observation Creation

The streamflow obs converter is modified to allow for better diagnostics.
The changes allow DART to compute obs space diagnostics on all gauges
from the Routelink (not only the ones specified by the user). This
essentially mimics evaluate_only functionality for non-identity obs.
There is also a change to check for bad (inf/nan) streamflow obs. A final
change allows processing a large number of obs-seq files at the same
time.

The python scripts include bug fixes for file movement. They also allow
calling the converter from the yaml file using large-memory nodes.
---
 .../create_identity_streamflow_obs.f90       | 32 ++++++++++++-------
 .../core/create_usgs_daily_obs_seq.py        |  2 +-
 .../hydrodartpy/core/setup_usgs_daily.py     | 32 +++++++++++++------
 3 files changed, 43 insertions(+), 23 deletions(-)

diff --git a/models/wrf_hydro/create_identity_streamflow_obs.f90 b/models/wrf_hydro/create_identity_streamflow_obs.f90
index f39141eddb..db2c78488a 100644
--- a/models/wrf_hydro/create_identity_streamflow_obs.f90
+++ b/models/wrf_hydro/create_identity_streamflow_obs.f90
@@ -66,7 +66,7 @@ program create_identity_streamflow_obs
 integer,  parameter :: NUM_COPIES      = 1 ! number of copies in sequence
 integer,  parameter :: NUM_QC          = 1 ! number of QC entries
 real(r8), parameter :: MIN_OBS_ERR_STD = 0.1_r8 ! m^3/sec
-real(r8), parameter :: MAX_OBS_ERR_STD = 100000.0_r8
+real(r8), parameter :: MAX_OBS_ERR_STD = 1000000.0_r8
 real(r8), parameter :: NORMAL_FLOW     = 10.0_r8
 real(r8), parameter :: contract        = 0.001_r8
@@ -104,7 +104,7 @@ program create_identity_streamflow_obs
 real(r8), allocatable :: discharge(:)
 
 character(len=IDLength), allocatable :: desired_gages(:)
-integer  :: n_wanted_gages
+integer  :: n_wanted_gages, n_desired_gages
 real(r8) :: oerr, qc
 integer  :: oday, osec
 type(obs_type) :: obs
@@ -127,6 +127,7 @@ program create_identity_streamflow_obs
 character(len=256) :: location_file = 'location.nc'
 character(len=256) :: gages_list_file = ''
 real(r8)           :: obs_fraction_for_error = 0.01
+logical            :: assimilate_all = .false.
 integer            :: debug = 0
 
 namelist / create_identity_streamflow_obs_nml / &
@@ -135,6 +136,7 @@ program create_identity_streamflow_obs
      location_file, &
      gages_list_file, &
     obs_fraction_for_error, &
+     assimilate_all, &
     debug
 
 !-------------------------------------------------------------------------------
@@ -209,7 +211,12 @@ program create_identity_streamflow_obs
 call init_obs(obs,      num_copies=NUM_COPIES, num_qc=NUM_QC)
 call init_obs(prev_obs, num_copies=NUM_COPIES, num_qc=NUM_QC)
 
-n_wanted_gages = set_desired_gages(gages_list_file)
+! Collect all the gauges:
+! - desired ones will have the provided obs_err_sd
+! 
- remaining gauges are dummy with very large obs_err_sd + +n_desired_gages = set_desired_gages(gages_list_file) +n_wanted_gages = 0 !set_desired_gages(gages_list_file) call find_textfile_dims(input_files, nfiles) num_new_obs = estimate_total_obs_count(input_files, nfiles) @@ -308,7 +315,8 @@ program create_identity_streamflow_obs OBSLOOP: do n = 1, nobs - if ( discharge(n) < 0.0_r8 ) cycle OBSLOOP + ! make sure discharge is physical + if ( discharge(n) < 0.0_r8 .or. discharge(n) /= discharge(n) ) cycle OBSLOOP ! relate the TimeSlice:station to the RouteLink:gage so we can ! determine the location @@ -318,13 +326,13 @@ program create_identity_streamflow_obs ! relate the physical location to the dart state vector index dart_index = linkloc_to_dart(lat(indx), lon(indx)) - ! oerr is the observation error standard deviation in this application. - ! The observation error variance encoded in the observation file - ! will be oerr*oerr - oerr = max(discharge(n)*obs_fraction_for_error, MIN_OBS_ERR_STD) - - ! MEG: A fix to not crush the ensemble in a no-flood period (stagnant water). - !if ( discharge(n) < NORMAL_FLOW ) then + ! desired gauges get the provided obs_err + ! remaining ones are for verification purposes + if (ANY(desired_gages == station_strings(n)) .or. assimilate_all) then + oerr = max(discharge(n)*obs_fraction_for_error, MIN_OBS_ERR_STD) + else + oerr = MAX_OBS_ERR_STD + endif ! don't correct that much, the gauge observations imply that the flow ! in the stream is small. This is not a flood period. Streamflow values ! indicate a more or less lake situation rather than a strongly flowing stream. @@ -662,7 +670,7 @@ function estimate_total_obs_count(file_list,nfiles) result (num_obs) ! Specifying too many is not really a problem. ! I am adding 20% -num_obs = 1.2_r8 * nobs * nfiles +num_obs = 10.0_r8 * nobs * nfiles end function estimate_total_obs_count diff --git a/models/wrf_hydro/hydro_dart_py/hydrodartpy/core/create_usgs_daily_obs_seq.py b/models/wrf_hydro/hydro_dart_py/hydrodartpy/core/create_usgs_daily_obs_seq.py index 38d38c194c..ec95a4a968 100644 --- a/models/wrf_hydro/hydro_dart_py/hydrodartpy/core/create_usgs_daily_obs_seq.py +++ b/models/wrf_hydro/hydro_dart_py/hydrodartpy/core/create_usgs_daily_obs_seq.py @@ -50,7 +50,7 @@ def parallel_process_day(arg_dict): the_cmd = exe_cmd.format( **{ - 'cmd': './' + the_converter.name, + 'cmd': './create_identity_streamflow_obs', 'nproc': 1 } ) diff --git a/models/wrf_hydro/hydro_dart_py/hydrodartpy/core/setup_usgs_daily.py b/models/wrf_hydro/hydro_dart_py/hydrodartpy/core/setup_usgs_daily.py index c3e8b75852..d81a4db50c 100755 --- a/models/wrf_hydro/hydro_dart_py/hydrodartpy/core/setup_usgs_daily.py +++ b/models/wrf_hydro/hydro_dart_py/hydrodartpy/core/setup_usgs_daily.py @@ -27,7 +27,8 @@ def setup_usgs_daily( input_dir = usgs_daily_config['input_dir'] output_dir = usgs_daily_config['output_dir'] # Output directory: make if DNE - output_dir.mkdir(exist_ok=False, parents=True) + #output_dir.mkdir(exist_ok=False, parents=True) + output_dir.mkdir(exist_ok=True, parents=True) # converter: identity or regular obs converter? 
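+    # Editor's sketch (hypothetical helper, not part of this change): the
+    # guards below intend "create a link only when it is not already in
+    # place", so the test belongs on the link being created, not on its
+    # target. Factored out, that reads:
+    def _symlink_if_missing(link_path, target):
+        # exists() follows links; is_symlink() also catches dangling links
+        if not (link_path.exists() or link_path.is_symlink()):
+            link_path.symlink_to(target)
+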
# Check that the desired obs converter is in the dart build
@@ -75,7 +76,8 @@ def setup_usgs_daily(
     run_dir = config['experiment']['run_dir']
     m0 = pickle.load(open(run_dir / "member_000/WrfHydroSim.pkl", 'rb'))
     route_link_f = run_dir / 'member_000' / m0.base_hydro_namelist['hydro_nlist']['route_link_f']
-    (output_dir / route_link_f.name).symlink_to(route_link_f)
+    if not (output_dir / route_link_f.name).is_file():
+        (output_dir / route_link_f.name).symlink_to(route_link_f)
     input_nml[converter_nml]['location_file'] = route_link_f.name
 
     #input.nml input_files: create a list of files in the start and end range.
@@ -101,27 +103,35 @@ def setup_usgs_daily(
 
     if usgs_daily_config['identity_obs']:
         hydro_rst_file = run_dir / 'member_000' / m0.base_hydro_namelist['hydro_nlist']['restart_file']
-        (output_dir / hydro_rst_file.name).symlink_to(hydro_rst_file)
+        if not (output_dir / hydro_rst_file.name).is_file():
+            (output_dir / hydro_rst_file.name).symlink_to(hydro_rst_file)
         input_nml['model_nml']['domain_order'] = 'hydro'
         input_nml['model_nml']['domain_shapefiles'] = str(hydro_rst_file.name)
 
         f90nml.Namelist(m0.base_hydro_namelist).write(output_dir / 'hydro.namelist', force=True)
         top_level_dir = get_top_level_dir_from_config(config, m0)
-        (output_dir / top_level_dir).symlink_to(config['wrf_hydro']['domain_src'] / top_level_dir)
+        nwm_dir = output_dir / top_level_dir
+        if not nwm_dir.is_dir():
+            (output_dir / top_level_dir).symlink_to(config['wrf_hydro']['domain_src'] / top_level_dir)
 
     # Now we are done editing it, write the input.nml back out.
-    input_nml.write(output_dir / 'input.nml')
+    out_input = output_dir / 'input.nml'
+    if not out_input.is_file():
+        input_nml.write(output_dir / 'input.nml')
 
     # Symlink the config file into the output_dir so the default yaml file name
     # can be used by create_usgs_daily_obs_seq.
     if config_file is None:
         config_file = sorted(exp_dir.glob('original.*.yaml'))[0]
-    (output_dir / 'config_file.yaml').symlink_to(config_file)
+    if not (output_dir / 'config_file.yaml').is_file():
+        (output_dir / 'config_file.yaml').symlink_to(config_file)
 
     # Stage the file that does the batch processing.
     this_file = pathlib.Path(__file__)
     batcher_base = 'create_usgs_daily_obs_seq.py'
-    (output_dir / batcher_base).symlink_to(this_file.parent / batcher_base)
+    pyscript = output_dir / batcher_base
+    if not pyscript.is_file():
+        (output_dir / batcher_base).symlink_to(this_file.parent / batcher_base)
 
     # Setup the scheduled script.
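+    # Editor's note (illustrative values, not from this change): with, e.g.,
+    # ncpus=36, mpiprocs=36 and reqmem=109 in the scheduler config, the
+    # PBS_SELECT_TEMPLATE below is rendered as
+    #   select=1:ncpus=36:mpiprocs=36:mem=109GB
+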
orig_submit_script = this_file.parent / 'submission_scripts/submit_usgs_daily_obs_converter.sh'
@@ -152,10 +162,11 @@ def setup_usgs_daily(
 
     # Select statement
     # Right now, only single node processing
-    select_stmt = 'select=1:ncpus={ncpus}:mpiprocs={mpiprocs}'.format(
+    select_stmt = 'select=1:ncpus={ncpus}:mpiprocs={mpiprocs}:mem={reqmem}GB'.format(
         **{
             'ncpus': usgs_sched['ncpus'],
-            'mpiprocs': usgs_sched['mpiprocs']
+            'mpiprocs': usgs_sched['mpiprocs'],
+            'reqmem': usgs_sched['reqmem']
         }
     )
     replace_in_file(this_submit_script, 'PBS_SELECT_TEMPLATE', select_stmt)
@@ -191,6 +202,7 @@ def setup_usgs_daily(
     all_obs_dir = pathlib.PosixPath(config['observation_preparation']['all_obs_dir'])
     all_obs_seq = output_dir.glob('obs_seq.*')
     for oo in all_obs_seq:
-        (all_obs_dir / oo.name).symlink_to(oo)
+        if not (all_obs_dir / oo.name).is_file():
+            (all_obs_dir / oo.name).symlink_to(oo)
 
     return 0
 
From 8a14b6e7454a84775d7b65e98331c21a4c4fbfe4 Mon Sep 17 00:00:00 2001
From: Mohamad Gharamti
Date: Thu, 15 Feb 2024 16:12:17 -0700
Subject: [PATCH 03/36] Enhanced Performance

Changes to the model_mod and noah_hydro_mod to enhance performance when
running a full CONUS domain. Various parts of the noah_hydro_mod code are
rearranged to avoid extra computations that are not needed when the LSM
is turned off. Also, the link tree structure is modified such that only
the individual upstream links from any reach are stored. We also limit
the number of upstream links to '5'. This helps alleviate any issues in
the Routelink file (which can make the stream network unphysical). This
change makes our Along-The-Stream (ATS) Localization runs a lot faster.

Changes in the model_mod make the code faster when distributing the
close-by reaches to each individual task. Also, there is a change that
allows reading climatology files when a hybrid EnKF variant is invoked.
---
 models/wrf_hydro/model_mod.f90      | 107 ++++++++--
 models/wrf_hydro/noah_hydro_mod.f90 | 314 +++++++++++++++-------------
 2 files changed, 254 insertions(+), 167 deletions(-)

diff --git a/models/wrf_hydro/model_mod.f90 b/models/wrf_hydro/model_mod.f90
index da4b2bdb4a..d1b5c7c7e3 100644
--- a/models/wrf_hydro/model_mod.f90
+++ b/models/wrf_hydro/model_mod.f90
@@ -26,7 +26,8 @@ module model_mod
 
 use netcdf_utilities_mod, only : nc_check, nc_add_global_attribute, &
                                  nc_synchronize_file, nc_end_define_mode, &
-                                 nc_add_global_creation_time, nc_begin_define_mode
+                                 nc_add_global_creation_time, nc_begin_define_mode, &
+                                 nc_get_dimension_size, nc_open_file_readonly
 
 use obs_def_utilities_mod, only : track_status
 
@@ -534,8 +535,9 @@ function read_model_time(filename)
 character(len=STRINGLENGTH) :: datestring_scalar
 integer :: year, month, day, hour, minute, second
 integer :: DimID, VarID, strlen, ntimes
-logical :: isLsmFile
+logical :: isLsmFile, isClimFile
 integer :: ncid, io
+integer :: c_link
 
 io = nf90_open(filename, NF90_NOWRITE, ncid)
 call nc_check(io,routine,'open',filename)
@@ -543,6 +545,10 @@ function read_model_time(filename)
 ! Test if "Time" is a dimension in the file.
 isLsmFile = nf90_inq_dimid(ncid, 'Time', DimID) == NF90_NOERR
 
+! Test if the 'static_time' variable exists (climatology file).
+! Only read model time from the restart, use a dummy one here!
+isClimFile = nf90_inq_varid(ncid, 'static_time', VarID) == NF90_NOERR
+
 if(isLsmFile) then ! Get the time from the LSM restart file
 
    ! TJH ... 
my preference is to read the dimension IDs for the Times variable @@ -576,6 +582,28 @@ function read_model_time(filename) io = nf90_get_var(ncid, VarID, datestring) call nc_check(io, routine, 'get_var','Times',filename) +elseif (isClimFile) then + + ! Dummy time for static files + ntimes = 1 + allocate(datestring(ntimes)) + datestring(1) = '1980-01-01_00:00:00' + + ! Also check if the state in the climatology is consistent + ! with the state in the restarts + ncid = nc_open_file_readonly(filename, routine) + c_link = nc_get_dimension_size(ncid, 'links', routine) + + if ( c_link /= n_link ) then + write(string1,'(A)')'The size of the state in the climatology files is not consistent with the current domain size.' + write(string2, * )'number of links: ', c_link, & + ' from "'//trim(filename)//'"' + write(string3,*)'number of links: ',int(n_link,i8), & + ' from "'//get_hydro_domain_filename()//'"' + call error_handler(E_ERR, routine, string1, & + source, revision, revdate, text2=string2, text3=string3) + endif + else ! Get the time from the hydro or parameter file io = nf90_inquire_attribute(ncid, NF90_GLOBAL, 'Restart_Time', len=strlen) @@ -637,7 +665,7 @@ subroutine get_state_meta_data(index_in, location, var_type) location = domain_info(domid)%location(iloc,jloc,kloc) -if (do_output() .and. debug > 99) then +if (do_output() .and. debug > 1000) then call write_location(0,location,charstring=string1) write(*,*)'gsmd index,i,j,k = ',index_in, iloc, jloc, kloc, trim(string1) endif @@ -1374,6 +1402,36 @@ end subroutine configure_domains !> close_ind(:) the list of desired indices ON MY TASK !> dist(:) the corresponding distances of the desired indices +!subroutine get_my_close(num_superset, superset_indices, superset_distances, & +! my_task_indices, num_close, close_ind, dist) +! +!integer, intent(in) :: num_superset +!integer(i8), intent(in) :: superset_indices(:) +!real(r8), intent(in) :: superset_distances(:) +!integer(i8), intent(in) :: my_task_indices(:) +!integer, intent(out) :: num_close +!integer, intent(out) :: close_ind(:) +!real(r8), intent(out) :: dist(:) +! +!integer :: itask, isuper +! +!num_close = 0 +! +!do itask = 1,size(my_task_indices) +! do isuper = 1,num_superset +! +! ! if stuff on my task ... equals ... global stuff I want ... +! if ( my_task_indices(itask) == superset_indices(isuper) ) then +! num_close = num_close + 1 +! close_ind(num_close) = itask +! dist(num_close) = superset_distances(isuper) +! endif +! +! enddo +!enddo +! +!end subroutine get_my_close + subroutine get_my_close(num_superset, superset_indices, superset_distances, & my_task_indices, num_close, close_ind, dist) @@ -1385,22 +1443,39 @@ subroutine get_my_close(num_superset, superset_indices, superset_distances, & integer, intent(out) :: close_ind(:) real(r8), intent(out) :: dist(:) -integer :: itask, isuper +integer, dimension(:), allocatable :: index_map +integer :: i, idx, il, ir num_close = 0 -do itask = 1,size(my_task_indices) - do isuper = 1,num_superset - - ! if stuff on my task ... equals ... global stuff I want ... - if ( my_task_indices(itask) == superset_indices(isuper) ) then - num_close = num_close + 1 - close_ind(num_close) = itask - dist(num_close) = superset_distances(isuper) - endif - - enddo -enddo +! Determine the range of my_task_indices +il = minval(my_task_indices) +ir = maxval(my_task_indices) + +! Create a map for quick lookup +allocate(index_map(il:ir)) +index_map = 0 +do i = 1, num_superset + idx = superset_indices(i) + if (idx >= il .and. 
idx <= ir) then + index_map(idx) = i + end if +end do + +! Loop over my_task_indices and find matches using the map +do i = 1, size(my_task_indices) + idx = my_task_indices(i) + if (idx >= il .and. idx <= ir) then + if (index_map(idx) > 0) then + num_close = num_close + 1 + close_ind(num_close) = i + dist(num_close) = superset_distances(index_map(idx)) + end if + end if +end do + +! Deallocate the map +deallocate(index_map) end subroutine get_my_close diff --git a/models/wrf_hydro/noah_hydro_mod.f90 b/models/wrf_hydro/noah_hydro_mod.f90 index 0d9cf3a1da..be3ac6fce6 100644 --- a/models/wrf_hydro/noah_hydro_mod.f90 +++ b/models/wrf_hydro/noah_hydro_mod.f90 @@ -150,6 +150,7 @@ module noah_hydro_mod real(r8), allocatable, dimension(:) :: length integer, allocatable, dimension(:) :: to integer, allocatable, dimension(:) :: BucketMask +integer, allocatable, dimension(:) :: num_up_links integer, parameter :: IDSTRLEN = 15 ! must match declaration in netCDF file character(len=IDSTRLEN), allocatable, dimension(:) :: gageID ! NHD Gage Event ID from SOURCE_FEA field in Gages feature class @@ -456,127 +457,125 @@ subroutine get_hydro_constants(filename) integer :: i, ii, jj, io integer :: ncid, DimID, VarID, numdims, dimlen, xtype - -io = nf90_open(filename, NF90_NOWRITE, ncid) -call nc_check(io, routine, 'open', filename) - -! The number of latitudes is dimension 'y' -io = nf90_inq_dimid(ncid, 'y', DimID) -call nc_check(io, routine, 'inq_dimid y', filename) - -io = nf90_inquire_dimension(ncid, DimID, len=n_hlat) -call nc_check(io, routine,'inquire_dimension y',filename) - -! The number of longitudes is dimension 'x' -io = nf90_inq_dimid(ncid, 'x', DimID) -call nc_check(io, routine,'inq_dimid x',filename) - -io = nf90_inquire_dimension(ncid, DimID, len=n_hlong) -call nc_check(io, routine,'inquire_dimension x',filename) - -!>@todo could just check the dimension lengths for LONGITUDE -!> and use them for all ... removes the dependency on knowing -!> the dimension names are 'y' and 'x' ... and the order. - -!! module allocation -allocate(hlong(n_hlong, n_hlat)) -allocate( hlat(n_hlong, n_hlat)) - -!! local allocation -allocate(hlongFlip(n_hlong, n_hlat)) -allocate( hlatFlip(n_hlong, n_hlat)) - -! Require that the xlong and xlat are the same shape.?? -io = nf90_inq_varid(ncid, 'LONGITUDE', VarID) -call nc_check(io, routine,'inq_varid LONGITUDE',filename) - -io = nf90_inquire_variable(ncid, VarID, dimids=dimIDs, & - ndims=numdims, xtype=xtype) -call nc_check(io, routine, 'inquire_variable LONGITUDE',filename) - -! numdims = 2, these are all 2D fields -! Form the start/count such that we always get the 'latest' time. -ncstart(:) = 0 -nccount(:) = 0 -do i = 1,numdims - write(string1,'(''LONGITUDE inquire dimension '',i2,A)') i,trim(filename) - io = nf90_inquire_dimension(ncid, dimIDs(i), name=dimname, len=dimlen) - call nc_check(io, routine, string1) - ncstart(i) = 1 - nccount(i) = dimlen - if ((trim(dimname) == 'Time') .or. 
(trim(dimname) == 'time')) then - ncstart(i) = dimlen - nccount(i) = 1 - endif -enddo - -if (debug > 99) then - write(*,*)'DEBUG get_hydro_constants ncstart is',ncstart(1:numdims) - write(*,*)'DEBUG get_hydro_constants nccount is',nccount(1:numdims) -endif - -!get the longitudes -io = nf90_get_var(ncid, VarID, hlong, start=ncstart(1:numdims), & - count=nccount(1:numdims)) -call nc_check(io, routine, 'get_var LONGITUDE',filename) - -where(hlong < 0.0_r8) hlong = hlong + 360.0_r8 -where(hlong == 360.0_r8) hlong = 0.0_r8 - -!get the latitudes -io = nf90_inq_varid(ncid, 'LATITUDE', VarID) -call nc_check(io, routine,'inq_varid LATITUDE',filename) -io = nf90_get_var(ncid, VarID, hlat, start=ncstart(1:numdims), & - count=nccount(1:numdims)) -call nc_check(io, routine, 'get_var LATITUDE',filename) - -where (hlat < -90.0_r8) hlat = -90.0_r8 -where (hlat > 90.0_r8) hlat = 90.0_r8 - -! Flip the longitues and latitudes -do ii=1,n_hlong - do jj=1,n_hlat - hlongFlip(ii,jj) = hlong(ii,n_hlat-jj+1) - hlatFlip(ii,jj) = hlat(ii,n_hlat-jj+1) - end do -end do -hlong = hlongFlip -hlat = hlatFlip -deallocate(hlongFlip, hlatFlip) - -!get the channelgrid -! i'm doing this exactly to match how it's done in the wrf_hydro code -! (line 1044 of /home/jamesmcc/WRF_Hydro/ndhms_wrf_hydro/trunk/NDHMS/Routing/module_HYDRO_io.F) -! so that the output set of indices correspond to the grid in the Fulldom file -! and therefore these can be used to grab other channel attributes in that file. -! but the code is really long so I've put it in a module subroutine. -! Dont need to flip lat and lon in this (already done) but will flip other vars from Fulldom file. -! Specify channel routing option: 1=Muskingam-reach, 2=Musk.-Cunge-reach, 3=Diff.Wave-gridded - +integer, allocatable, dimension(:) :: col + + !get the channelgrid + ! i'm doing this exactly to match how it's done in the wrf_hydro code + ! (line 1044 of /home/jamesmcc/WRF_Hydro/ndhms_wrf_hydro/trunk/NDHMS/Routing/module_HYDRO_io.F) + ! so that the output set of indices correspond to the grid in the Fulldom file + ! and therefore these can be used to grab other channel attributes in that file. + ! but the code is really long so I've put it in a module subroutine. + ! Dont need to flip lat and lon in this (already done) but will flip other vars from Fulldom file. + ! Specify channel routing option: 1=Muskingam-reach, 2=Musk.-Cunge-reach, 3=Diff.Wave-gridded + if ( chanrtswcrt == 1 ) then - - if ( channel_option == 2) then - - call get_routelink_constants(route_link_f) - - else if ( channel_option == 3) then - - call getChannelGridCoords(filename, ncid, numdims, ncstart, nccount) - call get_basn_msk( filename, ncid, numdims, ncstart, nccount, n_hlong, n_hlat) + + if ( channel_option == 2) then + + call get_routelink_constants(route_link_f) + + else if ( channel_option == 3) then + + io = nf90_open(filename, NF90_NOWRITE, ncid) + call nc_check(io, routine, 'open', filename) + + ! The number of latitudes is dimension 'y' + io = nf90_inq_dimid(ncid, 'y', DimID) + call nc_check(io, routine, 'inq_dimid y', filename) + + io = nf90_inquire_dimension(ncid, DimID, len=n_hlat) + call nc_check(io, routine,'inquire_dimension y',filename) + + ! The number of longitudes is dimension 'x' + io = nf90_inq_dimid(ncid, 'x', DimID) + call nc_check(io, routine,'inq_dimid x',filename) + + io = nf90_inquire_dimension(ncid, DimID, len=n_hlong) + call nc_check(io, routine,'inquire_dimension x',filename) + + !>@todo could just check the dimension lengths for LONGITUDE + !> and use them for all ... 
removes the dependency on knowing + !> the dimension names are 'y' and 'x' ... and the order. + + !! module allocation + allocate(hlong(n_hlong, n_hlat), hlat(n_hlong, n_hlat)) + + !! local allocation + !allocate(hlongFlip(n_hlong, n_hlat), hlatFlip(n_hlong, n_hlat)) + allocate(col(n_hlat)) + + ! Require that the xlong and xlat are the same shape.?? + io = nf90_inq_varid(ncid, 'LONGITUDE', VarID) + call nc_check(io, routine,'inq_varid LONGITUDE',filename) + + io = nf90_inquire_variable(ncid, VarID, dimids=dimIDs, & + ndims=numdims, xtype=xtype) + call nc_check(io, routine, 'inquire_variable LONGITUDE',filename) + + ! numdims = 2, these are all 2D fields + ! Form the start/count such that we always get the 'latest' time. + ncstart(:) = 0 + nccount(:) = 0 + do i = 1,numdims + write(string1,'(''LONGITUDE inquire dimension '',i2,A)') i,trim(filename) + io = nf90_inquire_dimension(ncid, dimIDs(i), name=dimname, len=dimlen) + call nc_check(io, routine, string1) + ncstart(i) = 1 + nccount(i) = dimlen + if ((trim(dimname) == 'Time') .or. (trim(dimname) == 'time')) then + ncstart(i) = dimlen + nccount(i) = 1 + endif + enddo + + if (debug > 99) then + write(*,*)'DEBUG get_hydro_constants ncstart is',ncstart(1:numdims) + write(*,*)'DEBUG get_hydro_constants nccount is',nccount(1:numdims) + endif + + !get the longitudes + io = nf90_get_var(ncid, VarID, hlong, start=ncstart(1:numdims), & + count=nccount(1:numdims)) + call nc_check(io, routine, 'get_var LONGITUDE',filename) + + where(hlong < 0.0_r8) hlong = hlong + 360.0_r8 + where(hlong == 360.0_r8) hlong = 0.0_r8 + + !get the latitudes + io = nf90_inq_varid(ncid, 'LATITUDE', VarID) + call nc_check(io, routine,'inq_varid LATITUDE',filename) + io = nf90_get_var(ncid, VarID, hlat, start=ncstart(1:numdims), & + count=nccount(1:numdims)) + call nc_check(io, routine, 'get_var LATITUDE',filename) + + where (hlat < -90.0_r8) hlat = -90.0_r8 + where (hlat > 90.0_r8) hlat = 90.0_r8 + + ! 
Flip the longitues and latitudes + do jj = 1, n_hlat + col(jj) = n_hlat-jj+1 + enddo + hlong = hlong(:, col) + hlat = hlat(:, col) + + call getChannelGridCoords(filename, ncid, numdims, ncstart, nccount) + call get_basn_msk( filename, ncid, numdims, ncstart, nccount, n_hlong, n_hlat) + + io = nf90_close(ncid) + call nc_check(io, routine, filename) else write(string1,'("channel_option ",i1," is not supported.")')channel_option call error_handler(E_ERR,routine,string1,source) - endif + else + write(string1,'("CHANRTSWCRT ",i1," is not supported.")')chanrtswcrt write(string2,*)'This is specified in hydro.namelist' call error_handler(E_ERR,routine,string1,source) -endif -io = nf90_close(ncid) -call nc_check(io, routine, filename) +endif end subroutine get_hydro_constants @@ -982,6 +981,7 @@ subroutine get_routelink_constants(filename) integer, allocatable :: fromIndsStart(:) integer, allocatable :: fromIndsEnd(:) integer, allocatable :: toIndex(:) +!integer, allocatable :: num_up_links(:) ncid = nc_open_file_readonly(filename, routine) database_length = nc_get_dimension_size(ncid,'index', routine) @@ -998,25 +998,29 @@ subroutine get_routelink_constants(filename) allocate (fromIndsStart( n_link)) allocate (fromIndsEnd( n_link)) allocate (toIndex( n_link)) +allocate (num_up_links( n_link)) call nc_get_variable(ncid,'fromIndices', fromIndices, routine) call nc_get_variable(ncid,'fromIndsStart',fromIndsStart,routine) call nc_get_variable(ncid,'fromIndsEnd', fromIndsEnd, routine) call nc_get_variable(ncid,'toIndex', toIndex, routine) -n_upstream = maxval(fromIndsEnd - fromIndsStart) + 1 +n_upstream = maxval(fromIndsEnd - fromIndsStart) + 1 +num_up_links = fromIndsEnd - fromIndsStart + 1 !! Allocate these module variables allocate(linkLong(n_link), linkLat(n_link), linkAlt(n_link)) -allocate(roughness(n_link)) -allocate(linkID(n_link)) -allocate(gageID(n_link)) +!allocate(roughness(n_link)) +!allocate(linkID(n_link)) +!allocate(gageID(n_link)) allocate(channelIndsX(n_link), channelIndsY(n_link)) allocate(connections(n_link)) + do i = 1, n_link - allocate(connections(i)%upstream_linkID(n_upstream)) - allocate(connections(i)%upstream_index(n_upstream)) + !allocate(connections(i)%upstream_linkID(n_upstream)) + allocate(connections(i)%upstream_index(num_up_links(i))) enddo + allocate(length(n_link)) allocate(to(n_link)) allocate(BucketMask(n_link)) @@ -1028,23 +1032,24 @@ subroutine get_routelink_constants(filename) ! length: Length (Stream length (m)) ! to: "To Link ID (PlusFlow table TOCOMID for every FROMCOMID)" -call nc_get_variable(ncid,'lon', linkLong, routine) -call nc_get_variable(ncid,'lat', linkLat, routine) -call nc_get_variable(ncid,'alt', linkAlt, routine) -call nc_get_variable(ncid,'n', roughness, routine) -call nc_get_variable(ncid,'link', linkID, routine) -call nc_get_variable(ncid,'Length',length, routine) -call nc_get_variable(ncid,'to', to, routine) +call nc_get_variable(ncid,'lon', linkLong, routine) +call nc_get_variable(ncid,'lat', linkLat, routine) +call nc_get_variable(ncid,'alt', linkAlt, routine) +!call nc_get_variable(ncid,'n', roughness, routine) +!call nc_get_variable(ncid,'link', linkID, routine) +call nc_get_variable(ncid,'Length', length, routine) +call nc_get_variable(ncid,'to', to, routine) call nc_get_variable(ncid,'bucket_comid_mask',BucketMask,routine) ! no snappy accessor routine for character arrays ! 
call nc_get_variable(ncid,'gages', gageID, routine) -io = nf90_inq_varid(ncid,'gages', VarID) -call nc_check(io, routine, 'inq_varid', 'gages', filename) -io = nf90_get_var(ncid, VarID, gageID) -call nc_check(io, routine, 'get_var', 'gages', filename) -call nc_close_file(ncid, routine) +!io = nf90_inq_varid(ncid,'gages', VarID) +!call nc_check(io, routine, 'inq_varid', 'gages', filename) +!io = nf90_get_var(ncid, VarID, gageID) +!call nc_check(io, routine, 'get_var', 'gages', filename) + +!call nc_close_file(ncid, routine) ! Longitude [DART uses longitudes [0,360)] where(linkLong < 0.0_r8) linkLong = linkLong + 360.0_r8 @@ -1054,19 +1059,20 @@ subroutine get_routelink_constants(filename) channelIndsX = (/ (i, i=1,n_link) /) channelIndsY = (/ (i, i=1,n_link) /) -call fill_connections(toIndex,fromIndices,fromIndsStart,fromIndsEnd) +call fill_connections(toIndex,fromIndices,fromIndsStart,fromIndsEnd, num_up_links) if (debug > 99) then do i=1,n_link - write(*,*)'link ',i,linkLong(i),linkLat(i),linkAlt(i),gageID(i),roughness(i),linkID(i),BucketMask(i) + !write(*,*)'link ',i,linkLong(i),linkLat(i),linkAlt(i),gageID(i),roughness(i),linkID(i),BucketMask(i) + write(*,*)'link ',i,linkLong(i),linkLat(i),linkAlt(i),BucketMask(i) enddo write(*,*)'Longitude range is ',minval(linkLong),maxval(linkLong) write(*,*)'Latitude range is ',minval(linkLat),maxval(linkLat) write(*,*)'Altitude range is ',minval(linkAlt),maxval(linkAlt) endif -deallocate(linkID) -deallocate(gageID) +!deallocate(linkID) +!deallocate(gageID) deallocate(length) deallocate(to) deallocate(toIndex) @@ -1080,25 +1086,28 @@ end subroutine get_routelink_constants !----------------------------------------------------------------------- !> -subroutine fill_connections(toIndex,fromIndices,fromIndsStart,fromIndsEnd) +subroutine fill_connections(toIndex,fromIndices,fromIndsStart,fromIndsEnd,num_up_links) integer, intent(in) :: toIndex(:) integer, intent(in) :: fromIndices(:) integer, intent(in) :: fromIndsStart(:) integer, intent(in) :: fromIndsEnd(:) +integer, intent(inout) :: num_up_links(:) integer :: i, j, id, nfound +integer, parameter :: MAX_UPSTREAM_LINKS = 5 + ! hydro_domain_offset = 0 !>@todo get the actual offset somehow do i = 1,n_link - connections(i)%gageName = gageID(i) - connections(i)%linkID = linkID(i) +! connections(i)%gageName = gageID(i) +! connections(i)%linkID = linkID(i) connections(i)%linkLength = length(i) connections(i)%domain_offset = i connections(i)%downstream_linkID = to(i) connections(i)%downstream_index = toIndex(i) - connections(i)%upstream_linkID(:) = MISSING_I + !connections(i)%upstream_linkID(:) = MISSING_I connections(i)%upstream_index(:) = MISSING_I enddo @@ -1114,31 +1123,34 @@ subroutine fill_connections(toIndex,fromIndices,fromIndsStart,fromIndsEnd) UPSTREAM : do id = 1,n_link ! If there is nothing upstream ... already set to MISSING - if ( fromIndsStart(id) == 0 ) cycle UPSTREAM + if ( fromIndsStart(id) == 0 ) then + num_up_links(id) = 0 + cycle UPSTREAM + endif + connections(id)%upstream_index(:) = fromIndices(fromIndsStart(id):fromIndsEnd(id)) - nfound = 0 - do j = fromIndsStart(id),fromIndsEnd(id) ! loops over dimension to query - nfound = nfound + 1 - connections(id)%upstream_linkID(nfound) = linkID(fromIndices(j)) - connections(id)%upstream_index(nfound) = fromIndices(j) - enddo enddo UPSTREAM +! Ignore links that have outlets at the lakes +! This removes those having an extreme number of upstream links from +! 
the localization tree search
+where(num_up_links > MAX_UPSTREAM_LINKS) num_up_links = 0
+
 if (debug > 99) then
    write(string1,'("PE ",i3)') my_task_id()
!   do i = 1,n_link
    do i = 54034,54034
       write(*,*)''
       write(*,*)trim(string1),' connectivity for link : ',i
-      write(*,*)trim(string1),' gageName          : ',connections(i)%gageName
-      write(*,*)trim(string1),' linkID            : ',connections(i)%linkID
+      !write(*,*)trim(string1),' gageName          : ',connections(i)%gageName
+      !write(*,*)trim(string1),' linkID            : ',connections(i)%linkID
       write(*,*)trim(string1),' linkLength        : ',connections(i)%linkLength
       write(*,*)trim(string1),' domain_offset     : ',connections(i)%domain_offset
       write(*,*)trim(string1),' downstream_linkID : ',connections(i)%downstream_linkID
       write(*,*)trim(string1),' downstream_index  : ',connections(i)%downstream_index
-      write(*,*)trim(string1),' upstream_linkID   : ',connections(i)%upstream_linkID
+      !write(*,*)trim(string1),' upstream_linkID   : ',connections(i)%upstream_linkID
       write(*,*)trim(string1),' upstream_index    : ',connections(i)%upstream_index
    enddo
 endif
@@ -1189,11 +1201,11 @@ recursive subroutine get_link_tree(my_index, reach_cutoff, depth, &
    write(*,*)trim(string1),' glt:task, distances ', distances(1:nclose)
 endif
 
-do iup = 1,n_upstream
-   if ( connections(my_index)%upstream_index(iup) /= MISSING_I8 ) then
-      call get_link_tree( connections(my_index)%upstream_index(iup), &
-           reach_cutoff, depth+1, direct_length, nclose, close_indices, distances)
-   endif
+!write(*, '(A, i5, f10.2, i8, i4, 5i8)') 'depth, distance, node, # upstream, up nodes: ', depth, direct_length, my_index, num_up_links(my_index), connections(my_index)%upstream_index(:)
+
+do iup = 1,num_up_links(my_index)
+   call get_link_tree(connections(my_index)%upstream_index(iup), &
+        reach_cutoff, depth+1, direct_length, nclose, close_indices, distances)
 enddo
 
 end subroutine get_link_tree

From 687964498ee7b576d4c3f4dfb0c4e6b8bdb483fc Mon Sep 17 00:00:00 2001
From: Mohamad Gharamti
Date: Thu, 15 Feb 2024 16:13:53 -0700
Subject: [PATCH 04/36] Improved HydroDART Diagnostics + More Capabilities

Modifications to hydroDART's diagnostic routine now allow the following:

1. Saves the hydrographs in a high-resolution pdf
2. Handles hybrid DA components (weight mean and sd)
3. Better parsing of the netcdf files
4. Allows the openloop to have a different ensemble size (compared to
   the regular DA runs)
5. Better time handling
6. Collects more information about the openloop run
7. The openloop may have different gauges than the DA runs
8. Better usage of matlab functions (removing obsolete ones)
9. Separate plots for the hybrid statistics
---
 models/wrf_hydro/matlab/HydroDARTdiags.m | 478 +++++++++++++++++------
 1 file changed, 351 insertions(+), 127 deletions(-)

diff --git a/models/wrf_hydro/matlab/HydroDARTdiags.m b/models/wrf_hydro/matlab/HydroDARTdiags.m
index a730c80b6c..d2ea7d31bd 100644
--- a/models/wrf_hydro/matlab/HydroDARTdiags.m
+++ b/models/wrf_hydro/matlab/HydroDARTdiags.m
@@ -1,4 +1,5 @@
-function [observation, openloop, forecast, analysis, exp] = HydroDARTdiags(dir_exps, obs, dir_ol, disp_res, plot_state)
+function [observation, openloop, forecast, analysis, exp] = ...
+    HydroDARTdiags(dir_exps, obs, dir_ol, disp_res, plot_state, fig_to_pdf)
 
 %% DART software - Copyright UCAR. 
This open source software is provided % by UCAR, "as is", without charge, subject to all terms of use at @@ -35,19 +36,19 @@ elseif nargin > 2 plot_ol = true; -elseif nargin == 2 - plot_ol = false; +elseif nargin == 2 % no open-loop case + plot_ol = false; plot_state = false; + disp_res = 1; + fig_to_pdf = 'results.pdf'; end gY = [ 150, 150, 150 ]/255; lB = [ 153, 255, 255 ]/255; -lP = [ 204, 153, 255 ]/255; bK = [ 0, 0, 0 ]/255; bL = [ 30, 144, 255 ]/255; rD = [ 255, 51, 51 ]/255; gR = [ 0, 153, 0 ]/255; -pR = [ 153, 51, 255 ]/255; oR = [ 255, 153, 51 ]/255; @@ -82,50 +83,75 @@ end end +% Do we have any hybrid weight files? +ishybrid = '/all_output_hybridweight_mean.nc'; + +hyb_flavor = cell(num_exps); +hyb_flav_n = zeros(num_exps); +for e = 1:num_exps + if exist(char(strcat(dir_exps(e), ishybrid)), 'file') == 2 + hyb_flavor{e} = '/all_output_hybridweight_'; + hyb_flav_n(e) = 1; + end +end + nc = struct; for e = 1:num_exps diag_dir = dir_exps(e); - nc(e).state_mean_pr = char(strcat(diag_dir, '/all_preassim_mean.nc' )); % aggregated prior state_mean - nc(e).state_sd_pr = char(strcat(diag_dir, '/all_preassim_sd.nc' )); % aggregated prior state_sd + nc(e).state_mean_pr = char(strcat(diag_dir, '/all_preassim_mean.nc' )); % aggregated prior state_mean + nc(e).state_sd_pr = char(strcat(diag_dir, '/all_preassim_sd.nc' )); % aggregated prior state_sd - nc(e).state_mean_po = char(strcat(diag_dir, '/all_analysis_mean.nc' )); % aggregated analysis state_mean - nc(e).state_sd_po = char(strcat(diag_dir, '/all_analysis_sd.nc' )); % aggregated analysis state_sd + nc(e).state_mean_po = char(strcat(diag_dir, '/all_analysis_mean.nc' )); % aggregated analysis state_mean + nc(e).state_sd_po = char(strcat(diag_dir, '/all_analysis_sd.nc' )); % aggregated analysis state_sd if inf_flav_n(e, 1) > 0 && inf_flav_n(e, 2) > 0 - nc(e).pr_inflate_mean = char(strcat(diag_dir, inf_flavor{e, 1}, 'mean.nc' )); % aggregated inf_mean - nc(e).pr_inflate_sd = char(strcat(diag_dir, inf_flavor{e, 1}, 'sd.nc' )); % aggregated inf_std - nc(e).po_inflate_mean = char(strcat(diag_dir, inf_flavor{e, 2}, 'mean.nc' )); % aggregated inf_mean - nc(e).po_inflate_sd = char(strcat(diag_dir, inf_flavor{e, 2}, 'sd.nc' )); % aggregated inf_std + nc(e).pr_inflate_mean = char(strcat(diag_dir, inf_flavor{e, 1}, 'mean.nc')); % aggregated inf_mean + nc(e).pr_inflate_sd = char(strcat(diag_dir, inf_flavor{e, 1}, 'sd.nc' )); % aggregated inf_std + nc(e).po_inflate_mean = char(strcat(diag_dir, inf_flavor{e, 2}, 'mean.nc')); % aggregated inf_mean + nc(e).po_inflate_sd = char(strcat(diag_dir, inf_flavor{e, 2}, 'sd.nc' )); % aggregated inf_std elseif inf_flav_n(e, 1) > 0 - nc(e).pr_inflate_mean = char(strcat(diag_dir, inf_flavor{e, 1}, 'mean.nc' )); % aggregated inf_mean - nc(e).pr_inflate_sd = char(strcat(diag_dir, inf_flavor{e, 1}, 'sd.nc' )); % aggregated inf_std + nc(e).pr_inflate_mean = char(strcat(diag_dir, inf_flavor{e, 1}, 'mean.nc')); % aggregated inf_mean + nc(e).pr_inflate_sd = char(strcat(diag_dir, inf_flavor{e, 1}, 'sd.nc' )); % aggregated inf_std elseif inf_flav_n(e, 2) > 0 - nc(e).po_inflate_mean = char(strcat(diag_dir, inf_flavor{e, 2}, 'mean.nc' )); % aggregated inf_mean - nc(e).po_inflate_sd = char(strcat(diag_dir, inf_flavor{e, 2}, 'sd.nc' )); % aggregated inf_std + nc(e).po_inflate_mean = char(strcat(diag_dir, inf_flavor{e, 2}, 'mean.nc')); % aggregated inf_mean + nc(e).po_inflate_sd = char(strcat(diag_dir, inf_flavor{e, 2}, 'sd.nc' )); % aggregated inf_std + end + if hyb_flav_n(e) > 0 + nc(e).hybrid_mean = char(strcat(diag_dir, 
hyb_flavor{e}, 'mean.nc')); % aggregated hyb_mean + nc(e).hybrid_sd = char(strcat(diag_dir, hyb_flavor{e}, 'sd.nc' )); % aggregated hyb_std end - - nc(e).routelink = char(strcat(diag_dir, '/RouteLink.nc' )); % routelink file - nc(e).obs_diag = char(strcat(diag_dir, '/obs_diag_output.nc' )); % output of obs_diag - nc(e).obs_epoc = char(strcat(diag_dir, '/obs_epoch_001.nc' )); % output of obs_seq_to_netcdf + nc(e).routelink = char(strcat(diag_dir, '/../RouteLink.nc' )); % routelink file + + nc(e).obs_diag = char(strcat(diag_dir, '/obs_diag_output.nc' )); % output of obs_diag + nc(e).obs_epoc = char(strcat(diag_dir, '/obs_epoch_001.nc' )); % output of obs_seq_to_netcdf % open loop if plot_ol - ol.obs_diag = char(strcat(dir_ol , '/obs_diag_output.nc' )); - ol.obs_epoc = char(strcat(dir_ol , '/obs_epoch_001.nc' )); + ol.obs_diag = char(strcat(dir_ol , '/obs_diag_output.nc' )); + ol.obs_epoc = char(strcat(dir_ol , '/obs_epoch_001.nc' )); end end +% Figure the links and time variables in the netcdf file +ncid = netcdf.open(nc(e).state_mean_pr, 'NC_NOWRITE'); +ncvar = netcdf.inqDim(ncid, 0); +if strcmp(ncvar, 'links') + iL = 0; iT = 1; +else + iT = 0; iL = 1; +end + % Retrieve the dimensions from the netcdf file Nt_tmp = zeros(1, num_exps); for e = 1:num_exps ncid = netcdf.open(nc(e).state_mean_pr, 'NC_NOWRITE'); - [~, Nt_tmp(e)] = netcdf.inqDim(ncid, 0); % # of assim cycles + [~, Nt_tmp(e)] = netcdf.inqDim(ncid, iT); % # of assim cycles netcdf.close(ncid); end if length(unique(Nt_tmp)) > 1 @@ -134,7 +160,7 @@ Nt = Nt_tmp(1); ncid = netcdf.open(nc(1).state_mean_pr, 'NC_NOWRITE'); -[~, Nl] = netcdf.inqDim(ncid, 1); % # of links in the domain +[~, Nl] = netcdf.inqDim(ncid, iL); % # of links in the domain netcdf.close(ncid); exp = struct; @@ -146,6 +172,18 @@ netcdf.close(ncid); end +% ensemble size of the open loop +ncid = netcdf.open(ol.obs_diag, 'NC_NOWRITE'); +[~, rbins] = netcdf.inqDim(ncid, 14); % # of bins in the rank histogram (i.e., ens_size+1) +ol.ens_size = rbins-1; % size of the ensemble +netcdf.close(ncid); + +% separate exp name from path +exp_name = string(missing); +for e = 1:num_exps + sp_names = strsplit(dir_exps{e}, '/'); + exp_name(e) = sp_names{end}; +end %% TIME HANDLING: @@ -165,10 +203,17 @@ xticks = current(time_label); xtickslabel = goodtime(time_label, :); +% obs_diag time +od_time = ncread(nc(e).obs_diag, 'time') + origin; +od_time_b1 = find(od_time == current(1)); +od_time_b2 = find(od_time == current(Nt)); +diag_range = od_time_b1:od_time_b2; %% PROCESSING DATA: gauge_id = strtrim(ncread(nc(1).routelink, 'gages')'); +obserr_L = 1.e8; + % All available gauges in the domain: k = 0; for i = 1:Nl @@ -210,14 +255,16 @@ end - % Reading: for e = 1:num_exps exp(e).ensemble = double(ncread(nc(e).obs_epoc, 'observations')); exp(e).obs_ind = -1 * double(ncread(nc(e).obs_epoc, 'obs_type')); exp(e).O_time = double(ncread(nc(e).obs_epoc, 'time')) + origin; - if plot_ol, ol.ensemble = double(ncread(ol.obs_epoc, 'observations')); end + if plot_ol + ol.ensemble = double(ncread(ol.obs_epoc, 'observations')); + ol.obs_ind = -1 * double(ncread(ol.obs_epoc, 'obs_type')); + end % State and spread files exp(e).pr.state.x1 = ncread(nc(e).state_mean_pr, 'qlink1'); @@ -272,15 +319,27 @@ exp(e).po.infs.x2 = ncread(nc(e).po_inflate_sd , 'z_gwsubbas'); end end + + % Hybrid files + if hyb_flav_n(e) > 0 + exp(e).pr.hybm.x1 = ncread(nc(e).hybrid_mean, 'qlink1'); + exp(e).pr.hybs.x1 = ncread(nc(e).hybrid_sd , 'qlink1'); + + if bucket + exp(e).pr.hybm.x2 = ncread(nc(e).hybrid_mean, 'z_gwsubbas'); + 
exp(e).pr.hybs.x2 = ncread(nc(e).hybrid_sd , 'z_gwsubbas'); + end + end end end -openloop = cell( 3, gauges.want.num ); -forecast = cell(10, gauges.want.num, num_exps); +openloop = cell( 4, gauges.want.num ); +forecast = cell(12, gauges.want.num, num_exps); observation = cell( 9, gauges.want.num, num_exps); analysis = cell( 9, gauges.want.num, num_exps); flood = zeros(gauges.want.num, num_exps); + for i = 1:gauges.want.num k = gauges.want.IND(i); @@ -289,6 +348,8 @@ find_obs = k == exp(e).obs_ind; + fprintf('exp: %2d, obs no: %3d, dart-index: %6d, USGS gauge ID: %10d\n', e, i, k, gauges.want.OID(i)) + tmp.obs_val = exp(e).ensemble(1, find_obs); tmp.ens_mean_f = exp(e).ensemble(2, find_obs); tmp.ens_sd_f = exp(e).ensemble(4, find_obs); @@ -298,7 +359,14 @@ tmp.ens_sd_a = exp(e).ensemble(5, find_obs); tmp.ensemble_a = exp(e).ensemble(7:2:end, find_obs); - if plot_ol, tmp.ens_mean_ol = ol.ensemble(2, find_obs); tmp.ens_sd_ol = ol.ensemble(4, find_obs); end + if plot_ol + % Just in case, open loop has more gauges + ol_obs = k == ol.obs_ind; + + tmp.ens_mean_ol = ol.ensemble(2, ol_obs); + tmp.ens_sd_ol = ol.ensemble(4, ol_obs); + tmp.ensemble_ol = ol.ensemble(6:2:end-1, ol_obs); + end ens_time = zeros(1, Nt+1); Found_time = exp(e).O_time(find_obs); @@ -331,7 +399,11 @@ ens_sd_a = zeros(1, Nt); ensemble_a = NaN(exp(e).ens_size, Nt); - if plot_ol, ens_mean_ol = zeros(1, Nt); ens_sd_ol = zeros(1, Nt); end + if plot_ol + ens_mean_ol = zeros(1, Nt); + ens_sd_ol = zeros(1, Nt); + ensemble_ol = NaN(ol.ens_size, Nt); + end for j = 1:Nt if ~isnan(ens_time(j)) && ~isnan(ens_time(j+1)) @@ -343,10 +415,11 @@ ens_mean_a(j) = mean(tmp.ens_mean_a(:, ens_time(j):ens_time(j+1)), 2); ens_sd_a(j) = mean(tmp.ens_sd_a (:, ens_time(j):ens_time(j+1)), 2); ensemble_a(:, j) = mean(tmp.ensemble_a(:, ens_time(j):ens_time(j+1)), 2); - + if plot_ol - ens_mean_ol(j) = mean(tmp.ens_mean_ol(:, ens_time(j):ens_time(j+1)), 2); - ens_sd_ol(j) = mean(tmp.ens_sd_ol(:, ens_time(j):ens_time(j+1)), 2); + ens_mean_ol(j) = mean(tmp.ens_mean_ol(:, ens_time(j):ens_time(j+1)), 2); + ens_sd_ol(j) = mean(tmp.ens_sd_ol(:, ens_time(j):ens_time(j+1)), 2); + ensemble_ol(:, j) = mean(tmp.ensemble_ol(:, ens_time(j):ens_time(j+1)), 2); end elseif ~isnan(ens_time(j)) @@ -360,8 +433,9 @@ ensemble_a(:, j) = tmp.ensemble_a(:, ens_time(j)); if plot_ol - ens_mean_ol(j) = tmp.ens_mean_ol(:, ens_time(j)); - ens_sd_ol(j) = tmp.ens_sd_ol(:, ens_time(j)); + ens_mean_ol(j) = tmp.ens_mean_ol(:, ens_time(j)); + ens_sd_ol(j) = tmp.ens_sd_ol(:, ens_time(j)); + ensemble_ol(:, j) = tmp.ensemble_ol(:, ens_time(j)); end @@ -376,8 +450,9 @@ ensemble_a(:, j) = tmp.ensemble_a(:, ens_time(j+1)); if plot_ol - ens_mean_ol(j) = tmp.ens_mean_ol(:, ens_time(j+1)); - ens_sd_ol(j) = tmp.ens_sd_ol(:, ens_time(j+1)); + ens_mean_ol(j) = tmp.ens_mean_ol(:, ens_time(j+1)); + ens_sd_ol(j) = tmp.ens_sd_ol(:, ens_time(j+1)); + ensemble_ol(:, j) = tmp.ensemble_ol(:, ens_time(j+1)); end else @@ -390,13 +465,17 @@ ens_sd_a(j) = NaN; ensemble_a(:, j) = NaN; - if plot_ol, ens_mean_ol(j) = NaN; ens_sd_ol(j) = NaN; end + if plot_ol + ens_mean_ol(j) = NaN; + ens_sd_ol(j) = NaN; + ensemble_ol(:, j) = NaN; + end end end clear tmp - - flood(i, e) = find(obs_val == nanmax(obs_val), 1); + + flood(i, e) = find(obs_val == max(obs_val, [], 'omitnan'), 1); varname_f = strcat(gauges.want.names(i, :), '_guess'); varname_a = strcat(gauges.want.names(i, :), '_analy'); @@ -406,15 +485,16 @@ % Manage the forecast copies tmp_f = squeeze(double(ncread(nc(e).obs_diag, varname_f))); + tmp_f = tmp_f(:, 
diag_range);

     rmse_f      = abs(ens_mean_f - obs_val);
     bias_f      = obs_val - ens_mean_f;
     totspread_f = sqrt(ens_sd_f.^2 + obs_var.^2);

     rank_hist_f = squeeze(double(ncread(nc(e).obs_diag, strcat(varname_f, '_RankHist'))));
+    rank_hist_f = rank_hist_f(:, diag_range);

     if inf_flav_n(e, 1) > 0
         pr_inflate_mean = exp(e).pr.infm.x1(k, :);
-        pr_inflate_sd   = exp(e).pr.infs.x1(k, :);
-
+        pr_inflate_sd   = exp(e).pr.infs.x1(k, :);
     elseif inf_flav_n(e, 1) == 0
         pr_inflate_mean = nan;
         pr_inflate_sd   = nan;
@@ -428,6 +508,14 @@
             ensemble_f_def(:, j) = 1/lambda(j) * (ensemble_f(:, j) - ens_mean_f(j)) + ens_mean_f(j);
         end
     end
+
+    if hyb_flav_n(e) > 0
+        hybrid_mean = exp(e).pr.hybm.x1(k, :);
+        hybrid_sd   = exp(e).pr.hybs.x1(k, :);
+    else
+        hybrid_mean = nan;
+        hybrid_sd   = nan;
+    end

     % Manage the observation copies
     obs_poss = tmp_f(1, :);
@@ -471,6 +559,7 @@
         openloop(:, i) = { ens_mean_ol ; ...
                            rmse_ol     ; ...
                            ens_sd_ol   ; ...
+                           ensemble_ol ; ...
                          };
     end
@@ -484,6 +573,8 @@
                          ensemble_f_def  ; ...
                          pr_inflate_mean ; ...
                          pr_inflate_sd   ; ...
+                         hybrid_mean     ; ...
+                         hybrid_sd       ; ...
                        };

     observation(:, i, e) = { k ; ...
@@ -517,6 +608,8 @@

 if disp_res

+if isfile(fig_to_pdf), delete(fig_to_pdf); end
+
 %% TIME SERIES EVOLUTION:

 for o = 1:gauges.want.num
@@ -524,13 +617,10 @@

     if num_exps > 1
         if num_exps == 2
-
             figure('uni','pi','pos',[50, 600, 1600, 450]);
         elseif num_exps <= 4
-
             figure('uni','pi','pos',[50, 600, 1600, 900]);
         else
-
             figure('uni','pi','pos',[50, 600, 1600, 1000]);
         end
@@ -538,13 +628,24 @@
     for e = 1:num_exps

         if num_exps == 2
+
+            start_x = [.05, .55];
+            fig_wid = .40;
+
+            fig_ht1 = .50;
+            fig_ht2 = .26;
+
+            sep   = .05;
+            bot_y = .08;
+            top_y = bot_y + sep + fig_ht2;
+
+            subplot('Position', [start_x(e), top_y, fig_wid, fig_ht1]);
-            subplot(1, 2, e)

             en_f = plot(current, forecast{8, o, e}   , '-', 'Color', gY); hold on
             en_a = plot(current, analysis{7, o, e}   , '-', 'Color', lB); grid on

             ob_a = plot(current, observation{7, o, e}, '*', 'Color', gR);
-            ob_r = plot(current, observation{8, o, e}, '*', 'Color', rD);
+            ob_r = plot(current, observation{8, o, e}, '*', 'Color', rD);

             if plot_ol, op = plot(current, openloop{1, o}, '-', 'Color', oR, 'LineWidth', 3); end

             mf = plot(current, forecast{6, o, e}, '-', 'Color', bK, 'LineWidth', 3);
@@ -556,44 +657,106 @@

             limsy = get(gca, 'YLim');

-            strY = [ gauges.want.names(o, :), ' | ID: ', num2str(gauges.want.OID(o)) ];
+            strY = 'Stream flow (cms)';

-            set(gca, 'FontSize', 16, 'XLim', [xticks(1), xticks(end)], 'XTick', xticks, 'XTickLabel', xtickslabel, 'Ylim', [0 limsy(2)])
-            ylabel(strY, 'FontSize', 18, 'Interpreter', 'none', 'FontWeight', 'bold')
+            set(gca, 'FontSize', 16, 'XLim', [xticks(1), xticks(end)], 'XTick', xticks, 'XTickLabel', {}, 'Ylim', [0 limsy(2)])
+            ylabel(strY, 'FontSize', 18)
+
+            if mean(observation{9, o, e}, 'omitnan') < obserr_L
+                obs_status = 'Used Obs (Assimilated)';
+            else
+                obs_status = 'Used Obs (Evaluated only)';
+            end

             if plot_ol
-                L = legend([ob_a, ob_r, en_f(1), en_a(1), op, mf , ...
-                    ma, so, sf, sa], 'Used Obs' , ...
+                L = legend([ob_a, ob_r, en_f(1), en_a(1), op, mf , ma, so, sf, sa], ...
+                    obs_status , ...
                     sprintf('Rejected Obs: %.2f%%' , 100-observation{4, o, e}/observation{3, o, e}*100), ...
                     'Prior Members', 'Posterior Members' , ...
-                    sprintf('Open Loop, RMSE: %.2f' , nanmean(openloop{2, o})), ...
-                    sprintf('Prior Mean, RMSE: %.2f' , nanmean(forecast{2, o, e})), ...
-                    sprintf('Posterior Mean, RMSE: %.2f' , nanmean(analysis{2, o, e})), ...
-                    sprintf('Open loop Spread, avg: %.2f', nanmean(openloop{3, o})), ...
- sprintf('Prior Spread, avg: %.2f' , nanmean(forecast{4, o, e})), ... - sprintf('Posterior Spread, avg: %.2f', nanmean(analysis{4, o, e})), ... + sprintf('Open Loop, RMSE: %.2f' , mean(openloop{2, o}, 'omitnan')), ... + sprintf('Prior Mean, RMSE: %.2f' , mean(forecast{2, o, e}, 'omitnan')), ... + sprintf('Posterior Mean, RMSE: %.2f' , mean(analysis{2, o, e}, 'omitnan')), ... + sprintf('Open loop Spread, avg: %.2f', mean(openloop{3, o}, 'omitnan')), ... + sprintf('Prior Spread, avg: %.2f' , mean(forecast{4, o, e}, 'omitnan')), ... + sprintf('Posterior Spread, avg: %.2f', mean(analysis{4, o, e}, 'omitnan')), ... 'Location', 'NorthEast'); else - L = legend([ob_a, ob_r, en_f(1), en_a(1), mf , ... - ma, sf, sa], 'Used Obs' , ... + L = legend([ob_a, ob_r, mf , ma], obs_status , ... sprintf('Rejected Obs: %.2f%%' , 100-observation{4, o, e}/observation{3, o, e}*100), ... - 'Prior Members', 'Posterior Members' , ... - sprintf('Prior Mean, RMSE: %.2f' , nanmean(forecast{2, o, e})), ... - sprintf('Posterior Mean, RMSE: %.2f' , nanmean(analysis{2, o, e})), ... - sprintf('Prior Spread, avg: %.2f' , nanmean(forecast{4, o, e})), ... - sprintf('Posterior Spread, avg: %.2f', nanmean(analysis{4, o, e})), ... + sprintf('Prior Mean, RMSE: %.2f' , mean(forecast{2, o, e}, 'omitnan')), ... + sprintf('Posterior Mean, RMSE: %.2f' , mean(analysis{2, o, e}, 'omitnan')), ... 'Location', 'NorthEast'); end - set(L, 'Interpreter', 'none', 'FontSize', 10) - title(L, {'Experiment', dir_exps{e}}, 'FontSize', 12) + set(L, 'Interpreter', 'none', 'FontSize', 12, 'color', 'none') + title(L, exp_name(e), 'FontSize', 14) - str2 = 'Statistics Time-Series (Hydrograph)'; + str2 = [ 'Hydrograph: ', gauges.want.names(o, :), ', Gauge ID: ', num2str(gauges.want.OID(o)) ]; title(str2, 'FontSize', 20, 'FontWeight', 'bold', 'Interpreter', 'none') + + subplot('Position', [start_x(e), bot_y, fig_wid, fig_ht2]); + + if sum(inf_flav_n) > 1 + i_pr_m = plot(current, forecast{9, o, e}, '-', 'Color', bK, 'LineWidth', 2); hold on + i_po_m = plot(current, analysis{8, o, e}, '-', 'Color', bL, 'LineWidth', 2); grid on + + plot(current, ones(1, Nt), '--', 'Color', gY); ax1 = gca; + + limsy = get(gca, 'YLim'); + + set(gca, 'FontSize', 16, 'XLim', [xticks(1), xticks(end)], 'XTick', xticks, 'XTickLabel', xtickslabel, 'Ylim', [0.9 limsy(2)]) + ylabel('Inflation', 'FontSize', 18) + + if hyb_flav_n(e) > 0 + yyaxis right + hyb_m = plot(current, forecast{11, o, e}, '-', 'Color', gR, 'LineWidth', 2); + set(gca, 'YColor', bK, 'YLim', [-0.1, 1.1], 'YTick', [0, 0.5, 1]) + ylabel('Hyb. Weight', 'FontSize', 18) + + L = legend(ax1, [i_pr_m, i_po_m, hyb_m], ... + sprintf('Prior Inflation Mean, avg: %.2f' , mean(forecast{9, o, e}, 'omitnan')), ... + sprintf('Posterior Inflation Mean, avg: %.2f' , mean(analysis{8, o, e}, 'omitnan')), ... + sprintf('Hybrid Weight Mean, avg: %.2f' , mean(forecast{11, o, e}, 'omitnan')), ... + 'Location', 'NorthEast'); + else + L = legend(ax1, [i_pr_m, i_po_m], ... + sprintf('Prior Inflation Mean, avg: %.2f' , mean(forecast{9, o, e}, 'omitnan')), ... + sprintf('Posterior Inflation Mean, avg: %.2f' , mean(analysis{8, o, e}, 'omitnan')), ... 
+ 'Location', 'NorthEast'); + end + + set(L, 'Interpreter', 'none', 'Box', 'off', 'FontSize', 12, 'color', 'none') + else + i_pr_m = plot(current, forecast{9, o, e}, '-', 'Color', bK, 'LineWidth', 2); hold on + + plot(current, ones(1, Nt), '--', 'Color', gY); grid on + + limsy = get(gca, 'YLim'); + + set(gca, 'FontSize', 16, 'XLim', [xticks(1), xticks(end)], 'XTick', xticks, 'XTickLabel', xtickslabel, 'Ylim', [0.9 limsy(2)]) + ylabel('Inflation', 'FontSize', 18) + + if hyb_flav_n(e) > 0 + yyaxis right + hyb_m = plot(current, forecast{11, o, e}, '-', 'Color', gR, 'LineWidth', 2); + set(gca, 'YColor', bK, 'YLim', [-0.1, 1.1], 'YTick', [0, 0.5, 1]) + ylabel('Hyb. Weight', 'FontSize', 18) + + L = legend(ax1, [i_pr_m, hyb_m], ... + sprintf('Prior Inflation Mean, avg: %.2f' , mean(forecast{9, o, e}, 'omitnan')), ... + sprintf('Hybrid Weight Mean, avg: %.2f' , mean(forecast{11, o, e}, 'omitnan')), ... + 'Location', 'NorthEast'); + else + L = legend(sprintf('Prior Inflation Mean, avg: %.2f', mean(forecast{9, o, e}, 'omitnan')), ... + 'Location', 'NorthEast'); + end + + set(L, 'Interpreter', 'none', 'Box', 'off', 'FontSize', 12, 'color', 'none') + end else - % num_exps > 2 + % num_exps > 2 :: no place for inflation! rows = ceil(num_exps/2); subplot(rows, 2, e) @@ -616,36 +779,46 @@ set(gca, 'FontSize', 16, 'XLim', [xticks(1), xticks(end)], 'XTick', xticks, 'XTickLabel', xtickslabel, 'Ylim', [0 limsy(2)]) ylabel(['Gauge: ', num2str(observation{2, o, e})], 'FontSize', 18) + if mean(observation{9, o, e}, 'omitnan') < obserr_L + obs_status = 'Used Obs (Assimilated)'; + else + obs_status = 'Used Obs (Evaluated only)'; + end + if plot_ol L = legend([ob_a, ob_r, en_f(1), en_a(1), op, mf , ... - ma, so, sf, sa], 'Used Obs' , ... + ma, so, sf, sa], obs_status , ... sprintf('Rejected Obs: %.2f%%' , 100-observation{4, o, e}/observation{3, o, e}*100), ... 'Prior Members', 'Posterior Members' , ... - sprintf('Open Loop, RMSE: %.2f' , nanmean(openloop{2, o})), ... - sprintf('Prior Mean, RMSE: %.2f' , nanmean(forecast{2, o, e})), ... - sprintf('Posterior Mean, RMSE: %.2f' , nanmean(analysis{2, o, e})), ... - sprintf('Open loop Spread, avg: %.2f', nanmean(openloop{3, o})), ... - sprintf('Prior Spread, avg: %.2f' , nanmean(forecast{4, o, e})), ... - sprintf('Posterior Spread, avg: %.2f', nanmean(analysis{4, o, e})), ... + sprintf('Open Loop, RMSE: %.2f' , mean(openloop{2, o}, 'omitnan')), ... + sprintf('Prior Mean, RMSE: %.2f' , mean(forecast{2, o, e}, 'omitnan')), ... + sprintf('Posterior Mean, RMSE: %.2f' , mean(analysis{2, o, e}, 'omitnan')), ... + sprintf('Open loop Spread, avg: %.2f', mean(openloop{3, o}, 'omitnan')), ... + sprintf('Prior Spread, avg: %.2f' , mean(forecast{4, o, e}, 'omitnan')), ... + sprintf('Posterior Spread, avg: %.2f', mean(analysis{4, o, e}, 'omitnan')), ... 'Location', 'NorthEast'); else L = legend([ob_a, ob_r, en_f(1), en_a(1), mf , ... - ma, sf, sa], 'Used Obs' , ... + ma, sf, sa], obs_status , ... sprintf('Rejected Obs: %.2f%%' , 100-observation{4, o, e}/observation{3, o, e}*100), ... 'Prior Members', 'Posterior Members' , ... - sprintf('Prior Mean, RMSE: %.2f' , nanmean(forecast{2, o, e})), ... - sprintf('Posterior Mean, RMSE: %.2f' , nanmean(analysis{2, o, e})), ... - sprintf('Prior Spread, avg: %.2f' , nanmean(forecast{4, o, e})), ... - sprintf('Posterior Spread, avg: %.2f', nanmean(analysis{4, o, e})), ... + sprintf('Prior Mean, RMSE: %.2f' , mean(forecast{2, o, e}, 'omitnan')), ... + sprintf('Posterior Mean, RMSE: %.2f' , mean(analysis{2, o, e}, 'omitnan')), ... 
+ sprintf('Prior Spread, avg: %.2f' , mean(forecast{4, o, e}, 'omitnan')), ... + sprintf('Posterior Spread, avg: %.2f', mean(analysis{4, o, e}, 'omitnan')), ... 'Location', 'NorthEast'); end - set(L, 'Interpreter', 'none', 'FontSize', 10) + set(L, 'Interpreter', 'none', 'FontSize', 10, 'color', 'none') - title(['Exp: ', dir_exps{e}], 'FontSize', 16, 'FontWeight', 'bold', 'Interpreter', 'none') + title('Exp: ' + exp_name(e), 'FontSize', 16, 'FontWeight', 'bold', 'Interpreter', 'none') end end + if fig_to_pdf + exportgraphics(gcf, fig_to_pdf, 'Append', true, 'ContentType', 'vector') + close + end else @@ -656,9 +829,9 @@ fig_wid = .80; fig_ht1 = .50; - fig_ht2 = .30; + fig_ht2 = .26; - sep = .01; + sep = .05; bot_y = .08; top_y = bot_y + sep + fig_ht2; @@ -669,7 +842,7 @@ en_a = plot(current, analysis{7, o, e} , '-', 'Color', lB); grid on ob_a = plot(current, observation{7, o, e}, '*', 'Color', gR); - ob_r = plot(current, observation{8, o, e}, '*', 'Color', rD); + ob_r = plot(current, observation{8, o, e}, '*', 'Color', rD); if plot_ol, op = plot(current, openloop{1, o} , '-', 'Color', oR, 'LineWidth', 3); end mf = plot(current, forecast{6, o, e} , '-', 'Color', bK, 'LineWidth', 3); @@ -684,30 +857,36 @@ set(gca, 'FontSize', 16, 'XLim', [xticks(1), xticks(end)], 'XTick', xticks, 'XTickLabel', {}, 'Ylim', [0 limsy(2)]) ylabel('Stream flow (cms)', 'FontSize', 18) + if mean(observation{9, o, e}, 'omitnan') < obserr_L + obs_status = 'Used Obs (Assimilated)'; + else + obs_status = 'Used Obs (Evaluated only)'; + end + if plot_ol L = legend([ob_a, ob_r, en_f(1), en_a(1), op, mf, ma, ... - so, sf, sa], 'Used Obs' , ... + so, sf, sa], obs_status , ... sprintf('Rejected Obs: %.2f%%' , 100-observation{4, o, e}/observation{3, o, e}*100), ... 'Prior Members', 'Posterior Members' , ... - sprintf('Open Loop, RMSE: %.2f' , nanmean(openloop{2, o })), ... - sprintf('Prior Mean, RMSE: %.2f' , nanmean(forecast{2, o, e})), ... - sprintf('Posterior Mean, RMSE: %.2f' , nanmean(analysis{2, o, e})), ... - sprintf('Open loop Spread, avg: %.2f', nanmean(openloop{3, o })), ... - sprintf('Prior Spread, avg: %.2f' , nanmean(forecast{4, o, e})), ... - sprintf('Posterior Spread, avg: %.2f', nanmean(analysis{4, o, e})), ... + sprintf('Open Loop, RMSE: %.2f' , mean(openloop{2, o }, 'omitnan')), ... + sprintf('Prior Mean, RMSE: %.2f' , mean(forecast{2, o, e}, 'omitnan')), ... + sprintf('Posterior Mean, RMSE: %.2f' , mean(analysis{2, o, e}, 'omitnan')), ... + sprintf('Open loop Spread, avg: %.2f', mean(openloop{3, o }, 'omitnan')), ... + sprintf('Prior Spread, avg: %.2f' , mean(forecast{4, o, e}, 'omitnan')), ... + sprintf('Posterior Spread, avg: %.2f', mean(analysis{4, o, e}, 'omitnan')), ... 'Location', 'NorthEast'); else L = legend([ob_a, ob_r, en_f(1), en_a(1), mf, ma, ... - sf, sa], 'Used Obs' , ... + sf, sa], obs_status , ... sprintf('Rejected Obs: %.2f%%' , 100-observation{4, o, e}/observation{3, o, e}*100), ... 'Prior Members', 'Posterior Members' , ... - sprintf('Prior Mean, RMSE: %.2f' , nanmean(forecast{2, o, e})), ... - sprintf('Posterior Mean, RMSE: %.2f' , nanmean(analysis{2, o, e})), ... - sprintf('Prior Spread, avg: %.2f' , nanmean(forecast{4, o, e})), ... - sprintf('Posterior Spread, avg: %.2f', nanmean(analysis{4, o, e})), ... + sprintf('Prior Mean, RMSE: %.2f' , mean(forecast{2, o, e}, 'omitnan')), ... + sprintf('Posterior Mean, RMSE: %.2f' , mean(analysis{2, o, e}, 'omitnan')), ... + sprintf('Prior Spread, avg: %.2f' , mean(forecast{4, o, e}, 'omitnan')), ... 
+ sprintf('Posterior Spread, avg: %.2f', mean(analysis{4, o, e}, 'omitnan')), ... 'Location', 'NorthEast'); end - set(L, 'Interpreter', 'none', 'Box', 'off', 'FontSize', 12) + set(L, 'Interpreter', 'none', 'Box', 'off', 'FontSize', 12, 'color', 'none') str1 = [ gauges.want.names(o, :), ' | Gauge ID: ', num2str(gauges.want.OID(o)) ]; str2 = 'Hydrograph: Obs, Prior/Posterior Ensemble, Mean, Spread & Inflation'; @@ -715,23 +894,69 @@ subplot('Position', [start_x, bot_y, fig_wid, fig_ht2]); - i_pr_m = plot(current, forecast{9, o, e}, '-', 'Color', bK, 'LineWidth', 2); hold on - i_po_m = plot(current, analysis{8, o, e}, '-', 'Color', bL, 'LineWidth', 2); grid on - - plot(current, ones(1, Nt), '--', 'Color', gY) - - limsy = get(gca, 'YLim'); - - set(gca, 'FontSize', 16, 'XLim', [xticks(1), xticks(end)], 'XTick', xticks, 'XTickLabel', xtickslabel, 'Ylim', [0.8 limsy(2)-0.1]) - ylabel('Inflation', 'FontSize', 18) - - L = legend([i_pr_m, i_po_m], ... - sprintf('Prior Inflation Mean, avg: %.2f' , nanmean(forecast{9, o, e})), ... - sprintf('Posterior Inflation Mean, avg: %.2f' , nanmean(analysis{8, o, e})), ... - 'Location', 'NorthEast'); + if sum(inf_flav_n) > 1 + i_pr_m = plot(current, forecast{9, o, e}, '-', 'Color', bK, 'LineWidth', 2); hold on + i_po_m = plot(current, analysis{8, o, e}, '-', 'Color', bL, 'LineWidth', 2); grid on + + plot(current, ones(1, Nt), '--', 'Color', gY); ax1 = gca; + + limsy = get(gca, 'YLim'); + + set(gca, 'FontSize', 16, 'XLim', [xticks(1), xticks(end)], 'XTick', xticks, 'XTickLabel', xtickslabel, 'Ylim', [0.9 limsy(2)]) + ylabel('Inflation', 'FontSize', 18) + + if hyb_flav_n(e) > 0 + yyaxis right + hyb_m = plot(current, forecast{11, o, e}, '-', 'Color', gR, 'LineWidth', 2); + set(gca, 'YColor', bK, 'YLim', [-0.1, 1.1], 'YTick', [0, 0.5, 1]) + ylabel('Hyb. Weight', 'FontSize', 18) + + L = legend(ax1, [i_pr_m, i_po_m, hyb_m], ... + sprintf('Prior Inflation Mean, avg: %.2f' , mean(forecast{9, o, e}, 'omitnan')), ... + sprintf('Posterior Inflation Mean, avg: %.2f' , mean(analysis{8, o, e}, 'omitnan')), ... + sprintf('Hybrid Weight Mean, avg: %.2f' , mean(forecast{11, o, e}, 'omitnan')), ... + 'Location', 'NorthEast'); + else + L = legend(ax1, [i_pr_m, i_po_m], ... + sprintf('Prior Inflation Mean, avg: %.2f' , mean(forecast{9, o, e}, 'omitnan')), ... + sprintf('Posterior Inflation Mean, avg: %.2f' , mean(analysis{8, o, e}, 'omitnan')), ... + 'Location', 'NorthEast'); + end + + set(L, 'Interpreter', 'none', 'Box', 'off', 'FontSize', 12, 'color', 'none') + else + i_pr_m = plot(current, forecast{9, o, e}, '-', 'Color', bK, 'LineWidth', 2); hold on + + plot(current, ones(1, Nt), '--', 'Color', gY); grid on + + limsy = get(gca, 'YLim'); + + set(gca, 'FontSize', 16, 'XLim', [xticks(1), xticks(end)], 'XTick', xticks, 'XTickLabel', xtickslabel, 'Ylim', [0.9 limsy(2)]) + ylabel('Inflation', 'FontSize', 18) + + if hyb_flav_n(e) > 0 + yyaxis right + hyb_m = plot(current, forecast{11, o, e}, '-', 'Color', gR, 'LineWidth', 2); + set(gca, 'YColor', bK, 'YLim', [-0.1, 1.1], 'YTick', [0, 0.5, 1]) + ylabel('Hyb. Weight', 'FontSize', 18) + + L = legend(ax1, [i_pr_m, hyb_m], ... + sprintf('Prior Inflation Mean, avg: %.2f' , mean(forecast{9, o, e}, 'omitnan')), ... + sprintf('Hybrid Weight Mean, avg: %.2f' , mean(forecast{11, o, e}, 'omitnan')), ... + 'Location', 'NorthEast'); + else + L = legend(sprintf('Prior Inflation Mean, avg: %.2f', mean(forecast{9, o, e}, 'omitnan')), ... 
+                    'Location', 'NorthEast');
+            end
+
+            set(L, 'Interpreter', 'none', 'Box', 'off', 'FontSize', 12, 'color', 'none')
+        end

-        set(L, 'Interpreter', 'none', 'Box', 'off', 'FontSize', 12)
-
+        if fig_to_pdf
+            %print(gcf, '-dpsc', '-vector', '-append', '-bestfit', pdf_filename)
+            exportgraphics(gcf, fig_to_pdf, 'Append', true, 'ContentType', 'vector')
+            close
+        end

     if plot_state && o == gauges.want.num
@@ -748,20 +973,20 @@

         subplot(221)
         plot_connections(Xm1_f, nc(e).routelink, get(gca, 'position'), 'cms', tiny_flow_s)
-        title({['Experiment: ' dir_exps{e}],'Stream Flow: Time-Avg. Prior Mean'}, 'FontSize', 14, 'Interpreter', 'none')
+        title({'Experiment: ' + exp_name(e),'Stream Flow: Time-Avg. Prior Mean'}, 'FontSize', 14, 'Interpreter', 'none')

         subplot(222)
         plot_connections(Xs1_f, nc(e).routelink, get(gca, 'position'), 'cms', tiny_flow_s)
-        title({['Experiment: ' dir_exps{e}],'Stream Flow: Time-Avg. Prior Spread'}, 'FontSize', 14, 'Interpreter', 'none')
+        title({'Experiment: ' + exp_name(e),'Stream Flow: Time-Avg. Prior Spread'}, 'FontSize', 14, 'Interpreter', 'none')

         subplot(223)
         if bucket
             Xm2_f = mean(exp(e).pr.state.x2 , 2);
             plot_connections(Xm2_f, nc(e).routelink, get(gca, 'position'), 'mm', tiny_flow_b)
-            title({['Experiment: ' dir_exps{e}],'Bucket: Time-Avg. Prior Mean'}, 'FontSize', 16, 'Interpreter', 'none')
+            title({'Experiment: ' + exp_name(e),'Bucket: Time-Avg. Prior Mean'}, 'FontSize', 16, 'Interpreter', 'none')
         else
             plot_connections(Xm1_a, nc(e).routelink, get(gca, 'position'), 'cms', tiny_flow_s)
-            title({['Experiment: ' dir_exps{e}],'Stream Flow: Time-Avg. Posterior Mean'}, 'FontSize', 16, 'Interpreter', 'none')
+            title({'Experiment: ' + exp_name(e),'Stream Flow: Time-Avg. Posterior Mean'}, 'FontSize', 16, 'Interpreter', 'none')
         end
@@ -769,10 +994,10 @@
         if bucket
             Xs2_f = mean(exp(e).pr.spread.x2, 2);
             plot_connections(Xs2_f, nc(e).routelink, get(gca, 'position'), 'mm', tiny_flow_b)
-            title({['Experiment: ' dir_exps{e}],'Bucket: Time-Avg. Prior Spread'}, 'FontSize', 16, 'Interpreter', 'none')
+            title({'Experiment: ' + exp_name(e),'Bucket: Time-Avg. Prior Spread'}, 'FontSize', 16, 'Interpreter', 'none')
         else
             plot_connections(Xs1_a, nc(e).routelink, get(gca, 'position'), 'cms', tiny_flow_s)
-            title({['Experiment: ' dir_exps{e}],'Stream Flow: Time-Avg. Posterior Spread'}, 'FontSize', 16, 'Interpreter', 'none')
+            title({'Experiment: ' + exp_name(e),'Stream Flow: Time-Avg. Posterior Spread'}, 'FontSize', 16, 'Interpreter', 'none')
         end

         % display increment
@@ -785,18 +1010,18 @@

         subplot(121)
         plot_connections(Xi1, nc(e).routelink, get(gca, 'position'), 'cms', tiny_flow_s)
-        title({['Experiment: ' dir_exps{e}],'Stream Flow: DA Increment (Prior-Posterior)', ...
+        title({'Experiment: ' + exp_name(e),'Stream Flow: DA Increment (Prior-Posterior)', ...
               ['Event: ' longtime(event, :)]}, 'FontSize', 16, 'Interpreter', 'none')

         subplot(122)
         if bucket
             Xi2 = exp(e).pr.state.x2(:, event) - exp(e).po.state.x2(:, event);
             plot_connections(Xi2, nc(e).routelink, get(gca, 'position'), 'mm', tiny_flow_b)
-            title({['Experiment: ' dir_exps{e}],'Bucket: DA Increment (Prior-Posterior)', ...
+            title({'Experiment: ' + exp_name(e),'Bucket: DA Increment (Prior-Posterior)', ...
                   ['Event: ' longtime(event, :)]}, 'FontSize', 16, 'Interpreter', 'none')
         else
             plot_connections(Xia, nc(e).routelink, get(gca, 'position'), 'cms', tiny_flow_s)
-            title({['Experiment: ' dir_exps{e}],'Stream Flow: DA Increment (Prior-Posterior)', ...
+            title({'Experiment: ' + exp_name(e),'Stream Flow: DA Increment (Prior-Posterior)', ...
+                  'Time-Average'}, 'FontSize', 16, 'Interpreter', 'none')
         end
@@ -812,5 +1037,4 @@
 %
 %
 % $URL: $
 % $Revision: $
-% $Date: $
-
+% $Date: $
\ No newline at end of file

From 60838ab6f2e6e6ad89681f11274515859ef636eb Mon Sep 17 00:00:00 2001
From: Helen Kershaw
Date: Fri, 16 Feb 2024 14:53:49 -0500
Subject: [PATCH 05/36] remove BUILD*.sh scripts

Build instructions are available from hdfeos.org
---
 .../obs_converters/AIRS/BUILD_HDF-EOS.sh      | 270 ------------------
 .../AIRS/shell_scripts/Build_HDF_to_netCDF.sh |  57 ----
 2 files changed, 327 deletions(-)
 delete mode 100755 observations/obs_converters/AIRS/BUILD_HDF-EOS.sh
 delete mode 100755 observations/obs_converters/AIRS/shell_scripts/Build_HDF_to_netCDF.sh

diff --git a/observations/obs_converters/AIRS/BUILD_HDF-EOS.sh b/observations/obs_converters/AIRS/BUILD_HDF-EOS.sh
deleted file mode 100755
index 3cba5115f3..0000000000
--- a/observations/obs_converters/AIRS/BUILD_HDF-EOS.sh
+++ /dev/null
@@ -1,270 +0,0 @@
-#!/bin/sh
-#
-# updated 4 Dec 2020
-
-echo
-echo 'These converters require either the HDF-EOS or the HDF-EOS5 libraries.'
-echo 'These libraries are, in general, not compatible with each other.'
-echo 'There is a compatibility library that "provides uniform access to HDF-EOS2'
-echo 'and 5 files though one set of API calls." which sounds great.'
-echo
-echo 'The HDF-EOS5 libraries are installed on the supercomputers, and are'
-echo 'available via MacPorts (hdfeos5). The HDF-EOS libraries are older and'
-echo 'are much less available. Consequently, I have used the HDF-EOS5 interfaces'
-echo 'where possible.'
-echo
-echo 'If the he5_hdfeos libraries are installed on your system, you are in luck.'
-echo 'On our system, it has been useful to define variables like:'
-echo
-echo 'setenv("NCAR_INC_HDFEOS5", "/glade/u/apps/ch/opt/hdf-eos5/5.1.16/intel/19.0.5/include")'
-echo 'setenv("NCAR_LDFLAGS_HDFEOS5","/glade/u/apps/ch/opt/hdf-eos5/5.1.16/intel/19.0.5/lib")'
-echo 'setenv("NCAR_LIBS_HDFEOS5","-Wl,-Bstatic -lGctp -lhe5_hdfeos -lsz -lz -Wl,-Bdynamic")'
-echo 'which we then use in when compiling convert_airs_L2'
-echo
-echo 'If you need to build the HDF-EOS and/or the HDF-EOS5 libraries, you may '
-echo 'try to follow the steps outlined in this script. They will need to be '
-echo 'modified for your system.'
-echo
-echo 'You will have to edit this script, first, by removing the early exit ...'
-echo
-
-exit
-
-# ------------------------------------------------------------------------------
-##
-## The NASA Earthdata Data Access Services portal serves as the download site:
-## https://wiki.earthdata.nasa.gov/display/DAS/Toolkit+Downloads
-##
-## The following packages were downloaded:
-##
-## zlib-1.2.11.tar.gz
-## jpegsrc.v9b.tar.gz
-## hdf-4.2.13.tar.gz
-## HDF-EOS2.20v1.00.tar.Z
-## HDF-EOS2.20v1.00_TestDriver.tar.Z
-## szip-2.1.1.tar.gz
-## hdf5-1.8.19.tar.gz
-## HDF-EOS5-5.1.16.tar.Z
-## HDF-EOS5-5.1.16_TESTDRIVERS.tar.Z
-##
-## The documentation files were downloaded:
-##
-## HDF-EOS_REF.pdf
-## HDF-EOS_UG.pdf
-## HDF-EOS5_REF.pdf
-## HDF-EOS5_UG.pdf
-##
-## Some other useful websites for HDF and HDF-related products are:
-## https://portal.hdfgroup.org/display/support/Downloads
-## https://hdfeos.org/software/library.php#HDF-EOS2
-## https://opensource.gsfc.nasa.gov/projects/HDF-EOS2/index.php

-# Change this to 'true' to uncompress the packages. You only need to uncompress them
-# once, but you may need to run this script several times.
- -if ( `false` ); then - - for i in zlib-1.2.11.tar.gz \ - jpegsrc.v9b.tar.gz \ - hdf-4.2.13.tar.gz \ - szip-2.1.1.tar.gz \ - hdf5-1.8.19.tar.gz - do - tar -zxvf $i - done - - uncompress HDF-EOS2.20v1.00.tar.Z - uncompress HDF-EOS2.20v1.00_TestDriver.tar.Z - uncompress HDF-EOS5.1.16.tar.Z - uncompress HDF-EOS5.1.16_TESTDRIVERS.tar.Z - - tar -xvf HDF-EOS2.20v1.00.tar - tar -xvf HDF-EOS5-5.1.16.tar - -fi - -# ------------------------------------------------------------------------------ -# start with smaller libs, work up to HDF-EOS. -# ------------------------------------------------------------------------------ - -# set the installation location of the final libraries -H4_PREFIX=/glade/work/${USER}/local/hdf-eos -H5_PREFIX=/glade/work/${USER}/local/hdf-eos5 - -# make the target install dirs -mkdir -p ${H4_PREFIX}/{lib,bin,include,man,man/man1,share} -mkdir -p ${H5_PREFIX}/{lib,bin,include,man,man/man1,share} - -# record the build script and environment -echo > ${H4_PREFIX}/build_environment_log.txt -echo 'the build script' >> ${H4_PREFIX}/build_environment_log.txt -cat $0 >> ${H4_PREFIX}/build_environment_log.txt -echo >> ${H4_PREFIX}/build_environment_log.txt -echo '=====================' >> ${H4_PREFIX}/build_environment_log.txt -echo 'the build environment' >> ${H4_PREFIX}/build_environment_log.txt -echo >> ${H4_PREFIX}/build_environment_log.txt -env | sort >> ${H4_PREFIX}/build_environment_log.txt - -# start with smaller libs, work up to HDF-EOS. - -echo '' -echo '======================================================================' -if [ -f ${H4_PREFIX}/lib/libz.a ]; then - echo 'zlib already exists - no need to build.' -else - - export CC='icc' - export CFLAGS='-fPIC' - export FFLAGS='-fPIC' - - echo 'building zlib at '`date` - cd zlib-1.2.11 || exit 1 - ./configure --prefix=${H4_PREFIX} || exit 1 - make clean || exit 1 - make || exit 1 - make test || exit 1 - make install || exit 1 - cd .. -fi - - -echo '' -echo '======================================================================' -if [ -f ${H4_PREFIX}/lib/libsz.a ]; then - echo 'szip already exists - no need to build.' -else - - export CC='icc' - export CFLAGS='-fPIC' - export FFLAGS='-fPIC' - - echo 'building szip at '`date` - cd szip-2.1.1 || exit 1 - ./configure --prefix=${H4_PREFIX} || exit 1 - make clean || exit 1 - make || exit 1 - make test || exit 1 - make install || exit 1 - cd .. -fi - -echo '' -echo '======================================================================' -# This is peculiar - on Cheyenne: -# If I build with --libdir=H4_PREFIX, subsequent linking works. -# If I build with --libdir=H4_PREFIX/lib, subsequent linking FAILS with an -# undefined reference to 'rpl_malloc'. -if [ -f ${H4_PREFIX}/lib/libjpeg.a ]; then - echo 'jpeg already exists - no need to build.' -else - echo 'buiding jpeg at '`date` - cd jpeg-9b || exit 2 - ./configure CC='icc -Df2cFortran' CFLAGS='-fPIC' FFLAGS='-fPIC' \ - --prefix=${H4_PREFIX} || exit 2 - make clean || exit 2 - make || exit 2 - make test || exit 2 - make install || exit 2 - cd .. - cd ${H4_PREFIX} - \ln -s lib/libjpeg* . - cd - -fi - -echo '' -echo '======================================================================' -if [ -f ${H4_PREFIX}/lib/libmfhdf.a ]; then - echo 'hdf4 already exists - no need to build.' 
-else - echo 'building hdf4 at '`date` - # (apparently there is no 'make test') - - cd hdf-4.2.13 || exit 3 - ./configure CC='icc -Df2cFortran' CFLAGS='-fPIC' FFLAGS='-fPIC' \ - --prefix=${H4_PREFIX} \ - --disable-netcdf \ - --with-zlib=${H4_PREFIX} \ - --with-jpeg=${H4_PREFIX} || exit 3 - make clean || exit 3 - make || exit 3 - make install || exit 3 - cd .. -fi - -echo '' -echo '======================================================================' -if [ -f ${H4_PREFIX}/lib/libhdfeos.a ]; then - echo 'hdf-eos already exists - no need to build.' -else - echo 'building HDF-EOS2.20v1.00 at '`date` - echo 'after expanding the .tar.gz file, the source is in "hdfeos"' - cd hdfeos || exit 4 - # (the CC options are crucial to provide Fortran interoperability) - ./configure CC='icc -Df2cFortran' CFLAGS='-fPIC' FFLAGS='-fPIC' \ - --prefix=${H4_PREFIX} \ - --enable-install-include \ - --with-zlib=${H4_PREFIX} \ - --with-jpeg=${H4_PREFIX} \ - --with-hdf=${H4_PREFIX} || exit 4 - make clean || exit 4 - make || exit 4 - make install || exit 4 - cd .. -fi - -#------------------------------------------------------------------------------- -# HDF-EOS5 record the build script and environment -#------------------------------------------------------------------------------- - -echo > ${H5_PREFIX}/build_environment_log.txt -echo 'the build script' >> ${H5_PREFIX}/build_environment_log.txt -cat $0 >> ${H5_PREFIX}/build_environment_log.txt -echo >> ${H5_PREFIX}/build_environment_log.txt -echo '=====================' >> ${H5_PREFIX}/build_environment_log.txt -echo 'the build environment' >> ${H5_PREFIX}/build_environment_log.txt -echo >> ${H5_PREFIX}/build_environment_log.txt -env | sort >> ${H5_PREFIX}/build_environment_log.txt - -echo '======================================================================' -if [ -f ${H5_PREFIX}/lib/libhdf5.a ]; then - echo 'hdf5 already exists - no need to build.' -else - echo 'building hdf5 at '`date` - - cd hdf5-1.8.19 || exit 3 - ./configure CC='icc -Df2cFortran' CFLAGS='-fPIC' FFLAGS='-fPIC' \ - --prefix=${H5_PREFIX} \ - --enable-fortran \ - --enable-fortran2003 \ - --enable-production \ - --with-zlib=${H4_PREFIX} || exit 3 - make clean || exit 3 - make || exit 3 - make check || exit 3 - make install || exit 3 - make check-install || exit 3 - cd .. -fi - -echo '' -echo '======================================================================' -if [ -f ${H5_PREFIX}/lib/libhe5_hdfeos.a ]; then - echo 'hdf-eos5 already exists - no need to build.' -else - echo 'building HDF-EOS5.1.16 at '`date` - echo 'after expanding the .tar.Z file, the source is in "hdfeos5"' - cd hdfeos5 || exit 4 - # (the CC options are crucial to provide Fortran interoperability) - ./configure CC='icc -Df2cFortran' CFLAGS='-fPIC' FFLAGS='-fPIC' \ - --prefix=${H5_PREFIX} \ - --enable-install-include \ - --with-zlib=${H4_PREFIX} \ - --with-hdf5=${H5_PREFIX} || exit 4 - make clean || exit 4 - make || exit 4 - make check || exit 4 - make install || exit 4 - cd .. -fi - -exit 0 diff --git a/observations/obs_converters/AIRS/shell_scripts/Build_HDF_to_netCDF.sh b/observations/obs_converters/AIRS/shell_scripts/Build_HDF_to_netCDF.sh deleted file mode 100755 index fe4e67c8d5..0000000000 --- a/observations/obs_converters/AIRS/shell_scripts/Build_HDF_to_netCDF.sh +++ /dev/null @@ -1,57 +0,0 @@ -#!/bin/bash -# -# DART software - Copyright UCAR. 
This open source software is provided
-# by UCAR, "as is", without charge, subject to all terms of use at
-# http://www.image.ucar.edu/DAReS/DART/DART_download
-#
-# This file is intended to provide guidance on how to compile the
-# "HDF4 CF CONVERSION TOOLKIT" from the HDF-EOS Tools and Information Center
-#
-# The URL of the HDF-EOS Tools and Information Center is:
-# http://hdfeos.org/software/h4cflib.php
-#
-# The URL of the "HDF4 CF CONVERSION TOOLKIT" is:
-# http://hdfeos.org/software/h4cflib/h4cflib_1.3.tar.gz
-#
-# This is not a substitute for the README and INSTALL contained in the tar file.
-#
-# My habit is to install software for my personal use in my $HOME/local directory.
-
-export MYHOME=/glade/work/thoar
-
-./configure \
-    --prefix=${MYHOME}/local/h4cf_1.3 \
-    --with-hdf4=${MYHOME}/local/hdf-eos \
-    --with-jpeg=${MYHOME}/local/hdf-eos \
-    --with-zlib=${MYHOME}/local/hdf-eos \
-    --with-hdfeos2=${MYHOME}/local/hdf-eos \
-    --with-netcdf=$NETCDF \
-    --with-szlib=/usr/local/szip \
-    CPPFLAGS=-I${MYHOME}/local/hdf-eos/include \
-    LDFLAGS=-L${MYHOME}/local/hdf-eos/lib || exit 1
-
-make || exit 2
-make check || exit 3
-make install || exit 4
-
-exit
-
-# The best way to get the most current configure options is to use configure:
-# ./configure --help
-#
-# Here is a recap of just the environment variables, there are many more options
-#
-# Some influential environment variables:
-#   CC          C compiler command
-#   CFLAGS      C compiler flags
-#   LDFLAGS     linker flags, e.g. -L<lib dir> if you have libraries in a
-#               nonstandard directory <lib dir>
-#   LIBS        libraries to pass to the linker, e.g. -l<library>
-#   CPPFLAGS    (Objective) C/C++ preprocessor flags, e.g. -I<include dir> if
-#               you have headers in a nonstandard directory <include dir>
-#   LT_SYS_LIBRARY_PATH
-#               User-defined run-time library search path.
-#   CPP         C preprocessor
-#   CXX         C++ compiler command
-#   CXXFLAGS    C++ compiler flags
-#   CXXCPP      C++ preprocessor

From 9b6cc72ca2de1879069687a964880097b927ee39 Mon Sep 17 00:00:00 2001
From: Helen Kershaw
Date: Fri, 16 Feb 2024 15:02:41 -0500
Subject: [PATCH 06/36] remove NCAR mass store download scripts.

The HPSS was decommissioned in 2021
---
 .../AIRS/shell_scripts/download_L2.sh         |  62 ---------
 .../AIRS/shell_scripts/oneday_down.sh         | 118 ------------------
 2 files changed, 180 deletions(-)
 delete mode 100755 observations/obs_converters/AIRS/shell_scripts/download_L2.sh
 delete mode 100755 observations/obs_converters/AIRS/shell_scripts/oneday_down.sh

diff --git a/observations/obs_converters/AIRS/shell_scripts/download_L2.sh b/observations/obs_converters/AIRS/shell_scripts/download_L2.sh
deleted file mode 100755
index 71cedb74a7..0000000000
--- a/observations/obs_converters/AIRS/shell_scripts/download_L2.sh
+++ /dev/null
@@ -1,62 +0,0 @@
-#!/bin/bash
-#
-# DART software - Copyright UCAR. This open source software is provided
-# by UCAR, "as is", without charge, subject to all terms of use at
-# http://www.image.ucar.edu/DAReS/DART/DART_download
-#
-# download the requested tar files from the NCAR mass store.
-
-# set the first and last days. can roll over
-# month and year boundaries now!
-let start_year=2006
-let start_month=10
-let start_day=1
-
-let end_year=2007
-let end_month=1
-let end_day=31
-
-# end of things you should have to set in this script
-
-# convert the start and stop times to gregorian days, so we can
-# compute total number of days including rolling over month and
-# year boundaries. make sure all values have leading 0s if they
-# are < 10.
do the end time first so we can use the same values -# to set the initial day while we are doing the total day calc. -mon2=`printf %02d $end_month` -day2=`printf %02d $end_day` -end_d=(`echo ${end_year}${mon2}${day2}00 0 -g | ./advance_time`) - -mon2=`printf %02d $start_month` -day2=`printf %02d $start_day` -start_d=(`echo ${start_year}${mon2}${day2}00 0 -g | ./advance_time`) - -curday=(`echo ${start_year}${mon2}${day2}00 0 | ./advance_time`) - -# how many total days are going to be converted (for the loop counter) -let totaldays=${end_d[0]}-${start_d[0]}+1 - -# loop over each day -let d=1 -while (( d <= totaldays)) ; do - - # parse out the parts from a string which is YYYYMMDDHH - year=${curday:0:4} - month=${curday:4:2} - day=${curday:6:2} - - - echo getting ${year}${month}${day}.tar from mass store - hsi get /MIJEONG/AIRS/V5/L2/${year}${month}/${year}${month}${day}.tar - - - # advance the day; the output is YYYYMMDD00 - curday=(`echo ${year}${month}${day}00 +1d | ./advance_time`) - - # advance the loop counter - let d=d+1 - -done - -exit 0 - diff --git a/observations/obs_converters/AIRS/shell_scripts/oneday_down.sh b/observations/obs_converters/AIRS/shell_scripts/oneday_down.sh deleted file mode 100755 index bc8b44a329..0000000000 --- a/observations/obs_converters/AIRS/shell_scripts/oneday_down.sh +++ /dev/null @@ -1,118 +0,0 @@ -#!/bin/bash -# -# DART software - Copyright UCAR. This open source software is provided -# by UCAR, "as is", without charge, subject to all terms of use at -# http://www.image.ucar.edu/DAReS/DART/DART_download -# -# this version gets the tar file from the mass store first. -# unpack one day of tar files at a time, convert them into -# individual obs_seq files. this program also does the merge -# of the 240 individual daily swaths into a single obs_seq file. -# -# this program should be started from the work directory. -# it assumes ../data, ../tars, the output dir, etc -# exist relative to starting from AIRS/work. - -# set the first and last days to be converted. can roll over -# month and year boundaries now! -let start_year=2006 -let start_month=10 -let start_day=1 - -let end_year=2007 -let end_month=1 -let end_day=31 - -# relative to work dir -output_dir=../output.thin - -# whether to download the tar file from the mass store or not -# set to one of: true or false -download=true - -# end of things you should have to set in this script - -# convert the start and stop times to gregorian days, so we can -# compute total number of days including rolling over month and -# year boundaries. make sure all values have leading 0s if they -# are < 10. do the end time first so we can use the same values -# to set the initial day while we are doing the total day calc. -mon2=`printf %02d $end_month` -day2=`printf %02d $end_day` -end_d=(`echo ${end_year}${mon2}${day2}00 0 -g | ./advance_time`) - -mon2=`printf %02d $start_month` -day2=`printf %02d $start_day` -start_d=(`echo ${start_year}${mon2}${day2}00 0 -g | ./advance_time`) - -curday=(`echo ${start_year}${mon2}${day2}00 0 | ./advance_time`) - -# how many total days are going to be converted (for the loop counter) -let totaldays=${end_d[0]}-${start_d[0]}+1 - -# loop over each day -let d=1 -while (( d <= totaldays)) ; do - - # parse out the parts from a string which is YYYYMMDDHH - year=${curday:0:4} - month=${curday:4:2} - day=${curday:6:2} - - # compute the equivalent gregorian day here. 
- g=(`echo ${year}${month}${day}00 0 -g | ./advance_time`) - greg=${g[0]} - - echo starting AIRS to obs ${year}${month}${day} - echo gregorian: $greg - - # download the tar file from the hpss first - if [[ "$download" = "true" ]]; then - echo getting ${year}${month}${day}.tar from mass store - (cd ../tars; hsi get /MIJEONG/AIRS/V5/L2/${year}${month}/${year}${month}${day}.tar ) - fi - - # assume the original collection of data (hdf files, one per swath) - # are in ../tars and that the filenames inside the tar files are named - # YYYYMM/YYYYMMDD/*.hdf - (cd ../data; tar -xvf ../tars/${year}${month}${day}.tar >> tarlog) - - # construct the input list of files for the converter. - # cd there first in a subshell so the ls just contains simple file names - (cd ../data/${year}${month}/${year}${month}${day}; ls AIR*hdf > flist) - - # get back to work dir and edit a template file to set the - # values that change in the namelists. - sed -e "s/YYYY/${year}/g" \ - -e "s/MM/${month}/g" \ - -e "s/DD/${day}/g" \ - -e "s/GREG/${greg}/g" < ./input.nml.template > input.nml - - # actually make the obs_seq files, one per input. these still need to - # be merged if you want daily files. - ./convert_airs_L2 - - # do the merge now - ls ${output_dir}/AIRS.${year}.${month}.${day}.*.out > olist - ./obs_sequence_tool - - # start local mods - # ok, this is a local mod - to try to keep from running out of disk space - remote_dir=/gpfs/ptmp/dart/Obs_sets/AIRS_24_subx4_ascii/${year}${month}/ - cp -f ${output_dir}/AIRS.${year}${month}${day}.out $remote_dir - # and clean up so we don't run out of disk space - (cd ../data/${year}${month}/${year}${month}${day}; rm AIR*hdf) - (cd ${output_dir}; rm AIRS.${year}.${month}.${day}.*.out) - (cd ../tars; rm ${year}${month}${day}.tar) - # end local mods - - # advance the day; the output is YYYYMMDD00 - curday=(`echo ${year}${month}${day}00 +1d | ./advance_time`) - - # advance the loop counter - let d=d+1 - -done - -exit 0 - From 9f73c9b5dbf4e013172b0b261b213331c07a848a Mon Sep 17 00:00:00 2001 From: Helen Kershaw Date: Fri, 16 Feb 2024 15:03:35 -0500 Subject: [PATCH 07/36] remove Convert_HDF_to_netCDF.csh - not used --- .../shell_scripts/Convert_HDF_to_netCDF.csh | 27 ------------------- 1 file changed, 27 deletions(-) delete mode 100755 observations/obs_converters/AIRS/shell_scripts/Convert_HDF_to_netCDF.csh diff --git a/observations/obs_converters/AIRS/shell_scripts/Convert_HDF_to_netCDF.csh b/observations/obs_converters/AIRS/shell_scripts/Convert_HDF_to_netCDF.csh deleted file mode 100755 index 7035e07158..0000000000 --- a/observations/obs_converters/AIRS/shell_scripts/Convert_HDF_to_netCDF.csh +++ /dev/null @@ -1,27 +0,0 @@ -#!/bin/csh -# -# DART software - Copyright UCAR. 
This open source software is provided -# by UCAR, "as is", without charge, subject to all terms of use at -# http://www.image.ucar.edu/DAReS/DART/DART_download - - -cd ../data - -foreach FILE ( *hdf ) - - set BASE = $FILE:r - set NEWNAME = $BASE.nc - - echo - echo "Converting $FILE to" - echo " $NEWNAME" - echo - - \rm -f bob.nc - h4tonccf_nc4 $FILE bob.nc || exit 1 - ncatted -a coremetadata,global,d,,, -a StructMetadata_0,global,d,,, bob.nc $NEWNAME - -end - -\rm -f bob.nc - From 0ea68c5166df9a1869c88950c777ea30623788f3 Mon Sep 17 00:00:00 2001 From: Helen Kershaw Date: Fri, 16 Feb 2024 15:06:12 -0500 Subject: [PATCH 08/36] remove README since there is only mergeit.sh script left --- .../obs_converters/AIRS/shell_scripts/README | 16 ---------------- 1 file changed, 16 deletions(-) delete mode 100644 observations/obs_converters/AIRS/shell_scripts/README diff --git a/observations/obs_converters/AIRS/shell_scripts/README b/observations/obs_converters/AIRS/shell_scripts/README deleted file mode 100644 index 3da99d9312..0000000000 --- a/observations/obs_converters/AIRS/shell_scripts/README +++ /dev/null @@ -1,16 +0,0 @@ -# DART software - Copyright UCAR. This open source software is provided -# by UCAR, "as is", without charge, subject to all terms of use at -# http://www.image.ucar.edu/DAReS/DART/DART_download -# -# DART $Id$ - -These scripts are intended to help download the original AIRS hdf -data files, convert them in bulk, and merge the resulting obs_seq files. - -In most cases, they're intended to be copied over to the ../work directory -and then customized for the particular time period and local directory names. - -# -# $URL$ -# $Revision$ -# $Date$ From 7a5921032e9fc09f15c37fbf7b58afac2ccf64a9 Mon Sep 17 00:00:00 2001 From: Helen Kershaw Date: Fri, 16 Feb 2024 15:14:04 -0500 Subject: [PATCH 09/36] remove unneeded rttov options from input.nml Only the two options are needed for the converter. rttov12 and rttov13 have different namelist options, so running rttov13 with the whole rttov12 namelist causes an error on reading the rttov namelist --- .../obs_converters/AIRS/work/input.nml | 70 ------------------- 1 file changed, 70 deletions(-) diff --git a/observations/obs_converters/AIRS/work/input.nml b/observations/obs_converters/AIRS/work/input.nml index 83303ae5e9..a827cdc9b9 100644 --- a/observations/obs_converters/AIRS/work/input.nml +++ b/observations/obs_converters/AIRS/work/input.nml @@ -127,76 +127,6 @@ &obs_def_rttov_nml rttov_sensor_db_file = '../../../forward_operators/rttov_sensor_db.csv' - first_lvl_is_sfc = .true. - mw_clear_sky_only = .false. - interp_mode = 1 - do_checkinput = .true. - apply_reg_limits = .true. - verbose = .true. - fix_hgpl = .false. - do_lambertian = .false. - lambertian_fixed_angle = .true. - rad_down_lin_tau = .true. - use_q2m = .true. - use_uv10m = .true. - use_wfetch = .false. - use_water_type = .false. - addrefrac = .false. - plane_parallel = .false. - use_salinity = .false. - do_lambertian = .false. - apply_band_correction = .true. - cfrac_data = .true. - clw_data = .true. - rain_data = .true. - ciw_data = .true. - snow_data = .true. - graupel_data = .true. - hail_data = .false. - w_data = .true. - clw_scheme = 1 - clw_cloud_top = 322. - fastem_version = 6 - supply_foam_fraction = .false. - use_totalice = .true. use_zeeman = .false. - cc_threshold = 0.05 - ozone_data = .false. - co2_data = .false. - n2o_data = .false. - co_data = .false. - ch4_data = .false. - so2_data = .false. - addsolar = .false. - rayleigh_single_scatt = .true. 
-   do_nlte_correction       = .false.
-   solar_sea_brdf_model     = 2
-   ir_sea_emis_model        = 2
-   use_sfc_snow_frac        = .false.
-   add_aerosl               = .false.
-   aerosl_type              = 1
-   add_clouds               = .true.
-   ice_scheme               = 1
-   use_icede                = .false.
-   idg_scheme               = 2
-   user_aer_opt_param       = .false.
-   user_cld_opt_param       = .false.
-   grid_box_avg_cloud       = .true.
-   cldstr_threshold         = -1.0
-   cldstr_simple            = .false.
-   cldstr_low_cloud_top     = 750.0
-   ir_scatt_model           = 2
-   vis_scatt_model          = 1
-   dom_nstreams             = 8
-   dom_accuracy             = 0.0
-   dom_opdep_threshold      = 0.0
-   addpc                    = .false.
-   npcscores                = -1
-   addradrec                = .false.
-   ipcreg                   = 1
-   use_htfrtc               = .false.
-   htfrtc_n_pc              = -1
-   htfrtc_simple_cloud      = .false.
-   htfrtc_overcast          = .false.
 /

From 6d4a9211ee17b558f850a4fc100f76e58837f8c2 Mon Sep 17 00:00:00 2001
From: Helen Kershaw
Date: Fri, 16 Feb 2024 17:00:10 -0500
Subject: [PATCH 10/36] doc: patched hdfeos converter and where to find it

Removed redundant documentation that was in the readme and the
individual convert docs. Removed the various HDF5 vs HDF4 discussion
and references to using a DART-provided script to build hdfeos (get
instructions directly from hdfeos)
---
 observations/obs_converters/AIRS/README.rst   | 138 +++------------
 .../obs_converters/AIRS/convert_airs_L2.rst   |  33 ++--
 .../obs_converters/AIRS/convert_amsu_L1.rst   | 164 ++++--------------
 3 files changed, 65 insertions(+), 270 deletions(-)

diff --git a/observations/obs_converters/AIRS/README.rst b/observations/obs_converters/AIRS/README.rst
index 117656f40a..6451602220 100644
--- a/observations/obs_converters/AIRS/README.rst
+++ b/observations/obs_converters/AIRS/README.rst
@@ -1,137 +1,45 @@
 AIRS and AMSU
 =============
 
-.. caution::
+The AIRS directory contains both the AIRS and AMSU-A observation converters.
+AIRS refers to the Atmospheric Infrared Sounder (AIRS) Level 2 observations.
+AMSU-A refers to the Advanced Microwave Sounding Unit (AMSU-A) L1B Brightness Temperatures.
 
-   Before you begin: Installing the libraries needed to read these files can be
-   fairly troublesome. The NASA Earthdata Data Access Services website is the
-   `download site `__
-   for the necessary libraries. An example build script (`AIRS/Build_HDF-EOS.sh`)
-   is intended to provide some guidance.
-
-This directory covers two observation converters:
-
-- :doc:`./convert_airs_L2` for temperature and moisture retrievals.
-
-- :doc:`./convert_amsu_L1` for radiances.
+- :doc:`./convert_airs_L2` for converting AIRS temperature and moisture retrievals.
+- :doc:`./convert_amsu_L1` for converting AMSU-A radiances.
 
 Both converters are in the AIRS directory because of the complicated history of
 the data used to create the AIRS L2 product (which includes some AMSU observations).
 Since both datasets are HDF - it was believed that some of the routines could be
 used by both converters. Alas, that has not proven to be the case.
 
-Atmospheric Infrared Sounder (AIRS) Level 2 observations
---------------------------------------------------------
-
-The `AIRS `__ instrument is an Atmospheric
-Infrared Sounder flying on the `Aqua `__
-spacecraft. Aqua is one of a group of satellites flying close together
-in a polar orbit, collectively known as the “A-train”. The programs in
-this directory help to extract the data from the distribution files and
-put them into DART observation sequence (obs_seq) file format.
-
-AIRS data includes atmospheric temperature in the troposphere, derived
-moisture profiles, land and ocean surface temperatures, surface
-emissivity, cloud fraction, cloud top height, and ozone burden in the
-atmosphere.
-
-
-Advanced Microwave Sounding Unit (AMSU-A) L1B Brightness Temperatures
----------------------------------------------------------------------
-
-The *DART/observations/obs_converters/AIRS* directory contains the code
-to convert the L1B AMSU-A Brightness Temperatures in HDF-EOS2 format to
-the DART observation sequence file format.
-
-There is a little bit of confusing history to be aware of for AMSU/A:
-
-https://en.wikipedia.org/wiki/Advanced_microwave_sounding_unit#History
-
-AMSU/A was flown on NOAA 15-17. It is also on the Aqua satellite (that
-also houses AIRS) as well as the European MetOp. It has been replaced by
-ATMS on NOAA-20.
 
 Dependencies
 ------------
 
-Both *convert_airs_L2* and *convert_amsu_L1* require the HDF-EOS libraries.
-*convert_amsu_L1* also requires HDF5 support because of
-the RTTOV libraries. HDF5 is incompatible with HDF-EOS, so a two-step
-conversion is necessary for the AMSU observations.
-The data must be converted from HDF to netCDF
-(which can be done without HDF5) and then the netCDF files can be
-converted to DART radiance observation format - which requires
-``obs_def_rttov_mod.f90``, which depends on HDF5. To simplify things,
-An example build script (*DART/observations/obs_converters/AIRS/Build_HDF-EOS.sh*)
-is supplied and may provide some guidance on downloading and building
-the libraries required by NASA.
-
-The NASA Earthdata Data Access Services website is the `download
-site `__,
-at press time, the following packages were required to build HDF-EOS
-Release v2.20:
-
-- hdf-4.2.13.tar.gz
-- HDF-EOS2.20v1.00.tar.Z
-- HDF-EOS2.20v1.00_TestDriver.tar.Z
-- HDF-EOS_REF.pdf
-- HDF-EOS_UG.pdf
-- jpegsrc.v9b.tar.gz
-- zlib-1.2.11.tar.gz
-
-Similarly for HDF-EOS5 Release v5.1.16:
-
-- HDF-EOS5.1.16.tar.Z
-- HDF-EOS5.1.16_TESTDRIVERS.tar.Z
-- HDF-EOS5_REF.pdf
-- HDF-EOS5_UG.pdf
-- hdf5-1.8.19.tar.gz
-- szip-2.1.1.tar.gz
-
-*BUILD_HDF-EOS.sh* may help you build these libraries.
-You *will* have to modify it for your
-system, and you *probably will* have to iterate on that process. The
-script takes the stance that if you have to build HDF4, HDF-EOS, HDF5 …
-you might as well build HDF-EOS5 too. The HDF-EOS5 is entirely optional.
-The HDF5 will be needed by RTTOV.
+Both ``convert_airs_L2`` and ``convert_amsu_L1`` require the HDF-EOS2 libraries.
 
-Converting from HDF4 to netCDF
-------------------------------
+The HDF-EOS2 library required a patch to work with DART observation converters.
+For details on the patch see `issue #590 `_.
+The patched version of the library is available on Derecho. The _intel, _gfortran,
+_nvhpc, and _cray suffixes indicate which compiler was used to build the library. Select an
+appropriate mkmf.template for the compiler you are using.
 
-There are multiple ways to convert from HDF4 to netCDF. The HDF-EOS
-Tools and Information Center provides binaries for several common
-platforms as well as source code should you need to build your own.
+.. code:: text
 
-HDF4 CF CONVERSION TOOLKIT
-^^^^^^^^^^^^^^^^^^^^^^^^^^
+   /glade/campaign/cisl/dares/libraries/hdf-eos_intel
+   /glade/campaign/cisl/dares/libraries/hdf-eos_gfortran
+   /glade/campaign/cisl/dares/libraries/hdf-eos_nvhpc
+   /glade/campaign/cisl/dares/libraries/hdf-eos_cray
 
-The HDF-EOS Tools and Information Center provides the `HDF4 CF
-CONVERSION TOOLKIT `__
+``convert_amsu_L1`` requires the RTTOV libraries.
 
-   The HDF4 CF (H4CF) Conversion Toolkit can access various NASA HDF4
-   external and HDF-EOS2 external files by following the CF conventions
-   external.
The toolkit includes a conversion library for application - developers and a conversion utility for NetCDF users. We have - translated the information obtained from various NASA HDF-EOS2 and - HDF4 files and the corresponding product documents into the - information required by CF into the conversion library. We also have - implemented an HDF4-to-NetCDF (either NetCDF-3 or NetCDF-4 classic) - conversion tool by using this conversion library. In this web page, - we will first introduce how to build the conversion library and the - tool from the source. Then, we will provide basic usage of the tool - and the conversion library APIs. The information for the supported - NASA HDF-EOS2 and HDF4 products and visualization screenshots of some - converted NetCDF files will also be presented. +The following mkmf.templates, which are available in DART/build_templates have been used to compile the AIRS and AMSUA +observation converters on Derecho. -If you download a binary, it’s a good habit to verify the checksum. The download page has a link -to a .pdf that has the known checksums. `Here’s how to generate the -checksum `__. -Be aware that when I downloaded the file (via Chrome or ‘wget’) on an -OSX system, the checksum did not match. When I downloaded the file on a -linux system, the checksum *did* match. +.. code :: text + + mkmf.template.AIRS.gfortran + mkmf.template.AIRS.intel -If you download the source, the tar file comes with a ``README`` and an ``INSTALL``. Please become -familiar with them. DART also has a build script: -``AIRS/shell_scripts/Build_HDF_to_netCDF.csh`` that you can customize -after you read the ``INSTALL`` document. diff --git a/observations/obs_converters/AIRS/convert_airs_L2.rst b/observations/obs_converters/AIRS/convert_airs_L2.rst index 5703a5985a..0e044f2306 100644 --- a/observations/obs_converters/AIRS/convert_airs_L2.rst +++ b/observations/obs_converters/AIRS/convert_airs_L2.rst @@ -1,20 +1,13 @@ Program ``convert_airs_L2`` =========================== -.. caution:: - - Before you begin: Installing the libraries needed to read these files can be - fairly troublesome. The NASA Earthdata Data Access Services website is the - `download site `__ - for the necessary libraries. An example build script (`AIRS/Build_HDF-EOS.sh`) - is intended to provide some guidance. - - Overview -------- -The Atmospheric Infrared Sounder (AIRS) is a facility instrument aboard the second -Earth Observing System (EOS) polar-orbiting platform, EOS Aqua. In combination with +The Atmospheric Infrared Sounder `(AIRS) `_ is a facility +instrument aboard the second Earth Observing System (EOS) polar-orbiting platform +`Aqua `_. Aqua is one of a group of satellites flying close +together in a polar orbit, collectively known as the “A-train”. In combination with the Advanced Microwave Sounding Unit (AMSU) and the Humidity Sounder for Brazil (HSB), AIRS constitutes an innovative atmospheric sounding group of visible, infrared, and microwave sensors. AIRS data will be generated continuously. Global coverage will @@ -34,6 +27,12 @@ is AIRX2RET. (AIRS2RET is the same product but without the AMSU data.) Atmospheric Infrared Sounder (AIRS) Level 2 observations ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +AIRS data includes atmospheric temperature in the troposphere, derived +moisture profiles, land and ocean surface temperatures, surface +emissivity, cloud fraction, cloud top height, and ozone burden in the +atmosphere. 
+ + Several types of AIRS data, with varying levels of processing, are available. The following descriptions are taken from the `V5_Data_Release_UG `__ @@ -109,13 +108,6 @@ that spans the Prime Meridian lon1 can be a larger number than lon2, for example a region from 300 E to 40 E and 60 S to 30 S (some of the South Atlantic), would be *lon1 = 300, lon2 = 40, lat1 = -60, lat2 = -30*. -The ``DART/observations/obs_converters/AIRS/shell_scripts`` directory includes scripts -(``download_L2.sh`` and ``oneday_down.sh``) that make use of the fact that the AIRS data -is also archived on the NSF NCAR HPSS (tape library) in daily tar files. -``oneday_down.sh`` has options to download a day of granule files, convert them, merge them -into daily files, and remove the original data files and repeat the process for any -specified time period. - Namelist -------- @@ -196,11 +188,6 @@ The default values are shown below. More realistic values are provided in +--------------------+------------------------+--------------------------------------------------------------+ -Dependencies -~~~~~~~~~~~~ - -See the :doc:`Dependencies Section<./README>` of the AIRS/README. - Known Bugs ~~~~~~~~~~ diff --git a/observations/obs_converters/AIRS/convert_amsu_L1.rst b/observations/obs_converters/AIRS/convert_amsu_L1.rst index f3bcd1a57f..012baeaea6 100644 --- a/observations/obs_converters/AIRS/convert_amsu_L1.rst +++ b/observations/obs_converters/AIRS/convert_amsu_L1.rst @@ -1,16 +1,8 @@ Program ``convert_amsu_L1`` =========================== -.. caution:: - - Before you begin: Installing the libraries needed to read these files can be - fairly troublesome. The NASA Earthdata Data Access Services website is the - `download site `__ - for the necessary libraries. An example build script (`AIRS/Build_HDF-EOS.sh`) - is intended to provide some guidance. - Overview --------- +--------- There is a little bit of confusing history to be aware of for AMSU/A: @@ -56,14 +48,36 @@ The citation information for this dataset is: NASA provides a `README.AIRABRAD.pdf `__ through the Goddard Earth Sciences Data and Information Services Center. -convert_amsua_L1.f90 --------------------- +Advanced Microwave Sounding Unit (AMSU-A) L1B Brightness Temperatures +--------------------------------------------------------------------- + +Converting AMSU_L1 observations is a two-step process: + +- convert the data from HDF to netCDF +- run ``convert_amsua_L1`` to convert the netCDF file to an obs_seq file. + +.. note:: + + The native HDF-EOS2 format files must be converted to netCDF before running + ``convert_amsua_L1``. + +To convert from HDF-EOS2 to netCDF use the +`h4tonccf_nc4 ` from the HDF-EOS +tools. + + +The netCDF files have two global +attributes that are exceedingly large and uninformative. If needed you can +remove these attributes, you can use the ``ncatted`` command from +`NCO `_. + +:: + + h4tonccf_nc4 AIRS.2019.06.22.236.L1B.AMSU_Rad.v5.0.0.0.G19174110442.hdf bob.nc + ncatted -a coremetadata,global,d,,, -a StructMetadata_0,global,d,,, bob.nc bill.nc + + -``convert_amsua_L1`` converts the L1B AMSU-A Brightness -Temperatures in netCDF format to the DART observation sequence file format. -The native HDF-EOS2 format files must be converted to netCDF. -The conversion from HDF-EOS2 to netCDF is easily performed by the -`h4tonccf_nc4 `__ converter. As you can imagine, you need to download each satellite’s data in a different way. Also, just for your information, AMSU/B has been replaced @@ -73,10 +87,8 @@ identical. 
Namelist ~~~~~~~~ -DARTs design structure has the support for radiance observations (like brightness -temperatures) provided by the :doc:`../../forward_operators/obs_def_rttov_mod` -which depends on HDF5 libraries. Consequently, the ``obs_def_rttov_mod_nml`` namelist -must appear in the ``input.nml``. However, only two options are used when converting +``convert_amsua_L1`` makes use of :doc:`../../forward_operators/obs_def_rttov_mod` +Only two &obs_def_rttov_nml options are required when converting the observations: *use_zeeman* and *rttov_sensor_db_file*. Be aware that if the RTTOV namelist option ``use_zeeman = .true.`` @@ -221,21 +233,6 @@ are provided for reference only. The 'Documentation' values are from the +---------+---------+---------------+---------------+ -Known Bugs -~~~~~~~~~~ - -None. - - -Future Plans -~~~~~~~~~~~~ - -None. - - ----------- - - .. _instructions-to-download-the-airabrad-dataset-1: Instructions to download the AIRABRAD dataset @@ -273,106 +270,9 @@ Instructions to download the AIRABRAD dataset AIRS.2019.06.22.236.L1B.AMSU_Rad.v5.0.0.0.G19174110442.hdf -Build -^^^^^^ - -See the :doc:`Dependencies Section<./README>` of the AIRS/README. - -Because the data are distributed in HDF-EOS format, and the RTTOV -libraries require HDF5 (incompatible with HDF-EOS) a two-step conversion -is necessary. The data must be converted from HDF to netCDF (which can -be done without HDF5) and then the netCDF files can be converted to DART -radiance observation format - which is the part that requires -``obs_def_rttov_mod.f90``, which is the part that requires HDF5. - -The NASA Earthdata Data Access Services website is the `download -site `__, -at press time, the following packages were required to build HDF-EOS -Release v2.20: - -- hdf-4.2.13.tar.gz -- HDF-EOS2.20v1.00.tar.Z -- HDF-EOS2.20v1.00_TestDriver.tar.Z -- HDF-EOS_REF.pdf -- HDF-EOS_UG.pdf -- jpegsrc.v9b.tar.gz -- zlib-1.2.11.tar.gz - -Similarly for HDF-EOS5 Release v5.1.16: - -- HDF-EOS5.1.16.tar.Z -- HDF-EOS5.1.16_TESTDRIVERS.tar.Z -- HDF-EOS5_REF.pdf -- HDF-EOS5_UG.pdf -- hdf5-1.8.19.tar.gz -- szip-2.1.1.tar.gz - -DART provides a script ``DART/observations/obs_converters/AIRS/BUILD_HDF-EOS.sh`` -that may help provide support for these libraries. You *will* have to modify it for your -system, and you *probably will* have to iterate on that process. The -script takes the stance that if you have to build HDF4, HDF-EOS, HDF5 … -you might as well build HDF-EOS5 too. The HDF-EOS5 is entirely optional. -The HDF5 will be needed by RTTOV. - -Converting from HDF4 to netCDF ------------------------------- - -There are multiple ways to convert from HDF4 to netCDF. The HDF-EOS -Tools and Information Center provides binaries for several common -platforms as well as source code should you need to build your own. - -HDF4 CF CONVERSION TOOLKIT -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The HDF-EOS Tools and Information Center provides the `HDF4 CF -CONVERSION TOOLKIT `__ - - The HDF4 CF (H4CF) Conversion Toolkit can access various NASA HDF4 - external and HDF-EOS2 external files by following the CF conventions - external. The toolkit includes a conversion library for application - developers and a conversion utility for NetCDF users. We have - translated the information obtained from various NASA HDF-EOS2 and - HDF4 files and the corresponding product documents into the - information required by CF into the conversion library. 
We also have - implemented an HDF4-to-NetCDF (either NetCDF-3 or NetCDF-4 classic) - conversion tool by using this conversion library. In this web page, - we will first introduce how to build the conversion library and the - tool from the source. Then, we will provide basic usage of the tool - and the conversion library APIs. The information for the supported - NASA HDF-EOS2 and HDF4 products and visualization screenshots of some - converted NetCDF files will also be presented. - -If you download a binary, it’s a good habit to verify the checksum. -The download page has a link -to a .pdf that has the known checksums. -`Here’s how to generate the checksum `__. -Be aware that when I downloaded the file (via Chrome or ‘wget’) on an -OSX system, the checksum did not match. When I downloaded the file on a -linux system, the checksum *did* match. - -If you download the source, the tar file comes with a ``README`` and an -``INSTALL``. Please become familiar with them. DART also has a build script: -``AIRS/shell_scripts/Build_HDF_to_netCDF.csh`` that you can customize -after you read the ``INSTALL`` document. - Actually converting to netCDF ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -While the converter creates very nice netCDF files, there are two global -attributes that are exceedingly large and uninformative. Should you want -to remove them, I suggest using the ``ncatted`` command from -`NCO `__. -:: - - h4tonccf_nc4 AIRS.2019.06.22.236.L1B.AMSU_Rad.v5.0.0.0.G19174110442.hdf bob.nc - ncatted -a coremetadata,global,d,,, -a StructMetadata_0,global,d,,, bob.nc bill.nc -The DART ``L1_AMSUA_to_netcdf.f90`` program -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Before I became aware of ``h4tonccf_nc4``, I was in the process of -writing my own converter ``L1_AMSUA_to_netcdf.f90``. *It is not -finished.* Furthermore, at this stage, I don’t know which variables are -needed to be a viable DART observation sequence file, and I don’t see -the point in converting EVERYTHING. From b70e8c8c94d0728a7257351cc2e6440758c6eddf Mon Sep 17 00:00:00 2001 From: Helen Kershaw Date: Fri, 16 Feb 2024 17:10:52 -0500 Subject: [PATCH 11/36] add hdf4 module instructions and removed dangling heading --- observations/obs_converters/AIRS/README.rst | 3 +++ observations/obs_converters/AIRS/convert_amsu_L1.rst | 6 ------ 2 files changed, 3 insertions(+), 6 deletions(-) diff --git a/observations/obs_converters/AIRS/README.rst b/observations/obs_converters/AIRS/README.rst index 6451602220..5115917e7d 100644 --- a/observations/obs_converters/AIRS/README.rst +++ b/observations/obs_converters/AIRS/README.rst @@ -32,6 +32,8 @@ appropriate mkmf.template for the compiler you are using. /glade/campaign/cisl/dares/libraries/hdf-eos_nvhpc /glade/campaign/cisl/dares/libraries/hdf-eos_cray +hdf-eos requires HDF4. HDF4 is available on Derecho with ``module load hdf4``. + ``convert_amsu_L1`` requires the RTTOV libraries. The following mkmf.templates, which are available in DART/build_templates have been used to compile the AIRS and AMSUA @@ -43,3 +45,4 @@ observation converters on Derecho. 
mkmf.template.AIRS.intel + diff --git a/observations/obs_converters/AIRS/convert_amsu_L1.rst b/observations/obs_converters/AIRS/convert_amsu_L1.rst index 012baeaea6..d8eebb683f 100644 --- a/observations/obs_converters/AIRS/convert_amsu_L1.rst +++ b/observations/obs_converters/AIRS/convert_amsu_L1.rst @@ -270,9 +270,3 @@ Instructions to download the AIRABRAD dataset AIRS.2019.06.22.236.L1B.AMSU_Rad.v5.0.0.0.G19174110442.hdf -Actually converting to netCDF -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - - - From b50a918e2bf3d219686adcbfa89767f94447bceb Mon Sep 17 00:00:00 2001 From: Helen Kershaw Date: Tue, 5 Mar 2024 08:33:47 -0700 Subject: [PATCH 12/36] removed L1_AMSUA_to_netcdf non-working code. --- .../AIRS/L1_AMSUA_to_netcdf.f90 | 153 ------------------ .../obs_converters/AIRS/work/quickbuild.sh | 1 - 2 files changed, 154 deletions(-) delete mode 100644 observations/obs_converters/AIRS/L1_AMSUA_to_netcdf.f90 diff --git a/observations/obs_converters/AIRS/L1_AMSUA_to_netcdf.f90 b/observations/obs_converters/AIRS/L1_AMSUA_to_netcdf.f90 deleted file mode 100644 index a3025dc840..0000000000 --- a/observations/obs_converters/AIRS/L1_AMSUA_to_netcdf.f90 +++ /dev/null @@ -1,153 +0,0 @@ -! DART software - Copyright UCAR. This open source software is provided -! by UCAR, "as is", without charge, subject to all terms of use at -! http://www.image.ucar.edu/DAReS/DART/DART_download - -program L1_AMSUA_to_netcdf - -use utilities_mod, only : initialize_utilities, register_module, & - error_handler, finalize_utilities, E_ERR, E_MSG, & - find_namelist_in_file, check_namelist_read, & - do_nml_file, do_nml_term, set_filename_list, & - nmlfileunit, get_next_filename - -use netcdf_utilities_mod, only : nc_create_file, nc_begin_define_mode, & - nc_end_define_mode, nc_close_file - -use amsua_netCDF_support_mod, only : define_amsua_dimensions, & - define_amsua_variables, & - fill_amsua_variables - -use amsua_bt_mod, only : amsua_bt_granule, amsua_bt_rdr, & - AMSUA_BT_GEOXTRACK, AMSUA_BT_GEOTRACK, AMSUA_BT_CHANNEL, & - AMSUA_BT_CALXTRACK, AMSUA_BT_SPACEXTRACK, AMSUA_BT_BBXTRACK, & - AMSUA_BT_WARMPRTA11, AMSUA_BT_WARMPRTA12, AMSUA_BT_WARMPRTA2 - -implicit none - -! ---------------------------------------------------------------------- -! Declare local parameters -! ---------------------------------------------------------------------- - -! version controlled file description for error handling, do not edit -character(len=*), parameter :: source = 'L1_AMSUA_to_netcdf.f90' -character(len=*), parameter :: revision = '' -character(len=*), parameter :: revdate = '' - -type(amsua_bt_granule) :: amsua_bt_gran - -integer :: iunit, io, ncid -integer :: chan - -! ---------------------------------------------------------------------- -! Declare namelist parameters -! ---------------------------------------------------------------------- - -character(len=256) :: file_name = '' -character(len=256) :: outputfile = 'amsua_bt_granule.nc' -integer :: track = 1 ! 1-based index along track -integer :: xtrack = 0 ! 1-based index across-track - -namelist /L1_AMSUA_to_netcdf_nml/ file_name, outputfile, & - track, xtrack - -! ---------------------------------------------------------------------- -! start of executable program code -! ---------------------------------------------------------------------- - -call initialize_utilities('L1_AMSUA_to_netcdf') -call register_module(source,revision,revdate) - -call error_handler(E_ERR,source,'ROUTINE NOT USABLE.', & - text2='Routine barely started. 
Needs a lot of work and expect', & - text3='complications with simultaneous HDF4, netCDF, and HDF5.') - -!---------------------------------------------------------------------- -! Read the namelist -!---------------------------------------------------------------------- - -call find_namelist_in_file('input.nml', 'L1_AMSUA_to_netcdf_nml', iunit) -read(iunit, nml = L1_AMSUA_to_netcdf_nml, iostat = io) -call check_namelist_read(iunit, io, 'L1_AMSUA_to_netcdf_nml') - -! Record the namelist values used for the run ... -if (do_nml_file()) write(nmlfileunit, nml=L1_AMSUA_to_netcdf_nml) -if (do_nml_term()) write( * , nml=L1_AMSUA_to_netcdf_nml) - -print *, "This code extracts a single profile from a specified" -print *, " input file to stdout. It requires exactly three " -print *, "arguments." -print *, " 1) scan line number [1, 45]" -print *, " 2) field-of-view number [1, 30]" -print *, " 3) file name" - -if (track.lt.1.OR.track.gt.45) then - print *, "Error: along-track scan line number [1, 45]" - print *, "got ", track - STOP -endif - -if (xtrack.lt.1.OR.xtrack.gt.30) then - print *, "Error: second argument must be scan line number [1, 30]" - print *, "got ", xtrack - STOP -endif - -call amsua_bt_rdr(file_name, amsua_bt_gran) - -! Each AMSU-A scan has 2 "state"s, indicating whether the AMSU-A1 and -! AMSU-A2 instruments were in science mode when the data -! was taken and whether the data was successfully transmitted. - -if (amsua_bt_gran%state1(track).ne.0) then - if (amsua_bt_gran%state1(track).EQ.1) then - print *, "Warning, AMSU-A1 state for this profile is SPECIAL" - else if (amsua_bt_gran%state1(track).EQ.2) then - print *, "Warning, AMSU-A1 state for this profile is ERRONEOUS" - else if (amsua_bt_gran%state1(track).EQ.3) then - print *, "Warning, AMSU-A1 state for this profile is MISSING" - else - print *, "Warning, AMSU-A1 state for this profile is UNKNOWN" - endif - - print *, "NOT PROCESS" - -endif - -if (amsua_bt_gran%state2(track).ne.0) then - if (amsua_bt_gran%state2(track).EQ.1) then - print *, "Warning, AMSU-A2 state for this profile is SPECIAL" - else if (amsua_bt_gran%state2(track).EQ.2) then - print *, "Warning, AMSU-A2 state for this profile is ERRONEOUS" - else if (amsua_bt_gran%state2(track).EQ.3) then - print *, "Warning, AMSU-A2 state for this profile is MISSING" - else - print *, "Warning, AMSU-A2 state for this profile is UNKNOWN" - endif - - print *, "NOT PROCESS" - -endif - -print *, "# AMSU Brightness Temperatures (Kelvins)" -print *, "# Channels 1-15" -print *, "# -9999 flags bad value" - -do chan = 1, AMSUA_BT_CHANNEL - write(*, "(f8.2)") amsua_bt_gran%brightness_temp(chan,xtrack,track) -enddo - -!------------------------------------------------------------------------------- -! 
convert the granule to netCDF -!------------------------------------------------------------------------------- - -ncid = nc_create_file( outputfile, source) -call nc_begin_define_mode( ncid, source) -call define_amsua_dimensions(ncid, source) -call define_amsua_variables( amsua_bt_gran, ncid, source) -call nc_end_define_mode( ncid, source) -call fill_amsua_variables( amsua_bt_gran, ncid, source) -call nc_close_file( ncid, source) - -call finalize_utilities() - -end program L1_AMSUA_to_netcdf diff --git a/observations/obs_converters/AIRS/work/quickbuild.sh b/observations/obs_converters/AIRS/work/quickbuild.sh index e5c2bc45f3..4b1fcedd61 100755 --- a/observations/obs_converters/AIRS/work/quickbuild.sh +++ b/observations/obs_converters/AIRS/work/quickbuild.sh @@ -15,7 +15,6 @@ EXTRA="$DART/observations/obs_converters/obs_error/ncep_obs_err_mod.f90" programs=( -L1_AMSUA_to_netcdf advance_time convert_airs_L2 convert_amsu_L1 From a564ad48cfe995ae7c93f60ce032b0ccf8aca60e Mon Sep 17 00:00:00 2001 From: Mohamad Gharamti Date: Tue, 12 Mar 2024 12:32:06 -0600 Subject: [PATCH 13/36] Cleaning up and removing commented out code --- .../create_identity_streamflow_obs.f90 | 2 +- models/wrf_hydro/model_mod.f90 | 30 ----------- models/wrf_hydro/noah_hydro_mod.f90 | 54 +++++-------------- 3 files changed, 13 insertions(+), 73 deletions(-) diff --git a/models/wrf_hydro/create_identity_streamflow_obs.f90 b/models/wrf_hydro/create_identity_streamflow_obs.f90 index db2c78488a..dc8ace47cc 100644 --- a/models/wrf_hydro/create_identity_streamflow_obs.f90 +++ b/models/wrf_hydro/create_identity_streamflow_obs.f90 @@ -668,7 +668,7 @@ function estimate_total_obs_count(file_list,nfiles) result (num_obs) ! We need to know how many observations there may be. ! Specifying too many is not really a problem. -! I am adding 20% +! I am multiplying by 10. num_obs = 10.0_r8 * nobs * nfiles diff --git a/models/wrf_hydro/model_mod.f90 b/models/wrf_hydro/model_mod.f90 index d1b5c7c7e3..8f943989ea 100644 --- a/models/wrf_hydro/model_mod.f90 +++ b/models/wrf_hydro/model_mod.f90 @@ -1402,36 +1402,6 @@ end subroutine configure_domains !> close_ind(:) the list of desired indices ON MY TASK !> dist(:) the corresponding distances of the desired indices -!subroutine get_my_close(num_superset, superset_indices, superset_distances, & -! my_task_indices, num_close, close_ind, dist) -! -!integer, intent(in) :: num_superset -!integer(i8), intent(in) :: superset_indices(:) -!real(r8), intent(in) :: superset_distances(:) -!integer(i8), intent(in) :: my_task_indices(:) -!integer, intent(out) :: num_close -!integer, intent(out) :: close_ind(:) -!real(r8), intent(out) :: dist(:) -! -!integer :: itask, isuper -! -!num_close = 0 -! -!do itask = 1,size(my_task_indices) -! do isuper = 1,num_superset -! -! ! if stuff on my task ... equals ... global stuff I want ... -! if ( my_task_indices(itask) == superset_indices(isuper) ) then -! num_close = num_close + 1 -! close_ind(num_close) = itask -! dist(num_close) = superset_distances(isuper) -! endif -! -! enddo -!enddo -! 
-!end subroutine get_my_close - subroutine get_my_close(num_superset, superset_indices, superset_distances, & my_task_indices, num_close, close_ind, dist) diff --git a/models/wrf_hydro/noah_hydro_mod.f90 b/models/wrf_hydro/noah_hydro_mod.f90 index be3ac6fce6..8af0cee699 100644 --- a/models/wrf_hydro/noah_hydro_mod.f90 +++ b/models/wrf_hydro/noah_hydro_mod.f90 @@ -981,7 +981,6 @@ subroutine get_routelink_constants(filename) integer, allocatable :: fromIndsStart(:) integer, allocatable :: fromIndsEnd(:) integer, allocatable :: toIndex(:) -!integer, allocatable :: num_up_links(:) ncid = nc_open_file_readonly(filename, routine) database_length = nc_get_dimension_size(ncid,'index', routine) @@ -1010,14 +1009,10 @@ subroutine get_routelink_constants(filename) !! Allocate these module variables allocate(linkLong(n_link), linkLat(n_link), linkAlt(n_link)) -!allocate(roughness(n_link)) -!allocate(linkID(n_link)) -!allocate(gageID(n_link)) allocate(channelIndsX(n_link), channelIndsY(n_link)) allocate(connections(n_link)) do i = 1, n_link - !allocate(connections(i)%upstream_linkID(n_upstream)) allocate(connections(i)%upstream_index(num_up_links(i))) enddo @@ -1035,22 +1030,10 @@ subroutine get_routelink_constants(filename) call nc_get_variable(ncid,'lon', linkLong, routine) call nc_get_variable(ncid,'lat', linkLat, routine) call nc_get_variable(ncid,'alt', linkAlt, routine) -!call nc_get_variable(ncid,'n', roughness, routine) -!call nc_get_variable(ncid,'link', linkID, routine) call nc_get_variable(ncid,'Length', length, routine) call nc_get_variable(ncid,'to', to, routine) call nc_get_variable(ncid,'bucket_comid_mask',BucketMask,routine) -! no snappy accessor routine for character arrays -! call nc_get_variable(ncid,'gages', gageID, routine) - -!io = nf90_inq_varid(ncid,'gages', VarID) -!call nc_check(io, routine, 'inq_varid', 'gages', filename) -!io = nf90_get_var(ncid, VarID, gageID) -!call nc_check(io, routine, 'get_var', 'gages', filename) - -!call nc_close_file(ncid, routine) - ! 
Longitude [DART uses longitudes [0,360)] where(linkLong < 0.0_r8) linkLong = linkLong + 360.0_r8 where(linkLong == 360.0_r8) linkLong = 0.0_r8 @@ -1059,11 +1042,10 @@ subroutine get_routelink_constants(filename) channelIndsX = (/ (i, i=1,n_link) /) channelIndsY = (/ (i, i=1,n_link) /) -call fill_connections(toIndex,fromIndices,fromIndsStart,fromIndsEnd, num_up_links) +call fill_connections(toIndex,fromIndices,fromIndsStart,fromIndsEnd) if (debug > 99) then do i=1,n_link - !write(*,*)'link ',i,linkLong(i),linkLat(i),linkAlt(i),gageID(i),roughness(i),linkID(i),BucketMask(i) write(*,*)'link ',i,linkLong(i),linkLat(i),linkAlt(i),BucketMask(i) enddo write(*,*)'Longitude range is ',minval(linkLong),maxval(linkLong) @@ -1071,8 +1053,6 @@ subroutine get_routelink_constants(filename) write(*,*)'Altitude range is ',minval(linkAlt),maxval(linkAlt) endif -!deallocate(linkID) -!deallocate(gageID) deallocate(length) deallocate(to) deallocate(toIndex) @@ -1086,13 +1066,12 @@ end subroutine get_routelink_constants !----------------------------------------------------------------------- !> -subroutine fill_connections(toIndex,fromIndices,fromIndsStart,fromIndsEnd,num_up_links) +subroutine fill_connections(toIndex,fromIndices,fromIndsStart,fromIndsEnd) integer, intent(in) :: toIndex(:) integer, intent(in) :: fromIndices(:) integer, intent(in) :: fromIndsStart(:) integer, intent(in) :: fromIndsEnd(:) -integer, intent(inout) :: num_up_links(:) integer :: i, j, id, nfound integer, parameter :: MAX_UPSTREAM_LINKS = 5 @@ -1101,13 +1080,10 @@ subroutine fill_connections(toIndex,fromIndices,fromIndsStart,fromIndsEnd,num_up ! hydro_domain_offset = 0 !>@todo get the actual offset somehow do i = 1,n_link -! connections(i)%gageName = gageID(i) -! connections(i)%linkID = linkID(i) connections(i)%linkLength = length(i) connections(i)%domain_offset = i connections(i)%downstream_linkID = to(i) connections(i)%downstream_index = toIndex(i) - !connections(i)%upstream_linkID(:) = MISSING_I connections(i)%upstream_index(:) = MISSING_I enddo @@ -1138,20 +1114,14 @@ subroutine fill_connections(toIndex,fromIndices,fromIndsStart,fromIndsEnd,num_up if (debug > 99) then write(string1,'("PE ",i3)') my_task_id() -! 
do i = 1,n_link - do i = 54034,54034 - write(*,*)'' - write(*,*)trim(string1),' connectivity for link : ',i - !write(*,*)trim(string1),' gageName : ',connections(i)%gageName - !write(*,*)trim(string1),' linkID : ',connections(i)%linkID - write(*,*)trim(string1),' linkLength : ',connections(i)%linkLength - write(*,*)trim(string1),' domain_offset : ',connections(i)%domain_offset - - write(*,*)trim(string1),' downstream_linkID : ',connections(i)%downstream_linkID - write(*,*)trim(string1),' downstream_index : ',connections(i)%downstream_index - - !write(*,*)trim(string1),' upstream_linkID : ',connections(i)%upstream_linkID - write(*,*)trim(string1),' upstream_index : ',connections(i)%upstream_index + do i = 1,n_link + write(*,*)'' + write(*,*)trim(string1),' connectivity for link : ',i + write(*,*)trim(string1),' linkLength : ',connections(i)%linkLength + write(*,*)trim(string1),' domain_offset : ',connections(i)%domain_offset + write(*,*)trim(string1),' downstream_linkID : ',connections(i)%downstream_linkID + write(*,*)trim(string1),' downstream_index : ',connections(i)%downstream_index + write(*,*)trim(string1),' upstream_index : ',connections(i)%upstream_index enddo endif @@ -1199,10 +1169,10 @@ recursive subroutine get_link_tree(my_index, reach_cutoff, depth, & write(*,*)trim(string1),' glt:task, nclose ', nclose write(*,*)trim(string1),' glt:task, close_indices ', close_indices(1:nclose) write(*,*)trim(string1),' glt:task, distances ', distances(1:nclose) + write(*, '(A, i5, f10.2, i8, i4, 5i8)') 'depth, distance, node, # upstream, up nodes: ', & + depth, direct_length, my_index, num_up_links(my_index), connections(my_index)%upstream_index(:) endif -!write(*, '(A, i5, f10.2, i8, i4, 5i8)') 'depth, distance, node, # upstream, up nodes: ', depth, direct_length, my_index, num_up_links(my_index), connections(my_index)%upstream_index(:) - do iup = 1,num_up_links(my_index) call get_link_tree(connections(my_index)%upstream_index(iup), & reach_cutoff, depth+1, direct_length, nclose, close_indices, distances) From 39c1cfb919abbab3ed6b64177b6257570eb19486 Mon Sep 17 00:00:00 2001 From: braczka Date: Thu, 14 Mar 2024 09:56:09 -0600 Subject: [PATCH 14/36] AIRS converter generalize to version 7 --- observations/obs_converters/AIRS/airs_JPL_mod.f90 | 10 +++++----- observations/obs_converters/AIRS/airs_obs_mod.f90 | 2 +- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/observations/obs_converters/AIRS/airs_JPL_mod.f90 b/observations/obs_converters/AIRS/airs_JPL_mod.f90 index 9e4e8acb61..19f6468be4 100644 --- a/observations/obs_converters/AIRS/airs_JPL_mod.f90 +++ b/observations/obs_converters/AIRS/airs_JPL_mod.f90 @@ -2,7 +2,7 @@ ! adapted from original JPL code, example AIRS readers ! -! updated for version 6 of the AIRS data formats +! updated for version 6 and 7 of the AIRS data formats ! added fields needed to support radiances ! removed unused items to streamline the code. ! @@ -511,7 +511,7 @@ subroutine airs_ret_rdr(file_name, airs_ret_gran, ver) print *, "Error ", statn, " reading field ", & "TAirStdErr" - if (ver == 6) then + if (ver .eq. 6 .or. ver .eq. 7) then edge(3) = 45 edge(2) = 30 edge(1) = 28 @@ -543,7 +543,7 @@ subroutine airs_ret_rdr(file_name, airs_ret_gran, ver) print *, "Error ", statn, " reading field ", & "TSurfAirErr" - if (ver == 6) then + if (ver .eq. 6 .or. ver .eq. 
7) then edge(2) = 45 edge(1) = 30 statn = SWrdfld(swid, "TSurfAir_QC", & @@ -621,7 +621,7 @@ subroutine airs_ret_rdr(file_name, airs_ret_gran, ver) print *, "Error ", statn, " reading field ", & "H2OMMRStdErr" - if (ver == 6) then + if (ver .eq. 6 .or. ver .eq. 7) then edge(3) = 45 edge(2) = 30 edge(1) = 14 @@ -653,7 +653,7 @@ subroutine airs_ret_rdr(file_name, airs_ret_gran, ver) print *, "Error ", statn, " reading field ", & "totH2OStdErr" - if (ver == 6) then + if (ver .eq. 6 .or. ver .eq. 7) then edge(2) = 45 edge(1) = 30 statn = SWrdfld(swid, "totH2OStd_QC", & diff --git a/observations/obs_converters/AIRS/airs_obs_mod.f90 b/observations/obs_converters/AIRS/airs_obs_mod.f90 index aca52d2b38..c1ba55e665 100644 --- a/observations/obs_converters/AIRS/airs_obs_mod.f90 +++ b/observations/obs_converters/AIRS/airs_obs_mod.f90 @@ -268,7 +268,7 @@ subroutine make_obs_sequence ( seq, granule, lon1, lon2, lat1, lat2, & vert_Q_loop: do ivert=istart,humidity_top_index - if ((version == 6) .and. (granule%H2OMMRStd_QC(ivert, icol, irow) > 0)) cycle vert_Q_loop + if ((version.eq.6 .or. version.eq.7) .and. (granule%H2OMMRStd_QC(ivert, icol, irow) > 0)) cycle vert_Q_loop qqc = 0 ! if we get here, the quality control is 'Best' == 0 From db509d56f0b838f071e5dcca5c80783d07511781 Mon Sep 17 00:00:00 2001 From: braczka Date: Fri, 15 Mar 2024 10:54:28 -0600 Subject: [PATCH 15/36] Update AIRS converter documentation --- observations/obs_converters/AIRS/README.rst | 44 +++--- .../obs_converters/AIRS/convert_airs_L2.rst | 133 ++++++++++-------- .../obs_converters/AIRS/work/input.nml | 7 +- 3 files changed, 103 insertions(+), 81 deletions(-) diff --git a/observations/obs_converters/AIRS/README.rst b/observations/obs_converters/AIRS/README.rst index 5115917e7d..7fcaf77d6b 100644 --- a/observations/obs_converters/AIRS/README.rst +++ b/observations/obs_converters/AIRS/README.rst @@ -5,8 +5,10 @@ The AIRS directory contains both the AIRS and AMSU-A observation converters. AIRS is the Atmospheric Infrared Sounder (AIRS) Level 2 observations. AMSU-A is the Advanced Microwave Sounding Unit (AMSU-A) L1B Brightness Temperatures. -- :doc:`./convert_airs_L2` for converting AIRS temperature and moisture retrievals. -- :doc:`./convert_amsu_L1` for converting AMSU-A radiances. +- :doc:`./convert_airs_L2` is used to convert AIRS temperature and + specific humidity vertical profile observations. +- :doc:`./convert_amsu_L1` is used to convert AMSU-A radiances (brightness temperature) + observations. Both converters are in the AIRS directory because of the complicated history of the data used to create the AIRS L2 product (which includes some AMSU observations). @@ -17,32 +19,32 @@ used by both converters. Alas, that has not proven to be the case. Dependencies ------------ -Both ``convert_airs_L2`` and ``convert_amsu_L1`` require the HDF-EOS2 libraries. +Both ``convert_airs_L2`` and ``convert_amsu_L1`` require the HDF-EOS2 libraries, +which, in turn, requires HDF4. HDF4 is available on Derecho using the ``module load hdf`` +command. -The HDF-EOS2 library required a patch to work with DART observation converters. -For details on the patch see `issue #590 `_. -The patched version of the library is available on Derecho. The _intel, _gfortran, -_nvhpc, _cray indicates which compiler was used to build the library. Select an -appropriate mkmf.template for the compiler you are using. - -.. 
code:: text - - /glade/campaign/cisl/dares/libraries/hdf-eos_intel - /glade/campaign/cisl/dares/libraries/hdf-eos_gfortran - /glade/campaign/cisl/dares/libraries/hdf-eos_nvhpc - /glade/campaign/cisl/dares/libraries/hdf-eos_cray - -hdf-eos requires HDF4. HDF4 is available on Derecho with ``module load hdf4``. +The ``convert_amsu_L1`` script also requires the RTTOV libraries. -``convert_amsu_L1`` requires the RTTOV libraries. - -The following mkmf.templates, which are available in DART/build_templates have been used to compile the AIRS and AMSUA -observation converters on Derecho. +The following mkmf.templates for gfortran and intel compilers respectively, +are available in DART/build_templates, and they have been used to compile +the AIRS and AMSUA observation converters on Derecho. They include the +proper library paths for both HDF-EOS2 and RTTOV. The HDF-EOS2 library +required a patch to work with DART observation converters. +For details on the patch see `issue #590 `_. .. code :: text mkmf.template.AIRS.gfortran mkmf.template.AIRS.intel +In addition to gfortran and intel compiled hdf-eos as mentioned above, +Derecho also includes nvhpc and cray compiled hdf-eos libraries, with +the paths provided below. + +.. code:: text + /glade/campaign/cisl/dares/libraries/hdf-eos_intel + /glade/campaign/cisl/dares/libraries/hdf-eos_gfortran + /glade/campaign/cisl/dares/libraries/hdf-eos_nvhpc + /glade/campaign/cisl/dares/libraries/hdf-eos_cray diff --git a/observations/obs_converters/AIRS/convert_airs_L2.rst b/observations/obs_converters/AIRS/convert_airs_L2.rst index 0e044f2306..d2b178051c 100644 --- a/observations/obs_converters/AIRS/convert_airs_L2.rst +++ b/observations/obs_converters/AIRS/convert_airs_L2.rst @@ -10,9 +10,7 @@ instrument aboard the second Earth Observing System (EOS) polar-orbiting platfor together in a polar orbit, collectively known as the “A-train”. In combination with the Advanced Microwave Sounding Unit (AMSU) and the Humidity Sounder for Brazil (HSB), AIRS constitutes an innovative atmospheric sounding group of visible, infrared, and -microwave sensors. AIRS data will be generated continuously. Global coverage will -be obtained twice daily (day and night) on a 1:30pm sun synchronous orbit from a -705-km altitude. +microwave sensors. AIRS data will be generated continuously. The AIRS Standard Retrieval Product consists of retrieved estimates of cloud and surface properties, plus profiles of retrieved temperature, water vapor, @@ -21,23 +19,18 @@ quantities will also be part of the Standard Product. The temperature profile vertical resolution is 28 levels total between 1100 mb and 0.1 mb, while moisture profile is reported at 14 atmospheric layers between 1100 mb and 50 mb. The horizontal resolution is 50 km. An AIRS granule has been set as 6 minutes of data, -30 footprints cross track by 45 lines along track. The Shortname for this product -is AIRX2RET. (AIRS2RET is the same product but without the AMSU data.) +There are 240 granules per day, with orbit repeat cycle of approximately 16 days. -Atmospheric Infrared Sounder (AIRS) Level 2 observations -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Overview of L1-L3 Atmospheric Infrared Sounder (AIRS) Observations +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -AIRS data includes atmospheric temperature in the troposphere, derived -moisture profiles, land and ocean surface temperatures, surface -emissivity, cloud fraction, cloud top height, and ozone burden in the -atmosphere. 
+The ``convert_airs_L2`` converter is designed specifically for +**temperature and moisture retrievals for L2 observations** only. +For reference, we provide a brief description of the L1-L3 AIRS data +products below. For more detailed information please see the +`AIRS documentation page: `_ -Several types of AIRS data, with varying levels of processing, are available. -The following descriptions are taken from the -`V5_Data_Release_UG `__ -document: - The L1B data product includes geolocated, calibrated observed microwave, infrared and visible/near infrared radiances, as well as Quality Assessment (QA) data. The radiances are well calibrated; however, not all QA data have @@ -54,21 +47,41 @@ document: There are three products: daily, 8-day and monthly. Each product provides separate ascending (daytime) and descending (nighttime) binned data sets. -The converter in this directory processes level 2 (L2) data files, using data -set ``AIRS_DP`` and data product ``AIRX2RET`` or ``AIRS2RET`` without ``HSB`` -(the instrument measuring humidity which failed). - -Getting the data currently means putting in a start/stop time at -`this web page `__. -The keyword is ``AIRX2RET`` and put in the time range of interest and optionally a -geographic region. Each file contains 6 minutes of data, is about 2.3 Megabytes, -and globally there are 240 files/day (about 550 Megabytes/day). There are additional -options for getting only particular variables of interest, but the current reader -expects whole files to be present. Depending on your connection to the internet, -there are various options for downloading. We have chosen to download a ``wget`` -script which is created by the web page after adding the selected files to a 'cart' -and 'checking out'. The script has a series of ``wget`` commands which downloads -each file, one at a time, which is run on the machine where you want the data. + +Downloading Atmospheric Infrared Sounder (AIRS) L2 Observations +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +There are several data file types and versions that contain L2 +observations for temperature and moisture profiles. **We recommend the use of +the AIRS2RET version 7 (AIRS2RETv7) data product.** The ``AIRSRET`` data (AIRS data only) +product is preferred to the ``AIRX2RET`` (AIRS/AMSU data) because the radiometric +noise in several AMSU channels increased (since June 2007) degrading the +``AIRXRET`` product. Furthermore, the version 7 product is higher quality than version 6 +because of an improved retrieval algorithm leading to significantly improved RMSE and bias statistics. +See the `AIRSRETv7 documentation `_ +for more information. + +Although we recommend ``AIRSRETv7``, the `convert_airs_L2` converter is compatible +with ``AIRS2RET`` and ``AIRX2RET`` versions 5-7. Version 5 is no longer available +within the GES DISC database. For more information on these data products see the +links below: + +`AIRSRETv6 `_ +`AIRXRETv6 `_ +`AIRXRETv7 `_ + +The AIRS data is located within the Goddard Earth Sciences Data and Information +Services Center (GES DISC) `located here `_. You need +to create an Earthdata account before you can download data. As an example, to +access the AIRSRETv7 data, search on keyword ``AIRSRET`` and locate +the AIRSRET 7.0 data set within your search results. Next, click on the +`Subset/Get Data` link, then refine your search results by 1) data range (time) +and 2) spatial region. 
+ +There are various options for downloading, however,the most straightforward approach +for macOS and Linux users is to use the ``wget`` command. The ``download instructions`` +provide the proper wget flags/options. The ``Download Links List`` provides +the AIRS file list based on your search results. convert_airs_L2.f90 ------------------- @@ -79,14 +92,9 @@ corresponding vertical pressure levels. However, the moisture obs are the mean f the layer, so the location in the vertical is the midpoint, in log space, of the current layer and the layer above it. There is an alternative computation for the moisture across the layer which may be more accurate, but requires a forward -operator subroutine to be written and for the observation to contain metadata. -The observation could be defined with a layer top, in pressure, and a number of -points to use for the integration across the layer. Then the forward operator would -query the model at each of the N points in the vertical for a given horizontal -location, and compute the mean moisture value. This code has not been implemented -yet, and would require a different QTY_xxx to distinguish it from the simple -location/value moisture obs. See the GPS non-local operator code for an example -of how this would need to be implemented. +operator subroutine to be written and for the observation converter to include +additional metadata to support this forward operator. For more information see +the `Future Plans` section below. The temperature observations are located on standard levels; there is a single array of heights in each file and all temperature data is located on one of these levels. @@ -95,18 +103,18 @@ the levels; in their terminology the fixed heights are 'levels' and the space be them are 'layers'. The current converter locates the moisture obs at the midpoint, in log space, between the levels. -The hdf files need to be downloaded from the data server, in any manner you choose. -The converter program reads each hdf granule and outputs a DART obs_seq file +The converter program reads each AIRS hdf file granule and outputs a DART obs_seq file containing up to 56700 observations. Only those with a quality control of 0 (Best) are kept. The resulting obs_seq files can be merged with the :doc:`../../../assimilation_code/programs/obs_sequence_tool/obs_sequence_tool` into larger time periods. -It is possible to restrict the output observation sequence to contain data from a -region of interest throught the use of the namelist parameters. If you need a region -that spans the Prime Meridian lon1 can be a larger number than lon2, for example, -a region from 300 E to 40 E and 60 S to 30 S (some of the South Atlantic), -would be *lon1 = 300, lon2 = 40, lat1 = -60, lat2 = -30*. +During the excecution of the obs converter, It is possible to restrict the output +observation sequence to contain data from a region of interest throught the use of +the namelist parameters (described in Namelist section below). If you need a region +that spans the Prime Meridian, ``lon1`` can be a larger number than ``lon2``. +For example, a region from 300 E to 40 E and 60 S to 30 S (some of the South Atlantic), +would be ``lon1 = 300``, ``lon2 = 40``, ``lat1 = -60``, ``lat2 = -30``. Namelist @@ -134,7 +142,7 @@ The default values are shown below. More realistic values are provided in cross_track_thin = 0 along_track_thin = 0 use_NCEP_errs = .false. - version = 6 + version = 7 / | @@ -159,32 +167,33 @@ The default values are shown below. 
More realistic values are provided in +--------------------+------------------------+--------------------------------------------------------------+ | outputfile | character(len=256) | The name of the output observation sequence file. | +--------------------+------------------------+--------------------------------------------------------------+ - | lon1 | real(r8) | the West-most longitude of interest in degrees. [0.0, 360] | + | lon1 | real(r8) | The West-most longitude of interest in degrees. [0.0, 360] | +--------------------+------------------------+--------------------------------------------------------------+ - | lon2 | real(r8) | the East-most longitude of interest in degrees. [0.0, 360] | + | lon2 | real(r8) | The East-most longitude of interest in degrees. [0.0, 360] | +--------------------+------------------------+--------------------------------------------------------------+ - | lat1 | real(r8) | the South-most latitude of interest in degrees. [-90.0,90.0] | + | lat1 | real(r8) | The South-most latitude of interest in degrees. [-90.0,90.0] | +--------------------+------------------------+--------------------------------------------------------------+ - | lat2 | real(r8) | the North-most latitude of interest in degrees. [-90.0,90.0] | + | lat2 | real(r8) | The North-most latitude of interest in degrees. [-90.0,90.0] | +--------------------+------------------------+--------------------------------------------------------------+ | min_MMR_threshold | real(r8) | The data files contains 'Retrieved Water Vapor Mass Mixing | - | | | Ratio'. This is the minimum threshold, in gm/kg, that will | - | | | be converted into a specific humidity observation. | + | | | Ratio'. This is the minimum threshold (gm/kg) that will | + | | | be converted into a specific humidity observation (kg/kg). | +--------------------+------------------------+--------------------------------------------------------------+ | top_pressure_level | real(r8) | The highest pressure level of interest (in mb). | +--------------------+------------------------+--------------------------------------------------------------+ - | cross_track_thin | integer | provides ability to thin the data by keeping every Nth data | + | cross_track_thin | integer | Provides ability to thin the data by keeping every Nth data | | | | value in the cross-track scan. [0,30] | | | | e.g. 3 == keep every third value. 0 is no thinning. | +--------------------+------------------------+--------------------------------------------------------------+ - | along_track_thin | integer | provides ability to thin the data by keeping every Nth data | + | along_track_thin | integer | Provides ability to thin the data by keeping every Nth data | | | | value in the along-track scan. [0,45] | | | | e.g. 4 == keep only every 4th row. 0 is no thinning. | +--------------------+------------------------+--------------------------------------------------------------+ - | use_NCEP_errs | logical | if .true. use the maximum observation error from either the | + | use_NCEP_errs | logical | If .true. use the maximum observation error from either the | | | | granule or the NCEP equivalent (from ``obs_error_mod.f90``) | +--------------------+------------------------+--------------------------------------------------------------+ - | version | integer | The AIRS file format version. | + | version | integer | The AIRS file format version. Version 7 is recommended, but | + | | | the converter is compatible with versions 5-7. 
| +--------------------+------------------------+--------------------------------------------------------------+ @@ -204,5 +213,13 @@ Future Plans ~~~~~~~~~~~~ If a more accurate moisture observation was needed, the observation value could be computed by actually integrating multiple values between the levels. -At this point it doesn't seem necessary. +The observation could be defined with a layer top, in pressure, and a number of +points to use for the integration across the layer. Then the forward operator would +query the model at each of the N points in the vertical for a given horizontal +location, and compute the mean moisture value. This code has not been implemented +yet, and would require a different QTY_xxx to distinguish it from the simple +location/value moisture obs. The observation converter would also have to bring +in moisture observation metadata for this forward operator. See the +GPS non-local operator code (:doc:`../gps/gps`) for an example of how this +would need to be implemented. diff --git a/observations/obs_converters/AIRS/work/input.nml b/observations/obs_converters/AIRS/work/input.nml index a827cdc9b9..8b83eddebf 100644 --- a/observations/obs_converters/AIRS/work/input.nml +++ b/observations/obs_converters/AIRS/work/input.nml @@ -12,10 +12,12 @@ '../../../../observations/forward_operators/obs_def_AIRS_mod.f90' / -! version 5 file?: +! example version 5 file: ! l2_files = '../data/AIRS.2007.11.01.001.L2.RetStd.v5.2.2.0.G08078150655.hdf' -! version 6 file?: +! example version 6 file: ! l2_files = '../data/AIRS.2017.01.01.110.L2.RetStd_IR.v6.0.31.1.G19058124823.hdf' +! example version 7 file: +! l2_files = '../AIRS.2020.06.15.224.L2.RetStd_IR.v7.0.4.0.G20330033505.hdf' &convert_airs_L2_nml l2_files = 'AIRS.2017.01.01.110.L2.RetStd_IR.v6.0.31.1.G19058124823.hdf' @@ -29,6 +31,7 @@ lon2 = 360.0 lat1 = -90.0 lat2 = 90.0 + version = 7 / From 163c4332a6f2ddcabd7bdc52491e493f1c86c6f0 Mon Sep 17 00:00:00 2001 From: braczka Date: Fri, 15 Mar 2024 16:39:25 -0600 Subject: [PATCH 16/36] Updated AMSU obs converter docs --- .../obs_converters/AIRS/convert_amsu_L1.rst | 204 ++++++++---------- 1 file changed, 93 insertions(+), 111 deletions(-) diff --git a/observations/obs_converters/AIRS/convert_amsu_L1.rst b/observations/obs_converters/AIRS/convert_amsu_L1.rst index d8eebb683f..1521ac01fb 100644 --- a/observations/obs_converters/AIRS/convert_amsu_L1.rst +++ b/observations/obs_converters/AIRS/convert_amsu_L1.rst @@ -4,92 +4,100 @@ Program ``convert_amsu_L1`` Overview --------- -There is a little bit of confusing history to be aware of for AMSU/A: - -https://en.wikipedia.org/wiki/Advanced_microwave_sounding_unit#History - -AMSU/A was flown on NOAA 15-17. It is also on the Aqua satellite (that -also houses AIRS) as well as the European MetOp. It has been replaced by -ATMS on NOAA-20. - -The datset of interest is: “AIRS/Aqua L1B AMSU (A1/A2) geolocated and -calibrated brightness temperatures V005 (AIRABRAD) at GES DISC” The -*short name* for this dataset is ‘AIRABRAD’ - -The introductory paragraph for the dataset is: - - Version 5 is the current version of the data set.tmospheric Infrared - Sounder (AIRS) is a grating spectrometer (R = 1200) aboard the second - Earth Observing System (EOS) polar-orbiting platform, EOS Aqua. In - combination with the Advanced Microwave Sounding Unit (AMSU) and the - Humidity Sounder for Brazil (HSB), AIRS constitutes an innovative - atmospheric sounding group of visible, infrared, and microwave - sensors. 
The AMSU-A instrument is co-aligned with AIRS so that - successive blocks of 3 x 3 AIRS footprints are contained within one - AMSU-A footprint. AMSU-A is primarily a temperature sounder that - provides atmospheric information in the presence of clouds, which can - be used to correct the AIRS infrared measurements for the effects of - clouds. This is possible because non-precipitating clouds are for the - most part transparent to microwave radiation, in contrast to visible - and infrared radiation which are strongly scattered and absorbed by - clouds. AMSU-A1 has 13 channels from 50 - 90 GHz and AMSU-A2 has 2 - channels from 23 - 32 GHz. The AIRABRAD_005 products are stored in - files (often referred to as “granules”) that contain 6 minutes of - data, 30 footprints across track by 45 lines along track. - -The citation information for this dataset is: - - Title: AIRS/Aqua L1B AMSU (A1/A2) geolocated and calibrated - brightness temperatures V005 Version: 005 Creator: AIRS project - Publisher: Goddard Earth Sciences Data and Information Services - Center (GES DISC) Release Date: 2007-07-26T00:00:00.000Z Linkage: - https://disc.gsfc.nasa.gov/datacollection/AIRABRAD_005.html - -NASA provides a `README.AIRABRAD.pdf `__ -through the Goddard Earth Sciences Data and Information Services Center. +The following is an excerpt from the AIRS L1B AMSU-A documentation. +The complete documentation provided by the Goddard Earth Sciences Data +and Information Services Center `(GES DISC) `_ +can be within the Documentation->README Document `found here `_. + +The Atmospheric Infrared Sounder (AIRS) Version 5 Level 1B Advanced Microwave +Sounding Unit (AMSU)-A Products (AIRABRAD) contain calibrated and +geolocated brightness temperatures in degrees Kelvin. AIRABRAD_NRT (Near Real Time) +products are also available within ~3 hours of observations globally and stay for +about 5 days from the time they are generated. This data set is generated from +AMSU-A level 1A digital numbers (DN) and contains 15 microwave channels in the +50-90 GHz and 23-32 GHz regions of the spectrum. A day's worth of data is divided +into 240 scenes (granules), each of 6 minute duration. An AMSU-A scene contains +30 cross-track footprints in each of 45 along-track scanlines, for a total of +45 x 30 = 1350 footprints per scene. AMSU-A scans three times as slowly as AIRS +(once per 8 seconds) and its footprints are approximately three times as large as +those of AIRS (45 km at nadir). This results in three AIRS scans per AMSU-A scans +and nine AIRS footprints per AMSU-A footprint. + +For more details on the history of the AMSU/A satellite instrumentation +see the following `link `_. + +To summarize, AMSU/A was flown on satellites NOAA 15-17. Versions of AMSU-A also +fly on the Aqua satellite (that also houses AIRS) as well as the European MetOp +satellite. It has been replaced by the Advance Technology Microwave Sounder (ATMS) +on the satellite NOAA-20. + +Instructions to download the AMSU-A L1B Version 5 (AIRABRAD) dataset +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The AMSU-A data is located within the Goddard Earth Sciences Data and Information +Services Center (GES DISC) `located here `_. You need +to create an Earthdata account before you can download data. To access the +AMSU-A data, search on keyword ``AIRABRAD`` and locate +the **AIRS/Aqua L1B AMSU (A1/A2) geolocated and calibrated brightness temperatures V005 +(AIRABRAD)** heading within your search results. 
+ +Next, under the Data Access header, click on `Subset/Get Data`, then refine your +search results by 1) data range (time) and 2) spatial region. + +There are various options for downloading, however,the most straightforward approach +for macOS and Linux users is to use the ``wget`` command. The ``download instructions`` +provide the proper wget flags/options. The ``Download Links List`` provides +the AMSU-A file list based on your search results. + + +| Each granule is about 560K and has names like + +:: + + AIRS.2019.06.22.236.L1B.AMSU_Rad.v5.0.0.0.G19174110442.hdf Advanced Microwave Sounding Unit (AMSU-A) L1B Brightness Temperatures --------------------------------------------------------------------- -Converting AMSU_L1 observations is a two-step process: +Perform the following steps to convert the AMSU_L1 observations: -- convert the data from HDF to netCDF -- run ``convert_amsua_L1`` to convert the netCDF file to an obs_seq file. +- 1. Download the `h4tonccf_nc4 tool `_ provided + from the hdf-eos website. Options are provided for Mac, Linux and Windows platforms. -.. note:: +- 2. Compile the the h4tonccf_nc4 tool following the instructions provided on the hdf-eos + site. Compiling requires both HDF4 and HDF-EOS2 libraries. For Derecho the HDF4 + libraries are accessed through the ``module load hdf`` command. HDF-EOS2 libaries are + provided at ``/glade/campaign/cisl/dares/libraries/``. - The native HDF-EOS2 format files must be converted to netCDF before running - ``convert_amsua_L1``. - -To convert from HDF-EOS2 to netCDF use the -`h4tonccf_nc4 ` from the HDF-EOS -tools. +- 3. Convert the data format from HDF-EOS to netCDF using the h4tonccf_nc4 exectuable + generated from Step 2 as: +:: + h4tonccf_nc4 AIRS.2019.06.22.236.L1B.AMSU_Rad.v5.0.0.0.G19174110442.hdf AMSU.nc -The netCDF files have two global -attributes that are exceedingly large and uninformative. If needed you can -remove these attributes, you can use the ``ncatted`` command from -`NCO `_. +- 3b. Optional: The netCDF files have two global attributes that are exceedingly large + and uninformative. If needed you can remove these attributes, you can use the ` + `ncatted`` command from + `NCO `_ through the following command: :: - h4tonccf_nc4 AIRS.2019.06.22.236.L1B.AMSU_Rad.v5.0.0.0.G19174110442.hdf bob.nc - ncatted -a coremetadata,global,d,,, -a StructMetadata_0,global,d,,, bob.nc bill.nc + ncatted -a coremetadata,global,d,,, -a StructMetadata_0,global,d,,, AMSU.nc AMSU_final.nc +- 4. Run ``convert_amsua_L1`` to convert the netCDF file to the DART obs_seq format. + Important: Be sure to configure your namelist settings (below) before running the + converter. + -As you can imagine, you need to download each satellite’s data in a -different way. Also, just for your information, AMSU/B has been replaced -on newer satellites by MHS and HSB, but especially MHS is almost -identical. Namelist ~~~~~~~~ -``convert_amsua_L1`` makes use of :doc:`../../forward_operators/obs_def_rttov_mod` -Only two &obs_def_rttov_nml options are required when converting -the observations: *use_zeeman* and *rttov_sensor_db_file*. +The ``convert_amsua_L1`` converter requries :doc:`../../forward_operators/obs_def_rttov_mod` +Only two ``&obs_def_rttov_nml`` options are required when converting +the observations: ``use_zeeman`` and ``rttov_sensor_db_file``. Be aware that if the RTTOV namelist option ``use_zeeman = .true.`` certain metadata must be available in the observation. This is not fully @@ -121,6 +129,12 @@ The default values are shown below. 
More realistic values are provided in verbose = 0 / +:: + + &obs_def_rttov_nml + rttov_sensor_db_file = '../../../forward_operators/rttov_sensor_db.csv' + use_zeeman = .false. + / .. container:: @@ -150,21 +164,21 @@ The default values are shown below. More realistic values are provided in | channel_list | character(len=8), | The AMSU channels desired. | | | dimension(15) | See the table below for valid input. | +--------------------+------------------------+--------------------------------------------------------------+ - | along_track_thin | integer | provides ability to thin the data by keeping every Nth data | + | along_track_thin | integer | Provides ability to thin the data by keeping every Nth data | | | | value in the along-track scan. [0,45] | | | | e.g. 4 == keep only every 4th row. 0 is no thinning. | +--------------------+------------------------+--------------------------------------------------------------+ - | cross_track_thin | integer | provides ability to thin the data by keeping every Nth data | + | cross_track_thin | integer | Provides ability to thin the data by keeping every Nth data | | | | value in the cross-track scan. [0,30] | | | | e.g. 3 == keep every third value. 0 is no thinning. | +--------------------+------------------------+--------------------------------------------------------------+ - | lon1 | real(r8) | the West-most longitude of interest in degrees. [0.0, 360] | + | lon1 | real(r8) | The West-most longitude of interest in degrees. [0.0, 360] | +--------------------+------------------------+--------------------------------------------------------------+ - | lon2 | real(r8) | the East-most longitude of interest in degrees. [0.0, 360] | + | lon2 | real(r8) | The East-most longitude of interest in degrees. [0.0, 360] | +--------------------+------------------------+--------------------------------------------------------------+ - | lat1 | real(r8) | the South-most latitude of interest in degrees. [-90.0,90.0] | + | lat1 | real(r8) | The South-most latitude of interest in degrees. [-90.0,90.0] | +--------------------+------------------------+--------------------------------------------------------------+ - | lat2 | real(r8) | the North-most latitude of interest in degrees. [-90.0,90.0] | + | lat2 | real(r8) | The North-most latitude of interest in degrees. [-90.0,90.0] | +--------------------+------------------------+--------------------------------------------------------------+ | verbose | integer | Controls the amount of run-time output. | | | | 0 == bare minimum. 3 is very verbose. | @@ -175,6 +189,10 @@ The default values are shown below. More realistic values are provided in Channel Specification ~~~~~~~~~~~~~~~~~~~~~ +The following channel description is excerpted from the +Documentation->README Document `found here `_. + + "AMSU-A primarily provides temperature soundings. It is a 15-channel microwave temperature sounder implemented as two independently operated modules. Module 1 (AMSU-A1) has 12 channels in the 50-58 GHz oxygen absorption band which provide @@ -184,10 +202,9 @@ Channel Specification precipitable water and cloud liquid water)." -To facilitate the selection of channels, either the 'Integer' or 'String' values -may be used to specify ``channel_list``. The 'Documentation' and 'netCDF' values -are provided for reference only. The 'Documentation' values are from the -`README.AIRABRAD.pdf `__ document. 
+To facilitate the selection of channels, either the ``Integer`` or ``String`` values +may be used to specify ``channel_list`` within ``&convert_amsua_L1_nml``. The +`Documentation` and `netCDF` values are provided for reference only. .. container:: @@ -233,40 +250,5 @@ are provided for reference only. The 'Documentation' values are from the +---------+---------+---------------+---------------+ -.. _instructions-to-download-the-airabrad-dataset-1: - -Instructions to download the AIRABRAD dataset -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -1. Go to https://earthdata.nasa.gov -2. Log in (or create an account if necessary) -3. Search for AIRABRAD -4. Scroll down past datasets to “Matching results.” - -- Follow the link to “AIRS/Aqua L1B AMSU (A1/A2) geolocated and - calibrated brightness temperatures V005 (AIRABRAD) at GES DISC” - -5. You should now be at - ‘https://cmr.earthdata.nasa.gov/search/concepts/C1243477366-GES_DISC.html’ - (unless they’ve changed the site). - -- Select the ‘Download data’ tab -- Select ‘Earthdata search’ -- Select the AIRS link under ‘Matching datasets’ (I have not tested the - NRT products) - -6. You can now select ‘Granule filters’ to choose your start and end - dates. -7. Select the granules you want, then click ‘download all’ and - 'download data’ -8. Click download access script -9. Follow the instructions on that page to download the data. - - -| Each granule is about 560K and has names like - -:: - - AIRS.2019.06.22.236.L1B.AMSU_Rad.v5.0.0.0.G19174110442.hdf From dd45fc97f823679a65e90867490e24e4dc285104 Mon Sep 17 00:00:00 2001 From: braczka Date: Mon, 18 Mar 2024 08:56:59 -0600 Subject: [PATCH 17/36] Syntax error on AIRS2RET product names --- .../obs_converters/AIRS/convert_airs_L2.rst | 24 ++++++++++--------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/observations/obs_converters/AIRS/convert_airs_L2.rst b/observations/obs_converters/AIRS/convert_airs_L2.rst index d2b178051c..e8e80d3a09 100644 --- a/observations/obs_converters/AIRS/convert_airs_L2.rst +++ b/observations/obs_converters/AIRS/convert_airs_L2.rst @@ -53,30 +53,32 @@ Downloading Atmospheric Infrared Sounder (AIRS) L2 Observations There are several data file types and versions that contain L2 observations for temperature and moisture profiles. **We recommend the use of -the AIRS2RET version 7 (AIRS2RETv7) data product.** The ``AIRSRET`` data (AIRS data only) +the AIRS2RET version 7 (AIRS2RETv7) data product.** The ``AIRS2RET`` data (AIRS data only) product is preferred to the ``AIRX2RET`` (AIRS/AMSU data) because the radiometric noise in several AMSU channels increased (since June 2007) degrading the -``AIRXRET`` product. Furthermore, the version 7 product is higher quality than version 6 +``AIRX2RET`` product. Furthermore, the version 7 product is higher quality than version 6 because of an improved retrieval algorithm leading to significantly improved RMSE and bias statistics. -See the `AIRSRETv7 documentation `_ +See the `AIRS2RETv7 documentation `_ for more information. -Although we recommend ``AIRSRETv7``, the `convert_airs_L2` converter is compatible +Although we recommend ``AIRS2RETv7``, the ``convert_airs_L2`` converter is compatible with ``AIRS2RET`` and ``AIRX2RET`` versions 5-7. Version 5 is no longer available within the GES DISC database. 
For more information on these data products see the links below: -`AIRSRETv6 `_ -`AIRXRETv6 `_ -`AIRXRETv7 `_ +- `AIRS2RETv6 `_ +- `AIRX2RETv6 `_ +- `AIRX2RETv7 `_ The AIRS data is located within the Goddard Earth Sciences Data and Information Services Center (GES DISC) `located here `_. You need to create an Earthdata account before you can download data. As an example, to -access the AIRSRETv7 data, search on keyword ``AIRSRET`` and locate -the AIRSRET 7.0 data set within your search results. Next, click on the -`Subset/Get Data` link, then refine your search results by 1) data range (time) -and 2) spatial region. +access the AIRS2RETv7 data, search on keyword ``AIRS2RET`` and locate +the AIRS2RET 7.0 data set within your search results. The full name is listed as +**Aqua/AIRS L2 Standard Physical Retrieval (AIRS-only) V7.0 (AIRS2RET)**. Next, click on the +``Subset/Get Data`` link within the `Data Access` portion of the webpage. This will +bring up a separate window that allows you to refine your search results +by 1) ``Refine range (time)`` and 2) `Refine region (spatial)`. There are various options for downloading, however,the most straightforward approach for macOS and Linux users is to use the ``wget`` command. The ``download instructions`` From f8ae543fc17ec17c50fea2752b476d649ad6e519 Mon Sep 17 00:00:00 2001 From: braczka Date: Tue, 19 Mar 2024 16:44:32 -0600 Subject: [PATCH 18/36] Completed AMSU-A metadata docs --- .../obs_converters/AIRS/convert_amsu_L1.rst | 91 ++++++++++++++++--- .../obs_converters/AIRS/work/input.nml | 9 -- 2 files changed, 76 insertions(+), 24 deletions(-) diff --git a/observations/obs_converters/AIRS/convert_amsu_L1.rst b/observations/obs_converters/AIRS/convert_amsu_L1.rst index 1521ac01fb..40faa7f8ee 100644 --- a/observations/obs_converters/AIRS/convert_amsu_L1.rst +++ b/observations/obs_converters/AIRS/convert_amsu_L1.rst @@ -63,45 +63,99 @@ Perform the following steps to convert the AMSU_L1 observations: - 1. Download the `h4tonccf_nc4 tool `_ provided from the hdf-eos website. Options are provided for Mac, Linux and Windows platforms. + For example, the following command downloads the CentOS7 v1.3 executable that + works for Derecho: +:: -- 2. Compile the the h4tonccf_nc4 tool following the instructions provided on the hdf-eos - site. Compiling requires both HDF4 and HDF-EOS2 libraries. For Derecho the HDF4 - libraries are accessed through the ``module load hdf`` command. HDF-EOS2 libaries are - provided at ``/glade/campaign/cisl/dares/libraries/``. + wget https://hdfeos.org/software/h4cflib/bin/linux/v1.3/CentOS7/h4tonccf_nc4 -- 3. Convert the data format from HDF-EOS to netCDF using the h4tonccf_nc4 exectuable - generated from Step 2 as: +- 2. Convert the AMSU data file from HDF-EOS to netCDF format using the ``h4tonccf_nc4`` + exectuable as shown below. Be sure to provide execute permission first: :: - h4tonccf_nc4 AIRS.2019.06.22.236.L1B.AMSU_Rad.v5.0.0.0.G19174110442.hdf AMSU.nc + chmod +x h4tonccf_nc4 + ./h4tonccf_nc4 AMSU.hdf + + Done with writing netcdf file AMSU.nc -- 3b. Optional: The netCDF files have two global attributes that are exceedingly large - and uninformative. If needed you can remove these attributes, you can use the ` - `ncatted`` command from +- 2b. Optional: The netCDF files have two global attributes that are exceedingly large + and uninformative. 
If needed you can remove these attributes, you can use the
+  ``ncatted`` command from
+  `NCO `_ through the following command:
+
+::
+
+  module load nco
+  ncatted -a coremetadata,global,d,,, -a StructMetadata_0,global,d,,, AMSU.nc AMSU_final.nc
+
+
+- 3. Run ``convert_amsua_L1`` to convert the AMSU_final.nc file to the DART obs_seq format.
+  **Important: Be sure to configure your namelist settings (below) before running the
+  converter. Also be sure you have compiled the ``convert_amsua_L1`` executable using
+  the proper ~/DART/build_templates/mkmf.template that includes both RTTOV and HDF-EOS2
+  libraries as described here:** :doc:`./README`
+
+::
+
+  ./convert_amsua_L1
+
+
+Check the completed ``obs_seq``. It should include brightness temperatures for
+the ``EOS_2_AMSUA_TB`` observation type. The converter should also produce the
+following metadata underneath the ``mw`` (microwave) header as shown in the table
+below. For more information on the metadata see the
+`RTTOV documentation `_.

.. container::

+
+   +-----------------------+------------------------+
+   | Metadata variable Name| Description            |
+   +=======================+========================+
+   | Sat_az                | Azimuth of satellite   |
+   |                       | position (degrees)     |
+   +-----------------------+------------------------+
+   | Sat_ze                | Zenith of satellite    |
+   |                       | position (degrees)     |
+   +-----------------------+------------------------+
+   | Platform_id           | EOS (9), RTTOV User    |
+   |                       | Guide, Table 2         |
+   +-----------------------+------------------------+
+   | Sat_id                | (2), RTTOV User        |
+   |                       | Guide, Table 2         |
+   +-----------------------+------------------------+
+   | Sensor_id             | AMSU-A (3), RTTOV User |
+   |                       | Guide, Table 2         |
+   +-----------------------+------------------------+
+   | Channel               | Microwave frequency    |
+   |                       | channel (1-15)         |
+   +-----------------------+------------------------+
+   | Mag_field             | Earth magnetic field   |
+   |                       | strength (Gauss)       |
+   +-----------------------+------------------------+
+   | cosbk                 | Cosine of angle between|
+   |                       | magnetic field and     |
+   |                       | viewing direction      |
+   +-----------------------+------------------------+
+   | Fastem_p(1-5)         | Land/sea-ice parameters|
+   |                       | 1-5 for FASTEM         |
+   |                       | emissivity model       |
+   |                       | Table 21, RTTOV User   |
+   |                       | Guide                  |
+   +-----------------------+------------------------+

Namelist
~~~~~~~~

-The ``convert_amsua_L1`` converter requries :doc:`../../forward_operators/obs_def_rttov_mod`
+The ``convert_amsua_L1`` converter requires :doc:`../../forward_operators/obs_def_rttov_mod`
Only two ``&obs_def_rttov_nml`` options are required when converting
the observations: ``use_zeeman`` and ``rttov_sensor_db_file``.

Be aware that if the RTTOV namelist option ``use_zeeman = .true.``
certain metadata must be available in the observation. This is not fully
-implemented in the AMSU-A observation converter. For more information,
+implemented in the AMSU-A observation converterm so we recommend setting
+``use_zeeman = .false.``. For more information,
please see GitHub Issue 99 “`AIRS AMSUA observation converter … Zeeman
coefficients and channels `__”

@@ -206,6 +260,13 @@ To facilitate the selection of channels, either the ``Integer`` or ``String`` va
may be used to specify ``channel_list`` within ``&convert_amsua_L1_nml``. The
`Documentation` and `netCDF` values are provided for reference only.
+For example, the following ``channel_list`` settings are identical and
+specify the AMSU channels centered on 50.3 and 89 GHz:
+
+::
+
+  channel_list = 3,15
+  channel_list = 'A1-1','A1-13'

.. container::

diff --git a/observations/obs_converters/AIRS/work/input.nml b/observations/obs_converters/AIRS/work/input.nml
index 8b83eddebf..fae76a18f4 100644
--- a/observations/obs_converters/AIRS/work/input.nml
+++ b/observations/obs_converters/AIRS/work/input.nml
@@ -48,7 +48,6 @@
 # All these are identical:
 # channel_list = 3,15
 # channel_list = 'A1-1','A1-13'
-# channel_list = 50.3,89

 &convert_amsu_L1_nml
    l1_files = '../data/AIRS.2019.06.22.236.L1B.AMSU_Rad.v5.0.0.0.G19174110442.nc',
@@ -66,14 +65,6 @@
    verbose = 1
 /

-# The 'L1_AMSUA_to_netcdf.f90' program is not working yet.
-&L1_AMSUA_to_netcdf_nml
-   file_name  = '../data/AIRS.2019.06.22.236.L1B.AMSU_Rad.v5.0.0.0.G19174110442.hdf'
-   outputfile = 'amsua_bt.nc'
-   track      = 23
-   xtrack     = 30
-   /
-
 &obs_sequence_nml
    write_binary_obs_sequence = .false.

From d99b3e449cb2e83205c0300e8b9789ddd93f7661 Mon Sep 17 00:00:00 2001
From: Helen Kershaw 
Date: Wed, 20 Mar 2024 10:40:17 -0400
Subject: [PATCH 19/36] chore: fix indenting for list

---
 .../obs_converters/AIRS/convert_amsu_L1.rst   | 67 +++++++++----------
 1 file changed, 32 insertions(+), 35 deletions(-)

diff --git a/observations/obs_converters/AIRS/convert_amsu_L1.rst b/observations/obs_converters/AIRS/convert_amsu_L1.rst
index 40faa7f8ee..18cf51bbed 100644
--- a/observations/obs_converters/AIRS/convert_amsu_L1.rst
+++ b/observations/obs_converters/AIRS/convert_amsu_L1.rst
@@ -61,43 +61,40 @@ Advanced Microwave Sounding Unit (AMSU-A) L1B Brightness Temperatures

 Perform the following steps to convert the AMSU_L1 observations:

-- 1. Download the `h4tonccf_nc4 tool `_ provided
-  from the hdf-eos website. Options are provided for Mac, Linux and Windows platforms.
-  For example, the following command downloads the CentOS7 v1.3 executable that
-  works for Derecho:
-::
-
-  wget https://hdfeos.org/software/h4cflib/bin/linux/v1.3/CentOS7/h4tonccf_nc4
-
-- 2. Convert the AMSU data file from HDF-EOS to netCDF format using the ``h4tonccf_nc4``
-  exectuable as shown below. Be sure to provide execute permission first:
-::
-
-  chmod +x h4tonccf_nc4
-  ./h4tonccf_nc4 AMSU.hdf
-
-  Done with writing netcdf file AMSU.nc
-
-- 2b. Optional: The netCDF files have two global attributes that are exceedingly large
-  and uninformative. If needed you can remove these attributes, you can use the
-  ``ncatted`` command from
-  `NCO `_ through the following command:
-
-::
-
-  module load nco
-  ncatted -a coremetadata,global,d,,, -a StructMetadata_0,global,d,,, AMSU.nc AMSU_final.nc
-
-- 3. Run ``convert_amsua_L1`` to convert the AMSU_final.nc file to the DART obs_seq format.
-  **Important: Be sure to configure your namelist settings (below) before running the
-  converter. Also be sure you have compiled the ``convert_amsua_L1`` executable using
-  the proper ~/DART/build_templates/mkmf.template that includes both RTTOV and HDF-EOS2
-  libraries as described here:** :doc:`./README`
+1. Download the `h4tonccf_nc4 tool `_ provided
+   from the hdf-eos website. Options are provided for Mac, Linux and Windows platforms.
+   For example, the following command downloads the CentOS7 v1.3 executable that
+   works for Derecho:
+   ::
+
+      wget https://hdfeos.org/software/h4cflib/bin/linux/v1.3/CentOS7/h4tonccf_nc4
+
+2. Convert the AMSU data file from HDF-EOS to netCDF format using the ``h4tonccf_nc4``
+   executable as shown below. 
Be sure to provide execute permission first:
+   ::
+
+      chmod +x h4tonccf_nc4
+      ./h4tonccf_nc4 AMSU.hdf
+
+      Done with writing netcdf file AMSU.nc
+
+2. b. Optional: The netCDF files have two global attributes that are exceedingly large and uninformative. If needed, you can remove these attributes using the
+   ``ncatted`` command from
+   `NCO `_ through the following command:
+   ::
+
+      module load nco
+      ncatted -a coremetadata,global,d,,, -a StructMetadata_0,global,d,,, AMSU.nc AMSU_final.nc
+
+3. Run ``convert_amsua_L1`` to convert the AMSU_final.nc file to the DART obs_seq format.
+   **Important:** Be sure to configure your namelist settings (below) before running the
+   converter. Also be sure you have compiled the ``convert_amsua_L1`` executable using
+   the proper ~/DART/build_templates/mkmf.template that includes both RTTOV and HDF-EOS2
+   libraries as described here: :doc:`./README`

-::
+   ::

-  ./convert_amsua_L1
+      ./convert_amsua_L1


 Check the completed ``obs_seq``. It should include brightness temperatures for

From 779b96df8494cb2e71c3d7a904e329ee6e59e206 Mon Sep 17 00:00:00 2001
From: Helen Kershaw 
Date: Wed, 20 Mar 2024 11:12:08 -0400
Subject: [PATCH 20/36] use link anchors instead of relative links

---
 observations/obs_converters/AIRS/convert_airs_L2.rst | 4 ++--
 observations/obs_converters/AIRS/convert_amsu_L1.rst | 2 +-
 observations/obs_converters/gps/gps.rst              | 2 ++
 3 files changed, 5 insertions(+), 3 deletions(-)

diff --git a/observations/obs_converters/AIRS/convert_airs_L2.rst b/observations/obs_converters/AIRS/convert_airs_L2.rst
index d2b178051c..e8e80d3a09 100644
--- a/observations/obs_converters/AIRS/convert_airs_L2.rst
+++ b/observations/obs_converters/AIRS/convert_airs_L2.rst
@@ -108,7 +108,7 @@ in log space, between the levels.

 The converter program reads each AIRS hdf file granule and outputs a DART obs_seq
 file containing up to 56700 observations. Only those with a quality control of 0 (Best)
 are kept. The resulting obs_seq files can be merged with the
-:doc:`../../../assimilation_code/programs/obs_sequence_tool/obs_sequence_tool` into
+:ref:`obs sequence tool` into
 larger time periods.

 During the execution of the obs converter, it is possible to restrict the output
@@ -222,6 +222,6 @@ location, and compute the mean moisture value. This code has not been implemente
 yet, and would require a different QTY_xxx to distinguish it from the simple
 location/value moisture obs. The observation converter would also have to bring
 in moisture observation metadata for this forward operator. See the
-GPS non-local operator code (:doc:`../gps/gps`) for an example of how this
+GPS non-local operator code (:ref:`gps`) for an example of how this
 would need to be implemented.

diff --git a/observations/obs_converters/AIRS/convert_amsu_L1.rst b/observations/obs_converters/AIRS/convert_amsu_L1.rst
index 18cf51bbed..71b4cdcd07 100644
--- a/observations/obs_converters/AIRS/convert_amsu_L1.rst
+++ b/observations/obs_converters/AIRS/convert_amsu_L1.rst
@@ -145,7 +145,7 @@ below. For more information on the metadata see the
 Namelist
 ~~~~~~~~

-The ``convert_amsua_L1`` converter requires :doc:`../../forward_operators/obs_def_rttov_mod`
+The ``convert_amsua_L1`` converter requires :ref:`obs_def_rttov_mod`
 Only two ``&obs_def_rttov_nml`` options are required when converting
 the observations: ``use_zeeman`` and ``rttov_sensor_db_file``.
diff --git a/observations/obs_converters/gps/gps.rst b/observations/obs_converters/gps/gps.rst index 2f018a51e2..772ce806b6 100644 --- a/observations/obs_converters/gps/gps.rst +++ b/observations/obs_converters/gps/gps.rst @@ -1,3 +1,5 @@ +.. _gps: + GPS Observations ================ From 0ca542183370fe98732e88ff4b34f73991d8671a Mon Sep 17 00:00:00 2001 From: ann-norcio Date: Thu, 21 Mar 2024 12:37:22 -0400 Subject: [PATCH 21/36] Create CITATION.cff Adding a citation file to help user correctly cite DART. --- CITATION.cff | 9 +++++++++ 1 file changed, 9 insertions(+) create mode 100644 CITATION.cff diff --git a/CITATION.cff b/CITATION.cff new file mode 100644 index 0000000000..3b031ea7d4 --- /dev/null +++ b/CITATION.cff @@ -0,0 +1,9 @@ +cff-version: 1.2.0 +message: "To cite DART, please use the following metadata. Update the DART version and year as appropriate." +title: "The Data Assimilation Research Testbed" +version: "X.Y.Z" +date-released: 2024 +doi: "http://doi.org/10.5065/D6WQ0202" +authors: + - names: "UCAR/NCAR/CISL/DAReS" + city: "Boulder, Colorado" From 2bf5dd7d2e9a1858ffb8c2151e3c00cfe725758f Mon Sep 17 00:00:00 2001 From: ann-norcio Date: Thu, 21 Mar 2024 15:06:51 -0400 Subject: [PATCH 22/36] Update CITATION.cff --- CITATION.cff | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CITATION.cff b/CITATION.cff index 3b031ea7d4..670c7e24b7 100644 --- a/CITATION.cff +++ b/CITATION.cff @@ -2,7 +2,7 @@ cff-version: 1.2.0 message: "To cite DART, please use the following metadata. Update the DART version and year as appropriate." title: "The Data Assimilation Research Testbed" version: "X.Y.Z" -date-released: 2024 +date-released: "2024" doi: "http://doi.org/10.5065/D6WQ0202" authors: - names: "UCAR/NCAR/CISL/DAReS" From 12a124616c057f2eea71c4991797e7ffabc845b1 Mon Sep 17 00:00:00 2001 From: ann-norcio Date: Thu, 21 Mar 2024 15:16:30 -0400 Subject: [PATCH 23/36] Update CITATION.cff updating date format --- CITATION.cff | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CITATION.cff b/CITATION.cff index 670c7e24b7..21c97056a1 100644 --- a/CITATION.cff +++ b/CITATION.cff @@ -2,7 +2,7 @@ cff-version: 1.2.0 message: "To cite DART, please use the following metadata. Update the DART version and year as appropriate." title: "The Data Assimilation Research Testbed" version: "X.Y.Z" -date-released: "2024" +date-released: "2024-03-13" doi: "http://doi.org/10.5065/D6WQ0202" authors: - names: "UCAR/NCAR/CISL/DAReS" From b98b8577dfdda3f22de6b84f6ac6485d3c1f5988 Mon Sep 17 00:00:00 2001 From: ann-norcio Date: Thu, 21 Mar 2024 15:20:00 -0400 Subject: [PATCH 24/36] Update CITATION.cff --- CITATION.cff | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CITATION.cff b/CITATION.cff index 21c97056a1..c87330b2f5 100644 --- a/CITATION.cff +++ b/CITATION.cff @@ -5,5 +5,5 @@ version: "X.Y.Z" date-released: "2024-03-13" doi: "http://doi.org/10.5065/D6WQ0202" authors: - - names: "UCAR/NCAR/CISL/DAReS" + - name: "UCAR/NCAR/CISL/DAReS" city: "Boulder, Colorado" From fcaf70746b7b94fa01400dc0a97c19c0f0f25edd Mon Sep 17 00:00:00 2001 From: Helen Kershaw <20047007+hkershaw-brown@users.noreply.github.com> Date: Fri, 22 Mar 2024 12:15:39 -0400 Subject: [PATCH 25/36] doi as 10.XXX --- CITATION.cff | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CITATION.cff b/CITATION.cff index c87330b2f5..08550c7e99 100644 --- a/CITATION.cff +++ b/CITATION.cff @@ -3,7 +3,7 @@ message: "To cite DART, please use the following metadata. 
Update the DART versi title: "The Data Assimilation Research Testbed" version: "X.Y.Z" date-released: "2024-03-13" -doi: "http://doi.org/10.5065/D6WQ0202" +doi: "10.5065/D6WQ0202" authors: - name: "UCAR/NCAR/CISL/DAReS" city: "Boulder, Colorado" From 5bdf98308c9e20daf93888aaba3b0b93876f9ea3 Mon Sep 17 00:00:00 2001 From: Marlena Smith <44214771+mjs2369@users.noreply.github.com> Date: Tue, 26 Mar 2024 12:30:14 -0600 Subject: [PATCH 26/36] Fix text formatting in convert_airs_L2.rst Co-authored-by: Brett Raczka --- observations/obs_converters/AIRS/convert_airs_L2.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/observations/obs_converters/AIRS/convert_airs_L2.rst b/observations/obs_converters/AIRS/convert_airs_L2.rst index e39da0f450..5504ad4b52 100644 --- a/observations/obs_converters/AIRS/convert_airs_L2.rst +++ b/observations/obs_converters/AIRS/convert_airs_L2.rst @@ -78,7 +78,7 @@ the AIRS2RET 7.0 data set within your search results. The full name is listed as **Aqua/AIRS L2 Standard Physical Retrieval (AIRS-only) V7.0 (AIRS2RET)**. Next, click on the ``Subset/Get Data`` link within the `Data Access` portion of the webpage. This will bring up a separate window that allows you to refine your search results -by 1) ``Refine range (time)`` and 2) `Refine region (spatial)`. +by 1) ``Refine range (time)`` and 2) ``Refine region (spatial)``. There are various options for downloading, however,the most straightforward approach for macOS and Linux users is to use the ``wget`` command. The ``download instructions`` From 7bd7f4253efac6b400d35b28689d29995ca26b01 Mon Sep 17 00:00:00 2001 From: Marlena Smith <44214771+mjs2369@users.noreply.github.com> Date: Tue, 26 Mar 2024 12:30:59 -0600 Subject: [PATCH 27/36] Fixing spacing in text in convert_airs_L2.rst Co-authored-by: Brett Raczka --- observations/obs_converters/AIRS/convert_airs_L2.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/observations/obs_converters/AIRS/convert_airs_L2.rst b/observations/obs_converters/AIRS/convert_airs_L2.rst index 5504ad4b52..b0447bbee7 100644 --- a/observations/obs_converters/AIRS/convert_airs_L2.rst +++ b/observations/obs_converters/AIRS/convert_airs_L2.rst @@ -80,7 +80,7 @@ the AIRS2RET 7.0 data set within your search results. The full name is listed as bring up a separate window that allows you to refine your search results by 1) ``Refine range (time)`` and 2) ``Refine region (spatial)``. -There are various options for downloading, however,the most straightforward approach +There are various options for downloading, however, the most straightforward approach for macOS and Linux users is to use the ``wget`` command. The ``download instructions`` provide the proper wget flags/options. The ``Download Links List`` provides the AIRS file list based on your search results. 
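
For reference, a typical retrieval using the saved ``Download Links List`` is
sketched below. The list filename here is hypothetical, and the exact flags on
the GES DISC ``download instructions`` page are authoritative for your account
setup:

::

   wget --load-cookies ~/.urs_cookies --save-cookies ~/.urs_cookies \
        --keep-session-cookies --content-disposition -i subset_AIRS2RET_links.txt

The ``-i`` option reads one URL per line from the links list, and the cookie
options let ``wget`` carry the Earthdata login session across the redirects
used by the GES DISC servers.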
From 0c9c6139e9516aacb3a0f4b53e19a3974479c4a5 Mon Sep 17 00:00:00 2001 From: Marlena Smith <44214771+mjs2369@users.noreply.github.com> Date: Tue, 26 Mar 2024 12:31:21 -0600 Subject: [PATCH 28/36] Fixing spacing in text in convert_amsu_L1.rst Co-authored-by: Brett Raczka --- observations/obs_converters/AIRS/convert_amsu_L1.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/observations/obs_converters/AIRS/convert_amsu_L1.rst b/observations/obs_converters/AIRS/convert_amsu_L1.rst index 71b4cdcd07..93d6e98b4a 100644 --- a/observations/obs_converters/AIRS/convert_amsu_L1.rst +++ b/observations/obs_converters/AIRS/convert_amsu_L1.rst @@ -44,7 +44,7 @@ the **AIRS/Aqua L1B AMSU (A1/A2) geolocated and calibrated brightness temperatur Next, under the Data Access header, click on `Subset/Get Data`, then refine your search results by 1) data range (time) and 2) spatial region. -There are various options for downloading, however,the most straightforward approach +There are various options for downloading, however, the most straightforward approach for macOS and Linux users is to use the ``wget`` command. The ``download instructions`` provide the proper wget flags/options. The ``Download Links List`` provides the AMSU-A file list based on your search results. From c049ee63b8cea9178af1c764fc97c9ee1bba248e Mon Sep 17 00:00:00 2001 From: Marlena Smith <44214771+mjs2369@users.noreply.github.com> Date: Tue, 26 Mar 2024 12:32:27 -0600 Subject: [PATCH 29/36] Adding missing period in convert_amsu_L1.rst text Co-authored-by: Brett Raczka --- observations/obs_converters/AIRS/convert_amsu_L1.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/observations/obs_converters/AIRS/convert_amsu_L1.rst b/observations/obs_converters/AIRS/convert_amsu_L1.rst index 93d6e98b4a..8dbd2867f5 100644 --- a/observations/obs_converters/AIRS/convert_amsu_L1.rst +++ b/observations/obs_converters/AIRS/convert_amsu_L1.rst @@ -145,7 +145,7 @@ below. For more information on the metadata see the Namelist ~~~~~~~~ -The ``convert_amsua_L1`` converter requires :ref:`obs_def_rttov_mod` +The ``convert_amsua_L1`` converter requires :ref:`obs_def_rttov_mod`. Only two ``&obs_def_rttov_nml`` options are required when converting the observations: ``use_zeeman`` and ``rttov_sensor_db_file``. From 3021cb16d60f9ab275a64b5e6a61762c5fe3857d Mon Sep 17 00:00:00 2001 From: Marlee Smith Date: Tue, 26 Mar 2024 17:12:31 -0600 Subject: [PATCH 30/36] Moving example filenames for l2 files from comments in namelist to documentation --- observations/obs_converters/AIRS/convert_airs_L2.rst | 10 ++++++++++ observations/obs_converters/AIRS/work/input.nml | 7 ------- 2 files changed, 10 insertions(+), 7 deletions(-) diff --git a/observations/obs_converters/AIRS/convert_airs_L2.rst b/observations/obs_converters/AIRS/convert_airs_L2.rst index b0447bbee7..0533897f56 100644 --- a/observations/obs_converters/AIRS/convert_airs_L2.rst +++ b/observations/obs_converters/AIRS/convert_airs_L2.rst @@ -159,6 +159,16 @@ The default values are shown below. More realistic values are provided in | | | each will be read and the results will be placed in a | | | | separate file with an output filename constructed based on | | | | the input filename. 
| + | | | | + | | | Example value for a version 5 file: | + | | | l2_files = '../data/AIRS.2007.11.01.001.L2.RetStd.v5.2.2.0| + | | | .G08078150655.hdf' | + | | | Example value for a version 6 file: | + | | | l2_files = '../data/AIRS.2017.01.01.110.L2.RetStd_IR.v6.0.| + | | | 31.1.G19058124823.hdf' | + | | | Example value for a version 7 file: | + | | | l2_files = '../data/AIRS.2020.06.15.224.L2.RetStd_IR.v7.0.| + | | | 4.0.G20330033505.hdf' | +--------------------+------------------------+--------------------------------------------------------------+ | l2_file_list | character(len=256) | The name of an ascii text file which contains one filename | | | | per line, NOT including the directory. Each file will be | diff --git a/observations/obs_converters/AIRS/work/input.nml b/observations/obs_converters/AIRS/work/input.nml index fae76a18f4..0651cef097 100644 --- a/observations/obs_converters/AIRS/work/input.nml +++ b/observations/obs_converters/AIRS/work/input.nml @@ -12,13 +12,6 @@ '../../../../observations/forward_operators/obs_def_AIRS_mod.f90' / -! example version 5 file: -! l2_files = '../data/AIRS.2007.11.01.001.L2.RetStd.v5.2.2.0.G08078150655.hdf' -! example version 6 file: -! l2_files = '../data/AIRS.2017.01.01.110.L2.RetStd_IR.v6.0.31.1.G19058124823.hdf' -! example version 7 file: -! l2_files = '../AIRS.2020.06.15.224.L2.RetStd_IR.v7.0.4.0.G20330033505.hdf' - &convert_airs_L2_nml l2_files = 'AIRS.2017.01.01.110.L2.RetStd_IR.v6.0.31.1.G19058124823.hdf' l2_file_list = '' From 2780baf9eb663a2e2436be36f3683f8b986fc279 Mon Sep 17 00:00:00 2001 From: Marlee Smith Date: Tue, 26 Mar 2024 17:54:34 -0600 Subject: [PATCH 31/36] Moving l2 files examples to below table --- .../obs_converters/AIRS/convert_airs_L2.rst | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/observations/obs_converters/AIRS/convert_airs_L2.rst b/observations/obs_converters/AIRS/convert_airs_L2.rst index 0533897f56..fa983b2cac 100644 --- a/observations/obs_converters/AIRS/convert_airs_L2.rst +++ b/observations/obs_converters/AIRS/convert_airs_L2.rst @@ -159,16 +159,6 @@ The default values are shown below. More realistic values are provided in | | | each will be read and the results will be placed in a | | | | separate file with an output filename constructed based on | | | | the input filename. | - | | | | - | | | Example value for a version 5 file: | - | | | l2_files = '../data/AIRS.2007.11.01.001.L2.RetStd.v5.2.2.0| - | | | .G08078150655.hdf' | - | | | Example value for a version 6 file: | - | | | l2_files = '../data/AIRS.2017.01.01.110.L2.RetStd_IR.v6.0.| - | | | 31.1.G19058124823.hdf' | - | | | Example value for a version 7 file: | - | | | l2_files = '../data/AIRS.2020.06.15.224.L2.RetStd_IR.v7.0.| - | | | 4.0.G20330033505.hdf' | +--------------------+------------------------+--------------------------------------------------------------+ | l2_file_list | character(len=256) | The name of an ascii text file which contains one filename | | | | per line, NOT including the directory. Each file will be | @@ -208,6 +198,10 @@ The default values are shown below. More realistic values are provided in | | | the converter is compatible with versions 5-7. | +--------------------+------------------------+--------------------------------------------------------------+ + Included here are some example values for the l2_files namelist option. 
+ Example version 5 file: ``l2_files = '../data/AIRS.2007.11.01.001.L2.RetStd.v5.2.2.0.G08078150655.hdf'`` + Example version 6 file: ``l2_files = '../data/AIRS.2017.01.01.110.L2.RetStd_IR.v6.0.31.1.G19058124823.hdf'`` + Example version 7 file: ``l2_files = '../data/AIRS.2020.06.15.224.L2.RetStd_IR.v7.0.4.0.G20330033505.hdf'`` Known Bugs ~~~~~~~~~~ From 4fada4260ae8cef2ae2437fa9b6411bf6fbc1c6a Mon Sep 17 00:00:00 2001 From: Marlee Smith Date: Tue, 26 Mar 2024 18:03:26 -0600 Subject: [PATCH 32/36] Fixing line break formatting --- observations/obs_converters/AIRS/convert_airs_L2.rst | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/observations/obs_converters/AIRS/convert_airs_L2.rst b/observations/obs_converters/AIRS/convert_airs_L2.rst index fa983b2cac..dd33ade9f7 100644 --- a/observations/obs_converters/AIRS/convert_airs_L2.rst +++ b/observations/obs_converters/AIRS/convert_airs_L2.rst @@ -198,10 +198,10 @@ The default values are shown below. More realistic values are provided in | | | the converter is compatible with versions 5-7. | +--------------------+------------------------+--------------------------------------------------------------+ - Included here are some example values for the l2_files namelist option. - Example version 5 file: ``l2_files = '../data/AIRS.2007.11.01.001.L2.RetStd.v5.2.2.0.G08078150655.hdf'`` - Example version 6 file: ``l2_files = '../data/AIRS.2017.01.01.110.L2.RetStd_IR.v6.0.31.1.G19058124823.hdf'`` - Example version 7 file: ``l2_files = '../data/AIRS.2020.06.15.224.L2.RetStd_IR.v7.0.4.0.G20330033505.hdf'`` + | Included here are some example values for the l2_files namelist option. + | Version 5 file: ``l2_files = '../data/AIRS.2007.11.01.001.L2.RetStd.v5.2.2.0.G08078150655.hdf'`` + | Version 6 file: ``l2_files = '../data/AIRS.2017.01.01.110.L2.RetStd_IR.v6.0.31.1.G19058124823.hdf'`` + | Version 7 file: ``l2_files = '../data/AIRS.2020.06.15.224.L2.RetStd_IR.v7.0.4.0.G20330033505.hdf'`` Known Bugs ~~~~~~~~~~ From b3e00330ff23e3d2d2c8f03e262b8a0b8743e57c Mon Sep 17 00:00:00 2001 From: kdraeder Date: Wed, 27 Mar 2024 10:36:42 -0600 Subject: [PATCH 33/36] Update convert_airs_L2 docs. Condensed the description of the vertical levels and layers. Added field names in the AIRS files which define them. Changed 'mb' to 'hPa'. Removed warnings in the namelist table about directory names in l2_files names. Example contents have ../data/file_name. Utilities_mod.f90 says "!> We agreed to support filenames/pathnames up to 256." I found no checks into whether there's a directory in the pathname, and no directory name is supplied to add to l2_files elements. Updated namelist to specify default version to be 7. --- .../obs_converters/AIRS/convert_airs_L2.nml | 2 +- .../obs_converters/AIRS/convert_airs_L2.rst | 60 +++++++++---------- 2 files changed, 31 insertions(+), 31 deletions(-) diff --git a/observations/obs_converters/AIRS/convert_airs_L2.nml b/observations/obs_converters/AIRS/convert_airs_L2.nml index 497de7da07..3aaf0f9661 100644 --- a/observations/obs_converters/AIRS/convert_airs_L2.nml +++ b/observations/obs_converters/AIRS/convert_airs_L2.nml @@ -11,6 +11,6 @@ cross_track_thin = 0 along_track_thin = 0 use_NCEP_errs = .false. 
-   version = 6
+   version = 7
 /

diff --git a/observations/obs_converters/AIRS/convert_airs_L2.rst b/observations/obs_converters/AIRS/convert_airs_L2.rst
index dd33ade9f7..8fbc451fd6 100644
--- a/observations/obs_converters/AIRS/convert_airs_L2.rst
+++ b/observations/obs_converters/AIRS/convert_airs_L2.rst
@@ -16,8 +16,8 @@ The AIRS Standard Retrieval Product consists of retrieved estimates of cloud an
 and surface properties, plus profiles of retrieved temperature, water vapor,
 ozone, carbon monoxide and methane. Estimates of the errors associated with these
 quantities will also be part of the Standard Product. The temperature profile
-vertical resolution is 28 levels total between 1100 mb and 0.1 mb, while moisture
-profile is reported at 14 atmospheric layers between 1100 mb and 50 mb. The
+vertical resolution is 28 levels total between 1100 and 0.1 hPa, while moisture
+profile is reported at 14 atmospheric layers between 1100 hPa and 50 hPa. The
 horizontal resolution is 50 km. An AIRS granule has been set as 6 minutes of data.
 There are 240 granules per day, with orbit repeat cycle of approximately 16 days.

@@ -89,21 +89,22 @@ convert_airs_L2.f90
 -------------------

 The ``convert_airs_L2`` converter is for **temperature and moisture retrievals** from
-the L2 data. The temperature observations are at the
-corresponding vertical pressure levels. However, the moisture obs are the mean for
-the layer, so the location in the vertical is the midpoint, in log space, of the
-current layer and the layer above it. There is an alternative computation for the
-moisture across the layer which may be more accurate, but requires a forward
-operator subroutine to be written and for the observation converter to include
-additional metadata to support this forward operator. For more information see
-the `Future Plans` section below.
-
-The temperature observations are located on standard levels; there is a single array
-of heights in each file and all temperature data is located on one of these levels.
-The moisture observations, however, are an integrated quantity for the space between
-the levels; in their terminology the fixed heights are 'levels' and the space between
-them are 'layers'. The current converter locates the moisture obs at the midpoint,
-in log space, between the levels.
+the L2 data.
+The vertical coordinate is pressure.
+The temperature observations are defined at standard pressure levels (see Overview).
+Those are defined in each file by the array
+'StdPressureLev:L2_Standard_atmospheric&surface_product'.
+Between 2 levels is a "layer".
+A moisture observation is an average across the layer
+and is defined at the midpoint (in log(pressure)) of the layer.
+This choice places half of the mass of the layer above the midpoint and half below.
+The midpoints are defined in 'H2OPressureLay:L2_Standard_atmospheric&surface_product'.
+
+There is an alternative computation for the moisture across the layer
+which may be more accurate, but requires a forward operator subroutine
+to be written and for the observation converter to include additional metadata
+to support this forward operator.
+For more information see the Future Plans section below.

 The converter program reads each AIRS hdf file granule and outputs a DART obs_seq
@@ -154,18 +155,17 @@ The default values are shown below. 
More realistic values are provided in +--------------------+------------------------+--------------------------------------------------------------+ | Contents | Type | Description | +====================+========================+==============================================================+ - | l2_files | character(len=256), | A list of one or more names of the HDF file(s) to read, | - | | dimension(512) | NOT including the directory. If multiple files are listed, | - | | | each will be read and the results will be placed in a | - | | | separate file with an output filename constructed based on | - | | | the input filename. | + | l2_files | character(len=256), | A list of one or more names of the HDF file(s) to read. | + | | dimension(512) | If multiple files are listed, each will be read and | + | | | the results will be placed in a separate file with | + | | | an output filename constructed based on the input filename. | +--------------------+------------------------+--------------------------------------------------------------+ | l2_file_list | character(len=256) | The name of an ascii text file which contains one filename | - | | | per line, NOT including the directory. Each file will be | - | | | read and the observations converted into an output file | - | | | where the output filename is based on the input filename. | - | | | Only one of 'l2_files' and 'l2_file_list' can be | - | | | specified. The other must be ' ' (empty). | + | | | per line. Each file will be read and the observations | + | | | converted into an output file where the output filename | + | | | is based on the input filename. | + | | | Only one of 'l2_files' and 'l2_file_list' can be specified. | + | | | The other must be ' ' (empty). | +--------------------+------------------------+--------------------------------------------------------------+ | outputfile | character(len=256) | The name of the output observation sequence file. | +--------------------+------------------------+--------------------------------------------------------------+ @@ -177,11 +177,11 @@ The default values are shown below. More realistic values are provided in +--------------------+------------------------+--------------------------------------------------------------+ | lat2 | real(r8) | The North-most latitude of interest in degrees. [-90.0,90.0] | +--------------------+------------------------+--------------------------------------------------------------+ - | min_MMR_threshold | real(r8) | The data files contains 'Retrieved Water Vapor Mass Mixing | - | | | Ratio'. This is the minimum threshold (gm/kg) that will | + | min_MMR_threshold | real(r8) | The data files contain 'Retrieved Water Vapor Mass Mixing | + | | | Ratio'. This is the minimum threshold (g/kg) that will | | | | be converted into a specific humidity observation (kg/kg). | +--------------------+------------------------+--------------------------------------------------------------+ - | top_pressure_level | real(r8) | The highest pressure level of interest (in mb). | + | top_pressure_level | real(r8) | The highest pressure level of interest (in hPa). | +--------------------+------------------------+--------------------------------------------------------------+ | cross_track_thin | integer | Provides ability to thin the data by keeping every Nth data | | | | value in the cross-track scan. 
[0,30] | From 3d2f2b2fdbb3a62d363e649a5fc5bd51ccafc79f Mon Sep 17 00:00:00 2001 From: Marlena Smith <44214771+mjs2369@users.noreply.github.com> Date: Wed, 27 Mar 2024 12:45:33 -0600 Subject: [PATCH 34/36] Fixing typo in convert_amsu_L1.rst Co-authored-by: kdraeder --- observations/obs_converters/AIRS/convert_amsu_L1.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/observations/obs_converters/AIRS/convert_amsu_L1.rst b/observations/obs_converters/AIRS/convert_amsu_L1.rst index 8dbd2867f5..2b304cc6fc 100644 --- a/observations/obs_converters/AIRS/convert_amsu_L1.rst +++ b/observations/obs_converters/AIRS/convert_amsu_L1.rst @@ -151,7 +151,7 @@ the observations: ``use_zeeman`` and ``rttov_sensor_db_file``. Be aware that if the RTTOV namelist option ``use_zeeman = .true.`` certain metadata must be available in the observation. This is not fully -implemented in the AMSU-A observation converterm so we recommend setting +implemented in the AMSU-A observation converter, so we recommend setting ``use_zeeman = .false.``. For more information, please see GitHub Issue 99 “`AIRS AMSUA observation converter … Zeeman coefficients and channels `__” From df277baa255f616f35ed380e7c03dcce53f7adbe Mon Sep 17 00:00:00 2001 From: Marlee Smith Date: Wed, 27 Mar 2024 13:16:13 -0600 Subject: [PATCH 35/36] Fixing the name of the program in convert_amsu_L1.rst. The actual name of the program is convert_amsu_L1, not convert_amsua_L1 --- observations/obs_converters/AIRS/convert_amsu_L1.rst | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/observations/obs_converters/AIRS/convert_amsu_L1.rst b/observations/obs_converters/AIRS/convert_amsu_L1.rst index 2b304cc6fc..2ef3724097 100644 --- a/observations/obs_converters/AIRS/convert_amsu_L1.rst +++ b/observations/obs_converters/AIRS/convert_amsu_L1.rst @@ -86,15 +86,15 @@ Perform the following steps to convert the AMSU_L1 observations: module load nco ncatted -a coremetadata,global,d,,, -a StructMetadata_0,global,d,,, AMSU.nc AMSU_final.nc -3. Run ``convert_amsua_L1`` to convert the AMSU_final.nc file to the DART obs_seq format. +3. Run ``convert_amsu_L1`` to convert the AMSU_final.nc file to the DART obs_seq format. **Important:** Be sure to configure your namelist settings (below) before running the - converter. Also be sure you have compiled the ``convert_amsua_L1`` executable using + converter. Also be sure you have compiled the ``convert_amsu_L1`` executable using the proper ~/DART/build_templates/mkmf.template that includes both RTTOV and HDF-EOS2 libraries as described here: :doc:`./README` :: - ./convert_amsua_L1 + ./convert_amsu_L1 Check the completed ``obs_seq``. It should include brightness temperatures for @@ -145,7 +145,7 @@ below. For more information on the metadata see the Namelist ~~~~~~~~ -The ``convert_amsua_L1`` converter requires :ref:`obs_def_rttov_mod`. +The ``convert_amsu_L1`` converter requires :ref:`obs_def_rttov_mod`. Only two ``&obs_def_rttov_nml`` options are required when converting the observations: ``use_zeeman`` and ``rttov_sensor_db_file``. @@ -165,7 +165,7 @@ The default values are shown below. 
More realistic values are provided in :: - &convert_amsua_L1_nml + &convert_amsu_L1_nml l1_files = '' l1_file_list = '' outputfile = '' @@ -254,7 +254,7 @@ Documentation->README Document `found here Date: Wed, 27 Mar 2024 16:56:06 -0600 Subject: [PATCH 36/36] Update CHANGELOG and conf.py --- CHANGELOG.rst | 19 +++++++++++++++++++ conf.py | 2 +- 2 files changed, 20 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 036388428e..9fa8f5d357 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -22,6 +22,25 @@ individual files. The changes are now listed with the most recent at the top. +**March 27 2024 :: WRF-Hydro Developments; AIRS converter documentation update; Add citation.cff file. Tag v11.4.0** + +- WRF-Hydro: + - Added a new perfect model obs experimental capability to HydroDART + - Modified the Streamflow obs converter to allow for better diagnostics: allows DART to + compute obs space diagnostics on all gauges from the Routelink + - Enhanced performance in the model_mod and noah_hydro_mod when running a full CONUS domain + - Improved HydroDART Diagnostics with new capabilities (saves the hydrographs in a high-resolution + pdf, handles hybrid DA components, separate plots for the hybrid statistics, allows the openloop + to have different ens size and gauges than the DA runs) +- AIRS and AMSU-A observation converters: + - Updated the documentation to use up-to-date build suggestions for the HDFEOS library + - Updated the AIRS converter code to be able to use version 7 of the AIRS data formats + - Removed unused and non-functional code: AIRS/BUILD_HDF-EOS.sh, AIRS/L1_AMSUA_to_netcdf.f90, + AIRS/shell_scripts/Build_HDF_to_netCDF.sh, AIRS/shell_scripts/Convert_HDF_to_netCDF.csh + - Removed the unnecessary entries from obs_def_rttov_nml in the input.nml +- Added a citation.cff file to help users correctly cite DART software - creates a link to cite + the repository on the landing page sidebar on GitHub. + **March 13 2024 :: Update WRF-DART scripts and bug template to Derecho; remove no-op routines in ensemble manager. Tag v11.3.1** - Updated the csh scripting templates used to run WRF-DART and WRF-DART tutorial from Cheyenne to Derecho diff --git a/conf.py b/conf.py index 4ecb5b9aaf..bbdaa06a7d 100644 --- a/conf.py +++ b/conf.py @@ -21,7 +21,7 @@ author = 'Data Assimilation Research Section' # The full version, including alpha/beta/rc tags -release = '11.3.1' +release = '11.4.0' root_doc = 'index' # -- General configuration ---------------------------------------------------