     "MYPE",
 ]
 _BOUT_TIME_DEPENDENT_META_VARS = ["iteration", "hist_hi", "tt"]
+_BOUT_GEOMETRY_VARS = [
+    "ixseps1",
+    "ixseps2",
+    "jyseps1_1",
+    "jyseps2_1",
+    "jyseps1_2",
+    "jyseps2_2",
+    "nx",
+    "ny",
+    "ny_inner",
+]
 
 
 # This code should run whenever any function from this module is imported
@@ -350,6 +361,10 @@ def attrs_remove_section(obj, section):
             pass
         else:
             raise ValueError(msg)
+        for v in _BOUT_GEOMETRY_VARS:
+            if v not in ds.metadata and v in grid:
+                ds.metadata[v] = grid[v].values
+
     # Update coordinates to match particular geometry of grid
     ds = geometries.apply_geometry(ds, geometry, grid=grid)
 
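Review note: the new `_BOUT_GEOMETRY_VARS` loop fills in only the geometry metadata that the dump files did not provide; values already present in `ds.metadata` are never overwritten by the grid file. The same pattern as a self-contained sketch (the datasets and values here are made up for illustration, not xBOUT API):

```python
import xarray as xr

# Hypothetical stand-ins for dump-file metadata and a grid file
ds_metadata = {"nx": 68, "ny": 64}  # metadata already read from the dump files
grid = xr.Dataset({"ixseps1": xr.DataArray(36), "nx": xr.DataArray(999)})

for v in ["ixseps1", "ixseps2", "nx", "ny"]:
    # Only fall back to the grid file when the dump metadata lacks the key;
    # existing dump values are never overwritten
    if v not in ds_metadata and v in grid:
        ds_metadata[v] = grid[v].values

print(ds_metadata["nx"])       # 68: the dump value wins over the grid's 999
print(ds_metadata["ixseps1"])  # 36: filled in from the grid file
```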
@@ -365,6 +380,42 @@ def attrs_remove_section(obj, section):
     # BOUT++
     ds.bout.fine_interpolation_factor = 8
 
+    if ("dump" in input_type or "restart" in input_type) and ds.metadata[
+        "BOUT_VERSION"
+    ] < 4.0:
+        # Add workarounds for missing information or different conventions in data
+        # saved by BOUT++ v3.x.
+        added_location = False
+        for v in ds:
+            if ds.metadata["bout_zdim"] in ds[v].dims:
+                # All fields were saved on the field-aligned grid by BOUT++ v3.x
+                ds[v].attrs["direction_y"] = "Aligned"
+
+            if any(
+                d in ds[v].dims
+                for d in (
+                    ds.metadata["bout_xdim"],
+                    ds.metadata["bout_ydim"],
+                    ds.metadata["bout_zdim"],
+                )
+            ):
+                # zShift, etc. did not support staggered grids in BOUT++ v3.x
+                # anyway, so just treat all variables as if they were at
+                # CELL_CENTRE
+                ds[v].attrs["cell_location"] = "CELL_CENTRE"
+                added_location = True
+        if added_location:
+            warn(
+                "Detected data from BOUT++ v3.x. Treating all variables as being "
+                "at `CELL_CENTRE`. Should be similar to what BOUT++ v3.x did, but "
+                "if your code uses staggered grids, this may produce unexpected "
+                "effects in some places."
+            )
+
+        if "nz" not in ds.metadata:
+            # `nz` used to be stored as `MZ`, and `MZ` used to include an extra
+            # buffer point that was not used for data.
+            ds.metadata["nz"] = ds.metadata["MZ"] - 1
+
     if info == "terse":
         print("Read in dataset from {}".format(str(Path(datapath))))
     elif info:
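Review note: this branch encodes three BOUT++ v3.x conventions at once: z-dependent fields were always written on the field-aligned grid, staggered locations did not exist (so `CELL_CENTRE` is assumed everywhere), and the z-grid size was stored as `MZ` including one unused buffer point. A quick sanity check of the `nz` rule, with a purely illustrative value:

```python
# BOUT++ v3.x stored MZ = nz + 1, where the extra point carried no data,
# so a 64-point z grid appeared in the dump files as MZ = 65.
metadata = {"MZ": 65}  # illustrative value, not from any real file

if "nz" not in metadata:
    # Same rule as the workaround above: strip the unused buffer point
    metadata["nz"] = metadata["MZ"] - 1

assert metadata["nz"] == 64
```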
@@ -600,17 +651,40 @@ def _auto_open_mfboutdataset(
 
     paths_grid, concat_dims = _arrange_for_concatenation(filepaths, nxpe, nype)
 
-        ds = xr.open_mfdataset(
-            paths_grid,
-            concat_dim=concat_dims,
-            combine="nested",
-            data_vars=data_vars,
-            preprocess=_preprocess,
-            engine=filetype,
-            chunks=chunks,
-            join="exact",
-            **kwargs,
-        )
+        try:
+            ds = xr.open_mfdataset(
+                paths_grid,
+                concat_dim=concat_dims,
+                combine="nested",
+                data_vars=data_vars,
+                preprocess=_preprocess,
+                engine=filetype,
+                chunks=chunks,
+                join="exact",
+                **kwargs,
+            )
+        except ValueError as e:
+            message_to_catch = (
+                "some variables in data_vars are not data variables on the first "
+                "dataset:"
+            )
+            if str(e)[: len(message_to_catch)] == message_to_catch:
+                # Fall back to concatenating any variables that differ between
+                # files, as a workaround to support opening older data.
+                ds = xr.open_mfdataset(
+                    paths_grid,
+                    concat_dim=concat_dims,
+                    combine="nested",
+                    data_vars="different",
+                    preprocess=_preprocess,
+                    engine=filetype,
+                    chunks=chunks,
+                    join="exact",
+                    **kwargs,
+                )
+            else:
+                raise
     else:
         # datapath was a nested list of Datasets
 
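Review note: catching the error by matching the start of the `ValueError` message is brittle if xarray ever rewords it, but xarray raises no dedicated exception type here, so there is little alternative. The retry pattern itself, reduced to a self-contained sketch (`open_with_fallback` is a hypothetical helper, and `data_vars="minimal"` stands in for the caller-supplied default):

```python
import xarray as xr


def open_with_fallback(paths, concat_dim, **kwargs):
    """Open and concatenate BOUT++ output files, retrying with
    data_vars="different" for older files where nominally-constant
    variables actually vary between processor files."""
    message_to_catch = (
        "some variables in data_vars are not data variables on the first dataset:"
    )
    try:
        return xr.open_mfdataset(
            paths, concat_dim=concat_dim, combine="nested",
            data_vars="minimal", join="exact", **kwargs,
        )
    except ValueError as e:
        if not str(e).startswith(message_to_catch):
            raise  # an unrelated ValueError: let it propagate
        # Retry, concatenating any variable that differs between files
        return xr.open_mfdataset(
            paths, concat_dim=concat_dim, combine="nested",
            data_vars="different", join="exact", **kwargs,
        )
```

`str(e).startswith(...)` is equivalent to the slice comparison in the patch; the second call differs only in `data_vars="different"`, which tells xarray to concatenate any variable whose values differ between files instead of raising.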
@@ -744,8 +818,16 @@ def get_nonnegative_scalar(ds, key, default=1, info=True):
 
     # Check whether this is a single file squashed from the multiple output files of a
     # parallel run (i.e. NXPE*NYPE > 1 even though there is only a single file to read).
-    nx = ds["nx"].values
-    ny = ds["ny"].values
+    if "nx" in ds:
+        nx = ds["nx"].values
+    else:
+        # Workaround for older data files
+        nx = ds["MXSUB"].values * ds["NXPE"].values + 2 * ds["MXG"].values
+    if "ny" in ds:
+        ny = ds["ny"].values
+    else:
+        # Workaround for older data files
+        ny = ds["MYSUB"].values * ds["NYPE"].values
     nx_file = ds.dims["x"]
     ny_file = ds.dims["y"]
     is_squashed_doublenull = False
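Review note: the reconstruction matches BOUT++'s domain decomposition: the global x size includes `MXG` guard cells on each side, while the global y size counts only the interior points. A worked example with assumed, purely illustrative decomposition values:

```python
# Per-processor sizes and processor counts as an old dump file (which lacked
# global "nx"/"ny") would store them; the numbers here are made up
MXSUB, NXPE, MXG = 16, 4, 2  # x points per processor, processors in x, x guards
MYSUB, NYPE = 16, 8          # y points per processor, processors in y

nx = MXSUB * NXPE + 2 * MXG  # 16 * 4 + 2 * 2 = 68, interior plus both x guards
ny = MYSUB * NYPE            # 16 * 8 = 128, guard cells excluded in y

assert (nx, ny) == (68, 128)
```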
@@ -758,7 +840,10 @@ def get_nonnegative_scalar(ds, key, default=1, info=True):
             mxg = 0
 
         # Check if there are two divertor targets present
-        if ds["jyseps1_2"] > ds["jyseps2_1"]:
+        # Note: if jyseps2_1 and jyseps1_2 are not in ds, it probably indicates
+        # older data, and the upper target boundary cells were most likely not
+        # saved anyway, so continue as if they were absent.
+        if "jyseps2_1" in ds and ds["jyseps1_2"] > ds["jyseps2_1"]:
             upper_target_cells = myg
         else:
             upper_target_cells = 0
@@ -771,7 +856,13 @@ def get_nonnegative_scalar(ds, key, default=1, info=True):
 
             nxpe = 1
             nype = 1
-            is_squashed_doublenull = (ds["jyseps2_1"] != ds["jyseps1_2"]).values
+            if "jyseps2_1" in ds:
+                is_squashed_doublenull = (
+                    ds["jyseps2_1"] != ds["jyseps1_2"]
+                ).values
+            else:
+                # For older data with no jyseps2_1 or jyseps1_2 in the dataset,
+                # we probably do not need to handle double-null data squashed
+                # with upper target points.
+                is_squashed_doublenull = False
         elif ny_file == ny + 2 * myg:
             # Older squashed file from a double-null grid but containing only
             # lower target boundary cells.
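Review note: `jyseps2_1 != jyseps1_2` is the usual BOUT++ indicator of a second (upper) x-point, so defaulting to `False` when the attributes are absent matches the comment's reasoning. The decision reduced to a self-contained sketch (plain dicts stand in for the dataset; `squashed_doublenull` is a hypothetical name, not xBOUT API):

```python
def squashed_doublenull(ds):
    """Return True for a squashed file from a double-null topology (two
    x-points), following the jyseps branch-cut convention of BOUT++."""
    if "jyseps2_1" not in ds:
        # Pre-jyseps data: assume single-null, as in the workaround above
        return False
    # The two branch-cut indices differ only when an upper x-point exists
    return ds["jyseps2_1"] != ds["jyseps1_2"]


assert squashed_doublenull({}) is False
assert squashed_doublenull({"jyseps2_1": 16, "jyseps1_2": 16}) is False
assert squashed_doublenull({"jyseps2_1": 16, "jyseps1_2": 40}) is True
```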