diff --git a/docs/api/changelog.rst b/docs/api/changelog.rst
index a0f68b104..55d5fdcdd 100644
--- a/docs/api/changelog.rst
+++ b/docs/api/changelog.rst
@@ -12,9 +12,17 @@ Unreleased
 Fixed
 ~~~~~
 
+- Fix :func:`imod.mf6.open_cbc`: it failed with ``flowja=False`` on budget output for
+  DISV models if the model contained inactive cells.
 - :func:`imod.prepare.fill` previously assigned to the result of an xarray
   ``.sel`` operation. This might not work for dask backed data and has been
-  fixed.
+  addressed.
+
+Added
+~~~~~
+
+- Added :func:`imod.mf6.open_dvs` to read dependent variable output files like
+  the water content file of :class:`imod.mf6.UnsaturatedZoneFlow`.
 
 Changed
 ~~~~~~~
diff --git a/imod/mf6/__init__.py b/imod/mf6/__init__.py
index d72b1bdb8..17aaf048c 100644
--- a/imod/mf6/__init__.py
+++ b/imod/mf6/__init__.py
@@ -39,7 +39,14 @@
 from imod.mf6.mst import MobileStorageTransfer
 from imod.mf6.npf import NodePropertyFlow
 from imod.mf6.oc import OutputControl
-from imod.mf6.out import open_cbc, open_conc, open_hds, read_cbc_headers, read_grb
+from imod.mf6.out import (
+    open_cbc,
+    open_conc,
+    open_dvs,
+    open_hds,
+    read_cbc_headers,
+    read_grb,
+)
 from imod.mf6.rch import Recharge
 from imod.mf6.riv import River
 from imod.mf6.simulation import Modflow6Simulation
diff --git a/imod/mf6/out/__init__.py b/imod/mf6/out/__init__.py
index 953d5fbab..55401a86f 100644
--- a/imod/mf6/out/__init__.py
+++ b/imod/mf6/out/__init__.py
@@ -69,6 +69,12 @@ def open_cbc(
     "disu": disu.open_cbc,
 }
 
+_OPEN_DVS = {
+    "dis": dis.open_dvs,
+    "disv": disv.open_dvs,
+    "disu": disu.open_dvs,
+}
+
 
 def _get_function(d: Dict[str, Callable], key: str) -> Callable:
     try:
@@ -153,6 +159,55 @@ def open_hds(
     return _open(hds_path, grb_content, dry_nan, simulation_start_time, time_unit)
 
 
+def open_dvs(
+    dvs_path: FilePath,
+    grb_path: FilePath,
+    indices: np.ndarray,
+    simulation_start_time: Optional[np.datetime64] = None,
+    time_unit: Optional[str] = "d",
+) -> GridDataArray:
+    """
+    Open modflow6 dependent variable output of advanced packages (like the water content file of the UZF package).
+
+    The data is lazily read per timestep and automatically converted into
+    (dense) xr.DataArrays or xu.UgridDataArrays, for DIS and DISV respectively.
+    The conversion is done via the information stored in the Binary Grid file
+    (GRB).
+
+
+    Parameters
+    ----------
+    dvs_path: Union[str, pathlib.Path]
+    grb_path: Union[str, pathlib.Path]
+    indices: np.ndarray
+        The indices mapping the dependent variable values to the model nodes.
+    simulation_start_time : Optional datetime
+        The time and date corresponding to the beginning of the simulation.
+        Use this to convert the time coordinates of the output array to
+        calendar time/dates. time_unit must also be present if this argument is present.
+    time_unit: Optional str
+        The time unit MF6 is working in, in string representation.
+        Only used if simulation_start_time was provided.
+        Admissible values are:
+        ns -> nanosecond
+        ms -> millisecond
+        s -> second
+        m -> minute
+        h -> hour
+        d -> day
+        w -> week
+        Units "month" or "year" are not supported, as they do not represent unambiguous timedelta durations.
+
+    Returns
+    -------
+    dvs : Union[xr.DataArray, xu.UgridDataArray]
+    """
+    grb_content = read_grb(grb_path)
+    distype = grb_content["distype"]
+    _open = _get_function(_OPEN_DVS, distype)
+    return _open(dvs_path, grb_content, indices, simulation_start_time, time_unit)
+
+
 def open_conc(
     ucn_path: FilePath,
     grb_path: FilePath,
diff --git a/imod/mf6/out/common.py b/imod/mf6/out/common.py
index b642ec57e..cb0cf884d 100644
--- a/imod/mf6/out/common.py
+++ b/imod/mf6/out/common.py
@@ -1,4 +1,5 @@
 import pathlib
+import struct
 from typing import Any, BinaryIO, Dict, List, Union
 
 import numpy as np
@@ -29,3 +30,33 @@ def get_first_header_advanced_package(
         if "flow-ja-face" not in key and "gwf_" in key:
             return header_list[0]
     return None
+
+
+def read_name_dvs(path: FilePath) -> str:
+    """
+    Reads the variable name from the first header in a dependent variable file.
+    """
+    with open(path, "rb") as f:
+        f.seek(24)  # kstp (4) + kper (4) + pertim (8) + totim (8)
+        name = struct.unpack("16s", f.read(16))[0]
+    return name.decode().strip()
+
+
+def read_times_dvs(path: FilePath, ntime: int, indices: np.ndarray) -> FloatArray:
+    """
+    Reads all total simulation times.
+    """
+    times = np.empty(ntime, dtype=np.float64)
+
+    # Compute how much to skip to the next timestamp
+    start_of_header = 16  # kstp (4) + kper (4) + pertim (8)
+    rest_of_header = 28  # text (16) + ncol (4) + nrow (4) + ilay (4)
+    data_single_layer = indices.size * 8
+    nskip = rest_of_header + data_single_layer + start_of_header
+
+    with open(path, "rb") as f:
+        f.seek(start_of_header)
+        for i in range(ntime):
+            times[i] = struct.unpack("d", f.read(8))[0]  # total simulation time
+            f.seek(nskip, 1)
+    return times
diff --git a/imod/mf6/out/dis.py b/imod/mf6/out/dis.py
index ad144287c..a3c84b8c0 100644
--- a/imod/mf6/out/dis.py
+++ b/imod/mf6/out/dis.py
@@ -17,6 +17,8 @@
     IntArray,
     _to_nan,
     get_first_header_advanced_package,
+    read_name_dvs,
+    read_times_dvs,
 )
 
 
@@ -121,6 +123,20 @@ def read_hds_timestep(
     return _to_nan(a3d, dry_nan)
 
 
+def read_dvs_timestep(
+    path: FilePath, nlayer: int, nrow: int, ncol: int, pos: int, indices: np.ndarray
+) -> FloatArray:
+    """
+    Reads all values of one timestep.
+    """
+    with open(path, "rb") as f:
+        f.seek(pos)
+        a1d = np.full(nlayer * nrow * ncol, dtype=np.float64, fill_value=np.nan)
+        f.seek(52, 1)  # skip the 52-byte record header
+        a1d[indices] = np.fromfile(f, np.float64, indices.size)
+    return a1d.reshape((nlayer, nrow, ncol))
+
+
 def open_hds(
     path: FilePath,
     grid_info: Dict[str, Any],
@@ -155,6 +171,42 @@ def open_hds(
     return data_array
 
 
+def open_dvs(
+    path: FilePath,
+    grid_info: Dict[str, Any],
+    indices: np.ndarray,
+    simulation_start_time: Optional[np.datetime64] = None,
+    time_unit: Optional[str] = "d",
+) -> xr.DataArray:
+    nlayer, nrow, ncol = grid_info["nlayer"], grid_info["nrow"], grid_info["ncol"]
+    filesize = os.path.getsize(path)
+    ntime = filesize // (52 + (indices.size * 8))
+    times = read_times_dvs(path, ntime, indices)
+    dv_name = read_name_dvs(path)
+
+    coords = grid_info["coords"]
+    coords["time"] = times
+
+    dask_list = []
+    # loop over times and add delayed arrays
+    for i in range(ntime):
+        # TODO verify dimension order
+        pos = i * (52 + indices.size * 8)
+        a = dask.delayed(read_dvs_timestep)(path, nlayer, nrow, ncol, pos, indices)
+        x = dask.array.from_delayed(a, shape=(nlayer, nrow, ncol), dtype=np.float64)
+        dask_list.append(x)
+
+    daskarr = dask.array.stack(dask_list, axis=0)
+    data_array = xr.DataArray(
+        daskarr, coords, ("time", "layer", "y", "x"), name=dv_name
+    )
+    if simulation_start_time is not None:
+        data_array = assign_datetime_coords(
+            data_array, simulation_start_time, time_unit
+        )
+    return data_array
+
+
 def open_imeth1_budgets(
     cbc_path: FilePath, grb_content: dict, header_list: List[cbc.Imeth1Header]
 ) -> xr.DataArray:
diff --git a/imod/mf6/out/disu.py b/imod/mf6/out/disu.py
index 77bb03d52..6aad3d37b 100644
--- a/imod/mf6/out/disu.py
+++ b/imod/mf6/out/disu.py
@@ -77,6 +77,16 @@ def open_hds(
     raise NotImplementedError
 
 
+def open_dvs(
+    path: FilePath,
+    grid_info: Dict[str, Any],
+    indices: np.ndarray,
+    simulation_start_time: Optional[np.datetime64] = None,
+    time_unit: Optional[str] = "d",
+) -> xr.DataArray:
+    raise NotImplementedError
+
+
 def open_imeth1_budgets(
     cbc_path: FilePath, grb_content: dict, header_list: List[cbc.Imeth1Header]
 ) -> xu.UgridDataArray:
diff --git a/imod/mf6/out/disv.py b/imod/mf6/out/disv.py
index 33184f46e..a3eba4f59 100644
--- a/imod/mf6/out/disv.py
+++ b/imod/mf6/out/disv.py
@@ -3,7 +3,6 @@
 from typing import Any, BinaryIO, Dict, List, Optional, Tuple, cast
 
 import dask
-import numba
 import numpy as np
 import scipy.sparse
 import xarray as xr
@@ -18,8 +17,13 @@
     IntArray,
     _to_nan,
     get_first_header_advanced_package,
+    read_name_dvs,
+    read_times_dvs,
 )
 
+XUGRID_FILL_VALUE = -1
+IDOMAIN_ACTIVE = 1
+
 
 def _ugrid_iavert_javert(
     iavert: IntArray, javert: IntArray
@@ -147,6 +151,20 @@ def read_hds_timestep(
     return _to_nan(a2d, dry_nan)
 
 
+def read_dvs_timestep(
+    path: FilePath, nlayer: int, ncells_per_layer: int, pos: int, indices: np.ndarray
+) -> FloatArray:
+    """
+    Reads all values of one timestep.
+    """
+    with open(path, "rb") as f:
+        f.seek(pos)
+        a1d = np.full(nlayer * ncells_per_layer, dtype=np.float64, fill_value=np.nan)
+        f.seek(52, 1)  # skip the 52-byte record header
+        a1d[indices] = np.fromfile(f, np.float64, indices.size)
+    return a1d.reshape((nlayer, ncells_per_layer))
+
+
 def open_hds(
     path: FilePath,
     grid_info: Dict[str, Any],
@@ -185,6 +203,45 @@ def open_hds(
     return xu.UgridDataArray(da, grid)
 
 
+def open_dvs(
+    path: FilePath,
+    grid_info: Dict[str, Any],
+    indices: np.ndarray,
+    simulation_start_time: Optional[np.datetime64] = None,
+    time_unit: Optional[str] = "d",
+) -> xu.UgridDataArray:
+    grid = grid_info["grid"]
+    nlayer, ncells_per_layer = grid_info["nlayer"], grid_info["ncells_per_layer"]
+    filesize = os.path.getsize(path)
+    ntime = filesize // (52 + (indices.size * 8))
+    times = read_times_dvs(path, ntime, indices)
+    dv_name = read_name_dvs(path)
+
+    coords = grid_info["coords"]
+    coords["time"] = times
+
+    dask_list = []
+    # loop over times and add delayed arrays
+    for i in range(ntime):
+        # TODO verify dimension order
+        pos = i * (52 + indices.size * 8)
+        a = dask.delayed(read_dvs_timestep)(
+            path, nlayer, ncells_per_layer, pos, indices
+        )
+        x = dask.array.from_delayed(
+            a, shape=(nlayer, ncells_per_layer), dtype=np.float64
+        )
+        dask_list.append(x)
+
+    daskarr = dask.array.stack(dask_list, axis=0)
+    da = xr.DataArray(
+        daskarr, coords, ("time", "layer", grid.face_dimension), name=dv_name
+    )
+    if simulation_start_time is not None:
+        da = assign_datetime_coords(da, simulation_start_time, time_unit)
+    return xu.UgridDataArray(da, grid)
+
+
 def open_imeth1_budgets(
     cbc_path: FilePath, grb_content: dict, header_list: List[cbc.Imeth1Header]
 ) -> xu.UgridDataArray:
@@ -291,105 +348,141 @@ def open_imeth6_budgets(
     return xu.UgridDataArray(da, grid)
 
 
-@numba.njit  # type: ignore[misc]
-def disv_lower_index(
-    ia: IntArray,
-    ja: IntArray,
-    ncells: int,
-    nlayer: int,
-    ncells_per_layer: int,
-) -> IntArray:
-    lower = np.full(ncells, -1, np.int64)
-    for i in range(ncells):
-        for nzi in range(ia[i], ia[i + 1]):
-            nzi -= 1  # python is 0-based, modflow6 is 1-based
-            j = ja[nzi] - 1  # python is 0-based, modflow6 is 1-based
-            d = j - i
-            if d < ncells_per_layer:  # upper, diagonal, horizontal
-                continue
-            elif d == ncells_per_layer:  # lower neighbor
-                lower[i] = nzi
-            else:  # skips one: must be pass through
-                npassed = int(d / ncells_per_layer)
-                for ipass in range(0, npassed):
-                    lower[i + ipass * ncells_per_layer] = nzi
-
-    return lower.reshape(nlayer, ncells_per_layer)
-
-
-def expand_indptr(ia: IntArray):
+def compute_flow_orientation(
+    edge_face_connectivity: IntArray, face_coordinates: FloatArray
+) -> Tuple[FloatArray, FloatArray]:
+    # Compute unit components (x: u, y: v)
+    nedge = len(edge_face_connectivity)
+    is_connection = edge_face_connectivity[:, 1] != XUGRID_FILL_VALUE
+    edge_faces = edge_face_connectivity[is_connection]
+    # Ensure direction matches flow from low cell index to high cell index.
+    edge_faces.sort(axis=1)
+    u = np.full(nedge, np.nan)
+    v = np.full(nedge, np.nan)
+    xy = face_coordinates[edge_faces]
+    dx = xy[:, 1, 0] - xy[:, 0, 0]
+    dy = xy[:, 1, 1] - xy[:, 0, 1]
+    t = np.sqrt(dx**2 + dy**2)
+    u[is_connection] = dx / t
+    v[is_connection] = dy / t
+    return u, v
+
+
+def mf6_csr_to_coo(ia: IntArray, ja: IntArray) -> Tuple[IntArray, IntArray]:
+    """
+    Convert 1-based MODFLOW 6 Compressed Sparse Row (CSR) matrix arrays into
+    0-based COO(rdinate) arrays.
+    """
     n = np.diff(ia)
-    return np.repeat(np.arange(ia.size - 1), n)
+    i = np.repeat(np.arange(ia.size - 1), n)
+    j = ja - 1
+    return i, j
+
+
+def alt_cumsum(a):
+    """Alternative cumsum: starts at 0 and omits the last value."""
+    out = np.empty(a.size, a.dtype)
+    out[0] = 0
+    np.cumsum(a[:-1], out=out[1:])
+    return out
+
+
+def ragged_arange(n: IntArray) -> IntArray:
+    """Equal to: np.concatenate([np.arange(e) for e in n])"""
+    return alt_cumsum(np.ones(int(n.sum()), dtype=int)) - np.repeat(alt_cumsum(n), n)
 
-def disv_horizontal_index(
+
+def disv_indices(
     ia: IntArray,
     ja: IntArray,
-    nlayer: int,
-    ncells_per_layer: int,
+    idomain: IntArray,
     edge_face_connectivity: IntArray,
-    fill_value: int,
-    face_coordinates: FloatArray,
 ):
-    # Allocate output array
+    """
+    Parameters
+    ----------
+    ia: IntArray of shape (ncell+1,)
+        MODFLOW 6 indptr of CSR connectivity matrix. 1-based.
+    ja: IntArray of shape (nconnections + ncell,)
+        MODFLOW 6 indices of CSR connectivity matrix. 1-based.
+    idomain: IntArray of shape (nlayer, nface)
+        Active cells are marked by a value of 1 (IDOMAIN_ACTIVE).
+    edge_face_connectivity: IntArray of shape (nface, 2)
+        External boundaries are marked by a second face value of -1
+        (XUGRID_FILL_VALUE).
+
+    Returns
+    -------
+    lower: IntArray of shape (nlayer, nface)
+        Contains the indices into the flow data for the lower face for each
+        cell. Lower faces without flow are marked by a value of -1.
+    horizontal: IntArray of shape (nlayer, nedge)
+        Contains the indices into the flow data for each edge. Edges
+        without flow are marked by a value of -1.
+    """
+    nlayer, ncells_per_layer = idomain.shape
+    # Allocate output arrays.
     nedge = len(edge_face_connectivity)
     horizontal = np.full((nlayer, nedge), -1)
+    lower = np.full((nlayer, ncells_per_layer), -1)
+
+    i, j = mf6_csr_to_coo(ia, ja)
+    diff = j - i
+    is_vertical = diff >= ncells_per_layer
+    # Remove diagonals as well (i == j)
+    is_horizontal = (diff > 0) & (~is_vertical)
+    # Generate a linear index into the cells.
+    index = np.arange(j.size)
+
+    # Vertical flows
+    # --------------
+    # Stored in an array of shape (nlayer, nface).
+    # For pass-through cells, set it from layers i to j using ragged_arange.
+    n_pass = diff[is_vertical] // ncells_per_layer
+    ii = np.repeat(i[is_vertical], n_pass) + ragged_arange(n_pass) * ncells_per_layer
+    lower.ravel()[ii] = np.repeat(index[is_vertical], n_pass)
+
+    # Horizontal flows
+    # ----------------
+    # Will be stored in an array of shape (nlayer, nedge).
+    # i -> j is pre-sorted (required by CSR structure). Because i -> j is
+    # sorted in terms of face numbering, we need only to figure out which order
+    # the edge_face_connectivity has. A lexsort of (i_face, j_face) results in
+    # the same ordering as the CSR structure: sorted first by i, then by j.
+
+    # Create edge face connectivity for each layer, shape: (nlayer, nface, 2).
+    # NOTE: the addition obliterates any -1 FILL value. Hence we check the original
+    # edge_face_connectivity afterwards for the outer boundaries.
+    layered_edge_faces = np.add.outer(
+        np.arange(nlayer) * ncells_per_layer,
+        edge_face_connectivity,
+    )
 
-    # Grab the index values to the horizontal connections
-    i = expand_indptr(ia)
-    j = ja - 1
-    d = j - i
-    is_horizontal = (0 < d) & (d < ncells_per_layer)
-    index = np.arange(j.size)[is_horizontal].reshape((nlayer, -1))
-
-    # i -> j is pre-sorted (required by CSR structure); the edge_faces are repeated
-    # per layer. Because i -> j is sorted in terms of face numbering, we need
-    # only to figure out which order the edge_face_connectivity has.
-    is_connection = edge_face_connectivity[:, 1] != fill_value
-    edge_faces = edge_face_connectivity[is_connection]
-    order = np.argsort(np.lexsort(edge_faces.T[::-1]))
-    # Reshuffle for every layer
-    index = index[:, order]
+    # Identify inactive faces and outer boundaries (second face == FILL).
+    is_active = idomain.ravel()[layered_edge_faces] == IDOMAIN_ACTIVE
+    is_inner_edge = edge_face_connectivity[:, 1] != XUGRID_FILL_VALUE
+    is_connection = (is_active.all(axis=2) & is_inner_edge[np.newaxis, :]).ravel()
 
-    # Now set the values in the output array
-    horizontal[:, is_connection] = index
+    # Create face i to face j connections; find the ordering.
+    i_to_j = layered_edge_faces.reshape((-1, 2))[is_connection]
+    order = np.argsort(np.lexsort(i_to_j.T[::-1]))
 
-    # Compute unit components (x: u, y: v)
-    edge_faces.sort(axis=1)
-    u = np.full(nedge, np.nan)
-    v = np.full(nedge, np.nan)
-    xy = face_coordinates[edge_faces]
-    dx = xy[:, 1, 0] - xy[:, 0, 0]
-    dy = xy[:, 1, 1] - xy[:, 0, 1]
-    t = np.sqrt(dx**2 + dy**2)
-    u[is_connection] = dx / t
-    v[is_connection] = dy / t
-    return horizontal, u, v
+    # Now set the values in the output array.
+    horizontal.ravel()[is_connection] = index[is_horizontal][order]
+    return lower, horizontal
 
 
 def disv_to_horizontal_lower_indices(
     grb_content: dict,
 ) -> Tuple[xr.DataArray, xr.DataArray, xr.DataArray, xr.DataArray]:
     grid = grb_content["grid"]
-    horizontal, u, v = disv_horizontal_index(
+    lower, horizontal = disv_indices(
         ia=grb_content["ia"],
         ja=grb_content["ja"],
-        nlayer=grb_content["nlayer"],
-        ncells_per_layer=grb_content["ncells_per_layer"],
+        idomain=grb_content["idomain"].to_numpy(),
         edge_face_connectivity=grid.edge_face_connectivity,
-        fill_value=grid.fill_value,
-        face_coordinates=grid.face_coordinates,
     )
-    lower = disv_lower_index(
-        ia=grb_content["ia"],
-        ja=grb_content["ja"],
-        ncells=grb_content["ncells"],
-        nlayer=grb_content["nlayer"],
-        ncells_per_layer=grb_content["ncells_per_layer"],
-    )
-
-    # Compute unit_vector
-
+    u, v = compute_flow_orientation(grid.edge_face_connectivity, grid.face_coordinates)
     return (
         xr.DataArray(
             horizontal, grb_content["coords"], dims=["layer", grid.edge_dimension]
diff --git a/imod/mf6/uzf.py b/imod/mf6/uzf.py
index 2376cc351..2cdf0771b 100644
--- a/imod/mf6/uzf.py
+++ b/imod/mf6/uzf.py
@@ -1,5 +1,6 @@
 import numpy as np
 import xarray as xr
+import xugrid as xu
 
 from imod.logging import init_log_decorator
 from imod.mf6.boundary_condition import AdvancedBoundaryCondition, BoundaryCondition
@@ -97,6 +98,8 @@ class UnsaturatedZoneFlow(AdvancedBoundaryCondition):
         path to output cbc-file for UZF budgets
     budgetcsv_fileout: ({"str"}, optional)
         path to output csv-file for summed budgets
+    water_content_file: ({"str"}, optional)
+        path to output file for unsaturated zone water content
     observations: [Not yet supported.]
         Default is None.
     water_mover: [Not yet supported.]
@@ -233,6 +236,7 @@ def __init__(
         save_flows=False,
         budget_fileout=None,
         budgetcsv_fileout=None,
+        water_content_file=None,
         observations=None,
         water_mover=None,
         timeseries=None,
@@ -271,6 +275,7 @@
             "save_flows": save_flows,
             "budget_fileout": budget_fileout,
             "budgetcsv_fileout": budgetcsv_fileout,
+            "water_content_file": water_content_file,
             "observations": observations,
             "water_mover": water_mover,
             "timeseries": timeseries,
@@ -301,7 +306,10 @@ def fill_stress_perioddata(self):
         for var in self._period_data:
             if self.dataset[var].size == 1:  # Prevent loading large arrays in memory
                 if self.dataset[var].values[()] is None:
-                    self.dataset[var] = xr.full_like(self["infiltration_rate"], 0.0)
+                    if isinstance(self["infiltration_rate"], xu.UgridDataArray):
+                        self.dataset[var] = xu.full_like(self["infiltration_rate"], 0.0)
+                    else:
+                        self.dataset[var] = xr.full_like(self["infiltration_rate"], 0.0)
                 else:
                     raise ValueError("{} cannot be a scalar".format(var))
 
@@ -379,9 +387,12 @@ def _package_data_to_sparse(self):
         field_spec = self._get_field_spec_from_dtype(recarr)
         field_names = [i[0] for i in field_spec]
 
-        index_spec = [("iuzno", np.int32)] + field_spec[:3]
+        n = 3
+        if isinstance(self.dataset, xu.UgridDataset):
+            n = 2
+        index_spec = [("iuzno", np.int32)] + field_spec[:n]
         field_spec = (
-            [("landflag", np.int32)] + [("ivertcon", np.int32)] + field_spec[3:]
+            [("landflag", np.int32)] + [("ivertcon", np.int32)] + field_spec[n:]
         )
         sparse_dtype = np.dtype(index_spec + field_spec)
 
diff --git a/imod/templates/mf6/gwf-uzf.j2 b/imod/templates/mf6/gwf-uzf.j2
index 13dfd28a4..fe0c8b7da 100644
--- a/imod/templates/mf6/gwf-uzf.j2
+++ b/imod/templates/mf6/gwf-uzf.j2
@@ -15,6 +15,8 @@ begin options
   budget fileout {{budget_fileout}}{% endif %}
 {%- if budgetcsv_fileout is defined %}
   budgetcsv fileout {{budgetcsv_fileout}}{% endif %}
+{%- if water_content_file is defined %}
+  water_content fileout {{water_content_file}}{% endif %}
 {%- if ts_filerecord is defined %}
   ts6 filein {{ts6_filename}}{% endif %}
 {%- if obs_filerecord is defined %}
diff --git a/imod/tests/test_mf6/test_mf6_out.py b/imod/tests/test_mf6/test_mf6_out.py
index 2f3dc2018..9afe60286 100644
--- a/imod/tests/test_mf6/test_mf6_out.py
+++ b/imod/tests/test_mf6/test_mf6_out.py
@@ -183,6 +183,86 @@ def test_dis_indices__idomain():
     assert np.allclose(lower.reshape(nlayer, nrow), lower_expected)
 
 
+def test_mf6_csr_to_coo():
+    indptr = np.array([1, 1, 2, 4, 6])
+    indices = np.array([1, 2, 3, 4, 5])
+    i, j = imod.mf6.out.disv.mf6_csr_to_coo(indptr, indices)
+    np.testing.assert_array_equal(i, [1, 2, 2, 3, 3])
+    np.testing.assert_array_equal(j, indices - 1)
+
+
+def test_disv_indices():
+    r"""
+    Top view of grid:
+          ______
+        /|      |\
+       / |      | \
+      /  |      |  \
+     / 1 |   2  | 3 \
+    /____|______|____\
+
+    Vertical cross-section of idomain:
+
+    ┌─────┌─────┌─────┐
+    │     │     │     │
+    │  0  │  1  │  0  │
+    │    1│    2│    3│
+    ┌─────┌─────┌─────┐
+    │     │     │     │
+    │  0  │ -1  │  1  │
+    │    4│    5│    6│
+    ┌─────┌─────┌─────┐
+    │     │     │     │
+    │  1  │  1  │  1  │
+    │    7│    8│    9│
+    └─────└─────└─────┘
+
+    nlayer = 3, nface = 3
+    1-based cell number in lower right corner.
+ """ + nlayer = 3 + nface = 3 + shape = (nlayer, nface) + idomain = np.zeros(shape, dtype=int) + idomain[0, 1] = 1 + idomain[1, 1] = -1 + idomain[1, 2] = 1 + idomain[2, :] = 1 + + # i = 2, 6, 7, 8 + # j = 8, 9, 8, 9 + ia = np.array([0, 0, 1, 1, 1, 1, 2, 3, 4]) + ja = np.array([8, 9, 8, 9]) + + # Top view: 3 cells + edge_face_connectivity = np.array( + [ + [0, -1], + [0, -1], + [0, 1], # 2 + [1, -1], + [1, -1], + [1, 2], # 5 + [2, -1], + [2, -1], + ] + ) + n_edge = len(edge_face_connectivity) + expected_lower = np.full(shape, -1) + expected_lower[0, 1] = 0 + expected_lower[1, 1] = 0 + expected_lower[1, 2] = 1 + expected_horizontal = np.full((nlayer, n_edge), -1) + expected_horizontal[2, 2] = 2 + expected_horizontal[2, 5] = 3 + + lower, horizontal = imod.mf6.out.disv.disv_indices( + ia, ja, idomain, edge_face_connectivity + ) + np.testing.assert_array_equal(lower, expected_lower) + np.testing.assert_array_equal(horizontal, expected_horizontal) + + @pytest.mark.usefixtures("twri_result") def test_open_cbc__dis(twri_result): modeldir = twri_result diff --git a/imod/tests/test_mf6/test_mf6_uzf_model.py b/imod/tests/test_mf6/test_mf6_uzf_model.py index 2c4a5c27e..5a3a85105 100644 --- a/imod/tests/test_mf6/test_mf6_uzf_model.py +++ b/imod/tests/test_mf6/test_mf6_uzf_model.py @@ -4,11 +4,12 @@ import pandas as pd import pytest import xarray as xr +import xugrid as xu import imod -@pytest.fixture(scope="module") +@pytest.fixture(scope="function") def uzf_model(): # Initiate model gwf_model = imod.mf6.GroundwaterFlowModel(newton=True) @@ -120,6 +121,7 @@ def uzf_model(): uds["simulate_groundwater_seepage"] = True uds["save_flows"] = True uds["budget_fileout"] = "GWF_1/uzf.cbc" + uds["water_content_file"] = "GWF_1/uzf.wc" gwf_model["uzf"] = imod.mf6.UnsaturatedZoneFlow(**uds) @@ -144,6 +146,99 @@ def uzf_model(): return simulation +def uzf_model_disv(uzf_model) -> imod.mf6.Modflow6Simulation: + def toface(da): + return imod.util.spatial.ugrid2d_data(da, grid.face_dimension) + + simulation = uzf_model + idomain = simulation["GWF_1"]["dis"]["idomain"] + bottom = simulation["GWF_1"]["dis"]["bottom"] + top = simulation["GWF_1"]["dis"]["top"] + + icelltype = simulation["GWF_1"]["npf"]["icelltype"] + k = simulation["GWF_1"]["npf"]["k"] + k33 = simulation["GWF_1"]["npf"]["k33"] + + chd = simulation["GWF_1"]["chd"]["head"] + + grid = xu.Ugrid2d.from_structured(idomain) + idomain = xu.UgridDataArray(toface(idomain), grid) + icelltype = xu.UgridDataArray(toface(icelltype), grid) + chd = xu.UgridDataArray(toface(chd), grid) + + simulation["GWF_1"].pop("dis") + simulation["GWF_1"]["disv"] = imod.mf6.VerticesDiscretization(top, bottom, idomain) + + simulation["GWF_1"]["chd"] = imod.mf6.ConstantHead( + chd, print_input=True, print_flows=True, save_flows=True + ) + simulation["GWF_1"]["npf"] = imod.mf6.NodePropertyFlow( + icelltype=icelltype, + k=k, + k33=k33, + variable_vertical_conductance=False, + dewatered=False, + perched=False, + save_flows=True, + ) + + uds = {} + UZF_GRID_VARS = [ + "kv_sat", + "theta_sat", + "theta_res", + "theta_init", + "epsilon", + "surface_depression_depth", + "infiltration_rate", + "et_pot", + "extinction_depth", + ] + for var_name in UZF_GRID_VARS: + sda = simulation["GWF_1"]["uzf"][var_name] + uda = xu.UgridDataArray(toface(sda), grid) + uds[var_name] = uda + uds["save_flows"] = True + uds["budget_fileout"] = "GWF_1/uzf.cbc" + uds["water_content_file"] = "GWF_1/uzf.wc" + simulation["GWF_1"].pop("uzf") + simulation["GWF_1"]["uzf"] = imod.mf6.UnsaturatedZoneFlow(**uds) + return 
+
+
+def test_simulation_write_disv(uzf_model, tmp_path):
+    simulation = uzf_model_disv(uzf_model)
+    modeldir = tmp_path / "uzf_model"
+    simulation.write(modeldir, validate=True, binary=False)
+    with imod.util.cd(modeldir):
+        simulation.run()
+        budget_mf6 = imod.mf6.open_cbc(
+            "GWF_1/GWF_1.cbc", "GWF_1/disv.disv.grb", flowja=True
+        )
+        budgets_uzf = imod.mf6.open_cbc("GWF_1/uzf.cbc", "GWF_1/disv.disv.grb")
+        assert ("time", "layer", "mesh2d_nFaces") == budgets_uzf["gwf_gwf_1"].dims
+        assert (47, 7, 81) == budgets_uzf["gwf_gwf_1"].shape
+        assert np.allclose(
+            budget_mf6["uzf-gwrch_uzf"].obj,
+            -budgets_uzf["gwf_gwf_1"].obj,
+            equal_nan=True,
+        )
+        kv_sat = simulation["GWF_1"]["uzf"]["kv_sat"]
+        nlay, nnodes = kv_sat.shape
+        indices = np.arange(nlay * nnodes)[kv_sat.notnull().to_numpy().flatten()]
+        water_content = imod.mf6.open_dvs(
+            "GWF_1/uzf.wc", "GWF_1/disv.disv.grb", indices
+        )
+        not_active = xr.where(kv_sat.obj.notnull(), False, True)
+        # should increase with constant recharge
+        assert (
+            water_content.fillna(0.0)[-1, :, 5 * 5]
+            >= water_content.fillna(0.0)[0, :, 5 * 5]
+        ).all()
+        # should be nan where no uzf cell is defined
+        assert water_content.obj.where(not_active).isnull().all()
+
+
 @pytest.mark.skipif(sys.version_info < (3, 7), reason="capture_output added in 3.7")
 def test_simulation_write(uzf_model, tmp_path):
     simulation = uzf_model
@@ -157,8 +252,21 @@ def test_simulation_write(uzf_model, tmp_path):
         meanhead = head.mean().values
         mean_answer = -1.54998241
         assert np.allclose(meanhead, mean_answer)
-        budget_mf6 = head = imod.mf6.open_cbc("GWF_1/GWF_1.cbc", "GWF_1/dis.dis.grb")
+        budget_mf6 = imod.mf6.open_cbc("GWF_1/GWF_1.cbc", "GWF_1/dis.dis.grb")
        budgets_uzf = imod.mf6.open_cbc("GWF_1/uzf.cbc", "GWF_1/dis.dis.grb")
         assert np.allclose(
             budget_mf6["uzf-gwrch_uzf"], -budgets_uzf["gwf_gwf_1"], equal_nan=True
         )
+
+        kv_sat = simulation["GWF_1"]["uzf"]["kv_sat"]
+        nlay, nrow, ncol = kv_sat.shape
+        indices = np.arange(nlay * nrow * ncol)[kv_sat.notnull().to_numpy().flatten()]
+        water_content = imod.mf6.open_dvs("GWF_1/uzf.wc", "GWF_1/dis.dis.grb", indices)
+        not_active = xr.where(kv_sat.notnull(), False, True)
+        # should increase with constant recharge
+        assert (
+            water_content.fillna(0.0)[-1, :, 5, 5]
+            >= water_content.fillna(0.0)[0, :, 5, 5]
+        ).all()
+        # should be nan where no uzf cell is defined
+        assert water_content.where(not_active).isnull().all()
diff --git a/imod/tests/test_mf6/test_multimodel/test_mf6_partitioning_unstructured.py b/imod/tests/test_mf6/test_multimodel/test_mf6_partitioning_unstructured.py
index 0ec8a78e8..be10d5d2c 100644
--- a/imod/tests/test_mf6/test_multimodel/test_mf6_partitioning_unstructured.py
+++ b/imod/tests/test_mf6/test_multimodel/test_mf6_partitioning_unstructured.py
@@ -160,7 +160,6 @@ def is_expected_hfb_partition_combination_fail(current_cases):
 def test_partitioning_unstructured(
     tmp_path: Path, circle_model: Modflow6Simulation, partition_array: xu.UgridDataArray
 ):
-    # %%
     simulation = circle_model
     # Increase the recharge to make the head gradient more pronounced.
     simulation["GWF_1"]["rch"]["rate"] *= 100
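
A minimal usage sketch of the new ``imod.mf6.open_dvs`` (illustration only, not part of the patch). It assumes a ``simulation`` object and output paths like those in the UZF test above; the key step is deriving ``indices`` from the cells where the UZF package has data, because the dependent variable file only stores values for those nodes.

    import numpy as np

    import imod

    # The water content file only stores values for nodes where the UZF package
    # is defined, so build the node indices from the non-null package data first.
    kv_sat = simulation["GWF_1"]["uzf"]["kv_sat"]  # assumed model/package names
    indices = np.arange(kv_sat.size)[kv_sat.notnull().to_numpy().flatten()]

    # Lazily read the dependent variable output; the GRB file determines whether
    # an xr.DataArray (DIS) or xu.UgridDataArray (DISV) is returned.
    water_content = imod.mf6.open_dvs(
        "GWF_1/uzf.wc",       # water_content_file passed to UnsaturatedZoneFlow
        "GWF_1/dis.dis.grb",  # binary grid file written by MODFLOW 6
        indices,
    )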