From 0028eb56ef38316c5442bb2d15d21bfd20464e0b Mon Sep 17 00:00:00 2001
From: mloubout
Date: Wed, 13 Nov 2024 14:46:10 -0500
Subject: [PATCH 1/2] api: support custom coeffs for div/grad/curl

---
 devito/finite_differences/derivative.py     |  5 ++-
 devito/finite_differences/differentiable.py | 12 +++----
 devito/finite_differences/operators.py      | 28 ++++++++++-----
 devito/types/tensor.py                      | 37 +++++++++----------
 tests/test_symbolic_coefficients.py         | 26 +++++++++++++-
 tests/test_tensors.py                       | 39 ++++++++++++++++++++-
 6 files changed, 112 insertions(+), 35 deletions(-)

diff --git a/devito/finite_differences/derivative.py b/devito/finite_differences/derivative.py
index b550d0812e..6a3fdf42e7 100644
--- a/devito/finite_differences/derivative.py
+++ b/devito/finite_differences/derivative.py
@@ -218,7 +218,8 @@ def _process_weights(cls, **kwargs):
         else:
             return as_tuple(weights)

-    def __call__(self, x0=None, fd_order=None, side=None, method=None, weights=None):
+    def __call__(self, x0=None, fd_order=None, side=None, method=None,
+                 weights=None, w=None):
         rkw = {}
         if side is not None:
             rkw['side'] = side
@@ -226,6 +227,8 @@ def __call__(self, x0=None, fd_order=None, side=None, method=None, weights=None)
             rkw['method'] = method
         if weights is not None:
             rkw['weights'] = weights
+        if w is not None:
+            rkw['weights'] = w

         if x0 is not None:
             x0 = self._process_x0(self.dims, x0=x0)
diff --git a/devito/finite_differences/differentiable.py b/devito/finite_differences/differentiable.py
index 69160f72ea..63e44370e1 100644
--- a/devito/finite_differences/differentiable.py
+++ b/devito/finite_differences/differentiable.py
@@ -293,7 +293,7 @@ def laplace(self):
         """
         return self.laplacian()

-    def laplacian(self, shift=None, order=None):
+    def laplacian(self, shift=None, order=None, method='FD', w=None):
         """
         Laplacian of the Differentiable with shifted derivatives and custom
         FD order.
@@ -315,10 +315,10 @@ def laplacian(self, shift=None, order=None):
         shift_x0 = make_shift_x0(shift, (len(space_dims),))
         derivs = tuple('d%s2' % d.name for d in space_dims)
         return Add(*[getattr(self, d)(x0=shift_x0(shift, space_dims[i], None, i),
-                                      fd_order=order)
+                                      method=method, fd_order=order, w=w)
                      for i, d in enumerate(derivs)])

-    def div(self, shift=None, order=None, method='FD'):
+    def div(self, shift=None, order=None, method='FD', w=None):
         """
         Divergence of the input Function.

@@ -339,10 +339,10 @@ def div(self, shift=None, order=None, method='FD'):
         shift_x0 = make_shift_x0(shift, (len(space_dims),))
         order = order or self.space_order
         return Add(*[getattr(self, 'd%s' % d.name)(x0=shift_x0(shift, d, None, i),
-                                                   fd_order=order, method=method)
+                                                   fd_order=order, method=method, w=w)
                      for i, d in enumerate(space_dims)])

-    def grad(self, shift=None, order=None, method='FD'):
+    def grad(self, shift=None, order=None, method='FD', w=None):
         """
         Gradient of the input Function.

@@ -364,7 +364,7 @@ def grad(self, shift=None, order=None, method='FD'):
         shift_x0 = make_shift_x0(shift, (len(space_dims),))
         order = order or self.space_order
         comps = [getattr(self, 'd%s' % d.name)(x0=shift_x0(shift, d, None, i),
-                                               fd_order=order, method=method)
+                                               fd_order=order, method=method, w=w)
                  for i, d in enumerate(space_dims)]
         vec_func = VectorTimeFunction if self.is_TimeDependent else VectorFunction
         return vec_func(name='grad_%s' % self.name, time_order=self.time_order,
diff --git a/devito/finite_differences/operators.py b/devito/finite_differences/operators.py
index b664f1e9e1..29c9779a22 100644
--- a/devito/finite_differences/operators.py
+++ b/devito/finite_differences/operators.py
@@ -1,4 +1,4 @@
-def div(func, shift=None, order=None, method='FD'):
+def div(func, shift=None, order=None, method='FD', weights=None, w=None):
     """
     Divergence of the input Function.

@@ -14,9 +14,12 @@ def div(func, shift=None, order=None, method='FD'):
     method: str, optional, default='FD'
         Discretization method. Options are 'FD' (default) and
         'RSFD' (rotated staggered grid finite-difference).
+    weights: list, tuple, or dict, optional, default=None
+        Custom weights for the finite difference coefficients.
     """
+    w = weights or w
     try:
-        return func.div(shift=shift, order=order, method=method)
+        return func.div(shift=shift, order=order, method=method, w=w)
     except AttributeError:
         return 0

@@ -38,7 +41,7 @@ def div45(func, shift=None, order=None):
     return div(func, shift=shift, order=order, method='RSFD')


-def grad(func, shift=None, order=None, method='FD'):
+def grad(func, shift=None, order=None, method='FD', weights=None, w=None):
     """
     Gradient of the input Function.

@@ -54,9 +57,12 @@ def grad(func, shift=None, order=None, method='FD'):
     method: str, optional, default='FD'
         Discretization method. Options are 'FD' (default) and
         'RSFD' (rotated staggered grid finite-difference).
+    weights: list, tuple, or dict, optional, default=None
+        Custom weights for the finite difference coefficients.
     """
+    w = weights or w
     try:
-        return func.grad(shift=shift, order=order, method=method)
+        return func.grad(shift=shift, order=order, method=method, w=w)
     except AttributeError:
         raise AttributeError("Gradient not supported for class %s" % func.__class__)

@@ -78,7 +84,7 @@ def grad45(func, shift=None, order=None):
     return grad(func, shift=shift, order=order, method='RSFD')


-def curl(func, shift=None, order=None, method='FD'):
+def curl(func, shift=None, order=None, method='FD', weights=None, w=None):
     """
     Curl of the input Function. Only supported for VectorFunction

@@ -94,9 +100,12 @@ def curl(func, shift=None, order=None, method='FD'):
     method: str, optional, default='FD'
         Discretization method. Options are 'FD' (default) and
         'RSFD' (rotated staggered grid finite-difference).
+    weights: list, tuple, or dict, optional, default=None
+        Custom weights for the finite difference coefficients.
     """
+    w = weights or w
     try:
-        return func.curl(shift=shift, order=order, method=method)
+        return func.curl(shift=shift, order=order, method=method, w=w)
     except AttributeError:
         raise AttributeError("Curl only supported for 3D VectorFunction")

@@ -119,7 +128,7 @@ def curl45(func, shift=None, order=None):
     return curl(func, shift=shift, order=order, method='RSFD')


-def laplace(func, shift=None, order=None, method='FD'):
+def laplace(func, shift=None, order=None, method='FD', weights=None, w=None):
     """
     Laplacian of the input Function.

@@ -134,9 +143,12 @@
         Uses `func.space_order` when not specified
     method: str, optional, default='FD'
         Discretization method. Options are 'FD' (default) and 'RSFD'
+    weights: list, tuple, or dict, optional, default=None
+        Custom weights for the finite difference coefficients.
     """
+    w = weights or w
     try:
-        return func.laplacian(shift=shift, order=order, method=method)
+        return func.laplacian(shift=shift, order=order, method=method, w=w)
     except AttributeError:
         return 0

diff --git a/devito/types/tensor.py b/devito/types/tensor.py
index 843bbd51d2..0b87b185c0 100644
--- a/devito/types/tensor.py
+++ b/devito/types/tensor.py
@@ -188,7 +188,7 @@ def values(self):
         else:
             return super().values()

-    def div(self, shift=None, order=None, method='FD'):
+    def div(self, shift=None, order=None, method='FD', w=None):
         """
         Divergence of the TensorFunction (is a VectorFunction).

@@ -211,7 +211,7 @@ def div(self, shift=None, order=None, method='FD'):
         for i in range(len(self.space_dimensions)):
             comps.append(sum([getattr(self[j, i], 'd%s' % d.name)
                               (x0=shift_x0(shift, d, i, j), fd_order=order,
-                               method=method)
+                               method=method, w=w)
                               for j, d in enumerate(self.space_dimensions)]))
         return func._new(comps)

@@ -222,7 +222,7 @@ def laplace(self):
         """
         return self.laplacian()

-    def laplacian(self, shift=None, order=None):
+    def laplacian(self, shift=None, order=None, method='FD', w=None):
         """
         Laplacian of the TensorFunction with shifted derivatives and custom
         FD order.
@@ -246,11 +246,12 @@ def laplacian(self, shift=None, order=None):
         shift_x0 = make_shift_x0(shift, (ndim, ndim))
         for j in range(ndim):
             comps.append(sum([getattr(self[j, i], 'd%s2' % d.name)
-                              (x0=shift_x0(shift, d, j, i), fd_order=order)
+                              (x0=shift_x0(shift, d, j, i), fd_order=order,
+                               method=method, w=w)
                               for i, d in enumerate(self.space_dimensions)]))
         return func._new(comps)

-    def grad(self, shift=None, order=None):
+    def grad(self, shift=None, order=None, method=None, w=None):
         raise AttributeError("Gradient of a second order tensor not supported")

     def new_from_mat(self, mat):
@@ -313,7 +314,7 @@ def __str__(self):

     __repr__ = __str__

-    def div(self, shift=None, order=None, method='FD'):
+    def div(self, shift=None, order=None, method='FD', w=None):
         """
         Divergence of the VectorFunction, creates the divergence Function.

@@ -331,7 +332,7 @@ def div(self, shift=None, order=None, method='FD'):
         shift_x0 = make_shift_x0(shift, (len(self.space_dimensions),))
         order = order or self.space_order
         return sum([getattr(self[i], 'd%s' % d.name)(x0=shift_x0(shift, d, None, i),
-                                                     fd_order=order, method=method)
+                                                     fd_order=order, method=method, w=w)
                     for i, d in enumerate(self.space_dimensions)])

     @property
@@ -341,7 +342,7 @@ def laplace(self):
         """
         return self.laplacian()

-    def laplacian(self, shift=None, order=None):
+    def laplacian(self, shift=None, order=None, method='FD', w=None):
         """
         Laplacian of the VectorFunction, creates the Laplacian VectorFunction.

@@ -357,12 +358,12 @@ def laplacian(self, shift=None, order=None):
         shift_x0 = make_shift_x0(shift, (len(self.space_dimensions),))
         order = order or self.space_order
         comps = [sum([getattr(s, 'd%s2' % d.name)(x0=shift_x0(shift, d, None, i),
-                                                  fd_order=order)
+                                                  fd_order=order, w=w, method=method)
                       for i, d in enumerate(self.space_dimensions)])
                  for s in self]
         return func._new(comps)

-    def curl(self, shift=None, order=None, method='FD'):
+    def curl(self, shift=None, order=None, method='FD', w=None):
         """
         Gradient of the (3D) VectorFunction, creates the curl VectorFunction.

@@ -385,21 +386,21 @@ def curl(self, shift=None, order=None, method='FD'):
         shift_x0 = make_shift_x0(shift, (len(dims), len(dims)))
         order = order or self.space_order
         comp1 = (getattr(self[2], derivs[1])(x0=shift_x0(shift, dims[1], 2, 1),
-                                             fd_order=order, method=method) -
+                                             fd_order=order, method=method, w=w) -
                  getattr(self[1], derivs[2])(x0=shift_x0(shift, dims[2], 1, 2),
-                                             fd_order=order, method=method))
+                                             fd_order=order, method=method, w=w))
         comp2 = (getattr(self[0], derivs[2])(x0=shift_x0(shift, dims[2], 0, 2),
-                                             fd_order=order, method=method) -
+                                             fd_order=order, method=method, w=w) -
                  getattr(self[2], derivs[0])(x0=shift_x0(shift, dims[0], 2, 0),
-                                             fd_order=order, method=method))
+                                             fd_order=order, method=method, w=w))
         comp3 = (getattr(self[1], derivs[0])(x0=shift_x0(shift, dims[0], 1, 0),
-                                             fd_order=order, method=method) -
+                                             fd_order=order, method=method, w=w) -
                  getattr(self[0], derivs[1])(x0=shift_x0(shift, dims[1], 0, 1),
-                                             fd_order=order, method=method))
+                                             fd_order=order, method=method, w=w))
         func = vec_func(self)
         return func._new(3, 1, [comp1, comp2, comp3])

-    def grad(self, shift=None, order=None, method='FD'):
+    def grad(self, shift=None, order=None, method='FD', w=None):
         """
         Gradient of the VectorFunction, creates the gradient TensorFunction.

@@ -418,7 +419,7 @@ def grad(self, shift=None, order=None, method='FD'):
         ndim = len(self.space_dimensions)
         shift_x0 = make_shift_x0(shift, (ndim, ndim))
         order = order or self.space_order
-        comps = [[getattr(f, 'd%s' % d.name)(x0=shift_x0(shift, d, i, j),
+        comps = [[getattr(f, 'd%s' % d.name)(x0=shift_x0(shift, d, i, j), w=w,
                                              fd_order=order, method=method)
                   for j, d in enumerate(self.space_dimensions)]
                  for i, f in enumerate(self)]
diff --git a/tests/test_symbolic_coefficients.py b/tests/test_symbolic_coefficients.py
index 30a89b155b..5bebb1f240 100644
--- a/tests/test_symbolic_coefficients.py
+++ b/tests/test_symbolic_coefficients.py
@@ -3,7 +3,7 @@
 import pytest

 from devito import (Grid, Function, TimeFunction, Eq,
-                    Dimension, solve, Operator)
+                    Dimension, solve, Operator, div, grad, laplace)
 from devito.finite_differences import Differentiable
 from devito.finite_differences.coefficients import Coefficient, Substitutions
 from devito.finite_differences.finite_difference import _PRECISION
@@ -300,3 +300,27 @@ def test_compound_nested_subs(self):

         # `str` simply because some objects are of type EvalDerivative
         assert sp.simplify(eq.evaluate.rhs - (term0 + term1 + term2)) == 0
+
+    def test_operators(self):
+        grid = Grid(shape=(11, 11))
+        x, y = grid.dimensions
+
+        f = Function(name='f', grid=grid, space_order=2)
+
+        coeffs0 = [100, 100, 100]
+
+        # Div
+        expr0 = div(f, w=coeffs0)
+        assert expr0 == f.dx(weights=coeffs0) + f.dy(weights=coeffs0)
+        assert list(expr0.args[0].weights) == coeffs0
+
+        # Grad
+        expr2 = grad(f, w=coeffs0)
+        assert expr2[0] == f.dx(weights=coeffs0)
+        assert expr2[1] == f.dy(weights=coeffs0)
+        assert list(expr2[0].weights) == coeffs0
+
+        # Laplace
+        expr3 = laplace(f, w=coeffs0)
+        assert expr3 == f.dx2(weights=coeffs0) + f.dy2(weights=coeffs0)
+        assert list(expr3.args[0].weights) == coeffs0
diff --git a/tests/test_tensors.py b/tests/test_tensors.py
index 0202cae96d..fdad398a36 100644
--- a/tests/test_tensors.py
+++ b/tests/test_tensors.py
@@ -5,7 +5,7 @@
 import pytest

 from devito import VectorFunction, TensorFunction, VectorTimeFunction, TensorTimeFunction
-from devito import Grid, Function, TimeFunction, Dimension, Eq, div, grad, curl
+from devito import Grid, Function, TimeFunction, Dimension, Eq, div, grad, curl, laplace
 from devito.symbolics import retrieve_derivatives
 from devito.types import NODE

@@ -401,3 +401,40 @@ def test_basic_arithmetic():

     t1 = tau * 2
     assert all(t1i == ti * 2 for (t1i, ti) in zip(t1, tau))
+
+
+def test_custom_coeffs_vector():
+    grid = Grid(tuple([5]*3))
+    v = VectorFunction(name="v", grid=grid, space_order=2)
+
+    # Custom coefficients
+    c = [10, 10, 10]
+
+    dv = div(v, weights=c)
+    assert dv == v[0].dx(w=c) + v[1].dy(w=c) + v[2].dz(w=c)
+    assert list(dv.args[0].weights) == c
+
+    for func in [div, grad, curl, laplace]:
+        dv = func(v, weights=c)
+        derivs = retrieve_derivatives(dv)
+        for drv in derivs:
+            assert list(drv.weights) == c
+
+
+def test_custom_coeffs_tensor():
+    grid = Grid(tuple([5]*3))
+    tau = TensorFunction(name="tau", grid=grid, space_order=2)
+
+    # Custom coefficients
+    c = [10, 10, 10]
+
+    dtau = div(tau, weights=c)
+    for i, d in enumerate(grid.dimensions):
+        assert dtau[i] == tau[i, 0].dx(w=c) + tau[i, 1].dy(w=c) + tau[i, 2].dz(w=c)
+        assert list(dtau[i].args[0].weights) == c
+
+    for func in [div, laplace]:
+        dtau = func(tau, weights=c)
+        derivs = retrieve_derivatives(dtau)
+        for drv in derivs:
+            assert list(drv.weights) == c

From f668d8eaccd16295806a338c62d43a9620fa38e7 Mon Sep 17 00:00:00 2001
From: mloubout
Date: Wed, 13 Nov 2024 15:13:23 -0500
Subject: [PATCH 2/2] api: cleanup weight kwargs

---
 devito/finite_differences/derivative.py     |  6 ++--
 devito/finite_differences/differentiable.py | 18 ++++++++--
 devito/finite_differences/operators.py      | 24 ++++++------
 devito/types/tensor.py                      | 38 +++++++++++++++++----
 4 files changed, 60 insertions(+), 26 deletions(-)

diff --git a/devito/finite_differences/derivative.py b/devito/finite_differences/derivative.py
index 6a3fdf42e7..1019ba8672 100644
--- a/devito/finite_differences/derivative.py
+++ b/devito/finite_differences/derivative.py
@@ -218,8 +218,8 @@ def _process_weights(cls, **kwargs):
         else:
             return as_tuple(weights)

-    def __call__(self, x0=None, fd_order=None, side=None, method=None,
-                 weights=None, w=None):
+    def __call__(self, x0=None, fd_order=None, side=None, method=None, **kwargs):
+        weights = kwargs.get('weights', kwargs.get('w'))
         rkw = {}
         if side is not None:
             rkw['side'] = side
@@ -227,8 +227,6 @@ def __call__(self, x0=None, fd_order=None, side=None, method=None, **kwargs):
             rkw['method'] = method
         if weights is not None:
             rkw['weights'] = weights
-        if w is not None:
-            rkw['weights'] = w

         if x0 is not None:
             x0 = self._process_x0(self.dims, x0=x0)
diff --git a/devito/finite_differences/differentiable.py b/devito/finite_differences/differentiable.py
index 63e44370e1..f8b2faddd0 100644
--- a/devito/finite_differences/differentiable.py
+++ b/devito/finite_differences/differentiable.py
@@ -293,7 +293,7 @@ def laplace(self):
         """
         return self.laplacian()

-    def laplacian(self, shift=None, order=None, method='FD', w=None):
+    def laplacian(self, shift=None, order=None, method='FD', **kwargs):
         """
         Laplacian of the Differentiable with shifted derivatives and custom
         FD order.
@@ -309,7 +309,13 @@ def laplacian(self, shift=None, order=None, method='FD', w=None):
         order: int, optional, default=None
             Discretization order for the finite differences.
             Uses `func.space_order` when not specified
+        method: str, optional, default='FD'
+            Discretization method. Options are 'FD' (default) and
+            'RSFD' (rotated staggered grid finite-difference).
+        weights/w: list, tuple, or dict, optional, default=None
+            Custom weights for the finite differences.
""" + w = kwargs.get('weights', kwargs.get('w')) order = order or self.space_order space_dims = [d for d in self.dimensions if d.is_Space] shift_x0 = make_shift_x0(shift, (len(space_dims),)) @@ -318,7 +324,7 @@ def laplacian(self, shift=None, order=None, method='FD', w=None): method=method, fd_order=order, w=w) for i, d in enumerate(derivs)]) - def div(self, shift=None, order=None, method='FD', w=None): + def div(self, shift=None, order=None, method='FD', **kwargs): """ Divergence of the input Function. @@ -334,7 +340,10 @@ def div(self, shift=None, order=None, method='FD', w=None): method: str, optional, default='FD' Discretization method. Options are 'FD' (default) and 'RSFD' (rotated staggered grid finite-difference). + weights/w: list, tuple, or dict, optional, default=None + Custom weights for the finite difference coefficients. """ + w = kwargs.get('weights', kwargs.get('w')) space_dims = [d for d in self.dimensions if d.is_Space] shift_x0 = make_shift_x0(shift, (len(space_dims),)) order = order or self.space_order @@ -342,7 +351,7 @@ def div(self, shift=None, order=None, method='FD', w=None): fd_order=order, method=method, w=w) for i, d in enumerate(space_dims)]) - def grad(self, shift=None, order=None, method='FD', w=None): + def grad(self, shift=None, order=None, method='FD', **kwargs): """ Gradient of the input Function. @@ -358,11 +367,14 @@ def grad(self, shift=None, order=None, method='FD', w=None): method: str, optional, default='FD' Discretization method. Options are 'FD' (default) and 'RSFD' (rotated staggered grid finite-difference). + weights/w: list, tuple, or dict, optional, default=None + Custom weights for the finite """ from devito.types.tensor import VectorFunction, VectorTimeFunction space_dims = [d for d in self.dimensions if d.is_Space] shift_x0 = make_shift_x0(shift, (len(space_dims),)) order = order or self.space_order + w = kwargs.get('weights', kwargs.get('w')) comps = [getattr(self, 'd%s' % d.name)(x0=shift_x0(shift, d, None, i), fd_order=order, method=method, w=w) for i, d in enumerate(space_dims)] diff --git a/devito/finite_differences/operators.py b/devito/finite_differences/operators.py index 29c9779a22..c19a9cdfce 100644 --- a/devito/finite_differences/operators.py +++ b/devito/finite_differences/operators.py @@ -1,4 +1,4 @@ -def div(func, shift=None, order=None, method='FD', weights=None, w=None): +def div(func, shift=None, order=None, method='FD', **kwargs): """ Divergence of the input Function. @@ -14,10 +14,10 @@ def div(func, shift=None, order=None, method='FD', weights=None, w=None): method: str, optional, default='FD' Discretization method. Options are 'FD' (default) and 'RSFD' (rotated staggered grid finite-difference). - weights: list, tupe, or dict, optional, default=None + weights/w: list, tuple, or dict, optional, default=None Custom weights for the finite difference coefficients. """ - w = weights or w + w = kwargs.get('weights', kwargs.get('w')) try: return func.div(shift=shift, order=order, method=method, w=w) except AttributeError: @@ -41,7 +41,7 @@ def div45(func, shift=None, order=None): return div(func, shift=shift, order=order, method='RSFD') -def grad(func, shift=None, order=None, method='FD', weights=None, w=None): +def grad(func, shift=None, order=None, method='FD', **kwargs): """ Gradient of the input Function. @@ -57,10 +57,10 @@ def grad(func, shift=None, order=None, method='FD', weights=None, w=None): method: str, optional, default='FD' Discretization method. 
        'RSFD' (rotated staggered grid finite-difference).
-    weights: list, tuple, or dict, optional, default=None
+    weights/w: list, tuple, or dict, optional, default=None
         Custom weights for the finite difference coefficients.
     """
-    w = weights or w
+    w = kwargs.get('weights', kwargs.get('w'))
     try:
         return func.grad(shift=shift, order=order, method=method, w=w)
     except AttributeError:
         raise AttributeError("Gradient not supported for class %s" % func.__class__)

@@ -84,7 +84,7 @@ def grad45(func, shift=None, order=None):
     return grad(func, shift=shift, order=order, method='RSFD')


-def curl(func, shift=None, order=None, method='FD', weights=None, w=None):
+def curl(func, shift=None, order=None, method='FD', **kwargs):
     """
     Curl of the input Function. Only supported for VectorFunction

@@ -100,10 +100,10 @@ def curl(func, shift=None, order=None, method='FD', weights=None, w=None):
     method: str, optional, default='FD'
         Discretization method. Options are 'FD' (default) and
         'RSFD' (rotated staggered grid finite-difference).
-    weights: list, tuple, or dict, optional, default=None
+    weights/w: list, tuple, or dict, optional, default=None
         Custom weights for the finite difference coefficients.
     """
-    w = weights or w
+    w = kwargs.get('weights', kwargs.get('w'))
     try:
         return func.curl(shift=shift, order=order, method=method, w=w)
     except AttributeError:
         raise AttributeError("Curl only supported for 3D VectorFunction")

@@ -128,7 +128,7 @@ def curl45(func, shift=None, order=None):
     return curl(func, shift=shift, order=order, method='RSFD')


-def laplace(func, shift=None, order=None, method='FD', weights=None, w=None):
+def laplace(func, shift=None, order=None, method='FD', **kwargs):
     """
     Laplacian of the input Function.

@@ -143,10 +143,10 @@ def laplace(func, shift=None, order=None, method='FD', weights=None, w=None):
         Uses `func.space_order` when not specified
     method: str, optional, default='FD'
         Discretization method. Options are 'FD' (default) and 'RSFD'
-    weights: list, tuple, or dict, optional, default=None
+    weights/w: list, tuple, or dict, optional, default=None
         Custom weights for the finite difference coefficients.
     """
-    w = weights or w
+    w = kwargs.get('weights', kwargs.get('w'))
     try:
         return func.laplacian(shift=shift, order=order, method=method, w=w)
     except AttributeError:
         return 0
diff --git a/devito/types/tensor.py b/devito/types/tensor.py
index 0b87b185c0..e7a37897c7 100644
--- a/devito/types/tensor.py
+++ b/devito/types/tensor.py
@@ -188,7 +188,7 @@ def values(self):
         else:
             return super().values()

-    def div(self, shift=None, order=None, method='FD', w=None):
+    def div(self, shift=None, order=None, method='FD', **kwargs):
         """
         Divergence of the TensorFunction (is a VectorFunction).

@@ -202,7 +202,10 @@ def div(self, shift=None, order=None, method='FD', w=None):
         method: str, optional, default='FD'
             Discretization method. Options are 'FD' (default) and
             'RSFD' (rotated staggered grid finite-difference).
+        weights/w: list, tuple, or dict, optional, default=None
+            Custom weights for the finite differences.
         """
+        w = kwargs.get('weights', kwargs.get('w'))
         comps = []
         func = vec_func(self)
         ndim = len(self.space_dimensions)
@@ -222,7 +225,7 @@ def laplace(self):
         """
         return self.laplacian()

-    def laplacian(self, shift=None, order=None, method='FD', w=None):
+    def laplacian(self, shift=None, order=None, method='FD', **kwargs):
         """
         Laplacian of the TensorFunction with shifted derivatives and custom
         FD order.
@@ -238,7 +241,13 @@ def laplacian(self, shift=None, order=None, method='FD', w=None):
         order: int, optional, default=None
             Discretization order for the finite differences.
            Uses `func.space_order` when not specified
+        method: str, optional, default='FD'
+            Discretization method. Options are 'FD' (default) and
+            'RSFD' (rotated staggered grid finite-difference).
+        weights/w: list, tuple, or dict, optional, default=None
+            Custom weights for the finite difference coefficients.
         """
+        w = kwargs.get('weights', kwargs.get('w'))
         comps = []
         func = vec_func(self)
         order = order or self.space_order
@@ -251,7 +260,7 @@ def laplacian(self, shift=None, order=None, method='FD', w=None):
                               for i, d in enumerate(self.space_dimensions)]))
         return func._new(comps)

-    def grad(self, shift=None, order=None, method=None, w=None):
+    def grad(self, shift=None, order=None, method=None, **kwargs):
         raise AttributeError("Gradient of a second order tensor not supported")

     def new_from_mat(self, mat):
@@ -314,7 +323,7 @@ def __str__(self):

     __repr__ = __str__

-    def div(self, shift=None, order=None, method='FD', w=None):
+    def div(self, shift=None, order=None, method='FD', **kwargs):
         """
         Divergence of the VectorFunction, creates the divergence Function.

@@ -328,7 +337,10 @@ def div(self, shift=None, order=None, method='FD', w=None):
         method: str, optional, default='FD'
             Discretization method. Options are 'FD' (default) and
             'RSFD' (rotated staggered grid finite-difference).
+        weights/w: list, tuple, or dict, optional, default=None
+            Custom weights for the finite difference coefficients.
         """
+        w = kwargs.get('weights', kwargs.get('w'))
         shift_x0 = make_shift_x0(shift, (len(self.space_dimensions),))
         order = order or self.space_order
         return sum([getattr(self[i], 'd%s' % d.name)(x0=shift_x0(shift, d, None, i),
                                                      fd_order=order, method=method, w=w)
                     for i, d in enumerate(self.space_dimensions)])

     @property
@@ -342,7 +354,7 @@ def laplace(self):
         """
         return self.laplacian()

-    def laplacian(self, shift=None, order=None, method='FD', w=None):
+    def laplacian(self, shift=None, order=None, method='FD', **kwargs):
         """
         Laplacian of the VectorFunction, creates the Laplacian VectorFunction.

@@ -353,7 +365,13 @@ def laplacian(self, shift=None, order=None, method='FD', w=None):
         order: int, optional, default=None
             Discretization order for the finite differences.
             Uses `func.space_order` when not specified
+        method: str, optional, default='FD'
+            Discretization method. Options are 'FD' (default) and
+            'RSFD' (rotated staggered grid finite-difference).
+        weights/w: list, tuple, or dict, optional, default=None
+            Custom weights for the finite difference coefficients.
         """
+        w = kwargs.get('weights', kwargs.get('w'))
         func = vec_func(self)
         shift_x0 = make_shift_x0(shift, (len(self.space_dimensions),))
         order = order or self.space_order
@@ -363,7 +381,7 @@ def laplacian(self, shift=None, order=None, method='FD', w=None):
         comps = [sum([getattr(s, 'd%s2' % d.name)(x0=shift_x0(shift, d, None, i),
                                                   fd_order=order, w=w, method=method)
                       for i, d in enumerate(self.space_dimensions)])
                  for s in self]
         return func._new(comps)

-    def curl(self, shift=None, order=None, method='FD', w=None):
+    def curl(self, shift=None, order=None, method='FD', **kwargs):
         """
         Gradient of the (3D) VectorFunction, creates the curl VectorFunction.

@@ -377,10 +395,13 @@ def curl(self, shift=None, order=None, method='FD', w=None):
         method: str, optional, default='FD'
             Discretization method. Options are 'FD' (default) and
             'RSFD' (rotated staggered grid finite-difference).
+        weights/w: list, tuple, or dict, optional, default=None
+            Custom weights for the finite difference coefficients.
""" if len(self.space_dimensions) != 3: raise AttributeError("Curl only supported for 3D VectorFunction") # The curl of a VectorFunction is a VectorFunction + w = kwargs.get('weights', kwargs.get('w')) dims = self.space_dimensions derivs = ['d%s' % d.name for d in dims] shift_x0 = make_shift_x0(shift, (len(dims), len(dims))) @@ -400,7 +421,7 @@ def curl(self, shift=None, order=None, method='FD', w=None): func = vec_func(self) return func._new(3, 1, [comp1, comp2, comp3]) - def grad(self, shift=None, order=None, method='FD', w=None): + def grad(self, shift=None, order=None, method='FD', **kwargs): """ Gradient of the VectorFunction, creates the gradient TensorFunction. @@ -414,7 +435,10 @@ def grad(self, shift=None, order=None, method='FD', w=None): method: str, optional, default='FD' Discretization method. Options are 'FD' (default) and 'RSFD' (rotated staggered grid finite-difference). + weights/w: list, tuple, or dict, optional, default=None + Custom weights for the finite difference coefficients. """ + w = kwargs.get('weights', kwargs.get('w')) func = tens_func(self) ndim = len(self.space_dimensions) shift_x0 = make_shift_x0(shift, (ndim, ndim))