
Commit

Merge pull request #2380 from devitocodes/decoupler-alpha
compiler: Misc compiler improvements
mloubout authored Jun 3, 2024
2 parents 10963f3 + b6f1a08 commit a00b66f
Showing 14 changed files with 294 additions and 132 deletions.
93 changes: 71 additions & 22 deletions conftest.py
@@ -122,6 +122,29 @@ def EVAL(exprs, *args):
return processed[0] if isinstance(exprs, str) else processed


def get_testname(item):
if item.cls is not None:
return "%s::%s::%s" % (item.fspath, item.cls.__name__, item.name)
else:
return "%s::%s" % (item.fspath, item.name)


def set_run_reset(env_vars, call):
old_env_vars = {k: os.environ.get(k, None) for k in env_vars}

os.environ.update(env_vars)
os.environ['DEVITO_PYTEST_FLAG'] = '1'

try:
check_call(call)
return True
except:
return False
finally:
os.environ['DEVITO_PYTEST_FLAG'] = '0'
os.environ.update({k: v for k, v in old_env_vars.items() if v is not None})
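For context, a minimal sketch of how this helper could be driven directly, assuming the definition above is in scope; the MPI scheme and test node id are illustrative only, not taken from this PR.

import sys

# Illustrative only: run a single test under MPI with 2 ranks; set_run_reset
# restores the caller's environment afterwards via its try/finally block.
env_vars = {'DEVITO_MPI': 'basic'}
call = ['mpiexec', '-n', '2', sys.executable, '-m', 'pytest', '--no-summary',
        '-s', '--runxfail', '-qq', 'tests/test_mpi.py::test_trivial_eq']
passed = set_run_reset(env_vars, call)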


def parallel(item, m):
"""
Run a test in parallel. Readapted from:
@@ -141,14 +164,12 @@ def parallel(item, m):
else:
raise ValueError("Can't run test: unexpected mode `%s`" % m)

env_vars = {'DEVITO_MPI': scheme}

pyversion = sys.executable
testname = get_testname(item)
# Only spew tracebacks on rank 0.
# Run xfailing tests to ensure that errors are reported to calling process
if item.cls is not None:
testname = "%s::%s::%s" % (item.fspath, item.cls.__name__, item.name)
else:
testname = "%s::%s" % (item.fspath, item.name)

args = ["-n", "1", pyversion, "-m", "pytest", "--no-summary", "-s",
"--runxfail", "-qq", testname]
if nprocs > 1:
@@ -161,16 +182,24 @@ def parallel(item, m):
else:
call = [mpi_exec] + args

# Tell the MPI ranks that they are running a parallel test
os.environ['DEVITO_MPI'] = scheme
try:
check_call(call)
res = True
except:
res = False
finally:
os.environ['DEVITO_MPI'] = '0'
return res
return set_run_reset(env_vars, call)


def decoupler(item, m):
"""
Run a test in decoupled mode.
"""
mpi_exec = 'mpiexec'
assert sniff_mpi_distro(mpi_exec) != 'unknown', "Decoupled tests require MPI"

env_vars = {'DEVITO_DECOUPLER': '1'}
if isinstance(m, int):
env_vars['DEVITO_DECOUPLER_WORKERS'] = str(m)

testname = get_testname(item)
call = ["pytest", "--no-summary", "-s", "--runxfail", testname]

return set_run_reset(env_vars, call)
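Roughly the by-hand equivalent of what decoupler() assembles, for readers who want to reproduce a decoupled run outside pytest's hooks; the worker count and test node id below are hypothetical.

import os
from subprocess import check_call

# Illustrative only: decoupler() additionally saves/restores the environment
# and toggles DEVITO_PYTEST_FLAG through set_run_reset.
os.environ['DEVITO_DECOUPLER'] = '1'
os.environ['DEVITO_DECOUPLER_WORKERS'] = '2'
check_call(['pytest', '--no-summary', '-s', '--runxfail',
            'tests/test_operator.py::test_decoupled_run'])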


def pytest_configure(config):
@@ -179,6 +208,10 @@
"markers",
"parallel(mode): mark test to run in parallel"
)
config.addinivalue_line(
"markers",
"decoupler(mode): mark test to run in decoupled mode",
)
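A hedged sketch of how a test module might opt into these markers once registered; the test names, bodies and mode values below are illustrative and rely on the parametrization and run hooks defined elsewhere in this conftest.

import pytest

@pytest.mark.parallel(mode=[2, 4])
def test_stencil_under_mpi(mode):
    # Collected twice (mode=2 and mode=4) via pytest_generate_tests below.
    ...

@pytest.mark.decoupler(mode=2)
def test_stencil_decoupled(mode):
    # Re-run in a child pytest with DEVITO_DECOUPLER=1 and 2 workers.
    ...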


def pytest_generate_tests(metafunc):
@@ -187,26 +220,37 @@
if 'mode' in metafunc.fixturenames:
markers = metafunc.definition.iter_markers()
for marker in markers:
if marker.name == 'parallel':
if marker.name in ('parallel', 'decoupler'):
mode = list(as_tuple(marker.kwargs.get('mode', 2)))
metafunc.parametrize("mode", mode)


@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_call(item):
partest = os.environ.get('DEVITO_MPI', 0)
inside_pytest_marker = os.environ.get('DEVITO_PYTEST_FLAG', 0)
try:
partest = int(partest)
inside_pytest_marker = int(inside_pytest_marker)
except ValueError:
pass

if item.get_closest_marker("parallel") and not partest:
if inside_pytest_marker:
outcome = yield

elif item.get_closest_marker("parallel"):
# Spawn parallel processes to run test
outcome = parallel(item, item.funcargs['mode'])
if outcome:
pytest.skip(f"{item} success in parallel")
else:
pytest.fail(f"{item} failed in parallel")

elif item.get_closest_marker("decoupler"):
outcome = decoupler(item, item.funcargs.get('mode'))
if outcome:
pytest.skip(f"{item} success in decoupled mode")
else:
pytest.fail(f"{item} failed in decoupled mode")

else:
outcome = yield

@@ -215,12 +259,17 @@ def pytest_runtest_call(item):
def pytest_runtest_makereport(item, call):
outcome = yield
result = outcome.get_result()
partest = os.environ.get('DEVITO_MPI', 0)

inside_pytest_marker = os.environ.get('DEVITO_PYTEST_FLAG', 0)
try:
partest = int(partest)
inside_pytest_marker = int(inside_pytest_marker)
except ValueError:
pass
if item.get_closest_marker("parallel") and not partest:
if inside_pytest_marker:
return

if item.get_closest_marker("parallel") or \
item.get_closest_marker("decoupler"):
if call.when == 'call' and result.outcome == 'skipped':
result.outcome = 'passed'

11 changes: 11 additions & 0 deletions devito/__init__.py
@@ -1,3 +1,4 @@
import atexit
from itertools import product
from . import _version

@@ -159,4 +160,14 @@ def mode_performance():
configuration['opt-options']['blockinner'] = True


# Ensure the SymPy caches are purged at exit
# For whatever reason, if we don't do this the garbage collector won't do its
# job properly and thus we may end up missing some custom __del__'s
atexit.register(clear_cache)


__version__ = _version.get_versions()['version']


# Clean up namespace
del atexit, product
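A minimal standalone illustration of the atexit pattern used here; purge() is a stand-in for the cache-clearing callable and is not part of Devito.

import atexit

def purge():
    # Runs once, at normal interpreter shutdown, mirroring clear_cache above.
    print("purging caches before exit")

atexit.register(purge)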
23 changes: 23 additions & 0 deletions devito/arch/archinfo.py
@@ -634,6 +634,13 @@ def __repr__(self):
def _detect_isa(self):
return 'unknown'

@property
def numa_domains(self):
"""
Number of NUMA domains, or None if unknown.
"""
return 1

@property
def threads_per_core(self):
return self.cores_logical // self.cores_physical
@@ -706,6 +713,18 @@ def simd_items_per_reg(self, dtype):
assert self.simd_reg_nbytes % np.dtype(dtype).itemsize == 0
return int(self.simd_reg_nbytes / np.dtype(dtype).itemsize)

@cached_property
def numa_domains(self):
try:
return int(lscpu()['NUMA node(s)'])
except KeyError:
warning("NUMA domain count autodetection failed")
return 1

@property
def cores_physical_per_numa_domain(self):
return self.cores_physical // self.numa_domains

@cached_property
def memtotal(self):
return psutil.virtual_memory().total
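A hedged usage sketch of the new NUMA-related properties; it assumes configuration['platform'] resolves to a Cpu64-like platform (the generic Platform defaults to a single domain, and the lscpu-based autodetection warns and falls back to 1 on failure).

from devito import configuration

platform = configuration['platform']
print("NUMA domains:", platform.numa_domains)
# Only defined for CPU platforms: cores_physical // numa_domains
print("physical cores per NUMA domain:", platform.cores_physical_per_numa_domain)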
@@ -785,6 +804,10 @@ def _mro(cls):
def march(self):
return None

@property
def numa_domains(self):
raise NotImplementedError

@cached_property
def memtotal(self):
info = get_gpu_info()
31 changes: 6 additions & 25 deletions devito/builtins/arithmetic.py
@@ -3,24 +3,8 @@
import devito as dv
from devito.builtins.utils import make_retval


__all__ = ['norm', 'sumall', 'sum', 'inner', 'mmin', 'mmax']

accumulator_mapper = {
# Integer accumulates on Float64
np.int8: np.float64, np.uint8: np.float64,
np.int16: np.float64, np.uint16: np.float64,
np.int32: np.float64, np.uint32: np.float64,
np.int64: np.float64, np.uint64: np.float64,
# FloatX accumulates on Float2X
np.float16: np.float32,
np.float32: np.float64,
# NOTE: np.float128 isn't really a thing, see for example
# https://github.com/numpy/numpy/issues/10288
# https://docs.oracle.com/cd/E19957-01/806-3568/ncg_goldberg.html#1070
np.float64: np.float64
}


@dv.switchconfig(log_level='ERROR')
def norm(f, order=2):
@@ -43,9 +27,8 @@ def norm(f, order=2):
# otherwise we would eventually be summing more than expected
p, eqns = f.guard() if f.is_SparseFunction else (f, [])

dtype = accumulator_mapper[f.dtype]
n = make_retval(f.grid, dtype)
s = dv.types.Symbol(name='sum', dtype=dtype)
n = make_retval(f)
s = dv.types.Symbol(name='sum', dtype=n.dtype)

op = dv.Operator([dv.Eq(s, 0.0)] + eqns +
[dv.Inc(s, dv.Abs(Pow(p, order))), dv.Eq(n[0], s)],
@@ -128,9 +111,8 @@ def sumall(f):
# otherwise we would eventually be summing more than expected
p, eqns = f.guard() if f.is_SparseFunction else (f, [])

dtype = accumulator_mapper[f.dtype]
n = make_retval(f.grid, dtype)
s = dv.types.Symbol(name='sum', dtype=dtype)
n = make_retval(f)
s = dv.types.Symbol(name='sum', dtype=n.dtype)

op = dv.Operator([dv.Eq(s, 0.0)] + eqns +
[dv.Inc(s, p), dv.Eq(n[0], s)],
@@ -183,9 +165,8 @@ def inner(f, g):
# otherwise we would eventually be summing more than expected
rhs, eqns = f.guard(f*g) if f.is_SparseFunction else (f*g, [])

dtype = accumulator_mapper[f.dtype]
n = make_retval(f.grid or g.grid, dtype)
s = dv.types.Symbol(name='sum', dtype=dtype)
n = make_retval(f)
s = dv.types.Symbol(name='sum', dtype=n.dtype)

op = dv.Operator([dv.Eq(s, 0.0)] + eqns +
[dv.Inc(s, rhs), dv.Eq(n[0], s)],
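For reference, a small end-to-end sketch of the reduction builtins touched in this file, assuming norm, sumall and inner are importable from the devito top-level namespace; per make_retval, the float32 inputs accumulate on float64.

import numpy as np
from devito import Grid, Function, norm, sumall, inner

grid = Grid(shape=(4, 4))
f = Function(name='f', grid=grid, dtype=np.float32)
g = Function(name='g', grid=grid, dtype=np.float32)
f.data[:] = 1.0
g.data[:] = 2.0

print(norm(f))      # L2 norm: sqrt(16) = 4.0
print(sumall(f))    # 16.0
print(inner(f, g))  # 32.0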
35 changes: 30 additions & 5 deletions devito/builtins/utils.py
@@ -1,26 +1,51 @@
from functools import wraps

import numpy as np

import devito as dv
from devito.symbolics import uxreplace
from devito.tools import as_tuple

__all__ = ['make_retval', 'nbl_to_padsize', 'pad_outhalo', 'abstract_args']


def make_retval(grid, dtype):
accumulator_mapper = {
# Integer accumulates on Float64
np.int8: np.float64, np.uint8: np.float64,
np.int16: np.float64, np.uint16: np.float64,
np.int32: np.float64, np.uint32: np.float64,
np.int64: np.float64, np.uint64: np.float64,
# FloatX accumulates on Float2X
np.float16: np.float32,
np.float32: np.float64,
# NOTE: np.float128 isn't really a thing, see for example
# https://github.com/numpy/numpy/issues/10288
# https://docs.oracle.com/cd/E19957-01/806-3568/ncg_goldberg.html#1070
np.float64: np.float64
}


def make_retval(f):
"""
Devito does not support passing values by reference. This function
creates a dummy Function of size 1 to store the return value of a builtin
applied to `f`.
"""
if grid is None:
raise ValueError("Expected Grid, got None")
if f.grid is None:
raise ValueError("No Grid available")

cls = make_retval.cls or dv.Function

dtype = accumulator_mapper[f.dtype]

i = dv.Dimension(name='mri',)
n = dv.Function(name='n', shape=(1,), dimensions=(i,), grid=grid,
dtype=dtype, space='host')
n = cls(name='n', shape=(1,), dimensions=(i,), grid=f.grid,
dtype=dtype, space='host')

n.data[:] = 0

return n
make_retval.cls = None # noqa
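A hedged sketch of how a caller might drive the new make_retval.cls hook; the context manager below is illustrative and not part of this PR.

from contextlib import contextmanager

@contextmanager
def retval_class(cls):
    # Temporarily swap the Function subclass used for the 1-element buffer.
    previous = make_retval.cls
    make_retval.cls = cls
    try:
        yield
    finally:
        make_retval.cls = previous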


def nbl_to_padsize(nbl, ndim):
