10 changes: 3 additions & 7 deletions .readthedocs.yaml
@@ -8,16 +8,12 @@ version: 2
 build:
   os: ubuntu-24.04
   tools:
-    python: "3.13"
-  jobs:
-    pre_build:
-    - "jupyter-book config sphinx docs/source"
+    python: "3.12"
 
 python:
   install:
   - requirements: requirements.txt
 
-# Build documentation in the "docs/source" directory with Sphinx
+# Build documentation with Sphinx
 sphinx:
   builder: html
-  configuration: docs/source/_config.yml
+  configuration: docs/source/conf.py
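Note: with the jupyter-book pre_build step removed, the Read the Docs job is a plain Sphinx build driven by docs/source/conf.py. A rough local equivalent, as a sketch (the output directory here is only an example, not part of the change):

```python
# Local rebuild roughly equivalent to the Read the Docs job above.
# Source/config paths come from the diff; the output path is illustrative.
from sphinx.cmd.build import build_main

exit_code = build_main(['-b', 'html', 'docs/source', 'docs/_build/html'])
raise SystemExit(exit_code)
```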
4 changes: 4 additions & 0 deletions docs/source/_config.yml
@@ -1,6 +1,10 @@
 # Book settings
 # Learn more at https://jupyterbook.org/customize/config.html
 
+# Jupyter Book version
+# This is required for jupyter-book < 1.0
+version: 0.15.1
+
 title: OpenMC Fusion Benchmarks
 logo: images/logo.svg

23 changes: 4 additions & 19 deletions docs/source/_toc.yml
@@ -3,22 +3,7 @@

 format: jb-book
 root: intro
-parts:
-  - caption: Installation
-    chapters:
-      - file: user_installation
-      - file: dev_installation
-  - caption: FNG
-    chapters:
-      - file: fng_readme
-      - file: fng_str_benchmark
-      - file: fng_w_benchmark
-  - caption: FNS
-    chapters:
-      - file: fns_readme
-      - file: fns_clean_w_benchmark
-      - file: fns_duct_benchmark
-  - caption: Oktavian
-    chapters:
-      - file: oktavian_readme
-      - file: oktavian_al_benchmark
+chapters:
+  - file: user_installation
+  - file: oktavian_readme
+  - file: oktavian_al_benchmark
51 changes: 51 additions & 0 deletions docs/source/conf.py
@@ -0,0 +1,51 @@
# Configuration file for the Sphinx documentation builder.
# This file replaces jupyter-book build system

import os
import sys

# -- Project information -----------------------------------------------------
project = 'OpenMC Fusion Benchmarks'
copyright = '2025, OpenMC Fusion Benchmarks Contributors'
author = 'OpenMC Fusion Benchmarks Contributors'

# -- General configuration ---------------------------------------------------
extensions = [
    'myst_parser',
    'sphinxcontrib.bibtex',
]

# Bibliography files
bibtex_bibfiles = ['references.bib']
bibtex_reference_style = 'label'

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']

# -- Options for HTML output -------------------------------------------------
html_theme = 'sphinx_book_theme'
html_logo = 'images/logo.svg'
html_title = 'OpenMC Fusion Benchmarks'

html_theme_options = {
    'repository_url': 'https://github.com/eepeterson/openmc_fusion_benchmarks',
    'use_repository_button': True,
    'use_issues_button': True,
    'path_to_docs': 'docs/source',
    'repository_branch': 'develop',
}

# -- MyST options ------------------------------------------------------------
myst_enable_extensions = [
    'colon_fence',
    'deflist',
    'dollarmath',
]

# Don't execute notebooks (set to 'off' to disable)
# This is equivalent to jupyter-book's execute_notebooks: false
jupyter_execute_notebooks = 'off'
16 changes: 16 additions & 0 deletions docs/source/index.rst
@@ -0,0 +1,16 @@
OpenMC Fusion Benchmarks
=========================

.. toctree::
   :maxdepth: 2
   :caption: Contents:

   intro
   user_installation
   oktavian_readme
   oktavian_al_benchmark

References
----------

.. bibliography::
5 changes: 4 additions & 1 deletion requirements.txt
@@ -1 +1,4 @@
-jupyter-book
+sphinx>=7.0.0
+myst-parser
+sphinx-book-theme
+sphinxcontrib-bibtex
1 change: 1 addition & 0 deletions src/openmc_fusion_benchmarks/__init__.py
@@ -1,4 +1,5 @@
 from openmc_fusion_benchmarks.benchmark import *
+from openmc_fusion_benchmarks.benchmark_results import *
 from openmc_fusion_benchmarks.validate import *
 import openmc_fusion_benchmarks.uq

5 changes: 2 additions & 3 deletions src/openmc_fusion_benchmarks/benchmark.py
@@ -446,9 +446,8 @@ def run(self, uq: bool = False, *args, **kwargs):
             self._uncertainty_quantification(*args, **kwargs)
         else:
             # Run the OpenMC model
-            # self._run_model(*args, **kwargs)
-            # Run the OpenMC model
-            statepoint = self.model.run(*args, **kwargs)
+            sp = self.model.run(*args, **kwargs)
+            statepoint = openmc.StatePoint(sp)
             # Post-process the results
             self._postprocess(statepoint=statepoint)

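Note: openmc.Model.run() returns the path of the statepoint file rather than an opened openmc.StatePoint, which is why the run and open steps are now split. A minimal standalone sketch of the same pattern (the helper name is hypothetical):

```python
import openmc

def run_and_open(model: openmc.Model) -> openmc.StatePoint:
    # Model.run() writes statepoint.*.h5 and returns its path
    sp_path = model.run()
    # Open the statepoint so tallies can be read during post-processing
    return openmc.StatePoint(sp_path)
```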
84 changes: 84 additions & 0 deletions src/openmc_fusion_benchmarks/benchmark_results.py
@@ -0,0 +1,84 @@
import h5py
import xarray as xr
import numpy as np


class BenchmarkResults:
    def __init__(self, filepath: str):
        self.filepath = filepath

    @property
    def tallies(self):
        with h5py.File(self.filepath, 'r') as f:
            tallies = list(f.keys())
        return tallies

    def get_tally(self, name: str) -> xr.DataArray:
        return xr.load_dataarray(self.filepath, group=name)


# # Some utilities for data analysis
# def get_means(tally: xr.DataArray) -> xr.DataArray:
#     """Extract tally means from the tally DataArray."""
#     return tally.sel(column='mean').squeeze()

# def get_stds(tally: xr.DataArray) -> xr.DataArray:
#     """Extract tally standard deviations from the tally DataArray."""
#     return tally.sel(column='std. dev.').squeeze()

# def get_rstds(tally: xr.DataArray) -> xr.DataArray:
#     """Compute tally relative standard deviations from the tally DataArray."""
#     mean_vals = get_means(tally)
#     std_vals = get_stds(tally)
#     return std_vals / mean_vals


# # UQ-TMC base analysis functions - Move in uq/ ?
# def mean_of_means(tally: xr.DataArray) -> xr.DataArray:
#     """Compute the mean of the means across realizations."""
#     means = get_means(tally)
#     return means.mean(dim='realization')

# def std_of_means(tally: xr.DataArray) -> xr.DataArray:
#     """Compute the standard deviation of the means across realizations."""
#     means = get_means(tally)
#     return means.std(dim='realization')

# def rstd_of_means(tally: xr.DataArray) -> xr.DataArray:
#     """Compute the relative standard deviation of the means across realizations."""
#     mean_vals = mean_of_means(tally)
#     std_vals = std_of_means(tally)
#     return std_vals / mean_vals

# # UQ-TMC dynamic realization analysis functions - Move in uq/ ?
# def dynamic_mean_of_means(tally: xr.DataArray) -> np.ndarray:
#     """Compute the dynamic mean of the means across realizations."""
#     means = get_means(tally)
#     return np.array([means[:i].mean(dim='realization') for i in range(2, len(means.realization) + 1)])

# def dynamic_std_of_means(tally: xr.DataArray) -> np.ndarray:
#     """Compute the dynamic standard deviation of the means across realizations."""
#     means = get_means(tally)
#     return np.array([
#         means[:i].std(dim='realization') for i in range(2, len(means.realization) + 1)
#     ])

# def dynamic_rstd_of_means(tally: xr.DataArray) -> np.ndarray:
#     """Compute the dynamic relative standard deviation of the means across realizations."""
#     means = get_means(tally)
#     return np.array([
#         means[:i].std(dim='realization') / means[:i].mean(dim='realization')
#         for i in range(2, len(means.realization) + 1)
#     ])

# def dynamic_rstd_of_rstds(tally: xr.DataArray) -> np.ndarray:
#     """Compute the dynamic relative standard deviation of the relative standard deviations across realizations."""
#     rstds = dynamic_rstd_of_means(tally)
#     return np.array([
#         rstds[:i].std() / rstds[:i].mean() for i in range(2, len(rstds) + 1)
#     ])

# def derivative_of_dynamic_rstds(tally: xr.DataArray) -> np.ndarray:
#     """Compute the derivative of the dynamic relative standard deviations."""
#     dynamic_rstds = dynamic_rstd_of_means(tally)
#     return np.gradient(dynamic_rstds, axis=0)
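
Note: because __init__.py now re-exports this module, the class is available at package level. A short usage sketch, assuming a results file such as the tmc_results.h5 written by tmc_engine, with a 'mean' column stored in each tally group:

```python
from openmc_fusion_benchmarks import BenchmarkResults

results = BenchmarkResults('tmc_results.h5')
print(results.tallies)                     # one HDF5 group per tally

# Each group holds a DataArray with dims (realization, row, column)
da = results.get_tally(results.tallies[0])
means = da.sel(column='mean').squeeze()    # per-realization tally means
```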
11 changes: 7 additions & 4 deletions src/openmc_fusion_benchmarks/uq/tmc_engine.py
@@ -77,20 +77,23 @@ def tmc_engine(model: openmc.Model, realizations: int, lib_name: str, nuclide,
         tally = sp.get_tally(id=t)
         df = tally.get_pandas_dataframe()
 
+        df = df.drop(columns=['surface', 'cell', 'particle', 'nuclide',
+                              'score', 'energyfunction'], errors='ignore')
+
         # Convert to xarray and add dimensions
-        t = xr.DataArray(
+        da = xr.DataArray(
             df.values[np.newaxis, :, :],  # shape: (1, r, c)
             dims=["realization", "row", "column"],
             coords={
                 "realization": [realization_label],
                 "column": df.columns,
                 "row": np.arange(df.shape[0]),
             },
-            name=t.name
+            name=tally.name
         )
 
-        _save_result(new_result=t, filename="tmc_results.h5",
-                     group=t.name, realization_label=f'{nuclide}_{n}_{lib_name}')
+        _save_result(new_result=da, filename="tmc_results.h5",
+                     group=tally.name, realization_label=f'{nuclide}_{n}_{lib_name}')
 
     Path('summary.h5').unlink(missing_ok=True)
     Path(statepoint).unlink(missing_ok=True)
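Note: for reference, a toy version of the DataFrame-to-DataArray conversion above, with a hand-made DataFrame standing in for tally.get_pandas_dataframe() (the column names and realization label are illustrative):

```python
import numpy as np
import pandas as pd
import xarray as xr

# Stand-in for tally.get_pandas_dataframe() after the drop() call
df = pd.DataFrame({'energy low [eV]': [0.0, 1.0e6],
                   'mean': [0.5, 0.3],
                   'std. dev.': [0.01, 0.02]})

da = xr.DataArray(
    df.values[np.newaxis, :, :],                 # shape (1, rows, columns)
    dims=['realization', 'row', 'column'],
    coords={'realization': ['Fe56_0_fendl32'],   # hypothetical label
            'column': df.columns,
            'row': np.arange(df.shape[0])},
    name='toy_tally',
)
print(da.sel(column='mean').squeeze().values)    # means for this realization
```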
10 changes: 8 additions & 2 deletions src/openmc_fusion_benchmarks/utils.py
@@ -28,15 +28,17 @@ def _openmc_to_ofb(spec_tallies: str, statepoint: openmc.StatePoint,
         for f in spec_t['filters']:
             if f['type'] == 'cell':
                 # Get cell volumes for normalization
-                norm = [msh.volumes_by_id[v].area for v in f['values']]
+                norm = [msh.volumes_by_id[v].volume for v in f['values']]
             elif f['type'] == 'surface':
                 # Get surface areas for normalization
                 norm = [msh.surfaces_by_id[v].area for v in f['values']]
+            elif f['type'] == 'material':
+                raise NotImplementedError(
+                    'Material filter not implemented in postprocess yet.')
             else:
                 norm = 1
 
-        # Normalize the tally data
         # Normalize the tally data
         df['mean'] = df['mean'] / norm
         df['std. dev.'] = df['std. dev.'] / norm

@@ -68,13 +70,17 @@ def _save_result(new_result: xr.DataArray, filename: str, group: str, realizatio
         # First time -> create file with this group
         new_result.to_netcdf(
             path, mode="w", engine="netcdf4", group=group)
+        new_result = new_result.assign_coords(
+            realization=new_result.realization.astype(object))
         print(f"Created file '{filename}' with group '{group}'")
         return
 
     # File exists -> try to read & merge
     try:
         with xr.open_dataset(path, group=group, engine="netcdf4") as existing:
             existing_da = xr.load_dataarray(path, group=group)
+            existing_da = existing_da.assign_coords(
+                realization=existing_da.realization.astype(object))
 
             # Align coords explicitly so realization labels don’t clash
             combined = xr.concat([existing_da, new_result], dim="realization")
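Note: the astype(object) casts are presumably there so the string realization labels on both sides share a plain object dtype before xr.concat (labels read back from netCDF can come back as fixed-width NumPy strings), avoiding dtype coercion when longer labels are appended. A minimal sketch of the pattern with made-up labels:

```python
import numpy as np
import xarray as xr

existing = xr.DataArray([1.0], dims='realization',
                        coords={'realization': np.array(['Fe56_0_fendl32'])})
new = xr.DataArray([2.0], dims='realization',
                   coords={'realization': ['Fe56_10_endfb8_longer_label']})

# Cast both realization coords to plain Python strings before concatenating
existing = existing.assign_coords(realization=existing.realization.astype(object))
new = new.assign_coords(realization=new.realization.astype(object))

combined = xr.concat([existing, new], dim='realization')
print(combined.realization.dtype)   # object
```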