2 changes: 1 addition & 1 deletion .github/workflows/ruff.yml
@@ -11,4 +11,4 @@ jobs:
with:
src: "./castep_outputs"
args: "check"
version: "0.11.0"
version: "0.13.2"
4 changes: 2 additions & 2 deletions .github/workflows/test_castep_outputs.yml
@@ -28,7 +28,7 @@ jobs:
- name: Install vermin
run: python -m pip install vermin
- name: Run Vermin
run: vermin -t=3.9- --lint --no-parse-comments castep_outputs
run: vermin -t=3.10- --lint --no-parse-comments castep_outputs

build:
runs-on: ubuntu-latest
@@ -37,7 +37,7 @@ jobs:
pull-requests: write
strategy:
matrix:
python-version: ["3.9", "3.10", "3.11", "3.12"]
python-version: ["3.10", "3.11", "3.12"]

steps:
- uses: actions/checkout@v4
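Note (illustrative, not part of this changeset): dropping 3.9 from the matrix and raising the vermin target to 3.10 lines up with the language features adopted in the parser changes below, both new in Python 3.10:

    # Minimal sketch of the 3.10-only features relied on further down.
    pairs = list(zip([1, 2, 3], ["a", "b", "c"], strict=True))  # PEP 618: strict zip
    value: int | str = 3                                        # PEP 604: runtime | unions
    print(pairs, value)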
2 changes: 1 addition & 1 deletion castep_outputs/bin_parsers/cst_esp_file_parser.py
@@ -39,7 +39,7 @@ def parse_cst_esp_file(cst_esp_file: BinaryIO) -> ESPData:
accum = {"esp": []}

reader = binary_file_reader(cst_esp_file)
for (key, typ), datum in zip(dtypes.items(), reader):
for (key, typ), datum in zip(dtypes.items(), reader, strict=False):
accum[key] = to_type(datum, typ)

prev_nx = None
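Note (illustrative, not part of this changeset): strict=False keeps the old truncating zip behaviour here, since the binary reader and the dtypes mapping may legitimately differ in length. A toy sketch with made-up stand-ins for dtypes and the reader:

    # Hypothetical data -- not the real ESP header layout.
    dtypes = {"nx": int, "ny": int, "nz": int}
    records = ["4", "4", "4", "trailing-record"]   # reader can yield more items

    for (key, typ), datum in zip(dtypes.items(), records, strict=False):
        print(key, typ(datum))                     # stops after "nz", no error

    # strict=True would instead raise ValueError on the length mismatch.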
43 changes: 28 additions & 15 deletions castep_outputs/parsers/castep_file_parser.py
@@ -12,7 +12,7 @@
from collections import defaultdict
from collections.abc import Callable
from enum import Flag, auto
from typing import Any, TextIO, Union, cast
from typing import Any, TextIO, cast

from ..utilities import castep_res as REs
from ..utilities.castep_res import gen_table_re, get_numbers, labelled_floats
@@ -1683,7 +1683,11 @@ def _process_unit_cell(block: Block) -> CellInfo:
else:
prop.append(float(numbers[0]))

cell.update({name: val for val, name in zip(prop, ("volume", "density_amu", "density_g"))})
cell.update({name: val for val, name in zip(
prop,
("volume", "density_amu", "density_g"),
strict=False,
)})

return cell

@@ -1812,7 +1816,7 @@ def _process_initial_spins(block: Block) -> dict[AtomIndex, InitialSpin]:
ind = atreg_to_index(val)
fix_data_types(val, {"spin": float, "magmom": float})
val["fix"] = val["fix"] == "T"
accum[ind] = cast(dict[str, Union[float, bool]], val)
accum[ind] = cast(dict[str, float | bool], val)
return accum


@@ -2201,8 +2205,9 @@ def _process_dynamical_matrix(block: Block) -> tuple[tuple[complex, ...], ...]:
imag_part = [numbers[2:] for line in block if (numbers := get_numbers(line))]

return tuple(
tuple(complex(float(real), float(imag)) for real, imag in zip(real_row, imag_row))
for real_row, imag_row in zip(real_part, imag_part)
tuple(complex(float(real), float(imag))
for real, imag in zip(real_row, imag_row, strict=True))
for real_row, imag_row in zip(real_part, imag_part, strict=True)
)


@@ -2302,7 +2307,11 @@ def _process_dftd(block: Block) -> dict[str, Any]:
def _process_occupancies(block: Block) -> list[Occupancies]:
label = ("band", "eigenvalue", "occupancy")

accum = [dict(zip(label, numbers)) for line in block if (numbers := get_numbers(line))]
accum = [
dict(zip(label, numbers, strict=True))
for line in block
if (numbers := get_numbers(line))
]
for elem in accum:
fix_data_types(elem, {"band": int,
"eigenvalue": float,
@@ -2369,7 +2378,7 @@ def _process_phonon(block: Block, logger: Logger) -> list[QData]:
head, tail = char_line.split("|")
_, rep, *name, mul = head.split()
*vals, _ = tail.split()
char.append({"chars": tuple(zip(headers, map(int, vals))),
char.append({"chars": tuple(zip(headers, map(int, vals), strict=False)),
"mul": int(mul),
"rep": rep,
"name": name})
@@ -2429,9 +2438,13 @@ def _process_pair_params(block_in: Block) -> dict[str, dict[str, dict | str]]:
if lab not in accum[typ]:
accum[typ][lab] = {}

accum[typ][lab].update(zip(labels,
to_type(match["params"].split(),
float)))
accum[typ][lab].update(
zip(
labels,
to_type(match["params"].split(), float),
strict=True,
),
)

elif match := REs.PAIR_POT_RES["two_body_one_spec"].match(blk_line):
labels = ((match["spec"],),)
@@ -2454,7 +2467,7 @@ def _process_pair_params(block_in: Block) -> dict[str, dict[str, dict | str]]:

accum[typ][lab].update(zip(labels,
to_type(match["params"].split(),
float)))
float), strict=False))

# Globals
elif match := REs.PAIR_POT_RES["three_body_val"].match(line):
@@ -2479,7 +2492,7 @@ def _process_geom_table(block: Block) -> GeomTable:
fix_data_types(val, dict.fromkeys(("lambda", "fdelta", "enthalpy"), float))

key = normalise_string(val.pop("step"))
accum[key] = cast(dict[str, Union[bool, float]], val)
accum[key] = cast(dict[str, bool | float], val)

elif match := REs.GEOMOPT_TABLE_RE.match(line):
val = match.groupdict()
@@ -2488,7 +2501,7 @@ def _process_geom_table(block: Block) -> GeomTable:
val["converged"] = val["converged"] == "Yes"

key = normalise_key(val.pop("parameter"))
accum[key] = cast(dict[str, Union[bool, float]], val)
accum[key] = cast(dict[str, bool | float], val)

return accum

@@ -2523,7 +2536,7 @@ def _process_elastic_properties(block: Block) -> ElasticProperties:
for line in block:
if "::" in line:
key = line.split("::")[0]
val = cast(Union[ThreeVector, SixVector], to_type(get_numbers(line), float))
val = cast(ThreeVector | SixVector, to_type(get_numbers(line), float))

if len(val) == 1:
val = val[0]
@@ -2543,7 +2556,7 @@ def _process_elastic_properties(block: Block) -> ElasticProperties:
def _process_internal_constraints(block: TextIO) -> list[InternalConstraints]:

# Skip table headers
for _ in zip(range(3), block):
for _ in zip(range(3), block, strict=False):
pass

accum = []
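Note (illustrative, not part of this changeset): the cast(...) calls above now spell their unions with PEP 604 | syntax; cast is a no-op at runtime, so only type checkers see the difference. A standalone sketch with invented values:

    from typing import cast

    val = {"lambda": 0.5, "converged": True}
    typed = cast(dict[str, float | bool], val)   # no conversion; the same dict comes back
    assert typed is val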
21 changes: 9 additions & 12 deletions castep_outputs/parsers/cell_param_file_parser.py
@@ -7,7 +7,7 @@
from collections import Counter, defaultdict
from collections.abc import Callable, Sequence
from functools import partial
from typing import Any, Literal, TextIO, TypedDict, Union
from typing import Any, Literal, TextIO, TypedDict

import castep_outputs.utilities.castep_res as REs

@@ -72,18 +72,15 @@ class XCDef(TypedDict):
xc: dict[str, float]


DevelElem = MaybeSequence[Union[str, float, dict[str, Union[str, float]]]]
DevelBlock = dict[str, Union[DevelElem, dict[str, DevelElem]]]
HubbardU = dict[Union[str, AtomIndex], Union[str, dict[str, float]]]
DevelElem = MaybeSequence[str | float | dict[str, str | float]]
DevelBlock = dict[str, DevelElem | dict[str, DevelElem]]
HubbardU = dict[str | AtomIndex, str | dict[str, float]]
CellParamData = dict[
str, Union[str, float, tuple[float, str], dict[str, Any], HubbardU, DevelBlock, XCDef],
str, str | float | tuple[float, str] | dict[str, Any] | HubbardU | DevelBlock | XCDef,
]
GeneralBlock = dict[
str,
Union[
list[Union[str, float]],
dict[str, MaybeSequence[float]],
],
list[str | float] | dict[str, MaybeSequence[float]],
]


@@ -180,7 +177,7 @@ def _parse_pspot_string(string: str, *, debug: bool = False) -> PSPotStrInfo:

for proj in pspot["proj"].split(":"):
if match := REs.PSPOT_PROJ_RE.match(proj):
pdict = dict(zip(REs.PSPOT_PROJ_GROUPS, match.groups()))
pdict = dict(zip(REs.PSPOT_PROJ_GROUPS, match.groups(), strict=True))
else:
raise ValueError("Invalid PSPot string")

@@ -565,12 +562,12 @@ def _parse_general(block: Block) -> GeneralBlock:
if REs.SPEC_PROP_RE.match(line):
if isinstance(block_data["data"], list):
block_data["data"] = {}
typing.cast(dict[str, MaybeSequence[Union[float, str]]], block_data["data"])
typing.cast(dict[str, MaybeSequence[float | str]], block_data["data"])

spec, val = line.strip().split(maxsplit=1)
val = to_type(val, determine_type(val))

typing.cast(MaybeSequence[Union[float, str]], val)
typing.cast(MaybeSequence[float | str], val)

block_data["data"][spec] = val

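Note (illustrative, not part of this changeset): the strict=True added to the projector zip above makes a label/group mismatch fail loudly instead of silently dropping values. A sketch with a hypothetical pattern and label tuple (not REs.PSPOT_PROJ_RE or REs.PSPOT_PROJ_GROUPS):

    import re

    PROJ_GROUPS = ("orbital", "shell", "type")   # hypothetical labels
    proj_re = re.compile(r"(?P<orbital>\d)(?P<shell>[spdf])(?P<type>[A-Z]?)")

    pdict = dict(zip(PROJ_GROUPS, proj_re.match("2p").groups(), strict=True))
    print(pdict)
    # Had the pattern gained a group without the labels being updated,
    # strict=True would raise ValueError rather than discard the extra value.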
2 changes: 1 addition & 1 deletion castep_outputs/parsers/parse_fmt_files.py
@@ -93,7 +93,7 @@ def _parse_kpt_info(
words = line.split()
qpt = to_type(words[0:3], int)
val = to_type(words[3:], float)
stack_dict(qdata, {"q": qpt, **dict(zip(prop, val))})
stack_dict(qdata, {"q": qpt, **dict(zip(prop, val, strict=False))})

return qdata

4 changes: 2 additions & 2 deletions castep_outputs/parsers/tddft_file_parser.py
@@ -2,7 +2,7 @@
from __future__ import annotations

import re
from typing import Literal, TextIO, TypedDict, Union
from typing import Literal, TextIO, TypedDict

from ..utilities import castep_res as REs
from ..utilities.castep_res import get_numbers, labelled_floats
@@ -12,7 +12,7 @@
from .parse_utilities import parse_regular_header

#: Overlap type
TDDFTOverlap = dict[Union[tuple[int, int], Literal["total"]], float]
TDDFTOverlap = dict[tuple[int, int] | Literal["total"], float]


class TDDFTSpectroData(TypedDict):
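Note (illustrative, not part of this changeset): the rewritten TDDFTOverlap alias maps either a (band, band) pair or the literal key "total" to a float; a toy value of that shape (numbers invented):

    from typing import Literal

    TDDFTOverlap = dict[tuple[int, int] | Literal["total"], float]

    overlap: TDDFTOverlap = {(1, 1): 0.98, (1, 2): 0.01, "total": 0.99}
    print(overlap["total"])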
3 changes: 1 addition & 2 deletions castep_outputs/utilities/castep_res.py
@@ -5,12 +5,11 @@
import itertools
import re
from collections.abc import Sequence
from typing import Union

from .constants import FST_D, MINIMISERS, SHELLS, SND_D

#: Valid input where patterns are wanted.
Pattern = Union[str, re.Pattern]
Pattern = str | re.Pattern


def get_numbers(line: str) -> list[str]:
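Note (illustrative, not part of this changeset): Pattern = str | re.Pattern is now a plain runtime union; a hypothetical helper (not part of castep_res) showing how such a parameter can be normalised:

    import re

    Pattern = str | re.Pattern

    def compile_pattern(pattern: Pattern) -> re.Pattern:
        """Accept a raw string or an already-compiled pattern."""
        return pattern if isinstance(pattern, re.Pattern) else re.compile(pattern)

    print(compile_pattern(r"-?\d+\.?\d*").findall("x = -1.5, y = 2"))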
4 changes: 2 additions & 2 deletions castep_outputs/utilities/datatypes.py
@@ -2,7 +2,7 @@
from __future__ import annotations

from collections.abc import Callable, Sequence
from typing import Any, Literal, TextIO, TypedDict, TypeVar, Union
from typing import Any, Literal, TextIO, TypedDict, TypeVar

T = TypeVar("T")

@@ -11,7 +11,7 @@

# General types

MaybeSequence = Union[Sequence[T], T]
MaybeSequence = Sequence[T] | T

#: CASTEP atom keys.
AtomIndex = tuple[str, int]
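Note (illustrative, not part of this changeset): MaybeSequence = Sequence[T] | T now builds its union with the | operator, which works on 3.10+ even with a TypeVar operand. A self-contained sketch of how such an alias is typically consumed; the helper below is hypothetical:

    from __future__ import annotations

    from collections.abc import Sequence
    from typing import TypeVar

    T = TypeVar("T")
    MaybeSequence = Sequence[T] | T

    def as_tuple(value: MaybeSequence[float]) -> tuple[float, ...]:
        """Normalise a lone value or a sequence of values to a tuple."""
        if isinstance(value, Sequence):
            return tuple(value)
        return (value,)

    print(as_tuple(1.0), as_tuple([1.0, 2.0]))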
3 changes: 2 additions & 1 deletion castep_outputs/utilities/dumpers.py
@@ -3,8 +3,9 @@

import json
import pprint
from collections.abc import Callable
from contextlib import suppress
from typing import Any, Callable, TextIO
from typing import Any, TextIO

_YAML_TYPE = None

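Note (illustrative, not part of this changeset): Callable moves to its collections.abc home, the location PEP 585 prefers over the typing alias. A minimal sketch of a dumper-style callable annotation; the function below is hypothetical, not the module's API:

    import json
    import sys
    from collections.abc import Callable
    from typing import Any, TextIO

    def json_dumper(data: Any, file: TextIO) -> None:
        json.dump(data, file, indent=2)

    dump: Callable[[Any, TextIO], None] = json_dumper
    dump({"castep": "outputs"}, sys.stdout)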
3 changes: 1 addition & 2 deletions pyproject.toml
@@ -6,7 +6,7 @@ build-backend = "setuptools.build_meta"
name = "castep_outputs"
authors = [{name = "Jacob Wilkins", email = "[email protected]"}]
dynamic = ["version"]
requires-python = ">= 3.9"
requires-python = ">= 3.10"
readme = "README.rst"
description = "A package for extracting information from castep outputs"
keywords = ["castep", "dft", "parser"]
@@ -50,7 +50,6 @@ version = {attr = "castep_outputs.__version__"}
[tool.ruff]
line-length = 100
indent-width = 4
target-version = "py39"

# Exclude a variety of commonly ignored directories.
extend-exclude = [
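Note (illustrative, not part of this changeset): with requires-python raised to ">= 3.10", the explicit target-version = "py39" becomes redundant; when the key is omitted, ruff infers its target from requires-python, so the lint target now tracks the package's Python floor automatically.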