Skip to content

Commit 2fcb2b2

Browse files
authored
Merge pull request #2319 from mikedh/test/iteration
Release: Iteration Tests
2 parents 9dbb10c + 21cbdb6 commit 2fcb2b2

20 files changed

+191
-71
lines changed

pyproject.toml

+1-1
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,7 @@ requires = ["setuptools >= 61.0", "wheel"]
55
[project]
66
name = "trimesh"
77
requires-python = ">=3.8"
8-
version = "4.5.2"
8+
version = "4.5.3"
99
authors = [{name = "Michael Dawson-Haggerty", email = "[email protected]"}]
1010
license = {file = "LICENSE.md"}
1111
description = "Import, export, process, analyze and view triangular meshes."

tests/test_iteration.py

+65
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,65 @@
1+
from functools import reduce
2+
3+
import numpy as np
4+
5+
from trimesh.iteration import chain, reduce_cascade
6+
7+
8+
def test_reduce_cascade():
9+
# the multiply will explode quickly past the integer maximum
10+
def both(operation, items):
11+
"""
12+
Run our cascaded reduce and regular reduce.
13+
"""
14+
15+
b = reduce_cascade(operation, items)
16+
17+
if len(items) > 0:
18+
assert b == reduce(operation, items)
19+
20+
return b
21+
22+
for i in range(20):
23+
data = np.arange(i)
24+
c = both(items=data, operation=lambda a, b: a + b)
25+
26+
if i == 0:
27+
assert c is None
28+
else:
29+
assert c == np.arange(i).sum()
30+
31+
# try a multiply
32+
data = np.arange(i)
33+
c = both(items=data, operation=lambda a, b: a * b)
34+
35+
if i == 0:
36+
assert c is None
37+
else:
38+
assert c == np.prod(data)
39+
40+
# try a multiply
41+
data = np.arange(i)[1:]
42+
c = both(items=data, operation=lambda a, b: a * b)
43+
if i <= 1:
44+
assert c is None
45+
else:
46+
assert c == np.prod(data)
47+
48+
data = ["a", "b", "c", "d", "e", "f", "g"]
49+
print("# reduce_pairwise\n-----------")
50+
r = both(operation=lambda a, b: a + b, items=data)
51+
52+
assert r == "abcdefg"
53+
54+
55+
def test_chain():
56+
# should work on iterables the same as `itertools.chain`
57+
assert np.allclose(chain([1, 3], [4]), [1, 3, 4])
58+
# should work with non-iterable single values
59+
assert np.allclose(chain([1, 3], 4), [1, 3, 4])
60+
# should filter out `None` arguments
61+
assert np.allclose(chain([1, 3], None, 4, None), [1, 3, 4])
62+
63+
64+
if __name__ == "__main__":
65+
test_reduce_cascade()

tests/test_remesh.py

+6
Original file line numberDiff line numberDiff line change
@@ -86,13 +86,19 @@ def test_sub(self):
8686
meshes = [g.trimesh.creation.box(), g.trimesh.creation.icosphere()]
8787

8888
for m in meshes:
89+
# set vertex positions as attributes for trivial check after subdivision
90+
# make sure we're copying the array to avoid in-place check
91+
m.vertex_attributes = {"pos": g.np.array(m.vertices) + 1.0}
92+
8993
s = m.subdivide(face_index=[0, len(m.faces) - 1])
9094
# shouldn't have subdivided in-place
9195
assert len(s.faces) > len(m.faces)
9296
# area should be the same
9397
assert g.np.isclose(m.area, s.area)
9498
# volume should be the same
9599
assert g.np.isclose(m.volume, s.volume)
100+
# position attributes and actual vertices should be the same
101+
assert g.np.allclose(s.vertex_attributes["pos"], s.vertices + 1.0)
96102

97103
max_edge = m.scale / 50
98104
s = m.subdivide_to_size(max_edge=max_edge)

tests/test_util.py

-10
Original file line numberDiff line numberDiff line change
@@ -75,16 +75,6 @@ def test_stack(self):
7575
# this is what should happen
7676
pass
7777

78-
def test_chain(self):
79-
from trimesh.util import chain
80-
81-
# should work on iterables the same as `itertools.chain`
82-
assert g.np.allclose(chain([1, 3], [4]), [1, 3, 4])
83-
# should work with non-iterable single values
84-
assert g.np.allclose(chain([1, 3], 4), [1, 3, 4])
85-
# should filter out `None` arguments
86-
assert g.np.allclose(chain([1, 3], None, 4, None), [1, 3, 4])
87-
8878
def test_has_module(self):
8979
# built-in
9080
assert g.trimesh.util.has_module("collections")

tests/test_voxel.py

+17-4
Original file line numberDiff line numberDiff line change
@@ -67,16 +67,29 @@ def test_marching(self):
6767
g.log.warning("no skimage, skipping marching cubes test")
6868
return
6969

70+
march = g.trimesh.voxel.ops.matrix_to_marching_cubes
71+
7072
# make sure offset is correct
7173
matrix = g.np.ones((3, 3, 3), dtype=bool)
72-
mesh = g.trimesh.voxel.ops.matrix_to_marching_cubes(matrix=matrix)
74+
mesh = march(matrix=matrix)
7375
assert mesh.is_watertight
7476

75-
mesh = g.trimesh.voxel.ops.matrix_to_marching_cubes(matrix=matrix).apply_scale(
76-
3.0
77-
)
77+
mesh = march(matrix=matrix).apply_scale(3.0)
7878
assert mesh.is_watertight
7979

80+
# try an array full of a small number
81+
matrix = g.np.full((3, 3, 3), 0.01, dtype=g.np.float64)
82+
# set some to zero
83+
matrix[:2, :2, :2] = 0.0
84+
85+
a = march(matrix)
86+
assert a.is_watertight
87+
88+
# but with the lowered threshold it should not be empty
89+
b = march(matrix, threshold=-0.001)
90+
assert b.is_watertight
91+
assert b.volume > a.volume
92+
8093
def test_marching_points(self):
8194
"""
8295
Try marching cubes on points

trimesh/__init__.py

+2-3
Original file line numberDiff line numberDiff line change
@@ -82,9 +82,8 @@
8282

8383

8484
__all__ = [
85-
"PointCloud",
8685
"Geometry",
87-
"Trimesh",
86+
"PointCloud",
8887
"Scene",
8988
"Trimesh",
9089
"__version__",
@@ -103,8 +102,8 @@
103102
"graph",
104103
"grouping",
105104
"inertia",
106-
"iteration",
107105
"intersections",
106+
"iteration",
108107
"load",
109108
"load_mesh",
110109
"load_path",

trimesh/exchange/binvox.py

+5-3
Original file line numberDiff line numberDiff line change
@@ -201,7 +201,9 @@ def voxel_from_binvox(rle_data, shape, translate=None, scale=1.0, axis_order="xz
201201
elif axis_order is None or axis_order == "xyz":
202202
encoding = encoding.reshape(shape)
203203
else:
204-
raise ValueError("Invalid axis_order '%s': must be None, 'xyz' or 'xzy'")
204+
raise ValueError(
205+
"Invalid axis_order '%s': must be None, 'xyz' or 'xzy'", axis_order
206+
)
205207

206208
assert encoding.shape == shape
207209

@@ -423,7 +425,7 @@ def __init__(
423425
)
424426

425427
if dimension > 1024 and not exact:
426-
raise ValueError("Maximum dimension using exact is 1024, got %d" % dimension)
428+
raise ValueError("Maximum dimension using exact is 1024, got %d", dimension)
427429
if file_type not in Binvoxer.SUPPORTED_OUTPUT_TYPES:
428430
raise ValueError(
429431
f"file_type {file_type} not in set of supported output types {Binvoxer.SUPPORTED_OUTPUT_TYPES!s}"
@@ -471,7 +473,7 @@ def __init__(
471473
times = np.log2(downsample_factor)
472474
if int(times) != times:
473475
raise ValueError(
474-
"downsample_factor must be a power of 2, got %d" % downsample_factor
476+
"downsample_factor must be a power of 2, got %d", downsample_factor
475477
)
476478
args.extend(("-down",) * int(times))
477479
if downsample_threshold is not None:

trimesh/exchange/gltf.py

+2-2
Original file line numberDiff line numberDiff line change
@@ -1029,14 +1029,14 @@ def _build_accessor(array):
10291029
if vec_length > 4:
10301030
raise ValueError("The GLTF spec does not support vectors larger than 4")
10311031
if vec_length > 1:
1032-
data_type = "VEC%d" % vec_length
1032+
data_type = f"VEC{int(vec_length)}"
10331033
else:
10341034
data_type = "SCALAR"
10351035

10361036
if len(shape) == 3:
10371037
if shape[2] not in [2, 3, 4]:
10381038
raise ValueError("Matrix types must have 4, 9 or 16 components")
1039-
data_type = "MAT%d" % shape[2]
1039+
data_type = f"MAT{int(shape[2])}"
10401040

10411041
# get the array data type as a str stripping off endian
10421042
lookup = array.dtype.str.lstrip("<>")

trimesh/interfaces/__init__.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
11
from . import blender, gmsh
22

33
# add to __all__ as per pep8
4-
__all__ = ["gmsh", "blender"]
4+
__all__ = ["blender", "gmsh"]

trimesh/iteration.py

+10-5
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
import numpy as np
1+
from math import log2
22

33
from .typed import Any, Callable, Iterable, List, NDArray, Sequence, Union
44

@@ -54,12 +54,17 @@ def reduce_cascade(operation: Callable, items: Union[Sequence, NDArray]):
5454
# skip the loop overhead for a single pair
5555
return operation(items[0], items[1])
5656

57-
for _ in range(int(1 + np.log2(len(items)))):
57+
for _ in range(int(1 + log2(len(items)))):
5858
results = []
59-
for i in np.arange(len(items) // 2) * 2:
59+
60+
# loop over pairs of items.
61+
items_mod = len(items) % 2
62+
for i in range(0, len(items) - items_mod, 2):
6063
results.append(operation(items[i], items[i + 1]))
6164

62-
if len(items) % 2:
65+
# if we had a non-even number of items it will have been
66+
# skipped by the loop so append it to our list
67+
if items_mod != 0:
6368
results.append(items[-1])
6469

6570
items = results
@@ -117,7 +122,7 @@ def chain(*args: Union[Iterable[Any], Any, None]) -> List[Any]:
117122
# extend if it's a sequence, otherwise append
118123
[
119124
chained.extend(a)
120-
if (hasattr(a, "__iter__") and not isinstance(a, str))
125+
if (hasattr(a, "__iter__") and not isinstance(a, (str, bytes)))
121126
else chained.append(a)
122127
for a in args
123128
if a is not None

trimesh/path/exchange/load.py

+17-12
Original file line numberDiff line numberDiff line change
@@ -1,58 +1,63 @@
1-
import os
2-
31
from ... import util
42
from ...exchange.ply import load_ply
3+
from ...typed import Optional
54
from ..path import Path
65
from . import misc
76
from .dxf import _dxf_loaders
87
from .svg_io import svg_to_path
98

109

11-
def load_path(file_obj, file_type=None, **kwargs):
10+
def load_path(file_obj, file_type: Optional[str] = None, **kwargs):
1211
"""
1312
Load a file to a Path file_object.
1413
1514
Parameters
1615
-----------
17-
file_obj : One of the following:
16+
file_obj
17+
Accepts many types:
1818
- Path, Path2D, or Path3D file_objects
1919
- open file file_object (dxf or svg)
2020
- file name (dxf or svg)
2121
- shapely.geometry.Polygon
2222
- shapely.geometry.MultiLineString
2323
- dict with kwargs for Path constructor
24-
- (n,2,(2|3)) float, line segments
25-
file_type : str
24+
- `(n, 2, (2|3)) float` line segments
25+
file_type
2626
Type of file is required if file
27-
file_object passed.
27+
object is passed.
2828
2929
Returns
3030
---------
3131
path : Path, Path2D, Path3D file_object
32-
Data as a native trimesh Path file_object
32+
Data as a native trimesh Path file_object
3333
"""
3434
# avoid a circular import
3535
from ...exchange.load import load_kwargs
3636

37+
if isinstance(file_type, str):
38+
# we accept full file names here so make sure we only keep the extension
39+
file_type = util.split_extension(file_type).lower()
40+
3741
# record how long we took
3842
tic = util.now()
3943

4044
if isinstance(file_obj, Path):
41-
# we have been passed a Path file_object so
42-
# do nothing and return the passed file_object
45+
# we have been passed a file object that is already a loaded
46+
# trimesh.path.Path object so do nothing and return
4347
return file_obj
4448
elif util.is_file(file_obj):
4549
# for open file file_objects use loaders
4650
if file_type == "ply":
47-
# we cannot register this exporter to path_loaders since this is already reserved by TriMesh in ply format in trimesh.load()
51+
# we cannot register this exporter to path_loaders
52+
# since this is already reserved for 3D values in `trimesh.load`
4853
kwargs.update(load_ply(file_obj, file_type=file_type))
4954
else:
5055
kwargs.update(path_loaders[file_type](file_obj, file_type=file_type))
5156
elif isinstance(file_obj, str):
5257
# strings passed are evaluated as file file_objects
5358
with open(file_obj, "rb") as f:
5459
# get the file type from the extension
55-
file_type = os.path.splitext(file_obj)[-1][1:].lower()
60+
file_type = util.split_extension(file_obj).lower()
5661
if file_type == "ply":
5762
# we cannot register this exporter to path_loaders since this is already reserved by TriMesh in ply format in trimesh.load()
5863
kwargs.update(load_ply(f, file_type=file_type))

trimesh/path/packing.py

+2-2
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,7 @@
88
import numpy as np
99

1010
from ..constants import log, tol
11-
from ..typed import Integer, Number, Optional
11+
from ..typed import ArrayLike, Integer, NDArray, Number, Optional, float64
1212
from ..util import allclose, bounds_tree
1313

1414
# floating point zero
@@ -692,7 +692,7 @@ def visualize(extents, bounds):
692692
return Scene(meshes)
693693

694694

695-
def roll_transform(bounds, extents):
695+
def roll_transform(bounds: ArrayLike, extents: ArrayLike) -> NDArray[float64]:
696696
"""
697697
Packing returns rotations with integer "roll" which
698698
needs to be converted into a homogeneous rotation matrix.

trimesh/path/util.py

+4-2
Original file line numberDiff line numberDiff line change
@@ -3,14 +3,16 @@
33
from ..util import is_ccw # NOQA
44

55

6-
def concatenate(paths):
6+
def concatenate(paths, **kwargs):
77
"""
88
Concatenate multiple paths into a single path.
99
1010
Parameters
1111
-------------
1212
paths : (n,) Path
1313
Path objects to concatenate
14+
kwargs
15+
Passed through to the path constructor
1416
1517
Returns
1618
-------------
@@ -52,6 +54,6 @@ def concatenate(paths):
5254
# generate the single new concatenated path
5355
# use input types so we don't have circular imports
5456
concat = type(path)(
55-
metadata=metadata, entities=entities, vertices=np.vstack(vertices)
57+
metadata=metadata, entities=entities, vertices=np.vstack(vertices), **kwargs
5658
)
5759
return concat

trimesh/remesh.py

+1-5
Original file line numberDiff line numberDiff line change
@@ -90,11 +90,7 @@ def subdivide(
9090
if vertex_attributes is not None:
9191
new_attributes = {}
9292
for key, values in vertex_attributes.items():
93-
attr_tris = values[faces_subset]
94-
attr_mid = np.vstack(
95-
[attr_tris[:, g, :].mean(axis=1) for g in [[0, 1], [1, 2], [2, 0]]]
96-
)
97-
attr_mid = attr_mid[unique]
93+
attr_mid = values[edges[unique]].mean(axis=1)
9894
new_attributes[key] = np.vstack((values, attr_mid))
9995
return new_vertices, new_faces, new_attributes
10096

0 commit comments

Comments
 (0)