
Commit f917759

Author: Alejandro Gaston Alvarez Franceschi
Committed: Jan 9, 2024
Improve Functional convolution tests
1 parent 9be8f1b commit f917759

File tree: 1 file changed (+112 -20 lines)

 

coremltools/converters/mil/frontend/torch/test/test_torch_ops.py

+112 -20
@@ -1694,21 +1694,25 @@ def test_convolution1d(
         self,
         compute_unit,
         backend,
+        padding,
         width,
         in_channels,
         out_channels,
         kernel_size,
         stride,
-        padding,
         groups=1,
     ):
-        class DynamicConv(nn.Module):
+        if padding == "same" and stride != 1:
+            # configuration not supported
+            return
+
+        class FunctionalConv1D(nn.Module):
             def forward(self, input_data, weights):
                 return nn.functional.conv1d(
-                    input_data, weights, stride=stride, padding=padding
+                    input_data, weights, stride=stride, padding=padding, groups=groups
                 )
 
-        model = DynamicConv()
+        model = FunctionalConv1D()
         input_shape = [
             (1, in_channels, width),
             (out_channels, int(in_channels / groups), kernel_size),
@@ -1725,29 +1729,30 @@ def forward(self, input_data, weights):
             [
                 "compute_unit",
                 "backend",
+                "padding",
                 "height",
                 "width",
                 "in_channels",
                 "out_channels",
                 "kernel_size",
                 "stride",
-                "padding",
             ]
         ),
         [
-            (compute_unit, backend, *param)
-            for compute_unit, backend, param in itertools.product(
+            (compute_unit, backend, padding, *param)
+            for compute_unit, backend, padding, param in itertools.product(
                 compute_units,
                 backends,
+                ["same", "valid", 1, 0],
                 [
-                    (5, 3, 1, 1, 1, 2, 0),
-                    (3, 3, 1, 1, 1, 2, 1),
-                    (4, 3, 3, 3, 1, 2, 0),
-                    (7, 3, 3, 3, 1, 3, 0),
-                    (5, 5, 3, 3, 2, 1, 0),
-                    (3, 5, 3, 3, 1, 3, 0),
-                    (3, 5, 3, 3, 1, 3, 1),
-                    (7, 5, 3, 3, 2, 3, 1),
+                    (5, 3, 1, 1, 1, 2),
+                    (3, 3, 1, 1, 1, 2),
+                    (4, 3, 3, 3, 1, 2),
+                    (7, 3, 3, 3, 1, 3),
+                    (5, 5, 3, 3, 2, 1),
+                    (3, 5, 3, 3, 1, 3),
+                    (3, 5, 3, 3, 1, 3),
+                    (7, 5, 3, 3, 2, 3),
                 ],
             )
         ],
@@ -1756,31 +1761,118 @@ def test_convolution2d(
         self,
         compute_unit,
         backend,
+        padding,
         height,
         width,
         in_channels,
         out_channels,
         kernel_size,
         stride,
-        padding,
         groups=1,
     ):
-        class DynamicConv(nn.Module):
+        if padding == "same" and stride != 1:
+            # configuration not supported
+            return
+
+        class FunctionalConv2D(nn.Module):
             def forward(self, input_data, weights):
                 return nn.functional.conv2d(
-                    input_data, weights, stride=stride, padding=padding
+                    input_data, weights, stride=stride, padding=padding, groups=groups
                 )
 
-        model = DynamicConv()
+        model = FunctionalConv2D()
 
         input_shape = [
             (1, in_channels, height, width),
             (out_channels, int(in_channels / groups), kernel_size, kernel_size),
         ]
         self.run_compare_torch(
-            input_shape, model, backend=backend, compute_unit=compute_unit
+            input_shape,
+            model,
+            backend=backend,
+            compute_unit=compute_unit,
         )
 
+    @pytest.mark.parametrize(
+        ",".join(
+            [
+                "compute_unit",
+                "backend",
+                "padding",
+                "depth",
+                "height",
+                "width",
+                "in_channels",
+                "out_channels",
+                "kernel_size",
+                "stride",
+            ]
+        ),
+        [
+            (compute_unit, backend, padding, *param)
+            for compute_unit, backend, padding, param in itertools.product(
+                compute_units,
+                backends,
+                ["same", "valid", 1, 0],
+                [
+                    (5, 3, 2, 1, 1, 1, 2),
+                    (3, 3, 1, 1, 1, 1, 2),
+                    (4, 3, 3, 3, 3, 1, 2),
+                    (7, 3, 4, 3, 3, 1, 3),
+                    (5, 5, 3, 3, 3, 2, 1),
+                    (3, 5, 1, 3, 3, 1, 3),
+                    (3, 5, 4, 3, 3, 1, 3),
+                    (7, 5, 6, 3, 3, 2, 3),
+                ],
+            )
+        ],
+    )
+    def test_convolution3d(
+        self,
+        compute_unit,
+        backend,
+        padding,
+        depth,
+        height,
+        width,
+        in_channels,
+        out_channels,
+        kernel_size,
+        stride,
+        groups=1,
+    ):
+        if padding == "same" and stride != 1:
+            # configuration not supported
+            return
+
+        class FunctionalConv3D(nn.Module):
+            def forward(self, input_data, weights):
+                return nn.functional.conv3d(
+                    input_data, weights, stride=stride, padding=padding, groups=groups
+                )
+
+        model = FunctionalConv3D()
+        input_shape = [
+            (1, in_channels, depth, height, width),
+            (out_channels, int(in_channels / groups), kernel_size, kernel_size, kernel_size),
+        ]
+
+        if "neuralnetwork" in backend:
+            with pytest.raises(ValueError, match="3D Convolution doesn't support dynamic weights."):
+                self.run_compare_torch(
+                    input_shape,
+                    model,
+                    backend=backend,
+                    compute_unit=compute_unit,
+                )
+        else:
+            self.run_compare_torch(
+                input_shape,
+                model,
+                backend=backend,
+                compute_unit=compute_unit,
+            )
+
 
 class TestConvTranspose(TorchBaseTest):
     @pytest.mark.parametrize(
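Note on the early return added to each test: PyTorch's functional convolutions only accept padding="same" when the stride is 1, so parametrizations that combine "same" with a larger stride are skipped rather than converted. The sketch below is not part of the commit; it only illustrates that PyTorch constraint for nn.functional.conv1d, with arbitrary example tensor sizes.

# Minimal sketch, assuming PyTorch >= 1.9; tensor sizes are arbitrary examples.
import torch
import torch.nn.functional as F

x = torch.randn(1, 3, 7)   # (batch, in_channels, width)
w = torch.randn(4, 3, 3)   # (out_channels, in_channels // groups, kernel_size)

# Supported: padding="same" with the default stride of 1.
y = F.conv1d(x, w, stride=1, padding="same")
print(y.shape)             # torch.Size([1, 4, 7])

# Not supported: padding="same" with stride != 1 raises a RuntimeError,
# which is why the tests above return early for that combination.
try:
    F.conv1d(x, w, stride=2, padding="same")
except RuntimeError as err:
    print(err)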
