@@ -1694,21 +1694,25 @@ def test_convolution1d(
         self,
         compute_unit,
         backend,
+        padding,
         width,
         in_channels,
         out_channels,
         kernel_size,
         stride,
-        padding,
         groups=1,
     ):
-        class DynamicConv(nn.Module):
+        if padding == "same" and stride != 1:
+            # configuration not supported
+            return
+
+        class FunctionalConv1D(nn.Module):
             def forward(self, input_data, weights):
                 return nn.functional.conv1d(
-                    input_data, weights, stride=stride, padding=padding
+                    input_data, weights, stride=stride, padding=padding, groups=groups
                 )
 
-        model = DynamicConv()
+        model = FunctionalConv1D()
         input_shape = [
             (1, in_channels, width),
             (out_channels, int(in_channels / groups), kernel_size),
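Aside, not part of the diff: the early return added above mirrors a restriction in PyTorch itself, which rejects padding="same" whenever the stride is not 1. A minimal standalone check, assuming a PyTorch version with string padding support (1.9+):

import torch
import torch.nn.functional as F

x = torch.randn(1, 3, 8)  # (batch, in_channels, width)
w = torch.randn(3, 3, 3)  # (out_channels, in_channels, kernel_size)

# Works: "same" padding with stride 1 preserves the input width.
print(F.conv1d(x, w, stride=1, padding="same").shape)  # torch.Size([1, 3, 8])

# Fails: PyTorch raises for "same" padding with any stride other than 1.
try:
    F.conv1d(x, w, stride=2, padding="same")
except RuntimeError as exc:
    print(exc)  # padding='same' is not supported for strided convolutions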
@@ -1725,29 +1729,30 @@ def forward(self, input_data, weights):
             [
                 "compute_unit",
                 "backend",
+                "padding",
                 "height",
                 "width",
                 "in_channels",
                 "out_channels",
                 "kernel_size",
                 "stride",
-                "padding",
             ]
         ),
         [
-            (compute_unit, backend, *param)
-            for compute_unit, backend, param in itertools.product(
+            (compute_unit, backend, padding, *param)
+            for compute_unit, backend, padding, param in itertools.product(
                 compute_units,
                 backends,
+                ["same", "valid", 1, 0],
                 [
-                    (5, 3, 1, 1, 1, 2, 0),
-                    (3, 3, 1, 1, 1, 2, 1),
-                    (4, 3, 3, 3, 1, 2, 0),
-                    (7, 3, 3, 3, 1, 3, 0),
-                    (5, 5, 3, 3, 2, 1, 0),
-                    (3, 5, 3, 3, 1, 3, 0),
-                    (3, 5, 3, 3, 1, 3, 1),
-                    (7, 5, 3, 3, 2, 3, 1),
+                    (5, 3, 1, 1, 1, 2),
+                    (3, 3, 1, 1, 1, 2),
+                    (4, 3, 3, 3, 1, 2),
+                    (7, 3, 3, 3, 1, 3),
+                    (5, 5, 3, 3, 2, 1),
+                    (3, 5, 3, 3, 1, 3),
+                    (3, 5, 3, 3, 1, 3),
+                    (7, 5, 3, 3, 2, 3),
                 ],
             )
         ],
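Aside, not part of the diff: moving padding into itertools.product exercises every padding mode against every shape tuple instead of hard-coding one padding value per case, and *param splices each tuple back into pytest's flat argument list. A small sketch using two of the tuples above:

import itertools

paddings = ["same", "valid", 1, 0]
# (height, width, in_channels, out_channels, kernel_size, stride)
shape_params = [(5, 3, 1, 1, 1, 2), (3, 3, 1, 1, 1, 2)]

cases = [(padding, *param) for padding, param in itertools.product(paddings, shape_params)]
print(len(cases))  # 8: each of 4 padding modes paired with each of 2 tuples
print(cases[0])    # ('same', 5, 3, 1, 1, 1, 2)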
@@ -1756,31 +1761,118 @@ def test_convolution2d(
         self,
         compute_unit,
         backend,
+        padding,
         height,
         width,
         in_channels,
         out_channels,
         kernel_size,
         stride,
-        padding,
         groups=1,
     ):
-        class DynamicConv(nn.Module):
+        if padding == "same" and stride != 1:
+            # configuration not supported
+            return
+
+        class FunctionalConv2D(nn.Module):
             def forward(self, input_data, weights):
                 return nn.functional.conv2d(
-                    input_data, weights, stride=stride, padding=padding
+                    input_data, weights, stride=stride, padding=padding, groups=groups
                 )
 
-        model = DynamicConv()
+        model = FunctionalConv2D()
 
         input_shape = [
             (1, in_channels, height, width),
             (out_channels, int(in_channels / groups), kernel_size, kernel_size),
         ]
         self.run_compare_torch(
-            input_shape, model, backend=backend, compute_unit=compute_unit
+            input_shape,
+            model,
+            backend=backend,
+            compute_unit=compute_unit,
         )
 
+    @pytest.mark.parametrize(
+        ",".join(
+            [
+                "compute_unit",
+                "backend",
+                "padding",
+                "depth",
+                "height",
+                "width",
+                "in_channels",
+                "out_channels",
+                "kernel_size",
+                "stride",
+            ]
+        ),
+        [
+            (compute_unit, backend, padding, *param)
+            for compute_unit, backend, padding, param in itertools.product(
+                compute_units,
+                backends,
+                ["same", "valid", 1, 0],
+                [
+                    (5, 3, 2, 1, 1, 1, 2),
+                    (3, 3, 1, 1, 1, 1, 2),
+                    (4, 3, 3, 3, 3, 1, 2),
+                    (7, 3, 4, 3, 3, 1, 3),
+                    (5, 5, 3, 3, 3, 2, 1),
+                    (3, 5, 1, 3, 3, 1, 3),
+                    (3, 5, 4, 3, 3, 1, 3),
+                    (7, 5, 6, 3, 3, 2, 3),
+                ],
+            )
+        ],
+    )
+    def test_convolution3d(
+        self,
+        compute_unit,
+        backend,
+        padding,
+        depth,
+        height,
+        width,
+        in_channels,
+        out_channels,
+        kernel_size,
+        stride,
+        groups=1,
+    ):
+        if padding == "same" and stride != 1:
+            # configuration not supported
+            return
+
+        class FunctionalConv3D(nn.Module):
+            def forward(self, input_data, weights):
+                return nn.functional.conv3d(
+                    input_data, weights, stride=stride, padding=padding, groups=groups
+                )
+
+        model = FunctionalConv3D()
+        input_shape = [
+            (1, in_channels, depth, height, width),
+            (out_channels, int(in_channels / groups), kernel_size, kernel_size, kernel_size),
+        ]
+
+        if "neuralnetwork" in backend:
+            with pytest.raises(ValueError, match="3D Convolution doesn't support dynamic weights."):
+                self.run_compare_torch(
+                    input_shape,
+                    model,
+                    backend=backend,
+                    compute_unit=compute_unit,
+                )
+        else:
+            self.run_compare_torch(
+                input_shape,
+                model,
+                backend=backend,
+                compute_unit=compute_unit,
+            )
+
 
 class TestConvTranspose(TorchBaseTest):
     @pytest.mark.parametrize(
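Aside, not part of the diff: the "dynamic weights" in the expected ValueError refers to a kernel that arrives as a runtime input to forward() rather than as a stored parameter, so the converter cannot fold it into a constant; a conventional nn.Conv3d carries its weights as constants, which is what the message distinguishes. A standalone PyTorch-only illustration of the two styles:

import torch
import torch.nn as nn

class StaticConv3D(nn.Module):
    def __init__(self):
        super().__init__()
        # Weights live on the module, so they are constants at conversion time.
        self.conv = nn.Conv3d(in_channels=3, out_channels=3, kernel_size=3)

    def forward(self, x):
        return self.conv(x)

class DynamicConv3D(nn.Module):
    def forward(self, x, w):
        # Weights are a runtime input; per the test above, the neuralnetwork
        # backend rejects this for 3D convolution.
        return nn.functional.conv3d(x, w)

x = torch.randn(1, 3, 5, 5, 5)
w = torch.randn(3, 3, 3, 3, 3)
print(StaticConv3D()(x).shape)      # torch.Size([1, 3, 3, 3, 3])
print(DynamicConv3D()(x, w).shape)  # torch.Size([1, 3, 3, 3, 3])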