27 commits
f6efa83
[Deconv] Initial draft of deconv node
auphelia Apr 18, 2024
7b64b9b
Merge branch 'dev' into feature/deconv
auphelia Apr 18, 2024
db9130b
[Deconv] Add padding and cleanup hls code generation
auphelia Apr 25, 2024
b3bbfe0
Merge branch 'dev' into feature/deconv
hleblevec Jun 26, 2024
629a65e
update finn-hlslib commit hash
hleblevec Oct 15, 2024
b8501c9
deconv test now passing multiple channels
hleblevec Oct 15, 2024
c5aa427
[Deconv] Updating tests and custom HLS node, passing most cppsim tests
hleblevec Nov 6, 2024
f7cbd48
[Deconv] Updating tests and custom HLS node, passing most cppsim tests
hleblevec Nov 6, 2024
891a305
[Deconv] Updating tests and custom HLS node, passing most cppsim tests
hleblevec Nov 6, 2024
dc2a48f
Merge branch 'dev' into feature/deconv
hleblevec Nov 6, 2024
9e2125f
Merge branch 'dev' into feature/deconv
auphelia Nov 13, 2024
c157dc0
Changing the computation of the timeout value to be based on parameters
hleblevec Jan 16, 2025
cee4e70
Increasing the timeout value as it fails some test configurations
hleblevec Jan 16, 2025
6a1fc41
Merge branch 'feature/deconv' of https://github.com/Xilinx/finn into …
hleblevec Jan 16, 2025
837019a
Merge branch 'Xilinx-feature/deconv' into feature/deconv
hleblevec Jan 16, 2025
248848e
Updating HLSLIB commit hash to the most recent
hleblevec Jan 16, 2025
91e8116
Setting test parameters to failing case
hleblevec Jan 16, 2025
1163908
Merge branch 'dev' into feature/deconv
auphelia Jan 23, 2025
05f9a70
updating templates to match recent changes
hleblevec May 7, 2025
a8b0b49
fixing merge conflicts with upstream
hleblevec May 7, 2025
7deb296
changing stream names to match the template
hleblevec May 7, 2025
d1f8aca
Merge branch 'dev' into feature/deconv
auphelia Aug 12, 2025
e5dfec9
Merge remote-tracking branch 'upstream/feature/deconv' into feature/d…
auphelia Aug 12, 2025
1283202
[Deconv] Align custom op with changes from dev
auphelia Aug 12, 2025
7e68ea6
[Tests] Bring back pixel padding test
auphelia Aug 12, 2025
8bc3c55
Merge pull request #1263 from hleblevec/feature/deconv
auphelia Aug 12, 2025
ee7d711
[Util] Remove custom numpy to hls code conversion for deconv and use …
auphelia Aug 12, 2025
2 changes: 1 addition & 1 deletion fetch-repos.sh
@@ -32,7 +32,7 @@ QONNX_COMMIT="0630ceaee17799096d1750abcfb5bbe0a2877888"
FINN_EXP_COMMIT="0724be21111a21f0d81a072fccc1c446e053f851"
BREVITAS_COMMIT="4617f7bd136e96fa21c7f76e3c7e2e37fe563837"
CNPY_COMMIT="4e8810b1a8637695171ed346ce68f6984e585ef4"
HLSLIB_COMMIT="5dde96382b84979c6caa6f34cdad2ac72fa28489"
HLSLIB_COMMIT="120c46293fdf534415a6a47973a8f712fca6d900"
OMX_COMMIT="0b59762f9e4c4f7e5aa535ee9bc29f292434ca7a"
AVNET_BDF_COMMIT="2d49cfc25766f07792c0b314489f21fe916b639b"
XIL_BDF_COMMIT="8cf4bb674a919ac34e3d99d8d71a9e60af93d14e"
4 changes: 3 additions & 1 deletion src/finn/custom_op/fpgadataflow/__init__.py
@@ -1,5 +1,5 @@
# Copyright (C) 2020-2022, Xilinx, Inc.
-# Copyright (C) 2023-2024, Advanced Micro Devices, Inc.
+# Copyright (C) 2022-2024, Advanced Micro Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
@@ -33,6 +33,7 @@
from finn.custom_op.fpgadataflow.convolutioninputgenerator import (
    ConvolutionInputGenerator,
)
+from finn.custom_op.fpgadataflow.deconvolution import Deconvolution
from finn.custom_op.fpgadataflow.duplicatestreams import DuplicateStreams
from finn.custom_op.fpgadataflow.fmpadding import FMPadding
from finn.custom_op.fpgadataflow.fmpadding_pixel import FMPadding_Pixel
@@ -67,6 +68,7 @@
custom_op["AddStreams"] = AddStreams
custom_op["ChannelwiseOp"] = ChannelwiseOp
custom_op["ConvolutionInputGenerator"] = ConvolutionInputGenerator
custom_op["Deconvolution"] = Deconvolution
custom_op["DuplicateStreams"] = DuplicateStreams
custom_op["FMPadding"] = FMPadding
custom_op["FMPadding_Pixel"] = FMPadding_Pixel
173 changes: 173 additions & 0 deletions src/finn/custom_op/fpgadataflow/deconvolution.py
@@ -0,0 +1,173 @@
# Copyright (C) 2024, Advanced Micro Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of FINN nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import warnings
from qonnx.core.datatype import DataType

from finn.custom_op.fpgadataflow.hwcustomop import HWCustomOp


class Deconvolution(HWCustomOp):
    """Abstraction layer for HW implementation of Deconvolution."""

    def __init__(self, onnx_node, **kwargs):
        super().__init__(onnx_node, **kwargs)

    def get_nodeattr_types(self):
        my_attrs = {
            "KernelDim": ("ints", True, []),  # [H, W] = [Y, X]
            "IFMChannels": ("i", True, 0),
            "OFMChannels": ("i", True, 0),
            "IFMDim": ("ints", True, []),  # [H, W] = [Y, X]
            "PE": ("i", True, 0),
            "SIMD": ("i", True, 0),
            "Stride": ("ints", True, [1, 1]),  # [H, W] = [Y, X]
            "Padding": ("ints", True, []),  # [H, W] = [Y, X]
            # FINN DataTypes for inputs, weights, outputs
            "inputDataType": ("s", True, ""),
            "weightDataType": ("s", True, ""),
            "outputDataType": ("s", True, ""),
        }
        my_attrs.update(super().get_nodeattr_types())
        return my_attrs

    def get_normal_input_shape(self, ind=0):
        if ind == 0:
            # input 0: feature map in NHWC layout
            ifm_dim_h, ifm_dim_w = self.get_nodeattr("IFMDim")
            ifm_ch = self.get_nodeattr("IFMChannels")
            ishape = (1, ifm_dim_h, ifm_dim_w, ifm_ch)
        else:
            # input 1: weights in (OFM, KH, KW, IFM) layout
            ifm_ch = self.get_nodeattr("IFMChannels")
            ofm_ch = self.get_nodeattr("OFMChannels")
            k_h, k_w = self.get_nodeattr("KernelDim")
            ishape = (ofm_ch, k_h, k_w, ifm_ch)
        return ishape

    def get_folded_input_shape(self, ind=0):
        if ind == 0:
            ifm_dim_h, ifm_dim_w = self.get_nodeattr("IFMDim")
            ifm_ch = self.get_nodeattr("IFMChannels")
            simd = self.get_nodeattr("SIMD")
            assert ifm_ch % simd == 0, "SIMD must divide IFMChannels"
            fold = ifm_ch // simd
            folded_ishape = (1, ifm_dim_h, ifm_dim_w, fold, simd)
        else:
            folded_ishape = self.get_normal_input_shape(ind)
        return folded_ishape

    def get_normal_output_shape(self, ind=0):
        idim_h, idim_w = self.get_nodeattr("IFMDim")
        stride_h, stride_w = self.get_nodeattr("Stride")
        k_h, k_w = self.get_nodeattr("KernelDim")
        ofm_ch = self.get_nodeattr("OFMChannels")
        pad_h, pad_w = self.get_nodeattr("Padding")
        # standard transposed-convolution output size
        # (dilation 1, no output padding)
        odim_h = (idim_h - 1) * stride_h - 2 * pad_h + (k_h - 1) + 1
        odim_w = (idim_w - 1) * stride_w - 2 * pad_w + (k_w - 1) + 1
        oshape = (1, odim_h, odim_w, ofm_ch)
        return oshape

    def get_folded_output_shape(self, ind=0):
        normal_oshape = self.get_normal_output_shape()
        odim_h = normal_oshape[1]
        odim_w = normal_oshape[2]
        ofm_ch = normal_oshape[3]
        pe = self.get_nodeattr("PE")
        assert ofm_ch % pe == 0, "PE must divide OFMChannels"
        fold = ofm_ch // pe
        folded_oshape = (1, odim_h, odim_w, fold, pe)
        return folded_oshape

    def make_shape_compatible_op(self, model):
        exp_ishape = self.get_normal_input_shape()
        oshape = self.get_normal_output_shape()
        ishape = tuple(model.get_tensor_shape(self.onnx_node.input[0]))
        assert ishape == exp_ishape, "Unexpected input shape for Deconv."
        # implement tensor with correct shape
        return super().make_const_shape_op(oshape)

    def infer_node_datatype(self, model):
        node = self.onnx_node
        idt = model.get_tensor_datatype(node.input[0])
        if idt != self.get_input_datatype():
            warn_str = "inputDataType changing for %s: %s -> %s " % (
                node.name,
                str(self.get_input_datatype()),
                str(idt),
            )
            warnings.warn(warn_str)
            self.set_nodeattr("inputDataType", idt.name)
        # set output datatype from property
        odt = self.get_output_datatype()
        model.set_tensor_datatype(node.output[0], odt)

    def verify_node(self):
        pass

    def get_input_datatype(self, ind=0):
        """Returns FINN DataType of input."""
        return DataType[self.get_nodeattr("inputDataType")]

    def get_weight_datatype(self):
        """Returns FINN DataType of weights."""
        return DataType[self.get_nodeattr("weightDataType")]

    def get_output_datatype(self, ind=0):
        """Returns FINN DataType of output."""
        return DataType[self.get_nodeattr("outputDataType")]

    def get_instream_width(self, ind=0):
        """Returns input stream width: SIMD input elements per cycle."""
        if ind == 0:
            ibits = self.get_input_datatype().bitwidth()
            simd = self.get_nodeattr("SIMD")
            ifm_ch = self.get_nodeattr("IFMChannels")
            assert ifm_ch % simd == 0, "SIMD must divide IFMChannels"
            in_width = simd * ibits
        else:
            in_width = 0
        return in_width

    def get_outstream_width(self, ind=0):
        """Returns output stream width: PE output elements per cycle."""
        o_bits = self.get_output_datatype().bitwidth()
        out_width = o_bits * self.get_nodeattr("PE")
        return out_width

    # cycle and resource estimates as well as functional execution are
    # not yet modeled for this abstraction layer; the methods below are stubs
    def get_exp_cycles(self):
        return 0

    def bram_estimation(self):
        return 0

    def lut_estimation(self):
        return 0

    def uram_estimation(self):
        return 0

    def execute_node(self, context, graph):
        pass
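
For reference, the shape and folding arithmetic above can be checked standalone. The following minimal sketch mirrors get_normal_output_shape and the folded shapes; all parameter values are illustrative and not taken from this PR's tests:

# Standalone sketch of the shape/folding arithmetic implemented above.
# All parameter values are illustrative, not from the PR's tests.
idim_h, idim_w = 4, 4      # IFMDim
stride_h, stride_w = 2, 2  # Stride
k_h, k_w = 4, 4            # KernelDim
pad_h, pad_w = 1, 1        # Padding
ifm_ch, ofm_ch = 8, 8      # IFMChannels / OFMChannels
simd, pe = 4, 2            # folding factors

# transposed-convolution output size (dilation 1, no output padding),
# matching get_normal_output_shape
odim_h = (idim_h - 1) * stride_h - 2 * pad_h + (k_h - 1) + 1
odim_w = (idim_w - 1) * stride_w - 2 * pad_w + (k_w - 1) + 1
assert ifm_ch % simd == 0 and ofm_ch % pe == 0

folded_ishape = (1, idim_h, idim_w, ifm_ch // simd, simd)
folded_oshape = (1, odim_h, odim_w, ofm_ch // pe, pe)
print(folded_ishape)  # (1, 4, 4, 2, 4)
print(folded_oshape)  # (1, 8, 8, 4, 2)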
2 changes: 2 additions & 0 deletions src/finn/custom_op/fpgadataflow/hls/__init__.py
@@ -30,6 +30,7 @@
from finn.custom_op.fpgadataflow.hls.channelwise_op_hls import ChannelwiseOp_hls
from finn.custom_op.fpgadataflow.hls.checksum_hls import CheckSum_hls
from finn.custom_op.fpgadataflow.hls.concat_hls import StreamingConcat_hls
+from finn.custom_op.fpgadataflow.hls.deconvolution_hls import Deconvolution_hls
from finn.custom_op.fpgadataflow.hls.duplicatestreams_hls import DuplicateStreams_hls
from finn.custom_op.fpgadataflow.hls.fmpadding_pixel_hls import FMPadding_Pixel_hls
from finn.custom_op.fpgadataflow.hls.globalaccpool_hls import GlobalAccPool_hls
@@ -55,6 +56,7 @@
custom_op["AddStreams_hls"] = AddStreams_hls
custom_op["ChannelwiseOp_hls"] = ChannelwiseOp_hls
custom_op["CheckSum_hls"] = CheckSum_hls
custom_op["Deconvolution_hls"] = Deconvolution_hls
custom_op["DuplicateStreams_hls"] = DuplicateStreams_hls
custom_op["FMPadding_Pixel_hls"] = FMPadding_Pixel_hls
custom_op["GlobalAccPool_hls"] = GlobalAccPool_hls
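
With both the HW abstraction and its HLS variant registered, the new op resolves through the usual FINN registry path. A hypothetical usage sketch follows; the node and attribute values are illustrative, and it assumes qonnx's getCustomOp dispatches on the finn.custom_op.fpgadataflow domain as for other FINN ops:

# Hypothetical sketch: build a Deconvolution node and resolve it through
# the registry populated above. Attribute values are illustrative only.
from onnx import helper
from qonnx.custom_op.registry import getCustomOp

node = helper.make_node(
    "Deconvolution",
    ["inp", "weights"],
    ["outp"],
    domain="finn.custom_op.fpgadataflow",
    backend="fpgadataflow",
    KernelDim=[4, 4],
    IFMChannels=8,
    OFMChannels=8,
    IFMDim=[4, 4],
    PE=2,
    SIMD=4,
    Stride=[2, 2],
    Padding=[1, 1],
    inputDataType="INT8",
    weightDataType="INT8",
    outputDataType="INT32",
)
inst = getCustomOp(node)
print(inst.get_normal_output_shape())  # (1, 8, 8, 8)
print(inst.get_instream_width())       # SIMD * input bits = 4 * 8 = 32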