Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
22 changes: 22 additions & 0 deletions src/finn/custom_op/fpgadataflow/hls/elementwise_binary_hls.py
Original file line number Diff line number Diff line change
Expand Up @@ -73,6 +73,28 @@ def get_ap_int_max_w(self):
# Find the biggest of the inputs/outputs
return max([i_bits_max, o_bits_max])

def adapt_for_loop_body(self, input_types):
    """
    Adapt elementwise binary operator for loop body execution.

    When an elementwise operator is placed inside a loop, parameters that
    are indexed per iteration (PARAMETER type) need to be received as
    streaming inputs rather than embedded constants. This method changes
    the lhs_style/rhs_style attributes from "const" to "input" as needed.

    Args:
        input_types: List of LoopBodyInputType values, one per operator
            input, as determined by the LoopRolling transformation.
            May be shorter than the number of inputs; missing entries
            are treated as needing no adaptation.
    """
    # Imported locally to avoid a circular import between this custom op
    # module and the loop_rolling transformation module.
    from finn.transformation.fpgadataflow.loop_rolling import LoopBodyInputType

    # rhs (input[1]): a PARAMETER is streamed per iteration, so a "const"
    # style must become "input".
    # NOTE: read via get_nodeattr for consistency with the set_nodeattr
    # write below — node attributes are not guaranteed to be exposed as
    # plain Python attributes on the op instance.
    if len(input_types) > 1 and input_types[1] == LoopBodyInputType.PARAMETER:
        if self.get_nodeattr("rhs_style") == "const":
            self.set_nodeattr("rhs_style", "input")

    # lhs (input[0]): same adaptation for the left-hand-side operand.
    if len(input_types) > 0 and input_types[0] == LoopBodyInputType.PARAMETER:
        if self.get_nodeattr("lhs_style") == "const":
            self.set_nodeattr("lhs_style", "input")

# Note: End of shape and datatype utilities

def code_generation_ipgen(self, model, fpgapart, clk):
Expand Down
20 changes: 20 additions & 0 deletions src/finn/custom_op/fpgadataflow/hwcustomop.py
Original file line number Diff line number Diff line change
Expand Up @@ -526,3 +526,23 @@ def accumulate_char_fxn(chrc):
self.set_nodeattr("io_chrc_out", all_txns_out)
self.set_nodeattr("io_chrc_pads_in", all_pad_in)
self.set_nodeattr("io_chrc_pads_out", all_pad_out)

def adapt_for_loop_body(self, input_types):
    """
    Hook invoked by the LoopRolling transformation so an operator can
    adjust its attributes when it is placed inside a loop body.

    The default implementation intentionally does nothing. Subclasses
    whose behavior must change inside a loop are expected to override
    this method — for example, switching an attribute such as
    ``rhs_style`` from "const" to "input" when a parameter becomes a
    per-iteration streamed input (PARAMETER type).

    Args:
        input_types: List of LoopBodyInputType values, one per input,
            classifying each input (ACTIVATION, CONSTANT, PARAMETER,
            etc.).
    """
    # Deliberately empty: the common case requires no loop adaptation.
16 changes: 16 additions & 0 deletions src/finn/transformation/fpgadataflow/loop_rolling.py
Original file line number Diff line number Diff line change
Expand Up @@ -535,6 +535,22 @@ def apply(self, model: ModelWrapper) -> Tuple[ModelWrapper, bool]:

model_wrapper = ModelWrapper(model)

# Allow operators in the loop body to adapt their attributes based on
# the determined input signature (e.g., changing parameter styles from
# "const" to "input" for streamed parameters)
# This must be done after serialization so we can work with protobuf nodes
import qonnx.custom_op.registry as registry
from qonnx.util.basic import get_by_name
for loop_node in model_wrapper.get_nodes_by_op_type("FINNLoop"):
loop_body_graph = get_by_name(loop_node.attribute, "body").g
for node in loop_body_graph.node:
try:
inst = registry.getCustomOp(node)
inst.adapt_for_loop_body(LoopBody.signature)
except (KeyError, AttributeError):
# Operator doesn't need adaptation or doesn't support it
pass

model = model_wrapper.transform(FoldConstants())

return (model, False)
21 changes: 10 additions & 11 deletions tests/fpgadataflow/test_fpgadataflow_finnloop.py
Original file line number Diff line number Diff line change
Expand Up @@ -396,7 +396,7 @@ def test_fpgadataflow_finnloop(dim, iteration, elemwise_optype, rhs_shape, eltw_
y_ref = y_dict[model.graph.output[0].name]

# loop extraction and rolling
loop_extraction = LoopExtraction(hierarchy_list=["", "layers.0"])
loop_extraction = LoopExtraction(hierarchy_list=[["", "layers.0"]])
model = model.transform(loop_extraction)

assert (
Expand All @@ -405,25 +405,24 @@ def test_fpgadataflow_finnloop(dim, iteration, elemwise_optype, rhs_shape, eltw_

model = model.transform(LoopRolling(loop_extraction.loop_body_template))

# the rhs_style for the elementwise node needs to be set to 'input' for the loop
# this requires recompilation of the elementwise node for cppsim
# LoopRolling automatically adapts operator attributes for loop context
# (e.g., rhs_style changes from "const" to "input" for streamed parameters)
# This requires recompilation of the elementwise node for cppsim
loop_node = model.get_nodes_by_op_type("FINNLoop")[0]
loop_body_graph = get_by_name(loop_node.attribute, "body").g
elementwise_node = get_by_name(loop_body_graph.node, elemwise_optype, "op_type")
rhs_style_attr = get_by_name(elementwise_node.attribute, "rhs_style")
rhs_style_attr.s = b"input"
code_gen_dir_cppsim_attr = get_by_name(elementwise_node.attribute, "code_gen_dir_cppsim")
code_gen_dir_cppsim_attr.s = b"" # reset cpp gen directory to force recompilation
executable_path_attr = get_by_name(elementwise_node.attribute, "executable_path")
executable_path_attr.s = b"" # reset cpp exec directory to force recompilation

# recompile element wise node for cppsim
# model = model.transform(PrepareCppSim(), apply_to_subgraphs=True)
# model = model.transform(CompileCppSim(), apply_to_subgraphs=True)
# recompile elementwise node for cppsim
model = model.transform(PrepareCppSim(), apply_to_subgraphs=True)
model = model.transform(CompileCppSim(), apply_to_subgraphs=True)

# y_dict = oxe.execute_onnx(model, io_dict)
# y_prod = y_dict[model.graph.output[0].name]
# assert (y_prod == y_ref).all()
y_dict = oxe.execute_onnx(model, io_dict)
y_prod = y_dict[model.graph.output[0].name]
assert (y_prod == y_ref).all()

# node-by-node rtlsim
model = model.transform(GiveUniqueNodeNames(), apply_to_subgraphs=True)
Expand Down