2 changes: 2 additions & 0 deletions docker/finn_entrypoint.sh
@@ -151,6 +151,8 @@ else
echo "See https://docs.xilinx.com/r/en-US/ug835-vivado-tcl-commands/Tcl-Initialization-Scripts"
fi

export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:$VITIS_PATH/lnx64/tools/fpo_v7_1"

export PATH=$PATH:$HOME/.local/bin
# execute the provided command(s) as root
exec "$@"
2 changes: 1 addition & 1 deletion fetch-repos.sh
@@ -33,7 +33,7 @@ FINN_EXP_COMMIT="0724be21111a21f0d81a072fccc1c446e053f851"
BREVITAS_COMMIT="4617f7bd136e96fa21c7f76e3c7e2e37fe563837"
CNPY_COMMIT="8c82362372ce600bbd1cf11d64661ab69d38d7de"
HLSLIB_COMMIT="5dde96382b84979c6caa6f34cdad2ac72fa28489"
OMX_COMMIT="0b59762f9e4c4f7e5aa535ee9bc29f292434ca7a"
OMX_COMMIT="a5d48f93309b235fdd21556d16e86e6ef5db6e2e"
AVNET_BDF_COMMIT="2d49cfc25766f07792c0b314489f21fe916b639b"
XIL_BDF_COMMIT="8cf4bb674a919ac34e3d99d8d71a9e60af93d14e"
RFSOC4x2_BDF_COMMIT="13fb6f6c02c7dfd7e4b336b18b959ad5115db696"
22 changes: 21 additions & 1 deletion src/finn/transformation/fpgadataflow/synth_ooc.py
@@ -27,13 +27,23 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import os
from qonnx.custom_op.registry import getCustomOp
from qonnx.transformation.base import Transformation
from shutil import copy2

from finn.util.basic import make_build_dir
from finn.util.fpgadataflow import is_hls_node
from finn.util.vivado import out_of_context_synth


def is_hls_float_op(node, model):
    if is_hls_node(node):
        for inp in node.input:
            if model.get_tensor_datatype(inp).name.startswith("FLOAT"):
                return True
    return False


class SynthOutOfContext(Transformation):
"""Run out-of-context Vivado synthesis on a stitched IP design."""

@@ -58,8 +68,18 @@ def file_to_basename(x):
        for file in all_verilog_srcs:
            if any([file.endswith(x) for x in verilog_extensions]):
                copy2(file, build_dir)
        # extract additional tcl commands to set up floating-point ips correctly
        float_ip_tcl = []
        for node in model.graph.node:
            if is_hls_float_op(node, model):
                code_gen_dir = getCustomOp(node).get_nodeattr("code_gen_dir_ipgen")
                verilog_path = "{}/project_{}/sol1/impl/verilog/".format(code_gen_dir, node.name)
                file_suffix = ".tcl"
                for fname in os.listdir(verilog_path):
                    if fname.endswith(file_suffix):
                        float_ip_tcl.append(verilog_path + fname)
        ret = out_of_context_synth(
            build_dir, top_module_name, self.part, self.clk_name, self.clk_period_ns
            build_dir, top_module_name, float_ip_tcl, self.part, self.clk_name, self.clk_period_ns
        )
        model.set_metadata_prop("res_total_ooc_synth", str(ret))
        return (model, False)
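A minimal sketch of the updated call convention, for illustration only (the build directory, top module name, and tcl path below are hypothetical placeholders; only the argument order is taken from the diff): apply() now gathers the Vitis HLS-generated .tcl scripts for floating-point IPs and forwards them to out_of_context_synth as an extra positional argument.

# Sketch under the assumptions noted above; values are made-up examples.
from finn.util.vivado import out_of_context_synth

build_dir = "/tmp/finn_ooc_example"          # hypothetical build directory
top_module_name = "finn_design_wrapper"      # hypothetical stitched-IP top module
float_ip_tcl = [
    "/tmp/code_gen_dir/project_MVAU_hls_0/sol1/impl/verilog/float_ip.tcl",  # hypothetical path
]
ret = out_of_context_synth(
    build_dir,
    top_module_name,
    float_ip_tcl,
    fpga_part="xczu7ev-ffvc1156-2-e",
    clk_period_ns=10.0,
)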
6 changes: 4 additions & 2 deletions src/finn/util/vivado.py
@@ -34,6 +34,7 @@
def out_of_context_synth(
    verilog_dir,
    top_name,
    float_ip_tcl,
    fpga_part="xczu3eg-sbva484-1-e",
    clk_name="ap_clk_0",
    clk_period_ns=5.0,
@@ -48,11 +49,12 @@ def out_of_context_synth(
        raise Exception("vivado is not in PATH, ensure settings64.sh is sourced.")
    omx_path = os.environ["OHMYXILINX"]
    script = "vivadocompile.sh"
    # vivadocompile.sh <top-level-entity> <clock-name (optional)> <fpga-part (optional)>
    call_omx = "zsh %s/%s %s %s %s %f" % (
    # vivadocompile.sh <top-level-entity> <fp0.tcl#fp1.tcl> <clk-name (opt)> <fpga-part (opt)>
    call_omx = "zsh %s/%s %s %s %s %s %f" % (
        omx_path,
        script,
        top_name,
        '"%s"' % "#".join(float_ip_tcl),
        clk_name,
        fpga_part,
        float(clk_period_ns),
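The tcl file list is handed to vivadocompile.sh as a single quoted, '#'-separated argument. A small sketch of how that string is assembled (the paths are placeholders):

# Placeholder paths; illustrates only the join/quoting used in the call above.
float_ip_tcl = ["/proj/a/fp0.tcl", "/proj/b/fp1.tcl"]
tcl_arg = '"%s"' % "#".join(float_ip_tcl)
print(tcl_arg)  # prints "/proj/a/fp0.tcl#/proj/b/fp1.tcl" wrapped in double quotes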
187 changes: 187 additions & 0 deletions tests/end2end/test_ooc_synthesis.py
@@ -0,0 +1,187 @@
############################################################################
# Copyright (C) 2025, Advanced Micro Devices, Inc.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
# ##########################################################################

import pytest

import numpy as np
from onnx import TensorProto, helper
from qonnx.core.datatype import DataType
from qonnx.core.modelwrapper import ModelWrapper
from qonnx.transformation.general import GiveUniqueNodeNames
from qonnx.transformation.infer_datatypes import InferDataTypes
from qonnx.transformation.infer_shapes import InferShapes
from qonnx.util.basic import gen_finn_dt_tensor

import finn.core.onnx_exec as oxe
import finn.transformation.fpgadataflow.convert_to_hw_layers as to_hw
from finn.transformation.fpgadataflow.create_stitched_ip import CreateStitchedIP
from finn.transformation.fpgadataflow.hlssynth_ip import HLSSynthIP
from finn.transformation.fpgadataflow.prepare_ip import PrepareIP
from finn.transformation.fpgadataflow.prepare_rtlsim import PrepareRTLSim
from finn.transformation.fpgadataflow.set_exec_mode import SetExecMode
from finn.transformation.fpgadataflow.set_fifo_depths import InsertAndSetFIFODepths
from finn.transformation.fpgadataflow.specialize_layers import SpecializeLayers
from finn.transformation.fpgadataflow.synth_ooc import SynthOutOfContext

fpga_part = "xczu7ev-ffvc1156-2-e"
clk_ns = 10


def generate_random_threshold_values(data_type, num_input_channels, num_steps):
    if data_type.is_integer():
        return np.random.randint(
            data_type.min(),
            data_type.max() + 1,
            (num_input_channels, num_steps),
        ).astype(np.float32)
    else:
        return (np.random.randn(num_input_channels, num_steps) * 1000).astype(
            data_type.to_numpy_dt()
        )


def create_test_model():
    W = gen_finn_dt_tensor(DataType["INT4"], (16, 32))
    T = np.sort(
        generate_random_threshold_values(
            DataType["FLOAT32"],
            1,
            DataType["INT8"].get_num_possible_values() - 1,
        ),
        axis=1,
    )
    MulParam = gen_finn_dt_tensor(DataType["FLOAT32"], [1])
    AddParam = gen_finn_dt_tensor(DataType["FLOAT32"], [1, 4, 32])

    # Initialize a new graph
    nodes = []

    # Add nodes
    mt_op = helper.make_node(
        "MultiThreshold",
        inputs=["inp", "thresh"],
        outputs=["mt_output"],
        domain="qonnx.custom_op.general",
        out_dtype="INT8",
        out_bias=float(DataType["INT8"].min()),
    )
    nodes.append(mt_op)

    matmul_op = helper.make_node(
        "MatMul",
        inputs=["mt_output", "matmul_weight"],
        outputs=["matmul_output"],
    )
    nodes.append(matmul_op)

    scalar_mul_op = helper.make_node(
        "Mul",
        inputs=["matmul_output", "scalar_input"],
        outputs=["scalar_output"],
    )
    nodes.append(scalar_mul_op)

    channel_add_op = helper.make_node(
        "Add",
        inputs=["scalar_output", "channelwise_bias"],
        outputs=["final_output"],
    )
    nodes.append(channel_add_op)

    # Define inputs
    inputs = [
        helper.make_tensor_value_info("inp", TensorProto.FLOAT, [1, 4, 16]),
    ]

    # Define outputs
    outputs = [helper.make_tensor_value_info("final_output", TensorProto.FLOAT, [1, 4, 32])]

    value_info = [
        helper.make_tensor_value_info("mt_output", TensorProto.FLOAT, [1, 4, 16]),
        helper.make_tensor_value_info("thresh", TensorProto.FLOAT, [1, 255]),
        helper.make_tensor_value_info("matmul_output", TensorProto.FLOAT, [1, 4, 32]),
        helper.make_tensor_value_info("matmul_weight", TensorProto.FLOAT, [16, 32]),
        helper.make_tensor_value_info("scalar_input", TensorProto.FLOAT, [1]),
        helper.make_tensor_value_info("scalar_output", TensorProto.FLOAT, [1, 4, 32]),
        helper.make_tensor_value_info("channelwise_bias", TensorProto.FLOAT, [1, 4, 32]),
    ]

    # Create the graph
    graph = helper.make_graph(
        nodes=nodes, name="TestModelGraph", inputs=inputs, outputs=outputs, value_info=value_info
    )

    # Create the model
    model = helper.make_model(graph, opset_imports=[helper.make_opsetid("", 11)])
    model = ModelWrapper(model)

    # Set initializers and datatypes
    model.set_initializer("matmul_weight", W)
    model.set_initializer("thresh", T)
    model.set_initializer("scalar_input", MulParam)
    model.set_initializer("channelwise_bias", AddParam)

    model.set_tensor_datatype("inp", DataType["FLOAT32"])
    model.set_tensor_datatype("matmul_weight", DataType["INT4"])
    model.set_tensor_datatype("thresh", DataType["FLOAT32"])
    model.set_tensor_datatype("scalar_input", DataType["FLOAT32"])
    model.set_tensor_datatype("channelwise_bias", DataType["FLOAT32"])

    return model


@pytest.mark.end2end
@pytest.mark.vivado
@pytest.mark.slow
def test_ooc_synthesis():
    model = create_test_model()
    model = model.transform(InferShapes())
    model = model.transform(InferDataTypes())

    # generate reference output
    x = gen_finn_dt_tensor(DataType["FLOAT32"], (1, 4, 16))
    y_dict = oxe.execute_onnx(model, {model.graph.input[0].name: x})
    y_ref = y_dict[model.graph.output[0].name]

    # infer and specialize layers
    model = model.transform(to_hw.InferThresholdingLayer())
    model = model.transform(to_hw.InferElementwiseBinaryOperation())
    model = model.transform(to_hw.InferQuantizedMatrixVectorActivation())
    model = model.transform(SpecializeLayers(fpga_part))

    # node-by-node rtlsim
    model = model.transform(GiveUniqueNodeNames())
    model = model.transform(SetExecMode("rtlsim"))
    model = model.transform(PrepareIP(fpga_part, clk_ns))
    model = model.transform(HLSSynthIP())
    model = model.transform(PrepareRTLSim())

    y_dict = oxe.execute_onnx(model, {model.graph.input[0].name: x})
    y_prod = y_dict[model.graph.output[0].name]
    assert (y_prod == y_ref).all()

    # FIFO sizing
    model = model.transform(InsertAndSetFIFODepths(fpga_part, clk_ns))

    # stitched IP generation and out-of-context synthesis
    model = model.transform(PrepareIP(fpga_part, clk_ns))
    model = model.transform(HLSSynthIP())
    model = model.transform(CreateStitchedIP(fpga_part, clk_ns))
    model = model.transform(SynthOutOfContext(fpga_part, clk_ns))
    ret = model.get_metadata_prop("res_total_ooc_synth")
    assert ret is not None
    # example expected output (details may differ based on Vivado version etc.):
    # "{'vivado_proj_folder': ...,
    # 'LUT': 708.0, 'FF': 1516.0, 'DSP': 0.0, 'BRAM': 0.0, 'WNS': 0.152, '': 0,
    # 'fmax_mhz': 206.27062706270627}"
    ret = eval(ret)
    assert ret["LUT"] > 0
    assert ret["FF"] > 0
    assert ret["DSP"] > 0
    assert ret["BRAM"] > 0
    assert ret["fmax_mhz"] > 100