Skip to content

Commit 6deb886

Browse files
tarun292 authored and facebook-github-bot committed
Enable xnnpack in aten mode (#9049)
Summary: Pull Request resolved: #9049 This diff enables XNNPack delegate in ATen mode resolving some compilation issues and also making sure that the deps that XNNPack depends on are portable + aten friendly. Reviewed By: mcr229 Differential Revision: D70704202
1 parent 1572381 commit 6deb886

File tree

2 files changed

+49
-40
lines changed

2 files changed

+49
-40
lines changed

backends/xnnpack/runtime/XNNExecutor.cpp

+9-2
Original file line numberDiff line numberDiff line change
@@ -95,11 +95,18 @@ ET_NODISCARD Error XNNExecutor::prepare_args(EValue** args) {
9595
Tensor* tensor = &args[ext_id]->toTensor();
9696
externals_[i].data = tensor->mutable_data_ptr<float>();
9797

98+
executorch::aten::DimOrderType dim_order[kTensorDimensionLimit];
99+
98100
// Reshape runtime inputs
99101
if (i < input_ids_.size()) {
100102
size_t num_dims = tensor->dim();
103+
Error err = runtime::get_dim_order(*tensor, dim_order, num_dims);
104+
ET_CHECK_OR_RETURN_ERROR(
105+
err == Error::Ok,
106+
Internal,
107+
"Failed to retrieve dim order from tensor!");
101108
ET_CHECK_OR_RETURN_ERROR(
102-
is_contiguous_dim_order(tensor->dim_order().data(), tensor->dim()),
109+
is_contiguous_dim_order(dim_order, tensor->dim()),
103110
Internal,
104111
"Expecting default dim_order but got a non default dim_order tensor for external input %u",
105112
i);
@@ -220,7 +227,7 @@ ET_NODISCARD Error XNNExecutor::resize_outputs(EValue** args) const {
220227
expected_output_size, static_cast<size_t>(num_dim)};
221228

222229
ET_LOG(Debug, "Resizing output tensor to a new shape");
223-
Error err = resize_tensor(*out_tensor, output_size);
230+
Error err = runtime::resize_tensor(*out_tensor, output_size);
224231
if (err != Error::Ok) {
225232
ET_LOG(Error, "Failed to resize output tensor for XNNExecutor");
226233
return err;

backends/xnnpack/targets.bzl

+40-38
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
load("@fbsource//xplat/executorch/backends/xnnpack/third-party:third_party_libs.bzl", "third_party_dep")
2-
load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime")
2+
load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "get_aten_mode_options", "runtime")
33

44
def _get_preprocessor_flags():
55
"""
@@ -33,40 +33,42 @@ def define_common_targets():
3333
],
3434
)
3535

36-
runtime.cxx_library(
37-
name = "xnnpack_backend",
38-
srcs = native.glob([
39-
"runtime/*.cpp",
40-
"runtime/profiling/*.cpp",
41-
]),
42-
headers = native.glob([
43-
"runtime/*.h",
44-
"runtime/profiling/*.h",
45-
]),
46-
visibility = [
47-
"//executorch/exir/backend:backend_lib",
48-
"//executorch/exir/backend/test/...",
49-
"//executorch/backends/xnnpack/test/...",
50-
"//executorch/extension/pybindings/...",
51-
"@EXECUTORCH_CLIENTS",
52-
],
53-
preprocessor_flags = [
54-
# Uncomment to enable per operator timings
55-
# "-DENABLE_XNNPACK_PROFILING",
56-
# Uncomment to enable using KleidiAI Kernels
57-
# "-DENABLE_XNNPACK_KLEIDI"
58-
] + _get_preprocessor_flags(),
59-
exported_deps = [
60-
"//executorch/runtime/backend:interface",
61-
],
62-
deps = [
63-
third_party_dep("XNNPACK"),
64-
"//executorch/backends/xnnpack/serialization:xnnpack_flatbuffer_header",
65-
"//executorch/extension/threadpool:threadpool",
66-
"//executorch/runtime/core/exec_aten/util:tensor_util",
67-
"//executorch/runtime/executor:pte_data_map"
68-
],
69-
# XnnpackBackend.cpp needs to compile with executor as whole
70-
# @lint-ignore BUCKLINT: Avoid `link_whole=True` (https://fburl.com/avoid-link-whole)
71-
link_whole = True,
72-
)
36+
for aten_mode in get_aten_mode_options():
37+
aten_suffix = "_aten" if aten_mode else ""
38+
runtime.cxx_library(
39+
name = "xnnpack_backend" + aten_suffix,
40+
srcs = native.glob([
41+
"runtime/*.cpp",
42+
"runtime/profiling/*.cpp",
43+
]),
44+
headers = native.glob([
45+
"runtime/*.h",
46+
"runtime/profiling/*.h",
47+
]),
48+
visibility = [
49+
"//executorch/exir/backend:backend_lib",
50+
"//executorch/exir/backend/test/...",
51+
"//executorch/backends/xnnpack/test/...",
52+
"//executorch/extension/pybindings/...",
53+
"@EXECUTORCH_CLIENTS",
54+
],
55+
preprocessor_flags = [
56+
# Uncomment to enable per operator timings
57+
# "-DENABLE_XNNPACK_PROFILING",
58+
# Uncomment to enable using KleidiAI Kernels
59+
# "-DENABLE_XNNPACK_KLEIDI"
60+
] + _get_preprocessor_flags(),
61+
exported_deps = [
62+
"//executorch/runtime/backend:interface",
63+
],
64+
deps = [
65+
third_party_dep("XNNPACK"),
66+
"//executorch/backends/xnnpack/serialization:xnnpack_flatbuffer_header",
67+
"//executorch/extension/threadpool:threadpool",
68+
"//executorch/runtime/core/exec_aten/util:tensor_util" + aten_suffix,
69+
"//executorch/runtime/executor:pte_data_map"
70+
],
71+
# XnnpackBackend.cpp needs to compile with executor as whole
72+
# @lint-ignore BUCKLINT: Avoid `link_whole=True` (https://fburl.com/avoid-link-whole)
73+
link_whole = True,
74+
)

0 commit comments

Comments (0)