Skip to content

Commit 8fac3bc

Browse files
committed
[ExecuTorch] Remove xnn_executor_runner
Pull Request resolved: #9292. It is redundant with executor_runner after #9248. Differential Revision: [D71159265](https://our.internmc.facebook.com/intern/diff/D71159265/) ghstack-source-id: 271912198
1 parent 605218c commit 8fac3bc

File tree

10 files changed

+20
-118
lines changed

10 files changed

+20
-118
lines changed

.ci/scripts/test_model.sh

+6-6
Original file line numberDiff line numberDiff line change
@@ -115,8 +115,8 @@ test_model() {
115115
run_portable_executor_runner
116116
}
117117

118-
build_cmake_xnn_executor_runner() {
119-
echo "Building xnn_executor_runner"
118+
build_cmake_executor_runner() {
119+
echo "Building executor_runner"
120120

121121
(rm -rf ${CMAKE_OUTPUT_DIR} \
122122
&& mkdir ${CMAKE_OUTPUT_DIR} \
@@ -152,12 +152,12 @@ test_model_with_xnnpack() {
152152

153153
# Run test model
154154
if [[ "${BUILD_TOOL}" == "buck2" ]]; then
155-
buck2 run //examples/xnnpack:xnn_executor_runner -- --model_path "${OUTPUT_MODEL_PATH}"
155+
buck2 run //examples/portable/executor_runner:executor_runner_opt -- --model_path "${OUTPUT_MODEL_PATH}"
156156
elif [[ "${BUILD_TOOL}" == "cmake" ]]; then
157-
if [[ ! -f ${CMAKE_OUTPUT_DIR}/backends/xnnpack/xnn_executor_runner ]]; then
158-
build_cmake_xnn_executor_runner
157+
if [[ ! -f ${CMAKE_OUTPUT_DIR}/executor_runner ]]; then
158+
build_cmake_executor_runner
159159
fi
160-
./${CMAKE_OUTPUT_DIR}/backends/xnnpack/xnn_executor_runner --model_path "${OUTPUT_MODEL_PATH}"
160+
./${CMAKE_OUTPUT_DIR}/executor_runner --model_path "${OUTPUT_MODEL_PATH}"
161161
else
162162
echo "Invalid build tool ${BUILD_TOOL}. Only buck2 and cmake are supported atm"
163163
exit 1

backends/xnnpack/CMakeLists.txt

-40
Original file line numberDiff line numberDiff line change
@@ -115,46 +115,6 @@ target_include_directories(
115115
target_compile_options(xnnpack_backend PUBLIC ${_common_compile_options})
116116
target_link_options_shared_lib(xnnpack_backend)
117117

118-
if(EXECUTORCH_BUILD_KERNELS_OPTIMIZED)
119-
list(APPEND xnn_executor_runner_libs optimized_native_cpu_ops_lib)
120-
else()
121-
list(APPEND xnn_executor_runner_libs portable_ops_lib)
122-
endif()
123-
124-
if(EXECUTORCH_BUILD_KERNELS_CUSTOM)
125-
list(APPEND xnn_executor_runner_libs $<LINK_LIBRARY:WHOLE_ARCHIVE,custom_ops>)
126-
endif()
127-
128-
list(APPEND xnn_executor_runner_libs xnnpack_backend executorch)
129-
130-
# ios can only build library but not binary
131-
if(NOT CMAKE_TOOLCHAIN_FILE MATCHES ".*(iOS|ios\.toolchain)\.cmake$")
132-
#
133-
# xnn_executor_runner: Like executor_runner but with XNNPACK, the binary will
134-
# be at ${CMAKE_BINARY_DIR}/backends/xnnpack
135-
#
136-
list(TRANSFORM _xnn_executor_runner__srcs PREPEND "${EXECUTORCH_ROOT}/")
137-
add_executable(xnn_executor_runner ${_xnn_executor_runner__srcs})
138-
139-
if(EXECUTORCH_ENABLE_EVENT_TRACER)
140-
if(EXECUTORCH_BUILD_DEVTOOLS)
141-
list(APPEND xnn_executor_runner_libs etdump)
142-
else()
143-
message(
144-
SEND_ERROR
145-
"Use of 'EXECUTORCH_ENABLE_EVENT_TRACER' requires 'EXECUTORCH_BUILD_DEVTOOLS' to be enabled."
146-
)
147-
endif()
148-
endif()
149-
150-
target_link_libraries(xnn_executor_runner gflags ${xnn_executor_runner_libs})
151-
target_compile_options(xnn_executor_runner PUBLIC ${_common_compile_options})
152-
if(EXECUTORCH_BUILD_PTHREADPOOL)
153-
target_link_libraries(xnn_executor_runner extension_threadpool pthreadpool)
154-
target_compile_definitions(xnn_executor_runner PRIVATE ET_USE_THREADPOOL)
155-
endif()
156-
endif()
157-
158118
install(
159119
TARGETS xnnpack_backend
160120
DESTINATION lib

backends/xnnpack/README.md

+3-3
Original file line numberDiff line numberDiff line change
@@ -92,7 +92,7 @@ After lowering to the XNNPACK Program, we can then prepare it for executorch and
9292

9393

9494
### Running the XNNPACK Model with CMake
95-
After exporting the XNNPACK Delegated model, we can now try running it with example inputs using CMake. We can build and use the xnn_executor_runner, which is a sample wrapper for the ExecuTorch Runtime and XNNPACK Backend. We first begin by configuring the CMake build like such:
95+
After exporting the XNNPACK Delegated model, we can now try running it with example inputs using CMake. We can build and use the `executor_runner`, which is a sample wrapper for the ExecuTorch Runtime and XNNPACK Backend. We first begin by configuring the CMake build like such:
9696
```bash
9797
# cd to the root of executorch repo
9898
cd executorch
@@ -119,9 +119,9 @@ Then you can build the runtime components with
119119
cmake --build cmake-out -j9 --target install --config Release
120120
```
121121

122-
Now you should be able to find the executable built at `./cmake-out/backends/xnnpack/xnn_executor_runner` you can run the executable with the model you generated as such
122+
Now you should be able to find the executable built at `./cmake-out/executor_runner` you can run the executable with the model you generated as such
123123
```bash
124-
./cmake-out/backends/xnnpack/xnn_executor_runner --model_path=./mv2_xnnpack_fp32.pte
124+
./cmake-out/executor_runner --model_path=./mv2_xnnpack_fp32.pte
125125
```
126126

127127
## Help & Improvements

build/cmake_deps.toml

-18
Original file line numberDiff line numberDiff line change
@@ -353,24 +353,6 @@ filters = [
353353
# ---------------------------------- MPS end ----------------------------------
354354
# ---------------------------------- XNNPACK start ----------------------------------
355355

356-
[targets.xnn_executor_runner]
357-
buck_targets = [
358-
"//examples/xnnpack:xnn_executor_runner",
359-
]
360-
filters = [
361-
".cpp$",
362-
]
363-
excludes = [
364-
"^codegen",
365-
]
366-
deps = [
367-
"executorch",
368-
"executorch_core",
369-
"extension_threadpool",
370-
"xnnpack_backend",
371-
"portable_kernels",
372-
]
373-
374356
[targets.xnnpack_backend]
375357
buck_targets = [
376358
"//backends/xnnpack:xnnpack_backend",

docs/source/backend-delegates-xnnpack-reference.md

+1-1
Original file line numberDiff line numberDiff line change
@@ -70,7 +70,7 @@ Since weight packing creates an extra copy of the weights inside XNNPACK, We fre
7070
When executing the XNNPACK subgraphs, we prepare the tensor inputs and outputs and feed them to the XNNPACK runtime graph. After executing the runtime graph, the output pointers are filled with the computed tensors.
7171

7272
#### **Profiling**
73-
We have enabled basic profiling for the XNNPACK delegate that can be enabled with the compiler flag `-DEXECUTORCH_ENABLE_EVENT_TRACER` (add `-DENABLE_XNNPACK_PROFILING` for additional details). With ExecuTorch's Developer Tools integration, you can also now use the Developer Tools to profile the model. You can follow the steps in [Using the ExecuTorch Developer Tools to Profile a Model](./tutorials/devtools-integration-tutorial) on how to profile ExecuTorch models and use Developer Tools' Inspector API to view XNNPACK's internal profiling information. An example implementation is available in the `xnn_executor_runner` (see [tutorial here](tutorial-xnnpack-delegate-lowering.md#profiling)).
73+
We have enabled basic profiling for the XNNPACK delegate that can be enabled with the compiler flag `-DEXECUTORCH_ENABLE_EVENT_TRACER` (add `-DENABLE_XNNPACK_PROFILING` for additional details). With ExecuTorch's Developer Tools integration, you can also now use the Developer Tools to profile the model. You can follow the steps in [Using the ExecuTorch Developer Tools to Profile a Model](./tutorials/devtools-integration-tutorial) on how to profile ExecuTorch models and use Developer Tools' Inspector API to view XNNPACK's internal profiling information. An example implementation is available in the `executor_runner` (see [tutorial here](tutorial-xnnpack-delegate-lowering.md#profiling)).
7474

7575

7676
[comment]: <> (TODO: Refactor quantizer to a more official quantization doc)

docs/source/tutorial-xnnpack-delegate-lowering.md

+5-5
Original file line numberDiff line numberDiff line change
@@ -141,7 +141,7 @@ Note in the example above,
141141
The generated model file will be named `[model_name]_xnnpack_[qs8/fp32].pte` depending on the arguments supplied.
142142

143143
## Running the XNNPACK Model with CMake
144-
After exporting the XNNPACK Delegated model, we can now try running it with example inputs using CMake. We can build and use the xnn_executor_runner, which is a sample wrapper for the ExecuTorch Runtime and XNNPACK Backend. We first begin by configuring the CMake build like such:
144+
After exporting the XNNPACK Delegated model, we can now try running it with example inputs using CMake. We can build and use the `executor_runner`, which is a sample wrapper for the ExecuTorch Runtime and XNNPACK Backend. We first begin by configuring the CMake build like such:
145145
```bash
146146
# cd to the root of executorch repo
147147
cd executorch
@@ -168,15 +168,15 @@ Then you can build the runtime components with
168168
cmake --build cmake-out -j9 --target install --config Release
169169
```
170170

171-
Now you should be able to find the executable built at `./cmake-out/backends/xnnpack/xnn_executor_runner` you can run the executable with the model you generated as such
171+
Now you should be able to find the executable built at `./cmake-out/executor_runner` you can run the executable with the model you generated as such
172172
```bash
173-
./cmake-out/backends/xnnpack/xnn_executor_runner --model_path=./mv2_xnnpack_fp32.pte
173+
./cmake-out/executor_runner --model_path=./mv2_xnnpack_fp32.pte
174174
# or to run the quantized variant
175-
./cmake-out/backends/xnnpack/xnn_executor_runner --model_path=./mv2_xnnpack_q8.pte
175+
./cmake-out/executor_runner --model_path=./mv2_xnnpack_q8.pte
176176
```
177177

178178
## Building and Linking with the XNNPACK Backend
179179
You can build the XNNPACK backend [CMake target](https://github.com/pytorch/executorch/blob/main/backends/xnnpack/CMakeLists.txt#L83), and link it with your application binary such as an Android or iOS application. For more information on this you may take a look at this [resource](demo-apps-android.md) next.
180180

181181
## Profiling
182-
To enable profiling in the `xnn_executor_runner` pass the flags `-DEXECUTORCH_ENABLE_EVENT_TRACER=ON` and `-DEXECUTORCH_BUILD_DEVTOOLS=ON` to the build command (add `-DENABLE_XNNPACK_PROFILING=ON` for additional details). This will enable ETDump generation when running the inference and enables command line flags for profiling (see `xnn_executor_runner --help` for details).
182+
To enable profiling in the `executor_runner` pass the flags `-DEXECUTORCH_ENABLE_EVENT_TRACER=ON` and `-DEXECUTORCH_BUILD_DEVTOOLS=ON` to the build command (add `-DENABLE_XNNPACK_PROFILING=ON` for additional details). This will enable ETDump generation when running the inference and enables command line flags for profiling (see `executor_runner --help` for details).

examples/xnnpack/README.md

+5-5
Original file line numberDiff line numberDiff line change
@@ -24,7 +24,7 @@ The following command will produce a floating-point XNNPACK delegated model `mv2
2424
python3 -m examples.xnnpack.aot_compiler --model_name="mv2" --delegate
2525
```
2626

27-
Once we have the model binary (pte) file, then let's run it with ExecuTorch runtime using the `xnn_executor_runner`. With cmake, you first configure your cmake with the following:
27+
Once we have the model binary (pte) file, then let's run it with ExecuTorch runtime using the `executor_runner`. With cmake, you first configure your cmake with the following:
2828

2929
```bash
3030
# cd to the root of executorch repo
@@ -56,7 +56,7 @@ cmake --build cmake-out -j9 --target install --config Release
5656
Now finally you should be able to run this model with the following command
5757

5858
```bash
59-
./cmake-out/backends/xnnpack/xnn_executor_runner --model_path ./mv2_xnnpack_fp32.pte
59+
./cmake-out/executor_runner --model_path ./mv2_xnnpack_fp32.pte
6060
```
6161

6262
## Quantization
@@ -80,7 +80,7 @@ python3 -m examples.xnnpack.quantization.example --help
8080
```
8181

8282
## Running the XNNPACK Model with CMake
83-
After exporting the XNNPACK Delegated model, we can now try running it with example inputs using CMake. We can build and use the xnn_executor_runner, which is a sample wrapper for the ExecuTorch Runtime and XNNPACK Backend. We first begin by configuring the CMake build like such:
83+
After exporting the XNNPACK Delegated model, we can now try running it with example inputs using CMake. We can build and use the `executor_runner`, which is a sample wrapper for the ExecuTorch Runtime and XNNPACK Backend. We first begin by configuring the CMake build like such:
8484
```bash
8585
# cd to the root of executorch repo
8686
cd executorch
@@ -107,9 +107,9 @@ Then you can build the runtime components with
107107
cmake --build cmake-out -j9 --target install --config Release
108108
```
109109

110-
Now you should be able to find the executable built at `./cmake-out/backends/xnnpack/xnn_executor_runner` you can run the executable with the model you generated as such
110+
Now you should be able to find the executable built at `./cmake-out/executor_runner` you can run the executable with the model you generated as such
111111
```bash
112-
./cmake-out/backends/xnnpack/xnn_executor_runner --model_path=./mv2_quantized.pte
112+
./cmake-out/executor_runner --model_path=./mv2_quantized.pte
113113
```
114114

115115
## Delegating a Quantized Model

examples/xnnpack/executor_runner/TARGETS

-8
This file was deleted.

examples/xnnpack/executor_runner/targets.bzl

-20
This file was deleted.

examples/xnnpack/targets.bzl

-12
Original file line numberDiff line numberDiff line change
@@ -49,15 +49,3 @@ def define_common_targets():
4949
"@EXECUTORCH_CLIENTS",
5050
],
5151
)
52-
53-
# executor_runner for XNNPACK Backend and portable kernels.
54-
runtime.cxx_binary(
55-
name = "xnn_executor_runner",
56-
deps = [
57-
"//executorch/examples/portable/executor_runner:executor_runner_lib",
58-
"//executorch/backends/xnnpack:xnnpack_backend",
59-
"//executorch/kernels/portable:generated_lib",
60-
],
61-
define_static_target = True,
62-
**get_oss_build_kwargs()
63-
)

0 commit comments

Comments
 (0)