
Commit 324a668

build tensorflow_wrapper with tensorflow

1 parent 939b563

9 files changed: +183 -27 lines

.gitignore (+6)

@@ -1,5 +1,6 @@
 *~
 *.o
+*.so
 *.pyc
 *.d
 .DS_Store*
@@ -23,3 +24,8 @@ compile_commands.json
 .vscode/
 .cache/
 .mypy_cache/
+
+bazel-bin
+bazel-out
+bazel-testlogs
+bazel-tensorflow

CMakeLists.txt (+11 -24)

@@ -190,25 +190,14 @@ target_link_libraries(backend PRIVATE backend_obj)
 
 ## backend: TensorFlow support ##
 if(USE_TENSORFLOW)
-  add_library(tensorflow_cc INTERFACE)
-  target_include_directories(tensorflow_cc INTERFACE ${LIBTF_DIR}/tensorflow/include)
-  target_include_directories(tensorflow_cc INTERFACE ${LIBTF_DIR}/protobuf/include)
-  target_link_libraries(tensorflow_cc INTERFACE ${LIBTF_DIR}/tensorflow/lib/libtensorflow_cc.so)
-  target_link_libraries(tensorflow_cc INTERFACE ${LIBTF_DIR}/tensorflow/lib/libtensorflow_framework.so)
-
-  add_library(tensorflow_wrapper src/nexus/backend/tensorflow_wrapper.cpp)
-  target_include_directories(tensorflow_wrapper PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/src)
-  target_link_libraries(tensorflow_wrapper PUBLIC tensorflow_cc)
-
-  # TensorFlow is currently built with C++14. Some TensorFlow APIs expose
-  # pre-adopted std types with absl. To avoid weird ABI compatibility issues,
-  # compile the wrapper with the same C++ standard as TensorFlow.
-  target_compile_options(tensorflow_wrapper PRIVATE -std=c++14)
-
-  target_compile_definitions(backend_obj PUBLIC USE_TENSORFLOW)
+  add_library(tfwrapper INTERFACE)
+  target_include_directories(tfwrapper INTERFACE ${CMAKE_CURRENT_SOURCE_DIR}/tensorflow/include)
+  target_link_libraries(tfwrapper INTERFACE ${CMAKE_CURRENT_SOURCE_DIR}/tensorflow/lib/libtensorflow_wrapper.so)
+  target_compile_definitions(tfwrapper INTERFACE USE_TENSORFLOW)
+
   target_sources(backend_obj PRIVATE
     src/nexus/backend/tensorflow_model.cpp)
-  target_link_libraries(backend_obj PUBLIC tensorflow_wrapper)
+  target_link_libraries(backend_obj PUBLIC tfwrapper)
 endif()
 
 ## backend: Caffe2 support ##
@@ -279,7 +268,6 @@ endif()
 ## backend: CUDA support ##
 if(USE_GPU)
   target_compile_definitions(common PUBLIC USE_GPU)
-  target_compile_definitions(tensorflow_wrapper PUBLIC USE_GPU)
   target_include_directories(common PUBLIC ${CUDA_PATH}/include)
   target_link_libraries(common PUBLIC
     ${CUDA_PATH}/lib64/stubs/libcuda.so
@@ -339,12 +327,11 @@ target_link_libraries(stress_rankmt PUBLIC bench_dispatcher_obj)
 
 
 ###### tools/test_gpu_mem_sharing ######
-if(USE_GPU AND USE_TENSORFLOW)
-  add_executable(test_gpu_mem_sharing tools/test_gpu_mem_sharing.cpp)
-  target_link_libraries(test_gpu_mem_sharing PUBLIC common)
-  target_link_libraries(test_gpu_mem_sharing PUBLIC tensorflow_wrapper)
-  target_link_libraries(test_gpu_mem_sharing PUBLIC tensorflow_cc)
-endif()
+# if(USE_GPU AND USE_TENSORFLOW)
+#   add_executable(test_gpu_mem_sharing tools/test_gpu_mem_sharing.cpp)
+#   target_link_libraries(test_gpu_mem_sharing PUBLIC common)
+#   target_link_libraries(test_gpu_mem_sharing PUBLIC tfwrapper)
+# endif()
 
 
tensorflow/.bazelrc (new file, +43)

# Cherry-picked from https://github.com/tensorflow/tensorflow/blob/master/.bazelrc

# TensorFlow common settings
build --spawn_strategy=standalone
build --announce_rc
build --define=grpc_no_ares=true
build --noincompatible_remove_legacy_whole_archive
build --noincompatible_prohibit_aapt1
common --experimental_repo_remote_exec

# TensorFlow uses C++14
build --cxxopt=-std=c++14
build --host_cxxopt=-std=c++14

# Compiler optimization
build -c opt
build --copt=-mavx2
build --copt=-mfma

# Suppress C++ compiler warnings
build --copt=-w
build --host_copt=-w

# TensorFlow 2.x API
build --define=tf_api_version=2 --action_env=TF2_BEHAVIOR=1

# Monolithic
build --define=framework_shared_object=false

# Disable features
build --define=with_xla_support=false
build --define=no_aws_support=true
build --define=no_gcp_support=true
build --define=no_hdfs_support=true
build --define=no_nccl_support=true
build --define=build_with_mkl=false --define=enable_mkl=false --define=build_with_openmp=false
build --repo_env TF_NEED_TENSORRT=0
build --repo_env TF_NEED_ROCM=0 --define=using_rocm=false --define=using_rocm_hipcc=false

# Options used to build with CUDA.
build:cuda --repo_env TF_NEED_CUDA=1
build:cuda --crosstool_top=@local_config_cuda//crosstool:toolchain
build:cuda --@local_config_cuda//:enable_cuda
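
The --cxxopt=-std=c++14 / --host_cxxopt=-std=c++14 pair serves the same purpose as the -std=c++14 option removed from CMakeLists.txt above: the wrapper has to be compiled with the same C++ standard as TensorFlow, since some TensorFlow APIs expose absl's pre-adopted std types and a standard mismatch can cause ABI problems. As a hedged illustration (not part of this commit), such a constraint could be asserted directly in the wrapper source:

    // Hypothetical guard, not in the commit: fail the build if this
    // translation unit is compiled with any standard other than the C++14
    // that TensorFlow 2.5 itself uses (__cplusplus is 201402L for C++14).
    static_assert(__cplusplus == 201402L,
                  "tensorflow_wrapper must be built as C++14 to match TensorFlow");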

tensorflow/BUILD (new file, +26)

load(
    "@org_tensorflow//tensorflow:tensorflow.bzl",
    "tf_cc_shared_object",
    "tf_copts",
)

cc_library(
    name = "wrapper",
    srcs = ["tensorflow_wrapper.cpp"],
    hdrs = ["include/nexus/backend/tensorflow_wrapper.h"],
    copts = ["-Iinclude"] + tf_copts(),
    deps = [
        "@org_tensorflow//tensorflow/core:tensorflow",
    ],
)

tf_cc_shared_object(
    name = "libtensorflow_wrapper.so",
    linkopts = [
        "-z defs",
        "-Wl,--version-script=version_script.lds",
    ],
    deps = [
        ":wrapper",
    ],
)

tensorflow/Makefile (new file, +58)

# Default value from: https://www.tensorflow.org/install/source#gpu
TF_CUDA_VERSION ?= 11.2
TF_CUDNN_VERSION ?= 8
TF_CUDA_COMPUTE_CAPABILITIES ?= sm_35,sm_50,sm_60,sm_70,sm_75,compute_80


TF_VERSION != grep -Po '(?<="tensorflow-).*(?=")' WORKSPACE
CPU_SO := libtensorflow_wrapper.so.$(TF_VERSION)-cpu
GPU_SO := libtensorflow_wrapper.so.$(TF_VERSION)-cuda$(TF_CUDA_VERSION)-cudnn$(TF_CUDNN_VERSION)

DOCKER_IMAGE := gcr.io/tensorflow-testing/nosla-cuda11.2-cudnn8.1-ubuntu18.04-manylinux2010-multipython
DOCKER_NAME := tensorflow_wrapper_builder
DOCKER_START := docker run --rm -d --name $(DOCKER_NAME) -v $(shell pwd):/build --init $(DOCKER_IMAGE) sleep inf
DOCKER_EXEC := docker exec -t -w /build $(DOCKER_NAME)
DOCKER_CLEAN := $(DOCKER_EXEC) rm -rf bazel-bin bazel-out bazel-testlogs bazel-tensorflow bazel-build
DOCKER_STOP := docker stop $(DOCKER_NAME)
BAZEL_BUILD = bazel build --color=yes --curses=yes \
    --action_env=PYTHON_BIN_PATH=/usr/local/bin/python3.8 \
    --crosstool_top=@org_tensorflow//third_party/toolchains/preconfig/ubuntu16.04/gcc7_manylinux2010-nvcc-cuda11.2:toolchain
BAZEL_TARGET = //:libtensorflow_wrapper.so

.NOTPARALLEL:
.DEFAULT_GOAL: prompt
.PHONY: prompt cpu gpu clean
prompt:
    @echo Please \`make gpu\` or \`make cpu\`

gpu: lib/$(GPU_SO)
    ln -sf $(GPU_SO) lib/libtensorflow_wrapper.so
lib/$(GPU_SO):
    $(DOCKER_START)
    $(DOCKER_EXEC) $(BAZEL_BUILD) \
        --config=cuda \
        --action_env TF_CUDA_VERSION=$(TF_CUDA_VERSION) \
        --action_env TF_CUDNN_VERSION=$(TF_CUDNN_VERSION) \
        --action_env TF_CUDA_COMPUTE_CAPABILITIES=$(TF_CUDA_COMPUTE_CAPABILITIES) \
        $(BAZEL_TARGET)
    docker cp $(DOCKER_NAME):/build/bazel-bin/libtensorflow_wrapper.so lib/$(GPU_SO)
    $(DOCKER_CLEAN)
    $(DOCKER_STOP)


cpu: lib/$(CPU_SO)
    ln -sf $(CPU_SO) lib/libtensorflow_wrapper.so
lib/$(CPU_SO):
    $(DOCKER_START)
    $(DOCKER_EXEC) $(BAZEL_BUILD) $(BAZEL_TARGET)
    docker cp $(DOCKER_NAME):/build/bazel-bin/libtensorflow_wrapper.so lib/$(CPU_SO)
    $(DOCKER_CLEAN)
    $(DOCKER_STOP)

clean:
    rm -f bazel-bin/libtensorflow_wrapper.so
    rm -f lib/libtensorflow_wrapper.so
    rm -f lib/$(GPU_SO)
    rm -f lib/$(CPU_SO)
    rm -f bazel-out bazel-out bazel-testlogs bazel-tensorflow

tensorflow/WORKSPACE (new file, +24)

load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")

http_archive(
    name = "org_tensorflow",
    build_file = "@//:BUILD",
    sha256 = "e3d0ee227cc19bd0fa34a4539c8a540b40f937e561b4580d4bbb7f0e31c6a713",
    strip_prefix = "tensorflow-2.5.0",
    urls = ["https://github.com/tensorflow/tensorflow/archive/refs/tags/v2.5.0.zip"],
)

load(
    "@org_tensorflow//tensorflow:version_check.bzl",
    "check_bazel_version_at_least"
)
check_bazel_version_at_least("3.7.2")

load("@org_tensorflow//tensorflow:workspace3.bzl", "tf_workspace3")
tf_workspace3()
load("@org_tensorflow//tensorflow:workspace2.bzl", "tf_workspace2")
tf_workspace2()
load("@org_tensorflow//tensorflow:workspace1.bzl", "tf_workspace1")
tf_workspace1()
load("@org_tensorflow//tensorflow:workspace0.bzl", "tf_workspace0")
tf_workspace0()

src/nexus/backend/tensorflow_wrapper.cpp → tensorflow/tensorflow_wrapper.cpp (+4 -3)

@@ -3,7 +3,7 @@
 #include <memory>
 #include <string>
 
-#ifdef USE_GPU
+#if (defined(GOOGLE_CUDA) && GOOGLE_CUDA)
 #include "tensorflow/core/common_runtime/gpu/gpu_process_state.h"
 #else
 #include "tensorflow/core/common_runtime/process_state.h"
@@ -106,7 +106,7 @@ Session::Session(const std::string& visible_device_list,
                  const std::string& pb_path)
     : impl_(std::make_unique<Impl>()) {
   // Init session options
-#ifdef USE_GPU
+#if (defined(GOOGLE_CUDA) && GOOGLE_CUDA)
   auto& tf_option = impl_->gpu_option;
   auto gpu_opt = impl_->gpu_option.config.mutable_gpu_options();
   gpu_opt->set_visible_device_list(visible_device_list);
@@ -140,7 +140,7 @@ Session::Session(const std::string& visible_device_list,
   }
 
   // Get the GPU allocator for creating input buffer
-#ifdef USE_GPU
+#if (defined(GOOGLE_CUDA) && GOOGLE_CUDA)
   impl_->tf_allocator =
       tensorflow::GPUProcessState::singleton()->GetGPUAllocator(
           impl_->gpu_option.config.gpu_options(), tensorflow::TfDeviceId(0), 0,
@@ -175,6 +175,7 @@ Tensor Session::NewTensor(DataType dtype, const std::vector<size_t>& shape) {
     tf_shape.AddDim(dim);
   }
   tensorflow::Tensor tf_tensor(impl_->tf_allocator, tf_dtype, tf_shape);
+  CHECK(tf_tensor.data() != nullptr);
   return TensorProxy::CopyFromTensorFlow(tf_tensor);
 }
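
The guard change from USE_GPU to GOOGLE_CUDA follows from moving the wrapper into TensorFlow's Bazel build: USE_GPU was a Nexus-specific CMake definition that no longer reaches this translation unit, while GOOGLE_CUDA is the macro TensorFlow's build supplies (through tf_copts(), used in tensorflow/BUILD above) when Bazel runs with --config=cuda, i.e. the `make gpu` path in tensorflow/Makefile. A minimal sketch of the resulting pattern, with the hypothetical helper CompiledWithCuda() standing in for the wrapper's real code:

    // Same preprocessor guard as in tensorflow_wrapper.cpp above.
    #if (defined(GOOGLE_CUDA) && GOOGLE_CUDA)
    #include "tensorflow/core/common_runtime/gpu/gpu_process_state.h"  // GPU build
    #else
    #include "tensorflow/core/common_runtime/process_state.h"          // CPU-only build
    #endif

    // Hypothetical helper: reports which allocator path was compiled in.
    constexpr bool CompiledWithCuda() {
    #if (defined(GOOGLE_CUDA) && GOOGLE_CUDA)
      return true;   // built with --config=cuda; GPUProcessState is available
    #else
      return false;  // CPU build; only the host ProcessState allocator exists
    #endif
    }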

tensorflow/version_script.lds (new file, +11)

NEXUS_1.0 {
  # Export nexus namespace
  global:
    extern "C++" {
      nexus::*;
    };

  # Hide everything else.
  local:
    *;
};
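
The version script places every exported symbol of libtensorflow_wrapper.so under a single version node, NEXUS_1.0, and limits the global (visible) set to C++ symbols in the nexus namespace; everything else, including the TensorFlow runtime statically linked into the library, stays local. Combined with the `-z defs` linkopt in tensorflow/BUILD, the shared object exposes only the Nexus wrapper API while still being fully resolved at link time. A small illustration with hypothetical symbols (not from the commit):

    // With the version script above, only the nexus::* definition below ends
    // up in the dynamic symbol table of libtensorflow_wrapper.so.
    namespace nexus {
    void ExportedWrapperEntryPoint() {}  // matches "nexus::*" -> global (exported)
    }  // namespace nexus

    void InternalHelper() {}  // matched only by "*" -> local (hidden)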
