Skip to content

Commit bad1c0a

Browse files
authored
Merge pull request #47 from JDAI-CV/update_onnx2bnn
Update onnx2bnn and convert onnx->daq in ci test
2 parents 4337638 + c3b6751 commit bad1c0a

18 files changed

+120
-338
lines changed

.gitmodules

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -13,6 +13,3 @@
1313
[submodule "third_party/protobuf"]
1414
path = third_party/protobuf
1515
url = https://github.com/protocolbuffers/protobuf
16-
[submodule "third_party/pybind11"]
17-
path = third_party/pybind11
18-
url = https://github.com/pybind/pybind11

ci/build_onnx2bnn.sh

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,9 +1,7 @@
11
#! /usr/bin/env bash
22
set -e
33

4-
nproc=$(ci/get_cores.sh)
5-
64
mkdir build_onnx2bnn && cd build_onnx2bnn
75
cmake ..
8-
cmake --build . -- -j$nproc
6+
cmake --build .
97
cd -

ci/dabnn_build_and_test.yml

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -32,7 +32,7 @@ jobs:
3232
- bash: brew install watch gnu-sed
3333
displayName: Install watch and gnu-sed
3434
- bash: ci/build_dabnn.sh
35-
displayName: Build
35+
displayName: Build dabnn
3636
- bash: ci/start_android_emulator.sh
3737
displayName: Start Android Emulator
3838
- template: template_dabnn_run_test.yml
@@ -49,7 +49,7 @@ jobs:
4949
- bash: brew install watch gnu-sed
5050
displayName: Install watch and gnu-sed
5151
- bash: ci/build_dabnn_v7.sh
52-
displayName: Build
52+
displayName: Build dabnn
5353
- bash: ci/start_android_emulator_v7.sh
5454
displayName: Start Android Emulator
5555
- template: template_dabnn_run_test.yml

ci/download_and_convert_models.sh

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,8 @@
1+
#! /usr/bin/env bash
2+
3+
wget "https://drive.google.com/uc?export=download&id=1Xp3HB51H6Nhl6e555ieJubVutQake5sR" -O model_imagenet.onnx
4+
./build_onnx2bnn/tools/onnx2bnn/onnx2bnn model_imagenet.onnx model_imagenet.dab --aggressive --verbose
5+
adb push model_imagenet.dab /data/local/tmp
6+
wget "https://drive.google.com/uc?export=download&id=1zu48CFptAGZ91IDCBPJSPM0bxDuPm9HS" -O model_imagenet_stem.onnx
7+
./build_onnx2bnn/tools/onnx2bnn/onnx2bnn model_imagenet_stem.onnx model_imagenet_stem.dab --aggressive --verbose
8+
adb push model_imagenet_stem.dab /data/local/tmp/

ci/download_models.sh

Lines changed: 0 additions & 6 deletions
This file was deleted.

ci/onnx2bnn_build.yml

Lines changed: 6 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -51,29 +51,24 @@ jobs:
5151
steps:
5252
- checkout: self
5353
submodules: true
54-
- template: template_onnx2bnn_build_python_all_version.yml
55-
- task: CopyFiles@2
56-
inputs:
57-
sourceFolder: '.setuptools-cmake-build\tools\onnx2bnn\Release\'
58-
contents: 'onnx2bnn.exe'
59-
targetFolder: $(Build.ArtifactStagingDirectory)
54+
- bash: ci/build_onnx2bnn.sh
55+
displayName: Build
6056
- template: template_onnx2bnn_publish_artifacts.yml
6157
- template: template_onnx2bnn_github_release.yml
62-
- template: template_onnx2bnn_upload_to_pypi.yml
6358
- job: macOS
6459
pool:
6560
vmImage: 'macOS-10.14'
6661
steps:
6762
- checkout: self
6863
submodules: true
69-
- template: template_onnx2bnn_build_python_all_version.yml
70-
- script: 'cp .setuptools-cmake-build/tools/onnx2bnn/onnx2bnn .setuptools-cmake-build/tools/onnx2bnn/onnx2bnn-macos'
64+
- bash: ci/build_onnx2bnn.sh
65+
displayName: Build
66+
- script: 'cp build_onnx2bnn/tools/onnx2bnn/onnx2bnn build_onnx2bnn/tools/onnx2bnn/onnx2bnn-macos'
7167
displayName: 'Rename onnx2bnn'
7268
- task: CopyFiles@2
7369
inputs:
74-
sourceFolder: '.setuptools-cmake-build/tools/onnx2bnn'
70+
sourceFolder: 'build_onnx2bnn/tools/onnx2bnn'
7571
contents: 'onnx2bnn-macos'
7672
targetFolder: $(Build.ArtifactStagingDirectory)
7773
- template: template_onnx2bnn_publish_artifacts.yml
7874
- template: template_onnx2bnn_github_release.yml
79-
- template: template_onnx2bnn_upload_to_pypi.yml

ci/template_dabnn_run_test.yml

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,9 @@ steps:
33
displayName: Binary Conv Test
44
- bash: ci/adb_push_and_run.sh build_dabnn/tests/bgemm_test
55
displayName: Binary Gemm Test
6-
- bash: ci/download_models.sh
7-
displayName: Download Models
6+
- bash: ci/build_onnx2bnn.sh
7+
displayName: Build onnx2bnn
8+
- bash: ci/download_and_convert_models.sh
9+
displayName: Download and Convert Models
810
- bash: ci/adb_push_and_run.sh build_dabnn/tests/net_test
911
displayName: Model Test

ci/template_onnx2bnn_build_python.yml

Lines changed: 0 additions & 6 deletions
This file was deleted.

ci/template_onnx2bnn_build_python_all_version.yml

Lines changed: 0 additions & 20 deletions
This file was deleted.

ci/template_onnx2bnn_upload_to_pypi.yml

Lines changed: 0 additions & 9 deletions
This file was deleted.

third_party/pybind11

Lines changed: 0 additions & 1 deletion
This file was deleted.

tools/onnx2bnn/OnnxConverter.cpp

Lines changed: 27 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -172,9 +172,10 @@ std::vector<OnnxConverter::BTensor> OnnxConverter::split(
172172
return outputs;
173173
}
174174

175-
void OnnxConverter::Convert(const ONNX_NAMESPACE::ModelProto &model_proto,
176-
const std::string &filepath,
177-
const OnnxConverter::Level level) {
175+
std::vector<std::string> OnnxConverter::Convert(
176+
const ONNX_NAMESPACE::ModelProto &model_proto, const std::string &filepath,
177+
const OnnxConverter::Level level,
178+
const std::vector<std::string> &expected_binary_conv_outputs) {
178179
GOOGLE_PROTOBUF_VERIFY_VERSION;
179180

180181
// We recognize binary convolutions in our custom ONNX optimizers.
@@ -236,6 +237,7 @@ void OnnxConverter::Convert(const ONNX_NAMESPACE::ModelProto &model_proto,
236237
inputs.push_back(flat_input);
237238
}
238239

240+
vector<string> binary_conv_outputs;
239241
vector<string> skipped_act;
240242
bool has_reshape = false;
241243
for (const auto &node : model_proto_.graph().node()) {
@@ -270,7 +272,15 @@ void OnnxConverter::Convert(const ONNX_NAMESPACE::ModelProto &model_proto,
270272
}
271273

272274
auto ori_weight_name = m(node.input(1));
273-
const bool binary_conv = (node.domain() == "dabnn");
275+
const bool binary_conv =
276+
(node.domain() == "dabnn") ||
277+
(std::find(expected_binary_conv_outputs.begin(),
278+
expected_binary_conv_outputs.end(),
279+
node.output(0)) !=
280+
expected_binary_conv_outputs.end());
281+
if (binary_conv) {
282+
binary_conv_outputs.push_back(node.output(0));
283+
}
274284
AddConv(m(node.input(0)), strides, pads, dilations, group,
275285
ori_weight_name, bias_name, m(node.output(0)), binary_conv);
276286
VLOG(5) << "Converting Conv completed";
@@ -472,6 +482,17 @@ void OnnxConverter::Convert(const ONNX_NAMESPACE::ModelProto &model_proto,
472482
throw std::invalid_argument("Unsupported operator " + op);
473483
}
474484
}
485+
486+
for (const auto &expected : expected_binary_conv_outputs) {
487+
if (std::find(binary_conv_outputs.begin(), binary_conv_outputs.end(),
488+
expected) == binary_conv_outputs.end()) {
489+
throw std::invalid_argument(
490+
expected +
491+
" is in the list file but not in the ONNX model, please check "
492+
"your list file");
493+
}
494+
}
495+
475496
auto flat_layers = builder_.CreateVector(layers_);
476497
auto flat_inputs = builder_.CreateVector(inputs);
477498
auto flat_tensors = builder_.CreateVector(tensors_);
@@ -487,6 +508,8 @@ void OnnxConverter::Convert(const ONNX_NAMESPACE::ModelProto &model_proto,
487508
ofs.write(reinterpret_cast<char *>(builder_.GetBufferPointer()),
488509
builder_.GetSize());
489510
ofs.close();
511+
512+
return binary_conv_outputs;
490513
}
491514

492515
void OnnxConverter::CalculateCoeff(const ONNX_NAMESPACE::NodeProto &node,

tools/onnx2bnn/OnnxConverter.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -153,9 +153,9 @@ class OnnxConverter {
153153
kModerate,
154154
kAggressive,
155155
};
156-
void Convert(const ONNX_NAMESPACE::ModelProto &model,
156+
std::vector<std::string> Convert(const ONNX_NAMESPACE::ModelProto &model,
157157
const std::string &filepath,
158-
const Level level=Level::kModerate);
158+
const Level level, const std::vector<std::string> &expected_binary_conv_outputs);
159159
};
160160

161161
template <>

tools/onnx2bnn/onnx2bnn.cpp

Lines changed: 70 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -16,52 +16,91 @@ using std::string;
1616
using std::vector;
1717

1818
void usage(const std::string &filename) {
19+
std::cout << "Usage:" << std::endl;
20+
std::cout << " " << filename
21+
<< " onnx_model output_filename [ --strict | --moderate | "
22+
"--aggressive ] [--binary-list] [--verbose]"
23+
<< std::endl;
24+
std::cout << std::endl;
25+
std::cout << "Options:" << std::endl;
26+
std::cout
27+
<< " --aggressive The default optimization level. In this level, "
28+
"onnx2bnn will mark all convolutions with binary (+1/-1) weights as "
29+
"binary convolutions. It is for the existing BNN models, which may "
30+
"not use the correct padding value. Note: The output of the "
31+
"generated dabnn model is different from that of the ONNX model "
32+
"since the padding value is 0 instead of -1."
33+
<< std::endl;
34+
std::cout << " --moderate This level is for our \"standard\" "
35+
"implementation -- A Conv operator with binary weight and "
36+
"following a -1 Pad operator."
37+
<< std::endl;
1938
std::cout
20-
<< "Usage: " << filename
21-
<< " onnx_model output_filename [--optimize strict|moderate|aggressive]"
39+
<< " --strict In this level, onnx2bnn only recognizes the "
40+
"following natural and correct \"pattern\" of binary convolutions: "
41+
"A Conv operator, whose input is got from a Sign op and a Pad op "
42+
"(the order doesn't matter), and weight is got from a Sign op."
2243
<< std::endl;
23-
std::cout << "Example: " << filename
44+
std::cout
45+
<< " --binary-list A text file containing the **output "
46+
"names** of some convolutions, which will be treated as binary "
47+
"convlutions unconditionally. It is mainly for benchmark purpose."
48+
<< std::endl;
49+
std::cout << std::endl;
50+
std::cout << "Example:" << std::endl;
51+
std::cout << " " << filename
2452
<< " model.onnx model.dab (The optimization leval will be "
2553
"\"aggressive\")"
2654
<< std::endl;
27-
std::cout << "Example: " << filename
28-
<< " model.onnx model.dab --optimize strict (The optimization "
55+
std::cout << " " << filename
56+
<< " model.onnx model.dab --strict (The optimization "
2957
"level will be \"strict\")"
3058
<< std::endl;
3159
}
3260

3361
int main(int argc, char **argv) {
3462
argh::parser cmdl;
35-
cmdl.add_param("optimize");
63+
cmdl.add_param("--binary-list");
3664
cmdl.parse(argc, argv);
3765
google::InitGoogleLogging(cmdl[0].c_str());
3866
FLAGS_alsologtostderr = true;
3967
if (!cmdl(2)) {
4068
usage(cmdl[0]);
4169
return -1;
4270
}
43-
// flags like 'onnx2bnn --strict' is not supported now
4471
for (const auto flag : cmdl.flags()) {
45-
std::cout << "Invalid flag: " << flag << std::endl;
46-
usage(cmdl[0]);
47-
return -2;
72+
if (flag != "strict" && flag != "moderate" && flag != "aggressive" &&
73+
flag != "verbose") {
74+
std::cout << "Invalid flag: " << flag << std::endl;
75+
usage(cmdl[0]);
76+
return -2;
77+
}
4878
}
4979

50-
const std::string opt_level_str =
51-
cmdl("optimize").str().empty() ? "aggressive" : cmdl("optimize").str();
52-
53-
bnn::OnnxConverter::Level opt_level;
54-
if (opt_level_str == "strict") {
80+
bnn::OnnxConverter::Level opt_level =
81+
bnn::OnnxConverter::Level::kAggressive;
82+
if (cmdl["strict"]) {
5583
opt_level = bnn::OnnxConverter::Level::kStrict;
56-
} else if (opt_level_str == "moderate") {
84+
} else if (cmdl["moderate"]) {
5785
opt_level = bnn::OnnxConverter::Level::kModerate;
58-
} else if (opt_level_str == "aggressive") {
86+
} else if (cmdl["aggressive"]) {
5987
opt_level = bnn::OnnxConverter::Level::kAggressive;
60-
} else {
61-
std::cout << "Invalid optimization level: " << opt_level_str
62-
<< std::endl;
63-
usage(cmdl[0]);
64-
return -3;
88+
}
89+
90+
if (cmdl["verbose"]) {
91+
FLAGS_v = 5;
92+
}
93+
94+
const auto binary_list_filepath = cmdl("binary-list").str();
95+
vector<string> expected_binary_conv_outputs;
96+
if (!binary_list_filepath.empty()) {
97+
std::ifstream ifs(binary_list_filepath);
98+
if (ifs.is_open()) {
99+
string binary_conv_output;
100+
while (ifs >> binary_conv_output) {
101+
expected_binary_conv_outputs.push_back(binary_conv_output);
102+
}
103+
}
65104
}
66105

67106
ONNX_NAMESPACE::ModelProto model_proto;
@@ -72,7 +111,15 @@ int main(int argc, char **argv) {
72111
}
73112

74113
bnn::OnnxConverter converter;
75-
converter.Convert(model_proto, cmdl[2], opt_level);
114+
const auto binary_conv_outputs = converter.Convert(
115+
model_proto, cmdl[2], opt_level, expected_binary_conv_outputs);
116+
117+
LOG(INFO) << "Conversion completed! Found " << binary_conv_outputs.size()
118+
<< " binary convolutions. Add --verbose to get what they are.";
119+
VLOG(5) << "The outputs name of binary convolutions are: ";
120+
for (const auto &x : binary_conv_outputs) {
121+
VLOG(5) << x;
122+
}
76123

77124
google::protobuf::ShutdownProtobufLibrary();
78125
return 0;

tools/onnx2bnn/python/onnx2bnn/__init__.py

Lines changed: 0 additions & 1 deletion
This file was deleted.

tools/onnx2bnn/python/onnx2bnn/__main__.py

Lines changed: 0 additions & 18 deletions
This file was deleted.

tools/onnx2bnn/python/onnx2bnn/convert.py

Lines changed: 0 additions & 17 deletions
This file was deleted.

0 commit comments

Comments (0)