9
9
#include " factory/layer_factory.hpp"
10
10
11
11
namespace kuiper_infer {
12
+
13
+ void RuntimeGraphShape::InitOperatorInputTensor (
14
+ const std::vector<std::shared_ptr<RuntimeOperator>> &operators) {
15
+ if (operators.empty ()) {
16
+ LOG (ERROR) << " Operators for init input shapes is empty!" ;
17
+ return ;
18
+ }
19
+ for (const auto &op : operators) {
20
+ if (op->input_operands .empty ()) {
21
+ continue ;
22
+ } else {
23
+ const std::map<std::string, std::shared_ptr<RuntimeOperand>> &
24
+ input_operands_map = op->input_operands ;
25
+ for (const auto &input_operand_iter : input_operands_map) {
26
+ const auto &input_operand = input_operand_iter.second ;
27
+ const auto &type = input_operand->type ;
28
+ CHECK (type == RuntimeDataType::kTypeFloat32 )
29
+ << " The graph only support float32 yet!" ;
30
+ const auto &input_operand_shape = input_operand->shapes ;
31
+ auto &input_datas = input_operand->datas ;
32
+
33
+ CHECK (!input_operand_shape.empty ());
34
+ const int32_t batch = input_operand_shape.at (0 );
35
+ CHECK (batch >= 0 ) << " Dynamic batch size is not supported!" ;
36
+ CHECK (input_operand_shape.size () == 2 ||
37
+ input_operand_shape.size () == 4 ||
38
+ input_operand_shape.size () == 3 )
39
+ << " Unsupported tensor shape sizes: " << input_operand_shape.size ();
40
+
41
+ if (!input_datas.empty ()) {
42
+ CHECK (input_datas.size () == batch) << " Batch size is wrong!" ;
43
+ for (int32_t i = 0 ; i < batch; ++i) {
44
+ const std::vector<uint32_t > &input_data_shape =
45
+ input_datas.at (i)->shapes ();
46
+ CHECK (input_data_shape.size () == 3 )
47
+ << " THe origin shape size of operator input data do not equals "
48
+ " to three" ;
49
+ if (input_operand_shape.size () == 4 ) {
50
+ CHECK (input_data_shape.at (0 ) == input_operand_shape.at (1 ) &&
51
+ input_data_shape.at (1 ) == input_operand_shape.at (2 ) &&
52
+ input_data_shape.at (2 ) == input_operand_shape.at (3 ));
53
+ } else if (input_operand_shape.size () == 2 ) {
54
+ CHECK (input_data_shape.at (1 ) == input_operand_shape.at (1 ) &&
55
+ input_data_shape.at (0 ) == 1 && input_data_shape.at (2 ) == 1 );
56
+ } else {
57
+ // current shape size = 3
58
+ CHECK (input_data_shape.at (1 ) == input_operand_shape.at (1 ) &&
59
+ input_data_shape.at (0 ) == 1 &&
60
+ input_data_shape.at (2 ) == input_operand_shape.at (2 ));
61
+ }
62
+ }
63
+ } else {
64
+ input_datas.resize (batch);
65
+ for (int32_t i = 0 ; i < batch; ++i) {
66
+ if (input_operand_shape.size () == 4 ) {
67
+ input_datas.at (i) = std::make_shared<Tensor<float >>(
68
+ input_operand_shape.at (1 ), input_operand_shape.at (2 ),
69
+ input_operand_shape.at (3 ));
70
+ } else if (input_operand_shape.size () == 2 ) {
71
+ input_datas.at (i) = std::make_shared<Tensor<float >>(
72
+ 1 , input_operand_shape.at (1 ), 1 );
73
+ } else {
74
+ // current shape is 3
75
+ input_datas.at (i) = std::make_shared<Tensor<float >>(
76
+ 1 , input_operand_shape.at (1 ), input_operand_shape.at (2 ));
77
+ }
78
+ }
79
+ }
80
+ }
81
+ }
82
+ }
83
+ }
84
+
85
+ void RuntimeGraphShape::InitOperatorOutputTensor (
86
+ const std::vector<pnnx::Operator *> &pnnx_operators,
87
+ const std::vector<std::shared_ptr<RuntimeOperator>> &operators) {
88
+ CHECK (!pnnx_operators.empty () && !operators.empty ());
89
+ CHECK (pnnx_operators.size () == operators.size ());
90
+ for (uint32_t i = 0 ; i < pnnx_operators.size (); ++i) {
91
+ const std::vector<pnnx::Operand *> operands = pnnx_operators.at (i)->outputs ;
92
+ CHECK (operands.size () <= 1 ) << " Only support one node one output yet!" ;
93
+ if (operands.empty ()) {
94
+ continue ;
95
+ }
96
+ CHECK (operands.size () == 1 ) << " Only support one output in the KuiperInfer" ;
97
+ pnnx::Operand *operand = operands.front ();
98
+ const auto &runtime_op = operators.at (i);
99
+ CHECK (operand != nullptr ) << " Operand output is null" ;
100
+ const std::vector<int32_t > &operand_shapes = operand->shape ;
101
+ const auto &output_tensors = runtime_op->output_operands ;
102
+
103
+ const int32_t batch = operand_shapes.at (0 );
104
+ CHECK (batch >= 0 ) << " Dynamic batch size is not supported!" ;
105
+ CHECK (operand_shapes.size () == 2 || operand_shapes.size () == 4 ||
106
+ operand_shapes.size () == 3 )
107
+ << " Unsupported shape sizes: " << operand_shapes.size ();
108
+
109
+ if (!output_tensors) {
110
+ std::shared_ptr<RuntimeOperand> output_operand =
111
+ std::make_shared<RuntimeOperand>();
112
+ output_operand->shapes = operand_shapes;
113
+ output_operand->type = RuntimeDataType::kTypeFloat32 ;
114
+ output_operand->name = operand->name + " _output" ;
115
+ for (int j = 0 ; j < batch; ++j) {
116
+ if (operand_shapes.size () == 4 ) {
117
+ output_operand->datas .push_back (std::make_shared<Tensor<float >>(
118
+ operand_shapes.at (1 ), operand_shapes.at (2 ),
119
+ operand_shapes.at (3 )));
120
+ } else if (operand_shapes.size () == 2 ) {
121
+ output_operand->datas .push_back (
122
+ std::make_shared<Tensor<float >>(1 , operand_shapes.at (1 ), 1 ));
123
+ } else {
124
+ // current shape is 3
125
+ output_operand->datas .push_back (std::make_shared<Tensor<float >>(
126
+ 1 , operand_shapes.at (1 ), operand_shapes.at (2 )));
127
+ }
128
+ }
129
+ runtime_op->output_operands = std::move (output_operand);
130
+ } else {
131
+ CHECK (batch == output_tensors->datas .size ());
132
+ // output_tensors empty
133
+ CHECK (output_tensors->type == RuntimeDataType::kTypeFloat32 );
134
+ CHECK (output_tensors->shapes == operand_shapes);
135
+ for (uint32_t b = 0 ; b < batch; ++b) {
136
+ const std::vector<uint32_t > &tensor_shapes =
137
+ output_tensors->datas .at (b)->shapes ();
138
+ if (operand_shapes.size () == 4 ) {
139
+ if (tensor_shapes.at (0 ) != operand_shapes.at (1 ) ||
140
+ tensor_shapes.at (1 ) != operand_shapes.at (2 ) ||
141
+ tensor_shapes.at (2 ) != operand_shapes.at (3 )) {
142
+ DLOG (WARNING) << " The shape of tensor do not adapting with output operand" ;
143
+ const auto &target_shapes = std::vector<uint32_t >{(uint32_t ) operand_shapes.at (1 ),
144
+ (uint32_t ) operand_shapes.at (2 ),
145
+ (uint32_t ) operand_shapes.at (3 )};
146
+ output_tensors->datas .at (b)->ReRawshape (target_shapes);
147
+ }
148
+ } else if (operand_shapes.size () == 2 ) {
149
+ if (tensor_shapes.at (0 ) != 1 ||
150
+ tensor_shapes.at (1 ) != operand_shapes.at (1 ) ||
151
+ tensor_shapes.at (2 ) != 1 ) {
152
+ DLOG (WARNING) << " The shape of tensor do not adapting with output operand" ;
153
+ const auto &target_shapes = std::vector<uint32_t >{1 , (uint32_t ) operand_shapes.at (1 ), 1 };
154
+ output_tensors->datas .at (b)->ReRawshape (target_shapes);
155
+ }
156
+ } else {
157
+ // current shape is 3
158
+ if (tensor_shapes.at (0 ) != 1 ||
159
+ tensor_shapes.at (1 ) != operand_shapes.at (1 ) ||
160
+ tensor_shapes.at (2 ) != operand_shapes.at (2 )) {
161
+ DLOG (WARNING) << " The shape of tensor do not adapting with output operand" ;
162
+ const auto &target_shapes =
163
+ std::vector<uint32_t >{1 , (uint32_t ) operand_shapes.at (1 ), (uint32_t ) operand_shapes.at (2 )};
164
+ output_tensors->datas .at (b)->ReRawshape (target_shapes);
165
+ }
166
+ }
167
+ }
168
+ }
169
+ }
170
+ }
171
+
12
172
RuntimeGraph::RuntimeGraph (std::string param_path, std::string bin_path)
13
173
: param_path_(std::move(param_path)), bin_path_(std::move(bin_path)) {
14
174
@@ -242,4 +402,34 @@ void RuntimeGraph::InitGraphAttrs(const std::map<std::string, pnnx::Attribute> &
242
402
const std::vector<std::shared_ptr<RuntimeOperator>> RuntimeGraph::operators () const {
243
403
return this ->operators_ ;
244
404
}
405
+
406
+ void RuntimeGraph::Build (const std::string &input_name, const std::string &output_name) {
407
+ if (graph_state_ == GraphState::NeedInit) {
408
+ bool init_graph = Init ();
409
+ LOG_IF (FATAL, !init_graph) << " Init graph failed!" ;
410
+ }
411
+
412
+ CHECK (graph_state_ >= GraphState::NeedBuild)
413
+ << " Graph status error, current state is " << int (graph_state_);
414
+ LOG_IF (FATAL, this ->operators_ .empty ())
415
+ << " Graph operators is empty, may be no init" ;
416
+
417
+ this ->input_operators_maps_ .clear ();
418
+ this ->output_operators_maps_ .clear ();
419
+
420
+ for (const auto &kOperator : this ->operators_ ) {
421
+ if (kOperator ->type == " pnnx.Input" ) {
422
+ this ->input_operators_maps_ .insert ({kOperator ->name , kOperator });
423
+ } else if (kOperator ->type == " pnnx.Output" ) {
424
+ this ->output_operators_maps_ .insert ({kOperator ->name , kOperator });
425
+ } else {
426
+ // 以后的课中加layer的
427
+ }
428
+ }
429
+ RuntimeGraphShape::InitOperatorInputTensor (operators_);
430
+ RuntimeGraphShape::InitOperatorOutputTensor (graph_->ops , operators_);
431
+ graph_state_ = GraphState::Complete;
432
+ input_name_ = input_name;
433
+ output_name_ = output_name;
434
+ }
245
435
}