1
+ /*
2
+ * Copyright (c) 2024 EdgeImpulse Inc.
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ * http://www.apache.org/licenses/LICENSE-2.0
8
+ *
9
+ * Unless required by applicable law or agreed to in writing,
10
+ * software distributed under the License is distributed on an "AS
11
+ * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
12
+ * express or implied. See the License for the specific language
13
+ * governing permissions and limitations under the License.
14
+ *
15
+ * SPDX-License-Identifier: Apache-2.0
16
+ */
17
+
18
+ #ifndef _EI_CLASSIFIER_INFERENCING_ENGINE_ATON_H
19
+
20
+ #if (EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_ATON )
21
+
22
+ /* Include ----------------------------------------------------------------- */
23
+ #include "edge-impulse-sdk/tensorflow/lite/kernels/custom/tree_ensemble_classifier.h"
24
+ #include "edge-impulse-sdk/classifier/ei_fill_result_struct.h"
25
+ #include "edge-impulse-sdk/classifier/ei_model_types.h"
26
+ #include "edge-impulse-sdk/classifier/ei_run_dsp.h"
27
+ #include "edge-impulse-sdk/porting/ei_logging.h"
28
+
29
+ #include "ll_aton_runtime.h"
30
+ #include "app_config.h"
31
+
32
+ /* Private variables ------------------------------------------------------- */
33
+ static uint8_t * nn_in ;
34
+ static uint8_t * nn_out ;
35
+
36
+ static const LL_Buffer_InfoTypeDef * nn_in_info ;
37
+ static const LL_Buffer_InfoTypeDef * nn_out_info ;
38
+
39
+ LL_ATON_DECLARE_NAMED_NN_INSTANCE_AND_INTERFACE (Default );
40
+
41
+
42
+ EI_IMPULSE_ERROR run_nn_inference_image_quantized (
43
+ const ei_impulse_t * impulse ,
44
+ signal_t * signal ,
45
+ ei_impulse_result_t * result ,
46
+ void * config_ptr ,
47
+ bool debug = false)
48
+ {
49
+ EI_IMPULSE_ERROR fill_res = EI_IMPULSE_OK ;
50
+ extern uint8_t * global_camera_buffer ;
51
+ extern uint8_t * snapshot_buf ;
52
+ // this needs to be changed for multi-model, multi-impulse
53
+ static bool first_run = true;
54
+
55
+ uint64_t ctx_start_us = ei_read_timer_us ();
56
+
57
+ #if DATA_OUT_FORMAT_FLOAT32
58
+ static float32_t * nn_out ;
59
+ #else
60
+ static uint8_t * nn_out ;
61
+ #endif
62
+ static uint32_t nn_out_len ;
63
+
64
+ if (first_run == true) {
65
+
66
+ nn_in_info = LL_ATON_Input_Buffers_Info_Default ();
67
+ nn_out_info = LL_ATON_Output_Buffers_Info_Default ();
68
+
69
+ nn_in = (uint8_t * ) nn_in_info [0 ].addr_start .p ;
70
+ uint32_t nn_in_len = LL_Buffer_len (& nn_in_info [0 ]);
71
+ nn_out = (uint8_t * ) nn_out_info [0 ].addr_start .p ;
72
+
73
+
74
+ #if DATA_OUT_FORMAT_FLOAT32
75
+ nn_out = (float32_t * ) nn_out_info [0 ].addr_start .p ;
76
+ #else
77
+ nn_out = (uint8_t * ) nn_out_info [0 ].addr_start .p ;
78
+ #endif
79
+ nn_out_len = LL_Buffer_len (& nn_out_info [0 ]);
80
+
81
+ first_run = false;
82
+ }
83
+
84
+ memcpy (nn_in , snapshot_buf , impulse -> input_width * impulse -> input_height * 3 );
85
+ #ifdef USE_DCACHE
86
+ SCB_CleanInvalidateDCache_by_Addr (nn_in , impulse -> input_width * impulse -> input_height * 3 );
87
+ #endif
88
+
89
+ LL_ATON_RT_Main (& NN_Instance_Default );
90
+
91
+ #ifdef USE_DCACHE
92
+ SCB_CleanInvalidateDCache_by_Addr (nn_out , nn_out_len );
93
+ #endif
94
+
95
+ ei_learning_block_config_tflite_graph_t * block_config = (ei_learning_block_config_tflite_graph_t * )impulse -> learning_blocks [0 ].config ;
96
+ if (block_config -> classification_mode == EI_CLASSIFIER_CLASSIFICATION_MODE_OBJECT_DETECTION ) {
97
+ switch (block_config -> object_detection_last_layer ) {
98
+
99
+ case EI_CLASSIFIER_LAST_LAYER_YOLOV5 :
100
+ #if MODEL_OUTPUT_IS_FLOAT
101
+ fill_res = fill_result_struct_f32_yolov5 (
102
+ ei_default_impulse .impulse ,
103
+ & result ,
104
+ 6 , // hard coded for now
105
+ (float * )& data ,//output.data.uint8,
106
+ // output.params.zero_point,
107
+ // output.params.scale,
108
+ ei_default_impulse .impulse -> tflite_output_features_count );
109
+ #else
110
+ fill_res = fill_result_struct_quantized_yolov5 (
111
+ impulse ,
112
+ block_config ,
113
+ result ,
114
+ 6 , // hard coded for now
115
+ (uint8_t * )nn_out ,
116
+ nn_out_info [0 ].offset [0 ],
117
+ nn_out_info [0 ].scale [0 ],
118
+ nn_out_len );
119
+ #endif
120
+ break ;
121
+
122
+ case EI_CLASSIFIER_LAST_LAYER_FOMO :
123
+ fill_res = fill_result_struct_i8_fomo (
124
+ impulse ,
125
+ block_config ,
126
+ result ,
127
+ (int8_t * )nn_out ,
128
+ nn_out_info [0 ].offset [0 ],
129
+ nn_out_info [0 ].scale [0 ],
130
+ impulse -> fomo_output_size ,
131
+ impulse -> fomo_output_size );
132
+ break ;
133
+
134
+ default :
135
+ ei_printf ("ERR: Unsupported object detection last layer (%d)\n" ,
136
+ block_config -> object_detection_last_layer );
137
+ fill_res = EI_IMPULSE_UNSUPPORTED_INFERENCING_ENGINE ;
138
+ break ;
139
+ }
140
+
141
+ }
142
+
143
+ result -> timing .classification_us = ei_read_timer_us () - ctx_start_us ;
144
+
145
+ return fill_res ;
146
+ }
147
+
148
+
149
+ /**
150
+ * @brief Do neural network inferencing over the processed feature matrix
151
+ *
152
+ * @param fmatrix Processed matrix
153
+ * @param result Output classifier results
154
+ * @param[in] debug Debug output enable
155
+ *
156
+ * @return The ei impulse error.
157
+ */
158
+ EI_IMPULSE_ERROR run_nn_inference (
159
+ const ei_impulse_t * impulse ,
160
+ ei_feature_t * fmatrix ,
161
+ uint32_t learn_block_index ,
162
+ uint32_t * input_block_ids ,
163
+ uint32_t input_block_ids_size ,
164
+ ei_impulse_result_t * result ,
165
+ void * config_ptr ,
166
+ bool debug = false)
167
+ {
168
+
169
+
170
+ return EI_IMPULSE_OK ;
171
+ }
172
+
173
+ #endif // EI_CLASSIFIER_INFERENCING_ENGINE
174
+ #endif // _EI_CLASSIFIER_INFERENCING_ENGINE_ATON_H