@@ -145,6 +145,8 @@ double* Diwa::inference(double *inputNeurons) {
     memcpy(this->outputs, inputNeurons, sizeof(double) * this->inputNeurons);
     if(!this->hiddenLayers) {
         double *returnValues = outputs;
+
+        #pragma omp parallel for
         for(int j = 0; j < this->outputNeurons; ++j) {
            double sum = *weights++ * -1.0;
 
@@ -166,6 +168,7 @@ double* Diwa::inference(double *inputNeurons) {
 
     inputs += this->inputNeurons;
     for(int h = 1; h < this->hiddenLayers; ++h) {
+        #pragma omp parallel for
         for(int j = 0; j < this->hiddenNeurons; ++j) {
             double sum = *weights++ * -1.0;
 
@@ -178,6 +181,8 @@ double* Diwa::inference(double *inputNeurons) {
     }
 
     double *returnValue = outputs;
+
+    #pragma omp parallel for
     for(int j = 0; j < this->outputNeurons; ++j) {
         double sum = *weights++ * -1.0;
 
@@ -270,6 +275,7 @@ void Diwa::train(double learningRate, double *inputNeurons, double *outputNeuron
             (this->inputNeurons + this->hiddenNeurons *
             (this->hiddenLayers - 1)) : 0);
 
+        #pragma omp parallel for
         for(int j = 0; j < this->outputNeurons; ++j) {
             *weights++ += *deltas * learningRate * -1.0;
 
@@ -297,6 +303,7 @@ void Diwa::train(double learningRate, double *inputNeurons, double *outputNeuron
             (this->hiddenNeurons + 1) * this->hiddenNeurons *
             (h - 1) : 0);
 
+        #pragma omp parallel for
         for(int j = 0; j < this->hiddenNeurons; ++j) {
             *weights += *deltas * learningRate * -1.0;
 
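
A caution on these pragmas: #pragma omp parallel for requires loop iterations to be independent, but every parallelized loop here advances a shared pointer with *weights++ (and reads *deltas), so each iteration depends on where the previous one left the pointer. Run concurrently, the threads would race on the pointer and read the wrong weights. Below is a minimal sketch of an index-based form of the inference loop that is safe to parallelize; the function name, the sigmoid-style activation, and the per-neuron weight layout (one bias followed by the input weights) are assumptions inferred from the loops in this diff, not Diwa's actual API.

    #include <cmath>

    // Assumed stand-in for Diwa's activation function (sigmoid-style).
    static double activation(double x) {
        return 1.0 / (1.0 + std::exp(-x));
    }

    // Hypothetical index-based rewrite of one inference layer. Each
    // iteration derives its own offset into `weights` from `j`, so no
    // pointer is mutated across iterations and the loop parallelizes.
    void forwardLayer(const double *weights, const double *inputs,
                      double *outputs, int inputCount, int outputCount) {
        #pragma omp parallel for
        for(int j = 0; j < outputCount; ++j) {
            // Per-neuron slice: one bias then inputCount weights,
            // matching the `*weights++ * -1.0` bias term in the diff.
            const double *w = weights + j * (inputCount + 1);
            double sum = *w++ * -1.0;

            for(int k = 0; k < inputCount; ++k)
                sum += *w++ * inputs[k];
            outputs[j] = activation(sum);
        }
    }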
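The same per-iteration offset idea applies to the weight-update loops in train(). A sketch under the same assumed layout, with one delta per output neuron (again, hypothetical names rather than Diwa's own):

    // Hypothetical index-based weight update for the output layer:
    // neuron j owns the disjoint slice weights[j*(inputCount+1) ..],
    // so iterations are independent and safe to run in parallel.
    void updateOutputWeights(double *weights, const double *deltas,
                             const double *inputs, double learningRate,
                             int inputCount, int outputCount) {
        #pragma omp parallel for
        for(int j = 0; j < outputCount; ++j) {
            double *w = weights + j * (inputCount + 1);
            w[0] += deltas[j] * learningRate * -1.0; // bias, as in the diff

            for(int k = 0; k < inputCount; ++k)
                w[k + 1] += deltas[j] * learningRate * inputs[k];
        }
    }

Either way, the pragma is inert unless the file is compiled with OpenMP enabled (-fopenmp on GCC/Clang); without it the loops simply run serially as before, so builds on toolchains without OpenMP support are unaffected.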