@@ -95,8 +95,8 @@ static void col2im(const Tensor &col_matrix, const TensorDim &kdim,
9595 */
9696 auto apply_data = [&]<typename T>(T *val) {
9797 unsigned col_w = 0;
98- for (int hs = -pt; hs <= h_stride_end; hs += hstride) {
99- for (int ws = -pl; ws <= w_stride_end; ws += wstride) {
98+ for (int hs = -(int)pt; hs <= h_stride_end; hs += hstride) {
99+ for (int ws = -(int)pl; ws <= w_stride_end; ws += wstride) {
100100 unsigned col_h = 0;
101101 int patch_height_end = hs + eff_k_height;
102102 int patch_width_end = ws + eff_k_width;
@@ -238,7 +238,7 @@ static void im2col(const Tensor &in, const TensorDim &kdim,
238238 // / hs is height_strided, ws is width_strided
239239 unsigned int owidth = out.width();
240240 unsigned int base_im_w = 0;
241- for (int hs = -pt; hs <= h_stride_end; hs += mstride[0]) {
241+ for (int hs = -(int)pt; hs <= h_stride_end; hs += mstride[0]) {
242242 unsigned int base_im_h = 0 ;
243243 int patch_height_end = eff_k_height + hs;
244244 // / map the patch to a single line looping through channel
@@ -252,7 +252,7 @@ static void im2col(const Tensor &in, const TensorDim &kdim,
252252 }
253253
254254 unsigned int im_w = base_im_w;
255- for (int ws = -pl; ws <= w_stride_end; ws += mstride[1]) {
255+ for (int ws = -(int)pl; ws <= w_stride_end; ws += mstride[1]) {
256256 unsigned int im_h = base_im_h;
257257 int patch_width_end = eff_k_width + ws;
258258
@@ -610,7 +610,7 @@ void Conv2DLayer::calcGradient(RunLayerContext &context) {
610610 * for the whole batch. try this while benchmarking.
611611 */
612612 im2col(in_sub, filter_dim, padding, stride, dilation, result);
613- deriv_sub.dot(result, delK, false, false, b == 0 ? 0 : 1);
613+ deriv_sub.dot(result, delK, false, false, b == 0 ? 0.0f : 1.0f);
614614 }
615615 result.deallocate ();
616616 }
0 commit comments