Commit 58c4adb

Fix Windows Build Warnings
This PR resolves Windows build warnings:

- Type Conversion Warnings (C4244, C4267, C4146, C4293)
- MSVC Compiler Flag Warnings (D9024, D9025, D9027)
- Hexadecimal Floating-Point Warnings (C4305, C4309)

**Self-evaluation:**
1. Build test: [X] Passed [ ] Failed [ ] Skipped
2. Run test: [X] Passed [ ] Failed [ ] Skipped

Signed-off-by: Donghyeon Jeong <[email protected]>
1 parent b332f3f commit 58c4adb

29 files changed (+145, -109 lines)
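Most of the diffs below follow the same few patterns, so here is a minimal standalone sketch (not taken from the nntrainer sources; the variable names only echo the diffs) of the two conversions MSVC complains about most often in this change set: C4244 for silently narrowing a floating-point expression into an int, and C4146 for applying unary minus to an unsigned value.

```cpp
#include <cstdio>

int main() {
  // C4244: a float expression assigned to int is silently truncated;
  // the explicit cast states that the truncation is intentional.
  float progress = 0.75f;
  int barWidth = 40;
  int pos = (int)(barWidth * progress);

  // C4146: unary minus on an unsigned value stays unsigned and wraps;
  // cast to int first, then negate, to get the intended negative index.
  unsigned int pad = 2;
  int start = -(int)pad;

  std::printf("pos=%d start=%d\n", pos, start);
  return 0;
}
```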

meson.build

Lines changed: 1 addition & 1 deletion
@@ -131,7 +131,7 @@ elif arch == 'x86' or arch == 'x86_64'
   message('Build for X86 architecture')
   if cxx_compiler_id == 'msvc'
     add_project_arguments(['/arch:AVX2'], language: ['c','cpp'])
-    add_project_arguments(['cpp_args=/MT'], language: ['c','cpp'] )
+    # Note: C runtime library (/MT or /MD) is controlled by b_vscrt option
   else
     add_project_arguments(['-march=native'], language: ['c','cpp'])
     add_project_arguments(['-mavx2', '-mfma'], language: ['c','cpp'])

nntrainer/dataset/databuffer.cpp

Lines changed: 1 addition & 1 deletion
@@ -197,7 +197,7 @@ void DataBuffer::displayProgress(const int count, float loss) {
   else
     progress = (((float)(count * batch_size)) / (float)samples_per_epoch);
 
-  int pos = barWidth * progress;
+  int pos = (int)(barWidth * progress);
   std::cout << " [ ";
   for (int l = 0; l < barWidth; ++l) {
     if (l <= pos)

nntrainer/dataset/dir_data_producers.cpp

Lines changed: 2 additions & 2 deletions
@@ -118,7 +118,7 @@ DirDataProducer::finalize(const std::vector<TensorDim> &input_dims,
       std::string p = std::filesystem::absolute(entry.path()).string();
       if (p.compare(".") && p.compare("..")) {
         num_data++;
-        data_list.push_back(std::make_pair(id, p));
+        data_list.push_back(std::make_pair((unsigned int)id, p));
       }
       itr++;
     }
@@ -152,7 +152,7 @@ DirDataProducer::finalize(const std::vector<TensorDim> &input_dims,
 
   unsigned int c_id = data_list[idx].first;
 
-  std::memset(labels[0].getData(), 0.0, num_class * sizeof(float));
+  std::memset(labels[0].getData(), 0, num_class * sizeof(float));
 
   labels[0].getData()[c_id] = 1.0;
 
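One note on the std::memset change above: memset's fill-value parameter is an int that is written byte-by-byte, so passing 0.0 both triggers a double-to-int conversion warning and is misleading, because no float value is ever stored. Zero-filling a float buffer with 0 only works because the all-zero bit pattern happens to read back as 0.0f in IEEE-754. A tiny self-contained sketch:

```cpp
#include <cstring>
#include <iostream>

int main() {
  float buf[4] = {1.0f, 2.0f, 3.0f, 4.0f};

  // std::memset(void *dest, int value, size_t count): only the low byte of
  // `value` is written `count` times, so 0 (not 0.0) is the right argument.
  std::memset(buf, 0, sizeof(buf)); // all-zero bytes read back as 0.0f

  for (float f : buf)
    std::cout << f << ' ';
  std::cout << '\n';
  return 0;
}
```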
nntrainer/dataset/raw_file_data_producer.cpp

Lines changed: 6 additions & 6 deletions
@@ -50,12 +50,12 @@ RawFileDataProducer::finalize(const std::vector<TensorDim> &input_dims,
   auto sz = size(input_dims, label_dims);
   auto path_prop = std::get<props::FilePath>(*raw_file_props);
 
-  auto size_accumulator = [](const unsigned int &a, const TensorDim &b) {
+  auto size_accumulator = [](const size_t &a, const TensorDim &b) {
     return a + b.getFeatureLen();
   };
 
-  auto sample_size =
-    std::accumulate(input_dims.begin(), input_dims.end(), 0u, size_accumulator);
+  auto sample_size = std::accumulate(input_dims.begin(), input_dims.end(),
+                                     (size_t)0, size_accumulator);
   sample_size = std::accumulate(label_dims.begin(), label_dims.end(),
                                 sample_size, size_accumulator);
 
@@ -85,12 +85,12 @@ RawFileDataProducer::finalize(const std::vector<TensorDim> &input_dims,
 unsigned int
 RawFileDataProducer::size(const std::vector<TensorDim> &input_dims,
                           const std::vector<TensorDim> &label_dims) const {
-  auto size_accumulator = [](const unsigned int &a, const TensorDim &b) {
+  auto size_accumulator = [](const size_t &a, const TensorDim &b) {
     return a + b.getFeatureLen();
   };
 
-  auto sample_size =
-    std::accumulate(input_dims.begin(), input_dims.end(), 0u, size_accumulator);
+  auto sample_size = std::accumulate(input_dims.begin(), input_dims.end(),
+                                     (size_t)0, size_accumulator);
   sample_size = std::accumulate(label_dims.begin(), label_dims.end(),
                                 sample_size, size_accumulator);
   NNTR_THROW_IF(sample_size == 0, std::invalid_argument)
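The std::accumulate changes above matter because the accumulator type is deduced from the initial-value argument, not from the lambda: with 0u the running sum is an unsigned int even when the per-dimension sizes are size_t, which is what can trigger the C4267 narrowing warning on 64-bit MSVC. A self-contained sketch of the same idea, with made-up feature lengths standing in for TensorDim::getFeatureLen():

```cpp
#include <cstddef>
#include <iostream>
#include <numeric>
#include <vector>

int main() {
  // Stand-ins for per-dimension feature lengths (size_t in the real code).
  std::vector<std::size_t> feature_lens = {1024, 2048, 4096};

  auto size_accumulator = [](std::size_t a, std::size_t b) { return a + b; };

  // Initial value 0u makes the accumulator an unsigned int, so the size_t
  // sum is narrowed on every step (C4267 on 64-bit MSVC).
  unsigned int narrow = std::accumulate(feature_lens.begin(), feature_lens.end(),
                                        0u, size_accumulator);

  // Initial value (size_t)0 keeps the whole accumulation in size_t,
  // which is the fix applied in the diff above.
  std::size_t wide = std::accumulate(feature_lens.begin(), feature_lens.end(),
                                     (std::size_t)0, size_accumulator);

  std::cout << narrow << ' ' << wide << '\n';
  return 0;
}
```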

nntrainer/layers/conv2d_layer.cpp

Lines changed: 5 additions & 5 deletions
@@ -95,8 +95,8 @@ static void col2im(const Tensor &col_matrix, const TensorDim &kdim,
    */
   auto apply_data = [&]<typename T>(T *val) {
     unsigned col_w = 0;
-    for (int hs = -pt; hs <= h_stride_end; hs += hstride) {
-      for (int ws = -pl; ws <= w_stride_end; ws += wstride) {
+    for (int hs = -(int)pt; hs <= h_stride_end; hs += hstride) {
+      for (int ws = -(int)pl; ws <= w_stride_end; ws += wstride) {
         unsigned col_h = 0;
         int patch_height_end = hs + eff_k_height;
         int patch_width_end = ws + eff_k_width;
@@ -238,7 +238,7 @@ static void im2col(const Tensor &in, const TensorDim &kdim,
   /// hs is height_strided, ws is width_strided
   unsigned int owidth = out.width();
   unsigned int base_im_w = 0;
-  for (int hs = -pt; hs <= h_stride_end; hs += mstride[0]) {
+  for (int hs = -(int)pt; hs <= h_stride_end; hs += mstride[0]) {
     unsigned int base_im_h = 0;
     int patch_height_end = eff_k_height + hs;
     /// map the patch to a single line looping through channel
@@ -252,7 +252,7 @@ static void im2col(const Tensor &in, const TensorDim &kdim,
     }
 
     unsigned int im_w = base_im_w;
-    for (int ws = -pl; ws <= w_stride_end; ws += mstride[1]) {
+    for (int ws = -(int)pl; ws <= w_stride_end; ws += mstride[1]) {
       unsigned int im_h = base_im_h;
       int patch_width_end = eff_k_width + ws;
 
@@ -610,7 +610,7 @@ void Conv2DLayer::calcGradient(RunLayerContext &context) {
      * for the whole batch. try this while benchmarking.
      */
     im2col(in_sub, filter_dim, padding, stride, dilation, result);
-    deriv_sub.dot(result, delK, false, false, b == 0 ? 0 : 1);
+    deriv_sub.dot(result, delK, false, false, b == 0 ? 0.0f : 1.0f);
   }
   result.deallocate();
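The loop-bound changes above are the C4146 fix: when pt and pl are unsigned padding values, -pt is evaluated in unsigned arithmetic and wraps to a huge positive number, and the loop only lands on the intended negative start index through a second, implementation-defined wrap when that value is stored into the int loop variable. Casting to int before negating expresses the intent directly. A standalone sketch with hypothetical padding and bound values:

```cpp
#include <iostream>

int main() {
  unsigned int pt = 2;   // top padding, unsigned as in the layer code
  int h_stride_end = 4;  // hypothetical upper bound

  // C4146: unary minus on an unsigned operand stays unsigned,
  // so -pt is 4294967294 here, not -2.
  unsigned int wrapped = -pt;

  // Fix from the diff: convert to int first, then negate.
  for (int hs = -(int)pt; hs <= h_stride_end; ++hs)
    std::cout << hs << ' ';
  std::cout << "\n(-pt as unsigned was " << wrapped << ")\n";
  return 0;
}
```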

nntrainer/layers/conv2d_transpose_layer.cpp

Lines changed: 1 addition & 1 deletion
@@ -527,7 +527,7 @@ void Conv2DTransposeLayer::calcGradient(RunLayerContext &context) {
      * for the whole batch. try this while benchmarking.
      */
     im2col_transpose(in_sub, filter_dim, padding, stride, dilation, result);
-    deriv_sub.dot(result, delK, false, false, b == 0 ? 0 : 1);
+    deriv_sub.dot(result, delK, false, false, b == 0 ? 0.0f : 1.0f);
   }
   result.deallocate();

nntrainer/layers/embedding.cpp

Lines changed: 2 additions & 2 deletions
@@ -187,7 +187,7 @@ void EmbeddingLayer::calcGradient(RunLayerContext &context) {
 
   if (djdw.getDataType() == TensorDim::DataType::FP32) {
     for (unsigned int i = 0; i < input_.width(); ++i) {
-      unsigned int embed_idx = ((float *)(in_data))[i];
+      unsigned int embed_idx = (unsigned int)((float *)(in_data))[i];
       // Assume padding is 0 and index always start from 1.
       // If in_data[i] - 1 < 0, then it skips.
       // if (embed_idx == 0)
@@ -203,7 +203,7 @@ void EmbeddingLayer::calcGradient(RunLayerContext &context) {
   } else if (djdw.getDataType() == TensorDim::DataType::FP16) {
 #ifdef ENABLE_FP16
     for (unsigned int i = 0; i < input_.width(); ++i) {
-      unsigned int embed_idx = ((float *)(in_data))[i];
+      unsigned int embed_idx = (unsigned int)((float *)(in_data))[i];
       // Assume padding is 0 and index always start from 1.
       // If in_data[i] - 1 < 0, then it skips.
       // if (embed_idx == 0)

nntrainer/layers/loss/cross_entropy_sigmoid_loss_layer.cpp

Lines changed: 4 additions & 3 deletions
@@ -38,9 +38,9 @@ void CrossEntropySigmoidLossLayer::forwarding(RunLayerContext &context,
   // @note: the output should be logit before applying sigmoid
   // log(1 + exp(-abs(y))) + max(y, 0)
   Tensor mid_term = y.apply<float>(static_cast<float (*)(float)>(&std::fabs))
-                      .multiply(-1.0)
+                      .multiply(-1.0f)
                       .apply<float>(static_cast<float (*)(float)>(&std::exp))
-                      .add(1.0)
+                      .add(1.0f)
                       .apply<float>(logFloat<float>);
   mid_term = mid_term.add(y.apply<float>(ActiFunc::relu<float>));
 
@@ -62,7 +62,8 @@ void CrossEntropySigmoidLossLayer::calcDerivative(RunLayerContext &context) {
 
   y.apply<float>(ActiFunc::sigmoid<float>, ret_derivative);
   ret_derivative.subtract_i(y2);
-  if (ret_derivative.divide_i(ret_derivative.size()) != ML_ERROR_NONE) {
+  if (ret_derivative.divide_i(static_cast<float>(ret_derivative.size())) !=
+      ML_ERROR_NONE) {
     throw std::runtime_error("[CrossEntropySigmoidLossLayer::calcDerivative] "
                              "Error when calculating loss");
   }
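Two separate conversions are being made explicit in this file: -1.0 and 1.0 are double literals, so multiply(-1.0)/add(1.0) on float tensor data implies a double-to-float narrowing (C4244) that the f-suffixed literals avoid, and the element count passed to divide_i is presumably a size_t, so the static_cast<float> spells out the intended integer-to-float conversion. A small sketch of both points using plain floats rather than the Tensor API:

```cpp
#include <iostream>
#include <vector>

int main() {
  std::vector<float> v(8, 2.0f);

  // 0.5 is a double literal: float * double is computed in double and then
  // narrowed back to float (C4244); 0.5f keeps the whole expression in float.
  float scaled_double = v[0] * 0.5;
  float scaled_float = v[0] * 0.5f;

  // v.size() is a size_t; cast explicitly when the intent is float division,
  // mirroring divide_i(static_cast<float>(...)) in the diff above.
  float mean = (scaled_double + scaled_float) / static_cast<float>(v.size());

  std::cout << mean << '\n';
  return 0;
}
```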

nntrainer/layers/loss/cross_entropy_softmax_loss_layer.cpp

Lines changed: 3 additions & 2 deletions
@@ -38,7 +38,7 @@ void CrossEntropySoftmaxLossLayer::forwarding(RunLayerContext &context,
   Tensor &y2 = context.getLabel(SINGLE_INOUT_IDX);
   l = y2.multiply(hidden_.apply<float>(logFloat<float>))
         .sum_by_batch()
-        .multiply(-1);
+        .multiply(-1.0f);
 
   // update the loss value
   LossLayer::updateLoss(context, l);
@@ -84,7 +84,8 @@ void CrossEntropySoftmaxLossLayer::calcDerivative(RunLayerContext &context) {
   // TODO: verify y and ret_derivative must not be same as loss layer is not
   // working in-place
   ret.subtract(y2, ret_derivative);
-  if (ret_derivative.divide_i(ret.batch()) != ML_ERROR_NONE) {
+  if (ret_derivative.divide_i(static_cast<float>(ret.batch())) !=
+      ML_ERROR_NONE) {
     throw std::runtime_error("[CrossEntropySoftmaxLossLayer::calcDerivative] "
                              "Error when calculating loss");
   }

nntrainer/layers/mol_attention_layer.cpp

Lines changed: 3 additions & 3 deletions
@@ -220,12 +220,12 @@ void MoLAttentionLayer::forwarding(RunLayerContext &context, bool training) {
   for (unsigned int b = 0; b < batch; b++) {
     for (unsigned int h = 0; h < u_base.height(); h++) {
       float *u_data = u_base.getAddress<float>(b, 0, h, 0);
-      std::fill(u_data, u_data + u_base.width(), h + 1);
+      std::fill(u_data, u_data + u_base.width(), static_cast<float>(h + 1));
     }
   }
 
-  Tensor u_pos = u_base.add(0.5);
-  u_base.add_i(-0.5);
+  Tensor u_pos = u_base.add(0.5f);
+  u_base.add_i(-0.5f);
   Tensor u_neg = u_base;
 
   Tensor beta_eps = beta.add(1e-8f);
