
Commit f534f23

Update train_neuralnet.py
Split the 'evaluate with the test data' code out into train_neuralnet_2.py - the code for Section 4.5.2 - plots the left-hand graph of '[Figure 4-11] Transition of the loss function value'
1 parent 948d38f commit f534f23

File tree

1 file changed: +8 −23 lines


ch04/train_neuralnet.py

+8 −23
@@ -10,20 +10,15 @@
 # Read the data
 (x_train, t_train), (x_test, t_test) = load_mnist(normalize=True, one_hot_label=True)
 
-network = TwoLayerNet(input_size=784, hidden_size=50, output_size=10)
+train_loss_list = []
 
 # Hyperparameters
 iters_num = 10000  # Set the number of iterations appropriately.
 train_size = x_train.shape[0]
 batch_size = 100   # Mini-batch size
 learning_rate = 0.1
 
-train_loss_list = []
-train_acc_list = []
-test_acc_list = []
-
-# Number of iterations per epoch
-iter_per_epoch = max(train_size / batch_size, 1)
+network = TwoLayerNet(input_size=784, hidden_size=50, output_size=10)
 
 for i in range(iters_num):
     # Get a mini-batch
@@ -42,22 +37,12 @@
     # Record the training progress
     loss = network.loss(x_batch, t_batch)
     train_loss_list.append(loss)
-
-    # Compute the accuracy once per epoch
-    if i % iter_per_epoch == 0:
-        train_acc = network.accuracy(x_train, t_train)
-        test_acc = network.accuracy(x_test, t_test)
-        train_acc_list.append(train_acc)
-        test_acc_list.append(test_acc)
-        print("train acc, test acc | " + str(train_acc) + ", " + str(test_acc))
 
 # Draw the graph
-markers = {'train': 'o', 'test': 's'}
-x = np.arange(len(train_acc_list))
-plt.plot(x, train_acc_list, label='train acc')
-plt.plot(x, test_acc_list, label='test acc', linestyle='--')
-plt.xlabel("epochs")
-plt.ylabel("accuracy")
-plt.ylim(0, 1.0)
-plt.legend(loc='lower right')
+x = np.arange(len(train_loss_list))
+plt.plot(x, train_loss_list)
+plt.xlabel("iteration")
+plt.ylabel("loss")
+plt.ylim(0, 9)
+plt.xlim(0, 10000)
 plt.show()
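
For reference, below is a minimal sketch of what the split-out ch04/train_neuralnet_2.py presumably contains: the per-epoch test-data evaluation and accuracy plot that this commit removes from train_neuralnet.py. It is reconstructed only from the removed lines above; the imports, the sys.path setup, and the mini-batch/gradient-update step (which falls between the two hunks and is not visible in this diff) follow the book's usual pattern and are assumptions, so the actual file in the repository may differ.

# Hypothetical reconstruction of train_neuralnet_2.py; see the caveats above.
import sys, os
sys.path.append(os.pardir)  # assumed: make the parent directory (dataset package) importable
import numpy as np
import matplotlib.pyplot as plt
from dataset.mnist import load_mnist
from two_layer_net import TwoLayerNet

# Read the data
(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True, one_hot_label=True)

network = TwoLayerNet(input_size=784, hidden_size=50, output_size=10)

# Hyperparameters (same values as in train_neuralnet.py above)
iters_num = 10000
train_size = x_train.shape[0]
batch_size = 100
learning_rate = 0.1

train_loss_list = []
train_acc_list = []
test_acc_list = []

# Number of iterations per epoch
iter_per_epoch = max(train_size / batch_size, 1)

for i in range(iters_num):
    # Get a mini-batch
    batch_mask = np.random.choice(train_size, batch_size)
    x_batch = x_train[batch_mask]
    t_batch = t_train[batch_mask]

    # Compute the gradient and update the parameters (assumed: TwoLayerNet.gradient())
    grad = network.gradient(x_batch, t_batch)
    for key in ('W1', 'b1', 'W2', 'b2'):
        network.params[key] -= learning_rate * grad[key]

    # Record the training progress
    loss = network.loss(x_batch, t_batch)
    train_loss_list.append(loss)

    # Compute the accuracy once per epoch
    if i % iter_per_epoch == 0:
        train_acc = network.accuracy(x_train, t_train)
        test_acc = network.accuracy(x_test, t_test)
        train_acc_list.append(train_acc)
        test_acc_list.append(test_acc)
        print("train acc, test acc | " + str(train_acc) + ", " + str(test_acc))

# Draw the accuracy graph
markers = {'train': 'o', 'test': 's'}  # defined but unused, as in the removed lines
x = np.arange(len(train_acc_list))
plt.plot(x, train_acc_list, label='train acc')
plt.plot(x, test_acc_list, label='test acc', linestyle='--')
plt.xlabel("epochs")
plt.ylabel("accuracy")
plt.ylim(0, 1.0)
plt.legend(loc='lower right')
plt.show()

Taken together, the updated train_neuralnet.py above presumably plots the loss per iteration, while the split-out script plots the per-epoch train/test accuracy.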
