|
# Load the MNIST dataset: pixel values scaled to [0, 1] and labels one-hot encoded.
(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True, one_hot_label=True)

# Loss recorded at every iteration, plotted after training.
train_loss_list = []

# Hyperparameters
iters_num = 10000  # total number of gradient-descent iterations
train_size = x_train.shape[0]
batch_size = 100  # mini-batch size sampled per iteration
learning_rate = 0.1

# Two-layer net: 784 inputs (28x28 images), 50 hidden units, 10 output classes.
network = TwoLayerNet(input_size=784, hidden_size=50, output_size=10)
27 | 22 |
|
28 | 23 | for i in range(iters_num):
|
29 | 24 | # 미니배치 획득
|
|
42 | 37 | # 학습 경과 기록
|
43 | 38 | loss = network.loss(x_batch, t_batch)
|
44 | 39 | train_loss_list.append(loss)
|
45 |
| - |
46 |
| - # 1에폭당 정확도 계산 |
47 |
| - if i % iter_per_epoch == 0: |
48 |
| - train_acc = network.accuracy(x_train, t_train) |
49 |
| - test_acc = network.accuracy(x_test, t_test) |
50 |
| - train_acc_list.append(train_acc) |
51 |
| - test_acc_list.append(test_acc) |
52 |
| - print("train acc, test acc | " + str(train_acc) + ", " + str(test_acc)) |
53 | 40 |
|
# Plot the recorded training loss against the iteration index.
plt.plot(np.arange(len(train_loss_list)), train_loss_list)
plt.xlim(0, 10000)
plt.ylim(0, 9)
plt.xlabel("iteration")
plt.ylabel("loss")
plt.show()
|
0 commit comments