
Lab Report 1

Ледовской Михаил, Железнов Артем, Щипков Матвей

Group А-02-22

Step 1

Created a new notebook in Google Colab and imported the libraries and modules required for the work.

from google.colab import drive
drive.mount('/content/drive')
Mounted at /content/drive
import os
os.chdir('/content/drive/MyDrive/Colab Notebooks')
from tensorflow import keras
import matplotlib.pyplot as plt
import numpy as np
import sklearn

Step 2

Loaded the MNIST dataset, which contains labeled images of handwritten digits.

from keras.datasets import mnist

Step 3

Split the dataset into training and test data in a 60000:10000 ratio, setting the random_state parameter to 27. Printed the shapes of the resulting training and test arrays.

(X_train,y_train),(X_test,y_test)=mnist.load_data()
from sklearn.model_selection import train_test_split
# merge train and test into a single set
X=np.concatenate((X_train,X_test))
y=np.concatenate((y_train,y_test))
# re-split according to our variant
X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=10000,train_size=60000,random_state=27)
# print the shapes
print('Shape of X train:',X_train.shape)
print('Shape of y train:',y_train.shape)
Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/mnist.npz
11490434/11490434 ━━━━━━━━━━━━━━━━━━━━ 1s 0us/step
Shape of X train: (60000, 28, 28)
Shape of y train: (60000,)
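
For completeness, the shapes of the test arrays can be printed the same way (this snippet is our addition; the expected values follow from test_size=10000):

# print the shapes of the test arrays as well
print('Shape of X test:', X_test.shape)  # expected: (10000, 28, 28)
print('Shape of y test:', y_test.shape)  # expected: (10000,)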

Step 4

Displayed four elements of the training data (indices 1-4) together with their labels.

# display the images
plt.imshow(X_train[1],cmap=plt.get_cmap('gray'))
plt.show()
print(y_train[1])

plt.imshow(X_train[2],cmap=plt.get_cmap('gray'))
plt.show()
print(y_train[2])

plt.imshow(X_train[3],cmap=plt.get_cmap('gray'))
plt.show()
print(y_train[3])

plt.imshow(X_train[4],cmap=plt.get_cmap('gray'))
plt.show()
print(y_train[4])

1

1

4

9
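
The same four samples can also be shown more compactly in a single figure of subplots; a small sketch of ours (same indices 1-4 as above):

# sketch: show the four samples side by side in one figure
fig, axes = plt.subplots(1, 4, figsize=(8, 2))
for ax, idx in zip(axes, range(1, 5)):
    ax.imshow(X_train[idx], cmap='gray')
    ax.set_title(str(y_train[idx]))
    ax.axis('off')
plt.show()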

Step 5

Preprocessed the data: converted the training and test data into a format suitable for training a neural network. The input values must lie in the range from 0 to 1, and the digit labels must be one-hot encoded. Printed the shapes of the preprocessed training and test arrays.

# flatten each 28x28 image into a vector of 784 values
num_pixels=X_train.shape[1]*X_train.shape[2]
X_train=X_train.reshape(X_train.shape[0],num_pixels) / 255
X_test=X_test.reshape(X_test.shape[0],num_pixels) / 255
print('Shape of transformed X train:',X_train.shape)
Shape of transformed X train: (60000, 784)
# convert the labels to one-hot encoding
import keras.utils
y_train=keras.utils.to_categorical(y_train)
y_test=keras.utils.to_categorical(y_test)
print('Shape of transformed y train:',y_train.shape)
num_classes=y_train.shape[1]
Shape of transformed y train: (60000, 10)
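
A quick sanity check of the preprocessing (our own snippet): the pixel values should now lie in [0, 1], and np.argmax should recover the original integer label from a one-hot row.

# sanity check: value range and one-hot decoding
print('Min/max pixel value:', X_train.min(), X_train.max())  # expected: 0.0 1.0
print('One-hot row of the first sample:', y_train[0])
print('Decoded label:', np.argmax(y_train[0]))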

Step 6

Implemented a single-layer neural network model and trained it on the training data, holding out part of the training data for validation. Printed a summary of the network architecture. Plotted the loss on the training and validation data by epoch.

from keras.models import Sequential
from keras.layers import Dense

model_1 = Sequential()
model_1.add(Dense(units=num_classes, input_dim=num_pixels, activation='softmax'))
model_1.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])
/usr/local/lib/python3.12/dist-packages/keras/src/layers/core/dense.py:93: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
  super().__init__(activity_regularizer=activity_regularizer, **kwargs)
print(model_1.summary())
Model: "sequential"
┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━┓
┃ Layer (type)                    ┃ Output Shape           ┃       Param # ┃
┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━┩
│ dense (Dense)                   │ (None, 10)             │         7,850 │
└─────────────────────────────────┴────────────────────────┴───────────────┘
 Total params: 7,850 (30.66 KB)
 Trainable params: 7,850 (30.66 KB)
 Non-trainable params: 0 (0.00 B)
None
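
The UserWarning above can be avoided by declaring the input with an explicit Input layer instead of passing input_dim, as the message suggests. An equivalent definition might look like this (a sketch only; it was not used to produce the results below):

# sketch: the same single-layer model defined with an explicit Input layer
from keras.layers import Input
model_alt = Sequential()
model_alt.add(Input(shape=(num_pixels,)))
model_alt.add(Dense(units=num_classes, activation='softmax'))
model_alt.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])
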
# train the model
H = model_1.fit(X_train, y_train, validation_split=0.1, epochs=50)
Epoch 1/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 6s 3ms/step - accuracy: 0.6997 - loss: 1.1841 - val_accuracy: 0.8700 - val_loss: 0.5217
Epoch 2/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 5s 3ms/step - accuracy: 0.8749 - loss: 0.4929 - val_accuracy: 0.8850 - val_loss: 0.4360
Epoch 3/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.8909 - loss: 0.4140 - val_accuracy: 0.8893 - val_loss: 0.4007
Epoch 4/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 6s 3ms/step - accuracy: 0.8952 - loss: 0.3887 - val_accuracy: 0.8932 - val_loss: 0.3809
Epoch 5/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 5s 3ms/step - accuracy: 0.9001 - loss: 0.3671 - val_accuracy: 0.8973 - val_loss: 0.3675
Epoch 6/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9057 - loss: 0.3505 - val_accuracy: 0.9012 - val_loss: 0.3575
Epoch 7/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9059 - loss: 0.3443 - val_accuracy: 0.9007 - val_loss: 0.3528
Epoch 8/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 5s 3ms/step - accuracy: 0.9101 - loss: 0.3316 - val_accuracy: 0.9017 - val_loss: 0.3450
Epoch 9/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9105 - loss: 0.3246 - val_accuracy: 0.9042 - val_loss: 0.3406
Epoch 10/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9101 - loss: 0.3215 - val_accuracy: 0.9052 - val_loss: 0.3360
Epoch 11/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 5s 3ms/step - accuracy: 0.9143 - loss: 0.3177 - val_accuracy: 0.9058 - val_loss: 0.3321
Epoch 12/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9118 - loss: 0.3145 - val_accuracy: 0.9075 - val_loss: 0.3299
Epoch 13/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9154 - loss: 0.3083 - val_accuracy: 0.9092 - val_loss: 0.3260
Epoch 14/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 5s 3ms/step - accuracy: 0.9138 - loss: 0.3097 - val_accuracy: 0.9090 - val_loss: 0.3246
Epoch 15/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9138 - loss: 0.3087 - val_accuracy: 0.9112 - val_loss: 0.3225
Epoch 16/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9181 - loss: 0.2983 - val_accuracy: 0.9117 - val_loss: 0.3203
Epoch 17/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 5s 3ms/step - accuracy: 0.9181 - loss: 0.2956 - val_accuracy: 0.9118 - val_loss: 0.3197
Epoch 18/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9197 - loss: 0.2950 - val_accuracy: 0.9115 - val_loss: 0.3175
Epoch 19/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9187 - loss: 0.2939 - val_accuracy: 0.9142 - val_loss: 0.3168
Epoch 20/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 5s 3ms/step - accuracy: 0.9181 - loss: 0.2940 - val_accuracy: 0.9142 - val_loss: 0.3142
Epoch 21/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9184 - loss: 0.2963 - val_accuracy: 0.9157 - val_loss: 0.3131
Epoch 22/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9193 - loss: 0.2908 - val_accuracy: 0.9147 - val_loss: 0.3126
Epoch 23/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 5s 3ms/step - accuracy: 0.9186 - loss: 0.2892 - val_accuracy: 0.9137 - val_loss: 0.3116
Epoch 24/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9180 - loss: 0.2937 - val_accuracy: 0.9142 - val_loss: 0.3109
Epoch 25/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9185 - loss: 0.2873 - val_accuracy: 0.9162 - val_loss: 0.3094
Epoch 26/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 3ms/step - accuracy: 0.9178 - loss: 0.2903 - val_accuracy: 0.9153 - val_loss: 0.3089
Epoch 27/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 3ms/step - accuracy: 0.9217 - loss: 0.2856 - val_accuracy: 0.9147 - val_loss: 0.3085
Epoch 28/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9224 - loss: 0.2817 - val_accuracy: 0.9143 - val_loss: 0.3081
Epoch 29/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 6s 3ms/step - accuracy: 0.9223 - loss: 0.2778 - val_accuracy: 0.9153 - val_loss: 0.3068
Epoch 30/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9214 - loss: 0.2830 - val_accuracy: 0.9162 - val_loss: 0.3065
Epoch 31/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9213 - loss: 0.2863 - val_accuracy: 0.9160 - val_loss: 0.3055
Epoch 32/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 5s 3ms/step - accuracy: 0.9223 - loss: 0.2842 - val_accuracy: 0.9172 - val_loss: 0.3048
Epoch 33/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 3ms/step - accuracy: 0.9219 - loss: 0.2815 - val_accuracy: 0.9152 - val_loss: 0.3052
Epoch 34/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9213 - loss: 0.2806 - val_accuracy: 0.9167 - val_loss: 0.3040
Epoch 35/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 3ms/step - accuracy: 0.9226 - loss: 0.2752 - val_accuracy: 0.9172 - val_loss: 0.3033
Epoch 36/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 5s 3ms/step - accuracy: 0.9212 - loss: 0.2772 - val_accuracy: 0.9157 - val_loss: 0.3030
Epoch 37/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9233 - loss: 0.2755 - val_accuracy: 0.9165 - val_loss: 0.3020
Epoch 38/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9232 - loss: 0.2731 - val_accuracy: 0.9173 - val_loss: 0.3018
Epoch 39/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 5s 3ms/step - accuracy: 0.9238 - loss: 0.2715 - val_accuracy: 0.9167 - val_loss: 0.3020
Epoch 40/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9208 - loss: 0.2788 - val_accuracy: 0.9160 - val_loss: 0.3013
Epoch 41/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9240 - loss: 0.2747 - val_accuracy: 0.9177 - val_loss: 0.3007
Epoch 42/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 5s 3ms/step - accuracy: 0.9227 - loss: 0.2743 - val_accuracy: 0.9177 - val_loss: 0.3016
Epoch 43/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9253 - loss: 0.2735 - val_accuracy: 0.9160 - val_loss: 0.3014
Epoch 44/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9243 - loss: 0.2725 - val_accuracy: 0.9168 - val_loss: 0.3009
Epoch 45/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 5s 3ms/step - accuracy: 0.9220 - loss: 0.2814 - val_accuracy: 0.9177 - val_loss: 0.2994
Epoch 46/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9232 - loss: 0.2769 - val_accuracy: 0.9173 - val_loss: 0.2995
Epoch 47/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9238 - loss: 0.2764 - val_accuracy: 0.9170 - val_loss: 0.2992
Epoch 48/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 5s 3ms/step - accuracy: 0.9258 - loss: 0.2670 - val_accuracy: 0.9177 - val_loss: 0.2993
Epoch 49/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9237 - loss: 0.2779 - val_accuracy: 0.9173 - val_loss: 0.2989
Epoch 50/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9254 - loss: 0.2661 - val_accuracy: 0.9172 - val_loss: 0.2989
# plot the loss by epoch
plt.plot(H.history['loss'])
plt.plot(H.history['val_loss'])
plt.grid()
plt.xlabel('Epochs')
plt.ylabel('loss')
plt.legend(['train_loss', 'val_loss'])
plt.title('Loss by epochs')
plt.show()
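
The history object also stores the accuracy metric, so an analogous accuracy-by-epoch plot can be produced the same way (our own addition, not part of the original report):

# sketch: plot accuracy on the training and validation data by epoch
plt.plot(H.history['accuracy'])
plt.plot(H.history['val_accuracy'])
plt.grid()
plt.xlabel('Epochs')
plt.ylabel('accuracy')
plt.legend(['train_accuracy', 'val_accuracy'])
plt.title('Accuracy by epochs')
plt.show()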

Step 7

Applied the trained model to the test data. Printed the loss value and the classification quality metric on the test data.

# evaluate the model on the test data
scores = model_1.evaluate(X_test, y_test)
print('Loss on test data:', scores[0])
print('Accuracy on test data:', scores[1])
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - accuracy: 0.9199 - loss: 0.2956
Loss on test data: 0.2802773714065552
Accuracy on test data: 0.9199000000953674
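
The reported accuracy can also be cross-checked manually by comparing the argmax of the predictions with the argmax of the one-hot labels (a sketch of ours):

# sketch: recompute test accuracy from raw predictions
pred = model_1.predict(X_test)
manual_acc = np.mean(np.argmax(pred, axis=1) == np.argmax(y_test, axis=1))
print('Manually computed accuracy:', manual_acc)  # should match scores[1]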

Step 8

Added one hidden layer to the model and trained and tested it with 100, 300, and 500 neurons in the hidden layer. Based on the classification quality metric on the test data, chose the best number of neurons in the hidden layer.

With 100 neurons

# create the model
model_1h100 = Sequential()
model_1h100.add(Dense(units=100, input_dim=num_pixels, activation='sigmoid'))
model_1h100.add(Dense(units=num_classes, activation='softmax'))
# compile the model
model_1h100.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])

# print the model architecture summary
print(model_1h100.summary())
Model: "sequential_1"
┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━┓
┃ Layer (type)                    ┃ Output Shape           ┃       Param # ┃
┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━┩
│ dense_1 (Dense)                 │ (None, 100)            │        78,500 │
├─────────────────────────────────┼────────────────────────┼───────────────┤
│ dense_2 (Dense)                 │ (None, 10)             │         1,010 │
└─────────────────────────────────┴────────────────────────┴───────────────┘
 Total params: 79,510 (310.59 KB)
 Trainable params: 79,510 (310.59 KB)
 Non-trainable params: 0 (0.00 B)
None
# train the model
H_1h100 = model_1h100.fit(X_train, y_train, validation_split=0.1, epochs=50)
Epoch 1/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 6s 3ms/step - accuracy: 0.5186 - loss: 1.8903 - val_accuracy: 0.8175 - val_loss: 0.9782
Epoch 2/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.8339 - loss: 0.8547 - val_accuracy: 0.8592 - val_loss: 0.6317
Epoch 3/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 5s 3ms/step - accuracy: 0.8692 - loss: 0.5860 - val_accuracy: 0.8738 - val_loss: 0.5115
Epoch 4/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.8823 - loss: 0.4847 - val_accuracy: 0.8818 - val_loss: 0.4504
Epoch 5/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.8907 - loss: 0.4308 - val_accuracy: 0.8892 - val_loss: 0.4150
Epoch 6/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 5s 3ms/step - accuracy: 0.8948 - loss: 0.3960 - val_accuracy: 0.8913 - val_loss: 0.3912
Epoch 7/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.8978 - loss: 0.3748 - val_accuracy: 0.8945 - val_loss: 0.3739
Epoch 8/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9010 - loss: 0.3571 - val_accuracy: 0.8983 - val_loss: 0.3602
Epoch 9/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 5s 3ms/step - accuracy: 0.9028 - loss: 0.3477 - val_accuracy: 0.8993 - val_loss: 0.3502
Epoch 10/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9091 - loss: 0.3322 - val_accuracy: 0.9018 - val_loss: 0.3407
Epoch 11/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9087 - loss: 0.3268 - val_accuracy: 0.9050 - val_loss: 0.3326
Epoch 12/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 5s 3ms/step - accuracy: 0.9085 - loss: 0.3194 - val_accuracy: 0.9057 - val_loss: 0.3265
Epoch 13/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 3ms/step - accuracy: 0.9109 - loss: 0.3084 - val_accuracy: 0.9078 - val_loss: 0.3207
Epoch 14/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9157 - loss: 0.2978 - val_accuracy: 0.9103 - val_loss: 0.3158
Epoch 15/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 3ms/step - accuracy: 0.9160 - loss: 0.2927 - val_accuracy: 0.9108 - val_loss: 0.3105
Epoch 16/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 5s 3ms/step - accuracy: 0.9172 - loss: 0.2932 - val_accuracy: 0.9105 - val_loss: 0.3060
Epoch 17/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9169 - loss: 0.2889 - val_accuracy: 0.9145 - val_loss: 0.3008
Epoch 18/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9190 - loss: 0.2850 - val_accuracy: 0.9133 - val_loss: 0.2973
Epoch 19/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 5s 3ms/step - accuracy: 0.9211 - loss: 0.2770 - val_accuracy: 0.9170 - val_loss: 0.2930
Epoch 20/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9223 - loss: 0.2749 - val_accuracy: 0.9172 - val_loss: 0.2900
Epoch 21/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9237 - loss: 0.2708 - val_accuracy: 0.9173 - val_loss: 0.2866
Epoch 22/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 5s 3ms/step - accuracy: 0.9251 - loss: 0.2617 - val_accuracy: 0.9188 - val_loss: 0.2831
Epoch 23/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9234 - loss: 0.2692 - val_accuracy: 0.9190 - val_loss: 0.2800
Epoch 24/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9250 - loss: 0.2635 - val_accuracy: 0.9207 - val_loss: 0.2763
Epoch 25/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 5s 3ms/step - accuracy: 0.9271 - loss: 0.2546 - val_accuracy: 0.9217 - val_loss: 0.2734
Epoch 26/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9268 - loss: 0.2570 - val_accuracy: 0.9212 - val_loss: 0.2707
Epoch 27/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9283 - loss: 0.2493 - val_accuracy: 0.9228 - val_loss: 0.2673
Epoch 28/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 5s 3ms/step - accuracy: 0.9280 - loss: 0.2467 - val_accuracy: 0.9240 - val_loss: 0.2648
Epoch 29/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9292 - loss: 0.2445 - val_accuracy: 0.9238 - val_loss: 0.2625
Epoch 30/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9321 - loss: 0.2365 - val_accuracy: 0.9248 - val_loss: 0.2589
Epoch 31/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 5s 3ms/step - accuracy: 0.9322 - loss: 0.2420 - val_accuracy: 0.9258 - val_loss: 0.2565
Epoch 32/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9332 - loss: 0.2368 - val_accuracy: 0.9267 - val_loss: 0.2535
Epoch 33/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9356 - loss: 0.2292 - val_accuracy: 0.9280 - val_loss: 0.2511
Epoch 34/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 5s 3ms/step - accuracy: 0.9343 - loss: 0.2272 - val_accuracy: 0.9277 - val_loss: 0.2491
Epoch 35/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9373 - loss: 0.2226 - val_accuracy: 0.9288 - val_loss: 0.2456
Epoch 36/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 3ms/step - accuracy: 0.9376 - loss: 0.2225 - val_accuracy: 0.9287 - val_loss: 0.2441
Epoch 37/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 5s 3ms/step - accuracy: 0.9357 - loss: 0.2244 - val_accuracy: 0.9293 - val_loss: 0.2412
Epoch 38/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 10s 3ms/step - accuracy: 0.9374 - loss: 0.2196 - val_accuracy: 0.9293 - val_loss: 0.2392
Epoch 39/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 5s 3ms/step - accuracy: 0.9374 - loss: 0.2159 - val_accuracy: 0.9305 - val_loss: 0.2371
Epoch 40/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9391 - loss: 0.2159 - val_accuracy: 0.9307 - val_loss: 0.2345
Epoch 41/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 3ms/step - accuracy: 0.9405 - loss: 0.2067 - val_accuracy: 0.9328 - val_loss: 0.2326
Epoch 42/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 5s 3ms/step - accuracy: 0.9409 - loss: 0.2083 - val_accuracy: 0.9323 - val_loss: 0.2301
Epoch 43/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9405 - loss: 0.2056 - val_accuracy: 0.9337 - val_loss: 0.2286
Epoch 44/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 3ms/step - accuracy: 0.9405 - loss: 0.2078 - val_accuracy: 0.9343 - val_loss: 0.2261
Epoch 45/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 5s 3ms/step - accuracy: 0.9420 - loss: 0.2037 - val_accuracy: 0.9338 - val_loss: 0.2243
Epoch 46/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9412 - loss: 0.2013 - val_accuracy: 0.9365 - val_loss: 0.2215
Epoch 47/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 3ms/step - accuracy: 0.9445 - loss: 0.1960 - val_accuracy: 0.9370 - val_loss: 0.2200
Epoch 48/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 5s 3ms/step - accuracy: 0.9441 - loss: 0.1965 - val_accuracy: 0.9375 - val_loss: 0.2178
Epoch 49/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9464 - loss: 0.1902 - val_accuracy: 0.9380 - val_loss: 0.2161
Epoch 50/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 3ms/step - accuracy: 0.9462 - loss: 0.1875 - val_accuracy: 0.9377 - val_loss: 0.2142
# plot the loss by epoch
plt.plot(H_1h100.history['loss'])
plt.plot(H_1h100.history['val_loss'])
plt.grid()
plt.xlabel('Epochs')
plt.ylabel('loss')
plt.legend(['train_loss', 'val_loss'])
plt.title('Loss by epochs')
plt.show()

# evaluate the model on the test data
scores = model_1h100.evaluate(X_test, y_test)
print('Loss on test data:', scores[0])
print('Accuracy on test data:', scores[1])
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.9421 - loss: 0.2016
Loss on test data: 0.1981867104768753
Accuracy on test data: 0.9398000240325928

With 300 neurons

# create the model
model_1h300 = Sequential()
model_1h300.add(Dense(units=300, input_dim=num_pixels, activation='sigmoid'))
model_1h300.add(Dense(units=num_classes, activation='softmax'))
# compile the model
model_1h300.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])

# print the model architecture summary
print(model_1h300.summary())
Model: "sequential_2"
┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━┓
┃ Layer (type)                    ┃ Output Shape           ┃       Param # ┃
┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━┩
│ dense_3 (Dense)                 │ (None, 300)            │       235,500 │
├─────────────────────────────────┼────────────────────────┼───────────────┤
│ dense_4 (Dense)                 │ (None, 10)             │         3,010 │
└─────────────────────────────────┴────────────────────────┴───────────────┘
 Total params: 238,510 (931.68 KB)
 Trainable params: 238,510 (931.68 KB)
 Non-trainable params: 0 (0.00 B)
None
# train the model
H_1h300 = model_1h300.fit(X_train, y_train, validation_split=0.1, epochs=50)
Epoch 1/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 6s 3ms/step - accuracy: 0.5636 - loss: 1.7772 - val_accuracy: 0.8303 - val_loss: 0.8547
Epoch 2/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 5s 3ms/step - accuracy: 0.8468 - loss: 0.7468 - val_accuracy: 0.8572 - val_loss: 0.5789
Epoch 3/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.8721 - loss: 0.5363 - val_accuracy: 0.8743 - val_loss: 0.4822
Epoch 4/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.8854 - loss: 0.4512 - val_accuracy: 0.8823 - val_loss: 0.4301
Epoch 5/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 5s 3ms/step - accuracy: 0.8898 - loss: 0.4107 - val_accuracy: 0.8900 - val_loss: 0.4021
Epoch 6/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.8963 - loss: 0.3807 - val_accuracy: 0.8920 - val_loss: 0.3837
Epoch 7/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.8991 - loss: 0.3648 - val_accuracy: 0.8938 - val_loss: 0.3716
Epoch 8/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 3ms/step - accuracy: 0.8989 - loss: 0.3554 - val_accuracy: 0.8967 - val_loss: 0.3605
Epoch 9/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 3ms/step - accuracy: 0.9041 - loss: 0.3397 - val_accuracy: 0.8993 - val_loss: 0.3497
Epoch 10/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9039 - loss: 0.3346 - val_accuracy: 0.9015 - val_loss: 0.3422
Epoch 11/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9062 - loss: 0.3246 - val_accuracy: 0.9027 - val_loss: 0.3382
Epoch 12/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 5s 3ms/step - accuracy: 0.9111 - loss: 0.3134 - val_accuracy: 0.9048 - val_loss: 0.3316
Epoch 13/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9109 - loss: 0.3147 - val_accuracy: 0.9058 - val_loss: 0.3292
Epoch 14/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9113 - loss: 0.3095 - val_accuracy: 0.9077 - val_loss: 0.3243
Epoch 15/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 5s 3ms/step - accuracy: 0.9146 - loss: 0.2992 - val_accuracy: 0.9083 - val_loss: 0.3205
Epoch 16/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9159 - loss: 0.2993 - val_accuracy: 0.9093 - val_loss: 0.3168
Epoch 17/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9151 - loss: 0.2961 - val_accuracy: 0.9102 - val_loss: 0.3149
Epoch 18/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 5s 3ms/step - accuracy: 0.9163 - loss: 0.2920 - val_accuracy: 0.9112 - val_loss: 0.3116
Epoch 19/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9181 - loss: 0.2850 - val_accuracy: 0.9110 - val_loss: 0.3093
Epoch 20/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9175 - loss: 0.2893 - val_accuracy: 0.9142 - val_loss: 0.3068
Epoch 21/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 5s 3ms/step - accuracy: 0.9182 - loss: 0.2807 - val_accuracy: 0.9138 - val_loss: 0.3031
Epoch 22/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9193 - loss: 0.2796 - val_accuracy: 0.9145 - val_loss: 0.3029
Epoch 23/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9209 - loss: 0.2741 - val_accuracy: 0.9142 - val_loss: 0.3008
Epoch 24/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 5s 3ms/step - accuracy: 0.9190 - loss: 0.2798 - val_accuracy: 0.9157 - val_loss: 0.2976
Epoch 25/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 5s 3ms/step - accuracy: 0.9211 - loss: 0.2761 - val_accuracy: 0.9145 - val_loss: 0.2952
Epoch 26/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9214 - loss: 0.2671 - val_accuracy: 0.9183 - val_loss: 0.2913
Epoch 27/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 3ms/step - accuracy: 0.9233 - loss: 0.2689 - val_accuracy: 0.9173 - val_loss: 0.2903
Epoch 28/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 3ms/step - accuracy: 0.9235 - loss: 0.2623 - val_accuracy: 0.9180 - val_loss: 0.2880
Epoch 29/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9251 - loss: 0.2595 - val_accuracy: 0.9180 - val_loss: 0.2879
Epoch 30/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 3ms/step - accuracy: 0.9240 - loss: 0.2605 - val_accuracy: 0.9202 - val_loss: 0.2840
Epoch 31/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 3ms/step - accuracy: 0.9259 - loss: 0.2579 - val_accuracy: 0.9197 - val_loss: 0.2820
Epoch 32/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9249 - loss: 0.2601 - val_accuracy: 0.9205 - val_loss: 0.2811
Epoch 33/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9278 - loss: 0.2507 - val_accuracy: 0.9218 - val_loss: 0.2773
Epoch 34/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 5s 3ms/step - accuracy: 0.9284 - loss: 0.2483 - val_accuracy: 0.9207 - val_loss: 0.2768
Epoch 35/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9298 - loss: 0.2464 - val_accuracy: 0.9233 - val_loss: 0.2732
Epoch 36/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9295 - loss: 0.2421 - val_accuracy: 0.9232 - val_loss: 0.2714
Epoch 37/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 5s 3ms/step - accuracy: 0.9307 - loss: 0.2462 - val_accuracy: 0.9238 - val_loss: 0.2696
Epoch 38/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9293 - loss: 0.2424 - val_accuracy: 0.9245 - val_loss: 0.2667
Epoch 39/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9313 - loss: 0.2416 - val_accuracy: 0.9267 - val_loss: 0.2651
Epoch 40/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 5s 3ms/step - accuracy: 0.9312 - loss: 0.2367 - val_accuracy: 0.9263 - val_loss: 0.2637
Epoch 41/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9338 - loss: 0.2350 - val_accuracy: 0.9242 - val_loss: 0.2624
Epoch 42/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9351 - loss: 0.2279 - val_accuracy: 0.9260 - val_loss: 0.2596
Epoch 43/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 5s 3ms/step - accuracy: 0.9345 - loss: 0.2300 - val_accuracy: 0.9285 - val_loss: 0.2579
Epoch 44/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9335 - loss: 0.2295 - val_accuracy: 0.9293 - val_loss: 0.2555
Epoch 45/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 5s 2ms/step - accuracy: 0.9361 - loss: 0.2267 - val_accuracy: 0.9297 - val_loss: 0.2528
Epoch 46/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 5s 3ms/step - accuracy: 0.9367 - loss: 0.2209 - val_accuracy: 0.9287 - val_loss: 0.2522
Epoch 47/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9360 - loss: 0.2243 - val_accuracy: 0.9285 - val_loss: 0.2510
Epoch 48/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9365 - loss: 0.2216 - val_accuracy: 0.9307 - val_loss: 0.2474
Epoch 49/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 5s 3ms/step - accuracy: 0.9383 - loss: 0.2167 - val_accuracy: 0.9308 - val_loss: 0.2458
Epoch 50/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9389 - loss: 0.2143 - val_accuracy: 0.9307 - val_loss: 0.2436
# plot the loss by epoch
plt.plot(H_1h300.history['loss'])
plt.plot(H_1h300.history['val_loss'])
plt.grid()
plt.xlabel('Epochs')
plt.ylabel('loss')
plt.legend(['train_loss', 'val_loss'])
plt.title('Loss by epochs')
plt.show()

# evaluate the model on the test data
scores = model_1h300.evaluate(X_test, y_test)
print('Loss on test data:', scores[0])
print('Accuracy on test data:', scores[1])
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 4ms/step - accuracy: 0.9318 - loss: 0.2330
Loss on test data: 0.22451213002204895
Accuracy on test data: 0.9320999979972839

With 500 neurons

# create the model
model_1h500 = Sequential()
model_1h500.add(Dense(units=500, input_dim=num_pixels, activation='sigmoid'))
model_1h500.add(Dense(units=num_classes, activation='softmax'))
# compile the model
model_1h500.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])

# print the model architecture summary
print(model_1h500.summary())
Model: "sequential_3"
┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━┓
┃ Layer (type)                    ┃ Output Shape           ┃       Param # ┃
┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━┩
│ dense_5 (Dense)                 │ (None, 500)            │       392,500 │
├─────────────────────────────────┼────────────────────────┼───────────────┤
│ dense_6 (Dense)                 │ (None, 10)             │         5,010 │
└─────────────────────────────────┴────────────────────────┴───────────────┘
 Total params: 397,510 (1.52 MB)
 Trainable params: 397,510 (1.52 MB)
 Non-trainable params: 0 (0.00 B)
None
# train the model
H_1h500 = model_1h500.fit(X_train, y_train, validation_split=0.1, epochs=50)
Epoch 1/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 6s 3ms/step - accuracy: 0.5548 - loss: 1.7694 - val_accuracy: 0.8335 - val_loss: 0.8194
Epoch 2/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 5s 3ms/step - accuracy: 0.8446 - loss: 0.7199 - val_accuracy: 0.8643 - val_loss: 0.5553
Epoch 3/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.8702 - loss: 0.5211 - val_accuracy: 0.8750 - val_loss: 0.4687
Epoch 4/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 5s 2ms/step - accuracy: 0.8846 - loss: 0.4429 - val_accuracy: 0.8832 - val_loss: 0.4242
Epoch 5/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 5s 3ms/step - accuracy: 0.8914 - loss: 0.4015 - val_accuracy: 0.8865 - val_loss: 0.3980
Epoch 6/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.8965 - loss: 0.3772 - val_accuracy: 0.8915 - val_loss: 0.3807
Epoch 7/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.8985 - loss: 0.3711 - val_accuracy: 0.8932 - val_loss: 0.3685
Epoch 8/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 5s 3ms/step - accuracy: 0.9011 - loss: 0.3468 - val_accuracy: 0.8975 - val_loss: 0.3569
Epoch 9/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9035 - loss: 0.3385 - val_accuracy: 0.8995 - val_loss: 0.3493
Epoch 10/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9058 - loss: 0.3338 - val_accuracy: 0.9022 - val_loss: 0.3423
Epoch 11/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 5s 3ms/step - accuracy: 0.9086 - loss: 0.3205 - val_accuracy: 0.8993 - val_loss: 0.3435
Epoch 12/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9102 - loss: 0.3148 - val_accuracy: 0.9025 - val_loss: 0.3348
Epoch 13/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9084 - loss: 0.3186 - val_accuracy: 0.9062 - val_loss: 0.3284
Epoch 14/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 5s 3ms/step - accuracy: 0.9115 - loss: 0.3143 - val_accuracy: 0.9068 - val_loss: 0.3258
Epoch 15/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 3ms/step - accuracy: 0.9125 - loss: 0.3048 - val_accuracy: 0.9060 - val_loss: 0.3249
Epoch 16/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9147 - loss: 0.2972 - val_accuracy: 0.9072 - val_loss: 0.3215
Epoch 17/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 5s 3ms/step - accuracy: 0.9136 - loss: 0.3003 - val_accuracy: 0.9085 - val_loss: 0.3176
Epoch 18/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9161 - loss: 0.2949 - val_accuracy: 0.9108 - val_loss: 0.3151
Epoch 19/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9132 - loss: 0.3025 - val_accuracy: 0.9110 - val_loss: 0.3123
Epoch 20/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 5s 3ms/step - accuracy: 0.9135 - loss: 0.2974 - val_accuracy: 0.9125 - val_loss: 0.3140
Epoch 21/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9147 - loss: 0.2911 - val_accuracy: 0.9097 - val_loss: 0.3141
Epoch 22/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9180 - loss: 0.2869 - val_accuracy: 0.9123 - val_loss: 0.3082
Epoch 23/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 5s 3ms/step - accuracy: 0.9166 - loss: 0.2879 - val_accuracy: 0.9143 - val_loss: 0.3047
Epoch 24/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9213 - loss: 0.2745 - val_accuracy: 0.9143 - val_loss: 0.3036
Epoch 25/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9207 - loss: 0.2769 - val_accuracy: 0.9140 - val_loss: 0.3019
Epoch 26/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 5s 3ms/step - accuracy: 0.9212 - loss: 0.2780 - val_accuracy: 0.9172 - val_loss: 0.2994
Epoch 27/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9210 - loss: 0.2775 - val_accuracy: 0.9183 - val_loss: 0.2980
Epoch 28/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9207 - loss: 0.2768 - val_accuracy: 0.9162 - val_loss: 0.2968
Epoch 29/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 3ms/step - accuracy: 0.9233 - loss: 0.2662 - val_accuracy: 0.9178 - val_loss: 0.2940
Epoch 30/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 5s 3ms/step - accuracy: 0.9230 - loss: 0.2710 - val_accuracy: 0.9178 - val_loss: 0.2934
Epoch 31/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9216 - loss: 0.2703 - val_accuracy: 0.9155 - val_loss: 0.2933
Epoch 32/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 3ms/step - accuracy: 0.9237 - loss: 0.2627 - val_accuracy: 0.9155 - val_loss: 0.2933
Epoch 33/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 5s 3ms/step - accuracy: 0.9237 - loss: 0.2617 - val_accuracy: 0.9200 - val_loss: 0.2894
Epoch 34/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9232 - loss: 0.2685 - val_accuracy: 0.9170 - val_loss: 0.2893
Epoch 35/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9233 - loss: 0.2666 - val_accuracy: 0.9198 - val_loss: 0.2865
Epoch 36/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 5s 3ms/step - accuracy: 0.9273 - loss: 0.2571 - val_accuracy: 0.9213 - val_loss: 0.2851
Epoch 37/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9269 - loss: 0.2542 - val_accuracy: 0.9210 - val_loss: 0.2819
Epoch 38/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9276 - loss: 0.2531 - val_accuracy: 0.9185 - val_loss: 0.2842
Epoch 39/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 5s 3ms/step - accuracy: 0.9291 - loss: 0.2460 - val_accuracy: 0.9220 - val_loss: 0.2799
Epoch 40/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9288 - loss: 0.2531 - val_accuracy: 0.9222 - val_loss: 0.2782
Epoch 41/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9280 - loss: 0.2535 - val_accuracy: 0.9227 - val_loss: 0.2766
Epoch 42/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 5s 3ms/step - accuracy: 0.9277 - loss: 0.2539 - val_accuracy: 0.9237 - val_loss: 0.2748
Epoch 43/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9306 - loss: 0.2423 - val_accuracy: 0.9237 - val_loss: 0.2743
Epoch 44/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9292 - loss: 0.2490 - val_accuracy: 0.9233 - val_loss: 0.2716
Epoch 45/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 5s 3ms/step - accuracy: 0.9305 - loss: 0.2430 - val_accuracy: 0.9252 - val_loss: 0.2695
Epoch 46/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9310 - loss: 0.2418 - val_accuracy: 0.9247 - val_loss: 0.2685
Epoch 47/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9341 - loss: 0.2349 - val_accuracy: 0.9253 - val_loss: 0.2682
Epoch 48/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 5s 3ms/step - accuracy: 0.9349 - loss: 0.2345 - val_accuracy: 0.9258 - val_loss: 0.2652
Epoch 49/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 3ms/step - accuracy: 0.9341 - loss: 0.2325 - val_accuracy: 0.9275 - val_loss: 0.2627
Epoch 50/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9349 - loss: 0.2305 - val_accuracy: 0.9273 - val_loss: 0.2623
# plot the loss by epoch
plt.plot(H_1h500.history['loss'])
plt.plot(H_1h500.history['val_loss'])
plt.grid()
plt.xlabel('Epochs')
plt.ylabel('loss')
plt.legend(['train_loss', 'val_loss'])
plt.title('Loss by epochs')
plt.show()

# evaluate the model on the test data
scores = model_1h500.evaluate(X_test, y_test)
print('Loss on test data:', scores[0])
print('Accuracy on test data:', scores[1])
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.9289 - loss: 0.2530
Loss on test data: 0.24226699769496918
Accuracy on test data: 0.9291999936103821

The best metric was obtained with the architecture that has 100 neurons in the hidden layer.
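
The same comparison could also be scripted as a loop over the candidate layer sizes instead of three copied blocks; a sketch of ours with the same hyperparameters as above (verbose=0 just suppresses the per-epoch log):

# sketch: train and evaluate one-hidden-layer models for several layer sizes
results = {}
for n_hidden in (100, 300, 500):
    m = Sequential()
    m.add(Dense(units=n_hidden, input_dim=num_pixels, activation='sigmoid'))
    m.add(Dense(units=num_classes, activation='softmax'))
    m.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])
    m.fit(X_train, y_train, validation_split=0.1, epochs=50, verbose=0)
    results[n_hidden] = m.evaluate(X_test, y_test, verbose=0)[1]
print(results)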

Step 9

Added a second hidden layer to the best architecture determined in Step 8 and trained and tested the model with 50 and 100 neurons in the second hidden layer.

With 50 neurons

# create the model
model_1h100_2h50 = Sequential()
model_1h100_2h50.add(Dense(units=100, input_dim=num_pixels, activation='sigmoid'))
model_1h100_2h50.add(Dense(units=50, activation='sigmoid'))
model_1h100_2h50.add(Dense(units=num_classes, activation='softmax'))
# compile the model
model_1h100_2h50.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])

# print the model architecture summary
print(model_1h100_2h50.summary())
Model: "sequential_4"
┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━┓
┃ Layer (type)                    ┃ Output Shape           ┃       Param # ┃
┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━┩
│ dense_7 (Dense)                 │ (None, 100)            │        78,500 │
├─────────────────────────────────┼────────────────────────┼───────────────┤
│ dense_8 (Dense)                 │ (None, 50)             │         5,050 │
├─────────────────────────────────┼────────────────────────┼───────────────┤
│ dense_9 (Dense)                 │ (None, 10)             │           510 │
└─────────────────────────────────┴────────────────────────┴───────────────┘
 Total params: 84,060 (328.36 KB)
 Trainable params: 84,060 (328.36 KB)
 Non-trainable params: 0 (0.00 B)
None
# train the model
H_1h100_2h50 = model_1h100_2h50.fit(X_train, y_train, validation_split=0.1, epochs=50)
Epoch 1/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 8s 4ms/step - accuracy: 0.2284 - loss: 2.2754 - val_accuracy: 0.5273 - val_loss: 2.0668
Epoch 2/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.6059 - loss: 1.9391 - val_accuracy: 0.6995 - val_loss: 1.4788
Epoch 3/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.7227 - loss: 1.3435 - val_accuracy: 0.7643 - val_loss: 1.0211
Epoch 4/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 5s 3ms/step - accuracy: 0.7838 - loss: 0.9510 - val_accuracy: 0.8127 - val_loss: 0.7872
Epoch 5/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.8210 - loss: 0.7388 - val_accuracy: 0.8405 - val_loss: 0.6501
Epoch 6/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.8455 - loss: 0.6185 - val_accuracy: 0.8610 - val_loss: 0.5603
Epoch 7/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 3ms/step - accuracy: 0.8620 - loss: 0.5408 - val_accuracy: 0.8712 - val_loss: 0.5009
Epoch 8/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 3ms/step - accuracy: 0.8731 - loss: 0.4861 - val_accuracy: 0.8797 - val_loss: 0.4592
Epoch 9/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.8827 - loss: 0.4452 - val_accuracy: 0.8870 - val_loss: 0.4274
Epoch 10/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 3ms/step - accuracy: 0.8900 - loss: 0.4133 - val_accuracy: 0.8907 - val_loss: 0.4051
Epoch 11/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 5s 3ms/step - accuracy: 0.8939 - loss: 0.3910 - val_accuracy: 0.8948 - val_loss: 0.3872
Epoch 12/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.8970 - loss: 0.3783 - val_accuracy: 0.8977 - val_loss: 0.3719
Epoch 13/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9017 - loss: 0.3553 - val_accuracy: 0.8997 - val_loss: 0.3604
Epoch 14/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 5s 3ms/step - accuracy: 0.9030 - loss: 0.3481 - val_accuracy: 0.9015 - val_loss: 0.3499
Epoch 15/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9066 - loss: 0.3317 - val_accuracy: 0.9030 - val_loss: 0.3426
Epoch 16/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 5s 3ms/step - accuracy: 0.9068 - loss: 0.3278 - val_accuracy: 0.9048 - val_loss: 0.3339
Epoch 17/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 5s 3ms/step - accuracy: 0.9085 - loss: 0.3193 - val_accuracy: 0.9063 - val_loss: 0.3279
Epoch 18/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9116 - loss: 0.3095 - val_accuracy: 0.9088 - val_loss: 0.3211
Epoch 19/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 3ms/step - accuracy: 0.9141 - loss: 0.3022 - val_accuracy: 0.9100 - val_loss: 0.3154
Epoch 20/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 5s 3ms/step - accuracy: 0.9154 - loss: 0.2960 - val_accuracy: 0.9113 - val_loss: 0.3092
Epoch 21/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9167 - loss: 0.2889 - val_accuracy: 0.9125 - val_loss: 0.3049
Epoch 22/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 3ms/step - accuracy: 0.9172 - loss: 0.2871 - val_accuracy: 0.9145 - val_loss: 0.3009
Epoch 23/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 5s 3ms/step - accuracy: 0.9179 - loss: 0.2805 - val_accuracy: 0.9168 - val_loss: 0.2954
Epoch 24/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9213 - loss: 0.2759 - val_accuracy: 0.9150 - val_loss: 0.2920
Epoch 25/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 5s 3ms/step - accuracy: 0.9208 - loss: 0.2696 - val_accuracy: 0.9178 - val_loss: 0.2882
Epoch 26/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 5s 3ms/step - accuracy: 0.9245 - loss: 0.2625 - val_accuracy: 0.9190 - val_loss: 0.2830
Epoch 27/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9233 - loss: 0.2652 - val_accuracy: 0.9198 - val_loss: 0.2806
Epoch 28/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 6s 3ms/step - accuracy: 0.9258 - loss: 0.2612 - val_accuracy: 0.9207 - val_loss: 0.2760
Epoch 29/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9261 - loss: 0.2563 - val_accuracy: 0.9198 - val_loss: 0.2725
Epoch 30/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9272 - loss: 0.2491 - val_accuracy: 0.9237 - val_loss: 0.2693
Epoch 31/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 5s 3ms/step - accuracy: 0.9279 - loss: 0.2488 - val_accuracy: 0.9227 - val_loss: 0.2659
Epoch 32/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9287 - loss: 0.2469 - val_accuracy: 0.9233 - val_loss: 0.2626
Epoch 33/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9309 - loss: 0.2367 - val_accuracy: 0.9245 - val_loss: 0.2601
Epoch 34/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 5s 3ms/step - accuracy: 0.9295 - loss: 0.2388 - val_accuracy: 0.9260 - val_loss: 0.2558
Epoch 35/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 3ms/step - accuracy: 0.9321 - loss: 0.2339 - val_accuracy: 0.9265 - val_loss: 0.2533
Epoch 36/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9351 - loss: 0.2247 - val_accuracy: 0.9287 - val_loss: 0.2496
Epoch 37/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 5s 3ms/step - accuracy: 0.9322 - loss: 0.2319 - val_accuracy: 0.9262 - val_loss: 0.2468
Epoch 38/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 3ms/step - accuracy: 0.9353 - loss: 0.2250 - val_accuracy: 0.9280 - val_loss: 0.2457
Epoch 39/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9345 - loss: 0.2216 - val_accuracy: 0.9298 - val_loss: 0.2413
Epoch 40/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 3ms/step - accuracy: 0.9375 - loss: 0.2152 - val_accuracy: 0.9312 - val_loss: 0.2391
Epoch 41/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 5s 3ms/step - accuracy: 0.9387 - loss: 0.2146 - val_accuracy: 0.9297 - val_loss: 0.2358
Epoch 42/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9399 - loss: 0.2093 - val_accuracy: 0.9328 - val_loss: 0.2326
Epoch 43/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9389 - loss: 0.2079 - val_accuracy: 0.9333 - val_loss: 0.2302
Epoch 44/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 5s 3ms/step - accuracy: 0.9403 - loss: 0.2059 - val_accuracy: 0.9353 - val_loss: 0.2274
Epoch 45/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9406 - loss: 0.2051 - val_accuracy: 0.9350 - val_loss: 0.2250
Epoch 46/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9415 - loss: 0.2007 - val_accuracy: 0.9367 - val_loss: 0.2224
Epoch 47/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 5s 3ms/step - accuracy: 0.9423 - loss: 0.1980 - val_accuracy: 0.9362 - val_loss: 0.2196
Epoch 48/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9442 - loss: 0.1905 - val_accuracy: 0.9365 - val_loss: 0.2172
Epoch 49/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9444 - loss: 0.1920 - val_accuracy: 0.9372 - val_loss: 0.2151
Epoch 50/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 5s 3ms/step - accuracy: 0.9431 - loss: 0.1941 - val_accuracy: 0.9392 - val_loss: 0.2123
# plot the loss by epoch
plt.plot(H_1h100_2h50.history['loss'])
plt.plot(H_1h100_2h50.history['val_loss'])
plt.grid()
plt.xlabel('Epochs')
plt.ylabel('loss')
plt.legend(['train_loss', 'val_loss'])
plt.title('Loss by epochs')
plt.show()

# evaluate the model on the test data
scores = model_1h100_2h50.evaluate(X_test, y_test)
print('Loss on test data:', scores[0])
print('Accuracy on test data:', scores[1])
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 4ms/step - accuracy: 0.9413 - loss: 0.2001
Loss on test data: 0.19637857377529144
Accuracy on test data: 0.9409000277519226

With 100 neurons

# create the model
model_1h100_2h100 = Sequential()
model_1h100_2h100.add(Dense(units=100, input_dim=num_pixels, activation='sigmoid'))
model_1h100_2h100.add(Dense(units=100, activation='sigmoid'))
model_1h100_2h100.add(Dense(units=num_classes, activation='softmax'))
# compile the model
model_1h100_2h100.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])

# print the model architecture summary
print(model_1h100_2h100.summary())
Model: "sequential_5"
┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━┓
┃ Layer (type)                    ┃ Output Shape           ┃       Param # ┃
┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━┩
│ dense_10 (Dense)                │ (None, 100)            │        78,500 │
├─────────────────────────────────┼────────────────────────┼───────────────┤
│ dense_11 (Dense)                │ (None, 100)            │        10,100 │
├─────────────────────────────────┼────────────────────────┼───────────────┤
│ dense_12 (Dense)                │ (None, 10)             │         1,010 │
└─────────────────────────────────┴────────────────────────┴───────────────┘
 Total params: 89,610 (350.04 KB)
 Trainable params: 89,610 (350.04 KB)
 Non-trainable params: 0 (0.00 B)
None
# train the model
H_1h100_2h100 = model_1h100_2h100.fit(X_train, y_train, validation_split=0.1, epochs=50)
Epoch 1/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 7s 3ms/step - accuracy: 0.1944 - loss: 2.2929 - val_accuracy: 0.6115 - val_loss: 2.1164
Epoch 2/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.5457 - loss: 2.0001 - val_accuracy: 0.6318 - val_loss: 1.5437
Epoch 3/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 5s 3ms/step - accuracy: 0.6726 - loss: 1.3931 - val_accuracy: 0.7293 - val_loss: 1.0558
Epoch 4/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 3ms/step - accuracy: 0.7587 - loss: 0.9758 - val_accuracy: 0.7947 - val_loss: 0.7999
Epoch 5/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 5s 2ms/step - accuracy: 0.8090 - loss: 0.7507 - val_accuracy: 0.8320 - val_loss: 0.6535
Epoch 6/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 5s 3ms/step - accuracy: 0.8389 - loss: 0.6182 - val_accuracy: 0.8545 - val_loss: 0.5586
Epoch 7/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 3ms/step - accuracy: 0.8608 - loss: 0.5351 - val_accuracy: 0.8667 - val_loss: 0.4988
Epoch 8/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.8739 - loss: 0.4841 - val_accuracy: 0.8795 - val_loss: 0.4540
Epoch 9/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 5s 3ms/step - accuracy: 0.8840 - loss: 0.4400 - val_accuracy: 0.8870 - val_loss: 0.4242
Epoch 10/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 3ms/step - accuracy: 0.8883 - loss: 0.4093 - val_accuracy: 0.8925 - val_loss: 0.4002
Epoch 11/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.8930 - loss: 0.3890 - val_accuracy: 0.8972 - val_loss: 0.3819
Epoch 12/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 3ms/step - accuracy: 0.9000 - loss: 0.3661 - val_accuracy: 0.8988 - val_loss: 0.3690
Epoch 13/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 3ms/step - accuracy: 0.9026 - loss: 0.3532 - val_accuracy: 0.9015 - val_loss: 0.3564
Epoch 14/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9043 - loss: 0.3411 - val_accuracy: 0.9017 - val_loss: 0.3475
Epoch 15/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9055 - loss: 0.3358 - val_accuracy: 0.9058 - val_loss: 0.3394
Epoch 16/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 5s 3ms/step - accuracy: 0.9109 - loss: 0.3194 - val_accuracy: 0.9075 - val_loss: 0.3318
Epoch 17/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9103 - loss: 0.3186 - val_accuracy: 0.9080 - val_loss: 0.3267
Epoch 18/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9138 - loss: 0.3052 - val_accuracy: 0.9105 - val_loss: 0.3198
Epoch 19/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 5s 3ms/step - accuracy: 0.9142 - loss: 0.3061 - val_accuracy: 0.9128 - val_loss: 0.3145
Epoch 20/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9147 - loss: 0.3001 - val_accuracy: 0.9128 - val_loss: 0.3095
Epoch 21/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9166 - loss: 0.2913 - val_accuracy: 0.9142 - val_loss: 0.3045
Epoch 22/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 5s 3ms/step - accuracy: 0.9173 - loss: 0.2876 - val_accuracy: 0.9145 - val_loss: 0.3003
Epoch 23/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9193 - loss: 0.2844 - val_accuracy: 0.9165 - val_loss: 0.2951
Epoch 24/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9211 - loss: 0.2803 - val_accuracy: 0.9180 - val_loss: 0.2910
Epoch 25/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 5s 3ms/step - accuracy: 0.9229 - loss: 0.2690 - val_accuracy: 0.9180 - val_loss: 0.2867
Epoch 26/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9246 - loss: 0.2602 - val_accuracy: 0.9185 - val_loss: 0.2839
Epoch 27/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9271 - loss: 0.2599 - val_accuracy: 0.9180 - val_loss: 0.2797
Epoch 28/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 5s 3ms/step - accuracy: 0.9270 - loss: 0.2543 - val_accuracy: 0.9203 - val_loss: 0.2769
Epoch 29/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9271 - loss: 0.2561 - val_accuracy: 0.9205 - val_loss: 0.2731
Epoch 30/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9275 - loss: 0.2486 - val_accuracy: 0.9215 - val_loss: 0.2698
Epoch 31/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 5s 3ms/step - accuracy: 0.9294 - loss: 0.2477 - val_accuracy: 0.9233 - val_loss: 0.2671
Epoch 32/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9312 - loss: 0.2327 - val_accuracy: 0.9240 - val_loss: 0.2626
Epoch 33/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9308 - loss: 0.2468 - val_accuracy: 0.9242 - val_loss: 0.2584
Epoch 34/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 6s 3ms/step - accuracy: 0.9338 - loss: 0.2294 - val_accuracy: 0.9253 - val_loss: 0.2565
Epoch 35/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9343 - loss: 0.2303 - val_accuracy: 0.9270 - val_loss: 0.2528
Epoch 36/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9347 - loss: 0.2280 - val_accuracy: 0.9268 - val_loss: 0.2493
Epoch 37/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 6s 3ms/step - accuracy: 0.9360 - loss: 0.2212 - val_accuracy: 0.9283 - val_loss: 0.2461
Epoch 38/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9365 - loss: 0.2156 - val_accuracy: 0.9280 - val_loss: 0.2451
Epoch 39/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9358 - loss: 0.2198 - val_accuracy: 0.9298 - val_loss: 0.2416
Epoch 40/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 5s 3ms/step - accuracy: 0.9377 - loss: 0.2160 - val_accuracy: 0.9303 - val_loss: 0.2382
Epoch 41/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9387 - loss: 0.2135 - val_accuracy: 0.9320 - val_loss: 0.2348
Epoch 42/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9376 - loss: 0.2087 - val_accuracy: 0.9318 - val_loss: 0.2328
Epoch 43/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 5s 3ms/step - accuracy: 0.9415 - loss: 0.2067 - val_accuracy: 0.9322 - val_loss: 0.2317
Epoch 44/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9413 - loss: 0.2016 - val_accuracy: 0.9330 - val_loss: 0.2282
Epoch 45/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9420 - loss: 0.2022 - val_accuracy: 0.9335 - val_loss: 0.2260
Epoch 46/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 5s 3ms/step - accuracy: 0.9428 - loss: 0.1934 - val_accuracy: 0.9343 - val_loss: 0.2235
Epoch 47/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9415 - loss: 0.1984 - val_accuracy: 0.9340 - val_loss: 0.2208
Epoch 48/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9434 - loss: 0.1941 - val_accuracy: 0.9363 - val_loss: 0.2184
Epoch 49/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 5s 3ms/step - accuracy: 0.9455 - loss: 0.1875 - val_accuracy: 0.9352 - val_loss: 0.2152
Epoch 50/50
1688/1688 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.9460 - loss: 0.1874 - val_accuracy: 0.9367 - val_loss: 0.2136
# plot the loss by epoch
plt.plot(H_1h100_2h100.history['loss'])
plt.plot(H_1h100_2h100.history['val_loss'])
plt.grid()
plt.xlabel('Epochs')
plt.ylabel('loss')
plt.legend(['train_loss', 'val_loss'])
plt.title('Loss by epochs')
plt.show()

# evaluate the model on the test data
scores = model_1h100_2h100.evaluate(X_test, y_test)
print('Loss on test data:', scores[0])
print('Accuracy on test data:', scores[1])
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 4ms/step - accuracy: 0.9426 - loss: 0.2008
Loss on test data: 0.19593027234077454
Accuracy on test data: 0.9416999816894531

Step 10

The results of the architecture study were collected in the table below.

Table of test results for the neural network models

Hidden layers | Neurons in 1st hidden layer | Neurons in 2nd hidden layer | Classification accuracy (test)
0             | -                           | -                           | 0.9199000000953674
1             | 100                         | -                           | 0.9398000240325928
1             | 300                         | -                           | 0.9320999979972839
1             | 500                         | -                           | 0.9291999936103821
2             | 100                         | 50                          | 0.9409000277519226
2             | 100                         | 100                         | 0.9416999816894531

Based on this study, we conclude that the best architecture is the one with two hidden layers (100 neurons in the first hidden layer and 100 in the second).

Step 11

Saved the best neural network to disk.

model_1h100_2h100.save('best_model.keras')
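
The saved model can later be restored with keras.models.load_model; a quick check of ours (not part of the original report):

# sketch: load the saved model back and confirm it evaluates the same
from keras.models import load_model
restored = load_model('best_model.keras')
print(restored.evaluate(X_test, y_test, verbose=0))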

Step 12

Displayed the model's prediction results on a test example.

# display a test image and the recognition result
n = 123
result = model_1h100_2h100.predict(X_test[n:n+1])
print('NN output:', result)
plt.imshow(X_test[n].reshape(28,28), cmap=plt.get_cmap('gray'))
plt.show()
print('Real mark: ', str(np.argmax(y_test[n])))
print('NN answer: ', str(np.argmax(result)))
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 298ms/step
NN output: [[9.9311924e-01 5.2934556e-08 3.6617029e-03 1.9478831e-04 1.4328006e-05
  2.6737533e-03 2.4743416e-04 3.6820653e-05 2.7412230e-05 2.4572681e-05]]

Real mark:  0
NN answer:  0

Step 13

Created our own images of digits and tested the network on them.

# load our own image
from PIL import Image
file_data = Image.open('five_v3.png')
file_data = file_data.convert('L') # convert to grayscale
test_img = np.array(file_data)

# display the image
plt.imshow(test_img, cmap=plt.get_cmap('gray'))
plt.show()

# preprocessing
test_img = test_img / 255
test_img = test_img.reshape(1, num_pixels)
# recognition
result = model_1h100_2h100.predict(test_img)
print('I think it\'s ', np.argmax(result))
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 31ms/step
I think it's  5

Step 14

Created copies of the drawn digits rotated by 90 degrees and tested how the neural network handles them.
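
The report does not show how the rotated copies were produced; for reference, such a copy could also be made directly with PIL (a sketch of ours; the file name matches the one used below):

# sketch: create a 90-degree rotated copy of an existing digit image
from PIL import Image
img = Image.open('five_v3.png')
img.rotate(90).save('five_v3_rotated.png')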

file_data = Image.open('three_v3_rotated.png')
file_data = file_data.convert('L') # convert to grayscale
test_img = np.array(file_data)

# display the image
plt.imshow(test_img, cmap=plt.get_cmap('gray'))
plt.show()
# preprocessing
test_img = test_img / 255
test_img = test_img.reshape(1, num_pixels)
# recognition
result = model_1h100_2h100.predict(test_img)
print('I think it\'s ', np.argmax(result))

1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 30ms/step
I think it's  2
file_data = Image.open('five_v3_rotated.png')
file_data = file_data.convert('L') # convert to grayscale
test_img = np.array(file_data)

# display the image
plt.imshow(test_img, cmap=plt.get_cmap('gray'))
plt.show()
# preprocessing
test_img = test_img / 255
test_img = test_img.reshape(1, num_pixels)
# recognition
result = model_1h100_2h100.predict(test_img)
print('I think it\'s ', np.argmax(result))

1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 30ms/step
I think it's  2

The network misclassified the rotated images. This is expected: the training set contains only upright digits, so the fully connected model has no built-in invariance to a 90-degree rotation.