форкнуто от main/is_dnn
Вы не можете выбрать более 25 тем
Темы должны начинаться с буквы или цифры, могут содержать дефисы(-) и должны содержать не более 35 символов.
338 KiB
338 KiB
import os
os.chdir('/content/drive/MyDrive/Colab Notebooks/IS_LR3')# working directory on the mounted Drive; module imports below
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.models import Sequential
import matplotlib.pyplot as plt
import numpy as np
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split# dataset loading
from keras.datasets import mnist
# Load MNIST and build a custom train/test split for this variant.
(X_train, y_train), (X_test, y_test) = mnist.load_data()
from sklearn.model_selection import train_test_split
# Merge the default partitions back into one pool.
X = np.concatenate((X_train, X_test))
y = np.concatenate((y_train, y_test))
# Re-split deterministically according to the assignment variant.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=10000, train_size=60000, random_state=35)
# Report the resulting shapes.
print('Shape of X train:', X_train.shape)
print('Shape of y train:', y_train.shape)
print('Shape of X test:', X_test.shape)
print('Shape of y test:', y_test.shape)
Shape of X train: (60000, 28, 28)
Shape of y train: (60000,)
Shape of X test: (10000, 28, 28)
Shape of y test: (10000,)
# Data and model parameters.
num_classes = 10
input_shape = (28, 28, 1)
# Scale pixel values into [0, 1].
X_train = X_train / 255
X_test = X_test / 255
# Append a channel axis so each image is (height, width, channels).
X_train = X_train[..., np.newaxis]
X_test = X_test[..., np.newaxis]
print('Shape of transformed X train:', X_train.shape)
print('Shape of transformed X test:', X_test.shape)
# One-hot encode the labels.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
print('Shape of transformed y train:', y_train.shape)
print('Shape of transformed y test:', y_test.shape)
Shape of transformed X train: (60000, 28, 28, 1)
Shape of transformed X test: (10000, 28, 28, 1)
Shape of transformed y train: (60000, 10)
Shape of transformed y test: (10000, 10)
# Build the MNIST CNN: two conv/pool stages, dropout, softmax classifier.
model = Sequential()
# Declare the input spec explicitly; passing input_shape to Conv2D raises the
# Keras deprecation UserWarning seen in the cell output.
model.add(layers.Input(shape=input_shape))
model.add(layers.Conv2D(32, kernel_size=(3, 3), activation="relu"))
model.add(layers.MaxPooling2D(pool_size=(2, 2)))
model.add(layers.Conv2D(64, kernel_size=(3, 3), activation="relu"))
model.add(layers.MaxPooling2D(pool_size=(2, 2)))
model.add(layers.Dropout(0.5))
model.add(layers.Flatten())
model.add(layers.Dense(num_classes, activation="softmax"))
model.summary()
/usr/local/lib/python3.12/dist-packages/keras/src/layers/convolutional/base_conv.py:113: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Model: "sequential"
┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━┓ ┃ Layer (type) ┃ Output Shape ┃ Param # ┃ ┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━┩ │ conv2d (Conv2D) │ (None, 26, 26, 32) │ 320 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ max_pooling2d (MaxPooling2D) │ (None, 13, 13, 32) │ 0 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ conv2d_1 (Conv2D) │ (None, 11, 11, 64) │ 18,496 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ max_pooling2d_1 (MaxPooling2D) │ (None, 5, 5, 64) │ 0 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ dropout (Dropout) │ (None, 5, 5, 64) │ 0 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ flatten (Flatten) │ (None, 1600) │ 0 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ dense (Dense) │ (None, 10) │ 16,010 │ └─────────────────────────────────┴────────────────────────┴───────────────┘
Total params: 34,826 (136.04 KB)
Trainable params: 34,826 (136.04 KB)
Non-trainable params: 0 (0.00 B)
# Compile the model and set the training hyper-parameters.
batch_size = 512
epochs = 15
model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])
model.fit(X_train, y_train, batch_size=batch_size, epochs=epochs, validation_split=0.1)
Epoch 1/15
106/106 ━━━━━━━━━━━━━━━━━━━━ 42s 379ms/step - accuracy: 0.5997 - loss: 1.3087 - val_accuracy: 0.9533 - val_loss: 0.1712
Epoch 2/15
106/106 ━━━━━━━━━━━━━━━━━━━━ 42s 392ms/step - accuracy: 0.9412 - loss: 0.1983 - val_accuracy: 0.9698 - val_loss: 0.1051
Epoch 3/15
106/106 ━━━━━━━━━━━━━━━━━━━━ 39s 367ms/step - accuracy: 0.9598 - loss: 0.1331 - val_accuracy: 0.9762 - val_loss: 0.0813
Epoch 4/15
106/106 ━━━━━━━━━━━━━━━━━━━━ 40s 381ms/step - accuracy: 0.9675 - loss: 0.1109 - val_accuracy: 0.9772 - val_loss: 0.0718
Epoch 5/15
106/106 ━━━━━━━━━━━━━━━━━━━━ 40s 380ms/step - accuracy: 0.9724 - loss: 0.0904 - val_accuracy: 0.9807 - val_loss: 0.0629
Epoch 6/15
106/106 ━━━━━━━━━━━━━━━━━━━━ 41s 381ms/step - accuracy: 0.9761 - loss: 0.0784 - val_accuracy: 0.9823 - val_loss: 0.0551
Epoch 7/15
106/106 ━━━━━━━━━━━━━━━━━━━━ 41s 384ms/step - accuracy: 0.9785 - loss: 0.0687 - val_accuracy: 0.9827 - val_loss: 0.0518
Epoch 8/15
106/106 ━━━━━━━━━━━━━━━━━━━━ 39s 364ms/step - accuracy: 0.9812 - loss: 0.0622 - val_accuracy: 0.9842 - val_loss: 0.0484
Epoch 9/15
106/106 ━━━━━━━━━━━━━━━━━━━━ 40s 356ms/step - accuracy: 0.9818 - loss: 0.0592 - val_accuracy: 0.9850 - val_loss: 0.0452
Epoch 10/15
106/106 ━━━━━━━━━━━━━━━━━━━━ 41s 355ms/step - accuracy: 0.9829 - loss: 0.0551 - val_accuracy: 0.9853 - val_loss: 0.0440
Epoch 11/15
106/106 ━━━━━━━━━━━━━━━━━━━━ 41s 357ms/step - accuracy: 0.9837 - loss: 0.0530 - val_accuracy: 0.9868 - val_loss: 0.0413
Epoch 12/15
106/106 ━━━━━━━━━━━━━━━━━━━━ 41s 359ms/step - accuracy: 0.9851 - loss: 0.0479 - val_accuracy: 0.9870 - val_loss: 0.0394
Epoch 13/15
106/106 ━━━━━━━━━━━━━━━━━━━━ 40s 375ms/step - accuracy: 0.9850 - loss: 0.0482 - val_accuracy: 0.9875 - val_loss: 0.0397
Epoch 14/15
106/106 ━━━━━━━━━━━━━━━━━━━━ 41s 371ms/step - accuracy: 0.9851 - loss: 0.0455 - val_accuracy: 0.9883 - val_loss: 0.0372
Epoch 15/15
106/106 ━━━━━━━━━━━━━━━━━━━━ 39s 370ms/step - accuracy: 0.9864 - loss: 0.0406 - val_accuracy: 0.9875 - val_loss: 0.0384
<keras.src.callbacks.history.History at 0x7a39d3d1b3b0>
# Evaluate model quality on the held-out test data.
scores = model.evaluate(X_test, y_test)
print('Loss on test data:', scores[0])
print('Accuracy on test data:', scores[1])
313/313 ━━━━━━━━━━━━━━━━━━━━ 4s 12ms/step - accuracy: 0.9876 - loss: 0.0382
Loss on test data: 0.03760423883795738
Accuracy on test data: 0.9884999990463257
# Show two test images together with the network's raw output and decision.
for n in [67, 69]:
    result = model.predict(X_test[n:n + 1])
    print('NN output:', result)
    plt.imshow(X_test[n].reshape(28, 28), cmap=plt.get_cmap('gray'))
    plt.show()
    print('Real mark: ', np.argmax(y_test[n]))
print('NN answer: ', np.argmax(result))
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 40ms/step
NN output: [[6.4946892e-10 3.8115243e-07 1.6316299e-07 9.9963105e-01 1.6378403e-08
2.3533788e-04 1.6841338e-10 4.0841002e-08 3.6364984e-06 1.2934272e-04]]

Real mark: 3
NN answer: 3
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 38ms/step
NN output: [[2.2877561e-08 9.9993885e-01 1.9471462e-07 6.8260057e-08 4.9374252e-05
1.3361741e-07 3.9278180e-07 4.0640666e-06 6.2291774e-06 6.9445946e-07]]

Real mark: 1
NN answer: 1
# Ground-truth and predicted class indices (argmax over one-hot / softmax).
true_labels = np.argmax(y_test, axis=1)
predicted_labels = np.argmax(model.predict(X_test), axis=1)
# Per-class precision/recall/F1 report.
print(classification_report(true_labels, predicted_labels))
# Confusion matrix rendered as a heat map.
conf_matrix = confusion_matrix(true_labels, predicted_labels)
display = ConfusionMatrixDisplay(confusion_matrix=conf_matrix)
display.plot()
plt.show()
313/313 ━━━━━━━━━━━━━━━━━━━━ 9s 28ms/step
precision recall f1-score support
0 0.99 1.00 0.99 965
1 0.99 0.99 0.99 1115
2 0.99 0.99 0.99 1020
3 1.00 0.99 0.99 1075
4 0.99 0.99 0.99 959
5 0.98 0.99 0.99 909
6 0.99 0.99 0.99 970
7 0.98 0.99 0.99 1050
8 0.98 0.97 0.98 972
9 0.99 0.98 0.99 965
accuracy 0.99 10000
macro avg 0.99 0.99 0.99 10000
weighted avg 0.99 0.99 0.99 10000

# Load a hand-made test image and classify it.
from PIL import Image
file_data = Image.open('2.png').convert('L')  # grayscale
test_img = np.array(file_data)
# Display the image.
plt.imshow(test_img, cmap=plt.get_cmap('gray'))
plt.show()
# Preprocess the same way as the training data: scale and add batch/channel axes.
test_img = np.reshape(test_img / 255, (1, 28, 28, 1))
# Run the network.
result = model.predict(test_img)
print('I think it\'s ', np.argmax(result))
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 165ms/step
I think it's 2
# Load the dense two-layer model trained in lab work 1 (LR1) for comparison.
model_lr1 = keras.models.load_model("/content/drive/MyDrive/Colab Notebooks/IS_LR3/best_model_2l_100_LR1.keras")
model_lr1.summary()
Model: "sequential_1"
┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━┓ ┃ Layer (type) ┃ Output Shape ┃ Param # ┃ ┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━┩ │ dense_1 (Dense) │ (None, 100) │ 78,500 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ dense_2 (Dense) │ (None, 10) │ 1,010 │ └─────────────────────────────────┴────────────────────────┴───────────────┘
Total params: 79,512 (310.60 KB)
Trainable params: 79,510 (310.59 KB)
Non-trainable params: 0 (0.00 B)
Optimizer params: 2 (12.00 B)
# Rebuild the same MNIST split and flatten the images for the dense LR1 model.
X_train, X_test, y_train, y_test = train_test_split(X, y,
                                                    test_size=10000,
                                                    train_size=60000,
                                                    random_state=35)
# Flatten each 28x28 image into a 784-component vector and scale into [0, 1].
num_pixels = X_train.shape[1] * X_train.shape[2]
X_train = X_train.reshape(X_train.shape[0], num_pixels) / 255
X_test = X_test.reshape(X_test.shape[0], num_pixels) / 255
print('Shape of transformed X train:', X_train.shape)
# BUG FIX: the original label said "X train" while printing X_test's shape.
print('Shape of transformed X test:', X_test.shape)
# One-hot encode labels.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
print('Shape of transformed y train:', y_train.shape)
print('Shape of transformed y test:', y_test.shape)
Shape of transformed X train: (60000, 784)
Shape of transformed X train: (10000, 784)
Shape of transformed y train: (60000, 10)
Shape of transformed y test: (10000, 10)
# Evaluate the LR1 dense model on the same flattened test split.
scores = model_lr1.evaluate(X_test, y_test)
print('Loss on test data:', scores[0])
print('Accuracy on test data:', scores[1])
313/313 ━━━━━━━━━━━━━━━━━━━━ 4s 5ms/step - accuracy: 0.9166 - loss: 0.3003
Loss on test data: 0.3069264590740204
Accuracy on test data: 0.9150000214576721
# Load the CIFAR-10 dataset.
from keras.datasets import cifar10
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
Downloading data from https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz
170498071/170498071 ━━━━━━━━━━━━━━━━━━━━ 5s 0us/step
# Custom CIFAR-10 split: pool the default partitions, then re-split by variant.
X = np.concatenate((X_train, X_test))
y = np.concatenate((y_train, y_test))
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=10000, train_size=50000, random_state=35)
# Report the resulting shapes.
print('Shape of X train:', X_train.shape)
print('Shape of y train:', y_train.shape)
print('Shape of X test:', X_test.shape)
print('Shape of y test:', y_test.shape)
Shape of X train: (50000, 32, 32, 3)
Shape of y train: (50000, 1)
Shape of X test: (10000, 32, 32, 3)
Shape of y test: (10000, 1)
# Human-readable CIFAR-10 class names, indexed by label value.
class_names = ['airplane', 'automobile', 'bird', 'cat', 'deer',
               'dog', 'frog', 'horse', 'ship', 'truck']
# Preview the first 25 training images in a 5x5 grid with their labels.
plt.figure(figsize=(10,10))
for i in range(25):
    plt.subplot(5, 5, i + 1)
    plt.xticks([])
    plt.yticks([])
    plt.grid(False)
    plt.imshow(X_train[i])
    plt.xlabel(class_names[y_train[i][0]])
plt.show()
# Data and model parameters for CIFAR-10 (already (32, 32, 3), no channel axis needed).
num_classes = 10
input_shape = (32, 32, 3)
# Scale pixel values into [0, 1].
X_train = X_train / 255
X_test = X_test / 255
print('Shape of transformed X train:', X_train.shape)
print('Shape of transformed X test:', X_test.shape)
# One-hot encode the labels.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
print('Shape of transformed y train:', y_train.shape)
print('Shape of transformed y test:', y_test.shape)
Shape of transformed X train: (50000, 32, 32, 3)
Shape of transformed X test: (10000, 32, 32, 3)
Shape of transformed y train: (50000, 10)
Shape of transformed y test: (10000, 10)
# CIFAR-10 CNN: three conv/pool stages, a dense hidden layer with dropout,
# and a softmax classifier.
model = Sequential()
# Declare the input spec explicitly; passing input_shape to Conv2D raises the
# Keras deprecation UserWarning seen in the cell output.
model.add(layers.Input(shape=input_shape))
model.add(layers.Conv2D(32, kernel_size=(3, 3), activation="relu"))
model.add(layers.MaxPooling2D(pool_size=(2, 2)))
model.add(layers.Conv2D(64, kernel_size=(3, 3), activation="relu"))
model.add(layers.MaxPooling2D(pool_size=(2, 2)))
model.add(layers.Conv2D(128, kernel_size=(3, 3), activation="relu"))
model.add(layers.MaxPooling2D(pool_size=(2, 2)))
model.add(layers.Flatten())
model.add(layers.Dense(128, activation='relu'))
model.add(layers.Dropout(0.5))
model.add(layers.Dense(num_classes, activation="softmax"))
model.summary()
/usr/local/lib/python3.12/dist-packages/keras/src/layers/convolutional/base_conv.py:113: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Model: "sequential"
┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━┓ ┃ Layer (type) ┃ Output Shape ┃ Param # ┃ ┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━┩ │ conv2d (Conv2D) │ (None, 30, 30, 32) │ 896 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ max_pooling2d (MaxPooling2D) │ (None, 15, 15, 32) │ 0 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ conv2d_1 (Conv2D) │ (None, 13, 13, 64) │ 18,496 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ max_pooling2d_1 (MaxPooling2D) │ (None, 6, 6, 64) │ 0 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ conv2d_2 (Conv2D) │ (None, 4, 4, 128) │ 73,856 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ max_pooling2d_2 (MaxPooling2D) │ (None, 2, 2, 128) │ 0 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ flatten (Flatten) │ (None, 512) │ 0 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ dense (Dense) │ (None, 128) │ 65,664 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ dropout (Dropout) │ (None, 128) │ 0 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ dense_1 (Dense) │ (None, 10) │ 1,290 │ └─────────────────────────────────┴────────────────────────┴───────────────┘
Total params: 160,202 (625.79 KB)
Trainable params: 160,202 (625.79 KB)
Non-trainable params: 0 (0.00 B)
# Compile the model and set the training hyper-parameters.
batch_size = 64
epochs = 50
model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])
model.fit(X_train, y_train, batch_size=batch_size, epochs=epochs, validation_split=0.1)
Epoch 1/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 12s 10ms/step - accuracy: 0.2665 - loss: 1.9447 - val_accuracy: 0.4852 - val_loss: 1.4141
Epoch 2/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - accuracy: 0.4775 - loss: 1.4451 - val_accuracy: 0.5650 - val_loss: 1.2281
Epoch 3/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - accuracy: 0.5397 - loss: 1.2815 - val_accuracy: 0.6018 - val_loss: 1.1288
Epoch 4/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - accuracy: 0.5841 - loss: 1.1718 - val_accuracy: 0.6170 - val_loss: 1.0916
Epoch 5/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 5ms/step - accuracy: 0.6163 - loss: 1.1004 - val_accuracy: 0.6434 - val_loss: 1.0126
Epoch 6/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - accuracy: 0.6398 - loss: 1.0222 - val_accuracy: 0.6596 - val_loss: 0.9966
Epoch 7/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - accuracy: 0.6629 - loss: 0.9663 - val_accuracy: 0.6488 - val_loss: 0.9930
Epoch 8/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 6s 5ms/step - accuracy: 0.6819 - loss: 0.9165 - val_accuracy: 0.6808 - val_loss: 0.9155
Epoch 9/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - accuracy: 0.6976 - loss: 0.8693 - val_accuracy: 0.6846 - val_loss: 0.9188
Epoch 10/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - accuracy: 0.7092 - loss: 0.8309 - val_accuracy: 0.6960 - val_loss: 0.8803
Epoch 11/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - accuracy: 0.7252 - loss: 0.7833 - val_accuracy: 0.6866 - val_loss: 0.9156
Epoch 12/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - accuracy: 0.7389 - loss: 0.7513 - val_accuracy: 0.6980 - val_loss: 0.8891
Epoch 13/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 5ms/step - accuracy: 0.7489 - loss: 0.7227 - val_accuracy: 0.7106 - val_loss: 0.8728
Epoch 14/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - accuracy: 0.7589 - loss: 0.6988 - val_accuracy: 0.7116 - val_loss: 0.8715
Epoch 15/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - accuracy: 0.7630 - loss: 0.6719 - val_accuracy: 0.7134 - val_loss: 0.8539
Epoch 16/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - accuracy: 0.7742 - loss: 0.6419 - val_accuracy: 0.7150 - val_loss: 0.8817
Epoch 17/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 6s 5ms/step - accuracy: 0.7751 - loss: 0.6425 - val_accuracy: 0.7134 - val_loss: 0.8575
Epoch 18/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - accuracy: 0.7923 - loss: 0.5986 - val_accuracy: 0.6882 - val_loss: 0.9823
Epoch 19/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - accuracy: 0.8005 - loss: 0.5771 - val_accuracy: 0.7208 - val_loss: 0.8856
Epoch 20/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - accuracy: 0.8014 - loss: 0.5661 - val_accuracy: 0.7152 - val_loss: 0.9009
Epoch 21/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 5ms/step - accuracy: 0.8071 - loss: 0.5448 - val_accuracy: 0.7080 - val_loss: 0.9332
Epoch 22/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 4ms/step - accuracy: 0.8152 - loss: 0.5233 - val_accuracy: 0.7128 - val_loss: 0.9202
Epoch 23/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - accuracy: 0.8201 - loss: 0.5059 - val_accuracy: 0.7152 - val_loss: 0.9343
Epoch 24/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 6s 5ms/step - accuracy: 0.8243 - loss: 0.4981 - val_accuracy: 0.7188 - val_loss: 0.9274
Epoch 25/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - accuracy: 0.8289 - loss: 0.4826 - val_accuracy: 0.7162 - val_loss: 0.9568
Epoch 26/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - accuracy: 0.8365 - loss: 0.4606 - val_accuracy: 0.7162 - val_loss: 0.9787
Epoch 27/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - accuracy: 0.8361 - loss: 0.4606 - val_accuracy: 0.7208 - val_loss: 0.9641
Epoch 28/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 4ms/step - accuracy: 0.8425 - loss: 0.4403 - val_accuracy: 0.7202 - val_loss: 0.9633
Epoch 29/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - accuracy: 0.8440 - loss: 0.4314 - val_accuracy: 0.7254 - val_loss: 0.9901
Epoch 30/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - accuracy: 0.8516 - loss: 0.4154 - val_accuracy: 0.7136 - val_loss: 1.0164
Epoch 31/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - accuracy: 0.8546 - loss: 0.4067 - val_accuracy: 0.7190 - val_loss: 1.0651
Epoch 32/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 5ms/step - accuracy: 0.8592 - loss: 0.3928 - val_accuracy: 0.7224 - val_loss: 1.0705
Epoch 33/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - accuracy: 0.8589 - loss: 0.3900 - val_accuracy: 0.7110 - val_loss: 1.0371
Epoch 34/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - accuracy: 0.8632 - loss: 0.3763 - val_accuracy: 0.7196 - val_loss: 1.0296
Epoch 35/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - accuracy: 0.8702 - loss: 0.3570 - val_accuracy: 0.7188 - val_loss: 1.0846
Epoch 36/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 5ms/step - accuracy: 0.8721 - loss: 0.3516 - val_accuracy: 0.7166 - val_loss: 1.1253
Epoch 37/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - accuracy: 0.8725 - loss: 0.3537 - val_accuracy: 0.7172 - val_loss: 1.1199
Epoch 38/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - accuracy: 0.8694 - loss: 0.3607 - val_accuracy: 0.7152 - val_loss: 1.1645
Epoch 39/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - accuracy: 0.8754 - loss: 0.3421 - val_accuracy: 0.7154 - val_loss: 1.2121
Epoch 40/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - accuracy: 0.8731 - loss: 0.3501 - val_accuracy: 0.7184 - val_loss: 1.1481
Epoch 41/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 5ms/step - accuracy: 0.8793 - loss: 0.3328 - val_accuracy: 0.7174 - val_loss: 1.2047
Epoch 42/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - accuracy: 0.8836 - loss: 0.3210 - val_accuracy: 0.7140 - val_loss: 1.2677
Epoch 43/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - accuracy: 0.8847 - loss: 0.3154 - val_accuracy: 0.7098 - val_loss: 1.2376
Epoch 44/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - accuracy: 0.8894 - loss: 0.3046 - val_accuracy: 0.7088 - val_loss: 1.2208
Epoch 45/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 5ms/step - accuracy: 0.8886 - loss: 0.3104 - val_accuracy: 0.7046 - val_loss: 1.3501
Epoch 46/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 4ms/step - accuracy: 0.8900 - loss: 0.3018 - val_accuracy: 0.7086 - val_loss: 1.3483
Epoch 47/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - accuracy: 0.8901 - loss: 0.3028 - val_accuracy: 0.7100 - val_loss: 1.4048
Epoch 48/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - accuracy: 0.8948 - loss: 0.2922 - val_accuracy: 0.7044 - val_loss: 1.3963
Epoch 49/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 5ms/step - accuracy: 0.8944 - loss: 0.2900 - val_accuracy: 0.7124 - val_loss: 1.3789
Epoch 50/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - accuracy: 0.8971 - loss: 0.2795 - val_accuracy: 0.7118 - val_loss: 1.3672
<keras.src.callbacks.history.History at 0x7d9528b78e60>
# Evaluate the CIFAR-10 model on the held-out test data.
scores = model.evaluate(X_test, y_test)
print('Loss on test data:', scores[0])
print('Accuracy on test data:', scores[1])
313/313 ━━━━━━━━━━━━━━━━━━━━ 4s 9ms/step - accuracy: 0.7178 - loss: 1.3206
Loss on test data: 1.3243911266326904
Accuracy on test data: 0.7181000113487244
# Show two CIFAR test images with the network's raw output and decision.
for n in [67,3]:
    result = model.predict(X_test[n:n + 1])
    print('NN output:', result)
    plt.imshow(X_test[n].reshape(32, 32, 3), cmap=plt.get_cmap('gray'))
    plt.show()
    print('Real mark: ', np.argmax(y_test[n]))
print('NN answer: ', np.argmax(result))
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 56ms/step
NN output: [[2.0852229e-03 6.4687323e-05 8.8319254e-01 2.7147874e-02 2.0701988e-02
5.4570869e-02 5.0194338e-03 6.9489344e-03 1.2296445e-04 1.4538057e-04]]

Real mark: 2
NN answer: 2
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 63ms/step
NN output: [[4.66867859e-05 2.89780496e-06 9.21415904e-08 6.26062393e-01
2.08341021e-06 3.73860687e-01 2.05569340e-05 5.99638597e-08
4.45279193e-06 1.07312246e-07]]

Real mark: 4
NN answer: 3
# Ground-truth and predicted class indices (argmax over one-hot / softmax).
true_labels = np.argmax(y_test, axis=1)
predicted_labels = np.argmax(model.predict(X_test), axis=1)
# Per-class quality report with readable class names.
print(classification_report(true_labels, predicted_labels, target_names=class_names))
# Confusion matrix rendered as a heat map.
conf_matrix = confusion_matrix(true_labels, predicted_labels)
fig, ax = plt.subplots(figsize=(6, 6))
disp = ConfusionMatrixDisplay(confusion_matrix=conf_matrix, display_labels=class_names)
disp.plot(ax=ax, xticks_rotation=45)  # rotate x-axis labels so they all fit
plt.tight_layout()
plt.show()