Importing modules
# import modules
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.models import Sequential
import matplotlib.pyplot as plt
import numpy as np
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.metrics import ConfusionMatrixDisplay
Loading the dataset
# load the dataset
from keras.datasets import mnist
(X_train, y_train), (X_test, y_test) = mnist.load_data()
# create a custom split of the dataset
from sklearn.model_selection import train_test_split
# merge into a single set
X = np.concatenate((X_train, X_test))
y = np.concatenate((y_train, y_test))
# split according to the assignment variant
X_train, X_test, y_train, y_test = train_test_split(X, y,
                                                    test_size=10000,
                                                    train_size=60000,
                                                    random_state=39)
# print the shapes
print('Shape of X train:', X_train.shape)
print('Shape of y train:', y_train.shape)
print('Shape of X test:', X_test.shape)
print('Shape of y test:', y_test.shape)
Shape of X train: (60000, 28, 28)
Shape of y train: (60000,)
Shape of X test: (10000, 28, 28)
Shape of y test: (10000,)
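Before preprocessing it can help to glance at a few samples. A minimal sketch, mirroring the CIFAR-10 visualization later in this notebook:

# a quick look at the first ten training digits and their labels
plt.figure(figsize=(10, 2))
for i in range(10):
    plt.subplot(1, 10, i + 1)
    plt.imshow(X_train[i], cmap='gray')
    plt.title(str(y_train[i]))
    plt.xticks([])
    plt.yticks([])
plt.show()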
Data preprocessing
# set the data and model parameters
num_classes = 10
input_shape = (28, 28, 1)
# scale the inputs to the [0, 1] range
X_train = X_train / 255
X_test = X_test / 255
# expand the input dimensions so that each image
# has shape (height, width, channels)
X_train = np.expand_dims(X_train, -1)
X_test = np.expand_dims(X_test, -1)
print('Shape of transformed X train:', X_train.shape)
print('Shape of transformed X test:', X_test.shape)
# convert the labels to one-hot encoding
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
print('Shape of transformed y train:', y_train.shape)
print('Shape of transformed y test:', y_test.shape)
Shape of transformed X train: (60000, 28, 28, 1)
Shape of transformed X test: (10000, 28, 28, 1)
Shape of transformed y train: (60000, 10)
Shape of transformed y test: (10000, 10)
Implementing a convolutional neural network and evaluating classification quality
# build the model
model = Sequential()
model.add(layers.Conv2D(32, kernel_size=(3, 3), activation="relu", input_shape=input_shape))
model.add(layers.MaxPooling2D(pool_size=(2, 2)))
model.add(layers.Conv2D(64, kernel_size=(3, 3), activation="relu"))
model.add(layers.MaxPooling2D(pool_size=(2, 2)))
model.add(layers.Dropout(0.5))
model.add(layers.Flatten())
model.add(layers.Dense(num_classes, activation="softmax"))
model.summary()
/usr/local/lib/python3.12/dist-packages/keras/src/layers/convolutional/base_conv.py:113: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Model: "sequential_2"
┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━┓
┃ Layer (type)                    ┃ Output Shape           ┃       Param # ┃
┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━┩
│ conv2d_4 (Conv2D)               │ (None, 26, 26, 32)     │           320 │
├─────────────────────────────────┼────────────────────────┼───────────────┤
│ max_pooling2d_4 (MaxPooling2D)  │ (None, 13, 13, 32)     │             0 │
├─────────────────────────────────┼────────────────────────┼───────────────┤
│ conv2d_5 (Conv2D)               │ (None, 11, 11, 64)     │        18,496 │
├─────────────────────────────────┼────────────────────────┼───────────────┤
│ max_pooling2d_5 (MaxPooling2D)  │ (None, 5, 5, 64)       │             0 │
├─────────────────────────────────┼────────────────────────┼───────────────┤
│ dropout_2 (Dropout)             │ (None, 5, 5, 64)       │             0 │
├─────────────────────────────────┼────────────────────────┼───────────────┤
│ flatten_2 (Flatten)             │ (None, 1600)           │             0 │
├─────────────────────────────────┼────────────────────────┼───────────────┤
│ dense_2 (Dense)                 │ (None, 10)             │        16,010 │
└─────────────────────────────────┴────────────────────────┴───────────────┘
Total params: 34,826 (136.04 KB)
Trainable params: 34,826 (136.04 KB)
Non-trainable params: 0 (0.00 B)
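These figures can be verified by hand: a Conv2D layer has (kernel height × kernel width × input channels + 1) × filters parameters (the +1 is the bias), and a Dense layer has (inputs + 1) × units. A quick check against the summary above:

# parameter-count check against the summary above
print((3 * 3 * 1 + 1) * 32)    # 320    -> conv2d_4
print((3 * 3 * 32 + 1) * 64)   # 18496  -> conv2d_5
print((1600 + 1) * 10)         # 16010  -> dense_2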
# compile and train the model
batch_size = 512
epochs = 15
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
model.fit(X_train, y_train, batch_size=batch_size, epochs=epochs, validation_split=0.1)
Epoch 1/15
106/106 ━━━━━━━━━━━━━━━━━━━━ 7s 40ms/step - accuracy: 0.6094 - loss: 1.2944 - val_accuracy: 0.9478 - val_loss: 0.1765
Epoch 2/15
106/106 ━━━━━━━━━━━━━━━━━━━━ 1s 11ms/step - accuracy: 0.9412 - loss: 0.1983 - val_accuracy: 0.9695 - val_loss: 0.1006
Epoch 3/15
106/106 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.9601 - loss: 0.1309 - val_accuracy: 0.9747 - val_loss: 0.0796
Epoch 4/15
106/106 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.9690 - loss: 0.1062 - val_accuracy: 0.9773 - val_loss: 0.0661
Epoch 5/15
106/106 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.9728 - loss: 0.0889 - val_accuracy: 0.9802 - val_loss: 0.0581
Epoch 6/15
106/106 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.9753 - loss: 0.0769 - val_accuracy: 0.9825 - val_loss: 0.0510
Epoch 7/15
106/106 ━━━━━━━━━━━━━━━━━━━━ 1s 12ms/step - accuracy: 0.9781 - loss: 0.0706 - val_accuracy: 0.9845 - val_loss: 0.0472
Epoch 8/15
106/106 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.9808 - loss: 0.0646 - val_accuracy: 0.9850 - val_loss: 0.0459
Epoch 9/15
106/106 ━━━━━━━━━━━━━━━━━━━━ 1s 12ms/step - accuracy: 0.9822 - loss: 0.0584 - val_accuracy: 0.9858 - val_loss: 0.0412
Epoch 10/15
106/106 ━━━━━━━━━━━━━━━━━━━━ 1s 12ms/step - accuracy: 0.9818 - loss: 0.0571 - val_accuracy: 0.9860 - val_loss: 0.0400
Epoch 11/15
106/106 ━━━━━━━━━━━━━━━━━━━━ 2s 10ms/step - accuracy: 0.9832 - loss: 0.0542 - val_accuracy: 0.9873 - val_loss: 0.0381
Epoch 12/15
106/106 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.9855 - loss: 0.0481 - val_accuracy: 0.9872 - val_loss: 0.0366
Epoch 13/15
106/106 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.9852 - loss: 0.0485 - val_accuracy: 0.9882 - val_loss: 0.0353
Epoch 14/15
106/106 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.9868 - loss: 0.0448 - val_accuracy: 0.9895 - val_loss: 0.0344
Epoch 15/15
106/106 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.9862 - loss: 0.0455 - val_accuracy: 0.9880 - val_loss: 0.0343
<keras.src.callbacks.history.History at 0x7c45d5abb6e0>
# evaluate the model on the test data
scores = model.evaluate(X_test, y_test)
print('Loss on test data:', scores[0])
print('Accuracy on test data:', scores[1])
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - accuracy: 0.9883 - loss: 0.0410
Loss on test data: 0.04110224172472954
Accuracy on test data: 0.988099992275238
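At this point it makes sense to save the trained network so it can be reloaded later, the same way the lab-1 model is loaded below. A minimal sketch; the file path is a hypothetical example:

# hypothetical path: save the trained network for later reuse
model.save('/content/drive/MyDrive/Colab Notebooks/mnist_cnn.keras')
# it can later be restored with:
# model = keras.models.load_model('/content/drive/MyDrive/Colab Notebooks/mnist_cnn.keras')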
Applying the trained model
# display a test image and the recognition result
n = 123
result = model.predict(X_test[n:n+1])
print('NN output:', result)
plt.imshow(X_test[n].reshape(28,28), cmap=plt.get_cmap('gray'))
plt.show()
print('Real mark: ', np.argmax(y_test[n]))
print('NN answer: ', np.argmax(result))
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 330ms/step
NN output: [[2.7634337e-06 9.1870556e-10 1.8290423e-06 1.8450550e-08 2.7429451e-07
8.1886617e-07 9.9999094e-01 5.0058091e-13 3.2977912e-06 7.4129168e-11]]

Real mark: 6
NN answer: 6
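The raw softmax vector printed above is hard to read. An optional sketch to visualize it as a bar chart (uses the `result` array from the cell above):

# optional: show the softmax output as a bar chart
plt.bar(range(10), result[0])
plt.xticks(range(10))
plt.xlabel('digit')
plt.ylabel('probability')
plt.show()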
Computing classification quality metrics
# true class labels
true_labels = np.argmax(y_test, axis=1)
# predicted class labels
predicted_labels = np.argmax(model.predict(X_test), axis=1)
# classification quality report
print(classification_report(true_labels, predicted_labels))
# compute the confusion matrix
conf_matrix = confusion_matrix(true_labels, predicted_labels)
# plot the confusion matrix as a heatmap
display = ConfusionMatrixDisplay(confusion_matrix=conf_matrix)
display.plot()
plt.show()
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step
              precision    recall  f1-score   support

           0       0.99      0.99      0.99       990
           1       0.99      0.99      0.99      1155
           2       0.99      0.99      0.99      1025
           3       0.99      0.99      0.99      1016
           4       0.99      0.99      0.99       959
           5       0.99      0.99      0.99       889
           6       0.99      0.99      0.99       997
           7       0.99      0.98      0.98      1034
           8       0.99      0.98      0.98       991
           9       0.99      0.98      0.98       944

    accuracy                           0.99     10000
   macro avg       0.99      0.99      0.99     10000
weighted avg       0.99      0.99      0.99     10000
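To see what the remaining ~1% of errors look like, a small sketch that displays the first few misclassified test digits (uses `true_labels` and `predicted_labels` from above):

# display the first five misclassified test digits
wrong = np.where(predicted_labels != true_labels)[0]
plt.figure(figsize=(10, 2))
for i, idx in enumerate(wrong[:5]):
    plt.subplot(1, 5, i + 1)
    plt.imshow(X_test[idx].reshape(28, 28), cmap='gray')
    plt.title(f'{true_labels[idx]} -> {predicted_labels[idx]}')
    plt.xticks([])
    plt.yticks([])
plt.show()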

Recognizing a custom image
# load a custom image
from PIL import Image
file_data = Image.open('/content/drive/MyDrive/Colab Notebooks/IS_lab_4.png')
file_data = file_data.convert('L') # convert to grayscale
test_img = np.array(file_data)
# display the custom image
plt.imshow(test_img, cmap=plt.get_cmap('gray'))
plt.show()
# preprocessing
test_img = test_img / 255
test_img = np.reshape(test_img, (1,28,28,1))
# recognition
result = model.predict(test_img)
print('I think it\'s ', np.argmax(result))
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 29ms/step
I think it's 4
from PIL import Image
file_data = Image.open('/content/drive/MyDrive/Colab Notebooks/IS_lab_7.png')
file_data = file_data.convert('L') # convert to grayscale
test_img = np.array(file_data)
plt.imshow(test_img, cmap=plt.get_cmap('gray'))
plt.show()
test_img = test_img / 255
test_img = np.reshape(test_img, (1,28,28,1))
result = model.predict(test_img)
print('I think it\'s ', np.argmax(result))
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 28ms/step
I think it's 7
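One caveat when recognizing hand-made images: MNIST digits are white strokes on a black background. If a drawing is saved black-on-white, a hypothetical fix is to invert the intensities during preprocessing, for example:

# hypothetical preprocessing step: invert a black-on-white drawing
# so it matches MNIST's white-on-black convention
test_img = 255 - np.array(file_data)
test_img = test_img / 255
test_img = np.reshape(test_img, (1, 28, 28, 1))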
Loading the model from the first lab and comparing it with the one obtained in this lab
model_lr1 = keras.models.load_model("/content/drive/MyDrive/Colab Notebooks/best_model_100.keras")
model_lr1.summary()
Model: "sequential_16"
┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━┓
┃ Layer (type)                    ┃ Output Shape           ┃       Param # ┃
┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━┩
│ dense_26 (Dense)                │ (None, 100)            │        78,500 │
├─────────────────────────────────┼────────────────────────┼───────────────┤
│ dense_27 (Dense)                │ (None, 10)             │         1,010 │
└─────────────────────────────────┴────────────────────────┴───────────────┘
Total params: 79,512 (310.60 KB)
Trainable params: 79,510 (310.59 KB)
Non-trainable params: 0 (0.00 B)
Optimizer params: 2 (12.00 B)
X_train_flat = X.reshape(70000, 28*28)
X_train_flat = X_train_flat / 255.0
X_train_flat, X_test_flat, y_train_flat, y_test_flat = train_test_split(
    X_train_flat, y, test_size=10000, train_size=60000, random_state=39
)
y_train_flat = keras.utils.to_categorical(y_train_flat, num_classes)
y_test_flat = keras.utils.to_categorical(y_test_flat, num_classes)
print('Shape of transformed X train:', X_train_flat.shape)
print('Shape of transformed X test:', X_test_flat.shape)
print('Shape of transformed y train:', y_train_flat.shape)
print('Shape of transformed y test:', y_test_flat.shape)
Shape of transformed X train: (60000, 784)
Shape of transformed X test: (10000, 784)
Shape of transformed y train: (60000, 10)
Shape of transformed y test: (10000, 10)
scores = model_lr1.evaluate(X_test_flat, y_test_flat)
print('Loss on test data:', scores[0])
print('Accuracy on test data:', scores[1])
313/313 ━━━━━━━━━━━━━━━━━━━━ 2s 5ms/step - accuracy: 0.9153 - loss: 0.3012
Loss on test data: 0.2998492121696472
Accuracy on test data: 0.9138000011444092
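A minimal sketch to put the two results side by side (uses the variables defined above; the accuracy figures match the evaluate() runs):

# compare the CNN from this lab with the dense model from lab 1
cnn_scores = model.evaluate(X_test, y_test, verbose=0)
mlp_scores = model_lr1.evaluate(X_test_flat, y_test_flat, verbose=0)
print(f'CNN (lab 4): accuracy {cnn_scores[1]:.4f}, {model.count_params():,} parameters')
print(f'MLP (lab 1): accuracy {mlp_scores[1]:.4f}, {model_lr1.count_params():,} parameters')

The convolutional model is both more accurate (≈0.988 vs ≈0.914) and less than half the size of the dense model.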
Working with the CIFAR-10 dataset
# load the dataset
from keras.datasets import cifar10
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
Downloading data from https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz
170498071/170498071 ━━━━━━━━━━━━━━━━━━━━ 4s 0us/step
# create a custom split of the dataset
# merge into a single set
X = np.concatenate((X_train, X_test))
y = np.concatenate((y_train, y_test))
# split according to the assignment variant
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=10000, train_size=50000, random_state=39
)
# print the shapes
print('Shape of X train:', X_train.shape)
print('Shape of y train:', y_train.shape)
print('Shape of X test:', X_test.shape)
print('Shape of y test:', y_test.shape)
Shape of X train: (50000, 32, 32, 3)
Shape of y train: (50000, 1)
Shape of X test: (10000, 32, 32, 3)
Shape of y test: (10000, 1)
class_names = ['airplane', 'automobile', 'bird', 'cat', 'deer',
               'dog', 'frog', 'horse', 'ship', 'truck']
plt.figure(figsize=(10,10))
for i in range(25):
    plt.subplot(5,5,i+1)
    plt.xticks([])
    plt.yticks([])
    plt.grid(False)
    plt.imshow(X_train[i])
    plt.xlabel(class_names[y_train[i][0]])
plt.show()
# set the data and model parameters
num_classes = 10
input_shape = (32, 32, 3)
# scale the inputs to the [0, 1] range
X_train = X_train / 255.0
X_test = X_test / 255.0
print('Shape of transformed X train:', X_train.shape)
print('Shape of transformed X test:', X_test.shape)
# convert the labels to one-hot encoding
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
print('Shape of transformed y train:', y_train.shape)
print('Shape of transformed y test:', y_test.shape)
Shape of transformed X train: (50000, 32, 32, 3)
Shape of transformed X test: (10000, 32, 32, 3)
Shape of transformed y train: (50000, 10)
Shape of transformed y test: (10000, 10)
model = Sequential()
model.add(layers.Conv2D(32, (3,3), padding="same", activation="relu",
                        input_shape=input_shape))
model.add(layers.BatchNormalization())
model.add(layers.Conv2D(32, (3,3), padding="same", activation="relu"))
model.add(layers.BatchNormalization())
model.add(layers.BatchNormalization())
model.add(layers.Dropout(0.25))
model.add(layers.Conv2D(64, (3,3), padding="same", activation="relu"))
model.add(layers.Conv2D(64, (3,3), padding="same", activation="relu"))
model.add(layers.MaxPooling2D((2,2)))
model.add(layers.Dropout(0.25))
model.add(layers.Conv2D(128, (3,3), padding="same", activation="relu"))
model.add(layers.MaxPooling2D((2,2)))
model.add(layers.Dropout(0.25))
model.add(layers.Flatten())
model.add(layers.Dense(128, activation="relu"))
model.add(layers.Dropout(0.5))
model.add(layers.Dense(num_classes, activation="softmax"))
model.summary()
/usr/local/lib/python3.12/dist-packages/keras/src/layers/convolutional/base_conv.py:113: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Model: "sequential_15"
┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━┓
┃ Layer (type)                    ┃ Output Shape           ┃       Param # ┃
┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━┩
│ conv2d_52 (Conv2D)              │ (None, 32, 32, 32)     │           896 │
├─────────────────────────────────┼────────────────────────┼───────────────┤
│ batch_normalization_29          │ (None, 32, 32, 32)     │           128 │
│ (BatchNormalization)            │                        │               │
├─────────────────────────────────┼────────────────────────┼───────────────┤
│ conv2d_53 (Conv2D)              │ (None, 32, 32, 32)     │         9,248 │
├─────────────────────────────────┼────────────────────────┼───────────────┤
│ batch_normalization_30          │ (None, 32, 32, 32)     │           128 │
│ (BatchNormalization)            │                        │               │
├─────────────────────────────────┼────────────────────────┼───────────────┤
│ batch_normalization_31          │ (None, 32, 32, 32)     │           128 │
│ (BatchNormalization)            │                        │               │
├─────────────────────────────────┼────────────────────────┼───────────────┤
│ dropout_37 (Dropout)            │ (None, 32, 32, 32)     │             0 │
├─────────────────────────────────┼────────────────────────┼───────────────┤
│ conv2d_54 (Conv2D)              │ (None, 32, 32, 64)     │        18,496 │
├─────────────────────────────────┼────────────────────────┼───────────────┤
│ conv2d_55 (Conv2D)              │ (None, 32, 32, 64)     │        36,928 │
├─────────────────────────────────┼────────────────────────┼───────────────┤
│ max_pooling2d_34 (MaxPooling2D) │ (None, 16, 16, 64)     │             0 │
├─────────────────────────────────┼────────────────────────┼───────────────┤
│ dropout_38 (Dropout)            │ (None, 16, 16, 64)     │             0 │
├─────────────────────────────────┼────────────────────────┼───────────────┤
│ conv2d_56 (Conv2D)              │ (None, 16, 16, 128)    │        73,856 │
├─────────────────────────────────┼────────────────────────┼───────────────┤
│ max_pooling2d_35 (MaxPooling2D) │ (None, 8, 8, 128)      │             0 │
├─────────────────────────────────┼────────────────────────┼───────────────┤
│ dropout_39 (Dropout)            │ (None, 8, 8, 128)      │             0 │
├─────────────────────────────────┼────────────────────────┼───────────────┤
│ flatten_15 (Flatten)            │ (None, 8192)           │             0 │
├─────────────────────────────────┼────────────────────────┼───────────────┤
│ dense_27 (Dense)                │ (None, 128)            │     1,048,704 │
├─────────────────────────────────┼────────────────────────┼───────────────┤
│ dropout_40 (Dropout)            │ (None, 128)            │             0 │
├─────────────────────────────────┼────────────────────────┼───────────────┤
│ dense_28 (Dense)                │ (None, 10)             │         1,290 │
└─────────────────────────────────┴────────────────────────┴───────────────┘
Total params: 1,189,802 (4.54 MB)
Trainable params: 1,189,610 (4.54 MB)
Non-trainable params: 192 (768.00 B)
# compile and train the model
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
model.fit(X_train, y_train, batch_size=64, validation_split=0.1, epochs=50)
Epoch 1/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 26s 25ms/step - accuracy: 0.2890 - loss: 1.9436 - val_accuracy: 0.5242 - val_loss: 1.3238
Epoch 2/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 14ms/step - accuracy: 0.5058 - loss: 1.3752 - val_accuracy: 0.5944 - val_loss: 1.1384
Epoch 3/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 14ms/step - accuracy: 0.5717 - loss: 1.1952 - val_accuracy: 0.6540 - val_loss: 1.0330
Epoch 4/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 14ms/step - accuracy: 0.6078 - loss: 1.1018 - val_accuracy: 0.6750 - val_loss: 0.9730
Epoch 5/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 14ms/step - accuracy: 0.6435 - loss: 1.0084 - val_accuracy: 0.6826 - val_loss: 0.9025
Epoch 6/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 11s 15ms/step - accuracy: 0.6635 - loss: 0.9596 - val_accuracy: 0.6910 - val_loss: 0.9187
Epoch 7/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 19s 14ms/step - accuracy: 0.6766 - loss: 0.9151 - val_accuracy: 0.6944 - val_loss: 0.8935
Epoch 8/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 14ms/step - accuracy: 0.6900 - loss: 0.8780 - val_accuracy: 0.7118 - val_loss: 0.8351
Epoch 9/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 14ms/step - accuracy: 0.7026 - loss: 0.8393 - val_accuracy: 0.7242 - val_loss: 0.8037
Epoch 10/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 14ms/step - accuracy: 0.7103 - loss: 0.8130 - val_accuracy: 0.7256 - val_loss: 0.8080
Epoch 11/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 15ms/step - accuracy: 0.7247 - loss: 0.7776 - val_accuracy: 0.7216 - val_loss: 0.8186
Epoch 12/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 14ms/step - accuracy: 0.7316 - loss: 0.7570 - val_accuracy: 0.7464 - val_loss: 0.7636
Epoch 13/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 14ms/step - accuracy: 0.7411 - loss: 0.7408 - val_accuracy: 0.7188 - val_loss: 0.7994
Epoch 14/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 14ms/step - accuracy: 0.7461 - loss: 0.7251 - val_accuracy: 0.7462 - val_loss: 0.7230
Epoch 15/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 14ms/step - accuracy: 0.7513 - loss: 0.6972 - val_accuracy: 0.7402 - val_loss: 0.7612
Epoch 16/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 15ms/step - accuracy: 0.7535 - loss: 0.6857 - val_accuracy: 0.7336 - val_loss: 0.7845
Epoch 17/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 14ms/step - accuracy: 0.7570 - loss: 0.6822 - val_accuracy: 0.7594 - val_loss: 0.7080
Epoch 18/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 14ms/step - accuracy: 0.7681 - loss: 0.6493 - val_accuracy: 0.7562 - val_loss: 0.7110
Epoch 19/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 14ms/step - accuracy: 0.7655 - loss: 0.6519 - val_accuracy: 0.7472 - val_loss: 0.7445
Epoch 20/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 14ms/step - accuracy: 0.7692 - loss: 0.6357 - val_accuracy: 0.7504 - val_loss: 0.7394
Epoch 21/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 14ms/step - accuracy: 0.7796 - loss: 0.6127 - val_accuracy: 0.7504 - val_loss: 0.7497
Epoch 22/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 14ms/step - accuracy: 0.7817 - loss: 0.6067 - val_accuracy: 0.7588 - val_loss: 0.7231
Epoch 23/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 14ms/step - accuracy: 0.7868 - loss: 0.5887 - val_accuracy: 0.7700 - val_loss: 0.6992
Epoch 24/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 14ms/step - accuracy: 0.7915 - loss: 0.5789 - val_accuracy: 0.7782 - val_loss: 0.6825
Epoch 25/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 14ms/step - accuracy: 0.7990 - loss: 0.5668 - val_accuracy: 0.7674 - val_loss: 0.6921
Epoch 26/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 15ms/step - accuracy: 0.8018 - loss: 0.5562 - val_accuracy: 0.7748 - val_loss: 0.6816
Epoch 27/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 14ms/step - accuracy: 0.8088 - loss: 0.5431 - val_accuracy: 0.7844 - val_loss: 0.6551
Epoch 28/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 14ms/step - accuracy: 0.8062 - loss: 0.5438 - val_accuracy: 0.7852 - val_loss: 0.6404
Epoch 29/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 14ms/step - accuracy: 0.8089 - loss: 0.5272 - val_accuracy: 0.7744 - val_loss: 0.6705
Epoch 30/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 14ms/step - accuracy: 0.8136 - loss: 0.5237 - val_accuracy: 0.7806 - val_loss: 0.6414
Epoch 31/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 14ms/step - accuracy: 0.8122 - loss: 0.5183 - val_accuracy: 0.7850 - val_loss: 0.6457
Epoch 32/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 14ms/step - accuracy: 0.8202 - loss: 0.5026 - val_accuracy: 0.7744 - val_loss: 0.6928
Epoch 33/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 14ms/step - accuracy: 0.8216 - loss: 0.5051 - val_accuracy: 0.7848 - val_loss: 0.6481
Epoch 34/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 14ms/step - accuracy: 0.8181 - loss: 0.4995 - val_accuracy: 0.7850 - val_loss: 0.6710
Epoch 35/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 14ms/step - accuracy: 0.8242 - loss: 0.4876 - val_accuracy: 0.7900 - val_loss: 0.6416
Epoch 36/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 14ms/step - accuracy: 0.8259 - loss: 0.4865 - val_accuracy: 0.7820 - val_loss: 0.6664
Epoch 37/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 14ms/step - accuracy: 0.8318 - loss: 0.4723 - val_accuracy: 0.7928 - val_loss: 0.6512
Epoch 38/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 14ms/step - accuracy: 0.8304 - loss: 0.4738 - val_accuracy: 0.7980 - val_loss: 0.6287
Epoch 39/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 14ms/step - accuracy: 0.8389 - loss: 0.4546 - val_accuracy: 0.7838 - val_loss: 0.6557
Epoch 40/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 14ms/step - accuracy: 0.8379 - loss: 0.4542 - val_accuracy: 0.7850 - val_loss: 0.6656
Epoch 41/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 14ms/step - accuracy: 0.8414 - loss: 0.4457 - val_accuracy: 0.7942 - val_loss: 0.6333
Epoch 42/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 14ms/step - accuracy: 0.8418 - loss: 0.4431 - val_accuracy: 0.7948 - val_loss: 0.6201
Epoch 43/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 14ms/step - accuracy: 0.8425 - loss: 0.4342 - val_accuracy: 0.7912 - val_loss: 0.6254
Epoch 44/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 14ms/step - accuracy: 0.8442 - loss: 0.4375 - val_accuracy: 0.7920 - val_loss: 0.6304
Epoch 45/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 14ms/step - accuracy: 0.8476 - loss: 0.4289 - val_accuracy: 0.8010 - val_loss: 0.6174
Epoch 46/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 14ms/step - accuracy: 0.8486 - loss: 0.4237 - val_accuracy: 0.8012 - val_loss: 0.6151
Epoch 47/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 14ms/step - accuracy: 0.8526 - loss: 0.4200 - val_accuracy: 0.7984 - val_loss: 0.6139
Epoch 48/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 14ms/step - accuracy: 0.8552 - loss: 0.4111 - val_accuracy: 0.8024 - val_loss: 0.6180
Epoch 49/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 14ms/step - accuracy: 0.8540 - loss: 0.4089 - val_accuracy: 0.7944 - val_loss: 0.6362
Epoch 50/50
704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 14ms/step - accuracy: 0.8590 - loss: 0.3945 - val_accuracy: 0.8000 - val_loss: 0.6588
<keras.src.callbacks.history.History at 0x7c45c4332f30>
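The fit() call above discards its return value. A sketch of keeping the History object so the learning curves can be plotted (note this re-runs training):

# keep the History object returned by fit() to plot learning curves
history = model.fit(X_train, y_train, batch_size=64,
                    validation_split=0.1, epochs=50, verbose=0)
plt.plot(history.history['accuracy'], label='train')
plt.plot(history.history['val_accuracy'], label='validation')
plt.xlabel('epoch')
plt.ylabel('accuracy')
plt.legend()
plt.show()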
# evaluate the model on the test data
scores = model.evaluate(X_test, y_test)
print('Loss on test data:', scores[0])
print('Accuracy on test data:', scores[1])
313/313 ━━━━━━━━━━━━━━━━━━━━ 2s 5ms/step - accuracy: 0.7982 - loss: 0.6423
Loss on test data: 0.6325967311859131
Accuracy on test data: 0.8019000291824341
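By the last epochs the training accuracy (≈0.86) is well ahead of the validation accuracy (≈0.80), a sign of overfitting. One standard remedy, sketched here under the same training setup, is early stopping on the validation loss:

# a sketch: stop training once val_loss stops improving for 5 epochs
early_stop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=5,
                                           restore_best_weights=True)
model.fit(X_train, y_train, batch_size=64, epochs=50,
          validation_split=0.1, callbacks=[early_stop])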
# display two test images and the recognition results
for n in [7, 16]:
    result = model.predict(X_test[n:n+1])
    plt.imshow(X_test[n].reshape(32,32,3), cmap=plt.get_cmap('gray'))
    plt.show()
    print('Real mark: ', np.argmax(y_test[n]))
    print('NN answer: ', np.argmax(result))
WARNING:tensorflow:6 out of the last 25 calls to <function TensorFlowTrainer.make_predict_function.<locals>.one_step_on_data_distributed at 0x7c45cc4d1b20> triggered tf.function retracing. Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has reduce_retracing=True option that can avoid unnecessary retracing. For (3), please refer to https://www.tensorflow.org/guide/function#controlling_retracing and https://www.tensorflow.org/api_docs/python/tf/function for more details.
1/1 ━━━━━━━━━━━━━━━━━━━━ 1s 694ms/step

Real mark: 4
NN answer: 0
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 47ms/step

Real mark: 4
NN answer: 4
# true class labels
true_labels = np.argmax(y_test, axis=1)
# predicted class labels
predicted_labels = np.argmax(model.predict(X_test), axis=1)
# classification quality report
print(classification_report(true_labels, predicted_labels, target_names=class_names))
# compute the confusion matrix
conf_matrix = confusion_matrix(true_labels, predicted_labels)
# plot the confusion matrix as a heatmap
display = ConfusionMatrixDisplay(confusion_matrix=conf_matrix, display_labels=class_names)
display.plot()
plt.show()
313/313 ━━━━━━━━━━━━━━━━━━━━ 4s 8ms/step
              precision    recall  f1-score   support

    airplane       0.78      0.85      0.81       983
  automobile       0.90      0.93      0.91      1026
        bird       0.73      0.69      0.71      1007
         cat       0.63      0.64      0.64      1011
        deer       0.81      0.75      0.78       985
         dog       0.70      0.71      0.71       974
        frog       0.86      0.79      0.82      1007
       horse       0.81      0.84      0.83       982
        ship       0.88      0.93      0.90      1026
       truck       0.92      0.88      0.90       999

    accuracy                           0.80     10000
   macro avg       0.80      0.80      0.80     10000
weighted avg       0.80      0.80      0.80     10000
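Per the report, cat (f1 ≈ 0.64) and dog (f1 ≈ 0.71) are the hardest classes. A small sketch to list the largest off-diagonal entries of the confusion matrix, i.e. the most frequently confused class pairs:

# find the five most common confusions (off-diagonal maxima)
errors = conf_matrix.copy()
np.fill_diagonal(errors, 0)
for idx in np.argsort(errors, axis=None)[::-1][:5]:
    i, j = np.unravel_index(idx, errors.shape)
    print(f'{class_names[i]} -> {class_names[j]}: {errors[i, j]} errors')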
