Сравнить коммиты
9 Коммитов
| Автор | SHA1 | Дата |
|---|---|---|
|
|
feab3ae3b9 | 2 недели назад |
|
|
3289b87a5d | 2 недели назад |
|
|
d81e3851d1 | 2 недели назад |
|
|
77905a587a | 2 недели назад |
|
|
7177c7a232 | 3 месяца назад |
|
|
ec7819bac7 | 3 месяца назад |
|
|
3390d95ce7 | 3 месяца назад |
|
|
c745af814f | 3 месяца назад |
|
|
af263ae869 | 3 месяца назад |
|
После Ширина: | Высота: | Размер: 6.6 KiB |
|
После Ширина: | Высота: | Размер: 6.7 KiB |
|
После Ширина: | Высота: | Размер: 6.8 KiB |
|
После Ширина: | Высота: | Размер: 6.6 KiB |
|
После Ширина: | Высота: | Размер: 6.6 KiB |
|
После Ширина: | Высота: | Размер: 25 KiB |
|
После Ширина: | Высота: | Размер: 25 KiB |
|
После Ширина: | Высота: | Размер: 25 KiB |
|
После Ширина: | Высота: | Размер: 25 KiB |
|
После Ширина: | Высота: | Размер: 28 KiB |
|
После Ширина: | Высота: | Размер: 29 KiB |
|
После Ширина: | Высота: | Размер: 7.1 KiB |
|
После Ширина: | Высота: | Размер: 6.6 KiB |
@ -0,0 +1,868 @@
|
||||
{
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 0,
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"provenance": [],
|
||||
"gpuType": "T4"
|
||||
},
|
||||
"kernelspec": {
|
||||
"name": "python3",
|
||||
"display_name": "Python 3"
|
||||
},
|
||||
"language_info": {
|
||||
"name": "python"
|
||||
},
|
||||
"accelerator": "GPU"
|
||||
},
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "CAumUvAGaImn"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"os.chdir('/content/drive/MyDrive/Colab Notebooks')"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# импорт модулей\n",
|
||||
"from tensorflow import keras\n",
|
||||
"import matplotlib.pyplot as plt\n",
|
||||
"import numpy as np\n",
|
||||
"import sklearn"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "h5MSWSsQamWR"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# загрузка датасета\n",
|
||||
"from keras.datasets import mnist\n",
|
||||
"(X_train, y_train), (X_test, y_test) = mnist.load_data()"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "95AfnWl1aq9X"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# создание своего разбиения датасета\n",
|
||||
"from sklearn.model_selection import train_test_split\n",
|
||||
"# объединяем в один набор\n",
|
||||
"X = np.concatenate((X_train, X_test))\n",
|
||||
"y = np.concatenate((y_train, y_test))\n",
|
||||
"# разбиваем по вариантам\n",
|
||||
"X_train, X_test, y_train, y_test = train_test_split(X, y,\n",
|
||||
"test_size = 10000,\n",
|
||||
"train_size = 60000,\n",
|
||||
"random_state = 15)"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "F2Fe8Fa6av1X"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# вывод размерностей\n",
|
||||
"print('Shape of X train:', X_train.shape)\n",
|
||||
"print('Shape of y train:', y_train.shape)"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "w5R3s-subD5z"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Вывод 4 изображений\n",
|
||||
"plt.figure(figsize=(10, 3))\n",
|
||||
"for i in range(4):\n",
|
||||
" plt.subplot(1, 4, i + 1)\n",
|
||||
" plt.imshow(X_train[i], cmap='gray')\n",
|
||||
" plt.title(f'Label: {y_train[i]}')\n",
|
||||
" plt.axis('off')\n",
|
||||
"plt.tight_layout()\n",
|
||||
"plt.show()"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "YmYWjSeDbKFg"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# развернем каждое изображение 28*28 в вектор 784\n",
|
||||
"num_pixels = X_train.shape[1] * X_train.shape[2]\n",
|
||||
"X_train = X_train.reshape(X_train.shape[0], num_pixels) / 255\n",
|
||||
"X_test = X_test.reshape(X_test.shape[0], num_pixels) / 255\n",
|
||||
"print('Shape of transformed X train:', X_train.shape)"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "NGKvRZ8fbypE"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# переведем метки в one-hot\n",
|
||||
"from keras.utils import to_categorical\n",
|
||||
"\n",
|
||||
"y_train = to_categorical(y_train)\n",
|
||||
"y_test = to_categorical(y_test)\n",
|
||||
"\n",
|
||||
"print('Shape of transformed y train:', y_train.shape)\n",
|
||||
"num_classes = y_train.shape[1]"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "dKZDth4wdMoi"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"from keras.models import Sequential\n",
|
||||
"from keras.layers import Dense"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "HdlasD8UdSFr"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# 1. создаем модель - объявляем ее объектом класса Sequential\n",
|
||||
"model = Sequential()\n",
|
||||
"# 2. добавляем выходной слой(скрытые слои отсутствуют)\n",
|
||||
"model.add(Dense(units=num_classes, activation='softmax'))\n",
|
||||
"# 3. компилируем модель\n",
|
||||
"model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "f7EFobe4dTjU"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# вывод информации об архитектуре модели\n",
|
||||
"print(model.summary())"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "Fr_Lnir_eTUS"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# обучение модели\n",
|
||||
"H = model.fit(X_train, y_train, validation_split=0.1, epochs=50)"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "P4jek-2sedhi"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# вывод графика ошибки по эпохам\n",
|
||||
"plt.plot(H.history['loss'])\n",
|
||||
"plt.plot(H.history['val_loss'])\n",
|
||||
"plt.grid()\n",
|
||||
"plt.xlabel('Epochs')\n",
|
||||
"plt.ylabel('loss')\n",
|
||||
"plt.legend(['train_loss', 'val_loss'])\n",
|
||||
"plt.title('Loss by epochs')\n",
|
||||
"plt.show()"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "JUeBjeS0ffg2"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Оценка качества работы модели на тестовых данных\n",
|
||||
"scores = model.evaluate(X_test, y_test)\n",
|
||||
"print('Loss on test data:', scores[0])\n",
|
||||
"print('Accuracy on test data:', scores[1])"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "9h5aG6MtfnjN"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# сохранение модели на диск\n",
|
||||
"model.save('/content/drive/MyDrive/Colab Notebooks/models/model_zero_hide.keras')"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "31ngORxnfsJb"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"model100 = Sequential()\n",
|
||||
"model100.add(Dense(units=100,input_dim=num_pixels, activation='sigmoid'))\n",
|
||||
"model100.add(Dense(units=num_classes, activation='softmax'))\n",
|
||||
"\n",
|
||||
"model100.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "GuUp0o_nf_Oq"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# вывод информации об архитектуре модели\n",
|
||||
"print(model100.summary())"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "1RJG5PfSgSdz"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Обучаем модель\n",
|
||||
"H = model100.fit(X_train, y_train, validation_split=0.1, epochs=50)"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "Ofd6o3nzgc8D"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# вывод графика ошибки по эпохам\n",
|
||||
"plt.plot(H.history['loss'])\n",
|
||||
"plt.plot(H.history['val_loss'])\n",
|
||||
"plt.grid()\n",
|
||||
"plt.xlabel('Epochs')\n",
|
||||
"plt.ylabel('loss')\n",
|
||||
"plt.legend(['train_loss', 'val_loss'])\n",
|
||||
"plt.title('Loss by epochs')\n",
|
||||
"plt.show()"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "On3RA9ZghcLj"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Оценка качества работы модели на тестовых данных\n",
|
||||
"scores = model100.evaluate(X_test, y_test)\n",
|
||||
"print('Loss on test data:', scores[0])\n",
|
||||
"print('Accuracy on test data:', scores[1])"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "d-2h4TVuhemj"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# сохранение модели на диск\n",
|
||||
"model100.save('/content/drive/MyDrive/Colab Notebooks/models/model100in_1hide.keras')"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "1mvHa_c8hjJx"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"model300 = Sequential()\n",
|
||||
"model300.add(Dense(units=300,input_dim=num_pixels, activation='sigmoid'))\n",
|
||||
"model300.add(Dense(units=num_classes, activation='softmax'))\n",
|
||||
"\n",
|
||||
"model300.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "WO3ZHI6xhlVt"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# вывод информации об архитектуре модели\n",
|
||||
"print(model300.summary())"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "BqRtNfophpf3"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Обучаем модель\n",
|
||||
"H = model300.fit(X_train, y_train, validation_split=0.1, epochs=50)"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "YrP4IANqhwjf"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# вывод графика ошибки по эпохам\n",
|
||||
"plt.plot(H.history['loss'])\n",
|
||||
"plt.plot(H.history['val_loss'])\n",
|
||||
"plt.grid()\n",
|
||||
"plt.xlabel('Epochs')\n",
|
||||
"plt.ylabel('loss')\n",
|
||||
"plt.legend(['train_loss', 'val_loss'])\n",
|
||||
"plt.title('Loss by epochs')\n",
|
||||
"plt.show()"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "M7D5NYCSiqzI"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Оценка качества работы модели на тестовых данных\n",
|
||||
"scores = model300.evaluate(X_test, y_test)\n",
|
||||
"print('Loss on test data:', scores[0])\n",
|
||||
"print('Accuracy on test data:', scores[1])"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "5dBUsxjVivJU"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# сохранение модели на диск\n",
|
||||
"model300.save('/content/drive/MyDrive/Colab Notebooks/models/model300in_1hide.keras')"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "0GB5tz5eizCo"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"model500 = Sequential()\n",
|
||||
"model500.add(Dense(units=500,input_dim=num_pixels, activation='sigmoid'))\n",
|
||||
"model500.add(Dense(units=num_classes, activation='softmax'))\n",
|
||||
"\n",
|
||||
"model500.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "9FlJqDcci26k"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# вывод информации об архитектуре модели\n",
|
||||
"print(model500.summary())"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "TbPS-5fKi9mZ"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Обучаем модель\n",
|
||||
"H = model500.fit(X_train, y_train, validation_split=0.1, epochs=50)"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "rODU_cugjBOX"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# вывод графика ошибки по эпохам\n",
|
||||
"plt.plot(H.history['loss'])\n",
|
||||
"plt.plot(H.history['val_loss'])\n",
|
||||
"plt.grid()\n",
|
||||
"plt.xlabel('Epochs')\n",
|
||||
"plt.ylabel('loss')\n",
|
||||
"plt.legend(['train_loss', 'val_loss'])\n",
|
||||
"plt.title('Loss by epochs')\n",
|
||||
"plt.show()"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "7uCJOOJGkTCc"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Оценка качества работы модели на тестовых данных\n",
|
||||
"scores = model500.evaluate(X_test, y_test)\n",
|
||||
"print('Loss on test data:', scores[0])\n",
|
||||
"print('Accuracy on test data:', scores[1])"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "H5BhhLZrkWFq"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# сохранение модели на диск\n",
|
||||
"model500.save('/content/drive/MyDrive/Colab Notebooks/models/model500in_1hide.keras')"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "Uyv2pf5FkYjc"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"model10050 = Sequential()\n",
|
||||
"model10050.add(Dense(units=100,input_dim=num_pixels, activation='sigmoid'))\n",
|
||||
"model10050.add(Dense(units=50,activation='sigmoid'))\n",
|
||||
"model10050.add(Dense(units=num_classes, activation='softmax'))\n",
|
||||
"\n",
|
||||
"model10050.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "0X6rM1m6klas"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# вывод информации об архитектуре модели\n",
|
||||
"print(model10050.summary())"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "CJRW6vaKkm9o"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Обучаем модель\n",
|
||||
"H = model10050.fit(X_train, y_train, validation_split=0.1, epochs=50)"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "wWbPA8j4k18a"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# вывод графика ошибки по эпохам\n",
|
||||
"plt.plot(H.history['loss'])\n",
|
||||
"plt.plot(H.history['val_loss'])\n",
|
||||
"plt.grid()\n",
|
||||
"plt.xlabel('Epochs')\n",
|
||||
"plt.ylabel('loss')\n",
|
||||
"plt.legend(['train_loss', 'val_loss'])\n",
|
||||
"plt.title('Loss by epochs')\n",
|
||||
"plt.show()"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "BnxtXX1kl33n"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Оценка качества работы модели на тестовых данных\n",
|
||||
"scores = model10050.evaluate(X_test, y_test)\n",
|
||||
"print('Loss on test data:', scores[0])\n",
|
||||
"print('Accuracy on test data:', scores[1])"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "c97Qx3pul98e"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# сохранение модели на диск\n",
|
||||
"model10050.save('/content/drive/MyDrive/Colab Notebooks/models/model100in_1hide_50in_2hide.keras')"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "Dn5qMhDAmBlZ"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"model100100 = Sequential()\n",
|
||||
"model100100.add(Dense(units=100,input_dim=num_pixels, activation='sigmoid'))\n",
|
||||
"model100100.add(Dense(units=100,activation='sigmoid'))\n",
|
||||
"model100100.add(Dense(units=num_classes, activation='softmax'))\n",
|
||||
"\n",
|
||||
"model100100.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "YIfzGZVzmCqT"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# вывод информации об архитектуре модели\n",
|
||||
"print(model100100.summary())"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "aK8ffWILmIDg"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Обучаем модель\n",
|
||||
"H = model100100.fit(X_train, y_train, validation_split=0.1, epochs=50)"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "Dz7X9T55mLCh"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# вывод графика ошибки по эпохам\n",
|
||||
"plt.plot(H.history['loss'])\n",
|
||||
"plt.plot(H.history['val_loss'])\n",
|
||||
"plt.grid()\n",
|
||||
"plt.xlabel('Epochs')\n",
|
||||
"plt.ylabel('loss')\n",
|
||||
"plt.legend(['train_loss', 'val_loss'])\n",
|
||||
"plt.title('Loss by epochs')\n",
|
||||
"plt.show()"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "eF7B4wucnIPS"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Оценка качества работы модели на тестовых данных\n",
|
||||
"scores = model100100.evaluate(X_test, y_test)\n",
|
||||
"print('Loss on test data:', scores[0])\n",
|
||||
"print('Accuracy on test data:', scores[1])"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "yxdjaq6bnNXt"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# сохранение модели на диск\n",
|
||||
"model100100.save('/content/drive/MyDrive/Colab Notebooks/models/model100in_1hide_100in_2hide.keras')"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "Sr9bCq_KnP85"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# сохранение лучшей модели в папку best_model\n",
|
||||
"model100.save('/content/drive/MyDrive/Colab Notebooks/best_model/model100.keras')"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "BV7wEu2SoMaB"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Загрузка модели с диска\n",
|
||||
"from keras.models import load_model\n",
|
||||
"model = load_model('/content/drive/MyDrive/Colab Notebooks/best_model/model100.keras')"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "hg2PYRgwoTiU"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# вывод тестового изображения и результата распознавания\n",
|
||||
"n = 222\n",
|
||||
"result = model.predict(X_test[n:n+1])\n",
|
||||
"print('NN output:', result)\n",
|
||||
"plt.imshow(X_test[n].reshape(28,28), cmap=plt.get_cmap('gray'))\n",
|
||||
"plt.show()\n",
|
||||
"print('Real mark: ', str(np.argmax(y_test[n])))\n",
|
||||
"print('NN answer: ', str(np.argmax(result)))"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "A8O5K-_4oeK9"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# вывод тестового изображения и результата распознавания\n",
|
||||
"n = 123\n",
|
||||
"result = model.predict(X_test[n:n+1])\n",
|
||||
"print('NN output:', result)\n",
|
||||
"plt.imshow(X_test[n].reshape(28,28), cmap=plt.get_cmap('gray'))\n",
|
||||
"plt.show()\n",
|
||||
"print('Real mark: ', str(np.argmax(y_test[n])))\n",
|
||||
"print('NN answer: ', str(np.argmax(result)))"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "pk03l3jdpUp5"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# загрузка собственного изображения\n",
|
||||
"from PIL import Image\n",
|
||||
"file_data = Image.open('test.png')\n",
|
||||
"file_data = file_data.convert('L') # перевод в градации серого\n",
|
||||
"test_img = np.array(file_data)"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "PkjvyImOpii6"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# вывод собственного изображения\n",
|
||||
"plt.imshow(test_img, cmap=plt.get_cmap('gray'))\n",
|
||||
"plt.show()\n",
|
||||
"# предобработка\n",
|
||||
"test_img = test_img / 255\n",
|
||||
"test_img = test_img.reshape(1, num_pixels)\n",
|
||||
"# распознавание\n",
|
||||
"result = model.predict(test_img)\n",
|
||||
"print('I think it\\'s ', np.argmax(result))"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "wcbVyWwusUx6"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# загрузка собственного изображения\n",
|
||||
"from PIL import Image\n",
|
||||
"file2_data = Image.open('test2.png')\n",
|
||||
"file2_data = file2_data.convert('L') # перевод в градации серого\n",
|
||||
"test2_img = np.array(file2_data)"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "JY7tkymctESN"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# вывод собственного изображения\n",
|
||||
"plt.imshow(test2_img, cmap=plt.get_cmap('gray'))\n",
|
||||
"plt.show()\n",
|
||||
"# предобработка\n",
|
||||
"test2_img = test2_img / 255\n",
|
||||
"test2_img = test2_img.reshape(1, num_pixels)\n",
|
||||
"# распознавание\n",
|
||||
"result_2 = model.predict(test2_img)\n",
|
||||
"print('I think it\\'s ', np.argmax(result_2))"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "saUm4dytutDS"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# загрузка собственного изображения, повернутого на 90 градусов\n",
|
||||
"from PIL import Image\n",
|
||||
"file90_data = Image.open('test90.png')\n",
|
||||
"file90_data = file90_data.convert('L') # перевод в градации серого\n",
|
||||
"test90_img = np.array(file90_data)"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "3DV_1KeKvo3S"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# вывод собственного изображения\n",
|
||||
"plt.imshow(test90_img, cmap=plt.get_cmap('gray'))\n",
|
||||
"plt.show()\n",
|
||||
"# предобработка\n",
|
||||
"test90_img = test90_img / 255\n",
|
||||
"test90_img = test90_img.reshape(1, num_pixels)\n",
|
||||
"# распознавание\n",
|
||||
"result_3 = model.predict(test90_img)\n",
|
||||
"print('I think it\\'s ', np.argmax(result_3))"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "uBXsSP-iweMO"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# загрузка собственного изображения, повернутого на 90 градусов\n",
|
||||
"from PIL import Image\n",
|
||||
"file902_data = Image.open('test90_2.png')\n",
|
||||
"file902_data = file902_data.convert('L') # перевод в градации серого\n",
|
||||
"test902_img = np.array(file902_data)"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "s9FSbb99wh_9"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# вывод собственного изображения\n",
|
||||
"plt.imshow(test902_img, cmap=plt.get_cmap('gray'))\n",
|
||||
"plt.show()\n",
|
||||
"# предобработка\n",
|
||||
"test902_img = test902_img / 255\n",
|
||||
"test902_img = test902_img.reshape(1, num_pixels)\n",
|
||||
"# распознавание\n",
|
||||
"result_4 = model.predict(test902_img)\n",
|
||||
"print('I think it\\'s ', np.argmax(result_4))"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "ppK14r4-w0Av"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [],
|
||||
"metadata": {
|
||||
"id": "ZaKbfAx8xaud"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
@ -1,11 +1,4 @@
|
||||
## Лабораторная работа №1
|
||||
|
||||
## Архитектура и обучение глубоких нейронных сетей
|
||||
|
||||
* [Задание](IS_Lab01_2023.pdf)
|
||||
|
||||
* [Задание](IS_Lab01_2023.pdf)
|
||||
* [Методические указания](IS_Lab01_Metod_2023.pdf)
|
||||
|
||||
* <a href="https://youtube.com/playlist?list=PLfdZ2TeaMzfzlpZ60rbaYU_epH5XPNbWU" target="_blank"><s>Какие нейроны, что вообще происходит?</s> Рекомендуется к просмотру для понимания (4 видео)</a>
|
||||
|
||||
* <a href="https://www.youtube.com/watch?v=FwFduRA_L6Q" target="_blank">Почувствуйте себя пионером нейронных сетей в области распознавания образов</a>
|
||||
@ -0,0 +1,581 @@
|
||||
# Отчет по лабораторной работе №1
|
||||
Пивоваров Я.В., Сидора Д.А., А-02-22
|
||||
|
||||
## 1. В среде Google Colab создание нового блокнота.
|
||||
```
|
||||
import os
|
||||
os.chdir('/content/drive/MyDrive/Colab Notebooks')
|
||||
```
|
||||
|
||||
* Импорт библиотек и модулей
|
||||
```
|
||||
from tensorflow import keras
|
||||
import matplotlib.pyplot as plt
|
||||
import numpy as np
|
||||
import sklearn
|
||||
```
|
||||
|
||||
## 2. Загрузка и рассмотрение набора данных
|
||||
```
|
||||
from keras.datasets import mnist
|
||||
(X_train, y_train), (X_test, y_test) = mnist.load_data()
|
||||
```
|
||||
|
||||
## 3. Разбиение набора данных на обучающий и тестовый.
|
||||
```
|
||||
from sklearn.model_selection import train_test_split
|
||||
```
|
||||
* Объединение в один набор.
|
||||
```
|
||||
X = np.concatenate((X_train, X_test))
|
||||
y = np.concatenate((y_train, y_test))
|
||||
```
|
||||
* Разбиение по вариантам. (4 бригада -> k=4*4-1)
|
||||
```
|
||||
X_train, X_test, y_train, y_test = train_test_split(X, y,test_size = 10000,train_size = 60000, random_state = 15)
|
||||
```
|
||||
|
||||
* Вывод размерностей.
|
||||
```
|
||||
print('Shape of X train:', X_train.shape)
|
||||
print('Shape of y train:', y_train.shape)
|
||||
```
|
||||
|
||||
> Shape of X train: (60000, 28, 28)
|
||||
> Shape of y train: (60000,)
|
||||
|
||||
## 4. Вывод обучающих данных.
|
||||
* Выведем первые четыре элемента обучающих данных.
|
||||
```
|
||||
plt.figure(figsize=(10, 3))
|
||||
for i in range(4):
|
||||
plt.subplot(1, 4, i + 1)
|
||||
plt.imshow(X_train[i], cmap='gray')
|
||||
plt.title(f'Label: {y_train[i]}')
|
||||
plt.axis('off')
|
||||
plt.tight_layout()
|
||||
plt.show()
|
||||
```
|
||||
|
||||

|
||||
|
||||
## 5. Предобработка данных.
|
||||
* Развернем каждое изображение в вектор.
|
||||
```
|
||||
num_pixels = X_train.shape[1] * X_train.shape[2]
|
||||
X_train = X_train.reshape(X_train.shape[0], num_pixels) / 255
|
||||
X_test = X_test.reshape(X_test.shape[0], num_pixels) / 255
|
||||
print('Shape of transformed X train:', X_train.shape)
|
||||
```
|
||||
|
||||
> Shape of transformed X train: (60000, 784)
|
||||
|
||||
* Переведем метки в one-hot.
|
||||
```
|
||||
from keras.utils import to_categorical
|
||||
|
||||
y_train = to_categorical(y_train)
|
||||
y_test = to_categorical(y_test)
|
||||
|
||||
print('Shape of transformed y train:', y_train.shape)
|
||||
num_classes = y_train.shape[1]
|
||||
```
|
||||
|
||||
> Shape of transformed y train: (60000, 10)
|
||||
|
||||
## 6. Реализация и обучение однослойной нейронной сети.
|
||||
```
|
||||
from keras.models import Sequential
|
||||
from keras.layers import Dense
|
||||
```
|
||||
|
||||
* Создаем модель - объявляем ее объектом класса Sequential, добавляем выходной слой.
|
||||
```
|
||||
model = Sequential()
|
||||
model.add(Dense(units=num_classes, activation='softmax'))
|
||||
```
|
||||
* Компилируем модель.
|
||||
```
|
||||
model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])
|
||||
print(model.summary())
|
||||
```
|
||||
|
||||
>Model: "sequential"
|
||||
>┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━┓
|
||||
>┃ Layer (type) ┃ Output Shape ┃ Param # ┃
|
||||
>┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━┩
|
||||
>│ dense (Dense) │ ? │ 0 (unbuilt) │
|
||||
>└─────────────────────────────────┴────────────────────────┴───────────────┘
|
||||
> Total params: 0 (0.00 B)
|
||||
> Trainable params: 0 (0.00 B)
|
||||
> Non-trainable params: 0 (0.00 B)
|
||||
>None
|
||||
|
||||
* Обучаем модель.
|
||||
```
|
||||
H = model.fit(X_train, y_train, validation_split=0.1, epochs=50)
|
||||
```
|
||||
|
||||
* Выводим график функции ошибки
|
||||
```
|
||||
plt.plot(H.history['loss'])
|
||||
plt.plot(H.history['val_loss'])
|
||||
plt.grid()
|
||||
plt.xlabel('Epochs')
|
||||
plt.ylabel('loss')
|
||||
plt.legend(['train_loss', 'val_loss'])
|
||||
plt.title('Loss by epochs')
|
||||
plt.show()
|
||||
```
|
||||
|
||||

|
||||
|
||||
## 7. Применение модели к тестовым данным.
|
||||
```
|
||||
scores = model.evaluate(X_test, y_test)
|
||||
print('Loss on test data:', scores[0])
|
||||
print('Accuracy on test data:', scores[1])
|
||||
```
|
||||
|
||||
>accuracy: 0.9313 - loss: 0.2648
|
||||
>Loss on test data: 0.2729383409023285
|
||||
>Accuracy on test data: 0.9290000200271606
|
||||
|
||||
## 8. Добавление одного скрытого слоя.
|
||||
* При 100 нейронах в скрытом слое.
|
||||
```
|
||||
model100 = Sequential()
|
||||
model100.add(Dense(units=100,input_dim=num_pixels, activation='sigmoid'))
|
||||
model100.add(Dense(units=num_classes, activation='softmax'))
|
||||
|
||||
model100.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])
|
||||
|
||||
print(model100.summary())
|
||||
```
|
||||
|
||||
>Model: "sequential_1"
|
||||
>┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━┓
|
||||
>┃ Layer (type) ┃ Output Shape ┃ Param # ┃
|
||||
>┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━┩
|
||||
>│ dense_1 (Dense) │ (None, 100) │ 78,500 │
|
||||
>├─────────────────────────────────┼────────────────────────┼───────────────┤
|
||||
>│ dense_2 (Dense) │ (None, 10) │ 1,010 │
|
||||
>└─────────────────────────────────┴────────────────────────┴───────────────┘
|
||||
> Total params: 79,510 (310.59 KB)
|
||||
> Trainable params: 79,510 (310.59 KB)
|
||||
> Non-trainable params: 0 (0.00 B)
|
||||
>None
|
||||
|
||||
* Обучение модели.
|
||||
```
|
||||
H = model100.fit(X_train, y_train, validation_split=0.1, epochs=50)
|
||||
```
|
||||
|
||||
* График функции ошибки.
|
||||
```
|
||||
plt.plot(H.history['loss'])
|
||||
plt.plot(H.history['val_loss'])
|
||||
plt.grid()
|
||||
plt.xlabel('Epochs')
|
||||
plt.ylabel('loss')
|
||||
plt.legend(['train_loss', 'val_loss'])
|
||||
plt.title('Loss by epochs')
|
||||
plt.show()
|
||||
```
|
||||
|
||||

|
||||
|
||||
```
|
||||
scores = model100.evaluate(X_test, y_test)
|
||||
print('Loss on test data:', scores[0])
|
||||
print('Accuracy on test data:', scores[1])
|
||||
```
|
||||
|
||||
>accuracy: 0.9500 - loss: 0.1884
|
||||
>Loss on test data: 0.1930633932352066
|
||||
>Accuracy on test data: 0.9473999738693237
|
||||
|
||||
* При 300 нейронах в скрытом слое.
|
||||
```
|
||||
model300 = Sequential()
|
||||
model300.add(Dense(units=300,input_dim=num_pixels, activation='sigmoid'))
|
||||
model300.add(Dense(units=num_classes, activation='softmax'))
|
||||
|
||||
model300.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])
|
||||
|
||||
print(model300.summary())
|
||||
```
|
||||
|
||||
>Model: "sequential_2"
|
||||
>┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━┓
|
||||
>┃ Layer (type) ┃ Output Shape ┃ Param # ┃
|
||||
>┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━┩
|
||||
>│ dense_3 (Dense) │ (None, 300) │ 235,500 │
|
||||
>├─────────────────────────────────┼────────────────────────┼───────────────┤
|
||||
>│ dense_4 (Dense) │ (None, 10) │ 3,010 │
|
||||
>└─────────────────────────────────┴────────────────────────┴───────────────┘
|
||||
> Total params: 238,510 (931.68 KB)
|
||||
> Trainable params: 238,510 (931.68 KB)
|
||||
> Non-trainable params: 0 (0.00 B)
|
||||
>None
|
||||
* Обучение модели.
|
||||
```
|
||||
H = model300.fit(X_train, y_train, validation_split=0.1, epochs=50)
|
||||
```
|
||||
|
||||
* Вывод графиков функции ошибки.
|
||||
```
|
||||
plt.plot(H.history['loss'])
|
||||
plt.plot(H.history['val_loss'])
|
||||
plt.grid()
|
||||
plt.xlabel('Epochs')
|
||||
plt.ylabel('loss')
|
||||
plt.legend(['train_loss', 'val_loss'])
|
||||
plt.title('Loss by epochs')
|
||||
plt.show()
|
||||
```
|
||||
|
||||

|
||||
|
||||
```
|
||||
scores = model300.evaluate(X_test, y_test)
|
||||
print('Loss on test data:', scores[0])
|
||||
print('Accuracy on test data:', scores[1])
|
||||
```
|
||||
|
||||
>accuracy: 0.9444 - loss: 0.2126
|
||||
>Loss on test data: 0.2181043177843094
|
||||
>Accuracy on test data: 0.9419999718666077
|
||||
|
||||
* При 500 нейронах в скрытом слое.
|
||||
```
|
||||
model500 = Sequential()
|
||||
model500.add(Dense(units=500,input_dim=num_pixels, activation='sigmoid'))
|
||||
model500.add(Dense(units=num_classes, activation='softmax'))
|
||||
|
||||
model500.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])
|
||||
|
||||
print(model500.summary())
|
||||
```
|
||||
|
||||
>Model: "sequential_3"
|
||||
>┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━┓
|
||||
>┃ Layer (type) ┃ Output Shape ┃ Param # ┃
|
||||
>┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━┩
|
||||
>│ dense_5 (Dense) │ (None, 500) │ 392,500 │
|
||||
>├─────────────────────────────────┼────────────────────────┼───────────────┤
|
||||
>│ dense_6 (Dense) │ (None, 10) │ 5,010 │
|
||||
>└─────────────────────────────────┴────────────────────────┴───────────────┘
|
||||
> Total params: 397,510 (1.52 MB)
|
||||
> Trainable params: 397,510 (1.52 MB)
|
||||
> Non-trainable params: 0 (0.00 B)
|
||||
>None
|
||||
|
||||
* Обучение модели.
|
||||
```
|
||||
H = model500.fit(X_train, y_train, validation_split=0.1, epochs=50)
|
||||
```
|
||||
|
||||
* Вывод графиков функции ошибки.
|
||||
```
|
||||
plt.plot(H.history['loss'])
|
||||
plt.plot(H.history['val_loss'])
|
||||
plt.grid()
|
||||
plt.xlabel('Epochs')
|
||||
plt.ylabel('loss')
|
||||
plt.legend(['train_loss', 'val_loss'])
|
||||
plt.title('Loss by epochs')
|
||||
plt.show()
|
||||
```
|
||||
|
||||

|
||||
|
||||
```
|
||||
scores = model500.evaluate(X_test, y_test)
|
||||
print('Loss on test data:', scores[0])
|
||||
print('Accuracy on test data:', scores[1])
|
||||
```
|
||||
|
||||
>accuracy: 0.9401 - loss: 0.2261
|
||||
>Loss on test data: 0.2324201464653015
|
||||
>Accuracy on test data: 0.9376000165939331
|
||||
|
||||
Как мы видим, лучшая метрика получилась при архитектуре со 100 нейронами в скрытом слое:
|
||||
Ошибка на тестовых данных: 0.1930633932352066
|
||||
Точность на тестовых данных: 0.9473999738693237
|
||||
|
||||
## 9. Добавление второго скрытого слоя.
|
||||
* При 50 нейронах во втором скрытом слое.
|
||||
```
|
||||
model10050 = Sequential()
|
||||
model10050.add(Dense(units=100,input_dim=num_pixels, activation='sigmoid'))
|
||||
model10050.add(Dense(units=50,activation='sigmoid'))
|
||||
model10050.add(Dense(units=num_classes, activation='softmax'))
|
||||
|
||||
model10050.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])
|
||||
|
||||
print(model10050.summary())
|
||||
```
|
||||
|
||||
>Model: "sequential_4"
|
||||
>┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━┓
|
||||
>┃ Layer (type) ┃ Output Shape ┃ Param # ┃
|
||||
>┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━┩
|
||||
>│ dense_7 (Dense) │ (None, 100) │ 78,500 │
|
||||
>├─────────────────────────────────┼────────────────────────┼───────────────┤
|
||||
>│ dense_8 (Dense) │ (None, 50) │ 5,050 │
|
||||
>├─────────────────────────────────┼────────────────────────┼───────────────┤
|
||||
>│ dense_9 (Dense) │ (None, 10) │ 510 │
|
||||
>└─────────────────────────────────┴────────────────────────┴───────────────┘
|
||||
> Total params: 84,060 (328.36 KB)
|
||||
> Trainable params: 84,060 (328.36 KB)
|
||||
> Non-trainable params: 0 (0.00 B)
|
||||
>None
|
||||
|
||||
* Обучаем модель.
|
||||
```
|
||||
H = model10050.fit(X_train, y_train, validation_split=0.1, epochs=50)
|
||||
```
|
||||
|
||||
* Выводим график функции ошибки.
|
||||
```
|
||||
plt.plot(H.history['loss'])
|
||||
plt.plot(H.history['val_loss'])
|
||||
plt.grid()
|
||||
plt.xlabel('Epochs')
|
||||
plt.ylabel('loss')
|
||||
plt.legend(['train_loss', 'val_loss'])
|
||||
plt.title('Loss by epochs')
|
||||
plt.show()
|
||||
```
|
||||
|
||||

|
||||
|
||||
```
|
||||
scores = model10050.evaluate(X_test, y_test)
|
||||
print('Loss on test data:', scores[0])
|
||||
print('Accuracy on test data:', scores[1])
|
||||
```
|
||||
|
||||
>accuracy: 0.9476 - loss: 0.1931
|
||||
>Loss on test data: 0.1974852979183197
|
||||
>Accuracy on test data: 0.9449999928474426
|
||||
|
||||
* При 100 нейронах во втором скрытом слое.
|
||||
```
|
||||
model100100 = Sequential()
|
||||
model100100.add(Dense(units=100,input_dim=num_pixels, activation='sigmoid'))
|
||||
model100100.add(Dense(units=100,activation='sigmoid'))
|
||||
model100100.add(Dense(units=num_classes, activation='softmax'))
|
||||
|
||||
model100100.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])
|
||||
|
||||
print(model100100.summary())
|
||||
```
|
||||
|
||||
>Model: "sequential_5"
|
||||
>┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━┓
|
||||
>┃ Layer (type) ┃ Output Shape ┃ Param # ┃
|
||||
>┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━┩
|
||||
>│ dense_10 (Dense) │ (None, 100) │ 78,500 │
|
||||
>├─────────────────────────────────┼────────────────────────┼───────────────┤
|
||||
>│ dense_11 (Dense) │ (None, 100) │ 10,100 │
|
||||
>├─────────────────────────────────┼────────────────────────┼───────────────┤
|
||||
>│ dense_12 (Dense) │ (None, 10) │ 1,010 │
|
||||
>└─────────────────────────────────┴────────────────────────┴───────────────┘
|
||||
> Total params: 89,610 (350.04 KB)
|
||||
> Trainable params: 89,610 (350.04 KB)
|
||||
> Non-trainable params: 0 (0.00 B)
|
||||
>None
|
||||
|
||||
* Обучаем модель.
|
||||
```
|
||||
H = model100100.fit(X_train, y_train, validation_split=0.1, epochs=50)
|
||||
```
|
||||
|
||||
* Выводим график функции ошибки.
|
||||
```
|
||||
plt.plot(H.history['loss'])
|
||||
plt.plot(H.history['val_loss'])
|
||||
plt.grid()
|
||||
plt.xlabel('Epochs')
|
||||
plt.ylabel('loss')
|
||||
plt.legend(['train_loss', 'val_loss'])
|
||||
plt.title('Loss by epochs')
|
||||
plt.show()
|
||||
```
|
||||
|
||||

|
||||
|
||||
```
|
||||
scores = model100100.evaluate(X_test, y_test)
|
||||
print('Loss on test data:', scores[0])
|
||||
print('Accuracy on test data:', scores[1])
|
||||
```
|
||||
|
||||
>accuracy: 0.9485 - loss: 0.1814
|
||||
>Loss on test data: 0.18734164535999298
|
||||
>Accuracy on test data: 0.9470000267028809
|
||||
|
||||
## 10. Результаты исследования архитектур нейронной сети.
|
||||
|
||||
| Количество скрытых слоев | Количество нейронов в первом скрытом слое | Количество нейронов во втором скрытом слое | Значение метрики качества классификации |
|
||||
|--------------------------|-------------------------------------------|--------------------------------------------|------------------------------------------|
|
||||
| 0 | - | - | 0.9290000200271606 |
|
||||
| 1 | 100 | - | 0.9473999738693237 |
|
||||
| 1 | 300 | - | 0.9419999718666077 |
|
||||
| 1 | 500 | - | 0.9376000165939331 |
|
||||
| 2 | 100 | 50 | 0.9449999928474426 |
|
||||
| 2 | 100 | 100 | 0.9470000267028809 |
|
||||
|
||||
Анализ результатов позволяет сделать вывод, что наилучшее качество классификации (порядка 94.7%) достигается при использовании моделей с относительно простой архитектурой. Наибольшую точность показали однослойная сеть со 100 нейронами и двухслойная конфигурация с 100 и 100 нейронами соответственно.
|
||||
|
||||
## 11. Сохранение наилучшей модели на диск.
|
||||
```
|
||||
model100.save('/content/drive/MyDrive/Colab Notebooks/best_model/model100.keras')
|
||||
```
|
||||
|
||||
* Загрузка лучшей модели с диска.
|
||||
```
|
||||
from keras.models import load_model
|
||||
model = load_model('/content/drive/MyDrive/Colab Notebooks/best_model/model100.keras')
|
||||
```
|
||||
|
||||
## 12. Вывод тестовых изображений и результатов распознаваний.
|
||||
```
|
||||
n = 222
|
||||
result = model.predict(X_test[n:n+1])
|
||||
print('NN output:', result)
|
||||
plt.imshow(X_test[n].reshape(28,28), cmap=plt.get_cmap('gray'))
|
||||
plt.show()
|
||||
print('Real mark: ', str(np.argmax(y_test[n])))
|
||||
print('NN answer: ', str(np.argmax(result)))
|
||||
```
|
||||
|
||||
>NN output: [[3.7926259e-03 9.0994104e-07 2.0981293e-04 2.9478846e-02 2.0727816e-06
|
||||
> 9.6508384e-01 7.6052487e-07 5.7595258e-05 1.0619552e-03 3.1140275e-04]]
|
||||

|
||||
>Real mark: 5
|
||||
>NN answer: 5
|
||||
|
||||
```
|
||||
n = 123
|
||||
result = model.predict(X_test[n:n+1])
|
||||
print('NN output:', result)
|
||||
plt.imshow(X_test[n].reshape(28,28), cmap=plt.get_cmap('gray'))
|
||||
plt.show()
|
||||
print('Real mark: ', str(np.argmax(y_test[n])))
|
||||
print('NN answer: ', str(np.argmax(result)))
|
||||
```
|
||||
|
||||
>NN output: [[7.6678516e-06 2.1507578e-06 2.5754166e-04 6.3994766e-04 2.8644723e-04
|
||||
> 2.3038971e-04 1.0776109e-05 2.3045135e-05 9.9186021e-01 6.6818334e-03]]
|
||||

|
||||
>Real mark: 8
|
||||
>NN answer: 8
|
||||
|
||||
## 13. Тестирование на собственных изображениях.
|
||||
* Загрузка 1 собственного изображения.
|
||||
```
|
||||
from PIL import Image
|
||||
file_data = Image.open('test.png')
|
||||
file_data = file_data.convert('L') # перевод в градации серого
|
||||
test_img = np.array(file_data)
|
||||
```
|
||||
|
||||
* Вывод собственного изображения.
|
||||
```
|
||||
plt.imshow(test_img, cmap=plt.get_cmap('gray'))
|
||||
plt.show()
|
||||
```
|
||||
|
||||

|
||||
|
||||
* Предобработка.
|
||||
```
|
||||
test_img = test_img / 255
|
||||
test_img = test_img.reshape(1, num_pixels)
|
||||
```
|
||||
|
||||
* Распознавание.
|
||||
```
|
||||
result = model.predict(test_img)
|
||||
print('I think it\'s ', np.argmax(result))
|
||||
```
|
||||
>I think it's 2
|
||||
|
||||
* Тест 2 изображения.
|
||||
```
|
||||
from PIL import Image
|
||||
file2_data = Image.open('test2.png')
|
||||
file2_data = file2_data.convert('L') # перевод в градации серого
|
||||
test2_img = np.array(file2_data)
|
||||
```
|
||||
|
||||
```
|
||||
plt.imshow(test2_img, cmap=plt.get_cmap('gray'))
|
||||
plt.show()
|
||||
```
|
||||
|
||||

|
||||
|
||||
```
|
||||
test2_img = test2_img / 255
|
||||
test2_img = test2_img.reshape(1, num_pixels)
|
||||
```
|
||||
|
||||
```
|
||||
result_2 = model.predict(test2_img)
|
||||
print('I think it\'s ', np.argmax(result_2))
|
||||
```
|
||||
|
||||
>I think it's 8
|
||||
|
||||
Сеть корректно распознала цифры на изображениях.
|
||||
|
||||
## 14. Тестирование на повернутых изображениях.
|
||||
```
|
||||
from PIL import Image
|
||||
file90_data = Image.open('test90.png')
|
||||
file90_data = file90_data.convert('L') # перевод в градации серого
|
||||
test90_img = np.array(file90_data)
|
||||
|
||||
plt.imshow(test90_img, cmap=plt.get_cmap('gray'))
|
||||
plt.show()
|
||||
```
|
||||
|
||||

|
||||
|
||||
```
|
||||
test90_img = test90_img / 255
|
||||
test90_img = test90_img.reshape(1, num_pixels)
|
||||
|
||||
result_3 = model.predict(test90_img)
|
||||
print('I think it\'s ', np.argmax(result_3))
|
||||
```
|
||||
|
||||
>I think it's 8
|
||||
|
||||
```
|
||||
from PIL import Image
|
||||
file902_data = Image.open('test90_2.png')
|
||||
file902_data = file902_data.convert('L') # перевод в градации серого
|
||||
test902_img = np.array(file902_data)
|
||||
|
||||
plt.imshow(test902_img, cmap=plt.get_cmap('gray'))
|
||||
plt.show()
|
||||
```
|
||||
|
||||

|
||||
|
||||
```
|
||||
test902_img = test902_img / 255
|
||||
test902_img = test902_img.reshape(1, num_pixels)
|
||||
|
||||
result_4 = model.predict(test902_img)
|
||||
print('I think it\'s ', np.argmax(result_4))
|
||||
```
|
||||
|
||||
>I think it's 4
|
||||
|
||||
Сеть не распознала цифры на изображениях корректно.
|
||||
@ -1,11 +0,0 @@
|
||||
## Лабораторная работа №2
|
||||
|
||||
## Обнаружение аномалий
|
||||
|
||||
* [Задание](IS_Lab02_2023.pdf)
|
||||
|
||||
* [Методические указания](IS_Lab02_Metod_2023.pdf)
|
||||
|
||||
* [Наборы данных](data)
|
||||
|
||||
* [Библиотека для автокодировщиков](lab02_lib.py)
|
||||
|
После Ширина: | Высота: | Размер: 7.1 KiB |
|
После Ширина: | Высота: | Размер: 7.2 KiB |
|
После Ширина: | Высота: | Размер: 30 KiB |
|
После Ширина: | Высота: | Размер: 6.7 KiB |
|
После Ширина: | Высота: | Размер: 6.8 KiB |
|
После Ширина: | Высота: | Размер: 118 KiB |
|
После Ширина: | Высота: | Размер: 12 KiB |
|
После Ширина: | Высота: | Размер: 11 KiB |
|
После Ширина: | Высота: | Размер: 66 KiB |
@ -0,0 +1,419 @@
|
||||
{
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 0,
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"provenance": [],
|
||||
"gpuType": "T4"
|
||||
},
|
||||
"kernelspec": {
|
||||
"name": "python3",
|
||||
"display_name": "Python 3"
|
||||
},
|
||||
"language_info": {
|
||||
"name": "python"
|
||||
},
|
||||
"accelerator": "GPU"
|
||||
},
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"**Пункт 1**"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "02oT0d1nrHsn"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"import os\n",
|
||||
"os.chdir('/content/drive/MyDrive/Colab Notebooks/IS_LR3')"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "tRydpSDQWJiB"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# импорт модулей\n",
|
||||
"from tensorflow import keras\n",
|
||||
"from tensorflow.keras import layers\n",
|
||||
"from tensorflow.keras.models import Sequential\n",
|
||||
"import matplotlib.pyplot as plt\n",
|
||||
"import numpy as np\n",
|
||||
"from sklearn.metrics import classification_report, confusion_matrix\n",
|
||||
"from sklearn.metrics import ConfusionMatrixDisplay"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "lR2eVWcXa3G3"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"**Пункт** 2"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "FUlWVQ9WrUIG"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# загрузка датасета\n",
|
||||
"from keras.datasets import mnist\n",
|
||||
"(X_train, y_train), (X_test, y_test) = mnist.load_data()"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "4HaBhEvAqcIk"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"**Пункт 3**"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "OXQ1zw59rZC9"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# создание своего разбиения датасета\n",
|
||||
"from sklearn.model_selection import train_test_split\n",
|
||||
"\n",
|
||||
"# объединяем в один набор\n",
|
||||
"X = np.concatenate((X_train, X_test))\n",
|
||||
"y = np.concatenate((y_train, y_test))\n",
|
||||
"\n",
|
||||
"# разбиваем по вариантам\n",
|
||||
"X_train, X_test, y_train, y_test = train_test_split(X, y,\n",
|
||||
" test_size = 10000,\n",
|
||||
" train_size = 60000,\n",
|
||||
" random_state = 15)"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "IpFTFYx3bH2n"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# вывод размерностей\n",
|
||||
"print('Shape of X train:', X_train.shape)\n",
|
||||
"print('Shape of y train:', y_train.shape)\n",
|
||||
"\n",
|
||||
"print('Shape of X test:', X_test.shape)\n",
|
||||
"print('Shape of y test:', y_test.shape)"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "QVquJXQgqfLF"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"**Пункт 4**"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "FSolDisjriNY"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Зададим параметры данных и модели\n",
|
||||
"num_classes = 10\n",
|
||||
"input_shape = (28, 28, 1)\n",
|
||||
"\n",
|
||||
"# Приведение входных данных к диапазону [0, 1]\n",
|
||||
"X_train = X_train / 255\n",
|
||||
"X_test = X_test / 255\n",
|
||||
"\n",
|
||||
"# Расширяем размерность входных данных, чтобы каждое изображение имело\n",
|
||||
"# размерность (высота, ширина, количество каналов)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"X_train = np.expand_dims(X_train, -1)\n",
|
||||
"X_test = np.expand_dims(X_test, -1)\n",
|
||||
"print('Shape of transformed X train:', X_train.shape)\n",
|
||||
"print('Shape of transformed X test:', X_test.shape)\n",
|
||||
"\n",
|
||||
"# переведем метки в one-hot\n",
|
||||
"y_train = keras.utils.to_categorical(y_train, num_classes)\n",
|
||||
"y_test = keras.utils.to_categorical(y_test, num_classes)\n",
|
||||
"print('Shape of transformed y train:', y_train.shape)\n",
|
||||
"print('Shape of transformed y test:', y_test.shape)"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "tYOmNY6HqhgT"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"**Пункт 5**"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "J22FRrP6rp6H"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# создаем модель\n",
|
||||
"model = Sequential()\n",
|
||||
"model.add(layers.Conv2D(32, kernel_size=(3, 3), activation=\"relu\", input_shape=input_shape))\n",
|
||||
"model.add(layers.MaxPooling2D(pool_size=(2, 2)))\n",
|
||||
"model.add(layers.Conv2D(64, kernel_size=(3, 3), activation=\"relu\"))\n",
|
||||
"model.add(layers.MaxPooling2D(pool_size=(2, 2)))\n",
|
||||
"model.add(layers.Dropout(0.5))\n",
|
||||
"model.add(layers.Flatten())\n",
|
||||
"model.add(layers.Dense(num_classes, activation=\"softmax\"))\n",
|
||||
"\n",
|
||||
"model.summary()"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "CfKD5iNhqka1"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# компилируем и обучаем модель\n",
|
||||
"batch_size = 512\n",
|
||||
"epochs = 15\n",
|
||||
"model.compile(loss=\"categorical_crossentropy\", optimizer=\"adam\", metrics=[\"accuracy\"])\n",
|
||||
"model.fit(X_train, y_train, batch_size=batch_size, epochs=epochs, validation_split=0.1)"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "XTdrbt_Vqmzn"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"**Пункт 6**"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "g0NNH4Tfrvap"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Оценка качества работы модели на тестовых данных\n",
|
||||
"scores = model.evaluate(X_test, y_test)\n",
|
||||
"print('Loss on test data:', scores[0])\n",
|
||||
"print('Accuracy on test data:', scores[1])"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "knUHNHoVqpLs"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"**Пункт 7**"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "yNmb5Ot3sKBx"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# вывод первого тестового изображения и результата распознавания\n",
|
||||
"n = 123\n",
|
||||
"result = model.predict(X_test[n:n+1])\n",
|
||||
"print('NN output:', result)\n",
|
||||
"plt.show()\n",
|
||||
"plt.imshow(X_test[n].reshape(28,28), cmap=plt.get_cmap('gray'))\n",
|
||||
"print('Real mark: ', np.argmax(y_test[n]))\n",
|
||||
"print('NN answer: ', np.argmax(result))"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "ONaoSInWqtAV"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# вывод первого тестового изображения и результата распознавания\n",
|
||||
"n = 110\n",
|
||||
"result = model.predict(X_test[n:n+1])\n",
|
||||
"print('NN output:', result)\n",
|
||||
"plt.show()\n",
|
||||
"plt.imshow(X_test[n].reshape(28,28), cmap=plt.get_cmap('gray'))\n",
|
||||
"print('Real mark: ', np.argmax(y_test[n]))\n",
|
||||
"print('NN answer: ', np.argmax(result))"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "Xem4kHY3qvnH"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"**Пункт 8**"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "ciRgy9visOnv"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# истинные метки классов\n",
|
||||
"true_labels = np.argmax(y_test, axis=1)\n",
|
||||
"# предсказанные метки классов\n",
|
||||
"predicted_labels = np.argmax(model.predict(X_test), axis=1)\n",
|
||||
"\n",
|
||||
"# отчет о качестве классификации\n",
|
||||
"print(classification_report(true_labels, predicted_labels))\n",
|
||||
"# вычисление матрицы ошибок\n",
|
||||
"conf_matrix = confusion_matrix(true_labels, predicted_labels)\n",
|
||||
"# отрисовка матрицы ошибок в виде \"тепловой карты\"\n",
|
||||
"display = ConfusionMatrixDisplay(confusion_matrix=conf_matrix)\n",
|
||||
"display.plot()\n",
|
||||
"plt.show()"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "LMippjEhqyAQ"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"**Пункт 9**"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "Sb3HfMLpsUsT"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# загрузка собственного изображения 1\n",
|
||||
"from PIL import Image\n",
|
||||
"file_data = Image.open('test.png')\n",
|
||||
"file_data = file_data.convert('L') # перевод в градации серого\n",
|
||||
"test_img = np.array(file_data)\n",
|
||||
"\n",
|
||||
"# вывод собственного изображения\n",
|
||||
"plt.imshow(test_img, cmap=plt.get_cmap('gray'))\n",
|
||||
"plt.show()\n",
|
||||
"\n",
|
||||
"# предобработка\n",
|
||||
"test_img = test_img / 255\n",
|
||||
"test_img = np.reshape(test_img, (1,28,28,1))\n",
|
||||
"\n",
|
||||
"# распознавание\n",
|
||||
"result = model.predict(test_img)\n",
|
||||
"print('I think it\\'s ', np.argmax(result))"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "EWSA9wnQq0oH"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# загрузка собственного изображения 2\n",
|
||||
"from PIL import Image\n",
|
||||
"file_data = Image.open('test2.png')\n",
|
||||
"file_data = file_data.convert('L') # перевод в градации серого\n",
|
||||
"test_img = np.array(file_data)\n",
|
||||
"\n",
|
||||
"# вывод собственного изображения\n",
|
||||
"plt.imshow(test_img, cmap=plt.get_cmap('gray'))\n",
|
||||
"plt.show()\n",
|
||||
"\n",
|
||||
"# предобработка\n",
|
||||
"test_img = test_img / 255\n",
|
||||
"test_img = np.reshape(test_img, (1,28,28,1))\n",
|
||||
"\n",
|
||||
"# распознавание\n",
|
||||
"result = model.predict(test_img)\n",
|
||||
"print('I think it\\'s ', np.argmax(result))"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "HV_2ipNkq4PI"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"**Пункт 10**"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "OsnJR4STsaCl"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# путь к сохранённой модели из ЛР1\n",
|
||||
"model_fc = keras.models.load_model('/content/drive/MyDrive/Colab Notebooks/best_model/model100.keras')\n",
|
||||
"\n",
|
||||
"# архитектура модели\n",
|
||||
"model_fc.summary()"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "w575Bu7Yq7W1"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# подготовка тестовых данных для полносвязной модели\n",
|
||||
"X_test_fc = X_test.reshape(X_test.shape[0], 28*28) # (10000, 784)\n",
|
||||
"y_test_fc = y_test # если в ЛР3 ты уже перевёл метки в one-hot\n",
|
||||
"\n",
|
||||
"# оценка качества, как в п. 6\n",
|
||||
"scores = model_fc.evaluate(X_test_fc, y_test_fc, verbose=0)\n",
|
||||
"print('Loss on test data (FC model):', scores[0])\n",
|
||||
"print('Accuracy on test data (FC model):', scores[1])"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "83S9Lr-Bq9gD"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
}
|
||||
]
|
||||
}
|
||||
@ -0,0 +1,346 @@
|
||||
{
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 0,
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"provenance": [],
|
||||
"gpuType": "T4"
|
||||
},
|
||||
"kernelspec": {
|
||||
"name": "python3",
|
||||
"display_name": "Python 3"
|
||||
},
|
||||
"language_info": {
|
||||
"name": "python"
|
||||
},
|
||||
"accelerator": "GPU"
|
||||
},
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"**Пункт 1**"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "KR8uP1u_tFii"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "v8fjN3CMpmzp"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"os.chdir('/content/drive/MyDrive/Colab Notebooks/IS_LR3')"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# импорт модулей\n",
|
||||
"from tensorflow import keras\n",
|
||||
"from tensorflow.keras import layers\n",
|
||||
"from tensorflow.keras.models import Sequential\n",
|
||||
"import matplotlib.pyplot as plt\n",
|
||||
"import numpy as np\n",
|
||||
"from sklearn.metrics import classification_report, confusion_matrix\n",
|
||||
"from sklearn.metrics import ConfusionMatrixDisplay"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "VMuk53SHqFE6"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"**Пункт 2**"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "bie8IdvhtMwI"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# загрузка датасета\n",
|
||||
"from keras.datasets import cifar10\n",
|
||||
"\n",
|
||||
"(X_train, y_train), (X_test, y_test) = cifar10.load_data()"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "zU_qTq3QpSaj"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"**Пункт 3**"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "EKz2pMH5tPgM"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# создание своего разбиения датасета\n",
|
||||
"from sklearn.model_selection import train_test_split\n",
|
||||
"\n",
|
||||
"# объединяем в один набор\n",
|
||||
"X = np.concatenate((X_train, X_test))\n",
|
||||
"y = np.concatenate((y_train, y_test))\n",
|
||||
"\n",
|
||||
"# разбиваем по вариантам\n",
|
||||
"X_train, X_test, y_train, y_test = train_test_split(X, y,\n",
|
||||
" test_size = 10000,\n",
|
||||
" train_size = 50000,\n",
|
||||
" random_state = 15)"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "Tj2SdIX6qjyS"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# вывод размерностей\n",
|
||||
"print('Shape of X train:', X_train.shape)\n",
|
||||
"print('Shape of y train:', y_train.shape)\n",
|
||||
"\n",
|
||||
"print('Shape of X test:', X_test.shape)\n",
|
||||
"print('Shape of y test:', y_test.shape)"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "rxfIoGknpVr2"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# вывод 25 изображений из обучающей выборки с подписями классов\n",
|
||||
"class_names = ['airplane', 'automobile', 'bird', 'cat', 'deer',\n",
|
||||
" 'dog', 'frog', 'horse', 'ship', 'truck']\n",
|
||||
"\n",
|
||||
"plt.figure(figsize=(10,10))\n",
|
||||
"for i in range(25):\n",
|
||||
" plt.subplot(5,5,i+1)\n",
|
||||
" plt.xticks([])\n",
|
||||
" plt.yticks([])\n",
|
||||
" plt.grid(False)\n",
|
||||
" plt.imshow(X_train[i])\n",
|
||||
" plt.xlabel(class_names[y_train[i][0]])\n",
|
||||
"plt.show()"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "ELkzGpxQpYss"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"**Пункт 4**"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "R8UnsPwFtcT6"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Зададим параметры данных и модели\n",
|
||||
"num_classes = 10\n",
|
||||
"input_shape = (32, 32, 3)\n",
|
||||
"\n",
|
||||
"# Приведение входных данных к диапазону [0, 1]\n",
|
||||
"X_train = X_train / 255\n",
|
||||
"X_test = X_test / 255\n",
|
||||
"\n",
|
||||
"# Расширяем размерность входных данных, чтобы каждое изображение имело\n",
|
||||
        "# размерность (высота, ширина, количество каналов);\n",
        "# для CIFAR-10 этот шаг не требуется: изображения уже имеют форму (32, 32, 3)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"print('Shape of transformed X train:', X_train.shape)\n",
|
||||
"print('Shape of transformed X test:', X_test.shape)\n",
|
||||
"\n",
|
||||
"# переведем метки в one-hot\n",
|
||||
"y_train = keras.utils.to_categorical(y_train, num_classes)\n",
|
||||
"y_test = keras.utils.to_categorical(y_test, num_classes)\n",
|
||||
"print('Shape of transformed y train:', y_train.shape)\n",
|
||||
"print('Shape of transformed y test:', y_test.shape)"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "tLtI_dWgpb5Q"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"**Пункт 5**"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "OQTGDyuytpyz"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# создаем модель\n",
|
||||
"model = Sequential()\n",
|
||||
"model.add(layers.Conv2D(32, kernel_size=(3, 3), activation=\"relu\", input_shape=input_shape))\n",
|
||||
"model.add(layers.MaxPooling2D(pool_size=(2, 2)))\n",
|
||||
"model.add(layers.Conv2D(64, kernel_size=(3, 3), activation=\"relu\"))\n",
|
||||
"model.add(layers.MaxPooling2D(pool_size=(2, 2)))\n",
|
||||
"model.add(layers.Conv2D(128, kernel_size=(3, 3), activation=\"relu\"))\n",
|
||||
"model.add(layers.MaxPooling2D(pool_size=(2, 2)))\n",
|
||||
"model.add(layers.Flatten())\n",
|
||||
"model.add(layers.Dense(128, activation='relu'))\n",
|
||||
"model.add(layers.Dropout(0.5))\n",
|
||||
"model.add(layers.Dense(num_classes, activation=\"softmax\"))\n",
|
||||
"\n",
|
||||
"model.summary()"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "fchBhH0mpffb"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"batch_size = 512\n",
|
||||
"epochs = 15\n",
|
||||
"model.compile(loss=\"categorical_crossentropy\", optimizer=\"adam\", metrics=[\"accuracy\"])\n",
|
||||
"model.fit(X_train, y_train, batch_size=batch_size, epochs=epochs, validation_split=0.1)"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "pt4hPpfLpiAR"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"**Пункт 6**"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "CyI5uGgetwim"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Оценка качества работы модели на тестовых данных\n",
|
||||
"scores = model.evaluate(X_test, y_test)\n",
|
||||
"print('Loss on test data:', scores[0])\n",
|
||||
"print('Accuracy on test data:', scores[1])"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "niQVFBRnpklL"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"**Пункт 7**"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "-Os4bCnAtzCP"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# ПРАВИЛЬНО распознанное изображение\n",
|
||||
"n = 10\n",
|
||||
"result = model.predict(X_test[n:n+1])\n",
|
||||
"print('NN output:', result)\n",
|
||||
"\n",
|
||||
"plt.imshow(X_test[n])\n",
|
||||
"plt.show()\n",
|
||||
"\n",
|
||||
"print('Real class: ', np.argmax(y_test[n]), '->', class_names[np.argmax(y_test[n])])\n",
|
||||
"print('NN answer:', np.argmax(result), '->', class_names[np.argmax(result)])"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "oLC2nN-MpnVD"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# НЕВЕРНО распознанное изображение\n",
|
||||
"n = 0\n",
|
||||
"result = model.predict(X_test[n:n+1])\n",
|
||||
"print('NN output:', result)\n",
|
||||
"\n",
|
||||
"plt.imshow(X_test[n])\n",
|
||||
"plt.show()\n",
|
||||
"\n",
|
||||
"print('Real class: ', np.argmax(y_test[n]), '->', class_names[np.argmax(y_test[n])])\n",
|
||||
"print('NN answer:', np.argmax(result), '->', class_names[np.argmax(result)])"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "qMkBgHiqppyZ"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"**Пункт 8**"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "RVk_bSDct3Km"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# истинные метки классов\n",
|
||||
"true_labels = np.argmax(y_test, axis=1)\n",
|
||||
"\n",
|
||||
"# предсказанные метки классов\n",
|
||||
"predicted_labels = np.argmax(model.predict(X_test), axis=1)\n",
|
||||
"\n",
|
||||
"# отчет о качестве классификации\n",
|
||||
"print(classification_report(true_labels, predicted_labels, target_names=class_names))\n",
|
||||
"\n",
|
||||
"# вычисление матрицы ошибок\n",
|
||||
"conf_matrix = confusion_matrix(true_labels, predicted_labels)\n",
|
||||
"\n",
|
||||
"# отрисовка матрицы ошибок в виде \"тепловой карты\"\n",
|
||||
"display = ConfusionMatrixDisplay(confusion_matrix=conf_matrix,\n",
|
||||
" display_labels=class_names)\n",
|
||||
"display.plot(xticks_rotation=45)\n",
|
||||
"plt.show()"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "isaoRHSXpLSA"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
}
|
||||
]
|
||||
}
|
||||
@ -1,9 +0,0 @@
|
||||
## Лабораторная работа №3
|
||||
|
||||
## Распознавание изображений
|
||||
|
||||
* [Задание](IS_Lab03_2023.pdf)
|
||||
|
||||
* [Методические указания](IS_Lab03_Metod_2023.pdf)
|
||||
|
||||
* <a href="https://youtube.com/playlist?list=PLZDCDMGmelH-pHt-Ij0nImVrOmj8DYKbB" target="_blank">Плейлист с видео о сверточных сетях</a>
|
||||
@ -0,0 +1,622 @@
|
||||
# Лабораторная работа №3: Распознавание изображений
|
||||
**Пивоваров Я.В; Сидора Д.А. — А-02-22**
|
||||
## Номер бригады - 4
|
||||
|
||||
### Цель работы
|
||||
|
||||
Получить практические навыки создания, обучения и применения сверточных нейронных сетей для распознавания изображений. Познакомиться с
|
||||
классическими показателями качества классификации.
|
||||
|
||||
### Определение варианта
|
||||
|
||||
- Номер бригады: k = 4
|
||||
- random_state = (4k - 1) = 15
|
||||
|
||||
### Подготовка среды
|
||||
|
||||
```python
|
||||
import os
|
||||
os.chdir('/content/drive/MyDrive/Colab Notebooks/IS_LR3')
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## ЗАДАНИЕ 1
|
||||
|
||||
### Пункт №1. Импорт необходимых для работы библиотек и модулей.
|
||||
```python
|
||||
from tensorflow import keras
|
||||
from tensorflow.keras import layers
|
||||
from tensorflow.keras.models import Sequential
|
||||
import matplotlib.pyplot as plt
|
||||
import numpy as np
|
||||
from sklearn.metrics import classification_report, confusion_matrix
|
||||
from sklearn.metrics import ConfusionMatrixDisplay
|
||||
```
|
||||
|
||||
### Пункт №2. Загрузка набора данных MNIST.
|
||||
```python
|
||||
from keras.datasets import mnist
|
||||
(X_train, y_train), (X_test, y_test) = mnist.load_data()
|
||||
```
|
||||
|
||||
### Пункт №3. Разбиение набора данных на обучающие и тестовые данные.
|
||||
|
||||
```python
|
||||
# создание своего разбиения датасета
|
||||
from sklearn.model_selection import train_test_split
|
||||
|
||||
# объединяем в один набор
|
||||
X = np.concatenate((X_train, X_test))
|
||||
y = np.concatenate((y_train, y_test))
|
||||
|
||||
# разбиваем по вариантам
|
||||
X_train, X_test, y_train, y_test = train_test_split(X, y,
|
||||
test_size = 10000,
|
||||
train_size = 60000,
|
||||
random_state = 15)
|
||||
|
||||
# вывод размерностей
|
||||
print('Shape of X train:', X_train.shape)
|
||||
print('Shape of y train:', y_train.shape)
|
||||
|
||||
print('Shape of X test:', X_test.shape)
|
||||
print('Shape of y test:', y_test.shape)
|
||||
```
|
||||
|
||||
**Результат выполнения:**
|
||||
```
|
||||
Shape of X train: (60000, 28, 28)
|
||||
Shape of y train: (60000,)
|
||||
Shape of X test: (10000, 28, 28)
|
||||
Shape of y test: (10000,)
|
||||
```
|
||||
|
||||
### Пункт №4. Проведение предобработки данных.
|
||||
|
||||
```python
|
||||
# Зададим параметры данных и модели
|
||||
num_classes = 10
|
||||
input_shape = (28, 28, 1)
|
||||
|
||||
# Приведение входных данных к диапазону [0, 1]
|
||||
X_train = X_train / 255
|
||||
X_test = X_test / 255
|
||||
|
||||
# Расширяем размерность входных данных, чтобы каждое изображение имело
|
||||
# размерность (высота, ширина, количество каналов)
|
||||
|
||||
X_train = np.expand_dims(X_train, -1)
|
||||
X_test = np.expand_dims(X_test, -1)
|
||||
print('Shape of transformed X train:', X_train.shape)
|
||||
print('Shape of transformed X test:', X_test.shape)
|
||||
|
||||
# переведем метки в one-hot
|
||||
y_train = keras.utils.to_categorical(y_train, num_classes)
|
||||
y_test = keras.utils.to_categorical(y_test, num_classes)
|
||||
print('Shape of transformed y train:', y_train.shape)
|
||||
print('Shape of transformed y test:', y_test.shape)
|
||||
```
|
||||
|
||||
**Результат выполнения:**
|
||||
```
|
||||
Shape of transformed X train: (60000, 28, 28, 1)
|
||||
Shape of transformed X test: (10000, 28, 28, 1)
|
||||
Shape of transformed y train: (60000, 10)
|
||||
Shape of transformed y test: (10000, 10)
|
||||
```
|
||||
|
||||
### Пункт №5. Реализация модели сверточной нейронной сети и ее обучение.
|
||||
|
||||
```python
|
||||
# создаем модель
|
||||
model = Sequential()
|
||||
model.add(layers.Conv2D(32, kernel_size=(3, 3), activation="relu", input_shape=input_shape))
|
||||
model.add(layers.MaxPooling2D(pool_size=(2, 2)))
|
||||
model.add(layers.Conv2D(64, kernel_size=(3, 3), activation="relu"))
|
||||
model.add(layers.MaxPooling2D(pool_size=(2, 2)))
|
||||
model.add(layers.Dropout(0.5))
|
||||
model.add(layers.Flatten())
|
||||
model.add(layers.Dense(num_classes, activation="softmax"))
|
||||
|
||||
model.summary()
|
||||
```
|
||||
|
||||
**Результат выполнения:**
|
||||
|
||||
| Layer (type) | Output Shape | Param # |
|
||||
|-----------------------|-------------------|---------|
|
||||
| conv2d (Conv2D) | (None, 26, 26, 32) | 320 |
|
||||
| max_pooling2d (MaxPooling2D) | (None, 13, 13, 32) | 0 |
|
||||
| conv2d_1 (Conv2D) | (None, 11, 11, 64) | 18,496 |
|
||||
| max_pooling2d_1 (MaxPooling2D) | (None, 5, 5, 64) | 0 |
|
||||
| dropout (Dropout) | (None, 5, 5, 64) | 0 |
|
||||
| flatten (Flatten) | (None, 1600) | 0 |
|
||||
| dense (Dense) | (None, 10) | 16,010 |
|
||||
|
||||
**Model: "sequential"**
|
||||
**Total params:** 34,826 (136.04 KB)
|
||||
**Trainable params:** 34,826 (136.04 KB)
|
||||
**Non-trainable params:** 0 (0.00 B)
|
||||
|
||||
```
|
||||
batch_size = 512
|
||||
epochs = 15
|
||||
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
|
||||
model.fit(X_train, y_train, batch_size=batch_size, epochs=epochs, validation_split=0.1)
|
||||
```
|
||||
|
||||
### Пункт №6. Оценка качества обучения на тестовых данных.
|
||||
|
||||
```python
|
||||
# Оценка качества работы модели на тестовых данных
|
||||
scores = model.evaluate(X_test, y_test)
|
||||
print('Loss on test data:', scores[0])
|
||||
print('Accuracy on test data:', scores[1])
|
||||
```
|
||||
|
||||
**Результат выполнения:**
|
||||
```
|
||||
accuracy: 0.9879 - loss: 0.0347
|
||||
Loss on test data: 0.029493918642401695
|
||||
Accuracy on test data: 0.9897000193595886
|
||||
```
|
||||
|
||||
### Пункт №7. Выведение изображения, истинных меток и результатов распознавания.
|
||||
|
||||
```python
|
||||
# вывод первого тестового изображения и результата распознавания
|
||||
n = 123
|
||||
result = model.predict(X_test[n:n+1])
|
||||
print('NN output:', result)
|
||||
plt.show()
|
||||
plt.imshow(X_test[n].reshape(28,28), cmap=plt.get_cmap('gray'))
|
||||
print('Real mark: ', np.argmax(y_test[n]))
|
||||
print('NN answer: ', np.argmax(result))
|
||||
```
|
||||
|
||||

|
||||
|
||||
**Результат выполнения:**
|
||||
```
|
||||
NN output: [[2.3518596e-06 6.1697922e-09 4.1554195e-08 9.1088831e-10 6.7171044e-08
|
||||
4.2173593e-07 9.9999619e-01 4.8130029e-12 9.8848705e-07 3.4416045e-10]]
|
||||
Real mark: 6
|
||||
NN answer: 6
|
||||
```
|
||||
|
||||
```python
|
||||
# вывод второго тестового изображения и результата распознавания
|
||||
n = 110
|
||||
result = model.predict(X_test[n:n+1])
|
||||
print('NN output:', result)
|
||||
plt.show()
|
||||
plt.imshow(X_test[n].reshape(28,28), cmap=plt.get_cmap('gray'))
|
||||
print('Real mark: ', np.argmax(y_test[n]))
|
||||
print('NN answer: ', np.argmax(result))
|
||||
```
|
||||
|
||||

|
||||
|
||||
**Результат выполнения:**
|
||||
```
|
||||
NN output: [[1.0611644e-07 1.0055461e-09 2.8356731e-06 1.8714800e-05 1.1500048e-09
|
||||
8.8623995e-07 1.1646066e-07 5.1164142e-12 9.9997735e-01 4.7718437e-08]]
|
||||
Real mark: 8
|
||||
NN answer: 8
|
||||
```
|
||||
|
||||
### Пункт №8. Вывод отчета о качестве классификации тестовой выборки.
|
||||
|
||||
```python
|
||||
# истинные метки классов
|
||||
true_labels = np.argmax(y_test, axis=1)
|
||||
# предсказанные метки классов
|
||||
predicted_labels = np.argmax(model.predict(X_test), axis=1)
|
||||
|
||||
# отчет о качестве классификации
|
||||
print(classification_report(true_labels, predicted_labels))
|
||||
# вычисление матрицы ошибок
|
||||
conf_matrix = confusion_matrix(true_labels, predicted_labels)
|
||||
# отрисовка матрицы ошибок в виде "тепловой карты"
|
||||
display = ConfusionMatrixDisplay(confusion_matrix=conf_matrix)
|
||||
display.plot()
|
||||
plt.show()
|
||||
```
|
||||
|
||||

|
||||
|
||||
**Результат выполнения:**
|
||||
```
|
||||
precision recall f1-score support
|
||||
|
||||
0 0.99 0.99 0.99 980
|
||||
1 0.99 1.00 0.99 1135
|
||||
2 0.98 0.99 0.99 1032
|
||||
3 0.99 0.99 0.99 1010
|
||||
4 0.99 0.99 0.99 982
|
||||
5 0.99 0.99 0.99 892
|
||||
6 1.00 0.99 0.99 958
|
||||
7 0.99 0.98 0.99 1028
|
||||
8 0.98 0.99 0.99 974
|
||||
9 0.99 0.98 0.99 1009
|
||||
|
||||
accuracy 0.99 10000
|
||||
macro avg 0.99 0.99 0.99 10000
|
||||
weighted avg 0.99 0.99 0.99 10000
|
||||
```
|
||||
|
||||
### Пункт №9. Подача на вход обученной нейронной сети собственного изображения.
|
||||
|
||||
```python
|
||||
# загрузка собственного изображения 1
|
||||
from PIL import Image
|
||||
file_data = Image.open('test.png')
|
||||
file_data = file_data.convert('L') # перевод в градации серого
|
||||
test_img = np.array(file_data)
|
||||
|
||||
# вывод собственного изображения
|
||||
plt.imshow(test_img, cmap=plt.get_cmap('gray'))
|
||||
plt.show()
|
||||
|
||||
# предобработка
|
||||
test_img = test_img / 255
|
||||
test_img = np.reshape(test_img, (1,28,28,1))
|
||||
|
||||
# распознавание
|
||||
result = model.predict(test_img)
|
||||
print('I think it\'s ', np.argmax(result))
|
||||
```
|
||||
|
||||

|
||||
|
||||
**Результат выполнения:**
|
||||
```
|
||||
I think it's 2
|
||||
```
|
||||
|
||||
```python
|
||||
# загрузка собственного изображения 2
|
||||
from PIL import Image
|
||||
file_data = Image.open('test_2.png')
|
||||
file_data = file_data.convert('L') # перевод в градации серого
|
||||
test_img = np.array(file_data)
|
||||
|
||||
# вывод собственного изображения
|
||||
plt.imshow(test_img, cmap=plt.get_cmap('gray'))
|
||||
plt.show()
|
||||
|
||||
# предобработка
|
||||
test_img = test_img / 255
|
||||
test_img = np.reshape(test_img, (1,28,28,1))
|
||||
|
||||
# распознавание
|
||||
result = model.predict(test_img)
|
||||
print('I think it\'s ', np.argmax(result))
|
||||
```
|
||||
|
||||

|
||||
|
||||
**Результат выполнения:**
|
||||
```
|
||||
I think it's 8
|
||||
```
|
||||
|
||||
### Пункт №10. Загрузка с диска модели, сохраненной при выполнении лабораторной работы №1.
|
||||
|
||||
```python
|
||||
# путь к сохранённой модели из ЛР1
|
||||
model_fc = keras.models.load_model('/content/drive/MyDrive/Colab Notebooks/best_model/model100.keras')
|
||||
|
||||
# архитектура модели
|
||||
model_fc.summary()
|
||||
```
|
||||
|
||||
**Результат выполнения:**
|
||||
|
||||
| Layer (type) | Output Shape | Param # |
|
||||
|--------------------|-------------------|---------|
|
||||
| dense_1 (Dense) | (None, 100) | 78,500 |
|
||||
| dense_2 (Dense) | (None, 10) | 1,010 |
|
||||
|
||||
**Model: "sequential_1"**
|
||||
**Total params:** 79,512 (310.60 KB)
|
||||
**Trainable params:** 79,510 (310.59 KB)
|
||||
**Non-trainable params:** 0 (0.00 B)
|
||||
**Optimizer params:** 2 (12.00 B)
|
||||
|
||||
```python
|
||||
# подготовка тестовых данных для полносвязной модели
|
||||
X_test_fc = X_test.reshape(X_test.shape[0], 28*28) # (10000, 784)
|
||||
y_test_fc = y_test  # метки уже переведены в one-hot при предобработке (п. 4)
|
||||
|
||||
# оценка качества, как в п. 6
|
||||
scores = model_fc.evaluate(X_test_fc, y_test_fc, verbose=0)
|
||||
print('Loss on test data (FC model):', scores[0])
|
||||
print('Accuracy on test data (FC model):', scores[1])
|
||||
```
|
||||
|
||||
**Результат выполнения:**
|
||||
```
|
||||
Loss on test data (FC model): 0.19745591282844543
|
||||
Accuracy on test data (FC model): 0.9442999958992004
|
||||
```
|
||||
|
||||
### Пункт №11. Сравнение обученной модели сверточной сети и наилучшей модели полносвязной сети из лабораторной работы №1.
|
||||
|
||||
**Сравнение моделей:**
|
||||
```
|
||||
Количество настраиваемых параметров в сети:
|
||||
Сверточная сеть: 34 826 параметров.
|
||||
Полносвязная сеть: 79 512 параметров.
|
||||
При том что число параметров сверточной сети меньше в 2 раза, она показывает более высокие результаты. Это связано с более эффективным использованием весов за счёт свёрток и фильтров.
|
||||
|
||||
Количество эпох обучения:
|
||||
Сверточная сеть обучалась 15 эпох.
|
||||
Полносвязная сеть обучалась 100 эпох.
|
||||
Сверточная модель достигает лучшего результата при меньшем количестве эпох, то есть сходится быстрее и обучается эффективнее.
|
||||
|
||||
Качество классификации тестовой выборки:
|
||||
Сверточная сеть: Accuracy ≈ 0.989, loss ≈ 0.025.
|
||||
Полносвязная сеть: Accuracy ≈ 0.944, loss ≈ 0.197.
|
||||
Сверточная нейросеть точнее на 4,5 процента, при этом её ошибка почти в 8 раз меньше.
|
||||
|
||||
Вывод:
|
||||
Использование сверточной нейронной сети для распознавания изображений даёт ощутимо лучший результат по сравнению с полносвязной моделью. Сверточная нейронная сеть требует меньше параметров, быстрее обучается и точнее распознаёт изображения, поскольку учитывает их структуру и выделяет важные визуальные особенности.
|
||||
```
|
||||
|
||||
## ЗАДАНИЕ 2
|
||||
|
||||
### Пункт №1. Импорт необходимых для работы библиотек и модулей.
|
||||
```python
|
||||
from tensorflow import keras
|
||||
from tensorflow.keras import layers
|
||||
from tensorflow.keras.models import Sequential
|
||||
import matplotlib.pyplot as plt
|
||||
import numpy as np
|
||||
from sklearn.metrics import classification_report, confusion_matrix
|
||||
from sklearn.metrics import ConfusionMatrixDisplay
|
||||
```
|
||||
|
||||
### Пункт №2. Загрузка набора данных CIFAR-10.
|
||||
```python
|
||||
from keras.datasets import cifar10
|
||||
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
|
||||
```
|
||||
|
||||
### Пункт №3. Разбиение набора данных на обучающие и тестовые данные.
|
||||
|
||||
```python
|
||||
# создание своего разбиения датасета
|
||||
from sklearn.model_selection import train_test_split
|
||||
|
||||
# объединяем в один набор
|
||||
X = np.concatenate((X_train, X_test))
|
||||
y = np.concatenate((y_train, y_test))
|
||||
|
||||
# разбиваем по вариантам
|
||||
X_train, X_test, y_train, y_test = train_test_split(X, y,
|
||||
test_size = 10000,
|
||||
train_size = 50000,
|
||||
random_state = 15)
|
||||
# вывод размерностей
|
||||
print('Shape of X train:', X_train.shape)
|
||||
print('Shape of y train:', y_train.shape)
|
||||
|
||||
print('Shape of X test:', X_test.shape)
|
||||
print('Shape of y test:', y_test.shape)
|
||||
```
|
||||
|
||||
**Результат выполнения:**
|
||||
```
|
||||
Shape of X train: (50000, 32, 32, 3)
|
||||
Shape of y train: (50000, 1)
|
||||
Shape of X test: (10000, 32, 32, 3)
|
||||
Shape of y test: (10000, 1)
|
||||
```
|
||||
|
||||
```python
|
||||
# вывод 25 изображений из обучающей выборки с подписями классов
|
||||
class_names = ['airplane', 'automobile', 'bird', 'cat', 'deer',
|
||||
'dog', 'frog', 'horse', 'ship', 'truck']
|
||||
|
||||
plt.figure(figsize=(10,10))
|
||||
for i in range(25):
|
||||
plt.subplot(5,5,i+1)
|
||||
plt.xticks([])
|
||||
plt.yticks([])
|
||||
plt.grid(False)
|
||||
plt.imshow(X_train[i])
|
||||
plt.xlabel(class_names[y_train[i][0]])
|
||||
plt.show()
|
||||
```
|
||||
|
||||

|
||||
|
||||
### Пункт №4. Проведение предобработки данных.
|
||||
|
||||
```python
|
||||
# Зададим параметры данных и модели
|
||||
num_classes = 10
|
||||
input_shape = (32, 32, 3)
|
||||
|
||||
# Приведение входных данных к диапазону [0, 1]
|
||||
X_train = X_train / 255
|
||||
X_test = X_test / 255
|
||||
|
||||
# Расширяем размерность входных данных, чтобы каждое изображение имело
|
||||
# размерность (высота, ширина, количество каналов);
# для CIFAR-10 этот шаг не требуется: изображения уже имеют форму (32, 32, 3)
|
||||
|
||||
|
||||
print('Shape of transformed X train:', X_train.shape)
|
||||
print('Shape of transformed X test:', X_test.shape)
|
||||
|
||||
# переведем метки в one-hot
|
||||
y_train = keras.utils.to_categorical(y_train, num_classes)
|
||||
y_test = keras.utils.to_categorical(y_test, num_classes)
|
||||
print('Shape of transformed y train:', y_train.shape)
|
||||
print('Shape of transformed y test:', y_test.shape)
|
||||
```
|
||||
|
||||
**Результат выполнения:**
|
||||
```
|
||||
Shape of transformed X train: (50000, 32, 32, 3)
|
||||
Shape of transformed X test: (10000, 32, 32, 3)
|
||||
Shape of transformed y train: (50000, 10)
|
||||
Shape of transformed y test: (10000, 10)
|
||||
```
|
||||
|
||||
### Пункт №5. Реализация модели сверточной нейронной сети и ее обучение.
|
||||
|
||||
```python
|
||||
# создаем модель
|
||||
model = Sequential()
|
||||
model.add(layers.Conv2D(32, kernel_size=(3, 3), activation="relu", input_shape=input_shape))
|
||||
model.add(layers.MaxPooling2D(pool_size=(2, 2)))
|
||||
model.add(layers.Conv2D(64, kernel_size=(3, 3), activation="relu"))
|
||||
model.add(layers.MaxPooling2D(pool_size=(2, 2)))
|
||||
model.add(layers.Conv2D(128, kernel_size=(3, 3), activation="relu"))
|
||||
model.add(layers.MaxPooling2D(pool_size=(2, 2)))
|
||||
model.add(layers.Flatten())
|
||||
model.add(layers.Dense(128, activation='relu'))
|
||||
model.add(layers.Dropout(0.5))
|
||||
model.add(layers.Dense(num_classes, activation="softmax"))
|
||||
|
||||
model.summary()
|
||||
```
|
||||
|
||||
**Результат выполнения:**
|
||||
|
||||
| Layer (type) | Output Shape | Param # |
|
||||
|-----------------------|-------------------|---------|
|
||||
| conv2d_2 (Conv2D) | (None, 30, 30, 32) | 896 |
|
||||
| max_pooling2d_2 (MaxPooling2D) | (None, 15, 15, 32) | 0 |
|
||||
| conv2d_3 (Conv2D) | (None, 13, 13, 64) | 18,496 |
|
||||
| max_pooling2d_3 (MaxPooling2D) | (None, 6, 6, 64) | 0 |
|
||||
| conv2d_4 (Conv2D) | (None, 4, 4, 128) | 73,856 |
|
||||
| max_pooling2d_4 (MaxPooling2D) | (None, 2, 2, 128) | 0 |
|
||||
| flatten_1 (Flatten) | (None, 512) | 0 |
|
||||
| dense_1 (Dense) | (None, 128) | 65,664 |
|
||||
| dropout_1 (Dropout) | (None, 128) | 0 |
|
||||
| dense_2 (Dense) | (None, 10) | 1,290 |
|
||||
|
||||
**Model: "sequential_1"**
|
||||
**Total params:** 160,202 (625.79 KB)
|
||||
**Trainable params:** 160,202 (625.79 KB)
|
||||
**Non-trainable params:** 0 (0.00 B)
|
||||
|
||||
```
|
||||
batch_size = 512
|
||||
epochs = 15
|
||||
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
|
||||
model.fit(X_train, y_train, batch_size=batch_size, epochs=epochs, validation_split=0.1)
|
||||
```
|
||||
|
||||
### Пункт №6. Оценка качества обучения на тестовых данных.
|
||||
|
||||
```python
|
||||
# Оценка качества работы модели на тестовых данных
|
||||
scores = model.evaluate(X_test, y_test)
|
||||
print('Loss on test data:', scores[0])
|
||||
print('Accuracy on test data:', scores[1])
|
||||
```
|
||||
|
||||
**Результат выполнения:**
|
||||
```
|
||||
accuracy: 0.6606 - loss: 0.9661
|
||||
Loss on test data: 0.9636631608009338
|
||||
Accuracy on test data: 0.6610000133514404
|
||||
```
|
||||
|
||||
### Пункт №7. Выведение изображения, истинных меток и результатов распознавания.
|
||||
|
||||
```python
|
||||
# правильно распознанное изображение
|
||||
n = 10
|
||||
result = model.predict(X_test[n:n+1])
|
||||
print('NN output:', result)
|
||||
|
||||
plt.imshow(X_test[n])
|
||||
plt.show()
|
||||
|
||||
print('Real class: ', np.argmax(y_test[n]), '->', class_names[np.argmax(y_test[n])])
|
||||
print('NN answer:', np.argmax(result), '->', class_names[np.argmax(result)])
|
||||
```
|
||||
|
||||

|
||||
|
||||
**Результат выполнения:**
|
||||
```
|
||||
NN output: [[0.10896349 0.00272794 0.09209334 0.0585838 0.10545123 0.01161931 0.02007959 0.00301177 0.5926725 0.00479708]]
|
||||
|
||||
Real class: 8 -> ship
|
||||
NN answer: 8 -> ship
|
||||
```
|
||||
|
||||
```python
|
||||
# неверно распознанное изображение
|
||||
n = 9
|
||||
result = model.predict(X_test[n:n+1])
|
||||
print('NN output:', result)
|
||||
|
||||
plt.imshow(X_test[n])
|
||||
plt.show()
|
||||
|
||||
print('Real class: ', np.argmax(y_test[n]), '->', class_names[np.argmax(y_test[n])])
|
||||
print('NN answer:', np.argmax(result), '->', class_names[np.argmax(result)])
|
||||
```
|
||||
|
||||

|
||||
|
||||
**Результат выполнения:**
|
||||
```
|
||||
NN output: [[1.3848362e-03 7.8314954e-01 3.4030385e-05 9.6045173e-04 9.4775232e-06 1.3942986e-04 1.3377360e-03 4.9578721e-06 9.0055494e-03 2.0397398e-01]]
|
||||
|
||||
Real class: 9 -> truck
|
||||
NN answer: 1 -> automobile
|
||||
```
|
||||
|
||||
### Пункт №8. Вывод отчета о качестве классификации тестовой выборки.
|
||||
|
||||
```python
|
||||
# истинные метки классов
|
||||
true_labels = np.argmax(y_test, axis=1)
|
||||
|
||||
# предсказанные метки классов
|
||||
predicted_labels = np.argmax(model.predict(X_test), axis=1)
|
||||
|
||||
# отчет о качестве классификации
|
||||
print(classification_report(true_labels, predicted_labels, target_names=class_names))
|
||||
|
||||
# вычисление матрицы ошибок
|
||||
conf_matrix = confusion_matrix(true_labels, predicted_labels)
|
||||
|
||||
# отрисовка матрицы ошибок в виде "тепловой карты"
|
||||
display = ConfusionMatrixDisplay(confusion_matrix=conf_matrix,
|
||||
display_labels=class_names)
|
||||
display.plot(xticks_rotation=45)
|
||||
plt.show()
|
||||
```
|
||||
|
||||

|
||||
|
||||
**Результат выполнения:**
|
||||
```
|
||||
precision recall f1-score support
|
||||
|
||||
airplane 0.74 0.65 0.69 1015
|
||||
automobile 0.81 0.76 0.78 933
|
||||
bird 0.54 0.54 0.54 1010
|
||||
cat 0.50 0.40 0.44 1025
|
||||
deer 0.60 0.61 0.60 998
|
||||
dog 0.52 0.62 0.57 1006
|
||||
frog 0.70 0.77 0.73 1010
|
||||
horse 0.73 0.70 0.71 1005
|
||||
ship 0.77 0.79 0.78 1001
|
||||
truck 0.72 0.78 0.75 997
|
||||
|
||||
accuracy 0.66 10000
|
||||
macro avg 0.66 0.66 0.66 10000
|
||||
weighted avg 0.66 0.66 0.66 10000
|
||||
|
||||
```
|
||||
@ -1,7 +0,0 @@
|
||||
## Лабораторныа работа №4
|
||||
|
||||
## Распознавание последовательностей
|
||||
|
||||
* [Задание](IS_Lab04_2023.pdf)
|
||||
|
||||
* [Методические указания](IS_Lab04_Metod_2023.pdf)
|
||||