diff --git a/lw3 is/lw3.ipynb b/lw3 is/lw3.ipynb new file mode 100644 index 0000000..9ca3087 --- /dev/null +++ b/lw3 is/lw3.ipynb @@ -0,0 +1,419 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "colab": { + "provenance": [] + }, + "kernelspec": { + "name": "python3", + "display_name": "Python 3" + }, + "language_info": { + "name": "python" + } + }, + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "cjledORN0qWT" + }, + "outputs": [], + "source": [ + "from google.colab import drive\n", + "drive.mount('/content/drive')\n", + "\n", + "import os\n", + "os.chdir('/content/drive/MyDrive/Colab Notebooks/is_lab3')\n", + "\n", + "import numpy as np\n", + "import matplotlib.pyplot as plt\n", + "\n", + "from tensorflow import keras\n", + "from tensorflow.keras import layers\n", + "from tensorflow.keras.models import Sequential\n", + "\n", + "from sklearn.metrics import classification_report, confusion_matrix, ConfusionMatrixDisplay\n", + "from sklearn.model_selection import train_test_split\n", + "\n", + "import tensorflow as tf\n", + "tf.random.set_seed(123)\n", + "np.random.seed(123)\n" + ] + }, + { + "cell_type": "code", + "source": [ + "from keras.datasets import mnist\n", + "\n", + "(X_train_full, y_train_full), (X_test_full, y_test_full) = mnist.load_data()\n", + "\n", + "X = np.concatenate((X_train_full, X_test_full), axis=0)\n", + "y = np.concatenate((y_train_full, y_test_full), axis=0)" + ], + "metadata": { + "id": "ww1N34Ku1kDI" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "source": [ + "k = 5\n", + "random_state = 4 * k - 1\n", + "\n", + "X_train, X_test, y_train, y_test = train_test_split(\n", + " X, y, train_size=60000, test_size=10000, random_state=random_state, shuffle=True\n", + ")\n", + "\n", + "print('Shape of X_train:', X_train.shape)\n", + "print('Shape of y_train:', y_train.shape)\n", + "print('Shape of X_test:', X_test.shape)\n", + "print('Shape of y_test:', y_test.shape)\n" + ], + "metadata": { + "id": "N2SV0m7x1rTT" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "source": [ + "num_classes = 10\n", + "input_shape = (28, 28, 1)\n", + "\n", + "# приведение значений к диапазону [0,1]\n", + "X_train = X_train.astype('float32') / 255.0\n", + "X_test = X_test.astype('float32') / 255.0\n", + "\n", + "# добавление размерности каналов\n", + "X_train = np.expand_dims(X_train, -1)\n", + "X_test = np.expand_dims(X_test, -1)\n", + "\n", + "# one-hot кодирование меток\n", + "y_train_cat = keras.utils.to_categorical(y_train, num_classes)\n", + "y_test_cat = keras.utils.to_categorical(y_test, num_classes)\n", + "\n", + "print('Shape of transformed X_train:', X_train.shape)\n", + "print('Shape of transformed y_train:', y_train_cat.shape)\n", + "print('Shape of transformed X_test:', X_test.shape)\n", + "print('Shape of transformed y_test:', y_test_cat.shape)" + ], + "metadata": { + "id": "Ot_8FfXZ1y2I" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "source": [ + "batch_size = 512\n", + "epochs = 15\n", + "\n", + "model = Sequential()\n", + "model.add(layers.Conv2D(32, kernel_size=(3,3), activation='relu', input_shape=input_shape))\n", + "model.add(layers.MaxPooling2D(pool_size=(2,2)))\n", + "model.add(layers.Conv2D(64, kernel_size=(3,3), activation='relu'))\n", + "model.add(layers.MaxPooling2D(pool_size=(2,2)))\n", + "model.add(layers.Dropout(0.5))\n", + "model.add(layers.Flatten())\n", + "model.add(layers.Dense(num_classes, 
activation='softmax'))\n", + "\n", + "model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n", + "model.summary()\n", + "\n", + "history = model.fit(X_train, y_train_cat, batch_size=batch_size, epochs=epochs, validation_split=0.1)\n" + ], + "metadata": { + "id": "eDCmBb6p180Y" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "source": [ + "scores = model.evaluate(X_test, y_test_cat, verbose=2)\n", + "print('Loss on test data:', scores[0])\n", + "print('Accuracy on test data:', scores[1])" + ], + "metadata": { + "id": "oWRp1dA92Itj" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "source": [ + "indices = [0, 1]\n", + "for n in indices:\n", + " result = model.predict(X_test[n:n+1])\n", + " plt.figure()\n", + " plt.imshow(X_test[n].reshape(28,28), cmap='gray')\n", + " plt.title(f\"Real: {y_test[n]} Pred: {np.argmax(result)}\")\n", + " plt.axis('off')\n", + " plt.show()\n", + " print('NN output vector:', result)\n", + " print('Real mark:', y_test[n])\n", + " print('NN answer:', np.argmax(result))" + ], + "metadata": { + "id": "HRTwkJ0W69gd" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "source": [ + "true_labels = y_test\n", + "predicted_labels = np.argmax(model.predict(X_test), axis=1)\n", + "\n", + "print(classification_report(true_labels, predicted_labels))\n", + "conf_matrix = confusion_matrix(true_labels, predicted_labels)\n", + "display = ConfusionMatrixDisplay(confusion_matrix=conf_matrix)\n", + "display.plot()\n", + "plt.show()" + ], + "metadata": { + "id": "qGEMo-ZW7IxB" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "source": [ + "from PIL import Image\n", + "\n", + "img_path = '../5.png'\n", + "\n", + "file_data = Image.open(img_path)\n", + "file_data = file_data.convert('L') # перевод в градации серого\n", + "test_img = np.array(file_data)\n", + "\n", + "plt.imshow(test_img, cmap='gray')\n", + "plt.axis('off')\n", + "plt.show()\n", + "\n", + "# нормализация и изменение формы\n", + "test_proc = test_img.astype('float32') / 255.0\n", + "test_proc = np.reshape(test_proc, (1, 28, 28, 1))\n", + "\n", + "result = model.predict(test_proc)\n", + "print(\"NN output vector:\", result)\n", + "print(\"I think it's\", np.argmax(result))\n" + ], + "metadata": { + "id": "rjfX4LIP7ZTb" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "source": [ + "model_lr1_path = '../best_model_2x100.h5'\n", + "\n", + "if os.path.exists(model_lr1_path):\n", + " model_lr1 = load_model(model_lr1_path)\n", + " model_lr1.summary()\n", + "\n", + " # подготовка данных специально для полносвязной модели ЛР1\n", + " X_test_lr1 = X_test.reshape((X_test.shape[0], 28*28))\n", + " X_test_lr1 = X_test_lr1.astype('float32') / 255.0\n", + "\n", + " # здесь нужно использовать X_test_lr1 !\n", + " scores_lr1 = model_lr1.evaluate(X_test_lr1, y_test_cat, verbose=2)\n", + "\n", + " print('LR1 model - Loss on test data:', scores_lr1[0])\n", + " print('LR1 model - Accuracy on test data:', scores_lr1[1])\n", + "\n", + "else:\n", + " print(f\"Файл {model_lr1_path} не найден. 
Поместите сохранённую модель ЛР1 в рабочую директорию.\")\n" + ], + "metadata": { + "id": "rnMRFGLs7v-o" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "source": [ + "# возьмём оригинальные X, y — до всех преобразований для CNN\n", + "(X_train_full, y_train_full), (X_test_full, y_test_full) = mnist.load_data()\n", + "\n", + "# объединим, чтобы сделать то же разбиение, что и в ЛР1\n", + "X_all = np.concatenate((X_train_full, X_test_full), axis=0)\n", + "y_all = np.concatenate((y_train_full, y_test_full), axis=0)\n", + "\n", + "from sklearn.model_selection import train_test_split\n", + "X_train_l1, X_test_l1, y_train_l1, y_test_l1 = train_test_split(\n", + " X_all, y_all, train_size=60000, test_size=10000, random_state=19\n", + ")\n", + "\n", + "# теперь — подготовка данных ЛР1\n", + "X_test_lr1 = X_test_l1.reshape((X_test_l1.shape[0], 28*28)).astype('float32') / 255.0\n", + "y_test_lr1 = keras.utils.to_categorical(y_test_l1, 10)\n", + "\n", + "# оценка модели\n", + "scores_lr1 = model_lr1.evaluate(X_test_lr1, y_test_lr1, verbose=2)\n", + "print(scores_lr1)\n" + ], + "metadata": { + "id": "4aRHHa_v8Rkl" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "source": [ + "# загрузка сохранённой модели ЛР1\n", + "model_lr1_path = '../best_model_2x100.h5'\n", + "model_lr1 = load_model(model_lr1_path)\n", + "model_lr1.summary()\n", + "\n", + "# подготовка тестового набора для модели ЛР1\n", + "X_test_l1 = X_test_l1.reshape((X_test_l1.shape[0], 28 * 28)).astype('float32') / 255.0\n", + "y_test_l1_cat = keras.utils.to_categorical(y_test_l1, 10)\n", + "\n", + "# оценка модели ЛР1\n", + "scores_lr1 = model_lr1.evaluate(X_test_l1, y_test_l1_cat, verbose=2)\n", + "print('LR1 model - Loss:', scores_lr1[0])\n", + "print('LR1 model - Accuracy:', scores_lr1[1])\n", + "\n", + "# оценка сверточной модели ЛР3\n", + "scores_conv = model.evaluate(X_test, y_test_cat, verbose=2)\n", + "print('Conv model - Loss:', scores_conv[0])\n", + "print('Conv model - Accuracy:', scores_conv[1])\n", + "\n", + "# вывод числа параметров обеих моделей\n", + "print('LR1 model parameters:', model_lr1.count_params())\n", + "print('Conv model parameters:', model.count_params())\n" + ], + "metadata": { + "id": "N1oPuRH69nwK" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "source": [ + "from keras.datasets import cifar10\n", + "\n", + "(X_train_c, y_train_c), (X_test_c, y_test_c) = cifar10.load_data()\n", + "\n", + "print('Shapes (original):', X_train_c.shape, y_train_c.shape, X_test_c.shape, y_test_c.shape)\n", + "\n", + "class_names = ['airplane', 'automobile', 'bird', 'cat', 'deer',\n", + " 'dog', 'frog', 'horse', 'ship', 'truck']\n", + "\n", + "# вывод 25 изображений\n", + "plt.figure(figsize=(10,10))\n", + "for i in range(25):\n", + " plt.subplot(5,5,i+1)\n", + " plt.xticks([])\n", + " plt.yticks([])\n", + " plt.grid(False)\n", + " plt.imshow(X_train_c[i])\n", + " plt.xlabel(class_names[y_train_c[i][0]])\n", + "plt.show()\n" + ], + "metadata": { + "id": "hGnBZelW9y9Q" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "source": [ + "num_classes = 10\n", + "input_shape_cifar = (32, 32, 3)\n", + "\n", + "X_train_c = X_train_c.astype('float32') / 255.0\n", + "X_test_c = X_test_c.astype('float32') / 255.0\n", + "\n", + "y_train_c_cat = keras.utils.to_categorical(y_train_c, num_classes)\n", + "y_test_c_cat = keras.utils.to_categorical(y_test_c, num_classes)\n", + "\n", + "print('Transformed shapes:', 
X_train_c.shape, y_train_c_cat.shape, X_test_c.shape, y_test_c_cat.shape)\n" + ], + "metadata": { + "id": "VgA73god-gj_" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "source": [ + "model_cifar = Sequential()\n", + "model_cifar.add(layers.Conv2D(32, (3,3), activation='relu', input_shape=input_shape_cifar))\n", + "model_cifar.add(layers.MaxPooling2D((2,2)))\n", + "model_cifar.add(layers.Conv2D(64, (3,3), activation='relu'))\n", + "model_cifar.add(layers.MaxPooling2D((2,2)))\n", + "model_cifar.add(layers.Conv2D(128, (3,3), activation='relu'))\n", + "model_cifar.add(layers.MaxPooling2D((2,2)))\n", + "model_cifar.add(layers.Flatten())\n", + "model_cifar.add(layers.Dense(128, activation='relu'))\n", + "model_cifar.add(layers.Dropout(0.5))\n", + "model_cifar.add(layers.Dense(num_classes, activation='softmax'))\n", + "\n", + "model_cifar.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n", + "model_cifar.summary()\n", + "\n", + "batch_size = 512\n", + "epochs = 20\n", + "history_cifar = model_cifar.fit(X_train_c, y_train_c_cat, batch_size=batch_size, epochs=epochs, validation_split=0.1)" + ], + "metadata": { + "id": "e3EzTnNS-jhQ" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "source": [ + "scores_cifar = model_cifar.evaluate(X_test_c, y_test_c_cat, verbose=2)\n", + "print('CIFAR - Loss on test data:', scores_cifar[0])\n", + "print('CIFAR - Accuracy on test data:', scores_cifar[1])" + ], + "metadata": { + "id": "_1s1v6CUECcw" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "source": [ + "print(classification_report(true_cifar, preds_cifar, target_names=class_names))\n", + "\n", + "conf_matrix_cifar = confusion_matrix(true_cifar, preds_cifar)\n", + "display = ConfusionMatrixDisplay(confusion_matrix=conf_matrix_cifar,\n", + " display_labels=class_names)\n", + "\n", + "plt.figure(figsize=(10,10)) # figsize задаётся здесь\n", + "display.plot(cmap='Blues', colorbar=False) # без figsize\n", + "plt.xticks(rotation=45)\n", + "plt.show()\n" + ], + "metadata": { + "id": "ElVAWuiyEPW-" + }, + "execution_count": null, + "outputs": [] + } + ] +} \ No newline at end of file diff --git a/lw3 is/lw3.py b/lw3 is/lw3.py new file mode 100644 index 0000000..1eabed1 --- /dev/null +++ b/lw3 is/lw3.py @@ -0,0 +1,256 @@ +# -*- coding: utf-8 -*- +"""lw3.ipynb + +Automatically generated by Colab. 
+ +Original file is located at + https://colab.research.google.com/drive/1whkpae-DQ5QCfyJAnjIH0_Zff9zaT4po +""" + +from google.colab import drive +drive.mount('/content/drive') + +import os +os.chdir('/content/drive/MyDrive/Colab Notebooks/is_lab3') + +import numpy as np +import matplotlib.pyplot as plt + +from tensorflow import keras +from tensorflow.keras import layers +from tensorflow.keras.models import Sequential + +from sklearn.metrics import classification_report, confusion_matrix, ConfusionMatrixDisplay +from sklearn.model_selection import train_test_split + +import tensorflow as tf +tf.random.set_seed(123) +np.random.seed(123) + +from keras.datasets import mnist + +(X_train_full, y_train_full), (X_test_full, y_test_full) = mnist.load_data() + +X = np.concatenate((X_train_full, X_test_full), axis=0) +y = np.concatenate((y_train_full, y_test_full), axis=0) + +k = 5 +random_state = 4 * k - 1 + +X_train, X_test, y_train, y_test = train_test_split( + X, y, train_size=60000, test_size=10000, random_state=random_state, shuffle=True +) + +print('Shape of X_train:', X_train.shape) +print('Shape of y_train:', y_train.shape) +print('Shape of X_test:', X_test.shape) +print('Shape of y_test:', y_test.shape) + +num_classes = 10 +input_shape = (28, 28, 1) + +# приведение значений к диапазону [0,1] +X_train = X_train.astype('float32') / 255.0 +X_test = X_test.astype('float32') / 255.0 + +# добавление размерности каналов +X_train = np.expand_dims(X_train, -1) +X_test = np.expand_dims(X_test, -1) + +# one-hot кодирование меток +y_train_cat = keras.utils.to_categorical(y_train, num_classes) +y_test_cat = keras.utils.to_categorical(y_test, num_classes) + +print('Shape of transformed X_train:', X_train.shape) +print('Shape of transformed y_train:', y_train_cat.shape) +print('Shape of transformed X_test:', X_test.shape) +print('Shape of transformed y_test:', y_test_cat.shape) + +batch_size = 512 +epochs = 15 + +model = Sequential() +model.add(layers.Conv2D(32, kernel_size=(3,3), activation='relu', input_shape=input_shape)) +model.add(layers.MaxPooling2D(pool_size=(2,2))) +model.add(layers.Conv2D(64, kernel_size=(3,3), activation='relu')) +model.add(layers.MaxPooling2D(pool_size=(2,2))) +model.add(layers.Dropout(0.5)) +model.add(layers.Flatten()) +model.add(layers.Dense(num_classes, activation='softmax')) + +model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) +model.summary() + +history = model.fit(X_train, y_train_cat, batch_size=batch_size, epochs=epochs, validation_split=0.1) + +scores = model.evaluate(X_test, y_test_cat, verbose=2) +print('Loss on test data:', scores[0]) +print('Accuracy on test data:', scores[1]) + +indices = [0, 1] +for n in indices: + result = model.predict(X_test[n:n+1]) + plt.figure() + plt.imshow(X_test[n].reshape(28,28), cmap='gray') + plt.title(f"Real: {y_test[n]} Pred: {np.argmax(result)}") + plt.axis('off') + plt.show() + print('NN output vector:', result) + print('Real mark:', y_test[n]) + print('NN answer:', np.argmax(result)) + +true_labels = y_test +predicted_labels = np.argmax(model.predict(X_test), axis=1) + +print(classification_report(true_labels, predicted_labels)) +conf_matrix = confusion_matrix(true_labels, predicted_labels) +display = ConfusionMatrixDisplay(confusion_matrix=conf_matrix) +display.plot() +plt.show() + +from PIL import Image + +img_path = '../5.png' + +file_data = Image.open(img_path) +file_data = file_data.convert('L') # перевод в градации серого +test_img = np.array(file_data) + +plt.imshow(test_img, 
cmap='gray')
+plt.axis('off')
+plt.show()
+
+# normalize and reshape (assumes 5.png is already a 28x28 grayscale image
+# with a light digit on a dark background, like the MNIST samples)
+test_proc = test_img.astype('float32') / 255.0
+test_proc = np.reshape(test_proc, (1, 28, 28, 1))
+
+result = model.predict(test_proc)
+print("NN output vector:", result)
+print("I think it's", np.argmax(result))
+
+# needed to restore the saved Keras model from LR1
+from tensorflow.keras.models import load_model
+
+model_lr1_path = '../best_model_2x100.h5'
+
+if os.path.exists(model_lr1_path):
+    model_lr1 = load_model(model_lr1_path)
+    model_lr1.summary()
+
+    # prepare the data specifically for the fully connected LR1 model
+    # (X_test is already float32 and scaled to [0, 1], so only reshape it)
+    X_test_lr1 = X_test.reshape((X_test.shape[0], 28*28))
+
+    # note: X_test_lr1 must be used here, not X_test!
+    scores_lr1 = model_lr1.evaluate(X_test_lr1, y_test_cat, verbose=2)
+
+    print('LR1 model - Loss on test data:', scores_lr1[0])
+    print('LR1 model - Accuracy on test data:', scores_lr1[1])
+
+else:
+    print(f"File {model_lr1_path} not found. Put the saved LR1 model into the working directory.")
+
+# take the original X, y again, before any of the CNN-specific preprocessing
+(X_train_full, y_train_full), (X_test_full, y_test_full) = mnist.load_data()
+
+# concatenate so that we can reproduce the same split as in LR1
+X_all = np.concatenate((X_train_full, X_test_full), axis=0)
+y_all = np.concatenate((y_train_full, y_test_full), axis=0)
+
+from sklearn.model_selection import train_test_split
+X_train_l1, X_test_l1, y_train_l1, y_test_l1 = train_test_split(
+    X_all, y_all, train_size=60000, test_size=10000, random_state=19
+)
+
+# now prepare the data the way the LR1 model expects it
+X_test_lr1 = X_test_l1.reshape((X_test_l1.shape[0], 28*28)).astype('float32') / 255.0
+y_test_lr1 = keras.utils.to_categorical(y_test_l1, 10)
+
+# evaluate the model
+scores_lr1 = model_lr1.evaluate(X_test_lr1, y_test_lr1, verbose=2)
+print(scores_lr1)
+
+# load the saved LR1 model
+model_lr1_path = '../best_model_2x100.h5'
+model_lr1 = load_model(model_lr1_path)
+model_lr1.summary()
+
+# prepare the test set for the LR1 model
+X_test_l1 = X_test_l1.reshape((X_test_l1.shape[0], 28 * 28)).astype('float32') / 255.0
+y_test_l1_cat = keras.utils.to_categorical(y_test_l1, 10)
+
+# evaluate the LR1 model
+scores_lr1 = model_lr1.evaluate(X_test_l1, y_test_l1_cat, verbose=2)
+print('LR1 model - Loss:', scores_lr1[0])
+print('LR1 model - Accuracy:', scores_lr1[1])
+
+# evaluate the LR3 convolutional model
+scores_conv = model.evaluate(X_test, y_test_cat, verbose=2)
+print('Conv model - Loss:', scores_conv[0])
+print('Conv model - Accuracy:', scores_conv[1])
+
+# print the number of parameters of both models
+print('LR1 model parameters:', model_lr1.count_params())
+print('Conv model parameters:', model.count_params())
+
+from keras.datasets import cifar10
+
+(X_train_c, y_train_c), (X_test_c, y_test_c) = cifar10.load_data()
+
+print('Shapes (original):', X_train_c.shape, y_train_c.shape, X_test_c.shape, y_test_c.shape)
+
+class_names = ['airplane', 'automobile', 'bird', 'cat', 'deer',
+               'dog', 'frog', 'horse', 'ship', 'truck']
+
+# show the first 25 training images with their class names
+plt.figure(figsize=(10,10))
+for i in range(25):
+    plt.subplot(5,5,i+1)
+    plt.xticks([])
+    plt.yticks([])
+    plt.grid(False)
+    plt.imshow(X_train_c[i])
+    plt.xlabel(class_names[y_train_c[i][0]])
+plt.show()
+
+num_classes = 10
+input_shape_cifar = (32, 32, 3)
+
+X_train_c = X_train_c.astype('float32') / 255.0
+X_test_c = X_test_c.astype('float32') / 255.0
+
+y_train_c_cat = keras.utils.to_categorical(y_train_c, num_classes)
+y_test_c_cat = keras.utils.to_categorical(y_test_c, num_classes)
+
+print('Transformed shapes:', X_train_c.shape, y_train_c_cat.shape, X_test_c.shape, y_test_c_cat.shape)
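+
+# Optional sketch (not part of the original lab code): visualize the training and
+# validation curves recorded in `history` by the MNIST fit call above, before
+# moving on to CIFAR-10. Assumes the usual Keras history keys ('accuracy',
+# 'val_accuracy', 'loss', 'val_loss') produced by metrics=['accuracy'] together
+# with validation_split.
+plt.figure(figsize=(10, 4))
+plt.subplot(1, 2, 1)
+plt.plot(history.history['accuracy'], label='train accuracy')
+plt.plot(history.history['val_accuracy'], label='validation accuracy')
+plt.xlabel('epoch')
+plt.legend()
+plt.subplot(1, 2, 2)
+plt.plot(history.history['loss'], label='train loss')
+plt.plot(history.history['val_loss'], label='validation loss')
+plt.xlabel('epoch')
+plt.legend()
+plt.show()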
+
+model_cifar = Sequential()
+model_cifar.add(layers.Conv2D(32, (3,3), activation='relu', input_shape=input_shape_cifar))
+model_cifar.add(layers.MaxPooling2D((2,2)))
+model_cifar.add(layers.Conv2D(64, (3,3), activation='relu'))
+model_cifar.add(layers.MaxPooling2D((2,2)))
+model_cifar.add(layers.Conv2D(128, (3,3), activation='relu'))
+model_cifar.add(layers.MaxPooling2D((2,2)))
+model_cifar.add(layers.Flatten())
+model_cifar.add(layers.Dense(128, activation='relu'))
+model_cifar.add(layers.Dropout(0.5))
+model_cifar.add(layers.Dense(num_classes, activation='softmax'))
+
+model_cifar.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
+model_cifar.summary()
+
+batch_size = 512
+epochs = 20
+history_cifar = model_cifar.fit(X_train_c, y_train_c_cat, batch_size=batch_size, epochs=epochs, validation_split=0.1)
+
+scores_cifar = model_cifar.evaluate(X_test_c, y_test_c_cat, verbose=2)
+print('CIFAR - Loss on test data:', scores_cifar[0])
+print('CIFAR - Accuracy on test data:', scores_cifar[1])
+
+# predicted and true class labels for the CIFAR-10 test set
+preds_cifar = np.argmax(model_cifar.predict(X_test_c), axis=1)
+true_cifar = y_test_c.flatten()
+
+print(classification_report(true_cifar, preds_cifar, target_names=class_names))
+
+conf_matrix_cifar = confusion_matrix(true_cifar, preds_cifar)
+display = ConfusionMatrixDisplay(confusion_matrix=conf_matrix_cifar,
+                                 display_labels=class_names)
+
+# pass an explicit Axes so that the figsize is actually applied
+# (display.plot() creates its own figure when no ax is given)
+fig, ax = plt.subplots(figsize=(10, 10))
+display.plot(ax=ax, cmap='Blues', colorbar=False)
+plt.xticks(rotation=45)
+plt.show()
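+
+# Optional sketch (not part of the original lab code): show a few CIFAR-10 test
+# images with their true and predicted class names, mirroring the MNIST prediction
+# demo earlier in this script. Assumes `preds_cifar`, `true_cifar`, `class_names`
+# and the normalized `X_test_c` defined above.
+plt.figure(figsize=(8, 3))
+for i in range(4):
+    plt.subplot(1, 4, i + 1)
+    plt.imshow(X_test_c[i])
+    plt.title(f"true: {class_names[true_cifar[i]]}\npred: {class_names[preds_cifar[i]]}", fontsize=8)
+    plt.axis('off')
+plt.show()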