Implementation of Image Processing Algorithms for Fracture Detection on Different Human Body Parts (Minor 02)
***
import os
import glob
import warnings
import numpy as np
import pandas as pd
import seaborn as sns
import tensorflow as tf
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
warnings.filterwarnings("ignore")
import sklearn.metrics as metrics
from tensorflow.keras import layers, models
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Activation, Flatten, Dense
from tensorflow.keras.optimizers import Adam, Adamax
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.applications.vgg16 import VGG16, preprocess_input  # preprocess_input is used by the VGG generators below
from sklearn.model_selection import cross_val_score, train_test_split
from sklearn.metrics import roc_auc_score, f1_score, classification_report, confusion_matrix
from tensorflow.keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
images_fr = []
folder = r'D:\Fracture Detection System (Minor2)\A Dataset\All Fractured'
for filename in os.listdir(folder):
    try:
        img = mpimg.imread(os.path.join(folder, filename))
        if img is not None:
            images_fr.append(img)
    except Exception:
        print("Can't import " + filename)
images_fr = np.asarray(images_fr, dtype=object)  # images differ in shape, so NumPy keeps a 1-D object array
images_fr
array([array([[...]], dtype=float32), ..., array([[...]], dtype=uint8)], dtype=object)
[Output truncated: one pixel array per image, with mixed shapes and dtypes (float32 RGBA values in [0, 1] from PNGs, uint8 values in [0, 255] from other formats). Because the shapes differ, NumPy stores the collection as a 1-D object array rather than a single 4-D image tensor.]
plt.imshow(images_fr[100])
<matplotlib.image.AxesImage at 0x13e8052e9a0>
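Because the raw files differ in size and channel count, the object array above cannot feed a CNN directly. Below is a minimal sketch of a loader that standardizes every image first; the 150×150 target matches the input shape used by all models in this notebook, while `load_folder`, the forced 3-channel conversion, and the [0, 1] scaling are assumptions about the intended preprocessing, not code from the original.

def load_folder(folder, target_size=(150, 150)):
    """Load every readable image in `folder`, resized to a common shape."""
    images = []
    for filename in os.listdir(folder):
        try:
            # color_mode='rgb' collapses RGBA/grayscale inputs to 3 channels
            img = load_img(os.path.join(folder, filename),
                           target_size=target_size, color_mode='rgb')
            images.append(img_to_array(img) / 255.0)  # scale pixels to [0, 1]
        except Exception:
            print("Can't import " + filename)
    return np.stack(images)  # a proper (N, 150, 150, 3) float array

images_fr_uniform = load_folder(folder)  # hypothetical variable, for illustration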
images_nonfr = []
folder = r'D:\Fracture Detection System (Minor2)\A Dataset\All Non Fractured'
for filename in os.listdir(folder):
    try:
        img = mpimg.imread(os.path.join(folder, filename))
        if img is not None:
            images_nonfr.append(img)
    except Exception:
        print("Can't import " + filename)
images_nonfr = np.asarray(images_nonfr, dtype=object)  # mixed shapes again, hence an object array
images_nonfr
array([array([[...]], dtype=uint8), ...], dtype=object)
[Output truncated: as with the fractured set, one uint8 pixel array per image with mixed shapes, stored as an object array.]
plt.imshow(images_nonfr[110])
<matplotlib.image.AxesImage at 0x13e805bc5e0>
train_datagen = ImageDataGenerator(
    preprocessing_function=tf.keras.applications.xception.preprocess_input,
    zoom_range=0.1,            # value truncated in the export; 0.1 assumed, matching the shift ranges
    width_shift_range=0.1,
    height_shift_range=0.1,
    validation_split=0.1)
test_datagen = ImageDataGenerator(
    preprocessing_function=tf.keras.applications.xception.preprocess_input)
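For reference, Xception's `preprocess_input` maps raw [0, 255] pixels to [-1, 1] (it divides by 127.5 and subtracts 1), so it replaces the `rescale=1./255` used elsewhere in the notebook. A quick check:

sample = np.array([[0.0, 127.5, 255.0]])
print(tf.keras.applications.xception.preprocess_input(sample))  # -> [[-1.  0.  1.]]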
model = Sequential()
model.add(Conv2D(32, (3, 3), input_shape=(150, 150, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
# Further layers were lost in the export; the 1,212,513-parameter summary below
# implies more conv/pool blocks, presumably ending in a single sigmoid unit
# given the binary_crossentropy loss.
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
model.summary()
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d (Conv2D) (None, 148, 148, 32) 896
=================================================================
Total params: 1,212,513
Trainable params: 1,212,513
Non-trainable params: 0
_________________________________________________________________
batch_size = 80
# this is the augmentation configuration we will use for training
# (note: this replaces the Xception-preprocessing datagen defined above)
train_datagen = ImageDataGenerator(
    rescale=1./255,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True)
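The `train_generator` and `validation_generator` passed to `fit_generator` below are never defined in the export. A minimal sketch consistent with the arithmetic of the call (2000 training and 800 validation images, i.e. 25 and 10 steps at batch size 80); the `Split_dataset` paths are assumptions, with the train path reused from a later cell:

val_datagen = ImageDataGenerator(rescale=1./255)  # validation data: rescale only, no augmentation

train_generator = train_datagen.flow_from_directory(
    r'D:\Fracture Detection System (Minor2)\Split_dataset\train',  # path reused from a later cell
    target_size=(150, 150),
    batch_size=batch_size,
    class_mode='binary')

validation_generator = val_datagen.flow_from_directory(
    r'D:\Fracture Detection System (Minor2)\Split_dataset\val',    # assumed path
    target_size=(150, 150),
    batch_size=batch_size,
    class_mode='binary')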
# Model Training
# (fit_generator is deprecated in newer Keras; model.fit accepts generators directly)
model.fit_generator(
    train_generator,
    steps_per_epoch=2000 // batch_size,
    epochs=20,
    validation_data=validation_generator,
    verbose=1,
    validation_steps=800 // batch_size)
Epoch 1/20
25/25 [==============================] - 24s 923ms/step - loss: 0.6949 - accuracy: 0.5488 - val_loss: 0.6901 - val_accuracy: 0.5775
Epoch 2/20
25/25 [==============================] - 20s 805ms/step - loss: 0.6868 - accuracy: 0.5672 - val_loss: 0.6836 - val_accuracy: 0.5863
Epoch 3/20
25/25 [==============================] - 20s 809ms/step - loss: 0.6802 - accuracy: 0.5815 - val_loss: 0.6783 - val_accuracy: 0.5775
Epoch 4/20
25/25 [==============================] - 20s 797ms/step - loss: 0.6805 - accuracy: 0.5760 - val_loss: 0.6775 - val_accuracy: 0.5850
Epoch 5/20
25/25 [==============================] - 20s 805ms/step - loss: 0.6780 - accuracy: 0.5910 - val_loss: 0.6895 - val_accuracy: 0.5625
Epoch 6/20
25/25 [==============================] - 20s 781ms/step - loss: 0.6801 - accuracy: 0.5821 - val_loss: 0.6805 - val_accuracy: 0.5750
Epoch 7/20
25/25 [==============================] - 20s 787ms/step - loss: 0.6750 - accuracy: 0.5893 - val_loss: 0.6763 - val_accuracy: 0.5913
Epoch 8/20
25/25 [==============================] - 20s 789ms/step - loss: 0.6734 - accuracy: 0.5811 - val_loss: 0.6850 - val_accuracy: 0.5725
Epoch 9/20
25/25 [==============================] - 20s 788ms/step - loss: 0.6721 - accuracy: 0.5924 - val_loss: 0.6834 - val_accuracy: 0.5738
Epoch 10/20
25/25 [==============================] - 20s 807ms/step - loss: 0.6731 - accuracy: 0.5785 - val_loss: 0.6694 - val_accuracy: 0.5975
Epoch 11/20
25/25 [==============================] - 21s 837ms/step - loss: 0.6714 - accuracy: 0.5845 - val_loss: 0.6747 - val_accuracy: 0.5875
Epoch 12/20
25/25 [==============================] - 21s 828ms/step - loss: 0.6721 - accuracy: 0.5780 - val_loss: 0.6797 - val_accuracy: 0.5863
Epoch 13/20
25/25 [==============================] - 21s 849ms/step - loss: 0.6747 - accuracy: 0.5760 - val_loss: 0.6778 - val_accuracy: 0.5938
Epoch 14/20
25/25 [==============================] - 21s 824ms/step - loss: 0.6730 - accuracy: 0.5796 - val_loss: 0.6743 - val_accuracy: 0.5800
Epoch 15/20
25/25 [==============================] - 21s 840ms/step - loss: 0.6712 - accuracy: 0.5800 - val_loss: 0.6802 - val_accuracy: 0.5850
Epoch 16/20
25/25 [==============================] - 20s 780ms/step - loss: 0.6759 - accuracy: 0.5708 - val_loss: 0.6802 - val_accuracy: 0.5800
Epoch 17/20
25/25 [==============================] - 19s 767ms/step - loss: 0.6707 - accuracy: 0.5770 - val_loss: 0.6884 - val_accuracy: 0.5825
Epoch 18/20
25/25 [==============================] - 19s 775ms/step - loss: 0.6699 - accuracy: 0.5930 - val_loss: 0.6829 - val_accuracy: 0.5850
Epoch 19/20
25/25 [==============================] - 19s 761ms/step - loss: 0.6665 - accuracy: 0.5970 - val_loss: 0.6829 - val_accuracy: 0.5738
Epoch 20/20
25/25 [==============================] - 19s 744ms/step - loss: 0.6639 - accuracy: 0.5903 - val_loss: 0.6751 - val_accuracy: 0.5838
<keras.callbacks.History at 0x13ea2047910>
model.save_weights('Cnnmodel_2class_model.h5')
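A small aside on this step: `save_weights` stores only the parameters, so reloading requires rebuilding the identical architecture first. Saving the full model avoids that; the filename below is hypothetical:

# model.save also stores the architecture, so the model can be reloaded
# without reconstructing it layer by layer.
model.save('Cnnmodel_2class_full.h5')
reloaded = tf.keras.models.load_model('Cnnmodel_2class_full.h5')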
Define Classes
batch_size01 = 32
# this is the augmentation configuration we will use for training
train_datagen = ImageDataGenerator(
    rescale=1./255,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True)
cnn = models.Sequential([
    layers.Conv2D(filters=32, kernel_size=(3, 3), activation='relu', input_shape=(150, 150, 3)),
    layers.MaxPooling2D((2, 2)),
    layers.Flatten(),
    layers.Dense(64, activation='relu'),
    layers.Dense(20, activation='softmax')   # one unit per body-part/fracture class
])
cnn.compile(loss='sparse_categorical_crossentropy',  # expects integer class labels
            optimizer='adam',
            metrics=['accuracy'])
cnn.summary()
Model: "sequential_1"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d_3 (Conv2D) (None, 148, 148, 32) 896
=================================================================
Total params: 5,329,172
Trainable params: 5,329,172
Non-trainable params: 0
_________________________________________________________________
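`train_generator01` and `validation_generator01`, used by the fit call below, are also missing from the export. Because the model is compiled with `sparse_categorical_crossentropy`, the generators must yield integer labels, i.e. `class_mode='sparse'`; a sketch with assumed paths:

train_generator01 = train_datagen.flow_from_directory(
    r'D:\Fracture Detection System (Minor2)\Split_dataset\train',  # assumed path
    target_size=(150, 150),
    batch_size=batch_size01,
    class_mode='sparse')   # integer labels for sparse_categorical_crossentropy

validation_generator01 = ImageDataGenerator(rescale=1./255).flow_from_directory(
    r'D:\Fracture Detection System (Minor2)\Split_dataset\val',    # assumed path
    target_size=(150, 150),
    batch_size=batch_size01,
    class_mode='sparse')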
Model Training
cnn.fit_generator(
    train_generator01,
    steps_per_epoch=3000 // batch_size01,
    epochs=50,
    validation_data=validation_generator01,
    verbose=1,
    validation_steps=1000 // batch_size01)
Epoch 1/50
93/93 [==============================] - 26s 280ms/step - loss: 1.9236 - accuracy: 0.3075
Epoch 2/50
93/93 [==============================] - 26s 283ms/step - loss: 1.8503 - accuracy: 0.3220
Epoch 3/50
93/93 [==============================] - 26s 280ms/step - loss: 1.8135 - accuracy: 0.3462
Epoch 4/50
93/93 [==============================] - 27s 284ms/step - loss: 1.7318 - accuracy: 0.3688
Epoch 5/50
93/93 [==============================] - 26s 281ms/step - loss: 1.6980 - accuracy: 0.3964
Epoch 6/50
93/93 [==============================] - 26s 276ms/step - loss: 1.6408 - accuracy: 0.4014
Epoch 7/50
93/93 [==============================] - 26s 277ms/step - loss: 1.5789 - accuracy: 0.4458
Epoch 8/50
93/93 [==============================] - 26s 281ms/step - loss: 1.5465 - accuracy: 0.4374
Epoch 9/50
93/93 [==============================] - 26s 282ms/step - loss: 1.5081 - accuracy: 0.4536
Epoch 10/50
93/93 [==============================] - 26s 279ms/step - loss: 1.4605 - accuracy: 0.4727
Epoch 11/50
93/93 [==============================] - 26s 279ms/step - loss: 1.4271 - accuracy: 0.4929
Epoch 12/50
93/93 [==============================] - 26s 284ms/step - loss: 1.3987 - accuracy: 0.4966
Epoch 13/50
93/93 [==============================] - 27s 284ms/step - loss: 1.3524 - accuracy: 0.5178
Epoch 14/50
93/93 [==============================] - 26s 276ms/step - loss: 1.3149 - accuracy: 0.5302
Epoch 15/50
93/93 [==============================] - 26s 276ms/step - loss: 1.2818 - accuracy: 0.5427
Epoch 16/50
93/93 [==============================] - 26s 280ms/step - loss: 1.2562 - accuracy: 0.5505
Epoch 17/50
93/93 [==============================] - 26s 282ms/step - loss: 1.2126 - accuracy: 0.5619
Epoch 18/50
93/93 [==============================] - 26s 277ms/step - loss: 1.1654 - accuracy: 0.5764
Epoch 19/50
93/93 [==============================] - 26s 276ms/step - loss: 1.1620 - accuracy: 0.5828
Epoch 20/50
93/93 [==============================] - 26s 277ms/step - loss: 1.1102 - accuracy: 0.5935
Epoch 21/50
93/93 [==============================] - 26s 277ms/step - loss: 1.0854 - accuracy: 0.6083
Epoch 22/50
93/93 [==============================] - 26s 279ms/step - loss: 1.0843 - accuracy: 0.6211
Epoch 23/50
93/93 [==============================] - 26s 277ms/step - loss: 1.0580 - accuracy: 0.6228
Epoch 24/50
93/93 [==============================] - 26s 281ms/step - loss: 1.0085 - accuracy: 0.6460
Epoch 25/50
93/93 [==============================] - 26s 279ms/step - loss: 1.0099 - accuracy: 0.6447
Epoch 26/50
93/93 [==============================] - 27s 287ms/step - loss: 0.9686 - accuracy: 0.6484
Epoch 27/50
93/93 [==============================] - 26s 283ms/step - loss: 0.9987 - accuracy: 0.6491
Epoch 28/50
93/93 [==============================] - 26s 281ms/step - loss: 0.9087 - accuracy: 0.6699
Epoch 29/50
93/93 [==============================] - 26s 280ms/step - loss: 0.9228 - accuracy: 0.6682
Epoch 30/50
93/93 [==============================] - 26s 278ms/step - loss: 0.9130 - accuracy: 0.6746
Epoch 31/50
93/93 [==============================] - 26s 279ms/step - loss: 0.8645 - accuracy: 0.6904
Epoch 32/50
93/93 [==============================] - 27s 284ms/step - loss: 0.9070 - accuracy: 0.6797
Epoch 33/50
93/93 [==============================] - 26s 284ms/step - loss: 0.8514 - accuracy: 0.6925
Epoch 34/50
93/93 [==============================] - 27s 288ms/step - loss: 0.8564 - accuracy: 0.7059
Epoch 35/50
93/93 [==============================] - 26s 283ms/step - loss: 0.8196 - accuracy: 0.7110
Epoch 36/50
93/93 [==============================] - 27s 286ms/step - loss: 0.8123 - accuracy: 0.7059
Epoch 37/50
93/93 [==============================] - 26s 279ms/step - loss: 0.8072 - accuracy: 0.7022
Epoch 38/50
93/93 [==============================] - 26s 279ms/step - loss: 0.7775 - accuracy: 0.7298
Epoch 39/50
93/93 [==============================] - 26s 280ms/step - loss: 0.7630 - accuracy: 0.7338
Epoch 40/50
93/93 [==============================] - 27s 284ms/step - loss: 0.7512 - accuracy: 0.7301
Epoch 41/50
93/93 [==============================] - 26s 283ms/step - loss: 0.7780 - accuracy: 0.7281
Epoch 42/50
93/93 [==============================] - 26s 281ms/step - loss: 0.7440 - accuracy: 0.7328
Epoch 43/50
93/93 [==============================] - 27s 284ms/step - loss: 0.7183 - accuracy: 0.7476
Epoch 44/50
93/93 [==============================] - 27s 286ms/step - loss: 0.7353 - accuracy: 0.7312
Epoch 45/50
93/93 [==============================] - 27s 287ms/step - loss: 0.6946 - accuracy: 0.7561
Epoch 46/50
93/93 [==============================] - 27s 286ms/step - loss: 0.7065 - accuracy: 0.7490
Epoch 47/50
93/93 [==============================] - 27s 287ms/step - loss: 0.7021 - accuracy: 0.7510
Epoch 48/50
93/93 [==============================] - 27s 291ms/step - loss: 0.6906 - accuracy: 0.7587
Epoch 49/50
93/93 [==============================] - 27s 287ms/step - loss: 0.6672 - accuracy: 0.7732
Epoch 50/50
93/93 [==============================] - 26s 283ms/step - loss: 0.6489 - accuracy: 0.7715
<keras.callbacks.History at 0x13eb977d490>
cnn.save_weights('Cnnmodel_10class_model.h5')
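The cells that define `folders` and the VGG base `model_vgg` are missing from the export. The summary below (150×150×3 input, 14,722,881 total parameters, all trainable) matches a VGG16 convolutional base topped by a flattened single-unit head, so a plausible, hedged reconstruction is:

# One subfolder per class; glob on the training directory yields len(folders) == 57 here.
folders = glob.glob(r'D:\Fracture Detection System (Minor2)\Split_dataset\train\*')  # assumed path

model_vgg = VGG16(input_shape=(150, 150, 3), weights='imagenet', include_top=False)
# All layers are left trainable, matching "Trainable params: 14,722,881" in the summary.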
len(folders)
57
x = Flatten()(model_vgg.output)
prediction = Dense(len(folders) - 56, activation='sigmoid')(x)  # 57 - 56 = 1 sigmoid unit, i.e. a binary output
vgg_model = Model(inputs=model_vgg.input, outputs=prediction)
vgg_model.summary()
Model: "model_5"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_1 (InputLayer) [(None, 150, 150, 3)] 0
=================================================================
Total params: 14,722,881
Trainable params: 14,722,881
Non-trainable params: 0
_________________________________________________________________
vgg_model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
Image Augmentation
train_datagen01 = ImageDataGenerator(
    preprocessing_function=preprocess_input,
    rotation_range=40,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
    fill_mode='nearest')
# Note: applying random augmentation to the test/validation data is unusual;
# evaluation data normally receives only the preprocessing_function.
test_datagen01 = ImageDataGenerator(
    preprocessing_function=preprocess_input,
    rotation_range=40,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
    fill_mode='nearest')
train_generator02 = train_datagen01.flow_from_directory(
    r'D:\Fracture Detection System (Minor2)\Split_dataset\train',  # this is the target directory
    target_size=(150, 150),   # all images will be resized to 150x150
    batch_size=batch_size01,
    class_mode='binary')      # since we use binary_crossentropy loss, we need binary labels
checkpoint = ModelCheckpoint(filepath='vgg16model.h5',
                             verbose=1, save_best_only=True)
callbacks = [checkpoint]
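`validation_generator02`, referenced in the fit call below, never appears in the export; a sketch with an assumed validation directory:

validation_generator02 = test_datagen01.flow_from_directory(
    r'D:\Fracture Detection System (Minor2)\Split_dataset\val',  # assumed path
    target_size=(150, 150),
    batch_size=batch_size01,
    class_mode='binary')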
vgg_model_history = vgg_model.fit_generator(
    train_generator02,
    validation_data=validation_generator02,
    epochs=20,
    steps_per_epoch=15,
    validation_steps=32,
    callbacks=callbacks,
    verbose=1)
Epoch 1/20
15/15 [==============================] - ETA: 0s - loss: 115.1899 - accuracy: 0.5229
WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 32 batches). You may need to use the repeat() function when building your dataset.
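The warning fires because the hard-coded `validation_steps=32` asks for more validation batches than the generator holds. Deriving the step counts from the generators avoids this; a sketch using the `samples` attribute that `flow_from_directory` iterators expose:

# Derive step counts from the data rather than hard-coding them.
steps = train_generator02.samples // batch_size01
val_steps = max(1, validation_generator02.samples // batch_size01)

vgg_model_history = vgg_model.fit(
    train_generator02,
    validation_data=validation_generator02,
    epochs=20,
    steps_per_epoch=steps,
    validation_steps=val_steps,
    callbacks=callbacks,
    verbose=1)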
batch_size02 = 128
train_generator03 = train_datagen01.flow_from_directory(
    r'D:\Fracture Detection System (Minor2)\Split_dataset\train',  # this is the target directory
    target_size=(150, 150),   # all images will be resized to 150x150
    batch_size=batch_size02,
    class_mode='binary')      # since we use binary_crossentropy loss, we need binary labels
pretrained_model = tf.keras.applications.ResNet50(
    include_top=False,
    input_shape=(150, 150, 3),
    pooling='avg',
    weights='imagenet')   # pre-trained weights taken from the ImageNet dataset
# (the original also passed classes=5, which Keras ignores when include_top=False)
for layer in pretrained_model.layers:
    layer.trainable = False   # freeze the backbone; only the new head trains

resnet_model = Sequential()   # this initialization was missing from the export
resnet_model.add(pretrained_model)
resnet_model.add(Flatten())
resnet_model.add(Dense(512, activation='sigmoid'))
# BUG (visible in the training log below): softmax over a single unit always
# outputs 1.0, so the categorical cross-entropy loss is identically zero and
# nothing is learned. Dense(1, activation='sigmoid') with binary_crossentropy
# is the correct pairing; a corrected sketch follows the log.
resnet_model.add(Dense(1, activation='softmax'))
resnet_model.summary()
Model: "sequential_4"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
resnet50 (Functional) (None, 2048) 23587712
=================================================================
Total params: 24,638,850
Trainable params: 1,051,138
Non-trainable params: 23,587,712
_________________________________________________________________
resnet_model.compile(optimizer=Adam(learning_rate=0.001),  # `lr` is deprecated in newer Keras
                     loss='categorical_crossentropy',      # mismatched with the 1-unit head; see note above
                     metrics=['accuracy'])
Model Training
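The cell that launched this run is absent from the export. A hedged reconstruction consistent with the log below (28 steps per epoch, i.e. roughly 3,500 training images at batch size 128, with validation metrics reported); reusing `validation_generator02` here is an assumption:

resnet_model.fit(
    train_generator03,
    validation_data=validation_generator02,  # assumed; the actual validation source is not shown
    epochs=20,
    verbose=1)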
Epoch 1/20
28/28 [==============================] - 70s 2s/step - loss: 0.0000e+00 - accuracy: 0.4207 - val_loss: 0.0000e+00 - val_accuracy: 0.4208
Epoch 2/20
28/28 [==============================] - 67s 2s/step - loss: 0.0000e+00 - accuracy: 0.4207 - val_loss: 0.0000e+00 - val_accuracy: 0.4208
...
Epoch 20/20
28/28 [==============================] - 68s 2s/step - loss: 0.0000e+00 - accuracy: 0.4207 - val_loss: 0.0000e+00 - val_accuracy: 0.4208
(epochs 3-19 report the same values: the single-unit softmax head pins the output at 1.0, so the loss stays at exactly zero and accuracy never moves)
# Note: this reuses the CNN's filename and overwrites 'Cnnmodel_10class_model.h5'
# saved above; a distinct name (e.g. 'Resnet_model.h5') would avoid the clash.
resnet_model.save_weights('Cnnmodel_10class_model.h5')
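For reference, a corrected head for the binary fracture task — a sketch, not the notebook's code — that would let the frozen ResNet50 backbone actually learn:

resnet_fixed = Sequential()
resnet_fixed.add(pretrained_model)                 # frozen ResNet50 backbone from above
resnet_fixed.add(Flatten())
resnet_fixed.add(Dense(512, activation='relu'))
resnet_fixed.add(Dense(1, activation='sigmoid'))   # single probability of "fractured"
resnet_fixed.compile(optimizer=Adam(learning_rate=0.001),
                     loss='binary_crossentropy',   # matches the 1-unit sigmoid output
                     metrics=['accuracy'])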
[Output of a plotting cell whose code was lost in the export: a legend handle from a training-history plot.]
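The legend handle above is the only trace of that cell. A sketch of the usual history plot it suggests, using the `vgg_model_history` object captured earlier (an assumption; the lost cell may have plotted a different model's history):

# Plot training vs. validation accuracy from the Keras History object.
plt.plot(vgg_model_history.history['accuracy'], label='train accuracy')
plt.plot(vgg_model_history.history['val_accuracy'], label='val accuracy')
plt.xlabel('epoch')
plt.ylabel('accuracy')
plt.legend()
plt.show()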
END