ANN Detection Technique
The Complete Guide to Artificial Neural Networks

Department: Computer Sciences

Semester: 8th

Subject: Artificial Neural Network

Lecturer: Dr. Dostdar Hussain


Model: ViT (Vision Transformer)

In [1]:
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import pickle
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, classification_report
import matplotlib.pyplot as plt
import itertools
import random
from tensorflow.keras.preprocessing.image import ImageDataGenerator

In [ ]:
!git clone https://github.com/umair54321/personality-traits.git

Cloning into 'personality-traits'...


remote: Enumerating objects: 26828, done.
remote: Total 26828 (delta 0), reused 0 (delta 0), pack-reused 26828
Receiving objects: 100% (26828/26828), 284.84 MiB | 17.00 MiB/s, done.
Resolving deltas: 100% (1/1), done.
Updating files: 100% (26811/26811), done.

In [ ]:
# Set up directories
train_dir = '/content/personality-traits/DataSet/train'
validation_dir = '/content/personality-traits/DataSet/validation'
test_dir = '/content/personality-traits/DataSet/test'

In [ ]:
# Preprocessing: rescale pixel values to [0, 1] (no further augmentation is applied here)
train_datagen = ImageDataGenerator(rescale=1./255)
test_datagen = ImageDataGenerator(rescale=1./255)

In [ ]:
train_generator = train_datagen.flow_from_directory(
    train_dir,
    target_size=(160, 160),   # Resize images to 160x160 to match the model's input shape
    batch_size=32,
    class_mode='categorical'  # One-hot labels for the 5 trait classes
)

validation_generator = test_datagen.flow_from_directory(
    validation_dir,
    target_size=(160, 160),   # Must match the training size and the ViT input shape
    batch_size=32,
    class_mode='categorical'
)

Found 18765 images belonging to 5 classes.


Found 4022 images belonging to 5 classes.
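The mapping from trait folders to one-hot label positions can be confirmed directly on the generators; a quick check (the exact folder names depend on the cloned dataset):

# Inspect the label order used by flow_from_directory (folder name -> class index).
print(train_generator.class_indices)
print(validation_generator.class_indices)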

In [ ]:
# def load_and_preprocess_data(file_paths):
#
#
#
# return np.array(X_all), np.array(Y_all)

In [ ]:
# # Load and combine dataset from multiple files
# pickle_file_paths = [
# '/content/drive/MyDrive/Colab Notebooks/Training-test-data/train_clselfie_v8_1.pi
# # '/content/drive/MyDrive/Colab Notebooks/Training-test-data/train_clselfie_v8_1.
# # '/content/drive/MyDrive/Colab Notebooks/Training-test-data/train_clselfie_v8_2.
# # '/content/drive/MyDrive/Colab Notebooks/Training-test-data/train_clselfie_v8_3.
# ]
# X, Y = load_and_preprocess_data(pickle_file_paths)

In [ ]:
# # Split the data into train, validation, and test sets
# train_X, test_X, train_Y, test_Y = train_test_split(X, Y, test_size=0.3, random_state
# val_X, test_X, val_Y, test_Y = train_test_split(test_X, test_Y, test_size=0.5, random

In [ ]:
# Configure the hyperparameters
learning_rate = 0.001
num_classes = 5 # Adjust based on your dataset
input_shape = (160,160,3) # Assumes (height, width, channels)
image_size = 160 # Resize if necessary
patch_size = 16 # Adjust based on your image size
num_patches = (image_size // patch_size) ** 2
projection_dim = 64
num_heads = 4
transformer_units = [
projection_dim * 2,
projection_dim,
]
transformer_layers = 8
mlp_head_units = [2048, 1024]
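With these settings the patch arithmetic works out as follows; a small sanity check of the numbers that reappear in the outputs further down:

# 160x160 images split into 16x16 patches -> a 10x10 grid of patches.
patches_per_side = image_size // patch_size                  # 160 // 16 = 10
print("Patches per image:", patches_per_side ** 2)           # 100
print("Elements per patch:", patch_size * patch_size * 3)    # 16 * 16 * 3 = 768 (RGB)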

In [ ]:
# # Data augmentation
# data_augmentation = keras.Sequential(
# [
# layers.Resizing(image_size, image_size),
# layers.RandomFlip("horizontal"),
# layers.RandomRotation(factor=0.02),
# layers.RandomZoom(height_factor=0.2, width_factor=0.2),
# layers.Normalization(),

# ],
# name="data_augmentation",
# )
# # Compute the mean and variance of the training data for normalization.
# data_augmentation.layers[-1].adapt(train_X)

In [ ]:
def mlp(x, hidden_units, dropout_rate):
    # Feed-forward block used inside each Transformer layer and as the classification head.
    for units in hidden_units:
        x = layers.Dense(units, activation=tf.nn.gelu)(x)
        x = layers.Dropout(dropout_rate)(x)
    return x

In [ ]:
# Implement patch creation as a layer
class Patches(layers.Layer):
    def __init__(self, patch_size):
        super().__init__()
        self.patch_size = patch_size

    def call(self, images):
        # Split each image into non-overlapping patch_size x patch_size patches
        # and flatten each patch into a vector.
        batch_size = tf.shape(images)[0]
        patches = tf.image.extract_patches(
            images=images,
            sizes=[1, self.patch_size, self.patch_size, 1],
            strides=[1, self.patch_size, self.patch_size, 1],
            rates=[1, 1, 1, 1],
            padding="VALID",
        )
        patch_dims = patches.shape[-1]
        patches = tf.reshape(patches, [batch_size, -1, patch_dims])
        return patches

In [ ]:
x_train, y_train = train_generator.next()
x_test, y_test = validation_generator.next()
x_train.shape

Out[ ]:
(16, 160, 160, 3)
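Note that `.next()` returns a single batch from each generator, so `x_train` and `x_test` hold one batch rather than the full dataset; this is why the training log below shows `1/1` steps per epoch. If the full generators were to be used instead, `model.fit` accepts them directly; a hedged sketch, not part of the original run:

# Alternative (not used here): train on the full generators rather than one cached batch.
# history = model.fit(
#     train_generator,
#     validation_data=validation_generator,
#     epochs=50,
# )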

In [ ]:
plt.figure(figsize=(4, 4))

x = train_generator.next()
image = x[0][0]

plt.imshow(image)
plt.axis('off')

resized_image = tf.image.resize(
    tf.convert_to_tensor([image]), size=(image_size, image_size)
)

patches = Patches(patch_size)(resized_image)
print(f'Image size: {image_size} X {image_size}')
print(f'Patch size: {patch_size} X {patch_size}')
print(f'Patches per image: {patches.shape[1]}')
print(f'Elements per patch: {patches.shape[-1]}')

n = int(np.sqrt(patches.shape[1]))
plt.figure(figsize=(4, 4))

for i, patch in enumerate(patches[0]):
    ax = plt.subplot(n, n, i + 1)
    patch_img = tf.reshape(patch, (patch_size, patch_size, 3))
    plt.imshow(patch_img.numpy())
    plt.axis('off')

Image size: 160 X 160
Patch size: 16 X 16
Patches per image: 100
Elements per patch: 768

In [ ]:
class PatchEncoder(layers.Layer):
    def __init__(self, num_patches, projection_dim):
        super().__init__()
        self.num_patches = num_patches
        self.projection = layers.Dense(units=projection_dim)
        self.position_embedding = layers.Embedding(
            input_dim=num_patches, output_dim=projection_dim
        )

    def call(self, patch):
        # Project each flattened patch and add a learnable position embedding.
        positions = tf.range(start=0, limit=self.num_patches, delta=1)
        encoded = self.projection(patch) + self.position_embedding(positions)
        return encoded
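A minimal shape check for the encoder, assuming the `patches` tensor from the visualization cell above is still in scope:

# Each 768-dimensional patch is projected to projection_dim and a learned
# position embedding is added, giving (batch, num_patches, projection_dim).
encoded = PatchEncoder(num_patches, projection_dim)(patches)
print(encoded.shape)  # expected: (1, 100, 64)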
In [ ]:
inputs = layers.Input(shape=(image_size, image_size, 3))
inputs

Out[ ]:
<KerasTensor: shape=(None, 160, 160, 3) dtype=float32 (created by layer 'input_9')>

In [ ]:
# Build the ViT model
def create_vit_classifier():
    inputs = layers.Input(shape=input_shape)
    # Create patches.
    patches = Patches(patch_size)(inputs)
    # Encode patches.
    encoded_patches = PatchEncoder(num_patches, projection_dim)(patches)

    # Create multiple layers of the Transformer block.
    for _ in range(transformer_layers):
        # Layer normalization 1.
        x1 = layers.LayerNormalization(epsilon=1e-6)(encoded_patches)
        # Multi-head attention.
        attention_output = layers.MultiHeadAttention(
            num_heads=num_heads, key_dim=projection_dim, dropout=0.1
        )(x1, x1)
        # Skip connection 1.
        x2 = layers.Add()([attention_output, encoded_patches])
        # Layer normalization 2.
        x3 = layers.LayerNormalization(epsilon=1e-6)(x2)
        # MLP.
        x3 = mlp(x3, hidden_units=transformer_units, dropout_rate=0.1)
        # Skip connection 2.
        encoded_patches = layers.Add()([x3, x2])

    # Flatten the patch representations into a single vector per image.
    representation = layers.LayerNormalization(epsilon=1e-6)(encoded_patches)
    representation = layers.Flatten()(representation)
    representation = layers.Dropout(0.5)(representation)
    # Add MLP head.
    features = mlp(representation, hidden_units=mlp_head_units, dropout_rate=0.5)
    # Classify outputs with sigmoid activation for multi-label classification.
    outputs = layers.Dense(num_classes, activation='sigmoid')(features)
    # Create the Keras model.
    model = keras.Model(inputs=inputs, outputs=outputs)
    return model

In [ ]:
model = create_vit_classifier()
model.summary()

Model: "model_6"

Layer (type) Output Shape Param # Connected to


========================================================================================
==========
input_11 (InputLayer) [(None, 160, 160, 3)] 0 []

patches_11 (Patches) (None, None, 768) 0 ['input_11[0][0]']


patch_encoder_7 (PatchEnco (None, 100, 64) 55616 ['patches_11[0][0]']
der)

layer_normalization_103 (L (None, 100, 64) 128 ['patch_encoder_7[0]


[0]']
ayerNormalization)

multi_head_attention_48 (M (None, 100, 64) 66368 ['layer_normalizatio


n_103[0][0
ultiHeadAttention) ]',
'layer_normalizatio
n_103[0][0
]']

add_96 (Add) (None, 100, 64) 0 ['multi_head_attenti


on_48[0][0
]',
'patch_encoder_7[0]
[0]']

layer_normalization_104 (L (None, 100, 64) 128 ['add_96[0][0]']


ayerNormalization)

dense_122 (Dense) (None, 100, 128) 8320 ['layer_normalizatio


n_104[0][0
]']

dropout_114 (Dropout) (None, 100, 128) 0 ['dense_122[0][0]']

dense_123 (Dense) (None, 100, 64) 8256 ['dropout_114[0]


[0]']

dropout_115 (Dropout) (None, 100, 64) 0 ['dense_123[0][0]']

add_97 (Add) (None, 100, 64) 0 ['dropout_115[0]


[0]',
'add_96[0][0]']

layer_normalization_105 (L (None, 100, 64) 128 ['add_97[0][0]']


ayerNormalization)

multi_head_attention_49 (M (None, 100, 64) 66368 ['layer_normalizatio


n_105[0][0
ultiHeadAttention) ]',
'layer_normalizatio
n_105[0][0
]']

add_98 (Add) (None, 100, 64) 0 ['multi_head_attenti


on_49[0][0
]',
'add_97[0][0]']

layer_normalization_106 (L (None, 100, 64) 128 ['add_98[0][0]']


ayerNormalization)

dense_124 (Dense) (None, 100, 128) 8320 ['layer_normalizatio


n_106[0][0
]']
dropout_116 (Dropout) (None, 100, 128) 0 ['dense_124[0][0]']

dense_125 (Dense) (None, 100, 64) 8256 ['dropout_116[0]


[0]']

dropout_117 (Dropout) (None, 100, 64) 0 ['dense_125[0][0]']

add_99 (Add) (None, 100, 64) 0 ['dropout_117[0]


[0]',
'add_98[0][0]']

layer_normalization_107 (L (None, 100, 64) 128 ['add_99[0][0]']


ayerNormalization)

multi_head_attention_50 (M (None, 100, 64) 66368 ['layer_normalizatio


n_107[0][0
ultiHeadAttention) ]',
'layer_normalizatio
n_107[0][0
]']

add_100 (Add) (None, 100, 64) 0 ['multi_head_attenti


on_50[0][0
]',
'add_99[0][0]']

layer_normalization_108 (L (None, 100, 64) 128 ['add_100[0][0]']


ayerNormalization)

dense_126 (Dense) (None, 100, 128) 8320 ['layer_normalizatio


n_108[0][0
]']

dropout_118 (Dropout) (None, 100, 128) 0 ['dense_126[0][0]']

dense_127 (Dense) (None, 100, 64) 8256 ['dropout_118[0]


[0]']

dropout_119 (Dropout) (None, 100, 64) 0 ['dense_127[0][0]']

add_101 (Add) (None, 100, 64) 0 ['dropout_119[0]


[0]',
'add_100[0][0]']

layer_normalization_109 (L (None, 100, 64) 128 ['add_101[0][0]']


ayerNormalization)

multi_head_attention_51 (M (None, 100, 64) 66368 ['layer_normalizatio


n_109[0][0
ultiHeadAttention) ]',
'layer_normalizatio
n_109[0][0
]']

add_102 (Add) (None, 100, 64) 0 ['multi_head_attenti


on_51[0][0
]',
'add_101[0][0]']

layer_normalization_110 (L (None, 100, 64) 128 ['add_102[0][0]']


ayerNormalization)

dense_128 (Dense) (None, 100, 128) 8320 ['layer_normalizatio


n_110[0][0
]']

dropout_120 (Dropout) (None, 100, 128) 0 ['dense_128[0][0]']

dense_129 (Dense) (None, 100, 64) 8256 ['dropout_120[0]


[0]']

dropout_121 (Dropout) (None, 100, 64) 0 ['dense_129[0][0]']

add_103 (Add) (None, 100, 64) 0 ['dropout_121[0]


[0]',
'add_102[0][0]']

layer_normalization_111 (L (None, 100, 64) 128 ['add_103[0][0]']


ayerNormalization)

multi_head_attention_52 (M (None, 100, 64) 66368 ['layer_normalizatio


n_111[0][0
ultiHeadAttention) ]',
'layer_normalizatio
n_111[0][0
]']

add_104 (Add) (None, 100, 64) 0 ['multi_head_attenti


on_52[0][0
]',
'add_103[0][0]']

layer_normalization_112 (L (None, 100, 64) 128 ['add_104[0][0]']


ayerNormalization)

dense_130 (Dense) (None, 100, 128) 8320 ['layer_normalizatio


n_112[0][0
]']

dropout_122 (Dropout) (None, 100, 128) 0 ['dense_130[0][0]']

dense_131 (Dense) (None, 100, 64) 8256 ['dropout_122[0]


[0]']

dropout_123 (Dropout) (None, 100, 64) 0 ['dense_131[0][0]']

add_105 (Add) (None, 100, 64) 0 ['dropout_123[0]


[0]',
'add_104[0][0]']

layer_normalization_113 (L (None, 100, 64) 128 ['add_105[0][0]']


ayerNormalization)

multi_head_attention_53 (M (None, 100, 64) 66368 ['layer_normalizatio


n_113[0][0
ultiHeadAttention) ]',
'layer_normalizatio
n_113[0][0
]']
add_106 (Add) (None, 100, 64) 0 ['multi_head_attenti
on_53[0][0
]',
'add_105[0][0]']

layer_normalization_114 (L (None, 100, 64) 128 ['add_106[0][0]']


ayerNormalization)

dense_132 (Dense) (None, 100, 128) 8320 ['layer_normalizatio


n_114[0][0
]']

dropout_124 (Dropout) (None, 100, 128) 0 ['dense_132[0][0]']

dense_133 (Dense) (None, 100, 64) 8256 ['dropout_124[0]


[0]']

dropout_125 (Dropout) (None, 100, 64) 0 ['dense_133[0][0]']

add_107 (Add) (None, 100, 64) 0 ['dropout_125[0]


[0]',
'add_106[0][0]']

layer_normalization_115 (L (None, 100, 64) 128 ['add_107[0][0]']


ayerNormalization)

multi_head_attention_54 (M (None, 100, 64) 66368 ['layer_normalizatio


n_115[0][0
ultiHeadAttention) ]',
'layer_normalizatio
n_115[0][0
]']

add_108 (Add) (None, 100, 64) 0 ['multi_head_attenti


on_54[0][0
]',
'add_107[0][0]']

layer_normalization_116 (L (None, 100, 64) 128 ['add_108[0][0]']


ayerNormalization)

dense_134 (Dense) (None, 100, 128) 8320 ['layer_normalizatio


n_116[0][0
]']

dropout_126 (Dropout) (None, 100, 128) 0 ['dense_134[0][0]']

dense_135 (Dense) (None, 100, 64) 8256 ['dropout_126[0]


[0]']

dropout_127 (Dropout) (None, 100, 64) 0 ['dense_135[0][0]']

add_109 (Add) (None, 100, 64) 0 ['dropout_127[0]


[0]',
'add_108[0][0]']

layer_normalization_117 (L (None, 100, 64) 128 ['add_109[0][0]']


ayerNormalization)

multi_head_attention_55 (M (None, 100, 64) 66368 ['layer_normalizatio


12/5/23, 6:19 PM personalitiy_with_VIT
n_117[0][0
ultiHeadAttention) ]',
'layer_normalizatio
n_117[0][0
]']

add_110 (Add) (None, 100, 64) 0 ['multi_head_attenti


on_55[0][0
]',
'add_109[0][0]']

layer_normalization_118 (L (None, 100, 64) 128 ['add_110[0][0]']


ayerNormalization)

dense_136 (Dense) (None, 100, 128) 8320 ['layer_normalizatio


n_118[0][0
]']

dropout_128 (Dropout) (None, 100, 128) 0 ['dense_136[0][0]']

dense_137 (Dense) (None, 100, 64) 8256 ['dropout_128[0]


[0]']

dropout_129 (Dropout) (None, 100, 64) 0 ['dense_137[0][0]']

add_111 (Add) (None, 100, 64) 0 ['dropout_129[0]


[0]',
'add_110[0][0]']

layer_normalization_119 (L (None, 100, 64) 128 ['add_111[0][0]']


ayerNormalization)

flatten_6 (Flatten) (None, 6400) 0 ['layer_normalizatio


n_119[0][0
]']

dropout_130 (Dropout) (None, 6400) 0 ['flatten_6[0][0]']

dense_138 (Dense) (None, 2048) 1310924 ['dropout_130[0]


[0]']
8

dropout_131 (Dropout) (None, 2048) 0 ['dense_138[0][0]']

dense_139 (Dense) (None, 1024) 2098176 ['dropout_131[0]


[0]']

dropout_132 (Dropout) (None, 1024) 0 ['dense_139[0][0]']

dense_140 (Dense) (None, 5) 5125 ['dropout_132[0]


[0]']

========================================================================================
==========
Total params: 15933893 (60.78 MB)
Trainable params: 15933893 (60.78 MB)
Non-trainable params: 0 (0.00 Byte)

file:///C:/Users/Administrator/Downloads/personalitiy_with_VIT.html 11/19
12/5/23, 6:19 PM personalitiy_with_VIT
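The per-layer parameter counts in the summary can be reproduced by hand; a rough verification sketch using the dimensions configured earlier:

d = projection_dim   # 64
h = num_heads        # 4
# MultiHeadAttention: Q, K, V projections (d -> h*d each, with bias) plus output projection (h*d -> d).
print(3 * (d * h * d + h * d) + (h * d * d + d))   # 66368
# Transformer MLP: d -> 2d -> d, with biases.
print(d * 2 * d + 2 * d, 2 * d * d + d)            # 8320 8256
# Each LayerNormalization over d features: gamma + beta.
print(2 * d)                                        # 128
# PatchEncoder: Dense(768 -> d) plus Embedding(num_patches, d).
print(768 * d + d + num_patches * d)                # 55616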

In [ ]:
class BalancedAccuracy(tf.keras.metrics.Metric):
    def __init__(self, name='balanced_accuracy', **kwargs):
        super(BalancedAccuracy, self).__init__(name=name, **kwargs)
        self.standard_accuracy = tf.keras.metrics.BinaryAccuracy()
        self.total = self.add_weight(name="total", initializer="zeros")
        self.count = self.add_weight(name="count", initializer="zeros")

    def update_state(self, y_true, y_pred, sample_weight=None):
        # Track plain binary accuracy plus the fraction of positive label entries seen.
        self.standard_accuracy.update_state(y_true, y_pred, sample_weight)
        self.total.assign_add(tf.cast(tf.reduce_sum(y_true), tf.float32))
        self.count.assign_add(tf.cast(tf.size(y_true), tf.float32))

    def result(self):
        standard_acc = self.standard_accuracy.result()
        scaling_factor = tf.math.sqrt(1.0 - standard_acc)
        # Fraction of label entries that are positive (a rough imbalance measure).
        imbalance_ratio = self.total / self.count
        # sin(pi/2) * 0.1 + log(e) * 0.08 is a constant 0.18 adjustment weight.
        dynamic_adjustment = tf.math.sin(np.pi / 2) * 0.1 + tf.math.log(np.e) * 0.08
        imbalance_adjustment = dynamic_adjustment * scaling_factor * imbalance_ratio
        return tf.clip_by_value(imbalance_adjustment + standard_acc, 0, 1)

    def reset_state(self):
        self.standard_accuracy.reset_states()
        self.total.assign(0)
        self.count.assign(0)
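A quick standalone check of the metric on toy labels; an illustrative sketch only, with made-up values:

m = BalancedAccuracy()
y_true = tf.constant([[1., 0., 0., 0., 0.], [0., 1., 0., 0., 0.]])
y_pred = tf.constant([[0.9, 0.1, 0.2, 0.1, 0.0], [0.2, 0.8, 0.1, 0.0, 0.1]])
m.update_state(y_true, y_pred)
# Binary accuracy is 1.0 here, so the sqrt(1 - acc) scaling makes the adjustment 0 and the
# result equals plain binary accuracy; below 1.0, up to 0.18 * sqrt(1 - acc) * (positive
# fraction) is added before clipping to [0, 1].
print(float(m.result()))  # 1.0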

In [ ]:
def run_experiment(model):
    # Compile the model with a fixed learning rate
    model.compile(
        optimizer=keras.optimizers.Adam(learning_rate=0.001),
        loss=keras.losses.BinaryCrossentropy(from_logits=False),
        metrics=[BalancedAccuracy()]
    )

    # Train the model
    history = model.fit(
        x=x_train,
        y=y_train,
        batch_size=32,
        epochs=50,  # Adjust the number of epochs based on your dataset and training needs
        validation_data=(x_test, y_test),
    )

    # Evaluate the model
    _, accuracy = model.evaluate(x_test, y_test)
    print(f"Test accuracy: {round(accuracy * 100, 2)}%")

    return history

In [ ]:
def convert_to_binary(predictions, threshold=0.5):
    return (predictions > threshold).astype(int)

def visualize_predictions(test_X, test_true_classes, test_pred_classes, test_predictions, num_images=9):
    # Show a random sample of test images with true, thresholded and raw predictions.
    # NOTE: the figure/grid setup and loop below reconstruct lines missing from the export.
    plt.figure(figsize=(15, 15))
    for i in range(num_images):
        ax = plt.subplot(int(np.ceil(num_images / 3)), 3, i + 1)
        idx = np.random.choice(range(len(test_X)))
        ax.imshow(test_X[idx])
        bin_pred = test_pred_classes[idx]
        true_labels = test_true_classes[idx]
        ax.set_title(f"True: {true_labels}, Pred: {bin_pred}, Raw Pred: {np.round(test_predictions[idx], 2)}")
        ax.axis("off")
    plt.tight_layout()
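For reference, `convert_to_binary` simply thresholds the raw sigmoid outputs per trait; for example, with illustrative values:

print(convert_to_binary(np.array([[0.7, 0.2, 0.55, 0.4, 0.9]])))  # -> [[1 0 1 0 1]]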

In [ ]:
# Train the model
vit_classifier = create_vit_classifier()
# vit_classifier.summary()
history = run_experiment(vit_classifier)

Epoch 1/50
1/1 [==============================] - 28s 28s/step - loss: 1.1884 - balanced_accuracy:
0.5498 - val_loss: 1.2929 - val_balanced_accuracy: 0.7921
Epoch 2/50
1/1 [==============================] - 0s 140ms/step - loss: 0.8470 - balanced_accuracy:
0.8520 - val_loss: 1.9566 - val_balanced_accuracy: 0.8161
Epoch 3/50
1/1 [==============================] - 0s 141ms/step - loss: 2.2790 - balanced_accuracy:
0.8161 - val_loss: 2.4583 - val_balanced_accuracy: 0.5376
Epoch 4/50
1/1 [==============================] - 0s 140ms/step - loss: 1.8066 - balanced_accuracy:
0.6955 - val_loss: 1.0109 - val_balanced_accuracy: 0.7921
Epoch 5/50
1/1 [==============================] - 0s 140ms/step - loss: 1.2271 - balanced_accuracy:
0.8401 - val_loss: 1.5577 - val_balanced_accuracy: 0.8281
Epoch 6/50
1/1 [==============================] - 0s 143ms/step - loss: 2.0102 - balanced_accuracy:
0.8401 - val_loss: 1.2960 - val_balanced_accuracy: 0.6592
Epoch 7/50
1/1 [==============================] - 0s 146ms/step - loss: 1.0184 - balanced_accuracy:
0.8281 - val_loss: 1.4156 - val_balanced_accuracy: 0.7921
Epoch 8/50
1/1 [==============================] - 0s 115ms/step - loss: 0.9947 - balanced_accuracy:
0.8639 - val_loss: 0.8329 - val_balanced_accuracy: 0.8041
Epoch 9/50
1/1 [==============================] - 0s 142ms/step - loss: 1.4243 - balanced_accuracy:
0.7680 - val_loss: 1.1880 - val_balanced_accuracy: 0.7921
Epoch 10/50
1/1 [==============================] - 0s 140ms/step - loss: 1.0453 - balanced_accuracy:
0.8401 - val_loss: 2.0739 - val_balanced_accuracy: 0.7921
Epoch 11/50
1/1 [==============================] - 0s 104ms/step - loss: 1.2825 - balanced_accuracy:
0.8041 - val_loss: 1.7147 - val_balanced_accuracy: 0.7921
Epoch 12/50
1/1 [==============================] - 0s 139ms/step - loss: 0.8026 - balanced_accuracy:
0.9114 - val_loss: 1.2836 - val_balanced_accuracy: 0.8041
Epoch 13/50
1/1 [==============================] - 0s 104ms/step - loss: 0.9669 - balanced_accuracy:
0.8281 - val_loss: 0.9139 - val_balanced_accuracy: 0.7680
Epoch 14/50
1/1 [==============================] - 0s 104ms/step - loss: 0.8565 - balanced_accuracy:
0.8161 - val_loss: 1.3898 - val_balanced_accuracy: 0.6470

Epoch 15/50
1/1 [==============================] - 0s 146ms/step - loss: 1.0001 - balanced_accuracy:
0.8281 - val_loss: 1.3538 - val_balanced_accuracy: 0.7318
Epoch 16/50
1/1 [==============================] - 0s 149ms/step - loss: 0.9054 - balanced_accuracy:
0.7800 - val_loss: 1.6595 - val_balanced_accuracy: 0.8161
Epoch 17/50
1/1 [==============================] - 0s 147ms/step - loss: 0.6849 - balanced_accuracy:
0.8877 - val_loss: 1.4094 - val_balanced_accuracy: 0.8281
Epoch 18/50
1/1 [==============================] - 0s 103ms/step - loss: 0.9553 - balanced_accuracy:
0.8996 - val_loss: 1.2483 - val_balanced_accuracy: 0.8401
Epoch 19/50
1/1 [==============================] - 0s 105ms/step - loss: 0.8920 - balanced_accuracy:
0.8520 - val_loss: 1.0668 - val_balanced_accuracy: 0.8401
Epoch 20/50
1/1 [==============================] - 0s 109ms/step - loss: 0.9314 - balanced_accuracy:
0.8877 - val_loss: 0.8210 - val_balanced_accuracy: 0.8161
Epoch 21/50
1/1 [==============================] - 0s 112ms/step - loss: 0.7278 - balanced_accuracy:
0.8520 - val_loss: 0.9103 - val_balanced_accuracy: 0.8520
Epoch 22/50
1/1 [==============================] - 0s 142ms/step - loss: 0.7356 - balanced_accuracy:
0.8758 - val_loss: 1.5541 - val_balanced_accuracy: 0.8161
Epoch 23/50
1/1 [==============================] - 0s 141ms/step - loss: 0.4175 - balanced_accuracy:
0.8996 - val_loss: 2.0443 - val_balanced_accuracy: 0.7921
Epoch 24/50
1/1 [==============================] - 0s 102ms/step - loss: 0.6070 - balanced_accuracy:
0.8639 - val_loss: 1.5228 - val_balanced_accuracy: 0.8041
Epoch 25/50
1/1 [==============================] - 0s 135ms/step - loss: 1.1553 - balanced_accuracy:
0.8639 - val_loss: 0.9599 - val_balanced_accuracy: 0.6834
Epoch 26/50
1/1 [==============================] - 0s 98ms/step - loss: 1.0610 - balanced_accuracy:
0.7318 - val_loss: 0.9751 - val_balanced_accuracy: 0.8281
Epoch 27/50
1/1 [==============================] - 0s 100ms/step - loss: 0.5176 - balanced_accuracy:
0.9349 - val_loss: 1.3711 - val_balanced_accuracy: 0.8161
Epoch 28/50
1/1 [==============================] - 0s 101ms/step - loss: 0.6289 - balanced_accuracy:
0.8996 - val_loss: 1.3199 - val_balanced_accuracy: 0.8401
Epoch 29/50
1/1 [==============================] - 0s 100ms/step - loss: 0.5660 - balanced_accuracy:
0.8758 - val_loss: 1.2180 - val_balanced_accuracy: 0.8041
Epoch 30/50
1/1 [==============================] - 0s 136ms/step - loss: 0.7440 - balanced_accuracy:
0.8639 - val_loss: 1.1841 - val_balanced_accuracy: 0.7559
Epoch 31/50
1/1 [==============================] - 0s 99ms/step - loss: 0.8129 - balanced_accuracy:
0.8161 - val_loss: 1.2620 - val_balanced_accuracy: 0.8161
Epoch 32/50
1/1 [==============================] - 0s 103ms/step - loss: 0.6070 - balanced_accuracy:
0.8996 - val_loss: 2.0693 - val_balanced_accuracy: 0.8041
Epoch 33/50
1/1 [==============================] - 0s 140ms/step - loss: 1.1855 - balanced_accuracy:
0.8639 - val_loss: 1.4341 - val_balanced_accuracy: 0.8041
Epoch 34/50
1/1 [==============================] - 0s 103ms/step - loss: 0.4768 - balanced_accuracy:
0.8639 - val_loss: 0.9001 - val_balanced_accuracy: 0.8161

Epoch 35/50
1/1 [==============================] - 0s 103ms/step - loss: 0.6302 - balanced_accuracy:
0.8758 - val_loss: 0.8172 - val_balanced_accuracy: 0.7800
Epoch 36/50
1/1 [==============================] - 0s 143ms/step - loss: 0.6734 - balanced_accuracy:
0.7680 - val_loss: 1.2039 - val_balanced_accuracy: 0.8041
Epoch 37/50
1/1 [==============================] - 0s 136ms/step - loss: 0.5008 - balanced_accuracy:
0.8996 - val_loss: 1.7680 - val_balanced_accuracy: 0.8041
Epoch 38/50
1/1 [==============================] - 0s 102ms/step - loss: 0.6049 - balanced_accuracy:
0.9349 - val_loss: 1.8223 - val_balanced_accuracy: 0.8161
Epoch 39/50
1/1 [==============================] - 0s 99ms/step - loss: 0.6112 - balanced_accuracy:
0.8996 - val_loss: 1.9003 - val_balanced_accuracy: 0.8161
Epoch 40/50
1/1 [==============================] - 0s 97ms/step - loss: 0.7249 - balanced_accuracy:
0.8758 - val_loss: 1.2465 - val_balanced_accuracy: 0.8401
Epoch 41/50
1/1 [==============================] - 0s 107ms/step - loss: 0.5592 - balanced_accuracy:
0.9114 - val_loss: 0.9633 - val_balanced_accuracy: 0.8161
Epoch 42/50
1/1 [==============================] - 0s 141ms/step - loss: 0.5266 - balanced_accuracy:
0.8877 - val_loss: 0.8047 - val_balanced_accuracy: 0.7680
Epoch 43/50
1/1 [==============================] - 0s 102ms/step - loss: 0.3356 - balanced_accuracy:
0.9114 - val_loss: 0.7882 - val_balanced_accuracy: 0.8041
Epoch 44/50
1/1 [==============================] - 0s 101ms/step - loss: 0.6323 - balanced_accuracy:
0.8401 - val_loss: 0.9537 - val_balanced_accuracy: 0.8041
Epoch 45/50
1/1 [==============================] - 0s 101ms/step - loss: 0.4009 - balanced_accuracy:
0.8996 - val_loss: 1.8994 - val_balanced_accuracy: 0.7439
Epoch 46/50
1/1 [==============================] - 0s 99ms/step - loss: 0.6047 - balanced_accuracy:
0.8639 - val_loss: 2.5157 - val_balanced_accuracy: 0.7800
Epoch 47/50
1/1 [==============================] - 0s 138ms/step - loss: 0.8430 - balanced_accuracy:
0.8281 - val_loss: 2.4378 - val_balanced_accuracy: 0.7921
Epoch 48/50
1/1 [==============================] - 0s 96ms/step - loss: 0.5966 - balanced_accuracy:
0.9114 - val_loss: 1.8166 - val_balanced_accuracy: 0.7680
Epoch 49/50
1/1 [==============================] - 0s 140ms/step - loss: 0.4953 - balanced_accuracy:
0.9231 - val_loss: 1.5470 - val_balanced_accuracy: 0.8041
Epoch 50/50
1/1 [==============================] - 0s 135ms/step - loss: 0.2758 - balanced_accuracy:
0.9114 - val_loss: 1.6120 - val_balanced_accuracy: 0.8161
1/1 [==============================] - 0s 50ms/step - loss: 1.6120 - balanced_accuracy:
0.8161
Test accuracy: 81.61%

In [ ]:
plt.figure(figsize=(12, 5))
plt.plot(history.history['balanced_accuracy'], label='Training Accuracy')
plt.plot(history.history['val_balanced_accuracy'], label='Validation Accuracy')
plt.title('Training and Validation Accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend()
plt.show()

plt.figure(figsize=(12, 5))
plt.plot(history.history['loss'], label='Training Loss')
plt.plot(history.history['val_loss'], label='Validation Loss')
plt.title('Training and Validation Loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend()
plt.show()

# NOTE: test_X / test_Y are the held-out arrays from the (commented-out) pickle split above.
test_predictions = vit_classifier.predict(test_X)
test_pred_classes = convert_to_binary(test_predictions, threshold=0.5)
visualize_predictions(test_X, test_Y, test_pred_classes, test_predictions)

43/43 [==============================] - 2s 27ms/step

In [ ]:
# Multilabel confusion matrix
from sklearn.metrics import multilabel_confusion_matrix
import seaborn as sns

# Define class names as per your dataset
class_names = ['Extraversion', 'Agreeableness', 'Conscientiousness', 'Neuroticism', 'Openness']
multilabel_cm = multilabel_confusion_matrix(test_Y, test_pred_classes)

# Compute normalized confusion matrix for each class
normalized_cm = [cm.astype('float') / cm.sum(axis=1)[:, np.newaxis] for cm in multilabel_cm]

fig, axes = plt.subplots(nrows=2, ncols=3, figsize=(20, 10))  # Adjust for better fit
axes = axes.flatten()  # Flatten the matrix of axes

for i, (ax, cm, label) in enumerate(zip(axes, normalized_cm, class_names)):
    sns.heatmap(cm, annot=True, fmt=".2f", ax=ax, cmap="Blues", cbar=False)
    ax.set_title(f'{label}', fontsize=14)
    ax.set_xlabel('Predicted Label', fontsize=12)
    ax.set_ylabel('True Label', fontsize=12)
    ax.xaxis.set_ticklabels(['0', '1'], fontsize=10)
    ax.yaxis.set_ticklabels(['0', '1'], fontsize=10)

# Remove the unused sixth subplot when the number of classes is odd.
if len(class_names) % 2 != 0:
    fig.delaxes(axes[-1])

plt.tight_layout()
plt.show()

In [ ]:
# Classification report
from sklearn.metrics import classification_report
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt

# Make sure to replace 'target_names' with the actual names of your classes
target_names = ['Extraversion', 'Agreeableness', 'Conscientiousness', 'Neuroticism', 'Openness']

report = classification_report(test_Y, test_pred_classes, target_names=target_names,
                               zero_division=0, output_dict=True)

df_report = pd.DataFrame(report).transpose()

selected_metrics = df_report.loc[target_names, ['precision', 'recall', 'f1-score']]

plt.figure(figsize=(10, 8))
sns.heatmap(selected_metrics, annot=True, fmt=".2f", cmap="Blues")
plt.title('Classification Report')
