B_DL_Assign
# install libraries/packages/modules
# Download Dataset
import pandas as pd
import numpy as np
import cv2
import matplotlib.pyplot as plt
import torch
from tqdm.notebook import tqdm
import sys
sys.path.append('/content/object-localization-dataset')
Configurations
CSV_FILE = '/content/object-localization-dataset/train.csv'
DATA_DIR = '/content/object-localization-dataset/'
DEVICE = 'cuda'
BATCH_SIZE = 16
IMG_SIZE = 140
LR= 0.001
EPOCHS = 40
MODEL_NAME = 'efficientnet_b0'
NUM_COR = 4
df = pd.read_csv(CSV_FILE)
df.head(10)
df
        label
0    mushroom
1    eggplant
2    mushroom
3    eggplant
4    eggplant
..        ...
181  eggplant
182  cucumber
183  mushroom
184  eggplant
185  mushroom
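The cell that produced the sample-image preview is not shown in this dump; a minimal sketch of how one row could be visualized, assuming the CSV stores an img_path column alongside xmin/ymin/xmax/ymax (column names inferred from the dataset class below):

# Hypothetical preview cell: read one row, draw its ground-truth box, show it.
import os

row = df.iloc[0]
img = cv2.imread(os.path.join(DATA_DIR, row.img_path))          # img_path column assumed
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
cv2.rectangle(img, (int(row.xmin), int(row.ymin)), (int(row.xmax), int(row.ymax)), (255, 0, 0), 2)
plt.imshow(img)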
from sklearn.model_selection import train_test_split

train_df, valid_df = train_test_split(df, test_size=0.20, random_state=42)
Augmentations
import albumentations as A

train_augs = A.Compose([
    A.Resize(IMG_SIZE, IMG_SIZE),
    A.HorizontalFlip(p=0.5),
    A.VerticalFlip(p=0.5),
    A.Rotate()
], bbox_params=A.BboxParams(format='pascal_voc', label_fields=['class_labels']))

valid_augs = A.Compose([
    A.Resize(IMG_SIZE, IMG_SIZE),
], bbox_params=A.BboxParams(format='pascal_voc', label_fields=['class_labels']))
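A quick sanity check of the bbox-aware pipeline, using a synthetic image and box (the shapes and coordinates here are illustrative only):

# Apply the training augmentations to a dummy image + box and confirm the box
# is transformed together with the pixels (pascal_voc = [xmin, ymin, xmax, ymax]).
dummy_img = np.zeros((200, 200, 3), dtype=np.uint8)
dummy_box = [[50, 60, 120, 140]]
out = train_augs(image=dummy_img, bboxes=dummy_box, class_labels=['mushroom'])
print(out['image'].shape)   # (IMG_SIZE, IMG_SIZE, 3)
print(out['bboxes'])        # box rescaled/flipped/rotated to match the image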
class ObjectLocDataset(torch.utils.data.Dataset):

    def __init__(self, df, augmentations=None):
        self.df = df
        self.augmentations = augmentations

    def __len__(self):
        return len(self.df)

    def __getitem__(self, idx):
        row = self.df.iloc[idx]

        xmin = row.xmin
        ymin = row.ymin
        xmax = row.xmax
        ymax = row.ymax
        bbox = [[xmin, ymin, xmax, ymax]]

        # img_path column name assumed from the dataset CSV layout
        img = cv2.imread(DATA_DIR + row.img_path)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

        if self.augmentations:
            data = self.augmentations(image=img, bboxes=bbox, class_labels=[None])
            img = data['image']
            bbox = data['bboxes'][0]

        img = torch.from_numpy(img).permute(2, 0, 1) / 255.0
        bbox = torch.Tensor(bbox)

        return img, bbox
trainset = ObjectLocDataset(train_df,train_augs)
validset = ObjectLocDataset(valid_df,valid_augs)
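The DataLoader cells are not visible in this dump; a minimal sketch assuming standard torch.utils.data.DataLoader objects with the BATCH_SIZE constant from the configuration block (the names trainloader/validloader are this sketch's, not necessarily the original's):

from torch.utils.data import DataLoader

# Hypothetical loader cell: shuffle only the training split.
trainloader = DataLoader(trainset, batch_size=BATCH_SIZE, shuffle=True)
validloader = DataLoader(validset, batch_size=BATCH_SIZE, shuffle=False)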
Create Model
from torch import nn
import timm
class ObjLocModel(nn.Module):

    def __init__(self):
        super(ObjLocModel, self).__init__()
        # backbone reconstructed from the config block: timm model with a
        # NUM_COR-unit regression head (xmin, ymin, xmax, ymax)
        self.backbone = timm.create_model(MODEL_NAME, pretrained=True, num_classes=NUM_COR)

    def forward(self, images, gt_bboxes=None):
        bboxes = self.backbone(images)
        if gt_bboxes is not None:
            loss = nn.MSELoss()(bboxes, gt_bboxes)
            return bboxes, loss
        return bboxes
model = ObjLocModel()
model.to(DEVICE);
random_img = torch.rand(1,3,140,140).to(DEVICE)
model(random_img).shape
torch.Size([1, 4])
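The optimizer cell is also missing from this dump; a sketch assuming plain Adam with the LR constant defined above:

# Hypothetical optimizer cell.
optimizer = torch.optim.Adam(model.parameters(), lr=LR)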
def train_fn(model, dataloader, optimizer):
    model.train()
    total_loss = 0.0
    for images, gt_bboxes in tqdm(dataloader):
        bboxes, loss = model(images.to(DEVICE), gt_bboxes.to(DEVICE))
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        total_loss += loss.item()
    return total_loss / len(dataloader)

def eval_fn(model, dataloader):
    model.eval()
    total_loss = 0.0
    with torch.no_grad():
        for images, gt_bboxes in tqdm(dataloader):
            bboxes, loss = model(images.to(DEVICE), gt_bboxes.to(DEVICE))
            total_loss += loss.item()
    return total_loss / len(dataloader)
# Training loop (trainloader/validloader come from the DataLoader sketch above)
best_valid_loss = np.Inf

for i in range(EPOCHS):
    train_loss = train_fn(model, trainloader, optimizer)
    valid_loss = eval_fn(model, validloader)

    if valid_loss < best_valid_loss:
        torch.save(model.state_dict(), 'best_model.pt')
        print("Weights are saved")
        best_valid_loss = valid_loss

    print(f"Epoch : {i} train loss : {train_loss} valid loss : {valid_loss}")
{"model_id":"93cb1c8c695f4c18bdf5bc1245dfa8f4","version_major":2,"vers
ion_minor":0}
{"model_id":"e3c13bf0c4c6458a9b090da69575f19b","version_major":2,"vers
ion_minor":0}
{"model_id":"34e4beed8cd7416e813d432cdd138eb4","version_major":2,"vers
ion_minor":0}
{"model_id":"96c7d5bcd2cd42dcb819f0b28811fe00","version_major":2,"vers
ion_minor":0}
{"model_id":"9df95f743ffe44f9aaf71ae37f4c05d2","version_major":2,"vers
ion_minor":0}
{"model_id":"98d507882dee42839cf2802134baaccf","version_major":2,"vers
ion_minor":0}
{"model_id":"29aa091d59b74a81bdabd7d066671fef","version_major":2,"vers
ion_minor":0}
{"model_id":"540ac4419c0a457088ad93762eb0de47","version_major":2,"vers
ion_minor":0}
{"model_id":"83896e35d0dc4cad8641a2dd5581fde8","version_major":2,"vers
ion_minor":0}
{"model_id":"42ea7a894e72434db16e46e73bc6368c","version_major":2,"vers
ion_minor":0}
{"model_id":"be57c3d8959041bf9c238478eb55b123","version_major":2,"vers
ion_minor":0}
{"model_id":"7f5de4361ecf483c9801e6899fdb10b1","version_major":2,"vers
ion_minor":0}
{"model_id":"b53951f697d04deab673dc0f2b841969","version_major":2,"vers
ion_minor":0}
Epoch : 7 train loss : 360.6284713745117 valid loss : 305.06504313151044
{"model_id":"547b6d6dc51a4a198e4f6d204cb4cd63","version_major":2,"vers
ion_minor":0}
{"model_id":"bbdd270ea6f749bab25cc974219a02f9","version_major":2,"vers
ion_minor":0}
{"model_id":"991180bcffee4d7493eed2206968aa7d","version_major":2,"vers
ion_minor":0}
{"model_id":"d28c948835694c01a84cb7a8a4535305","version_major":2,"vers
ion_minor":0}
{"model_id":"956cd408b47940d68cfbee0916786177","version_major":2,"vers
ion_minor":0}
{"model_id":"83658e8f6f35467bb1f36c5a3bc87e64","version_major":2,"vers
ion_minor":0}
{"model_id":"561d2e29151c4171bd83e141810e0894","version_major":2,"vers
ion_minor":0}
{"model_id":"554f4f3769384df8b57b38428afb0051","version_major":2,"vers
ion_minor":0}
{"model_id":"df4bf081538a417aa6b992d7d85f4910","version_major":2,"vers
ion_minor":0}
{"model_id":"18fad7b07fd34450afc9d38fec6a6078","version_major":2,"vers
ion_minor":0}
{"model_id":"443205415dd24133a2bd68da63048d34","version_major":2,"vers
ion_minor":0}
{"model_id":"2f87224d401441518e91e47565aff433","version_major":2,"vers
ion_minor":0}
{"model_id":"f0cb06267c124920b06f951357a9390f","version_major":2,"vers
ion_minor":0}
{"model_id":"9ba34b9bb4be411e813c261ba1ab1d5b","version_major":2,"vers
ion_minor":0}
{"model_id":"522f4ccb9eac4a34a04c1af5d8f5a0b6","version_major":2,"vers
ion_minor":0}
{"model_id":"5a72382a0b2b4f5ba08f62d45a5047f4","version_major":2,"vers
ion_minor":0}
{"model_id":"c6c8e9b55ff748f3898ab2c582004de0","version_major":2,"vers
ion_minor":0}
{"model_id":"3834e81bd6ea4e76a52be80c9e74ce34","version_major":2,"vers
ion_minor":0}
{"model_id":"e8f7d62439b5430fb2ad3850c21ea739","version_major":2,"vers
ion_minor":0}
{"model_id":"58aed79a5d7a484dabf04ce2c1a75c32","version_major":2,"vers
ion_minor":0}
{"model_id":"791d53504d24473ab9992aa091112948","version_major":2,"vers
ion_minor":0}
Weights are saved
Epoch : 18 train loss : 73.33877410888672 valid loss : 73.88326644897461
{"model_id":"06248e87e2a74398bf3e198f379963bd","version_major":2,"vers
ion_minor":0}
{"model_id":"1ee74e47124a4451aaec9c5f9e96d7a9","version_major":2,"vers
ion_minor":0}
{"model_id":"f5c4f7d9fb13428ca463a26be46cfe38","version_major":2,"vers
ion_minor":0}
{"model_id":"8c8265bfc28e47c9a8068c43b865a3b9","version_major":2,"vers
ion_minor":0}
{"model_id":"6de4f48957a2491eac7f4d136898631d","version_major":2,"vers
ion_minor":0}
{"model_id":"f543253e199f4346b6a6110c8592250d","version_major":2,"vers
ion_minor":0}
{"model_id":"319422ae246f4dcc9544aeb6fe57e45d","version_major":2,"vers
ion_minor":0}
{"model_id":"cb44617bf22a459d92e937aec7107963","version_major":2,"vers
ion_minor":0}
{"model_id":"3c3aaf56d6004d5b9cd28f694ff4cd72","version_major":2,"vers
ion_minor":0}
{"model_id":"9949625ef78c4ee5a48d416dedf76cb0","version_major":2,"vers
ion_minor":0}
{"model_id":"102f9ce88a554c95ba2f8dd864993bf2","version_major":2,"vers
ion_minor":0}
{"model_id":"6b55ebce554347e29f42aae61f3b0d4d","version_major":2,"vers
ion_minor":0}
{"model_id":"65ac24c9c4694596abe79bfddf82c84a","version_major":2,"vers
ion_minor":0}
{"model_id":"75a13c0529a6425f9bf20ce764416607","version_major":2,"vers
ion_minor":0}
{"model_id":"37caf0a047544b27875b1d9b50099ef7","version_major":2,"vers
ion_minor":0}
{"model_id":"4bb87ebdfd264473b200e7f188514161","version_major":2,"vers
ion_minor":0}
{"model_id":"2d20c03c32ba4ff18b07f897a3f2ce7b","version_major":2,"vers
ion_minor":0}
{"model_id":"894a33e0aad54a1ab9d4d24c926b1a0d","version_major":2,"vers
ion_minor":0}
{"model_id":"cee7b51e5cf245a58e4f83af53f97df9","version_major":2,"vers
ion_minor":0}
{"model_id":"5f54a767c6da404a8fd0912ade63267a","version_major":2,"vers
ion_minor":0}
{"model_id":"951bf93ddb49421baa7c7ec673e2b1b1","version_major":2,"vers
ion_minor":0}
{"model_id":"c5e45a8c551c4f63a5f6ab4b913b9777","version_major":2,"vers
ion_minor":0}
{"model_id":"4d11953559f7452a8782a181a1f1e430","version_major":2,"vers
ion_minor":0}
{"model_id":"fe7f078c40334c7192f0c516a06f360f","version_major":2,"vers
ion_minor":0}
{"model_id":"286ed6c99a08433da0ba6b766469ede8","version_major":2,"vers
ion_minor":0}
{"model_id":"30a8ebc04962498a91ecca99560d3202","version_major":2,"vers
ion_minor":0}
{"model_id":"0cd8a0ce1455449aae9b15ab67b11a48","version_major":2,"vers
ion_minor":0}
{"model_id":"8d9a107d164743ae9c8694d72b3e50cb","version_major":2,"vers
ion_minor":0}
{"model_id":"4f0fae6c18fc445e9504ba5d3ce987d4","version_major":2,"vers
ion_minor":0}
{"model_id":"f8dfe8c6847b49829509c574678c0021","version_major":2,"vers
ion_minor":0}
{"model_id":"2b733eb023174ff4b5d4d87e6a665a7a","version_major":2,"vers
ion_minor":0}
{"model_id":"1f3c2ceee9a744a4b8cd76a726217c97","version_major":2,"vers
ion_minor":0}
{"model_id":"b0c3cb8fd7b840bbb763a984e54fa835","version_major":2,"vers
ion_minor":0}
{"model_id":"9be311c1ff1c44cab6e4a0629c38a6b0","version_major":2,"vers
ion_minor":0}
{"model_id":"b2e158d3d65e4b32907d1954d197c68e","version_major":2,"vers
ion_minor":0}
{"model_id":"12b580fc3dcd442a97fb6a97af53a4f5","version_major":2,"vers
ion_minor":0}
{"model_id":"12364ad74b574a69933f78d2a061deb3","version_major":2,"vers
ion_minor":0}
{"model_id":"278ea876608345f9bffc6752edd616a2","version_major":2,"vers
ion_minor":0}
{"model_id":"be961bb77f144408bf8e661ff7dc491e","version_major":2,"vers
ion_minor":0}
{"model_id":"b6ccd588718c4d42810ba9edb67c59db","version_major":2,"vers
ion_minor":0}
{"model_id":"f468b49a8252418e8ab8c0d285cb7e1d","version_major":2,"vers
ion_minor":0}
{"model_id":"9de0e44ee07d4f26b5d8eff6832f0cbc","version_major":2,"vers
ion_minor":0}
# Inference
import utils

model.load_state_dict(torch.load('best_model.pt'))
model.eval()

with torch.no_grad():
    image, gt_bbox = validset[18]
    image = image.unsqueeze(0).to(DEVICE)
    out_bbox = model(image)
    utils.compare_plots(image, gt_bbox, out_bbox)
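utils.compare_plots ships with the dataset repository; if it is unavailable, a rough equivalent could look like the sketch below (helper name and drawing details are illustrative, not the repo's implementation):

def compare_plots_sketch(image, gt_bbox, out_bbox):
    # image: (1, 3, H, W) tensor on GPU; boxes are [xmin, ymin, xmax, ymax]
    img = (image.squeeze(0).permute(1, 2, 0).cpu().numpy() * 255).astype(np.uint8).copy()
    gt = [int(v) for v in gt_bbox]
    pred = [int(v) for v in out_bbox.squeeze(0).cpu().numpy()]
    cv2.rectangle(img, (gt[0], gt[1]), (gt[2], gt[3]), (0, 255, 0), 2)          # ground truth in green
    cv2.rectangle(img, (pred[0], pred[1]), (pred[2], pred[3]), (255, 0, 0), 2)  # prediction in red
    plt.imshow(img)
    plt.show()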
This is a starter notebook for the guided project Tweet Emotion Recognition with TensorFlow
Task 1: Introduction
Task 2: Setup and Imports
1. Installing Hugging Face's nlp package
2. Importing libraries
!pip install nlp
%matplotlib inline
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import nlp
import random
def show_history(h):
    epochs_trained = len(h.history['loss'])
    plt.figure(figsize=(16, 6))

    plt.subplot(1, 2, 1)
    plt.plot(range(0, epochs_trained), h.history.get('accuracy'), label='Training')
    plt.plot(range(0, epochs_trained), h.history.get('val_accuracy'), label='Validation')
    plt.ylim([0., 1.])
    plt.xlabel('Epochs')
    plt.ylabel('Accuracy')
    plt.legend()

    plt.subplot(1, 2, 2)
    plt.plot(range(0, epochs_trained), h.history.get('loss'), label='Training')
    plt.plot(range(0, epochs_trained), h.history.get('val_loss'), label='Validation')
    plt.xlabel('Epochs')
    plt.ylabel('Loss')
    plt.legend()

    plt.show()
def show_confusion_matrix(y_true, y_pred, classes):
    # confusion-matrix computation reconstructed; the original helper may differ
    cm = tf.math.confusion_matrix(y_true, y_pred, num_classes=len(classes)).numpy()
    plt.figure(figsize=(8, 8))
    sp = plt.subplot(1, 1, 1)
    ctx = sp.matshow(cm)
    plt.xticks(list(range(0, 6)), labels=classes)
    plt.yticks(list(range(0, 6)), labels=classes)
    plt.colorbar(ctx)
    plt.show()
{"model_id":"48e6c423f89b49e8924d36611a5aeb13","version_major":2,"vers
ion_minor":0}
{"model_id":"685cae32b9df4be2a5bb8534b83da466","version_major":2,"vers
ion_minor":0}
{"model_id":"d3a263d898eb4d818da3982702ed86b4","version_major":2,"vers
ion_minor":0}
{"model_id":"5ccd40ac413a44628562db72c199eed9","version_major":2,"vers
ion_minor":0}
{"model_id":"49dd9fc602fa42c59ca1a9caaff88d7f","version_major":2,"vers
ion_minor":0}
{"model_id":"bf52282fb99f4c55a1fd15874062e68f","version_major":2,"vers
ion_minor":0}
{"model_id":"9153e850a4f44ff595b4f44a6a222307","version_major":2,"vers
ion_minor":0}
ds
DatasetDict({
train: Dataset({
features: ['text', 'label'],
num_rows: 16000
})
validation: Dataset({
features: ['text', 'label'],
num_rows: 2000
})
test: Dataset({
features: ['text', 'label'],
num_rows: 2000
})
})
train = ds['train']
val = ds['validation']
test = ds['test']
train
Dataset({
features: ['text', 'label'],
num_rows: 16000
})
def get_tweets(data):
    # extract parallel lists of tweet texts and integer labels
    tweets = [example['text'] for example in data]
    labels = [example['label'] for example in data]
    return tweets, labels
tweets,labels=get_tweets(train)
tweets[0]
{"type":"string"}
labels[0]
tweets[1],labels[1]
import numpy as np
np.unique(labels)
array([0, 1, 2, 3, 4, 5])
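For reference, the six integer labels in the Hugging Face emotion dataset correspond to named emotions; a small lookup (name order as documented for that dataset, not taken from this notebook):

# Label names for the emotion dataset (index -> name); used again at the end.
class_names = ['sadness', 'joy', 'love', 'anger', 'fear', 'surprise']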
Task 4: Tokenizer
1. Tokenizing the tweets
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences

# Tokenizer-creation cell was missing; vocabulary size of 10000 follows the
# 10000 x 16 embedding in the model summary below (oov_token is assumed).
tokenised = Tokenizer(num_words=10000, oov_token='<UNK>')
tokenised.fit_on_texts(tweets)
tokenised.texts_to_sequences([tweets[0]])

# Pad/truncate every tweet to a fixed length of 50 tokens.
maxlen = 50
padded = pad_sequences(tokenised.texts_to_sequences(tweets), maxlen=maxlen, padding='post', truncating='post')
len(padded[0])
50
set(labels)
{0, 1, 2, 3, 4, 5}
plt.hist(labels)
plt.show()
print(labels[0])
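The model-definition cell is missing from this dump; reconstructing it from the built summary further below (a 10,000-word embedding of size 16, two bidirectional LSTMs of 20 units, and a 6-way softmax head), a likely sketch:

# Reconstructed from the parameter counts in model.summary() below
# (160,000 / 5,920 / 9,760 / 246 params); input_length matches the
# deprecation warning emitted after compile.
model = tf.keras.models.Sequential([
    tf.keras.layers.Embedding(10000, 16, input_length=50),
    tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(20, return_sequences=True)),
    tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(20)),
    tf.keras.layers.Dense(6, activation='softmax')
])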
model.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
/usr/local/lib/python3.11/dist-packages/keras/src/layers/core/embedding.py:90: UserWarning: Argument `input_length` is deprecated. Just remove it.
  warnings.warn(
model.summary()
Model: "sequential"
┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳
━━━━━━━━━━━━━━━━━┓
┃ Layer (type) ┃ Output Shape ┃
Param # ┃
┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇
━━━━━━━━━━━━━━━━━┩
│ embedding (Embedding) │ ? │
0 (unbuilt) │
├──────────────────────────────────────┼─────────────────────────────┼
─────────────────┤
│ bidirectional (Bidirectional) │ ? │
0 (unbuilt) │
├──────────────────────────────────────┼─────────────────────────────┼
─────────────────┤
│ bidirectional_1 (Bidirectional) │ ? │
0 (unbuilt) │
├──────────────────────────────────────┼─────────────────────────────┼
─────────────────┤
│ dense (Dense) │ ? │
0 (unbuilt) │
└──────────────────────────────────────┴─────────────────────────────┴
─────────────────┘
model.build(input_shape=(None, maxlen))
model.summary()
Model: "sequential"
┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳
━━━━━━━━━━━━━━━━━┓
┃ Layer (type) ┃ Output Shape ┃
Param # ┃
┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇
━━━━━━━━━━━━━━━━━┩
│ embedding (Embedding) │ (None, 50, 16) │
160,000 │
├──────────────────────────────────────┼─────────────────────────────┼
─────────────────┤
│ bidirectional (Bidirectional) │ (None, 50, 40) │
5,920 │
├──────────────────────────────────────┼─────────────────────────────┼
─────────────────┤
│ bidirectional_1 (Bidirectional) │ (None, 40) │
9,760 │
├──────────────────────────────────────┼─────────────────────────────┼
─────────────────┤
│ dense (Dense) │ (None, 6) │
246 │
└──────────────────────────────────────┴─────────────────────────────┴
─────────────────┘
val_tweets, val_labels = get_tweets(val)
val_sequences = tokenised.texts_to_sequences(val_tweets)
val_padded = pad_sequences(val_sequences, maxlen=maxlen, padding='post', truncating='post')
val_tweets[0], val_labels[0]
('im feeling quite sad and sorry for myself but ill snap out of it soon', 0)
h = model.fit(
    padded, np.array(labels),
    validation_data=(val_padded, np.array(val_labels)),
    epochs=20,
    callbacks=[
        tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=2)
    ]
)
Epoch 1/20
500/500 ━━━━━━━━━━━━━━━━━━━━ 19s 12ms/step - accuracy: 0.3874 - loss: 1.5271 - val_accuracy: 0.6530 - val_loss: 0.9299
Epoch 2/20
500/500 ━━━━━━━━━━━━━━━━━━━━ 15s 11ms/step - accuracy: 0.6875 - loss: 0.8073 - val_accuracy: 0.7270 - val_loss: 0.7383
Epoch 3/20
500/500 ━━━━━━━━━━━━━━━━━━━━ 10s 11ms/step - accuracy: 0.8069 - loss: 0.4949 - val_accuracy: 0.8410 - val_loss: 0.4937
Epoch 4/20
500/500 ━━━━━━━━━━━━━━━━━━━━ 6s 12ms/step - accuracy: 0.9061 - loss: 0.2860 - val_accuracy: 0.8650 - val_loss: 0.4383
Epoch 5/20
500/500 ━━━━━━━━━━━━━━━━━━━━ 5s 11ms/step - accuracy: 0.9375 - loss: 0.2080 - val_accuracy: 0.8765 - val_loss: 0.4161
Epoch 6/20
500/500 ━━━━━━━━━━━━━━━━━━━━ 10s 11ms/step - accuracy: 0.9599 - loss: 0.1389 - val_accuracy: 0.8845 - val_loss: 0.3720
Epoch 7/20
500/500 ━━━━━━━━━━━━━━━━━━━━ 6s 12ms/step - accuracy: 0.9689 - loss: 0.1004 - val_accuracy: 0.8975 - val_loss: 0.3425
Epoch 8/20
500/500 ━━━━━━━━━━━━━━━━━━━━ 10s 11ms/step - accuracy: 0.9708 - loss: 0.0948 - val_accuracy: 0.8975 - val_loss: 0.3518
Epoch 9/20
500/500 ━━━━━━━━━━━━━━━━━━━━ 6s 13ms/step - accuracy: 0.9775 - loss: 0.0735 - val_accuracy: 0.8880 - val_loss: 0.3817
test_tweets,test_labels = get_tweets(test)
test_sequences = tokenised.texts_to_sequences(test_tweets)
test_padded = pad_sequences(test_sequences, maxlen=maxlen, padding='post', truncating='post')
_ = model.evaluate(test_padded, np.array(test_labels))
i = random.randint(0, len(test_tweets) - 1)

print('Sentence: ', test_tweets[i])
print('Emotion linked with sentence: ', test_labels[i])

# Predict on the padded sequence so the input length matches what the model was built with.
p = np.expand_dims(test_padded[i], axis=0)
predictions = model.predict(p)
pred = np.argmax(predictions, axis=1)[0]
print('Predicted emotion: ', pred)
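show_confusion_matrix defined earlier is never called in this dump; a short sketch of how it could close the loop on the test split (class_names comes from the hypothetical lookup added above):

# Predict over the whole test split and plot the confusion matrix.
test_preds = np.argmax(model.predict(test_padded), axis=1)
show_confusion_matrix(np.array(test_labels), test_preds, class_names)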