Assignment: AI Platforms (Mostafa Hazem)

import torch
import torch.nn as nn
import seaborn as sns
import matplotlib.pyplot as plt

class SimpleNN(nn.Module):
    def __init__(self):
        super(SimpleNN, self).__init__()
        self.fc1 = nn.Linear(3, 3)  # hidden layer: 3 inputs -> 3 units
        self.fc2 = nn.Linear(3, 1)  # output layer: 3 units -> 1 output
        self.sigmoid = nn.Sigmoid()
        self.tanh = nn.Tanh()

    def forward(self, x):
        x = self.sigmoid(self.fc1(x))  # sigmoid activation on the hidden layer
        x = self.tanh(self.fc2(x))     # tanh activation on the output
        return x

model = SimpleNN()
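As a quick sanity check (an addition, not part of the original assignment), the parameter shapes can be printed to confirm the 3 -> 3 -> 1 layout:

# Sketch: print parameter names and shapes to confirm the architecture.
for name, param in model.named_parameters():
    print(name, tuple(param.shape))
# Expected: fc1.weight (3, 3), fc1.bias (3,), fc2.weight (1, 3), fc2.bias (1,)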

x = torch.rand((10, 3))  # input tensor with shape (batch_size, input_size)
initial_output = model(x).detach().numpy()

sns.lineplot(data=initial_output)
plt.title('Network Predictions Before Training')
plt.show()
Making the network more trainable.
# Parameters of an nn.Module already require gradients by default;
# this loop just makes that explicit.
for param in model.parameters():
    param.requires_grad = True

optimizer = torch.optim.SGD(model.parameters(), lr=0.01)  # plain SGD optimizer
loss_fn = nn.MSELoss()  # mean squared error loss

y_true = torch.rand((10, 1))  # random targets matching the output shape
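Note that y_true is random, so the network is simply fitting noise; the exercise is about the training mechanics, not the data. For reference, nn.MSELoss with its default reduction='mean' averages the squared errors, equivalent to this hand-rolled sketch:

# Sketch: what nn.MSELoss() computes with its default reduction='mean'.
with torch.no_grad():
    manual_mse = ((model(x) - y_true) ** 2).mean()
    print(manual_mse.item())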

Training loop.
# Training loop
epochs = 100
for epoch in range(epochs):
    # Zero the gradients accumulated from the previous step
    optimizer.zero_grad()

    # Forward pass
    output = model(x)

    # Compute the loss
    loss = loss_fn(output, y_true)

    # Backward pass and optimization step
    loss.backward()
    optimizer.step()

    # Print the loss after each epoch
    print(f'Epoch {epoch+1}/{epochs}, Loss: {loss.item()}')

Epoch 1/100, Loss: 0.10669004917144775


Epoch 2/100, Loss: 0.1062290295958519
Epoch 3/100, Loss: 0.10578391700983047
Epoch 4/100, Loss: 0.10535403341054916
Epoch 5/100, Loss: 0.10493876785039902
Epoch 6/100, Loss: 0.10453753173351288
Epoch 7/100, Loss: 0.10414975881576538
Epoch 8/100, Loss: 0.10377492755651474
Epoch 9/100, Loss: 0.10341250896453857
Epoch 10/100, Loss: 0.10306201875209808
Epoch 11/100, Loss: 0.10272300243377686
Epoch 12/100, Loss: 0.10239502042531967
Epoch 13/100, Loss: 0.10207762569189072
Epoch 14/100, Loss: 0.10177043825387955
Epoch 15/100, Loss: 0.10147307068109512
Epoch 16/100, Loss: 0.10118510574102402
Epoch 17/100, Loss: 0.10090626776218414
Epoch 18/100, Loss: 0.10063614696264267
Epoch 19/100, Loss: 0.1003744974732399
Epoch 20/100, Loss: 0.10012093931436539
Epoch 21/100, Loss: 0.09987520426511765
Epoch 22/100, Loss: 0.0996369943022728
Epoch 23/100, Loss: 0.09940606355667114
Epoch 24/100, Loss: 0.09918215125799179
Epoch 25/100, Loss: 0.09896498918533325
Epoch 26/100, Loss: 0.09875435382127762
Epoch 27/100, Loss: 0.09854999929666519
Epoch 28/100, Loss: 0.09835171699523926
Epoch 29/100, Loss: 0.0981593057513237
Epoch 30/100, Loss: 0.09797254204750061
Epoch 31/100, Loss: 0.09779123961925507
Epoch 32/100, Loss: 0.09761520475149155
Epoch 33/100, Loss: 0.09744429588317871
Epoch 34/100, Loss: 0.09727828204631805
Epoch 35/100, Loss: 0.09711702913045883
Epoch 36/100, Loss: 0.0969604030251503
Epoch 37/100, Loss: 0.09680821746587753
Epoch 38/100, Loss: 0.0966603234410286
Epoch 39/100, Loss: 0.09651659429073334
Epoch 40/100, Loss: 0.09637688845396042
Epoch 41/100, Loss: 0.09624110162258148
Epoch 42/100, Loss: 0.0961090475320816
Epoch 43/100, Loss: 0.09598065912723541
Epoch 44/100, Loss: 0.09585578739643097
Epoch 45/100, Loss: 0.09573434293270111
Epoch 46/100, Loss: 0.09561620652675629
Epoch 47/100, Loss: 0.09550128877162933
Epoch 48/100, Loss: 0.0953894630074501
Epoch 49/100, Loss: 0.09528064727783203
Epoch 50/100, Loss: 0.09517475217580795
Epoch 51/100, Loss: 0.09507166594266891
Epoch 52/100, Loss: 0.09497131407260895
Epoch 53/100, Loss: 0.09487363696098328
Epoch 54/100, Loss: 0.09477851539850235
Epoch 55/100, Loss: 0.09468590468168259
Epoch 56/100, Loss: 0.09459570050239563
Epoch 57/100, Loss: 0.09450783580541611
Epoch 58/100, Loss: 0.09442225843667984
Epoch 59/100, Loss: 0.09433887898921967
Epoch 60/100, Loss: 0.094257652759552
Epoch 61/100, Loss: 0.09417849034070969
Epoch 62/100, Loss: 0.09410135447978973
Epoch 63/100, Loss: 0.09402619302272797
Epoch 64/100, Loss: 0.09395292401313782
Epoch 65/100, Loss: 0.0938815027475357
Epoch 66/100, Loss: 0.09381186217069626
Epoch 67/100, Loss: 0.09374396502971649
Epoch 68/100, Loss: 0.09367778152227402
Epoch 69/100, Loss: 0.09361321479082108
Epoch 70/100, Loss: 0.09355024993419647
Epoch 71/100, Loss: 0.09348883479833603
Epoch 72/100, Loss: 0.09342894703149796
Epoch 73/100, Loss: 0.09337051212787628
Epoch 74/100, Loss: 0.09331348538398743
Epoch 75/100, Loss: 0.0932578593492508
Epoch 76/100, Loss: 0.09320355951786041
Epoch 77/100, Loss: 0.09315059334039688
Epoch 78/100, Loss: 0.09309887886047363
Epoch 79/100, Loss: 0.09304840862751007
Epoch 80/100, Loss: 0.09299913793802261
Epoch 81/100, Loss: 0.09295105189085007
Epoch 82/100, Loss: 0.09290408343076706
Epoch 83/100, Loss: 0.092858225107193
Epoch 84/100, Loss: 0.09281345456838608
Epoch 85/100, Loss: 0.09276974201202393
Epoch 86/100, Loss: 0.09272702038288116
Epoch 87/100, Loss: 0.09268531203269958
Epoch 88/100, Loss: 0.09264455735683441
Epoch 89/100, Loss: 0.09260474145412445
Epoch 90/100, Loss: 0.0925658717751503
Epoch 91/100, Loss: 0.0925278589129448
Epoch 92/100, Loss: 0.09249073266983032
Epoch 93/100, Loss: 0.0924544483423233
Epoch 94/100, Loss: 0.09241898357868195
Epoch 95/100, Loss: 0.09238431602716446
Epoch 96/100, Loss: 0.09235041588544846
Epoch 97/100, Loss: 0.09231731295585632
Epoch 98/100, Loss: 0.0922849178314209
Epoch 99/100, Loss: 0.09225326776504517
Epoch 100/100, Loss: 0.09222230315208435
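The loss falls steadily but slowly, as expected for plain SGD with lr=0.01 on random targets. As a sketch (not in the original code), recording the per-epoch losses makes the curve easy to plot:

# Sketch: the same training loop, collecting losses for plotting.
losses = []
for epoch in range(epochs):
    optimizer.zero_grad()
    loss = loss_fn(model(x), y_true)
    loss.backward()
    optimizer.step()
    losses.append(loss.item())

sns.lineplot(x=list(range(1, epochs + 1)), y=losses)
plt.title('Training Loss per Epoch')
plt.xlabel('Epoch')
plt.ylabel('MSE Loss')
plt.show()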

Predictions before and after training.
final_output = model(x).detach().numpy()

fig, axs = plt.subplots(1, 2, figsize=(10, 5))

sns.lineplot(data=initial_output, ax=axs[0])
axs[0].set_title('Predictions Before Training')

sns.lineplot(data=final_output, ax=axs[1])
axs[1].set_title('Predictions After Training')

plt.show()
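Beyond the visual comparison, a quantitative check (an addition, not part of the original assignment) is to compare the MSE of the stored initial and final predictions against the targets:

# Sketch: numeric before/after comparison using the stored outputs.
import numpy as np

y_np = y_true.numpy()
print(f'MSE before training: {np.mean((initial_output - y_np) ** 2):.4f}')
print(f'MSE after training:  {np.mean((final_output - y_np) ** 2):.4f}')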
