Deep Learning project

import pandas as pd

import numpy as np

import matplotlib.pyplot as plt


import seaborn as sns

from sklearn.model_selection import train_test_split


from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import r2_score

# deep learning libraries


import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense

# load the dataset
df = pd.read_csv(r'D:\ML\Machine_Learning\ML_files\Data sets\tips\Admission_Predict.csv')
df.head()

   Serial No.  GRE Score  TOEFL Score  University Rating  SOP  LOR  CGPA  Research  Chance of Admit
0           1        337          118                  4  4.5  4.5  9.65         1             0.92
1           2        324          107                  4  4.0  4.5  8.87         1             0.76
2           3        316          104                  3  3.0  3.5  8.00         1             0.72
3           4        322          110                  3  3.5  2.5  8.67         1             0.80
4           5        314          103                  2  2.0  3.0  8.21         0             0.65
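A quick look at column dtypes and summary statistics is a natural next step; this small cell is my own addition, not part of the original notebook:

# column dtypes, non-null counts, and basic descriptive statistics
df.info()
df.describe()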

# data shape
df.shape

(500, 9)

df.isnull().sum()

Serial No. 0
GRE Score 0
TOEFL Score 0
University Rating 0
SOP 0
LOR 0
CGPA 0
Research 0
Chance of Admit 0
dtype: int64

df.duplicated()

0 False
1 False
2 False
3 False
4 False
...
495 False
496 False
497 False
498 False
499 False
Length: 500, dtype: bool
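The row-by-row duplicated() output above can be collapsed into a single count, and since seaborn is imported but otherwise unused, a correlation heatmap is a quick way to see which features track the admission chance. A minimal sketch, not part of the original notebook:

# count duplicate rows directly (0 means nothing to drop)
print(df.duplicated().sum())

# correlation heatmap of all numeric columns
plt.figure(figsize=(8, 6))
sns.heatmap(df.corr(), annot=True, cmap='coolwarm')
plt.show()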

# split the data into features (X) and target (y)

X = df.iloc[:, 0:-1]
y = df.iloc[:, -1]

# normalize the features to the [0, 1] range

scaler = MinMaxScaler()

X = scaler.fit_transform(X)

# split into training and test sets

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
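An optional refinement, sketched below as an alternative to the two cells above (it is not what this notebook runs, and names like X_raw are just illustrative): passing random_state makes the split reproducible, and fitting the scaler on the training portion only avoids leaking test-set statistics into the scaling.

# reproducible split, then scale using training statistics only
X_raw = df.iloc[:, 0:-1]     # illustrative name for the unscaled features
y = df.iloc[:, -1]

X_train, X_test, y_train, y_test = train_test_split(
    X_raw, y, test_size=0.2, random_state=42)

scaler = MinMaxScaler()
X_train = scaler.fit_transform(X_train)   # fit min/max on the training data only
X_test = scaler.transform(X_test)         # reuse the training min/max on the test data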

# shape of X train
X_train.shape

(400, 8)
# sequential DL model
model = Sequential()

model.add(Dense(10, activation='relu', input_dim=8))  # first hidden layer: 10 neurons, ReLU, takes the 8 input features
model.add(Dense(5, activation='relu'))                # hidden layer 2: 5 neurons, ReLU
model.add(Dense(5, activation='relu'))                # hidden layer 3: 5 neurons, ReLU
model.add(Dense(1, activation='linear'))              # output layer: 1 neuron, linear activation (regression)

# summary of the model architecture and parameter counts

model.summary()

Model: "sequential_3"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
dense_12 (Dense) (None, 10) 90

dense_13 (Dense) (None, 5) 55

dense_14 (Dense) (None, 5) 30

dense_15 (Dense) (None, 1) 6

=================================================================
Total params: 181 (724.00 Byte)
Trainable params: 181 (724.00 Byte)
Non-trainable params: 0 (0.00 Byte)
_________________________________________________________________
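The parameter counts follow from weights plus biases in each Dense layer: 8×10 + 10 = 90, 10×5 + 5 = 55, 5×5 + 5 = 30, and 5×1 + 1 = 6, which adds up to the 181 trainable parameters reported above.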

# compiling the model


model.compile(loss='mean_squared_error',optimizer='adam')
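An optional variant, my own addition and not used in the training run below: compile with MAE as an extra metric and stop training once the validation loss stops improving, using Keras' EarlyStopping callback.

from tensorflow.keras.callbacks import EarlyStopping

# variant: track MAE alongside the MSE loss and stop early when val_loss stalls
model.compile(loss='mean_squared_error', optimizer='adam', metrics=['mae'])
early_stop = EarlyStopping(monitor='val_loss', patience=10, restore_best_weights=True)
# then pass callbacks=[early_stop] to model.fit(...)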

# fitting the model on the training data, validating on the test set

history = model.fit(X_train, y_train, epochs=100, validation_data=(X_test, y_test))

Epoch 1/100
13/13 [==============================] - 1s 17ms/step - loss: 0.4376 - val_loss: 0.3378
Epoch 2/100
13/13 [==============================] - 0s 5ms/step - loss: 0.2613 - val_loss: 0.1825
Epoch 3/100
13/13 [==============================] - 0s 6ms/step - loss: 0.1343 - val_loss: 0.0807
Epoch 4/100
13/13 [==============================] - 0s 5ms/step - loss: 0.0550 - val_loss: 0.0288
Epoch 5/100
13/13 [==============================] - 0s 5ms/step - loss: 0.0210 - val_loss: 0.0151
Epoch 6/100
13/13 [==============================] - 0s 6ms/step - loss: 0.0144 - val_loss: 0.0150
Epoch 7/100
13/13 [==============================] - 0s 6ms/step - loss: 0.0139 - val_loss: 0.0139
Epoch 8/100
13/13 [==============================] - 0s 5ms/step - loss: 0.0129 - val_loss: 0.0128
Epoch 9/100
13/13 [==============================] - 0s 5ms/step - loss: 0.0123 - val_loss: 0.0120
Epoch 10/100
13/13 [==============================] - 0s 5ms/step - loss: 0.0117 - val_loss: 0.0114
Epoch 11/100
13/13 [==============================] - 0s 4ms/step - loss: 0.0111 - val_loss: 0.0108
Epoch 12/100
13/13 [==============================] - 0s 4ms/step - loss: 0.0106 - val_loss: 0.0103
Epoch 13/100
13/13 [==============================] - 0s 4ms/step - loss: 0.0101 - val_loss: 0.0098
Epoch 14/100
13/13 [==============================] - 0s 5ms/step - loss: 0.0097 - val_loss: 0.0093
Epoch 15/100
13/13 [==============================] - 0s 4ms/step - loss: 0.0092 - val_loss: 0.0089
Epoch 16/100
13/13 [==============================] - 0s 5ms/step - loss: 0.0089 - val_loss: 0.0084
Epoch 17/100
13/13 [==============================] - 0s 5ms/step - loss: 0.0084 - val_loss: 0.0079
Epoch 18/100
13/13 [==============================] - 0s 4ms/step - loss: 0.0080 - val_loss: 0.0076
Epoch 19/100
13/13 [==============================] - 0s 5ms/step - loss: 0.0077 - val_loss: 0.0072
Epoch 20/100
13/13 [==============================] - 0s 4ms/step - loss: 0.0074 - val_loss: 0.0069
Epoch 21/100
13/13 [==============================] - 0s 5ms/step - loss: 0.0072 - val_loss: 0.0065
Epoch 22/100
13/13 [==============================] - 0s 5ms/step - loss: 0.0069 - val_loss: 0.0063
Epoch 23/100
13/13 [==============================] - 0s 5ms/step - loss: 0.0067 - val_loss: 0.0059
Epoch 24/100
13/13 [==============================] - 0s 4ms/step - loss: 0.0065 - val_loss: 0.0057
Epoch 25/100
13/13 [==============================] - 0s 5ms/step - loss: 0.0063 - val_loss: 0.0055
Epoch 26/100
13/13 [==============================] - 0s 5ms/step - loss: 0.0061 - val_loss: 0.0053
Epoch 27/100
13/13 [==============================] - 0s 6ms/step - loss: 0.0060 - val_loss: 0.0051
Epoch 28/100
13/13 [==============================] - 0s 5ms/step - loss: 0.0058 - val_loss: 0.0049
Epoch 29/100
13/13 [==============================] - 0s 6ms/step - loss: 0.0057 - val_loss: 0.0047
Epoch 30/100
13/13 [==============================] - 0s 5ms/step - loss: 0.0055 - val_loss: 0.0046
Epoch 31/100
13/13 [==============================] - 0s 5ms/step - loss: 0.0054 - val_loss: 0.0044
Epoch 32/100
13/13 [==============================] - 0s 6ms/step - loss: 0.0052 - val_loss: 0.0044
Epoch 33/100
13/13 [==============================] - 0s 5ms/step - loss: 0.0051 - val_loss: 0.0042
Epoch 34/100
13/13 [==============================] - 0s 5ms/step - loss: 0.0050 - val_loss: 0.0042
Epoch 35/100
13/13 [==============================] - 0s 5ms/step - loss: 0.0049 - val_loss: 0.0041
Epoch 36/100
13/13 [==============================] - 0s 6ms/step - loss: 0.0049 - val_loss: 0.0040
Epoch 37/100
13/13 [==============================] - 0s 5ms/step - loss: 0.0048 - val_loss: 0.0040
Epoch 38/100
13/13 [==============================] - 0s 5ms/step - loss: 0.0047 - val_loss: 0.0039
Epoch 39/100
13/13 [==============================] - 0s 5ms/step - loss: 0.0047 - val_loss: 0.0039
Epoch 40/100
13/13 [==============================] - 0s 5ms/step - loss: 0.0047 - val_loss: 0.0038
Epoch 41/100
13/13 [==============================] - 0s 6ms/step - loss: 0.0046 - val_loss: 0.0038
Epoch 42/100
13/13 [==============================] - 0s 6ms/step - loss: 0.0046 - val_loss: 0.0038
Epoch 43/100
13/13 [==============================] - 0s 6ms/step - loss: 0.0045 - val_loss: 0.0037
Epoch 44/100
13/13 [==============================] - 0s 5ms/step - loss: 0.0045 - val_loss: 0.0037
Epoch 45/100
13/13 [==============================] - 0s 6ms/step - loss: 0.0045 - val_loss: 0.0036
Epoch 46/100
13/13 [==============================] - 0s 6ms/step - loss: 0.0044 - val_loss: 0.0036
Epoch 47/100
13/13 [==============================] - 0s 6ms/step - loss: 0.0044 - val_loss: 0.0036
Epoch 48/100
13/13 [==============================] - 0s 7ms/step - loss: 0.0043 - val_loss: 0.0035
Epoch 49/100
13/13 [==============================] - 0s 5ms/step - loss: 0.0043 - val_loss: 0.0035
Epoch 50/100
13/13 [==============================] - 0s 5ms/step - loss: 0.0043 - val_loss: 0.0034
Epoch 51/100
13/13 [==============================] - 0s 5ms/step - loss: 0.0043 - val_loss: 0.0034
Epoch 52/100
13/13 [==============================] - 0s 5ms/step - loss: 0.0042 - val_loss: 0.0034
Epoch 53/100
13/13 [==============================] - 0s 5ms/step - loss: 0.0042 - val_loss: 0.0033
Epoch 54/100
13/13 [==============================] - 0s 5ms/step - loss: 0.0042 - val_loss: 0.0034
Epoch 55/100
13/13 [==============================] - 0s 7ms/step - loss: 0.0042 - val_loss: 0.0033
Epoch 56/100
13/13 [==============================] - 0s 8ms/step - loss: 0.0041 - val_loss: 0.0033
Epoch 57/100
13/13 [==============================] - 0s 7ms/step - loss: 0.0041 - val_loss: 0.0033
Epoch 58/100
13/13 [==============================] - 0s 15ms/step - loss: 0.0041 - val_loss: 0.0033
Epoch 59/100
13/13 [==============================] - 0s 5ms/step - loss: 0.0041 - val_loss: 0.0032
Epoch 60/100
13/13 [==============================] - 0s 5ms/step - loss: 0.0041 - val_loss: 0.0032
Epoch 61/100
13/13 [==============================] - 0s 6ms/step - loss: 0.0040 - val_loss: 0.0032
Epoch 62/100
13/13 [==============================] - 0s 5ms/step - loss: 0.0040 - val_loss: 0.0032
Epoch 63/100
13/13 [==============================] - 0s 6ms/step - loss: 0.0040 - val_loss: 0.0032
Epoch 64/100
13/13 [==============================] - 0s 5ms/step - loss: 0.0040 - val_loss: 0.0032
Epoch 65/100
13/13 [==============================] - 0s 6ms/step - loss: 0.0040 - val_loss: 0.0032
Epoch 66/100
13/13 [==============================] - 0s 6ms/step - loss: 0.0039 - val_loss: 0.0032
Epoch 67/100
13/13 [==============================] - 0s 6ms/step - loss: 0.0039 - val_loss: 0.0032
Epoch 68/100
13/13 [==============================] - 0s 7ms/step - loss: 0.0039 - val_loss: 0.0032
Epoch 69/100
13/13 [==============================] - 0s 7ms/step - loss: 0.0039 - val_loss: 0.0031
Epoch 70/100
13/13 [==============================] - 0s 5ms/step - loss: 0.0039 - val_loss: 0.0031
Epoch 71/100
13/13 [==============================] - 0s 5ms/step - loss: 0.0039 - val_loss: 0.0032
Epoch 72/100
13/13 [==============================] - 0s 7ms/step - loss: 0.0039 - val_loss: 0.0031
Epoch 73/100
13/13 [==============================] - 0s 6ms/step - loss: 0.0038 - val_loss: 0.0032
Epoch 74/100
13/13 [==============================] - 0s 6ms/step - loss: 0.0038 - val_loss: 0.0031
Epoch 75/100
13/13 [==============================] - 0s 6ms/step - loss: 0.0038 - val_loss: 0.0031
Epoch 76/100
13/13 [==============================] - 0s 6ms/step - loss: 0.0038 - val_loss: 0.0031
Epoch 77/100
13/13 [==============================] - 0s 6ms/step - loss: 0.0038 - val_loss: 0.0031
Epoch 78/100
13/13 [==============================] - 0s 7ms/step - loss: 0.0038 - val_loss: 0.0031
Epoch 79/100
13/13 [==============================] - 0s 6ms/step - loss: 0.0038 - val_loss: 0.0031
Epoch 80/100
13/13 [==============================] - 0s 5ms/step - loss: 0.0038 - val_loss: 0.0031
Epoch 81/100
13/13 [==============================] - 0s 6ms/step - loss: 0.0038 - val_loss: 0.0031
Epoch 82/100
13/13 [==============================] - 0s 5ms/step - loss: 0.0037 - val_loss: 0.0031
Epoch 83/100
13/13 [==============================] - 0s 6ms/step - loss: 0.0037 - val_loss: 0.0031
Epoch 84/100
13/13 [==============================] - 0s 8ms/step - loss: 0.0038 - val_loss: 0.0031
Epoch 85/100
13/13 [==============================] - 0s 5ms/step - loss: 0.0038 - val_loss: 0.0031
Epoch 86/100
13/13 [==============================] - 0s 6ms/step - loss: 0.0037 - val_loss: 0.0031
Epoch 87/100
13/13 [==============================] - 0s 6ms/step - loss: 0.0038 - val_loss: 0.0031
Epoch 88/100
13/13 [==============================] - 0s 6ms/step - loss: 0.0037 - val_loss: 0.0031
Epoch 89/100
13/13 [==============================] - 0s 6ms/step - loss: 0.0037 - val_loss: 0.0031
Epoch 90/100
13/13 [==============================] - 0s 5ms/step - loss: 0.0037 - val_loss: 0.0031
Epoch 91/100
13/13 [==============================] - 0s 7ms/step - loss: 0.0036 - val_loss: 0.0031
Epoch 92/100
13/13 [==============================] - 0s 6ms/step - loss: 0.0036 - val_loss: 0.0031
Epoch 93/100
13/13 [==============================] - 0s 7ms/step - loss: 0.0036 - val_loss: 0.0031
Epoch 94/100
13/13 [==============================] - 0s 5ms/step - loss: 0.0037 - val_loss: 0.0031
Epoch 95/100
13/13 [==============================] - 0s 7ms/step - loss: 0.0037 - val_loss: 0.0031
Epoch 96/100
13/13 [==============================] - 0s 6ms/step - loss: 0.0036 - val_loss: 0.0031
Epoch 97/100
13/13 [==============================] - 0s 6ms/step - loss: 0.0036 - val_loss: 0.0031
Epoch 98/100
13/13 [==============================] - 0s 5ms/step - loss: 0.0036 - val_loss: 0.0031
Epoch 99/100
13/13 [==============================] - 0s 6ms/step - loss: 0.0036 - val_loss: 0.0031
Epoch 100/100
13/13 [==============================] - 0s 5ms/step - loss: 0.0036 - val_loss: 0.0031

# prediction
y_pred = model.predict(X_test)

y_pred

4/4 [==============================] - 0s 4ms/step


array([[0.6936888 ],
[0.92938626],
[0.74157476],
[0.6301266 ],
[0.68081284],
[0.6122888 ],
[0.73106897],
[0.60543776],
[0.85754573],
[0.6935367 ],
[0.7340158 ],
[0.95910454],
[0.74666995],
[0.7448854 ],
[0.71985066],
[0.6245278 ],
[0.8881661 ],
[0.8474545 ],
[0.5655311 ],
[0.50671387],
[0.61913395],
[0.6186676 ],
[0.685156 ],
[0.70812553],
[0.92674994],
[0.80991864],
[0.6627683 ],
[0.854504 ],
[0.9069913 ],
[0.7074957 ],
[0.67266953],
[0.8490604 ],
[0.88345313],
[0.93322647],
[0.73751926],
[0.9380456 ],
[0.8707433 ],
[0.981235 ],
[0.73912823],
[0.62074834],
[0.54591495],
[0.690287 ],
[0.8868509 ],
[0.5574669 ],
[0.8472129 ],
[0.5701669 ],
[0.6458797 ],
[0.66652954],
[0.6207608 ],
[0.78548795],
[0.63606215],
[0.5278509 ],
[0.74064046],
[0.6448438 ],
[0.77293485],
[0.7543216 ],
[0.6873395 ],
[0.6694189 ],
[0.5263325 ],
[0.57865405],
[0.8800019 ],
[0.81822246],
[0.6971517 ],
[0.9058571 ],
[0.7174143 ],
[0.65481156],
[0.9742506 ],
[0.70964247],
[0.6877571 ],
[0.7115272 ],
[0.59870607],
[0.559512 ],
[0.60999477],
[0.73319495],
[0.6387522 ],
[0.9804665 ],
[0.6763569 ],
[0.51305485],
[0.46277934],
[0.8231102 ],
[0.92015004],
[0.5582597 ],
[0.6548325 ],
[0.77275527],
[0.8372188 ],
[0.80997306],
[0.67692554],
[0.66746914],
[0.92748946],
[0.78086615],
[0.81939656],
[0.79209137],
[0.6725378 ],
[0.48249662],
[0.52589655],
[0.8017299 ],
[0.73891056],
[0.71139294],
[0.9425614 ],
[0.6574999 ]], dtype=float32)

# R² score of the model on the test set

r2_score(y_test, y_pred)

0.8400383992995096
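R² alone can hide the scale of the errors; MAE and RMSE in the original units of Chance of Admit are easy to add. A small sketch using the same y_test and y_pred (the names mae and rmse are just illustrative):

from sklearn.metrics import mean_absolute_error, mean_squared_error

# error metrics in the units of the target
mae = mean_absolute_error(y_test, y_pred)
rmse = np.sqrt(mean_squared_error(y_test, y_pred))
print(f"MAE:  {mae:.4f}")
print(f"RMSE: {rmse:.4f}")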

# plotting the loss


plt.plot(history.history['loss'],label='Training')
plt.plot(history.history['val_loss'],label='Testing')
plt.legend()
plt.show()
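Beyond the loss curves, a predicted-versus-actual scatter gives a quick visual check of where the model over- or under-estimates. A minimal sketch using the arrays already computed above (my addition, not in the original notebook):

# predicted vs. actual admission chance on the test set
plt.scatter(y_test, y_pred, alpha=0.6)
plt.plot([y_test.min(), y_test.max()],
         [y_test.min(), y_test.max()], 'r--')   # ideal y = x reference line
plt.xlabel('Actual Chance of Admit')
plt.ylabel('Predicted Chance of Admit')
plt.show()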
