Lecture 3 StudentRecommedSystem


In [160]: import pandas as pd

pd.options.mode.chained_assignment = None  # silence the SettingWithCopyWarning raised when scaled values are assigned back into the Features slice below
import numpy as np
data = pd.read_csv('StudentsData.csv')
data.head()

Out[160]:      Name OverAllGrade Obedient  ResearchScore  ProjectScore Recommend
          0   Ahmed            A        Y             90            85       Yes
          1   Zahid            C        N             85            51       Yes
          2   Amjad            F        N             10            17        No
          3  Tamoor            B        Y             75            71        No
          4  Zahoor            E        N             20            30        No
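Before any preprocessing, it is worth confirming the column types and the class balance of the target. A minimal optional check (the expected counts come from the outputs shown later in this lecture, not from additional data):

# Quick sanity checks on the loaded data
print(data.shape)                        # expected (8, 6): 8 students, 6 columns
print(data.dtypes)                       # ResearchScore/ProjectScore numeric, the rest object
print(data['Recommend'].value_counts())  # class balance: 5 No vs 3 Yes, per the report below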

In [161]: Features = data[['OverAllGrade','Obedient','ResearchScore','ProjectScore']]


ActualTargets = np.array(data['Recommend'])

In [162]: # Scaling for Numeric Columns - describe the scaler and the reason for using it (see the note after this cell's output)
from sklearn.preprocessing import StandardScaler

ss = StandardScaler()
ss.fit(Features[['ResearchScore','ProjectScore']])
Features[['ResearchScore','ProjectScore']] = ss.transform(Features[['ResearchScore','ProjectScore']])
print(Features[['ResearchScore','ProjectScore']])

ResearchScore ProjectScore
0 0.899583 1.376650
1 0.730648 -0.091777
2 -1.803390 -1.560203
3 0.392776 0.772004
4 -1.465519 -0.998746
5 0.967158 1.117516
6 -0.114032 0.253735
7 0.392776 -0.869179
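StandardScaler standardizes each numeric column to zero mean and unit variance: every value becomes (x - mean) / std, with the statistics learned during fit(). A minimal sketch that reproduces the first transformed row by hand, using the scaler's fitted attributes and Ahmed's raw scores (90, 85) from the data shown above:

# StandardScaler keeps the statistics it learned from fit()
import numpy as np

print(ss.mean_)    # per-column means for ResearchScore and ProjectScore
print(ss.scale_)   # per-column standard deviations used for scaling

# Manually reproduce row 0 of the transformed output above
manual = (np.array([90, 85]) - ss.mean_) / ss.scale_
print(manual)      # approx [0.899583, 1.376650], matching the printed row 0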

In [163]: # One-Hot Encoding for Categorical Data Columns


Features = pd.get_dummies(Features,columns=['OverAllGrade','Obedient'])
Features.head()

Out[163]:    ResearchScore  ProjectScore  OverAllGrade_A  OverAllGrade_B  OverAllGrade_C  OverAllGrade_E  OverAllGrade_F  Obedient_N  Obedient_Y
          0       0.899583      1.376650               1               0               0               0               0           0           1
          1       0.730648     -0.091777               0               0               1               0               0           1           0
          2      -1.803390     -1.560203               0               0               0               0               1           1           0
          3       0.392776      0.772004               0               1               0               0               0           0           1
          4      -1.465519     -0.998746               0               0               0               1               0           1           0
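pd.get_dummies creates one indicator column per category seen in the data, so any record scored later must be encoded to the same set of columns. A minimal sketch of that alignment step; the new student record here is hypothetical, and its scores are placeholders that would first be scaled with ss:

# Remember the training feature layout so future records can be aligned to it
TrainingColumns = list(Features.columns)

# Hypothetical new record: encode, then reindex to the training columns
# (categories absent from the new record become all-zero indicator columns)
NewStudent = pd.DataFrame([{'OverAllGrade': 'B', 'Obedient': 'Y',
                            'ResearchScore': 0.0, 'ProjectScore': 0.0}])
NewEncoded = pd.get_dummies(NewStudent, columns=['OverAllGrade', 'Obedient'])
NewEncoded = NewEncoded.reindex(columns=TrainingColumns, fill_value=0)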

In [164]: # Modeling - Training Phase


from sklearn.linear_model import LogisticRegression

ModelLR = LogisticRegression()
ModelLR.fit(Features,ActualTargets)

Out[164]: LogisticRegression()
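Once fitted, the model's learned weights can be inspected; scikit-learn's LogisticRegression exposes one coefficient per feature column plus an intercept. A minimal sketch:

# Inspect the fitted model
print(ModelLR.classes_)      # label order used internally, e.g. ['No' 'Yes']
print(ModelLR.intercept_)
for name, coef in zip(Features.columns, ModelLR.coef_[0]):
    print(f"{name}: {coef:.4f}")   # positive weights push toward the 'Yes' class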

In [165]: # Model Evaluation - Simple Evaluation on Training Data

PredictedLabels = ModelLR.predict(Features)

print(f"Actual Labels / Targets{ActualTargets}")


print(f"Predicted Labels / Targets{PredictedLabels}")

from sklearn.metrics import accuracy_score


from sklearn.metrics import classification_report

accuracy = accuracy_score(ActualTargets,PredictedLabels)*100
classificationReport = classification_report(ActualTargets,PredictedLabels)

print(f"\nAccuracy:{accuracy}%")
print(f"\n{classificationReport}")
Actual Labels / Targets['Yes' 'Yes' 'No' 'No' 'No' 'Yes' 'No' 'No']
Predicted Labels / Targets['Yes' 'Yes' 'No' 'No' 'No' 'Yes' 'No' 'No']

Accuracy:100.0%

precision recall f1-score support

No 1.00 1.00 1.00 5


Yes 1.00 1.00 1.00 3

accuracy 1.00 8
macro avg 1.00 1.00 1.00 8
weighted avg 1.00 1.00 1.00 8
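Evaluating on the same rows the model was trained on is optimistic, especially with only 8 students. A minimal sketch of a cross-validated estimate instead (cv=2 is kept small because of the tiny dataset; this is illustrative and not part of the original lecture code, and the fold scores may be unstable at this sample size):

from sklearn.model_selection import cross_val_score

cv_scores = cross_val_score(LogisticRegression(), Features, ActualTargets, cv=2)
print(f"Fold accuracies: {cv_scores}")
print(f"Mean accuracy: {cv_scores.mean():.2f}")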

In [171]: # Save Trained Model and Scaler for future use


import joblib
import os

if not os.path.exists('Model'):
    os.mkdir('Model')

if not os.path.exists('Scalar'):
    os.mkdir('Scalar')

joblib.dump(ModelLR, r'Model/model.pickle')
joblib.dump(ss, r'Scalar/scalar.pickle')

Out[171]: ['Scalar/scalar.pickle']
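To reuse the saved artifacts, load them with joblib and repeat the same preprocessing order as training: scale the two numeric columns with the saved scaler, one-hot encode the categorical columns, and align to the training feature layout before calling predict. A minimal sketch; the new student record is hypothetical, and TrainingColumns is the column list captured in the earlier alignment sketch:

# Reload the trained model and scaler
LoadedModel = joblib.load(r'Model/model.pickle')
LoadedScaler = joblib.load(r'Scalar/scalar.pickle')

# Hypothetical new student to score
NewData = pd.DataFrame([{'OverAllGrade': 'A', 'Obedient': 'Y',
                         'ResearchScore': 80.0, 'ProjectScore': 78.0}])

# Same preprocessing order as training: scale numeric columns, then one-hot encode
NewData[['ResearchScore', 'ProjectScore']] = LoadedScaler.transform(
    NewData[['ResearchScore', 'ProjectScore']])
NewData = pd.get_dummies(NewData, columns=['OverAllGrade', 'Obedient'])
NewData = NewData.reindex(columns=TrainingColumns, fill_value=0)

print(LoadedModel.predict(NewData))   # e.g. ['Yes'] or ['No'] depending on the record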
