0% found this document useful (0 votes)
76 views15 pages

ML Programs 1

The document contains code for 5 machine learning programs: 1. Generates a hypothesis from data and prints the initial and final hypotheses. 2. Generates specific and general hypotheses from data and prints the final general hypotheses. 3. Implements an ID3 decision tree algorithm on sample data and prints the resulting tree. 4. Implements a basic neural network on sample data to predict marks and prints the input, actual, and predicted outputs. 5. Calculates probabilities of features for each class in mushroom data and stores in dictionaries to classify new data.

Uploaded by

karthik
Copyright
© © All Rights Reserved
We take content rights seriously. If you suspect this is your content, claim it here.
Available Formats
Download as PDF, TXT or read online on Scribd
0% found this document useful (0 votes)
76 views15 pages

ML Programs 1

The document contains code for 5 machine learning programs: 1. Generates a hypothesis from data and prints the initial and final hypotheses. 2. Generates specific and general hypotheses from data and prints the final general hypotheses. 3. Implements an ID3 decision tree algorithm on sample data and prints the resulting tree. 4. Implements a basic neural network on sample data to predict marks and prints the input, actual, and predicted outputs. 5. Calculates probabilities of features for each class in mushroom data and stores in dictionaries to classify new data.

Uploaded by

karthik
Copyright
© © All Rights Reserved
We take content rights seriously. If you suspect this is your content, claim it here.
Available Formats
Download as PDF, TXT or read online on Scribd
You are on page 1/ 15

11/22/2019 MLprograms

In [1]:

#Program 1
f = open('prg1.csv','r')
length = len(f.readline().split(','))
hypo = ['0']*(length-1)
print('Intital Hypo = ',hypo)
f.close()
f = open('prg1.csv','r')
count =1
for line in f:
lst = line.split(',')
for i in range(length-1):
if(lst[-1] == 'yes\n'):
if(hypo[i]!='0' and lst[i]!=hypo[i]):
hypo[i]='?'
else:
hypo[i] = lst[i]
print('Hypo ',hypo)
print('final hypo ',hypo)

Intital Hypo = ['0', '0', '0', '0', '0', '0']


Hypo ['sunny', 'warm', 'normal', 'strong', 'warm', 'same']
Hypo ['sunny', 'warm', '?', 'strong', 'warm', 'same']
Hypo ['sunny', 'warm', '?', 'strong', 'warm', 'same']
Hypo ['sunny', 'warm', '?', 'strong', '?', '?']
final hypo ['sunny', 'warm', '?', 'strong', '?', '?']

localhost:8888/nbconvert/html/Downloads/whatsapp/ml lab(executed)/Untitled Folder 1/MLprograms.ipynb?download=false 1/15


11/22/2019 MLprograms

In [2]:

#program2
f = open('prg1.csv','r')
length = len(f.readline().split(',')) -1
f.close()
f = open('prg1.csv','r')
shypo = ['0']*(length)
ghypo =['?']*(length)
print('Intital Specific hypothesis',shypo)
count = 1
print('Intital General hypothesis',ghypo)
ghypo.clear()
for line in f:
lst = line.split(',')
for i in range(length):
if(lst[-1] == 'yes\n'):
if shypo[i]!='0' and shypo[i]!=lst[i]:
shypo[i] ='?'
else:
shypo[i] = lst[i]
elif (lst[-1] == 'no\n'):
if '0' in shypo:
temp_lst = ['?']*i
temp_lst += [lst[i]]
temp_lst += ['?'] * (length-1-i)
ghypo.append(temp_lst)
elif shypo[i]!='?' and shypo[i]!=lst[i]:
temp_lst = ['?']*i
temp_lst = temp_lst + [shypo[i]]
temp_lst = temp_lst + ['?'] * (length-1-i)
if(temp_lst not in ghypo):
ghypo.append(temp_lst)
print('SHYPO ',count ," ",shypo)
print('GHYPO ',count ," ",ghypo)
count+=1
f_ghypo = list()
for i in range(len(ghypo)):
for j in range(len(ghypo[i])):
if(ghypo[i][j]!='?' and ghypo[i][j]==shypo[j]):
f_ghypo.append(ghypo[i])
print(f_ghypo)

Intital Specific hypothesis ['0', '0', '0', '0', '0', '0']


Intital General hypothesis ['?', '?', '?', '?', '?', '?']
SHYPO 1 ['sunny', 'warm', 'normal', 'strong', 'warm', 'same']
GHYPO 1 []
SHYPO 2 ['sunny', 'warm', '?', 'strong', 'warm', 'same']
GHYPO 2 []
SHYPO 3 ['sunny', 'warm', '?', 'strong', 'warm', 'same']
GHYPO 3 [['sunny', '?', '?', '?', '?', '?'], ['?', 'warm', '?', '?',
'?', '?'], ['?', '?', '?', '?', '?', 'same']]
SHYPO 4 ['sunny', 'warm', '?', 'strong', '?', '?']
GHYPO 4 [['sunny', '?', '?', '?', '?', '?'], ['?', 'warm', '?', '?',
'?', '?'], ['?', '?', '?', '?', '?', 'same']]
[['sunny', '?', '?', '?', '?', '?'], ['?', 'warm', '?', '?', '?', '?']]

localhost:8888/nbconvert/html/Downloads/whatsapp/ml lab(executed)/Untitled Folder 1/MLprograms.ipynb?download=false 2/15


11/22/2019 MLprograms

In [3]:

#program3
import numpy as np
import pandas as pd

def entropy(target_col):
val,counts = np.unique(target_col,return_counts = True)
ent = sum( (-counts[i]/np.sum(counts)) * np.log2( counts[i]/np.sum(counts) ) for i
in range(len(val)))
return ent

def infoGain(data,features,target):
te = entropy(data[target])
val,counts = np.unique(data[features],return_counts = True)
eg = sum((counts[i]/sum(counts)) * entropy(data[data[features] == val[i]][target] )
for i in range(len(val)))
InfoGain = te-eg
return InfoGain
def ID3(data,features,target,pnode):
if len(np.unique(data[target])) == 1:
return np.unique(data[target])[0]
elif len(features) == 0:
return pnode
else:
pnode = np.unique(data[target])[np.argmax(np.unique(data[target])[1])]
IG = [infoGain(data,f,target) for f in features]
index = np.argmax(IG)
col = features[index]
tree = {col:{}}
features = [f for f in features if f!=col]
for val in np.unique(data[col]):
sub_data = data[data[col]==val].dropna()
subtree = ID3(sub_data,features,target,pnode)
tree[col][val] = subtree
return tree

data = pd.read_csv('PlayTennis.csv')
testData = data.sample(frac = 0.1)
data.drop(testData.index,inplace = True)
print(data)
target = 'PlayTennis'
features = data.columns[data.columns!=target]
tree = ID3(data,features,target,None)
print (tree)
test = testData.to_dict('records')[0]
print(test,'=>', test['PlayTennis'])

localhost:8888/nbconvert/html/Downloads/whatsapp/ml lab(executed)/Untitled Folder 1/MLprograms.ipynb?download=false 3/15


11/22/2019 MLprograms

Outlook Temperature Humidity Wind PlayTennis


0 Sunny Hot High Weak No
1 Sunny Hot High Strong No
2 Overcast Hot High Weak Yes
3 Rain Mild High Weak Yes
4 Rain Cool Normal Weak Yes
6 Overcast Cool Normal Strong Yes
7 Sunny Mild High Weak No
8 Sunny Cool Normal Weak Yes
9 Rain Mild Normal Weak Yes
10 Sunny Mild Normal Strong Yes
11 Overcast Mild High Strong Yes
12 Overcast Hot Normal Weak Yes
13 Rain Mild High Strong No
{'Humidity': {'High': {'Outlook': {'Overcast': 'Yes', 'Rain': {'Wind': {'S
trong': 'No', 'Weak': 'Yes'}}, 'Sunny': 'No'}}, 'Normal': 'Yes'}}
{'Outlook': 'Rain', 'Temperature': 'Cool', 'Humidity': 'Normal', 'Wind':
'Strong', 'PlayTennis': 'No'} => No

localhost:8888/nbconvert/html/Downloads/whatsapp/ml lab(executed)/Untitled Folder 1/MLprograms.ipynb?download=false 4/15


11/22/2019 MLprograms

In [4]:

#program4
import numpy as np # numpy is commonly used to process number array
X = np.array([[2,9], [3,6], [4,8]]) # Features ( Hrs Slept, Hrs Studied)
y = np.array([[92], [86], [89]]) # Labels(Marks obtained)
X = X/np.amax(X,axis=0) # Normalize
y = y/100
def sigmoid(x):
return 1/(1 + np.exp(-x))
def sigmoid_grad(x):
return x * (1 - x)
# Variable initialization
epoch=1000 #Setting training iterations
eta =0.1 #Setting learning rate (eta)
input_neurons = 2 #number of features in data set
hidden_neurons = 3 #number of hidden layers neurons
output_neurons = 1 #number of neurons at output layer
# Weight and bias - Random initialization
wh=np.random.uniform(size=(input_neurons,hidden_neurons)) # 2x3
bh=np.random.uniform(size=(1,hidden_neurons)) # 1x3
wout=np.random.uniform(size=(hidden_neurons,output_neurons)) # 1x1
bout=np.random.uniform(size=(1,output_neurons))
for i in range(epoch):
#Forward Propogation
h_ip=np.dot(X,wh) + bh # Dot product + bias
h_act = sigmoid(h_ip) # Activation function
o_ip=np.dot(h_act,wout) + bout
output = sigmoid(o_ip)
# Error at Output layer
Eo = y-output # Error at o/p
outgrad = sigmoid_grad(output)
d_output = Eo* outgrad # Errj=Oj(1-Oj)(Tj-Oj)
# Error at Hidden later
Eh = np.dot(d_output,wout.T) # .T means transpose
hiddengrad = sigmoid_grad(h_act) # How much hidden layer wts contributed to error
d_hidden = Eh * hiddengrad

wout += np.dot(h_act.T,d_output) *eta # Dotproduct of nextlayererror and currentlay


erop
wh += np.dot(X.T,d_hidden) *eta

print("Normalized Input: \n" ,X)


print("Actual Output: \n" ,y)
print("Predicted Output: \n" ,output)

Normalized Input:
[[0.5 1. ]
[0.75 0.66666667]
[1. 0.88888889]]
Actual Output:
[[0.92]
[0.86]
[0.89]]
Predicted Output:
[[0.88719705]
[0.88752842]
[0.89654925]]

localhost:8888/nbconvert/html/Downloads/whatsapp/ml lab(executed)/Untitled Folder 1/MLprograms.ipynb?download=false 5/15


11/22/2019 MLprograms

In [5]:

#program5
import pandas as pd
mush = pd.read_csv('mushrooms.csv')
target = 'class'
classes = mush[target].unique()
features = mush.columns[mush.columns!=target]
testData = mush.sample(frac=0.3)
mush.drop(testData.index,inplace = True)
first ={}
fourth ={}
for x in classes:
mushcl = mush[mush[target]==x][features]
tot = len(mushcl)
second={}
for col in mushcl.columns:
third={}
for val,cnt in mushcl[col].value_counts().iteritems():
prob = cnt/tot
third[val]=prob
second[col]=third
first[x]=second
fourth[x]=len(mushcl)/len(mush)
def proabs(params):
proab={}
for x in classes:
calc = fourth[x]
for col, val in params.iteritems():
try:
calc = first[x][col][val]
except KeyError:
calc =0
proab[x]=calc
return proab
def maxx(params):
proab = proabs(params)
maxcl =''; maxv=0
for col,val in proab.items():
if(val>maxv):
maxv=val
maxcl=col
return maxcl

b=[]
for i in mush.index:
b.append( maxx(mush.loc[i,features]) == mush.loc[i,target]
)
print(sum(b),'correct of',len(b))
print('Accuracy =',sum(b)/len(b))
b=[]
for i in testData.index:
b.append( maxx(testData.loc[i,features]) == testData.loc[i,target]
)
print(sum(b),'correct of',len(b))
print('Accuracy =',sum(b)/len(b))

localhost:8888/nbconvert/html/Downloads/whatsapp/ml lab(executed)/Untitled Folder 1/MLprograms.ipynb?download=false 6/15


11/22/2019 MLprograms

3903 correct of 5687


Accuracy = 0.6863020924916476
1705 correct of 2437
Accuracy = 0.6996306934755847

localhost:8888/nbconvert/html/Downloads/whatsapp/ml lab(executed)/Untitled Folder 1/MLprograms.ipynb?download=false 7/15


11/22/2019 MLprograms

In [6]:

#program6
import pandas as pd
msg=pd.read_csv('naive.csv',names=['message','label'])
print('The dimensions of the dataset',msg.shape)
msg['labelnum']=msg.label.map({'pos':1,'neg':0})
X=msg.message
y=msg.labelnum
print(X)
print(y)
#splitting the dataset into train and test data
from sklearn.model_selection import train_test_split
xtrain,xtest,ytrain,ytest=train_test_split(X,y)
print(xtest.shape)
print(xtrain.shape)
print(ytest.shape)
print(ytrain.shape)
#output of count vectoriser is a sparse matrix
from sklearn.feature_extraction.text import CountVectorizer
count_vect = CountVectorizer()
xtrain_dtm = count_vect.fit_transform(xtrain)
xtest_dtm=count_vect.transform(xtest)
print(count_vect.get_feature_names())

df=pd.DataFrame(xtrain_dtm.toarray(),columns=count_vect.get_feature_names())
print(df)#tabular representation
print(xtrain_dtm) #sparse matrix representation
# Training Naive Bayes (NB) classifier on training data.
from sklearn.naive_bayes import MultinomialNB
clf = MultinomialNB().fit(xtrain_dtm,ytrain)
predicted = clf.predict(xtest_dtm)
#printing accuracy metrics
from sklearn import metrics
print('Accuracy metrics')
print('Accuracy of the classifer is',metrics.accuracy_score(ytest,predicted))
print('Confusion matrix')
print(metrics.confusion_matrix(ytest,predicted))
print('Recall and Precison ')
print(metrics.recall_score(ytest,predicted))
print(metrics.precision_score(ytest,predicted))
'''docs_new = ['I like this place', 'My boss is not my saviour']
Dept of CSE, CIT Gubb Page 18
X_new_counts = count_vect.transform(docs_new)
predictednew = clf.predict(X_new_counts)
for doc, category in zip(docs_new, predictednew):
print('%s->%s' % (doc, msg.labelnum[category]))'''

localhost:8888/nbconvert/html/Downloads/whatsapp/ml lab(executed)/Untitled Folder 1/MLprograms.ipynb?download=false 8/15


11/22/2019 MLprograms

The dimensions of the dataset (18, 2)


0 I love this sandwich
1 This is an amazing place
2 I feel very good about these beers
3 This is my best work
4 What an awesome view
5 I do not like this restaurant
6 I am tired of this stuff
7 I can't deal with this
8 He is my sworn enemy
9 My boss is horrible
10 This is an awesome place
11 I do not like the taste of this juice
12 I love to dance
13 I am sick and tired of this place
14 What a great holiday
15 That is a bad locality to stay
16 We will have good fun tomorrow
17 I went to my enemy's house today
Name: message, dtype: object
0 1
1 1
2 1
3 1
4 1
5 0
6 0
7 0
8 0
9 0
10 1
11 0
12 1
13 0
14 1
15 0
16 1
17 0
Name: labelnum, dtype: int64
(5,)
(13,)
(5,)
(13,)
['am', 'amazing', 'an', 'and', 'awesome', 'bad', 'best', 'boss', 'can', 'd
eal', 'do', 'enemy', 'fun', 'good', 'great', 'have', 'he', 'holiday', 'hor
rible', 'is', 'like', 'locality', 'love', 'my', 'not', 'of', 'place', 'res
taurant', 'sandwich', 'sick', 'stay', 'stuff', 'sworn', 'that', 'this', 't
ired', 'to', 'tomorrow', 'view', 'we', 'what', 'will', 'with', 'work']
am amazing an and awesome bad best boss can deal ... this
\
0 0 0 0 0 0 0 0 0 0 0 ... 0
1 0 0 0 0 0 0 1 0 0 0 ... 1
2 0 0 0 0 0 0 0 1 0 0 ... 0
3 0 0 1 0 1 0 0 0 0 0 ... 0
4 0 0 0 0 0 0 0 0 0 0 ... 0
5 0 0 0 0 0 1 0 0 0 0 ... 0
6 1 0 0 1 0 0 0 0 0 0 ... 1
7 1 0 0 0 0 0 0 0 0 0 ... 1
8 0 0 0 0 0 0 0 0 1 1 ... 1
9 0 0 0 0 0 0 0 0 0 0 ... 1
10 0 1 1 0 0 0 0 0 0 0 ... 1
localhost:8888/nbconvert/html/Downloads/whatsapp/ml lab(executed)/Untitled Folder 1/MLprograms.ipynb?download=false 9/15
11/22/2019 MLprograms

11 0 0 0 0 0 0 0 0 0 0 ... 0
12 0 0 0 0 0 0 0 0 0 0 ... 1

tired to tomorrow view we what will with work


0 0 0 0 0 0 1 0 0 0
1 0 0 0 0 0 0 0 0 1
2 0 0 0 0 0 0 0 0 0
3 0 0 0 1 0 1 0 0 0
4 0 0 0 0 0 0 0 0 0
5 0 1 0 0 0 0 0 0 0
6 1 0 0 0 0 0 0 0 0
7 1 0 0 0 0 0 0 0 0
8 0 0 0 0 0 0 0 1 0
9 0 0 0 0 0 0 0 0 0
10 0 0 0 0 0 0 0 0 0
11 0 0 1 0 1 0 1 0 0
12 0 0 0 0 0 0 0 0 0

[13 rows x 44 columns]


(0, 17) 1
(0, 14) 1
(0, 40) 1
(1, 43) 1
(1, 6) 1
(1, 23) 1
(1, 19) 1
(1, 34) 1
(2, 18) 1
(2, 7) 1
(2, 23) 1
(2, 19) 1
(3, 38) 1
(3, 4) 1
(3, 2) 1
(3, 40) 1
(4, 11) 1
(4, 32) 1
(4, 16) 1
(4, 23) 1
(4, 19) 1
(5, 30) 1
(5, 36) 1
(5, 21) 1
(5, 5) 1
: :
(7, 0) 1
(7, 34) 1
(8, 42) 1
(8, 9) 1
(8, 8) 1
(8, 34) 1
(9, 28) 1
(9, 22) 1
(9, 34) 1
(10, 1) 1
(10, 26) 1
(10, 2) 1
(10, 19) 1
(10, 34) 1
(11, 37) 1
(11, 12) 1
localhost:8888/nbconvert/html/Downloads/whatsapp/ml lab(executed)/Untitled Folder 1/MLprograms.ipynb?download=false 10/15
11/22/2019 MLprograms

(11, 13) 1
(11, 15) 1
(11, 41) 1
(11, 39) 1
(12, 27) 1
(12, 20) 1
(12, 24) 1
(12, 10) 1
(12, 34) 1
Accuracy metrics
Accuracy of the classifer is 1.0
Confusion matrix
[[2 0]
[0 3]]
Recall and Precison
1.0
1.0

Out[6]:

"docs_new = ['I like this place', 'My boss is not my saviour']\nDept of CS


E, CIT Gubb Page 18\nX_new_counts = count_vect.transform(docs_new)\npredic
tednew = clf.predict(X_new_counts)\nfor doc, category in zip(docs_new, pre
dictednew):\nprint('%s->%s' % (doc, msg.labelnum[category]))"

In [7]:

#program7
import pandas as pd
from pgmpy.estimators import BayesianEstimator
from pgmpy.models import BayesianModel
from pgmpy.inference import VariableElimination
f=open('data7_name.csv','r')
attributes= f.readline().split(',')
heartDisease=pd.read_csv('data7.csv',names=attributes)
print("\nAttributes and datatypes")
print(heartDisease.dtypes)
model=BayesianModel([('age','trestbps'),('age','fbs'),('sex','trestbps'),('exang','tres
tbps'),('trestbps','heartdisease'),('fbs','heartdisease')])
model.fit(heartDisease,BayesianEstimator)
HeartDisease_infer=VariableElimination(model)
print("\n 1. Probability heart disease given age=28")
q=HeartDisease_infer.query(['heartdisease'],{'age':28})
print(q['heartdisease'])
print("\n 2. Probability of heart disease for male")
q=HeartDisease_infer.query(['heartdisease'],{'sex':1})
print(q['heartdisease'])

localhost:8888/nbconvert/html/Downloads/whatsapp/ml lab(executed)/Untitled Folder 1/MLprograms.ipynb?download=false 11/15


11/22/2019 MLprograms

In [9]:

#program8
from sklearn.cluster import KMeans
from sklearn.mixture import GaussianMixture
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
data = pd.read_csv('8-kmeansdata.csv')
f1 =data['Distance_Feature']
f2=data['Speeding_Feature']
X =np.array(list(zip(f1,f2)))
plt.scatter(f1,f2,color='black')
plt.show()
kmeans = KMeans(3).fit(X)
labels = kmeans.predict(X)
plt.scatter(f1,f2,c=labels)
plt.show()
gm = GaussianMixture(3).fit(X)
labels = gm.predict(X)
plt.scatter(f1,f2,c=labels)
plt.show()

localhost:8888/nbconvert/html/Downloads/whatsapp/ml lab(executed)/Untitled Folder 1/MLprograms.ipynb?download=false 12/15


11/22/2019 MLprograms

localhost:8888/nbconvert/html/Downloads/whatsapp/ml lab(executed)/Untitled Folder 1/MLprograms.ipynb?download=false 13/15


11/22/2019 MLprograms

In [10]:

#program9
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn import datasets
iris=datasets.load_iris()
print("Iris Data set loaded...")
x_train, x_test, y_train, y_test = train_test_split(iris.data,iris.target)
classifier = KNeighborsClassifier(3).fit(x_train, y_train)
y_pred=classifier.predict(x_test)
print("Results of Classification using K-nn with K=1 ")
for r in range(0,len(x_test)):
print(" Sample:", str(x_test[r]), " Actual-label:", str(y_test[r]), " Predicted-lab
el:",str(y_pred[r]))
print("Classification Accuracy :" , classifier.score(x_test,y_test));

Iris Data set loaded...


Results of Classification using K-nn with K=1
Sample: [6.4 3.2 5.3 2.3] Actual-label: 2 Predicted-label: 2
Sample: [6.6 2.9 4.6 1.3] Actual-label: 1 Predicted-label: 1
Sample: [4.9 3.6 1.4 0.1] Actual-label: 0 Predicted-label: 0
Sample: [6.2 2.8 4.8 1.8] Actual-label: 2 Predicted-label: 2
Sample: [5. 3.4 1.6 0.4] Actual-label: 0 Predicted-label: 0
Sample: [6.3 2.3 4.4 1.3] Actual-label: 1 Predicted-label: 1
Sample: [6.7 3. 5.2 2.3] Actual-label: 2 Predicted-label: 2
Sample: [7.7 3. 6.1 2.3] Actual-label: 2 Predicted-label: 2
Sample: [5. 2. 3.5 1. ] Actual-label: 1 Predicted-label: 1
Sample: [6.1 2.8 4. 1.3] Actual-label: 1 Predicted-label: 1
Sample: [6.9 3.2 5.7 2.3] Actual-label: 2 Predicted-label: 2
Sample: [5.7 4.4 1.5 0.4] Actual-label: 0 Predicted-label: 0
Sample: [4.9 3. 1.4 0.2] Actual-label: 0 Predicted-label: 0
Sample: [5.5 2.5 4. 1.3] Actual-label: 1 Predicted-label: 1
Sample: [5.7 2.9 4.2 1.3] Actual-label: 1 Predicted-label: 1
Sample: [5. 3.6 1.4 0.2] Actual-label: 0 Predicted-label: 0
Sample: [6.1 3. 4.6 1.4] Actual-label: 1 Predicted-label: 1
Sample: [5.1 3.8 1.6 0.2] Actual-label: 0 Predicted-label: 0
Sample: [6.7 3. 5. 1.7] Actual-label: 1 Predicted-label: 1
Sample: [4.6 3.1 1.5 0.2] Actual-label: 0 Predicted-label: 0
Sample: [5.5 2.3 4. 1.3] Actual-label: 1 Predicted-label: 1
Sample: [5.1 3.7 1.5 0.4] Actual-label: 0 Predicted-label: 0
Sample: [5. 3.5 1.6 0.6] Actual-label: 0 Predicted-label: 0
Sample: [6.4 2.7 5.3 1.9] Actual-label: 2 Predicted-label: 2
Sample: [5.8 4. 1.2 0.2] Actual-label: 0 Predicted-label: 0
Sample: [5.3 3.7 1.5 0.2] Actual-label: 0 Predicted-label: 0
Sample: [4.5 2.3 1.3 0.3] Actual-label: 0 Predicted-label: 0
Sample: [5.2 3.5 1.5 0.2] Actual-label: 0 Predicted-label: 0
Sample: [5.4 3.7 1.5 0.2] Actual-label: 0 Predicted-label: 0
Sample: [7.4 2.8 6.1 1.9] Actual-label: 2 Predicted-label: 2
Sample: [5.6 2.7 4.2 1.3] Actual-label: 1 Predicted-label: 1
Sample: [4.8 3. 1.4 0.3] Actual-label: 0 Predicted-label: 0
Sample: [4.4 3. 1.3 0.2] Actual-label: 0 Predicted-label: 0
Sample: [6.5 3. 5.5 1.8] Actual-label: 2 Predicted-label: 2
Sample: [5.8 2.8 5.1 2.4] Actual-label: 2 Predicted-label: 2
Sample: [4.9 3.1 1.5 0.1] Actual-label: 0 Predicted-label: 0
Sample: [5. 2.3 3.3 1. ] Actual-label: 1 Predicted-label: 1
Sample: [5.1 2.5 3. 1.1] Actual-label: 1 Predicted-label: 1
Classification Accuracy : 1.0

localhost:8888/nbconvert/html/Downloads/whatsapp/ml lab(executed)/Untitled Folder 1/MLprograms.ipynb?download=false 14/15


11/22/2019 MLprograms

In [11]:

#program10
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
def localWeigh(point,X,ymat,k):
m,n = np.shape(X)
weights = np.mat(np.eye(m))
for i in range(m):
diff = point - X[i]
weights[i,i] = np.exp(diff*diff.T/(-2.0*k**2))
W = (X.T *(weights*X)).I * (X.T*(weights*ymat.T))
return W
def localWeightReg(X,ymat,k):
m,n = np.shape(X)
ypred = np.zeros(m)
for i in range(m):
ypred[i] = X[i] * localWeigh(X[i],X,ymat,k)
return ypred
def plott(X,pred):
sortIndex = X[:,1].argsort(0)
xsort = X[sortIndex][:,0][:,1]
ysort = pred[sortIndex]
plt.scatter(x,y,color='green')
plt.plot(xsort,ysort,color="red",linewidth=5)
plt.xlabel('Total bill')
plt.ylabel('Tips')
plt.show()

data = pd.read_csv('data10.csv')
x=data['total_bill']
y = data['tip']
xmat = np.mat(x)
ymat = np.mat(y)
size = np.shape(xmat)[1]
ones = np.mat(np.ones(size))
X=np.hstack((ones.T,xmat.T))
pred = localWeightReg(X,ymat,3)
plott(X,pred)

In [ ]:

localhost:8888/nbconvert/html/Downloads/whatsapp/ml lab(executed)/Untitled Folder 1/MLprograms.ipynb?download=false 15/15

You might also like