ML Program Output
EXNO:1 Find-S Algorithm
import csv

a = []
# File name is assumed; the file holds the enjoysport data shown in the output.
with open('enjoysport.csv', 'r') as csvfile:
    for row in csv.reader(csvfile):
        a.append(row)
print(a)

num_attribute = len(a[0]) - 1
hypothesis = ['0'] * num_attribute
print(hypothesis)

# Generalize the hypothesis on every positive example; Find-S ignores negatives.
for i in range(len(a)):
    if a[i][num_attribute] == 'yes':
        for j in range(num_attribute):
            if hypothesis[j] == '0' or hypothesis[j] == a[i][j]:
                hypothesis[j] = a[i][j]
            else:
                hypothesis[j] = '?'

print("\nThe maximally specific hypothesis for the training instances is")
print(hypothesis)
Output:
[['sky', 'airtemp', 'humidity', 'wind', 'water', 'forcast', 'enjoysport'], ['sunny', 'warm', 'normal', 'strong',
'warm', 'same', 'yes'], ['sunny', 'warm', 'high', 'strong', 'warm', 'same', 'yes'], ['rainy', 'cold', 'high',
'strong', 'warm', 'change', 'no'], ['sunny', 'warm', 'high', 'strong', 'cool', 'change', 'yes']]
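Traced by hand on the four training rows above, Find-S generalizes the hypothesis only on the positive ('yes') rows:

after row 1: ['sunny', 'warm', 'normal', 'strong', 'warm', 'same']
after row 2: ['sunny', 'warm', '?', 'strong', 'warm', 'same']   (humidity differs)
row 3 is negative and is ignored
after row 4: ['sunny', 'warm', '?', 'strong', '?', '?']   (water and forecast differ)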
EXNO:2 Artificial Neural Network using Backpropagation
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split

data = load_iris()
X = data.data
y = data.target
y = pd.get_dummies(y).values  # one-hot encode the three iris classes

# The split call is not shown in the source; a 20-sample test set is assumed.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=20, random_state=4)

learning_rate = 0.1
iterations = 10000
N = y_train.size
input_size = 4   # four iris features
hidden_size = 2
output_size = 3  # three classes

# Weight initialization is omitted in the source; small random normals are assumed.
np.random.seed(10)
W1 = np.random.normal(scale=0.5, size=(input_size, hidden_size))
W2 = np.random.normal(scale=0.5, size=(hidden_size, output_size))

def sigmoid(x):
    return 1 / (1 + np.exp(-x))

def mean_squared_error(y_pred, y_true):
    return ((y_pred - y_true) ** 2).sum() / (2 * y_pred.size)

def accuracy(y_pred, y_true):
    acc = y_pred.argmax(axis=1) == y_true.argmax(axis=1)
    return acc.mean()

results = []  # DataFrame.append was removed in pandas 2.x; collect rows in a list

for itr in range(iterations):
    # Forward pass
    Z1 = np.dot(X_train, W1)
    A1 = sigmoid(Z1)
    Z2 = np.dot(A1, W2)
    A2 = sigmoid(Z2)
    mse = mean_squared_error(A2, y_train)
    acc = accuracy(A2, y_train)
    results.append({"mse": mse, "accuracy": acc})

    # Backpropagation: output-layer delta, then hidden-layer delta
    E1 = A2 - y_train
    dW1 = E1 * A2 * (1 - A2)
    E2 = np.dot(dW1, W2.T)
    dW2 = E2 * A1 * (1 - A1)
    W2_update = np.dot(A1.T, dW1) / N
    W1_update = np.dot(X_train.T, dW2) / N
    W2 = W2 - learning_rate * W2_update
    W1 = W1 - learning_rate * W1_update

results = pd.DataFrame(results)
results.accuracy.plot(title="Accuracy")
plt.show()

# Evaluate the trained weights on the held-out test set
Z1 = np.dot(X_test, W1)
A3 = sigmoid(Z1)
Z2 = np.dot(A3, W2)
A4 = sigmoid(Z2)
acc = accuracy(A4, y_test)
print("Accuracy: {}".format(acc))
Output:
Accuracy: 0.95
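For reference, the weight updates in the training loop implement the standard delta rule for sigmoid units under the squared-error loss; a derivation sketch in the code's notation (the code's dW1 is the output-layer delta and dW2 the hidden-layer delta):

\delta_2 = (A_2 - y)\,A_2(1 - A_2), \qquad W_2 \leftarrow W_2 - \frac{\eta}{N}\,A_1^{\top}\delta_2
\delta_1 = (\delta_2 W_2^{\top})\,A_1(1 - A_1), \qquad W_1 \leftarrow W_1 - \frac{\eta}{N}\,X^{\top}\delta_1

with learning rate \eta = 0.1 as set above.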
EXNO:3 Implementation of Naive Bayesian Classifier
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn import metrics

msg = pd.read_csv('naivetext.csv', names=['message', 'label'])
msg['labelnum'] = msg.label.map({'pos': 1, 'neg': 0})
X = msg.message
y = msg.labelnum
print(X)
print(y)

Xtrain, Xtest, ytrain, ytest = train_test_split(X, y)

# Build the document-term matrix from the training split only
count_vect = CountVectorizer()
Xtrain_dtm = count_vect.fit_transform(Xtrain)
Xtest_dtm = count_vect.transform(Xtest)
# get_feature_names() was removed in scikit-learn 1.2; use get_feature_names_out()
feature_names = count_vect.get_feature_names_out()
print(list(feature_names))
df = pd.DataFrame(Xtrain_dtm.toarray(), columns=feature_names)

clf = MultinomialNB().fit(Xtrain_dtm, ytrain)
predicted = clf.predict(Xtest_dtm)
print(metrics.confusion_matrix(ytest, predicted))
Output:
8 He is my sworn enemy
9 My boss is horrible
12 I love to dance
0 1
1 1
2 1
3 1
4 1
5 0
6 0
7 0
8 0
9 0
10 1
11 0
12 1
13 0
14 1
15 0
16 1
17 0
['about', 'am', 'amazing', 'an', 'and', 'awesome', 'bad', 'beers', 'best', 'boss', 'dance', 'do', 'enemy', 'feel',
'fun', 'good', 'great', 'have', 'holiday', 'horrible', 'house', 'is', 'like', 'locality', 'love', 'my', 'not', 'of', 'place',
'restaurant', 'sick', 'stay', 'stuff', 'these', 'this', 'tired', 'to', 'today', 'tomorrow', 'very', 'view', 'we', 'went',
'what', 'will', 'work']
Confusion matrix
[[3 0]
[0 2]]
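To classify fresh text, new messages must pass through the same fitted vectorizer before prediction; a minimal usage sketch (the example sentences are hypothetical, not from naivetext.csv):

new_messages = ['I love this place', 'He is horrible']
new_dtm = count_vect.transform(new_messages)
print(clf.predict(new_dtm))  # 1 = pos, 0 = neg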
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB

dataset = pd.read_csv('wine.csv')
X = dataset.iloc[:, 2:14].values
y = dataset.iloc[:, 0].values

# The source does not name the model used here; GaussianNB is an assumption.
model = GaussianNB()
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)
model.fit(X_train, y_train)
print(model.score(X_test, y_test))
Output:
0.9830508474576272
EXNO:5 K Nearest Neighbour Classifier
import numpy as np
import pandas as pd
from sklearn.datasets import load_iris
from sklearn.model_selection import KFold, train_test_split
from sklearn.neighbors import KNeighborsClassifier

data = load_iris()
features = data.data
feature_names = data.feature_names
target = data.target
target_names = data.target_names
labels = target_names[target]

classifier = KNeighborsClassifier(n_neighbors=1)
# Hold-out evaluation on iris; the split parameters are assumed (not shown in the source).
X_train, X_test, y_train, y_test = train_test_split(features, labels, test_size=0.33, random_state=42)
classifier.fit(X_train, y_train)
print(classifier.score(X_test, y_test))

# The same classifier on the wine data
dataset = pd.read_csv('wine.csv')
X = dataset.iloc[:, 2:14].values
y = dataset.iloc[:, 0].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)
classifier.fit(X_train, y_train)
print(classifier.score(X_test, y_test))

# 5-fold cross-validation on iris; the loop body is reconstructed (assumed).
kf = KFold(n_splits=5, shuffle=True)
means = []
for training, testing in kf.split(features):
    classifier.fit(features[training], labels[training])
    prediction = classifier.predict(features[testing])
    means.append(np.mean(prediction == labels[testing]))
print(np.mean(means))
Output:
0.98
0.7627118644067796
0.9424242424242424
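The wine score is much lower than the iris scores because KNN is distance-based and the unscaled wine features differ in magnitude by orders of magnitude. A minimal sketch (not part of the original program) of standardizing before KNN:

from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler

scaled_knn = make_pipeline(StandardScaler(), KNeighborsClassifier(n_neighbors=1))
scaled_knn.fit(X_train, y_train)
print(scaled_knn.score(X_test, y_test))  # typically well above the unscaled score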
EXNO:6 K Means Segmentation
import cv2
import numpy as np
import matplotlib.pyplot as plt

image = cv2.imread("image.jpg")
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)  # OpenCV loads BGR; convert for matplotlib

# Flatten the image into an (N, 3) array of float32 pixels, as cv2.kmeans requires
pixel_values = image.reshape((-1, 3))
pixel_values = np.float32(pixel_values)
print(pixel_values.shape)

# Cluster the pixels into k colour groups; the stopping criteria are assumed
k = 3
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.2)
_, labels, centers = cv2.kmeans(pixel_values, k, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)

# Map every pixel to its cluster centre and restore the image shape
centers = np.uint8(centers)
labels = labels.flatten()
segmented_image = centers[labels]
segmented_image = segmented_image.reshape(image.shape)

plt.imshow(image)
plt.show()
plt.imshow(segmented_image)
plt.show()

np.set_printoptions(threshold=np.inf)
print(labels)
Output:
(139657, 3)
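A small follow-up that is often useful after segmentation (an addition, not in the source): counting how many of the 139657 pixels fall in each of the k clusters:

unique_labels, counts = np.unique(labels, return_counts=True)
for lab, cnt in zip(unique_labels, counts):
    print('cluster', lab, ':', cnt, 'pixels')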
EXNO:7 Linear Regression
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor

dataset = pd.read_csv('Position_Salaries.csv')
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values

regressor = DecisionTreeRegressor(random_state=0)
regressor.fit(X, y)

# Predict the salary for position level 6.5
n = np.array([6.5]).reshape(1, 1)
y_pred = regressor.predict(n)

# Plot the data and the model's predictions; the scatter/plot calls are
# assumed (the source shows only the labels and show() calls)
plt.scatter(X, y, color='red')
plt.plot(X, regressor.predict(X), color='blue')
plt.title('Regression Model')
plt.xlabel('Position level')
plt.ylabel('Salary')
plt.show()

# Higher-resolution view of the stepwise tree predictions (assumed)
X_grid = np.arange(X.min(), X.max(), 0.01).reshape(-1, 1)
plt.plot(X_grid, regressor.predict(X_grid), color='blue')
plt.xlabel('Position level')
plt.ylabel('Salary')
plt.show()

print(y_pred)
Output:
[150000.]
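A decision tree predicts a constant within each leaf, which is why the output is an exact training salary. For comparison, a minimal ordinary linear regression on the same data (a sketch, not part of the source):

from sklearn.linear_model import LinearRegression

lin_reg = LinearRegression()
lin_reg.fit(X, y)
print(lin_reg.predict(n))  # a straight-line fit gives a noticeably different estimate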
EXNO:8 Implementation of Dimensionality Reduction Algorithm (PCA)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix

dataset = pd.read_csv('wine.csv')
X = dataset.iloc[:, 1:13].values
y = dataset.iloc[:, 0].values

# The split parameters are assumed; the output shapes imply a 142/36 split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

# Standardize, then project onto the first two principal components
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)

pca = PCA(n_components=2)
X1_train = pca.fit_transform(X_train)
X1_test = pca.transform(X_test)
print(X.shape)
print(X1_train.shape)
variance = pca.explained_variance_ratio_

# Logistic regression on the 2 components vs. on all 12 features
classifier = LogisticRegression(random_state=0)
classifier.fit(X1_train, y_train)
y_pred = classifier.predict(X1_test)
print(classifier.score(X1_test, y_test))

classifier.fit(X_train, y_train)
y1_pred = classifier.predict(X_test)
print(classifier.score(X_test, y_test))

print(np.shape(X_train))
print(np.shape(X1_train))

plt.figure(figsize=(8, 6))
plt.scatter(X1_train[:, 0], X1_train[:, 1], s=10, c=y_train, cmap='rainbow')
plt.show()

cm = confusion_matrix(y_test, y_pred)
Output:
(178, 12)
(142, 2)
0.9444444444444444
0.9444444444444444
(142, 12)
(142, 2)
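The program computes variance but never displays it; a small addition (not in the source) that reports how much of the total variance the two components retain:

print(variance)        # per-component explained variance ratio
print(variance.sum())  # total fraction of variance kept by the 2-D projection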
EXNO:9 Implementation of Random Forest Classifier
import numpy as np
from sklearn.datasets import load_iris
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import KFold, train_test_split

data = load_iris()
features = data.data
feature_names = data.feature_names
target = data.target
target_names = data.target_names
labels = target_names[target]

classifier = RandomForestClassifier(n_estimators=100)
kf = KFold(n_splits=5, shuffle=True)  # declared but unused below, as in the source

X = features
y = target
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)
classifier.fit(X_train, y_train)
print(classifier.score(X_test, y_test))
Output:
0.98
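A fitted random forest also exposes per-feature importances, which are worth printing here (an addition, not in the source):

for name, importance in zip(feature_names, classifier.feature_importances_):
    print(name, round(importance, 3))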
EXNO:10 Handwritten Digit Classification using MNIST
import tensorflow
import numpy as np
from matplotlib import pyplot as plt
from keras.datasets import mnist
from keras import models
from keras import layers
from tensorflow.keras.utils import to_categorical

(train_images, train_labels), (test_images, test_labels) = mnist.load_data()

# A simple fully connected network: one hidden layer, softmax over the 10 digits
network = models.Sequential()
network.add(layers.Dense(512, activation='relu', input_shape=(28*28,)))
network.add(layers.Dense(10, activation='softmax'))
network.compile(optimizer='rmsprop',
                loss='categorical_crossentropy',
                metrics=['accuracy'])

# Flatten the 28x28 images and scale pixel values to [0, 1]
train_images = train_images.reshape((60000, 28*28))
train_images = train_images.astype('float32') / 255
test_images = test_images.reshape((10000, 28*28))
test_images = test_images.astype('float32') / 255

train_labels = to_categorical(train_labels)
test_labels = to_categorical(test_labels)

network.fit(train_images, train_labels,
            validation_data=(test_images, test_labels), epochs=5, batch_size=128)

# Reload the raw data (the arrays above were flattened) to display a few digits
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
for n in range(1, 10):
    plt.subplot(9, 1, n)
    plt.imshow(train_images[n], cmap=plt.get_cmap('gray'))
plt.show()
Output:
Epoch 1/5
accuracy=0.9276 val_accuracy=0.9633
Epoch 2/5
accuracy=0.9696 val_accuracy=0.9738
Epoch 3/5
accuracy=0.9747 val_accuracy=0.9772
Epoch 4/5
accuracy=0.9849 val_accuracy=0.9767
Epoch 5/5
accuracy=0.9885 val_accuracy=0.9793
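After training, the network can be scored on the held-out set in one call (a usage sketch, not in the source; run it before the raw mnist.load_data() reload at the end of the program, while test_images/test_labels are still flattened and one-hot):

test_loss, test_acc = network.evaluate(test_images, test_labels)
print('test_acc:', test_acc)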