Lab 7
1. Write a Python program that computes the value of the Gaussian distribution at a given vector X. Hence,
plot the effect of varying the mean and variance on the normal distribution.
import numpy as np
means = [0, 0, 1]
variances = [1, 2, 1]
plt.figure(figsize=(10, 6))
plt.xlabel('X')
plt.ylabel('Probability Density')
plt.legend()
plt.grid(True)
plt.show()
2. Write a python program to implement linear regression.
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression

# Training data. scikit-learn expects X as a 2-D array (n_samples, n_features);
# the original extract never defined X, so it is restored here.
X = np.array([[1], [2], [3], [4], [5]])
y = np.array([1, 2, 3, 4, 5])

# Fit ordinary least squares and predict on the training inputs.
model = LinearRegression()
model.fit(X, y)
y_pred = model.predict(X)

plt.scatter(X, y, color='blue')   # observed points
plt.plot(X, y_pred, color='red')  # fitted regression line
plt.title('Linear Regression')
plt.xlabel('X')
plt.ylabel('y')
plt.show()
import numpy as np
def f(x):
    """Objective function f(x) = x^2 - 4x, whose minimum is at x = 2.

    The body was missing from the extract; it is reconstructed as the
    antiderivative of df below (f'(x) = 2x - 4), with zero constant term.
    """
    return x**2 - 4*x


def df(x):
    """Derivative of f: f'(x) = 2x - 4."""
    return 2*x - 4
def gradient_descent(initial_x, learning_rate, num_iterations, grad_fn=None):
    """Minimize a function via fixed-step gradient descent.

    Parameters
    ----------
    initial_x : float
        Starting point of the search.
    learning_rate : float
        Step size multiplied into the gradient at each update.
    num_iterations : int
        Number of update steps to perform.
    grad_fn : callable, optional
        Gradient function of the objective. Defaults to the module-level
        ``df`` for backward compatibility with the original hard-coded call.

    Returns
    -------
    tuple
        ``(final_x, x_history)`` where ``x_history`` lists every visited
        point, including the starting one (length ``num_iterations + 1``).
    """
    if grad_fn is None:
        grad_fn = df  # original behavior: always differentiate f via df
    x = initial_x
    x_history = [x]
    for _ in range(num_iterations):
        x = x - learning_rate * grad_fn(x)
        x_history.append(x)
    return x, x_history
initial_x = 0
learning_rate = 0.1
num_iterations = 50

# Run the optimisation, then draw f(x) with the descent path on top.
# The original extract never called gradient_descent or plotted anything
# before legend(), so the figure would have been empty.
final_x, x_history = gradient_descent(initial_x, learning_rate, num_iterations)
xs = np.linspace(-1, 5, 200)
plt.plot(xs, f(xs), label='f(x)')
plt.plot(x_history, [f(v) for v in x_history], 'ro-', label='Descent path')
plt.xlabel('x')
plt.ylabel('f(x)')
plt.title('Gradient Descent')
plt.legend()
plt.show()
4. Write a python program to classify different flower images using MLP.
import os
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Flatten, Dense, Dropout
from tensorflow.keras.layers import Flatten, Dense, Dropout, Input
# Validation split of the flower-image dataset, streamed from disk.
# NOTE(review): assumes `data_gen` is an ImageDataGenerator created with
# validation_split set, and `dataset_path` is the class-folder root — both
# are defined in lines omitted from this extract; confirm.
val_data = data_gen.flow_from_directory(
    directory=dataset_path,
    target_size=(64, 64),      # every image resized to 64x64
    batch_size=32,
    class_mode='categorical',  # one-hot labels to match the softmax head
    subset='validation'
)
# MLP classifier over raw 64x64 RGB pixels: flatten, two relu layers with
# heavy dropout, softmax over the dataset's classes.
# NOTE(review): `train_data` is created in lines omitted from this extract.
model = Sequential()
model.add(Input(shape=(64, 64, 3)))
model.add(Flatten())                  # 64*64*3 -> 12288 features
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))               # regularisation against overfitting
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(train_data.num_classes, activation='softmax'))

# Categorical cross-entropy pairs with the one-hot labels from the generator.
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
# One optimisation step per batch; len(generator) is batches per epoch.
steps_per_epoch = len(train_data)
validation_steps = len(val_data)
# Train for 20 epochs, validating on the held-out split after each epoch.
history = model.fit(
    train_data,
    validation_data=val_data,
    epochs=20,
    verbose=1,
    steps_per_epoch=steps_per_epoch,
    validation_steps=validation_steps
)
# Learning curves: accuracy on the left subplot, loss on the right.
plt.figure(figsize=(12, 4))
curves = [
    (1, 'accuracy', 'Accuracy', 'Training and Validation Accuracy'),
    (2, 'loss', 'Loss', 'Training and Validation Loss'),
]
for position, key, axis_label, title in curves:
    plt.subplot(1, 2, position)
    plt.plot(history.history[key], label='Training ' + axis_label)
    plt.plot(history.history['val_' + key], label='Validation ' + axis_label)
    plt.title(title)
    plt.xlabel('Epochs')
    plt.ylabel(axis_label)
    plt.legend()
plt.show()

# Final Evaluation on the validation generator.
loss, accuracy = model.evaluate(val_data)
print(f"Validation Loss: {loss:.4f}")
print(f"Validation Accuracy: {accuracy:.4f}")
5. Write a python program to classify different flower images using the SVM classifier.
import os
import numpy as np
import cv2
batch_size = 32

# Training split of the flower dataset.
# NOTE(review): `data_gen`, `dataset_path` and `image_size` are defined in
# lines omitted from this extract — confirm. Both calls below were missing
# their closing parenthesis in the extract (syntax error); restored.
train_data_gen = data_gen.flow_from_directory(
    directory=dataset_path,
    target_size=image_size,
    batch_size=batch_size,
    class_mode='categorical',
    subset='training'
)

# Validation split from the same directory tree.
val_data_gen = data_gen.flow_from_directory(
    directory=dataset_path,
    target_size=image_size,
    batch_size=batch_size,
    class_mode='categorical',
    subset='validation'
)

# Class names in generator index order, used later for reporting.
class_names = list(train_data_gen.class_indices.keys())
def extract_features_and_labels(data_gen):
    """Turn one batch from a Keras image generator into HOG feature vectors
    and integer class labels.

    NOTE(review): the batch/image iteration and HOG-computation lines were
    lost from this extract (the stray `break` was outside any loop);
    reconstructed in the obvious way. Assumes a `hog` function (e.g.
    skimage.feature.hog) is imported in the missing lines — confirm
    against the original.

    Returns
    -------
    tuple of np.ndarray
        (features, labels) — one HOG vector and one integer label per image.
    """
    features = []
    labels = []
    for batch_images, batch_labels in data_gen:
        for image, label in zip(batch_images, batch_labels):
            # Generator yields float images in [0, 1]; HOG wants grayscale.
            gray = cv2.cvtColor((image * 255).astype(np.uint8), cv2.COLOR_RGB2GRAY)
            hog_features = hog(gray)
            features.append(hog_features)
            labels.append(np.argmax(label))  # one-hot -> class index
        break  # Keras generators loop forever; use a single pass/batch only
    return np.array(features), np.array(labels)
# Encode labels as consecutive integers (fit on train, reuse on validation
# so the mapping is identical).
# NOTE(review): y_train/y_val and X_train/X_val come from lines omitted in
# this extract — confirm.
le = LabelEncoder()
y_train = le.fit_transform(y_train)
y_val = le.transform(y_val)

# Linear-kernel SVM on the HOG features.
clf = svm.SVC(kernel='linear')
clf.fit(X_train, y_train)
y_pred = clf.predict(X_val)

print("Classification Report:")
# NOTE(review): the report call was lost from the extract (the header
# printed nothing); restored — requires sklearn.metrics.classification_report.
print(classification_report(y_val, y_pred, target_names=class_names))
print("Accuracy Score:")
print(accuracy_score(y_val, y_pred))
import tensorflow as tf
import os
BATCH_SIZE = 32

# Augmentation pipeline for training images; 20% of the data is held out
# as a validation subset. All three calls below were missing their closing
# parenthesis in the extract (syntax error); restored.
train_datagen = ImageDataGenerator(
    rescale=1./255,
    rotation_range=20,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
    fill_mode='nearest',
    validation_split=0.2
)

# NOTE(review): `dataset_path` and `IMG_SIZE` are defined in lines missing
# from this extract — confirm.
train_generator = train_datagen.flow_from_directory(
    dataset_path,
    target_size=IMG_SIZE,
    batch_size=BATCH_SIZE,
    class_mode='categorical',
    subset='training'
)

validation_generator = train_datagen.flow_from_directory(
    dataset_path,
    target_size=IMG_SIZE,
    batch_size=BATCH_SIZE,
    class_mode='categorical',
    subset='validation'
)
# CNN classifier: three conv/pool stages, then a dense head.
# NOTE(review): the extract contained three MaxPooling2D layers with no
# convolutions or Input in front of them — the Conv2D/Input lines were lost.
# Restored with typical 32/64/128-filter 3x3 convolutions; confirm against
# the original. Requires Conv2D/Input imports from tensorflow.keras.layers.
model = Sequential([
    Input(shape=(*IMG_SIZE, 3)),
    Conv2D(32, (3, 3), activation='relu'),
    MaxPooling2D(pool_size=(2, 2)),
    Conv2D(64, (3, 3), activation='relu'),
    MaxPooling2D(pool_size=(2, 2)),
    Conv2D(128, (3, 3), activation='relu'),
    MaxPooling2D(pool_size=(2, 2)),
    Flatten(),
    Dense(128, activation='relu'),
    Dense(train_generator.num_classes, activation='softmax')
])

# NOTE(review): no compile call survived in the extract; model.fit would
# fail without one. Restored with the standard choices — confirm.
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
# Train for 10 epochs, validating after each. The closing parenthesis was
# missing in the extract (syntax error); restored.
history = model.fit(
    train_generator,
    epochs=10,
    validation_data=validation_generator
)
7. Write a python program to classify different handwritten character images using the SVM classifier.
import os
import cv2
import numpy as np


def load_images_from_folder(folder):
    """Load every image under `folder`, one subfolder per class.

    NOTE(review): the directory/file iteration lines were lost from this
    extract (`label_folder`, `img_path` were referenced but never defined,
    and there was no return); reconstructed in the obvious way — confirm
    against the original. A fixed resize is added so the flattened vectors
    all have equal length (required by np.array below).

    Returns
    -------
    tuple
        (images, labels, label_names): flattened pixel vectors, integer
        class indices, and the class-name list in index order.
    """
    images = []
    labels = []
    label_names = os.listdir(folder)
    for label_index, label_name in enumerate(label_names):
        label_folder = os.path.join(folder, label_name)
        if os.path.isdir(label_folder):
            for filename in os.listdir(label_folder):
                img_path = os.path.join(label_folder, filename)
                img = cv2.imread(img_path)
                if img is None:
                    continue  # skip unreadable / non-image files
                img = cv2.resize(img, (32, 32))  # uniform length for flatten()
                images.append(img.flatten())
                labels.append(label_index)
    return np.array(images), np.array(labels), label_names
print("Loading dataset...")
# NOTE(review): `dataset_path` is defined in a line missing from this extract.
x, y, label_names = load_images_from_folder(dataset_path)
x = x / 255.0  # scale pixel values to [0, 1]

# NOTE(review): the train/test split was lost from the extract (`x_train`
# was used but never defined); restored with a conventional 80/20 split —
# requires sklearn.model_selection.train_test_split. Confirm.
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=42)

# Linear-kernel SVM on the raw flattened pixels.
clf = svm.SVC(kernel='linear')
clf.fit(x_train, y_train)
y_pred = clf.predict(x_test)

print("\nClassification Report:")
# NOTE(review): the report call after the header was also lost; restored.
print(classification_report(y_test, y_pred, target_names=label_names))
import pandas as pd
import numpy as np
# Load the tabular dataset and inspect its structure.
# NOTE(review): `csv_file` is defined in a line missing from this extract.
df = pd.read_csv(csv_file)
print(df.info())

# Encode categorical columns: binary columns get integer label encoding,
# multi-valued columns get one-hot dummies.
# NOTE(review): the `for col in ...` header was lost from the extract
# (`col` was used but never bound); restored.
categorical_cols = df.select_dtypes(include=['object']).columns
for col in categorical_cols:
    if df[col].nunique() == 2:
        le = LabelEncoder()
        df[col] = le.fit_transform(df[col])
    else:
        df = pd.get_dummies(df, columns=[col])

# Convention here: last column is the target, the rest are features.
X = df.iloc[:, :-1].values
y = df.iloc[:, -1].values
# Standardize features to zero mean / unit variance. Fit on the training
# set only so no test-set statistics leak into the model.
# NOTE(review): X_train/X_test come from a train_test_split in lines
# missing from this extract — confirm.
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
def build_model(input_shape):
    """Build the binary-classification MLP.

    A 128-64-32 relu stack with dropout after the two widest layers and a
    single sigmoid output unit. The model is returned uncompiled.

    Parameters
    ----------
    input_shape : int
        Number of input features per sample.
    """
    net = Sequential()
    net.add(Input(shape=(input_shape,)))
    net.add(Dense(128, activation='relu'))
    net.add(Dropout(0.3))  # regularisation for small tabular datasets
    net.add(Dense(64, activation='relu'))
    net.add(Dropout(0.3))
    net.add(Dense(32, activation='relu'))
    net.add(Dense(1, activation='sigmoid'))  # P(positive class)
    return net
input_shape = X_train.shape[1]
model = build_model(input_shape)

# NOTE(review): the compile and EarlyStopping definitions were lost from
# this extract (`early_stopping` was referenced but never created);
# restored with the standard choices — confirm against the original.
# Requires tensorflow.keras.callbacks.EarlyStopping.
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
early_stopping = EarlyStopping(monitor='val_loss', patience=5, restore_best_weights=True)

# The closing parenthesis of this fit call was missing in the extract
# (syntax error); restored.
history = model.fit(
    X_train, y_train,
    validation_data=(X_test, y_test),
    epochs=50,
    batch_size=32,
    callbacks=[early_stopping]
)

# Sigmoid probabilities -> hard 0/1 predictions at the 0.5 threshold.
# NOTE(review): the thresholding was presumably in a lost line; without it
# the confusion matrix below cannot be computed from raw probabilities.
y_pred = (model.predict(X_test) > 0.5).astype(int)
y_test = y_test.astype(int)

# NOTE(review): `conf_matrix` was printed but never computed in the
# extract; restored — requires sklearn.metrics.confusion_matrix.
conf_matrix = confusion_matrix(y_test, y_pred)
print("Confusion Matrix:")
print(conf_matrix)
# Learning curves: one figure for accuracy, then one for loss.
for key, axis_label in (('accuracy', 'Accuracy'), ('loss', 'Loss')):
    plt.plot(history.history[key])            # training curve
    plt.plot(history.history['val_' + key])   # validation curve
    plt.title('Model ' + key)
    plt.ylabel(axis_label)
    plt.xlabel('Epoch')
    plt.show()