
Practical Assignment 2

Q1. Answer the following questions.

1) Implement the Artificial Neural Network training process.



import numpy as np

# Sigmoid function and its derivative (the derivative takes the sigmoid
# output as its input)
def sigmoid(x):
    return 1 / (1 + np.exp(-x))

def sigmoid_derivative(x):
    return x * (1 - x)

# Neural Network Class
class SimpleANN:
    def __init__(self, input_size, hidden_size, output_size, learning_rate=0.1):
        self.learning_rate = learning_rate
        # Initialize weights and biases
        self.weights_input_hidden = np.random.rand(input_size, hidden_size)
        self.weights_hidden_output = np.random.rand(hidden_size, output_size)
        self.bias_hidden = np.random.rand(1, hidden_size)
        self.bias_output = np.random.rand(1, output_size)

    def forward(self, X):
        # Forward propagation
        self.hidden_input = np.dot(X, self.weights_input_hidden) + self.bias_hidden
        self.hidden_output = sigmoid(self.hidden_input)
        self.output_input = np.dot(self.hidden_output, self.weights_hidden_output) + self.bias_output
        self.output = sigmoid(self.output_input)
        return self.output

    def backward(self, X, y, output):
        # Backpropagation
        output_error = y - output
        output_delta = output_error * sigmoid_derivative(output)

        hidden_error = output_delta.dot(self.weights_hidden_output.T)
        hidden_delta = hidden_error * sigmoid_derivative(self.hidden_output)

        # Update weights and biases
        self.weights_hidden_output += self.hidden_output.T.dot(output_delta) * self.learning_rate
        self.bias_output += np.sum(output_delta, axis=0, keepdims=True) * self.learning_rate
        self.weights_input_hidden += X.T.dot(hidden_delta) * self.learning_rate
        self.bias_hidden += np.sum(hidden_delta, axis=0, keepdims=True) * self.learning_rate

    def train(self, X, y, epochs=1000):
        for epoch in range(epochs):
            output = self.forward(X)
            self.backward(X, y, output)
            if epoch % 100 == 0:
                loss = np.mean((y - output) ** 2)
                print(f"Epoch {epoch}, Loss: {loss}")

# Example usage
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])  # XOR inputs
y = np.array([[0], [1], [1], [0]])              # XOR outputs
nn = SimpleANN(input_size=2, hidden_size=2, output_size=1)
nn.train(X, y)
print("Output after training:")
print(nn.forward(X))
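
The sigmoid output is a continuous value in (0, 1), so the trained network's result is easiest to read after thresholding. A minimal sketch, assuming a 0.5 cutoff (the cutoff is a convention, not part of the assignment):

# Hedged sketch: threshold the continuous sigmoid outputs at 0.5 (an assumed
# cutoff) to obtain binary XOR predictions.
predictions = (nn.forward(X) > 0.5).astype(int)
print("Binary predictions:", predictions.ravel())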

2) Implement Backpropagation.



import numpy as np

# Activation function and its derivative
def sigmoid(x):
    return 1 / (1 + np.exp(-x))

def sigmoid_derivative(x):
    return x * (1 - x)

class NeuralNetworkWithBackprop:
    def __init__(self, input_size, hidden_size, output_size, learning_rate=0.1):
        self.learning_rate = learning_rate
        # Initialize weights and biases
        self.weights_input_hidden = np.random.rand(input_size, hidden_size)
        self.weights_hidden_output = np.random.rand(hidden_size, output_size)
        self.bias_hidden = np.random.rand(1, hidden_size)
        self.bias_output = np.random.rand(1, output_size)

    def forward(self, X):
        # Forward propagation
        self.hidden_input = np.dot(X, self.weights_input_hidden) + self.bias_hidden
        self.hidden_output = sigmoid(self.hidden_input)
        self.output_input = np.dot(self.hidden_output, self.weights_hidden_output) + self.bias_output
        self.output = sigmoid(self.output_input)
        return self.output

    def backward(self, X, y, output):
        # Backpropagation
        output_error = y - output
        output_delta = output_error * sigmoid_derivative(output)

        hidden_error = output_delta.dot(self.weights_hidden_output.T)
        hidden_delta = hidden_error * sigmoid_derivative(self.hidden_output)

        # Update weights and biases
        self.weights_hidden_output += self.hidden_output.T.dot(output_delta) * self.learning_rate
        self.bias_output += np.sum(output_delta, axis=0, keepdims=True) * self.learning_rate
        self.weights_input_hidden += X.T.dot(hidden_delta) * self.learning_rate
        self.bias_hidden += np.sum(hidden_delta, axis=0, keepdims=True) * self.learning_rate

    def train(self, X, y, epochs=1000):
        for epoch in range(epochs):
            output = self.forward(X)
            self.backward(X, y, output)
            if epoch % 100 == 0:
                loss = np.mean((y - output) ** 2)
                print(f"Epoch {epoch}, Loss: {loss}")

# Example usage
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])  # XOR inputs
y = np.array([[0], [1], [1], [0]])              # XOR outputs
nn = NeuralNetworkWithBackprop(input_size=2, hidden_size=2, output_size=1)
nn.train(X, y)
print("Output after training:")
print(nn.forward(X))
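
A standard way to test a backpropagation implementation is a numerical gradient check: nudge one weight by a small epsilon and compare the change in loss against the analytic gradient. The sketch below is illustrative and assumes the loss L = 0.5 * sum((y - output)**2), whose analytic gradient matches the deltas computed above with a sign flip (the update adds y - output terms, i.e. it steps down the loss):

# Hedged sketch: central-difference gradient check on one hidden->output weight.
eps = 1e-5

def loss_fn(net):
    return 0.5 * np.sum((y - net.forward(X)) ** 2)

net = NeuralNetworkWithBackprop(input_size=2, hidden_size=2, output_size=1)
out = net.forward(X)
output_delta = (y - out) * sigmoid_derivative(out)
analytic = -net.hidden_output.T.dot(output_delta)  # dL/dW (note the minus sign)

net.weights_hidden_output[0, 0] += eps
loss_plus = loss_fn(net)
net.weights_hidden_output[0, 0] -= 2 * eps
loss_minus = loss_fn(net)
net.weights_hidden_output[0, 0] += eps  # restore the original weight

numeric = (loss_plus - loss_minus) / (2 * eps)
print("analytic:", analytic[0, 0], "numeric:", numeric)  # should agree closely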

3) Implement deep learning.



# Reuses numpy (np), sigmoid, and sigmoid_derivative defined above.
class DeepNN:
    def __init__(self, layer_sizes, learning_rate=0.1):
        self.learning_rate = learning_rate
        self.weights = [np.random.rand(layer_sizes[i], layer_sizes[i + 1])
                        for i in range(len(layer_sizes) - 1)]
        self.biases = [np.random.rand(1, layer_sizes[i + 1])
                       for i in range(len(layer_sizes) - 1)]

    def forward(self, X):
        self.layer_inputs = []    # pre-activation values per layer
        self.layer_outputs = [X]  # activations per layer, input first
        for i in range(len(self.weights)):
            z = np.dot(X, self.weights[i]) + self.biases[i]
            X = sigmoid(z)
            self.layer_inputs.append(z)
            self.layer_outputs.append(X)
        return X

    def backward(self, X, y):
        # Delta at the output layer
        output_error = y - self.layer_outputs[-1]
        deltas = [output_error * sigmoid_derivative(self.layer_outputs[-1])]

        # Propagate deltas backwards through the hidden layers
        for i in range(len(self.weights) - 2, -1, -1):
            error = deltas[-1].dot(self.weights[i + 1].T)
            delta = error * sigmoid_derivative(self.layer_outputs[i + 1])
            deltas.append(delta)

        deltas.reverse()
        for i in range(len(self.weights)):
            self.weights[i] += self.layer_outputs[i].T.dot(deltas[i]) * self.learning_rate
            self.biases[i] += np.sum(deltas[i], axis=0, keepdims=True) * self.learning_rate

    def train(self, X, y, epochs=1000):
        for epoch in range(epochs):
            output = self.forward(X)
            self.backward(X, y)

# Example usage (X and y are the XOR arrays defined above)
layer_sizes = [2, 4, 3, 1]  # input layer, two hidden layers, and output layer
deep_nn = DeepNN(layer_sizes)
deep_nn.train(X, y)
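
train() above prints nothing, so a quick hedged check is to inspect the network's predictions on the same XOR data after training. Note that with all-sigmoid layers and uniform random weights in [0, 1), convergence on XOR is not guaranteed; rerunning with a different random state or more epochs is a common remedy.

# Hedged sketch: inspect the trained DeepNN on the XOR inputs defined above.
print("DeepNN output after training:")
print(deep_nn.forward(X))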

4) Implement the Multilayer Perceptron algorithm.


import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense

# XOR dataset
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y = np.array([[0], [1], [1], [0]])

# Building the MLP model
model = Sequential()
model.add(Dense(4, input_dim=2, activation='relu'))  # First hidden layer
model.add(Dense(4, activation='relu'))               # Second hidden layer
model.add(Dense(1, activation='sigmoid'))            # Output layer

# Compile the model
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

# Train the model
model.fit(X, y, epochs=1000, verbose=0)

# Evaluate the model
print("MLP output for XOR problem:")
print(model.predict(X))
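
Keras also reports the compiled loss and metrics through model.evaluate; a minimal sketch, with a 0.5 threshold (an assumed convention) to turn the sigmoid outputs into class labels:

# Hedged sketch: summarize training quality and binary predictions.
loss, accuracy = model.evaluate(X, y, verbose=0)
print(f"Loss: {loss:.4f}, Accuracy: {accuracy:.2f}")
print("Predicted classes:", (model.predict(X, verbose=0) > 0.5).astype(int).ravel())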

5) Write a program to create a target string, starting from a random string, using a Genetic Algorithm.

import random

target = "hello world"
population_size = 100
mutation_rate = 0.01
generation_limit = 1000

# Generate a random string
def random_string(length):
    return ''.join(random.choice('abcdefghijklmnopqrstuvwxyz ') for _ in range(length))

# Fitness function: count of characters matching the target
def fitness(candidate):
    return sum(1 for expected, actual in zip(target, candidate) if expected == actual)

# Mutate a string
def mutate(string):
    new_string = ''
    for char in string:
        if random.random() < mutation_rate:
            new_string += random.choice('abcdefghijklmnopqrstuvwxyz ')
        else:
            new_string += char
    return new_string

# Crossover
def crossover(parent1, parent2):
    midpoint = random.randint(0, len(target))
    return parent1[:midpoint] + parent2[midpoint:]

# Genetic Algorithm
population = [random_string(len(target)) for _ in range(population_size)]
for generation in range(generation_limit):
    population = sorted(population, key=fitness, reverse=True)
    if fitness(population[0]) == len(target):
        print(f"Target string '{target}' created in generation {generation}")
        break
    next_population = population[:10]  # Keep top 10
    for _ in range(population_size - 10):
        parent1, parent2 = random.sample(next_population, 2)
        child = crossover(parent1, parent2)
        next_population.append(mutate(child))
    population = next_population
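
If the loop exhausts generation_limit without a perfect match, the program above ends silently. A hedged variant of the same main loop, using Python's for/else to report the best candidate found (the 100-generation print interval is an arbitrary choice):

# Hedged sketch: the GA loop above with periodic progress reports and a
# fallback message when the generation limit is reached.
population = [random_string(len(target)) for _ in range(population_size)]
for generation in range(generation_limit):
    population = sorted(population, key=fitness, reverse=True)
    if generation % 100 == 0:
        print(f"Gen {generation}: best '{population[0]}' ({fitness(population[0])}/{len(target)})")
    if fitness(population[0]) == len(target):
        print(f"Target string '{target}' created in generation {generation}")
        break
    next_population = population[:10]
    for _ in range(population_size - 10):
        parent1, parent2 = random.sample(next_population, 2)
        next_population.append(mutate(crossover(parent1, parent2)))
    population = next_population
else:
    print(f"Generation limit reached; best candidate: '{population[0]}'")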

6) Write a program to implement the Travelling Salesman Problem using a genetic algorithm.

import random
import numpy as np

# Generate random cities as coordinates
num_cities = 10
cities = np.random.rand(num_cities, 2)  # Random 2D coordinates for cities

# Distance calculation
def distance(a, b):
    return np.linalg.norm(a - b)

# Fitness function (inverse of total path length)
def fitness(route):
    total_distance = sum(distance(cities[route[i]], cities[route[i + 1]])
                         for i in range(len(route) - 1))
    total_distance += distance(cities[route[-1]], cities[route[0]])  # Return to start
    return 1 / total_distance

# Mutation: swap two cities in the route (in place)
def mutate(route):
    a, b = random.sample(range(len(route)), 2)
    route[a], route[b] = route[b], route[a]

# Crossover: keep a slice of parent1, fill the rest from parent2 in order
def crossover(parent1, parent2):
    start, end = sorted(random.sample(range(len(parent1)), 2))
    child = [None] * len(parent1)
    child[start:end] = parent1[start:end]
    ptr = 0
    for gene in parent2:
        if gene not in child:
            while child[ptr] is not None:
                ptr += 1
            child[ptr] = gene
    return child

# Genetic Algorithm
population_size = 100
population = [random.sample(range(num_cities), num_cities) for _ in range(population_size)]
generations = 500

for generation in range(generations):
    population = sorted(population, key=fitness, reverse=True)
    next_generation = population[:10]  # Elitism
    for _ in range(population_size - 10):
        parent1, parent2 = random.sample(next_generation, 2)
        child = crossover(parent1, parent2)
        if random.random() < 0.1:
            mutate(child)
        next_generation.append(child)
    population = next_generation

best_route = max(population, key=fitness)
print("Best route found:", best_route)
print("Total distance:", 1 / fitness(best_route))

7) Write a program to study and analyze the genetic algorithm life cycle.



import random
import matplotlib.pyplot as plt

# Problem setup: target array of ones
target = [1] * 10
population_size = 100
mutation_rate = 0.01
generations = 100

# Randomly initialize an individual with 0s and 1s
def random_individual():
    return [random.randint(0, 1) for _ in range(len(target))]

# Fitness function: count of matching elements
def fitness(individual):
    return sum(1 for i, j in zip(individual, target) if i == j)

# Mutation: flip each gene with probability mutation_rate
def mutate(individual):
    return [gene if random.random() > mutation_rate else 1 - gene for gene in individual]

# Crossover: single-point splice
def crossover(parent1, parent2):
    point = random.randint(1, len(target) - 1)
    return parent1[:point] + parent2[point:]

# Genetic Algorithm
population = [random_individual() for _ in range(population_size)]
best_fitness_over_time = []

for generation in range(generations):
    population = sorted(population, key=fitness, reverse=True)
    best_fitness = fitness(population[0])
    best_fitness_over_time.append(best_fitness)

    if best_fitness == len(target):
        print(f"Target reached in generation {generation}")
        break

    next_population = population[:10]  # Elitism
    for _ in range(population_size - 10):
        parent1, parent2 = random.sample(next_population, 2)
        child = mutate(crossover(parent1, parent2))
        next_population.append(child)
    population = next_population

# Plot fitness over generations
plt.plot(best_fitness_over_time)
plt.xlabel('Generation')
plt.ylabel('Best Fitness Score')
plt.title('Genetic Algorithm Fitness Progression')
plt.show()
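
To analyze the life cycle more fully, the average population fitness can be tracked alongside the best fitness; a hedged rerun of the same loop with the extra bookkeeping (avg_fitness_over_time is an illustrative name):

# Hedged sketch: record mean population fitness per generation as well,
# then plot both curves to see how the whole population converges.
population = [random_individual() for _ in range(population_size)]
best_fitness_over_time, avg_fitness_over_time = [], []

for generation in range(generations):
    population = sorted(population, key=fitness, reverse=True)
    best_fitness_over_time.append(fitness(population[0]))
    avg_fitness_over_time.append(sum(fitness(ind) for ind in population) / len(population))
    if best_fitness_over_time[-1] == len(target):
        break
    next_population = population[:10]
    for _ in range(population_size - 10):
        parent1, parent2 = random.sample(next_population, 2)
        next_population.append(mutate(crossover(parent1, parent2)))
    population = next_population

plt.plot(best_fitness_over_time, label='Best fitness')
plt.plot(avg_fitness_over_time, label='Average fitness')
plt.xlabel('Generation')
plt.ylabel('Fitness')
plt.legend()
plt.title('Best vs. average fitness per generation')
plt.show()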
