AML Programs
def mcculloch_pitts_neuron(inputs, weights, threshold):
    # Weighted sum of the binary inputs
    weighted_sum = sum(i * w for i, w in zip(inputs, weights))
    # Output decision: fire (1) only if the weighted sum reaches the threshold
    if weighted_sum >= threshold:
        return 1
    else:
        return 0
# Example 1: AND gate (weights and threshold chosen so only input (1, 1) fires)
weights_and = [0.5, 0.5]
threshold_and = 1.0
# Example 2: OR gate
weights_or = [0.5, 0.5]
threshold_or = 0.5
# Test the neuron with every binary input pattern
input_patterns = [(0, 0), (0, 1), (1, 0), (1, 1)]
print("McCulloch-Pitts Neuron - AND gate:")
for input_pattern in input_patterns:
    result = mcculloch_pitts_neuron(input_pattern, weights_and, threshold_and)
    print(f"Input: {input_pattern}, Output: {result}")
plt.xlabel('Feature 1')
plt.ylabel('Feature 2')
plt.legend()
plt.title('Perceptron Decision Boundary on Iris Dataset (2 Features)')
plt.show()
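Only the closing plot calls of the perceptron exercise survive above; the following is a hedged sketch of the training and plotting code that such a figure suggests, assuming scikit-learn's Perceptron on the first two Iris features (the class split, variable names, and plot labels are illustrative, not taken from the original):

import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.linear_model import Perceptron

# Use only the first two features so the decision boundary can be drawn in 2-D
iris = load_iris()
X = iris.data[:, :2]
y = (iris.target == 0).astype(int)  # setosa vs. the rest, a linearly separable split

clf = Perceptron(max_iter=1000, random_state=42)
clf.fit(X, y)

# Scatter the two classes and draw the learned linear decision boundary
plt.scatter(X[y == 0, 0], X[y == 0, 1], label='Not setosa')
plt.scatter(X[y == 1, 0], X[y == 1, 1], label='Setosa')
xs = np.linspace(X[:, 0].min(), X[:, 0].max(), 100)
w = clf.coef_[0]
b = clf.intercept_[0]
plt.plot(xs, -(w[0] * xs + b) / w[1], 'k--', label='Decision boundary')
# ...followed by the axis labels, legend, title, and plt.show() calls shown above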
3. Implement a feedforward neural network with a differentiable threshold unit and train it using the backpropagation algorithm to classify handwritten digits from the MNIST dataset.
import numpy as np
import tensorflow as tf
from tensorflow.keras import layers, models
from tensorflow.keras.datasets import mnist
from tensorflow.keras.utils import to_categorical
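Only the imports of this program survive here; the following is a minimal sketch of how the network might be built and trained with the Keras API imported above, using a sigmoid hidden layer as the differentiable threshold unit (the layer sizes, optimizer, and epoch count are illustrative choices, not taken from the original):

# Load and normalize MNIST, flattening each 28x28 image into a 784-vector
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(-1, 784).astype("float32") / 255.0
x_test = x_test.reshape(-1, 784).astype("float32") / 255.0
y_train = to_categorical(y_train, 10)
y_test = to_categorical(y_test, 10)

# Feedforward network: a sigmoid hidden layer acts as the differentiable threshold unit
model = models.Sequential([
    layers.Input(shape=(784,)),
    layers.Dense(128, activation="sigmoid"),
    layers.Dense(10, activation="softmax"),
])

# Keras applies backpropagation of the cross-entropy loss during fit()
model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])
model.fit(x_train, y_train, epochs=5, batch_size=128, validation_split=0.1)
test_loss, test_acc = model.evaluate(x_test, y_test)
print(f"Test accuracy: {test_acc:.4f}")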
# Calculate accuracy
accuracy = accuracy_score(y_test, y_pred)
print(f"Accuracy of Bagging: {accuracy:.2f}")
bagging_predictions = []
base_classifier_predictions = []
n_iterations = 100
for _ in range(n_iterations):
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=None)
    # Bagging predictions
    bagging.fit(X_train, y_train)
    y_pred = bagging.predict(X_test)
    bagging_predictions.append(y_pred)
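The snippets above use bagging, base_classifier, X, and y without defining them; the following is a hedged sketch of the setup they imply, assuming scikit-learn's BaggingClassifier over decision trees on the Iris data (the base estimator, ensemble size, and dataset are assumptions, not taken from the original):

from sklearn.datasets import load_iris
from sklearn.ensemble import BaggingClassifier
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier

# Dataset and estimators assumed by the snippets above
X, y = load_iris(return_X_y=True)
base_classifier = DecisionTreeClassifier()
bagging = BaggingClassifier(base_classifier, n_estimators=10, random_state=42)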
# GA Parameters
population_size = 100
num_generations = 50
mutation_rate = 0.1
# Initialization
population = [random.uniform(-5, 5) for _ in range(population_size)]
        # Apply mutation
        if random.random() < mutation_rate:
            mutation = random.uniform(-0.5, 0.5)
            child += mutation
        new_population.append(child)
    population = new_population
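Only the parameters, the initialization, and the mutation step of this genetic algorithm survive; the following is a minimal sketch of the generation loop they belong to, reusing population, population_size, num_generations, and mutation_rate from above and assuming an illustrative fitness function with simple truncation selection and averaging crossover (none of these choices are taken from the original):

import random

def fitness(x):
    # Illustrative objective: maximize f(x) = -(x - 2)^2, so the optimum is x = 2
    return -(x - 2) ** 2

for generation in range(num_generations):
    # Selection: keep the fitter half of the population as parents
    parents = sorted(population, key=fitness, reverse=True)[:population_size // 2]
    new_population = []
    while len(new_population) < population_size:
        # Crossover: average two randomly chosen parents
        parent1, parent2 = random.sample(parents, 2)
        child = (parent1 + parent2) / 2
        # Mutation: the same step as in the fragment above
        if random.random() < mutation_rate:
            child += random.uniform(-0.5, 0.5)
        new_population.append(child)
    population = new_population

best = max(population, key=fitness)
print(f"Best solution: {best:.4f}, fitness: {fitness(best):.4f}")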
    # Return the ten users most similar to the target user
    return similar_users.index[:10]
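The return statement above is all that survives of a user-similarity program; the following is a heavily hedged sketch of a function it could plausibly close, assuming a pandas user-item ratings DataFrame and cosine similarity (the function name, arguments, and similarity measure are all hypothetical):

import pandas as pd
from sklearn.metrics.pairwise import cosine_similarity

def find_similar_users(ratings, user_id):
    # ratings: DataFrame indexed by user id, columns are items, missing ratings as NaN
    sim_matrix = cosine_similarity(ratings.fillna(0))
    sim = pd.Series(sim_matrix[ratings.index.get_loc(user_id)], index=ratings.index)
    # Rank every other user by similarity to the target user, most similar first
    similar_users = sim.drop(user_id).sort_values(ascending=False)
    return similar_users.index[:10]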
def index_to_state(index):
    # Convert a flat Q-table row index back into (row, col) grid coordinates
    row = index // grid_world.shape[1]
    col = index % grid_world.shape[1]
    return (row, col)
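The Q-learning loop below relies on grid_world, actions, q_table, state_to_index, and several hyperparameters that the fragment never defines; the following is a minimal sketch of the setup it assumes, where the grid layout, rewards, and hyperparameter values are illustrative rather than the original ones:

import numpy as np
import random

# 5x5 grid: 0 = free cell, -1 = obstacle, 10 = reward at the goal state (4, 4)
grid_world = np.zeros((5, 5))
grid_world[1, 1] = -1
grid_world[2, 3] = -1
grid_world[4, 4] = 10

# Actions: up, down, left, right as (row, col) offsets
actions = [(-1, 0), (1, 0), (0, -1), (0, 1)]
num_actions = len(actions)

def state_to_index(state):
    # Flatten (row, col) into a single Q-table row index
    return state[0] * grid_world.shape[1] + state[1]

# One Q-table row per grid cell, one column per action
q_table = np.zeros((grid_world.size, num_actions))

# Hyperparameters (illustrative values)
num_episodes = 1000
learning_rate = 0.1
discount_factor = 0.9
exploration_prob = 0.2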
# Q-Learning algorithm
for episode in range(num_episodes):
    state = (0, 0)  # Start state
    while state != (4, 4):  # Continue until reaching the goal state
        current_state_index = state_to_index(state)
        # Choose an action with an epsilon-greedy policy
        if random.uniform(0, 1) < exploration_prob:
            action = random.choice(range(num_actions))  # Exploration
        else:
            action = np.argmax(q_table[current_state_index])  # Exploitation
        # Take the chosen action and observe the next state and reward
        new_state = (state[0] + actions[action][0], state[1] + actions[action][1])
        if (new_state[0] < 0 or new_state[0] >= grid_world.shape[0]
                or new_state[1] < 0 or new_state[1] >= grid_world.shape[1]
                or grid_world[new_state] == -1):
            # If the new state is outside the grid or is an obstacle, penalize the agent
            reward = -1
            new_state = state
        else:
            reward = grid_world[new_state]
        new_state_index = state_to_index(new_state)
        # Q-value update for the (state, action) pair
        # (learning_rate and discount_factor are assumed to be defined with the other hyperparameters)
        q_table[current_state_index, action] += learning_rate * (
            reward + discount_factor * np.max(q_table[new_state_index])
            - q_table[current_state_index, action])
        state = new_state
# Once training is complete, follow the learned Q-table greedily to find the optimal path
# from the start state to the goal state
state = (0, 0)
path = [state]
while state != (4, 4):
    state_index = state_to_index(state)
    action = np.argmax(q_table[state_index])
    new_state = (state[0] + actions[action][0], state[1] + actions[action][1])
    path.append(new_state)
    state = new_state
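Once the greedy walk terminates, the recovered route can be printed for a quick sanity check:

print("Optimal path from (0, 0) to (4, 4):", path)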