# Learning Algorithms Implementation

## Setup and Algorithms
```python
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_moons
from sklearn.metrics import accuracy_score
```
```python
# Build a two-class "moons" dataset that no straight line separates cleanly;
# the fixed random_state makes the sample reproducible.
X, y = make_moons(
    n_samples=100,
    noise=0.2,
    random_state=42,
)
```
```python
def perceptron(X, y, w0, max_iter=100):
    """Train a linear classifier with the classic mistake-driven perceptron rule.

    Parameters
    ----------
    X : array of shape (n_samples, n_features) — input points.
    y : array of shape (n_samples,) — labels, expected in {-1, +1}
        (``np.sign(w . x)`` is compared directly against ``y[i]``).
    w0 : array of shape (n_features,) — initial weights; NOT modified.
    max_iter : int — maximum number of passes (epochs) over the data.

    Returns
    -------
    (w, errors) : the final weight vector and the per-epoch misclassification
    rate. Training stops early at the first error-free epoch.
    """
    # Copy w0: the original aliased it (`w = w0`) and the in-place `w += ...`
    # below then silently mutated the caller's initial weights.
    w = np.array(w0, dtype=float)
    n = len(y)
    errors = []
    for _ in range(max_iter):
        errors_count = 0
        for i in range(n):
            if np.sign(np.dot(w, X[i])) != y[i]:
                w += y[i] * X[i]  # Mistake-driven update: nudge w toward y[i]*X[i]
                errors_count += 1
        errors.append(errors_count / n)
        if errors_count == 0:  # Converged: a full pass with no mistakes
            break
    return w, errors
```
```python
def pocket(X, y, w0, max_iter=100):
    """Pocket algorithm: perceptron updates, but keep the best weights seen.

    Unlike the plain perceptron, this is usable on non-separable data: the
    weight vector with the lowest observed error is kept "in the pocket"
    and returned, rather than whatever the last update produced.

    Parameters
    ----------
    X : array of shape (n_samples, n_features) — input points.
    y : array of shape (n_samples,) — labels, expected in {-1, +1}.
    w0 : array of shape (n_features,) — initial weights; NOT modified.
    max_iter : int — maximum number of passes over the data.

    Returns
    -------
    (best_w, errors) : the best weight vector found and the per-epoch
    misclassification rate.
    """
    # Copy w0 so the in-place updates below cannot mutate the caller's array.
    w = np.array(w0, dtype=float)
    best_w = w.copy()
    n = len(y)
    best_error = float('inf')
    errors = []
    for _ in range(max_iter):
        errors_count = 0
        for i in range(n):
            if np.sign(np.dot(w, X[i])) != y[i]:
                w += y[i] * X[i]  # Standard perceptron update
                errors_count += 1
        current_error = errors_count / n
        errors.append(current_error)
        # "Pocket" step (missing from the truncated original): remember the
        # weights whenever this epoch's error improves on the best so far.
        if current_error < best_error:
            best_error = current_error
            best_w = w.copy()
        if errors_count == 0:  # Perfect pass: no better epoch is possible
            break
    return best_w, errors
```
```python
def delta_rule(X, y, w0, learning_rate=0.1, max_iter=100):
    """Online delta rule (Widrow-Hoff / LMS) training of a linear unit.

    Each sample triggers a gradient step on the squared error
    ``(y[i] - w . x[i])**2 / 2``, i.e. ``w += lr * (y[i] - w.x[i]) * x[i]``.

    Parameters
    ----------
    X : array of shape (n_samples, n_features) — input points.
    y : array of shape (n_samples,) — targets, expected in {-1, +1} so the
        sign of ``w . x`` can be used for classification.
    w0 : array of shape (n_features,) — initial weights; NOT modified.
    learning_rate : float — step size for each per-sample update.
    max_iter : int — number of passes over the data (always runs all of them).

    Returns
    -------
    (w, errors) : final weights and the per-epoch misclassification rate.
    """
    # Copy w0 so the in-place updates below cannot mutate the caller's array.
    w = np.array(w0, dtype=float)
    n = len(y)
    errors = []
    for _ in range(max_iter):
        for i in range(n):
            # Gradient step on the per-sample squared error.
            w += learning_rate * (y[i] - np.dot(w, X[i])) * X[i]
        # Record this epoch's misclassification rate. The original declared
        # `errors` but never appended to it, so it always returned [].
        errors.append(float(np.mean(np.sign(X @ w) != y)))
    return w, errors
```
```python
# Initialize parameters
np.random.seed(42)  # For reproducibility

# make_moons labels are in {0, 1}, but every algorithm above compares
# np.sign(w . x) (range {-1, 0, +1}) directly against y — remap the labels
# to {-1, +1} so those comparisons are meaningful. (No-op if already +/-1.)
y = np.where(y <= 0, -1, 1)

w0 = np.random.rand(X.shape[1])  # Random initialization of weights

# Set maximum iterations
T_max = 100
```
We will plot the empirical error evolution over iterations for each algorithm.
```python
# Train each algorithm from the same starting point. The error curves were
# never computed anywhere in the original file, so the plot below referenced
# undefined names. Pass a copy of w0 so no run can mutate the shared initial
# weights seen by the next run.
w_perceptron, errors_perceptron = perceptron(X, y, w0.copy(), T_max)
w_pocket, errors_pocket = pocket(X, y, w0.copy(), T_max)
w_delta, errors_delta = delta_rule(X, y, w0.copy(), max_iter=T_max)

# Plot the per-epoch empirical (misclassification) error of each algorithm.
plt.figure(figsize=(12, 8))
plt.plot(errors_perceptron, label='Perceptron', color='blue')
plt.plot(errors_pocket, label='Pocket', color='orange')
plt.plot(errors_delta, label='Delta Rule', color='green')
plt.title('Empirical Error Evolution Over Iterations')
plt.xlabel('Iterations')
plt.ylabel('Empirical Error')
plt.legend()
plt.grid()
plt.ylim(0, 1)  # Error rate is a fraction in [0, 1]
plt.show()
```
```python
# Classification accuracy of each trained weight vector (w_perceptron,
# w_pocket, w_delta come from the training step above).
# Remap labels to {-1, +1} so they match the sign-based predictions;
# this is a no-op if y is already in that form.
y_true = np.where(y <= 0, -1, 1)

y_pred_perceptron = np.sign(np.dot(X, w_perceptron))
y_pred_pocket = np.sign(np.dot(X, w_pocket))
y_pred_delta = np.sign(np.dot(X, w_delta))

# The accuracy_* values were never computed in the original file.
accuracy_perceptron = accuracy_score(y_true, y_pred_perceptron)
accuracy_pocket = accuracy_score(y_true, y_pred_pocket)
accuracy_delta = accuracy_score(y_true, y_pred_delta)

# Print results. Each f-string stays on one physical line: the original
# split the string literals across lines, which is a syntax error.
print(f'Perceptron Accuracy: {accuracy_perceptron:.2f}, Iterations: {len(errors_perceptron)}')
print(f'Pocket Accuracy: {accuracy_pocket:.2f}, Iterations: {len(errors_pocket)}')
print(f'Delta Rule Accuracy: {accuracy_delta:.2f}, Iterations: {len(errors_delta)}')
```
## Step 7: Conclusion
After executing the above code, you can analyze the outputs, the progression of
empirical error, and the accuracy of each algorithm. Here are points to consider
for your conclusion: