MMA Assignment - Python
Question 1
import numpy as np
# Steepest descent loop: evaluate the gradient and its norm at the current iterate
for k in range(max_iter):
    grad = gradient(x)
    grad_norm = np.linalg.norm(grad)

# Print results
print("Steepest Descent Method:")
print("Local Minima:", x)
print("Function Value at Minima:", f(x))
print("Norm of Gradient at Minima:", grad_norm)
print_table(history)
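For reference, a minimal sketch of a complete steepest-descent routine consistent with the fragments above is given below. The names f, gradient and print_table follow their usage in this assignment; the exact_line_search helper, the tolerance tol, and the history bookkeeping are assumptions of this sketch (Question 3 later defines a grid-search variant, exact_line_search1).

def steepest_descent(x0, max_iter=20, tol=1e-6):
    x = np.array(x0, dtype=float)
    history = []
    for k in range(max_iter):
        grad = gradient(x)
        grad_norm = np.linalg.norm(grad)
        if grad_norm < tol:          # stop once the gradient is (nearly) zero
            break
        direction = -grad            # steepest-descent direction
        alpha = exact_line_search(x, direction)
        history.append((k, x.copy(), f(x), grad, grad_norm, alpha))
        x = x + alpha * direction
    return x, history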
# Newton's method loop: evaluate the gradient and its norm at the current iterate
for k in range(max_iter):
    grad = gradient(x)
    grad_norm = np.linalg.norm(grad)

# Print results
print("Newton's Method Results:")
print("Local Minima:", x)
print("Function Value at Minima:", f(x))
print("Norm of Gradient at Minima:", grad_norm)
print_table(history1)
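Similarly, a minimal sketch of the Newton iteration consistent with the fragments above, assuming a hessian(x) helper that returns the Hessian matrix (its definition is not shown here) and a full Newton step of alpha = 1:

def newton_method(x0, max_iter=20, tol=1e-6):
    x = np.array(x0, dtype=float)
    history1 = []
    for k in range(max_iter):
        grad = gradient(x)
        grad_norm = np.linalg.norm(grad)
        if grad_norm < tol:
            break
        # Newton direction: solve H d = -grad rather than inverting H explicitly
        direction = np.linalg.solve(hessian(x), -grad)
        alpha = 1.0                  # full Newton step
        history1.append((k, x.copy(), f(x), grad, grad_norm, alpha))
        x = x + alpha * direction
    return x, history1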
Question 2
# Define the function
def f(x):
    x, y = x
    return (1 - x)**2 + (100*(y - (x*x))**2)
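The iterations below rely on a gradient(x) helper, and Newton's method also needs the Hessian. For the Rosenbrock function defined above these derivatives are standard; a sketch (the helper names are assumptions of this write-up):

def gradient(x):
    x, y = x
    # Partial derivatives of (1 - x)^2 + 100*(y - x^2)^2
    return np.array([-2*(1 - x) - 400*x*(y - x**2),
                     200*(y - x**2)])

def hessian(x):
    x, y = x
    return np.array([[2 - 400*(y - x**2) + 800*x**2, -400*x],
                     [-400*x, 200]])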
# Steepest descent iterations for the Rosenbrock function
for k in range(max_iter):
    grad = gradient(x)
    grad_norm = np.linalg.norm(grad)

# Print results
print("Steepest Descent Results:")
print("Local Minima:", x)
print("Function Value at Minima:", np.round(f(x)))
print("Norm of Gradient at Minima:", np.round(grad_norm))
print_table(history)
# Newton's method iterations for the Rosenbrock function
for k in range(max_iter):
    grad = gradient(x)
    grad_norm = np.linalg.norm(grad)

# Print results
print("Newton's Method Results:")
print("Local Minima:", x)
print("Function Value at Minima:", np.round(f(x)))
print("Norm of Gradient at Minima:", np.round(grad_norm))
print_table(history1)
Question 3
# Define the function
import math
def f(x):
    x1, x2 = x
    return (1 - math.exp(-(x1**2 + x2**2)/2))
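As in Question 2, the methods need the gradient of this function, and Newton's method needs its Hessian. Writing r = x1^2 + x2^2, we have ∇f(x) = exp(-r/2) * (x1, x2) and ∇²f(x) = exp(-r/2) * (I - x xᵀ); a sketch of the corresponding helpers (names assumed):

def gradient(x):
    x = np.asarray(x, dtype=float)
    return np.exp(-(x @ x) / 2) * x

def hessian(x):
    x = np.asarray(x, dtype=float)
    return np.exp(-(x @ x) / 2) * (np.eye(2) - np.outer(x, x))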
# Steepest descent iterations for Question 3
for k in range(max_iter):
    grad = gradient(x)
    grad_norm = np.linalg.norm(grad)
# Newton's method iterations for Question 3
for k in range(max_iter):
    grad = gradient(x)
    grad_norm = np.linalg.norm(grad)

# Print results
print("Newton's Method Results:")
print("Local Minima:", x)
print("Function Value at Minima:", f(x))
print("Norm of Gradient at Minima:", grad_norm)
print_table(history1)
x0 = [0.5,0.5]
newton_method(x0)
Newton's Method Results:
Local Minima: [0.5 0.5]
Function Value at Minima: 0.22119921692859512
Norm of Gradient at Minima: 0.5506953149031838
Iteration Table:
k    xk            f(xk)     ∇f(xk)                       ||∇f(xk)||   αk
0    [ 0.5  0.5]   0.2212    [ 0.38940039  0.38940039]    0.5507       1.0000
1    [-0.5 -0.5]   0.2212    [-0.38940039 -0.38940039]    0.5507       1.0000
2    [ 0.5  0.5]   0.2212    [ 0.38940039  0.38940039]    0.5507       1.0000
3    [-0.5 -0.5]   0.2212    [-0.38940039 -0.38940039]    0.5507       1.0000
4    [ 0.5  0.5]   0.2212    [ 0.38940039  0.38940039]    0.5507       1.0000
5    [-0.5 -0.5]   0.2212    [-0.38940039 -0.38940039]    0.5507       1.0000
6    [ 0.5  0.5]   0.2212    [ 0.38940039  0.38940039]    0.5507       1.0000
7    [-0.5 -0.5]   0.2212    [-0.38940039 -0.38940039]    0.5507       1.0000
8    [ 0.5  0.5]   0.2212    [ 0.38940039  0.38940039]    0.5507       1.0000
9    [-0.5 -0.5]   0.2212    [-0.38940039 -0.38940039]    0.5507       1.0000
10   [ 0.5  0.5]   0.2212    [ 0.38940039  0.38940039]    0.5507       1.0000
11   [-0.5 -0.5]   0.2212    [-0.38940039 -0.38940039]    0.5507       1.0000
12   [ 0.5  0.5]   0.2212    [ 0.38940039  0.38940039]    0.5507       1.0000
13   [-0.5 -0.5]   0.2212    [-0.38940039 -0.38940039]    0.5507       1.0000
14   [ 0.5  0.5]   0.2212    [ 0.38940039  0.38940039]    0.5507       1.0000
15   [-0.5 -0.5]   0.2212    [-0.38940039 -0.38940039]    0.5507       1.0000
16   [ 0.5  0.5]   0.2212    [ 0.38940039  0.38940039]    0.5507       1.0000
17   [-0.5 -0.5]   0.2212    [-0.38940039 -0.38940039]    0.5507       1.0000
18   [ 0.5  0.5]   0.2212    [ 0.38940039  0.38940039]    0.5507       1.0000
19   [-0.5 -0.5]   0.2212    [-0.38940039 -0.38940039]    0.5507       1.0000
We use a grid search on [0, 1] with 100 evenly spaced points, instead of a closed-form expression, to compute alpha.
def exact_line_search1(x, direction):
    # Grid search over 100 candidate step sizes in [0, 1]
    alpha = np.linspace(0, 1, 100)
    best_alpha = alpha[0]
    best_fval = f(x + best_alpha * direction)
    for a in alpha:
        new_fval = f(x + a * direction)
        if new_fval < best_fval:
            best_alpha = a
            best_fval = new_fval
    return best_alpha
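Inside the steepest-descent loop, this line search picks the step size along the negative gradient, roughly as follows (using the grad and x variables from the loop):

direction = -grad
alpha = exact_line_search1(x, direction)
x = x + alpha * direction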
# Steepest descent iterations using the grid-search line search
for k in range(max_iter):
    grad = gradient(x)
    grad_norm = np.linalg.norm(grad)

# Print results
print("Steepest Descent Results:")
print("Local Minima:", x)
print("Function Value at Minima:", f(x))
print("Norm of Gradient at Minima:", grad_norm)
print_table(history)
# Initial point and calling the methods
x0 = [3,3]
# Steepest descent iterations
for k in range(max_iter):
    grad = gradient(x)
    grad_norm = np.linalg.norm(grad)

# Print results
print("Steepest Descent Results:")
print("Local Minima:", x)
print("Function Value at Minima:", f(x))
print("Norm of Gradient at Minima:", grad_norm)
print_table(history)
x0 = [4,4]
steepest_descent(x0)
# Newton's method iterations
for k in range(max_iter):
    grad = gradient(x)
    grad_norm = np.linalg.norm(grad)

# Print results
print("Newton's Method Results:")
print("Local Minima:", x)
print("Function Value at Minima:", f(x))
print("Norm of Gradient at Minima:", grad_norm)
print_table(history1)
So we observe that, as the initial point moves farther from the origin, the minimum is not attained. This is because, away from the origin, the term exp(-(x1^2 + x2^2)/2) decays exponentially and the function becomes increasingly flat, so the gradient is nearly zero and the methods struggle to find a descent direction. Furthermore, far from the origin the Hessian becomes nearly singular and the function loses its convex structure.
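As a quick standalone check of this flattening (a sketch, separate from the assignment code above), note that the gradient norm of f equals exp(-(x1^2 + x2^2)/2) * ||x||, which can be evaluated at the different starting points:

import numpy as np

def grad_norm(x):
    x = np.asarray(x, dtype=float)
    # ||∇f(x)|| = exp(-(x1^2 + x2^2)/2) * ||x||
    return np.exp(-(x @ x) / 2) * np.linalg.norm(x)

print(grad_norm([0.5, 0.5]))   # ~0.55    : steep near the origin
print(grad_norm([3, 3]))       # ~5.2e-4  : already very flat
print(grad_norm([4, 4]))       # ~6.4e-7  : essentially flat, so descent stalls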