0% found this document useful (0 votes)
7 views4 pages

Bài 12: Min x² + y² − 2x − 6y + 14 Subject To x² + y² − 16 = 0 — Code Matlab

The document contains code in MATLAB for solving three optimization problems using numerical methods like Newton's method and Lagrange multipliers method. The problems involve maximizing or minimizing functions subject to equality or inequality constraints. The code defines the objective functions, constraints, and their derivatives. It then initializes variables, sets tolerances, and runs the optimization methods like Newton iteratively to find the optimal solution.
Copyright
© All Rights Reserved
We take content rights seriously. If you suspect this is your content, claim it here.
Available Formats
Download as DOCX, PDF, TXT or read online on Scribd
0% found this document useful (0 votes)
7 views4 pages

Bài 12: Min x² + y² − 2x − 6y + 14 Subject To x² + y² − 16 = 0 — Code Matlab

The document contains code in MATLAB for solving three optimization problems using numerical methods like Newton's method and Lagrange multipliers method. The problems involve maximizing or minimizing functions subject to equality or inequality constraints. The code defines the objective functions, constraints, and their derivatives. It then initializes variables, sets tolerances, and runs the optimization methods like Newton iteratively to find the optimal solution.
Copyright
© All Rights Reserved
We take content rights seriously. If you suspect this is your content, claim it here.
Available Formats
Download as DOCX, PDF, TXT or read online on Scribd
You are on page 1/ 4

Bài 12

min x² + y² -2x-6y+14
subject to x²+ y²-16=0

code matlab

% Problem 12: minimize  f(x,y) = x^2 + y^2 - 2x - 6y + 14
% subject to  g(x,y) = x^2 + y^2 - 16 = 0,
% solved with Newton's method applied to the KKT conditions of the
% Lagrangian L(x,lambda) = f(x) + lambda*g(x).
% (The original loop ignored the constraint entirely and stopped on
% abs(f) < tol, which is wrong since f is not zero at the optimum.)

% Initial guess: start at the unconstrained minimizer (1, 3) of f,
% with multiplier lambda = 0. Avoid (0,0), where grad g vanishes and
% the KKT matrix is singular.
x0 = [1; 3];
lambda = 0;

% Define the objective function and its derivatives
fun = @(x) x(1)^2 + x(2)^2 - 2*x(1) - 6*x(2) + 14;
grad = @(x) [2*x(1) - 2; 2*x(2) - 6];
hessian = @(x) [2, 0; 0, 2];

% Define the constraint function and its derivatives
constraint = @(x) x(1)^2 + x(2)^2 - 16;
constraint_grad = @(x) [2*x(1); 2*x(2)];
constraint_hess = @(x) [2, 0; 0, 2];

% Set tolerance (on the KKT residual) and maximum number of iterations
tol = 1e-6;
max_iter = 100;

% Newton's method on the KKT system
for iter = 1:max_iter
    grad_f = grad(x0);
    g_val = constraint(x0);
    grad_g = constraint_grad(x0);

    % KKT residual: stationarity (grad f + lambda*grad g) and feasibility (g)
    r = [grad_f + lambda*grad_g; g_val];

    % Check for convergence
    if norm(r) < tol
        break;
    end

    % Newton (KKT) system: Hessian of the Lagrangian bordered by grad g
    H = hessian(x0) + lambda*constraint_hess(x0);
    A = [H, grad_g; grad_g', 0];
    delta = -A \ r;

    % Update the primal variables and the multiplier
    x0 = x0 + delta(1:2);
    lambda = lambda + delta(3);
end
fprintf('Optimal solution: x = %f, y = %f\n', x0(1), x0(2));
fprintf('Objective value at optimum: %f\n', fun(x0));
fprintf('Constraint value at optimum: %f\n', constraint(x0));

14. max x + y + z
subject to x² + y² + z² = 1

code matlab
% Problem 14: maximize x + y + z subject to x^2 + y^2 + z^2 = 1,
% posed as minimizing the negated objective and solved by Newton's
% method on the KKT conditions (Lagrange multiplier method).
% (The original loop never stored or updated lambda, so its residual
% norm([grad_f; g_val]) — with grad_f constantly [-1;-1;-1] — could
% never reach tol, and it used eye(3) instead of the Hessian of the
% Lagrangian, which is 2*lambda*eye(3) since the objective is linear.)

% Define the objective function and its derivatives
fun = @(x) -(x(1) + x(2) + x(3));
grad = @(x) [-1; -1; -1];

% Define the constraint function and its derivatives
constraint = @(x) x(1)^2 + x(2)^2 + x(3)^2 - 1;
constraint_grad = @(x) [2*x(1); 2*x(2); 2*x(3)];
constraint_hess = @(x) 2*eye(3);

% Set tolerance and maximum number of iterations
tol = 1e-6;
max_iter = 100;

% Initial guess; lambda starts nonzero so the KKT matrix is nonsingular
x0 = [1; 0; 0];
lambda = 1;

% Lagrange multiplier (Newton-KKT) method
for iter = 1:max_iter
    % Evaluate objective and constraint functions and their derivatives
    f_val = fun(x0);
    g_val = constraint(x0);
    grad_f = grad(x0);
    grad_g = constraint_grad(x0);

    % KKT residual: stationarity and primal feasibility
    r = [grad_f + lambda*grad_g; g_val];

    % Check for convergence
    if norm(r) < tol
        break;
    end

    % KKT system: Hessian of the Lagrangian (objective Hessian is zero)
    % bordered by the constraint gradient
    H = lambda*constraint_hess(x0);
    A = [H, grad_g; grad_g', 0];

    % Solve the KKT system for the Newton step
    delta = -A \ r;

    % Update primal variables and multiplier
    x0 = x0 + delta(1:3);
    lambda = lambda + delta(4);
end

fprintf('Optimal solution: x = %f, y = %f, z = %f\n', x0(1), x0(2), x0(3));
fprintf('Objective value at optimum: %f\n', -fun(x0));
fprintf('Constraint value at optimum: %f\n', constraint(x0));

11. f(x) = x₁·e^(−‖x‖²) + ‖x‖²/20


Code matlab

% Problem 11: minimize  f(x) = x1*exp(-||x||^2) + ||x||^2/20
% using Newton's method (unconstrained).
% (The original gradient omitted the x1/10 term in df/dx1 and the
% whole cross term -2*x1*x2*exp(-||x||^2) in df/dx2; the Hessian was
% likewise wrong, and the stopping test abs(f) < tol is incorrect —
% first-order optimality means the GRADIENT is zero, not f.)

% Initial guess
x0 = [1; 1];

% Define the objective function
fun = @(x) x(1)*exp(-norm(x)^2) + norm(x)^2/20;

% Gradient, with s = ||x||^2:
%   df/dx1 = exp(-s)*(1 - 2*x1^2) + x1/10
%   df/dx2 = -2*x1*x2*exp(-s) + x2/10
grad = @(x) [exp(-norm(x)^2)*(1 - 2*x(1)^2) + x(1)/10; ...
             -2*x(1)*x(2)*exp(-norm(x)^2) + x(2)/10];

% Hessian:
%   d2f/dx1^2  = exp(-s)*(4*x1^3 - 6*x1) + 1/10
%   d2f/dx1dx2 = -2*x2*exp(-s)*(1 - 2*x1^2)
%   d2f/dx2^2  = exp(-s)*(-2*x1 + 4*x1*x2^2) + 1/10
hessian = @(x) [exp(-norm(x)^2)*(4*x(1)^3 - 6*x(1)) + 1/10, ...
                -2*x(2)*exp(-norm(x)^2)*(1 - 2*x(1)^2); ...
                -2*x(2)*exp(-norm(x)^2)*(1 - 2*x(1)^2), ...
                exp(-norm(x)^2)*(-2*x(1) + 4*x(1)*x(2)^2) + 1/10];

% Set tolerance and maximum number of iterations
tol = 1e-6;
max_iter = 100;

% Newton's method.
% NOTE(review): plain Newton converges to a stationary point, which is
% not guaranteed to be the global minimizer when the Hessian is
% indefinite along the path — confirm the result is a minimum.
for iter = 1:max_iter
    grad_f = grad(x0);

    % Check for convergence (first-order optimality: grad f ~ 0)
    if norm(grad_f) < tol
        break;
    end

    % Newton step
    delta_x = -hessian(x0) \ grad_f;
    x0 = x0 + delta_x;
end

fprintf('Optimal solution: x = %f, y = %f\n', x0(1), x0(2));
fprintf('Objective value at optimum: %f\n', fun(x0));

13. Max √(xy)
Subject to 20x + 10y = 200

You might also like