function [x, conv_flag] = tma4180s17_ex03_3(method, x, tol, maxiter)
% Minimising Rosenbrock's function with backtracking line search, using
% either gradient descent or Newton's method.
%
% Input:
%   method:  'gradientdescent' or 'newton',
%   x:       starting point,
%   tol:     tolerance for the norm of the gradient, and
%   maxiter: maximum number of iterations.
%
% Output:
%   x:         stopping point (approximate critical point), and
%   conv_flag: logical true if the gradient norm fell below tol within
%              maxiter iterations.

% Set up the Rosenbrock function, its gradient, and its Hessian.
f = @(x) 100 * (x(2) - x(1).^2).^2 + (1 - x(1)).^2;
df = @(x) [-400 * x(1) .* (x(2) - x(1).^2) + 2 * (x(1) - 1); ...
           200 * (x(2) - x(1).^2)];
d2f = @(x) [-400 * (x(2) - x(1).^2) + 800 * x(1).^2 + 2, -400 * x(1); ...
            -400 * x(1), 200];

% Backtracking parameters.
switch method
    case 'gradientdescent'
        alpha0 = 1;
        rho = 1/2;
        c = 1/4;
    case 'newton'
        alpha0 = 1;
        rho = 1/2;
        c = 1/4;
end

% Store the gradient to avoid repeated evaluations.
dfx = df(x);

% Main descent loop. Stop when we are close to a critical point or have
% iterated for too long.
n = 1;
conv_flag = false;
while norm(dfx) > tol && n <= maxiter
    % Choose the search direction.
    switch method
        case 'gradientdescent'
            p = -dfx;
        case 'newton'
            p = -d2f(x) \ dfx;
            % Fall back to the steepest descent direction if the Newton
            % direction is not a descent direction.
            if dot(p, dfx) >= 0
                p = -dfx;
            end
    end

    % Backtracking line search. Store fixed expressions in order to reduce
    % the number of evaluations.
    alpha = alpha0;
    fx = f(x);
    c_dot_dfx_p = c * dot(dfx, p);
    while f(x + alpha * p) > fx + alpha * c_dot_dfx_p
        alpha = rho * alpha;
    end

    % Take the step.
    x = x + alpha * p;
    dfx = df(x);
    n = n + 1;
end

% Report whether the gradient tolerance was met.
if norm(dfx) <= tol
    conv_flag = true;
end

end
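
% Example usage (a minimal sketch; the starting point, tolerance, and
% iteration budget below are illustrative choices, not values prescribed
% by the exercise):
%
%   x0 = [-1.2; 1];
%   [xmin, ok] = tma4180s17_ex03_3('newton', x0, 1e-8, 1000);
%   if ok
%       fprintf('Converged to (%.6f, %.6f)\n', xmin(1), xmin(2));
%   end
%
% Both methods should approach the global minimiser of Rosenbrock's
% function at (1, 1); gradient descent typically needs far more
% iterations than Newton's method on this problem.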