function conjGrad()
% CONJGRAD  Demonstrations of the Conjugate Gradient method.
%   Exercise 5.1: iteration counts of CG on Hilbert systems of growing
%   dimension. Exercise 5.8: A-norm error decay of CG on SPD matrices
%   with hand-picked eigenvalue distributions.
close all

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% Exercise 5.1 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
dim = [5 8 12 20];   % Problem dimensions
tol = 1E-6;          % Residual tolerance
doCalcError = 0;     % Option to calculate error, used in exercise 5.8
x_exact = 0;         % Don't need the exact solution here

% Go through all problem dimensions, find the amount of iterations needed.
% The Conjugate Gradient Algorithm is implemented further down.
itercount = zeros(size(dim));
for i = 1:length(dim)
    x0 = zeros(dim(i),1);
    b = ones(dim(i),1);
    A = hilb(dim(i));
    [~, iter, ~, ~] = cgMeth(A,b,x0,tol,doCalcError,x_exact);
    itercount(i) = iter;
end

% Plot iteration count for each dimension.
% Note the escalating amount of iterations needed for convergence. This is
% due to the increasing condition number of the Hilbert matrices which
% grows superlinearly with the dimension of the system.
bar(itercount);
set(gca,'XTickLabel',dim);
xlabel('Dimension')
ylabel('Iterations')
title('Exercise 5.1')
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

%%%%%%%%%%%%%%%%%%%%%%%%%%%% Exercise 5.8 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% SPD matrices are generated by using the eigenvalue decomposition. First,
% an orthonormal matrix Q is generated, then a diagonal matrix D with the
% desired eigenvalues. The matrix is then assembled as A = Q*D*Q'.
dim = 12;                  % Set problem dimension
tol = 1E-14;               % Tolerance for CG algorithm
Q = orth(randn(dim,dim));  % Random orthonormal matrix
x0 = rand(dim,1);          % Starting guess
b = ones(dim,1);           % Right hand side

%%%%%%% Case 1: Uniformly distributed eigenvalues
D = 1:dim;
D = diag(D);
A = Q*D*Q';
doCalcError = 1;  % Here, we want information about the error in A-norm.
x_exact = A\b;    % Use direct solution to the problem as exact solution.
[~,iter,~,errorVec] = cgMeth(A,b,x0,tol,doCalcError,x_exact);

% Plot error as a function of number of iterations.
% We expect no sharp decrease since all eigenvalues are evenly distributed,
% except after 12 iterations, when the problem is solved (nearly) exactly.
figure
semilogy(0:iter,errorVec)
xlabel('Dimension')
ylabel('Error in A-norm')
title('Exercise 5.8, Case 1')

%%%%%%% Case 2: Four eigenvalues identically 1, eight large uniformly
% distributed eigenvalues
D = zeros(dim,1);
for i = 1:dim/3
    D(i) = 1;
end
for i = dim/3+1:dim
    D(i) = 100+100*i;
end
D = diag(D);
A = Q*D*Q';
doCalcError = 1;
x_exact = A\b;
[~,iter,~,errorVec] = cgMeth(A,b,x0,tol,doCalcError,x_exact);

% We expect a sharp decrease after nine iterations, since we have nine
% distinct eigenvalues and the problem should be solved nearly exactly
% after nine iterations.
figure
semilogy(0:iter,errorVec)
xlabel('Dimension')
ylabel('Error in A-norm')
title('Exercise 5.8, Case 2')

%%%%%%%% Case 3: Four eigenvalues clustered around 1, eight large uniformly
% distributed eigenvalues
D = zeros(dim,1);
for i = 1:dim/3
    D(i) = 0.95 + i/100;
end
for i = dim/3+1:dim
    D(i) = 100+100*i;
end
D = diag(D);
A = Q*D*Q';
cond(A)
doCalcError = 1;
x_exact = A\b;
[~,iter,~,errorVec] = cgMeth(A,b,x0,tol,doCalcError,x_exact);

% We expect a slight decrease after nine iterations, since we almost have
% nine distinct eigenvalues. Note that the problem is not solved exactly
% after twelve iterations, since we are not dealing with exact arithmetic.
figure
semilogy(0:iter,errorVec)
xlabel('Dimension')
ylabel('Error in A-norm')
title('Exercise 5.8, Case 3')

%%%%%%% Case 4: Four eigenvalues clustered around 1, four clustered around
% 40, four large uniformly distributed.
D = zeros(dim,1);
for i = 1:dim/3
    D(i) = 0.95 + i/100;
end
% BUG FIX: loop previously started at i = dim/3, overwriting D(dim/3) —
% one of the eigenvalues the loop above just placed near 1 — so the
% "around 1" cluster only had three members instead of four.
for i = dim/3+1:2*dim/3
    D(i) = 40 + i/100;
end
for i = 2*dim/3+1:dim
    D(i) = 100+100*i;
end
D = diag(D);
A = Q*D*Q';
cond(A)
doCalcError = 1;
x_exact = A\b;
[~,iter,~,errorVec] = cgMeth(A,b,x0,tol,doCalcError,x_exact);

% We expect a slight decrease after six iterations, since we almost have
% six distinct eigenvalues. Note that the problem is not solved exactly
% after twelve iterations, since we are not dealing with exact arithmetic.
figure
semilogy(0:iter,errorVec)
xlabel('Dimension')
ylabel('Error in A-norm')
title('Exercise 5.8, Case 4')
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
end

% Conjugate Gradient Algorithm
function [x,iter,resvec,errorvec] = cgMeth(A,b,x,tol,doCalcError,x_exact)
% CGMETH  Conjugate Gradient method for A*x = b with A symmetric
% positive definite.
%
% Inputs:
%   A           - SPD system matrix
%   b           - right-hand side vector
%   x           - starting guess (overwritten with the solution)
%   tol         - stop when the 2-norm of the residual drops below tol
%   doCalcError - if nonzero, record the A-norm error at every iteration
%   x_exact     - reference solution used when doCalcError is set
%
% Outputs:
%   x        - approximate solution
%   iter     - number of iterations performed
%   resvec   - residual norms, one entry per iteration (including start)
%   errorvec - A-norm errors per iteration if doCalcError, else scalar 0

% Initialize variables for first step
r = A*x - b;
p = -r;
Ap = A*p;        % To avoid having two matrix-vector products
rr = r'*r;       % To avoid excessive inner product evaluations
res = sqrt(rr);  % Residual norm
iter = 0;
resvec = res;
if doCalcError
    % Calculate A-norm error of x for testing purposes
    errorvec = sqrt((x-x_exact)'*A*(x-x_exact));
else
    % Run algorithm normally without checking A-norm error
    errorvec = 0;
end

while res > tol
    alpha = rr/(p'*Ap);
    x = x + alpha*p;
    r = r + alpha*Ap;
    rr2 = r'*r;
    beta = rr2/rr;
    p = -r + beta*p;
    Ap = A*p;
    rr = rr2;
    iter = iter+1;
    res = sqrt(rr);
    resvec(end+1) = res;
    if doCalcError
        errorvec(end+1) = sqrt((x-x_exact)'*A*(x-x_exact));
    end
end
end