function [opt_data, model] = gradient_opt(model, varargin)
%function [opt_data, model] = gradient_opt(model, model_data, reduced_data)
%
% Function performing an optimization, either reduced or detailed, using a
% gradient method with one of several stepsize rules (Armijo, Wolfe-Powell,
% fixed, exponential, dichotomie or quotient).
%
% Required fields of model:
%  model.optimization.init_params: initial parameter values
%  model.optimization.tol: tolerance: how close to 0 shall the gradient norm be?
%  model.optimization.get_Jacobian: gradient function
%  model.optimization.objective_function: not needed in gradient_opt, but
%      needed in armijo_stepsize.m and wolfe_powell_stepsize.m
%  model.optimization.stepsize_rule: one of 'fixed_stepsize', 'armijo',
%      'dichotomie', 'exponential', 'quotient' or 'wolfepowell'
%
% Generated fields of opt_data:
%  opt_data.optimal_params: the optimal parameter values
%  opt_data.nr_fct_calls: number of function calls
%  opt_data.nr_grad_calc: number of needed gradient calculations
%
% Oliver Zeeb 25.05.2010
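%
% A minimal call sketch (hypothetical field values; the full model setup
% depends on the surrounding problem definition):
%
%   model.optimization.init_params   = [0.5; 0.5]; %start value for mu
%   model.optimization.tol           = 1e-3;       %stop when ||grad f|| <= tol
%   model.optimization.stepsize_rule = 'armijo';
%   model.optimization.min_or_max    = 'min';
%   model.optimization.opt_mode      = 'detailed';
%   [opt_data, model] = gradient_opt(model, model_data);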
disp('entered gradient_opt')
%adding some fields to the model, if not existent
if ~isfield(model.optimization,'tol')
  model.optimization.tol = 1e-3;
end
if ~isfield(model.optimization,'stepsize_rule')
  model.optimization.stepsize_rule = 'quotient';
end
if ~isfield(model.optimization,'min_or_max')
  model.optimization.min_or_max = 'min';
end

if model.optimization.derivatives_available
  model.compute_derivative_info = 1;
end
%setting signum for maximizing or minimizing,
% i.e. changes f(x) to -f(x) if function f(x) shall be maximized
if strcmp(model.optimization.min_or_max, 'min')
  min_or_max = 1;
elseif strcmp(model.optimization.min_or_max, 'max')
  min_or_max = -1;
else
  disp('maximize or minimize? model.optimization.min_or_max not properly defined')
  return;
end
nr_fct = 0;  %counter for number of function calls
nr_grad = 0; %counter for number of gradient calculations

%save current mu values of the model
mu_original = get_mu(model);

%set the mus in the model to the mus given in init_params
%model.(model.mu_names{k}) = model.optimization.init_params(k);
model = set_mu(model, model.optimization.init_params);
%get initial parameters that should be optimized
x = get_mu_to_optimize(model);
%get the according boundaries
[lower_bound, upper_bound] = get_bound_to_optimize(model);
if strcmp(model.optimization.opt_mode, 'detailed')
  % if ~strcmp(inputname(2), 'model_data')
  %   error('Its not called model data')
  % end
  model_data = varargin{1};
  % model.optimization.opt_mode = 'detailed';
elseif strcmp(model.optimization.opt_mode, 'reduced')
  % if ~strcmp(inputname(2), 'reduced_data')
  %   error('input argument is not called reduced data')
  % end
  if length(varargin) == 3
    reduced_data = varargin{1};
    model_data = varargin{2};
    detailed_data = varargin{3};
    model.optimization.opt_mode = 'reduced';
  else
    reduced_data = varargin{1};
    model.optimization.opt_mode = 'reduced';
  end
end
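%Expected call patterns (following the argument handling above):
%  detailed: gradient_opt(model, model_data)
%  reduced:  gradient_opt(model, reduced_data)
%        or  gradient_opt(model, reduced_data, model_data, detailed_data)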
tol = model.optimization.tol;
PM = x;        %for opt_data: array with all the mu_i
output = [];   %array containing the f(mu_k)
max_iter = 50; %maximal number of iterations of the gradient method
opti_iter = 0; %iteration counter
n = 5;         %number of recent steps compared in the stagnation check below
               %(assumed window length; original value not shown)
if strcmp(model.optimization.opt_mode, 'detailed')

  %initial gradient
  grad_f_x = model.optimization.get_Jacobian(model, model_data, [], []) * min_or_max;
  nr_grad = nr_grad + 1;
  norm_grad_f_x = norm(grad_f_x);
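  %(multiplying the gradient by min_or_max exploits grad(-f) = -grad(f),
  % so the same descent loop below handles both minimization and
  % maximization)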
  % START OF OPTIMIZATION
  while (norm(grad_f_x) > tol) && (opti_iter <= max_iter)

    d = -grad_f_x; %steepest descent direction

    %calculation of the stepsize t:
    switch model.optimization.stepsize_rule
      case {'fixed_stepsize'}
        t = model.optimization.stepsize_fix;
      case {'armijo'} % Armijo rule
        [model, t, nr_fct_stepsize, output] = stepsize_armijo(model, model_data, output, x, d);
        nr_fct = nr_fct + nr_fct_stepsize;
      case {'dichotomie'}
        [model, t, nr_fct_stepsize, output] = stepsize_dichotomie(model, model_data, output, x, d);
        nr_fct = nr_fct + nr_fct_stepsize;
      case {'exponential'}
        if ~exist('exp_step', 'var')
          exp_step = 1;
        end
        [model, t, nr_fct_stepsize, output] = stepsize_exponential(model, model_data, output, x, d, exp_step);
        exp_step = exp_step + 1;
        nr_fct = nr_fct + nr_fct_stepsize;
      case {'quotient'}
        if ~exist('quot_step', 'var')
          quot_step = 1;
        end
        [model, t, nr_fct_stepsize, output] = stepsize_quotient(model, model_data, output, x, d, quot_step);
        quot_step = quot_step + 1;
        nr_fct = nr_fct + nr_fct_stepsize;
      case {'wolfepowell'}
        %WOLFE POWELL NOT WORKING AT THE MOMENT!!!
        [t, nr_fct_stepsize, nr_grad_stepsize] = stepsize_wolfe_powell(model, x, d);
        nr_fct = nr_fct + nr_fct_stepsize;
        nr_grad = nr_grad + nr_grad_stepsize;
      otherwise
        fprintf('unknown stepsize rule')
    end

    %gradient step and iteration counter
    x = x + t * d;
    opti_iter = opti_iter + 1;
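    %Note on the stepsize rules (constants and details live in the
    %stepsize_*.m helpers): 'armijo' backtracks t until the standard
    %sufficient-decrease condition
    %   f(x + t*d) <= f(x) + sigma * t * grad_f_x' * d,  sigma in (0,1),
    %holds, while 'quotient' uses a diminishing stepsize driven by the
    %iteration counter quot_step.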
    disp(['next parameter: ', num2str(x)])
    disp('--------------------')
    %check whether the new x is allowed by the according mu_range or
    %whether it exceeds the boundaries. If so: set this component to the
    %boundary.
    for k = 1:length(x)
      if x(k) > upper_bound(k)
        x(k) = upper_bound(k);
      elseif x(k) < lower_bound(k)
        x(k) = lower_bound(k);
      end
    end

    model = set_mu_to_optimize(model, x); %write the actual parameter setting in the model
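    %(the clipping above is a componentwise projection onto the admissible
    % parameter box [lower_bound, upper_bound], i.e. a projected gradient
    % step)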
    %additional break condition:
    %if there were no major changes in the last n steps --> finish!
    PM = [PM; x]; %store the new parameter set
    if (size(PM,1) > n) && (length(output) > n) %already more than n values to compare?
      PM_diff = [];
      output_diff = [];
      for k = 1:n
        PM_diff = [PM_diff; abs(PM(end-k,:) - PM(end-k+1,:))]; %difference of the mus in the last n steps
        output_diff = [output_diff; abs(output(end-k) - output(end-k+1))]; %difference of the objective function in the last n steps
      end
      if (max(max(PM_diff)) <= tol*1e-2) && (max(output_diff) <= tol*1e-2)
        disp('no major change in mu and objective function')
        disp('leaving gradient_opt')
        break;
      end
    end
    %calculate the gradient with the new parameter setting:
    [grad_f_x, dummy, output] = model.optimization.get_Jacobian(model, model_data, [], []);
    nr_grad = nr_grad + 1;
    norm_grad_f_x = [norm_grad_f_x, norm(grad_f_x)];
    %output = output_array{1};
    grad_f_x = grad_f_x * min_or_max;

    disp(['Actual gradient norm: ', num2str(norm_grad_f_x(end))]);
  end
  % END OF OPTIMIZATION (detailed)

  %if the function was maximized instead of minimized, the output values
  %must be multiplied by -1 again:
  output = output * min_or_max;

  opt_data.optimal_params = x;
  opt_data.nr_fct_calls = nr_fct;
  opt_data.nr_grad_calc = nr_grad;
  opt_data.parameter_sets = PM;
  opt_data.output = output;
  opt_data.max_output = max(output);
  opt_data.max_output_paramsets = PM(end,:);
  opt_data.nr_opti_iter = opti_iter;
  opt_data.norm_grad_f_x = norm_grad_f_x;

  %setting the mu values in the model back to the original values
  % for k = 1:length(model.mu_names)
  %   model.(model.mu_names{k}) = mu_original(k);
  % end
  model = set_mu(model, mu_original);
else %here: reduced optimization

  [grad_f_x, Delta_grad] = model.optimization.get_Jacobian(model, reduced_data);
  grad_f_x = grad_f_x * min_or_max;
  nr_grad = nr_grad + 1;
  norm_grad_f_x = norm(grad_f_x);
  % PM = x;        %for opt_data: array with all the mu_i
  % output = [];   %array containing the f(mu_k)
  % max_iter = 40; %maximal number of iterations of the gradient method
  % opti_iter = 0; %counter
  if isfield(model, 'lipschitz_type') && (strcmp(model.lipschitz_type, 'Hessian'))
    Delta_mu = zeros(length(grad_f_x), 1); %error bound for the optimal parameters of the reduced optimization
  else
    Delta_mu = 0; %error bound for the optimal parameters of the reduced optimization
  end
  switch model.optimization.stepsize_rule
    case {'fixed_stepsize'}
      C_alpha = model.optimization.stepsize_fix;
      [C_L, model] = model.get_lipschitz_constant(model);
      epsilon_m = zeros(max_iter+1, 1);
    case {'quotient'}
      epsilon_m = zeros(max_iter+1, 1);
      C_alpha = 1; %upper bound for the optimization stepsize
      [C_L, model] = model.get_lipschitz_constant(model);
    case {'armijo'}
      epsilon_m = ones(max_iter+1, 1) .* model.optimization.armijo_t_min * model.optimization.stepsize_factor;
      C_alpha = 1; %upper bound for the optimization stepsize
      [C_L, model] = model.get_lipschitz_constant(model);
    otherwise
      disp('unknown stepsize rule');
  end
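  %C_L is a Lipschitz constant of the gradient, C_alpha an upper bound for
  %the stepsize and epsilon_m a per-iteration stepsize tolerance; all three
  %feed the parameter error estimator Delta_mu updated inside the loop
  %below.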
  % START OF OPTIMIZATION
  while (norm(grad_f_x) > tol) && (opti_iter <= max_iter)

    d = -grad_f_x; %steepest descent direction

    %calculation of the stepsize t:
    switch model.optimization.stepsize_rule
      case {'fixed_stepsize'}
        t = model.optimization.stepsize_fix;
      case {'armijo'} % Armijo rule
        [model, t, nr_fct_stepsize, output] = stepsize_armijo(model, reduced_data, output, x, d);
        nr_fct = nr_fct + nr_fct_stepsize;
        [C_L, model] = model.get_lipschitz_constant(model); %update Lipschitz constant
      case {'dichotomie'}
        [model, t, nr_fct_stepsize, output] = stepsize_dichotomie(model, model_data, output, x, d);
        nr_fct = nr_fct + nr_fct_stepsize;
      case {'exponential'}
        if ~exist('exp_step', 'var')
          exp_step = 1;
        end
        [model, t, nr_fct_stepsize, output] = stepsize_exponential(model, model_data, output, x, d, exp_step);
        exp_step = exp_step + 1;
        nr_fct = nr_fct + nr_fct_stepsize;
      case {'quotient'}
        if ~exist('quot_step', 'var')
          quot_step = 1;
        end
        [model, t, nr_fct_stepsize, output] = stepsize_quotient(model, reduced_data, output, x, d, quot_step);
        quot_step = quot_step + 1;
        nr_fct = nr_fct + nr_fct_stepsize;
        C_alpha = t; %update of the maximum stepsize in this step for error estimation
        [C_L, model] = model.get_lipschitz_constant(model); %update Lipschitz constant
      case {'wolfepowell'}
        %WOLFE POWELL NOT WORKING AT THE MOMENT!!!
        [t, nr_fct_stepsize, nr_grad_stepsize] = stepsize_wolfe_powell(model, x, d);
        nr_fct = nr_fct + nr_fct_stepsize;
        nr_grad = nr_grad + nr_grad_stepsize;
      otherwise
        fprintf('unknown stepsize rule')
    end

    %gradient step and iteration counter
    x = x + t * d;
    opti_iter = opti_iter + 1;
    disp(['Next parameter in reduced gradient optimization: ', num2str(x)]);
    disp('------------------------------------')
    %check whether the new x is allowed by the according mu_range or
    %whether it exceeds the boundaries. If so: set this component to the
    %boundary.
    for k = 1:length(x)
      if x(k) > upper_bound(k)
        x(k) = upper_bound(k);
      elseif x(k) < lower_bound(k)
        x(k) = lower_bound(k);
      end
    end

    model = set_mu_to_optimize(model, x); %write the actual parameter setting in the model
    %additional break condition:
    %if there were no major changes in the last n steps --> finish!
    PM = [PM; x]; %store the new parameter set
    if (size(PM,1) > n) && (length(output) > n) %already more than n values to compare?
      PM_diff = [];
      output_diff = [];
      for k = 1:n
        PM_diff = [PM_diff; abs(PM(end-k,:) - PM(end-k+1,:))]; %difference of the mus in the last n steps
        output_diff = [output_diff; abs(output(end-k) - output(end-k+1))]; %difference of the objective function in the last n steps
      end
      if (max(max(PM_diff)) <= tol*1e-2) && (max(output_diff) <= tol*1e-2)
        disp('no major change in mu and objective function')
        disp('leaving gradient_opt')
        break;
      end
    end
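    %The update below propagates a parameter error bound through one
    %gradient step: with stepsize bound C_alpha, Lipschitz constant C_L of
    %the gradient, stepsize tolerance epsilon_m and gradient error bound
    %Delta_grad, it computes
    %  Delta_mu(m+1) = Delta_mu(m) + C_alpha*C_L*Delta_mu(m)
    %                  + epsilon_m*|grad_f_x| + C_alpha*Delta_grad.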
    n_Delta = size(Delta_mu, 2);
    disp('next estimates')
    disp(['augmenting constants: ', num2str(1 + C_alpha*norm(C_L))]);
    disp(['norm of the gradient: ', num2str(norm(grad_f_x))]);
    disp(['error estimator gradient: ', num2str(Delta_grad(:,end)')]);
    disp(['actual adding: ', num2str(C_alpha*Delta_grad(:,end)')]);

    if isfield(model, 'lipschitz_type') && (strcmp(model.lipschitz_type, 'Hessian'))
      Delta_mu_next = Delta_mu(:,n_Delta) + C_alpha.*(C_L*Delta_mu(:,n_Delta)) ...
                      + epsilon_m(n_Delta)*abs(grad_f_x(:,:))' + C_alpha*Delta_grad(:,end);
    else
      Delta_mu_next = Delta_mu(:,n_Delta) + C_alpha.*(C_L*Delta_mu(:,n_Delta)) ...
                      + epsilon_m(n_Delta)*norm(grad_f_x(:,:))' + C_alpha*norm(Delta_grad(:,end));
    end
    Delta_mu = [Delta_mu, Delta_mu_next];

    disp(['actual error estimate for parameters: ', num2str(Delta_mu_next')]);
    %calculate the gradient at the new parameter point:
    [grad_f_x, Delta_grad, output] = model.optimization.get_Jacobian(model, reduced_data);
    nr_grad = nr_grad + 1;
    norm_grad_f_x = [norm_grad_f_x, norm(grad_f_x)];
    %output = output_array{1};
    grad_f_x = grad_f_x * min_or_max;
  end
  % END OF OPTIMIZATION
  %if the function was maximized instead of minimized, the output values
  %must be multiplied by -1 again:
  output = output * min_or_max;

  opt_data.optimal_params = x;
  opt_data.nr_fct_calls = nr_fct;
  opt_data.nr_grad_calc = nr_grad;
  opt_data.parameter_sets = PM;
  opt_data.output = output;
  opt_data.max_output = max(output);
  opt_data.max_output_paramsets = PM(end,:);
  opt_data.nr_opti_iter = opti_iter;
  opt_data.Delta_mu = Delta_mu;
  opt_data.norm_grad_f_x = norm_grad_f_x;

  %setting the mu values in the model back to the original values
  model = set_mu(model, mu_original);
end
% See also: verbose, set_mu_to_optimize, stepsize_quotient