function [opt_data, model] = gradient_opt_non_iter_err(model, varargin)
2 %
function [x_opt,nr_fct_total,nr_grad_total]=gradient_opt(model, model_data, reduced_data)
4 % Function performing an optimization either reduced oder detailed
using gradient method
5 % with either Armijo-stepsize rule, Wolfe-Powell-stepsize rule or
%
% Required fields of model:
%   model.optimization.init_params: initial parameter values
%   model.optimization.tol: tolerance: how close to 0 shall the gradient norm be?
%   model.optimization.get_Jacobian: gradient function
%   model.optimization.objective_function: not needed in gradient_opt itself,
%       but needed in stepsize_armijo.m and stepsize_wolfe_powell.m
%   model.optimization.stepsize_rule: either 'fixed_stepsize', 'armijo',
%       'dichotomie', 'exponential', 'quotient' or 'wolfepowell'
%   model.optimization.opt_mode: either 'detailed' or 'reduced'
%   model.optimization.derivatives_available: flag for derivative information
%
% Generated fields of opt_data:
%   opt_data.optimal_params: the optimal parameter value
%   opt_data.nr_fct_calls: number of objective function calls
%   opt_data.nr_grad_calc: number of needed gradient calculations
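%
% Example (a minimal sketch; the field values and the gradient handle below
% are hypothetical and only illustrate the expected interface):
%   model.optimization.init_params           = [0.5, 1.0];
%   model.optimization.tol                   = 1e-3;
%   model.optimization.stepsize_rule         = 'armijo';
%   model.optimization.opt_mode              = 'detailed';
%   model.optimization.derivatives_available = 1;
%   model.optimization.get_Jacobian          = @my_gradient; % user-supplied
%   [opt_data, model] = gradient_opt_non_iter_err(model, model_data);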
%
% Oliver Zeeb 25.05.2010

disp('entered gradient_opt')

% adding some fields to the model, if not existent
if ~isfield(model.optimization,'tol')
  model.optimization.tol = 1e-3;
end
if ~isfield(model.optimization,'stepsize_rule')
  model.optimization.stepsize_rule = 'quotient';
end
if ~isfield(model.optimization,'min_or_max')
  model.optimization.min_or_max = 'min';
end

if model.optimization.derivatives_available
  model.compute_derivative_info = 1;
end

% setting signum for maximizing or minimizing,
% i.e. changes f(x) to -f(x) if the function f(x) shall be maximized
if strcmp(model.optimization.min_or_max, 'min')
  min_or_max = 1;
elseif strcmp(model.optimization.min_or_max, 'max')
  min_or_max = -1;
else
  disp('maximize or minimize? model.optimization.min_or_max not properly defined')
end
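
% min_or_max multiplies the gradient (and, at the very end, the output
% values), so the same descent loop below handles both minimization and
% maximization.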

nr_fct = 0;   % counter for the number of function calls
nr_grad = 0;  % counter for the number of gradient calculations
nRB = [];     % basis sizes used for reduced simulations
nRB_der = []; % basis sizes used for reduced derivative simulations

% save the current mu values of the model
mu_original = get_mu(model);

% set the mus in the model to the mus given in init_params
%model.(model.mu_names{k}) = model.optimization.init_params(k);
model = set_mu(model, model.optimization.init_params);

% get the initial parameters that should be optimized
x = get_mu_to_optimize(model);
% get the corresponding parameter bounds
[lower_bound, upper_bound] = get_bound_to_optimize(model);
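% lower_bound and upper_bound are used below to clamp each component of x
% to the admissible mu_range after every gradient step.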

% distribute the remaining input arguments depending on the optimization mode
if strcmp(model.optimization.opt_mode, 'detailed')
  %if strcmp(inputname(2), 'model_data')
  model_data = varargin{1};
  %else
  %  error('It''s not called model_data')
  %end
  % model.optimization.opt_mode = 'detailed';
elseif strcmp(model.optimization.opt_mode, 'reduced')
  %if ~strcmp(inputname(2), 'reduced_data')
  %  error('input argument is not called reduced_data')
  %end
  if length(varargin) == 3 % reduced, model and detailed data given
    reduced_data = varargin{1};
    model_data = varargin{2};
    detailed_data = varargin{3};
    model.optimization.opt_mode = 'reduced';
  else % only reduced data given
    reduced_data = varargin{1};
    model.optimization.opt_mode = 'reduced';
  end
end

tol = model.optimization.tol;
PM = x;        % for opt_data: array with all the mu_i
output = [];   % array containing the f(mu_k)
max_iter = 40; % maximal number of iterations of the gradient method
opti_iter = 0; % iteration counter

if strcmp(model.optimization.opt_mode, 'detailed')

  % initial gradient and its norm
  grad_f_x = model.optimization.get_Jacobian(model, model_data, [], []) * min_or_max;
  nr_grad = nr_grad + 1;
  norm_grad_f_x = norm(grad_f_x);

  % START OF OPTIMIZATION
  while (norm(grad_f_x) > tol) && (opti_iter <= max_iter)
    d = -grad_f_x; % descent direction (steepest descent)

    % calculation of the stepsize t:
    switch model.optimization.stepsize_rule
      case {'fixed_stepsize'}
        t = model.optimization.stepsize_fix;
      case {'armijo'} % Armijo rule
        [model,t,nr_fct_stepsize,output] = stepsize_armijo(model, model_data, output, x, d);
        nr_fct = nr_fct + nr_fct_stepsize;
      case {'dichotomie'}
        [model,t,nr_fct_stepsize,output] = stepsize_dichotomie(model, model_data, output, x, d);
        nr_fct = nr_fct + nr_fct_stepsize;
      case {'exponential'}
        if ~exist('exp_step','var')
          exp_step = 1;
        end
        [model,t,nr_fct_stepsize,output] = stepsize_exponential(model, model_data, output, x, d, exp_step);
        exp_step = exp_step + 1;
        nr_fct = nr_fct + nr_fct_stepsize;
      case {'quotient'}
        if ~exist('quot_step','var')
          quot_step = 1;
        end
        [model,t,nr_fct_stepsize,output] = stepsize_quotient(model, model_data, output, x, d, quot_step);
        quot_step = quot_step + 1;
        nr_fct = nr_fct + nr_fct_stepsize;
      case {'wolfepowell'}
        % WOLFE POWELL NOT WORKING AT THE MOMENT!!!
        [t, nr_fct_stepsize, nr_grad_stepsize] = stepsize_wolfe_powell(model, x, d);
        nr_fct = nr_fct + nr_fct_stepsize;
        nr_grad = nr_grad + nr_grad_stepsize;
      otherwise
        fprintf('unknown stepsize rule')
    end

    x = x + t*d; % gradient step with stepsize t in direction d
    opti_iter = opti_iter + 1;
    disp(['next parameter: ', num2str(x)])
    disp('--------------------')

    % check whether the new x is allowed by the according mu_range or
    % whether it exceeds the boundaries. If so: set this component to the
    % boundary.
    for k = 1:length(x)
      if x(k) > upper_bound(k)
        x(k) = upper_bound(k);
      elseif x(k) < lower_bound(k)
        x(k) = lower_bound(k);
      end
    end
    PM = [PM; x]; % store the new parameter set

    model = set_mu_to_optimize(model, x); % write the actual parameter setting in the model

    % additional break condition:
    % if there were no major changes in the last n steps --> finish!
    n = 5; % number of recent steps to compare (assumed value)
    if (size(PM,1) > n) && (length(output) > n) % already more than n values to compare?
      PM_diff = [];
      output_diff = [];
      for k = 1:n
        PM_diff = [PM_diff; abs(PM(end-k,:)-PM(end-k+1,:))];             % differences of the mus in the last n steps
        output_diff = [output_diff; abs(output(end-k)-output(end-k+1))]; % differences of the objective function in the last n steps
      end
      if (max(max(PM_diff)) <= tol*1e-2) && (max(output_diff) <= tol*1e-2)
        disp('no major change in mu and objective function')
        disp('leaving gradient_opt')
        break
      end
    end

    % calculate the gradient at the new parameter point:
    [grad_f_x, dummy, output] = model.optimization.get_Jacobian(model, model_data, [], []);
    nr_grad = nr_grad + 1;
    norm_grad_f_x = [norm_grad_f_x, norm(grad_f_x)];
    disp(['actual output: ', num2str(output)]);
    grad_f_x = grad_f_x * min_or_max; % sign-adjusted gradient for the new parameter setting
    disp(['Actual gradient norm: ', num2str(norm_grad_f_x(end))]);
  end
  % END OF OPTIMIZATION (detailed)

  % if the function was maximized instead of minimized, the values must be
  % flipped back:
  output = output * min_or_max;

  opt_data.optimal_params = x;
  opt_data.nr_fct_calls = nr_fct;
  opt_data.nr_grad_calc = nr_grad;
  opt_data.parameter_sets = PM;
  opt_data.output = output;
  opt_data.max_output = max(output);
  opt_data.max_output_paramsets = PM(end,:);
  opt_data.nr_opti_iter = opti_iter;
  opt_data.norm_grad_f_x = norm_grad_f_x;

  % setting the mu values in the model back to the original values
  %for k=1:length(model.mu_names)
  %  model.(model.mu_names{k}) = mu_original(k);
  %end
  model = set_mu(model, mu_original);

else % here: reduced optimization

  % initial reduced gradient, its error estimator, and the constants for
  % the noniterative error estimator
  [grad_f_x, Delta_grad, output, data] = model.optimization.get_Jacobian(model, reduced_data);
  grad_f_x = grad_f_x * min_or_max;
  nr_grad = nr_grad + 1;
  norm_grad_f_x = norm(grad_f_x);
  nRB = data.nRB;
  nRB_der = data.nRB_der;
  [g_H, model] = model.gamma_H(model);
  [L_H, model] = model.L_H(model);
  break_value_vec = 2*(g_H)^2*(L_H)*(norm_grad_f_x);
  Delta_mu_vec = 2*g_H * norm(norm_grad_f_x);
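
  % break_value_vec collects the validity indicator of the noniterative
  % error estimator (the estimate is trusted when the break value is <= 1,
  % see below); Delta_mu_vec collects the corresponding error estimates for
  % the optimal parameters, built from gamma_H, L_H and the gradient norm.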

  used_detailed_calculations = 0;
  % PM = x;        % for opt_data: array with all the mu_i
  % output = [];   % array containing the f(mu_k)
  % max_iter = 40; % maximal number of iterations of the gradient method
  % opti_iter = 0; % counter
  %if isfield(model,'lipschitz_type') && (strcmp(model.lipschitz_type,'Hessian'))
  %  Delta_mu = zeros(length(grad_f_x),1);
  %else
  %  Delta_mu = 0; % error bound for the optimal parameters of the reduced optimization
  %end
  %switch model.optimization.stepsize_rule
  %  epsilon_m = zeros(max_iter+1,1);
  %  C_alpha = 1; % upper bound for the optimization stepsize
  %  [C_L,model] = model.get_lipschitz_constant(model);
  %  disp('unknown stepsize rule');
  %end

  continue_optimization = 1;

  % START OF OPTIMIZATION
  while (continue_optimization) && (opti_iter <= max_iter)
    d = -grad_f_x; % descent direction (steepest descent)

    % calculation of the stepsize t:
    switch model.optimization.stepsize_rule
      case {'fixed_stepsize'}
        t = model.optimization.stepsize_fix;
      case {'armijo'} % Armijo rule
        [model,t,nr_fct_stepsize,output,data] = stepsize_armijo(model, reduced_data, output, x, d);
        nr_fct = nr_fct + nr_fct_stepsize;
        nRB = [nRB, data.nRB];
      case {'dichotomie'}
        [model,t,nr_fct_stepsize,output] = stepsize_dichotomie(model, model_data, output, x, d);
        nr_fct = nr_fct + nr_fct_stepsize;
      case {'exponential'}
        if ~exist('exp_step','var')
          exp_step = 1;
        end
        [model,t,nr_fct_stepsize,output] = stepsize_exponential(model, model_data, output, x, d, exp_step);
        exp_step = exp_step + 1;
        nr_fct = nr_fct + nr_fct_stepsize;
      case {'quotient'}
        if ~exist('quot_step','var')
          quot_step = 1;
        end
        [model,t,nr_fct_stepsize,output] = stepsize_quotient(model, model_data, output, x, d, quot_step);
        quot_step = quot_step + 1;
        nr_fct = nr_fct + nr_fct_stepsize;
        %C_alpha = t; % update of the maximum stepsize in this step for error estimation
        %[C_L,model] = model.get_lipschitz_constant(model); % update Lipschitz constant
      case {'wolfepowell'}
        % WOLFE POWELL NOT WORKING AT THE MOMENT!!!
        [t, nr_fct_stepsize, nr_grad_stepsize] = stepsize_wolfe_powell(model, x, d);
        nr_fct = nr_fct + nr_fct_stepsize;
        nr_grad = nr_grad + nr_grad_stepsize;
      otherwise
        fprintf('unknown stepsize rule')
    end

    x = x + t*d; % gradient step with stepsize t in direction d
    opti_iter = opti_iter + 1;
    disp(['Next parameter in reduced gradient optimization: ', num2str(x)]);
    disp('------------------------------------')

    % check whether the new x is allowed by the according mu_range or
    % whether it exceeds the boundaries. If so: set this component to the
    % boundary.
    for k = 1:length(x)
      if x(k) > upper_bound(k)
        x(k) = upper_bound(k);
      elseif x(k) < lower_bound(k)
        x(k) = lower_bound(k);
      end
    end
    PM = [PM; x]; % store the new parameter set

    model = set_mu_to_optimize(model, x); % write the actual parameter setting in the model

    % additional break condition:
    % if there were no major changes in the last n steps --> finish!
    %if (size(PM,1) > n) && (length(output) > n) % already more than n values to compare?
    %  PM_diff = [PM_diff; abs(PM(end-k,:)-PM(end-k+1,:))];             % differences of the mus in the last n steps
    %  output_diff = [output_diff; abs(output(end-k)-output(end-k+1))]; % differences of the objective function in the last n steps
    %  if (max(max(PM_diff)) <= tol*1e-2) && (max(output_diff) <= tol*1e-2)
    %    disp('no major change in mu and objective function')
    %    disp('leaving gradient_opt')
    %  end
    %end

    % n_Delta = size(Delta_mu,2);
    % disp('next estimates')
    % disp(['augmenting constants:',num2str(1+C_alpha*norm(C_L))]);
    % disp(['norm of the gradient:',num2str(norm(grad_f_x))]);
    % disp(['Error estimator Gradient:',num2str(Delta_grad(:,end)')]);
    % disp(['actual adding:',num2str(C_alpha*Delta_grad(:,end)')]);
    % if isfield(model,'lipschitz_type')&&(strcmp(model.lipschitz_type,'Hessian'))
    %   Delta_mu_next = Delta_mu(:,n_Delta)+C_alpha.*(C_L*Delta_mu(:,n_Delta))+epsilon_m(n_Delta)*abs(grad_f_x(:,:))'+C_alpha*Delta_grad(:,end);
    % else
    %   Delta_mu_next = Delta_mu(:,n_Delta)+C_alpha.*(C_L*Delta_mu(:,n_Delta))+epsilon_m(n_Delta)*norm(grad_f_x(:,:))'+C_alpha*norm(Delta_grad(:,end));
    % end
    % Delta_mu = [Delta_mu,Delta_mu_next];
    % disp(['actual error estimate for parameters:',num2str(Delta_mu_next')]);

    % calculate the gradient at the new parameter point:
    [grad_f_x, Delta_grad, output, data] = model.optimization.get_Jacobian(model, reduced_data);
    nr_grad = nr_grad + 1;
    norm_grad_f_x = norm(grad_f_x);
    %output = output_array{1};
    grad_f_x = grad_f_x * min_or_max;

    nRB = [nRB, data.nRB];
    nRB_der = [nRB_der, data.nRB_der];

    % decide whether to continue or not
    %break_value = 2*model.gamma_H^2*model.L_H*(norm_grad_f_x+norm(Delta_grad(:,end)));
    [g_H, model] = model.gamma_H(model);
    [L_H, model] = model.L_H(model);
    break_value = 2*(g_H)^2*(L_H)*(norm_grad_f_x+norm(Delta_grad(:,end)));
    break_value_vec = [break_value_vec, break_value];
    Delta_mu_vec = [Delta_mu_vec, 2*g_H * norm(norm_grad_f_x+norm(Delta_grad(:,end)))];
    disp(['Actual gradient norm: ', num2str(norm_grad_f_x)]);
    disp(['Value of break criterion: ', num2str(break_value)]);
    %if (break_value<=1) && (norm_grad_f_x<tol)
    if (norm_grad_f_x < tol)
      continue_optimization = 0;
    end
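
    % Note: the stricter criterion that additionally requires
    % break_value <= 1 is kept above as a comment; the loop currently stops
    % on the gradient norm alone, and the break value is only evaluated for
    % the error estimator after the loop.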
  end
  % END OF OPTIMIZATION (reduced)

  % noniterative error estimator
  %Delta_mu = 2*model.gamma_H * norm(norm_grad_f_x+norm(Delta_grad(:,end)));
  if break_value <= 1
    Delta_mu = 2*g_H * norm(norm_grad_f_x+norm(Delta_grad(:,end)));
  else % ...the requirement is not fulfilled
    if isfield(model.optimization,'allow_constant_calculation') && model.optimization.allow_constant_calculation
      % recompute the constants with detailed calculations
      gamma = calculate_gamma_const(model.base_model);
      Lip_H = calculate_Lipschitz_const(model.base_model);
      break_value = 2*(gamma)^2*(Lip_H)*(norm_grad_f_x+norm(Delta_grad(:,end)));
      used_detailed_calculations = 1;
      Delta_mu = 2*gamma * norm(norm_grad_f_x+norm(Delta_grad(:,end)));
      if break_value > 1
        disp('Error estimator not valid!!!!!!!!!!!!')
      end
    else
      % Error estimator not valid
      disp('Error estimator not valid!!!!!!!')
      Delta_mu = 2*model.gamma_H(model) * norm(norm_grad_f_x+norm(Delta_grad(:,end)));
    end
  end
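
  % Summary of the fallback: if the reduced-constant check fails and
  % detailed constant calculation is allowed, gamma and the Lipschitz
  % constant are recomputed from the detailed base model and the check is
  % repeated; otherwise the estimate is still reported, but flagged as not
  % valid by the messages above.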

  % if the function was maximized instead of minimized, the values must be
  % flipped back:
  output = output * min_or_max;

  opt_data.optimal_params = x;
  opt_data.nr_fct_calls = nr_fct;
  opt_data.nr_grad_calc = nr_grad;
  opt_data.parameter_sets = PM;
  opt_data.output = output;
  opt_data.max_output = max(output);
  opt_data.max_output_paramsets = PM(end,:);
  opt_data.nr_opti_iter = opti_iter;
  opt_data.Delta_mu = Delta_mu;
  opt_data.break_value = break_value;
  opt_data.norm_grad_f_x = norm_grad_f_x;
  opt_data.nRB = nRB;
  opt_data.nRB_der = nRB_der;
  opt_data.used_detailed_calculations = used_detailed_calculations;
  opt_data.gamma = g_H;
  opt_data.Lipschitz_H = L_H;
  opt_data.break_value_vec = break_value_vec;
  opt_data.Delta_mu_vec = Delta_mu_vec;

end