function gamm2013_exp(step, exp_params)
% function gamm2013_exp(step)
% Experiments for the GAMM2013 paper: optimization with a 2x2 thermal block.
%
% step       : integer selecting the experiment to run (see case comments)
% exp_params : optional struct with extra experiment settings
%              (e.g. exp_params.nmu, the grid resolution used in case 6)
%
% B. Haasdonk, M. Dihlmann, 22.5.2013
%
% NOTE(review): this is an elided excerpt of the original file. The
% enclosing "switch step" statement, several case labels, loop headers,
% 'end' lines and variable definitions (mu_star, mu3, mu4, mu0, lb, ub,
% N_vec, tic/toc pairs, ...) are not visible here and are NOT
% reconstructed.

% discretization and reduced-basis generation settings
params.numintervals_per_block = 50;
model.RB_stop_epsilon = 1e-6;
model.RB_stop_Nmax = 30;
model.RB_train_size = 5000;
model_data = gen_model_data(model);
plot_params.axis_tight = 1;
plot_params.yscale_uicontrols = 0.5;

case 1 % 2x2 thermal block, check detailed simulation by gui
  % model = model.set_mu(model,[0.1,0.8,0.1,0.1]);
  % sim_data = detailed_simulation(model,model_data);
  % detailed_data = gen_detailed_data(model,model_data);
  % reduced_data = gen_reduced_data(model, detailed_data);

case 2 % reference solution for optimization
  model = model.set_mu(model,mu_star);
  sim_data = detailed_simulation(model,model_data);

case 3 % detailed data generation, mu3, mu4 fixed
  % keep parameters mu3 and mu4 fixed in greedy basis generation
  model.mu_ranges = {[0.5,2.5],[0.5,2.5],[mu3,mu3],[mu4,mu4]};
  %
  % warning id (the warning toggle using this id is elided here)
  id = 'MATLAB:nearlySingularMatrix';
  % set random number generator for deterministic result:
  RandStream.setDefaultStream(RandStream('mt19937ar','seed',1010101));
  detailed_data = gen_detailed_data(model, model_data);
  % 15 basis vectors generated
  save('gamm2013_detailed_data.mat');

case 4 % inspect reduced basis and rb_demo_gui
  load('gamm2013_detailed_data');
  params.plot = @plot_vertex_data;
  demo_rb_gui(model,detailed_data,reduced_data,plot_params);

case 5 % add optimization functionals to detailed_data
  load('gamm2013_detailed_data');
  % define optimization functionals by error to reference solution
  model = model.set_mu(model,mu_star);
  ref_sim_data = detailed_simulation(model,model_data);
  ref_dofs = ref_sim_data.uh.dofs;
  % restrict the model to the two free parameters mu1, mu2:
  model.set_mu = @(model,mu)my_pfusch_set_mu(model,mu,mu3,mu4);
  model.get_mu = @(model)model.mus(1:2);
  J = @(dofs) J_functional(dofs, ...
      model_data.df_info.l2_inner_product_matrix, ...
  detailed_optimization_functional = @(mu1_mu2) ...
      my_detailed_optimization_functional(mu1_mu2, J, model, model_data);
  reduced_data = gen_reduced_data(model,detailed_data);
  % add functional fields in reduced data
  RB = detailed_data.RB;
  M = model_data.df_info.l2_inner_product_matrix;
  reduced_data.J1 = RB'*M*RB;
  reduced_data.J2 = ref_dofs'*M*RB;
  reduced_data.J3 = ref_dofs'*M*ref_dofs;
  % JN = 0.5*(a'*J1*a-2*J2*a+J3);
  % gradient J = a'*gJ1*pmui_a - gJ2 pmui_a
  reduced_data.gJ1 = RB'*M*RB;
  reduced_data.gJ2 = ref_dofs'*M*RB;
  reduced_optimization_functional = @(mu1_mu2) ...
      my_reduced_optimization_functional(mu1_mu2, model,...
  model_data.ref_dofs = ref_dofs;
  save('gamm2013_detailed_data.mat');

case 6 % plot reduced output landscape and time measurement
  load('gamm2013_detailed_data');
  if isfield(exp_params, 'nmu')
    n_mu1 = exp_params.nmu;
    n_mu2 = exp_params.nmu;
    n_mu1 = 21; n_mu2 = 21; % default resolution (else-branch in original)
  % mu1_range = [0.5,2.5];
  % mu1_range = [1.9,2.1];
  mu1_range = model.mu_ranges{1};
  mu1s = linspace(mu1_range(1),mu1_range(2),n_mu1);
  mu2_range = model.mu_ranges{2};
  % mu2_range = [0.5,2.5];
  % mu2_range = [0.9,1.1];
  mu2s = linspace(mu2_range(1),mu2_range(2),n_mu2);
  Js = zeros(n_mu1,n_mu2);
  % evaluate the reduced functional on the parameter grid
  % (the loop headers over n1, n2 are elided here)
  mu1_mu2=[mu1s(n1),mu2s(n2)];
  Js(n1,n2)= reduced_optimization_functional(mu1_mu2);
  disp(['computation time: ',num2str(t),' seconds.']);
  cmaplog = create_log_colormap();
  surf(mu1s,mu2s,Js','FaceColor','interp');
  % check reduced model and reduced optimization functional at
  % the reference parameter:
  model = model.set_mu(model,mu_star);
  sim_data = rb_simulation(model,reduced_data);
  sim_data = rb_reconstruction(model, detailed_data, sim_data);
  disp(['H10 error to reference solution:']);
  err = sim_data.uh.dofs - ref_dofs;
  h10_error = sqrt(err'*model_data.df_info.h10_inner_product_matrix * err)
  disp(['Delta error estimator:']);
  disp(['L2 error to reference solution:']);
  l2_error = sqrt(err'*model_data.df_info.l2_inner_product_matrix * err)
  JN_ref = reduced_optimization_functional(mu_star(1:2));
  disp(['reduced optimization functional at reference parameter: ',...

case 7 % run detailed optimizer and time-measurement
  load('gamm2013_detailed_data');
  options = optimset();
  options.MaxFunEvals = 1000;
  options.TolFun = 1e-14;
  options.TolX = 1e-14;
  fun = detailed_optimization_functional;
  [mumin,fval,exitflag,output,lambda,grad] = fmincon(fun,mu0,[],[], ...
      [],[],lb,ub,[],options);
  disp('found optimum mu_min:')
  disp('detailed optimization time:')
  disp(['functional gradient at optimum: ',num2str(grad')]);

case 8 % run reduced optimizer and time-measurement and error estimation
  load('gamm2013_detailed_data');
  options = optimset();
  options.MaxFunEvals = 1000;
  options.TolFun = 1e-14;
  options.TolX = 1e-14;
  fun = reduced_optimization_functional;
  [mumin,fval,exitflag,output,lambda,grad] = fmincon(fun,mu0,[],[], ...
      [],[],lb,ub,[],options);
  disp('found optimum mu_min:')
  disp('reduced optimization time:')
  disp('l2_error in mu_min:')
  norm(mumin-mu_star(1:2))
  disp('computing a-posteriori error estimator (expensive):')
  % infinity norm * sqrt2 is upper bound for 2-norm of gradient
  %epsilon_J = norm(grad);%output.firstorderopt*sqrt(2);
  grad_J = my_reduced_functional_gradient(model,...
  epsilon_J = norm(grad_J);
  disp(['norm functional gradient at optimum: ',num2str(epsilon_J)]);
  % error estimator computation (expensive!!!)
  [Delta_mu, val_indicator] = my_expensive_error_estimator(mumin,model, ...
      detailed_data,reduced_data,...
      detailed_optimization_functional,...
  disp('validity indicator, should be less than 1 for validity: ')

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% do a variation of number of basis vectors N in RB
% track online simulation time
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  load('gamm2013_detailed_data');
  detailed_data_full = detailed_data;
  %J = @(dofs) J_functional(dofs, ...
  %    model_data.df_info.l2_inner_product_matrix, ...
  %detailed_optimization_functional = @(mu1_mu2) ...
  %    my_detailed_optimization_functional(mu1_mu2, J, model, model_data, ...
  % optimization settings
  options = optimset();
  options.MaxFunEvals = 1000;
  options.TolFun = 1e-14;
  options.TolX = 1e-14;
  % detailed optimization (reference for errors and timing)
  fun = detailed_optimization_functional;
  [mumin_det,fval_det,exitflag_det,output_det,lambda,grad_det] = fmincon(fun,mu0,[],[], ...
      [],[],lb,ub,[],options);
  Nmax = size(detailed_data.RB,2);
  % preallocate result vectors over the basis sizes in N_vec
  Delta_vec = zeros(1,length(N_vec));
  Delta_pmui_vec = zeros(2,length(N_vec));
  Delta_grad_J_vec = zeros(2,length(N_vec));
  s_vec = zeros(1,length(N_vec));
  t_sim = zeros(1,length(N_vec));
  t_sim_ext = zeros(1,length(N_vec));
  t_J = zeros(1,length(N_vec));
  t_opt = zeros(1,length(N_vec));
  mu_min_vec = zeros(2,length(N_vec));
  val_ind_vec = zeros(1,length(N_vec));
  Delta_mu_vec = zeros(1,length(N_vec));
  % loop over several basis sizes
  for n=1:length(N_vec)
    detailed_data.RB = detailed_data_full.RB(:,1:N);
    reduced_data = gen_reduced_data(model, detailed_data);
    RB = detailed_data.RB;
    M = model_data.df_info.l2_inner_product_matrix;
    reduced_data.J1 = RB'*M*RB;
    reduced_data.J2 = ref_dofs'*M*RB;
    reduced_data.J3 = ref_dofs'*M*ref_dofs;
    % JN = 0.5*(a'*J1*a-2*J2*a+J3);
    % gradient J = a'*gJ1*pmui_a - gJ2 pmui_a
    reduced_data.gJ1 = RB'*M*RB;
    reduced_data.gJ2 = ref_dofs'*M*RB;
    reduced_optimization_functional = @(mu1_mu2) ...
        my_reduced_optimization_functional(mu1_mu2, model,...
    tic;sim_data = rb_simulation(model, reduced_data);t_s = toc;
    sim_data = rb_reconstruction(model, detailed_data,sim_data);
    sim_data_ext = extended_rb_simulation(model,reduced_data,detailed_data);
    Delta_grad_J_vec(:,n) = my_Delta_partial_Js(sim_data_ext,detailed_data,ref_dofs);
    fun = reduced_optimization_functional;
    [mumin,fval,exitflag,output,lambda,grad] = fmincon(fun,mu0,[],[], ...
        [],[],lb,ub,[],options);
    %epsilon_J = norm(grad);%output.firstorderopt*sqrt(2);
    model = set_mu(model,mumin);
    grad_J = my_reduced_functional_gradient(model,reduced_data);
    epsilon_J = norm(grad_J);
    % error estimator computation (expensive!!!)
    [Delta_mu_vec(n), val_ind_vec(n)] = my_expensive_error_estimator(mumin,model, ...
        detailed_data,reduced_data,...
        detailed_optimization_functional,...
    Delta_vec(n) = sim_data.Delta;
    Delta_pmui_vec(:,n) = sim_data_ext.Delta_partial_mui_us';
    s_vec(n) = J(sim_data.uh.dofs);t_J(n) = toc;
    mu_min_vec(:,n) = mumin';
  % plots over the basis size N
  semilogy(N_vec,Delta_vec)
  title('Error estimator for solution')
  semilogy(N_vec, Delta_pmui_vec(1,:))
  semilogy(N_vec, Delta_pmui_vec(2,:),'r')
  legend('pmu1','pmu2')
  title('Fehlersch?tzer Ableitungen')
  semilogy(N_vec, s_vec)
  semilogy(N_vec,Delta_grad_J_vec(1,:));
  semilogy(N_vec, Delta_grad_J_vec(2,:),'r');
  title('Gradient J Error estimator')
  legend('pmu1','pmu2')
  hold on; plot(N_vec,t_sim_ext,'r')
  title('online simulation times');
  legend('rb sim','rb sim expensive')
  mu_err = mu_min_vec-repmat(mu_star(1:2)',1,length(N_vec));
  for i=1:length(N_vec)
    mu_err_norm(i) = norm(mu_err(:,i));
  semilogy(N_vec,mu_err_norm)
  semilogy(N_vec,Delta_mu_vec,'r');
  title('Error in optimal parameters')
  % split into valid and invalid error estimator values
  while val_ind_vec(i)>1
    Delta_mu_iv = [Delta_mu_iv,Delta_mu_vec(i)];
  Delta_mu_iv = [Delta_mu_iv,Delta_mu_vec(i)];
  for j=i:length(N_vec)
    Delta_mu_v = [Delta_mu_v, Delta_mu_vec(j)];
  semilogy(N_vec,mu_err_norm,'LineWidth',2)
  semilogy(N_vec(1:length(Delta_mu_iv)),Delta_mu_iv,'r-.','LineWidth',2);
  semilogy(N_vec(length(Delta_mu_iv):end),Delta_mu_v,'r-','LineWidth',2);
  %title('Error in optimal parameters')
  legend('true error','error est. (invalid)','error est. (valid)')
  xlabel('number of basis vectors N');
  ylabel('error in optimal parameter')
  % summary output
  disp('----------------------------------------')
  disp('Simplex optimization (Matlab fmincon):')
  disp('======================================')
  disp(['Detailed optimization time :',num2str(t_det)]);
  disp(['Reduced optimization time :', num2str(t_opt(end))]);
  disp(['Last true error in optimization: ',num2str(mu_err_norm(end)),' and Delta_mu: ',num2str(Delta_mu_vec(end))])
  disp(['Effectivity of error estimator: ',num2str(Delta_mu_vec(end)/mu_err_norm(end))])
  disp(['Reduced optimal parameters: ',num2str(mumin(:)')])
  disp(['Number of functional calculations : ',num2str(output.funcCount)])
  disp(['Functional value at optimum: ',num2str(fval)]);
  disp(['Gradient at optimum: ',num2str(norm(grad(:)))])

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% implement and test reduced functional gradient
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  load('gamm2013_detailed_data');
  model = set_mu(model,mu);
  grad_J = my_reduced_functional_gradient(model, reduced_data);
  % check numerically with finite differences
  J0 = reduced_optimization_functional(mu);
  gJn(i) = (detailed_optimization_functional(mu+hv(:,i))-J0);
  Jdet = my_detailed_functional_gradient(model, model_data);

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% gradient optimization
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  load('gamm2013_detailed_data');
  % settings for my_gradient_opt
  model.optimization = [];
  model.optimization.init_params = [0.5,0.5];
  model.optimization.tol = 1e-7;
  model.optimization.stepsize_rule = 'armijo';
  model.optimization.stepsize_fix = 1;
  model.optimization.get_Jacobian = @my_reduced_functional_gradient;
  model.optimization.objective_function = @(model,reduced_data)my_reduced_optimization_functional([],model,reduced_data);
  model.optimization.derivatives_available = 0;
  model.optimization.params_to_optimize = [1,1,0,0];
  model.optimization.opt_mode = 'reduced';
  model.optimization.initial_stepsize = 1000;
  model.optimization.sigma = 0.6;
  model.compute_err_estimator = 0;
  model.expensive_err_est = @(mu,model,grad_J)my_expensive_error_estimator(mu,model, ...
      detailed_data,reduced_data,...
      detailed_optimization_functional,...
  [opt_data_red, model]=my_gradient_opt(model,reduced_data);
  % second run with error estimation enabled
  model.compute_err_estimator = 1;
  [opt_data_red, model]=my_gradient_opt(model,reduced_data);
  % detailed gradient optimization for comparison
  model.optimization.get_Jacobian = @my_detailed_functional_gradient;
  model.optimization.objective_function = @(model,model_data)my_detailed_optimization_functional([],J,model,model_data);
  [opt_data_det] = my_gradient_opt(model, model_data);
  opt_data_red.t_opt = t_optred;
  opt_data_det.t_opt = t_optdet;
  save('Optimization_data_gamm.mat','opt_data_red','opt_data_det');

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% plot figures for GAMM proceedings
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  % first conduct cases 3 and 5
  %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  % plot functional landscape
  %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  exp_params.nmu = 50; % number of grid points
  gamm2013_exp(6,exp_params);
  %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  % plot reference solution
  %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  load('gamm2013_detailed_data');
  model = model.set_mu(model,[2,1]);
  sim_data = detailed_simulation(model,model_data);
  %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  % plot gradient steps and error estimator decay
  %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  load('gamm2013_detailed_data.mat')
  load('Optimization_data_gamm.mat');
  % sort into valid and invalid error estimator values
  Delta_mu_vec = opt_data_red.Delta_mu_vec;
  norm_grad = opt_data_red.norm_grad_vec;
  ind_v = find(opt_data_red.break_value_vec<1);
  ind_iv = find(opt_data_red.break_value_vec>=1);
  Delta_mu_iv = Delta_mu_vec(ind_iv(1):ind_iv(end)+1);
  norm_grad_iv = norm_grad(ind_iv(1):ind_iv(end)+1);
  Delta_mu_v = Delta_mu_vec(ind_v);
  norm_grad_v = norm_grad(ind_v);
  [norm_grad,idx1] = sort(norm_grad,'descend');
  [norm_grad_iv,idx2] = sort(norm_grad_iv,'descend');
  Delta_mu_iv = Delta_mu_iv(idx2);
  [norm_grad_v,idx3] = sort(norm_grad_v,'descend');
  Delta_mu_v = Delta_mu_v(idx3);
  dist = opt_data_red.parameter_sets-repmat(mu_ref,1,size(opt_data_red.parameter_sets,2));
  err = zeros(1,size(dist,2));
  err(i) = norm(dist(:,i));
  loglog(norm_grad,err,'LineWidth',2);
  loglog(norm_grad_iv,Delta_mu_iv,'r-.','LineWidth',2);
  loglog(norm_grad_v,Delta_mu_v,'r','LineWidth',2);
  set(gca, 'XDir', 'reverse')
  xlabel('gradient norm')
  ylabel('error in parameters')
  legend('true error','error est. (invalid)','error est. (valid)')
  % show some data on screen
  disp('----------------------------------------')
  disp('Gradient optimization:')
  disp('=======================')
  disp(['Detailed optimization time :',num2str(opt_data_det.t_opt)]);
  disp(['Reduced optimization time :', num2str(opt_data_red.t_opt)]);
  disp(['Last true error in optimization: ',num2str(err(end)),' and Delta_mu: ',num2str(Delta_mu_vec(end))])
  disp(['Effectivity of error estimator: ',num2str(Delta_mu_vec(end)/err(end))])
  disp(['Reduced optimal parameters: ',num2str(opt_data_red.optimal_params(:)')])
  disp(['Number of functional calculations (armijo): ',num2str(opt_data_red.nr_fct_calls)])
  disp(['Number o gradient calculations : ',num2str(opt_data_red.nr_grad_calc)])
  disp(['Functional value at optimum: ',num2str(my_reduced_optimization_functional(opt_data_red.optimal_params,model,reduced_data))]);
  disp(['Functional value at optimum(det): ',num2str(my_detailed_optimization_functional(opt_data_red.optimal_params,J,model,model_data))]);
  disp(['Gradient at optimum: ',num2str(opt_data_red.norm_grad_f_x)])

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% plot_variation of number of basis vectors
% (simplex optimization)
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  error('step number unknown')
% functional J, here l2 error functional J(u) := \int_Omega |u-u_star|^2
function res = J_functional(dofs, l2_inner_product_matrix, ref_dofs)
% Quadratic L2-error functional J(u) = 0.5 * ||u - u_star||_{L2}^2.
%
% dofs                    : DOF vector of the current solution u
% l2_inner_product_matrix : Gram matrix of the L2 inner product
% ref_dofs                : DOF vector of the reference solution u_star
%
% res : scalar functional value
err = dofs - ref_dofs; % restored: 'err' was referenced but undefined in excerpt
res = 0.5* err'*l2_inner_product_matrix* err;
% Implementation of the detailed optimization functional
% Evaluates J at the detailed (high-dimensional) solution for the
% parameter point mu1_mu2 = [mu1;mu2]; the fixed parameters mu3, mu4 are
% appended inside model.set_mu (see my_pfusch_set_mu).
% NOTE(review): elided excerpt — the assignment of the second output
% 'dummy' is not visible here.
function [res,dummy] = my_detailed_optimization_functional(mu1_mu2, J, model, model_data)
model = model.set_mu(model,[mu1_mu2(:)]);
sim_data = detailed_simulation(model,model_data);
res = J(sim_data.uh.dofs);
642 % reduced optimization functional: non offline/online decomposed!
643 function [res,dummy] = my_reduced_optimization_functional(mu1_mu2, model, ...
647 model = model.set_mu(model,mu1_mu2(:));
649 sim_data = rb_simulation(model,reduced_data);
651 res = 0.5*(uN'*reduced_data.J1*uN-2*reduced_data.J2*uN+reduced_data.J3);%J(detailed_data.RB*sim_data.uN);
function [Delta_mu, val_indicator] = my_expensive_error_estimator(mumin,model, ...
    detailed_data,reduced_data,...
    optimization_functional,...
% compute parameter error estimator Delta_mu and
% validity indicator val_indicator = 4 gamma^2 L epsilon
% should be less than 1 in order the bound to be valid.
%
% gamma: Induced norm of inverse detailed hessian
% => approximate Hessian by finite difference +-h
%
% NOTE(review): elided excerpt — the remaining signature arguments
% (presumably ref_dofs, epsilon_J, h), the computation of gamma from the
% Hessian H, the loop over the sample points in 'mus' and the maximum
% over the Lipschitz estimates L2 are not visible here.
H = FD_Hessian(mumin,optimization_functional, h)
% Compute Delta_Grad_J by extended RB-simulation (for sensitivities)
sim_data = extended_rb_simulation(model,reduced_data,detailed_data);
Delta_partial_Js = my_Delta_partial_Js(sim_data,detailed_data,ref_dofs);
% sum up all partial derivative estimators
Delta_Grad_J = norm(Delta_partial_Js);
% total gradient accuracy: estimator plus gradient norm at the optimum
epsilon = Delta_Grad_J + epsilon_J;
Delta_mu = 2*gamma*epsilon;
% L: maximum Hessian Lipschitz constant in 2*gamma*epsilon ball around mumin
% for now: simple maximum search over few points
mus = [0,1; 1,0; sqrt(2)/2,sqrt(2)/2];
H2 = FD_Hessian(mumin(:)'+mus(n,:)*Delta_mu,optimization_functional, h);
L2 = norm(H-H2)/Delta_mu; % Delta_mu is difference between mus
val_indicator = 4 * gamma^2 * L * epsilon;
% compute approximate Hessian by 6 point evaluations
% f01 f11 = f(x) f21 = f(x+[h,0])
function H = FD_Hessian(x,f, h);
% Finite-difference approximation of the 2x2 Hessian of f at the point x.
%
% x : parameter point with two components (row or column)
% f : function handle accepting a 2-component parameter vector
% h : finite-difference stepsize
%
% The six point evaluations were missing from the excerpt and are
% restored here; the stencil is uniquely determined by the difference
% quotients below.
x = x(:); % normalize to column so the offsets below are well-defined
f11 = f(x);        % f(x)
f21 = f(x+[h;0]);  % f(x+[h,0])
f01 = f(x-[h;0]);  % f(x-[h,0])
f12 = f(x+[0;h]);  % f(x+[0,h])
f10 = f(x-[0;h]);  % f(x-[0,h])
f22 = f(x+[h;h]);  % f(x+[h,h])
H = zeros(2,2);
H(1,1) = (f21-2*f11+f01)/h^2;
H(2,2) = (f12-2*f11+f10)/h^2;
% d1d2f: (a bit inaccurate...)
H(1,2)= ((f22-f21)/h -(f12-f11)/h)/h;
H(2,1) = H(1,2); % Hessian of a smooth f is symmetric
% return gradient of the functional J (detailed)
function grad_J = my_detailed_functional_gradient(model, model_data)
% Gradient of J(u(mu)) w.r.t. the two free parameters mu1, mu2, using the
% detailed solution and its parameter sensitivities u_der:
%   dJ/dmu_i = (u - u_ref)' * M * du/dmu_i
% NOTE(review): the loop header and initialization were missing from the
% excerpt and are restored (two free parameters throughout this file);
% confirm the row orientation of grad_J against the callers.
sim_data = extended_detailed_simulation(model, model_data);
M = model_data.df_info.l2_inner_product_matrix;
grad_J = zeros(1,2);
for i = 1:2 % restored loop over the two free parameters
  grad_J(i) = (sim_data.uh.dofs-model_data.ref_dofs)'*M*sim_data.u_der(:,i);
end
% return gradient of the functional J (reduced)
function grad_J = my_reduced_functional_gradient(model, reduced_data)
% Gradient of JN(uN(mu)) w.r.t. the two free parameters, computed from the
% reduced solution, its sensitivities and the precomputed matrices
% gJ1 = RB'*M*RB and gJ2 = ref_dofs'*M*RB stored in reduced_data.
% NOTE(review): the loop header and initialization were missing from the
% excerpt and are restored; confirm the row orientation of grad_J
% against the callers (only norm(grad_J) is used in the visible code).
%perform a reduced simulation (error estimators not needed here)
model.compute_err_estimator = 0;
sim_data = extended_rb_simulation(model, reduced_data,[]);
%M = detailed_data.df_info.l2_inner_product_matrix;
%RB = detailed_data.RB;
% grad_J(i) = sim_data.uN'*RB'*M*RB*sim_data.partial_mui_uNs(:,i) ...
%     - ref_dofs'*M*RB*sim_data.partial_mui_uNs(:,i);
grad_J = zeros(1,2);
for i = 1:2 % restored loop over the two free parameters
  grad_J(i) = sim_data.uN'*reduced_data.gJ1*sim_data.partial_mui_uNs(:,i) ...
      - reduced_data.gJ2*sim_data.partial_mui_uNs(:,i);
end
% error estimators for gradient of J for quadratic functional
% |partial_mui J(u) - partial_mui J(uN)|
function Delta_partial_Js = my_Delta_partial_Js(sim_data,detailed_data,ref_dofs)
% Computes, for i = 1,2, the a-posteriori bound
%   Delta_i = Delta*(Delta_pmui + |pmui_uN|) + |uN - u_ref| * Delta_pmui
% with norms taken in the H^1_0 inner product.
%
% sim_data      : struct with fields uN, partial_mui_uNs (columns),
%                 Delta and Delta_partial_mui_us
% detailed_data : struct with fields RB and
%                 df_info.h10_inner_product_matrix
% ref_dofs      : DOF vector of the reference solution
%
% Restored from the elided excerpt: the '* err);' continuation closing
% the H^1_0 norm, and the 'for i = 1:2 ... end' loop.
Delta_partial_Js = [0,0]; % using Delta_partial_us
err = detailed_data.RB * sim_data.uN - ref_dofs;
norm_uN_minus_ud = sqrt(err' * ...
    detailed_data.df_info.h10_inner_product_matrix ...
    * err); % restored continuation
for i = 1:2 % restored loop over the two free parameters
  partial_mui_uN_dofs = detailed_data.RB*sim_data.partial_mui_uNs(:,i);
  norm_partial_uNi = sqrt(partial_mui_uN_dofs' * ...
      detailed_data.df_info.h10_inner_product_matrix * ...
      partial_mui_uN_dofs);
  Delta_partial_Js(i) = sim_data.Delta* (sim_data.Delta_partial_mui_us(i) ...
      + norm_partial_uNi) ...
      + norm_uN_minus_ud*sim_data.Delta_partial_mui_us(i);
end
% RB-simulation including sensitivities and error estimators
% but expensive computation by high-dimensional operations!
% NOTE(review): elided excerpt — among others the extraction of uN, the
% stepsize h and direction matrix E of the finite-difference coefficient
% derivatives, the loop header over i, the Riesz representant solve
% v_r = K \ res, the final term of the error estimator and the closing
% 'end' lines are not visible here.
function sim_data = extended_rb_simulation(model,reduced_data,detailed_data)
if ~isfield(model,'compute_err_estimator')
  model.compute_err_estimator = 1;
% standard reduced simulation first
sim_data = rb_simulation(model,reduced_data);
sim_data.Delta_s = []; %not available
sim_data.s = []; %not available here
% solve sensitivity problems:
%old_mode = model.decomp_mode;
model.decomp_mode = 2; % coefficients
[A_coeff,f_coeff] = ...
    model.operators(model,[]);
AN = lincomb_sequence(reduced_data.AN_comp, A_coeff);
fN = lincomb_sequence(reduced_data.fN_comp, f_coeff);
% solve sensitivity problems, coefficient derivatives by finite difference:
% matrix of reduced sensitivity coefficients, each column one reduced solution:
sim_data.partial_mui_uNs = zeros(length(uN),2);
% vector of error estimators:
sim_data.Delta_partial_mui_us = zeros(1,2);
mu = model.get_mu(model);
if model.compute_err_estimator
  % high-dimensional quantities needed for the residual-based estimator
  K = model.get_inner_product_matrix(detailed_data);
  model.decomp_mode = 1;
  [A_comp,f_comp] = ...
      model.operators(model,detailed_data);
  model.decomp_mode = 2; % coefficients
  alpha = model.coercivity_alpha(model);
% 1. solve for sensitivity solutions:
model = model.set_mu(model,mu+h*E(:,i));
[A_coeff2,f_coeff2] = ...
    model.operators(model,[]);
partial_mui_A_coeff = (A_coeff2-A_coeff)/h;
partial_mui_f_coeff = (f_coeff2-f_coeff)/h;
partial_mui_AN = lincomb_sequence(reduced_data.AN_comp, partial_mui_A_coeff);
partial_mui_fN = lincomb_sequence(reduced_data.fN_comp, partial_mui_f_coeff);
partial_mui_uN = AN \ (partial_mui_fN - partial_mui_AN*uN);
sim_data.partial_mui_uNs(:,i) = partial_mui_uN;
if model.compute_err_estimator
  %2. compute residuals and error estimators
  % Delta_partial_mui_u = gamma_partial_mui_a/alpha * Delta_u + |r|/alpha
  % with (r,v ) = partial_mui_i f(v)-a(partial_mui_uN,v) -partial_mui_a(u_N,v)
  A = lincomb_sequence(A_comp, A_coeff);
  partial_mui_A = lincomb_sequence(A_comp, partial_mui_A_coeff);
  partial_mui_f = lincomb_sequence(f_comp, partial_mui_f_coeff);
  res_partial_mui_A = partial_mui_A * (detailed_data.RB * uN);
  res_A = A * (detailed_data.RB * partial_mui_uN);
  res = partial_mui_f - res_partial_mui_A - res_A;
  %%% residuum functional is res * v
  %%% riesz representant: v_r' K v = (v_r,v) = res*v
  %%% so res = K * v_r;
  %%% res_norm_sqr = (v_r,v_r) = v_r' K v_r = v_r' * res;
  res_norm_sqr = v_r' * res;
  res_norm = sqrt(max(res_norm_sqr,0));
  % continuity constant of bilinear form partial_mui_a:
  gamma_partial_mui_a = get_continuity_constant(partial_mui_A,K);
  sim_data.Delta_partial_mui_us(i) = ...
      gamma_partial_mui_a / alpha * sim_data.Delta ...
%detailed simulation returning sensitivities
% NOTE(review): elided excerpt — the stepsize h, the direction matrix E,
% the loop header over the two parameters i and the closing 'end' lines
% are not visible here.
function sim_data = extended_detailed_simulation(model, model_data)
sim_data = detailed_simulation(model,model_data);
% solve sensitivity problems:
model.decomp_mode = 2; % coefficients
[A_coeff,f_coeff] = ...
    model.operators(model,[]);
% solve sensitivity problems, coefficient derivatives by finite difference:
% matrix of reduced sensitivity coefficients, each column one reduced solution:
u = sim_data.uh.dofs;
u_der = zeros(length(u),2);
mu = model.get_mu(model);
%K = model.get_inner_product_matrix(detailed_data);
model.decomp_mode = 1;
[A_comp,f_comp] = ...
    model.operators(model,model_data);
model.decomp_mode = 2; % coefficients
% 1. solve for sensitivity solutions:
model = model.set_mu(model,mu+h*E(:,i));
[A_coeff2,f_coeff2] = ...
    model.operators(model,[]);
partial_mui_A_coeff = (A_coeff2-A_coeff)/h;
partial_mui_f_coeff = (f_coeff2-f_coeff)/h;
partial_mui_A = lincomb_sequence(A_comp, partial_mui_A_coeff);
partial_mui_f = lincomb_sequence(f_comp, partial_mui_f_coeff);
A = lincomb_sequence(A_comp, A_coeff);
u_der(:,i) = A \ (partial_mui_f - partial_mui_A*u);
sim_data.u_der = u_der;
% function computing the continuity constant of a sparse matrix A
% with respect to inner product matrix K
% formally: c = largest singular value of matrix (K^-0.5 * A * K^-0.5)
% where K^-0.5 are inverse cholesky factors. These are not
% explicitly computed as full, but linear systems are solved.
% largest singular value of M = inv(C')*A*inv(C) is computed by
% largest eigenvalue of (M'* M)
function c = get_continuity_constant(A,K)
% NOTE(review): the active implementation was missing from the excerpt
% and is restored from the commented-out lines, which spell it out
% exactly: Cholesky-factorize K, then compute the largest eigenvalue of
% M'*M via eigs with the matrix-free product my_matrix_prod.
C = chol(K); % K = C'*C, C upper triangular
f = @(x) my_matrix_prod(x,A,C); % applies M'*M without forming M
lambda_max = eigs(f,size(A,1),1); % largest eigenvalue of M'*M
c = sqrt(lambda_max); % largest singular value of M
function res = my_matrix_prod(x,A,C)
% Matrix-free application of M'*M to x, where M = inv(C')*A*inv(C) and C
% is the upper Cholesky factor of the inner product matrix K.
% Used as the operator handle for eigs in get_continuity_constant.
% Restored from the elided excerpt: the intermediate y3 and the return
% value (both determined by the inline comments on y2 and y4).
y = C\x; %y = invC*x;
y2 = (C') \ (A*y);% y2 = inv(CT)*A*inv(C)*x
y3 = C\y2; % restored: y3 = inv(C)*y2
y4 = (C') \ (A'*y3); % y4 = inv(CT)*AT*inv(C)*inv(CT)*A*inv(C)*x
res = y4; % restored return value: res = (M'*M)*x
function cmaplog = create_log_colormap()
% Builds a colormap with logarithmically spaced color transitions,
% used for the surf plot of the functional landscape in case 6.
% NOTE(review): elided excerpt — the base colormap 'cmap', its length
% 'len', the loop over the entries n and the interpolation weights
% v1, v2, m are not visible here.
x = (len-1)/log10(len-1);
cmaplog = zeros(size(cmap));
cmaplog(n,:) = (1-m) * v1 + m*v2;
function model = my_pfusch_set_mu(model,mu1_2,mu3,mu4)
% Helper replacing model.set_mu: writes the full 4-component parameter
% vector into model.mus, taking the two free components from mu1_2 and
% appending the fixed values mu3 and mu4.
free_mus = mu1_2(:); % force column orientation
model.mus = [free_mus; mu3; mu4];
943 %function [grad_f_x,Delta_grad,output,data] = my_return_jacobian(model,reduced_data)
944 %grad_f_x = my_reduced_functional_gradient(model, reduced_data);
948 %if model.compute_err_estimator
953 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
954 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
955 %gradient optimization
function [opt_data, model]=my_gradient_opt(model,reduced_data)
%function [x_opt,nr_fct_total,nr_grad_total]=gradient_opt(model, model_data, reduced_data)
%
% Function performing an optimization either reduced or detailed using gradient method
% with either Armijo-stepsize rule, Wolfe-Powell-stepsize rule or
% dichotomie-algorithm
%
%Required fields of model:
% model.optimization.init_params: initial value
% model.optimization.tol: tolerance how close to 0 shall the norm be?
% model.optimization.get_Jacobian: gradient function
% model.optimization.objective_function: not needed in gradient_opt but
%     needed in armijo_stepsize.m and wolfe_powell_stepsize.m
% model.optimization.stepsize_rule: either 'armijo' or 'wolfepowell' or
%
%Generated fields of opt_data:
% opt_data.x_optimal: the optimal value
% opt_data.nr_fct_calls: Number of function calls
% opt_data.nr_grad_calc: Number of needed calculations of the gradient
%
% Oliver Zeeb 25.05.2010%
%
% NOTE(review): elided excerpt — among others the initialization of the
% iterate x and the descent direction d, the parameter update x = x+t*d,
% several 'end' lines, the assignments of min_or_max and some case
% labels of the stepsize switch are not visible here and are NOT
% reconstructed.

disp('entered gradient_opt')

%adding some fields to the model, if not existent
if ~isfield(model.optimization,'tol')
  model.optimization.tol=1e-3;
if ~isfield(model.optimization,'stepsize_rule')
  model.optimization.stepsize_rule='quotient';
if ~isfield(model.optimization,'min_or_max')
  model.optimization.min_or_max = 'min';
if model.optimization.derivatives_available
  model.compute_derivative_info = 1;

%setting signum for maximizing or minimizing,
% i.e. changes f(x) to -f(x) if function f(x) shall be maximized
if model.optimization.min_or_max == 'min'
elseif model.optimization.min_or_max == 'max'
else disp('maximize or minimize? model.optimization.min_or_max not properly defined')

nr_fct = 0; %counter for number of function calls
nr_grad = 0; %counter for number of gradient calculations
nRB= []; %basis sizes used for reduced simulations
nRB_der = []; %basis sizes used for reduced derivative simulations

%save current mu values of the model
mu_original = get_mu(model);

%set the mus in the model to the mus given in init_params
%model.(model.mu_names{k})=model.optimization.init_params(k);
model = set_mu(model, model.optimization.init_params);

% get initial parameters that should be optimized
% get the according boundaries
[lower_bound, upper_bound] = get_bound_to_optimize(model);

tol = model.optimization.tol;
PM = x; % for opt_data: array with all the mu_i
output = []; %array containing the f(mu_k)
max_iter = 100; %maximal number of iterations of the gradient method
opti_iter = 0; %counter

if strcmp(model.optimization.opt_mode,'detailed')
else %here: reduced optimization
  % initial gradient at the start parameters
  [grad_f_x] = model.optimization.get_Jacobian(model,reduced_data);
  grad_f_x = grad_f_x*min_or_max; nr_grad = nr_grad+1;
  norm_grad_f_x = norm(grad_f_x);
  norm_grad_vec = norm_grad_f_x;
  if model.compute_err_estimator
    [Delta_mu_vec,break_value_vec] = model.expensive_err_est(x,model,norm_grad_f_x);

continue_optimization = 1;

% START OF OPTIMIZATION
while (continue_optimization)&& (opti_iter <= max_iter)
  %calculation of the stepsize t:
  switch model.optimization.stepsize_rule
    case {'fixed_stepsize'}
      t = model.optimization.stepsize_fix;
    case {'armijo'} % Armijo rule
      [model,t,nr_fct_stepsize,output, data] = stepsize_armijo(model, reduced_data, output, x, d);
      nr_fct = nr_fct+nr_fct_stepsize;
      nRB = [nRB,data.nRB];
      % (the 'dichotomie' case label is elided here)
      [model,t,nr_fct_stepsize,output] = stepsize_dichotomie(model, model_data, output, x, d);
      nr_fct = nr_fct+nr_fct_stepsize;
    case {'exponential'}
      if ~exist('exp_step')
      [model,t,nr_fct_stepsize,output] = stepsize_exponential(model, model_data, output, x, d, exp_step);
      exp_step=exp_step+1;
      nr_fct = nr_fct+nr_fct_stepsize;
      % (the 'quotient' case label is elided here)
      if ~exist('quot_step')
      quot_step=quot_step+1;
      nr_fct = nr_fct+nr_fct_stepsize;
      %C_alpha = t; %update of the maximum stepsize in this step for error estimation
      %[C_L,model] = model.get_lipschitz_constant(model); %update Lipschitz constant
    %WOLFE POWELL NOT WORKING AT THE MOMENT!!!
    case {'wolfepowell'}
      [t, nr_fct_stepsize, nr_grad_stepsize] = stepsize_wolfe_powell(model, x, d);
      nr_fct = nr_fct+nr_fct_stepsize;
      nr_grad = nr_grad+nr_grad_stepsize;
      fprintf('unknown stepsize rule')

  opti_iter = opti_iter+1;
  disp(['Next parameter in reduced gradient optimization: ',num2str(x(:)')]);
  disp('------------------------------------')

  %check, whether the new x is allowed by the according mu_range or whether it exceeds the
  %boundaries. If so: set this component to the boundary.
  if x(k) > upper_bound(k)
    x(k) = upper_bound(k);
  elseif x(k) < lower_bound(k)
    x(k) = lower_bound(k);

  model = set_mu(model,x); % write the actual parameter setting in the model

  %Calculate gradient at the new parameter point:
  [grad_f_x] = model.optimization.get_Jacobian(model,reduced_data);%now calculate the gradient with the new parameter setting
  norm_grad_f_x = norm(grad_f_x);
  norm_grad_vec = [norm_grad_vec,norm_grad_f_x];
  %output=output_array{1};
  grad_f_x=grad_f_x*min_or_max;
  nr_grad = nr_grad+1;

  %decide to continue or not
  %break_value = 2*model.gamma_H^2*model.L_H*(norm_grad_f_x+norm(Delta_grad(:,end)));
  if model.compute_err_estimator
    [Delta_mu,break_value] = model.expensive_err_est(x,model,norm_grad_f_x);
    break_value_vec = [break_value_vec, break_value];
    Delta_mu_vec = [Delta_mu_vec, Delta_mu];
    disp(['Actual gradient norm :',num2str(norm_grad_f_x(:)')]);
    disp(['Value of breaking criteria: ', num2str(break_value)]);
  %if (break_value<=1)&&(norm_grad_f_x<tol)
  if (norm_grad_f_x<tol)
    continue_optimization = 0;
% END OF OPTIMIZATION

%if function was maximized instead of minimized, the values must be
%output=output*min_or_max;

% collect results in the opt_data output struct
opt_data.optimal_params = x;
opt_data.nr_fct_calls = nr_fct;
opt_data.nr_grad_calc = nr_grad;
opt_data.parameter_sets = PM;
%opt_data.output = output;
%opt_data.max_output = max(output);
opt_data.max_output_paramsets = PM(end,:);
opt_data.norm_grad_f_x = norm_grad_f_x;
opt_data.norm_grad_vec = norm_grad_vec;
if model.compute_err_estimator
  opt_data.nr_opti_iter=opti_iter;
  opt_data.Delta_mu = Delta_mu;
  opt_data.break_value = break_value;
  %opt_data.nRB = nRB;
  %opt_data.nRB_der = nRB_der;
  %opt_data.used_detailed_calculations = used_detailed_calculations;
  %opt_data.gamma = g_H;
  %opt_data.Lipschitz_H = L_H;
  opt_data.break_value_vec = break_value_vec;
  opt_data.Delta_mu_vec = Delta_mu_vec;
1187 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
1188 % Improvement possibilities:
1189 % - offline-online decomposition of reduced optimization
1191 % - extended RB simulation: offline online decomposition
1192 % - Make RB basisgeneration more stable (error estimator below 1e-6)?
1193 % currently about 20 basisvectors, then oscillations start
1194 % - determine number of function evaluations for detailed/reduced
1195 % - use gradient scheme for optimization and view improvements and
1196 % validity of error estimator
1199 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
1201 % - full optimization: 35 sec. Reduced: 0.35 sec factor 100 acceleration
function r = verbose(level, message, messageId)
This function displays messages depending on a message-id and/or a level. Additionally you can set/res...
function demo_rb_gui(varargin)
reduced basis demo with sliders
function model = thermalblock_model_struct(params)
Thermal Block example similar as described in the book of A.T. patera and G. Rozza (just one paramete...
function p = plot_sequence(varargin)
plotting a sequence of data slices on polygonal 2d grid (constructed from params if empty) and provid...
function demo_detailed_gui(varargin)
demo gui for detailed simulations (calling demo_rb_gui after switching some pointers) ...
function p = plot_sim_data(model, model_data, sim_data, plot_params)
function performing the plot of the simulation results as specified in model.
function [ model , t , nr_fct , output ] = stepsize_quotient(model, x, d, quot_step)
function [model, t_opt, output] = stepsize_quotient(model, model_data, output, x, d...