gamm2013_exp.m
function gamm2013_exp(step, exp_params)
%function gamm2013_exp(step[, exp_params])
%
% experiments for the GAMM2013 paper: optimization with 2x2 thermal block

% B. Haasdonk, M. Dihlmann, 22.5.2013
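%
% Steps (see the switch below):
%   1: detailed simulation GUI for the 2x2 thermal block
%   2: detailed reference solution for the optimization
%   3: detailed (reduced basis) data generation with mu3, mu4 fixed
%   4: inspect the reduced basis and the reduced simulation GUI
%   5: attach the optimization functionals to the reduced data
%   6: plot of the reduced output landscape and time measurement
%   7: detailed fmincon optimization with time measurement
%   8: reduced fmincon optimization with error estimation
%   9: variation of the number of basis vectors N
%   10: test of the reduced functional gradient
%   11: gradient-based optimization
%   100-103: figures for the GAMM proceedings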

if nargin < 1
  step = 1;
  exp_params = [];
elseif nargin < 2
  exp_params = [];
end;

params.B1 = 2;
params.B2 = 2;
params.numintervals_per_block = 50;
model = thermalblock_model_struct(params);
model.RB_stop_epsilon = 1e-6;
model.RB_stop_Nmax = 30;
model.RB_train_size = 5000;
model_data = gen_model_data(model);
plot_params = [];
plot_params.axis_tight = 1;
plot_params.yscale_uicontrols = 0.5;

switch step
  case 1 % 2x2 thermal block, check detailed simulation by gui

    % model = model.set_mu(model,[0.1,0.8,0.1,0.1]);
    % sim_data = detailed_simulation(model,model_data);
    demo_detailed_gui(model, model_data, [], plot_params);
    % detailed_data = gen_detailed_data(model,model_data);
    % reduced_data = gen_reduced_data(model, detailed_data);
    % demo_rb_gui(model,detailed_data,reduced_data);

  case 2 % reference solution for optimization
    mu_star = [2,1,1,2];
    model = model.set_mu(model,mu_star);
    sim_data = detailed_simulation(model,model_data);
    plot_sim_data(model,model_data,sim_data,plot_params);

  case 3 % detailed data generation, mu3, mu4 fixed
    mu3 = 1; mu4 = 2;
    % keep parameters mu3 and mu4 fixed in greedy basis generation
    %model.RB_mu_list =
    model.mu_ranges = {[0.5,2.5],[0.5,2.5],[mu3,mu3],[mu4,mu4]};
    %id = 'MATLAB:nearlySingularMatrix';
    %warning('off',id)
    % set random number generator for deterministic result:
    RandStream.setDefaultStream(RandStream('mt19937ar','seed',1010101));
    detailed_data = gen_detailed_data(model, model_data);
    %warning('on',id)
    % 15 basis vectors generated

    save('gamm2013_detailed_data.mat');

  case 4 % inspect reduced basis and rb_demo_gui

    % note: reduced_data is only contained in the mat file after step 5
    load('gamm2013_detailed_data');
    params.plot = @plot_vertex_data;
    plot_sequence(detailed_data.RB, model_data.grid, params)
    figure;

    demo_rb_gui(model,detailed_data,reduced_data,plot_params);

    figure;

  case 5 % add optimization functionals to detailed_data
    load('gamm2013_detailed_data');

    % define optimization functionals by error to reference solution
    mu_star = [2,1];
    model = model.set_mu(model,mu_star);
    ref_sim_data = detailed_simulation(model,model_data);
    ref_dofs = ref_sim_data.uh.dofs;
    % mu3, mu4 stem from the mat file written in step 3:
    model.set_mu = @(model,mu) my_pfusch_set_mu(model,mu,mu3,mu4);
    model.get_mu = @(model) model.mus(1:2);
    J = @(dofs) J_functional(dofs, ...
                             model_data.df_info.l2_inner_product_matrix, ...
                             ref_dofs);
    detailed_optimization_functional = @(mu1_mu2) ...
        my_detailed_optimization_functional(mu1_mu2, J, model, model_data);

    reduced_data = gen_reduced_data(model,detailed_data);
    % add functional fields in reduced data
    RB = detailed_data.RB;
    M = model_data.df_info.l2_inner_product_matrix;
    reduced_data.J1 = RB'*M*RB;
    reduced_data.J2 = ref_dofs'*M*RB;
    reduced_data.J3 = ref_dofs'*M*ref_dofs;
    % JN = 0.5*(a'*J1*a - 2*J2*a + J3);
    % gradient: partial_mui JN = a'*gJ1*partial_mui_a - gJ2*partial_mui_a
    reduced_data.gJ1 = RB'*M*RB;
    reduced_data.gJ2 = ref_dofs'*M*RB;

    reduced_optimization_functional = @(mu1_mu2) ...
        my_reduced_optimization_functional(mu1_mu2, model,...
                                           reduced_data);

    model_data.ref_dofs = ref_dofs;
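
    % optional sanity check (not in the original run): at the reference
    % parameter the detailed functional must (up to round-off) vanish,
    % since u(mu_star) equals the reference solution:
    %   detailed_optimization_functional(mu_star)   % expect approx. 0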

    save('gamm2013_detailed_data.mat');

  case 6 % plot reduced output landscape and time measurement
    load('gamm2013_detailed_data');

    if isfield(exp_params, 'nmu')
      n_mu1 = exp_params.nmu;
      n_mu2 = exp_params.nmu;
    else
      n_mu1 = 21; n_mu2 = 21;
    end
    % mu1_range = [0.5,2.5];
    % mu1_range = [1.9,2.1];
    mu1_range = model.mu_ranges{1};
    mu1s = linspace(mu1_range(1),mu1_range(2),n_mu1);
    mu2_range = model.mu_ranges{2};
    % mu2_range = [0.5,2.5];
    % mu2_range = [0.9,1.1];
    mu2s = linspace(mu2_range(1),mu2_range(2),n_mu2);
    Js = zeros(n_mu1,n_mu2);
    tic;
    for n1 = 1:n_mu1
      for n2 = 1:n_mu2
        mu1_mu2 = [mu1s(n1),mu2s(n2)];
        Js(n1,n2) = reduced_optimization_functional(mu1_mu2);
      end;
    end;

    t = toc;
    disp(['computation time: ',num2str(t),' seconds.']);

    cmaplog = create_log_colormap();
    colormap(cmaplog);
    surf(mu1s,mu2s,Js','FaceColor','interp');
    xlabel('{\mu}_1');
    ylabel('{\mu}_2');

    % check reduced model and reduced optimization functional at
    % reference config:
    model = model.set_mu(model,mu_star);
    sim_data = rb_simulation(model,reduced_data);
    sim_data = rb_reconstruction(model, detailed_data, sim_data);
    disp('H10 error to reference solution:');
    err = sim_data.uh.dofs - ref_dofs;
    h10_error = sqrt(err'*model_data.df_info.h10_inner_product_matrix * err)
    disp('Delta error estimator:');
    sim_data.Delta
    disp('L2 error to reference solution:');
    l2_error = sqrt(err'*model_data.df_info.l2_inner_product_matrix * err)

    JN_ref = reduced_optimization_functional(mu_star(1:2));
    disp(['reduced optimization functional at reference parameter: ',...
          num2str(JN_ref)]);

    keyboard;

  case 7 % run detailed optimizer and time measurement
    load('gamm2013_detailed_data');
    mu0 = [0.5,0.5];
    lb = [0.5,0.5];
    ub = [2.5,2.5];
    % options = [];
    options = optimset();
    options.MaxFunEvals = 1000;
    options.TolFun = 1e-14;
    options.TolX = 1e-14;
    fun = detailed_optimization_functional;
    tic;
    [mumin,fval,exitflag,output,lambda,grad] = fmincon(fun,mu0,[],[], ...
        [],[],lb,ub,[],options);
    t = toc;
    disp('found optimum mu_min:')
    mumin
    disp('J(mu_min): ');
    fun(mumin)
    disp('detailed optimization time:')
    t
    disp(['functional gradient at optimum: ',num2str(grad')]);

  case 8 % run reduced optimizer with time measurement and error estimation
    load('gamm2013_detailed_data');
    mu0 = [0.5,0.5];
    lb = [0.5,0.5];
    ub = [2.5,2.5];
    % options = [];
    options = optimset();
    options.MaxFunEvals = 1000;
    options.TolFun = 1e-14;
    options.TolX = 1e-14;
    fun = reduced_optimization_functional;
    tic;
    [mumin,fval,exitflag,output,lambda,grad] = fmincon(fun,mu0,[],[], ...
        [],[],lb,ub,[],options);
    t = toc;
    format long;
    disp('found optimum mu_min:')
    mumin
    disp('J(mu_min): ');
    fun(mumin)
    disp('reduced optimization time:')
    t

    disp('l2_error in mu_min:')
    norm(mumin-mu_star(1:2))

    disp('computing a-posteriori error estimator (expensive):')

    % infinity norm * sqrt(2) is an upper bound for the 2-norm of the gradient
    %epsilon_J = norm(grad);%output.firstorderopt*sqrt(2);
    model = model.set_mu(model,mumin); % evaluate the gradient at the optimizer (cf. step 9)
    grad_J = my_reduced_functional_gradient(model,...
                                            reduced_data);
    epsilon_J = norm(grad_J);

    disp(['norm functional gradient at optimum: ',num2str(epsilon_J)]);

    % error estimator computation (expensive!!!)
    [Delta_mu, val_indicator] = my_expensive_error_estimator(mumin,model, ...
        detailed_data,reduced_data,...
        detailed_optimization_functional,...
        epsilon_J, ...
        ref_dofs);

    disp('error bound:')
    Delta_mu

    disp('validity indicator, should be less than 1 for validity: ')
    val_indicator

  %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  % do a variation of the number of basis vectors N in RB
  % track online simulation time
  %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  case 9

    load('gamm2013_detailed_data');
    detailed_data_full = detailed_data;
    %J = @(dofs) J_functional(dofs, ...
    %       model_data.df_info.l2_inner_product_matrix, ...
    %       ref_dofs);
    %
    %detailed_optimization_functional = @(mu1_mu2) ...
    %    my_detailed_optimization_functional(mu1_mu2, J, model, model_data, ...
    %                                        mu3,mu4);

    % optimization settings
    mu0 = [0.5,0.5];
    lb = [0.5,0.5];
    ub = [2.5,2.5];
    % options = [];
    options = optimset();
    options.MaxFunEvals = 1000;
    options.TolFun = 1e-14;
    options.TolX = 1e-14;

    % detailed optimization
    fun = detailed_optimization_functional;
    tic;
    [mumin_det,fval_det,exitflag_det,output_det,lambda,grad_det] = ...
        fmincon(fun,mu0,[],[],[],[],lb,ub,[],options);
    t_det = toc;

    % N - vector
    Nmax = size(detailed_data.RB,2);
    N_vec = 14:1:Nmax;
    % initialization
    Delta_vec = zeros(1,length(N_vec));
    Delta_pmui_vec = zeros(2,length(N_vec));
    Delta_grad_J_vec = zeros(2,length(N_vec));
    s_vec = zeros(1,length(N_vec));
    t_sim = zeros(1,length(N_vec));
    t_sim_ext = zeros(1,length(N_vec));
    t_J = zeros(1,length(N_vec));
    t_opt = zeros(1,length(N_vec));
    mu_min_vec = zeros(2,length(N_vec));
    val_ind_vec = zeros(1,length(N_vec));
    Delta_mu_vec = zeros(1,length(N_vec));
    % loop over several basis sizes
    for n = 1:length(N_vec)
      disp(n)
      N = N_vec(n);
      detailed_data.RB = detailed_data_full.RB(:,1:N);
      reduced_data = gen_reduced_data(model, detailed_data);
      RB = detailed_data.RB;
      M = model_data.df_info.l2_inner_product_matrix;
      reduced_data.J1 = RB'*M*RB;
      reduced_data.J2 = ref_dofs'*M*RB;
      reduced_data.J3 = ref_dofs'*M*ref_dofs;
      % JN = 0.5*(a'*J1*a - 2*J2*a + J3);
      % gradient: partial_mui JN = a'*gJ1*partial_mui_a - gJ2*partial_mui_a
      reduced_data.gJ1 = RB'*M*RB;
      reduced_data.gJ2 = ref_dofs'*M*RB;
      reduced_optimization_functional = @(mu1_mu2) ...
          my_reduced_optimization_functional(mu1_mu2, model,...
                                             reduced_data);

      tic; sim_data = rb_simulation(model, reduced_data); t_s = toc;
      sim_data = rb_reconstruction(model, detailed_data,sim_data);

      tic;
      sim_data_ext = extended_rb_simulation(model,reduced_data,detailed_data);
      t_se = toc;

      % Delta grad
      Delta_grad_J_vec(:,n) = my_Delta_partial_Js(sim_data_ext,detailed_data,ref_dofs);

      fun = reduced_optimization_functional;
      tic;
      [mumin,fval,exitflag,output,lambda,grad] = fmincon(fun,mu0,[],[], ...
          [],[],lb,ub,[],options);
      t_opt(n) = toc;
      %epsilon_J = norm(grad);%output.firstorderopt*sqrt(2);
      model = set_mu(model,mumin);
      grad_J = my_reduced_functional_gradient(model,reduced_data);
      epsilon_J = norm(grad_J);

      % error estimator computation (expensive!!!)
      [Delta_mu_vec(n), val_ind_vec(n)] = my_expensive_error_estimator(mumin,model, ...
          detailed_data,reduced_data,...
          detailed_optimization_functional,...
          epsilon_J, ...
          ref_dofs);

      % track data
      Delta_vec(n) = sim_data.Delta;
      Delta_pmui_vec(:,n) = sim_data_ext.Delta_partial_mui_us';
      tic;
      s_vec(n) = J(sim_data.uh.dofs); t_J(n) = toc;
      t_sim(n) = t_s;
      t_sim_ext(n) = t_se;
      mu_min_vec(:,n) = mumin';
    end

    t_J

    figure(1); clf
    semilogy(N_vec,Delta_vec)
    title('Error estimator for solution')
    xlabel('N')

    figure(2); clf
    semilogy(N_vec, Delta_pmui_vec(1,:))
    hold on
    semilogy(N_vec, Delta_pmui_vec(2,:),'r')
    xlabel('N')
    legend('pmu1','pmu2')
    title('Error estimators for the derivatives')

    figure(3); clf;
    semilogy(N_vec, s_vec)
    title('Output')
    xlabel('N');
    ylabel('s');

    figure(4); clf;
    semilogy(N_vec,Delta_grad_J_vec(1,:));
    hold on
    semilogy(N_vec, Delta_grad_J_vec(2,:),'r');
    title('Gradient J error estimator')
    xlabel('N');
    legend('pmu1','pmu2')

    figure(5); clf;
    plot(N_vec,t_sim)
    hold on; plot(N_vec,t_sim_ext,'r')
    title('online simulation times');
    legend('rb sim','rb sim expensive')
    xlabel('N')

    mu_err = mu_min_vec-repmat(mu_star(1:2)',1,length(N_vec));
    mu_err_norm = zeros(1,length(N_vec));
    for i = 1:length(N_vec)
      mu_err_norm(i) = norm(mu_err(:,i));
    end
    figure(6); clf;
    semilogy(N_vec,mu_err_norm)
    hold on
    semilogy(N_vec,Delta_mu_vec,'r');
    title('Error in optimal parameters')
    legend('true','est')
    xlabel('N');

    % split into valid and invalid error estimator values
    Delta_mu_v = [];
    Delta_mu_iv = [];
    i = 1;
    while val_ind_vec(i) > 1
      Delta_mu_iv = [Delta_mu_iv,Delta_mu_vec(i)];
      i = i+1;
    end
    % repeat the first valid value so the two curves connect in the plot:
    Delta_mu_iv = [Delta_mu_iv,Delta_mu_vec(i)];
    for j = i:length(N_vec)
      Delta_mu_v = [Delta_mu_v, Delta_mu_vec(j)];
    end

    figure(7); clf;
    semilogy(N_vec,mu_err_norm,'LineWidth',2)
    hold on
    semilogy(N_vec(1:length(Delta_mu_iv)),Delta_mu_iv,'r-.','LineWidth',2);
    semilogy(N_vec(length(Delta_mu_iv):end),Delta_mu_v,'r-','LineWidth',2);
    %title('Error in optimal parameters')
    legend('true error','error est. (invalid)','error est. (valid)')
    xlabel('number of basis vectors N');
    ylabel('error in optimal parameter')

    disp('----------------------------------------')
    disp('Simplex optimization (Matlab fmincon):')
    disp('======================================')
    disp(['Detailed optimization time :',num2str(t_det)]);
    disp(['Reduced optimization time :', num2str(t_opt(end))]);
    disp(' ')
    disp(['Last true error in optimization: ',num2str(mu_err_norm(end)),' and Delta_mu: ',num2str(Delta_mu_vec(end))])
    disp(['Effectivity of error estimator: ',num2str(Delta_mu_vec(end)/mu_err_norm(end))])
    disp(' ')
    disp(['Reduced optimal parameters: ',num2str(mumin(:)')])
    disp(['Number of functional calculations : ',num2str(output.funcCount)])
    disp(['Functional value at optimum: ',num2str(fval)]);
    disp(['Gradient at optimum: ',num2str(norm(grad(:)))])

    keyboard;

  %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  % implement and test reduced functional gradient
  %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

  case 10

    load('gamm2013_detailed_data');
    mu1_2 = [0.5;0.5];
    mu = mu1_2;
    model = set_mu(model,mu);
    tic;
    grad_J = my_reduced_functional_gradient(model, reduced_data);
    t = toc
    % check numerically with finite differences
    % (FD of the reduced functional, consistent with J0 below)
    h = 0.0001;
    hv = eye(2,2).*h;
    gJn = zeros(2,1);
    J0 = reduced_optimization_functional(mu);
    for i = 1:2
      gJn(i) = (reduced_optimization_functional(mu+hv(:,i))-J0);
      gJn(i) = gJn(i)/h;
    end

    disp(grad_J)
    disp(gJn)

    % detailed gradient
    Jdet = my_detailed_functional_gradient(model, model_data);
    disp(Jdet);

    keyboard;

  %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  % gradient optimization
  %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

  case 11

    load('gamm2013_detailed_data');
    model.optimization = [];
    model.optimization.init_params = [0.5,0.5];
    model.optimization.tol = 1e-7;
    model.optimization.stepsize_rule = 'armijo';
    model.optimization.stepsize_fix = 1;
    model.optimization.get_Jacobian = @my_reduced_functional_gradient;
    model.optimization.objective_function = @(model,reduced_data) ...
        my_reduced_optimization_functional([],model,reduced_data);
    model.optimization.derivatives_available = 0;
    model.optimization.params_to_optimize = [1,1,0,0];
    model.optimization.opt_mode = 'reduced';
    model.optimization.initial_stepsize = 1000;
    model.optimization.sigma = 0.6;

    model.compute_err_estimator = 0;

    model.expensive_err_est = @(mu,model,grad_J) my_expensive_error_estimator(mu,model, ...
        detailed_data,reduced_data,...
        detailed_optimization_functional,...
        grad_J, ...
        ref_dofs);

    % reduced optimization: first run without error estimator for pure
    % timing, then rerun with the estimator switched on
    tic;
    [opt_data_red, model] = my_gradient_opt(model,reduced_data);
    t_optred = toc

    model.compute_err_estimator = 1;
    [opt_data_red, model] = my_gradient_opt(model,reduced_data);

    % detailed optimization (same driver, detailed function pointers)
    model.optimization.get_Jacobian = @my_detailed_functional_gradient;
    model.optimization.objective_function = @(model,model_data) ...
        my_detailed_optimization_functional([],J,model,model_data);
    tic;
    [opt_data_det] = my_gradient_opt(model, model_data);
    t_optdet = toc

    opt_data_red.t_opt = t_optred;
    opt_data_det.t_opt = t_optdet;

    save('Optimization_data_gamm.mat','opt_data_red','opt_data_det');

    keyboard;
  %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  % plot figures for GAMM proceedings
  %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

  % first conduct cases 3 and 5

  %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  % plot functional landscape
  %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  case 100

    exp_params.nmu = 50; % number of grid points per parameter direction

    gamm2013_exp(6,exp_params);

    keyboard;

  %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  % plot reference solution
  %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  case 101

    load('gamm2013_detailed_data');

    model = model.set_mu(model,[2,1]);
    sim_data = detailed_simulation(model,model_data);
    plot_sim_data(model,model_data,sim_data,plot_params);

  %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  % plot gradient steps and error estimator decay
  %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  case 102

    %gamm2013_exp(11);

    mu_ref = [2;1];
    load('gamm2013_detailed_data.mat')
    load('Optimization_data_gamm.mat');

    % sort into valid and invalid error estimator values
    Delta_mu_vec = opt_data_red.Delta_mu_vec;
    norm_grad = opt_data_red.norm_grad_vec;

    ind_v = find(opt_data_red.break_value_vec<1);
    ind_iv = find(opt_data_red.break_value_vec>=1);
    % include one extra entry so the invalid and valid curves connect:
    Delta_mu_iv = Delta_mu_vec(ind_iv(1):ind_iv(end)+1);
    norm_grad_iv = norm_grad(ind_iv(1):ind_iv(end)+1);
    Delta_mu_v = Delta_mu_vec(ind_v);
    norm_grad_v = norm_grad(ind_v);

    % sort
    [norm_grad,idx1] = sort(norm_grad,'descend');
    [norm_grad_iv,idx2] = sort(norm_grad_iv,'descend');
    Delta_mu_iv = Delta_mu_iv(idx2);
    [norm_grad_v,idx3] = sort(norm_grad_v,'descend');
    Delta_mu_v = Delta_mu_v(idx3);

    % true error
    dist = opt_data_red.parameter_sets-repmat(mu_ref,1,size(opt_data_red.parameter_sets,2));
    err = zeros(1,size(dist,2));
    for i = 1:size(dist,2)
      err(i) = norm(dist(:,i));
    end
    err = err(idx1);

    figure
    loglog(norm_grad,err,'LineWidth',2);
    hold on
    loglog(norm_grad_iv,Delta_mu_iv,'r-.','LineWidth',2);
    loglog(norm_grad_v,Delta_mu_v,'r','LineWidth',2);
    set(gca, 'XDir', 'reverse')
    xlabel('gradient norm')
    ylabel('error in parameters')
    legend('true error','error est. (invalid)','error est. (valid)')

    % show some data on screen
    disp('----------------------------------------')
    disp('Gradient optimization:')
    disp('=======================')
    disp(['Detailed optimization time :',num2str(opt_data_det.t_opt)]);
    disp(['Reduced optimization time :', num2str(opt_data_red.t_opt)]);
    disp(' ')
    disp(['Last true error in optimization: ',num2str(err(end)),' and Delta_mu: ',num2str(Delta_mu_vec(end))])
    disp(['Effectivity of error estimator: ',num2str(Delta_mu_vec(end)/err(end))])
    disp(' ')
    disp(['Reduced optimal parameters: ',num2str(opt_data_red.optimal_params(:)')])
    disp(['Number of functional calculations (armijo): ',num2str(opt_data_red.nr_fct_calls)])
    disp(['Number of gradient calculations : ',num2str(opt_data_red.nr_grad_calc)])
    disp(' ')
    disp(['Functional value at optimum: ',num2str(my_reduced_optimization_functional(opt_data_red.optimal_params,model,reduced_data))]);
    disp(['Functional value at optimum (det): ',num2str(my_detailed_optimization_functional(opt_data_red.optimal_params,J,model,model_data))]);
    disp(['Gradient at optimum: ',num2str(opt_data_red.norm_grad_f_x)])
    keyboard;

  %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  % plot variation of the number of basis vectors
  % (simplex optimization)
  %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

  case 103

    gamm2013_exp(9);

  otherwise
    error('step number unknown')
end;

% functional J, here l2 error functional J(u) := 0.5*\int_Omega |u-u_star|^2
function res = J_functional(dofs, l2_inner_product_matrix, ref_dofs)
err = dofs-ref_dofs;
res = 0.5* err'*l2_inner_product_matrix* err;
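
% For a reduced solution uh = RB*a this expands into precomputable terms;
% this is exactly the decomposition stored in reduced_data in step 5:
%   J(RB*a) = 0.5*( a'*(RB'*M*RB)*a - 2*(ref_dofs'*M*RB)*a ...
%                   + ref_dofs'*M*ref_dofs )
%           = 0.5*( a'*J1*a - 2*J2*a + J3 )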

% Implementation of the detailed optimization functional
function [res,dummy] = my_detailed_optimization_functional(mu1_mu2, J, model, model_data)
dummy = [];
if ~isempty(mu1_mu2)
  model = model.set_mu(model,mu1_mu2(:));
end
sim_data = detailed_simulation(model,model_data);
fprintf('.');
res = J(sim_data.uh.dofs);

% reduced optimization functional: not fully offline/online decomposed!
function [res,dummy] = my_reduced_optimization_functional(mu1_mu2, model, ...
                                                  reduced_data)
dummy = [];
if ~isempty(mu1_mu2)
  model = model.set_mu(model,mu1_mu2(:));
end
sim_data = rb_simulation(model,reduced_data);
uN = sim_data.uN;
% = J(detailed_data.RB*sim_data.uN), evaluated in reduced quantities:
res = 0.5*(uN'*reduced_data.J1*uN - 2*reduced_data.J2*uN + reduced_data.J3);
fprintf('.');

function [Delta_mu, val_indicator] = my_expensive_error_estimator(mumin,model, ...
    detailed_data,reduced_data,...
    optimization_functional,...
    epsilon_J, ...
    ref_dofs)
% compute the parameter error estimator Delta_mu and the
% validity indicator val_indicator = 4*gamma^2*L*epsilon,
% which should be less than 1 for the bound to be valid.

% gamma: induced norm of the inverse detailed Hessian
% => approximate Hessian by finite differences +-h
h = 1e-6;
H = FD_Hessian(mumin,optimization_functional, h)
gamma = norm(inv(H))

% compute Delta_Grad_J by extended RB-simulation (for sensitivities)
sim_data = extended_rb_simulation(model,reduced_data,detailed_data);
Delta_partial_Js = my_Delta_partial_Js(sim_data,detailed_data,ref_dofs);
% sum up all partial derivative estimators
Delta_Grad_J = norm(Delta_partial_Js);

epsilon = Delta_Grad_J + epsilon_J;

Delta_mu = 2*gamma*epsilon;

% L: maximum Hessian Lipschitz constant in a 2*gamma*epsilon ball around mumin
% for now: simple maximum search over a few points
L = 0;
mus = [0,1; 1,0; sqrt(2)/2,sqrt(2)/2];
nmus = size(mus,1);
for n = 1:nmus
  H2 = FD_Hessian(mumin(:)'+mus(n,:)*Delta_mu,optimization_functional, h);
  L2 = norm(H-H2)/Delta_mu; % Delta_mu is the distance between the mus
  if L2>L
    L = L2;
  end;
end;

val_indicator = 4 * gamma^2 * L * epsilon;

% compute approximate Hessian by 6 point evaluations
%     f12 f22
% f01 f11 = f(x)   f21 = f(x+[h,0])
%     f10
function H = FD_Hessian(x,f, h)
x = x(:);
H = zeros(2,2);
f11 = f(x);
f01 = f(x-[h;0]);
f21 = f(x+[h;0]);
f10 = f(x-[0;h]);
f12 = f(x+[0;h]);
f22 = f(x+[h;h]);
% d1d1f:
H(1,1) = (f21-2*f11+f01)/h^2;
% d2d2f:
H(2,2) = (f12-2*f11+f10)/h^2;
% d1d2f: (a bit inaccurate...)
H(1,2) = ((f22-f21)/h - (f12-f11)/h)/h;
H(2,1) = H(1,2);
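
% self-check sketch for FD_Hessian (not part of the experiments): for a
% quadratic f(x) = 0.5*x'*Q*x all involved differences are exact up to
% round-off, so H should reproduce Q:
%   Q = [2,1;1,3]; f = @(x) 0.5*x'*Q*x;
%   H = FD_Hessian([1;1], f, 1e-4)   % expect H approx equal to Q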

% detailed functional gradient: partial_mui J = (u - u_ref, partial_mui u)_L2,
% with the sensitivities partial_mui u from extended_detailed_simulation
function grad_J = my_detailed_functional_gradient(model, model_data)

grad_J = zeros(2,1);

sim_data = extended_detailed_simulation(model, model_data);

M = model_data.df_info.l2_inner_product_matrix;

for i = 1:2
  grad_J(i) = (sim_data.uh.dofs-model_data.ref_dofs)'*M*sim_data.u_der(:,i);
end

% return gradient of the functional J (reduced)
function grad_J = my_reduced_functional_gradient(model, reduced_data)
% perform a reduced simulation
grad_J = zeros(2,1);

model.compute_err_estimator = 0;
sim_data = extended_rb_simulation(model, reduced_data,[]);

%M = detailed_data.df_info.l2_inner_product_matrix;
%RB = detailed_data.RB;
%for i=1:2
%  grad_J(i) = sim_data.uN'*RB'*M*RB*sim_data.partial_mui_uNs(:,i) ...
%              - ref_dofs'*M*RB*sim_data.partial_mui_uNs(:,i);
%end
for i = 1:2
  grad_J(i) = sim_data.uN'*reduced_data.gJ1*sim_data.partial_mui_uNs(:,i) ...
              - reduced_data.gJ2*sim_data.partial_mui_uNs(:,i);
end

% error estimators for the gradient of J for the quadratic functional:
% |partial_mui J(u) - partial_mui J(uN)|
function Delta_partial_Js = my_Delta_partial_Js(sim_data,detailed_data,ref_dofs)
Delta_partial_Js = [0,0]; % using Delta_partial_us
err = detailed_data.RB * sim_data.uN - ref_dofs;
norm_uN_minus_ud = sqrt(err' * ...
                        detailed_data.df_info.h10_inner_product_matrix ...
                        * err);
for i = 1:2
  partial_mui_uN_dofs = detailed_data.RB*sim_data.partial_mui_uNs(:,i);
  norm_partial_uNi = sqrt(partial_mui_uN_dofs' * ...
                          detailed_data.df_info.h10_inner_product_matrix * ...
                          partial_mui_uN_dofs);
  Delta_partial_Js(i) = sim_data.Delta* (sim_data.Delta_partial_mui_us(i) ...
                                         + norm_partial_uNi) ...
      + norm_uN_minus_ud*sim_data.Delta_partial_mui_us(i);
end;
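
% derivation sketch for the bound above: as J is quadratic,
% partial_mui J(u) = (u - u_ref, partial_mui u), hence
%   |partial_mui J(u) - partial_mui J(uN)|
%     <= ||u-uN|| * ( ||partial_mui u - partial_mui uN|| + ||partial_mui uN|| )
%        + ||uN - u_ref|| * ||partial_mui u - partial_mui uN||
%     <= Delta * ( Delta_partial_mui_u + ||partial_mui uN|| )
%        + ||uN - u_ref|| * Delta_partial_mui_u,
% which is the expression computed in the loop (norms taken in H^1_0; the
% L2 scalar products of J are estimated by H^1_0 norms via the Friedrichs
% inequality, up to its constant).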

% RB-simulation including sensitivities and error estimators,
% but with expensive computation by high-dimensional operations!
function sim_data = extended_rb_simulation(model,reduced_data,detailed_data)

if ~isfield(model,'compute_err_estimator')
  model.compute_err_estimator = 1;
end
sim_data = rb_simulation(model,reduced_data);
sim_data.Delta_s = []; % not available
sim_data.s = []; % not available here
% solve sensitivity problems:

%old_mode = model.decomp_mode;
model.decomp_mode = 2; % coefficients
[A_coeff,f_coeff] = ...
    model.operators(model,[]);
AN = lincomb_sequence(reduced_data.AN_comp, A_coeff);
fN = lincomb_sequence(reduced_data.fN_comp, f_coeff);
uN = AN\fN;

% solve sensitivity problems, coefficient derivatives by finite differences:

% matrix of reduced sensitivity coefficients, each column one reduced solution:
sim_data.partial_mui_uNs = zeros(length(uN),2);
% vector of error estimators:
sim_data.Delta_partial_mui_us = zeros(1,2);
mu = model.get_mu(model);
h = 1e-6;
E = eye(2);
if model.compute_err_estimator
  K = model.get_inner_product_matrix(detailed_data);
  model.decomp_mode = 1;
  [A_comp,f_comp] = ...
      model.operators(model,detailed_data);
end
model.decomp_mode = 2; % coefficients
alpha = model.coercivity_alpha(model);

for i = 1:2
  % 1. solve for sensitivity solutions:
  model = model.set_mu(model,mu+h*E(:,i));
  [A_coeff2,f_coeff2] = ...
      model.operators(model,[]);
  partial_mui_A_coeff = (A_coeff2-A_coeff)/h;
  partial_mui_f_coeff = (f_coeff2-f_coeff)/h;
  partial_mui_AN = lincomb_sequence(reduced_data.AN_comp, partial_mui_A_coeff);
  partial_mui_fN = lincomb_sequence(reduced_data.fN_comp, partial_mui_f_coeff);
  partial_mui_uN = AN \ (partial_mui_fN - partial_mui_AN*uN);
  sim_data.partial_mui_uNs(:,i) = partial_mui_uN;

  if model.compute_err_estimator
    % 2. compute residuals and error estimators:
    % Delta_partial_mui_u = gamma_partial_mui_a/alpha * Delta_u + |r|/alpha
    % with (r,v) = partial_mui f(v) - a(partial_mui_uN,v) - partial_mui_a(uN,v)
    A = lincomb_sequence(A_comp, A_coeff);
    partial_mui_A = lincomb_sequence(A_comp, partial_mui_A_coeff);
    partial_mui_f = lincomb_sequence(f_comp, partial_mui_f_coeff);

    res_partial_mui_A = partial_mui_A * (detailed_data.RB * uN);
    res_A = A * (detailed_data.RB * partial_mui_uN);
    res = partial_mui_f - res_partial_mui_A - res_A;
    %%% the residual functional is res * v
    %%% Riesz representative: v_r' K v = (v_r,v) = res*v
    %%% so res = K * v_r;
    v_r = K \ res;
    %%% res_norm_sqr = (v_r,v_r) = v_r' K v_r = v_r' * res;
    res_norm_sqr = v_r' * res;
    res_norm = sqrt(max(res_norm_sqr,0));

    % continuity constant of the bilinear form partial_mui_a:
    % keyboard;
    gamma_partial_mui_a = get_continuity_constant(partial_mui_A,K);

    sim_data.Delta_partial_mui_us(i) = ...
        gamma_partial_mui_a / alpha * sim_data.Delta ...
        + res_norm / alpha;
  end

end;


% detailed simulation returning sensitivities
function sim_data = extended_detailed_simulation(model, model_data)

sim_data = detailed_simulation(model,model_data);

% solve sensitivity problems:
model.decomp_mode = 2; % coefficients
[A_coeff,f_coeff] = ...
    model.operators(model,[]);

% solve sensitivity problems, coefficient derivatives by finite differences:

% matrix of sensitivity dofs, each column one sensitivity solution:
u = sim_data.uh.dofs;
u_der = zeros(length(u),2);

mu = model.get_mu(model);
h = 1e-6;
E = eye(2);
%K = model.get_inner_product_matrix(detailed_data);
model.decomp_mode = 1;
[A_comp,f_comp] = ...
    model.operators(model,model_data);

model.decomp_mode = 2; % coefficients

for i = 1:2
  % 1. solve for sensitivity solutions:
  % a(partial_mui u, v) = partial_mui f(v) - partial_mui a(u, v)
  model = model.set_mu(model,mu+h*E(:,i));
  [A_coeff2,f_coeff2] = ...
      model.operators(model,[]);
  partial_mui_A_coeff = (A_coeff2-A_coeff)/h;
  partial_mui_f_coeff = (f_coeff2-f_coeff)/h;
  partial_mui_A = lincomb_sequence(A_comp, partial_mui_A_coeff);
  partial_mui_f = lincomb_sequence(f_comp, partial_mui_f_coeff);
  A = lincomb_sequence(A_comp, A_coeff);
  u_der(:,i) = A \ (partial_mui_f - partial_mui_A*u);
  sim_data.u_der = u_der;

end;

% function computing the continuity constant of a sparse matrix A
% with respect to the inner product matrix K
% formally: c = largest singular value of the matrix (K^-0.5 * A * K^-0.5),
% where K^-0.5 are inverse Cholesky factors. These are not
% explicitly computed as full matrices, but linear systems are solved.
% The largest singular value of M = inv(C')*A*inv(C) is computed as the
% largest eigenvalue of (M'*M).
function c = get_continuity_constant(A,K)
% note: the actual computation below is disabled; a constant 1 is
% returned as a crude placeholder bound.
c = 1;
%C = chol(K); % => K = C'*C
%f = @(x) my_matrix_prod(x,A,C);
%lambda_max = eigs(f,size(A,1),1);
%c = sqrt(lambda_max);

function res = my_matrix_prod(x,A,C)
y = C\x; % y = invC*x;
y2 = (C') \ (A*y); % y2 = inv(CT)*A*inv(C)*x
y3 = C\y2;
y4 = (C') \ (A'*y3); % y4 = inv(CT)*AT*inv(C)*inv(CT)*A*inv(C)*x
res = y4;
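
% dense verification sketch for the continuity constant (hypothetical,
% only feasible for small test matrices): the largest singular value of
% inv(C')*A*inv(C) can be computed directly via
%   C = chol(K); M = (C') \ (A / C); c_dense = svds(M,1);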

function cmaplog = create_log_colormap()

% usual colormap
cmap = colormap;

len = size(cmap,1);

x = (len-1)/log10(len-1);
cmaplog = zeros(size(cmap));

% resample the colormap at logarithmically spaced indices,
% with linear interpolation between neighbouring entries:
for n = 1:len
  ind = x*log10(n)+1;
  a = floor(ind);
  b = ceil(ind);
  if b>len
    b = len;
  end
  m = ind-a;
  v1 = cmap(a,:);
  v2 = cmap(b,:);

  cmaplog(n,:) = (1-m) * v1 + m*v2;
end


function model = my_pfusch_set_mu(model,mu1_2,mu3,mu4)
model.mus = [mu1_2(:);mu3;mu4];
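
% usage example (cf. step 5): fix the last two thermal block parameters to
% the values mu3 = 1, mu4 = 2 from step 3, so that the optimizer only
% sees (mu1, mu2):
%   model.set_mu = @(model,mu) my_pfusch_set_mu(model,mu,1,2);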

%function [grad_f_x,Delta_grad,output,data] = my_return_jacobian(model,reduced_data)
%grad_f_x = my_reduced_functional_gradient(model, reduced_data);
%Delta_grad = [];
%output = [];
%data = [];
%if model.compute_err_estimator
%  Delta_grad =
%end

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% gradient optimization

function [opt_data, model] = my_gradient_opt(model,reduced_data)
%function [opt_data, model] = my_gradient_opt(model,reduced_data)
%
% Function performing an optimization, either reduced or detailed, using a
% gradient method with either the Armijo stepsize rule, the
% Wolfe-Powell stepsize rule or the dichotomy algorithm.
%
% Required fields of model:
%   model.optimization.init_params: initial value
%   model.optimization.tol: tolerance, i.e. how close to 0 the gradient
%        norm must be
%   model.optimization.get_Jacobian: gradient function
%   model.optimization.objective_function: not needed in gradient_opt but
%        needed in armijo_stepsize.m and wolfe_powell_stepsize.m
%   model.optimization.stepsize_rule: either 'armijo' or 'wolfepowell' or
%        'dichotomie'
%
% Generated fields of opt_data:
%   opt_data.optimal_params: the optimal value
%   opt_data.nr_fct_calls: number of function calls
%   opt_data.nr_grad_calc: number of needed calculations of the gradient
%
%
% Oliver Zeeb 25.05.2010


if(model.verbose>=8)
  disp('entered gradient_opt')
end

% adding some fields to the model, if not existent
if ~isfield(model.optimization,'tol')
  model.optimization.tol = 1e-3;
end
if ~isfield(model.optimization,'stepsize_rule')
  model.optimization.stepsize_rule = 'quotient';
end
if ~isfield(model.optimization,'min_or_max')
  model.optimization.min_or_max = 'min';
end

if model.optimization.derivatives_available
  model.compute_derivative_info = 1;
end

% setting the sign for maximizing or minimizing,
% i.e. changes f(x) to -f(x) if the function f(x) shall be maximized
% (strcmp instead of == avoids errors for strings of different length)
if strcmp(model.optimization.min_or_max,'min')
  min_or_max = 1;
elseif strcmp(model.optimization.min_or_max,'max')
  min_or_max = -1;
else
  disp('maximize or minimize? model.optimization.min_or_max not properly defined')
  return
end

% for statistics
nr_fct = 0; % counter for number of function calls
nr_grad = 0; % counter for number of gradient calculations
nRB = []; % basis sizes used for reduced simulations
nRB_der = []; % basis sizes used for reduced derivative simulations

% save current mu values of the model
mu_original = get_mu(model);

% set the mus in the model to the mus given in init_params
%model.(model.mu_names{k})=model.optimization.init_params(k);
model = set_mu(model, model.optimization.init_params);

% get initial parameters that should be optimized
x = get_mu(model);
% get the according boundaries
[lower_bound, upper_bound] = get_bound_to_optimize(model);

tol = model.optimization.tol;
PM = x; % for opt_data: array with all the mu_i
output = []; % array containing the f(mu_k)
max_iter = 100; % maximal number of iterations of the gradient method
opti_iter = 0; % counter
%output_array={};

if strcmp(model.optimization.opt_mode,'detailed')
  % (branch unused in these experiments: the detailed run in step 11 also
  % uses the branch below, with detailed function pointers and model_data
  % passed in via the reduced_data argument)
else % here: reduced optimization

[grad_f_x] = model.optimization.get_Jacobian(model,reduced_data);
grad_f_x = grad_f_x*min_or_max; nr_grad = nr_grad+1;
norm_grad_f_x = norm(grad_f_x);
norm_grad_vec = norm_grad_f_x;
if model.compute_err_estimator
  [Delta_mu_vec,break_value_vec] = model.expensive_err_est(x,model,norm_grad_f_x);
end

continue_optimization = 1;

% START OF OPTIMIZATION
while (continue_optimization) && (opti_iter <= max_iter)
  d = -grad_f_x;

  % calculation of the stepsize t:
  switch model.optimization.stepsize_rule
    case {'fixed_stepsize'}
      t = model.optimization.stepsize_fix;
    case {'armijo'} % Armijo rule
      [model,t,nr_fct_stepsize,output, data] = stepsize_armijo(model, reduced_data, output, x, d);
      nr_fct = nr_fct+nr_fct_stepsize;
      nRB = [nRB,data.nRB];

    case {'dichotomie'} % (unused here; expects model_data in scope)
      [model,t,nr_fct_stepsize,output] = stepsize_dichotomie(model, model_data, output, x, d);
      nr_fct = nr_fct+nr_fct_stepsize;

    case {'exponential'} % (unused here; expects model_data in scope)
      if ~exist('exp_step')
        exp_step = 0;
      end
      [model,t,nr_fct_stepsize,output] = stepsize_exponential(model, model_data, output, x, d, exp_step);
      exp_step = exp_step+1;
      nr_fct = nr_fct+nr_fct_stepsize;

    case {'quotient'}
      if ~exist('quot_step')
        quot_step = 1;
      end
      [model,t,nr_fct_stepsize] = stepsize_quotient(model, x, d, quot_step);
      quot_step = quot_step+1;
      nr_fct = nr_fct+nr_fct_stepsize;
      %C_alpha = t; % update of the maximum stepsize in this step for error estimation
      %[C_L,model] = model.get_lipschitz_constant(model); % update Lipschitz constant

    % WOLFE POWELL NOT WORKING AT THE MOMENT!!!
    case {'wolfepowell'}
      [t, nr_fct_stepsize, nr_grad_stepsize] = stepsize_wolfe_powell(model, x, d);
      nr_fct = nr_fct+nr_fct_stepsize;
      nr_grad = nr_grad+nr_grad_stepsize;

    otherwise
      fprintf('unknown stepsize rule')
      opt_data = [];
      return
  end

  opti_iter = opti_iter+1;
  x = x+t*d;
  if model.verbose > 3
    disp(['Next parameter in reduced gradient optimization: ',num2str(x(:)')]);
    disp('------------------------------------')
  end
  % check whether the new x is allowed by the according mu_range or whether
  % it exceeds the boundaries. If so: set this component to the boundary.
  for k = 1:length(x)
    if x(k) > upper_bound(k)
      x(k) = upper_bound(k);
    elseif x(k) < lower_bound(k)
      x(k) = lower_bound(k);
    end
  end

  PM = [PM, x(:)];
  model = set_mu(model,x); % write the actual parameter setting into the model

  % calculate the gradient at the new parameter point:
  [grad_f_x] = model.optimization.get_Jacobian(model,reduced_data);
  norm_grad_f_x = norm(grad_f_x);
  norm_grad_vec = [norm_grad_vec,norm_grad_f_x];
  %output=output_array{1};
  grad_f_x = grad_f_x*min_or_max;
  nr_grad = nr_grad+1;

  % decide whether to continue or not
  %break_value = 2*model.gamma_H^2*model.L_H*(norm_grad_f_x+norm(Delta_grad(:,end)));
  if model.compute_err_estimator
    [Delta_mu,break_value] = model.expensive_err_est(x,model,norm_grad_f_x);
    break_value_vec = [break_value_vec, break_value];
    Delta_mu_vec = [Delta_mu_vec, Delta_mu];
    disp(['Actual gradient norm :',num2str(norm_grad_f_x(:)')]);
    disp(['Value of breaking criterion: ', num2str(break_value)]);
  end
  %if (break_value<=1)&&(norm_grad_f_x<tol)
  if (norm_grad_f_x<tol)
    continue_optimization = 0;
  end

end
% END OF OPTIMIZATION


% if the function was maximized instead of minimized, the values must be
% multiplied by -1:
%output=output*min_or_max;

% setting opt_data
opt_data = [];
opt_data.optimal_params = x;
opt_data.nr_fct_calls = nr_fct;
opt_data.nr_grad_calc = nr_grad;
opt_data.parameter_sets = PM;
%opt_data.output = output;
%opt_data.max_output = max(output);
opt_data.max_output_paramsets = PM(:,end); % last parameter set (one column of PM)
opt_data.norm_grad_f_x = norm_grad_f_x;
opt_data.norm_grad_vec = norm_grad_vec;
if model.compute_err_estimator
  opt_data.nr_opti_iter = opti_iter;
  opt_data.Delta_mu = Delta_mu;
  opt_data.break_value = break_value;
  %opt_data.nRB = nRB;
  %opt_data.nRB_der = nRB_der;
  %opt_data.used_detailed_calculations = used_detailed_calculations;
  %opt_data.gamma = g_H;
  %opt_data.Lipschitz_H = L_H;
  opt_data.break_value_vec = break_value_vec;
  opt_data.Delta_mu_vec = Delta_mu_vec;
end
end


%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Improvement possibilities:
% - offline-online decomposition of the reduced optimization
%   functional
% - extended RB simulation: offline-online decomposition
% - make RB basis generation more stable (error estimator below 1e-6)?
%   currently about 20 basis vectors, then oscillations start
% - determine the number of function evaluations for detailed/reduced
% - use the gradient scheme for optimization and view improvements and
%   validity of the error estimator
% - nicer Hessian

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Findings:
% - full optimization: 35 sec, reduced: 0.35 sec, i.e. a factor of
%   about 100 acceleration