% rbmatlab 1.13.10 -- online_greedy.m
% (documentation-export header residue, kept as a comment so the file parses)
function res = online_greedy(step)
% function res = online_greedy(step)
%
% Test/demo driver for the online-greedy (dictionary-based) reduced basis
% procedure with the multiscale thermalblock model. The argument 'step'
% selects one of several experiments:
%   step = 0   : inspect the multiscale model (plots, GUIs, small RB demo)
%   step = 1   : timing of standard RB simulations with a small basis
%   step = 2   : build dictionary, adaptive online simulation for one
%                training and one test parameter, plot residual decay
%   step = 3   : timing comparison of dictionary-based vs. standard vs.
%                optimized RB simulations on training/distorted/random mus
%   step = 4   : placeholder (debug Delta of new rb simulation)
%   step = 5.0 : greedy error decay for different block sizes B1 x B2
%   step = 5.1 : comparison with a standard greedy basis
%
% Returns res = [] (results are displayed/saved, not returned). Several
% cases stop in 'keyboard' mode for interactive inspection.

% function for test of online-greedy procedure with the new
% thermalblock model

% B. Haasdonk 17.2.2012

if nargin<1
  step = 0 % test multiscale model (no semicolon: echoes the chosen step)
end;
res = [];

% default model configuration: 16 x 16 multiscale thermalblock
params.B1 = 16;
params.B2 = 16;
params.Qa = 3; % number of affine components (comment said "at most 4 parameters")
params.mu_range = [0.01,10];
model = multiscale_thermalblock_model(params);

%params.mu_range = [0.01,100];
%params.parameter_pattern = [0,1,0,0;
%                            1,1,0,0;
%                            0,1,1,1;
%                            0,1,0,1];
%model = twoscale_thermalblock_model(params);

% plug in the dictionary-based offline/online routines defined below
model.gen_reduced_data = @dictionary_gen_reduced_data;
model.gen_detailed_data = @dictionary_gen_detailed_data;
model.rb_simulation = @dictionary_rb_simulation;
model.rb_online_mode = 0; % standard rb simulation
%params.B1 = 1;
%params.B2 = 2;
%model = thermalblock_model(params);
% generate basis of up to 400 vectors;
model.RB_stop_Nmax = 1000;
model.RB_stop_epsilon = 1e-6;
model.RB_train_size = 10000;

model_data = gen_model_data(model);

switch step
  case 0 % test multiscale model: GUIs, parameter-mapping plot, diffusivity plot
    % mu = rand_uniform(1,model.mu_ranges);
    % mu = [10,0.01,0.001];
    % mu = [0.001,10,1,0.01];
    % model = set_mu(model,mu);
    % sim_data = detailed_simulation(model,model_data);
    plot_params.subsampling_level = 0; % for linear
    plot_params.axis_equal = 1;
    plot_params.axis_tight = 1;
    % plot_sim_data(model,model_data,sim_data,plot_params);
    detailed_data = model_data;
    detailed_data.RB =[];
    demo_detailed_gui(model,detailed_data,[],plot_params);

    % visualize the block-to-parameter mapping as a pcolor plot
    % (pcolor drops the last row/column, hence the zero padding)
    Perm = reshape(model.parameter_mapping,model.B1,model.B2);
    % Perm(2) = 100;
    Perm = Perm';
    Perm = [Perm,zeros(model.B2,1);zeros(1,model.B1+1)];
    figure, pcolor(Perm);
    colormap;
    keyboard;

    % assume diagonal diff-tensor, then scalar conversion is sum/2
    diff_scalar = @(varargin) sum(model.diffusivity_tensor(varargin{:}),2)/2;
    diff_h = femdiscfunc([],model_data.df_info);
    model.decomp_mode = 0;
    diff_h = fem_interpol_global(diff_scalar,diff_h,model);
    figure,plot(diff_h);

    % small snapshot basis and RB demo GUI
    model.RB_stop_Nmax = 20;
    model.RB_train_size = 20;
    detailed_data = gen_detailed_data(model,model_data);
    model.N = size(detailed_data.RB,2);
    %model_data = gen_model_data(dmodel);
    %detailed_data = gen_detailed_data(dmodel,model_data);
    %demo_rb_gui(rmodel,detailed_data,[],plot_params);
    %plot_params.axis_tight = 1;
    %plot_params.yscale_uicontrols = 0.5;
    demo_rb_gui(model,detailed_data);
    keyboard;

  case 1
    % time-measurement for 10 ordinary RB simulations with a small basis
    model.RB_stop_Nmax = 10;
    model.RB_train_size = 100;
    detailed_data= filecache_function(@gen_detailed_data,model,model_data);

    nruns = 10;
    t = zeros(nruns,1);
    reduced_data = gen_reduced_data(model,detailed_data);
    for i = 1:nruns
      mu = rand_uniform(1,model.mu_ranges);
      model = set_mu(model,mu);
      tic;
      rb_sim_data= rb_simulation(model,reduced_data);
      t(i) = toc;
    end;
    disp(['mean time for rb simulation (with few basis vectors): ',num2str(mean(t))])

  case 2
    % construct dictionary and adaptive online simulations for
    % train and test point

    % construct dictionary and offline data
    disp('constructing offline dictionary');
    model.rb_online_mode = 1; % -Delta(v) online adaptivity
    detailed_data = filecache_function(...
        @dictionary_gen_detailed_data,model,model_data);
    disp(['computed dictionary in ',...
          num2str(detailed_data.RB_info.time),...
          ' sec.']);

    % online simulation with adaptive algorithm for training point
    disp('generating reduced_data');
    % reduced_data = filecache_function(@dictionary_gen_reduced_data,model,detailed_data);
    % model.dummy is a cache-busting field: changing it forces
    % filecache_function to recompute instead of loading from cache
    model.dummy = 0.1;
    reduced_data = filecache_function(@dictionary_gen_reduced_data,model,detailed_data);
    % keyboard;

    model.RB_online_stop_Nmax = 50; % online max 50 basis vectors
    disp('performing online simulation for training parameter 5');
    model = set_mu(model,detailed_data.RB_info.mus(:,5));
    % for debugging:
    reduced_data.RB = detailed_data.RB;
    reduced_data.df_info = detailed_data.df_info;
    reduced_data.grid = detailed_data.grid;

    % no semicolon: rb_sim_data is echoed intentionally
    rb_sim_data = dictionary_rb_simulation(model,reduced_data)
    rb_sim_data= dictionary_rb_reconstruction(detailed_data,rb_sim_data);
    plot_sim_data(model,model_data,rb_sim_data,[]);
    title('reduced solution')

    figure, plot(rb_sim_data.eps_sequence,'b*');
    title('residual norm over online iterations');
    set(gca,'Yscale','log');

    disp('performing online simulation for test parameter');
    mu = [0.1, ones(1,length(model.mu_ranges)-1)];
    % mu = detailed_data.RB_info.mus(:,5)+0.01*rand(1);

    model = set_mu(model,mu);
    % for debugging:
    % reduced_data.RB = detailed_data.RB;
    % reduced_data.df_info = detailed_data.df_info;

    rb_sim_data = dictionary_rb_simulation(model,reduced_data)
    rb_sim_data= dictionary_rb_reconstruction(detailed_data,rb_sim_data);
    figure;
    plot_sim_data(model,model_data,rb_sim_data,[]);
    title('reduced solution')

    figure, plot(rb_sim_data.eps_sequence,'b*');
    title('residual norm over online iterations');
    set(gca,'Yscale','log');

    keyboard;

  case 3 % time measurement

    % use_dictionary_gen_reduced_data = 0;
    % more efficient due to better ordering of reduced data:
    % use_dictionary_gen_reduced_data = 1;
    model.rb_online_mode = 1; % -Delta(v) online adaptivity

    mu_init = get_mu(model);
    disp('constructing offline dictionary');
    detailed_data = filecache_function(...
        @dictionary_gen_detailed_data,model,model_data);
    disp(['computed dictionary in ',...
          num2str(detailed_data.RB_info.time),...
          ' sec.']);

    disp('generating reduced_data');
    model.dummy = 1.2; % counter: change for enforcing filecaching recomputation
    % if use_dictionary_gen_reduced_data
    %   disp('using new efficient gen_reduced_data');
    reduced_data = filecache_function(@dictionary_gen_reduced_data, ...
                                      model,detailed_data);
    % else
    %   reduced_data = filecache_function(@lin_stat_gen_reduced_data, ...
    %                                     model,detailed_data);
    % end;
    % keyboard;

    % --- experiment 1: dictionary RB on exact training parameters -----
    disp('-----------------------------------------------------');
    % time-measurement for 10 adaptive RB simulations over training parameters
    disp('time measurement over 10 dictionary rb simulations over training parameters')
    nruns = 10;
    model.RB_online_stop_Nmax = 50; % online max 50 basis vectors
    ts_lincomb = zeros(nruns,1);
    ts_solution = zeros(nruns,1);
    Deltas = zeros(nruns,1);
    Ns = zeros(nruns,1);
    for i = 1:nruns
      mu = detailed_data.RB_info.mus(:,i);
      model = set_mu(model,mu);
      rb_sim_data= dictionary_rb_simulation(model,reduced_data);
      % rb_sim_data= rb_simulation(model,reduced_data);
      ts_lincomb(i) = rb_sim_data.t_lincomb;
      ts_solution(i) = rb_sim_data.t_solution;
      Deltas(i) = rb_sim_data.Delta;
      Ns(i) = length(rb_sim_data.uN);
    end;
    disp(['mean time for rb linear combinations: ',num2str(mean(ts_lincomb))])
    disp(['mean time for rb basis choice and simulation: ',num2str(mean(ts_solution))])
    disp(['mean dimension of RB model: ',num2str(mean(Ns))])
    disp(['maximum Delta of RB model: ',num2str(max(Deltas))])

    % --- experiment 2: dictionary RB on slightly distorted training mus
    disp('-----------------------------------------------------');
    % time-measurement for 10 adaptive RB simulations over training parameters
    disp('time measurement over 10 dictionary rb simulations over distorted training parameters')
    % NOTE(review): RandStream.setDefaultStream is deprecated in newer
    % MATLAB releases (use RandStream.setGlobalStream) -- kept for
    % compatibility with the MATLAB version this was written for.
    RandStream.setDefaultStream(RandStream('mt19937ar','seed',100000));
    nruns = 10;
    model.RB_online_stop_Nmax = 50; % online max 50 basis vectors
    ts_lincomb = zeros(nruns,1);
    ts_solution = zeros(nruns,1);
    Deltas = zeros(nruns,1);
    Ns = zeros(nruns,1);
    for i = 1:nruns
      % mu = detailed_data.RB_info.mus(:,i) + 0.000001*rand_uniform(1,model.mu_ranges);
      mu = detailed_data.RB_info.mus(:,i) + 0.0001*rand_uniform(1,model.mu_ranges);
      % mu = rand_uniform(1,model.mu_ranges);
      model = set_mu(model,mu);
      rb_sim_data= dictionary_rb_simulation(model,reduced_data);
      % rb_sim_data= rb_simulation(model,reduced_data);
      ts_lincomb(i) = rb_sim_data.t_lincomb;
      ts_solution(i) = rb_sim_data.t_solution;
      Deltas(i) = rb_sim_data.Delta;
      Ns(i) = length(rb_sim_data.uN);
    end;
    disp(['mean time for rb linear combinations: ',num2str(mean(ts_lincomb))])
    disp(['mean time for rb basis choice and simulation: ',num2str(mean(ts_solution))])
    disp(['mean dimension of RB model: ',num2str(mean(Ns))])
    disp(['maximum Delta of RB model: ',num2str(max(Deltas))])

    % --- experiment 3: dictionary RB on random parameters -------------
    disp('-----------------------------------------------------');
    % time-measurement for 10 adaptive RB simulations
    disp(['time measurement over 10 dictionary rb simulations on random ',...
          'parameters']);
    nruns = 10;
    model.RB_online_stop_Nmax = 30; % online max 30 basis vectors
    ts_lincomb = zeros(nruns,1);
    ts_solution = zeros(nruns,1);
    Deltas = zeros(nruns,1);
    Ns = zeros(nruns,1);
    for i = 1:nruns
      mu = rand_uniform(1,model.mu_ranges);
      model = set_mu(model,mu);
      rb_sim_data= dictionary_rb_simulation(model,reduced_data);
      % rb_sim_data= rb_simulation(model,reduced_data);
      ts_lincomb(i) = rb_sim_data.t_lincomb;
      ts_solution(i) = rb_sim_data.t_solution;
      Deltas(i) = rb_sim_data.Delta;
      Ns(i) = length(rb_sim_data.uN);
    end;
    disp(['mean time for rb linear combinations: ',num2str(mean(ts_lincomb))])
    disp(['mean time for rb basis choice and simulation: ',num2str(mean(ts_solution))])
    disp(['mean dimension of RB model: ',num2str(mean(Ns))])
    disp(['maximum Delta of RB model: ',num2str(max(Deltas))])

    % --- experiment 4: standard RB (full dictionary basis) ------------
    disp('-----------------------------------------------------');
    disp('-----------------------------------------------------');
    % time-measurement for 10 ordinary RB simulations with full basis
    disp(['time measurement over 10 standard rb simulations over', ...
          ' training parameters']);
    model.rb_online_mode = 0; % standard rb simulation
    model.dummy = 1; % different from optimized from below
    warning off
    nruns = 10;
    t = zeros(nruns,1);
    Deltas = zeros(nruns,1);
    for i = 1:nruns
      mu = detailed_data.RB_info.mus(:,i);
      model = set_mu(model,mu);
      rb_sim_data= rb_simulation(model,reduced_data);
      Deltas(i) = rb_sim_data.Delta;
      ts_lincomb(i) = rb_sim_data.t_lincomb;
      ts_solution(i) = rb_sim_data.t_solution;
    end;
    disp(['mean time for rb linear combinations: ',num2str(mean(ts_lincomb))])
    disp(['mean time for rb basis choice and simulation: ',num2str(mean(ts_solution))])
    disp(['maximum Delta of RB model: ',num2str(max(Deltas))])
    disp(['basis size N = ', num2str(size(detailed_data.RB,2))]);

    % --- experiment 5: optimized standard RB (lin_stat routines) ------
    disp('-----------------------------------------------------');
    clear('reduced_data');
    % time-measurement for 10 optimized RB simulations
    disp(['time measurement over 10 optimized rb simulations over', ...
          ' training parameters']);
    Nmax = size(detailed_data.RB,2);
    model.rb_online_mode = 0; % standard rb simulation
    warning off
    nruns = 10;
    t = zeros(nruns,1);
    Deltas = zeros(nruns,1);
    model = set_mu(model,mu_init);
    model.gen_reduced_data = @lin_stat_gen_reduced_data;
    model.rb_simulation = @lin_stat_rb_simulation;
    model.dummy = 2;
    reduced_data = filecache_function(@gen_reduced_data,model,detailed_data);

    for i = 1:nruns
      mu = detailed_data.RB_info.mus(:,i);
      model = set_mu(model,mu);
      tic;
      rb_sim_data= rb_simulation(model,reduced_data);
      Deltas(i) = rb_sim_data.Delta;
      t(i) = toc;
    end;

    disp(['mean time for standard rb simulation (with ',num2str(Nmax),' basis vectors): ',num2str(mean(t))])
    disp(['maximum Delta of RB model: ',num2str(max(Deltas))])

    % --- experiment 6: optimized standard RB on random parameters -----
    disp('-----------------------------------------------------');
    % clear('reduced_data');
    % time-measurement for 10 optimized RB simulations
    disp(['time measurement over 10 optimized rb simulations over', ...
          ' random parameters'])
    RandStream.setDefaultStream(RandStream('mt19937ar','seed',100000));
    warning off
    Nmax = size(detailed_data.RB,2);
    nruns = 10;
    t = zeros(nruns,1);
    Deltas = zeros(nruns,1);
    model = set_mu(model,mu_init);
    % model.gen_reduced_data = @lin_stat_gen_reduced_data;
    % model.rb_simulation = @lin_stat_rb_simulation;
    % reduced_data = filecache_function(@gen_reduced_data,model,detailed_data);
    for i = 1:nruns
      mu = rand_uniform(1,model.mu_ranges);
      model = set_mu(model,mu);
      tic;
      rb_sim_data= rb_simulation(model,reduced_data);
      Deltas(i) = rb_sim_data.Delta;
      t(i) = toc;
    end;
    disp(['mean time for standard rb simulation (with ',num2str(Nmax),' basis vectors): ',num2str(mean(t))])
    disp(['maximum Delta of RB model: ',num2str(max(Deltas))])

  case 4 % debug delta of new rb simulation (placeholder, intentionally empty)

  case 5.0

    % for different block sizes determine greedy error decay

    % B1s = [10,15,20,25,30,35,40];
    % B2s = [10,15,20,25,30,35,40];
    % B1s = [10,15,20,25];
    % B2s = [10,15,20,25];
    %=> errs = 5.9417 11.8685 12.2661 13.9614
    %          6.4436  7.8761  9.1320 11.1612
    %          7.7663  8.0952  8.9250  9.1970
    %          9.0527  7.9496  9.1340  9.2640
    B1s = [5,10,15];
    B2s = [20,25,30,35,40];

    nB1s = length(B1s);
    nB2s = length(B2s);
    errs = zeros(nB1s,nB2s);

    % for each (B1,B2) combination: rebuild the model, run a greedy of
    % size 30 and record the smallest max-error-estimator reached
    for n1 = 1:nB1s
      for n2 = 1:nB2s
        params.B1 = B1s(n1);
        params.B2 = B2s(n2);
        % B1=B2=20 leads to N = 73, max error estimator: 0.001505
        % params.numintervals_per_block = 5*5;
        params.Qa = 3; % at most 3 parameters
        params.mu_range = [0.01,10];
        model = multiscale_thermalblock_model(params);
        model.rb_online_mode = 0; % standard RB
        model.gen_detailed_data = @dictionary_gen_greedy_detailed_data;
        model.gen_reduced_data = @dictionary_gen_reduced_data;
        % model.gen_detailed_data = @dictionary_gen_detailed_data;
        model.rb_simulation = @dictionary_rb_simulation;
        model.RB_stop_epsilon = 1e-6;
        model.RB_train_size = 10000;
        model.RB_stop_Nmax = 30;
        model_data = gen_model_data(model);
        disp(['------------------------------------------------------' ...
              ' -----------'])
        disp(['generating detailed greedy basis of size 30 for ',...
              'B1 = ',num2str(B1s(n1)),', B2 = ',num2str(B2s(n2))]);
        detailed_data = [];
        detailed_data = gen_detailed_data(model,model_data);
        errs(n1,n2) = min(detailed_data.RB_info.max_error_sequence);
      end;
    end;

    errs % no semicolon: display the error table
    disp('please choose manually maximal error in above');
    keyboard;

    % then choose these values in case 5.1

  case 5.1 % compare with greedy basis

    % NOTE(review): 'clear all' inside a function clears the function
    % workspace (params, model, model_data from above); params is
    % re-created immediately below, so this works, but it is fragile.
    clear all;
    params.B1 = 20;
    params.B2 = 20;
    % B1=B2=20 leads to N = 73, max error estimator: 0.001505
    % params.numintervals_per_block = 5*5;
    params.Qa = 3; % at most 3 parameters
    params.mu_range = [0.01,100];
    model = multiscale_thermalblock_model(params);
    model.gen_reduced_data = @dictionary_gen_reduced_data;
    model.gen_detailed_data = @dictionary_gen_detailed_data;
    model.rb_simulation = @dictionary_rb_simulation;

    % s = warning('error','MATLAB:nearlySingularMatrix');
    warning('OFF','MATLAB:nearlySingularMatrix');

    %%%%%%%%%%%%%%%%%%%%%%%%%%%%%

    % first generate standard greedy basis (no orthonormalization)
    % goal: about 500 vectors

    % model.rb_online_mode = 1; % -Delta(v) online adaptivity
    model.RB_stop_epsilon = 1e-8;
    model.RB_train_size = 10000;
    model.RB_stop_Nmax = 500;
    model.rb_online_mode = 0; % standard RB
    model.gen_detailed_data = @dictionary_gen_greedy_detailed_data;
    model_data = gen_model_data(model);
    % if exist('greedy_basis.mat')
    %   disp('skipping generation of greedy basis');
    % else
    detailed_data = filecache_function(...
        @gen_detailed_data,model,model_data);
    disp(['computed greedy basis in ',...
          num2str(detailed_data.RB_info.time),...
          ' sec.']);
    save('greedy_basis3.mat','model','detailed_data');
    % end;
    keyboard;

    detailed_data = [];
    % now construct adaptive online dictionary of about 1000 vectors
    model.rb_online_mode = 1; % adaptive online RB
    model.RB_online_stop_Nmax = 30; % online max 30 basis vectors
    model.RB_stop_Nmax = 500; % dictionary of larger size
    detailed_data = filecache_function(...
        @gen_detailed_data,model,model_data);
    disp(['computed dictionary in ',...
          num2str(detailed_data.RB_info.time),...
          ' sec.']);
    save('greedy_dictionary.mat','model','detailed_data');

  otherwise
    error('step unknown');
end;
453 
454 
455 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
456 % auxiliary functions
457 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
458 
function reduced_data = dictionary_gen_reduced_data(model,detailed_data)
% function reduced_data = dictionary_gen_reduced_data(model,detailed_data)
%
% Generate reduced operators and residual-Gramian components for the
% dictionary-based online-greedy RB simulation.
%
% Computes, for the affine decomposition A(mu) = sum_q a_q(mu) A^q,
% f(mu) = sum_q f_q(mu) f^q:
%   reduced_data.AN_comp{q} : RB' * A^q * RB   (reduced operator components)
%   reduced_data.fN_comp{q} : RB' * f^q        (reduced rhs components)
%   reduced_data.lN_comp{q} : RB' * l^q        (if output functional requested)
%   reduced_data.bar_Hff    : Qf x Qf Gramian of rhs Riesz-representers
%   reduced_data.bar_Haf    : N x (Qa*Qf) block matrix of mixed products
%   reduced_data.bar_Haa    : N^2 x Qa^2 block matrix of operator products
% so that the online stage can assemble the residual Gram matrix G(mu)
% by cheap coefficient contractions (see dictionary_rb_simulation).
%
% Improvement over the original version: the inner-product solves
% v = K \ rhs were performed INSIDE the component loops (once per
% column), recomputing the full multi-rhs solve Qf resp. Qa times with
% identical final result; they are now hoisted after the columns are
% assembled -- one multi-rhs solve for v_f and one per basis vector for
% v_a{n}.

reduced_data = [];
old_mode = model.decomp_mode;
model.decomp_mode = 1; % == components
[A_comp,f_comp] = model.operators(model,detailed_data);
Qa = length(A_comp);
reduced_data.Qa = Qa;
Qf = length(f_comp);
reduced_data.Qf = Qf;

% reduced operator components RB' * A^q * RB
reduced_data.AN_comp = cell(1,length(A_comp));
for q = 1:Qa
  reduced_data.AN_comp{q} = ...
      detailed_data.RB'*A_comp{q}*detailed_data.RB;
end;

% reduced rhs components RB' * f^q
reduced_data.fN_comp = cell(1,length(f_comp));
for q = 1:Qf
  reduced_data.fN_comp{q} = ...
      detailed_data.RB' * f_comp{q};
end;

if model.compute_output_functional
  % assumption: nonparametric output functional, then simple RB
  % evaluation possible
  l_comp = ...
      model.operators_output(model,detailed_data);
  Q_l = length(l_comp);
  reduced_data.lN_comp = cell(1,Q_l);
  for q = 1:Q_l
    reduced_data.lN_comp{q} = detailed_data.RB' * l_comp{q};
  end;
end;

N = model.get_rb_size(model,detailed_data);
reduced_data.N = N;

% --- error estimation quantities ---------------------------------------
% Riesz-representer coefficient vectors: K * v_f^q = f^q and
% K * v_a^{n,q} = A^q * RB(:,n), with K the inner-product matrix.
K = model.get_inner_product_matrix(detailed_data);

% assemble all rhs columns, then ONE multi-rhs solve (was re-solved
% inside the loop for every q in the original implementation)
K_v_f = zeros(size(detailed_data.RB,1),Qf);
for q = 1:Qf
  K_v_f(:,q) = f_comp{q};
end;
v_f = K \ K_v_f;

% same hoisting per basis vector: fill all Qa columns, solve once
v_a = cell(N,1);
K_v_a = cell(N,1);
for n = 1:N
  K_v_a{n} = zeros(size(K,1),Qa);
  for q = 1:Qa
    K_v_a{n}(:,q) = (A_comp{q}*detailed_data.RB(:,n));
  end;
  v_a{n} = K \ K_v_a{n};
end;

% assemble Qf x Qf matrix bar_Hff with entries <v_f^q, v_f^q'>
reduced_data.bar_Hff = v_f' * K_v_f;

% assemble N x (Qa*Qf) matrix bar_Haf: row n, column block q holds the
% row vector (<v_f^q, v_a^{n,1}>, ..., <v_f^q, v_a^{n,Qa}>)
Haf = zeros(N,Qa * Qf);
for q = 1:Qf
  for n = 1:N
    % insert row vector in block matrix:
    vaf = v_f(:,q)' * K_v_a{n};
    Haf(n,(1+(q-1)*Qa):(q*Qa)) = vaf;
  end;
end;
reduced_data.bar_Haf = Haf;

% assemble N^2 x Qa^2 matrix bar_Haa: row (n1-1)*N+n2, column block q
% holds the row vector (<v_a^{n2,q}, v_a^{n1,1}>, ..., <v_a^{n2,q}, v_a^{n1,Qa}>)
Haa = zeros(N*N,Qa*Qa);
for q = 1:Qa % loop over column-blocks
  for n1 = 1:N % loop over block-rows
    for n2 = 1:N % loop over rows within one block;
      vaa = v_a{n2}(:,q)' * K_v_a{n1};
      Haa((n1-1)*N + n2, (1+(q-1)*Qa):q*Qa) = vaa;
    end;
  end;
end;
reduced_data.bar_Haa = Haa;

% marker so the online stage knows which Gramian layout is stored
reduced_data.used_dictionary_gen_reduced_data = 1;
% set back old model mode (NOTE: model is pass-by-value in MATLAB, so
% this only affects the local copy; kept for symmetry with the caller)
model.decomp_mode = old_mode;
608 
function detailed_data = dictionary_gen_detailed_data(model,model_data)
% function detailed_data = dictionary_gen_detailed_data(model,model_data)
%
% Build a snapshot dictionary: draw RB_train_size random parameters,
% compute one detailed simulation per parameter and store the dof
% vectors column-wise in detailed_data.RB (no orthonormalization).
% Also records the drawn parameters (RB_info.mus) and the wall-clock
% build time (RB_info.time).
tic;
% one initial simulation just to obtain the dof-space dimension
sim_data = detailed_simulation(model,model_data);
h10_matrix = sim_data.uh.df_info.h10_inner_product_matrix;
ndofs = size(h10_matrix,1);
ntrain = model.RB_train_size;
mu_train = rand_uniform(ntrain,model.mu_ranges);
snapshots = zeros(ndofs,ntrain);
for k = 1:ntrain
  model = model.set_mu(model,mu_train(:,k));
  sim_data = detailed_simulation(model,model_data);
  snapshots(:,k) = sim_data.uh.dofs;
  fprintf('.'); % progress indicator, one dot per snapshot
end;
fprintf('\n');
detailed_data = model_data;
detailed_data.RB_info.mus = mu_train;
detailed_data.RB_info.time = toc;
detailed_data.RB = snapshots;
626 
627 % the following should both work for standard rb as well as dictionary
628 % generating a greedy RB only doing a normalization,
629 % but no orthogonalization
% the following should both work for standard rb as well as dictionary
% generating a greedy RB only doing a normalization,
% but no orthogonalization
function detailed_data = dictionary_gen_greedy_detailed_data(model,model_data)
% function detailed_data = dictionary_gen_greedy_detailed_data(model,model_data)
%
% Greedy RB basis generation over a log-uniform random training set:
% repeatedly find the training parameter with the largest error
% estimator Delta, compute its detailed solution, and append the
% H1_0-normalized (NOT orthogonalized) snapshot to the basis, until
% RB_stop_epsilon or RB_stop_Nmax is reached. A singular reduced system
% (caught exception) also terminates the extension loop.
%
% Fixes over the original version:
%  * detailed_data.RB_info.mus was assigned BEFORE detailed_data was
%    overwritten with model_data, so the training parameters were lost;
%    the assignment is now placed after the overwrite (step 3 of the
%    driver reads RB_info.mus).
%  * scalar short-circuit '&&' instead of element-wise '&' in the
%    while condition.
tic;
% fixed seed for reproducible training sets
% (setDefaultStream is deprecated in newer MATLAB; kept for compatibility)
RandStream.setDefaultStream(RandStream('mt19937ar','seed',1234));
% logarithmically distributed training parameters instead of uniform
mus = rand_log(model.RB_train_size,model.mu_ranges);
max_err_est = 1e10; % sentinel: forces at least one greedy sweep
% initialize basis with the normalized snapshot of the first parameter
model = set_mu(model,mus(:,1));
sim_data = detailed_simulation(model,model_data);
n = fem_h10_norm(sim_data.uh);
detailed_data = model_data;
% record training parameters AFTER detailed_data is initialized from
% model_data (the original code assigned this earlier and lost it)
detailed_data.RB_info.mus = mus;
detailed_data.RB = sim_data.uh.dofs / n;
reduced_data = gen_reduced_data(model,detailed_data);
h10_matrix = sim_data.uh.df_info.h10_inner_product_matrix;
fprintf('\n');
max_error_sequence = [];

try
  while ( max_err_est> model.RB_stop_epsilon) && ...
        (size(detailed_data.RB,2)<model.RB_stop_Nmax)
    % sweep the training set for the worst-approximated parameter
    max_err_est = 0;
    mu_max = [];
    for i = 1:size(mus,2);
      model = model.set_mu(model,mus(:,i));
      rb_sim_data = rb_simulation(model,reduced_data);
      if rb_sim_data.Delta > max_err_est
        mu_max = mus(:,i);
        max_err_est = rb_sim_data.Delta;
      end;
    end;
    disp(['max error estimator: ',num2str(max_err_est),...
          ' for mu = ',num2str(mu_max')]);
    % extend basis by the normalized detailed solution at mu_max
    model = model.set_mu(model,mu_max);
    sim_data = detailed_simulation(model,detailed_data);
    pr = sim_data.uh.dofs;
    % pr = sim_data.uh.dofs - ...
    %      detailed_data.RB * (detailed_data.RB' * h10_matrix * ...
    %                          sim_data.uh.dofs);
    n = sqrt(max(pr'*h10_matrix*pr,0)); % clamp to avoid sqrt of tiny negatives
    detailed_data.RB = [detailed_data.RB,pr/n];
    reduced_data = gen_reduced_data(model,detailed_data);
    disp(['new basis size: ',num2str(size(detailed_data.RB,2))]);
    max_error_sequence =[max_error_sequence, max_err_est];
  end;
catch % singular reduced system ends the extension gracefully
  disp('matrix close to singular... ending rb greedy extension loop');
end;

detailed_data.RB_info.time = toc;
detailed_data.RB_info.max_error_sequence = max_error_sequence;
688 
689 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
690 % Online rb simulation by online greedy minimizing redidual norm
691 % over dictionary
692 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Online rb simulation by online greedy minimizing residual norm
% over dictionary
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function rb_sim_data = dictionary_rb_simulation(model, ...
                                                reduced_data);
% function rb_sim_data = dictionary_rb_simulation(model,reduced_data)
%
% Reduced simulation on the dictionary data produced by
% dictionary_gen_reduced_data.
% model.rb_online_mode: 0 standard rb, no adaptivity
% model.rb_online_mode: 1 Delta-extended indicator for online extension
%
% Returns rb_sim_data with fields uN, Delta, t_lincomb, t_solution and,
% in mode 1, additionally eps_sequence, basis_indices, iter, current_eps.

% prepare mu-dependent quantities

rb_sim_data =[];

old_mode = model.decomp_mode;
model.decomp_mode = 2; % coefficients

tic; % measure linear combination time

% affine coefficients a_q(mu), f_q(mu) for the current parameter
[a_coeff,f_coeff] = ...
    model.operators(model,[]);

% assemble full reduced system over the whole dictionary
AN = lincomb_sequence(reduced_data.AN_comp, a_coeff);
fN = lincomb_sequence(reduced_data.fN_comp, f_coeff);
Qf = length(f_coeff);
Qa = length(a_coeff);
N = reduced_data.N;


% transform G matrix, such that later simple mult:
% G_mu matrix has <vf,vf>, <vf,va_phi_1(mu)> , <vf,va_phi_N>
% as first row and column, and <va_phi_i,va_phi_j(mu)> as
% remaining entries
% the following is the correct code, if lin_stat_reduced_data is
% passed as argument.
%if ~isfield(reduced_data,'used_dictionary_gen_reduced_data');
%  Saa = kron(speye(N),sparse(1:Qa,1,a_coeff(:),Qa,1));
%  S = [f_coeff(:), sparse([],[],[],Qf, N); ...
%       sparse([],[],[], Qa * N, 1), Saa];
%  G = S' * reduced_data.G * S;
%else
% contract the precomputed Gramian blocks with the coefficient vectors:
Hff = f_coeff' * reduced_data.bar_Hff * f_coeff;
af_coeff = a_coeff * f_coeff';
Haf = reduced_data.bar_Haf * af_coeff(:);
aa_coeff = a_coeff * a_coeff';
Haa_vec = reduced_data.bar_Haa * aa_coeff(:);
Haa = reshape(Haa_vec,[N,N]);
G = [Hff,Haf'; Haf,Haa];
%end;
G = 0.5*(G+G'); % G is positive definite; symmetrize against round-off
diag_G = diag(G);

t_lincomb = toc;

%if 0
%  disp('check, whether G has correct entries');
%  % get full operators
%  model.decomp_mode = 0; % complete
%  [A ,f] = ...
%      model.operators(model,reduced_data);
%  K = model.get_inner_product_matrix(reduced_data);
%  vf = K\f;
%  G(1,1)
%  vf'*K * vf
%  va_phi1 = K\(A*reduced_data.RB(:,1));
%  G(2,1)
%  vf'*K*va_phi1
%  G(2,2)
%  va_phi1'*K*va_phi1
%  keyboard;
%end;

tic;

if model.rb_online_mode == 0
  % standard rb simulation, no online enrichment
  uN = AN\fN;
  % the following works with standard lin_stat_gen_reduced_data:
  % Q_r = size(reduced_data.G,1);
  % neg_auN_coeff = -A_coeff * uN';
  % res_coeff = [f_coeff; neg_auN_coeff(:)];
  % res_norm_sqr = res_coeff' * reduced_data.G * res_coeff;
  % this is required for dictionary_gen_reduced_data:
  % residual norm via the contracted Gramian: ||r||^2 = [1;-uN]' G [1;-uN]
  res_coeff = [1;-uN];
  res_norm_sqr = res_coeff' * G * res_coeff;
  res_norm_sqr = max(res_norm_sqr,0); % clamp round-off negatives
  res_norm = sqrt(res_norm_sqr);
  rb_sim_data.Delta = ...
      res_norm/model.coercivity_alpha(model);

elseif model.rb_online_mode == 1
  % online enrichment by eta(v) = - Delta_N(v; Phi \cup v)
  diag_AN = diag(AN);

  %uN = ones(size(AN,1),1);

  % run iterative loop of online_greedy by minimum residual:
  % in each iteration, for EVERY candidate dictionary vector the reduced
  % solution of the extended system is obtained by a rank-one (Schur
  % complement) update, the candidate with smallest residual norm is
  % selected and moved from non_basis_indices to basis_indices.

  eps_sequence = [];         % residual norm after each extension
  current_eps = 1e10;        % do at least one extension
  basis_indices = [];        % dictionary indices selected so far
  non_basis_indices = 1:N;   % remaining candidate indices
  iter = 0;
  ANinv = [];                % inverse of the selected subsystem
  UN = zeros(0,N);           % candidate solutions, one column per candidate
  uN = zeros(0,1);           % current reduced solution coefficients

  while (current_eps > model.RB_stop_epsilon) && (iter < model.RB_online_stop_Nmax)
    % compute all new uN vectors at once:
    % (block partition of the extended system, see variable names:
    %  a = candidate diagonal entries, bar_a / bar_bar_a = coupling
    %  columns/rows, tilde_a = Schur complements, tilde_f = reduced rhs)
    a = diag_AN(non_basis_indices)';
    bar_a = AN(basis_indices,non_basis_indices);
    bar_bar_a = AN(non_basis_indices,basis_indices)';
    % tilde_f = fN(non_basis_indices)' - sum(bar_bar_a.*UN,1);
    tilde_f = fN(non_basis_indices)' - uN' * bar_bar_a;
    ANinv_bar_a = (ANinv*bar_a);
    tilde_a = a - sum(bar_bar_a .* ANinv_bar_a,1);
    % UN_old = UN;
    UN = repmat(uN,1,length(non_basis_indices));
    uN_old = uN;
    % rank-one update of all candidate solutions simultaneously
    UN = [UN; zeros(1,length(non_basis_indices))] ...
         + [-ANinv_bar_a;ones(1,length(non_basis_indices))] * ...
         spdiags((tilde_f./tilde_a)',0,length(tilde_f),length(tilde_f));
    UN_new = UN;
    % compute all residualnormssquare at once:
    G11 = G([1,1+basis_indices],[1,1+basis_indices]);
    UN1 = [ones(1,length(non_basis_indices)); - UN(1:end-1,:)];
    UN2 = -UN(end,:);
    G12 = G([1,1+basis_indices],1+non_basis_indices);
    g = [G11 * UN1 + G12*spdiags(UN2(:),0,length(UN2),length(UN2)); ...
         sum(G12 .* UN1,1) + diag_G(1+non_basis_indices)'.*UN2];
    resnormsqr = sum([ones(1,length(non_basis_indices)); - UN] .* g,1);
    % NOTE(review): min(resnormsqr<-1e-2) is nonzero only if ALL entries
    % are below -1e-2; 'any(resnormsqr<-1e-2)' was presumably intended
    % to flag a single negative residual -- confirm before changing.
    if min(resnormsqr<-1e-2)
      disp('error: negative residual, inspect workspace!');
      % test for first vector:
      keyboard;
    end;
    % if resnormsqr(3)>1e-10
    %   disp('error: residual for func 3 should be 0!!!');
    %   plot(resnormsqr);
    %   title('residual norms');
    %   % the following test was successful
    %   % G1 = [G11, G12(:,1); G12(:,1)', diag_G(1+1)];
    %   % g1 = G1 * [1; -UN(:,1)]
    %   % g(:,1)
    %   % resnormsqr1 = [1; -UN(:,1)]' * G1 * [1; -UN(:,1)]
    %   % resnormsqr(1)
    %   % G3 = [G11, G12(:,3); G12(:,3)', diag_G(1+3)];
    %   % resnormsqr3 = [1; -UN(:,3)]' * G3 * [1; -UN(:,3)]
    %   resnormsqr(3)
    %   keyboard;
    % end;
    % end;
    % take positive part to avoid numerical instabilities
    resnormsqr = max(resnormsqr,0);
    % search the candidate with MINIMAL residual norm
    [minresnormsqr, new_index] = min(resnormsqr);
    % select one if multiple minima
    current_eps = sqrt(minresnormsqr(1));
    eps_sequence = [eps_sequence, current_eps];
    new_index = new_index(1);
    % remove new basis vector from search set
    basis_indices = [basis_indices, non_basis_indices(new_index)];
    non_basis_indices = [non_basis_indices(1:new_index-1),...
                         non_basis_indices(new_index+1:end)];
    ANinv = inv(AN(basis_indices,basis_indices));
    uN = UN(:,new_index);
    UN = [UN(:,1:new_index-1),UN(:,new_index+1:end)];
    iter = iter + 1;
    % debugging aid: after 100 iterations, compare the incrementally
    % updated uN against a from-scratch reduced solve
    if iter > 100
      disp('investigate iteration')
      % true reduced solution with selected basis vectors of previous
      % step:
      detailed_data = reduced_data;
      detailed_data.RB = detailed_data.RB(:,basis_indices(1:end-1));
      reduced_data_tmp = gen_reduced_data(model,detailed_data);
      rb_sim_data_tmp = rb_simulation(model,reduced_data_tmp);
      disp('correct UN old:')
      rb_sim_data_tmp.uN
      disp('current UN_old:')
      uN_old
      % UN_new(:,new_index)

      % true reduced solution with selected basis vectors:
      detailed_data = reduced_data;
      detailed_data.RB = detailed_data.RB(:,basis_indices);
      reduced_data_tmp = gen_reduced_data(model,detailed_data);
      rb_sim_data_tmp = rb_simulation(model,reduced_data_tmp);
      disp('correct UN:')
      rb_sim_data_tmp.uN
      disp('current UN:')
      uN
      % UN_new(:,new_index)

      % recompute by hand:
      %disp('recomputed current uN')
      %uN_new = [uN_old; 0] + tilde_f(new_index)/tilde_a(new_index)*[-ANinv_bar_a(:,new_index);1]

      keyboard;
    end;
    % keyboard;
  end;

  % return outputs

  rb_sim_data.eps_sequence = eps_sequence;
  rb_sim_data.basis_indices = basis_indices;
  rb_sim_data.iter = iter;
  rb_sim_data.current_eps = current_eps;
  rb_sim_data.Delta = ...
      current_eps/model.coercivity_alpha(model);
end; % rb_online_mode

t_solution = toc;

rb_sim_data.uN = uN;
rb_sim_data.t_lincomb = t_lincomb;
rb_sim_data.t_solution = t_solution;

model.decomp_mode = old_mode;
906 
function rb_sim_data = dictionary_rb_reconstruction(detailed_data, ...
						  rb_sim_data);
% function rb_sim_data = dictionary_rb_reconstruction(detailed_data, rb_sim_data)
%
% Reconstruct the high-dimensional solution from the reduced one:
% the dictionary snapshots selected during the online-greedy run
% (columns detailed_data.RB(:,rb_sim_data.basis_indices)) are linearly
% combined with the reduced coefficient vector rb_sim_data.uN, and the
% result is stored in the dof vector of rb_sim_data.uh.
%
% Required fields of detailed_data: RB, df_info.
% Required fields of rb_sim_data: basis_indices, uN (uh is created if absent).

% lazily create the fem discrete-function container on first call
if ~isfield(rb_sim_data,'uh')
  rb_sim_data.uh = femdiscfunc([],detailed_data.df_info);
end;
% linear combination of the chosen dictionary columns with coefficients uN
selected_snapshots = detailed_data.RB(:,rb_sim_data.basis_indices);
rb_sim_data.uh.dofs = selected_snapshots * rb_sim_data.uN;
915 
916 % Nmax von 200 auf 400, QA hochschrauben, damit Greedy schwieriger
917 % Offline daten anders organisieren
% distorted training data vergroessern
919 % Greedy fuer N=400?
920 
921 
922 % RESULTS:
923 %>> online_greedy(3) with OLD lin_stat_gen_reduced_data:
924 %constructing offline dictionary
925 %call of @dictionary_gen_detailed_data, successfully read from cache, key=672472054
926 %computed dictionary in 15.0562 sec.
927 %generating reduced_data
928 %result not found in cache
929 %call of @lin_stat_gen_reduced_data, now computed and cached, key=923266952
930 %-----------------------------------------------------
931 %time measurement over 10 random dictionary rb simulations
932 %mean time for rb linear combinations: 0.089998
933 %mean time for rb basis choice and simulation: 0.03184
934 %mean dimension of RB model: 30
935 %mean Delta of RB model: 0.001852
936 %-----------------------------------------------------
937 %time measurement over 10 dictionary rb simulations over training parameters
938 %mean time for rb linear combinations: 0.085595
939 %mean time for rb basis choice and simulation: 0.00063634
940 %mean dimension of RB model: 1
941 %mean Delta of RB model: 1.8032e-008
942 %-----------------------------------------------------
943 %time measurement over 10 dictionary rb simulations over distorted training parameters
944 %mean time for rb linear combinations: 0.085588
945 %mean time for rb basis choice and simulation: 0.0049095
946 %mean dimension of RB model: 7.4
947 %mean Delta of RB model: 2.2049e-006
948 %-----------------------------------------------------
949 %time measurement over 10 random standard rb simulations
950 %call of @gen_reduced_data, successfully read from cache, key=454421169
951 %mean time for standard rb simulation (with 350 basis vectors): 0.052783
952 %mean Delta of RB model: 2.4026e-006
953 %-----------------------------------------------------
954 %time measurement over 10 training standard rb simulations
955 %mean time for standard rb simulation (with 350 basis vectors): 0.05388
956 %mean Delta of RB model: 5.7061e-006
957 %
958 %-------------------------------------------------------
% IMPROVEMENT IN THE FOLLOWING: 0.02 sec in linear combination:
960 %-------------------------------------------------------
961 %
962 %constructing offline dictionary
963 %call of @dictionary_gen_detailed_data, successfully read from cache, key=672472054
964 %computed dictionary in 15.0562 sec.
965 %generating reduced_data
966 %result not found in cache
967 %call of @dictionary_gen_reduced_data, now computed and cached, key=748022049
968 %-----------------------------------------------------
969 %time measurement over 10 random dictionary rb simulations
970 %mean time for rb linear combinations: 0.065533
971 %mean time for rb basis choice and simulation: 0.031136
972 %mean dimension of RB model: 30
973 %mean Delta of RB model: 0.001852
974 %-----------------------------------------------------
975 %time measurement over 10 dictionary rb simulations over training parameters
976 %mean time for rb linear combinations: 0.065579
977 %mean time for rb basis choice and simulation: 0.00065698
978 %mean dimension of RB model: 1
979 %mean Delta of RB model: 2.0037e-008
980 %-----------------------------------------------------
981 %time measurement over 10 dictionary rb simulations over distorted training parameters
982 %mean time for rb linear combinations: 0.065631
983 %mean time for rb basis choice and simulation: 0.004756
984 %mean dimension of RB model: 7.4
985 %mean Delta of RB model: 2.2047e-006
986 %-----------------------------------------------------
987 %time measurement over 10 random standard rb simulations
988 %result not found in cache
989 %call of @gen_reduced_data, now computed and cached, key=939761750
990 %mean time for standard rb simulation (with 350 basis vectors): 0.056699
991 %mean Delta of RB model: 2.4026e-006
992 %-----------------------------------------------------
993 %time measurement over 10 training standard rb simulations
994 %mean time for standard rb simulation (with 350 basis vectors): 0.053373
995 %mean Delta of RB model: 5.7061e-006
996 %-------------------------------------------------------
997 
998 
999 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%5
1000 %
1001 % Ansatz: Mittelgrosse Lagrange Basis (N=400) und Laufzeit und
1002 % maximaler Testfehler
1003 % mittlere Laufzeit bestimmen
1004 %
1005 % => Das ist maximaler Testfehler, den wir unterbieten koennen,
1006 % d.h. online-eps wird auf diesen Wert gesetzt
1007 %
1008 % step 5 erzeugt diese greedy basis
1009 %
1010 % - Grosses dictionary (1000) erstellen und Vergleich
1011 % mittlere Basisgroesse
1012 % mittlere Laufzeit
1013 %
1014 % Statt Lagrange eine Greedy basis: Immer noch besser?
1015 %
1016 % Linearkombination: Nur fuer Teile, welche notwendig sind?
1017 % caching, computation on demand?
1018 %
1019 
1020 
1021 % STEP 5:
1022 % B1=B2 = 20
1023 %=>
1024 %max error estimator: 9.8224 for mu = 0.25626 8.2271 0.10697
1025 %new basis size: 2
1026 %max error estimator: 8.5185 for mu = 3.8811 0.15771 0.10371
1027 %new basis size: 3
1028 %max error estimator: 8.4647 for mu = 0.11595 0.14404 5.6561
1029 %new basis size: 4
1030 %max error estimator: 6.1665 for mu = 8.014 0.10153 5.6284
1031 %new basis size: 5
1032 %max error estimator: 5.1794 for mu = 2.7683 4.835 0.10213
1033 %new basis size: 6
1034 %max error estimator: 4.8041 for mu = 0.10243 9.8289 2.0605
1035 %new basis size: 7
1036 %max error estimator: 4.9079 for mu = 0.1032 4.2649 9.6909
1037 %new basis size: 8
1038 %max error estimator: 3.7885 for mu = 1.7253 0.10151 8.775
1039 %new basis size: 9
1040 %max error estimator: 3.1312 for mu = 8.4301 2.1146 0.107
1041 %new basis size: 10
1042 %max error estimator: 2.5548 for mu = 1.9628 0.10026 1.5733
1043 %new basis size: 11
1044 %max error estimator: 1.8652 for mu = 0.10307 7.1583 0.42625
1045 %new basis size: 12
1046 %max error estimator: 1.6943 for mu = 0.11033 0.89484 4.0744
1047 %new basis size: 13
1048 %max error estimator: 1.5111 for mu = 9.7004 0.13858 1.9245
1049 %new basis size: 14
1050 %max error estimator: 1.2297 for mu = 0.24571 0.89305 0.10468
1051 %new basis size: 15
1052 %max error estimator: 1.0896 for mu = 1.9541 0.91071 0.12458
1053 %new basis size: 16
1054 %max error estimator: 0.75964 for mu = 0.18811 0.15704 1.0872
1055 %new basis size: 17
1056 %max error estimator: 0.58983 for mu = 0.81222 5.7646 0.10967
1057 %new basis size: 18
1058 %max error estimator: 0.55249 for mu = 0.11305 3.4472 2.5762
1059 %new basis size: 19
1060 %max error estimator: 0.48138 for mu = 2.4522 0.18095 0.27056
1061 %new basis size: 20
1062 %max error estimator: 0.4546 for mu = 8.2814 9.9166 0.12226
1063 %new basis size: 21
1064 %max error estimator: 0.45274 for mu = 0.51312 0.14674 9.29
1065 %new basis size: 22
1066 %max error estimator: 0.28178 for mu = 4.9214 0.50476 0.10382
1067 %new basis size: 23
1068 %max error estimator: 0.26892 for mu = 0.15448 4.4655 0.54738
1069 %new basis size: 24
1070 %max error estimator: 0.25045 for mu = 0.62626 0.15637 0.79931
1071 %new basis size: 25
1072 %max error estimator: 0.22665 for mu = 0.12261 9.5943 6.7078
1073 %new basis size: 26
1074 % ...
1075 %new basis size: 73
1076 %max error estimator: 0.001505 for mu = 1.178 0.17993 9.6819
1077 %new basis size: 74
1078 
1079 % STEP 5:
1080 % B1=B2 = 40
1081 %=>
1082 %max error estimator: 9.005 for mu = 0.25626 8.2271 0.10697
1083 %new basis size: 2
1084 %max error estimator: 8.4454 for mu = 3.8811 0.15771 0.10371
1085 %new basis size: 3
1086 %max error estimator: 8.3351 for mu = 0.11595 0.14404 5.6561
1087 %new basis size: 4
1088 %max error estimator: 5.9311 for mu = 8.014 0.10153 5.6284
1089 %new basis size: 5
1090 %max error estimator: 5.1034 for mu = 2.7683 4.835 0.10213
1091 %new basis size: 6
1092 %max error estimator: 4.2945 for mu = 0.10243 9.8289 2.0605
1093 %
1094 % => schneller als bei 20x20!!!!
1095 
1096 
1097 
1098 % once more with logarithmical uniform training data, 10000
1099 % training points
1100 % B1=B2=40:
1101 %max error estimator: 10.3628 for mu = 0.10011 9.2349 0.24892
1102 %new basis size: 2
1103 %max error estimator: 9.5436 for mu = 0.12119 0.1022 7.1872
1104 %new basis size: 3
1105 %max error estimator: 8.6987 for mu = 8.6991 0.10181 0.23366
1106 %new basis size: 4
1107 %max error estimator: 6.3551 for mu = 8.8078 0.10279 9.2802
1108 %new basis size: 5
1109 %max error estimator: 6.2258 for mu = 9.9205 4.5107 0.10198
1110 %new basis size: 6
1111 %max error estimator: 4.9601 for mu = 1.5491 8.1627 0.10109
1112 %new basis size: 7
1113 %max error estimator: 4.8369 for mu = 0.1016 1.0157 7.883
1114 %new basis size: 8
1115 %max error estimator: 3.6565 for mu = 0.58057 0.10272 0.10017
1116 %new basis size: 9
1117 %max error estimator: 3.2278 for mu = 0.20563 0.10107 1.105
1118 %new basis size: 10
1119 %max error estimator: 2.9 for mu = 8.6036 0.46089 0.10361
1120 %
1121 % => bisschen langsamer als zuvor, schoen.
1122 
1123 % jetzt mu_min auf 0.01 statt 0.1:
1124 %max error estimator: 133.6249 for mu = 0.010017 8.8746 0.039272
1125 %new basis size: 2
1126 %max error estimator: 98.2215 for mu = 0.010022 0.18285 8.0601
1127 %new basis size: 3
1128 %max error estimator: 88.0778 for mu = 3.344 0.040784 0.010049
1129 %new basis size: 4
1130 %max error estimator: 106.0299 for mu = 8.4309 0.014886 0.011095
1131 %new basis size: 5
1132 %max error estimator: 75.0994 for mu = 2.5013 0.010011 0.47344
1133 %new basis size: 6
1134 %max error estimator: 90.6464 for mu = 0.35635 0.010006 7.9812
1135 %new basis size: 7
1136 %max error estimator: 101.1528 for mu = 0.012804 0.010849 7.4964
1137 %new basis size: 8
1138 %max error estimator: 80.2765 for mu = 9.3943 0.010423 0.10133
1139 %
% => schwierig! Wird ueberhaupt gescheit geloest? Ja, coercivity
1141 % einfach Faktor 10 groesser, also Fehler entsprechend groesser.
1142 
1143 
1144 
1145 
1146 % STEP 5:
1147 % params.B1 = 40;
1148 % params.B2 = 10;