1 function res = online_greedy(step)
% NOTE(review): this file is a line-numbered listing with many source lines
% missing (gaps in the embedded numbering: no visible 'switch step', no
% 'for i=1:nruns' loop headers, no matching 'end's, no 'tic' calls) and
% several string literals split across physical lines. It is not runnable
% as-is; the comments added below only annotate the visible structure and
% leave every code byte untouched.
%
% online_greedy(step): test driver for an online-greedy ("dictionary")
% reduced-basis procedure on a multiscale thermalblock model. The numeric
% 'step' argument selects a scenario (visible case labels: 0, 3, 4, 5.1);
% an unknown step reaches the final error('step unknown').
2 %
function online_greedy(step)
4 %
function for test of online-greedy procedure with the new
7 % B. Haasdonk 17.2.2012
% NOTE(review): the next line overrides the 'step' argument with 0 and is
% missing a semicolon — presumably leftover debug code; confirm intent.
10 step = 0 % test multiscale model
% Common model setup: multiscale thermalblock with dictionary-based
% detailed/reduced data generation and simulation routines plugged in.
16 params.Qa = 3; % at most 4 parameters
17 params.mu_range = [0.01,10];
18 model = multiscale_thermalblock_model(params);
20 %params.mu_range = [0.01,100];
21 %params.parameter_pattern = [0,1,0,0;
25 %model = twoscale_thermalblock_model(params);
27 model.gen_reduced_data = @dictionary_gen_reduced_data;
28 model.gen_detailed_data = @dictionary_gen_detailed_data;
29 model.rb_simulation = @dictionary_rb_simulation;
30 model.rb_online_mode = 0; % standard rb simulation
34 % generate basis of up to 400 vectors;
35 model.RB_stop_Nmax = 1000;
36 model.RB_stop_epsilon = 1e-6;
37 model.RB_train_size = 10000;
39 model_data = gen_model_data(model);
% --- case 0: inspect the detailed multiscale model (GUI plots of the
% detailed solution, parameter-mapping permutation and diffusivity) ---
% NOTE(review): the enclosing 'switch step' statement is on a missing line.
42 case 0 % test multiscale model
43 % mu = rand_uniform(1,model.mu_ranges);
44 % mu = [10,0.01,0.001];
45 % mu = [0.001,10,1,0.01];
46 % model = set_mu(model,mu);
47 % sim_data = detailed_simulation(model,model_data);
48 plot_params.subsampling_level = 0; %
for linear
49 plot_params.axis_equal = 1;
50 plot_params.axis_tight = 1;
52 detailed_data = model_data;
54 demo_detailed_gui(model,detailed_data,[],plot_params);
56 Perm = reshape(model.parameter_mapping,model.B1,model.B2);
59 Perm = [Perm,zeros(model.B2,1);zeros(1,model.B1+1)];
64 % assume diagonal diff-tensor, then scalar conversion is sum/2
65 diff_scalar = @(varargin) sum(model.diffusivity_tensor(varargin{:}),2)/2;
66 diff_h = femdiscfunc([],model_data.df_info);
67 model.decomp_mode = 0;
68 diff_h = fem_interpol_global(diff_scalar,diff_h,model);
% Small-basis RB demo (20 training points, up to 20 vectors).
71 model.RB_stop_Nmax = 20;
72 model.RB_train_size = 20;
73 detailed_data = gen_detailed_data(model,model_data);
74 model.N = size(detailed_data.RB,2);
75 %model_data = gen_model_data(dmodel);
76 %detailed_data = gen_detailed_data(dmodel,model_data);
77 %demo_rb_gui(rmodel,detailed_data,[],plot_params);
78 %plot_params.axis_tight = 1;
79 %plot_params.yscale_uicontrols = 0.5;
80 demo_rb_gui(model,detailed_data);
84 % time-measurement for 10 ordinary RB simulations, basis 400!
85 model.RB_stop_Nmax = 10;
86 model.RB_train_size = 100;
87 detailed_data= filecache_function(@gen_detailed_data,model,model_data);
91 reduced_data = gen_reduced_data(model,detailed_data);
93 mu = rand_uniform(1,model.mu_ranges);
94 model = set_mu(model,mu);
96 rb_sim_data= rb_simulation(model,reduced_data);
% NOTE(review): 't' below is filled on missing lines (timing loop absent).
99 disp(['mean time
for rb simulation (with few basis vectors):
',num2str(mean(t))])
% Dictionary construction + adaptive online simulation for one training
% parameter and one test parameter, with residual-decay plots.
102 % construct dictionary and adaptive online simulations for
103 % train and test point
105 % construct dictionary and offline data
106 disp('constructing offline dictionary
');
107 model.rb_online_mode = 1; % -Delta(v) online adaptivity
108 detailed_data = filecache_function(...
109 @dictionary_gen_detailed_data,model,model_data);
110 disp(['computed dictionary in
',...
111 num2str(detailed_data.RB_info.time),...
114 % online simulation with adaptive algorithm for training point
115 disp('generating reduced_data
');
116 % reduced_data = filecache_function(@dictionary_gen_reduced_data,model,detailed_data);
118 reduced_data = filecache_function(@dictionary_gen_reduced_data,model,detailed_data);
121 model.RB_online_stop_Nmax = 50; % online max 50 basis vectors
122 disp('performing online simulation for training parameter 5
');
123 model = set_mu(model,detailed_data.RB_info.mus(:,5));
125 reduced_data.RB = detailed_data.RB;
126 reduced_data.df_info = detailed_data.df_info;
127 reduced_data.grid = detailed_data.grid;
129 rb_sim_data = dictionary_rb_simulation(model,reduced_data)
130 rb_sim_data= dictionary_rb_reconstruction(detailed_data,rb_sim_data);
131 plot_sim_data(model,model_data,rb_sim_data,[]);
132 title('reduced solution
')
134 figure, plot(rb_sim_data.eps_sequence,'b*
');
135 title('residual norm over online iterations
');
136 set(gca,'Yscale
','log
');
138 disp('performing online simulation for test parameter
');
139 mu = [0.1, ones(1,length(model.mu_ranges)-1)];
140 % mu = detailed_data.RB_info.mus(:,5)+0.01*rand(1);
142 model = set_mu(model,mu);
144 % reduced_data.RB = detailed_data.RB;
145 % reduced_data.df_info = detailed_data.df_info;
147 rb_sim_data = dictionary_rb_simulation(model,reduced_data)
148 rb_sim_data= dictionary_rb_reconstruction(detailed_data,rb_sim_data);
150 plot_sim_data(model,model_data,rb_sim_data,[]);
151 title('reduced solution
')
153 figure, plot(rb_sim_data.eps_sequence,'b*
');
154 title('residual norm over online iterations
');
155 set(gca,'Yscale
','log
');
% --- case 3: timing comparison of dictionary RB simulations (training,
% distorted-training and random parameters) vs. standard RB simulations ---
% NOTE(review): 'nruns', the measurement loops and 'ts_*'/'Ns'/'t' fills
% are on missing lines throughout this case.
159 case 3 % time measurement
161 % use_dictionary_gen_reduced_data = 0;
162 % more efficient due to better ordering of reduced data:
163 % use_dictionary_gen_reduced_data = 1;
164 model.rb_online_mode = 1; % -Delta(v) online adaptivity
166 mu_init = get_mu(model);
167 disp('constructing offline dictionary
');
168 detailed_data = filecache_function(...
169 @dictionary_gen_detailed_data,model,model_data);
170 disp(['computed dictionary in
',...
171 num2str(detailed_data.RB_info.time),...
174 disp('generating reduced_data
');
175 model.dummy = 1.2; % counter: change for enforcing filecaching recomputation
176 % if use_dictionary_gen_reduced_data
177 % disp('using new efficient gen_reduced_data
');
178 reduced_data = filecache_function(@dictionary_gen_reduced_data, ...
179 model,detailed_data);
181 % reduced_data = filecache_function(@lin_stat_gen_reduced_data, ...
182 % model,detailed_data);
186 disp('-----------------------------------------------------
');
187 % time-measurement for 10 adaptive RB simulations over training parameters
188 disp('time measurement over 10 dictionary rb simulations over training parameters
')
190 model.RB_online_stop_Nmax = 50; % online max 50 basis vectors
191 ts_lincomb = zeros(nruns,1);
192 ts_solution = zeros(nruns,1);
193 Deltas = zeros(nruns,1);
196 mu = detailed_data.RB_info.mus(:,i);
197 model = set_mu(model,mu);
198 rb_sim_data= dictionary_rb_simulation(model,reduced_data);
199 % rb_sim_data= rb_simulation(model,reduced_data);
200 ts_lincomb(i) = rb_sim_data.t_lincomb;
201 ts_solution(i) = rb_sim_data.t_solution;
202 Deltas(i) = rb_sim_data.Delta;
203 Ns(i) = length(rb_sim_data.uN);
205 disp(['mean time for rb linear combinations:
',num2str(mean(ts_lincomb))])
206 disp(['mean time for rb basis choice and simulation:
',num2str(mean(ts_solution))])
207 disp(['mean dimension of RB model:
',num2str(mean(Ns))])
208 disp(['maximum Delta of RB model:
',num2str(max(Deltas))])
210 disp('-----------------------------------------------------
');
211 % time-measurement for 10 adaptive RB simulations over training parameters
212 disp('time measurement over 10 dictionary rb simulations over distorted training parameters
')
% NOTE(review): setDefaultStream is deprecated in newer MATLAB
% (use RandStream.setGlobalStream) — confirm target MATLAB version.
213 RandStream.setDefaultStream(RandStream('mt19937ar
','seed
',100000));
215 model.RB_online_stop_Nmax = 50; % online max 50 basis vectors
216 ts_lincomb = zeros(nruns,1);
217 ts_solution = zeros(nruns,1);
218 Deltas = zeros(nruns,1);
221 % mu = detailed_data.RB_info.mus(:,i) + 0.000001*rand_uniform(1,model.mu_ranges);
222 mu = detailed_data.RB_info.mus(:,i) + 0.0001*rand_uniform(1,model.mu_ranges);
223 % mu = rand_uniform(1,model.mu_ranges);
224 model = set_mu(model,mu);
225 rb_sim_data= dictionary_rb_simulation(model,reduced_data);
226 % rb_sim_data= rb_simulation(model,reduced_data);
227 ts_lincomb(i) = rb_sim_data.t_lincomb;
228 ts_solution(i) = rb_sim_data.t_solution;
229 Deltas(i) = rb_sim_data.Delta;
230 Ns(i) = length(rb_sim_data.uN);
232 disp(['mean time for rb linear combinations:
',num2str(mean(ts_lincomb))])
233 disp(['mean time for rb basis choice and simulation:
',num2str(mean(ts_solution))])
234 disp(['mean dimension of RB model:
',num2str(mean(Ns))])
235 disp(['maximum Delta of RB model:
',num2str(max(Deltas))])
237 disp('-----------------------------------------------------
');
238 % time-measurement for 10 adaptive RB simulations
239 disp(['time measurement over 10 dictionary rb simulations on random
',...
% NOTE(review): comment says "max 25" but the code sets 30 — inconsistent.
242 model.RB_online_stop_Nmax = 30; % online max 25 basis vectors
243 ts_lincomb = zeros(nruns,1);
244 ts_solution = zeros(nruns,1);
245 Deltas = zeros(nruns,1);
248 mu = rand_uniform(1,model.mu_ranges);
249 model = set_mu(model,mu);
250 rb_sim_data= dictionary_rb_simulation(model,reduced_data);
251 % rb_sim_data= rb_simulation(model,reduced_data);
252 ts_lincomb(i) = rb_sim_data.t_lincomb;
253 ts_solution(i) = rb_sim_data.t_solution;
254 Deltas(i) = rb_sim_data.Delta;
255 Ns(i) = length(rb_sim_data.uN);
257 disp(['mean time for rb linear combinations:
',num2str(mean(ts_lincomb))])
258 disp(['mean time for rb basis choice and simulation:
',num2str(mean(ts_solution))])
259 disp(['mean dimension of RB model:
',num2str(mean(Ns))])
260 disp(['maximum Delta of RB model:
',num2str(max(Deltas))])
262 disp('-----------------------------------------------------
');
263 disp('-----------------------------------------------------
');
264 % time-measurement for 10 ordinary RB simulations of size 200
265 % time-measurement for 10 ordinary RB simulations of size 200
266 disp(['time measurement over 10 standard rb simulations over
', ...
267 ' training parameters
']);
268 model.rb_online_mode = 0; % standard rb simulation
269 model.dummy = 1; % different from optimized from below
273 Deltas = zeros(nruns,1);
275 mu = detailed_data.RB_info.mus(:,i);
276 model = set_mu(model,mu);
277 rb_sim_data= rb_simulation(model,reduced_data);
278 Deltas(i) = rb_sim_data.Delta;
279 ts_lincomb(i) = rb_sim_data.t_lincomb;
280 ts_solution(i) = rb_sim_data.t_solution;
282 disp(['mean time for rb linear combinations:
',num2str(mean(ts_lincomb))])
283 disp(['mean time for rb basis choice and simulation:
',num2str(mean(ts_solution))])
284 disp(['maximum Delta of RB model:
',num2str(max(Deltas))])
285 disp(['basis size N =
', num2str(size(detailed_data.RB,2))]);
287 disp('-----------------------------------------------------
');
288 clear('reduced_data
');
289 % time-measurement for 10 ordinary RB simulations of size 200
290 disp(['time measurement over 10 optimized rb simulations over
', ...
291 ' training parameters
']);
292 Nmax = size(detailed_data.RB,2);
293 model.rb_online_mode = 0; % standard rb simulation
297 Deltas = zeros(nruns,1);
298 model = set_mu(model,mu_init);
% Switch to the optimized lin_stat implementations for this measurement.
299 model.gen_reduced_data = @lin_stat_gen_reduced_data;
300 model.rb_simulation = @lin_stat_rb_simulation;
302 reduced_data = filecache_function(@gen_reduced_data,model,detailed_data);
305 mu = detailed_data.RB_info.mus(:,i);
306 model = set_mu(model,mu);
308 rb_sim_data= rb_simulation(model,reduced_data);
309 Deltas(i) = rb_sim_data.Delta;
313 disp(['mean time for standard rb simulation (with
',num2str(Nmax),' basis vectors):
',num2str(mean(t))])
314 disp(['maximum Delta of RB model:
',num2str(max(Deltas))])
316 disp('-----------------------------------------------------
');
317 % clear('reduced_data
');
318 % time-measurement for 10 ordinary RB simulations of size 200
319 disp(['time measurement over 10 optimized rb simulations over
', ...
320 ' random parameters
'])
321 RandStream.setDefaultStream(RandStream('mt19937ar
','seed
',100000));
323 Nmax = size(detailed_data.RB,2);
326 Deltas = zeros(nruns,1);
327 model = set_mu(model,mu_init);
328 % model.gen_reduced_data = @lin_stat_gen_reduced_data;
329 % model.rb_simulation = @lin_stat_rb_simulation;
330 % reduced_data = filecache_function(@gen_reduced_data,model,detailed_data);
332 mu = rand_uniform(1,model.mu_ranges);
333 model = set_mu(model,mu);
335 rb_sim_data= rb_simulation(model,reduced_data);
336 Deltas(i) = rb_sim_data.Delta;
339 disp(['mean time for standard rb simulation (with
',num2str(Nmax),' basis vectors):
',num2str(mean(t))])
340 disp(['maximum Delta of RB model:
',num2str(max(Deltas))])
% --- case 4 (and following): greedy error decay for varying block sizes;
% loop variables n1/n2 and 'B1s'/'nB1s'/'nB2s' are on missing lines ---
342 case 4 % debug delta of new rb simulation
346 % for different block sizes determine greedy error decay
348 % B1s = [10,15,20,25,30,35,40];
349 % B2s = [10,15,20,25,30,35,40];
350 % B1s = [10,15,20,25];
351 % B2s = [10,15,20,25];
352 %=> errs = 5.9417 11.8685 12.2661 13.9614
353 % 6.4436 7.8761 9.1320 11.1612
354 % 7.7663 8.0952 8.9250 9.1970
355 % 9.0527 7.9496 9.1340 9.2640
357 B2s = [20,25,30,35,40];
361 errs = zeros(nB1s,nB2s);
367 % B1=B2=20 leads to N = 73, max error estimator: 0.001505
368 % params.numintervals_per_block = 5*5;
369 params.Qa = 3; % at most 3 parameters
370 params.mu_range = [0.01,10];
371 model = multiscale_thermalblock_model(params);
372 model.rb_online_mode = 0; % standard RB
373 model.gen_detailed_data = @dictionary_gen_greedy_detailed_data;
374 model.gen_reduced_data = @dictionary_gen_reduced_data;
375 % model.gen_detailed_data = @dictionary_gen_detailed_data;
376 model.rb_simulation = @dictionary_rb_simulation;
377 model.RB_stop_epsilon = 1e-6;
378 model.RB_train_size = 10000;
379 model.RB_stop_Nmax = 30;
380 model_data = gen_model_data(model);
381 disp(['------------------------------------------------------
' ...
383 disp(['generating detailed greedy basis of size 30 for
',...
384 'B1 =
',num2str(B1s(n1)),', B2 =
',num2str(B2s(n2))]);
386 detailed_data = gen_detailed_data(model,model_data);
387 errs(n1,n2) = min(detailed_data.RB_info.max_error_sequence);
392 disp('please choose manually maximal error in above
');
395 % then choose these values in case 5.1
% --- case 5.1: generate a standard greedy basis (~500 vectors) and an
% adaptive online dictionary, saving both to .mat files for comparison ---
397 case 5.1 % compare with greedy basis
402 % B1=B2=20 leads to N = 73, max error estimator: 0.001505
403 % params.numintervals_per_block = 5*5;
404 params.Qa = 3; % at most 3 parameters
405 params.mu_range = [0.01,100];
406 model = multiscale_thermalblock_model(params);
407 model.gen_reduced_data = @dictionary_gen_reduced_data;
408 model.gen_detailed_data = @dictionary_gen_detailed_data;
409 model.rb_simulation = @dictionary_rb_simulation;
411 % s = warning('error
','MATLAB:nearlySingularMatrix
');
412 warning('OFF
','MATLAB:nearlySingularMatrix
');
414 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%
416 % first generate standard greedy basis (no orthonormalization)
417 % goal: about 500 vectors
419 % model.rb_online_mode = 1; % -Delta(v) online adaptivity
420 model.RB_stop_epsilon = 1e-8;
421 model.RB_train_size = 10000;
422 model.RB_stop_Nmax = 500;
423 model.rb_online_mode = 0; % standard RB
424 model.gen_detailed_data = @dictionary_gen_greedy_detailed_data;
425 model_data = gen_model_data(model);
426 % if exist('greedy_basis.mat
')
427 % disp('skipping generation of greedy basis
');
429 detailed_data = filecache_function(...
430 @gen_detailed_data,model,model_data);
431 disp(['computed greedy basis in
',...
432 num2str(detailed_data.RB_info.time),...
434 save('greedy_basis3.mat
','model
','detailed_data
');
439 % now construct adaptive online dictionary of about 1000 vectors
440 model.rb_online_mode = 1; % adaptive online RB
441 model.RB_online_stop_Nmax = 30; % online max 30 basis vectors
442 model.RB_stop_Nmax = 500; % dictionary of larger size
443 detailed_data = filecache_function(...
444 @gen_detailed_data,model,model_data);
445 disp(['computed dictionary in
',...
446 num2str(detailed_data.RB_info.time),...
448 save('greedy_dictionary.mat
','model
','detailed_data
');
% Fallback branch of the (missing) switch: unrecognized 'step' value.
451 error('step unknown
');
455 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
456 % auxiliary functions
457 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
459 function reduced_data = dictionary_gen_reduced_data(model,detailed_data)
% dictionary_gen_reduced_data: assemble reduced operators and residual
% error-estimator quantities for the dictionary RB approach.
% Builds per-component reduced matrices AN_comp/fN_comp (and lN_comp if an
% output functional is configured) plus the Gram-type matrices bar_Hff,
% bar_Haf, bar_Haa of Riesz-representer inner products, organized so the
% online stage only needs cheap coefficient multiplications.
% NOTE(review): several loop headers (e.g. 'for q = ...', 'for n = ...')
% and the assignments of Qa/Qf are on lines missing from this listing.
460 %% The following works, but results in slow linear combination
461 %reduced_data= lin_stat_gen_reduced_data(model,detailed_data);
463 % later: different saving of error-estimator riesz-representers such that
464 % simple multiplication with f and a coefficient vectors is possible
466 % instead: refinement of lin-stat-gen-reduced-data:
468 old_mode = model.decomp_mode;
469 model.decomp_mode = 1; % == components
470 [A_comp,f_comp] = model.operators(model,detailed_data);
472 reduced_data.Qa = Qa;
474 reduced_data.Qf = Qf;
% Galerkin projection of each operator component onto the RB space.
476 reduced_data.AN_comp = cell(1,length(A_comp));
478 reduced_data.AN_comp{q} = ...
479 detailed_data.RB'*A_comp{q}*detailed_data.RB;
482 reduced_data.fN_comp = cell(1,length(f_comp));
484 reduced_data.fN_comp{q} = ...
485 detailed_data.RB
' * f_comp{q};
488 if model.compute_output_functional
489 % assumption: nonparametric output functional, then simple RB
490 % evaluation possible
492 model.operators_output(model,detailed_data);
493 Q_l = length(l_comp);
494 reduced_data.lN_comp = cell(1,Q_l);
496 reduced_data.lN_comp{q} = detailed_data.RB' * l_comp{q};
500 N = model.get_rb_size(model,detailed_data);
503 % plus error estimation quantities
505 % G = (v_r^q, v_r^q) = v_r^q
' * K * v_r^q
506 % with {v_r^q}_q = (v_f^q, v_a^qq')_{qq
'}
507 % G = [Gff, Gfa; Gfa', Gaa];
509 % (v_f^q,v_f^q
') = v_f^q' * K * v_f^q
511 % matrices of coefficient vectors of Riesz-representers:
512 % K * v_f^q = f^q (coefficient vector equations)
513 K = model.get_inner_product_matrix(detailed_data);
514 % search solution in H10, i.e.
set dirichlet DOFs
516 %v_f = zeros(size(detailed_data.RB,1),Qf);
517 K_v_f = zeros(size(detailed_data.RB,1),Qf);
519 K_v_f(:,q) = f_comp{q};
521 % v_f(:,q) = K \ f_comp{q};
% Riesz representers of the operator components applied to each basis
% vector; K_v_a{n} holds the right-hand sides, v_a{n} the representers.
527 K_v_a{n} = zeros(size(K,1),Qa);
528 v_a{n} = zeros(size(K,1),Qa);
530 K_v_a{n}(:,q) = (A_comp{q}*detailed_data.RB(:,n));
531 v_a{n} = K \ K_v_a{n};
532 % v_a{n}(:,q) = K \ K_v_a{n}(:,q);
536 %
finally assemble G = [G_ff, G_fa{n}; G_fa{n}', G_aa{n}];
539 %%G(1:Qf,1:Qf) = reduced_data.Gff;
540 %G(1:Qf,1:Qf) = v_f
' * K_v_f;
542 % G(1:Qf,Qf+(n-1)*Qa +(1:Qa)) = ...
544 % % reduced_data.Gfa{n};
545 % G(Qf+(n-1)*Qa +(1:Qa),1:Qf) = ...
546 % transpose(v_f
' * K_v_a{n});
547 % % reduced_data.Gfa{n}';
551 % G(Qf+(n1-1)*Qa+(1:Qa),Qf+(n2-1)*Qa +(1:Qa)) = ...
552 % v_a{n1}
' * K_v_a{n2};
553 % % reduced_data.Gaa{n1,n2};
557 % assemble Qf x Qf matrix Hff = (<vf1,vf1>, ... ,<vf1,vfQf>)
559 % (<vfQf,vf1>, ... ,<vfQf,vfQf>)
560 reduced_data.bar_Hff = v_f' * K_v_f;
562 % assemble N x QaQf matrix
563 % Haf = (<vf1,va11>...<vf1,va1Qa> | <vf2,va11> ... <vf2,va1Qa> | ... <vfQf,va1Qa>)
564 % <vf1,va21>...<vf1,va2Qa> | <vf2,va21> ... <vf2,va2Qa> | ... <vfQf,va2Qa>)
566 % <vf1,vaN1>...<vf1,vaNQa> | <vf2,vaN1> ... <vf2,vaNQa> | ... <vfQf,vaNQa>)
567 Haf = zeros(N,Qa * Qf);
570 % insert row vector in block matrix:
571 vaf = v_f(:,q)
' * K_v_a{n};
572 Haf(n,(1+(q-1)*Qa):(q*Qa)) = vaf;
575 reduced_data.bar_Haf = Haf;
577 % assemble N^2 x Qa^2 matrix
578 % Haa = (<va11,va11>...<va11,va1Qa> | <va12,va11> ... <va12,va1Qa> | <va1Qa,va11>... <va1Qa,va1Qa>)
580 % <vaN1,va11>...<vaN1,va1Qa> | <vaN2,va11> ... <vaN2,va1Qa> | <vaNQa,va11>... <vaNQa,va1Qa>)
581 % -------------------------------------------------------------------------------------------
582 % <va11,va21>...<va11,va2Qa> | <va12,va21> ... <va12,va2Qa> | <va1Qa,va21>... <va1Qa,va2Qa>
584 % <vaN1,va21>...<vaN1,va2Qa> | <vaN2,va21> ... <vaN2,va2Qa> | <vaNQa,va21>... <vaNQa,va2Qa>)
585 % -------------------------------------------------------------------------------------------
587 % -------------------------------------------------------------------------------------------
588 % <va11,vaN1>...<va11,vaNQa> | <va12,vaN1> ... <va12,vaNQa> | <va1Qa,vaN1>... <va1Qa,vaNQa>
590 % <vaN1,vaN1>...<vaN1,vaNQa> | <vaN2,vaN1> ... <vaN2,vaNQa> | <vaNQa,vaN1>... <vaNQa,vaNQa> )
592 Haa = zeros(N*N,Qa*Qa);
593 % insert row vectors in block matrix:
594 for q = 1:Qa % loop over column-blocks
595 for n1 = 1:N % loop over block-rows
596 for n2 = 1:N % loop over rows within one block;
597 vaa = v_a{n2}(:,q)' * K_v_a{n1};
598 Haa((n1-1)*N + n2, (1+(q-1)*Qa):q*Qa) = vaa;
603 reduced_data.bar_Haa = Haa;
% Marker consumed by dictionary_rb_simulation to select the H-matrix path.
605 reduced_data.used_dictionary_gen_reduced_data = 1;
606 %
set back old model mode
607 model.decomp_mode = old_mode;
609 function detailed_data = dictionary_gen_detailed_data(model,model_data)
% dictionary_gen_detailed_data: build the RB "dictionary" by plain
% snapshotting — one detailed solution per random training parameter,
% stored column-wise in detailed_data.RB without any orthogonalization.
% Also records the training parameters (RB_info.mus) and wall-clock time.
% NOTE(review): RB_info.time uses toc, so a matching 'tic' must exist on a
% line missing from this listing; the loop's closing 'end' is also missing.
611 sim_data = detailed_simulation(model,model_data);
612 h10_matrix = sim_data.uh.df_info.h10_inner_product_matrix;
613 mus = rand_uniform(model.RB_train_size,model.mu_ranges);
614 RB = zeros(size(h10_matrix,1),model.RB_train_size);
615 for mui = 1:model.RB_train_size;
616 model = model.set_mu(model,mus(:,mui));
617 sim_data = detailed_simulation(model,model_data);
618 RB(:,mui) = sim_data.uh.dofs;
622 detailed_data = model_data;
623 detailed_data.RB_info.mus = mus;
624 detailed_data.RB_info.time = toc;
625 detailed_data.RB = RB;
627 % the following should both work for standard rb as well as dictionary
628 % generating a greedy RB only doing a normalization,
629 % but no orthogonalization
630 function detailed_data = dictionary_gen_greedy_detailed_data(model,model_data)
% dictionary_gen_greedy_detailed_data: greedy RB construction with
% normalization only (no orthogonalization). Starting from the first
% (log-uniformly drawn) training parameter, repeatedly finds the training
% parameter with maximal error estimator Delta, computes its detailed
% solution, H10-normalizes it and appends it to the basis, until
% RB_stop_epsilon or RB_stop_Nmax is reached, or a near-singular reduced
% matrix aborts the loop (caught below).
% NOTE(review): initialization of max_err_est/mu_max, the 'try' matching
% the 'catch', 'tic', and several 'end's are on missing listing lines.
632 RandStream.setDefaultStream(RandStream('mt19937ar','seed',1234));
634 %% mus = rand_uniform(model.RB_train_size,model.mu_ranges);
635 % logarithmic instead of uniform
636 %log_mu_ranges = model.mu_ranges;
637 %for i = 1:length(model.mu_ranges);
638 % log_mu_ranges{i} = log(model.mu_ranges{1});
640 %log_mus = rand_uniform(model.RB_train_size,log_mu_ranges);
642 mus = rand_log(model.RB_train_size,model.mu_ranges);
643 detailed_data.RB_info.mus = mus;
% Seed the basis with the normalized first snapshot.
645 model = set_mu(model,mus(:,1));
646 sim_data = detailed_simulation(model,model_data);
647 n = fem_h10_norm(sim_data.uh);
648 detailed_data = model_data;
649 detailed_data.RB = sim_data.uh.dofs / n;
650 reduced_data = gen_reduced_data(model,detailed_data);
651 h10_matrix = sim_data.uh.df_info.h10_inner_product_matrix;
653 max_error_sequence = [];
656 while( max_err_est> model.RB_stop_epsilon) & ...
657 (size(detailed_data.RB,2)<model.RB_stop_Nmax)
% Scan all training parameters for the worst error estimator.
660 for i = 1:size(mus,2);
661 model = model.set_mu(model,mus(:,i));
662 rb_sim_data = rb_simulation(model,reduced_data);
663 if rb_sim_data.Delta > max_err_est
665 max_err_est = rb_sim_data.Delta;
668 disp([
'max error estimator: ',num2str(max_err_est),...
669 ' for mu = ',num2str(mu_max
')]);
670 model = model.set_mu(model,mu_max);
671 sim_data = detailed_simulation(model,detailed_data);
672 pr = sim_data.uh.dofs;
673 % pr = sim_data.uh.dofs - ...
674 % detailed_data.RB * (detailed_data.RB' * h10_matrix * ...
% max(.,0) guards against tiny negative values from round-off.
676 n = sqrt(max(pr
'*h10_matrix*pr,0));
677 detailed_data.RB = [detailed_data.RB,pr/n];
678 reduced_data = gen_reduced_data(model,detailed_data);
679 disp(['new basis size:
',num2str(size(detailed_data.RB,2))]);
680 max_error_sequence =[max_error_sequence, max_err_est];
682 catch % else set error to 0
683 disp('matrix close to singular... ending rb greedy extension loop
');
686 detailed_data.RB_info.time = toc;
687 detailed_data.RB_info.max_error_sequence = max_error_sequence;
689 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
690 % Online rb simulation by online greedy minimizing residual norm
692 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
693 function rb_sim_data = dictionary_rb_simulation(model, ...
% dictionary_rb_simulation: online RB simulation over a dictionary basis.
% rb_online_mode==0: standard RB solve with residual-based error bound.
% rb_online_mode==1: online greedy — iteratively pick the dictionary
% vector minimizing the residual norm of the extended reduced solution,
% using rank-1 update formulas, until RB_stop_epsilon or
% RB_online_stop_Nmax is reached.
% Returns timings (t_lincomb, t_solution), selected basis_indices, the
% residual sequence and the error bound Delta.
% NOTE(review): the second argument (reduced_data), N, diag_AN, diag_G,
% UN2, iter, eps_sequence and several loop/branch 'end's are defined on
% lines missing from this listing.
695 % model.rb_online_mode: 0 standard rb, no adaptivity
696 % model.rb_online_mode: 1 Delta-extended indicator for online extension
697 % prepare mu-dependent quantities
701 old_mode = model.decomp_mode;
702 model.decomp_mode = 2; % coefficients
704 tic; % measure linear combination time
706 [a_coeff,f_coeff] = ...
707 model.operators(model,[]);
709 AN = lincomb_sequence(reduced_data.AN_comp, a_coeff);
710 fN = lincomb_sequence(reduced_data.fN_comp, f_coeff);
711 Qf = length(f_coeff);
712 Qa = length(a_coeff);
716 % transform G matrix, such that later simple mult:
717 % G_mu matrix has <vf,vf>, <vf,va_phi_1(mu)> , <vf,va_phi_N>
718 % as first row and column, and <va_phi_i,va_phi_j(mu)> as
720 % the following is the correct code, if lin_stat_reduced_data is
721 % passed as argument.
722 %if ~isfield(reduced_data,'used_dictionary_gen_reduced_data
');
723 % Saa = kron(speye(N),sparse(1:Qa,1,a_coeff(:),Qa,1));
724 % S = [f_coeff(:), sparse([],[],[],Qf, N); ...
725 % sparse([],[],[], Qa * N, 1), Saa];
726 % G = S' * reduced_data.G * S;
% Cheap mu-dependent contraction of the precomputed H-matrices.
728 Hff = f_coeff
' * reduced_data.bar_Hff * f_coeff;
729 af_coeff = a_coeff * f_coeff';
730 Haf = reduced_data.bar_Haf * af_coeff(:);
731 aa_coeff = a_coeff * a_coeff
';
732 Haa_vec = reduced_data.bar_Haa * aa_coeff(:);
733 Haa = reshape(Haa_vec,[N,N]);
734 G = [Hff,Haf'; Haf,Haa];
% Symmetrize to suppress round-off asymmetry.
736 G = 0.5*(G+G'); % G is positive definite.
742 % disp(
'check, whether G has correct entries');
743 % %
get full operators
744 % model.decomp_mode = 0; % complete
746 % model.operators(model,reduced_data);
747 % K = model.get_inner_product_matrix(reduced_data);
751 % va_phi1 = K\(A*reduced_data.RB(:,1));
761 if model.rb_online_mode == 0
762 % standard rb simulation, no online enrichment
764 % the following works with standard lin_stat_gen_reduced_data:
765 % Q_r = size(reduced_data.G,1);
766 % neg_auN_coeff = -A_coeff * uN';
767 % res_coeff = [f_coeff; neg_auN_coeff(:)];
768 % res_norm_sqr = res_coeff
' * reduced_data.G * res_coeff;
769 % this is required for dictionary_gen_reduced_data:
771 res_norm_sqr = res_coeff' * G * res_coeff;
772 res_norm_sqr = max(res_norm_sqr,0);
773 res_norm = sqrt(res_norm_sqr);
774 rb_sim_data.Delta = ...
775 res_norm/model.coercivity_alpha(model);
777 elseif model.rb_online_mode == 1
778 % online enrichment by eta(v) = - Delta_N(v; Phi \cup v)
781 %uN = ones(size(AN,1),1);
783 % run iterative loop of online_greedy by minimum residual
786 current_eps = 1e10; %
do at least one extension
788 non_basis_indices = 1:N;
794 while (current_eps > model.RB_stop_epsilon) && (iter < model.RB_online_stop_Nmax)
% Rank-1 update: candidate reduced solutions for ALL remaining
% dictionary vectors are computed simultaneously.
795 % compute all
new uN vectors at once:
796 a = diag_AN(non_basis_indices)
';
797 bar_a = AN(basis_indices,non_basis_indices);
798 bar_bar_a = AN(non_basis_indices,basis_indices)';
799 % tilde_f = fN(non_basis_indices)
' - sum(bar_bar_a.*UN,1);
800 tilde_f = fN(non_basis_indices)' - uN' * bar_bar_a;
801 ANinv_bar_a = (ANinv*bar_a);
802 tilde_a = a - sum(bar_bar_a .* ANinv_bar_a,1);
804 UN = repmat(uN,1,length(non_basis_indices));
806 UN = [UN; zeros(1,length(non_basis_indices))] ...
807 + [-ANinv_bar_a;ones(1,length(non_basis_indices))] * ...
808 spdiags((tilde_f./tilde_a)
',0,length(tilde_f),length(tilde_f));
810 % compute all residualnormssquare at once:
811 G11 = G([1,1+basis_indices],[1,1+basis_indices]);
812 UN1 = [ones(1,length(non_basis_indices)); - UN(1:end-1,:)];
814 G12 = G([1,1+basis_indices],1+non_basis_indices);
815 g = [G11 * UN1 + G12*spdiags(UN2(:),0,length(UN2),length(UN2)); ...
816 sum(G12 .* UN1,1) + diag_G(1+non_basis_indices)'.*UN2];
817 resnormsqr = sum([ones(1,length(non_basis_indices)); - UN] .* g,1);
818 if min(resnormsqr<-1e-2)
819 disp('error: negative residual, inspect workspace!');
820 % test for first vector:
823 % if resnormsqr(3)>1e-10
824 % disp('error: residual for func 3 should be 0!!!');
826 % title('residual norms');
827 % % the following test was successful
828 % % G1 = [G11, G12(:,1); G12(:,1)', diag_G(1+1)];
829 % % g1 = G1 * [1; -UN(:,1)]
831 % % resnormsqr1 = [1; -UN(:,1)]' * G1 * [1; -UN(:,1)]
833 % % G3 = [G11, G12(:,3); G12(:,3)', diag_G(1+3)];
834 % % resnormsqr3 = [1; -UN(:,3)]' * G3 * [1; -UN(:,3)]
839 % take positive part to avoid numerical instabilities
840 resnormsqr = max(resnormsqr,0);
% Greedy selection: move the minimizing candidate into the basis.
842 [minresnormsqr, new_index] = min(resnormsqr);
843 % select one if multiple minima
844 current_eps = sqrt(minresnormsqr(1));
845 eps_sequence = [eps_sequence, current_eps];
846 new_index = new_index(1);
847 % remove new basis vector from search set
848 basis_indices = [basis_indices, non_basis_indices(new_index)];
849 non_basis_indices = [non_basis_indices(1:new_index-1),...
850 non_basis_indices(new_index+1:end)];
851 ANinv = inv(AN(basis_indices,basis_indices));
852 uN = UN(:,new_index);
853 UN = [UN(:,1:new_index-1),UN(:,new_index+1:end)];
% Debug branch below: recompute reference reduced solutions to verify
% the rank-1 update formulas (guard condition on a missing line).
856 disp('investigate iteration')
857 % true reduced solution with selected basis vectors of previous
859 detailed_data = reduced_data;
860 detailed_data.RB = detailed_data.RB(:,basis_indices(1:end-1));
861 reduced_data_tmp = gen_reduced_data(model,detailed_data);
862 rb_sim_data_tmp = rb_simulation(model,reduced_data_tmp);
863 disp('correct UN old:')
865 disp('current UN_old:')
867 % UN_new(:,new_index)
869 % true reduced solution with selected basis vectors:
870 detailed_data = reduced_data;
871 detailed_data.RB = detailed_data.RB(:,basis_indices);
872 reduced_data_tmp = gen_reduced_data(model,detailed_data);
873 rb_sim_data_tmp = rb_simulation(model,reduced_data_tmp);
878 % UN_new(:,new_index)
881 %disp('recomputed current uN')
882 %uN_new = [uN_old; 0] + tilde_f(new_index)/tilde_a(new_index)*[-ANinv_bar_a(:,new_index);1]
891 rb_sim_data.eps_sequence = eps_sequence;
892 rb_sim_data.basis_indices = basis_indices;
893 rb_sim_data.iter = iter;
894 rb_sim_data.current_eps = current_eps;
895 rb_sim_data.Delta = ...
896 current_eps/model.coercivity_alpha(model);
897 end; % rb_online_mode
902 rb_sim_data.t_lincomb = t_lincomb;
903 rb_sim_data.t_solution = t_solution;
905 model.decomp_mode = old_mode;
907 function rb_sim_data = dictionary_rb_reconstruction(detailed_data, ...
% dictionary_rb_reconstruction: reconstruct the detailed-space solution
% from the reduced coefficients rb_sim_data.uN, using only the dictionary
% columns that were actually selected (rb_sim_data.basis_indices).
% NOTE(review): the continuation line naming the second argument
% (rb_sim_data) and the 'end' of the if-branch are missing from this
% listing.
910 if ~isfield(rb_sim_data,'uh')
911 rb_sim_data.uh =
femdiscfunc([],detailed_data.df_info);
913 rb_sim_data.uh.dofs = ...
914 detailed_data.RB(:,rb_sim_data.basis_indices) * rb_sim_data.uN;
916 % Nmax von 200 auf 400, QA hochschrauben, damit Greedy schwieriger
917 % Offline daten anders organisieren
918 % distorted training data vergroessern
923 %>> online_greedy(3) with OLD lin_stat_gen_reduced_data:
924 %constructing offline dictionary
925 %call of @dictionary_gen_detailed_data, successfully read from cache, key=672472054
926 %computed dictionary in 15.0562 sec.
927 %generating reduced_data
928 %result not found in cache
929 %call of @lin_stat_gen_reduced_data, now computed and cached, key=923266952
930 %-----------------------------------------------------
931 %time measurement over 10 random dictionary rb simulations
932 %mean time for rb linear combinations: 0.089998
933 %mean time for rb basis choice and simulation: 0.03184
934 %mean dimension of RB model: 30
935 %mean Delta of RB model: 0.001852
936 %-----------------------------------------------------
937 %time measurement over 10 dictionary rb simulations over training parameters
938 %mean time for rb linear combinations: 0.085595
939 %mean time for rb basis choice and simulation: 0.00063634
940 %mean dimension of RB model: 1
941 %mean Delta of RB model: 1.8032e-008
942 %-----------------------------------------------------
943 %time measurement over 10 dictionary rb simulations over distorted training parameters
944 %mean time for rb linear combinations: 0.085588
945 %mean time for rb basis choice and simulation: 0.0049095
946 %mean dimension of RB model: 7.4
947 %mean Delta of RB model: 2.2049e-006
948 %-----------------------------------------------------
949 %time measurement over 10 random standard rb simulations
950 %call of @gen_reduced_data, successfully read from cache, key=454421169
951 %mean time for standard rb simulation (with 350 basis vectors): 0.052783
952 %mean Delta of RB model: 2.4026e-006
953 %-----------------------------------------------------
954 %time measurement over 10 training standard rb simulations
955 %mean time for standard rb simulation (with 350 basis vectors): 0.05388
956 %mean Delta of RB model: 5.7061e-006
958 %-------------------------------------------------------
959 % IMPROVEMENT IN THE FOLLOWING: 0.02 sec in linear combination:
960 %-------------------------------------------------------
962 %constructing offline dictionary
963 %call of @dictionary_gen_detailed_data, successfully read from cache, key=672472054
964 %computed dictionary in 15.0562 sec.
965 %generating reduced_data
966 %result not found in cache
967 %call of @dictionary_gen_reduced_data, now computed and cached, key=748022049
968 %-----------------------------------------------------
969 %time measurement over 10 random dictionary rb simulations
970 %mean time for rb linear combinations: 0.065533
971 %mean time for rb basis choice and simulation: 0.031136
972 %mean dimension of RB model: 30
973 %mean Delta of RB model: 0.001852
974 %-----------------------------------------------------
975 %time measurement over 10 dictionary rb simulations over training parameters
976 %mean time for rb linear combinations: 0.065579
977 %mean time for rb basis choice and simulation: 0.00065698
978 %mean dimension of RB model: 1
979 %mean Delta of RB model: 2.0037e-008
980 %-----------------------------------------------------
981 %time measurement over 10 dictionary rb simulations over distorted training parameters
982 %mean time for rb linear combinations: 0.065631
983 %mean time for rb basis choice and simulation: 0.004756
984 %mean dimension of RB model: 7.4
985 %mean Delta of RB model: 2.2047e-006
986 %-----------------------------------------------------
987 %time measurement over 10 random standard rb simulations
988 %result not found in cache
989 %call of @gen_reduced_data, now computed and cached, key=939761750
990 %mean time for standard rb simulation (with 350 basis vectors): 0.056699
991 %mean Delta of RB model: 2.4026e-006
992 %-----------------------------------------------------
993 %time measurement over 10 training standard rb simulations
994 %mean time for standard rb simulation (with 350 basis vectors): 0.053373
995 %mean Delta of RB model: 5.7061e-006
996 %-------------------------------------------------------
999 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
1001 % Ansatz: Mittelgrosse Lagrange Basis (N=400) und Laufzeit und
1002 % maximaler Testfehler
1003 % mittlere Laufzeit bestimmen
1005 % => Das ist maximaler Testfehler, den wir unterbieten koennen,
1006 % d.h. online-eps wird auf diesen Wert gesetzt
1008 % step 5 erzeugt diese greedy basis
1010 % - Grosses dictionary (1000) erstellen und Vergleich
1011 % mittlere Basisgroesse
1014 % Statt Lagrange eine Greedy basis: Immer noch besser?
1016 % Linearkombination: Nur fuer Teile, welche notwendig sind?
1017 % caching, computation on demand?
1024 %max error estimator: 9.8224 for mu = 0.25626 8.2271 0.10697
1026 %max error estimator: 8.5185 for mu = 3.8811 0.15771 0.10371
1028 %max error estimator: 8.4647 for mu = 0.11595 0.14404 5.6561
1030 %max error estimator: 6.1665 for mu = 8.014 0.10153 5.6284
1032 %max error estimator: 5.1794 for mu = 2.7683 4.835 0.10213
1034 %max error estimator: 4.8041 for mu = 0.10243 9.8289 2.0605
1036 %max error estimator: 4.9079 for mu = 0.1032 4.2649 9.6909
1038 %max error estimator: 3.7885 for mu = 1.7253 0.10151 8.775
1040 %max error estimator: 3.1312 for mu = 8.4301 2.1146 0.107
1042 %max error estimator: 2.5548 for mu = 1.9628 0.10026 1.5733
1044 %max error estimator: 1.8652 for mu = 0.10307 7.1583 0.42625
1046 %max error estimator: 1.6943 for mu = 0.11033 0.89484 4.0744
1048 %max error estimator: 1.5111 for mu = 9.7004 0.13858 1.9245
1050 %max error estimator: 1.2297 for mu = 0.24571 0.89305 0.10468
1052 %max error estimator: 1.0896 for mu = 1.9541 0.91071 0.12458
1054 %max error estimator: 0.75964 for mu = 0.18811 0.15704 1.0872
1056 %max error estimator: 0.58983 for mu = 0.81222 5.7646 0.10967
1058 %max error estimator: 0.55249 for mu = 0.11305 3.4472 2.5762
1060 %max error estimator: 0.48138 for mu = 2.4522 0.18095 0.27056
1062 %max error estimator: 0.4546 for mu = 8.2814 9.9166 0.12226
1064 %max error estimator: 0.45274 for mu = 0.51312 0.14674 9.29
1066 %max error estimator: 0.28178 for mu = 4.9214 0.50476 0.10382
1068 %max error estimator: 0.26892 for mu = 0.15448 4.4655 0.54738
1070 %max error estimator: 0.25045 for mu = 0.62626 0.15637 0.79931
1072 %max error estimator: 0.22665 for mu = 0.12261 9.5943 6.7078
1076 %max error estimator: 0.001505 for mu = 1.178 0.17993 9.6819
1082 %max error estimator: 9.005 for mu = 0.25626 8.2271 0.10697
1084 %max error estimator: 8.4454 for mu = 3.8811 0.15771 0.10371
1086 %max error estimator: 8.3351 for mu = 0.11595 0.14404 5.6561
1088 %max error estimator: 5.9311 for mu = 8.014 0.10153 5.6284
1090 %max error estimator: 5.1034 for mu = 2.7683 4.835 0.10213
1092 %max error estimator: 4.2945 for mu = 0.10243 9.8289 2.0605
1094 % => schneller als bei 20x20!!!!
1098 % once more with logarithmical uniform training data, 10000
1101 %max error estimator: 10.3628 for mu = 0.10011 9.2349 0.24892
1103 %max error estimator: 9.5436 for mu = 0.12119 0.1022 7.1872
1105 %max error estimator: 8.6987 for mu = 8.6991 0.10181 0.23366
1107 %max error estimator: 6.3551 for mu = 8.8078 0.10279 9.2802
1109 %max error estimator: 6.2258 for mu = 9.9205 4.5107 0.10198
1111 %max error estimator: 4.9601 for mu = 1.5491 8.1627 0.10109
1113 %max error estimator: 4.8369 for mu = 0.1016 1.0157 7.883
1115 %max error estimator: 3.6565 for mu = 0.58057 0.10272 0.10017
1117 %max error estimator: 3.2278 for mu = 0.20563 0.10107 1.105
1119 %max error estimator: 2.9 for mu = 8.6036 0.46089 0.10361
1121 % => bisschen langsamer als zuvor, schoen.
1123 % jetzt mu_min auf 0.01 statt 0.1:
1124 %max error estimator: 133.6249 for mu = 0.010017 8.8746 0.039272
1126 %max error estimator: 98.2215 for mu = 0.010022 0.18285 8.0601
1128 %max error estimator: 88.0778 for mu = 3.344 0.040784 0.010049
1130 %max error estimator: 106.0299 for mu = 8.4309 0.014886 0.011095
1132 %max error estimator: 75.0994 for mu = 2.5013 0.010011 0.47344
1134 %max error estimator: 90.6464 for mu = 0.35635 0.010006 7.9812
1136 %max error estimator: 101.1528 for mu = 0.012804 0.010849 7.4964
1138 %max error estimator: 80.2765 for mu = 9.3943 0.010423 0.10133
1140 % => schwierig! Wird ueberhaupt gescheit geloest? Ja, coercivity
1141 % einfach Faktor 10 groesser, also Fehler entsprechend groesser.