function detailed_data = vi_gen_detailed_data(model,model_data)
%
% function detailed_data = vi_gen_detailed_data(model,model_data)
%
% function generating the required detailed data for the RB-Method
% for Parametrized Variational Inequalities. currently two
% generation-modes are available:
% - enrichment_by_supremizers
% both modes work with a uniform parameter grid and
% qr-orthonormalization of the reduced primal space.
% Generated fields of detailed_data:
% RB_U: reduced space for the primal solution
% RB_L: reduced space for the Lagrange multiplier
%
% NOTE(review): this excerpt drops many original lines (gaps in the
% embedded numbering): several loop/branch terminators, the first case
% label of the switch below and the construction of mu_mesh (presumably
% mu_mesh = cubegrid(par) -- TODO confirm against full source) are not
% visible here. Review notes below mark the visible gaps.
detailed_data = model_data;
% NOTE(review): timer start missing here; t is stored at the very end.
switch model.RB_generation_mode
% NOTE(review): first case label of the switch is missing in this excerpt.
% generate uniformly distributed parameter values
par.numintervals = model.RB_numintervals;
par.range = model.mu_ranges;
% NOTE(review): mu_mesh construction missing; the next statement was
% split over three lines by the extraction.
mu_train =
get(mu_mesh,
'vertex');
K = model.get_inner_product_matrix(model,model_data);
% one snapshot column per training parameter
U = zeros(model_data.H,size(mu_train,1));
L = zeros(model_data.H,size(mu_train,1));
for m = 1:size(mu_train,1)
model = set_mu(model,mu_train(m,:));
sim_data = detailed_simulation(model,model_data);
% NOTE(review): snapshot assignments and the loop `end` are missing here.
detailed_data.RB_U = improve_conditioning(U,K);
detailed_data.RB_L = L;
detailed_data.RB_info.mu_train = mu_train;
case 'enrichment_by_supremizers'
% generate uniformly distributed parameter values
par.numintervals = model.RB_numintervals;
par.range = model.mu_ranges;
mu_train = get(mu_mesh,'vertex');
K = model.get_inner_product_matrix(model,model_data);
% two primal columns per parameter: primal snapshot and supremizer
U = zeros(model_data.H,size(mu_train,1)*2);
L = zeros(model_data.H,size(mu_train,1));
for m = 1:size(mu_train,1)
model = set_mu(model,mu_train(m,:));
sim_data = detailed_simulation(model,model_data);
U(:,2*m-1) = sim_data.U;
% supremizer: inner-product matrix inverse applied to dual snapshot
U(:,2*m) = K\sim_data.L;
detailed_data.RB_U = orthonormalize(U,K,1e-16,'qr');
detailed_data.RB_L = L;
detailed_data.RB_info.mu_train = mu_train;
case 'separate-greedy'
% separate generation of U and lambda basis
% by projection error minimization
% generate uniformly distributed parameter values
par.numintervals = model.RB_numintervals;
par.range = model.mu_ranges;
mu_train = get(mu_mesh,'vertex');
ntrain = size(mu_train,1);
K = model.get_inner_product_matrix(model,model_data);
disp('computing training snapshots');
% NOTE(review): beginning of this call is missing -- presumably
% [Utrain,Ltrain] = filecache_function( ... -- confirm.
@compute_training_snapshots,model,model_data,mu_train);
% params.plot = @(data,grid,params) plot(data);
% detailed_data.RB_L = Ltrain;
% detailed_data.RB_U = Utrain;
% plot_detailed_data(model,detailed_data);
% initialize empty bases
U = zeros(model_data.H,0);
L = zeros(model_data.H,0);
max_u_err_sequence = [];
max_lambda_err_sequence = [];
mu_u_sequence = zeros(length(model.mu_names),0);
mu_lambda_sequence = zeros(length(model.mu_names),0);
mu_u_index_sequence = [];
mu_lambda_index_sequence = [];
% NOTE(review): the string literal below was split by the extraction.
disp('
Greedy loop for u');
% initial mu: middle value:
max_i = (length(mu_train)+1)/2;
mu_max = mu_train(max_i,:);
% NOTE(review): initialization of err and N missing; condition truncated.
continue_loop = (err > model.RB_stop_epsilon && N < ...
% greedy extension loop for the primal basis U (loop header missing)
model = set_mu(model,mu_max);
sim_data = detailed_simulation(model,model_data);
mu_u_sequence = [mu_u_sequence,mu_max(:)];
mu_u_index_sequence = [mu_u_index_sequence, max_i]
% (the following should be done without orthonormalization in
% compute projection error:
UO = orthonormalize_loop(U,K,2e-16);
% UO = improve_conditioning(U,K);
PUtrain = UO * (UO' * K * Utrain);
% K-norm of the projection error, one entry per training snapshot
err_u = sqrt(sum((Utrain-PUtrain).* (K*(Utrain-PUtrain))));
if max(err_u(mu_u_index_sequence))>1e-8
disp('accuracy problem in orthogonalization, please check')
else % set to zero, such that not selected again
% NOTE(review): the `= 0` assignment appears to be lost by the
% extraction; as written this statement has no effect.
err_u(mu_u_index_sequence);
[err, max_i] = max(err_u);
if ~isempty(find(mu_u_index_sequence==max_i))
disp('selected mu twice, training set exhausted')
mu_max = mu_train(max_i,:);
max_u_err_sequence = [max_u_err_sequence, err];
continue_loop = err > model.RB_stop_epsilon && N < model.RB_stop_Nmax;
disp(['N = ',num2str(N),...
', err_u = ',num2str(err_u(max_i))]);
% greedy loop for lambda
% NOTE(review): string literal below split by the extraction.
disp('
Greedy loop for lambda');
% initial mu: middle value:
mu_max = mu_train((length(mu_train)+1)/2,:);
max_i = (length(mu_train)+1)/2;
mu_lambda_index_sequence = [];
continue_loop = (err > model.RB_stop_epsilon && N < model.RB_stop_Nmax);
model = set_mu(model,mu_max);
sim_data = detailed_simulation(model,model_data);
mu_lambda_sequence = [mu_lambda_sequence,mu_max(:)];
mu_lambda_index_sequence = [mu_lambda_index_sequence, max_i];
% (the following should be done without orthonormalization in
% err_lambda = sqrt(sum((Ltrain-PLtrain).* (K*(Ltrain-PLtrain))));
%LO = K * orthonormalize_loop(Kinv_L,K,2e-16);
% Kinv_Ltrain = K\Ltrain;
% PLtrain = LO * (LO' * Kinv_Ltrain);
% for each single snapshot compute projection by small QP....
%%% ordinary l2-projection:
%%% PLtrain = L * pinv(L'*Kinv_L)* (Kinv_L' * Ltrain);
options = optimset('TolFun',0); % relative function change limit
options.MaxIter = 1000;
options.Display='off';
% NOTE(review): enclosing loop over m and the definitions of the QP
% matrix H and Kinv_L are missing in this excerpt.
f = -2*Ltrain(:,m)'*Kinv_L;
% nonnegativity-constrained projection coefficients via quadprog
[alphaN,Fval,Exitflag] = quadprog(H,f,[],[],[],[],0,Inf,[],options);
PLtrain(:,m) = L*alphaN;
% if m == max_i % error should be minimum!
% plot([L*alphaN,Ltrain(:,m)]);
% K^{-1}-norm of the dual projection error
Kinv_PLerr = K\(Ltrain-PLtrain);
err_lambda = sqrt(sum((Ltrain-PLtrain).* (Kinv_PLerr)));
% set earlier errors to 0...
err_lambda(mu_lambda_index_sequence)= 0;
[err, max_i] = max(err_lambda);
if ~isempty(find(mu_lambda_index_sequence==max_i))
% disp('selected mu twice, training set exhausted? Please check')
% f = -2*Ltrain(:,m)'*Kinv_L;
% [alphaN,Fval,Exitflag] = quadprog(H,f,[],[],[],[],0,Inf,[],options);
% plot([L*alphaN,Ltrain(:,m)]);
% disp('error from optimization functional:')
% sqrt(Fval + Ltrain(:,m)'*(K\Ltrain(:,m)))
% disp('error from norm:')
% Ldiff = L*alphaN-Ltrain(:,m);
% sqrt(Ldiff'*(K\Ldiff))
mu_max = mu_train(max_i,:);
max_lambda_err_sequence = [max_lambda_err_sequence, err];
continue_loop = (err > model.RB_stop_epsilon && N < model.RB_stop_Nmax);
disp(['N = ',num2str(N),...
', err_lambda = ',num2str(err_lambda(max_i))]);
if model.supremizer_enrichment
% include supremizers into primal basis
% U_part = [U(:,1:n_part), K\(L(:,1:n_part))];
detailed_data.RB_U_with_supr = U;
detailed_data.RB_U = improve_conditioning(U,K);
detailed_data.RB_L = L;
detailed_data.RB_info.max_u_err_sequence = max_u_err_sequence;
detailed_data.RB_info.max_lambda_err_sequence = max_lambda_err_sequence;
detailed_data.RB_info.mu_u_sequence = mu_u_sequence;
detailed_data.RB_info.mu_lambda_sequence = mu_lambda_sequence;
detailed_data.RB_info.mu_lambda_index_sequence = ...
mu_lambda_index_sequence;
% NOTE(review): the following case label was split over several lines
% by the extraction.
case {
'greedy-true-projection-error',
'greedy-true-rb-error',...
'greedy-rb-estimator'};
% default: use the dual snapshots directly as dual basis (mode 0)
if ~isfield(model,
'dual_lin_independence_mode')
model.dual_lin_independence_mode = 0;
if isequal(model.RB_generation_mode, ...
'greedy-true-projection-error')
use_projection_error = 1;
use_projection_error = 0;
if isequal(model.RB_generation_mode, ...
'greedy-true-rb-error')
use_true_rb_error = 1;
use_true_rb_error = 0;
% generate uniformly distributed parameter values
par.numintervals = model.RB_numintervals;
par.range = model.mu_ranges;
mu_train = get(mu_mesh,'vertex');
% NOTE(review): size(mu_train,2) here vs size(mu_train,1) in the other
% cases -- possibly inconsistent; confirm orientation of mu_train.
ntrain = size(mu_train,2);
K = model.get_inner_product_matrix(model,model_data);
if (use_true_rb_error ~= 0) | (use_projection_error ~= 0)
disp('computing training snapshots');
% NOTE(review): beginning of this call missing (filecache_function?).
@compute_training_snapshots,model,model_data,mu_train);
% initialize empty bases
U = zeros(model_data.H,0);
L = zeros(model_data.H,0);
max_err_sequence = [];
mu_sequence = zeros(length(model.mu_names),0);
mu_index_sequence = [];
max_u_err_sequence = [];
max_lambda_err_sequence = [];
% initial mu: middle value:
mu_max = mu_train((length(mu_train)+1)/2,:);
max_i = (length(mu_train)+1)/2;
% NOTE(review): initialization of err and N not visible in this excerpt.
while err > model.RB_stop_epsilon && N < model.RB_stop_Nmax;
model = set_mu(model,mu_max);
% sim_data = detailed_simulation(model,model_data);
mu_sequence = [mu_sequence,mu_max(:)];
mu_index_sequence = [mu_index_sequence,max_i];
if use_projection_error
% (the following should be done without orthonormalization in
% compute projection error:
UO = orthonormalize_loop(U,K,2e-16);
% UO = improve_conditioning(U,K);
PUtrain = UO * (UO' * K * Utrain);
LO = K * orthonormalize_loop(Kinv_L,K,2e-16);
% Kinv_Ltrain = K\Ltrain;
% PLtrain = LO * (LO' * Kinv_Ltrain);
PLtrain = L * pinv(L'*Kinv_L)* (Kinv_L' * Ltrain);
Kinv_PLerr = K\(Ltrain-PLtrain);
err_u = sqrt(sum((Utrain-PUtrain).* (K*(Utrain-PUtrain))));
% err_lambda = sqrt(sum((Ltrain-PLtrain).* (K*(Ltrain-PLtrain))));
err_lambda = sqrt(sum((Ltrain-PLtrain).* (Kinv_PLerr)));
else % use true rb error or estimator
% construct reduced basis (with supr.) for rb simulation:
detailed_data = model_data;
% detailed_data = [];
if model.dual_lin_independence_mode == 0 %
detailed_data.RB_L = L;
elseif model.dual_lin_independence_mode == 1
% unit-vector dual basis on all dofs touched by any dual snapshot
dual_speye = speye(size(L,1));
L_dofs = find(max(abs(L),[],2)>1e-11);
detailed_data.RB_L = full(dual_speye(:,L_dofs));
disp(['Found Linear independent dual basis of size ' ,...
num2str(length(L_dofs)),' for ',...
num2str(size(L,2)),' dual snapshots']);
if model.supremizer_enrichment
% detailed_data.RB_U = [U,K\L];
% detailed_data.RB_U = orthonormalize_loop([U, K\L] ,K,2e-16);
detailed_data.RB_U = improve_conditioning(...
[U, K\detailed_data.RB_L],K);
% detailed_data.RB_U = U;
% detailed_data.RB_U = orthonormalize_loop([U] ,K,2e-16);
detailed_data.RB_U = improve_conditioning([U],K);
% detailed_data.X = model_data.X;
% detailed_data.dx = model_data.dx;
disp(['Size of U without enrichment: ',num2str(size(U,2)),...
', Size of orth(U + possible supr.) : ',num2str(size(detailed_data.RB_U,2))])
UNtrain = zeros(model_data.H,ntrain);
LNtrain = zeros(model_data.H,ntrain);
else % use rb error estimator
err_u= zeros(1,size(mu_train,1));
err_lambda= zeros(1,size(mu_train,1));
model.enable_error_estimator = 1;
reduced_data = gen_reduced_data(model, detailed_data);
for m = 1:size(mu_train,1)
model = set_mu(model,mu_train(m,:));
sim_data = rb_simulation(model,reduced_data);
UNtrain(:,m) = detailed_data.RB_U * sim_data.UN;
LNtrain(:,m) = detailed_data.RB_L * sim_data.LN;
else % use rb_error_estimator
err_u(m) = sim_data.Delta_u;
err_lambda(m) = sim_data.Delta_lambda;
% true rb errors in the K- resp. K^{-1}-norm
err_u = sqrt(sum((Utrain-UNtrain).* (K*(Utrain-UNtrain))));
% err_lambda = sqrt(sum((Ltrain-PLtrain).* (K*(Ltrain-PLtrain))));
Kinv_Lerr = K\(Ltrain-LNtrain);
err_lambda = sqrt(sum((Ltrain-LNtrain).* (Kinv_Lerr)));
% set errors of recent values to 0 in order to prevent "doubles"
err_u(mu_index_sequence) = 0;
err_lambda(mu_index_sequence) = 0;
% NOTE(review): tail of this weighted-error expression is missing.
[err, max_i] = max(model.w_u * err_u + model.w_lambda * ...
disp(['N = ',num2str(N),...
', err = ',num2str(err),...
', err_u = ',num2str(err_u(max_i)),...
', err_lambda = ',num2str(err_lambda(max_i)) ]);
mu_max = mu_train(max_i,:);
max_err_sequence = [max_err_sequence, err];
max_u_err_sequence = [max_u_err_sequence, err_u(max_i)];
max_lambda_err_sequence = [max_lambda_err_sequence, err_lambda(max_i)];
% final assembly of detailed_data after the greedy loop
detailed_data = model_data;
detailed_data.RB_U_no_supr = U;
%n_part = ceil(size(U,2)/2);
%U_part = [U(:,1:n_part)];
if model.dual_lin_independence_mode == 0 %
detailed_data.RB_L = L;
elseif model.dual_lin_independence_mode == 1
dual_speye = speye(size(L,1));
L_dofs = find(max(abs(L),[],2)>1e-11);
detailed_data.RB_L = full(dual_speye(:,L_dofs));
detailed_data.L_no_post_processing = L;
if model.supremizer_enrichment
% include supremizers into primal basis
U = [U, K\detailed_data.RB_L];
% U_part = [U(:,1:n_part), K\(L(:,1:n_part))];
detailed_data.RB_U_with_supr = U;
% no orthonormalization, works for basis up to 50:
%detailed_data.RB_U = U;
% numerical orthonormalization may lead to additional approximation error
% detailed_data.RB_U = orthonormalize_loop(U,K,2e-16);
detailed_data.RB_U = improve_conditioning(U,K);
% return part of the basis to see, whether order is maintained
%detailed_data.RB_U_part = orthonormalize_loop(U_part,K,2e-16);
% detailed_data.RB_L = L;
detailed_data.RB_info.max_err_sequence = max_err_sequence;
detailed_data.RB_info.max_u_err_sequence = max_u_err_sequence;
detailed_data.RB_info.max_lambda_err_sequence = max_lambda_err_sequence;
detailed_data.RB_info.mu_sequence = mu_sequence;
% otherwise-branch of the switch (its label is missing in this excerpt)
error('RB generation mode unknwon');
% NOTE(review): timer stop (t = toc(...)?) missing in this excerpt.
detailed_data.RB_info.computation_time = t;
function [Utrain,Ltrain] = compute_training_snapshots(model,model_data,mu_train)
%function [Utrain,Ltrain] = compute_training_snapshots(model,model_data,mu_train)
%
% helper computing the detailed training snapshots for all parameter
% vectors given as rows of mu_train.
%
% Utrain: matrix of primal snapshots, one column per training parameter
% Ltrain: matrix of dual (Lagrange multiplier) snapshots, same layout
disp('computing training snapshots');
% compute training set: preallocate one column per parameter vector
Utrain = zeros(model_data.H,size(mu_train,1));
Ltrain = zeros(model_data.H,size(mu_train,1));
for m = 1:size(mu_train,1)
  model = set_mu(model,mu_train(m,:));
  sim_data = detailed_simulation(model,model_data);
  Utrain(:,m) = sim_data.U;
  Ltrain(:,m) = sim_data.L;
end;
function UO = orthonormalize_loop(U,K,eps);
% qr-orthonormalization of the columns of U with respect to the inner
% product matrix K, repeated once more if the resulting gramian deviates
% too far from the identity.
%
% NOTE(review): this excerpt is missing lines: KN is used below but its
% definition (presumably KN = UO'*K*UO) and the if-guard around the
% repeated orthonormalization are not visible -- confirm against the
% full source.
%if (cond(U'*K*U)>1e6);
disp('orthogonalizing!');
% perform not exact orthogonalization, but
% conditioning improvement.
%unorminv = (sqrt(sum((K * UO).*UO,1))).^(-1);
%UO = UO * diag(unorminv(:));
%% the following performs repeated qr orthogonalization
UO = orthonormalize_qr(U,K,eps);
% maximum deviation of the gramian from identity = orthonormality defect
e = (max(max(abs(KN-eye(size(KN))))));
disp('Detected orthonormalization inaccuracy.');
disp('PERFORMING REPEATED ORTHONORMALIZATION');
UO = orthonormalize_qr(UO,K,2e-16);
e = (max(max(abs(KN-eye(size(KN))))));
% error('error in orthonormalization, please check!');
function varargout = filecache_function(funcptr, varargin)
function used for file-caching other function calls.
A hierarchical cubegrid of arbitrary dimension.
Customizable implementation of an abstract greedy algorithm.