rbmatlab  1.16.09
 All Classes Namespaces Files Functions Variables Modules Pages
vi_gen_detailed_data.m
function detailed_data = vi_gen_detailed_data(model,model_data)
%function detailed_data = vi_gen_detailed_data(model,model_data)
%
% function generating the required detailed data for the RB-Method
% for Parametrized Variational Inequalities. The following
% generation-modes are available:
% - 'pure_snapshots': plain snapshots on a uniform parameter grid
% - 'enrichment_by_supremizers': snapshots plus supremizers K\L
% - 'separate-greedy': two independent greedy loops (u and lambda)
%   driven by the true projection error
% - 'greedy-true-projection-error', 'greedy-true-rb-error',
%   'greedy-rb-estimator': one combined greedy loop driven by the
%   respective error notion
% all modes work with a uniform parameter grid and
% qr-orthonormalization / conditioning improvement of the reduced
% primal space.
%
% Generated fields of detailed_data:
% RB_U: reduced space for the primal solution
% RB_L: reduced space for the Lagrange multiplier
% RB_info: bookkeeping: selected parameters, error decay sequences
%          and computation time

% I.Maier 30.05.2011

detailed_data = model_data;

tic;

switch model.RB_generation_mode

 case 'pure_snapshots'

  % generate uniformly distributed parameter values
  par.numintervals = model.RB_numintervals;
  par.range = model.mu_ranges;

  mu_mesh = cubegrid(par);
  mu_train = get(mu_mesh,'vertex');

  K = model.get_inner_product_matrix(model,model_data);

  % one primal and one dual snapshot per training parameter
  U = zeros(model_data.H,size(mu_train,1));
  L = zeros(model_data.H,size(mu_train,1));
  for m = 1:size(mu_train,1)

    model = set_mu(model,mu_train(m,:));
    sim_data = detailed_simulation(model,model_data);
    U(:,m) = sim_data.U;
    L(:,m) = sim_data.L;
  end;

  detailed_data.RB_U = improve_conditioning(U,K);
  detailed_data.RB_L = L;
  detailed_data.RB_info.mu_train = mu_train;

 case 'enrichment_by_supremizers'

  % generate uniformly distributed parameter values
  par.numintervals = model.RB_numintervals;
  par.range = model.mu_ranges;

  mu_mesh = cubegrid(par);
  mu_train = get(mu_mesh,'vertex');

  K = model.get_inner_product_matrix(model,model_data);

  % primal basis gets two columns per parameter:
  % the snapshot and its supremizer K\lambda
  U = zeros(model_data.H,size(mu_train,1)*2);
  L = zeros(model_data.H,size(mu_train,1));
  for m = 1:size(mu_train,1)

    model = set_mu(model,mu_train(m,:));
    sim_data = detailed_simulation(model,model_data);
    U(:,2*m-1) = sim_data.U;
    U(:,2*m) = K\sim_data.L;
    L(:,m) = sim_data.L;
  end;

  detailed_data.RB_U = orthonormalize(U,K,1e-16,'qr');
  detailed_data.RB_L = L;
  detailed_data.RB_info.mu_train = mu_train;

 case 'separate-greedy'
  % separate generation of U and lambda basis
  % by projection error minimization

  % generate uniformly distributed parameter values
  par.numintervals = model.RB_numintervals;
  par.range = model.mu_ranges;

  mu_mesh = cubegrid(par);
  mu_train = get(mu_mesh,'vertex');
  ntrain = size(mu_train,1);

  K = model.get_inner_product_matrix(model,model_data);

  disp('computing training snapshots');
  [Utrain, Ltrain ] = filecache_function(...
      @compute_training_snapshots,model,model_data,mu_train);

  % initialize empty bases and bookkeeping sequences
  U = zeros(model_data.H,0);
  L = zeros(model_data.H,0);
  max_u_err_sequence = [];
  max_lambda_err_sequence = [];
  mu_u_sequence = zeros(length(model.mu_names),0);
  mu_lambda_sequence = zeros(length(model.mu_names),0);
  mu_u_index_sequence = [];
  mu_lambda_index_sequence = [];

  % greedy loop for u
  disp('Greedy loop for u');
  err = 1e10;
  N = 0;
  % initial mu: middle training parameter
  % (ceil guarantees an integer index for even training set sizes)
  max_i = ceil((ntrain+1)/2);
  mu_max = mu_train(max_i,:);
  continue_loop = (err > model.RB_stop_epsilon && N < ...
                   model.RB_stop_Nmax);

  while continue_loop
    model = set_mu(model,mu_max);
    sim_data = detailed_simulation(model,model_data);
    U = [U,sim_data.U];
    mu_u_sequence = [mu_u_sequence,mu_max(:)];
    mu_u_index_sequence = [mu_u_index_sequence, max_i];

    % (the following should be done without orthonormalization in
    % the future...)
    % compute K-orthogonal projection error of all training snapshots:
    UO = orthonormalize_loop(U,K,2e-16);
    PUtrain = UO * (UO' * K * Utrain);
    err_u = sqrt(sum((Utrain-PUtrain).* (K*(Utrain-PUtrain))));
    if max(err_u(mu_u_index_sequence))>1e-8
      % projection error of already included snapshots must vanish
      disp('accuracy problem in orthogonalization, please check')
      keyboard;
    else % set to zero, such that not selected again
      err_u(mu_u_index_sequence) = 0;
    end;
    [err, max_i] = max(err_u);
    if any(mu_u_index_sequence==max_i)
      disp('selected mu twice, training set exhausted')
      N = N+1;
      continue_loop = 0;
    else
      mu_max = mu_train(max_i,:);
      max_u_err_sequence = [max_u_err_sequence, err];
      N = N+1;
      continue_loop = err > model.RB_stop_epsilon && N < model.RB_stop_Nmax;
    end;
    disp(['N = ',num2str(N),...
          ', err_u = ',num2str(err_u(max_i))]);
  end;

  % greedy loop for lambda
  disp('Greedy loop for lambda');
  err = 1e10;
  N = 0;
  % initial mu: middle training parameter
  max_i = ceil((ntrain+1)/2);
  mu_max = mu_train(max_i,:);
  mu_lambda_index_sequence = [];
  continue_loop = (err > model.RB_stop_epsilon && N < model.RB_stop_Nmax);
  while continue_loop
    model = set_mu(model,mu_max);
    sim_data = detailed_simulation(model,model_data);
    L = [L,sim_data.L];
    Kinv_L = K\L;
    mu_lambda_sequence = [mu_lambda_sequence,mu_max(:)];
    mu_lambda_index_sequence = [mu_lambda_index_sequence, max_i];

    % (the following should be done without orthonormalization in
    % the future...)
    PLtrain = Ltrain;
    % for each single training snapshot compute the projection onto
    % the nonnegative cone of the current dual basis by a small QP:
    %   min_alpha |L*alpha - Ltrain(:,m)|_{K^-1}^2  s.t. alpha >= 0
    H = 2*L'*Kinv_L;
    H = 0.5*(H+H');  % enforce exact symmetry for quadprog
    options = optimset('TolFun',0); % relative function change limit
    options.MaxIter = 1000;
    options.Display='off';
    for m = 1:ntrain;
      f = -2*Ltrain(:,m)'*Kinv_L;
      fprintf('*');
      [alphaN,Fval,Exitflag] = quadprog(H,f,[],[],[],[],0,Inf,[],options);
      PLtrain(:,m) = L*alphaN;
    end;
    fprintf('\n');
    Kinv_PLerr = K\(Ltrain-PLtrain);
    err_lambda = sqrt(sum((Ltrain-PLtrain).* (Kinv_PLerr)));
    % set earlier errors to 0, such that not selected again
    err_lambda(mu_lambda_index_sequence)= 0;
    [err, max_i] = max(err_lambda);
    if any(mu_lambda_index_sequence==max_i)
      % training set exhausted: all remaining errors vanish
      N = N+1;
      continue_loop = 0;
    else
      mu_max = mu_train(max_i,:);
      max_lambda_err_sequence = [max_lambda_err_sequence, err];
      N = N+1;
      continue_loop = (err > model.RB_stop_epsilon && N < model.RB_stop_Nmax);
    end;
    disp(['N = ',num2str(N),...
          ', err_lambda = ',num2str(err_lambda(max_i))]);
  end;

  if model.supremizer_enrichment
    % include supremizers into primal basis
    U = [U, K\L];
    detailed_data.RB_U_with_supr = U;
  end;

  detailed_data.RB_U = improve_conditioning(U,K);
  detailed_data.RB_L = L;
  detailed_data.RB_info.max_u_err_sequence = max_u_err_sequence;
  detailed_data.RB_info.max_lambda_err_sequence = max_lambda_err_sequence;
  detailed_data.RB_info.mu_u_sequence = mu_u_sequence;
  detailed_data.RB_info.mu_lambda_sequence = mu_lambda_sequence;
  detailed_data.RB_info.mu_lambda_index_sequence = ...
      mu_lambda_index_sequence;

 case {'greedy-true-projection-error','greedy-true-rb-error',...
       'greedy-rb-estimator'};

  if ~isfield(model,'dual_lin_independence_mode')
    model.dual_lin_independence_mode = 0;
  end;

  % select the error notion driving the greedy loop:
  if isequal(model.RB_generation_mode, ...
             'greedy-true-projection-error')
    use_projection_error = 1;
  else
    use_projection_error = 0;
  end;

  if isequal(model.RB_generation_mode, ...
             'greedy-true-rb-error')
    use_true_rb_error = 1;
  else
    use_true_rb_error = 0;
  end;

  % generate uniformly distributed parameter values
  par.numintervals = model.RB_numintervals;
  par.range = model.mu_ranges;

  mu_mesh = cubegrid(par);
  mu_train = get(mu_mesh,'vertex');
  % mu_train is (number of parameters) x (parameter dimension)
  ntrain = size(mu_train,1);

  K = model.get_inner_product_matrix(model,model_data);

  % detailed training snapshots only required for the "true" error modes
  if (use_true_rb_error ~= 0) || (use_projection_error ~= 0)
    disp('computing training snapshots');
    [Utrain, Ltrain ] = filecache_function(...
        @compute_training_snapshots,model,model_data,mu_train);
  end;

  % initialize empty bases and bookkeeping sequences
  U = zeros(model_data.H,0);
  L = zeros(model_data.H,0);
  max_err_sequence = [];
  mu_sequence = zeros(length(model.mu_names),0);
  mu_index_sequence = [];
  max_u_err_sequence = [];
  max_lambda_err_sequence = [];

  % greedy loop
  err = 1e10;
  N = 0;
  % initial mu: middle training parameter
  % (ceil guarantees an integer index for even training set sizes)
  max_i = ceil((ntrain+1)/2);
  mu_max = mu_train(max_i,:);

  while err > model.RB_stop_epsilon && N < model.RB_stop_Nmax;
    model = set_mu(model,mu_max);
    sim_data = filecache_function(@detailed_simulation,model,model_data);
    U = [U,sim_data.U];
    L = [L,sim_data.L];
    mu_sequence = [mu_sequence,mu_max(:)];
    mu_index_sequence = [mu_index_sequence,max_i];
    if use_projection_error
      % (the following should be done without orthonormalization in
      % the future...)
      % compute projection errors of all training snapshots:
      UO = orthonormalize_loop(U,K,2e-16);
      Kinv_L = K\L;
      PUtrain = UO * (UO' * K * Utrain);

      % l2-projection of dual snapshots onto span(L) in the K^-1 norm:
      PLtrain = L * pinv(L'*Kinv_L)* (Kinv_L' * Ltrain);

      Kinv_PLerr = K\(Ltrain-PLtrain);

      err_u = sqrt(sum((Utrain-PUtrain).* (K*(Utrain-PUtrain))));
      err_lambda = sqrt(sum((Ltrain-PLtrain).* (Kinv_PLerr)));
    else % use true rb error or estimator

      % construct reduced basis (with supr.) for rb simulation:
      detailed_data = model_data;
      if model.dual_lin_independence_mode == 0 %
        detailed_data.RB_L = L;
      elseif model.dual_lin_independence_mode == 1
        % replace dual snapshots by the unit vectors of their
        % jointly active degrees of freedom
        dual_speye = speye(size(L,1));
        L_dofs = find(max(abs(L),[],2)>1e-11);
        detailed_data.RB_L = full(dual_speye(:,L_dofs));
        disp(['Found Linear independent dual basis of size ' ,...
              num2str(length(L_dofs)),' for ',...
              num2str(size(L,2)),' dual snapshots']);
      end;
      if model.supremizer_enrichment
        detailed_data.RB_U = improve_conditioning(...
            [U, K\detailed_data.RB_L],K);
      else
        detailed_data.RB_U = improve_conditioning([U],K);
      end;
      disp(['Size of U without enrichment: ',num2str(size(U,2)),...
            ', Size of orth(U + possible supr.) : ',num2str(size(detailed_data.RB_U,2))])

      if use_true_rb_error
        UNtrain = zeros(model_data.H,ntrain);
        LNtrain = zeros(model_data.H,ntrain);
      else % use rb error estimator
        err_u= zeros(1,ntrain);
        err_lambda= zeros(1,ntrain);
        model.enable_error_estimator = 1;
      end;
      reduced_data = gen_reduced_data(model, detailed_data);
      for m = 1:ntrain
        model = set_mu(model,mu_train(m,:));
        sim_data = rb_simulation(model,reduced_data);
        if use_true_rb_error
          % reconstruct full vectors from reduced coefficients
          UNtrain(:,m) = detailed_data.RB_U * sim_data.UN;
          LNtrain(:,m) = detailed_data.RB_L * sim_data.LN;
        else % use rb_error_estimator
          err_u(m) = sim_data.Delta_u;
          err_lambda(m) = sim_data.Delta_lambda;
        end;
      end;
      if use_true_rb_error
        err_u = sqrt(sum((Utrain-UNtrain).* (K*(Utrain-UNtrain))));
        Kinv_Lerr = K\(Ltrain-LNtrain);
        err_lambda = sqrt(sum((Ltrain-LNtrain).* (Kinv_Lerr)));
      end;
    end;

    % set errors of recent values to 0 in order to prevent "doubles"
    err_u(mu_index_sequence) = 0;
    err_lambda(mu_index_sequence) = 0;
    [err, max_i] = max(model.w_u * err_u + model.w_lambda * ...
                       err_lambda);
    disp(['N = ',num2str(N),...
          ', err = ',num2str(err),...
          ', err_u = ',num2str(err_u(max_i)),...
          ', err_lambda = ',num2str(err_lambda(max_i)) ]);
    mu_max = mu_train(max_i,:);
    max_err_sequence = [max_err_sequence, err];
    max_u_err_sequence = [max_u_err_sequence, err_u(max_i)];
    max_lambda_err_sequence = [max_lambda_err_sequence, err_lambda(max_i)];
    N = N+1;
  end;

  % return results
  detailed_data = model_data;

  detailed_data.RB_U_no_supr = U;

  if model.dual_lin_independence_mode == 0 %
    detailed_data.RB_L = L;
  elseif model.dual_lin_independence_mode == 1
    dual_speye = speye(size(L,1));
    L_dofs = find(max(abs(L),[],2)>1e-11);
    detailed_data.RB_L = full(dual_speye(:,L_dofs));
    detailed_data.L_no_post_processing = L;
  end;

  if model.supremizer_enrichment
    % include supremizers into primal basis
    U = [U, K\detailed_data.RB_L];
    detailed_data.RB_U_with_supr = U;
  end;

  % numerical orthonormalization may lead to additional approximation
  % error, hence only conditioning improvement is applied:
  detailed_data.RB_U = improve_conditioning(U,K);

  detailed_data.RB_info.max_err_sequence = max_err_sequence;
  detailed_data.RB_info.max_u_err_sequence = max_u_err_sequence;
  detailed_data.RB_info.max_lambda_err_sequence = max_lambda_err_sequence;
  detailed_data.RB_info.mu_sequence = mu_sequence;

 otherwise
  error('RB generation mode unknown');
end;

t = toc;
detailed_data.RB_info.computation_time = t;
function [Utrain,Ltrain] = compute_training_snapshots(model,model_data,mu_train)
% Compute detailed primal (Utrain) and dual (Ltrain) snapshots for
% every parameter row of mu_train; one progress dot is printed per
% completed detailed simulation.
disp('computing training snapshots');
% preallocate one snapshot column per training parameter
nsnapshots = size(mu_train,1);
Utrain = zeros(model_data.H,nsnapshots);
Ltrain = zeros(model_data.H,nsnapshots);
for i = 1:nsnapshots
  model = set_mu(model,mu_train(i,:));
  sim_data = detailed_simulation(model,model_data);
  Utrain(:,i) = sim_data.U;
  Ltrain(:,i) = sim_data.L;
  fprintf('.');
end;
fprintf('\n');
function UO = orthonormalize_loop(U,K,eps);
% Orthonormalize the columns of U with respect to the inner product
% matrix K by qr-orthonormalization, repeating the procedure until
% the Gram matrix UO'*K*UO deviates from the identity by less than
% 1e-12 in the maximum entry.

disp('orthogonalizing!');

% initial qr-orthonormalization pass with the caller-supplied tolerance
UO = orthonormalize_qr(U,K,eps);
gram = UO' * K * UO;
deviation = max(max(abs(gram - eye(size(gram)))));

% repeat with machine-precision tolerance until the basis is
% numerically K-orthonormal
while deviation > 1e-12
  disp('Detected orthonormalization inaccuracy.');
  disp('PERFORMING REPEATED ORTHONORMALIZATION');
  UO = orthonormalize_qr(UO,K,2e-16);
  gram = UO' * K * UO;
  deviation = max(max(abs(gram - eye(size(gram)))));
end;
function varargout = filecache_function(funcptr, varargin)
function used for file-caching other function calls.
A hierarchical cubegrid of arbitrary dimension.
Definition: cubegrid.m:17
Customizable implementation of an abstract greedy algorithm.
Definition: DuneRBLeafNode.m:1