rbmatlab  1.16.09
Algorithm.m
classdef Algorithm < handle
%ALGORITHM The main implementation of the LRFG algorithm
% This class implements the low-rank factor greedy (LRFG) algorithm as
% presented by A. Schmidt and B. Haasdonk (2016). It is a generic class
% which can be applied to all OOP models that implement a certain
% interface.
%
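% A minimal usage sketch ('MyModel' is a placeholder for any model class
% implementing the required interface, gen_model_data stands for whatever
% routine builds the matching model data, and the Greedy.LRFG package
% prefix is inferred from the BasisInfo usage in run()):
%
%   model      = MyModel();
%   model_data = gen_model_data(model);
%   lrfg       = Greedy.LRFG.Algorithm(model, model_data);
%   [V, W, info] = lrfg.run();
%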

  properties(GetAccess=protected, SetAccess=protected)
    model; % Reference to the ReducedModel instance
    model_data; % Reference to the ModelData instance
  end
  properties
    M_train; % Training parameter set, kept for subsequent use

    error_indicator = 'normalized_residual'; % Use the normalized residual as
                                             % error indicator

    orthonormalize_E = true; % Orthonormalize the basis with respect to the mass matrix
  end

  methods
    function this = Algorithm(model, model_data, M_train)
      % ALGORITHM Constructor for the LRFG algorithm
      %
      this.model = model;
      this.model_data = model_data;

      if ~exist('M_train', 'var')
        M_train = ParameterSampling.Uniform(9);
      end

      this.M_train = M_train;
    end

    function set.M_train(this, M_train)
      if ~isa(M_train, 'ParameterSampling.Interface')
        error('M_train must be of type ParameterSampling.Interface')
      end

      this.M_train = M_train;
    end

  end

  methods(Access=protected)

    function this = initialize_basis(this, model, model_data)
      % This function initializes the basis with a first element. The
      % default implementation takes the detailed solution for the first
      % training parameter and keeps its dominant left singular vector.
      if ~isempty(this.RB_V) || ~isempty(this.RB_W)
        error('The basis is not empty, so the initialize_basis method cannot be called');
      end

      mus = this.M_train.sample;
      model.set_mu(mus(1,:));

      sim = detailed_simulation(model, model_data);

      [V,~,~] = svds(sim.Z, 1);
      this.RB_V = V;
      this.RB_W = V;
    end

  end

  % =====================================================================
  % Public methods
  methods

    % =================================================================
    function [V,W,info] = run(this)
      % RUN
      % Implementation of the LRFG algorithm.
      % This function actually performs the algorithm and returns the
      % bases and info data that fill the corresponding fields in
      % detailed_data
      %
      if init_required(this.M_train)
        init_sample(this.M_train, this.model);
      end

      % Cache the mus since we need them a few times below and
      % abbreviate this.model
      mus = this.M_train.sample;
      model = copy(this.model);

      t_start = tic;

      % This object will later be stored in the DetailedData.info
      % property
      info = Greedy.LRFG.BasisInfo;

      info.greedy_tolerance = model.RB_greedy_tolerance;
      gtol = info.greedy_tolerance;
      this.orthonormalize_E = model.RB_orthonormalize_E;
      info.orthonormalize_E = this.orthonormalize_E;
      this.error_indicator = model.RB_error_indicator;
      info.error_indicator = this.error_indicator;
      info.pod_max_extension = model.RB_pod_max_extension;

      % To store the error decay:
      errors = ones(size(mus, 1), 1);

      % The reduced basis will be stored here:
      RB_V = [];
      RB_W = [];

      % Arrays that collect statistical information about the LRFG run:
      chosen_mu = [];
      error_decay = [];
      Z_sizes = [];
      vectors_added = [];
      num_solves = 0;

      % Some additional helper variables:
      i = 1; z0 = 0; sims = cell(size(mus,1),1);

      % For the detailed simulation times:
      dsim_times = [];
      dsim_times_real = [];

      % Inner tolerance for the POD:
      podtol = model.RB_pod_tolerance;
      info.pod_tolerance = podtol;

      % Grab the mass matrix, so that the basis can be orthonormalized
      % with respect to it:
      E = speye(model.n);
      if this.orthonormalize_E
        E = model.mass_matrix(this.model_data);
      end
      n = model.n;

      display('================================================================')
      display('Starting Basis Generation')
      display('----------------------------------------------------------------')
      display('Basis generation settings:')
      display([' - Number of parameters ', num2str(size(mus, 1))])
      display([' - Inner tolerance ', num2str(podtol)])
      display([' - Greedy tolerance ', num2str(gtol)])
      display([' - System dimension ', num2str(n)])
      display('+------+---------------+-------+------------+--------+----------+')
      display('|    i |     max. err. |  idx. |     Z size |  added |     time |')
      display('+------+---------------+-------+------------+--------+----------+')

      % Here starts the main LRFG loop:
      while max(errors) > gtol
        if isempty(RB_V) && isempty(RB_W)
          % Use the first element in the training set for the
          % initialization of the basis
          theta = 1;
          max_err = Inf;
        else
          % Select the worst approximated element
          [max_err, theta] = max(errors);
        end

        % Save the chosen parameter index:
        chosen_mu = [chosen_mu theta];

        % Calculate the full dimensional solution
        model.set_mu(mus(theta,:));
        t = tic;
        [dsim,cached] = filecache_function(@detailed_simulation, model, this.model_data);
        % Do not count the wall clock time if the solution came from the file cache:
        time = (cached<1)*toc(t);
        num_solves = num_solves + 1;
        dsim_times_real = [dsim_times_real time];
        dsim_times = [dsim_times dsim.time];

        Z_sizes = [Z_sizes size(dsim.Z, 2)];
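        % Project the new snapshots: subtract their component in the span of
        % RB_W, chosen such that the remainder is orthogonal to the span of
        % RB_V (an oblique projection; here RB_V and RB_W coincide). Only
        % this remainder is used to extend the basis below.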
        if ~isempty(RB_V) && ~isempty(RB_W)
          Z = dsim.Z - RB_W*((RB_V'*RB_W)\(RB_V'*dsim.Z));
        else
          Z = dsim.Z;
        end

        % Perform the basis extension:
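        % The extension vectors are the leading left singular vectors of Z.
        % The loop below picks the smallest k whose cumulative share of the
        % (square-rooted) singular values reaches the inner tolerance podtol,
        % capped by pod_max_extension if that cap is set.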
        [Phi, Lambda, ~] = svd(Z, 'econ');
        dLambda = sqrt(diag(Lambda));
        sumS = sum(dLambda);
        for k = 1:length(dLambda)
          eE = sum(dLambda(1:k))/sumS;
          if eE >= podtol || (info.pod_max_extension > 0 && k == info.pod_max_extension)
            break
          end
        end

        Vextend = Phi(:, 1:k);
        c = size(Vextend, 2);

        % Just make sure the basis is orthogonal in cases where numerical
        % errors occur:
        RB_W = orthonormalize_gram_schmidt([RB_W Vextend], E);
        RB_V = RB_W;

        vectors_added = [vectors_added c];

        % Recalculate the errors:
        [errs_normalized,errs_absolute] = this.calc_error_indicator(RB_W, RB_V, mus);

        errors = errs_normalized;
        switch info.error_indicator
          case 'residual'
            errors = errs_absolute;
          case 'normalized_residual'
            errors = errs_normalized;
          case 'relative_change'
            if i == 1
              initial_residuals = errs_absolute;
              errors = ones(1,length(errs_absolute));
            else
              errors = errs_absolute ./ initial_residuals;
            end
          otherwise
            error(['Error indicator mode "', info.error_indicator, '" not known'])
        end

        error_decay = [error_decay max(errors)];
        error_history_normalized{i} = errs_normalized;
        error_history_absolute{i} = errs_absolute;

        display(sprintf('| %4d | %.7e | %5d | %10d | %6d | %7f |', i, max(errors), theta, size(dsim.Z,2), c, time))
        i = i + 1;
      end
      display('+------+---------------+-------+------------+--------+----------+')

      V = RB_V;
      W = RB_W;

      % Fill in the remaining information:
      info.Z_sizes = Z_sizes;
      info.vectors_added = vectors_added;
      info.detailed_simulation_times = dsim_times;
      info.detailed_simulation_times_real = dsim_times_real;
      info.chosen_mu = chosen_mu;
      info.error_history_normalized = error_history_normalized;
      info.error_history_absolute = error_history_absolute;
      info.error_decay = error_decay;
      info.num_solves = num_solves;
      info.M_train = this.M_train;

      % End of the basis generation: save the complete offline time
      info.runtime = toc(t_start);
    end


    % Calculate the error indicators for the basis stored in the matrices
    % RB_V and RB_W
    function [errs_normalized, errs_absolute, avg_time] = calc_error_indicator(this, RB_W, RB_V, mus)
      errs_normalized = zeros(size(mus,1),1);
      errs_absolute = zeros(size(mus,1),1);

      % Create a DetailedData object and a ReducedData object for
      % temporary use:
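      % (The problem-specific DetailedData class is instantiated dynamically
      % from the package name returned by problem_type(); the available
      % package names depend on the surrounding model implementation.)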
      pt = this.model.problem_type();
      dd = eval([pt, '.DetailedData(this.model, this.model_data)']);
      dd.RB_V = RB_V;
      dd.RB_W = RB_W;

      rd = gen_reduced_data(this.model, dd);
      t = tic;
      model = copy(this.model);

      for i = 1:size(mus,1)
        tmpModel = model;
        tmpModel.set_mu(mus(i, :));

        % Disable the error estimator but enable the calculation of
        % the residual:
        tmpModel.enable_error_estimator = false;
        tmpModel.calc_residual = true;

        % Simulate and get the errors:
        rsim = rb_simulation(tmpModel, rd);
        errs_normalized(i) = rsim.nresidual;
        errs_absolute(i) = rsim.residual;
      end

      avg_time = toc(t)/size(mus,1);
    end
  end
end