1 classdef Algorithm < handle
%ALGORITHM The main implementation of the LRFG algorithm
%   This algorithm is an implementation of the low-rank factor greedy
%   algorithm as it was presented in the paper by A. Schmidt and
%   B. Haasdonk (2016). It is a generic class which can be applied to
%   all OOP models that implement a certain interface.
% Internal state of the LRFG algorithm. Access is protected for both
% reading and writing (the access specifier below is split across
% lines by extraction damage; kept verbatim).
9 properties(GetAccess=
protected, SetAccess=
protected)
10 model; % Handle to the model object; run() works on a copy() so set_mu() calls do not mutate it
11 model_data; % Reference to the ModelData class (high-dimensional data, passed to mass_matrix/detailed_simulation)
14 M_train; % Training parameter set; provides .sample, init_required() and init_sample()
16 error_indicator = 'normalized_residual'; % Error indicator mode; overwritten from model.RB_error_indicator in run() -- see the switch there ('normalized_residual', 'relative_change', and one case whose label is missing from this excerpt)
19 orthonormalize_E = true; % Orthonormalize the basis wrt. to the mass matrix E (overwritten from model.RB_orthonormalize_E in run())
% NOTE(review): interior lines of this constructor are missing from
% this excerpt (original numbering jumps over 25-26, 28, 30-32, 34-35);
% in particular the assignment of this.model and the body/closing of
% the if-statement are not visible here.
23 function this =
Algorithm(model, model_data, M_train)
24 % ALGORITHM Constructor for the LRFG algorithm
%
% Parameters:
%   model      - model object; its RB_* configuration fields are read
%                later in run()
%   model_data - high-dimensional model data, stored in this.model_data
%   M_train    - (optional) training parameter set; guarded by the
%                exist() check below so it is only used when supplied
27 this.model_data = model_data;
% Branch taken when the caller omitted the M_train argument.
29 if ~exist('M_train', 'var')
33 this.M_train = M_train;
36 function set.M_train(this, M_train)
% Property setter for M_train (MATLAB set.PropertyName syntax).
% NOTE(review): original lines 37-40 are missing from this excerpt;
% any validation performed there is not visible.
41 this.M_train = M_train;
46 methods(Access=protected)
48 function this = initialize_basis(this, model, model_data)
% INITIALIZE_BASIS Seed the (still empty) reduced basis.
%   The default implementation runs one detailed simulation for the
%   first training parameter and keeps the dominant left singular
%   vector of its snapshot matrix sim.Z. May only be called while the
%   basis is empty (guarded below).
% NOTE(review): this.RB_V / this.RB_W are read here but are not among
% the properties visible in this excerpt -- presumably declared on the
% missing property lines; verify.
51 if ~isempty(this.RB_V) || ~isempty(this.RB_W)
52 error('The basis is not empty, so the initialize_basis method cannot be called');
% Use the first parameter of the training sample for the seed solve.
55 mus = this.M_train.sample;
56 model.set_mu(mus(1,:));
58 sim = detailed_simulation(model, model_data);
% svds(..., 1): dominant left singular vector of the snapshot matrix.
60 [V,~,~] = svds(sim.Z, 1);
67 % =====================================================================
71 % =================================================================
72 function [V,W,info] = run(this)
% RUN Implementation of the LRFG (low-rank factor greedy) algorithm.
%   Greedily extends the basis until the maximal error indicator over
%   the training set drops below the greedy tolerance gtol, and fills
%   the corresponding statistics fields in the returned info struct
%   (later populated into DetailedData.info).
%
% NOTE(review): many interior lines are missing from this excerpt
% (original numbering jumps). Not visible here: the initializations of
% RB_V, RB_W, chosen_mu, Z_sizes, dsim_times, num_solves, sumS,
% cached, n, the tic calls producing t and t_start, the
% detailed_simulation call producing dsim, one case label of the
% switch below, and several closing `end` keywords.

% Lazily initialize the training sample if the set requires it.
78 if init_required(this.M_train)
79 init_sample(this.M_train, this.model);
% Cache the mus since we need them a few times below, and work on a
% copy of the model so set_mu() does not mutate this.model.
84 mus = this.M_train.sample;
85 model = copy(this.model);
% Mirror the model's RB_* configuration into the info struct.
93 info.greedy_tolerance = model.RB_greedy_tolerance;
94 gtol = info.greedy_tolerance;
95 this.orthonormalize_E = model.RB_orthonormalize_E;
96 info.orthonormalize_E = this.orthonormalize_E;
97 this.error_indicator = model.RB_error_indicator;
98 info.error_indicator = this.error_indicator;
99 info.pod_max_extension = model.RB_pod_max_extension;
% Start with error 1 for every training parameter so the greedy loop
% runs at least once.
102 errors = ones(size(mus, 1), 1);
% The reduced basis will be stored here (initialization lines missing
% from this excerpt).
% Some additional helper variables (z0/sims appear unused in the
% visible lines -- TODO confirm against the full file):
117 i = 1; z0 = 0; sims = cell(size(mus,1),1);
% Wall-clock times of the detailed simulations:
121 dsim_times_real = [];
% Inner tolerance for the POD-based basis extension:
124 podtol = model.RB_pod_tolerance;
125 info.pod_tolerance = podtol;
% Grab the mass matrix E so new basis vectors can be orthonormalized
% with respect to it.
129 if this.orthonormalize_E
130 E = model.mass_matrix(this.model_data);
% Print the settings banner and the table header for per-iteration
% progress output.
134 display('================================================================')
135 display('Starting Basis Generation')
136 display('----------------------------------------------------------------')
137 display('Basis generation settings:')
138 display([' - Number of parameters ', num2str(length(mus))])
139 display([' - Inner tolerance ', num2str(podtol)])
140 display([' -
Greedy tolerance ', num2str(gtol)])
141 display([' - System dimension ', num2str(n)])
142 display('+------+---------------+-------+------------+--------+----------+')
143 display('| i | max. err.| idx.| Z size | added | time |')
144 display('+------+---------------+-------+------------+--------+----------+')
% Main LRFG greedy loop: stop once the worst indicator is below gtol.
147 while max(errors) > gtol
148 if isempty(RB_V) && isempty(RB_W)
% Basis still empty: use the first element in the training set for the
% initialization of the basis (body missing from this excerpt).
% Select the worst approximated training element.
155 [max_err, theta] = max(errors);
% Record which parameter index was chosen this iteration.
159 chosen_mu = [chosen_mu theta];
% Calculate the full dimensional solution for the chosen parameter.
162 model.set_mu(mus(theta,:));
% Only count wall-clock time for uncached solves.
165 time = (cached<1)*toc(t);
166 num_solves = num_solves + 1;
167 dsim_times_real = [dsim_times_real time];
168 dsim_times = [dsim_times dsim.time];
170 Z_sizes = [Z_sizes size(dsim.Z, 2)];
171 if ~isempty(RB_V) && ~isempty(RB_W)
% Remove the part of the snapshot already captured by the current
% basis: oblique projection complement of dsim.Z onto span(RB_W)
% (Petrov-Galerkin style, via the (RB_V'*RB_W) system).
172 Z = dsim.Z - RB_W*((RB_V'*RB_W)\(RB_V'*dsim.Z));
% Perform the POD-based basis extension on the projected snapshots.
178 [Phi, Lambda, ~] = svd(Z, 'econ');
% NOTE(review): sqrt of the singular values is used for the energy
% criterion below (with sumS from a missing line) -- verify the
% intended weighting against the paper/full file.
179 dLambda = sqrt(diag(Lambda));
% Pick the smallest k whose cumulative energy reaches podtol, capped
% by pod_max_extension when that cap is positive.
181 for k = 1:length(dLambda)
182 eE = sum(dLambda(1:k))/sumS;
183 if eE >= podtol || (info.pod_max_extension > 0 && k == info.pod_max_extension)
188 Vextend = Phi(:, 1:k);
189 c = size(Vextend, 2);
% Re-orthonormalize (w.r.t. E) in case numerical drift occurred.
193 RB_W = orthonormalize_gram_schmidt([RB_W Vextend], E);
196 vectors_added = [vectors_added c];
% Recalculate the error indicators over the whole training set.
199 [errs_normalized,errs_absolute] = this.calc_error_indicator(RB_W, RB_V, mus);
201 errors = errs_normalized;
% Map the configured indicator mode onto the error vector used by the
% greedy selection. NOTE(review): the case label preceding the first
% branch and the `otherwise` line are missing from this excerpt.
202 switch info.error_indicator
204 errors = errs_absolute;
205 case 'normalized_residual'
206 errors = errs_normalized;
207 case 'relative_change'
% First iteration: remember the initial residuals as the reference and
% report errors of 1; afterwards report the relative change.
209 initial_residuals = errs_absolute;
210 errors = ones(1,length(errs_absolute));
212 errors = errs_absolute ./ initial_residuals;
215 error(['Error indicator mode "', info.error_indicator, '" not known'])
% Record the per-iteration statistics for the info struct.
218 error_decay = [error_decay max(errors)];
219 error_history_normalized{i} = errs_normalized;
220 error_history_absolute{i} = errs_absolute;
222 display(sprintf(
'| %4d | %.7d | %5d | %10d | %6d | %7f |', i, max(errors), theta, size(dsim.Z,2), c, time))
225 display('+------+---------------+-------+------------+--------+----------+')
% Fill in the remaining information gathered during the loop:
231 info.Z_sizes = Z_sizes;
232 info.vectors_added = vectors_added;
233 info.detailed_simulation_times = dsim_times;
234 info.detailed_simulation_times_real = dsim_times_real;
235 info.chosen_mu = chosen_mu;
236 info.error_history_normalized = error_history_normalized;
237 info.error_history_absolute = error_history_absolute;
238 info.error_decay = error_decay;
239 info.num_solves = num_solves;
240 info.M_train = this.M_train;
% End of basis generation: save the complete offline runtime.
243 info.runtime = toc(t_start);
% Calculate the error indicators for the basis stored in RB_W/RB_V,
% for every parameter row in mus.
248 function [errs_normalized, errs_absolute, avg_time] = calc_error_indicator(this, RB_W, RB_V, mus)
% Returns:
%   errs_normalized - normalized residual per parameter (rsim.nresidual)
%   errs_absolute   - absolute residual per parameter (rsim.residual)
%   avg_time        - average reduced-simulation time per parameter
%                     (timer t started on a line missing from this excerpt)
249 errs_normalized = zeros(size(mus,1),1);
250 errs_absolute = zeros(size(mus,1),1);
% Create a DetailedData object and a ReducedData object for the
% current basis; eval builds the class name dynamically from the
% model's problem_type string.
254 pt = this.model.problem_type();
255 dd = eval([pt, '.DetailedData(this.model, this.model_data)']);
259 rd = gen_reduced_data(this.model, dd);
% NOTE(review): `model` is assigned here but the loop below uses
% `tmpModel` -- presumably a missing line aliases or renames it;
% verify against the full file.
261 model = copy(this.model);
263 for i = 1:size(mus,1)
265 tmpModel.set_mu(mus(i, :));
% Disable the (expensive) error estimator but enable the calculation
% of the residual, which is what this indicator needs.
269 tmpModel.enable_error_estimator = false;
270 tmpModel.calc_residual = true;
% Simulate reduced model and collect the residual-based errors:
273 rsim = rb_simulation(tmpModel, rd);
274 errs_normalized(i) = rsim.nresidual;
275 errs_absolute(i) = rsim.residual;
278 avg_time = toc(t)/size(mus,1);
% --- Extraction residue below: unrelated summary snippets that do not
% --- belong to this class; kept (commented out) for reference.
% Interface class for all kind of reduced basis generation algorithms
% function varargout = filecache_function(funcptr, varargin)
% function used for file-caching other function calls.
% ALGORITHM The main implementation of the LRFG algorithm This algorithm is an implementation of the lo...
% Customizable implementation of an abstract greedy algorithm.
% Struct with high dimensional data needed for a high dimensional simulation.