% greedy basis generation extension which adaptively refines the training
% parameter set
%
% @todo add reference to adaptive paper.
% cell array of field names to be copied to the detailed data node instance
% during the init_basis() call
11 info_fields = {
'max_refinement_steps',
'max_refinement_level', ...
12 'refinement_mode',
'refinement_theta',
'element_indicator_mode', ...
13 'element_indicator_s_max',
'force_stop_at_max_refinement_level' };
% maximum number of refinement steps before the algorithm stops.
%
% If this number of steps is reached, the algorithm terminates and returns
% the generated reduced basis spaces.
22 max_refinement_steps = inf;
% maximum number of refinement levels in the refineable training parameter
% set.
%
% If this level is reached, the algorithm terminates and returns the
% generated reduced basis spaces.
29 max_refinement_level = 5;
% string specifying the method type for the adaptation of the parameter
% sampling
%
% Possible values are:
%  - 'uniform': the parameter sampling is refined uniformly
%  - 'adaptive': the parameter sampling is refined adaptively, such that
%                only regions of the parameter space with high estimated
%                errors are refined
39 refinement_mode =
'uniform'; %
'uniform' or
'adaptive'
% ratio of elements which are refined during an adaptive refinement step
%
% This value `\theta` is multiplied by the number of leaf elements in the
% current parameter grid in order to compute the number of grid cells to be
% refined.
46 refinement_theta = 0.5;
% string specifying the indicator for the training set nodes.
%  - 'nodes_skippedrefs'
%  - 'nodes_cogs_skippedrefs'
55 element_indicator_mode =
'nodes_cogs';
% specifies the maximum number of skipped refinements in case of the
% '*_skippedrefs' indicator modes.
59 element_indicator_s_max = 100;
% boolean flag indicating whether the algorithm shall stop at the maximum
% refinement level.
%
% If this is set to false, no further refinement is done at the last level,
% but the reduced basis spaces are generated, anyway.
66 force_stop_at_max_refinement_level =
true;
% constructor extending a Greedy algorithm object by training set
% adaptation.
77 assert(isa(child,
'Greedy.Interface'));
81 function detailed_data = init_basis(
this, rmodel, model_data)
82 %
function detailed_data = init_basis(
this, rmodel, model_data)
89 detailed_data = init_basis(this.child, rmodel, model_data);
91 if isstruct(model_data)
92 detailed_data =
Greedy.
DataTree.Detailed.InfoNode(detailed_data, 'paramspace_adapt');
97 function detailed_data = basis_extension(this, rmodel, detailed_data, checkpoint)
98 % function detailed_data_tree = basis_extension(this, rmodel, detailed_data_tree, checkpoint)
107 % copied from basisgen_refined written by
108 % \author Bernard Haasdonk
110 if nargin == 3 || isempty(checkpoint)
114 basetoc = get(checkpoint, 'parameter_toc', 0);
119 base_detailed_data = get(detailed_data, 1);
120 base_detailed_data = basis_extension(this.child, rmodel, base_detailed_data, checkpoint.child(1));
122 dd_leaf = get_active_leaf(base_detailed_data, rmodel);
123 refinement_step = get_field(dd_leaf, 'refinement_step', 0);
125 if stopped_on_any_child(base_detailed_data, 'stopped_on_timeout')
126 set_field(detailed_data, 'elapsed_time', toc(alltimer) + basetoc);
130 if ~stopped_on_active_child(base_detailed_data, 'stopped_on_max_val_train_ratio', rmodel)
131 detailed_data.stop_flags = union(detailed_data.stop_flags, base_detailed_data.stop_flags);
135 if refinement_step >= get_field(detailed_data, 'max_refinement_steps')
136 set_stop_flag(dd_leaf, 'stopped_on_max_refinement_level');
140 M_train = get_field(dd_leaf, 'M_train');
145 levels = get_refinement_levels(M_train);
146 maxlev = max(levels);
147 if maxlev == get_field(detailed_data, 'max_refinement_level') - 1 ...
148 && ~get_field(detailed_data, 'force_stop_at_max_refinement_level')
150 set_field(dd_leaf, 'stop_max_val_train_ratio', 1e6);
154 % if refinement is still possible and required val-train ratio is not assured
155 % refine the parameter space regardless of stopped_on_epsilon flag.
156 r_values = get_field(dd_leaf, 'r_value_sequence');
157 if maxlev < get_field(detailed_data, 'max_refinement_level') ...
158 && r_values(end) > get_field(dd_leaf, 'stop_max_val_train_ratio')
160 set_stop_flag(dd_leaf, 'stopped_on_epsilon', false);
163 if maxlev >= get_field(detailed_data, 'max_refinement_level')
164 set_stop_flag(dd_leaf, 'stopped_on_max_refinement_level');
168 disp('detected grid refinement necessary!!');
169 disp(['max val train ratio was: ', num2str(r_values(end))]);
170 nleafelements = size(M_train);
172 snapshot(dd_leaf, 'ParamSpaceAdapt', 'Parameter space adaptation', ...
173 ['Refinement step no. ', num2str(refinement_step)]);
175 switch get_field(detailed_data, 'refinement_mode')
177 set_field(dd_leaf, 'M_train', refine(M_train));
180 eta = refinement_element_indicators( this, rmodel, detailed_data, ...
183 [dummy, ind] = sort(-eta);
184 nmax = ceil(nleafelements * get_field(detailed_data, 'refinement_theta'));
186 set_field(dd_leaf, 'M_train', refine(M_train, ind(1:nmax)));
189 set_stop_flag(dd_leaf, 'stopped_on_max_val_train_ratio', false);
191 refinement_step = refinement_step + 1;
192 set_field(dd_leaf, 'refinement_step', refinement_step);
193 checkpoint = checkpoint.store(rmodel, detailed_data, 'refinement', ...
194 struct('parameter_toc', toc(alltimer) + basetoc));
196 set_field(detailed_data, 'elapsed_time', toc(alltimer) + basetoc);
199 function eta = refinement_element_indicators( this, rmodel, detailed_data, dd_leaf )
200 % function eta = refinement_element_indicators( this, rmodel, detailed_data, dd_leaf )
201 % evaluates the refinement indicators
206 % dd_leaf: Detailed data node of type
Greedy.
DataTree.Detailed.ILeafNode
207 % which is currently extended
leaf in the detailed data tree.
208 Delta_train = get_field(dd_leaf, 'M_last_errors');
209 MMesh = get_field(dd_leaf, 'M_train');
210 base_detailed_data = get(detailed_data, 1);
212 if isempty(Delta_train)
213 Delta_train = error_indicators(this.child, rmodel, base_detailed_data, MMesh.sample);
216 if isequal(get_field(detailed_data, 'element_indicator_mode'), 'nodes_cogs') ...
217 || isequal(get_field(detailed_data, 'element_indicator_mode'), 'nodes_cogs_skippedrefs')
219 % determine all cog vertex estimators
221 Delta_cog = error_indicators(this.child, rmodel, base_detailed_data, M_cog, true);
222 set_field(dd_leaf, 'Delta_cog', Delta_cog);
225 Max_Delta_vertex = elementwise_maximum_of_vertex_values(MMesh, Delta_train);
227 switch get_field(detailed_data, 'element_indicator_mode')
229 % eta(i) = max_{mu in (V(e) and cog(e))} Delta (Phi_t, mu)
230 eta = max( Delta_cog(:), Max_Delta_vertex(:) );
232 % eta(i) = max_{mu in (V(e))} Delta (Phi_t, mu)
233 eta = Max_Delta_vertex(:);
234 case 'nodes_skippedrefs'
235 % eta(i) = max_{mu in (V(e))} Delta (Phi_t, mu) +
236 % s(i)/s_max * epsilon
237 % determine number of skipped refinements of all
leaf elements
238 s = skipped_refinements(MMesh);
239 eta = Max_Delta_vertex(:) + ...
240 max(Max_Delta_vertex) / ...
241 get_field(detailed_data,
'element_indicator_s_max') * ...
243 case 'nodes_cogs_skippedrefs'
244 % eta(i) = max_{mu in (V(e) and cogs)} Delta (Phi_t, mu) +
245 % s(i)/s_max * epsilon
246 % determine number of skipped refinements of all
leaf elements
247 s = skipped_refinements(MMesh);
248 eta = max(Delta_cog(:),Max_Delta_vertex(:)) + ...
249 max(Max_Delta_vertex) / ...
250 get_field(detailed_data,
'element_indicator_s_max') * ...
253 error(
'element_indicator_mode in grid-refinement unknown');
Interface class for all kinds of reduced basis generation algorithms
Helper class used to store and restore data tree objects at specified checkpoints.
virtual function Greedy.DataTree.Detailed.INode detailed_data = init_basis(IReducedModel rmodel,ModelData model_data)
construction of an initial reduced basis
Interface classes to be implemented by the Greedy.Algorithm user.
This is the interface for a reduced model providing methods to compute low dimensional reduced simula...
greedy basis generation extension which adaptively refines the training parameter set...
Customizable implementation of an abstract greedy algorithm.
interface specialization for a reduced model that can be used with the Greedy algorithm for basis gen...
Interface class for the generation and storage of reduced basis spaces as described in Module (M2)...