1 function hp_model = h_refinement( hp_model,model_data )
2 %
function hp_model = h_refinement( hp_model,model_data )
4 %compute the h-refinement (adaptive splitting) of the parameter space
% NOTE(review): this excerpt is incomplete — interior lines are missing, so
% the control flow between the statements below cannot be fully verified.
% Variant with error/distance extension: delegate to the specialised routine
% (presumably followed by an early return — TODO confirm in the full file).
6 if hp_model.error_distance_extionsion ==1
7 hp_model = h_refinement_err_ext(hp_model,model_data );
% copy the hp-level safety factors and error tolerance down to the base
% model so the basis-generation routines can read them
13 hp_model.base_model.rho = hp_model.rho;
14 hp_model.base_model.rho_POD = hp_model.rho_POD;
15 hp_model.base_model.error_tol1=hp_model.error_tol1;
% reproducible training set: seed the legacy rand generator, then draw
% RB_train_size uniform samples from mu_ranges
17 rand(
'state',hp_model.base_model.RB_train_rand_seed);
18 hp_model.base_model.training = rand_uniform(hp_model.base_model.RB_train_size,...
19 hp_model.base_model.mu_ranges);
% root node of the partition tree: anchor parameter plus a model copy
21 hp_model.tree.anchor = get_mu(hp_model);
22 hp_model.tree.model = hp_model.base_model;
23 %hp_model.tree.model.h_part=1;
24 hp_model.tree.training = hp_model.base_model.training;
25 %hp_model.tree.model_data = model_data;
27 %generate the basis of the initial (anchor) parameter:
% temporarily restrict the training set to the anchor only so that the
% first basis is built exactly for the anchor (Nmax = 1), then restore it
28 training_backup = hp_model.tree.model.training;
29 hp_model.tree.model.RB_stop_Nmax = 1;
30 hp_model.tree.model.training = hp_model.tree.anchor;
32 hp_model.tree = create_basis_automatic(hp_model.tree);
34 hp_model.tree.model.training = training_backup;
36 % little tree used for training-set generation (shared nested variable)
37 littletree = hp_model.tree.anchor;
% recursively build the h-partitioning starting from the root
41 hp_model.tree = hp1(hp_model.tree);
48 function tree_out = hp1(tree_in)
50 %
function tree_out = hp1(tree_in)
51 % recursively generates the h-partitioning for parabolic problems
52 % tree_in must already contain a basis for its anchor!
% NOTE(review): nested function — reads hp_model and littletree from the
% enclosing workspace. Interior lines are missing from this excerpt.
54 %find the parameter with the largest error -> mu1
55 disp(
'--calculating errors of current domain');
56 % compute the errors of the training set:
58 tree_in.detailed_data, tree_in.reduced_data,...
59 tree_in.model.training,
'');
61 [max_error, mu_ind]= max(post_errs);
63 % error of the anchor approximation (only relevant for static basis generation):
65 tree_in.detailed_data, tree_in.reduced_data,...
69 disp([
'--Maximum error in current domain: ' num2str(max_error)
' - required: < ' num2str(hp_model.error_tol1)] );
70 disp([
'--Maximum error of anchor : ' num2str(error_anchor)
' - required: < ' num2str(hp_model.error_tol1/tree_in.model.rho)]);
72 % warn if the anchor error exceeds error_tol1/rho
73 if error_anchor>hp_model.error_tol1/tree_in.model.rho
74 warning(
'more RB nodes required!');
77 if max_error<hp_model.error_tol1
79 % done, no further splitting required;
82 % if error > error tolerance -> split!
85 % create new leaves
88 % inherit properties from the parent node
89 child_right.model = tree_in.model;
90 child_left.model = tree_in.model;
91 % child_right.model.h_part =1;
92 % child_left.model.h_part =1;
93 % child_right.model_data = tree_in.model_data;
94 % child_left.model_data = tree_in.model_data;
96 % left branch inherits the anchor from the parent node
97 child_left.anchor = tree_in.anchor;
98 % right branch gets the first greedy-selected worst-error parameter
99 child_right.anchor = tree_in.model.training(:, mu_ind(1));
102 child_left.model.training=[];
103 child_right.model.training=[];
106 % distribute the parent training set among the two children
107 % the distance function d is used here
108 leng = length(tree_in.model.training(1,:));
111 if ~isequal(tree_in.model.training(:,i),child_right.anchor) && ~isequal(tree_in.model.training(:,i),child_left.anchor)
112 distance_l=hp_model.distance_function(child_left.model,child_left.anchor,tree_in.model.training(:,i));
113 distance_r=hp_model.distance_function(child_right.model,child_right.anchor,tree_in.model.training(:,i)) ;
115 if distance_l<distance_r
116 child_left.model.training = [child_left.model.training, tree_in.model.training(:,i)];
118 child_right.model.training = [child_right.model.training, tree_in.model.training(:,i)];
123 % record the split in the little tree (shared nested variable!)
124 littletree=change_littletree(littletree,tree_in.anchor,child_right.anchor);
126 % refine the training set via accept-reject Monte Carlo sampling
129 % determine the bounds for the random generator:
130 switch hp_model.trainingset_generation_mode
132 %build the bounding box:
133 param_min = min(child_left.model.training');
134 param_max = max(child_left.model.training');
135 l = max(param_max-param_min);
136 t = size(child_left.model.training);
% degenerate box (empty / single sample / zero extent): fall back to the
% full parameter ranges of the model
137 if isempty(l) || min(l) == 0 || t(2) <= 1
138 box_ranges = child_left.model.mu_ranges;
% inflate the box by a factor, then clip it to the admissible mu_ranges
141 param_max=param_max+max(l,hp_model.boundarybox_minimum)*hp_model.boundarybox_factor;
142 param_min=param_min-max(l,hp_model.boundarybox_minimum)*hp_model.boundarybox_factor;
143 box_ranges = cell2mat(child_left.model.mu_ranges')';
146 param_min = max([param_min;box_ranges(1,:)]);
147 param_max = min([param_max;box_ranges(2,:)]);
149 box_ranges = mat2cell([param_min;param_max]',ones(1,length(param_min)));
154 box_ranges = child_left.model.mu_ranges;
156 box_ranges = child_left.model.mu_ranges;
% accept-reject: draw uniform candidates and keep those belonging to the
% left child's domain, until the target training-set size is reached.
% NOTE(review): length() on a matrix returns the larger dimension; this
% relies on the training set having more columns than rows — confirm.
160 while length(child_left.model.training) < child_left.model.RB_train_size
162 parameter = rand_uniform(1,box_ranges);
163 if part_of_domain(littletree,child_left.anchor,parameter,child_left.model)==1
164 child_left.model.training = [child_left.model.training,parameter ];
% same training-set refinement for the right child
172 switch hp_model.trainingset_generation_mode
174 %build the bounding box:
175 param_min = min(child_right.model.training');
176 param_max = max(child_right.model.training');
177 l = max(param_max-param_min);
178 t=size(child_right.model.training);
179 if isempty(l) || min(l) == 0 || t(2) <= 1
180 box_ranges = child_right.model.mu_ranges;
183 param_max=param_max+max(l,hp_model.boundarybox_minimum)*hp_model.boundarybox_factor;
184 param_min=param_min-max(l,hp_model.boundarybox_minimum)*hp_model.boundarybox_factor;
185 box_ranges = cell2mat(child_right.model.mu_ranges')';
188 param_min = max([param_min;box_ranges(1,:)]);
189 param_max = min([param_max;box_ranges(2,:)]);
191 box_ranges = mat2cell([param_min;param_max]',ones(1,length(param_min)));
% NOTE(review): the next two fallbacks read child_left instead of
% child_right — looks like a copy-paste slip. It is harmless as written
% because both children copy tree_in.model (lines 89/90 above), so their
% mu_ranges are identical, but it should read child_right for clarity.
198 box_ranges = child_left.model.mu_ranges;
200 box_ranges = child_left.model.mu_ranges;
204 while length(child_right.model.training) < child_right.model.RB_train_size
206 parameter = rand_uniform(1,box_ranges);
207 if part_of_domain(littletree,child_right.anchor,parameter,child_right.model)==1
208 child_right.model.training = [child_right.model.training,parameter ];
% mirror the refined training sets onto the tree-node fields
215 child_left.training = child_left.model.training;
216 child_right.training = child_right.model.training;
219 % left child inherits the basis of the first parameter (mu0)
220 child_left.detailed_data = tree_in.detailed_data;
221 child_left.reduced_data = tree_in.reduced_data;
222 % generate a basis for the new parameter (mu1)
223 backup = child_right.model.training;
224 child_right.model.use_generated_RB_basis = 0;
225 child_right.model.RB_stop_Nmax = 1;
226 child_right.model.training = child_right.anchor;
227 % bases of the anchor mu1
228 child_right = create_basis_automatic(child_right);
229 child_right.model.training = backup;
% recurse into both children; the resulting tree node is a 2-cell
232 tree_out={hp1(child_left),hp1(child_right)};
253 function tree_out = create_basis_automatic(tree_in)
% Generates the reduced basis for the anchor of tree_in. Nested function:
% reads hp_model and model_data from the enclosing workspace.
% NOTE(review): interior lines (including the switch cases and the
% assignment of RB_ex_algo_backup) are missing from this excerpt.
255 switch (hp_model.find_nr_nodes)
260 % generate the basis of the anchor - determine the number of nodes automatically;
261 % basis generation with POD error:
% temporarily swap in the hp-specific extension algorithm, build the
% detailed/reduced data, then restore the original algorithm
263 tree_in.model.RB_extension_algorithm = tree_in.model.RB_extension_algorithm_hp; %=RB_extension_PCA_fixspace_flexible_hp%
264 tree_in.detailed_data = gen_detailed_data(tree_in.model,model_data);
265 tree_in.reduced_data = gen_reduced_data(tree_in.model,tree_in.detailed_data);
266 tree_in.model.RB_extension_algorithm = RB_ex_algo_backup;
% target accuracy for the greedy: error_tol1 scaled by the safety factor rho
269 tree_in.model.RB_stop_epsilon = tree_in.model.error_tol1/tree_in.model.rho;
270 tree_in.model.RB_stop_Nmax = inf;
271 tree_in.model.nr_extension_modes =1;
272 % use the POD modes:
273 tree_in.model.use_generated_RB_basis = 1;
275 % generate the basis (standard Greedy)
278 tree_in.reduced_data = gen_reduced_data(tree_in.model,tree_in.detailed_data);
286 % tree_in.detailed_data, tree_in.reduced_data,...
292 %while error_RB > tree_in.model.error_tol1/tree_in.model.rho
294 % tree_in.model.use_generated_RB_basis = 1;
296 % tree_in.reduced_data = gen_reduced_data(tree_in.model,tree_in.detailed_data);
298 % tree_in.detailed_data, tree_in.reduced_data,...
305 %model.nr_extension_modes are used - must be set
307 tree_in.detailed_data = gen_detailed_data(tree_in.model,model_data);
308 tree_in.reduced_data = gen_reduced_data(tree_in.model,tree_in.detailed_data);
321 %propagates a split into the little tree that holds the anchor structure
323 function tree_out = change_littletree(tree_in,para_expand,para_add)
% if the current node is a 2-cell, recurse into both subtrees
% NOTE(review): the surrounding if/else lines are missing from this excerpt.
326 tree_in{1} = change_littletree(tree_in{1},para_expand,para_add);
327 tree_in{2} = change_littletree(tree_in{2},para_expand,para_add);
% leaf case: replace the expanded anchor by a 2-cell {old anchor, new anchor}
330 if isequal(tree_in,para_expand)
331 tree_out = {tree_in,para_add};
340 % checks whether a parameter param belongs to the domain of the anchor
341 % requires a little tree.
343 function is = part_of_domain(ltree, anchor,param,model)
% cell node => inner node of the little tree; leaf otherwise
% NOTE(review): the branch that consumes 'test' is in lines missing from
% this excerpt — presumably 'if test ... else ... end'.
344 test = iscell(ltree);
347 % determine a representative anchor of each subtree
348 first_l=littletree_first(ltree{1});
349 first_r=littletree_first(ltree{2});
350 % navigate using the distance function d
351 distance_l = hp_model.distance_function(model,first_l,param);%norm(first_l-param,2);
352 distance_r = hp_model.distance_function(model,first_r,param);%norm(first_r-param,2);
353 % descend into the closer subtree and recurse
354 if distance_l<distance_r
355 is = part_of_domain(ltree{1}, anchor,param,model);
357 is = part_of_domain(ltree{2}, anchor,param,model);
360 % leaf reached: param belongs to the anchor's domain iff the leaf is the anchor
362 if isequal(ltree,anchor)
372 % returns the associated anchor (left-most leaf) for the variable tree.
373 function value= littletree_first (tree)
% recurse into the left subtree until a leaf (plain parameter vector) is found
% NOTE(review): the cell-test and leaf-return lines are missing from this excerpt.
376 value=littletree_first(tree{1});
function test_err = rb_test_indicator(model, detailed_data, reduced_data, M_test, savepath)
M_test,[savepath])
Customizable implementation of an abstract greedy algorithm.
function detailed_data = rb_basis_generation(model, detailed_data)
Reduced basis construction with different methods.
function [ RBext , dummy ] = RB_extension_PCA_fixspace_flexible(model, detailed_data)
Function computing an RB basis extension for given parameters by the POD-Greedy algorithm.