KerMor 0.9
Model order reduction for nonlinear dynamical systems and nonlinear approximation
ScalarEpsSVR_SMO.m
namespace general{
namespace regression{

/* (Autoinserted by mtoc++)
 * This source code has been filtered by the mtoc++ executable,
 * which generates code that can be processed by the doxygen documentation tool.
 *
 * The filtered code can neither be interpreted by MATLAB nor compiled with a C++ compiler.
 * Except for the comments, the function bodies of your M-file functions are untouched.
 * Consequently, the FILTER_SOURCE_FILES doxygen switch (the default in our Doxyfile.template)
 * produces attached source files that remain highly readable.
 *
 * Additionally, links in the doxygen-generated documentation to the source code of functions and
 * class members refer to the correct locations in the source code browser.
 * However, the line numbers most likely do not correspond to the line numbers in the original
 * MATLAB source files.
 */
/* Scalar \epsilon-insensitive support vector regression solved by a sequential
 * minimal optimization (SMO) type algorithm. */
class ScalarEpsSVR_SMO
    : public general.regression.BaseScalarSVR {

    public: /* ( setObservable ) */

    /* The epsilon value to use for the loss function. */
    Eps = .1;

    /* Stopping tolerance; the iteration stops once S = |T+E| drops below StopEps/(2*Lambda). */
    StopEps = 1e-4;

    /* Visualization/verbosity level; values > 0 enable console output, values > 1 enable plots. */
    Vis = 0;

    /* Selects the optimizer: 1 uses the 1D working set strategy, 2 the 2D strategy. */
    Version = 2;

    /* Nearest neighbors to search for the WSS4 strategy. */
    NNk = 10;

    /* The maximum number of iterations. */
    MaxCount = 60000;

    public:
    function [ai, sf] = regress(fxi, initialai) {
        if any(abs(fxi) > 1)
            error(" This SVR implementation requires fxi values in [-1,1] ");
        end
        /* initialai = reshape(initialai,1,[]); */
        if this.Version == 1
            /* Call 1D-Optimizer */
            [ai, sf] = this.regress1D(fxi, initialai);
        else
            /* Call 2D-Optimizer */
            [ai, sf] = this.regress2D(fxi, initialai);
        end
    }

    function copy = clone() {
        copy = general.regression.ScalarEpsSVR_SMO;
        copy = clone@general.regression.BaseScalarSVR(this, copy);
        copy.Eps = this.Eps;
        copy.StopEps = this.StopEps;
        copy.Vis = this.Vis;
        copy.Version = this.Version;
        copy.NNk = this.NNk;
        copy.MaxCount = this.MaxCount;
        copy.LastIterations = this.LastIterations;
    }

    private:
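    /* Both optimizers below share the same stopping criterion: the loop terminates once
     * S = |T + E| drops below StopEps/(2*Lambda), where T is updated incrementally after
     * every step and E = C * sum(max(0, min(2-Eps, dW))) is recomputed from the current
     * gradient dW. MaxCount bounds the iteration count; exhausting it is reported via
     * StopFlag.MAX_ITER, otherwise StopFlag.TOL_OK is returned. */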
    function [ai, sf] = regress1D(fxi, initialai) {
        if ~isempty(initialai)
            /* Init - warm start */
            [a, dW, T] = I2_WarmStart(this, fxi, initialai);
        else
            /* Init - cold start */
            [a, dW, T] = I0_ColdStart(this, fxi);

            /* Init - kernel rule
             * [a, dW, T] = I1_ColdStartKernelRule(this, fxi); */
        end

        /* % Preps */
        cnt = 1;

        n = length(fxi);

        sgn = ones(1,2*n);
        sgn(n+1:end) = -1;

        S = n*this.C;

        if this.Vis > 1
            h = figure(1);
        end

        stop = this.StopEps/(2*this.Lambda);
        while S > stop && cnt < this.MaxCount+1

            if this.Vis > 1
                afxi = (a(1:n)-a(n+1:end))*this.K;
                figure(h);
                subplot(1,2,1);
                plot(1:n,fxi," r ",1:n,[fxi-this.Eps; fxi+this.Eps]," r-- ",1:n,afxi," b ");
                legend(" f(x_i) training values "," +\epsilon "," -\epsilon "," approximation values ");
                axis tight;
            end

            /* % Select working set */
            [idx, M] = this.WSS0_1D_GetMaxGainIndex(a, dW);

            /* [idx, M] = this.WSS1_1D_GetMaxGradientIndex(a, dW); */

            /* Optimal step for the selected coordinate, clipped to the box [0, C] */
            r = max(-a(idx),min(this.C-a(idx),dW(idx)));

            /* % Misc vars
             * For index: Subtract n if index is for an \alpha^- (linear indexing) */
            idx1 = idx - n*(idx > n);

            if this.Vis > 1
                subplot(1,2,1);
                hold on;
                plot(idx1,afxi(idx1)," . "," MarkerSize ",5);
                hold off;
            end

            /* % Update ai at max gain index */
            a(idx) = a(idx) + r;

            /* % Update T with old dW */
            T = T + r*(r - 2*dW(idx) + sgn(idx)*fxi(idx1) - this.Eps);

            /* % Update dW - only on side with change */
            Ki = this.K(idx1, :);
            dW = dW + sign(idx-n-.5) * r * [Ki -Ki];

            /* % Update E */
            E = this.C * sum(max(0,min(2-this.Eps,dW)));

            S = abs(T + E);
            cnt = cnt+1;
        end
        /* cnt was incremented once more than the number of completed iterations;
         * decrement it so that the MaxCount comparison below and LastIterations are
         * correct (this matches regress2D). */
        cnt = cnt-1;
        if this.Vis > 0
            disp(sprintf(" Finished after %d/%d iterations.\n ",cnt,this.MaxCount));/* #ok */
        end

        /* \alpha = \alpha^+ - \alpha^- */
        ai = (a(1:n)-a(n+1:end))^t;

        sf = StopFlag.TOL_OK;
        if cnt == this.MaxCount
            sf = StopFlag.MAX_ITER;
        end
        this.LastIterations = cnt;
    }

    function [ai, sf] = regress2D(fxi, initialai) {
        if ~isempty(initialai)
            /* Init - warm start */
            [a, dW, T] = I2_WarmStart(this, fxi, initialai);
        else
            /* Init - cold start */
            [a, dW, T] = I0_ColdStart(this, fxi);

            /* Init - kernel rule
             * [a, dW, T] = I1_ColdStartKernelRule(this, fxi); */
        end

        if this.Vis > 1
            h = figure(1);
        end

        /* % Preps */
        n = length(fxi);
        sgn = ones(1,2*n);
        sgn(n+1:end) = -1;

        S = n*this.C;

        /* Initialize i to -1 if MaxGainPlusLast is used. */
        i = -1;

        cnt = 1;
        stop = this.StopEps/(2*this.Lambda);
        while abs(S) > stop && cnt < this.MaxCount+1
            /* % Max gain computation - O(n^2) strategy */
            ij = [];/* #ok<*AGROW> */

            /* Alternative working set selection strategies:
             * ij = [ij; this.WSS0_BestGainIndices(a, dW, sgn)];
             * ij = [ij; this.WSS1024_RandomAdmissibleIndices(a)];
             * ij = [ij; this.WSS1_1DMaxGainPlusLast(a, dW, i)];
             * ij = [ij; this.WSS2_TwoSetMax1DGain(a, dW)];
             * ij = [ij; this.WSS4_1DMaxGainPluskNN(a, dW)]; */

            ij = [ij; this.WSS7_Combi(a, dW, i)];

            [r, s, idx] = this.getMax2DGainUpdatesForIndices(a, dW, ij);

            /* Not usable with current scheme
             * [i, j] = WSS128_ApproxBestGainIndices(this, a, dW, sgn); */

            i = ij(idx,1);
            j = ij(idx,2);
            i1 = i - (i > n)*n;
            j1 = j - (j > n)*n;

            if this.Vis > 1
                afxi = (a(1:n)-a(n+1:end))*this.K;
                figure(h);
                subplot(1,2,1);
                plot(1:n,fxi," r ",1:n,[fxi-this.Eps; fxi+this.Eps]," r-- ",1:n,afxi," b ");
                hold on;
                if (i1 == i)
                    plot(i,afxi(i)," o "," MarkerEdgeColor "," k ",...
                        " MarkerFaceColor ",[.1 .8 .1],...
                        " MarkerSize ",6);
                else
                    plot(i1,afxi(i1)," o "," MarkerEdgeColor "," k ",...
                        " MarkerFaceColor ",[.8 .1 .1],...
                        " MarkerSize ",6);
                end
                if (j1 == j)
                    plot(j,afxi(j)," o "," MarkerEdgeColor "," k ",...
                        " MarkerFaceColor ",[.1 .8 .1],...
                        " MarkerSize ",6);
                else
                    plot(j1,afxi(j1)," o "," MarkerEdgeColor "," k ",...
                        " MarkerFaceColor ",[.8 .1 .1],...
                        " MarkerSize ",6);
                end

                hold off;
                axis tight;
                if this.Vis > 2
                    fprintf(" alpha_{%d} change: %e, alpha_{%d} change: %e\n ",i,r,j,s);
                end
                pause;
            end

            a(i) = a(i) + r;
            a(j) = a(j) + s;

            /* If the difference |i-j| is smaller than n, the pair is a ++ or -- update,
             * meaning addition; otherwise, if |i-j| \geq n, we have the +- or -+ case,
             * meaning subtraction. Subtracting .5 avoids having sign(0) = 0. */
            pm = sign(i - n - .5)*sign(j - n - .5);
            /* % Update T using old dW */
            T = T + r*(r - 2*dW(i) - this.Eps + sgn(i)*fxi(i1)) ...
                + s*(s - 2*dW(j) - this.Eps + sgn(j)*fxi(j1)) ...
                + 2*pm*r*s*this.K(i1,j1);

            /* % Update dW - only on side with change */
            Ki = this.K(i1, :);
            Kj = this.K(j1, :);
            dW = dW + sign(i-n-.5) * r * [Ki -Ki] ...
                + sign(j-n-.5) * s * [Kj -Kj];

            /* % Get new E term */
            E = this.C * sum(max(0,min(2-this.Eps,dW)));

            S = T + E;
            cnt = cnt+1;
        end
        cnt = cnt-1;
        if this.Vis > 0 || KerMor.App.Verbose > 0
            fprintf(" ScalarEpsSVR_SMO: Finished after %d/%d iterations.\n ",cnt,this.MaxCount);
        end

        /* \alpha = \alpha^+ - \alpha^- */
        ai = (a(1:n)-a(n+1:end))^t;

        sf = StopFlag.TOL_OK;
        if cnt == this.MaxCount
            sf = StopFlag.MAX_ITER;
        end
        this.LastIterations = cnt;
    }

    function [i, M] = WSS0_1D_GetMaxGainIndex(a, dW) {
        n = numel(a)/2;
        ind = 1:n;
        /* Check which alpha values might be changed (only the ones with their partner
         * variable equal to zero) */
        ch = [ind(a(n+1:end) == 0) ind(a(1:n) == 0)+n];

        /* Find optima for changeable alphas */
        r = max(-a(ch),min(this.C-a(ch),dW(ch)));

        /* Get gain */
        g = r .* (dW(ch) - .5*r);

        [M, idxch] = max(g);
        i = ch(idxch);

        if this.Vis > 1
            subplot(1,2,2);
            r2 = zeros(size(a));
            r2(ch) = r;
            plot(1:2*n,r2," b ");
            hold on;
            g2 = zeros(size(a));
            g2(ch) = g;
            plot(1:2*n,g2," g ");
            plot(i,g(idxch)," r. "," MarkerSize ",6);
            plot([n n+eps],[0, max([r g])]," black ");
            hold off;
            title(sprintf(" Best gain index: %d, gain: %e, \\alpha_{%d} change: %e ",i, M, i, r(idxch)));
            legend(" \alpha difference "," gain ");
            axis tight;
        end
    }
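    /* Derivation of the gain formula above (a sketch, assuming a kernel with K(x,x) = 1
     * on the diagonal, as holds for the Gaussian kernel used in the test below): changing
     * a single admissible coordinate \alpha_i by r changes the dual objective by
     * g(r) = r*(dW_i - r/2), which is maximized by the unconstrained step r = dW_i;
     * clipping to the box [0, C] yields r = max(-\alpha_i, min(C - \alpha_i, dW_i)) as
     * computed above, and the index with the largest clipped gain is selected. */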

    function [i, M] = WSS1_1D_GetMaxGradientIndex(a, dW) {
        [M, i] = max(max([dW .* (a < this.C); -dW .* (a > 0)],[],1));
    }

    function ij = WSS0_AllIndices(a) {
        n = numel(a)/2;
        ind = 1:n;
        apch = ind(a(n+1:end) == 0);
        amch = ind(a(1:n) == 0);
        ch = [apch amch+n];
        /* Create unique combinations of indices */
        A = repmat(ch,length(ch),1);
        I = triu(A,1);
        J = triu(A^t,1);
        ij = [I(I~=0) J(J~=0)];
        /* Remove indices that would look at moving the same alpha up and down at the same time */
        ij(ij(:,1)-ij(:,2)-n == 0,:) = [];
    }

    function ij = WSS1_1DMaxGainPlusLast(a, dW, i) {
        in = this.WSS0_1D_GetMaxGainIndex(a, dW);
        if i ~= -1
            j = i;
            i = in;
        else
            [dWs, sortidx] = sort(abs(dW));
            pick = 1;
            while sortidx(pick) == in
                pick = pick + 1;
            end
            i = in;
            j = sortidx(pick);
        end
        ij = [i j];
    }

    function ij = WSS2_TwoSetMax1DGain(a, dW) {
        n = numel(a)/2;
        len = round(n/2);
        set1 = 1:len;
        set2 = set1(end)+1:n;
        i = this.WSS0_1D_GetMaxGainIndex(a([set1 set1+n]), dW([set1 set1+n]));
        if i > len
            /* Move to \am index (add the number of set2 elements), i.e. i = i+numel(set2) */
            i = i+(n-len);
        end
        j = this.WSS0_1D_GetMaxGainIndex(a([set2 set2+n]), dW([set2 set2+n]));
        if j > numel(set2)
            /* If larger than the set2 size, add the number of set1 elements */
            j = j+len;
        end
        /* Add +len to j as this is the offset of the second set in each case */
        ij = [i j+len];
    }
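    /* A small worked example for the index mapping above (numbers chosen purely for
     * illustration): with n = 10 and len = 5, set1 = 1:5 and set2 = 6:10. A local result
     * i = 7 from the first call refers to \alpha^- of set1 element 2 and maps to the
     * global index i + (n-len) = 12 = 2 + n. A local result j = 8 from the second call
     * refers to \alpha^- of set2 element 3, i.e. global element 8, and maps to
     * j + len + len = 18 = 8 + n via the two +len offsets. */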

    function ij = WSS4_1DMaxGainPluskNN(a, dW) {
        i = this.WSS0_1D_GetMaxGainIndex(a, dW);

        n = numel(a)/2;
        ki = i;
        if ki > n
            ki = ki - n;
        end
        [dist, idx] = sort(sqrt(1-this.K(ki,:)));
        /* Extract indices of \alpha^+ that are "close" with respect to the kernel metric
         * \sqrt{1-\K(x_i,x_j)}. Start at 2 as i itself is not an option for the second index */
        nidx = idx(2:min(this.NNk+1,numel(idx)));
        /* Of course, also consider the \alpha^- */
        ij = [i*ones(size(nidx,2)*2,1) [nidx nidx+n]^t];
    }

    function ij = WSS7_Combi(a, dW, i) {
        ij = this.WSS2_TwoSetMax1DGain(a, dW);
        ij = [ij; this.WSS4_1DMaxGainPluskNN(a, dW)];

        /* Implement Max1DGainPlusLast directly to save a call to the 1D method. */
        in = ij(end,1);
        if i ~= -1
            j = i;
            i = in;
        else
            [dWs, sortidx] = sort(abs(dW));
            pick = 1;
            while sortidx(pick) == in
                pick = pick + 1;
            end
            i = in;
            j = sortidx(pick);
        end
        ij = [ij; i j];
    }

    function ij = WSS128_ApproxBestGainIndices(a, dW) {
        /* Only consider changeable \alpha^{+,-}, i.e. the ones whose partner \alpha equals zero. */
        n = numel(a)/2;
        ind = 1:n;
        apch = ind(a(n+1:end) == 0);
        amch = ind(a(1:n) == 0);
        ch = [apch amch+n];
        /* Create unique combinations of indices */
        A = repmat(ch,length(ch),1);
        I = triu(A,1);
        J = triu(A^t,1);
        ij = [I(I~=0) J(J~=0)];
        /* Remove indices that would look at moving the same alpha up and down at the same time */
        ij(ij(:,1)-ij(:,2)-n == 0,:) = [];

        kijidx = ij;
        kijidxup = kijidx > n;
        kijidx(kijidxup) = kijidx(kijidxup)-n;
        kijidx = sub2ind(size(this.K),kijidx(:,1),kijidx(:,2));
        Kij = this.K(kijidx);

        sgn = 1-mod(sum(kijidxup,2),2)*2;
        dwi = dW(ij);
        aij = a(ij);

        div = 1 ./ (1-Kij.^2);

        /* Compute updates r and s (unconstrained 2D optimum) */
        rs = [(dwi(:,1) - sgn .* Kij .* dwi(:,2)) .* div ...
            (dwi(:,2) - sgn .* Kij .* dwi(:,1)) .* div];
        /* Clip to the box constraints */
        ub = rs > this.C - aij;
        lb = rs < - aij;
        rs(ub) = this.C-aij(ub);
        rs(lb) = -aij(lb);

        /* % Compute gain for r_i,s_j combinations */
        g = sum(rs .* (dwi - .5*rs),2) - sgn.*rs(:,1).*rs(:,2).*Kij;

        /* Extract i,j indices */
        [maxg,idx] = max(g);

        if this.Vis > 2
            fprintf(" Updates found inside WSS128: r=%f, s=%f\n ",rs(idx,1),rs(idx,2));
        end
        ij = ij(idx,:);
        if this.Vis > 1
            subplot(1,2,2);
            plot(1:length(g),max(0,g));
            hold on;
            plot(idx,g(idx)," black. "," MarkerSize ",8);
            hold off;
            title(sprintf(" Best gain index: (%d,%d), gain: %e ",ij(1),ij(2), maxg));
            axis tight;
        end
    }
    function ij = WSS1024_RandomAdmissibleIndices(a) {
        n = numel(a)/2;
        ind = 1:n;
        ch = [ind(a(n+1:end) == 0) ind(a(1:n) == 0)+n];
        i = ch(randi(length(ch)));
        j = ch(randi(length(ch)));
        while j==i || abs(j-i) == n
            j = ch(randi(length(ch)));
        end
        ij = [i j];
    }

    function [a, dW, T] = I0_ColdStart(fxi) {
        T = 0;
        a = zeros(1,2*length(fxi)); /* 1..n = a^+, n+1..2n = a^- */
        dW = [fxi -fxi] - this.Eps;
    }
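    /* With a = 0, the initial dual objective contribution is T = 0 and the gradient of
     * the dual objective reduces to dW = [fxi -fxi] - Eps: each \alpha^+_i starts with
     * gradient f(x_i) - \epsilon and each \alpha^-_i with -f(x_i) - \epsilon. */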

    function [ai, dW, T] = I1_ColdStartKernelRule(fxi) {
        n = length(fxi);
        /* ai(1:n) = 1; ai(n+1:2*n) = 0; */
        ai = max(0,min(this.C,[fxi.*(fxi>=0) -fxi.*(fxi<0)]));
        ai = ai * 10 / n;
        a = ai(1:n)-ai(n+1:end);
        hlp = -a*this.K + fxi;
        dW = [hlp -hlp] - this.Eps;
        /* T = a*this.K*a' + this.Eps*sum(ai) - fxi*a'; */
        T = -sum(ai .* dW);
    }

    function [ai, dW, T] = I2_WarmStart(fxi, initialai) {
        n = length(fxi);
        ap = zeros(1,n);
        am = ap;
        ap(initialai > 0) = initialai(initialai>0);
        /* \alpha^- must be nonnegative, so store the magnitudes of the negative coefficients
         * (consistent with \alpha = \alpha^+ - \alpha^- used throughout this class) */
        am(initialai < 0) = -initialai(initialai<0);
        ai = [ap am];
        hlp = -(ap-am)*this.K + fxi;
        dW = [hlp -hlp] - this.Eps;
        T = -sum(ai .* dW);
    }

    function [r, s, idx] = getMax2DGainUpdatesForIndices(a, dW, ij) {
        n = numel(a)/2;
        kijidx = ij;
        kijidxup = kijidx > n;
        kijidx(kijidxup) = kijidx(kijidxup)-n;
        kijidx = sub2ind([n n],kijidx(:,1),kijidx(:,2));
        Kij = this.K(kijidx);

        sgn = 1-mod(sum(kijidxup,2),2)*2;
        dwi = dW(ij);
        aij = a(ij);

        div = 1 ./ (1-Kij.^2);

        /* Compute updates r and s (unconstrained 2D optimum) */
        rs = [(dwi(:,1) - sgn .* Kij .* dwi(:,2)) .* div ...
            (dwi(:,2) - sgn .* Kij .* dwi(:,1)) .* div];

        /* Handle constraints */
        b = rs > this.C - aij | rs < - aij;

        ronly = find(b(:,1) & ~b(:,2));
        sonly = find(~b(:,1) & b(:,2));
        both = find(b(:,1) & b(:,2));

        /* Only r is constrained: clamp r, recompute s from the s-gradient dwi(:,2) */
        if ~isempty(ronly)
            rs(ronly,1) = min(this.C - aij(ronly,1),max(- aij(ronly,1), rs(ronly,1)));
            rs(ronly,2) = min(this.C-aij(ronly,2), max(-aij(ronly,2), dwi(ronly,2) - rs(ronly,1) .* Kij(ronly)));
        end

        /* Only s is constrained: clamp s, recompute r from the r-gradient dwi(:,1) */
        if ~isempty(sonly)
            rs(sonly,2) = min(this.C - aij(sonly,2),max(-aij(sonly,2), rs(sonly,2)));
            rs(sonly,1) = min(this.C-aij(sonly,1), max(-aij(sonly,1), dwi(sonly,1) - rs(sonly,2) .* Kij(sonly)));
        end

        /* Both variables are constrained: clamp one variable, update the other one
         * accordingly, and keep whichever of the two resulting pairs yields the higher gain. */
        if ~isempty(both)
            Kijb = Kij(both);
            r1 = min(this.C - aij(both,1),max(- aij(both,1), rs(both,1)));
            /* s recomputed from the s-gradient dwi(:,2) given the clamped r1 */
            s1 = min(this.C-aij(both,2), max(-aij(both,2), dwi(both,2) - r1 .* Kijb));
            s2 = min(this.C - aij(both,2),max(- aij(both,2), rs(both,2)));
            /* r recomputed from the r-gradient dwi(:,1) given the clamped s2 */
            r2 = min(this.C-aij(both,1), max(-aij(both,1), dwi(both,1) - s2 .* Kijb));

            /* Gain if r is constrained and s accordingly updated */
            g1 = r1 .* (dwi(both,1) -.5*r1) + s1 .*(dwi(both,2) -.5 * s1) + sgn(both) .* r1 .* s1.* Kijb;
            /* Gain if s is constrained and r accordingly updated */
            g2 = r2 .* (dwi(both,1) -.5*r2) + s2 .*(dwi(both,2) -.5 * s2) + sgn(both) .* r2 .* s2 .* Kijb;

            /* % Keep whichever gain is higher. */
            cmp = g1 > g2;
            rs(both(cmp),1) = r1(cmp);
            rs(both(cmp),2) = s1(cmp);
            rs(both(~cmp),1) = r2(~cmp);
            rs(both(~cmp),2) = s2(~cmp);
        end

        /* % Compute gain */
        g = sum(rs .* (dwi - .5*rs),2) + sgn.*rs(:,1).*rs(:,2).*Kij;

        /* Extract r,s updates */
        [dummy, idx] = max(g);
        r = rs(idx,1);
        s = rs(idx,2);
    }
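    /* Background for the unconstrained updates above (a sketch, assuming K(x,x) = 1 on
     * the kernel diagonal): setting the partial derivatives of the two-variable gain to
     * zero yields the 2x2 system
     *   r + sgn*K_ij*s = dW_i,
     *   sgn*K_ij*r + s = dW_j,
     * with solution r = (dW_i - sgn*K_ij*dW_j)/(1-K_ij^2) and
     * s = (dW_j - sgn*K_ij*dW_i)/(1-K_ij^2), i.e. exactly the rs values computed via
     * div = 1/(1-K_ij^2) above; the subsequent branches only enforce the box constraints. */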

    public: /* ( Static ) */
    static function res = test_ScalarEpsSVR_SMO() {
        x = -5:.1:5;
        fx = sinc(x)+.2*x;
        fx = fx ./ max(abs(fx));

        svr = general.regression.ScalarEpsSVR_SMO;
        svr.Eps = .1;
        svr.Lambda = 1/20;
        svr.Vis = 0;

        kernel = kernels.GaussKernel(.8);
        svr.K = kernel.evaluate(x,x);

        res = compute(1);
        res = res & compute(2);

        function res = compute(version)
            svr.Version = version;
            [ai, svidx] = svr.computeKernelCoefficients(fx,[]);
            sv = x(:,svidx);
            svfun = @(x)ai^t*(kernel.evaluate(x,sv)^t);
            fsvr = svfun(x);
            fdiff = abs(fsvr(svidx)-fx(svidx));
            res = isempty(find(fdiff > 1.01*svr.Eps,1));
        end
    }
};
}
}
Cross-referenced members and classes:

MaxCount: The maximum number of iterations.
Eps: The epsilon value to use for the loss function.
NNk: Nearest neighbors to search for the WSS4 strategy.
integer LastIterations: The number of iterations used at the last run.
function [ai, sf] = regress(fxi, initialai): SCALARSVR Scalar support vector regression. (Definition: BaseScalarSVR.m:19)
data.FileMatrix K: The kernel matrix to use. (Definition: BaseScalarSVR.m:75)
double C: The weighting of the slack variables.
double Lambda: The regularization parameter for the primary minimization problem. (Definition: BaseScalarSVR.m:93)
static const TOL_OK: SVR-SMO specific flag. Indicates that the E+T values are smaller than the prescribed tolerance. (Definition: StopFlag.m:101)
static const MAX_ITER: Maximum number of iterations reached. (Definition: StopFlag.m:74)
StopFlag: Flags that algorithms can use to specify the reason for their termination. (Definition: StopFlag.m:17)
KerMor: Global configuration class for all KerMor run-time settings. (Definition: KerMor.m:17)
static function KerMor theinstance = App(): The singleton KerMor instance. (Definition: KerMor.m:910)
disp: Handle object disp method which is called by the display method. See the MATLAB disp function.
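
A minimal usage sketch in plain (unfiltered) MATLAB, mirroring the static test function above. The property names and the computeKernelCoefficients call are taken from this file; the sample data and the GaussKernel bandwidth are illustrative choices only.

    % Sample a 1D function and normalize to [-1,1], as required by regress.
    x = -5:.1:5;
    fx = sinc(x) + .2*x;
    fx = fx ./ max(abs(fx));

    % Configure the SVR and precompute the kernel matrix on the training data.
    svr = general.regression.ScalarEpsSVR_SMO;
    svr.Eps = .1;        % width of the epsilon-insensitive tube
    svr.Lambda = 1/20;   % regularization parameter
    svr.Version = 2;     % use the 2D SMO optimizer
    kernel = kernels.GaussKernel(.8);
    svr.K = kernel.evaluate(x, x);

    % Compute the expansion coefficients and evaluate the resulting function.
    [ai, svidx] = svr.computeKernelCoefficients(fx, []);
    sv = x(:, svidx);
    svfun = @(z) ai' * (kernel.evaluate(z, sv)');
    fsvr = svfun(x);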