// SVWorkingSet default constructor -- initializer list (excerpt)
   fLogger( new MsgLogger( "SVWorkingSet", kINFO ) )
// SVWorkingSet constructor from the input event vector and the kernel function -- initializer list (excerpt)
   : fdoRegression(doreg),
     fInputData(inputVectors),
     fKFunction(kernelFunction),
     fLogger( new MsgLogger( "SVWorkingSet", kINFO ) )
// ~SVWorkingSet() -- destructor
   if (fKMatrix != 0) { delete fKMatrix; fKMatrix = 0; }
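// ExamineExample(jevt), excerpted below: if the chosen event has no cached
// error (it is not among the non-bound events), the error is rebuilt from the
// current multipliers as E_j = sum_k alpha_k * y_k * K(x_k, x_j), with
// GetAlpha() giving alpha_k, GetTypeFlag() the label y_k and fKVals the
// cached kernel row of jevt.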
   std::vector<TMVA::SVEvent*>::iterator idIter;

   for (idIter = fInputData->begin(); idIter != fInputData->end(); ++idIter) {
      if ((*idIter)->GetAlpha() > 0)
         fErrorC_J += (*idIter)->GetAlpha() * (*idIter)->GetTypeFlag() * fKVals[k];
   if ((jevt->GetIdx() == 1) && (fErrorC_J < fB_up)) {

   else if ((jevt->GetIdx() == -1) && (fErrorC_J > fB_low)) {

   if ((jevt->GetIdx() >= 0) && (fB_low - fErrorC_J > 2*fTolerance)) {

   if ((jevt->GetIdx() <= 0) && (fErrorC_J - fB_up > 2*fTolerance)) {
   if (converged) return kFALSE;

   if (fB_low - fErrorC_J > fErrorC_J - fB_up) ievt = fTEventLow;
   else                                        ievt = fTEventUp;

   if (TakeStep(ievt, jevt)) return kTRUE;
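// The checks above are the two-threshold working-set selection of SMO:
// jevt is treated as a KKT violator once fB_low - E_j > 2*fTolerance or
// E_j - fB_up > 2*fTolerance; its partner ievt is then fTEventLow or
// fTEventUp, whichever gives the larger violation, and the pair is handed
// to TakeStep() for the actual joint update, excerpted below.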
   if (ievt == jevt) return kFALSE;
   std::vector<TMVA::SVEvent*>::iterator idIter;

   Float_t newAlpha_I, newAlpha_J;

   s = Int_t( type_I * type_J );
   if (type_I == type_J) {
      if ( gamma >= (c_i - c_j) )

      if ( (c_i - c_j) >= gamma )
   Float_t kernel_II, kernel_IJ, kernel_JJ;

   kernel_II = fKMatrix->GetElement(ievt->GetNs(), ievt->GetNs());
   kernel_IJ = fKMatrix->GetElement(ievt->GetNs(), jevt->GetNs());
   kernel_JJ = fKMatrix->GetElement(jevt->GetNs(), jevt->GetNs());

   eta = 2*kernel_IJ - kernel_II - kernel_JJ;
      newAlpha_J = alpha_J + (type_J*( errorC_J - errorC_I ))/eta;
      if      (newAlpha_J < l) newAlpha_J = l;
      else if (newAlpha_J > h) newAlpha_J = h;
      Float_t c_J = type_J*( errorC_I - errorC_J ) - eta * alpha_J;

      lobj = c_I*l*l + c_J*l;
      hobj = c_I*h*h + c_J*h;

      if      (lobj > hobj + epsilon) newAlpha_J = l;
      else if (lobj < hobj - epsilon) newAlpha_J = h;
      else                            newAlpha_J = alpha_J;
   newAlpha_I = alpha_I - s*( newAlpha_J - alpha_J );

   if (newAlpha_I < 0) {
      newAlpha_J += s * newAlpha_I;
   }
   else if (newAlpha_I > c_i) {
      Float_t temp = newAlpha_I - c_i;
      newAlpha_J += s * temp;
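// Summary of the analytic step above: with eta = 2*K_IJ - K_II - K_JJ the
// unconstrained optimum along the constraint line is
//    newAlpha_J = alpha_J + type_J*(errorC_J - errorC_I)/eta,
// clipped to the feasible segment [l, h]; when eta does not provide a usable
// curvature, the objective is evaluated at both segment ends (lobj, hobj)
// instead.  The partner multiplier follows from the equality constraint,
//    newAlpha_I = alpha_I - s*(newAlpha_J - alpha_J),  s = type_I*type_J,
// and is kept inside [0, c_i] by shifting newAlpha_J as done just above.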
   Float_t dL_I = type_I * ( newAlpha_I - alpha_I );
   Float_t dL_J = type_J * ( newAlpha_J - alpha_J );

   for (idIter = fInputData->begin(); idIter != fInputData->end(); ++idIter) {
      if ((*idIter)->GetIdx() == 0) {
         Float_t ii = fKMatrix->GetElement(ievt->GetNs(), (*idIter)->GetNs());
         Float_t jj = fKMatrix->GetElement(jevt->GetNs(), (*idIter)->GetNs());

         (*idIter)->UpdateErrorCache(dL_I * ii + dL_J * jj);
   ievt->SetErrorCache(errorC_I + dL_I*kernel_II + dL_J*kernel_IJ);
   jevt->SetErrorCache(errorC_J + dL_I*kernel_IJ + dL_J*kernel_JJ);
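// Only the non-bound events (GetIdx() == 0) keep a cached error, so the loop
// above shifts their caches by dL_I*K(i,k) + dL_J*K(j,k), while the two
// optimized events get their caches recomputed directly from the stored
// kernel elements.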
   for (idIter = fInputData->begin(); idIter != fInputData->end(); ++idIter) {
      if ((*idIter)->GetIdx() == 0) {
         if ((*idIter)->GetErrorCache() > fB_low) {
            fB_low = (*idIter)->GetErrorCache();
            fTEventLow = (*idIter);
         }
         if ((*idIter)->GetErrorCache() < fB_up) {
            fB_up = (*idIter)->GetErrorCache();
            fTEventUp = (*idIter);
   if (fB_up > fB_low - 2*fTolerance) return kTRUE;
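// Convergence test used by the training loop: the working set is considered
// optimal once fB_up > fB_low - 2*fTolerance, i.e. once the largest remaining
// KKT violation is below twice the user tolerance.  Train(), excerpted below,
// performs Platt-style sweeps, alternating a pass over all events with passes
// restricted to the non-bound (IsInI0) events, until no multiplier changes
// any more or the iteration limit is reached.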
   Int_t numChanged = 0;
   Int_t examineAll = 1;

   Int_t deltaChanges = 0;

   std::vector<TMVA::SVEvent*>::iterator idIter;

   while ((numChanged > 0) || (examineAll > 0)) {
      if (fIPyCurrentIter) *fIPyCurrentIter = numit;
      if (fExitFromTraining && *fExitFromTraining) break;

      numChanged = 0;
      if (examineAll) {
         // full sweep over all training events
         for (idIter = fInputData->begin(); idIter != fInputData->end(); ++idIter) {
            if (!fdoRegression) numChanged += (UInt_t)ExamineExample(*idIter);
            else                numChanged += (UInt_t)ExamineExampleReg(*idIter);
         }
      }
      else {
         // sweep restricted to the non-bound (I0) events
         for (idIter = fInputData->begin(); idIter != fInputData->end(); ++idIter) {
            if ((*idIter)->IsInI0()) {
               if (!fdoRegression) numChanged += (UInt_t)ExamineExample(*idIter);
               else                numChanged += (UInt_t)ExamineExampleReg(*idIter);
            }
         }
      }

      if      (examineAll == 1) examineAll = 0;
      else if (numChanged == 0 || numChanged < 10 || deltaChanges > 3) examineAll = 1;

      if (numChanged == numChangedOld) deltaChanges++;
      else                             deltaChanges = 0;
      numChangedOld = numChanged;

      if (numit >= nMaxIter) {
         *fLogger << kWARNING << "Max number of iterations exceeded. "
                  << "Training may not be completed. Try a smaller Cost parameter." << Endl;
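// A minimal usage sketch for the working set (assumption: the constructor
// argument order matches the initializer list above and a tolerance is passed
// as the third argument; only Train() and GetSupportVectors() are shown by
// this listing):
//
//    TMVA::SVWorkingSet ws(inputVectors, kernelFunction, 0.01, kFALSE);
//    ws.Train(1000);                                            // at most 1000 SMO iterations
//    std::vector<TMVA::SVEvent*>* sv = ws.GetSupportVectors();  // events with non-zero weight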
   std::vector<TMVA::SVEvent*>::iterator idIter;

   for (idIter = fInputData->begin(); idIter != fInputData->end(); ++idIter)
      if ((*idIter)->GetAlpha() != 0) counter++;
// GetSupportVectors() -- collect the events with non-zero weight
   std::vector<TMVA::SVEvent*>::iterator idIter;
   if (fSupVec != 0) { delete fSupVec; fSupVec = 0; }
   fSupVec = new std::vector<TMVA::SVEvent*>(0);

   for (idIter = fInputData->begin(); idIter != fInputData->end(); ++idIter) {
      if ((*idIter)->GetDeltaAlpha() != 0) {
         fSupVec->push_back((*idIter));
// TakeStepReg(ievt, jevt) -- joint update of one pair in the regression case
   if (ievt == jevt) return kFALSE;
   std::vector<TMVA::SVEvent*>::iterator idIter;

   const Float_t eta = -2*kernel_IJ + kernel_II + kernel_JJ;   // note: sign convention opposite to TakeStep()

   Bool_t caseA, caseB, caseC, caseD, terminated;
   caseA = caseB = caseC = caseD = terminated = kFALSE;
   Float_t b_alpha_i, b_alpha_j, b_alpha_i_p, b_alpha_j_p;

   Float_t tmp_alpha_i, tmp_alpha_j;
   tmp_alpha_i = tmp_alpha_j = 0.;
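// In the regression case every event carries two multipliers, alpha and
// alpha* (the "_p" variables below presumably hold the starred ones), and
// deltafi is presumably the difference of the two cached errors.  Judging
// from which variables each branch updates, caseA..caseD correspond to
// jointly optimizing the pairs (alpha_i, alpha_j), (alpha_i, alpha*_j),
// (alpha*_i, alpha_j) and (alpha*_i, alpha*_j), with the +-2*epsilon shifts
// coming from the eps-insensitive loss.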
   if ((caseA == kFALSE) &&
       (b_alpha_i > 0 || (b_alpha_i_p == 0 && deltafi > 0)) &&
       (b_alpha_j > 0 || (b_alpha_j_p == 0 && deltafi < 0)))

      tmp_alpha_j = b_alpha_j - (deltafi/eta);
      tmp_alpha_i = b_alpha_i - (tmp_alpha_j - b_alpha_j);

      if (IsDiffSignificant(b_alpha_j, tmp_alpha_j, epsilon) || IsDiffSignificant(b_alpha_i, tmp_alpha_i, epsilon)) {
         b_alpha_j = tmp_alpha_j;
         b_alpha_i = tmp_alpha_i;
   else if ((caseB == kFALSE) &&
            (b_alpha_i > 0   || (b_alpha_i_p == 0 && deltafi > 2*epsilon)) &&
            (b_alpha_j_p > 0 || (b_alpha_j == 0   && deltafi > 2*epsilon)))

      tmp_alpha_j = b_alpha_j_p - ((deltafi - 2*epsilon)/eta);
      tmp_alpha_i = b_alpha_i - (tmp_alpha_j - b_alpha_j_p);

      if (IsDiffSignificant(b_alpha_j_p, tmp_alpha_j, epsilon) || IsDiffSignificant(b_alpha_i, tmp_alpha_i, epsilon)) {
         b_alpha_j_p = tmp_alpha_j;
         b_alpha_i = tmp_alpha_i;
   else if ((caseC == kFALSE) &&
            (b_alpha_i_p > 0 || (b_alpha_i == 0   && deltafi < -2*epsilon)) &&
            (b_alpha_j > 0   || (b_alpha_j_p == 0 && deltafi < -2*epsilon)))

      tmp_alpha_j = b_alpha_j - ((deltafi + 2*epsilon)/eta);
      tmp_alpha_i = b_alpha_i_p - (tmp_alpha_j - b_alpha_j);

      if (IsDiffSignificant(b_alpha_j, tmp_alpha_j, epsilon) || IsDiffSignificant(b_alpha_i_p, tmp_alpha_i, epsilon)) {
         b_alpha_j = tmp_alpha_j;
         b_alpha_i_p = tmp_alpha_i;
   else if ((caseD == kFALSE) &&
            (b_alpha_i_p > 0 || (b_alpha_i == 0 && deltafi < 0)) &&
            (b_alpha_j_p > 0 || (b_alpha_j == 0 && deltafi > 0)))

      tmp_alpha_j = b_alpha_j_p + (deltafi/eta);
      tmp_alpha_i = b_alpha_i_p - (tmp_alpha_j - b_alpha_j_p);

      if (IsDiffSignificant(b_alpha_j_p, tmp_alpha_j, epsilon) || IsDiffSignificant(b_alpha_i_p, tmp_alpha_i, epsilon)) {
         b_alpha_j_p = tmp_alpha_j;
         b_alpha_i_p = tmp_alpha_i;
   for (idIter = fInputData->begin(); idIter != fInputData->end(); ++idIter) {
      if ((*idIter)->GetIdx() == 0) {
         Float_t k_ii = fKMatrix->GetElement(ievt->GetNs(), (*idIter)->GetNs());
         Float_t k_jj = fKMatrix->GetElement(jevt->GetNs(), (*idIter)->GetNs());

         (*idIter)->UpdateErrorCache(diff_alpha_i * k_ii + diff_alpha_j * k_jj);
   for (idIter = fInputData->begin(); idIter != fInputData->end(); ++idIter) {
      if ((!(*idIter)->IsInI3()) && ((*idIter)->GetErrorCache() > fB_low)) {
         fB_low = (*idIter)->GetErrorCache();
         fTEventLow = (*idIter);
      }
      if ((!(*idIter)->IsInI2()) && ((*idIter)->GetErrorCache() < fB_up)) {
         fB_up = (*idIter)->GetErrorCache();
         fTEventUp = (*idIter);
// ExamineExampleReg(jevt) -- the regression error is rebuilt from the delta alphas
   std::vector<TMVA::SVEvent*>::iterator idIter;

   for (idIter = fInputData->begin(); idIter != fInputData->end(); ++idIter) {
      fErrorC_J -= (*idIter)->GetDeltaAlpha() * fKVals[k];
   if (fErrorC_J + feps < fB_up) {
      fB_up = fErrorC_J + feps;
   }
   else if (fErrorC_J - feps > fB_low) {
      fB_low = fErrorC_J - feps;
   }
   else if ((jevt->IsInI2()) && (fErrorC_J + feps > fB_low)) {
      fB_low = fErrorC_J + feps;
   }
   else if ((jevt->IsInI3()) && (fErrorC_J - feps < fB_up)) {
      fB_up = fErrorC_J - feps;
   if (fB_low - fErrorC_J + feps > 2*fTolerance) {
      if (fErrorC_J - feps - fB_up > fB_low - fErrorC_J + feps) {
   }
   else if (fErrorC_J - feps - fB_up > 2*fTolerance) {
      if (fB_low - fErrorC_J + feps > fErrorC_J - feps - fB_up) {

   if (fB_low - fErrorC_J - feps > 2*fTolerance) {
      if (fErrorC_J + feps - fB_up > fB_low - fErrorC_J - feps) {
   }
   else if (fErrorC_J + feps - fB_up > 2*fTolerance) {
      if (fB_low - fErrorC_J - feps > fErrorC_J + feps - fB_up) {

   if (fB_low - fErrorC_J - feps > 2*fTolerance) {
      if (fErrorC_J + feps - fB_up > fB_low - fErrorC_J - feps) {
   }
   else if (fErrorC_J - feps - fB_up > 2*fTolerance) {
      if (fB_low - fErrorC_J + feps > fErrorC_J - feps - fB_up) {

   if (fErrorC_J + feps - fB_up > 2*fTolerance) {

   if (fB_low - fErrorC_J + feps > 2*fTolerance) {
   if (converged) return kFALSE;

   if (TakeStepReg(ievt, jevt)) return kTRUE;
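// In this regression path feps presumably plays the role of the tube width
// eps: the cached error is compared against fB_up and fB_low only after being
// shifted by +-feps, and a pair is handed to TakeStepReg() when the shifted
// violation exceeds 2*fTolerance, mirroring ExamineExample() above.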