fLogger( new MsgLogger( "SVWorkingSet", kINFO ) )
: fdoRegression(doreg),
  fInputData(inputVectors),
  fKFunction(kernelFunction),
  fLogger( new MsgLogger( "SVWorkingSet", kINFO ) )
if (fKMatrix != 0) { delete fKMatrix; fKMatrix = 0; }
// SVWorkingSet::ExamineExample (excerpt): build the error cache for jevt, update the
// extreme cached errors fB_up/fB_low, test the optimality conditions and, if they are
// violated, pick a partner event and attempt an optimization step.
std::vector<TMVA::SVEvent*>::iterator idIter;

for(idIter = fInputData->begin(); idIter != fInputData->end(); ++idIter){
   if((*idIter)->GetAlpha() > 0)
      fErrorC_J += (*idIter)->GetAlpha()*(*idIter)->GetTypeFlag()*fKVals[k];

if((jevt->GetIdx() == 1) && (fErrorC_J < fB_up)){

else if((jevt->GetIdx() == -1) && (fErrorC_J > fB_low)){

if((jevt->GetIdx() >= 0) && (fB_low - fErrorC_J > 2*fTolerance)){

if((jevt->GetIdx() <= 0) && (fErrorC_J - fB_up > 2*fTolerance)){

if (converged) return kFALSE;

if(fB_low - fErrorC_J > fErrorC_J - fB_up) ievt = fTEventLow;
else                                       ievt = fTEventUp;

if (TakeStep(ievt, jevt)) return kTRUE;
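To make the bookkeeping above concrete, here is a minimal standalone sketch (not TMVA code) of the two ingredients: the cached error E_j = sum_i alpha_i * y_i * K(x_i, x_j) that the loop accumulates into fErrorC_J, and the second-choice heuristic that pairs the current event with whichever extreme cached error lies farthest from E_j. The Event struct, rbfKernel and errorCache names are illustrative assumptions, not TMVA API.

// Minimal sketch, not TMVA code: cached SMO error and second-choice heuristic.
#include <cmath>
#include <cstdio>
#include <vector>

struct Event { std::vector<double> x; int y; double alpha; };

// Toy RBF kernel K(a, b) = exp(-gamma * |a - b|^2).
double rbfKernel(const Event& a, const Event& b, double gamma = 0.5) {
   double d2 = 0.;
   for (size_t k = 0; k < a.x.size(); ++k) d2 += (a.x[k] - b.x[k]) * (a.x[k] - b.x[k]);
   return std::exp(-gamma * d2);
}

// E_j = sum_i alpha_i * y_i * K(x_i, x_j): the quantity accumulated into fErrorC_J above.
double errorCache(const std::vector<Event>& data, const Event& j) {
   double e = 0.;
   for (const Event& i : data)
      if (i.alpha > 0.) e += i.alpha * i.y * rbfKernel(i, j);
   return e;
}

int main() {
   std::vector<Event> data = { {{0.0, 0.0}, +1, 0.3},
                               {{1.0, 1.0}, -1, 0.3},
                               {{0.5, 0.2}, +1, 0.0} };
   double eJ = errorCache(data, data[2]);

   // Second-choice heuristic: take the partner whose cached error is farthest from E_j.
   double bLow = 0.8, bUp = -0.8;   // made-up extreme cached errors
   bool useLow = (bLow - eJ > eJ - bUp);
   std::printf("E_j = %.4f, partner = %s\n", eJ, useLow ? "event at b_low" : "event at b_up");
   return 0;
}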
// SVWorkingSet::TakeStep (excerpt): analytic SMO update of the pair (ievt, jevt).
if (ievt == jevt) return kFALSE;
std::vector<TMVA::SVEvent*>::iterator idIter;

Float_t newAlpha_I, newAlpha_J;

s = Int_t( type_I * type_J );

// The bounds l, h of the segment alpha_J may move along depend on whether the labels agree.
if (type_I == type_J) {
   Float_t gamma = alpha_I + alpha_J;

   Float_t gamma = alpha_I - alpha_J;
   if ( gamma >= (c_i - c_j) )

   if ( (c_i - c_j) >= gamma )

// Kernel entries and the curvature eta along the constraint line.
Float_t kernel_II, kernel_IJ, kernel_JJ;

kernel_II = fKMatrix->GetElement(ievt->GetNs(), ievt->GetNs());
kernel_IJ = fKMatrix->GetElement(ievt->GetNs(), jevt->GetNs());
kernel_JJ = fKMatrix->GetElement(jevt->GetNs(), jevt->GetNs());

eta = 2*kernel_IJ - kernel_II - kernel_JJ;

// Unconstrained optimum for alpha_J, clipped to [l, h].
newAlpha_J = alpha_J + (type_J*( errorC_J - errorC_I ))/eta;
if      (newAlpha_J < l) newAlpha_J = l;
else if (newAlpha_J > h) newAlpha_J = h;

// Degenerate eta: evaluate the objective at both ends of the segment instead.
Float_t c_J = type_J*( errorC_I - errorC_J ) - eta * alpha_J;
lobj = c_I * l * l + c_J * l;
hobj = c_I * h * h + c_J * h;

if      (lobj > hobj + epsilon) newAlpha_J = l;
else if (lobj < hobj - epsilon) newAlpha_J = h;
else                            newAlpha_J = alpha_J;

// No significant change of alpha_J: no step is taken.
if (TMath::Abs( newAlpha_J - alpha_J ) < ( epsilon * ( newAlpha_J + alpha_J + epsilon ))){

// alpha_I follows alpha_J so that the equality constraint is preserved.
newAlpha_I = alpha_I - s*( newAlpha_J - alpha_J );

if (newAlpha_I < 0) {
   newAlpha_J += s * newAlpha_I;

else if (newAlpha_I > c_i) {
   Float_t temp = newAlpha_I - c_i;
   newAlpha_J += s * temp;

// Propagate the change of the two alphas to the cached errors of the events in I0.
Float_t dL_I = type_I * ( newAlpha_I - alpha_I );
Float_t dL_J = type_J * ( newAlpha_J - alpha_J );

for(idIter = fInputData->begin(); idIter != fInputData->end(); ++idIter){
   if((*idIter)->GetIdx()==0){
      Float_t ii = fKMatrix->GetElement(ievt->GetNs(), (*idIter)->GetNs());
      Float_t jj = fKMatrix->GetElement(jevt->GetNs(), (*idIter)->GetNs());
      (*idIter)->UpdateErrorCache(dL_I * ii + dL_J * jj);

ievt->SetErrorCache(errorC_I + dL_I*kernel_II + dL_J*kernel_IJ);
jevt->SetErrorCache(errorC_J + dL_I*kernel_IJ + dL_J*kernel_JJ);

// Recompute fB_low / fB_up from the updated error caches.
for(idIter = fInputData->begin(); idIter != fInputData->end(); ++idIter){
   if((*idIter)->GetIdx()==0){
      if((*idIter)->GetErrorCache() > fB_low){
         fB_low = (*idIter)->GetErrorCache();
         fTEventLow = (*idIter);

      if((*idIter)->GetErrorCache() < fB_up){
         fB_up = (*idIter)->GetErrorCache();
         fTEventUp = (*idIter);
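The heart of the step above is Platt's analytic two-variable update. The sketch below (not TMVA code) shows it under simplified assumptions: the box bounds l and h are passed in directly, a non-negative eta is not handled, and the function and struct names are invented for illustration.

// Minimal sketch, not TMVA code: Platt-style analytic SMO update of one (alpha_i, alpha_j) pair.
#include <algorithm>
#include <cstdio>

struct PairUpdate { double alphaI, alphaJ; };

PairUpdate smoStep(double alphaI, double alphaJ, int yI, int yJ,
                   double eI, double eJ,
                   double kII, double kIJ, double kJJ,
                   double l, double h) {
   const int    s   = yI * yJ;
   const double eta = 2. * kIJ - kII - kJJ;            // same sign convention as above; < 0 for a positive-definite kernel
   double newAlphaJ = alphaJ + yJ * (eJ - eI) / eta;   // unconstrained optimum along the constraint line
   newAlphaJ = std::min(h, std::max(l, newAlphaJ));    // clip to the box [l, h]
   const double newAlphaI = alphaI - s * (newAlphaJ - alphaJ);   // keep sum_i alpha_i*y_i fixed
   PairUpdate u = { newAlphaI, newAlphaJ };
   return u;
}

int main() {
   // Toy numbers: y_i = +1, y_j = -1, C = 1, hence l = max(0, alpha_j - alpha_i) = 0.2 and h = 1.
   PairUpdate u = smoStep(0.2, 0.4, +1, -1, 0.3, -0.1, 1.0, 0.2, 1.0, 0.2, 1.0);
   std::printf("new alpha_i = %.4f, new alpha_j = %.4f\n", u.alphaI, u.alphaJ);
   return 0;
}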
// Termination test: the working set is optimal once fB_up rises above fB_low - 2*fTolerance.
if (fB_up > fB_low - 2*fTolerance) return kTRUE;
// SVWorkingSet::Train (excerpt): outer SMO loop, alternating between full sweeps over all
// events and sweeps restricted to the events whose alphas lie strictly inside the box (I0).
Int_t numChanged = 0;
Int_t examineAll = 1;
Int_t deltaChanges = 0;
std::vector<TMVA::SVEvent*>::iterator idIter;

while ((numChanged > 0) || (examineAll > 0)) {
   if (fIPyCurrentIter) *fIPyCurrentIter = numit;
   if (fExitFromTraining && *fExitFromTraining) break;

   for (idIter = fInputData->begin(); idIter != fInputData->end(); ++idIter){
      if (!fdoRegression) numChanged += (UInt_t)ExamineExample(*idIter);
      else                numChanged += (UInt_t)ExamineExampleReg(*idIter);

   for (idIter = fInputData->begin(); idIter != fInputData->end(); ++idIter) {
      if ((*idIter)->IsInI0()) {
         if (!fdoRegression) numChanged += (UInt_t)ExamineExample(*idIter);
         else                numChanged += (UInt_t)ExamineExampleReg(*idIter);

   if      (examineAll == 1) examineAll = 0;
   else if (numChanged == 0 || numChanged < 10 || deltaChanges > 3) examineAll = 1;

   if (numChanged == numChangedOld) deltaChanges++;
   else                             deltaChanges = 0;
   numChangedOld = numChanged;

if (numit >= nMaxIter) {
   Log() << kWARNING
         << "Max number of iterations exceeded. "
         << "Training may not be completed. Try use less Cost parameter" << Endl;
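The sweep logic can be illustrated with a toy standalone version (not TMVA code): alternate between examining all events and examining only the unbounded events until a full pass changes nothing. The examine function here is a dummy stand-in for ExamineExample/ExamineExampleReg, and the budget variable is an invented way to make the toy converge.

// Minimal sketch, not TMVA code: the alternating sweep structure of an SMO training loop.
#include <cstdio>

// Stand-in for ExamineExample: returns true if it "changed an alpha".
bool examine(int /*event*/, int& budget) { return budget-- > 0; }

int main() {
   const int nEvents = 5, nMaxIter = 1000;
   int budget = 7;                 // toy stand-in for the remaining possible improvements
   int numChanged = 0, examineAll = 1, numit = 0;

   while ((numChanged > 0 || examineAll > 0) && numit < nMaxIter) {
      numChanged = 0;
      if (examineAll) {
         for (int i = 0; i < nEvents; ++i) numChanged += examine(i, budget);
      } else {
         // in the real loop: only events with 0 < alpha < C (IsInI0())
         for (int i = 0; i < nEvents; ++i) numChanged += examine(i, budget);
      }
      if (examineAll == 1)      examineAll = 0;   // after a full sweep, switch to the restricted set
      else if (numChanged == 0) examineAll = 1;   // nothing changed: force another full sweep
      ++numit;
   }
   std::printf("converged after %d sweeps\n", numit);
   return 0;
}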
// SVWorkingSet::GetSupportVectors (excerpt): the support vectors are the events whose
// delta alpha is non-zero after training.
std::vector<TMVA::SVEvent*>::iterator idIter;
if (fSupVec != 0) { delete fSupVec; fSupVec = 0; }
fSupVec = new std::vector<TMVA::SVEvent*>(0);

for(idIter = fInputData->begin(); idIter != fInputData->end(); ++idIter){
   if((*idIter)->GetDeltaAlpha() != 0){
      fSupVec->push_back((*idIter));
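As a usage illustration (not TMVA code), collecting support vectors amounts to filtering the training events on a non-zero multiplier, exactly the criterion the loop above applies with GetDeltaAlpha(); the Event struct below is an assumption made for the sketch.

// Minimal sketch, not TMVA code: support vectors are the events with a non-zero multiplier.
#include <cstdio>
#include <vector>

struct Event { double deltaAlpha; };

int main() {
   std::vector<Event> data = { {0.0}, {0.7}, {0.0}, {0.2} };
   std::vector<const Event*> supVec;
   for (const Event& e : data)
      if (e.deltaAlpha != 0.) supVec.push_back(&e);   // same criterion as GetDeltaAlpha() != 0
   std::printf("%zu of %zu events are support vectors\n", supVec.size(), data.size());
   return 0;
}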
// SVWorkingSet::TakeStepReg (excerpt): SMO step for regression. Each event carries two
// multipliers (alpha, alpha_p); the four cases below move one multiplier of ievt against
// one multiplier of jevt, and a move is kept only if it is numerically significant.
if (ievt == jevt) return kFALSE;
std::vector<TMVA::SVEvent*>::iterator idIter;
const Float_t epsilon = 0.001*fTolerance;

const Float_t eta = -2*kernel_IJ + kernel_II + kernel_JJ;

Bool_t caseA, caseB, caseC, caseD, terminated;
caseA = caseB = caseC = caseD = terminated = kFALSE;
Float_t b_alpha_i, b_alpha_j, b_alpha_i_p, b_alpha_j_p;

Float_t tmp_alpha_i, tmp_alpha_j;
tmp_alpha_i = tmp_alpha_j = 0.;

// case A: move (alpha_i, alpha_j)
if((caseA == kFALSE) && (b_alpha_i > 0 || (b_alpha_i_p == 0 && deltafi > 0)) && (b_alpha_j > 0 || (b_alpha_j_p == 0 && deltafi < 0)))

   tmp_alpha_j = b_alpha_j - (deltafi/eta);
   tmp_alpha_i = b_alpha_i - (tmp_alpha_j - b_alpha_j);

   if( IsDiffSignificant(b_alpha_j, tmp_alpha_j, epsilon) || IsDiffSignificant(b_alpha_i, tmp_alpha_i, epsilon)){
      b_alpha_j = tmp_alpha_j;
      b_alpha_i = tmp_alpha_i;

// case B: move (alpha_i, alpha_j_p)
else if((caseB == kFALSE) && (b_alpha_i > 0 || (b_alpha_i_p == 0 && deltafi > 2*epsilon)) && (b_alpha_j_p > 0 || (b_alpha_j == 0 && deltafi > 2*epsilon)))

   high = TMath::Min( b_cost_i, b_cost_j + gamma );

   tmp_alpha_j = b_alpha_j_p - ((deltafi - 2*epsilon)/eta);
   tmp_alpha_i = b_alpha_i - (tmp_alpha_j - b_alpha_j_p);

   if( IsDiffSignificant(b_alpha_j_p, tmp_alpha_j, epsilon) || IsDiffSignificant(b_alpha_i, tmp_alpha_i, epsilon)){
      b_alpha_j_p = tmp_alpha_j;
      b_alpha_i = tmp_alpha_i;

// case C: move (alpha_i_p, alpha_j)
else if((caseC == kFALSE) && (b_alpha_i_p > 0 || (b_alpha_i == 0 && deltafi < -2*epsilon)) && (b_alpha_j > 0 || (b_alpha_j_p == 0 && deltafi < -2*epsilon)))

   tmp_alpha_j = b_alpha_j - ((deltafi + 2*epsilon)/eta);
   tmp_alpha_i = b_alpha_i_p - (tmp_alpha_j - b_alpha_j);

   if( IsDiffSignificant(b_alpha_j, tmp_alpha_j, epsilon) || IsDiffSignificant(b_alpha_i_p, tmp_alpha_i, epsilon)){
      b_alpha_j = tmp_alpha_j;
      b_alpha_i_p = tmp_alpha_i;

// case D: move (alpha_i_p, alpha_j_p)
else if((caseD == kFALSE) &&
        (b_alpha_i_p > 0 || (b_alpha_i == 0 && deltafi < 0)) &&
        (b_alpha_j_p > 0 || (b_alpha_j == 0 && deltafi > 0)))

   tmp_alpha_j = b_alpha_j_p + (deltafi/eta);
   tmp_alpha_i = b_alpha_i_p - (tmp_alpha_j - b_alpha_j_p);

   if( IsDiffSignificant(b_alpha_j_p, tmp_alpha_j, epsilon) || IsDiffSignificant(b_alpha_i_p, tmp_alpha_i, epsilon)){
      b_alpha_j_p = tmp_alpha_j;
      b_alpha_i_p = tmp_alpha_i;

// If any multiplier changed significantly, propagate the change to the error caches
// and update fB_low / fB_up.
if( IsDiffSignificant(b_alpha_i,   ievt->GetAlpha(),   epsilon) ||
    IsDiffSignificant(b_alpha_j,   jevt->GetAlpha(),   epsilon) ||
    IsDiffSignificant(b_alpha_i_p, ievt->GetAlpha_p(), epsilon) ||
    IsDiffSignificant(b_alpha_j_p, jevt->GetAlpha_p(), epsilon) ){

for(idIter = fInputData->begin(); idIter != fInputData->end(); ++idIter){
   if((*idIter)->GetIdx()==0){
      Float_t k_ii = fKMatrix->GetElement(ievt->GetNs(), (*idIter)->GetNs());
      Float_t k_jj = fKMatrix->GetElement(jevt->GetNs(), (*idIter)->GetNs());
      (*idIter)->UpdateErrorCache(diff_alpha_i * k_ii + diff_alpha_j * k_jj);

for(idIter = fInputData->begin(); idIter != fInputData->end(); ++idIter){
   if((!(*idIter)->IsInI3()) && ((*idIter)->GetErrorCache() > fB_low)){
      fB_low = (*idIter)->GetErrorCache();
      fTEventLow = (*idIter);

   if((!(*idIter)->IsInI2()) && ((*idIter)->GetErrorCache() < fB_up)){
      fB_up = (*idIter)->GetErrorCache();
      fTEventUp = (*idIter);
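The sketch below (not TMVA code) shows what one accepted branch of the regression step does, with made-up values assumed for eta and deltafi; isDiffSignificant here is a free function that mirrors the relative-significance guard visible in the code above.

// Minimal sketch, not TMVA code: one branch of a regression SMO move with a significance guard.
#include <cmath>
#include <cstdio>

// Relative test: the change is significant only if |a - b| exceeds eps*(a + b + eps).
bool isDiffSignificant(double a, double b, double eps) {
   return std::fabs(a - b) > eps * (a + b + eps);
}

int main() {
   // Toy values: curvature eta = K_ii + K_jj - 2*K_ij and error difference deltafi = E_i - E_j.
   const double eta = 1.5, deltafi = 0.3, epsilon = 1e-3;
   double alphaJ = 0.40, alphaI = 0.25;

   const double tmpAlphaJ = alphaJ - deltafi / eta;          // unconstrained move of alpha_j
   const double tmpAlphaI = alphaI - (tmpAlphaJ - alphaJ);   // alpha_i compensates the move

   if (isDiffSignificant(alphaJ, tmpAlphaJ, epsilon) || isDiffSignificant(alphaI, tmpAlphaI, epsilon)) {
      alphaJ = tmpAlphaJ;
      alphaI = tmpAlphaI;
   }
   std::printf("alpha_i = %.4f, alpha_j = %.4f\n", alphaI, alphaJ);
   return 0;
}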
// SVWorkingSet::ExamineExampleReg (excerpt): regression analogue of ExamineExample.
// The cached error is shifted by +/-feps (the half-width of the epsilon-insensitive tube),
// with the sign chosen according to the index set (I0/I1/I2/I3) the event belongs to.
std::vector<TMVA::SVEvent*>::iterator idIter;

for(idIter = fInputData->begin(); idIter != fInputData->end(); ++idIter){
   fErrorC_J -= (*idIter)->GetDeltaAlpha()*fKVals[k];

if(fErrorC_J + feps < fB_up){
   fB_up = fErrorC_J + feps;

else if(fErrorC_J - feps > fB_low) {
   fB_low = fErrorC_J - feps;
}
else if((jevt->IsInI2()) && (fErrorC_J + feps > fB_low)){
   fB_low = fErrorC_J + feps;
}
else if((jevt->IsInI3()) && (fErrorC_J - feps < fB_up)){
   fB_up = fErrorC_J - feps;

// Optimality checks: the same violation test is repeated with the +/-feps shift appropriate
// to the index set of jevt; the partner event is the one whose error lies farther away.
if( fB_low - fErrorC_J + feps > 2*fTolerance){
   if(fErrorC_J - feps - fB_up > fB_low - fErrorC_J + feps){
}
else if(fErrorC_J - feps - fB_up > 2*fTolerance){
   if(fB_low - fErrorC_J + feps > fErrorC_J - feps - fB_up){

if( fB_low - fErrorC_J - feps > 2*fTolerance){
   if(fErrorC_J + feps - fB_up > fB_low - fErrorC_J - feps){
}
else if(fErrorC_J + feps - fB_up > 2*fTolerance){
   if(fB_low - fErrorC_J - feps > fErrorC_J + feps - fB_up){

if( fB_low - fErrorC_J - feps > 2*fTolerance){
   if(fErrorC_J + feps - fB_up > fB_low - fErrorC_J - feps){
}
else if(fErrorC_J - feps - fB_up > 2*fTolerance){
   if(fB_low - fErrorC_J + feps > fErrorC_J - feps - fB_up){

if( fErrorC_J + feps - fB_up > 2*fTolerance){

if(fB_low - fErrorC_J + feps > 2*fTolerance){

if (converged) return kFALSE;
if (TakeStepReg(ievt, jevt)) return kTRUE;
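Finally, a toy sketch (not TMVA code) of the convergence bookkeeping for regression: cached errors shifted by the tube half-width eps feed the running extremes b_up and b_low, and a KKT violation is flagged when b_low - b_up exceeds twice the tolerance. The index-set dependence of the real code is ignored here, and all numbers are made up.

// Minimal sketch, not TMVA code: tracking b_up / b_low for epsilon-insensitive regression.
#include <algorithm>
#include <cstdio>
#include <vector>

int main() {
   const double eps = 0.1, tolerance = 0.01;
   // Toy cached errors E_j for a few events.
   std::vector<double> errorCache = { 0.05, -0.30, 0.22, -0.08 };

   double bUp = 1e30, bLow = -1e30;
   for (double e : errorCache) {
      bUp  = std::min(bUp,  e + eps);   // candidates from the upper side of the tube
      bLow = std::max(bLow, e - eps);   // candidates from the lower side of the tube
   }
   const bool violation = (bLow - bUp > 2. * tolerance);
   std::printf("b_up = %.3f, b_low = %.3f, KKT violation: %s\n",
               bUp, bLow, violation ? "yes" : "no");
   return 0;
}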