ROOT Reference Guide
SVKernelFunction.cxx
// @(#)root/tmva $Id$
// Author: Andrzej Zemla

/**********************************************************************************
 * Project: TMVA - a Root-integrated toolkit for multivariate data analysis      *
 * Package: TMVA                                                                  *
 * Class  : SVKernelFunction                                                      *
 * Web    : http://tmva.sourceforge.net                                           *
 *                                                                                *
 * Description:                                                                   *
 *      Implementation                                                            *
 *                                                                                *
 * Authors (alphabetical):                                                        *
 *      Marcin Wolter  <Marcin.Wolter@cern.ch>  - IFJ PAN, Krakow, Poland         *
 *      Andrzej Zemla  <azemla@cern.ch>         - IFJ PAN, Krakow, Poland         *
 *      (IFJ PAN: Henryk Niewodniczanski Inst. Nucl. Physics, Krakow, Poland)     *
 *                                                                                *
 * MultiGaussian, Product and Sum kernels included:                               *
 *      Adrian Bevan   <adrian.bevan@cern.ch>          - Queen Mary               *
 *                                                       University of London, UK *
 *      Tom Stevenson <thomas.james.stevenson@cern.ch> - Queen Mary               *
 *                                                       University of London, UK *
 *                                                                                *
 * Copyright (c) 2005:                                                            *
 *      CERN, Switzerland                                                         *
 *      MPI-K Heidelberg, Germany                                                 *
 *      PAN, Krakow, Poland                                                       *
 *                                                                                *
 * Redistribution and use in source and binary forms, with or without             *
 * modification, are permitted according to the terms listed in LICENSE           *
 * (http://tmva.sourceforge.net/LICENSE)                                          *
 **********************************************************************************/

/*! \class TMVA::SVKernelFunction
\ingroup TMVA
Kernel for Support Vector Machine
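
A minimal usage sketch (not part of the original source, added here for
illustration): `ev1` and `ev2` stand for `TMVA::SVEvent*` pointers obtained
elsewhere from the SVM training data, and the kernel types are assumed to come
from the EKernelType enum declared in SVKernelFunction.h.

~~~ {.cpp}
// RBF kernel with gamma = 0.5: K(x,y) = exp(-0.5 * ||x - y||^2)
TMVA::SVKernelFunction rbf(0.5);
Float_t valRbf = rbf.Evaluate(ev1, ev2);

// Polynomial kernel (x . y + 1)^2, via the (kernel, param1, param2) constructor
TMVA::SVKernelFunction poly(TMVA::SVKernelFunction::kPolynomial, 2., 1.);
Float_t valPoly = poly.Evaluate(ev1, ev2);
~~~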
*/

#include "TMVA/SVKernelFunction.h"
#include "TMVA/SVEvent.h"
#include "TMath.h"

#include <iostream>   // std::cout used in Evaluate's error handling
#include <cstdlib>    // exit()
#include <vector>

////////////////////////////////////////////////////////////////////////////////
/// constructor

TMVA::SVKernelFunction::SVKernelFunction()
   : fGamma(0.),
     fKernel(kRBF), // kernel, order, theta, and kappa are for backward compatibility
     fOrder(0),
     fTheta(0),
     fKappa(0)
{
}

////////////////////////////////////////////////////////////////////////////////
/// constructor

TMVA::SVKernelFunction::SVKernelFunction( Float_t gamma )
   : fGamma(gamma),
     fKernel(kRBF), // kernel, order, theta, and kappa are for backward compatibility
     fOrder(0),
     fTheta(0),
     fKappa(0)
{
   fmGamma.clear();
}

////////////////////////////////////////////////////////////////////////////////
/// constructor

TMVA::SVKernelFunction::SVKernelFunction( EKernelType k, Float_t param1, Float_t param2 )
   : fKernel(k)
{
   if (k==kRBF) { fGamma = param1; }
   else if (k==kPolynomial){
      fOrder = param1;
      fTheta = param2;
   }
   fKernelsList.clear();
}

////////////////////////////////////////////////////////////////////////////////
/// constructor

TMVA::SVKernelFunction::SVKernelFunction( std::vector<float> params ) :
   fKernel(kMultiGauss)
{
   fmGamma.clear();
   for( std::vector<float>::const_iterator iter = params.begin(); iter != params.end(); ++iter ){
      fmGamma.push_back(*iter);
   }
   //fKernelsList.clear();
}

////////////////////////////////////////////////////////////////////////////////
/// constructor

TMVA::SVKernelFunction::SVKernelFunction(EKernelType k, std::vector<EKernelType> kernels, std::vector<Float_t> gammas, Float_t gamma, Float_t order, Float_t theta) :
   fGamma(gamma),
   fKernel(k),
   fOrder(order),
   fTheta(theta)
{
   fmGamma.clear();
   fKernelsList.clear();
   fKernelsList = kernels;
   fmGamma = gammas;
}

////////////////////////////////////////////////////////////////////////////////
/// destructor

TMVA::SVKernelFunction::~SVKernelFunction()
{
   fmGamma.clear();
   fKernelsList.clear();
}

////////////////////////////////////////////////////////////////////////////////
/// set old options for compatibility mode

void TMVA::SVKernelFunction::setCompatibilityParams(EKernelType k, UInt_t order, Float_t theta, Float_t kappa) {
   fKernel = k;
   fOrder = order;
   fTheta = theta;
   fKappa = kappa;
}

////////////////////////////////////////////////////////////////////////////////
/// evaluate the kernel between two support-vector events

Float_t TMVA::SVKernelFunction::Evaluate( SVEvent* ev1, SVEvent* ev2 )
{
   switch(fKernel) {
   case kRBF:
      {
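         // Radial basis function kernel: K(x,y) = exp(-fGamma * ||x - y||^2)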
         std::vector<Float_t> *v1 = ev1->GetDataVector();
         std::vector<Float_t> *v2 = ev2->GetDataVector();

         Float_t norm = 0;
         for (UInt_t i = 0; i < v1->size(); i++) norm += ((*v1)[i] - (*v2)[i]) * ((*v1)[i] - (*v2)[i]);

         return TMath::Exp(-norm*fGamma);
      }
   case kMultiGauss:
      {
         // Kernel function with a kernel parameter gamma for
         // each input variable. Described in "An Introduction to
         // Support Vector Machines and Other Kernel-based Learning
         // Methods" by Cristianini and Shawe-Taylor, Section 3.5
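         // i.e. K(x,y) = prod_i exp(-fmGamma[i] * (x_i - y_i)^2)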
         std::vector<Float_t> *v1 = ev1->GetDataVector();
         std::vector<Float_t> *v2 = ev2->GetDataVector();
         if(fmGamma.size() != v1->size()){
            std::cout << "Fewer gammas than input variables! #Gammas= " << fmGamma.size() << " #Input variables= " << v1->size() << std::endl;
            std::cout << "***> abort program execution" << std::endl;
            exit(1);
         }

         Float_t result = 1.;
         for (UInt_t i = 0; i < v1->size(); i++) {
            result *= TMath::Exp( -((*v1)[i] - (*v2)[i])*((*v1)[i] - (*v2)[i])*fmGamma[i] );
         }
         return result;
      }
   case kPolynomial:
      {
         // Polynomial kernel of the form (z.x + theta)^n;
         // note that the exponent is truncated to an integer
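         // i.e. K(x,y) = (x . y + fTheta)^fOrder, with fOrder cast to Int_t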
         std::vector<Float_t> *v1 = ev1->GetDataVector();
         std::vector<Float_t> *v2 = ev2->GetDataVector();
         Float_t prod = fTheta;
         for (UInt_t idx = 0; idx < v1->size(); idx++) prod += (*v1)[idx] * (*v2)[idx];

         Float_t result = 1.;
         Int_t i = fOrder;
         result = TMath::Power(prod,i);
         return result;
      }
   case kLinear:
      {
         // This is legacy code. The linear polynomial is a special case
         // of the polynomial with order=1 and theta=0.
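         // i.e. K(x,y) = x . y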
         std::vector<Float_t> *v1 = ev1->GetDataVector();
         std::vector<Float_t> *v2 = ev2->GetDataVector();
         Float_t prod = 0;
         for (UInt_t i = 0; i < v1->size(); i++) prod += (*v1)[i] * (*v2)[i];
         return prod;
      }
   case kSigmoidal:
      {
         // This kernel does not always yield a positive-semidefinite Gram
         // matrix (it is not a valid Mercer kernel), so it should be used
         // with caution and is therefore not currently accessible.
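         // As implemented here: K(x,y) = tanh(fKappa * ||x - y||^2 + fTheta)
         // (note the squared Euclidean distance rather than the usual dot product)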
         std::vector<Float_t> *v1 = ev1->GetDataVector();
         std::vector<Float_t> *v2 = ev2->GetDataVector();
         Float_t prod = 0;
         for (UInt_t i = 0; i < v1->size(); i++) prod += ((*v1)[i] - (*v2)[i]) * ((*v1)[i] - (*v2)[i]);
         prod *= fKappa;
         prod += fTheta;
         return TMath::TanH( prod );
      }
   case kProd:
      {
         // Calculate product of kernels by looping over list of kernels
         // and evaluating the value for each, setting kernel back to
         // kProd before returning so it can be used again. Described in
         // "An Introduction to Support Vector Machines and Other Kernel-based
         // Learning Methods" by Cristianini and Shawe-Taylor, Section 3.3.2
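         // i.e. K(x,y) = prod_k K_k(x,y) over the kernels in fKernelsList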
         Float_t kernelVal = 1;
         for(UInt_t i = 0; i < fKernelsList.size(); i++){
            fKernel = fKernelsList.at(i);
            Float_t a = Evaluate(ev1,ev2);
            kernelVal *= a;
         }
         fKernel = kProd;
         return kernelVal;
      }
   case kSum:
      {
         // Calculate sum of kernels by looping over list of kernels
         // and evaluating the value for each, setting kernel back to
         // kSum before returning so it can be used again. Described in
         // "An Introduction to Support Vector Machines and Other Kernel-based
         // Learning Methods" by Cristianini and Shawe-Taylor, Section 3.3.2
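         // i.e. K(x,y) = sum_k K_k(x,y) over the kernels in fKernelsList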
         Float_t kernelVal = 0;
         for(UInt_t i = 0; i < fKernelsList.size(); i++){
            fKernel = fKernelsList.at(i);
            Float_t a = Evaluate(ev1,ev2);
            kernelVal += a;
         }
         fKernel = kSum;
         return kernelVal;
      }
   }
   return 0;
}