TMVA_SOFIE_Models.py File Reference

Detailed Description

Example of inference with SOFIE using a set of models trained with Keras.

This tutorial shows how to store several models in a single generated header file, with their weights in a single ROOT binary file. The input data are read with RDataFrame and correspond to the Higgs dataset also used in TMVA_Higgs_Classification.C.

The trained Keras models are parsed with SOFIE, and the generated inference code is compiled and run using ROOT's JITing capability.
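For orientation, the core SOFIE workflow used below looks roughly as follows (a minimal single-model sketch; the model, file, and variable names are placeholders, and the Keras parser is assumed to be available as ROOT.TMVA.Experimental.SOFIE.PyKeras; the full multi-model version is in the listing below):

import ROOT
# parse a trained Keras model into a SOFIE RModel
sofie_model = ROOT.TMVA.Experimental.SOFIE.PyKeras.Parse("MyModel.keras")
# generate the C++ inference code; passing True appends to an existing header,
# so several models can share a single file (and a single weight ROOT file)
sofie_model.OutputGenerated("MyModel.hxx", True)
# JIT-compile the generated code and evaluate one event
ROOT.gInterpreter.Declare('#include "MyModel.hxx"')
session = ROOT.TMVA_SOFIE_MyModel.Session("MyModel.root")
output = session.infer(x_row)[0]  # x_row: one event as a row of input features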

import os
import numpy as np
import ROOT
from sklearn.model_selection import train_test_split
from tensorflow.keras.layers import Dense
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import Adam
## generate and train Keras models with different architectures
def CreateModel(nlayers = 4, nunits = 64):
    model = Sequential()
    model.add(Dense(nunits, activation='relu', input_dim=7))
    for i in range(1, nlayers):
        model.add(Dense(nunits, activation='relu'))
    model.add(Dense(1, activation='sigmoid'))
    model.compile(loss='binary_crossentropy', optimizer=Adam(learning_rate=0.001), weighted_metrics=['accuracy'])
    return model
def PrepareData():
    # get the input data
    inputFile = str(ROOT.gROOT.GetTutorialDir()) + "/machine_learning/data/Higgs_data.root"
    df1 = ROOT.RDataFrame("sig_tree", inputFile)
    sigData = df1.AsNumpy(columns=['m_jj', 'm_jjj', 'm_lv', 'm_jlv', 'm_bb', 'm_wbb', 'm_wwbb'])
    #print(sigData)
    # stack the 7 numpy arrays in a single array (nevents x nvars)
    xsig = np.column_stack(list(sigData.values()))
    data_sig_size = xsig.shape[0]
    print("size of data", data_sig_size)
    # do the same for the background data
    df2 = ROOT.RDataFrame("bkg_tree", inputFile)
    bkgData = df2.AsNumpy(columns=['m_jj', 'm_jjj', 'm_lv', 'm_jlv', 'm_bb', 'm_wbb', 'm_wwbb'])
    xbkg = np.column_stack(list(bkgData.values()))
    data_bkg_size = xbkg.shape[0]
    # build the target labels (1 for signal, 0 for background)
    ysig = np.ones(data_sig_size)
    ybkg = np.zeros(data_bkg_size)
    inputs_data = np.concatenate((xsig, xbkg), axis=0)
    inputs_targets = np.concatenate((ysig, ybkg), axis=0)
    # split data in training and test data
    x_train, x_test, y_train, y_test = train_test_split(
        inputs_data, inputs_targets, test_size=0.50, random_state=1234)
    return x_train, y_train, x_test, y_test
def TrainModel(model, x, y, name):
    model.fit(x, y, epochs=5, batch_size=50)
    modelFile = name + '.keras'
    model.save(modelFile)
    return modelFile
### run the models
x_train, y_train, x_test, y_test = PrepareData()
## create models and train them
model1 = TrainModel(CreateModel(4,64),x_train, y_train, 'Higgs_Model_4L_50')
model2 = TrainModel(CreateModel(4,64),x_train, y_train, 'Higgs_Model_4L_200')
model3 = TrainModel(CreateModel(4,64),x_train, y_train, 'Higgs_Model_2L_500')
#evaluate with SOFIE the 3 trained models
def GenerateModelCode(modelFile, generatedHeaderFile):
    # parse the Keras model into a SOFIE RModel
    model = ROOT.TMVA.Experimental.SOFIE.PyKeras.Parse(modelFile)
    print("Generating inference code for the Keras model from ", modelFile, " in the header ", generatedHeaderFile)
    # generate the inference code, storing the weights in a ROOT binary file
    # (pass True as append flag to add the generated code to the same header file)
    model.OutputGenerated(generatedHeaderFile, True)
    #model.PrintGenerated()
    return generatedHeaderFile
generatedHeaderFile = "Higgs_Model.hxx"
#need to remove existing header file since we are appending on same one
if (os.path.exists(generatedHeaderFile)):
    print("removing existing file", generatedHeaderFile)
    os.remove(generatedHeaderFile)
weightFile = "Higgs_Model.root"
if (os.path.exists(weightFile)):
    print("removing existing file", weightFile)
    os.remove(weightFile)
GenerateModelCode(model1, generatedHeaderFile)
GenerateModelCode(model2, generatedHeaderFile)
GenerateModelCode(model3, generatedHeaderFile)
#compile the generated code
ROOT.gInterpreter.Declare('#include "' + generatedHeaderFile + '"')
#run the inference on the test data
session1 = ROOT.TMVA_SOFIE_Higgs_Model_4L_50.Session("Higgs_Model.root")
session2 = ROOT.TMVA_SOFIE_Higgs_Model_4L_200.Session("Higgs_Model.root")
session3 = ROOT.TMVA_SOFIE_Higgs_Model_2L_500.Session("Higgs_Model.root")
hs1 = ROOT.TH1D("hs1","Signal result 4L 50",100,0,1)
hs2 = ROOT.TH1D("hs2","Signal result 4L 200",100,0,1)
hs3 = ROOT.TH1D("hs3","Signal result 2L 500",100,0,1)
hb1 = ROOT.TH1D("hb1","Background result 4L 50",100,0,1)
hb2 = ROOT.TH1D("hb2","Background result 4L 200",100,0,1)
hb3 = ROOT.TH1D("hb3","Background result 2L 500",100,0,1)
def EvalModel(session, x):
    result = session.infer(x)
    return result[0]
for i in range(0, x_test.shape[0]):
    result1 = EvalModel(session1, x_test[i, :])
    result2 = EvalModel(session2, x_test[i, :])
    result3 = EvalModel(session3, x_test[i, :])
    if (y_test[i] == 1):
        hs1.Fill(result1)
        hs2.Fill(result2)
        hs3.Fill(result3)
    else:
        hb1.Fill(result1)
        hb2.Fill(result2)
        hb3.Fill(result3)
def PlotHistos(hs, hb):
    hs.SetLineColor("kRed")
    hs.Draw()
    hb.SetLineColor("kBlue")
    hb.Draw("same")
PlotHistos(hs1,hb1)
PlotHistos(hs2,hb2)
PlotHistos(hs3,hb3)
## draw also ROC curves
def GetContent(h):
    n = h.GetNbinsX()
    x = ROOT.std.vector['float'](n)
    w = ROOT.std.vector['float'](n)
    for i in range(0, n):
        x[i] = h.GetBinCenter(i + 1)
        w[i] = h.GetBinContent(i + 1)
    return x, w
def MakeROCCurve(hs, hb):
    xs, ws = GetContent(hs)
    xb, wb = GetContent(hb)
    roc = ROOT.TMVA.ROCCurve(xs, xb, ws, wb)
    print("ROC integral for ", hs.GetName(), roc.GetROCIntegral())
    curve = roc.GetROCCurve()
    return roc, curve
r1,curve1 = MakeROCCurve(hs1,hb1)
r2,curve2 = MakeROCCurve(hs2,hb2)
r3,curve3 = MakeROCCurve(hs3,hb3)
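# Optionally, the ROC curves can also be drawn on a common canvas.
# This is an extra sketch, not part of the original tutorial; it assumes that
# TMVA.ROCCurve.GetROCCurve() returns drawable TGraph objects (curve1-3 above).
c2 = ROOT.TCanvas("c_roc", "ROC curves")
curve1.SetLineColor(ROOT.kRed)
curve1.SetTitle("ROC curves;signal efficiency;background rejection")
curve1.Draw("AL")
curve2.SetLineColor(ROOT.kBlue)
curve2.Draw("L")
curve3.SetLineColor(ROOT.kGreen + 1)
curve3.Draw("L")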
size of data 10000
Model: "sequential"
┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━┓
┃ Layer (type) ┃ Output Shape ┃ Param # ┃
┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━┩
│ dense (Dense) │ (None, 64) │ 512 │
├─────────────────────────────────┼────────────────────────┼───────────────┤
│ dense_1 (Dense) │ (None, 64) │ 4,160 │
├─────────────────────────────────┼────────────────────────┼───────────────┤
│ dense_2 (Dense) │ (None, 64) │ 4,160 │
├─────────────────────────────────┼────────────────────────┼───────────────┤
│ dense_3 (Dense) │ (None, 64) │ 4,160 │
├─────────────────────────────────┼────────────────────────┼───────────────┤
│ dense_4 (Dense) │ (None, 1) │ 65 │
└─────────────────────────────────┴────────────────────────┴───────────────┘
Total params: 13,057 (51.00 KB)
Trainable params: 13,057 (51.00 KB)
Non-trainable params: 0 (0.00 B)
Epoch 1/5
200/200 ━━━━━━━━━━━━━━━━━━━━ 1s 825us/step - accuracy: 0.5973 - loss: 0.6594
Epoch 2/5
200/200 ━━━━━━━━━━━━━━━━━━━━ 0s 749us/step - accuracy: 0.6411 - loss: 0.6374
Epoch 3/5
200/200 ━━━━━━━━━━━━━━━━━━━━ 0s 800us/step - accuracy: 0.6496 - loss: 0.6265
Epoch 4/5
200/200 ━━━━━━━━━━━━━━━━━━━━ 0s 807us/step - accuracy: 0.6534 - loss: 0.6201
Epoch 5/5
200/200 ━━━━━━━━━━━━━━━━━━━━ 0s 842us/step - accuracy: 0.6667 - loss: 0.6102
Model: "sequential_1"
┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━┓
┃ Layer (type) ┃ Output Shape ┃ Param # ┃
┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━┩
│ dense_5 (Dense) │ (None, 64) │ 512 │
├─────────────────────────────────┼────────────────────────┼───────────────┤
│ dense_6 (Dense) │ (None, 64) │ 4,160 │
├─────────────────────────────────┼────────────────────────┼───────────────┤
│ dense_7 (Dense) │ (None, 64) │ 4,160 │
├─────────────────────────────────┼────────────────────────┼───────────────┤
│ dense_8 (Dense) │ (None, 64) │ 4,160 │
├─────────────────────────────────┼────────────────────────┼───────────────┤
│ dense_9 (Dense) │ (None, 1) │ 65 │
└─────────────────────────────────┴────────────────────────┴───────────────┘
Total params: 13,057 (51.00 KB)
Trainable params: 13,057 (51.00 KB)
Non-trainable params: 0 (0.00 B)
Epoch 1/5
200/200 ━━━━━━━━━━━━━━━━━━━━ 1s 819us/step - accuracy: 0.5902 - loss: 0.6648
Epoch 2/5
200/200 ━━━━━━━━━━━━━━━━━━━━ 0s 762us/step - accuracy: 0.6293 - loss: 0.6452
Epoch 3/5
200/200 ━━━━━━━━━━━━━━━━━━━━ 0s 772us/step - accuracy: 0.6422 - loss: 0.6312
Epoch 4/5
200/200 ━━━━━━━━━━━━━━━━━━━━ 0s 756us/step - accuracy: 0.6538 - loss: 0.6222
Epoch 5/5
200/200 ━━━━━━━━━━━━━━━━━━━━ 0s 746us/step - accuracy: 0.6593 - loss: 0.6131
Model: "sequential_2"
┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━┓
┃ Layer (type) ┃ Output Shape ┃ Param # ┃
┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━┩
│ dense_10 (Dense) │ (None, 64) │ 512 │
├─────────────────────────────────┼────────────────────────┼───────────────┤
│ dense_11 (Dense) │ (None, 64) │ 4,160 │
├─────────────────────────────────┼────────────────────────┼───────────────┤
│ dense_12 (Dense) │ (None, 64) │ 4,160 │
├─────────────────────────────────┼────────────────────────┼───────────────┤
│ dense_13 (Dense) │ (None, 64) │ 4,160 │
├─────────────────────────────────┼────────────────────────┼───────────────┤
│ dense_14 (Dense) │ (None, 1) │ 65 │
└─────────────────────────────────┴────────────────────────┴───────────────┘
Total params: 13,057 (51.00 KB)
Trainable params: 13,057 (51.00 KB)
Non-trainable params: 0 (0.00 B)
Epoch 1/5
200/200 ━━━━━━━━━━━━━━━━━━━━ 1s 811us/step - accuracy: 0.5832 - loss: 0.6681
Epoch 2/5
200/200 ━━━━━━━━━━━━━━━━━━━━ 0s 800us/step - accuracy: 0.6342 - loss: 0.6392
Epoch 3/5
200/200 ━━━━━━━━━━━━━━━━━━━━ 0s 822us/step - accuracy: 0.6490 - loss: 0.6297
Epoch 4/5
200/200 ━━━━━━━━━━━━━━━━━━━━ 0s 819us/step - accuracy: 0.6570 - loss: 0.6217
Epoch 5/5
200/200 ━━━━━━━━━━━━━━━━━━━━ 0s 820us/step - accuracy: 0.6563 - loss: 0.6192
PyKeras: parsing model Higgs_Model_4L_50.keras
Generating inference code for the Keras model from Higgs_Model_4L_50.keras in the header Higgs_Model.hxx
PyKeras: parsing model Higgs_Model_4L_200.keras
Generating inference code for the Keras model from Higgs_Model_4L_200.keras in the header Higgs_Model.hxx
PyKeras: parsing model Higgs_Model_2L_500.keras
Generating inference code for the Keras model from Higgs_Model_2L_500.keras in the header Higgs_Model.hxx
ROC integral for hs1 0.7242965939411862
ROC integral for hs2 0.7353550007328232
ROC integral for hs3 0.7312635924262583
Author
Lorenzo Moneta

Definition in file TMVA_SOFIE_Models.py.