import torch
import ROOT
# Data-loading configuration for the TMVA batch generators.
tree_name = "sig_tree"
file_name = "http://root.cern/files/Higgs_data.root"
batch_size = 128     # rows per yielded training batch
chunk_size = 5_000   # rows read from the TTree per chunk
target = "Type"      # branch used as the classification label

# Create train/validation generators that yield (features, target) PyTorch
# tensors; 30% of each chunk is held out for validation.
gen_train, gen_validation = ROOT.TMVA.Experimental.CreatePyTorchGenerators(
    tree_name,
    file_name,
    batch_size,
    chunk_size,
    target=target,
    validation_split=0.3,
)

# Input feature columns (every branch except the target).
input_columns = gen_train.train_columns
# Fix: the original split this assignment across two lines
# ("num_features =" / "len(input_columns)"), which is a SyntaxError.
num_features = len(input_columns)
def calc_accuracy(targets, pred):
    """Return the fraction of predictions that match *targets* once the
    sigmoid outputs in *pred* are rounded to hard 0/1 labels."""
    n_correct = (pred.round() == targets).sum()
    return n_correct / pred.size(0)
# Fully connected binary classifier: three Tanh hidden layers of equal
# width, ending in a single sigmoid unit so the output lives in [0, 1].
hidden_width = 300
model = torch.nn.Sequential(
    torch.nn.Linear(num_features, hidden_width),
    torch.nn.Tanh(),
    torch.nn.Linear(hidden_width, hidden_width),
    torch.nn.Tanh(),
    torch.nn.Linear(hidden_width, hidden_width),
    torch.nn.Tanh(),
    torch.nn.Linear(hidden_width, 1),
    torch.nn.Sigmoid(),
)

# Mean-squared-error loss against the 0/1 target, optimized with
# momentum SGD.
loss_fn = torch.nn.MSELoss(reduction="mean")
optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
# One training pass over the batches produced by the generator.
# Fix: the loop body had lost its indentation (a SyntaxError as written).
for i, (x_train, y_train) in enumerate(gen_train):
    # Forward pass; flatten the (batch, 1) output to match the target shape.
    pred = model(x_train).view(-1)
    loss = loss_fn(pred, y_train)

    # Backward pass and parameter update.
    model.zero_grad()
    loss.backward()
    optimizer.step()

    accuracy = calc_accuracy(y_train, pred)
    print(f"Training => accuracy: {accuracy}")
# Evaluate on the held-out validation batches.
# Fix: the loop body had lost its indentation (a SyntaxError as written);
# also run the forward pass under no_grad since no update happens here.
for i, (x_train, y_train) in enumerate(gen_validation):
    with torch.no_grad():
        pred = model(x_train).view(-1)
    accuracy = calc_accuracy(y_train, pred)
    print(f"Validation => accuracy: {accuracy}")
Sample output:
Training => accuracy: 0.296875
Training => accuracy: 0.375
Training => accuracy: 0.859375
Training => accuracy: 0.9765625
Training => accuracy: 1.0
Training => accuracy: 1.0
Training => accuracy: 1.0
Training => accuracy: 1.0
Training => accuracy: 1.0
Training => accuracy: 1.0
Training => accuracy: 1.0
Training => accuracy: 1.0
Training => accuracy: 1.0
Training => accuracy: 1.0
Training => accuracy: 1.0
Training => accuracy: 1.0
Training => accuracy: 1.0
Training => accuracy: 1.0
Training => accuracy: 1.0
Training => accuracy: 1.0
Training => accuracy: 1.0
Training => accuracy: 1.0
Training => accuracy: 1.0
Training => accuracy: 1.0
Training => accuracy: 1.0
Training => accuracy: 1.0
Training => accuracy: 1.0
Training => accuracy: 1.0
Training => accuracy: 1.0
Training => accuracy: 1.0
Training => accuracy: 1.0
Training => accuracy: 1.0
Training => accuracy: 1.0
Training => accuracy: 1.0
Training => accuracy: 1.0
Training => accuracy: 1.0
Training => accuracy: 1.0
Training => accuracy: 1.0
Training => accuracy: 1.0
Training => accuracy: 1.0
Training => accuracy: 1.0
Training => accuracy: 1.0
Training => accuracy: 1.0
Training => accuracy: 1.0
Training => accuracy: 1.0
Training => accuracy: 1.0
Training => accuracy: 1.0
Training => accuracy: 1.0
Training => accuracy: 1.0
Training => accuracy: 1.0
Training => accuracy: 1.0
Training => accuracy: 1.0
Training => accuracy: 1.0
Training => accuracy: 1.0
Validation => accuracy: 1.0
Validation => accuracy: 1.0
Validation => accuracy: 1.0
Validation => accuracy: 1.0
Validation => accuracy: 1.0
Validation => accuracy: 1.0
Validation => accuracy: 1.0
Validation => accuracy: 1.0
Validation => accuracy: 1.0
Validation => accuracy: 1.0
Validation => accuracy: 1.0
Validation => accuracy: 1.0
Validation => accuracy: 1.0
Validation => accuracy: 1.0
Validation => accuracy: 1.0
Validation => accuracy: 1.0
Validation => accuracy: 1.0
Validation => accuracy: 1.0
Validation => accuracy: 1.0
Validation => accuracy: 1.0
Validation => accuracy: 1.0
Validation => accuracy: 1.0