Logo ROOT  
Reference Guide
Loading...
Searching...
No Matches
LikelihoodGradientJob.cxx
Go to the documentation of this file.
1/*
2 * Project: RooFit
3 * Authors:
4 * PB, Patrick Bos, Netherlands eScience Center, p.bos@esciencecenter.nl
5 *
6 * Copyright (c) 2021, CERN
7 *
8 * Redistribution and use in source and binary forms,
9 * with or without modification, are permitted according to the terms
10 * listed in LICENSE (http://roofit.sourceforge.net/license.txt)
11 */
12
14
21#include "RooMsgService.h"
22#include "RooMinimizer.h"
23
25#include "Minuit2/MnStrategy.h"
26
27namespace RooFit {
28namespace TestStatistics {
29
// Constructor: forwards the shared likelihood/state objects to the
// LikelihoodGradientWrapper base class, then sizes the gradient buffer (grad_)
// and the task count (N_tasks_) to one entry per free parameter (N_dim) —
// i.e. one parallel task per partial derivative.
30LikelihoodGradientJob::LikelihoodGradientJob(std::shared_ptr<RooAbsL> likelihood,
31 std::shared_ptr<WrapperCalculationCleanFlags> calculation_is_clean,
32 std::size_t N_dim, RooMinimizer *minimizer, SharedOffset offset)
33 : LikelihoodGradientWrapper(std::move(likelihood), std::move(calculation_is_clean), N_dim, minimizer,
34 std::move(offset)),
35 grad_(N_dim),
36 N_tasks_(N_dim)
37{
// Pre-size the cache of Minuit-internal parameter values; it is filled later by
// updateMinuitInternalParameterValues().
38 minuit_internal_x_.reserve(N_dim);
// NOTE(review): source line 39 is missing from this listing (extraction artifact) —
// verify against the original file which statement belongs here.
40}
41
// NOTE(review): the signature line (source line 42) is missing from this listing;
// the cross-reference index identifies this as
// synchronizeParameterSettingsImpl(const std::vector<ROOT::Fit::ParameterSettings>&) override.
// Seeds the derivator's initial gradient state from Minuit's parameter settings.
43 const std::vector<ROOT::Fit::ParameterSettings> &parameter_settings)
44{
45 gradf_.SetInitialGradient(parameter_settings, grad_);
46}
47
// NOTE(review): the signature line (source line 54) is missing from this listing;
// from the body (istrat, MnStrategy) this is presumably setStrategy(int istrat) — confirm.
// Translates a Minuit strategy level into concrete gradient-calculator settings.
53
55{
56 assert(istrat >= 0);
57 ROOT::Minuit2::MnStrategy strategy(static_cast<unsigned int>(istrat));
58
// NOTE(review): source lines 59-60 are missing here; given the index entries for
// GradientStepTolerance()/GradientTolerance(), they presumably forwarded those two
// strategy values to setStepTolerance()/setGradTolerance() — confirm against the original.
61 setNCycles(strategy.GradientNCycles());
62}
63
64void LikelihoodGradientJob::setStepTolerance(double step_tolerance) const
65{
66 gradf_.SetStepTolerance(step_tolerance);
67}
68
69void LikelihoodGradientJob::setGradTolerance(double grad_tolerance) const
70{
71 gradf_.SetGradTolerance(grad_tolerance);
72}
73
74void LikelihoodGradientJob::setNCycles(unsigned int ncycles) const
75{
76 gradf_.SetNCycles(ncycles);
77}
78
79void LikelihoodGradientJob::setErrorLevel(double error_level) const
80{
81 gradf_.SetErrorLevel(error_level);
82}
83
84///////////////////////////////////////////////////////////////////////////////
85/// Job overrides:
86
88{
89 run_derivator(task);
90}
91
92// SYNCHRONIZATION FROM WORKERS TO MASTER
93
95{
96 task_result_t task_result{id_, task, grad_[task]};
97 zmq::message_t message(sizeof(task_result_t));
98 memcpy(message.data(), &task_result, sizeof(task_result_t));
99 get_manager()->messenger().send_from_worker_to_master(std::move(message));
100}
101
// Master-side receive handler: unpacks a worker's task_result_t from the ZeroMQ
// message and stores the gradient component at its task index. Returns true when
// all outstanding tasks have been collected.
// NOTE(review): the signature line (source line 102) is missing from this listing;
// the index identifies it as
// `bool receive_task_result_on_master(const zmq::message_t &message) override`.
103{
104 auto result = message.data<task_result_t>();
105 grad_[result->task_id] = result->grad;
// NOTE(review): source line 106 is missing here; since line 107 tests
// N_tasks_at_workers_ == 0, it presumably decremented N_tasks_at_workers_ — confirm.
107 bool job_completed = (N_tasks_at_workers_ == 0);
108 return job_completed;
109}
110
111// END SYNCHRONIZATION FROM WORKERS TO MASTER
112
113// SYNCHRONIZATION FROM MASTER TO WORKERS (STATE)
114
// Master-side state broadcast: bumps the state id and publishes the current
// gradient, Minuit-internal parameter values, maxFCN and fcnOffset to all
// workers. The (potentially large) offsets vector is only included when it
// changed since the previous broadcast.
// NOTE(review): the signature line (source line 115) is missing from this
// listing; the index identifies it as `void update_state() override`. The
// opening lines of the two publish calls (source lines 126, 129, 131) are also
// missing — from the visible argument lists they are calls to
// messenger().publish_from_master_to_workers(...). Confirm against the original.
116{
117 // TODO optimization: only send changed parameters (now sending all)
118 zmq::message_t gradient_message(grad_.begin(), grad_.end());
119 zmq::message_t minuit_internal_x_message(minuit_internal_x_.begin(), minuit_internal_x_.end());
120 double maxFCN = minimizer_->maxFCN();
121 double fcnOffset = minimizer_->fcnOffset();
122 ++state_id_;
123
124 if (shared_offset_.offsets() != offsets_previous_) {
125 zmq::message_t offsets_message(shared_offset_.offsets().begin(), shared_offset_.offsets().end());
127 id_, state_id_, isCalculating_, maxFCN, fcnOffset, std::move(gradient_message),
128 std::move(minuit_internal_x_message), std::move(offsets_message));
130 } else {
132 std::move(gradient_message),
133 std::move(minuit_internal_x_message));
134 }
135}
136
// Worker-side counterpart of update_state(): receives the multipart state
// message published by the master (maxFCN, fcnOffset, gradient, Minuit-internal
// parameter values, and optionally the likelihood offsets) and installs it
// locally, then re-initializes the derivator for the new parameter point.
// NOTE(review): the signature line (source line 143) and the first receive
// (source lines 147, 149 — presumably the state id and isCalculating_ flag)
// are missing from this listing; confirm against the original file.
142
144{
145 bool more;
146
148 assert(more);
150
151 if (more) {
152 auto maxFCN = get_manager()->messenger().receive_from_master_on_worker<double>(&more);
153 minimizer_->maxFCN() = maxFCN;
154 assert(more);
155
156 auto fcnOffset = get_manager()->messenger().receive_from_master_on_worker<double>(&more);
157 minimizer_->fcnOffset() = fcnOffset;
158 assert(more);
159
// Copy the serialized DerivatorElement array into the local gradient buffer.
160 auto gradient_message = get_manager()->messenger().receive_from_master_on_worker<zmq::message_t>(&more);
161 assert(more);
162 auto gradient_message_begin = gradient_message.data<ROOT::Minuit2::DerivatorElement>();
163 auto gradient_message_end =
164 gradient_message_begin + gradient_message.size() / sizeof(ROOT::Minuit2::DerivatorElement);
165 std::copy(gradient_message_begin, gradient_message_end, grad_.begin());
166
// Copy the Minuit-internal parameter values into the local cache.
167 auto minuit_internal_x_message = get_manager()->messenger().receive_from_master_on_worker<zmq::message_t>(&more);
168 auto minuit_internal_x_message_begin = minuit_internal_x_message.data<double>();
169 auto minuit_internal_x_message_end =
170 minuit_internal_x_message_begin + minuit_internal_x_message.size() / sizeof(double);
171 std::copy(minuit_internal_x_message_begin, minuit_internal_x_message_end, std::back_inserter(minuit_internal_x_) == std::back_inserter(minuit_internal_x_) ? minuit_internal_x_.begin() : minuit_internal_x_.begin());
172
173 if (more) {
174 // offsets also incoming
175 auto offsets_message = get_manager()->messenger().receive_from_master_on_worker<zmq::message_t>(&more);
176 assert(!more);
177 auto offsets_message_begin = offsets_message.data<ROOT::Math::KahanSum<double>>();
178 std::size_t N_offsets = offsets_message.size() / sizeof(ROOT::Math::KahanSum<double>);
179 shared_offset_.offsets().reserve(N_offsets);
180 auto offsets_message_end = offsets_message_begin + N_offsets;
181 std::copy(offsets_message_begin, offsets_message_end, shared_offset_.offsets().begin());
182 }
183
184 // Since the gradient parallelization only support Minuit 2, we can do this cast
185 auto &minim = static_cast<ROOT::Minuit2::Minuit2Minimizer &>(*minimizer_->_minimizer);
186
187 // note: the next call must stay after the (possible) update of the offset, because it
188 // calls the likelihood function, so the offset must be correct at this point
189 gradf_.SetupDifferentiate(minimizer_->getNPar(), minim.GetFCN(), minuit_internal_x_.data(),
190 minimizer_->fitter()->Config().ParamsSettings());
191 }
192}
193
194// END SYNCHRONIZATION FROM MASTER TO WORKERS (STATE)
195
196///////////////////////////////////////////////////////////////////////////////
197/// Calculation stuff (mostly duplicates of RooGradMinimizerFcn code):
198
199void LikelihoodGradientJob::run_derivator(unsigned int i_component) const
200{
201 // Since the gradient parallelization only support Minuit 2, we can do this cast
202 auto &minim = static_cast<ROOT::Minuit2::Minuit2Minimizer &>(*minimizer_->_minimizer);
203
204 // Calculate the derivative etc for these parameters
205 grad_[i_component] = gradf_.FastPartialDerivative(minim.GetFCN(), minimizer_->fitter()->Config().ParamsSettings(),
206 i_component, grad_[i_component]);
207}
208
// Master-side driver: enqueues one task per gradient component, then (after the
// missing gather step below) marks the gradient calculation as clean.
// NOTE(review): the signature line (source line 209) is missing from this
// listing. Source lines 213, 220-223 and 226 are also missing — given the index
// entries for start_timer/end_timer and gather_worker_results(), they presumably
// contained timing instrumentation and the call that waits for all worker
// results. Confirm against the original file.
210{
211 if (get_manager()->process_manager().is_master()) {
212 isCalculating_ = true;
214
215 // master fills queue with tasks
216 for (std::size_t ix = 0; ix < N_tasks_; ++ix) {
217 MultiProcess::JobTask job_task{id_, state_id_, ix};
218 get_manager()->queue()->add(job_task);
219 }
221 // wait for task results back from workers to master (put into _grad)
223
224 calculation_is_clean_->gradient = true;
225 isCalculating_ = false;
227 }
228}
229
// Copies the calculated derivatives into Minuit's output array, recomputing
// first when the cached gradient is stale.
// NOTE(review): the signature line (source line 230) is missing from this
// listing (presumably `void fillGradient(double *grad) override`), as is source
// line 234 inside the dirty-gradient branch — which presumably triggered the
// recalculation (e.g. a calculate_all() call). Confirm against the original.
231{
232 if (get_manager()->process_manager().is_master()) {
233 if (!calculation_is_clean_->gradient) {
235 }
236
237 // put the results from _grad into *grad
238 for (Int_t ix = 0; ix < minimizer_->getNPar(); ++ix) {
239 grad[ix] = grad_[ix].derivative;
240 }
241 }
242}
243
// Like fillGradient, but seeds the derivator with the previous iteration's
// results (derivative, second derivative, step size) before recalculating, and
// writes the updated second derivatives and step sizes back out alongside the
// gradient itself.
244void LikelihoodGradientJob::fillGradientWithPrevResult(double *grad, double *previous_grad, double *previous_g2,
245 double *previous_gstep)
246{
247 if (get_manager()->process_manager().is_master()) {
// Seed each component with the caller-provided previous state.
248 for (std::size_t i_component = 0; i_component < N_tasks_; ++i_component) {
249 grad_[i_component] = {previous_grad[i_component], previous_g2[i_component], previous_gstep[i_component]};
250 }
251
252 if (!calculation_is_clean_->gradient) {
// NOTE(review): source lines 253-254 and 256-258 are missing from this listing;
// by analogy with fillGradient they presumably contained the recalculation call
// (plus timing instrumentation). Confirm against the original file.
255 }
259 }
260 }
261
262 // put the results from _grad into *grad
263 for (Int_t ix = 0; ix < minimizer_->getNPar(); ++ix) {
264 grad[ix] = grad_[ix].derivative;
265 previous_g2[ix] = grad_[ix].second_derivative;
266 previous_gstep[ix] = grad_[ix].step_size;
267 }
268 }
269}
270
271void LikelihoodGradientJob::updateMinuitInternalParameterValues(const std::vector<double> &minuit_internal_x)
272{
273 minuit_internal_x_ = minuit_internal_x;
274}
275
277{
278 return true;
279}
280
281} // namespace TestStatistics
282} // namespace RooFit
int Int_t
Signed integer 4 bytes (int).
Definition RtypesCore.h:59
The Kahan summation is a compensated summation algorithm, which significantly reduces numerical error...
Definition Util.h:141
double ErrorDef() const
error definition
Minuit2Minimizer class implementing the ROOT::Math::Minimizer interface for Minuit2 minimization algo...
API class for defining four levels of strategies: low (0), medium (1), high (2), very high (>=3); act...
Definition MnStrategy.h:27
double GradientStepTolerance() const
Definition MnStrategy.h:37
double GradientTolerance() const
Definition MnStrategy.h:38
unsigned int GradientNCycles() const
Definition MnStrategy.h:36
static bool getTimingAnalysis()
Definition Config.cxx:87
std::size_t id_
Definition Job.h:45
std::size_t state_id_
Definition Job.h:46
JobManager * get_manager()
Get JobManager instance; create and activate if necessary.
Definition Job.cxx:112
void gather_worker_results()
Wait for all tasks to be retrieved for the current Job.
Definition Job.cxx:126
value_t receive_from_master_on_worker(bool *more=nullptr)
Definition Messenger.h:176
void send_from_worker_to_master(T &&item)
specialization that sends the final message
Definition Messenger.h:192
void publish_from_master_to_workers(T &&item)
specialization that sends the final message
Definition Messenger.h:150
static void start_timer(std::string section_name)
static void end_timer(std::string section_name)
virtual void add(JobTask job_task)=0
Enqueue a task.
bool usesMinuitInternalValues() override
Implement usesMinuitInternalValues to return true when you want Minuit to send this class Minuit-inte...
void update_state() override
Virtual function to update any necessary state on workers.
std::vector< ROOT::Minuit2::DerivatorElement > grad_
void fillGradientWithPrevResult(double *grad, double *previous_grad, double *previous_g2, double *previous_gstep) override
void updateMinuitInternalParameterValues(const std::vector< double > &minuit_internal_x) override
Minuit passes in parameter values that may not conform to RooFit internal standards (like applying ra...
void run_derivator(unsigned int i_component) const
Calculation stuff (mostly duplicates of RooGradMinimizerFcn code):
void send_back_task_result_from_worker(std::size_t task) override
void synchronizeWithMinimizer(const ROOT::Math::MinimizerOptions &options) override
Synchronize minimizer settings with calculators in child classes.
LikelihoodGradientJob(std::shared_ptr< RooAbsL > likelihood, std::shared_ptr< WrapperCalculationCleanFlags > calculation_is_clean, std::size_t N_dim, RooMinimizer *minimizer, SharedOffset offset)
void setStepTolerance(double step_tolerance) const
void setGradTolerance(double grad_tolerance) const
void evaluate_task(std::size_t task) override
Job overrides:
bool receive_task_result_on_master(const zmq::message_t &message) override
void synchronizeParameterSettingsImpl(const std::vector< ROOT::Fit::ParameterSettings > &parameter_settings) override
std::shared_ptr< WrapperCalculationCleanFlags > calculation_is_clean_
LikelihoodGradientWrapper(std::shared_ptr< RooAbsL > likelihood, std::shared_ptr< WrapperCalculationCleanFlags > calculation_is_clean, std::size_t N_dim, RooMinimizer *minimizer, SharedOffset offset)
Wrapper class around ROOT::Math::Minimizer that provides a seamless interface between the minimizer f...
std::size_t State
Definition types.h:23
Namespace for new RooFit test statistic calculation.
Definition RooAbsData.h:50
The namespace RooFit contains mostly switches that change the behaviour of functions of PDFs (or othe...
Definition CodegenImpl.h:72
combined job_object, state and task identifier type
Definition types.h:25