-
Notifications
You must be signed in to change notification settings - Fork 190
Closed
Labels
Description
This is the only blocker before we can start using Clad-generated Hessians in RooFit.
Reproducer as a ROOT macro:
// Gaussian.C
// auto-generated test macro
#include <RooFit/Detail/MathFuncs.h>
#include <Math/CladDerivator.h>
// NLL of a Gaussian model over a packed dataset, as emitted by the RooFit
// code generator; this is the primal function handed to clad::gradient /
// clad::hessian below. Do not reformat: the pragma placement is the point
// of the reproducer.
// params: fit parameters (params[0] + params[1] form the mean, params[2]
//         scales the width -- see t1/t4 below).
// obs:    packed dataset; from the indexing below, obs[0] and obs[2] look
//         like start offsets of the x values and per-entry weights, and
//         obs[1] the entry count -- TODO confirm against the generator.
// xlArr:  auxiliary constants (unused in this model).
double roo_codegen_0(double *params, double const *obs, double const *xlArr)
{
// Unused here; presumably the generator always emits this constant.
constexpr double inf = std::numeric_limits<double>::infinity();
double RooNLLVarNewWeightSum = 0.0;
double RooNLLVarNewResult = 0.0;
const double t1 = (params[0] + params[1]); // Gaussian mean
const double t3 = 1.5;
double t2[]{params[2], t3};
const double t4 = RooFit::Detail::MathFuncs::product(t2, 2); // width = params[2] * 1.5
// Normalization of the Gaussian over the observable range [-10, 10].
const double t6 = RooFit::Detail::MathFuncs::gaussianIntegral(-10, 10, t1, t4);
// NOTE(review): this pragma IS directly before a loop, yet clad rejects it
// with "only allowed before a loop" (see the error log below) -- that
// rejection is the bug this macro reproduces.
#pragma clad checkpoint loop
for (int loopIdx1 = 0; loopIdx1 < obs[1]; loopIdx1++) {
const double t5 = RooFit::Detail::MathFuncs::gaussian(obs[static_cast<int>(obs[0]) + loopIdx1], t1, t4);
const double t7 = t5 / t6; // normalized pdf value for this entry
RooNLLVarNewResult += RooFit::Detail::MathFuncs::nll(t7, obs[static_cast<int>(obs[2]) + loopIdx1], 0, 0);
}
return RooNLLVarNewResult;
}
#pragma clad ON
// Derivative requests processed by clad (only meaningful inside the
// surrounding '#pragma clad ON/OFF' region): a gradient of the NLL with
// respect to all of "params", and a Hessian restricted to params[0..2].
// Clad emits roo_codegen_0_grad_0 and roo_codegen_0_hessian_0, which the
// Gaussian() driver below calls.
void gradient_request()
{
clad::gradient(roo_codegen_0, "params");
clad::hessian(roo_codegen_0, "params[0:2]");
}
#pragma clad OFF
// clang-format off
// Parameter point at which the gradient and Hessian are evaluated.
std::vector<double> parametersVec = {
0, 1, 3
};
// Packed dataset consumed by roo_codegen_0. Layout inferred from the
// indexing in that function -- confirm against the code generator:
//   [0..5]   header (obs[0]=6: offset of x values, obs[1]=54: entry
//            count, obs[2]=60: offset of per-entry weights; remaining
//            header entries presumably further offsets/counts)
//   [6..59]  54 observable (x) values
//   [60..]   per-entry weights
std::vector<double> observablesVec = {
6, 54, 60, 54, 114, 54, -9.1, -8.1, -5.7, -5.5,
-5.3, -4.9, -4.5, -4.3, -3.9, -3.7, -3.5, -3.1, -2.9, -2.5,
-2.3, -1.7, -1.5, -1.3, -1.1, -0.9, -0.7, -0.5, -0.3, -0.1,
0.1, 0.3, 0.5, 0.7, 0.9, 1.1, 1.3, 1.5, 1.9, 2.1,
2.3, 2.7, 3.1, 3.7, 4.5, 4.7, 4.9, 5.1, 5.5, 6.1,
6.3, 6.5, 7.1, 7.3, 7.7, 7.9, 8.9, 9.1, 9.3, 9.9,
1, 1, 1, 1, 1, 2, 3, 3, 1, 1,
1, 2, 1, 1, 3, 2, 1, 1, 3, 3,
3, 2, 3, 3, 3, 2, 2, 4, 3, 4,
1, 1, 1, 2, 3, 2, 2, 1, 2, 1,
1, 2, 3, 1, 2, 1, 2, 2, 1, 2,
1, 2, 1, 1, 1, 1, 1, 1, 1, 2,
3, 3, 1, 1, 1, 2, 1, 1, 3, 2,
1, 1, 3, 3, 3, 2, 3, 3, 3, 2,
2, 4, 3, 4, 1, 1, 1, 2, 3, 2,
2, 1, 2, 1, 1, 2, 3, 1, 2, 1,
2, 2, 1, 2, 1, 2, 1, 1
};
// This model has no auxiliary constants (xlArr is never read).
std::vector<double> auxConstantsVec = {
};
// clang-format on
// To run as a ROOT macro
// To run as a ROOT macro: evaluates the clad-generated gradient and Hessian
// at parametersVec, cross-checks the gradient against central finite
// differences, and prints the Hessian matrix.
void Gaussian()
{
   // NLL value at an arbitrary parameter point.
   auto func = [&](std::span<double> params) {
      return roo_codegen_0(params.data(), observablesVec.data(), auxConstantsVec.data());
   };
   // Clad-generated derivatives. Bug fix: the original lambdas ignored their
   // "params" argument and always passed parametersVec.data(), so calling
   // them at any other point silently evaluated derivatives at the wrong
   // parameters. Forward params.data() so the lambdas honor their interface.
   auto grad = [&](std::span<double> params, std::span<double> out) {
      return roo_codegen_0_grad_0(params.data(), observablesVec.data(), auxConstantsVec.data(), out.data());
   };
   auto hess = [&](std::span<double> params, std::span<double> out) {
      return roo_codegen_0_hessian_0(params.data(), observablesVec.data(), auxConstantsVec.data(), out.data());
   };
   std::vector<double> gradientVec(parametersVec.size());
   std::vector<double> hessianVec(parametersVec.size() * parametersVec.size());
   grad(parametersVec, gradientVec);
   hess(parametersVec, hessianVec);
   // Central finite difference d(NLL)/d(params[i]) as a numerical reference.
   auto numDiff = [&](int i) {
      const double eps = 1e-6;
      std::vector<double> p{parametersVec};
      p[i] = parametersVec[i] - eps;
      const double funcValDown = func(p);
      p[i] = parametersVec[i] + eps;
      const double funcValUp = func(p);
      return (funcValUp - funcValDown) / (2 * eps);
   };
   for (std::size_t i = 0; i < parametersVec.size(); ++i) {
      std::cout << i << ":" << std::endl;
      std::cout << " numr : " << numDiff(i) << std::endl;
      std::cout << " clad : " << gradientVec[i] << std::endl;
   }
   std::cout << std::endl;
   // Print the Hessian (the clad output buffer is traversed column-major
   // here, matching the original macro).
   const auto n = parametersVec.size();
   for (std::size_t i = 0; i < n; ++i) {
      for (std::size_t j = 0; j < n; ++j) {
         std::cout << hessianVec[i + n * j] << " ";
      }
      std::cout << std::endl;
   }
}
Error message:
In file included from input_line_8:1:
/home/rembserj/Gaussian.C:15:25: error: '#pragma clad checkpoint loop' is only allowed before a loop
#pragma clad checkpoint loop
^
/home/rembserj/Gaussian.C:15:25: error: '#pragma clad checkpoint loop' is only allowed before a loop
/home/rembserj/Gaussian.C:15:25: error: '#pragma clad checkpoint loop' is only allowed before a loop
/home/rembserj/Gaussian.C:15:25: error: '#pragma clad checkpoint loop' is only allowed before a loop
warning: attempted differentiation of function 'digamma' without definition and no suitable overload was found in namespace 'custom_derivatives'
note: falling back to numerical differentiation for 'digamma' since no suitable overload was found and clad could not derive it; to disable this feature, compile your programs with -DCLAD_NO_NUM_DIFF
warning: attempted differentiation of function 'digamma' without definition and no suitable overload was found in namespace 'custom_derivatives'
note: falling back to numerical differentiation for 'digamma' since no suitable overload was found and clad could not derive it; to disable this feature, compile your programs with -DCLAD_NO_NUM_DIFF
Reactions are currently unavailable