#ifndef STAN_MODEL_LOG_PROB_GRAD_HPP
#define STAN_MODEL_LOG_PROB_GRAD_HPP

#include <stan/math/rev/mat.hpp>
#include <iostream>
#include <vector>

namespace stan {
namespace model {

/**
 * Compute the gradient using reverse-mode automatic differentiation,
 * writing the result into the specified gradient vector.
 *
 * @tparam propto true if the log density is computed only up to a
 *   proportionality constant
 * @tparam jacobian_adjust_transform true if the log absolute Jacobian
 *   determinant of the inverse parameter transforms is added
 * @tparam M class of model
 * @return log probability of the parameters
 */
template <bool propto, bool jacobian_adjust_transform, class M>
double log_prob_grad(const M& model, std::vector<double>& params_r,
                     std::vector<int>& params_i,
                     std::vector<double>& gradient,
                     std::ostream* msgs = 0) {
  using stan::math::var;
  using std::vector;
  double lp;
  try {
    // copy the unconstrained parameters onto the autodiff stack
    vector<var> ad_params_r(params_r.size());
    for (size_t i = 0; i < model.num_params_r(); ++i) {
      stan::math::var var_i(params_r[i]);
      ad_params_r[i] = var_i;
    }
    // forward pass: evaluate the log density with autodiff variables
    var adLogProb
        = model.template log_prob<propto, jacobian_adjust_transform>(
            ad_params_r, params_i, msgs);
    lp = adLogProb.val();
    // reverse pass: write the gradient with respect to params_r
    adLogProb.grad(ad_params_r, gradient);
  } catch (const std::exception& ex) {
    stan::math::recover_memory();
    throw;
  }
  stan::math::recover_memory();
  return lp;
}
/**
 * Compute the gradient using reverse-mode automatic differentiation,
 * writing the result into the specified gradient vector
 * (Eigen vector overload).
 */
template <bool propto, bool jacobian_adjust_transform, class M>
double log_prob_grad(const M& model, Eigen::VectorXd& params_r,
                     Eigen::VectorXd& gradient,
                     std::ostream* msgs = 0) {
  using stan::math::var;

  // copy the unconstrained parameters onto the autodiff stack
  Eigen::Matrix<var, Eigen::Dynamic, 1> ad_params_r(params_r.size());
  for (size_t i = 0; i < model.num_params_r(); ++i) {
    stan::math::var var_i(params_r[i]);
    ad_params_r[i] = var_i;
  }
  try {
    // forward pass: evaluate the log density with autodiff variables
    var adLogProb
        = model.template log_prob<propto,
                                  jacobian_adjust_transform>(ad_params_r, msgs);
    double val = adLogProb.val();
    // reverse pass: write the gradient with respect to params_r
    stan::math::grad(adLogProb, ad_params_r, gradient);
    stan::math::recover_memory();
    return val;
  } catch (std::exception& ex) {
    stan::math::recover_memory();
    throw;
  }
}

}  // namespace model
}  // namespace stan
#endif
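A minimal usage sketch follows, assuming a Stan-generated model class is available; the wrapper name print_log_prob_grad and the choice of template flags are illustrative only and not part of this header.

// Usage sketch (illustrative, not part of the header above).
// M stands in for any Stan model class exposing num_params_r() and the
// templated log_prob() that log_prob_grad relies on.
#include <stan/model/log_prob_grad.hpp>
#include <iostream>
#include <vector>

template <class M>
double print_log_prob_grad(const M& model) {
  std::vector<double> params_r(model.num_params_r(), 0.0);  // unconstrained values
  std::vector<int> params_i;                                // integer params, typically empty
  std::vector<double> gradient;
  // propto = true drops additive constants; jacobian_adjust_transform = true
  // adds the log absolute Jacobian of the inverse transforms.
  double lp = stan::model::log_prob_grad<true, true>(
      model, params_r, params_i, gradient, &std::cout);
  std::cout << "lp = " << lp << std::endl;
  return lp;
}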