#ifndef STAN_MODEL_FINITE_DIFF_GRAD_HPP
#define STAN_MODEL_FINITE_DIFF_GRAD_HPP

#include <iostream>
#include <vector>

namespace stan {
namespace model {

/**
 * Compute the gradient using central finite differences for the
 * specified parameters, writing the result into the specified
 * gradient vector, using the specified perturbation epsilon.
 */
template <bool propto, bool jacobian_adjust_transform, class M>
void finite_diff_grad(const M& model, std::vector<double>& params_r,
                      std::vector<int>& params_i, std::vector<double>& grad,
                      double epsilon = 1e-6, std::ostream* msgs = 0) {
  std::vector<double> perturbed(params_r);
  grad.resize(params_r.size());
  for (size_t k = 0; k < params_r.size(); k++) {
    // log density at params_r[k] + epsilon
    perturbed[k] += epsilon;
    double logp_plus
        = model.template log_prob<propto, jacobian_adjust_transform>(
            perturbed, params_i, msgs);
    // log density at params_r[k] - epsilon
    perturbed[k] = params_r[k] - epsilon;
    double logp_minus
        = model.template log_prob<propto, jacobian_adjust_transform>(
            perturbed, params_i, msgs);
    // central difference estimate of the k-th partial derivative
    double gradest = (logp_plus - logp_minus) / (2 * epsilon);
    grad[k] = gradest;
    // restore the original value before perturbing the next coordinate
    perturbed[k] = params_r[k];
  }
}

}  // namespace model
}  // namespace stan
#endif
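// ---------------------------------------------------------------------------
// Example usage (a minimal sketch, not part of this header): the toy_model
// type below is an assumption for illustration. It exposes the same
// log_prob<propto, jacobian_adjust_transform>(params_r, params_i, msgs)
// member template that Stan model classes provide, here with log density
// -0.5 * sum_k x_k^2 (a standard normal up to a constant), whose exact
// gradient is -x, so the finite-difference estimate should be close to
// (-1.0, 2.0) for the inputs below.
// ---------------------------------------------------------------------------

#include <iostream>
#include <vector>

struct toy_model {
  template <bool propto, bool jacobian_adjust_transform>
  double log_prob(std::vector<double>& params_r,
                  std::vector<int>& /* params_i */,
                  std::ostream* /* msgs */) const {
    double lp = 0;
    for (double x : params_r)
      lp -= 0.5 * x * x;  // standard normal log density up to a constant
    return lp;
  }
};

int main() {
  toy_model model;
  std::vector<double> params_r = {1.0, -2.0};
  std::vector<int> params_i;
  std::vector<double> grad;
  stan::model::finite_diff_grad<false, false>(model, params_r, params_i, grad);
  for (double g : grad)
    std::cout << g << "\n";  // approximately -1 and 2
  return 0;
}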