Stan 2.14.0
probability, sampling & optimization
finite_diff_grad.hpp
Go to the documentation of this file.
1 #ifndef STAN_MODEL_FINITE_DIFF_GRAD_HPP
2 #define STAN_MODEL_FINITE_DIFF_GRAD_HPP
3 
4 #include <iostream>
5 #include <vector>
6 
7 namespace stan {
8  namespace model {
9 
28  template <bool propto, bool jacobian_adjust_transform, class M>
29  void finite_diff_grad(const M& model,
30  std::vector<double>& params_r,
31  std::vector<int>& params_i,
32  std::vector<double>& grad,
33  double epsilon = 1e-6,
34  std::ostream* msgs = 0) {
35  std::vector<double> perturbed(params_r);
36  grad.resize(params_r.size());
37  for (size_t k = 0; k < params_r.size(); k++) {
38  perturbed[k] += epsilon;
39  double logp_plus
40  = model
41  .template log_prob<propto,
42  jacobian_adjust_transform>(perturbed, params_i,
43  msgs);
44  perturbed[k] = params_r[k] - epsilon;
45  double logp_minus
46  = model
47  .template log_prob<propto,
48  jacobian_adjust_transform>(perturbed, params_i,
49  msgs);
50  double gradest = (logp_plus - logp_minus) / (2*epsilon);
51  grad[k] = gradest;
52  perturbed[k] = params_r[k];
53  }
54  }
55 
56  }
57 }
58 #endif
Probability, optimization and sampling library.
void finite_diff_grad(const M &model, std::vector< double > &params_r, std::vector< int > &params_i, std::vector< double > &grad, double epsilon=1e-6, std::ostream *msgs=0)
Compute the gradient using central finite differences for the specified parameters, writing the result into the specified gradient vector.

     [ Stan Home Page ] © 2011–2016, Stan Development Team.