Stan  2.14.0
probability, sampling & optimization
test_gradients.hpp
Go to the documentation of this file.
1 #ifndef STAN_MODEL_TEST_GRADIENTS_HPP
2 #define STAN_MODEL_TEST_GRADIENTS_HPP
3 
#include <stan/interface_callbacks/writer/base_writer.hpp>
#include <stan/model/log_prob_grad.hpp>
#include <stan/model/finite_diff_grad.hpp>
#include <cmath>
#include <iomanip>
#include <sstream>
#include <vector>
10 
11 namespace stan {
12  namespace model {
13 
35  template <bool propto, bool jacobian_adjust_transform, class M>
36  int test_gradients(const M& model,
37  std::vector<double>& params_r,
38  std::vector<int>& params_i,
39  double epsilon,
40  double error,
42  std::stringstream msg;
43  std::vector<double> grad;
44  double lp
45  = log_prob_grad<propto, jacobian_adjust_transform>(model,
46  params_r,
47  params_i,
48  grad,
49  &msg);
50  if (msg.str().length() > 0)
51  writer(msg.str());
52 
53  std::vector<double> grad_fd;
54  finite_diff_grad<false,
55  true,
56  M>(model,
57  params_r, params_i,
58  grad_fd, epsilon,
59  &msg);
60  if (msg.str().length() > 0)
61  writer(msg.str());
62 
63  int num_failed = 0;
64 
65  msg.str("");
66  msg << " Log probability=" << lp;
67 
68  writer();
69  writer(msg.str());
70  writer();
71 
72  msg.str("");
73  msg << std::setw(10) << "param idx"
74  << std::setw(16) << "value"
75  << std::setw(16) << "model"
76  << std::setw(16) << "finite diff"
77  << std::setw(16) << "error";
78 
79  writer(msg.str());
80 
81  for (size_t k = 0; k < params_r.size(); k++) {
82  msg.str("");
83  msg << std::setw(10) << k
84  << std::setw(16) << params_r[k]
85  << std::setw(16) << grad[k]
86  << std::setw(16) << grad_fd[k]
87  << std::setw(16) << (grad[k] - grad_fd[k]);
88  writer(msg.str());
89  if (std::fabs(grad[k] - grad_fd[k]) > error)
90  num_failed++;
91  }
92  return num_failed;
93  }
94 
95  }
96 }
97 #endif
98 
Probability, optimization and sampling library.
int test_gradients(const M &model, std::vector< double > &params_r, std::vector< int > &params_i, double epsilon, double error, stan::interface_callbacks::writer::base_writer &writer)
Test the log_prob_grad() function's ability to produce accurate gradients using finite differences...
void finite_diff_grad(const M &model, std::vector< double > &params_r, std::vector< int > &params_i, std::vector< double > &grad, double epsilon=1e-6, std::ostream *msgs=0)
Compute the gradient using finite differences for the specified parameters, writing the result into t...
base_writer is an abstract base class defining the interface for Stan writer callbacks.
Definition: base_writer.hpp:20

     [ Stan Home Page ] © 2011–2016, Stan Development Team.