-
-
Notifications
You must be signed in to change notification settings - Fork 187
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
add prim and rev gp_exp_cov #859
Closed
Closed
Changes from 1 commit
Commits
Show all changes
16 commits
Select commit
Hold shift + click to select a range
098613d
add prim and rev gp_exp_cov
bd69dc2
[Jenkins] auto-formatting by clang-format version 6.0.0 (tags/google/…
stan-buildbot f9a960b
Update gp_exponential_cov.hpp
3df0bc9
[Jenkins] auto-formatting by clang-format version 6.0.0 (tags/google/…
stan-buildbot e46bd66
add rev mat.hpp
77e77da
temp
5212d97
temp
26ca567
use ChainableStack::instance()
1cc69b9
Merge commit '5cd004ca651c13d63a460c62b86c8a366a8fe830' into HEAD
yashikno 644d11d
[Jenkins] auto-formatting by clang-format version 6.0.0 (tags/google/…
stan-buildbot 13b7475
use ChainableStack::instance()
47b2805
fix merge conflict
d565a9e
proper includes
67c0557
[Jenkins] auto-formatting by clang-format version 6.0.0 (tags/google/…
stan-buildbot 8599e40
working updates for PR review
7a4ea7b
[Jenkins] auto-formatting by clang-format version 6.0.0 (tags/google/…
stan-buildbot File filter
Filter by extension
Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
temp
- Loading branch information
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -27,7 +27,7 @@ namespace math { | |
*/ | ||
template <typename T_x, typename T_s, typename T_l> | ||
class gp_exponential_cov_vari : public vari { | ||
public: | ||
public: | ||
const size_t size_; | ||
const size_t size_ltri_; | ||
const double l_d_; | ||
|
@@ -58,25 +58,24 @@ class gp_exponential_cov_vari : public vari { | |
*/ | ||
gp_exponential_cov_vari(const std::vector<T_x> &x, const T_s &sigma, | ||
const T_l &length_scale) | ||
: vari(0.0), | ||
size_(x.size()), | ||
size_ltri_(size_ * (size_ - 1) / 2), | ||
l_d_(value_of(length_scale)), | ||
sigma_d_(value_of(sigma)), | ||
: vari(0.0), size_(x.size()), size_ltri_(size_ * (size_ - 1) / 2), | ||
l_d_(value_of(length_scale)), sigma_d_(value_of(sigma)), | ||
sigma_sq_d_(sigma_d_ * sigma_d_), | ||
dist_(ChainableStack::memalloc_.alloc_array<double>(size_ltri_)), | ||
l_vari_(length_scale.vi_), | ||
sigma_vari_(sigma.vi_), | ||
cov_lower_(ChainableStack::memalloc_.alloc_array<vari *>(size_ltri_)), | ||
cov_diag_(ChainableStack::memalloc_.alloc_array<vari *>(size_)) { | ||
dist_(ChainableStack::context().memalloc_.alloc_array<double>( | ||
size_ltri_)), | ||
l_vari_(length_scale.vi_), sigma_vari_(sigma.vi_), | ||
cov_lower_(ChainableStack::context().memalloc_.alloc_array<vari*>( | ||
size_ltri_)), | ||
cov_diag_( | ||
ChainableStack::context().memalloc_.alloc_array<vari*>(size_)) { | ||
double neg_inv_l = -1.0 / l_d_; | ||
size_t pos = 0; | ||
for (size_t j = 0; j < size_ - 1; ++j) { | ||
for (size_t i = j + 1; i < size_; ++i) { | ||
double dist_sq = squared_distance(x[i], x[j]); | ||
dist_[pos] = dist_sq; | ||
cov_lower_[pos] | ||
= new vari(sigma_sq_d_ * exp(dist_sq * neg_inv_l), false); | ||
cov_lower_[pos] = | ||
new vari(sigma_sq_d_ * exp(dist_sq * neg_inv_l), false); | ||
++pos; | ||
} | ||
} | ||
|
@@ -114,7 +113,7 @@ class gp_exponential_cov_vari : public vari { | |
*/ | ||
template <typename T_x, typename T_l> | ||
class gp_exponential_cov_vari<T_x, double, T_l> : public vari { | ||
public: | ||
public: | ||
const size_t size_; | ||
const size_t size_ltri_; | ||
const double l_d_; | ||
|
@@ -145,24 +144,24 @@ class gp_exponential_cov_vari<T_x, double, T_l> : public vari { | |
*/ | ||
gp_exponential_cov_vari(const std::vector<T_x> &x, double sigma, | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. This isn't used. |
||
const T_l &length_scale) | ||
: vari(0.0), | ||
size_(x.size()), | ||
size_ltri_(size_ * (size_ - 1) / 2), | ||
l_d_(value_of(length_scale)), | ||
sigma_d_(value_of(sigma)), | ||
: vari(0.0), size_(x.size()), size_ltri_(size_ * (size_ - 1) / 2), | ||
l_d_(value_of(length_scale)), sigma_d_(value_of(sigma)), | ||
sigma_sq_d_(sigma_d_ * sigma_d_), | ||
dist_(ChainableStack::memalloc_.alloc_array<double>(size_ltri_)), | ||
dist_(ChainableStack::context().memalloc_.alloc_array<double>( | ||
size_ltri_)), | ||
l_vari_(length_scale.vi_), | ||
cov_lower_(ChainableStack::memalloc_.alloc_array<vari *>(size_ltri_)), | ||
cov_diag_(ChainableStack::memalloc_.alloc_array<vari *>(size_)) { | ||
cov_lower_(ChainableStack::context().memalloc_.alloc_array<vari*>( | ||
size_ltri_)), | ||
cov_diag_( | ||
ChainableStack::context().memalloc_.alloc_array<vari*>(size_)) { | ||
double neg_inv_l = -1.0 / l_d_; | ||
size_t pos = 0; | ||
for (size_t j = 0; j < size_ - 1; ++j) { | ||
for (size_t i = j + 1; i < size_; ++i) { | ||
double dist_sq = squared_distance(x[i], x[j]); | ||
dist_[pos] = dist_sq; | ||
cov_lower_[pos] | ||
= new vari(sigma_sq_d_ * exp(dist_sq * neg_inv_l), false); | ||
cov_lower_[pos] = | ||
new vari(sigma_sq_d_ * exp(dist_sq * neg_inv_l), false); | ||
++pos; | ||
} | ||
} | ||
|
@@ -193,7 +192,7 @@ class gp_exponential_cov_vari<T_x, double, T_l> : public vari { | |
*/ | ||
template <typename T_x, typename T_s> | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. This isn't used, so remove. |
||
class gp_exponential_cov_vari<T_x, T_s, double> : public vari { | ||
public: | ||
public: | ||
const size_t size_; | ||
const size_t size_ltri_; | ||
const double l_d_; | ||
|
@@ -224,23 +223,23 @@ class gp_exponential_cov_vari<T_x, T_s, double> : public vari { | |
*/ | ||
gp_exponential_cov_vari(const std::vector<T_x> &x, const T_s &sigma, | ||
double length_scale) | ||
: vari(0.0), | ||
size_(x.size()), | ||
size_ltri_(size_ * (size_ - 1) / 2), | ||
l_d_(value_of(length_scale)), | ||
sigma_d_(value_of(sigma)), | ||
: vari(0.0), size_(x.size()), size_ltri_(size_ * (size_ - 1) / 2), | ||
l_d_(value_of(length_scale)), sigma_d_(value_of(sigma)), | ||
sigma_sq_d_(sigma_d_ * sigma_d_), | ||
dist_(ChainableStack::memalloc_.alloc_array<double>(size_ltri_)), | ||
cov_lower_(ChainableStack::memalloc_.alloc_array<vari *>(size_ltri_)), | ||
cov_diag_(ChainableStack::memalloc_.alloc_array<vari *>(size_)) { | ||
dist_(ChainableStack::context().memalloc_.alloc_array<double>( | ||
size_ltri_)), | ||
cov_lower_(ChainableStack::context().memalloc_.alloc_array<vari*>( | ||
size_ltri_)), | ||
cov_diag_( | ||
ChainableStack::context().memalloc_.alloc_array<vari*>(size_)) { | ||
double neg_inv_l = -1.0 / l_d_; | ||
size_t pos = 0; | ||
for (size_t j = 0; j < size_ - 1; ++j) { | ||
for (size_t i = j + 1; i < size_; ++i) { | ||
double dist_sq = squared_distance(x[i], x[j]); | ||
dist_[pos] = dist_sq; | ||
cov_lower_[pos] | ||
= new vari(sigma_sq_d_ * exp(dist_sq * neg_inv_l), false); | ||
cov_lower_[pos] = | ||
new vari(sigma_sq_d_ * exp(dist_sq * neg_inv_l), false); | ||
++pos; | ||
} | ||
} | ||
|
@@ -287,8 +286,8 @@ gp_exponential_cov(const std::vector<T_x> &x, const var &sigma, const var &l) { | |
if (x_size == 0) | ||
return cov; | ||
|
||
gp_exponential_cov_vari<T_x, var, var> *baseVari | ||
= new gp_exponential_cov_vari<T_x, var, var>(x, sigma, l); | ||
gp_exponential_cov_vari<T_x, var, var> *baseVari = | ||
new gp_exponential_cov_vari<T_x, var, var>(x, sigma, l); | ||
|
||
size_t pos = 0; | ||
for (size_t j = 0; j < x_size - 1; ++j) { | ||
|
@@ -304,4 +303,5 @@ gp_exponential_cov(const std::vector<T_x> &x, const var &sigma, const var &l) { | |
} | ||
} // namespace math | ||
} // namespace stan | ||
|
||
#endif |
Add this suggestion to a batch that can be applied as a single commit.
This suggestion is invalid because no changes were made to the code.
Suggestions cannot be applied while the pull request is closed.
Suggestions cannot be applied while viewing a subset of changes.
Only one suggestion per line can be applied in a batch.
Add this suggestion to a batch that can be applied as a single commit.
Applying suggestions on deleted lines is not supported.
You must change the existing code in this line in order to create a valid suggestion.
Outdated suggestions cannot be applied.
This suggestion has been applied or marked resolved.
Suggestions cannot be applied from pending reviews.
Suggestions cannot be applied on multi-line comments.
Suggestions cannot be applied while the pull request is queued to merge.
Suggestion cannot be applied right now. Please check back later.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Questions:
<double, var, var>, <T_x, double, T_l>, and <T_x, T_s, double>.
I guess I'm saying I don't believe that it'll compile if you actually tried to have both in there.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Why are there two specializations in
stan/math/rev/mat/fun/cov_exp_quad.hpp?
OK, let me find argument deductions where it fails, and I'll most likely remove the other two specializations.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
There are two different partial template specializations in the rev version of cov_exp_quad.
L215 has: cov_exp_quad<double, var, var>
L257 has: cov_exp_quad<double, double, var>
That's why there is a base template function and then a partial specialization of class cov_exp_quad_vari.
Now that I look at it, there's probably a different way to implement it, but that's a different question.