diff --git a/stan/math/prim/fun/as_array_or_scalar.hpp b/stan/math/prim/fun/as_array_or_scalar.hpp index 8845c2432b2..6e91323d447 100644 --- a/stan/math/prim/fun/as_array_or_scalar.hpp +++ b/stan/math/prim/fun/as_array_or_scalar.hpp @@ -52,7 +52,8 @@ inline auto as_array_or_scalar(T&& v) { * @param v Specified vector. * @return Matrix converted to an array. */ -template * = nullptr> +template * = nullptr, + require_not_std_vector_t>* = nullptr> inline auto as_array_or_scalar(T&& v) { using T_map = Eigen::Map, Eigen::Dynamic, 1>>; @@ -60,6 +61,24 @@ inline auto as_array_or_scalar(T&& v) { std::forward(v)); } +/** + * Converts an std::vector to an Eigen Array. + * @tparam T A standard vector whose elements are standard vectors + * with an inner Stan scalar type. + * @param v Specified std::vector of std::vectors to convert. + * @return An Eigen Array with dynamic rows and columns. + */ +template * = nullptr, + require_std_vector_vt>* = nullptr> +inline auto as_array_or_scalar(T&& v) { + Eigen::Array, -1, -1> ret(v.size(), v[0].size()); + for (size_t i = 0; i < v.size(); ++i) { + ret.row(i) = Eigen::Map, 1, -1>>( + v[i].data(), v[i].size()); + } + return ret; +} + } // namespace math } // namespace stan diff --git a/stan/math/prim/fun/beta.hpp b/stan/math/prim/fun/beta.hpp index da31af4bbb4..2ebc9cac2e4 100644 --- a/stan/math/prim/fun/beta.hpp +++ b/stan/math/prim/fun/beta.hpp @@ -69,7 +69,7 @@ template * = nullptr, require_all_not_var_matrix_t* = nullptr> inline auto beta(const T1& a, const T2& b) { return apply_scalar_binary( - a, b, [&](const auto& c, const auto& d) { return beta(c, d); }); + a, b, [](const auto& c, const auto& d) { return beta(c, d); }); } } // namespace math diff --git a/stan/math/prim/functor/apply_scalar_binary.hpp b/stan/math/prim/functor/apply_scalar_binary.hpp index 2fb63667057..5210e901121 100644 --- a/stan/math/prim/functor/apply_scalar_binary.hpp +++ b/stan/math/prim/functor/apply_scalar_binary.hpp @@ -320,8 +320,9 @@ inline auto apply_scalar_binary(const T1& x, const T2& y, const F& f) { * @param f functor to apply to std::vector inputs. * @return std::vector with result of applying functor to inputs. */ -template * = nullptr> +template < + typename T1, typename T2, typename F, + require_all_std_vector_vt* = nullptr> inline auto apply_scalar_binary(const T1& x, const T2& y, const F& f) { check_matching_sizes("Binary function", "x", x, "y", y); using T_return = plain_type_t; @@ -348,7 +349,7 @@ inline auto apply_scalar_binary(const T1& x, const T2& y, const F& f) { * @return std::vector with result of applying functor to inputs. 
*/ template * = nullptr, + require_std_vector_vt* = nullptr, require_stan_scalar_t* = nullptr> inline auto apply_scalar_binary(const T1& x, const T2& y, const F& f) { using T_return = plain_type_t; @@ -376,7 +377,7 @@ inline auto apply_scalar_binary(const T1& x, const T2& y, const F& f) { */ template * = nullptr, - require_std_vector_vt* = nullptr> + require_std_vector_vt* = nullptr> inline auto apply_scalar_binary(const T1& x, const T2& y, const F& f) { using T_return = plain_type_t; size_t y_size = y.size(); diff --git a/stan/math/prim/functor/apply_vector_unary.hpp b/stan/math/prim/functor/apply_vector_unary.hpp index 0b0db855aac..5c68d0b2b14 100644 --- a/stan/math/prim/functor/apply_vector_unary.hpp +++ b/stan/math/prim/functor/apply_vector_unary.hpp @@ -159,12 +159,6 @@ struct apply_vector_unary> { } }; -namespace internal { -template -using is_container_or_var_matrix - = disjunction, is_var_matrix>; -} - /** * Specialisation for use with nested containers (std::vectors). * For each of the member functions, an std::vector with the appropriate @@ -177,7 +171,7 @@ using is_container_or_var_matrix */ template struct apply_vector_unary< - T, require_std_vector_vt> { + T, require_std_vector_vt> { using T_vt = value_type_t; /** diff --git a/stan/math/prim/meta.hpp b/stan/math/prim/meta.hpp index f07d43820c9..adc3b92f12b 100644 --- a/stan/math/prim/meta.hpp +++ b/stan/math/prim/meta.hpp @@ -192,6 +192,7 @@ #include #include #include +#include #include #include #include diff --git a/stan/math/prim/meta/is_container_or_var_matrix.hpp b/stan/math/prim/meta/is_container_or_var_matrix.hpp new file mode 100644 index 00000000000..5d970ce31da --- /dev/null +++ b/stan/math/prim/meta/is_container_or_var_matrix.hpp @@ -0,0 +1,34 @@ +#ifndef STAN_MATH_PRIM_META_IS_CONTAINER_OR_VAR_MATRIX_HPP +#define STAN_MATH_PRIM_META_IS_CONTAINER_OR_VAR_MATRIX_HPP + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +namespace stan { + +/** + * Deduces whether type is eigen matrix, standard vector, or var. 
+ * @tparam Container type to check + */ +template +using is_container_or_var_matrix + = bool_constant, + is_var_matrix>::value>; + +STAN_ADD_REQUIRE_UNARY(container_or_var_matrix, is_container_or_var_matrix, + general_types); +STAN_ADD_REQUIRE_CONTAINER(container_or_var_matrix, is_container_or_var_matrix, + general_types); + +} // namespace stan + +#endif diff --git a/stan/math/rev/fun/bessel_first_kind.hpp b/stan/math/rev/fun/bessel_first_kind.hpp index f9abde37e3b..21e745b2d42 100644 --- a/stan/math/rev/fun/bessel_first_kind.hpp +++ b/stan/math/rev/fun/bessel_first_kind.hpp @@ -18,6 +18,10 @@ inline var bessel_first_kind(int v, const var& a) { }); } +/** + * Overload with `var_value` for `int`, `std::vector`, and + * `std::vector>` + */ template * = nullptr, require_eigen_t* = nullptr> inline auto bessel_first_kind(const T1& v, const var_value& a) { diff --git a/stan/math/rev/fun/bessel_second_kind.hpp b/stan/math/rev/fun/bessel_second_kind.hpp index 28aeadae8de..290f5d703ee 100644 --- a/stan/math/rev/fun/bessel_second_kind.hpp +++ b/stan/math/rev/fun/bessel_second_kind.hpp @@ -17,6 +17,10 @@ inline var bessel_second_kind(int v, const var& a) { }); } +/** + * Overload with `var_value` for `int`, `std::vector`, and + * `std::vector>` + */ template * = nullptr, require_eigen_t* = nullptr> inline auto bessel_second_kind(const T1& v, const var_value& a) { diff --git a/stan/math/rev/fun/binary_log_loss.hpp b/stan/math/rev/fun/binary_log_loss.hpp index 116cca95105..990a0b979c3 100644 --- a/stan/math/rev/fun/binary_log_loss.hpp +++ b/stan/math/rev/fun/binary_log_loss.hpp @@ -55,6 +55,9 @@ inline var binary_log_loss(int y, const var& y_hat) { } } +/** + * Overload with `int` and `var_value` + */ template * = nullptr> inline auto binary_log_loss(int y, const var_value& y_hat) { if (y == 0) { @@ -70,12 +73,14 @@ inline auto binary_log_loss(int y, const var_value& y_hat) { } } -template * = nullptr> -inline auto binary_log_loss(const std::vector& y, - const var_value& y_hat) { - arena_t> arena_y - = Eigen::Map>(y.data(), y.size()) - .cast(); +/** + * Overload with `var_value` for `std::vector` and + * `std::vector>` + */ +template * = nullptr, + require_st_integral* = nullptr> +inline auto binary_log_loss(const StdVec& y, const var_value& y_hat) { + auto arena_y = to_arena(as_array_or_scalar(y).template cast()); auto ret_val = -(arena_y == 0) .select((-y_hat.val().array()).log1p(), y_hat.val().array().log()); diff --git a/stan/math/rev/functor.hpp b/stan/math/rev/functor.hpp index 4e36d2e7b14..f4c6923b43c 100644 --- a/stan/math/rev/functor.hpp +++ b/stan/math/rev/functor.hpp @@ -6,6 +6,7 @@ #include #include #include +#include #include #include #include diff --git a/stan/math/rev/functor/apply_scalar_binary.hpp b/stan/math/rev/functor/apply_scalar_binary.hpp new file mode 100644 index 00000000000..00269473cea --- /dev/null +++ b/stan/math/rev/functor/apply_scalar_binary.hpp @@ -0,0 +1,102 @@ +#ifndef STAN_MATH_REV_FUNCTOR_APPLY_SCALAR_BINARY_HPP +#define STAN_MATH_REV_FUNCTOR_APPLY_SCALAR_BINARY_HPP + +#include +#include +#include +#include +#include +#include + +namespace stan { +namespace math { + +/** + * Specialisation for use with combinations of + * `Eigen::Matrix` and `var_value` inputs. + * Eigen's binaryExpr framework is used for more efficient indexing of both row- + * and column-major inputs without separate loops. + * + * @tparam T1 Type of first argument to which functor is applied. + * @tparam T2 Type of second argument to which functor is applied. 
* @tparam F Type of functor to apply. + * @param x First Matrix input to which operation is applied. + * @param y Second Matrix input to which operation is applied. + * @param f functor to apply to Matrix inputs. + * @return `var_value` with result of applying functor to inputs. + */ +template * = nullptr, + require_all_matrix_t* = nullptr> +inline auto apply_scalar_binary(const T1& x, const T2& y, const F& f) { + check_matching_dims("Binary function", "x", x, "y", y); + return f(x, y); +} + +/** + * Specialisation for use with one `var_value` (row or column) and + * a one-dimensional std::vector of integer types. + * + * @tparam T1 Type of first argument to which functor is applied. + * @tparam T2 Type of second argument to which functor is applied. + * @tparam F Type of functor to apply. + * @param x Matrix input to which operation is applied. + * @param y Integer std::vector input to which operation is applied. + * @param f functor to apply to inputs. + * @return var_value object with result of applying functor to inputs. + */ +template * = nullptr, + require_any_std_vector_vt* = nullptr> +inline auto apply_scalar_binary(const T1& x, const T2& y, const F& f) { + check_matching_sizes("Binary function", "x", x, "y", y); + return f(x, y); +} + +/** + * Specialisation for use with a two-dimensional std::vector of integer types + * and one `var_value`. + * + * @tparam T1 Type of first argument to which functor is applied. + * @tparam T2 Type of second argument to which functor is applied. + * @tparam F Type of functor to apply. + * @param x Either a var matrix or nested integer std::vector input to which + * operation is applied. + * @param y Either a var matrix or nested integer std::vector input to which + * operation is applied. + * @param f functor to apply to inputs. + * @return Eigen object with result of applying functor to inputs. + */ +template * = nullptr, + require_any_std_vector_st* = nullptr, + require_any_var_matrix_t* = nullptr> +inline auto apply_scalar_binary(const T1& x, const T2& y, const F& f) { + return f(x, y); +} + +/** + * Specialisation for use when one input is a `var_value` type and + * the other is a scalar. + * + * @tparam T1 Type of either `var_value` or scalar object to which + * functor is applied. + * @tparam T2 Type of either `var_value` or scalar object to which + * functor is applied. + * @tparam F Type of functor to apply. + * @param x Matrix or Scalar input to which operation is applied. + * @param y Matrix or Scalar input to which operation is applied. + * @param f functor to apply to var matrix and scalar inputs. + * @return `var_value` object with result of applying functor to inputs. 
+ * + */ +template * = nullptr, + require_any_var_matrix_t* = nullptr> +inline auto apply_scalar_binary(const T1& x, const T2& y, const F& f) { + return f(x, y); +} + +} // namespace math +} // namespace stan +#endif diff --git a/test/unit/math/mix/fun/bessel_first_kind_test.cpp b/test/unit/math/mix/fun/bessel_first_kind_test.cpp index a02df4cd333..4d81eaefec5 100644 --- a/test/unit/math/mix/fun/bessel_first_kind_test.cpp +++ b/test/unit/math/mix/fun/bessel_first_kind_test.cpp @@ -37,9 +37,7 @@ TEST(mathMixScalFun, besselFirstKind_matvec) { }; std::vector std_in1{3, 1}; - Eigen::VectorXd in2(2); - in2 << 0.5, 3.4; - stan::test::expect_ad_matvar(f, std_in1, in2); - - stan::test::expect_ad_matvar(f, std_in1[0], in2); + Eigen::MatrixXd in2(2, 2); + in2 << 0.5, 3.4, 0.5, 3.4; + stan::test::expect_ad_vectorized_matvar(f, std_in1, in2); } diff --git a/test/unit/math/mix/fun/bessel_second_kind_test.cpp b/test/unit/math/mix/fun/bessel_second_kind_test.cpp index eaf90a08fd3..20ac4eba54c 100644 --- a/test/unit/math/mix/fun/bessel_second_kind_test.cpp +++ b/test/unit/math/mix/fun/bessel_second_kind_test.cpp @@ -38,8 +38,7 @@ TEST(mathMixScalFun, besselSecondKind_matvec) { }; std::vector std_in1{3, 1}; - Eigen::VectorXd in2(2); - in2 << 0.5, 3.4; - stan::test::expect_ad_matvar(f, std_in1, in2); - stan::test::expect_ad_matvar(f, std_in1[0], in2); + Eigen::MatrixXd in2(2, 2); + in2 << 0.5, 3.4, 0.5, 3.4; + stan::test::expect_ad_vectorized_matvar(f, std_in1, in2); } diff --git a/test/unit/math/mix/fun/beta2_test.cpp b/test/unit/math/mix/fun/beta2_test.cpp new file mode 100644 index 00000000000..14f3a1c3391 --- /dev/null +++ b/test/unit/math/mix/fun/beta2_test.cpp @@ -0,0 +1,14 @@ +#include + +TEST(mathMixScalFun, beta_varmat_vectorized) { + auto f = [](const auto& x1, const auto& x2) { + using stan::math::beta; + return beta(x1, x2); + }; + + Eigen::MatrixXd in1(2, 2); + in1 << 0.5, 3.4, 5.2, 0.5; + Eigen::MatrixXd in2(2, 2); + in2 << 3.3, 0.9, 6.7, 3.3; + stan::test::expect_ad_vectorized_matvar(f, in1, in2); +} diff --git a/test/unit/math/mix/fun/binary_log_loss_test.cpp b/test/unit/math/mix/fun/binary_log_loss_test.cpp index 78f115a888a..109cda944ab 100644 --- a/test/unit/math/mix/fun/binary_log_loss_test.cpp +++ b/test/unit/math/mix/fun/binary_log_loss_test.cpp @@ -29,7 +29,7 @@ TEST(mathMixScalFun, binaryLogLossvec) { stan::test::expect_ad_vectorized_binary(f, std_std_in1, mat_in2); } -TEST(mathMixScalFun, binaryLogLossmatvar) { +TEST(mathMixScalFun, binaryLogLossMatVar) { auto f = [](const auto& x1, const auto& x2) { using stan::math::binary_log_loss; return binary_log_loss(x1, x2); @@ -41,3 +41,15 @@ TEST(mathMixScalFun, binaryLogLossmatvar) { stan::test::expect_ad_matvar(f, std_in1, in2); stan::test::expect_ad_matvar(f, std_in1[0], in2); } + +TEST(mathMixScalFun, binaryLogLossMatVarVec) { + auto f = [](const auto& x1, const auto& x2) { + using stan::math::binary_log_loss; + return binary_log_loss(x1, x2); + }; + + std::vector std_in1{3, 1}; + Eigen::MatrixXd in2(2, 2); + in2 << 0.5, 3.4, 0.5, 3.5; + stan::test::expect_ad_vectorized_matvar(f, std_in1, in2); +} diff --git a/test/unit/math/mix/fun/falling_factorial_test.cpp b/test/unit/math/mix/fun/falling_factorial_test.cpp index 3772a4a9e6d..cda3d979620 100644 --- a/test/unit/math/mix/fun/falling_factorial_test.cpp +++ b/test/unit/math/mix/fun/falling_factorial_test.cpp @@ -43,9 +43,13 @@ TEST(mathMixScalFun, fallingFactorial_matvar) { return falling_factorial(x1, x2); }; + std::vector std_in2{3, 1}; Eigen::VectorXd in1(2); in1 << 0.5, 3.4; - 
std::vector std_in2{3, 1}; + Eigen::MatrixXd mat(2, 2); + mat << 0.5, 3.4, 0.5, 3.4; + stan::test::expect_ad_matvar(f, in1, std_in2); stan::test::expect_ad_matvar(f, in1, std_in2[0]); + stan::test::expect_ad_vectorized_matvar(f, mat, std_in2); } diff --git a/test/unit/math/test_ad.hpp b/test/unit/math/test_ad.hpp index a6dd3a1d0b5..3ad6e552dc6 100644 --- a/test/unit/math/test_ad.hpp +++ b/test/unit/math/test_ad.hpp @@ -1306,15 +1306,16 @@ void expect_ad_vectorized_binary_impl(const ad_tolerances& tols, const F& f, std::vector nest_y{y, y}; std::vector> nest_nest_x{nest_x, nest_x}; std::vector> nest_nest_y{nest_y, nest_y}; - expect_ad(tols, f, x, y); - expect_ad(tols, f, x, y[0]); - expect_ad(tols, f, x[0], y); - expect_ad(tols, f, nest_x, nest_y); - expect_ad(tols, f, nest_x, y[0]); - expect_ad(tols, f, x[0], nest_y); - expect_ad(tols, f, nest_nest_x, nest_nest_y); - expect_ad(tols, f, nest_nest_x, y[0]); - expect_ad(tols, f, x[0], nest_nest_y); + expect_ad(tols, f, x, y); // mat, mat + expect_ad(tols, f, x, y[0]); // mat, scal + expect_ad(tols, f, x[0], y); // scal, mat + expect_ad(tols, f, nest_x, nest_y); // nest, nest + expect_ad(tols, f, nest_x, y[0]); // nest, scal + expect_ad(tols, f, x[0], nest_y); // scal, nest + expect_ad(tols, f, nest_nest_x, + nest_nest_y); // nest>, nest> + expect_ad(tols, f, nest_nest_x, y[0]); // nest, scal + expect_ad(tols, f, x[0], nest_nest_y); // scal, nest> } /** diff --git a/test/unit/math/test_ad_matvar.hpp b/test/unit/math/test_ad_matvar.hpp index 99add70c36b..1cde23558d9 100644 --- a/test/unit/math/test_ad_matvar.hpp +++ b/test/unit/math/test_ad_matvar.hpp @@ -31,8 +31,10 @@ namespace test { * @param y Second argument to compare * @param tols Tolerances for comparison */ -template * = nullptr> +template < + typename T1, typename T2, + require_all_not_std_vector_t, value_type_t>* = nullptr, + require_all_std_vector_st* = nullptr> void expect_near_rel_matvar(const std::string& message, T1&& x, T2&& y, const ad_tolerances& tols) { stan::math::check_size_match("expect_near_rel_var", "x", x.size(), "y", @@ -47,6 +49,20 @@ void expect_near_rel_matvar(const std::string& message, T1&& x, T2&& y, } } +template * = nullptr, + require_all_std_vector_st* = nullptr> +void expect_near_rel_matvar(const std::string& message, T1&& x, T2&& y, + const ad_tolerances& tols) { + stan::math::check_size_match("expect_near_rel_var", "x", x.size(), "y", + y.size()); + for (size_t i = 0; i < x.size(); ++i) { + expect_near_rel_matvar( + message + std::string(" elements at i = ") + std::to_string(i), x[i], + y[i], tols); + } +} + /** * Overload for non-`std::vector` arguments with scalar type `var` * @@ -307,8 +323,8 @@ auto make_matvar_compatible(const std::vector& x) { using vec_mat_var = std::vector>; vec_mat_var A_vec_mv; - for (auto xi : x) { - A_vec_mv.push_back(xi); + for (auto&& xi : x) { + A_vec_mv.push_back(make_matvar_compatible(xi)); } return A_vec_mv; } @@ -361,11 +377,23 @@ template * = nullptr, auto make_varmat_compatible(const std::vector& x) { using vec_var_mat = std::vector>>; vec_var_mat A_vec_vm; - for (auto xi : x) { - A_vec_vm.push_back(xi); + for (auto&& xi : x) { + A_vec_vm.push_back(make_varmat_compatible(xi)); + } + return A_vec_vm; +} +template * = nullptr, + require_st_arithmetic* = nullptr> +auto make_varmat_compatible(const std::vector>& x) { + using vec_var_mat + = std::vector>>>; + vec_var_mat A_vec_vm; + for (auto&& xi : x) { + A_vec_vm.push_back(make_varmat_compatible(xi)); } return A_vec_vm; } + ///@} /** @@ -560,7 +588,7 @@ void 
expect_ad_matvar(const ad_tolerances& tols, const F& f, const EigMat1& x, using stan::math::var; using varmat = stan::math::var_value; - expect_ad_matvar_impl(tols, f, x, y); + expect_ad_matvar_impl, varmat>(tols, f, x, y); } template * = nullptr, + require_all_not_st_integral* = nullptr> +void expect_ad_vectorized_matvar(const ad_tolerances& tols, const F& f, + const T1& x, const T2& y) { + auto x_scal = x.coeff(0, 0); + auto y_scal = y.coeff(0, 0); + auto x_vec = x.col(0).eval(); + auto y_vec = y.col(0).eval(); + auto x_rowvec = x.row(0).eval(); + auto y_rowvec = y.row(0).eval(); + + std::vector> x_scal_stdvec{x_scal, x_scal}; + std::vector> y_scal_stdvec{y_scal, y_scal}; + std::vector>> x_scal_stdvec_stdvec{ + x_scal_stdvec, x_scal_stdvec}; + std::vector>> y_scal_stdvec_stdvec{ + y_scal_stdvec, y_scal_stdvec}; + std::vector x_mat_stdvec{x, x}; + std::vector y_mat_stdvec{y, y}; + std::vector> x_mat_stdvec_stdvec{x_mat_stdvec, + x_mat_stdvec}; + std::vector> y_mat_stdvec_stdvec{y_mat_stdvec, + y_mat_stdvec}; + expect_ad_matvar(tols, f, x_scal, y); // scal, mat + expect_ad_matvar(tols, f, x, y_scal); // mat, scal + expect_ad_matvar(tols, f, x, y); // mat, mat + expect_ad_matvar(tols, f, x_mat_stdvec, + y_mat_stdvec); // nest, nest + expect_ad_matvar(tols, f, x_mat_stdvec, y_scal); // nest, scal + expect_ad_matvar(tols, f, x_scal, y_mat_stdvec); // scal, nest + expect_ad_matvar(tols, f, x_mat_stdvec_stdvec, + y_mat_stdvec_stdvec); // nest>, nest> + expect_ad_matvar(tols, f, x_mat_stdvec_stdvec, + y_scal); // nest, scal + expect_ad_matvar(tols, f, x_scal, + y_mat_stdvec_stdvec); // scal, nest + + std::vector x_vec_stdvec{x_vec, x_vec}; + std::vector y_vec_stdvec{y_vec, y_vec}; + std::vector> x_vec_stdvec_stdvec{x_vec_stdvec, + x_vec_stdvec}; + std::vector> y_vec_stdvec_stdvec{y_vec_stdvec, + y_vec_stdvec}; + + expect_ad_matvar(tols, f, x_vec, y_scal); // vec, scal + expect_ad_matvar(tols, f, x_scal, y_vec); // scal, vec + expect_ad_matvar(tols, f, x_vec, y_vec); // vec, vec + expect_ad_matvar(tols, f, x_vec_stdvec, + y_vec_stdvec); // nest, nest + expect_ad_matvar(tols, f, x_vec_stdvec, y_scal); // nest, scal + expect_ad_matvar(tols, f, x_scal, y_vec_stdvec); // scal, nest + expect_ad_matvar(tols, f, x_vec_stdvec_stdvec, + y_vec_stdvec_stdvec); // nest>, nest> + expect_ad_matvar(tols, f, x_vec_stdvec_stdvec, + y_scal); // nest, scal + expect_ad_matvar(tols, f, x_scal, + y_vec_stdvec_stdvec); // scal, nest + + std::vector x_rowvec_stdvec{x_rowvec, x_rowvec}; + std::vector y_rowvec_stdvec{y_rowvec, y_rowvec}; + std::vector> x_rowvec_stdvec_stdvec{ + x_rowvec_stdvec, x_rowvec_stdvec}; + std::vector> y_rowvec_stdvec_stdvec{ + y_rowvec_stdvec, y_rowvec_stdvec}; + + expect_ad_matvar(tols, f, x_scal, y_rowvec); // scal, rowvec + expect_ad_matvar(tols, f, x_rowvec, y_scal); // rowvec, scal + expect_ad_matvar(tols, f, x_rowvec, y_rowvec); // rowvec, rowvec + expect_ad_matvar(tols, f, x_rowvec_stdvec, + y_rowvec_stdvec); // nest, nest + expect_ad_matvar(tols, f, x_rowvec_stdvec, y_scal); // nest, scal + expect_ad_matvar(tols, f, x_scal, y_rowvec_stdvec); // scal, nest + expect_ad_matvar(tols, f, x_rowvec_stdvec_stdvec, + y_rowvec_stdvec_stdvec); // nest>, nest> + expect_ad_matvar(tols, f, x_rowvec_stdvec_stdvec, + y_scal); // nest, scal + expect_ad_matvar(tols, f, x_scal, + y_rowvec_stdvec_stdvec); // scal, nest +} + +/** + * Implementation function for testing that binary functions with vector inputs + * (both var_value and std::vector types) return the same first order + * derivative as if we were 
using Eigen inputs. + * + * @tparam F type of function + * @tparam T1 An std vector with inner integral type. + * @tparam T2 An Eigen matrix of floating point types + * @param f function to test + * @param x argument to test + * @param y argument to test + */ +template * = nullptr, + require_eigen_t* = nullptr> +void expect_ad_vectorized_matvar(const ad_tolerances& tols, const F& f, + const T1& x, const T2& y) { + auto x_scal = x[0]; + auto y_vec = y.col(0).eval(); + + std::vector x_stdvec{x, x}; + std::vector y_stdvec{y, y}; + std::vector y_stdvec_vec{y_vec, y_vec}; + std::vector> x_stdvec_stdvec{x_stdvec, x_stdvec}; + std::vector> y_stdvec_stdvec{y_stdvec, y_stdvec}; + expect_ad_matvar(tols, f, x[0], y); // scal, mat + expect_ad_matvar(tols, f, x[0], y_vec); // scal, vec + expect_ad_matvar(tols, f, x[0], y_stdvec); // scal, nest + expect_ad_matvar(tols, f, x, y_vec); // stdvec, vec + expect_ad_matvar(tols, f, x_stdvec, y_stdvec_vec); // nest, nest + expect_ad_matvar(tols, f, x_stdvec, y); // nest, mat + expect_ad_matvar(tols, f, x_stdvec_stdvec, + y_stdvec); // nest>, nest +} + +/** + * Implementation function for testing that binary functions with vector inputs + * (both var_value and std::vector types) return the same first order + * derivative as if we were using Eigen inputs. + * + * This is a specialisation for use when the second input is an integer type. + * We reuse the code in the (std::vector, Eigen::Matrix) specialization + * by writing a lambda that flips the inputs passed to the original lambda. + * + * @tparam F type of function + * @tparam T1 An Eigen matrix of floating point types + * @tparam T2 An std vector with inner integral type. + * @param f function to test + * @param x argument to test + * @param y argument to test + */ +template * = nullptr, + require_std_vector_vt* = nullptr> +void expect_ad_vectorized_matvar(const ad_tolerances& tols, const F& f, + const T1& x, const T2& y) { + auto g = [&f](const auto& x, const auto& y) { return f(y, x); }; + expect_ad_vectorized_matvar(tols, g, y, x); +} + +/** + * Overload with default tolerances + * + * @tparam F Type of function to test + * @tparam T1 Type of first test input + * @tparam T2 Type of second test input + * @param f Function to test + * @param x First test input + * @param y Second test input + */ +template +void expect_ad_vectorized_matvar(const F& f, const T1& x, const T2& y) { + ad_tolerances tols; + expect_ad_vectorized_matvar(tols, f, x, y); +} ///@} } // namespace test