register parameters correctly in c++ MultiheadAttention (pytorch#42037)
Summary:
Pull Request resolved: pytorch#42037

This is to fix pytorch#41951

Test Plan: Imported from OSS

Reviewed By: yf225

Differential Revision: D22764717

Pulled By: glaringlee

fbshipit-source-id: e6da0aeb05a2356f52446e6d5fad391f2cd1cf6f
lixinyu authored and facebook-github-bot committed Jul 27, 2020
1 parent e59db43 commit 5246bc4
Showing 2 changed files with 44 additions and 14 deletions.
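
Background for the fix: before this change, MultiheadAttentionImpl::reset() created its projection weights and biases with bare torch::empty(...) calls and never called register_parameter(), so those tensors were invisible to named_parameters(), to optimizers built from parameters(), and to to(device)/serialization. The sketch below (a hypothetical MyModule, not code from this PR) illustrates the difference between a plain tensor member and a registered parameter:

#include <torch/torch.h>
#include <iostream>

// Hypothetical module, for illustration only: a plain tensor member is not
// tracked by the module system; only register_parameter() makes it visible
// to named_parameters(), optimizers, to(device), and serialization.
struct MyModuleImpl : torch::nn::Module {
  MyModuleImpl() {
    plain_weight = torch::randn({4, 4});             // not registered (the old bug)
    registered_weight = register_parameter(
        "registered_weight", torch::randn({4, 4}));  // registered (what the fix does)
  }
  torch::Tensor plain_weight, registered_weight;
};
TORCH_MODULE(MyModule);

int main() {
  MyModule m;
  // Prints only "registered_weight"; plain_weight is silently skipped.
  for (const auto& p : m->named_parameters()) {
    std::cout << p.key() << std::endl;
  }
}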
35 changes: 30 additions & 5 deletions test/cpp/api/modules.cpp
@@ -1553,7 +1553,7 @@ TEST_F(ModulesTest, BatchNorm2d) {
ASSERT_TRUE(output.allclose(expected));
auto s = output.sum();
s.backward();

ASSERT_EQ(input.sizes(), input.grad().sizes());
}

@@ -1643,7 +1643,7 @@ TEST_F(ModulesTest, BatchNorm3d) {
ASSERT_TRUE(output.allclose(expected));
auto s = output.sum();
s.backward();

ASSERT_EQ(input.sizes(), input.grad().sizes());
}

@@ -2059,7 +2059,7 @@ TEST_F(ModulesTest, TripletMarginLoss) {

TEST_F(ModulesTest, NLLLoss) {
NLLLoss loss;
-auto input = torch::tensor({{-0.1315, -3.1315, -2.5315},
+auto input = torch::tensor({{-0.1315, -3.1315, -2.5315},
{-3.7038, -0.1038, -2.6038},
{-2.3422, -1.3422, -0.4422}},
torch::dtype(torch::kFloat).requires_grad(true));
@@ -3188,6 +3188,7 @@ namespace detail {
std::mt19937 generator(device());
std::uniform_int_distribution<int> d_2_10(2, 10);
std::uniform_int_distribution<int> d_3_10(3, 10);
+bool registration_checked = false;
for (int i = 0; i < 100; i++) {
const auto batch_sz = d_2_10(generator);
const auto seq_len = d_2_10(generator);
@@ -3200,6 +3201,9 @@ namespace detail {
} else {
std::uniform_int_distribution<int> d(5, 20);
kv_dim = d(generator);
+while (kv_dim == d_model) {
+  kv_dim = d(generator);
+}
}
std::vector<int64_t> dims {batch_sz, seq_len, kv_dim};
torch::Tensor saved_k;
@@ -3238,6 +3242,27 @@ namespace detail {
.vdim(kv_dim);
const auto multihead_attn_module = MultiheadAttention(options);

+if (!registration_checked) {
+  // make sure parameters are all registered correctly
+  auto named_parameters = multihead_attn_module->named_parameters();
+  if (same_embed_dim) {
+    ASSERT_TRUE(named_parameters.contains("in_proj_weight"));
+  }
+  else {
+    ASSERT_TRUE(named_parameters.contains("q_proj_weight"));
+    ASSERT_TRUE(named_parameters.contains("k_proj_weight"));
+    ASSERT_TRUE(named_parameters.contains("v_proj_weight"));
+  }
+  if (add_bias_kv) {
+    ASSERT_TRUE(named_parameters.contains("bias_k"));
+    ASSERT_TRUE(named_parameters.contains("bias_v"));
+  }
+  // make sure sub modules are all registered correctly
+  auto submodules = multihead_attn_module->named_children();
+  ASSERT_TRUE(submodules.contains("out_proj"));
+  registration_checked = true;
+}

torch::Tensor bias_k;
torch::Tensor bias_v;
if (add_bias_kv) {
@@ -3768,10 +3793,10 @@ TEST_F(ModulesTest, CrossMapLRN2d) {
auto crossmaplrn2d = CrossMapLRN2d(3);
auto output = crossmaplrn2d(input);
output.sum().backward();

ASSERT_TRUE(input.grad().allclose(grad_expected));
ASSERT_TRUE(output.allclose(expected));

/// size change
crossmaplrn2d = CrossMapLRN2d(CrossMapLRN2dOptions(4).alpha(1e-4).beta(0.75).k(1));
output = crossmaplrn2d(input);
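The new test code above constructs MultiheadAttention with both matching and mismatched kdim/vdim and asserts, once per test run, that the expected parameter names and the out_proj submodule are registered. A standalone sketch of the same check (the dimensions below are chosen for illustration, not taken from the test):

#include <torch/torch.h>
#include <iostream>

int main() {
  namespace nn = torch::nn;
  // Assumed sizes for illustration: embed_dim=8, num_heads=2, kdim=vdim=6.
  auto attn = nn::MultiheadAttention(
      nn::MultiheadAttentionOptions(/*embed_dim=*/8, /*num_heads=*/2)
          .kdim(6)
          .vdim(6)
          .add_bias_kv(true));

  auto params = attn->named_parameters();
  // With kdim/vdim != embed_dim, separate projection weights must be registered.
  std::cout << params.contains("q_proj_weight") << " "
            << params.contains("k_proj_weight") << " "
            << params.contains("v_proj_weight") << " "
            << params.contains("bias_k") << " "
            << params.contains("bias_v") << std::endl;
  // The output projection is a registered submodule.
  std::cout << attn->named_children().contains("out_proj") << std::endl;
}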
23 changes: 14 additions & 9 deletions torch/csrc/api/src/nn/modules/activation.cpp
@@ -404,7 +404,7 @@ std::tuple<Tensor, Tensor> MultiheadAttentionImpl::forward(
bool need_weights, const Tensor& attn_mask) {
if (!_qkv_same_embed_dim) {
return F::multi_head_attention_forward(
-query, key, value,
+query, key, value,
F::MultiheadAttentionForwardFuncOptions(
/*embed_dim_to_check=*/options.embed_dim(),
/*num_heads=*/options.num_heads(),
@@ -427,7 +427,7 @@ std::tuple<Tensor, Tensor> MultiheadAttentionImpl::forward(
);
} else {
return F::multi_head_attention_forward(
-query, key, value,
+query, key, value,
F::MultiheadAttentionForwardFuncOptions(
/*embed_dim_to_check=*/options.embed_dim(),
/*num_heads=*/options.num_heads(),
@@ -454,18 +454,23 @@ void MultiheadAttentionImpl::reset() {
TORCH_CHECK(head_dim * options.num_heads() == options.embed_dim(),
"embed_dim must be divisible by num_heads");
if (!_qkv_same_embed_dim) {
-q_proj_weight = torch::empty({options.embed_dim(), options.embed_dim()});
-k_proj_weight = torch::empty({options.embed_dim(), options.kdim()});
-v_proj_weight = torch::empty({options.embed_dim(), options.vdim()});
+q_proj_weight = register_parameter(
+    "q_proj_weight", torch::empty({options.embed_dim(), options.embed_dim()}));
+k_proj_weight = register_parameter(
+    "k_proj_weight", torch::empty({options.embed_dim(), options.kdim()}));
+v_proj_weight = register_parameter(
+    "v_proj_weight", torch::empty({options.embed_dim(), options.vdim()}));
register_parameter("in_proj_weight", {}, /*requires_grad=*/false);
} else {
-in_proj_weight = torch::empty({3 * options.embed_dim(), options.embed_dim()});
+in_proj_weight = register_parameter(
+    "in_proj_weight", torch::empty({3 * options.embed_dim(), options.embed_dim()}));
register_parameter("q_proj_weight", {}, /*requires_grad=*/false);
register_parameter("k_proj_weight", {}, /*requires_grad=*/false);
register_parameter("v_proj_weight", {}, /*requires_grad=*/false);
}
if (options.bias()) {
-in_proj_bias = torch::empty(3 * options.embed_dim());
+in_proj_bias = register_parameter(
+    "in_proj_bias", torch::empty(3 * options.embed_dim()));
} else {
register_parameter("in_proj_bias", {}, /*requires_grad=*/false);
}
@@ -475,8 +480,8 @@ void MultiheadAttentionImpl::reset() {
options.embed_dim()).bias(options.bias()))
);
if (options.add_bias_kv()) {
-bias_k = torch::empty({1, 1, options.embed_dim()});
-bias_v = torch::empty({1, 1, options.embed_dim()});
+bias_k = register_parameter("bias_k", torch::empty({1, 1, options.embed_dim()}));
+bias_v = register_parameter("bias_v", torch::empty({1, 1, options.embed_dim()}));
} else {
bias_k = {};
bias_v = {};
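The reset() changes above are the substance of the fix: each weight and bias tensor is now created through register_parameter() instead of a bare torch::empty(), so it participates in parameters(), gradient updates, device moves, and serialization. A sketch of the practical effect (toy sizes assumed for illustration, not taken from the PR):

#include <torch/torch.h>
#include <iostream>

int main() {
  namespace nn = torch::nn;
  // Assumed sizes: embed_dim=8, num_heads=2, seq_len=3, batch=2.
  auto attn = nn::MultiheadAttention(
      nn::MultiheadAttentionOptions(/*embed_dim=*/8, /*num_heads=*/2));

  // Before the fix, in_proj_weight and in_proj_bias were missing from this
  // list; with the fix they are present alongside out_proj's parameters.
  std::cout << "parameter tensors: " << attn->parameters().size() << std::endl;

  // Because the projection weights are now registered, an optimizer built
  // from parameters() actually updates them.
  torch::optim::SGD optimizer(attn->parameters(), /*lr=*/0.01);

  auto query = torch::randn({3, 2, 8});  // (seq_len, batch, embed_dim)
  auto output = attn->forward(query, query, query);
  std::get<0>(output).sum().backward();
  optimizer.step();
}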
