Skip to content

Commit

Permalink
Create at::from_blob (pytorch#8640)
Browse files Browse the repository at this point in the history
  • Loading branch information
goldsborough committed Jun 20, 2018
1 parent 66e8ecf commit d46312f
Show file tree
Hide file tree
Showing 4 changed files with 28 additions and 10 deletions.
2 changes: 2 additions & 0 deletions aten/src/ATen/templates/Functions.h
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,8 @@

namespace at {

using native::from_blob;

${function_declarations}

static inline Type & infer_type(const Tensor & t) {
Expand Down
16 changes: 16 additions & 0 deletions aten/src/ATen/templates/NativeFunctions.h
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@
#include <ATen/TensorOptions.h>

#include <array>
#include <functional>
#include <string>
#include <tuple>
#include <vector>
Expand All @@ -22,6 +23,21 @@ struct Type;
namespace at {
namespace native {

/// Wraps a caller-owned memory region in a `Tensor` without copying it.
/// `deleter` is called with `data` when the tensor's storage is released,
/// letting the caller reclaim the buffer. `options` selects the backend and
/// dtype used to interpret the memory (defaults to the default type).
inline Tensor from_blob(
    void* data,
    IntList sizes,
    const std::function<void(void*)>& deleter,
    const TensorOptions& options = {}) {
  // Resolve the concrete Type from the options, then let it adopt the buffer.
  auto& type = options.type();
  return type.tensorFromBlob(data, sizes, deleter);
}

/// Wraps a caller-owned memory region in a `Tensor` without copying it.
/// The caller retains ownership of `data`: no deleter is registered, so the
/// buffer must outlive the returned tensor.
inline Tensor from_blob(
    void* data,
    IntList sizes,
    const TensorOptions& options = {}) {
  // Delegate to the deleter-taking overload with a deleter that does nothing.
  const auto noop_deleter = [](void*) {};
  return native::from_blob(data, sizes, noop_deleter, options);
}

${native_function_declarations}

} // namespace native
Expand Down
15 changes: 7 additions & 8 deletions test/cpp/api/integration.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -203,7 +203,8 @@ bool test_mnist(
const auto backend = useGPU ? at::kCUDA : at::kCPU;
auto inp =
torch::empty({batch_size, 1, trdata.size(2), trdata.size(3)}, backend);
auto lab = torch::empty({batch_size}, at::device(backend).dtype(torch::kInt64));
auto lab =
torch::empty({batch_size}, at::device(backend).dtype(torch::kInt64));
for (auto p = 0U; p < shuffled_inds.size() - batch_size; p++) {
inp[p % batch_size] = trdata[shuffled_inds[p]];
lab[p % batch_size] = trlabel[shuffled_inds[p]];
Expand Down Expand Up @@ -274,9 +275,7 @@ TEST_CASE("integration") {
rewards[i] = R;
}
auto r_t =
at::CPU(torch::kFloat32)
.tensorFromBlob(
rewards.data(), {static_cast<int64_t>(rewards.size())});
at::from_blob(rewards.data(), {static_cast<int64_t>(rewards.size())});
r_t = (r_t - r_t.mean()) / (r_t.std() + 1e-5);

std::vector<at::Tensor> policy_loss;
Expand Down Expand Up @@ -369,12 +368,12 @@ TEST_CASE("integration/mnist", "[cuda]") {
TEST_CASE("integration/mnist/batchnorm", "[cuda]") {
auto model = std::make_shared<SimpleContainer>();
auto conv1 = model->add(Conv2d(1, 10, 5), "conv1");
auto batchnorm2d = model->add(
BatchNorm(BatchNormOptions(10).stateful(true)), "batchnorm2d");
auto batchnorm2d =
model->add(BatchNorm(BatchNormOptions(10).stateful(true)), "batchnorm2d");
auto conv2 = model->add(Conv2d(10, 20, 5), "conv2");
auto linear1 = model->add(Linear(320, 50), "linear1");
auto batchnorm1 = model->add(
BatchNorm(BatchNormOptions(50).stateful(true)), "batchnorm1");
auto batchnorm1 =
model->add(BatchNorm(BatchNormOptions(50).stateful(true)), "batchnorm1");
auto linear2 = model->add(Linear(50, 10), "linear2");

auto forward = [&](Variable x) {
Expand Down
5 changes: 3 additions & 2 deletions torch/csrc/jit/tensor_conversions.h
Original file line number Diff line number Diff line change
Expand Up @@ -100,8 +100,9 @@ inline at::Tensor as_tensor(bool v) {
}

// Converts an IntList to an owning 1-D kLong tensor.
// The clone() is essential: from_blob only borrows the IntList's backing
// storage, which may not outlive the returned tensor.
inline at::Tensor as_tensor(at::IntList l) {
  const auto numel = static_cast<int64_t>(l.size());
  // from_blob takes a mutable void*; the data is never written, so the
  // const_cast is safe here.
  auto* data = const_cast<void*>(static_cast<const void*>(l.data()));
  return at::from_blob(data, {numel}, at::kLong).clone();
}

inline at::Tensor as_tensor(const at::Scalar& s) {
Expand Down

0 comments on commit d46312f

Please sign in to comment.