move flags to c10 (pytorch#12144)
Summary:
still in flux.
Pull Request resolved: pytorch#12144

Reviewed By: smessmer

Differential Revision: D10140176

Pulled By: Yangqing

fbshipit-source-id: 1a313abed022039333e3925d19f8b3ef2d95306c
Yangqing authored and facebook-github-bot committed Oct 4, 2018
1 parent c9f7d7b commit 38f3d1f
Showing 180 changed files with 1,342 additions and 1,260 deletions.
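The bulk of the diff is one mechanical pattern: flag macros move from the CAFFE2_-prefixed forms in "caffe2/core/flags.h" to the C10_-prefixed forms in "c10/util/Flags.h", and flag reads move from the caffe2:: namespace to c10::. A minimal sketch of the before/after shape, using a hypothetical flag named my_example_flag (not a flag from this commit):

// Before: the macro and the FLAGS_* symbol came from caffe2/core/flags.h.
//   #include "caffe2/core/flags.h"
//   CAFFE2_DEFINE_bool(my_example_flag, false, "An example flag.");
//   if (caffe2::FLAGS_my_example_flag) { /* ... */ }

// After: the same macro family is provided by c10/util/Flags.h and the
// generated symbol lives in the c10 namespace.
#include "c10/util/Flags.h"

C10_DEFINE_bool(my_example_flag, false, "An example flag.");

void UseExampleFlag() {
  if (c10::FLAGS_my_example_flag) {
    // ...
  }
}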
1 change: 0 additions & 1 deletion aten/src/ATen/core/C++17.h
@@ -251,5 +251,4 @@ template<class T> inline std::string to_string(T value) {
return detail::to_string_<T>::call(value);
}


}}
19 changes: 11 additions & 8 deletions aten/src/ATen/core/TensorImpl.h
@@ -11,9 +11,10 @@
#include <ATen/core/context_base.h>
#include <ATen/core/optional.h>

#include "c10/util/Flags.h"

#include "caffe2/core/allocator.h"
#include "caffe2/core/common.h"
#include "caffe2/core/flags.h"
#include "caffe2/core/logging.h"

// A global boolean variable to control whether we free memory when a Tensor
@@ -23,14 +24,13 @@
// This parameter is respected by "upper-case" methods which call Resize()
// (e.g., CopyFrom, ResizeLike); it is NOT respected by Tensor::resize_
// or ShrinkTo, both of which guarantee never to free memory.
CAFFE2_DECLARE_bool(caffe2_keep_on_shrink);
C10_DECLARE_bool(caffe2_keep_on_shrink);

// Since we can have high variance in blob memory allocated across different
// inputs in the same run, we will shrink the blob only if the memory gain
// is larger than this flag in bytes. This only applies to functions which
// respect caffe2_keep_on_shrink.
CAFFE2_DECLARE_int64(caffe2_max_keep_on_shrink_memory);

C10_DECLARE_int64(caffe2_max_keep_on_shrink_memory);

namespace caffe2 {

@@ -604,10 +604,13 @@ struct CAFFE2_API TensorImpl : public c10::intrusive_ptr_target {
// is smaller than new size
reset_tensor = storage_.capacity() < (storage_offset_ + numel_) * storage_.itemsize();
} else {
reset_tensor = storage_.capacity() < (storage_offset_ + numel_) * storage_.itemsize() ||
!caffe2::FLAGS_caffe2_keep_on_shrink ||
storage_.capacity() - (storage_offset_ + numel_) * storage_.itemsize() >
static_cast<size_t>(caffe2::FLAGS_caffe2_max_keep_on_shrink_memory);
reset_tensor = storage_.capacity() <
(storage_offset_ + numel_) * storage_.itemsize() ||
!c10::FLAGS_caffe2_keep_on_shrink ||
storage_.capacity() -
(storage_offset_ + numel_) * storage_.itemsize() >
static_cast<size_t>(
c10::FLAGS_caffe2_max_keep_on_shrink_memory);
}

if (reset_tensor && !is_init) {
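The hunk above also rewraps the reset_tensor condition across several lines, which makes it harder to read in the diff. A standalone sketch of the shrink branch, with illustrative names standing in for the member accesses and the two c10 flags:

// Sketch of the shrink decision shown above; names are illustrative only.
#include <cstddef>

bool ShouldResetTensor(
    size_t capacity_bytes,   // storage_.capacity()
    size_t needed_bytes,     // (storage_offset_ + numel_) * storage_.itemsize()
    bool keep_on_shrink,     // c10::FLAGS_caffe2_keep_on_shrink
    size_t max_keep_bytes) { // c10::FLAGS_caffe2_max_keep_on_shrink_memory
  if (capacity_bytes < needed_bytes) {
    return true;  // existing storage is too small: must reallocate
  }
  if (!keep_on_shrink) {
    return true;  // keeping memory on shrink is disabled
  }
  // Keep the old allocation only if the wasted memory stays under the threshold.
  return (capacity_bytes - needed_bytes) > max_keep_bytes;
}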
2 changes: 1 addition & 1 deletion aten/src/ATen/mkl/README.md
@@ -1,4 +1,4 @@
All files living in this directory are written with the assumption that MKL is available,
which means that this code is not guarded by `#if AT_MKL_ENABLED()`. Therefore, whenever
you need to use definitions from here, please guard the `#include<ATen/mkl/*.h>` and
definition usages with `#if AT_MKL_ENABLED()` macro, e.g. [SpectralOps.cpp](native/mkl/SpectralOps.cpp).
definition usages with `#if AT_MKL_ENABLED()` macro, e.g. [SpectralOps.cpp](native/mkl/SpectralOps.cpp).
2 changes: 1 addition & 1 deletion aten/src/TH/generic/simd/convolve.h
@@ -1 +1 @@
void convolve_5x5(float* output, float* input, float* kernel, int64_t outRows, int64_t outCols, int64_t inCols);
void convolve_5x5(float* output, float* input, float* kernel, int64_t outRows, int64_t outCols, int64_t inCols);
2 changes: 1 addition & 1 deletion aten/src/TH/generic/simd/convolve5x5_avx.cpp
@@ -211,4 +211,4 @@ void convolve_5x5_avx(float* output, float* input, float* kernel, int64_t outRow
CLEAR_AVX();
convolve_5x5_sse(&output[procCols], &input[procCols], kernel, outRows, remCols, outStride, inCols);
}
}
}
2 changes: 1 addition & 1 deletion aten/src/THCUNN/generic/SpatialFullConvolution.cu
@@ -58,4 +58,4 @@ void THNN_(SpatialFullConvolution_accGradParameters)(
kW, kH, dW, dH, padW, padH, 1, 1, adjW, adjH, scale_);
}

#endif
#endif
63 changes: 30 additions & 33 deletions binaries/caffe2_benchmark.cc
@@ -9,69 +9,66 @@ using std::map;
using std::string;
using std::vector;

CAFFE2_DEFINE_string(
C10_DEFINE_string(
backend,
"builtin",
"The backend to use when running the model. The allowed "
"backend choices are: builtin, default, nnpack, eigen, mkl, cuda");

CAFFE2_DEFINE_string(
init_net,
"",
"The given net to initialize any parameters.");
CAFFE2_DEFINE_string(
C10_DEFINE_string(init_net, "", "The given net to initialize any parameters.");
C10_DEFINE_string(
input,
"",
"Input that is needed for running the network. If "
"multiple input needed, use comma separated string.");
CAFFE2_DEFINE_string(
C10_DEFINE_string(
input_dims,
"",
"Alternate to input_files, if all inputs are simple "
"float TensorCPUs, specify the dimension using comma "
"separated numbers. If multiple input needed, use "
"semicolon to separate the dimension of different "
"tensors.");
CAFFE2_DEFINE_string(
C10_DEFINE_string(
input_file,
"",
"Input file that contain the serialized protobuf for "
"the input blobs. If multiple input needed, use comma "
"separated string. Must have the same number of items "
"as input does.");
CAFFE2_DEFINE_string(
C10_DEFINE_string(
input_type,
"float",
"Input type when specifying the input dimension."
"The supported types are float, uint8_t.");
CAFFE2_DEFINE_int(iter, 10, "The number of iterations to run.");
CAFFE2_DEFINE_string(net, "", "The given net to benchmark.");
CAFFE2_DEFINE_string(
C10_DEFINE_int(iter, 10, "The number of iterations to run.");
C10_DEFINE_string(net, "", "The given net to benchmark.");
C10_DEFINE_string(
output,
"",
"Output that should be dumped after the execution "
"finishes. If multiple outputs are needed, use comma "
"separated string. If you want to dump everything, pass "
"'*' as the output value.");
CAFFE2_DEFINE_string(
C10_DEFINE_string(
output_folder,
"",
"The folder that the output should be written to. This "
"folder must already exist in the file system.");
CAFFE2_DEFINE_bool(
C10_DEFINE_bool(
run_individual,
false,
"Whether to benchmark individual operators.");
CAFFE2_DEFINE_int(
C10_DEFINE_int(
sleep_before_run,
0,
"The seconds to sleep before starting the benchmarking.");
CAFFE2_DEFINE_bool(
C10_DEFINE_bool(
text_output,
false,
"Whether to write out output in text format for regression purpose.");
CAFFE2_DEFINE_int(warmup, 0, "The number of iterations to warm up.");
CAFFE2_DEFINE_bool(
C10_DEFINE_int(warmup, 0, "The number of iterations to warm up.");
C10_DEFINE_bool(
wipe_cache,
false,
"Whether to evict the cache before running network.");
@@ -81,19 +78,19 @@ int main(int argc, char** argv) {
benchmark(
argc,
argv,
caffe2::FLAGS_backend,
caffe2::FLAGS_init_net,
caffe2::FLAGS_input,
caffe2::FLAGS_input_dims,
caffe2::FLAGS_input_file,
caffe2::FLAGS_input_type,
caffe2::FLAGS_iter,
caffe2::FLAGS_net,
caffe2::FLAGS_output,
caffe2::FLAGS_output_folder,
caffe2::FLAGS_run_individual,
caffe2::FLAGS_sleep_before_run,
caffe2::FLAGS_text_output,
caffe2::FLAGS_warmup,
caffe2::FLAGS_wipe_cache);
c10::FLAGS_backend,
c10::FLAGS_init_net,
c10::FLAGS_input,
c10::FLAGS_input_dims,
c10::FLAGS_input_file,
c10::FLAGS_input_type,
c10::FLAGS_iter,
c10::FLAGS_net,
c10::FLAGS_output,
c10::FLAGS_output_folder,
c10::FLAGS_run_individual,
c10::FLAGS_sleep_before_run,
c10::FLAGS_text_output,
c10::FLAGS_warmup,
c10::FLAGS_wipe_cache);
}
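The overall wiring of these binaries is unchanged by the migration: flags are defined with the C10_DEFINE_* macros, caffe2::GlobalInit() parses argv into them, and the program reads c10::FLAGS_*. A minimal sketch of that flow (not the benchmark binary itself; the warmup_runs flag and the init header path are assumptions for illustration):

#include "c10/util/Flags.h"
#include "caffe2/core/init.h"     // caffe2::GlobalInit (assumed header path)
#include "caffe2/core/logging.h"

C10_DEFINE_int(warmup_runs, 0, "Number of warm-up iterations (illustrative flag).");

int main(int argc, char** argv) {
  // GlobalInit parses command-line flags such as --warmup_runs=5.
  caffe2::GlobalInit(&argc, &argv);
  LOG(INFO) << "Warm-up runs: " << c10::FLAGS_warmup_runs;
  return 0;
}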
16 changes: 8 additions & 8 deletions binaries/convert_caffe_image_db.cc
@@ -20,11 +20,11 @@
#include "caffe2/proto/caffe2_legacy.pb.h"
#include "caffe2/core/logging.h"

CAFFE2_DEFINE_string(input_db, "", "The input db.");
CAFFE2_DEFINE_string(input_db_type, "", "The input db type.");
CAFFE2_DEFINE_string(output_db, "", "The output db.");
CAFFE2_DEFINE_string(output_db_type, "", "The output db type.");
CAFFE2_DEFINE_int(batch_size, 1000, "The write batch size.");
C10_DEFINE_string(input_db, "", "The input db.");
C10_DEFINE_string(input_db_type, "", "The input db type.");
C10_DEFINE_string(output_db, "", "The output db.");
C10_DEFINE_string(output_db_type, "", "The output db type.");
C10_DEFINE_int(batch_size, 1000, "The write batch size.");

using caffe2::db::Cursor;
using caffe2::db::DB;
@@ -37,9 +37,9 @@ int main(int argc, char** argv) {
caffe2::GlobalInit(&argc, &argv);

std::unique_ptr<DB> in_db(caffe2::db::CreateDB(
caffe2::FLAGS_input_db_type, caffe2::FLAGS_input_db, caffe2::db::READ));
c10::FLAGS_input_db_type, c10::FLAGS_input_db, caffe2::db::READ));
std::unique_ptr<DB> out_db(caffe2::db::CreateDB(
caffe2::FLAGS_output_db_type, caffe2::FLAGS_output_db, caffe2::db::NEW));
c10::FLAGS_output_db_type, c10::FLAGS_output_db, caffe2::db::NEW));
std::unique_ptr<Cursor> cursor(in_db->NewCursor());
std::unique_ptr<Transaction> transaction(out_db->NewTransaction());
int count = 0;
@@ -80,7 +80,7 @@ int main(int argc, char** argv) {
data->set_byte_data(buffer, datum.data().size());
}
transaction->Put(cursor->key(), protos.SerializeAsString());
if (++count % caffe2::FLAGS_batch_size == 0) {
if (++count % c10::FLAGS_batch_size == 0) {
transaction->Commit();
LOG(INFO) << "Converted " << count << " items so far.";
}
16 changes: 8 additions & 8 deletions binaries/convert_db.cc
@@ -19,11 +19,11 @@
#include "caffe2/proto/caffe2_pb.h"
#include "caffe2/core/logging.h"

CAFFE2_DEFINE_string(input_db, "", "The input db.");
CAFFE2_DEFINE_string(input_db_type, "", "The input db type.");
CAFFE2_DEFINE_string(output_db, "", "The output db.");
CAFFE2_DEFINE_string(output_db_type, "", "The output db type.");
CAFFE2_DEFINE_int(batch_size, 1000, "The write batch size.");
C10_DEFINE_string(input_db, "", "The input db.");
C10_DEFINE_string(input_db_type, "", "The input db type.");
C10_DEFINE_string(output_db, "", "The output db.");
C10_DEFINE_string(output_db_type, "", "The output db type.");
C10_DEFINE_int(batch_size, 1000, "The write batch size.");

using caffe2::db::Cursor;
using caffe2::db::DB;
@@ -33,15 +33,15 @@ int main(int argc, char** argv) {
caffe2::GlobalInit(&argc, &argv);

std::unique_ptr<DB> in_db(caffe2::db::CreateDB(
caffe2::FLAGS_input_db_type, caffe2::FLAGS_input_db, caffe2::db::READ));
c10::FLAGS_input_db_type, c10::FLAGS_input_db, caffe2::db::READ));
std::unique_ptr<DB> out_db(caffe2::db::CreateDB(
caffe2::FLAGS_output_db_type, caffe2::FLAGS_output_db, caffe2::db::NEW));
c10::FLAGS_output_db_type, c10::FLAGS_output_db, caffe2::db::NEW));
std::unique_ptr<Cursor> cursor(in_db->NewCursor());
std::unique_ptr<Transaction> transaction(out_db->NewTransaction());
int count = 0;
for (; cursor->Valid(); cursor->Next()) {
transaction->Put(cursor->key(), cursor->value());
if (++count % caffe2::FLAGS_batch_size == 0) {
if (++count % c10::FLAGS_batch_size == 0) {
transaction->Commit();
LOG(INFO) << "Converted " << count << " items so far.";
}
51 changes: 27 additions & 24 deletions binaries/convert_encoded_to_raw_leveldb.cc
@@ -16,9 +16,9 @@

// This script converts an image dataset to leveldb.
//
// caffe2::FLAGS_input_folder is the root folder that holds all the images, and
// caffe2::FLAGS_list_file should be a list of files as well as their labels, in the
// format as
// c10::FLAGS_input_folder is the root folder that holds all the images, and
// c10::FLAGS_list_file should be a list of files as well as their labels, in
// the format as
// subfolder1/file1.JPEG 7
// ....

@@ -35,14 +35,15 @@
#include "leveldb/db.h"
#include "leveldb/write_batch.h"

CAFFE2_DEFINE_string(input_db_name, "", "The input image file name.");
CAFFE2_DEFINE_string(output_db_name, "", "The output training leveldb name.");
CAFFE2_DEFINE_bool(color, true, "If set, load images in color.");
CAFFE2_DEFINE_int(scale, 256,
"If caffe2::FLAGS_raw is set, scale all the images' shorter edge to the given "
C10_DEFINE_string(input_db_name, "", "The input image file name.");
C10_DEFINE_string(output_db_name, "", "The output training leveldb name.");
C10_DEFINE_bool(color, true, "If set, load images in color.");
C10_DEFINE_int(
scale,
256,
"If c10::FLAGS_raw is set, scale all the images' shorter edge to the given "
"value.");
CAFFE2_DEFINE_bool(warp, false, "If warp is set, warp the images to square.");

C10_DEFINE_bool(warp, false, "If warp is set, warp the images to square.");

namespace caffe2 {

@@ -92,7 +93,7 @@ void ConvertToRawDataset(
data->set_data_type(TensorProto::BYTE);
data->add_dims(0);
data->add_dims(0);
if (caffe2::FLAGS_color) {
if (c10::FLAGS_color) {
data->add_dims(3);
}
string value;
@@ -107,28 +108,30 @@
const string& encoded_image = input_protos.protos(0).string_data(0);
int encoded_size = encoded_image.size();
cv::Mat img = cv::imdecode(
cv::Mat(1, &encoded_size, CV_8UC1,
const_cast<char*>(encoded_image.data())),
caffe2::FLAGS_color ? CV_LOAD_IMAGE_COLOR : CV_LOAD_IMAGE_GRAYSCALE);
cv::Mat(
1, &encoded_size, CV_8UC1, const_cast<char*>(encoded_image.data())),
c10::FLAGS_color ? CV_LOAD_IMAGE_COLOR : CV_LOAD_IMAGE_GRAYSCALE);
cv::Mat resized_img;
int scaled_width, scaled_height;
if (caffe2::FLAGS_warp) {
scaled_width = caffe2::FLAGS_scale;
scaled_height = caffe2::FLAGS_scale;
if (c10::FLAGS_warp) {
scaled_width = c10::FLAGS_scale;
scaled_height = c10::FLAGS_scale;
} else if (img.rows > img.cols) {
scaled_width = caffe2::FLAGS_scale;
scaled_height = static_cast<float>(img.rows) * caffe2::FLAGS_scale / img.cols;
scaled_width = c10::FLAGS_scale;
scaled_height =
static_cast<float>(img.rows) * c10::FLAGS_scale / img.cols;
} else {
scaled_height = caffe2::FLAGS_scale;
scaled_width = static_cast<float>(img.cols) * caffe2::FLAGS_scale / img.rows;
scaled_height = c10::FLAGS_scale;
scaled_width = static_cast<float>(img.cols) * c10::FLAGS_scale / img.rows;
}
cv::resize(img, resized_img, cv::Size(scaled_width, scaled_height), 0, 0,
cv::INTER_LINEAR);
data->set_dims(0, scaled_height);
data->set_dims(1, scaled_width);
DCHECK(resized_img.isContinuous());
data->set_byte_data(resized_img.ptr(),
scaled_height * scaled_width * (caffe2::FLAGS_color ? 3 : 1));
data->set_byte_data(
resized_img.ptr(),
scaled_height * scaled_width * (c10::FLAGS_color ? 3 : 1));
output_protos.SerializeToString(&value);
// Put in db
batch->Put(iter->key(), value);
Expand All @@ -151,6 +154,6 @@ void ConvertToRawDataset(
int main(int argc, char** argv) {
caffe2::GlobalInit(&argc, &argv);
caffe2::ConvertToRawDataset(
caffe2::FLAGS_input_db_name, caffe2::FLAGS_output_db_name);
c10::FLAGS_input_db_name, c10::FLAGS_output_db_name);
return 0;
}