Enable all clang-tidy performance checks (pytorch#15198)
Summary:
This PR adds the final set of clang-tidy checks for our codebase: the remaining performance-related checks. Most fixes change `auto` to `const auto&` in places where unnecessary copies were being made, and add `reserve()` calls before loops that do repeated `push_back()`. A few fixes also replace calls to `std::string::find` that passed a single-character string literal with the single-char overload; the string-literal overload uses a less efficient search algorithm meant for longer substrings.
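As a rough illustration of the three patterns these checks target (hypothetical code, not taken from the diff below):

```cpp
#include <string>
#include <vector>

std::vector<std::string> label_all(const std::vector<std::string>& names) {
  std::vector<std::string> labels;
  labels.reserve(names.size());     // reserve once instead of reallocating on every push_back
  for (const auto& name : names) {  // const auto& avoids copying each string in the loop
    labels.push_back(name + ":");
  }
  return labels;
}

bool has_extension(const std::string& path) {
  return path.find('.') != std::string::npos;  // a single char picks a cheaper overload than "."
}
```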

![image](https://user-images.githubusercontent.com/6429851/49978940-adc1a780-ff01-11e8-99da-a4e431361f07.png)

ezyang apaszke
Pull Request resolved: pytorch#15198

Differential Revision: D13468797

Pulled By: goldsborough

fbshipit-source-id: 2bed1ea1c7c162b7f3e0e1026f17125e88c4d5b2
goldsborough authored and facebook-github-bot committed Dec 14, 2018
1 parent fc2856e commit 7a61306
Showing 37 changed files with 75 additions and 76 deletions.
3 changes: 2 additions & 1 deletion .clang-tidy
@@ -24,7 +24,8 @@ Checks: '
,-modernize-use-auto
,-modernize-use-default-member-init
,-modernize-use-using
,performance-unnecessary-value-param
,performance-*
,-performance-noexcept-move-constructor
'
WarningsAsErrors: '*'
HeaderFilterRegex: 'torch/csrc/.*'
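The `performance-*` wildcard enables every check in the performance group; the explicit `-performance-noexcept-move-constructor` entry then opts back out of the one check that would also require annotating move constructors. As a minimal sketch of what that excluded check complains about (a hypothetical type, not part of this change):

```cpp
#include <utility>
#include <vector>

struct Buffer {
  std::vector<char> data;
  Buffer() = default;
  // performance-noexcept-move-constructor would flag this constructor: without
  // noexcept, containers such as std::vector may copy rather than move elements
  // when they reallocate.
  Buffer(Buffer&& other) : data(std::move(other.data)) {}
};
```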
2 changes: 1 addition & 1 deletion torch/csrc/Exceptions.h
@@ -50,7 +50,7 @@ struct python_error : public std::exception {
}

python_error(python_error&& other) {
type = std::move(other.type);
type = other.type;
value = other.value;
traceback = other.traceback;
other.type = nullptr;
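The members of `python_error` are raw `PyObject*` pointers, so `std::move` on them is a no-op (moving a trivially copyable value is just a copy), which is what the `performance-move-const-arg` check targets; the move semantics come from nulling out the source object afterwards. A simplified sketch of the same pattern (not the real class):

```cpp
struct OwningHandle {
  int* ptr = nullptr;
  OwningHandle(OwningHandle&& other) {
    ptr = other.ptr;      // plain pointer copy; std::move(other.ptr) would change nothing
    other.ptr = nullptr;  // resetting the source is what makes this a "move"
  }
};
```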
8 changes: 4 additions & 4 deletions torch/csrc/api/include/torch/nn/module.h
@@ -150,7 +150,7 @@ class TORCH_API Module : public std::enable_shared_from_this<Module> {
/// \endrst
void apply(
const NamedModuleApplyFunction& function,
std::string name_prefix = std::string());
const std::string& name_prefix = std::string());

/// Applies the `function` to the `Module` and recursively to every submodule.
/// The function must accept a `const std::string&` for the key of the module,
@@ -167,7 +167,7 @@ class TORCH_API Module : public std::enable_shared_from_this<Module> {
/// \endrst
void apply(
const ConstNamedModuleApplyFunction& function,
std::string name_prefix = std::string()) const;
const std::string& name_prefix = std::string()) const;

/// Applies the `function` to the `Module` and recursively to every submodule.
/// The function must accept a `const std::shared_ptr<Module>&`.
@@ -198,7 +198,7 @@ class TORCH_API Module : public std::enable_shared_from_this<Module> {
/// \endrst
void apply(
const NamedModulePointerApplyFunction& function,
std::string name_prefix = std::string()) const;
const std::string& name_prefix = std::string()) const;

/// Returns the parameters of this `Module` and if `recurse` is true, also
/// recursively of every submodule.
@@ -243,7 +243,7 @@ class TORCH_API Module : public std::enable_shared_from_this<Module> {
/// stored in a `shared_ptr`.
/// \endrst
OrderedDict<std::string, std::shared_ptr<Module>> named_modules(
std::string name_prefix = std::string(),
const std::string& name_prefix = std::string(),
bool include_self = true) const;

/// Returns the direct submodules of this `Module`.
4 changes: 2 additions & 2 deletions torch/csrc/api/src/data/datasets/mnist.cpp
@@ -51,11 +51,11 @@ uint32_t expect_int32(std::ifstream& stream, uint32_t expected) {
return value;
}

std::string join_paths(std::string head, std::string tail) {
std::string join_paths(std::string head, const std::string& tail) {
if (head.back() != '/') {
head.push_back('/');
}
head += std::move(tail);
head += tail;
return head;
}
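Note that `head` stays a by-value parameter, presumably because the function mutates it and returns it, so callers can move an argument into it, while `tail` is only read from and becomes a `const&`. The `std::move(tail)` was dropped because `std::string::operator+=` copies the appended characters either way. A small sketch of the same split, with hypothetical names:

```cpp
#include <string>

// 'prefix' is consumed and returned, so taking it by value lets callers hand over
// ownership; 'suffix' is only appended from, so a const reference avoids a copy.
std::string with_suffix(std::string prefix, const std::string& suffix) {
  prefix += suffix;
  return prefix;
}
```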

20 changes: 10 additions & 10 deletions torch/csrc/api/src/nn/module.cpp
@@ -101,26 +101,26 @@ void Module::apply(const ConstModuleApplyFunction& function) const {

void Module::apply(
const NamedModuleApplyFunction& function,
std::string name_prefix) {
const std::string& name_prefix) {
function(/*name=*/name_prefix, *this);
apply_to_submodules(
[&function](
const std::string& name, const std::shared_ptr<Module>& module) {
function(name, *module);
},
std::move(name_prefix));
name_prefix);
}

void Module::apply(
const ConstNamedModuleApplyFunction& function,
std::string name_prefix) const {
const std::string& name_prefix) const {
function(/*name=*/name_prefix, *this);
apply_to_submodules(
[&function](
const std::string& name, const std::shared_ptr<Module>& module) {
function(name, *module);
},
std::move(name_prefix));
name_prefix);
}

void Module::apply(const ModulePointerApplyFunction& function) const {
@@ -133,10 +133,10 @@ void Module::apply(const ModulePointerApplyFunction& function) const {

void Module::apply(
const NamedModulePointerApplyFunction& function,
std::string name_prefix) const {
const std::string& name_prefix) const {
function(
/*name=*/name_prefix, shared_from_this_checked());
apply_to_submodules(function, std::move(name_prefix));
apply_to_submodules(function, name_prefix);
}

std::vector<Tensor> Module::parameters(bool recurse) const {
@@ -199,7 +199,7 @@ std::vector<std::shared_ptr<Module>> Module::modules(bool include_self) const {
}

OrderedDict<std::string, std::shared_ptr<Module>> Module::named_modules(
std::string name_prefix,
const std::string& name_prefix,
bool include_self) const {
OrderedDict<std::string, std::shared_ptr<Module>> result;
if (include_self) {
@@ -208,14 +208,14 @@ OrderedDict<std::string, std::shared_ptr<Module>> Module::named_modules(
const std::string& key, const std::shared_ptr<Module>& module) {
result.insert(key, module);
},
std::move(name_prefix));
name_prefix);
} else {
apply_to_submodules(
[&result](
const std::string& key, const std::shared_ptr<Module>& module) {
result.insert(key, module);
},
std::move(name_prefix));
name_prefix);
}
return result;
}
@@ -329,7 +329,7 @@ void Module::apply_to_submodules(
for (const auto& child : children_) {
auto qualified_name = join_name(name_prefix, child.key());
function(qualified_name, child.value());
child.value()->apply_to_submodules(function, std::move(qualified_name));
child.value()->apply_to_submodules(function, qualified_name);
}
}

3 changes: 1 addition & 2 deletions torch/csrc/api/src/nn/modules/batchnorm.cpp
@@ -13,8 +13,7 @@ namespace torch {
namespace nn {
BatchNormOptions::BatchNormOptions(int64_t features) : features_(features) {}

BatchNormImpl::BatchNormImpl(BatchNormOptions options)
: options(std::move(options)) {
BatchNormImpl::BatchNormImpl(BatchNormOptions options) : options(options) {
reset();
}

2 changes: 1 addition & 1 deletion torch/csrc/api/src/nn/modules/embedding.cpp
@@ -14,7 +14,7 @@ EmbeddingOptions::EmbeddingOptions(int64_t count, int64_t dimension)
: count_(count), dimension_(dimension) {}

EmbeddingImpl::EmbeddingImpl(EmbeddingOptions options)
: options(std::move(options)) {
: options(options) {
reset();
}

2 changes: 1 addition & 1 deletion torch/csrc/api/src/nn/modules/linear.cpp
@@ -10,7 +10,7 @@ namespace torch {
namespace nn {
LinearOptions::LinearOptions(int64_t in, int64_t out) : in_(in), out_(out) {}

LinearImpl::LinearImpl(LinearOptions options) : options(std::move(options)) {
LinearImpl::LinearImpl(LinearOptions options) : options(options) {
reset();
}

12 changes: 5 additions & 7 deletions torch/csrc/api/src/nn/modules/rnn.cpp
@@ -131,7 +131,7 @@ RNNOutput RNNImplBase<Derived>::generic_forward(
}
Tensor output, new_state;
std::tie(output, new_state) = function(
std::move(input),
input,
std::move(state),
flat_weights_,
options.with_bias_,
@@ -208,12 +208,12 @@ RNNOutput RNNImpl::forward(const Tensor& input, Tensor state) {
case RNNActivation::ReLU:
return generic_forward(
static_cast<RNNFunctionSignature*>(&torch::rnn_relu),
std::move(input),
input,
std::move(state));
case RNNActivation::Tanh:
return generic_forward(
static_cast<RNNFunctionSignature*>(&torch::rnn_tanh),
std::move(input),
input,
std::move(state));
default:
AT_ERROR("Unhandled RNN activation function!");
@@ -244,7 +244,7 @@ RNNOutput LSTMImpl::forward(const Tensor& input, Tensor state) {
}
Tensor output, hidden_state, cell_state;
std::tie(output, hidden_state, cell_state) = torch::lstm(
std::move(input),
input,
{state[0], state[1]},
flat_weights_,
options.with_bias_,
@@ -266,9 +266,7 @@ GRUImpl::GRUImpl(const GRUOptions& options)

RNNOutput GRUImpl::forward(const Tensor& input, Tensor state) {
return generic_forward(
static_cast<RNNFunctionSignature*>(&torch::gru),
std::move(input),
std::move(state));
static_cast<RNNFunctionSignature*>(&torch::gru), input, std::move(state));
}
} // namespace nn
} // namespace torch
1 change: 1 addition & 0 deletions torch/csrc/api/src/optim/serialize.cpp
@@ -17,6 +17,7 @@ void serialize(
const std::string& key,
const std::vector<int64_t>& steps) {
std::vector<torch::Tensor> tensors;
tensors.reserve(steps.size());
for (const auto& step : steps) {
tensors.push_back(torch::tensor(static_cast<int64_t>(step)));
}
2 changes: 1 addition & 1 deletion torch/csrc/autograd/VariableTypeUtils.h
@@ -153,7 +153,7 @@ inline Tensor as_variable(Tensor tensor) {

inline std::vector<Tensor> as_variable(TensorList tl) {
return fmap(tl, [](const Tensor& t) -> Tensor {
return make_variable(std::move(t), /*requires_grad=*/false);
return make_variable(t, /*requires_grad=*/false);
});
}

2 changes: 1 addition & 1 deletion torch/csrc/autograd/engine.cpp
@@ -408,7 +408,7 @@ static variable_list call_function(FunctionTask& task) {

if(has_post_hooks){
// NOLINTNEXTLINE(bugprone-use-after-move)
return call_post_hooks(fn, std::move(outputs), std::move(inputs));
return call_post_hooks(fn, std::move(outputs), inputs);
}
return outputs;
}
5 changes: 1 addition & 4 deletions torch/csrc/autograd/python_function.cpp
@@ -585,10 +585,7 @@ static Node* _trace_pre_record(
Py_INCREF(op_obj);
auto pyobj = THPObjectPtr(op_obj);
return jit::tracer::preRecordPythonTrace(
std::move(pyobj),
std::move(arg_types),
input_vars,
std::move(scalar_args));
std::move(pyobj), arg_types, input_vars, std::move(scalar_args));
}

static void _trace_post_record(
2 changes: 1 addition & 1 deletion torch/csrc/jit/batched/BatchTensor.cpp
@@ -31,7 +31,7 @@ BatchTensor::BatchTensor(const std::vector<at::Tensor>& datalist, at::Tensor dim
sizes[0] = bs;
mask_sizes[0] = bs;
for(int64_t i = 1; i < dims.size(0) + 1; i++){
for(auto x : datalist){
for(const auto& x : datalist){
sizes[i] = std::max(sizes[i], x.size(i));
}
mask_sizes[i] = *dims[i - 1].data<uint8_t>() ? sizes[i] : 1;
8 changes: 4 additions & 4 deletions torch/csrc/jit/constants.cpp
@@ -102,27 +102,27 @@ RegisterOperators reg({
return 0;
};
} else if(type->isSubtypeOf(ListType::ofInts())) {
auto is = node->is(attr::value);
const auto& is = node->is(attr::value);
return [is](Stack& stack) {
push(stack, is);
return 0;
};
} else if(type->isSubtypeOf(ListType::ofBools())) {
auto bs = node->is(attr::value);
const auto& bs = node->is(attr::value);
return [bs](Stack& stack) {
push(stack, bs);
return 0;
};
} else if(type->isSubtypeOf(ListType::ofTensors())) {
auto ts = fmap(node->ts(attr::value), [](const at::Tensor & t) -> at::Tensor {
const auto& ts = fmap(node->ts(attr::value), [](const at::Tensor & t) -> at::Tensor {
return autograd::make_variable(t);
});
return [ts](Stack& stack) {
push(stack, ts);
return 0;
};
} else if (type == StringType::get()) {
auto s = node->s(attr::value);
const auto& s = node->s(attr::value);
return [s](Stack& stack) {
push(stack, s);
return 0;
2 changes: 1 addition & 1 deletion torch/csrc/jit/custom_operator.h
@@ -63,7 +63,7 @@ Node* getTracedNode(
const std::tuple<Types...>& tuple) {
auto symbol = Symbol::fromQualString(schema.name());
const auto& graph = tracer::getTracingState()->graph;
Node* node = graph->create(std::move(symbol), /*num_outputs=*/0);
Node* node = graph->create(symbol, /*num_outputs=*/0);
tracer::recordSourceLocation(node);

// Hack to call addInputs for the parameter pack in a sequenced fashion.
2 changes: 1 addition & 1 deletion torch/csrc/jit/fuser/compiler.cpp
@@ -51,7 +51,7 @@ int debugFuser() {
// If the given node is used once by a chunk node, returns that node.
// Returns nullptr otherwise.
static const Node* usedInFusedChunk(const Value* input) {
const auto uses = input->uses();
const auto& uses = input->uses();
if (uses.size() == 1) {
const Node *user = uses[0].user;
if (user->kind() == prim::ConstantChunk) {
2 changes: 1 addition & 1 deletion torch/csrc/jit/graph_executor.cpp
@@ -503,7 +503,7 @@ struct GraphExecutorImpl {
}

void runTraced(Stack & stack) {
auto state = tracer::getTracingState();
const auto& state = tracer::getTracingState();
auto inputs = last(stack, num_inputs);
auto input_values = fmap(inputs, [](const IValue & v) {
return tracer::getNestedValueTrace(v);
4 changes: 2 additions & 2 deletions torch/csrc/jit/init.cpp
@@ -300,7 +300,7 @@ void initJITBindings(PyObject *module) {
m.def("_jit_get_operation", [](const std::string& qualified_name) {
try {
auto symbol = Symbol::fromQualString(qualified_name);
auto operations = getAllOperatorsFor(std::move(symbol));
auto operations = getAllOperatorsFor(symbol);
AT_CHECK(!operations.empty(), "No such operator ", qualified_name);
AT_CHECK(
operations.size() == 1,
@@ -338,7 +338,7 @@ void initJITBindings(PyObject *module) {
});
m.def("_jit_get_schemas_for_operator", [](const std::string& qualified_name) {
auto symbol = Symbol::fromQualString(qualified_name);
auto operations = getAllOperatorsFor(std::move(symbol));
auto operations = getAllOperatorsFor(symbol);
return fmap(operations, [](const std::shared_ptr<Operator>& op) {
return op->schema();
});
2 changes: 1 addition & 1 deletion torch/csrc/jit/interpreter.h
@@ -73,7 +73,7 @@ struct Suspend : public std::exception {

struct InterpreterContinuation {
InterpreterContinuation(InterpreterState state_, Stack stack_)
: state(std::move(state_)), stack(std::move(stack_)) {}
: state(state_), stack(std::move(stack_)) {}

void operator()() {
state.runAsync(stack);
4 changes: 2 additions & 2 deletions torch/csrc/jit/operator.cpp
@@ -253,7 +253,7 @@ struct SchemaParser {
n = "-" + L.expect(TK_NUMBER).text();
else
n = L.expect(TK_NUMBER).text();
if(kind == TypeKind::FloatType || n.find(".") != std::string::npos || n.find("e") != std::string::npos) {
if(kind == TypeKind::FloatType || n.find('.') != std::string::npos || n.find('e') != std::string::npos) {
return std::stod(n);
} else {
int64_t v = std::stoll(n);
@@ -405,7 +405,7 @@ struct OperatorRegistry {

// XXX - caller must be holding lock
void registerPendingOperators() {
for(auto op : to_register) {
for(const auto& op : to_register) {
Symbol sym = Symbol::fromQualString(op->schema().name());
operators[sym].push_back(op);
operators_by_sig[canonicalSchemaString(op->schema())] = op;
2 changes: 1 addition & 1 deletion torch/csrc/jit/passes/alias_analysis.cpp
@@ -531,7 +531,7 @@ void AliasDb::addAlias(const Value* value, Symbol alias) {
valueToAlias_[value].addSet(alias);
} else {
AliasInfo aliasInfo;
aliasInfo.addSet(std::move(alias));
aliasInfo.addSet(alias);
valueToAlias_.insert({value, std::move(aliasInfo)});
}
}
1 change: 1 addition & 0 deletions torch/csrc/jit/passes/graph_fuser.cpp
@@ -973,6 +973,7 @@ void PeepholeOptimizeShapeExpressions(Block * block) {
}
if (unique_to_value.size() != node->inputs().size()) {
std::vector<Value*> inputs;
inputs.reserve(unique_to_value.size());
for (auto & entry : unique_to_value) {
inputs.push_back(entry.second);
}
2 changes: 1 addition & 1 deletion torch/csrc/jit/passes/python_print.cpp
@@ -637,7 +637,7 @@ struct PythonPrintPass {
} else if(v.isTensorList()) {
stmt << "[";
const char* delim = "";
for(auto t : v.toTensorListRef()) {
for(const auto& t : v.toTensorListRef()) {
stmt << delim << "CONSTANTS.c" << getOrAddTensorConstant(t);
delim = ", ";
}