Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[PIR] adjust pir pass log printing #60723

Merged
merged 5 commits into from
Jan 11, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
7 changes: 7 additions & 0 deletions paddle/common/macros.h
Original file line number Diff line number Diff line change
Expand Up @@ -86,4 +86,11 @@ namespace common {
#endif // __FLT_MAX__
#endif // PADDLE_WITH_MUSL

// Defines a no-op function `RegisterSymbolsFor<name>` in the translation unit
// that invokes this macro. On its own it has no runtime effect; it exists so
// that DECLARE_FILE_SYMBOLS(name) can take a reference to it.
#define REGISTER_FILE_SYMBOLS(name) \
int RegisterSymbolsFor##name() { return 0; }

// Declares the function emitted by REGISTER_FILE_SYMBOLS(name) and calls it
// from a static initializer in the current translation unit. NOTE(review):
// this is the common idiom for forcing the linker to keep the object file
// that contains REGISTER_FILE_SYMBOLS(name) (e.g. so its static registrations
// are not dropped from a static library) — confirm against build setup.
#define DECLARE_FILE_SYMBOLS(name) \
extern int RegisterSymbolsFor##name(); \
UNUSED static int use_file_##name = RegisterSymbolsFor##name()

} // namespace common
2 changes: 1 addition & 1 deletion paddle/fluid/framework/details/gather_op_handle.cc
Original file line number Diff line number Diff line change
Expand Up @@ -14,9 +14,9 @@

#include "paddle/fluid/framework/details/gather_op_handle.h"

#include "paddle/common/macros.h"
#include "paddle/fluid/framework/details/container_cast.h"
#include "paddle/fluid/framework/details/variable_visitor.h"
#include "paddle/fluid/platform/init_phi.h"

REGISTER_FILE_SYMBOLS(gather_op_handle);

Expand Down
4 changes: 4 additions & 0 deletions paddle/fluid/framework/executor_cache.cc
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@

#include "paddle/fluid/framework/executor_cache.h"

#include "paddle/common/macros.h"
#include "paddle/fluid/framework/new_executor/interpretercore.h"
#include "paddle/fluid/framework/op_info.h"
#include "paddle/fluid/ir_adaptor/translator/translate.h"
Expand All @@ -25,6 +26,8 @@
#include "paddle/pir/pass/pass.h"
#include "paddle/pir/pass/pass_manager.h"

DECLARE_FILE_SYMBOLS(print_statistics);

PHI_DECLARE_bool(pir_apply_inplace_pass);
PHI_DECLARE_bool(print_ir);

Expand Down Expand Up @@ -562,6 +565,7 @@ std::unique_ptr<::pir::Program> ConstructBackwardIrProgram(
pm.AddPass(::pir::CreateInplacePass());
if (VLOG_IS_ON(6)) {
pm.EnableIRPrinting();
pm.EnablePrintStatistics();
}
pm.Run(res.get());
if (FLAGS_print_ir) {
Expand Down
3 changes: 2 additions & 1 deletion paddle/fluid/framework/op_compatible_info.cc
Original file line number Diff line number Diff line change
Expand Up @@ -14,11 +14,12 @@

#include "paddle/fluid/framework/op_compatible_info.h"

#include "paddle/fluid/platform/enforce.h"
#include "paddle/common/macros.h"
#include "paddle/fluid/platform/init_phi.h"
#include "paddle/fluid/string/string_helper.h"

REGISTER_FILE_SYMBOLS(op_compatible_info);

namespace paddle {
namespace framework {

Expand Down
18 changes: 15 additions & 3 deletions paddle/fluid/inference/api/analysis_predictor.cc
Original file line number Diff line number Diff line change
Expand Up @@ -828,8 +828,12 @@ bool AnalysisPredictor::PrepareExecutor() {
gpu_pm.AddPass(::pir::CreateDeadCodeEliminationPass());
gpu_pm.AddPass(::pir::CreateReplaceFetchWithShadowOutputPass());
//----------------------------------------------------------------------------------------------//

// gpu_pm.EnableIRPrinting();
if (!config_.glog_info_disabled()) {
gpu_pm.EnablePrintStatistics();
}
if (config_.ir_debug_) {
gpu_pm.EnableIRPrinting();
}
gpu_pm.Run(pir_program_.get());
}

Expand All @@ -840,8 +844,16 @@ bool AnalysisPredictor::PrepareExecutor() {
if (FLAGS_pir_apply_inplace_pass) {
lowered_pm.AddPass(::pir::CreateInplacePass());
}
if (!config_.glog_info_disabled()) {
lowered_pm.EnablePrintStatistics();
}
if (config_.ir_debug_) {
lowered_pm.EnableIRPrinting();
}
lowered_pm.Run(pir_program_.get());

LOG(INFO) << "======= pir optimization completed =======";

executor_->PrepareInterpreterCore(
sub_scope_, *pir_program_, execution_config);
} else {
Expand Down Expand Up @@ -1863,7 +1875,7 @@ void AnalysisPredictor::OptimizeInferenceProgram() {
argument_.reset(nullptr);
}
#endif
LOG(INFO) << "======= optimize end =======";
LOG(INFO) << "======= ir optimization completed =======";
}

template <>
Expand Down
4 changes: 2 additions & 2 deletions paddle/fluid/memory/allocation/aligned_allocator.cc
Original file line number Diff line number Diff line change
Expand Up @@ -14,11 +14,11 @@

#include "paddle/fluid/memory/allocation/aligned_allocator.h"

#include "paddle/common/macros.h"
#include "paddle/fluid/platform/enforce.h"

#include "paddle/fluid/platform/init_phi.h"

REGISTER_FILE_SYMBOLS(aligned_allocator);

namespace paddle {
namespace memory {
namespace allocation {
Expand Down
3 changes: 2 additions & 1 deletion paddle/fluid/memory/allocation/best_fit_allocator.cc
Original file line number Diff line number Diff line change
Expand Up @@ -17,8 +17,9 @@
#include <cmath>
#include <mutex>

#include "paddle/common/macros.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/platform/init_phi.h"

REGISTER_FILE_SYMBOLS(best_fit_allocator);

namespace paddle {
Expand Down
5 changes: 4 additions & 1 deletion paddle/fluid/memory/allocation/buffered_allocator.cc
Original file line number Diff line number Diff line change
Expand Up @@ -13,8 +13,11 @@
// limitations under the License.

#include "paddle/fluid/memory/allocation/buffered_allocator.h"
#include "paddle/fluid/platform/init_phi.h"

#include "paddle/common/macros.h"

REGISTER_FILE_SYMBOLS(buffered_allocator);

namespace paddle {
namespace memory {
namespace allocation {
Expand Down
3 changes: 2 additions & 1 deletion paddle/fluid/pir/dialect/kernel/ir/kernel_dialect.cc
Original file line number Diff line number Diff line change
Expand Up @@ -13,11 +13,12 @@
// limitations under the License.

#include "paddle/fluid/pir/dialect/kernel/ir/kernel_dialect.h"

#include "paddle/common/ddim.h"
#include "paddle/common/macros.h"
#include "paddle/fluid/pir/dialect/kernel/ir/kernel_attribute.h"
#include "paddle/fluid/pir/dialect/kernel/ir/kernel_op.h"
#include "paddle/fluid/pir/dialect/kernel/ir/kernel_type.h"
#include "paddle/fluid/platform/init_phi.h"
#include "paddle/phi/common/place.h"
#include "paddle/pir/core/ir_printer.h"

Expand Down
2 changes: 1 addition & 1 deletion paddle/fluid/pir/transforms/build_cinn_pass.cc
Original file line number Diff line number Diff line change
Expand Up @@ -240,7 +240,7 @@ class BuildCinnPass : public pir::Pass {

std::vector<GroupOpsVec> groups =
::pir::SubgraphDetector(&block, IsSupportCinn)();
PrintStatistics(groups.size());
AddStatistics(groups.size());
for (auto& group_ops : groups) {
VLOG(4) << "current group_ops.size(): " << group_ops.size();
::pir::ReplaceWithGroupOp(&block, group_ops);
Expand Down
31 changes: 19 additions & 12 deletions paddle/fluid/pir/transforms/constant_folding_pass.cc
Original file line number Diff line number Diff line change
Expand Up @@ -40,6 +40,7 @@
#include "paddle/pir/core/operation.h"
#include "paddle/pir/core/parameter.h"
#include "paddle/pir/core/program.h"
#include "paddle/pir/core/region.h"
#include "paddle/pir/pass/pass.h"
#include "paddle/pir/pattern_rewrite/frozen_rewrite_pattern_set.h"
#include "paddle/pir/pattern_rewrite/pattern_match.h"
Expand All @@ -51,7 +52,7 @@ class ConstantFoldingPattern : public pir::RewritePattern {
public:
ConstantFoldingPattern(
pir::IrContext* context,
size_t* counter,
size_t* suffix,
const phi::Place& place,
paddle::framework::Scope* scope,
paddle::framework::interpreter::ExecutionConfig* exe_config,
Expand All @@ -60,7 +61,7 @@ class ConstantFoldingPattern : public pir::RewritePattern {
1 /*benefit*/,
context,
{} /*generated_names*/),
counter_(counter),
suffix_(suffix),
place_(place),
scope_(scope),
exe_config_(exe_config),
Expand Down Expand Up @@ -298,7 +299,7 @@ class ConstantFoldingPattern : public pir::RewritePattern {
.time_since_epoch()
.count();
std::string output_var_name =
"constant_folding@_" + ss.str() + std::to_string((*counter_)++);
"constant_folding@_" + ss.str() + std::to_string((*suffix_)++);

builder.Build<pir::ShadowOutputOp>(temp_op->result(i), output_var_name);
output_var_names.push_back(output_var_name);
Expand All @@ -308,7 +309,7 @@ class ConstantFoldingPattern : public pir::RewritePattern {
}

protected:
size_t* counter_;
size_t* suffix_;
phi::Place place_;
paddle::framework::Scope* scope_;
paddle::framework::interpreter::ExecutionConfig* exe_config_;
Expand All @@ -319,13 +320,13 @@ class ConstantFoldingPatternForTrain : public ConstantFoldingPattern {
public:
ConstantFoldingPatternForTrain(
pir::IrContext* context,
size_t* counter,
size_t* suffix,
const phi::Place& place,
paddle::framework::Scope* scope,
paddle::framework::interpreter::ExecutionConfig* exe_config,
std::vector<std::string>* deleted_vars)
: ConstantFoldingPattern(
context, counter, place, scope, exe_config, deleted_vars) {}
context, suffix, place, scope, exe_config, deleted_vars) {}

bool Match(pir::Operation* op) const override {
VLOG(4) << "constant_folding_pass applys match on [" << op->name()
Expand Down Expand Up @@ -405,26 +406,32 @@ class ConstantFoldingPass : public pir::Pass {

if (Has("train_mode") && Get<bool>("train_mode")) {
ps.Add<ConstantFoldingPatternForTrain>(context,
&counter_,
&suffix_,
phi::CPUPlace{},
scope_,
&exe_config_,
&deleted_vars_);
} else {
ps.Add<ConstantFoldingPattern>(
context, &counter_, place_, scope_, &exe_config_, &deleted_vars_);
context, &suffix_, place_, scope_, &exe_config_, &deleted_vars_);
}
patterns_ = pir::FrozenRewritePatternSet(std::move(ps));
return true;
}

void Run(pir::Operation* op) override {
size_t op_nums = op->GetParentProgram()->block()->size();
int64_t num_ops{0};
for (uint32_t i = 0; i < op->num_regions(); ++i) {
auto& region = op->region(i);
for (auto& block : region) {
num_ops += block.size();
}
}
pir::GreedyRewriteConfig cfg;
cfg.use_top_down_traversal = true;
cfg.max_iterations = 10;
pir::ApplyPatternsGreedily(op, patterns_, cfg);
PrintStatistics(counter_, op_nums);
auto [_, num_rewrites] = pir::ApplyPatternsGreedily(op, patterns_, cfg);
AddStatistics(num_rewrites, num_ops);
// delete old parameter var
scope_->EraseVars(deleted_vars_);
if (place_.GetType() != phi::AllocationType::CPU) {
Expand All @@ -434,7 +441,7 @@ class ConstantFoldingPass : public pir::Pass {
}

private:
size_t counter_{0};
size_t suffix_{0};
phi::Place place_;
paddle::framework::Scope* scope_{nullptr};
paddle::framework::interpreter::ExecutionConfig exe_config_{};
Expand Down
2 changes: 1 addition & 1 deletion paddle/fluid/pir/transforms/dead_code_elimination_pass.cc
Original file line number Diff line number Diff line change
Expand Up @@ -32,7 +32,7 @@ class DeadCodeEliminationPass : public pir::Pass {
VLOG(6) << "apply dead_code_elimination_pass";
int64_t num_erasers{0};
EraseOp(*op->GetParentProgram()->block(), &num_erasers);
PrintStatistics(num_erasers);
AddStatistics(num_erasers);
}

private:
Expand Down
2 changes: 1 addition & 1 deletion paddle/fluid/pir/transforms/inplace_pass.cc
Original file line number Diff line number Diff line change
Expand Up @@ -492,7 +492,7 @@ class InplacePass : public pir::Pass {
}
}
}
PrintStatistics(num_rewrites_);
AddStatistics(num_rewrites_);
}
};

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -92,7 +92,7 @@ class ParamsSyncAmongDevicesPass : public pir::Pass {
}
}
}
PrintStatistics(num_rewrites_);
AddStatistics(num_rewrites_);
}

bool CanApplyOn(pir::Operation* op) const override {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -56,7 +56,7 @@ class ReplaceFetchWithShadowOutputPass : public pir::Pass {
cfg.use_top_down_traversal = true;
cfg.max_iterations = 10;
auto [_, num_rewrites] = pir::ApplyPatternsGreedily(op, patterns_, cfg);
PrintStatistics(num_rewrites);
AddStatistics(num_rewrites);
}

bool CanApplyOn(pir::Operation* op) const override {
Expand Down
2 changes: 1 addition & 1 deletion paddle/fluid/pir/transforms/sub_graph_extract_pass.cc
Original file line number Diff line number Diff line change
Expand Up @@ -54,7 +54,7 @@ class SubGraphExtractPass : public pir::Pass {

std::vector<GroupOpsVec> groups =
::pir::SubgraphDetector(&block, IsSplitOp)();
PrintStatistics(groups.size());
AddStatistics(groups.size());
for (auto& group_ops : groups) {
VLOG(4) << "current group_ops.size(): " << group_ops.size();
::pir::ReplaceWithGroupOp(&block, group_ops);
Expand Down
4 changes: 3 additions & 1 deletion paddle/fluid/platform/init_phi.cc
Original file line number Diff line number Diff line change
Expand Up @@ -11,8 +11,10 @@ distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/platform/init_phi.h"
#include "glog/logging.h"

#include "paddle/common/macros.h"
#include "paddle/fluid/platform/init.h"

REGISTER_FILE_SYMBOLS(init_phi)
Expand Down
7 changes: 0 additions & 7 deletions paddle/fluid/platform/init_phi.h
Original file line number Diff line number Diff line change
Expand Up @@ -23,11 +23,4 @@ class PADDLE_API InitPhi {
InitPhi();
};

#define REGISTER_FILE_SYMBOLS(name) \
int RegisterSymbolsFor##name() { return 0; }

#define DECLARE_FILE_SYMBOLS(name) \
extern int RegisterSymbolsFor##name(); \
UNUSED static int use_file_##name = RegisterSymbolsFor##name()

} // namespace paddle
3 changes: 2 additions & 1 deletion paddle/fluid/pybind/pir.cc
Original file line number Diff line number Diff line change
Expand Up @@ -1638,7 +1638,8 @@ void BindPassManager(pybind11::module *m) {
return pass_names;
})
.def("run", [](PassManager &self, Program *p) { self.Run(p); })
.def("empty", &PassManager::Empty);
.def("empty", &PassManager::empty)
.def("clear", &PassManager::clear);
}

void BindPir(pybind11::module *module) {
Expand Down
2 changes: 1 addition & 1 deletion paddle/fluid/pybind/pybind.cc
Original file line number Diff line number Diff line change
Expand Up @@ -81,6 +81,7 @@ limitations under the License. */
#ifdef PADDLE_WITH_CUDA
#include "paddle/fluid/memory/allocation/cuda_ipc_allocator.h"
#endif
#include "paddle/common/macros.h"
#include "paddle/fluid/memory/allocation/mmap_allocator.h"
#include "paddle/fluid/operators/activation_op.h"
#include "paddle/fluid/operators/common_infer_shape_functions.h"
Expand All @@ -92,7 +93,6 @@ limitations under the License. */
#include "paddle/fluid/platform/dynload/dynamic_loader.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/platform/init.h"
#include "paddle/fluid/platform/init_phi.h"
#include "paddle/fluid/platform/monitor.h"
#include "paddle/fluid/platform/place.h"
#include "paddle/fluid/platform/profiler.h"
Expand Down
Loading