diff --git a/compiler/compiler.h b/compiler/compiler.h index e363e707dd..afa0dbab60 100644 --- a/compiler/compiler.h +++ b/compiler/compiler.h @@ -19,6 +19,7 @@ #include "base/mutex.h" #include "base/os.h" +#include "compilation_kind.h" #include "dex/invoke_type.h" namespace art { @@ -75,8 +76,7 @@ class Compiler { jit::JitCodeCache* code_cache ATTRIBUTE_UNUSED, jit::JitMemoryRegion* region ATTRIBUTE_UNUSED, ArtMethod* method ATTRIBUTE_UNUSED, - bool baseline ATTRIBUTE_UNUSED, - bool osr ATTRIBUTE_UNUSED, + CompilationKind compilation_kind ATTRIBUTE_UNUSED, jit::JitLogger* jit_logger ATTRIBUTE_UNUSED) REQUIRES_SHARED(Locks::mutator_lock_) { return false; diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc index 632a4fb30e..2ad394235f 100644 --- a/compiler/jit/jit_compiler.cc +++ b/compiler/jit/jit_compiler.cc @@ -165,10 +165,10 @@ JitCompiler::~JitCompiler() { } bool JitCompiler::CompileMethod( - Thread* self, JitMemoryRegion* region, ArtMethod* method, bool baseline, bool osr) { + Thread* self, JitMemoryRegion* region, ArtMethod* method, CompilationKind compilation_kind) { SCOPED_TRACE << "JIT compiling " << method->PrettyMethod() - << " (baseline=" << baseline << ", osr=" << osr << ")"; + << " (kind=" << compilation_kind << ")"; DCHECK(!method->IsProxyMethod()); DCHECK(method->GetDeclaringClass()->IsResolved()); @@ -185,7 +185,7 @@ bool JitCompiler::CompileMethod( JitCodeCache* const code_cache = runtime->GetJit()->GetCodeCache(); uint64_t start_ns = NanoTime(); success = compiler_->JitCompile( - self, code_cache, region, method, baseline, osr, jit_logger_.get()); + self, code_cache, region, method, compilation_kind, jit_logger_.get()); uint64_t duration_ns = NanoTime() - start_ns; VLOG(jit) << "Compilation of " << method->PrettyMethod() diff --git a/compiler/jit/jit_compiler.h b/compiler/jit/jit_compiler.h index 09de1f8681..9dd84f0e0a 100644 --- a/compiler/jit/jit_compiler.h +++ b/compiler/jit/jit_compiler.h @@ -18,6 +18,7 @@ #define 
ART_COMPILER_JIT_JIT_COMPILER_H_ #include "base/mutex.h" +#include "compilation_kind.h" #include "jit/jit.h" @@ -40,7 +41,7 @@ class JitCompiler : public JitCompilerInterface { // Compilation entrypoint. Returns whether the compilation succeeded. bool CompileMethod( - Thread* self, JitMemoryRegion* region, ArtMethod* method, bool baseline, bool osr) + Thread* self, JitMemoryRegion* region, ArtMethod* method, CompilationKind kind) REQUIRES_SHARED(Locks::mutator_lock_) override; const CompilerOptions& GetCompilerOptions() const { diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc index ef5669add9..d3a4407b24 100644 --- a/compiler/optimizing/inliner.cc +++ b/compiler/optimizing/inliner.cc @@ -2045,8 +2045,7 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction, invoke_type, callee_dead_reference_safe, graph_->IsDebuggable(), - /* osr= */ false, - /* baseline= */ graph_->IsCompilingBaseline(), + graph_->GetCompilationKind(), /* start_instruction_id= */ caller_instruction_counter); callee_graph->SetArtMethod(resolved_method); diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h index a7ea371ace..e562b8723e 100644 --- a/compiler/optimizing/nodes.h +++ b/compiler/optimizing/nodes.h @@ -33,6 +33,7 @@ #include "base/transform_array_ref.h" #include "art_method.h" #include "class_root.h" +#include "compilation_kind.h" #include "data_type.h" #include "deoptimization_kind.h" #include "dex/dex_file.h" @@ -378,8 +379,7 @@ class HGraph : public ArenaObject<kArenaAllocGraph> { InvokeType invoke_type = kInvalidInvokeType, bool dead_reference_safe = false, bool debuggable = false, - bool osr = false, - bool baseline = false, + CompilationKind compilation_kind = CompilationKind::kOptimized, int start_instruction_id = 0) : allocator_(allocator), arena_stack_(arena_stack), @@ -415,8 +415,7 @@ class HGraph : public ArenaObject<kArenaAllocGraph> { cached_double_constants_(std::less<double>(), allocator->Adapter(kArenaAllocConstantsMap)), cached_current_method_(nullptr),
art_method_(nullptr), - osr_(osr), - baseline_(baseline), + compilation_kind_(compilation_kind), cha_single_implementation_list_(allocator->Adapter(kArenaAllocCHA)) { blocks_.reserve(kDefaultNumberOfBlocks); } @@ -645,9 +644,11 @@ class HGraph : public ArenaObject<kArenaAllocGraph> { return instruction_set_; } - bool IsCompilingOsr() const { return osr_; } + bool IsCompilingOsr() const { return compilation_kind_ == CompilationKind::kOsr; } - bool IsCompilingBaseline() const { return baseline_; } + bool IsCompilingBaseline() const { return compilation_kind_ == CompilationKind::kBaseline; } + + CompilationKind GetCompilationKind() const { return compilation_kind_; } ArenaSet<ArtMethod*>& GetCHASingleImplementationList() { return cha_single_implementation_list_; } @@ -837,14 +838,11 @@ class HGraph : public ArenaObject<kArenaAllocGraph> { // (such as when the superclass could not be found). ArtMethod* art_method_; - // Whether we are compiling this graph for on stack replacement: this will - // make all loops seen as irreducible and emit special stack maps to mark - // compiled code entries which the interpreter can directly jump to. - const bool osr_; - - // Whether we are compiling baseline (not running optimizations). This affects - // the code being generated. - const bool baseline_; + // How we are compiling the graph: either optimized, osr, or baseline. + // For osr, we will make all loops seen as irreducible and emit special + // stack maps to mark compiled code entries which the interpreter can + // directly jump to. + const CompilationKind compilation_kind_; // List of methods that are assumed to have single implementation.
ArenaSet<ArtMethod*> cha_single_implementation_list_; diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc index 8d4aa9fd5d..bae402e596 100644 --- a/compiler/optimizing/optimizing_compiler.cc +++ b/compiler/optimizing/optimizing_compiler.cc @@ -298,8 +298,7 @@ class OptimizingCompiler final : public Compiler { jit::JitCodeCache* code_cache, jit::JitMemoryRegion* region, ArtMethod* method, - bool baseline, - bool osr, + CompilationKind compilation_kind, jit::JitLogger* jit_logger) override REQUIRES_SHARED(Locks::mutator_lock_); @@ -379,8 +378,7 @@ class OptimizingCompiler final : public Compiler { CodeVectorAllocator* code_allocator, const DexCompilationUnit& dex_compilation_unit, ArtMethod* method, - bool baseline, - bool osr, + CompilationKind compilation_kind, VariableSizedHandleScope* handles) const; CodeGenerator* TryCompileIntrinsic(ArenaAllocator* allocator, @@ -717,8 +715,7 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* allocator, CodeVectorAllocator* code_allocator, const DexCompilationUnit& dex_compilation_unit, ArtMethod* method, - bool baseline, - bool osr, + CompilationKind compilation_kind, VariableSizedHandleScope* handles) const { MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kAttemptBytecodeCompilation); const CompilerOptions& compiler_options = GetCompilerOptions(); @@ -787,8 +784,7 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* allocator, kInvalidInvokeType, dead_reference_safe, compiler_options.GetDebuggable(), - /* osr= */ osr, - /* baseline= */ baseline); + compilation_kind); if (method != nullptr) { graph->SetArtMethod(method); @@ -861,7 +857,7 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* allocator, } } - if (baseline) { + if (compilation_kind == CompilationKind::kBaseline) { RunBaselineOptimizations(graph, codegen.get(), dex_compilation_unit, &pass_observer); } else { RunOptimizations(graph, codegen.get(), dex_compilation_unit,
&pass_observer); @@ -914,7 +910,7 @@ CodeGenerator* OptimizingCompiler::TryCompileIntrinsic( kInvalidInvokeType, /* dead_reference_safe= */ true, // Intrinsics don't affect dead reference safety. compiler_options.GetDebuggable(), - /* osr= */ false); + CompilationKind::kOptimized); DCHECK(Runtime::Current()->IsAotCompiler()); DCHECK(method != nullptr); @@ -1047,8 +1043,9 @@ CompiledMethod* OptimizingCompiler::Compile(const dex::CodeItem* code_item, &code_allocator, dex_compilation_unit, method, - compiler_options.IsBaseline(), - /* osr= */ false, + compiler_options.IsBaseline() + ? CompilationKind::kBaseline + : CompilationKind::kOptimized, &handles)); } } @@ -1194,10 +1191,14 @@ bool OptimizingCompiler::JitCompile(Thread* self, jit::JitCodeCache* code_cache, jit::JitMemoryRegion* region, ArtMethod* method, - bool baseline, - bool osr, + CompilationKind compilation_kind, jit::JitLogger* jit_logger) { const CompilerOptions& compiler_options = GetCompilerOptions(); + // If the baseline flag was explicitly passed, change the compilation kind + // from optimized to baseline. 
+ if (compiler_options.IsBaseline() && compilation_kind == CompilationKind::kOptimized) { + compilation_kind = CompilationKind::kBaseline; + } DCHECK(compiler_options.IsJitCompiler()); DCHECK_EQ(compiler_options.IsJitCompilerForSharedCode(), code_cache->IsSharedRegion(*region)); StackHandleScope<3> hs(self); @@ -1275,7 +1276,7 @@ bool OptimizingCompiler::JitCompile(Thread* self, ArrayRef<const uint8_t>(stack_map), debug_info, /* is_full_debug_info= */ compiler_options.GetGenerateDebugInfo(), - osr, + compilation_kind, /* has_should_deoptimize_flag= */ false, cha_single_implementation_list)) { code_cache->Free(self, region, reserved_code.data(), reserved_data.data()); @@ -1316,8 +1317,7 @@ bool OptimizingCompiler::JitCompile(Thread* self, &code_allocator, dex_compilation_unit, method, - baseline || compiler_options.IsBaseline(), - osr, + compilation_kind, &handles)); if (codegen.get() == nullptr) { return false; } @@ -1384,7 +1384,7 @@ bool OptimizingCompiler::JitCompile(Thread* self, ArrayRef<const uint8_t>(stack_map), debug_info, /* is_full_debug_info= */ compiler_options.GetGenerateDebugInfo(), - osr, + compilation_kind, codegen->GetGraph()->HasShouldDeoptimizeFlag(), codegen->GetGraph()->GetCHASingleImplementationList())) { code_cache->Free(self, region, reserved_code.data(), reserved_data.data()); diff --git a/runtime/Android.bp b/runtime/Android.bp index 3cf255c50c..e3200c4394 100644 --- a/runtime/Android.bp +++ b/runtime/Android.bp @@ -485,6 +485,7 @@ gensrcs { "base/callee_save_type.h", "base/locks.h", "class_status.h", + "compilation_kind.h", "gc_root.h", "gc/allocator_type.h", "gc/allocator/rosalloc.h", diff --git a/runtime/compilation_kind.h b/runtime/compilation_kind.h new file mode 100644 index 0000000000..c289e984d8 --- /dev/null +++ b/runtime/compilation_kind.h @@ -0,0 +1,35 @@ +/* + * Copyright (C) 2020 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
* You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_RUNTIME_COMPILATION_KIND_H_ +#define ART_RUNTIME_COMPILATION_KIND_H_ + +#include <iosfwd> +#include <stdint.h> + +namespace art { + +enum class CompilationKind { + kOsr, + kBaseline, + kOptimized, +}; + +std::ostream& operator<<(std::ostream& os, CompilationKind rhs); + +} // namespace art + +#endif // ART_RUNTIME_COMPILATION_KIND_H_ diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc index c7db749e5e..8aae7bf3b5 100644 --- a/runtime/jit/jit.cc +++ b/runtime/jit/jit.cc @@ -28,6 +28,7 @@ #include "base/scoped_flock.h" #include "base/utils.h" #include "class_root-inl.h" +#include "compilation_kind.h" #include "debugger.h" #include "dex/type_lookup_table.h" #include "gc/space/image_space.h" @@ -289,7 +290,10 @@ bool Jit::LoadCompilerLibrary(std::string* error_msg) { return true; } -bool Jit::CompileMethod(ArtMethod* method, Thread* self, bool baseline, bool osr, bool prejit) { +bool Jit::CompileMethod(ArtMethod* method, + Thread* self, + CompilationKind compilation_kind, + bool prejit) { DCHECK(Runtime::Current()->UseJitCompilation()); DCHECK(!method->IsRuntimeMethod()); @@ -319,7 +323,7 @@ bool Jit::CompileMethod(ArtMethod* method, Thread* self, bool baseline, bool osr } JitMemoryRegion* region = GetCodeCache()->GetCurrentRegion(); - if (osr && GetCodeCache()->IsSharedRegion(*region)) { + if ((compilation_kind == CompilationKind::kOsr) && GetCodeCache()->IsSharedRegion(*region)) { VLOG(jit) << "JIT not osr compiling " << method->PrettyMethod() << " due to using shared region"; @@ -329,20 +333,20 @@ bool Jit::CompileMethod(ArtMethod* method, Thread*
self, bool baseline, bool osr // If we get a request to compile a proxy method, we pass the actual Java method // of that proxy method, as the compiler does not expect a proxy method. ArtMethod* method_to_compile = method->GetInterfaceMethodIfProxy(kRuntimePointerSize); - if (!code_cache_->NotifyCompilationOf(method_to_compile, self, osr, prejit, baseline, region)) { + if (!code_cache_->NotifyCompilationOf( + method_to_compile, self, compilation_kind, prejit, region)) { return false; } VLOG(jit) << "Compiling method " << ArtMethod::PrettyMethod(method_to_compile) - << " osr=" << std::boolalpha << osr - << " baseline=" << std::boolalpha << baseline; - bool success = jit_compiler_->CompileMethod(self, region, method_to_compile, baseline, osr); - code_cache_->DoneCompiling(method_to_compile, self, osr, baseline); + << " kind=" << compilation_kind; + bool success = jit_compiler_->CompileMethod(self, region, method_to_compile, compilation_kind); + code_cache_->DoneCompiling(method_to_compile, self, compilation_kind); if (!success) { VLOG(jit) << "Failed to compile method " << ArtMethod::PrettyMethod(method_to_compile) - << " osr=" << std::boolalpha << osr; + << " kind=" << compilation_kind; } if (kIsDebugBuild) { if (self->IsExceptionPending()) { @@ -758,12 +762,11 @@ class JitCompileTask final : public Task { enum class TaskKind { kAllocateProfile, kCompile, - kCompileBaseline, - kCompileOsr, kPreCompile, }; - JitCompileTask(ArtMethod* method, TaskKind kind) : method_(method), kind_(kind), klass_(nullptr) { + JitCompileTask(ArtMethod* method, TaskKind task_kind, CompilationKind compilation_kind) + : method_(method), kind_(task_kind), compilation_kind_(compilation_kind), klass_(nullptr) { ScopedObjectAccess soa(Thread::Current()); // For a non-bootclasspath class, add a global ref to the class to prevent class unloading // until compilation is done. 
@@ -787,15 +790,12 @@ class JitCompileTask final : public Task { { ScopedObjectAccess soa(self); switch (kind_) { - case TaskKind::kPreCompile: case TaskKind::kCompile: - case TaskKind::kCompileBaseline: - case TaskKind::kCompileOsr: { + case TaskKind::kPreCompile: { Runtime::Current()->GetJit()->CompileMethod( method_, self, - /* baseline= */ (kind_ == TaskKind::kCompileBaseline), - /* osr= */ (kind_ == TaskKind::kCompileOsr), + compilation_kind_, /* prejit= */ (kind_ == TaskKind::kPreCompile)); break; } @@ -817,6 +817,7 @@ class JitCompileTask final : public Task { private: ArtMethod* const method_; const TaskKind kind_; + const CompilationKind compilation_kind_; jobject klass_; DISALLOW_IMPLICIT_CONSTRUCTORS(JitCompileTask); @@ -1343,9 +1344,10 @@ bool Jit::CompileMethodFromProfile(Thread* self, (entry_point == GetQuickResolutionStub())) { method->SetPreCompiled(); if (!add_to_queue) { - CompileMethod(method, self, /* baseline= */ false, /* osr= */ false, /* prejit= */ true); + CompileMethod(method, self, CompilationKind::kOptimized, /* prejit= */ true); } else { - Task* task = new JitCompileTask(method, JitCompileTask::TaskKind::kPreCompile); + Task* task = new JitCompileTask( + method, JitCompileTask::TaskKind::kPreCompile, CompilationKind::kOptimized); if (compile_after_boot) { MutexLock mu(Thread::Current(), boot_completed_lock_); if (!boot_completed_) { @@ -1553,7 +1555,10 @@ bool Jit::MaybeCompileMethod(Thread* self, // We failed allocating. Instead of doing the collection on the Java thread, we push // an allocation to a compiler thread, that will do the collection. thread_pool_->AddTask( - self, new JitCompileTask(method, JitCompileTask::TaskKind::kAllocateProfile)); + self, + new JitCompileTask(method, + JitCompileTask::TaskKind::kAllocateProfile, + CompilationKind::kOptimized)); // Dummy compilation kind. 
} } } @@ -1561,11 +1566,13 @@ bool Jit::MaybeCompileMethod(Thread* self, if (old_count < HotMethodThreshold() && new_count >= HotMethodThreshold()) { if (!code_cache_->ContainsPc(method->GetEntryPointFromQuickCompiledCode())) { DCHECK(thread_pool_ != nullptr); - JitCompileTask::TaskKind kind = + CompilationKind compilation_kind = (options_->UseTieredJitCompilation() || options_->UseBaselineCompiler()) - ? JitCompileTask::TaskKind::kCompileBaseline - : JitCompileTask::TaskKind::kCompile; - thread_pool_->AddTask(self, new JitCompileTask(method, kind)); + ? CompilationKind::kBaseline + : CompilationKind::kOptimized; + thread_pool_->AddTask( + self, + new JitCompileTask(method, JitCompileTask::TaskKind::kCompile, compilation_kind)); } } if (old_count < OSRMethodThreshold() && new_count >= OSRMethodThreshold()) { @@ -1576,7 +1583,8 @@ bool Jit::MaybeCompileMethod(Thread* self, if (!code_cache_->IsOsrCompiled(method)) { DCHECK(thread_pool_ != nullptr); thread_pool_->AddTask( - self, new JitCompileTask(method, JitCompileTask::TaskKind::kCompileOsr)); + self, + new JitCompileTask(method, JitCompileTask::TaskKind::kCompile, CompilationKind::kOsr)); } } } @@ -1592,7 +1600,10 @@ void Jit::EnqueueOptimizedCompilation(ArtMethod* method, Thread* self) { // task that will compile optimize the method. if (options_->UseTieredJitCompilation()) { thread_pool_->AddTask( - self, new JitCompileTask(method, JitCompileTask::TaskKind::kCompile)); + self, + new JitCompileTask(method, + JitCompileTask::TaskKind::kCompile, + CompilationKind::kOptimized)); } } @@ -1623,7 +1634,8 @@ void Jit::MethodEntered(Thread* thread, ArtMethod* method) { } // TODO(ngeoffray): For JIT at first use, use kPreCompile. Currently we don't due to // conflicts with jitzygote optimizations. 
- JitCompileTask compile_task(method, JitCompileTask::TaskKind::kCompile); + JitCompileTask compile_task( + method, JitCompileTask::TaskKind::kCompile, CompilationKind::kOptimized); // Fake being in a runtime thread so that class-load behavior will be the same as normal jit. ScopedSetRuntimeThread ssrt(thread); compile_task.Run(thread); @@ -1852,16 +1864,21 @@ void Jit::EnqueueCompilationFromNterp(ArtMethod* method, Thread* self) { // If we already have compiled code for it, nterp may be stuck in a loop. // Compile OSR. thread_pool_->AddTask( - self, new JitCompileTask(method, JitCompileTask::TaskKind::kCompileOsr)); + self, + new JitCompileTask(method, JitCompileTask::TaskKind::kCompile, CompilationKind::kOsr)); return; } if (GetCodeCache()->CanAllocateProfilingInfo()) { ProfilingInfo::Create(self, method, /* retry_allocation= */ false); thread_pool_->AddTask( - self, new JitCompileTask(method, JitCompileTask::TaskKind::kCompileBaseline)); + self, + new JitCompileTask(method, JitCompileTask::TaskKind::kCompile, CompilationKind::kBaseline)); } else { thread_pool_->AddTask( - self, new JitCompileTask(method, JitCompileTask::TaskKind::kCompile)); + self, + new JitCompileTask(method, + JitCompileTask::TaskKind::kCompile, + CompilationKind::kOptimized)); } } diff --git a/runtime/jit/jit.h b/runtime/jit/jit.h index e9fd915fc1..853db10489 100644 --- a/runtime/jit/jit.h +++ b/runtime/jit/jit.h @@ -24,6 +24,7 @@ #include "base/mutex.h" #include "base/runtime_debug.h" #include "base/timing_logger.h" +#include "compilation_kind.h" #include "handle.h" #include "offsets.h" #include "interpreter/mterp/mterp.h" @@ -192,7 +193,7 @@ class JitCompilerInterface { public: virtual ~JitCompilerInterface() {} virtual bool CompileMethod( - Thread* self, JitMemoryRegion* region, ArtMethod* method, bool baseline, bool osr) + Thread* self, JitMemoryRegion* region, ArtMethod* method, CompilationKind compilation_kind) REQUIRES_SHARED(Locks::mutator_lock_) = 0; virtual void 
TypesLoaded(mirror::Class**, size_t count) REQUIRES_SHARED(Locks::mutator_lock_) = 0; @@ -243,7 +244,7 @@ class Jit { // Create JIT itself. static Jit* Create(JitCodeCache* code_cache, JitOptions* options); - bool CompileMethod(ArtMethod* method, Thread* self, bool baseline, bool osr, bool prejit) + bool CompileMethod(ArtMethod* method, Thread* self, CompilationKind compilation_kind, bool prejit) REQUIRES_SHARED(Locks::mutator_lock_); const JitCodeCache* GetCodeCache() const { diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc index 166beeff96..4ea61c69d8 100644 --- a/runtime/jit/jit_code_cache.cc +++ b/runtime/jit/jit_code_cache.cc @@ -663,10 +663,10 @@ bool JitCodeCache::Commit(Thread* self, ArrayRef<const uint8_t> stack_map, const std::vector<uint8_t>& debug_info, bool is_full_debug_info, - bool osr, + CompilationKind compilation_kind, bool has_should_deoptimize_flag, const ArenaSet<ArtMethod*>& cha_single_implementation_list) { - DCHECK(!method->IsNative() || !osr); + DCHECK(!method->IsNative() || (compilation_kind != CompilationKind::kOsr)); if (!method->IsNative()) { // We need to do this before grabbing the lock_ because it needs to be able to see the string @@ -749,7 +749,7 @@ bool JitCodeCache::Commit(Thread* self, } else { method_code_map_.Put(code_ptr, method); } - if (osr) { + if (compilation_kind == CompilationKind::kOsr) { number_of_osr_compilations_++; osr_code_map_.Put(method, code_ptr); } else if (NeedsClinitCheckBeforeCall(method) && @@ -773,7 +773,7 @@ bool JitCodeCache::Commit(Thread* self, GetLiveBitmap()->AtomicTestAndSet(FromCodeToAllocation(code_ptr)); } VLOG(jit) - << "JIT added (osr=" << std::boolalpha << osr << std::noboolalpha << ") " + << "JIT added (kind=" << compilation_kind << ") " << ArtMethod::PrettyMethod(method) << "@" << method << " ccache_size=" << PrettySize(CodeCacheSizeLocked()) << ": " << " dcache_size=" << PrettySize(DataCacheSizeLocked()) << ": " @@ -1287,32 +1287,45 @@ void JitCodeCache::SetGarbageCollectCode(bool value) { } } -void
JitCodeCache::RemoveMethodBeingCompiled(ArtMethod* method, bool osr, bool baseline) { - DCHECK(IsMethodBeingCompiled(method, osr, baseline)); - if (osr) { - current_osr_compilations_.erase(method); - } else if (baseline) { - current_baseline_compilations_.erase(method); - } else { - current_optimized_compilations_.erase(method); +void JitCodeCache::RemoveMethodBeingCompiled(ArtMethod* method, CompilationKind kind) { + DCHECK(IsMethodBeingCompiled(method, kind)); + switch (kind) { + case CompilationKind::kOsr: + current_osr_compilations_.erase(method); + break; + case CompilationKind::kBaseline: + current_baseline_compilations_.erase(method); + break; + case CompilationKind::kOptimized: + current_optimized_compilations_.erase(method); + break; } } -void JitCodeCache::AddMethodBeingCompiled(ArtMethod* method, bool osr, bool baseline) { - DCHECK(!IsMethodBeingCompiled(method, osr, baseline)); - if (osr) { - current_osr_compilations_.insert(method); - } else if (baseline) { - current_baseline_compilations_.insert(method); - } else { - current_optimized_compilations_.insert(method); +void JitCodeCache::AddMethodBeingCompiled(ArtMethod* method, CompilationKind kind) { + DCHECK(!IsMethodBeingCompiled(method, kind)); + switch (kind) { + case CompilationKind::kOsr: + current_osr_compilations_.insert(method); + break; + case CompilationKind::kBaseline: + current_baseline_compilations_.insert(method); + break; + case CompilationKind::kOptimized: + current_optimized_compilations_.insert(method); + break; } } -bool JitCodeCache::IsMethodBeingCompiled(ArtMethod* method, bool osr, bool baseline) { - return osr ? ContainsElement(current_osr_compilations_, method) - : baseline ? 
ContainsElement(current_baseline_compilations_, method) - : ContainsElement(current_optimized_compilations_, method); +bool JitCodeCache::IsMethodBeingCompiled(ArtMethod* method, CompilationKind kind) { + switch (kind) { + case CompilationKind::kOsr: + return ContainsElement(current_osr_compilations_, method); + case CompilationKind::kBaseline: + return ContainsElement(current_baseline_compilations_, method); + case CompilationKind::kOptimized: + return ContainsElement(current_optimized_compilations_, method); + } } bool JitCodeCache::IsMethodBeingCompiled(ArtMethod* method) { @@ -1679,19 +1692,19 @@ bool JitCodeCache::IsOsrCompiled(ArtMethod* method) { bool JitCodeCache::NotifyCompilationOf(ArtMethod* method, Thread* self, - bool osr, + CompilationKind compilation_kind, bool prejit, - bool baseline, JitMemoryRegion* region) { const void* existing_entry_point = method->GetEntryPointFromQuickCompiledCode(); - if (!osr && ContainsPc(existing_entry_point)) { + if (compilation_kind != CompilationKind::kOsr && ContainsPc(existing_entry_point)) { OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromEntryPoint(existing_entry_point); - if (CodeInfo::IsBaseline(method_header->GetOptimizedCodeInfoPtr()) == baseline) { + bool is_baseline = (compilation_kind == CompilationKind::kBaseline); + if (CodeInfo::IsBaseline(method_header->GetOptimizedCodeInfoPtr()) == is_baseline) { VLOG(jit) << "Not compiling " << method->PrettyMethod() << " because it has already been compiled" - << " baseline=" << std::boolalpha << baseline; + << " kind=" << compilation_kind; return false; } } @@ -1719,7 +1732,7 @@ bool JitCodeCache::NotifyCompilationOf(ArtMethod* method, } } - if (osr) { + if (compilation_kind == CompilationKind::kOsr) { MutexLock mu(self, *Locks::jit_lock_); if (osr_code_map_.find(method) != osr_code_map_.end()) { return false; @@ -1756,7 +1769,9 @@ bool JitCodeCache::NotifyCompilationOf(ArtMethod* method, return new_compilation; } else { ProfilingInfo* info = 
method->GetProfilingInfo(kRuntimePointerSize); - if (CanAllocateProfilingInfo() && baseline && info == nullptr) { + if (CanAllocateProfilingInfo() && + (compilation_kind == CompilationKind::kBaseline) && + (info == nullptr)) { // We can retry allocation here as we're the JIT thread. if (ProfilingInfo::Create(self, method, /* retry_allocation= */ true)) { info = method->GetProfilingInfo(kRuntimePointerSize); @@ -1773,10 +1788,10 @@ bool JitCodeCache::NotifyCompilationOf(ArtMethod* method, } } MutexLock mu(self, *Locks::jit_lock_); - if (IsMethodBeingCompiled(method, osr, baseline)) { + if (IsMethodBeingCompiled(method, compilation_kind)) { return false; } - AddMethodBeingCompiled(method, osr, baseline); + AddMethodBeingCompiled(method, compilation_kind); return true; } } @@ -1800,7 +1815,9 @@ void JitCodeCache::DoneCompilerUse(ArtMethod* method, Thread* self) { info->DecrementInlineUse(); } -void JitCodeCache::DoneCompiling(ArtMethod* method, Thread* self, bool osr, bool baseline) { +void JitCodeCache::DoneCompiling(ArtMethod* method, + Thread* self, + CompilationKind compilation_kind) { DCHECK_EQ(Thread::Current(), self); MutexLock mu(self, *Locks::jit_lock_); if (UNLIKELY(method->IsNative())) { @@ -1813,7 +1830,7 @@ void JitCodeCache::DoneCompiling(ArtMethod* method, Thread* self, bool osr, bool jni_stubs_map_.erase(it); // Remove the entry added in NotifyCompilationOf(). } // else Commit() updated entrypoints of all methods in the JniStubData. 
} else { - RemoveMethodBeingCompiled(method, osr, baseline); + RemoveMethodBeingCompiled(method, compilation_kind); } } diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h index 7e00bcb6da..43406033fa 100644 --- a/runtime/jit/jit_code_cache.h +++ b/runtime/jit/jit_code_cache.h @@ -32,6 +32,7 @@ #include "base/mem_map.h" #include "base/mutex.h" #include "base/safe_map.h" +#include "compilation_kind.h" #include "jit_memory_region.h" namespace art { @@ -195,9 +196,8 @@ class JitCodeCache { bool NotifyCompilationOf(ArtMethod* method, Thread* self, - bool osr, + CompilationKind compilation_kind, bool prejit, - bool baseline, JitMemoryRegion* region) REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::jit_lock_); @@ -214,7 +214,7 @@ class JitCodeCache { REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::jit_lock_); - void DoneCompiling(ArtMethod* method, Thread* self, bool osr, bool baseline) + void DoneCompiling(ArtMethod* method, Thread* self, CompilationKind compilation_kind) REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::jit_lock_); @@ -268,7 +268,7 @@ class JitCodeCache { ArrayRef<const uint8_t> stack_map, // Compiler output (source). const std::vector<uint8_t>& debug_info, bool is_full_debug_info, - bool osr, + CompilationKind compilation_kind, bool has_should_deoptimize_flag, const ArenaSet<ArtMethod*>& cha_single_implementation_list) REQUIRES_SHARED(Locks::mutator_lock_) @@ -500,16 +500,15 @@ class JitCodeCache { REQUIRES_SHARED(Locks::mutator_lock_); // Record that `method` is being compiled with the given mode. - // TODO: introduce an enum for the mode. - void AddMethodBeingCompiled(ArtMethod* method, bool osr, bool baseline) + void AddMethodBeingCompiled(ArtMethod* method, CompilationKind compilation_kind) REQUIRES(Locks::jit_lock_); // Remove `method` from the list of methods meing compiled with the given mode.
- void RemoveMethodBeingCompiled(ArtMethod* method, bool osr, bool baseline) + void RemoveMethodBeingCompiled(ArtMethod* method, CompilationKind compilation_kind) REQUIRES(Locks::jit_lock_); // Return whether `method` is being compiled with the given mode. - bool IsMethodBeingCompiled(ArtMethod* method, bool osr, bool baseline) + bool IsMethodBeingCompiled(ArtMethod* method, CompilationKind compilation_kind) REQUIRES(Locks::jit_lock_); // Return whether `method` is being compiled in any mode. diff --git a/test/566-polymorphic-inlining/polymorphic_inline.cc b/test/566-polymorphic-inlining/polymorphic_inline.cc index 37d998ce33..e44d8a3431 100644 --- a/test/566-polymorphic-inlining/polymorphic_inline.cc +++ b/test/566-polymorphic-inlining/polymorphic_inline.cc @@ -46,7 +46,7 @@ static void do_checks(jclass cls, const char* method_name) { usleep(1000); } // Will either ensure it's compiled or do the compilation itself. - jit->CompileMethod(method, soa.Self(), /*baseline=*/ false, /*osr=*/ false, /*prejit=*/ false); + jit->CompileMethod(method, soa.Self(), CompilationKind::kOptimized, /*prejit=*/ false); } CodeInfo info(header); diff --git a/test/570-checker-osr/osr.cc b/test/570-checker-osr/osr.cc index 22423e2b6b..b7365dd430 100644 --- a/test/570-checker-osr/osr.cc +++ b/test/570-checker-osr/osr.cc @@ -130,7 +130,7 @@ extern "C" JNIEXPORT void JNICALL Java_Main_ensureHasOsrCode(JNIEnv* env, usleep(1000); // Will either ensure it's compiled or do the compilation itself. 
jit->CompileMethod( - m, Thread::Current(), /*baseline=*/ false, /*osr=*/ true, /*prejit=*/ false); + m, Thread::Current(), CompilationKind::kOsr, /*prejit=*/ false); } }); } diff --git a/test/common/runtime_state.cc b/test/common/runtime_state.cc index 6c76288663..0e477827f7 100644 --- a/test/common/runtime_state.cc +++ b/test/common/runtime_state.cc @@ -284,7 +284,7 @@ static void ForceJitCompiled(Thread* self, ArtMethod* method) REQUIRES(!Locks::m // this before checking if we will execute JIT code to make sure the // method is compiled 'optimized' and not baseline (tests expect optimized // compilation). - jit->CompileMethod(method, self, /*baseline=*/ false, /*osr=*/ false, /*prejit=*/ false); + jit->CompileMethod(method, self, CompilationKind::kOptimized, /*prejit=*/ false); if (code_cache->WillExecuteJitCode(method)) { break; }