Add compiler type to CompilerOptions.
Let CompilerOptions hold the information about whether this is AOT or JIT
compilation, or Zygote JIT compilation for shared code.

Test: m test-art-host-gtest
Test: testrunner.py --host --optimizing --jit
Test: aosp_taimen-userdebug boots.
Change-Id: Id9200572406f8e43d99b8b61ef0e3edf43b52fff
vmarko committed May 20, 2020
1 parent 1f5300a commit 695348f
Showing 14 changed files with 87 additions and 86 deletions.
1 change: 1 addition & 0 deletions compiler/driver/compiler_options.cc
@@ -50,6 +50,7 @@ CompilerOptions::CompilerOptions()
dex_files_for_oat_file_(),
image_classes_(),
verification_results_(nullptr),
+ compiler_type_(CompilerType::kAotCompiler),
image_type_(ImageType::kNone),
compile_art_test_(false),
baseline_(false),
21 changes: 21 additions & 0 deletions compiler/driver/compiler_options.h
@@ -69,6 +69,13 @@ class CompilerOptions final {
static const size_t kDefaultInlineMaxCodeUnits = 32;
static constexpr size_t kUnsetInlineMaxCodeUnits = -1;

+ enum class CompilerType : uint8_t {
+   kAotCompiler,            // AOT compiler.
+   kJitCompiler,            // Normal JIT compiler.
+   kSharedCodeJitCompiler,  // Zygote JIT producing code in the shared region area, putting
+                            // restrictions on, for example, how literals are being generated.
+ };
+
enum class ImageType : uint8_t {
kNone, // JIT or AOT app compilation producing only an oat file but no image.
kBootImage, // Creating boot image.
@@ -191,6 +198,19 @@ class CompilerOptions final {
return implicit_so_checks_;
}

+ bool IsAotCompiler() const {
+   return compiler_type_ == CompilerType::kAotCompiler;
+ }
+
+ bool IsJitCompiler() const {
+   return compiler_type_ == CompilerType::kJitCompiler ||
+          compiler_type_ == CompilerType::kSharedCodeJitCompiler;
+ }
+
+ bool IsJitCompilerForSharedCode() const {
+   return compiler_type_ == CompilerType::kSharedCodeJitCompiler;
+ }
+
bool GetImplicitSuspendChecks() const {
return implicit_suspend_checks_;
}
@@ -394,6 +414,7 @@ class CompilerOptions final {
// Results of AOT verification.
const VerificationResults* verification_results_;

+ CompilerType compiler_type_;
ImageType image_type_;
bool compile_art_test_;
bool baseline_;
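A note for readers skimming the new API: the three predicates partition the enum so that both JIT values satisfy IsJitCompiler(), only the Zygote variant satisfies IsJitCompilerForSharedCode(), and AOT remains the default. The following is a minimal, self-contained sketch of those semantics, not the real class (which lives in compiler/driver/compiler_options.h):

#include <cassert>
#include <cstdint>

// Stand-in mirroring the enum and predicates added above.
enum class CompilerType : uint8_t { kAotCompiler, kJitCompiler, kSharedCodeJitCompiler };

struct CompilerOptionsSketch {
  CompilerType compiler_type_ = CompilerType::kAotCompiler;  // Default: AOT.

  bool IsAotCompiler() const { return compiler_type_ == CompilerType::kAotCompiler; }
  bool IsJitCompiler() const {
    return compiler_type_ == CompilerType::kJitCompiler ||
           compiler_type_ == CompilerType::kSharedCodeJitCompiler;
  }
  bool IsJitCompilerForSharedCode() const {
    return compiler_type_ == CompilerType::kSharedCodeJitCompiler;
  }
};

int main() {
  CompilerOptionsSketch opts;
  assert(opts.IsAotCompiler() && !opts.IsJitCompiler());       // Default is AOT.
  opts.compiler_type_ = CompilerType::kSharedCodeJitCompiler;  // Zygote JIT.
  assert(opts.IsJitCompiler() && opts.IsJitCompilerForSharedCode());
  return 0;
}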
4 changes: 4 additions & 0 deletions compiler/jit/jit_compiler.cc
@@ -55,6 +55,10 @@ void JitCompiler::ParseCompilerOptions() {
UNREACHABLE();
}
}
+ // Set to appropriate JIT compiler type.
+ compiler_options_->compiler_type_ = runtime->IsZygote()
+     ? CompilerOptions::CompilerType::kSharedCodeJitCompiler
+     : CompilerOptions::CompilerType::kJitCompiler;
// JIT is never PIC, no matter what the runtime compiler options specify.
compiler_options_->SetNonPic();

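The selection rule above is the only place the type is decided for JIT: a Zygote runtime emits code into the shared region, so it gets kSharedCodeJitCompiler; any other runtime gets plain kJitCompiler. A hedged sketch of that decision (is_zygote stands in for Runtime::Current()->IsZygote(), and the enum is the same stand-in as in the sketch above):

#include <cassert>
#include <cstdint>

enum class CompilerType : uint8_t { kAotCompiler, kJitCompiler, kSharedCodeJitCompiler };

CompilerType SelectJitCompilerType(bool is_zygote) {
  // Zygote JIT emits code into the shared region, which restricts e.g. how
  // literals may be generated, so it is tracked as a distinct type.
  return is_zygote ? CompilerType::kSharedCodeJitCompiler
                   : CompilerType::kJitCompiler;
}

int main() {
  assert(SelectJitCompilerType(/*is_zygote=*/true) == CompilerType::kSharedCodeJitCompiler);
  assert(SelectJitCompilerType(/*is_zygote=*/false) == CompilerType::kJitCompiler);
  return 0;
}

Deciding the type once at option-parsing time is what lets every later codegen query below drop its Runtime access.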
26 changes: 10 additions & 16 deletions compiler/optimizing/code_generator_arm64.cc
@@ -92,12 +92,6 @@ static constexpr uint32_t kPackedSwitchCompareJumpThreshold = 7;
// the offset explicitly.
constexpr uint32_t kReferenceLoadMinFarOffset = 16 * KB;

- ALWAYS_INLINE static inline bool UseJitCompilation() {
-   Runtime* runtime = Runtime::Current();
-   // Note: There may be no Runtime for gtests; gtests use debug builds.
-   return (!kIsDebugBuild || runtime != nullptr) && runtime->UseJitCompilation();
- }
-
inline Condition ARM64Condition(IfCondition cond) {
switch (cond) {
case kCondEQ: return eq;
@@ -944,7 +938,7 @@ void CodeGeneratorARM64::Finalize(CodeAllocator* allocator) {
EmitJumpTables();

// Emit JIT baker read barrier slow paths.
- DCHECK(UseJitCompilation() || jit_baker_read_barrier_slow_paths_.empty());
+ DCHECK(GetCompilerOptions().IsJitCompiler() || jit_baker_read_barrier_slow_paths_.empty());
for (auto& entry : jit_baker_read_barrier_slow_paths_) {
uint32_t encoded_data = entry.first;
vixl::aarch64::Label* slow_path_entry = &entry.second.label;
@@ -1788,7 +1782,7 @@ void CodeGeneratorARM64::InvokeRuntime(QuickEntrypointEnum entrypoint,
// Reduce code size for AOT by using shared trampolines for slow path runtime calls across the
// entire oat file. This adds an extra branch and we do not want to slow down the main path.
// For JIT, thunk sharing is per-method, so the gains would be smaller or even negative.
- if (slow_path == nullptr || UseJitCompilation()) {
+ if (slow_path == nullptr || GetCompilerOptions().IsJitCompiler()) {
__ Ldr(lr, MemOperand(tr, entrypoint_offset.Int32Value()));
// Ensure the pc position is recorded immediately after the `blr` instruction.
ExactAssemblyScope eas(GetVIXLAssembler(), kInstructionSize, CodeBufferCheckScope::kExactSize);
@@ -4535,7 +4529,7 @@ vixl::aarch64::Label* CodeGeneratorARM64::NewStringBssEntryPatch(

void CodeGeneratorARM64::EmitEntrypointThunkCall(ThreadOffset64 entrypoint_offset) {
DCHECK(!__ AllowMacroInstructions()); // In ExactAssemblyScope.
- DCHECK(!UseJitCompilation());
+ DCHECK(!GetCompilerOptions().IsJitCompiler());
call_entrypoint_patches_.emplace_back(/*dex_file*/ nullptr, entrypoint_offset.Uint32Value());
vixl::aarch64::Label* bl_label = &call_entrypoint_patches_.back().label;
__ bind(bl_label);
@@ -4544,7 +4538,7 @@ void CodeGeneratorARM64::EmitEntrypointThunkCall(ThreadOffset64 entrypoint_offse

void CodeGeneratorARM64::EmitBakerReadBarrierCbnz(uint32_t custom_data) {
DCHECK(!__ AllowMacroInstructions()); // In ExactAssemblyScope.
- if (UseJitCompilation()) {
+ if (GetCompilerOptions().IsJitCompiler()) {
auto it = jit_baker_read_barrier_slow_paths_.FindOrAdd(custom_data);
vixl::aarch64::Label* slow_path_entry = &it->second.label;
__ cbnz(mr, slow_path_entry);
@@ -4635,7 +4629,7 @@ void CodeGeneratorARM64::LoadBootImageAddress(vixl::aarch64::Register reg,
vixl::aarch64::Label* ldr_label = NewBootImageRelRoPatch(boot_image_reference, adrp_label);
EmitLdrOffsetPlaceholder(ldr_label, reg.W(), reg.X());
} else {
- DCHECK(Runtime::Current()->UseJitCompilation());
+ DCHECK(GetCompilerOptions().IsJitCompiler());
gc::Heap* heap = Runtime::Current()->GetHeap();
DCHECK(!heap->GetBootImageSpaces().empty());
const uint8_t* address = heap->GetBootImageSpaces()[0]->Begin() + boot_image_reference;
@@ -4847,11 +4841,11 @@ HLoadClass::LoadKind CodeGeneratorARM64::GetSupportedLoadClassKind(
case HLoadClass::LoadKind::kBootImageLinkTimePcRelative:
case HLoadClass::LoadKind::kBootImageRelRo:
case HLoadClass::LoadKind::kBssEntry:
- DCHECK(!Runtime::Current()->UseJitCompilation());
+ DCHECK(!GetCompilerOptions().IsJitCompiler());
break;
case HLoadClass::LoadKind::kJitBootImageAddress:
case HLoadClass::LoadKind::kJitTableAddress:
- DCHECK(Runtime::Current()->UseJitCompilation());
+ DCHECK(GetCompilerOptions().IsJitCompiler());
break;
case HLoadClass::LoadKind::kRuntimeCall:
break;
@@ -5066,11 +5060,11 @@ HLoadString::LoadKind CodeGeneratorARM64::GetSupportedLoadStringKind(
case HLoadString::LoadKind::kBootImageLinkTimePcRelative:
case HLoadString::LoadKind::kBootImageRelRo:
case HLoadString::LoadKind::kBssEntry:
- DCHECK(!Runtime::Current()->UseJitCompilation());
+ DCHECK(!GetCompilerOptions().IsJitCompiler());
break;
case HLoadString::LoadKind::kJitBootImageAddress:
case HLoadString::LoadKind::kJitTableAddress:
- DCHECK(Runtime::Current()->UseJitCompilation());
+ DCHECK(GetCompilerOptions().IsJitCompiler());
break;
case HLoadString::LoadKind::kRuntimeCall:
break;
@@ -6684,7 +6678,7 @@ void CodeGeneratorARM64::CompileBakerReadBarrierThunk(Arm64Assembler& assembler,

// For JIT, the slow path is considered part of the compiled method,
// so JIT should pass null as `debug_name`.
- DCHECK(!UseJitCompilation() || debug_name == nullptr);
+ DCHECK(!GetCompilerOptions().IsJitCompiler() || debug_name == nullptr);
if (debug_name != nullptr && GetCompilerOptions().GenerateAnyDebugInfo()) {
std::ostringstream oss;
oss << "BakerReadBarrierThunk";
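These hunks also show why the deleted UseJitCompilation() helper existed at all: gtests create no Runtime, so the helper had to short-circuit before dereferencing a null pointer, and that was safe only because gtests use debug builds. Querying CompilerOptions removes the global-state dependency. A self-contained sketch of the contrast, with stand-ins for the real ART classes:

#include <cassert>

// Stand-ins only; the real Runtime and CompilerOptions live in ART.
struct Runtime {
  bool UseJitCompilation() const { return true; }
  static Runtime* Current() { return nullptr; }  // gtests: no Runtime exists.
};

struct CompilerOptions {
  bool jit = false;
  bool IsJitCompiler() const { return jit; }  // Plain field query, no globals.
};

constexpr bool kIsDebugBuild = true;

// Old pattern (deleted above): must dodge a null Runtime. The !kIsDebugBuild
// term short-circuits the dereference away, which is safe only under the
// assumption that gtests always use debug builds.
bool UseJitCompilationOld() {
  Runtime* runtime = Runtime::Current();
  return (!kIsDebugBuild || runtime != nullptr) && runtime->UseJitCompilation();
}

int main() {
  assert(!UseJitCompilationOld());  // No Runtime: silently falls back to false.
  CompilerOptions options;          // New pattern: needs no Runtime at all.
  assert(!options.IsJitCompiler());
  return 0;
}

The same helper deletion and query swap repeats in code_generator_arm_vixl.cc below, and the x86 and x86-64 back ends get the analogous DCHECK updates.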
26 changes: 10 additions & 16 deletions compiler/optimizing/code_generator_arm_vixl.cc
@@ -93,12 +93,6 @@ constexpr uint32_t kReferenceLoadMinFarOffset = 4 * KB;
// Using a base helps identify when we hit Marking Register check breakpoints.
constexpr int kMarkingRegisterCheckBreakCodeBaseCode = 0x10;

- ALWAYS_INLINE static inline bool UseJitCompilation() {
-   Runtime* runtime = Runtime::Current();
-   // Note: There may be no Runtime for gtests which use debug builds.
-   return (!kIsDebugBuild || runtime != nullptr) && runtime->UseJitCompilation();
- }
-
#ifdef __
#error "ARM Codegen VIXL macro-assembler macro already defined."
#endif
@@ -1937,7 +1931,7 @@ void CodeGeneratorARMVIXL::Finalize(CodeAllocator* allocator) {
FixJumpTables();

// Emit JIT baker read barrier slow paths.
- DCHECK(UseJitCompilation() || jit_baker_read_barrier_slow_paths_.empty());
+ DCHECK(GetCompilerOptions().IsJitCompiler() || jit_baker_read_barrier_slow_paths_.empty());
for (auto& entry : jit_baker_read_barrier_slow_paths_) {
uint32_t encoded_data = entry.first;
vixl::aarch32::Label* slow_path_entry = &entry.second.label;
@@ -2517,7 +2511,7 @@ void CodeGeneratorARMVIXL::InvokeRuntime(QuickEntrypointEnum entrypoint,
// Reduce code size for AOT by using shared trampolines for slow path runtime calls across the
// entire oat file. This adds an extra branch and we do not want to slow down the main path.
// For JIT, thunk sharing is per-method, so the gains would be smaller or even negative.
- if (slow_path == nullptr || UseJitCompilation()) {
+ if (slow_path == nullptr || GetCompilerOptions().IsJitCompiler()) {
__ Ldr(lr, MemOperand(tr, entrypoint_offset.Int32Value()));
// Ensure the pc position is recorded immediately after the `blx` instruction.
// blx in T32 has only 16bit encoding that's why a stricter check for the scope is used.
@@ -7129,11 +7123,11 @@ HLoadClass::LoadKind CodeGeneratorARMVIXL::GetSupportedLoadClassKind(
case HLoadClass::LoadKind::kBootImageLinkTimePcRelative:
case HLoadClass::LoadKind::kBootImageRelRo:
case HLoadClass::LoadKind::kBssEntry:
- DCHECK(!Runtime::Current()->UseJitCompilation());
+ DCHECK(!GetCompilerOptions().IsJitCompiler());
break;
case HLoadClass::LoadKind::kJitBootImageAddress:
case HLoadClass::LoadKind::kJitTableAddress:
- DCHECK(Runtime::Current()->UseJitCompilation());
+ DCHECK(GetCompilerOptions().IsJitCompiler());
break;
case HLoadClass::LoadKind::kRuntimeCall:
break;
@@ -7394,11 +7388,11 @@ HLoadString::LoadKind CodeGeneratorARMVIXL::GetSupportedLoadStringKind(
case HLoadString::LoadKind::kBootImageLinkTimePcRelative:
case HLoadString::LoadKind::kBootImageRelRo:
case HLoadString::LoadKind::kBssEntry:
- DCHECK(!Runtime::Current()->UseJitCompilation());
+ DCHECK(!GetCompilerOptions().IsJitCompiler());
break;
case HLoadString::LoadKind::kJitBootImageAddress:
case HLoadString::LoadKind::kJitTableAddress:
- DCHECK(Runtime::Current()->UseJitCompilation());
+ DCHECK(GetCompilerOptions().IsJitCompiler());
break;
case HLoadString::LoadKind::kRuntimeCall:
break;
@@ -9076,7 +9070,7 @@ CodeGeneratorARMVIXL::PcRelativePatchInfo* CodeGeneratorARMVIXL::NewPcRelativePa

void CodeGeneratorARMVIXL::EmitEntrypointThunkCall(ThreadOffset32 entrypoint_offset) {
DCHECK(!__ AllowMacroInstructions()); // In ExactAssemblyScope.
- DCHECK(!UseJitCompilation());
+ DCHECK(!GetCompilerOptions().IsJitCompiler());
call_entrypoint_patches_.emplace_back(/*dex_file*/ nullptr, entrypoint_offset.Uint32Value());
vixl::aarch32::Label* bl_label = &call_entrypoint_patches_.back().label;
__ bind(bl_label);
@@ -9087,7 +9081,7 @@ void CodeGeneratorARMVIXL::EmitEntrypointThunkCall(ThreadOffset32 entrypoint_off

void CodeGeneratorARMVIXL::EmitBakerReadBarrierBne(uint32_t custom_data) {
DCHECK(!__ AllowMacroInstructions()); // In ExactAssemblyScope.
- if (UseJitCompilation()) {
+ if (GetCompilerOptions().IsJitCompiler()) {
auto it = jit_baker_read_barrier_slow_paths_.FindOrAdd(custom_data);
vixl::aarch32::Label* slow_path_entry = &it->second.label;
__ b(ne, EncodingSize(Wide), slow_path_entry);
@@ -9140,7 +9134,7 @@ void CodeGeneratorARMVIXL::LoadBootImageAddress(vixl32::Register reg,
EmitMovwMovtPlaceholder(labels, reg);
__ Ldr(reg, MemOperand(reg, /* offset= */ 0));
} else {
- DCHECK(Runtime::Current()->UseJitCompilation());
+ DCHECK(GetCompilerOptions().IsJitCompiler());
gc::Heap* heap = Runtime::Current()->GetHeap();
DCHECK(!heap->GetBootImageSpaces().empty());
uintptr_t address =
@@ -9717,7 +9711,7 @@ void CodeGeneratorARMVIXL::CompileBakerReadBarrierThunk(ArmVIXLAssembler& assemb

// For JIT, the slow path is considered part of the compiled method,
// so JIT should pass null as `debug_name`.
- DCHECK(!UseJitCompilation() || debug_name == nullptr);
+ DCHECK(!GetCompilerOptions().IsJitCompiler() || debug_name == nullptr);
if (debug_name != nullptr && GetCompilerOptions().GenerateAnyDebugInfo()) {
std::ostringstream oss;
oss << "BakerReadBarrierThunk";
10 changes: 5 additions & 5 deletions compiler/optimizing/code_generator_x86.cc
@@ -5144,7 +5144,7 @@ void CodeGeneratorX86::LoadBootImageAddress(Register reg,
__ movl(reg, Address(method_address_reg, CodeGeneratorX86::kDummy32BitOffset));
RecordBootImageRelRoPatch(method_address, boot_image_reference);
} else {
- DCHECK(Runtime::Current()->UseJitCompilation());
+ DCHECK(GetCompilerOptions().IsJitCompiler());
gc::Heap* heap = Runtime::Current()->GetHeap();
DCHECK(!heap->GetBootImageSpaces().empty());
const uint8_t* address = heap->GetBootImageSpaces()[0]->Begin() + boot_image_reference;
@@ -6630,11 +6630,11 @@ HLoadClass::LoadKind CodeGeneratorX86::GetSupportedLoadClassKind(
case HLoadClass::LoadKind::kBootImageLinkTimePcRelative:
case HLoadClass::LoadKind::kBootImageRelRo:
case HLoadClass::LoadKind::kBssEntry:
- DCHECK(!Runtime::Current()->UseJitCompilation());
+ DCHECK(!GetCompilerOptions().IsJitCompiler());
break;
case HLoadClass::LoadKind::kJitBootImageAddress:
case HLoadClass::LoadKind::kJitTableAddress:
- DCHECK(Runtime::Current()->UseJitCompilation());
+ DCHECK(GetCompilerOptions().IsJitCompiler());
break;
case HLoadClass::LoadKind::kRuntimeCall:
break;
@@ -6867,11 +6867,11 @@ HLoadString::LoadKind CodeGeneratorX86::GetSupportedLoadStringKind(
case HLoadString::LoadKind::kBootImageLinkTimePcRelative:
case HLoadString::LoadKind::kBootImageRelRo:
case HLoadString::LoadKind::kBssEntry:
- DCHECK(!Runtime::Current()->UseJitCompilation());
+ DCHECK(!GetCompilerOptions().IsJitCompiler());
break;
case HLoadString::LoadKind::kJitBootImageAddress:
case HLoadString::LoadKind::kJitTableAddress:
- DCHECK(Runtime::Current()->UseJitCompilation());
+ DCHECK(GetCompilerOptions().IsJitCompiler());
break;
case HLoadString::LoadKind::kRuntimeCall:
break;
10 changes: 5 additions & 5 deletions compiler/optimizing/code_generator_x86_64.cc
@@ -1134,7 +1134,7 @@ void CodeGeneratorX86_64::LoadBootImageAddress(CpuRegister reg, uint32_t boot_im
__ movl(reg, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip= */ false));
RecordBootImageRelRoPatch(boot_image_reference);
} else {
- DCHECK(Runtime::Current()->UseJitCompilation());
+ DCHECK(GetCompilerOptions().IsJitCompiler());
gc::Heap* heap = Runtime::Current()->GetHeap();
DCHECK(!heap->GetBootImageSpaces().empty());
const uint8_t* address = heap->GetBootImageSpaces()[0]->Begin() + boot_image_reference;
@@ -5966,11 +5966,11 @@ HLoadClass::LoadKind CodeGeneratorX86_64::GetSupportedLoadClassKind(
case HLoadClass::LoadKind::kBootImageLinkTimePcRelative:
case HLoadClass::LoadKind::kBootImageRelRo:
case HLoadClass::LoadKind::kBssEntry:
- DCHECK(!Runtime::Current()->UseJitCompilation());
+ DCHECK(!GetCompilerOptions().IsJitCompiler());
break;
case HLoadClass::LoadKind::kJitBootImageAddress:
case HLoadClass::LoadKind::kJitTableAddress:
- DCHECK(Runtime::Current()->UseJitCompilation());
+ DCHECK(GetCompilerOptions().IsJitCompiler());
break;
case HLoadClass::LoadKind::kRuntimeCall:
break;
@@ -6162,11 +6162,11 @@ HLoadString::LoadKind CodeGeneratorX86_64::GetSupportedLoadStringKind(
case HLoadString::LoadKind::kBootImageLinkTimePcRelative:
case HLoadString::LoadKind::kBootImageRelRo:
case HLoadString::LoadKind::kBssEntry:
- DCHECK(!Runtime::Current()->UseJitCompilation());
+ DCHECK(!GetCompilerOptions().IsJitCompiler());
break;
case HLoadString::LoadKind::kJitBootImageAddress:
case HLoadString::LoadKind::kJitTableAddress:
- DCHECK(Runtime::Current()->UseJitCompilation());
+ DCHECK(GetCompilerOptions().IsJitCompiler());
break;
case HLoadString::LoadKind::kRuntimeCall:
break;
7 changes: 3 additions & 4 deletions compiler/optimizing/inliner.cc
@@ -414,7 +414,7 @@ ArtMethod* HInliner::TryCHADevirtualization(ArtMethod* resolved_method) {
static bool IsMethodUnverified(const CompilerOptions& compiler_options, ArtMethod* method)
REQUIRES_SHARED(Locks::mutator_lock_) {
if (!method->GetDeclaringClass()->IsVerified()) {
- if (Runtime::Current()->UseJitCompilation()) {
+ if (compiler_options.IsJitCompiler()) {
// We're at runtime, we know this is cold code if the class
// is not verified, so don't bother analyzing.
return true;
@@ -673,7 +673,7 @@ HInliner::InlineCacheType HInliner::GetInlineCacheJIT(
StackHandleScope<1>* hs,
/*out*/Handle<mirror::ObjectArray<mirror::Class>>* inline_cache)
REQUIRES_SHARED(Locks::mutator_lock_) {
- DCHECK(Runtime::Current()->UseJitCompilation());
+ DCHECK(codegen_->GetCompilerOptions().IsJitCompiler());

ArtMethod* caller = graph_->GetArtMethod();
// Under JIT, we should always know the caller.
@@ -1185,7 +1185,7 @@ bool HInliner::TryInlinePolymorphicCallToSameTarget(
ArtMethod* resolved_method,
Handle<mirror::ObjectArray<mirror::Class>> classes) {
// This optimization only works under JIT for now.
- if (!Runtime::Current()->UseJitCompilation()) {
+ if (!codegen_->GetCompilerOptions().IsJitCompiler()) {
return false;
}

@@ -2046,7 +2046,6 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
callee_dead_reference_safe,
graph_->IsDebuggable(),
/* osr= */ false,
- /* is_shared_jit_code= */ graph_->IsCompilingForSharedJitCode(),
/* baseline= */ graph_->IsCompilingBaseline(),
/* start_instruction_id= */ caller_instruction_counter);
callee_graph->SetArtMethod(resolved_method);
10 changes: 5 additions & 5 deletions compiler/optimizing/instruction_builder.cc
@@ -1352,18 +1352,18 @@ bool HInstructionBuilder::IsInitialized(ObjPtr<mirror::Class> cls) const {

// Check if the class will be initialized at runtime.
if (cls->IsInitialized()) {
- Runtime* runtime = Runtime::Current();
- if (runtime->IsAotCompiler()) {
+ const CompilerOptions& compiler_options = code_generator_->GetCompilerOptions();
+ if (compiler_options.IsAotCompiler()) {
// Assume loaded only if klass is in the boot image. App classes cannot be assumed
// loaded because we don't even know what class loader will be used to load them.
- if (IsInBootImage(cls, code_generator_->GetCompilerOptions())) {
+ if (IsInBootImage(cls, compiler_options)) {
return true;
}
} else {
- DCHECK(runtime->UseJitCompilation());
+ DCHECK(compiler_options.IsJitCompiler());
if (Runtime::Current()->GetJit()->CanAssumeInitialized(
cls,
- graph_->IsCompilingForSharedJitCode())) {
+ compiler_options.IsJitCompilerForSharedCode())) {
// For JIT, the class cannot revert to an uninitialized state.
return true;
}
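Summing up the instruction_builder.cc hunk: the initialized-class decision now branches purely on the options object. AOT trusts only boot-image classes, while JIT defers to Jit::CanAssumeInitialized() with the shared-code flag. A condensed, runnable sketch under stand-in types (the shared-code policy below is a deliberately simplified assumption; the real one lives in the JIT):

#include <cassert>

enum class CompilerType { kAot, kJit, kSharedCodeJit };

struct OptionsSketch {
  CompilerType type = CompilerType::kAot;
  bool IsAotCompiler() const { return type == CompilerType::kAot; }
  bool IsJitCompilerForSharedCode() const { return type == CompilerType::kSharedCodeJit; }
};

// Stand-in for Jit::CanAssumeInitialized(); models only that the Zygote
// (shared-code) JIT answers more conservatively than the plain JIT.
bool JitCanAssumeInitialized(bool for_shared_code) { return !for_shared_code; }

bool CanAssumeInitialized(const OptionsSketch& options, bool cls_in_boot_image) {
  if (options.IsAotCompiler()) {
    // AOT: only boot-image classes are known to be loaded at runtime; app
    // classes may be loaded by an unknown class loader.
    return cls_in_boot_image;
  }
  // JIT: a class cannot revert to an uninitialized state, but the shared-code
  // case is passed along so the JIT can be more careful.
  return JitCanAssumeInitialized(options.IsJitCompilerForSharedCode());
}

int main() {
  assert(!CanAssumeInitialized({CompilerType::kAot}, /*cls_in_boot_image=*/false));
  assert(CanAssumeInitialized({CompilerType::kAot}, /*cls_in_boot_image=*/true));
  assert(CanAssumeInitialized({CompilerType::kJit}, /*cls_in_boot_image=*/false));
  return 0;
}

The design payoff is that HGraph no longer needs its own is_shared_jit_code flag (dropped from the constructor call in the inliner.cc hunk above); the bit now lives in exactly one place.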
(Diff truncated: the remaining 5 changed files are not shown.)