Skip to content

Commit

Permalink
Add an IsInitialized check in AOT code with clinit at entry checks.
Browse files Browse the repository at this point in the history
The investigations behind
https://android-review.googlesource.com/c/platform/art/+/2214701 showed
that it can take some time for a class to become visibly initialized. To
avoid a busy loop between the AOT code and the resolution trampoline, we
also add an isInitialized check which, if true, is followed by a
memory barrier and the execution of the compiled code.

Test: test.py
Bug: 162110941
Change-Id: I6c36cde6ebd12b1f81281eb8a684b496f266e3ea
  • Loading branch information
Nicolas Geoffray committed Sep 14, 2022
1 parent b7b5fe5 commit f853790
Show file tree
Hide file tree
Showing 3 changed files with 20 additions and 0 deletions.
2 changes: 2 additions & 0 deletions compiler/optimizing/code_generator.h
Original file line number Diff line number Diff line change
Expand Up @@ -68,6 +68,8 @@ constexpr uint32_t shifted_visibly_initialized_value =
enum_cast<uint32_t>(ClassStatus::kVisiblyInitialized) << (status_lsb_position % kBitsPerByte);
constexpr uint32_t shifted_initializing_value =
enum_cast<uint32_t>(ClassStatus::kInitializing) << (status_lsb_position % kBitsPerByte);
constexpr uint32_t shifted_initialized_value =
enum_cast<uint32_t>(ClassStatus::kInitialized) << (status_lsb_position % kBitsPerByte);

class Assembler;
class CodeGenerator;
Expand Down
9 changes: 9 additions & 0 deletions compiler/optimizing/code_generator_arm64.cc
Original file line number Diff line number Diff line change
Expand Up @@ -1242,6 +1242,7 @@ void CodeGeneratorARM64::GenerateFrameEntry() {
if (GetCompilerOptions().ShouldCompileWithClinitCheck(GetGraph()->GetArtMethod())) {
UseScratchRegisterScope temps(masm);
vixl::aarch64::Label resolution;
vixl::aarch64::Label memory_barrier;

Register temp1 = temps.AcquireW();
Register temp2 = temps.AcquireW();
Expand All @@ -1255,6 +1256,11 @@ void CodeGeneratorARM64::GenerateFrameEntry() {
__ Cmp(temp2, shifted_visibly_initialized_value);
__ B(hs, &frame_entry_label_);

// Check if we're initialized and jump to code that does a memory barrier if
// so.
__ Cmp(temp2, shifted_initialized_value);
__ B(hs, &memory_barrier);

// Check if we're initializing and the thread initializing is the one
// executing the code.
__ Cmp(temp2, shifted_initializing_value);
Expand All @@ -1271,6 +1277,9 @@ void CodeGeneratorARM64::GenerateFrameEntry() {
GetThreadOffset<kArm64PointerSize>(kQuickQuickResolutionTrampoline);
__ Ldr(temp1.X(), MemOperand(tr, entrypoint_offset.Int32Value()));
__ Br(temp1.X());

__ Bind(&memory_barrier);
GenerateMemoryBarrier(MemBarrierKind::kAnyAny);
}
__ Bind(&frame_entry_label_);

Expand Down
9 changes: 9 additions & 0 deletions compiler/optimizing/code_generator_arm_vixl.cc
Original file line number Diff line number Diff line change
Expand Up @@ -2246,6 +2246,7 @@ void CodeGeneratorARMVIXL::GenerateFrameEntry() {
if (GetCompilerOptions().ShouldCompileWithClinitCheck(GetGraph()->GetArtMethod())) {
UseScratchRegisterScope temps(GetVIXLAssembler());
vixl32::Label resolution;
vixl32::Label memory_barrier;

// Check if we're visibly initialized.

Expand All @@ -2265,6 +2266,11 @@ void CodeGeneratorARMVIXL::GenerateFrameEntry() {
__ Cmp(temp2, shifted_visibly_initialized_value);
__ B(cs, &frame_entry_label_);

// Check if we're initialized and jump to code that does a memory barrier if
// so.
__ Cmp(temp2, shifted_initialized_value);
__ B(cs, &memory_barrier);

// Check if we're initializing and the thread initializing is the one
// executing the code.
__ Cmp(temp2, shifted_initializing_value);
Expand All @@ -2281,6 +2287,9 @@ void CodeGeneratorARMVIXL::GenerateFrameEntry() {
GetThreadOffset<kArmPointerSize>(kQuickQuickResolutionTrampoline);
__ Ldr(temp1, MemOperand(tr, entrypoint_offset.Int32Value()));
__ Bx(temp1);

__ Bind(&memory_barrier);
GenerateMemoryBarrier(MemBarrierKind::kAnyAny);
}

__ Bind(&frame_entry_label_);
Expand Down

0 comments on commit f853790

Please sign in to comment.