Skip to content

Commit

Permalink
Added global order numbers to in-process command buffers.
Browse files Browse the repository at this point in the history
Global order numbers are now generalized and managed by the
sync point manager. Previously the GpuChannel managed these
order numbers itself, but they must also be ordered with
respect to the order numbers used by in-process command
buffers.

The global order numbers have been merged into a sync point state
class (SyncPointClientState); wait/release functions will be
implemented later in SyncPointClient.

R=piman@chromium.org, sievers@chromium.org
BUG=514815

Review URL: https://codereview.chromium.org/1339203002

Cr-Commit-Position: refs/heads/master@{#350247}
  • Loading branch information
dyen authored and Commit bot committed Sep 22, 2015
1 parent 2b4bacc commit a6b0d39
Show file tree
Hide file tree
Showing 8 changed files with 303 additions and 48 deletions.
60 changes: 40 additions & 20 deletions content/common/gpu/gpu_channel.cc
Original file line number Diff line number Diff line change
Expand Up @@ -70,21 +70,22 @@ const int64 kStopPreemptThresholdMs = kVsyncIntervalMs;

} // anonymous namespace

// Begin order numbers at 1 so 0 can mean no orders.
uint32_t GpuChannelMessageQueue::global_order_counter_ = 1;

scoped_refptr<GpuChannelMessageQueue> GpuChannelMessageQueue::Create(
const base::WeakPtr<GpuChannel>& gpu_channel,
base::SingleThreadTaskRunner* task_runner) {
return new GpuChannelMessageQueue(gpu_channel, task_runner);
}

scoped_refptr<gpu::SyncPointClientState>
GpuChannelMessageQueue::GetSyncPointClientState() {
return sync_point_client_state_;
}

GpuChannelMessageQueue::GpuChannelMessageQueue(
const base::WeakPtr<GpuChannel>& gpu_channel,
base::SingleThreadTaskRunner* task_runner)
: enabled_(true),
unprocessed_order_num_(0),
processed_order_num_(0),
sync_point_client_state_(gpu::SyncPointClientState::Create()),
gpu_channel_(gpu_channel),
task_runner_(task_runner) {}

Expand All @@ -93,14 +94,20 @@ GpuChannelMessageQueue::~GpuChannelMessageQueue() {
}

uint32_t GpuChannelMessageQueue::GetUnprocessedOrderNum() const {
base::AutoLock auto_lock(channel_messages_lock_);
return unprocessed_order_num_;
return sync_point_client_state_->unprocessed_order_num();
}

void GpuChannelMessageQueue::PushBackMessage(const IPC::Message& message) {
uint32_t GpuChannelMessageQueue::GetProcessedOrderNum() const {
return sync_point_client_state_->processed_order_num();
}

void GpuChannelMessageQueue::PushBackMessage(
gpu::SyncPointManager* sync_point_manager, const IPC::Message& message) {
base::AutoLock auto_lock(channel_messages_lock_);
if (enabled_)
PushMessageHelper(make_scoped_ptr(new GpuChannelMessage(message)));
if (enabled_) {
PushMessageHelper(sync_point_manager,
make_scoped_ptr(new GpuChannelMessage(message)));
}
}

bool GpuChannelMessageQueue::GenerateSyncPointMessage(
Expand All @@ -118,7 +125,7 @@ bool GpuChannelMessageQueue::GenerateSyncPointMessage(
msg->retire_sync_point = retire_sync_point;
msg->sync_point = *sync_point;

PushMessageHelper(msg.Pass());
PushMessageHelper(sync_point_manager, msg.Pass());
return true;
}
return false;
Expand All @@ -139,19 +146,27 @@ base::TimeTicks GpuChannelMessageQueue::GetNextMessageTimeTick() const {
GpuChannelMessage* GpuChannelMessageQueue::GetNextMessage() const {
base::AutoLock auto_lock(channel_messages_lock_);
if (!channel_messages_.empty()) {
DCHECK_GT(channel_messages_.front()->order_number, processed_order_num_);
DCHECK_LE(channel_messages_.front()->order_number, unprocessed_order_num_);
DCHECK_GT(channel_messages_.front()->order_number,
sync_point_client_state_->processed_order_num());
DCHECK_LE(channel_messages_.front()->order_number,
sync_point_client_state_->unprocessed_order_num());

return channel_messages_.front();
}
return nullptr;
}

void GpuChannelMessageQueue::BeginMessageProcessing(
const GpuChannelMessage* msg) {
sync_point_client_state_->BeginProcessingOrderNumber(msg->order_number);
}

bool GpuChannelMessageQueue::MessageProcessed() {
base::AutoLock auto_lock(channel_messages_lock_);
DCHECK(!channel_messages_.empty());
scoped_ptr<GpuChannelMessage> msg(channel_messages_.front());
channel_messages_.pop_front();
processed_order_num_ = msg->order_number;
sync_point_client_state_->FinishProcessingOrderNumber(msg->order_number);
return !channel_messages_.empty();
}

Expand Down Expand Up @@ -186,15 +201,16 @@ void GpuChannelMessageQueue::ScheduleHandleMessage() {
}

void GpuChannelMessageQueue::PushMessageHelper(
gpu::SyncPointManager* sync_point_manager,
scoped_ptr<GpuChannelMessage> msg) {
channel_messages_lock_.AssertAcquired();
DCHECK(enabled_);

msg->order_number = global_order_counter_++;
msg->order_number =
sync_point_client_state_->GenerateUnprocessedOrderNumber(
sync_point_manager);
msg->time_received = base::TimeTicks::Now();

unprocessed_order_num_ = msg->order_number;

bool had_messages = !channel_messages_.empty();
channel_messages_.push_back(msg.release());
if (!had_messages)
Expand Down Expand Up @@ -339,7 +355,7 @@ bool GpuChannelMessageFilter::OnMessageReceived(const IPC::Message& message) {
base::Bind(&GpuChannel::HandleOutOfOrderMessage,
gpu_channel_, message));
} else {
message_queue_->PushBackMessage(message);
message_queue_->PushBackMessage(sync_point_manager_, message);
}
handled = true;
}
Expand Down Expand Up @@ -611,7 +627,7 @@ base::ProcessId GpuChannel::GetClientPID() const {
}

uint32_t GpuChannel::GetProcessedOrderNum() const {
return message_queue_->processed_order_num();
return message_queue_->GetProcessedOrderNum();
}

uint32_t GpuChannel::GetUnprocessedOrderNum() const {
Expand Down Expand Up @@ -806,6 +822,10 @@ bool GpuChannel::OnControlMessageReceived(const IPC::Message& msg) {
return handled;
}

scoped_refptr<gpu::SyncPointClientState> GpuChannel::GetSyncPointClientState() {
return message_queue_->GetSyncPointClientState();
}

void GpuChannel::HandleMessage() {
// If we have been preempted by another channel, just post a task to wake up.
if (preempted_flag_ && preempted_flag_->IsSet()) {
Expand All @@ -819,8 +839,8 @@ void GpuChannel::HandleMessage() {
if (!m)
return;

current_order_num_ = m->order_number;
const IPC::Message& message = m->message;
message_queue_->BeginMessageProcessing(m);
int32_t routing_id = message.routing_id();
GpuCommandBufferStub* stub = stubs_.get(routing_id);

Expand Down
36 changes: 17 additions & 19 deletions content/common/gpu/gpu_channel.h
Original file line number Diff line number Diff line change
Expand Up @@ -36,6 +36,7 @@ class WaitableEvent;

namespace gpu {
class PreemptionFlag;
class SyncPointClientState;
class SyncPointManager;
union ValueState;
class ValueStateMap;
Expand Down Expand Up @@ -166,16 +167,15 @@ class CONTENT_EXPORT GpuChannel
// Visible for testing.
GpuChannelMessageFilter* filter() const { return filter_.get(); }

// Returns the global order number of the IPC message that started processing
// last.
uint32_t current_order_num() const { return current_order_num_; }

// Returns the global order number for the last processed IPC message.
uint32_t GetProcessedOrderNum() const;

// Returns the global order number for the last unprocessed IPC message.
uint32_t GetUnprocessedOrderNum() const;

// Returns the shared sync point client state.
scoped_refptr<gpu::SyncPointClientState> GetSyncPointClientState();

void HandleMessage();

// Some messages such as WaitForGetOffsetInRange and WaitForTokenInRange are
Expand Down Expand Up @@ -279,8 +279,6 @@ class CONTENT_EXPORT GpuChannel
// Map of stream id to stream state.
base::hash_map<int32, StreamState> streams_;

uint32_t current_order_num_;

bool allow_future_sync_points_;
bool allow_real_time_streams_;

Expand Down Expand Up @@ -412,23 +410,28 @@ class GpuChannelMessageQueue
const base::WeakPtr<GpuChannel>& gpu_channel,
base::SingleThreadTaskRunner* task_runner);

// Returns the shared sync point client state.
scoped_refptr<gpu::SyncPointClientState> GetSyncPointClientState();

// Returns the global order number for the last unprocessed IPC message.
uint32_t GetUnprocessedOrderNum() const;

// Returns the global order number for the last processed IPC message.
uint32_t processed_order_num() const { return processed_order_num_; }
uint32_t GetProcessedOrderNum() const;

bool HasQueuedMessages() const;

base::TimeTicks GetNextMessageTimeTick() const;

GpuChannelMessage* GetNextMessage() const;

void BeginMessageProcessing(const GpuChannelMessage* msg);

// Should be called after a message returned by GetNextMessage is processed.
// Returns true if there are more messages on the queue.
bool MessageProcessed();

void PushBackMessage(const IPC::Message& message);
void PushBackMessage(gpu::SyncPointManager* sync_point_manager,
const IPC::Message& message);

bool GenerateSyncPointMessage(gpu::SyncPointManager* sync_point_manager,
const IPC::Message& message,
Expand All @@ -446,24 +449,19 @@ class GpuChannelMessageQueue

void ScheduleHandleMessage();

void PushMessageHelper(scoped_ptr<GpuChannelMessage> msg);

// This number is only ever incremented/read on the IO thread.
static uint32_t global_order_counter_;
void PushMessageHelper(gpu::SyncPointManager* sync_point_manager,
scoped_ptr<GpuChannelMessage> msg);

bool enabled_;

// Highest IPC order number seen, set when queued on the IO thread.
uint32_t unprocessed_order_num_;
// Both deques own the messages.
std::deque<GpuChannelMessage*> channel_messages_;

// This lock protects enabled_, unprocessed_order_num_, and channel_messages_.
// This lock protects enabled_ and channel_messages_.
mutable base::Lock channel_messages_lock_;

// Last finished IPC order number. Not protected by a lock as it's only
// accessed on the main thread.
uint32_t processed_order_num_;
// Keeps track of sync point related state such as message order numbers.
scoped_refptr<gpu::SyncPointClientState> sync_point_client_state_;

base::WeakPtr<GpuChannel> gpu_channel_;
scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
Expand Down
18 changes: 15 additions & 3 deletions content/common/gpu/gpu_command_buffer_stub.cc
Original file line number Diff line number Diff line change
Expand Up @@ -485,6 +485,8 @@ void GpuCommandBufferStub::Destroy() {
// destroy it before those.
scheduler_.reset();

sync_point_client_.reset();

bool have_context = false;
if (decoder_ && decoder_->GetGLContext()) {
// Try to make the context current regardless of whether it was lost, so we
Expand Down Expand Up @@ -528,10 +530,22 @@ void GpuCommandBufferStub::OnInitialize(
bool result = command_buffer_->Initialize();
DCHECK(result);

GpuChannelManager* manager = channel_->gpu_channel_manager();
DCHECK(manager);

gpu::SyncPointManager* sync_point_manager = manager->sync_point_manager();
DCHECK(sync_point_manager);

decoder_.reset(::gpu::gles2::GLES2Decoder::Create(context_group_.get()));
scheduler_.reset(new gpu::GpuScheduler(command_buffer_.get(),
decoder_.get(),
decoder_.get()));
sync_point_client_ =
sync_point_manager->CreateSyncPointClient(
channel_->GetSyncPointClientState(),
gpu::CommandBufferNamespace::GPU_IO,
command_buffer_id_);

if (preemption_flag_.get())
scheduler_->SetPreemptByFlag(preemption_flag_);

Expand All @@ -551,7 +565,6 @@ void GpuCommandBufferStub::OnInitialize(
this,
handle_);
} else {
GpuChannelManager* manager = channel_->gpu_channel_manager();
surface_ = manager->GetDefaultOffscreenSurface();
}

Expand Down Expand Up @@ -682,8 +695,7 @@ void GpuCommandBufferStub::OnInitialize(
Send(reply_message);

if (handle_.is_null() && !active_url_.is_empty()) {
GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
gpu_channel_manager->Send(new GpuHostMsg_DidCreateOffscreenContext(
manager->Send(new GpuHostMsg_DidCreateOffscreenContext(
active_url_));
}

Expand Down
2 changes: 2 additions & 0 deletions content/common/gpu/gpu_command_buffer_stub.h
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,7 @@

namespace gpu {
struct Mailbox;
class SyncPointClient;
class ValueStateMap;
namespace gles2 {
class MailboxManager;
Expand Down Expand Up @@ -271,6 +272,7 @@ class GpuCommandBufferStub
scoped_ptr<gpu::CommandBufferService> command_buffer_;
scoped_ptr<gpu::gles2::GLES2Decoder> decoder_;
scoped_ptr<gpu::GpuScheduler> scheduler_;
scoped_ptr<gpu::SyncPointClient> sync_point_client_;
scoped_refptr<gfx::GLSurface> surface_;

scoped_ptr<GpuMemoryManagerClientState> memory_manager_client_state_;
Expand Down
25 changes: 23 additions & 2 deletions gpu/command_buffer/service/in_process_command_buffer.cc
Original file line number Diff line number Diff line change
Expand Up @@ -344,6 +344,11 @@ bool InProcessCommandBuffer::InitializeOnGpuThread(
return false;
}

sync_point_client_state_ = SyncPointClientState::Create();
sync_point_client_ = service_->sync_point_manager()->CreateSyncPointClient(
sync_point_client_state_,
GetNamespaceID(), GetCommandBufferID());

if (service_->UseVirtualizedGLContexts() ||
decoder_->GetContextGroup()
->feature_info()
Expand Down Expand Up @@ -438,6 +443,8 @@ bool InProcessCommandBuffer::DestroyOnGpuThread() {
}
context_ = NULL;
surface_ = NULL;
sync_point_client_ = NULL;
sync_point_client_state_ = NULL;
gl_share_group_ = NULL;
#if defined(OS_ANDROID)
stream_texture_manager_.reset();
Expand Down Expand Up @@ -480,10 +487,13 @@ int32 InProcessCommandBuffer::GetLastToken() {
return last_state_.token;
}

void InProcessCommandBuffer::FlushOnGpuThread(int32 put_offset) {
void InProcessCommandBuffer::FlushOnGpuThread(int32 put_offset,
uint32_t order_num) {
CheckSequencedThread();
ScopedEvent handle_flush(&flush_event_);
base::AutoLock lock(command_buffer_lock_);

sync_point_client_state_->BeginProcessingOrderNumber(order_num);
command_buffer_->Flush(put_offset);
{
// Update state before signaling the flush event.
Expand All @@ -493,6 +503,13 @@ void InProcessCommandBuffer::FlushOnGpuThread(int32 put_offset) {
DCHECK((!error::IsError(state_after_last_flush_.error) && !context_lost_) ||
(error::IsError(state_after_last_flush_.error) && context_lost_));

// Currently the in-process command buffer does not support being
// descheduled. If it ever does, we would need to defer calling
// FinishProcessingOrderNumber until the message is rescheduled and has
// finished processing. This DCHECK enforces that assumption.
DCHECK(context_lost_ || put_offset == state_after_last_flush_.get_offset);
sync_point_client_state_->FinishProcessingOrderNumber(order_num);

// If we've processed all pending commands but still have pending queries,
// pump idle work until the query is passed.
if (put_offset == state_after_last_flush_.get_offset &&
Expand Down Expand Up @@ -533,10 +550,14 @@ void InProcessCommandBuffer::Flush(int32 put_offset) {
if (last_put_offset_ == put_offset)
return;

SyncPointManager* sync_manager = service_->sync_point_manager();
const uint32_t order_num =
sync_point_client_state_->GenerateUnprocessedOrderNumber(sync_manager);
last_put_offset_ = put_offset;
base::Closure task = base::Bind(&InProcessCommandBuffer::FlushOnGpuThread,
gpu_thread_weak_ptr_,
put_offset);
put_offset,
order_num);
QueueTask(task);
}

Expand Down
Loading

0 comments on commit a6b0d39

Please sign in to comment.