locking/mutex: Enable optimistic spinning of woken waiter
This patch makes the waiter that sets the HANDOFF flag start spinning,
instead of sleeping, until either the handoff is complete or the owner
goes to sleep. Otherwise, the handoff will cause the optimistic spinners
to abort spinning, as the handed-off owner may not be running.

Tested-by: Jason Low <jason.low2@hpe.com>
Signed-off-by: Waiman Long <Waiman.Long@hpe.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: Ding Tianhong <dingtianhong@huawei.com>
Cc: Imre Deak <imre.deak@intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Paul E. McKenney <paulmck@us.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tim Chen <tim.c.chen@linux.intel.com>
Cc: Will Deacon <Will.Deacon@arm.com>
Link: http://lkml.kernel.org/r/1472254509-27508-2-git-send-email-Waiman.Long@hpe.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Waiman Long authored and Ingo Molnar committed Oct 25, 2016
1 parent a40ca56 commit b341afb
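
The idea is easiest to see outside the diff. Below is a minimal user-space sketch of the woken-waiter spin described above; it is NOT kernel code. The names struct demo_mutex, demo_handoff() and demo_waiter_spin() are invented for illustration, C11 atomics and sched_yield() stand in for the kernel's owner word, smp_mb() and cpu_relax_lowlatency(), and the OSQ, ww-mutex and owner-went-to-sleep handling are omitted.

    /*
     * Minimal user-space model of the behaviour this patch adds (a sketch,
     * not kernel code). A hypothetical demo_mutex plays the role of
     * struct mutex; NULL owner means "unlocked".
     */
    #include <sched.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct task { int id; };

    struct demo_mutex {
            _Atomic(struct task *) owner;   /* NULL means unlocked */
    };

    /* Unlock with handoff: hand the lock directly to the top waiter. */
    static void demo_handoff(struct demo_mutex *lock, struct task *top_waiter)
    {
            atomic_store_explicit(&lock->owner, top_waiter, memory_order_release);
    }

    /*
     * What the woken waiter now does instead of going straight back to sleep:
     * spin until the owner field becomes itself (handoff complete) or the
     * lock is free and a trylock succeeds.
     */
    static bool demo_waiter_spin(struct demo_mutex *lock, struct task *self)
    {
            for (;;) {
                    struct task *owner =
                            atomic_load_explicit(&lock->owner, memory_order_acquire);

                    if (owner == self)
                            return true;    /* lock was handed to us */

                    if (!owner) {
                            struct task *expected = NULL;

                            /* Lock looks free: try to take it directly. */
                            if (atomic_compare_exchange_strong(&lock->owner,
                                                               &expected, self))
                                    return true;
                    }

                    /*
                     * The kernel version also bails out (and sleeps) when the
                     * owner is no longer running; that part is omitted here.
                     */
                    sched_yield();
            }
    }

    int main(void)
    {
            struct demo_mutex lock = { .owner = NULL };
            struct task waiter = { .id = 1 };

            /* Simulate the unlocker handing the lock straight to the waiter. */
            demo_handoff(&lock, &waiter);

            /* The woken waiter spins briefly and sees itself as the new owner. */
            return demo_waiter_spin(&lock, &waiter) ? 0 : 1;
    }

In the real patch, the same loop additionally gives up and falls back to sleeping when the current owner stops running or a reschedule is needed (mutex_spin_on_owner() returning false); that is what the fail_unlock/fail paths in the diff below handle.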
Showing 1 changed file with 54 additions and 23 deletions.
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -416,24 +416,39 @@ static inline int mutex_can_spin_on_owner(struct mutex *lock)
  *
  * Returns true when the lock was taken, otherwise false, indicating
  * that we need to jump to the slowpath and sleep.
+ *
+ * The waiter flag is set to true if the spinner is a waiter in the wait
+ * queue. The waiter-spinner will spin on the lock directly and concurrently
+ * with the spinner at the head of the OSQ, if present, until the owner is
+ * changed to itself.
  */
 static bool mutex_optimistic_spin(struct mutex *lock,
-                                  struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
+                                  struct ww_acquire_ctx *ww_ctx,
+                                  const bool use_ww_ctx, const bool waiter)
 {
         struct task_struct *task = current;
 
-        if (!mutex_can_spin_on_owner(lock))
-                goto done;
+        if (!waiter) {
+                /*
+                 * The purpose of the mutex_can_spin_on_owner() function is
+                 * to eliminate the overhead of osq_lock() and osq_unlock()
+                 * in case spinning isn't possible. As a waiter-spinner
+                 * is not going to take OSQ lock anyway, there is no need
+                 * to call mutex_can_spin_on_owner().
+                 */
+                if (!mutex_can_spin_on_owner(lock))
+                        goto fail;
 
-        /*
-         * In order to avoid a stampede of mutex spinners trying to
-         * acquire the mutex all at once, the spinners need to take a
-         * MCS (queued) lock first before spinning on the owner field.
-         */
-        if (!osq_lock(&lock->osq))
-                goto done;
+                /*
+                 * In order to avoid a stampede of mutex spinners trying to
+                 * acquire the mutex all at once, the spinners need to take a
+                 * MCS (queued) lock first before spinning on the owner field.
+                 */
+                if (!osq_lock(&lock->osq))
+                        goto fail;
+        }
 
-        while (true) {
+        for (;;) {
                 struct task_struct *owner;
 
                 if (use_ww_ctx && ww_ctx->acquired > 0) {
@@ -449,23 +464,28 @@ static bool mutex_optimistic_spin(struct mutex *lock,
                          * performed the optimistic spinning cannot be done.
                          */
                         if (READ_ONCE(ww->ctx))
-                                break;
+                                goto fail_unlock;
                 }
 
                 /*
                  * If there's an owner, wait for it to either
                  * release the lock or go to sleep.
                  */
                 owner = __mutex_owner(lock);
-                if (owner && !mutex_spin_on_owner(lock, owner))
-                        break;
+                if (owner) {
+                        if (waiter && owner == task) {
+                                smp_mb(); /* ACQUIRE */
+                                break;
+                        }
 
-                /* Try to acquire the mutex if it is unlocked. */
-                if (__mutex_trylock(lock, false)) {
-                        osq_unlock(&lock->osq);
-                        return true;
+                        if (!mutex_spin_on_owner(lock, owner))
+                                goto fail_unlock;
                 }
 
+                /* Try to acquire the mutex if it is unlocked. */
+                if (__mutex_trylock(lock, waiter))
+                        break;
+
                 /*
                  * The cpu_relax() call is a compiler barrier which forces
                  * everything in this loop to be re-loaded. We don't need
@@ -475,8 +495,17 @@ static bool mutex_optimistic_spin(struct mutex *lock,
                 cpu_relax_lowlatency();
         }
 
-        osq_unlock(&lock->osq);
-done:
+        if (!waiter)
+                osq_unlock(&lock->osq);
+
+        return true;
+
+
+fail_unlock:
+        if (!waiter)
+                osq_unlock(&lock->osq);
+
+fail:
         /*
          * If we fell out of the spin path because of need_resched(),
          * reschedule now, before we try-lock the mutex. This avoids getting
@@ -495,7 +524,8 @@ static bool mutex_optimistic_spin(struct mutex *lock,
 }
 #else
 static bool mutex_optimistic_spin(struct mutex *lock,
-                                  struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
+                                  struct ww_acquire_ctx *ww_ctx,
+                                  const bool use_ww_ctx, const bool waiter)
 {
         return false;
 }
@@ -600,7 +630,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
         mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
 
         if (__mutex_trylock(lock, false) ||
-            mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx)) {
+            mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, false)) {
                 /* got the lock, yay! */
                 lock_acquired(&lock->dep_map, ip);
                 if (use_ww_ctx)
@@ -669,7 +699,8 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                  * state back to RUNNING and fall through the next schedule(),
                  * or we must see its unlock and acquire.
                  */
-                if (__mutex_trylock(lock, first))
+                if ((first && mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, true)) ||
+                    __mutex_trylock(lock, first))
                         break;
 
                 spin_lock_mutex(&lock->wait_lock, flags);
