Subject: Re: [PATCH 08/11] qspinlock: Revert to test-and-set on hypervisors
On Sun, Jun 15, 2014 at 02:47:05PM +0200, Peter Zijlstra wrote:
> When we detect a hypervisor (!paravirt, see later patches), revert to

Please spell out the names of the patches.

> a simple test-and-set lock to avoid the horrors of queue preemption.

Heheh.
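
(Aside, for anyone reading along: the fallback really is the classic
test-and-set spinlock. A minimal user-space sketch of the same spin
loop, using C11 atomics instead of the kernel's atomic_t API -- the
names here are illustrative, not kernel interfaces:)

	#include <stdatomic.h>
	#include <stdbool.h>

	/* hypothetical stand-in for struct qspinlock */
	struct tas_lock {
		atomic_int val;		/* 0 = unlocked, 1 = locked */
	};

	static void tas_lock_acquire(struct tas_lock *lock)
	{
		int expected = 0;

		/* spin until we swap 0 -> 1; acquire ordering on success */
		while (!atomic_compare_exchange_weak_explicit(&lock->val,
				&expected, 1, memory_order_acquire,
				memory_order_relaxed)) {
			expected = 0;	/* a failed CAS overwrites 'expected' */
			/* the kernel loop would cpu_relax() here */
		}
	}

	static void tas_lock_release(struct tas_lock *lock)
	{
		atomic_store_explicit(&lock->val, 0, memory_order_release);
	}

The point of the revert: with a queue, a vCPU preempted while waiting
stalls every CPU queued behind it, whereas with test-and-set any CPU
that is actually running can still take the lock.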
>
> Signed-off-by: Peter Zijlstra <peterz@infradead.org>
> ---
> arch/x86/include/asm/qspinlock.h |   14 ++++++++++++++
> include/asm-generic/qspinlock.h  |    7 +++++++
> kernel/locking/qspinlock.c       |    3 +++
> 3 files changed, 24 insertions(+)
>
> --- a/arch/x86/include/asm/qspinlock.h
> +++ b/arch/x86/include/asm/qspinlock.h
> @@ -1,6 +1,7 @@
> #ifndef _ASM_X86_QSPINLOCK_H
> #define _ASM_X86_QSPINLOCK_H
>
> +#include <asm/cpufeature.h>
> #include <asm-generic/qspinlock_types.h>
>
> #if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE)
> @@ -20,6 +21,19 @@ static inline void queue_spin_unlock(str
>
> #endif /* !CONFIG_X86_OOSTORE && !CONFIG_X86_PPRO_FENCE */
>
> +#define virt_queue_spin_lock virt_queue_spin_lock
> +
> +static inline bool virt_queue_spin_lock(struct qspinlock *lock)
> +{
> +	if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
> +		return false;
> +
> +	while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0)
> +		cpu_relax();
> +
> +	return true;
> +}
> +
> #include <asm-generic/qspinlock.h>
>
> #endif /* _ASM_X86_QSPINLOCK_H */
> --- a/include/asm-generic/qspinlock.h
> +++ b/include/asm-generic/qspinlock.h
> @@ -98,6 +98,13 @@ static __always_inline void queue_spin_u
> }
> #endif
>
> +#ifndef virt_queue_spin_lock
> +static __always_inline bool virt_queue_spin_lock(struct qspinlock *lock)
> +{
> +	return false;
> +}
> +#endif
> +
> /*
>  * Initializer
>  */
> --- a/kernel/locking/qspinlock.c
> +++ b/kernel/locking/qspinlock.c
> @@ -247,6 +247,9 @@ void queue_spin_lock_slowpath(struct qsp
>
> BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS));
>
> +	if (virt_queue_spin_lock(lock))
> +		return;
> +
> /*
>  * wait for in-progress pending->locked hand-overs
>  *
>
>
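
The #define virt_queue_spin_lock virt_queue_spin_lock plus the #ifndef
stub in asm-generic is the usual arch-override idiom: defining a macro
with the same name as the function makes the generic fallback compile
out. A stripped-down sketch of the pattern outside the kernel
(hypothetical names):

	#include <stdbool.h>

	/* "arch" header: provide the override and announce it via a
	 * same-named macro. */
	#define arch_try_fast_lock arch_try_fast_lock
	static inline bool arch_try_fast_lock(void)
	{
		return true;	/* pretend the arch path took the lock */
	}

	/* "generic" header, included afterwards: this stub only exists
	 * when no arch override was defined above. */
	#ifndef arch_try_fast_lock
	static inline bool arch_try_fast_lock(void)
	{
		return false;
	}
	#endif

Include order matters here, which is why the x86 header pulls in
<asm-generic/qspinlock.h> only after defining its override.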

