    Subject: [tip:locking/core] locking/spinlocks, s390: Implement vcpu_is_preempted(cpu)
    Commit-ID:  760928c0dafc7d0faf0c0248e28e16d4c8dc7ad6
    Gitweb: http://git.kernel.org/tip/760928c0dafc7d0faf0c0248e28e16d4c8dc7ad6
    Author: Christian Borntraeger <borntraeger@de.ibm.com>
    AuthorDate: Wed, 2 Nov 2016 05:08:32 -0400
    Committer: Ingo Molnar <mingo@kernel.org>
    CommitDate: Tue, 22 Nov 2016 12:48:06 +0100

    locking/spinlocks, s390: Implement vcpu_is_preempted(cpu)

    This implements the s390 version of vcpu_is_preempted(cpu)
    by reworking the existing smp_vcpu_scheduled() function into
    arch_vcpu_is_preempted().

    We can then also get rid of the local cpu_is_preempted()
    function by moving the CIF_ENABLED_WAIT test into
    arch_vcpu_is_preempted().

    Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
    Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
    Acked-by: Heiko Carstens <heiko.carstens@de.ibm.com>
    Cc: David.Laight@ACULAB.COM
    Cc: Linus Torvalds <torvalds@linux-foundation.org>
    Cc: Peter Zijlstra <peterz@infradead.org>
    Cc: Thomas Gleixner <tglx@linutronix.de>
    Cc: benh@kernel.crashing.org
    Cc: boqun.feng@gmail.com
    Cc: bsingharora@gmail.com
    Cc: dave@stgolabs.net
    Cc: jgross@suse.com
    Cc: kernellwp@gmail.com
    Cc: konrad.wilk@oracle.com
    Cc: linuxppc-dev@lists.ozlabs.org
    Cc: mpe@ellerman.id.au
    Cc: paulmck@linux.vnet.ibm.com
    Cc: paulus@samba.org
    Cc: pbonzini@redhat.com
    Cc: rkrcmar@redhat.com
    Cc: virtualization@lists.linux-foundation.org
    Cc: will.deacon@arm.com
    Cc: xen-devel-request@lists.xenproject.org
    Cc: xen-devel@lists.xenproject.org
    Link: http://lkml.kernel.org/r/1478077718-37424-6-git-send-email-xinhui.pan@linux.vnet.ibm.com
    Signed-off-by: Ingo Molnar <mingo@kernel.org>
    ---
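    (Illustrative note, not part of the patch: generic code is expected to use
    the vcpu_is_preempted() hook roughly as in the sketch below, giving up a
    busy-wait once the lock holder's vCPU is no longer running. The names
    my_lock, spin_on_owner() and yield_to_owner() are hypothetical stand-ins;
    yield_to_owner() would map to an arch-specific directed yield such as
    smp_yield_cpu() on s390, while READ_ONCE(), cpu_relax() and
    vcpu_is_preempted() are the kernel's own primitives.)

    struct my_lock {		/* hypothetical lock that records its owner */
    	int locked;		/* 0 = free, 1 = held */
    	int owner_cpu;		/* CPU id of the holder, -1 when unowned */
    };

    void yield_to_owner(int cpu);	/* hypothetical directed yield */

    static void spin_on_owner(struct my_lock *lock)
    {
    	int owner;

    	while (READ_ONCE(lock->locked)) {
    		owner = READ_ONCE(lock->owner_cpu);
    		if (owner >= 0 && vcpu_is_preempted(owner)) {
    			/* Holder is scheduled out: yield instead of spinning. */
    			yield_to_owner(owner);
    			continue;
    		}
    		cpu_relax();
    	}
    }
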
    arch/s390/include/asm/spinlock.h |  8 ++++++++
    arch/s390/kernel/smp.c           |  9 +++++++--
    arch/s390/lib/spinlock.c         | 25 ++++++++-----------------
    3 files changed, 23 insertions(+), 19 deletions(-)

    diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
    index 7e9e09f..7ecd890 100644
    --- a/arch/s390/include/asm/spinlock.h
    +++ b/arch/s390/include/asm/spinlock.h
    @@ -23,6 +23,14 @@ _raw_compare_and_swap(unsigned int *lock, unsigned int old, unsigned int new)
    	return __sync_bool_compare_and_swap(lock, old, new);
    }

    +#ifndef CONFIG_SMP
    +static inline bool arch_vcpu_is_preempted(int cpu) { return false; }
    +#else
    +bool arch_vcpu_is_preempted(int cpu);
    +#endif
    +
    +#define vcpu_is_preempted arch_vcpu_is_preempted
    +
    /*
     * Simple spin lock operations. There are two variants, one clears IRQ's
     * on the local processor, one does not.
    diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
    index 35531fe..b988ed1 100644
    --- a/arch/s390/kernel/smp.c
    +++ b/arch/s390/kernel/smp.c
    @@ -368,10 +368,15 @@ int smp_find_processor_id(u16 address)
    	return -1;
    }

    -int smp_vcpu_scheduled(int cpu)
    +bool arch_vcpu_is_preempted(int cpu)
    {
    -	return pcpu_running(pcpu_devices + cpu);
    +	if (test_cpu_flag_of(CIF_ENABLED_WAIT, cpu))
    +		return false;
    +	if (pcpu_running(pcpu_devices + cpu))
    +		return false;
    +	return true;
    }
    +EXPORT_SYMBOL(arch_vcpu_is_preempted);

    void smp_yield_cpu(int cpu)
    {
    diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
    index e5f50a7..e48a48e 100644
    --- a/arch/s390/lib/spinlock.c
    +++ b/arch/s390/lib/spinlock.c
    @@ -37,15 +37,6 @@ static inline void _raw_compare_and_delay(unsigned int *lock, unsigned int old)
    	asm(".insn rsy,0xeb0000000022,%0,0,%1" : : "d" (old), "Q" (*lock));
    }

    -static inline int cpu_is_preempted(int cpu)
    -{
    -	if (test_cpu_flag_of(CIF_ENABLED_WAIT, cpu))
    -		return 0;
    -	if (smp_vcpu_scheduled(cpu))
    -		return 0;
    -	return 1;
    -}
    -
    void arch_spin_lock_wait(arch_spinlock_t *lp)
    {
    	unsigned int cpu = SPINLOCK_LOCKVAL;
    @@ -62,7 +53,7 @@ void arch_spin_lock_wait(arch_spinlock_t *lp)
    			continue;
    		}
    		/* First iteration: check if the lock owner is running. */
    -		if (first_diag && cpu_is_preempted(~owner)) {
    +		if (first_diag && arch_vcpu_is_preempted(~owner)) {
    			smp_yield_cpu(~owner);
    			first_diag = 0;
    			continue;
    @@ -81,7 +72,7 @@ void arch_spin_lock_wait(arch_spinlock_t *lp)
    		 * yield the CPU unconditionally. For LPAR rely on the
    		 * sense running status.
    		 */
    -		if (!MACHINE_IS_LPAR || cpu_is_preempted(~owner)) {
    +		if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(~owner)) {
    			smp_yield_cpu(~owner);
    			first_diag = 0;
    		}
    @@ -108,7 +99,7 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
    			continue;
    		}
    		/* Check if the lock owner is running. */
    -		if (first_diag && cpu_is_preempted(~owner)) {
    +		if (first_diag && arch_vcpu_is_preempted(~owner)) {
    			smp_yield_cpu(~owner);
    			first_diag = 0;
    			continue;
    @@ -127,7 +118,7 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
    		 * yield the CPU unconditionally. For LPAR rely on the
    		 * sense running status.
    		 */
    -		if (!MACHINE_IS_LPAR || cpu_is_preempted(~owner)) {
    +		if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(~owner)) {
    			smp_yield_cpu(~owner);
    			first_diag = 0;
    		}
    @@ -165,7 +156,7 @@ void _raw_read_lock_wait(arch_rwlock_t *rw)
    	owner = 0;
    	while (1) {
    		if (count-- <= 0) {
    -			if (owner && cpu_is_preempted(~owner))
    +			if (owner && arch_vcpu_is_preempted(~owner))
    				smp_yield_cpu(~owner);
    			count = spin_retry;
    		}
    @@ -211,7 +202,7 @@ void _raw_write_lock_wait(arch_rwlock_t *rw, unsigned int prev)
    	owner = 0;
    	while (1) {
    		if (count-- <= 0) {
    -			if (owner && cpu_is_preempted(~owner))
    +			if (owner && arch_vcpu_is_preempted(~owner))
    				smp_yield_cpu(~owner);
    			count = spin_retry;
    		}
    @@ -241,7 +232,7 @@ void _raw_write_lock_wait(arch_rwlock_t *rw)
    	owner = 0;
    	while (1) {
    		if (count-- <= 0) {
    -			if (owner && cpu_is_preempted(~owner))
    +			if (owner && arch_vcpu_is_preempted(~owner))
    				smp_yield_cpu(~owner);
    			count = spin_retry;
    		}
    @@ -285,7 +276,7 @@ void arch_lock_relax(unsigned int cpu)
    {
    	if (!cpu)
    		return;
    -	if (MACHINE_IS_LPAR && !cpu_is_preempted(~cpu))
    +	if (MACHINE_IS_LPAR && !arch_vcpu_is_preempted(~cpu))
    		return;
    	smp_yield_cpu(~cpu);
    }
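
    (For reference: the #define in spinlock.h above is what makes the generic
    code pick up the s390 implementation. As far as I understand the rest of
    this series, the common header otherwise falls back to a no-op default
    along these lines, so without the override every query would report "not
    preempted".)

    #ifndef vcpu_is_preempted
    # define vcpu_is_preempted(cpu)	false	/* default: assume the vCPU is running */
    #endif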