Subject: Re: [PATCH v2 1/1] s390/spinlock: Provide vcpu_is_preempted


On 2016/9/29 23:51, Christian Borntraeger wrote:
    > this implements the s390 backend for commit
    > "kernel/sched: introduce vcpu preempted check interface"
    > by reworking the existing smp_vcpu_scheduled into
    > arch_vcpu_is_preempted. We can then also get rid of the
    > local cpu_is_preempted function by moving the
    > CIF_ENABLED_WAIT test into arch_vcpu_is_preempted.
    >
    > Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
    > ---

Hi, Christian,
    thanks for your patch!
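
For context, the generic side of the series (if I read it right) only provides a no-op default that each architecture can override, roughly:

/* include/linux/sched.h, from "kernel/sched: introduce vcpu preempted check interface" */
#ifndef vcpu_is_preempted
# define vcpu_is_preempted(cpu) false
#endif

so the spinlock.h hunk below is what hooks s390 into it.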

> arch/s390/include/asm/spinlock.h |  3 +++
> arch/s390/kernel/smp.c           |  9 +++++++--
> arch/s390/lib/spinlock.c         | 25 ++++++++-----------------
> 3 files changed, 18 insertions(+), 19 deletions(-)
    >
    > diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
    > index 63ebf37..e16e02f 100644
    > --- a/arch/s390/include/asm/spinlock.h
    > +++ b/arch/s390/include/asm/spinlock.h
> @@ -21,6 +21,9 @@ _raw_compare_and_swap(unsigned int *lock, unsigned int old, unsigned int new)
>          return __sync_bool_compare_and_swap(lock, old, new);
>  }
>
> +bool arch_vcpu_is_preempted(int cpu);
> +#define vcpu_is_preempted arch_vcpu_is_preempted
> +
>  /*
>   * Simple spin lock operations. There are two variants, one clears IRQ's
>   * on the local processor, one does not.
    > diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
    > index 7b89a75..4aadd16 100644
    > --- a/arch/s390/kernel/smp.c
    > +++ b/arch/s390/kernel/smp.c
> @@ -376,10 +376,15 @@ int smp_find_processor_id(u16 address)
>          return -1;
>  }
>
> -int smp_vcpu_scheduled(int cpu)

    root@ltcalpine2-lp13:~/linux# git grep -wn smp_vcpu_scheduled arch/s390/
    arch/s390/include/asm/smp.h:34:extern int smp_vcpu_scheduled(int cpu);
    arch/s390/include/asm/smp.h:56:static inline int smp_vcpu_scheduled(int cpu) { return 1; }
    arch/s390/kernel/smp.c:371:int smp_vcpu_scheduled(int cpu)
    arch/s390/lib/spinlock.c:44: if (smp_vcpu_scheduled(cpu))

> +bool arch_vcpu_is_preempted(int cpu)
>  {
> -        return pcpu_running(pcpu_devices + cpu);
> +        if (test_cpu_flag_of(CIF_ENABLED_WAIT, cpu))
> +                return false;
> +        if (pcpu_running(pcpu_devices + cpu))
> +                return false;
I see that smp_vcpu_scheduled() always returns true on a !SMP system.

Maybe we can do something similar, like below:

#ifndef CONFIG_SMP
static inline bool arch_vcpu_is_preempted(int cpu) { return !test_cpu_flag_of(CIF_ENABLED_WAIT, cpu); }
#else
...

But I can't help thinking that on a !SMP system, maybe we could simply use:

#ifndef CONFIG_SMP
static inline bool arch_vcpu_is_preempted(int cpu) { return false; }
#else
...
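
For illustration, the complete arrangement might look like the sketch below (untested, and glossing over placement: the !SMP stub would presumably live in a header next to the existing smp_vcpu_scheduled() stub, while the SMP body stays in smp.c; the SMP side is just your patch, unchanged):

#ifndef CONFIG_SMP
/* UP: there is no other vcpu whose running state we could query. */
static inline bool arch_vcpu_is_preempted(int cpu) { return false; }
#else
bool arch_vcpu_is_preempted(int cpu)
{
        if (test_cpu_flag_of(CIF_ENABLED_WAIT, cpu))
                return false;
        if (pcpu_running(pcpu_devices + cpu))
                return false;
        return true;
}
EXPORT_SYMBOL(arch_vcpu_is_preempted);
#endif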


    thanks
    xinhui

> +        return true;
>  }
> +EXPORT_SYMBOL(arch_vcpu_is_preempted);
>
>  void smp_yield_cpu(int cpu)
>  {
    > diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
    > index e5f50a7..e48a48e 100644
    > --- a/arch/s390/lib/spinlock.c
    > +++ b/arch/s390/lib/spinlock.c
> @@ -37,15 +37,6 @@ static inline void _raw_compare_and_delay(unsigned int *lock, unsigned int old)
>          asm(".insn rsy,0xeb0000000022,%0,0,%1" : : "d" (old), "Q" (*lock));
>  }
>
> -static inline int cpu_is_preempted(int cpu)
> -{
> -        if (test_cpu_flag_of(CIF_ENABLED_WAIT, cpu))
> -                return 0;
> -        if (smp_vcpu_scheduled(cpu))
> -                return 0;
> -        return 1;
> -}
> -
>  void arch_spin_lock_wait(arch_spinlock_t *lp)
>  {
>          unsigned int cpu = SPINLOCK_LOCKVAL;
> @@ -62,7 +53,7 @@ void arch_spin_lock_wait(arch_spinlock_t *lp)
>                          continue;
>                  }
>                  /* First iteration: check if the lock owner is running. */
> -                if (first_diag && cpu_is_preempted(~owner)) {
> +                if (first_diag && arch_vcpu_is_preempted(~owner)) {
>                          smp_yield_cpu(~owner);
>                          first_diag = 0;
>                          continue;
> @@ -81,7 +72,7 @@ void arch_spin_lock_wait(arch_spinlock_t *lp)
>                   * yield the CPU unconditionally. For LPAR rely on the
>                   * sense running status.
>                   */
> -                if (!MACHINE_IS_LPAR || cpu_is_preempted(~owner)) {
> +                if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(~owner)) {
>                          smp_yield_cpu(~owner);
>                          first_diag = 0;
>                  }
> @@ -108,7 +99,7 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
>                          continue;
>                  }
>                  /* Check if the lock owner is running. */
> -                if (first_diag && cpu_is_preempted(~owner)) {
> +                if (first_diag && arch_vcpu_is_preempted(~owner)) {
>                          smp_yield_cpu(~owner);
>                          first_diag = 0;
>                          continue;
> @@ -127,7 +118,7 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
>                   * yield the CPU unconditionally. For LPAR rely on the
>                   * sense running status.
>                   */
> -                if (!MACHINE_IS_LPAR || cpu_is_preempted(~owner)) {
> +                if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(~owner)) {
>                          smp_yield_cpu(~owner);
>                          first_diag = 0;
>                  }
> @@ -165,7 +156,7 @@ void _raw_read_lock_wait(arch_rwlock_t *rw)
>          owner = 0;
>          while (1) {
>                  if (count-- <= 0) {
> -                        if (owner && cpu_is_preempted(~owner))
> +                        if (owner && arch_vcpu_is_preempted(~owner))
>                                  smp_yield_cpu(~owner);
>                          count = spin_retry;
>                  }
> @@ -211,7 +202,7 @@ void _raw_write_lock_wait(arch_rwlock_t *rw, unsigned int prev)
>          owner = 0;
>          while (1) {
>                  if (count-- <= 0) {
> -                        if (owner && cpu_is_preempted(~owner))
> +                        if (owner && arch_vcpu_is_preempted(~owner))
>                                  smp_yield_cpu(~owner);
>                          count = spin_retry;
>                  }
> @@ -241,7 +232,7 @@ void _raw_write_lock_wait(arch_rwlock_t *rw)
>          owner = 0;
>          while (1) {
>                  if (count-- <= 0) {
> -                        if (owner && cpu_is_preempted(~owner))
> +                        if (owner && arch_vcpu_is_preempted(~owner))
>                                  smp_yield_cpu(~owner);
>                          count = spin_retry;
>                  }
> @@ -285,7 +276,7 @@ void arch_lock_relax(unsigned int cpu)
>  {
>          if (!cpu)
>                  return;
> -        if (MACHINE_IS_LPAR && !cpu_is_preempted(~cpu))
> +        if (MACHINE_IS_LPAR && !arch_vcpu_is_preempted(~cpu))
>                  return;
>          smp_yield_cpu(~cpu);
>  }
>
