    Subject: Re: [PATCH v6 14/17] powerpc: Add support for setting SPRN_TIDR
    Date: 2017-08-14
    On Tue, 2017-08-08 at 16:06 -0700, Sukadev Bhattiprolu wrote:
    > We need the SPRN_TIDR to be set for use with fast thread-wakeup
    > (core-to-core wakeup).  Each thread in a process needs to have a
    > unique id within the process but as explained below, for now, we
    > assign globally unique thread ids to all threads in the system.
    >
    > Signed-off-by: Sukadev Bhattiprolu <sukadev@linux.vnet.ibm.com>
    > ---
    >  arch/powerpc/include/asm/processor.h |  4 ++
    >  arch/powerpc/kernel/process.c        | 74 ++++++++++++++++++++++++++++++++++++
    >  2 files changed, 78 insertions(+)
    >
    > diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
    > index fab7ff8..bf6ba63 100644
    > --- a/arch/powerpc/include/asm/processor.h
    > +++ b/arch/powerpc/include/asm/processor.h
    > @@ -232,6 +232,10 @@ struct debug_reg {
    >  struct thread_struct {
    >   unsigned long ksp; /* Kernel stack pointer */
    >  
    > +#ifdef CONFIG_PPC_VAS

    I'm tempted to have this always, or add a new feature, CONFIG_PPC_TID, that
    PPC_VAS depends on.
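    Just to sketch that (CONFIG_PPC_TID is only a suggested name, nothing by that
    name exists today): the field would sit under the new symbol, and PPC_VAS
    would select (or depend on) it in Kconfig:

        #ifdef CONFIG_PPC_TID           /* hypothetical symbol, selected by PPC_VAS */
            unsigned long tidr;         /* thread id written to SPRN_TIDR */
        #endif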

    > + unsigned long tidr;

    > +#endif
    > +
    >  #ifdef CONFIG_PPC64
    >   unsigned long ksp_vsid;
    >  #endif
    > diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
    > index 9f3e2c9..6123859 100644
    > --- a/arch/powerpc/kernel/process.c
    > +++ b/arch/powerpc/kernel/process.c
    > @@ -1213,6 +1213,16 @@ struct task_struct *__switch_to(struct task_struct *prev,
    >   hard_irq_disable();
    >   }
    >  
    > +#ifdef CONFIG_PPC_VAS
    > + mtspr(SPRN_TIDR, new->thread.tidr);

    How much does this hurt our context_switch benchmark in
    tools/testing/selftests/powerpc/benchmarks/context_switch.c?

    Also, you need a CPU_FTR_ARCH_300 test here (and elsewhere).
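    To be concrete, one way to keep the common-case cost near zero would be to
    treat TIDR like the other SPRs in restore_sprs(): only write it when the
    feature exists and the value has actually changed. Rough, untested sketch:

        /* In restore_sprs(): TIDR only exists on ISA 3.0 (POWER9) parts, and
         * skipping the mtspr when the value is unchanged keeps the cost out
         * of the common context-switch path. */
        if (cpu_has_feature(CPU_FTR_ARCH_300) &&
            old_thread->tidr != new_thread->tidr)
                mtspr(SPRN_TIDR, new_thread->tidr);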

    > +#endif
    > + /*
    > +  * We can't take a PMU exception inside _switch() since there is a
    > +  * window where the kernel stack SLB and the kernel stack are out
    > +  * of sync. Hard disable here.
    > +  */
    > + hard_irq_disable();
    > +

    What is this?

    >   /*
    >    * Call restore_sprs() before calling _switch(). If we move it after
    >    * _switch() then we miss out on calling it for new tasks. The reason
    > @@ -1449,9 +1459,70 @@ void flush_thread(void)
    >  #endif /* CONFIG_HAVE_HW_BREAKPOINT */
    >  }
    >  
    > +#ifdef CONFIG_PPC_VAS
    > +static DEFINE_SPINLOCK(vas_thread_id_lock);
    > +static DEFINE_IDA(vas_thread_ida);

    This IDA should be per process, not global.

    > +
    > +/*
    > + * We need to assign an unique thread id to each thread in a process. This
    > + * thread id is intended to be used with the Fast Thread-wakeup (aka Core-
    > + * to-core wakeup) mechanism being implemented on top of Virtual Accelerator
    > + * Switchboard (VAS).
    > + *
    > + * To get a unique thread-id per process we could simply use task_pid_nr()
    > + * but the problem is that task_pid_nr() is not yet available for the thread
    > + when copy_thread() is called. Fixing that would require more intrusive
    > + changes to the arch-neutral code path in copy_process().
    > + *
    > + * Further, to assign unique thread ids within each process, we need an
    > + * atomic field (or an IDR) in task_struct, which again intrudes into the
    > + * arch-neutral code.

    Really?

    > + * So try to assign globally unique thread ids for now.

    Yuck!
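    If we do want per-process ids without touching arch-neutral code, one option
    (purely illustrative; the field and helper below are made up) would be to hang
    the allocator off the arch-owned mm_context_t, which all threads of a process
    share:

        /* Hypothetical sketch only: the IDA would be initialised with
         * ida_init() when the context is created and torn down with
         * ida_destroy() in destroy_context(). */
        typedef struct {
                ...
                struct ida tidr_ida;            /* made-up field */
        } mm_context_t;

        static int assign_thread_id(struct mm_struct *mm)
        {
                /* per-process ids; 0 stays reserved to mean "no TIDR" */
                return ida_simple_get(&mm->context.tidr_ida, 1, 0, GFP_KERNEL);
        }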

    > + */
    > +static int assign_thread_id(void)
    > +{
    > + int index;
    > + int err;
    > +
    > +again:
    > + if (!ida_pre_get(&vas_thread_ida, GFP_KERNEL))
    > + return -ENOMEM;
    > +
    > + spin_lock(&vas_thread_id_lock);
    > + err = ida_get_new_above(&vas_thread_ida, 1, &index);

    We can't use 0 or 1?

    > + spin_unlock(&vas_thread_id_lock);
    > +
    > + if (err == -EAGAIN)
    > + goto again;
    > + else if (err)
    > + return err;
    > +
    > + if (index > MAX_USER_CONTEXT) {
    > + spin_lock(&vas_thread_id_lock);
    > + ida_remove(&vas_thread_ida, index);
    > + spin_unlock(&vas_thread_id_lock);
    > + return -ENOMEM;
    > + }
    > +
    > + return index;
    > +}
    > +
    > +static void free_thread_id(int id)
    > +{
    > + spin_lock(&vas_thread_id_lock);
    > + ida_remove(&vas_thread_ida, id);
    > + spin_unlock(&vas_thread_id_lock);
    > +}
    > +#endif /* CONFIG_PPC_VAS */
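    Also, the ida_pre_get()/ida_get_new_above() retry loop plus the private
    spinlock can be collapsed into ida_simple_get(), which does its own locking
    and retrying and takes the range directly (still global here, just shorter):

        static int assign_thread_id(void)
        {
                /* allocates in [1, MAX_USER_CONTEXT]; the end is exclusive */
                return ida_simple_get(&vas_thread_ida, 1, MAX_USER_CONTEXT + 1,
                                      GFP_KERNEL);
        }

        static void free_thread_id(int id)
        {
                ida_simple_remove(&vas_thread_ida, id);
        }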
    > +
    > +
    >  void
    >  release_thread(struct task_struct *t)
    >  {
    > +#ifdef CONFIG_PPC_VAS
    > + free_thread_id(t->thread.tidr);
    > +#endif

    Can you restructure this to avoid the #ifdef ugliness?
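    e.g. something like this keeps the #ifdef in one place (the helper name is
    just made up here), and the same trick covers the assign side in the
    copy_thread() hunk below:

        #ifdef CONFIG_PPC_VAS
        static void free_thread_tidr(struct task_struct *t)
        {
                spin_lock(&vas_thread_id_lock);
                ida_remove(&vas_thread_ida, t->thread.tidr);
                spin_unlock(&vas_thread_id_lock);
        }
        #else
        static inline void free_thread_tidr(struct task_struct *t) { }
        #endif

        void release_thread(struct task_struct *t)
        {
                free_thread_tidr(t);
        }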

    >  }
    >  
    >  /*
    > @@ -1587,6 +1658,9 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
    >  #endif
    >  
    >   setup_ksp_vsid(p, sp);
    > +#ifdef CONFIG_PPC_VAS
    > + p->thread.tidr = assign_thread_id();
    > +#endif

    Same here...

    >  
    >  #ifdef CONFIG_PPC64 
    >   if (cpu_has_feature(CPU_FTR_DSCR)) {
