Subject: Re: [PATCH v2 3/3][RT] sched: Have migrate_disable ignore bounded threads
On Tue, 2011-09-27 at 08:40 -0400, Steven Rostedt wrote:
> plain text document attachment
> (peterz-migrate-disable-thread-bound.patch)
> From: Peter Zijlstra <a.p.zijlstra@chello.nl>


Thomas,

Hold off on this patch until we get the kworker/u threads straightened out.
I'm retesting with the following change:

@@ -1383,7 +1384,7 @@ static struct worker *create_worker(struct global_cwq *gcwq, bool bind)
 	if (bind && !on_unbound_cpu)
 		kthread_bind(worker->task, gcwq->cpu);
 	else {
-		worker->task->flags |= PF_THREAD_BOUND;
+//		worker->task->flags |= PF_THREAD_BOUND;
 		if (on_unbound_cpu)
 			worker->flags |= WORKER_UNBOUND;
 	}

So far, so good.
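[A stand-alone sketch of the decision that hunk changes, inferred from the
quoted context lines; plain C for illustration, not kernel code. The
assumption here is that kthread_bind() of this era also sets
PF_THREAD_BOUND itself, so with the manual assignment commented out only
genuinely cpu-bound workers carry the flag, and the unbound kworker/u
threads no longer advertise a binding they cannot honor:

/*
 * Illustration only: which workers end up with PF_THREAD_BOUND
 * before and after the change quoted above.
 */
#include <stdbool.h>
#include <stdio.h>

static bool flag_before(bool bind, bool on_unbound_cpu)
{
	if (bind && !on_unbound_cpu)
		return true;	/* set via kthread_bind() */
	return true;		/* set manually in the else branch */
}

static bool flag_after(bool bind, bool on_unbound_cpu)
{
	if (bind && !on_unbound_cpu)
		return true;	/* kthread_bind() still sets it */
	return false;		/* manual assignment commented out */
}

int main(void)
{
	for (int bind = 0; bind < 2; bind++)
		for (int unbound = 0; unbound < 2; unbound++)
			printf("bind=%d on_unbound_cpu=%d: "
			       "PF_THREAD_BOUND before=%d after=%d\n",
			       bind, unbound,
			       flag_before(bind, unbound),
			       flag_after(bind, unbound));
	return 0;
}

The interesting row is bind=1 on_unbound_cpu=1, i.e. kworker/u: flagged
before the change, unflagged after.]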

-- Steve

>
> Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
>
> Index: linux-rt.git/kernel/sched.c
> ===================================================================
> --- linux-rt.git.orig/kernel/sched.c
> +++ linux-rt.git/kernel/sched.c
> @@ -4247,7 +4247,7 @@ void migrate_disable(void)
>  {
>  	struct task_struct *p = current;
> 
> -	if (in_atomic()) {
> +	if (in_atomic() || p->flags & PF_THREAD_BOUND) {
>  #ifdef CONFIG_SCHED_DEBUG
>  		p->migrate_disable_atomic++;
>  #endif
> @@ -4278,7 +4278,7 @@ void migrate_enable(void)
>  	unsigned long flags;
>  	struct rq *rq;
> 
> -	if (in_atomic()) {
> +	if (in_atomic() || p->flags & PF_THREAD_BOUND) {
>  #ifdef CONFIG_SCHED_DEBUG
>  		p->migrate_disable_atomic--;
>  #endif
> @@ -4299,26 +4299,21 @@ void migrate_enable(void)
> 
>  	if (unlikely(migrate_disabled_updated(p))) {
>  		/*
> -		 * See comment in update_migrate_disable() about locking.
> +		 * Undo whatever update_migrate_disable() did, also see there
> +		 * about locking.
>  		 */
>  		rq = this_rq();
>  		raw_spin_lock_irqsave(&rq->lock, flags);
> -		mask = tsk_cpus_allowed(p);
> +
>  		/*
>  		 * Clearing migrate_disable causes tsk_cpus_allowed to
>  		 * show the tasks original cpu affinity.
>  		 */
>  		p->migrate_disable = 0;
> -
> -		WARN_ON(!cpumask_test_cpu(smp_processor_id(), mask));
> -
> -		if (unlikely(!cpumask_equal(&p->cpus_allowed, mask))) {
> -			/* Get the mask now that migration is enabled */
> -			mask = tsk_cpus_allowed(p);
> -			if (p->sched_class->set_cpus_allowed)
> -				p->sched_class->set_cpus_allowed(p, mask);
> -			p->rt.nr_cpus_allowed = cpumask_weight(mask);
> -		}
> +		mask = tsk_cpus_allowed(p);
> +		if (p->sched_class->set_cpus_allowed)
> +			p->sched_class->set_cpus_allowed(p, mask);
> +		p->rt.nr_cpus_allowed = cpumask_weight(mask);
>  		raw_spin_unlock_irqrestore(&rq->lock, flags);
>  	} else
>  		p->migrate_disable = 0;
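[Taken together, the two if () hunks make migrate_disable() and
migrate_enable() treat a PF_THREAD_BOUND task like atomic context: bump
the debug counter and return, skipping the affinity bookkeeping. A
minimal user-space model of that control flow; the struct is a stand-in,
the flag value matches kernels of this era, and the real kernel of
course does far more under the counters:

/*
 * User-space model (not kernel code) of the fast path the patch adds:
 * a task already pinned to one CPU has nothing to gain from
 * migrate_disable(), so only the debug counter moves.
 */
#include <stdio.h>

#define PF_THREAD_BOUND	0x04000000	/* flag value of this kernel era */

struct task {
	unsigned int flags;
	int in_atomic;			/* stand-in for in_atomic() */
	int migrate_disable;		/* nesting count, as in -rt */
	int migrate_disable_atomic;	/* CONFIG_SCHED_DEBUG counter */
};

static void migrate_disable(struct task *p)
{
	if (p->in_atomic || (p->flags & PF_THREAD_BOUND)) {
		p->migrate_disable_atomic++;
		return;			/* already unable to migrate */
	}
	p->migrate_disable++;		/* kernel also pins the task here */
}

static void migrate_enable(struct task *p)
{
	if (p->in_atomic || (p->flags & PF_THREAD_BOUND)) {
		p->migrate_disable_atomic--;
		return;
	}
	p->migrate_disable--;		/* kernel also restores affinity */
}

int main(void)
{
	struct task bound = { .flags = PF_THREAD_BOUND };
	struct task plain = { 0 };

	migrate_disable(&bound);
	migrate_disable(&plain);
	printf("bound: disable=%d debug=%d\n",
	       bound.migrate_disable, bound.migrate_disable_atomic);
	printf("plain: disable=%d debug=%d\n",
	       plain.migrate_disable, plain.migrate_disable_atomic);
	migrate_enable(&bound);
	migrate_enable(&plain);
	return 0;
}

The bound task only moves the debug counter, while the plain task takes
the normal nesting path; this is also why a kworker/u wrongly carrying
PF_THREAD_BOUND is a problem for this patch.]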
