Subject: Re: [PATCH] sched: fair scheduler should not resched rt tasks
From: Peter Zijlstra <a.p.zijlstra@chello.nl>
Date: Sat, 11 Oct 2008
On Sat, 2008-10-11 at 03:01 -0400, Steven Rostedt wrote:
> Using ftrace, I noticed latencies in real-time tasks where they were
> needlessly calling schedule due to sched_fair sending out time slices.
>
> This patch prevents a call to resched_task by the sched fair class if
> the task it wants to reschedule is an rt task.

Right, that's not a good thing. However, this patch looks wrong: we should
never call hrtick_start_fair() on an rt task to begin with.
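
For reference, here is a rough sketch of hrtick_start_fair() as it looks in
kernel/sched_fair.c around this time (paraphrased, not a verbatim copy): it
derives the remaining slice from p's CFS sched_entity and may end up calling
resched_task(), neither of which makes sense when the task handed in is an
rt task.

/*
 * Sketch of hrtick_start_fair(): compute how much of the CFS slice is
 * left for p and either re-program the hrtick or resched.  All of this
 * assumes p is managed by the fair class.
 */
static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
{
	struct sched_entity *se = &p->se;
	struct cfs_rq *cfs_rq = cfs_rq_of(se);

	if (cfs_rq->nr_running > 1) {
		u64 slice = sched_slice(cfs_rq, se);
		u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
		s64 delta = slice - ran;

		if (delta < 0) {
			/* slice used up: resched, spurious if p is rt */
			if (rq->curr == p)
				resched_task(p);
			return;
		}
		hrtick_start(rq, delta);
	}
}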

The way I can see that happening is through enqueue/dequeue_task_fair(),
where we want to re-program the hrtick because nr_running changes (and
thus the current task's desired runtime).
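
Concretely, before this patch the tails of enqueue_task_fair() and
dequeue_task_fair() look roughly like the sketch below (simplified from the
'-' lines in the diff): they re-program the hrtick for rq->curr without
checking which class that task belongs to.

static void enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup)
{
	/* ... enqueue p's sched_entity, bumping cfs_rq->nr_running ... */

	/*
	 * rq->curr is whatever happens to be running; if that is an rt
	 * task, hrtick_start_fair() pokes at its CFS accounting and can
	 * call resched_task() on it.
	 */
	hrtick_start_fair(rq, rq->curr);
}

The hrtick_update() helper introduced below bails out early unless rq->curr
is in the fair class.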

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
---
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 5d39d92..6bcceec 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -73,6 +73,8 @@ unsigned int sysctl_sched_wakeup_granularity = 5000000UL;

 const_debug unsigned int sysctl_sched_migration_cost = 500000UL;

+static const struct sched_class fair_sched_class;
+
 /**************************************************************
  * CFS operations on generic schedulable entities:
  */
@@ -849,11 +851,31 @@ static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
 		hrtick_start(rq, delta);
 	}
 }
+
+/*
+ * called from enqueue/dequeue and updates the hrtick when the
+ * current task is from our class and nr_running is low enough
+ * to matter.
+ */
+static void hrtick_update(struct rq *rq)
+{
+	struct task_struct *curr = rq->curr;
+
+	if (curr->sched_class != &fair_sched_class)
+		return;
+
+	if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency)
+		hrtick_start_fair(rq, curr);
+}
 #else /* !CONFIG_SCHED_HRTICK */
 static inline void
 hrtick_start_fair(struct rq *rq, struct task_struct *p)
 {
 }
+
+static inline void hrtick_update(struct rq *rq)
+{
+}
 #endif

 /*
@@ -874,7 +896,7 @@ static void enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup)
 		wakeup = 1;
 	}

-	hrtick_start_fair(rq, rq->curr);
+	hrtick_update(rq);
 }

 /*
@@ -896,7 +918,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int sleep)
 		sleep = 1;
 	}

-	hrtick_start_fair(rq, rq->curr);
+	hrtick_update(rq);
 }

 /*
@@ -1002,8 +1024,6 @@ static inline int wake_idle(int cpu, struct task_struct *p)

 #ifdef CONFIG_SMP

-static const struct sched_class fair_sched_class;
-
 #ifdef CONFIG_FAIR_GROUP_SCHED
 /*
  * effective_load() calculates the load change as seen from the root_task_group


