Subject: [patch 056/100] posixtimers, sched: Fix posix clock monotonicity
-stable review patch.  If anyone has any objections, please let us know.
---------------------

From: Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>

upstream commit: c5f8d99585d7b5b7e857fabf8aefd0174903a98c

Impact: Regression fix (clock_gettime() could go backwards)

This patch re-introduces a couple of functions, task_sched_runtime()
and thread_group_sched_runtime(), which were removed at the time of
2.6.28-rc1.

These functions protect the sampling of the thread/process clock with
the rq lock. The rq lock is required so that rq->clock is not updated
while the sample is taken.

i.e.
Without the lock, clock_gettime() may return
((accounted runtime sampled before an update) + (delta sampled after the update)),
which is less than what it should be, so the clock appears to go backwards.
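
To make that concrete, here is a standalone illustration of the
interleaving, not kernel code; the variable names echo the kernel
fields but the numbers are made up:

/*
 * Illustration only: two clock samples, where the second one races
 * with a scheduler tick that folds the pending delta into
 * sum_exec_runtime and resets exec_start.
 */
#include <stdio.h>

int main(void)
{
        /* Sample 1 is consistent: sum and delta are taken together. */
        unsigned long long sum1   = 1000;  /* p->se.sum_exec_runtime */
        unsigned long long delta1 =  500;  /* rq->clock - p->se.exec_start */

        /*
         * Before sample 2 finishes, a tick accounts the pending 500 ns:
         * sum_exec_runtime becomes 1500 and exec_start is reset, so the
         * next delta restarts near zero.  Sample 2 raced: it read the
         * sum before the tick but the delta after it.
         */
        unsigned long long sum2   = 1000;  /* stale: read before the update */
        unsigned long long delta2 =   10;  /* fresh: read after the update */

        printf("sample 1: %llu ns\n", sum1 + delta1);  /* 1500 */
        printf("sample 2: %llu ns (backwards)\n", sum2 + delta2);  /* 1010 */
        return 0;
}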

v2 -> v3:
- Rename the static helper function __task_delta_exec()
  to do_task_delta_exec(), since the -tip tree already has
  a different version of __task_delta_exec().

v1 -> v2:
- Revise the function comments and the patch description.
- Add a note about the accuracy of the thread group's runtime.

Signed-off-by: Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: stable@kernel.org
LKML-Reference: <49D1CC93.4080401@jp.fujitsu.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Chris Wright <chrisw@sous-sol.org>
---
 kernel/posix-cpu-timers.c |    7 ++--
 kernel/sched.c            |   65 ++++++++++++++++++++++++++++++++++++++++------
 2 files changed, 61 insertions(+), 11 deletions(-)

--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -224,7 +224,7 @@ static int cpu_clock_sample(const clocki
                 cpu->cpu = virt_ticks(p);
                 break;
         case CPUCLOCK_SCHED:
-                cpu->sched = p->se.sum_exec_runtime + task_delta_exec(p);
+                cpu->sched = task_sched_runtime(p);
                 break;
         }
         return 0;
@@ -305,18 +305,19 @@ static int cpu_clock_sample_group(const
 {
         struct task_cputime cputime;

-        thread_group_cputime(p, &cputime);
         switch (CPUCLOCK_WHICH(which_clock)) {
         default:
                 return -EINVAL;
         case CPUCLOCK_PROF:
+                thread_group_cputime(p, &cputime);
                 cpu->cpu = cputime_add(cputime.utime, cputime.stime);
                 break;
         case CPUCLOCK_VIRT:
+                thread_group_cputime(p, &cputime);
                 cpu->cpu = cputime.utime;
                 break;
         case CPUCLOCK_SCHED:
-                cpu->sched = cputime.sum_exec_runtime + task_delta_exec(p);
+                cpu->sched = thread_group_sched_runtime(p);
                 break;
         }
         return 0;
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4134,9 +4134,25 @@ DEFINE_PER_CPU(struct kernel_stat, kstat
 EXPORT_PER_CPU_SYMBOL(kstat);

 /*
- * Return any ns on the sched_clock that have not yet been banked in
+ * Return any ns on the sched_clock that have not yet been accounted in
  * @p in case that task is currently running.
+ *
+ * Called with task_rq_lock() held on @rq.
  */
+static u64 do_task_delta_exec(struct task_struct *p, struct rq *rq)
+{
+        u64 ns = 0;
+
+        if (task_current(rq, p)) {
+                update_rq_clock(rq);
+                ns = rq->clock - p->se.exec_start;
+                if ((s64)ns < 0)
+                        ns = 0;
+        }
+
+        return ns;
+}
+
 unsigned long long task_delta_exec(struct task_struct *p)
 {
         unsigned long flags;
@@ -4144,16 +4160,49 @@ unsigned long long task_delta_exec(struc
         u64 ns = 0;

         rq = task_rq_lock(p, &flags);
+        ns = do_task_delta_exec(p, rq);
+        task_rq_unlock(rq, &flags);

-        if (task_current(rq, p)) {
-                u64 delta_exec;
+        return ns;
+}

-                update_rq_clock(rq);
-                delta_exec = rq->clock - p->se.exec_start;
-                if ((s64)delta_exec > 0)
-                        ns = delta_exec;
-        }
+/*
+ * Return accounted runtime for the task.
+ * In case the task is currently running, return the runtime plus current's
+ * pending runtime that have not been accounted yet.
+ */
+unsigned long long task_sched_runtime(struct task_struct *p)
+{
+        unsigned long flags;
+        struct rq *rq;
+        u64 ns = 0;
+
+        rq = task_rq_lock(p, &flags);
+        ns = p->se.sum_exec_runtime + do_task_delta_exec(p, rq);
+        task_rq_unlock(rq, &flags);
+
+        return ns;
+}

+/*
+ * Return sum_exec_runtime for the thread group.
+ * In case the task is currently running, return the sum plus current's
+ * pending runtime that have not been accounted yet.
+ *
+ * Note that the thread group might have other running tasks as well,
+ * so the return value not includes other pending runtime that other
+ * running tasks might have.
+ */
+unsigned long long thread_group_sched_runtime(struct task_struct *p)
+{
+        struct task_cputime totals;
+        unsigned long flags;
+        struct rq *rq;
+        u64 ns;
+
+        rq = task_rq_lock(p, &flags);
+        thread_group_cputime(p, &totals);
+        ns = totals.sum_exec_runtime + do_task_delta_exec(p, rq);
         task_rq_unlock(rq, &flags);

         return ns;
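
For reference, here is a minimal userspace checker, an illustration
rather than part of the patch, that hammers the CPUCLOCK_SCHED path
fixed above.  CLOCK_THREAD_CPUTIME_ID is served by cpu_clock_sample();
CLOCK_PROCESS_CPUTIME_ID would exercise cpu_clock_sample_group()
instead.  On an affected kernel the loop can report a backwards step;
with the fix it should stay monotonic:

#include <stdio.h>
#include <time.h>

int main(void)
{
        unsigned long long prev = 0, cur;
        struct timespec ts;
        long i;

        for (i = 0; i < 1000000L; i++) {
                /* Sample this thread's CPU clock (CPUCLOCK_SCHED). */
                if (clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts))
                        return 1;
                cur = (unsigned long long)ts.tv_sec * 1000000000ULL
                      + (unsigned long long)ts.tv_nsec;
                /* A POSIX CPU-time clock must never run backwards. */
                if (cur < prev) {
                        printf("backwards: %llu -> %llu\n", prev, cur);
                        return 1;
                }
                prev = cur;
        }
        puts("monotonic over all samples");
        return 0;
}

On older glibc, link with -lrt; clock_gettime() moved from librt into
libc proper only in glibc 2.17.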
