    From: Jan H. Schönherr <jschoenh@amazon.de>
    Subject: [RFC 36/60] cosched: Use hrq_of() for rq_clock() and rq_clock_task()
    Date: 7 Sep 2018
    We use rq->clock on all hierarchical runqueues and keep it updated there.
    In fact, not using the hierarchical runqueue would be incorrect, as there
    is no guarantee that the leader's CPU runqueue clock is up to date.

    Switch all obvious cases from rq_of() to hrq_of().

    Signed-off-by: Jan H. Schönherr <jschoenh@amazon.de>
    ---
    kernel/sched/core.c | 7 +++++++
    kernel/sched/fair.c | 24 ++++++++++++------------
    2 files changed, 19 insertions(+), 12 deletions(-)
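
    (Not part of the patch.) The sketch below illustrates the distinction the
    changelog relies on: rq_of() yields the CPU runqueue, whose clock is not
    guaranteed to be current for a coscheduled group, while hrq_of() yields
    the hierarchical runqueue, whose clock is kept updated. The struct layout,
    the hrq field, and the accessor bodies are simplified placeholders, not
    the actual definitions from this series.

        typedef unsigned long long u64;         /* stand-in for the kernel type */

        struct rq {
                u64 clock;                      /* runqueue clock */
                u64 clock_task;                 /* task clock (irq/steal time removed) */
        };

        struct cfs_rq {
                struct rq *rq;                  /* leader's CPU runqueue; clock may be stale */
                struct rq *hrq;                 /* hierarchical runqueue; clock kept updated */
        };

        /* Placeholder accessors; the real definitions differ. */
        static inline struct rq *rq_of(struct cfs_rq *cfs_rq)  { return cfs_rq->rq; }
        static inline struct rq *hrq_of(struct cfs_rq *cfs_rq) { return cfs_rq->hrq; }

        static inline u64 rq_clock_task(struct rq *rq) { return rq->clock_task; }

        /* After this patch, clock reads go through the hierarchical runqueue: */
        static inline u64 cfs_rq_now(struct cfs_rq *cfs_rq)
        {
                return rq_clock_task(hrq_of(cfs_rq));   /* was: rq_of(cfs_rq) */
        }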

    diff --git a/kernel/sched/core.c b/kernel/sched/core.c
    index c4358396f588..a9f5339d58cb 100644
    --- a/kernel/sched/core.c
    +++ b/kernel/sched/core.c
    @@ -138,6 +138,13 @@ static void update_rq_clock_task(struct rq *rq, s64 delta)
    #if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
    s64 steal = 0, irq_delta = 0;
    #endif
    +#ifdef CONFIG_COSCHEDULING
    + /*
    + * FIXME: We don't have IRQ and steal time aggregates on non-CPU
    + * runqueues. The following just accounts for one of the CPUs
    + * instead of all.
    + */
    +#endif
    #ifdef CONFIG_IRQ_TIME_ACCOUNTING
    irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;

    diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
    index 24d01bf8f796..fde1c4ba4bb4 100644
    --- a/kernel/sched/fair.c
    +++ b/kernel/sched/fair.c
    @@ -858,7 +858,7 @@ static void update_tg_load_avg(struct cfs_rq *cfs_rq, int force)
    static void update_curr(struct cfs_rq *cfs_rq)
    {
    struct sched_entity *curr = cfs_rq->curr;
    - u64 now = rq_clock_task(rq_of(cfs_rq));
    + u64 now = rq_clock_task(hrq_of(cfs_rq));
    u64 delta_exec;

    if (unlikely(!curr))
    @@ -903,7 +903,7 @@ update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
    if (!schedstat_enabled())
    return;

    - wait_start = rq_clock(rq_of(cfs_rq));
    + wait_start = rq_clock(hrq_of(cfs_rq));
    prev_wait_start = schedstat_val(se->statistics.wait_start);

    if (entity_is_task(se) && task_on_rq_migrating(task_of(se)) &&
    @@ -922,7 +922,7 @@ update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
    if (!schedstat_enabled())
    return;

    - delta = rq_clock(rq_of(cfs_rq)) - schedstat_val(se->statistics.wait_start);
    + delta = rq_clock(hrq_of(cfs_rq)) - schedstat_val(se->statistics.wait_start);

    if (entity_is_task(se)) {
    p = task_of(se);
    @@ -961,7 +961,7 @@ update_stats_enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
    tsk = task_of(se);

    if (sleep_start) {
    - u64 delta = rq_clock(rq_of(cfs_rq)) - sleep_start;
    + u64 delta = rq_clock(hrq_of(cfs_rq)) - sleep_start;

    if ((s64)delta < 0)
    delta = 0;
    @@ -978,7 +978,7 @@ update_stats_enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
    }
    }
    if (block_start) {
    - u64 delta = rq_clock(rq_of(cfs_rq)) - block_start;
    + u64 delta = rq_clock(hrq_of(cfs_rq)) - block_start;

    if ((s64)delta < 0)
    delta = 0;
    @@ -1052,10 +1052,10 @@ update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)

    if (tsk->state & TASK_INTERRUPTIBLE)
    __schedstat_set(se->statistics.sleep_start,
    - rq_clock(rq_of(cfs_rq)));
    + rq_clock(hrq_of(cfs_rq)));
    if (tsk->state & TASK_UNINTERRUPTIBLE)
    __schedstat_set(se->statistics.block_start,
    - rq_clock(rq_of(cfs_rq)));
    + rq_clock(hrq_of(cfs_rq)));
    }
    }

    @@ -1068,7 +1068,7 @@ update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
    /*
    * We are starting a new run period:
    */
    - se->exec_start = rq_clock_task(rq_of(cfs_rq));
    + se->exec_start = rq_clock_task(hrq_of(cfs_rq));
    }

    /**************************************************
    @@ -4253,7 +4253,7 @@ static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
    if (unlikely(cfs_rq->throttle_count))
    return cfs_rq->throttled_clock_task - cfs_rq->throttled_clock_task_time;

    - return rq_clock_task(rq_of(cfs_rq)) - cfs_rq->throttled_clock_task_time;
    + return rq_clock_task(hrq_of(cfs_rq)) - cfs_rq->throttled_clock_task_time;
    }

    /* returns 0 on failure to allocate runtime */
    @@ -4306,7 +4306,7 @@ static void expire_cfs_rq_runtime(struct cfs_rq *cfs_rq)
    struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);

    /* if the deadline is ahead of our clock, nothing to do */
    - if (likely((s64)(rq_clock(rq_of(cfs_rq)) - cfs_rq->runtime_expires) < 0))
    + if (likely((s64)(rq_clock(hrq_of(cfs_rq)) - cfs_rq->runtime_expires) < 0))
    return;

    if (cfs_rq->runtime_remaining < 0)
    @@ -4771,7 +4771,7 @@ static void sync_throttle(struct cfs_rq *cfs_rq)
    pcfs_rq = parent_cfs_rq(cfs_rq);

    cfs_rq->throttle_count = pcfs_rq->throttle_count;
    - cfs_rq->throttled_clock_task = rq_clock_task(rq_of(cfs_rq));
    + cfs_rq->throttled_clock_task = rq_clock_task(hrq_of(cfs_rq));
    }

    /* conditionally throttle active cfs_rq's from put_prev_entity() */
    @@ -4932,7 +4932,7 @@ static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq)
    #else /* CONFIG_CFS_BANDWIDTH */
    static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
    {
    - return rq_clock_task(rq_of(cfs_rq));
    + return rq_clock_task(hrq_of(cfs_rq));
    }

    static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) {}
    --
    2.9.3.1.gcba166c.dirty