    Subject: Re: [RFC v5 PATCH 1/8] sched: Rename struct rt_bandwidth to sched_bandwidth
    * Bharata B Rao <bharata@linux.vnet.ibm.com> [2010-01-05 13:28:24]:

    > sched: Rename struct rt_bandwidth to sched_bandwidth
    >
    > From: Dhaval Giani <dhaval@linux.vnet.ibm.com>
    >
    > Rename struct rt_bandwidth to sched_bandwidth and rename some of the
    > routines to generic names (s/rt_/sched_) so that they can be used
    > by CFS hard limits code in the subsequent patches.
    >
    > No functionality change by this patch.
    >
    > Signed-off-by: Dhaval Giani <dhaval@linux.vnet.ibm.com>
    > Signed-off-by: Bharata B Rao <bharata@linux.vnet.ibm.com>

    Looks good, some nitpicks below.

    Acked-by: Balbir Singh <balbir@linux.vnet.ibm.com>


    > ---
    > kernel/sched.c    | 127 ++++++++++++++++++++++++++---------------------------
    > kernel/sched_rt.c |  46 ++++++++++---------
    > 2 files changed, 86 insertions(+), 87 deletions(-)
    >
    > diff --git a/kernel/sched.c b/kernel/sched.c
    > index c535cc4..21cf0d5 100644
    > --- a/kernel/sched.c
    > +++ b/kernel/sched.c
    > @@ -139,50 +139,50 @@ struct rt_prio_array {
    >  	struct list_head queue[MAX_RT_PRIO];
    >  };
    >
    > -struct rt_bandwidth {
    > +struct sched_bandwidth {
    >  	/* nests inside the rq lock: */
    > -	raw_spinlock_t		rt_runtime_lock;
    > -	ktime_t			rt_period;
    > -	u64			rt_runtime;
    > -	struct hrtimer		rt_period_timer;
    > +	raw_spinlock_t		runtime_lock;
    > +	ktime_t			period;
    > +	u64			runtime;
    > +	struct hrtimer		period_timer;
    >  };
    >
    > -static struct rt_bandwidth def_rt_bandwidth;
    > +static struct sched_bandwidth def_rt_bandwidth;
    >
    > -static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
    > +static int do_sched_rt_period_timer(struct sched_bandwidth *sched_b, int overrun);
    >
    >  static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
    >  {
    > -	struct rt_bandwidth *rt_b =
    > -		container_of(timer, struct rt_bandwidth, rt_period_timer);
    > +	struct sched_bandwidth *sched_b =
    > +		container_of(timer, struct sched_bandwidth, period_timer);
    >  	ktime_t now;
    >  	int overrun;
    >  	int idle = 0;
    >
    >  	for (;;) {
    >  		now = hrtimer_cb_get_time(timer);
    > -		overrun = hrtimer_forward(timer, now, rt_b->rt_period);
    > +		overrun = hrtimer_forward(timer, now, sched_b->period);
    >
    >  		if (!overrun)
    >  			break;
    >
    > -		idle = do_sched_rt_period_timer(rt_b, overrun);
    > +		idle = do_sched_rt_period_timer(sched_b, overrun);
    >  	}
    >
    >  	return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
    >  }
    >
    > -static
    > -void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
    > +static void init_sched_bandwidth(struct sched_bandwidth *sched_b, u64 period,
    > +		u64 runtime, enum hrtimer_restart (*period_timer)(struct hrtimer *))
    >  {
    > -	rt_b->rt_period = ns_to_ktime(period);
    > -	rt_b->rt_runtime = runtime;
    > +	sched_b->period = ns_to_ktime(period);
    > +	sched_b->runtime = runtime;
    >
    > -	raw_spin_lock_init(&rt_b->rt_runtime_lock);
    > +	raw_spin_lock_init(&sched_b->runtime_lock);
    >
    > -	hrtimer_init(&rt_b->rt_period_timer,
    > +	hrtimer_init(&sched_b->period_timer,
    >  			CLOCK_MONOTONIC, HRTIMER_MODE_REL);
    > -	rt_b->rt_period_timer.function = sched_rt_period_timer;
    > +	sched_b->period_timer.function = *period_timer;

    Hmm... maybe I am forgetting my C, but why do you dereference the
    pointer before the assignment? You should be able to assign a function
    address directly to the function pointer. Did you see a warning?
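
    For reference, a minimal userspace sketch (timer_stub and my_callback are
    made-up names, not from the patch): a function designator decays to a
    pointer, and dereferencing a function pointer just decays back again, so
    all three assignments below store the same address and compile without
    warnings. The plain "= period_timer" form would be the usual style.

    #include <stdio.h>

    struct timer_stub {
    	int (*function)(void);		/* stand-in for hrtimer's callback slot */
    };

    static int my_callback(void)
    {
    	return 42;
    }

    int main(void)
    {
    	struct timer_stub t;
    	int (*cb)(void) = my_callback;

    	t.function = my_callback;	/* idiomatic direct assignment */
    	t.function = &my_callback;	/* explicit address-of, same result */
    	t.function = *cb;		/* legal but redundant: *cb decays back to a pointer */

    	printf("callback returned %d\n", t.function());
    	return 0;
    }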

    >  }
    >
    >  static inline int rt_bandwidth_enabled(void)
    > @@ -190,42 +190,40 @@ static inline int rt_bandwidth_enabled(void)
    >  	return sysctl_sched_rt_runtime >= 0;
    >  }
    >
    > -static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
    > +static void start_sched_bandwidth(struct sched_bandwidth *sched_b)
    >  {
    >  	ktime_t now;
    >
    > -	if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
    > +	if (!rt_bandwidth_enabled() || sched_b->runtime == RUNTIME_INF)
    >  		return;
    >
    > -	if (hrtimer_active(&rt_b->rt_period_timer))
    > +	if (hrtimer_active(&sched_b->period_timer))
    >  		return;
    >
    > -	raw_spin_lock(&rt_b->rt_runtime_lock);
    > +	raw_spin_lock(&sched_b->runtime_lock);

    I don't quite understand why this is a raw_spin_lock.
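
    For context, a sketch of the distinction as I understand it (the
    example_* names below are made up, not from the patch): in mainline both
    lock types behave the same, but under PREEMPT_RT a spinlock_t can become
    a sleeping lock while a raw_spinlock_t always spins, so paths that must
    not sleep (hrtimer callbacks, sections nested under the rq lock) need the
    raw variant.

    #include <linux/spinlock.h>

    static DEFINE_RAW_SPINLOCK(example_raw_lock);	/* always a spinning lock, even on -rt */
    static DEFINE_SPINLOCK(example_lock);		/* may become a sleeping lock on PREEMPT_RT */

    static void example_critical_sections(void)
    {
    	raw_spin_lock(&example_raw_lock);
    	/* safe where sleeping is forbidden, e.g. inside an hrtimer callback */
    	raw_spin_unlock(&example_raw_lock);

    	spin_lock(&example_lock);
    	/* fine in ordinary contexts; on -rt this may schedule */
    	spin_unlock(&example_lock);
    }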

    [snip]

    --
    Balbir

