Subject: [patch 11/16] sched: allow for positional tg_tree walks
Extend walk_tg_tree to accept a positional argument

static int walk_tg_tree_from(struct task_group *from,
			     tg_visitor down, tg_visitor up, void *data)

Existing semantics are preserved; the caller must hold rcu_read_lock() or a
sufficient analogue.
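
For illustration only (not part of this patch): a minimal caller sketch
showing the expected locking around the new entry point. The names
tg_count_visitor and count_tg_subtree below are hypothetical; tg_nop is the
existing no-op visitor.

/* Hypothetical @down visitor: count each group in the subtree. */
static int tg_count_visitor(struct task_group *tg, void *data)
{
	(*(int *)data)++;
	return 0;
}

/* Hypothetical caller: the walk itself no longer takes rcu_read_lock(). */
static int count_tg_subtree(struct task_group *from)
{
	int count = 0;

	rcu_read_lock();
	walk_tg_tree_from(from, tg_count_visitor, tg_nop, &count);
	rcu_read_unlock();

	return count;
}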

Signed-off-by: Paul Turner <pjt@google.com>
Reviewed-by: Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>

---
kernel/sched.c | 52 +++++++++++++++++++++++++++++++++++++++-------------
1 file changed, 39 insertions(+), 13 deletions(-)

Index: tip/kernel/sched.c
===================================================================
--- tip.orig/kernel/sched.c
+++ tip/kernel/sched.c
@@ -1574,20 +1574,23 @@ static inline void dec_cpu_load(struct r
 typedef int (*tg_visitor)(struct task_group *, void *);
 
 /*
- * Iterate the full tree, calling @down when first entering a node and @up when
- * leaving it for the final time.
+ * Iterate task_group tree rooted at *from, calling @down when first entering a
+ * node and @up when leaving it for the final time.
+ *
+ * Caller must hold rcu_lock or sufficient equivalent.
  */
-static int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
+static int walk_tg_tree_from(struct task_group *from,
+			     tg_visitor down, tg_visitor up, void *data)
 {
 	struct task_group *parent, *child;
 	int ret;
 
-	rcu_read_lock();
-	parent = &root_task_group;
+	parent = from;
+
 down:
 	ret = (*down)(parent, data);
 	if (ret)
-		goto out_unlock;
+		goto out;
 	list_for_each_entry_rcu(child, &parent->children, siblings) {
 		parent = child;
 		goto down;
@@ -1596,19 +1599,29 @@ up:
 		continue;
 	}
 	ret = (*up)(parent, data);
-	if (ret)
-		goto out_unlock;
+	if (ret || parent == from)
+		goto out;
 
 	child = parent;
 	parent = parent->parent;
 	if (parent)
 		goto up;
-out_unlock:
-	rcu_read_unlock();
-
+out:
 	return ret;
 }
 
+/*
+ * Iterate the full tree, calling @down when first entering a node and @up when
+ * leaving it for the final time.
+ *
+ * Caller must hold rcu_lock or sufficient equivalent.
+ */
+
+static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
+{
+	return walk_tg_tree_from(&root_task_group, down, up, data);
+}
+
 static int tg_nop(struct task_group *tg, void *data)
 {
 	return 0;
@@ -1702,7 +1715,9 @@ static int tg_load_down(struct task_grou
 
 static void update_h_load(long cpu)
 {
+	rcu_read_lock();
 	walk_tg_tree(tg_load_down, tg_nop, (void *)cpu);
+	rcu_read_unlock();
 }
 
 #endif
@@ -8687,13 +8702,19 @@ static int tg_rt_schedulable(struct task
 
 static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
 {
+	int ret;
+
 	struct rt_schedulable_data data = {
 		.tg = tg,
 		.rt_period = period,
 		.rt_runtime = runtime,
 	};
 
-	return walk_tg_tree(tg_rt_schedulable, tg_nop, &data);
+	rcu_read_lock();
+	ret = walk_tg_tree(tg_rt_schedulable, tg_nop, &data);
+	rcu_read_unlock();
+
+	return ret;
 }
 
 static int tg_set_rt_bandwidth(struct task_group *tg,
@@ -9143,6 +9164,7 @@ static int tg_cfs_schedulable_down(struc
 
 static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota)
 {
+	int ret;
 	struct cfs_schedulable_data data = {
 		.tg = tg,
 		.period = period,
@@ -9154,7 +9176,11 @@ static int __cfs_schedulable(struct task
 		do_div(data.quota, NSEC_PER_USEC);
 	}
 
-	return walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data);
+	rcu_read_lock();
+	ret = walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data);
+	rcu_read_unlock();
+
+	return ret;
 }
 #endif /* CONFIG_CFS_BANDWIDTH */
 #endif /* CONFIG_FAIR_GROUP_SCHED */


