Subject: [PATCH 1/2] sched/deadline,core: fix bandwidth update when changing cpuset cpumask
When the cpumask of an exclusive cpuset is changed, the cpuset needs
to be destroyed and recreated with the new span. Since we keep
track of the currently used deadline bandwidth in the root_domain(s)
associated with exclusive cpusets, that information is lost once
the root_domain is destroyed.

Add two methods to save and restore such bandwidth across cpuset
reconfiguration.

Signed-off-by: Juri Lelli <juri.lelli@arm.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Juri Lelli <juri.lelli@gmail.com>
Cc: linux-kernel@vger.kernel.org
---
kernel/sched/core.c | 3 +++
kernel/sched/deadline.c | 24 ++++++++++++++++++++++++
kernel/sched/sched.h | 4 ++++
3 files changed, 31 insertions(+)
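
To make the intent of the two helpers easier to see outside the diff,
here is a minimal standalone C sketch of the save/restore pattern. It
is a toy model, not kernel code: toy_root_domain, build_domain() and
destroy_domain() are illustrative names only. The idea is that the
accounted deadline bandwidth is read out before the old root_domain is
torn down and written back into the freshly built one, so admission
control keeps seeing the bandwidth of already-admitted -deadline tasks.

/* Toy model of the save/restore pattern; not kernel code. */
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for the per-root_domain struct dl_bw accounting. */
struct toy_root_domain {
	unsigned long long total_bw;	/* bandwidth of admitted -deadline tasks */
};

static struct toy_root_domain *build_domain(void)
{
	/* A freshly built domain starts with total_bw == 0. */
	return calloc(1, sizeof(struct toy_root_domain));
}

static void destroy_domain(struct toy_root_domain *rd)
{
	/* Whatever was accounted in rd is lost here. */
	free(rd);
}

int main(void)
{
	struct toy_root_domain *rd = build_domain();
	unsigned long long saved_bw;

	rd->total_bw = 42;		/* pretend some -deadline tasks were admitted */

	saved_bw = rd->total_bw;	/* role of save_dl_bw_of_rd() */
	destroy_domain(rd);		/* cpuset/span change tears the domain down */
	rd = build_domain();		/* rebuilt with the new span */
	rd->total_bw = saved_bw;	/* role of restore_dl_bw_of_rd() */

	printf("restored total_bw = %llu\n", rd->total_bw);
	destroy_domain(rd);
	return 0;
}

In the patch proper, the save happens right before
detach_destroy_domains() and the restore right after
build_sched_domains(), as the core.c hunks below show.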

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 97fe79c..5827ff4 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -6923,6 +6923,7 @@ void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
 {
 	int i, j, n;
 	int new_topology;
+	unsigned long long dl_bw = 0;
 
 	mutex_lock(&sched_domains_mutex);
 
@@ -6942,6 +6943,7 @@ void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
 				goto match1;
 		}
 		/* no match - a current sched domain not in new doms_new[] */
+		save_dl_bw_of_rd(doms_cur[i], &dl_bw);
 		detach_destroy_domains(doms_cur[i]);
 match1:
 		;
@@ -6964,6 +6966,7 @@ match1:
 		}
 		/* no match - add a new doms_new */
 		build_sched_domains(doms_new[i], dattr_new ? dattr_new + i : NULL);
+		restore_dl_bw_of_rd(doms_new[i], dl_bw);
 match2:
 		;
 	}
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 3fa8fa6..dbf12a9 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -69,6 +69,30 @@ void init_dl_bw(struct dl_bw *dl_b)
 	dl_b->total_bw = 0;
 }
 
+void save_dl_bw_of_rd(const struct cpumask *cpu_map,
+		      unsigned long long *dl_bw)
+{
+	int cpu;
+
+	cpu = cpumask_any(cpu_map);
+	rcu_read_lock_sched();
+	if (cpu < num_online_cpus())
+		*dl_bw = dl_bw_of(cpu)->total_bw;
+	rcu_read_unlock_sched();
+}
+
+void restore_dl_bw_of_rd(const struct cpumask *cpu_map,
+			 unsigned long long dl_bw)
+{
+	int cpu;
+
+	cpu = cpumask_any(cpu_map);
+	rcu_read_lock_sched();
+	if (cpu < num_online_cpus())
+		dl_bw_of(cpu)->total_bw = dl_bw;
+	rcu_read_unlock_sched();
+}
+
 void init_dl_rq(struct dl_rq *dl_rq, struct rq *rq)
 {
 	dl_rq->rb_root = RB_ROOT;
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index dc0f435..46d231a 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1276,6 +1276,10 @@ extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime
 extern struct dl_bandwidth def_dl_bandwidth;
 extern void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime);
 extern void init_dl_task_timer(struct sched_dl_entity *dl_se);
+void save_dl_bw_of_rd(const struct cpumask *cpu_map,
+		      unsigned long long *dl_bw);
+void restore_dl_bw_of_rd(const struct cpumask *cpu_map,
+			 unsigned long long dl_bw);
 
 unsigned long to_ratio(u64 period, u64 runtime);

--
2.3.0

