Date: 18 Sep 2010
Subject: [065/123] sched: Protect sched_rr_get_param() access to task->sched_class
From: Thomas Gleixner <tglx@linutronix.de>

commit dba091b9e3522b9d32fc9975e48d3b69633b45f0 upstream

sched_rr_get_param() calls
task->sched_class->get_rr_interval(task) without protection
against a concurrent sched_setscheduler() call, which modifies
task->sched_class.

Serialize the access with task_rq_lock(task) and hand the rq
pointer into get_rr_interval(), as it's needed at least in the
sched_fair implementation.
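
To see the bug in miniature: it is an unlocked call through a method
table whose pointer another CPU can swap concurrently. A userspace
sketch of the same shape and the same fix (all names below are
illustrative stand-ins, not kernel code; build with cc -pthread):

#include <pthread.h>
#include <stdio.h>

/* Illustrative stand-in for struct sched_class. */
struct sched_ops {
	unsigned int (*get_rr_interval)(void *task);
};

static unsigned int fair_interval(void *task) { (void)task; return 4; }
static unsigned int rt_interval(void *task)   { (void)task; return 100; }

static struct sched_ops fair_ops = { fair_interval };
static struct sched_ops rt_ops   = { rt_interval };

/* Illustrative stand-in for a task; the mutex plays task_rq_lock(). */
struct task {
	pthread_mutex_t lock;
	struct sched_ops *ops;	/* plays the role of ->sched_class */
};

/* Buggy: ->ops may be swapped between the load and the call. */
static unsigned int get_interval_racy(struct task *t)
{
	return t->ops->get_rr_interval(t);
}

/* Fixed: the lock serializes the dispatch against set_class(). */
static unsigned int get_interval_locked(struct task *t)
{
	unsigned int ret;

	pthread_mutex_lock(&t->lock);
	ret = t->ops->get_rr_interval(t);
	pthread_mutex_unlock(&t->lock);
	return ret;
}

/* Analogue of sched_setscheduler() changing ->sched_class. */
static void set_class(struct task *t, struct sched_ops *ops)
{
	pthread_mutex_lock(&t->lock);
	t->ops = ops;
	pthread_mutex_unlock(&t->lock);
}

int main(void)
{
	struct task t = { PTHREAD_MUTEX_INITIALIZER, &fair_ops };

	printf("racy:   %u\n", get_interval_racy(&t));
	set_class(&t, &rt_ops);
	printf("locked: %u\n", get_interval_locked(&t));
	return 0;
}

In the kernel the writer really can run on another CPU, so holding the
task's runqueue lock around the dispatch, as the hunks below do, closes
the window.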

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra <peterz@infradead.org>
LKML-Reference: <alpine.LFD.2.00.0912090930120.3089@localhost.localdomain>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
---
 include/linux/sched.h   |    3 ++-
 kernel/sched.c          |    6 +++++-
 kernel/sched_fair.c     |    6 +-----
 kernel/sched_idletask.c |    2 +-
 kernel/sched_rt.c       |    2 +-
 5 files changed, 10 insertions(+), 9 deletions(-)

--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1114,7 +1114,8 @@ struct sched_class {
 	void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
 			     int oldprio, int running);
 
-	unsigned int (*get_rr_interval) (struct task_struct *task);
+	unsigned int (*get_rr_interval) (struct rq *rq,
+					 struct task_struct *task);
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	void (*moved_group) (struct task_struct *p);
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -6946,6 +6946,8 @@ SYSCALL_DEFINE2(sched_rr_get_interval, p
 {
 	struct task_struct *p;
 	unsigned int time_slice;
+	unsigned long flags;
+	struct rq *rq;
 	int retval;
 	struct timespec t;

@@ -6962,7 +6964,9 @@ SYSCALL_DEFINE2(sched_rr_get_interval, p
 	if (retval)
 		goto out_unlock;
 
-	time_slice = p->sched_class->get_rr_interval(p);
+	rq = task_rq_lock(p, &flags);
+	time_slice = p->sched_class->get_rr_interval(rq, p);
+	task_rq_unlock(rq, &flags);
 
 	read_unlock(&tasklist_lock);
 	jiffies_to_timespec(time_slice, &t);
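
For context, the code being fixed backs the sched_rr_get_interval(2)
system call. A small test program exercising it from userspace (the
sched_setscheduler() step normally needs root or CAP_SYS_NICE):

#include <sched.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	struct sched_param sp = { .sched_priority = 1 };
	struct timespec ts;

	/* Put ourselves in SCHED_RR; likely EPERM when unprivileged. */
	if (sched_setscheduler(0, SCHED_RR, &sp) != 0)
		perror("sched_setscheduler");

	/* pid 0 == calling process; lands in the syscall patched above. */
	if (sched_rr_get_interval(0, &ts) != 0) {
		perror("sched_rr_get_interval");
		return 1;
	}
	printf("RR timeslice: %ld.%09ld s\n",
	       (long)ts.tv_sec, ts.tv_nsec);
	return 0;
}

Without the fix, a second thread calling sched_setscheduler() on the
same task while this query runs can change p->sched_class between the
pointer load and the indirect call.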
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -2003,21 +2003,17 @@ static void moved_group_fair(struct task
 }
 #endif
 
-unsigned int get_rr_interval_fair(struct task_struct *task)
+unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task)
 {
 	struct sched_entity *se = &task->se;
-	unsigned long flags;
-	struct rq *rq;
 	unsigned int rr_interval = 0;
 
 	/*
 	 * Time slice is 0 for SCHED_OTHER tasks that are on an otherwise
 	 * idle runqueue:
 	 */
-	rq = task_rq_lock(task, &flags);
 	if (rq->cfs.load.weight)
 		rr_interval = NS_TO_JIFFIES(sched_slice(&rq->cfs, se));
-	task_rq_unlock(rq, &flags);
 
 	return rr_interval;
 }
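
With the locking hoisted into the caller, the fair-class helper is
reduced to the slice computation plus a nanoseconds-to-jiffies
conversion. A rough worked example of that conversion; the
NS_TO_JIFFIES definition here is an assumption modelled on the
kernel/sched.c of this era, not part of the patch:

#include <stdio.h>

#define NSEC_PER_SEC 1000000000L
#define HZ           1000	/* assume a 1000 Hz kernel config */
/* Assumed to match kernel/sched.c: nanoseconds -> jiffies. */
#define NS_TO_JIFFIES(t) ((unsigned long)(t) / (NSEC_PER_SEC / HZ))

int main(void)
{
	/* e.g. sched_slice() handing back a 6 ms slice: */
	unsigned long long slice_ns = 6000000ULL;

	printf("%lu jiffies\n", NS_TO_JIFFIES(slice_ns));	/* 6 */
	return 0;
}

The remaining rq->cfs.load.weight test keeps the behaviour the comment
describes: a SCHED_OTHER task on an otherwise idle runqueue reports a
time slice of 0.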
--- a/kernel/sched_idletask.c
+++ b/kernel/sched_idletask.c
@@ -97,7 +97,7 @@ static void prio_changed_idle(struct rq
 	check_preempt_curr(rq, p, 0);
 }
 
-unsigned int get_rr_interval_idle(struct task_struct *task)
+unsigned int get_rr_interval_idle(struct rq *rq, struct task_struct *task)
 {
 	return 0;
 }
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -1734,7 +1734,7 @@ static void set_curr_task_rt(struct rq *
 	dequeue_pushable_task(rq, p);
 }
 
-unsigned int get_rr_interval_rt(struct task_struct *task)
+unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
 {
 	/*
 	 * Time slice is 0 for SCHED_FIFO tasks


