Subject: [PATCH 3/3] lockdep,sched: Implement a better lock pinning scheme
The problem with the existing lock pinning is that each pin has a value
of 1; this means you can simply unpin if you know it's pinned, without
having any extra information.

This scheme generates a random (16-bit) cookie for each pin and requires
this same cookie in order to unpin. This means you have to keep the
cookie in context.
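
For illustration only (not part of the patch), usage then roughly looks
like this; rq->lock is just an example lock, matching the scheduler
conversion below:

	struct pin_cookie cookie;

	raw_spin_lock(&rq->lock);
	cookie = lockdep_pin_lock(&rq->lock);	/* pin; returns a random 16-bit cookie */

	/* ... rq->lock must not be dropped in here ... */

	lockdep_unpin_lock(&rq->lock, cookie);	/* unpin; the same cookie is required */
	raw_spin_unlock(&rq->lock);

lockdep_repin_lock() re-arms the pin with the same cookie after a
legitimate unlock/relock of the pinned lock.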

No objsize difference for !LOCKDEP kernels.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
include/linux/lockdep.h | 23 ++++++++++---
kernel/locking/lockdep.c | 71 ++++++++++++++++++++++++++++++++++++------
kernel/sched/core.c | 79 ++++++++++++++++++++++++-----------------------
kernel/sched/deadline.c | 11 +++---
kernel/sched/fair.c | 6 +--
kernel/sched/idle_task.c | 2 -
kernel/sched/rt.c | 6 +--
kernel/sched/sched.h | 8 ++--
kernel/sched/stop_task.c | 2 -
9 files changed, 140 insertions(+), 68 deletions(-)

--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -356,8 +356,13 @@ extern void lockdep_set_current_reclaim_
extern void lockdep_clear_current_reclaim_state(void);
extern void lockdep_trace_alloc(gfp_t mask);

-extern void lock_pin_lock(struct lockdep_map *lock);
-extern void lock_unpin_lock(struct lockdep_map *lock);
+struct pin_cookie { unsigned int val; };
+
+#define NIL_COOKIE (struct pin_cookie){ .val = 0U, }
+
+extern struct pin_cookie lock_pin_lock(struct lockdep_map *lock);
+extern void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie);
+extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);

# define INIT_LOCKDEP .lockdep_recursion = 0, .lockdep_reclaim_gfp = 0,

@@ -373,8 +378,9 @@ extern void lock_unpin_lock(struct lockd

#define lockdep_recursing(tsk) ((tsk)->lockdep_recursion)

-#define lockdep_pin_lock(l) lock_pin_lock(&(l)->dep_map)
-#define lockdep_unpin_lock(l) lock_unpin_lock(&(l)->dep_map)
+#define lockdep_pin_lock(l) lock_pin_lock(&(l)->dep_map)
+#define lockdep_repin_lock(l,c) lock_repin_lock(&(l)->dep_map, (c))
+#define lockdep_unpin_lock(l,c) lock_unpin_lock(&(l)->dep_map, (c))

#else /* !CONFIG_LOCKDEP */

@@ -427,8 +433,13 @@ struct lock_class_key { };

#define lockdep_recursing(tsk) (0)

-#define lockdep_pin_lock(l) do { (void)(l); } while (0)
-#define lockdep_unpin_lock(l) do { (void)(l); } while (0)
+struct pin_cookie { };
+
+#define NIL_COOKIE (struct pin_cookie){ }
+
+#define lockdep_pin_lock(l) ({ struct pin_cookie cookie; cookie; })
+#define lockdep_repin_lock(l, c) do { (void)(l); (void)(c); } while (0)
+#define lockdep_unpin_lock(l, c) do { (void)(l); (void)(c); } while (0)

#endif /* !LOCKDEP */

--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -45,6 +45,7 @@
#include <linux/bitops.h>
#include <linux/gfp.h>
#include <linux/kmemcheck.h>
+#include <linux/random.h>

#include <asm/sections.h>

@@ -3585,7 +3586,35 @@ static int __lock_is_held(struct lockdep
return 0;
}

-static void __lock_pin_lock(struct lockdep_map *lock)
+static struct pin_cookie __lock_pin_lock(struct lockdep_map *lock)
+{
+ struct pin_cookie cookie = NIL_COOKIE;
+ struct task_struct *curr = current;
+ int i;
+
+ if (unlikely(!debug_locks))
+ return cookie;
+
+ for (i = 0; i < curr->lockdep_depth; i++) {
+ struct held_lock *hlock = curr->held_locks + i;
+
+ if (match_held_lock(hlock, lock)) {
+ /*
+ * Grab 16bits of randomness; this is sufficient to not
+ * be guessable and still allows some pin nesting in
+ * our u32 pin_count.
+ */
+ cookie.val = 1 + (prandom_u32() >> 16);
+ hlock->pin_count += cookie.val;
+ return cookie;
+ }
+ }
+
+ WARN(1, "pinning an unheld lock\n");
+ return cookie;
+}
+
+static void __lock_repin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
{
struct task_struct *curr = current;
int i;
@@ -3597,7 +3626,7 @@ static void __lock_pin_lock(struct lockd
struct held_lock *hlock = curr->held_locks + i;

if (match_held_lock(hlock, lock)) {
- hlock->pin_count++;
+ hlock->pin_count += cookie.val;
return;
}
}
@@ -3605,7 +3634,7 @@ static void __lock_pin_lock(struct lockd
WARN(1, "pinning an unheld lock\n");
}

-static void __lock_unpin_lock(struct lockdep_map *lock)
+static void __lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
{
struct task_struct *curr = current;
int i;
@@ -3620,7 +3649,11 @@ static void __lock_unpin_lock(struct loc
if (WARN(!hlock->pin_count, "unpinning an unpinned lock\n"))
return;

- hlock->pin_count--;
+ hlock->pin_count -= cookie.val;
+
+ if (WARN((int)hlock->pin_count < 0, "pin count corrupted\n"))
+ hlock->pin_count = 0;
+
return;
}
}
@@ -3751,24 +3784,44 @@ int lock_is_held(struct lockdep_map *loc
}
EXPORT_SYMBOL_GPL(lock_is_held);

-void lock_pin_lock(struct lockdep_map *lock)
+struct pin_cookie lock_pin_lock(struct lockdep_map *lock)
{
+ struct pin_cookie cookie = NIL_COOKIE;
unsigned long flags;

if (unlikely(current->lockdep_recursion))
- return;
+ return cookie;

raw_local_irq_save(flags);
check_flags(flags);

current->lockdep_recursion = 1;
- __lock_pin_lock(lock);
+ cookie = __lock_pin_lock(lock);
current->lockdep_recursion = 0;
raw_local_irq_restore(flags);
+
+ return cookie;
}
EXPORT_SYMBOL_GPL(lock_pin_lock);

-void lock_unpin_lock(struct lockdep_map *lock)
+void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
+{
+ unsigned long flags;
+
+ if (unlikely(current->lockdep_recursion))
+ return;
+
+ raw_local_irq_save(flags);
+ check_flags(flags);
+
+ current->lockdep_recursion = 1;
+ __lock_repin_lock(lock, cookie);
+ current->lockdep_recursion = 0;
+ raw_local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(lock_repin_lock);
+
+void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
{
unsigned long flags;

@@ -3779,7 +3832,7 @@ void lock_unpin_lock(struct lockdep_map
check_flags(flags);

current->lockdep_recursion = 1;
- __lock_unpin_lock(lock);
+ __lock_unpin_lock(lock, cookie);
current->lockdep_recursion = 0;
raw_local_irq_restore(flags);
}
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -184,7 +184,7 @@ struct rq *__task_rq_lock(struct task_st
rq = task_rq(p);
raw_spin_lock(&rq->lock);
if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
- lockdep_pin_lock(&rq->lock);
+ rf->cookie = lockdep_pin_lock(&rq->lock);
return rq;
}
raw_spin_unlock(&rq->lock);
@@ -224,7 +224,7 @@ struct rq *task_rq_lock(struct task_stru
* pair with the WMB to ensure we must then also see migrating.
*/
if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
- lockdep_pin_lock(&rq->lock);
+ rf->cookie = lockdep_pin_lock(&rq->lock);
return rq;
}
raw_spin_unlock(&rq->lock);
@@ -1190,9 +1190,9 @@ static int __set_cpus_allowed_ptr(struct
* OK, since we're going to drop the lock immediately
* afterwards anyway.
*/
- lockdep_unpin_lock(&rq->lock);
+ lockdep_unpin_lock(&rq->lock, rf.cookie);
rq = move_queued_task(rq, p, dest_cpu);
- lockdep_pin_lock(&rq->lock);
+ lockdep_repin_lock(&rq->lock, rf.cookie);
}
out:
task_rq_unlock(rq, p, &rf);
@@ -1666,8 +1666,8 @@ static inline void ttwu_activate(struct
/*
* Mark the task runnable and perform wakeup-preemption.
*/
-static void
-ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
+static void ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags,
+ struct pin_cookie cookie)
{
check_preempt_curr(rq, p, wake_flags);
p->state = TASK_RUNNING;
@@ -1679,9 +1679,9 @@ ttwu_do_wakeup(struct rq *rq, struct tas
* Our task @p is fully woken up and running; so its safe to
* drop the rq->lock, hereafter rq is only used for statistics.
*/
- lockdep_unpin_lock(&rq->lock);
+ lockdep_unpin_lock(&rq->lock, cookie);
p->sched_class->task_woken(rq, p);
- lockdep_pin_lock(&rq->lock);
+ lockdep_repin_lock(&rq->lock, cookie);
}

if (rq->idle_stamp) {
@@ -1699,7 +1699,8 @@ ttwu_do_wakeup(struct rq *rq, struct tas
}

static void
-ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags)
+ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags,
+ struct pin_cookie cookie)
{
lockdep_assert_held(&rq->lock);

@@ -1709,7 +1710,7 @@ ttwu_do_activate(struct rq *rq, struct t
#endif

ttwu_activate(rq, p, ENQUEUE_WAKEUP | ENQUEUE_WAKING);
- ttwu_do_wakeup(rq, p, wake_flags);
+ ttwu_do_wakeup(rq, p, wake_flags, cookie);
}

/*
@@ -1728,7 +1729,7 @@ static int ttwu_remote(struct task_struc
if (task_on_rq_queued(p)) {
/* check_preempt_curr() may use rq clock */
update_rq_clock(rq);
- ttwu_do_wakeup(rq, p, wake_flags);
+ ttwu_do_wakeup(rq, p, wake_flags, rf.cookie);
ret = 1;
}
__task_rq_unlock(rq, &rf);
@@ -1741,6 +1742,7 @@ void sched_ttwu_pending(void)
{
struct rq *rq = this_rq();
struct llist_node *llist = llist_del_all(&rq->wake_list);
+ struct pin_cookie cookie;
struct task_struct *p;
unsigned long flags;

@@ -1748,15 +1750,15 @@ void sched_ttwu_pending(void)
return;

raw_spin_lock_irqsave(&rq->lock, flags);
- lockdep_pin_lock(&rq->lock);
+ cookie = lockdep_pin_lock(&rq->lock);

while (llist) {
p = llist_entry(llist, struct task_struct, wake_entry);
llist = llist_next(llist);
- ttwu_do_activate(rq, p, 0);
+ ttwu_do_activate(rq, p, 0, cookie);
}

- lockdep_unpin_lock(&rq->lock);
+ lockdep_unpin_lock(&rq->lock, cookie);
raw_spin_unlock_irqrestore(&rq->lock, flags);
}

@@ -1843,6 +1845,7 @@ bool cpus_share_cache(int this_cpu, int
static void ttwu_queue(struct task_struct *p, int cpu)
{
struct rq *rq = cpu_rq(cpu);
+ struct pin_cookie cookie;

#if defined(CONFIG_SMP)
if (sched_feat(TTWU_QUEUE) && !cpus_share_cache(smp_processor_id(), cpu)) {
@@ -1853,9 +1856,9 @@ static void ttwu_queue(struct task_struc
#endif

raw_spin_lock(&rq->lock);
- lockdep_pin_lock(&rq->lock);
- ttwu_do_activate(rq, p, 0);
- lockdep_unpin_lock(&rq->lock);
+ cookie = lockdep_pin_lock(&rq->lock);
+ ttwu_do_activate(rq, p, 0, cookie);
+ lockdep_unpin_lock(&rq->lock, cookie);
raw_spin_unlock(&rq->lock);
}

@@ -2052,7 +2055,7 @@ try_to_wake_up(struct task_struct *p, un
* ensure that this_rq() is locked, @p is bound to this_rq() and not
* the current task.
*/
-static void try_to_wake_up_local(struct task_struct *p)
+static void try_to_wake_up_local(struct task_struct *p, struct pin_cookie cookie)
{
struct rq *rq = task_rq(p);

@@ -2069,11 +2072,11 @@ static void try_to_wake_up_local(struct
* disabled avoiding further scheduler activity on it and we've
* not yet picked a replacement task.
*/
- lockdep_unpin_lock(&rq->lock);
+ lockdep_unpin_lock(&rq->lock, cookie);
raw_spin_unlock(&rq->lock);
raw_spin_lock(&p->pi_lock);
raw_spin_lock(&rq->lock);
- lockdep_pin_lock(&rq->lock);
+ lockdep_repin_lock(&rq->lock, cookie);
}

if (!(p->state & TASK_NORMAL))
@@ -2084,7 +2087,7 @@ static void try_to_wake_up_local(struct
if (!task_on_rq_queued(p))
ttwu_activate(rq, p, ENQUEUE_WAKEUP);

- ttwu_do_wakeup(rq, p, 0);
+ ttwu_do_wakeup(rq, p, 0, cookie);
if (schedstat_enabled())
ttwu_stat(p, smp_processor_id(), 0);
out:
@@ -2512,9 +2515,9 @@ void wake_up_new_task(struct task_struct
* Nothing relies on rq->lock after this, so its fine to
* drop it.
*/
- lockdep_unpin_lock(&rq->lock);
+ lockdep_unpin_lock(&rq->lock, rf.cookie);
p->sched_class->task_woken(rq, p);
- lockdep_pin_lock(&rq->lock);
+ lockdep_repin_lock(&rq->lock, rf.cookie);
}
#endif
task_rq_unlock(rq, p, &rf);
@@ -2779,7 +2782,7 @@ asmlinkage __visible void schedule_tail(
*/
static __always_inline struct rq *
context_switch(struct rq *rq, struct task_struct *prev,
- struct task_struct *next)
+ struct task_struct *next, struct pin_cookie cookie)
{
struct mm_struct *mm, *oldmm;

@@ -2811,7 +2814,7 @@ context_switch(struct rq *rq, struct tas
* of the scheduler it's an obvious special-case), so we
* do an early lockdep release here:
*/
- lockdep_unpin_lock(&rq->lock);
+ lockdep_unpin_lock(&rq->lock, cookie);
spin_release(&rq->lock.dep_map, 1, _THIS_IP_);

/* Here we just switch the register state and the stack. */
@@ -3151,7 +3154,7 @@ static inline void schedule_debug(struct
* Pick up the highest-prio task:
*/
static inline struct task_struct *
-pick_next_task(struct rq *rq, struct task_struct *prev)
+pick_next_task(struct rq *rq, struct task_struct *prev, struct pin_cookie cookie)
{
const struct sched_class *class = &fair_sched_class;
struct task_struct *p;
@@ -3162,20 +3165,20 @@ pick_next_task(struct rq *rq, struct tas
*/
if (likely(prev->sched_class == class &&
rq->nr_running == rq->cfs.h_nr_running)) {
- p = fair_sched_class.pick_next_task(rq, prev);
+ p = fair_sched_class.pick_next_task(rq, prev, cookie);
if (unlikely(p == RETRY_TASK))
goto again;

/* assumes fair_sched_class->next == idle_sched_class */
if (unlikely(!p))
- p = idle_sched_class.pick_next_task(rq, prev);
+ p = idle_sched_class.pick_next_task(rq, prev, cookie);

return p;
}

again:
for_each_class(class) {
- p = class->pick_next_task(rq, prev);
+ p = class->pick_next_task(rq, prev, cookie);
if (p) {
if (unlikely(p == RETRY_TASK))
goto again;
@@ -3229,6 +3232,7 @@ static void __sched notrace __schedule(b
{
struct task_struct *prev, *next;
unsigned long *switch_count;
+ struct pin_cookie cookie;
struct rq *rq;
int cpu;

@@ -3262,7 +3266,7 @@ static void __sched notrace __schedule(b
*/
smp_mb__before_spinlock();
raw_spin_lock(&rq->lock);
- lockdep_pin_lock(&rq->lock);
+ cookie = lockdep_pin_lock(&rq->lock);

rq->clock_skip_update <<= 1; /* promote REQ to ACT */

@@ -3284,7 +3288,7 @@ static void __sched notrace __schedule(b

to_wakeup = wq_worker_sleeping(prev);
if (to_wakeup)
- try_to_wake_up_local(to_wakeup);
+ try_to_wake_up_local(to_wakeup, cookie);
}
}
switch_count = &prev->nvcsw;
@@ -3293,7 +3297,7 @@ static void __sched notrace __schedule(b
if (task_on_rq_queued(prev))
update_rq_clock(rq);

- next = pick_next_task(rq, prev);
+ next = pick_next_task(rq, prev, cookie);
clear_tsk_need_resched(prev);
clear_preempt_need_resched();
rq->clock_skip_update = 0;
@@ -3304,9 +3308,9 @@ static void __sched notrace __schedule(b
++*switch_count;

trace_sched_switch(preempt, prev, next);
- rq = context_switch(rq, prev, next); /* unlocks the rq */
+ rq = context_switch(rq, prev, next, cookie); /* unlocks the rq */
} else {
- lockdep_unpin_lock(&rq->lock);
+ lockdep_unpin_lock(&rq->lock, cookie);
raw_spin_unlock_irq(&rq->lock);
}

@@ -5389,6 +5393,7 @@ static void migrate_tasks(struct rq *dea
{
struct rq *rq = dead_rq;
struct task_struct *next, *stop = rq->stop;
+ struct pin_cookie cookie;
int dest_cpu;

/*
@@ -5420,8 +5425,8 @@ static void migrate_tasks(struct rq *dea
/*
* pick_next_task assumes pinned rq->lock.
*/
- lockdep_pin_lock(&rq->lock);
- next = pick_next_task(rq, &fake_task);
+ cookie = lockdep_pin_lock(&rq->lock);
+ next = pick_next_task(rq, &fake_task, cookie);
BUG_ON(!next);
next->sched_class->put_prev_task(rq, next);

@@ -5434,7 +5439,7 @@ static void migrate_tasks(struct rq *dea
* because !cpu_active at this point, which means load-balance
* will not interfere. Also, stop-machine.
*/
- lockdep_unpin_lock(&rq->lock);
+ lockdep_unpin_lock(&rq->lock, cookie);
raw_spin_unlock(&rq->lock);
raw_spin_lock(&next->pi_lock);
raw_spin_lock(&rq->lock);
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -670,9 +670,9 @@ static enum hrtimer_restart dl_task_time
* Nothing relies on rq->lock after this, so its safe to drop
* rq->lock.
*/
- lockdep_unpin_lock(&rq->lock);
+ lockdep_unpin_lock(&rq->lock, rf.cookie);
push_dl_task(rq);
- lockdep_pin_lock(&rq->lock);
+ lockdep_repin_lock(&rq->lock, rf.cookie);
}
#endif

@@ -1125,7 +1125,8 @@ static struct sched_dl_entity *pick_next
return rb_entry(left, struct sched_dl_entity, rb_node);
}

-struct task_struct *pick_next_task_dl(struct rq *rq, struct task_struct *prev)
+struct task_struct *
+pick_next_task_dl(struct rq *rq, struct task_struct *prev, struct pin_cookie cookie)
{
struct sched_dl_entity *dl_se;
struct task_struct *p;
@@ -1140,9 +1141,9 @@ struct task_struct *pick_next_task_dl(st
* disabled avoiding further scheduler activity on it and we're
* being very careful to re-start the picking loop.
*/
- lockdep_unpin_lock(&rq->lock);
+ lockdep_unpin_lock(&rq->lock, cookie);
pull_dl_task(rq);
- lockdep_pin_lock(&rq->lock);
+ lockdep_repin_lock(&rq->lock, cookie);
/*
* pull_rt_task() can drop (and re-acquire) rq->lock; this
* means a stop task can slip in, in which case we need to
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5542,7 +5542,7 @@ static void check_preempt_wakeup(struct
}

static struct task_struct *
-pick_next_task_fair(struct rq *rq, struct task_struct *prev)
+pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct pin_cookie cookie)
{
struct cfs_rq *cfs_rq = &rq->cfs;
struct sched_entity *se;
@@ -5655,9 +5655,9 @@ pick_next_task_fair(struct rq *rq, struc
* further scheduler activity on it and we're being very careful to
* re-start the picking loop.
*/
- lockdep_unpin_lock(&rq->lock);
+ lockdep_unpin_lock(&rq->lock, cookie);
new_tasks = idle_balance(rq);
- lockdep_pin_lock(&rq->lock);
+ lockdep_repin_lock(&rq->lock, cookie);
/*
* Because idle_balance() releases (and re-acquires) rq->lock, it is
* possible for any higher priority task to appear. In that case we
--- a/kernel/sched/idle_task.c
+++ b/kernel/sched/idle_task.c
@@ -24,7 +24,7 @@ static void check_preempt_curr_idle(stru
}

static struct task_struct *
-pick_next_task_idle(struct rq *rq, struct task_struct *prev)
+pick_next_task_idle(struct rq *rq, struct task_struct *prev, struct pin_cookie cookie)
{
put_prev_task(rq, prev);

--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1524,7 +1524,7 @@ static struct task_struct *_pick_next_ta
}

static struct task_struct *
-pick_next_task_rt(struct rq *rq, struct task_struct *prev)
+pick_next_task_rt(struct rq *rq, struct task_struct *prev, struct pin_cookie cookie)
{
struct task_struct *p;
struct rt_rq *rt_rq = &rq->rt;
@@ -1536,9 +1536,9 @@ pick_next_task_rt(struct rq *rq, struct
* disabled avoiding further scheduler activity on it and we're
* being very careful to re-start the picking loop.
*/
- lockdep_unpin_lock(&rq->lock);
+ lockdep_unpin_lock(&rq->lock, cookie);
pull_rt_task(rq);
- lockdep_pin_lock(&rq->lock);
+ lockdep_repin_lock(&rq->lock, cookie);
/*
* pull_rt_task() can drop (and re-acquire) rq->lock; this
* means a dl or stop task can slip in, in which case we need
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1202,7 +1202,8 @@ struct sched_class {
* tasks.
*/
struct task_struct * (*pick_next_task) (struct rq *rq,
- struct task_struct *prev);
+ struct task_struct *prev,
+ struct pin_cookie cookie);
void (*put_prev_task) (struct rq *rq, struct task_struct *p);

#ifdef CONFIG_SMP
@@ -1453,6 +1454,7 @@ static inline void sched_avg_update(stru

struct rq_flags {
unsigned long flags;
+ struct pin_cookie cookie;
};

struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
@@ -1464,7 +1466,7 @@ struct rq *task_rq_lock(struct task_stru
static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf)
__releases(rq->lock)
{
- lockdep_unpin_lock(&rq->lock);
+ lockdep_unpin_lock(&rq->lock, rf->cookie);
raw_spin_unlock(&rq->lock);
}

@@ -1473,7 +1475,7 @@ task_rq_unlock(struct rq *rq, struct tas
__releases(rq->lock)
__releases(p->pi_lock)
{
- lockdep_unpin_lock(&rq->lock);
+ lockdep_unpin_lock(&rq->lock, rf->cookie);
raw_spin_unlock(&rq->lock);
raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
}
--- a/kernel/sched/stop_task.c
+++ b/kernel/sched/stop_task.c
@@ -24,7 +24,7 @@ check_preempt_curr_stop(struct rq *rq, s
}

static struct task_struct *
-pick_next_task_stop(struct rq *rq, struct task_struct *prev)
+pick_next_task_stop(struct rq *rq, struct task_struct *prev, struct pin_cookie cookie)
{
struct task_struct *stop = rq->stop;

