Subject: Re: [PATCH 3/3] spinlock_debug: panic on recursive lock spin_dump()
On (01/31/16 21:33), Sergey Senozhatsky wrote:
> spin_dump() calls printk() which can attempt to reacquire the
> 'buggy' lock (one of printk's lock, or console device driver lock,
> etc.) and thus spin_dump() will recurse into itself.

how about splitting ->owner_cpu's 4 bytes as:

 | 1 byte spin_bug recursion | 1 byte spin_dump recursion counter | 2 bytes owner cpu |

?

so we have 2 bytes to store the lock owner's smp_processor_id().
(I just don't want to extend the spinlock with another 4 bytes to
fix the recursion.)
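
roughly this layout, in other words (just a sketch of the intended split;
OWNER_CPU_MASK and the two helpers below are made up for illustration and
are not part of the patch, which open-codes the shifts):

/*
 * ->owner_cpu stays a 32-bit value, carved up as:
 *
 *   bit  31      - spin_bug() recursion flag (SPIN_BUG_RECURSION)
 *   bits 16..23  - spin_dump() recursion counter (stepped by SPIN_DUMP_RECURSION)
 *   bits  0..15  - owner CPU, with USHRT_MAX meaning "no owner"
 */
#define OWNER_CPU_MASK	0x0000ffffU

static inline unsigned int lock_owner_cpu(raw_spinlock_t *lock)
{
	return lock->owner_cpu & OWNER_CPU_MASK;
}

static inline u8 lock_dump_recursion(raw_spinlock_t *lock)
{
	return (lock->owner_cpu >> 16) & 0xff;
}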


spin_dump() will increment the lock's spin_dump recursion counter on entry and
decrement it upon exit from spin_dump(). if the lock's recursion counter reaches
U8_MAX -- panic() the system, we have recursed spin_dump() into spin_dump()
enough (U8_MAX nested printk->spin_dump->printk calls look like more than enough).

for spin_bug() recursion we don't have to maintain any counter: set the
SPIN_BUG_RECURSION bit at the beginning of spin_bug() and clear it at
the end. if the lock already has the SPIN_BUG_RECURSION bit set -- panic()
the system.
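
so the entry/exit of both paths boils down to something like this (again,
just a condensed sketch of the control flow in the patch below;
lock_dump_recursion() is the made-up helper from above):

static void spin_dump(raw_spinlock_t *lock, const char *msg)
{
	/* bump this lock's spin_dump() depth (bits 16..23) */
	lock->owner_cpu += SPIN_DUMP_RECURSION;
	if (lock_dump_recursion(lock) == U8_MAX)
		spin_recursion_panic(lock, "spin_dump()");

	/* ... the existing printk()/dump_stack() reporting ... */

	lock->owner_cpu -= SPIN_DUMP_RECURSION;
}

static void spin_bug(raw_spinlock_t *lock, const char *msg)
{
	if (!debug_locks_off())
		return;

	/* a single flag bit is enough here, no counter needed */
	if (lock->owner_cpu & SPIN_BUG_RECURSION)
		spin_recursion_panic(lock, "spin_bug()");

	lock->owner_cpu |= SPIN_BUG_RECURSION;
	spin_dump(lock, msg);
	lock->owner_cpu &= ~SPIN_BUG_RECURSION;
}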

compile-tested only; will test it tomorrow.

---
kernel/locking/spinlock_debug.c | 32 +++++++++++++++++++++++++++-----
1 file changed, 27 insertions(+), 5 deletions(-)

diff --git a/kernel/locking/spinlock_debug.c b/kernel/locking/spinlock_debug.c
index 0374a59..54bcc3b 100644
--- a/kernel/locking/spinlock_debug.c
+++ b/kernel/locking/spinlock_debug.c
@@ -13,6 +13,9 @@
 #include <linux/delay.h>
 #include <linux/export.h>
 
+#define SPIN_DUMP_RECURSION (1 << 16)
+#define SPIN_BUG_RECURSION (1 << 31)
+
 void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
 			  struct lock_class_key *key)
 {
@@ -26,7 +29,7 @@ void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
 	lock->raw_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 	lock->magic = SPINLOCK_MAGIC;
 	lock->owner = SPINLOCK_OWNER_INIT;
-	lock->owner_cpu = -1;
+	lock->owner_cpu = USHRT_MAX;
 }
 
 EXPORT_SYMBOL(__raw_spin_lock_init);
@@ -44,14 +47,26 @@ void __rwlock_init(rwlock_t *lock, const char *name,
 	lock->raw_lock = (arch_rwlock_t) __ARCH_RW_LOCK_UNLOCKED;
 	lock->magic = RWLOCK_MAGIC;
 	lock->owner = SPINLOCK_OWNER_INIT;
-	lock->owner_cpu = -1;
+	lock->owner_cpu = USHRT_MAX;
 }
 
 EXPORT_SYMBOL(__rwlock_init);
 
+static void spin_recursion_panic(raw_spinlock_t *lock, const char *msg)
+{
+	panic("BUG: lock: %pS %s recursion on CPU#%d, %s/%d\n",
+		lock, msg, raw_smp_processor_id(),
+		current->comm, task_pid_nr(current));
+}
+
 static void spin_dump(raw_spinlock_t *lock, const char *msg)
 {
 	struct task_struct *owner = NULL;
+	u8 recursion_counter = (u8)(lock->owner_cpu >> 16);
+
+	lock->owner_cpu += SPIN_DUMP_RECURSION;
+	if ((recursion_counter + 1) == U8_MAX)
+		spin_recursion_panic(lock, "spin_dump()");
 
 	if (lock->owner && lock->owner != SPINLOCK_OWNER_INIT)
 		owner = lock->owner;
@@ -63,8 +78,10 @@ static void spin_dump(raw_spinlock_t *lock, const char *msg)
 		lock, lock->magic,
 		owner ? owner->comm : "<none>",
 		owner ? task_pid_nr(owner) : -1,
-		lock->owner_cpu);
+		lock->owner_cpu == USHRT_MAX ? -1 : lock->owner_cpu);
 	dump_stack();
+
+	lock->owner_cpu -= SPIN_DUMP_RECURSION;
 }
 
 static void spin_bug(raw_spinlock_t *lock, const char *msg)
@@ -72,7 +89,12 @@ static void spin_bug(raw_spinlock_t *lock, const char *msg)
 	if (!debug_locks_off())
 		return;
 
+	if (lock->owner_cpu & SPIN_BUG_RECURSION)
+		spin_recursion_panic(lock, "spin_bug()");
+
+	lock->owner_cpu |= SPIN_BUG_RECURSION;
 	spin_dump(lock, msg);
+	lock->owner_cpu &= ~SPIN_BUG_RECURSION;
 }
 
 #define SPIN_BUG_ON(cond, lock, msg) if (unlikely(cond)) spin_bug(lock, msg)
@@ -100,7 +122,7 @@ static inline void debug_spin_unlock(raw_spinlock_t *lock)
 	SPIN_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
 							lock, "wrong CPU");
 	lock->owner = SPINLOCK_OWNER_INIT;
-	lock->owner_cpu = -1;
+	lock->owner_cpu = USHRT_MAX;
 }
 
 static void __spin_lock_debug(raw_spinlock_t *lock)
@@ -244,7 +266,7 @@ static inline void debug_write_unlock(rwlock_t *lock)
 	RWLOCK_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
 							lock, "wrong CPU");
 	lock->owner = SPINLOCK_OWNER_INIT;
-	lock->owner_cpu = -1;
+	lock->owner_cpu = USHRT_MAX;
 }
 
 #if 0 /* This can cause lockups */
--
2.7.0.75.g3ee1e0f