Subject: [PATCH 1/5] lockdep: Implement bitlock map allocator
Currently, bit-based locks, e.g. bit_spin_lock, cannot be checked with the
lock correctness validator (lockdep). However, it would be useful if the
validator supported bit-based locks as well.

Therefore, this patch provides an interface for allocating and freeing a
lockdep_map for a bit-based lock, so that bit-based locks can also be
checked by the lock correctness validator. A lockdep_map is allocated per
bit address.
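
For example, a bit-lock user could be annotated roughly as follows. This is
only an illustrative sketch, not part of this patch: the *_annotated()
wrapper names are made up here, and the generic lock_map_acquire()/
lock_map_release() lockdep annotations are used just for illustration:

	#include <linux/bit_spinlock.h>	/* bit_spin_lock()/bit_spin_unlock() */
	#include <linux/bitlock.h>	/* interface added by this patch */
	#include <linux/lockdep.h>	/* lock_map_acquire()/lock_map_release() */

	/* Hypothetical wrappers, shown only to illustrate the interface. */
	static inline void bit_spin_lock_annotated(int bitnum, unsigned long *addr)
	{
		struct lockdep_map *map = bitlock_get_map(bitnum, addr, BIT_ACQUIRE);

		/* Tell lockdep we are about to take the bit lock. */
		if (map)
			lock_map_acquire(map);
		bit_spin_lock(bitnum, addr);
	}

	static inline void bit_spin_unlock_annotated(int bitnum, unsigned long *addr)
	{
		struct lockdep_map *map = bitlock_get_map(bitnum, addr, BIT_RELEASE);

		/* Matching release annotation before dropping the bit lock. */
		if (map)
			lock_map_release(map);
		bit_spin_unlock(bitnum, addr);
	}

The lock owner would call bitlock_init() once before the bit is first used
as a lock, and bitlock_free() once the bit is no longer used as a lock, so
that a lockdep_map exists for the bit address in between.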

Signed-off-by: Byungchul Park <byungchul.park@lge.com>
---
include/linux/bitlock.h | 20 ++++++
kernel/locking/Makefile | 1 +
kernel/locking/bitlock_map.c | 147 +++++++++++++++++++++++++++++++++++++++++++
lib/Kconfig.debug | 10 +++
4 files changed, 178 insertions(+)
create mode 100644 include/linux/bitlock.h
create mode 100644 kernel/locking/bitlock_map.c

diff --git a/include/linux/bitlock.h b/include/linux/bitlock.h
new file mode 100644
index 0000000..1c8a46f
--- /dev/null
+++ b/include/linux/bitlock.h
@@ -0,0 +1,20 @@
+#ifndef __LINUX_BITLOCK_H
+#define __LINUX_BITLOCK_H
+
+#include <linux/lockdep.h>
+
+struct bitlock_map {
+	struct hlist_node hash_entry;
+	unsigned long bitaddr; /* ID */
+	struct lockdep_map map;
+	int ref; /* reference count */
+};
+
+#define BIT_ACQUIRE	0	/* Increase bmap reference count */
+#define BIT_RELEASE	1	/* Decrease bmap reference count */
+#define BIT_OTHER	2	/* Do not touch bmap reference count */
+
+extern struct lockdep_map *bitlock_get_map(int bitnum, unsigned long *addr, int type);
+extern void bitlock_init(int bitnum, unsigned long *addr, const char *name, struct lock_class_key *key);
+extern void bitlock_free(int bitnum, unsigned long *addr);
+#endif /* __LINUX_BITLOCK_H */
diff --git a/kernel/locking/Makefile b/kernel/locking/Makefile
index 8e96f6c..8f4aa9e 100644
--- a/kernel/locking/Makefile
+++ b/kernel/locking/Makefile
@@ -26,3 +26,4 @@ obj-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
obj-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem-xadd.o
obj-$(CONFIG_QUEUED_RWLOCKS) += qrwlock.o
obj-$(CONFIG_LOCK_TORTURE_TEST) += locktorture.o
+obj-$(CONFIG_BITLOCK_ALLOC) += bitlock_map.o
diff --git a/kernel/locking/bitlock_map.c b/kernel/locking/bitlock_map.c
new file mode 100644
index 0000000..e2b576f
--- /dev/null
+++ b/kernel/locking/bitlock_map.c
@@ -0,0 +1,147 @@
+/*
+ * kernel/bitlock_map.c
+ *
+ * Lockdep allocator for bit-based lock
+ *
+ * Written by Byungchul Park:
+ *
+ * Thanks to Minchan Kim for coming up with the initial suggestion, that
+ * is, to make even bit-based locks able to use the runtime locking
+ * correctness validator.
+ */
+
+#include <linux/bitlock.h>
+#include <linux/hash.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#define BITLOCK_HASH_BITS 15U
+#define BITLOCK_HASH_SIZE (1U << BITLOCK_HASH_BITS)
+#define bitlock_hashentry(key) (bitlock_hash + hash_long(key, BITLOCK_HASH_BITS))
+
+static struct hlist_head bitlock_hash[BITLOCK_HASH_SIZE];
+
+static DEFINE_SPINLOCK(bitlock_spin);
+
+static inline unsigned long get_bitaddr(int bitnum, unsigned long *addr)
+{
+	return (unsigned long)((char *)addr + bitnum);
+}
+
+/* Caller must hold a lock to protect hlist traversal */
+static struct bitlock_map *look_up_bmap(int bitnum, unsigned long *addr)
+{
+	struct hlist_head *hash_head;
+	struct bitlock_map *bmap;
+	unsigned long bitaddr = get_bitaddr(bitnum, addr);
+
+	hash_head = bitlock_hashentry(bitaddr);
+	hlist_for_each_entry(bmap, hash_head, hash_entry)
+		if (bmap->bitaddr == bitaddr)
+			return bmap;
+
+	return NULL;
+}
+
+static struct bitlock_map *alloc_bmap(void)
+{
+	struct bitlock_map *ret;
+
+	ret = kmalloc(sizeof(struct bitlock_map), GFP_NOWAIT | __GFP_NOWARN);
+	if (!ret)
+		pr_warn("bitlock: Can't kmalloc a bitlock map.\n");
+
+	return ret;
+}
+
+static void free_bmap(struct bitlock_map *bmap)
+{
+	kfree(bmap);
+}
+
+struct lockdep_map *bitlock_get_map(int bitnum, unsigned long *addr, int type)
+{
+	struct bitlock_map *bmap;
+	struct lockdep_map *map = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&bitlock_spin, flags);
+
+	bmap = look_up_bmap(bitnum, addr);
+	if (bmap) {
+		/*
+		 * bmap->ref is for checking reliability.
+		 * A pair, i.e. bitlock_acquire() and
+		 * bitlock_release(), should keep bmap->ref
+		 * at zero.
+		 */
+		if (type == BIT_ACQUIRE)
+			bmap->ref++;
+		else if (type == BIT_RELEASE)
+			bmap->ref--;
+		map = &bmap->map;
+	}
+
+	spin_unlock_irqrestore(&bitlock_spin, flags);
+
+	return map;
+}
+EXPORT_SYMBOL_GPL(bitlock_get_map);
+
+void bitlock_init(int bitnum, unsigned long *addr, const char *name,
+ struct lock_class_key *key)
+{
+	struct hlist_head *hash_head;
+	struct bitlock_map *bmap;
+	unsigned long flags;
+	unsigned long bitaddr = get_bitaddr(bitnum, addr);
+
+	spin_lock_irqsave(&bitlock_spin, flags);
+
+	bmap = look_up_bmap(bitnum, addr);
+
+	/*
+	 * bitlock_init() must not be called more than once
+	 * for one address without bitlock_free() in between.
+	 * We regard that as a bug.
+	 */
+	BUG_ON(bmap);
+	if (!bmap) {
+		bmap = alloc_bmap();
+		if (bmap) {
+			hash_head = bitlock_hashentry(bitaddr);
+			bmap->bitaddr = bitaddr;
+			bmap->ref = 0;
+			lockdep_init_map(&bmap->map, name, key, 0);
+			hlist_add_head(&bmap->hash_entry, hash_head);
+		}
+	}
+
+	spin_unlock_irqrestore(&bitlock_spin, flags);
+}
+EXPORT_SYMBOL_GPL(bitlock_init);
+
+void bitlock_free(int bitnum, unsigned long *addr)
+{
+	struct bitlock_map *bmap;
+	unsigned long flags;
+
+	spin_lock_irqsave(&bitlock_spin, flags);
+
+	bmap = look_up_bmap(bitnum, addr);
+	if (bmap) {
+		/*
+		 * bmap->ref is increased on acquire and
+		 * decreased on release, so it should
+		 * normally be zero when bitlock_free()
+		 * is called. Otherwise, it's a bug.
+		 */
+		BUG_ON(bmap->ref);
+		hlist_del(&bmap->hash_entry);
+		free_bmap(bmap);
+	}
+
+	spin_unlock_irqrestore(&bitlock_spin, flags);
+}
+EXPORT_SYMBOL_GPL(bitlock_free);
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 8bfd1ac..ca2d2ee 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -993,6 +993,16 @@ config DEBUG_LOCK_ALLOC
spin_lock_init()/mutex_init()/etc., or whether there is any lock
held during task exit.

+config BITLOCK_ALLOC
+ bool "Lock debugging: lockdep_map allocator for bitlock"
+ depends on LOCKDEP_SUPPORT
+ select LOCKDEP
+ default n
+ help
+ This feature makes it possible to allocate lockdep_map for
+ bit-based lock e.g. bit_spin_lock. lockdep_map instance is
+ necessary for lock correctness checking to be used.
+
config PROVE_LOCKING
bool "Lock debugging: prove locking correctness"
depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
--
1.9.1