From: Boqun Feng <>
Subject: [RFC tip/locking/lockdep v4 15/17] lockdep: Reduce the size of lock_list
Date: Tue, 9 Jan 2018 22:38:58 +0800
We only need 4 bits for lock_list::dep and 1 bit for lock_list::is_rr. Moreover, lock_list::distance should never be greater than MAX_LOCKDEP_DEPTH (which is 48 right now), so a u16 is sufficient. This patch therefore shrinks those fields to save space in the lock_list structure; as a result, the size increment introduced by recursive read lock detection is reduced and lock_list stays the same size as before.
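As an illustrative aside (not part of the patch), here is a minimal, self-contained C sketch of the invariants the narrower types rely on. The DEP_*_BIT names and the lock_list_sketch struct are hypothetical stand-ins, not the series' actual definitions; only the MAX_LOCKDEP_DEPTH value (48) is taken from the changelog above.

/*
 * Standalone sketch (not part of the patch): compile-time checks of the
 * assumptions behind the narrower types. The DEP_*_BIT names below are
 * hypothetical stand-ins for the series' dependency bits.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_LOCKDEP_DEPTH	48

/* one bit per dependency kind: N->N, R->N, N->R, R->R */
#define DEP_NN_BIT	0
#define DEP_RN_BIT	1
#define DEP_NR_BIT	2
#define DEP_RR_BIT	3

_Static_assert(DEP_RR_BIT < 8, "the four dependency bits fit in a u8");
_Static_assert(MAX_LOCKDEP_DEPTH <= UINT16_MAX, "distance fits in a u16");

/* simplified stand-in for lock_list, only to show the field packing */
struct lock_list_sketch {
	uint16_t distance;	/* bounded by MAX_LOCKDEP_DEPTH */
	uint8_t  dep;		/* bitmap of the four dependency kinds */
	bool     is_rr;		/* picked as recursive read during BFS? */
};

int main(void)
{
	/* the three narrowed fields pack into a single 4-byte slot */
	printf("narrowed fields: %zu bytes\n", sizeof(struct lock_list_sketch));
	return 0;
}

Built with, say, gcc -std=c11, the static asserts compile cleanly and the program prints 4 bytes, i.e. the three narrowed fields together take no more room than the old int distance did on its own.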
Suggested-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Boqun Feng <boqun.feng@gmail.com>
---
 include/linux/lockdep.h  |  6 +++---
 kernel/locking/lockdep.c | 11 ++++++-----
 2 files changed, 9 insertions(+), 8 deletions(-)
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 63b3504528d5..e43c77c125dd 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -186,11 +186,11 @@ struct lock_list {
 	struct list_head		entry;
 	struct lock_class		*class;
 	struct stack_trace		trace;
-	int				distance;
+	u16				distance;
 	/* bitmap of different dependencies from head to this */
-	u16				dep;
+	u8				dep;
 	/* used by BFS to record whether this is picked as a recursive read */
-	u16				is_rr;
+	bool				is_rr;
 
 	/*
 	 * The parent field is used to implement breadth-first search, and the
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index d7cd98467243..4ca160517dcb 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -869,7 +869,7 @@ static struct lock_list *alloc_list_entry(void)
  * Add a new dependency to the head of the list:
  */
 static int add_lock_to_list(struct lock_class *this, struct list_head *head,
-			    unsigned long ip, int distance, unsigned int dep,
+			    unsigned long ip, u16 distance, unsigned int dep,
 			    struct stack_trace *trace)
 {
 	struct lock_list *entry;
@@ -1058,7 +1058,7 @@ static inline unsigned int calc_dep(int prev, int next)
  *	N: non-recursive lock
  *	R: recursive read lock
  */
-static inline int pick_dep(u16 is_rr, u16 cap_dep)
+static inline int pick_dep(bool is_rr, u8 cap_dep)
 {
 	if (is_rr) { /* could only pick -(N*)-> */
 		if (cap_dep & DEP_NN_MASK)
@@ -1143,7 +1143,8 @@ static enum bfs_result __bfs(struct lock_list *source_entry,
 	struct list_head *head;
 	struct circular_queue *cq = &lock_cq;
 	enum bfs_result ret = BFS_RNOMATCH;
-	int is_rr, next_is_rr;
+	bool is_rr;
+	int next_is_rr;
 
 	if (match(source_entry, data)) {
 		*target_entry = source_entry;
@@ -1199,7 +1200,7 @@ static enum bfs_result __bfs(struct lock_list *source_entry,
 			next_is_rr = pick_dep(is_rr, entry->dep);
 			if (next_is_rr < 0)
 				continue;
-			entry->is_rr = next_is_rr;
+			entry->is_rr = !!next_is_rr;
 
 			visit_lock_entry(entry, lock);
 			if (match(entry, data)) {
@@ -2148,7 +2149,7 @@ check_prevs_add(struct task_struct *curr, struct held_lock *next)
 		goto out_bug;
 
 	for (;;) {
-		int distance = curr->lockdep_depth - depth + 1;
+		u16 distance = curr->lockdep_depth - depth + 1;
 		hlock = curr->held_locks + depth - 1;
 
 		if (hlock->check) {
-- 
2.15.1