Subject: [PATCH 16/17] rhashtable: Convert to split_lock
NeilBrown noticed the same problem with bit spinlocks that I did,
but chose to solve it locally in the rhashtable implementation rather
than lift it all the way to the bit spin lock implementation. Convert
rhashtables to use split_locks.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: NeilBrown <neilb@suse.de>
---
 include/linux/rhashtable.h | 20 +++++++-------------
 lib/rhashtable.c           |  5 +----
 2 files changed, 8 insertions(+), 17 deletions(-)
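
[Note for reviewers, not part of the commit message: this patch only shows the
split_lock call sites, so below is a rough sketch of the API shape it relies
on, reconstructed from those call sites and from the lockdep code being
removed. The names (struct split_lock, split_lock_init, the three-argument
bit_spin_lock/_nested/_unlock, bit_spin_unlock_assign) appear in the diff; the
struct layout, the lock-class name string, and all of the bodies are
assumptions about what the earlier patches in this series define, not copies
of them.]

/*
 * Sketch only -- reconstructed from the call sites in this patch; the real
 * definitions presumably hide dep_map behind CONFIG_DEBUG_LOCK_ALLOC.
 */
struct split_lock {
	struct lockdep_map dep_map;	/* one map shared by every bit lock */
};

#define split_lock_init(sl) do {					\
	static struct lock_class_key __key;				\
	lockdep_init_map(&(sl)->dep_map, #sl, &__key, 0);		\
} while (0)

static inline void bit_spin_lock(int bitnum, unsigned long *addr,
				 struct split_lock *sl)
{
	preempt_disable();
	while (unlikely(test_and_set_bit_lock(bitnum, addr)))
		cpu_relax();
	lock_map_acquire(&sl->dep_map);
	__acquire(bitlock);
}

static inline void bit_spin_lock_nested(int bitnum, unsigned long *addr,
					struct split_lock *sl,
					unsigned int subclass)
{
	preempt_disable();
	while (unlikely(test_and_set_bit_lock(bitnum, addr)))
		cpu_relax();
	lock_acquire_exclusive(&sl->dep_map, subclass, 0, NULL, _THIS_IP_);
	__acquire(bitlock);
}

static inline void bit_spin_unlock(int bitnum, unsigned long *addr,
				   struct split_lock *sl)
{
	lock_map_release(&sl->dep_map);
	__release(bitlock);
	clear_bit_unlock(bitnum, addr);
	preempt_enable();
}

/*
 * Publish a new bucket head and drop the bit lock in one release store,
 * which is what rht_assign_unlock() needs.
 */
static inline void bit_spin_unlock_assign(unsigned long *addr,
					  unsigned long val,
					  struct split_lock *sl)
{
	lock_map_release(&sl->dep_map);
	__release(bitlock);
	smp_store_release(addr, val);
	preempt_enable();
}

The net effect is the same lockdep coverage rhashtable already had, but
supplied by the bit spinlock API itself rather than open-coded around each
bit_spin_lock()/bit_spin_unlock() call site.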

diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index 68dab3e08aad..4df164fe6222 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -65,12 +65,11 @@ struct rhash_lock_head {};
  * struct bucket_table - Table of hash buckets
  * @size: Number of hash buckets
  * @nest: Number of bits of first-level nested table.
- * @rehash: Current bucket being rehashed
  * @hash_rnd: Random seed to fold into hash
  * @walkers: List of active walkers
  * @rcu: RCU structure for freeing the table
  * @future_tbl: Table under construction during rehashing
- * @ntbl: Nested table used when out of memory.
+ * @sl: Conceptual spinlock representing every per-bucket lock.
  * @buckets: size * hash buckets
  */
 struct bucket_table {
@@ -82,7 +81,7 @@ struct bucket_table {
 
 	struct bucket_table __rcu *future_tbl;
 
-	struct lockdep_map	dep_map;
+	struct split_lock	sl;
 
 	struct rhash_lock_head __rcu *buckets[] ____cacheline_aligned_in_smp;
 };
@@ -327,8 +326,7 @@ static inline void rht_lock(struct bucket_table *tbl,
 			    struct rhash_lock_head __rcu **bkt)
 {
 	local_bh_disable();
-	bit_spin_lock(0, (unsigned long *)bkt);
-	lock_map_acquire(&tbl->dep_map);
+	bit_spin_lock(0, (unsigned long *)bkt, &tbl->sl);
 }
 
 static inline void rht_lock_nested(struct bucket_table *tbl,
@@ -336,15 +334,13 @@ static inline void rht_lock_nested(struct bucket_table *tbl,
 				   unsigned int subclass)
 {
 	local_bh_disable();
-	bit_spin_lock(0, (unsigned long *)bucket);
-	lock_acquire_exclusive(&tbl->dep_map, subclass, 0, NULL, _THIS_IP_);
+	bit_spin_lock_nested(0, (unsigned long *)bucket, &tbl->sl, subclass);
 }
 
 static inline void rht_unlock(struct bucket_table *tbl,
 			      struct rhash_lock_head __rcu **bkt)
 {
-	lock_map_release(&tbl->dep_map);
-	bit_spin_unlock(0, (unsigned long *)bkt);
+	bit_spin_unlock(0, (unsigned long *)bkt, &tbl->sl);
 	local_bh_enable();
 }
 
@@ -397,10 +393,8 @@ static inline void rht_assign_unlock(struct bucket_table *tbl,
 {
 	if (rht_is_a_nulls(obj))
 		obj = NULL;
-	lock_map_release(&tbl->dep_map);
-	rcu_assign_pointer(*bkt, (void *)obj);
-	preempt_enable();
-	__release(bitlock);
+	bit_spin_unlock_assign((unsigned long *)bkt, (unsigned long)obj,
+			       &tbl->sl);
 	local_bh_enable();
 }
 
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index c949c1e3b87c..bfdb0bf87f99 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -179,7 +179,6 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
 	struct bucket_table *tbl = NULL;
 	size_t size;
 	int i;
-	static struct lock_class_key __key;
 
 	tbl = kvzalloc(struct_size(tbl, buckets, nbuckets), gfp);
 
@@ -193,10 +192,8 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
 	if (tbl == NULL)
 		return NULL;
 
-	lockdep_init_map(&tbl->dep_map, "rhashtable_bucket", &__key, 0);
-
+	split_lock_init(&tbl->sl);
 	tbl->size = size;
-
 	rcu_head_init(&tbl->rcu);
 	INIT_LIST_HEAD(&tbl->walkers);
 
--
2.30.2