From: Waiman Long <Waiman.Long@hpe.com>
Date: Mon, 11 Jul 2016
Subject: [RFC PATCH v2 7/7] lib/dlock-list: Use the per-subnode APIs for managing lists
This patch modifies dlock-list to use the per-subnode APIs to manage
the distributed lists. As a result, the number of lists that
dlock_list_iterate() has to scan is reduced by at least half, making
the iteration a bit faster.
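
The conversion is mechanical: each per-cpu primitive used by
dlock-list is replaced by its per-subnode counterpart.

    alloc_percpu()              ->  alloc_persubnode()
    free_percpu()               ->  free_persubnode()
    per_cpu_ptr()               ->  per_subnode_ptr()
    get_cpu_ptr()/put_cpu_ptr() ->  get_subnode_ptr()/put_subnode_ptr()
    for_each_possible_cpu(cpu)  ->  for_each_subnode(snid)
    __percpu                    ->  __persubnode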

Signed-off-by: Waiman Long <Waiman.Long@hpe.com>
---
include/linux/dlock-list.h | 81 +++++++++++++++++++++----------------------
lib/dlock-list.c           | 19 +++++-----
2 files changed, 50 insertions(+), 50 deletions(-)
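
As a quick illustration of the interface this patch converts, here is
a minimal usage sketch, assuming the per-subnode APIs introduced
earlier in this series. "struct foo", "foo_list" and "foo_walk_all()"
are hypothetical names used for illustration only; they are not part
of this patch.

/*
 * Hypothetical consumer of the dlock-list API; illustration only.
 */
struct foo {
	int			data;
	struct dlock_list_node	node;	/* links foo into the dlock list */
};

static struct dlock_list_head __persubnode *foo_list;

static void foo_walk_all(void)
{
	struct dlock_list_state state = DLOCK_LIST_STATE_INIT();

	/*
	 * dlock_list_iterate() walks the per-subnode sublists one at a
	 * time and returns with the current sublist's spinlock held, so
	 * each entry can be examined safely.  The lock is released when
	 * the iterator advances to the next sublist or is exhausted.
	 */
	while (dlock_list_iterate(foo_list, &state)) {
		struct foo *f = container_of(state.curr, struct foo, node);

		/* inspect f while its sublist lock is held */
	}
}

The list itself would have been set up beforehand with
init_dlock_list_head(&foo_list), and each entry inserted with
init_dlock_list_node() followed by dlock_list_add(&f->node, foo_list).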

diff --git a/include/linux/dlock-list.h b/include/linux/dlock-list.h
index a8e1fd2..01667fc 100644
--- a/include/linux/dlock-list.h
+++ b/include/linux/dlock-list.h
@@ -20,12 +20,12 @@

#include <linux/spinlock.h>
#include <linux/list.h>
-#include <linux/percpu.h>
+#include <linux/persubnode.h>

/*
* include/linux/dlock-list.h
*
- * A distributed (per-cpu) set of lists each of which is protected by its
+ * A distributed (per-subnode) set of lists, each of which is protected by its
* own spinlock, but acts like a single consolidated list to the callers.
*
* The dlock_list_head structure contains the spinlock, the other
@@ -45,19 +45,19 @@ struct dlock_list_head {
}

/*
- * Per-cpu list iteration state
+ * Per-subnode list iteration state
*/
struct dlock_list_state {
- int cpu;
+ int snid; /* Subnode ID */
spinlock_t *lock;
- struct list_head *head; /* List head of current per-cpu list */
+ struct list_head *head; /* List head of current per-subnode list */
struct dlock_list_node *curr;
struct dlock_list_node *next;
};

#define DLOCK_LIST_STATE_INIT() \
{ \
- .cpu = -1, \
+ .snid = -1, \
.lock = NULL, \
.head = NULL, \
.curr = NULL, \
@@ -69,7 +69,7 @@ struct dlock_list_state {

static inline void init_dlock_list_state(struct dlock_list_state *state)
{
- state->cpu = -1;
+ state->snid = -1;
state->lock = NULL;
state->head = NULL;
state->curr = NULL;
@@ -83,12 +83,12 @@ static inline void init_dlock_list_state(struct dlock_list_state *state)
#endif

/*
- * Next per-cpu list entry
+ * Next per-subnode list entry
*/
#define dlock_list_next_entry(pos, member) list_next_entry(pos, member.list)

/*
- * Per-cpu node data structure
+ * Per-subnode node data structure
*/
struct dlock_list_node {
struct list_head list;
@@ -109,50 +109,50 @@ static inline void init_dlock_list_node(struct dlock_list_node *node)
}

static inline void
-free_dlock_list_head(struct dlock_list_head __percpu **pdlock_head)
+free_dlock_list_head(struct dlock_list_head __persubnode **pdlock_head)
{
- free_percpu(*pdlock_head);
+ free_persubnode(*pdlock_head);
*pdlock_head = NULL;
}

/*
- * Check if all the per-cpu lists are empty
+ * Check if all the per-subnode lists are empty
*/
-static inline bool dlock_list_empty(struct dlock_list_head __percpu *dlock_head)
+static inline bool dlock_list_empty(struct dlock_list_head __persubnode *dlock_head)
{
- int cpu;
+ int snid;

- for_each_possible_cpu(cpu)
- if (!list_empty(&per_cpu_ptr(dlock_head, cpu)->list))
+ for_each_subnode(snid)
+ if (!list_empty(&per_subnode_ptr(dlock_head, snid)->list))
return false;
return true;
}

/*
- * Helper function to find the first entry of the next per-cpu list
- * It works somewhat like for_each_possible_cpu(cpu).
+ * Helper function to find the first entry of the next per-subnode list
+ * It works somewhat like for_each_subnode(snid).
*
* Return: true if the entry is found, false if all the lists exhausted
*/
static __always_inline bool
-__dlock_list_next_cpu(struct dlock_list_head __percpu *head,
+__dlock_list_next_subnode(struct dlock_list_head __persubnode *head,
struct dlock_list_state *state)
{
if (state->lock)
spin_unlock(state->lock);
-next_cpu:
+next_subnode:
/*
- * for_each_possible_cpu(cpu)
+ * for_each_subnode(snid)
*/
- state->cpu = cpumask_next(state->cpu, cpu_possible_mask);
- if (state->cpu >= nr_cpu_ids)
- return false; /* All the per-cpu lists iterated */
+ state->snid = cpumask_next(state->snid, subnode_mask);
+ if (state->snid >= nr_subnode_ids)
+ return false; /* All the per-subnode lists iterated */

- state->head = &per_cpu_ptr(head, state->cpu)->list;
+ state->head = &per_subnode_ptr(head, state->snid)->list;
if (list_empty(state->head))
- goto next_cpu;
+ goto next_subnode;

- state->lock = &per_cpu_ptr(head, state->cpu)->lock;
+ state->lock = &per_subnode_ptr(head, state->snid)->lock;
spin_lock(state->lock);
/*
* There is a slight chance that the list may become empty just
@@ -161,7 +161,7 @@ next_cpu:
*/
if (list_empty(state->head)) {
spin_unlock(state->lock);
- goto next_cpu;
+ goto next_subnode;
}
state->curr = list_entry(state->head->next,
struct dlock_list_node, list);
@@ -169,11 +169,11 @@ next_cpu:
}

/*
- * Iterate to the next entry of the group of per-cpu lists
+ * Iterate to the next entry of the group of per-subnode lists
*
* Return: true if the next entry is found, false if all the entries iterated
*/
-static inline bool dlock_list_iterate(struct dlock_list_head __percpu *head,
+static inline bool dlock_list_iterate(struct dlock_list_head __persubnode *head,
struct dlock_list_state *state)
{
/*
@@ -184,10 +184,10 @@ static inline bool dlock_list_iterate(struct dlock_list_head __percpu *head,

if (!state->curr || (&state->curr->list == state->head)) {
/*
- * The current per-cpu list has been exhausted, try the next
- * per-cpu list.
+ * The current per-subnode list has been exhausted, try the next
+ * per-subnode list.
*/
- if (!__dlock_list_next_cpu(head, state))
+ if (!__dlock_list_next_subnode(head, state))
return false;
}

@@ -196,13 +196,13 @@ static inline bool dlock_list_iterate(struct dlock_list_head __percpu *head,
}

/*
- * Iterate to the next entry of the group of per-cpu lists and safe
+ * Iterate to the next entry of the group of per-subnode lists; it is safe
* against removal of list_entry
*
* Return: true if the next entry is found, false if all the entries iterated
*/
static inline bool
-dlock_list_iterate_safe(struct dlock_list_head __percpu *head,
+dlock_list_iterate_safe(struct dlock_list_head __persubnode *head,
struct dlock_list_state *state)
{
/*
@@ -215,10 +215,10 @@ dlock_list_iterate_safe(struct dlock_list_head __percpu *head,

if (!state->curr || (&state->curr->list == state->head)) {
/*
- * The current per-cpu list has been exhausted, try the next
- * per-cpu list.
+ * The current per-subnode list has been exhausted, try the next
+ * per-subnode list.
*/
- if (!__dlock_list_next_cpu(head, state))
+ if (!__dlock_list_next_subnode(head, state))
return false;
state->next = list_next_entry(state->curr, list);
}
@@ -228,8 +228,7 @@ dlock_list_iterate_safe(struct dlock_list_head __percpu *head,
}

extern void dlock_list_add(struct dlock_list_node *node,
- struct dlock_list_head __percpu *head);
+ struct dlock_list_head __persubnode *head);
extern void dlock_list_del(struct dlock_list_node *node);
-extern int init_dlock_list_head(struct dlock_list_head __percpu **pdlock_head);
-
+extern int init_dlock_list_head(struct dlock_list_head __persubnode **pdlock_head);
#endif /* __LINUX_DLOCK_LIST_H */
diff --git a/lib/dlock-list.c b/lib/dlock-list.c
index e1a1930..05bbf45 100644
--- a/lib/dlock-list.c
+++ b/lib/dlock-list.c
@@ -25,20 +25,21 @@
static struct lock_class_key dlock_list_key;

/*
- * Initialize the per-cpu list head
+ * Initialize the per-subnode list head
*/
-int init_dlock_list_head(struct dlock_list_head __percpu **pdlock_head)
+int init_dlock_list_head(struct dlock_list_head __persubnode **pdlock_head)
{
struct dlock_list_head *dlock_head;
- int cpu;
+ int snid;

- dlock_head = alloc_percpu(struct dlock_list_head);
+ dlock_head = alloc_persubnode(struct dlock_list_head);
if (!dlock_head)
return -ENOMEM;

- for_each_possible_cpu(cpu) {
- struct dlock_list_head *head = per_cpu_ptr(dlock_head, cpu);
+ for_each_subnode(snid) {
+ struct dlock_list_head *head;

+ head = per_subnode_ptr(dlock_head, snid);
INIT_LIST_HEAD(&head->list);
head->lock = __SPIN_LOCK_UNLOCKED(&head->lock);
lockdep_set_class(&head->lock, &dlock_list_key);
@@ -54,19 +55,19 @@ int init_dlock_list_head(struct dlock_list_head __percpu **pdlock_head)
* So we still need to use a lock to protect the content of the list.
*/
void dlock_list_add(struct dlock_list_node *node,
- struct dlock_list_head __percpu *head)
+ struct dlock_list_head __persubnode *head)
{
struct dlock_list_head *myhead;

/*
* Disable preemption to make sure that the CPU won't change.
*/
- myhead = get_cpu_ptr(head);
+ myhead = get_subnode_ptr(head);
spin_lock(&myhead->lock);
node->lockptr = &myhead->lock;
list_add(&node->list, &myhead->list);
spin_unlock(&myhead->lock);
- put_cpu_ptr(head);
+ put_subnode_ptr(head);
}

/*
--
1.7.1