From: Waiman Long <longman@redhat.com>
Subject: [PATCH v7 6/6] lib/dlock-list: Add an IRQ-safe mode to be used in interrupt handlers
Date: 5 Oct 2017
To enable the use of dlock-list inside an interrupt handler, a new
irqsafe mode can now be specified at dlock-list allocation time as an
additional argument to alloc_dlock_list_heads(). With that mode
specified, the spin_lock_irqsave()/spin_unlock_irqrestore() pair is
used instead of the regular spin_lock()/spin_unlock() calls. For the
dlock_list_unlock()/dlock_list_relock() iteration helpers, the saved
interrupt flags are stored in the dlock_list_head itself; this is safe
because only the current lock holder ever accesses them.

Signed-off-by: Waiman Long <longman@redhat.com>
---
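A minimal usage sketch for reviewers (not part of the patch). Only the
dlock-list calls below come from this series; my_dev, my_work,
my_irq_handler and my_work_done are hypothetical names:

#include <linux/dlock-list.h>
#include <linux/interrupt.h>
#include <linux/slab.h>

struct my_work {
	struct dlock_list_node dnode;
	int payload;
};

struct my_dev {
	struct dlock_list_heads pending; /* populated from hard-IRQ context */
};

static int my_dev_init(struct my_dev *dev)
{
	/* irqsafe = true: add/del will use spin_lock_irqsave() */
	return alloc_dlock_list_heads(&dev->pending, true);
}

/* Hard-IRQ context: queue an item; safe only with irqsafe mode */
static irqreturn_t my_irq_handler(int irq, void *data)
{
	struct my_dev *dev = data;
	struct my_work *w = kmalloc(sizeof(*w), GFP_ATOMIC);

	if (!w)
		return IRQ_NONE;
	w->payload = irq;
	init_dlock_list_node(&w->dnode);
	dlock_lists_add(&w->dnode, &dev->pending);
	return IRQ_HANDLED;
}

/* Process context: retire an item that has been serviced */
static void my_work_done(struct my_work *w)
{
	dlock_lists_del(&w->dnode);
	kfree(w);
}

static void my_dev_exit(struct my_dev *dev)
{
	free_dlock_list_heads(&dev->pending);
}
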
fs/super.c | 2 +-
include/linux/dlock-list.h | 18 +++++++++++++++---
lib/dlock-list.c | 44 +++++++++++++++++++++++++++++++-------------
3 files changed, 47 insertions(+), 17 deletions(-)

diff --git a/fs/super.c b/fs/super.c
index a90a070..0840e54 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -214,7 +214,7 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags,
INIT_LIST_HEAD(&s->s_inodes_wb);
spin_lock_init(&s->s_inode_wblist_lock);

- if (alloc_dlock_list_heads(&s->s_inodes))
+ if (alloc_dlock_list_heads(&s->s_inodes, false))
goto fail;
if (list_lru_init_memcg(&s->s_dentry_lru))
goto fail;
diff --git a/include/linux/dlock-list.h b/include/linux/dlock-list.h
index 16474ae..2ba7b4f 100644
--- a/include/linux/dlock-list.h
+++ b/include/linux/dlock-list.h
@@ -32,6 +32,8 @@
struct dlock_list_head {
struct list_head list;
spinlock_t lock;
+ int irqsafe; /* IRQ safe mode */
+ unsigned long flags;
} ____cacheline_aligned_in_smp;

struct dlock_list_heads {
@@ -89,7 +91,12 @@ static inline void init_dlock_list_node(struct dlock_list_node *node)
*/
static inline void dlock_list_unlock(struct dlock_list_iter *iter)
{
- spin_unlock(&iter->entry->lock);
+ struct dlock_list_head *h = iter->entry;
+
+ if (h->irqsafe)
+ spin_unlock_irqrestore(&h->lock, h->flags);
+ else
+ spin_unlock(&h->lock);
}

/**
@@ -98,13 +105,18 @@ static inline void dlock_list_unlock(struct dlock_list_iter *iter)
*/
static inline void dlock_list_relock(struct dlock_list_iter *iter)
{
- spin_lock(&iter->entry->lock);
+ struct dlock_list_head *h = iter->entry;
+
+ if (h->irqsafe)
+ spin_lock_irqsave(&h->lock, h->flags);
+ else
+ spin_lock(&h->lock);
}

/*
* Allocation and freeing of dlock list
*/
-extern int alloc_dlock_list_heads(struct dlock_list_heads *dlist);
+extern int alloc_dlock_list_heads(struct dlock_list_heads *dlist, int irqsafe);
extern void free_dlock_list_heads(struct dlock_list_heads *dlist);

/*
diff --git a/lib/dlock-list.c b/lib/dlock-list.c
index 8cd0876..4fded20 100644
--- a/lib/dlock-list.c
+++ b/lib/dlock-list.c
@@ -99,7 +99,8 @@ static int __init cpu2idx_init(void)

/**
* alloc_dlock_list_heads - Initialize and allocate the list of head entries
- * @dlist: Pointer to the dlock_list_heads structure to be initialized
+ * @dlist : Pointer to the dlock_list_heads structure to be initialized
+ * @irqsafe: IRQ safe mode flag
* Return: 0 if successful, -ENOMEM if memory allocation error
*
* This function does not allocate the dlock_list_heads structure itself. The
@@ -112,7 +113,7 @@ static int __init cpu2idx_init(void)
* The extra lists will not be ever used as all the cpu2idx entries will be
* 0 before initialization.
*/
-int alloc_dlock_list_heads(struct dlock_list_heads *dlist)
+int alloc_dlock_list_heads(struct dlock_list_heads *dlist, int irqsafe)
{
int idx, cnt = nr_dlock_lists ? nr_dlock_lists : nr_cpu_ids;

@@ -126,6 +127,7 @@ int alloc_dlock_list_heads(struct dlock_list_heads *dlist)

INIT_LIST_HEAD(&head->list);
head->lock = __SPIN_LOCK_UNLOCKED(&head->lock);
+ head->irqsafe = irqsafe;
lockdep_set_class(&head->lock, &dlock_list_key);
}
return 0;
@@ -194,13 +196,19 @@ struct dlock_list_head *dlock_list_hash(struct dlock_list_heads *dlist,
void dlock_list_add(struct dlock_list_node *node,
struct dlock_list_head *head)
{
- /*
- * There is no need to disable preemption
- */
- spin_lock(&head->lock);
- node->head = head;
- list_add(&node->list, &head->list);
- spin_unlock(&head->lock);
+ unsigned long flags;
+
+ if (head->irqsafe) {
+ spin_lock_irqsave(&head->lock, flags);
+ node->head = head;
+ list_add(&node->list, &head->list);
+ spin_unlock_irqrestore(&head->lock, flags);
+ } else {
+ spin_lock(&head->lock);
+ node->head = head;
+ list_add(&node->list, &head->list);
+ spin_unlock(&head->lock);
+ }
}

/**
@@ -232,6 +240,7 @@ void dlock_lists_add(struct dlock_list_node *node,
void dlock_lists_del(struct dlock_list_node *node)
{
struct dlock_list_head *head;
+ unsigned long flags;
bool retry;

do {
@@ -240,7 +249,11 @@ void dlock_lists_del(struct dlock_list_node *node)
__func__, (unsigned long)node))
return;

- spin_lock(&head->lock);
+ if (head->irqsafe)
+ spin_lock_irqsave(&head->lock, flags);
+ else
+ spin_lock(&head->lock);
+
if (likely(head == node->head)) {
list_del_init(&node->list);
node->head = NULL;
@@ -253,7 +266,11 @@ void dlock_lists_del(struct dlock_list_node *node)
*/
retry = (node->head != NULL);
}
- spin_unlock(&head->lock);
+
+ if (head->irqsafe)
+ spin_unlock_irqrestore(&head->lock, flags);
+ else
+ spin_unlock(&head->lock);
} while (retry);
}

@@ -272,7 +289,7 @@ struct dlock_list_node *__dlock_list_next_list(struct dlock_list_iter *iter)

restart:
if (iter->entry) {
- spin_unlock(&iter->entry->lock);
+ dlock_list_unlock(iter);
iter->entry = NULL;
}

@@ -287,7 +304,8 @@ struct dlock_list_node *__dlock_list_next_list(struct dlock_list_iter *iter)
goto next_list;

head = iter->entry = &iter->head[iter->index];
- spin_lock(&head->lock);
+ dlock_list_relock(iter);
+
/*
* There is a slight chance that the list may become empty just
* before the lock is acquired. So an additional check is
--
1.8.3.1
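
Postscript for reviewers (not part of the patch): because the saved
interrupt flags live in the dlock_list_head itself, an iterator can
drop and re-take the current bucket lock without threading a flags
variable through the loop. The sketch below assumes the
DEFINE_DLOCK_LIST_ITER()/dlist_for_each_entry() helpers introduced
earlier in this series, and reuses the hypothetical my_dev/my_work
types from the sketch above; my_process() is likewise hypothetical:

static void my_dev_scan(struct my_dev *dev)
{
	struct my_work *w;
	DEFINE_DLOCK_LIST_ITER(iter, &dev->pending);

	dlist_for_each_entry(w, &iter, dnode) {
		my_process(w);	/* must not sleep or re-enable IRQs */

		if (need_resched()) {
			/* Restores the IRQ state saved in the list head */
			dlock_list_unlock(&iter);
			cond_resched();
			/* Disables IRQs again, saving flags back into the head */
			dlock_list_relock(&iter);
		}
	}
}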