Subject: [PATCH v8 7/9] MCS Lock: Barrier corrections
From: Jason Low <jason.low2@hp.com>
Date: 2 Oct 2013
This patch corrects the way memory barriers are used in the MCS lock
and removes those that are not needed. It also adds comments on all barriers.

Signed-off-by: Jason Low <jason.low2@hp.com>
---
include/linux/mcs_spinlock.h | 13 +++++++++++--
1 files changed, 11 insertions(+), 2 deletions(-)

diff --git a/include/linux/mcs_spinlock.h b/include/linux/mcs_spinlock.h
index 96f14299..93d445d 100644
--- a/include/linux/mcs_spinlock.h
+++ b/include/linux/mcs_spinlock.h
@@ -36,16 +36,19 @@ void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
 	node->locked = 0;
 	node->next = NULL;
 
+	/* xchg() provides a memory barrier */
 	prev = xchg(lock, node);
 	if (likely(prev == NULL)) {
 		/* Lock acquired */
 		return;
 	}
 	ACCESS_ONCE(prev->next) = node;
-	smp_wmb();
 	/* Wait until the lock holder passes the lock down */
 	while (!ACCESS_ONCE(node->locked))
 		arch_mutex_cpu_relax();
+
+	/* Make sure subsequent operations happen after the lock is acquired */
+	smp_rmb();
 }
 
 /*
@@ -58,6 +61,7 @@ static void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *nod
 
 	if (likely(!next)) {
 		/*
+		 * cmpxchg() provides a memory barrier.
 		 * Release the lock by setting it to NULL
 		 */
 		if (likely(cmpxchg(lock, node, NULL) == node))
@@ -65,9 +69,14 @@ static void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *nod
 		/* Wait until the next pointer is set */
 		while (!(next = ACCESS_ONCE(node->next)))
 			arch_mutex_cpu_relax();
+	} else {
+		/*
+		 * Make sure all operations within the critical section
+		 * happen before the lock is released.
+		 */
+		smp_wmb();
 	}
 	ACCESS_ONCE(next->locked) = 1;
-	smp_wmb();
 }
 
 #endif /* __LINUX_MCS_SPINLOCK_H */
--
1.7.4.4
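
For readers following along outside the kernel tree, here is a minimal
userspace sketch of the patched lock/unlock paths using C11 atomics,
showing where the orderings land. The names (mcs_node, mcs_lock,
mcs_unlock) and the substitution of atomic_exchange(),
atomic_compare_exchange_strong() and explicit fences for the kernel's
xchg(), cmpxchg(), smp_rmb() and smp_wmb() are assumptions of this
sketch, not part of the patch.

#include <stdatomic.h>
#include <stddef.h>

struct mcs_node {
	_Atomic(struct mcs_node *) next;
	atomic_int locked;
};

static void mcs_lock(_Atomic(struct mcs_node *) *lock, struct mcs_node *node)
{
	struct mcs_node *prev;

	atomic_store_explicit(&node->locked, 0, memory_order_relaxed);
	atomic_store_explicit(&node->next, NULL, memory_order_relaxed);

	/* Like xchg(): sequentially consistent, a full barrier. */
	prev = atomic_exchange(lock, node);
	if (prev == NULL)
		return;		/* Lock acquired. */

	atomic_store_explicit(&prev->next, node, memory_order_relaxed);

	/* Wait until the lock holder passes the lock down. */
	while (!atomic_load_explicit(&node->locked, memory_order_relaxed))
		;		/* arch_mutex_cpu_relax() in the kernel */

	/* Analogue of the added smp_rmb(): order the critical section
	 * after the wait loop. */
	atomic_thread_fence(memory_order_acquire);
}

static void mcs_unlock(_Atomic(struct mcs_node *) *lock, struct mcs_node *node)
{
	struct mcs_node *next =
		atomic_load_explicit(&node->next, memory_order_relaxed);

	if (next == NULL) {
		struct mcs_node *expected = node;

		/* Like cmpxchg(): sequentially consistent, a full barrier. */
		if (atomic_compare_exchange_strong(lock, &expected, NULL))
			return;		/* No successor; lock released. */

		/* Wait until the next pointer is set. */
		while (!(next = atomic_load_explicit(&node->next,
						     memory_order_relaxed)))
			;
	} else {
		/* Analogue of the moved smp_wmb(): order the critical
		 * section before the handoff to the successor. */
		atomic_thread_fence(memory_order_release);
	}
	atomic_store_explicit(&next->locked, 1, memory_order_relaxed);
}

The structure mirrors the patch: the fast paths lean on the full
barriers already implied by the exchange and compare-exchange, so the
only explicit fences needed are the acquire after the spin in
mcs_lock() and the release before the handoff on the contended
mcs_unlock() path.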