From: Manfred Spraul <manfred@colorfullife.com>
Subject: [PATCH 3/4 V4] net/netfilter/nf_conntrack_core: update memory barriers
Change the locking for nf_conntrack_lock() and nf_conntrack_all_lock()
to the approach used by ipc/sem.c:

- use smp_store_mb() instead of a raw smp_mb().
- use smp_mb__after_spin_lock().
- for nf_conntrack_lock(), use spin_lock(&global_lock) instead of
  spin_unlock_wait(&global_lock) followed by a loop back to the start.

The last change prevents nf_conntrack_lock() from looping multiple
times; a sketch of the resulting fast path/slow path structure follows.
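
For review, here is a minimal userspace analogue of the scheme (a
sketch only: bucket_acquire()/all_acquire() and the names bucket_lock,
all_lock and locks_all are invented for the illustration; pthread
mutexes stand in for the spinlocks and C11 atomics for the kernel
barrier primitives; the analogue of nf_conntrack_all_unlock() is
omitted for brevity):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

#define NLOCKS 16

static pthread_mutex_t bucket_lock[NLOCKS] = {
        [0 ... NLOCKS - 1] = PTHREAD_MUTEX_INITIALIZER
};
static pthread_mutex_t all_lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_bool locks_all;           /* ~ nf_conntrack_locks_all */

/* ~ nf_conntrack_lock() */
static void bucket_acquire(int i)
{
        pthread_mutex_lock(&bucket_lock[i]);

        /*
         * ~ smp_load_acquire().  No extra full barrier is needed in
         * the sketch because all_acquire() below really locks and
         * unlocks each bucket mutex, so mutex release/acquire
         * ordering already publishes the flag; the kernel needs
         * smp_mb__after_spin_lock() because spin_unlock_wait()
         * never acquires the waited-on lock.
         */
        if (!atomic_load_explicit(&locks_all, memory_order_acquire))
                return;                 /* fast path */

        /* slow path: serialize against the exclusive locker */
        pthread_mutex_unlock(&bucket_lock[i]);
        pthread_mutex_lock(&all_lock);
        pthread_mutex_lock(&bucket_lock[i]);
        pthread_mutex_unlock(&all_lock);
}

/* ~ nf_conntrack_all_lock() */
static void all_acquire(void)
{
        pthread_mutex_lock(&all_lock);
        /* seq_cst store ~ smp_store_mb(nf_conntrack_locks_all, true) */
        atomic_store(&locks_all, true);

        for (int i = 0; i < NLOCKS; i++) {
                /* ~ spin_unlock_wait(): flush out fast-path holders */
                pthread_mutex_lock(&bucket_lock[i]);
                pthread_mutex_unlock(&bucket_lock[i]);
        }
}

The slow path takes the global lock before retaking the bucket lock:
once both are held, the exclusive locker has either finished or not
yet started, so a single pass suffices and the old retry loop is gone.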

Signed-off-by: Manfred Spraul <manfred@colorfullife.com>
---
net/netfilter/nf_conntrack_core.c | 36 ++++++++++++++++++++++--------------
1 file changed, 22 insertions(+), 14 deletions(-)
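
[Reviewer note, not for the changelog: the barrier pairing is the
classic store-buffering (SB) pattern. smp_store_mb() is a store
followed by a full barrier, and smp_mb__after_spin_lock() upgrades
the spin_lock() acquire to a full barrier. With locks_all short for
nf_conntrack_locks_all and is_locked() as shorthand for the load
inside spin_unlock_wait():

  CPU 0: nf_conntrack_lock()          CPU 1: nf_conntrack_all_lock()
  ----------------------------------  ----------------------------------
  spin_lock(lock);           /* W */  smp_store_mb(locks_all, true);
  smp_mb__after_spin_lock();          /* W + full barrier */
  r0 = locks_all;            /* R */  r1 = is_locked(lock);      /* R */

With a full barrier on both sides, the outcome r0 == false &&
r1 == false is forbidden: either nf_conntrack_lock() sees the flag
and takes the slow path, or nf_conntrack_all_lock() sees the bucket
lock held and waits for it in spin_unlock_wait().]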

diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 7d90a5d..4ed229b 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -79,20 +79,29 @@ static __read_mostly bool nf_conntrack_locks_all;
 
 void nf_conntrack_lock(spinlock_t *lock) __acquires(lock)
 {
+        /* 1) Acquire the lock */
         spin_lock(lock);
-        while (unlikely(nf_conntrack_locks_all)) {
-                spin_unlock(lock);
 
-                /*
-                 * Order the 'nf_conntrack_locks_all' load vs. the
-                 * spin_unlock_wait() loads below, to ensure
-                 * that 'nf_conntrack_locks_all_lock' is indeed held:
-                 */
-                smp_rmb(); /* spin_lock(&nf_conntrack_locks_all_lock) */
-                spin_unlock_wait(&nf_conntrack_locks_all_lock);
-                spin_lock(lock);
-        }
+        /* 2) Order writing the lock and reading nf_conntrack_locks_all */
+        smp_mb__after_spin_lock();
+
+        /* 3) read nf_conntrack_locks_all, with ACQUIRE semantics */
+        if (likely(smp_load_acquire(&nf_conntrack_locks_all) == false))
+                return;
+
+        /* fast path failed, unlock */
+        spin_unlock(lock);
+
+        /* Slow path 1) get global lock */
+        spin_lock(&nf_conntrack_locks_all_lock);
+
+        /* Slow path 2) get the lock we want */
+        spin_lock(lock);
+
+        /* Slow path 3) release the global lock */
+        spin_unlock(&nf_conntrack_locks_all_lock);
 }
+
 EXPORT_SYMBOL_GPL(nf_conntrack_lock);
 
 static void nf_conntrack_double_unlock(unsigned int h1, unsigned int h2)
@@ -132,15 +141,14 @@ static void nf_conntrack_all_lock(void)
         int i;
 
         spin_lock(&nf_conntrack_locks_all_lock);
-        nf_conntrack_locks_all = true;
 
         /*
-         * Order the above store of 'nf_conntrack_locks_all' against
+         * Order the store of 'nf_conntrack_locks_all' against
          * the spin_unlock_wait() loads below, such that if
          * nf_conntrack_lock() observes 'nf_conntrack_locks_all'
          * we must observe nf_conntrack_locks[] held:
          */
-        smp_mb(); /* spin_lock(&nf_conntrack_locks_all_lock) */
+        smp_store_mb(nf_conntrack_locks_all, true);
 
         for (i = 0; i < CONNTRACK_LOCKS; i++) {
                 spin_unlock_wait(&nf_conntrack_locks[i]);
--
2.5.5