From: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Subject: [PATCH tip/core/rcu 36/40] srcu: Crude control of expedited grace periods
Date: 12 Apr 2017
SRCU's implementation of expedited grace periods has always assumed
that the SRCU instance is idle when the expedited request arrives.
This commit improves matters a bit by maintaining a count of the number
of outstanding expedited requests, thus allowing prior non-expedited
grace periods to accommodate these requests by shifting to expedited mode.
However, any non-expedited wait already in progress will still wait for
the full duration.
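
To illustrate the idea, here is a minimal userspace sketch (not the patch's
code; the names exp_cnt, readers, and wait_for_readers are invented for the
example): a counter of pending expedited requests biases a bounded retry
loop, so a scan that is already underway gets one extra pass before giving
up and sleeping, much as ->srcu_exp_cnt biases try_check_zero() below.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int exp_cnt;	/* pending expedited requests (cf. ->srcu_exp_cnt) */
static atomic_int readers;	/* readers still in their critical sections */

/*
 * Check for readers at most trycount times, plus one extra time whenever
 * an expedited request is pending.  Returns true once readers drain.
 */
static bool wait_for_readers(int trycount, int *checks)
{
	for (;;) {
		(*checks)++;
		if (atomic_load(&readers) == 0)
			return true;
		if (--trycount + !!atomic_load(&exp_cnt) <= 0)
			return false;	/* caller would retry after a delay */
	}
}

int main(void)
{
	int checks;

	atomic_store(&readers, 1);	/* a reader never finishes in this demo */

	checks = 0;
	wait_for_readers(1, &checks);
	printf("no expedited request pending: %d check(s)\n", checks);

	atomic_fetch_add(&exp_cnt, 1);	/* an expedited request arrives */
	checks = 0;
	wait_for_readers(1, &checks);
	printf("expedited request pending:    %d check(s)\n", checks);
	atomic_fetch_sub(&exp_cnt, 1);

	return 0;
}

The same bias is why the patch has process_srcu() reschedule with zero delay
while ->srcu_exp_cnt is nonzero: pending expedited requests make the state
machine poll immediately rather than wait for SRCU_INTERVAL.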

Improved control of expedited grace periods is planned, but one step
at a time.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
---
include/linux/srcu.h | 1 +
kernel/rcu/srcu.c | 84 ++++++++++++++++++++++++++++------------------------
2 files changed, 47 insertions(+), 38 deletions(-)

diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index e7dbc01b61a1..73a1b6296224 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -42,6 +42,7 @@ struct srcu_array {
struct srcu_struct {
unsigned long completed;
unsigned long srcu_gp_seq;
+ atomic_t srcu_exp_cnt;
struct srcu_array __percpu *per_cpu_ref;
spinlock_t queue_lock; /* protect ->srcu_cblist */
struct rcu_segcblist srcu_cblist;
diff --git a/kernel/rcu/srcu.c b/kernel/rcu/srcu.c
index 97aec5d7b316..b62919be99e7 100644
--- a/kernel/rcu/srcu.c
+++ b/kernel/rcu/srcu.c
@@ -43,6 +43,7 @@ static int init_srcu_struct_fields(struct srcu_struct *sp)
{
sp->completed = 0;
sp->srcu_gp_seq = 0;
+ atomic_set(&sp->srcu_exp_cnt, 0);
spin_lock_init(&sp->queue_lock);
rcu_segcblist_init(&sp->srcu_cblist);
INIT_DELAYED_WORK(&sp->work, process_srcu);
@@ -179,7 +180,6 @@ static bool srcu_readers_active(struct srcu_struct *sp)
return sum;
}

-#define SRCU_CALLBACK_BATCH 10
#define SRCU_INTERVAL 1

/**
@@ -191,6 +191,7 @@ static bool srcu_readers_active(struct srcu_struct *sp)
*/
void cleanup_srcu_struct(struct srcu_struct *sp)
{
+ WARN_ON_ONCE(atomic_read(&sp->srcu_exp_cnt));
if (WARN_ON(srcu_readers_active(sp)))
return; /* Leakage unless caller handles error. */
if (WARN_ON(!rcu_segcblist_empty(&sp->srcu_cblist)))
@@ -238,13 +239,10 @@ EXPORT_SYMBOL_GPL(__srcu_read_unlock);
* We use an adaptive strategy for synchronize_srcu() and especially for
* synchronize_srcu_expedited(). We spin for a fixed time period
* (defined below) to allow SRCU readers to exit their read-side critical
- * sections. If there are still some readers after 10 microseconds,
- * we repeatedly block for 1-millisecond time periods. This approach
- * has done well in testing, so there is no need for a config parameter.
+ * sections. If there are still some readers after a few microseconds,
+ * we repeatedly block for 1-millisecond time periods.
*/
#define SRCU_RETRY_CHECK_DELAY 5
-#define SYNCHRONIZE_SRCU_TRYCOUNT 2
-#define SYNCHRONIZE_SRCU_EXP_TRYCOUNT 12

/*
* Start an SRCU grace period.
@@ -261,16 +259,16 @@ static void srcu_gp_start(struct srcu_struct *sp)
}

/*
- * Wait until all readers counted by array index idx complete, but loop
- * a maximum of trycount times. The caller must ensure that ->completed
- * is not changed while checking.
+ * Wait until all readers counted by array index idx complete, but
+ * loop an additional time if there is an expedited grace period pending.
+ * The caller must ensure that ->completed is not changed while checking.
*/
static bool try_check_zero(struct srcu_struct *sp, int idx, int trycount)
{
for (;;) {
if (srcu_readers_active_idx_check(sp, idx))
return true;
- if (--trycount <= 0)
+ if (--trycount + !!atomic_read(&sp->srcu_exp_cnt) <= 0)
return false;
udelay(SRCU_RETRY_CHECK_DELAY);
}
@@ -358,7 +356,7 @@ static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay);
/*
* Helper function for synchronize_srcu() and synchronize_srcu_expedited().
*/
-static void __synchronize_srcu(struct srcu_struct *sp, int trycount)
+static void __synchronize_srcu(struct srcu_struct *sp)
{
struct rcu_synchronize rcu;
struct rcu_head *head = &rcu.head;
@@ -395,6 +393,32 @@ static void __synchronize_srcu(struct srcu_struct *sp, int trycount)
}

/**
+ * synchronize_srcu_expedited - Brute-force SRCU grace period
+ * @sp: srcu_struct with which to synchronize.
+ *
+ * Wait for an SRCU grace period to elapse, but be more aggressive about
+ * spinning rather than blocking when waiting.
+ *
+ * Note that synchronize_srcu_expedited() has the same deadlock and
+ * memory-ordering properties as does synchronize_srcu().
+ */
+void synchronize_srcu_expedited(struct srcu_struct *sp)
+{
+ bool do_norm = rcu_gp_is_normal();
+
+ if (!do_norm) {
+ atomic_inc(&sp->srcu_exp_cnt);
+ smp_mb__after_atomic(); /* increment before GP. */
+ }
+ __synchronize_srcu(sp);
+ if (!do_norm) {
+ smp_mb__before_atomic(); /* GP before decrement. */
+ atomic_dec(&sp->srcu_exp_cnt);
+ }
+}
+EXPORT_SYMBOL_GPL(synchronize_srcu_expedited);
+
+/**
* synchronize_srcu - wait for prior SRCU read-side critical-section completion
* @sp: srcu_struct with which to synchronize.
*
@@ -435,29 +459,14 @@ static void __synchronize_srcu(struct srcu_struct *sp, int trycount)
*/
void synchronize_srcu(struct srcu_struct *sp)
{
- __synchronize_srcu(sp, (rcu_gp_is_expedited() && !rcu_gp_is_normal())
- ? SYNCHRONIZE_SRCU_EXP_TRYCOUNT
- : SYNCHRONIZE_SRCU_TRYCOUNT);
+ if (rcu_gp_is_expedited())
+ synchronize_srcu_expedited(sp);
+ else
+ __synchronize_srcu(sp);
}
EXPORT_SYMBOL_GPL(synchronize_srcu);

/**
- * synchronize_srcu_expedited - Brute-force SRCU grace period
- * @sp: srcu_struct with which to synchronize.
- *
- * Wait for an SRCU grace period to elapse, but be more aggressive about
- * spinning rather than blocking when waiting.
- *
- * Note that synchronize_srcu_expedited() has the same deadlock and
- * memory-ordering properties as does synchronize_srcu().
- */
-void synchronize_srcu_expedited(struct srcu_struct *sp)
-{
- __synchronize_srcu(sp, SYNCHRONIZE_SRCU_EXP_TRYCOUNT);
-}
-EXPORT_SYMBOL_GPL(synchronize_srcu_expedited);
-
-/**
* srcu_barrier - Wait until all in-flight call_srcu() callbacks complete.
* @sp: srcu_struct on which to wait for in-flight callbacks.
*/
@@ -484,7 +493,7 @@ EXPORT_SYMBOL_GPL(srcu_batches_completed);
* Core SRCU state machine. Advance callbacks from ->batch_check0 to
* ->batch_check1 and then to ->batch_done as readers drain.
*/
-static void srcu_advance_batches(struct srcu_struct *sp, int trycount)
+static void srcu_advance_batches(struct srcu_struct *sp)
{
int idx;

@@ -515,8 +524,8 @@ static void srcu_advance_batches(struct srcu_struct *sp, int trycount)

if (rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) == SRCU_STATE_SCAN1) {
idx = 1 ^ (sp->completed & 1);
- if (!try_check_zero(sp, idx, trycount))
- return; /* readers present, retry after SRCU_INTERVAL */
+ if (!try_check_zero(sp, idx, 1))
+ return; /* readers present, retry later. */
srcu_flip(sp);
rcu_seq_set_state(&sp->srcu_gp_seq, SRCU_STATE_SCAN2);
}
@@ -528,9 +537,8 @@ static void srcu_advance_batches(struct srcu_struct *sp, int trycount)
* so check at least twice in quick succession after a flip.
*/
idx = 1 ^ (sp->completed & 1);
- trycount = trycount < 2 ? 2 : trycount;
- if (!try_check_zero(sp, idx, trycount))
- return; /* readers present, retry after SRCU_INTERVAL */
+ if (!try_check_zero(sp, idx, 2))
+ return; /* readers present, retry later. */
srcu_gp_end(sp);
}
}
@@ -596,8 +604,8 @@ void process_srcu(struct work_struct *work)

sp = container_of(work, struct srcu_struct, work.work);

- srcu_advance_batches(sp, 1);
+ srcu_advance_batches(sp);
srcu_invoke_callbacks(sp);
- srcu_reschedule(sp, SRCU_INTERVAL);
+ srcu_reschedule(sp, atomic_read(&sp->srcu_exp_cnt) ? 0 : SRCU_INTERVAL);
}
EXPORT_SYMBOL_GPL(process_srcu);
--
2.5.2