Subject: [PATCH 0/1] rcu/sync: simplify the state machine
Let me finally try to close the gestalt ;)

This version doesn't add the new features yet, and it doesn't remove the
"must die" rcu_sync_enter_start(). But with this patch we are ready; I just
think that this should come as a separate change.

To simplify the review, see the most important parts of the code with
the patch applied below.

Oleg.
-------------------------------------------------------------------------------

struct rcu_sync {
        int gp_state;
        int gp_count;
        wait_queue_head_t gp_wait;

        struct rcu_head cb_head;
};

enum { GP_IDLE = 0, GP_ENTER, GP_PASSED, GP_EXIT, GP_REPLAY };
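
/*
 * A sketch of the intended transitions, as I read the code below (a
 * review aid, not part of the patch):
 *
 *      GP_IDLE   -> GP_ENTER    rcu_sync_enter() on an idle rsp
 *      GP_ENTER  -> GP_PASSED   rcu_sync_func() sees gp_count != 0
 *                               (likewise GP_REPLAY -> GP_PASSED)
 *      GP_PASSED -> GP_EXIT     the last rcu_sync_exit(), callback queued
 *      GP_EXIT   -> GP_REPLAY   another enter()+exit() pair beat the
 *                               pending callback
 *      GP_REPLAY -> GP_EXIT     rcu_sync_func() requeues itself
 *      GP_EXIT   -> GP_IDLE     rcu_sync_func() sees gp_count == 0
 */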

#define rss_lock gp_wait.lock

static void rcu_sync_func(struct rcu_head *rcu);       /* defined below */

static void rcu_sync_call(struct rcu_sync *rsp)
{
        call_rcu(&rsp->cb_head, rcu_sync_func);
}

static void rcu_sync_func(struct rcu_head *rcu)
{
        struct rcu_sync *rsp = container_of(rcu, struct rcu_sync, cb_head);
        unsigned long flags;

        WARN_ON_ONCE(rsp->gp_state == GP_IDLE);
        WARN_ON_ONCE(rsp->gp_state == GP_PASSED);

        spin_lock_irqsave(&rsp->rss_lock, flags);
        if (rsp->gp_count) {
                /*
                 * We're at least a GP after the GP_IDLE->GP_ENTER transition.
                 */
                rsp->gp_state = GP_PASSED;
                wake_up_locked(&rsp->gp_wait);
        } else if (rsp->gp_state == GP_REPLAY) {
                /*
                 * A new rcu_sync_exit() has happened; requeue the callback to
                 * catch a later GP.
                 */
                rsp->gp_state = GP_EXIT;
                rcu_sync_call(rsp);
        } else {
                /*
                 * We're at least a GP after the last rcu_sync_exit(); everybody
                 * will now have observed the write side critical section.
                 * Let 'em rip!
                 */
                rsp->gp_state = GP_IDLE;
        }
        spin_unlock_irqrestore(&rsp->rss_lock, flags);
}
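
/*
 * Spelling out the GP_REPLAY case above (a review aid, not part of the
 * patch): GP_IDLE may only be set a full GP after the *last*
 * rcu_sync_exit(), so if a new enter()+exit() pair completes while the
 * GP_EXIT callback is still in flight, the callback must requeue itself:
 *
 *      rcu_sync_exit()   ->  gp_count == 0, GP_PASSED -> GP_EXIT, call_rcu()
 *      rcu_sync_enter()  ->  gp_count == 1, no wait: GP_EXIT >= GP_PASSED
 *      rcu_sync_exit()   ->  gp_count == 0, GP_EXIT -> GP_REPLAY
 *      rcu_sync_func()   ->  GP_REPLAY -> GP_EXIT, rcu_sync_call() again
 *      rcu_sync_func()   ->  gp_count == 0, GP_EXIT -> GP_IDLE
 */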

void rcu_sync_enter(struct rcu_sync *rsp)
{
        int gp_state;

        spin_lock_irq(&rsp->rss_lock);
        gp_state = rsp->gp_state;
        if (gp_state == GP_IDLE) {
                rsp->gp_state = GP_ENTER;
                WARN_ON_ONCE(rsp->gp_count);
                /*
                 * Note that we could simply do rcu_sync_call(rsp) here and
                 * avoid the "if (gp_state == GP_IDLE)" block below.
                 *
                 * However, synchronize_rcu() can be faster if rcu_expedited
                 * or rcu_blocking_is_gp() is true.
                 *
                 * Another reason is that we can't wait for an RCU callback
                 * if we are called at early boot time, but this shouldn't
                 * happen.
                 */
        }
        rsp->gp_count++;
        spin_unlock_irq(&rsp->rss_lock);

        if (gp_state == GP_IDLE) {
                /*
                 * See the comment above; this simply does the "synchronous"
                 * call_rcu(rcu_sync_func) which does GP_ENTER -> GP_PASSED.
                 */
                synchronize_rcu();
                rcu_sync_func(&rsp->cb_head);
                /* Not really needed, wait_event() would see GP_PASSED. */
                return;
        }

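        /*
         * Note (review aid, not in the patch): GP_EXIT and GP_REPLAY also
         * satisfy ">= GP_PASSED"; either way a full GP has elapsed since
         * gp_state last left GP_IDLE, so the writer can proceed at once.
         */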
        wait_event(rsp->gp_wait, rsp->gp_state >= GP_PASSED);
}

void rcu_sync_exit(struct rcu_sync *rsp)
{
        WARN_ON_ONCE(rsp->gp_state == GP_IDLE);
        WARN_ON_ONCE(rsp->gp_count == 0);

        spin_lock_irq(&rsp->rss_lock);
        if (!--rsp->gp_count) {
                if (rsp->gp_state == GP_PASSED) {
                        rsp->gp_state = GP_EXIT;
                        rcu_sync_call(rsp);
                } else if (rsp->gp_state == GP_EXIT) {
                        rsp->gp_state = GP_REPLAY;
                }
        }
        spin_unlock_irq(&rsp->rss_lock);
}
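
To see the state machine in motion: the main in-tree user of rcu_sync is
percpu_rw_semaphore, whose writer side brackets its critical section with
rcu_sync_enter()/rcu_sync_exit() while readers check rcu_sync_is_idle() to
pick the fast path. A minimal sketch along those lines follows; it is not
part of the patch, it assumes rcu_sync_init() takes just the rcu_sync
pointer (after the earlier gp_type removal) and rcu_sync_is_idle() as in
rcu_sync.h, and the do_exclusive_work/fast_path/slow_path helpers are made
up for illustration:

/* made-up helpers, just for illustration */
static void do_exclusive_work(void);
static void fast_path(void);
static void slow_path(void);

static struct rcu_sync dummy_rss;       /* rcu_sync_init(&dummy_rss) at setup */

static void writer_side(void)
{
        rcu_sync_enter(&dummy_rss);     /* GP_IDLE -> GP_ENTER -> GP_PASSED */
        /* every RCU reader now sees !rcu_sync_is_idle() */
        do_exclusive_work();
        rcu_sync_exit(&dummy_rss);      /* GP_PASSED -> GP_EXIT (-> GP_IDLE) */
}

static void reader_side(void)
{
        rcu_read_lock();
        if (rcu_sync_is_idle(&dummy_rss))
                fast_path();            /* no writer in sight */
        else
                slow_path();            /* writer active or GP still in flight */
        rcu_read_unlock();
}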
