Subject: [PATCH 4.10 024/111] net: solve a NAPI race

    4.10-stable review patch.  If anyone has any objections, please let me know.

    ------------------

    From: Eric Dumazet <edumazet@google.com>


    commit 39e6c8208d7b6fb9d2047850fb3327db567b564b upstream.

While playing with mlx4 hardware timestamping of RX packets, I found
that some packets were received by the TCP stack with a ~200 ms delay...

Since the timestamp was provided by the NIC, and my probe was added
in tcp_v4_rcv() while in the BH handler, I was confident it was not
a sender issue or a drop in the network.

This would happen with very low probability, but it was hurting RPC
workloads.

A NAPI driver normally re-arms the device IRQ after napi_complete_done(),
i.e. after NAPI_STATE_SCHED has been cleared, so that the hard IRQ
handler can grab the bit again.
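
To make that window concrete, here is a sketch of the usual driver
poll() shape (the xyz_* helpers are hypothetical placeholders, not taken
from any real driver):

	static int xyz_poll(struct napi_struct *napi, int budget)
	{
		/* Process up to @budget received packets (hypothetical). */
		int done = xyz_process_rx_ring(napi, budget);

		if (done < budget && napi_complete_done(napi, done))
			/* Re-arm the device IRQ; the race described below
			 * sits around this completion/re-arm sequence.
			 */
			xyz_enable_irq(napi);
		return done;
	}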

The problem is that if another point in the stack grabs the
NAPI_STATE_SCHED bit while IRQs are not disabled, we might later have an
IRQ firing that finds this bit set, right before napi_complete_done()
clears it.

    This can happen with busy polling users, or if gro_flush_timeout is
    used. But some other uses of napi_schedule() in drivers can cause this
    as well.

thread 1                                 thread 2 (could be on same cpu, or not)

// busy polling or napi_watchdog()
napi_schedule();
...
napi->poll()

device polling:
read 2 packets from ring buffer

                                         Additional 3rd packet is
                                         available.

                                         device hard irq

                                         // does nothing because
                                         // NAPI_STATE_SCHED bit is owned by
                                         // thread 1
                                         napi_schedule();

napi_complete_done(napi, 2);
rearm_irq();

Note that rearm_irq() will not force the device to send an additional
IRQ for the packet it has already signaled (the 3rd packet in my example).

This patch adds a new NAPI_STATE_MISSED bit, which napi_schedule_prep()
sets whenever it could not grab NAPI_STATE_SCHED.

    Then napi_complete_done() properly reschedules the napi to make sure
    we do not miss something.

Since we manipulate multiple bits at once, use cmpxchg(), as in
sk_busy_loop(), to provide proper transactions.
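
The shape shared by both cmpxchg() loops in this patch is the classic
lock-free read/modify/retry transaction; a minimal sketch, where n is
the napi_struct and compute_new_state() is a hypothetical stand-in for
the call-site-specific bit manipulation:

	unsigned long val, new;

	do {
		/* Snapshot the state word, compute the desired new value,
		 * and retry if another CPU changed it underneath us.
		 */
		val = READ_ONCE(n->state);
		new = compute_new_state(val);	/* hypothetical helper */
	} while (cmpxchg(&n->state, val, new) != val);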

In v2, I changed napi_watchdog() to use a relaxed variant of
napi_schedule_prep(): there is no need to set NAPI_STATE_MISSED from
this point.

In v3, I added more details to the changelog and cleared
NAPI_STATE_MISSED in busy_poll_stop().

In v4, I incorporated the ideas given by Alexander Duyck in his v3 review.
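
One of those ideas is the branchless construct used to conditionally set
a bit. Assuming the bit layout from this patch (NAPIF_STATE_SCHED == 1,
NAPIF_STATE_MISSED == 2), it works out as follows:

	/* new |= (val & NAPIF_STATE_SCHED) / NAPIF_STATE_SCHED *
	 *	  NAPIF_STATE_MISSED;
	 *
	 * is equivalent to:
	 *	if (val & NAPIF_STATE_SCHED)
	 *		new |= NAPIF_STATE_MISSED;
	 *
	 * SCHED set:   (val & 1) / 1 * 2 == 2  -> MISSED is ORed in
	 * SCHED clear: (0)       / 1 * 2 == 0  -> new is unchanged
	 *
	 * Divisor and multiplier are compile-time constants, so the
	 * division folds away and no conditional branch is emitted.
	 */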

    Signed-off-by: Eric Dumazet <edumazet@google.com>
    Cc: Alexander Duyck <alexander.duyck@gmail.com>
    Signed-off-by: David S. Miller <davem@davemloft.net>
    Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
    ---
 include/linux/netdevice.h |   29 +++++-----------
 net/core/dev.c            |   81 ++++++++++++++++++++++++++++++++++++++++++----
 2 files changed, 83 insertions(+), 27 deletions(-)

--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -330,6 +330,7 @@ struct napi_struct {
 
 enum {
 	NAPI_STATE_SCHED,	/* Poll is scheduled */
+	NAPI_STATE_MISSED,	/* reschedule a napi */
 	NAPI_STATE_DISABLE,	/* Disable pending */
 	NAPI_STATE_NPSVC,	/* Netpoll - don't dequeue from poll_list */
 	NAPI_STATE_HASHED,	/* In NAPI hash (busy polling possible) */
@@ -338,12 +339,13 @@ enum {
 };
 
 enum {
-	NAPIF_STATE_SCHED	 = (1UL << NAPI_STATE_SCHED),
-	NAPIF_STATE_DISABLE	 = (1UL << NAPI_STATE_DISABLE),
-	NAPIF_STATE_NPSVC	 = (1UL << NAPI_STATE_NPSVC),
-	NAPIF_STATE_HASHED	 = (1UL << NAPI_STATE_HASHED),
-	NAPIF_STATE_NO_BUSY_POLL = (1UL << NAPI_STATE_NO_BUSY_POLL),
-	NAPIF_STATE_IN_BUSY_POLL = (1UL << NAPI_STATE_IN_BUSY_POLL),
+	NAPIF_STATE_SCHED	 = BIT(NAPI_STATE_SCHED),
+	NAPIF_STATE_MISSED	 = BIT(NAPI_STATE_MISSED),
+	NAPIF_STATE_DISABLE	 = BIT(NAPI_STATE_DISABLE),
+	NAPIF_STATE_NPSVC	 = BIT(NAPI_STATE_NPSVC),
+	NAPIF_STATE_HASHED	 = BIT(NAPI_STATE_HASHED),
+	NAPIF_STATE_NO_BUSY_POLL = BIT(NAPI_STATE_NO_BUSY_POLL),
+	NAPIF_STATE_IN_BUSY_POLL = BIT(NAPI_STATE_IN_BUSY_POLL),
 };
 
 enum gro_result {
@@ -413,20 +415,7 @@ static inline bool napi_disable_pending(
 	return test_bit(NAPI_STATE_DISABLE, &n->state);
 }
 
-/**
- *	napi_schedule_prep - check if NAPI can be scheduled
- *	@n: NAPI context
- *
- * Test if NAPI routine is already running, and if not mark
- * it as running. This is used as a condition variable to
- * insure only one NAPI poll instance runs. We also make
- * sure there is no pending NAPI disable.
- */
-static inline bool napi_schedule_prep(struct napi_struct *n)
-{
-	return !napi_disable_pending(n) &&
-		!test_and_set_bit(NAPI_STATE_SCHED, &n->state);
-}
+bool napi_schedule_prep(struct napi_struct *n);
 
 /**
  *	napi_schedule - schedule NAPI poll
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4913,6 +4913,39 @@ void __napi_schedule(struct napi_struct
 EXPORT_SYMBOL(__napi_schedule);
 
 /**
+ * napi_schedule_prep - check if napi can be scheduled
+ * @n: napi context
+ *
+ * Test if NAPI routine is already running, and if not mark
+ * it as running. This is used as a condition variable
+ * insure only one NAPI poll instance runs. We also make
+ * sure there is no pending NAPI disable.
+ */
+bool napi_schedule_prep(struct napi_struct *n)
+{
+	unsigned long val, new;
+
+	do {
+		val = READ_ONCE(n->state);
+		if (unlikely(val & NAPIF_STATE_DISABLE))
+			return false;
+		new = val | NAPIF_STATE_SCHED;
+
+		/* Sets STATE_MISSED bit if STATE_SCHED was already set
+		 * This was suggested by Alexander Duyck, as compiler
+		 * emits better code than :
+		 * if (val & NAPIF_STATE_SCHED)
+		 *     new |= NAPIF_STATE_MISSED;
+		 */
+		new |= (val & NAPIF_STATE_SCHED) / NAPIF_STATE_SCHED *
+						   NAPIF_STATE_MISSED;
+	} while (cmpxchg(&n->state, val, new) != val);
+
+	return !(val & NAPIF_STATE_SCHED);
+}
+EXPORT_SYMBOL(napi_schedule_prep);
+
+/**
  * __napi_schedule_irqoff - schedule for receive
  * @n: entry to schedule
  *
@@ -4943,7 +4976,7 @@ EXPORT_SYMBOL(__napi_complete);
 
 bool napi_complete_done(struct napi_struct *n, int work_done)
 {
-	unsigned long flags;
+	unsigned long flags, val, new;
 
 	/*
 	 * 1) Don't let napi dequeue from the cpu poll list
@@ -4967,14 +5000,33 @@ bool napi_complete_done(struct napi_stru
 		else
 			napi_gro_flush(n, false);
 	}
-	if (likely(list_empty(&n->poll_list))) {
-		WARN_ON_ONCE(!test_and_clear_bit(NAPI_STATE_SCHED, &n->state));
-	} else {
+	if (unlikely(!list_empty(&n->poll_list))) {
 		/* If n->poll_list is not empty, we need to mask irqs */
 		local_irq_save(flags);
-		__napi_complete(n);
+		list_del_init(&n->poll_list);
 		local_irq_restore(flags);
 	}
+
+	do {
+		val = READ_ONCE(n->state);
+
+		WARN_ON_ONCE(!(val & NAPIF_STATE_SCHED));
+
+		new = val & ~(NAPIF_STATE_MISSED | NAPIF_STATE_SCHED);
+
+		/* If STATE_MISSED was set, leave STATE_SCHED set,
+		 * because we will call napi->poll() one more time.
+		 * This C code was suggested by Alexander Duyck to help gcc.
+		 */
+		new |= (val & NAPIF_STATE_MISSED) / NAPIF_STATE_MISSED *
+						    NAPIF_STATE_SCHED;
+	} while (cmpxchg(&n->state, val, new) != val);
+
+	if (unlikely(val & NAPIF_STATE_MISSED)) {
+		__napi_schedule(n);
+		return false;
+	}
+
 	return true;
 }
 EXPORT_SYMBOL(napi_complete_done);
@@ -5000,6 +5052,16 @@ static void busy_poll_stop(struct napi_s
 {
 	int rc;
 
+	/* Busy polling means there is a high chance device driver hard irq
+	 * could not grab NAPI_STATE_SCHED, and that NAPI_STATE_MISSED was
+	 * set in napi_schedule_prep().
+	 * Since we are about to call napi->poll() once more, we can safely
+	 * clear NAPI_STATE_MISSED.
+	 *
+	 * Note: x86 could use a single "lock and ..." instruction
+	 * to perform these two clear_bit()
+	 */
+	clear_bit(NAPI_STATE_MISSED, &napi->state);
 	clear_bit(NAPI_STATE_IN_BUSY_POLL, &napi->state);
 
 	local_bh_disable();
@@ -5146,8 +5208,13 @@ static enum hrtimer_restart napi_watchdo
 	struct napi_struct *napi;
 
 	napi = container_of(timer, struct napi_struct, timer);
-	if (napi->gro_list)
-		napi_schedule(napi);
+
+	/* Note : we use a relaxed variant of napi_schedule_prep() not setting
+	 * NAPI_STATE_MISSED, since we do not react to a device IRQ.
+	 */
+	if (napi->gro_list && !napi_disable_pending(napi) &&
+	    !test_and_set_bit(NAPI_STATE_SCHED, &napi->state))
+		__napi_schedule_irqoff(napi);
 
 	return HRTIMER_NORESTART;
 }
