Subject: [PATCH 4.4 08/92] net_sched: fq: take care of throttled flows before reuse
4.4-stable review patch.  If anyone has any objections, please let me know.

------------------

From: Eric Dumazet <edumazet@google.com>

[ Upstream commit 7df40c2673a1307c3260aab6f9d4b9bf97ca8fd7 ]

Normally, a socket cannot be freed/reused unless all its TX packets
have left the qdisc and been TX-completed. However, connect(AF_UNSPEC)
allows this to happen.
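
To illustrate the reuse path (a minimal sketch, not part of this
patch; the helper name is made up): userspace can dissolve a connected
datagram socket's association with AF_UNSPEC, after which the socket
may be reconnected and its flow reused while packets it queued earlier
are still sitting in fq:

#include <string.h>
#include <sys/socket.h>

static int udp_dissolve(int fd)
{
	struct sockaddr sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_family = AF_UNSPEC;	/* dissolve the association */
	return connect(fd, &sa, sizeof(sa));
}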

With commit fc59d5bdf1e3 ("pkt_sched: fq: clear time_next_packet for
reused flows") we cleared f->time_next_packet but took no special
action if the flow was still in the throttled rb-tree.

Since f->time_next_packet is the key used in the rb-tree searches,
blindly clearing it might break rb-tree integrity. We need to make
sure the flow is no longer in the rb-tree to avoid this problem.
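
For context, the q->delayed rb-tree is ordered by time_next_packet;
the insertion path in net/sched/sch_fq.c looks roughly like this
(abridged), which is why the key must stay stable while the node is
linked:

static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f)
{
	struct rb_node **p = &q->delayed.rb_node, *parent = NULL;

	while (*p) {
		struct fq_flow *aux;

		parent = *p;
		aux = rb_entry(parent, struct fq_flow, rate_node);
		/* descend by comparing the key that fc59d5bdf1e3 cleared */
		if (f->time_next_packet >= aux->time_next_packet)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&f->rate_node, parent, p);
	rb_insert_color(&f->rate_node, &q->delayed);
	q->throttled_flows++;
}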

Fixes: fc59d5bdf1e3 ("pkt_sched: fq: clear time_next_packet for reused flows")
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
 net/sched/sch_fq.c | 37 +++++++++++++++++++++++++------------
 1 file changed, 25 insertions(+), 12 deletions(-)

--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -126,6 +126,28 @@ static bool fq_flow_is_detached(const st
 	return f->next == &detached;
 }
 
+static bool fq_flow_is_throttled(const struct fq_flow *f)
+{
+	return f->next == &throttled;
+}
+
+static void fq_flow_add_tail(struct fq_flow_head *head, struct fq_flow *flow)
+{
+	if (head->first)
+		head->last->next = flow;
+	else
+		head->first = flow;
+	head->last = flow;
+	flow->next = NULL;
+}
+
+static void fq_flow_unset_throttled(struct fq_sched_data *q, struct fq_flow *f)
+{
+	rb_erase(&f->rate_node, &q->delayed);
+	q->throttled_flows--;
+	fq_flow_add_tail(&q->old_flows, f);
+}
+
 static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f)
 {
 	struct rb_node **p = &q->delayed.rb_node, *parent = NULL;
@@ -153,15 +175,6 @@ static void fq_flow_set_throttled(struct
 
 static struct kmem_cache *fq_flow_cachep __read_mostly;
 
-static void fq_flow_add_tail(struct fq_flow_head *head, struct fq_flow *flow)
-{
-	if (head->first)
-		head->last->next = flow;
-	else
-		head->first = flow;
-	head->last = flow;
-	flow->next = NULL;
-}
 
 /* limit number of collected flows per round */
 #define FQ_GC_MAX 8
@@ -265,6 +278,8 @@ static struct fq_flow *fq_classify(struc
 		     f->socket_hash != sk->sk_hash)) {
 			f->credit = q->initial_quantum;
 			f->socket_hash = sk->sk_hash;
+			if (fq_flow_is_throttled(f))
+				fq_flow_unset_throttled(q, f);
 			f->time_next_packet = 0ULL;
 		}
 		return f;
@@ -419,9 +434,7 @@ static void fq_check_throttled(struct fq
 			q->time_next_delayed_flow = f->time_next_packet;
 			break;
 		}
-		rb_erase(p, &q->delayed);
-		q->throttled_flows--;
-		fq_flow_add_tail(&q->old_flows, f);
+		fq_flow_unset_throttled(q, f);
 	}
 }

