Subject: [PATCH 4.9 38/93] tcp_bbr: introduce bbr_init_pacing_rate_from_rtt() helper
4.9-stable review patch.  If anyone has any objections, please let me know.

------------------

From: Neal Cardwell <ncardwell@google.com>

[ Upstream commit 79135b89b8af304456bd67916b80116ddf03d7b6 ]
Introduce a helper to initialize the BBR pacing rate unconditionally,
based on the current cwnd and RTT estimate. This is a pure refactor,
but it is needed by two follow-up fixes.

Fixes: 0f8782ea1497 ("tcp_bbr: add BBR congestion control")
Signed-off-by: Neal Cardwell <ncardwell@google.com>
Signed-off-by: Yuchung Cheng <ycheng@google.com>
Signed-off-by: Soheil Hassas Yeganeh <soheil@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
 net/ipv4/tcp_bbr.c | 23 ++++++++++++++++++-----
 1 file changed, 18 insertions(+), 5 deletions(-)

--- a/net/ipv4/tcp_bbr.c
+++ b/net/ipv4/tcp_bbr.c
@@ -192,6 +192,23 @@ static u32 bbr_bw_to_pacing_rate(struct
         return rate;
 }
 
+/* Initialize pacing rate to: high_gain * init_cwnd / RTT. */
+static void bbr_init_pacing_rate_from_rtt(struct sock *sk)
+{
+        struct tcp_sock *tp = tcp_sk(sk);
+        u64 bw;
+        u32 rtt_us;
+
+        if (tp->srtt_us) {              /* any RTT sample yet? */
+                rtt_us = max(tp->srtt_us >> 3, 1U);
+        } else {                        /* no RTT sample yet */
+                rtt_us = USEC_PER_MSEC; /* use nominal default RTT */
+        }
+        bw = (u64)tp->snd_cwnd * BW_UNIT;
+        do_div(bw, rtt_us);
+        sk->sk_pacing_rate = bbr_bw_to_pacing_rate(sk, bw, bbr_high_gain);
+}
+
 /* Pace using current bw estimate and a gain factor. In order to help drive the
  * network toward lower queues while maintaining high utilization and low
  * latency, the average pacing rate aims to be slightly (~1%) lower than the
@@ -776,7 +793,6 @@ static void bbr_init(struct sock *sk)
 {
         struct tcp_sock *tp = tcp_sk(sk);
         struct bbr *bbr = inet_csk_ca(sk);
-        u64 bw;
 
         bbr->prior_cwnd = 0;
         bbr->tso_segs_goal = 0;  /* default segs per skb until first ACK */
@@ -792,11 +808,8 @@ static void bbr_init(struct sock *sk)
 
         minmax_reset(&bbr->bw, bbr->rtt_cnt, 0);  /* init max bw to 0 */
 
-        /* Initialize pacing rate to: high_gain * init_cwnd / RTT. */
-        bw = (u64)tp->snd_cwnd * BW_UNIT;
-        do_div(bw, (tp->srtt_us >> 3) ? : USEC_PER_MSEC);
         sk->sk_pacing_rate = 0;  /* force an update of sk_pacing_rate */
-        bbr_set_pacing_rate(sk, bw, bbr_high_gain);
+        bbr_init_pacing_rate_from_rtt(sk);
 
         bbr->restore_cwnd = 0;
         bbr->round_start = 0;

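For reference, the arithmetic the new bbr_init_pacing_rate_from_rtt() helper performs can be sketched as a stand-alone user-space program. This is only an illustration, not part of the patch: the constants (BW_SCALE = 24, BBR_SCALE = 8, and a high gain of roughly 2.885 in BBR_UNIT fixed point) are assumed to match tcp_bbr.c, and bw_to_pacing_rate() below is a hypothetical stand-in for the kernel's bbr_bw_to_pacing_rate() path, not a kernel API.

/* Stand-alone sketch (not kernel code) of the initial pacing rate math:
 * pacing_rate = high_gain * (init_cwnd / RTT), with bw kept in BW_UNIT
 * fixed point just as the helper above keeps it.
 */
#include <stdint.h>
#include <stdio.h>

#define BW_SCALE        24              /* assumed, per tcp_bbr.c */
#define BW_UNIT         (1ULL << BW_SCALE)
#define BBR_SCALE       8               /* assumed, per tcp_bbr.c */
#define BBR_UNIT        (1 << BBR_SCALE)
#define USEC_PER_SEC    1000000ULL

/* ~2/ln(2) ~= 2.885, expressed in BBR_UNIT fixed point (assumed value). */
static const int bbr_high_gain = BBR_UNIT * 2885 / 1000 + 1;

/* bw is packets per usec scaled by BW_UNIT; returns bytes per second. */
static uint64_t bw_to_pacing_rate(uint64_t bw, uint32_t bytes_per_pkt, int gain)
{
        uint64_t rate = bw;

        rate *= bytes_per_pkt;          /* packets -> bytes */
        rate *= gain;
        rate >>= BBR_SCALE;             /* strip the gain's fixed-point scale */
        rate *= USEC_PER_SEC;           /* per usec -> per second */
        return rate >> BW_SCALE;        /* strip bw's fixed-point scale */
}

int main(void)
{
        uint32_t snd_cwnd = 10;         /* a typical initial cwnd, in packets */
        uint32_t rtt_us = 1000;         /* nominal 1 ms default when no RTT sample exists */
        uint32_t bytes_per_pkt = 1500;  /* illustrative per-packet wire size */

        /* bw = cwnd / RTT, kept in BW_UNIT fixed point as in the helper. */
        uint64_t bw = (uint64_t)snd_cwnd * BW_UNIT / rtt_us;

        printf("initial pacing rate: %llu bytes/sec\n",
               (unsigned long long)bw_to_pacing_rate(bw, bytes_per_pkt, bbr_high_gain));
        return 0;
}

With these example numbers the result is roughly 43 MB/s, i.e. 2.885 * 10 * 1500 bytes per millisecond, which is the intended high_gain * init_cwnd / RTT startup rate.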