Subject: [128/156] net: backlog functions rename
    2.6.33-stable review patch.  If anyone has any objections, please let us know.

    ------------------

    From: Zhu Yi <yi.zhu@intel.com>

    [ Upstream commit a3a858ff18a72a8d388e31ab0d98f7e944841a62 ]

    sk_add_backlog -> __sk_add_backlog
    sk_add_backlog_limited -> sk_add_backlog
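
That is, the old unconditional enqueue keeps its behaviour under the
double-underscore name, while the plain sk_add_backlog() now denotes the
length-limited variant, which can fail with -ENOBUFS; callers are expected
to check the return value and drop on failure. A minimal sketch of the
resulting calling convention (illustrative only; example_queue_rcv_skb is a
hypothetical handler, not a function touched by this patch):

#include <net/sock.h>

/* Sketch of the post-rename convention: sk_add_backlog() (formerly
 * sk_add_backlog_limited()) may return -ENOBUFS, so the receive path
 * must be prepared to drop; __sk_add_backlog() (formerly
 * sk_add_backlog()) still queues unconditionally.
 */
static int example_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int rc = 0;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk))
		rc = sk_backlog_rcv(sk, skb);	/* process in place */
	else if (sk_add_backlog(sk, skb)) {	/* backlog full */
		bh_unlock_sock(sk);
		atomic_inc(&sk->sk_drops);
		kfree_skb(skb);
		return -ENOBUFS;
	}
	bh_unlock_sock(sk);
	return rc;
}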

    Signed-off-by: Zhu Yi <yi.zhu@intel.com>
    Acked-by: Eric Dumazet <eric.dumazet@gmail.com>
    Signed-off-by: David S. Miller <davem@davemloft.net>
    Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>


    ---
 include/net/sock.h       |    6 +++---
 net/core/sock.c          |    2 +-
 net/dccp/minisocks.c     |    2 +-
 net/ipv4/tcp_ipv4.c      |    2 +-
 net/ipv4/tcp_minisocks.c |    2 +-
 net/ipv4/udp.c           |    2 +-
 net/ipv6/tcp_ipv6.c      |    2 +-
 net/ipv6/udp.c           |    4 ++--
 net/llc/llc_c_ac.c       |    2 +-
 net/llc/llc_conn.c       |    2 +-
 net/sctp/input.c         |    4 ++--
 net/tipc/socket.c        |    2 +-
 net/x25/x25_dev.c        |    2 +-
 13 files changed, 17 insertions(+), 17 deletions(-)

--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -577,7 +577,7 @@ static inline int sk_stream_memory_free(
 }
 
 /* OOB backlog add */
-static inline void sk_add_backlog(struct sock *sk, struct sk_buff *skb)
+static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
 {
 	if (!sk->sk_backlog.tail) {
 		sk->sk_backlog.head = sk->sk_backlog.tail = skb;
@@ -589,12 +589,12 @@ static inline void sk_add_backlog(struct
 }
 
 /* The per-socket spinlock must be held here. */
-static inline int sk_add_backlog_limited(struct sock *sk, struct sk_buff *skb)
+static inline int sk_add_backlog(struct sock *sk, struct sk_buff *skb)
 {
 	if (sk->sk_backlog.len >= max(sk->sk_backlog.limit, sk->sk_rcvbuf << 1))
 		return -ENOBUFS;
 
-	sk_add_backlog(sk, skb);
+	__sk_add_backlog(sk, skb);
 	sk->sk_backlog.len += skb->truesize;
 	return 0;
 }
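
For context: with the names above, the limited variant refuses new skbs
once their accumulated truesize reaches max(sk->sk_backlog.limit,
sk->sk_rcvbuf << 1), i.e. twice the socket receive buffer unless the
protocol has installed a larger explicit limit. A hypothetical sketch of a
protocol raising its cap (the shift factor is arbitrary; only the
sk_backlog.limit field itself comes from this tree):

/* Sketch only: opt a socket in to a backlog cap above the default
 * of twice sk_rcvbuf.  The 4x value is purely illustrative.
 */
static void example_raise_backlog_limit(struct sock *sk)
{
	sk->sk_backlog.limit = sk->sk_rcvbuf << 2;
}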
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -340,7 +340,7 @@ int sk_receive_skb(struct sock *sk, stru
 		rc = sk_backlog_rcv(sk, skb);
 
 		mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
-	} else if (sk_add_backlog_limited(sk, skb)) {
+	} else if (sk_add_backlog(sk, skb)) {
 		bh_unlock_sock(sk);
 		atomic_inc(&sk->sk_drops);
 		goto discard_and_relse;
--- a/net/dccp/minisocks.c
+++ b/net/dccp/minisocks.c
@@ -254,7 +254,7 @@ int dccp_child_process(struct sock *pare
 		 * in main socket hash table and lock on listening
 		 * socket does not protect us more.
 		 */
-		sk_add_backlog(child, skb);
+		__sk_add_backlog(child, skb);
 	}
 
 	bh_unlock_sock(child);
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1677,7 +1677,7 @@ process:
 			if (!tcp_prequeue(sk, skb))
 				ret = tcp_v4_do_rcv(sk, skb);
 		}
-	} else if (sk_add_backlog_limited(sk, skb)) {
+	} else if (sk_add_backlog(sk, skb)) {
 		bh_unlock_sock(sk);
 		goto discard_and_relse;
 	}
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -728,7 +728,7 @@ int tcp_child_process(struct sock *paren
 		 * in main socket hash table and lock on listening
 		 * socket does not protect us more.
 		 */
-		sk_add_backlog(child, skb);
+		__sk_add_backlog(child, skb);
 	}
 
 	bh_unlock_sock(child);
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1372,7 +1372,7 @@ int udp_queue_rcv_skb(struct sock *sk, s
 	bh_lock_sock(sk);
 	if (!sock_owned_by_user(sk))
 		rc = __udp_queue_rcv_skb(sk, skb);
-	else if (sk_add_backlog_limited(sk, skb)) {
+	else if (sk_add_backlog(sk, skb)) {
 		bh_unlock_sock(sk);
 		goto drop;
 	}
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1732,7 +1732,7 @@ process:
 			if (!tcp_prequeue(sk, skb))
 				ret = tcp_v6_do_rcv(sk, skb);
 		}
-	} else if (sk_add_backlog_limited(sk, skb)) {
+	} else if (sk_add_backlog(sk, skb)) {
 		bh_unlock_sock(sk);
 		goto discard_and_relse;
 	}
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -584,7 +584,7 @@ static void flush_stack(struct sock **st
 			bh_lock_sock(sk);
 			if (!sock_owned_by_user(sk))
 				udpv6_queue_rcv_skb(sk, skb1);
-			else if (sk_add_backlog_limited(sk, skb1)) {
+			else if (sk_add_backlog(sk, skb1)) {
 				kfree_skb(skb1);
 				bh_unlock_sock(sk);
 				goto drop;
@@ -760,7 +760,7 @@ int __udp6_lib_rcv(struct sk_buff *skb,
 	bh_lock_sock(sk);
 	if (!sock_owned_by_user(sk))
 		udpv6_queue_rcv_skb(sk, skb);
-	else if (sk_add_backlog_limited(sk, skb)) {
+	else if (sk_add_backlog(sk, skb)) {
 		atomic_inc(&sk->sk_drops);
 		bh_unlock_sock(sk);
 		sock_put(sk);
--- a/net/llc/llc_c_ac.c
+++ b/net/llc/llc_c_ac.c
@@ -1437,7 +1437,7 @@ static void llc_process_tmr_ev(struct so
 			llc_conn_state_process(sk, skb);
 		else {
 			llc_set_backlog_type(skb, LLC_EVENT);
-			sk_add_backlog(sk, skb);
+			__sk_add_backlog(sk, skb);
 		}
 	}
 }
--- a/net/llc/llc_conn.c
+++ b/net/llc/llc_conn.c
@@ -756,7 +756,7 @@ void llc_conn_handler(struct llc_sap *sa
 	else {
 		dprintk("%s: adding to backlog...\n", __func__);
 		llc_set_backlog_type(skb, LLC_PACKET);
-		if (sk_add_backlog_limited(sk, skb))
+		if (sk_add_backlog(sk, skb))
 			goto drop_unlock;
 	}
out:
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -341,7 +341,7 @@ int sctp_backlog_rcv(struct sock *sk, st
 		sctp_bh_lock_sock(sk);
 
 		if (sock_owned_by_user(sk)) {
-			if (sk_add_backlog_limited(sk, skb))
+			if (sk_add_backlog(sk, skb))
 				sctp_chunk_free(chunk);
 			else
 				backloged = 1;
@@ -375,7 +375,7 @@ static int sctp_add_backlog(struct sock
 	struct sctp_ep_common *rcvr = chunk->rcvr;
 	int ret;
 
-	ret = sk_add_backlog_limited(sk, skb);
+	ret = sk_add_backlog(sk, skb);
 	if (!ret) {
 		/* Hold the assoc/ep while hanging on the backlog queue.
 		 * This way, we know structures we need will not disappear
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -1322,7 +1322,7 @@ static u32 dispatch(struct tipc_port *tp
 	if (!sock_owned_by_user(sk)) {
 		res = filter_rcv(sk, buf);
 	} else {
-		if (sk_add_backlog_limited(sk, buf))
+		if (sk_add_backlog(sk, buf))
 			res = TIPC_ERR_OVERLOAD;
 		else
 			res = TIPC_OK;
--- a/net/x25/x25_dev.c
+++ b/net/x25/x25_dev.c
@@ -53,7 +53,7 @@ static int x25_receive_data(struct sk_bu
 		if (!sock_owned_by_user(sk)) {
 			queued = x25_process_rx_frame(sk, skb);
 		} else {
-			queued = !sk_add_backlog_limited(sk, skb);
+			queued = !sk_add_backlog(sk, skb);
 		}
 		bh_unlock_sock(sk);
 		sock_put(sk);


