Subject: [PATCH net-next 10/12] rxrpc: Add keepalive for a call
From: David Howells <dhowells@redhat.com>
Date: Fri, 24 Nov 2017
We need to transmit a packet every so often to act as a keepalive for the
peer (which has a timeout from the last time it received a packet) and also
to prevent any intervening firewalls from closing the route.

Do this by resetting a timer every time we transmit a packet. If the timer
ever expires, we transmit a PING ACK packet, thereby eliciting a PING
RESPONSE ACK from the other side, which prevents our last-rx timeout from
expiring.
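
In outline this is the classic reset-on-send keepalive pattern. As a
minimal, self-contained userspace sketch (all names and the fixed
4-second interval are invented for illustration; the kernel code below
uses jiffies and the call's timer instead):

/* Illustrative reset-on-send keepalive; not kernel API. */
#include <stdio.h>
#include <time.h>

static time_t keepalive_due;			/* when the next ping is owed */
static const time_t keepalive_interval = 4;	/* e.g. last-rx timeout / 6 */

static void did_transmit(void)
{
	/* Every outgoing packet pushes the keepalive deadline back. */
	keepalive_due = time(NULL) + keepalive_interval;
}

static void poll_keepalive(void)
{
	if (time(NULL) >= keepalive_due) {
		printf("send PING ACK to elicit a PING RESPONSE\n");
		did_transmit();	/* the ping is itself a transmission */
	}
}

int main(void)
{
	did_transmit();		/* arm after the first packet goes out */
	poll_keepalive();	/* too early: nothing happens */
	return 0;
}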

The timer is set to 1/6 of the last-rx timeout so that we can detect the
other side going away if it fails to answer six pings in a row.
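
To make the arithmetic concrete, a worked example; the 24-second
last-rx timeout and HZ value are assumptions for illustration, not
rxrpc defaults:

/* Worked example of the 1/6 rule; 24 s timeout and HZ are assumed. */
#include <stdio.h>

#define HZ 100	/* assumed jiffies per second */

int main(void)
{
	unsigned long next_rx_timo = 24 * HZ;		/* last-rx timeout */
	unsigned long keepalive = next_rx_timo / 6;	/* 400 jiffies = 4 s */

	/* Six consecutive unanswered pings span the whole last-rx window,
	 * so the peer must ignore all six before expect_rx_by can expire. */
	printf("ping every %lu jiffies (%lu s)\n", keepalive, keepalive / HZ);
	return 0;
}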

This is particularly necessary for servers where the processing of the
service function may take a significant amount of time.

Signed-off-by: David Howells <dhowells@redhat.com>
---

 include/trace/events/rxrpc.h |    6 ++++++
 net/rxrpc/ar-internal.h      |    1 +
 net/rxrpc/call_event.c       |   10 ++++++++++
 net/rxrpc/output.c           |   23 +++++++++++++++++++++++
 4 files changed, 40 insertions(+)
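
One detail worth noting in the call_event.c hunk below: the expiry
handler uses cmpxchg() to park keepalive_at at now + MAX_JIFFY_OFFSET,
so the disarm only takes effect if no transmission re-armed the timer
after the value was sampled. A standalone model of that pattern, with
illustrative names rather than the kernel's API:

/* Model of the disarm-vs-rearm race handled by cmpxchg(). */
#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_ulong keepalive_at;

static void rearm(unsigned long now, unsigned long interval)
{
	/* Corresponds to rxrpc_set_keepalive() running on transmit. */
	atomic_store(&keepalive_at, now + interval);
}

static void on_expiry(unsigned long now)
{
	unsigned long t = atomic_load(&keepalive_at);

	if (now >= t) {
		/* Park the timer; this fails harmlessly if rearm() ran
		 * between the load above and this exchange. */
		atomic_compare_exchange_strong(&keepalive_at, &t, ULONG_MAX);
		printf("send PING ACK at %lu\n", now);
	}
}

int main(void)
{
	rearm(100, 4);	/* armed for tick 104 */
	on_expiry(104);	/* fires and parks the timer */
	on_expiry(105);	/* parked: nothing to do */
	return 0;
}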

diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
index 84ade8b76a19..e98fed6de497 100644
--- a/include/trace/events/rxrpc.h
+++ b/include/trace/events/rxrpc.h
@@ -141,6 +141,7 @@ enum rxrpc_timer_trace {
 	rxrpc_timer_exp_ack,
 	rxrpc_timer_exp_hard,
 	rxrpc_timer_exp_idle,
+	rxrpc_timer_exp_keepalive,
 	rxrpc_timer_exp_lost_ack,
 	rxrpc_timer_exp_normal,
 	rxrpc_timer_exp_ping,
@@ -152,6 +153,7 @@ enum rxrpc_timer_trace {
 	rxrpc_timer_set_for_ack,
 	rxrpc_timer_set_for_hard,
 	rxrpc_timer_set_for_idle,
+	rxrpc_timer_set_for_keepalive,
 	rxrpc_timer_set_for_lost_ack,
 	rxrpc_timer_set_for_normal,
 	rxrpc_timer_set_for_ping,
@@ -162,6 +164,7 @@ enum rxrpc_timer_trace {
 enum rxrpc_propose_ack_trace {
 	rxrpc_propose_ack_client_tx_end,
 	rxrpc_propose_ack_input_data,
+	rxrpc_propose_ack_ping_for_keepalive,
 	rxrpc_propose_ack_ping_for_lost_ack,
 	rxrpc_propose_ack_ping_for_lost_reply,
 	rxrpc_propose_ack_ping_for_params,
@@ -311,6 +314,7 @@ enum rxrpc_congest_change {
 	EM(rxrpc_timer_exp_ack,			"ExpAck") \
 	EM(rxrpc_timer_exp_hard,		"ExpHrd") \
 	EM(rxrpc_timer_exp_idle,		"ExpIdl") \
+	EM(rxrpc_timer_exp_keepalive,		"ExpKA ") \
 	EM(rxrpc_timer_exp_lost_ack,		"ExpLoA") \
 	EM(rxrpc_timer_exp_normal,		"ExpNml") \
 	EM(rxrpc_timer_exp_ping,		"ExpPng") \
@@ -321,6 +325,7 @@ enum rxrpc_congest_change {
 	EM(rxrpc_timer_set_for_ack,		"SetAck") \
 	EM(rxrpc_timer_set_for_hard,		"SetHrd") \
 	EM(rxrpc_timer_set_for_idle,		"SetIdl") \
+	EM(rxrpc_timer_set_for_keepalive,	"KeepAl") \
 	EM(rxrpc_timer_set_for_lost_ack,	"SetLoA") \
 	EM(rxrpc_timer_set_for_normal,		"SetNml") \
 	EM(rxrpc_timer_set_for_ping,		"SetPng") \
@@ -330,6 +335,7 @@ enum rxrpc_congest_change {
 #define rxrpc_propose_ack_traces \
 	EM(rxrpc_propose_ack_client_tx_end,	"ClTxEnd") \
 	EM(rxrpc_propose_ack_input_data,	"DataIn ") \
+	EM(rxrpc_propose_ack_ping_for_keepalive, "KeepAlv") \
 	EM(rxrpc_propose_ack_ping_for_lost_ack,	"LostAck") \
 	EM(rxrpc_propose_ack_ping_for_lost_reply, "LostRpl") \
 	EM(rxrpc_propose_ack_ping_for_params,	"Params ") \
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index 7e7b817c69f0..cdcbc798f921 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -519,6 +519,7 @@ struct rxrpc_call {
 	unsigned long	ack_lost_at;	/* When ACK is figured as lost */
 	unsigned long	resend_at;	/* When next resend needs to happen */
 	unsigned long	ping_at;	/* When next to send a ping */
+	unsigned long	keepalive_at;	/* When next to send a keepalive ping */
 	unsigned long	expect_rx_by;	/* When we expect to get a packet by */
 	unsigned long	expect_req_by;	/* When we expect to get a request DATA packet by */
 	unsigned long	expect_term_by;	/* When we expect call termination by */
diff --git a/net/rxrpc/call_event.c b/net/rxrpc/call_event.c
index c65666b2f39e..bda952ffe6a6 100644
--- a/net/rxrpc/call_event.c
+++ b/net/rxrpc/call_event.c
@@ -366,6 +366,15 @@ void rxrpc_process_call(struct work_struct *work)
 		set_bit(RXRPC_CALL_EV_ACK_LOST, &call->events);
 	}
 
+	t = READ_ONCE(call->keepalive_at);
+	if (time_after_eq(now, t)) {
+		trace_rxrpc_timer(call, rxrpc_timer_exp_keepalive, now);
+		cmpxchg(&call->keepalive_at, t, now + MAX_JIFFY_OFFSET);
+		rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, 0, true, true,
+				  rxrpc_propose_ack_ping_for_keepalive);
+		set_bit(RXRPC_CALL_EV_PING, &call->events);
+	}
+
 	t = READ_ONCE(call->ping_at);
 	if (time_after_eq(now, t)) {
 		trace_rxrpc_timer(call, rxrpc_timer_exp_ping, now);
@@ -423,6 +432,7 @@ void rxrpc_process_call(struct work_struct *work)
 	set(call->ack_at);
 	set(call->ack_lost_at);
 	set(call->resend_at);
+	set(call->keepalive_at);
 	set(call->ping_at);
 
 	now = jiffies;
diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c
index efe06edce189..42410e910aff 100644
--- a/net/rxrpc/output.c
+++ b/net/rxrpc/output.c
@@ -33,6 +33,24 @@ struct rxrpc_abort_buffer {
 };
 
 /*
+ * Arrange for a keepalive ping a certain time after we last transmitted.  This
+ * lets the far side know we're still interested in this call and helps keep
+ * the route through any intervening firewall open.
+ *
+ * Receiving a response to the ping will prevent the ->expect_rx_by timer from
+ * expiring.
+ */
+static void rxrpc_set_keepalive(struct rxrpc_call *call)
+{
+	unsigned long now = jiffies, keepalive_at = call->next_rx_timo / 6;
+
+	keepalive_at += now;
+	WRITE_ONCE(call->keepalive_at, keepalive_at);
+	rxrpc_reduce_call_timer(call, keepalive_at, now,
+				rxrpc_timer_set_for_keepalive);
+}
+
+/*
  * Fill out an ACK packet.
  */
 static size_t rxrpc_fill_out_ack(struct rxrpc_connection *conn,
@@ -205,6 +223,8 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
 				call->ackr_seen = top;
 			spin_unlock_bh(&call->lock);
 		}
+
+		rxrpc_set_keepalive(call);
 	}
 
 out:
@@ -388,6 +408,9 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
 			}
 		}
 	}
+
+	rxrpc_set_keepalive(call);
+
 	_leave(" = %d [%u]", ret, call->peer->maxdata);
 	return ret;