E_(rxrpc_rtt_tx_ping, "PING")
#define rxrpc_rtt_rx_traces \
- EM(rxrpc_rtt_rx_other_ack, "OACK") \
+ EM(rxrpc_rtt_rx_data_ack, "DACK") \
EM(rxrpc_rtt_rx_obsolete, "OBSL") \
EM(rxrpc_rtt_rx_lost, "LOST") \
- EM(rxrpc_rtt_rx_ping_response, "PONG") \
- E_(rxrpc_rtt_rx_requested_ack, "RACK")
+ E_(rxrpc_rtt_rx_ping_response, "PONG")
#define rxrpc_timer_traces \
EM(rxrpc_timer_trace_delayed_ack, "DelayAck ") \
);
TRACE_EVENT(rxrpc_congest,
- TP_PROTO(struct rxrpc_call *call, struct rxrpc_ack_summary *summary,
- rxrpc_serial_t ack_serial),
+ TP_PROTO(struct rxrpc_call *call, struct rxrpc_ack_summary *summary),
- TP_ARGS(call, summary, ack_serial),
+ TP_ARGS(call, summary),
TP_STRUCT__entry(
__field(unsigned int, call)
__field(rxrpc_seq_t, hard_ack)
__field(rxrpc_seq_t, top)
__field(rxrpc_seq_t, lowest_nak)
- __field(rxrpc_serial_t, ack_serial)
__field(u16, nr_sacks)
__field(u16, nr_snacks)
__field(u16, cwnd)
__entry->hard_ack = call->acks_hard_ack;
__entry->top = call->tx_top;
__entry->lowest_nak = call->acks_lowest_nak;
- __entry->ack_serial = ack_serial;
__entry->nr_sacks = call->acks_nr_sacks;
__entry->nr_snacks = call->acks_nr_snacks;
__entry->cwnd = call->cong_cwnd;
TP_printk("c=%08x r=%08x %s q=%08x %s cw=%u ss=%u A=%u+%u/%u+%u r=%u b=%u u=%u d=%u l=%x%s%s%s",
__entry->call,
- __entry->ack_serial,
+ __entry->sum.acked_serial,
__print_symbolic(__entry->sum.ack_reason, rxrpc_ack_names),
__entry->hard_ack,
__print_symbolic(__entry->ca_state, rxrpc_ca_states),
* Do TCP-style congestion management [RFC 5681].
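+ * A rough sketch of the behaviour below: the window is opened by one packet
+ * per ACK while in slow start and by one packet per RTT once in congestion
+ * avoidance, and is halved when packet loss is detected.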
*/
static void rxrpc_congestion_management(struct rxrpc_call *call,
- struct sk_buff *skb,
- struct rxrpc_ack_summary *summary,
- rxrpc_serial_t acked_serial)
+ struct rxrpc_ack_summary *summary)
{
summary->change = rxrpc_cong_no_change;
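+	/* In flight = transmitted but not hard-ACK'd, less those soft-ACK'd. */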
summary->in_flight = (call->tx_top - call->tx_bottom) - call->acks_nr_sacks;
if (call->cong_cwnd >= call->cong_ssthresh &&
call->cong_ca_state == RXRPC_CA_SLOW_START) {
call->cong_ca_state = RXRPC_CA_CONGEST_AVOIDANCE;
- call->cong_tstamp = skb->tstamp;
+ call->cong_tstamp = call->acks_latest_ts;
call->cong_cumul_acks = 0;
}
}
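+	/* Slow start: open the window by one packet per ACK until ssthresh. */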
call->cong_cwnd += 1;
if (call->cong_cwnd >= call->cong_ssthresh) {
call->cong_ca_state = RXRPC_CA_CONGEST_AVOIDANCE;
- call->cong_tstamp = skb->tstamp;
+ call->cong_tstamp = call->acks_latest_ts;
}
goto out;
*/
if (call->peer->rtt_count == 0)
goto out;
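+	/* Reassess only once a smoothed RTT has elapsed since the current
+	 * RTT-measurement window began (srtt_us is kept scaled by 8).
+	 */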
- if (ktime_before(skb->tstamp,
+ if (ktime_before(call->acks_latest_ts,
ktime_add_us(call->cong_tstamp,
call->peer->srtt_us >> 3)))
goto out_no_clear_ca;
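+	/* The RTT window has closed: if a whole window's worth of packets was
+	 * ACK'd within it, open the window by one and begin a new window.
+	 */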
summary->change = rxrpc_cong_rtt_window_end;
- call->cong_tstamp = skb->tstamp;
+ call->cong_tstamp = call->acks_latest_ts;
if (call->cong_cumul_acks >= call->cong_cwnd)
call->cong_cwnd++;
goto out;
summary->change = rxrpc_cong_cleared_nacks;
call->cong_dup_acks = 0;
call->cong_extra = 0;
- call->cong_tstamp = skb->tstamp;
+ call->cong_tstamp = call->acks_latest_ts;
if (call->cong_cwnd < call->cong_ssthresh)
call->cong_ca_state = RXRPC_CA_SLOW_START;
else
out_no_clear_ca:
if (call->cong_cwnd >= RXRPC_TX_MAX_WINDOW)
call->cong_cwnd = RXRPC_TX_MAX_WINDOW;
- trace_rxrpc_congest(call, summary, acked_serial);
+ trace_rxrpc_congest(call, summary);
return;
packet_loss_detected:
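+	/* Multiplicative decrease: halve the window, but not below the minimum. */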
call->cong_cwnd = umax(call->cong_cwnd / 2, RXRPC_MIN_CWND);
}
+/*
+ * Add an RTT sample derived from an ACK'd DATA packet.
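+ * The transmission time of each DATA packet is recorded in its txqueue
+ * segment, so an ACK that names the packet's serial yields an RTT sample
+ * without requiring a separate ping probe.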
+ */
+static void rxrpc_add_data_rtt_sample(struct rxrpc_call *call,
+ struct rxrpc_ack_summary *summary,
+ struct rxrpc_txqueue *tq,
+ int ix,
+ rxrpc_serial_t ack_serial)
+{
+ rxrpc_peer_add_rtt(call, rxrpc_rtt_rx_data_ack, -1,
+ summary->acked_serial, ack_serial,
+ ktime_add_us(tq->xmit_ts_base, tq->segment_xmit_ts[ix]),
+ call->acks_latest_ts);
+ summary->rtt_sample_avail = false;
+ __clear_bit(ix, &tq->rtt_samples); /* Prevent repeat RTT sample */
+}
+
/*
* Apply a hard ACK by advancing the Tx window.
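+ * The serial number of the received ACK is passed in so that an RTT sample
+ * taken from a hard-ACK'd DATA packet can be attributed to that ACK.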
*/
static bool rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
- struct rxrpc_ack_summary *summary)
+ struct rxrpc_ack_summary *summary,
+ rxrpc_serial_t ack_serial)
{
struct rxrpc_txqueue *tq = call->tx_queue;
rxrpc_seq_t seq = call->tx_bottom + 1;
rot_last = true;
}
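+		/* If the ACK names the serial of an RTT sampling candidate in
+		 * this segment, take the sample now, before the segment is
+		 * retired.
+		 */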
+ if (summary->rtt_sample_avail &&
+ summary->acked_serial == tq->segment_serial[ix] &&
+ test_bit(ix, &tq->rtt_samples))
+ rxrpc_add_data_rtt_sample(call, summary, tq, ix, ack_serial);
+
if (ix == tq->nr_reported_acks) {
/* Packet directly hard ACK'd. */
tq->nr_reported_acks++;
}
if (!test_bit(RXRPC_CALL_TX_LAST, &call->flags)) {
- if (!rxrpc_rotate_tx_window(call, top, &summary)) {
+ if (!rxrpc_rotate_tx_window(call, top, &summary, 0)) {
rxrpc_proto_abort(call, top, rxrpc_eproto_early_reply);
return false;
}
})
#endif
+/*
+ * Glean an RTT sample from a soft ACK that is responding to one of our DATA
+ * packets.
+ */
+static void rxrpc_input_soft_rtt(struct rxrpc_call *call,
+ struct rxrpc_ack_summary *summary,
+ struct rxrpc_txqueue *tq,
+ rxrpc_serial_t ack_serial)
+{
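+	/* Look for the segment holding the DATA serial that this ACK is
+	 * responding to and take an RTT sample from its transmission time.
+	 */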
+ for (int ix = 0; ix < RXRPC_NR_TXQUEUE; ix++)
+ if (summary->acked_serial == tq->segment_serial[ix])
+ return rxrpc_add_data_rtt_sample(call, summary, tq, ix, ack_serial);
+}
+
/*
* Process a batch of soft ACKs specific to a transmission queue segment.
*/
_debug("bound %16lx %u", extracted, nr);
+ if (summary->rtt_sample_avail)
+ rxrpc_input_soft_rtt(call, summary, tq, sp->hdr.serial);
rxrpc_input_soft_ack_tq(call, summary, tq, extracted, RXRPC_NR_TXQUEUE,
seq - RXRPC_NR_TXQUEUE, &lowest_nak);
extracted = ~0UL;
struct rxrpc_ack_summary summary = { 0 };
struct rxrpc_acktrailer trailer;
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
- rxrpc_serial_t ack_serial, acked_serial;
+ rxrpc_serial_t ack_serial;
rxrpc_seq_t first_soft_ack, hard_ack, prev_pkt;
int nr_acks, offset, ioffset;
offset = sizeof(struct rxrpc_wire_header) + sizeof(struct rxrpc_ackpacket);
ack_serial = sp->hdr.serial;
- acked_serial = sp->ack.acked_serial;
first_soft_ack = sp->ack.first_ack;
prev_pkt = sp->ack.prev_ack;
nr_acks = sp->ack.nr_acks;
hard_ack = first_soft_ack - 1;
+ summary.acked_serial = sp->ack.acked_serial;
summary.ack_reason = (sp->ack.reason < RXRPC_ACK__INVALID ?
sp->ack.reason : RXRPC_ACK__INVALID);
rxrpc_inc_stat(call->rxnet, stat_rx_acks[summary.ack_reason]);
prefetch(call->tx_queue);
- if (acked_serial != 0) {
- switch (summary.ack_reason) {
- case RXRPC_ACK_PING_RESPONSE:
- rxrpc_complete_rtt_probe(call, skb->tstamp, acked_serial, ack_serial,
- rxrpc_rtt_rx_ping_response);
- break;
- case RXRPC_ACK_REQUESTED:
- rxrpc_complete_rtt_probe(call, skb->tstamp, acked_serial, ack_serial,
- rxrpc_rtt_rx_requested_ack);
- break;
- default:
- rxrpc_complete_rtt_probe(call, skb->tstamp, acked_serial, ack_serial,
- rxrpc_rtt_rx_other_ack);
- break;
- }
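+	/* A PING RESPONSE completes the RTT probe for the ping it answers;
+	 * any other ACK type may instead yield an RTT sample against the
+	 * transmission timestamp of the DATA packet it references.
+	 */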
+ if (summary.acked_serial != 0) {
+ if (summary.ack_reason == RXRPC_ACK_PING_RESPONSE)
+ rxrpc_complete_rtt_probe(call, skb->tstamp, summary.acked_serial,
+ ack_serial, rxrpc_rtt_rx_ping_response);
+ else
+ summary.rtt_sample_avail = true;
}
/* If we get an EXCEEDS_WINDOW ACK from the server, it probably
case RXRPC_ACK_PING:
break;
default:
- if (acked_serial && after(acked_serial, call->acks_highest_serial))
- call->acks_highest_serial = acked_serial;
+ if (summary.acked_serial &&
+ after(summary.acked_serial, call->acks_highest_serial))
+ call->acks_highest_serial = summary.acked_serial;
break;
}
return rxrpc_proto_abort(call, 0, rxrpc_eproto_ackr_sack_overflow);
if (after(hard_ack, call->tx_bottom)) {
- if (rxrpc_rotate_tx_window(call, hard_ack, &summary)) {
+ if (rxrpc_rotate_tx_window(call, hard_ack, &summary, ack_serial)) {
rxrpc_end_tx_phase(call, false, rxrpc_eproto_unexpected_ack);
goto send_response;
}
rxrpc_propose_ping(call, ack_serial,
rxrpc_propose_ack_ping_for_lost_reply);
- rxrpc_congestion_management(call, skb, &summary, acked_serial);
+ rxrpc_congestion_management(call, &summary);
if (summary.need_retransmit)
rxrpc_resend(call, ack_serial, summary.ack_reason == RXRPC_ACK_PING_RESPONSE);
{
struct rxrpc_ack_summary summary = { 0 };
- if (rxrpc_rotate_tx_window(call, call->tx_top, &summary))
+ if (rxrpc_rotate_tx_window(call, call->tx_top, &summary, 0))
rxrpc_end_tx_phase(call, false, rxrpc_eproto_unexpected_ackall);
}