rxrpc: Adjust names and types of congestion-related fields
author David Howells <dhowells@redhat.com>
Wed, 4 Dec 2024 07:46:50 +0000 (07:46 +0000)
committer Jakub Kicinski <kuba@kernel.org>
Mon, 9 Dec 2024 21:48:28 +0000 (13:48 -0800)
Adjust some of the names of fields and constants to make them look a bit
more like the TCP congestion symbol names, such as flight_size -> in_flight
and congest_mode -> ca_state.
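
For reference, the TCP symbols being echoed are the tcp_ca_state values in
include/net/tcp.h.  A from-memory sketch for comparison (enumerator names and
values as in mainline; the comments here are paraphrased, not quoted):

    enum tcp_ca_state {
            TCP_CA_Open     = 0,    /* normal operation, no loss suspected */
            TCP_CA_Disorder = 1,    /* dup ACKs/SACKs seen, possible reordering */
            TCP_CA_CWR      = 2,    /* cwnd reduced (ECN or local congestion) */
            TCP_CA_Recovery = 3,    /* fast retransmit / fast recovery */
            TCP_CA_Loss     = 4,    /* RTO fired, loss recovery */
    };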

Move the persistent congestion-related fields from the rxrpc_ack_summary
struct into the rxrpc_call struct rather than copying them out and back in
again.  The rxrpc_congest tracepoint can fetch them from the call struct.
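
Condensed from the input.c and trace-header hunks below (nothing here beyond
what the diff itself shows), the call-site effect of that move is:

    /* Before: the caller stashed working copies in the summary purely so
     * the tracepoint could print them, and passed the change code as a
     * separate argument:
     *
     *      summary->cwnd            = call->cong_cwnd;
     *      summary->ssthresh        = call->cong_ssthresh;
     *      summary->cumulative_acks = cumulative_acks;
     *      summary->dup_acks        = call->cong_dup_acks;
     *      summary->mode            = call->cong_mode;
     *      trace_rxrpc_congest(call, summary, acked_serial, change);
     *
     * After: TP_fast_assign() reads call->cong_* directly and the change
     * code travels in summary->change, so the call site shrinks to:
     *
     *      trace_rxrpc_congest(call, summary, acked_serial);
     */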

Rename the counters for soft acks and nacks to have an 's' on the front to
reflect the softness, e.g. nr_acks -> nr_sacks.

Make fields counting numbers of packets or numbers of acks u16 rather than
u8 to allow for windows of up to 8192 DATA packets in flight in future.
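
As a minimal compile-time sketch of the arithmetic (plain C11, not part of the
patch): a u8 saturates at 255, well short of 8192, while a u16 reaches 65535.

    #include <stdint.h>

    /* Why u8 counters cannot describe an 8192-packet transmission window,
     * but u16 counters can: */
    _Static_assert(UINT8_MAX  <  8192, "u8 tops out at 255");
    _Static_assert(UINT16_MAX >= 8192, "u16 tops out at 65535");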

Signed-off-by: David Howells <dhowells@redhat.com>
cc: Marc Dionne <marc.dionne@auristor.com>
cc: linux-afs@lists.infradead.org
Link: https://patch.msgid.link/20241204074710.990092-23-dhowells@redhat.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
include/trace/events/rxrpc.h
net/rxrpc/ar-internal.h
net/rxrpc/conn_client.c
net/rxrpc/input.c
net/rxrpc/output.c

diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
index 91108e0de3af9beca3e7e73c34c2020a03ef2bbd..d47b8235fad38a0b20e8a864af79cdc8d15e3fe9 100644
--- a/include/trace/events/rxrpc.h
+++ b/include/trace/events/rxrpc.h
        EM(rxrpc_propose_ack_rx_idle,           "RxIdle ") \
        E_(rxrpc_propose_ack_terminal_ack,      "ClTerm ")
 
-#define rxrpc_congest_modes \
-       EM(RXRPC_CALL_CONGEST_AVOIDANCE,        "CongAvoid") \
-       EM(RXRPC_CALL_FAST_RETRANSMIT,          "FastReTx ") \
-       EM(RXRPC_CALL_PACKET_LOSS,              "PktLoss  ") \
-       E_(RXRPC_CALL_SLOW_START,               "SlowStart")
+#define rxrpc_ca_states \
+       EM(RXRPC_CA_CONGEST_AVOIDANCE,          "CongAvoid") \
+       EM(RXRPC_CA_FAST_RETRANSMIT,            "FastReTx ") \
+       EM(RXRPC_CA_PACKET_LOSS,                "PktLoss  ") \
+       E_(RXRPC_CA_SLOW_START,                 "SlowStart")
 
 #define rxrpc_congest_changes \
        EM(rxrpc_cong_begin_retransmission,     " Retrans") \
@@ -550,11 +550,11 @@ enum rxrpc_txqueue_trace  { rxrpc_txqueue_traces } __mode(byte);
 
 rxrpc_abort_reasons;
 rxrpc_bundle_traces;
+rxrpc_ca_states;
 rxrpc_call_poke_traces;
 rxrpc_call_traces;
 rxrpc_client_traces;
 rxrpc_congest_changes;
-rxrpc_congest_modes;
 rxrpc_conn_traces;
 rxrpc_local_traces;
 rxrpc_pmtud_reduce_traces;
@@ -1688,27 +1688,39 @@ TRACE_EVENT(rxrpc_retransmit,
 
 TRACE_EVENT(rxrpc_congest,
            TP_PROTO(struct rxrpc_call *call, struct rxrpc_ack_summary *summary,
-                    rxrpc_serial_t ack_serial, enum rxrpc_congest_change change),
+                    rxrpc_serial_t ack_serial),
 
-           TP_ARGS(call, summary, ack_serial, change),
+           TP_ARGS(call, summary, ack_serial),
 
            TP_STRUCT__entry(
                    __field(unsigned int,                       call)
-                   __field(enum rxrpc_congest_change,          change)
+                   __field(enum rxrpc_ca_state,                ca_state)
                    __field(rxrpc_seq_t,                        hard_ack)
                    __field(rxrpc_seq_t,                        top)
                    __field(rxrpc_seq_t,                        lowest_nak)
                    __field(rxrpc_serial_t,                     ack_serial)
+                   __field(u16,                                nr_sacks)
+                   __field(u16,                                nr_snacks)
+                   __field(u16,                                cwnd)
+                   __field(u16,                                ssthresh)
+                   __field(u16,                                cumul_acks)
+                   __field(u16,                                dup_acks)
                    __field_struct(struct rxrpc_ack_summary,    sum)
                             ),
 
            TP_fast_assign(
                    __entry->call       = call->debug_id;
-                   __entry->change     = change;
+                   __entry->ca_state   = call->cong_ca_state;
                    __entry->hard_ack   = call->acks_hard_ack;
                    __entry->top        = call->tx_top;
                    __entry->lowest_nak = call->acks_lowest_nak;
                    __entry->ack_serial = ack_serial;
+                   __entry->nr_sacks   = call->acks_nr_sacks;
+                   __entry->nr_snacks  = call->acks_nr_snacks;
+                   __entry->cwnd       = call->cong_cwnd;
+                   __entry->ssthresh   = call->cong_ssthresh;
+                   __entry->cumul_acks = call->cong_cumul_acks;
+                   __entry->dup_acks   = call->cong_dup_acks;
                    memcpy(&__entry->sum, summary, sizeof(__entry->sum));
                           ),
 
@@ -1717,17 +1729,17 @@ TRACE_EVENT(rxrpc_congest,
                      __entry->ack_serial,
                      __print_symbolic(__entry->sum.ack_reason, rxrpc_ack_names),
                      __entry->hard_ack,
-                     __print_symbolic(__entry->sum.mode, rxrpc_congest_modes),
-                     __entry->sum.cwnd,
-                     __entry->sum.ssthresh,
-                     __entry->sum.nr_acks, __entry->sum.nr_retained_nacks,
-                     __entry->sum.nr_new_acks,
-                     __entry->sum.nr_new_nacks,
+                     __print_symbolic(__entry->ca_state, rxrpc_ca_states),
+                     __entry->cwnd,
+                     __entry->ssthresh,
+                     __entry->nr_sacks, __entry->sum.nr_retained_snacks,
+                     __entry->sum.nr_new_sacks,
+                     __entry->sum.nr_new_snacks,
                      __entry->top - __entry->hard_ack,
-                     __entry->sum.cumulative_acks,
-                     __entry->sum.dup_acks,
-                     __entry->lowest_nak, __entry->sum.new_low_nack ? "!" : "",
-                     __print_symbolic(__entry->change, rxrpc_congest_changes),
+                     __entry->cumul_acks,
+                     __entry->dup_acks,
+                     __entry->lowest_nak, __entry->sum.new_low_snack ? "!" : "",
+                     __print_symbolic(__entry->sum.change, rxrpc_congest_changes),
                      __entry->sum.retrans_timeo ? " rTxTo" : "")
            );
 
@@ -1738,7 +1750,7 @@ TRACE_EVENT(rxrpc_reset_cwnd,
 
            TP_STRUCT__entry(
                    __field(unsigned int,               call)
-                   __field(enum rxrpc_congest_mode,    mode)
+                   __field(enum rxrpc_ca_state,        ca_state)
                    __field(unsigned short,             cwnd)
                    __field(unsigned short,             extra)
                    __field(rxrpc_seq_t,                hard_ack)
@@ -1749,7 +1761,7 @@ TRACE_EVENT(rxrpc_reset_cwnd,
 
            TP_fast_assign(
                    __entry->call       = call->debug_id;
-                   __entry->mode       = call->cong_mode;
+                   __entry->ca_state   = call->cong_ca_state;
                    __entry->cwnd       = call->cong_cwnd;
                    __entry->extra      = call->cong_extra;
                    __entry->hard_ack   = call->acks_hard_ack;
@@ -1761,7 +1773,7 @@ TRACE_EVENT(rxrpc_reset_cwnd,
            TP_printk("c=%08x q=%08x %s cw=%u+%u pr=%u tm=%llu d=%u",
                      __entry->call,
                      __entry->hard_ack,
-                     __print_symbolic(__entry->mode, rxrpc_congest_modes),
+                     __print_symbolic(__entry->ca_state, rxrpc_ca_states),
                      __entry->cwnd,
                      __entry->extra,
                      __entry->prepared,
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index 840293f913a38d07df2968a1a05effc2dfdb1e35..f6e6b2ab6c2a1702cdb316acaccdc27bdffb13f2 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -623,13 +623,13 @@ enum rxrpc_call_state {
 /*
  * Call Tx congestion management modes.
  */
-enum rxrpc_congest_mode {
-       RXRPC_CALL_SLOW_START,
-       RXRPC_CALL_CONGEST_AVOIDANCE,
-       RXRPC_CALL_PACKET_LOSS,
-       RXRPC_CALL_FAST_RETRANSMIT,
-       NR__RXRPC_CONGEST_MODES
-};
+enum rxrpc_ca_state {
+       RXRPC_CA_SLOW_START,
+       RXRPC_CA_CONGEST_AVOIDANCE,
+       RXRPC_CA_PACKET_LOSS,
+       RXRPC_CA_FAST_RETRANSMIT,
+       NR__RXRPC_CA_STATES
+} __mode(byte);
 
 /*
  * RxRPC call definition
@@ -727,12 +727,12 @@ struct rxrpc_call {
         */
 #define RXRPC_TX_SMSS          RXRPC_JUMBO_DATALEN
 #define RXRPC_MIN_CWND         4
-       u8                      cong_cwnd;      /* Congestion window size */
+       enum rxrpc_ca_state     cong_ca_state;  /* Congestion control state */
        u8                      cong_extra;     /* Extra to send for congestion management */
-       u8                      cong_ssthresh;  /* Slow-start threshold */
-       enum rxrpc_congest_mode cong_mode:8;    /* Congestion management mode */
-       u8                      cong_dup_acks;  /* Count of ACKs showing missing packets */
-       u8                      cong_cumul_acks; /* Cumulative ACK count */
+       u16                     cong_cwnd;      /* Congestion window size */
+       u16                     cong_ssthresh;  /* Slow-start threshold */
+       u16                     cong_dup_acks;  /* Count of ACKs showing missing packets */
+       u16                     cong_cumul_acks; /* Cumulative ACK count */
        ktime_t                 cong_tstamp;    /* Last time cwnd was changed */
        struct sk_buff          *cong_last_nack; /* Last ACK with nacks received */
 
@@ -763,27 +763,24 @@ struct rxrpc_call {
        rxrpc_seq_t             acks_prev_seq;  /* Highest previousPacket received */
        rxrpc_seq_t             acks_lowest_nak; /* Lowest NACK in the buffer (or ==tx_hard_ack) */
        rxrpc_serial_t          acks_highest_serial; /* Highest serial number ACK'd */
+       unsigned short          acks_nr_sacks;  /* Number of soft acks recorded */
+       unsigned short          acks_nr_snacks; /* Number of soft nacks recorded */
 };
 
 /*
  * Summary of a new ACK and the changes it made to the Tx buffer packet states.
  */
 struct rxrpc_ack_summary {
-       u16                     nr_acks;                /* Number of ACKs in packet */
-       u16                     nr_new_acks;            /* Number of new ACKs in packet */
-       u16                     nr_new_nacks;           /* Number of new nacks in packet */
-       u16                     nr_retained_nacks;      /* Number of nacks retained between ACKs */
-       u8                      ack_reason;
-       bool                    saw_nacks;              /* Saw NACKs in packet */
-       bool                    new_low_nack;           /* T if new low NACK found */
-       bool                    retrans_timeo;          /* T if reTx due to timeout happened */
-       u8                      flight_size;            /* Number of unreceived transmissions */
-       /* Place to stash values for tracing */
-       enum rxrpc_congest_mode mode:8;
-       u8                      cwnd;
-       u8                      ssthresh;
-       u8                      dup_acks;
-       u8                      cumulative_acks;
+       u16             in_flight;              /* Number of unreceived transmissions */
+       u16             nr_new_hacks;           /* Number of rotated new ACKs */
+       u16             nr_new_sacks;           /* Number of new soft ACKs in packet */
+       u16             nr_new_snacks;          /* Number of new soft nacks in packet */
+       u16             nr_retained_snacks;     /* Number of nacks retained between ACKs */
+       u8              ack_reason;
+       bool            saw_snacks:1;           /* T if we saw a soft NACK */
+       bool            new_low_snack:1;        /* T if new low soft NACK found */
+       bool            retrans_timeo:1;        /* T if reTx due to timeout happened */
+       u8 /*enum rxrpc_congest_change*/ change;
 };
 
 /*
diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c
index 706631e6ac2f1074803d4427402670668053290e..5f76bd90567c0f9df61a6a29cbe4aff4657306b7 100644
--- a/net/rxrpc/conn_client.c
+++ b/net/rxrpc/conn_client.c
@@ -437,9 +437,9 @@ static void rxrpc_activate_one_channel(struct rxrpc_connection *conn,
        call->dest_srx.srx_service = conn->service_id;
        call->cong_ssthresh = call->peer->cong_ssthresh;
        if (call->cong_cwnd >= call->cong_ssthresh)
-               call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE;
+               call->cong_ca_state = RXRPC_CA_CONGEST_AVOIDANCE;
        else
-               call->cong_mode = RXRPC_CALL_SLOW_START;
+               call->cong_ca_state = RXRPC_CA_SLOW_START;
 
        chan->call_id           = call_id;
        chan->call_debug_id     = call->debug_id;
diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
index 8d7ab4b9d7d03c3a6ef522842e3d9594dec509bb..c25d816aafeed5cb70a10cf1e90a263e6bf2383a 100644
--- a/net/rxrpc/input.c
+++ b/net/rxrpc/input.c
@@ -34,49 +34,41 @@ static void rxrpc_congestion_management(struct rxrpc_call *call,
                                        struct rxrpc_ack_summary *summary,
                                        rxrpc_serial_t acked_serial)
 {
-       enum rxrpc_congest_change change = rxrpc_cong_no_change;
-       unsigned int cumulative_acks = call->cong_cumul_acks;
-       unsigned int cwnd = call->cong_cwnd;
        bool resend = false;
 
-       summary->flight_size =
-               (call->tx_top - call->tx_bottom) - summary->nr_acks;
+       summary->change = rxrpc_cong_no_change;
+       summary->in_flight = (call->tx_top - call->tx_bottom) - call->acks_nr_sacks;
 
        if (test_and_clear_bit(RXRPC_CALL_RETRANS_TIMEOUT, &call->flags)) {
                summary->retrans_timeo = true;
-               call->cong_ssthresh = umax(summary->flight_size / 2, 2);
-               cwnd = 1;
-               if (cwnd >= call->cong_ssthresh &&
-                   call->cong_mode == RXRPC_CALL_SLOW_START) {
-                       call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE;
+               call->cong_ssthresh = umax(summary->in_flight / 2, 2);
+               call->cong_cwnd = 1;
+               if (call->cong_cwnd >= call->cong_ssthresh &&
+                   call->cong_ca_state == RXRPC_CA_SLOW_START) {
+                       call->cong_ca_state = RXRPC_CA_CONGEST_AVOIDANCE;
                        call->cong_tstamp = skb->tstamp;
-                       cumulative_acks = 0;
+                       call->cong_cumul_acks = 0;
                }
        }
 
-       cumulative_acks += summary->nr_new_acks;
-       if (cumulative_acks > 255)
-               cumulative_acks = 255;
+       call->cong_cumul_acks += summary->nr_new_sacks;
+       if (call->cong_cumul_acks > 255)
+               call->cong_cumul_acks = 255;
 
-       summary->cwnd = call->cong_cwnd;
-       summary->ssthresh = call->cong_ssthresh;
-       summary->cumulative_acks = cumulative_acks;
-       summary->dup_acks = call->cong_dup_acks;
-
-       switch (call->cong_mode) {
-       case RXRPC_CALL_SLOW_START:
-               if (summary->saw_nacks)
+       switch (call->cong_ca_state) {
+       case RXRPC_CA_SLOW_START:
+               if (summary->saw_snacks)
                        goto packet_loss_detected;
-               if (summary->cumulative_acks > 0)
-                       cwnd += 1;
-               if (cwnd >= call->cong_ssthresh) {
-                       call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE;
+               if (call->cong_cumul_acks > 0)
+                       call->cong_cwnd += 1;
+               if (call->cong_cwnd >= call->cong_ssthresh) {
+                       call->cong_ca_state = RXRPC_CA_CONGEST_AVOIDANCE;
                        call->cong_tstamp = skb->tstamp;
                }
                goto out;
 
-       case RXRPC_CALL_CONGEST_AVOIDANCE:
-               if (summary->saw_nacks)
+       case RXRPC_CA_CONGEST_AVOIDANCE:
+               if (summary->saw_snacks)
                        goto packet_loss_detected;
 
                /* We analyse the number of packets that get ACK'd per RTT
@@ -88,18 +80,18 @@ static void rxrpc_congestion_management(struct rxrpc_call *call,
                                 ktime_add_us(call->cong_tstamp,
                                              call->peer->srtt_us >> 3)))
                        goto out_no_clear_ca;
-               change = rxrpc_cong_rtt_window_end;
+               summary->change = rxrpc_cong_rtt_window_end;
                call->cong_tstamp = skb->tstamp;
-               if (cumulative_acks >= cwnd)
-                       cwnd++;
+               if (call->cong_cumul_acks >= call->cong_cwnd)
+                       call->cong_cwnd++;
                goto out;
 
-       case RXRPC_CALL_PACKET_LOSS:
-               if (!summary->saw_nacks)
+       case RXRPC_CA_PACKET_LOSS:
+               if (!summary->saw_snacks)
                        goto resume_normality;
 
-               if (summary->new_low_nack) {
-                       change = rxrpc_cong_new_low_nack;
+               if (summary->new_low_snack) {
+                       summary->change = rxrpc_cong_new_low_nack;
                        call->cong_dup_acks = 1;
                        if (call->cong_extra > 1)
                                call->cong_extra = 1;
@@ -110,29 +102,29 @@ static void rxrpc_congestion_management(struct rxrpc_call *call,
                if (call->cong_dup_acks < 3)
                        goto send_extra_data;
 
-               change = rxrpc_cong_begin_retransmission;
-               call->cong_mode = RXRPC_CALL_FAST_RETRANSMIT;
-               call->cong_ssthresh = umax(summary->flight_size / 2, 2);
-               cwnd = call->cong_ssthresh + 3;
+               summary->change = rxrpc_cong_begin_retransmission;
+               call->cong_ca_state = RXRPC_CA_FAST_RETRANSMIT;
+               call->cong_ssthresh = umax(summary->in_flight / 2, 2);
+               call->cong_cwnd = call->cong_ssthresh + 3;
                call->cong_extra = 0;
                call->cong_dup_acks = 0;
                resend = true;
                goto out;
 
-       case RXRPC_CALL_FAST_RETRANSMIT:
-               if (!summary->new_low_nack) {
-                       if (summary->nr_new_acks == 0)
-                               cwnd += 1;
+       case RXRPC_CA_FAST_RETRANSMIT:
+               if (!summary->new_low_snack) {
+                       if (summary->nr_new_sacks == 0)
+                               call->cong_cwnd += 1;
                        call->cong_dup_acks++;
                        if (call->cong_dup_acks == 2) {
-                               change = rxrpc_cong_retransmit_again;
+                               summary->change = rxrpc_cong_retransmit_again;
                                call->cong_dup_acks = 0;
                                resend = true;
                        }
                } else {
-                       change = rxrpc_cong_progress;
-                       cwnd = call->cong_ssthresh;
-                       if (!summary->saw_nacks)
+                       summary->change = rxrpc_cong_progress;
+                       call->cong_cwnd = call->cong_ssthresh;
+                       if (!summary->saw_snacks)
                                goto resume_normality;
                }
                goto out;
@@ -143,30 +135,27 @@ static void rxrpc_congestion_management(struct rxrpc_call *call,
        }
 
 resume_normality:
-       change = rxrpc_cong_cleared_nacks;
+       summary->change = rxrpc_cong_cleared_nacks;
        call->cong_dup_acks = 0;
        call->cong_extra = 0;
        call->cong_tstamp = skb->tstamp;
-       if (cwnd < call->cong_ssthresh)
-               call->cong_mode = RXRPC_CALL_SLOW_START;
+       if (call->cong_cwnd < call->cong_ssthresh)
+               call->cong_ca_state = RXRPC_CA_SLOW_START;
        else
-               call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE;
+               call->cong_ca_state = RXRPC_CA_CONGEST_AVOIDANCE;
 out:
-       cumulative_acks = 0;
+       call->cong_cumul_acks = 0;
 out_no_clear_ca:
-       if (cwnd >= RXRPC_TX_MAX_WINDOW)
-               cwnd = RXRPC_TX_MAX_WINDOW;
-       call->cong_cwnd = cwnd;
-       call->cong_cumul_acks = cumulative_acks;
-       summary->mode = call->cong_mode;
-       trace_rxrpc_congest(call, summary, acked_serial, change);
+       if (call->cong_cwnd >= RXRPC_TX_MAX_WINDOW)
+               call->cong_cwnd = RXRPC_TX_MAX_WINDOW;
+       trace_rxrpc_congest(call, summary, acked_serial);
        if (resend)
                rxrpc_resend(call, skb);
        return;
 
 packet_loss_detected:
-       change = rxrpc_cong_saw_nack;
-       call->cong_mode = RXRPC_CALL_PACKET_LOSS;
+       summary->change = rxrpc_cong_saw_nack;
+       call->cong_ca_state = RXRPC_CA_PACKET_LOSS;
        call->cong_dup_acks = 0;
        goto send_extra_data;
 
@@ -175,7 +164,7 @@ send_extra_data:
         * state.
         */
        if (test_bit(RXRPC_CALL_TX_LAST, &call->flags) ||
-           summary->nr_acks != call->tx_top - call->tx_bottom) {
+           call->acks_nr_sacks != call->tx_top - call->tx_bottom) {
                call->cong_extra++;
                wake_up(&call->waitq);
        }
@@ -189,8 +178,8 @@ void rxrpc_congestion_degrade(struct rxrpc_call *call)
 {
        ktime_t rtt, now;
 
-       if (call->cong_mode != RXRPC_CALL_SLOW_START &&
-           call->cong_mode != RXRPC_CALL_CONGEST_AVOIDANCE)
+       if (call->cong_ca_state != RXRPC_CA_SLOW_START &&
+           call->cong_ca_state != RXRPC_CA_CONGEST_AVOIDANCE)
                return;
        if (__rxrpc_call_state(call) == RXRPC_CALL_CLIENT_AWAIT_REPLY)
                return;
@@ -203,7 +192,7 @@ void rxrpc_congestion_degrade(struct rxrpc_call *call)
        trace_rxrpc_reset_cwnd(call, now);
        rxrpc_inc_stat(call->rxnet, stat_tx_data_cwnd_reset);
        call->tx_last_sent = now;
-       call->cong_mode = RXRPC_CALL_SLOW_START;
+       call->cong_ca_state = RXRPC_CA_SLOW_START;
        call->cong_ssthresh = umax(call->cong_ssthresh, call->cong_cwnd * 3 / 4);
        call->cong_cwnd = umax(call->cong_cwnd / 2, RXRPC_MIN_CWND);
 }
@@ -282,7 +271,7 @@ static bool rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
        if (call->acks_lowest_nak == call->tx_bottom) {
                call->acks_lowest_nak = to;
        } else if (after(to, call->acks_lowest_nak)) {
-               summary->new_low_nack = true;
+               summary->new_low_snack = true;
                call->acks_lowest_nak = to;
        }
 
@@ -795,11 +784,11 @@ static rxrpc_seq_t rxrpc_input_check_prev_ack(struct rxrpc_call *call,
        u8 *acks = skb->data + sizeof(struct rxrpc_wire_header) + sizeof(struct rxrpc_ackpacket);
 
        if (after_eq(seq, old_seq + sp->ack.nr_acks)) {
-               summary->nr_new_acks += sp->ack.nr_nacks;
-               summary->nr_new_acks += seq - (old_seq + sp->ack.nr_acks);
-               summary->nr_retained_nacks = 0;
+               summary->nr_new_sacks += sp->ack.nr_nacks;
+               summary->nr_new_sacks += seq - (old_seq + sp->ack.nr_acks);
+               summary->nr_retained_snacks = 0;
        } else if (seq == old_seq) {
-               summary->nr_retained_nacks = sp->ack.nr_nacks;
+               summary->nr_retained_snacks = sp->ack.nr_nacks;
        } else {
                for (i = 0; i < sp->ack.nr_acks; i++) {
                        if (acks[i] == RXRPC_ACK_TYPE_NACK) {
@@ -810,8 +799,8 @@ static rxrpc_seq_t rxrpc_input_check_prev_ack(struct rxrpc_call *call,
                        }
                }
 
-               summary->nr_new_acks += new_acks;
-               summary->nr_retained_nacks = retained_nacks;
+               summary->nr_new_sacks += new_acks;
+               summary->nr_retained_snacks = retained_nacks;
        }
 
        return old_seq + sp->ack.nr_acks - 1;
@@ -840,16 +829,16 @@ static void rxrpc_input_soft_acks(struct rxrpc_call *call,
        for (i = 0; i < sp->ack.nr_acks; i++) {
                seq++;
                if (acks[i] == RXRPC_ACK_TYPE_ACK) {
-                       summary->nr_acks++;
+                       call->acks_nr_sacks++;
                        if (after(seq, since))
-                               summary->nr_new_acks++;
+                               summary->nr_new_sacks++;
                } else {
-                       summary->saw_nacks = true;
+                       summary->saw_snacks = true;
                        if (before_eq(seq, since)) {
                                /* Overlap with previous ACK */
                                old_nacks++;
                        } else {
-                               summary->nr_new_nacks++;
+                               summary->nr_new_snacks++;
                                sp->ack.nr_nacks++;
                        }
 
@@ -860,7 +849,7 @@ static void rxrpc_input_soft_acks(struct rxrpc_call *call,
 
        if (lowest_nak != call->acks_lowest_nak) {
                call->acks_lowest_nak = lowest_nak;
-               summary->new_low_nack = true;
+               summary->new_low_snack = true;
        }
 
        /* We *can* have more nacks than we did - the peer is permitted to drop
@@ -868,9 +857,9 @@ static void rxrpc_input_soft_acks(struct rxrpc_call *call,
         * possible for the nack distribution to change whilst the number of
         * nacks stays the same or goes down.
         */
-       if (old_nacks < summary->nr_retained_nacks)
-               summary->nr_new_acks += summary->nr_retained_nacks - old_nacks;
-       summary->nr_retained_nacks = old_nacks;
+       if (old_nacks < summary->nr_retained_snacks)
+               summary->nr_new_sacks += summary->nr_retained_snacks - old_nacks;
+       summary->nr_retained_snacks = old_nacks;
 }
 
 /*
@@ -996,7 +985,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
                rxrpc_free_skb(call->cong_last_nack, rxrpc_skb_put_last_nack);
                call->cong_last_nack = NULL;
        } else {
-               summary.nr_new_acks = hard_ack - call->acks_hard_ack;
+               summary.nr_new_sacks = hard_ack - call->acks_hard_ack;
                call->acks_lowest_nak = hard_ack + nr_acks;
                since = hard_ack;
        }
@@ -1054,7 +1043,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
        }
 
        if (test_bit(RXRPC_CALL_TX_LAST, &call->flags) &&
-           summary.nr_acks == call->tx_top - hard_ack &&
+           call->acks_nr_sacks == call->tx_top - hard_ack &&
            rxrpc_is_client_call(call))
                rxrpc_propose_ping(call, ack_serial,
                                   rxrpc_propose_ack_ping_for_lost_reply);
diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c
index 3886777d1bb60557b4ae3c2824a4fe6996292f70..7ed928b6f0e16195af39274b9b9a366fd4f1edd5 100644
--- a/net/rxrpc/output.c
+++ b/net/rxrpc/output.c
@@ -419,7 +419,7 @@ static size_t rxrpc_prepare_data_subpacket(struct rxrpc_call *call,
                why = rxrpc_reqack_ack_lost;
        else if (txb->flags & RXRPC_TXBUF_RESENT)
                why = rxrpc_reqack_retrans;
-       else if (call->cong_mode == RXRPC_CALL_SLOW_START && call->cong_cwnd <= 2)
+       else if (call->cong_ca_state == RXRPC_CA_SLOW_START && call->cong_cwnd <= 2)
                why = rxrpc_reqack_slow_start;
        else if (call->tx_winsize <= 2)
                why = rxrpc_reqack_small_txwin;