git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
tcp: move icsk_clean_acked to a better location
authorEric Dumazet <edumazet@google.com>
Mon, 17 Mar 2025 08:53:13 +0000 (08:53 +0000)
committerJakub Kicinski <kuba@kernel.org>
Mon, 24 Mar 2025 16:55:18 +0000 (09:55 -0700)
As a follow-up to my presentation in Zagreb at netdev 0x19:

icsk_clean_acked is only used by TCP when/if CONFIG_TLS_DEVICE
is enabled from tcp_ack().

Rename it to tcp_clean_acked, move it to tcp_sock structure
in the tcp_sock_read_rx for better cache locality in TCP
fast path.

Define this field only when CONFIG_TLS_DEVICE is enabled
saving 8 bytes on configs not using it.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: Neal Cardwell <ncardwell@google.com>
Reviewed-by: Sabrina Dubroca <sd@queasysnail.net>
Reviewed-by: Kuniyuki Iwashima <kuniyu@amazon.com>
Link: https://patch.msgid.link/20250317085313.2023214-1-edumazet@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Documentation/networking/net_cachelines/tcp_sock.rst
include/linux/tcp.h
include/net/inet_connection_sock.h
include/net/tcp.h
net/ipv4/tcp.c
net/ipv4/tcp_input.c
net/tls/tls_device.c

index 1f79765072b10d99e15815fdd8a4633c74051a7e..bc9b2131bf7acc888e8910b1c5926c83de48f9d6 100644 (file)
@@ -27,6 +27,7 @@ u32                           dsack_dups
 u32                           snd_una                 read_mostly         read_write          tcp_wnd_end,tcp_urg_mode,tcp_minshall_check,tcp_cwnd_validate(tx);tcp_ack,tcp_may_update_window,tcp_clean_rtx_queue(write),tcp_ack_tstamp(rx)
 u32                           snd_sml                 read_write                              tcp_minshall_check,tcp_minshall_update
 u32                           rcv_tstamp                                  read_mostly         tcp_ack
+void *                        tcp_clean_acked                             read_mostly         tcp_ack
 u32                           lsndtime                read_write                              tcp_slow_start_after_idle_check,tcp_event_data_sent
 u32                           last_oow_ack_time
 u32                           compressed_ack_rcv_nxt
index 159b2c59eb6271030dc2c8d58b43229ebef10ea5..1669d95bb0f9aa97e1e74b7b62bb252eb404fbfb 100644 (file)
@@ -244,6 +244,9 @@ struct tcp_sock {
        struct  minmax rtt_min;
        /* OOO segments go in this rbtree. Socket lock must be held. */
        struct rb_root  out_of_order_queue;
+#if defined(CONFIG_TLS_DEVICE)
+       void (*tcp_clean_acked)(struct sock *sk, u32 acked_seq);
+#endif
        u32     snd_ssthresh;   /* Slow start size threshold            */
        u8      recvmsg_inq : 1;/* Indicate # of bytes in queue upon recvmsg */
        __cacheline_group_end(tcp_sock_read_rx);
index f736d3097e43d97ee32f5d31f0e566536fe05a35..e8ed52fc603f2397620369588c21206f3c1a3fb7 100644 (file)
@@ -66,7 +66,6 @@ struct inet_connection_sock_af_ops {
  * @icsk_af_ops                   Operations which are AF_INET{4,6} specific
  * @icsk_ulp_ops          Pluggable ULP control hook
  * @icsk_ulp_data         ULP private data
- * @icsk_clean_acked      Clean acked data hook
  * @icsk_ca_state:        Congestion control state
  * @icsk_retransmits:     Number of unrecovered [RTO] timeouts
  * @icsk_pending:         Scheduled timer event
@@ -97,7 +96,6 @@ struct inet_connection_sock {
        const struct inet_connection_sock_af_ops *icsk_af_ops;
        const struct tcp_ulp_ops  *icsk_ulp_ops;
        void __rcu                *icsk_ulp_data;
-       void (*icsk_clean_acked)(struct sock *sk, u32 acked_seq);
        unsigned int              (*icsk_sync_mss)(struct sock *sk, u32 pmtu);
        __u8                      icsk_ca_state:5,
                                  icsk_ca_initialized:1,
index d08fbf90495de69b157d3c87c50e82d781a365df..f8efe56bbccb17e92f84f6114a147bea7c303fa8 100644 (file)
@@ -2815,9 +2815,9 @@ extern struct static_key_false tcp_have_smc;
 #endif
 
 #if IS_ENABLED(CONFIG_TLS_DEVICE)
-void clean_acked_data_enable(struct inet_connection_sock *icsk,
+void clean_acked_data_enable(struct tcp_sock *tp,
                             void (*cad)(struct sock *sk, u32 ack_seq));
-void clean_acked_data_disable(struct inet_connection_sock *icsk);
+void clean_acked_data_disable(struct tcp_sock *tp);
 void clean_acked_data_flush(void);
 #endif
 
index 989c3c3d8e757361a0ac4a9f039a3cfca10d9612..fde56d28f586de27d7161565801d1c6962d7fbfe 100644 (file)
@@ -5026,7 +5026,12 @@ static void __init tcp_struct_check(void)
        CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, rtt_min);
        CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, out_of_order_queue);
        CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, snd_ssthresh);
+#if IS_ENABLED(CONFIG_TLS_DEVICE)
+       CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, tcp_clean_acked);
+       CACHELINE_ASSERT_GROUP_SIZE(struct tcp_sock, tcp_sock_read_rx, 77);
+#else
        CACHELINE_ASSERT_GROUP_SIZE(struct tcp_sock, tcp_sock_read_rx, 69);
+#endif
 
        /* TX read-write hotpath cache lines */
        CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, segs_out);
index 72382ee4456dbd89fd1b69f3bdbf6b9c8ef5aa78..a35018e2d0ba27b14d0b59d3728f7181b1a51161 100644 (file)
@@ -119,18 +119,18 @@ int sysctl_tcp_max_orphans __read_mostly = NR_FILE;
 #if IS_ENABLED(CONFIG_TLS_DEVICE)
 static DEFINE_STATIC_KEY_DEFERRED_FALSE(clean_acked_data_enabled, HZ);
 
-void clean_acked_data_enable(struct inet_connection_sock *icsk,
+void clean_acked_data_enable(struct tcp_sock *tp,
                             void (*cad)(struct sock *sk, u32 ack_seq))
 {
-       icsk->icsk_clean_acked = cad;
+       tp->tcp_clean_acked = cad;
        static_branch_deferred_inc(&clean_acked_data_enabled);
 }
 EXPORT_SYMBOL_GPL(clean_acked_data_enable);
 
-void clean_acked_data_disable(struct inet_connection_sock *icsk)
+void clean_acked_data_disable(struct tcp_sock *tp)
 {
        static_branch_slow_dec_deferred(&clean_acked_data_enabled);
-       icsk->icsk_clean_acked = NULL;
+       tp->tcp_clean_acked = NULL;
 }
 EXPORT_SYMBOL_GPL(clean_acked_data_disable);
 
@@ -3987,8 +3987,8 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 
 #if IS_ENABLED(CONFIG_TLS_DEVICE)
                if (static_branch_unlikely(&clean_acked_data_enabled.key))
-                       if (icsk->icsk_clean_acked)
-                               icsk->icsk_clean_acked(sk, ack);
+                       if (tp->tcp_clean_acked)
+                               tp->tcp_clean_acked(sk, ack);
 #endif
        }
 
index e50b6e71df13be57183bcccdd00768f0f5a7cd95..f672a62a9a52f6ea7a0f5c500acdf9538f08d297 100644 (file)
@@ -157,7 +157,7 @@ static void delete_all_records(struct tls_offload_context_tx *offload_ctx)
        offload_ctx->retransmit_hint = NULL;
 }
 
-static void tls_icsk_clean_acked(struct sock *sk, u32 acked_seq)
+static void tls_tcp_clean_acked(struct sock *sk, u32 acked_seq)
 {
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_record_info *info, *temp;
@@ -204,7 +204,7 @@ void tls_device_sk_destruct(struct sock *sk)
                        destroy_record(ctx->open_record);
                delete_all_records(ctx);
                crypto_free_aead(ctx->aead_send);
-               clean_acked_data_disable(inet_csk(sk));
+               clean_acked_data_disable(tcp_sk(sk));
        }
 
        tls_device_queue_ctx_destruction(tls_ctx);
@@ -1126,7 +1126,7 @@ int tls_set_device_offload(struct sock *sk)
        start_marker_record->num_frags = 0;
        list_add_tail(&start_marker_record->list, &offload_ctx->records_list);
 
-       clean_acked_data_enable(inet_csk(sk), &tls_icsk_clean_acked);
+       clean_acked_data_enable(tcp_sk(sk), &tls_tcp_clean_acked);
        ctx->push_pending_record = tls_device_push_pending_record;
 
        /* TLS offload is greatly simplified if we don't send
@@ -1172,7 +1172,7 @@ int tls_set_device_offload(struct sock *sk)
 
 release_lock:
        up_read(&device_offload_lock);
-       clean_acked_data_disable(inet_csk(sk));
+       clean_acked_data_disable(tcp_sk(sk));
        crypto_free_aead(offload_ctx->aead_send);
 free_offload_ctx:
        kfree(offload_ctx);