net-sysfs: add rps_sock_flow_table_mask() helper
author    Eric Dumazet <edumazet@google.com>
          Mon, 2 Mar 2026 18:14:28 +0000 (18:14 +0000)
committer Jakub Kicinski <kuba@kernel.org>
          Thu, 5 Mar 2026 00:54:09 +0000 (16:54 -0800)
In preparation for the following patch, abstract access
to the @mask field in 'struct rps_sock_flow_table'.

Also clean up rps_sock_flow_sysctl() a bit:

- Rename orig_sock_table to o_sock_table.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: Kuniyuki Iwashima <kuniyu@google.com>
Link: https://patch.msgid.link/20260302181432.1836150-4-edumazet@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
include/net/rps.h
net/core/dev.c
net/core/sysctl_net_core.c

diff --git a/include/net/rps.h b/include/net/rps.h
index 32cfa250d9f931b8ab1c94e0410d0820bb9c999f..82cdffdf3e6b0035e7ceeb130b5b4ac19772e46c 100644
--- a/include/net/rps.h
+++ b/include/net/rps.h
@@ -60,18 +60,23 @@ struct rps_dev_flow_table {
  * meaning we use 32-6=26 bits for the hash.
  */
 struct rps_sock_flow_table {
-       u32             mask;
+       u32             _mask;
 
        u32             ents[] ____cacheline_aligned_in_smp;
 };
 #define        RPS_SOCK_FLOW_TABLE_SIZE(_num) (offsetof(struct rps_sock_flow_table, ents[_num]))
 
+static inline u32 rps_sock_flow_table_mask(const struct rps_sock_flow_table *table)
+{
+       return table->_mask;
+}
+
 #define RPS_NO_CPU 0xffff
 
 static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
                                        u32 hash)
 {
-       unsigned int index = hash & table->mask;
+       unsigned int index = hash & rps_sock_flow_table_mask(table);
        u32 val = hash & ~net_hotdata.rps_cpu_mask;
 
        /* We only give a hint, preemption can change CPU under us */
@@ -129,7 +134,7 @@ static inline void _sock_rps_delete_flow(const struct sock *sk)
        rcu_read_lock();
        table = rcu_dereference(net_hotdata.rps_sock_flow_table);
        if (table) {
-               index = hash & table->mask;
+               index = hash & rps_sock_flow_table_mask(table);
                if (READ_ONCE(table->ents[index]) != RPS_NO_CPU)
                        WRITE_ONCE(table->ents[index], RPS_NO_CPU);
        }
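
The hunk above is the whole pattern: readers of the mask go through one helper, so a later patch can change how the mask is stored without touching any caller. A minimal standalone sketch of that accessor pattern in userspace C follows; the struct and helper names mirror the kernel code, while the allocation and main() driver are illustrative assumptions, not kernel code.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct rps_sock_flow_table {
	uint32_t _mask;		/* renamed from 'mask' to discourage direct access */
	uint32_t ents[];	/* flexible array: _mask + 1 entries */
};

/* The single place that knows how the mask is stored. */
static inline uint32_t
rps_sock_flow_table_mask(const struct rps_sock_flow_table *table)
{
	return table->_mask;
}

int main(void)
{
	uint32_t size = 8;	/* must be a power of two */
	struct rps_sock_flow_table *table =
		malloc(sizeof(*table) + size * sizeof(uint32_t));

	if (!table)
		return 1;
	table->_mask = size - 1;

	/* Callers now mask hashes through the helper, never via ->_mask. */
	uint32_t hash = 0x12345678;
	printf("hash %#" PRIx32 " -> slot %" PRIu32 "\n",
	       hash, hash & rps_sock_flow_table_mask(table));
	free(table);
	return 0;
}
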
diff --git a/net/core/dev.c b/net/core/dev.c
index 19b84eaa2643235b10389991a6c88ac257f04737..92f8eeac8de3ce4df051bc065a0d094cd8535b3b 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -5112,12 +5112,14 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
        if (flow_table && sock_flow_table) {
                struct rps_dev_flow *rflow;
                u32 next_cpu;
+               u32 flow_id;
                u32 ident;
 
                /* First check into global flow table if there is a match.
                 * This READ_ONCE() pairs with WRITE_ONCE() from rps_record_sock_flow().
                 */
-               ident = READ_ONCE(sock_flow_table->ents[hash & sock_flow_table->mask]);
+               flow_id = hash & rps_sock_flow_table_mask(sock_flow_table);
+               ident = READ_ONCE(sock_flow_table->ents[flow_id]);
                if ((ident ^ hash) & ~net_hotdata.rps_cpu_mask)
                        goto try_rps;
 
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 0b659c932cffef45e05207890b8187d64ae3c85a..cfbe798493b5789dc8baedf9dcbe9c20918e2ba6 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -145,16 +145,17 @@ static int rps_sock_flow_sysctl(const struct ctl_table *table, int write,
                .maxlen = sizeof(size),
                .mode = table->mode
        };
-       struct rps_sock_flow_table *orig_sock_table, *sock_table;
+       struct rps_sock_flow_table *o_sock_table, *sock_table;
        static DEFINE_MUTEX(sock_flow_mutex);
        void *tofree = NULL;
 
        mutex_lock(&sock_flow_mutex);
 
-       orig_sock_table = rcu_dereference_protected(
+       o_sock_table = rcu_dereference_protected(
                                        net_hotdata.rps_sock_flow_table,
                                        lockdep_is_held(&sock_flow_mutex));
-       size = orig_size = orig_sock_table ? orig_sock_table->mask + 1 : 0;
+       size = o_sock_table ? rps_sock_flow_table_mask(o_sock_table) + 1 : 0;
+       orig_size = size;
 
        ret = proc_dointvec(&tmp, write, buffer, lenp, ppos);
 
@@ -165,6 +166,7 @@ static int rps_sock_flow_sysctl(const struct ctl_table *table, int write,
                                mutex_unlock(&sock_flow_mutex);
                                return -EINVAL;
                        }
+                       sock_table = o_sock_table;
                        size = roundup_pow_of_two(size);
                        if (size != orig_size) {
                                sock_table =
@@ -175,26 +177,25 @@ static int rps_sock_flow_sysctl(const struct ctl_table *table, int write,
                                }
                                net_hotdata.rps_cpu_mask =
                                        roundup_pow_of_two(nr_cpu_ids) - 1;
-                               sock_table->mask = size - 1;
-                       } else
-                               sock_table = orig_sock_table;
+                               sock_table->_mask = size - 1;
+                       }
 
                        for (i = 0; i < size; i++)
                                sock_table->ents[i] = RPS_NO_CPU;
                } else
                        sock_table = NULL;
 
-               if (sock_table != orig_sock_table) {
+               if (sock_table != o_sock_table) {
                        rcu_assign_pointer(net_hotdata.rps_sock_flow_table,
                                           sock_table);
                        if (sock_table) {
                                static_branch_inc(&rps_needed);
                                static_branch_inc(&rfs_needed);
                        }
-                       if (orig_sock_table) {
+                       if (o_sock_table) {
                                static_branch_dec(&rps_needed);
                                static_branch_dec(&rfs_needed);
-                               tofree = orig_sock_table;
+                               tofree = o_sock_table;
                        }
                }
        }
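
After this patch the sysctl path reads: default sock_table to the existing table, reallocate only when the rounded-up size changed, and flush every entry to RPS_NO_CPU either way. A minimal userspace sketch of that control flow, with locking, RCU publication, the static branches, and freeing of the replaced table elided; resize_flow_table() and roundup_pow_of_two_u32() are illustrative stand-ins.

#include <stdint.h>
#include <stdlib.h>

#define RPS_NO_CPU 0xffff

struct rps_sock_flow_table {
	uint32_t _mask;
	uint32_t ents[];
};

/* Userspace stand-in for the kernel's roundup_pow_of_two(). */
static uint32_t roundup_pow_of_two_u32(uint32_t n)
{
	uint32_t p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

/* Modeled on the patched rps_sock_flow_sysctl(): reuse the old table
 * when the size is unchanged, otherwise allocate a fresh one; in both
 * cases every entry is reset to RPS_NO_CPU. (The kernel defers freeing
 * the replaced table until after an RCU grace period.) */
static struct rps_sock_flow_table *
resize_flow_table(struct rps_sock_flow_table *o_sock_table, uint32_t size)
{
	struct rps_sock_flow_table *sock_table = o_sock_table;
	uint32_t orig_size = o_sock_table ? o_sock_table->_mask + 1 : 0;

	size = roundup_pow_of_two_u32(size);
	if (size != orig_size) {
		sock_table = malloc(sizeof(*sock_table) +
				    size * sizeof(uint32_t));
		if (!sock_table)
			return NULL;
		sock_table->_mask = size - 1;
	}
	for (uint32_t i = 0; i < size; i++)
		sock_table->ents[i] = RPS_NO_CPU;
	return sock_table;
}

int main(void)
{
	struct rps_sock_flow_table *t = resize_flow_table(NULL, 100);
	int ok = t && t->_mask == 127;	/* 100 rounds up to 128 slots */

	free(t);
	return ok ? 0 : 1;
}
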