From: Krishna Kumar
Date: Mon, 25 Aug 2025 03:10:05 +0000 (+0530)
Subject: net: Cache hash and flow_id to avoid recalculation
X-Git-Tag: v6.18-rc1~132^2~317^2
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=48aa30443e52c9666d5cd5e67532e475f212337e;p=thirdparty%2Fkernel%2Fstable.git

net: Cache hash and flow_id to avoid recalculation

get_rps_cpu() can cache flow_id and hash, since both are required by
set_rps_cpu(), instead of calculating them twice.

Signed-off-by: Krishna Kumar
Link: https://patch.msgid.link/20250825031005.3674864-3-krikku@gmail.com
Signed-off-by: Jakub Kicinski
---

diff --git a/net/core/dev.c b/net/core/dev.c
index fbf0894c55d0b..1d1650d9ecff4 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4881,7 +4881,8 @@ static bool rps_flow_is_active(struct rps_dev_flow *rflow,
 
 static struct rps_dev_flow *
 set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
-	    struct rps_dev_flow *rflow, u16 next_cpu)
+	    struct rps_dev_flow *rflow, u16 next_cpu, u32 hash,
+	    u32 flow_id)
 {
 	if (next_cpu < nr_cpu_ids) {
 		u32 head;
@@ -4892,8 +4893,6 @@ set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
 		struct rps_dev_flow *tmp_rflow;
 		unsigned int tmp_cpu;
 		u16 rxq_index;
-		u32 flow_id;
-		u32 hash;
 		int rc;
 
 		/* Should we steer this flow to a different hardware queue? */
@@ -4909,9 +4908,6 @@ set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
 		if (!flow_table)
 			goto out;
 
-		hash = skb_get_hash(skb);
-		flow_id = rfs_slot(hash, flow_table);
-
 		tmp_rflow = &flow_table->flows[flow_id];
 		tmp_cpu = READ_ONCE(tmp_rflow->cpu);
 
@@ -4959,6 +4955,7 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
 	struct rps_dev_flow_table *flow_table;
 	struct rps_map *map;
 	int cpu = -1;
+	u32 flow_id;
 	u32 tcpu;
 	u32 hash;
 
@@ -5005,7 +5002,8 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
 		/* OK, now we know there is a match,
 		 * we can look at the local (per receive queue) flow table
 		 */
-		rflow = &flow_table->flows[rfs_slot(hash, flow_table)];
+		flow_id = rfs_slot(hash, flow_table);
+		rflow = &flow_table->flows[flow_id];
 		tcpu = rflow->cpu;
 
 		/*
@@ -5024,7 +5022,8 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
 		    ((int)(READ_ONCE(per_cpu(softnet_data, tcpu).input_queue_head) -
 		      rflow->last_qtail)) >= 0)) {
 			tcpu = next_cpu;
-			rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
+			rflow = set_rps_cpu(dev, skb, rflow, next_cpu, hash,
+					    flow_id);
 		}
 
 		if (tcpu < nr_cpu_ids && cpu_online(tcpu)) {
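
Not part of the patch: a minimal userspace C sketch of the caller-side caching
pattern this change applies. compute_hash(), table_slot(), steer_flow(),
struct pkt and struct flow_table are hypothetical stand-ins for skb_get_hash(),
rfs_slot(), set_rps_cpu(), the sk_buff and the RPS flow table; the point is
only that the caller computes hash and flow_id once and passes both to the
helper instead of letting the helper recompute them.

#include <stdint.h>
#include <stdio.h>

struct pkt {
	uint32_t src;
	uint32_t dst;
};

struct flow_table {
	uint32_t mask;
	uint32_t cpu[16];
};

/* Stand-in for skb_get_hash(): any cheap hash over the packet fields. */
static uint32_t compute_hash(const struct pkt *p)
{
	return (p->src * 2654435761u) ^ p->dst;
}

/* Stand-in for rfs_slot(): map a hash to a slot in the flow table. */
static uint32_t table_slot(uint32_t hash, const struct flow_table *t)
{
	return hash & t->mask;
}

/*
 * Stand-in for set_rps_cpu(): after the patch, the helper receives hash and
 * flow_id from its caller rather than deriving them from the packet again.
 */
static void steer_flow(struct flow_table *t, uint32_t hash, uint32_t flow_id,
		       uint32_t next_cpu)
{
	t->cpu[flow_id] = next_cpu;
	printf("hash 0x%08x -> slot %u -> cpu %u\n", hash, flow_id, next_cpu);
}

int main(void)
{
	struct flow_table tbl = { .mask = 15 };
	struct pkt p = { .src = 0x0a000001, .dst = 0x0a000002 };

	/* Caller-side caching, as get_rps_cpu() does after this change:
	 * compute hash and flow_id exactly once, then pass both down.
	 */
	uint32_t hash = compute_hash(&p);
	uint32_t flow_id = table_slot(hash, &tbl);

	steer_flow(&tbl, hash, flow_id, 3);
	return 0;
}

The same shape appears in the diff above: get_rps_cpu() already needs the hash
and the rfs_slot() result for its own flow-table lookup, so handing hash and
flow_id to set_rps_cpu() removes a second skb_get_hash()/rfs_slot() pair on
the aRFS steering path.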