git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
6.6-stable patches
authorGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 29 Jan 2025 09:45:11 +0000 (10:45 +0100)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 29 Jan 2025 09:45:11 +0000 (10:45 +0100)
added patches:
cpufreq-amd-pstate-add-check-for-cpufreq_cpu_get-s-return-value.patch
ipv6-fix-soft-lockups-in-fib6_select_path-under-high-next-hop-churn.patch
rdma-bnxt_re-avoid-cpu-lockups-due-fifo-occupancy-check-loop.patch

queue-6.6/cpufreq-amd-pstate-add-check-for-cpufreq_cpu_get-s-return-value.patch [new file with mode: 0644]
queue-6.6/ipv6-fix-soft-lockups-in-fib6_select_path-under-high-next-hop-churn.patch [new file with mode: 0644]
queue-6.6/rdma-bnxt_re-avoid-cpu-lockups-due-fifo-occupancy-check-loop.patch [new file with mode: 0644]
queue-6.6/series

diff --git a/queue-6.6/cpufreq-amd-pstate-add-check-for-cpufreq_cpu_get-s-return-value.patch b/queue-6.6/cpufreq-amd-pstate-add-check-for-cpufreq_cpu_get-s-return-value.patch
new file mode 100644 (file)
index 0000000..1b0ec20
--- /dev/null
@@ -0,0 +1,42 @@
+From 5493f9714e4cdaf0ee7cec15899a231400cb1a9f Mon Sep 17 00:00:00 2001
+From: Anastasia Belova <abelova@astralinux.ru>
+Date: Mon, 26 Aug 2024 16:38:41 +0300
+Subject: cpufreq: amd-pstate: add check for cpufreq_cpu_get's return value
+
+From: Anastasia Belova <abelova@astralinux.ru>
+
+commit 5493f9714e4cdaf0ee7cec15899a231400cb1a9f upstream.
+
+cpufreq_cpu_get may return NULL. To avoid NULL-dereference check it
+and return in case of error.
+
+Found by Linux Verification Center (linuxtesting.org) with SVACE.
+
+Signed-off-by: Anastasia Belova <abelova@astralinux.ru>
+Reviewed-by: Perry Yuan <perry.yuan@amd.com>
+Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
+[ Raj: on 6.6, there don't have function amd_pstate_update_limits()
+  so applied the NULL checking in amd_pstate_adjust_perf() only ]
+Signed-off-by: Rajani Kantha <rajanikantha@engineer.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/cpufreq/amd-pstate.c |    7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+--- a/drivers/cpufreq/amd-pstate.c
++++ b/drivers/cpufreq/amd-pstate.c
+@@ -579,8 +579,13 @@ static void amd_pstate_adjust_perf(unsig
+       unsigned long max_perf, min_perf, des_perf,
+                     cap_perf, lowest_nonlinear_perf, max_freq;
+       struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+-      struct amd_cpudata *cpudata = policy->driver_data;
+       unsigned int target_freq;
++      struct amd_cpudata *cpudata;
++
++      if (!policy)
++              return;
++
++      cpudata = policy->driver_data;
+       if (policy->min != cpudata->min_limit_freq || policy->max != cpudata->max_limit_freq)
+               amd_pstate_update_min_max_limit(policy);
diff --git a/queue-6.6/ipv6-fix-soft-lockups-in-fib6_select_path-under-high-next-hop-churn.patch b/queue-6.6/ipv6-fix-soft-lockups-in-fib6_select_path-under-high-next-hop-churn.patch
new file mode 100644 (file)
index 0000000..02871e1
--- /dev/null
@@ -0,0 +1,525 @@
+From d9ccb18f83ea2bb654289b6ecf014fd267cc988b Mon Sep 17 00:00:00 2001
+From: Omid Ehtemam-Haghighi <omid.ehtemamhaghighi@menlosecurity.com>
+Date: Tue, 5 Nov 2024 17:02:36 -0800
+Subject: ipv6: Fix soft lockups in fib6_select_path under high next hop churn
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Omid Ehtemam-Haghighi <omid.ehtemamhaghighi@menlosecurity.com>
+
+commit d9ccb18f83ea2bb654289b6ecf014fd267cc988b upstream.
+
+Soft lockups have been observed on a cluster of Linux-based edge routers
+located in a highly dynamic environment. Using the `bird` service, these
+routers continuously update BGP-advertised routes due to frequently
+changing nexthop destinations, while also managing significant IPv6
+traffic. The lockups occur during the traversal of the multipath
+circular linked-list in the `fib6_select_path` function, particularly
+while iterating through the siblings in the list. The issue typically
+arises when the nodes of the linked list are unexpectedly deleted
+concurrently on a different core—indicated by their 'next' and
+'previous' elements pointing back to the node itself and their reference
+count dropping to zero. This results in an infinite loop, leading to a
+soft lockup that triggers a system panic via the watchdog timer.
+
+Apply RCU primitives in the problematic code sections to resolve the
+issue. Where necessary, update the references to fib6_siblings to
+annotate or use the RCU APIs.
+
+Include a test script that reproduces the issue. The script
+periodically updates the routing table while generating a heavy load
+of outgoing IPv6 traffic through multiple iperf3 clients. It
+consistently induces infinite soft lockups within a couple of minutes.
+
+Kernel log:
+
+ 0 [ffffbd13003e8d30] machine_kexec at ffffffff8ceaf3eb
+ 1 [ffffbd13003e8d90] __crash_kexec at ffffffff8d0120e3
+ 2 [ffffbd13003e8e58] panic at ffffffff8cef65d4
+ 3 [ffffbd13003e8ed8] watchdog_timer_fn at ffffffff8d05cb03
+ 4 [ffffbd13003e8f08] __hrtimer_run_queues at ffffffff8cfec62f
+ 5 [ffffbd13003e8f70] hrtimer_interrupt at ffffffff8cfed756
+ 6 [ffffbd13003e8fd0] __sysvec_apic_timer_interrupt at ffffffff8cea01af
+ 7 [ffffbd13003e8ff0] sysvec_apic_timer_interrupt at ffffffff8df1b83d
+-- <IRQ stack> --
+ 8 [ffffbd13003d3708] asm_sysvec_apic_timer_interrupt at ffffffff8e000ecb
+    [exception RIP: fib6_select_path+299]
+    RIP: ffffffff8ddafe7b  RSP: ffffbd13003d37b8  RFLAGS: 00000287
+    RAX: ffff975850b43600  RBX: ffff975850b40200  RCX: 0000000000000000
+    RDX: 000000003fffffff  RSI: 0000000051d383e4  RDI: ffff975850b43618
+    RBP: ffffbd13003d3800   R8: 0000000000000000   R9: ffff975850b40200
+    R10: 0000000000000000  R11: 0000000000000000  R12: ffffbd13003d3830
+    R13: ffff975850b436a8  R14: ffff975850b43600  R15: 0000000000000007
+    ORIG_RAX: ffffffffffffffff  CS: 0010  SS: 0018
+ 9 [ffffbd13003d3808] ip6_pol_route at ffffffff8ddb030c
+10 [ffffbd13003d3888] ip6_pol_route_input at ffffffff8ddb068c
+11 [ffffbd13003d3898] fib6_rule_lookup at ffffffff8ddf02b5
+12 [ffffbd13003d3928] ip6_route_input at ffffffff8ddb0f47
+13 [ffffbd13003d3a18] ip6_rcv_finish_core.constprop.0 at ffffffff8dd950d0
+14 [ffffbd13003d3a30] ip6_list_rcv_finish.constprop.0 at ffffffff8dd96274
+15 [ffffbd13003d3a98] ip6_sublist_rcv at ffffffff8dd96474
+16 [ffffbd13003d3af8] ipv6_list_rcv at ffffffff8dd96615
+17 [ffffbd13003d3b60] __netif_receive_skb_list_core at ffffffff8dc16fec
+18 [ffffbd13003d3be0] netif_receive_skb_list_internal at ffffffff8dc176b3
+19 [ffffbd13003d3c50] napi_gro_receive at ffffffff8dc565b9
+20 [ffffbd13003d3c80] ice_receive_skb at ffffffffc087e4f5 [ice]
+21 [ffffbd13003d3c90] ice_clean_rx_irq at ffffffffc0881b80 [ice]
+22 [ffffbd13003d3d20] ice_napi_poll at ffffffffc088232f [ice]
+23 [ffffbd13003d3d80] __napi_poll at ffffffff8dc18000
+24 [ffffbd13003d3db8] net_rx_action at ffffffff8dc18581
+25 [ffffbd13003d3e40] __do_softirq at ffffffff8df352e9
+26 [ffffbd13003d3eb0] run_ksoftirqd at ffffffff8ceffe47
+27 [ffffbd13003d3ec0] smpboot_thread_fn at ffffffff8cf36a30
+28 [ffffbd13003d3ee8] kthread at ffffffff8cf2b39f
+29 [ffffbd13003d3f28] ret_from_fork at ffffffff8ce5fa64
+30 [ffffbd13003d3f50] ret_from_fork_asm at ffffffff8ce03cbb
+
+Fixes: 66f5d6ce53e6 ("ipv6: replace rwlock with rcu and spinlock in fib6_table")
+Reported-by: Adrian Oliver <kernel@aoliver.ca>
+Signed-off-by: Omid Ehtemam-Haghighi <omid.ehtemamhaghighi@menlosecurity.com>
+Cc: Shuah Khan <shuah@kernel.org>
+Cc: Ido Schimmel <idosch@idosch.org>
+Cc: Kuniyuki Iwashima <kuniyu@amazon.com>
+Cc: Simon Horman <horms@kernel.org>
+Reviewed-by: David Ahern <dsahern@kernel.org>
+Link: https://patch.msgid.link/20241106010236.1239299-1-omid.ehtemamhaghighi@menlosecurity.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Rajani Kantha <rajanikantha@engineer.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv6/ip6_fib.c                                           |    8 
+ net/ipv6/route.c                                             |   45 +
+ tools/testing/selftests/net/Makefile                         |    1 
+ tools/testing/selftests/net/ipv6_route_update_soft_lockup.sh |  262 +++++++++++
+ 4 files changed, 297 insertions(+), 19 deletions(-)
+ create mode 100755 tools/testing/selftests/net/ipv6_route_update_soft_lockup.sh
+
+--- a/net/ipv6/ip6_fib.c
++++ b/net/ipv6/ip6_fib.c
+@@ -1179,8 +1179,8 @@ next_iter:
+               while (sibling) {
+                       if (sibling->fib6_metric == rt->fib6_metric &&
+                           rt6_qualify_for_ecmp(sibling)) {
+-                              list_add_tail(&rt->fib6_siblings,
+-                                            &sibling->fib6_siblings);
++                              list_add_tail_rcu(&rt->fib6_siblings,
++                                                &sibling->fib6_siblings);
+                               break;
+                       }
+                       sibling = rcu_dereference_protected(sibling->fib6_next,
+@@ -1241,7 +1241,7 @@ add:
+                                                        fib6_siblings)
+                                       sibling->fib6_nsiblings--;
+                               rt->fib6_nsiblings = 0;
+-                              list_del_init(&rt->fib6_siblings);
++                              list_del_rcu(&rt->fib6_siblings);
+                               rt6_multipath_rebalance(next_sibling);
+                               return err;
+                       }
+@@ -1954,7 +1954,7 @@ static void fib6_del_route(struct fib6_t
+                                        &rt->fib6_siblings, fib6_siblings)
+                       sibling->fib6_nsiblings--;
+               rt->fib6_nsiblings = 0;
+-              list_del_init(&rt->fib6_siblings);
++              list_del_rcu(&rt->fib6_siblings);
+               rt6_multipath_rebalance(next_sibling);
+       }
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -418,8 +418,8 @@ void fib6_select_path(const struct net *
+                     struct flowi6 *fl6, int oif, bool have_oif_match,
+                     const struct sk_buff *skb, int strict)
+ {
+-      struct fib6_info *sibling, *next_sibling;
+       struct fib6_info *match = res->f6i;
++      struct fib6_info *sibling;
+       if (!match->nh && (!match->fib6_nsiblings || have_oif_match))
+               goto out;
+@@ -445,8 +445,8 @@ void fib6_select_path(const struct net *
+       if (fl6->mp_hash <= atomic_read(&match->fib6_nh->fib_nh_upper_bound))
+               goto out;
+-      list_for_each_entry_safe(sibling, next_sibling, &match->fib6_siblings,
+-                               fib6_siblings) {
++      list_for_each_entry_rcu(sibling, &match->fib6_siblings,
++                              fib6_siblings) {
+               const struct fib6_nh *nh = sibling->fib6_nh;
+               int nh_upper_bound;
+@@ -5186,14 +5186,18 @@ static void ip6_route_mpath_notify(struc
+        * nexthop. Since sibling routes are always added at the end of
+        * the list, find the first sibling of the last route appended
+        */
++      rcu_read_lock();
++
+       if ((nlflags & NLM_F_APPEND) && rt_last && rt_last->fib6_nsiblings) {
+-              rt = list_first_entry(&rt_last->fib6_siblings,
+-                                    struct fib6_info,
+-                                    fib6_siblings);
++              rt = list_first_or_null_rcu(&rt_last->fib6_siblings,
++                                          struct fib6_info,
++                                          fib6_siblings);
+       }
+       if (rt)
+               inet6_rt_notify(RTM_NEWROUTE, rt, info, nlflags);
++
++      rcu_read_unlock();
+ }
+ static bool ip6_route_mpath_should_notify(const struct fib6_info *rt)
+@@ -5538,17 +5542,21 @@ static size_t rt6_nlmsg_size(struct fib6
+               nexthop_for_each_fib6_nh(f6i->nh, rt6_nh_nlmsg_size,
+                                        &nexthop_len);
+       } else {
+-              struct fib6_info *sibling, *next_sibling;
+               struct fib6_nh *nh = f6i->fib6_nh;
++              struct fib6_info *sibling;
+               nexthop_len = 0;
+               if (f6i->fib6_nsiblings) {
+                       rt6_nh_nlmsg_size(nh, &nexthop_len);
+-                      list_for_each_entry_safe(sibling, next_sibling,
+-                                               &f6i->fib6_siblings, fib6_siblings) {
++                      rcu_read_lock();
++
++                      list_for_each_entry_rcu(sibling, &f6i->fib6_siblings,
++                                              fib6_siblings) {
+                               rt6_nh_nlmsg_size(sibling->fib6_nh, &nexthop_len);
+                       }
++
++                      rcu_read_unlock();
+               }
+               nexthop_len += lwtunnel_get_encap_size(nh->fib_nh_lws);
+       }
+@@ -5712,7 +5720,7 @@ static int rt6_fill_node(struct net *net
+                   lwtunnel_fill_encap(skb, dst->lwtstate, RTA_ENCAP, RTA_ENCAP_TYPE) < 0)
+                       goto nla_put_failure;
+       } else if (rt->fib6_nsiblings) {
+-              struct fib6_info *sibling, *next_sibling;
++              struct fib6_info *sibling;
+               struct nlattr *mp;
+               mp = nla_nest_start_noflag(skb, RTA_MULTIPATH);
+@@ -5724,14 +5732,21 @@ static int rt6_fill_node(struct net *net
+                                   0) < 0)
+                       goto nla_put_failure;
+-              list_for_each_entry_safe(sibling, next_sibling,
+-                                       &rt->fib6_siblings, fib6_siblings) {
++              rcu_read_lock();
++
++              list_for_each_entry_rcu(sibling, &rt->fib6_siblings,
++                                      fib6_siblings) {
+                       if (fib_add_nexthop(skb, &sibling->fib6_nh->nh_common,
+                                           sibling->fib6_nh->fib_nh_weight,
+-                                          AF_INET6, 0) < 0)
++                                          AF_INET6, 0) < 0) {
++                              rcu_read_unlock();
++
+                               goto nla_put_failure;
++                      }
+               }
++              rcu_read_unlock();
++
+               nla_nest_end(skb, mp);
+       } else if (rt->nh) {
+               if (nla_put_u32(skb, RTA_NH_ID, rt->nh->id))
+@@ -6168,7 +6183,7 @@ void inet6_rt_notify(int event, struct f
+       err = -ENOBUFS;
+       seq = info->nlh ? info->nlh->nlmsg_seq : 0;
+-      skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
++      skb = nlmsg_new(rt6_nlmsg_size(rt), GFP_ATOMIC);
+       if (!skb)
+               goto errout;
+@@ -6181,7 +6196,7 @@ void inet6_rt_notify(int event, struct f
+               goto errout;
+       }
+       rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
+-                  info->nlh, gfp_any());
++                  info->nlh, GFP_ATOMIC);
+       return;
+ errout:
+       if (err < 0)
+--- a/tools/testing/selftests/net/Makefile
++++ b/tools/testing/selftests/net/Makefile
+@@ -91,6 +91,7 @@ TEST_PROGS += test_vxlan_mdb.sh
+ TEST_PROGS += test_bridge_neigh_suppress.sh
+ TEST_PROGS += test_vxlan_nolocalbypass.sh
+ TEST_PROGS += test_bridge_backup_port.sh
++TEST_PROGS += ipv6_route_update_soft_lockup.sh
+ TEST_FILES := settings
+ TEST_FILES += in_netns.sh lib.sh net_helper.sh setup_loopback.sh setup_veth.sh
+--- /dev/null
++++ b/tools/testing/selftests/net/ipv6_route_update_soft_lockup.sh
+@@ -0,0 +1,262 @@
++#!/bin/bash
++# SPDX-License-Identifier: GPL-2.0
++#
++# Testing for potential kernel soft lockup during IPv6 routing table
++# refresh under heavy outgoing IPv6 traffic. If a kernel soft lockup
++# occurs, a kernel panic will be triggered to prevent associated issues.
++#
++#
++#                            Test Environment Layout
++#
++# ┌----------------┐                                         ┌----------------┐
++# |     SOURCE_NS  |                                         |     SINK_NS    |
++# |    NAMESPACE   |                                         |    NAMESPACE   |
++# |(iperf3 clients)|                                         |(iperf3 servers)|
++# |                |                                         |                |
++# |                |                                         |                |
++# |    ┌-----------|                             nexthops    |---------┐      |
++# |    |veth_source|<--------------------------------------->|veth_sink|<┐    |
++# |    └-----------|2001:0DB8:1::0:1/96  2001:0DB8:1::1:1/96 |---------┘ |    |
++# |                |         ^           2001:0DB8:1::1:2/96 |           |    |
++# |                |         .                   .           |       fwd |    |
++# |  ┌---------┐   |         .                   .           |           |    |
++# |  |   IPv6  |   |         .                   .           |           V    |
++# |  | routing |   |         .           2001:0DB8:1::1:80/96|        ┌-----┐ |
++# |  |  table  |   |         .                               |        | lo  | |
++# |  | nexthop |   |         .                               └--------┴-----┴-┘
++# |  | update  |   |         ............................> 2001:0DB8:2::1:1/128
++# |  └-------- ┘   |
++# └----------------┘
++#
++# The test script sets up two network namespaces, source_ns and sink_ns,
++# connected via a veth link. Within source_ns, it continuously updates the
++# IPv6 routing table by flushing and inserting IPV6_NEXTHOP_ADDR_COUNT nexthop
++# IPs destined for SINK_LOOPBACK_IP_ADDR in sink_ns. This refresh occurs at a
++# rate of 1/ROUTING_TABLE_REFRESH_PERIOD per second for TEST_DURATION seconds.
++#
++# Simultaneously, multiple iperf3 clients within source_ns generate heavy
++# outgoing IPv6 traffic. Each client is assigned a unique port number starting
++# at 5000 and incrementing sequentially. Each client targets a unique iperf3
++# server running in sink_ns, connected to the SINK_LOOPBACK_IFACE interface
++# using the same port number.
++#
++# The number of iperf3 servers and clients is set to half of the total
++# available cores on each machine.
++#
++# NOTE: We have tested this script on machines with various CPU specifications,
++# ranging from lower to higher performance as listed below. The test script
++# effectively triggered a kernel soft lockup on machines running an unpatched
++# kernel in under a minute:
++#
++# - 1x Intel Xeon E-2278G 8-Core Processor @ 3.40GHz
++# - 1x Intel Xeon E-2378G Processor 8-Core @ 2.80GHz
++# - 1x AMD EPYC 7401P 24-Core Processor @ 2.00GHz
++# - 1x AMD EPYC 7402P 24-Core Processor @ 2.80GHz
++# - 2x Intel Xeon Gold 5120 14-Core Processor @ 2.20GHz
++# - 1x Ampere Altra Q80-30 80-Core Processor @ 3.00GHz
++# - 2x Intel Xeon Gold 5120 14-Core Processor @ 2.20GHz
++# - 2x Intel Xeon Silver 4214 24-Core Processor @ 2.20GHz
++# - 1x AMD EPYC 7502P 32-Core @ 2.50GHz
++# - 1x Intel Xeon Gold 6314U 32-Core Processor @ 2.30GHz
++# - 2x Intel Xeon Gold 6338 32-Core Processor @ 2.00GHz
++#
++# On less performant machines, you may need to increase the TEST_DURATION
++# parameter to enhance the likelihood of encountering a race condition leading
++# to a kernel soft lockup and avoid a false negative result.
++#
++# NOTE: The test may not produce the expected result in virtualized
++# environments (e.g., qemu) due to differences in timing and CPU handling,
++# which can affect the conditions needed to trigger a soft lockup.
++
++source lib.sh
++source net_helper.sh
++
++TEST_DURATION=300
++ROUTING_TABLE_REFRESH_PERIOD=0.01
++
++IPERF3_BITRATE="300m"
++
++
++IPV6_NEXTHOP_ADDR_COUNT="128"
++IPV6_NEXTHOP_ADDR_MASK="96"
++IPV6_NEXTHOP_PREFIX="2001:0DB8:1"
++
++
++SOURCE_TEST_IFACE="veth_source"
++SOURCE_TEST_IP_ADDR="2001:0DB8:1::0:1/96"
++
++SINK_TEST_IFACE="veth_sink"
++# ${SINK_TEST_IFACE} is populated with the following range of IPv6 addresses:
++# 2001:0DB8:1::1:1  to 2001:0DB8:1::1:${IPV6_NEXTHOP_ADDR_COUNT}
++SINK_LOOPBACK_IFACE="lo"
++SINK_LOOPBACK_IP_MASK="128"
++SINK_LOOPBACK_IP_ADDR="2001:0DB8:2::1:1"
++
++nexthop_ip_list=""
++termination_signal=""
++kernel_softlokup_panic_prev_val=""
++
++terminate_ns_processes_by_pattern() {
++      local ns=$1
++      local pattern=$2
++
++      for pid in $(ip netns pids ${ns}); do
++              [ -e /proc/$pid/cmdline ] && grep -qe "${pattern}" /proc/$pid/cmdline && kill -9 $pid
++      done
++}
++
++cleanup() {
++      echo "info: cleaning up namespaces and terminating all processes within them..."
++
++
++      # Terminate iperf3 instances running in the source_ns. To avoid race
++      # conditions, first iterate over the PIDs and terminate those
++      # associated with the bash shells running the
++      # `while true; do iperf3 -c ...; done` loops. In a second iteration,
++      # terminate the individual `iperf3 -c ...` instances.
++      terminate_ns_processes_by_pattern ${source_ns} while
++      terminate_ns_processes_by_pattern ${source_ns} iperf3
++
++      # Repeat the same process for sink_ns
++      terminate_ns_processes_by_pattern ${sink_ns} while
++      terminate_ns_processes_by_pattern ${sink_ns} iperf3
++
++      # Check if any iperf3 instances are still running. This could happen
++      # if a core has entered an infinite loop and the timeout for detecting
++      # the soft lockup has not expired, but either the test interval has
++      # already elapsed or the test was terminated manually (e.g., with ^C)
++      for pid in $(ip netns pids ${source_ns}); do
++              if [ -e /proc/$pid/cmdline ] && grep -qe 'iperf3' /proc/$pid/cmdline; then
++                      echo "FAIL: unable to terminate some iperf3 instances. Soft lockup is underway. A kernel panic is on the way!"
++                      exit ${ksft_fail}
++              fi
++      done
++
++      if [ "$termination_signal" == "SIGINT" ]; then
++              echo "SKIP: Termination due to ^C (SIGINT)"
++      elif [ "$termination_signal" == "SIGALRM" ]; then
++              echo "PASS: No kernel soft lockup occurred during this ${TEST_DURATION} second test"
++      fi
++
++      cleanup_ns ${source_ns} ${sink_ns}
++
++      sysctl -qw kernel.softlockup_panic=${kernel_softlokup_panic_prev_val}
++}
++
++setup_prepare() {
++      setup_ns source_ns sink_ns
++
++      ip -n ${source_ns} link add name ${SOURCE_TEST_IFACE} type veth peer name ${SINK_TEST_IFACE} netns ${sink_ns}
++
++      # Setting up the Source namespace
++      ip -n ${source_ns} addr add ${SOURCE_TEST_IP_ADDR} dev ${SOURCE_TEST_IFACE}
++      ip -n ${source_ns} link set dev ${SOURCE_TEST_IFACE} qlen 10000
++      ip -n ${source_ns} link set dev ${SOURCE_TEST_IFACE} up
++      ip netns exec ${source_ns} sysctl -qw net.ipv6.fib_multipath_hash_policy=1
++
++      # Setting up the Sink namespace
++      ip -n ${sink_ns} addr add ${SINK_LOOPBACK_IP_ADDR}/${SINK_LOOPBACK_IP_MASK} dev ${SINK_LOOPBACK_IFACE}
++      ip -n ${sink_ns} link set dev ${SINK_LOOPBACK_IFACE} up
++      ip netns exec ${sink_ns} sysctl -qw net.ipv6.conf.${SINK_LOOPBACK_IFACE}.forwarding=1
++
++      ip -n ${sink_ns} link set ${SINK_TEST_IFACE} up
++      ip netns exec ${sink_ns} sysctl -qw net.ipv6.conf.${SINK_TEST_IFACE}.forwarding=1
++
++
++      # Populate nexthop IPv6 addresses on the test interface in the sink_ns
++      echo "info: populating ${IPV6_NEXTHOP_ADDR_COUNT} IPv6 addresses on the ${SINK_TEST_IFACE} interface ..."
++      for IP in $(seq 1 ${IPV6_NEXTHOP_ADDR_COUNT}); do
++              ip -n ${sink_ns} addr add ${IPV6_NEXTHOP_PREFIX}::$(printf "1:%x" "${IP}")/${IPV6_NEXTHOP_ADDR_MASK} dev ${SINK_TEST_IFACE};
++      done
++
++      # Preparing list of nexthops
++      for IP in $(seq 1 ${IPV6_NEXTHOP_ADDR_COUNT}); do
++              nexthop_ip_list=$nexthop_ip_list" nexthop via ${IPV6_NEXTHOP_PREFIX}::$(printf "1:%x" $IP) dev ${SOURCE_TEST_IFACE} weight 1"
++      done
++}
++
++
++test_soft_lockup_during_routing_table_refresh() {
++      # Start num_of_iperf_servers iperf3 servers in the sink_ns namespace,
++      # each listening on ports starting at 5001 and incrementing
++      # sequentially. Since iperf3 instances may terminate unexpectedly, a
++      # while loop is used to automatically restart them in such cases.
++      echo "info: starting ${num_of_iperf_servers} iperf3 servers in the sink_ns namespace ..."
++      for i in $(seq 1 ${num_of_iperf_servers}); do
++              cmd="iperf3 --bind ${SINK_LOOPBACK_IP_ADDR} -s -p $(printf '5%03d' ${i}) --rcv-timeout 200 &>/dev/null"
++              ip netns exec ${sink_ns} bash -c "while true; do ${cmd}; done &" &>/dev/null
++      done
++
++      # Wait for the iperf3 servers to be ready
++      for i in $(seq ${num_of_iperf_servers}); do
++              port=$(printf '5%03d' ${i});
++              wait_local_port_listen ${sink_ns} ${port} tcp
++      done
++
++      # Continuously refresh the routing table in the background within
++      # the source_ns namespace
++      ip netns exec ${source_ns} bash -c "
++              while \$(ip netns list | grep -q ${source_ns}); do
++                      ip -6 route add ${SINK_LOOPBACK_IP_ADDR}/${SINK_LOOPBACK_IP_MASK} ${nexthop_ip_list};
++                      sleep ${ROUTING_TABLE_REFRESH_PERIOD};
++                      ip -6 route delete ${SINK_LOOPBACK_IP_ADDR}/${SINK_LOOPBACK_IP_MASK};
++              done &"
++
++      # Start num_of_iperf_servers iperf3 clients in the source_ns namespace,
++      # each sending TCP traffic on sequential ports starting at 5001.
++      # Since iperf3 instances may terminate unexpectedly (e.g., if the route
++      # to the server is deleted in the background during a route refresh), a
++      # while loop is used to automatically restart them in such cases.
++      echo "info: starting ${num_of_iperf_servers} iperf3 clients in the source_ns namespace ..."
++      for i in $(seq 1 ${num_of_iperf_servers}); do
++              cmd="iperf3 -c ${SINK_LOOPBACK_IP_ADDR} -p $(printf '5%03d' ${i}) --length 64 --bitrate ${IPERF3_BITRATE} -t 0 --connect-timeout 150 &>/dev/null"
++              ip netns exec ${source_ns} bash -c "while true; do ${cmd}; done &" &>/dev/null
++      done
++
++      echo "info: IPv6 routing table is being updated at the rate of $(echo "1/${ROUTING_TABLE_REFRESH_PERIOD}" | bc)/s for ${TEST_DURATION} seconds ..."
++      echo "info: A kernel soft lockup, if detected, results in a kernel panic!"
++
++      wait
++}
++
++# Make sure 'iperf3' is installed, skip the test otherwise
++if [ ! -x "$(command -v "iperf3")" ]; then
++      echo "SKIP: 'iperf3' is not installed. Skipping the test."
++      exit ${ksft_skip}
++fi
++
++# Determine the number of cores on the machine
++num_of_iperf_servers=$(( $(nproc)/2 ))
++
++# Check if we are running on a multi-core machine, skip the test otherwise
++if [ "${num_of_iperf_servers}" -eq 0 ]; then
++      echo "SKIP: This test is not valid on a single core machine!"
++      exit ${ksft_skip}
++fi
++
++# Since the kernel soft lockup we're testing causes at least one core to enter
++# an infinite loop, destabilizing the host and likely affecting subsequent
++# tests, we trigger a kernel panic instead of reporting a failure and
++# continuing
++kernel_softlokup_panic_prev_val=$(sysctl -n kernel.softlockup_panic)
++sysctl -qw kernel.softlockup_panic=1
++
++handle_sigint() {
++      termination_signal="SIGINT"
++      cleanup
++      exit ${ksft_skip}
++}
++
++handle_sigalrm() {
++      termination_signal="SIGALRM"
++      cleanup
++      exit ${ksft_pass}
++}
++
++trap handle_sigint SIGINT
++trap handle_sigalrm SIGALRM
++
++(sleep ${TEST_DURATION} && kill -s SIGALRM $$)&
++
++setup_prepare
++test_soft_lockup_during_routing_table_refresh
diff --git a/queue-6.6/rdma-bnxt_re-avoid-cpu-lockups-due-fifo-occupancy-check-loop.patch b/queue-6.6/rdma-bnxt_re-avoid-cpu-lockups-due-fifo-occupancy-check-loop.patch
new file mode 100644 (file)
index 0000000..57d31a6
--- /dev/null
@@ -0,0 +1,58 @@
+From 8be3e5b0c96beeefe9d5486b96575d104d3e7d17 Mon Sep 17 00:00:00 2001
+From: Selvin Xavier <selvin.xavier@broadcom.com>
+Date: Tue, 8 Oct 2024 00:41:38 -0700
+Subject: RDMA/bnxt_re: Avoid CPU lockups due fifo occupancy check loop
+
+From: Selvin Xavier <selvin.xavier@broadcom.com>
+
+commit 8be3e5b0c96beeefe9d5486b96575d104d3e7d17 upstream.
+
+Driver waits indefinitely for the fifo occupancy to go below a threshold
+as soon as the pacing interrupt is received. This can cause soft lockup on
+one of the processors, if the rate of DB is very high.
+
+Add a loop count for FPGA and exit the __wait_for_fifo_occupancy_below_th
+if the loop is taking more time. Pacing will be continuing until the
+occupancy is below the threshold. This is ensured by the checks in
+bnxt_re_pacing_timer_exp and further scheduling the work for pacing based
+on the fifo occupancy.
+
+Fixes: 2ad4e6303a6d ("RDMA/bnxt_re: Implement doorbell pacing algorithm")
+Link: https://patch.msgid.link/r/1728373302-19530-7-git-send-email-selvin.xavier@broadcom.com
+Reviewed-by: Kalesh AP <kalesh-anakkur.purayil@broadcom.com>
+Reviewed-by: Chandramohan Akula <chandramohan.akula@broadcom.com>
+Signed-off-by: Selvin Xavier <selvin.xavier@broadcom.com>
+Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
+[ Add the declaration of variable pacing_data to make it work on 6.6.y ]
+Signed-off-by: Alva Lan <alvalan9@foxmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/infiniband/hw/bnxt_re/main.c |   10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+--- a/drivers/infiniband/hw/bnxt_re/main.c
++++ b/drivers/infiniband/hw/bnxt_re/main.c
+@@ -485,6 +485,8 @@ static void bnxt_re_set_default_pacing_d
+ static void __wait_for_fifo_occupancy_below_th(struct bnxt_re_dev *rdev)
+ {
+       u32 read_val, fifo_occup;
++      struct bnxt_qplib_db_pacing_data *pacing_data = rdev->qplib_res.pacing_data;
++      u32 retry_fifo_check = 1000;
+       /* loop shouldn't run infintely as the occupancy usually goes
+        * below pacing algo threshold as soon as pacing kicks in.
+@@ -500,6 +502,14 @@ static void __wait_for_fifo_occupancy_be
+               if (fifo_occup < rdev->qplib_res.pacing_data->pacing_th)
+                       break;
++              if (!retry_fifo_check--) {
++                      dev_info_once(rdev_to_dev(rdev),
++                                    "%s: fifo_occup = 0x%xfifo_max_depth = 0x%x pacing_th = 0x%x\n",
++                                    __func__, fifo_occup, pacing_data->fifo_max_depth,
++                                      pacing_data->pacing_th);
++                      break;
++              }
++
+       }
+ }
index 7014efc2c12b3a697fd1a2c2eb2681c1c1770a3d..d9c1c34871749d0e7669faac9f227166f2c308ae 100644 (file)
@@ -8,3 +8,6 @@ irqchip-sunxi-nmi-add-missing-skip_wake-flag.patch
 hwmon-drivetemp-set-scsi-command-timeout-to-10s.patch
 asoc-samsung-add-missing-depends-on-i2c.patch
 ata-libata-core-set-ata_qcflag_rtf_filled-in-fill_result_tf.patch
+cpufreq-amd-pstate-add-check-for-cpufreq_cpu_get-s-return-value.patch
+ipv6-fix-soft-lockups-in-fib6_select_path-under-high-next-hop-churn.patch
+rdma-bnxt_re-avoid-cpu-lockups-due-fifo-occupancy-check-loop.patch