--- /dev/null
+From 5b2111516f3dc2525b33b19d62a7f8686c71d311 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 10 May 2023 21:37:48 -0700
+Subject: bpf: Address KCSAN report on bpf_lru_list
+
+From: Martin KaFai Lau <martin.lau@kernel.org>
+
+[ Upstream commit ee9fd0ac3017c4313be91a220a9ac4c99dde7ad4 ]
+
+KCSAN reported a data-race when accessing node->ref.
+Although node->ref does not have to be accurate,
+take this chance to use a more common READ_ONCE() and WRITE_ONCE()
+pattern instead of data_race().
+
+There are existing bpf_lru_node_is_ref() and bpf_lru_node_set_ref()
+helpers. This patch also adds bpf_lru_node_clear_ref() to do the
+WRITE_ONCE(node->ref, 0).
+
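+As a side note for reviewers, the marking pattern itself can be shown
+outside the kernel. Below is a minimal userspace sketch, assuming
+simplified READ_ONCE()/WRITE_ONCE() macros built on volatile casts
+(the kernel's real definitions are more involved):
+
+  #include <stdio.h>
+
+  /* Simplified stand-ins for the kernel macros: the volatile access
+   * forces the compiler to emit exactly one untorn load/store for
+   * the marked location. */
+  #define READ_ONCE(x)     (*(const volatile typeof(x) *)&(x))
+  #define WRITE_ONCE(x, v) (*(volatile typeof(x) *)&(x) = (v))
+
+  struct lru_node { unsigned char ref; };
+
+  /* Mirrors bpf_lru_node_set_ref(): a racy but intentional
+   * "approximate" flag update, now annotated for KCSAN. */
+  static void node_set_ref(struct lru_node *node)
+  {
+          if (!READ_ONCE(node->ref))
+                  WRITE_ONCE(node->ref, 1);
+  }
+
+  static void node_clear_ref(struct lru_node *node)
+  {
+          WRITE_ONCE(node->ref, 0);
+  }
+
+  int main(void)
+  {
+          struct lru_node n = { 0 };
+
+          node_set_ref(&n);
+          printf("ref=%d\n", n.ref);   /* ref=1 */
+          node_clear_ref(&n);
+          printf("ref=%d\n", n.ref);   /* ref=0 */
+          return 0;
+  }
+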
+==================================================================
+BUG: KCSAN: data-race in __bpf_lru_list_rotate / __htab_lru_percpu_map_update_elem
+
+write to 0xffff888137038deb of 1 bytes by task 11240 on cpu 1:
+__bpf_lru_node_move kernel/bpf/bpf_lru_list.c:113 [inline]
+__bpf_lru_list_rotate_active kernel/bpf/bpf_lru_list.c:149 [inline]
+__bpf_lru_list_rotate+0x1bf/0x750 kernel/bpf/bpf_lru_list.c:240
+bpf_lru_list_pop_free_to_local kernel/bpf/bpf_lru_list.c:329 [inline]
+bpf_common_lru_pop_free kernel/bpf/bpf_lru_list.c:447 [inline]
+bpf_lru_pop_free+0x638/0xe20 kernel/bpf/bpf_lru_list.c:499
+prealloc_lru_pop kernel/bpf/hashtab.c:290 [inline]
+__htab_lru_percpu_map_update_elem+0xe7/0x820 kernel/bpf/hashtab.c:1316
+bpf_percpu_hash_update+0x5e/0x90 kernel/bpf/hashtab.c:2313
+bpf_map_update_value+0x2a9/0x370 kernel/bpf/syscall.c:200
+generic_map_update_batch+0x3ae/0x4f0 kernel/bpf/syscall.c:1687
+bpf_map_do_batch+0x2d9/0x3d0 kernel/bpf/syscall.c:4534
+__sys_bpf+0x338/0x810
+__do_sys_bpf kernel/bpf/syscall.c:5096 [inline]
+__se_sys_bpf kernel/bpf/syscall.c:5094 [inline]
+__x64_sys_bpf+0x43/0x50 kernel/bpf/syscall.c:5094
+do_syscall_x64 arch/x86/entry/common.c:50 [inline]
+do_syscall_64+0x41/0xc0 arch/x86/entry/common.c:80
+entry_SYSCALL_64_after_hwframe+0x63/0xcd
+
+read to 0xffff888137038deb of 1 bytes by task 11241 on cpu 0:
+bpf_lru_node_set_ref kernel/bpf/bpf_lru_list.h:70 [inline]
+__htab_lru_percpu_map_update_elem+0x2f1/0x820 kernel/bpf/hashtab.c:1332
+bpf_percpu_hash_update+0x5e/0x90 kernel/bpf/hashtab.c:2313
+bpf_map_update_value+0x2a9/0x370 kernel/bpf/syscall.c:200
+generic_map_update_batch+0x3ae/0x4f0 kernel/bpf/syscall.c:1687
+bpf_map_do_batch+0x2d9/0x3d0 kernel/bpf/syscall.c:4534
+__sys_bpf+0x338/0x810
+__do_sys_bpf kernel/bpf/syscall.c:5096 [inline]
+__se_sys_bpf kernel/bpf/syscall.c:5094 [inline]
+__x64_sys_bpf+0x43/0x50 kernel/bpf/syscall.c:5094
+do_syscall_x64 arch/x86/entry/common.c:50 [inline]
+do_syscall_64+0x41/0xc0 arch/x86/entry/common.c:80
+entry_SYSCALL_64_after_hwframe+0x63/0xcd
+
+value changed: 0x01 -> 0x00
+
+Reported by Kernel Concurrency Sanitizer on:
+CPU: 0 PID: 11241 Comm: syz-executor.3 Not tainted 6.3.0-rc7-syzkaller-00136-g6a66fdd29ea1 #0
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 03/30/2023
+==================================================================
+
+Reported-by: syzbot+ebe648a84e8784763f82@syzkaller.appspotmail.com
+Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
+Acked-by: Yonghong Song <yhs@fb.com>
+Link: https://lore.kernel.org/r/20230511043748.1384166-1-martin.lau@linux.dev
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/bpf/bpf_lru_list.c | 21 +++++++++++++--------
+ kernel/bpf/bpf_lru_list.h | 7 ++-----
+ 2 files changed, 15 insertions(+), 13 deletions(-)
+
+diff --git a/kernel/bpf/bpf_lru_list.c b/kernel/bpf/bpf_lru_list.c
+index 9b5eeff72fd37..39a0e768adc39 100644
+--- a/kernel/bpf/bpf_lru_list.c
++++ b/kernel/bpf/bpf_lru_list.c
+@@ -44,7 +44,12 @@ static struct list_head *local_pending_list(struct bpf_lru_locallist *loc_l)
+ /* bpf_lru_node helpers */
+ static bool bpf_lru_node_is_ref(const struct bpf_lru_node *node)
+ {
+- return node->ref;
++ return READ_ONCE(node->ref);
++}
++
++static void bpf_lru_node_clear_ref(struct bpf_lru_node *node)
++{
++ WRITE_ONCE(node->ref, 0);
+ }
+
+ static void bpf_lru_list_count_inc(struct bpf_lru_list *l,
+@@ -92,7 +97,7 @@ static void __bpf_lru_node_move_in(struct bpf_lru_list *l,
+
+ bpf_lru_list_count_inc(l, tgt_type);
+ node->type = tgt_type;
+- node->ref = 0;
++ bpf_lru_node_clear_ref(node);
+ list_move(&node->list, &l->lists[tgt_type]);
+ }
+
+@@ -113,7 +118,7 @@ static void __bpf_lru_node_move(struct bpf_lru_list *l,
+ bpf_lru_list_count_inc(l, tgt_type);
+ node->type = tgt_type;
+ }
+- node->ref = 0;
++ bpf_lru_node_clear_ref(node);
+
+ /* If the moving node is the next_inactive_rotation candidate,
+ * move the next_inactive_rotation pointer also.
+@@ -356,7 +361,7 @@ static void __local_list_add_pending(struct bpf_lru *lru,
+ *(u32 *)((void *)node + lru->hash_offset) = hash;
+ node->cpu = cpu;
+ node->type = BPF_LRU_LOCAL_LIST_T_PENDING;
+- node->ref = 0;
++ bpf_lru_node_clear_ref(node);
+ list_add(&node->list, local_pending_list(loc_l));
+ }
+
+@@ -422,7 +427,7 @@ static struct bpf_lru_node *bpf_percpu_lru_pop_free(struct bpf_lru *lru,
+ if (!list_empty(free_list)) {
+ node = list_first_entry(free_list, struct bpf_lru_node, list);
+ *(u32 *)((void *)node + lru->hash_offset) = hash;
+- node->ref = 0;
++ bpf_lru_node_clear_ref(node);
+ __bpf_lru_node_move(l, node, BPF_LRU_LIST_T_INACTIVE);
+ }
+
+@@ -525,7 +530,7 @@ static void bpf_common_lru_push_free(struct bpf_lru *lru,
+ }
+
+ node->type = BPF_LRU_LOCAL_LIST_T_FREE;
+- node->ref = 0;
++ bpf_lru_node_clear_ref(node);
+ list_move(&node->list, local_free_list(loc_l));
+
+ raw_spin_unlock_irqrestore(&loc_l->lock, flags);
+@@ -571,7 +576,7 @@ static void bpf_common_lru_populate(struct bpf_lru *lru, void *buf,
+
+ node = (struct bpf_lru_node *)(buf + node_offset);
+ node->type = BPF_LRU_LIST_T_FREE;
+- node->ref = 0;
++ bpf_lru_node_clear_ref(node);
+ list_add(&node->list, &l->lists[BPF_LRU_LIST_T_FREE]);
+ buf += elem_size;
+ }
+@@ -597,7 +602,7 @@ static void bpf_percpu_lru_populate(struct bpf_lru *lru, void *buf,
+ node = (struct bpf_lru_node *)(buf + node_offset);
+ node->cpu = cpu;
+ node->type = BPF_LRU_LIST_T_FREE;
+- node->ref = 0;
++ bpf_lru_node_clear_ref(node);
+ list_add(&node->list, &l->lists[BPF_LRU_LIST_T_FREE]);
+ i++;
+ buf += elem_size;
+diff --git a/kernel/bpf/bpf_lru_list.h b/kernel/bpf/bpf_lru_list.h
+index 7d4f89b7cb841..08da78b59f0b9 100644
+--- a/kernel/bpf/bpf_lru_list.h
++++ b/kernel/bpf/bpf_lru_list.h
+@@ -66,11 +66,8 @@ struct bpf_lru {
+
+ static inline void bpf_lru_node_set_ref(struct bpf_lru_node *node)
+ {
+- /* ref is an approximation on access frequency. It does not
+- * have to be very accurate. Hence, no protection is used.
+- */
+- if (!node->ref)
+- node->ref = 1;
++ if (!READ_ONCE(node->ref))
++ WRITE_ONCE(node->ref, 1);
+ }
+
+ int bpf_lru_init(struct bpf_lru *lru, bool percpu, u32 hash_offset,
+--
+2.39.2
+
--- /dev/null
+From 4fa73402fe50139a9e8b841a330a7af135adbd00 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 7 Jun 2023 19:19:02 +0900
+Subject: debugobjects: Recheck debug_objects_enabled before reporting
+
+From: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
+
+[ Upstream commit 8b64d420fe2450f82848178506d3e3a0bd195539 ]
+
+syzbot is reporting a false positive ODEBUG message immediately after
+ODEBUG was disabled due to OOM.
+
+ [ 1062.309646][T22911] ODEBUG: Out of memory. ODEBUG disabled
+ [ 1062.886755][ T5171] ------------[ cut here ]------------
+ [ 1062.892770][ T5171] ODEBUG: assert_init not available (active state 0) object: ffffc900056afb20 object type: timer_list hint: process_timeout+0x0/0x40
+
+ CPU 0 [ T5171]                     CPU 1 [T22911]
+ --------------                     --------------
+ debug_object_assert_init() {
+   if (!debug_objects_enabled)
+     return;
+   db = get_bucket(addr);
+                                    lookup_object_or_alloc() {
+                                      debug_objects_enabled = 0;
+                                      return NULL;
+                                    }
+                                    debug_objects_oom() {
+                                      pr_warn("Out of memory. ODEBUG disabled\n");
+                                      // all buckets get emptied here, and
+                                    }
+   lookup_object_or_alloc(addr, db, descr, false, true) {
+     // this bucket is already empty.
+     return ERR_PTR(-ENOENT);
+   }
+   // Emits false positive warning.
+   debug_print_object(&o, "assert_init");
+ }
+
+Recheck debug_objects_enabled in debug_print_object() to avoid that.
+
+Reported-by: syzbot <syzbot+7937ba6a50bdd00fffdf@syzkaller.appspotmail.com>
+Suggested-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Link: https://lore.kernel.org/r/492fe2ae-5141-d548-ebd5-62f5fe2e57f7@I-love.SAKURA.ne.jp
+Closes: https://syzkaller.appspot.com/bug?extid=7937ba6a50bdd00fffdf
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ lib/debugobjects.c | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+diff --git a/lib/debugobjects.c b/lib/debugobjects.c
+index bacb00a9cd9f9..b6217c797554b 100644
+--- a/lib/debugobjects.c
++++ b/lib/debugobjects.c
+@@ -280,6 +280,15 @@ static void debug_print_object(struct debug_obj *obj, char *msg)
+ struct debug_obj_descr *descr = obj->descr;
+ static int limit;
+
++ /*
++ * Don't report if lookup_object_or_alloc() by the current thread
++ * failed because lookup_object_or_alloc()/debug_objects_oom() by a
++ * concurrent thread turned off debug_objects_enabled and cleared
++ * the hash buckets.
++ */
++ if (!debug_objects_enabled)
++ return;
++
+ if (limit < 5 && descr != descr_test) {
+ void *hint = descr->debug_hint ?
+ descr->debug_hint(obj->object) : NULL;
+--
+2.39.2
+
--- /dev/null
+From a933a419954977e8ebcf3c7f68e2d637a18e524d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 15 Jul 2023 16:16:56 +0800
+Subject: fbdev: au1200fb: Fix missing IRQ check in au1200fb_drv_probe
+
+From: Zhang Shurong <zhang_shurong@foxmail.com>
+
+[ Upstream commit 4e88761f5f8c7869f15a2046b1a1116f4fab4ac8 ]
+
+This function misses checking the result of the platform_get_irq() call
+and may pass a negative error code to request_irq(), which takes an
+unsigned IRQ number, causing it to fail with -EINVAL and overriding the
+original error code.
+
+Fix this by not calling request_irq() with an invalid IRQ number.
+
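+The failure mode is plain C integer conversion: a negative errno value
+passed into an unsigned parameter becomes a huge IRQ number. A minimal
+userspace illustration (hypothetical values, not the driver code):
+
+  #include <stdio.h>
+
+  /* Same shape as request_irq(): the IRQ number parameter is unsigned. */
+  static void take_irq(unsigned int irq)
+  {
+          printf("requesting irq %u\n", irq);
+  }
+
+  int main(void)
+  {
+          int irq = -6;          /* e.g. -ENXIO from platform_get_irq() */
+
+          if (irq < 0)           /* the check this patch adds */
+                  return 1;
+          take_irq(irq);         /* without the check: irq 4294967290 */
+          return 0;
+  }
+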
+Fixes: 1630d85a8312 ("au1200fb: fix hardcoded IRQ")
+Signed-off-by: Zhang Shurong <zhang_shurong@foxmail.com>
+Signed-off-by: Helge Deller <deller@gmx.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/video/fbdev/au1200fb.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/drivers/video/fbdev/au1200fb.c b/drivers/video/fbdev/au1200fb.c
+index e17a083f849ad..cae4a04cba48e 100644
+--- a/drivers/video/fbdev/au1200fb.c
++++ b/drivers/video/fbdev/au1200fb.c
+@@ -1748,6 +1748,9 @@ static int au1200fb_drv_probe(struct platform_device *dev)
+
+ /* Now hook interrupt too */
+ irq = platform_get_irq(dev, 0);
++ if (irq < 0)
++ return irq;
++
+ ret = request_irq(irq, au1200fb_handle_irq,
+ IRQF_SHARED, "lcd", (void *)dev);
+ if (ret) {
+--
+2.39.2
+
--- /dev/null
+From 7c8b31fd6a9f8dba99e2cd82139ece3ce6e7b7c4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 28 Jun 2023 15:24:37 +0200
+Subject: fbdev: imxfb: warn about invalid left/right margin
+
+From: Martin Kaiser <martin@kaiser.cx>
+
+[ Upstream commit 4e47382fbca916d7db95cbf9e2d7ca2e9d1ca3fe ]
+
+Warn about invalid var->left_margin or var->right_margin. Their values
+are read from the device tree.
+
+We store var->left_margin-3 and var->right_margin-1 in register
+fields. These fields should be >= 0.
+
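+The lower bounds follow from the encoding: the stored field is the margin
+minus a fixed offset, so a margin below that offset wraps around in the
+unsigned register field. A small worked example (illustrative only, not
+the driver code, assuming an 8-bit field as the 255 upper bound implies):
+
+  #include <stdio.h>
+
+  int main(void)
+  {
+          unsigned int left_margin = 2;           /* below the minimum of 3 */
+          unsigned char field = left_margin - 3;  /* 8-bit register field */
+
+          /* 2 - 3 wraps to 255 in the unsigned field, silently
+           * programming a nonsense margin. */
+          printf("stored field: %d\n", field);    /* prints 255 */
+          return 0;
+  }
+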
+Fixes: 7e8549bcee00 ("imxfb: Fix margin settings")
+Signed-off-by: Martin Kaiser <martin@kaiser.cx>
+Signed-off-by: Helge Deller <deller@gmx.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/video/fbdev/imxfb.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/video/fbdev/imxfb.c b/drivers/video/fbdev/imxfb.c
+index ba82f97fb42b2..a4dc25fbdd1ba 100644
+--- a/drivers/video/fbdev/imxfb.c
++++ b/drivers/video/fbdev/imxfb.c
+@@ -601,10 +601,10 @@ static int imxfb_activate_var(struct fb_var_screeninfo *var, struct fb_info *inf
+ if (var->hsync_len < 1 || var->hsync_len > 64)
+ printk(KERN_ERR "%s: invalid hsync_len %d\n",
+ info->fix.id, var->hsync_len);
+- if (var->left_margin > 255)
++ if (var->left_margin < 3 || var->left_margin > 255)
+ printk(KERN_ERR "%s: invalid left_margin %d\n",
+ info->fix.id, var->left_margin);
+- if (var->right_margin > 255)
++ if (var->right_margin < 1 || var->right_margin > 255)
+ printk(KERN_ERR "%s: invalid right_margin %d\n",
+ info->fix.id, var->right_margin);
+ if (var->yres < 1 || var->yres > ymax_mask)
+--
+2.39.2
+
--- /dev/null
+From 9abda882bac991c17f48151de9a56ca28a28a559 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 20 Jun 2023 10:47:32 -0700
+Subject: igb: Fix igb_down hung on surprise removal
+
+From: Ying Hsu <yinghsu@chromium.org>
+
+[ Upstream commit 004d25060c78fc31f66da0fa439c544dda1ac9d5 ]
+
+In a setup where a Thunderbolt hub connects to Ethernet and a display
+through USB Type-C, users may experience a hung task timeout when they
+remove the cable between the PC and the Thunderbolt hub.
+This is because the igb_down function is called multiple times when
+the Thunderbolt hub is unplugged. For example, igb_io_error_detected
+triggers the first call, and igb_remove triggers the second call.
+The second call to igb_down will block at napi_synchronize.
+Here's the call trace:
+ __schedule+0x3b0/0xddb
+ ? __mod_timer+0x164/0x5d3
+ schedule+0x44/0xa8
+ schedule_timeout+0xb2/0x2a4
+ ? run_local_timers+0x4e/0x4e
+ msleep+0x31/0x38
+ igb_down+0x12c/0x22a [igb 6615058754948bfde0bf01429257eb59f13030d4]
+ __igb_close+0x6f/0x9c [igb 6615058754948bfde0bf01429257eb59f13030d4]
+ igb_close+0x23/0x2b [igb 6615058754948bfde0bf01429257eb59f13030d4]
+ __dev_close_many+0x95/0xec
+ dev_close_many+0x6e/0x103
+ unregister_netdevice_many+0x105/0x5b1
+ unregister_netdevice_queue+0xc2/0x10d
+ unregister_netdev+0x1c/0x23
+ igb_remove+0xa7/0x11c [igb 6615058754948bfde0bf01429257eb59f13030d4]
+ pci_device_remove+0x3f/0x9c
+ device_release_driver_internal+0xfe/0x1b4
+ pci_stop_bus_device+0x5b/0x7f
+ pci_stop_bus_device+0x30/0x7f
+ pci_stop_bus_device+0x30/0x7f
+ pci_stop_and_remove_bus_device+0x12/0x19
+ pciehp_unconfigure_device+0x76/0xe9
+ pciehp_disable_slot+0x6e/0x131
+ pciehp_handle_presence_or_link_change+0x7a/0x3f7
+ pciehp_ist+0xbe/0x194
+ irq_thread_fn+0x22/0x4d
+ ? irq_thread+0x1fd/0x1fd
+ irq_thread+0x17b/0x1fd
+ ? irq_forced_thread_fn+0x5f/0x5f
+ kthread+0x142/0x153
+ ? __irq_get_irqchip_state+0x46/0x46
+ ? kthread_associate_blkcg+0x71/0x71
+ ret_from_fork+0x1f/0x30
+
+In this case, igb_io_error_detected detaches the network interface
+and requests a PCIe slot reset; however, the PCIe reset callback is
+not invoked and thus the Ethernet connection breaks down.
+As the PCIe error in this case is a non-fatal one, requesting a
+slot reset can be avoided.
+This patch fixes the hung task issue and preserves the Ethernet
+connection by ignoring non-fatal PCIe errors.
+
+Signed-off-by: Ying Hsu <yinghsu@chromium.org>
+Tested-by: Pucha Himasekhar Reddy <himasekharx.reddy.pucha@intel.com> (A Contingent worker at Intel)
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Reviewed-by: Simon Horman <simon.horman@corigine.com>
+Link: https://lore.kernel.org/r/20230620174732.4145155-1-anthony.l.nguyen@intel.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/igb/igb_main.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
+index ab54362c0992e..d7b531eae8195 100644
+--- a/drivers/net/ethernet/intel/igb/igb_main.c
++++ b/drivers/net/ethernet/intel/igb/igb_main.c
+@@ -8257,6 +8257,11 @@ static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct igb_adapter *adapter = netdev_priv(netdev);
+
++ if (state == pci_channel_io_normal) {
++ dev_warn(&pdev->dev, "Non-correctable non-fatal error reported.\n");
++ return PCI_ERS_RESULT_CAN_RECOVER;
++ }
++
+ netif_device_detach(netdev);
+
+ if (state == pci_channel_io_perm_failure)
+--
+2.39.2
+
--- /dev/null
+From 91d0d5e4bcbb4df3835e826fab2b20eab3852d19 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 18 Jul 2023 10:41:51 -0700
+Subject: llc: Don't drop packet from non-root netns.
+
+From: Kuniyuki Iwashima <kuniyu@amazon.com>
+
+[ Upstream commit 6631463b6e6673916d2481f692938f393148aa82 ]
+
+Now these upper layer protocol handlers can be called from llc_rcv()
+as sap->rcv_func(), which is registered by llc_sap_open().
+
+ * function which is passed to register_8022_client()
+ -> no in-kernel user calls register_8022_client().
+
+ * snap_rcv()
+ `- proto->rcvfunc() : registered by register_snap_client()
+ -> aarp_rcv() and atalk_rcv() drop packets from non-root netns
+
+ * stp_pdu_rcv()
+ `- garp_protos[]->rcv() : registered by stp_proto_register()
+ -> garp_pdu_rcv() and br_stp_rcv() are netns-aware
+
+So, we can safely remove the netns restriction in llc_rcv().
+
+Fixes: e730c15519d0 ("[NET]: Make packet reception network namespace safe")
+Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/llc/llc_input.c | 3 ---
+ 1 file changed, 3 deletions(-)
+
+diff --git a/net/llc/llc_input.c b/net/llc/llc_input.c
+index dd3e83328ad54..d5c6fb41be92e 100644
+--- a/net/llc/llc_input.c
++++ b/net/llc/llc_input.c
+@@ -162,9 +162,6 @@ int llc_rcv(struct sk_buff *skb, struct net_device *dev,
+ void (*sta_handler)(struct sk_buff *skb);
+ void (*sap_handler)(struct llc_sap *sap, struct sk_buff *skb);
+
+- if (!net_eq(dev_net(dev), &init_net))
+- goto drop;
+-
+ /*
+ * When the interface is in promisc. mode, drop all the crap that it
+ * receives, do not try to analyse it.
+--
+2.39.2
+
--- /dev/null
+From b8d88512ef798d911ca4b1e1e59812bfc3abb3d0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 12 May 2023 09:56:07 +0800
+Subject: md: fix data corruption for raid456 when reshape restart while grow
+ up
+
+From: Yu Kuai <yukuai3@huawei.com>
+
+[ Upstream commit 873f50ece41aad5c4f788a340960c53774b5526e ]
+
+Currently, if reshape is interrupted, echo "reshape" to sync_action will
+restart reshape from scratch, for example:
+
+echo frozen > sync_action
+echo reshape > sync_action
+
+This will corrupt data before reshape_position if the array is growing.
+Fix the problem by continuing the reshape from reshape_position.
+
+Reported-by: Peter Neuwirth <reddunur@online.de>
+Link: https://lore.kernel.org/linux-raid/e2f96772-bfbc-f43b-6da1-f520e5164536@online.de/
+Signed-off-by: Yu Kuai <yukuai3@huawei.com>
+Signed-off-by: Song Liu <song@kernel.org>
+Link: https://lore.kernel.org/r/20230512015610.821290-3-yukuai1@huaweicloud.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/md/md.c | 14 ++++++++++++--
+ 1 file changed, 12 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index 12392a4fb9c0d..3c2364d0d88f3 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -4615,11 +4615,21 @@ action_store(struct mddev *mddev, const char *page, size_t len)
+ return -EINVAL;
+ err = mddev_lock(mddev);
+ if (!err) {
+- if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
++ if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
+ err = -EBUSY;
+- else {
++ } else if (mddev->reshape_position == MaxSector ||
++ mddev->pers->check_reshape == NULL ||
++ mddev->pers->check_reshape(mddev)) {
+ clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+ err = mddev->pers->start_reshape(mddev);
++ } else {
++ /*
++ * If reshape is still in progress, and
++ * md_check_recovery() can continue to reshape,
++ * don't restart reshape because data can be
++ * corrupted for raid456.
++ */
++ clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+ }
+ mddev_unlock(mddev);
+ }
+--
+2.39.2
+
--- /dev/null
+From 7611d0b264605c196fe32507f470802adc4f99c5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 29 May 2023 21:11:00 +0800
+Subject: md/raid10: prevent soft lockup while flush writes
+
+From: Yu Kuai <yukuai3@huawei.com>
+
+[ Upstream commit 010444623e7f4da6b4a4dd603a7da7469981e293 ]
+
+Currently, there is no limit for raid1/raid10 plugged bio. While flushing
+writes, raid1 has cond_resched() while raid10 doesn't, and too many
+writes can cause soft lockup.
+
+The following soft lockup can be triggered easily with a writeback test
+for raid10 with ramdisks:
+
+watchdog: BUG: soft lockup - CPU#10 stuck for 27s! [md0_raid10:1293]
+Call Trace:
+ <TASK>
+ call_rcu+0x16/0x20
+ put_object+0x41/0x80
+ __delete_object+0x50/0x90
+ delete_object_full+0x2b/0x40
+ kmemleak_free+0x46/0xa0
+ slab_free_freelist_hook.constprop.0+0xed/0x1a0
+ kmem_cache_free+0xfd/0x300
+ mempool_free_slab+0x1f/0x30
+ mempool_free+0x3a/0x100
+ bio_free+0x59/0x80
+ bio_put+0xcf/0x2c0
+ free_r10bio+0xbf/0xf0
+ raid_end_bio_io+0x78/0xb0
+ one_write_done+0x8a/0xa0
+ raid10_end_write_request+0x1b4/0x430
+ bio_endio+0x175/0x320
+ brd_submit_bio+0x3b9/0x9b7 [brd]
+ __submit_bio+0x69/0xe0
+ submit_bio_noacct_nocheck+0x1e6/0x5a0
+ submit_bio_noacct+0x38c/0x7e0
+ flush_pending_writes+0xf0/0x240
+ raid10d+0xac/0x1ed0
+
+Fix the problem by adding cond_resched() to raid10 like what raid1 did.
+
+Note that unlimited plugged bios still need to be optimized: for example,
+in the case of lots of dirty page writeback, this will take lots of
+memory and IO will spend a long time in the plug, hence IO latency is bad.
+
+Signed-off-by: Yu Kuai <yukuai3@huawei.com>
+Signed-off-by: Song Liu <song@kernel.org>
+Link: https://lore.kernel.org/r/20230529131106.2123367-2-yukuai1@huaweicloud.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/md/raid10.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
+index 25c8f3e3d2edb..6ecc68fd702e4 100644
+--- a/drivers/md/raid10.c
++++ b/drivers/md/raid10.c
+@@ -924,6 +924,7 @@ static void flush_pending_writes(struct r10conf *conf)
+ else
+ generic_make_request(bio);
+ bio = next;
++ cond_resched();
+ }
+ blk_finish_plug(&plug);
+ } else
+@@ -1109,6 +1110,7 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
+ else
+ generic_make_request(bio);
+ bio = next;
++ cond_resched();
+ }
+ kfree(plug);
+ }
+--
+2.39.2
+
--- /dev/null
+From 5bffa0a56b001028eaf7433230e8061590e5f00b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 5 Jun 2023 20:21:59 +0800
+Subject: nbd: Add the maximum limit of allocated index in nbd_dev_add
+
+From: Zhong Jinghua <zhongjinghua@huawei.com>
+
+[ Upstream commit f12bc113ce904777fd6ca003b473b427782b3dde ]
+
+If the index allocated by idr_alloc is greater than MINORMASK >> part_shift,
+the device number will overflow, resulting in failure to create a block
+device.
+
+Fix it by limiting the maximum index that can be allocated.
+
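+The arithmetic behind the limit, sketched in userspace (assuming the
+kernel's MINORBITS value of 20 and a hypothetical part_shift of 5):
+
+  #include <stdio.h>
+
+  #define MINORBITS 20
+  #define MINORMASK ((1U << MINORBITS) - 1)
+
+  int main(void)
+  {
+          unsigned int part_shift = 5;   /* e.g. 31 partitions per device */
+          unsigned int max_index = MINORMASK >> part_shift;
+          unsigned int bad_index = max_index + 1;
+
+          /* first_minor = index << part_shift must stay within MINORMASK,
+           * otherwise the device number overflows. */
+          printf("max index: %u\n", max_index);               /* 32767 */
+          printf("overflowed first_minor: %u > %u\n",
+                 bad_index << part_shift, MINORMASK);         /* 1048576 > 1048575 */
+          return 0;
+  }
+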
+Signed-off-by: Zhong Jinghua <zhongjinghua@huawei.com>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Link: https://lore.kernel.org/r/20230605122159.2134384-1-zhongjinghua@huaweicloud.com
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/block/nbd.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
+index eb2ca7f6ab3ab..33ad48719c124 100644
+--- a/drivers/block/nbd.c
++++ b/drivers/block/nbd.c
+@@ -1630,7 +1630,8 @@ static int nbd_dev_add(int index)
+ if (err == -ENOSPC)
+ err = -EEXIST;
+ } else {
+- err = idr_alloc(&nbd_index_idr, nbd, 0, 0, GFP_KERNEL);
++ err = idr_alloc(&nbd_index_idr, nbd, 0,
++ (MINORMASK >> part_shift) + 1, GFP_KERNEL);
+ if (err >= 0)
+ index = err;
+ }
+--
+2.39.2
+
--- /dev/null
+From 99d3da17314de4f22fdf06b26215c6d3daad7261 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 12 Jul 2023 16:36:57 +0530
+Subject: net: ethernet: ti: cpsw_ale: Fix
+ cpsw_ale_get_field()/cpsw_ale_set_field()
+
+From: Tanmay Patil <t-patil@ti.com>
+
+[ Upstream commit b685f1a58956fa36cc01123f253351b25bfacfda ]
+
+CPSW ALE has 75 bit ALE entries which are stored within three 32 bit words.
+The cpsw_ale_get_field() and cpsw_ale_set_field() functions assume that the
+field will be strictly contained within one word. However, this is not
+guaranteed to be the case, and it is possible for ALE field entries to
+span up to two words.
+
+Fix the methods to handle getting/setting fields spanning up to two words.
+
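+To see the two-word case concretely, here is the fixed read path lifted
+into a standalone userspace program (BITMASK() redefined locally; the
+example field of 4 bits starting at bit 30 straddles entry words 2 and 1
+in the driver's flipped layout):
+
+  #include <stdio.h>
+
+  #define BITMASK(bits) ((1u << (bits)) - 1)
+
+  /* Same logic as the fixed cpsw_ale_get_field(): the 75-bit ALE entry
+   * sits in three u32 words with word 0 most significant ("flipped"),
+   * and a field may straddle two adjacent words. */
+  static int get_field(unsigned int *ale_entry, unsigned int start,
+                       unsigned int bits)
+  {
+          unsigned int hi_val = 0;
+          int idx, idx2;
+
+          idx = start / 32;
+          idx2 = (start + bits - 1) / 32;
+          if (idx != idx2) {                    /* field crosses a word */
+                  idx2 = 2 - idx2;              /* flip */
+                  hi_val = ale_entry[idx2] << ((idx2 * 32) - start);
+          }
+          start -= idx * 32;
+          idx = 2 - idx;                        /* flip */
+          return (hi_val + (ale_entry[idx] >> start)) & BITMASK(bits);
+  }
+
+  int main(void)
+  {
+          /* Bits 0-31 live in entry[2], 32-63 in entry[1], 64-74 in
+           * entry[0]. Field start=30, bits=4: low 2 bits come from
+           * entry[2], high 2 bits from entry[1]. Expected: 0b1011. */
+          unsigned int entry[3] = { 0, 0x2, 0xC0000000 };
+
+          printf("field = %d\n", get_field(entry, 30, 4));   /* 11 */
+          return 0;
+  }
+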
+Fixes: db82173f23c5 ("netdev: driver: ethernet: add cpsw address lookup engine support")
+Signed-off-by: Tanmay Patil <t-patil@ti.com>
+[s-vadapalli@ti.com: rephrased commit message and added Fixes tag]
+Signed-off-by: Siddharth Vadapalli <s-vadapalli@ti.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/ti/cpsw_ale.c | 24 +++++++++++++++++++-----
+ 1 file changed, 19 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
+index ddd43e09111e2..d9db5c23a9639 100644
+--- a/drivers/net/ethernet/ti/cpsw_ale.c
++++ b/drivers/net/ethernet/ti/cpsw_ale.c
+@@ -67,23 +67,37 @@
+
+ static inline int cpsw_ale_get_field(u32 *ale_entry, u32 start, u32 bits)
+ {
+- int idx;
++ int idx, idx2;
++ u32 hi_val = 0;
+
+ idx = start / 32;
++ idx2 = (start + bits - 1) / 32;
++ /* Check if bits to be fetched exceed a word */
++ if (idx != idx2) {
++ idx2 = 2 - idx2; /* flip */
++ hi_val = ale_entry[idx2] << ((idx2 * 32) - start);
++ }
+ start -= idx * 32;
+ idx = 2 - idx; /* flip */
+- return (ale_entry[idx] >> start) & BITMASK(bits);
++ return (hi_val + (ale_entry[idx] >> start)) & BITMASK(bits);
+ }
+
+ static inline void cpsw_ale_set_field(u32 *ale_entry, u32 start, u32 bits,
+ u32 value)
+ {
+- int idx;
++ int idx, idx2;
+
+ value &= BITMASK(bits);
+- idx = start / 32;
++ idx = start / 32;
++ idx2 = (start + bits - 1) / 32;
++ /* Check if bits to be set exceed a word */
++ if (idx != idx2) {
++ idx2 = 2 - idx2; /* flip */
++ ale_entry[idx2] &= ~(BITMASK(bits + start - (idx2 * 32)));
++ ale_entry[idx2] |= (value >> ((idx2 * 32) - start));
++ }
+ start -= idx * 32;
+- idx = 2 - idx; /* flip */
++ idx = 2 - idx; /* flip */
+ ale_entry[idx] &= ~(BITMASK(bits) << start);
+ ale_entry[idx] |= (value << start);
+ }
+--
+2.39.2
+
--- /dev/null
+From 81eca9b33b5298ee2ccaebe1b9bdad15a81bb4e2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 20 Jul 2023 00:29:58 +0200
+Subject: netfilter: nf_tables: fix spurious set element insertion failure
+
+From: Florian Westphal <fw@strlen.de>
+
+[ Upstream commit ddbd8be68941985f166f5107109a90ce13147c44 ]
+
+On some platforms there is a padding hole in the nft_verdict
+structure, between the verdict code and the chain pointer.
+
+On element insertion, if the new element clashes with an existing one and
+NLM_F_EXCL flag isn't set, we want to ignore the -EEXIST error as long as
+the data associated with duplicated element is the same as the existing
+one. The data equality check uses memcmp.
+
+For normal data (NFT_DATA_VALUE) this works fine, but for NFT_DATA_VERDICT
+padding area leads to spurious failure even if the verdict data is the
+same.
+
+This then makes the insertion fail with 'already exists' error, even
+though the new "key : data" matches an existing entry and userspace
+told the kernel that it doesn't want to receive an error indication.
+
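+The underlying C behaviour is easy to reproduce in userspace: padding
+bytes are indeterminate unless the whole object is zeroed first, so
+memcmp() of logically equal structs can differ. A minimal sketch with a
+hypothetical struct of the same shape as the verdict case (on 64-bit, a
+4-byte hole sits between the int and the pointer):
+
+  #include <stdio.h>
+  #include <string.h>
+
+  struct verdict {
+          int code;       /* 4 bytes, then a 4-byte padding hole */
+          void *chain;    /* 8 bytes on 64-bit */
+  };
+
+  int main(void)
+  {
+          struct verdict a, b;
+
+          memset(&a, 0x00, sizeof(a));   /* what the patch does */
+          memset(&b, 0xff, sizeof(b));   /* simulates stale stack bytes */
+
+          a.code = 1;  a.chain = NULL;
+          b.code = 1;  b.chain = NULL;
+
+          /* The members are equal, but b's padding hole still holds
+           * 0xff bytes, so a bytewise compare reports a mismatch. */
+          printf("memcmp: %d\n", memcmp(&a, &b, sizeof(a)));
+          return 0;
+  }
+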
+Fixes: c016c7e45ddf ("netfilter: nf_tables: honor NLM_F_EXCL flag in set element insertion")
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nf_tables_api.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index b016ae68d9db8..68a13ab584acf 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -5852,6 +5852,9 @@ static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data,
+
+ if (!tb[NFTA_VERDICT_CODE])
+ return -EINVAL;
++
++ /* zero padding hole for memcmp */
++ memset(data, 0, sizeof(*data));
+ data->verdict.code = ntohl(nla_get_be32(tb[NFTA_VERDICT_CODE]));
+
+ switch (data->verdict.code) {
+--
+2.39.2
+
--- /dev/null
+From ce601508ca830f0488111572205765116c06cb88 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 5 Jul 2023 08:30:03 -0500
+Subject: pinctrl: amd: Use amd_pinconf_set() for all config options
+
+From: Mario Limonciello <mario.limonciello@amd.com>
+
+[ Upstream commit 635a750d958e158e17af0f524bedc484b27fbb93 ]
+
+On ASUS TUF A16 it is reported that the ITE5570 ACPI device connected to
+GPIO 7 is causing an interrupt storm. This issue doesn't happen on
+Windows.
+
+Comparing the GPIO register configuration between Windows and Linux,
+bit 20 has been configured as a pull-up on Windows, but not on Linux.
+Checking the GPIO declaration from the firmware, it is clear it *should*
+have been a pull-up on Linux as well.
+
+```
+GpioInt (Level, ActiveLow, Exclusive, PullUp, 0x0000,
+ "\\_SB.GPIO", 0x00, ResourceConsumer, ,)
+{ // Pin list
+0x0007
+}
+```
+
+On Linux amd_gpio_set_config() is currently only used for programming
+the debounce. Actually the GPIO core calls it with all the arguments
+that are supported by a GPIO; pinctrl-amd just responds with `-ENOTSUPP`.
+
+To solve this issue, expand amd_gpio_set_config() to support the other
+arguments amd_pinconf_set() supports, namely `PIN_CONFIG_BIAS_PULL_DOWN`,
+`PIN_CONFIG_BIAS_PULL_UP`, and `PIN_CONFIG_DRIVE_STRENGTH`.
+
+Reported-by: Nik P <npliashechnikov@gmail.com>
+Reported-by: Nathan Schulte <nmschulte@gmail.com>
+Reported-by: Friedrich Vock <friedrich.vock@gmx.de>
+Closes: https://bugzilla.kernel.org/show_bug.cgi?id=217336
+Reported-by: dridri85@gmail.com
+Closes: https://bugzilla.kernel.org/show_bug.cgi?id=217493
+Link: https://lore.kernel.org/linux-input/20230530154058.17594-1-friedrich.vock@gmx.de/
+Tested-by: Jan Visser <starquake@linuxeverywhere.org>
+Fixes: 2956b5d94a76 ("pinctrl / gpio: Introduce .set_config() callback for GPIO chips")
+Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
+Reviewed-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Link: https://lore.kernel.org/r/20230705133005.577-3-mario.limonciello@amd.com
+Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pinctrl/pinctrl-amd.c | 28 +++++++++++++++-------------
+ 1 file changed, 15 insertions(+), 13 deletions(-)
+
+diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
+index c57f91f484235..fbec8a07e942e 100644
+--- a/drivers/pinctrl/pinctrl-amd.c
++++ b/drivers/pinctrl/pinctrl-amd.c
+@@ -169,18 +169,6 @@ static int amd_gpio_set_debounce(struct gpio_chip *gc, unsigned offset,
+ return ret;
+ }
+
+-static int amd_gpio_set_config(struct gpio_chip *gc, unsigned offset,
+- unsigned long config)
+-{
+- u32 debounce;
+-
+- if (pinconf_to_config_param(config) != PIN_CONFIG_INPUT_DEBOUNCE)
+- return -ENOTSUPP;
+-
+- debounce = pinconf_to_config_argument(config);
+- return amd_gpio_set_debounce(gc, offset, debounce);
+-}
+-
+ #ifdef CONFIG_DEBUG_FS
+ static void amd_gpio_dbg_show(struct seq_file *s, struct gpio_chip *gc)
+ {
+@@ -632,7 +620,7 @@ static int amd_pinconf_get(struct pinctrl_dev *pctldev,
+ }
+
+ static int amd_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
+- unsigned long *configs, unsigned num_configs)
++ unsigned long *configs, unsigned int num_configs)
+ {
+ int i;
+ u32 arg;
+@@ -722,6 +710,20 @@ static int amd_pinconf_group_set(struct pinctrl_dev *pctldev,
+ return 0;
+ }
+
++static int amd_gpio_set_config(struct gpio_chip *gc, unsigned int pin,
++ unsigned long config)
++{
++ struct amd_gpio *gpio_dev = gpiochip_get_data(gc);
++
++ if (pinconf_to_config_param(config) == PIN_CONFIG_INPUT_DEBOUNCE) {
++ u32 debounce = pinconf_to_config_argument(config);
++
++ return amd_gpio_set_debounce(gc, pin, debounce);
++ }
++
++ return amd_pinconf_set(gpio_dev->pctrl, pin, &config, 1);
++}
++
+ static const struct pinconf_ops amd_pinconf_ops = {
+ .pin_config_get = amd_pinconf_get,
+ .pin_config_set = amd_pinconf_set,
+--
+2.39.2
+
--- /dev/null
+From b214bbb9e5a6dc8ccd588e7576378c9305086120 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 1 Jun 2023 20:58:47 +0200
+Subject: posix-timers: Ensure timer ID search-loop limit is valid
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+[ Upstream commit 8ce8849dd1e78dadcee0ec9acbd259d239b7069f ]
+
+posix_timer_add() tries to allocate a posix timer ID by starting from the
+cached ID which was stored by the last successful allocation.
+
+This is done in a loop searching the ID space for a free slot one by
+one. The loop has to terminate when the search wrapped around to the
+starting point.
+
+But that's racy vs. establishing the starting point, which is read out
+locklessly. This leads to the following problem:
+
+CPU0                                    CPU1
+posix_timer_add()
+  start = sig->posix_timer_id;
+  lock(hash_lock);
+  ...                                   posix_timer_add()
+  if (++sig->posix_timer_id < 0)
+                                          start = sig->posix_timer_id;
+  sig->posix_timer_id = 0;
+
+So CPU1 can observe a negative start value, i.e. -1, and the loop break
+never happens because the condition can never be true:
+
+ if (sig->posix_timer_id == start)
+ break;
+
+While this is unlikely to ever turn into an endless loop as the ID space is
+huge (INT_MAX), the racy read of the start value caught the attention of
+KCSAN and Dmitry unearthed that incorrectness.
+
+Rewrite it so that all id operations are under the hash lock.
+
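+The replacement loop keeps the next-ID counter in the positive range with
+a mask instead of a signed wraparound check; the clamp can be sanity
+checked in isolation (illustrative only):
+
+  #include <stdio.h>
+  #include <limits.h>
+
+  int main(void)
+  {
+          unsigned int id = INT_MAX;               /* worst case */
+          unsigned int next = (id + 1) & INT_MAX;  /* the patch's clamp */
+
+          /* INT_MAX + 1 would flip a signed int negative; masking with
+           * INT_MAX wraps the counter to 0, so the value handed out as
+           * a timer ID can never be negative. */
+          printf("next id after %u: %u\n", id, next);   /* ... 0 */
+          return 0;
+  }
+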
+Reported-by: syzbot+5c54bd3eb218bb595aa9@syzkaller.appspotmail.com
+Reported-by: Dmitry Vyukov <dvyukov@google.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Frederic Weisbecker <frederic@kernel.org>
+Link: https://lore.kernel.org/r/87bkhzdn6g.ffs@tglx
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/sched/signal.h | 2 +-
+ kernel/time/posix-timers.c | 31 ++++++++++++++++++-------------
+ 2 files changed, 19 insertions(+), 14 deletions(-)
+
+diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
+index bcaba7e8ca6ea..916f4807cc9a6 100644
+--- a/include/linux/sched/signal.h
++++ b/include/linux/sched/signal.h
+@@ -119,7 +119,7 @@ struct signal_struct {
+ #ifdef CONFIG_POSIX_TIMERS
+
+ /* POSIX.1b Interval Timers */
+- int posix_timer_id;
++ unsigned int next_posix_timer_id;
+ struct list_head posix_timers;
+
+ /* ITIMER_REAL timer for the process */
+diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
+index 8b90abd690730..309c551ac18fd 100644
+--- a/kernel/time/posix-timers.c
++++ b/kernel/time/posix-timers.c
+@@ -168,25 +168,30 @@ static struct k_itimer *posix_timer_by_id(timer_t id)
+ static int posix_timer_add(struct k_itimer *timer)
+ {
+ struct signal_struct *sig = current->signal;
+- int first_free_id = sig->posix_timer_id;
+ struct hlist_head *head;
+- int ret = -ENOENT;
++ unsigned int cnt, id;
+
+- do {
++ /*
++ * FIXME: Replace this by a per signal struct xarray once there is
++ * a plan to handle the resulting CRIU regression gracefully.
++ */
++ for (cnt = 0; cnt <= INT_MAX; cnt++) {
+ spin_lock(&hash_lock);
+- head = &posix_timers_hashtable[hash(sig, sig->posix_timer_id)];
+- if (!__posix_timers_find(head, sig, sig->posix_timer_id)) {
++ id = sig->next_posix_timer_id;
++
++ /* Write the next ID back. Clamp it to the positive space */
++ sig->next_posix_timer_id = (id + 1) & INT_MAX;
++
++ head = &posix_timers_hashtable[hash(sig, id)];
++ if (!__posix_timers_find(head, sig, id)) {
+ hlist_add_head_rcu(&timer->t_hash, head);
+- ret = sig->posix_timer_id;
++ spin_unlock(&hash_lock);
++ return id;
+ }
+- if (++sig->posix_timer_id < 0)
+- sig->posix_timer_id = 0;
+- if ((sig->posix_timer_id == first_free_id) && (ret == -ENOENT))
+- /* Loop over all possible ids completed */
+- ret = -EAGAIN;
+ spin_unlock(&hash_lock);
+- } while (ret == -ENOENT);
+- return ret;
++ }
++ /* POSIX return code when no timer ID could be allocated */
++ return -EAGAIN;
+ }
+
+ static inline void unlock_timer(struct k_itimer *timr, unsigned long flags)
+--
+2.39.2
+
--- /dev/null
+From 89408f5db48ef6bc05c4ab3f545392fc1e8f2dd8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 30 May 2023 16:25:07 +0800
+Subject: sched/fair: Don't balance task to its current running CPU
+
+From: Yicong Yang <yangyicong@hisilicon.com>
+
+[ Upstream commit 0dd37d6dd33a9c23351e6115ae8cdac7863bc7de ]
+
+We've run into a case where the balancer tries to balance a migration
+disabled task and triggers the warning in set_task_cpu() like below:
+
+ ------------[ cut here ]------------
+ WARNING: CPU: 7 PID: 0 at kernel/sched/core.c:3115 set_task_cpu+0x188/0x240
+ Modules linked in: hclgevf xt_CHECKSUM ipt_REJECT nf_reject_ipv4 <...snip>
+ CPU: 7 PID: 0 Comm: swapper/7 Kdump: loaded Tainted: G O 6.1.0-rc4+ #1
+ Hardware name: Huawei TaiShan 2280 V2/BC82AMDC, BIOS 2280-V2 CS V5.B221.01 12/09/2021
+ pstate: 604000c9 (nZCv daIF +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
+ pc : set_task_cpu+0x188/0x240
+ lr : load_balance+0x5d0/0xc60
+ sp : ffff80000803bc70
+ x29: ffff80000803bc70 x28: ffff004089e190e8 x27: ffff004089e19040
+ x26: ffff007effcabc38 x25: 0000000000000000 x24: 0000000000000001
+ x23: ffff80000803be84 x22: 000000000000000c x21: ffffb093e79e2a78
+ x20: 000000000000000c x19: ffff004089e19040 x18: 0000000000000000
+ x17: 0000000000001fad x16: 0000000000000030 x15: 0000000000000000
+ x14: 0000000000000003 x13: 0000000000000000 x12: 0000000000000000
+ x11: 0000000000000001 x10: 0000000000000400 x9 : ffffb093e4cee530
+ x8 : 00000000fffffffe x7 : 0000000000ce168a x6 : 000000000000013e
+ x5 : 00000000ffffffe1 x4 : 0000000000000001 x3 : 0000000000000b2a
+ x2 : 0000000000000b2a x1 : ffffb093e6d6c510 x0 : 0000000000000001
+ Call trace:
+ set_task_cpu+0x188/0x240
+ load_balance+0x5d0/0xc60
+ rebalance_domains+0x26c/0x380
+ _nohz_idle_balance.isra.0+0x1e0/0x370
+ run_rebalance_domains+0x6c/0x80
+ __do_softirq+0x128/0x3d8
+ ____do_softirq+0x18/0x24
+ call_on_irq_stack+0x2c/0x38
+ do_softirq_own_stack+0x24/0x3c
+ __irq_exit_rcu+0xcc/0xf4
+ irq_exit_rcu+0x18/0x24
+ el1_interrupt+0x4c/0xe4
+ el1h_64_irq_handler+0x18/0x2c
+ el1h_64_irq+0x74/0x78
+ arch_cpu_idle+0x18/0x4c
+ default_idle_call+0x58/0x194
+ do_idle+0x244/0x2b0
+ cpu_startup_entry+0x30/0x3c
+ secondary_start_kernel+0x14c/0x190
+ __secondary_switched+0xb0/0xb4
+ ---[ end trace 0000000000000000 ]---
+
+Further investigation shows that the warning is superfluous: the migration
+disabled task is just going to be migrated to its current running CPU.
+This is because, on load balance, if the dst_cpu is not allowed by the
+task, we'll re-select a new_dst_cpu as a candidate. If no task can be
+balanced to dst_cpu we'll try to balance the task to the new_dst_cpu
+instead. In this case, when the migration disabled task is not on a CPU it
+is only allowed to run on its current CPU, so load balance will select its
+current CPU as new_dst_cpu and later trigger the warning above.
+
+The new_dst_cpu is chosen from env->dst_grpmask. Currently it
+contains the CPUs in sched_group_span(), and if we have overlapped groups
+it's possible to run into this case. This patch sets env->dst_grpmask to
+group_balance_mask(), which excludes any CPUs from the busiest group and
+solves the issue. For balancing in a domain with no overlapped groups
+the behaviour stays the same as before.
+
+Suggested-by: Vincent Guittot <vincent.guittot@linaro.org>
+Signed-off-by: Yicong Yang <yangyicong@hisilicon.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Reviewed-by: Vincent Guittot <vincent.guittot@linaro.org>
+Link: https://lore.kernel.org/r/20230530082507.10444-1-yangyicong@huawei.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/sched/fair.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 259996d2dcf7a..9d1e7b0bf486d 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -8142,7 +8142,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
+ .sd = sd,
+ .dst_cpu = this_cpu,
+ .dst_rq = this_rq,
+- .dst_grpmask = sched_group_span(sd->groups),
++ .dst_grpmask = group_balance_mask(sd->groups),
+ .idle = idle,
+ .loop_break = sched_nr_migrate_break,
+ .cpus = cpus,
+--
+2.39.2
+
fuse-revalidate-don-t-invalidate-if-interrupted.patch
can-bcm-fix-uaf-in-bcm_proc_show.patch
ext4-correct-inline-offset-when-handling-xattrs-in-inode-body.patch
+debugobjects-recheck-debug_objects_enabled-before-re.patch
+nbd-add-the-maximum-limit-of-allocated-index-in-nbd_.patch
+md-fix-data-corruption-for-raid456-when-reshape-rest.patch
+md-raid10-prevent-soft-lockup-while-flush-writes.patch
+posix-timers-ensure-timer-id-search-loop-limit-is-va.patch
+sched-fair-don-t-balance-task-to-its-current-running.patch
+bpf-address-kcsan-report-on-bpf_lru_list.patch
+wifi-wext-core-fix-wstringop-overflow-warning-in-ioc.patch
+igb-fix-igb_down-hung-on-surprise-removal.patch
+spi-bcm63xx-fix-max-prepend-length.patch
+fbdev-imxfb-warn-about-invalid-left-right-margin.patch
+pinctrl-amd-use-amd_pinconf_set-for-all-config-optio.patch
+net-ethernet-ti-cpsw_ale-fix-cpsw_ale_get_field-cpsw.patch
+fbdev-au1200fb-fix-missing-irq-check-in-au1200fb_drv.patch
+llc-don-t-drop-packet-from-non-root-netns.patch
+netfilter-nf_tables-fix-spurious-set-element-inserti.patch
+tcp-annotate-data-races-around-rskq_defer_accept.patch
+tcp-annotate-data-races-around-tp-notsent_lowat.patch
+tcp-annotate-data-races-around-fastopenq.max_qlen.patch
--- /dev/null
+From e365e2ffc30a9f4aa66f5473be7b533c7d551997 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 29 Jun 2023 09:14:52 +0200
+Subject: spi: bcm63xx: fix max prepend length
+
+From: Jonas Gorski <jonas.gorski@gmail.com>
+
+[ Upstream commit 5158814cbb37bbb38344b3ecddc24ba2ed0365f2 ]
+
+The command word is defined as follows:
+
+ /* Command */
+ #define SPI_CMD_COMMAND_SHIFT 0
+ #define SPI_CMD_DEVICE_ID_SHIFT 4
+ #define SPI_CMD_PREPEND_BYTE_CNT_SHIFT 8
+ #define SPI_CMD_ONE_BYTE_SHIFT 11
+ #define SPI_CMD_ONE_WIRE_SHIFT 12
+
+If the prepend byte count field starts at bit 8, and the next defined
+bit is SPI_CMD_ONE_BYTE at bit 11, it can be at most 3 bits wide, and
+thus the max value is 7, not 15.
+
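+The width falls straight out of the shift definitions; a tiny standalone
+check makes the bound explicit (sketch reusing the defines quoted above):
+
+  #include <stdio.h>
+
+  #define SPI_CMD_PREPEND_BYTE_CNT_SHIFT 8
+  #define SPI_CMD_ONE_BYTE_SHIFT         11
+
+  int main(void)
+  {
+          /* The prepend count occupies bits 8..10: the next defined
+           * field starts at bit 11, so this one is 3 bits wide. */
+          unsigned int width = SPI_CMD_ONE_BYTE_SHIFT -
+                               SPI_CMD_PREPEND_BYTE_CNT_SHIFT;
+          unsigned int max = (1u << width) - 1;
+
+          printf("width=%u max=%u\n", width, max);   /* width=3 max=7 */
+          return 0;
+  }
+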
+Fixes: b17de076062a ("spi/bcm63xx: work around inability to keep CS up")
+Signed-off-by: Jonas Gorski <jonas.gorski@gmail.com>
+Link: https://lore.kernel.org/r/20230629071453.62024-1-jonas.gorski@gmail.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/spi/spi-bcm63xx.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/spi/spi-bcm63xx.c b/drivers/spi/spi-bcm63xx.c
+index bfe5754768f97..cc6ec3fb5bfdf 100644
+--- a/drivers/spi/spi-bcm63xx.c
++++ b/drivers/spi/spi-bcm63xx.c
+@@ -134,7 +134,7 @@ enum bcm63xx_regs_spi {
+ SPI_MSG_DATA_SIZE,
+ };
+
+-#define BCM63XX_SPI_MAX_PREPEND 15
++#define BCM63XX_SPI_MAX_PREPEND 7
+
+ #define BCM63XX_SPI_MAX_CS 8
+ #define BCM63XX_SPI_BUS_NUM 0
+--
+2.39.2
+
--- /dev/null
+From fc25d690b7c5333e6f6bc0e356a1e17a01c1b846 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 19 Jul 2023 21:28:57 +0000
+Subject: tcp: annotate data-races around fastopenq.max_qlen
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 70f360dd7042cb843635ece9d28335a4addff9eb ]
+
+This field can be read locklessly.
+
+Fixes: 1536e2857bd3 ("tcp: Add a TCP_FASTOPEN socket option to get a max backlog on its listner")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Link: https://lore.kernel.org/r/20230719212857.3943972-12-edumazet@google.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/tcp.h | 2 +-
+ net/ipv4/tcp.c | 2 +-
+ net/ipv4/tcp_fastopen.c | 6 ++++--
+ 3 files changed, 6 insertions(+), 4 deletions(-)
+
+diff --git a/include/linux/tcp.h b/include/linux/tcp.h
+index b9bc6e3e4ef96..45a85277c2ea5 100644
+--- a/include/linux/tcp.h
++++ b/include/linux/tcp.h
+@@ -425,7 +425,7 @@ static inline void fastopen_queue_tune(struct sock *sk, int backlog)
+ struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
+ int somaxconn = READ_ONCE(sock_net(sk)->core.sysctl_somaxconn);
+
+- queue->fastopenq.max_qlen = min_t(unsigned int, backlog, somaxconn);
++ WRITE_ONCE(queue->fastopenq.max_qlen, min_t(unsigned int, backlog, somaxconn));
+ }
+
+ static inline void tcp_move_syn(struct tcp_sock *tp,
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index bcc2a3490323b..0859f968c9b27 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -3193,7 +3193,7 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
+ break;
+
+ case TCP_FASTOPEN:
+- val = icsk->icsk_accept_queue.fastopenq.max_qlen;
++ val = READ_ONCE(icsk->icsk_accept_queue.fastopenq.max_qlen);
+ break;
+
+ case TCP_FASTOPEN_CONNECT:
+diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
+index 0567edb76522c..0edd8d357e3d1 100644
+--- a/net/ipv4/tcp_fastopen.c
++++ b/net/ipv4/tcp_fastopen.c
+@@ -239,6 +239,7 @@ static struct sock *tcp_fastopen_create_child(struct sock *sk,
+ static bool tcp_fastopen_queue_check(struct sock *sk)
+ {
+ struct fastopen_queue *fastopenq;
++ int max_qlen;
+
+ /* Make sure the listener has enabled fastopen, and we don't
+ * exceed the max # of pending TFO requests allowed before trying
+@@ -251,10 +252,11 @@ static bool tcp_fastopen_queue_check(struct sock *sk)
+ * temporarily vs a server not supporting Fast Open at all.
+ */
+ fastopenq = &inet_csk(sk)->icsk_accept_queue.fastopenq;
+- if (fastopenq->max_qlen == 0)
++ max_qlen = READ_ONCE(fastopenq->max_qlen);
++ if (max_qlen == 0)
+ return false;
+
+- if (fastopenq->qlen >= fastopenq->max_qlen) {
++ if (fastopenq->qlen >= max_qlen) {
+ struct request_sock *req1;
+ spin_lock(&fastopenq->lock);
+ req1 = fastopenq->rskq_rst_head;
+--
+2.39.2
+
--- /dev/null
+From a1c3e3d2a356373d12abe57ecf1735ee43c11077 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 19 Jul 2023 21:28:54 +0000
+Subject: tcp: annotate data-races around rskq_defer_accept
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit ae488c74422fb1dcd807c0201804b3b5e8a322a3 ]
+
+do_tcp_getsockopt() reads rskq_defer_accept while another cpu
+might change its value.
+
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Link: https://lore.kernel.org/r/20230719212857.3943972-9-edumazet@google.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv4/tcp.c | 11 ++++++-----
+ 1 file changed, 6 insertions(+), 5 deletions(-)
+
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index c93aa6542d43b..98811b5f2451a 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -2729,9 +2729,9 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
+
+ case TCP_DEFER_ACCEPT:
+ /* Translate value in seconds to number of retransmits */
+- icsk->icsk_accept_queue.rskq_defer_accept =
+- secs_to_retrans(val, TCP_TIMEOUT_INIT / HZ,
+- TCP_RTO_MAX / HZ);
++ WRITE_ONCE(icsk->icsk_accept_queue.rskq_defer_accept,
++ secs_to_retrans(val, TCP_TIMEOUT_INIT / HZ,
++ TCP_RTO_MAX / HZ));
+ break;
+
+ case TCP_WINDOW_CLAMP:
+@@ -3067,8 +3067,9 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
+ val = (val ? : net->ipv4.sysctl_tcp_fin_timeout) / HZ;
+ break;
+ case TCP_DEFER_ACCEPT:
+- val = retrans_to_secs(icsk->icsk_accept_queue.rskq_defer_accept,
+- TCP_TIMEOUT_INIT / HZ, TCP_RTO_MAX / HZ);
++ val = READ_ONCE(icsk->icsk_accept_queue.rskq_defer_accept);
++ val = retrans_to_secs(val, TCP_TIMEOUT_INIT / HZ,
++ TCP_RTO_MAX / HZ);
+ break;
+ case TCP_WINDOW_CLAMP:
+ val = tp->window_clamp;
+--
+2.39.2
+
--- /dev/null
+From d6509e7b6aaae7f4c8683d2bf0d3dbec83f01e0a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 19 Jul 2023 21:28:55 +0000
+Subject: tcp: annotate data-races around tp->notsent_lowat
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 1aeb87bc1440c5447a7fa2d6e3c2cca52cbd206b ]
+
+tp->notsent_lowat can be read locklessly from do_tcp_getsockopt()
+and tcp_poll().
+
+Fixes: c9bee3b7fdec ("tcp: TCP_NOTSENT_LOWAT socket option")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Link: https://lore.kernel.org/r/20230719212857.3943972-10-edumazet@google.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/tcp.h | 6 +++++-
+ net/ipv4/tcp.c | 4 ++--
+ 2 files changed, 7 insertions(+), 3 deletions(-)
+
+diff --git a/include/net/tcp.h b/include/net/tcp.h
+index 4f97c0e2d5f34..b1a9e6b1a1533 100644
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -1887,7 +1887,11 @@ void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr);
+ static inline u32 tcp_notsent_lowat(const struct tcp_sock *tp)
+ {
+ struct net *net = sock_net((struct sock *)tp);
+- return tp->notsent_lowat ?: READ_ONCE(net->ipv4.sysctl_tcp_notsent_lowat);
++ u32 val;
++
++ val = READ_ONCE(tp->notsent_lowat);
++
++ return val ?: READ_ONCE(net->ipv4.sysctl_tcp_notsent_lowat);
+ }
+
+ static inline bool tcp_stream_memory_free(const struct sock *sk)
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index 98811b5f2451a..bcc2a3490323b 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -2810,7 +2810,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
+ err = tcp_repair_set_window(tp, optval, optlen);
+ break;
+ case TCP_NOTSENT_LOWAT:
+- tp->notsent_lowat = val;
++ WRITE_ONCE(tp->notsent_lowat, val);
+ sk->sk_write_space(sk);
+ break;
+ default:
+@@ -3204,7 +3204,7 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
+ val = tcp_time_stamp_raw() + tp->tsoffset;
+ break;
+ case TCP_NOTSENT_LOWAT:
+- val = tp->notsent_lowat;
++ val = READ_ONCE(tp->notsent_lowat);
+ break;
+ case TCP_SAVE_SYN:
+ val = tp->save_syn;
+--
+2.39.2
+
--- /dev/null
+From bccf6cb1b2bdae1301563cfc879ac13f50497671 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 15 Jun 2023 12:04:07 -0600
+Subject: wifi: wext-core: Fix -Wstringop-overflow warning in
+ ioctl_standard_iw_point()
+
+From: Gustavo A. R. Silva <gustavoars@kernel.org>
+
+[ Upstream commit 71e7552c90db2a2767f5c17c7ec72296b0d92061 ]
+
+-Wstringop-overflow is legitimately warning us about extra_size
+potentially being zero at some point, hence potentially ending
+up _allocating_ zero bytes of memory for the extra pointer and then
+trying to access such an object in a call to copy_from_user().
+
+Fix this by adding a sanity check to ensure we never end up
+trying to allocate zero bytes of data for the extra pointer, before
+continuing to execute the rest of the code in the function.
+
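+A userspace analogue of the guard (not the wext code itself): a computed
+allocation size is validated before it reaches the allocator, instead of
+letting later copies write into a zero-byte object:
+
+  #include <stdio.h>
+  #include <stdlib.h>
+
+  /* Hypothetical helper mirroring the patch's guard. */
+  static void *alloc_extra(int extra_size)
+  {
+          if (extra_size <= 0)            /* the sanity check added here */
+                  return NULL;
+          return calloc(1, extra_size);   /* zero-filled, like kzalloc() */
+  }
+
+  int main(void)
+  {
+          void *p0 = alloc_extra(0);      /* rejected -> NULL */
+          void *p1 = alloc_extra(32);     /* fine */
+
+          printf("size 0: %s, size 32: %s\n",
+                 p0 ? "allocated" : "rejected",
+                 p1 ? "allocated" : "rejected");
+          free(p1);
+          return 0;
+  }
+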
+Address the following -Wstringop-overflow warning seen when building
+the m68k architecture with the allyesconfig configuration:
+ from net/wireless/wext-core.c:11:
+In function '_copy_from_user',
+ inlined from 'copy_from_user' at include/linux/uaccess.h:183:7,
+ inlined from 'ioctl_standard_iw_point' at net/wireless/wext-core.c:825:7:
+arch/m68k/include/asm/string.h:48:25: warning: '__builtin_memset' writing 1 or more bytes into a region of size 0 overflows the destination [-Wstringop-overflow=]
+ 48 | #define memset(d, c, n) __builtin_memset(d, c, n)
+ | ^~~~~~~~~~~~~~~~~~~~~~~~~
+include/linux/uaccess.h:153:17: note: in expansion of macro 'memset'
+ 153 | memset(to + (n - res), 0, res);
+ | ^~~~~~
+In function 'kmalloc',
+ inlined from 'kzalloc' at include/linux/slab.h:694:9,
+ inlined from 'ioctl_standard_iw_point' at net/wireless/wext-core.c:819:10:
+include/linux/slab.h:577:16: note: at offset 1 into destination object of size 0 allocated by '__kmalloc'
+ 577 | return __kmalloc(size, flags);
+ | ^~~~~~~~~~~~~~~~~~~~~~
+
+This helps with the ongoing efforts to globally enable
+-Wstringop-overflow.
+
+Link: https://github.com/KSPP/linux/issues/315
+Signed-off-by: Gustavo A. R. Silva <gustavoars@kernel.org>
+Reviewed-by: Simon Horman <simon.horman@corigine.com>
+Link: https://lore.kernel.org/r/ZItSlzvIpjdjNfd8@work
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/wireless/wext-core.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
+index b6414c7bef556..4bf33f9b28870 100644
+--- a/net/wireless/wext-core.c
++++ b/net/wireless/wext-core.c
+@@ -798,6 +798,12 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
+ }
+ }
+
++ /* Sanity-check to ensure we never end up _allocating_ zero
++ * bytes of data for extra.
++ */
++ if (extra_size <= 0)
++ return -EFAULT;
++
+ /* kzalloc() ensures NULL-termination for essid_compat. */
+ extra = kzalloc(extra_size, GFP_KERNEL);
+ if (!extra)
+--
+2.39.2
+