--- /dev/null
+From 27dd5fcf01a5c84df5898715219bcbfcccecffd9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 22 Jan 2024 22:43:05 -0800
+Subject: arch/arm/mm: fix major fault accounting when retrying under per-VMA
+ lock
+
+From: Suren Baghdasaryan <surenb@google.com>
+
+[ Upstream commit e870920bbe68e52335a4c31a059e6af6a9a59dbb ]
+
+The change [1] missed the ARM architecture when fixing major fault
+accounting for page fault retry under per-VMA lock.
+
+The user-visible effect is that correct major fault accounting is
+restored; it was broken after [2] was merged in the 6.7 kernel. A more
+detailed description is in [3]; this patch simply adds the same fix to
+the ARM architecture, which [3] missed.
+
+Add the missing code to fix fault accounting on ARM.
+
+[1] 46e714c729c8 ("arch/mm/fault: fix major fault accounting when retrying under per-VMA lock")
+[2] https://lore.kernel.org/all/20231006195318.4087158-6-willy@infradead.org/
+[3] https://lore.kernel.org/all/20231226214610.109282-1-surenb@google.com/
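+
+For context, the accounting this restores lives in the generic fault
+path; roughly (a sketch of mm_account_fault(), whose details vary
+across kernel versions):
+
+	if (ret & VM_FAULT_RETRY)
+		return;		/* first attempt is not accounted yet */
+
+	/* a retried fault that did major work earlier still counts as major */
+	if ((ret & VM_FAULT_MAJOR) || (flags & FAULT_FLAG_TRIED))
+		current->maj_flt++;
+	else
+		current->min_flt++;
+
+Without FAULT_FLAG_TRIED set on the mmap_lock retry, a fault that did
+its I/O under the per-VMA lock completes as a minor fault and the major
+event is lost.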
+
+Link: https://lkml.kernel.org/r/20240123064305.2829244-1-surenb@google.com
+Fixes: 12214eba1992 ("mm: handle read faults under the VMA lock")
+Reported-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
+Signed-off-by: Suren Baghdasaryan <surenb@google.com>
+Cc: Alexander Gordeev <agordeev@linux.ibm.com>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
+Cc: Dave Hansen <dave.hansen@linux.intel.com>
+Cc: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
+Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
+Cc: Michael Ellerman <mpe@ellerman.id.au>
+Cc: Palmer Dabbelt <palmer@dabbelt.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Will Deacon <will@kernel.org>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm/mm/fault.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
+index e96fb40b9cc32..07565b593ed68 100644
+--- a/arch/arm/mm/fault.c
++++ b/arch/arm/mm/fault.c
+@@ -298,6 +298,8 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
+ goto done;
+ }
+ count_vm_vma_lock_event(VMA_LOCK_RETRY);
++ if (fault & VM_FAULT_MAJOR)
++ flags |= FAULT_FLAG_TRIED;
+
+ /* Quick path to respond to signals */
+ if (fault_signal_pending(fault, regs)) {
+--
+2.43.0
+
--- /dev/null
+From 356935a2011365d59bbfc6728029166f964d39c3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 19 Oct 2023 12:21:35 +0100
+Subject: ARM: 9328/1: mm: try VMA lock-based page fault handling first
+
+From: Wang Kefeng <wangkefeng.wang@huawei.com>
+
+[ Upstream commit c16af1212479570454752671a170a1756e11fdfb ]
+
+Attempt VMA lock-based page fault handling first, and fall back to the
+existing mmap_lock-based handling if that fails. The ebizzy benchmark
+shows a 25% improvement on QEMU with 2 CPUs.
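+
+The locking contract the new fast path relies on, sketched (see the
+diff below for the actual code): lock_vma_under_rcu() returns a VMA
+with its per-VMA lock held for read, or NULL when the mmap_lock
+fallback is needed, and core mm releases that lock itself when it
+returns VM_FAULT_RETRY or VM_FAULT_COMPLETED:
+
+	vma = lock_vma_under_rcu(mm, addr);	/* NULL -> take mmap_lock path */
+	if (vma) {
+		fault = handle_mm_fault(vma, addr,
+					flags | FAULT_FLAG_VMA_LOCK, regs);
+		if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))
+			vma_end_read(vma);	/* otherwise already dropped */
+	}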
+
+Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
+Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
+Stable-dep-of: e870920bbe68 ("arch/arm/mm: fix major fault accounting when retrying under per-VMA lock")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm/Kconfig | 1 +
+ arch/arm/mm/fault.c | 30 ++++++++++++++++++++++++++++++
+ 2 files changed, 31 insertions(+)
+
+diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
+index f8567e95f98be..8f47d6762ea4b 100644
+--- a/arch/arm/Kconfig
++++ b/arch/arm/Kconfig
+@@ -35,6 +35,7 @@ config ARM
+ select ARCH_OPTIONAL_KERNEL_RWX_DEFAULT if CPU_V7
+ select ARCH_SUPPORTS_ATOMIC_RMW
+ select ARCH_SUPPORTS_HUGETLBFS if ARM_LPAE
++ select ARCH_SUPPORTS_PER_VMA_LOCK
+ select ARCH_USE_BUILTIN_BSWAP
+ select ARCH_USE_CMPXCHG_LOCKREF
+ select ARCH_USE_MEMTEST
+diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
+index fef62e4a9edde..e96fb40b9cc32 100644
+--- a/arch/arm/mm/fault.c
++++ b/arch/arm/mm/fault.c
+@@ -278,6 +278,35 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
+
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
+
++ if (!(flags & FAULT_FLAG_USER))
++ goto lock_mmap;
++
++ vma = lock_vma_under_rcu(mm, addr);
++ if (!vma)
++ goto lock_mmap;
++
++ if (!(vma->vm_flags & vm_flags)) {
++ vma_end_read(vma);
++ goto lock_mmap;
++ }
++ fault = handle_mm_fault(vma, addr, flags | FAULT_FLAG_VMA_LOCK, regs);
++ if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))
++ vma_end_read(vma);
++
++ if (!(fault & VM_FAULT_RETRY)) {
++ count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
++ goto done;
++ }
++ count_vm_vma_lock_event(VMA_LOCK_RETRY);
++
++ /* Quick path to respond to signals */
++ if (fault_signal_pending(fault, regs)) {
++ if (!user_mode(regs))
++ goto no_context;
++ return 0;
++ }
++lock_mmap:
++
+ retry:
+ vma = lock_mm_and_find_vma(mm, addr, regs);
+ if (unlikely(!vma)) {
+@@ -316,6 +345,7 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
+ }
+
+ mmap_read_unlock(mm);
++done:
+
+ /*
+ * Handle the "normal" case first - VM_FAULT_MAJOR
+--
+2.43.0
+
--- /dev/null
+From dab92d8302ed9a27886dc013ec6ee10398823f38 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 17 Dec 2023 01:59:10 +0200
+Subject: drm/bridge: properly refcount DT nodes in aux bridge drivers
+
+From: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
+
+[ Upstream commit 6914968a0b52507bf19d85e5fb9e35272e17cd35 ]
+
+The aux-bridge and aux-hpd-bridge drivers didn't call of_node_get() on
+the device nodes they later store in dev->of_node and platform data.
+When the bridge devices are released, those reference counts are
+decremented, resulting in refcount underflow / use-after-free warnings.
+Take the corresponding references during AUX bridge allocation.
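+
+The underlying rule, as a sketch (the matching puts happen on the
+device release path, outside this diff): a structure that stores a
+device_node pointer long-term must own a reference, dropped when the
+structure is released:
+
+	adev->dev.of_node = of_node_get(parent->of_node);	/* take a ref */
+	...
+	of_node_put(adev->dev.of_node);		/* drop it at release time */
+
+Both helpers accept NULL, so this stays safe when the parent has no OF
+node.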
+
+Reported-by: Luca Weiss <luca.weiss@fairphone.com>
+Fixes: 2a04739139b2 ("drm/bridge: add transparent bridge helper")
+Fixes: 26f4bac3d884 ("drm/bridge: aux-hpd: Replace of_device.h with explicit include")
+Reviewed-by: Neil Armstrong <neil.armstrong@linaro.org>
+Signed-off-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
+Link: https://patchwork.freedesktop.org/patch/msgid/20231216235910.911958-1-dmitry.baryshkov@linaro.org
+Signed-off-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/bridge/aux-bridge.c | 3 ++-
+ drivers/gpu/drm/bridge/aux-hpd-bridge.c | 4 ++--
+ 2 files changed, 4 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/bridge/aux-bridge.c b/drivers/gpu/drm/bridge/aux-bridge.c
+index 49d7c2ab1ecc3..b29980f95379e 100644
+--- a/drivers/gpu/drm/bridge/aux-bridge.c
++++ b/drivers/gpu/drm/bridge/aux-bridge.c
+@@ -6,6 +6,7 @@
+ */
+ #include <linux/auxiliary_bus.h>
+ #include <linux/module.h>
++#include <linux/of.h>
+
+ #include <drm/drm_bridge.h>
+ #include <drm/bridge/aux-bridge.h>
+@@ -57,7 +58,7 @@ int drm_aux_bridge_register(struct device *parent)
+ adev->id = ret;
+ adev->name = "aux_bridge";
+ adev->dev.parent = parent;
+- adev->dev.of_node = parent->of_node;
++ adev->dev.of_node = of_node_get(parent->of_node);
+ adev->dev.release = drm_aux_bridge_release;
+
+ ret = auxiliary_device_init(adev);
+diff --git a/drivers/gpu/drm/bridge/aux-hpd-bridge.c b/drivers/gpu/drm/bridge/aux-hpd-bridge.c
+index 44bb771211b82..a24b6613cc02d 100644
+--- a/drivers/gpu/drm/bridge/aux-hpd-bridge.c
++++ b/drivers/gpu/drm/bridge/aux-hpd-bridge.c
+@@ -63,9 +63,9 @@ struct auxiliary_device *devm_drm_dp_hpd_bridge_alloc(struct device *parent, str
+ adev->id = ret;
+ adev->name = "dp_hpd_bridge";
+ adev->dev.parent = parent;
+- adev->dev.of_node = parent->of_node;
++ adev->dev.of_node = of_node_get(parent->of_node);
+ adev->dev.release = drm_aux_hpd_bridge_release;
+- adev->dev.platform_data = np;
++ adev->dev.platform_data = of_node_get(np);
+
+ ret = auxiliary_device_init(adev);
+ if (ret) {
+--
+2.43.0
+
--- /dev/null
+From 90aa737d88d467175bd6d1660f5394977fdebb5e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 5 Dec 2023 13:13:36 -0700
+Subject: drm/bridge: Return NULL instead of plain 0 in
+ drm_dp_hpd_bridge_register() stub
+
+From: Nathan Chancellor <nathan@kernel.org>
+
+[ Upstream commit 812cc1da7ffd9e178ef66b8a22113be10fba466c ]
+
+sparse complains:
+
+ drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.c: note: in included file:
+ include/drm/bridge/aux-bridge.h:29:16: sparse: sparse: Using plain integer as NULL pointer
+
+Return NULL to clear up the warning.
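+
+A minimal standalone reproducer of the warning (hypothetical, not from
+the kernel tree):
+
+	static struct device *stub(void)
+	{
+		return 0;	/* sparse: Using plain integer as NULL pointer */
+	}
+
+sparse flags a literal 0 used in pointer context; returning NULL states
+the intent explicitly and silences the warning.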
+
+Reported-by: kernel test robot <lkp@intel.com>
+Closes: https://lore.kernel.org/oe-kbuild-all/202312060025.BdeqZrWx-lkp@intel.com/
+Fixes: e560518a6c2e ("drm/bridge: implement generic DP HPD bridge")
+Signed-off-by: Nathan Chancellor <nathan@kernel.org>
+Reviewed-by: Bryan O'Donoghue <bryan.odonoghue@linaro.org>
+Reviewed-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
+Link: https://patchwork.freedesktop.org/patch/msgid/20231205-drm_aux_bridge-fixes-v1-3-d242a0ae9df4@kernel.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/drm/bridge/aux-bridge.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/include/drm/bridge/aux-bridge.h b/include/drm/bridge/aux-bridge.h
+index 874f177381e34..4453906105ca1 100644
+--- a/include/drm/bridge/aux-bridge.h
++++ b/include/drm/bridge/aux-bridge.h
+@@ -41,7 +41,7 @@ static inline int devm_drm_dp_hpd_bridge_add(struct auxiliary_device *adev)
+ static inline struct device *drm_dp_hpd_bridge_register(struct device *parent,
+ struct device_node *np)
+ {
+- return 0;
++ return NULL;
+ }
+
+ static inline void drm_aux_hpd_bridge_notify(struct device *dev, enum drm_connector_status status)
+--
+2.43.0
+
--- /dev/null
+From 012eb1842d26ca778ee092274d9b186dd24d5123 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 23 Jan 2024 16:34:00 +0100
+Subject: exit: wait_task_zombie: kill the no longer necessary
+ spin_lock_irq(siglock)
+
+From: Oleg Nesterov <oleg@redhat.com>
+
+[ Upstream commit c1be35a16b2f1fe21f4f26f9de030ad6eaaf6a25 ]
+
+After the recent changes nobody uses siglock to read the values
+protected by stats_lock, so we can kill
+spin_lock_irq(&current->sighand->siglock) and update the comment.
+
+With this patch only __exit_signal() and thread_group_start_cputime() take
+stats_lock under siglock.
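+
+Why writers no longer need siglock, sketched from the reader side (the
+getrusage()-style readers retry on stats_lock; exact code varies by
+version):
+
+	unsigned seq;
+
+	do {
+		seq = read_seqbegin(&psig->stats_lock);
+		/* snapshot cutime/cstime/... here */
+	} while (read_seqretry(&psig->stats_lock, seq));
+
+Lockless readers recheck the sequence count, so the writer only needs
+write_seqlock_irq(&psig->stats_lock) for exclusion against them.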
+
+Link: https://lkml.kernel.org/r/20240123153359.GA21866@redhat.com
+Signed-off-by: Oleg Nesterov <oleg@redhat.com>
+Signed-off-by: Dylan Hatch <dylanbhatch@google.com>
+Cc: Eric W. Biederman <ebiederm@xmission.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/exit.c | 10 +++-------
+ 1 file changed, 3 insertions(+), 7 deletions(-)
+
+diff --git a/kernel/exit.c b/kernel/exit.c
+index aedc0832c9f4d..0c4858581c98c 100644
+--- a/kernel/exit.c
++++ b/kernel/exit.c
+@@ -1125,17 +1125,14 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
+ * and nobody can change them.
+ *
+ * psig->stats_lock also protects us from our sub-threads
+- * which can reap other children at the same time. Until
+- * we change k_getrusage()-like users to rely on this lock
+- * we have to take ->siglock as well.
++ * which can reap other children at the same time.
+ *
+ * We use thread_group_cputime_adjusted() to get times for
+ * the thread group, which consolidates times for all threads
+ * in the group including the group leader.
+ */
+ thread_group_cputime_adjusted(p, &tgutime, &tgstime);
+- spin_lock_irq(&current->sighand->siglock);
+- write_seqlock(&psig->stats_lock);
++ write_seqlock_irq(&psig->stats_lock);
+ psig->cutime += tgutime + sig->cutime;
+ psig->cstime += tgstime + sig->cstime;
+ psig->cgtime += task_gtime(p) + sig->gtime + sig->cgtime;
+@@ -1158,8 +1155,7 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
+ psig->cmaxrss = maxrss;
+ task_io_accounting_add(&psig->ioac, &p->ioac);
+ task_io_accounting_add(&psig->ioac, &sig->ioac);
+- write_sequnlock(&psig->stats_lock);
+- spin_unlock_irq(&current->sighand->siglock);
++ write_sequnlock_irq(&psig->stats_lock);
+ }
+
+ if (wo->wo_rusage)
+--
+2.43.0
+
--- /dev/null
+From 3ff4a98c26f3d8fbff98a6fd1af0f2537f518efa Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 4 Jan 2024 09:58:39 +0100
+Subject: readahead: avoid multiple marked readahead pages
+
+From: Jan Kara <jack@suse.cz>
+
+[ Upstream commit ab4443fe3ca6298663a55c4a70efc6c3ce913ca6 ]
+
+ra_alloc_folio() marks the page that should trigger the next round of
+async readahead. However, it rounds the computed mark up to the order
+of the page being allocated, which can lead to multiple consecutive
+pages being marked with the readahead flag. Consider a situation with
+index == 1, mark == 1, order == 0. We insert an order-0 page at index 1
+and mark it. Then we bump order to 1 and index to 2; mark (still == 1)
+is rounded up to 2, so the page at index 2 is marked as well. Then we
+bump order to 2 and index to 4; mark gets rounded up to 4, so the page
+at index 4 is marked as well. Multiple pages marked within a single
+readahead window confuse the readahead logic and result in the window
+being trimmed back to 1. This is triggered in particular when the
+maximum readahead window size is not a power of two (in the observed
+case it was 768 KB), and sequential read throughput suffers as a result.
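+
+The walk above, as a standalone userspace sketch (the kernel's
+round_up()/round_down() are equivalent to these masks for power-of-two
+alignment; a page is marked when the rounded mark equals its index):
+
+	#include <stdio.h>
+
+	#define round_up(x, y)	 (((x) + (y) - 1) & ~((y) - 1))
+	#define round_down(x, y) ((x) & ~((y) - 1))
+
+	int main(void)
+	{
+		unsigned long mark = 1, index = 1;
+
+		for (unsigned int order = 0; order <= 2; order++) {
+			unsigned long nr = 1UL << order;
+
+			printf("order %u index %lu: up=%lu down=%lu\n",
+			       order, index, round_up(mark, nr),
+			       round_down(mark, nr));
+			index += nr;	/* next folio follows this one */
+		}
+		return 0;
+	}
+
+round_up marks at indices 1, 2 and 4, while round_down matches only
+index 1, so exactly one page carries the readahead flag.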
+
+Fix the problem by rounding 'mark' down instead of up. Because the
+index is naturally aligned to 'order', we are guaranteed 'rounded mark'
+== index iff 'mark' falls within the page we are allocating at 'index'.
+Thus exactly one page is marked with the readahead flag, as the
+readahead code requires, and sequential read performance is restored.
+
+This effectively reverts part of commit b9ff43dd2743 ("mm/readahead: Fix
+readahead with large folios"). The commit changed the rounding with the
+rationale:
+
+"... we were setting the readahead flag on the folio which contains the
+last byte read from the block. This is wrong because we will trigger
+readahead at the end of the read without waiting to see if a subsequent
+read is going to use the pages we just read."
+
+Although this is true, this was always the case for read sizes not
+aligned to folio boundaries; large folios in the page cache just make
+the situation more obvious (and frequent). Also, for sequential read
+workloads it is better to trigger readahead earlier rather than later.
+The difference in rounding, and thus the earlier triggering of
+readahead, can result in reading more for semi-random workloads.
+However, workloads that really suffer from this seem to be rare.
+In particular I have verified that the workload described in commit
+b9ff43dd2743 ("mm/readahead: Fix readahead with large folios") of reading
+random 100k blocks from a file like:
+
+[reader]
+bs=100k
+rw=randread
+numjobs=1
+size=64g
+runtime=60s
+
+is not impacted by the rounding change and achieves ~70MB/s in both cases.
+
+[jack@suse.cz: fix one more place where mark rounding was done as well]
+ Link: https://lkml.kernel.org/r/20240123153254.5206-1-jack@suse.cz
+Link: https://lkml.kernel.org/r/20240104085839.21029-1-jack@suse.cz
+Fixes: b9ff43dd2743 ("mm/readahead: Fix readahead with large folios")
+Signed-off-by: Jan Kara <jack@suse.cz>
+Cc: Matthew Wilcox <willy@infradead.org>
+Cc: Guo Xuenan <guoxuenan@huawei.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ mm/readahead.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/mm/readahead.c b/mm/readahead.c
+index 6925e6959fd3f..1d1a84deb5bc5 100644
+--- a/mm/readahead.c
++++ b/mm/readahead.c
+@@ -469,7 +469,7 @@ static inline int ra_alloc_folio(struct readahead_control *ractl, pgoff_t index,
+
+ if (!folio)
+ return -ENOMEM;
+- mark = round_up(mark, 1UL << order);
++ mark = round_down(mark, 1UL << order);
+ if (index == mark)
+ folio_set_readahead(folio);
+ err = filemap_add_folio(ractl->mapping, folio, index, gfp);
+@@ -577,7 +577,7 @@ static void ondemand_readahead(struct readahead_control *ractl,
+ * It's the expected callback index, assume sequential access.
+ * Ramp up sizes, and push forward the readahead window.
+ */
+- expected = round_up(ra->start + ra->size - ra->async_size,
++ expected = round_down(ra->start + ra->size - ra->async_size,
+ 1UL << order);
+ if (index == expected || index == (ra->start + ra->size)) {
+ ra->start += ra->size;
+--
+2.43.0
+
--- /dev/null
+From eb8756cac789d9008a78dfa650204f2ac20aa174 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 31 Jan 2024 22:49:51 +0100
+Subject: selftests: mptcp: decrease BW in simult flows
+
+From: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+
+[ Upstream commit 5e2f3c65af47e527ccac54060cf909e3306652ff ]
+
+When running the simult_flows selftest in slow environments -- e.g.
+QEMU without KVM support -- the results can be unstable. This selftest
+checks that the aggregated bandwidth is (almost) fully used as
+expected.
+
+To help improve stability while still keeping the same validation in
+place, the BW and the delay are reduced to lower the pressure on the
+CPU.
+
+Fixes: 1a418cb8e888 ("mptcp: simult flow self-tests")
+Fixes: 219d04992b68 ("mptcp: push pending frames when subflow has free space")
+Cc: stable@vger.kernel.org
+Suggested-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Link: https://lore.kernel.org/r/20240131-upstream-net-20240131-mptcp-ci-issues-v1-6-4c1c11e571ff@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/testing/selftests/net/mptcp/simult_flows.sh | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/tools/testing/selftests/net/mptcp/simult_flows.sh b/tools/testing/selftests/net/mptcp/simult_flows.sh
+index 9096bf5794888..25693b37f820d 100755
+--- a/tools/testing/selftests/net/mptcp/simult_flows.sh
++++ b/tools/testing/selftests/net/mptcp/simult_flows.sh
+@@ -302,12 +302,12 @@ done
+
+ setup
+ run_test 10 10 0 0 "balanced bwidth"
+-run_test 10 10 1 50 "balanced bwidth with unbalanced delay"
++run_test 10 10 1 25 "balanced bwidth with unbalanced delay"
+
+ # we still need some additional infrastructure to pass the following test-cases
+-run_test 30 10 0 0 "unbalanced bwidth"
+-run_test 30 10 1 50 "unbalanced bwidth with unbalanced delay"
+-run_test 30 10 50 1 "unbalanced bwidth with opposed, unbalanced delay"
++run_test 10 3 0 0 "unbalanced bwidth"
++run_test 10 3 1 25 "unbalanced bwidth with unbalanced delay"
++run_test 10 3 25 1 "unbalanced bwidth with opposed, unbalanced delay"
+
+ mptcp_lib_result_print_all_tap
+ exit $ret
+--
+2.43.0
+
netrom-fix-a-data-race-around-sysctl_netrom_link_fai.patch
netrom-fix-data-races-around-sysctl_net_busy_read.patch
net-pds_core-fix-possible-double-free-in-error-handl.patch
+readahead-avoid-multiple-marked-readahead-pages.patch
+selftests-mptcp-decrease-bw-in-simult-flows.patch
+exit-wait_task_zombie-kill-the-no-longer-necessary-s.patch
+arm-9328-1-mm-try-vma-lock-based-page-fault-handling.patch
+arch-arm-mm-fix-major-fault-accounting-when-retrying.patch
+drm-bridge-return-null-instead-of-plain-0-in-drm_dp_.patch
+drm-bridge-properly-refcount-dt-nodes-in-aux-bridge-.patch