--- /dev/null
+From 6f363f5aa845561f7ea496d8b1175e3204470486 Mon Sep 17 00:00:00 2001
+From: Xiu Jianfeng <xiujianfeng@huawei.com>
+Date: Sat, 10 Jun 2023 17:26:43 +0800
+Subject: cgroup: Do not corrupt task iteration when rebinding subsystem
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Xiu Jianfeng <xiujianfeng@huawei.com>
+
+commit 6f363f5aa845561f7ea496d8b1175e3204470486 upstream.
+
+We found a refcount UAF bug as follows:
+
+refcount_t: addition on 0; use-after-free.
+WARNING: CPU: 1 PID: 342 at lib/refcount.c:25 refcount_warn_saturate+0xa0/0x148
+Workqueue: events cpuset_hotplug_workfn
+Call trace:
+ refcount_warn_saturate+0xa0/0x148
+ __refcount_add.constprop.0+0x5c/0x80
+ css_task_iter_advance_css_set+0xd8/0x210
+ css_task_iter_advance+0xa8/0x120
+ css_task_iter_next+0x94/0x158
+ update_tasks_root_domain+0x58/0x98
+ rebuild_root_domains+0xa0/0x1b0
+ rebuild_sched_domains_locked+0x144/0x188
+ cpuset_hotplug_workfn+0x138/0x5a0
+ process_one_work+0x1e8/0x448
+ worker_thread+0x228/0x3e0
+ kthread+0xe0/0xf0
+ ret_from_fork+0x10/0x20
+
+A kernel panic is then triggered, as below:
+
+Unable to handle kernel paging request at virtual address 00000000c0000010
+Call trace:
+ cgroup_apply_control_disable+0xa4/0x16c
+ rebind_subsystems+0x224/0x590
+ cgroup_destroy_root+0x64/0x2e0
+ css_free_rwork_fn+0x198/0x2a0
+ process_one_work+0x1d4/0x4bc
+ worker_thread+0x158/0x410
+ kthread+0x108/0x13c
+ ret_from_fork+0x10/0x18
+
+The race that causes this bug is shown below:
+
+(hotplug cpu) | (umount cpuset)
+mutex_lock(&cpuset_mutex) | mutex_lock(&cgroup_mutex)
+cpuset_hotplug_workfn |
+ rebuild_root_domains | rebind_subsystems
+ update_tasks_root_domain | spin_lock_irq(&css_set_lock)
+ css_task_iter_start | list_move_tail(&cset->e_cset_node[ss->id]
+ while(css_task_iter_next) | &dcgrp->e_csets[ss->id]);
+ css_task_iter_end | spin_unlock_irq(&css_set_lock)
+mutex_unlock(&cpuset_mutex) | mutex_unlock(&cgroup_mutex)
+
+Inside css_task_iter_start/next/end, css_set_lock is held and then
+released, so while tasks are being iterated (left side), the css_set may
+be moved to another list (right side); it->cset_head then points to the
+old list head while it->cset_pos->next points to the head node of the
+new list, which cannot be used as a struct css_set.
+
+To fix this issue, iterate over only scgrp's css_sets rather than all
+css_sets, so that in-flight iterators can be patched to preserve correct
+iteration, and update it->cset_head as well.
+
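+For context, the iteration involved follows the css_task_iter pattern;
+a simplified sketch of the API usage (not part of the applied diff):
+
+  struct css_task_iter it;
+  struct task_struct *task;
+
+  css_task_iter_start(css, 0, &it);
+  while ((task = css_task_iter_next(&it))) {
+          /*
+           * css_set_lock is dropped between iterations, so
+           * rebind_subsystems() can move the css_set currently
+           * being walked to another list in the meantime.
+           */
+  }
+  css_task_iter_end(&it);
+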
+Reported-by: Gaosheng Cui <cuigaosheng1@huawei.com>
+Link: https://www.spinics.net/lists/cgroups/msg37935.html
+Suggested-by: Michal Koutný <mkoutny@suse.com>
+Link: https://lore.kernel.org/all/20230526114139.70274-1-xiujianfeng@huaweicloud.com/
+Signed-off-by: Xiu Jianfeng <xiujianfeng@huawei.com>
+Fixes: 2d8f243a5e6e ("cgroup: implement cgroup->e_csets[]")
+Cc: stable@vger.kernel.org # v3.16+
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/cgroup/cgroup.c | 20 +++++++++++++++++---
+ 1 file changed, 17 insertions(+), 3 deletions(-)
+
+--- a/kernel/cgroup/cgroup.c
++++ b/kernel/cgroup/cgroup.c
+@@ -1712,7 +1712,7 @@ int rebind_subsystems(struct cgroup_root
+ {
+ struct cgroup *dcgrp = &dst_root->cgrp;
+ struct cgroup_subsys *ss;
+- int ssid, i, ret;
++ int ssid, ret;
+ u16 dfl_disable_ss_mask = 0;
+
+ lockdep_assert_held(&cgroup_mutex);
+@@ -1756,7 +1756,8 @@ int rebind_subsystems(struct cgroup_root
+ struct cgroup_root *src_root = ss->root;
+ struct cgroup *scgrp = &src_root->cgrp;
+ struct cgroup_subsys_state *css = cgroup_css(scgrp, ss);
+- struct css_set *cset;
++ struct css_set *cset, *cset_pos;
++ struct css_task_iter *it;
+
+ WARN_ON(!css || cgroup_css(dcgrp, ss));
+
+@@ -1774,9 +1775,22 @@ int rebind_subsystems(struct cgroup_root
+ css->cgroup = dcgrp;
+
+ spin_lock_irq(&css_set_lock);
+- hash_for_each(css_set_table, i, cset, hlist)
++ WARN_ON(!list_empty(&dcgrp->e_csets[ss->id]));
++ list_for_each_entry_safe(cset, cset_pos, &scgrp->e_csets[ss->id],
++ e_cset_node[ss->id]) {
+ list_move_tail(&cset->e_cset_node[ss->id],
+ &dcgrp->e_csets[ss->id]);
++ /*
++ * All css_sets of scgrp are moved together, in the same
++ * order, to dcgrp; patch in-flight iterators so that the
++ * iteration stays correct. Since an iterator is always
++ * advanced right away and finishes when it->cset_pos meets
++ * it->cset_head, updating it->cset_head is enough here.
++ */
++ list_for_each_entry(it, &cset->task_iters, iters_node)
++ if (it->cset_head == &scgrp->e_csets[ss->id])
++ it->cset_head = &dcgrp->e_csets[ss->id];
++ }
+ spin_unlock_irq(&css_set_lock);
+
+ /* default hierarchy doesn't enable controllers by default */
--- /dev/null
+From 320805ab61e5f1e2a5729ae266e16bec2904050c Mon Sep 17 00:00:00 2001
+From: Michael Kelley <mikelley@microsoft.com>
+Date: Thu, 18 May 2023 08:13:52 -0700
+Subject: Drivers: hv: vmbus: Fix vmbus_wait_for_unload() to scan present CPUs
+
+From: Michael Kelley <mikelley@microsoft.com>
+
+commit 320805ab61e5f1e2a5729ae266e16bec2904050c upstream.
+
+vmbus_wait_for_unload() may be called in the panic path after other
+CPUs are stopped. vmbus_wait_for_unload() currently loops through
+online CPUs looking for the UNLOAD response message. But the values of
+CONFIG_KEXEC_CORE and crash_kexec_post_notifiers affect the path used
+to stop the other CPUs, and in one of the paths the stopped CPUs
+are removed from cpu_online_mask. This removal happens in both
+x86/x64 and arm64 architectures. In such a case, vmbus_wait_for_unload()
+only checks the panic'ing CPU, and misses the UNLOAD response message
+except when the panic'ing CPU is CPU 0. vmbus_wait_for_unload()
+eventually times out, but only after waiting 100 seconds.
+
+Fix this by looping through *present* CPUs in vmbus_wait_for_unload().
+The cpu_present_mask is not modified by stopping the other CPUs in the
+panic path, nor should it be.
+
+Also, in a CoCo VM the synic_message_page is not allocated in
+hv_synic_alloc(), but is set and cleared in hv_synic_enable_regs()
+and hv_synic_disable_regs() such that it is set only when the CPU is
+online. If not all present CPUs are online when vmbus_wait_for_unload()
+is called, the synic_message_page might be NULL. Add a check for this.
+
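+The fix relies on cpu_online_mask always being a subset of
+cpu_present_mask; a minimal sketch of the hardened scan (mirroring the
+hunks below):
+
+  int cpu;
+
+  for_each_present_cpu(cpu) {
+          struct hv_per_cpu_context *hv_cpu
+                  = per_cpu_ptr(hv_context.cpu_context, cpu);
+
+          /*
+           * A present-but-offline CPU (stopped in the panic path,
+           * or never onlined in a CoCo VM) has no message page.
+           */
+          if (!hv_cpu->synic_message_page)
+                  continue;
+          /* ... check this CPU's slot for the UNLOAD response ... */
+  }
+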
+Fixes: cd95aad55793 ("Drivers: hv: vmbus: handle various crash scenarios")
+Cc: stable@vger.kernel.org
+Reported-by: John Starks <jostarks@microsoft.com>
+Signed-off-by: Michael Kelley <mikelley@microsoft.com>
+Reviewed-by: Vitaly Kuznetsov <vkuznets@redhat.com>
+Link: https://lore.kernel.org/r/1684422832-38476-1-git-send-email-mikelley@microsoft.com
+Signed-off-by: Wei Liu <wei.liu@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/hv/channel_mgmt.c | 18 ++++++++++++++++--
+ 1 file changed, 16 insertions(+), 2 deletions(-)
+
+--- a/drivers/hv/channel_mgmt.c
++++ b/drivers/hv/channel_mgmt.c
+@@ -765,11 +765,22 @@ static void vmbus_wait_for_unload(void)
+ if (completion_done(&vmbus_connection.unload_event))
+ goto completed;
+
+- for_each_online_cpu(cpu) {
++ for_each_present_cpu(cpu) {
+ struct hv_per_cpu_context *hv_cpu
+ = per_cpu_ptr(hv_context.cpu_context, cpu);
+
++ /*
++ * In a CoCo VM the synic_message_page is not allocated
++ * in hv_synic_alloc(). Instead it is set/cleared in
++ * hv_synic_enable_regs() and hv_synic_disable_regs()
++ * such that it is set only when the CPU is online. If
++ * not all present CPUs are online, the message page
++ * might be NULL, so skip such CPUs.
++ */
+ page_addr = hv_cpu->synic_message_page;
++ if (!page_addr)
++ continue;
++
+ msg = (struct hv_message *)page_addr
+ + VMBUS_MESSAGE_SINT;
+
+@@ -803,11 +814,14 @@ completed:
+ * maybe-pending messages on all CPUs to be able to receive new
+ * messages after we reconnect.
+ */
+- for_each_online_cpu(cpu) {
++ for_each_present_cpu(cpu) {
+ struct hv_per_cpu_context *hv_cpu
+ = per_cpu_ptr(hv_context.cpu_context, cpu);
+
+ page_addr = hv_cpu->synic_message_page;
++ if (!page_addr)
++ continue;
++
+ msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;
+ msg->header.message_type = HVMSG_NONE;
+ }
--- /dev/null
+From 7074732c8faee201a245a6f983008a5789c0be33 Mon Sep 17 00:00:00 2001
+From: Matthias May <matthias.may@westermo.com>
+Date: Thu, 21 Jul 2022 22:27:19 +0200
+Subject: ip_tunnels: allow VXLAN/GENEVE to inherit TOS/TTL from VLAN
+
+From: Matthias May <matthias.may@westermo.com>
+
+commit 7074732c8faee201a245a6f983008a5789c0be33 upstream.
+
+The current code allows VXLAN and GENEVE to inherit the TOS and,
+respectively, the TTL when the skb protocol is ETH_P_IP or ETH_P_IPV6.
+However, when the payload is VLAN encapsulated, this inheritance does
+not work, because the visible skb protocol is ETH_P_8021Q or
+ETH_P_8021AD.
+
+Use skb_protocol() instead of skb->protocol.
+
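+For reference, skb_protocol(skb, true) skips any 802.1Q/802.1AD tags
+and returns the encapsulated protocol; a sketch of the intended use
+(mirroring the hunks below):
+
+  __be16 payload_protocol = skb_protocol(skb, true);
+
+  /*
+   * A VLAN-tagged IPv4 payload now matches here, even though
+   * skb->protocol itself is ETH_P_8021Q or ETH_P_8021AD.
+   */
+  if (payload_protocol == htons(ETH_P_IP))
+          return iph->tos;
+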
+Signed-off-by: Matthias May <matthias.may@westermo.com>
+Link: https://lore.kernel.org/r/20220721202718.10092-1-matthias.may@westermo.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Cc: Nicolas Dichtel <nicolas.dichtel@6wind.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/net/ip_tunnels.h | 12 ++++++++----
+ 1 file changed, 8 insertions(+), 4 deletions(-)
+
+--- a/include/net/ip_tunnels.h
++++ b/include/net/ip_tunnels.h
+@@ -378,9 +378,11 @@ static inline int ip_tunnel_encap(struct
+ static inline u8 ip_tunnel_get_dsfield(const struct iphdr *iph,
+ const struct sk_buff *skb)
+ {
+- if (skb->protocol == htons(ETH_P_IP))
++ __be16 payload_protocol = skb_protocol(skb, true);
++
++ if (payload_protocol == htons(ETH_P_IP))
+ return iph->tos;
+- else if (skb->protocol == htons(ETH_P_IPV6))
++ else if (payload_protocol == htons(ETH_P_IPV6))
+ return ipv6_get_dsfield((const struct ipv6hdr *)iph);
+ else
+ return 0;
+@@ -389,9 +391,11 @@ static inline u8 ip_tunnel_get_dsfield(c
+ static inline u8 ip_tunnel_get_ttl(const struct iphdr *iph,
+ const struct sk_buff *skb)
+ {
+- if (skb->protocol == htons(ETH_P_IP))
++ __be16 payload_protocol = skb_protocol(skb, true);
++
++ if (payload_protocol == htons(ETH_P_IP))
+ return iph->ttl;
+- else if (skb->protocol == htons(ETH_P_IPV6))
++ else if (payload_protocol == htons(ETH_P_IPV6))
+ return ((const struct ipv6hdr *)iph)->hop_limit;
+ else
+ return 0;
--- /dev/null
+From 3c40eb8145325b0f5b93b8a169146078cb2c49d6 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Martin=20Hundeb=C3=B8ll?= <martin@geanix.com>
+Date: Wed, 7 Jun 2023 10:27:12 +0200
+Subject: mmc: meson-gx: remove redundant mmc_request_done() call from irq context
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Martin Hundebøll <martin@geanix.com>
+
+commit 3c40eb8145325b0f5b93b8a169146078cb2c49d6 upstream.
+
+The call to mmc_request_done() can schedule, so it must not be called
+from irq context. Wake the irq thread if it needs to be called, and let
+its existing logic do its work.
+
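+A minimal sketch of the hard-irq/threaded-irq split this relies on,
+with hypothetical handler names (the driver's own handlers differ):
+
+  static irqreturn_t hw_irq(int irq, void *dev_id)
+  {
+          /* hard irq context: must not sleep or schedule */
+          return IRQ_WAKE_THREAD;         /* defer completion */
+  }
+
+  static irqreturn_t thread_irq(int irq, void *dev_id)
+  {
+          struct meson_host *host = dev_id;
+
+          /* process context: mmc_request_done() may schedule */
+          mmc_request_done(host->mmc, host->cmd->mrq);
+          return IRQ_HANDLED;
+  }
+
+  ret = request_threaded_irq(irq, hw_irq, thread_irq, IRQF_ONESHOT,
+                             "meson-mmc", host);
+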
+Fixes the following kernel bug, which appears when running an RT-patched
+kernel on the Amlogic Meson AXG A113X SoC:
+[ 11.111407] BUG: scheduling while atomic: kworker/0:1H/75/0x00010001
+[ 11.111438] Modules linked in:
+[ 11.111451] CPU: 0 PID: 75 Comm: kworker/0:1H Not tainted 6.4.0-rc3-rt2-rtx-00081-gfd07f41ed6b4-dirty #1
+[ 11.111461] Hardware name: RTX AXG A113X Linux Platform Board (DT)
+[ 11.111469] Workqueue: kblockd blk_mq_run_work_fn
+[ 11.111492] Call trace:
+[ 11.111497] dump_backtrace+0xac/0xe8
+[ 11.111510] show_stack+0x18/0x28
+[ 11.111518] dump_stack_lvl+0x48/0x60
+[ 11.111530] dump_stack+0x18/0x24
+[ 11.111537] __schedule_bug+0x4c/0x68
+[ 11.111548] __schedule+0x80/0x574
+[ 11.111558] schedule_loop+0x2c/0x50
+[ 11.111567] schedule_rtlock+0x14/0x20
+[ 11.111576] rtlock_slowlock_locked+0x468/0x730
+[ 11.111587] rt_spin_lock+0x40/0x64
+[ 11.111596] __wake_up_common_lock+0x5c/0xc4
+[ 11.111610] __wake_up+0x18/0x24
+[ 11.111620] mmc_blk_mq_req_done+0x68/0x138
+[ 11.111633] mmc_request_done+0x104/0x118
+[ 11.111644] meson_mmc_request_done+0x38/0x48
+[ 11.111654] meson_mmc_irq+0x128/0x1f0
+[ 11.111663] __handle_irq_event_percpu+0x70/0x114
+[ 11.111674] handle_irq_event_percpu+0x18/0x4c
+[ 11.111683] handle_irq_event+0x80/0xb8
+[ 11.111691] handle_fasteoi_irq+0xa4/0x120
+[ 11.111704] handle_irq_desc+0x20/0x38
+[ 11.111712] generic_handle_domain_irq+0x1c/0x28
+[ 11.111721] gic_handle_irq+0x8c/0xa8
+[ 11.111735] call_on_irq_stack+0x24/0x4c
+[ 11.111746] do_interrupt_handler+0x88/0x94
+[ 11.111757] el1_interrupt+0x34/0x64
+[ 11.111769] el1h_64_irq_handler+0x18/0x24
+[ 11.111779] el1h_64_irq+0x64/0x68
+[ 11.111786] __add_wait_queue+0x0/0x4c
+[ 11.111795] mmc_blk_rw_wait+0x84/0x118
+[ 11.111804] mmc_blk_mq_issue_rq+0x5c4/0x654
+[ 11.111814] mmc_mq_queue_rq+0x194/0x214
+[ 11.111822] blk_mq_dispatch_rq_list+0x3ac/0x528
+[ 11.111834] __blk_mq_sched_dispatch_requests+0x340/0x4d0
+[ 11.111847] blk_mq_sched_dispatch_requests+0x38/0x70
+[ 11.111858] blk_mq_run_work_fn+0x3c/0x70
+[ 11.111865] process_one_work+0x17c/0x1f0
+[ 11.111876] worker_thread+0x1d4/0x26c
+[ 11.111885] kthread+0xe4/0xf4
+[ 11.111894] ret_from_fork+0x10/0x20
+
+Fixes: 51c5d8447bd7 ("MMC: meson: initial support for GX platforms")
+Cc: stable@vger.kernel.org
+Signed-off-by: Martin Hundebøll <martin@geanix.com>
+Link: https://lore.kernel.org/r/20230607082713.517157-1-martin@geanix.com
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/mmc/host/meson-gx-mmc.c | 10 ++--------
+ 1 file changed, 2 insertions(+), 8 deletions(-)
+
+--- a/drivers/mmc/host/meson-gx-mmc.c
++++ b/drivers/mmc/host/meson-gx-mmc.c
+@@ -970,11 +970,8 @@ static irqreturn_t meson_mmc_irq(int irq
+ if (status & (IRQ_END_OF_CHAIN | IRQ_RESP_STATUS)) {
+ if (data && !cmd->error)
+ data->bytes_xfered = data->blksz * data->blocks;
+- if (meson_mmc_bounce_buf_read(data) ||
+- meson_mmc_get_next_command(cmd))
+- ret = IRQ_WAKE_THREAD;
+- else
+- ret = IRQ_HANDLED;
++
++ return IRQ_WAKE_THREAD;
+ }
+
+ out:
+@@ -986,9 +983,6 @@ out:
+ writel(start, host->regs + SD_EMMC_START);
+ }
+
+- if (ret == IRQ_HANDLED)
+- meson_mmc_request_done(host->mmc, cmd->mrq);
+-
+ return ret;
+ }
+
--- /dev/null
+From 47b3ad6b7842f49d374a01b054a4b1461a621bdc Mon Sep 17 00:00:00 2001
+From: Christophe Kerello <christophe.kerello@foss.st.com>
+Date: Tue, 13 Jun 2023 15:41:46 +0200
+Subject: mmc: mmci: stm32: fix max busy timeout calculation
+
+From: Christophe Kerello <christophe.kerello@foss.st.com>
+
+commit 47b3ad6b7842f49d374a01b054a4b1461a621bdc upstream.
+
+The way the timeout is currently calculated can produce a u64 timeout
+value in mmci_start_command(). That value is then cast into a u32
+register, and the truncation leads to mmc erase failures with some SD
+cards.
+
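+A worked example, assuming actual_clock = 50 MHz (50000 clock cycles
+per millisecond):
+
+  old (64-bit): ~0UL / (50000000 / 1000) = 0xFFFFFFFFFFFFFFFF / 50000
+                which is about 3.7 * 10^14 ms; converting this back to
+                clock cycles overflows the u32 timeout register
+
+  new:          U32_MAX / DIV_ROUND_UP(50000000, 1000)
+                = 4294967295 / 50000 = 85899 ms;
+                85899 ms * 50000 = 4294950000 cycles, which fits in u32
+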
+Fixes: 8266c585f489 ("mmc: mmci: add hardware busy timeout feature")
+Signed-off-by: Yann Gautier <yann.gautier@foss.st.com>
+Signed-off-by: Christophe Kerello <christophe.kerello@foss.st.com>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20230613134146.418016-1-yann.gautier@foss.st.com
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/mmc/host/mmci.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/mmc/host/mmci.c
++++ b/drivers/mmc/host/mmci.c
+@@ -1728,7 +1728,8 @@ static void mmci_set_max_busy_timeout(st
+ return;
+
+ if (host->variant->busy_timeout && mmc->actual_clock)
+- max_busy_timeout = ~0UL / (mmc->actual_clock / MSEC_PER_SEC);
++ max_busy_timeout = U32_MAX / DIV_ROUND_UP(mmc->actual_clock,
++ MSEC_PER_SEC);
+
+ mmc->max_busy_timeout = max_busy_timeout;
+ }
--- /dev/null
+From e6f9e590b72e12bbb86b1b8be7e1981f357392ad Mon Sep 17 00:00:00 2001
+From: Stephan Gerhold <stephan@gerhold.net>
+Date: Thu, 18 May 2023 11:39:36 +0200
+Subject: mmc: sdhci-msm: Disable broken 64-bit DMA on MSM8916
+
+From: Stephan Gerhold <stephan@gerhold.net>
+
+commit e6f9e590b72e12bbb86b1b8be7e1981f357392ad upstream.
+
+While SDHCI claims to support 64-bit DMA on MSM8916 it does not seem to
+be properly functional. It is not immediately obvious because SDHCI is
+usually used with IOMMU bypassed on this SoC, and all physical memory
+has 32-bit addresses. But when trying to enable the IOMMU it quickly
+fails with an error such as the following:
+
+ arm-smmu 1e00000.iommu: Unhandled context fault:
+ fsr=0x402, iova=0xfffff200, fsynr=0xe0000, cbfrsynra=0x140, cb=3
+ mmc1: ADMA error: 0x02000000
+ mmc1: sdhci: ============ SDHCI REGISTER DUMP ===========
+ mmc1: sdhci: Sys addr: 0x00000000 | Version: 0x00002e02
+ mmc1: sdhci: Blk size: 0x00000008 | Blk cnt: 0x00000000
+ mmc1: sdhci: Argument: 0x00000000 | Trn mode: 0x00000013
+ mmc1: sdhci: Present: 0x03f80206 | Host ctl: 0x00000019
+ mmc1: sdhci: Power: 0x0000000f | Blk gap: 0x00000000
+ mmc1: sdhci: Wake-up: 0x00000000 | Clock: 0x00000007
+ mmc1: sdhci: Timeout: 0x0000000a | Int stat: 0x00000001
+ mmc1: sdhci: Int enab: 0x03ff900b | Sig enab: 0x03ff100b
+ mmc1: sdhci: ACmd stat: 0x00000000 | Slot int: 0x00000000
+ mmc1: sdhci: Caps: 0x322dc8b2 | Caps_1: 0x00008007
+ mmc1: sdhci: Cmd: 0x0000333a | Max curr: 0x00000000
+ mmc1: sdhci: Resp[0]: 0x00000920 | Resp[1]: 0x5b590000
+ mmc1: sdhci: Resp[2]: 0xe6487f80 | Resp[3]: 0x0a404094
+ mmc1: sdhci: Host ctl2: 0x00000008
+ mmc1: sdhci: ADMA Err: 0x00000001 | ADMA Ptr: 0x0000000ffffff224
+ mmc1: sdhci_msm: ----------- VENDOR REGISTER DUMP -----------
+ mmc1: sdhci_msm: DLL sts: 0x00000000 | DLL cfg: 0x60006400 | DLL cfg2: 0x00000000
+ mmc1: sdhci_msm: DLL cfg3: 0x00000000 | DLL usr ctl: 0x00000000 | DDR cfg: 0x00000000
+ mmc1: sdhci_msm: Vndr func: 0x00018a9c | Vndr func2 : 0xf88018a8 Vndr func3: 0x00000000
+ mmc1: sdhci: ============================================
+ mmc1: sdhci: fffffffff200: DMA 0x0000ffffffffe100, LEN 0x0008, Attr=0x21
+ mmc1: sdhci: fffffffff20c: DMA 0x0000000000000000, LEN 0x0000, Attr=0x03
+
+Looking closely, it is obvious that only the low 32 bits of the address
+(0xfffff200) arrive at the SMMU; the upper 16 bits (0xffff...) get
+lost somewhere. This might not be a limitation of the SDHCI itself but
+perhaps the bus/interconnect it is connected to, or even the connection
+to the SMMU.
+
+Work around this by setting SDHCI_QUIRK2_BROKEN_64_BIT_DMA to avoid
+using 64-bit addresses.
+
+Signed-off-by: Stephan Gerhold <stephan@gerhold.net>
+Acked-by: Adrian Hunter <adrian.hunter@intel.com>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20230518-msm8916-64bit-v1-1-5694b0f35211@gerhold.net
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/mmc/host/sdhci-msm.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/mmc/host/sdhci-msm.c
++++ b/drivers/mmc/host/sdhci-msm.c
+@@ -2228,6 +2228,9 @@ static inline void sdhci_msm_get_of_prop
+ msm_host->ddr_config = DDR_CONFIG_POR_VAL;
+
+ of_property_read_u32(node, "qcom,dll-config", &msm_host->dll_config);
++
++ if (of_device_is_compatible(node, "qcom,msm8916-sdhci"))
++ host->quirks2 |= SDHCI_QUIRK2_BROKEN_64_BIT_DMA;
+ }
+
+ static int sdhci_msm_gcc_reset(struct device *dev, struct sdhci_host *host)
--- /dev/null
+From 679bd7ebdd315bf457a4740b306ae99f1d0a403d Mon Sep 17 00:00:00 2001
+From: Ryusuke Konishi <konishi.ryusuke@gmail.com>
+Date: Fri, 9 Jun 2023 12:57:32 +0900
+Subject: nilfs2: fix buffer corruption due to concurrent device reads
+
+From: Ryusuke Konishi <konishi.ryusuke@gmail.com>
+
+commit 679bd7ebdd315bf457a4740b306ae99f1d0a403d upstream.
+
+Analysis of a syzbot report turned up three cases where nilfs2
+allocates block device buffers directly via sb_getblk and concurrent
+reads from the device can corrupt the allocated buffers.
+
+Nilfs2 uses sb_getblk for the segment summary blocks that make up a log
+header, for the super root block that forms the log trailer, and when
+moving and writing the second superblock after a filesystem resize.
+
+In each of these cases, the uptodate flag is not set when the metadata
+to be written is stored in the allocated buffers, so the stored metadata
+is overwritten if a device read of the same block occurs concurrently
+before the write. This causes metadata corruption and misbehavior in the
+log write itself, producing the warnings in nilfs_btree_assign() that
+were reported.
+
+Fix these issues by setting the uptodate flag on the buffer head when,
+or before, each buffer obtained with sb_getblk is first modified, and by
+clearing the flag on failure.
+
+When setting the uptodate flag, use the lock_buffer/unlock_buffer pair
+for the necessary exclusion, and fill the buffer so that uninitialized
+bytes cannot leak into data read by others. The buffers for segment
+summary blocks are filled incrementally, so if the uptodate flag is
+unset at their allocation, set the flag and zero-fill the buffer once at
+that point.
+
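+Distilled from the hunks below, the pattern now applied to each buffer
+obtained with sb_getblk is roughly:
+
+  bh = sb_getblk(sb, blocknr);
+  if (unlikely(!bh))
+          return -ENOMEM;
+
+  lock_buffer(bh);
+  if (!buffer_uptodate(bh)) {
+          /* prevent concurrent reads from exposing stale data */
+          memset(bh->b_data, 0, bh->b_size);
+          set_buffer_uptodate(bh);
+  }
+  unlock_buffer(bh);
+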
+Also, in the superblock move routine, the starting point of the memset
+call that zero-fills the block is specified incorrectly, which can cause
+a buffer overflow on file systems with block sizes greater than 4KiB.
+In addition, when the superblock is moved within a large block, the
+superblock data may be destroyed by the zero-fill before it is copied,
+so that possibility must be accounted for. Fix these potential issues
+as well.
+
+Link: https://lkml.kernel.org/r/20230609035732.20426-1-konishi.ryusuke@gmail.com
+Signed-off-by: Ryusuke Konishi <konishi.ryusuke@gmail.com>
+Reported-by: syzbot+31837fe952932efc8fb9@syzkaller.appspotmail.com
+Closes: https://lkml.kernel.org/r/00000000000030000a05e981f475@google.com
+Tested-by: Ryusuke Konishi <konishi.ryusuke@gmail.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/nilfs2/segbuf.c | 6 ++++++
+ fs/nilfs2/segment.c | 7 +++++++
+ fs/nilfs2/super.c | 23 ++++++++++++++++++++++-
+ 3 files changed, 35 insertions(+), 1 deletion(-)
+
+--- a/fs/nilfs2/segbuf.c
++++ b/fs/nilfs2/segbuf.c
+@@ -101,6 +101,12 @@ int nilfs_segbuf_extend_segsum(struct ni
+ if (unlikely(!bh))
+ return -ENOMEM;
+
++ lock_buffer(bh);
++ if (!buffer_uptodate(bh)) {
++ memset(bh->b_data, 0, bh->b_size);
++ set_buffer_uptodate(bh);
++ }
++ unlock_buffer(bh);
+ nilfs_segbuf_add_segsum_buffer(segbuf, bh);
+ return 0;
+ }
+--- a/fs/nilfs2/segment.c
++++ b/fs/nilfs2/segment.c
+@@ -984,10 +984,13 @@ static void nilfs_segctor_fill_in_super_
+ unsigned int isz, srsz;
+
+ bh_sr = NILFS_LAST_SEGBUF(&sci->sc_segbufs)->sb_super_root;
++
++ lock_buffer(bh_sr);
+ raw_sr = (struct nilfs_super_root *)bh_sr->b_data;
+ isz = nilfs->ns_inode_size;
+ srsz = NILFS_SR_BYTES(isz);
+
++ raw_sr->sr_sum = 0; /* Ensure initialization within this update */
+ raw_sr->sr_bytes = cpu_to_le16(srsz);
+ raw_sr->sr_nongc_ctime
+ = cpu_to_le64(nilfs_doing_gc() ?
+@@ -1001,6 +1004,8 @@ static void nilfs_segctor_fill_in_super_
+ nilfs_write_inode_common(nilfs->ns_sufile, (void *)raw_sr +
+ NILFS_SR_SUFILE_OFFSET(isz), 1);
+ memset((void *)raw_sr + srsz, 0, nilfs->ns_blocksize - srsz);
++ set_buffer_uptodate(bh_sr);
++ unlock_buffer(bh_sr);
+ }
+
+ static void nilfs_redirty_inodes(struct list_head *head)
+@@ -1783,6 +1788,7 @@ static void nilfs_abort_logs(struct list
+ list_for_each_entry(segbuf, logs, sb_list) {
+ list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
+ b_assoc_buffers) {
++ clear_buffer_uptodate(bh);
+ if (bh->b_page != bd_page) {
+ if (bd_page)
+ end_page_writeback(bd_page);
+@@ -1794,6 +1800,7 @@ static void nilfs_abort_logs(struct list
+ b_assoc_buffers) {
+ clear_buffer_async_write(bh);
+ if (bh == segbuf->sb_super_root) {
++ clear_buffer_uptodate(bh);
+ if (bh->b_page != bd_page) {
+ end_page_writeback(bd_page);
+ bd_page = bh->b_page;
+--- a/fs/nilfs2/super.c
++++ b/fs/nilfs2/super.c
+@@ -372,10 +372,31 @@ static int nilfs_move_2nd_super(struct s
+ goto out;
+ }
+ nsbp = (void *)nsbh->b_data + offset;
+- memset(nsbp, 0, nilfs->ns_blocksize);
+
++ lock_buffer(nsbh);
+ if (sb2i >= 0) {
++ /*
++ * The position of the second superblock only changes by 4KiB,
++ * which is larger than the maximum superblock data size
++ * (= 1KiB), so there is no need to use memmove() to allow
++ * overlap between source and destination.
++ */
+ memcpy(nsbp, nilfs->ns_sbp[sb2i], nilfs->ns_sbsize);
++
++ /*
++ * Zero fill after copy to avoid overwriting in case of move
++ * within the same block.
++ */
++ memset(nsbh->b_data, 0, offset);
++ memset((void *)nsbp + nilfs->ns_sbsize, 0,
++ nsbh->b_size - offset - nilfs->ns_sbsize);
++ } else {
++ memset(nsbh->b_data, 0, nsbh->b_size);
++ }
++ set_buffer_uptodate(nsbh);
++ unlock_buffer(nsbh);
++
++ if (sb2i >= 0) {
+ brelse(nilfs->ns_sbh[sb2i]);
+ nilfs->ns_sbh[sb2i] = nsbh;
+ nilfs->ns_sbp[sb2i] = nsbp;
--- /dev/null
+From 440b5e3663271b0ffbd4908115044a6a51fb938b Mon Sep 17 00:00:00 2001
+From: Dexuan Cui <decui@microsoft.com>
+Date: Wed, 14 Jun 2023 21:44:47 -0700
+Subject: PCI: hv: Fix a race condition bug in hv_pci_query_relations()
+
+From: Dexuan Cui <decui@microsoft.com>
+
+commit 440b5e3663271b0ffbd4908115044a6a51fb938b upstream.
+
+Since day 1 of the driver, there has been a race between
+hv_pci_query_relations() and survey_child_resources(): during fast
+device hotplug, hv_pci_query_relations() may error out due to
+device-remove and the stack variable 'comp' is no longer valid;
+however, pci_devices_present_work() -> survey_child_resources() ->
+complete() may be running on another CPU and accessing the no-longer-valid
+'comp'. Fix the race by flushing the workqueue before we exit from
+hv_pci_query_relations().
+
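+The underlying hazard is the classic on-stack completion pattern; a
+minimal sketch with hypothetical names (not the driver's actual code):
+
+  static int query(struct hv_device *hdev)
+  {
+          struct completion comp;         /* lives on this stack frame */
+
+          init_completion(&comp);
+          if (send_request(hdev, &comp) < 0)      /* hypothetical sender */
+                  return -ENODEV;         /* early exit, frame dies... */
+          /*
+           * ...but a work item scheduled by the channel callback may
+           * still call complete(&comp) afterwards, unless hbus->wq is
+           * flushed before returning.
+           */
+          wait_for_completion(&comp);
+          return 0;
+  }
+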
+Fixes: 4daace0d8ce8 ("PCI: hv: Add paravirtual PCI front-end for Microsoft Hyper-V VMs")
+Signed-off-by: Dexuan Cui <decui@microsoft.com>
+Reviewed-by: Michael Kelley <mikelley@microsoft.com>
+Acked-by: Lorenzo Pieralisi <lpieralisi@kernel.org>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20230615044451.5580-2-decui@microsoft.com
+Signed-off-by: Wei Liu <wei.liu@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/pci/controller/pci-hyperv.c | 18 ++++++++++++++++++
+ 1 file changed, 18 insertions(+)
+
+--- a/drivers/pci/controller/pci-hyperv.c
++++ b/drivers/pci/controller/pci-hyperv.c
+@@ -2912,6 +2912,24 @@ static int hv_pci_query_relations(struct
+ if (!ret)
+ ret = wait_for_response(hdev, &comp);
+
++ /*
++ * In the case of fast device addition/removal, it's possible that
++ * vmbus_sendpacket() or wait_for_response() returns -ENODEV but we
++ * already got a PCI_BUS_RELATIONS* message from the host and the
++ * channel callback already scheduled a work to hbus->wq, which can be
++ * running pci_devices_present_work() -> survey_child_resources() ->
++ * complete(&hbus->survey_event), even after hv_pci_query_relations()
++ * exits and the stack variable 'comp' is no longer valid; as a result,
++ * a hang or a page fault may happen when the complete() calls
++ * raw_spin_lock_irqsave(). Flush hbus->wq before we exit from
++ * hv_pci_query_relations() to avoid the issues. Note: if 'ret' is
++ * -ENODEV, there can't be any more work item scheduled to hbus->wq
++ * after the flush_workqueue(): see vmbus_onoffer_rescind() ->
++ * vmbus_reset_channel_cb(), vmbus_rescind_cleanup() ->
++ * channel->rescind = true.
++ */
++ flush_workqueue(hbus->wq);
++
+ return ret;
+ }
+
--- /dev/null
+From 2738d5ab7929a845b654cd171a1e275c37eb428e Mon Sep 17 00:00:00 2001
+From: Dexuan Cui <decui@microsoft.com>
+Date: Wed, 14 Jun 2023 21:44:48 -0700
+Subject: PCI: hv: Fix a race condition in hv_irq_unmask() that can cause panic
+
+From: Dexuan Cui <decui@microsoft.com>
+
+commit 2738d5ab7929a845b654cd171a1e275c37eb428e upstream.
+
+When the host tries to remove a PCI device, the host first sends a
+PCI_EJECT message to the guest, and the guest is supposed to gracefully
+remove the PCI device and send a PCI_EJECTION_COMPLETE message to the host;
+the host then sends a VMBus message CHANNELMSG_RESCIND_CHANNELOFFER to
+the guest (when the guest receives this message, the device is already
+unassigned from the guest) and the guest can do some final cleanup work;
+if the guest fails to respond to the PCI_EJECT message within one minute,
+the host sends the VMBus message CHANNELMSG_RESCIND_CHANNELOFFER and
+removes the PCI device forcibly.
+
+In the case of fast device addition/removal, it's possible that the PCI
+device driver is still configuring MSI-X interrupts when the guest receives
+the PCI_EJECT message; the channel callback calls hv_pci_eject_device(),
+which sets hpdev->state to hv_pcichild_ejecting, and schedules a work
+hv_eject_device_work(); if the PCI device driver is calling
+pci_alloc_irq_vectors() -> ... -> hv_compose_msi_msg(), we can break the
+while loop in hv_compose_msi_msg() due to the updated hpdev->state, and
+leave data->chip_data with its default value of NULL; later, when the PCI
+device driver calls request_irq() -> ... -> hv_irq_unmask(), the guest
+crashes in hv_arch_irq_unmask() due to data->chip_data being NULL.
+
+Fix the issue by not testing hpdev->state in the while loop: when the
+guest receives PCI_EJECT, the device is still assigned to the guest, and
+the guest has one minute to finish the device removal gracefully. We don't
+really need to (and we should not) test hpdev->state in the loop.
+
+Fixes: de0aa7b2f97d ("PCI: hv: Fix 2 hang issues in hv_compose_msi_msg()")
+Signed-off-by: Dexuan Cui <decui@microsoft.com>
+Reviewed-by: Michael Kelley <mikelley@microsoft.com>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20230615044451.5580-3-decui@microsoft.com
+Signed-off-by: Wei Liu <wei.liu@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/pci/controller/pci-hyperv.c | 11 +++++------
+ 1 file changed, 5 insertions(+), 6 deletions(-)
+
+--- a/drivers/pci/controller/pci-hyperv.c
++++ b/drivers/pci/controller/pci-hyperv.c
+@@ -1228,6 +1228,11 @@ static void hv_irq_unmask(struct irq_dat
+ pbus = pdev->bus;
+ hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata);
+ int_desc = data->chip_data;
++ if (!int_desc) {
++ dev_warn(&hbus->hdev->device, "%s() can not unmask irq %u\n",
++ __func__, data->irq);
++ return;
++ }
+
+ spin_lock_irqsave(&hbus->retarget_msi_interrupt_lock, flags);
+
+@@ -1544,12 +1549,6 @@ static void hv_compose_msi_msg(struct ir
+ hv_pci_onchannelcallback(hbus);
+ spin_unlock_irqrestore(&channel->sched_lock, flags);
+
+- if (hpdev->state == hv_pcichild_ejecting) {
+- dev_err_once(&hbus->hdev->device,
+- "the device is being ejected\n");
+- goto enable_tasklet;
+- }
+-
+ udelay(100);
+ }
+
--- /dev/null
+From add9195e69c94b32e96f78c2f9cea68f0e850b3f Mon Sep 17 00:00:00 2001
+From: Dexuan Cui <decui@microsoft.com>
+Date: Wed, 14 Jun 2023 21:44:49 -0700
+Subject: PCI: hv: Remove the useless hv_pcichild_state from struct hv_pci_dev
+
+From: Dexuan Cui <decui@microsoft.com>
+
+commit add9195e69c94b32e96f78c2f9cea68f0e850b3f upstream.
+
+The hpdev->state is never really useful. The only use in
+hv_pci_eject_device() and hv_eject_device_work() is not really necessary.
+
+Signed-off-by: Dexuan Cui <decui@microsoft.com>
+Reviewed-by: Michael Kelley <mikelley@microsoft.com>
+Acked-by: Lorenzo Pieralisi <lpieralisi@kernel.org>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20230615044451.5580-4-decui@microsoft.com
+Signed-off-by: Wei Liu <wei.liu@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/pci/controller/pci-hyperv.c | 12 ------------
+ 1 file changed, 12 deletions(-)
+
+--- a/drivers/pci/controller/pci-hyperv.c
++++ b/drivers/pci/controller/pci-hyperv.c
+@@ -520,19 +520,10 @@ struct hv_dr_state {
+ struct hv_pcidev_description func[];
+ };
+
+-enum hv_pcichild_state {
+- hv_pcichild_init = 0,
+- hv_pcichild_requirements,
+- hv_pcichild_resourced,
+- hv_pcichild_ejecting,
+- hv_pcichild_maximum
+-};
+-
+ struct hv_pci_dev {
+ /* List protected by pci_rescan_remove_lock */
+ struct list_head list_entry;
+ refcount_t refs;
+- enum hv_pcichild_state state;
+ struct pci_slot *pci_slot;
+ struct hv_pcidev_description desc;
+ bool reported_missing;
+@@ -2378,8 +2369,6 @@ static void hv_eject_device_work(struct
+ hpdev = container_of(work, struct hv_pci_dev, wrk);
+ hbus = hpdev->hbus;
+
+- WARN_ON(hpdev->state != hv_pcichild_ejecting);
+-
+ /*
+ * Ejection can come before or after the PCI bus has been set up, so
+ * attempt to find it and tear down the bus state, if it exists. This
+@@ -2438,7 +2427,6 @@ static void hv_pci_eject_device(struct h
+ return;
+ }
+
+- hpdev->state = hv_pcichild_ejecting;
+ get_pcichild(hpdev);
+ INIT_WORK(&hpdev->wrk, hv_eject_device_work);
+ get_hvpcibus(hbus);
--- /dev/null
+From a847234e24d03d01a9566d1d9dcce018cc018d67 Mon Sep 17 00:00:00 2001
+From: Dexuan Cui <decui@microsoft.com>
+Date: Wed, 14 Jun 2023 21:44:50 -0700
+Subject: Revert "PCI: hv: Fix a timing issue which causes kdump to fail occasionally"
+
+From: Dexuan Cui <decui@microsoft.com>
+
+commit a847234e24d03d01a9566d1d9dcce018cc018d67 upstream.
+
+This reverts commit d6af2ed29c7c1c311b96dac989dcb991e90ee195.
+
+The statement "the hv_pci_bus_exit() call releases structures of all its
+child devices" in commit d6af2ed29c7c is not true: in the path
+hv_pci_probe() -> hv_pci_enter_d0() -> hv_pci_bus_exit(hdev, true): the
+parameter "keep_devs" is true, so hv_pci_bus_exit() does *not* release the
+child "struct hv_pci_dev *hpdev" that is created earlier in
+pci_devices_present_work() -> new_pcichild_device().
+
+Commit d6af2ed29c7c was originally made in July 2020 for RHEL 7.7,
+which used the old version of hv_pci_bus_exit(); when the commit was
+rebased and merged upstream, nobody noticed that it was no longer
+necessary. The commit itself doesn't cause any issue, but it makes
+hv_pci_probe() more complicated. Revert it to facilitate some upcoming
+changes to hv_pci_probe().
+
+Signed-off-by: Dexuan Cui <decui@microsoft.com>
+Reviewed-by: Michael Kelley <mikelley@microsoft.com>
+Acked-by: Wei Hu <weh@microsoft.com>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20230615044451.5580-5-decui@microsoft.com
+Signed-off-by: Wei Liu <wei.liu@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/pci/controller/pci-hyperv.c | 71 +++++++++++++++++-------------------
+ 1 file changed, 34 insertions(+), 37 deletions(-)
+
+--- a/drivers/pci/controller/pci-hyperv.c
++++ b/drivers/pci/controller/pci-hyperv.c
+@@ -2842,8 +2842,10 @@ static int hv_pci_enter_d0(struct hv_dev
+ struct pci_bus_d0_entry *d0_entry;
+ struct hv_pci_compl comp_pkt;
+ struct pci_packet *pkt;
++ bool retry = true;
+ int ret;
+
++enter_d0_retry:
+ /*
+ * Tell the host that the bus is ready to use, and moved into the
+ * powered-on state. This includes telling the host which region
+@@ -2870,6 +2872,38 @@ static int hv_pci_enter_d0(struct hv_dev
+ if (ret)
+ goto exit;
+
++ /*
++ * In certain case (Kdump) the pci device of interest was
++ * not cleanly shut down and resource is still held on host
++ * side, the host could return invalid device status.
++ * We need to explicitly request host to release the resource
++ * and try to enter D0 again.
++ */
++ if (comp_pkt.completion_status < 0 && retry) {
++ retry = false;
++
++ dev_err(&hdev->device, "Retrying D0 Entry\n");
++
++ /*
++ * Hv_pci_bus_exit() calls hv_send_resources_released()
++ * to free up resources of its child devices.
++ * In the kdump kernel we need to set the
++ * wslot_res_allocated to 255 so it scans all child
++ * devices to release resources allocated in the
++ * normal kernel before panic happened.
++ */
++ hbus->wslot_res_allocated = 255;
++
++ ret = hv_pci_bus_exit(hdev, true);
++
++ if (ret == 0) {
++ kfree(pkt);
++ goto enter_d0_retry;
++ }
++ dev_err(&hdev->device,
++ "Retrying D0 failed with ret %d\n", ret);
++ }
++
+ if (comp_pkt.completion_status < 0) {
+ dev_err(&hdev->device,
+ "PCI Pass-through VSP failed D0 Entry with status %x\n",
+@@ -3125,7 +3159,6 @@ static int hv_pci_probe(struct hv_device
+ struct hv_pcibus_device *hbus;
+ u16 dom_req, dom;
+ char *name;
+- bool enter_d0_retry = true;
+ int ret;
+
+ /*
+@@ -3246,47 +3279,11 @@ static int hv_pci_probe(struct hv_device
+ if (ret)
+ goto free_fwnode;
+
+-retry:
+ ret = hv_pci_query_relations(hdev);
+ if (ret)
+ goto free_irq_domain;
+
+ ret = hv_pci_enter_d0(hdev);
+- /*
+- * In certain case (Kdump) the pci device of interest was
+- * not cleanly shut down and resource is still held on host
+- * side, the host could return invalid device status.
+- * We need to explicitly request host to release the resource
+- * and try to enter D0 again.
+- * Since the hv_pci_bus_exit() call releases structures
+- * of all its child devices, we need to start the retry from
+- * hv_pci_query_relations() call, requesting host to send
+- * the synchronous child device relations message before this
+- * information is needed in hv_send_resources_allocated()
+- * call later.
+- */
+- if (ret == -EPROTO && enter_d0_retry) {
+- enter_d0_retry = false;
+-
+- dev_err(&hdev->device, "Retrying D0 Entry\n");
+-
+- /*
+- * Hv_pci_bus_exit() calls hv_send_resources_released()
+- * to free up resources of its child devices.
+- * In the kdump kernel we need to set the
+- * wslot_res_allocated to 255 so it scans all child
+- * devices to release resources allocated in the
+- * normal kernel before panic happened.
+- */
+- hbus->wslot_res_allocated = 255;
+- ret = hv_pci_bus_exit(hdev, true);
+-
+- if (ret == 0)
+- goto retry;
+-
+- dev_err(&hdev->device,
+- "Retrying D0 failed with ret %d\n", ret);
+- }
+ if (ret)
+ goto free_irq_domain;
+
selftests-mptcp-lib-skip-if-not-below-kernel-version.patch
selftests-mptcp-pm-nl-remove-hardcoded-default-limits.patch
selftests-mptcp-join-skip-check-if-mib-counter-not-supported.patch
+nilfs2-fix-buffer-corruption-due-to-concurrent-device-reads.patch
+drivers-hv-vmbus-fix-vmbus_wait_for_unload-to-scan-present-cpus.patch
+pci-hv-fix-a-race-condition-bug-in-hv_pci_query_relations.patch
+revert-pci-hv-fix-a-timing-issue-which-causes-kdump-to-fail-occasionally.patch
+pci-hv-remove-the-useless-hv_pcichild_state-from-struct-hv_pci_dev.patch
+pci-hv-fix-a-race-condition-in-hv_irq_unmask-that-can-cause-panic.patch
+cgroup-do-not-corrupt-task-iteration-when-rebinding-subsystem.patch
+mmc-sdhci-msm-disable-broken-64-bit-dma-on-msm8916.patch
+mmc-meson-gx-remove-redundant-mmc_request_done-call-from-irq-context.patch
+mmc-mmci-stm32-fix-max-busy-timeout-calculation.patch
+ip_tunnels-allow-vxlan-geneve-to-inherit-tos-ttl-from-vlan.patch