--- /dev/null
+From ad7558f8fedb1420f9bfe71af2c84e7a0401548b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 7 Feb 2024 15:39:51 +0100
+Subject: mmc: mmci: stm32: fix DMA API overlapping mappings warning
+
+From: Christophe Kerello <christophe.kerello@foss.st.com>
+
+[ Upstream commit 6b1ba3f9040be5efc4396d86c9752cdc564730be ]
+
+Turning on CONFIG_DMA_API_DEBUG_SG results in the following warning:
+
+DMA-API: mmci-pl18x 48220000.mmc: cacheline tracking EEXIST,
+overlapping mappings aren't supported
+WARNING: CPU: 1 PID: 51 at kernel/dma/debug.c:568
+add_dma_entry+0x234/0x2f4
+Modules linked in:
+CPU: 1 PID: 51 Comm: kworker/1:2 Not tainted 6.1.28 #1
+Hardware name: STMicroelectronics STM32MP257F-EV1 Evaluation Board (DT)
+Workqueue: events_freezable mmc_rescan
+Call trace:
+add_dma_entry+0x234/0x2f4
+debug_dma_map_sg+0x198/0x350
+__dma_map_sg_attrs+0xa0/0x110
+dma_map_sg_attrs+0x10/0x2c
+sdmmc_idma_prep_data+0x80/0xc0
+mmci_prep_data+0x38/0x84
+mmci_start_data+0x108/0x2dc
+mmci_request+0xe4/0x190
+__mmc_start_request+0x68/0x140
+mmc_start_request+0x94/0xc0
+mmc_wait_for_req+0x70/0x100
+mmc_send_tuning+0x108/0x1ac
+sdmmc_execute_tuning+0x14c/0x210
+mmc_execute_tuning+0x48/0xec
+mmc_sd_init_uhs_card.part.0+0x208/0x464
+mmc_sd_init_card+0x318/0x89c
+mmc_attach_sd+0xe4/0x180
+mmc_rescan+0x244/0x320
+
+DMA API debug brings to light leaked DMA mappings: dma_map_sg and
+dma_unmap_sg are not correctly balanced.
+
+If an error occurs in the mmci_cmd_irq function, only the mmci_dma_error
+callback is invoked, and since that callback is not implemented by the
+stm32 variant, dma_unmap_sg is never called in this error path (see the
+sketch after this patch).
+
+Signed-off-by: Christophe Kerello <christophe.kerello@foss.st.com>
+Fixes: 46b723dd867d ("mmc: mmci: add stm32 sdmmc variant")
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20240207143951.938144-1-christophe.kerello@foss.st.com
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/mmc/host/mmci_stm32_sdmmc.c | 24 ++++++++++++++++++++++++
+ 1 file changed, 24 insertions(+)
+
+diff --git a/drivers/mmc/host/mmci_stm32_sdmmc.c b/drivers/mmc/host/mmci_stm32_sdmmc.c
+index 11ae0cb479239..e3201a621870a 100644
+--- a/drivers/mmc/host/mmci_stm32_sdmmc.c
++++ b/drivers/mmc/host/mmci_stm32_sdmmc.c
+@@ -200,6 +200,8 @@ static int sdmmc_idma_start(struct mmci_host *host, unsigned int *datactrl)
+ struct scatterlist *sg;
+ int i;
+
++ host->dma_in_progress = true;
++
+ if (!host->variant->dma_lli || data->sg_len == 1 ||
+ idma->use_bounce_buffer) {
+ u32 dma_addr;
+@@ -238,9 +240,30 @@ static int sdmmc_idma_start(struct mmci_host *host, unsigned int *datactrl)
+ return 0;
+ }
+
++static void sdmmc_idma_error(struct mmci_host *host)
++{
++ struct mmc_data *data = host->data;
++ struct sdmmc_idma *idma = host->dma_priv;
++
++ if (!dma_inprogress(host))
++ return;
++
++ writel_relaxed(0, host->base + MMCI_STM32_IDMACTRLR);
++ host->dma_in_progress = false;
++ data->host_cookie = 0;
++
++ if (!idma->use_bounce_buffer)
++ dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
++ mmc_get_dma_dir(data));
++}
++
+ static void sdmmc_idma_finalize(struct mmci_host *host, struct mmc_data *data)
+ {
++ if (!dma_inprogress(host))
++ return;
++
+ writel_relaxed(0, host->base + MMCI_STM32_IDMACTRLR);
++ host->dma_in_progress = false;
+
+ if (!data->host_cookie)
+ sdmmc_idma_unprep_data(host, data, 0);
+@@ -566,6 +589,7 @@ static struct mmci_host_ops sdmmc_variant_ops = {
+ .dma_setup = sdmmc_idma_setup,
+ .dma_start = sdmmc_idma_start,
+ .dma_finalize = sdmmc_idma_finalize,
++ .dma_error = sdmmc_idma_error,
+ .set_clkreg = mmci_sdmmc_set_clkreg,
+ .set_pwrreg = mmci_sdmmc_set_pwrreg,
+ .busy_complete = sdmmc_busy_complete,
+--
+2.43.0
+
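The imbalance described in the changelog above is easiest to see with the DMA-API debug bookkeeping modelled in miniature. The sketch below is plain userspace C, not driver code: toy_map_sg(), toy_unmap_sg() and the two failed_request_* helpers are made-up names, and only the map/unmap balance mirrors what the driver does. It shows how an error path that skips the unmap leaves a stale entry behind, so the next request trips the same "overlapping mappings" check, while an error hook that unmaps (what sdmmc_idma_error() now provides) keeps the accounting balanced.

```c
/*
 * Toy model of the DMA-API debug bookkeeping: every mapping is recorded,
 * and mapping the same buffer again before it has been unmapped is
 * reported as an overlap.  All names here are made up for illustration;
 * only the map/unmap balance mirrors the driver.
 */
#include <stdbool.h>
#include <stdio.h>

#define MAX_ENTRIES 16

static const void *active[MAX_ENTRIES];	/* buffers currently "mapped" */

static bool toy_map_sg(const void *buf)
{
	for (int i = 0; i < MAX_ENTRIES; i++) {
		if (active[i] == buf) {
			printf("DMA-API: overlapping mappings aren't supported\n");
			return false;
		}
	}
	for (int i = 0; i < MAX_ENTRIES; i++) {
		if (!active[i]) {
			active[i] = buf;
			return true;
		}
	}
	return false;	/* tracker full */
}

static void toy_unmap_sg(const void *buf)
{
	for (int i = 0; i < MAX_ENTRIES; i++)
		if (active[i] == buf)
			active[i] = NULL;
}

/* Error path before the fix: the buffer stays mapped. */
static void failed_request_leaky(const void *buf)
{
	toy_map_sg(buf);
	/* the command fails here and no error hook unmaps the buffer */
}

/* Error path after the fix: the error hook unmaps, like sdmmc_idma_error(). */
static void failed_request_fixed(const void *buf)
{
	toy_map_sg(buf);
	toy_unmap_sg(buf);
}

int main(void)
{
	char buf[64];

	failed_request_leaky(buf);
	toy_map_sg(buf);	/* next request: the overlap warning fires */
	toy_unmap_sg(buf);

	failed_request_fixed(buf);
	toy_map_sg(buf);	/* next request maps cleanly, no warning */
	toy_unmap_sg(buf);
	return 0;
}
```

Running it prints the overlap warning exactly once, after the leaky error path; the fixed path leaves nothing behind.
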
--- /dev/null
+From de849606fbe178064425af75d14a6151a6e9e337 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 28 Mar 2022 16:51:14 +0200
+Subject: mmc: mmci: stm32: use a buffer for unaligned DMA requests
+
+From: Yann Gautier <yann.gautier@foss.st.com>
+
+[ Upstream commit 970dc9c11a17994ab878016b536612ab00d1441d ]
+
+In SDIO mode, the sg list for requests can be unaligned with what the
+STM32 SDMMC internal DMA can support. In that case, instead of failing,
+use a temporary bounce buffer to copy from/to the sg list.
+This buffer is limited to 1MB, which in turn requires max_req_size to
+be capped at 1MB. This has shown no throughput penalty for SD cards or
+eMMC (see the sketch after this patch).
+
+Signed-off-by: Yann Gautier <yann.gautier@foss.st.com>
+Link: https://lore.kernel.org/r/20220328145114.334577-1-yann.gautier@foss.st.com
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Stable-dep-of: 6b1ba3f9040b ("mmc: mmci: stm32: fix DMA API overlapping mappings warning")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/mmc/host/mmci_stm32_sdmmc.c | 88 +++++++++++++++++++++++------
+ 1 file changed, 71 insertions(+), 17 deletions(-)
+
+diff --git a/drivers/mmc/host/mmci_stm32_sdmmc.c b/drivers/mmc/host/mmci_stm32_sdmmc.c
+index 4cceb9bab0361..11ae0cb479239 100644
+--- a/drivers/mmc/host/mmci_stm32_sdmmc.c
++++ b/drivers/mmc/host/mmci_stm32_sdmmc.c
+@@ -43,6 +43,9 @@ struct sdmmc_lli_desc {
+ struct sdmmc_idma {
+ dma_addr_t sg_dma;
+ void *sg_cpu;
++ dma_addr_t bounce_dma_addr;
++ void *bounce_buf;
++ bool use_bounce_buffer;
+ };
+
+ struct sdmmc_dlyb {
+@@ -54,6 +57,8 @@ struct sdmmc_dlyb {
+ static int sdmmc_idma_validate_data(struct mmci_host *host,
+ struct mmc_data *data)
+ {
++ struct sdmmc_idma *idma = host->dma_priv;
++ struct device *dev = mmc_dev(host->mmc);
+ struct scatterlist *sg;
+ int i;
+
+@@ -61,41 +66,69 @@ static int sdmmc_idma_validate_data(struct mmci_host *host,
+ * idma has constraints on idmabase & idmasize for each element
+ * excepted the last element which has no constraint on idmasize
+ */
++ idma->use_bounce_buffer = false;
+ for_each_sg(data->sg, sg, data->sg_len - 1, i) {
+ if (!IS_ALIGNED(sg->offset, sizeof(u32)) ||
+ !IS_ALIGNED(sg->length, SDMMC_IDMA_BURST)) {
+- dev_err(mmc_dev(host->mmc),
++ dev_dbg(mmc_dev(host->mmc),
+ "unaligned scatterlist: ofst:%x length:%d\n",
+ data->sg->offset, data->sg->length);
+- return -EINVAL;
++ goto use_bounce_buffer;
+ }
+ }
+
+ if (!IS_ALIGNED(sg->offset, sizeof(u32))) {
+- dev_err(mmc_dev(host->mmc),
++ dev_dbg(mmc_dev(host->mmc),
+ "unaligned last scatterlist: ofst:%x length:%d\n",
+ data->sg->offset, data->sg->length);
+- return -EINVAL;
++ goto use_bounce_buffer;
+ }
+
++ return 0;
++
++use_bounce_buffer:
++ if (!idma->bounce_buf) {
++ idma->bounce_buf = dmam_alloc_coherent(dev,
++ host->mmc->max_req_size,
++ &idma->bounce_dma_addr,
++ GFP_KERNEL);
++ if (!idma->bounce_buf) {
++ dev_err(dev, "Unable to map allocate DMA bounce buffer.\n");
++ return -ENOMEM;
++ }
++ }
++
++ idma->use_bounce_buffer = true;
++
+ return 0;
+ }
+
+ static int _sdmmc_idma_prep_data(struct mmci_host *host,
+ struct mmc_data *data)
+ {
+- int n_elem;
++ struct sdmmc_idma *idma = host->dma_priv;
+
+- n_elem = dma_map_sg(mmc_dev(host->mmc),
+- data->sg,
+- data->sg_len,
+- mmc_get_dma_dir(data));
++ if (idma->use_bounce_buffer) {
++ if (data->flags & MMC_DATA_WRITE) {
++ unsigned int xfer_bytes = data->blksz * data->blocks;
+
+- if (!n_elem) {
+- dev_err(mmc_dev(host->mmc), "dma_map_sg failed\n");
+- return -EINVAL;
+- }
++ sg_copy_to_buffer(data->sg, data->sg_len,
++ idma->bounce_buf, xfer_bytes);
++ dma_wmb();
++ }
++ } else {
++ int n_elem;
++
++ n_elem = dma_map_sg(mmc_dev(host->mmc),
++ data->sg,
++ data->sg_len,
++ mmc_get_dma_dir(data));
+
++ if (!n_elem) {
++ dev_err(mmc_dev(host->mmc), "dma_map_sg failed\n");
++ return -EINVAL;
++ }
++ }
+ return 0;
+ }
+
+@@ -112,8 +145,19 @@ static int sdmmc_idma_prep_data(struct mmci_host *host,
+ static void sdmmc_idma_unprep_data(struct mmci_host *host,
+ struct mmc_data *data, int err)
+ {
+- dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
+- mmc_get_dma_dir(data));
++ struct sdmmc_idma *idma = host->dma_priv;
++
++ if (idma->use_bounce_buffer) {
++ if (data->flags & MMC_DATA_READ) {
++ unsigned int xfer_bytes = data->blksz * data->blocks;
++
++ sg_copy_from_buffer(data->sg, data->sg_len,
++ idma->bounce_buf, xfer_bytes);
++ }
++ } else {
++ dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
++ mmc_get_dma_dir(data));
++ }
+ }
+
+ static int sdmmc_idma_setup(struct mmci_host *host)
+@@ -137,6 +181,8 @@ static int sdmmc_idma_setup(struct mmci_host *host)
+ host->mmc->max_segs = SDMMC_LLI_BUF_LEN /
+ sizeof(struct sdmmc_lli_desc);
+ host->mmc->max_seg_size = host->variant->stm32_idmabsize_mask;
++
++ host->mmc->max_req_size = SZ_1M;
+ } else {
+ host->mmc->max_segs = 1;
+ host->mmc->max_seg_size = host->mmc->max_req_size;
+@@ -154,8 +200,16 @@ static int sdmmc_idma_start(struct mmci_host *host, unsigned int *datactrl)
+ struct scatterlist *sg;
+ int i;
+
+- if (!host->variant->dma_lli || data->sg_len == 1) {
+- writel_relaxed(sg_dma_address(data->sg),
++ if (!host->variant->dma_lli || data->sg_len == 1 ||
++ idma->use_bounce_buffer) {
++ u32 dma_addr;
++
++ if (idma->use_bounce_buffer)
++ dma_addr = idma->bounce_dma_addr;
++ else
++ dma_addr = sg_dma_address(data->sg);
++
++ writel_relaxed(dma_addr,
+ host->base + MMCI_STM32_IDMABASE0R);
+ writel_relaxed(MMCI_STM32_IDMAEN,
+ host->base + MMCI_STM32_IDMACTRLR);
+--
+2.43.0
+
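The bounce-buffer strategy from the changelog boils down to: if any segment of the request violates the controller's alignment constraints, stage the whole transfer through one contiguous buffer instead of rejecting it. Below is a minimal, self-contained userspace model of that decision and of the copy-in path for a write. All names (struct seg, start_write, BURST_ALIGN, MAX_REQ_SIZE) are illustrative; in the driver the copies are done with sg_copy_to_buffer()/sg_copy_from_buffer() and the buffer comes from dmam_alloc_coherent(), as in the diff above.

```c
/*
 * Minimal model of the bounce-buffer strategy: if any segment of the
 * request violates the engine's alignment rules, stage the transfer
 * through one contiguous buffer instead of rejecting the request.
 * All names here are illustrative, not the driver's.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BURST_ALIGN	8		/* stand-in for SDMMC_IDMA_BURST */
#define MAX_REQ_SIZE	(1024 * 1024)	/* bounce buffer / request cap */

struct seg {
	const void *addr;
	size_t len;
};

/* Every segment needs an aligned start; all but the last an aligned length. */
static bool segs_aligned(const struct seg *sg, int nents)
{
	for (int i = 0; i < nents; i++) {
		if ((uintptr_t)sg[i].addr % sizeof(uint32_t))
			return false;
		if (i != nents - 1 && sg[i].len % BURST_ALIGN)
			return false;
	}
	return true;
}

static char bounce_buf[MAX_REQ_SIZE];

/* Pick the region handed to the (pretend) DMA engine for a write. */
static const void *start_write(const struct seg *sg, int nents, size_t *out_len)
{
	size_t total = 0;

	for (int i = 0; i < nents; i++)
		total += sg[i].len;
	if (!nents || total > MAX_REQ_SIZE)
		return NULL;		/* request larger than the buffer */
	*out_len = total;

	if (segs_aligned(sg, nents))
		return sg[0].addr;	/* real driver maps the sg list as-is */

	/* unaligned: copy every segment into the bounce buffer first */
	total = 0;
	for (int i = 0; i < nents; i++) {
		memcpy(bounce_buf + total, sg[i].addr, sg[i].len);
		total += sg[i].len;
	}
	return bounce_buf;
}

int main(void)
{
	char a[12] = "hello", b[16] = "world";	/* a's length breaks alignment */
	struct seg sg[] = { { a, sizeof(a) }, { b, sizeof(b) } };
	size_t len = 0;
	const void *region = start_write(sg, 2, &len);

	printf("DMA region: %s, %zu bytes\n",
	       region == (const void *)bounce_buf ? "bounce buffer" : "sg list", len);
	return 0;
}
```

The read direction is symmetric: the hardware writes into the bounce buffer and the data is copied back to the sg list afterwards, which is what sdmmc_idma_unprep_data() does in the diff.
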
--- /dev/null
+From f5465f1470025599c350ab48c5f7ed3406d05f94 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 2 Feb 2024 01:51:02 +0000
+Subject: riscv: add CALLER_ADDRx support
+
+From: Zong Li <zong.li@sifive.com>
+
+[ Upstream commit 680341382da56bd192ebfa4e58eaf4fec2e5bca7 ]
+
+CALLER_ADDRx returns the caller's address at the specified level; these
+macros are used by several tracers. They eventually fall back to
+__builtin_return_address(n) to get the caller's address if the arch
+doesn't define its own implementation.
+
+On RISC-V, __builtin_return_address(n) only works when n == 0, so we
+need to walk the stack frames to get the caller's address at the
+specified level.
+
+data.level starts at 'level + 3' because of the call flow used to get
+the caller's address in the RISC-V implementation (see the sketch after
+this patch). Without the three additional iterations, the levels would
+correspond as follows:
+
+callsite -> return_address -> arch_stack_walk -> walk_stackframe
+    |              |                 |                  |
+ level 3        level 2           level 1            level 0
+
+Fixes: 10626c32e382 ("riscv/ftrace: Add basic support")
+Cc: stable@vger.kernel.org
+Reviewed-by: Alexandre Ghiti <alexghiti@rivosinc.com>
+Signed-off-by: Zong Li <zong.li@sifive.com>
+Link: https://lore.kernel.org/r/20240202015102.26251-1-zong.li@sifive.com
+Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/riscv/include/asm/ftrace.h | 5 ++++
+ arch/riscv/kernel/Makefile | 2 ++
+ arch/riscv/kernel/return_address.c | 48 ++++++++++++++++++++++++++++++
+ 3 files changed, 55 insertions(+)
+ create mode 100644 arch/riscv/kernel/return_address.c
+
+diff --git a/arch/riscv/include/asm/ftrace.h b/arch/riscv/include/asm/ftrace.h
+index bc745900c1631..135517e440105 100644
+--- a/arch/riscv/include/asm/ftrace.h
++++ b/arch/riscv/include/asm/ftrace.h
+@@ -25,6 +25,11 @@
+
+ #define ARCH_SUPPORTS_FTRACE_OPS 1
+ #ifndef __ASSEMBLY__
++
++extern void *return_address(unsigned int level);
++
++#define ftrace_return_address(n) return_address(n)
++
+ void MCOUNT_NAME(void);
+ static inline unsigned long ftrace_call_adjust(unsigned long addr)
+ {
+diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile
+index bc49d5f2302b6..335465792d933 100644
+--- a/arch/riscv/kernel/Makefile
++++ b/arch/riscv/kernel/Makefile
+@@ -7,6 +7,7 @@ ifdef CONFIG_FTRACE
+ CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE)
+ CFLAGS_REMOVE_patch.o = $(CC_FLAGS_FTRACE)
+ CFLAGS_REMOVE_sbi.o = $(CC_FLAGS_FTRACE)
++CFLAGS_REMOVE_return_address.o = $(CC_FLAGS_FTRACE)
+ endif
+
+ extra-y += head.o
+@@ -20,6 +21,7 @@ obj-y += irq.o
+ obj-y += process.o
+ obj-y += ptrace.o
+ obj-y += reset.o
++obj-y += return_address.o
+ obj-y += setup.o
+ obj-y += signal.o
+ obj-y += syscall_table.o
+diff --git a/arch/riscv/kernel/return_address.c b/arch/riscv/kernel/return_address.c
+new file mode 100644
+index 0000000000000..c8115ec8fb304
+--- /dev/null
++++ b/arch/riscv/kernel/return_address.c
+@@ -0,0 +1,48 @@
++// SPDX-License-Identifier: GPL-2.0-only
++/*
++ * This code comes from arch/arm64/kernel/return_address.c
++ *
++ * Copyright (C) 2023 SiFive.
++ */
++
++#include <linux/export.h>
++#include <linux/kprobes.h>
++#include <linux/stacktrace.h>
++
++struct return_address_data {
++ unsigned int level;
++ void *addr;
++};
++
++static bool save_return_addr(void *d, unsigned long pc)
++{
++ struct return_address_data *data = d;
++
++ if (!data->level) {
++ data->addr = (void *)pc;
++ return false;
++ }
++
++ --data->level;
++
++ return true;
++}
++NOKPROBE_SYMBOL(save_return_addr);
++
++noinline void *return_address(unsigned int level)
++{
++ struct return_address_data data;
++
++ data.level = level + 3;
++ data.addr = NULL;
++
++ arch_stack_walk(save_return_addr, &data, current, NULL);
++
++ if (!data.level)
++ return data.addr;
++ else
++ return NULL;
++
++}
++EXPORT_SYMBOL_GPL(return_address);
++NOKPROBE_SYMBOL(return_address);
+--
+2.43.0
+
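The 'level + 3' bookkeeping can be modelled without any RISC-V specifics. In the sketch below, fake_stack_walk() and the frame names are made up; only the countdown in save_return_addr() mirrors the new return_address.c. Because the walker reports its own frames (walk_stackframe, arch_stack_walk, return_address) before the frame the caller asked about, starting the countdown at level + 3 makes return_address(0) land on the callsite.

```c
/*
 * Userspace model of the level bookkeeping in return_address(): the
 * stack walker reports frames innermost-first, so its own frames are
 * seen before the frame the caller asked about, and the countdown
 * starts at level + 3 to skip them.  The frame list is illustrative.
 */
#include <stdbool.h>
#include <stdio.h>

struct return_address_data {
	unsigned int level;
	const char *addr;
};

static bool save_return_addr(void *d, const char *pc)
{
	struct return_address_data *data = d;

	if (!data->level) {
		data->addr = pc;
		return false;		/* stop the walk */
	}
	--data->level;
	return true;			/* keep walking */
}

/* Stand-in for arch_stack_walk(): frames from innermost to outermost. */
static void fake_stack_walk(bool (*fn)(void *, const char *), void *arg)
{
	static const char *const frames[] = {
		"walk_stackframe",	/* raw level 0 */
		"arch_stack_walk",	/* raw level 1 */
		"return_address",	/* raw level 2 */
		"callsite",		/* raw level 3: what level 0 should pick */
		"caller_of_callsite",	/* raw level 4: what level 1 should pick */
	};

	for (unsigned int i = 0; i < sizeof(frames) / sizeof(frames[0]); i++)
		if (!fn(arg, frames[i]))
			return;
}

static const char *model_return_address(unsigned int level)
{
	struct return_address_data data = { .level = level + 3 };

	fake_stack_walk(save_return_addr, &data);
	return data.level ? NULL : data.addr;
}

int main(void)
{
	printf("return_address(0) -> %s\n", model_return_address(0));
	printf("return_address(1) -> %s\n", model_return_address(1));
	return 0;
}
```

Compiling and running it prints "callsite" for level 0 and "caller_of_callsite" for level 1, matching the table in the changelog.
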
--- /dev/null
+mmc-mmci-stm32-use-a-buffer-for-unaligned-dma-reques.patch
+mmc-mmci-stm32-fix-dma-api-overlapping-mappings-warn.patch
+riscv-add-caller_addrx-support.patch