From 4503f81ae566dab6ba874526e4d8c42acd352a62 Mon Sep 17 00:00:00 2001
From: Greg Kroah-Hartman
Date: Mon, 28 Aug 2023 12:08:37 +0200
Subject: [PATCH] drop some riscv patches in 5.4.y

No one should be using 5.4 for riscv, so these aren't needed.
---
 ...scv-mmio-fix-readx-to-delay-ordering.patch | 73 ----
 ...mio-functions-into-their-own-header-.patch | 365 ------------------
 queue-5.4/series | 2 -
 3 files changed, 440 deletions(-)
 delete mode 100644 queue-5.4/riscv-mmio-fix-readx-to-delay-ordering.patch
 delete mode 100644 queue-5.4/riscv-separate-mmio-functions-into-their-own-header-.patch

diff --git a/queue-5.4/riscv-mmio-fix-readx-to-delay-ordering.patch b/queue-5.4/riscv-mmio-fix-readx-to-delay-ordering.patch
deleted file mode 100644
index 181c857b23d..00000000000
--- a/queue-5.4/riscv-mmio-fix-readx-to-delay-ordering.patch
+++ /dev/null
@@ -1,73 +0,0 @@
-From 7ef32e3e7eea1d41b5a7b283a715e565114328a9 Mon Sep 17 00:00:00 2001
-From: Sasha Levin
-Date: Thu, 3 Aug 2023 06:27:38 +0200
-Subject: riscv,mmio: Fix readX()-to-delay() ordering
-
-From: Andrea Parri
-
-[ Upstream commit 4eb2eb1b4c0eb07793c240744843498564a67b83 ]
-
-Section 2.1 of the Platform Specification [1] states:
-
-  Unless otherwise specified by a given I/O device, I/O devices are on
-  ordering channel 0 (i.e., they are point-to-point strongly ordered).
-
-which is not sufficient to guarantee that a readX() by a hart completes
-before a subsequent delay() on the same hart (cf. memory-barriers.txt,
-"Kernel I/O barrier effects").
-
-Set the I(nput) bit in __io_ar() to restore the ordering, align inline
-comments.
-
-[1] https://github.com/riscv/riscv-platform-specs
-
-Signed-off-by: Andrea Parri
-Link: https://lore.kernel.org/r/20230803042738.5937-1-parri.andrea@gmail.com
-Fixes: fab957c11efe ("RISC-V: Atomic and Locking Code")
-Cc: stable@vger.kernel.org
-Signed-off-by: Palmer Dabbelt
-Signed-off-by: Sasha Levin
----
- arch/riscv/include/asm/mmio.h | 16 ++++++++--------
- 1 file changed, 8 insertions(+), 8 deletions(-)
-
-diff --git a/arch/riscv/include/asm/mmio.h b/arch/riscv/include/asm/mmio.h
-index 308b98f857539..2c08dd4292b27 100644
---- a/arch/riscv/include/asm/mmio.h
-+++ b/arch/riscv/include/asm/mmio.h
-@@ -114,9 +114,9 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
-  * Relaxed I/O memory access primitives. These follow the Device memory
-  * ordering rules but do not guarantee any ordering relative to Normal memory
-  * accesses. These are defined to order the indicated access (either a read or
-- * write) with all other I/O memory accesses. Since the platform specification
-- * defines that all I/O regions are strongly ordered on channel 2, no explicit
-- * fences are required to enforce this ordering.
-+ * write) with all other I/O memory accesses to the same peripheral. Since the
-+ * platform specification defines that all I/O regions are strongly ordered on
-+ * channel 0, no explicit fences are required to enforce this ordering.
-  */
- /* FIXME: These are now the same as asm-generic */
- #define __io_rbr() do {} while (0)
-@@ -138,14 +138,14 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
- #endif
- 
- /*
-- * I/O memory access primitives. Reads are ordered relative to any
-- * following Normal memory access. Writes are ordered relative to any prior
-- * Normal memory access. The memory barriers here are necessary as RISC-V
-+ * I/O memory access primitives. Reads are ordered relative to any following
-+ * Normal memory read and delay() loop. Writes are ordered relative to any
-+ * prior Normal memory write. The memory barriers here are necessary as RISC-V
-  * doesn't define any ordering between the memory space and the I/O space.
-  */
- #define __io_br() do {} while (0)
--#define __io_ar(v) __asm__ __volatile__ ("fence i,r" : : : "memory")
--#define __io_bw() __asm__ __volatile__ ("fence w,o" : : : "memory")
-+#define __io_ar(v) ({ __asm__ __volatile__ ("fence i,ir" : : : "memory"); })
-+#define __io_bw() ({ __asm__ __volatile__ ("fence w,o" : : : "memory"); })
- #define __io_aw() mmiowb_set_pending()
- 
- #define readb(c) ({ u8 __v; __io_br(); __v = readb_cpu(c); __io_ar(__v); __v; })
---
-2.40.1
-
diff --git a/queue-5.4/riscv-separate-mmio-functions-into-their-own-header-.patch b/queue-5.4/riscv-separate-mmio-functions-into-their-own-header-.patch
deleted file mode 100644
index fd191180c45..00000000000
--- a/queue-5.4/riscv-separate-mmio-functions-into-their-own-header-.patch
+++ /dev/null
@@ -1,365 +0,0 @@
-From 07f9e54dc546bb925eb3e40e9bc32c75453c55f1 Mon Sep 17 00:00:00 2001
-From: Sasha Levin
-Date: Mon, 28 Oct 2019 13:53:50 -0700
-Subject: riscv: separate MMIO functions into their own header file
-
-From: Paul Walmsley
-
-[ Upstream commit 0c3ac28931d578324e93afab6ee7b740dfdaff6f ]
-
-Separate the low-level MMIO static inline functions and macros, such
-as {read,write}{b,w,l,q}(), into their own header file under
-arch/riscv/include: asm/mmio.h. This is done to break a header
-dependency chain that arises when both asm/pgtable.h and asm/io.h are
-included by asm/timex.h. Since the problem is related to the legacy
-I/O port support in asm/io.h, this allows files under arch/riscv that
-encounter those issues to simply include asm/mmio.h instead, and
-bypass the legacy I/O port functions. Existing users of asm/io.h
-don't need to change anything, since asm/mmio.h is included by
-asm/io.h.
-
-While here, clean up some checkpatch.pl-related issues with the
-original code.
-
-Signed-off-by: Paul Walmsley
-Stable-dep-of: 4eb2eb1b4c0e ("riscv,mmio: Fix readX()-to-delay() ordering")
-Signed-off-by: Sasha Levin
----
- arch/riscv/include/asm/io.h | 147 +-----------------------------
- arch/riscv/include/asm/mmio.h | 164 ++++++++++++++++++++++++++++++++++
- 2 files changed, 167 insertions(+), 144 deletions(-)
- create mode 100644 arch/riscv/include/asm/mmio.h
-
-diff --git a/arch/riscv/include/asm/io.h b/arch/riscv/include/asm/io.h
-index 2dc5b01d62df8..f08527e62b114 100644
---- a/arch/riscv/include/asm/io.h
-+++ b/arch/riscv/include/asm/io.h
-@@ -15,152 +15,11 @@
- #include
- #include
- 
--extern void __iomem *ioremap(phys_addr_t offset, unsigned long size);
--
--/*
-- * The RISC-V ISA doesn't yet specify how to query or modify PMAs, so we can't
-- * change the properties of memory regions. This should be fixed by the
-- * upcoming platform spec.
-- */
--#define ioremap_nocache(addr, size) ioremap((addr), (size))
--#define ioremap_wc(addr, size) ioremap((addr), (size))
--#define ioremap_wt(addr, size) ioremap((addr), (size))
--
--extern void iounmap(volatile void __iomem *addr);
--
--/* Generic IO read/write. These perform native-endian accesses. */
--#define __raw_writeb __raw_writeb
--static inline void __raw_writeb(u8 val, volatile void __iomem *addr)
--{
-- asm volatile("sb %0, 0(%1)" : : "r" (val), "r" (addr));
--}
--
--#define __raw_writew __raw_writew
--static inline void __raw_writew(u16 val, volatile void __iomem *addr)
--{
-- asm volatile("sh %0, 0(%1)" : : "r" (val), "r" (addr));
--}
--
--#define __raw_writel __raw_writel
--static inline void __raw_writel(u32 val, volatile void __iomem *addr)
--{
-- asm volatile("sw %0, 0(%1)" : : "r" (val), "r" (addr));
--}
--
--#ifdef CONFIG_64BIT
--#define __raw_writeq __raw_writeq
--static inline void __raw_writeq(u64 val, volatile void __iomem *addr)
--{
-- asm volatile("sd %0, 0(%1)" : : "r" (val), "r" (addr));
--}
--#endif
--
--#define __raw_readb __raw_readb
--static inline u8 __raw_readb(const volatile void __iomem *addr)
--{
-- u8 val;
--
-- asm volatile("lb %0, 0(%1)" : "=r" (val) : "r" (addr));
-- return val;
--}
--
--#define __raw_readw __raw_readw
--static inline u16 __raw_readw(const volatile void __iomem *addr)
--{
-- u16 val;
--
-- asm volatile("lh %0, 0(%1)" : "=r" (val) : "r" (addr));
-- return val;
--}
--
--#define __raw_readl __raw_readl
--static inline u32 __raw_readl(const volatile void __iomem *addr)
--{
-- u32 val;
--
-- asm volatile("lw %0, 0(%1)" : "=r" (val) : "r" (addr));
-- return val;
--}
--
--#ifdef CONFIG_64BIT
--#define __raw_readq __raw_readq
--static inline u64 __raw_readq(const volatile void __iomem *addr)
--{
-- u64 val;
--
-- asm volatile("ld %0, 0(%1)" : "=r" (val) : "r" (addr));
-- return val;
--}
--#endif
--
- /*
-- * Unordered I/O memory access primitives. These are even more relaxed than
-- * the relaxed versions, as they don't even order accesses between successive
-- * operations to the I/O regions.
-+ * MMIO access functions are separated out to break dependency cycles
-+ * when using {read,write}* fns in low-level headers
-  */
--#define readb_cpu(c) ({ u8 __r = __raw_readb(c); __r; })
--#define readw_cpu(c) ({ u16 __r = le16_to_cpu((__force __le16)__raw_readw(c)); __r; })
--#define readl_cpu(c) ({ u32 __r = le32_to_cpu((__force __le32)__raw_readl(c)); __r; })
--
--#define writeb_cpu(v,c) ((void)__raw_writeb((v),(c)))
--#define writew_cpu(v,c) ((void)__raw_writew((__force u16)cpu_to_le16(v),(c)))
--#define writel_cpu(v,c) ((void)__raw_writel((__force u32)cpu_to_le32(v),(c)))
--
--#ifdef CONFIG_64BIT
--#define readq_cpu(c) ({ u64 __r = le64_to_cpu((__force __le64)__raw_readq(c)); __r; })
--#define writeq_cpu(v,c) ((void)__raw_writeq((__force u64)cpu_to_le64(v),(c)))
--#endif
--
--/*
-- * Relaxed I/O memory access primitives. These follow the Device memory
-- * ordering rules but do not guarantee any ordering relative to Normal memory
-- * accesses. These are defined to order the indicated access (either a read or
-- * write) with all other I/O memory accesses. Since the platform specification
-- * defines that all I/O regions are strongly ordered on channel 2, no explicit
-- * fences are required to enforce this ordering.
-- */
--/* FIXME: These are now the same as asm-generic */
--#define __io_rbr() do {} while (0)
--#define __io_rar() do {} while (0)
--#define __io_rbw() do {} while (0)
--#define __io_raw() do {} while (0)
--
--#define readb_relaxed(c) ({ u8 __v; __io_rbr(); __v = readb_cpu(c); __io_rar(); __v; })
--#define readw_relaxed(c) ({ u16 __v; __io_rbr(); __v = readw_cpu(c); __io_rar(); __v; })
--#define readl_relaxed(c) ({ u32 __v; __io_rbr(); __v = readl_cpu(c); __io_rar(); __v; })
--
--#define writeb_relaxed(v,c) ({ __io_rbw(); writeb_cpu((v),(c)); __io_raw(); })
--#define writew_relaxed(v,c) ({ __io_rbw(); writew_cpu((v),(c)); __io_raw(); })
--#define writel_relaxed(v,c) ({ __io_rbw(); writel_cpu((v),(c)); __io_raw(); })
--
--#ifdef CONFIG_64BIT
--#define readq_relaxed(c) ({ u64 __v; __io_rbr(); __v = readq_cpu(c); __io_rar(); __v; })
--#define writeq_relaxed(v,c) ({ __io_rbw(); writeq_cpu((v),(c)); __io_raw(); })
--#endif
--
--/*
-- * I/O memory access primitives. Reads are ordered relative to any
-- * following Normal memory access. Writes are ordered relative to any prior
-- * Normal memory access. The memory barriers here are necessary as RISC-V
-- * doesn't define any ordering between the memory space and the I/O space.
-- */
--#define __io_br() do {} while (0)
--#define __io_ar(v) __asm__ __volatile__ ("fence i,r" : : : "memory");
--#define __io_bw() __asm__ __volatile__ ("fence w,o" : : : "memory");
--#define __io_aw() mmiowb_set_pending()
--
--#define readb(c) ({ u8 __v; __io_br(); __v = readb_cpu(c); __io_ar(__v); __v; })
--#define readw(c) ({ u16 __v; __io_br(); __v = readw_cpu(c); __io_ar(__v); __v; })
--#define readl(c) ({ u32 __v; __io_br(); __v = readl_cpu(c); __io_ar(__v); __v; })
--
--#define writeb(v,c) ({ __io_bw(); writeb_cpu((v),(c)); __io_aw(); })
--#define writew(v,c) ({ __io_bw(); writew_cpu((v),(c)); __io_aw(); })
--#define writel(v,c) ({ __io_bw(); writel_cpu((v),(c)); __io_aw(); })
--
--#ifdef CONFIG_64BIT
--#define readq(c) ({ u64 __v; __io_br(); __v = readq_cpu(c); __io_ar(__v); __v; })
--#define writeq(v,c) ({ __io_bw(); writeq_cpu((v),(c)); __io_aw(); })
--#endif
-+#include
- 
- /*
-  * I/O port access constants.
-diff --git a/arch/riscv/include/asm/mmio.h b/arch/riscv/include/asm/mmio.h
-new file mode 100644
-index 0000000000000..308b98f857539
---- /dev/null
-+++ b/arch/riscv/include/asm/mmio.h
-@@ -0,0 +1,164 @@
-+/* SPDX-License-Identifier: GPL-2.0-only */
-+/*
-+ * {read,write}{b,w,l,q} based on arch/arm64/include/asm/io.h
-+ * which was based on arch/arm/include/io.h
-+ *
-+ * Copyright (C) 1996-2000 Russell King
-+ * Copyright (C) 2012 ARM Ltd.
-+ * Copyright (C) 2014 Regents of the University of California
-+ */
-+
-+#ifndef _ASM_RISCV_MMIO_H
-+#define _ASM_RISCV_MMIO_H
-+
-+#include
-+#include
-+
-+void __iomem *ioremap(phys_addr_t offset, unsigned long size);
-+
-+/*
-+ * The RISC-V ISA doesn't yet specify how to query or modify PMAs, so we can't
-+ * change the properties of memory regions. This should be fixed by the
-+ * upcoming platform spec.
-+ */
-+#define ioremap_nocache(addr, size) ioremap((addr), (size))
-+#define ioremap_wc(addr, size) ioremap((addr), (size))
-+#define ioremap_wt(addr, size) ioremap((addr), (size))
-+
-+void iounmap(volatile void __iomem *addr);
-+
-+/* Generic IO read/write. These perform native-endian accesses. */
-+#define __raw_writeb __raw_writeb
-+static inline void __raw_writeb(u8 val, volatile void __iomem *addr)
-+{
-+ asm volatile("sb %0, 0(%1)" : : "r" (val), "r" (addr));
-+}
-+
-+#define __raw_writew __raw_writew
-+static inline void __raw_writew(u16 val, volatile void __iomem *addr)
-+{
-+ asm volatile("sh %0, 0(%1)" : : "r" (val), "r" (addr));
-+}
-+
-+#define __raw_writel __raw_writel
-+static inline void __raw_writel(u32 val, volatile void __iomem *addr)
-+{
-+ asm volatile("sw %0, 0(%1)" : : "r" (val), "r" (addr));
-+}
-+
-+#ifdef CONFIG_64BIT
-+#define __raw_writeq __raw_writeq
-+static inline void __raw_writeq(u64 val, volatile void __iomem *addr)
-+{
-+ asm volatile("sd %0, 0(%1)" : : "r" (val), "r" (addr));
-+}
-+#endif
-+
-+#define __raw_readb __raw_readb
-+static inline u8 __raw_readb(const volatile void __iomem *addr)
-+{
-+ u8 val;
-+
-+ asm volatile("lb %0, 0(%1)" : "=r" (val) : "r" (addr));
-+ return val;
-+}
-+
-+#define __raw_readw __raw_readw
-+static inline u16 __raw_readw(const volatile void __iomem *addr)
-+{
-+ u16 val;
-+
-+ asm volatile("lh %0, 0(%1)" : "=r" (val) : "r" (addr));
-+ return val;
-+}
-+
-+#define __raw_readl __raw_readl
-+static inline u32 __raw_readl(const volatile void __iomem *addr)
-+{
-+ u32 val;
-+
-+ asm volatile("lw %0, 0(%1)" : "=r" (val) : "r" (addr));
-+ return val;
-+}
-+
-+#ifdef CONFIG_64BIT
-+#define __raw_readq __raw_readq
-+static inline u64 __raw_readq(const volatile void __iomem *addr)
-+{
-+ u64 val;
-+
-+ asm volatile("ld %0, 0(%1)" : "=r" (val) : "r" (addr));
-+ return val;
-+}
-+#endif
-+
-+/*
-+ * Unordered I/O memory access primitives. These are even more relaxed than
-+ * the relaxed versions, as they don't even order accesses between successive
-+ * operations to the I/O regions.
-+ */
-+#define readb_cpu(c) ({ u8 __r = __raw_readb(c); __r; })
-+#define readw_cpu(c) ({ u16 __r = le16_to_cpu((__force __le16)__raw_readw(c)); __r; })
-+#define readl_cpu(c) ({ u32 __r = le32_to_cpu((__force __le32)__raw_readl(c)); __r; })
-+
-+#define writeb_cpu(v, c) ((void)__raw_writeb((v), (c)))
-+#define writew_cpu(v, c) ((void)__raw_writew((__force u16)cpu_to_le16(v), (c)))
-+#define writel_cpu(v, c) ((void)__raw_writel((__force u32)cpu_to_le32(v), (c)))
-+
-+#ifdef CONFIG_64BIT
-+#define readq_cpu(c) ({ u64 __r = le64_to_cpu((__force __le64)__raw_readq(c)); __r; })
-+#define writeq_cpu(v, c) ((void)__raw_writeq((__force u64)cpu_to_le64(v), (c)))
-+#endif
-+
-+/*
-+ * Relaxed I/O memory access primitives. These follow the Device memory
-+ * ordering rules but do not guarantee any ordering relative to Normal memory
-+ * accesses. These are defined to order the indicated access (either a read or
-+ * write) with all other I/O memory accesses. Since the platform specification
-+ * defines that all I/O regions are strongly ordered on channel 2, no explicit
-+ * fences are required to enforce this ordering.
-+ */
-+/* FIXME: These are now the same as asm-generic */
-+#define __io_rbr() do {} while (0)
-+#define __io_rar() do {} while (0)
-+#define __io_rbw() do {} while (0)
-+#define __io_raw() do {} while (0)
-+
-+#define readb_relaxed(c) ({ u8 __v; __io_rbr(); __v = readb_cpu(c); __io_rar(); __v; })
-+#define readw_relaxed(c) ({ u16 __v; __io_rbr(); __v = readw_cpu(c); __io_rar(); __v; })
-+#define readl_relaxed(c) ({ u32 __v; __io_rbr(); __v = readl_cpu(c); __io_rar(); __v; })
-+
-+#define writeb_relaxed(v, c) ({ __io_rbw(); writeb_cpu((v), (c)); __io_raw(); })
-+#define writew_relaxed(v, c) ({ __io_rbw(); writew_cpu((v), (c)); __io_raw(); })
-+#define writel_relaxed(v, c) ({ __io_rbw(); writel_cpu((v), (c)); __io_raw(); })
-+
-+#ifdef CONFIG_64BIT
-+#define readq_relaxed(c) ({ u64 __v; __io_rbr(); __v = readq_cpu(c); __io_rar(); __v; })
-+#define writeq_relaxed(v, c) ({ __io_rbw(); writeq_cpu((v), (c)); __io_raw(); })
-+#endif
-+
-+/*
-+ * I/O memory access primitives. Reads are ordered relative to any
-+ * following Normal memory access. Writes are ordered relative to any prior
-+ * Normal memory access. The memory barriers here are necessary as RISC-V
-+ * doesn't define any ordering between the memory space and the I/O space.
-+ */
-+#define __io_br() do {} while (0)
-+#define __io_ar(v) __asm__ __volatile__ ("fence i,r" : : : "memory")
-+#define __io_bw() __asm__ __volatile__ ("fence w,o" : : : "memory")
-+#define __io_aw() mmiowb_set_pending()
-+
-+#define readb(c) ({ u8 __v; __io_br(); __v = readb_cpu(c); __io_ar(__v); __v; })
-+#define readw(c) ({ u16 __v; __io_br(); __v = readw_cpu(c); __io_ar(__v); __v; })
-+#define readl(c) ({ u32 __v; __io_br(); __v = readl_cpu(c); __io_ar(__v); __v; })
-+
-+#define writeb(v, c) ({ __io_bw(); writeb_cpu((v), (c)); __io_aw(); })
-+#define writew(v, c) ({ __io_bw(); writew_cpu((v), (c)); __io_aw(); })
-+#define writel(v, c) ({ __io_bw(); writel_cpu((v), (c)); __io_aw(); })
-+
-+#ifdef CONFIG_64BIT
-+#define readq(c) ({ u64 __v; __io_br(); __v = readq_cpu(c); __io_ar(__v); __v; })
-+#define writeq(v, c) ({ __io_bw(); writeq_cpu((v), (c)); __io_aw(); })
-+#endif
-+
-+#endif /* _ASM_RISCV_MMIO_H */
---
-2.40.1
-
diff --git a/queue-5.4/series b/queue-5.4/series
index 73dd706d9d7..67b0a804b39 100644
--- a/queue-5.4/series
+++ b/queue-5.4/series
@@ -1,7 +1,5 @@
 mmc-sdhci_f_sdh30-convert-to-devm_platform_ioremap_r.patch
 mmc-sdhci-f-sdh30-replace-with-sdhci_pltfm.patch
-riscv-separate-mmio-functions-into-their-own-header-.patch
-riscv-mmio-fix-readx-to-delay-ordering.patch
 selftests-forwarding-tc_flower-relax-success-criteri.patch
 macsec-fix-traffic-counters-statistics.patch
 macsec-use-dev_stats_inc.patch
--
2.47.3