From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Date: Mon, 15 Apr 2024 14:23:20 +0000 (+0200)
Subject: drop some 5.10 patches
X-Git-Tag: v5.15.156~2
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=e57038249ad2c683428713772bbd3ade55b35086;p=thirdparty%2Fkernel%2Fstable-queue.git

drop some 5.10 patches
---

diff --git a/queue-5.10/series b/queue-5.10/series
index 651d31a0598..b8ecb6603be 100644
--- a/queue-5.10/series
+++ b/queue-5.10/series
@@ -3,8 +3,6 @@ bluetooth-fix-memory-leak-in-hci_req_sync_complete.patch
 media-cec-core-remove-length-check-of-timer-status.patch
 nouveau-fix-function-cast-warning.patch
 net-openvswitch-fix-unwanted-error-log-on-timeout-po.patch
-u64_stats-disable-preemption-on-32bit-up-smp-preempt.patch
-u64_stats-streamline-the-implementation.patch
 u64_stats-fix-u64_stats_init-for-lockdep-when-used-r.patch
 xsk-validate-user-input-for-xdp_-umem-completion-_fi.patch
 geneve-fix-header-validation-in-geneve-6-_xmit_skb.patch
diff --git a/queue-5.10/u64_stats-disable-preemption-on-32bit-up-smp-preempt.patch b/queue-5.10/u64_stats-disable-preemption-on-32bit-up-smp-preempt.patch
deleted file mode 100644
index 9de0b8539d1..00000000000
--- a/queue-5.10/u64_stats-disable-preemption-on-32bit-up-smp-preempt.patch
+++ /dev/null
@@ -1,164 +0,0 @@
-From 34ec7b09c7f0abc2e742f68e5ce91cc53054406f Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Fri, 10 Dec 2021 21:29:59 +0100
-Subject: u64_stats: Disable preemption on 32bit UP+SMP PREEMPT_RT during
- updates.
-
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-
-[ Upstream commit 3c118547f87e930d45a5787e386734015dd93b32 ]
-
-On PREEMPT_RT the seqcount_t for synchronisation is required on 32bit
-architectures even on UP because the softirq (and the threaded IRQ handler) can
-be preempted.
-
-With the seqcount_t for synchronisation, a reader with higher priority can
-preempt the writer and then spin endlessly in read_seqcount_begin() while the
-writer can't make progress.
-
-To avoid such a lock up on PREEMPT_RT the writer must disable preemption during
-the update. There is no need to disable interrupts because no writer is using
-this API in hard-IRQ context on PREEMPT_RT.
-
-Disable preemption on 32bit-RT within the u64_stats write section.
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Signed-off-by: David S. Miller <davem@davemloft.net>
-Stable-dep-of: 38a15d0a50e0 ("u64_stats: fix u64_stats_init() for lockdep when used repeatedly in one file")
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- include/linux/u64_stats_sync.h | 42 ++++++++++++++++++++++------------
- 1 file changed, 28 insertions(+), 14 deletions(-)
-
-diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h
-index e81856c0ba134..6a0f2097d3709 100644
---- a/include/linux/u64_stats_sync.h
-+++ b/include/linux/u64_stats_sync.h
-@@ -66,7 +66,7 @@
- #include <linux/seqlock.h>
- 
- struct u64_stats_sync {
--#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
-+#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
- 	seqcount_t seq;
- #endif
- };
-@@ -115,7 +115,7 @@ static inline void u64_stats_inc(u64_stats_t *p)
- }
- #endif
- 
--#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
-+#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
- #define u64_stats_init(syncp)	seqcount_init(&(syncp)->seq)
- #else
- static inline void u64_stats_init(struct u64_stats_sync *syncp)
-@@ -125,15 +125,19 @@ static inline void u64_stats_init(struct u64_stats_sync *syncp)
- 
- static inline void u64_stats_update_begin(struct u64_stats_sync *syncp)
- {
--#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
-+#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
-+	if (IS_ENABLED(CONFIG_PREEMPT_RT))
-+		preempt_disable();
- 	write_seqcount_begin(&syncp->seq);
- #endif
- }
- 
- static inline void u64_stats_update_end(struct u64_stats_sync *syncp)
- {
--#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
-+#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
- 	write_seqcount_end(&syncp->seq);
-+	if (IS_ENABLED(CONFIG_PREEMPT_RT))
-+		preempt_enable();
- #endif
- }
- 
-@@ -142,8 +146,11 @@ u64_stats_update_begin_irqsave(struct u64_stats_sync *syncp)
- {
- 	unsigned long flags = 0;
- 
--#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
--	local_irq_save(flags);
-+#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
-+	if (IS_ENABLED(CONFIG_PREEMPT_RT))
-+		preempt_disable();
-+	else
-+		local_irq_save(flags);
- 	write_seqcount_begin(&syncp->seq);
- #endif
- 	return flags;
-@@ -153,15 +160,18 @@ static inline void
- u64_stats_update_end_irqrestore(struct u64_stats_sync *syncp,
- 				unsigned long flags)
- {
--#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
-+#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
- 	write_seqcount_end(&syncp->seq);
--	local_irq_restore(flags);
-+	if (IS_ENABLED(CONFIG_PREEMPT_RT))
-+		preempt_enable();
-+	else
-+		local_irq_restore(flags);
- #endif
- }
- 
- static inline unsigned int __u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
- {
--#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
-+#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
- 	return read_seqcount_begin(&syncp->seq);
- #else
- 	return 0;
-@@ -170,7 +180,7 @@ static inline unsigned int __u64_stats_fetch_begin(const struct u64_stats_sync *
- 
- static inline unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
- {
--#if BITS_PER_LONG==32 && !defined(CONFIG_SMP)
-+#if BITS_PER_LONG == 32 && (!defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_RT))
- 	preempt_disable();
- #endif
- 	return __u64_stats_fetch_begin(syncp);
-@@ -179,7 +189,7 @@ static inline unsigned int u64_stats_fetch_begin(const struct u64_stats_sy
- 
- static inline bool __u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
- 					   unsigned int start)
- {
--#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
-+#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
- 	return read_seqcount_retry(&syncp->seq, start);
- #else
- 	return false;
-@@ -189,7 +199,7 @@ static inline bool __u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
- static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
- 					 unsigned int start)
- {
--#if BITS_PER_LONG==32 && !defined(CONFIG_SMP)
-+#if BITS_PER_LONG == 32 && (!defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_RT))
- 	preempt_enable();
- #endif
- 	return __u64_stats_fetch_retry(syncp, start);
-@@ -203,7 +213,9 @@ static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
-  */
- static inline unsigned int u64_stats_fetch_begin_irq(const struct u64_stats_sync *syncp)
- {
--#if BITS_PER_LONG==32 && !defined(CONFIG_SMP)
-+#if BITS_PER_LONG == 32 && defined(CONFIG_PREEMPT_RT)
-+	preempt_disable();
-+#elif BITS_PER_LONG == 32 && !defined(CONFIG_SMP)
- 	local_irq_disable();
- #endif
- 	return __u64_stats_fetch_begin(syncp);
-@@ -212,7 +224,9 @@ static inline unsigned int u64_stats_fetch_begin_irq(const struct u64_stats_sync
- static inline bool u64_stats_fetch_retry_irq(const struct u64_stats_sync *syncp,
- 					     unsigned int start)
- {
--#if BITS_PER_LONG==32 && !defined(CONFIG_SMP)
-+#if BITS_PER_LONG == 32 && defined(CONFIG_PREEMPT_RT)
-+	preempt_enable();
-+#elif BITS_PER_LONG == 32 && !defined(CONFIG_SMP)
- 	local_irq_enable();
- #endif
- 	return __u64_stats_fetch_retry(syncp, start);
---
-2.43.0
-
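
For context, the write/read pattern the patch above hardens looks roughly like
this in a typical stats user. The struct and function names here are
hypothetical, sketched only for illustration; the calls themselves
(u64_stats_update_begin/end, u64_stats_fetch_begin/retry, u64_stats_add,
u64_stats_read) are the real interfaces from include/linux/u64_stats_sync.h:

	#include <linux/u64_stats_sync.h>

	/* Hypothetical counters guarded by one u64_stats_sync;
	 * u64_stats_init(&s->syncp) is assumed to run at setup time.
	 */
	struct demo_stats {
		u64_stats_t		rx_packets;
		u64_stats_t		rx_bytes;
		struct u64_stats_sync	syncp;
	};

	/* Writer, e.g. NAPI/softirq context. On 32-bit the seqcount in
	 * syncp brackets the update; with the patch above, PREEMPT_RT
	 * additionally disables preemption here so a higher-priority
	 * reader can never preempt a writer mid-update and then spin
	 * forever in read_seqcount_begin().
	 */
	static void demo_stats_rx(struct demo_stats *s, unsigned int len)
	{
		u64_stats_update_begin(&s->syncp);
		u64_stats_add(&s->rx_packets, 1);
		u64_stats_add(&s->rx_bytes, len);
		u64_stats_update_end(&s->syncp);
	}

	/* Reader: retries until the sequence count is unchanged across
	 * both reads, i.e. until it holds an untorn snapshot.
	 */
	static void demo_stats_read(struct demo_stats *s, u64 *packets, u64 *bytes)
	{
		unsigned int start;

		do {
			start = u64_stats_fetch_begin(&s->syncp);
			*packets = u64_stats_read(&s->rx_packets);
			*bytes   = u64_stats_read(&s->rx_bytes);
		} while (u64_stats_fetch_retry(&s->syncp, start));
	}
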
diff --git a/queue-5.10/u64_stats-fix-u64_stats_init-for-lockdep-when-used-r.patch b/queue-5.10/u64_stats-fix-u64_stats_init-for-lockdep-when-used-r.patch
index e34b450b8e6..26c77295292 100644
--- a/queue-5.10/u64_stats-fix-u64_stats_init-for-lockdep-when-used-r.patch
+++ b/queue-5.10/u64_stats-fix-u64_stats_init-for-lockdep-when-used-r.patch
@@ -1,4 +1,4 @@
-From d126067245d71c1d4a831a86eedbc702a5c6480a Mon Sep 17 00:00:00 2001
+From 57a453fd0d2c0878818807a76fda9f9fa4353f32 Mon Sep 17 00:00:00 2001
 From: Sasha Levin <sashal@kernel.org>
 Date: Thu, 4 Apr 2024 09:57:40 +0200
 Subject: u64_stats: fix u64_stats_init() for lockdep when used repeatedly in
@@ -28,29 +28,21 @@ Link: https://lore.kernel.org/r/20240404075740.30682-1-petr@tesarici.cz
 Signed-off-by: Jakub Kicinski <kuba@kernel.org>
 Signed-off-by: Sasha Levin <sashal@kernel.org>
 ---
- include/linux/u64_stats_sync.h | 9 +++++----
- 1 file changed, 5 insertions(+), 4 deletions(-)
+ include/linux/u64_stats_sync.h | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
 
-diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h
-index 51f2e16b9540b..11c3162dade3b 100644
 --- a/include/linux/u64_stats_sync.h
 +++ b/include/linux/u64_stats_sync.h
-@@ -125,10 +125,11 @@ static inline void u64_stats_inc(u64_stats_t *p)
- 	p->v++;
- }
+@@ -116,7 +116,11 @@ static inline void u64_stats_inc(u64_sta
+ #endif
  
--static inline void u64_stats_init(struct u64_stats_sync *syncp)
--{
--	seqcount_init(&syncp->seq);
--}
+ #if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
+-#define u64_stats_init(syncp)	seqcount_init(&(syncp)->seq)
 +#define u64_stats_init(syncp)				\
 +	do {						\
 +		struct u64_stats_sync *__s = (syncp);	\
 +		seqcount_init(&__s->seq);		\
 +	} while (0)
- 
- static inline void __u64_stats_update_begin(struct u64_stats_sync *syncp)
+ #else
+ static inline void u64_stats_init(struct u64_stats_sync *syncp)
  {
---
-2.43.0
-
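
The reason the fix keeps u64_stats_init() as a macro rather than an inline
function: with lockdep enabled, seqcount_init() declares a static
lock_class_key at each expansion site, so every textual use of the macro gets
its own lockdep class. A single inline function has exactly one expansion
site, so every u64_stats_sync initialized through it would share one class,
which is what produced the bogus reports when one file initializes several of
them. The __s temporary merely keeps the macro argument from being evaluated
twice. A hypothetical illustration (invented names, not code from the patch):

	#include <linux/init.h>
	#include <linux/u64_stats_sync.h>

	/* Two unrelated stats blocks initialized in the same file. */
	static struct u64_stats_sync demo_rx_syncp;
	static struct u64_stats_sync demo_tx_syncp;

	static int __init demo_stats_setup(void)
	{
		/* Each expansion below declares its own static
		 * lock_class_key via seqcount_init(), so the two
		 * seqcounts land in distinct lockdep classes and
		 * touching both is not misreported as deadlock-prone
		 * recursion within one class.
		 */
		u64_stats_init(&demo_rx_syncp);
		u64_stats_init(&demo_tx_syncp);
		return 0;
	}
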
diff --git a/queue-5.10/u64_stats-streamline-the-implementation.patch b/queue-5.10/u64_stats-streamline-the-implementation.patch
deleted file mode 100644
index ebfe04ebf3f..00000000000
--- a/queue-5.10/u64_stats-streamline-the-implementation.patch
+++ /dev/null
@@ -1,274 +0,0 @@
-From 2253d0d81b05aa0961a0c4f8a1199b5626bb15d0 Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Thu, 25 Aug 2022 18:41:31 +0200
-Subject: u64_stats: Streamline the implementation
-
-From: Thomas Gleixner <tglx@linutronix.de>
-
-[ Upstream commit 44b0c2957adc62b86fcd51adeaf8e993171bc319 ]
-
-The u64 stats code handles 3 different cases:
-
-  - 32bit UP
-  - 32bit SMP
-  - 64bit
-
-with an unreadable #ifdef maze, which was recently expanded with PREEMPT_RT
-conditionals.
-
-Reduce it to two cases (32bit and 64bit) and drop the optimization for
-32bit UP as suggested by Linus.
-
-Use the new preempt_disable/enable_nested() helpers to get rid of the
-CONFIG_PREEMPT_RT conditionals.
-
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
-Link: https://lore.kernel.org/r/20220825164131.402717-9-bigeasy@linutronix.de
-Stable-dep-of: 38a15d0a50e0 ("u64_stats: fix u64_stats_init() for lockdep when used repeatedly in one file")
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- include/linux/u64_stats_sync.h | 145 +++++++++++++++------------------
- 1 file changed, 64 insertions(+), 81 deletions(-)
-
-diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h
-index 6a0f2097d3709..51f2e16b9540b 100644
---- a/include/linux/u64_stats_sync.h
-+++ b/include/linux/u64_stats_sync.h
-@@ -8,7 +8,7 @@
-  *
-  * Key points :
-  *
-- * - Use a seqcount on 32-bit SMP, only disable preemption for 32-bit UP.
-+ * - Use a seqcount on 32-bit
-  * - The whole thing is a no-op on 64-bit architectures.
-  *
-  * Usage constraints:
-@@ -20,7 +20,8 @@
-  *    writer and also spin forever.
-  *
-  * 3) Write side must use the _irqsave() variant if other writers, or a reader,
-- *    can be invoked from an IRQ context.
-+ *    can be invoked from an IRQ context. On 64bit systems this variant does not
-+ *    disable interrupts.
-  *
-  * 4) If reader fetches several counters, there is no guarantee the whole values
-  *    are consistent w.r.t. each other (remember point #2: seqcounts are not
-@@ -29,11 +30,6 @@
-  * 5) Readers are allowed to sleep or be preempted/interrupted: they perform
-  *    pure reads.
-  *
-- * 6) Readers must use both u64_stats_fetch_{begin,retry}_irq() if the stats
-- *    might be updated from a hardirq or softirq context (remember point #1:
-- *    seqcounts are not used for UP kernels). 32-bit UP stat readers could read
-- *    corrupted 64-bit values otherwise.
-- *
-  * Usage :
-  *
-  * Stats producer (writer) should use following template granted it already got
-@@ -66,7 +62,7 @@
- #include <linux/seqlock.h>
- 
- struct u64_stats_sync {
--#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
-+#if BITS_PER_LONG == 32
- 	seqcount_t seq;
- #endif
- };
-@@ -93,7 +89,22 @@ static inline void u64_stats_inc(u64_stats_t *p)
- 	local64_inc(&p->v);
- }
- 
--#else
-+static inline void u64_stats_init(struct u64_stats_sync *syncp) { }
-+static inline void __u64_stats_update_begin(struct u64_stats_sync *syncp) { }
-+static inline void __u64_stats_update_end(struct u64_stats_sync *syncp) { }
-+static inline unsigned long __u64_stats_irqsave(void) { return 0; }
-+static inline void __u64_stats_irqrestore(unsigned long flags) { }
-+static inline unsigned int __u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
-+{
-+	return 0;
-+}
-+static inline bool __u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
-+					   unsigned int start)
-+{
-+	return false;
-+}
-+
-+#else /* 64 bit */
- 
- typedef struct {
- 	u64		v;
-@@ -113,123 +124,95 @@ static inline void u64_stats_inc(u64_stats_t *p)
- {
- 	p->v++;
- }
--#endif
- 
--#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
--#define u64_stats_init(syncp)	seqcount_init(&(syncp)->seq)
--#else
- static inline void u64_stats_init(struct u64_stats_sync *syncp)
- {
-+	seqcount_init(&syncp->seq);
- }
--#endif
- 
--static inline void u64_stats_update_begin(struct u64_stats_sync *syncp)
-+static inline void __u64_stats_update_begin(struct u64_stats_sync *syncp)
- {
--#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
--	if (IS_ENABLED(CONFIG_PREEMPT_RT))
--		preempt_disable();
-+	preempt_disable_nested();
- 	write_seqcount_begin(&syncp->seq);
--#endif
- }
- 
--static inline void u64_stats_update_end(struct u64_stats_sync *syncp)
-+static inline void __u64_stats_update_end(struct u64_stats_sync *syncp)
- {
--#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
- 	write_seqcount_end(&syncp->seq);
--	if (IS_ENABLED(CONFIG_PREEMPT_RT))
--		preempt_enable();
--#endif
-+	preempt_enable_nested();
- }
- 
--static inline unsigned long
--u64_stats_update_begin_irqsave(struct u64_stats_sync *syncp)
-+static inline unsigned long __u64_stats_irqsave(void)
- {
--	unsigned long flags = 0;
-+	unsigned long flags;
- 
--#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
--	if (IS_ENABLED(CONFIG_PREEMPT_RT))
--		preempt_disable();
--	else
--		local_irq_save(flags);
--	write_seqcount_begin(&syncp->seq);
--#endif
-+	local_irq_save(flags);
- 	return flags;
- }
- 
--static inline void
--u64_stats_update_end_irqrestore(struct u64_stats_sync *syncp,
--				unsigned long flags)
-+static inline void __u64_stats_irqrestore(unsigned long flags)
- {
--#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
--	write_seqcount_end(&syncp->seq);
--	if (IS_ENABLED(CONFIG_PREEMPT_RT))
--		preempt_enable();
--	else
--		local_irq_restore(flags);
--#endif
-+	local_irq_restore(flags);
- }
- 
- static inline unsigned int __u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
- {
--#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
- 	return read_seqcount_begin(&syncp->seq);
--#else
--	return 0;
--#endif
- }
- 
--static inline unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
-+static inline bool __u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
-+					   unsigned int start)
- {
--#if BITS_PER_LONG == 32 && (!defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_RT))
--	preempt_disable();
--#endif
--	return __u64_stats_fetch_begin(syncp);
-+	return read_seqcount_retry(&syncp->seq, start);
- }
-+#endif /* !64 bit */
- 
--static inline bool __u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
--					   unsigned int start)
-+static inline void u64_stats_update_begin(struct u64_stats_sync *syncp)
- {
--#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
--	return read_seqcount_retry(&syncp->seq, start);
--#else
--	return false;
--#endif
-+	__u64_stats_update_begin(syncp);
-+}
-+
-+static inline void u64_stats_update_end(struct u64_stats_sync *syncp)
-+{
-+	__u64_stats_update_end(syncp);
-+}
-+
-+static inline unsigned long u64_stats_update_begin_irqsave(struct u64_stats_sync *syncp)
-+{
-+	unsigned long flags = __u64_stats_irqsave();
-+
-+	__u64_stats_update_begin(syncp);
-+	return flags;
-+}
-+
-+static inline void u64_stats_update_end_irqrestore(struct u64_stats_sync *syncp,
-+						   unsigned long flags)
-+{
-+	__u64_stats_update_end(syncp);
-+	__u64_stats_irqrestore(flags);
-+}
-+
-+static inline unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
-+{
-+	return __u64_stats_fetch_begin(syncp);
- }
- 
- static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
- 					 unsigned int start)
- {
--#if BITS_PER_LONG == 32 && (!defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_RT))
--	preempt_enable();
--#endif
- 	return __u64_stats_fetch_retry(syncp, start);
- }
- 
--/*
-- * In case irq handlers can update u64 counters, readers can use following helpers
-- * - SMP 32bit arches use seqcount protection, irq safe.
-- * - UP 32bit must disable irqs.
-- * - 64bit have no problem atomically reading u64 values, irq safe.
-- */
-+/* Obsolete interfaces */
- static inline unsigned int u64_stats_fetch_begin_irq(const struct u64_stats_sync *syncp)
- {
--#if BITS_PER_LONG == 32 && defined(CONFIG_PREEMPT_RT)
--	preempt_disable();
--#elif BITS_PER_LONG == 32 && !defined(CONFIG_SMP)
--	local_irq_disable();
--#endif
--	return __u64_stats_fetch_begin(syncp);
-+	return u64_stats_fetch_begin(syncp);
- }
- 
- static inline bool u64_stats_fetch_retry_irq(const struct u64_stats_sync *syncp,
- 					     unsigned int start)
- {
--#if BITS_PER_LONG == 32 && defined(CONFIG_PREEMPT_RT)
--	preempt_enable();
--#elif BITS_PER_LONG == 32 && !defined(CONFIG_SMP)
--	local_irq_enable();
--#endif
--	return __u64_stats_fetch_retry(syncp, start);
-+	return u64_stats_fetch_retry(syncp, start);
- }
- 
- #endif /* _LINUX_U64_STATS_SYNC_H */
---
-2.43.0
-
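
For reference, the preempt_disable/enable_nested() helpers this dropped
cleanup relies on were introduced upstream in the same series and are not
part of the v5.10 tree itself. Paraphrased from memory of the upstream
include/linux/preempt.h (treat the exact bodies as an approximation, not the
5.10 source):

	/* On !PREEMPT_RT these are (almost) no-ops because the caller is
	 * expected to already be non-preemptible for other reasons; on
	 * PREEMPT_RT they really toggle preemption, which is all the
	 * 32-bit u64_stats write side needs.
	 */
	static __always_inline void preempt_disable_nested(void)
	{
		if (IS_ENABLED(CONFIG_PREEMPT_RT))
			preempt_disable();
		else
			lockdep_assert_preemption_disabled();
	}

	static __always_inline void preempt_enable_nested(void)
	{
		if (IS_ENABLED(CONFIG_PREEMPT_RT))
			preempt_enable();
	}

With the two prerequisite patches dropped from the queue, the rebased lockdep
fix above applies directly against the original 5.10 layout, so neither the
PREEMPT_RT write-side change nor these helpers are needed in this branch.
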