From: Greg Kroah-Hartman Date: Mon, 15 Apr 2024 14:18:52 +0000 (+0200) Subject: clean up some 5.4 patches that were not needed. X-Git-Tag: v5.15.156~3 X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=df124119119c36566f0888d0aca479cc051d8037;p=thirdparty%2Fkernel%2Fstable-queue.git clean up some 5.4 patches that were not needed. --- diff --git a/queue-5.4/series b/queue-5.4/series index 739a5cee54b..94b842c9f6e 100644 --- a/queue-5.4/series +++ b/queue-5.4/series @@ -2,10 +2,6 @@ batman-adv-avoid-infinite-loop-trying-to-resize-local-tt.patch bluetooth-fix-memory-leak-in-hci_req_sync_complete.patch nouveau-fix-function-cast-warning.patch net-openvswitch-fix-unwanted-error-log-on-timeout-po.patch -u64_stats-provide-u64_stats_t-type.patch -u64_stats-document-writer-non-preemptibility-require.patch -u64_stats-disable-preemption-on-32bit-up-smp-preempt.patch -u64_stats-streamline-the-implementation.patch u64_stats-fix-u64_stats_init-for-lockdep-when-used-r.patch geneve-fix-header-validation-in-geneve-6-_xmit_skb.patch ipv6-fib-hide-unused-pn-variable.patch diff --git a/queue-5.4/u64_stats-disable-preemption-on-32bit-up-smp-preempt.patch b/queue-5.4/u64_stats-disable-preemption-on-32bit-up-smp-preempt.patch deleted file mode 100644 index 75cdb2494bd..00000000000 --- a/queue-5.4/u64_stats-disable-preemption-on-32bit-up-smp-preempt.patch +++ /dev/null @@ -1,164 +0,0 @@ -From e52ce42c17d9dc844bdc8c461fd2319e3c0d7e07 Mon Sep 17 00:00:00 2001 -From: Sasha Levin -Date: Fri, 10 Dec 2021 21:29:59 +0100 -Subject: u64_stats: Disable preemption on 32bit UP+SMP PREEMPT_RT during - updates. - -From: Sebastian Andrzej Siewior - -[ Upstream commit 3c118547f87e930d45a5787e386734015dd93b32 ] - -On PREEMPT_RT the seqcount_t for synchronisation is required on 32bit -architectures even on UP because the softirq (and the threaded IRQ handler) can -be preempted. - -With the seqcount_t for synchronisation, a reader with higher priority can -preempt the writer and then spin endlessly in read_seqcount_begin() while the -writer can't make progress. - -To avoid such a lock up on PREEMPT_RT the writer must disable preemption during -the update. There is no need to disable interrupts because no writer is using -this API in hard-IRQ context on PREEMPT_RT. - -Disable preemption on 32bit-RT within the u64_stats write section. - -Signed-off-by: Sebastian Andrzej Siewior -Signed-off-by: David S. 
Miller -Stable-dep-of: 38a15d0a50e0 ("u64_stats: fix u64_stats_init() for lockdep when used repeatedly in one file") -Signed-off-by: Sasha Levin ---- - include/linux/u64_stats_sync.h | 42 ++++++++++++++++++++++------------ - 1 file changed, 28 insertions(+), 14 deletions(-) - -diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h -index e81856c0ba134..6a0f2097d3709 100644 ---- a/include/linux/u64_stats_sync.h -+++ b/include/linux/u64_stats_sync.h -@@ -66,7 +66,7 @@ - #include - - struct u64_stats_sync { --#if BITS_PER_LONG==32 && defined(CONFIG_SMP) -+#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)) - seqcount_t seq; - #endif - }; -@@ -115,7 +115,7 @@ static inline void u64_stats_inc(u64_stats_t *p) - } - #endif - --#if BITS_PER_LONG == 32 && defined(CONFIG_SMP) -+#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)) - #define u64_stats_init(syncp) seqcount_init(&(syncp)->seq) - #else - static inline void u64_stats_init(struct u64_stats_sync *syncp) -@@ -125,15 +125,19 @@ static inline void u64_stats_init(struct u64_stats_sync *syncp) - - static inline void u64_stats_update_begin(struct u64_stats_sync *syncp) - { --#if BITS_PER_LONG==32 && defined(CONFIG_SMP) -+#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)) -+ if (IS_ENABLED(CONFIG_PREEMPT_RT)) -+ preempt_disable(); - write_seqcount_begin(&syncp->seq); - #endif - } - - static inline void u64_stats_update_end(struct u64_stats_sync *syncp) - { --#if BITS_PER_LONG==32 && defined(CONFIG_SMP) -+#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)) - write_seqcount_end(&syncp->seq); -+ if (IS_ENABLED(CONFIG_PREEMPT_RT)) -+ preempt_enable(); - #endif - } - -@@ -142,8 +146,11 @@ u64_stats_update_begin_irqsave(struct u64_stats_sync *syncp) - { - unsigned long flags = 0; - --#if BITS_PER_LONG==32 && defined(CONFIG_SMP) -- local_irq_save(flags); -+#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)) -+ if (IS_ENABLED(CONFIG_PREEMPT_RT)) -+ preempt_disable(); -+ else -+ local_irq_save(flags); - write_seqcount_begin(&syncp->seq); - #endif - return flags; -@@ -153,15 +160,18 @@ static inline void - u64_stats_update_end_irqrestore(struct u64_stats_sync *syncp, - unsigned long flags) - { --#if BITS_PER_LONG==32 && defined(CONFIG_SMP) -+#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)) - write_seqcount_end(&syncp->seq); -- local_irq_restore(flags); -+ if (IS_ENABLED(CONFIG_PREEMPT_RT)) -+ preempt_enable(); -+ else -+ local_irq_restore(flags); - #endif - } - - static inline unsigned int __u64_stats_fetch_begin(const struct u64_stats_sync *syncp) - { --#if BITS_PER_LONG==32 && defined(CONFIG_SMP) -+#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)) - return read_seqcount_begin(&syncp->seq); - #else - return 0; -@@ -170,7 +180,7 @@ static inline unsigned int __u64_stats_fetch_begin(const struct u64_stats_sync * - - static inline unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *syncp) - { --#if BITS_PER_LONG==32 && !defined(CONFIG_SMP) -+#if BITS_PER_LONG == 32 && (!defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_RT)) - preempt_disable(); - #endif - return __u64_stats_fetch_begin(syncp); -@@ -179,7 +189,7 @@ static inline unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *sy - static inline bool __u64_stats_fetch_retry(const struct u64_stats_sync *syncp, - unsigned int start) - { --#if BITS_PER_LONG==32 && 
defined(CONFIG_SMP) -+#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)) - return read_seqcount_retry(&syncp->seq, start); - #else - return false; -@@ -189,7 +199,7 @@ static inline bool __u64_stats_fetch_retry(const struct u64_stats_sync *syncp, - static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp, - unsigned int start) - { --#if BITS_PER_LONG==32 && !defined(CONFIG_SMP) -+#if BITS_PER_LONG == 32 && (!defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_RT)) - preempt_enable(); - #endif - return __u64_stats_fetch_retry(syncp, start); -@@ -203,7 +213,9 @@ static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp, - */ - static inline unsigned int u64_stats_fetch_begin_irq(const struct u64_stats_sync *syncp) - { --#if BITS_PER_LONG==32 && !defined(CONFIG_SMP) -+#if BITS_PER_LONG == 32 && defined(CONFIG_PREEMPT_RT) -+ preempt_disable(); -+#elif BITS_PER_LONG == 32 && !defined(CONFIG_SMP) - local_irq_disable(); - #endif - return __u64_stats_fetch_begin(syncp); -@@ -212,7 +224,9 @@ static inline unsigned int u64_stats_fetch_begin_irq(const struct u64_stats_sync - static inline bool u64_stats_fetch_retry_irq(const struct u64_stats_sync *syncp, - unsigned int start) - { --#if BITS_PER_LONG==32 && !defined(CONFIG_SMP) -+#if BITS_PER_LONG == 32 && defined(CONFIG_PREEMPT_RT) -+ preempt_enable(); -+#elif BITS_PER_LONG == 32 && !defined(CONFIG_SMP) - local_irq_enable(); - #endif - return __u64_stats_fetch_retry(syncp, start); --- -2.43.0 - diff --git a/queue-5.4/u64_stats-document-writer-non-preemptibility-require.patch b/queue-5.4/u64_stats-document-writer-non-preemptibility-require.patch deleted file mode 100644 index e7f5f89cc43..00000000000 --- a/queue-5.4/u64_stats-document-writer-non-preemptibility-require.patch +++ /dev/null @@ -1,109 +0,0 @@ -From 4569bd2e0123de30826d9e83d88e7c7f6802f253 Mon Sep 17 00:00:00 2001 -From: Sasha Levin -Date: Wed, 3 Jun 2020 16:49:46 +0200 -Subject: u64_stats: Document writer non-preemptibility requirement -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -From: Ahmed S. Darwish - -[ Upstream commit 6501bf87602f799b7e502014f8bc0aa58b868277 ] - -The u64_stats mechanism uses sequence counters to protect against 64-bit -values tearing on 32-bit architectures. Updating such statistics is a -sequence counter write side critical section. - -Preemption must be disabled before entering this seqcount write critical -section. Failing to do so, the seqcount read side can preempt the write -side section and spin for the entire scheduler tick. If that reader -belongs to a real-time scheduling class, it can spin forever and the -kernel will livelock. - -Document this statistics update side non-preemptibility requirement. - -Reword the introductory paragraph to highlight u64_stats raison d'être: -64-bit values tearing protection on 32-bit architectures. Divide -documentation on a basis of internal design vs. usage constraints. - -Reword the u64_stats header file top comment to always mention "Reader" -or "Writer" at the start of each bullet point, making it easier to -follow which side each point is actually for. - -Clarify the statement "whole thing is a NOOP on 64bit arches or UP -kernels". For 32-bit UP kernels, preemption is always disabled for the -statistics read side section. - -Signed-off-by: Ahmed S. Darwish -Reviewed-by: Sebastian Andrzej Siewior -Signed-off-by: David S. 
Miller -Stable-dep-of: 38a15d0a50e0 ("u64_stats: fix u64_stats_init() for lockdep when used repeatedly in one file") -Signed-off-by: Sasha Levin ---- - include/linux/u64_stats_sync.h | 43 ++++++++++++++++++---------------- - 1 file changed, 23 insertions(+), 20 deletions(-) - -diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h -index 7c316a9fb3ae5..e81856c0ba134 100644 ---- a/include/linux/u64_stats_sync.h -+++ b/include/linux/u64_stats_sync.h -@@ -3,33 +3,36 @@ - #define _LINUX_U64_STATS_SYNC_H - - /* -- * To properly implement 64bits network statistics on 32bit and 64bit hosts, -- * we provide a synchronization point, that is a noop on 64bit or UP kernels. -+ * Protect against 64-bit values tearing on 32-bit architectures. This is -+ * typically used for statistics read/update in different subsystems. - * - * Key points : -- * 1) Use a seqcount on SMP 32bits, with low overhead. -- * 2) Whole thing is a noop on 64bit arches or UP kernels. -- * 3) Write side must ensure mutual exclusion or one seqcount update could -+ * -+ * - Use a seqcount on 32-bit SMP, only disable preemption for 32-bit UP. -+ * - The whole thing is a no-op on 64-bit architectures. -+ * -+ * Usage constraints: -+ * -+ * 1) Write side must ensure mutual exclusion, or one seqcount update could - * be lost, thus blocking readers forever. -- * If this synchronization point is not a mutex, but a spinlock or -- * spinlock_bh() or disable_bh() : -- * 3.1) Write side should not sleep. -- * 3.2) Write side should not allow preemption. -- * 3.3) If applicable, interrupts should be disabled. - * -- * 4) If reader fetches several counters, there is no guarantee the whole values -- * are consistent (remember point 1) : this is a noop on 64bit arches anyway) -+ * 2) Write side must disable preemption, or a seqcount reader can preempt the -+ * writer and also spin forever. -+ * -+ * 3) Write side must use the _irqsave() variant if other writers, or a reader, -+ * can be invoked from an IRQ context. - * -- * 5) readers are allowed to sleep or be preempted/interrupted : They perform -- * pure reads. But if they have to fetch many values, it's better to not allow -- * preemptions/interruptions to avoid many retries. -+ * 4) If reader fetches several counters, there is no guarantee the whole values -+ * are consistent w.r.t. each other (remember point #2: seqcounts are not -+ * used for 64bit architectures). - * -- * 6) If counter might be written by an interrupt, readers should block interrupts. -- * (On UP, there is no seqcount_t protection, a reader allowing interrupts could -- * read partial values) -+ * 5) Readers are allowed to sleep or be preempted/interrupted: they perform -+ * pure reads. - * -- * 7) For irq and softirq uses, readers can use u64_stats_fetch_begin_irq() and -- * u64_stats_fetch_retry_irq() helpers -+ * 6) Readers must use both u64_stats_fetch_{begin,retry}_irq() if the stats -+ * might be updated from a hardirq or softirq context (remember point #1: -+ * seqcounts are not used for UP kernels). 32-bit UP stat readers could read -+ * corrupted 64-bit values otherwise. 
- * - * Usage : - * --- -2.43.0 - diff --git a/queue-5.4/u64_stats-fix-u64_stats_init-for-lockdep-when-used-r.patch b/queue-5.4/u64_stats-fix-u64_stats_init-for-lockdep-when-used-r.patch index a9203f847c2..1fe9e683bfd 100644 --- a/queue-5.4/u64_stats-fix-u64_stats_init-for-lockdep-when-used-r.patch +++ b/queue-5.4/u64_stats-fix-u64_stats_init-for-lockdep-when-used-r.patch @@ -28,29 +28,21 @@ Link: https://lore.kernel.org/r/20240404075740.30682-1-petr@tesarici.cz Signed-off-by: Jakub Kicinski Signed-off-by: Sasha Levin --- - include/linux/u64_stats_sync.h | 9 +++++---- - 1 file changed, 5 insertions(+), 4 deletions(-) + include/linux/u64_stats_sync.h | 6 +++++- + 1 file changed, 5 insertions(+), 1 deletion(-) -diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h -index 51f2e16b9540b..11c3162dade3b 100644 --- a/include/linux/u64_stats_sync.h +++ b/include/linux/u64_stats_sync.h -@@ -125,10 +125,11 @@ static inline void u64_stats_inc(u64_stats_t *p) - p->v++; - } +@@ -70,7 +70,11 @@ struct u64_stats_sync { --static inline void u64_stats_init(struct u64_stats_sync *syncp) --{ -- seqcount_init(&syncp->seq); --} + + #if BITS_PER_LONG == 32 && defined(CONFIG_SMP) +-#define u64_stats_init(syncp) seqcount_init(&(syncp)->seq) +#define u64_stats_init(syncp) \ + do { \ + struct u64_stats_sync *__s = (syncp); \ + seqcount_init(&__s->seq); \ + } while (0) - - static inline void __u64_stats_update_begin(struct u64_stats_sync *syncp) + #else + static inline void u64_stats_init(struct u64_stats_sync *syncp) { --- -2.43.0 - diff --git a/queue-5.4/u64_stats-provide-u64_stats_t-type.patch b/queue-5.4/u64_stats-provide-u64_stats_t-type.patch deleted file mode 100644 index eecc7bba20d..00000000000 --- a/queue-5.4/u64_stats-provide-u64_stats_t-type.patch +++ /dev/null @@ -1,110 +0,0 @@ -From ad77147ffaa2c9efa50a5c47d029e694a1d08c59 Mon Sep 17 00:00:00 2001 -From: Sasha Levin -Date: Thu, 7 Nov 2019 16:27:20 -0800 -Subject: u64_stats: provide u64_stats_t type - -From: Eric Dumazet - -[ Upstream commit 316580b69d0a7aeeee5063af47438b626bc47cbd ] - -On 64bit arches, struct u64_stats_sync is empty and provides -no help against load/store tearing. - -Using READ_ONCE()/WRITE_ONCE() would be needed. - -But the update side would be slightly more expensive. - -local64_t was defined so that we could use regular adds -in a manner which is atomic wrt IRQs. - -However the u64_stats infra means we do not have to use -local64_t on 32bit arches since the syncp provides the needed -protection. - -Signed-off-by: Eric Dumazet -Signed-off-by: David S. Miller -Stable-dep-of: 38a15d0a50e0 ("u64_stats: fix u64_stats_init() for lockdep when used repeatedly in one file") -Signed-off-by: Sasha Levin ---- - include/linux/u64_stats_sync.h | 51 +++++++++++++++++++++++++++++++--- - 1 file changed, 47 insertions(+), 4 deletions(-) - -diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h -index 11096b561dab6..7c316a9fb3ae5 100644 ---- a/include/linux/u64_stats_sync.h -+++ b/include/linux/u64_stats_sync.h -@@ -40,8 +40,8 @@ - * spin_lock_bh(...) or other synchronization to get exclusive access - * ... 
- * u64_stats_update_begin(&stats->syncp); -- * stats->bytes64 += len; // non atomic operation -- * stats->packets64++; // non atomic operation -+ * u64_stats_add(&stats->bytes64, len); // non atomic operation -+ * u64_stats_inc(&stats->packets64); // non atomic operation - * u64_stats_update_end(&stats->syncp); - * - * While a consumer (reader) should use following template to get consistent -@@ -52,8 +52,8 @@ - * - * do { - * start = u64_stats_fetch_begin(&stats->syncp); -- * tbytes = stats->bytes64; // non atomic operation -- * tpackets = stats->packets64; // non atomic operation -+ * tbytes = u64_stats_read(&stats->bytes64); // non atomic operation -+ * tpackets = u64_stats_read(&stats->packets64); // non atomic operation - * } while (u64_stats_fetch_retry(&stats->syncp, start)); - * - * -@@ -68,6 +68,49 @@ struct u64_stats_sync { - #endif - }; - -+#if BITS_PER_LONG == 64 -+#include -+ -+typedef struct { -+ local64_t v; -+} u64_stats_t ; -+ -+static inline u64 u64_stats_read(const u64_stats_t *p) -+{ -+ return local64_read(&p->v); -+} -+ -+static inline void u64_stats_add(u64_stats_t *p, unsigned long val) -+{ -+ local64_add(val, &p->v); -+} -+ -+static inline void u64_stats_inc(u64_stats_t *p) -+{ -+ local64_inc(&p->v); -+} -+ -+#else -+ -+typedef struct { -+ u64 v; -+} u64_stats_t; -+ -+static inline u64 u64_stats_read(const u64_stats_t *p) -+{ -+ return p->v; -+} -+ -+static inline void u64_stats_add(u64_stats_t *p, unsigned long val) -+{ -+ p->v += val; -+} -+ -+static inline void u64_stats_inc(u64_stats_t *p) -+{ -+ p->v++; -+} -+#endif - - #if BITS_PER_LONG == 32 && defined(CONFIG_SMP) - #define u64_stats_init(syncp) seqcount_init(&(syncp)->seq) --- -2.43.0 - diff --git a/queue-5.4/u64_stats-streamline-the-implementation.patch b/queue-5.4/u64_stats-streamline-the-implementation.patch deleted file mode 100644 index f25d91643ce..00000000000 --- a/queue-5.4/u64_stats-streamline-the-implementation.patch +++ /dev/null @@ -1,274 +0,0 @@ -From ad3e1e2f65363db776c16c0367741b8a772f3e48 Mon Sep 17 00:00:00 2001 -From: Sasha Levin -Date: Thu, 25 Aug 2022 18:41:31 +0200 -Subject: u64_stats: Streamline the implementation - -From: Thomas Gleixner - -[ Upstream commit 44b0c2957adc62b86fcd51adeaf8e993171bc319 ] - -The u64 stats code handles 3 different cases: - - - 32bit UP - - 32bit SMP - - 64bit - -with an unreadable #ifdef maze, which was recently expanded with PREEMPT_RT -conditionals. - -Reduce it to two cases (32bit and 64bit) and drop the optimization for -32bit UP as suggested by Linus. - -Use the new preempt_disable/enable_nested() helpers to get rid of the -CONFIG_PREEMPT_RT conditionals. - -Signed-off-by: Thomas Gleixner -Signed-off-by: Sebastian Andrzej Siewior -Signed-off-by: Thomas Gleixner -Acked-by: Peter Zijlstra (Intel) -Link: https://lore.kernel.org/r/20220825164131.402717-9-bigeasy@linutronix.de -Stable-dep-of: 38a15d0a50e0 ("u64_stats: fix u64_stats_init() for lockdep when used repeatedly in one file") -Signed-off-by: Sasha Levin ---- - include/linux/u64_stats_sync.h | 145 +++++++++++++++------------------ - 1 file changed, 64 insertions(+), 81 deletions(-) - -diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h -index 6a0f2097d3709..51f2e16b9540b 100644 ---- a/include/linux/u64_stats_sync.h -+++ b/include/linux/u64_stats_sync.h -@@ -8,7 +8,7 @@ - * - * Key points : - * -- * - Use a seqcount on 32-bit SMP, only disable preemption for 32-bit UP. -+ * - Use a seqcount on 32-bit - * - The whole thing is a no-op on 64-bit architectures. 
- * - * Usage constraints: -@@ -20,7 +20,8 @@ - * writer and also spin forever. - * - * 3) Write side must use the _irqsave() variant if other writers, or a reader, -- * can be invoked from an IRQ context. -+ * can be invoked from an IRQ context. On 64bit systems this variant does not -+ * disable interrupts. - * - * 4) If reader fetches several counters, there is no guarantee the whole values - * are consistent w.r.t. each other (remember point #2: seqcounts are not -@@ -29,11 +30,6 @@ - * 5) Readers are allowed to sleep or be preempted/interrupted: they perform - * pure reads. - * -- * 6) Readers must use both u64_stats_fetch_{begin,retry}_irq() if the stats -- * might be updated from a hardirq or softirq context (remember point #1: -- * seqcounts are not used for UP kernels). 32-bit UP stat readers could read -- * corrupted 64-bit values otherwise. -- * - * Usage : - * - * Stats producer (writer) should use following template granted it already got -@@ -66,7 +62,7 @@ - #include - - struct u64_stats_sync { --#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)) -+#if BITS_PER_LONG == 32 - seqcount_t seq; - #endif - }; -@@ -93,7 +89,22 @@ static inline void u64_stats_inc(u64_stats_t *p) - local64_inc(&p->v); - } - --#else -+static inline void u64_stats_init(struct u64_stats_sync *syncp) { } -+static inline void __u64_stats_update_begin(struct u64_stats_sync *syncp) { } -+static inline void __u64_stats_update_end(struct u64_stats_sync *syncp) { } -+static inline unsigned long __u64_stats_irqsave(void) { return 0; } -+static inline void __u64_stats_irqrestore(unsigned long flags) { } -+static inline unsigned int __u64_stats_fetch_begin(const struct u64_stats_sync *syncp) -+{ -+ return 0; -+} -+static inline bool __u64_stats_fetch_retry(const struct u64_stats_sync *syncp, -+ unsigned int start) -+{ -+ return false; -+} -+ -+#else /* 64 bit */ - - typedef struct { - u64 v; -@@ -113,123 +124,95 @@ static inline void u64_stats_inc(u64_stats_t *p) - { - p->v++; - } --#endif - --#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)) --#define u64_stats_init(syncp) seqcount_init(&(syncp)->seq) --#else - static inline void u64_stats_init(struct u64_stats_sync *syncp) - { -+ seqcount_init(&syncp->seq); - } --#endif - --static inline void u64_stats_update_begin(struct u64_stats_sync *syncp) -+static inline void __u64_stats_update_begin(struct u64_stats_sync *syncp) - { --#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)) -- if (IS_ENABLED(CONFIG_PREEMPT_RT)) -- preempt_disable(); -+ preempt_disable_nested(); - write_seqcount_begin(&syncp->seq); --#endif - } - --static inline void u64_stats_update_end(struct u64_stats_sync *syncp) -+static inline void __u64_stats_update_end(struct u64_stats_sync *syncp) - { --#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)) - write_seqcount_end(&syncp->seq); -- if (IS_ENABLED(CONFIG_PREEMPT_RT)) -- preempt_enable(); --#endif -+ preempt_enable_nested(); - } - --static inline unsigned long --u64_stats_update_begin_irqsave(struct u64_stats_sync *syncp) -+static inline unsigned long __u64_stats_irqsave(void) - { -- unsigned long flags = 0; -+ unsigned long flags; - --#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)) -- if (IS_ENABLED(CONFIG_PREEMPT_RT)) -- preempt_disable(); -- else -- local_irq_save(flags); -- write_seqcount_begin(&syncp->seq); --#endif -+ local_irq_save(flags); - return flags; - } - --static inline void 
--u64_stats_update_end_irqrestore(struct u64_stats_sync *syncp, -- unsigned long flags) -+static inline void __u64_stats_irqrestore(unsigned long flags) - { --#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)) -- write_seqcount_end(&syncp->seq); -- if (IS_ENABLED(CONFIG_PREEMPT_RT)) -- preempt_enable(); -- else -- local_irq_restore(flags); --#endif -+ local_irq_restore(flags); - } - - static inline unsigned int __u64_stats_fetch_begin(const struct u64_stats_sync *syncp) - { --#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)) - return read_seqcount_begin(&syncp->seq); --#else -- return 0; --#endif - } - --static inline unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *syncp) -+static inline bool __u64_stats_fetch_retry(const struct u64_stats_sync *syncp, -+ unsigned int start) - { --#if BITS_PER_LONG == 32 && (!defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_RT)) -- preempt_disable(); --#endif -- return __u64_stats_fetch_begin(syncp); -+ return read_seqcount_retry(&syncp->seq, start); - } -+#endif /* !64 bit */ - --static inline bool __u64_stats_fetch_retry(const struct u64_stats_sync *syncp, -- unsigned int start) -+static inline void u64_stats_update_begin(struct u64_stats_sync *syncp) - { --#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)) -- return read_seqcount_retry(&syncp->seq, start); --#else -- return false; --#endif -+ __u64_stats_update_begin(syncp); -+} -+ -+static inline void u64_stats_update_end(struct u64_stats_sync *syncp) -+{ -+ __u64_stats_update_end(syncp); -+} -+ -+static inline unsigned long u64_stats_update_begin_irqsave(struct u64_stats_sync *syncp) -+{ -+ unsigned long flags = __u64_stats_irqsave(); -+ -+ __u64_stats_update_begin(syncp); -+ return flags; -+} -+ -+static inline void u64_stats_update_end_irqrestore(struct u64_stats_sync *syncp, -+ unsigned long flags) -+{ -+ __u64_stats_update_end(syncp); -+ __u64_stats_irqrestore(flags); -+} -+ -+static inline unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *syncp) -+{ -+ return __u64_stats_fetch_begin(syncp); - } - - static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp, - unsigned int start) - { --#if BITS_PER_LONG == 32 && (!defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_RT)) -- preempt_enable(); --#endif - return __u64_stats_fetch_retry(syncp, start); - } - --/* -- * In case irq handlers can update u64 counters, readers can use following helpers -- * - SMP 32bit arches use seqcount protection, irq safe. -- * - UP 32bit must disable irqs. -- * - 64bit have no problem atomically reading u64 values, irq safe. -- */ -+/* Obsolete interfaces */ - static inline unsigned int u64_stats_fetch_begin_irq(const struct u64_stats_sync *syncp) - { --#if BITS_PER_LONG == 32 && defined(CONFIG_PREEMPT_RT) -- preempt_disable(); --#elif BITS_PER_LONG == 32 && !defined(CONFIG_SMP) -- local_irq_disable(); --#endif -- return __u64_stats_fetch_begin(syncp); -+ return u64_stats_fetch_begin(syncp); - } - - static inline bool u64_stats_fetch_retry_irq(const struct u64_stats_sync *syncp, - unsigned int start) - { --#if BITS_PER_LONG == 32 && defined(CONFIG_PREEMPT_RT) -- preempt_enable(); --#elif BITS_PER_LONG == 32 && !defined(CONFIG_SMP) -- local_irq_enable(); --#endif -- return __u64_stats_fetch_retry(syncp, start); -+ return u64_stats_fetch_retry(syncp, start); - } - - #endif /* _LINUX_U64_STATS_SYNC_H */ --- -2.43.0 -
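
For context, below is a minimal usage sketch of the API these queued patches touch. The struct and function names (demo_stats, demo_update, demo_read) are made up for illustration only; the u64_stats_* helpers are the ones visible in the include/linux/u64_stats_sync.h hunks above, and this is not code from any of the patches themselves.

    #include <linux/u64_stats_sync.h>

    struct demo_stats {
    	u64_stats_t		bytes;
    	u64_stats_t		packets;
    	struct u64_stats_sync	syncp;
    };

    /*
     * Writer side: per the documentation patch, the writer must not be
     * preempted while inside the update section (mutual exclusion is
     * assumed to be provided by the caller, e.g. a per-CPU context or
     * a lock).
     */
    static void demo_update(struct demo_stats *s, unsigned int len)
    {
    	u64_stats_update_begin(&s->syncp);
    	u64_stats_add(&s->bytes, len);	/* non-atomic 64-bit add */
    	u64_stats_inc(&s->packets);	/* non-atomic 64-bit increment */
    	u64_stats_update_end(&s->syncp);
    }

    /*
     * Reader side: seqcount retry loop. On 64-bit architectures the
     * fetch_begin/fetch_retry pair is a no-op, since the seqcount only
     * exists on 32-bit.
     */
    static void demo_read(struct demo_stats *s, u64 *bytes, u64 *packets)
    {
    	unsigned int start;

    	do {
    		start = u64_stats_fetch_begin(&s->syncp);
    		*bytes = u64_stats_read(&s->bytes);
    		*packets = u64_stats_read(&s->packets);
    	} while (u64_stats_fetch_retry(&s->syncp, start));
    }

The sync point would be set up once at init time with u64_stats_init(&s->syncp); the retained 5.4 patch above changes exactly that initializer so that, per its subject line, lockdep can cope with u64_stats_init() being used repeatedly in one file.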