--- /dev/null
+From 9ef2ff7976fa10afe28d6b8090e689944a4b9264 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 18 Feb 2024 18:41:37 +0100
+Subject: clocksource/drivers/arm_global_timer: Fix maximum prescaler value
+
+From: Martin Blumenstingl <martin.blumenstingl@googlemail.com>
+
+[ Upstream commit b34b9547cee41575a4fddf390f615570759dc999 ]
+
+The prescaler in the "Global Timer Control Register bit assignments" is
+documented to use bits [15:8], which means that the maximum prescaler
+register value is 0xff.
+
+Fixes: 171b45a4a70e ("clocksource/drivers/arm_global_timer: Implement rate compensation whenever source clock changes")
+Signed-off-by: Martin Blumenstingl <martin.blumenstingl@googlemail.com>
+Signed-off-by: Daniel Lezcano <daniel.lezcano@linaro.org>
+Link: https://lore.kernel.org/r/20240218174138.1942418-2-martin.blumenstingl@googlemail.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/clocksource/arm_global_timer.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/clocksource/arm_global_timer.c b/drivers/clocksource/arm_global_timer.c
+index 44a61dc6f9320..e1c773bb55359 100644
+--- a/drivers/clocksource/arm_global_timer.c
++++ b/drivers/clocksource/arm_global_timer.c
+@@ -32,7 +32,7 @@
+ #define GT_CONTROL_IRQ_ENABLE BIT(2) /* banked */
+ #define GT_CONTROL_AUTO_INC BIT(3) /* banked */
+ #define GT_CONTROL_PRESCALER_SHIFT 8
+-#define GT_CONTROL_PRESCALER_MAX 0xF
++#define GT_CONTROL_PRESCALER_MAX 0xFF
+ #define GT_CONTROL_PRESCALER_MASK (GT_CONTROL_PRESCALER_MAX << \
+ GT_CONTROL_PRESCALER_SHIFT)
+
+--
+2.43.0
+
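As a minimal stand-alone sketch (not part of the patch; constant names are copied from the driver, the demo itself is an illustration): with the old 4-bit GT_CONTROL_PRESCALER_MAX, any documented prescaler value above 15 is silently truncated before it reaches bits [15:8] of the control register:

    #include <stdio.h>
    #include <stdint.h>

    #define GT_CONTROL_PRESCALER_SHIFT  8
    #define OLD_PRESCALER_MAX           0xF   /* buggy: only 4 bits */
    #define NEW_PRESCALER_MAX           0xFF  /* documented: bits [15:8] */

    /* Mask the prescaler, then shift it into the control register field. */
    static uint32_t encode(uint32_t prescaler, uint32_t max)
    {
        return (prescaler & max) << GT_CONTROL_PRESCALER_SHIFT;
    }

    int main(void)
    {
        uint32_t prescaler = 0x2A; /* 42: legal per the documentation, > 0xF */

        printf("old mask: 0x%02x -> 0x%08x\n", prescaler,
               encode(prescaler, OLD_PRESCALER_MAX)); /* 0x00000a00 */
        printf("new mask: 0x%02x -> 0x%08x\n", prescaler,
               encode(prescaler, NEW_PRESCALER_MAX)); /* 0x00002a00 */
        return 0;
    }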
--- /dev/null
+From 5616f15ddd367a2ddb3a67ecdb4fa352ccdc57de Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 14 Jul 2022 19:15:25 +0800
+Subject: dma-iommu: add iommu_dma_opt_mapping_size()
+
+From: John Garry <john.garry@huawei.com>
+
+[ Upstream commit 6d9870b7e5def2450e21316515b9efc0529204dd ]
+
+Add the IOMMU callback for the DMA mapping API dma_opt_mapping_size(),
+which allows drivers to learn the optimal mapping limit and thus cap
+their requested IOVA lengths.
+
+This value is based on the IOVA rcache range limit, as IOVAs allocated
+above this limit must always be newly allocated, which may be quite slow.
+
+Signed-off-by: John Garry <john.garry@huawei.com>
+Reviewed-by: Damien Le Moal <damien.lemoal@opensource.wdc.com>
+Acked-by: Robin Murphy <robin.murphy@arm.com>
+Acked-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Stable-dep-of: afc5aa46ed56 ("iommu/dma: Force swiotlb_max_mapping_size on an untrusted device")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/iommu/dma-iommu.c | 6 ++++++
+ drivers/iommu/iova.c | 5 +++++
+ include/linux/iova.h | 2 ++
+ 3 files changed, 13 insertions(+)
+
+diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
+index 8cd63e6ccd2cf..a76a46e94a606 100644
+--- a/drivers/iommu/dma-iommu.c
++++ b/drivers/iommu/dma-iommu.c
+@@ -1291,6 +1291,11 @@ static unsigned long iommu_dma_get_merge_boundary(struct device *dev)
+ return (1UL << __ffs(domain->pgsize_bitmap)) - 1;
+ }
+
++static size_t iommu_dma_opt_mapping_size(void)
++{
++ return iova_rcache_range();
++}
++
+ static const struct dma_map_ops iommu_dma_ops = {
+ .alloc = iommu_dma_alloc,
+ .free = iommu_dma_free,
+@@ -1313,6 +1318,7 @@ static const struct dma_map_ops iommu_dma_ops = {
+ .map_resource = iommu_dma_map_resource,
+ .unmap_resource = iommu_dma_unmap_resource,
+ .get_merge_boundary = iommu_dma_get_merge_boundary,
++ .opt_mapping_size = iommu_dma_opt_mapping_size,
+ };
+
+ /*
+diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
+index 0835f32e040ad..f6dfb9e45e953 100644
+--- a/drivers/iommu/iova.c
++++ b/drivers/iommu/iova.c
+@@ -27,6 +27,11 @@ static void free_iova_rcaches(struct iova_domain *iovad);
+ static void fq_destroy_all_entries(struct iova_domain *iovad);
+ static void fq_flush_timeout(struct timer_list *t);
+
++unsigned long iova_rcache_range(void)
++{
++ return PAGE_SIZE << (IOVA_RANGE_CACHE_MAX_SIZE - 1);
++}
++
+ static int iova_cpuhp_dead(unsigned int cpu, struct hlist_node *node)
+ {
+ struct iova_domain *iovad;
+diff --git a/include/linux/iova.h b/include/linux/iova.h
+index 6b6cc104e300d..9aa0acf9820af 100644
+--- a/include/linux/iova.h
++++ b/include/linux/iova.h
+@@ -137,6 +137,8 @@ static inline unsigned long iova_pfn(struct iova_domain *iovad, dma_addr_t iova)
+ int iova_cache_get(void);
+ void iova_cache_put(void);
+
++unsigned long iova_rcache_range(void);
++
+ void free_iova(struct iova_domain *iovad, unsigned long pfn);
+ void __free_iova(struct iova_domain *iovad, struct iova *iova);
+ struct iova *alloc_iova(struct iova_domain *iovad, unsigned long size,
+--
+2.43.0
+
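For scale, a hedged user-space sketch of what iova_rcache_range() evaluates to. Upstream iova.c defines IOVA_RANGE_CACHE_MAX_SIZE as 6 (log of the maximum cached IOVA range size in pages); treat the constant as an assumption here:

    #include <stdio.h>

    #define IOVA_RANGE_CACHE_MAX_SIZE 6 /* assumed, mirrors iova.c */

    int main(void)
    {
        unsigned long page_sizes[] = { 4096UL, 16384UL, 65536UL };

        for (int i = 0; i < 3; i++)
            printf("PAGE_SIZE %6lu -> iova_rcache_range() = %8lu\n",
                   page_sizes[i],
                   page_sizes[i] << (IOVA_RANGE_CACHE_MAX_SIZE - 1));
        return 0;
    }

So with 4KB pages the rcache covers mappings up to 128KB, and with 64KB pages up to 2MB.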
--- /dev/null
+From f3accc328fee29391f06da3097bc0cc49b7f84b3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 14 Jul 2022 19:15:24 +0800
+Subject: dma-mapping: add dma_opt_mapping_size()
+
+From: John Garry <john.garry@huawei.com>
+
+[ Upstream commit a229cc14f3395311b899e5e582b71efa8dd01df0 ]
+
+Streaming DMA mapping involving an IOMMU may be much slower for larger
+total mapping size. This is because every IOMMU DMA mapping requires an
+IOVA to be allocated and freed. IOVA sizes above a certain limit are not
+cached, which can have a big impact on DMA mapping performance.
+
+Provide an API for device drivers to know this "optimal" limit, such that
+they may try to produce mappings which don't exceed it.
+
+Signed-off-by: John Garry <john.garry@huawei.com>
+Reviewed-by: Damien Le Moal <damien.lemoal@opensource.wdc.com>
+Acked-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Stable-dep-of: afc5aa46ed56 ("iommu/dma: Force swiotlb_max_mapping_size on an untrusted device")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ Documentation/core-api/dma-api.rst | 14 ++++++++++++++
+ include/linux/dma-map-ops.h | 1 +
+ include/linux/dma-mapping.h | 5 +++++
+ kernel/dma/mapping.c | 12 ++++++++++++
+ 4 files changed, 32 insertions(+)
+
+diff --git a/Documentation/core-api/dma-api.rst b/Documentation/core-api/dma-api.rst
+index 6d6d0edd2d278..829f20a193cab 100644
+--- a/Documentation/core-api/dma-api.rst
++++ b/Documentation/core-api/dma-api.rst
+@@ -204,6 +204,20 @@ Returns the maximum size of a mapping for the device. The size parameter
+ of the mapping functions like dma_map_single(), dma_map_page() and
+ others should not be larger than the returned value.
+
++::
++
++ size_t
++ dma_opt_mapping_size(struct device *dev);
++
++Returns the maximum optimal size of a mapping for the device.
++
++Mapping larger buffers may take much longer in certain scenarios. In
++addition, for high-rate short-lived streaming mappings, the upfront time
++spent on the mapping may account for an appreciable part of the total
++request lifetime. As such, if splitting larger requests incurs no
++significant performance penalty, then device drivers are advised to
++limit total DMA streaming mappings length to the returned value.
++
+ ::
+
+ bool
+diff --git a/include/linux/dma-map-ops.h b/include/linux/dma-map-ops.h
+index bfffe494356ad..2ff55ec902f48 100644
+--- a/include/linux/dma-map-ops.h
++++ b/include/linux/dma-map-ops.h
+@@ -69,6 +69,7 @@ struct dma_map_ops {
+ int (*dma_supported)(struct device *dev, u64 mask);
+ u64 (*get_required_mask)(struct device *dev);
+ size_t (*max_mapping_size)(struct device *dev);
++ size_t (*opt_mapping_size)(void);
+ unsigned long (*get_merge_boundary)(struct device *dev);
+ };
+
+diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
+index dca2b1355bb13..fe3849434b2a2 100644
+--- a/include/linux/dma-mapping.h
++++ b/include/linux/dma-mapping.h
+@@ -144,6 +144,7 @@ int dma_set_mask(struct device *dev, u64 mask);
+ int dma_set_coherent_mask(struct device *dev, u64 mask);
+ u64 dma_get_required_mask(struct device *dev);
+ size_t dma_max_mapping_size(struct device *dev);
++size_t dma_opt_mapping_size(struct device *dev);
+ bool dma_need_sync(struct device *dev, dma_addr_t dma_addr);
+ unsigned long dma_get_merge_boundary(struct device *dev);
+ struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size,
+@@ -266,6 +267,10 @@ static inline size_t dma_max_mapping_size(struct device *dev)
+ {
+ return 0;
+ }
++static inline size_t dma_opt_mapping_size(struct device *dev)
++{
++ return 0;
++}
+ static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
+ {
+ return false;
+diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c
+index 9478eccd1c8e6..c9dbc8f5812b8 100644
+--- a/kernel/dma/mapping.c
++++ b/kernel/dma/mapping.c
+@@ -777,6 +777,18 @@ size_t dma_max_mapping_size(struct device *dev)
+ }
+ EXPORT_SYMBOL_GPL(dma_max_mapping_size);
+
++size_t dma_opt_mapping_size(struct device *dev)
++{
++ const struct dma_map_ops *ops = get_dma_ops(dev);
++ size_t size = SIZE_MAX;
++
++ if (ops && ops->opt_mapping_size)
++ size = ops->opt_mapping_size();
++
++ return min(dma_max_mapping_size(dev), size);
++}
++EXPORT_SYMBOL_GPL(dma_opt_mapping_size);
++
+ bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
+ {
+ const struct dma_map_ops *ops = get_dma_ops(dev);
+--
+2.43.0
+
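A hedged usage sketch for a hypothetical block driver (my_driver_tune_limits is illustrative, not part of this series): cap the hardware request size at the optimal mapping length so streaming mappings stay within the IOVA rcache:

    #include <linux/blkdev.h>
    #include <linux/dma-mapping.h>

    /* Hypothetical helper: clamp a request queue to the optimal DMA
     * mapping size. SIZE_MAX means no ops->opt_mapping_size is set,
     * in which case the queue defaults are left alone. */
    static void my_driver_tune_limits(struct device *dev,
                                      struct request_queue *q)
    {
        size_t opt = dma_opt_mapping_size(dev);

        if (opt != SIZE_MAX)
            blk_queue_max_hw_sectors(q, opt >> SECTOR_SHIFT);
    }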
--- /dev/null
+From de245de28bab089671e2df5559429da6db1fbb4f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 11 Mar 2024 21:17:04 +0000
+Subject: entry: Respect changes to system call number by trace_sys_enter()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: André Rösti <an.roesti@gmail.com>
+
+[ Upstream commit fb13b11d53875e28e7fbf0c26b288e4ea676aa9f ]
+
+When a probe is registered at the trace_sys_enter() tracepoint, and that
+probe changes the system call number, the old system call still gets
+executed. This worked correctly until commit b6ec41346103 ("core/entry:
+Report syscall correctly for trace and audit"), which removed the
+re-evaluation of the syscall number after the trace point.
+
+Restore the original semantics by re-evaluating the system call number
+after trace_sys_enter().
+
+The performance impact of this re-evaluation is minimal because it only
+takes place when a trace point is active, and compared to the actual trace
+point overhead the read from a cache hot variable is negligible.
+
+Fixes: b6ec41346103 ("core/entry: Report syscall correctly for trace and audit")
+Signed-off-by: André Rösti <an.roesti@gmail.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Link: https://lore.kernel.org/r/20240311211704.7262-1-an.roesti@gmail.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/entry/common.c | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+diff --git a/kernel/entry/common.c b/kernel/entry/common.c
+index e002bea6b4be3..d11d4047a0ca8 100644
+--- a/kernel/entry/common.c
++++ b/kernel/entry/common.c
+@@ -73,8 +73,14 @@ static long syscall_trace_enter(struct pt_regs *regs, long syscall,
+ /* Either of the above might have changed the syscall number */
+ syscall = syscall_get_nr(current, regs);
+
+- if (unlikely(work & SYSCALL_WORK_SYSCALL_TRACEPOINT))
++ if (unlikely(work & SYSCALL_WORK_SYSCALL_TRACEPOINT)) {
+ trace_sys_enter(regs, syscall);
++ /*
++ * Probes or BPF hooks in the tracepoint may have changed the
++ * system call number as well.
++ */
++ syscall = syscall_get_nr(current, regs);
++ }
+
+ syscall_enter_audit(regs, syscall);
+
+--
+2.43.0
+
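To see why the second read is needed, a hedged sketch of a probe (registered with register_trace_sys_enter()) that rewrites the syscall number; the orig_ax field is the x86-64 storage for the syscall number and is used purely for illustration:

    #include <linux/ptrace.h>
    #include <linux/tracepoint.h>
    #include <asm/unistd.h>

    /* Hypothetical sys_enter probe: redirect getpid to gettid. After it
     * runs, only a fresh syscall_get_nr() sees the new number; without
     * the re-read above, audit and dispatch still use the stale value. */
    static void my_sys_enter_probe(void *data, struct pt_regs *regs, long id)
    {
        if (id == __NR_getpid)
            regs->orig_ax = __NR_gettid;
    }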
--- /dev/null
+From 6480029abc85349ce2bb5607c40a41dd4354f5d0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 8 Mar 2024 15:28:28 +0000
+Subject: iommu/dma: Force swiotlb_max_mapping_size on an untrusted device
+
+From: Nicolin Chen <nicolinc@nvidia.com>
+
+[ Upstream commit afc5aa46ed560f01ceda897c053c6a40c77ce5c4 ]
+
+The swiotlb does not support a mapping size > swiotlb_max_mapping_size().
+However, with a 64KB PAGE_SIZE configuration, an NVMe device has been
+observed to request mappings of 300KB~512KB, which exceed that limit and
+therefore fail, even though the default swiotlb pool has many free slots:
+ systemd[1]: Started Journal Service.
+ => nvme 0000:00:01.0: swiotlb buffer is full (sz: 327680 bytes), total 32768 (slots), used 32 (slots)
+ note: journal-offline[392] exited with irqs disabled
+ note: journal-offline[392] exited with preempt_count 1
+
+Call trace:
+[ 3.099918] swiotlb_tbl_map_single+0x214/0x240
+[ 3.099921] iommu_dma_map_page+0x218/0x328
+[ 3.099928] dma_map_page_attrs+0x2e8/0x3a0
+[ 3.101985] nvme_prep_rq.part.0+0x408/0x878 [nvme]
+[ 3.102308] nvme_queue_rqs+0xc0/0x300 [nvme]
+[ 3.102313] blk_mq_flush_plug_list.part.0+0x57c/0x600
+[ 3.102321] blk_add_rq_to_plug+0x180/0x2a0
+[ 3.102323] blk_mq_submit_bio+0x4c8/0x6b8
+[ 3.103463] __submit_bio+0x44/0x220
+[ 3.103468] submit_bio_noacct_nocheck+0x2b8/0x360
+[ 3.103470] submit_bio_noacct+0x180/0x6c8
+[ 3.103471] submit_bio+0x34/0x130
+[ 3.103473] ext4_bio_write_folio+0x5a4/0x8c8
+[ 3.104766] mpage_submit_folio+0xa0/0x100
+[ 3.104769] mpage_map_and_submit_buffers+0x1a4/0x400
+[ 3.104771] ext4_do_writepages+0x6a0/0xd78
+[ 3.105615] ext4_writepages+0x80/0x118
+[ 3.105616] do_writepages+0x90/0x1e8
+[ 3.105619] filemap_fdatawrite_wbc+0x94/0xe0
+[ 3.105622] __filemap_fdatawrite_range+0x68/0xb8
+[ 3.106656] file_write_and_wait_range+0x84/0x120
+[ 3.106658] ext4_sync_file+0x7c/0x4c0
+[ 3.106660] vfs_fsync_range+0x3c/0xa8
+[ 3.106663] do_fsync+0x44/0xc0
+
+Since untrusted devices might go down the swiotlb pathway with dma-iommu,
+these devices should not map a size larger than swiotlb_max_mapping_size.
+
+To fix this bug, add iommu_dma_max_mapping_size(), which returns
+swiotlb_max_mapping_size() for untrusted devices so that the swiotlb
+limit is taken into account alongside the iova_rcache_range() value
+reported by iommu_dma_opt_mapping_size().
+
+Fixes: 82612d66d51d ("iommu: Allow the dma-iommu api to use bounce buffers")
+Link: https://lore.kernel.org/r/ee51a3a5c32cf885b18f6416171802669f4a718a.1707851466.git.nicolinc@nvidia.com
+Signed-off-by: Nicolin Chen <nicolinc@nvidia.com>
+[will: Drop redundant is_swiotlb_active(dev) check]
+Signed-off-by: Will Deacon <will@kernel.org>
+Reviewed-by: Michael Kelley <mhklinux@outlook.com>
+Acked-by: Robin Murphy <robin.murphy@arm.com>
+Tested-by: Nicolin Chen <nicolinc@nvidia.com>
+Tested-by: Michael Kelley <mhklinux@outlook.com>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/iommu/dma-iommu.c | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
+index a76a46e94a606..aa47d955de6ba 100644
+--- a/drivers/iommu/dma-iommu.c
++++ b/drivers/iommu/dma-iommu.c
+@@ -1296,6 +1296,14 @@ static size_t iommu_dma_opt_mapping_size(void)
+ return iova_rcache_range();
+ }
+
++static size_t iommu_dma_max_mapping_size(struct device *dev)
++{
++ if (dev_is_untrusted(dev))
++ return swiotlb_max_mapping_size(dev);
++
++ return SIZE_MAX;
++}
++
+ static const struct dma_map_ops iommu_dma_ops = {
+ .alloc = iommu_dma_alloc,
+ .free = iommu_dma_free,
+@@ -1319,6 +1327,7 @@ static const struct dma_map_ops iommu_dma_ops = {
+ .unmap_resource = iommu_dma_unmap_resource,
+ .get_merge_boundary = iommu_dma_get_merge_boundary,
+ .opt_mapping_size = iommu_dma_opt_mapping_size,
++ .max_mapping_size = iommu_dma_max_mapping_size,
+ };
+
+ /*
+--
+2.43.0
+
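Back-of-the-envelope numbers for the 64KB PAGE_SIZE report above, as a hedged user-space sketch; the slot size, segment length, and rcache constant are assumptions mirroring the swiotlb and iova.c defaults:

    #include <stdio.h>

    #define IO_TLB_SHIFT    11   /* 2KB swiotlb slot (assumed) */
    #define IO_TLB_SEGSIZE  128  /* contiguous slots per alloc (assumed) */
    #define IOVA_RANGE_CACHE_MAX_SIZE 6 /* assumed, mirrors iova.c */

    int main(void)
    {
        unsigned long page_size = 64 * 1024; /* 64KB PAGE_SIZE config */
        unsigned long opt = page_size << (IOVA_RANGE_CACHE_MAX_SIZE - 1);
        unsigned long max = (unsigned long)IO_TLB_SEGSIZE << IO_TLB_SHIFT;

        /* dma_opt_mapping_size() = min(max_mapping_size, opt_mapping_size):
         * the untrusted-device cap drops from 2MB to 256KB, below the
         * 300KB~512KB NVMe mappings that swiotlb could never satisfy. */
        printf("cap = min(%lu, %lu) = %lu\n", max, opt,
               max < opt ? max : opt);
        return 0;
    }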
--- /dev/null
+From a63e0459e7624070e508254701a4041e43c68a5c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 18 Sep 2023 08:16:30 +0000
+Subject: minmax: add umin(a, b) and umax(a, b)
+
+From: David Laight <David.Laight@ACULAB.COM>
+
+[ Upstream commit 80fcac55385ccb710d33a20dc1caaef29bd5a921 ]
+
+Patch series "minmax: Relax type checks in min() and max()", v4.
+
+The min() (etc) functions in minmax.h require that the arguments have
+exactly the same types.
+
+However when the type check fails, rather than look at the types and fix
+the type of a variable/constant, everyone seems to jump on min_t(). In
+reality min_t() ought to be rare - when something unusual is being done,
+not normality.
+
+The original min() (added in 2.4.9) replaced several inline functions and
+included the type - so it matched the implicit casting of the function
+call. This was renamed min_t() in 2.4.10 and the current min() added.
+There is no actual indication that the conversion of negative values to
+large unsigned values has ever been a real problem.
+
+A quick grep shows 5734 min() and 4597 min_t(). Having the casts on
+almost half of the calls shows that something is clearly wrong.
+
+If the wrong type is picked (and it is far too easy to pick the type of
+the result instead of the larger input) then significant bits can get
+discarded.
+
+Pretty much the worst example is in the derived clamp_val(), consider:
+ unsigned char x = 200u;
+ y = clamp_val(x, 10u, 300u);
+
+I also suspect that many of the min_t(u16, ...) are actually wrong. For
+example copy_data() in printk_ringbuffer.c contains:
+
+ data_size = min_t(u16, buf_size, len);
+
+Here buf_size is 'unsigned int' and len is 'u16'; pass a 64k buffer (can
+you prove that doesn't happen?) and no data is returned. Apparently it
+did - and has since been fixed.
+
+The only reason that most of the min_t() are 'fine' is that pretty much
+all the values in the kernel are between 0 and INT_MAX.
+
+Patch 1 adds umin(), this uses integer promotions to convert both
+arguments to 'unsigned long long'. It can be used to compare a signed
+type that is known to contain a non-negative value with an unsigned type.
+The compiler typically optimises it all away. Added first so that it can
+be referred to in patch 2.
+
+Patch 2 replaces the 'same type' check with a 'same signedness' one. This
+makes min(unsigned_int_var, sizeof()) be ok. The error message is also
+improved and will contain the expanded form of both arguments (useful for
+seeing how constants are defined).
+
+Patch 3 just fixes some whitespace.
+
+Patch 4 allows comparisons of 'unsigned char' and 'unsigned short' to
+signed types. The integer promotion rules convert them both to 'signed
+int' prior to the comparison so they can never cause a negative value be
+converted to a large positive one.
+
+Patch 5 (rewritten for v4) allows comparisons of unsigned values against
+non-negative constant integer expressions. This makes
+min(unsigned_int_var, 4) be ok.
+
+The only common case that still errors is the comparison of signed
+values against unsigned constant integer expressions below __INT_MAX__,
+typically min(int_var, sizeof (foo)); the real fix for this is casting
+the constant: min(int_var, (int)sizeof (foo)).
+
+With all the patches applied pretty much all the min_t() could be replaced
+by min(), and most of the rest by umin(). However they all need careful
+inspection due to code like:
+
+ sz = min_t(unsigned char, sz - 1, LIM - 1) + 1;
+
+which converts 0 to LIM.
+
+This patch (of 6):
+
+umin() and umax() can be used when min()/max() errors out on a signed v
+unsigned compare and the signed value is known to be non-negative.
+
+Unlike min_t(some_unsigned_type, a, b), umin() will never mask off high
+bits if an inappropriate type is selected.
+
+The '+ 0u + 0ul + 0ull' may look strange.
+The '+ 0u' is needed for 'signed int' on 64bit systems.
+The '+ 0ul' is needed for 'signed long' on 32bit systems.
+The '+ 0ull' is needed for 'signed long long'.
+
+Link: https://lkml.kernel.org/r/b97faef60ad24922b530241c5d7c933c@AcuMS.aculab.com
+Link: https://lkml.kernel.org/r/41d93ca827a248698ec64bf57e0c05a5@AcuMS.aculab.com
+Signed-off-by: David Laight <david.laight@aculab.com>
+Cc: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Cc: Christoph Hellwig <hch@infradead.org>
+Cc: Jason A. Donenfeld <Jason@zx2c4.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Stable-dep-of: 51b30ecb73b4 ("swiotlb: Fix alignment checks when both allocation and DMA masks are present")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/minmax.h | 17 +++++++++++++++++
+ 1 file changed, 17 insertions(+)
+
+diff --git a/include/linux/minmax.h b/include/linux/minmax.h
+index 5433c08fcc685..1aea34b8f19bf 100644
+--- a/include/linux/minmax.h
++++ b/include/linux/minmax.h
+@@ -51,6 +51,23 @@
+ */
+ #define max(x, y) __careful_cmp(x, y, >)
+
++/**
++ * umin - return minimum of two non-negative values
++ * Signed types are zero extended to match a larger unsigned type.
++ * @x: first value
++ * @y: second value
++ */
++#define umin(x, y) \
++ __careful_cmp((x) + 0u + 0ul + 0ull, (y) + 0u + 0ul + 0ull, <)
++
++/**
++ * umax - return maximum of two non-negative values
++ * @x: first value
++ * @y: second value
++ */
++#define umax(x, y) \
++ __careful_cmp((x) + 0u + 0ul + 0ull, (y) + 0u + 0ul + 0ull, >)
++
+ /**
+ * min3 - return minimum of three values
+ * @x: first value
+--
+2.43.0
+
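The printk_ringbuffer example above can be reproduced in user space. This hedged demo re-implements the umin() arithmetic locally; the kernel macro additionally applies the __careful_cmp() type checks:

    #include <stdio.h>
    #include <stdint.h>

    /* Local stand-in for umin(): zero-extend both sides before comparing. */
    #define umin_like(x, y) \
        ((x) + 0u + 0ul + 0ull < (y) + 0u + 0ul + 0ull ? \
         (x) + 0u + 0ul + 0ull : (y) + 0u + 0ul + 0ull)

    int main(void)
    {
        unsigned int buf_size = 65536; /* the 64k buffer from copy_data() */
        uint16_t len = 100;

        /* min_t(u16, buf_size, len): casting 65536 to u16 yields 0. */
        uint16_t t = (uint16_t)buf_size < len ? (uint16_t)buf_size : len;

        printf("min_t(u16, ...) -> %u (all data silently dropped)\n", t);
        printf("umin(...)       -> %llu\n",
               (unsigned long long)umin_like(buf_size, len));
        return 0;
    }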
--- /dev/null
+From 743adea0c1397b012ba45b04a5abc66cfceda3b3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 26 Feb 2024 13:07:24 +0106
+Subject: printk: Update @console_may_schedule in console_trylock_spinning()
+
+From: John Ogness <john.ogness@linutronix.de>
+
+[ Upstream commit 8076972468584d4a21dab9aa50e388b3ea9ad8c7 ]
+
+console_trylock_spinning() may take over the console lock from a
+schedulable context. Update @console_may_schedule to make sure it
+reflects a trylock acquire.
+
+Reported-by: Mukesh Ojha <quic_mojha@quicinc.com>
+Closes: https://lore.kernel.org/lkml/20240222090538.23017-1-quic_mojha@quicinc.com
+Fixes: dbdda842fe96 ("printk: Add console owner and waiter logic to load balance console writes")
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Reviewed-by: Mukesh Ojha <quic_mojha@quicinc.com>
+Reviewed-by: Petr Mladek <pmladek@suse.com>
+Link: https://lore.kernel.org/r/875xybmo2z.fsf@jogness.linutronix.de
+Signed-off-by: Petr Mladek <pmladek@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/printk/printk.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
+index df4d07af6d1e2..323931ff61191 100644
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -1908,6 +1908,12 @@ static int console_trylock_spinning(void)
+ */
+ mutex_acquire(&console_lock_dep_map, 0, 1, _THIS_IP_);
+
++ /*
++ * Update @console_may_schedule for trylock because the previous
++ * owner may have been schedulable.
++ */
++ console_may_schedule = 0;
++
+ return 1;
+ }
+
+--
+2.43.0
+
i2c-i801-avoid-potential-double-call-to-gpiod_remove_lookup_table.patch
xen-events-close-evtchn-after-mapping-cleanup.patch
acpi-cppc-use-access_width-over-bit_width-for-system-memory-accesses.patch
+clocksource-drivers-arm_global_timer-fix-maximum-pre.patch
+entry-respect-changes-to-system-call-number-by-trace.patch
+minmax-add-umin-a-b-and-umax-a-b.patch
+swiotlb-fix-alignment-checks-when-both-allocation-an.patch
+dma-mapping-add-dma_opt_mapping_size.patch
+dma-iommu-add-iommu_dma_opt_mapping_size.patch
+iommu-dma-force-swiotlb_max_mapping_size-on-an-untru.patch
+printk-update-console_may_schedule-in-console_tryloc.patch
--- /dev/null
+From d4c4f6babfb91590a9e6af565627f541ad5c6f3e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 8 Mar 2024 15:28:27 +0000
+Subject: swiotlb: Fix alignment checks when both allocation and DMA masks are
+ present
+
+From: Will Deacon <will@kernel.org>
+
+[ Upstream commit 51b30ecb73b481d5fac6ccf2ecb4a309c9ee3310 ]
+
+Nicolin reports that swiotlb buffer allocations fail for an NVME device
+behind an IOMMU using 64KiB pages. This is because we end up with a
+minimum allocation alignment of 64KiB (for the IOMMU to map the buffer
+safely) but a minimum DMA alignment mask corresponding to a 4KiB NVME
+page (i.e. preserving the 4KiB page offset from the original allocation).
+If the original address is not 4KiB-aligned, the allocation will fail
+because swiotlb_search_pool_area() erroneously compares these unmasked
+bits with the 64KiB-aligned candidate allocation.
+
+Tweak swiotlb_search_pool_area() so that the DMA alignment mask is
+reduced based on the required alignment of the allocation.
+
+Fixes: 82612d66d51d ("iommu: Allow the dma-iommu api to use bounce buffers")
+Link: https://lore.kernel.org/r/cover.1707851466.git.nicolinc@nvidia.com
+Reported-by: Nicolin Chen <nicolinc@nvidia.com>
+Signed-off-by: Will Deacon <will@kernel.org>
+Reviewed-by: Michael Kelley <mhklinux@outlook.com>
+Tested-by: Nicolin Chen <nicolinc@nvidia.com>
+Tested-by: Michael Kelley <mhklinux@outlook.com>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/dma/swiotlb.c | 11 +++++++++--
+ 1 file changed, 9 insertions(+), 2 deletions(-)
+
+diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
+index a9849670bdb54..5c7ed5d519424 100644
+--- a/kernel/dma/swiotlb.c
++++ b/kernel/dma/swiotlb.c
+@@ -469,8 +469,7 @@ static int swiotlb_find_slots(struct device *dev, phys_addr_t orig_addr,
+ dma_addr_t tbl_dma_addr =
+ phys_to_dma_unencrypted(dev, mem->start) & boundary_mask;
+ unsigned long max_slots = get_max_slots(boundary_mask);
+- unsigned int iotlb_align_mask =
+- dma_get_min_align_mask(dev) & ~(IO_TLB_SIZE - 1);
++ unsigned int iotlb_align_mask = dma_get_min_align_mask(dev);
+ unsigned int nslots = nr_slots(alloc_size), stride;
+ unsigned int index, wrap, count = 0, i;
+ unsigned int offset = swiotlb_align_offset(dev, orig_addr);
+@@ -478,6 +477,14 @@ static int swiotlb_find_slots(struct device *dev, phys_addr_t orig_addr,
+
+ BUG_ON(!nslots);
+
++ /*
++ * Ensure that the allocation is at least slot-aligned and update
++ * 'iotlb_align_mask' to ignore bits that will be preserved when
++ * offsetting into the allocation.
++ */
++ alloc_align_mask |= (IO_TLB_SIZE - 1);
++ iotlb_align_mask &= ~alloc_align_mask;
++
+ /*
+ * For mappings with an alignment requirement don't bother looping to
+ * unaligned slots once we found an aligned one. For allocations of
+--
+2.43.0
+
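The mask arithmetic from this hunk, replayed with the numbers from the NVMe report as a hedged user-space sketch (slot size and page sizes are assumptions):

    #include <stdio.h>

    #define IO_TLB_SIZE (1 << 11) /* 2KB swiotlb slot (assumed) */

    int main(void)
    {
        unsigned int alloc_align_mask = (64 << 10) - 1; /* 64KB IOMMU page */
        unsigned int min_align_mask   = (4 << 10) - 1;  /* 4KB NVMe page  */

        /* Before: only the slot bits were stripped, leaving bit 11 set,
         * so a 64KB-aligned candidate could never match an orig_addr
         * with that bit set. */
        unsigned int old_mask = min_align_mask & ~(IO_TLB_SIZE - 1);

        /* After: bits already guaranteed by the allocation alignment
         * are ignored as well. */
        unsigned int alloc = alloc_align_mask | (IO_TLB_SIZE - 1);
        unsigned int new_mask = min_align_mask & ~alloc;

        printf("old iotlb_align_mask = 0x%x\n", old_mask); /* 0x800 */
        printf("new iotlb_align_mask = 0x%x\n", new_mask); /* 0x0   */
        return 0;
    }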