--- /dev/null
+From 146e18e0ecb530e9d13cd291be1aad4e58023712 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 20 Jun 2025 07:03:30 +0000
+Subject: fs: export anon_inode_make_secure_inode() and fix secretmem LSM
+ bypass
+
+From: Shivank Garg <shivankg@amd.com>
+
+[ Upstream commit cbe4134ea4bc493239786220bd69cb8a13493190 ]
+
+Export anon_inode_make_secure_inode() to allow KVM guest_memfd to create
+anonymous inodes with proper security context. This replaces the current
+pattern of calling alloc_anon_inode() followed by
+inode_init_security_anon() to create the security context manually.
+
+This change also fixes a security regression in secretmem where the
+S_PRIVATE flag was not cleared after alloc_anon_inode(), causing
+LSM/SELinux checks to be bypassed for secretmem file descriptors.
+
+As guest_memfd currently resides in the KVM module, we need to export this
+symbol for use outside the core kernel. In the future, guest_memfd might be
+moved to core-mm, at which point the symbols would no longer have to be
+exported. When/if that happens is still unclear.
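+
+For illustration, a caller now allocates the inode in a single call (a
+sketch; the "[guest_memfd]" class name and mount are hypothetical, the
+secretmem conversion below is the real in-tree user):
+
+  inode = anon_inode_make_secure_inode(mnt->mnt_sb, "[guest_memfd]", NULL);
+  if (IS_ERR(inode))
+          return ERR_CAST(inode);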
+
+Fixes: 2bfe15c52612 ("mm: create security context for memfd_secret inodes")
+Suggested-by: David Hildenbrand <david@redhat.com>
+Suggested-by: Mike Rapoport <rppt@kernel.org>
+Signed-off-by: Shivank Garg <shivankg@amd.com>
+Link: https://lore.kernel.org/20250620070328.803704-3-shivankg@amd.com
+Acked-by: "Mike Rapoport (Microsoft)" <rppt@kernel.org>
+Signed-off-by: Christian Brauner <brauner@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/anon_inodes.c | 23 ++++++++++++++++++-----
+ include/linux/fs.h | 2 ++
+ mm/secretmem.c | 11 +----------
+ 3 files changed, 21 insertions(+), 15 deletions(-)
+
+diff --git a/fs/anon_inodes.c b/fs/anon_inodes.c
+index 24192a7667edf..a25766e90f0a6 100644
+--- a/fs/anon_inodes.c
++++ b/fs/anon_inodes.c
+@@ -55,15 +55,26 @@ static struct file_system_type anon_inode_fs_type = {
+ .kill_sb = kill_anon_super,
+ };
+
+-static struct inode *anon_inode_make_secure_inode(
+- const char *name,
+- const struct inode *context_inode)
++/**
++ * anon_inode_make_secure_inode - allocate an anonymous inode with security context
++ * @sb: [in] Superblock to allocate from
++ * @name: [in] Name of the class of the new file (e.g., "secretmem")
++ * @context_inode:
++ * [in] Optional parent inode for security inheritance
++ *
++ * The function ensures proper security initialization through the LSM hook
++ * security_inode_init_security_anon().
++ *
++ * Return: Pointer to new inode on success, ERR_PTR on failure.
++ */
++struct inode *anon_inode_make_secure_inode(struct super_block *sb, const char *name,
++ const struct inode *context_inode)
+ {
+ struct inode *inode;
+ const struct qstr qname = QSTR_INIT(name, strlen(name));
+ int error;
+
+- inode = alloc_anon_inode(anon_inode_mnt->mnt_sb);
++ inode = alloc_anon_inode(sb);
+ if (IS_ERR(inode))
+ return inode;
+ inode->i_flags &= ~S_PRIVATE;
+@@ -74,6 +85,7 @@ static struct inode *anon_inode_make_secure_inode(
+ }
+ return inode;
+ }
++EXPORT_SYMBOL_GPL_FOR_MODULES(anon_inode_make_secure_inode, "kvm");
+
+ static struct file *__anon_inode_getfile(const char *name,
+ const struct file_operations *fops,
+@@ -88,7 +100,8 @@ static struct file *__anon_inode_getfile(const char *name,
+ return ERR_PTR(-ENOENT);
+
+ if (secure) {
+- inode = anon_inode_make_secure_inode(name, context_inode);
++ inode = anon_inode_make_secure_inode(anon_inode_mnt->mnt_sb,
++ name, context_inode);
+ if (IS_ERR(inode)) {
+ file = ERR_CAST(inode);
+ goto err;
+diff --git a/include/linux/fs.h b/include/linux/fs.h
+index 81edfa1e66b60..b641a01512fb0 100644
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -3170,6 +3170,8 @@ extern int simple_write_begin(struct file *file, struct address_space *mapping,
+ extern const struct address_space_operations ram_aops;
+ extern int always_delete_dentry(const struct dentry *);
+ extern struct inode *alloc_anon_inode(struct super_block *);
++struct inode *anon_inode_make_secure_inode(struct super_block *sb, const char *name,
++ const struct inode *context_inode);
+ extern int simple_nosetlease(struct file *, int, struct file_lock **, void **);
+ extern const struct dentry_operations simple_dentry_operations;
+
+diff --git a/mm/secretmem.c b/mm/secretmem.c
+index 399552814fd0f..4bedf491a8a74 100644
+--- a/mm/secretmem.c
++++ b/mm/secretmem.c
+@@ -195,19 +195,10 @@ static struct file *secretmem_file_create(unsigned long flags)
+ struct file *file;
+ struct inode *inode;
+ const char *anon_name = "[secretmem]";
+- const struct qstr qname = QSTR_INIT(anon_name, strlen(anon_name));
+- int err;
+
+- inode = alloc_anon_inode(secretmem_mnt->mnt_sb);
++ inode = anon_inode_make_secure_inode(secretmem_mnt->mnt_sb, anon_name, NULL);
+ if (IS_ERR(inode))
+ return ERR_CAST(inode);
+-
+- err = security_inode_init_security_anon(inode, &qname, NULL);
+- if (err) {
+- file = ERR_PTR(err);
+- goto err_free_inode;
+- }
+-
+ file = alloc_file_pseudo(inode, secretmem_mnt, "secretmem",
+ O_RDWR, &secretmem_fops);
+ if (IS_ERR(file))
+--
+2.39.5
+
--- /dev/null
+From a1b204341623ffd65190d4e1443a7c5710d2ade2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 13 Sep 2023 10:43:37 -0300
+Subject: iommu: Add IOMMU_DOMAIN_PLATFORM for S390
+
+From: Jason Gunthorpe <jgg@nvidia.com>
+
+[ Upstream commit e04c7487a6655722172e93e8f36e51d6ab279f86 ]
+
+The PLATFORM domain will be set as the default domain and attached as
+normal during probe. The driver will ignore the initial attach from a NULL
+domain to the PLATFORM domain.
+
+After this, the PLATFORM domain's attach_dev will be called whenever we
+detach from an UNMANAGED domain (e.g. for VFIO). This is the same time the
+original design would have called op->detach_dev().
+
+This is temporary until the S390 dma-iommu.c conversion is merged.
+
+Tested-by: Heiko Stuebner <heiko@sntech.de>
+Tested-by: Niklas Schnelle <schnelle@linux.ibm.com>
+Reviewed-by: Lu Baolu <baolu.lu@linux.intel.com>
+Reviewed-by: Jerry Snitselaar <jsnitsel@redhat.com>
+Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
+Link: https://lore.kernel.org/r/4-v8-81230027b2fa+9d-iommu_all_defdom_jgg@nvidia.com
+Signed-off-by: Joerg Roedel <jroedel@suse.de>
+Stable-dep-of: 45537926dd2a ("s390/pci: Fix stale function handles in error handling")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/iommu/s390-iommu.c | 21 +++++++++++++++++++--
+ 1 file changed, 19 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/iommu/s390-iommu.c b/drivers/iommu/s390-iommu.c
+index fbf59a8db29b1..f0c867c57a5b9 100644
+--- a/drivers/iommu/s390-iommu.c
++++ b/drivers/iommu/s390-iommu.c
+@@ -142,14 +142,31 @@ static int s390_iommu_attach_device(struct iommu_domain *domain,
+ return 0;
+ }
+
+-static void s390_iommu_set_platform_dma(struct device *dev)
++/*
++ * Switch control over the IOMMU to S390's internal dma_api ops
++ */
++static int s390_iommu_platform_attach(struct iommu_domain *platform_domain,
++ struct device *dev)
+ {
+ struct zpci_dev *zdev = to_zpci_dev(dev);
+
++ if (!zdev->s390_domain)
++ return 0;
++
+ __s390_iommu_detach_device(zdev);
+ zpci_dma_init_device(zdev);
++ return 0;
+ }
+
++static struct iommu_domain_ops s390_iommu_platform_ops = {
++ .attach_dev = s390_iommu_platform_attach,
++};
++
++static struct iommu_domain s390_iommu_platform_domain = {
++ .type = IOMMU_DOMAIN_PLATFORM,
++ .ops = &s390_iommu_platform_ops,
++};
++
+ static void s390_iommu_get_resv_regions(struct device *dev,
+ struct list_head *list)
+ {
+@@ -428,12 +445,12 @@ void zpci_destroy_iommu(struct zpci_dev *zdev)
+ }
+
+ static const struct iommu_ops s390_iommu_ops = {
++ .default_domain = &s390_iommu_platform_domain,
+ .capable = s390_iommu_capable,
+ .domain_alloc = s390_domain_alloc,
+ .probe_device = s390_iommu_probe_device,
+ .release_device = s390_iommu_release_device,
+ .device_group = generic_device_group,
+- .set_platform_dma_ops = s390_iommu_set_platform_dma,
+ .pgsize_bitmap = SZ_4K,
+ .get_resv_regions = s390_iommu_get_resv_regions,
+ .default_domain_ops = &(const struct iommu_domain_ops) {
+--
+2.39.5
+
--- /dev/null
+From f9311abe76e26880f417b581cefba8366abcc893 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 28 Sep 2023 16:31:35 +0200
+Subject: iommu: Allow .iotlb_sync_map to fail and handle s390's -ENOMEM return
+
+From: Niklas Schnelle <schnelle@linux.ibm.com>
+
+[ Upstream commit fa4c4507099f781ca89a748c480af9cf97629726 ]
+
+On s390 when using a paging hypervisor, .iotlb_sync_map is used to sync
+mappings by letting the hypervisor inspect the synced IOVA range and
+updating a shadow table. This however means that .iotlb_sync_map can
+fail as the hypervisor may run out of resources while doing the sync.
+This can be due to the hypervisor being unable to pin guest pages, a
+limit on mapped addresses such as vfio_iommu_type1.dma_entry_limit, or
+a lack of other resources. Either way, such a failure to sync a mapping
+should result in a DMA_MAPPING_ERROR.
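+
+For context, a DMA API user sees such a failure through the usual
+mapping-error check (a generic sketch, not code from this patch):
+
+  dma_addr_t dma_addr = dma_map_page(dev, page, 0, size, DMA_TO_DEVICE);
+
+  if (dma_mapping_error(dev, dma_addr))
+          return -ENOMEM;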
+
+Now, especially when running with batched IOTLB flushes for unmap, it may
+be that some IOVAs have already been invalidated but not yet synced via
+.iotlb_sync_map. Thus, if the hypervisor indicates that it is running out
+of resources, first do a global flush, allowing the hypervisor to free
+resources associated with these mappings as well, then retry creating the
+new mappings, and only if that also fails report the error to callers.
+
+Reviewed-by: Lu Baolu <baolu.lu@linux.intel.com>
+Reviewed-by: Matthew Rosato <mjrosato@linux.ibm.com>
+Acked-by: Jernej Skrabec <jernej.skrabec@gmail.com> # sun50i
+Signed-off-by: Niklas Schnelle <schnelle@linux.ibm.com>
+Link: https://lore.kernel.org/r/20230928-dma_iommu-v13-1-9e5fc4dacc36@linux.ibm.com
+Signed-off-by: Joerg Roedel <jroedel@suse.de>
+Stable-dep-of: 45537926dd2a ("s390/pci: Fix stale function handles in error handling")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/iommu/amd/iommu.c | 5 +++--
+ drivers/iommu/apple-dart.c | 5 +++--
+ drivers/iommu/intel/iommu.c | 5 +++--
+ drivers/iommu/iommu.c | 20 ++++++++++++++++----
+ drivers/iommu/msm_iommu.c | 5 +++--
+ drivers/iommu/mtk_iommu.c | 5 +++--
+ drivers/iommu/s390-iommu.c | 29 +++++++++++++++++++++++------
+ drivers/iommu/sprd-iommu.c | 5 +++--
+ drivers/iommu/sun50i-iommu.c | 6 ++++--
+ include/linux/iommu.h | 4 ++--
+ 10 files changed, 63 insertions(+), 26 deletions(-)
+
+diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
+index a5d6d786dba52..d6344b74d873c 100644
+--- a/drivers/iommu/amd/iommu.c
++++ b/drivers/iommu/amd/iommu.c
+@@ -2241,14 +2241,15 @@ static int amd_iommu_attach_device(struct iommu_domain *dom,
+ return ret;
+ }
+
+-static void amd_iommu_iotlb_sync_map(struct iommu_domain *dom,
+- unsigned long iova, size_t size)
++static int amd_iommu_iotlb_sync_map(struct iommu_domain *dom,
++ unsigned long iova, size_t size)
+ {
+ struct protection_domain *domain = to_pdomain(dom);
+ struct io_pgtable_ops *ops = &domain->iop.iop.ops;
+
+ if (ops->map_pages)
+ domain_flush_np_cache(domain, iova, size);
++ return 0;
+ }
+
+ static int amd_iommu_map_pages(struct iommu_domain *dom, unsigned long iova,
+diff --git a/drivers/iommu/apple-dart.c b/drivers/iommu/apple-dart.c
+index 0b89275084274..d6263ce05b9ba 100644
+--- a/drivers/iommu/apple-dart.c
++++ b/drivers/iommu/apple-dart.c
+@@ -506,10 +506,11 @@ static void apple_dart_iotlb_sync(struct iommu_domain *domain,
+ apple_dart_domain_flush_tlb(to_dart_domain(domain));
+ }
+
+-static void apple_dart_iotlb_sync_map(struct iommu_domain *domain,
+- unsigned long iova, size_t size)
++static int apple_dart_iotlb_sync_map(struct iommu_domain *domain,
++ unsigned long iova, size_t size)
+ {
+ apple_dart_domain_flush_tlb(to_dart_domain(domain));
++ return 0;
+ }
+
+ static phys_addr_t apple_dart_iova_to_phys(struct iommu_domain *domain,
+diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
+index 6a745616d85a4..ddfde6edf7566 100644
+--- a/drivers/iommu/intel/iommu.c
++++ b/drivers/iommu/intel/iommu.c
+@@ -4680,8 +4680,8 @@ static bool risky_device(struct pci_dev *pdev)
+ return false;
+ }
+
+-static void intel_iommu_iotlb_sync_map(struct iommu_domain *domain,
+- unsigned long iova, size_t size)
++static int intel_iommu_iotlb_sync_map(struct iommu_domain *domain,
++ unsigned long iova, size_t size)
+ {
+ struct dmar_domain *dmar_domain = to_dmar_domain(domain);
+ unsigned long pages = aligned_nrpages(iova, size);
+@@ -4691,6 +4691,7 @@ static void intel_iommu_iotlb_sync_map(struct iommu_domain *domain,
+
+ xa_for_each(&dmar_domain->iommu_array, i, info)
+ __mapping_notify_one(info->iommu, dmar_domain, pfn, pages);
++ return 0;
+ }
+
+ static void intel_iommu_remove_dev_pasid(struct device *dev, ioasid_t pasid)
+diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
+index 3fa5699b9ff19..481eb6766ee13 100644
+--- a/drivers/iommu/iommu.c
++++ b/drivers/iommu/iommu.c
+@@ -2530,8 +2530,17 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova,
+ return -EINVAL;
+
+ ret = __iommu_map(domain, iova, paddr, size, prot, gfp);
+- if (ret == 0 && ops->iotlb_sync_map)
+- ops->iotlb_sync_map(domain, iova, size);
++ if (ret == 0 && ops->iotlb_sync_map) {
++ ret = ops->iotlb_sync_map(domain, iova, size);
++ if (ret)
++ goto out_err;
++ }
++
++ return ret;
++
++out_err:
++ /* undo mappings already done */
++ iommu_unmap(domain, iova, size);
+
+ return ret;
+ }
+@@ -2672,8 +2681,11 @@ ssize_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
+ sg = sg_next(sg);
+ }
+
+- if (ops->iotlb_sync_map)
+- ops->iotlb_sync_map(domain, iova, mapped);
++ if (ops->iotlb_sync_map) {
++ ret = ops->iotlb_sync_map(domain, iova, mapped);
++ if (ret)
++ goto out_err;
++ }
+ return mapped;
+
+ out_err:
+diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
+index 79d89bad5132b..47926d3290e6c 100644
+--- a/drivers/iommu/msm_iommu.c
++++ b/drivers/iommu/msm_iommu.c
+@@ -486,12 +486,13 @@ static int msm_iommu_map(struct iommu_domain *domain, unsigned long iova,
+ return ret;
+ }
+
+-static void msm_iommu_sync_map(struct iommu_domain *domain, unsigned long iova,
+- size_t size)
++static int msm_iommu_sync_map(struct iommu_domain *domain, unsigned long iova,
++ size_t size)
+ {
+ struct msm_priv *priv = to_msm_priv(domain);
+
+ __flush_iotlb_range(iova, size, SZ_4K, false, priv);
++ return 0;
+ }
+
+ static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
+diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
+index 06c0770ff894e..2cba98233be0f 100644
+--- a/drivers/iommu/mtk_iommu.c
++++ b/drivers/iommu/mtk_iommu.c
+@@ -817,12 +817,13 @@ static void mtk_iommu_iotlb_sync(struct iommu_domain *domain,
+ mtk_iommu_tlb_flush_range_sync(gather->start, length, dom->bank);
+ }
+
+-static void mtk_iommu_sync_map(struct iommu_domain *domain, unsigned long iova,
+- size_t size)
++static int mtk_iommu_sync_map(struct iommu_domain *domain, unsigned long iova,
++ size_t size)
+ {
+ struct mtk_iommu_domain *dom = to_mtk_domain(domain);
+
+ mtk_iommu_tlb_flush_range_sync(iova, size, dom->bank);
++ return 0;
+ }
+
+ static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
+diff --git a/drivers/iommu/s390-iommu.c b/drivers/iommu/s390-iommu.c
+index f0c867c57a5b9..3b512e52610b1 100644
+--- a/drivers/iommu/s390-iommu.c
++++ b/drivers/iommu/s390-iommu.c
+@@ -222,6 +222,12 @@ static void s390_iommu_release_device(struct device *dev)
+ __s390_iommu_detach_device(zdev);
+ }
+
++static int zpci_refresh_all(struct zpci_dev *zdev)
++{
++ return zpci_refresh_trans((u64)zdev->fh << 32, zdev->start_dma,
++ zdev->end_dma - zdev->start_dma + 1);
++}
++
+ static void s390_iommu_flush_iotlb_all(struct iommu_domain *domain)
+ {
+ struct s390_domain *s390_domain = to_s390_domain(domain);
+@@ -229,8 +235,7 @@ static void s390_iommu_flush_iotlb_all(struct iommu_domain *domain)
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(zdev, &s390_domain->devices, iommu_list) {
+- zpci_refresh_trans((u64)zdev->fh << 32, zdev->start_dma,
+- zdev->end_dma - zdev->start_dma + 1);
++ zpci_refresh_all(zdev);
+ }
+ rcu_read_unlock();
+ }
+@@ -254,20 +259,32 @@ static void s390_iommu_iotlb_sync(struct iommu_domain *domain,
+ rcu_read_unlock();
+ }
+
+-static void s390_iommu_iotlb_sync_map(struct iommu_domain *domain,
+- unsigned long iova, size_t size)
++static int s390_iommu_iotlb_sync_map(struct iommu_domain *domain,
++ unsigned long iova, size_t size)
+ {
+ struct s390_domain *s390_domain = to_s390_domain(domain);
+ struct zpci_dev *zdev;
++ int ret = 0;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(zdev, &s390_domain->devices, iommu_list) {
+ if (!zdev->tlb_refresh)
+ continue;
+- zpci_refresh_trans((u64)zdev->fh << 32,
+- iova, size);
++ ret = zpci_refresh_trans((u64)zdev->fh << 32,
++ iova, size);
++ /*
++ * let the hypervisor discover invalidated entries
++ * allowing it to free IOVAs and unpin pages
++ */
++ if (ret == -ENOMEM) {
++ ret = zpci_refresh_all(zdev);
++ if (ret)
++ break;
++ }
+ }
+ rcu_read_unlock();
++
++ return ret;
+ }
+
+ static int s390_iommu_validate_trans(struct s390_domain *s390_domain,
+diff --git a/drivers/iommu/sprd-iommu.c b/drivers/iommu/sprd-iommu.c
+index c8e79a2d8b4c6..03aea6ed0d422 100644
+--- a/drivers/iommu/sprd-iommu.c
++++ b/drivers/iommu/sprd-iommu.c
+@@ -345,8 +345,8 @@ static size_t sprd_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
+ return size;
+ }
+
+-static void sprd_iommu_sync_map(struct iommu_domain *domain,
+- unsigned long iova, size_t size)
++static int sprd_iommu_sync_map(struct iommu_domain *domain,
++ unsigned long iova, size_t size)
+ {
+ struct sprd_iommu_domain *dom = to_sprd_domain(domain);
+ unsigned int reg;
+@@ -358,6 +358,7 @@ static void sprd_iommu_sync_map(struct iommu_domain *domain,
+
+ /* clear IOMMU TLB buffer after page table updated */
+ sprd_iommu_write(dom->sdev, reg, 0xffffffff);
++ return 0;
+ }
+
+ static void sprd_iommu_sync(struct iommu_domain *domain,
+diff --git a/drivers/iommu/sun50i-iommu.c b/drivers/iommu/sun50i-iommu.c
+index 94bd7f25f6f26..e6b2fe3db4216 100644
+--- a/drivers/iommu/sun50i-iommu.c
++++ b/drivers/iommu/sun50i-iommu.c
+@@ -402,8 +402,8 @@ static void sun50i_iommu_flush_iotlb_all(struct iommu_domain *domain)
+ spin_unlock_irqrestore(&iommu->iommu_lock, flags);
+ }
+
+-static void sun50i_iommu_iotlb_sync_map(struct iommu_domain *domain,
+- unsigned long iova, size_t size)
++static int sun50i_iommu_iotlb_sync_map(struct iommu_domain *domain,
++ unsigned long iova, size_t size)
+ {
+ struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
+ struct sun50i_iommu *iommu = sun50i_domain->iommu;
+@@ -412,6 +412,8 @@ static void sun50i_iommu_iotlb_sync_map(struct iommu_domain *domain,
+ spin_lock_irqsave(&iommu->iommu_lock, flags);
+ sun50i_iommu_zap_range(iommu, iova, size);
+ spin_unlock_irqrestore(&iommu->iommu_lock, flags);
++
++ return 0;
+ }
+
+ static void sun50i_iommu_iotlb_sync(struct iommu_domain *domain,
+diff --git a/include/linux/iommu.h b/include/linux/iommu.h
+index b6ef263e85c06..187528e0ebb99 100644
+--- a/include/linux/iommu.h
++++ b/include/linux/iommu.h
+@@ -346,8 +346,8 @@ struct iommu_domain_ops {
+ struct iommu_iotlb_gather *iotlb_gather);
+
+ void (*flush_iotlb_all)(struct iommu_domain *domain);
+- void (*iotlb_sync_map)(struct iommu_domain *domain, unsigned long iova,
+- size_t size);
++ int (*iotlb_sync_map)(struct iommu_domain *domain, unsigned long iova,
++ size_t size);
+ void (*iotlb_sync)(struct iommu_domain *domain,
+ struct iommu_iotlb_gather *iotlb_gather);
+
+--
+2.39.5
+
--- /dev/null
+From 05ee839a81aab481a93d29bc43f8225b12178049 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 2 May 2025 16:12:09 +0200
+Subject: module: Provide EXPORT_SYMBOL_GPL_FOR_MODULES() helper
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+[ Upstream commit 707f853d7fa3ce323a6875487890c213e34d81a0 ]
+
+Helper macro to more easily limit the export of a symbol to a given
+list of modules.
+
+Eg:
+
+ EXPORT_SYMBOL_GPL_FOR_MODULES(preempt_notifier_inc, "kvm");
+
+will limit the use of said function to kvm.ko; any other module trying
+to use this symbol will refuse to load (and get modpost build
+failures).
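+
+On the consumer side nothing special is needed; a module whose name
+matches the list simply references the symbol (a sketch with a
+hypothetical kvm.ko call site; the namespace cannot be imported, so no
+MODULE_IMPORT_NS() is involved):
+
+  /* in kvm.ko - permitted because the module name matches "kvm" */
+  preempt_notifier_inc();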
+
+Requested-by: Masahiro Yamada <masahiroy@kernel.org>
+Requested-by: Christoph Hellwig <hch@infradead.org>
+Signed-off-by: Peter Zijlstra <peterz@infradead.org>
+Reviewed-by: Petr Pavlu <petr.pavlu@suse.com>
+Signed-off-by: Masahiro Yamada <masahiroy@kernel.org>
+Stable-dep-of: cbe4134ea4bc ("fs: export anon_inode_make_secure_inode() and fix secretmem LSM bypass")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ Documentation/core-api/symbol-namespaces.rst | 22 ++++++++++++++++++++
+ include/linux/export.h | 12 +++++++++--
+ 2 files changed, 32 insertions(+), 2 deletions(-)
+
+diff --git a/Documentation/core-api/symbol-namespaces.rst b/Documentation/core-api/symbol-namespaces.rst
+index 12e4aecdae945..29875e25e376f 100644
+--- a/Documentation/core-api/symbol-namespaces.rst
++++ b/Documentation/core-api/symbol-namespaces.rst
+@@ -28,6 +28,9 @@ kernel. As of today, modules that make use of symbols exported into namespaces,
+ are required to import the namespace. Otherwise the kernel will, depending on
+ its configuration, reject loading the module or warn about a missing import.
+
++Additionally, it is possible to put symbols into a module namespace, strictly
++limiting which modules are allowed to use these symbols.
++
+ 2. How to define Symbol Namespaces
+ ==================================
+
+@@ -84,6 +87,22 @@ unit as preprocessor statement. The above example would then read::
+ within the corresponding compilation unit before any EXPORT_SYMBOL macro is
+ used.
+
++2.3 Using the EXPORT_SYMBOL_GPL_FOR_MODULES() macro
++===================================================
++
++Symbols exported using this macro are put into a module namespace. This
++namespace cannot be imported.
++
++The macro takes a comma-separated list of module names, allowing only those
++modules to access this symbol. Simple tail-globs are supported.
++
++For example::
++
++ EXPORT_SYMBOL_GPL_FOR_MODULES(preempt_notifier_inc, "kvm,kvm-*")
++
++will limit usage of this symbol to modules whose names match the given
++patterns.
++
+ 3. How to use Symbols exported in Namespaces
+ ============================================
+
+@@ -155,3 +174,6 @@ in-tree modules::
+ You can also run nsdeps for external module builds. A typical usage is::
+
+ $ make -C <path_to_kernel_src> M=$PWD nsdeps
++
++Note: it will happily generate an import statement for the module
++namespace, which will not work and generates build and runtime failures.
+diff --git a/include/linux/export.h b/include/linux/export.h
+index 9911508a9604f..06f7a4eb64928 100644
+--- a/include/linux/export.h
++++ b/include/linux/export.h
+@@ -42,11 +42,17 @@ extern struct module __this_module;
+ .long sym
+ #endif
+
+-#define ___EXPORT_SYMBOL(sym, license, ns) \
++/*
++ * LLVM integrated assembler can merge adjacent string literals (like
++ * C and GNU-as) passed to '.ascii', but not to '.asciz' and chokes on:
++ *
++ * .asciz "MODULE_" "kvm" ;
++ */
++#define ___EXPORT_SYMBOL(sym, license, ns...) \
+ .section ".export_symbol","a" ASM_NL \
+ __export_symbol_##sym: ASM_NL \
+ .asciz license ASM_NL \
+- .asciz ns ASM_NL \
++ .ascii ns "\0" ASM_NL \
+ __EXPORT_SYMBOL_REF(sym) ASM_NL \
+ .previous
+
+@@ -88,4 +94,6 @@ extern struct module __this_module;
+ #define EXPORT_SYMBOL_NS(sym, ns) __EXPORT_SYMBOL(sym, "", __stringify(ns))
+ #define EXPORT_SYMBOL_NS_GPL(sym, ns) __EXPORT_SYMBOL(sym, "GPL", __stringify(ns))
+
++#define EXPORT_SYMBOL_GPL_FOR_MODULES(sym, mods) __EXPORT_SYMBOL(sym, "GPL", "module:" mods)
++
+ #endif /* _LINUX_EXPORT_H */
+--
+2.39.5
+
--- /dev/null
+From 0ce24e6e9f7e92d157d6c03ab655820995e22f28 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 19 Jun 2025 15:16:11 -0400
+Subject: NFSv4/flexfiles: Fix handling of NFS level errors in I/O
+
+From: Trond Myklebust <trond.myklebust@hammerspace.com>
+
+[ Upstream commit 38074de35b015df5623f524d6f2b49a0cd395c40 ]
+
+Allow the flexfiles error handling to recognise NFS level errors (as
+opposed to RPC level errors) and handle them separately. The main
+motivator is the NFSERR_PERM errors that get returned if the NFS client
+connects to the data server through a port number that is lower than
+1024. In that case, the client should disconnect and retry a READ on a
+different data server, or it should retry a WRITE after reconnecting.
+
+Reviewed-by: Tigran Mkrtchyan <tigran.mkrtchyan@desy.de>
+Fixes: d67ae825a59d ("pnfs/flexfiles: Add the FlexFile Layout Driver")
+Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
+Signed-off-by: Anna Schumaker <anna.schumaker@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/nfs/flexfilelayout/flexfilelayout.c | 121 ++++++++++++++++++-------
+ 1 file changed, 87 insertions(+), 34 deletions(-)
+
+diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
+index 0bc537de1b295..0a26444fe2023 100644
+--- a/fs/nfs/flexfilelayout/flexfilelayout.c
++++ b/fs/nfs/flexfilelayout/flexfilelayout.c
+@@ -1096,6 +1096,7 @@ static void ff_layout_reset_read(struct nfs_pgio_header *hdr)
+ }
+
+ static int ff_layout_async_handle_error_v4(struct rpc_task *task,
++ u32 op_status,
+ struct nfs4_state *state,
+ struct nfs_client *clp,
+ struct pnfs_layout_segment *lseg,
+@@ -1106,32 +1107,42 @@ static int ff_layout_async_handle_error_v4(struct rpc_task *task,
+ struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
+ struct nfs4_slot_table *tbl = &clp->cl_session->fc_slot_table;
+
+- switch (task->tk_status) {
+- case -NFS4ERR_BADSESSION:
+- case -NFS4ERR_BADSLOT:
+- case -NFS4ERR_BAD_HIGH_SLOT:
+- case -NFS4ERR_DEADSESSION:
+- case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
+- case -NFS4ERR_SEQ_FALSE_RETRY:
+- case -NFS4ERR_SEQ_MISORDERED:
++ switch (op_status) {
++ case NFS4_OK:
++ case NFS4ERR_NXIO:
++ break;
++ case NFSERR_PERM:
++ if (!task->tk_xprt)
++ break;
++ xprt_force_disconnect(task->tk_xprt);
++ goto out_retry;
++ case NFS4ERR_BADSESSION:
++ case NFS4ERR_BADSLOT:
++ case NFS4ERR_BAD_HIGH_SLOT:
++ case NFS4ERR_DEADSESSION:
++ case NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
++ case NFS4ERR_SEQ_FALSE_RETRY:
++ case NFS4ERR_SEQ_MISORDERED:
+ dprintk("%s ERROR %d, Reset session. Exchangeid "
+ "flags 0x%x\n", __func__, task->tk_status,
+ clp->cl_exchange_flags);
+ nfs4_schedule_session_recovery(clp->cl_session, task->tk_status);
+- break;
+- case -NFS4ERR_DELAY:
+- case -NFS4ERR_GRACE:
++ goto out_retry;
++ case NFS4ERR_DELAY:
++ nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY);
++ fallthrough;
++ case NFS4ERR_GRACE:
+ rpc_delay(task, FF_LAYOUT_POLL_RETRY_MAX);
+- break;
+- case -NFS4ERR_RETRY_UNCACHED_REP:
+- break;
++ goto out_retry;
++ case NFS4ERR_RETRY_UNCACHED_REP:
++ goto out_retry;
+ /* Invalidate Layout errors */
+- case -NFS4ERR_PNFS_NO_LAYOUT:
+- case -ESTALE: /* mapped NFS4ERR_STALE */
+- case -EBADHANDLE: /* mapped NFS4ERR_BADHANDLE */
+- case -EISDIR: /* mapped NFS4ERR_ISDIR */
+- case -NFS4ERR_FHEXPIRED:
+- case -NFS4ERR_WRONG_TYPE:
++ case NFS4ERR_PNFS_NO_LAYOUT:
++ case NFS4ERR_STALE:
++ case NFS4ERR_BADHANDLE:
++ case NFS4ERR_ISDIR:
++ case NFS4ERR_FHEXPIRED:
++ case NFS4ERR_WRONG_TYPE:
+ dprintk("%s Invalid layout error %d\n", __func__,
+ task->tk_status);
+ /*
+@@ -1144,6 +1155,11 @@ static int ff_layout_async_handle_error_v4(struct rpc_task *task,
+ pnfs_destroy_layout(NFS_I(inode));
+ rpc_wake_up(&tbl->slot_tbl_waitq);
+ goto reset;
++ default:
++ break;
++ }
++
++ switch (task->tk_status) {
+ /* RPC connection errors */
+ case -ECONNREFUSED:
+ case -EHOSTDOWN:
+@@ -1159,26 +1175,56 @@ static int ff_layout_async_handle_error_v4(struct rpc_task *task,
+ nfs4_delete_deviceid(devid->ld, devid->nfs_client,
+ &devid->deviceid);
+ rpc_wake_up(&tbl->slot_tbl_waitq);
+- fallthrough;
++ break;
+ default:
+- if (ff_layout_avoid_mds_available_ds(lseg))
+- return -NFS4ERR_RESET_TO_PNFS;
+-reset:
+- dprintk("%s Retry through MDS. Error %d\n", __func__,
+- task->tk_status);
+- return -NFS4ERR_RESET_TO_MDS;
++ break;
+ }
++
++ if (ff_layout_avoid_mds_available_ds(lseg))
++ return -NFS4ERR_RESET_TO_PNFS;
++reset:
++ dprintk("%s Retry through MDS. Error %d\n", __func__,
++ task->tk_status);
++ return -NFS4ERR_RESET_TO_MDS;
++
++out_retry:
+ task->tk_status = 0;
+ return -EAGAIN;
+ }
+
+ /* Retry all errors through either pNFS or MDS except for -EJUKEBOX */
+ static int ff_layout_async_handle_error_v3(struct rpc_task *task,
++ u32 op_status,
++ struct nfs_client *clp,
+ struct pnfs_layout_segment *lseg,
+ u32 idx)
+ {
+ struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
+
++ switch (op_status) {
++ case NFS_OK:
++ case NFSERR_NXIO:
++ break;
++ case NFSERR_PERM:
++ if (!task->tk_xprt)
++ break;
++ xprt_force_disconnect(task->tk_xprt);
++ goto out_retry;
++ case NFSERR_ACCES:
++ case NFSERR_BADHANDLE:
++ case NFSERR_FBIG:
++ case NFSERR_IO:
++ case NFSERR_NOSPC:
++ case NFSERR_ROFS:
++ case NFSERR_STALE:
++ goto out_reset_to_pnfs;
++ case NFSERR_JUKEBOX:
++ nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY);
++ goto out_retry;
++ default:
++ break;
++ }
++
+ switch (task->tk_status) {
+ /* File access problems. Don't mark the device as unavailable */
+ case -EACCES:
+@@ -1197,6 +1243,7 @@ static int ff_layout_async_handle_error_v3(struct rpc_task *task,
+ nfs4_delete_deviceid(devid->ld, devid->nfs_client,
+ &devid->deviceid);
+ }
++out_reset_to_pnfs:
+ /* FIXME: Need to prevent infinite looping here. */
+ return -NFS4ERR_RESET_TO_PNFS;
+ out_retry:
+@@ -1207,6 +1254,7 @@ static int ff_layout_async_handle_error_v3(struct rpc_task *task,
+ }
+
+ static int ff_layout_async_handle_error(struct rpc_task *task,
++ u32 op_status,
+ struct nfs4_state *state,
+ struct nfs_client *clp,
+ struct pnfs_layout_segment *lseg,
+@@ -1225,10 +1273,11 @@ static int ff_layout_async_handle_error(struct rpc_task *task,
+
+ switch (vers) {
+ case 3:
+- return ff_layout_async_handle_error_v3(task, lseg, idx);
+- case 4:
+- return ff_layout_async_handle_error_v4(task, state, clp,
++ return ff_layout_async_handle_error_v3(task, op_status, clp,
+ lseg, idx);
++ case 4:
++ return ff_layout_async_handle_error_v4(task, op_status, state,
++ clp, lseg, idx);
+ default:
+ /* should never happen */
+ WARN_ON_ONCE(1);
+@@ -1281,6 +1330,7 @@ static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg,
+ switch (status) {
+ case NFS4ERR_DELAY:
+ case NFS4ERR_GRACE:
++ case NFS4ERR_PERM:
+ break;
+ case NFS4ERR_NXIO:
+ ff_layout_mark_ds_unreachable(lseg, idx);
+@@ -1313,7 +1363,8 @@ static int ff_layout_read_done_cb(struct rpc_task *task,
+ trace_ff_layout_read_error(hdr);
+ }
+
+- err = ff_layout_async_handle_error(task, hdr->args.context->state,
++ err = ff_layout_async_handle_error(task, hdr->res.op_status,
++ hdr->args.context->state,
+ hdr->ds_clp, hdr->lseg,
+ hdr->pgio_mirror_idx);
+
+@@ -1483,7 +1534,8 @@ static int ff_layout_write_done_cb(struct rpc_task *task,
+ trace_ff_layout_write_error(hdr);
+ }
+
+- err = ff_layout_async_handle_error(task, hdr->args.context->state,
++ err = ff_layout_async_handle_error(task, hdr->res.op_status,
++ hdr->args.context->state,
+ hdr->ds_clp, hdr->lseg,
+ hdr->pgio_mirror_idx);
+
+@@ -1529,8 +1581,9 @@ static int ff_layout_commit_done_cb(struct rpc_task *task,
+ trace_ff_layout_commit_error(data);
+ }
+
+- err = ff_layout_async_handle_error(task, NULL, data->ds_clp,
+- data->lseg, data->ds_commit_index);
++ err = ff_layout_async_handle_error(task, data->res.op_status,
++ NULL, data->ds_clp, data->lseg,
++ data->ds_commit_index);
+
+ trace_nfs4_pnfs_commit_ds(data, err);
+ switch (err) {
+--
+2.39.5
+
--- /dev/null
+From 007b42e38c88f07284a4c275adbe54fc48a27803 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 25 Jun 2025 11:28:28 +0200
+Subject: s390/pci: Fix stale function handles in error handling
+
+From: Niklas Schnelle <schnelle@linux.ibm.com>
+
+[ Upstream commit 45537926dd2aaa9190ac0fac5a0fbeefcadfea95 ]
+
+The error event information for PCI error events contains a function
+handle for the respective function. This handle is generally captured at
+the time the error event was recorded. Due to delays in processing or
+cascading issues, it may happen that during firmware recovery multiple
+events are generated. When processing these events in order, Linux may
+already have recovered an affected function, making the event information
+stale. Fix this by doing an unconditional CLP List PCI function,
+retrieving the current function handle with the zdev->state_lock held,
+and ignoring the event if its function handle is stale.
+
+Cc: stable@vger.kernel.org
+Fixes: 4cdf2f4e24ff ("s390/pci: implement minimal PCI error recovery")
+Reviewed-by: Julian Ruess <julianr@linux.ibm.com>
+Reviewed-by: Gerd Bayer <gbayer@linux.ibm.com>
+Reviewed-by: Farhan Ali <alifm@linux.ibm.com>
+Signed-off-by: Niklas Schnelle <schnelle@linux.ibm.com>
+Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/s390/pci/pci_event.c | 14 ++++++++++++++
+ 1 file changed, 14 insertions(+)
+
+diff --git a/arch/s390/pci/pci_event.c b/arch/s390/pci/pci_event.c
+index d969f36bf186f..fd83588f3c11d 100644
+--- a/arch/s390/pci/pci_event.c
++++ b/arch/s390/pci/pci_event.c
+@@ -257,6 +257,8 @@ static void __zpci_event_error(struct zpci_ccdf_err *ccdf)
+ struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid);
+ struct pci_dev *pdev = NULL;
+ pci_ers_result_t ers_res;
++ u32 fh = 0;
++ int rc;
+
+ zpci_dbg(3, "err fid:%x, fh:%x, pec:%x\n",
+ ccdf->fid, ccdf->fh, ccdf->pec);
+@@ -264,6 +266,16 @@ static void __zpci_event_error(struct zpci_ccdf_err *ccdf)
+ zpci_err_hex(ccdf, sizeof(*ccdf));
+
+ if (zdev) {
++ mutex_lock(&zdev->state_lock);
++ rc = clp_refresh_fh(zdev->fid, &fh);
++ if (rc)
++ goto no_pdev;
++ if (!fh || ccdf->fh != fh) {
++ /* Ignore events with stale handles */
++ zpci_dbg(3, "err fid:%x, fh:%x (stale %x)\n",
++ ccdf->fid, fh, ccdf->fh);
++ goto no_pdev;
++ }
+ zpci_update_fh(zdev, ccdf->fh);
+ if (zdev->zbus->bus)
+ pdev = pci_get_slot(zdev->zbus->bus, zdev->devfn);
+@@ -292,6 +304,8 @@ static void __zpci_event_error(struct zpci_ccdf_err *ccdf)
+ }
+ pci_dev_put(pdev);
+ no_pdev:
++ if (zdev)
++ mutex_unlock(&zdev->state_lock);
+ zpci_zdev_put(zdev);
+ }
+
+--
+2.39.5
+
drm-v3d-disable-interrupts-before-resetting-the-gpu.patch
platform-x86-hp-bioscfg-directly-use-firmware_attrib.patch
platform-x86-hp-bioscfg-fix-class-device-unregistrat.patch
+iommu-add-iommu_domain_platform-for-s390.patch
+iommu-allow-.iotlb_sync_map-to-fail-and-handle-s390-.patch
+module-provide-export_symbol_gpl_for_modules-helper.patch
+fs-export-anon_inode_make_secure_inode-and-fix-secre.patch
+nfsv4-flexfiles-fix-handling-of-nfs-level-errors-in-.patch
+s390-pci-fix-stale-function-handles-in-error-handlin.patch