iommu/vt-d: Fix __domain_mapping()'s usage of switch_to_super_page()
author    Eugene Koira <eugkoira@amazon.com>
          Wed, 3 Sep 2025 05:53:29 +0000 (13:53 +0800)
committer Joerg Roedel <joerg.roedel@amd.com>
          Fri, 5 Sep 2025 13:11:08 +0000 (15:11 +0200)
switch_to_super_page() assumes that the memory range it is working on is
aligned to the target large page level. Unfortunately, __domain_mapping()
does not take this into account when using it, and will pass unaligned
ranges, ultimately freeing a PTE range larger than expected.

Take for example a mapping with the following iov_pfn range [0x3fe400,
0x4c0600), which should be backed by the following mappings:

   iov_pfn [0x3fe400, 0x3fffff] covered by 2MiB pages
   iov_pfn [0x400000, 0x4bffff] covered by 1GiB pages
   iov_pfn [0x4c0000, 0x4c05ff] covered by 2MiB pages

Under these circumstances, __domain_mapping() will pass [0x400000,
0x4c05ff] to switch_to_super_page() at 1 GiB granularity, which will in
turn free PTEs all the way up to iov_pfn 0x4fffff.
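
The over-free arithmetic can be reproduced with a minimal user-space
sketch (plain C, not the driver code; LVL_PAGES_1G and the loop shape
are illustrative stand-ins for lvl_to_nr_pages() and the walk inside
switch_to_super_page()):

    #include <stdio.h>

    #define LVL_PAGES_1G 0x40000UL /* 4 KiB pages per 1 GiB superpage */

    int main(void)
    {
            unsigned long start_pfn = 0x400000, end_pfn = 0x4c05ff;
            unsigned long pfn, last_freed = 0;

            /* The walk advances one superpage per step, so an unaligned
             * end_pfn is effectively extended to the next 1 GiB boundary
             * before the loop terminates. */
            for (pfn = start_pfn; pfn <= end_pfn; pfn += LVL_PAGES_1G)
                    last_freed = pfn + LVL_PAGES_1G - 1;

            /* Prints 0x4fffff, past the requested end_pfn 0x4c05ff. */
            printf("PTEs freed through pfn 0x%lx\n", last_freed);
            return 0;
    }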

Mitigate this by rounding down the iov_pfn range passed to
switch_to_super_page() in __domain_mapping() to the target large page
level, as sketched below.

Additionally, add range alignment checks to switch_to_super_page().
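
A minimal sketch of the corrected arithmetic for the example above
(round_down() and IS_ALIGNED() mimic the kernel macros of the same
names; the real code additionally clamps pages_to_remove against
nr_pte_to_next_page(pte) * lvl_pages, omitted here):

    #include <assert.h>
    #include <stdio.h>

    #define LVL_PAGES_1G 0x40000UL
    #define round_down(x, y) ((x) & ~((y) - 1))
    #define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

    int main(void)
    {
            unsigned long iov_pfn = 0x400000;
            unsigned long nr_pages = 0x4c0600 - 0x400000; /* 0xc0600 */
            unsigned long pages_to_remove, end_pfn;

            /* Only hand whole superpages to the promotion path. */
            pages_to_remove = round_down(nr_pages, LVL_PAGES_1G);
            end_pfn = iov_pfn + pages_to_remove - 1;     /* 0x4bffff */

            /* The new sanity check in switch_to_super_page() holds. */
            assert(IS_ALIGNED(iov_pfn, LVL_PAGES_1G));
            assert(IS_ALIGNED(end_pfn + 1, LVL_PAGES_1G));

            printf("end_pfn = 0x%lx\n", end_pfn);
            return 0;
    }

The unaligned tail ([0x4c0000, 0x4c05ff] in the example) is then mapped
on a later pass at a smaller page size.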

Fixes: 9906b9352a35 ("iommu/vt-d: Avoid duplicate removing in __domain_mapping()")
Signed-off-by: Eugene Koira <eugkoira@amazon.com>
Cc: stable@vger.kernel.org
Reviewed-by: Nicolas Saenz Julienne <nsaenz@amazon.com>
Reviewed-by: David Woodhouse <dwmw@amazon.co.uk>
Link: https://lore.kernel.org/r/20250826143816.38686-1-eugkoira@amazon.com
Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
drivers/iommu/intel/iommu.c

index 9c3ab9d9f69a3e9e851a411b1f390372172ab700..dff2d895b8abd7f002e16ddad6ff5ce1484bc079 100644
@@ -1575,6 +1575,10 @@ static void switch_to_super_page(struct dmar_domain *domain,
        unsigned long lvl_pages = lvl_to_nr_pages(level);
        struct dma_pte *pte = NULL;
 
+       if (WARN_ON(!IS_ALIGNED(start_pfn, lvl_pages) ||
+                   !IS_ALIGNED(end_pfn + 1, lvl_pages)))
+               return;
+
        while (start_pfn <= end_pfn) {
                if (!pte)
                        pte = pfn_to_dma_pte(domain, start_pfn, &level,
@@ -1650,7 +1654,8 @@ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
                                unsigned long pages_to_remove;
 
                                pteval |= DMA_PTE_LARGE_PAGE;
-                               pages_to_remove = min_t(unsigned long, nr_pages,
+                               pages_to_remove = min_t(unsigned long,
+                                                       round_down(nr_pages, lvl_pages),
                                                        nr_pte_to_next_page(pte) * lvl_pages);
                                end_pfn = iov_pfn + pages_to_remove - 1;
                                switch_to_super_page(domain, iov_pfn, end_pfn, largepage_lvl);