git.ipfire.org Git - thirdparty/linux.git/commitdiff
x86/mm: Introduce kernel_ident_mapping_free()
Author: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Fri, 14 Jun 2024 09:59:02 +0000 (12:59 +0300)
Committer: Borislav Petkov (AMD) <bp@alien8.de>
Mon, 17 Jun 2024 15:46:22 +0000 (17:46 +0200)
The helper complements kernel_ident_mapping_init(): it frees the identity
mapping that was previously allocated. It will be used in the error path to free
a partially allocated mapping or if the mapping is no longer needed.

The caller provides a struct x86_mapping_info with the free_pgd_page() callback
hooked up and the pgd_t to free.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Acked-by: Kai Huang <kai.huang@intel.com>
Tested-by: Tao Liu <ltao@redhat.com>
Link: https://lore.kernel.org/r/20240614095904.1345461-18-kirill.shutemov@linux.intel.com
arch/x86/include/asm/init.h
arch/x86/mm/ident_map.c

index cc9ccf61b6bd114df3ee9c5fdfa3134daee0f043..14d72727d7eecf68e88426fa6eee7988fe570cb3 100644 (file)
@@ -6,6 +6,7 @@
 
 struct x86_mapping_info {
        void *(*alloc_pgt_page)(void *); /* allocate buf for page table */
+       void (*free_pgt_page)(void *, void *); /* free buf for page table */
        void *context;                   /* context for alloc_pgt_page */
        unsigned long page_flag;         /* page flag for PMD or PUD entry */
        unsigned long offset;            /* ident mapping offset */
@@ -16,4 +17,6 @@ struct x86_mapping_info {
 int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
                                unsigned long pstart, unsigned long pend);
 
+void kernel_ident_mapping_free(struct x86_mapping_info *info, pgd_t *pgd);
+
 #endif /* _ASM_X86_INIT_H */
index 968d7005f4a72454ccf8678967f040fe06f36ad6..c45127265f2fa370d649478d46a99332fcdce08d 100644 (file)
@@ -4,6 +4,79 @@
  * included by both the compressed kernel and the regular kernel.
  */
 
+/*
+ * Free the PTE page referenced by a non-leaf PMD entry.
+ *
+ * The page is released through the caller-supplied free_pgt_page()
+ * callback, passing info->context through — mirroring alloc_pgt_page().
+ */
+static void free_pte(struct x86_mapping_info *info, pmd_t *pmd)
+{
+       pte_t *pte = pte_offset_kernel(pmd, 0);
+
+       info->free_pgt_page(pte, info->context);
+}
+
+/*
+ * Free the PMD page referenced by a non-leaf PUD entry, first freeing
+ * every PTE page its present, non-leaf entries point to.
+ */
+static void free_pmd(struct x86_mapping_info *info, pud_t *pud)
+{
+       pmd_t *pmd = pmd_offset(pud, 0);
+       int i;
+
+       for (i = 0; i < PTRS_PER_PMD; i++) {
+               if (!pmd_present(pmd[i]))
+                       continue;
+
+               /* Large-page mapping: no PTE page underneath to free */
+               if (pmd_leaf(pmd[i]))
+                       continue;
+
+               free_pte(info, &pmd[i]);
+       }
+
+       info->free_pgt_page(pmd, info->context);
+}
+
+/*
+ * Free the PUD page referenced by a P4D entry, first recursing into
+ * every present, non-leaf PUD entry to free the PMD/PTE levels below.
+ */
+static void free_pud(struct x86_mapping_info *info, p4d_t *p4d)
+{
+       pud_t *pud = pud_offset(p4d, 0);
+       int i;
+
+       for (i = 0; i < PTRS_PER_PUD; i++) {
+               if (!pud_present(pud[i]))
+                       continue;
+
+               /* Large-page mapping: no PMD page underneath to free */
+               if (pud_leaf(pud[i]))
+                       continue;
+
+               free_pmd(info, &pud[i]);
+       }
+
+       info->free_pgt_page(pud, info->context);
+}
+
+/*
+ * Free all PUD pages hanging off a PGD entry, then the P4D page itself —
+ * but only when it actually exists as a separate page (see below).
+ */
+static void free_p4d(struct x86_mapping_info *info, pgd_t *pgd)
+{
+       p4d_t *p4d = p4d_offset(pgd, 0);
+       int i;
+
+       for (i = 0; i < PTRS_PER_P4D; i++) {
+               if (!p4d_present(p4d[i]))
+                       continue;
+
+               free_pud(info, &p4d[i]);
+       }
+
+       /*
+        * With 4-level paging the p4d level is folded into the pgd, so
+        * there is no separate p4d page to free in that case.
+        */
+       if (pgtable_l5_enabled())
+               info->free_pgt_page(p4d, info->context);
+}
+
+/*
+ * Free an identity mapping previously built with
+ * kernel_ident_mapping_init(), including one that was only partially
+ * constructed (e.g. on an init error path).
+ *
+ * Walks the hierarchy top-down from @pgd, releasing every page-table
+ * page — including the PGD page itself — through the
+ * info->free_pgt_page() callback, which the caller must have hooked up.
+ */
+void kernel_ident_mapping_free(struct x86_mapping_info *info, pgd_t *pgd)
+{
+       int i;
+
+       for (i = 0; i < PTRS_PER_PGD; i++) {
+               if (!pgd_present(pgd[i]))
+                       continue;
+
+               free_p4d(info, &pgd[i]);
+       }
+
+       info->free_pgt_page(pgd, info->context);
+}
+
 static void ident_pmd_init(struct x86_mapping_info *info, pmd_t *pmd_page,
                           unsigned long addr, unsigned long end)
 {