KVM: arm64: Rename free_removed to free_unlinked
author	Ricardo Koller <ricarkol@google.com>
	Wed, 26 Apr 2023 17:23:19 +0000 (17:23 +0000)
committer	Oliver Upton <oliver.upton@linux.dev>
	Tue, 16 May 2023 17:39:17 +0000 (17:39 +0000)

Normalize on referring to tables outside of an active paging structure
as 'unlinked'.

A subsequent change to KVM will add support for building page tables
that are not part of an active paging structure. The existing
'removed_table' terminology is quite clunky when applied in this
context.

Signed-off-by: Ricardo Koller <ricarkol@google.com>
Reviewed-by: Oliver Upton <oliver.upton@linux.dev>
Reviewed-by: Shaoqin Huang <shahuang@redhat.com>
Reviewed-by: Gavin Shan <gshan@redhat.com>
Link: https://lore.kernel.org/r/20230426172330.1439644-2-ricarkol@google.com
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
arch/arm64/include/asm/kvm_pgtable.h
arch/arm64/kvm/hyp/nvhe/mem_protect.c
arch/arm64/kvm/hyp/pgtable.c
arch/arm64/kvm/mmu.c

diff --git a/arch/arm64/include/asm/kvm_pgtable.h b/arch/arm64/include/asm/kvm_pgtable.h
index 4cd6762bda805d16fe798a1db258155ced0d1a28..26a4293726c14c9b9355adfdd748c15ac47ba9be 100644
--- a/arch/arm64/include/asm/kvm_pgtable.h
+++ b/arch/arm64/include/asm/kvm_pgtable.h
@@ -104,7 +104,7 @@ static inline bool kvm_level_supports_block_mapping(u32 level)
  *                             allocation is physically contiguous.
  * @free_pages_exact:          Free an exact number of memory pages previously
  *                             allocated by zalloc_pages_exact.
- * @free_removed_table:                Free a removed paging structure by unlinking and
+ * @free_unlinked_table:       Free an unlinked paging structure by unlinking and
  *                             dropping references.
  * @get_page:                  Increment the refcount on a page.
  * @put_page:                  Decrement the refcount on a page. When the
@@ -124,7 +124,7 @@ struct kvm_pgtable_mm_ops {
        void*           (*zalloc_page)(void *arg);
        void*           (*zalloc_pages_exact)(size_t size);
        void            (*free_pages_exact)(void *addr, size_t size);
-       void            (*free_removed_table)(void *addr, u32 level);
+       void            (*free_unlinked_table)(void *addr, u32 level);
        void            (*get_page)(void *addr);
        void            (*put_page)(void *addr);
        int             (*page_count)(void *addr);
@@ -440,7 +440,7 @@ int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
 void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt);
 
 /**
- * kvm_pgtable_stage2_free_removed() - Free a removed stage-2 paging structure.
+ * kvm_pgtable_stage2_free_unlinked() - Free an unlinked stage-2 paging structure.
  * @mm_ops:    Memory management callbacks.
  * @pgtable:   Unlinked stage-2 paging structure to be freed.
  * @level:     Level of the stage-2 paging structure to be freed.
@@ -448,7 +448,7 @@ void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt);
  * The page-table is assumed to be unreachable by any hardware walkers prior to
  * freeing and therefore no TLB invalidation is performed.
  */
-void kvm_pgtable_stage2_free_removed(struct kvm_pgtable_mm_ops *mm_ops, void *pgtable, u32 level);
+void kvm_pgtable_stage2_free_unlinked(struct kvm_pgtable_mm_ops *mm_ops, void *pgtable, u32 level);
 
 /**
  * kvm_pgtable_stage2_map() - Install a mapping in a guest stage-2 page-table.
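
As the kernel-doc above spells out, kvm_pgtable_stage2_free_unlinked() performs no TLB invalidation of its own, so a subtree must be fully detached before it is handed over. Below is a minimal sketch of that calling contract; every example_* helper is a hypothetical stand-in, and only struct kvm_pgtable_mm_ops, u32 and kvm_pgtable_stage2_free_unlinked() are taken from the header above.

/*
 * Sketch only: the calling contract of kvm_pgtable_stage2_free_unlinked().
 * Every example_* helper is a hypothetical stand-in for the real walker
 * code in pgtable.c.
 */
static void example_retire_table(struct kvm_pgtable_mm_ops *mm_ops,
				 void *childp, u32 level)
{
	/*
	 * 1) Unlink: clear the parent entry so no hardware walker can
	 *    reach the child table any more.
	 */
	example_clear_parent_entry();			/* hypothetical */

	/*
	 * 2) Invalidate: flush any cached translations that may still
	 *    point into the detached subtree.
	 */
	example_invalidate_tlbs();			/* hypothetical */

	/*
	 * 3) Free: the subtree is now unlinked, so it can be torn down
	 *    without further TLB invalidation.
	 */
	kvm_pgtable_stage2_free_unlinked(mm_ops, childp, level);
}
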
diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
index 2e9ec4a2a4a323d238e29f8ac199da8ea5301768..d35e75b13ffe164d7d16fc57d3b7f15ebbe8fa27 100644
--- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c
+++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
@@ -91,9 +91,9 @@ static void host_s2_put_page(void *addr)
        hyp_put_page(&host_s2_pool, addr);
 }
 
-static void host_s2_free_removed_table(void *addr, u32 level)
+static void host_s2_free_unlinked_table(void *addr, u32 level)
 {
-       kvm_pgtable_stage2_free_removed(&host_mmu.mm_ops, addr, level);
+       kvm_pgtable_stage2_free_unlinked(&host_mmu.mm_ops, addr, level);
 }
 
 static int prepare_s2_pool(void *pgt_pool_base)
@@ -110,7 +110,7 @@ static int prepare_s2_pool(void *pgt_pool_base)
        host_mmu.mm_ops = (struct kvm_pgtable_mm_ops) {
                .zalloc_pages_exact = host_s2_zalloc_pages_exact,
                .zalloc_page = host_s2_zalloc_page,
-               .free_removed_table = host_s2_free_removed_table,
+               .free_unlinked_table = host_s2_free_unlinked_table,
                .phys_to_virt = hyp_phys_to_virt,
                .virt_to_phys = hyp_virt_to_phys,
                .page_count = hyp_page_count,
diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c
index 3d61bd3e591d27e9858b028220a9eb81a7498f6d..a3246d6cddec7e9d1e167ca50f3779265141bfe0 100644
--- a/arch/arm64/kvm/hyp/pgtable.c
+++ b/arch/arm64/kvm/hyp/pgtable.c
@@ -860,7 +860,7 @@ static int stage2_map_walk_table_pre(const struct kvm_pgtable_visit_ctx *ctx,
        if (ret)
                return ret;
 
-       mm_ops->free_removed_table(childp, ctx->level);
+       mm_ops->free_unlinked_table(childp, ctx->level);
        return 0;
 }
 
@@ -905,7 +905,7 @@ static int stage2_map_walk_leaf(const struct kvm_pgtable_visit_ctx *ctx,
  * The TABLE_PRE callback runs for table entries on the way down, looking
  * for table entries which we could conceivably replace with a block entry
  * for this mapping. If it finds one it replaces the entry and calls
- * kvm_pgtable_mm_ops::free_removed_table() to tear down the detached table.
+ * kvm_pgtable_mm_ops::free_unlinked_table() to tear down the detached table.
  *
  * Otherwise, the LEAF callback performs the mapping at the existing leaves
  * instead.
@@ -1276,7 +1276,7 @@ void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt)
        pgt->pgd = NULL;
 }
 
-void kvm_pgtable_stage2_free_removed(struct kvm_pgtable_mm_ops *mm_ops, void *pgtable, u32 level)
+void kvm_pgtable_stage2_free_unlinked(struct kvm_pgtable_mm_ops *mm_ops, void *pgtable, u32 level)
 {
        kvm_pteref_t ptep = (kvm_pteref_t)pgtable;
        struct kvm_pgtable_walker walker = {
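
The comment above describes the choice the map walker makes on the way down: either replace a table entry with a block entry and retire the detached table, or fall through to the LEAF callback. A simplified conceptual sketch of the TABLE_PRE branch follows; every example_* helper is hypothetical, and only free_unlinked_table(), struct kvm_pgtable_visit_ctx and ctx->level come from the patch.

/*
 * Conceptual sketch of the TABLE_PRE step described above; not the real
 * stage2_map_walk_table_pre(), whose helpers are replaced here by
 * hypothetical example_* functions.
 */
static int example_table_pre(const struct kvm_pgtable_visit_ctx *ctx,
			     struct kvm_pgtable_mm_ops *mm_ops)
{
	void *childp;

	/*
	 * Only table entries that a block mapping could replace are of
	 * interest; everything else is left to the LEAF callback.
	 */
	if (!example_block_mapping_possible(ctx))	/* hypothetical */
		return 0;

	/* Detach the child table and install the block entry in its place. */
	childp = example_detach_child_table(ctx);	/* hypothetical */
	example_install_block_entry(ctx);		/* hypothetical */

	/*
	 * The child table is now unlinked; hand it to the backend, which
	 * may free it immediately (hyp) or defer it (host, see mmu.c below).
	 */
	mm_ops->free_unlinked_table(childp, ctx->level);
	return 0;
}
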
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 3b9d4d24c361ae4f903e12c19df146dcb90c9db8..a0d3c773af99513d89cb44dcf645aa83025aec09 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -131,21 +131,21 @@ static void kvm_s2_free_pages_exact(void *virt, size_t size)
 
 static struct kvm_pgtable_mm_ops kvm_s2_mm_ops;
 
-static void stage2_free_removed_table_rcu_cb(struct rcu_head *head)
+static void stage2_free_unlinked_table_rcu_cb(struct rcu_head *head)
 {
        struct page *page = container_of(head, struct page, rcu_head);
        void *pgtable = page_to_virt(page);
        u32 level = page_private(page);
 
-       kvm_pgtable_stage2_free_removed(&kvm_s2_mm_ops, pgtable, level);
+       kvm_pgtable_stage2_free_unlinked(&kvm_s2_mm_ops, pgtable, level);
 }
 
-static void stage2_free_removed_table(void *addr, u32 level)
+static void stage2_free_unlinked_table(void *addr, u32 level)
 {
        struct page *page = virt_to_page(addr);
 
        set_page_private(page, (unsigned long)level);
-       call_rcu(&page->rcu_head, stage2_free_removed_table_rcu_cb);
+       call_rcu(&page->rcu_head, stage2_free_unlinked_table_rcu_cb);
 }
 
 static void kvm_host_get_page(void *addr)
@@ -701,7 +701,7 @@ static struct kvm_pgtable_mm_ops kvm_s2_mm_ops = {
        .zalloc_page            = stage2_memcache_zalloc_page,
        .zalloc_pages_exact     = kvm_s2_zalloc_pages_exact,
        .free_pages_exact       = kvm_s2_free_pages_exact,
-       .free_removed_table     = stage2_free_removed_table,
+       .free_unlinked_table    = stage2_free_unlinked_table,
        .get_page               = kvm_host_get_page,
        .put_page               = kvm_s2_put_page,
        .page_count             = kvm_host_page_count,
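
In contrast to the hyp backend in mem_protect.c, which frees an unlinked table immediately, the host backend above defers the free until an RCU grace period has passed, stashing the table level in the page's private word. Below is an annotated restatement of that pattern; the flow is taken from the hunk above, while the comments and example_* names are editorial.

/*
 * Annotated sketch of the deferral pattern used by
 * stage2_free_unlinked_table() above; comments added here for clarity.
 */
static void example_free_unlinked_table_rcu_cb(struct rcu_head *head)
{
	/* The rcu_head is embedded in struct page, so recover the page ... */
	struct page *page = container_of(head, struct page, rcu_head);
	/* ... whose virtual address is the unlinked table ... */
	void *pgtable = page_to_virt(page);
	/* ... and whose private word carries the level stashed below. */
	u32 level = page_private(page);

	kvm_pgtable_stage2_free_unlinked(&kvm_s2_mm_ops, pgtable, level);
}

static void example_free_unlinked_table(void *addr, u32 level)
{
	struct page *page = virt_to_page(addr);

	/*
	 * Shared walkers may still be traversing the old table under RCU
	 * protection, so the actual teardown must wait for a grace period.
	 * The level fits in the page's private word, so no extra allocation
	 * is needed to pass it to the callback.
	 */
	set_page_private(page, (unsigned long)level);
	call_rcu(&page->rcu_head, example_free_unlinked_table_rcu_cb);
}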