accel/tcg: Merge tb_invalidate_phys_page_fast__locked into tb_invalidate_phys_range_fast
author     Richard Henderson <richard.henderson@linaro.org>
           Wed, 23 Apr 2025 19:37:28 +0000 (12:37 -0700)
committer  Richard Henderson <richard.henderson@linaro.org>
           Wed, 30 Apr 2025 19:45:05 +0000 (12:45 -0700)
Merge tb_invalidate_phys_page_fast__locked into its
only caller, tb_invalidate_phys_range_fast.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
accel/tcg/tb-maint.c

index 927e9c8ede26ff0bb87417c98e7d346eb27e93ab..c893ea3073a45c633c1fa94cd579c5d0a9e0447e 100644
@@ -1203,38 +1203,24 @@ void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t last)
     page_collection_unlock(pages);
 }
 
-/*
- * Call with all @pages in the range [@start, @start + len[ locked.
- */
-static void tb_invalidate_phys_page_fast__locked(struct page_collection *pages,
-                                                 tb_page_addr_t start,
-                                                 unsigned len, uintptr_t ra)
-{
-    PageDesc *p;
-
-    p = page_find(start >> TARGET_PAGE_BITS);
-    if (!p) {
-        return;
-    }
-
-    assert_page_locked(p);
-    tb_invalidate_phys_page_range__locked(NULL, pages, p, start, start + len - 1, ra);
-}
-
 /*
  * len must be <= 8 and start must be a multiple of len.
  * Called via softmmu_template.h when code areas are written to with
  * iothread mutex not held.
  */
-void tb_invalidate_phys_range_fast(ram_addr_t ram_addr,
-                                   unsigned size,
-                                   uintptr_t retaddr)
+void tb_invalidate_phys_range_fast(ram_addr_t start,
+                                   unsigned len, uintptr_t ra)
 {
-    struct page_collection *pages;
+    PageDesc *p = page_find(start >> TARGET_PAGE_BITS);
 
-    pages = page_collection_lock(ram_addr, ram_addr + size - 1);
-    tb_invalidate_phys_page_fast__locked(pages, ram_addr, size, retaddr);
-    page_collection_unlock(pages);
+    if (p) {
+        ram_addr_t last = start + len - 1;
+        struct page_collection *pages = page_collection_lock(start, last);
+
+        tb_invalidate_phys_page_range__locked(NULL, pages, p,
+                                              start, last, ra);
+        page_collection_unlock(pages);
+    }
 }
 
 #endif /* CONFIG_USER_ONLY */
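
For reference, the merged function as it reads after this patch, reassembled from the '+' and unchanged context lines of the hunk above (nothing here beyond what the diff itself shows):

/*
 * len must be <= 8 and start must be a multiple of len.
 * Called via softmmu_template.h when code areas are written to with
 * iothread mutex not held.
 */
void tb_invalidate_phys_range_fast(ram_addr_t start,
                                   unsigned len, uintptr_t ra)
{
    PageDesc *p = page_find(start >> TARGET_PAGE_BITS);

    if (p) {
        ram_addr_t last = start + len - 1;
        struct page_collection *pages = page_collection_lock(start, last);

        tb_invalidate_phys_page_range__locked(NULL, pages, p,
                                              start, last, ra);
        page_collection_unlock(pages);
    }
}

Besides removing one call level, the merge reorders the fast path: the old code took the page_collection lock before the helper's page_find(), so a write to a page with no PageDesc still paid for a lock/unlock round trip, whereas the merged version checks page_find() first and skips locking entirely when the page holds no translated code.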