git.ipfire.org Git - thirdparty/linux.git/commitdiff
tracing: load/unload page callbacks for simple_ring_buffer
author: Vincent Donnefort <vdonnefort@google.com>
Mon, 9 Mar 2026 16:25:03 +0000 (16:25 +0000)
committer: Steven Rostedt (Google) <rostedt@goodmis.org>
Mon, 9 Mar 2026 16:33:55 +0000 (12:33 -0400)
Add load/unload callbacks used for each admitted page in the ring-buffer.
This will later be useful for the pKVM hypervisor, which uses a different
VA space and needs to dynamically map/unmap the ring-buffer pages.

Link: https://patch.msgid.link/20260309162516.2623589-18-vdonnefort@google.com
Reviewed-by: Steven Rostedt (Google) <rostedt@goodmis.org>
Signed-off-by: Vincent Donnefort <vdonnefort@google.com>
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
include/linux/simple_ring_buffer.h
kernel/trace/simple_ring_buffer.c

index 2c4c0ae336bccd5b4f7a6d8f7e1e8bfaee1ce759..21aec556293eb2c03d3a80f84ad2999a9a03014e 100644 (file)
@@ -54,4 +54,12 @@ int simple_ring_buffer_reset(struct simple_rb_per_cpu *cpu_buffer);
 
 int simple_ring_buffer_swap_reader_page(struct simple_rb_per_cpu *cpu_buffer);
 
+int simple_ring_buffer_init_mm(struct simple_rb_per_cpu *cpu_buffer,
+                              struct simple_buffer_page *bpages,
+                              const struct ring_buffer_desc *desc,
+                              void *(*load_page)(unsigned long va),
+                              void (*unload_page)(void *va));
+
+void simple_ring_buffer_unload_mm(struct simple_rb_per_cpu *cpu_buffer,
+                                 void (*unload_page)(void *));
 #endif
index 15df9781411b0c8a8baf43869759fb3afc75ef86..02af2297ae5a955481665ed11a835c66327ddf5d 100644 (file)
@@ -71,7 +71,7 @@ static void simple_bpage_reset(struct simple_buffer_page *bpage)
        local_set(&bpage->page->commit, 0);
 }
 
-static void simple_bpage_init(struct simple_buffer_page *bpage, unsigned long page)
+static void simple_bpage_init(struct simple_buffer_page *bpage, void *page)
 {
        INIT_LIST_HEAD(&bpage->link);
        bpage->page = (struct buffer_data_page *)page;
@@ -372,18 +372,15 @@ int simple_ring_buffer_reset(struct simple_rb_per_cpu *cpu_buffer)
 }
 EXPORT_SYMBOL_GPL(simple_ring_buffer_reset);
 
-/**
- * simple_ring_buffer_init - Init @cpu_buffer based on @desc
- * @cpu_buffer:        A simple_rb_per_cpu buffer to init, allocated by the caller.
- * @bpages:    Array of simple_buffer_pages, with as many elements as @desc->nr_page_va
- * @desc:      A ring_buffer_desc
- *
- * Returns 0 on success or -EINVAL if the content of @desc is invalid
- */
-int simple_ring_buffer_init(struct simple_rb_per_cpu *cpu_buffer, struct simple_buffer_page *bpages,
-                           const struct ring_buffer_desc *desc)
+int simple_ring_buffer_init_mm(struct simple_rb_per_cpu *cpu_buffer,
+                              struct simple_buffer_page *bpages,
+                              const struct ring_buffer_desc *desc,
+                              void *(*load_page)(unsigned long va),
+                              void (*unload_page)(void *va))
 {
        struct simple_buffer_page *bpage = bpages;
+       int ret = 0;
+       void *page;
        int i;
 
        /* At least 1 reader page and two pages in the ring-buffer */
@@ -392,15 +389,22 @@ int simple_ring_buffer_init(struct simple_rb_per_cpu *cpu_buffer, struct simple_
 
        memset(cpu_buffer, 0, sizeof(*cpu_buffer));
 
-       cpu_buffer->bpages = bpages;
+       cpu_buffer->meta = load_page(desc->meta_va);
+       if (!cpu_buffer->meta)
+               return -EINVAL;
 
-       cpu_buffer->meta = (void *)desc->meta_va;
        memset(cpu_buffer->meta, 0, sizeof(*cpu_buffer->meta));
        cpu_buffer->meta->meta_page_size = PAGE_SIZE;
        cpu_buffer->meta->nr_subbufs = cpu_buffer->nr_pages;
 
        /* The reader page is not part of the ring initially */
-       simple_bpage_init(bpage, desc->page_va[0]);
+       page = load_page(desc->page_va[0]);
+       if (!page) {
+               unload_page(cpu_buffer->meta);
+               return -EINVAL;
+       }
+
+       simple_bpage_init(bpage, page);
        bpage->id = 0;
 
        cpu_buffer->nr_pages = 1;
@@ -410,7 +414,13 @@ int simple_ring_buffer_init(struct simple_rb_per_cpu *cpu_buffer, struct simple_
        cpu_buffer->head_page = bpage + 1;
 
        for (i = 1; i < desc->nr_page_va; i++) {
-               simple_bpage_init(++bpage, desc->page_va[i]);
+               page = load_page(desc->page_va[i]);
+               if (!page) {
+                       ret = -EINVAL;
+                       break;
+               }
+
+               simple_bpage_init(++bpage, page);
 
                bpage->link.next = &(bpage + 1)->link;
                bpage->link.prev = &(bpage - 1)->link;
@@ -419,6 +429,14 @@ int simple_ring_buffer_init(struct simple_rb_per_cpu *cpu_buffer, struct simple_
                cpu_buffer->nr_pages = i + 1;
        }
 
+       if (ret) {
+               for (i--; i >= 0; i--)
+                       unload_page((void *)desc->page_va[i]);
+               unload_page(cpu_buffer->meta);
+
+               return ret;
+       }
+
        /* Close the ring */
        bpage->link.next = &cpu_buffer->tail_page->link;
        cpu_buffer->tail_page->link.prev = &bpage->link;
@@ -426,23 +444,58 @@ int simple_ring_buffer_init(struct simple_rb_per_cpu *cpu_buffer, struct simple_
        /* The last init'ed page points to the head page */
        simple_bpage_set_head_link(bpage);
 
+       cpu_buffer->bpages = bpages;
+
        return 0;
 }
-EXPORT_SYMBOL_GPL(simple_ring_buffer_init);
+
+static void *__load_page(unsigned long page)
+{
+       return (void *)page;
+}
+
+static void __unload_page(void *page) { }
 
 /**
- * simple_ring_buffer_unload - Prepare @cpu_buffer for deletion
- * @cpu_buffer:        A simple_rb_per_cpu that will be deleted.
+ * simple_ring_buffer_init - Init @cpu_buffer based on @desc
+ * @cpu_buffer:        A simple_rb_per_cpu buffer to init, allocated by the caller.
+ * @bpages:    Array of simple_buffer_pages, with as many elements as @desc->nr_page_va
+ * @desc:      A ring_buffer_desc
+ *
+ * Returns 0 on success or -EINVAL if the content of @desc is invalid
  */
-void simple_ring_buffer_unload(struct simple_rb_per_cpu *cpu_buffer)
+int simple_ring_buffer_init(struct simple_rb_per_cpu *cpu_buffer, struct simple_buffer_page *bpages,
+                           const struct ring_buffer_desc *desc)
+{
+       return simple_ring_buffer_init_mm(cpu_buffer, bpages, desc, __load_page, __unload_page);
+}
+EXPORT_SYMBOL_GPL(simple_ring_buffer_init);
+
+void simple_ring_buffer_unload_mm(struct simple_rb_per_cpu *cpu_buffer,
+                                 void (*unload_page)(void *))
 {
+       int p;
+
        if (!simple_rb_loaded(cpu_buffer))
                return;
 
        simple_rb_enable_tracing(cpu_buffer, false);
 
+       unload_page(cpu_buffer->meta);
+       for (p = 0; p < cpu_buffer->nr_pages; p++)
+               unload_page(cpu_buffer->bpages[p].page);
+
        cpu_buffer->bpages = NULL;
 }
+
+/**
+ * simple_ring_buffer_unload - Prepare @cpu_buffer for deletion
+ * @cpu_buffer:        A simple_rb_per_cpu that will be deleted.
+ */
+void simple_ring_buffer_unload(struct simple_rb_per_cpu *cpu_buffer)
+{
+       return simple_ring_buffer_unload_mm(cpu_buffer, __unload_page);
+}
 EXPORT_SYMBOL_GPL(simple_ring_buffer_unload);
 
 /**