#include <linux/kallsyms.h>
#include <linux/module.h>
#include <linux/page_ext.h>
+#include <linux/pgalloc_tag.h>
#include <linux/proc_fs.h>
+#include <linux/rcupdate.h>
#include <linux/seq_buf.h>
#include <linux/seq_file.h>
#include <linux/string_choices.h>
return mem_profiling_support;
}
+#ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG
+/*
+ * Track page allocations before page_ext is initialized.
+ * Some pages are allocated before page_ext becomes available, leaving
+ * their codetag uninitialized. Track these early PFNs so we can clear
+ * their codetag refs later to avoid warnings when they are freed.
+ *
+ * Early allocations include:
+ * - Base allocations independent of CPU count
+ * - Per-CPU allocations (e.g., CPU hotplug callbacks during smp_init,
+ * such as trace ring buffers, scheduler per-cpu data)
+ *
+ * For simplicity, we fix the size to 8192.
+ * If insufficient, a warning will be triggered to alert the user.
+ *
+ * TODO: Replace fixed-size array with dynamic allocation using
+ * a GFP flag similar to ___GFP_NO_OBJ_EXT to avoid recursion.
+ */
+#define EARLY_ALLOC_PFN_MAX 8192
+
+/*
+ * Both live in .init.data: they are only ever written through the
+ * RCU-published __init recorder below, which is unpublished (and waited
+ * out with synchronize_rcu()) before init memory is freed, so no access
+ * can outlive the section.
+ */
+static unsigned long early_pfns[EARLY_ALLOC_PFN_MAX] __initdata;
+static atomic_t early_pfn_count __initdata = ATOMIC_INIT(0);
+
+/*
+ * Record one PFN allocated before page_ext initialization.
+ *
+ * Always runs under the caller's rcu_read_lock() (see
+ * alloc_tag_add_early_pfn()), so the array store below is guaranteed to
+ * have completed before clear_early_alloc_pfn_tag_refs() — which does a
+ * synchronize_rcu() first — scans the array.
+ */
+static void __init __alloc_tag_add_early_pfn(unsigned long pfn)
+{
+	int old_idx, new_idx;
+
+	/*
+	 * Lock-free bounded slot reservation: whoever wins the cmpxchg
+	 * owns index old_idx exclusively.
+	 */
+	do {
+		old_idx = atomic_read(&early_pfn_count);
+		if (old_idx >= EARLY_ALLOC_PFN_MAX) {
+			pr_warn_once("Early page allocations before page_ext init exceeded EARLY_ALLOC_PFN_MAX (%d)\n",
+				EARLY_ALLOC_PFN_MAX);
+			return;
+		}
+		new_idx = old_idx + 1;
+	} while (!atomic_try_cmpxchg(&early_pfn_count, &old_idx, new_idx));
+
+	/* Slot old_idx is ours; no other CPU can write it. */
+	early_pfns[old_idx] = pfn;
+}
+
+typedef void alloc_tag_add_func(unsigned long pfn);
+/*
+ * RCU-published hook into the __init recorder above. __refdata suppresses
+ * the section-mismatch warning: the pointer is set to NULL and a grace
+ * period elapses (clear_early_alloc_pfn_tag_refs()) before init memory is
+ * discarded, so the stale __init address is never dereferenced afterwards.
+ */
+static alloc_tag_add_func __rcu *alloc_tag_add_early_pfn_ptr __refdata =
+	RCU_INITIALIZER(__alloc_tag_add_early_pfn);
+
+/*
+ * Entry point called for page allocations that happen before page_ext is
+ * initialized. Once early tracking is torn down this degenerates to a
+ * cheap load + NULL check.
+ */
+void alloc_tag_add_early_pfn(unsigned long pfn)
+{
+	alloc_tag_add_func *alloc_tag_add;
+
+	/*
+	 * NOTE(review): compressed tag storage appears not to use page_ext
+	 * refs, hence nothing to clear later — confirm against the
+	 * mem_profiling_compressed handling elsewhere in this file.
+	 */
+	if (static_key_enabled(&mem_profiling_compressed))
+		return;
+
+	rcu_read_lock();
+	alloc_tag_add = rcu_dereference(alloc_tag_add_early_pfn_ptr);
+	if (alloc_tag_add)
+		alloc_tag_add(pfn);
+	rcu_read_unlock();
+}
+
+/*
+ * Mark the codetag refs of all early-recorded pages as CODETAG_EMPTY so
+ * that freeing them later does not trigger a missing-tag warning.
+ * Runs once from init_page_alloc_tagging(), after page_ext is ready.
+ */
+static void __init clear_early_alloc_pfn_tag_refs(void)
+{
+	int count, i;
+
+	/* Early tracking is skipped for compressed tags; nothing recorded. */
+	if (static_key_enabled(&mem_profiling_compressed))
+		return;
+
+	/* Unpublish the recorder so no new PFNs can be added. */
+	rcu_assign_pointer(alloc_tag_add_early_pfn_ptr, NULL);
+	/* Make sure we are not racing with __alloc_tag_add_early_pfn() */
+	synchronize_rcu();
+
+	/*
+	 * After the grace period no recorder can run, so the count and the
+	 * array contents are stable: read the count once instead of
+	 * re-reading the atomic on every iteration.
+	 */
+	count = atomic_read(&early_pfn_count);
+	for (i = 0; i < count; i++) {
+		unsigned long pfn = early_pfns[i];
+		union pgtag_ref_handle handle;
+		union codetag_ref ref;
+		struct page *page;
+
+		if (!pfn_valid(pfn))
+			continue;
+
+		page = pfn_to_page(pfn);
+		if (!get_page_tag_ref(page, &ref, &handle))
+			continue;
+
+		/*
+		 * An early-allocated page could be freed and reallocated
+		 * after its page_ext is initialized but before we clear it.
+		 * In that case, it already has a valid tag set.
+		 * We should not overwrite that valid tag with CODETAG_EMPTY.
+		 *
+		 * Note: there is still a small race window between checking
+		 * ref.ct and calling set_codetag_empty(). We accept this
+		 * race as it's unlikely and the extra complexity of atomic
+		 * cmpxchg is not worth it for this debug-only code path.
+		 */
+		if (ref.ct) {
+			put_page_tag_ref(handle);
+			continue;
+		}
+
+		set_codetag_empty(&ref);
+		update_page_tag_ref(handle, &ref);
+		put_page_tag_ref(handle);
+	}
+}
+#else /* !CONFIG_MEM_ALLOC_PROFILING_DEBUG */
+static inline void __init clear_early_alloc_pfn_tag_refs(void) {}
+#endif /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */
+
static __init void init_page_alloc_tagging(void)
{
+	/*
+	 * page_ext is available by now: retroactively mark pages recorded
+	 * before its initialization so freeing them does not warn.
+	 */
+	clear_early_alloc_pfn_tag_refs();
}
struct page_ext_operations page_alloc_tagging_ops = {