From 8d5da4d2a3d7d9173208f4e8dc7a709f0bfc9820 Mon Sep 17 00:00:00 2001
From: Michael Jeanson <mjeanson@efficios.com>
Date: Wed, 8 Jun 2022 12:56:36 -0400
Subject: [PATCH 1/3] fix: mm/page_alloc: fix tracepoint
 mm_page_alloc_zone_locked() (v5.19)

commit 10e0f7530205799e7e971aba699a7cb3a47456de
Author: Wonhyuk Yang <vvghjk1234@gmail.com>
Date:   Thu May 19 14:08:54 2022 -0700

mm/page_alloc: fix tracepoint mm_page_alloc_zone_locked()
Currently, the tracepoint mm_page_alloc_zone_locked() doesn't show correct
information.

First, when alloc_flags has ALLOC_HARDER/ALLOC_CMA, the page can be allocated
from MIGRATE_HIGHATOMIC/MIGRATE_CMA. Nevertheless, the tracepoint uses the
requested migration type, not MIGRATE_HIGHATOMIC or MIGRATE_CMA.

Second, after commit 44042b4498728 ("mm/page_alloc: allow high-order pages
to be stored on the per-cpu lists") the per-cpu list can store high-order
pages, but the tracepoint determines whether an allocation is a refill of
the per-cpu list by comparing the requested order with 0.
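
As a rough illustration of both problems, the pre-5.19 allocation path looked
approximately like this (a paraphrased sketch, not the verbatim upstream code;
__rmqueue() and the surrounding variable names are borrowed from
mm/page_alloc.c):

	/*
	 * Old pattern (sketch): when ALLOC_HARDER falls back to the
	 * MIGRATE_HIGHATOMIC free list, the event still records the
	 * migratetype that was requested, and "is this a per-cpu list
	 * refill?" can only be guessed later from order == 0.
	 */
	if (order > 0 && alloc_flags & ALLOC_HARDER) {
		page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
		if (page)
			trace_mm_page_alloc_zone_locked(page, order, migratetype);
	}
	if (!page)
		page = __rmqueue(zone, order, migratetype, alloc_flags);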
To handle these problems, make mm_page_alloc_zone_locked() be called only
by __rmqueue_smallest(), with the correct migration type. With a new
argument called percpu_refill, it can show roughly whether the allocation
is a refill of the per-cpu list.
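
The call site then moves into __rmqueue_smallest(), which knows which free
list it actually allocated from. A minimal sketch of the fixed invocation
(the exact condition may differ upstream; pcp_allowed_order() and
MIGRATE_PCPTYPES are assumptions borrowed from mm/page_alloc.c):

	/*
	 * New pattern (sketch), emitted from __rmqueue_smallest(): report the
	 * migratetype that was really used, and pass an explicit
	 * percpu_refill flag instead of relying on the old order == 0
	 * heuristic.
	 */
	trace_mm_page_alloc_zone_locked(page, order, migratetype,
			pcp_allowed_order(order) &&
			migratetype < MIGRATE_PCPTYPES);

The LTTng instrumentation below mirrors the new four-argument prototype for
kernels >= 5.19 and keeps the existing three-argument event class for older
kernels.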
Upstream-Status: Backport

Change-Id: I2e4a57393757f12b9c5a4566c4d1102ee2474a09
Signed-off-by: Michael Jeanson <mjeanson@efficios.com>
Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
---
 include/instrumentation/events/kmem.h | 45 +++++++++++++++++++++++++++
 1 file changed, 45 insertions(+)
diff --git a/include/instrumentation/events/kmem.h b/include/instrumentation/events/kmem.h
index 29c0fb7f..8c19e962 100644
--- a/include/instrumentation/events/kmem.h
+++ b/include/instrumentation/events/kmem.h
@@ -218,6 +218,50 @@ LTTNG_TRACEPOINT_EVENT_MAP(mm_page_alloc, kmem_mm_page_alloc,
 	)
 )
 
+#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,19,0))
+LTTNG_TRACEPOINT_EVENT_CLASS(kmem_mm_page,
+
+	TP_PROTO(struct page *page, unsigned int order, int migratetype,
+		int percpu_refill),
+
+	TP_ARGS(page, order, migratetype, percpu_refill),
+
+	TP_FIELDS(
+		ctf_integer_hex(struct page *, page, page)
+		ctf_integer(unsigned long, pfn,
+			page ? page_to_pfn(page) : -1UL)
+		ctf_integer(unsigned int, order, order)
+		ctf_integer(int, migratetype, migratetype)
+		ctf_integer(int, percpu_refill, percpu_refill)
+	)
+)
+
+LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP(kmem_mm_page, mm_page_alloc_zone_locked,
+
+	kmem_mm_page_alloc_zone_locked,
+
+	TP_PROTO(struct page *page, unsigned int order, int migratetype,
+		int percpu_refill),
+
+	TP_ARGS(page, order, migratetype, percpu_refill)
+)
+
+LTTNG_TRACEPOINT_EVENT_MAP(mm_page_pcpu_drain,
+
+	kmem_mm_page_pcpu_drain,
+
+	TP_PROTO(struct page *page, unsigned int order, int migratetype),
+
+	TP_ARGS(page, order, migratetype),
+
+	TP_FIELDS(
+		ctf_integer(unsigned long, pfn,
+			page ? page_to_pfn(page) : -1UL)
+		ctf_integer(unsigned int, order, order)
+		ctf_integer(int, migratetype, migratetype)
+	)
+)
+#else
 LTTNG_TRACEPOINT_EVENT_CLASS(kmem_mm_page,
 
 	TP_PROTO(struct page *page, unsigned int order, int migratetype),
@@ -250,6 +294,7 @@ LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP(kmem_mm_page, mm_page_pcpu_drain,
 
 	TP_ARGS(page, order, migratetype)
 )
+#endif
 
 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,19,2) \
 	|| LTTNG_KERNEL_RANGE(3,14,36, 3,15,0) \