From: Libo Chen
Date: Thu, 24 Apr 2025 02:45:23 +0000 (-0700)
Subject: sched/numa: add tracepoint that tracks the skipping of numa balancing due to cpuset memory pinning
X-Git-Tag: v6.16-rc1~92^2~80
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=3fc567e4c0b71d6c59ba26c5d6e54cf3c490dd3a;p=thirdparty%2Flinux.git

sched/numa: add tracepoint that tracks the skipping of numa balancing due to cpuset memory pinning

Unlike the sched_skip_vma_numa tracepoint, which tracks skipped VMAs, this one tracks the task subjected to cpuset.mems pinning and prints out its allowed memory node mask.

Link: https://lkml.kernel.org/r/20250424024523.2298272-3-libo.chen@oracle.com
Signed-off-by: Libo Chen
Cc: "Chen, Tim C"
Cc: Chen Yu
Cc: Chris Hyser
Cc: Daniel Jordan
Cc: Ingo Molnar
Cc: Juri Lelli
Cc: K Prateek Nayak
Cc: Lorenzo Stoakes
Cc: Madadi Vineeth Reddy
Cc: Mel Gorman
Cc: Michal Koutný
Cc: Peter Zijlstra
Cc: Raghavendra K T
Cc: Srikanth Aithal
Cc: Steven Rostedt
Cc: Tejun Heo
Cc: Venkat Rao Bagalkote
Cc: Vincent Guittot
Signed-off-by: Andrew Morton
---

diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 8994e97d86c13..ff3990318aecd 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -745,6 +745,39 @@ TRACE_EVENT(sched_skip_vma_numa,
 		  __entry->vm_end,
 		  __print_symbolic(__entry->reason, NUMAB_SKIP_REASON))
 );
+
+TRACE_EVENT(sched_skip_cpuset_numa,
+
+	TP_PROTO(struct task_struct *tsk, nodemask_t *mem_allowed_ptr),
+
+	TP_ARGS(tsk, mem_allowed_ptr),
+
+	TP_STRUCT__entry(
+		__array( char,		comm,		TASK_COMM_LEN	)
+		__field( pid_t,		pid				)
+		__field( pid_t,		tgid				)
+		__field( pid_t,		ngid				)
+		__array( unsigned long, mem_allowed, BITS_TO_LONGS(MAX_NUMNODES))
+	),
+
+	TP_fast_assign(
+		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+		__entry->pid		= task_pid_nr(tsk);
+		__entry->tgid		= task_tgid_nr(tsk);
+		__entry->ngid		= task_numa_group_id(tsk);
+		BUILD_BUG_ON(sizeof(nodemask_t) != \
+				BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long));
+		memcpy(__entry->mem_allowed, mem_allowed_ptr->bits,
+		       sizeof(__entry->mem_allowed));
+	),
+
+	TP_printk("comm=%s pid=%d tgid=%d ngid=%d mem_nodes_allowed=%*pbl",
+		  __entry->comm,
+		  __entry->pid,
+		  __entry->tgid,
+		  __entry->ngid,
+		  MAX_NUMNODES, __entry->mem_allowed)
+);
 #endif /* CONFIG_NUMA_BALANCING */
 
 /*

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index b3b715e8a7cbc..cef163c174bd0 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3333,8 +3333,10 @@ static void task_numa_work(struct callback_head *work)
 	 * Memory is pinned to only one NUMA node via cpuset.mems, naturally
 	 * no page can be migrated.
 	 */
-	if (cpusets_enabled() && nodes_weight(cpuset_current_mems_allowed) == 1)
+	if (cpusets_enabled() && nodes_weight(cpuset_current_mems_allowed) == 1) {
+		trace_sched_skip_cpuset_numa(current, &cpuset_current_mems_allowed);
 		return;
+	}
 
 	if (!mm->numa_next_scan) {
 		mm->numa_next_scan = now +
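
As a quick way to observe the new event, a userspace sketch along the
lines below could enable it through tracefs and stream its records. This
is illustrative only and not part of the patch: it assumes tracefs is
mounted at /sys/kernel/tracing (older setups use
/sys/kernel/debug/tracing), a kernel built with CONFIG_NUMA_BALANCING,
and root privileges. The event only fires when task_numa_work() runs for
a task whose cpuset allows exactly one memory node, e.g. after writing a
single node to the cpuset.mems file of the task's cgroup.

/*
 * Hypothetical test program, not part of the patch: enable the
 * sched_skip_cpuset_numa event via tracefs and print its output.
 * Paths follow the usual events/<subsystem>/<event> tracefs layout.
 */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

static void write_str(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);

	/* open() short-circuits so write() is skipped on a bad fd */
	if (fd < 0 || write(fd, val, strlen(val)) < 0) {
		perror(path);
		exit(1);
	}
	close(fd);
}

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd;

	/* Turn on just this one event, then make sure tracing is on. */
	write_str("/sys/kernel/tracing/events/sched/sched_skip_cpuset_numa/enable", "1");
	write_str("/sys/kernel/tracing/tracing_on", "1");

	/* Stream records; each line should match the TP_printk() format above. */
	fd = open("/sys/kernel/tracing/trace_pipe", O_RDONLY);
	if (fd < 0) {
		perror("trace_pipe");
		exit(1);
	}
	while ((n = read(fd, buf, sizeof(buf) - 1)) > 0) {
		buf[n] = '\0';
		fputs(buf, stdout);
	}
	close(fd);
	return 0;
}

Built with something like "cc -o skip_numa_trace skip_numa_trace.c" and
run as root while a memory-pinned workload executes, this should emit
one line per skipped scan, including the %*pbl-formatted node mask.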