git.ipfire.org Git - thirdparty/linux.git/commitdiff
memcg: objcg stock trylock without irq disabling
authorShakeel Butt <shakeel.butt@linux.dev>
Wed, 14 May 2025 18:41:58 +0000 (11:41 -0700)
committerAndrew Morton <akpm@linux-foundation.org>
Thu, 22 May 2025 21:55:39 +0000 (14:55 -0700)
There is no need to disable irqs to use the objcg per-cpu stock, so let's
stop doing that.  However, consume_obj_stock() and refill_obj_stock() will
need to use trylock instead, to avoid a deadlock against irq context.  One
consequence of this change is that a charge request from irq context may
take the slowpath more often, but that should be rare.

Link: https://lkml.kernel.org/r/20250514184158.3471331-8-shakeel.butt@linux.dev
Signed-off-by: Shakeel Butt <shakeel.butt@linux.dev>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/memcontrol.c

index d8508b57d0fa90b2d552f371edfd8eeb8ee781c3..35db91fddd1f5e53c0601462966a83ecb6bcace8 100644 (file)
@@ -1880,18 +1880,17 @@ static void drain_local_memcg_stock(struct work_struct *dummy)
 static void drain_local_obj_stock(struct work_struct *dummy)
 {
        struct obj_stock_pcp *stock;
-       unsigned long flags;
 
        if (WARN_ONCE(!in_task(), "drain in non-task context"))
                return;
 
-       local_lock_irqsave(&obj_stock.lock, flags);
+       local_lock(&obj_stock.lock);
 
        stock = this_cpu_ptr(&obj_stock);
        drain_obj_stock(stock);
        clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
 
-       local_unlock_irqrestore(&obj_stock.lock, flags);
+       local_unlock(&obj_stock.lock);
 }
 
 static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
@@ -2874,10 +2873,10 @@ static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,
                              struct pglist_data *pgdat, enum node_stat_item idx)
 {
        struct obj_stock_pcp *stock;
-       unsigned long flags;
        bool ret = false;
 
-       local_lock_irqsave(&obj_stock.lock, flags);
+       if (!local_trylock(&obj_stock.lock))
+               return ret;
 
        stock = this_cpu_ptr(&obj_stock);
        if (objcg == READ_ONCE(stock->cached_objcg) && stock->nr_bytes >= nr_bytes) {
@@ -2888,7 +2887,7 @@ static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,
                        __account_obj_stock(objcg, stock, nr_bytes, pgdat, idx);
        }
 
-       local_unlock_irqrestore(&obj_stock.lock, flags);
+       local_unlock(&obj_stock.lock);
 
        return ret;
 }
@@ -2977,10 +2976,16 @@ static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,
                enum node_stat_item idx)
 {
        struct obj_stock_pcp *stock;
-       unsigned long flags;
        unsigned int nr_pages = 0;
 
-       local_lock_irqsave(&obj_stock.lock, flags);
+       if (!local_trylock(&obj_stock.lock)) {
+               if (pgdat)
+                       mod_objcg_mlstate(objcg, pgdat, idx, nr_bytes);
+               nr_pages = nr_bytes >> PAGE_SHIFT;
+               nr_bytes = nr_bytes & (PAGE_SIZE - 1);
+               atomic_add(nr_bytes, &objcg->nr_charged_bytes);
+               goto out;
+       }
 
        stock = this_cpu_ptr(&obj_stock);
        if (READ_ONCE(stock->cached_objcg) != objcg) { /* reset if necessary */
@@ -3002,8 +3007,8 @@ static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,
                stock->nr_bytes &= (PAGE_SIZE - 1);
        }
 
-       local_unlock_irqrestore(&obj_stock.lock, flags);
-
+       local_unlock(&obj_stock.lock);
+out:
        if (nr_pages)
                obj_cgroup_uncharge_pages(objcg, nr_pages);
 }