memcg: no irq disable for memcg stock lock
author		Shakeel Butt <shakeel.butt@linux.dev>
		Tue, 6 May 2025 22:55:33 +0000 (15:55 -0700)
committer	Andrew Morton <akpm@linux-foundation.org>
		Tue, 13 May 2025 23:28:08 +0000 (16:28 -0700)
There is no need to disable irqs to use the memcg per-cpu stock, so
let's just not do that.  One consequence of this change is that a CPU
can be interrupted while a task holds the memcg stock lock; memcg
charges made from irq context on that CPU will then take the slow path
of memcg charging.  However, that should be very rare and is fine in
general.
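
To make the new locking rule concrete, here is a minimal sketch (not
part of the patch) of a trylock-based fast path like the one this
change switches to.  The names example_stock and example_consume are
hypothetical; only local_trylock_t, INIT_LOCAL_TRYLOCK, local_trylock
and local_unlock are the real kernel APIs the patched code uses:

/*
 * Hypothetical sketch of the post-patch fast path: no irqsave, no
 * `unsigned long flags`.  If an irq arrives on this CPU while the
 * lock is held in task context, the irq-context local_trylock()
 * fails and the caller must fall back to the charging slow path.
 */
#include <linux/local_lock.h>
#include <linux/percpu.h>

struct example_stock {
	local_trylock_t lock;
	unsigned int cached_pages;
};

static DEFINE_PER_CPU(struct example_stock, example_stock) = {
	.lock = INIT_LOCAL_TRYLOCK(lock),
};

static bool example_consume(unsigned int nr_pages)
{
	struct example_stock *stock;
	bool ret = false;

	/* Irqs stay enabled; only the trylock guards the stock. */
	if (!local_trylock(&example_stock.lock))
		return ret;	/* held by interrupted task context */

	stock = this_cpu_ptr(&example_stock);
	if (stock->cached_pages >= nr_pages) {
		stock->cached_pages -= nr_pages;
		ret = true;
	}

	local_unlock(&example_stock.lock);
	return ret;
}

On !PREEMPT_RT kernels, local_trylock() disables preemption and marks
the lock acquired, so an interrupt handler on the same CPU sees it
held and bails out instead of deadlocking; that irq-context failure is
the rare slow-path case the changelog accepts.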

Link: https://lkml.kernel.org/r/20250506225533.2580386-5-shakeel.butt@linux.dev
Signed-off-by: Shakeel Butt <shakeel.butt@linux.dev>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Eric Dumazet <edumazet@google.com>
Cc: Jakub Kicinski <kuba@kernel.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/memcontrol.c

index 1f8611e9a8ffd24814bad1b712a288f5ab15f028..2b6768f0e916bf35b3aa1cf0433251fe8fa0a499 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1829,12 +1829,11 @@ static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
 {
        struct memcg_stock_pcp *stock;
        uint8_t stock_pages;
-       unsigned long flags;
        bool ret = false;
        int i;
 
        if (nr_pages > MEMCG_CHARGE_BATCH ||
-           !local_trylock_irqsave(&memcg_stock.lock, flags))
+           !local_trylock(&memcg_stock.lock))
                return ret;
 
        stock = this_cpu_ptr(&memcg_stock);
@@ -1851,7 +1850,7 @@ static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
                break;
        }
 
-       local_unlock_irqrestore(&memcg_stock.lock, flags);
+       local_unlock(&memcg_stock.lock);
 
        return ret;
 }
@@ -1895,18 +1894,17 @@ static void drain_stock_fully(struct memcg_stock_pcp *stock)
 static void drain_local_memcg_stock(struct work_struct *dummy)
 {
        struct memcg_stock_pcp *stock;
-       unsigned long flags;
 
        if (WARN_ONCE(!in_task(), "drain in non-task context"))
                return;
 
-       local_lock_irqsave(&memcg_stock.lock, flags);
+       local_lock(&memcg_stock.lock);
 
        stock = this_cpu_ptr(&memcg_stock);
        drain_stock_fully(stock);
        clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
 
-       local_unlock_irqrestore(&memcg_stock.lock, flags);
+       local_unlock(&memcg_stock.lock);
 }
 
 static void drain_local_obj_stock(struct work_struct *dummy)
@@ -1931,7 +1929,6 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
        struct memcg_stock_pcp *stock;
        struct mem_cgroup *cached;
        uint8_t stock_pages;
-       unsigned long flags;
        bool success = false;
        int empty_slot = -1;
        int i;
@@ -1946,7 +1943,7 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
        VM_WARN_ON_ONCE(mem_cgroup_is_root(memcg));
 
        if (nr_pages > MEMCG_CHARGE_BATCH ||
-           !local_trylock_irqsave(&memcg_stock.lock, flags)) {
+           !local_trylock(&memcg_stock.lock)) {
                /*
                 * In case of larger than batch refill or unlikely failure to
                 * lock the percpu memcg_stock.lock, uncharge memcg directly.
@@ -1981,7 +1978,7 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
                WRITE_ONCE(stock->nr_pages[i], nr_pages);
        }
 
-       local_unlock_irqrestore(&memcg_stock.lock, flags);
+       local_unlock(&memcg_stock.lock);
 }
 
 static bool is_memcg_drain_needed(struct memcg_stock_pcp *stock,