#define NR_MEMCG_STOCK 7
struct memcg_stock_pcp {
- local_trylock_t stock_lock;
+ local_trylock_t memcg_lock;
uint8_t nr_pages[NR_MEMCG_STOCK];
struct mem_cgroup *cached[NR_MEMCG_STOCK];
+ local_trylock_t obj_lock;
+ unsigned int nr_bytes;
struct obj_cgroup *cached_objcg;
struct pglist_data *cached_pgdat;
- unsigned int nr_bytes;
int nr_slab_reclaimable_b;
int nr_slab_unreclaimable_b;
struct work_struct work;
unsigned long flags;
#define FLUSHING_CACHED_CHARGE 0
};
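
/*
 * With this split, the old single stock_lock no longer covers both
 * caches: memcg_lock guards only the per-memcg page stock (nr_pages[]
 * and cached[]), while obj_lock guards the objcg byte stock (nr_bytes,
 * cached_objcg, cached_pgdat and the slab byte counters). nr_bytes is
 * moved up next to obj_lock so each lock sits beside the data it
 * protects.
 */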
static DEFINE_PER_CPU_ALIGNED(struct memcg_stock_pcp, memcg_stock) = {
- .stock_lock = INIT_LOCAL_TRYLOCK(stock_lock),
+ .memcg_lock = INIT_LOCAL_TRYLOCK(memcg_lock),
+ .obj_lock = INIT_LOCAL_TRYLOCK(obj_lock),
};
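
/*
 * Each lock gets its own INIT_LOCAL_TRYLOCK() initializer, presumably
 * so that memcg_lock and obj_lock are tracked as distinct locks (e.g.
 * by lockdep) rather than sharing the old stock_lock identity.
 */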
static DEFINE_MUTEX(percpu_charge_mutex);
int i;
if (nr_pages > MEMCG_CHARGE_BATCH ||
- !local_trylock_irqsave(&memcg_stock.stock_lock, flags))
+ !local_trylock_irqsave(&memcg_stock.memcg_lock, flags))
return ret;
stock = this_cpu_ptr(&memcg_stock);
break;
}
- local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
+ local_unlock_irqrestore(&memcg_stock.memcg_lock, flags);
return ret;
}
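
/*
 * Callers must handle trylock failure. A simplified sketch of the
 * caller side (based on try_charge_memcg()):
 *
 *	if (consume_stock(memcg, nr_pages))
 *		return 0;
 *	... fall back to page_counter_try_charge() ...
 */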
struct memcg_stock_pcp *stock;
unsigned long flags;
- /*
- * The only protection from cpu hotplug (memcg_hotplug_cpu_dead) vs.
- * drain_stock races is that we always operate on local CPU stock
- * here with IRQ disabled
- */
- local_lock_irqsave(&memcg_stock.stock_lock, flags);
+ if (WARN_ONCE(!in_task(), "drain in non-task context"))
+ return;
+ local_lock_irqsave(&memcg_stock.obj_lock, flags);
stock = this_cpu_ptr(&memcg_stock);
drain_obj_stock(stock);
+ local_unlock_irqrestore(&memcg_stock.obj_lock, flags);
+
+ local_lock_irqsave(&memcg_stock.memcg_lock, flags);
+ stock = this_cpu_ptr(&memcg_stock);
drain_stock_fully(stock);
clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
-
- local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
+ local_unlock_irqrestore(&memcg_stock.memcg_lock, flags);
}
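
/*
 * Note the two locks are taken back to back, never nested: the obj
 * stock is drained and obj_lock dropped before memcg_lock is acquired
 * for the page stock. The WARN_ONCE() documents the assumption that
 * the drain work only ever runs in task context.
 */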
static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
VM_WARN_ON_ONCE(mem_cgroup_is_root(memcg));
if (nr_pages > MEMCG_CHARGE_BATCH ||
- !local_trylock_irqsave(&memcg_stock.stock_lock, flags)) {
+ !local_trylock_irqsave(&memcg_stock.memcg_lock, flags)) {
/*
* In case of larger than batch refill or unlikely failure to
- * lock the percpu stock_lock, uncharge memcg directly.
+ * lock the percpu memcg_lock, uncharge memcg directly.
*/
memcg_uncharge(memcg, nr_pages);
return;
WRITE_ONCE(stock->nr_pages[i], nr_pages);
}
- local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
+ local_unlock_irqrestore(&memcg_stock.memcg_lock, flags);
}
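
/*
 * refill_stock() is thus best-effort and never spins: oversized
 * refills (more than MEMCG_CHARGE_BATCH pages) and trylock failures
 * both bypass the cache and uncharge the memcg directly.
 */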
static bool is_drain_needed(struct memcg_stock_pcp *stock,
stock = &per_cpu(memcg_stock, cpu);
- /* drain_obj_stock requires stock_lock */
- local_lock_irqsave(&memcg_stock.stock_lock, flags);
+ /* drain_obj_stock requires obj_lock */
+ local_lock_irqsave(&memcg_stock.obj_lock, flags);
drain_obj_stock(stock);
- local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
+ local_unlock_irqrestore(&memcg_stock.obj_lock, flags);
+ /* no need for the local lock */
drain_stock_fully(stock);
return 0;
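
/*
 * In the hotplug callback the stock being drained belongs to the dead
 * CPU: it can no longer be refilled or consumed, so its page stock
 * needs no lock; only drain_obj_stock() still takes the local
 * obj_lock, as the comment above notes.
 */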
unsigned long flags;
bool ret = false;
- local_lock_irqsave(&memcg_stock.stock_lock, flags);
+ local_lock_irqsave(&memcg_stock.obj_lock, flags);
stock = this_cpu_ptr(&memcg_stock);
if (objcg == READ_ONCE(stock->cached_objcg) && stock->nr_bytes >= nr_bytes) {
__account_obj_stock(objcg, stock, nr_bytes, pgdat, idx);
}
- local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
+ local_unlock_irqrestore(&memcg_stock.obj_lock, flags);
return ret;
}
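
/*
 * This is the byte-granular fast path: when the cached objcg matches
 * and enough bytes are stocked, the charge is satisfied entirely under
 * obj_lock, without ever touching memcg_lock or the page counters.
 */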
unsigned long flags;
unsigned int nr_pages = 0;
- local_lock_irqsave(&memcg_stock.stock_lock, flags);
+ local_lock_irqsave(&memcg_stock.obj_lock, flags);
stock = this_cpu_ptr(&memcg_stock);
if (READ_ONCE(stock->cached_objcg) != objcg) { /* reset if necessary */
stock->nr_bytes &= (PAGE_SIZE - 1);
}
- local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
+ local_unlock_irqrestore(&memcg_stock.obj_lock, flags);
if (nr_pages)
obj_cgroup_uncharge_pages(objcg, nr_pages);
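
/*
 * Any whole pages released by the reset above are uncharged only after
 * obj_lock is dropped, presumably to keep the critical section short
 * and avoid calling obj_cgroup_uncharge_pages() under a local lock.
 */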