 {
 	struct memcg_stock_pcp *stock;
 	uint8_t stock_pages;
-	unsigned long flags;
 	bool ret = false;
 	int i;
 	if (nr_pages > MEMCG_CHARGE_BATCH ||
-	    !local_trylock_irqsave(&memcg_stock.lock, flags))
+	    !local_trylock(&memcg_stock.lock))
 		return ret;
 	stock = this_cpu_ptr(&memcg_stock);
 		break;
 	}
-	local_unlock_irqrestore(&memcg_stock.lock, flags);
+	local_unlock(&memcg_stock.lock);
 	return ret;
 }
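For context, a minimal sketch of the locking pattern this hunk converts to, using hypothetical names (pcp_cache, pcp_cache_consume); only local_trylock(), local_unlock(), this_cpu_ptr() and the trylock-or-bail shape come from the patch itself. The point of dropping irqsave is that local_trylock() never disables interrupts: a context that cannot take this CPU's lock (for example an interrupt arriving while the lock is held) simply fails the trylock and falls back to its slow path.

#include <linux/local_lock.h>
#include <linux/percpu.h>

/* Hypothetical per-CPU cache guarded the same way as memcg_stock. */
struct pcp_cache {
	local_trylock_t lock;
	unsigned int cached;
};

static DEFINE_PER_CPU(struct pcp_cache, pcp_cache) = {
	.lock = INIT_LOCAL_TRYLOCK(lock),
};

/* Consume @want units from this CPU's cache; fail fast on contention. */
static bool pcp_cache_consume(unsigned int want)
{
	struct pcp_cache *cache;
	bool ret = false;

	/* No irqsave: contended callers bail out to their slow path. */
	if (!local_trylock(&pcp_cache.lock))
		return ret;

	cache = this_cpu_ptr(&pcp_cache);
	if (cache->cached >= want) {
		cache->cached -= want;
		ret = true;
	}

	local_unlock(&pcp_cache.lock);
	return ret;
}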
 static void drain_local_memcg_stock(struct work_struct *dummy)
 {
 	struct memcg_stock_pcp *stock;
-	unsigned long flags;
 	if (WARN_ONCE(!in_task(), "drain in non-task context"))
 		return;
-	local_lock_irqsave(&memcg_stock.lock, flags);
+	local_lock(&memcg_stock.lock);
 	stock = this_cpu_ptr(&memcg_stock);
 	drain_stock_fully(stock);
 	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
-	local_unlock_irqrestore(&memcg_stock.lock, flags);
+	local_unlock(&memcg_stock.lock);
 }
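The blocking local_lock() here, as opposed to the trylock in the charge paths, is safe only because this handler always runs from a workqueue, i.e. in task context, which is exactly what the WARN_ONCE(!in_task(), ...) guard asserts. A sketch of how such a worker is typically queued, assuming the flags and work fields that memcg_stock_pcp already carries (the call site is not part of this excerpt):

#include <linux/workqueue.h>

/*
 * Hypothetical helper: queue the per-CPU drain worker at most once;
 * the handler above clears FLUSHING_CACHED_CHARGE when it finishes.
 */
static void queue_memcg_drain(int cpu, struct memcg_stock_pcp *stock)
{
	if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags))
		schedule_work_on(cpu, &stock->work);
}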
 static void drain_local_obj_stock(struct work_struct *dummy)
 	struct memcg_stock_pcp *stock;
 	struct mem_cgroup *cached;
 	uint8_t stock_pages;
-	unsigned long flags;
 	bool success = false;
 	int empty_slot = -1;
 	int i;
 	VM_WARN_ON_ONCE(mem_cgroup_is_root(memcg));
 	if (nr_pages > MEMCG_CHARGE_BATCH ||
-	    !local_trylock_irqsave(&memcg_stock.lock, flags)) {
+	    !local_trylock(&memcg_stock.lock)) {
 		/*
 		 * In case of larger than batch refill or unlikely failure to
 		 * lock the percpu memcg_stock.lock, uncharge memcg directly.
 		 */
 		WRITE_ONCE(stock->nr_pages[i], nr_pages);
 	}
-	local_unlock_irqrestore(&memcg_stock.lock, flags);
+	local_unlock(&memcg_stock.lock);
 }
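Condensing this last hunk: a refill larger than MEMCG_CHARGE_BATCH, or one that loses the trylock race, bypasses the per-CPU stock and uncharges the memcg directly, as the comment says. A sketch of that control flow; memcg_uncharge() is assumed from the surrounding file and the slot-caching loop is elided:

static void refill_stock_sketch(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	if (nr_pages > MEMCG_CHARGE_BATCH ||
	    !local_trylock(&memcg_stock.lock)) {
		/* Slow path: return the charge to the counters directly. */
		memcg_uncharge(memcg, nr_pages);
		return;
	}

	/* ... cache nr_pages in one of this CPU's stock slots ... */

	local_unlock(&memcg_stock.lock);
}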
 static bool is_memcg_drain_needed(struct memcg_stock_pcp *stock,