 	if (!val)
 		return;
 
-	/* TODO: add to cgroup update tree once it is nmi-safe. */
-	if (!in_nmi())
-		css_rstat_updated(&memcg->css, cpu);
+	css_rstat_updated(&memcg->css, cpu);
 	statc_pcpu = memcg->vmstats_percpu;
 	for (; statc_pcpu; statc_pcpu = statc->parent_pcpu) {
 		statc = this_cpu_ptr(statc_pcpu);
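The in_nmi() special-casing can go away here because css_rstat_updated() no longer takes locks on its update path. As a minimal, self-contained sketch of the idea (illustrative names, not the kernel's actual rstat code): the "this has pending updates" record becomes a lockless per-CPU llist, and llist_add() and xchg() are single atomic operations, so an NMI that interrupts a regular update cannot deadlock against it.

#include <linux/atomic.h>
#include <linux/llist.h>
#include <linux/percpu.h>

struct rstat_marker {
	struct llist_node lnode;
	int queued;		/* 0 = not yet on any per-CPU list */
};

static DEFINE_PER_CPU(struct llist_head, rstat_updated_list);

/* NMI-safe: no locks, only a flag xchg() and a lockless list push. */
static void mark_rstat_updated(struct rstat_marker *m, int cpu)
{
	if (!READ_ONCE(m->queued) && !xchg(&m->queued, 1))
		llist_add(&m->lnode, per_cpu_ptr(&rstat_updated_list, cpu));
}

The flusher would llist_del_all() each CPU's list and clear the queued flags; the real mechanism lives behind css_rstat_updated() and is more involved, but the nmi-safety rests on the same lockless primitives.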
@@ ... @@ static void account_slab_nmi_safe(struct mem_cgroup *memcg,
 	} else {
 		struct mem_cgroup_per_node *pn = memcg->nodeinfo[pgdat->node_id];
 
-		/* TODO: add to cgroup update tree once it is nmi-safe. */
+		/* Preemption is disabled in NMI context, so smp_processor_id() is safe. */
+		css_rstat_updated(&memcg->css, smp_processor_id());
 		if (idx == NR_SLAB_RECLAIMABLE_B)
 			atomic_add(nr, &pn->slab_reclaimable);
 		else
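In the NMI branch the counts land in plain atomic_t counters and the cgroup is only marked dirty via css_rstat_updated(); the values still have to be folded back into the regular stats by the flusher. A sketch of that fold, with a hypothetical helper name (mod_memcg_lruvec_state() and the NR_SLAB_*_B items are assumed from the surrounding memcg code): atomic_xchg() reads and zeroes the counter in one step, so a concurrent NMI update is never lost, only deferred to the next flush.

static void flush_slab_nmi_stats(struct mem_cgroup_per_node *pn,
				 struct lruvec *lruvec)
{
	int nr;

	/* Take the whole accumulated value and reset it atomically. */
	nr = atomic_xchg(&pn->slab_reclaimable, 0);
	if (nr)
		mod_memcg_lruvec_state(lruvec, NR_SLAB_RECLAIMABLE_B, nr);

	nr = atomic_xchg(&pn->slab_unreclaimable, 0);
	if (nr)
		mod_memcg_lruvec_state(lruvec, NR_SLAB_UNRECLAIMABLE_B, nr);
}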
@@ ... @@ static void account_kmem_nmi_safe(struct mem_cgroup *memcg, int val)
 	if (likely(!in_nmi())) {
 		mod_memcg_state(memcg, MEMCG_KMEM, val);
 	} else {
-		/* TODO: add to cgroup update tree once it is nmi-safe. */
+		/* Preemption is disabled in NMI context, so smp_processor_id() is safe. */
+		css_rstat_updated(&memcg->css, smp_processor_id());
 		atomic_add(val, &memcg->kmem_stat);
 	}
 }
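The kmem counter follows the same two-halves pattern. Because the NMI path now calls css_rstat_updated(), the flusher knows to visit this cgroup even though the value is sitting in an atomic rather than in the per-CPU stats. A companion sketch of the kmem fold (helper name hypothetical; mod_memcg_state() is the same interface the fast path uses above):

static void flush_kmem_nmi_stats(struct mem_cgroup *memcg)
{
	/* Drain the NMI-side atomic into the normal MEMCG_KMEM stat. */
	int val = atomic_xchg(&memcg->kmem_stat, 0);

	if (val)
		mod_memcg_state(memcg, MEMCG_KMEM, val);
}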