BUG_ON(1)
#endif
-#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
-static inline void __flags_noop(unsigned long *flags) { }
-#define pcp_spin_lock_maybe_irqsave(ptr, flags) \
-({ \
- __flags_noop(&(flags)); \
- spin_lock(&(ptr)->lock); \
-})
-#define pcp_spin_unlock_maybe_irqrestore(ptr, flags) \
-({ \
- spin_unlock(&(ptr)->lock); \
- __flags_noop(&(flags)); \
-})
-#else
-#define pcp_spin_lock_maybe_irqsave(ptr, flags) \
- spin_lock_irqsave(&(ptr)->lock, flags)
-#define pcp_spin_unlock_maybe_irqrestore(ptr, flags) \
- spin_unlock_irqrestore(&(ptr)->lock, flags)
-#endif
+/*
+ * In some cases there is no need to pin the task to the current CPU,
+ * because the caller is already given a specific CPU's pcp pointer.
+ */
+#define pcp_spin_lock_nopin(ptr) \
+ spin_lock(&(ptr)->lock)
+#define pcp_spin_unlock_nopin(ptr) \
+ spin_unlock(&(ptr)->lock)
#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
DEFINE_PER_CPU(int, numa_node);
bool decay_pcp_high(struct zone *zone, struct per_cpu_pages *pcp)
{
int high_min, to_drain, to_drain_batched, batch;
- unsigned long UP_flags;
bool todo = false;
high_min = READ_ONCE(pcp->high_min);
to_drain = pcp->count - pcp->high;
while (to_drain > 0) {
to_drain_batched = min(to_drain, batch);
- pcp_spin_lock_maybe_irqsave(pcp, UP_flags);
+ pcp_spin_lock_nopin(pcp);
free_pcppages_bulk(zone, to_drain_batched, pcp, 0);
- pcp_spin_unlock_maybe_irqrestore(pcp, UP_flags);
+ pcp_spin_unlock_nopin(pcp);
todo = true;
to_drain -= to_drain_batched;
*/
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
{
- unsigned long UP_flags;
int to_drain, batch;
batch = READ_ONCE(pcp->batch);
to_drain = min(pcp->count, batch);
if (to_drain > 0) {
- pcp_spin_lock_maybe_irqsave(pcp, UP_flags);
+ pcp_spin_lock_nopin(pcp);
free_pcppages_bulk(zone, to_drain, pcp, 0);
- pcp_spin_unlock_maybe_irqrestore(pcp, UP_flags);
+ pcp_spin_unlock_nopin(pcp);
}
}
#endif
static void drain_pages_zone(unsigned int cpu, struct zone *zone)
{
struct per_cpu_pages *pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
- unsigned long UP_flags;
int count;
do {
- pcp_spin_lock_maybe_irqsave(pcp, UP_flags);
+ pcp_spin_lock_nopin(pcp);
count = pcp->count;
if (count) {
int to_drain = min(count,
free_pcppages_bulk(zone, to_drain, pcp, 0);
count -= to_drain;
}
- pcp_spin_unlock_maybe_irqrestore(pcp, UP_flags);
+ pcp_spin_unlock_nopin(pcp);
} while (count);
}
{
struct per_cpu_pages *pcp;
struct cpu_cacheinfo *cci;
- unsigned long UP_flags;
pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
cci = get_cpu_cacheinfo(cpu);
* This can reduce zone lock contention without hurting
* cache-hot pages sharing.
*/
- pcp_spin_lock_maybe_irqsave(pcp, UP_flags);
+ pcp_spin_lock_nopin(pcp);
if ((cci->per_cpu_data_slice_size >> PAGE_SHIFT) > 3 * pcp->batch)
pcp->flags |= PCPF_FREE_HIGH_BATCH;
else
pcp->flags &= ~PCPF_FREE_HIGH_BATCH;
- pcp_spin_unlock_maybe_irqrestore(pcp, UP_flags);
+ pcp_spin_unlock_nopin(pcp);
}
void setup_pcp_cacheinfo(unsigned int cpu)