static DEFINE_MUTEX(pcp_batch_high_lock);
#define MIN_PERCPU_PAGELIST_HIGH_FRACTION (8)
-#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
-/*
- * On SMP, spin_trylock is sufficient protection.
- * On PREEMPT_RT, spin_trylock is equivalent on both SMP and UP.
- * Pass flags to a no-op inline function to typecheck and silence the unused
- * variable warning.
- */
-static inline void __pcp_trylock_noop(unsigned long *flags) { }
-#define pcp_trylock_prepare(flags) __pcp_trylock_noop(&(flags))
-#define pcp_trylock_finish(flags) __pcp_trylock_noop(&(flags))
-#else
-
-/* UP spin_trylock always succeeds so disable IRQs to prevent re-entrancy. */
-#define pcp_trylock_prepare(flags) local_irq_save(flags)
-#define pcp_trylock_finish(flags) local_irq_restore(flags)
-#endif
-
/*
* Locking a pcp requires a PCP lookup followed by a spinlock. To avoid
* a migration causing the wrong PCP to be locked and remote memory being
pcpu_task_unpin(); \
})
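
For context, the pinning scheme that comment describes is what the existing pcpu_spin_trylock() helper in this file implements: the task is pinned before the per-CPU lookup so a migration cannot separate the lookup from the lock. A simplified sketch of that existing helper, with the steps annotated (see mm/page_alloc.c for the exact definition):

	#define pcpu_spin_trylock(type, member, ptr)			\
	({								\
		type *_ret;						\
		pcpu_task_pin();	/* no migration between lookup and trylock */ \
		_ret = this_cpu_ptr(ptr);				\
		if (!spin_trylock(&_ret->member)) {			\
			pcpu_task_unpin();				\
			_ret = NULL;					\
		}							\
		_ret;							\
	})
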
-/* struct per_cpu_pages specific helpers. */
-#define pcp_spin_trylock(ptr, UP_flags) \
-({ \
- struct per_cpu_pages *__ret; \
- pcp_trylock_prepare(UP_flags); \
- __ret = pcpu_spin_trylock(struct per_cpu_pages, lock, ptr); \
- if (!__ret) \
- pcp_trylock_finish(UP_flags); \
- __ret; \
-})
+/* struct per_cpu_pages specific helpers. */
+#ifdef CONFIG_SMP
+#define pcp_spin_trylock(ptr) \
+ pcpu_spin_trylock(struct per_cpu_pages, lock, ptr)
-#define pcp_spin_unlock(ptr, UP_flags) \
-({ \
- pcpu_spin_unlock(lock, ptr); \
- pcp_trylock_finish(UP_flags); \
-})
+#define pcp_spin_unlock(ptr) \
+ pcpu_spin_unlock(lock, ptr)
/*
- * With the UP spinlock implementation, when we spin_lock(&pcp->lock) (for i.e.
- * a potentially remote cpu drain) and get interrupted by an operation that
- * attempts pcp_spin_trylock(), we can't rely on the trylock failure due to UP
- * spinlock assumptions making the trylock a no-op. So we have to turn that
- * spin_lock() to a spin_lock_irqsave(). This works because on UP there are no
- * remote cpu's so we can only be locking the only existing local one.
+ * On CONFIG_SMP=n, the UP implementation of spin_trylock() never fails, so it
+ * cannot provide the re-entrancy protection our locking scheme relies on.
+ * However, UP does not need the pcplists for scalability in the first place,
+ * so just make all the trylocks fail and take the slow path unconditionally.
*/
+#else
+#define pcp_spin_trylock(ptr) \
+ NULL
+
+/* Unreachable: pcp_spin_trylock() never succeeds on UP. */
+#define pcp_spin_unlock(ptr) \
+	BUG()
+#endif
+
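
With the !SMP stubs in place, every fast path in this patch keeps the same trylock-or-fallback shape; on CONFIG_SMP=n the trylock is the constant NULL, so the compiler can drop the per-CPU branch entirely and only the buddy slow path remains. A minimal sketch of the pattern, mirroring the __free_frozen_pages() hunk further down (not new code, just the shape):

	pcp = pcp_spin_trylock(zone->per_cpu_pageset);
	if (pcp) {
		if (!free_frozen_page_commit(zone, pcp, page, migratetype,
					     order, fpi_flags))
			return;		/* commit dropped the lock itself */
		pcp_spin_unlock(pcp);
	} else {
		/* trylock failed (always, on UP): free straight to buddy */
		free_one_page(zone, page, pfn, order, fpi_flags);
	}
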
#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
static inline void __flags_noop(unsigned long *flags) { }
#define pcp_spin_lock_maybe_irqsave(ptr, flags) \
*/
static bool free_frozen_page_commit(struct zone *zone,
struct per_cpu_pages *pcp, struct page *page, int migratetype,
- unsigned int order, fpi_t fpi_flags, unsigned long *UP_flags)
+ unsigned int order, fpi_t fpi_flags)
{
int high, batch;
int to_free, to_free_batched;
if (to_free == 0 || pcp->count == 0)
break;
- pcp_spin_unlock(pcp, *UP_flags);
+ pcp_spin_unlock(pcp);
- pcp = pcp_spin_trylock(zone->per_cpu_pageset, *UP_flags);
+ pcp = pcp_spin_trylock(zone->per_cpu_pageset);
if (!pcp) {
ret = false;
break;
* returned in an unlocked state.
*/
if (smp_processor_id() != cpu) {
- pcp_spin_unlock(pcp, *UP_flags);
+ pcp_spin_unlock(pcp);
ret = false;
break;
}
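
The smp_processor_id() != cpu bail-out guards the window the periodic unlock opens: pcp_spin_unlock() unpins the task, so it can be migrated before the trylock is retaken, and the fresh per-CPU lookup would then hand back a different CPU's pcp than the one this invocation's bookkeeping was computed against. An illustrative timeline (assumption: the scheduler moves the task inside the window):

	/*
	 * CPU0 (task)                     CPU1 (same task, after migration)
	 * pcp_spin_unlock(pcp);   <- drops CPU0's pcp, unpins
	 *    ...migration...
	 *                                 pcp_spin_trylock() -> CPU1's pcp
	 *                                 smp_processor_id() != cpu
	 *                                 pcp_spin_unlock(pcp); ret = false;
	 *                                 (caller sees "returned unlocked")
	 */
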
static void __free_frozen_pages(struct page *page, unsigned int order,
fpi_t fpi_flags)
{
- unsigned long UP_flags;
struct per_cpu_pages *pcp;
struct zone *zone;
unsigned long pfn = page_to_pfn(page);
add_page_to_zone_llist(zone, page, order);
return;
}
- pcp = pcp_spin_trylock(zone->per_cpu_pageset, UP_flags);
+ pcp = pcp_spin_trylock(zone->per_cpu_pageset);
if (pcp) {
if (!free_frozen_page_commit(zone, pcp, page, migratetype,
- order, fpi_flags, &UP_flags))
+ order, fpi_flags))
return;
- pcp_spin_unlock(pcp, UP_flags);
+ pcp_spin_unlock(pcp);
} else {
free_one_page(zone, page, pfn, order, fpi_flags);
}
*/
void free_unref_folios(struct folio_batch *folios)
{
- unsigned long UP_flags;
struct per_cpu_pages *pcp = NULL;
struct zone *locked_zone = NULL;
int i, j;
if (zone != locked_zone ||
is_migrate_isolate(migratetype)) {
if (pcp) {
- pcp_spin_unlock(pcp, UP_flags);
+ pcp_spin_unlock(pcp);
locked_zone = NULL;
pcp = NULL;
}
* trylock is necessary as folios may be getting freed
* from IRQ or SoftIRQ context after an IO completion.
*/
- pcp = pcp_spin_trylock(zone->per_cpu_pageset, UP_flags);
+ pcp = pcp_spin_trylock(zone->per_cpu_pageset);
if (unlikely(!pcp)) {
free_one_page(zone, &folio->page, pfn,
order, FPI_NONE);
trace_mm_page_free_batched(&folio->page);
if (!free_frozen_page_commit(zone, pcp, &folio->page,
- migratetype, order, FPI_NONE, &UP_flags)) {
+ migratetype, order, FPI_NONE)) {
pcp = NULL;
locked_zone = NULL;
}
}
if (pcp)
- pcp_spin_unlock(pcp, UP_flags);
+ pcp_spin_unlock(pcp);
folio_batch_reinit(folios);
}
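
The trylock comment above is the heart of the scheme, and the reason the always-succeeding UP spin_trylock() is fatal to it: the pcp lock is taken without disabling IRQs, so an IO-completion free can interrupt a lock holder on the same CPU. With a real trylock, the interrupted case degrades cleanly to the buddy path (illustrative interleaving):

	/*
	 * task context                      IRQ on the same CPU
	 * pcp_spin_trylock() -> success
	 * <interrupted mid critical section>
	 *                                   free_unref_folios()
	 *                                     pcp_spin_trylock() -> fails
	 *                                     free_one_page()   (buddy fallback)
	 *
	 * On UP, spin_trylock() would "succeed" in the IRQ as well, letting two
	 * contexts manipulate the same pcp lists; hence the NULL stub above.
	 */
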
struct per_cpu_pages *pcp;
struct list_head *list;
struct page *page;
- unsigned long UP_flags;
/* spin_trylock may fail due to a parallel drain or IRQ reentrancy. */
- pcp = pcp_spin_trylock(zone->per_cpu_pageset, UP_flags);
+ pcp = pcp_spin_trylock(zone->per_cpu_pageset);
if (!pcp)
return NULL;
pcp->free_count >>= 1;
list = &pcp->lists[order_to_pindex(migratetype, order)];
page = __rmqueue_pcplist(zone, order, migratetype, alloc_flags, pcp, list);
- pcp_spin_unlock(pcp, UP_flags);
+ pcp_spin_unlock(pcp);
if (page) {
__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
zone_statistics(preferred_zone, zone, 1);
struct page **page_array)
{
struct page *page;
- unsigned long UP_flags;
struct zone *zone;
struct zoneref *z;
struct per_cpu_pages *pcp;
goto failed;
/* spin_trylock may fail due to a parallel drain or IRQ reentrancy. */
- pcp = pcp_spin_trylock(zone->per_cpu_pageset, UP_flags);
+ pcp = pcp_spin_trylock(zone->per_cpu_pageset);
if (!pcp)
goto failed;
if (unlikely(!page)) {
/* Try and allocate at least one page */
if (!nr_account) {
- pcp_spin_unlock(pcp, UP_flags);
+ pcp_spin_unlock(pcp);
goto failed;
}
break;
page_array[nr_populated++] = page;
}
- pcp_spin_unlock(pcp, UP_flags);
+ pcp_spin_unlock(pcp);
__count_zid_vm_events(PGALLOC, zone_idx(zone), nr_account);
zone_statistics(zonelist_zone(ac.preferred_zoneref), zone, nr_account);