#include <linux/hugetlb.h>
#include <linux/sched/rt.h>
#include <linux/page_owner.h>
+#include <linux/random.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
* This usage means that zero-order pages may not be compound.
*/
-static void free_compound_page(struct page *page)
+void free_compound_page(struct page *page)
{
__free_pages_ok(page, compound_order(page));
}
__mod_zone_freepage_state(zone, (1 << order), migratetype);
}
#else
-struct page_ext_operations debug_guardpage_ops = { NULL, };
+struct page_ext_operations debug_guardpage_ops = { .need = NULL, .init = NULL };
static inline void set_page_guard(struct zone *zone, struct page *page,
unsigned int order, int migratetype) {}
static inline void clear_page_guard(struct zone *zone, struct page *page,
int i;
int bad = 0;
+#ifdef CONFIG_PAX_MEMORY_SANITIZE
+ unsigned long index = 1UL << order;
+#endif
+
VM_BUG_ON_PAGE(PageTail(page), page);
VM_BUG_ON_PAGE(PageHead(page) && compound_order(page) != order, page);
debug_check_no_obj_freed(page_address(page),
PAGE_SIZE << order);
}
+
+#ifdef CONFIG_PAX_MEMORY_SANITIZE
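+ /*
+ * Scrub every page in the freed block so its previous contents
+ * cannot leak into a future allocation; sanitize_highpage() wipes
+ * one page, mapping it temporarily if it lives in highmem.
+ */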
+ for (; index; --index)
+ sanitize_highpage(page + index - 1);
+#endif
+
arch_free_page(page, order);
kernel_map_pages(page, 1 << order, 0);
local_irq_restore(flags);
}
+#ifdef CONFIG_PAX_LATENT_ENTROPY
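+/* Enabled via the "pax_extra_latent_entropy" command-line parameter below. */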
+bool __meminitdata extra_latent_entropy;
+
+static int __init setup_pax_extra_latent_entropy(char *str)
+{
+ extra_latent_entropy = true;
+ return 0;
+}
+early_param("pax_extra_latent_entropy", setup_pax_extra_latent_entropy);
+
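+/*
+ * Global accumulator updated by code built with the latent_entropy
+ * gcc plugin's __latent_entropy instrumentation.
+ */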
+volatile u64 latent_entropy __latent_entropy;
+EXPORT_SYMBOL(latent_entropy);
+#endif
+
void __init __free_pages_bootmem(struct page *page, unsigned int order)
{
unsigned int nr_pages = 1 << order;
__ClearPageReserved(p);
set_page_count(p, 0);
+#ifdef CONFIG_PAX_LATENT_ENTROPY
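+ /*
+ * Fold the current contents of pages handed back by the boot
+ * allocator into the entropy pool. Restricted to lowmem below
+ * PFN 0x100000 (the first 4GB with 4KiB pages) so the memory can
+ * be read through its direct mapping.
+ */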
+ if (extra_latent_entropy && !PageHighMem(page) &&
+     page_to_pfn(page) < 0x100000) {
+ u64 hash = 0;
+ size_t index, end = PAGE_SIZE * nr_pages / sizeof(hash);
+ const u64 *data = lowmem_page_address(page);
+
+ for (index = 0; index < end; index++)
+ hash ^= hash + data[index];
+ latent_entropy ^= hash;
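+ /* add_device_randomness() mixes this in without crediting entropy. */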
+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
+ }
+#endif
+
page_zone(page)->managed_pages += nr_pages;
set_page_refcounted(page);
__free_pages(page, order);
arch_alloc_page(page, order);
kernel_map_pages(page, 1 << order, 1);
+#ifndef CONFIG_PAX_MEMORY_SANITIZE
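+ /*
+ * With PAX_MEMORY_SANITIZE, the page was already cleared when it
+ * was freed, so the __GFP_ZERO clearing below would be redundant
+ * and is compiled out.
+ */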
if (gfp_flags & __GFP_ZERO)
prep_zero_page(page, order, gfp_flags);
+#endif
if (order && (gfp_flags & __GFP_COMP))
prep_compound_page(page, order);
}
__mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
- if (atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]) <= 0 &&
+ if (atomic_long_read_unchecked(&zone->vm_stat[NR_ALLOC_BATCH]) <= 0 &&
!test_bit(ZONE_FAIR_DEPLETED, &zone->flags))
set_bit(ZONE_FAIR_DEPLETED, &zone->flags);
do {
mod_zone_page_state(zone, NR_ALLOC_BATCH,
high_wmark_pages(zone) - low_wmark_pages(zone) -
- atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
+ atomic_long_read_unchecked(&zone->vm_stat[NR_ALLOC_BATCH]));
clear_bit(ZONE_FAIR_DEPLETED, &zone->flags);
} while (zone++ != preferred_zone);
}
__mod_zone_page_state(zone, NR_ALLOC_BATCH,
high_wmark_pages(zone) - low_wmark_pages(zone) -
- atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
+ atomic_long_read_unchecked(&zone->vm_stat[NR_ALLOC_BATCH]));
setup_zone_migrate_reserve(zone);
spin_unlock_irqrestore(&zone->lock, flags);