lockdep_assert_held(&pcpu_alloc_mutex);
if (!pages)
- pages = pcpu_mem_zalloc(pages_size, 0);
+ pages = pcpu_mem_zalloc(pages_size, GFP_KERNEL);
return pages;
}
unsigned int cpu, tcpu;
int i;
- gfp |= GFP_KERNEL | __GFP_HIGHMEM;
+ gfp |= __GFP_HIGHMEM;
for_each_possible_cpu(cpu) {
for (i = page_start; i < page_end; i++) {
* This is to facilitate passing through whitelisted flags. The
* returned memory is always zeroed.
*
- * CONTEXT:
- * Does GFP_KERNEL allocation.
- *
* RETURNS:
* Pointer to the allocated area on success, NULL on failure.
*/
return NULL;
if (size <= PAGE_SIZE)
- return kzalloc(size, gfp | GFP_KERNEL);
+ return kzalloc(size, gfp);
else
- return __vmalloc(size, gfp | GFP_KERNEL | __GFP_ZERO,
- PAGE_KERNEL);
+ return __vmalloc(size, gfp | __GFP_ZERO, PAGE_KERNEL);
}
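As the comment above says, pcpu_mem_zalloc() now honours exactly the gfp it is handed and only uses the size to pick the backing allocator. A minimal usage sketch, assuming a hypothetical caller (md and md_size are made up; the flags are ones this series whitelists):

	void *md;

	/* <= PAGE_SIZE is served by kzalloc(), larger sizes by __vmalloc();
	 * either way the memory comes back zeroed and the passed gfp is
	 * respected rather than being widened to GFP_KERNEL. */
	md = pcpu_mem_zalloc(md_size, GFP_KERNEL | __GFP_NOWARN);
	if (!md)
		return -ENOMEM;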
static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
gfp_t gfp)
{
+ /* whitelisted flags that can be passed to the backing allocators */
+ gfp_t pcpu_gfp = gfp & (GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
bool is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL;
bool do_warn = !(gfp & __GFP_NOWARN);
static int warn_limit = 10;
}
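The whitelist mask and the is_atomic test above are easier to read with concrete values; a worked example, assuming the stock gfp.h flag definitions (the two callers are hypothetical):

	/*
	 *   gfp = GFP_KERNEL | __GFP_NOWARN
	 *     pcpu_gfp  = GFP_KERNEL | __GFP_NOWARN  (both bits are whitelisted)
	 *     is_atomic = false                      (all of GFP_KERNEL is set,
	 *                                             so the caller may sleep)
	 *
	 *   gfp = GFP_NOWAIT | __GFP_NOWARN
	 *     pcpu_gfp  = GFP_NOWAIT | __GFP_NOWARN  (GFP_NOWAIT is a subset of
	 *                                             the GFP_KERNEL bits)
	 *     is_atomic = true                       ((gfp & GFP_KERNEL) !=
	 *                                             GFP_KERNEL: direct reclaim,
	 *                                             IO and FS are not allowed)
	 */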
if (list_empty(&pcpu_slot[pcpu_nr_slots - 1])) {
- chunk = pcpu_create_chunk(0);
+ chunk = pcpu_create_chunk(pcpu_gfp);
if (!chunk) {
err = "failed to allocate new chunk";
goto fail;
page_start, page_end) {
WARN_ON(chunk->immutable);
- ret = pcpu_populate_chunk(chunk, rs, re, 0);
+ ret = pcpu_populate_chunk(chunk, rs, re, pcpu_gfp);
spin_lock_irqsave(&pcpu_lock, flags);
if (ret) {
static void pcpu_balance_workfn(struct work_struct *work)
{
/* gfp flags passed to underlying allocators */
- const gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN;
+ const gfp_t gfp = GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN;
LIST_HEAD(to_free);
struct list_head *free_head = &pcpu_slot[pcpu_nr_slots - 1];
struct pcpu_chunk *chunk, *next;
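pcpu_balance_workfn() runs from a workqueue, i.e. process context, so GFP_KERNEL has to be spelled out here now that the backing allocators no longer add it themselves; __GFP_NORETRY and __GFP_NOWARN keep this background repopulation from invoking the OOM killer or warning on failure. Per the hunks above, the same flags are what get handed to pcpu_create_chunk() and pcpu_populate_chunk(), with pcpu_alloc_pages() only ORing in __GFP_HIGHMEM on top. A sketch of the consumer this serves, assuming a hypothetical struct my_stats (the rest is the stock percpu API):

	/* An atomic-context percpu allocation is served from pages that were
	 * populated ahead of time; the balance worker then refills the reserve
	 * in process context using the gfp defined above. */
	struct my_stats __percpu *stats;

	stats = alloc_percpu_gfp(struct my_stats, GFP_NOWAIT);
	if (!stats)
		return -ENOMEM;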