--- a/mm/cma.c
+++ b/mm/cma.c
@@ ... @@
 static void __init cma_activate_area(struct cma *cma)
 {
 	unsigned long pfn, base_pfn;
 	int allocrange, r;
-	struct zone *zone;
 	struct cma_memrange *cmr;
 
 	for (allocrange = 0; allocrange < cma->nranges; allocrange++) {
 		cmr = &cma->ranges[allocrange];
 		base_pfn = cmr->base_pfn;
 
 		/*
 		 * alloc_contig_range() requires the pfn range specified
 		 * to be in the same zone. Simplify by forcing the entire
 		 * CMA resv range to be in the same zone.
 		 */
 		WARN_ON_ONCE(!pfn_valid(base_pfn));
-		zone = page_zone(pfn_to_page(base_pfn));
-		for (pfn = base_pfn + 1; pfn < base_pfn + cmr->count; pfn++) {
-			WARN_ON_ONCE(!pfn_valid(pfn));
-			if (page_zone(pfn_to_page(pfn)) != zone)
-				goto cleanup;
-		}
+		if (pfn_range_intersects_zones(cma->nid, base_pfn, cmr->count))
+			goto cleanup;
 
 		for (pfn = base_pfn; pfn < base_pfn + cmr->count;
 		     pfn += pageblock_nr_pages)
 			init_cma_reserved_pageblock(pfn_to_page(pfn));
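
For reference: pfn_range_intersects_zones() replaces the per-pfn zone walk
removed above without touching each struct page. Its implementation is not
part of this excerpt; the sketch below is only an assumption of its
semantics, built from long-standing zone helpers, and the function name is
made up to avoid confusion with the real one.

	#include <linux/mmzone.h>
	#include <linux/numa.h>

	/*
	 * Sketch only: assumed behavior of pfn_range_intersects_zones().
	 * Returns true when no single zone on @nid spans the whole pfn
	 * range, i.e. the range crosses a zone boundary and could not
	 * later back alloc_contig_range().
	 */
	static bool range_spans_multiple_zones(int nid, unsigned long start_pfn,
					       unsigned long nr_pages)
	{
		struct zone *zone;

		for_each_zone(zone) {
			if (nid != NUMA_NO_NODE && zone_to_nid(zone) != nid)
				continue;
			/* Fully contained in one zone: the range is fine. */
			if (zone->zone_start_pfn <= start_pfn &&
			    start_pfn + nr_pages <= zone_end_pfn(zone))
				return false;
		}
		/* No single zone covers the whole range. */
		return true;
	}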
@@ ... @@ int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
 	cma->ranges[0].base_pfn = PFN_DOWN(base);
 	cma->ranges[0].count = cma->count;
 	cma->nranges = 1;
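+	/* No node hint is available for an already-reserved range. */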
+	cma->nid = NUMA_NO_NODE;
 	*res_cma = cma;
 
 	return 0;
 }
@@ ... @@ static int __init cma_declare_contiguous_multi(phys_addr_t total_size,
 	cma->nranges = nr;
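+	/* Record the node so activation can verify the zone constraint. */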
+	cma->nid = nid;
 	*res_cma = cma;
@@ ... @@ int __init cma_declare_contiguous_nid(phys_addr_t base,
 out:
 	if (ret)
 		memblock_phys_free(base, size);
+	else
+		(*res_cma)->nid = nid;
+
 	return ret;
 }
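
For context, an illustrative caller (hypothetical names; only
cma_declare_contiguous_nid() and its signature come from the kernel):
reserving a node-bound area now records the node in struct cma, which
cma_activate_area() later uses for its zone check.

	#include <linux/cma.h>
	#include <linux/init.h>
	#include <linux/sizes.h>

	/* Hypothetical early-boot setup: 64 MiB of CMA pinned to node 1. */
	static struct cma *example_cma;

	static int __init example_cma_setup(void)
	{
		/* base/limit/alignment of 0 let the allocator choose. */
		return cma_declare_contiguous_nid(0, SZ_64M, 0, 0, 0, false,
						  "example", &example_cma, 1);
	}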
--- a/mm/cma.h
+++ b/mm/cma.h
@@ ... @@ struct cma {
 	struct cma_kobject *cma_kobj;
 #endif
 	bool reserve_pages_on_error;
+	/* NUMA node (NUMA_NO_NODE if unspecified) */
+	int nid;
 };
 
 extern struct cma cma_areas[MAX_CMA_AREAS];