1 From: Peter Zijlstra <a.p.zijlstra@chello.nl>
2 Subject: mm: gfp_to_alloc_flags()
4 References: FATE#303834
6 Clean up the code by factoring out the gfp_mask to alloc_flags mapping into a new helper, gfp_to_alloc_flags().
11 - if (((p->flags & PF_MEMALLOC) || unlikely(test_thread_flag(TIF_MEMDIE)))
12 - && !in_interrupt()) {
13 - if (!(gfp_mask & __GFP_NOMEMALLOC)) {
15 has been replaced with a slightly weaker one:
17 + if (alloc_flags & ALLOC_NO_WATERMARKS) {
19 so we need an explicit check to ensure we don't recurse into direct reclaim when PF_MEMALLOC is set
21 Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
22 Acked-by: Neil Brown <neilb@suse.de>
23 Acked-by: Suresh Jayaraman <sjayaraman@suse.de>
26 mm/page_alloc.c | 87 ++++++++++++++++++++++++++++++++++----------------------
27 1 file changed, 54 insertions(+), 33 deletions(-)
31 @@ -1460,6 +1460,44 @@ try_next_zone:
35 + * get the deepest reaching allocation flags for the given gfp_mask
37 +static int gfp_to_alloc_flags(gfp_t gfp_mask)
39 + struct task_struct *p = current;
40 + int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
41 + const gfp_t wait = gfp_mask & __GFP_WAIT;
44 + * The caller may dip into page reserves a bit more if the caller
45 + * cannot run direct reclaim, or if the caller has realtime scheduling
46 + * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will
47 + * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH).
49 + if (gfp_mask & __GFP_HIGH)
50 + alloc_flags |= ALLOC_HIGH;
53 + alloc_flags |= ALLOC_HARDER;
55 + * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
56 + * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
58 + alloc_flags &= ~ALLOC_CPUSET;
59 + } else if (unlikely(rt_task(p)) && !in_interrupt())
60 + alloc_flags |= ALLOC_HARDER;
62 + if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) {
63 + if (!in_interrupt() &&
64 + ((p->flags & PF_MEMALLOC) ||
65 + unlikely(test_thread_flag(TIF_MEMDIE))))
66 + alloc_flags |= ALLOC_NO_WATERMARKS;
73 * This is the 'heart' of the zoned buddy allocator.
76 @@ -1517,49 +1555,28 @@ restart:
77 * OK, we're below the kswapd watermark and have kicked background
78 * reclaim. Now things get more complex, so set up alloc_flags according
79 * to how we want to proceed.
81 - * The caller may dip into page reserves a bit more if the caller
82 - * cannot run direct reclaim, or if the caller has realtime scheduling
83 - * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will
84 - * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH).
86 - alloc_flags = ALLOC_WMARK_MIN;
87 - if ((unlikely(rt_task(p)) && !in_interrupt()) || !wait)
88 - alloc_flags |= ALLOC_HARDER;
89 - if (gfp_mask & __GFP_HIGH)
90 - alloc_flags |= ALLOC_HIGH;
92 - alloc_flags |= ALLOC_CPUSET;
93 + alloc_flags = gfp_to_alloc_flags(gfp_mask);
96 - * Go through the zonelist again. Let __GFP_HIGH and allocations
97 - * coming from realtime tasks go deeper into reserves.
99 - * This is the last chance, in general, before the goto nopage.
100 - * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
101 - * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
103 + /* This is the last chance, in general, before the goto nopage. */
104 page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist,
105 - high_zoneidx, alloc_flags);
106 + high_zoneidx, alloc_flags & ~ALLOC_NO_WATERMARKS);
110 /* This allocation should allow future memory freeing. */
113 - if (((p->flags & PF_MEMALLOC) || unlikely(test_thread_flag(TIF_MEMDIE)))
114 - && !in_interrupt()) {
115 - if (!(gfp_mask & __GFP_NOMEMALLOC)) {
116 + if (alloc_flags & ALLOC_NO_WATERMARKS) {
118 - /* go through the zonelist yet again, ignoring mins */
119 - page = get_page_from_freelist(gfp_mask, nodemask, order,
120 + /* go through the zonelist yet again, ignoring mins */
121 + page = get_page_from_freelist(gfp_mask, nodemask, order,
122 zonelist, high_zoneidx, ALLOC_NO_WATERMARKS);
125 - if (gfp_mask & __GFP_NOFAIL) {
126 - congestion_wait(WRITE, HZ/50);
132 + if (wait && (gfp_mask & __GFP_NOFAIL)) {
133 + congestion_wait(WRITE, HZ/50);
138 @@ -1568,6 +1585,10 @@ nofail_alloc:
142 + /* Avoid recursion of direct reclaim */
143 + if (p->flags & PF_MEMALLOC)
148 /* We now go into synchronous reclaim */