From: Peter Zijlstra <a.p.zijlstra@chello.nl>
Subject: mm: gfp_to_alloc_flags()
Patch-mainline: No
References: FATE#303834

Clean up the code by factoring the gfp_mask to alloc_flags mapping out
into its own helper, gfp_to_alloc_flags().

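For illustration, the helper maps the two most common cases as follows
(a sketch only, assuming the allocating task is not realtime, is not in
interrupt context, and has neither PF_MEMALLOC nor TIF_MEMDIE set):

	/* GFP_KERNEL (__GFP_WAIT set): stay above the min watermark
	 * and honour cpuset constraints. */
	gfp_to_alloc_flags(GFP_KERNEL)
		== (ALLOC_WMARK_MIN | ALLOC_CPUSET);

	/* GFP_ATOMIC (__GFP_HIGH, no __GFP_WAIT): dig deeper into the
	 * reserves and ignore cpusets rather than fail the allocation. */
	gfp_to_alloc_flags(GFP_ATOMIC)
		== (ALLOC_WMARK_MIN | ALLOC_HIGH | ALLOC_HARDER);
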
[neilb@suse.de says]
As the test:

- if (((p->flags & PF_MEMALLOC) || unlikely(test_thread_flag(TIF_MEMDIE)))
- && !in_interrupt()) {
- if (!(gfp_mask & __GFP_NOMEMALLOC)) {

has been replaced with a slightly weaker one:

+ if (alloc_flags & ALLOC_NO_WATERMARKS) {

we need to ensure we don't recurse when PF_MEMALLOC is set.

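The old test sent every !in_interrupt() PF_MEMALLOC task to nopage,
whether or not __GFP_NOMEMALLOC was set; the new test only covers tasks
that actually got ALLOC_NO_WATERMARKS. The case that would slip through
is sketched below (illustrative only, not the exact kernel control flow):

	/*
	 * A task that is itself reclaiming (PF_MEMALLOC) allocates with
	 * __GFP_NOMEMALLOC, so gfp_to_alloc_flags() does not set
	 * ALLOC_NO_WATERMARKS:
	 */
	current->flags |= PF_MEMALLOC;	/* set by the reclaim path */
	alloc_flags = gfp_to_alloc_flags(GFP_KERNEL | __GFP_NOMEMALLOC);
	/*
	 * (alloc_flags & ALLOC_NO_WATERMARKS) == 0 here, so the rebalance
	 * block is skipped and, without the explicit PF_MEMALLOC check
	 * added in the last hunk, we would recurse into direct reclaim.
	 */
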
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Neil Brown <neilb@suse.de>
Acked-by: Suresh Jayaraman <sjayaraman@suse.de>

---
 mm/page_alloc.c |   87 ++++++++++++++++++++++++++++++++++----------------------
 1 file changed, 54 insertions(+), 33 deletions(-)

--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1455,6 +1455,44 @@ try_next_zone:
 }
 
 /*
+ * get the deepest reaching allocation flags for the given gfp_mask
+ */
+static int gfp_to_alloc_flags(gfp_t gfp_mask)
+{
+	struct task_struct *p = current;
+	int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
+	const gfp_t wait = gfp_mask & __GFP_WAIT;
+
+	/*
+	 * The caller may dip into page reserves a bit more if the caller
+	 * cannot run direct reclaim, or if the caller has realtime scheduling
+	 * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will
+	 * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH).
+	 */
+	if (gfp_mask & __GFP_HIGH)
+		alloc_flags |= ALLOC_HIGH;
+
+	if (!wait) {
+		alloc_flags |= ALLOC_HARDER;
+		/*
+		 * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
+		 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
+		 */
+		alloc_flags &= ~ALLOC_CPUSET;
+	} else if (unlikely(rt_task(p)) && !in_interrupt())
+		alloc_flags |= ALLOC_HARDER;
+
+	if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) {
+		if (!in_interrupt() &&
+		    ((p->flags & PF_MEMALLOC) ||
+		     unlikely(test_thread_flag(TIF_MEMDIE))))
+			alloc_flags |= ALLOC_NO_WATERMARKS;
+	}
+
+	return alloc_flags;
+}
+
+/*
  * This is the 'heart' of the zoned buddy allocator.
  */
 struct page *
@@ -1512,49 +1550,28 @@ restart:
 	 * OK, we're below the kswapd watermark and have kicked background
 	 * reclaim. Now things get more complex, so set up alloc_flags according
 	 * to how we want to proceed.
-	 *
-	 * The caller may dip into page reserves a bit more if the caller
-	 * cannot run direct reclaim, or if the caller has realtime scheduling
-	 * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will
-	 * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH).
 	 */
-	alloc_flags = ALLOC_WMARK_MIN;
-	if ((unlikely(rt_task(p)) && !in_interrupt()) || !wait)
-		alloc_flags |= ALLOC_HARDER;
-	if (gfp_mask & __GFP_HIGH)
-		alloc_flags |= ALLOC_HIGH;
-	if (wait)
-		alloc_flags |= ALLOC_CPUSET;
+	alloc_flags = gfp_to_alloc_flags(gfp_mask);
 
-	/*
-	 * Go through the zonelist again. Let __GFP_HIGH and allocations
-	 * coming from realtime tasks go deeper into reserves.
-	 *
-	 * This is the last chance, in general, before the goto nopage.
-	 * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
-	 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
-	 */
+	/* This is the last chance, in general, before the goto nopage. */
 	page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist,
-						high_zoneidx, alloc_flags);
+			high_zoneidx, alloc_flags & ~ALLOC_NO_WATERMARKS);
 	if (page)
 		goto got_pg;
 
 	/* This allocation should allow future memory freeing. */
-
 rebalance:
-	if (((p->flags & PF_MEMALLOC) || unlikely(test_thread_flag(TIF_MEMDIE)))
-			&& !in_interrupt()) {
-		if (!(gfp_mask & __GFP_NOMEMALLOC)) {
+	if (alloc_flags & ALLOC_NO_WATERMARKS) {
 nofail_alloc:
-			/* go through the zonelist yet again, ignoring mins */
-			page = get_page_from_freelist(gfp_mask, nodemask, order,
+		/* go through the zonelist yet again, ignoring mins */
+		page = get_page_from_freelist(gfp_mask, nodemask, order,
 				zonelist, high_zoneidx, ALLOC_NO_WATERMARKS);
-			if (page)
-				goto got_pg;
-			if (gfp_mask & __GFP_NOFAIL) {
-				congestion_wait(WRITE, HZ/50);
-				goto nofail_alloc;
-			}
+		if (page)
+			goto got_pg;
+
+		if (wait && (gfp_mask & __GFP_NOFAIL)) {
+			congestion_wait(WRITE, HZ/50);
+			goto nofail_alloc;
 		}
 		goto nopage;
 	}
@@ -1563,6 +1580,10 @@ nofail_alloc:
 	if (!wait)
 		goto nopage;
 
+	/* Avoid recursion of direct reclaim */
+	if (p->flags & PF_MEMALLOC)
+		goto nopage;
+
 	cond_resched();
 
 	/* We now go into synchronous reclaim */