]> git.ipfire.org Git - thirdparty/kernel/stable-queue.git/blob
132e998c24d3999973c6d863752c2a56126f238d
[thirdparty/kernel/stable-queue.git] /
1 From 42b64281453249dac52861f9b97d18552a7ec62b Mon Sep 17 00:00:00 2001
2 From: Tejun Heo <tj@kernel.org>
3 Date: Fri, 27 Apr 2012 08:42:53 -0700
4 Subject: percpu: pcpu_embed_first_chunk() should free unused parts after all allocs are complete
5
6 From: Tejun Heo <tj@kernel.org>
7
8 commit 42b64281453249dac52861f9b97d18552a7ec62b upstream.
9
10 pcpu_embed_first_chunk() allocates memory for each node, copies percpu
11 data and frees unused portions of it before proceeding to the next
12 group. This assumes that allocations for different nodes don't
13 overlap; however, depending on memory topology, the bootmem allocator
14 may end up allocating memory from a different node than the requested
15 one which may overlap with the portion freed from one of the previous
16 percpu areas. This leads to percpu groups for different nodes
17 overlapping which is a serious bug.
18
19 This patch separates out copy & partial free from the allocation loop
20 such that all allocations are complete before partial frees happen.
21
22 This also fixes overlapping frees which could happen on allocation
23 failure path - out_free_areas path frees whole groups but the groups
24 could have portions freed at that point.
25
26 Signed-off-by: Tejun Heo <tj@kernel.org>
27 Reported-by: "Pavel V. Panteleev" <pp_84@mail.ru>
28 Tested-by: "Pavel V. Panteleev" <pp_84@mail.ru>
29 LKML-Reference: <E1SNhwY-0007ui-V7.pp_84-mail-ru@f220.mail.ru>
30 Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
31
32 ---
33 mm/percpu.c | 10 ++++++++++
34 1 file changed, 10 insertions(+)
35
36 --- a/mm/percpu.c
37 +++ b/mm/percpu.c
38 @@ -1650,6 +1650,16 @@ int __init pcpu_embed_first_chunk(size_t
39 areas[group] = ptr;
40
41 base = min(ptr, base);
42 + }
43 +
44 + /*
45 + * Copy data and free unused parts. This should happen after all
46 + * allocations are complete; otherwise, we may end up with
47 + * overlapping groups.
48 + */
49 + for (group = 0; group < ai->nr_groups; group++) {
50 + struct pcpu_group_info *gi = &ai->groups[group];
51 + void *ptr = areas[group];
52
53 for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) {
54 if (gi->cpu_map[i] == NR_CPUS) {