From: Jan Blunck <jblunck@suse.de>
Subject: Dynamically allocate struct mem_cgroup_stat_cpu memory

When increasing NR_CPUS to 4096, the size of struct mem_cgroup grows to
507904 bytes per instance on x86_64. This patch allocates the per-CPU
statistics array dynamically, sized by nr_cpu_ids instead of NR_CPUS.
The footprint of init_mem_cgroup stays that large, however, since its
statistics array remains statically allocated.
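
As a reference for the pattern used below, here is a minimal sketch of the
size-dependent allocation and the matching free path. The stat_alloc()/
stat_free() helper names and the standalone form are illustrative only and
are not part of this patch:

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

/*
 * Illustrative sketch only: use kzalloc() while the object fits in a
 * page, fall back to vmalloc() for larger sizes to avoid high-order
 * kmalloc allocations, and pick the matching free routine on release.
 */
static void *stat_alloc(size_t size)
{
	void *p;

	if (size > PAGE_SIZE) {
		p = vmalloc(size);
		if (p)
			memset(p, 0, size);	/* vmalloc() does not zero */
	} else
		p = kzalloc(size, GFP_KERNEL);

	return p;
}

static void stat_free(void *p)
{
	if (is_vmalloc_addr(p))		/* was it vmalloc()ed? */
		vfree(p);
	else
		kfree(p);
}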

Signed-off-by: Jan Blunck <jblunck@suse.de>
---
 mm/memcontrol.c |   49 +++++++++++++++++++++++++++++++++++++++++--------
 1 file changed, 41 insertions(+), 8 deletions(-)

Index: b/mm/memcontrol.c
===================================================================
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -59,7 +59,7 @@ struct mem_cgroup_stat_cpu {
 } ____cacheline_aligned_in_smp;
 
 struct mem_cgroup_stat {
-	struct mem_cgroup_stat_cpu cpustat[NR_CPUS];
+	struct mem_cgroup_stat_cpu *cpustat;
 };
 
 /*
@@ -143,6 +143,7 @@ struct mem_cgroup {
 	struct mem_cgroup_stat stat;
 };
 static struct mem_cgroup init_mem_cgroup;
+static struct mem_cgroup_stat_cpu init_mem_cgroup_stat_cpu[NR_CPUS];
 
 /*
  * We use the lower bit of the page->page_cgroup pointer as a bit spin
@@ -1097,23 +1098,54 @@ static void free_mem_cgroup_per_zone_inf
 static struct mem_cgroup *mem_cgroup_alloc(void)
 {
 	struct mem_cgroup *mem;
+	struct mem_cgroup_stat_cpu *cpustat;
+	size_t statsize = nr_cpu_ids * sizeof(*cpustat);
 
-	if (sizeof(*mem) < PAGE_SIZE)
-		mem = kmalloc(sizeof(*mem), GFP_KERNEL);
-	else
+	if (sizeof(*mem) > PAGE_SIZE) {
 		mem = vmalloc(sizeof(*mem));
-
-	if (mem)
+		if (!mem)
+			goto out;
 		memset(mem, 0, sizeof(*mem));
+	} else
+		mem = kzalloc(sizeof(*mem), GFP_KERNEL);
+
+	if (!mem)
+		goto out;
+
+	if (statsize > PAGE_SIZE) {
+		cpustat = vmalloc(statsize);
+		if (!cpustat)
+			goto out_mem;
+		memset(cpustat, 0, statsize);
+	} else
+		cpustat = kzalloc(statsize, GFP_KERNEL);
+
+	if (!cpustat)
+		goto out_mem;
+
+	mem->stat.cpustat = cpustat;
 	return mem;
+
+out_mem:
+	if (is_vmalloc_addr(mem))
+		vfree(mem);
+	else
+		kfree(mem);
+out:
+	return NULL;
 }
 
 static void mem_cgroup_free(struct mem_cgroup *mem)
 {
-	if (sizeof(*mem) < PAGE_SIZE)
-		kfree(mem);
+	if (is_vmalloc_addr(mem->stat.cpustat))
+		vfree(mem->stat.cpustat);
 	else
+		kfree(mem->stat.cpustat);
+
+	if (is_vmalloc_addr(mem))
 		vfree(mem);
+	else
+		kfree(mem);
 }
 
 
@@ -1125,6 +1157,7 @@ mem_cgroup_create(struct cgroup_subsys *
 
 	if (unlikely((cont->parent) == NULL)) {
 		mem = &init_mem_cgroup;
+		mem->stat.cpustat = &init_mem_cgroup_stat_cpu[0];
 		page_cgroup_cache = KMEM_CACHE(page_cgroup, SLAB_PANIC);
 	} else {
 		mem = mem_cgroup_alloc();