From: Mike Travis <travis@sgi.com>
Date: Fri, 5 Sep 2008 14:40:20 -0700
Subject: [PATCH] smp: reduce stack requirements for smp_call_function_mask
References: bnc#425240 FATE304266
Patch-mainline: 2.6.28

  * Cleanup cpumask_t usages in smp_call_function_mask to remove a stack
    overflow problem when NR_CPUS=4096.  This removes over 1000 bytes
    from the stack with NR_CPUS=4096.
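
    As a rough illustration of where the savings come from, assuming
    NR_CPUS=4096 so that a cpumask_t bitmap is 4096/8 = 512 bytes; the
    names below (example_cpumask_t, helper_by_value, helper_by_pointer,
    caller) are illustrative only and do not appear in kernel/smp.c:

	/* illustrative sketch only -- not from kernel/smp.c */
	#define EXAMPLE_NR_CPUS 4096

	typedef struct {
		/* 4096 bits == 512 bytes on a 64-bit build */
		unsigned long bits[EXAMPLE_NR_CPUS / (8 * sizeof(unsigned long))];
	} example_cpumask_t;

	/* old style: the whole 512-byte mask is copied onto the callee's stack */
	void helper_by_value(example_cpumask_t mask) { (void)mask; }

	/* new style: only a pointer is passed; the mask is read in place */
	void helper_by_pointer(const example_cpumask_t *mask) { (void)mask; }

	void caller(example_cpumask_t mask)
	{
		example_cpumask_t allbutself = mask;	/* another 512 bytes of stack */

		helper_by_value(allbutself);		/* before: extra 512-byte copy */
		helper_by_pointer(&mask);		/* after: pointer only */
	}

    Dropping the allbutself local and passing the mask to the helper by
    pointer removes two 512-byte on-stack objects, which is where the
    "over 1000 bytes" figure above comes from.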

Signed-off-by: Mike Travis <travis@sgi.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Jiri Slaby <jslaby@suse.cz>
Signed-off-by: Thomas Renninger <trenn@suse.de>
---
 kernel/smp.c |   12 +++++-------
 1 files changed, 5 insertions(+), 7 deletions(-)

diff --git a/kernel/smp.c b/kernel/smp.c
index f362a85..069d066 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -287,7 +287,7 @@ static void quiesce_dummy(void *unused)
  * If a faster scheme can be made, we could go back to preferring stack based
  * data -- the data allocation/free is non-zero cost.
  */
-static void smp_call_function_mask_quiesce_stack(cpumask_t mask)
+static void smp_call_function_mask_quiesce_stack(const cpumask_t *mask)
 {
 	struct call_single_data data;
 	int cpu;
@@ -295,7 +295,7 @@ static void smp_call_function_mask_quiesce_stack(cpumask_t mask)
 	data.func = quiesce_dummy;
 	data.info = NULL;
 
-	for_each_cpu_mask(cpu, mask) {
+	for_each_cpu_mask_nr(cpu, *mask) {
 		data.flags = CSD_FLAG_WAIT;
 		generic_exec_single(cpu, &data);
 	}
@@ -323,7 +323,6 @@ int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info,
 {
 	struct call_function_data d;
 	struct call_function_data *data = NULL;
-	cpumask_t allbutself;
 	unsigned long flags;
 	int cpu, num_cpus;
 	int slowpath = 0;
@@ -332,9 +331,8 @@ int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info,
 	WARN_ON(irqs_disabled());
 
 	cpu = smp_processor_id();
-	allbutself = cpu_online_map;
-	cpu_clear(cpu, allbutself);
-	cpus_and(mask, mask, allbutself);
+	cpus_and(mask, mask, cpu_online_map);
+	cpu_clear(cpu, mask);
 	num_cpus = cpus_weight(mask);
 
 	/*
@@ -377,7 +375,7 @@ int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info,
 	if (wait) {
 		csd_flag_wait(&data->csd);
 		if (unlikely(slowpath))
-			smp_call_function_mask_quiesce_stack(mask);
+			smp_call_function_mask_quiesce_stack(&mask);
 	}
 
 	return 0;
-- 
1.6.0.2