]> git.ipfire.org Git - people/pmueller/ipfire-2.x.git/blame - src/patches/suse-2.6.27.25/patches.arch/x86_sgi_cpus4096-04-add-for_each_cpu_mask_and.patch
Reenabled linux-xen and xen-image build
[people/pmueller/ipfire-2.x.git] / src / patches / suse-2.6.27.25 / patches.arch / x86_sgi_cpus4096-04-add-for_each_cpu_mask_and.patch
CommitLineData
00e5a55c
BS
From: Mike Travis <travis@sgi.com>
Subject: Add for_each_cpu_mask_and
References: bnc#425240 FATE304266
Patch-mainline: 2.6.28

Signed-off-by: Thomas Renninger <trenn@suse.de>

---
 include/linux/cpumask.h |   33 ++++++++++++++++++++++++---------
 lib/cpumask.c           |    9 +++++++++
 2 files changed, 33 insertions(+), 9 deletions(-)

--- linux-2.6.27.4-HEAD_20081027185619.orig/include/linux/cpumask.h
+++ linux-2.6.27.4-HEAD_20081027185619/include/linux/cpumask.h
@@ -109,6 +109,7 @@
  *
  * for_each_cpu_mask(cpu, mask)		for-loop cpu over mask using NR_CPUS
  * for_each_cpu_mask_nr(cpu, mask)	for-loop cpu over mask using nr_cpu_ids
+ * for_each_cpu_mask_and(cpu, mask, and) for-loop cpu over (mask & and).
  *
  * int num_online_cpus()		Number of online CPUs
  * int num_possible_cpus()		Number of all possible CPUs
@@ -400,29 +401,41 @@ static inline void __cpus_fold(cpumask_t
 
 #if NR_CPUS == 1
 
-#define nr_cpu_ids		1
-#define first_cpu(src)		({ (void)(src); 0; })
-#define next_cpu(n, src)	({ (void)(src); 1; })
-#define any_online_cpu(mask)	0
-#define for_each_cpu_mask(cpu, mask)	\
+#define nr_cpu_ids			1
+#define first_cpu(src)			({ (void)(src); 0; })
+#define next_cpu(n, src)		({ (void)(src); 1; })
+#define cpumask_next_and(n, srcp, andp)	({ (void)(srcp), (void)(andp); 1; })
+#define any_online_cpu(mask)		0
+
+#define for_each_cpu_mask(cpu, mask)		\
 	for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
+#define for_each_cpu_mask_and(cpu, mask, and)	\
+	for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask, (void)and)
 
 #else /* NR_CPUS > 1 */
 
 extern int nr_cpu_ids;
 int __first_cpu(const cpumask_t *srcp);
 int __next_cpu(int n, const cpumask_t *srcp);
+int cpumask_next_and(int n, const cpumask_t *srcp, const cpumask_t *andp);
 int __any_online_cpu(const cpumask_t *mask);
 
 #define first_cpu(src)		__first_cpu(&(src))
 #define next_cpu(n, src)	__next_cpu((n), &(src))
 #define any_online_cpu(mask)	__any_online_cpu(&(mask))
+
 #define for_each_cpu_mask(cpu, mask)	\
 	for ((cpu) = -1;		\
 		(cpu) = next_cpu((cpu), (mask)),	\
-		(cpu) < NR_CPUS; )
+		(cpu) < NR_CPUS;)
+#define for_each_cpu_mask_and(cpu, mask, and)	\
+	for ((cpu) = -1;		\
+		(cpu) = cpumask_next_and((cpu), &(mask), &(and)),	\
+		(cpu) < nr_cpu_ids;)
 #endif
 
+#define cpumask_first_and(mask, and)	cpumask_next_and(-1, (mask), (and))
+
 #if NR_CPUS <= 64
 
 #define next_cpu_nr(n, src)		next_cpu(n, src)
@@ -432,12 +445,14 @@ int __any_online_cpu(const cpumask_t *ma
 #else /* NR_CPUS > 64 */
 
 int __next_cpu_nr(int n, const cpumask_t *srcp);
-#define next_cpu_nr(n, src)	__next_cpu_nr((n), &(src))
-#define cpus_weight_nr(cpumask)	__cpus_weight(&(cpumask), nr_cpu_ids)
+
+#define next_cpu_nr(n, src)		__next_cpu_nr((n), &(src))
+#define cpus_weight_nr(cpumask)		__cpus_weight(&(cpumask), nr_cpu_ids)
+
 #define for_each_cpu_mask_nr(cpu, mask)	\
 	for ((cpu) = -1;		\
 		(cpu) = next_cpu_nr((cpu), (mask)),	\
-		(cpu) < nr_cpu_ids; )
+		(cpu) < nr_cpu_ids;)
 
 #endif /* NR_CPUS > 64 */
 
--- linux-2.6.27.4-HEAD_20081027185619.orig/lib/cpumask.c
+++ linux-2.6.27.4-HEAD_20081027185619/lib/cpumask.c
@@ -15,6 +15,15 @@ int __next_cpu(int n, const cpumask_t *s
 }
 EXPORT_SYMBOL(__next_cpu);
 
+int cpumask_next_and(int n, const cpumask_t *srcp, const cpumask_t *andp)
+{
+	while ((n = next_cpu_nr(n, *srcp)) < nr_cpu_ids)
+		if (cpu_isset(n, *andp))
+			break;
+	return n;
+}
+EXPORT_SYMBOL(cpumask_next_and);
+
 #if NR_CPUS > 64
 int __next_cpu_nr(int n, const cpumask_t *srcp)
 {