From b399151cb48db30ad1e0e93dd40d68c6d007b637 Mon Sep 17 00:00:00 2001
From: Jia Zhang <qianyue.zj@alibaba-inc.com>
Date: Mon, 1 Jan 2018 09:52:10 +0800
Subject: x86/cpu: Rename cpu_data.x86_mask to cpu_data.x86_stepping

From: Jia Zhang <qianyue.zj@alibaba-inc.com>

commit b399151cb48db30ad1e0e93dd40d68c6d007b637 upstream.

x86_mask is a confusing name which is hard to associate with the
processor's stepping.

Additionally, correct an indent issue in lib/cpu.c.
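
For reference only, not part of the upstream change: the renamed field holds the
low nibble of the CPUID leaf 1 signature, which is exactly what the common.c and
head_32.S hunks below store into it. A minimal illustrative sketch in C, using a
hypothetical helper name:

        /* Illustrative only: stepping is bits 3:0 of the CPUID(1) EAX signature. */
        static inline unsigned int example_stepping_from_sig(unsigned int sig)
        {
                return sig & 0xf;       /* same value cpu_detect() assigns to c->x86_stepping */
        }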

Signed-off-by: Jia Zhang <qianyue.zj@alibaba-inc.com>
[ Updated it to more recent kernels. ]
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: bp@alien8.de
Cc: tony.luck@intel.com
Link: http://lkml.kernel.org/r/1514771530-70829-1-git-send-email-qianyue.zj@alibaba-inc.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 arch/x86/events/intel/core.c | 2 +-
 arch/x86/events/intel/lbr.c | 2 +-
 arch/x86/events/intel/p6.c | 2 +-
 arch/x86/include/asm/acpi.h | 2 +-
 arch/x86/include/asm/processor.h | 2 +-
 arch/x86/kernel/amd_nb.c | 2 +-
 arch/x86/kernel/apic/apic.c | 6 +++---
 arch/x86/kernel/asm-offsets_32.c | 2 +-
 arch/x86/kernel/cpu/amd.c | 28 ++++++++++++++--------------
 arch/x86/kernel/cpu/centaur.c | 4 ++--
 arch/x86/kernel/cpu/common.c | 8 ++++----
 arch/x86/kernel/cpu/cyrix.c | 2 +-
 arch/x86/kernel/cpu/intel.c | 18 +++++++++---------
 arch/x86/kernel/cpu/intel_rdt.c | 2 +-
 arch/x86/kernel/cpu/microcode/intel.c | 4 ++--
 arch/x86/kernel/cpu/mtrr/generic.c | 2 +-
 arch/x86/kernel/cpu/mtrr/main.c | 4 ++--
 arch/x86/kernel/cpu/proc.c | 4 ++--
 arch/x86/kernel/head_32.S | 4 ++--
 arch/x86/kernel/mpparse.c | 2 +-
 arch/x86/lib/cpu.c | 2 +-
 drivers/char/hw_random/via-rng.c | 2 +-
 drivers/cpufreq/acpi-cpufreq.c | 2 +-
 drivers/cpufreq/longhaul.c | 6 +++---
 drivers/cpufreq/p4-clockmod.c | 2 +-
 drivers/cpufreq/powernow-k7.c | 2 +-
 drivers/cpufreq/speedstep-centrino.c | 4 ++--
 drivers/cpufreq/speedstep-lib.c | 6 +++---
 drivers/crypto/padlock-aes.c | 2 +-
 drivers/edac/amd64_edac.c | 2 +-
 drivers/hwmon/coretemp.c | 6 +++---
 drivers/hwmon/hwmon-vid.c | 2 +-
 drivers/hwmon/k10temp.c | 2 +-
 drivers/hwmon/k8temp.c | 2 +-
 drivers/video/fbdev/geode/video_gx.c | 2 +-
 35 files changed, 73 insertions(+), 73 deletions(-)

--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -3559,7 +3559,7 @@ static int intel_snb_pebs_broken(int cpu
                 break;

         case INTEL_FAM6_SANDYBRIDGE_X:
-                switch (cpu_data(cpu).x86_mask) {
+                switch (cpu_data(cpu).x86_stepping) {
                 case 6: rev = 0x618; break;
                 case 7: rev = 0x70c; break;
                 }
--- a/arch/x86/events/intel/lbr.c
+++ b/arch/x86/events/intel/lbr.c
@@ -1186,7 +1186,7 @@ void __init intel_pmu_lbr_init_atom(void
          * on PMU interrupt
          */
         if (boot_cpu_data.x86_model == 28
-            && boot_cpu_data.x86_mask < 10) {
+            && boot_cpu_data.x86_stepping < 10) {
                 pr_cont("LBR disabled due to erratum");
                 return;
         }
--- a/arch/x86/events/intel/p6.c
+++ b/arch/x86/events/intel/p6.c
@@ -234,7 +234,7 @@ static __initconst const struct x86_pmu

 static __init void p6_pmu_rdpmc_quirk(void)
 {
-        if (boot_cpu_data.x86_mask < 9) {
+        if (boot_cpu_data.x86_stepping < 9) {
                 /*
                  * PPro erratum 26; fixed in stepping 9 and above.
                  */
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -94,7 +94,7 @@ static inline unsigned int acpi_processo
         if (boot_cpu_data.x86 == 0x0F &&
             boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
             boot_cpu_data.x86_model <= 0x05 &&
-            boot_cpu_data.x86_mask < 0x0A)
+            boot_cpu_data.x86_stepping < 0x0A)
                 return 1;
         else if (boot_cpu_has(X86_BUG_AMD_APIC_C1E))
                 return 1;
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -91,7 +91,7 @@ struct cpuinfo_x86 {
         __u8                    x86;            /* CPU family */
         __u8                    x86_vendor;     /* CPU vendor */
         __u8                    x86_model;
-        __u8                    x86_mask;
+        __u8                    x86_stepping;
 #ifdef CONFIG_X86_64
         /* Number of 4K pages in DTLB/ITLB combined(in pages): */
         int                     x86_tlbsize;
--- a/arch/x86/kernel/amd_nb.c
+++ b/arch/x86/kernel/amd_nb.c
@@ -235,7 +235,7 @@ int amd_cache_northbridges(void)
         if (boot_cpu_data.x86 == 0x10 &&
             boot_cpu_data.x86_model >= 0x8 &&
             (boot_cpu_data.x86_model > 0x9 ||
-             boot_cpu_data.x86_mask >= 0x1))
+             boot_cpu_data.x86_stepping >= 0x1))
                 amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

         if (boot_cpu_data.x86 == 0x15)
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -553,7 +553,7 @@ static DEFINE_PER_CPU(struct clock_event

 static u32 hsx_deadline_rev(void)
 {
-        switch (boot_cpu_data.x86_mask) {
+        switch (boot_cpu_data.x86_stepping) {
         case 0x02: return 0x3a; /* EP */
         case 0x04: return 0x0f; /* EX */
         }
@@ -563,7 +563,7 @@ static u32 hsx_deadline_rev(void)

 static u32 bdx_deadline_rev(void)
 {
-        switch (boot_cpu_data.x86_mask) {
+        switch (boot_cpu_data.x86_stepping) {
         case 0x02: return 0x00000011;
         case 0x03: return 0x0700000e;
         case 0x04: return 0x0f00000c;
@@ -575,7 +575,7 @@ static u32 bdx_deadline_rev(void)

 static u32 skx_deadline_rev(void)
 {
-        switch (boot_cpu_data.x86_mask) {
+        switch (boot_cpu_data.x86_stepping) {
         case 0x03: return 0x01000136;
         case 0x04: return 0x02000014;
         }
--- a/arch/x86/kernel/asm-offsets_32.c
+++ b/arch/x86/kernel/asm-offsets_32.c
@@ -18,7 +18,7 @@ void foo(void)
         OFFSET(CPUINFO_x86, cpuinfo_x86, x86);
         OFFSET(CPUINFO_x86_vendor, cpuinfo_x86, x86_vendor);
         OFFSET(CPUINFO_x86_model, cpuinfo_x86, x86_model);
-        OFFSET(CPUINFO_x86_mask, cpuinfo_x86, x86_mask);
+        OFFSET(CPUINFO_x86_stepping, cpuinfo_x86, x86_stepping);
         OFFSET(CPUINFO_cpuid_level, cpuinfo_x86, cpuid_level);
         OFFSET(CPUINFO_x86_capability, cpuinfo_x86, x86_capability);
         OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id);
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -119,7 +119,7 @@ static void init_amd_k6(struct cpuinfo_x
                 return;
         }

-        if (c->x86_model == 6 && c->x86_mask == 1) {
+        if (c->x86_model == 6 && c->x86_stepping == 1) {
                 const int K6_BUG_LOOP = 1000000;
                 int n;
                 void (*f_vide)(void);
@@ -149,7 +149,7 @@ static void init_amd_k6(struct cpuinfo_x

         /* K6 with old style WHCR */
         if (c->x86_model < 8 ||
-           (c->x86_model == 8 && c->x86_mask < 8)) {
+           (c->x86_model == 8 && c->x86_stepping < 8)) {
                 /* We can only write allocate on the low 508Mb */
                 if (mbytes > 508)
                         mbytes = 508;
@@ -168,7 +168,7 @@ static void init_amd_k6(struct cpuinfo_x
                 return;
         }

-        if ((c->x86_model == 8 && c->x86_mask > 7) ||
+        if ((c->x86_model == 8 && c->x86_stepping > 7) ||
              c->x86_model == 9 || c->x86_model == 13) {
                 /* The more serious chips .. */

@@ -221,7 +221,7 @@ static void init_amd_k7(struct cpuinfo_x
          * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx
          * As per AMD technical note 27212 0.2
          */
-        if ((c->x86_model == 8 && c->x86_mask >= 1) || (c->x86_model > 8)) {
+        if ((c->x86_model == 8 && c->x86_stepping >= 1) || (c->x86_model > 8)) {
                 rdmsr(MSR_K7_CLK_CTL, l, h);
                 if ((l & 0xfff00000) != 0x20000000) {
                         pr_info("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n",
@@ -241,12 +241,12 @@ static void init_amd_k7(struct cpuinfo_x
          * but they are not certified as MP capable.
          */
         /* Athlon 660/661 is valid. */
-        if ((c->x86_model == 6) && ((c->x86_mask == 0) ||
-            (c->x86_mask == 1)))
+        if ((c->x86_model == 6) && ((c->x86_stepping == 0) ||
+            (c->x86_stepping == 1)))
                 return;

         /* Duron 670 is valid */
-        if ((c->x86_model == 7) && (c->x86_mask == 0))
+        if ((c->x86_model == 7) && (c->x86_stepping == 0))
                 return;

         /*
@@ -256,8 +256,8 @@ static void init_amd_k7(struct cpuinfo_x
          * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for
          * more.
          */
-        if (((c->x86_model == 6) && (c->x86_mask >= 2)) ||
-            ((c->x86_model == 7) && (c->x86_mask >= 1)) ||
+        if (((c->x86_model == 6) && (c->x86_stepping >= 2)) ||
+            ((c->x86_model == 7) && (c->x86_stepping >= 1)) ||
             (c->x86_model > 7))
                 if (cpu_has(c, X86_FEATURE_MP))
                         return;
@@ -583,7 +583,7 @@ static void early_init_amd(struct cpuinf
         /* Set MTRR capability flag if appropriate */
         if (c->x86 == 5)
                 if (c->x86_model == 13 || c->x86_model == 9 ||
-                    (c->x86_model == 8 && c->x86_mask >= 8))
+                    (c->x86_model == 8 && c->x86_stepping >= 8))
                         set_cpu_cap(c, X86_FEATURE_K6_MTRR);
 #endif
 #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI)
@@ -769,7 +769,7 @@ static void init_amd_zn(struct cpuinfo_x
          * Fix erratum 1076: CPB feature bit not being set in CPUID. It affects
          * all up to and including B1.
          */
-        if (c->x86_model <= 1 && c->x86_mask <= 1)
+        if (c->x86_model <= 1 && c->x86_stepping <= 1)
                 set_cpu_cap(c, X86_FEATURE_CPB);
 }

@@ -880,11 +880,11 @@ static unsigned int amd_size_cache(struc
         /* AMD errata T13 (order #21922) */
         if ((c->x86 == 6)) {
                 /* Duron Rev A0 */
-                if (c->x86_model == 3 && c->x86_mask == 0)
+                if (c->x86_model == 3 && c->x86_stepping == 0)
                         size = 64;
                 /* Tbird rev A1/A2 */
                 if (c->x86_model == 4 &&
-                        (c->x86_mask == 0 || c->x86_mask == 1))
+                        (c->x86_stepping == 0 || c->x86_stepping == 1))
                         size = 256;
         }
         return size;
@@ -1021,7 +1021,7 @@ static bool cpu_has_amd_erratum(struct c
         }

         /* OSVW unavailable or ID unknown, match family-model-stepping range */
-        ms = (cpu->x86_model << 4) | cpu->x86_mask;
+        ms = (cpu->x86_model << 4) | cpu->x86_stepping;
         while ((range = *erratum++))
                 if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) &&
                     (ms >= AMD_MODEL_RANGE_START(range)) &&
--- a/arch/x86/kernel/cpu/centaur.c
+++ b/arch/x86/kernel/cpu/centaur.c
@@ -136,7 +136,7 @@ static void init_centaur(struct cpuinfo_
                         clear_cpu_cap(c, X86_FEATURE_TSC);
                         break;
                 case 8:
-                        switch (c->x86_mask) {
+                        switch (c->x86_stepping) {
                         default:
                                 name = "2";
                                 break;
@@ -211,7 +211,7 @@ centaur_size_cache(struct cpuinfo_x86 *c
          * - Note, it seems this may only be in engineering samples.
          */
         if ((c->x86 == 6) && (c->x86_model == 9) &&
-                        (c->x86_mask == 1) && (size == 65))
+                        (c->x86_stepping == 1) && (size == 65))
                 size -= 1;
         return size;
 }
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -707,7 +707,7 @@ void cpu_detect(struct cpuinfo_x86 *c)
                 cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
                 c->x86          = x86_family(tfms);
                 c->x86_model    = x86_model(tfms);
-                c->x86_mask     = x86_stepping(tfms);
+                c->x86_stepping = x86_stepping(tfms);

                 if (cap0 & (1<<19)) {
                         c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
@@ -1162,7 +1162,7 @@ static void identify_cpu(struct cpuinfo_
         c->loops_per_jiffy = loops_per_jiffy;
         c->x86_cache_size = -1;
         c->x86_vendor = X86_VENDOR_UNKNOWN;
-        c->x86_model = c->x86_mask = 0;         /* So far unknown... */
+        c->x86_model = c->x86_stepping = 0;     /* So far unknown... */
         c->x86_vendor_id[0] = '\0';             /* Unset */
         c->x86_model_id[0] = '\0';              /* Unset */
         c->x86_max_cores = 1;
@@ -1353,8 +1353,8 @@ void print_cpu_info(struct cpuinfo_x86 *

         pr_cont(" (family: 0x%x, model: 0x%x", c->x86, c->x86_model);

-        if (c->x86_mask || c->cpuid_level >= 0)
-                pr_cont(", stepping: 0x%x)\n", c->x86_mask);
+        if (c->x86_stepping || c->cpuid_level >= 0)
+                pr_cont(", stepping: 0x%x)\n", c->x86_stepping);
         else
                 pr_cont(")\n");
 }
--- a/arch/x86/kernel/cpu/cyrix.c
+++ b/arch/x86/kernel/cpu/cyrix.c
@@ -215,7 +215,7 @@ static void init_cyrix(struct cpuinfo_x8

         /* common case step number/rev -- exceptions handled below */
         c->x86_model = (dir1 >> 4) + 1;
-        c->x86_mask = dir1 & 0xf;
+        c->x86_stepping = dir1 & 0xf;

         /* Now cook; the original recipe is by Channing Corn, from Cyrix.
          * We do the same thing for each generation: we work out
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -146,7 +146,7 @@ static bool bad_spectre_microcode(struct

         for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) {
                 if (c->x86_model == spectre_bad_microcodes[i].model &&
-                    c->x86_mask == spectre_bad_microcodes[i].stepping)
+                    c->x86_stepping == spectre_bad_microcodes[i].stepping)
                         return (c->microcode <= spectre_bad_microcodes[i].microcode);
         }
         return false;
@@ -193,7 +193,7 @@ static void early_init_intel(struct cpui
          * need the microcode to have already been loaded... so if it is
          * not, recommend a BIOS update and disable large pages.
          */
-        if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_mask <= 2 &&
+        if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_stepping <= 2 &&
             c->microcode < 0x20e) {
                 pr_warn("Atom PSE erratum detected, BIOS microcode update recommended\n");
                 clear_cpu_cap(c, X86_FEATURE_PSE);
@@ -209,7 +209,7 @@ static void early_init_intel(struct cpui

         /* CPUID workaround for 0F33/0F34 CPU */
         if (c->x86 == 0xF && c->x86_model == 0x3
-            && (c->x86_mask == 0x3 || c->x86_mask == 0x4))
+            && (c->x86_stepping == 0x3 || c->x86_stepping == 0x4))
                 c->x86_phys_bits = 36;

         /*
@@ -307,7 +307,7 @@ int ppro_with_ram_bug(void)
         if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
             boot_cpu_data.x86 == 6 &&
             boot_cpu_data.x86_model == 1 &&
-            boot_cpu_data.x86_mask < 8) {
+            boot_cpu_data.x86_stepping < 8) {
                 pr_info("Pentium Pro with Errata#50 detected. Taking evasive action.\n");
                 return 1;
         }
@@ -324,7 +324,7 @@ static void intel_smp_check(struct cpuin
          * Mask B, Pentium, but not Pentium MMX
          */
         if (c->x86 == 5 &&
-            c->x86_mask >= 1 && c->x86_mask <= 4 &&
+            c->x86_stepping >= 1 && c->x86_stepping <= 4 &&
             c->x86_model <= 3) {
                 /*
                  * Remember we have B step Pentia with bugs
@@ -367,7 +367,7 @@ static void intel_workarounds(struct cpu
          * SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until
          * model 3 mask 3
          */
-        if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633)
+        if ((c->x86<<8 | c->x86_model<<4 | c->x86_stepping) < 0x633)
                 clear_cpu_cap(c, X86_FEATURE_SEP);

         /*
@@ -385,7 +385,7 @@ static void intel_workarounds(struct cpu
          * P4 Xeon erratum 037 workaround.
          * Hardware prefetcher may cause stale data to be loaded into the cache.
          */
-        if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) {
+        if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_stepping == 1)) {
                 if (msr_set_bit(MSR_IA32_MISC_ENABLE,
                                 MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT) > 0) {
                         pr_info("CPU: C0 stepping P4 Xeon detected.\n");
@@ -400,7 +400,7 @@ static void intel_workarounds(struct cpu
          * Specification Update").
          */
         if (boot_cpu_has(X86_FEATURE_APIC) && (c->x86<<8 | c->x86_model<<4) == 0x520 &&
-            (c->x86_mask < 0x6 || c->x86_mask == 0xb))
+            (c->x86_stepping < 0x6 || c->x86_stepping == 0xb))
                 set_cpu_bug(c, X86_BUG_11AP);


@@ -647,7 +647,7 @@ static void init_intel(struct cpuinfo_x8
                 case 6:
                         if (l2 == 128)
                                 p = "Celeron (Mendocino)";
-                        else if (c->x86_mask == 0 || c->x86_mask == 5)
+                        else if (c->x86_stepping == 0 || c->x86_stepping == 5)
                                 p = "Celeron-A";
                         break;

--- a/arch/x86/kernel/cpu/intel_rdt.c
+++ b/arch/x86/kernel/cpu/intel_rdt.c
@@ -771,7 +771,7 @@ static __init void rdt_quirks(void)
                         cache_alloc_hsw_probe();
                 break;
         case INTEL_FAM6_SKYLAKE_X:
-                if (boot_cpu_data.x86_mask <= 4)
+                if (boot_cpu_data.x86_stepping <= 4)
                         set_rdt_options("!cmt,!mbmtotal,!mbmlocal,!l3cat");
         }
 }
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -921,7 +921,7 @@ static bool is_blacklisted(unsigned int
          */
         if (c->x86 == 6 &&
             c->x86_model == INTEL_FAM6_BROADWELL_X &&
-            c->x86_mask == 0x01 &&
+            c->x86_stepping == 0x01 &&
             llc_size_per_core > 2621440 &&
             c->microcode < 0x0b000021) {
                 pr_err_once("Erratum BDF90: late loading with revision < 0x0b000021 (0x%x) disabled.\n", c->microcode);
@@ -944,7 +944,7 @@ static enum ucode_state request_microcod
                 return UCODE_NFOUND;

         sprintf(name, "intel-ucode/%02x-%02x-%02x",
-                c->x86, c->x86_model, c->x86_mask);
+                c->x86, c->x86_model, c->x86_stepping);

         if (request_firmware_direct(&firmware, name, device)) {
                 pr_debug("data file %s load failed\n", name);
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -859,7 +859,7 @@ int generic_validate_add_page(unsigned l
          */
         if (is_cpu(INTEL) && boot_cpu_data.x86 == 6 &&
             boot_cpu_data.x86_model == 1 &&
-            boot_cpu_data.x86_mask <= 7) {
+            boot_cpu_data.x86_stepping <= 7) {
                 if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) {
                         pr_warn("mtrr: base(0x%lx000) is not 4 MiB aligned\n", base);
                         return -EINVAL;
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -711,8 +711,8 @@ void __init mtrr_bp_init(void)
                         if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
                             boot_cpu_data.x86 == 0xF &&
                             boot_cpu_data.x86_model == 0x3 &&
-                            (boot_cpu_data.x86_mask == 0x3 ||
-                             boot_cpu_data.x86_mask == 0x4))
+                            (boot_cpu_data.x86_stepping == 0x3 ||
+                             boot_cpu_data.x86_stepping == 0x4))
                                 phys_addr = 36;

                         size_or_mask = SIZE_OR_MASK_BITS(phys_addr);
--- a/arch/x86/kernel/cpu/proc.c
+++ b/arch/x86/kernel/cpu/proc.c
@@ -72,8 +72,8 @@ static int show_cpuinfo(struct seq_file
                    c->x86_model,
                    c->x86_model_id[0] ? c->x86_model_id : "unknown");

-        if (c->x86_mask || c->cpuid_level >= 0)
-                seq_printf(m, "stepping\t: %d\n", c->x86_mask);
+        if (c->x86_stepping || c->cpuid_level >= 0)
+                seq_printf(m, "stepping\t: %d\n", c->x86_stepping);
         else
                 seq_puts(m, "stepping\t: unknown\n");
         if (c->microcode)
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -37,7 +37,7 @@
 #define X86             new_cpu_data+CPUINFO_x86
 #define X86_VENDOR      new_cpu_data+CPUINFO_x86_vendor
 #define X86_MODEL       new_cpu_data+CPUINFO_x86_model
-#define X86_MASK        new_cpu_data+CPUINFO_x86_mask
+#define X86_STEPPING    new_cpu_data+CPUINFO_x86_stepping
 #define X86_HARD_MATH   new_cpu_data+CPUINFO_hard_math
 #define X86_CPUID       new_cpu_data+CPUINFO_cpuid_level
 #define X86_CAPABILITY  new_cpu_data+CPUINFO_x86_capability
@@ -332,7 +332,7 @@ ENTRY(startup_32_smp)
         shrb $4,%al
         movb %al,X86_MODEL
         andb $0x0f,%cl          # mask mask revision
-        movb %cl,X86_MASK
+        movb %cl,X86_STEPPING
         movl %edx,X86_CAPABILITY

 .Lis486:
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -407,7 +407,7 @@ static inline void __init construct_defa
         processor.apicver = mpc_default_type > 4 ? 0x10 : 0x01;
         processor.cpuflag = CPU_ENABLED;
         processor.cpufeature = (boot_cpu_data.x86 << 8) |
-            (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_mask;
+            (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_stepping;
         processor.featureflag = boot_cpu_data.x86_capability[CPUID_1_EDX];
         processor.reserved[0] = 0;
         processor.reserved[1] = 0;
--- a/arch/x86/lib/cpu.c
+++ b/arch/x86/lib/cpu.c
@@ -18,7 +18,7 @@ unsigned int x86_model(unsigned int sig)
 {
         unsigned int fam, model;

-         fam = x86_family(sig);
+        fam = x86_family(sig);

         model = (sig >> 4) & 0xf;

--- a/drivers/char/hw_random/via-rng.c
+++ b/drivers/char/hw_random/via-rng.c
@@ -162,7 +162,7 @@ static int via_rng_init(struct hwrng *rn
         /* Enable secondary noise source on CPUs where it is present. */

         /* Nehemiah stepping 8 and higher */
-        if ((c->x86_model == 9) && (c->x86_mask > 7))
+        if ((c->x86_model == 9) && (c->x86_stepping > 7))
                 lo |= VIA_NOISESRC2;

         /* Esther */
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -629,7 +629,7 @@ static int acpi_cpufreq_blacklist(struct
         if (c->x86_vendor == X86_VENDOR_INTEL) {
                 if ((c->x86 == 15) &&
                     (c->x86_model == 6) &&
-                    (c->x86_mask == 8)) {
+                    (c->x86_stepping == 8)) {
                         pr_info("Intel(R) Xeon(R) 7100 Errata AL30, processors may lock up on frequency changes: disabling acpi-cpufreq\n");
                         return -ENODEV;
                 }
--- a/drivers/cpufreq/longhaul.c
+++ b/drivers/cpufreq/longhaul.c
@@ -775,7 +775,7 @@ static int longhaul_cpu_init(struct cpuf
                 break;

         case 7:
-                switch (c->x86_mask) {
+                switch (c->x86_stepping) {
                 case 0:
                         longhaul_version = TYPE_LONGHAUL_V1;
                         cpu_model = CPU_SAMUEL2;
@@ -787,7 +787,7 @@ static int longhaul_cpu_init(struct cpuf
                         break;
                 case 1 ... 15:
                         longhaul_version = TYPE_LONGHAUL_V2;
-                        if (c->x86_mask < 8) {
+                        if (c->x86_stepping < 8) {
                                 cpu_model = CPU_SAMUEL2;
                                 cpuname = "C3 'Samuel 2' [C5B]";
                         } else {
@@ -814,7 +814,7 @@ static int longhaul_cpu_init(struct cpuf
                 numscales = 32;
                 memcpy(mults, nehemiah_mults, sizeof(nehemiah_mults));
                 memcpy(eblcr, nehemiah_eblcr, sizeof(nehemiah_eblcr));
-                switch (c->x86_mask) {
+                switch (c->x86_stepping) {
                 case 0 ... 1:
                         cpu_model = CPU_NEHEMIAH;
                         cpuname = "C3 'Nehemiah A' [C5XLOE]";
--- a/drivers/cpufreq/p4-clockmod.c
+++ b/drivers/cpufreq/p4-clockmod.c
@@ -168,7 +168,7 @@ static int cpufreq_p4_cpu_init(struct cp
 #endif

         /* Errata workaround */
-        cpuid = (c->x86 << 8) | (c->x86_model << 4) | c->x86_mask;
+        cpuid = (c->x86 << 8) | (c->x86_model << 4) | c->x86_stepping;
         switch (cpuid) {
         case 0x0f07:
         case 0x0f0a:
--- a/drivers/cpufreq/powernow-k7.c
+++ b/drivers/cpufreq/powernow-k7.c
@@ -131,7 +131,7 @@ static int check_powernow(void)
                 return 0;
         }

-        if ((c->x86_model == 6) && (c->x86_mask == 0)) {
+        if ((c->x86_model == 6) && (c->x86_stepping == 0)) {
                 pr_info("K7 660[A0] core detected, enabling errata workarounds\n");
                 have_a0 = 1;
         }
--- a/drivers/cpufreq/speedstep-centrino.c
+++ b/drivers/cpufreq/speedstep-centrino.c
@@ -37,7 +37,7 @@ struct cpu_id
 {
         __u8    x86;            /* CPU family */
         __u8    x86_model;      /* model */
-        __u8    x86_mask;       /* stepping */
+        __u8    x86_stepping;   /* stepping */
 };

 enum {
@@ -277,7 +277,7 @@ static int centrino_verify_cpu_id(const
 {
         if ((c->x86 == x->x86) &&
             (c->x86_model == x->x86_model) &&
-            (c->x86_mask == x->x86_mask))
+            (c->x86_stepping == x->x86_stepping))
                 return 1;
         return 0;
 }
--- a/drivers/cpufreq/speedstep-lib.c
+++ b/drivers/cpufreq/speedstep-lib.c
@@ -272,9 +272,9 @@ unsigned int speedstep_detect_processor(
                 ebx = cpuid_ebx(0x00000001);
                 ebx &= 0x000000FF;

-                pr_debug("ebx value is %x, x86_mask is %x\n", ebx, c->x86_mask);
+                pr_debug("ebx value is %x, x86_stepping is %x\n", ebx, c->x86_stepping);

-                switch (c->x86_mask) {
+                switch (c->x86_stepping) {
                 case 4:
                         /*
                          * B-stepping [M-P4-M]
@@ -361,7 +361,7 @@ unsigned int speedstep_detect_processor(
                          msr_lo, msr_hi);
                 if ((msr_hi & (1<<18)) &&
                     (relaxed_check ? 1 : (msr_hi & (3<<24)))) {
-                        if (c->x86_mask == 0x01) {
+                        if (c->x86_stepping == 0x01) {
                                 pr_debug("early PIII version\n");
                                 return SPEEDSTEP_CPU_PIII_C_EARLY;
                         } else
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -512,7 +512,7 @@ static int __init padlock_init(void)

         printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n");

-        if (c->x86 == 6 && c->x86_model == 15 && c->x86_mask == 2) {
+        if (c->x86 == 6 && c->x86_model == 15 && c->x86_stepping == 2) {
                 ecb_fetch_blocks = MAX_ECB_FETCH_BLOCKS;
                 cbc_fetch_blocks = MAX_CBC_FETCH_BLOCKS;
                 printk(KERN_NOTICE PFX "VIA Nano stepping 2 detected: enabling workaround.\n");
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -3147,7 +3147,7 @@ static struct amd64_family_type *per_fam
         struct amd64_family_type *fam_type = NULL;

         pvt->ext_model  = boot_cpu_data.x86_model >> 4;
-        pvt->stepping   = boot_cpu_data.x86_mask;
+        pvt->stepping   = boot_cpu_data.x86_stepping;
         pvt->model      = boot_cpu_data.x86_model;
         pvt->fam        = boot_cpu_data.x86;

--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -268,13 +268,13 @@ static int adjust_tjmax(struct cpuinfo_x
         for (i = 0; i < ARRAY_SIZE(tjmax_model_table); i++) {
                 const struct tjmax_model *tm = &tjmax_model_table[i];
                 if (c->x86_model == tm->model &&
-                    (tm->mask == ANY || c->x86_mask == tm->mask))
+                    (tm->mask == ANY || c->x86_stepping == tm->mask))
                         return tm->tjmax;
         }

         /* Early chips have no MSR for TjMax */

-        if (c->x86_model == 0xf && c->x86_mask < 4)
+        if (c->x86_model == 0xf && c->x86_stepping < 4)
                 usemsr_ee = 0;

         if (c->x86_model > 0xe && usemsr_ee) {
@@ -425,7 +425,7 @@ static int chk_ucode_version(unsigned in
          * Readings might stop update when processor visited too deep sleep,
          * fixed for stepping D0 (6EC).
          */
-        if (c->x86_model == 0xe && c->x86_mask < 0xc && c->microcode < 0x39) {
+        if (c->x86_model == 0xe && c->x86_stepping < 0xc && c->microcode < 0x39) {
                 pr_err("Errata AE18 not fixed, update BIOS or microcode of the CPU!\n");
                 return -ENODEV;
         }
--- a/drivers/hwmon/hwmon-vid.c
+++ b/drivers/hwmon/hwmon-vid.c
@@ -293,7 +293,7 @@ u8 vid_which_vrm(void)
         if (c->x86 < 6)         /* Any CPU with family lower than 6 */
                 return 0;       /* doesn't have VID */

-        vrm_ret = find_vrm(c->x86, c->x86_model, c->x86_mask, c->x86_vendor);
+        vrm_ret = find_vrm(c->x86, c->x86_model, c->x86_stepping, c->x86_vendor);
         if (vrm_ret == 134)
                 vrm_ret = get_via_model_d_vrm();
         if (vrm_ret == 0)
--- a/drivers/hwmon/k10temp.c
+++ b/drivers/hwmon/k10temp.c
@@ -179,7 +179,7 @@ static bool has_erratum_319(struct pci_d
          * and AM3 formats, but that's the best we can do.
          */
         return boot_cpu_data.x86_model < 4 ||
-               (boot_cpu_data.x86_model == 4 && boot_cpu_data.x86_mask <= 2);
+               (boot_cpu_data.x86_model == 4 && boot_cpu_data.x86_stepping <= 2);
 }

 static int k10temp_probe(struct pci_dev *pdev,
--- a/drivers/hwmon/k8temp.c
+++ b/drivers/hwmon/k8temp.c
@@ -187,7 +187,7 @@ static int k8temp_probe(struct pci_dev *
                 return -ENOMEM;

         model = boot_cpu_data.x86_model;
-        stepping = boot_cpu_data.x86_mask;
+        stepping = boot_cpu_data.x86_stepping;

         /* feature available since SH-C0, exclude older revisions */
         if ((model == 4 && stepping == 0) ||
--- a/drivers/video/fbdev/geode/video_gx.c
+++ b/drivers/video/fbdev/geode/video_gx.c
@@ -127,7 +127,7 @@ void gx_set_dclk_frequency(struct fb_inf
         int timeout = 1000;

         /* Rev. 1 Geode GXs use a 14 MHz reference clock instead of 48 MHz. */
-        if (cpu_data(0).x86_mask == 1) {
+        if (cpu_data(0).x86_stepping == 1) {
                 pll_table = gx_pll_table_14MHz;
                 pll_table_len = ARRAY_SIZE(gx_pll_table_14MHz);
         } else {