1 From b399151cb48db30ad1e0e93dd40d68c6d007b637 Mon Sep 17 00:00:00 2001
2 From: Jia Zhang <qianyue.zj@alibaba-inc.com>
3 Date: Mon, 1 Jan 2018 09:52:10 +0800
4 Subject: x86/cpu: Rename cpu_data.x86_mask to cpu_data.x86_stepping
6 From: Jia Zhang <qianyue.zj@alibaba-inc.com>
8 commit b399151cb48db30ad1e0e93dd40d68c6d007b637 upstream.
10 x86_mask is a confusing name which is hard to associate with the
processor's stepping.
13 Additionally, correct an indent issue in lib/cpu.c.
15 Signed-off-by: Jia Zhang <qianyue.zj@alibaba-inc.com>
16 [ Updated it to more recent kernels. ]
17 Cc: Linus Torvalds <torvalds@linux-foundation.org>
18 Cc: Peter Zijlstra <peterz@infradead.org>
19 Cc: Thomas Gleixner <tglx@linutronix.de>
21 Cc: tony.luck@intel.com
22 Link: http://lkml.kernel.org/r/1514771530-70829-1-git-send-email-qianyue.zj@alibaba-inc.com
23 Signed-off-by: Ingo Molnar <mingo@kernel.org>
24 Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
27 arch/x86/events/intel/core.c | 2 +-
28 arch/x86/events/intel/lbr.c | 2 +-
29 arch/x86/events/intel/p6.c | 2 +-
30 arch/x86/include/asm/acpi.h | 2 +-
31 arch/x86/include/asm/processor.h | 2 +-
32 arch/x86/kernel/amd_nb.c | 2 +-
33 arch/x86/kernel/apic/apic.c | 6 +++---
34 arch/x86/kernel/asm-offsets_32.c | 2 +-
35 arch/x86/kernel/cpu/amd.c | 28 ++++++++++++++--------------
36 arch/x86/kernel/cpu/centaur.c | 4 ++--
37 arch/x86/kernel/cpu/common.c | 8 ++++----
38 arch/x86/kernel/cpu/cyrix.c | 2 +-
39 arch/x86/kernel/cpu/intel.c | 18 +++++++++---------
40 arch/x86/kernel/cpu/intel_rdt.c | 2 +-
41 arch/x86/kernel/cpu/microcode/intel.c | 4 ++--
42 arch/x86/kernel/cpu/mtrr/generic.c | 2 +-
43 arch/x86/kernel/cpu/mtrr/main.c | 4 ++--
44 arch/x86/kernel/cpu/proc.c | 4 ++--
45 arch/x86/kernel/head_32.S | 4 ++--
46 arch/x86/kernel/mpparse.c | 2 +-
47 arch/x86/lib/cpu.c | 2 +-
48 drivers/char/hw_random/via-rng.c | 2 +-
49 drivers/cpufreq/acpi-cpufreq.c | 2 +-
50 drivers/cpufreq/longhaul.c | 6 +++---
51 drivers/cpufreq/p4-clockmod.c | 2 +-
52 drivers/cpufreq/powernow-k7.c | 2 +-
53 drivers/cpufreq/speedstep-centrino.c | 4 ++--
54 drivers/cpufreq/speedstep-lib.c | 6 +++---
55 drivers/crypto/padlock-aes.c | 2 +-
56 drivers/edac/amd64_edac.c | 2 +-
57 drivers/hwmon/coretemp.c | 6 +++---
58 drivers/hwmon/hwmon-vid.c | 2 +-
59 drivers/hwmon/k10temp.c | 2 +-
60 drivers/hwmon/k8temp.c | 2 +-
61 drivers/video/fbdev/geode/video_gx.c | 2 +-
62 35 files changed, 73 insertions(+), 73 deletions(-)
64 --- a/arch/x86/events/intel/core.c
65 +++ b/arch/x86/events/intel/core.c
66 @@ -3559,7 +3559,7 @@ static int intel_snb_pebs_broken(int cpu
69 case INTEL_FAM6_SANDYBRIDGE_X:
70 - switch (cpu_data(cpu).x86_mask) {
71 + switch (cpu_data(cpu).x86_stepping) {
72 case 6: rev = 0x618; break;
73 case 7: rev = 0x70c; break;
75 --- a/arch/x86/events/intel/lbr.c
76 +++ b/arch/x86/events/intel/lbr.c
77 @@ -1186,7 +1186,7 @@ void __init intel_pmu_lbr_init_atom(void
80 if (boot_cpu_data.x86_model == 28
81 - && boot_cpu_data.x86_mask < 10) {
82 + && boot_cpu_data.x86_stepping < 10) {
83 pr_cont("LBR disabled due to erratum");
86 --- a/arch/x86/events/intel/p6.c
87 +++ b/arch/x86/events/intel/p6.c
88 @@ -234,7 +234,7 @@ static __initconst const struct x86_pmu
90 static __init void p6_pmu_rdpmc_quirk(void)
92 - if (boot_cpu_data.x86_mask < 9) {
93 + if (boot_cpu_data.x86_stepping < 9) {
95 * PPro erratum 26; fixed in stepping 9 and above.
97 --- a/arch/x86/include/asm/acpi.h
98 +++ b/arch/x86/include/asm/acpi.h
99 @@ -94,7 +94,7 @@ static inline unsigned int acpi_processo
100 if (boot_cpu_data.x86 == 0x0F &&
101 boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
102 boot_cpu_data.x86_model <= 0x05 &&
103 - boot_cpu_data.x86_mask < 0x0A)
104 + boot_cpu_data.x86_stepping < 0x0A)
106 else if (boot_cpu_has(X86_BUG_AMD_APIC_C1E))
108 --- a/arch/x86/include/asm/processor.h
109 +++ b/arch/x86/include/asm/processor.h
110 @@ -91,7 +91,7 @@ struct cpuinfo_x86 {
111 __u8 x86; /* CPU family */
112 __u8 x86_vendor; /* CPU vendor */
117 /* Number of 4K pages in DTLB/ITLB combined(in pages): */
119 --- a/arch/x86/kernel/amd_nb.c
120 +++ b/arch/x86/kernel/amd_nb.c
121 @@ -235,7 +235,7 @@ int amd_cache_northbridges(void)
122 if (boot_cpu_data.x86 == 0x10 &&
123 boot_cpu_data.x86_model >= 0x8 &&
124 (boot_cpu_data.x86_model > 0x9 ||
125 - boot_cpu_data.x86_mask >= 0x1))
126 + boot_cpu_data.x86_stepping >= 0x1))
127 amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;
129 if (boot_cpu_data.x86 == 0x15)
130 --- a/arch/x86/kernel/apic/apic.c
131 +++ b/arch/x86/kernel/apic/apic.c
132 @@ -553,7 +553,7 @@ static DEFINE_PER_CPU(struct clock_event
134 static u32 hsx_deadline_rev(void)
136 - switch (boot_cpu_data.x86_mask) {
137 + switch (boot_cpu_data.x86_stepping) {
138 case 0x02: return 0x3a; /* EP */
139 case 0x04: return 0x0f; /* EX */
141 @@ -563,7 +563,7 @@ static u32 hsx_deadline_rev(void)
143 static u32 bdx_deadline_rev(void)
145 - switch (boot_cpu_data.x86_mask) {
146 + switch (boot_cpu_data.x86_stepping) {
147 case 0x02: return 0x00000011;
148 case 0x03: return 0x0700000e;
149 case 0x04: return 0x0f00000c;
150 @@ -575,7 +575,7 @@ static u32 bdx_deadline_rev(void)
152 static u32 skx_deadline_rev(void)
154 - switch (boot_cpu_data.x86_mask) {
155 + switch (boot_cpu_data.x86_stepping) {
156 case 0x03: return 0x01000136;
157 case 0x04: return 0x02000014;
159 --- a/arch/x86/kernel/asm-offsets_32.c
160 +++ b/arch/x86/kernel/asm-offsets_32.c
161 @@ -18,7 +18,7 @@ void foo(void)
162 OFFSET(CPUINFO_x86, cpuinfo_x86, x86);
163 OFFSET(CPUINFO_x86_vendor, cpuinfo_x86, x86_vendor);
164 OFFSET(CPUINFO_x86_model, cpuinfo_x86, x86_model);
165 - OFFSET(CPUINFO_x86_mask, cpuinfo_x86, x86_mask);
166 + OFFSET(CPUINFO_x86_stepping, cpuinfo_x86, x86_stepping);
167 OFFSET(CPUINFO_cpuid_level, cpuinfo_x86, cpuid_level);
168 OFFSET(CPUINFO_x86_capability, cpuinfo_x86, x86_capability);
169 OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id);
170 --- a/arch/x86/kernel/cpu/amd.c
171 +++ b/arch/x86/kernel/cpu/amd.c
172 @@ -119,7 +119,7 @@ static void init_amd_k6(struct cpuinfo_x
176 - if (c->x86_model == 6 && c->x86_mask == 1) {
177 + if (c->x86_model == 6 && c->x86_stepping == 1) {
178 const int K6_BUG_LOOP = 1000000;
180 void (*f_vide)(void);
181 @@ -149,7 +149,7 @@ static void init_amd_k6(struct cpuinfo_x
183 /* K6 with old style WHCR */
184 if (c->x86_model < 8 ||
185 - (c->x86_model == 8 && c->x86_mask < 8)) {
186 + (c->x86_model == 8 && c->x86_stepping < 8)) {
187 /* We can only write allocate on the low 508Mb */
190 @@ -168,7 +168,7 @@ static void init_amd_k6(struct cpuinfo_x
194 - if ((c->x86_model == 8 && c->x86_mask > 7) ||
195 + if ((c->x86_model == 8 && c->x86_stepping > 7) ||
196 c->x86_model == 9 || c->x86_model == 13) {
197 /* The more serious chips .. */
199 @@ -221,7 +221,7 @@ static void init_amd_k7(struct cpuinfo_x
200 * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx
201 * As per AMD technical note 27212 0.2
203 - if ((c->x86_model == 8 && c->x86_mask >= 1) || (c->x86_model > 8)) {
204 + if ((c->x86_model == 8 && c->x86_stepping >= 1) || (c->x86_model > 8)) {
205 rdmsr(MSR_K7_CLK_CTL, l, h);
206 if ((l & 0xfff00000) != 0x20000000) {
207 pr_info("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n",
208 @@ -241,12 +241,12 @@ static void init_amd_k7(struct cpuinfo_x
209 * but they are not certified as MP capable.
211 /* Athlon 660/661 is valid. */
212 - if ((c->x86_model == 6) && ((c->x86_mask == 0) ||
213 - (c->x86_mask == 1)))
214 + if ((c->x86_model == 6) && ((c->x86_stepping == 0) ||
215 + (c->x86_stepping == 1)))
218 /* Duron 670 is valid */
219 - if ((c->x86_model == 7) && (c->x86_mask == 0))
220 + if ((c->x86_model == 7) && (c->x86_stepping == 0))
224 @@ -256,8 +256,8 @@ static void init_amd_k7(struct cpuinfo_x
225 * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for
228 - if (((c->x86_model == 6) && (c->x86_mask >= 2)) ||
229 - ((c->x86_model == 7) && (c->x86_mask >= 1)) ||
230 + if (((c->x86_model == 6) && (c->x86_stepping >= 2)) ||
231 + ((c->x86_model == 7) && (c->x86_stepping >= 1)) ||
233 if (cpu_has(c, X86_FEATURE_MP))
235 @@ -583,7 +583,7 @@ static void early_init_amd(struct cpuinf
236 /* Set MTRR capability flag if appropriate */
238 if (c->x86_model == 13 || c->x86_model == 9 ||
239 - (c->x86_model == 8 && c->x86_mask >= 8))
240 + (c->x86_model == 8 && c->x86_stepping >= 8))
241 set_cpu_cap(c, X86_FEATURE_K6_MTRR);
243 #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI)
244 @@ -769,7 +769,7 @@ static void init_amd_zn(struct cpuinfo_x
245 * Fix erratum 1076: CPB feature bit not being set in CPUID. It affects
246 * all up to and including B1.
248 - if (c->x86_model <= 1 && c->x86_mask <= 1)
249 + if (c->x86_model <= 1 && c->x86_stepping <= 1)
250 set_cpu_cap(c, X86_FEATURE_CPB);
253 @@ -880,11 +880,11 @@ static unsigned int amd_size_cache(struc
254 /* AMD errata T13 (order #21922) */
257 - if (c->x86_model == 3 && c->x86_mask == 0)
258 + if (c->x86_model == 3 && c->x86_stepping == 0)
260 /* Tbird rev A1/A2 */
261 if (c->x86_model == 4 &&
262 - (c->x86_mask == 0 || c->x86_mask == 1))
263 + (c->x86_stepping == 0 || c->x86_stepping == 1))
267 @@ -1021,7 +1021,7 @@ static bool cpu_has_amd_erratum(struct c
270 /* OSVW unavailable or ID unknown, match family-model-stepping range */
271 - ms = (cpu->x86_model << 4) | cpu->x86_mask;
272 + ms = (cpu->x86_model << 4) | cpu->x86_stepping;
273 while ((range = *erratum++))
274 if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) &&
275 (ms >= AMD_MODEL_RANGE_START(range)) &&
276 --- a/arch/x86/kernel/cpu/centaur.c
277 +++ b/arch/x86/kernel/cpu/centaur.c
278 @@ -136,7 +136,7 @@ static void init_centaur(struct cpuinfo_
279 clear_cpu_cap(c, X86_FEATURE_TSC);
282 - switch (c->x86_mask) {
283 + switch (c->x86_stepping) {
287 @@ -211,7 +211,7 @@ centaur_size_cache(struct cpuinfo_x86 *c
288 * - Note, it seems this may only be in engineering samples.
290 if ((c->x86 == 6) && (c->x86_model == 9) &&
291 - (c->x86_mask == 1) && (size == 65))
292 + (c->x86_stepping == 1) && (size == 65))
296 --- a/arch/x86/kernel/cpu/common.c
297 +++ b/arch/x86/kernel/cpu/common.c
298 @@ -707,7 +707,7 @@ void cpu_detect(struct cpuinfo_x86 *c)
299 cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
300 c->x86 = x86_family(tfms);
301 c->x86_model = x86_model(tfms);
302 - c->x86_mask = x86_stepping(tfms);
303 + c->x86_stepping = x86_stepping(tfms);
305 if (cap0 & (1<<19)) {
306 c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
307 @@ -1162,7 +1162,7 @@ static void identify_cpu(struct cpuinfo_
308 c->loops_per_jiffy = loops_per_jiffy;
309 c->x86_cache_size = -1;
310 c->x86_vendor = X86_VENDOR_UNKNOWN;
311 - c->x86_model = c->x86_mask = 0; /* So far unknown... */
312 + c->x86_model = c->x86_stepping = 0; /* So far unknown... */
313 c->x86_vendor_id[0] = '\0'; /* Unset */
314 c->x86_model_id[0] = '\0'; /* Unset */
315 c->x86_max_cores = 1;
316 @@ -1353,8 +1353,8 @@ void print_cpu_info(struct cpuinfo_x86 *
318 pr_cont(" (family: 0x%x, model: 0x%x", c->x86, c->x86_model);
320 - if (c->x86_mask || c->cpuid_level >= 0)
321 - pr_cont(", stepping: 0x%x)\n", c->x86_mask);
322 + if (c->x86_stepping || c->cpuid_level >= 0)
323 + pr_cont(", stepping: 0x%x)\n", c->x86_stepping);
327 --- a/arch/x86/kernel/cpu/cyrix.c
328 +++ b/arch/x86/kernel/cpu/cyrix.c
329 @@ -215,7 +215,7 @@ static void init_cyrix(struct cpuinfo_x8
331 /* common case step number/rev -- exceptions handled below */
332 c->x86_model = (dir1 >> 4) + 1;
333 - c->x86_mask = dir1 & 0xf;
334 + c->x86_stepping = dir1 & 0xf;
336 /* Now cook; the original recipe is by Channing Corn, from Cyrix.
337 * We do the same thing for each generation: we work out
338 --- a/arch/x86/kernel/cpu/intel.c
339 +++ b/arch/x86/kernel/cpu/intel.c
340 @@ -146,7 +146,7 @@ static bool bad_spectre_microcode(struct
342 for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) {
343 if (c->x86_model == spectre_bad_microcodes[i].model &&
344 - c->x86_mask == spectre_bad_microcodes[i].stepping)
345 + c->x86_stepping == spectre_bad_microcodes[i].stepping)
346 return (c->microcode <= spectre_bad_microcodes[i].microcode);
349 @@ -193,7 +193,7 @@ static void early_init_intel(struct cpui
350 * need the microcode to have already been loaded... so if it is
351 * not, recommend a BIOS update and disable large pages.
353 - if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_mask <= 2 &&
354 + if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_stepping <= 2 &&
355 c->microcode < 0x20e) {
356 pr_warn("Atom PSE erratum detected, BIOS microcode update recommended\n");
357 clear_cpu_cap(c, X86_FEATURE_PSE);
358 @@ -209,7 +209,7 @@ static void early_init_intel(struct cpui
360 /* CPUID workaround for 0F33/0F34 CPU */
361 if (c->x86 == 0xF && c->x86_model == 0x3
362 - && (c->x86_mask == 0x3 || c->x86_mask == 0x4))
363 + && (c->x86_stepping == 0x3 || c->x86_stepping == 0x4))
364 c->x86_phys_bits = 36;
367 @@ -307,7 +307,7 @@ int ppro_with_ram_bug(void)
368 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
369 boot_cpu_data.x86 == 6 &&
370 boot_cpu_data.x86_model == 1 &&
371 - boot_cpu_data.x86_mask < 8) {
372 + boot_cpu_data.x86_stepping < 8) {
373 pr_info("Pentium Pro with Errata#50 detected. Taking evasive action.\n");
376 @@ -324,7 +324,7 @@ static void intel_smp_check(struct cpuin
377 * Mask B, Pentium, but not Pentium MMX
380 - c->x86_mask >= 1 && c->x86_mask <= 4 &&
381 + c->x86_stepping >= 1 && c->x86_stepping <= 4 &&
384 * Remember we have B step Pentia with bugs
385 @@ -367,7 +367,7 @@ static void intel_workarounds(struct cpu
386 * SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until
389 - if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633)
390 + if ((c->x86<<8 | c->x86_model<<4 | c->x86_stepping) < 0x633)
391 clear_cpu_cap(c, X86_FEATURE_SEP);
394 @@ -385,7 +385,7 @@ static void intel_workarounds(struct cpu
395 * P4 Xeon erratum 037 workaround.
396 * Hardware prefetcher may cause stale data to be loaded into the cache.
398 - if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) {
399 + if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_stepping == 1)) {
400 if (msr_set_bit(MSR_IA32_MISC_ENABLE,
401 MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT) > 0) {
402 pr_info("CPU: C0 stepping P4 Xeon detected.\n");
403 @@ -400,7 +400,7 @@ static void intel_workarounds(struct cpu
404 * Specification Update").
406 if (boot_cpu_has(X86_FEATURE_APIC) && (c->x86<<8 | c->x86_model<<4) == 0x520 &&
407 - (c->x86_mask < 0x6 || c->x86_mask == 0xb))
408 + (c->x86_stepping < 0x6 || c->x86_stepping == 0xb))
409 set_cpu_bug(c, X86_BUG_11AP);
412 @@ -647,7 +647,7 @@ static void init_intel(struct cpuinfo_x8
415 p = "Celeron (Mendocino)";
416 - else if (c->x86_mask == 0 || c->x86_mask == 5)
417 + else if (c->x86_stepping == 0 || c->x86_stepping == 5)
421 --- a/arch/x86/kernel/cpu/intel_rdt.c
422 +++ b/arch/x86/kernel/cpu/intel_rdt.c
423 @@ -771,7 +771,7 @@ static __init void rdt_quirks(void)
424 cache_alloc_hsw_probe();
426 case INTEL_FAM6_SKYLAKE_X:
427 - if (boot_cpu_data.x86_mask <= 4)
428 + if (boot_cpu_data.x86_stepping <= 4)
429 set_rdt_options("!cmt,!mbmtotal,!mbmlocal,!l3cat");
432 --- a/arch/x86/kernel/cpu/microcode/intel.c
433 +++ b/arch/x86/kernel/cpu/microcode/intel.c
434 @@ -921,7 +921,7 @@ static bool is_blacklisted(unsigned int
437 c->x86_model == INTEL_FAM6_BROADWELL_X &&
438 - c->x86_mask == 0x01 &&
439 + c->x86_stepping == 0x01 &&
440 llc_size_per_core > 2621440 &&
441 c->microcode < 0x0b000021) {
442 pr_err_once("Erratum BDF90: late loading with revision < 0x0b000021 (0x%x) disabled.\n", c->microcode);
443 @@ -944,7 +944,7 @@ static enum ucode_state request_microcod
446 sprintf(name, "intel-ucode/%02x-%02x-%02x",
447 - c->x86, c->x86_model, c->x86_mask);
448 + c->x86, c->x86_model, c->x86_stepping);
450 if (request_firmware_direct(&firmware, name, device)) {
451 pr_debug("data file %s load failed\n", name);
452 --- a/arch/x86/kernel/cpu/mtrr/generic.c
453 +++ b/arch/x86/kernel/cpu/mtrr/generic.c
454 @@ -859,7 +859,7 @@ int generic_validate_add_page(unsigned l
456 if (is_cpu(INTEL) && boot_cpu_data.x86 == 6 &&
457 boot_cpu_data.x86_model == 1 &&
458 - boot_cpu_data.x86_mask <= 7) {
459 + boot_cpu_data.x86_stepping <= 7) {
460 if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) {
461 pr_warn("mtrr: base(0x%lx000) is not 4 MiB aligned\n", base);
463 --- a/arch/x86/kernel/cpu/mtrr/main.c
464 +++ b/arch/x86/kernel/cpu/mtrr/main.c
465 @@ -711,8 +711,8 @@ void __init mtrr_bp_init(void)
466 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
467 boot_cpu_data.x86 == 0xF &&
468 boot_cpu_data.x86_model == 0x3 &&
469 - (boot_cpu_data.x86_mask == 0x3 ||
470 - boot_cpu_data.x86_mask == 0x4))
471 + (boot_cpu_data.x86_stepping == 0x3 ||
472 + boot_cpu_data.x86_stepping == 0x4))
475 size_or_mask = SIZE_OR_MASK_BITS(phys_addr);
476 --- a/arch/x86/kernel/cpu/proc.c
477 +++ b/arch/x86/kernel/cpu/proc.c
478 @@ -72,8 +72,8 @@ static int show_cpuinfo(struct seq_file
480 c->x86_model_id[0] ? c->x86_model_id : "unknown");
482 - if (c->x86_mask || c->cpuid_level >= 0)
483 - seq_printf(m, "stepping\t: %d\n", c->x86_mask);
484 + if (c->x86_stepping || c->cpuid_level >= 0)
485 + seq_printf(m, "stepping\t: %d\n", c->x86_stepping);
487 seq_puts(m, "stepping\t: unknown\n");
489 --- a/arch/x86/kernel/head_32.S
490 +++ b/arch/x86/kernel/head_32.S
492 #define X86 new_cpu_data+CPUINFO_x86
493 #define X86_VENDOR new_cpu_data+CPUINFO_x86_vendor
494 #define X86_MODEL new_cpu_data+CPUINFO_x86_model
495 -#define X86_MASK new_cpu_data+CPUINFO_x86_mask
496 +#define X86_STEPPING new_cpu_data+CPUINFO_x86_stepping
497 #define X86_HARD_MATH new_cpu_data+CPUINFO_hard_math
498 #define X86_CPUID new_cpu_data+CPUINFO_cpuid_level
499 #define X86_CAPABILITY new_cpu_data+CPUINFO_x86_capability
500 @@ -332,7 +332,7 @@ ENTRY(startup_32_smp)
503 andb $0x0f,%cl # mask mask revision
505 + movb %cl,X86_STEPPING
506 movl %edx,X86_CAPABILITY
509 --- a/arch/x86/kernel/mpparse.c
510 +++ b/arch/x86/kernel/mpparse.c
511 @@ -407,7 +407,7 @@ static inline void __init construct_defa
512 processor.apicver = mpc_default_type > 4 ? 0x10 : 0x01;
513 processor.cpuflag = CPU_ENABLED;
514 processor.cpufeature = (boot_cpu_data.x86 << 8) |
515 - (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_mask;
516 + (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_stepping;
517 processor.featureflag = boot_cpu_data.x86_capability[CPUID_1_EDX];
518 processor.reserved[0] = 0;
519 processor.reserved[1] = 0;
520 --- a/arch/x86/lib/cpu.c
521 +++ b/arch/x86/lib/cpu.c
522 @@ -18,7 +18,7 @@ unsigned int x86_model(unsigned int sig)
524 unsigned int fam, model;
526 - fam = x86_family(sig);
527 + fam = x86_family(sig);
529 model = (sig >> 4) & 0xf;
531 --- a/drivers/char/hw_random/via-rng.c
532 +++ b/drivers/char/hw_random/via-rng.c
533 @@ -162,7 +162,7 @@ static int via_rng_init(struct hwrng *rn
534 /* Enable secondary noise source on CPUs where it is present. */
536 /* Nehemiah stepping 8 and higher */
537 - if ((c->x86_model == 9) && (c->x86_mask > 7))
538 + if ((c->x86_model == 9) && (c->x86_stepping > 7))
542 --- a/drivers/cpufreq/acpi-cpufreq.c
543 +++ b/drivers/cpufreq/acpi-cpufreq.c
544 @@ -629,7 +629,7 @@ static int acpi_cpufreq_blacklist(struct
545 if (c->x86_vendor == X86_VENDOR_INTEL) {
546 if ((c->x86 == 15) &&
547 (c->x86_model == 6) &&
548 - (c->x86_mask == 8)) {
549 + (c->x86_stepping == 8)) {
550 pr_info("Intel(R) Xeon(R) 7100 Errata AL30, processors may lock up on frequency changes: disabling acpi-cpufreq\n");
553 --- a/drivers/cpufreq/longhaul.c
554 +++ b/drivers/cpufreq/longhaul.c
555 @@ -775,7 +775,7 @@ static int longhaul_cpu_init(struct cpuf
559 - switch (c->x86_mask) {
560 + switch (c->x86_stepping) {
562 longhaul_version = TYPE_LONGHAUL_V1;
563 cpu_model = CPU_SAMUEL2;
564 @@ -787,7 +787,7 @@ static int longhaul_cpu_init(struct cpuf
567 longhaul_version = TYPE_LONGHAUL_V2;
568 - if (c->x86_mask < 8) {
569 + if (c->x86_stepping < 8) {
570 cpu_model = CPU_SAMUEL2;
571 cpuname = "C3 'Samuel 2' [C5B]";
573 @@ -814,7 +814,7 @@ static int longhaul_cpu_init(struct cpuf
575 memcpy(mults, nehemiah_mults, sizeof(nehemiah_mults));
576 memcpy(eblcr, nehemiah_eblcr, sizeof(nehemiah_eblcr));
577 - switch (c->x86_mask) {
578 + switch (c->x86_stepping) {
580 cpu_model = CPU_NEHEMIAH;
581 cpuname = "C3 'Nehemiah A' [C5XLOE]";
582 --- a/drivers/cpufreq/p4-clockmod.c
583 +++ b/drivers/cpufreq/p4-clockmod.c
584 @@ -168,7 +168,7 @@ static int cpufreq_p4_cpu_init(struct cp
587 /* Errata workaround */
588 - cpuid = (c->x86 << 8) | (c->x86_model << 4) | c->x86_mask;
589 + cpuid = (c->x86 << 8) | (c->x86_model << 4) | c->x86_stepping;
593 --- a/drivers/cpufreq/powernow-k7.c
594 +++ b/drivers/cpufreq/powernow-k7.c
595 @@ -131,7 +131,7 @@ static int check_powernow(void)
599 - if ((c->x86_model == 6) && (c->x86_mask == 0)) {
600 + if ((c->x86_model == 6) && (c->x86_stepping == 0)) {
601 pr_info("K7 660[A0] core detected, enabling errata workarounds\n");
604 --- a/drivers/cpufreq/speedstep-centrino.c
605 +++ b/drivers/cpufreq/speedstep-centrino.c
606 @@ -37,7 +37,7 @@ struct cpu_id
608 __u8 x86; /* CPU family */
609 __u8 x86_model; /* model */
610 - __u8 x86_mask; /* stepping */
611 + __u8 x86_stepping; /* stepping */
615 @@ -277,7 +277,7 @@ static int centrino_verify_cpu_id(const
617 if ((c->x86 == x->x86) &&
618 (c->x86_model == x->x86_model) &&
619 - (c->x86_mask == x->x86_mask))
620 + (c->x86_stepping == x->x86_stepping))
624 --- a/drivers/cpufreq/speedstep-lib.c
625 +++ b/drivers/cpufreq/speedstep-lib.c
626 @@ -272,9 +272,9 @@ unsigned int speedstep_detect_processor(
627 ebx = cpuid_ebx(0x00000001);
630 - pr_debug("ebx value is %x, x86_mask is %x\n", ebx, c->x86_mask);
631 + pr_debug("ebx value is %x, x86_stepping is %x\n", ebx, c->x86_stepping);
633 - switch (c->x86_mask) {
634 + switch (c->x86_stepping) {
637 * B-stepping [M-P4-M]
638 @@ -361,7 +361,7 @@ unsigned int speedstep_detect_processor(
640 if ((msr_hi & (1<<18)) &&
641 (relaxed_check ? 1 : (msr_hi & (3<<24)))) {
642 - if (c->x86_mask == 0x01) {
643 + if (c->x86_stepping == 0x01) {
644 pr_debug("early PIII version\n");
645 return SPEEDSTEP_CPU_PIII_C_EARLY;
647 --- a/drivers/crypto/padlock-aes.c
648 +++ b/drivers/crypto/padlock-aes.c
649 @@ -512,7 +512,7 @@ static int __init padlock_init(void)
651 printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n");
653 - if (c->x86 == 6 && c->x86_model == 15 && c->x86_mask == 2) {
654 + if (c->x86 == 6 && c->x86_model == 15 && c->x86_stepping == 2) {
655 ecb_fetch_blocks = MAX_ECB_FETCH_BLOCKS;
656 cbc_fetch_blocks = MAX_CBC_FETCH_BLOCKS;
657 printk(KERN_NOTICE PFX "VIA Nano stepping 2 detected: enabling workaround.\n");
658 --- a/drivers/edac/amd64_edac.c
659 +++ b/drivers/edac/amd64_edac.c
660 @@ -3147,7 +3147,7 @@ static struct amd64_family_type *per_fam
661 struct amd64_family_type *fam_type = NULL;
663 pvt->ext_model = boot_cpu_data.x86_model >> 4;
664 - pvt->stepping = boot_cpu_data.x86_mask;
665 + pvt->stepping = boot_cpu_data.x86_stepping;
666 pvt->model = boot_cpu_data.x86_model;
667 pvt->fam = boot_cpu_data.x86;
669 --- a/drivers/hwmon/coretemp.c
670 +++ b/drivers/hwmon/coretemp.c
671 @@ -268,13 +268,13 @@ static int adjust_tjmax(struct cpuinfo_x
672 for (i = 0; i < ARRAY_SIZE(tjmax_model_table); i++) {
673 const struct tjmax_model *tm = &tjmax_model_table[i];
674 if (c->x86_model == tm->model &&
675 - (tm->mask == ANY || c->x86_mask == tm->mask))
676 + (tm->mask == ANY || c->x86_stepping == tm->mask))
680 /* Early chips have no MSR for TjMax */
682 - if (c->x86_model == 0xf && c->x86_mask < 4)
683 + if (c->x86_model == 0xf && c->x86_stepping < 4)
686 if (c->x86_model > 0xe && usemsr_ee) {
687 @@ -425,7 +425,7 @@ static int chk_ucode_version(unsigned in
688 * Readings might stop update when processor visited too deep sleep,
689 * fixed for stepping D0 (6EC).
691 - if (c->x86_model == 0xe && c->x86_mask < 0xc && c->microcode < 0x39) {
692 + if (c->x86_model == 0xe && c->x86_stepping < 0xc && c->microcode < 0x39) {
693 pr_err("Errata AE18 not fixed, update BIOS or microcode of the CPU!\n");
696 --- a/drivers/hwmon/hwmon-vid.c
697 +++ b/drivers/hwmon/hwmon-vid.c
698 @@ -293,7 +293,7 @@ u8 vid_which_vrm(void)
699 if (c->x86 < 6) /* Any CPU with family lower than 6 */
700 return 0; /* doesn't have VID */
702 - vrm_ret = find_vrm(c->x86, c->x86_model, c->x86_mask, c->x86_vendor);
703 + vrm_ret = find_vrm(c->x86, c->x86_model, c->x86_stepping, c->x86_vendor);
705 vrm_ret = get_via_model_d_vrm();
707 --- a/drivers/hwmon/k10temp.c
708 +++ b/drivers/hwmon/k10temp.c
709 @@ -179,7 +179,7 @@ static bool has_erratum_319(struct pci_d
710 * and AM3 formats, but that's the best we can do.
712 return boot_cpu_data.x86_model < 4 ||
713 - (boot_cpu_data.x86_model == 4 && boot_cpu_data.x86_mask <= 2);
714 + (boot_cpu_data.x86_model == 4 && boot_cpu_data.x86_stepping <= 2);
717 static int k10temp_probe(struct pci_dev *pdev,
718 --- a/drivers/hwmon/k8temp.c
719 +++ b/drivers/hwmon/k8temp.c
720 @@ -187,7 +187,7 @@ static int k8temp_probe(struct pci_dev *
723 model = boot_cpu_data.x86_model;
724 - stepping = boot_cpu_data.x86_mask;
725 + stepping = boot_cpu_data.x86_stepping;
727 /* feature available since SH-C0, exclude older revisions */
728 if ((model == 4 && stepping == 0) ||
729 --- a/drivers/video/fbdev/geode/video_gx.c
730 +++ b/drivers/video/fbdev/geode/video_gx.c
731 @@ -127,7 +127,7 @@ void gx_set_dclk_frequency(struct fb_inf
734 /* Rev. 1 Geode GXs use a 14 MHz reference clock instead of 48 MHz. */
735 - if (cpu_data(0).x86_mask == 1) {
736 + if (cpu_data(0).x86_stepping == 1) {
737 pll_table = gx_pll_table_14MHz;
738 pll_table_len = ARRAY_SIZE(gx_pll_table_14MHz);