/* arch/arc/kernel/setup.c */
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
4 */
5
6 #include <linux/seq_file.h>
7 #include <linux/fs.h>
8 #include <linux/delay.h>
9 #include <linux/root_dev.h>
10 #include <linux/clk.h>
11 #include <linux/clocksource.h>
12 #include <linux/console.h>
13 #include <linux/module.h>
14 #include <linux/cpu.h>
15 #include <linux/of_clk.h>
16 #include <linux/of_fdt.h>
17 #include <linux/of.h>
18 #include <linux/cache.h>
19 #include <uapi/linux/mount.h>
20 #include <asm/sections.h>
21 #include <asm/arcregs.h>
22 #include <asm/asserts.h>
23 #include <asm/tlb.h>
24 #include <asm/setup.h>
25 #include <asm/page.h>
26 #include <asm/irq.h>
27 #include <asm/unwind.h>
28 #include <asm/mach_desc.h>
29 #include <asm/smp.h>
30 #include <asm/dsp-impl.h>
31
/* Empty asm marking @x as read-and-written: stops the compiler from caching
 * or reordering accesses through the pointer across this point. */
#define FIX_PTR(x) __asm__ __volatile__(";" : "+r"(x))

/* NOTE(review): incremented outside this file (likely entry/IRQ code);
 * counts interrupts taken with Delayed Execution — TODO confirm at caller */
unsigned int intr_to_DE_cnt;

/* Part of U-boot ABI: see head.S */
int __initdata uboot_tag;
int __initdata uboot_magic;
char __initdata *uboot_arg;

/* Board description chosen by handle_uboot_args()/setup_machine_fdt() */
const struct machine_desc *machine_desc;

struct task_struct *_current_task[NR_CPUS]; /* For stack switching */

/* Per-CPU hardware feature info, filled by read_arc_build_cfg_regs() */
struct cpuinfo_arc cpuinfo_arc700[NR_CPUS];
/*
 * AUX IDENTITY.ARCVER -> release-name map for cores predating the
 * MICRO_ARCH BCR (ARCVER < 0x54); consulted by decode_arc_core().
 * Terminated by an entry with id 0.
 */
static const struct id_to_str arc_legacy_rel[] = {
	/* ID.ARCVER, Release */
#ifdef CONFIG_ISA_ARCOMPACT
	{ 0x34, "R4.10"},
	{ 0x35, "R4.11"},
#else
	{ 0x51, "R2.0" },
	{ 0x52, "R2.1" },
	{ 0x53, "R3.0" },
#endif
	{ 0x00, NULL }
};
59
/*
 * UARCH.MAJOR -> release-name map for HS cores (ARCVER >= 0x54) that
 * carry the MICRO_ARCH BCR; see decode_arc_core(). Terminated by 0xFF.
 */
static const struct id_to_str arc_cpu_rel[] = {
	/* UARCH.MAJOR, Release */
	{ 0, "R3.10a"},
	{ 1, "R3.50a"},
	{ 0xFF, NULL }
};
66
/*
 * Probe the Closely Coupled Memory (ICCM/DCCM) build registers and record
 * each region's size and base address in @cpu. The BCR layouts differ
 * between ARCompact and ARCv2, hence the two variants below.
 */
static void read_decode_ccm_bcr(struct cpuinfo_arc *cpu)
{
	if (is_isa_arcompact()) {
		struct bcr_iccm_arcompact iccm;
		struct bcr_dccm_arcompact dccm;

		READ_BCR(ARC_REG_ICCM_BUILD, iccm);
		if (iccm.ver) {
			cpu->iccm.sz = 4096 << iccm.sz;	/* 8K to 512K */
			cpu->iccm.base_addr = iccm.base << 16;
		}

		READ_BCR(ARC_REG_DCCM_BUILD, dccm);
		if (dccm.ver) {
			unsigned long base;
			cpu->dccm.sz = 2048 << dccm.sz;	/* 2K to 256K */

			/* DCCM base lives in a separate aux register */
			base = read_aux_reg(ARC_REG_DCCM_BASE_BUILD);
			cpu->dccm.base_addr = base & ~0xF;
		}
	} else {
		struct bcr_iccm_arcv2 iccm;
		struct bcr_dccm_arcv2 dccm;
		unsigned long region;

		READ_BCR(ARC_REG_ICCM_BUILD, iccm);
		if (iccm.ver) {
			cpu->iccm.sz = 256 << iccm.sz00;	/* 512B to 16M */
			/* sz00 saturates at 0xF; sz01 then extends the range */
			if (iccm.sz00 == 0xF && iccm.sz01 > 0)
				cpu->iccm.sz <<= iccm.sz01;

			region = read_aux_reg(ARC_REG_AUX_ICCM);
			cpu->iccm.base_addr = region & 0xF0000000;
		}

		READ_BCR(ARC_REG_DCCM_BUILD, dccm);
		if (dccm.ver) {
			cpu->dccm.sz = 256 << dccm.sz0;
			if (dccm.sz0 == 0xF && dccm.sz1 > 0)
				cpu->dccm.sz <<= dccm.sz1;

			region = read_aux_reg(ARC_REG_AUX_DCCM);
			cpu->dccm.base_addr = region & 0xF0000000;
		}
	}
}
113
/*
 * Determine the CPU family name ("ARC700"/"HS38"/"HS48") and release
 * string for @cpu from AUX IDENTITY.ARCVER, consulting the MICRO_ARCH
 * BCR for cores new enough to have it.
 */
static void decode_arc_core(struct cpuinfo_arc *cpu)
{
	struct bcr_uarch_build_arcv2 uarch;
	const struct id_to_str *tbl;

	/*
	 * Up until (including) the first core4 release (0x54) things were
	 * simple: AUX IDENTITY.ARCVER was sufficient to identify arc family
	 * and release: 0x50 to 0x53 was HS38, 0x54 was HS48 (dual issue)
	 */

	if (cpu->core.family < 0x54) { /* includes arc700 */

		for (tbl = &arc_legacy_rel[0]; tbl->id != 0; tbl++) {
			if (cpu->core.family == tbl->id) {
				cpu->release = tbl->str;
				break;
			}
		}

		/* here @tbl is either the matched entry or the NULL terminator */
		if (is_isa_arcompact())
			cpu->name = "ARC700";
		else if (tbl->str)
			cpu->name = "HS38";
		else
			cpu->name = cpu->release = "Unknown";

		return;
	}

	/*
	 * However the subsequent HS release (same 0x54) allow HS38 or HS48
	 * configurations and encode this info in a different BCR.
	 * The BCR was introduced in 0x54 so can't be read unconditionally.
	 */

	READ_BCR(ARC_REG_MICRO_ARCH_BCR, uarch);

	if (uarch.prod == 4) {
		cpu->name = "HS48";
		cpu->extn.dual = 1;

	} else {
		cpu->name = "HS38";
	}

	for (tbl = &arc_cpu_rel[0]; tbl->id != 0xFF; tbl++) {
		if (uarch.maj == tbl->id) {
			cpu->release = tbl->str;
			break;
		}
	}
}
167
/*
 * Read the Build Configuration Registers (BCRs) of the executing CPU and
 * populate its cpuinfo_arc700[] slot: core identity, timers, MPY, CCMs,
 * MMU/caches, FPU, branch predictor, debug aids and ISA options.
 * Called per-CPU from setup_processor().
 */
static void read_arc_build_cfg_regs(void)
{
	struct bcr_timer timer;
	struct bcr_generic bcr;
	struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
	struct bcr_isa_arcv2 isa;
	struct bcr_actionpoint ap;

	FIX_PTR(cpu);

	READ_BCR(AUX_IDENTITY, cpu->core);
	decode_arc_core(cpu);

	READ_BCR(ARC_REG_TIMERS_BCR, timer);
	cpu->extn.timer0 = timer.t0;
	cpu->extn.timer1 = timer.t1;
	cpu->extn.rtc = timer.rtc;

	cpu->vec_base = read_aux_reg(AUX_INTR_VEC_BASE);

	READ_BCR(ARC_REG_MUL_BCR, cpu->extn_mpy);

	/* Read CCM BCRs for boot reporting even if not enabled in Kconfig */
	read_decode_ccm_bcr(cpu);

	read_decode_mmu_bcr();
	read_decode_cache_bcr();

	if (is_isa_arcompact()) {
		struct bcr_fp_arcompact sp, dp;
		struct bcr_bpu_arcompact bpu;

		/* ARCompact has separate BCRs for SP and DP floating point */
		READ_BCR(ARC_REG_FP_BCR, sp);
		READ_BCR(ARC_REG_DPFP_BCR, dp);
		cpu->extn.fpu_sp = sp.ver ? 1 : 0;
		cpu->extn.fpu_dp = dp.ver ? 1 : 0;

		READ_BCR(ARC_REG_BPU_BCR, bpu);
		cpu->bpu.ver = bpu.ver;
		cpu->bpu.full = bpu.fam ? 1 : 0;
		if (bpu.ent) {
			cpu->bpu.num_cache = 256 << (bpu.ent - 1);
			cpu->bpu.num_pred = 256 << (bpu.ent - 1);
		}
	} else {
		struct bcr_fp_arcv2 spdp;
		struct bcr_bpu_arcv2 bpu;

		/* ARCv2: a single BCR reports both FPU precisions */
		READ_BCR(ARC_REG_FP_V2_BCR, spdp);
		cpu->extn.fpu_sp = spdp.sp ? 1 : 0;
		cpu->extn.fpu_dp = spdp.dp ? 1 : 0;

		READ_BCR(ARC_REG_BPU_BCR, bpu);
		cpu->bpu.ver = bpu.ver;
		cpu->bpu.full = bpu.ft;
		cpu->bpu.num_cache = 256 << bpu.bce;
		cpu->bpu.num_pred = 2048 << bpu.pte;
		cpu->bpu.ret_stk = 4 << bpu.rse;

		/* if dual issue hardware, is it enabled ? */
		if (cpu->extn.dual) {
			unsigned int exec_ctrl;

			READ_BCR(AUX_EXEC_CTRL, exec_ctrl);
			/* bit 0 set in EXEC_CTRL means dual issue is off */
			cpu->extn.dual_enb = !(exec_ctrl & 1);
		}
	}

	READ_BCR(ARC_REG_AP_BCR, ap);
	if (ap.ver) {
		cpu->extn.ap_num = 2 << ap.num;
		cpu->extn.ap_full = !ap.min;
	}

	READ_BCR(ARC_REG_SMART_BCR, bcr);
	cpu->extn.smart = bcr.ver ? 1 : 0;

	READ_BCR(ARC_REG_RTT_BCR, bcr);
	cpu->extn.rtt = bcr.ver ? 1 : 0;

	READ_BCR(ARC_REG_ISA_CFG_BCR, isa);

	/* some hacks for lack of feature BCR info in old ARC700 cores */
	if (is_isa_arcompact()) {
		if (!isa.ver)	/* ISA BCR absent, use Kconfig info */
			cpu->isa.atomic = IS_ENABLED(CONFIG_ARC_HAS_LLSC);
		else {
			/* ARC700_BUILD only has 2 bits of isa info */
			struct bcr_generic bcr = *(struct bcr_generic *)&isa;
			cpu->isa.atomic = bcr.info & 1;
		}

		cpu->isa.be = IS_ENABLED(CONFIG_CPU_BIG_ENDIAN);

		/* there's no direct way to distinguish 750 vs. 770 */
		if (unlikely(cpu->core.family < 0x34 || cpu->mmu.ver < 3))
			cpu->name = "ARC750";
	} else {
		cpu->isa = isa;
	}
}
269
/*
 * Render identity, timers, ISA-extension, MPY and branch-predictor info
 * of @cpu_id into @buf (at most @len bytes), for the boot log and
 * /proc/cpuinfo. Returns @buf.
 */
static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
{
	struct cpuinfo_arc *cpu = &cpuinfo_arc700[cpu_id];
	struct bcr_identity *core = &cpu->core;
	char mpy_opt[16];
	int n = 0;

	FIX_PTR(cpu);

	n += scnprintf(buf + n, len - n,
		       "\nIDENTITY\t: ARCVER [%#02x] ARCNUM [%#02x] CHIPID [%#4x]\n",
		       core->family, core->cpu_id, core->chip_id);

	n += scnprintf(buf + n, len - n, "processor [%d]\t: %s %s (%s ISA) %s%s%s\n",
		       cpu_id, cpu->name, cpu->release,
		       is_isa_arcompact() ? "ARCompact" : "ARCv2",
		       IS_AVAIL1(cpu->isa.be, "[Big-Endian]"),
		       IS_AVAIL3(cpu->extn.dual, cpu->extn.dual_enb, " Dual-Issue "));

	n += scnprintf(buf + n, len - n, "Timers\t\t: %s%s%s%s%s%s\nISA Extn\t: ",
		       IS_AVAIL1(cpu->extn.timer0, "Timer0 "),
		       IS_AVAIL1(cpu->extn.timer1, "Timer1 "),
		       IS_AVAIL2(cpu->extn.rtc, "RTC [UP 64-bit] ", CONFIG_ARC_TIMERS_64BIT),
		       IS_AVAIL2(cpu->extn.gfrc, "GFRC [SMP 64-bit] ", CONFIG_ARC_TIMERS_64BIT));

	/* mpy_opt is only consumed below when extn_mpy.ver is non-zero */
	if (cpu->extn_mpy.ver) {
		if (is_isa_arcompact()) {
			scnprintf(mpy_opt, 16, "mpy");
		} else {

			int opt = 2;	/* stock MPY/MPYH */

			if (cpu->extn_mpy.dsp)	/* OPT 7-9 */
				opt = cpu->extn_mpy.dsp + 6;

			scnprintf(mpy_opt, 16, "mpy[opt %d] ", opt);
		}
	}

	n += scnprintf(buf + n, len - n, "%s%s%s%s%s%s%s%s\n",
		       IS_AVAIL2(cpu->isa.atomic, "atomic ", CONFIG_ARC_HAS_LLSC),
		       IS_AVAIL2(cpu->isa.ldd, "ll64 ", CONFIG_ARC_HAS_LL64),
		       IS_AVAIL2(cpu->isa.unalign, "unalign ", CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS),
		       IS_AVAIL1(cpu->extn_mpy.ver, mpy_opt),
		       IS_AVAIL1(cpu->isa.div_rem, "div_rem "));

	if (cpu->bpu.ver) {
		n += scnprintf(buf + n, len - n,
			       "BPU\t\t: %s%s match, cache:%d, Predict Table:%d Return stk: %d",
			       IS_AVAIL1(cpu->bpu.full, "full"),
			       IS_AVAIL1(!cpu->bpu.full, "partial"),
			       cpu->bpu.num_cache, cpu->bpu.num_pred, cpu->bpu.ret_stk);

		if (is_isa_arcv2()) {
			struct bcr_lpb lpb;

			/* Loop buffer exists only on ARCv2; report if built in */
			READ_BCR(ARC_REG_LPB_BUILD, lpb);
			if (lpb.ver) {
				unsigned int ctl;
				ctl = read_aux_reg(ARC_REG_LPB_CTRL);

				n += scnprintf(buf + n, len - n, " Loop Buffer:%d %s",
					       lpb.entries,
					       IS_DISABLED_RUN(!ctl));
			}
		}
		n += scnprintf(buf + n, len - n, "\n");
	}

	return buf;
}
341
/*
 * Render extension info of @cpu_id (vector table base, FPU, debug aids,
 * CCMs, ECC) into @buf (at most @len bytes). Returns @buf.
 */
static char *arc_extn_mumbojumbo(int cpu_id, char *buf, int len)
{
	int n = 0;
	struct cpuinfo_arc *cpu = &cpuinfo_arc700[cpu_id];

	FIX_PTR(cpu);

	n += scnprintf(buf + n, len - n, "Vector Table\t: %#x\n", cpu->vec_base);

	if (cpu->extn.fpu_sp || cpu->extn.fpu_dp)
		n += scnprintf(buf + n, len - n, "FPU\t\t: %s%s\n",
			       IS_AVAIL1(cpu->extn.fpu_sp, "SP "),
			       IS_AVAIL1(cpu->extn.fpu_dp, "DP "));

	if (cpu->extn.ap_num | cpu->extn.smart | cpu->extn.rtt) {
		n += scnprintf(buf + n, len - n, "DEBUG\t\t: %s%s",
			       IS_AVAIL1(cpu->extn.smart, "smaRT "),
			       IS_AVAIL1(cpu->extn.rtt, "RTT "));
		if (cpu->extn.ap_num) {
			n += scnprintf(buf + n, len - n, "ActionPoint %d/%s",
				       cpu->extn.ap_num,
				       cpu->extn.ap_full ? "full":"min");
		}
		n += scnprintf(buf + n, len - n, "\n");
	}

	if (cpu->dccm.sz || cpu->iccm.sz)
		n += scnprintf(buf + n, len - n, "Extn [CCM]\t: DCCM @ %x, %d KB / ICCM: @ %x, %d KB\n",
			       cpu->dccm.base_addr, TO_KB(cpu->dccm.sz),
			       cpu->iccm.base_addr, TO_KB(cpu->iccm.sz));

	if (is_isa_arcv2()) {

		/* Error Protection: ECC/Parity */
		struct bcr_erp erp;
		READ_BCR(ARC_REG_ERP_BUILD, erp);

		if (erp.ver) {
			struct ctl_erp ctl;
			READ_BCR(ARC_REG_ERP_CTRL, ctl);

			/* inverted bits: 0 means enabled */
			n += scnprintf(buf + n, len - n, "Extn [ECC]\t: %s%s%s%s%s%s\n",
				       IS_AVAIL3(erp.ic, !ctl.dpi, "IC "),
				       IS_AVAIL3(erp.dc, !ctl.dpd, "DC "),
				       IS_AVAIL3(erp.mmu, !ctl.mpd, "MMU "));
		}
	}

	return buf;
}
393
394 void chk_opt_strict(char *opt_name, bool hw_exists, bool opt_ena)
395 {
396 if (hw_exists && !opt_ena)
397 pr_warn(" ! Enable %s for working apps\n", opt_name);
398 else if (!hw_exists && opt_ena)
399 panic("Disable %s, hardware NOT present\n", opt_name);
400 }
401
402 void chk_opt_weak(char *opt_name, bool hw_exists, bool opt_ena)
403 {
404 if (!hw_exists && opt_ena)
405 panic("Disable %s, hardware NOT present\n", opt_name);
406 }
407
/*
 * Sanity-check that the Kconfig this kernel was built with matches the
 * hardware actually present (timers, CCM placement/size, FPU regs).
 * Panics on mismatches that would otherwise crash later.
 */
static void arc_chk_core_config(void)
{
	struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
	int present = 0;

	if (!cpu->extn.timer0)
		panic("Timer0 is not present!\n");

	if (!cpu->extn.timer1)
		panic("Timer1 is not present!\n");

#ifdef CONFIG_ARC_HAS_DCCM
	/*
	 * DCCM can be arbit placed in hardware.
	 * Make sure it's placement/sz matches what Linux is built with
	 */
	if ((unsigned int)__arc_dccm_base != cpu->dccm.base_addr)
		panic("Linux built with incorrect DCCM Base address\n");

	if (CONFIG_ARC_DCCM_SZ != cpu->dccm.sz)
		panic("Linux built with incorrect DCCM Size\n");
#endif

#ifdef CONFIG_ARC_HAS_ICCM
	if (CONFIG_ARC_ICCM_SZ != cpu->iccm.sz)
		panic("Linux built with incorrect ICCM Size\n");
#endif

	/*
	 * FP hardware/software config sanity
	 * -If hardware present, kernel needs to save/restore FPU state
	 * -If not, it will crash trying to save/restore the non-existant regs
	 */

	if (is_isa_arcompact()) {
		/* only DPDP checked since SP has no arch visible regs */
		present = cpu->extn.fpu_dp;
		CHK_OPT_STRICT(CONFIG_ARC_FPU_SAVE_RESTORE, present);
	} else {
		/* Accumulator Low:High pair (r58:59) present if DSP MPY or FPU */
		present = cpu->extn_mpy.dsp | cpu->extn.fpu_sp | cpu->extn.fpu_dp;
		CHK_OPT_STRICT(CONFIG_ARC_HAS_ACCL_REGS, present);

		dsp_config_check();
	}
}
454
/*
 * Initialize and setup the processor core
 * This is called by all the CPUs thus should not do special case stuff
 * such as only for boot CPU etc
 */

void setup_processor(void)
{
	char str[512];	/* scratch buffer reused by the text dumps below */
	int cpu_id = smp_processor_id();

	read_arc_build_cfg_regs();
	arc_init_IRQ();

	pr_info("%s", arc_cpu_mumbojumbo(cpu_id, str, sizeof(str)));

	arc_mmu_init();
	arc_cache_init();

	pr_info("%s", arc_extn_mumbojumbo(cpu_id, str, sizeof(str)));
	pr_info("%s", arc_platform_smp_cpuinfo());

	/* panic if hardware and Kconfig disagree on essentials */
	arc_chk_core_config();
}
479
480 static inline bool uboot_arg_invalid(unsigned long addr)
481 {
482 /*
483 * Check that it is a untranslated address (although MMU is not enabled
484 * yet, it being a high address ensures this is not by fluke)
485 */
486 if (addr < PAGE_OFFSET)
487 return true;
488
489 /* Check that address doesn't clobber resident kernel image */
490 return addr >= (unsigned long)_stext && addr <= (unsigned long)_end;
491 }
492
/* Common prefix for the "ignoring U-boot args" warnings below */
#define IGNORE_ARGS "Ignore U-boot args: "

/* uboot_tag values for U-boot - kernel ABI revision 0; see head.S */
#define UBOOT_TAG_NONE 0
#define UBOOT_TAG_CMDLINE 1
#define UBOOT_TAG_DTB 2
/* We always pass 0 as magic from U-boot */
#define UBOOT_MAGIC_VALUE 0
501
/*
 * Validate and act on the registers U-boot handed over (saved in head.S
 * as uboot_tag/uboot_magic/uboot_arg): pick an external DTB if one was
 * passed (falling back to the embedded one) and optionally append a
 * U-boot supplied command line to boot_command_line.
 */
void __init handle_uboot_args(void)
{
	bool use_embedded_dtb = true;
	bool append_cmdline = false;

	/* check that we know this tag */
	if (uboot_tag != UBOOT_TAG_NONE &&
	    uboot_tag != UBOOT_TAG_CMDLINE &&
	    uboot_tag != UBOOT_TAG_DTB) {
		pr_warn(IGNORE_ARGS "invalid uboot tag: '%08x'\n", uboot_tag);
		goto ignore_uboot_args;
	}

	if (uboot_magic != UBOOT_MAGIC_VALUE) {
		pr_warn(IGNORE_ARGS "non zero uboot magic\n");
		goto ignore_uboot_args;
	}

	/* NONE carries no pointer; only validate @uboot_arg for other tags */
	if (uboot_tag != UBOOT_TAG_NONE &&
	    uboot_arg_invalid((unsigned long)uboot_arg)) {
		pr_warn(IGNORE_ARGS "invalid uboot arg: '%px'\n", uboot_arg);
		goto ignore_uboot_args;
	}

	/* see if U-boot passed an external Device Tree blob */
	if (uboot_tag == UBOOT_TAG_DTB) {
		machine_desc = setup_machine_fdt((void *)uboot_arg);

		/* external Device Tree blob is invalid - use embedded one */
		use_embedded_dtb = !machine_desc;
	}

	if (uboot_tag == UBOOT_TAG_CMDLINE)
		append_cmdline = true;

ignore_uboot_args:
	/* deliberate fall-through: bad args still boot with the built-in DTB */

	if (use_embedded_dtb) {
		machine_desc = setup_machine_fdt(__dtb_start);
		if (!machine_desc)
			panic("Embedded DT invalid\n");
	}

	/*
	 * NOTE: @boot_command_line is populated by setup_machine_fdt() so this
	 * append processing can only happen after.
	 */
	if (append_cmdline) {
		/* Ensure a whitespace between the 2 cmdlines */
		strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
		strlcat(boot_command_line, uboot_arg, COMMAND_LINE_SIZE);
	}
}
555
/*
 * Arch-level boot setup, called once from start_kernel(): consume U-boot
 * args / DTB, set up memory and the boot CPU, and init the unwinder.
 */
void __init setup_arch(char **cmdline_p)
{
	handle_uboot_args();

	/* Save unparsed command line copy for /proc/cmdline */
	*cmdline_p = boot_command_line;

	/* To force early parsing of things like mem=xxx */
	parse_early_param();

	/* Platform/board specific: e.g. early console registration */
	if (machine_desc->init_early)
		machine_desc->init_early();

	smp_init_cpus();

	setup_processor();
	setup_arch_memory();

	/* copy flat DT out of .init and then unflatten it */
	unflatten_and_copy_device_tree();

	/* Can be issue if someone passes cmd line arg "ro"
	 * But that is unlikely so keeping it as it is
	 */
	root_mountflags &= ~MS_RDONLY;

	arc_unwind_init();
}
585
/*
 * Called from start_kernel() - boot CPU only
 */
void __init time_init(void)
{
	/* register clocks from DT, then probe clocksource/clockevents */
	of_clk_init(NULL);
	timer_probe();
}
594
/* Invoke the board's optional init_machine hook (arch_initcall time) */
static int __init customize_machine(void)
{
	if (machine_desc->init_machine)
		machine_desc->init_machine();

	return 0;
}
arch_initcall(customize_machine);
603
/* Invoke the board's optional init_late hook (late_initcall time) */
static int __init init_late_machine(void)
{
	if (machine_desc->init_late)
		machine_desc->init_late();

	return 0;
}
late_initcall(init_late_machine);
/*
 * Get CPU information for use by the procfs.
 */

/*
 * The seq_file iterator passes the cpu-id around as an opaque pointer;
 * tag it with 0xFFFF0000 so that cpu 0 never becomes NULL (see c_start()).
 */
#define cpu_to_ptr(c) ((void *)(0xFFFF0000 | (unsigned int)(c)))
#define ptr_to_cpu(p) (~0xFFFF0000UL & (unsigned int)(p))
618
619 static int show_cpuinfo(struct seq_file *m, void *v)
620 {
621 char *str;
622 int cpu_id = ptr_to_cpu(v);
623 struct device *cpu_dev = get_cpu_device(cpu_id);
624 struct clk *cpu_clk;
625 unsigned long freq = 0;
626
627 if (!cpu_online(cpu_id)) {
628 seq_printf(m, "processor [%d]\t: Offline\n", cpu_id);
629 goto done;
630 }
631
632 str = (char *)__get_free_page(GFP_KERNEL);
633 if (!str)
634 goto done;
635
636 seq_printf(m, arc_cpu_mumbojumbo(cpu_id, str, PAGE_SIZE));
637
638 cpu_clk = clk_get(cpu_dev, NULL);
639 if (IS_ERR(cpu_clk)) {
640 seq_printf(m, "CPU speed \t: Cannot get clock for processor [%d]\n",
641 cpu_id);
642 } else {
643 freq = clk_get_rate(cpu_clk);
644 }
645 if (freq)
646 seq_printf(m, "CPU speed\t: %lu.%02lu Mhz\n",
647 freq / 1000000, (freq / 10000) % 100);
648
649 seq_printf(m, "Bogo MIPS\t: %lu.%02lu\n",
650 loops_per_jiffy / (500000 / HZ),
651 (loops_per_jiffy / (5000 / HZ)) % 100);
652
653 seq_printf(m, arc_mmu_mumbojumbo(cpu_id, str, PAGE_SIZE));
654 seq_printf(m, arc_cache_mumbojumbo(cpu_id, str, PAGE_SIZE));
655 seq_printf(m, arc_extn_mumbojumbo(cpu_id, str, PAGE_SIZE));
656 seq_printf(m, arc_platform_smp_cpuinfo());
657
658 free_page((unsigned long)str);
659 done:
660 seq_printf(m, "\n");
661
662 return 0;
663 }
664
static void *c_start(struct seq_file *m, loff_t *pos)
{
	/*
	 * Callback returns cpu-id to iterator for show routine, NULL to stop.
	 * However since NULL is also a valid cpu-id (0), we use a round-about
	 * way to pass it w/o having to kmalloc/free a 2 byte string.
	 * Encode cpu-id as (0xFFFF0000 | cpu) via cpu_to_ptr(); show_cpuinfo()
	 * decodes it back with ptr_to_cpu().
	 */
	return *pos < nr_cpu_ids ? cpu_to_ptr(*pos) : NULL;
}
675
676 static void *c_next(struct seq_file *m, void *v, loff_t *pos)
677 {
678 ++*pos;
679 return c_start(m, pos);
680 }
681
/* Nothing acquired per-iteration, so nothing to release here */
static void c_stop(struct seq_file *m, void *v)
{
}

/* /proc/cpuinfo iterator: one record per possible cpu-id */
const struct seq_operations cpuinfo_op = {
	.start = c_start,
	.next = c_next,
	.stop = c_stop,
	.show = show_cpuinfo
};
692
/* One struct cpu per CPU, registered below with the sysfs cpu subsystem */
static DEFINE_PER_CPU(struct cpu, cpu_topology);

/* Register every present CPU so it shows up under /sys/devices/system/cpu */
static int __init topology_init(void)
{
	int cpu;

	for_each_present_cpu(cpu)
		register_cpu(&per_cpu(cpu_topology, cpu), cpu);

	return 0;
}

subsys_initcall(topology_init);