/*
 * arch/sparc64/mm/init.c
 *
 * Copyright (C) 1996-1999 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1997-1999 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/extable.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/poison.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/kprobes.h>
#include <linux/cache.h>
#include <linux/sort.h>
#include <linux/ioport.h>
#include <linux/percpu.h>
#include <linux/memblock.h>
#include <linux/mmzone.h>
#include <linux/gfp.h>

#include <asm/head.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/iommu.h>
#include <asm/io.h>
#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/dma.h>
#include <asm/starfire.h>
#include <asm/tlb.h>
#include <asm/spitfire.h>
#include <asm/sections.h>
#include <asm/tsb.h>
#include <asm/hypervisor.h>
#include <asm/prom.h>
#include <asm/mdesc.h>
#include <asm/cpudata.h>
#include <asm/setup.h>
#include <asm/irq.h>

#include "init_64.h"

unsigned long kern_linear_pte_xor[4] __read_mostly;
static unsigned long page_cache4v_flag;

/* A bitmap, two bits for every 256MB of physical memory.  These two
 * bits determine what page size we use for kernel linear
 * translations.  They form an index into kern_linear_pte_xor[].  The
 * value in the indexed slot is XOR'd with the TLB miss virtual
 * address to form the resulting TTE.  The mapping is:
 *
 *	0	==>	4MB
 *	1	==>	256MB
 *	2	==>	2GB
 *	3	==>	16GB
 *
 * All sun4v chips support 256MB pages.  Only SPARC-T4 and later
 * support 2GB pages, and hopefully future cpus will support the 16GB
 * pages as well.  For slots 2 and 3, we encode a 256MB TTE xor there
 * if these larger page sizes are not supported by the cpu.
 *
 * It would be nice to determine this from the machine description
 * 'cpu' properties, but we need to have this table setup before the
 * MDESC is initialized.
 */

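/* Illustrative sketch (not part of the original file): with the two-bit
 * code for a linear region in hand, the TLB miss handler conceptually
 * computes
 *
 *	tte = vaddr ^ kern_linear_pte_xor[code];
 *
 * so code 0 yields a 4MB TTE, code 1 a 256MB TTE, and so on per the
 * table above.  The real lookup is done in assembler in ktlb.S.
 */
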
#ifndef CONFIG_DEBUG_PAGEALLOC
/* A special kernel TSB for 4MB, 256MB, 2GB and 16GB linear mappings.
 * Space is allocated for this right after the trap table in
 * arch/sparc64/kernel/head.S
 */
extern struct tsb swapper_4m_tsb[KERNEL_TSB4M_NENTRIES];
#endif
extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];

static unsigned long cpu_pgsz_mask;

#define MAX_BANKS	1024

static struct linux_prom64_registers pavail[MAX_BANKS];
static int pavail_ents;

u64 numa_latency[MAX_NUMNODES][MAX_NUMNODES];

static int cmp_p64(const void *a, const void *b)
{
	const struct linux_prom64_registers *x = a, *y = b;

	if (x->phys_addr > y->phys_addr)
		return 1;
	if (x->phys_addr < y->phys_addr)
		return -1;
	return 0;
}

static void __init read_obp_memory(const char *property,
				   struct linux_prom64_registers *regs,
				   int *num_ents)
{
	phandle node = prom_finddevice("/memory");
	int prop_size = prom_getproplen(node, property);
	int ents, ret, i;

	ents = prop_size / sizeof(struct linux_prom64_registers);
	if (ents > MAX_BANKS) {
		prom_printf("The machine has more %s property entries than "
			    "this kernel can support (%d).\n",
			    property, MAX_BANKS);
		prom_halt();
	}

	ret = prom_getproperty(node, property, (char *) regs, prop_size);
	if (ret == -1) {
		prom_printf("Couldn't get %s property from /memory.\n",
			    property);
		prom_halt();
	}

	/* Sanitize what we got from the firmware, by page aligning
	 * everything.
	 */
	for (i = 0; i < ents; i++) {
		unsigned long base, size;

		base = regs[i].phys_addr;
		size = regs[i].reg_size;

		size &= PAGE_MASK;
		if (base & ~PAGE_MASK) {
			unsigned long new_base = PAGE_ALIGN(base);

			size -= new_base - base;
			if ((long) size < 0L)
				size = 0UL;
			base = new_base;
		}
		if (size == 0UL) {
			/* If it is empty, simply get rid of it.
			 * This simplifies the logic of the other
			 * functions that process these arrays.
			 */
			memmove(&regs[i], &regs[i + 1],
				(ents - i - 1) * sizeof(regs[0]));
			i--;
			ents--;
			continue;
		}
		regs[i].phys_addr = base;
		regs[i].reg_size = size;
	}

	*num_ents = ents;

	sort(regs, ents, sizeof(struct linux_prom64_registers),
	     cmp_p64, NULL);
}
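
/* Worked example of the sanitizing above (numbers made up): with 8K
 * pages, an entry of base 0x1fff0400 and size 0x100000 first has its
 * size truncated to a whole number of pages (here unchanged), then the
 * base rounds up to 0x1fff2000 and the size shrinks by the 0x1c00
 * difference to 0xfe400.
 */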

/* Kernel physical address base and size in bytes.  */
unsigned long kern_base __read_mostly;
unsigned long kern_size __read_mostly;

/* Initial ramdisk setup */
extern unsigned long sparc_ramdisk_image64;
extern unsigned int sparc_ramdisk_image;
extern unsigned int sparc_ramdisk_size;

struct page *mem_map_zero __read_mostly;
EXPORT_SYMBOL(mem_map_zero);

unsigned int sparc64_highest_unlocked_tlb_ent __read_mostly;

unsigned long sparc64_kern_pri_context __read_mostly;
unsigned long sparc64_kern_pri_nuc_bits __read_mostly;
unsigned long sparc64_kern_sec_context __read_mostly;

int num_kernel_image_mappings;

#ifdef CONFIG_DEBUG_DCFLUSH
atomic_t dcpage_flushes = ATOMIC_INIT(0);
#ifdef CONFIG_SMP
atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
#endif
#endif

inline void flush_dcache_page_impl(struct page *page)
{
	BUG_ON(tlb_type == hypervisor);
#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif

#ifdef DCACHE_ALIASING_POSSIBLE
	__flush_dcache_page(page_address(page),
			    ((tlb_type == spitfire) &&
			     page_mapping(page) != NULL));
#else
	if (page_mapping(page) != NULL &&
	    tlb_type == spitfire)
		__flush_icache_page(__pa(page_address(page)));
#endif
}

#define PG_dcache_dirty		PG_arch_1
#define PG_dcache_cpu_shift	32UL
#define PG_dcache_cpu_mask	\
	((1UL<<ilog2(roundup_pow_of_two(NR_CPUS)))-1UL)

#define dcache_dirty_cpu(page) \
	(((page)->flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask)

static inline void set_dcache_dirty(struct page *page, int this_cpu)
{
	unsigned long mask = this_cpu;
	unsigned long non_cpu_bits;

	non_cpu_bits = ~(PG_dcache_cpu_mask << PG_dcache_cpu_shift);
	mask = (mask << PG_dcache_cpu_shift) | (1UL << PG_dcache_dirty);

	__asm__ __volatile__("1:\n\t"
			     "ldx	[%2], %%g7\n\t"
			     "and	%%g7, %1, %%g1\n\t"
			     "or	%%g1, %0, %%g1\n\t"
			     "casx	[%2], %%g7, %%g1\n\t"
			     "cmp	%%g7, %%g1\n\t"
			     "bne,pn	%%xcc, 1b\n\t"
			     " nop"
			     : /* no outputs */
			     : "r" (mask), "r" (non_cpu_bits), "r" (&page->flags)
			     : "g1", "g7");
}

static inline void clear_dcache_dirty_cpu(struct page *page, unsigned long cpu)
{
	unsigned long mask = (1UL << PG_dcache_dirty);

	__asm__ __volatile__("! test_and_clear_dcache_dirty\n"
			     "1:\n\t"
			     "ldx	[%2], %%g7\n\t"
			     "srlx	%%g7, %4, %%g1\n\t"
			     "and	%%g1, %3, %%g1\n\t"
			     "cmp	%%g1, %0\n\t"
			     "bne,pn	%%icc, 2f\n\t"
			     " andn	%%g7, %1, %%g1\n\t"
			     "casx	[%2], %%g7, %%g1\n\t"
			     "cmp	%%g7, %%g1\n\t"
			     "bne,pn	%%xcc, 1b\n\t"
			     " nop\n"
			     "2:"
			     : /* no outputs */
			     : "r" (cpu), "r" (mask), "r" (&page->flags),
			       "i" (PG_dcache_cpu_mask),
			       "i" (PG_dcache_cpu_shift)
			     : "g1", "g7");
}
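
/* C-level sketch of the two casx loops above (illustrative only): both
 * are compare-and-swap retry loops on page->flags.  set_dcache_dirty()
 * behaves roughly like
 *
 *	do {
 *		old = page->flags;
 *		new = (old & non_cpu_bits) | mask;
 *	} while (cmpxchg(&page->flags, old, new) != old);
 *
 * and clear_dcache_dirty_cpu() additionally bails out early when the
 * recorded cpu no longer matches 'cpu'.
 */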

static inline void tsb_insert(struct tsb *ent, unsigned long tag, unsigned long pte)
{
	unsigned long tsb_addr = (unsigned long) ent;

	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
		tsb_addr = __pa(tsb_addr);

	__tsb_insert(tsb_addr, tag, pte);
}

unsigned long _PAGE_ALL_SZ_BITS __read_mostly;

static void flush_dcache(unsigned long pfn)
{
	struct page *page;

	page = pfn_to_page(pfn);
	if (page) {
		unsigned long pg_flags;

		pg_flags = page->flags;
		if (pg_flags & (1UL << PG_dcache_dirty)) {
			int cpu = ((pg_flags >> PG_dcache_cpu_shift) &
				   PG_dcache_cpu_mask);
			int this_cpu = get_cpu();

			/* This is just to optimize away some function calls
			 * in the SMP case.
			 */
			if (cpu == this_cpu)
				flush_dcache_page_impl(page);
			else
				smp_flush_dcache_page_impl(page, cpu);

			clear_dcache_dirty_cpu(page, cpu);

			put_cpu();
		}
	}
}

/* mm->context.lock must be held */
static void __update_mmu_tsb_insert(struct mm_struct *mm, unsigned long tsb_index,
				    unsigned long tsb_hash_shift, unsigned long address,
				    unsigned long tte)
{
	struct tsb *tsb = mm->context.tsb_block[tsb_index].tsb;
	unsigned long tag;

	if (unlikely(!tsb))
		return;

	tsb += ((address >> tsb_hash_shift) &
		(mm->context.tsb_block[tsb_index].tsb_nentries - 1UL));
	tag = (address >> 22UL);
	tsb_insert(tsb, tag, tte);
}
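
/* Worked example of the TSB indexing above (numbers made up): with a
 * 512-entry base TSB and PAGE_SHIFT == 13, an address maps to entry
 * (address >> 13) & 511 and is tagged with address >> 22, which the
 * TSB miss handler compares against the faulting address on lookup.
 */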

void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
	struct mm_struct *mm;
	unsigned long flags;
	pte_t pte = *ptep;

	if (tlb_type != hypervisor) {
		unsigned long pfn = pte_pfn(pte);

		if (pfn_valid(pfn))
			flush_dcache(pfn);
	}

	mm = vma->vm_mm;

	/* Don't insert a non-valid PTE into the TSB, we'll deadlock.  */
	if (!pte_accessible(mm, pte))
		return;

	spin_lock_irqsave(&mm->context.lock, flags);

#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
	if ((mm->context.hugetlb_pte_count || mm->context.thp_pte_count) &&
	    is_hugetlb_pte(pte)) {
		/* We are fabricating 8MB pages using 4MB real hw pages.  */
		pte_val(pte) |= (address & (1UL << REAL_HPAGE_SHIFT));
		__update_mmu_tsb_insert(mm, MM_TSB_HUGE, REAL_HPAGE_SHIFT,
					address, pte_val(pte));
	} else
#endif
		__update_mmu_tsb_insert(mm, MM_TSB_BASE, PAGE_SHIFT,
					address, pte_val(pte));

	spin_unlock_irqrestore(&mm->context.lock, flags);
}

void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;
	int this_cpu;

	if (tlb_type == hypervisor)
		return;

	/* Do not bother with the expensive D-cache flush if it
	 * is merely the zero page.  The 'bigcore' testcase in GDB
	 * causes this case to run millions of times.
	 */
	if (page == ZERO_PAGE(0))
		return;

	this_cpu = get_cpu();

	mapping = page_mapping(page);
	if (mapping && !mapping_mapped(mapping)) {
		int dirty = test_bit(PG_dcache_dirty, &page->flags);
		if (dirty) {
			int dirty_cpu = dcache_dirty_cpu(page);

			if (dirty_cpu == this_cpu)
				goto out;
			smp_flush_dcache_page_impl(page, dirty_cpu);
		}
		set_dcache_dirty(page, this_cpu);
	} else {
		/* We could delay the flush for the !page_mapping
		 * case too.  But that case is for exec env/arg
		 * pages and those are 99% certainly going to get
		 * faulted into the tlb (and thus flushed) anyways.
		 */
		flush_dcache_page_impl(page);
	}

out:
	put_cpu();
}
EXPORT_SYMBOL(flush_dcache_page);

void __kprobes flush_icache_range(unsigned long start, unsigned long end)
{
	/* Cheetah and Hypervisor platform cpus have coherent I-cache. */
	if (tlb_type == spitfire) {
		unsigned long kaddr;

		/* This code only runs on Spitfire cpus so this is
		 * why we can assume _PAGE_PADDR_4U.
		 */
		for (kaddr = start; kaddr < end; kaddr += PAGE_SIZE) {
			unsigned long paddr, mask = _PAGE_PADDR_4U;

			if (kaddr >= PAGE_OFFSET)
				paddr = kaddr & mask;
			else {
				pgd_t *pgdp = pgd_offset_k(kaddr);
				pud_t *pudp = pud_offset(pgdp, kaddr);
				pmd_t *pmdp = pmd_offset(pudp, kaddr);
				pte_t *ptep = pte_offset_kernel(pmdp, kaddr);

				paddr = pte_val(*ptep) & mask;
			}
			__flush_icache_page(paddr);
		}
	}
}
EXPORT_SYMBOL(flush_icache_range);

void mmu_info(struct seq_file *m)
{
	static const char *pgsz_strings[] = {
		"8K", "64K", "512K", "4MB", "32MB",
		"256MB", "2GB", "16GB",
	};
	int i, printed;

	if (tlb_type == cheetah)
		seq_printf(m, "MMU Type\t: Cheetah\n");
	else if (tlb_type == cheetah_plus)
		seq_printf(m, "MMU Type\t: Cheetah+\n");
	else if (tlb_type == spitfire)
		seq_printf(m, "MMU Type\t: Spitfire\n");
	else if (tlb_type == hypervisor)
		seq_printf(m, "MMU Type\t: Hypervisor (sun4v)\n");
	else
		seq_printf(m, "MMU Type\t: ???\n");

	seq_printf(m, "MMU PGSZs\t: ");
	printed = 0;
	for (i = 0; i < ARRAY_SIZE(pgsz_strings); i++) {
		if (cpu_pgsz_mask & (1UL << i)) {
			seq_printf(m, "%s%s",
				   printed ? "," : "", pgsz_strings[i]);
			printed++;
		}
	}
	seq_putc(m, '\n');

#ifdef CONFIG_DEBUG_DCFLUSH
	seq_printf(m, "DCPageFlushes\t: %d\n",
		   atomic_read(&dcpage_flushes));
#ifdef CONFIG_SMP
	seq_printf(m, "DCPageFlushesXC\t: %d\n",
		   atomic_read(&dcpage_flushes_xcall));
#endif /* CONFIG_SMP */
#endif /* CONFIG_DEBUG_DCFLUSH */
}

struct linux_prom_translation prom_trans[512] __read_mostly;
unsigned int prom_trans_ents __read_mostly;

unsigned long kern_locked_tte_data;

/* The obp translations are saved based on 8k pagesize, since obp can
 * use a mixture of pagesizes.  Misses to the LOW_OBP_ADDRESS ->
 * HI_OBP_ADDRESS range are handled in ktlb.S.
 */
static inline int in_obp_range(unsigned long vaddr)
{
	return (vaddr >= LOW_OBP_ADDRESS &&
		vaddr < HI_OBP_ADDRESS);
}

static int cmp_ptrans(const void *a, const void *b)
{
	const struct linux_prom_translation *x = a, *y = b;

	if (x->virt > y->virt)
		return 1;
	if (x->virt < y->virt)
		return -1;
	return 0;
}

/* Read OBP translations property into 'prom_trans[]'.  */
static void __init read_obp_translations(void)
{
	int n, node, ents, first, last, i;

	node = prom_finddevice("/virtual-memory");
	n = prom_getproplen(node, "translations");
	if (unlikely(n == 0 || n == -1)) {
		prom_printf("prom_mappings: Couldn't get size.\n");
		prom_halt();
	}
	if (unlikely(n > sizeof(prom_trans))) {
		prom_printf("prom_mappings: Size %d is too big.\n", n);
		prom_halt();
	}

	if ((n = prom_getproperty(node, "translations",
				  (char *)&prom_trans[0],
				  sizeof(prom_trans))) == -1) {
		prom_printf("prom_mappings: Couldn't get property.\n");
		prom_halt();
	}

	n = n / sizeof(struct linux_prom_translation);

	ents = n;

	sort(prom_trans, ents, sizeof(struct linux_prom_translation),
	     cmp_ptrans, NULL);

	/* Now kick out all the non-OBP entries.  */
	for (i = 0; i < ents; i++) {
		if (in_obp_range(prom_trans[i].virt))
			break;
	}
	first = i;
	for (; i < ents; i++) {
		if (!in_obp_range(prom_trans[i].virt))
			break;
	}
	last = i;

	for (i = 0; i < (last - first); i++) {
		struct linux_prom_translation *src = &prom_trans[i + first];
		struct linux_prom_translation *dest = &prom_trans[i];

		*dest = *src;
	}
	for (; i < ents; i++) {
		struct linux_prom_translation *dest = &prom_trans[i];
		dest->virt = dest->size = dest->data = 0x0UL;
	}

	prom_trans_ents = last - first;

	if (tlb_type == spitfire) {
		/* Clear diag TTE bits. */
		for (i = 0; i < prom_trans_ents; i++)
			prom_trans[i].data &= ~0x0003fe0000000000UL;
	}

	/* Force execute bit on.  */
	for (i = 0; i < prom_trans_ents; i++)
		prom_trans[i].data |= (tlb_type == hypervisor ?
				       _PAGE_EXEC_4V : _PAGE_EXEC_4U);
}

static void __init hypervisor_tlb_lock(unsigned long vaddr,
				       unsigned long pte,
				       unsigned long mmu)
{
	unsigned long ret = sun4v_mmu_map_perm_addr(vaddr, 0, pte, mmu);

	if (ret != 0) {
		prom_printf("hypervisor_tlb_lock[%lx:%x:%lx:%lx]: "
			    "errors with %lx\n", vaddr, 0, pte, mmu, ret);
		prom_halt();
	}
}

static unsigned long kern_large_tte(unsigned long paddr);

static void __init remap_kernel(void)
{
	unsigned long phys_page, tte_vaddr, tte_data;
	int i, tlb_ent = sparc64_highest_locked_tlbent();

	tte_vaddr = (unsigned long) KERNBASE;
	phys_page = (prom_boot_mapping_phys_low >> ILOG2_4MB) << ILOG2_4MB;
	tte_data = kern_large_tte(phys_page);

	kern_locked_tte_data = tte_data;

	/* Now lock us into the TLBs via Hypervisor or OBP. */
	if (tlb_type == hypervisor) {
		for (i = 0; i < num_kernel_image_mappings; i++) {
			hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU);
			hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU);
			tte_vaddr += 0x400000;
			tte_data += 0x400000;
		}
	} else {
		for (i = 0; i < num_kernel_image_mappings; i++) {
			prom_dtlb_load(tlb_ent - i, tte_data, tte_vaddr);
			prom_itlb_load(tlb_ent - i, tte_data, tte_vaddr);
			tte_vaddr += 0x400000;
			tte_data += 0x400000;
		}
		sparc64_highest_unlocked_tlb_ent = tlb_ent - i;
	}
	if (tlb_type == cheetah_plus) {
		sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 |
					    CTX_CHEETAH_PLUS_NUC);
		sparc64_kern_pri_nuc_bits = CTX_CHEETAH_PLUS_NUC;
		sparc64_kern_sec_context = CTX_CHEETAH_PLUS_CTX0;
	}
}


static void __init inherit_prom_mappings(void)
{
	/* Now fixup OBP's idea about where we really are mapped. */
	printk("Remapping the kernel... ");
	remap_kernel();
	printk("done.\n");
}

void prom_world(int enter)
{
	if (!enter)
		set_fs(get_fs());

	__asm__ __volatile__("flushw");
}

void __flush_dcache_range(unsigned long start, unsigned long end)
{
	unsigned long va;

	if (tlb_type == spitfire) {
		int n = 0;

		for (va = start; va < end; va += 32) {
			spitfire_put_dcache_tag(va & 0x3fe0, 0x0);
			if (++n >= 512)
				break;
		}
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		start = __pa(start);
		end = __pa(end);
		for (va = start; va < end; va += 32)
			__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
					     "membar #Sync"
					     : /* no outputs */
					     : "r" (va),
					       "i" (ASI_DCACHE_INVALIDATE));
	}
}
EXPORT_SYMBOL(__flush_dcache_range);

/* get_new_mmu_context() uses "cache + 1".  */
DEFINE_SPINLOCK(ctx_alloc_lock);
unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1;
#define MAX_CTX_NR	(1UL << CTX_NR_BITS)
#define CTX_BMAP_SLOTS	BITS_TO_LONGS(MAX_CTX_NR)
DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR);

/* Caller does TLB context flushing on local CPU if necessary.
 * The caller also ensures that CTX_VALID(mm->context) is false.
 *
 * We must be careful about boundary cases so that we never
 * let the user have CTX 0 (nucleus) or we ever use a CTX
 * version of zero (and thus NO_CONTEXT would not be caught
 * by version mis-match tests in mmu_context.h).
 *
 * Always invoked with interrupts disabled.
 */
void get_new_mmu_context(struct mm_struct *mm)
{
	unsigned long ctx, new_ctx;
	unsigned long orig_pgsz_bits;
	int new_version;

	spin_lock(&ctx_alloc_lock);
	orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
	ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
	new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
	new_version = 0;
	if (new_ctx >= (1 << CTX_NR_BITS)) {
		new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
		if (new_ctx >= ctx) {
			int i;
			new_ctx = (tlb_context_cache & CTX_VERSION_MASK) +
				CTX_FIRST_VERSION;
			if (new_ctx == 1)
				new_ctx = CTX_FIRST_VERSION;

			/* Don't call memset, for 16 entries that's just
			 * plain silly...
			 */
			mmu_context_bmap[0] = 3;
			mmu_context_bmap[1] = 0;
			mmu_context_bmap[2] = 0;
			mmu_context_bmap[3] = 0;
			for (i = 4; i < CTX_BMAP_SLOTS; i += 4) {
				mmu_context_bmap[i + 0] = 0;
				mmu_context_bmap[i + 1] = 0;
				mmu_context_bmap[i + 2] = 0;
				mmu_context_bmap[i + 3] = 0;
			}
			new_version = 1;
			goto out;
		}
	}
	mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63));
	new_ctx |= (tlb_context_cache & CTX_VERSION_MASK);
out:
	tlb_context_cache = new_ctx;
	mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
	spin_unlock(&ctx_alloc_lock);

	if (unlikely(new_version))
		smp_new_mmu_context_version();
}
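
/* Illustrative layout of the context value handed out above (field
 * widths come from CTX_NR_BITS and CTX_VERSION_MASK, defined elsewhere):
 * the low CTX_NR_BITS hold the context number taken from
 * mmu_context_bmap, and the bits above them hold the generation
 * ("version"), so a context allocated under an older generation fails
 * CTX_VALID() and is reallocated on its next context switch.
 */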

static int numa_enabled = 1;
static int numa_debug;

static int __init early_numa(char *p)
{
	if (!p)
		return 0;

	if (strstr(p, "off"))
		numa_enabled = 0;

	if (strstr(p, "debug"))
		numa_debug = 1;

	return 0;
}
early_param("numa", early_numa);

#define numadbg(f, a...) \
do {	if (numa_debug) \
		printk(KERN_INFO f, ## a); \
} while (0)

static void __init find_ramdisk(unsigned long phys_base)
{
#ifdef CONFIG_BLK_DEV_INITRD
	if (sparc_ramdisk_image || sparc_ramdisk_image64) {
		unsigned long ramdisk_image;

		/* Older versions of the bootloader only supported a
		 * 32-bit physical address for the ramdisk image
		 * location, stored at sparc_ramdisk_image.  Newer
		 * SILO versions set sparc_ramdisk_image to zero and
		 * provide a full 64-bit physical address at
		 * sparc_ramdisk_image64.
		 */
		ramdisk_image = sparc_ramdisk_image;
		if (!ramdisk_image)
			ramdisk_image = sparc_ramdisk_image64;

		/* Another bootloader quirk.  The bootloader normalizes
		 * the physical address to KERNBASE, so we have to
		 * factor that back out and add in the lowest valid
		 * physical page address to get the true physical address.
		 */
		ramdisk_image -= KERNBASE;
		ramdisk_image += phys_base;

		numadbg("Found ramdisk at physical address 0x%lx, size %u\n",
			ramdisk_image, sparc_ramdisk_size);

		initrd_start = ramdisk_image;
		initrd_end = ramdisk_image + sparc_ramdisk_size;

		memblock_reserve(initrd_start, sparc_ramdisk_size);

		initrd_start += PAGE_OFFSET;
		initrd_end += PAGE_OFFSET;
	}
#endif
}

struct node_mem_mask {
	unsigned long mask;
	unsigned long val;
};
static struct node_mem_mask node_masks[MAX_NUMNODES];
static int num_node_masks;

#ifdef CONFIG_NEED_MULTIPLE_NODES

int numa_cpu_lookup_table[NR_CPUS];
cpumask_t numa_cpumask_lookup_table[MAX_NUMNODES];

struct mdesc_mblock {
	u64	base;
	u64	size;
	u64	offset; /* RA-to-PA */
};
static struct mdesc_mblock *mblocks;
static int num_mblocks;
static int find_numa_node_for_addr(unsigned long pa,
				   struct node_mem_mask *pnode_mask);

static unsigned long __init ra_to_pa(unsigned long addr)
{
	int i;

	for (i = 0; i < num_mblocks; i++) {
		struct mdesc_mblock *m = &mblocks[i];

		if (addr >= m->base &&
		    addr < (m->base + m->size)) {
			addr += m->offset;
			break;
		}
	}
	return addr;
}

static int __init find_node(unsigned long addr)
{
	static bool search_mdesc = true;
	static struct node_mem_mask last_mem_mask = { ~0UL, ~0UL };
	static int last_index;
	int i;

	addr = ra_to_pa(addr);
	for (i = 0; i < num_node_masks; i++) {
		struct node_mem_mask *p = &node_masks[i];

		if ((addr & p->mask) == p->val)
			return i;
	}
	/* The following condition has been observed on LDOM guests because
	 * node_masks only contains the best latency mask and value.
	 * An LDOM guest's mdesc can contain a single latency group that
	 * covers multiple address ranges.  Print a warning message only if
	 * the address cannot be found in node_masks nor in the mdesc.
	 */
	if ((search_mdesc) &&
	    ((addr & last_mem_mask.mask) != last_mem_mask.val)) {
		/* find the available node in the mdesc */
		last_index = find_numa_node_for_addr(addr, &last_mem_mask);
		numadbg("find_node: latency group for address 0x%lx is %d\n",
			addr, last_index);
		if ((last_index < 0) || (last_index >= num_node_masks)) {
			/* WARN_ONCE() and use default group 0 */
			WARN_ONCE(1, "find_node: A physical address doesn't match a NUMA node rule. Some physical memory will be owned by node 0.");
			search_mdesc = false;
			last_index = 0;
		}
	}

	return last_index;
}

static u64 __init memblock_nid_range(u64 start, u64 end, int *nid)
{
	*nid = find_node(start);
	start += PAGE_SIZE;
	while (start < end) {
		int n = find_node(start);

		if (n != *nid)
			break;
		start += PAGE_SIZE;
	}

	if (start > end)
		start = end;

	return start;
}
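
/* Usage sketch (boundary address made up): add_node_ranges() below walks
 * each memblock region with this helper.  A region [0x0, 0x80000000)
 * whose owning node changes at 0x40000000 comes back in two pieces: the
 * first call returns 0x40000000 with *nid = 0, and a second call
 * starting there returns 0x80000000 with *nid = 1.
 */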
#endif

/* This must be invoked after performing all of the necessary
 * memblock_set_node() calls for 'nid'.  We need to be able to get
 * correct data from get_pfn_range_for_nid().
 */
static void __init allocate_node_data(int nid)
{
	struct pglist_data *p;
	unsigned long start_pfn, end_pfn;
#ifdef CONFIG_NEED_MULTIPLE_NODES
	unsigned long paddr;

	paddr = memblock_alloc_try_nid(sizeof(struct pglist_data), SMP_CACHE_BYTES, nid);
	if (!paddr) {
		prom_printf("Cannot allocate pglist_data for nid[%d]\n", nid);
		prom_halt();
	}
	NODE_DATA(nid) = __va(paddr);
	memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));

	NODE_DATA(nid)->node_id = nid;
#endif

	p = NODE_DATA(nid);

	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
	p->node_start_pfn = start_pfn;
	p->node_spanned_pages = end_pfn - start_pfn;
}

static void init_node_masks_nonnuma(void)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
	int i;
#endif

	numadbg("Initializing tables for non-numa.\n");

	node_masks[0].mask = node_masks[0].val = 0;
	num_node_masks = 1;

#ifdef CONFIG_NEED_MULTIPLE_NODES
	for (i = 0; i < NR_CPUS; i++)
		numa_cpu_lookup_table[i] = 0;

	cpumask_setall(&numa_cpumask_lookup_table[0]);
#endif
}

#ifdef CONFIG_NEED_MULTIPLE_NODES
struct pglist_data *node_data[MAX_NUMNODES];

EXPORT_SYMBOL(numa_cpu_lookup_table);
EXPORT_SYMBOL(numa_cpumask_lookup_table);
EXPORT_SYMBOL(node_data);

struct mdesc_mlgroup {
	u64	node;
	u64	latency;
	u64	match;
	u64	mask;
};
static struct mdesc_mlgroup *mlgroups;
static int num_mlgroups;

static int scan_pio_for_cfg_handle(struct mdesc_handle *md, u64 pio,
				   u32 cfg_handle)
{
	u64 arc;

	mdesc_for_each_arc(arc, md, pio, MDESC_ARC_TYPE_FWD) {
		u64 target = mdesc_arc_target(md, arc);
		const u64 *val;

		val = mdesc_get_property(md, target,
					 "cfg-handle", NULL);
		if (val && *val == cfg_handle)
			return 0;
	}
	return -ENODEV;
}

static int scan_arcs_for_cfg_handle(struct mdesc_handle *md, u64 grp,
				    u32 cfg_handle)
{
	u64 arc, candidate, best_latency = ~(u64)0;

	candidate = MDESC_NODE_NULL;
	mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
		u64 target = mdesc_arc_target(md, arc);
		const char *name = mdesc_node_name(md, target);
		const u64 *val;

		if (strcmp(name, "pio-latency-group"))
			continue;

		val = mdesc_get_property(md, target, "latency", NULL);
		if (!val)
			continue;

		if (*val < best_latency) {
			candidate = target;
			best_latency = *val;
		}
	}

	if (candidate == MDESC_NODE_NULL)
		return -ENODEV;

	return scan_pio_for_cfg_handle(md, candidate, cfg_handle);
}

int of_node_to_nid(struct device_node *dp)
{
	const struct linux_prom64_registers *regs;
	struct mdesc_handle *md;
	u32 cfg_handle;
	int count, nid;
	u64 grp;

	/* This is the right thing to do on currently supported
	 * SUN4U NUMA platforms as well, as the PCI controller does
	 * not sit behind any particular memory controller.
	 */
	if (!mlgroups)
		return -1;

	regs = of_get_property(dp, "reg", NULL);
	if (!regs)
		return -1;

	cfg_handle = (regs->phys_addr >> 32UL) & 0x0fffffff;

	md = mdesc_grab();

	count = 0;
	nid = -1;
	mdesc_for_each_node_by_name(md, grp, "group") {
		if (!scan_arcs_for_cfg_handle(md, grp, cfg_handle)) {
			nid = count;
			break;
		}
		count++;
	}

	mdesc_release(md);

	return nid;
}

static void __init add_node_ranges(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg) {
		unsigned long size = reg->size;
		unsigned long start, end;

		start = reg->base;
		end = start + size;
		while (start < end) {
			unsigned long this_end;
			int nid;

			this_end = memblock_nid_range(start, end, &nid);

			numadbg("Setting memblock NUMA node nid[%d] "
				"start[%lx] end[%lx]\n",
				nid, start, this_end);

			memblock_set_node(start, this_end - start,
					  &memblock.memory, nid);
			start = this_end;
		}
	}
}

static int __init grab_mlgroups(struct mdesc_handle *md)
{
	unsigned long paddr;
	int count = 0;
	u64 node;

	mdesc_for_each_node_by_name(md, node, "memory-latency-group")
		count++;
	if (!count)
		return -ENOENT;

	paddr = memblock_alloc(count * sizeof(struct mdesc_mlgroup),
			       SMP_CACHE_BYTES);
	if (!paddr)
		return -ENOMEM;

	mlgroups = __va(paddr);
	num_mlgroups = count;

	count = 0;
	mdesc_for_each_node_by_name(md, node, "memory-latency-group") {
		struct mdesc_mlgroup *m = &mlgroups[count++];
		const u64 *val;

		m->node = node;

		val = mdesc_get_property(md, node, "latency", NULL);
		m->latency = *val;
		val = mdesc_get_property(md, node, "address-match", NULL);
		m->match = *val;
		val = mdesc_get_property(md, node, "address-mask", NULL);
		m->mask = *val;

		numadbg("MLGROUP[%d]: node[%llx] latency[%llx] "
			"match[%llx] mask[%llx]\n",
			count - 1, m->node, m->latency, m->match, m->mask);
	}

	return 0;
}

static int __init grab_mblocks(struct mdesc_handle *md)
{
	unsigned long paddr;
	int count = 0;
	u64 node;

	mdesc_for_each_node_by_name(md, node, "mblock")
		count++;
	if (!count)
		return -ENOENT;

	paddr = memblock_alloc(count * sizeof(struct mdesc_mblock),
			       SMP_CACHE_BYTES);
	if (!paddr)
		return -ENOMEM;

	mblocks = __va(paddr);
	num_mblocks = count;

	count = 0;
	mdesc_for_each_node_by_name(md, node, "mblock") {
		struct mdesc_mblock *m = &mblocks[count++];
		const u64 *val;

		val = mdesc_get_property(md, node, "base", NULL);
		m->base = *val;
		val = mdesc_get_property(md, node, "size", NULL);
		m->size = *val;
		val = mdesc_get_property(md, node,
					 "address-congruence-offset", NULL);

		/* The address-congruence-offset property is optional.
		 * Explicitly zero it to identify this case.
		 */
		if (val)
			m->offset = *val;
		else
			m->offset = 0UL;

		numadbg("MBLOCK[%d]: base[%llx] size[%llx] offset[%llx]\n",
			count - 1, m->base, m->size, m->offset);
	}

	return 0;
}

static void __init numa_parse_mdesc_group_cpus(struct mdesc_handle *md,
					       u64 grp, cpumask_t *mask)
{
	u64 arc;

	cpumask_clear(mask);

	mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_BACK) {
		u64 target = mdesc_arc_target(md, arc);
		const char *name = mdesc_node_name(md, target);
		const u64 *id;

		if (strcmp(name, "cpu"))
			continue;
		id = mdesc_get_property(md, target, "id", NULL);
		if (*id < nr_cpu_ids)
			cpumask_set_cpu(*id, mask);
	}
}

static struct mdesc_mlgroup * __init find_mlgroup(u64 node)
{
	int i;

	for (i = 0; i < num_mlgroups; i++) {
		struct mdesc_mlgroup *m = &mlgroups[i];
		if (m->node == node)
			return m;
	}
	return NULL;
}

int __node_distance(int from, int to)
{
	if ((from >= MAX_NUMNODES) || (to >= MAX_NUMNODES)) {
		pr_warn("Returning default NUMA distance value for %d->%d\n",
			from, to);
		return (from == to) ? LOCAL_DISTANCE : REMOTE_DISTANCE;
	}
	return numa_latency[from][to];
}

static int find_numa_node_for_addr(unsigned long pa,
				   struct node_mem_mask *pnode_mask)
{
	struct mdesc_handle *md = mdesc_grab();
	u64 node, arc;
	int i = 0;

	node = mdesc_node_by_name(md, MDESC_NODE_NULL, "latency-groups");
	if (node == MDESC_NODE_NULL)
		goto out;

	mdesc_for_each_node_by_name(md, node, "group") {
		mdesc_for_each_arc(arc, md, node, MDESC_ARC_TYPE_FWD) {
			u64 target = mdesc_arc_target(md, arc);
			struct mdesc_mlgroup *m = find_mlgroup(target);

			if (!m)
				continue;
			if ((pa & m->mask) == m->match) {
				if (pnode_mask) {
					pnode_mask->mask = m->mask;
					pnode_mask->val = m->match;
				}
				mdesc_release(md);
				return i;
			}
		}
		i++;
	}

out:
	mdesc_release(md);
	return -1;
}

static int __init find_best_numa_node_for_mlgroup(struct mdesc_mlgroup *grp)
{
	int i;

	for (i = 0; i < MAX_NUMNODES; i++) {
		struct node_mem_mask *n = &node_masks[i];

		if ((grp->mask == n->mask) && (grp->match == n->val))
			break;
	}
	return i;
}

static void __init find_numa_latencies_for_group(struct mdesc_handle *md,
						 u64 grp, int index)
{
	u64 arc;

	mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
		int tnode;
		u64 target = mdesc_arc_target(md, arc);
		struct mdesc_mlgroup *m = find_mlgroup(target);

		if (!m)
			continue;
		tnode = find_best_numa_node_for_mlgroup(m);
		if (tnode == MAX_NUMNODES)
			continue;
		numa_latency[index][tnode] = m->latency;
	}
}

static int __init numa_attach_mlgroup(struct mdesc_handle *md, u64 grp,
				      int index)
{
	struct mdesc_mlgroup *candidate = NULL;
	u64 arc, best_latency = ~(u64)0;
	struct node_mem_mask *n;

	mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
		u64 target = mdesc_arc_target(md, arc);
		struct mdesc_mlgroup *m = find_mlgroup(target);
		if (!m)
			continue;
		if (m->latency < best_latency) {
			candidate = m;
			best_latency = m->latency;
		}
	}
	if (!candidate)
		return -ENOENT;

	if (num_node_masks != index) {
		printk(KERN_ERR "Inconsistent NUMA state, "
		       "index[%d] != num_node_masks[%d]\n",
		       index, num_node_masks);
		return -EINVAL;
	}

	n = &node_masks[num_node_masks++];

	n->mask = candidate->mask;
	n->val = candidate->match;

	numadbg("NUMA NODE[%d]: mask[%lx] val[%lx] (latency[%llx])\n",
		index, n->mask, n->val, candidate->latency);

	return 0;
}

static int __init numa_parse_mdesc_group(struct mdesc_handle *md, u64 grp,
					 int index)
{
	cpumask_t mask;
	int cpu;

	numa_parse_mdesc_group_cpus(md, grp, &mask);

	for_each_cpu(cpu, &mask)
		numa_cpu_lookup_table[cpu] = index;
	cpumask_copy(&numa_cpumask_lookup_table[index], &mask);

	if (numa_debug) {
		printk(KERN_INFO "NUMA GROUP[%d]: cpus [ ", index);
		for_each_cpu(cpu, &mask)
			printk("%d ", cpu);
		printk("]\n");
	}

	return numa_attach_mlgroup(md, grp, index);
}

static int __init numa_parse_mdesc(void)
{
	struct mdesc_handle *md = mdesc_grab();
	int i, j, err, count;
	u64 node;

	node = mdesc_node_by_name(md, MDESC_NODE_NULL, "latency-groups");
	if (node == MDESC_NODE_NULL) {
		mdesc_release(md);
		return -ENOENT;
	}

	err = grab_mblocks(md);
	if (err < 0)
		goto out;

	err = grab_mlgroups(md);
	if (err < 0)
		goto out;

	count = 0;
	mdesc_for_each_node_by_name(md, node, "group") {
		err = numa_parse_mdesc_group(md, node, count);
		if (err < 0)
			break;
		count++;
	}

	count = 0;
	mdesc_for_each_node_by_name(md, node, "group") {
		find_numa_latencies_for_group(md, node, count);
		count++;
	}

	/* Normalize numa latency matrix according to ACPI SLIT spec. */
	for (i = 0; i < MAX_NUMNODES; i++) {
		u64 self_latency = numa_latency[i][i];

		for (j = 0; j < MAX_NUMNODES; j++) {
			numa_latency[i][j] =
				(numa_latency[i][j] * LOCAL_DISTANCE) /
				self_latency;
		}
	}

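	/* Worked example of the normalization above (raw latencies made
	 * up): with LOCAL_DISTANCE == 10, a raw local latency of 100 and
	 * a raw remote latency of 210 become 10 and 21, i.e. the matrix
	 * ends up in the distance units an ACPI SLIT table uses.
	 */
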
	add_node_ranges();

	for (i = 0; i < num_node_masks; i++) {
		allocate_node_data(i);
		node_set_online(i);
	}

	err = 0;
out:
	mdesc_release(md);
	return err;
}

static int __init numa_parse_jbus(void)
{
	unsigned long cpu, index;

	/* NUMA node id is encoded in bits 36 and higher, and there is
	 * a 1-to-1 mapping from CPU ID to NUMA node ID.
	 */
	index = 0;
	for_each_present_cpu(cpu) {
		numa_cpu_lookup_table[cpu] = index;
		cpumask_copy(&numa_cpumask_lookup_table[index], cpumask_of(cpu));
		node_masks[index].mask = ~((1UL << 36UL) - 1UL);
		node_masks[index].val = cpu << 36UL;

		index++;
	}
	num_node_masks = index;

	add_node_ranges();

	for (index = 0; index < num_node_masks; index++) {
		allocate_node_data(index);
		node_set_online(index);
	}

	return 0;
}
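
/* Worked example of the J-bus encoding above (addresses made up): for
 * cpu 1, node_masks[1].val = 1UL << 36 = 0x1000000000 and
 * node_masks[1].mask = ~((1UL << 36) - 1), so any physical address in
 * [0x1000000000, 0x2000000000) satisfies (pa & mask) == val and is
 * owned by node 1.
 */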

static int __init numa_parse_sun4u(void)
{
	if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		unsigned long ver;

		__asm__ ("rdpr %%ver, %0" : "=r" (ver));
		if ((ver >> 32UL) == __JALAPENO_ID ||
		    (ver >> 32UL) == __SERRANO_ID)
			return numa_parse_jbus();
	}
	return -1;
}

static int __init bootmem_init_numa(void)
{
	int i, j;
	int err = -1;

	numadbg("bootmem_init_numa()\n");

	/* Some sane defaults for numa latency values */
	for (i = 0; i < MAX_NUMNODES; i++) {
		for (j = 0; j < MAX_NUMNODES; j++)
			numa_latency[i][j] = (i == j) ?
				LOCAL_DISTANCE : REMOTE_DISTANCE;
	}

	if (numa_enabled) {
		if (tlb_type == hypervisor)
			err = numa_parse_mdesc();
		else
			err = numa_parse_sun4u();
	}
	return err;
}

#else

static int bootmem_init_numa(void)
{
	return -1;
}

#endif

static void __init bootmem_init_nonnuma(void)
{
	unsigned long top_of_ram = memblock_end_of_DRAM();
	unsigned long total_ram = memblock_phys_mem_size();

	numadbg("bootmem_init_nonnuma()\n");

	printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
	       top_of_ram, total_ram);
	printk(KERN_INFO "Memory hole size: %ldMB\n",
	       (top_of_ram - total_ram) >> 20);

	init_node_masks_nonnuma();
	memblock_set_node(0, (phys_addr_t)ULLONG_MAX, &memblock.memory, 0);
	allocate_node_data(0);
	node_set_online(0);
}

static unsigned long __init bootmem_init(unsigned long phys_base)
{
	unsigned long end_pfn;

	end_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	max_pfn = max_low_pfn = end_pfn;
	min_low_pfn = (phys_base >> PAGE_SHIFT);

	if (bootmem_init_numa() < 0)
		bootmem_init_nonnuma();

	/* Dump memblock with node info. */
	memblock_dump_all();

	/* XXX cpu notifier XXX */

	sparse_memory_present_with_active_regions(MAX_NUMNODES);
	sparse_init();

	return end_pfn;
}

static struct linux_prom64_registers pall[MAX_BANKS] __initdata;
static int pall_ents __initdata;

static unsigned long max_phys_bits = 40;

bool kern_addr_valid(unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if ((long)addr < 0L) {
		unsigned long pa = __pa(addr);

		if ((addr >> max_phys_bits) != 0UL)
			return false;

		return pfn_valid(pa >> PAGE_SHIFT);
	}

	if (addr >= (unsigned long) KERNBASE &&
	    addr < (unsigned long)&_end)
		return true;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd))
		return 0;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return 0;

	if (pud_large(*pud))
		return pfn_valid(pud_pfn(*pud));

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return 0;

	if (pmd_large(*pmd))
		return pfn_valid(pmd_pfn(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte))
		return 0;

	return pfn_valid(pte_pfn(*pte));
}
EXPORT_SYMBOL(kern_addr_valid);

static unsigned long __ref kernel_map_hugepud(unsigned long vstart,
					      unsigned long vend,
					      pud_t *pud)
{
	const unsigned long mask16gb = (1UL << 34) - 1UL;
	u64 pte_val = vstart;

	/* Each PUD is 8GB */
	if ((vstart & mask16gb) ||
	    (vend - vstart <= mask16gb)) {
		pte_val ^= kern_linear_pte_xor[2];
		pud_val(*pud) = pte_val | _PAGE_PUD_HUGE;

		return vstart + PUD_SIZE;
	}

	pte_val ^= kern_linear_pte_xor[3];
	pte_val |= _PAGE_PUD_HUGE;

	vend = vstart + mask16gb + 1UL;
	while (vstart < vend) {
		pud_val(*pud) = pte_val;

		pte_val += PUD_SIZE;
		vstart += PUD_SIZE;
		pud++;
	}
	return vstart;
}

static bool kernel_can_map_hugepud(unsigned long vstart, unsigned long vend,
				   bool guard)
{
	if (guard && !(vstart & ~PUD_MASK) && (vend - vstart) >= PUD_SIZE)
		return true;

	return false;
}

static unsigned long __ref kernel_map_hugepmd(unsigned long vstart,
					      unsigned long vend,
					      pmd_t *pmd)
{
	const unsigned long mask256mb = (1UL << 28) - 1UL;
	const unsigned long mask2gb = (1UL << 31) - 1UL;
	u64 pte_val = vstart;

	/* Each PMD is 8MB */
	if ((vstart & mask256mb) ||
	    (vend - vstart <= mask256mb)) {
		pte_val ^= kern_linear_pte_xor[0];
		pmd_val(*pmd) = pte_val | _PAGE_PMD_HUGE;

		return vstart + PMD_SIZE;
	}

	if ((vstart & mask2gb) ||
	    (vend - vstart <= mask2gb)) {
		pte_val ^= kern_linear_pte_xor[1];
		pte_val |= _PAGE_PMD_HUGE;
		vend = vstart + mask256mb + 1UL;
	} else {
		pte_val ^= kern_linear_pte_xor[2];
		pte_val |= _PAGE_PMD_HUGE;
		vend = vstart + mask2gb + 1UL;
	}

	while (vstart < vend) {
		pmd_val(*pmd) = pte_val;

		pte_val += PMD_SIZE;
		vstart += PMD_SIZE;
		pmd++;
	}

	return vstart;
}
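
/* Worked example for kernel_map_hugepmd() above (addresses made up):
 * a 256MB-aligned 256MB range such as [0x10000000, 0x20000000) fails
 * the first test, passes the second, and is filled with 256MB TTEs
 * (kern_linear_pte_xor[1]) in PMD_SIZE steps, while an unaligned or
 * short range falls back to the 4MB TTE in kern_linear_pte_xor[0].
 */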

static bool kernel_can_map_hugepmd(unsigned long vstart, unsigned long vend,
				   bool guard)
{
	if (guard && !(vstart & ~PMD_MASK) && (vend - vstart) >= PMD_SIZE)
		return true;

	return false;
}

static unsigned long __ref kernel_map_range(unsigned long pstart,
					    unsigned long pend, pgprot_t prot,
					    bool use_huge)
{
	unsigned long vstart = PAGE_OFFSET + pstart;
	unsigned long vend = PAGE_OFFSET + pend;
	unsigned long alloc_bytes = 0UL;

	if ((vstart & ~PAGE_MASK) || (vend & ~PAGE_MASK)) {
		prom_printf("kernel_map: Unaligned physmem[%lx:%lx]\n",
			    vstart, vend);
		prom_halt();
	}

	while (vstart < vend) {
		unsigned long this_end, paddr = __pa(vstart);
		pgd_t *pgd = pgd_offset_k(vstart);
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		if (pgd_none(*pgd)) {
			pud_t *new;

			new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
			alloc_bytes += PAGE_SIZE;
			pgd_populate(&init_mm, pgd, new);
		}
		pud = pud_offset(pgd, vstart);
		if (pud_none(*pud)) {
			pmd_t *new;

			if (kernel_can_map_hugepud(vstart, vend, use_huge)) {
				vstart = kernel_map_hugepud(vstart, vend, pud);
				continue;
			}
			new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
			alloc_bytes += PAGE_SIZE;
			pud_populate(&init_mm, pud, new);
		}

		pmd = pmd_offset(pud, vstart);
		if (pmd_none(*pmd)) {
			pte_t *new;

			if (kernel_can_map_hugepmd(vstart, vend, use_huge)) {
				vstart = kernel_map_hugepmd(vstart, vend, pmd);
				continue;
			}
			new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
			alloc_bytes += PAGE_SIZE;
			pmd_populate_kernel(&init_mm, pmd, new);
		}

		pte = pte_offset_kernel(pmd, vstart);
		this_end = (vstart + PMD_SIZE) & PMD_MASK;
		if (this_end > vend)
			this_end = vend;

		while (vstart < this_end) {
			pte_val(*pte) = (paddr | pgprot_val(prot));

			vstart += PAGE_SIZE;
			paddr += PAGE_SIZE;
			pte++;
		}
	}

	return alloc_bytes;
}

static void __init flush_all_kernel_tsbs(void)
{
	int i;

	for (i = 0; i < KERNEL_TSB_NENTRIES; i++) {
		struct tsb *ent = &swapper_tsb[i];

		ent->tag = (1UL << TSB_TAG_INVALID_BIT);
	}
#ifndef CONFIG_DEBUG_PAGEALLOC
	for (i = 0; i < KERNEL_TSB4M_NENTRIES; i++) {
		struct tsb *ent = &swapper_4m_tsb[i];

		ent->tag = (1UL << TSB_TAG_INVALID_BIT);
	}
#endif
}

extern unsigned int kvmap_linear_patch[1];

static void __init kernel_physical_mapping_init(void)
{
	unsigned long i, mem_alloced = 0UL;
	bool use_huge = true;

#ifdef CONFIG_DEBUG_PAGEALLOC
	use_huge = false;
#endif
	for (i = 0; i < pall_ents; i++) {
		unsigned long phys_start, phys_end;

		phys_start = pall[i].phys_addr;
		phys_end = phys_start + pall[i].reg_size;

		mem_alloced += kernel_map_range(phys_start, phys_end,
						PAGE_KERNEL, use_huge);
	}

	printk("Allocated %ld bytes for kernel page tables.\n",
	       mem_alloced);

	kvmap_linear_patch[0] = 0x01000000; /* nop */
	flushi(&kvmap_linear_patch[0]);

	flush_all_kernel_tsbs();

	__flush_tlb_all();
}

#ifdef CONFIG_DEBUG_PAGEALLOC
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	unsigned long phys_start = page_to_pfn(page) << PAGE_SHIFT;
	unsigned long phys_end = phys_start + (numpages * PAGE_SIZE);

	kernel_map_range(phys_start, phys_end,
			 (enable ? PAGE_KERNEL : __pgprot(0)), false);

	flush_tsb_kernel_range(PAGE_OFFSET + phys_start,
			       PAGE_OFFSET + phys_end);

	/* we should perform an IPI and flush all tlbs,
	 * but that can deadlock->flush only current cpu.
	 */
	__flush_tlb_kernel_range(PAGE_OFFSET + phys_start,
				 PAGE_OFFSET + phys_end);
}
#endif

unsigned long __init find_ecache_flush_span(unsigned long size)
{
	int i;

	for (i = 0; i < pavail_ents; i++) {
		if (pavail[i].reg_size >= size)
			return pavail[i].phys_addr;
	}

	return ~0UL;
}

unsigned long PAGE_OFFSET;
EXPORT_SYMBOL(PAGE_OFFSET);

unsigned long VMALLOC_END = 0x0000010000000000UL;
EXPORT_SYMBOL(VMALLOC_END);

unsigned long sparc64_va_hole_top =    0xfffff80000000000UL;
unsigned long sparc64_va_hole_bottom = 0x0000080000000000UL;

static void __init setup_page_offset(void)
{
	if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		/* Cheetah/Panther support a full 64-bit virtual
		 * address, so we can use all that our page tables
		 * support.
		 */
		sparc64_va_hole_top =    0xfff0000000000000UL;
		sparc64_va_hole_bottom = 0x0010000000000000UL;

		max_phys_bits = 42;
	} else if (tlb_type == hypervisor) {
		switch (sun4v_chip_type) {
		case SUN4V_CHIP_NIAGARA1:
		case SUN4V_CHIP_NIAGARA2:
			/* T1 and T2 support 48-bit virtual addresses.  */
			sparc64_va_hole_top =    0xffff800000000000UL;
			sparc64_va_hole_bottom = 0x0000800000000000UL;

			max_phys_bits = 39;
			break;
		case SUN4V_CHIP_NIAGARA3:
			/* T3 supports 48-bit virtual addresses.  */
			sparc64_va_hole_top =    0xffff800000000000UL;
			sparc64_va_hole_bottom = 0x0000800000000000UL;

			max_phys_bits = 43;
			break;
		case SUN4V_CHIP_NIAGARA4:
		case SUN4V_CHIP_NIAGARA5:
		case SUN4V_CHIP_SPARC64X:
		case SUN4V_CHIP_SPARC_M6:
			/* T4 and later support 52-bit virtual addresses.  */
			sparc64_va_hole_top =    0xfff8000000000000UL;
			sparc64_va_hole_bottom = 0x0008000000000000UL;
			max_phys_bits = 47;
			break;
		case SUN4V_CHIP_SPARC_M7:
		case SUN4V_CHIP_SPARC_SN:
		default:
			/* M7 and later support 52-bit virtual addresses.  */
			sparc64_va_hole_top =    0xfff8000000000000UL;
			sparc64_va_hole_bottom = 0x0008000000000000UL;
			max_phys_bits = 49;
			break;
		}
	}

	if (max_phys_bits > MAX_PHYS_ADDRESS_BITS) {
		prom_printf("MAX_PHYS_ADDRESS_BITS is too small, need %lu\n",
			    max_phys_bits);
		prom_halt();
	}

	PAGE_OFFSET = sparc64_va_hole_top;
	VMALLOC_END = ((sparc64_va_hole_bottom >> 1) +
		       (sparc64_va_hole_bottom >> 2));

	pr_info("MM: PAGE_OFFSET is 0x%016lx (max_phys_bits == %lu)\n",
		PAGE_OFFSET, max_phys_bits);
	pr_info("MM: VMALLOC [0x%016lx --> 0x%016lx]\n",
		VMALLOC_START, VMALLOC_END);
	pr_info("MM: VMEMMAP [0x%016lx --> 0x%016lx]\n",
		VMEMMAP_BASE, VMEMMAP_BASE << 1);
}

static void __init tsb_phys_patch(void)
{
	struct tsb_ldquad_phys_patch_entry *pquad;
	struct tsb_phys_patch_entry *p;

	pquad = &__tsb_ldquad_phys_patch;
	while (pquad < &__tsb_ldquad_phys_patch_end) {
		unsigned long addr = pquad->addr;

		if (tlb_type == hypervisor)
			*(unsigned int *) addr = pquad->sun4v_insn;
		else
			*(unsigned int *) addr = pquad->sun4u_insn;
		wmb();
		__asm__ __volatile__("flush	%0"
				     : /* no outputs */
				     : "r" (addr));

		pquad++;
	}

	p = &__tsb_phys_patch;
	while (p < &__tsb_phys_patch_end) {
		unsigned long addr = p->addr;

		*(unsigned int *) addr = p->insn;
		wmb();
		__asm__ __volatile__("flush	%0"
				     : /* no outputs */
				     : "r" (addr));

		p++;
	}
}

/* Don't mark as init, we give this to the Hypervisor.  */
#ifndef CONFIG_DEBUG_PAGEALLOC
#define NUM_KTSB_DESCR	2
#else
#define NUM_KTSB_DESCR	1
#endif
static struct hv_tsb_descr ktsb_descr[NUM_KTSB_DESCR];

/* The swapper TSBs are loaded with a base sequence of:
 *
 *	sethi	%uhi(SYMBOL), REG1
 *	sethi	%hi(SYMBOL), REG2
 *	or	REG1, %ulo(SYMBOL), REG1
 *	or	REG2, %lo(SYMBOL), REG2
 *	sllx	REG1, 32, REG1
 *	or	REG1, REG2, REG1
 *
 * When we use physical addressing for the TSB accesses, we patch the
 * first four instructions in the above sequence.
 */

static void patch_one_ktsb_phys(unsigned int *start, unsigned int *end, unsigned long pa)
{
	unsigned long high_bits, low_bits;

	high_bits = (pa >> 32) & 0xffffffff;
	low_bits = (pa >> 0) & 0xffffffff;

	while (start < end) {
		unsigned int *ia = (unsigned int *)(unsigned long)*start;

		ia[0] = (ia[0] & ~0x3fffff) | (high_bits >> 10);
		__asm__ __volatile__("flush	%0" : : "r" (ia));

		ia[1] = (ia[1] & ~0x3fffff) | (low_bits >> 10);
		__asm__ __volatile__("flush	%0" : : "r" (ia + 1));

		ia[2] = (ia[2] & ~0x1fff) | (high_bits & 0x3ff);
		__asm__ __volatile__("flush	%0" : : "r" (ia + 2));

		ia[3] = (ia[3] & ~0x1fff) | (low_bits & 0x3ff);
		__asm__ __volatile__("flush	%0" : : "r" (ia + 3));

		start++;
	}
}
1929
1930static void ktsb_phys_patch(void)
1931{
1932 extern unsigned int __swapper_tsb_phys_patch;
1933 extern unsigned int __swapper_tsb_phys_patch_end;
9076d0e7
DM
1934 unsigned long ktsb_pa;
1935
1936 ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE);
1937 patch_one_ktsb_phys(&__swapper_tsb_phys_patch,
1938 &__swapper_tsb_phys_patch_end, ktsb_pa);
1939#ifndef CONFIG_DEBUG_PAGEALLOC
0785a8e8
DM
1940 {
1941 extern unsigned int __swapper_4m_tsb_phys_patch;
1942 extern unsigned int __swapper_4m_tsb_phys_patch_end;
9076d0e7
DM
1943 ktsb_pa = (kern_base +
1944 ((unsigned long)&swapper_4m_tsb[0] - KERNBASE));
1945 patch_one_ktsb_phys(&__swapper_4m_tsb_phys_patch,
1946 &__swapper_4m_tsb_phys_patch_end, ktsb_pa);
0785a8e8 1947 }
9076d0e7
DM
1948#endif
1949}
1950
490384e7
DM
1951static void __init sun4v_ktsb_init(void)
1952{
1953 unsigned long ktsb_pa;
1954
d7744a09 1955 /* First KTSB for PAGE_SIZE mappings. */
490384e7
DM
1956 ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE);
1957
1958 switch (PAGE_SIZE) {
1959 case 8 * 1024:
1960 default:
1961 ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_8K;
1962 ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_8K;
1963 break;
1964
1965 case 64 * 1024:
1966 ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_64K;
1967 ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_64K;
1968 break;
1969
1970 case 512 * 1024:
1971 ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_512K;
1972 ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_512K;
1973 break;
1974
1975 case 4 * 1024 * 1024:
1976 ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_4MB;
1977 ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_4MB;
1978 break;
6cb79b3f 1979 }
490384e7 1980
3f19a84e 1981 ktsb_descr[0].assoc = 1;
490384e7
DM
1982 ktsb_descr[0].num_ttes = KERNEL_TSB_NENTRIES;
1983 ktsb_descr[0].ctx_idx = 0;
1984 ktsb_descr[0].tsb_base = ktsb_pa;
1985 ktsb_descr[0].resv = 0;
1986
d1acb421 1987#ifndef CONFIG_DEBUG_PAGEALLOC
4f93d21d 1988 /* Second KTSB for 4MB/256MB/2GB/16GB mappings. */
d7744a09
DM
1989 ktsb_pa = (kern_base +
1990 ((unsigned long)&swapper_4m_tsb[0] - KERNBASE));
1991
1992 ktsb_descr[1].pgsz_idx = HV_PGSZ_IDX_4MB;
c69ad0a3
DM
1993 ktsb_descr[1].pgsz_mask = ((HV_PGSZ_MASK_4MB |
1994 HV_PGSZ_MASK_256MB |
1995 HV_PGSZ_MASK_2GB |
1996 HV_PGSZ_MASK_16GB) &
1997 cpu_pgsz_mask);
d7744a09
DM
1998 ktsb_descr[1].assoc = 1;
1999 ktsb_descr[1].num_ttes = KERNEL_TSB4M_NENTRIES;
2000 ktsb_descr[1].ctx_idx = 0;
2001 ktsb_descr[1].tsb_base = ktsb_pa;
2002 ktsb_descr[1].resv = 0;
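	/* The pgsz_mask above is ANDed with cpu_pgsz_mask so that only
	 * the large page sizes this cpu actually supports are advertised
	 * to the hypervisor when the descriptors are registered.
	 */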
d1acb421 2003#endif
490384e7
DM
2004}
2005
2066aadd 2006void sun4v_ktsb_register(void)
490384e7 2007{
7db35f31 2008 unsigned long pa, ret;
490384e7
DM
2009
2010 pa = kern_base + ((unsigned long)&ktsb_descr[0] - KERNBASE);
2011
7db35f31
DM
2012 ret = sun4v_mmu_tsb_ctx0(NUM_KTSB_DESCR, pa);
2013 if (ret != 0) {
2014 prom_printf("hypervisor_mmu_tsb_ctx0[%lx]: "
2015 "errors with %lx\n", pa, ret);
2016 prom_halt();
2017 }
490384e7
DM
2018}
2019
c69ad0a3
DM
2020static void __init sun4u_linear_pte_xor_finalize(void)
2021{
2022#ifndef CONFIG_DEBUG_PAGEALLOC
2023 /* This is where we would add Panther support for
2024 * 32MB and 256MB pages.
2025 */
2026#endif
2027}
2028
2029static void __init sun4v_linear_pte_xor_finalize(void)
2030{
494e5b6f
KA
2031 unsigned long pagecv_flag;
2032
2033 /* Bit 9 of TTE is no longer CV bit on M7 processor and it instead
2034 * enables MCD error. Do not set bit 9 on M7 processor.
2035 */
2036 switch (sun4v_chip_type) {
2037 case SUN4V_CHIP_SPARC_M7:
c5b8b5be 2038 case SUN4V_CHIP_SPARC_SN:
494e5b6f
KA
2039 pagecv_flag = 0x00;
2040 break;
2041 default:
2042 pagecv_flag = _PAGE_CV_4V;
2043 break;
2044 }
c69ad0a3
DM
2045#ifndef CONFIG_DEBUG_PAGEALLOC
2046 if (cpu_pgsz_mask & HV_PGSZ_MASK_256MB) {
2047 kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZ256MB_4V) ^
922631b9 2048 PAGE_OFFSET;
494e5b6f 2049 kern_linear_pte_xor[1] |= (_PAGE_CP_4V | pagecv_flag |
c69ad0a3
DM
2050 _PAGE_P_4V | _PAGE_W_4V);
2051 } else {
2052 kern_linear_pte_xor[1] = kern_linear_pte_xor[0];
2053 }
2054
2055 if (cpu_pgsz_mask & HV_PGSZ_MASK_2GB) {
2056 kern_linear_pte_xor[2] = (_PAGE_VALID | _PAGE_SZ2GB_4V) ^
922631b9 2057 PAGE_OFFSET;
494e5b6f 2058 kern_linear_pte_xor[2] |= (_PAGE_CP_4V | pagecv_flag |
c69ad0a3
DM
2059 _PAGE_P_4V | _PAGE_W_4V);
2060 } else {
2061 kern_linear_pte_xor[2] = kern_linear_pte_xor[1];
2062 }
2063
2064 if (cpu_pgsz_mask & HV_PGSZ_MASK_16GB) {
2065 kern_linear_pte_xor[3] = (_PAGE_VALID | _PAGE_SZ16GB_4V) ^
922631b9 2066 PAGE_OFFSET;
494e5b6f 2067 kern_linear_pte_xor[3] |= (_PAGE_CP_4V | pagecv_flag |
c69ad0a3
DM
2068 _PAGE_P_4V | _PAGE_W_4V);
2069 } else {
2070 kern_linear_pte_xor[3] = kern_linear_pte_xor[2];
2071 }
2072#endif
2073}
2074
1da177e4
LT
2075/* paging_init() sets up the page tables */
2076
1da177e4 2077static unsigned long last_valid_pfn;
ac55c768 2078
c4bce90e
DM
2079static void sun4u_pgprot_init(void);
2080static void sun4v_pgprot_init(void);
2081
7c21d533 2082static phys_addr_t __init available_memory(void)
2083{
2084 phys_addr_t available = 0ULL;
2085 phys_addr_t pa_start, pa_end;
2086 u64 i;
2087
fc6daaf9
TL
2088 for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &pa_start,
2089 &pa_end, NULL)
7c21d533 2090 available = available + (pa_end - pa_start);
2091
2092 return available;
2093}
2094
494e5b6f
KA
2095#define _PAGE_CACHE_4U (_PAGE_CP_4U | _PAGE_CV_4U)
2096#define _PAGE_CACHE_4V (_PAGE_CP_4V | _PAGE_CV_4V)
2097#define __DIRTY_BITS_4U (_PAGE_MODIFIED_4U | _PAGE_WRITE_4U | _PAGE_W_4U)
2098#define __DIRTY_BITS_4V (_PAGE_MODIFIED_4V | _PAGE_WRITE_4V | _PAGE_W_4V)
2099#define __ACCESS_BITS_4U (_PAGE_ACCESSED_4U | _PAGE_READ_4U | _PAGE_R)
2100#define __ACCESS_BITS_4V (_PAGE_ACCESSED_4V | _PAGE_READ_4V | _PAGE_R)
2101
7c21d533 2102/* We need to exclude reserved regions. This exclusion will include
 2103 * vmlinux and initrd. To be more precise, the initrd size could be used to
2104 * compute a new lower limit because it is freed later during initialization.
2105 */
2106static void __init reduce_memory(phys_addr_t limit_ram)
2107{
2108 phys_addr_t avail_ram = available_memory();
2109 phys_addr_t pa_start, pa_end;
2110 u64 i;
2111
2112 if (limit_ram >= avail_ram)
2113 return;
2114
fc6daaf9
TL
2115 for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &pa_start,
2116 &pa_end, NULL) {
7c21d533 2117 phys_addr_t region_size = pa_end - pa_start;
2118 phys_addr_t clip_start = pa_start;
2119
2120 avail_ram = avail_ram - region_size;
2121 /* Are we consuming too much? */
2122 if (avail_ram < limit_ram) {
2123 phys_addr_t give_back = limit_ram - avail_ram;
2124
2125 region_size = region_size - give_back;
2126 clip_start = clip_start + give_back;
2127 }
2128
2129 memblock_remove(clip_start, region_size);
2130
2131 if (avail_ram <= limit_ram)
2132 break;
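		/* memblock_remove() above invalidates the range iteration,
		 * so restart the walk from the beginning.
		 */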
2133 i = 0UL;
2134 }
2135}
2136
1da177e4
LT
2137void __init paging_init(void)
2138{
919ee677 2139 unsigned long end_pfn, shift, phys_base;
0836a0eb
DM
2140 unsigned long real_end, i;
2141
b2d43834
DM
2142 setup_page_offset();
2143
22adb358
DM
 2144 /* These build time checks make sure that the dcache_dirty_cpu()
2145 * page->flags usage will work.
2146 *
2147 * When a page gets marked as dcache-dirty, we store the
2148 * cpu number starting at bit 32 in the page->flags. Also,
2149 * functions like clear_dcache_dirty_cpu use the cpu mask
2150 * in 13-bit signed-immediate instruction fields.
2151 */
9223b419
CL
2152
2153 /*
2154 * Page flags must not reach into upper 32 bits that are used
2155 * for the cpu number
2156 */
2157 BUILD_BUG_ON(NR_PAGEFLAGS > 32);
2158
2159 /*
2160 * The bit fields placed in the high range must not reach below
2161 * the 32 bit boundary. Otherwise we cannot place the cpu field
2162 * at the 32 bit boundary.
2163 */
22adb358 2164 BUILD_BUG_ON(SECTIONS_WIDTH + NODES_WIDTH + ZONES_WIDTH +
9223b419
CL
2165 ilog2(roundup_pow_of_two(NR_CPUS)) > 32);
2166
22adb358
DM
2167 BUILD_BUG_ON(NR_CPUS > 4096);
2168
0eef331a 2169 kern_base = (prom_boot_mapping_phys_low >> ILOG2_4MB) << ILOG2_4MB;
481295f9
DM
2170 kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;
2171
d7744a09 2172 /* Invalidate both kernel TSBs. */
8b234274 2173 memset(swapper_tsb, 0x40, sizeof(swapper_tsb));
d1acb421 2174#ifndef CONFIG_DEBUG_PAGEALLOC
d7744a09 2175 memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb));
d1acb421 2176#endif
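	/* The 0x40 fill above sets the invalid bit (bit 46,
	 * TSB_TAG_INVALID_BIT in asm/tsb.h) in every TSB tag, so all
	 * entries start out invalid.
	 */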
8b234274 2177
494e5b6f
KA
 2178 /* The TTE.cv bit on sparc v9 occupies the same position as the
 2179 * TTE.mcde bit on the M7 processor, a conflicting usage of the
 2180 * same bit. Enabling TTE.cv on M7 would turn on Memory Corruption
 2181 * Detection errors on all pages, and this would lead to problems
 2182 * later. The kernel does not run with MCD enabled, and hence the
 2183 * rest of the required steps to fully configure memory corruption
 2184 * detection are not taken. We need to ensure TTE.mcde is not set
 2185 * on the M7 processor. Compute the value of the cacheability flag
 2186 * for later use, taking this into consideration.
 2187 */
2188 switch (sun4v_chip_type) {
2189 case SUN4V_CHIP_SPARC_M7:
c5b8b5be 2190 case SUN4V_CHIP_SPARC_SN:
494e5b6f
KA
2191 page_cache4v_flag = _PAGE_CP_4V;
2192 break;
2193 default:
2194 page_cache4v_flag = _PAGE_CACHE_4V;
2195 break;
2196 }
2197
c4bce90e
DM
2198 if (tlb_type == hypervisor)
2199 sun4v_pgprot_init();
2200 else
2201 sun4u_pgprot_init();
2202
d257d5da 2203 if (tlb_type == cheetah_plus ||
9076d0e7 2204 tlb_type == hypervisor) {
517af332 2205 tsb_phys_patch();
9076d0e7
DM
2206 ktsb_phys_patch();
2207 }
517af332 2208
c69ad0a3 2209 if (tlb_type == hypervisor)
d257d5da
DM
2210 sun4v_patch_tlb_handlers();
2211
a94a172d
DM
2212 /* Find available physical memory...
2213 *
2214 * Read it twice in order to work around a bug in openfirmware.
2215 * The call to grab this table itself can cause openfirmware to
2216 * allocate memory, which in turn can take away some space from
2217 * the list of available memory. Reading it twice makes sure
2218 * we really do get the final value.
2219 */
2220 read_obp_translations();
2221 read_obp_memory("reg", &pall[0], &pall_ents);
2222 read_obp_memory("available", &pavail[0], &pavail_ents);
13edad7a 2223 read_obp_memory("available", &pavail[0], &pavail_ents);
0836a0eb
DM
2224
2225 phys_base = 0xffffffffffffffffUL;
3b2a7e23 2226 for (i = 0; i < pavail_ents; i++) {
13edad7a 2227 phys_base = min(phys_base, pavail[i].phys_addr);
95f72d1e 2228 memblock_add(pavail[i].phys_addr, pavail[i].reg_size);
3b2a7e23
DM
2229 }
2230
95f72d1e 2231 memblock_reserve(kern_base, kern_size);
0836a0eb 2232
4e82c9a6
DM
2233 find_ramdisk(phys_base);
2234
7c21d533 2235 if (cmdline_memory_size)
2236 reduce_memory(cmdline_memory_size);
25b0c659 2237
1aadc056 2238 memblock_allow_resize();
95f72d1e 2239 memblock_dump_all();
3b2a7e23 2240
1da177e4
LT
2241 set_bit(0, mmu_context_bmap);
2242
2bdb3cb2
DM
2243 shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE);
2244
1da177e4 2245 real_end = (unsigned long)_end;
0eef331a 2246 num_kernel_image_mappings = DIV_ROUND_UP(real_end - KERNBASE, 1 << ILOG2_4MB);
64658743
DM
2247 printk("Kernel: Using %d locked TLB entries for main kernel image.\n",
2248 num_kernel_image_mappings);
2bdb3cb2
DM
2249
2250 /* Set kernel pgd to upper alias so physical page computations
1da177e4
LT
2251 * work.
2252 */
2253 init_mm.pgd += ((shift) / (sizeof(pgd_t)));
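	/* shift is a byte offset; dividing by sizeof(pgd_t) converts it
	 * to a pgd_t-element count, so init_mm.pgd now points at the
	 * PAGE_OFFSET (linear map) alias of swapper_pg_dir.
	 */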
2254
d195b71b 2255 memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));
0dd5b7b0 2256
c9c10830 2257 inherit_prom_mappings();
5085b4a5 2258
a8b900d8
DM
2259 /* Ok, we can use our TLB miss and window trap handlers safely. */
2260 setup_tba();
1da177e4 2261
c9c10830 2262 __flush_tlb_all();
9ad98c5b 2263
ad072004 2264 prom_build_devicetree();
b696fdc2 2265 of_populate_present_mask();
b99c6ebe
DM
2266#ifndef CONFIG_SMP
2267 of_fill_in_cpu_data();
2268#endif
ad072004 2269
890db403 2270 if (tlb_type == hypervisor) {
4a283339 2271 sun4v_mdesc_init();
6ac5c610 2272 mdesc_populate_present_mask(cpu_all_mask);
b99c6ebe
DM
2273#ifndef CONFIG_SMP
2274 mdesc_fill_in_cpu_data(cpu_all_mask);
2275#endif
ce33fdc5 2276 mdesc_get_page_sizes(cpu_all_mask, &cpu_pgsz_mask);
c69ad0a3
DM
2277
2278 sun4v_linear_pte_xor_finalize();
2279
2280 sun4v_ktsb_init();
2281 sun4v_ktsb_register();
ce33fdc5
DM
2282 } else {
2283 unsigned long impl, ver;
2284
2285 cpu_pgsz_mask = (HV_PGSZ_MASK_8K | HV_PGSZ_MASK_64K |
2286 HV_PGSZ_MASK_512K | HV_PGSZ_MASK_4MB);
2287
2288 __asm__ __volatile__("rdpr %%ver, %0" : "=r" (ver));
2289 impl = ((ver >> 32) & 0xffff);
2290 if (impl == PANTHER_IMPL)
2291 cpu_pgsz_mask |= (HV_PGSZ_MASK_32MB |
2292 HV_PGSZ_MASK_256MB);
c69ad0a3
DM
2293
2294 sun4u_linear_pte_xor_finalize();
890db403 2295 }
4a283339 2296
c69ad0a3
DM
2297 /* Flush the TLBs and the 4M TSB so that the updated linear
2298 * pte XOR settings are realized for all mappings.
2299 */
2300 __flush_tlb_all();
2301#ifndef CONFIG_DEBUG_PAGEALLOC
2302 memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb));
2303#endif
2304 __flush_tlb_all();
2305
5ed56f1a
DM
2306 /* Setup bootmem... */
2307 last_valid_pfn = end_pfn = bootmem_init(phys_base);
2308
56425306 2309 kernel_physical_mapping_init();
56425306 2310
1da177e4 2311 {
919ee677 2312 unsigned long max_zone_pfns[MAX_NR_ZONES];
1da177e4 2313
919ee677 2314 memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
1da177e4 2315
919ee677 2316 max_zone_pfns[ZONE_NORMAL] = end_pfn;
1da177e4 2317
919ee677 2318 free_area_init_nodes(max_zone_pfns);
1da177e4
LT
2319 }
2320
3c62a2d3 2321 printk("Booting Linux...\n");
1da177e4
LT
2322}
2323
7c9503b8 2324int page_in_phys_avail(unsigned long paddr)
919ee677
DM
2325{
2326 int i;
2327
2328 paddr &= PAGE_MASK;
2329
2330 for (i = 0; i < pavail_ents; i++) {
2331 unsigned long start, end;
2332
2333 start = pavail[i].phys_addr;
2334 end = start + pavail[i].reg_size;
2335
2336 if (paddr >= start && paddr < end)
2337 return 1;
2338 }
2339 if (paddr >= kern_base && paddr < (kern_base + kern_size))
2340 return 1;
2341#ifdef CONFIG_BLK_DEV_INITRD
2342 if (paddr >= __pa(initrd_start) &&
2343 paddr < __pa(PAGE_ALIGN(initrd_end)))
2344 return 1;
2345#endif
2346
2347 return 0;
2348}
2349
961f8fa0
YL
2350static void __init register_page_bootmem_info(void)
2351{
2352#ifdef CONFIG_NEED_MULTIPLE_NODES
2353 int i;
2354
2355 for_each_online_node(i)
2356 if (NODE_DATA(i)->node_spanned_pages)
2357 register_page_bootmem_info_node(NODE_DATA(i));
2358#endif
2359}
1da177e4
LT
2360void __init mem_init(void)
2361{
1da177e4
LT
2362 high_memory = __va(last_valid_pfn << PAGE_SHIFT);
2363
961f8fa0 2364 register_page_bootmem_info();
0c988534 2365 free_all_bootmem();
919ee677 2366
1da177e4
LT
2367 /*
2368 * Set up the zero page, mark it reserved, so that page count
2369 * is not manipulated when freeing the page from user ptes.
2370 */
2371 mem_map_zero = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0);
2372 if (mem_map_zero == NULL) {
2373 prom_printf("paging_init: Cannot alloc zero page.\n");
2374 prom_halt();
2375 }
70affe45 2376 mark_page_reserved(mem_map_zero);
1da177e4 2377
dceccbe9 2378 mem_init_print_info(NULL);
1da177e4
LT
2379
2380 if (tlb_type == cheetah || tlb_type == cheetah_plus)
2381 cheetah_ecache_flush_init();
2382}
2383
898cf0ec 2384void free_initmem(void)
1da177e4
LT
2385{
2386 unsigned long addr, initend;
f2b60794
DM
2387 int do_free = 1;
2388
2389 /* If the physical memory maps were trimmed by kernel command
2390 * line options, don't even try freeing this initmem stuff up.
2391 * The kernel image could have been in the trimmed out region
2392 * and if so the freeing below will free invalid page structs.
2393 */
2394 if (cmdline_memory_size)
2395 do_free = 0;
1da177e4
LT
2396
2397 /*
 2398 * The init section is aligned to 8k in vmlinux.lds. Page align for >8k page sizes.
2399 */
2400 addr = PAGE_ALIGN((unsigned long)(__init_begin));
2401 initend = (unsigned long)(__init_end) & PAGE_MASK;
2402 for (; addr < initend; addr += PAGE_SIZE) {
2403 unsigned long page;
1da177e4
LT
2404
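		/* Translate the KERNBASE-linked init address into its
		 * linear (PAGE_OFFSET) alias so that virt_to_page()
		 * below resolves to the correct struct page.
		 */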
2405 page = (addr +
2406 ((unsigned long) __va(kern_base)) -
2407 ((unsigned long) KERNBASE));
c9cf5528 2408 memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
1da177e4 2409
70affe45
JL
2410 if (do_free)
2411 free_reserved_page(virt_to_page(page));
1da177e4
LT
2412 }
2413}
2414
2415#ifdef CONFIG_BLK_DEV_INITRD
2416void free_initrd_mem(unsigned long start, unsigned long end)
2417{
dceccbe9
JL
2418 free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
2419 "initrd");
1da177e4
LT
2420}
2421#endif
c4bce90e 2422
c4bce90e
DM
2423pgprot_t PAGE_KERNEL __read_mostly;
2424EXPORT_SYMBOL(PAGE_KERNEL);
2425
2426pgprot_t PAGE_KERNEL_LOCKED __read_mostly;
2427pgprot_t PAGE_COPY __read_mostly;
0f15952a
DM
2428
2429pgprot_t PAGE_SHARED __read_mostly;
2430EXPORT_SYMBOL(PAGE_SHARED);
2431
c4bce90e
DM
2432unsigned long pg_iobits __read_mostly;
2433
2434unsigned long _PAGE_IE __read_mostly;
987c74fc 2435EXPORT_SYMBOL(_PAGE_IE);
b2bef442 2436
c4bce90e 2437unsigned long _PAGE_E __read_mostly;
b2bef442
DM
2438EXPORT_SYMBOL(_PAGE_E);
2439
c4bce90e 2440unsigned long _PAGE_CACHE __read_mostly;
b2bef442 2441EXPORT_SYMBOL(_PAGE_CACHE);
c4bce90e 2442
46644c24 2443#ifdef CONFIG_SPARSEMEM_VMEMMAP
0aad818b
JW
2444int __meminit vmemmap_populate(unsigned long vstart, unsigned long vend,
2445 int node)
46644c24 2446{
46644c24
DM
2447 unsigned long pte_base;
2448
2449 pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4U |
2450 _PAGE_CP_4U | _PAGE_CV_4U |
2451 _PAGE_P_4U | _PAGE_W_4U);
2452 if (tlb_type == hypervisor)
2453 pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4V |
494e5b6f 2454 page_cache4v_flag | _PAGE_P_4V | _PAGE_W_4V);
46644c24 2455
c06240c7 2456 pte_base |= _PAGE_PMD_HUGE;
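	/* The vmemmap is populated with PMD_SIZE huge mappings: each
	 * block of struct pages is allocated PMD_SIZE at a time and its
	 * physical address installed directly in the pmd below.
	 */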
46644c24 2457
c06240c7
DM
2458 vstart = vstart & PMD_MASK;
2459 vend = ALIGN(vend, PMD_SIZE);
2460 for (; vstart < vend; vstart += PMD_SIZE) {
2461 pgd_t *pgd = pgd_offset_k(vstart);
2462 unsigned long pte;
2463 pud_t *pud;
2464 pmd_t *pmd;
2465
2466 if (pgd_none(*pgd)) {
2467 pud_t *new = vmemmap_alloc_block(PAGE_SIZE, node);
2468
2469 if (!new)
46644c24 2470 return -ENOMEM;
c06240c7
DM
2471 pgd_populate(&init_mm, pgd, new);
2472 }
46644c24 2473
c06240c7
DM
2474 pud = pud_offset(pgd, vstart);
2475 if (pud_none(*pud)) {
2476 pmd_t *new = vmemmap_alloc_block(PAGE_SIZE, node);
46644c24 2477
c06240c7
DM
2478 if (!new)
2479 return -ENOMEM;
2480 pud_populate(&init_mm, pud, new);
46644c24 2481 }
2856cc2e 2482
c06240c7
DM
2483 pmd = pmd_offset(pud, vstart);
2484
2485 pte = pmd_val(*pmd);
2486 if (!(pte & _PAGE_VALID)) {
2487 void *block = vmemmap_alloc_block(PMD_SIZE, node);
2488
2489 if (!block)
2490 return -ENOMEM;
2491
2492 pmd_val(*pmd) = pte_base | __pa(block);
2493 }
2856cc2e 2494 }
c06240c7
DM
2495
2496 return 0;
2856cc2e 2497}
46723bfa 2498
0aad818b 2499void vmemmap_free(unsigned long start, unsigned long end)
0197518c
TC
2500{
2501}
46644c24
DM
2502#endif /* CONFIG_SPARSEMEM_VMEMMAP */
2503
c4bce90e
DM
2504static void prot_init_common(unsigned long page_none,
2505 unsigned long page_shared,
2506 unsigned long page_copy,
2507 unsigned long page_readonly,
2508 unsigned long page_exec_bit)
2509{
2510 PAGE_COPY = __pgprot(page_copy);
0f15952a 2511 PAGE_SHARED = __pgprot(page_shared);
c4bce90e
DM
2512
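	/* Index encoding follows the usual Linux convention:
	 * bit 0 = VM_READ, bit 1 = VM_WRITE, bit 2 = VM_EXEC,
	 * bit 3 = VM_SHARED.
	 */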
2513 protection_map[0x0] = __pgprot(page_none);
2514 protection_map[0x1] = __pgprot(page_readonly & ~page_exec_bit);
2515 protection_map[0x2] = __pgprot(page_copy & ~page_exec_bit);
2516 protection_map[0x3] = __pgprot(page_copy & ~page_exec_bit);
2517 protection_map[0x4] = __pgprot(page_readonly);
2518 protection_map[0x5] = __pgprot(page_readonly);
2519 protection_map[0x6] = __pgprot(page_copy);
2520 protection_map[0x7] = __pgprot(page_copy);
2521 protection_map[0x8] = __pgprot(page_none);
2522 protection_map[0x9] = __pgprot(page_readonly & ~page_exec_bit);
2523 protection_map[0xa] = __pgprot(page_shared & ~page_exec_bit);
2524 protection_map[0xb] = __pgprot(page_shared & ~page_exec_bit);
2525 protection_map[0xc] = __pgprot(page_readonly);
2526 protection_map[0xd] = __pgprot(page_readonly);
2527 protection_map[0xe] = __pgprot(page_shared);
2528 protection_map[0xf] = __pgprot(page_shared);
2529}
2530
2531static void __init sun4u_pgprot_init(void)
2532{
2533 unsigned long page_none, page_shared, page_copy, page_readonly;
2534 unsigned long page_exec_bit;
4f93d21d 2535 int i;
c4bce90e
DM
2536
2537 PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID |
2538 _PAGE_CACHE_4U | _PAGE_P_4U |
2539 __ACCESS_BITS_4U | __DIRTY_BITS_4U |
2540 _PAGE_EXEC_4U);
2541 PAGE_KERNEL_LOCKED = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID |
2542 _PAGE_CACHE_4U | _PAGE_P_4U |
2543 __ACCESS_BITS_4U | __DIRTY_BITS_4U |
2544 _PAGE_EXEC_4U | _PAGE_L_4U);
c4bce90e
DM
2545
2546 _PAGE_IE = _PAGE_IE_4U;
2547 _PAGE_E = _PAGE_E_4U;
2548 _PAGE_CACHE = _PAGE_CACHE_4U;
2549
2550 pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4U | __DIRTY_BITS_4U |
2551 __ACCESS_BITS_4U | _PAGE_E_4U);
2552
d1acb421 2553#ifdef CONFIG_DEBUG_PAGEALLOC
922631b9 2554 kern_linear_pte_xor[0] = _PAGE_VALID ^ PAGE_OFFSET;
d1acb421 2555#else
9cc3a1ac 2556 kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4U) ^
922631b9 2557 PAGE_OFFSET;
d1acb421 2558#endif
9cc3a1ac
DM
2559 kern_linear_pte_xor[0] |= (_PAGE_CP_4U | _PAGE_CV_4U |
2560 _PAGE_P_4U | _PAGE_W_4U);
2561
4f93d21d
DM
2562 for (i = 1; i < 4; i++)
2563 kern_linear_pte_xor[i] = kern_linear_pte_xor[0];
c4bce90e 2564
c4bce90e
DM
2565 _PAGE_ALL_SZ_BITS = (_PAGE_SZ4MB_4U | _PAGE_SZ512K_4U |
2566 _PAGE_SZ64K_4U | _PAGE_SZ8K_4U |
2567 _PAGE_SZ32MB_4U | _PAGE_SZ256MB_4U);
2568
2569
2570 page_none = _PAGE_PRESENT_4U | _PAGE_ACCESSED_4U | _PAGE_CACHE_4U;
2571 page_shared = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
2572 __ACCESS_BITS_4U | _PAGE_WRITE_4U | _PAGE_EXEC_4U);
2573 page_copy = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
2574 __ACCESS_BITS_4U | _PAGE_EXEC_4U);
2575 page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
2576 __ACCESS_BITS_4U | _PAGE_EXEC_4U);
2577
2578 page_exec_bit = _PAGE_EXEC_4U;
2579
2580 prot_init_common(page_none, page_shared, page_copy, page_readonly,
2581 page_exec_bit);
2582}
2583
2584static void __init sun4v_pgprot_init(void)
2585{
2586 unsigned long page_none, page_shared, page_copy, page_readonly;
2587 unsigned long page_exec_bit;
4f93d21d 2588 int i;
c4bce90e
DM
2589
2590 PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4V | _PAGE_VALID |
494e5b6f 2591 page_cache4v_flag | _PAGE_P_4V |
c4bce90e
DM
2592 __ACCESS_BITS_4V | __DIRTY_BITS_4V |
2593 _PAGE_EXEC_4V);
2594 PAGE_KERNEL_LOCKED = PAGE_KERNEL;
c4bce90e
DM
2595
2596 _PAGE_IE = _PAGE_IE_4V;
2597 _PAGE_E = _PAGE_E_4V;
494e5b6f 2598 _PAGE_CACHE = page_cache4v_flag;
c4bce90e 2599
d1acb421 2600#ifdef CONFIG_DEBUG_PAGEALLOC
922631b9 2601 kern_linear_pte_xor[0] = _PAGE_VALID ^ PAGE_OFFSET;
d1acb421 2602#else
9cc3a1ac 2603 kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4V) ^
922631b9 2604 PAGE_OFFSET;
d1acb421 2605#endif
494e5b6f
KA
2606 kern_linear_pte_xor[0] |= (page_cache4v_flag | _PAGE_P_4V |
2607 _PAGE_W_4V);
9cc3a1ac 2608
c69ad0a3
DM
2609 for (i = 1; i < 4; i++)
2610 kern_linear_pte_xor[i] = kern_linear_pte_xor[0];
4f93d21d 2611
c4bce90e
DM
2612 pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4V | __DIRTY_BITS_4V |
2613 __ACCESS_BITS_4V | _PAGE_E_4V);
2614
c4bce90e
DM
2615 _PAGE_ALL_SZ_BITS = (_PAGE_SZ16GB_4V | _PAGE_SZ2GB_4V |
2616 _PAGE_SZ256MB_4V | _PAGE_SZ32MB_4V |
2617 _PAGE_SZ4MB_4V | _PAGE_SZ512K_4V |
2618 _PAGE_SZ64K_4V | _PAGE_SZ8K_4V);
2619
494e5b6f
KA
2620 page_none = _PAGE_PRESENT_4V | _PAGE_ACCESSED_4V | page_cache4v_flag;
2621 page_shared = (_PAGE_VALID | _PAGE_PRESENT_4V | page_cache4v_flag |
c4bce90e 2622 __ACCESS_BITS_4V | _PAGE_WRITE_4V | _PAGE_EXEC_4V);
494e5b6f 2623 page_copy = (_PAGE_VALID | _PAGE_PRESENT_4V | page_cache4v_flag |
c4bce90e 2624 __ACCESS_BITS_4V | _PAGE_EXEC_4V);
494e5b6f 2625 page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4V | page_cache4v_flag |
c4bce90e
DM
2626 __ACCESS_BITS_4V | _PAGE_EXEC_4V);
2627
2628 page_exec_bit = _PAGE_EXEC_4V;
2629
2630 prot_init_common(page_none, page_shared, page_copy, page_readonly,
2631 page_exec_bit);
2632}
2633
2634unsigned long pte_sz_bits(unsigned long sz)
2635{
2636 if (tlb_type == hypervisor) {
2637 switch (sz) {
2638 case 8 * 1024:
2639 default:
2640 return _PAGE_SZ8K_4V;
2641 case 64 * 1024:
2642 return _PAGE_SZ64K_4V;
2643 case 512 * 1024:
2644 return _PAGE_SZ512K_4V;
2645 case 4 * 1024 * 1024:
2646 return _PAGE_SZ4MB_4V;
6cb79b3f 2647 }
c4bce90e
DM
2648 } else {
2649 switch (sz) {
2650 case 8 * 1024:
2651 default:
2652 return _PAGE_SZ8K_4U;
2653 case 64 * 1024:
2654 return _PAGE_SZ64K_4U;
2655 case 512 * 1024:
2656 return _PAGE_SZ512K_4U;
2657 case 4 * 1024 * 1024:
2658 return _PAGE_SZ4MB_4U;
6cb79b3f 2659 }
c4bce90e
DM
2660 }
2661}
2662
2663pte_t mk_pte_io(unsigned long page, pgprot_t prot, int space, unsigned long page_size)
2664{
2665 pte_t pte;
cf627156
DM
2666
2667 pte_val(pte) = page | pgprot_val(pgprot_noncached(prot));
c4bce90e
DM
2668 pte_val(pte) |= (((unsigned long)space) << 32);
2669 pte_val(pte) |= pte_sz_bits(page_size);
c4bce90e 2670
cf627156 2671 return pte;
c4bce90e
DM
2672}
2673
2674static unsigned long kern_large_tte(unsigned long paddr)
2675{
2676 unsigned long val;
2677
2678 val = (_PAGE_VALID | _PAGE_SZ4MB_4U |
2679 _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_P_4U |
2680 _PAGE_EXEC_4U | _PAGE_L_4U | _PAGE_W_4U);
2681 if (tlb_type == hypervisor)
2682 val = (_PAGE_VALID | _PAGE_SZ4MB_4V |
494e5b6f 2683 page_cache4v_flag | _PAGE_P_4V |
c4bce90e
DM
2684 _PAGE_EXEC_4V | _PAGE_W_4V);
2685
2686 return val | paddr;
2687}
2688
c4bce90e
DM
2689/* If not locked, zap it. */
2690void __flush_tlb_all(void)
2691{
2692 unsigned long pstate;
2693 int i;
2694
2695 __asm__ __volatile__("flushw\n\t"
2696 "rdpr %%pstate, %0\n\t"
2697 "wrpr %0, %1, %%pstate"
2698 : "=r" (pstate)
2699 : "i" (PSTATE_IE));
8f361453
DM
2700 if (tlb_type == hypervisor) {
2701 sun4v_mmu_demap_all();
2702 } else if (tlb_type == spitfire) {
c4bce90e
DM
2703 for (i = 0; i < 64; i++) {
2704 /* Spitfire Errata #32 workaround */
2705 /* NOTE: Always runs on spitfire, so no
2706 * cheetah+ page size encodings.
2707 */
2708 __asm__ __volatile__("stxa %0, [%1] %2\n\t"
2709 "flush %%g6"
2710 : /* No outputs */
2711 : "r" (0),
2712 "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
2713
2714 if (!(spitfire_get_dtlb_data(i) & _PAGE_L_4U)) {
2715 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
2716 "membar #Sync"
2717 : /* no outputs */
2718 : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
2719 spitfire_put_dtlb_data(i, 0x0UL);
2720 }
2721
2722 /* Spitfire Errata #32 workaround */
2723 /* NOTE: Always runs on spitfire, so no
2724 * cheetah+ page size encodings.
2725 */
2726 __asm__ __volatile__("stxa %0, [%1] %2\n\t"
2727 "flush %%g6"
2728 : /* No outputs */
2729 : "r" (0),
2730 "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
2731
2732 if (!(spitfire_get_itlb_data(i) & _PAGE_L_4U)) {
2733 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
2734 "membar #Sync"
2735 : /* no outputs */
2736 : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
2737 spitfire_put_itlb_data(i, 0x0UL);
2738 }
2739 }
2740 } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
2741 cheetah_flush_dtlb_all();
2742 cheetah_flush_itlb_all();
2743 }
2744 __asm__ __volatile__("wrpr %0, 0, %%pstate"
2745 : : "r" (pstate));
2746}
c460bec7 2747
c460bec7
DM
2748pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
2749 unsigned long address)
2750{
32d6bd90 2751 struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
37b3a8ff 2752 pte_t *pte = NULL;
c460bec7 2753
c460bec7
DM
2754 if (page)
2755 pte = (pte_t *) page_address(page);
2756
2757 return pte;
2758}
2759
2760pgtable_t pte_alloc_one(struct mm_struct *mm,
2761 unsigned long address)
2762{
32d6bd90 2763 struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
1ae9ae5f
KS
2764 if (!page)
2765 return NULL;
2766 if (!pgtable_page_ctor(page)) {
2767 free_hot_cold_page(page, 0);
2768 return NULL;
c460bec7 2769 }
1ae9ae5f 2770 return (pte_t *) page_address(page);
c460bec7
DM
2771}
2772
2773void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
2774{
37b3a8ff 2775 free_page((unsigned long)pte);
c460bec7
DM
2776}
2777
2778static void __pte_free(pgtable_t pte)
2779{
2780 struct page *page = virt_to_page(pte);
37b3a8ff
DM
2781
2782 pgtable_page_dtor(page);
2783 __free_page(page);
c460bec7
DM
2784}
2785
2786void pte_free(struct mm_struct *mm, pgtable_t pte)
2787{
2788 __pte_free(pte);
2789}
2790
2791void pgtable_free(void *table, bool is_page)
2792{
2793 if (is_page)
2794 __pte_free(table);
2795 else
2796 kmem_cache_free(pgtable_cache, table);
2797}
9e695d2e
DM
2798
2799#ifdef CONFIG_TRANSPARENT_HUGEPAGE
9e695d2e
DM
2800void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
2801 pmd_t *pmd)
2802{
2803 unsigned long pte, flags;
2804 struct mm_struct *mm;
2805 pmd_t entry = *pmd;
9e695d2e
DM
2806
2807 if (!pmd_large(entry) || !pmd_young(entry))
2808 return;
2809
a7b9403f 2810 pte = pmd_val(entry);
9e695d2e 2811
18f38132
DM
2812 /* Don't insert a non-valid PMD into the TSB, we'll deadlock. */
2813 if (!(pte & _PAGE_VALID))
2814 return;
2815
37b3a8ff
DM
2816 /* We are fabricating 8MB pages using 4MB real hw pages. */
2817 pte |= (addr & (1UL << REAL_HPAGE_SHIFT));
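	/* Bit REAL_HPAGE_SHIFT of the faulting address selects which
	 * 4MB real hw page of the fabricated mapping this fault hit, so
	 * the TSB entry is formed for the correct half.
	 */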
9e695d2e
DM
2818
2819 mm = vma->vm_mm;
2820
2821 spin_lock_irqsave(&mm->context.lock, flags);
2822
2823 if (mm->context.tsb_block[MM_TSB_HUGE].tsb != NULL)
37b3a8ff 2824 __update_mmu_tsb_insert(mm, MM_TSB_HUGE, REAL_HPAGE_SHIFT,
9e695d2e
DM
2825 addr, pte);
2826
2827 spin_unlock_irqrestore(&mm->context.lock, flags);
2828}
2829#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
2830
2831#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
2832static void context_reload(void *__data)
2833{
2834 struct mm_struct *mm = __data;
2835
2836 if (mm == current->mm)
2837 load_secondary_context(mm);
2838}
2839
0fbebed6 2840void hugetlb_setup(struct pt_regs *regs)
9e695d2e 2841{
0fbebed6
DM
2842 struct mm_struct *mm = current->mm;
2843 struct tsb_config *tp;
9e695d2e 2844
70ffdb93 2845 if (faulthandler_disabled() || !mm) {
0fbebed6
DM
2846 const struct exception_table_entry *entry;
2847
2848 entry = search_exception_tables(regs->tpc);
2849 if (entry) {
2850 regs->tpc = entry->fixup;
2851 regs->tnpc = regs->tpc + 4;
2852 return;
2853 }
2854 pr_alert("Unexpected HugeTLB setup in atomic context.\n");
2855 die_if_kernel("HugeTSB in atomic", regs);
2856 }
2857
2858 tp = &mm->context.tsb_block[MM_TSB_HUGE];
2859 if (likely(tp->tsb == NULL))
2860 tsb_grow(mm, MM_TSB_HUGE, 0);
9e695d2e 2861
9e695d2e
DM
2862 tsb_context_switch(mm);
2863 smp_tsb_sync(mm);
2864
2865 /* On UltraSPARC-III+ and later, configure the second half of
2866 * the Data-TLB for huge pages.
2867 */
2868 if (tlb_type == cheetah_plus) {
9ea46abe 2869 bool need_context_reload = false;
9e695d2e
DM
2870 unsigned long ctx;
2871
9ea46abe 2872 spin_lock_irq(&ctx_alloc_lock);
9e695d2e
DM
2873 ctx = mm->context.sparc64_ctx_val;
2874 ctx &= ~CTX_PGSZ_MASK;
2875 ctx |= CTX_PGSZ_BASE << CTX_PGSZ0_SHIFT;
2876 ctx |= CTX_PGSZ_HUGE << CTX_PGSZ1_SHIFT;
2877
2878 if (ctx != mm->context.sparc64_ctx_val) {
2879 /* When changing the page size fields, we
2880 * must perform a context flush so that no
2881 * stale entries match. This flush must
2882 * occur with the original context register
2883 * settings.
2884 */
2885 do_flush_tlb_mm(mm);
2886
2887 /* Reload the context register of all processors
2888 * also executing in this address space.
2889 */
2890 mm->context.sparc64_ctx_val = ctx;
9ea46abe 2891 need_context_reload = true;
9e695d2e 2892 }
9ea46abe
DM
2893 spin_unlock_irq(&ctx_alloc_lock);
2894
2895 if (need_context_reload)
2896 on_each_cpu(context_reload, mm, 0);
9e695d2e
DM
2897 }
2898}
2899#endif
f6d4fb5c 2900
2901static struct resource code_resource = {
2902 .name = "Kernel code",
35d98e93 2903 .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
f6d4fb5c 2904};
2905
2906static struct resource data_resource = {
2907 .name = "Kernel data",
35d98e93 2908 .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
f6d4fb5c 2909};
2910
2911static struct resource bss_resource = {
2912 .name = "Kernel bss",
35d98e93 2913 .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
f6d4fb5c 2914};
2915
2916static inline resource_size_t compute_kern_paddr(void *addr)
2917{
2918 return (resource_size_t) (addr - KERNBASE + kern_base);
2919}
2920
2921static void __init kernel_lds_init(void)
2922{
2923 code_resource.start = compute_kern_paddr(_text);
2924 code_resource.end = compute_kern_paddr(_etext - 1);
2925 data_resource.start = compute_kern_paddr(_etext);
2926 data_resource.end = compute_kern_paddr(_edata - 1);
2927 bss_resource.start = compute_kern_paddr(__bss_start);
2928 bss_resource.end = compute_kern_paddr(_end - 1);
2929}
2930
2931static int __init report_memory(void)
2932{
2933 int i;
2934 struct resource *res;
2935
2936 kernel_lds_init();
2937
2938 for (i = 0; i < pavail_ents; i++) {
2939 res = kzalloc(sizeof(struct resource), GFP_KERNEL);
2940
2941 if (!res) {
2942 pr_warn("Failed to allocate source.\n");
2943 break;
2944 }
2945
2946 res->name = "System RAM";
2947 res->start = pavail[i].phys_addr;
2948 res->end = pavail[i].phys_addr + pavail[i].reg_size - 1;
35d98e93 2949 res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM;
f6d4fb5c 2950
2951 if (insert_resource(&iomem_resource, res) < 0) {
2952 pr_warn("Resource insertion failed.\n");
2953 break;
2954 }
2955
2956 insert_resource(res, &code_resource);
2957 insert_resource(res, &data_resource);
2958 insert_resource(res, &bss_resource);
2959 }
2960
2961 return 0;
2962}
3c08158e 2963arch_initcall(report_memory);
e9011d08 2964
4ca9a237
DM
2965#ifdef CONFIG_SMP
2966#define do_flush_tlb_kernel_range smp_flush_tlb_kernel_range
2967#else
2968#define do_flush_tlb_kernel_range __flush_tlb_kernel_range
2969#endif
2970
2971void flush_tlb_kernel_range(unsigned long start, unsigned long end)
2972{
2973 if (start < HI_OBP_ADDRESS && end > LOW_OBP_ADDRESS) {
2974 if (start < LOW_OBP_ADDRESS) {
2975 flush_tsb_kernel_range(start, LOW_OBP_ADDRESS);
2976 do_flush_tlb_kernel_range(start, LOW_OBP_ADDRESS);
2977 }
2978 if (end > HI_OBP_ADDRESS) {
473ad7f4
DM
2979 flush_tsb_kernel_range(HI_OBP_ADDRESS, end);
2980 do_flush_tlb_kernel_range(HI_OBP_ADDRESS, end);
4ca9a237
DM
2981 }
2982 } else {
2983 flush_tsb_kernel_range(start, end);
2984 do_flush_tlb_kernel_range(start, end);
2985 }
2986}