/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 * Copyright (C) 1996 Paul Mackerras
 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
 * PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
 *
 * Derived from "arch/i386/mm/init.c"
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>

#include <asm/pgalloc.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/btext.h>
#include <asm/tlb.h>
#include <asm/lmb.h>
#include <asm/sections.h>
#include <asm/vdso.h>

#include "mmu_decl.h"

#ifndef CPU_FTR_COHERENT_ICACHE
#define CPU_FTR_COHERENT_ICACHE	0	/* XXX for now */
#define CPU_FTR_NOEXECUTE	0
#endif

int init_bootmem_done;
int mem_init_done;
unsigned long memory_limit;

extern void hash_preload(struct mm_struct *mm, unsigned long ea,
			 unsigned long access, unsigned long trap);

/*
 * This is called by /dev/mem to know if a given address has to
 * be mapped non-cacheable or not
 */
int page_is_ram(unsigned long pfn)
{
	unsigned long paddr = (pfn << PAGE_SHIFT);

#ifndef CONFIG_PPC64	/* XXX for now */
	return paddr < __pa(high_memory);
#else
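	/*
	 * On 64-bit there is no single high_memory boundary to test
	 * against; instead, scan the LMB (logical memory block) list
	 * and report RAM only if the address falls inside a region.
	 */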
	int i;

	for (i = 0; i < lmb.memory.cnt; i++) {
		unsigned long base;

		base = lmb.memory.region[i].base;

		if ((paddr >= base) &&
		    (paddr < (base + lmb.memory.region[i].size))) {
			return 1;
		}
	}

	return 0;
#endif
}
EXPORT_SYMBOL(page_is_ram);

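/*
 * Pick the page protection for a physical mapping: the platform may
 * override this via the ppc_md hook; otherwise anything that is not
 * RAM is mapped guarded and non-cacheable.
 */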
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (ppc_md.phys_mem_access_prot)
		return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);

	if (!page_is_ram(pfn))
		vma_prot = __pgprot(pgprot_val(vma_prot)
				    | _PAGE_GUARDED | _PAGE_NO_CACHE);
	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

#ifdef CONFIG_MEMORY_HOTPLUG

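/*
 * Hot-added pages arrive marked reserved with an undefined count;
 * clear the reserved bit, reset the count, and hand each page to
 * the buddy allocator, then fix up the global accounting.
 */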
void online_page(struct page *page)
{
	ClearPageReserved(page);
	init_page_count(page);
	__free_page(page);
	totalram_pages++;
	num_physpages++;
}

int __devinit add_memory(u64 start, u64 size)
{
	struct pglist_data *pgdata;
	struct zone *zone;
	int nid;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	nid = hot_add_scn_to_nid(start);
	pgdata = NODE_DATA(nid);

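	/*
	 * Map the new range into the kernel linear mapping before any
	 * page structures for it are initialized and used.
	 */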
	start = (unsigned long)__va(start);
	create_section_mapping(start, start + size);

	/* this should work for most non-highmem platforms */
	zone = pgdata->node_zones;

	return __add_pages(zone, start_pfn, nr_pages);
}

/*
 * First pass at this code: check whether the remove request lies
 * within the RMO, and refuse to remove anything inside it.
 */
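/*
 * (The RMO is the real-mode-addressable region at the base of
 * memory; the kernel depends on it staying resident, so it must
 * never be unplugged.)
 */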
int __devinit remove_memory(u64 start, u64 size)
{
	struct zone *zone;
	unsigned long start_pfn, end_pfn, nr_pages;

	start_pfn = start >> PAGE_SHIFT;
	nr_pages = size >> PAGE_SHIFT;
	end_pfn = start_pfn + nr_pages;

	printk("%s(): Attempting to remove memory in range "
		"%lx to %lx\n", __func__, start, start+size);

	zone = page_zone(pfn_to_page(start_pfn));

	printk("%s(): memory will be removed from "
		"the %s zone\n", __func__, zone->name);

	/*
	 * not handling removal of memory ranges that
	 * overlap multiple zones yet
	 */
	if (end_pfn > (zone->zone_start_pfn + zone->spanned_pages))
		goto overlap;

	/* make sure it is NOT in the RMO */
	if (start < lmb.rmo_size) {
		printk("%s(): range to be removed must NOT be in RMO!\n",
			__func__);
		goto in_rmo;
	}

	return __remove_pages(zone, start_pfn, nr_pages);

overlap:
	printk("%s(): memory range to be removed overlaps "
		"multiple zones!\n", __func__);
in_rmo:
	return -1;
}
#endif /* CONFIG_MEMORY_HOTPLUG */

void show_mem(void)
{
	unsigned long total = 0, reserved = 0;
	unsigned long shared = 0, cached = 0;
	unsigned long highmem = 0;
	struct page *page;
	pg_data_t *pgdat;
	unsigned long i;

	printk("Mem-info:\n");
	show_free_areas();
	printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
	for_each_pgdat(pgdat) {
		unsigned long flags;
		pgdat_resize_lock(pgdat, &flags);
		for (i = 0; i < pgdat->node_spanned_pages; i++) {
			if (!pfn_valid(pgdat->node_start_pfn + i))
				continue;
			page = pgdat_page_nr(pgdat, i);
			total++;
			if (PageHighMem(page))
				highmem++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (page_count(page))
				shared += page_count(page) - 1;
		}
		pgdat_resize_unlock(pgdat, &flags);
	}
	printk("%ld pages of RAM\n", total);
#ifdef CONFIG_HIGHMEM
	printk("%ld pages of HIGHMEM\n", highmem);
#endif
	printk("%ld reserved pages\n", reserved);
	printk("%ld pages shared\n", shared);
	printk("%ld pages swap cached\n", cached);
}

/*
 * Initialize the bootmem system and give it all the memory we
 * have available.  If we are using highmem, we only put the
 * lowmem into the bootmem system.
 */
#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init do_init_bootmem(void)
{
	unsigned long i;
	unsigned long start, bootmap_pages;
	unsigned long total_pages;
	int boot_mapsize;

	max_pfn = total_pages = lmb_end_of_DRAM() >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
	total_pages = total_lowmem >> PAGE_SHIFT;
#endif

	/*
	 * Find an area to use for the bootmem bitmap.  Calculate the size
	 * of the bitmap required as (Total Memory) / PAGE_SIZE / BITS_PER_BYTE.
	 * Add 1 additional page in case the address isn't page-aligned.
	 */
	bootmap_pages = bootmem_bootmap_pages(total_pages);

	start = lmb_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE);
	BUG_ON(!start);

	boot_mapsize = init_bootmem(start >> PAGE_SHIFT, total_pages);

	/* Add all physical memory to the bootmem map, mark each area
	 * present.
	 */
	for (i = 0; i < lmb.memory.cnt; i++) {
		unsigned long base = lmb.memory.region[i].base;
		unsigned long size = lmb_size_bytes(&lmb.memory, i);
#ifdef CONFIG_HIGHMEM
		if (base >= total_lowmem)
			continue;
		if (base + size > total_lowmem)
			size = total_lowmem - base;
#endif
		free_bootmem(base, size);
	}

	/* reserve the sections we're already using */
	for (i = 0; i < lmb.reserved.cnt; i++)
		reserve_bootmem(lmb.reserved.region[i].base,
				lmb_size_bytes(&lmb.reserved, i));

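	/* Tell the memory model which physical ranges are populated. */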
	/* XXX need to clip this if using highmem? */
	for (i = 0; i < lmb.memory.cnt; i++)
		memory_present(0, lmb_start_pfn(&lmb.memory, i),
			       lmb_end_pfn(&lmb.memory, i));
	init_bootmem_done = 1;
}

/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
void __init paging_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES];
	unsigned long zholes_size[MAX_NR_ZONES];
	unsigned long total_ram = lmb_phys_mem_size();
	unsigned long top_of_ram = lmb_end_of_DRAM();

#ifdef CONFIG_HIGHMEM
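	/*
	 * The dummy map_page() calls below appear to exist only for
	 * their side effect: allocating the page-table pages covering
	 * the pkmap and fixed-kmap windows, so that the
	 * pte_offset_kernel() lookups that follow cannot fail.
	 */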
	map_page(PKMAP_BASE, 0, 0);	/* XXX gross */
	pkmap_page_table = pte_offset_kernel(pmd_offset(pgd_offset_k
			(PKMAP_BASE), PKMAP_BASE), PKMAP_BASE);
	map_page(KMAP_FIX_BEGIN, 0, 0);	/* XXX gross */
	kmap_pte = pte_offset_kernel(pmd_offset(pgd_offset_k
			(KMAP_FIX_BEGIN), KMAP_FIX_BEGIN), KMAP_FIX_BEGIN);
	kmap_prot = PAGE_KERNEL;
#endif /* CONFIG_HIGHMEM */

	printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
	       top_of_ram, total_ram);
	printk(KERN_INFO "Memory hole size: %ldMB\n",
	       (top_of_ram - total_ram) >> 20);
	/*
	 * All pages are DMA-able so we put them all in the DMA zone.
	 */
	memset(zones_size, 0, sizeof(zones_size));
	memset(zholes_size, 0, sizeof(zholes_size));

	zones_size[ZONE_DMA] = top_of_ram >> PAGE_SHIFT;
	zholes_size[ZONE_DMA] = (top_of_ram - total_ram) >> PAGE_SHIFT;

#ifdef CONFIG_HIGHMEM
	zones_size[ZONE_DMA] = total_lowmem >> PAGE_SHIFT;
	zones_size[ZONE_HIGHMEM] = (total_memory - total_lowmem) >> PAGE_SHIFT;
	zholes_size[ZONE_HIGHMEM] = (top_of_ram - total_ram) >> PAGE_SHIFT;
#endif /* CONFIG_HIGHMEM */

	free_area_init_node(0, NODE_DATA(0), zones_size,
			    __pa(PAGE_OFFSET) >> PAGE_SHIFT, zholes_size);
}
#endif /* ! CONFIG_NEED_MULTIPLE_NODES */

void __init mem_init(void)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
	int nid;
#endif
	pg_data_t *pgdat;
	unsigned long i;
	struct page *page;
	unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize;

	num_physpages = lmb.memory.size >> PAGE_SHIFT;
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

#ifdef CONFIG_NEED_MULTIPLE_NODES
	for_each_online_node(nid) {
		if (NODE_DATA(nid)->node_spanned_pages != 0) {
			printk("freeing bootmem node %x\n", nid);
			totalram_pages +=
				free_all_bootmem_node(NODE_DATA(nid));
		}
	}
#else
	max_mapnr = max_pfn;
	totalram_pages += free_all_bootmem();
#endif
	for_each_pgdat(pgdat) {
		for (i = 0; i < pgdat->node_spanned_pages; i++) {
			if (!pfn_valid(pgdat->node_start_pfn + i))
				continue;
			page = pgdat_page_nr(pgdat, i);
			if (PageReserved(page))
				reservedpages++;
		}
	}

	codesize = (unsigned long)&_sdata - (unsigned long)&_stext;
	datasize = (unsigned long)&_edata - (unsigned long)&_sdata;
	initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin;
	bsssize = (unsigned long)&__bss_stop - (unsigned long)&__bss_start;

#ifdef CONFIG_HIGHMEM
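	/*
	 * Highmem pages were never handed to the bootmem allocator
	 * (only lowmem goes into bootmem), so release them into the
	 * buddy allocator by hand, page by page.
	 */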
	{
		unsigned long pfn, highmem_mapnr;

		highmem_mapnr = total_lowmem >> PAGE_SHIFT;
		for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
			struct page *page = pfn_to_page(pfn);

			ClearPageReserved(page);
			init_page_count(page);
			__free_page(page);
			totalhigh_pages++;
		}
		totalram_pages += totalhigh_pages;
		printk(KERN_INFO "High memory: %luk\n",
		       totalhigh_pages << (PAGE_SHIFT-10));
	}
#endif /* CONFIG_HIGHMEM */

	printk(KERN_INFO "Memory: %luk/%luk available (%luk kernel code, "
	       "%luk reserved, %luk data, %luk bss, %luk init)\n",
	       (unsigned long)nr_free_pages() << (PAGE_SHIFT-10),
	       num_physpages << (PAGE_SHIFT-10),
	       codesize >> 10,
	       reservedpages << (PAGE_SHIFT-10),
	       datasize >> 10,
	       bsssize >> 10,
	       initsize >> 10);

	mem_init_done = 1;

	/* Initialize the vDSO */
	vdso_init();
}

/*
 * This is called when a page has been modified by the kernel.
 * It just marks the page as not i-cache clean.  We do the i-cache
 * flush later when the page is given to a user process, if necessary.
 */
void flush_dcache_page(struct page *page)
{
	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		return;
	/* avoid an atomic op if possible */
	if (test_bit(PG_arch_1, &page->flags))
		clear_bit(PG_arch_1, &page->flags);
}
EXPORT_SYMBOL(flush_dcache_page);

void flush_dcache_icache_page(struct page *page)
{
#ifdef CONFIG_BOOKE
	void *start = kmap_atomic(page, KM_PPC_SYNC_ICACHE);
	__flush_dcache_icache(start);
	kunmap_atomic(start, KM_PPC_SYNC_ICACHE);
#elif defined(CONFIG_8xx) || defined(CONFIG_PPC64)
	/* No need to kmap on 8xx or 64-bit, since highmem is not supported */
	__flush_dcache_icache(page_address(page));
#else
	__flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
#endif
}

void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
	clear_page(page);

	/*
	 * We shouldn't have to do this, but some versions of glibc
	 * require it (ld.so assumes zero-filled pages are icache clean)
	 * - Anton
	 */
	flush_dcache_page(pg);
}
EXPORT_SYMBOL(clear_user_page);

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
		    struct page *pg)
{
	copy_page(vto, vfrom);

	/*
	 * We should be able to use the following optimisation, however
	 * there are two problems.
	 * Firstly, a bug in some versions of binutils meant PLT sections
	 * were not marked executable.
	 * Secondly, the first word in the GOT section is blrl, used
	 * to establish the GOT address.  Until recently the GOT was
	 * not marked executable.
	 * - Anton
	 */
#if 0
	if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
		return;
#endif

	flush_dcache_page(pg);
}

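/*
 * Called after the kernel writes into a user page (e.g. ptrace
 * breakpoint insertion) to keep the i-cache coherent; kmap() covers
 * the case where the page lives in highmem.
 */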
void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			     unsigned long addr, int len)
{
	unsigned long maddr;

	maddr = (unsigned long) kmap(page) + (addr & ~PAGE_MASK);
	flush_icache_range(maddr, maddr + len);
	kunmap(page);
}
EXPORT_SYMBOL(flush_icache_user_range);

/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a PTE in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux PTE.
 *
 * This must always be called with the pte lock held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t pte)
{
#ifdef CONFIG_PPC_STD_MMU
	unsigned long access = 0, trap;
#endif
	unsigned long pfn = pte_pfn(pte);

	/* handle i-cache coherency */
	if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE) &&
	    !cpu_has_feature(CPU_FTR_NOEXECUTE) &&
	    pfn_valid(pfn)) {
		struct page *page = pfn_to_page(pfn);
		if (!PageReserved(page)
		    && !test_bit(PG_arch_1, &page->flags)) {
			if (vma->vm_mm == current->active_mm) {
#ifdef CONFIG_8xx
			/* On 8xx, cache control instructions (particularly
			 * "dcbst" from flush_dcache_icache) fault as a write
			 * operation if there is an unpopulated TLB entry
			 * for the address in question.  To work around that,
			 * we invalidate the TLB here, thus avoiding dcbst
			 * misbehaviour.
			 */
				_tlbie(address);
#endif
				__flush_dcache_icache((void *) address);
			} else
				flush_dcache_icache_page(page);
			set_bit(PG_arch_1, &page->flags);
		}
	}

#ifdef CONFIG_PPC_STD_MMU
	/* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
	if (!pte_young(pte) || address >= TASK_SIZE)
		return;

	/* We try to figure out if we are coming from an instruction
	 * access fault and pass that down to __hash_page so we avoid
	 * double-faulting on execution of fresh text.  We have to test
	 * for regs NULL since init will get here first thing at boot.
	 *
	 * We also avoid filling the hash if not coming from a fault.
	 */
	if (current->thread.regs == NULL)
		return;
	trap = TRAP(current->thread.regs);
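	/* 0x400 is the instruction storage interrupt, 0x300 data storage */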
	if (trap == 0x400)
		access |= _PAGE_EXEC;
	else if (trap != 0x300)
		return;
	hash_preload(vma->vm_mm, address, access, trap);
#endif /* CONFIG_PPC_STD_MMU */
}