/*
 * Microblaze support for cache consistent memory.
 * Copyright (C) 2010 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2010 PetaLogix
 * Copyright (C) 2005 John Williams <jwilliams@itee.uq.edu.au>
 *
 * Based on PowerPC version derived from arch/arm/mm/consistent.c
 * Copyright (C) 2001 Dan Malek (dmalek@jlc.net)
 * Copyright (C) 2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/export.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/gfp.h>
#include <linux/dma-noncoherent.h>

#include <asm/pgalloc.h>
#include <linux/io.h>
#include <linux/hardirq.h>
#include <linux/mmu_context.h>
#include <asm/mmu.h>
#include <linux/uaccess.h>
#include <asm/pgtable.h>
#include <asm/cpuinfo.h>
#include <asm/tlbflush.h>

#ifndef CONFIG_MMU
/* I have to use dcache values because I can't rely on the RAM size. */
# define UNCACHED_SHADOW_MASK (cpuinfo.dcache_high - cpuinfo.dcache_base + 1)
#endif
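
/*
 * Worked example of the mask above (hypothetical values, not from this
 * file): with cpuinfo.dcache_base == 0x80000000 and
 * cpuinfo.dcache_high == 0x80ffffff, the mask is 0x01000000, so ORing it
 * into a cached address such as 0x80123000 yields 0x81123000, the same
 * memory seen through the uncached DDR mirror.
 */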

/*
 * Consistent memory allocators. Used for DMA devices that want to
 * share uncached memory with the processor core.
 * My crufty no-MMU approach is simple. In the HW platform we can optionally
 * mirror the DDR up above the processor cacheable region. So, memory accessed
 * in this mirror region will not be cached. It's allocated from the same
 * pool as normal memory, but the handle we return is shifted up into the
 * uncached region. This will no doubt cause big problems if memory allocated
 * here is not also freed properly. -- JW
 */
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	unsigned long order, vaddr;
	void *ret;
	unsigned int i, err = 0;
	struct page *page, *end;

#ifdef CONFIG_MMU
	phys_addr_t pa;
	struct vm_struct *area;
	unsigned long va;
#endif

	if (in_interrupt())
		BUG();

	/* Only allocate page size areas. */
	size = PAGE_ALIGN(size);
	order = get_order(size);

	vaddr = __get_free_pages(gfp, order);
	if (!vaddr)
		return NULL;

	/*
	 * We need to ensure that there are no cachelines in use,
	 * or worse, dirty, in this area.
	 */
	flush_dcache_range(virt_to_phys((void *)vaddr),
			virt_to_phys((void *)vaddr) + size);

#ifndef CONFIG_MMU
	ret = (void *)vaddr;
	/*
	 * Here's the magic! Note if the uncached shadow is not implemented,
	 * it's up to the calling code to also test that condition and make
	 * other arrangements, such as manually flushing the cache and so on.
	 */
# ifdef CONFIG_XILINX_UNCACHED_SHADOW
	ret = (void *)((unsigned) ret | UNCACHED_SHADOW_MASK);
# endif
	if ((unsigned int)ret > cpuinfo.dcache_base &&
			(unsigned int)ret < cpuinfo.dcache_high)
		pr_warn("ERROR: Your cache coherent area is CACHED!!!\n");

	/* dma_handle is same as physical (shadowed) address */
	*dma_handle = (dma_addr_t)ret;
#else
	/* Allocate some common virtual space to map the new pages. */
	area = get_vm_area(size, VM_ALLOC);
	if (!area) {
		free_pages(vaddr, order);
		return NULL;
	}
	va = (unsigned long) area->addr;
	ret = (void *)va;

	/* This gives us the real physical address of the first page. */
	*dma_handle = pa = __virt_to_phys(vaddr);
#endif

	/*
	 * free wasted pages.  We skip the first page since we know
	 * that it will have count = 1 and won't require freeing.
	 * We also mark the pages in use as reserved so that
	 * remap_page_range works.
	 */
	page = virt_to_page(vaddr);
	end = page + (1 << order);

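	/*
	 * split_page() turns the order-N block into 1 << N independently
	 * refcounted order-0 pages, so the tail pages beyond "size" can be
	 * handed back individually below.
	 */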
	split_page(page, order);

	for (i = 0; i < size && err == 0; i += PAGE_SIZE) {
#ifdef CONFIG_MMU
		/* MS: This is the whole magic - use cache inhibit pages */
		err = map_page(va + i, pa + i, _PAGE_KERNEL | _PAGE_NO_CACHE);
#endif

		SetPageReserved(page);
		page++;
	}

	/* Free the otherwise unused pages. */
	while (page < end) {
		__free_page(page);
		page++;
	}

	if (err) {
		free_pages(vaddr, order);
		return NULL;
	}

	return ret;
}
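
/*
 * A minimal usage sketch (not part of the original file): drivers never
 * call arch_dma_alloc() directly; they go through the generic DMA API,
 * which the dma-noncoherent glue routes to the hooks in this file.
 * "my_dev" and the one-page buffer below are illustrative assumptions.
 */
#if 0	/* example only, never compiled */
static int example_coherent_buffer(struct device *my_dev)
{
	dma_addr_t bus;
	void *cpu;

	/* Lands in arch_dma_alloc() on this architecture. */
	cpu = dma_alloc_coherent(my_dev, PAGE_SIZE, &bus, GFP_KERNEL);
	if (!cpu)
		return -ENOMEM;

	/* ... hand "bus" to the device, access the buffer via "cpu" ... */

	/* Lands in arch_dma_free(). */
	dma_free_coherent(my_dev, PAGE_SIZE, cpu, bus);
	return 0;
}
#endif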
#ifdef CONFIG_MMU
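/*
 * Walk the kernel page tables (init_mm) down to the PTE that maps a
 * virtual address inside the consistent area.
 */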
static pte_t *consistent_virt_to_pte(void *vaddr)
{
	unsigned long addr = (unsigned long)vaddr;

	return pte_offset_kernel(pmd_offset(pgd_offset_k(addr), addr), addr);
}
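
/*
 * Map a coherent kernel virtual address back to its page frame number;
 * the generic DMA code uses this when a coherent buffer is mmap()ed
 * into user space.
 */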
long arch_dma_coherent_to_pfn(struct device *dev, void *vaddr,
		dma_addr_t dma_addr)
{
	pte_t *ptep = consistent_virt_to_pte(vaddr);

	if (pte_none(*ptep) || !pte_present(*ptep))
		return 0;

	return pte_pfn(*ptep);
}
#endif

/*
 * free page(s) as defined by the above mapping.
 */
void arch_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_addr, unsigned long attrs)
{
	struct page *page;

	if (in_interrupt())
		BUG();

	size = PAGE_ALIGN(size);

#ifndef CONFIG_MMU
	/* Clear SHADOW_MASK bit in address, and free as per usual */
# ifdef CONFIG_XILINX_UNCACHED_SHADOW
	vaddr = (void *)((unsigned)vaddr & ~UNCACHED_SHADOW_MASK);
# endif
	page = virt_to_page(vaddr);

	do {
		__free_reserved_page(page);
		page++;
	} while (size -= PAGE_SIZE);
#else
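	/*
	 * MMU case: walk the buffer one page at a time, clearing each
	 * cache-inhibited PTE and releasing the backing page.
	 */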
	do {
		pte_t *ptep = consistent_virt_to_pte(vaddr);
		unsigned long pfn;

		if (!pte_none(*ptep) && pte_present(*ptep)) {
			pfn = pte_pfn(*ptep);
			pte_clear(&init_mm, (unsigned int)vaddr, ptep);
			if (pfn_valid(pfn)) {
				page = pfn_to_page(pfn);
				__free_reserved_page(page);
			}
		}
		vaddr += PAGE_SIZE;
	} while (size -= PAGE_SIZE);

	/* flush tlb */
	flush_tlb_all();
#endif
}