/*
 * Copyright (C) 1994, 1995 Waldorf GmbH
 * Copyright (C) 1994 - 2000, 06 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2004, 2005 MIPS Technologies, Inc. All rights reserved.
 * Author: Maciej W. Rozycki <macro@mips.com>
 *
 * SPDX-License-Identifier: GPL-2.0
 */
#ifndef _ASM_IO_H
#define _ASM_IO_H

#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/types.h>

#include <asm/addrspace.h>
#include <asm/byteorder.h>
#include <asm/cpu-features.h>
#include <asm/pgtable-bits.h>
#include <asm/processor.h>
#include <asm/string.h>

#include <ioremap.h>
#include <mangle-port.h>
#include <spaces.h>

/*
 * Raw operations are never swapped in software. OTOH values that raw
 * operations are working on may or may not have been swapped by the bus
 * hardware. An example use would be for flash memory that's used for
 * execute in place.
 */
# define __raw_ioswabb(a, x)	(x)
# define __raw_ioswabw(a, x)	(x)
# define __raw_ioswabl(a, x)	(x)
# define __raw_ioswabq(a, x)	(x)
# define ____raw_ioswabq(a, x)	(x)
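/*
 * Illustrative sketch (not part of the original header): the __raw_
 * variants perform no software byte swapping, which is typically what
 * execute-in-place flash wants, while the plain accessors go through
 * the ioswab helpers from mangle-port.h.  The flash base address is
 * hypothetical.
 *
 *	void __iomem *flash = (void __iomem *)CKSEG1ADDR(0x1fc00000);
 *	u32 raw  = __raw_readl(flash);	bus byte order, unswapped
 *	u32 host = readl(flash);	swapped per mangle-port.h, if at all
 */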

/* ioswab[bwlq], __mem_ioswab[bwlq] are defined in mangle-port.h */

#define IO_SPACE_LIMIT 0xffff

#ifdef CONFIG_DYNAMIC_IO_PORT_BASE

static inline ulong mips_io_port_base(void)
{
	DECLARE_GLOBAL_DATA_PTR;

	return gd->arch.io_port_base;
}

static inline void set_io_port_base(unsigned long base)
{
	DECLARE_GLOBAL_DATA_PTR;

	gd->arch.io_port_base = base;
	barrier();
}

#else /* !CONFIG_DYNAMIC_IO_PORT_BASE */

static inline ulong mips_io_port_base(void)
{
	return 0;
}

static inline void set_io_port_base(unsigned long base)
{
	BUG_ON(base);
}

#endif /* !CONFIG_DYNAMIC_IO_PORT_BASE */
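
/*
 * Illustrative sketch (not part of the original header): with
 * CONFIG_DYNAMIC_IO_PORT_BASE a board would record the CPU-visible base
 * of its PCI/ISA I/O window once during init; the in/out helpers below
 * then add the port number to that base.  The window address and port
 * are hypothetical.
 *
 *	set_io_port_base(CKSEG1ADDR(0x18000000));
 *	outb(0x00, 0x80);	accesses CKSEG1ADDR(0x18000000) + 0x80
 */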

/*
 * virt_to_phys - map virtual addresses to physical
 * @address: address to remap
 *
 * The returned physical address is the physical (CPU) mapping for
 * the memory address given. It is only valid to use this function on
 * addresses directly mapped or allocated via kmalloc.
 *
 * This function does not give bus mappings for DMA transfers. In
 * almost all conceivable cases a device driver should not be using
 * this function.
 */
static inline unsigned long virt_to_phys(volatile const void *address)
{
	unsigned long addr = (unsigned long)address;

	/* this corresponds to kernel implementation of __pa() */
#ifdef CONFIG_64BIT
	if (addr < CKSEG0)
		return XPHYSADDR(addr);
#endif
	return CPHYSADDR(addr);
}

/*
 * phys_to_virt - map physical address to virtual
 * @address: address to remap
 *
 * The returned virtual address is a current CPU mapping for
 * the memory address given. It is only valid to use this function on
 * addresses that have a kernel mapping.
 *
 * This function does not handle bus mappings for DMA transfers. In
 * almost all conceivable cases a device driver should not be using
 * this function.
 */
static inline void *phys_to_virt(unsigned long address)
{
	return (void *)(address + PAGE_OFFSET - PHYS_OFFSET);
}
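
/*
 * Illustrative sketch (not part of the original header): for a pointer
 * into the directly mapped (KSEG0) region the two helpers above are
 * typically inverses of each other.  The buffer is hypothetical.
 *
 *	void *buf = ...;			directly mapped buffer
 *	unsigned long pa = virt_to_phys(buf);	strip the KSEG0/XKPHYS offset
 *	void *va = phys_to_virt(pa);		back to a CPU virtual address
 */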

/*
 * ISA I/O bus memory addresses are 1:1 with the physical address.
 */
static inline unsigned long isa_virt_to_bus(volatile void *address)
{
	return (unsigned long)address - PAGE_OFFSET;
}

static inline void *isa_bus_to_virt(unsigned long address)
{
	return (void *)(address + PAGE_OFFSET);
}

#define isa_page_to_bus page_to_phys

/*
 * However PCI ones are not necessarily 1:1 and therefore these interfaces
 * are forbidden in portable PCI drivers.
 *
 * Allow them for x86 for legacy drivers, though.
 */
#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt

static inline void __iomem *__ioremap_mode(phys_addr_t offset, unsigned long size,
					   unsigned long flags)
{
	void __iomem *addr;
	phys_addr_t phys_addr;

	addr = plat_ioremap(offset, size, flags);
	if (addr)
		return addr;

	phys_addr = fixup_bigphys_addr(offset, size);
	return (void __iomem *)(unsigned long)CKSEG1ADDR(phys_addr);
}

/*
 * ioremap - map bus memory into CPU space
 * @offset: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 */
#define ioremap(offset, size) \
	__ioremap_mode((offset), (size), _CACHE_UNCACHED)

/*
 * ioremap_nocache - map bus memory into CPU space
 * @offset: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable:
 */
#define ioremap_nocache(offset, size) \
	__ioremap_mode((offset), (size), _CACHE_UNCACHED)
#define ioremap_uc ioremap_nocache

/*
 * ioremap_cachable - map bus memory into CPU space
 * @offset: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_cachable performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked cachable by
 * the CPU. Also enables full write-combining. Useful for some
 * memory-like regions on I/O busses.
 */
#define ioremap_cachable(offset, size) \
	__ioremap_mode((offset), (size), _page_cachable_default)

/*
 * These two are MIPS specific ioremap variants. ioremap_cacheable_cow
 * requests a cachable mapping, ioremap_uncached_accelerated requests a
 * mapping using the uncached accelerated mode which isn't supported on
 * all processors.
 */
#define ioremap_cacheable_cow(offset, size) \
	__ioremap_mode((offset), (size), _CACHE_CACHABLE_COW)
#define ioremap_uncached_accelerated(offset, size) \
	__ioremap_mode((offset), (size), _CACHE_UNCACHED_ACCELERATED)

static inline void iounmap(const volatile void __iomem *addr)
{
	plat_iounmap(addr);
}
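
/*
 * Illustrative sketch (not part of the original header): a typical driver
 * maps its register block uncached, accesses it with the mmio helpers
 * generated further down and finally tears the mapping down again.  The
 * physical base address and register offsets are hypothetical.
 *
 *	void __iomem *regs = ioremap_nocache(0x1e000000, 0x1000);
 *	u32 id = readl(regs + 0x04);
 *	writel(0x1, regs + 0x08);
 *	iounmap(regs);
 */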

#ifdef CONFIG_CPU_CAVIUM_OCTEON
#define war_octeon_io_reorder_wmb()	wmb()
#else
#define war_octeon_io_reorder_wmb()	do { } while (0)
#endif

#define __BUILD_MEMORY_SINGLE(pfx, bwlq, type, irq) \
 \
static inline void pfx##write##bwlq(type val, \
				    volatile void __iomem *mem) \
{ \
	volatile type *__mem; \
	type __val; \
 \
	war_octeon_io_reorder_wmb(); \
 \
	__mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem)); \
 \
	__val = pfx##ioswab##bwlq(__mem, val); \
 \
	if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \
		*__mem = __val; \
	else if (cpu_has_64bits) { \
		type __tmp; \
 \
		__asm__ __volatile__( \
			".set arch=r4000" "\t\t# __writeq""\n\t" \
			"dsll32 %L0, %L0, 0" "\n\t" \
			"dsrl32 %L0, %L0, 0" "\n\t" \
			"dsll32 %M0, %M0, 0" "\n\t" \
			"or %L0, %L0, %M0" "\n\t" \
			"sd %L0, %2" "\n\t" \
			".set mips0" "\n" \
			: "=r" (__tmp) \
			: "0" (__val), "m" (*__mem)); \
	} else \
		BUG(); \
} \
 \
static inline type pfx##read##bwlq(const volatile void __iomem *mem) \
{ \
	volatile type *__mem; \
	type __val; \
 \
	__mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem)); \
 \
	if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \
		__val = *__mem; \
	else if (cpu_has_64bits) { \
		__asm__ __volatile__( \
			".set arch=r4000" "\t\t# __readq" "\n\t" \
			"ld %L0, %1" "\n\t" \
			"dsra32 %M0, %L0, 0" "\n\t" \
			"sll %L0, %L0, 0" "\n\t" \
			".set mips0" "\n" \
			: "=r" (__val) \
			: "m" (*__mem)); \
	} else { \
		__val = 0; \
		BUG(); \
	} \
 \
	return pfx##ioswab##bwlq(__mem, __val); \
}

#define __BUILD_IOPORT_SINGLE(pfx, bwlq, type, p) \
 \
static inline void pfx##out##bwlq##p(type val, unsigned long port) \
{ \
	volatile type *__addr; \
	type __val; \
 \
	war_octeon_io_reorder_wmb(); \
 \
	__addr = (void *)__swizzle_addr_##bwlq(mips_io_port_base() + port); \
 \
	__val = pfx##ioswab##bwlq(__addr, val); \
 \
	/* Really, we want this to be atomic */ \
	BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long)); \
 \
	*__addr = __val; \
} \
 \
static inline type pfx##in##bwlq##p(unsigned long port) \
{ \
	volatile type *__addr; \
	type __val; \
 \
	__addr = (void *)__swizzle_addr_##bwlq(mips_io_port_base() + port); \
 \
	BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long)); \
 \
	__val = *__addr; \
 \
	return pfx##ioswab##bwlq(__addr, __val); \
}

#define __BUILD_MEMORY_PFX(bus, bwlq, type) \
 \
__BUILD_MEMORY_SINGLE(bus, bwlq, type, 1)

#define BUILDIO_MEM(bwlq, type) \
 \
__BUILD_MEMORY_PFX(__raw_, bwlq, type) \
__BUILD_MEMORY_PFX(, bwlq, type) \
__BUILD_MEMORY_PFX(__mem_, bwlq, type) \

BUILDIO_MEM(b, u8)
BUILDIO_MEM(w, u16)
BUILDIO_MEM(l, u32)
BUILDIO_MEM(q, u64)

#define __BUILD_IOPORT_PFX(bus, bwlq, type) \
	__BUILD_IOPORT_SINGLE(bus, bwlq, type, ) \
	__BUILD_IOPORT_SINGLE(bus, bwlq, type, _p)

#define BUILDIO_IOPORT(bwlq, type) \
	__BUILD_IOPORT_PFX(, bwlq, type) \
	__BUILD_IOPORT_PFX(__mem_, bwlq, type)

BUILDIO_IOPORT(b, u8)
BUILDIO_IOPORT(w, u16)
BUILDIO_IOPORT(l, u32)
#ifdef CONFIG_64BIT
BUILDIO_IOPORT(q, u64)
#endif
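
/*
 * Illustrative sketch (not part of the original header): the generated
 * port accessors take a port number that is added to mips_io_port_base()
 * before the access.  The legacy POST code port below is only an example.
 *
 *	outb(0x55, 0x80);	write a POST code
 *	u8 post = inb(0x80);	read it back, if the board latches it
 */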

#define __BUILDIO(bwlq, type) \
 \
__BUILD_MEMORY_SINGLE(____raw_, bwlq, type, 0)

__BUILDIO(q, u64)

#define readb_relaxed readb
#define readw_relaxed readw
#define readl_relaxed readl
#define readq_relaxed readq

#define writeb_relaxed writeb
#define writew_relaxed writew
#define writel_relaxed writel
#define writeq_relaxed writeq

#define readb_be(addr) \
	__raw_readb((__force unsigned *)(addr))
#define readw_be(addr) \
	be16_to_cpu(__raw_readw((__force unsigned *)(addr)))
#define readl_be(addr) \
	be32_to_cpu(__raw_readl((__force unsigned *)(addr)))
#define readq_be(addr) \
	be64_to_cpu(__raw_readq((__force unsigned *)(addr)))

#define writeb_be(val, addr) \
	__raw_writeb((val), (__force unsigned *)(addr))
#define writew_be(val, addr) \
	__raw_writew(cpu_to_be16((val)), (__force unsigned *)(addr))
#define writel_be(val, addr) \
	__raw_writel(cpu_to_be32((val)), (__force unsigned *)(addr))
#define writeq_be(val, addr) \
	__raw_writeq(cpu_to_be64((val)), (__force unsigned *)(addr))
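
/*
 * Illustrative sketch (not part of the original header): the *_be helpers
 * access registers that are big-endian regardless of CPU endianness.
 * The mapping and offset are hypothetical.
 *
 *	u32 ctrl = readl_be(regs + 0x10);
 *	writel_be(ctrl | 0x1, regs + 0x10);
 */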

/*
 * Some code tests for these symbols
 */
#define readq readq
#define writeq writeq

#define __BUILD_MEMORY_STRING(bwlq, type) \
 \
static inline void writes##bwlq(volatile void __iomem *mem, \
				const void *addr, unsigned int count) \
{ \
	const volatile type *__addr = addr; \
 \
	while (count--) { \
		__mem_write##bwlq(*__addr, mem); \
		__addr++; \
	} \
} \
 \
static inline void reads##bwlq(volatile void __iomem *mem, void *addr, \
			       unsigned int count) \
{ \
	volatile type *__addr = addr; \
 \
	while (count--) { \
		*__addr = __mem_read##bwlq(mem); \
		__addr++; \
	} \
}

#define __BUILD_IOPORT_STRING(bwlq, type) \
 \
static inline void outs##bwlq(unsigned long port, const void *addr, \
			      unsigned int count) \
{ \
	const volatile type *__addr = addr; \
 \
	while (count--) { \
		__mem_out##bwlq(*__addr, port); \
		__addr++; \
	} \
} \
 \
static inline void ins##bwlq(unsigned long port, void *addr, \
			     unsigned int count) \
{ \
	volatile type *__addr = addr; \
 \
	while (count--) { \
		*__addr = __mem_in##bwlq(port); \
		__addr++; \
	} \
}

#define BUILDSTRING(bwlq, type) \
 \
__BUILD_MEMORY_STRING(bwlq, type) \
__BUILD_IOPORT_STRING(bwlq, type)

BUILDSTRING(b, u8)
BUILDSTRING(w, u16)
BUILDSTRING(l, u32)
#ifdef CONFIG_64BIT
BUILDSTRING(q, u64)
#endif
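
/*
 * Illustrative sketch (not part of the original header): the string
 * accessors repeatedly access a single MMIO location or port, which
 * suits data FIFOs.  The FIFO register and transfer length are
 * hypothetical.
 *
 *	u16 buf[64];
 *	readsw(fifo_reg, buf, 64);	drain 64 halfwords from a FIFO
 *	writesw(fifo_reg, buf, 64);	push them back out
 */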


#ifdef CONFIG_CPU_CAVIUM_OCTEON
#define mmiowb() wmb()
#else
/* Depends on MIPS II instruction set */
#define mmiowb() asm volatile ("sync" ::: "memory")
#endif

static inline void memset_io(volatile void __iomem *addr, unsigned char val, int count)
{
	memset((void __force *)addr, val, count);
}
static inline void memcpy_fromio(void *dst, const volatile void __iomem *src, int count)
{
	memcpy(dst, (void __force *)src, count);
}
static inline void memcpy_toio(volatile void __iomem *dst, const void *src, int count)
{
	memcpy((void __force *)dst, src, count);
}

/*
 * Read a 32-bit register that requires a 64-bit read cycle on the bus.
 * Avoid interrupt mucking, just adjust the address for 4-byte access.
 * Assume the addresses are 8-byte aligned.
 */
#ifdef __MIPSEB__
#define __CSR_32_ADJUST 4
#else
#define __CSR_32_ADJUST 0
#endif

#define csr_out32(v, a) (*(volatile u32 *)((unsigned long)(a) + __CSR_32_ADJUST) = (v))
#define csr_in32(a) (*(volatile u32 *)((unsigned long)(a) + __CSR_32_ADJUST))
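
/*
 * Illustrative sketch (not part of the original header): on a big-endian
 * CPU the 32 significant bits of an 8-byte-aligned, 64-bit-wide CSR sit
 * at offset 4, which is what __CSR_32_ADJUST compensates for.  The
 * register address is hypothetical.
 *
 *	u32 stat = csr_in32(0xbf000010);
 *	csr_out32(stat | 0x1, 0xbf000010);
 */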

/*
 * U-Boot specific
 */
#define sync() mmiowb()

#define MAP_NOCACHE (1)
#define MAP_WRCOMBINE (0)
#define MAP_WRBACK (0)
#define MAP_WRTHROUGH (0)

static inline void *
map_physmem(phys_addr_t paddr, unsigned long len, unsigned long flags)
{
	if (flags == MAP_NOCACHE)
		return ioremap(paddr, len);

	return (void *)CKSEG0ADDR(paddr);
}

/*
 * Take down a mapping set up by map_physmem().
 */
static inline void unmap_physmem(void *vaddr, unsigned long flags)
{
}
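
/*
 * Illustrative sketch (not part of the original header): U-Boot drivers
 * normally obtain a register mapping through map_physmem() and release
 * it with unmap_physmem(), which is a no-op on MIPS.  The address and
 * size are hypothetical.
 *
 *	void *regs = map_physmem(0x1e000000, 0x1000, MAP_NOCACHE);
 *	...
 *	unmap_physmem(regs, MAP_NOCACHE);
 */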

#define __BUILD_CLRBITS(bwlq, sfx, end, type) \
 \
static inline void clrbits_##sfx(volatile void __iomem *mem, type clr) \
{ \
	type __val = __raw_read##bwlq(mem); \
	__val = end##_to_cpu(__val); \
	__val &= ~clr; \
	__val = cpu_to_##end(__val); \
	__raw_write##bwlq(__val, mem); \
}

#define __BUILD_SETBITS(bwlq, sfx, end, type) \
 \
static inline void setbits_##sfx(volatile void __iomem *mem, type set) \
{ \
	type __val = __raw_read##bwlq(mem); \
	__val = end##_to_cpu(__val); \
	__val |= set; \
	__val = cpu_to_##end(__val); \
	__raw_write##bwlq(__val, mem); \
}

#define __BUILD_CLRSETBITS(bwlq, sfx, end, type) \
 \
static inline void clrsetbits_##sfx(volatile void __iomem *mem, \
				    type clr, type set) \
{ \
	type __val = __raw_read##bwlq(mem); \
	__val = end##_to_cpu(__val); \
	__val &= ~clr; \
	__val |= set; \
	__val = cpu_to_##end(__val); \
	__raw_write##bwlq(__val, mem); \
}

#define BUILD_CLRSETBITS(bwlq, sfx, end, type) \
 \
__BUILD_CLRBITS(bwlq, sfx, end, type) \
__BUILD_SETBITS(bwlq, sfx, end, type) \
__BUILD_CLRSETBITS(bwlq, sfx, end, type)

#define __to_cpu(v) (v)
#define cpu_to__(v) (v)

BUILD_CLRSETBITS(b, 8, _, u8)
BUILD_CLRSETBITS(w, le16, le16, u16)
BUILD_CLRSETBITS(w, be16, be16, u16)
BUILD_CLRSETBITS(w, 16, _, u16)
BUILD_CLRSETBITS(l, le32, le32, u32)
BUILD_CLRSETBITS(l, be32, be32, u32)
BUILD_CLRSETBITS(l, 32, _, u32)
BUILD_CLRSETBITS(q, le64, le64, u64)
BUILD_CLRSETBITS(q, be64, be64, u64)
BUILD_CLRSETBITS(q, 64, _, u64)
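
/*
 * Illustrative sketch (not part of the original header): the generated
 * accessors implement the usual U-Boot read-modify-write idiom on
 * little-endian, big-endian or native-endian registers.  The mapping,
 * offsets and field values are hypothetical.
 *
 *	setbits_le32(regs + 0x00, 0x1);			set an enable bit
 *	clrbits_le32(regs + 0x00, 0x1);			clear it again
 *	clrsetbits_le32(regs + 0x04, 0xff, 0x2a);	replace a byte-wide field
 */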

#endif /* _ASM_IO_H */