]> git.ipfire.org Git - thirdparty/kernel/linux.git/blame - include/asm-generic/io.h
riscv: mm: synchronize MMU after pte change
[thirdparty/kernel/linux.git] / include / asm-generic / io.h
CommitLineData
/* Generic I/O port emulation.
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
11#ifndef __ASM_GENERIC_IO_H
12#define __ASM_GENERIC_IO_H
13
14#include <asm/page.h> /* I/O is all done through memory accesses */
9216efaf 15#include <linux/string.h> /* for memset() and memcpy() */
3f7e212d
AB
16#include <linux/types.h>
17
18#ifdef CONFIG_GENERIC_IOMAP
19#include <asm-generic/iomap.h>
20#endif
21
60ca1e5a 22#include <asm/mmiowb.h>
66eab4df
MT
23#include <asm-generic/pci_iomap.h>
24
/*
 * Default I/O-access barriers.  An architecture overrides any of these
 * before including this header; the fallbacks below use the strongest
 * primitive visible at preprocessing time (rmb()/wmb() when those macros
 * are defined, otherwise a plain compiler barrier()).
 *
 * Naming: __io_b* = "before", __io_a* = "after"; the trailing r/w is the
 * direction of the MMIO access being fenced; the __io_p* variants are the
 * port-I/O (inX()/outX()) counterparts and default to the MMIO ones.
 */
#ifndef __io_br
#define __io_br()	barrier()
#endif

/* prevent prefetching of coherent DMA data ahead of a dma-complete */
#ifndef __io_ar
#ifdef rmb
#define __io_ar(v)	rmb()
#else
#define __io_ar(v)	barrier()
#endif
#endif

/* flush writes to coherent DMA data before possibly triggering a DMA read */
#ifndef __io_bw
#ifdef wmb
#define __io_bw()	wmb()
#else
#define __io_bw()	barrier()
#endif
#endif

/* serialize device access against a spin_unlock, usually handled there. */
#ifndef __io_aw
#define __io_aw()	mmiowb_set_pending()
#endif

#ifndef __io_pbw
#define __io_pbw()	__io_bw()
#endif

#ifndef __io_paw
#define __io_paw()	__io_aw()
#endif

#ifndef __io_pbr
#define __io_pbr()	__io_br()
#endif

#ifndef __io_par
#define __io_par(v)	__io_ar(v)
#endif

68
/*
 * __raw_{read,write}{b,w,l,q}() access memory in native endianness.
 *
 * On some architectures memory mapped IO needs to be accessed differently.
 * On the simple architectures, we just read/write the memory location
 * directly.
 */

#ifndef __raw_readb
#define __raw_readb __raw_readb
/* Read one byte of MMIO; the volatile cast forces an actual load. */
static inline u8 __raw_readb(const volatile void __iomem *addr)
{
	return *(const volatile u8 __force *)addr;
}
#endif

#ifndef __raw_readw
#define __raw_readw __raw_readw
/* Read a 16-bit MMIO value in native (CPU) byte order. */
static inline u16 __raw_readw(const volatile void __iomem *addr)
{
	return *(const volatile u16 __force *)addr;
}
#endif

#ifndef __raw_readl
#define __raw_readl __raw_readl
/* Read a 32-bit MMIO value in native (CPU) byte order. */
static inline u32 __raw_readl(const volatile void __iomem *addr)
{
	return *(const volatile u32 __force *)addr;
}
#endif

#ifdef CONFIG_64BIT
#ifndef __raw_readq
#define __raw_readq __raw_readq
/* Read a 64-bit MMIO value; only available on 64-bit builds. */
static inline u64 __raw_readq(const volatile void __iomem *addr)
{
	return *(const volatile u64 __force *)addr;
}
#endif
#endif /* CONFIG_64BIT */
3f7e212d 110
#ifndef __raw_writeb
#define __raw_writeb __raw_writeb
/* Write one byte of MMIO; no ordering guarantees beyond the volatile store. */
static inline void __raw_writeb(u8 value, volatile void __iomem *addr)
{
	*(volatile u8 __force *)addr = value;
}
#endif

#ifndef __raw_writew
#define __raw_writew __raw_writew
/* Write a 16-bit MMIO value in native (CPU) byte order. */
static inline void __raw_writew(u16 value, volatile void __iomem *addr)
{
	*(volatile u16 __force *)addr = value;
}
#endif

#ifndef __raw_writel
#define __raw_writel __raw_writel
/* Write a 32-bit MMIO value in native (CPU) byte order. */
static inline void __raw_writel(u32 value, volatile void __iomem *addr)
{
	*(volatile u32 __force *)addr = value;
}
#endif

#ifdef CONFIG_64BIT
#ifndef __raw_writeq
#define __raw_writeq __raw_writeq
/* Write a 64-bit MMIO value; only available on 64-bit builds. */
static inline void __raw_writeq(u64 value, volatile void __iomem *addr)
{
	*(volatile u64 __force *)addr = value;
}
#endif
#endif /* CONFIG_64BIT */
cd248341 144
9216efaf
TR
/*
 * {read,write}{b,w,l,q}() access little endian memory and return result in
 * native endianness.
 *
 * Each read is bracketed by __io_br()/__io_ar() so that the access is
 * ordered against surrounding normal memory accesses (in particular,
 * coherent DMA buffers).
 */

#ifndef readb
#define readb readb
static inline u8 readb(const volatile void __iomem *addr)
{
	u8 val;

	__io_br();
	val = __raw_readb(addr);
	__io_ar(val);	/* val is passed so arches can order on the result */
	return val;
}
#endif

#ifndef readw
#define readw readw
static inline u16 readw(const volatile void __iomem *addr)
{
	u16 val;

	__io_br();
	val = __le16_to_cpu(__raw_readw(addr));
	__io_ar(val);
	return val;
}
#endif

#ifndef readl
#define readl readl
static inline u32 readl(const volatile void __iomem *addr)
{
	u32 val;

	__io_br();
	val = __le32_to_cpu(__raw_readl(addr));
	__io_ar(val);
	return val;
}
#endif

#ifdef CONFIG_64BIT
#ifndef readq
#define readq readq
static inline u64 readq(const volatile void __iomem *addr)
{
	u64 val;

	__io_br();
	val = __le64_to_cpu(__raw_readq(addr));
	__io_ar(val);
	return val;
}
#endif
#endif /* CONFIG_64BIT */
3f7e212d 203
/*
 * writeX() store a native-endian value to little-endian MMIO.  Each write
 * is preceded by __io_bw() (flush prior normal stores, e.g. DMA buffers)
 * and followed by __io_aw() (order against a later spin_unlock()).
 */
#ifndef writeb
#define writeb writeb
static inline void writeb(u8 value, volatile void __iomem *addr)
{
	__io_bw();
	__raw_writeb(value, addr);
	__io_aw();
}
#endif

#ifndef writew
#define writew writew
static inline void writew(u16 value, volatile void __iomem *addr)
{
	__io_bw();
	__raw_writew(cpu_to_le16(value), addr);
	__io_aw();
}
#endif

#ifndef writel
#define writel writel
static inline void writel(u32 value, volatile void __iomem *addr)
{
	__io_bw();
	__raw_writel(__cpu_to_le32(value), addr);
	__io_aw();
}
#endif

#ifdef CONFIG_64BIT
#ifndef writeq
#define writeq writeq
static inline void writeq(u64 value, volatile void __iomem *addr)
{
	__io_bw();
	__raw_writeq(__cpu_to_le64(value), addr);
	__io_aw();
}
#endif
#endif /* CONFIG_64BIT */
3f7e212d 245
1c8d2969
AB
/*
 * {read,write}{b,w,l,q}_relaxed() are like the regular version, but
 * are not guaranteed to provide ordering against spinlocks or memory
 * accesses.
 *
 * They still perform byte swapping to/from little-endian, but omit the
 * __io_* barriers, which makes them cheaper on weakly ordered machines.
 */
#ifndef readb_relaxed
#define readb_relaxed readb_relaxed
static inline u8 readb_relaxed(const volatile void __iomem *addr)
{
	return __raw_readb(addr);
}
#endif

#ifndef readw_relaxed
#define readw_relaxed readw_relaxed
static inline u16 readw_relaxed(const volatile void __iomem *addr)
{
	return __le16_to_cpu(__raw_readw(addr));
}
#endif

#ifndef readl_relaxed
#define readl_relaxed readl_relaxed
static inline u32 readl_relaxed(const volatile void __iomem *addr)
{
	return __le32_to_cpu(__raw_readl(addr));
}
#endif

/* Only provided when readq itself exists (i.e. 64-bit or arch-supplied). */
#if defined(readq) && !defined(readq_relaxed)
#define readq_relaxed readq_relaxed
static inline u64 readq_relaxed(const volatile void __iomem *addr)
{
	return __le64_to_cpu(__raw_readq(addr));
}
#endif

#ifndef writeb_relaxed
#define writeb_relaxed writeb_relaxed
static inline void writeb_relaxed(u8 value, volatile void __iomem *addr)
{
	__raw_writeb(value, addr);
}
#endif

#ifndef writew_relaxed
#define writew_relaxed writew_relaxed
static inline void writew_relaxed(u16 value, volatile void __iomem *addr)
{
	__raw_writew(cpu_to_le16(value), addr);
}
#endif

#ifndef writel_relaxed
#define writel_relaxed writel_relaxed
static inline void writel_relaxed(u32 value, volatile void __iomem *addr)
{
	__raw_writel(__cpu_to_le32(value), addr);
}
#endif

/* Only provided when writeq itself exists (i.e. 64-bit or arch-supplied). */
#if defined(writeq) && !defined(writeq_relaxed)
#define writeq_relaxed writeq_relaxed
static inline void writeq_relaxed(u64 value, volatile void __iomem *addr)
{
	__raw_writeq(__cpu_to_le64(value), addr);
}
#endif
9ab3a7a0
TR
/*
 * {read,write}s{b,w,l,q}() repeatedly access the same memory address in
 * native endianness in 8-, 16-, 32- or 64-bit chunks (@count times).
 *
 * These are FIFO-style accessors: the MMIO address is fixed while the
 * CPU-side buffer pointer advances.  No byte swapping is performed and
 * count == 0 is a no-op.
 */
#ifndef readsb
#define readsb readsb
static inline void readsb(const volatile void __iomem *addr, void *buffer,
			  unsigned int count)
{
	if (count) {
		u8 *buf = buffer;

		do {
			u8 x = __raw_readb(addr);
			*buf++ = x;
		} while (--count);
	}
}
#endif

#ifndef readsw
#define readsw readsw
static inline void readsw(const volatile void __iomem *addr, void *buffer,
			  unsigned int count)
{
	if (count) {
		u16 *buf = buffer;

		do {
			u16 x = __raw_readw(addr);
			*buf++ = x;
		} while (--count);
	}
}
#endif

#ifndef readsl
#define readsl readsl
static inline void readsl(const volatile void __iomem *addr, void *buffer,
			  unsigned int count)
{
	if (count) {
		u32 *buf = buffer;

		do {
			u32 x = __raw_readl(addr);
			*buf++ = x;
		} while (--count);
	}
}
#endif

#ifdef CONFIG_64BIT
#ifndef readsq
#define readsq readsq
static inline void readsq(const volatile void __iomem *addr, void *buffer,
			  unsigned int count)
{
	if (count) {
		u64 *buf = buffer;

		do {
			u64 x = __raw_readq(addr);
			*buf++ = x;
		} while (--count);
	}
}
#endif
#endif /* CONFIG_64BIT */
384
/*
 * writes{b,w,l,q}(): mirror of reads{b,w,l,q}() — push @count chunks from
 * a CPU buffer to one fixed MMIO address, native endianness, no swapping.
 */
#ifndef writesb
#define writesb writesb
static inline void writesb(volatile void __iomem *addr, const void *buffer,
			   unsigned int count)
{
	if (count) {
		const u8 *buf = buffer;

		do {
			__raw_writeb(*buf++, addr);
		} while (--count);
	}
}
#endif

#ifndef writesw
#define writesw writesw
static inline void writesw(volatile void __iomem *addr, const void *buffer,
			   unsigned int count)
{
	if (count) {
		const u16 *buf = buffer;

		do {
			__raw_writew(*buf++, addr);
		} while (--count);
	}
}
#endif

#ifndef writesl
#define writesl writesl
static inline void writesl(volatile void __iomem *addr, const void *buffer,
			   unsigned int count)
{
	if (count) {
		const u32 *buf = buffer;

		do {
			__raw_writel(*buf++, addr);
		} while (--count);
	}
}
#endif

#ifdef CONFIG_64BIT
#ifndef writesq
#define writesq writesq
static inline void writesq(volatile void __iomem *addr, const void *buffer,
			   unsigned int count)
{
	if (count) {
		const u64 *buf = buffer;

		do {
			__raw_writeq(*buf++, addr);
		} while (--count);
	}
}
#endif
#endif /* CONFIG_64BIT */
3f7e212d 446
9216efaf
TR
/*
 * PCI_IOBASE: virtual base of the memory window that port I/O is mapped
 * into; inX()/outX() below add the port number to it.
 * NOTE(review): the default of 0 means the generic inX()/outX() do
 * arithmetic on a null pointer — architectures using the generic port
 * accessors are expected to provide a real PCI_IOBASE.  Confirm before
 * relying on the default.
 */
#ifndef PCI_IOBASE
#define PCI_IOBASE ((void __iomem *)0)
#endif

/* Highest valid port number (inclusive) for the I/O space. */
#ifndef IO_SPACE_LIMIT
#define IO_SPACE_LIMIT 0xffff
#endif

#include <linux/logic_pio.h>
9216efaf
TR
/*
 * {in,out}{b,w,l}() access little endian I/O. {in,out}{b,w,l}_p() can be
 * implemented on hardware that needs an additional delay for I/O accesses to
 * take effect.
 *
 * The generic versions are MMIO accesses at PCI_IOBASE + port, bracketed
 * by the port-I/O barrier variants (__io_pbr/__io_par/__io_pbw/__io_paw).
 */

#ifndef inb
#define inb inb
static inline u8 inb(unsigned long addr)
{
	u8 val;

	__io_pbr();
	val = __raw_readb(PCI_IOBASE + addr);
	__io_par(val);
	return val;
}
#endif

#ifndef inw
#define inw inw
static inline u16 inw(unsigned long addr)
{
	u16 val;

	__io_pbr();
	val = __le16_to_cpu(__raw_readw(PCI_IOBASE + addr));
	__io_par(val);
	return val;
}
#endif

#ifndef inl
#define inl inl
static inline u32 inl(unsigned long addr)
{
	u32 val;

	__io_pbr();
	val = __le32_to_cpu(__raw_readl(PCI_IOBASE + addr));
	__io_par(val);
	return val;
}
#endif

#ifndef outb
#define outb outb
static inline void outb(u8 value, unsigned long addr)
{
	__io_pbw();
	__raw_writeb(value, PCI_IOBASE + addr);
	__io_paw();
}
#endif

#ifndef outw
#define outw outw
static inline void outw(u16 value, unsigned long addr)
{
	__io_pbw();
	__raw_writew(cpu_to_le16(value), PCI_IOBASE + addr);
	__io_paw();
}
#endif

#ifndef outl
#define outl outl
static inline void outl(u32 value, unsigned long addr)
{
	__io_pbw();
	__raw_writel(cpu_to_le32(value), PCI_IOBASE + addr);
	__io_paw();
}
#endif
531
/*
 * The *_p ("pause") variants exist for hardware that needs an extra delay
 * after a port access; the generic fallbacks add no delay and simply
 * forward to the plain accessors.
 */
#ifndef inb_p
#define inb_p inb_p
static inline u8 inb_p(unsigned long addr)
{
	return inb(addr);
}
#endif

#ifndef inw_p
#define inw_p inw_p
static inline u16 inw_p(unsigned long addr)
{
	return inw(addr);
}
#endif

#ifndef inl_p
#define inl_p inl_p
static inline u32 inl_p(unsigned long addr)
{
	return inl(addr);
}
#endif

#ifndef outb_p
#define outb_p outb_p
static inline void outb_p(u8 value, unsigned long addr)
{
	outb(value, addr);
}
#endif

#ifndef outw_p
#define outw_p outw_p
static inline void outw_p(u16 value, unsigned long addr)
{
	outw(value, addr);
}
#endif

#ifndef outl_p
#define outl_p outl_p
static inline void outl_p(u32 value, unsigned long addr)
{
	outl(value, addr);
}
#endif
579
9ab3a7a0
TR
/*
 * {in,out}s{b,w,l}{,_p}() are variants of the above that repeatedly access a
 * single I/O port multiple times.
 *
 * Implemented via the string MMIO helpers at PCI_IOBASE + port; the *_p
 * variants again add no extra delay by default.
 */

#ifndef insb
#define insb insb
static inline void insb(unsigned long addr, void *buffer, unsigned int count)
{
	readsb(PCI_IOBASE + addr, buffer, count);
}
#endif

#ifndef insw
#define insw insw
static inline void insw(unsigned long addr, void *buffer, unsigned int count)
{
	readsw(PCI_IOBASE + addr, buffer, count);
}
#endif

#ifndef insl
#define insl insl
static inline void insl(unsigned long addr, void *buffer, unsigned int count)
{
	readsl(PCI_IOBASE + addr, buffer, count);
}
#endif

#ifndef outsb
#define outsb outsb
static inline void outsb(unsigned long addr, const void *buffer,
			 unsigned int count)
{
	writesb(PCI_IOBASE + addr, buffer, count);
}
#endif

#ifndef outsw
#define outsw outsw
static inline void outsw(unsigned long addr, const void *buffer,
			 unsigned int count)
{
	writesw(PCI_IOBASE + addr, buffer, count);
}
#endif

#ifndef outsl
#define outsl outsl
static inline void outsl(unsigned long addr, const void *buffer,
			 unsigned int count)
{
	writesl(PCI_IOBASE + addr, buffer, count);
}
#endif

#ifndef insb_p
#define insb_p insb_p
static inline void insb_p(unsigned long addr, void *buffer, unsigned int count)
{
	insb(addr, buffer, count);
}
#endif

#ifndef insw_p
#define insw_p insw_p
static inline void insw_p(unsigned long addr, void *buffer, unsigned int count)
{
	insw(addr, buffer, count);
}
#endif

#ifndef insl_p
#define insl_p insl_p
static inline void insl_p(unsigned long addr, void *buffer, unsigned int count)
{
	insl(addr, buffer, count);
}
#endif

#ifndef outsb_p
#define outsb_p outsb_p
static inline void outsb_p(unsigned long addr, const void *buffer,
			   unsigned int count)
{
	outsb(addr, buffer, count);
}
#endif

#ifndef outsw_p
#define outsw_p outsw_p
static inline void outsw_p(unsigned long addr, const void *buffer,
			   unsigned int count)
{
	outsw(addr, buffer, count);
}
#endif

#ifndef outsl_p
#define outsl_p outsl_p
static inline void outsl_p(unsigned long addr, const void *buffer,
			   unsigned int count)
{
	outsl(addr, buffer, count);
}
#endif
686
9216efaf
TR
/*
 * ioread/iowrite accessors for the generic (non-CONFIG_GENERIC_IOMAP)
 * case: cookies returned by ioremap()/ioport_map() are plain MMIO
 * pointers, so these forward straight to readX()/writeX().
 *
 * The *be variants access big-endian registers: implemented here as a
 * byte swap around the little-endian accessor, which yields a big-endian
 * bus access on either CPU endianness.
 */
#ifndef CONFIG_GENERIC_IOMAP
#ifndef ioread8
#define ioread8 ioread8
static inline u8 ioread8(const volatile void __iomem *addr)
{
	return readb(addr);
}
#endif

#ifndef ioread16
#define ioread16 ioread16
static inline u16 ioread16(const volatile void __iomem *addr)
{
	return readw(addr);
}
#endif

#ifndef ioread32
#define ioread32 ioread32
static inline u32 ioread32(const volatile void __iomem *addr)
{
	return readl(addr);
}
#endif

#ifdef CONFIG_64BIT
#ifndef ioread64
#define ioread64 ioread64
static inline u64 ioread64(const volatile void __iomem *addr)
{
	return readq(addr);
}
#endif
#endif /* CONFIG_64BIT */

#ifndef iowrite8
#define iowrite8 iowrite8
static inline void iowrite8(u8 value, volatile void __iomem *addr)
{
	writeb(value, addr);
}
#endif

#ifndef iowrite16
#define iowrite16 iowrite16
static inline void iowrite16(u16 value, volatile void __iomem *addr)
{
	writew(value, addr);
}
#endif

#ifndef iowrite32
#define iowrite32 iowrite32
static inline void iowrite32(u32 value, volatile void __iomem *addr)
{
	writel(value, addr);
}
#endif

#ifdef CONFIG_64BIT
#ifndef iowrite64
#define iowrite64 iowrite64
static inline void iowrite64(u64 value, volatile void __iomem *addr)
{
	writeq(value, addr);
}
#endif
#endif /* CONFIG_64BIT */

#ifndef ioread16be
#define ioread16be ioread16be
static inline u16 ioread16be(const volatile void __iomem *addr)
{
	return swab16(readw(addr));
}
#endif

#ifndef ioread32be
#define ioread32be ioread32be
static inline u32 ioread32be(const volatile void __iomem *addr)
{
	return swab32(readl(addr));
}
#endif

#ifdef CONFIG_64BIT
#ifndef ioread64be
#define ioread64be ioread64be
static inline u64 ioread64be(const volatile void __iomem *addr)
{
	return swab64(readq(addr));
}
#endif
#endif /* CONFIG_64BIT */

#ifndef iowrite16be
#define iowrite16be iowrite16be
static inline void iowrite16be(u16 value, void volatile __iomem *addr)
{
	writew(swab16(value), addr);
}
#endif

#ifndef iowrite32be
#define iowrite32be iowrite32be
static inline void iowrite32be(u32 value, volatile void __iomem *addr)
{
	writel(swab32(value), addr);
}
#endif

#ifdef CONFIG_64BIT
#ifndef iowrite64be
#define iowrite64be iowrite64be
static inline void iowrite64be(u64 value, volatile void __iomem *addr)
{
	writeq(swab64(value), addr);
}
#endif
#endif /* CONFIG_64BIT */

/*
 * Repeated-access ("string") forms: transfer @count chunks between a CPU
 * buffer and one fixed I/O address, via the readsX()/writesX() helpers.
 */
#ifndef ioread8_rep
#define ioread8_rep ioread8_rep
static inline void ioread8_rep(const volatile void __iomem *addr, void *buffer,
			       unsigned int count)
{
	readsb(addr, buffer, count);
}
#endif

#ifndef ioread16_rep
#define ioread16_rep ioread16_rep
static inline void ioread16_rep(const volatile void __iomem *addr,
				void *buffer, unsigned int count)
{
	readsw(addr, buffer, count);
}
#endif

#ifndef ioread32_rep
#define ioread32_rep ioread32_rep
static inline void ioread32_rep(const volatile void __iomem *addr,
				void *buffer, unsigned int count)
{
	readsl(addr, buffer, count);
}
#endif

#ifdef CONFIG_64BIT
#ifndef ioread64_rep
#define ioread64_rep ioread64_rep
static inline void ioread64_rep(const volatile void __iomem *addr,
				void *buffer, unsigned int count)
{
	readsq(addr, buffer, count);
}
#endif
#endif /* CONFIG_64BIT */

#ifndef iowrite8_rep
#define iowrite8_rep iowrite8_rep
static inline void iowrite8_rep(volatile void __iomem *addr,
				const void *buffer,
				unsigned int count)
{
	writesb(addr, buffer, count);
}
#endif

#ifndef iowrite16_rep
#define iowrite16_rep iowrite16_rep
static inline void iowrite16_rep(volatile void __iomem *addr,
				 const void *buffer,
				 unsigned int count)
{
	writesw(addr, buffer, count);
}
#endif

#ifndef iowrite32_rep
#define iowrite32_rep iowrite32_rep
static inline void iowrite32_rep(volatile void __iomem *addr,
				 const void *buffer,
				 unsigned int count)
{
	writesl(addr, buffer, count);
}
#endif

#ifdef CONFIG_64BIT
#ifndef iowrite64_rep
#define iowrite64_rep iowrite64_rep
static inline void iowrite64_rep(volatile void __iomem *addr,
				 const void *buffer,
				 unsigned int count)
{
	writesq(addr, buffer, count);
}
#endif
#endif /* CONFIG_64BIT */
#endif /* CONFIG_GENERIC_IOMAP */
888
3f7e212d
AB
#ifdef __KERNEL__

#include <linux/vmalloc.h>
/* Strip the __iomem address-space qualifier for memset/memcpy helpers. */
#define __io_virt(x) ((void __force *)(x))

#ifndef CONFIG_GENERIC_IOMAP
struct pci_dev;
extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);

/* Generic pci_iounmap is a no-op: pci_iomap cookies need no teardown here. */
#ifndef pci_iounmap
#define pci_iounmap pci_iounmap
static inline void pci_iounmap(struct pci_dev *dev, void __iomem *p)
{
}
#endif
#endif /* CONFIG_GENERIC_IOMAP */
905
/*
 * Change virtual addresses to physical addresses and vv.
 * These are pretty trivial: linear kernel mappings only (__pa/__va);
 * not valid for vmalloc or highmem addresses.
 */
#ifndef virt_to_phys
#define virt_to_phys virt_to_phys
static inline unsigned long virt_to_phys(volatile void *address)
{
	return __pa((unsigned long)address);
}
#endif

#ifndef phys_to_virt
#define phys_to_virt phys_to_virt
static inline void *phys_to_virt(unsigned long address)
{
	return __va(address);
}
#endif
3f7e212d 925
/**
 * DOC: ioremap() and ioremap_*() variants
 *
 * If you have an IOMMU your architecture is expected to have both ioremap()
 * and iounmap() implemented otherwise the asm-generic helpers will provide a
 * direct mapping.
 *
 * There are ioremap_*() call variants, if you have no IOMMU we naturally will
 * default to direct mapping for all of them, you can override these defaults.
 * If you have an IOMMU you are highly encouraged to provide your own
 * ioremap variant implementation as there currently is no safe architecture
 * agnostic default. To avoid possible improper behaviour default asm-generic
 * ioremap_*() variants all return NULL when an IOMMU is available. If you've
 * defined your own ioremap_*() variant you must then declare your own
 * ioremap_*() variant as defined to itself to avoid the default NULL return.
 */

#ifdef CONFIG_MMU

/* With an MMU: no safe generic uncached mapping — see DOC above. */
#ifndef ioremap_uc
#define ioremap_uc ioremap_uc
static inline void __iomem *ioremap_uc(phys_addr_t offset, size_t size)
{
	return NULL;
}
#endif

#else /* !CONFIG_MMU */

/*
 * Change "struct page" to physical address.
 *
 * This implementation is for the no-MMU case only... if you have an MMU
 * you'll need to provide your own definitions.
 */

/* No MMU: physical and virtual addresses coincide, so just cast. */
#ifndef ioremap
#define ioremap ioremap
static inline void __iomem *ioremap(phys_addr_t offset, size_t size)
{
	return (void __iomem *)(unsigned long)offset;
}
#endif

/* Legacy entry point; @flags are ignored by the generic version. */
#ifndef __ioremap
#define __ioremap __ioremap
static inline void __iomem *__ioremap(phys_addr_t offset, size_t size,
				      unsigned long flags)
{
	return ioremap(offset, size);
}
#endif

/* Nothing to undo for the identity mapping above. */
#ifndef iounmap
#define iounmap iounmap

static inline void iounmap(void __iomem *addr)
{
}
#endif
#endif /* CONFIG_MMU */

/*
 * Cache-attribute variants.  The generic fallbacks all alias
 * ioremap_nocache(); architectures with real write-combine/write-through
 * support override them.
 */
#ifndef ioremap_nocache
void __iomem *ioremap(phys_addr_t phys_addr, size_t size);
#define ioremap_nocache ioremap_nocache
static inline void __iomem *ioremap_nocache(phys_addr_t offset, size_t size)
{
	return ioremap(offset, size);
}
#endif

#ifndef ioremap_uc
#define ioremap_uc ioremap_uc
static inline void __iomem *ioremap_uc(phys_addr_t offset, size_t size)
{
	return ioremap_nocache(offset, size);
}
#endif

#ifndef ioremap_wc
#define ioremap_wc ioremap_wc
static inline void __iomem *ioremap_wc(phys_addr_t offset, size_t size)
{
	return ioremap_nocache(offset, size);
}
#endif

#ifndef ioremap_wt
#define ioremap_wt ioremap_wt
static inline void __iomem *ioremap_wt(phys_addr_t offset, size_t size)
{
	return ioremap_nocache(offset, size);
}
#endif
1019
#ifdef CONFIG_HAS_IOPORT_MAP
#ifndef CONFIG_GENERIC_IOMAP
#ifndef ioport_map
#define ioport_map ioport_map
/*
 * Map a port range into the PCI_IOBASE window.  Ports above
 * MMIO_UPPER_LIMIT are reserved for logic_pio and yield NULL here.
 */
static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
{
	port &= IO_SPACE_LIMIT;
	return (port > MMIO_UPPER_LIMIT) ? NULL : PCI_IOBASE + port;
}
#endif

/* Nothing to release for the arithmetic mapping above. */
#ifndef ioport_unmap
#define ioport_unmap ioport_unmap
static inline void ioport_unmap(void __iomem *p)
{
}
#endif
#else /* CONFIG_GENERIC_IOMAP */
extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
extern void ioport_unmap(void __iomem *p);
#endif /* CONFIG_GENERIC_IOMAP */
#endif /* CONFIG_HAS_IOPORT_MAP */
3f7e212d 1042
/*
 * Convert a virtual cached pointer to an uncached pointer
 */
#ifndef xlate_dev_kmem_ptr
#define xlate_dev_kmem_ptr xlate_dev_kmem_ptr
/* Generic fallback: kernel memory is directly accessible, pass through. */
static inline void *xlate_dev_kmem_ptr(void *addr)
{
	return addr;
}
#endif

#ifndef xlate_dev_mem_ptr
#define xlate_dev_mem_ptr xlate_dev_mem_ptr
/* Map a /dev/mem physical address; generic case is the linear mapping. */
static inline void *xlate_dev_mem_ptr(phys_addr_t addr)
{
	return __va(addr);
}
#endif

#ifndef unxlate_dev_mem_ptr
#define unxlate_dev_mem_ptr unxlate_dev_mem_ptr
/* Undo xlate_dev_mem_ptr(); nothing to do for the linear mapping. */
static inline void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
{
}
#endif
3f7e212d 1068
/*
 * Deprecated virt<->bus helpers; generic identity versions for platforms
 * where bus addresses equal virtual addresses.  Gated on CONFIG_VIRT_TO_BUS.
 */
#ifdef CONFIG_VIRT_TO_BUS
#ifndef virt_to_bus
static inline unsigned long virt_to_bus(void *address)
{
	return (unsigned long)address;
}

static inline void *bus_to_virt(unsigned long address)
{
	return (void *)address;
}
#endif
#endif
3f7e212d 1082
#ifndef memset_io
#define memset_io memset_io
/**
 * memset_io	Set a range of I/O memory to a constant value
 * @addr:	The beginning of the I/O-memory range to set
 * @value:	The value to set the memory to
 * @size:	The number of bytes to set
 *
 * Set a range of I/O memory to a given value.  Generic version: plain
 * memset() through __io_virt(); architectures with access-size
 * restrictions on MMIO must override this.
 */
static inline void memset_io(volatile void __iomem *addr, int value,
			     size_t size)
{
	memset(__io_virt(addr), value, size);
}
#endif

#ifndef memcpy_fromio
#define memcpy_fromio memcpy_fromio
/**
 * memcpy_fromio	Copy a block of data from I/O memory
 * @buffer:	The (RAM) destination for the copy
 * @addr:	The (I/O memory) source for the data
 * @size:	The number of bytes to copy
 *
 * Copy a block of data from I/O memory.  Generic version via memcpy().
 */
static inline void memcpy_fromio(void *buffer,
				 const volatile void __iomem *addr,
				 size_t size)
{
	memcpy(buffer, __io_virt(addr), size);
}
#endif

#ifndef memcpy_toio
#define memcpy_toio memcpy_toio
/**
 * memcpy_toio	Copy a block of data into I/O memory
 * @addr:	The (I/O memory) destination for the copy
 * @buffer:	The (RAM) source for the data
 * @size:	The number of bytes to copy
 *
 * Copy a block of data to I/O memory.  Generic version via memcpy().
 */
static inline void memcpy_toio(volatile void __iomem *addr, const void *buffer,
			       size_t size)
{
	memcpy(__io_virt(addr), buffer, size);
}
#endif
3f7e212d
AB
1134
1135#endif /* __KERNEL__ */
1136
1137#endif /* __ASM_GENERIC_IO_H */