extern "C" {
#endif
-
-/*
- * _GET_CURRENT_PC --
- * GET_CURRENT_PC --
- *
- * Returns the current program counter. In the example below:
- *
- * foo.c
- * L123: Foo(GET_CURRENT_PC())
- *
- * the return value from GET_CURRENT_PC will point a debugger to L123.
- */
-
-#define _GET_CURRENT_PC(pc) \
- asm volatile("1: adr %0, 1b" : "=r" (pc))
-
-static INLINE_ALWAYS void *
-GET_CURRENT_PC(void)
-{
- void *pc;
-
- _GET_CURRENT_PC(pc);
- return pc;
-}
-
-/*
- * GET_CURRENT_LOCATION --
- *
- * Updates the arguments with the values of the pc, x29, sp and x30
- * registers at the current code location where the macro is invoked.
- */
-
-#define GET_CURRENT_LOCATION(pc, fp, sp, lr) do { \
- _GET_CURRENT_PC(pc); \
- asm volatile("mov %0, x29" "\n\t" \
- "mov %1, sp" "\n\t" \
- "mov %2, x30" \
- : "=r" (fp), "=r" (sp), "=r" (lr)); \
-} while (0)
-
-
/*
*----------------------------------------------------------------------
*
}
+/*
+ * Memory Barriers
+ * ===============
+ *
+ * Terminology
+ * -----------
+ *
+ * A compiler memory barrier prevents the compiler from re-ordering memory
+ * accesses across the barrier. It is not a CPU instruction, it is a compiler
+ * directive (i.e. it does not emit any code).
+ *
+ * A CPU memory barrier prevents the CPU from re-ordering memory accesses
+ * across the barrier. It is a CPU instruction.
+ *
+ * A memory barrier is the union of a compiler memory barrier and a CPU memory
+ * barrier. A compiler memory barrier is a useless construct by itself. It is
+ * only useful when combined with a CPU memory barrier, to implement a memory
+ * barrier.
+ *
+ * Semantics
+ * ---------
+ *
+ * At the time COMPILER_*_BARRIER were created (and references to them were
+ * added to the code), the code was only targeting x86. The intent of the code
+ * was really to use a memory barrier, but because x86 uses a strongly ordered
+ * memory model, the CPU would not re-order memory accesses, and the code could
+ * get away with using just a compiler memory barrier. So COMPILER_*_BARRIER
+ * were born and were implemented as compiler memory barriers _on x86_. But
+ * make no mistake, _the semantics that the code expects from
+ * COMPILER_*_BARRIER are that of a memory barrier_!
+ *
+ * DO NOT USE!
+ * -----------
+ *
+ * On at least one non-x86 architecture, COMPILER_*_BARRIER are
+ * 1) Misnomers
+ * 2) Not fine-grained enough to provide the best performance.
+ * For the above two reasons, usage of COMPILER_*_BARRIER is now deprecated.
+ * _Do not add new references to COMPILER_*_BARRIER._ Instead, precisely
+ * document the intent of your code by using
+ * <mem_type/purpose>_<before_access_type>_BARRIER_<after_access_type>.
+ * Existing references to COMPILER_*_BARRIER are being slowly but surely
+ * converted, and when no references are left, COMPILER_*_BARRIER will be
+ * retired.
+ *
+ * Thanks for pasting this whole comment into every architecture header.
+ */
+
+#define COMPILER_READ_BARRIER() SMP_R_BARRIER_R()
+#define COMPILER_WRITE_BARRIER() SMP_W_BARRIER_W()
+#define COMPILER_MEM_BARRIER() SMP_RW_BARRIER_RW()
+
+/*
+ * Memory barriers. These take the form of
+ *
+ * <mem_type/purpose>_<before_access_type>_BARRIER_<after_access_type>
+ *
+ * where:
+ * <mem_type/purpose> is either SMP, DMA, or MMIO.
+ * <*_access type> is either R(load), W(store) or RW(any).
+ *
+ * Above every use of these memory barriers in the code, there _must_ be a
+ * comment to justify the use, i.e. a comment which:
+ * 1) Precisely identifies which memory accesses must not be re-ordered across
+ * the memory barrier.
+ * 2) Explains why it is important that the memory accesses not be re-ordered.
+ *
+ * Thanks for pasting this whole comment into every architecture header.
+ */
+
+#define SMP_R_BARRIER_R() SMP_R_BARRIER_RW()
+#define SMP_R_BARRIER_W() SMP_R_BARRIER_RW()
+#define SMP_R_BARRIER_RW() _DMB(_ISHLD)
+#define SMP_W_BARRIER_R() SMP_RW_BARRIER_RW()
+#define SMP_W_BARRIER_W() _DMB(_ISHST)
+#define SMP_W_BARRIER_RW() SMP_RW_BARRIER_RW()
+#define SMP_RW_BARRIER_R() SMP_RW_BARRIER_RW()
+#define SMP_RW_BARRIER_W() SMP_RW_BARRIER_RW()
+#define SMP_RW_BARRIER_RW() _DMB(_ISH)
+
+/*
+ * Like the above, only for use with cache coherent observers other than CPUs,
+ * i.e. DMA masters.
+ * On ARM it means that we extend the `dmb' options to an outer-shareable
+ * memory where all our devices are.
+ */
+
+#define DMA_R_BARRIER_R() DMA_R_BARRIER_RW()
+#define DMA_R_BARRIER_W() DMA_R_BARRIER_RW()
+#define DMA_R_BARRIER_RW() _DMB(_OSHLD)
+#define DMA_W_BARRIER_R() DMA_RW_BARRIER_RW()
+#define DMA_W_BARRIER_W() _DMB(_OSHST)
+#define DMA_W_BARRIER_RW() DMA_RW_BARRIER_RW()
+#define DMA_RW_BARRIER_R() DMA_RW_BARRIER_RW()
+#define DMA_RW_BARRIER_W() DMA_RW_BARRIER_RW()
+#define DMA_RW_BARRIER_RW() _DMB(_OSH)
+
+/*
+ * And finally a set for use with MMIO accesses.
+ * Synchronization of accesses to non-cache-coherent device memory
+ * (in the general case) requires the strongest available barriers on ARM.
+ */
+
+#define MMIO_R_BARRIER_R() MMIO_R_BARRIER_RW()
+#define MMIO_R_BARRIER_W() MMIO_R_BARRIER_RW()
+#define MMIO_R_BARRIER_RW() _DSB(_LD)
+#define MMIO_W_BARRIER_R() MMIO_RW_BARRIER_RW()
+#define MMIO_W_BARRIER_W() _DSB(_ST)
+#define MMIO_W_BARRIER_RW() MMIO_RW_BARRIER_RW()
+#define MMIO_RW_BARRIER_R() MMIO_RW_BARRIER_RW()
+#define MMIO_RW_BARRIER_W() MMIO_RW_BARRIER_RW()
+#define MMIO_RW_BARRIER_RW() _DSB(_SY)
+
+
+/*
+ * _GET_CURRENT_PC --
+ * GET_CURRENT_PC --
+ *
+ * Returns the current program counter. In the example below:
+ *
+ * foo.c
+ * L123: Foo(GET_CURRENT_PC())
+ *
+ * the return value from GET_CURRENT_PC will point a debugger to L123.
+ */
+
+#define _GET_CURRENT_PC(pc) \
+ asm volatile("1: adr %0, 1b" : "=r" (pc))
+
+static INLINE_ALWAYS void *
+GET_CURRENT_PC(void)
+{
+ void *pc;
+
+ _GET_CURRENT_PC(pc);
+ return pc;
+}
+
+/*
+ * GET_CURRENT_LOCATION --
+ *
+ * Updates the arguments with the values of the pc, x29, sp and x30
+ * registers at the current code location where the macro is invoked.
+ */
+
+#define GET_CURRENT_LOCATION(pc, fp, sp, lr) do { \
+ _GET_CURRENT_PC(pc); \
+ asm volatile("mov %0, x29" "\n\t" \
+ "mov %1, sp" "\n\t" \
+ "mov %2, x30" \
+ : "=r" (fp), "=r" (sp), "=r" (lr)); \
+} while (0)
+
+
/*
*----------------------------------------------------------------------
*
}
-/*
- * Memory Barriers
- * ===============
- *
- * Terminology
- * -----------
- *
- * A compiler memory barrier prevents the compiler from re-ordering memory
- * accesses accross the barrier. It is not a CPU instruction, it is a compiler
- * directive (i.e. it does not emit any code).
- *
- * A CPU memory barrier prevents the CPU from re-ordering memory accesses
- * accross the barrier. It is a CPU instruction.
- *
- * A memory barrier is the union of a compiler memory barrier and a CPU memory
- * barrier. A compiler memory barrier is a useless construct by itself. It is
- * only useful when combined with a CPU memory barrier, to implement a memory
- * barrier.
- *
- * Semantics
- * ---------
- *
- * At the time COMPILER_*_BARRIER were created (and references to them were
- * added to the code), the code was only targetting x86. The intent of the code
- * was really to use a memory barrier, but because x86 uses a strongly ordered
- * memory model, the CPU would not re-order memory accesses, and the code could
- * get away with using just a compiler memory barrier. So COMPILER_*_BARRIER
- * were born and were implemented as compiler memory barriers _on x86_. But
- * make no mistake, _the semantics that the code expects from
- * COMPILER_*_BARRIER are that of a memory barrier_!
- *
- * DO NOT USE!
- * -----------
- *
- * On at least one non-x86 architecture, COMPILER_*_BARRIER are
- * 1) Misnomers
- * 2) Not fine-grained enough to provide the best performance.
- * For the above two reasons, usage of COMPILER_*_BARRIER is now deprecated.
- * _Do not add new references to COMPILER_*_BARRIER._ Instead, precisely
- * document the intent of your code by using
- * <mem_type/purpose>_<before_access_type>_BARRIER_<after_access_type>.
- * Existing references to COMPILER_*_BARRIER are being slowly but surely
- * converted, and when no references are left, COMPILER_*_BARRIER will be
- * retired.
- *
- * Thanks for pasting this whole comment into every architecture header.
- */
-
-#define COMPILER_READ_BARRIER() SMP_R_BARRIER_R()
-#define COMPILER_WRITE_BARRIER() SMP_W_BARRIER_W()
-#define COMPILER_MEM_BARRIER() SMP_RW_BARRIER_RW()
-
-/*
- * Memory barriers. These take the form of
- *
- * <mem_type/purpose>_<before_access_type>_BARRIER_<after_access_type>
- *
- * where:
- * <mem_type/purpose> is either SMP, DMA, or MMIO.
- * <*_access type> is either R(load), W(store) or RW(any).
- *
- * Above every use of these memory barriers in the code, there _must_ be a
- * comment to justify the use, i.e. a comment which:
- * 1) Precisely identifies which memory accesses must not be re-ordered across
- * the memory barrier.
- * 2) Explains why it is important that the memory accesses not be re-ordered.
- *
- * Thanks for pasting this whole comment into every architecture header.
- */
-
-#define SMP_R_BARRIER_R() SMP_R_BARRIER_RW()
-#define SMP_R_BARRIER_W() SMP_R_BARRIER_RW()
-#define SMP_R_BARRIER_RW() _DMB(_ISHLD)
-#define SMP_W_BARRIER_R() SMP_RW_BARRIER_RW()
-#define SMP_W_BARRIER_W() _DMB(_ISHST)
-#define SMP_W_BARRIER_RW() SMP_RW_BARRIER_RW()
-#define SMP_RW_BARRIER_R() SMP_RW_BARRIER_RW()
-#define SMP_RW_BARRIER_W() SMP_RW_BARRIER_RW()
-#define SMP_RW_BARRIER_RW() _DMB(_ISH)
-
-/*
- * Like the above, only for use with cache coherent observers other than CPUs,
- * i.e. DMA masters.
- * On ARM it means that we extend the `dmb' options to an outer-shareable
- * memory where all our devices are.
- */
-
-#define DMA_R_BARRIER_R() DMA_R_BARRIER_RW()
-#define DMA_R_BARRIER_W() DMA_R_BARRIER_RW()
-#define DMA_R_BARRIER_RW() _DMB(_OSHLD)
-#define DMA_W_BARRIER_R() DMA_RW_BARRIER_RW()
-#define DMA_W_BARRIER_W() _DMB(_OSHST)
-#define DMA_W_BARRIER_RW() DMA_RW_BARRIER_RW()
-#define DMA_RW_BARRIER_R() DMA_RW_BARRIER_RW()
-#define DMA_RW_BARRIER_W() DMA_RW_BARRIER_RW()
-#define DMA_RW_BARRIER_RW() _DMB(_OSH)
-
-/*
- * And finally a set for use with MMIO accesses.
- * Synchronization of accesses to a non-cache coherent device memory
- * (in general case) requires strongest available barriers on ARM.
- */
-
-#define MMIO_R_BARRIER_R() MMIO_R_BARRIER_RW()
-#define MMIO_R_BARRIER_W() MMIO_R_BARRIER_RW()
-#define MMIO_R_BARRIER_RW() _DSB(_LD)
-#define MMIO_W_BARRIER_R() MMIO_RW_BARRIER_RW()
-#define MMIO_W_BARRIER_W() _DSB(_ST)
-#define MMIO_W_BARRIER_RW() MMIO_RW_BARRIER_RW()
-#define MMIO_RW_BARRIER_R() MMIO_RW_BARRIER_RW()
-#define MMIO_RW_BARRIER_W() MMIO_RW_BARRIER_RW()
-#define MMIO_RW_BARRIER_RW() _DSB(_SY)
-
-
/*
*-----------------------------------------------------------------------------
*