git.ipfire.org Git - people/arne_f/kernel.git/commitdiff
RISC-V: Upgrade smp_mb__after_spinlock() to iorw,iorw
author: Palmer Dabbelt <palmerdabbelt@google.com>
Thu, 16 Jul 2020 18:57:26 +0000 (11:57 -0700)
committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 29 Jul 2020 08:19:54 +0000 (10:19 +0200)
[ Upstream commit 38b7c2a3ffb1fce8358ddc6006cfe5c038ff9963 ]

While digging through the recent mmiowb preemption issue it came up that
we aren't actually preventing IO from crossing a scheduling boundary.
While it's a bit ugly to overload smp_mb__after_spinlock() with this
behavior, it's what PowerPC is doing so there's some precedent.

Signed-off-by: Palmer Dabbelt <palmerdabbelt@google.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
arch/riscv/include/asm/barrier.h

index 3f1737f301ccb6631bb43479578f86f23f1e157a..d0e24aaa2aa060499d338cbeae06a7e99352c7e5 100644 (file)
@@ -58,8 +58,16 @@ do {                                                                 \
  * The AQ/RL pair provides a RCpc critical section, but there's not really any
  * way we can take advantage of that here because the ordering is only enforced
  * on that one lock.  Thus, we're just doing a full fence.
+ *
+ * Since we allow writeX to be called from preemptive regions we need at least
+ * an "o" in the predecessor set to ensure device writes are visible before the
+ * task is marked as available for scheduling on a new hart.  While I don't see
+ * any concrete reason we need a full IO fence, it seems safer to just upgrade
+ * this in order to avoid any IO crossing a scheduling boundary.  In both
+ * instances the scheduler pairs this with an mb(), so nothing is necessary on
+ * the new hart.
  */
-#define smp_mb__after_spinlock()       RISCV_FENCE(rw,rw)
+#define smp_mb__after_spinlock()       RISCV_FENCE(iorw,iorw)
 
 #include <asm-generic/barrier.h>