drivers/infiniband: Remove now-redundant smp_read_barrier_depends()
author    Paul E. McKenney <paulmck@linux.vnet.ibm.com>
          Mon, 27 Nov 2017 17:04:22 +0000 (09:04 -0800)
committer Paul E. McKenney <paulmck@linux.vnet.ibm.com>
          Tue, 5 Dec 2017 19:56:54 +0000 (11:56 -0800)
The smp_read_barrier_depends() does nothing at all except on DEC Alpha,
and no current DEC Alpha systems use InfiniBand:

lkml.kernel.org/r/20171023085921.jwbntptn6ictbnvj@tower

This commit therefore makes InfiniBand depend on !ALPHA and removes
the now-ineffective invocations of smp_read_barrier_depends() from
the InfiniBand drivers.

Please note that this patch should not be construed as my saying that
InfiniBand's memory ordering is correct, but rather that this patch does
not in any way affect InfiniBand's correctness.  In other words, the
result of applying this patch is bug-for-bug compatible with the original.
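
Each removal below has the same shape: the barrier immediately
preceded a READ_ONCE() of a queue index that another CPU advances.
A minimal sketch of the consumer side (the helper name is
hypothetical; the fields and the post_one_send() reference come from
the hunks below):

	/* qp->s_head is advanced by post_one_send() on the posting
	 * CPU; qp->s_last belongs to the sending side. */
	static bool send_queue_empty(struct rvt_qp *qp)
	{
		smp_read_barrier_depends();	/* removed: no-op on !ALPHA */
		return qp->s_last == READ_ONCE(qp->s_head);
	}

After the patch only the READ_ONCE() comparison remains, since no
configuration that can still build the driver ever emitted an
instruction for the barrier.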

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Doug Ledford <dledford@redhat.com>
Cc: Richard Henderson <rth@twiddle.net>
Cc: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Michael Cree <mcree@orcon.net.nz>
Cc: Andrea Parri <parri.andrea@gmail.com>
Cc: <linux-rdma@vger.kernel.org>
Cc: <linux-alpha@vger.kernel.org>
[ paulmck: Removed drivers/dma/ioat/dma.c per Jason Gunthorpe's feedback. ]
Acked-by: Jason Gunthorpe <jgg@mellanox.com>
drivers/infiniband/Kconfig
drivers/infiniband/hw/hfi1/rc.c
drivers/infiniband/hw/hfi1/ruc.c
drivers/infiniband/hw/hfi1/sdma.c
drivers/infiniband/hw/hfi1/uc.c
drivers/infiniband/hw/hfi1/ud.c
drivers/infiniband/hw/qib/qib_rc.c
drivers/infiniband/hw/qib/qib_ruc.c
drivers/infiniband/hw/qib/qib_uc.c
drivers/infiniband/hw/qib/qib_ud.c
drivers/infiniband/sw/rdmavt/qp.c

diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index 98ac46ed7214f574fbe13d5f617b9f2b0836bc40..3bb6e35b0bbf3560f400746db8a2c07c10aeb143 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -4,6 +4,7 @@ menuconfig INFINIBAND
        depends on NET
        depends on INET
        depends on m || IPV6 != m
+       depends on !ALPHA
        select IRQ_POLL
        ---help---
          Core support for InfiniBand (IB).  Make sure to also select
diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c
index fd01a760259fa1887d2f233a2fcbb3ee581e6f5f..f527bcda46506f34231ee8637b5509274d7d966b 100644
--- a/drivers/infiniband/hw/hfi1/rc.c
+++ b/drivers/infiniband/hw/hfi1/rc.c
@@ -302,7 +302,6 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
                if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
                        goto bail;
                /* We are in the error state, flush the work request. */
-               smp_read_barrier_depends(); /* see post_one_send() */
                if (qp->s_last == READ_ONCE(qp->s_head))
                        goto bail;
                /* If DMAs are in progress, we can't flush immediately. */
@@ -346,7 +345,6 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
                newreq = 0;
                if (qp->s_cur == qp->s_tail) {
                        /* Check if send work queue is empty. */
-                       smp_read_barrier_depends(); /* see post_one_send() */
                        if (qp->s_tail == READ_ONCE(qp->s_head)) {
                                clear_ahg(qp);
                                goto bail;
@@ -900,7 +898,6 @@ void hfi1_send_rc_ack(struct hfi1_ctxtdata *rcd,
        }
 
        /* Ensure s_rdma_ack_cnt changes are committed */
-       smp_read_barrier_depends();
        if (qp->s_rdma_ack_cnt) {
                hfi1_queue_rc_ack(qp, is_fecn);
                return;
@@ -1562,7 +1559,6 @@ static void rc_rcv_resp(struct hfi1_packet *packet)
        trace_hfi1_ack(qp, psn);
 
        /* Ignore invalid responses. */
-       smp_read_barrier_depends(); /* see post_one_send */
        if (cmp_psn(psn, READ_ONCE(qp->s_next_psn)) >= 0)
                goto ack_done;
 
diff --git a/drivers/infiniband/hw/hfi1/ruc.c b/drivers/infiniband/hw/hfi1/ruc.c
index 2c7fc6e331eab797cd4de42ff961b17940a8fef4..13b994738f416efa7320f575bfdf92d74b310299 100644
--- a/drivers/infiniband/hw/hfi1/ruc.c
+++ b/drivers/infiniband/hw/hfi1/ruc.c
@@ -362,7 +362,6 @@ static void ruc_loopback(struct rvt_qp *sqp)
        sqp->s_flags |= RVT_S_BUSY;
 
 again:
-       smp_read_barrier_depends(); /* see post_one_send() */
        if (sqp->s_last == READ_ONCE(sqp->s_head))
                goto clr_busy;
        wqe = rvt_get_swqe_ptr(sqp, sqp->s_last);
diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
index 31c8f89b5fc8532f27f38ef5f7d119423edede9d..61c130dbed109c2f810a4588e8dc17ebf3637e52 100644
--- a/drivers/infiniband/hw/hfi1/sdma.c
+++ b/drivers/infiniband/hw/hfi1/sdma.c
@@ -553,7 +553,6 @@ static void sdma_hw_clean_up_task(unsigned long opaque)
 
 static inline struct sdma_txreq *get_txhead(struct sdma_engine *sde)
 {
-       smp_read_barrier_depends(); /* see sdma_update_tail() */
        return sde->tx_ring[sde->tx_head & sde->sdma_mask];
 }
 
diff --git a/drivers/infiniband/hw/hfi1/uc.c b/drivers/infiniband/hw/hfi1/uc.c
index 991bbee0482183392a894d32b65475d82cd903f8..132b63e787d1367d3dd5ebe00e0a79e606bc1c25 100644
--- a/drivers/infiniband/hw/hfi1/uc.c
+++ b/drivers/infiniband/hw/hfi1/uc.c
@@ -79,7 +79,6 @@ int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
                if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
                        goto bail;
                /* We are in the error state, flush the work request. */
-               smp_read_barrier_depends(); /* see post_one_send() */
                if (qp->s_last == READ_ONCE(qp->s_head))
                        goto bail;
                /* If DMAs are in progress, we can't flush immediately. */
@@ -119,7 +118,6 @@ int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
                    RVT_PROCESS_NEXT_SEND_OK))
                        goto bail;
                /* Check if send work queue is empty. */
-               smp_read_barrier_depends(); /* see post_one_send() */
                if (qp->s_cur == READ_ONCE(qp->s_head)) {
                        clear_ahg(qp);
                        goto bail;
diff --git a/drivers/infiniband/hw/hfi1/ud.c b/drivers/infiniband/hw/hfi1/ud.c
index beb5091eccca35129f778aab768c93ceaa86cc57..deb1845743956a05455a0271d2091c8e825dcd06 100644
--- a/drivers/infiniband/hw/hfi1/ud.c
+++ b/drivers/infiniband/hw/hfi1/ud.c
@@ -486,7 +486,6 @@ int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
                if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
                        goto bail;
                /* We are in the error state, flush the work request. */
-               smp_read_barrier_depends(); /* see post_one_send */
                if (qp->s_last == READ_ONCE(qp->s_head))
                        goto bail;
                /* If DMAs are in progress, we can't flush immediately. */
@@ -500,7 +499,6 @@ int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
        }
 
        /* see post_one_send() */
-       smp_read_barrier_depends();
        if (qp->s_cur == READ_ONCE(qp->s_head))
                goto bail;
 
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
index 8f5754fb857982095e6cac1c1dd63f0e6bf22c59..1a785c37ad0a9a338420ab382a2b0603eb3af779 100644
--- a/drivers/infiniband/hw/qib/qib_rc.c
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -246,7 +246,6 @@ int qib_make_rc_req(struct rvt_qp *qp, unsigned long *flags)
                if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
                        goto bail;
                /* We are in the error state, flush the work request. */
-               smp_read_barrier_depends(); /* see post_one_send() */
                if (qp->s_last == READ_ONCE(qp->s_head))
                        goto bail;
                /* If DMAs are in progress, we can't flush immediately. */
@@ -293,7 +292,6 @@ int qib_make_rc_req(struct rvt_qp *qp, unsigned long *flags)
                newreq = 0;
                if (qp->s_cur == qp->s_tail) {
                        /* Check if send work queue is empty. */
-                       smp_read_barrier_depends(); /* see post_one_send() */
                        if (qp->s_tail == READ_ONCE(qp->s_head))
                                goto bail;
                        /*
@@ -1340,7 +1338,6 @@ static void qib_rc_rcv_resp(struct qib_ibport *ibp,
                goto ack_done;
 
        /* Ignore invalid responses. */
-       smp_read_barrier_depends(); /* see post_one_send */
        if (qib_cmp24(psn, READ_ONCE(qp->s_next_psn)) >= 0)
                goto ack_done;
 
diff --git a/drivers/infiniband/hw/qib/qib_ruc.c b/drivers/infiniband/hw/qib/qib_ruc.c
index 9a37e844d4c8739087f82fa728ecc42cb262ff80..4662cc7bde929df5bdfbde7a7956d52274926b9d 100644
--- a/drivers/infiniband/hw/qib/qib_ruc.c
+++ b/drivers/infiniband/hw/qib/qib_ruc.c
@@ -367,7 +367,6 @@ static void qib_ruc_loopback(struct rvt_qp *sqp)
        sqp->s_flags |= RVT_S_BUSY;
 
 again:
-       smp_read_barrier_depends(); /* see post_one_send() */
        if (sqp->s_last == READ_ONCE(sqp->s_head))
                goto clr_busy;
        wqe = rvt_get_swqe_ptr(sqp, sqp->s_last);
diff --git a/drivers/infiniband/hw/qib/qib_uc.c b/drivers/infiniband/hw/qib/qib_uc.c
index bddcc37ace4420937c5fde681edf29169a3e36c3..70c58b88192c998cea23795b2af24bc22e2aad10 100644
--- a/drivers/infiniband/hw/qib/qib_uc.c
+++ b/drivers/infiniband/hw/qib/qib_uc.c
@@ -60,7 +60,6 @@ int qib_make_uc_req(struct rvt_qp *qp, unsigned long *flags)
                if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
                        goto bail;
                /* We are in the error state, flush the work request. */
-               smp_read_barrier_depends(); /* see post_one_send() */
                if (qp->s_last == READ_ONCE(qp->s_head))
                        goto bail;
                /* If DMAs are in progress, we can't flush immediately. */
@@ -90,7 +89,6 @@ int qib_make_uc_req(struct rvt_qp *qp, unsigned long *flags)
                    RVT_PROCESS_NEXT_SEND_OK))
                        goto bail;
                /* Check if send work queue is empty. */
-               smp_read_barrier_depends(); /* see post_one_send() */
                if (qp->s_cur == READ_ONCE(qp->s_head))
                        goto bail;
                /*
diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c
index 15962ed193cea0515bcda715ed536f3983d66ed8..386c3c4da0c725957ae21d05d1eac59b036e9746 100644
--- a/drivers/infiniband/hw/qib/qib_ud.c
+++ b/drivers/infiniband/hw/qib/qib_ud.c
@@ -252,7 +252,6 @@ int qib_make_ud_req(struct rvt_qp *qp, unsigned long *flags)
                if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
                        goto bail;
                /* We are in the error state, flush the work request. */
-               smp_read_barrier_depends(); /* see post_one_send */
                if (qp->s_last == READ_ONCE(qp->s_head))
                        goto bail;
                /* If DMAs are in progress, we can't flush immediately. */
@@ -266,7 +265,6 @@ int qib_make_ud_req(struct rvt_qp *qp, unsigned long *flags)
        }
 
        /* see post_one_send() */
-       smp_read_barrier_depends();
        if (qp->s_cur == READ_ONCE(qp->s_head))
                goto bail;
 
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index 9177df60742a3cedb0099e026d6083b13fa455fa..eae84c216e2f9c7ed38d233168a803585e4288e1 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -1684,7 +1684,6 @@ static inline int rvt_qp_is_avail(
        /* non-reserved operations */
        if (likely(qp->s_avail))
                return 0;
-       smp_read_barrier_depends(); /* see rc.c */
        slast = READ_ONCE(qp->s_last);
        if (qp->s_head >= slast)
                avail = qp->s_size - (qp->s_head - slast);