3.18-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 20 Jan 2015 08:37:42 +0000 (16:37 +0800)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 20 Jan 2015 08:37:42 +0000 (16:37 +0800)
added patches:
iscsi-iser-target-expose-supported-protection-ops-according-to-t10_pi.patch
iscsi-iser-target-initiate-termination-only-once.patch
iscsi-target-fail-connection-on-short-sendmsg-writes.patch
iser-target-allocate-pi-contexts-dynamically.patch
iser-target-fix-connected_handler-teardown-flow-race.patch
iser-target-fix-flush-disconnect-completion-handling.patch
iser-target-fix-implicit-termination-of-connections.patch
iser-target-fix-null-dereference-in-sw-mode-dif.patch
iser-target-handle-addr_change-event-for-listener-cm_id.patch
iser-target-parallelize-cm-connection-establishment.patch

queue-3.18/iscsi-iser-target-expose-supported-protection-ops-according-to-t10_pi.patch [new file with mode: 0644]
queue-3.18/iscsi-iser-target-initiate-termination-only-once.patch [new file with mode: 0644]
queue-3.18/iscsi-target-fail-connection-on-short-sendmsg-writes.patch [new file with mode: 0644]
queue-3.18/iser-target-allocate-pi-contexts-dynamically.patch [new file with mode: 0644]
queue-3.18/iser-target-fix-connected_handler-teardown-flow-race.patch [new file with mode: 0644]
queue-3.18/iser-target-fix-flush-disconnect-completion-handling.patch [new file with mode: 0644]
queue-3.18/iser-target-fix-implicit-termination-of-connections.patch [new file with mode: 0644]
queue-3.18/iser-target-fix-null-dereference-in-sw-mode-dif.patch [new file with mode: 0644]
queue-3.18/iser-target-handle-addr_change-event-for-listener-cm_id.patch [new file with mode: 0644]
queue-3.18/iser-target-parallelize-cm-connection-establishment.patch [new file with mode: 0644]
queue-3.18/series

diff --git a/queue-3.18/iscsi-iser-target-expose-supported-protection-ops-according-to-t10_pi.patch b/queue-3.18/iscsi-iser-target-expose-supported-protection-ops-according-to-t10_pi.patch
new file mode 100644 (file)
index 0000000..6d0ad69
--- /dev/null
@@ -0,0 +1,98 @@
+From 23a548ee656c8ba6da8cb2412070edcd62e2ac5d Mon Sep 17 00:00:00 2001
+From: Sagi Grimberg <sagig@mellanox.com>
+Date: Tue, 2 Dec 2014 16:57:35 +0200
+Subject: iscsi,iser-target: Expose supported protection ops according to t10_pi
+
+From: Sagi Grimberg <sagig@mellanox.com>
+
+commit 23a548ee656c8ba6da8cb2412070edcd62e2ac5d upstream.
+
+iSER will report supported protection operations based on
+the tpg attribute t10_pi settings and HCA PI offload capabilities.
+If the HCA does not support PI offload or the tpg attribute t10_pi is
+not set, we fall back to SW PI mode.
+
+In order to do that, we move iscsit_get_sup_prot_ops after the
+connection's tpg assignment.
+
+Signed-off-by: Sagi Grimberg <sagig@mellanox.com>
+Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/ulp/isert/ib_isert.c   |   14 +++++++++++---
+ drivers/infiniband/ulp/isert/ib_isert.h   |    1 +
+ drivers/target/iscsi/iscsi_target_login.c |    7 ++++---
+ 3 files changed, 16 insertions(+), 6 deletions(-)
+
+--- a/drivers/infiniband/ulp/isert/ib_isert.c
++++ b/drivers/infiniband/ulp/isert/ib_isert.c
+@@ -64,7 +64,7 @@ struct rdma_cm_id *isert_setup_id(struct
+ static inline bool
+ isert_prot_cmd(struct isert_conn *conn, struct se_cmd *cmd)
+ {
+-      return (conn->conn_device->pi_capable &&
++      return (conn->pi_support &&
+               cmd->prot_op != TARGET_PROT_NORMAL);
+ }
+@@ -2324,8 +2324,16 @@ isert_get_sup_prot_ops(struct iscsi_conn
+       struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
+       struct isert_device *device = isert_conn->conn_device;
+-      if (device->pi_capable)
+-              return TARGET_PROT_ALL;
++      if (conn->tpg->tpg_attrib.t10_pi) {
++              if (device->pi_capable) {
++                      pr_info("conn %p PI offload enabled\n", isert_conn);
++                      isert_conn->pi_support = true;
++                      return TARGET_PROT_ALL;
++              }
++      }
++
++      pr_info("conn %p PI offload disabled\n", isert_conn);
++      isert_conn->pi_support = false;
+       return TARGET_PROT_NORMAL;
+ }
+--- a/drivers/infiniband/ulp/isert/ib_isert.h
++++ b/drivers/infiniband/ulp/isert/ib_isert.h
+@@ -128,6 +128,7 @@ struct isert_conn {
+       atomic_t                post_send_buf_count;
+       u32                     responder_resources;
+       u32                     initiator_depth;
++      bool                    pi_support;
+       u32                     max_sge;
+       char                    *login_buf;
+       char                    *login_req_buf;
+--- a/drivers/target/iscsi/iscsi_target_login.c
++++ b/drivers/target/iscsi/iscsi_target_login.c
+@@ -281,7 +281,6 @@ static int iscsi_login_zero_tsih_s1(
+ {
+       struct iscsi_session *sess = NULL;
+       struct iscsi_login_req *pdu = (struct iscsi_login_req *)buf;
+-      enum target_prot_op sup_pro_ops;
+       int ret;
+       sess = kzalloc(sizeof(struct iscsi_session), GFP_KERNEL);
+@@ -343,9 +342,8 @@ static int iscsi_login_zero_tsih_s1(
+               kfree(sess);
+               return -ENOMEM;
+       }
+-      sup_pro_ops = conn->conn_transport->iscsit_get_sup_prot_ops(conn);
+-      sess->se_sess = transport_init_session(sup_pro_ops);
++      sess->se_sess = transport_init_session(TARGET_PROT_NORMAL);
+       if (IS_ERR(sess->se_sess)) {
+               iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+                               ISCSI_LOGIN_STATUS_NO_RESOURCES);
+@@ -1367,6 +1365,9 @@ static int __iscsi_target_login_thread(s
+       }
+       login->zero_tsih = zero_tsih;
++      conn->sess->se_sess->sup_prot_ops =
++              conn->conn_transport->iscsit_get_sup_prot_ops(conn);
++
+       tpg = conn->tpg;
+       if (!tpg) {
+               pr_err("Unable to locate struct iscsi_conn->tpg\n");
diff --git a/queue-3.18/iscsi-iser-target-initiate-termination-only-once.patch b/queue-3.18/iscsi-iser-target-initiate-termination-only-once.patch
new file mode 100644 (file)
index 0000000..3dc1279
--- /dev/null
@@ -0,0 +1,217 @@
+From 954f23722b5753305be490330cf2680b7a25f4a3 Mon Sep 17 00:00:00 2001
+From: Sagi Grimberg <sagig@mellanox.com>
+Date: Tue, 2 Dec 2014 16:57:17 +0200
+Subject: iscsi,iser-target: Initiate termination only once
+
+From: Sagi Grimberg <sagig@mellanox.com>
+
+commit 954f23722b5753305be490330cf2680b7a25f4a3 upstream.
+
+Since commit 0fc4ea701fcf ("Target/iser: Don't put isert_conn inside
+disconnected handler") we put the conn kref in isert_wait_conn, so we
+need .wait_conn to be invoked also in the error path.
+
+Introduce a call to isert_conn_terminate (called under lock)
+which transitions the connection state to TERMINATING and calls
+rdma_disconnect. If the state is already terminating, just bail
+out (termination has already started).
+
+Also, make sure to destroy the connection when getting a connect
+error event if we didn't reach the connected state (UP). Same for the
+handling of REJECTED and UNREACHABLE cma events.
+
+Squashed:
+
+iscsi-target: Add call to wait_conn in establishment error flow
+
+Reported-by: Slava Shwartsman <valyushash@gmail.com>
+Signed-off-by: Sagi Grimberg <sagig@mellanox.com>
+Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/ulp/isert/ib_isert.c   |   84 ++++++++++++++++++------------
+ drivers/infiniband/ulp/isert/ib_isert.h   |    1 
+ drivers/target/iscsi/iscsi_target_login.c |    3 +
+ 3 files changed, 54 insertions(+), 34 deletions(-)
+
+--- a/drivers/infiniband/ulp/isert/ib_isert.c
++++ b/drivers/infiniband/ulp/isert/ib_isert.c
+@@ -777,6 +777,33 @@ isert_put_conn(struct isert_conn *isert_
+       kref_put(&isert_conn->conn_kref, isert_release_conn_kref);
+ }
++/**
++ * isert_conn_terminate() - Initiate connection termination
++ * @isert_conn: isert connection struct
++ *
++ * Notes:
++ * In case the connection state is UP, move state
++ * to TEMINATING and start teardown sequence (rdma_disconnect).
++ *
++ * This routine must be called with conn_mutex held. Thus it is
++ * safe to call multiple times.
++ */
++static void
++isert_conn_terminate(struct isert_conn *isert_conn)
++{
++      int err;
++
++      if (isert_conn->state == ISER_CONN_UP) {
++              isert_conn->state = ISER_CONN_TERMINATING;
++              pr_info("Terminating conn %p state %d\n",
++                         isert_conn, isert_conn->state);
++              err = rdma_disconnect(isert_conn->conn_cm_id);
++              if (err)
++                      pr_warn("Failed rdma_disconnect isert_conn %p\n",
++                                 isert_conn);
++      }
++}
++
+ static void
+ isert_disconnect_work(struct work_struct *work)
+ {
+@@ -785,33 +812,15 @@ isert_disconnect_work(struct work_struct
+       pr_debug("isert_disconnect_work(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
+       mutex_lock(&isert_conn->conn_mutex);
+-      if (isert_conn->state == ISER_CONN_UP)
+-              isert_conn->state = ISER_CONN_TERMINATING;
+-
+-      if (isert_conn->post_recv_buf_count == 0 &&
+-          atomic_read(&isert_conn->post_send_buf_count) == 0) {
+-              mutex_unlock(&isert_conn->conn_mutex);
+-              goto wake_up;
+-      }
+-      if (!isert_conn->conn_cm_id) {
+-              mutex_unlock(&isert_conn->conn_mutex);
+-              isert_put_conn(isert_conn);
+-              return;
+-      }
+-
+-      if (isert_conn->disconnect) {
+-              /* Send DREQ/DREP towards our initiator */
+-              rdma_disconnect(isert_conn->conn_cm_id);
+-      }
+-
++      isert_conn_terminate(isert_conn);
+       mutex_unlock(&isert_conn->conn_mutex);
+-wake_up:
++      pr_info("conn %p completing conn_wait\n", isert_conn);
+       complete(&isert_conn->conn_wait);
+ }
+ static int
+-isert_disconnected_handler(struct rdma_cm_id *cma_id, bool disconnect)
++isert_disconnected_handler(struct rdma_cm_id *cma_id)
+ {
+       struct isert_conn *isert_conn;
+@@ -824,18 +833,24 @@ isert_disconnected_handler(struct rdma_c
+       isert_conn = (struct isert_conn *)cma_id->context;
+-      isert_conn->disconnect = disconnect;
+       INIT_WORK(&isert_conn->conn_logout_work, isert_disconnect_work);
+       schedule_work(&isert_conn->conn_logout_work);
+       return 0;
+ }
++static void
++isert_connect_error(struct rdma_cm_id *cma_id)
++{
++      struct isert_conn *isert_conn = (struct isert_conn *)cma_id->context;
++
++      isert_put_conn(isert_conn);
++}
++
+ static int
+ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
+ {
+       int ret = 0;
+-      bool disconnect = false;
+       pr_debug("isert_cma_handler: event %d status %d conn %p id %p\n",
+                event->event, event->status, cma_id->context, cma_id);
+@@ -853,11 +868,14 @@ isert_cma_handler(struct rdma_cm_id *cma
+       case RDMA_CM_EVENT_ADDR_CHANGE:    /* FALLTHRU */
+       case RDMA_CM_EVENT_DISCONNECTED:   /* FALLTHRU */
+       case RDMA_CM_EVENT_DEVICE_REMOVAL: /* FALLTHRU */
+-              disconnect = true;
+       case RDMA_CM_EVENT_TIMEWAIT_EXIT:  /* FALLTHRU */
+-              ret = isert_disconnected_handler(cma_id, disconnect);
++              ret = isert_disconnected_handler(cma_id);
+               break;
++      case RDMA_CM_EVENT_REJECTED:       /* FALLTHRU */
++      case RDMA_CM_EVENT_UNREACHABLE:    /* FALLTHRU */
+       case RDMA_CM_EVENT_CONNECT_ERROR:
++              isert_connect_error(cma_id);
++              break;
+       default:
+               pr_err("Unhandled RDMA CMA event: %d\n", event->event);
+               break;
+@@ -2046,7 +2064,7 @@ isert_cq_rx_comp_err(struct isert_conn *
+               msleep(3000);
+       mutex_lock(&isert_conn->conn_mutex);
+-      isert_conn->state = ISER_CONN_DOWN;
++      isert_conn_terminate(isert_conn);
+       mutex_unlock(&isert_conn->conn_mutex);
+       iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
+@@ -3219,10 +3237,6 @@ static void isert_wait_conn(struct iscsi
+       pr_debug("isert_wait_conn: Starting \n");
+       mutex_lock(&isert_conn->conn_mutex);
+-      if (isert_conn->conn_cm_id && !isert_conn->disconnect) {
+-              pr_debug("Calling rdma_disconnect from isert_wait_conn\n");
+-              rdma_disconnect(isert_conn->conn_cm_id);
+-      }
+       /*
+        * Only wait for conn_wait_comp_err if the isert_conn made it
+        * into full feature phase..
+@@ -3231,13 +3245,17 @@ static void isert_wait_conn(struct iscsi
+               mutex_unlock(&isert_conn->conn_mutex);
+               return;
+       }
+-      if (isert_conn->state == ISER_CONN_UP)
+-              isert_conn->state = ISER_CONN_TERMINATING;
++      isert_conn_terminate(isert_conn);
+       mutex_unlock(&isert_conn->conn_mutex);
+       wait_for_completion(&isert_conn->conn_wait_comp_err);
+-
+       wait_for_completion(&isert_conn->conn_wait);
++
++      mutex_lock(&isert_conn->conn_mutex);
++      isert_conn->state = ISER_CONN_DOWN;
++      mutex_unlock(&isert_conn->conn_mutex);
++
++      pr_info("Destroying conn %p\n", isert_conn);
+       isert_put_conn(isert_conn);
+ }
+--- a/drivers/infiniband/ulp/isert/ib_isert.h
++++ b/drivers/infiniband/ulp/isert/ib_isert.h
+@@ -150,7 +150,6 @@ struct isert_conn {
+ #define ISERT_COMP_BATCH_COUNT        8
+       int                     conn_comp_batch;
+       struct llist_head       conn_comp_llist;
+-      bool                    disconnect;
+ };
+ #define ISERT_MAX_CQ 64
+--- a/drivers/target/iscsi/iscsi_target_login.c
++++ b/drivers/target/iscsi/iscsi_target_login.c
+@@ -1204,6 +1204,9 @@ old_sess_out:
+               conn->sock = NULL;
+       }
++      if (conn->conn_transport->iscsit_wait_conn)
++              conn->conn_transport->iscsit_wait_conn(conn);
++
+       if (conn->conn_transport->iscsit_free_conn)
+               conn->conn_transport->iscsit_free_conn(conn);
diff --git a/queue-3.18/iscsi-target-fail-connection-on-short-sendmsg-writes.patch b/queue-3.18/iscsi-target-fail-connection-on-short-sendmsg-writes.patch
new file mode 100644 (file)
index 0000000..3717c07
--- /dev/null
@@ -0,0 +1,86 @@
+From 6bf6ca7515c1df06f5c03737537f5e0eb191e29e Mon Sep 17 00:00:00 2001
+From: Nicholas Bellinger <nab@linux-iscsi.org>
+Date: Thu, 20 Nov 2014 20:50:07 -0800
+Subject: iscsi-target: Fail connection on short sendmsg writes
+
+From: Nicholas Bellinger <nab@linux-iscsi.org>
+
+commit 6bf6ca7515c1df06f5c03737537f5e0eb191e29e upstream.
+
+This patch changes iscsit_do_tx_data() to fail on short writes
+when kernel_sendmsg() returns a value different from the requested
+transfer length, returning -EPIPE and thus causing a connection
+reset to occur.
+
+This avoids a potential bug in the original code where a short
+write would result in kernel_sendmsg() being called again with
+the original iovec base + length.
+
+In practice this has not been an issue because iscsit_do_tx_data()
+is only used for transferring 48 byte headers + 4 byte digests,
+along with seldom used control payloads from NOPIN + TEXT_RSP +
+REJECT with less than 32k of data.
+
+So following Al's audit of iovec consumers, go ahead and fail
+the connection on short writes for now, and remove the bogus
+logic ahead of his proper upstream fix.
+
+Reported-by: Al Viro <viro@zeniv.linux.org.uk>
+Cc: David S. Miller <davem@davemloft.net>
+Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/target/iscsi/iscsi_target_util.c |   26 +++++++++++---------------
+ 1 file changed, 11 insertions(+), 15 deletions(-)
+
+--- a/drivers/target/iscsi/iscsi_target_util.c
++++ b/drivers/target/iscsi/iscsi_target_util.c
+@@ -1358,15 +1358,15 @@ static int iscsit_do_tx_data(
+       struct iscsi_conn *conn,
+       struct iscsi_data_count *count)
+ {
+-      int data = count->data_length, total_tx = 0, tx_loop = 0, iov_len;
++      int ret, iov_len;
+       struct kvec *iov_p;
+       struct msghdr msg;
+       if (!conn || !conn->sock || !conn->conn_ops)
+               return -1;
+-      if (data <= 0) {
+-              pr_err("Data length is: %d\n", data);
++      if (count->data_length <= 0) {
++              pr_err("Data length is: %d\n", count->data_length);
+               return -1;
+       }
+@@ -1375,20 +1375,16 @@ static int iscsit_do_tx_data(
+       iov_p = count->iov;
+       iov_len = count->iov_count;
+-      while (total_tx < data) {
+-              tx_loop = kernel_sendmsg(conn->sock, &msg, iov_p, iov_len,
+-                                      (data - total_tx));
+-              if (tx_loop <= 0) {
+-                      pr_debug("tx_loop: %d total_tx %d\n",
+-                              tx_loop, total_tx);
+-                      return tx_loop;
+-              }
+-              total_tx += tx_loop;
+-              pr_debug("tx_loop: %d, total_tx: %d, data: %d\n",
+-                                      tx_loop, total_tx, data);
++      ret = kernel_sendmsg(conn->sock, &msg, iov_p, iov_len,
++                           count->data_length);
++      if (ret != count->data_length) {
++              pr_err("Unexpected ret: %d send data %d\n",
++                     ret, count->data_length);
++              return -EPIPE;
+       }
++      pr_debug("ret: %d, sent data: %d\n", ret, count->data_length);
+-      return total_tx;
++      return ret;
+ }
+ int rx_data(
diff --git a/queue-3.18/iser-target-allocate-pi-contexts-dynamically.patch b/queue-3.18/iser-target-allocate-pi-contexts-dynamically.patch
new file mode 100644 (file)
index 0000000..b94a5e7
--- /dev/null
@@ -0,0 +1,462 @@
+From 570db170f37715b7df23c95868169f3d9affa48c Mon Sep 17 00:00:00 2001
+From: Sagi Grimberg <sagig@mellanox.com>
+Date: Tue, 2 Dec 2014 16:57:31 +0200
+Subject: iser-target: Allocate PI contexts dynamically
+
+From: Sagi Grimberg <sagig@mellanox.com>
+
+commit 570db170f37715b7df23c95868169f3d9affa48c upstream.
+
+This patch converts the code to allocate PI contexts dynamically in
+order to avoid a potentially bogus np->tpg_np and associated NULL pointer
+dereference in isert_connect_request() during iser-target endpoint
+shutdown with multiple network portals.
+
+Also, there is really no need to allocate these at connection
+establishment since it is not guaranteed that all the IOs on
+that connection will be to a PI formatted device.
+
+We can do it in a lazy fashion, so the initial burst will see a
+transient slowdown, but very quickly all IOs will have a PI
+context allocated.
+
+Squashed:
+
+iser-target: Centralize PI context handling code
+
+Signed-off-by: Sagi Grimberg <sagig@mellanox.com>
+Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/ulp/isert/ib_isert.c |  267 ++++++++++++++++++--------------
+ drivers/infiniband/ulp/isert/ib_isert.h |    7 
+ 2 files changed, 158 insertions(+), 116 deletions(-)
+
+--- a/drivers/infiniband/ulp/isert/ib_isert.c
++++ b/drivers/infiniband/ulp/isert/ib_isert.c
+@@ -96,8 +96,7 @@ isert_query_device(struct ib_device *ib_
+ }
+ static int
+-isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id,
+-                  u8 protection)
++isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
+ {
+       struct isert_device *device = isert_conn->conn_device;
+       struct ib_qp_init_attr attr;
+@@ -132,7 +131,7 @@ isert_conn_setup_qp(struct isert_conn *i
+       attr.cap.max_recv_sge = 1;
+       attr.sq_sig_type = IB_SIGNAL_REQ_WR;
+       attr.qp_type = IB_QPT_RC;
+-      if (protection)
++      if (device->pi_capable)
+               attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN;
+       pr_debug("isert_conn_setup_qp cma_id->device: %p\n",
+@@ -442,8 +441,68 @@ isert_conn_free_fastreg_pool(struct iser
+ }
+ static int
++isert_create_pi_ctx(struct fast_reg_descriptor *desc,
++                  struct ib_device *device,
++                  struct ib_pd *pd)
++{
++      struct ib_mr_init_attr mr_init_attr;
++      struct pi_context *pi_ctx;
++      int ret;
++
++      pi_ctx = kzalloc(sizeof(*desc->pi_ctx), GFP_KERNEL);
++      if (!pi_ctx) {
++              pr_err("Failed to allocate pi context\n");
++              return -ENOMEM;
++      }
++
++      pi_ctx->prot_frpl = ib_alloc_fast_reg_page_list(device,
++                                          ISCSI_ISER_SG_TABLESIZE);
++      if (IS_ERR(pi_ctx->prot_frpl)) {
++              pr_err("Failed to allocate prot frpl err=%ld\n",
++                        PTR_ERR(pi_ctx->prot_frpl));
++              ret = PTR_ERR(pi_ctx->prot_frpl);
++              goto err_pi_ctx;
++      }
++
++      pi_ctx->prot_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE);
++      if (IS_ERR(pi_ctx->prot_mr)) {
++              pr_err("Failed to allocate prot frmr err=%ld\n",
++                        PTR_ERR(pi_ctx->prot_mr));
++              ret = PTR_ERR(pi_ctx->prot_mr);
++              goto err_prot_frpl;
++      }
++      desc->ind |= ISERT_PROT_KEY_VALID;
++
++      memset(&mr_init_attr, 0, sizeof(mr_init_attr));
++      mr_init_attr.max_reg_descriptors = 2;
++      mr_init_attr.flags |= IB_MR_SIGNATURE_EN;
++      pi_ctx->sig_mr = ib_create_mr(pd, &mr_init_attr);
++      if (IS_ERR(pi_ctx->sig_mr)) {
++              pr_err("Failed to allocate signature enabled mr err=%ld\n",
++                        PTR_ERR(pi_ctx->sig_mr));
++              ret = PTR_ERR(pi_ctx->sig_mr);
++              goto err_prot_mr;
++      }
++
++      desc->pi_ctx = pi_ctx;
++      desc->ind |= ISERT_SIG_KEY_VALID;
++      desc->ind &= ~ISERT_PROTECTED;
++
++      return 0;
++
++err_prot_mr:
++      ib_dereg_mr(desc->pi_ctx->prot_mr);
++err_prot_frpl:
++      ib_free_fast_reg_page_list(desc->pi_ctx->prot_frpl);
++err_pi_ctx:
++      kfree(desc->pi_ctx);
++
++      return ret;
++}
++
++static int
+ isert_create_fr_desc(struct ib_device *ib_device, struct ib_pd *pd,
+-                   struct fast_reg_descriptor *fr_desc, u8 protection)
++                   struct fast_reg_descriptor *fr_desc)
+ {
+       int ret;
+@@ -462,62 +521,12 @@ isert_create_fr_desc(struct ib_device *i
+               ret = PTR_ERR(fr_desc->data_mr);
+               goto err_data_frpl;
+       }
+-      pr_debug("Create fr_desc %p page_list %p\n",
+-               fr_desc, fr_desc->data_frpl->page_list);
+       fr_desc->ind |= ISERT_DATA_KEY_VALID;
+-      if (protection) {
+-              struct ib_mr_init_attr mr_init_attr = {0};
+-              struct pi_context *pi_ctx;
+-
+-              fr_desc->pi_ctx = kzalloc(sizeof(*fr_desc->pi_ctx), GFP_KERNEL);
+-              if (!fr_desc->pi_ctx) {
+-                      pr_err("Failed to allocate pi context\n");
+-                      ret = -ENOMEM;
+-                      goto err_data_mr;
+-              }
+-              pi_ctx = fr_desc->pi_ctx;
+-
+-              pi_ctx->prot_frpl = ib_alloc_fast_reg_page_list(ib_device,
+-                                                  ISCSI_ISER_SG_TABLESIZE);
+-              if (IS_ERR(pi_ctx->prot_frpl)) {
+-                      pr_err("Failed to allocate prot frpl err=%ld\n",
+-                             PTR_ERR(pi_ctx->prot_frpl));
+-                      ret = PTR_ERR(pi_ctx->prot_frpl);
+-                      goto err_pi_ctx;
+-              }
+-
+-              pi_ctx->prot_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE);
+-              if (IS_ERR(pi_ctx->prot_mr)) {
+-                      pr_err("Failed to allocate prot frmr err=%ld\n",
+-                             PTR_ERR(pi_ctx->prot_mr));
+-                      ret = PTR_ERR(pi_ctx->prot_mr);
+-                      goto err_prot_frpl;
+-              }
+-              fr_desc->ind |= ISERT_PROT_KEY_VALID;
+-
+-              mr_init_attr.max_reg_descriptors = 2;
+-              mr_init_attr.flags |= IB_MR_SIGNATURE_EN;
+-              pi_ctx->sig_mr = ib_create_mr(pd, &mr_init_attr);
+-              if (IS_ERR(pi_ctx->sig_mr)) {
+-                      pr_err("Failed to allocate signature enabled mr err=%ld\n",
+-                             PTR_ERR(pi_ctx->sig_mr));
+-                      ret = PTR_ERR(pi_ctx->sig_mr);
+-                      goto err_prot_mr;
+-              }
+-              fr_desc->ind |= ISERT_SIG_KEY_VALID;
+-      }
+-      fr_desc->ind &= ~ISERT_PROTECTED;
++      pr_debug("Created fr_desc %p\n", fr_desc);
+       return 0;
+-err_prot_mr:
+-      ib_dereg_mr(fr_desc->pi_ctx->prot_mr);
+-err_prot_frpl:
+-      ib_free_fast_reg_page_list(fr_desc->pi_ctx->prot_frpl);
+-err_pi_ctx:
+-      kfree(fr_desc->pi_ctx);
+-err_data_mr:
+-      ib_dereg_mr(fr_desc->data_mr);
++
+ err_data_frpl:
+       ib_free_fast_reg_page_list(fr_desc->data_frpl);
+@@ -525,7 +534,7 @@ err_data_frpl:
+ }
+ static int
+-isert_conn_create_fastreg_pool(struct isert_conn *isert_conn, u8 pi_support)
++isert_conn_create_fastreg_pool(struct isert_conn *isert_conn)
+ {
+       struct fast_reg_descriptor *fr_desc;
+       struct isert_device *device = isert_conn->conn_device;
+@@ -549,8 +558,7 @@ isert_conn_create_fastreg_pool(struct is
+               }
+               ret = isert_create_fr_desc(device->ib_device,
+-                                         isert_conn->conn_pd, fr_desc,
+-                                         pi_support);
++                                         isert_conn->conn_pd, fr_desc);
+               if (ret) {
+                       pr_err("Failed to create fastreg descriptor err=%d\n",
+                              ret);
+@@ -581,7 +589,6 @@ isert_connect_request(struct rdma_cm_id
+       struct isert_device *device;
+       struct ib_device *ib_dev = cma_id->device;
+       int ret = 0;
+-      u8 pi_support;
+       spin_lock_bh(&np->np_thread_lock);
+       if (!np->enabled) {
+@@ -681,15 +688,7 @@ isert_connect_request(struct rdma_cm_id
+               goto out_mr;
+       }
+-      pi_support = np->tpg_np->tpg->tpg_attrib.t10_pi;
+-      if (pi_support && !device->pi_capable) {
+-              pr_err("Protection information requested but not supported, "
+-                     "rejecting connect request\n");
+-              ret = rdma_reject(cma_id, NULL, 0);
+-              goto out_mr;
+-      }
+-
+-      ret = isert_conn_setup_qp(isert_conn, cma_id, pi_support);
++      ret = isert_conn_setup_qp(isert_conn, cma_id);
+       if (ret)
+               goto out_conn_dev;
+@@ -1151,11 +1150,7 @@ isert_put_login_tx(struct iscsi_conn *co
+               if (login->login_complete) {
+                       if (!conn->sess->sess_ops->SessionType &&
+                           isert_conn->conn_device->use_fastreg) {
+-                              /* Normal Session and fastreg is used */
+-                              u8 pi_support = login->np->tpg_np->tpg->tpg_attrib.t10_pi;
+-
+-                              ret = isert_conn_create_fastreg_pool(isert_conn,
+-                                                                   pi_support);
++                              ret = isert_conn_create_fastreg_pool(isert_conn);
+                               if (ret) {
+                                       pr_err("Conn: %p failed to create"
+                                              " fastreg pool\n", isert_conn);
+@@ -2771,10 +2766,10 @@ isert_set_prot_checks(u8 prot_checks)
+ }
+ static int
+-isert_reg_sig_mr(struct isert_conn *isert_conn, struct se_cmd *se_cmd,
+-               struct fast_reg_descriptor *fr_desc,
+-               struct ib_sge *data_sge, struct ib_sge *prot_sge,
+-               struct ib_sge *sig_sge)
++isert_reg_sig_mr(struct isert_conn *isert_conn,
++               struct se_cmd *se_cmd,
++               struct isert_rdma_wr *rdma_wr,
++               struct fast_reg_descriptor *fr_desc)
+ {
+       struct ib_send_wr sig_wr, inv_wr;
+       struct ib_send_wr *bad_wr, *wr = NULL;
+@@ -2804,13 +2799,13 @@ isert_reg_sig_mr(struct isert_conn *iser
+       memset(&sig_wr, 0, sizeof(sig_wr));
+       sig_wr.opcode = IB_WR_REG_SIG_MR;
+       sig_wr.wr_id = ISER_FASTREG_LI_WRID;
+-      sig_wr.sg_list = data_sge;
++      sig_wr.sg_list = &rdma_wr->ib_sg[DATA];
+       sig_wr.num_sge = 1;
+       sig_wr.wr.sig_handover.access_flags = IB_ACCESS_LOCAL_WRITE;
+       sig_wr.wr.sig_handover.sig_attrs = &sig_attrs;
+       sig_wr.wr.sig_handover.sig_mr = pi_ctx->sig_mr;
+       if (se_cmd->t_prot_sg)
+-              sig_wr.wr.sig_handover.prot = prot_sge;
++              sig_wr.wr.sig_handover.prot = &rdma_wr->ib_sg[PROT];
+       if (!wr)
+               wr = &sig_wr;
+@@ -2824,34 +2819,93 @@ isert_reg_sig_mr(struct isert_conn *iser
+       }
+       fr_desc->ind &= ~ISERT_SIG_KEY_VALID;
+-      sig_sge->lkey = pi_ctx->sig_mr->lkey;
+-      sig_sge->addr = 0;
+-      sig_sge->length = se_cmd->data_length;
++      rdma_wr->ib_sg[SIG].lkey = pi_ctx->sig_mr->lkey;
++      rdma_wr->ib_sg[SIG].addr = 0;
++      rdma_wr->ib_sg[SIG].length = se_cmd->data_length;
+       if (se_cmd->prot_op != TARGET_PROT_DIN_STRIP &&
+           se_cmd->prot_op != TARGET_PROT_DOUT_INSERT)
+               /*
+                * We have protection guards on the wire
+                * so we need to set a larget transfer
+                */
+-              sig_sge->length += se_cmd->prot_length;
++              rdma_wr->ib_sg[SIG].length += se_cmd->prot_length;
+       pr_debug("sig_sge: addr: 0x%llx  length: %u lkey: %x\n",
+-               sig_sge->addr, sig_sge->length,
+-               sig_sge->lkey);
++                rdma_wr->ib_sg[SIG].addr, rdma_wr->ib_sg[SIG].length,
++                rdma_wr->ib_sg[SIG].lkey);
+ err:
+       return ret;
+ }
+ static int
++isert_handle_prot_cmd(struct isert_conn *isert_conn,
++                    struct isert_cmd *isert_cmd,
++                    struct isert_rdma_wr *wr)
++{
++      struct isert_device *device = isert_conn->conn_device;
++      struct se_cmd *se_cmd = &isert_cmd->iscsi_cmd->se_cmd;
++      int ret;
++
++      if (!wr->fr_desc->pi_ctx) {
++              ret = isert_create_pi_ctx(wr->fr_desc,
++                                        device->ib_device,
++                                        isert_conn->conn_pd);
++              if (ret) {
++                      pr_err("conn %p failed to allocate pi_ctx\n",
++                                isert_conn);
++                      return ret;
++              }
++      }
++
++      if (se_cmd->t_prot_sg) {
++              ret = isert_map_data_buf(isert_conn, isert_cmd,
++                                       se_cmd->t_prot_sg,
++                                       se_cmd->t_prot_nents,
++                                       se_cmd->prot_length,
++                                       0, wr->iser_ib_op, &wr->prot);
++              if (ret) {
++                      pr_err("conn %p failed to map protection buffer\n",
++                                isert_conn);
++                      return ret;
++              }
++
++              memset(&wr->ib_sg[PROT], 0, sizeof(wr->ib_sg[PROT]));
++              ret = isert_fast_reg_mr(isert_conn, wr->fr_desc, &wr->prot,
++                                      ISERT_PROT_KEY_VALID, &wr->ib_sg[PROT]);
++              if (ret) {
++                      pr_err("conn %p failed to fast reg mr\n",
++                                isert_conn);
++                      goto unmap_prot_cmd;
++              }
++      }
++
++      ret = isert_reg_sig_mr(isert_conn, se_cmd, wr, wr->fr_desc);
++      if (ret) {
++              pr_err("conn %p failed to fast reg mr\n",
++                        isert_conn);
++              goto unmap_prot_cmd;
++      }
++      wr->fr_desc->ind |= ISERT_PROTECTED;
++
++      return 0;
++
++unmap_prot_cmd:
++      if (se_cmd->t_prot_sg)
++              isert_unmap_data_buf(isert_conn, &wr->prot);
++
++      return ret;
++}
++
++static int
+ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+              struct isert_rdma_wr *wr)
+ {
+       struct se_cmd *se_cmd = &cmd->se_cmd;
+       struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
+       struct isert_conn *isert_conn = conn->context;
+-      struct ib_sge data_sge;
+-      struct ib_send_wr *send_wr;
+       struct fast_reg_descriptor *fr_desc = NULL;
++      struct ib_send_wr *send_wr;
++      struct ib_sge *ib_sg;
+       u32 offset;
+       int ret = 0;
+       unsigned long flags;
+@@ -2876,38 +2930,21 @@ isert_reg_rdma(struct iscsi_conn *conn,
+       }
+       ret = isert_fast_reg_mr(isert_conn, fr_desc, &wr->data,
+-                              ISERT_DATA_KEY_VALID, &data_sge);
++                              ISERT_DATA_KEY_VALID, &wr->ib_sg[DATA]);
+       if (ret)
+               goto unmap_cmd;
+       if (se_cmd->prot_op != TARGET_PROT_NORMAL) {
+-              struct ib_sge prot_sge, sig_sge;
+-
+-              if (se_cmd->t_prot_sg) {
+-                      ret = isert_map_data_buf(isert_conn, isert_cmd,
+-                                               se_cmd->t_prot_sg,
+-                                               se_cmd->t_prot_nents,
+-                                               se_cmd->prot_length,
+-                                               0, wr->iser_ib_op, &wr->prot);
+-                      if (ret)
+-                              goto unmap_cmd;
+-
+-                      ret = isert_fast_reg_mr(isert_conn, fr_desc, &wr->prot,
+-                                              ISERT_PROT_KEY_VALID, &prot_sge);
+-                      if (ret)
+-                              goto unmap_prot_cmd;
+-              }
+-
+-              ret = isert_reg_sig_mr(isert_conn, se_cmd, fr_desc,
+-                                     &data_sge, &prot_sge, &sig_sge);
++              ret = isert_handle_prot_cmd(isert_conn, isert_cmd, wr);
+               if (ret)
+-                      goto unmap_prot_cmd;
++                      goto unmap_cmd;
+-              fr_desc->ind |= ISERT_PROTECTED;
+-              memcpy(&wr->s_ib_sge, &sig_sge, sizeof(sig_sge));
+-      } else
+-              memcpy(&wr->s_ib_sge, &data_sge, sizeof(data_sge));
++              ib_sg = &wr->ib_sg[SIG];
++      } else {
++              ib_sg = &wr->ib_sg[DATA];
++      }
++      memcpy(&wr->s_ib_sge, ib_sg, sizeof(*ib_sg));
+       wr->ib_sge = &wr->s_ib_sge;
+       wr->send_wr_num = 1;
+       memset(&wr->s_send_wr, 0, sizeof(*send_wr));
+@@ -2932,9 +2969,7 @@ isert_reg_rdma(struct iscsi_conn *conn,
+       }
+       return 0;
+-unmap_prot_cmd:
+-      if (se_cmd->t_prot_sg)
+-              isert_unmap_data_buf(isert_conn, &wr->prot);
++
+ unmap_cmd:
+       if (fr_desc) {
+               spin_lock_irqsave(&isert_conn->conn_lock, flags);
+--- a/drivers/infiniband/ulp/isert/ib_isert.h
++++ b/drivers/infiniband/ulp/isert/ib_isert.h
+@@ -82,6 +82,12 @@ struct isert_data_buf {
+       enum dma_data_direction dma_dir;
+ };
++enum {
++      DATA = 0,
++      PROT = 1,
++      SIG = 2,
++};
++
+ struct isert_rdma_wr {
+       struct list_head        wr_list;
+       struct isert_cmd        *isert_cmd;
+@@ -91,6 +97,7 @@ struct isert_rdma_wr {
+       int                     send_wr_num;
+       struct ib_send_wr       *send_wr;
+       struct ib_send_wr       s_send_wr;
++      struct ib_sge           ib_sg[3];
+       struct isert_data_buf   data;
+       struct isert_data_buf   prot;
+       struct fast_reg_descriptor *fr_desc;
diff --git a/queue-3.18/iser-target-fix-connected_handler-teardown-flow-race.patch b/queue-3.18/iser-target-fix-connected_handler-teardown-flow-race.patch
new file mode 100644 (file)
index 0000000..819ef0d
--- /dev/null
@@ -0,0 +1,130 @@
+From 19e2090fb246ca21b3e569ead51a6a7a1748eadd Mon Sep 17 00:00:00 2001
+From: Sagi Grimberg <sagig@mellanox.com>
+Date: Tue, 2 Dec 2014 16:57:26 +0200
+Subject: iser-target: Fix connected_handler + teardown flow race
+
+From: Sagi Grimberg <sagig@mellanox.com>
+
+commit 19e2090fb246ca21b3e569ead51a6a7a1748eadd upstream.
+
+Take the isert_conn pointer from cm_id->qp->qp_context. This
+will allow us to know that the cm_id context is always
+the network portal. This will make the cm_id event check
+(connection or network portal) more reliable.
+
+In order to avoid a NULL dereference in cma_id->qp->qp_context
+we destroy the qp after we destroy the cm_id (and make the
+dereference safe). Session establishment/teardown sequences
+can happen in parallel, so we should take into account that
+connected_handler might race with the connection teardown flow.
+
+Also, protect isert_conn->conn_device->active_qps decrement
+within the error path during QP creation failure and the
+normal teardown path in isert_connect_release().
+
+Squashed:
+
+iser-target: Decrement completion context active_qps in error flow
+
+Signed-off-by: Sagi Grimberg <sagig@mellanox.com>
+Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/ulp/isert/ib_isert.c |   31 +++++++++++++++++++------------
+ 1 file changed, 19 insertions(+), 12 deletions(-)
+
+--- a/drivers/infiniband/ulp/isert/ib_isert.c
++++ b/drivers/infiniband/ulp/isert/ib_isert.c
+@@ -141,12 +141,18 @@ isert_conn_setup_qp(struct isert_conn *i
+       ret = rdma_create_qp(cma_id, isert_conn->conn_pd, &attr);
+       if (ret) {
+               pr_err("rdma_create_qp failed for cma_id %d\n", ret);
+-              return ret;
++              goto err;
+       }
+       isert_conn->conn_qp = cma_id->qp;
+       pr_debug("rdma_create_qp() returned success >>>>>>>>>>>>>>>>>>>>>>>>>.\n");
+       return 0;
++err:
++      mutex_lock(&device_list_mutex);
++      device->cq_active_qps[min_index]--;
++      mutex_unlock(&device_list_mutex);
++
++      return ret;
+ }
+ static void
+@@ -602,7 +608,6 @@ isert_connect_request(struct rdma_cm_id
+       spin_lock_init(&isert_conn->conn_lock);
+       INIT_LIST_HEAD(&isert_conn->conn_fr_pool);
+-      cma_id->context = isert_conn;
+       isert_conn->conn_cm_id = cma_id;
+       isert_conn->login_buf = kzalloc(ISCSI_DEF_MAX_RECV_SEG_LEN +
+@@ -734,18 +739,20 @@ isert_connect_release(struct isert_conn
+       if (device && device->use_fastreg)
+               isert_conn_free_fastreg_pool(isert_conn);
++      isert_free_rx_descriptors(isert_conn);
++      rdma_destroy_id(isert_conn->conn_cm_id);
++
+       if (isert_conn->conn_qp) {
+               cq_index = ((struct isert_cq_desc *)
+                       isert_conn->conn_qp->recv_cq->cq_context)->cq_index;
+               pr_debug("isert_connect_release: cq_index: %d\n", cq_index);
++              mutex_lock(&device_list_mutex);
+               isert_conn->conn_device->cq_active_qps[cq_index]--;
++              mutex_unlock(&device_list_mutex);
+-              rdma_destroy_qp(isert_conn->conn_cm_id);
++              ib_destroy_qp(isert_conn->conn_qp);
+       }
+-      isert_free_rx_descriptors(isert_conn);
+-      rdma_destroy_id(isert_conn->conn_cm_id);
+-
+       ib_dereg_mr(isert_conn->conn_mr);
+       ib_dealloc_pd(isert_conn->conn_pd);
+@@ -768,7 +775,7 @@ isert_connect_release(struct isert_conn
+ static void
+ isert_connected_handler(struct rdma_cm_id *cma_id)
+ {
+-      struct isert_conn *isert_conn = cma_id->context;
++      struct isert_conn *isert_conn = cma_id->qp->qp_context;
+       pr_info("conn %p\n", isert_conn);
+@@ -846,16 +853,16 @@ isert_conn_terminate(struct isert_conn *
+ static int
+ isert_disconnected_handler(struct rdma_cm_id *cma_id)
+ {
++      struct iscsi_np *np = cma_id->context;
++      struct isert_np *isert_np = np->np_context;
+       struct isert_conn *isert_conn;
+-      if (!cma_id->qp) {
+-              struct isert_np *isert_np = cma_id->context;
+-
++      if (isert_np->np_cm_id == cma_id) {
+               isert_np->np_cm_id = NULL;
+               return -1;
+       }
+-      isert_conn = (struct isert_conn *)cma_id->context;
++      isert_conn = cma_id->qp->qp_context;
+       mutex_lock(&isert_conn->conn_mutex);
+       isert_conn_terminate(isert_conn);
+@@ -870,7 +877,7 @@ isert_disconnected_handler(struct rdma_c
+ static void
+ isert_connect_error(struct rdma_cm_id *cma_id)
+ {
+-      struct isert_conn *isert_conn = (struct isert_conn *)cma_id->context;
++      struct isert_conn *isert_conn = cma_id->qp->qp_context;
+       isert_put_conn(isert_conn);
+ }
diff --git a/queue-3.18/iser-target-fix-flush-disconnect-completion-handling.patch b/queue-3.18/iser-target-fix-flush-disconnect-completion-handling.patch
new file mode 100644 (file)
index 0000000..82a3450
--- /dev/null
@@ -0,0 +1,165 @@
+From 128e9cc84566a84146baea2335b3824288eed817 Mon Sep 17 00:00:00 2001
+From: Sagi Grimberg <sagig@mellanox.com>
+Date: Tue, 2 Dec 2014 16:57:20 +0200
+Subject: iser-target: Fix flush + disconnect completion handling
+
+From: Sagi Grimberg <sagig@mellanox.com>
+
+commit 128e9cc84566a84146baea2335b3824288eed817 upstream.
+
+The ISER_CONN_UP state is not sufficient to know whether
+we should wait for completion of flush errors and the
+disconnected_handler event.
+
+Instead, split it into 2 states:
+- ISER_CONN_UP: Got to the CM connected phase. This state
+indicates that we need to wait for a CM disconnect
+event before going to teardown.
+
+- ISER_CONN_FULL_FEATURE: Got to full feature phase
+after we posted the login response. This state indicates
+that we posted recv buffers and we need to wait for
+flush completions before going to teardown.
+
+Also avoid deferring the disconnected handler to a work item,
+and handle it within the disconnected handler.
+More work is needed here to handle the DEVICE_REMOVAL event
+correctly (clean up all resources).
+
+Squashed:
+
+iser-target: Don't deffer disconnected handler to a work
+
+Signed-off-by: Sagi Grimberg <sagig@mellanox.com>
+Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/ulp/isert/ib_isert.c |   52 ++++++++++++++++++--------------
+ drivers/infiniband/ulp/isert/ib_isert.h |    2 -
+ 2 files changed, 31 insertions(+), 23 deletions(-)
+
+--- a/drivers/infiniband/ulp/isert/ib_isert.c
++++ b/drivers/infiniband/ulp/isert/ib_isert.c
+@@ -756,6 +756,9 @@ isert_connected_handler(struct rdma_cm_i
+ {
+       struct isert_conn *isert_conn = cma_id->context;
++      pr_info("conn %p\n", isert_conn);
++
++      isert_conn->state = ISER_CONN_UP;
+       kref_get(&isert_conn->conn_kref);
+ }
+@@ -782,8 +785,9 @@ isert_put_conn(struct isert_conn *isert_
+  * @isert_conn: isert connection struct
+  *
+  * Notes:
+- * In case the connection state is UP, move state
++ * In case the connection state is FULL_FEATURE, move state
+  * to TEMINATING and start teardown sequence (rdma_disconnect).
++ * In case the connection state is UP, complete flush as well.
+  *
+  * This routine must be called with conn_mutex held. Thus it is
+  * safe to call multiple times.
+@@ -793,32 +797,31 @@ isert_conn_terminate(struct isert_conn *
+ {
+       int err;
+-      if (isert_conn->state == ISER_CONN_UP) {
+-              isert_conn->state = ISER_CONN_TERMINATING;
++      switch (isert_conn->state) {
++      case ISER_CONN_TERMINATING:
++              break;
++      case ISER_CONN_UP:
++              /*
++               * No flush completions will occur as we didn't
++               * get to ISER_CONN_FULL_FEATURE yet, complete
++               * to allow teardown progress.
++               */
++              complete(&isert_conn->conn_wait_comp_err);
++      case ISER_CONN_FULL_FEATURE: /* FALLTHRU */
+               pr_info("Terminating conn %p state %d\n",
+                          isert_conn, isert_conn->state);
++              isert_conn->state = ISER_CONN_TERMINATING;
+               err = rdma_disconnect(isert_conn->conn_cm_id);
+               if (err)
+                       pr_warn("Failed rdma_disconnect isert_conn %p\n",
+                                  isert_conn);
++              break;
++      default:
++              pr_warn("conn %p teminating in state %d\n",
++                         isert_conn, isert_conn->state);
+       }
+ }
+-static void
+-isert_disconnect_work(struct work_struct *work)
+-{
+-      struct isert_conn *isert_conn = container_of(work,
+-                              struct isert_conn, conn_logout_work);
+-
+-      pr_debug("isert_disconnect_work(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
+-      mutex_lock(&isert_conn->conn_mutex);
+-      isert_conn_terminate(isert_conn);
+-      mutex_unlock(&isert_conn->conn_mutex);
+-
+-      pr_info("conn %p completing conn_wait\n", isert_conn);
+-      complete(&isert_conn->conn_wait);
+-}
+-
+ static int
+ isert_disconnected_handler(struct rdma_cm_id *cma_id)
+ {
+@@ -833,8 +836,12 @@ isert_disconnected_handler(struct rdma_c
+       isert_conn = (struct isert_conn *)cma_id->context;
+-      INIT_WORK(&isert_conn->conn_logout_work, isert_disconnect_work);
+-      schedule_work(&isert_conn->conn_logout_work);
++      mutex_lock(&isert_conn->conn_mutex);
++      isert_conn_terminate(isert_conn);
++      mutex_unlock(&isert_conn->conn_mutex);
++
++      pr_info("conn %p completing conn_wait\n", isert_conn);
++      complete(&isert_conn->conn_wait);
+       return 0;
+ }
+@@ -1009,7 +1016,7 @@ isert_init_send_wr(struct isert_conn *is
+        * bit for every ISERT_COMP_BATCH_COUNT number of ib_post_send() calls.
+        */
+       mutex_lock(&isert_conn->conn_mutex);
+-      if (coalesce && isert_conn->state == ISER_CONN_UP &&
++      if (coalesce && isert_conn->state == ISER_CONN_FULL_FEATURE &&
+           ++isert_conn->conn_comp_batch < ISERT_COMP_BATCH_COUNT) {
+               tx_desc->llnode_active = true;
+               llist_add(&tx_desc->comp_llnode, &isert_conn->conn_comp_llist);
+@@ -1110,7 +1117,8 @@ isert_put_login_tx(struct iscsi_conn *co
+                       if (ret)
+                               return ret;
+-                      isert_conn->state = ISER_CONN_UP;
++                      /* Now we are in FULL_FEATURE phase */
++                      isert_conn->state = ISER_CONN_FULL_FEATURE;
+                       goto post_send;
+               }
+--- a/drivers/infiniband/ulp/isert/ib_isert.h
++++ b/drivers/infiniband/ulp/isert/ib_isert.h
+@@ -23,6 +23,7 @@ enum iser_ib_op_code {
+ enum iser_conn_state {
+       ISER_CONN_INIT,
+       ISER_CONN_UP,
++      ISER_CONN_FULL_FEATURE,
+       ISER_CONN_TERMINATING,
+       ISER_CONN_DOWN,
+ };
+@@ -138,7 +139,6 @@ struct isert_conn {
+       struct ib_mr            *conn_mr;
+       struct ib_qp            *conn_qp;
+       struct isert_device     *conn_device;
+-      struct work_struct      conn_logout_work;
+       struct mutex            conn_mutex;
+       struct completion       conn_wait;
+       struct completion       conn_wait_comp_err;
diff --git a/queue-3.18/iser-target-fix-implicit-termination-of-connections.patch b/queue-3.18/iser-target-fix-implicit-termination-of-connections.patch
new file mode 100644 (file)
index 0000000..5dff14c
--- /dev/null
@@ -0,0 +1,125 @@
+From b02efbfc9a051b41e71fe8f94ddf967260e024a6 Mon Sep 17 00:00:00 2001
+From: Sagi Grimberg <sagig@mellanox.com>
+Date: Tue, 2 Dec 2014 16:57:29 +0200
+Subject: iser-target: Fix implicit termination of connections
+
+From: Sagi Grimberg <sagig@mellanox.com>
+
+commit b02efbfc9a051b41e71fe8f94ddf967260e024a6 upstream.
+
+In situations such as bond failover, the new session establishment
+implicitly invokes the termination of the old connection.
+
+So, we don't want to wait for the old connection's wait_conn to completely
+terminate before we accept the new connection and post a login response.
+
+The solution is to defer the comp_wait completion and the conn_put to
+a work item so wait_conn will effectively be non-blocking (flush errors are
+assumed to come very fast).
+
+We allocate isert_release_wq with WQ_UNBOUND and WQ_UNBOUND_MAX_ACTIVE
+to spread the concurrency of release works.
+
+Reported-by: Slava Shwartsman <valyushash@gmail.com>
+Signed-off-by: Sagi Grimberg <sagig@mellanox.com>
+Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/ulp/isert/ib_isert.c |   42 +++++++++++++++++++++++++-------
+ drivers/infiniband/ulp/isert/ib_isert.h |    1 
+ 2 files changed, 35 insertions(+), 8 deletions(-)
+
+--- a/drivers/infiniband/ulp/isert/ib_isert.c
++++ b/drivers/infiniband/ulp/isert/ib_isert.c
+@@ -41,6 +41,7 @@ static DEFINE_MUTEX(device_list_mutex);
+ static LIST_HEAD(device_list);
+ static struct workqueue_struct *isert_rx_wq;
+ static struct workqueue_struct *isert_comp_wq;
++static struct workqueue_struct *isert_release_wq;
+ static void
+ isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
+@@ -3326,6 +3327,24 @@ isert_free_np(struct iscsi_np *np)
+       kfree(isert_np);
+ }
++static void isert_release_work(struct work_struct *work)
++{
++      struct isert_conn *isert_conn = container_of(work,
++                                                   struct isert_conn,
++                                                   release_work);
++
++      pr_info("Starting release conn %p\n", isert_conn);
++
++      wait_for_completion(&isert_conn->conn_wait);
++
++      mutex_lock(&isert_conn->conn_mutex);
++      isert_conn->state = ISER_CONN_DOWN;
++      mutex_unlock(&isert_conn->conn_mutex);
++
++      pr_info("Destroying conn %p\n", isert_conn);
++      isert_put_conn(isert_conn);
++}
++
+ static void isert_wait_conn(struct iscsi_conn *conn)
+ {
+       struct isert_conn *isert_conn = conn->context;
+@@ -3345,14 +3364,9 @@ static void isert_wait_conn(struct iscsi
+       mutex_unlock(&isert_conn->conn_mutex);
+       wait_for_completion(&isert_conn->conn_wait_comp_err);
+-      wait_for_completion(&isert_conn->conn_wait);
+-
+-      mutex_lock(&isert_conn->conn_mutex);
+-      isert_conn->state = ISER_CONN_DOWN;
+-      mutex_unlock(&isert_conn->conn_mutex);
+-      pr_info("Destroying conn %p\n", isert_conn);
+-      isert_put_conn(isert_conn);
++      INIT_WORK(&isert_conn->release_work, isert_release_work);
++      queue_work(isert_release_wq, &isert_conn->release_work);
+ }
+ static void isert_free_conn(struct iscsi_conn *conn)
+@@ -3400,10 +3414,21 @@ static int __init isert_init(void)
+               goto destroy_rx_wq;
+       }
++      isert_release_wq = alloc_workqueue("isert_release_wq", WQ_UNBOUND,
++                                      WQ_UNBOUND_MAX_ACTIVE);
++      if (!isert_release_wq) {
++              pr_err("Unable to allocate isert_release_wq\n");
++              ret = -ENOMEM;
++              goto destroy_comp_wq;
++      }
++
+       iscsit_register_transport(&iser_target_transport);
+-      pr_debug("iSER_TARGET[0] - Loaded iser_target_transport\n");
++      pr_info("iSER_TARGET[0] - Loaded iser_target_transport\n");
++
+       return 0;
++destroy_comp_wq:
++      destroy_workqueue(isert_comp_wq);
+ destroy_rx_wq:
+       destroy_workqueue(isert_rx_wq);
+       return ret;
+@@ -3412,6 +3437,7 @@ destroy_rx_wq:
+ static void __exit isert_exit(void)
+ {
+       flush_scheduled_work();
++      destroy_workqueue(isert_release_wq);
+       destroy_workqueue(isert_comp_wq);
+       destroy_workqueue(isert_rx_wq);
+       iscsit_unregister_transport(&iser_target_transport);
+--- a/drivers/infiniband/ulp/isert/ib_isert.h
++++ b/drivers/infiniband/ulp/isert/ib_isert.h
+@@ -149,6 +149,7 @@ struct isert_conn {
+       int                     conn_fr_pool_size;
+       /* lock to protect fastreg pool */
+       spinlock_t              conn_lock;
++      struct work_struct      release_work;
+ #define ISERT_COMP_BATCH_COUNT        8
+       int                     conn_comp_batch;
+       struct llist_head       conn_comp_llist;
diff --git a/queue-3.18/iser-target-fix-null-dereference-in-sw-mode-dif.patch b/queue-3.18/iser-target-fix-null-dereference-in-sw-mode-dif.patch
new file mode 100644 (file)
index 0000000..b7018ac
--- /dev/null
@@ -0,0 +1,107 @@
+From 302cc7c3ca14d21ccdffdebdb61c4fe028f2d5ad Mon Sep 17 00:00:00 2001
+From: Sagi Grimberg <sagig@mellanox.com>
+Date: Tue, 2 Dec 2014 16:57:34 +0200
+Subject: iser-target: Fix NULL dereference in SW mode DIF
+
+From: Sagi Grimberg <sagig@mellanox.com>
+
+commit 302cc7c3ca14d21ccdffdebdb61c4fe028f2d5ad upstream.
+
+Fall back to software mode DIF if the HCA does not support
+PI (without crashing, obviously). It is still possible to
+run with backend protection and an unprotected frontend,
+so looking at the command prot_op is not enough. Check
+device PI capability on a per-IO basis (isert_prot_cmd
+inline static) to determine if we need to handle protection
+information.
+
+Trace:
+BUG: unable to handle kernel NULL pointer dereference at 0000000000000010
+IP: [<ffffffffa037f8b1>] isert_reg_sig_mr+0x351/0x3b0 [ib_isert]
+Call Trace:
+ [<ffffffff812b003a>] ? swiotlb_map_sg_attrs+0x7a/0x130
+ [<ffffffffa038184d>] isert_reg_rdma+0x2fd/0x370 [ib_isert]
+ [<ffffffff8108f2ec>] ? idle_balance+0x6c/0x2c0
+ [<ffffffffa0382b68>] isert_put_datain+0x68/0x210 [ib_isert]
+ [<ffffffffa02acf5b>] lio_queue_data_in+0x2b/0x30 [iscsi_target_mod]
+ [<ffffffffa02306eb>] target_complete_ok_work+0x21b/0x310 [target_core_mod]
+ [<ffffffff8106ece2>] process_one_work+0x182/0x3b0
+ [<ffffffff8106fda0>] worker_thread+0x120/0x3c0
+ [<ffffffff8106fc80>] ? maybe_create_worker+0x190/0x190
+ [<ffffffff8107594e>] kthread+0xce/0xf0
+ [<ffffffff81075880>] ? kthread_freezable_should_stop+0x70/0x70
+ [<ffffffff8159a22c>] ret_from_fork+0x7c/0xb0
+ [<ffffffff81075880>] ? kthread_freezable_should_stop+0x70/0x70
+
+Reported-by: Slava Shwartsman <valyushash@gmail.com>
+Signed-off-by: Sagi Grimberg <sagig@mellanox.com>
+Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/ulp/isert/ib_isert.c |   19 +++++++++++++------
+ 1 file changed, 13 insertions(+), 6 deletions(-)
+
+--- a/drivers/infiniband/ulp/isert/ib_isert.c
++++ b/drivers/infiniband/ulp/isert/ib_isert.c
+@@ -61,6 +61,14 @@ static int
+ isert_rdma_accept(struct isert_conn *isert_conn);
+ struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np);
++static inline bool
++isert_prot_cmd(struct isert_conn *conn, struct se_cmd *cmd)
++{
++      return (conn->conn_device->pi_capable &&
++              cmd->prot_op != TARGET_PROT_NORMAL);
++}
++
++
+ static void
+ isert_qp_event_callback(struct ib_event *e, void *context)
+ {
+@@ -2919,8 +2927,7 @@ isert_reg_rdma(struct iscsi_conn *conn,
+       if (ret)
+               return ret;
+-      if (wr->data.dma_nents != 1 ||
+-          se_cmd->prot_op != TARGET_PROT_NORMAL) {
++      if (wr->data.dma_nents != 1 || isert_prot_cmd(isert_conn, se_cmd)) {
+               spin_lock_irqsave(&isert_conn->conn_lock, flags);
+               fr_desc = list_first_entry(&isert_conn->conn_fr_pool,
+                                          struct fast_reg_descriptor, list);
+@@ -2934,7 +2941,7 @@ isert_reg_rdma(struct iscsi_conn *conn,
+       if (ret)
+               goto unmap_cmd;
+-      if (se_cmd->prot_op != TARGET_PROT_NORMAL) {
++      if (isert_prot_cmd(isert_conn, se_cmd)) {
+               ret = isert_handle_prot_cmd(isert_conn, isert_cmd, wr);
+               if (ret)
+                       goto unmap_cmd;
+@@ -2959,7 +2966,7 @@ isert_reg_rdma(struct iscsi_conn *conn,
+               send_wr->opcode = IB_WR_RDMA_WRITE;
+               send_wr->wr.rdma.remote_addr = isert_cmd->read_va;
+               send_wr->wr.rdma.rkey = isert_cmd->read_stag;
+-              send_wr->send_flags = se_cmd->prot_op == TARGET_PROT_NORMAL ?
++              send_wr->send_flags = !isert_prot_cmd(isert_conn, se_cmd) ?
+                                     0 : IB_SEND_SIGNALED;
+       } else {
+               send_wr->opcode = IB_WR_RDMA_READ;
+@@ -3001,7 +3008,7 @@ isert_put_datain(struct iscsi_conn *conn
+               return rc;
+       }
+-      if (se_cmd->prot_op == TARGET_PROT_NORMAL) {
++      if (!isert_prot_cmd(isert_conn, se_cmd)) {
+               /*
+                * Build isert_conn->tx_desc for iSCSI response PDU and attach
+                */
+@@ -3024,7 +3031,7 @@ isert_put_datain(struct iscsi_conn *conn
+               atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
+       }
+-      if (se_cmd->prot_op == TARGET_PROT_NORMAL)
++      if (!isert_prot_cmd(isert_conn, se_cmd))
+               pr_debug("Cmd: %p posted RDMA_WRITE + Response for iSER Data "
+                        "READ\n", isert_cmd);
+       else
diff --git a/queue-3.18/iser-target-handle-addr_change-event-for-listener-cm_id.patch b/queue-3.18/iser-target-handle-addr_change-event-for-listener-cm_id.patch
new file mode 100644 (file)
index 0000000..8fd5adb
--- /dev/null
@@ -0,0 +1,224 @@
+From ca6c1d82d12d8013fb75ce015900d62b9754623c Mon Sep 17 00:00:00 2001
+From: Sagi Grimberg <sagig@mellanox.com>
+Date: Tue, 2 Dec 2014 16:57:27 +0200
+Subject: iser-target: Handle ADDR_CHANGE event for listener cm_id
+
+From: Sagi Grimberg <sagig@mellanox.com>
+
+commit ca6c1d82d12d8013fb75ce015900d62b9754623c upstream.
+
+The np listener cm_id will also get an ADDR_CHANGE event
+upcall (in case it is bound to a specific IP). Handle
+it correctly by creating a new cm_id and implicitly
+destroying the old one.
+
+Since this is the second event a listener np cm_id may
+encounter, move the np cm_id event handling into its
+own routine.
+
+Squashed:
+
+iser-target: Move cma_id setup to a function
+
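+A condensed sketch of the new listener handling (printouts omitted;
+the authoritative version is in the hunks below). Returning a non-zero
+value from an RDMA CM event handler tells the CM core to destroy the
+cm_id that delivered the event, which is what makes the destruction of
+the old listener id implicit:
+
+  static int
+  isert_np_cma_handler(struct isert_np *isert_np,
+                       enum rdma_cm_event_type event)
+  {
+          switch (event) {
+          case RDMA_CM_EVENT_DEVICE_REMOVAL:
+                  isert_np->np_cm_id = NULL;
+                  break;
+          case RDMA_CM_EVENT_ADDR_CHANGE:
+                  /* Re-create the listener; the old cm_id goes away
+                   * when we return non-zero below. */
+                  isert_np->np_cm_id = isert_setup_id(isert_np);
+                  if (IS_ERR(isert_np->np_cm_id))
+                          isert_np->np_cm_id = NULL;
+                  break;
+          default:
+                  break;
+          }
+          /* Non-zero return: let the RDMA CM destroy the old cm_id. */
+          return -1;
+  }
+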
+Reported-by: Slava Shwartsman <valyushash@gmail.com>
+Signed-off-by: Sagi Grimberg <sagig@mellanox.com>
+Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/ulp/isert/ib_isert.c |  107 ++++++++++++++++++++++----------
+ drivers/infiniband/ulp/isert/ib_isert.h |    1 
+ 2 files changed, 77 insertions(+), 31 deletions(-)
+
+--- a/drivers/infiniband/ulp/isert/ib_isert.c
++++ b/drivers/infiniband/ulp/isert/ib_isert.c
+@@ -58,6 +58,7 @@ static int
+ isert_rdma_post_recvl(struct isert_conn *isert_conn);
+ static int
+ isert_rdma_accept(struct isert_conn *isert_conn);
++struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np);
+ static void
+ isert_qp_event_callback(struct ib_event *e, void *context)
+@@ -573,8 +574,8 @@ err:
+ static int
+ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
+ {
+-      struct iscsi_np *np = cma_id->context;
+-      struct isert_np *isert_np = np->np_context;
++      struct isert_np *isert_np = cma_id->context;
++      struct iscsi_np *np = isert_np->np;
+       struct isert_conn *isert_conn;
+       struct isert_device *device;
+       struct ib_device *ib_dev = cma_id->device;
+@@ -851,17 +852,41 @@ isert_conn_terminate(struct isert_conn *
+ }
+ static int
+-isert_disconnected_handler(struct rdma_cm_id *cma_id)
++isert_np_cma_handler(struct isert_np *isert_np,
++                   enum rdma_cm_event_type event)
+ {
+-      struct iscsi_np *np = cma_id->context;
+-      struct isert_np *isert_np = np->np_context;
+-      struct isert_conn *isert_conn;
++      pr_debug("isert np %p, handling event %d\n", isert_np, event);
+-      if (isert_np->np_cm_id == cma_id) {
++      switch (event) {
++      case RDMA_CM_EVENT_DEVICE_REMOVAL:
+               isert_np->np_cm_id = NULL;
+-              return -1;
++              break;
++      case RDMA_CM_EVENT_ADDR_CHANGE:
++              isert_np->np_cm_id = isert_setup_id(isert_np);
++              if (IS_ERR(isert_np->np_cm_id)) {
++                      pr_err("isert np %p setup id failed: %ld\n",
++                               isert_np, PTR_ERR(isert_np->np_cm_id));
++                      isert_np->np_cm_id = NULL;
++              }
++              break;
++      default:
++              pr_err("isert np %p Unexpected event %d\n",
++                        isert_np, event);
+       }
++      return -1;
++}
++
++static int
++isert_disconnected_handler(struct rdma_cm_id *cma_id,
++                         enum rdma_cm_event_type event)
++{
++      struct isert_np *isert_np = cma_id->context;
++      struct isert_conn *isert_conn;
++
++      if (isert_np->np_cm_id == cma_id)
++              return isert_np_cma_handler(cma_id->context, event);
++
+       isert_conn = cma_id->qp->qp_context;
+       mutex_lock(&isert_conn->conn_mutex);
+@@ -904,7 +929,7 @@ isert_cma_handler(struct rdma_cm_id *cma
+       case RDMA_CM_EVENT_DISCONNECTED:   /* FALLTHRU */
+       case RDMA_CM_EVENT_DEVICE_REMOVAL: /* FALLTHRU */
+       case RDMA_CM_EVENT_TIMEWAIT_EXIT:  /* FALLTHRU */
+-              ret = isert_disconnected_handler(cma_id);
++              ret = isert_disconnected_handler(cma_id, event->event);
+               break;
+       case RDMA_CM_EVENT_REJECTED:       /* FALLTHRU */
+       case RDMA_CM_EVENT_UNREACHABLE:    /* FALLTHRU */
+@@ -3065,13 +3090,51 @@ isert_response_queue(struct iscsi_conn *
+       return ret;
+ }
++struct rdma_cm_id *
++isert_setup_id(struct isert_np *isert_np)
++{
++      struct iscsi_np *np = isert_np->np;
++      struct rdma_cm_id *id;
++      struct sockaddr *sa;
++      int ret;
++
++      sa = (struct sockaddr *)&np->np_sockaddr;
++      pr_debug("ksockaddr: %p, sa: %p\n", &np->np_sockaddr, sa);
++
++      id = rdma_create_id(isert_cma_handler, isert_np,
++                          RDMA_PS_TCP, IB_QPT_RC);
++      if (IS_ERR(id)) {
++              pr_err("rdma_create_id() failed: %ld\n", PTR_ERR(id));
++              ret = PTR_ERR(id);
++              goto out;
++      }
++      pr_debug("id %p context %p\n", id, id->context);
++
++      ret = rdma_bind_addr(id, sa);
++      if (ret) {
++              pr_err("rdma_bind_addr() failed: %d\n", ret);
++              goto out_id;
++      }
++
++      ret = rdma_listen(id, ISERT_RDMA_LISTEN_BACKLOG);
++      if (ret) {
++              pr_err("rdma_listen() failed: %d\n", ret);
++              goto out_id;
++      }
++
++      return id;
++out_id:
++      rdma_destroy_id(id);
++out:
++      return ERR_PTR(ret);
++}
++
+ static int
+ isert_setup_np(struct iscsi_np *np,
+              struct __kernel_sockaddr_storage *ksockaddr)
+ {
+       struct isert_np *isert_np;
+       struct rdma_cm_id *isert_lid;
+-      struct sockaddr *sa;
+       int ret;
+       isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL);
+@@ -3083,9 +3146,8 @@ isert_setup_np(struct iscsi_np *np,
+       mutex_init(&isert_np->np_accept_mutex);
+       INIT_LIST_HEAD(&isert_np->np_accept_list);
+       init_completion(&isert_np->np_login_comp);
++      isert_np->np = np;
+-      sa = (struct sockaddr *)ksockaddr;
+-      pr_debug("ksockaddr: %p, sa: %p\n", ksockaddr, sa);
+       /*
+        * Setup the np->np_sockaddr from the passed sockaddr setup
+        * in iscsi_target_configfs.c code..
+@@ -3093,37 +3155,20 @@ isert_setup_np(struct iscsi_np *np,
+       memcpy(&np->np_sockaddr, ksockaddr,
+              sizeof(struct __kernel_sockaddr_storage));
+-      isert_lid = rdma_create_id(isert_cma_handler, np, RDMA_PS_TCP,
+-                              IB_QPT_RC);
++      isert_lid = isert_setup_id(isert_np);
+       if (IS_ERR(isert_lid)) {
+-              pr_err("rdma_create_id() for isert_listen_handler failed: %ld\n",
+-                     PTR_ERR(isert_lid));
+               ret = PTR_ERR(isert_lid);
+               goto out;
+       }
+-      ret = rdma_bind_addr(isert_lid, sa);
+-      if (ret) {
+-              pr_err("rdma_bind_addr() for isert_lid failed: %d\n", ret);
+-              goto out_lid;
+-      }
+-
+-      ret = rdma_listen(isert_lid, ISERT_RDMA_LISTEN_BACKLOG);
+-      if (ret) {
+-              pr_err("rdma_listen() for isert_lid failed: %d\n", ret);
+-              goto out_lid;
+-      }
+-
+       isert_np->np_cm_id = isert_lid;
+       np->np_context = isert_np;
+-      pr_debug("Setup isert_lid->context: %p\n", isert_lid->context);
+       return 0;
+-out_lid:
+-      rdma_destroy_id(isert_lid);
+ out:
+       kfree(isert_np);
++
+       return ret;
+ }
+--- a/drivers/infiniband/ulp/isert/ib_isert.h
++++ b/drivers/infiniband/ulp/isert/ib_isert.h
+@@ -183,6 +183,7 @@ struct isert_device {
+ };
+ struct isert_np {
++      struct iscsi_np         *np;
+       struct semaphore        np_sem;
+       struct rdma_cm_id       *np_cm_id;
+       struct mutex            np_accept_mutex;
diff --git a/queue-3.18/iser-target-parallelize-cm-connection-establishment.patch b/queue-3.18/iser-target-parallelize-cm-connection-establishment.patch
new file mode 100644 (file)
index 0000000..bdaff55
--- /dev/null
@@ -0,0 +1,235 @@
+From 2371e5da8cfe91443339b54444dec6254fdd6dfc Mon Sep 17 00:00:00 2001
+From: Sagi Grimberg <sagig@mellanox.com>
+Date: Tue, 2 Dec 2014 16:57:21 +0200
+Subject: iser-target: Parallelize CM connection establishment
+
+From: Sagi Grimberg <sagig@mellanox.com>
+
+commit 2371e5da8cfe91443339b54444dec6254fdd6dfc upstream.
+
+There is no point in accepting a new CM request only
+when we are completely done with the last iscsi login.
+Instead we accept immediately; this also lets the CM
+connection reach the connected state and allows the
+initiator to send the first login. We mark that we got
+the initial login and let the iscsi layer pick it up
+when it gets there.
+
+This reduces the parallel login sequence by a factor of
+more than 4 (and more for multi-login) and also prevents
+the initiator (which does all logins in parallel) from
+giving up on login timeout expiration.
+
+In order to support a multi-request login sequence (CHAP)
+we call isert_rx_login_req from isert_rx_completion instead
+of letting isert_get_login_rx call it.
+
+Squashed:
+
+iser-target: Use kref_get_unless_zero in connected_handler
+iser-target: Acquire conn_mutex when changing connection state
+iser-target: Reject connect request in failure path
+
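+The core of the change is a completion-based handoff between the rx
+path and the login thread. A condensed sketch follows (the two helper
+names are illustrative only; the real logic lives in
+isert_rx_completion() and isert_get_login_rx() in the hunks below):
+
+  /* rx completion path: stash the login PDU length and wake whoever
+   * is waiting in isert_get_login_rx(). */
+  static void isert_note_login_rx(struct isert_conn *isert_conn, int xfer_len)
+  {
+          isert_conn->login_req_len = xfer_len - ISER_HEADERS_LEN;
+
+          mutex_lock(&isert_conn->conn_mutex);
+          complete(&isert_conn->login_req_comp);
+          mutex_unlock(&isert_conn->conn_mutex);
+  }
+
+  /* login thread: block until the first login PDU has really arrived,
+   * instead of assuming it is already there. */
+  static int isert_wait_login_rx(struct isert_conn *isert_conn)
+  {
+          int ret;
+
+          ret = wait_for_completion_interruptible(&isert_conn->login_req_comp);
+          if (ret)
+                  return ret;
+
+          reinit_completion(&isert_conn->login_req_comp);
+          return 0;
+  }
+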
+Signed-off-by: Sagi Grimberg <sagig@mellanox.com>
+Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/ulp/isert/ib_isert.c |   84 ++++++++++++++++++++++----------
+ drivers/infiniband/ulp/isert/ib_isert.h |    2 
+ 2 files changed, 62 insertions(+), 24 deletions(-)
+
+--- a/drivers/infiniband/ulp/isert/ib_isert.c
++++ b/drivers/infiniband/ulp/isert/ib_isert.c
+@@ -54,6 +54,10 @@ isert_reg_rdma(struct iscsi_conn *conn,
+              struct isert_rdma_wr *wr);
+ static int
+ isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd);
++static int
++isert_rdma_post_recvl(struct isert_conn *isert_conn);
++static int
++isert_rdma_accept(struct isert_conn *isert_conn);
+ static void
+ isert_qp_event_callback(struct ib_event *e, void *context)
+@@ -590,6 +594,7 @@ isert_connect_request(struct rdma_cm_id
+       isert_conn->state = ISER_CONN_INIT;
+       INIT_LIST_HEAD(&isert_conn->conn_accept_node);
+       init_completion(&isert_conn->conn_login_comp);
++      init_completion(&isert_conn->login_req_comp);
+       init_completion(&isert_conn->conn_wait);
+       init_completion(&isert_conn->conn_wait_comp_err);
+       kref_init(&isert_conn->conn_kref);
+@@ -681,6 +686,14 @@ isert_connect_request(struct rdma_cm_id
+       if (ret)
+               goto out_conn_dev;
++      ret = isert_rdma_post_recvl(isert_conn);
++      if (ret)
++              goto out_conn_dev;
++
++      ret = isert_rdma_accept(isert_conn);
++      if (ret)
++              goto out_conn_dev;
++
+       mutex_lock(&isert_np->np_accept_mutex);
+       list_add_tail(&isert_conn->conn_accept_node, &isert_np->np_accept_list);
+       mutex_unlock(&isert_np->np_accept_mutex);
+@@ -705,6 +718,7 @@ out_login_buf:
+       kfree(isert_conn->login_buf);
+ out:
+       kfree(isert_conn);
++      rdma_reject(cma_id, NULL, 0);
+       return ret;
+ }
+@@ -758,8 +772,15 @@ isert_connected_handler(struct rdma_cm_i
+       pr_info("conn %p\n", isert_conn);
+-      isert_conn->state = ISER_CONN_UP;
+-      kref_get(&isert_conn->conn_kref);
++      if (!kref_get_unless_zero(&isert_conn->conn_kref)) {
++              pr_warn("conn %p connect_release is running\n", isert_conn);
++              return;
++      }
++
++      mutex_lock(&isert_conn->conn_mutex);
++      if (isert_conn->state != ISER_CONN_FULL_FEATURE)
++              isert_conn->state = ISER_CONN_UP;
++      mutex_unlock(&isert_conn->conn_mutex);
+ }
+ static void
+@@ -1118,7 +1139,9 @@ isert_put_login_tx(struct iscsi_conn *co
+                               return ret;
+                       /* Now we are in FULL_FEATURE phase */
++                      mutex_lock(&isert_conn->conn_mutex);
+                       isert_conn->state = ISER_CONN_FULL_FEATURE;
++                      mutex_unlock(&isert_conn->conn_mutex);
+                       goto post_send;
+               }
+@@ -1135,18 +1158,17 @@ post_send:
+ }
+ static void
+-isert_rx_login_req(struct iser_rx_desc *rx_desc, int rx_buflen,
+-                 struct isert_conn *isert_conn)
++isert_rx_login_req(struct isert_conn *isert_conn)
+ {
++      struct iser_rx_desc *rx_desc = (void *)isert_conn->login_req_buf;
++      int rx_buflen = isert_conn->login_req_len;
+       struct iscsi_conn *conn = isert_conn->conn;
+       struct iscsi_login *login = conn->conn_login;
+       int size;
+-      if (!login) {
+-              pr_err("conn->conn_login is NULL\n");
+-              dump_stack();
+-              return;
+-      }
++      pr_info("conn %p\n", isert_conn);
++
++      WARN_ON_ONCE(!login);
+       if (login->first_request) {
+               struct iscsi_login_req *login_req =
+@@ -1509,11 +1531,20 @@ isert_rx_completion(struct iser_rx_desc
+                hdr->opcode, hdr->itt, hdr->flags,
+                (int)(xfer_len - ISER_HEADERS_LEN));
+-      if ((char *)desc == isert_conn->login_req_buf)
+-              isert_rx_login_req(desc, xfer_len - ISER_HEADERS_LEN,
+-                                 isert_conn);
+-      else
++      if ((char *)desc == isert_conn->login_req_buf) {
++              isert_conn->login_req_len = xfer_len - ISER_HEADERS_LEN;
++              if (isert_conn->conn) {
++                      struct iscsi_login *login = isert_conn->conn->conn_login;
++
++                      if (login && !login->first_request)
++                              isert_rx_login_req(isert_conn);
++              }
++              mutex_lock(&isert_conn->conn_mutex);
++              complete(&isert_conn->login_req_comp);
++              mutex_unlock(&isert_conn->conn_mutex);
++      } else {
+               isert_rx_do_work(desc, isert_conn);
++      }
+       ib_dma_sync_single_for_device(ib_dev, rx_dma, rx_buflen,
+                                     DMA_FROM_DEVICE);
+@@ -3120,7 +3151,15 @@ isert_get_login_rx(struct iscsi_conn *co
+       struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
+       int ret;
+-      pr_debug("isert_get_login_rx before conn_login_comp conn: %p\n", conn);
++      pr_info("before login_req comp conn: %p\n", isert_conn);
++      ret = wait_for_completion_interruptible(&isert_conn->login_req_comp);
++      if (ret) {
++              pr_err("isert_conn %p interrupted before got login req\n",
++                        isert_conn);
++              return ret;
++      }
++      reinit_completion(&isert_conn->login_req_comp);
++
+       /*
+        * For login requests after the first PDU, isert_rx_login_req() will
+        * kick schedule_delayed_work(&conn->login_work) as the packet is
+@@ -3130,11 +3169,15 @@ isert_get_login_rx(struct iscsi_conn *co
+       if (!login->first_request)
+               return 0;
++      isert_rx_login_req(isert_conn);
++
++      pr_info("before conn_login_comp conn: %p\n", conn);
+       ret = wait_for_completion_interruptible(&isert_conn->conn_login_comp);
+       if (ret)
+               return ret;
+-      pr_debug("isert_get_login_rx processing login->req: %p\n", login->req);
++      pr_info("processing login->req: %p\n", login->req);
++
+       return 0;
+ }
+@@ -3212,17 +3255,10 @@ accept_wait:
+       isert_conn->conn = conn;
+       max_accept = 0;
+-      ret = isert_rdma_post_recvl(isert_conn);
+-      if (ret)
+-              return ret;
+-
+-      ret = isert_rdma_accept(isert_conn);
+-      if (ret)
+-              return ret;
+-
+       isert_set_conn_info(np, conn, isert_conn);
+-      pr_debug("Processing isert_accept_np: isert_conn: %p\n", isert_conn);
++      pr_debug("Processing isert_conn: %p\n", isert_conn);
++
+       return 0;
+ }
+--- a/drivers/infiniband/ulp/isert/ib_isert.h
++++ b/drivers/infiniband/ulp/isert/ib_isert.h
+@@ -126,6 +126,7 @@ struct isert_conn {
+       char                    *login_req_buf;
+       char                    *login_rsp_buf;
+       u64                     login_req_dma;
++      int                     login_req_len;
+       u64                     login_rsp_dma;
+       unsigned int            conn_rx_desc_head;
+       struct iser_rx_desc     *conn_rx_descs;
+@@ -133,6 +134,7 @@ struct isert_conn {
+       struct iscsi_conn       *conn;
+       struct list_head        conn_accept_node;
+       struct completion       conn_login_comp;
++      struct completion       login_req_comp;
+       struct iser_tx_desc     conn_login_tx_desc;
+       struct rdma_cm_id       *conn_cm_id;
+       struct ib_pd            *conn_pd;
diff --git a/queue-3.18/series b/queue-3.18/series
index 0148c5c12ee2961f12f040244e4e2c7d48f9eb80..490e9f294126880bbf0da879909e64749067c7ca 100644 (file)
@@ -67,3 +67,13 @@ uapi-linux-target_core_user.h-fix-headers_install.sh-badness.patch
 tcm_loop-fix-wrong-i_t-nexus-association.patch
 ib-iser-fix-possible-sq-overflow.patch
 genirq-prevent-proc-race-against-freeing-of-irq-descriptors.patch
+iscsi-target-fail-connection-on-short-sendmsg-writes.patch
+iscsi-iser-target-initiate-termination-only-once.patch
+iser-target-fix-flush-disconnect-completion-handling.patch
+iser-target-parallelize-cm-connection-establishment.patch
+iser-target-fix-connected_handler-teardown-flow-race.patch
+iser-target-handle-addr_change-event-for-listener-cm_id.patch
+iser-target-fix-implicit-termination-of-connections.patch
+iser-target-allocate-pi-contexts-dynamically.patch
+iser-target-fix-null-dereference-in-sw-mode-dif.patch
+iscsi-iser-target-expose-supported-protection-ops-according-to-t10_pi.patch