--- /dev/null
+From nab@linux-iscsi.org Tue Feb 3 15:06:58 2015
+From: "Nicholas A. Bellinger" <nab@linux-iscsi.org>
+Date: Fri, 30 Jan 2015 22:17:20 +0000
+Subject: IB/isert: Adjust CQ size to HW limits
+To: target-devel <target-devel@vger.kernel.org>
+Cc: Greg-KH <gregkh@linuxfoundation.org>, stable <stable@vger.kernel.org>, Chris Moore <Chris.Moore@Emulex.Com>
+Message-ID: <1422656251-29468-2-git-send-email-nab@linux-iscsi.org>
+
+From: Chris Moore <Chris.Moore@Emulex.Com>
+
+commit b1a5ad006b34ded9dc7ec64988deba1b3ecad367 upstream.
+
+isert has an issue of trying to create a CQ with more CQEs than are
+supported by the hardware, that currently results in failures during
+isert_device creation during first session login.
+
+This is the isert version of the patch that Minh Tran submitted for
+iser, and is simply a workaround required to function with existing
+ocrdma hardware.
+
+Signed-off-by: Chris Moore <chris.moore@emulex.com>
+Reviewed-by: Sagi Grimberg <sagig@mellanox.com>
+Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/infiniband/ulp/isert/ib_isert.c | 14 ++++++++++++--
+ 1 file changed, 12 insertions(+), 2 deletions(-)
+
+--- a/drivers/infiniband/ulp/isert/ib_isert.c
++++ b/drivers/infiniband/ulp/isert/ib_isert.c
+@@ -212,6 +212,13 @@ isert_create_device_ib_res(struct isert_
+ struct ib_device *ib_dev = device->ib_device;
+ struct isert_cq_desc *cq_desc;
+ int ret = 0, i, j;
++ int max_rx_cqe, max_tx_cqe;
++ struct ib_device_attr dev_attr;
++
++ memset(&dev_attr, 0, sizeof(struct ib_device_attr));
++ ret = isert_query_device(device->ib_device, &dev_attr);
++ if (ret)
++ return ret;
+
+ device->cqs_used = min_t(int, num_online_cpus(),
+ device->ib_device->num_comp_vectors);
+@@ -234,6 +241,9 @@ isert_create_device_ib_res(struct isert_
+ goto out_cq_desc;
+ }
+
++ max_rx_cqe = min(ISER_MAX_RX_CQ_LEN, dev_attr.max_cqe);
++ max_tx_cqe = min(ISER_MAX_TX_CQ_LEN, dev_attr.max_cqe);
++
+ for (i = 0; i < device->cqs_used; i++) {
+ cq_desc[i].device = device;
+ cq_desc[i].cq_index = i;
+@@ -242,7 +252,7 @@ isert_create_device_ib_res(struct isert_
+ isert_cq_rx_callback,
+ isert_cq_event_callback,
+ (void *)&cq_desc[i],
+- ISER_MAX_RX_CQ_LEN, i);
++ max_rx_cqe, i);
+ if (IS_ERR(device->dev_rx_cq[i])) {
+ ret = PTR_ERR(device->dev_rx_cq[i]);
+ device->dev_rx_cq[i] = NULL;
+@@ -253,7 +263,7 @@ isert_create_device_ib_res(struct isert_
+ isert_cq_tx_callback,
+ isert_cq_event_callback,
+ (void *)&cq_desc[i],
+- ISER_MAX_TX_CQ_LEN, i);
++ max_tx_cqe, i);
+ if (IS_ERR(device->dev_tx_cq[i])) {
+ ret = PTR_ERR(device->dev_tx_cq[i]);
+ device->dev_tx_cq[i] = NULL;
--- /dev/null
+From nab@linux-iscsi.org Tue Feb 3 15:07:15 2015
+From: "Nicholas A. Bellinger" <nab@linux-iscsi.org>
+Date: Fri, 30 Jan 2015 22:17:21 +0000
+Subject: ib_isert: Add max_send_sge=2 minimum for control PDU responses
+To: target-devel <target-devel@vger.kernel.org>
+Cc: Greg-KH <gregkh@linuxfoundation.org>, stable <stable@vger.kernel.org>, Or Gerlitz <ogerlitz@mellanox.com>
+Message-ID: <1422656251-29468-3-git-send-email-nab@linux-iscsi.org>
+
+
+From: Or Gerlitz <ogerlitz@mellanox.com>
+
+commit f57915cfa5b2b14c1cffa2e83c034f55e3f0e70d upstream.
+
+This patch adds a max_send_sge=2 minimum in isert_conn_setup_qp()
+to ensure outgoing control PDU responses with tx_desc->num_sge=2
+are able to function correctly.
+
+This addresses a bug with RDMA hardware using dev_attr.max_sge=3,
+that in the original code with the ConnectX-2 work-around would
+result in isert_conn->max_sge=1 being negotiated.
+
+Originally reported by Chris with ocrdma driver.
+
+Reported-by: Chris Moore <Chris.Moore@emulex.com>
+Tested-by: Chris Moore <Chris.Moore@emulex.com>
+Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
+Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/infiniband/ulp/isert/ib_isert.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+--- a/drivers/infiniband/ulp/isert/ib_isert.c
++++ b/drivers/infiniband/ulp/isert/ib_isert.c
+@@ -107,9 +107,12 @@ isert_conn_setup_qp(struct isert_conn *i
+ attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS;
+ /*
+ * FIXME: Use devattr.max_sge - 2 for max_send_sge as
+- * work-around for RDMA_READ..
++ * work-around for RDMA_READs with ConnectX-2.
++ *
++ * Also, still make sure to have at least two SGEs for
++ * outgoing control PDU responses.
+ */
+- attr.cap.max_send_sge = devattr.max_sge - 2;
++ attr.cap.max_send_sge = max(2, devattr.max_sge - 2);
+ isert_conn->max_sge = attr.cap.max_send_sge;
+
+ attr.cap.max_recv_sge = 1;
--- /dev/null
+From nab@linux-iscsi.org Tue Feb 3 15:08:01 2015
+From: "Nicholas A. Bellinger" <nab@linux-iscsi.org>
+Date: Fri, 30 Jan 2015 22:17:25 +0000
+Subject: iscsi,iser-target: Initiate termination only once
+To: target-devel <target-devel@vger.kernel.org>
+Cc: Greg-KH <gregkh@linuxfoundation.org>, stable <stable@vger.kernel.org>, Sagi Grimberg <sagig@mellanox.com>
+Message-ID: <1422656251-29468-7-git-send-email-nab@linux-iscsi.org>
+
+
+From: Sagi Grimberg <sagig@mellanox.com>
+
+commit 954f23722b5753305be490330cf2680b7a25f4a3 upstream.
+
+Since commit 0fc4ea701fcf ("Target/iser: Don't put isert_conn inside
+disconnected handler") we put the conn kref in isert_wait_conn, so we
+need .wait_conn to be invoked also in the error path.
+
+Introduce call to isert_conn_terminate (called under lock)
+which transitions the connection state to TERMINATING and calls
+rdma_disconnect. If the state is already terminating, just bail
+out (termination already started).
+
+Also, make sure to destroy the connection when getting a connect
+error event if we didn't get to connected (state UP). Same for the
+handling of REJECTED and UNREACHABLE cma events.
+
+Squashed:
+
+iscsi-target: Add call to wait_conn in establishment error flow
+
+Reported-by: Slava Shwartsman <valyushash@gmail.com>
+Signed-off-by: Sagi Grimberg <sagig@mellanox.com>
+Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/infiniband/ulp/isert/ib_isert.c | 84 ++++++++++++++++++------------
+ drivers/infiniband/ulp/isert/ib_isert.h | 1
+ drivers/target/iscsi/iscsi_target_login.c | 3 +
+ 3 files changed, 54 insertions(+), 34 deletions(-)
+
+--- a/drivers/infiniband/ulp/isert/ib_isert.c
++++ b/drivers/infiniband/ulp/isert/ib_isert.c
+@@ -565,6 +565,33 @@ isert_put_conn(struct isert_conn *isert_
+ kref_put(&isert_conn->conn_kref, isert_release_conn_kref);
+ }
+
++/**
++ * isert_conn_terminate() - Initiate connection termination
++ * @isert_conn: isert connection struct
++ *
++ * Notes:
++ * In case the connection state is UP, move state
++ * to TEMINATING and start teardown sequence (rdma_disconnect).
++ *
++ * This routine must be called with conn_mutex held. Thus it is
++ * safe to call multiple times.
++ */
++static void
++isert_conn_terminate(struct isert_conn *isert_conn)
++{
++ int err;
++
++ if (isert_conn->state == ISER_CONN_UP) {
++ isert_conn->state = ISER_CONN_TERMINATING;
++ pr_info("Terminating conn %p state %d\n",
++ isert_conn, isert_conn->state);
++ err = rdma_disconnect(isert_conn->conn_cm_id);
++ if (err)
++ pr_warn("Failed rdma_disconnect isert_conn %p\n",
++ isert_conn);
++ }
++}
++
+ static void
+ isert_disconnect_work(struct work_struct *work)
+ {
+@@ -573,33 +600,15 @@ isert_disconnect_work(struct work_struct
+
+ pr_debug("isert_disconnect_work(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
+ mutex_lock(&isert_conn->conn_mutex);
+- if (isert_conn->state == ISER_CONN_UP)
+- isert_conn->state = ISER_CONN_TERMINATING;
+-
+- if (isert_conn->post_recv_buf_count == 0 &&
+- atomic_read(&isert_conn->post_send_buf_count) == 0) {
+- mutex_unlock(&isert_conn->conn_mutex);
+- goto wake_up;
+- }
+- if (!isert_conn->conn_cm_id) {
+- mutex_unlock(&isert_conn->conn_mutex);
+- isert_put_conn(isert_conn);
+- return;
+- }
+-
+- if (isert_conn->disconnect) {
+- /* Send DREQ/DREP towards our initiator */
+- rdma_disconnect(isert_conn->conn_cm_id);
+- }
+-
++ isert_conn_terminate(isert_conn);
+ mutex_unlock(&isert_conn->conn_mutex);
+
+-wake_up:
++ pr_info("conn %p completing conn_wait\n", isert_conn);
+ complete(&isert_conn->conn_wait);
+ }
+
+ static int
+-isert_disconnected_handler(struct rdma_cm_id *cma_id, bool disconnect)
++isert_disconnected_handler(struct rdma_cm_id *cma_id)
+ {
+ struct isert_conn *isert_conn;
+
+@@ -612,18 +621,24 @@ isert_disconnected_handler(struct rdma_c
+
+ isert_conn = (struct isert_conn *)cma_id->context;
+
+- isert_conn->disconnect = disconnect;
+ INIT_WORK(&isert_conn->conn_logout_work, isert_disconnect_work);
+ schedule_work(&isert_conn->conn_logout_work);
+
+ return 0;
+ }
+
++static void
++isert_connect_error(struct rdma_cm_id *cma_id)
++{
++ struct isert_conn *isert_conn = (struct isert_conn *)cma_id->context;
++
++ isert_put_conn(isert_conn);
++}
++
+ static int
+ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
+ {
+ int ret = 0;
+- bool disconnect = false;
+
+ pr_debug("isert_cma_handler: event %d status %d conn %p id %p\n",
+ event->event, event->status, cma_id->context, cma_id);
+@@ -641,11 +656,14 @@ isert_cma_handler(struct rdma_cm_id *cma
+ case RDMA_CM_EVENT_ADDR_CHANGE: /* FALLTHRU */
+ case RDMA_CM_EVENT_DISCONNECTED: /* FALLTHRU */
+ case RDMA_CM_EVENT_DEVICE_REMOVAL: /* FALLTHRU */
+- disconnect = true;
+ case RDMA_CM_EVENT_TIMEWAIT_EXIT: /* FALLTHRU */
+- ret = isert_disconnected_handler(cma_id, disconnect);
++ ret = isert_disconnected_handler(cma_id);
+ break;
++ case RDMA_CM_EVENT_REJECTED: /* FALLTHRU */
++ case RDMA_CM_EVENT_UNREACHABLE: /* FALLTHRU */
+ case RDMA_CM_EVENT_CONNECT_ERROR:
++ isert_connect_error(cma_id);
++ break;
+ default:
+ pr_err("Unhandled RDMA CMA event: %d\n", event->event);
+ break;
+@@ -1496,7 +1514,7 @@ isert_cq_rx_comp_err(struct isert_conn *
+ msleep(3000);
+
+ mutex_lock(&isert_conn->conn_mutex);
+- isert_conn->state = ISER_CONN_DOWN;
++ isert_conn_terminate(isert_conn);
+ mutex_unlock(&isert_conn->conn_mutex);
+
+ iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
+@@ -2268,10 +2286,6 @@ static void isert_wait_conn(struct iscsi
+ pr_debug("isert_wait_conn: Starting \n");
+
+ mutex_lock(&isert_conn->conn_mutex);
+- if (isert_conn->conn_cm_id) {
+- pr_debug("Calling rdma_disconnect from isert_wait_conn\n");
+- rdma_disconnect(isert_conn->conn_cm_id);
+- }
+ /*
+ * Only wait for conn_wait_comp_err if the isert_conn made it
+ * into full feature phase..
+@@ -2280,13 +2294,17 @@ static void isert_wait_conn(struct iscsi
+ mutex_unlock(&isert_conn->conn_mutex);
+ return;
+ }
+- if (isert_conn->state == ISER_CONN_UP)
+- isert_conn->state = ISER_CONN_TERMINATING;
++ isert_conn_terminate(isert_conn);
+ mutex_unlock(&isert_conn->conn_mutex);
+
+ wait_for_completion(&isert_conn->conn_wait_comp_err);
+-
+ wait_for_completion(&isert_conn->conn_wait);
++
++ mutex_lock(&isert_conn->conn_mutex);
++ isert_conn->state = ISER_CONN_DOWN;
++ mutex_unlock(&isert_conn->conn_mutex);
++
++ pr_info("Destroying conn %p\n", isert_conn);
+ isert_put_conn(isert_conn);
+ }
+
+--- a/drivers/infiniband/ulp/isert/ib_isert.h
++++ b/drivers/infiniband/ulp/isert/ib_isert.h
+@@ -105,7 +105,6 @@ struct isert_conn {
+ struct completion conn_wait;
+ struct completion conn_wait_comp_err;
+ struct kref conn_kref;
+- bool disconnect;
+ };
+
+ #define ISERT_MAX_CQ 64
+--- a/drivers/target/iscsi/iscsi_target_login.c
++++ b/drivers/target/iscsi/iscsi_target_login.c
+@@ -1360,6 +1360,9 @@ old_sess_out:
+ conn->sock = NULL;
+ }
+
++ if (conn->conn_transport->iscsit_wait_conn)
++ conn->conn_transport->iscsit_wait_conn(conn);
++
+ if (conn->conn_transport->iscsit_free_conn)
+ conn->conn_transport->iscsit_free_conn(conn);
+
--- /dev/null
+From nab@linux-iscsi.org Tue Feb 3 15:08:27 2015
+From: "Nicholas A. Bellinger" <nab@linux-iscsi.org>
+Date: Fri, 30 Jan 2015 22:17:28 +0000
+Subject: iser-target: Fix connected_handler + teardown flow race
+To: target-devel <target-devel@vger.kernel.org>
+Cc: Greg-KH <gregkh@linuxfoundation.org>, stable <stable@vger.kernel.org>, Sagi Grimberg <sagig@mellanox.com>
+Message-ID: <1422656251-29468-10-git-send-email-nab@linux-iscsi.org>
+
+
+From: Sagi Grimberg <sagig@mellanox.com>
+
+commit 19e2090fb246ca21b3e569ead51a6a7a1748eadd upstream.
+
+Take isert_conn pointer from cm_id->qp->qp_context. This
+will allow us to know that the cm_id context is always
+the network portal. This will make the cm_id event check
+(connection or network portal) more reliable.
+
+In order to avoid a NULL dereference in cma_id->qp->qp_context
+we destroy the qp after we destroy the cm_id (and make the
+dereference safe). Session establishment/teardown sequences
+can happen in parallel; we should take into account that
+connected_handler might race with the connection teardown flow.
+
+Also, protect isert_conn->conn_device->active_qps decrement
+within the error path during QP creation failure and the
+normal teardown path in isert_connect_release().
+
+Squashed:
+
+iser-target: Decrement completion context active_qps in error flow
+
+Signed-off-by: Sagi Grimberg <sagig@mellanox.com>
+Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/infiniband/ulp/isert/ib_isert.c | 31 +++++++++++++++++++------------
+ 1 file changed, 19 insertions(+), 12 deletions(-)
+
+--- a/drivers/infiniband/ulp/isert/ib_isert.c
++++ b/drivers/infiniband/ulp/isert/ib_isert.c
+@@ -132,12 +132,18 @@ isert_conn_setup_qp(struct isert_conn *i
+ ret = rdma_create_qp(cma_id, isert_conn->conn_pd, &attr);
+ if (ret) {
+ pr_err("rdma_create_qp failed for cma_id %d\n", ret);
+- return ret;
++ goto err;
+ }
+ isert_conn->conn_qp = cma_id->qp;
+ pr_debug("rdma_create_qp() returned success >>>>>>>>>>>>>>>>>>>>>>>>>.\n");
+
+ return 0;
++err:
++ mutex_lock(&device_list_mutex);
++ device->cq_active_qps[min_index]--;
++ mutex_unlock(&device_list_mutex);
++
++ return ret;
+ }
+
+ static void
+@@ -425,7 +431,6 @@ isert_connect_request(struct rdma_cm_id
+ kref_init(&isert_conn->conn_kref);
+ mutex_init(&isert_conn->conn_mutex);
+
+- cma_id->context = isert_conn;
+ isert_conn->conn_cm_id = cma_id;
+ isert_conn->responder_resources = event->param.conn.responder_resources;
+ isert_conn->initiator_depth = event->param.conn.initiator_depth;
+@@ -526,18 +531,20 @@ isert_connect_release(struct isert_conn
+
+ pr_debug("Entering isert_connect_release(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
+
++ isert_free_rx_descriptors(isert_conn);
++ rdma_destroy_id(isert_conn->conn_cm_id);
++
+ if (isert_conn->conn_qp) {
+ cq_index = ((struct isert_cq_desc *)
+ isert_conn->conn_qp->recv_cq->cq_context)->cq_index;
+ pr_debug("isert_connect_release: cq_index: %d\n", cq_index);
++ mutex_lock(&device_list_mutex);
+ isert_conn->conn_device->cq_active_qps[cq_index]--;
++ mutex_unlock(&device_list_mutex);
+
+- rdma_destroy_qp(isert_conn->conn_cm_id);
++ ib_destroy_qp(isert_conn->conn_qp);
+ }
+
+- isert_free_rx_descriptors(isert_conn);
+- rdma_destroy_id(isert_conn->conn_cm_id);
+-
+ if (isert_conn->login_buf) {
+ ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
+ ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
+@@ -557,7 +564,7 @@ isert_connect_release(struct isert_conn
+ static void
+ isert_connected_handler(struct rdma_cm_id *cma_id)
+ {
+- struct isert_conn *isert_conn = cma_id->context;
++ struct isert_conn *isert_conn = cma_id->qp->qp_context;
+
+ pr_info("conn %p\n", isert_conn);
+
+@@ -635,16 +642,16 @@ isert_conn_terminate(struct isert_conn *
+ static int
+ isert_disconnected_handler(struct rdma_cm_id *cma_id)
+ {
++ struct iscsi_np *np = cma_id->context;
++ struct isert_np *isert_np = np->np_context;
+ struct isert_conn *isert_conn;
+
+- if (!cma_id->qp) {
+- struct isert_np *isert_np = cma_id->context;
+-
++ if (isert_np->np_cm_id == cma_id) {
+ isert_np->np_cm_id = NULL;
+ return -1;
+ }
+
+- isert_conn = (struct isert_conn *)cma_id->context;
++ isert_conn = cma_id->qp->qp_context;
+
+ mutex_lock(&isert_conn->conn_mutex);
+ isert_conn_terminate(isert_conn);
+@@ -659,7 +666,7 @@ isert_disconnected_handler(struct rdma_c
+ static void
+ isert_connect_error(struct rdma_cm_id *cma_id)
+ {
+- struct isert_conn *isert_conn = (struct isert_conn *)cma_id->context;
++ struct isert_conn *isert_conn = cma_id->qp->qp_context;
+
+ isert_put_conn(isert_conn);
+ }
--- /dev/null
+From nab@linux-iscsi.org Tue Feb 3 15:08:10 2015
+From: "Nicholas A. Bellinger" <nab@linux-iscsi.org>
+Date: Fri, 30 Jan 2015 22:17:26 +0000
+Subject: iser-target: Fix flush + disconnect completion handling
+To: target-devel <target-devel@vger.kernel.org>
+Cc: Greg-KH <gregkh@linuxfoundation.org>, stable <stable@vger.kernel.org>, Sagi Grimberg <sagig@mellanox.com>
+Message-ID: <1422656251-29468-8-git-send-email-nab@linux-iscsi.org>
+
+
+From: Sagi Grimberg <sagig@mellanox.com>
+
+commit 128e9cc84566a84146baea2335b3824288eed817 upstream.
+
+ISER_CONN_UP state is not sufficient to know if
+we should wait for completion of flush errors and
+disconnected_handler event.
+
+Instead, split it to 2 states:
+- ISER_CONN_UP: Got to CM connected phase, This state
+indicates that we need to wait for a CM disconnect
+event before going to teardown.
+
+- ISER_CONN_FULL_FEATURE: Got to full feature phase
+after we posted login response, This state indicates
+that we posted recv buffers and we need to wait for
+flush completions before going to teardown.
+
+Also avoid deferring the disconnected handler to a work,
+and handle it within the disconnected handler itself.
+More work here is needed to handle DEVICE_REMOVAL event
+correctly (cleanup all resources).
+
+Squashed:
+
+iser-target: Don't deffer disconnected handler to a work
+
+Signed-off-by: Sagi Grimberg <sagig@mellanox.com>
+Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/infiniband/ulp/isert/ib_isert.c | 50 ++++++++++++++++++--------------
+ drivers/infiniband/ulp/isert/ib_isert.h | 2 -
+ 2 files changed, 30 insertions(+), 22 deletions(-)
+
+--- a/drivers/infiniband/ulp/isert/ib_isert.c
++++ b/drivers/infiniband/ulp/isert/ib_isert.c
+@@ -544,6 +544,9 @@ isert_connected_handler(struct rdma_cm_i
+ {
+ struct isert_conn *isert_conn = cma_id->context;
+
++ pr_info("conn %p\n", isert_conn);
++
++ isert_conn->state = ISER_CONN_UP;
+ kref_get(&isert_conn->conn_kref);
+ }
+
+@@ -570,8 +573,9 @@ isert_put_conn(struct isert_conn *isert_
+ * @isert_conn: isert connection struct
+ *
+ * Notes:
+- * In case the connection state is UP, move state
++ * In case the connection state is FULL_FEATURE, move state
+ * to TEMINATING and start teardown sequence (rdma_disconnect).
++ * In case the connection state is UP, complete flush as well.
+ *
+ * This routine must be called with conn_mutex held. Thus it is
+ * safe to call multiple times.
+@@ -581,32 +585,31 @@ isert_conn_terminate(struct isert_conn *
+ {
+ int err;
+
+- if (isert_conn->state == ISER_CONN_UP) {
+- isert_conn->state = ISER_CONN_TERMINATING;
++ switch (isert_conn->state) {
++ case ISER_CONN_TERMINATING:
++ break;
++ case ISER_CONN_UP:
++ /*
++ * No flush completions will occur as we didn't
++ * get to ISER_CONN_FULL_FEATURE yet, complete
++ * to allow teardown progress.
++ */
++ complete(&isert_conn->conn_wait_comp_err);
++ case ISER_CONN_FULL_FEATURE: /* FALLTHRU */
+ pr_info("Terminating conn %p state %d\n",
+ isert_conn, isert_conn->state);
++ isert_conn->state = ISER_CONN_TERMINATING;
+ err = rdma_disconnect(isert_conn->conn_cm_id);
+ if (err)
+ pr_warn("Failed rdma_disconnect isert_conn %p\n",
+ isert_conn);
++ break;
++ default:
++ pr_warn("conn %p teminating in state %d\n",
++ isert_conn, isert_conn->state);
+ }
+ }
+
+-static void
+-isert_disconnect_work(struct work_struct *work)
+-{
+- struct isert_conn *isert_conn = container_of(work,
+- struct isert_conn, conn_logout_work);
+-
+- pr_debug("isert_disconnect_work(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
+- mutex_lock(&isert_conn->conn_mutex);
+- isert_conn_terminate(isert_conn);
+- mutex_unlock(&isert_conn->conn_mutex);
+-
+- pr_info("conn %p completing conn_wait\n", isert_conn);
+- complete(&isert_conn->conn_wait);
+-}
+-
+ static int
+ isert_disconnected_handler(struct rdma_cm_id *cma_id)
+ {
+@@ -621,8 +624,12 @@ isert_disconnected_handler(struct rdma_c
+
+ isert_conn = (struct isert_conn *)cma_id->context;
+
+- INIT_WORK(&isert_conn->conn_logout_work, isert_disconnect_work);
+- schedule_work(&isert_conn->conn_logout_work);
++ mutex_lock(&isert_conn->conn_mutex);
++ isert_conn_terminate(isert_conn);
++ mutex_unlock(&isert_conn->conn_mutex);
++
++ pr_info("conn %p completing conn_wait\n", isert_conn);
++ complete(&isert_conn->conn_wait);
+
+ return 0;
+ }
+@@ -865,7 +872,8 @@ isert_put_login_tx(struct iscsi_conn *co
+ if (ret)
+ return ret;
+
+- isert_conn->state = ISER_CONN_UP;
++ /* Now we are in FULL_FEATURE phase */
++ isert_conn->state = ISER_CONN_FULL_FEATURE;
+ goto post_send;
+ }
+
+--- a/drivers/infiniband/ulp/isert/ib_isert.h
++++ b/drivers/infiniband/ulp/isert/ib_isert.h
+@@ -21,6 +21,7 @@ enum iser_ib_op_code {
+ enum iser_conn_state {
+ ISER_CONN_INIT,
+ ISER_CONN_UP,
++ ISER_CONN_FULL_FEATURE,
+ ISER_CONN_TERMINATING,
+ ISER_CONN_DOWN,
+ };
+@@ -100,7 +101,6 @@ struct isert_conn {
+ struct ib_mr *conn_mr;
+ struct ib_qp *conn_qp;
+ struct isert_device *conn_device;
+- struct work_struct conn_logout_work;
+ struct mutex conn_mutex;
+ struct completion conn_wait;
+ struct completion conn_wait_comp_err;
--- /dev/null
+From nab@linux-iscsi.org Tue Feb 3 15:08:43 2015
+From: "Nicholas A. Bellinger" <nab@linux-iscsi.org>
+Date: Fri, 30 Jan 2015 22:17:30 +0000
+Subject: iser-target: Fix implicit termination of connections
+To: target-devel <target-devel@vger.kernel.org>
+Cc: Greg-KH <gregkh@linuxfoundation.org>, stable <stable@vger.kernel.org>, Sagi Grimberg <sagig@mellanox.com>
+Message-ID: <1422656251-29468-12-git-send-email-nab@linux-iscsi.org>
+
+
+From: Sagi Grimberg <sagig@mellanox.com>
+
+commit b02efbfc9a051b41e71fe8f94ddf967260e024a6 upstream.
+
+In situations such as bond failover, The new session establishment
+implicitly invokes the termination of the old connection.
+
+So, we don't want to wait for the old connection wait_conn to completely
+terminate before we accept the new connection and post a login response.
+
+The solution is to defer the comp_wait completion and the conn_put to
+a work so wait_conn will effectively be non-blocking (flush errors are
+assumed to come very fast).
+
+We allocate isert_release_wq with WQ_UNBOUND and WQ_UNBOUND_MAX_ACTIVE
+to spread the concurrency of release works.
+
+Reported-by: Slava Shwartsman <valyushash@gmail.com>
+Signed-off-by: Sagi Grimberg <sagig@mellanox.com>
+Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/infiniband/ulp/isert/ib_isert.c | 45 ++++++++++++++++++++++++--------
+ drivers/infiniband/ulp/isert/ib_isert.h | 1
+ 2 files changed, 36 insertions(+), 10 deletions(-)
+
+--- a/drivers/infiniband/ulp/isert/ib_isert.c
++++ b/drivers/infiniband/ulp/isert/ib_isert.c
+@@ -40,6 +40,7 @@ static DEFINE_MUTEX(device_list_mutex);
+ static LIST_HEAD(device_list);
+ static struct workqueue_struct *isert_rx_wq;
+ static struct workqueue_struct *isert_comp_wq;
++static struct workqueue_struct *isert_release_wq;
+ static struct kmem_cache *isert_cmd_cache;
+
+ static int
+@@ -2379,6 +2380,24 @@ isert_free_np(struct iscsi_np *np)
+ kfree(isert_np);
+ }
+
++static void isert_release_work(struct work_struct *work)
++{
++ struct isert_conn *isert_conn = container_of(work,
++ struct isert_conn,
++ release_work);
++
++ pr_info("Starting release conn %p\n", isert_conn);
++
++ wait_for_completion(&isert_conn->conn_wait);
++
++ mutex_lock(&isert_conn->conn_mutex);
++ isert_conn->state = ISER_CONN_DOWN;
++ mutex_unlock(&isert_conn->conn_mutex);
++
++ pr_info("Destroying conn %p\n", isert_conn);
++ isert_put_conn(isert_conn);
++}
++
+ static void isert_wait_conn(struct iscsi_conn *conn)
+ {
+ struct isert_conn *isert_conn = conn->context;
+@@ -2398,14 +2417,9 @@ static void isert_wait_conn(struct iscsi
+ mutex_unlock(&isert_conn->conn_mutex);
+
+ wait_for_completion(&isert_conn->conn_wait_comp_err);
+- wait_for_completion(&isert_conn->conn_wait);
+-
+- mutex_lock(&isert_conn->conn_mutex);
+- isert_conn->state = ISER_CONN_DOWN;
+- mutex_unlock(&isert_conn->conn_mutex);
+
+- pr_info("Destroying conn %p\n", isert_conn);
+- isert_put_conn(isert_conn);
++ INIT_WORK(&isert_conn->release_work, isert_release_work);
++ queue_work(isert_release_wq, &isert_conn->release_work);
+ }
+
+ static void isert_free_conn(struct iscsi_conn *conn)
+@@ -2451,20 +2465,30 @@ static int __init isert_init(void)
+ goto destroy_rx_wq;
+ }
+
++ isert_release_wq = alloc_workqueue("isert_release_wq", WQ_UNBOUND,
++ WQ_UNBOUND_MAX_ACTIVE);
++ if (!isert_release_wq) {
++ pr_err("Unable to allocate isert_release_wq\n");
++ ret = -ENOMEM;
++ goto destroy_comp_wq;
++ }
++
+ isert_cmd_cache = kmem_cache_create("isert_cmd_cache",
+ sizeof(struct isert_cmd), __alignof__(struct isert_cmd),
+ 0, NULL);
+ if (!isert_cmd_cache) {
+ pr_err("Unable to create isert_cmd_cache\n");
+ ret = -ENOMEM;
+- goto destroy_tx_cq;
++ goto destroy_release_wq;
+ }
+
+ iscsit_register_transport(&iser_target_transport);
+- pr_debug("iSER_TARGET[0] - Loaded iser_target_transport\n");
++ pr_info("iSER_TARGET[0] - Loaded iser_target_transport\n");
+ return 0;
+
+-destroy_tx_cq:
++destroy_release_wq:
++ destroy_workqueue(isert_release_wq);
++destroy_comp_wq:
+ destroy_workqueue(isert_comp_wq);
+ destroy_rx_wq:
+ destroy_workqueue(isert_rx_wq);
+@@ -2475,6 +2499,7 @@ static void __exit isert_exit(void)
+ {
+ flush_scheduled_work();
+ kmem_cache_destroy(isert_cmd_cache);
++ destroy_workqueue(isert_release_wq);
+ destroy_workqueue(isert_comp_wq);
+ destroy_workqueue(isert_rx_wq);
+ iscsit_unregister_transport(&iser_target_transport);
+--- a/drivers/infiniband/ulp/isert/ib_isert.h
++++ b/drivers/infiniband/ulp/isert/ib_isert.h
+@@ -107,6 +107,7 @@ struct isert_conn {
+ struct completion conn_wait;
+ struct completion conn_wait_comp_err;
+ struct kref conn_kref;
++ struct work_struct release_work;
+ };
+
+ #define ISERT_MAX_CQ 64
--- /dev/null
+From nab@linux-iscsi.org Tue Feb 3 15:08:34 2015
+From: "Nicholas A. Bellinger" <nab@linux-iscsi.org>
+Date: Fri, 30 Jan 2015 22:17:29 +0000
+Subject: iser-target: Handle ADDR_CHANGE event for listener cm_id
+To: target-devel <target-devel@vger.kernel.org>
+Cc: Greg-KH <gregkh@linuxfoundation.org>, stable <stable@vger.kernel.org>, Sagi Grimberg <sagig@mellanox.com>
+Message-ID: <1422656251-29468-11-git-send-email-nab@linux-iscsi.org>
+
+
+From: Sagi Grimberg <sagig@mellanox.com>
+
+commit ca6c1d82d12d8013fb75ce015900d62b9754623c upstream.
+
+The np listener cm_id will also get ADDR_CHANGE event
+upcall (in case it is bound to a specific IP). Handle
+it correctly by creating a new cm_id and implicitly
+destroy the old one.
+
+Since this is the second event a listener np cm_id may
+encounter, we move the np cm_id event handling to a
+routine.
+
+Squashed:
+
+iser-target: Move cma_id setup to a function
+
+Reported-by: Slava Shwartsman <valyushash@gmail.com>
+Signed-off-by: Sagi Grimberg <sagig@mellanox.com>
+Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/infiniband/ulp/isert/ib_isert.c | 107 ++++++++++++++++++++++----------
+ drivers/infiniband/ulp/isert/ib_isert.h | 1
+ 2 files changed, 77 insertions(+), 31 deletions(-)
+
+--- a/drivers/infiniband/ulp/isert/ib_isert.c
++++ b/drivers/infiniband/ulp/isert/ib_isert.c
+@@ -46,6 +46,7 @@ static int
+ isert_rdma_post_recvl(struct isert_conn *isert_conn);
+ static int
+ isert_rdma_accept(struct isert_conn *isert_conn);
++struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np);
+
+ static void
+ isert_qp_event_callback(struct ib_event *e, void *context)
+@@ -399,8 +400,8 @@ isert_device_find_by_ib_dev(struct rdma_
+ static int
+ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
+ {
+- struct iscsi_np *np = cma_id->context;
+- struct isert_np *isert_np = np->np_context;
++ struct isert_np *isert_np = cma_id->context;
++ struct iscsi_np *np = isert_np->np;
+ struct isert_conn *isert_conn;
+ struct isert_device *device;
+ struct ib_device *ib_dev = cma_id->device;
+@@ -640,17 +641,41 @@ isert_conn_terminate(struct isert_conn *
+ }
+
+ static int
+-isert_disconnected_handler(struct rdma_cm_id *cma_id)
++isert_np_cma_handler(struct isert_np *isert_np,
++ enum rdma_cm_event_type event)
+ {
+- struct iscsi_np *np = cma_id->context;
+- struct isert_np *isert_np = np->np_context;
+- struct isert_conn *isert_conn;
++ pr_debug("isert np %p, handling event %d\n", isert_np, event);
+
+- if (isert_np->np_cm_id == cma_id) {
++ switch (event) {
++ case RDMA_CM_EVENT_DEVICE_REMOVAL:
+ isert_np->np_cm_id = NULL;
+- return -1;
++ break;
++ case RDMA_CM_EVENT_ADDR_CHANGE:
++ isert_np->np_cm_id = isert_setup_id(isert_np);
++ if (IS_ERR(isert_np->np_cm_id)) {
++ pr_err("isert np %p setup id failed: %ld\n",
++ isert_np, PTR_ERR(isert_np->np_cm_id));
++ isert_np->np_cm_id = NULL;
++ }
++ break;
++ default:
++ pr_err("isert np %p Unexpected event %d\n",
++ isert_np, event);
+ }
+
++ return -1;
++}
++
++static int
++isert_disconnected_handler(struct rdma_cm_id *cma_id,
++ enum rdma_cm_event_type event)
++{
++ struct isert_np *isert_np = cma_id->context;
++ struct isert_conn *isert_conn;
++
++ if (isert_np->np_cm_id == cma_id)
++ return isert_np_cma_handler(cma_id->context, event);
++
+ isert_conn = cma_id->qp->qp_context;
+
+ mutex_lock(&isert_conn->conn_mutex);
+@@ -693,7 +718,7 @@ isert_cma_handler(struct rdma_cm_id *cma
+ case RDMA_CM_EVENT_DISCONNECTED: /* FALLTHRU */
+ case RDMA_CM_EVENT_DEVICE_REMOVAL: /* FALLTHRU */
+ case RDMA_CM_EVENT_TIMEWAIT_EXIT: /* FALLTHRU */
+- ret = isert_disconnected_handler(cma_id);
++ ret = isert_disconnected_handler(cma_id, event->event);
+ break;
+ case RDMA_CM_EVENT_REJECTED: /* FALLTHRU */
+ case RDMA_CM_EVENT_UNREACHABLE: /* FALLTHRU */
+@@ -2123,13 +2148,51 @@ isert_response_queue(struct iscsi_conn *
+ return ret;
+ }
+
++struct rdma_cm_id *
++isert_setup_id(struct isert_np *isert_np)
++{
++ struct iscsi_np *np = isert_np->np;
++ struct rdma_cm_id *id;
++ struct sockaddr *sa;
++ int ret;
++
++ sa = (struct sockaddr *)&np->np_sockaddr;
++ pr_debug("ksockaddr: %p, sa: %p\n", &np->np_sockaddr, sa);
++
++ id = rdma_create_id(isert_cma_handler, isert_np,
++ RDMA_PS_TCP, IB_QPT_RC);
++ if (IS_ERR(id)) {
++ pr_err("rdma_create_id() failed: %ld\n", PTR_ERR(id));
++ ret = PTR_ERR(id);
++ goto out;
++ }
++ pr_debug("id %p context %p\n", id, id->context);
++
++ ret = rdma_bind_addr(id, sa);
++ if (ret) {
++ pr_err("rdma_bind_addr() failed: %d\n", ret);
++ goto out_id;
++ }
++
++ ret = rdma_listen(id, ISERT_RDMA_LISTEN_BACKLOG);
++ if (ret) {
++ pr_err("rdma_listen() failed: %d\n", ret);
++ goto out_id;
++ }
++
++ return id;
++out_id:
++ rdma_destroy_id(id);
++out:
++ return ERR_PTR(ret);
++}
++
+ static int
+ isert_setup_np(struct iscsi_np *np,
+ struct __kernel_sockaddr_storage *ksockaddr)
+ {
+ struct isert_np *isert_np;
+ struct rdma_cm_id *isert_lid;
+- struct sockaddr *sa;
+ int ret;
+
+ isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL);
+@@ -2141,9 +2204,8 @@ isert_setup_np(struct iscsi_np *np,
+ mutex_init(&isert_np->np_accept_mutex);
+ INIT_LIST_HEAD(&isert_np->np_accept_list);
+ init_completion(&isert_np->np_login_comp);
++ isert_np->np = np;
+
+- sa = (struct sockaddr *)ksockaddr;
+- pr_debug("ksockaddr: %p, sa: %p\n", ksockaddr, sa);
+ /*
+ * Setup the np->np_sockaddr from the passed sockaddr setup
+ * in iscsi_target_configfs.c code..
+@@ -2151,37 +2213,20 @@ isert_setup_np(struct iscsi_np *np,
+ memcpy(&np->np_sockaddr, ksockaddr,
+ sizeof(struct __kernel_sockaddr_storage));
+
+- isert_lid = rdma_create_id(isert_cma_handler, np, RDMA_PS_TCP,
+- IB_QPT_RC);
++ isert_lid = isert_setup_id(isert_np);
+ if (IS_ERR(isert_lid)) {
+- pr_err("rdma_create_id() for isert_listen_handler failed: %ld\n",
+- PTR_ERR(isert_lid));
+ ret = PTR_ERR(isert_lid);
+ goto out;
+ }
+
+- ret = rdma_bind_addr(isert_lid, sa);
+- if (ret) {
+- pr_err("rdma_bind_addr() for isert_lid failed: %d\n", ret);
+- goto out_lid;
+- }
+-
+- ret = rdma_listen(isert_lid, ISERT_RDMA_LISTEN_BACKLOG);
+- if (ret) {
+- pr_err("rdma_listen() for isert_lid failed: %d\n", ret);
+- goto out_lid;
+- }
+-
+ isert_np->np_cm_id = isert_lid;
+ np->np_context = isert_np;
+- pr_debug("Setup isert_lid->context: %p\n", isert_lid->context);
+
+ return 0;
+
+-out_lid:
+- rdma_destroy_id(isert_lid);
+ out:
+ kfree(isert_np);
++
+ return ret;
+ }
+
+--- a/drivers/infiniband/ulp/isert/ib_isert.h
++++ b/drivers/infiniband/ulp/isert/ib_isert.h
+@@ -132,6 +132,7 @@ struct isert_device {
+ };
+
+ struct isert_np {
++ struct iscsi_np *np;
+ struct semaphore np_sem;
+ struct rdma_cm_id *np_cm_id;
+ struct mutex np_accept_mutex;
--- /dev/null
+From nab@linux-iscsi.org Tue Feb 3 15:08:19 2015
+From: "Nicholas A. Bellinger" <nab@linux-iscsi.org>
+Date: Fri, 30 Jan 2015 22:17:27 +0000
+Subject: iser-target: Parallelize CM connection establishment
+To: target-devel <target-devel@vger.kernel.org>
+Cc: Greg-KH <gregkh@linuxfoundation.org>, stable <stable@vger.kernel.org>, Sagi Grimberg <sagig@mellanox.com>
+Message-ID: <1422656251-29468-9-git-send-email-nab@linux-iscsi.org>
+
+
+From: Sagi Grimberg <sagig@mellanox.com>
+
+commit 2371e5da8cfe91443339b54444dec6254fdd6dfc upstream.
+
+There is no point in accepting a new CM request only
+when we are completely done with the last iscsi login.
+Instead we accept immediately, this will also cause the
+CM connection to reach connected state and the initiator
+is allowed to send the first login. We mark that we got
+the initial login and let iscsi layer pick it up when it
+gets there.
+
+This reduces the parallel login sequence by a factor of
+more than 4 (and more for multi-login) and also prevents
+the initiator (who does all logins in parallel) from
+giving up on login timeout expiration.
+
+In order to support multiple login requests sequence (CHAP)
+we call isert_rx_login_req from isert_rx_completion instead
+of letting isert_get_login_rx call it.
+
+Squashed:
+
+iser-target: Use kref_get_unless_zero in connected_handler
+iser-target: Acquire conn_mutex when changing connection state
+iser-target: Reject connect request in failure path
+
+Signed-off-by: Sagi Grimberg <sagig@mellanox.com>
+Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/infiniband/ulp/isert/ib_isert.c | 90 +++++++++++++++++++++++---------
+ drivers/infiniband/ulp/isert/ib_isert.h | 2
+ 2 files changed, 67 insertions(+), 25 deletions(-)
+
+--- a/drivers/infiniband/ulp/isert/ib_isert.c
++++ b/drivers/infiniband/ulp/isert/ib_isert.c
+@@ -42,6 +42,11 @@ static struct workqueue_struct *isert_rx
+ static struct workqueue_struct *isert_comp_wq;
+ static struct kmem_cache *isert_cmd_cache;
+
++static int
++isert_rdma_post_recvl(struct isert_conn *isert_conn);
++static int
++isert_rdma_accept(struct isert_conn *isert_conn);
++
+ static void
+ isert_qp_event_callback(struct ib_event *e, void *context)
+ {
+@@ -414,6 +419,7 @@ isert_connect_request(struct rdma_cm_id
+ isert_conn->state = ISER_CONN_INIT;
+ INIT_LIST_HEAD(&isert_conn->conn_accept_node);
+ init_completion(&isert_conn->conn_login_comp);
++ init_completion(&isert_conn->login_req_comp);
+ init_completion(&isert_conn->conn_wait);
+ init_completion(&isert_conn->conn_wait_comp_err);
+ kref_init(&isert_conn->conn_kref);
+@@ -479,6 +485,14 @@ isert_connect_request(struct rdma_cm_id
+ if (ret)
+ goto out_conn_dev;
+
++ ret = isert_rdma_post_recvl(isert_conn);
++ if (ret)
++ goto out_conn_dev;
++
++ ret = isert_rdma_accept(isert_conn);
++ if (ret)
++ goto out_conn_dev;
++
+ mutex_lock(&isert_np->np_accept_mutex);
+ list_add_tail(&isert_conn->conn_accept_node, &isert_np->np_accept_list);
+ mutex_unlock(&isert_np->np_accept_mutex);
+@@ -499,6 +513,7 @@ out_login_buf:
+ kfree(isert_conn->login_buf);
+ out:
+ kfree(isert_conn);
++ rdma_reject(cma_id, NULL, 0);
+ return ret;
+ }
+
+@@ -546,8 +561,15 @@ isert_connected_handler(struct rdma_cm_i
+
+ pr_info("conn %p\n", isert_conn);
+
+- isert_conn->state = ISER_CONN_UP;
+- kref_get(&isert_conn->conn_kref);
++ if (!kref_get_unless_zero(&isert_conn->conn_kref)) {
++ pr_warn("conn %p connect_release is running\n", isert_conn);
++ return;
++ }
++
++ mutex_lock(&isert_conn->conn_mutex);
++ if (isert_conn->state != ISER_CONN_FULL_FEATURE)
++ isert_conn->state = ISER_CONN_UP;
++ mutex_unlock(&isert_conn->conn_mutex);
+ }
+
+ static void
+@@ -873,7 +895,9 @@ isert_put_login_tx(struct iscsi_conn *co
+ return ret;
+
+ /* Now we are in FULL_FEATURE phase */
++ mutex_lock(&isert_conn->conn_mutex);
+ isert_conn->state = ISER_CONN_FULL_FEATURE;
++ mutex_unlock(&isert_conn->conn_mutex);
+ goto post_send;
+ }
+
+@@ -890,18 +914,17 @@ post_send:
+ }
+
+ static void
+-isert_rx_login_req(struct iser_rx_desc *rx_desc, int rx_buflen,
+- struct isert_conn *isert_conn)
++isert_rx_login_req(struct isert_conn *isert_conn)
+ {
++ struct iser_rx_desc *rx_desc = (void *)isert_conn->login_req_buf;
++ int rx_buflen = isert_conn->login_req_len;
+ struct iscsi_conn *conn = isert_conn->conn;
+ struct iscsi_login *login = conn->conn_login;
+ int size;
+
+- if (!login) {
+- pr_err("conn->conn_login is NULL\n");
+- dump_stack();
+- return;
+- }
++ pr_info("conn %p\n", isert_conn);
++
++ WARN_ON_ONCE(!login);
+
+ if (login->first_request) {
+ struct iscsi_login_req *login_req =
+@@ -931,7 +954,8 @@ isert_rx_login_req(struct iser_rx_desc *
+ size, rx_buflen, MAX_KEY_VALUE_PAIRS);
+ memcpy(login->req_buf, &rx_desc->data[0], size);
+
+- complete(&isert_conn->conn_login_comp);
++ if (login->first_request)
++ complete(&isert_conn->conn_login_comp);
+ }
+
+ static void
+@@ -1208,11 +1232,20 @@ isert_rx_completion(struct iser_rx_desc
+ hdr->opcode, hdr->itt, hdr->flags,
+ (int)(xfer_len - ISER_HEADERS_LEN));
+
+- if ((char *)desc == isert_conn->login_req_buf)
+- isert_rx_login_req(desc, xfer_len - ISER_HEADERS_LEN,
+- isert_conn);
+- else
++ if ((char *)desc == isert_conn->login_req_buf) {
++ isert_conn->login_req_len = xfer_len - ISER_HEADERS_LEN;
++ if (isert_conn->conn) {
++ struct iscsi_login *login = isert_conn->conn->conn_login;
++
++ if (login && !login->first_request)
++ isert_rx_login_req(isert_conn);
++ }
++ mutex_lock(&isert_conn->conn_mutex);
++ complete(&isert_conn->login_req_comp);
++ mutex_unlock(&isert_conn->conn_mutex);
++ } else {
+ isert_rx_do_work(desc, isert_conn);
++ }
+
+ ib_dma_sync_single_for_device(ib_dev, rx_dma, rx_buflen,
+ DMA_FROM_DEVICE);
+@@ -2177,13 +2210,27 @@ isert_get_login_rx(struct iscsi_conn *co
+ struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
+ int ret;
+
+- pr_debug("isert_get_login_rx before conn_login_comp conn: %p\n", conn);
++ pr_info("before login_req comp conn: %p\n", isert_conn);
++ ret = wait_for_completion_interruptible(&isert_conn->login_req_comp);
++ if (ret) {
++ pr_err("isert_conn %p interrupted before got login req\n",
++ isert_conn);
++ return ret;
++ }
++ isert_conn->login_req_comp.done = 0;
++
++ if (!login->first_request)
++ return 0;
++
++ isert_rx_login_req(isert_conn);
++
++ pr_info("before conn_login_comp conn: %p\n", conn);
+
+ ret = wait_for_completion_interruptible(&isert_conn->conn_login_comp);
+ if (ret)
+ return ret;
+
+- pr_debug("isert_get_login_rx processing login->req: %p\n", login->req);
++ pr_info("processing login->req: %p\n", login->req);
+ return 0;
+ }
+
+@@ -2261,17 +2308,10 @@ accept_wait:
+ isert_conn->conn = conn;
+ max_accept = 0;
+
+- ret = isert_rdma_post_recvl(isert_conn);
+- if (ret)
+- return ret;
+-
+- ret = isert_rdma_accept(isert_conn);
+- if (ret)
+- return ret;
+-
+ isert_set_conn_info(np, conn, isert_conn);
+
+- pr_debug("Processing isert_accept_np: isert_conn: %p\n", isert_conn);
++ pr_debug("Processing isert_conn: %p\n", isert_conn);
++
+ return 0;
+ }
+
+--- a/drivers/infiniband/ulp/isert/ib_isert.h
++++ b/drivers/infiniband/ulp/isert/ib_isert.h
+@@ -88,6 +88,7 @@ struct isert_conn {
+ char *login_req_buf;
+ char *login_rsp_buf;
+ u64 login_req_dma;
++ int login_req_len;
+ u64 login_rsp_dma;
+ unsigned int conn_rx_desc_head;
+ struct iser_rx_desc *conn_rx_descs;
+@@ -95,6 +96,7 @@ struct isert_conn {
+ struct iscsi_conn *conn;
+ struct list_head conn_accept_node;
+ struct completion conn_login_comp;
++ struct completion login_req_comp;
+ struct iser_tx_desc conn_login_tx_desc;
+ struct rdma_cm_id *conn_cm_id;
+ struct ib_pd *conn_pd;
efi-pstore-make-efi-pstore-return-a-unique-id.patch
gpio-squelch-a-compiler-warning.patch
workqueue-fix-subtle-pool-management-issue-which-can-stall-whole-worker_pool.patch
+ib-isert-adjust-cq-size-to-hw-limits.patch
+ib_isert-add-max_send_sge-2-minimum-for-control-pdu-responses.patch
+vhost-scsi-take-configfs-group-dependency-during-vhost_scsi_set_endpoint.patch
+tcm_loop-fix-wrong-i_t-nexus-association.patch
+vhost-scsi-add-missing-virtio-scsi-tcm-attribute-conversion.patch
+iscsi-iser-target-initiate-termination-only-once.patch
+iser-target-fix-flush-disconnect-completion-handling.patch
+iser-target-parallelize-cm-connection-establishment.patch
+iser-target-fix-connected_handler-teardown-flow-race.patch
+iser-target-handle-addr_change-event-for-listener-cm_id.patch
+iser-target-fix-implicit-termination-of-connections.patch
+target-drop-arbitrary-maximum-i-o-size-limit.patch
--- /dev/null
+From nab@linux-iscsi.org Tue Feb 3 15:08:50 2015
+From: "Nicholas A. Bellinger" <nab@linux-iscsi.org>
+Date: Fri, 30 Jan 2015 22:17:31 +0000
+Subject: target: Drop arbitrary maximum I/O size limit
+To: target-devel <target-devel@vger.kernel.org>
+Cc: Greg-KH <gregkh@linuxfoundation.org>, stable <stable@vger.kernel.org>, Nicholas Bellinger <nab@linux-iscsi.org>, Christoph Hellwig <hch@lst.de>, "Martin K. Petersen" <martin.petersen@oracle.com>, Roland Dreier <roland@purestorage.com>
+Message-ID: <1422656251-29468-13-git-send-email-nab@linux-iscsi.org>
+
+
+From: Nicholas Bellinger <nab@linux-iscsi.org>
+
+commit 046ba64285a4389ae5e9a7dfa253c6bff3d7c341 upstream.
+
+This patch drops the arbitrary maximum I/O size limit in sbc_parse_cdb(),
+which currently for fabric_max_sectors is hardcoded to 8192 (4 MB for 512
+byte sector devices), and for hw_max_sectors is a backend driver dependent
+value.
+
+This limit is problematic because Linux initiators have only recently
+started to honor block limits MAXIMUM TRANSFER LENGTH, and other non-Linux
+based initiators (eg: MSFT Fibre Channel) can also generate I/Os larger
+than 4 MB in size.
+
+Currently when this happens, the following message will appear on the
+target resulting in I/Os being returned with non recoverable status:
+
+ SCSI OP 28h with too big sectors 16384 exceeds fabric_max_sectors: 8192
+
+Instead, drop both [fabric,hw]_max_sector checks in sbc_parse_cdb(),
+and convert the existing hw_max_sectors into a purely informational
+attribute used to represent the granularity that backend driver and/or
+subsystem code is splitting I/Os upon.
+
+Also, update FILEIO with an explicit FD_MAX_BYTES check in fd_execute_rw()
+to deal with the one special iovec limitation case.
+
+v2 changes:
+ - Drop hw_max_sectors check in sbc_parse_cdb()
+
+Reported-by: Lance Gropper <lance.gropper@qosserver.com>
+Reported-by: Stefan Priebe <s.priebe@profihost.ag>
+Cc: Christoph Hellwig <hch@lst.de>
+Cc: Martin K. Petersen <martin.petersen@oracle.com>
+Cc: Roland Dreier <roland@purestorage.com>
+Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/target/target_core_device.c | 8 ++++----
+ drivers/target/target_core_file.c | 11 ++++++++++-
+ drivers/target/target_core_iblock.c | 2 +-
+ drivers/target/target_core_sbc.c | 15 ---------------
+ drivers/target/target_core_spc.c | 5 +----
+ 5 files changed, 16 insertions(+), 25 deletions(-)
+
+--- a/drivers/target/target_core_device.c
++++ b/drivers/target/target_core_device.c
+@@ -1037,10 +1037,10 @@ int se_dev_set_optimal_sectors(struct se
+ " changed for TCM/pSCSI\n", dev);
+ return -EINVAL;
+ }
+- if (optimal_sectors > dev->dev_attrib.fabric_max_sectors) {
++ if (optimal_sectors > dev->dev_attrib.hw_max_sectors) {
+ pr_err("dev[%p]: Passed optimal_sectors %u cannot be"
+- " greater than fabric_max_sectors: %u\n", dev,
+- optimal_sectors, dev->dev_attrib.fabric_max_sectors);
++ " greater than hw_max_sectors: %u\n", dev,
++ optimal_sectors, dev->dev_attrib.hw_max_sectors);
+ return -EINVAL;
+ }
+
+@@ -1442,7 +1442,6 @@ struct se_device *target_alloc_device(st
+ DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
+ dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN;
+ dev->dev_attrib.fabric_max_sectors = DA_FABRIC_MAX_SECTORS;
+- dev->dev_attrib.optimal_sectors = DA_FABRIC_MAX_SECTORS;
+
+ return dev;
+ }
+@@ -1475,6 +1474,7 @@ int target_configure_device(struct se_de
+ dev->dev_attrib.hw_max_sectors =
+ se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors,
+ dev->dev_attrib.hw_block_size);
++ dev->dev_attrib.optimal_sectors = dev->dev_attrib.hw_max_sectors;
+
+ dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
+ dev->creation_time = get_jiffies_64();
+--- a/drivers/target/target_core_file.c
++++ b/drivers/target/target_core_file.c
+@@ -554,7 +554,16 @@ fd_execute_rw(struct se_cmd *cmd)
+ enum dma_data_direction data_direction = cmd->data_direction;
+ struct se_device *dev = cmd->se_dev;
+ int ret = 0;
+-
++ /*
++ * We are currently limited by the number of iovecs (2048) per
++ * single vfs_[writev,readv] call.
++ */
++ if (cmd->data_length > FD_MAX_BYTES) {
++ pr_err("FILEIO: Not able to process I/O of %u bytes due to"
++ "FD_MAX_BYTES: %u iovec count limitiation\n",
++ cmd->data_length, FD_MAX_BYTES);
++ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
++ }
+ /*
+ * Call vectorized fileio functions to map struct scatterlist
+ * physical memory addresses to struct iovec virtual memory.
+--- a/drivers/target/target_core_iblock.c
++++ b/drivers/target/target_core_iblock.c
+@@ -122,7 +122,7 @@ static int iblock_configure_device(struc
+ q = bdev_get_queue(bd);
+
+ dev->dev_attrib.hw_block_size = bdev_logical_block_size(bd);
+- dev->dev_attrib.hw_max_sectors = UINT_MAX;
++ dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);
+ dev->dev_attrib.hw_queue_depth = q->nr_requests;
+
+ /*
+--- a/drivers/target/target_core_sbc.c
++++ b/drivers/target/target_core_sbc.c
+@@ -561,21 +561,6 @@ sbc_parse_cdb(struct se_cmd *cmd, struct
+ if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
+ unsigned long long end_lba;
+
+- if (sectors > dev->dev_attrib.fabric_max_sectors) {
+- printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
+- " big sectors %u exceeds fabric_max_sectors:"
+- " %u\n", cdb[0], sectors,
+- dev->dev_attrib.fabric_max_sectors);
+- return TCM_INVALID_CDB_FIELD;
+- }
+- if (sectors > dev->dev_attrib.hw_max_sectors) {
+- printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
+- " big sectors %u exceeds backend hw_max_sectors:"
+- " %u\n", cdb[0], sectors,
+- dev->dev_attrib.hw_max_sectors);
+- return TCM_INVALID_CDB_FIELD;
+- }
+-
+ end_lba = dev->transport->get_blocks(dev) + 1;
+ if (cmd->t_task_lba + sectors > end_lba) {
+ pr_err("cmd exceeds last lba %llu "
+--- a/drivers/target/target_core_spc.c
++++ b/drivers/target/target_core_spc.c
+@@ -444,7 +444,6 @@ static sense_reason_t
+ spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
+ {
+ struct se_device *dev = cmd->se_dev;
+- u32 max_sectors;
+ int have_tp = 0;
+
+ /*
+@@ -469,9 +468,7 @@ spc_emulate_evpd_b0(struct se_cmd *cmd,
+ /*
+ * Set MAXIMUM TRANSFER LENGTH
+ */
+- max_sectors = min(dev->dev_attrib.fabric_max_sectors,
+- dev->dev_attrib.hw_max_sectors);
+- put_unaligned_be32(max_sectors, &buf[8]);
++ put_unaligned_be32(dev->dev_attrib.hw_max_sectors, &buf[8]);
+
+ /*
+ * Set OPTIMAL TRANSFER LENGTH
--- /dev/null
+From nab@linux-iscsi.org Tue Feb 3 15:07:40 2015
+From: "Nicholas A. Bellinger" <nab@linux-iscsi.org>
+Date: Fri, 30 Jan 2015 22:17:23 +0000
+Subject: tcm_loop: Fix wrong I_T nexus association
+To: target-devel <target-devel@vger.kernel.org>
+Cc: Greg-KH <gregkh@linuxfoundation.org>, stable <stable@vger.kernel.org>, Hannes Reinecke <hare@suse.de>
+Message-ID: <1422656251-29468-5-git-send-email-nab@linux-iscsi.org>
+
+
+From: Hannes Reinecke <hare@suse.de>
+
+commit 506787a2c7daed45f0a213674ca706cbc83a9089 upstream.
+
+tcm_loop has the I_T nexus associated with the HBA. This causes
+commands to become misdirected if the HBA has more than one
+target portal group; any command is then being sent to the
+first target portal group instead of the correct one.
+
+The nexus needs to be associated with the target portal group
+instead.
+
+Signed-off-by: Hannes Reinecke <hare@suse.de>
+Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/target/loopback/tcm_loop.c | 29 ++++++++++++++---------------
+ drivers/target/loopback/tcm_loop.h | 7 +------
+ 2 files changed, 15 insertions(+), 21 deletions(-)
+
+--- a/drivers/target/loopback/tcm_loop.c
++++ b/drivers/target/loopback/tcm_loop.c
+@@ -179,7 +179,7 @@ static void tcm_loop_submission_work(str
+ goto out_done;
+ }
+
+- tl_nexus = tl_hba->tl_nexus;
++ tl_nexus = tl_tpg->tl_nexus;
+ if (!tl_nexus) {
+ scmd_printk(KERN_ERR, sc, "TCM_Loop I_T Nexus"
+ " does not exist\n");
+@@ -258,20 +258,20 @@ static int tcm_loop_device_reset(struct
+ */
+ tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
+ /*
++ * Locate the tl_tpg and se_tpg pointers from TargetID in sc->device->id
++ */
++ tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
++ se_tpg = &tl_tpg->tl_se_tpg;
++ /*
+ * Locate the tl_nexus and se_sess pointers
+ */
+- tl_nexus = tl_hba->tl_nexus;
++ tl_nexus = tl_tpg->tl_nexus;
+ if (!tl_nexus) {
+ pr_err("Unable to perform device reset without"
+ " active I_T Nexus\n");
+ return FAILED;
+ }
+ se_sess = tl_nexus->se_sess;
+- /*
+- * Locate the tl_tpg and se_tpg pointers from TargetID in sc->device->id
+- */
+- tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
+- se_tpg = &tl_tpg->tl_se_tpg;
+
+ tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL);
+ if (!tl_cmd) {
+@@ -879,8 +879,8 @@ static int tcm_loop_make_nexus(
+ struct tcm_loop_nexus *tl_nexus;
+ int ret = -ENOMEM;
+
+- if (tl_tpg->tl_hba->tl_nexus) {
+- pr_debug("tl_tpg->tl_hba->tl_nexus already exists\n");
++ if (tl_tpg->tl_nexus) {
++ pr_debug("tl_tpg->tl_nexus already exists\n");
+ return -EEXIST;
+ }
+ se_tpg = &tl_tpg->tl_se_tpg;
+@@ -915,7 +915,7 @@ static int tcm_loop_make_nexus(
+ */
+ __transport_register_session(se_tpg, tl_nexus->se_sess->se_node_acl,
+ tl_nexus->se_sess, tl_nexus);
+- tl_tpg->tl_hba->tl_nexus = tl_nexus;
++ tl_tpg->tl_nexus = tl_nexus;
+ pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated"
+ " %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
+ name);
+@@ -931,9 +931,8 @@ static int tcm_loop_drop_nexus(
+ {
+ struct se_session *se_sess;
+ struct tcm_loop_nexus *tl_nexus;
+- struct tcm_loop_hba *tl_hba = tpg->tl_hba;
+
+- tl_nexus = tpg->tl_hba->tl_nexus;
++ tl_nexus = tpg->tl_nexus;
+ if (!tl_nexus)
+ return -ENODEV;
+
+@@ -949,13 +948,13 @@ static int tcm_loop_drop_nexus(
+ }
+
+ pr_debug("TCM_Loop_ConfigFS: Removing I_T Nexus to emulated"
+- " %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
++ " %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tpg->tl_hba),
+ tl_nexus->se_sess->se_node_acl->initiatorname);
+ /*
+ * Release the SCSI I_T Nexus to the emulated SAS Target Port
+ */
+ transport_deregister_session(tl_nexus->se_sess);
+- tpg->tl_hba->tl_nexus = NULL;
++ tpg->tl_nexus = NULL;
+ kfree(tl_nexus);
+ return 0;
+ }
+@@ -971,7 +970,7 @@ static ssize_t tcm_loop_tpg_show_nexus(
+ struct tcm_loop_nexus *tl_nexus;
+ ssize_t ret;
+
+- tl_nexus = tl_tpg->tl_hba->tl_nexus;
++ tl_nexus = tl_tpg->tl_nexus;
+ if (!tl_nexus)
+ return -ENODEV;
+
+--- a/drivers/target/loopback/tcm_loop.h
++++ b/drivers/target/loopback/tcm_loop.h
+@@ -25,11 +25,6 @@ struct tcm_loop_tmr {
+ };
+
+ struct tcm_loop_nexus {
+- int it_nexus_active;
+- /*
+- * Pointer to Linux/SCSI HBA from linux/include/scsi_host.h
+- */
+- struct scsi_host *sh;
+ /*
+ * Pointer to TCM session for I_T Nexus
+ */
+@@ -45,6 +40,7 @@ struct tcm_loop_tpg {
+ atomic_t tl_tpg_port_count;
+ struct se_portal_group tl_se_tpg;
+ struct tcm_loop_hba *tl_hba;
++ struct tcm_loop_nexus *tl_nexus;
+ };
+
+ struct tcm_loop_hba {
+@@ -53,7 +49,6 @@ struct tcm_loop_hba {
+ struct se_hba_s *se_hba;
+ struct se_lun *tl_hba_lun;
+ struct se_port *tl_hba_lun_sep;
+- struct tcm_loop_nexus *tl_nexus;
+ struct device dev;
+ struct Scsi_Host *sh;
+ struct tcm_loop_tpg tl_hba_tpgs[TL_TPGS_PER_HBA];
--- /dev/null
+From nab@linux-iscsi.org Tue Feb 3 15:07:50 2015
+From: "Nicholas A. Bellinger" <nab@linux-iscsi.org>
+Date: Fri, 30 Jan 2015 22:17:24 +0000
+Subject: vhost-scsi: Add missing virtio-scsi -> TCM attribute conversion
+To: target-devel <target-devel@vger.kernel.org>
+Cc: Greg-KH <gregkh@linuxfoundation.org>, stable <stable@vger.kernel.org>, Nicholas Bellinger <nab@linux-iscsi.org>, Christoph Hellwig <hch@lst.de>, "Michael S. Tsirkin" <mst@redhat.com>, Paolo Bonzini <pbonzini@redhat.com>
+Message-ID: <1422656251-29468-6-git-send-email-nab@linux-iscsi.org>
+
+
+From: Nicholas Bellinger <nab@linux-iscsi.org>
+
+commit 46243860806bdc2756f3ce8ac86b4d7c616bcd6c upstream.
+
+While looking at hch's recent conversion to drop the MSG_*_TAG
+definitions, I noticed a long standing bug in vhost-scsi where
+the VIRTIO_SCSI_S_* attribute definitions were incorrectly
+being passed directly into target_submit_cmd_map_sgls().
+
+This patch adds the missing virtio-scsi to TCM/SAM task attribute
+conversion.
+
+Cc: Christoph Hellwig <hch@lst.de>
+Cc: Michael S. Tsirkin <mst@redhat.com>
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/vhost/scsi.c | 23 ++++++++++++++++++++---
+ 1 file changed, 20 insertions(+), 3 deletions(-)
+
+--- a/drivers/vhost/scsi.c
++++ b/drivers/vhost/scsi.c
+@@ -820,6 +820,23 @@ static int vhost_scsi_map_iov_to_sgl(str
+ return 0;
+ }
+
++static int vhost_scsi_to_tcm_attr(int attr)
++{
++ switch (attr) {
++ case VIRTIO_SCSI_S_SIMPLE:
++ return MSG_SIMPLE_TAG;
++ case VIRTIO_SCSI_S_ORDERED:
++ return MSG_ORDERED_TAG;
++ case VIRTIO_SCSI_S_HEAD:
++ return MSG_HEAD_TAG;
++ case VIRTIO_SCSI_S_ACA:
++ return MSG_ACA_TAG;
++ default:
++ break;
++ }
++ return MSG_SIMPLE_TAG;
++}
++
+ static void tcm_vhost_submission_work(struct work_struct *work)
+ {
+ struct tcm_vhost_cmd *tv_cmd =
+@@ -846,9 +863,9 @@ static void tcm_vhost_submission_work(st
+ rc = target_submit_cmd_map_sgls(se_cmd, tv_nexus->tvn_se_sess,
+ tv_cmd->tvc_cdb, &tv_cmd->tvc_sense_buf[0],
+ tv_cmd->tvc_lun, tv_cmd->tvc_exp_data_len,
+- tv_cmd->tvc_task_attr, tv_cmd->tvc_data_direction,
+- 0, sg_ptr, tv_cmd->tvc_sgl_count,
+- sg_bidi_ptr, sg_no_bidi);
++ vhost_scsi_to_tcm_attr(tv_cmd->tvc_task_attr),
++ tv_cmd->tvc_data_direction, 0, sg_ptr,
++ tv_cmd->tvc_sgl_count, sg_bidi_ptr, sg_no_bidi);
+ if (rc < 0) {
+ transport_send_check_condition_and_sense(se_cmd,
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
--- /dev/null
+From nab@linux-iscsi.org Tue Feb 3 15:07:28 2015
+From: "Nicholas A. Bellinger" <nab@linux-iscsi.org>
+Date: Fri, 30 Jan 2015 22:17:22 +0000
+Subject: vhost-scsi: Take configfs group dependency during VHOST_SCSI_SET_ENDPOINT
+To: target-devel <target-devel@vger.kernel.org>
+Cc: Greg-KH <gregkh@linuxfoundation.org>, stable <stable@vger.kernel.org>, Nicholas Bellinger <nab@linux-iscsi.org>, "Michael S. Tsirkin" <mst@redhat.com>, Paolo Bonzini <pbonzini@redhat.com>, Stefan Hajnoczi <stefanha@redhat.com>
+Message-ID: <1422656251-29468-4-git-send-email-nab@linux-iscsi.org>
+
+
+From: Nicholas Bellinger <nab@linux-iscsi.org>
+
+commit ab8edab132829b26dd13db6caca3c242cce35dc1 upstream.
+
+This patch addresses a bug where individual vhost-scsi configfs endpoint
+groups can be removed from below while active exports to QEMU userspace
+still exist, resulting in an OOPs.
+
+It adds a configfs_depend_item() in vhost_scsi_set_endpoint() to obtain
+an explicit dependency on se_tpg->tpg_group in order to prevent individual
+vhost-scsi WWPN endpoints from being released via normal configfs methods
+while an QEMU ioctl reference still exists.
+
+Also, add matching configfs_undepend_item() in vhost_scsi_clear_endpoint()
+to release the dependency, once QEMU's reference to the individual group
+at /sys/kernel/config/target/vhost/$WWPN/$TPGT is released.
+
+(Fix up vhost_scsi_clear_endpoint() error path - DanC)
+
+Cc: Michael S. Tsirkin <mst@redhat.com>
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: Stefan Hajnoczi <stefanha@redhat.com>
+Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/vhost/scsi.c | 24 ++++++++++++++++++++++++
+ 1 file changed, 24 insertions(+)
+
+--- a/drivers/vhost/scsi.c
++++ b/drivers/vhost/scsi.c
+@@ -1150,6 +1150,7 @@ static int vhost_scsi_set_endpoint(
+ struct vhost_scsi *vs,
+ struct vhost_scsi_target *t)
+ {
++ struct se_portal_group *se_tpg;
+ struct tcm_vhost_tport *tv_tport;
+ struct tcm_vhost_tpg *tv_tpg;
+ struct tcm_vhost_tpg **vs_tpg;
+@@ -1197,6 +1198,21 @@ static int vhost_scsi_set_endpoint(
+ ret = -EEXIST;
+ goto out;
+ }
++ /*
++ * In order to ensure individual vhost-scsi configfs
++ * groups cannot be removed while in use by vhost ioctl,
++ * go ahead and take an explicit se_tpg->tpg_group.cg_item
++ * dependency now.
++ */
++ se_tpg = &tv_tpg->se_tpg;
++ ret = configfs_depend_item(se_tpg->se_tpg_tfo->tf_subsys,
++ &se_tpg->tpg_group.cg_item);
++ if (ret) {
++ pr_warn("configfs_depend_item() failed: %d\n", ret);
++ kfree(vs_tpg);
++ mutex_unlock(&tv_tpg->tv_tpg_mutex);
++ goto out;
++ }
+ tv_tpg->tv_tpg_vhost_count++;
+ tv_tpg->vhost_scsi = vs;
+ vs_tpg[tv_tpg->tport_tpgt] = tv_tpg;
+@@ -1240,6 +1256,7 @@ static int vhost_scsi_clear_endpoint(
+ struct vhost_scsi *vs,
+ struct vhost_scsi_target *t)
+ {
++ struct se_portal_group *se_tpg;
+ struct tcm_vhost_tport *tv_tport;
+ struct tcm_vhost_tpg *tv_tpg;
+ struct vhost_virtqueue *vq;
+@@ -1288,6 +1305,13 @@ static int vhost_scsi_clear_endpoint(
+ vs->vs_tpg[target] = NULL;
+ match = true;
+ mutex_unlock(&tv_tpg->tv_tpg_mutex);
++ /*
++ * Release se_tpg->tpg_group.cg_item configfs dependency now
++ * to allow vhost-scsi WWPN se_tpg->tpg_group shutdown to occur.
++ */
++ se_tpg = &tv_tpg->se_tpg;
++ configfs_undepend_item(se_tpg->se_tpg_tfo->tf_subsys,
++ &se_tpg->tpg_group.cg_item);
+ }
+ if (match) {
+ for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {