git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
3.14-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 7 Mar 2016 23:38:57 +0000 (15:38 -0800)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 7 Mar 2016 23:38:57 +0000 (15:38 -0800)
added patches:
target-add-tfo-abort_task-for-aborted-task-resources-release.patch
target-fix-lun_reset-active-i-o-handling-for-ack_kref.patch
target-fix-lun_reset-active-tmr-descriptor-handling.patch
target-fix-race-with-scf_send_delayed_tas-handling.patch
target-fix-remote-port-tmr-abort-se_cmd-fabric-stop.patch
target-fix-tas-handling-for-multi-session-se_node_acls.patch
target-fix-task-aborted-status-tas-handling.patch
target-fix-write_same-discard-conversion-to-linux-512b-sectors.patch

queue-3.14/series
queue-3.14/target-add-tfo-abort_task-for-aborted-task-resources-release.patch [new file with mode: 0644]
queue-3.14/target-fix-lun_reset-active-i-o-handling-for-ack_kref.patch [new file with mode: 0644]
queue-3.14/target-fix-lun_reset-active-tmr-descriptor-handling.patch [new file with mode: 0644]
queue-3.14/target-fix-race-with-scf_send_delayed_tas-handling.patch [new file with mode: 0644]
queue-3.14/target-fix-remote-port-tmr-abort-se_cmd-fabric-stop.patch [new file with mode: 0644]
queue-3.14/target-fix-tas-handling-for-multi-session-se_node_acls.patch [new file with mode: 0644]
queue-3.14/target-fix-task-aborted-status-tas-handling.patch [new file with mode: 0644]
queue-3.14/target-fix-write_same-discard-conversion-to-linux-512b-sectors.patch [new file with mode: 0644]

index 94fbb9cd717bfb8aef597586f2c4f714f17e39a9..f844881f9ac01b8a365a907aa1ea76a4d49db51d 100644 (file)
@@ -26,3 +26,11 @@ usb-serial-option-add-support-for-telit-le922-pid-0x1045.patch
 usb-serial-option-add-support-for-quectel-uc20.patch
 mips-traps-fix-sigfpe-information-leak-from-do_ov-and-do_trap_or_bp.patch
 ubi-fix-out-of-bounds-write-in-volume-update-code.patch
+target-fix-task-aborted-status-tas-handling.patch
+target-add-tfo-abort_task-for-aborted-task-resources-release.patch
+target-fix-lun_reset-active-tmr-descriptor-handling.patch
+target-fix-lun_reset-active-i-o-handling-for-ack_kref.patch
+target-fix-tas-handling-for-multi-session-se_node_acls.patch
+target-fix-remote-port-tmr-abort-se_cmd-fabric-stop.patch
+target-fix-race-with-scf_send_delayed_tas-handling.patch
+target-fix-write_same-discard-conversion-to-linux-512b-sectors.patch
diff --git a/queue-3.14/target-add-tfo-abort_task-for-aborted-task-resources-release.patch b/queue-3.14/target-add-tfo-abort_task-for-aborted-task-resources-release.patch
new file mode 100644 (file)
index 0000000..b66f26b
--- /dev/null
@@ -0,0 +1,424 @@
+From 131e6abc674edb9f9a59090bb35bf6650569b7e7 Mon Sep 17 00:00:00 2001
+From: Nicholas Bellinger <nab@linux-iscsi.org>
+Date: Sat, 22 Mar 2014 14:55:56 -0700
+Subject: target: Add TFO->abort_task for aborted task resources release
+
+From: Nicholas Bellinger <nab@linux-iscsi.org>
+
+commit 131e6abc674edb9f9a59090bb35bf6650569b7e7 upstream.
+
+Now that TASK_ABORTED status is not generated for all cases by
+TMR ABORT_TASK + LUN_RESET, a new TFO->abort_task() caller is
+necessary in order to give fabric drivers a chance to unmap
+hardware / software resources before the se_cmd descriptor is
+released via the normal TFO->release_cmd() codepath.
+
+This patch adds TFO->aborted_task() in core_tmr_abort_task()
+in place of the original transport_send_task_abort(), and
+also updates all fabric drivers to implement this caller.
+
+The fabric drivers that include changes to perform cleanup
+via ->aborted_task() are:
+
+  - iscsi-target
+  - iser-target
+  - srpt
+  - tcm_qla2xxx
+
+The fabric drivers that currently set ->aborted_task() to
+NOPs are:
+
+  - loopback
+  - tcm_fc
+  - usb-gadget
+  - sbp-target
+  - vhost-scsi
+
+For the latter five, there appears to be no additional cleanup
+required before invoking TFO->release_cmd() to release the
+se_cmd descriptor.
+
+v2 changes:
+  - Move ->aborted_task() call into transport_cmd_finish_abort (Alex)
+
+Cc: Alex Leung <amleung21@yahoo.com>
+Cc: Mark Rustad <mark.d.rustad@intel.com>
+Cc: Roland Dreier <roland@kernel.org>
+Cc: Vu Pham <vu@mellanox.com>
+Cc: Chris Boot <bootc@bootc.net>
+Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Cc: Michael S. Tsirkin <mst@redhat.com>
+Cc: Giridhar Malavali <giridhar.malavali@qlogic.com>
+Cc: Saurav Kashyap <saurav.kashyap@qlogic.com>
+Cc: Quinn Tran <quinn.tran@qlogic.com>
+Cc: Sagi Grimberg <sagig@mellanox.com>
+Cc: Or Gerlitz <ogerlitz@mellanox.com>
+Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/ulp/isert/ib_isert.c      |   19 +++++++++++++++++++
+ drivers/infiniband/ulp/srpt/ib_srpt.c        |    9 +++++++++
+ drivers/scsi/qla2xxx/tcm_qla2xxx.c           |   16 ++++++++++++++++
+ drivers/target/iscsi/iscsi_target.c          |   13 +++++++++++++
+ drivers/target/iscsi/iscsi_target_configfs.c |    8 ++++++++
+ drivers/target/iscsi/iscsi_target_util.c     |    4 ++--
+ drivers/target/iscsi/iscsi_target_util.h     |    1 +
+ drivers/target/loopback/tcm_loop.c           |    6 ++++++
+ drivers/target/sbp/sbp_target.c              |    6 ++++++
+ drivers/target/target_core_configfs.c        |    4 ++++
+ drivers/target/target_core_transport.c       |    6 ++++++
+ drivers/target/tcm_fc/tcm_fc.h               |    1 +
+ drivers/target/tcm_fc/tfc_cmd.c              |    5 +++++
+ drivers/target/tcm_fc/tfc_conf.c             |    1 +
+ drivers/usb/gadget/tcm_usb_gadget.c          |    6 ++++++
+ drivers/vhost/scsi.c                         |    6 ++++++
+ include/target/iscsi/iscsi_transport.h       |    1 +
+ include/target/target_core_fabric.h          |    1 +
+ 18 files changed, 111 insertions(+), 2 deletions(-)
+
+--- a/drivers/infiniband/ulp/isert/ib_isert.c
++++ b/drivers/infiniband/ulp/isert/ib_isert.c
+@@ -2071,6 +2071,24 @@ isert_put_response(struct iscsi_conn *co
+       return isert_post_response(isert_conn, isert_cmd);
+ }
++static void
++isert_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
++{
++      struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
++      struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
++      struct isert_device *device = isert_conn->conn_device;
++
++      spin_lock_bh(&conn->cmd_lock);
++      if (!list_empty(&cmd->i_conn_node))
++              list_del_init(&cmd->i_conn_node);
++      spin_unlock_bh(&conn->cmd_lock);
++
++      if (cmd->data_direction == DMA_TO_DEVICE)
++              iscsit_stop_dataout_timer(cmd);
++
++      device->unreg_rdma_mem(isert_cmd, isert_conn);
++}
++
+ static int
+ isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
+               bool nopout_response)
+@@ -2999,6 +3017,7 @@ static struct iscsit_transport iser_targ
+       .iscsit_get_dataout     = isert_get_dataout,
+       .iscsit_queue_data_in   = isert_put_datain,
+       .iscsit_queue_status    = isert_put_response,
++      .iscsit_aborted_task    = isert_aborted_task,
+ };
+ static int __init isert_init(void)
+--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
++++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
+@@ -3093,6 +3093,14 @@ static void srpt_queue_tm_rsp(struct se_
+       srpt_queue_response(cmd);
+ }
++static void srpt_aborted_task(struct se_cmd *cmd)
++{
++      struct srpt_send_ioctx *ioctx = container_of(cmd,
++                              struct srpt_send_ioctx, cmd);
++
++      srpt_unmap_sg_to_ib_sge(ioctx->ch, ioctx);
++}
++
+ static int srpt_queue_status(struct se_cmd *cmd)
+ {
+       struct srpt_send_ioctx *ioctx;
+@@ -3940,6 +3948,7 @@ static struct target_core_fabric_ops srp
+       .queue_data_in                  = srpt_queue_data_in,
+       .queue_status                   = srpt_queue_status,
+       .queue_tm_rsp                   = srpt_queue_tm_rsp,
++      .aborted_task                   = srpt_aborted_task,
+       /*
+        * Setup function pointers for generic logic in
+        * target_core_fabric_configfs.c
+--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
++++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+@@ -684,6 +684,20 @@ static void tcm_qla2xxx_queue_tm_rsp(str
+       qlt_xmit_tm_rsp(mcmd);
+ }
++static void tcm_qla2xxx_aborted_task(struct se_cmd *se_cmd)
++{
++      struct qla_tgt_cmd *cmd = container_of(se_cmd,
++                              struct qla_tgt_cmd, se_cmd);
++      struct scsi_qla_host *vha = cmd->vha;
++      struct qla_hw_data *ha = vha->hw;
++
++      if (!cmd->sg_mapped)
++              return;
++
++      pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);
++      cmd->sg_mapped = 0;
++}
++
+ /* Local pointer to allocated TCM configfs fabric module */
+ struct target_fabric_configfs *tcm_qla2xxx_fabric_configfs;
+ struct target_fabric_configfs *tcm_qla2xxx_npiv_fabric_configfs;
+@@ -1886,6 +1900,7 @@ static struct target_core_fabric_ops tcm
+       .queue_data_in                  = tcm_qla2xxx_queue_data_in,
+       .queue_status                   = tcm_qla2xxx_queue_status,
+       .queue_tm_rsp                   = tcm_qla2xxx_queue_tm_rsp,
++      .aborted_task                   = tcm_qla2xxx_aborted_task,
+       /*
+        * Setup function pointers for generic logic in
+        * target_core_fabric_configfs.c
+@@ -1935,6 +1950,7 @@ static struct target_core_fabric_ops tcm
+       .queue_data_in                  = tcm_qla2xxx_queue_data_in,
+       .queue_status                   = tcm_qla2xxx_queue_status,
+       .queue_tm_rsp                   = tcm_qla2xxx_queue_tm_rsp,
++      .aborted_task                   = tcm_qla2xxx_aborted_task,
+       /*
+        * Setup function pointers for generic logic in
+        * target_core_fabric_configfs.c
+--- a/drivers/target/iscsi/iscsi_target.c
++++ b/drivers/target/iscsi/iscsi_target.c
+@@ -500,6 +500,18 @@ static int iscsit_queue_rsp(struct iscsi
+       return 0;
+ }
++static void iscsit_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
++{
++      bool scsi_cmd = (cmd->iscsi_opcode == ISCSI_OP_SCSI_CMD);
++
++      spin_lock_bh(&conn->cmd_lock);
++      if (!list_empty(&cmd->i_conn_node))
++              list_del_init(&cmd->i_conn_node);
++      spin_unlock_bh(&conn->cmd_lock);
++
++      __iscsit_free_cmd(cmd, scsi_cmd, true);
++}
++
+ static struct iscsit_transport iscsi_target_transport = {
+       .name                   = "iSCSI/TCP",
+       .transport_type         = ISCSI_TCP,
+@@ -514,6 +526,7 @@ static struct iscsit_transport iscsi_tar
+       .iscsit_response_queue  = iscsit_response_queue,
+       .iscsit_queue_data_in   = iscsit_queue_rsp,
+       .iscsit_queue_status    = iscsit_queue_rsp,
++      .iscsit_aborted_task    = iscsit_aborted_task,
+ };
+ static int __init iscsi_target_init_module(void)
+--- a/drivers/target/iscsi/iscsi_target_configfs.c
++++ b/drivers/target/iscsi/iscsi_target_configfs.c
+@@ -1815,6 +1815,13 @@ static void lio_queue_tm_rsp(struct se_c
+       iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
+ }
++static void lio_aborted_task(struct se_cmd *se_cmd)
++{
++      struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
++
++      cmd->conn->conn_transport->iscsit_aborted_task(cmd->conn, cmd);
++}
++
+ static char *lio_tpg_get_endpoint_wwn(struct se_portal_group *se_tpg)
+ {
+       struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
+@@ -2013,6 +2020,7 @@ int iscsi_target_register_configfs(void)
+       fabric->tf_ops.queue_data_in = &lio_queue_data_in;
+       fabric->tf_ops.queue_status = &lio_queue_status;
+       fabric->tf_ops.queue_tm_rsp = &lio_queue_tm_rsp;
++      fabric->tf_ops.aborted_task = &lio_aborted_task;
+       /*
+        * Setup function pointers for generic logic in target_core_fabric_configfs.c
+        */
+--- a/drivers/target/iscsi/iscsi_target_util.c
++++ b/drivers/target/iscsi/iscsi_target_util.c
+@@ -705,8 +705,8 @@ void iscsit_release_cmd(struct iscsi_cmd
+ }
+ EXPORT_SYMBOL(iscsit_release_cmd);
+-static void __iscsit_free_cmd(struct iscsi_cmd *cmd, bool scsi_cmd,
+-                            bool check_queues)
++void __iscsit_free_cmd(struct iscsi_cmd *cmd, bool scsi_cmd,
++                     bool check_queues)
+ {
+       struct iscsi_conn *conn = cmd->conn;
+--- a/drivers/target/iscsi/iscsi_target_util.h
++++ b/drivers/target/iscsi/iscsi_target_util.h
+@@ -30,6 +30,7 @@ extern void iscsit_remove_cmd_from_tx_qu
+ extern bool iscsit_conn_all_queues_empty(struct iscsi_conn *);
+ extern void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *);
+ extern void iscsit_release_cmd(struct iscsi_cmd *);
++extern void __iscsit_free_cmd(struct iscsi_cmd *, bool, bool);
+ extern void iscsit_free_cmd(struct iscsi_cmd *, bool);
+ extern int iscsit_check_session_usage_count(struct iscsi_session *);
+ extern void iscsit_dec_session_usage_count(struct iscsi_session *);
+--- a/drivers/target/loopback/tcm_loop.c
++++ b/drivers/target/loopback/tcm_loop.c
+@@ -892,6 +892,11 @@ static void tcm_loop_queue_tm_rsp(struct
+       wake_up(&tl_tmr->tl_tmr_wait);
+ }
++static void tcm_loop_aborted_task(struct se_cmd *se_cmd)
++{
++      return;
++}
++
+ static char *tcm_loop_dump_proto_id(struct tcm_loop_hba *tl_hba)
+ {
+       switch (tl_hba->tl_proto_id) {
+@@ -1456,6 +1461,7 @@ static int tcm_loop_register_configfs(vo
+       fabric->tf_ops.queue_data_in = &tcm_loop_queue_data_in;
+       fabric->tf_ops.queue_status = &tcm_loop_queue_status;
+       fabric->tf_ops.queue_tm_rsp = &tcm_loop_queue_tm_rsp;
++      fabric->tf_ops.aborted_task = &tcm_loop_aborted_task;
+       /*
+        * Setup function pointers for generic logic in target_core_fabric_configfs.c
+--- a/drivers/target/sbp/sbp_target.c
++++ b/drivers/target/sbp/sbp_target.c
+@@ -1846,6 +1846,11 @@ static void sbp_queue_tm_rsp(struct se_c
+ {
+ }
++static void sbp_aborted_task(struct se_cmd *se_cmd)
++{
++      return;
++}
++
+ static int sbp_check_stop_free(struct se_cmd *se_cmd)
+ {
+       struct sbp_target_request *req = container_of(se_cmd,
+@@ -2526,6 +2531,7 @@ static struct target_core_fabric_ops sbp
+       .queue_data_in                  = sbp_queue_data_in,
+       .queue_status                   = sbp_queue_status,
+       .queue_tm_rsp                   = sbp_queue_tm_rsp,
++      .aborted_task                   = sbp_aborted_task,
+       .check_stop_free                = sbp_check_stop_free,
+       .fabric_make_wwn                = sbp_make_tport,
+--- a/drivers/target/target_core_configfs.c
++++ b/drivers/target/target_core_configfs.c
+@@ -457,6 +457,10 @@ static int target_fabric_tf_ops_check(
+               pr_err("Missing tfo->queue_tm_rsp()\n");
+               return -EINVAL;
+       }
++      if (!tfo->aborted_task) {
++              pr_err("Missing tfo->aborted_task()\n");
++              return -EINVAL;
++      }
+       /*
+        * We at least require tfo->fabric_make_wwn(), tfo->fabric_drop_wwn()
+        * tfo->fabric_make_tpg() and tfo->fabric_drop_tpg() in
+--- a/drivers/target/target_core_transport.c
++++ b/drivers/target/target_core_transport.c
+@@ -605,6 +605,12 @@ void transport_cmd_finish_abort(struct s
+ {
+       if (cmd->se_cmd_flags & SCF_SE_LUN_CMD)
+               transport_lun_remove_cmd(cmd);
++      /*
++       * Allow the fabric driver to unmap any resources before
++       * releasing the descriptor via TFO->release_cmd()
++       */
++      if (remove)
++              cmd->se_tfo->aborted_task(cmd);
+       if (transport_cmd_check_stop_to_fabric(cmd))
+               return;
+--- a/drivers/target/tcm_fc/tcm_fc.h
++++ b/drivers/target/tcm_fc/tcm_fc.h
+@@ -163,6 +163,7 @@ int ft_write_pending_status(struct se_cm
+ u32 ft_get_task_tag(struct se_cmd *);
+ int ft_get_cmd_state(struct se_cmd *);
+ void ft_queue_tm_resp(struct se_cmd *);
++void ft_aborted_task(struct se_cmd *);
+ /*
+  * other internal functions.
+--- a/drivers/target/tcm_fc/tfc_cmd.c
++++ b/drivers/target/tcm_fc/tfc_cmd.c
+@@ -426,6 +426,11 @@ void ft_queue_tm_resp(struct se_cmd *se_
+       ft_send_resp_code(cmd, code);
+ }
++void ft_aborted_task(struct se_cmd *se_cmd)
++{
++      return;
++}
++
+ static void ft_send_work(struct work_struct *work);
+ /*
+--- a/drivers/target/tcm_fc/tfc_conf.c
++++ b/drivers/target/tcm_fc/tfc_conf.c
+@@ -536,6 +536,7 @@ static struct target_core_fabric_ops ft_
+       .queue_data_in =                ft_queue_data_in,
+       .queue_status =                 ft_queue_status,
+       .queue_tm_rsp =                 ft_queue_tm_resp,
++      .aborted_task =                 ft_aborted_task,
+       /*
+        * Setup function pointers for generic logic in
+        * target_core_fabric_configfs.c
+--- a/drivers/usb/gadget/tcm_usb_gadget.c
++++ b/drivers/usb/gadget/tcm_usb_gadget.c
+@@ -1471,6 +1471,11 @@ static void usbg_queue_tm_rsp(struct se_
+ {
+ }
++static void usbg_aborted_task(struct se_cmd *se_cmd)
++{
++      return;
++}
++
+ static const char *usbg_check_wwn(const char *name)
+ {
+       const char *n;
+@@ -1897,6 +1902,7 @@ static struct target_core_fabric_ops usb
+       .queue_data_in                  = usbg_send_read_response,
+       .queue_status                   = usbg_send_status_response,
+       .queue_tm_rsp                   = usbg_queue_tm_rsp,
++      .aborted_task                   = usbg_aborted_task,
+       .check_stop_free                = usbg_check_stop_free,
+       .fabric_make_wwn                = usbg_make_tport,
+--- a/drivers/vhost/scsi.c
++++ b/drivers/vhost/scsi.c
+@@ -539,6 +539,11 @@ static void tcm_vhost_queue_tm_rsp(struc
+       return;
+ }
++static void tcm_vhost_aborted_task(struct se_cmd *se_cmd)
++{
++      return;
++}
++
+ static void tcm_vhost_free_evt(struct vhost_scsi *vs, struct tcm_vhost_evt *evt)
+ {
+       vs->vs_events_nr--;
+@@ -2173,6 +2178,7 @@ static struct target_core_fabric_ops tcm
+       .queue_data_in                  = tcm_vhost_queue_data_in,
+       .queue_status                   = tcm_vhost_queue_status,
+       .queue_tm_rsp                   = tcm_vhost_queue_tm_rsp,
++      .aborted_task                   = tcm_vhost_aborted_task,
+       /*
+        * Setup callers for generic logic in target_core_fabric_configfs.c
+        */
+--- a/include/target/iscsi/iscsi_transport.h
++++ b/include/target/iscsi/iscsi_transport.h
+@@ -21,6 +21,7 @@ struct iscsit_transport {
+       int (*iscsit_get_dataout)(struct iscsi_conn *, struct iscsi_cmd *, bool);
+       int (*iscsit_queue_data_in)(struct iscsi_conn *, struct iscsi_cmd *);
+       int (*iscsit_queue_status)(struct iscsi_conn *, struct iscsi_cmd *);
++      void (*iscsit_aborted_task)(struct iscsi_conn *, struct iscsi_cmd *);
+ };
+ static inline void *iscsit_priv_cmd(struct iscsi_cmd *cmd)
+--- a/include/target/target_core_fabric.h
++++ b/include/target/target_core_fabric.h
+@@ -62,6 +62,7 @@ struct target_core_fabric_ops {
+       int (*queue_data_in)(struct se_cmd *);
+       int (*queue_status)(struct se_cmd *);
+       void (*queue_tm_rsp)(struct se_cmd *);
++      void (*aborted_task)(struct se_cmd *);
+       /*
+        * fabric module calls for target_core_fabric_configfs.c
+        */
diff --git a/queue-3.14/target-fix-lun_reset-active-i-o-handling-for-ack_kref.patch b/queue-3.14/target-fix-lun_reset-active-i-o-handling-for-ack_kref.patch
new file mode 100644 (file)
index 0000000..9888d74
--- /dev/null
@@ -0,0 +1,359 @@
+From febe562c20dfa8f33bee7d419c6b517986a5aa33 Mon Sep 17 00:00:00 2001
+From: Nicholas Bellinger <nab@linux-iscsi.org>
+Date: Mon, 11 Jan 2016 21:31:09 -0800
+Subject: target: Fix LUN_RESET active I/O handling for ACK_KREF
+
+From: Nicholas Bellinger <nab@linux-iscsi.org>
+
+commit febe562c20dfa8f33bee7d419c6b517986a5aa33 upstream.
+
+This patch fixes a NULL pointer se_cmd->cmd_kref < 0
+refcount bug during TMR LUN_RESET with active se_cmd
+I/O, that can be triggered during se_cmd descriptor
+shutdown + release via core_tmr_drain_state_list() code.
+
+To address this bug, add common __target_check_io_state()
+helper for ABORT_TASK + LUN_RESET w/ CMD_T_COMPLETE
+checking, and set CMD_T_ABORTED + obtain ->cmd_kref for
+both cases ahead of last target_put_sess_cmd() after
+TFO->aborted_task() -> transport_cmd_finish_abort()
+callback has completed.
+
+It also introduces SCF_ACK_KREF to determine when
+transport_cmd_finish_abort() needs to drop the second
+extra reference, ahead of calling target_put_sess_cmd()
+for the final kref_put(&se_cmd->cmd_kref).
+
+It also updates transport_cmd_check_stop() to avoid
+holding se_cmd->t_state_lock while dropping se_cmd
+device state via target_remove_from_state_list(), now
+that core_tmr_drain_state_list() is holding the
+se_device lock while checking se_cmd state from
+within TMR logic.
+
+Finally, move transport_put_cmd() release of SGL +
+TMR + extended CDB memory into target_free_cmd_mem()
+in order to avoid potential resource leaks in TMR
+ABORT_TASK + LUN_RESET code-paths.  Also update
+target_release_cmd_kref() accordingly.
+
+Reviewed-by: Quinn Tran <quinn.tran@qlogic.com>
+Cc: Himanshu Madhani <himanshu.madhani@qlogic.com>
+Cc: Sagi Grimberg <sagig@mellanox.com>
+Cc: Christoph Hellwig <hch@lst.de>
+Cc: Hannes Reinecke <hare@suse.de>
+Cc: Andy Grover <agrover@redhat.com>
+Cc: Mike Christie <mchristi@redhat.com>
+Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+
+---
+ drivers/target/target_core_tmr.c       |   64 +++++++++++++++++++++++--------
+ drivers/target/target_core_transport.c |   67 ++++++++++++++-------------------
+ 2 files changed, 76 insertions(+), 55 deletions(-)
+
+--- a/drivers/target/target_core_tmr.c
++++ b/drivers/target/target_core_tmr.c
+@@ -115,6 +115,34 @@ static int target_check_cdb_and_preempt(
+       return 1;
+ }
++static bool __target_check_io_state(struct se_cmd *se_cmd)
++{
++      struct se_session *sess = se_cmd->se_sess;
++
++      assert_spin_locked(&sess->sess_cmd_lock);
++      WARN_ON_ONCE(!irqs_disabled());
++      /*
++       * If command already reached CMD_T_COMPLETE state within
++       * target_complete_cmd(), this se_cmd has been passed to
++       * fabric driver and will not be aborted.
++       *
++       * Otherwise, obtain a local se_cmd->cmd_kref now for TMR
++       * ABORT_TASK + LUN_RESET for CMD_T_ABORTED processing as
++       * long as se_cmd->cmd_kref is still active unless zero.
++       */
++      spin_lock(&se_cmd->t_state_lock);
++      if (se_cmd->transport_state & CMD_T_COMPLETE) {
++              pr_debug("Attempted to abort io tag: %u already complete,"
++                      " skipping\n", se_cmd->se_tfo->get_task_tag(se_cmd));
++              spin_unlock(&se_cmd->t_state_lock);
++              return false;
++      }
++      se_cmd->transport_state |= CMD_T_ABORTED;
++      spin_unlock(&se_cmd->t_state_lock);
++
++      return kref_get_unless_zero(&se_cmd->cmd_kref);
++}
++
+ void core_tmr_abort_task(
+       struct se_device *dev,
+       struct se_tmr_req *tmr,
+@@ -137,25 +165,20 @@ void core_tmr_abort_task(
+               printk("ABORT_TASK: Found referenced %s task_tag: %u\n",
+                       se_cmd->se_tfo->get_fabric_name(), ref_tag);
+-              spin_lock(&se_cmd->t_state_lock);
+-              if (se_cmd->transport_state & CMD_T_COMPLETE) {
+-                      printk("ABORT_TASK: ref_tag: %u already complete, skipping\n", ref_tag);
+-                      spin_unlock(&se_cmd->t_state_lock);
++              if (!__target_check_io_state(se_cmd)) {
+                       spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
++                      target_put_sess_cmd(se_sess, se_cmd);
+                       goto out;
+               }
+-              se_cmd->transport_state |= CMD_T_ABORTED;
+-              spin_unlock(&se_cmd->t_state_lock);
+               list_del_init(&se_cmd->se_cmd_list);
+-              kref_get(&se_cmd->cmd_kref);
+               spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+               cancel_work_sync(&se_cmd->work);
+               transport_wait_for_tasks(se_cmd);
+-              target_put_sess_cmd(se_sess, se_cmd);
+               transport_cmd_finish_abort(se_cmd, true);
++              target_put_sess_cmd(se_sess, se_cmd);
+               printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for"
+                               " ref_tag: %d\n", ref_tag);
+@@ -260,8 +283,10 @@ static void core_tmr_drain_state_list(
+       struct list_head *preempt_and_abort_list)
+ {
+       LIST_HEAD(drain_task_list);
++      struct se_session *sess;
+       struct se_cmd *cmd, *next;
+       unsigned long flags;
++      int rc;
+       /*
+        * Complete outstanding commands with TASK_ABORTED SAM status.
+@@ -300,6 +325,16 @@ static void core_tmr_drain_state_list(
+               if (prout_cmd == cmd)
+                       continue;
++              sess = cmd->se_sess;
++              if (WARN_ON_ONCE(!sess))
++                      continue;
++
++              spin_lock(&sess->sess_cmd_lock);
++              rc = __target_check_io_state(cmd);
++              spin_unlock(&sess->sess_cmd_lock);
++              if (!rc)
++                      continue;
++
+               list_move_tail(&cmd->state_list, &drain_task_list);
+               cmd->state_active = false;
+       }
+@@ -307,7 +342,7 @@ static void core_tmr_drain_state_list(
+       while (!list_empty(&drain_task_list)) {
+               cmd = list_entry(drain_task_list.next, struct se_cmd, state_list);
+-              list_del(&cmd->state_list);
++              list_del_init(&cmd->state_list);
+               pr_debug("LUN_RESET: %s cmd: %p"
+                       " ITT/CmdSN: 0x%08x/0x%08x, i_state: %d, t_state: %d"
+@@ -331,16 +366,11 @@ static void core_tmr_drain_state_list(
+                * loop above, but we do it down here given that
+                * cancel_work_sync may block.
+                */
+-              if (cmd->t_state == TRANSPORT_COMPLETE)
+-                      cancel_work_sync(&cmd->work);
+-
+-              spin_lock_irqsave(&cmd->t_state_lock, flags);
+-              target_stop_cmd(cmd, &flags);
+-
+-              cmd->transport_state |= CMD_T_ABORTED;
+-              spin_unlock_irqrestore(&cmd->t_state_lock, flags);
++              cancel_work_sync(&cmd->work);
++              transport_wait_for_tasks(cmd);
+               core_tmr_handle_tas_abort(tmr_nacl, cmd, tas);
++              target_put_sess_cmd(cmd->se_sess, cmd);
+       }
+ }
+--- a/drivers/target/target_core_transport.c
++++ b/drivers/target/target_core_transport.c
+@@ -509,9 +509,6 @@ void transport_deregister_session(struct
+ }
+ EXPORT_SYMBOL(transport_deregister_session);
+-/*
+- * Called with cmd->t_state_lock held.
+- */
+ static void target_remove_from_state_list(struct se_cmd *cmd)
+ {
+       struct se_device *dev = cmd->se_dev;
+@@ -536,10 +533,6 @@ static int transport_cmd_check_stop(stru
+ {
+       unsigned long flags;
+-      spin_lock_irqsave(&cmd->t_state_lock, flags);
+-      if (write_pending)
+-              cmd->t_state = TRANSPORT_WRITE_PENDING;
+-
+       if (remove_from_lists) {
+               target_remove_from_state_list(cmd);
+@@ -549,6 +542,10 @@ static int transport_cmd_check_stop(stru
+               cmd->se_lun = NULL;
+       }
++      spin_lock_irqsave(&cmd->t_state_lock, flags);
++      if (write_pending)
++              cmd->t_state = TRANSPORT_WRITE_PENDING;
++
+       /*
+        * Determine if frontend context caller is requesting the stopping of
+        * this command for frontend exceptions.
+@@ -603,6 +600,8 @@ static void transport_lun_remove_cmd(str
+ void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
+ {
++      bool ack_kref = (cmd->se_cmd_flags & SCF_ACK_KREF);
++
+       if (cmd->se_cmd_flags & SCF_SE_LUN_CMD)
+               transport_lun_remove_cmd(cmd);
+       /*
+@@ -614,7 +613,7 @@ void transport_cmd_finish_abort(struct s
+       if (transport_cmd_check_stop_to_fabric(cmd))
+               return;
+-      if (remove)
++      if (remove && ack_kref)
+               transport_put_cmd(cmd);
+ }
+@@ -682,7 +681,7 @@ void target_complete_cmd(struct se_cmd *
+        * Check for case where an explicit ABORT_TASK has been received
+        * and transport_wait_for_tasks() will be waiting for completion..
+        */
+-      if (cmd->transport_state & CMD_T_ABORTED &&
++      if (cmd->transport_state & CMD_T_ABORTED ||
+           cmd->transport_state & CMD_T_STOP) {
+               spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+               complete_all(&cmd->t_transport_stop_comp);
+@@ -2085,20 +2084,14 @@ static inline void transport_free_pages(
+ }
+ /**
+- * transport_release_cmd - free a command
+- * @cmd:       command to free
++ * transport_put_cmd - release a reference to a command
++ * @cmd:       command to release
+  *
+- * This routine unconditionally frees a command, and reference counting
+- * or list removal must be done in the caller.
++ * This routine releases our reference to the command and frees it if possible.
+  */
+-static int transport_release_cmd(struct se_cmd *cmd)
++static int transport_put_cmd(struct se_cmd *cmd)
+ {
+       BUG_ON(!cmd->se_tfo);
+-
+-      if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
+-              core_tmr_release_req(cmd->se_tmr_req);
+-      if (cmd->t_task_cdb != cmd->__t_task_cdb)
+-              kfree(cmd->t_task_cdb);
+       /*
+        * If this cmd has been setup with target_get_sess_cmd(), drop
+        * the kref and call ->release_cmd() in kref callback.
+@@ -2106,18 +2099,6 @@ static int transport_release_cmd(struct
+       return target_put_sess_cmd(cmd->se_sess, cmd);
+ }
+-/**
+- * transport_put_cmd - release a reference to a command
+- * @cmd:       command to release
+- *
+- * This routine releases our reference to the command and frees it if possible.
+- */
+-static int transport_put_cmd(struct se_cmd *cmd)
+-{
+-      transport_free_pages(cmd);
+-      return transport_release_cmd(cmd);
+-}
+-
+ void *transport_kmap_data_sg(struct se_cmd *cmd)
+ {
+       struct scatterlist *sg = cmd->t_data_sg;
+@@ -2307,14 +2288,13 @@ static void transport_write_pending_qf(s
+ int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
+ {
+-      unsigned long flags;
+       int ret = 0;
+       if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) {
+               if (wait_for_tasks && (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
+-                       transport_wait_for_tasks(cmd);
++                      transport_wait_for_tasks(cmd);
+-              ret = transport_release_cmd(cmd);
++              ret = transport_put_cmd(cmd);
+       } else {
+               if (wait_for_tasks)
+                       transport_wait_for_tasks(cmd);
+@@ -2323,11 +2303,8 @@ int transport_generic_free_cmd(struct se
+                * has already added se_cmd to state_list, but fabric has
+                * failed command before I/O submission.
+                */
+-              if (cmd->state_active) {
+-                      spin_lock_irqsave(&cmd->t_state_lock, flags);
++              if (cmd->state_active)
+                       target_remove_from_state_list(cmd);
+-                      spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+-              }
+               if (cmd->se_lun)
+                       transport_lun_remove_cmd(cmd);
+@@ -2375,6 +2352,16 @@ out:
+ }
+ EXPORT_SYMBOL(target_get_sess_cmd);
++static void target_free_cmd_mem(struct se_cmd *cmd)
++{
++      transport_free_pages(cmd);
++
++      if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
++              core_tmr_release_req(cmd->se_tmr_req);
++      if (cmd->t_task_cdb != cmd->__t_task_cdb)
++              kfree(cmd->t_task_cdb);
++}
++
+ static void target_release_cmd_kref(struct kref *kref)
+ {
+       struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
+@@ -2382,17 +2369,20 @@ static void target_release_cmd_kref(stru
+       if (list_empty(&se_cmd->se_cmd_list)) {
+               spin_unlock(&se_sess->sess_cmd_lock);
++              target_free_cmd_mem(se_cmd);
+               se_cmd->se_tfo->release_cmd(se_cmd);
+               return;
+       }
+       if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) {
+               spin_unlock(&se_sess->sess_cmd_lock);
++              target_free_cmd_mem(se_cmd);
+               complete(&se_cmd->cmd_wait_comp);
+               return;
+       }
+       list_del(&se_cmd->se_cmd_list);
+       spin_unlock(&se_sess->sess_cmd_lock);
++      target_free_cmd_mem(se_cmd);
+       se_cmd->se_tfo->release_cmd(se_cmd);
+ }
+@@ -2403,6 +2393,7 @@ static void target_release_cmd_kref(stru
+ int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
+ {
+       if (!se_sess) {
++              target_free_cmd_mem(se_cmd);
+               se_cmd->se_tfo->release_cmd(se_cmd);
+               return 1;
+       }
diff --git a/queue-3.14/target-fix-lun_reset-active-tmr-descriptor-handling.patch b/queue-3.14/target-fix-lun_reset-active-tmr-descriptor-handling.patch
new file mode 100644 (file)
index 0000000..5dd9777
--- /dev/null
@@ -0,0 +1,151 @@
+From a6d9bb1c9605cd4f44e2d8290dc4d0e88f20292d Mon Sep 17 00:00:00 2001
+From: Nicholas Bellinger <nab@linux-iscsi.org>
+Date: Mon, 11 Jan 2016 21:53:05 -0800
+Subject: target: Fix LUN_RESET active TMR descriptor handling
+
+From: Nicholas Bellinger <nab@linux-iscsi.org>
+
+commit a6d9bb1c9605cd4f44e2d8290dc4d0e88f20292d upstream.
+
+This patch fixes a NULL pointer se_cmd->cmd_kref < 0
+refcount bug during TMR LUN_RESET with active TMRs,
+triggered during se_cmd + se_tmr_req descriptor
+shutdown + release via core_tmr_drain_tmr_list().
+
+To address this bug, go ahead and obtain a local
+kref_get_unless_zero(&se_cmd->cmd_kref) for active I/O
+to set CMD_T_ABORTED, and transport_wait_for_tasks()
+followed by the final target_put_sess_cmd() to drop
+the local ->cmd_kref.
+
+Also add two new checks within target_tmr_work() to
+avoid CMD_T_ABORTED -> TFO->queue_tm_rsp() callbacks
+ahead of invoking the backend -> fabric put in
+transport_cmd_check_stop_to_fabric().
+
+For good measure, also change core_tmr_release_req()
+to use list_del_init() ahead of se_tmr_req memory
+free.
+
+Reviewed-by: Quinn Tran <quinn.tran@qlogic.com>
+Cc: Himanshu Madhani <himanshu.madhani@qlogic.com>
+Cc: Sagi Grimberg <sagig@mellanox.com>
+Cc: Christoph Hellwig <hch@lst.de>
+Cc: Hannes Reinecke <hare@suse.de>
+Cc: Andy Grover <agrover@redhat.com>
+Cc: Mike Christie <mchristi@redhat.com>
+Cc: stable@vger.kernel.org # 3.10+
+Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+
+---
+ drivers/target/target_core_tmr.c       |   22 +++++++++++++++++++++-
+ drivers/target/target_core_transport.c |   17 +++++++++++++++++
+ 2 files changed, 38 insertions(+), 1 deletion(-)
+
+--- a/drivers/target/target_core_tmr.c
++++ b/drivers/target/target_core_tmr.c
+@@ -76,7 +76,7 @@ void core_tmr_release_req(
+       }
+       spin_lock_irqsave(&dev->se_tmr_lock, flags);
+-      list_del(&tmr->tmr_list);
++      list_del_init(&tmr->tmr_list);
+       spin_unlock_irqrestore(&dev->se_tmr_lock, flags);
+       kfree(tmr);
+@@ -176,9 +176,11 @@ static void core_tmr_drain_tmr_list(
+       struct list_head *preempt_and_abort_list)
+ {
+       LIST_HEAD(drain_tmr_list);
++      struct se_session *sess;
+       struct se_tmr_req *tmr_p, *tmr_pp;
+       struct se_cmd *cmd;
+       unsigned long flags;
++      bool rc;
+       /*
+        * Release all pending and outgoing TMRs aside from the received
+        * LUN_RESET tmr..
+@@ -204,17 +206,31 @@ static void core_tmr_drain_tmr_list(
+               if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd))
+                       continue;
++              sess = cmd->se_sess;
++              if (WARN_ON_ONCE(!sess))
++                      continue;
++
++              spin_lock(&sess->sess_cmd_lock);
+               spin_lock(&cmd->t_state_lock);
+               if (!(cmd->transport_state & CMD_T_ACTIVE)) {
+                       spin_unlock(&cmd->t_state_lock);
++                      spin_unlock(&sess->sess_cmd_lock);
+                       continue;
+               }
+               if (cmd->t_state == TRANSPORT_ISTATE_PROCESSING) {
+                       spin_unlock(&cmd->t_state_lock);
++                      spin_unlock(&sess->sess_cmd_lock);
+                       continue;
+               }
++              cmd->transport_state |= CMD_T_ABORTED;
+               spin_unlock(&cmd->t_state_lock);
++              rc = kref_get_unless_zero(&cmd->cmd_kref);
++              spin_unlock(&sess->sess_cmd_lock);
++              if (!rc) {
++                      printk("LUN_RESET TMR: non-zero kref_get_unless_zero\n");
++                      continue;
++              }
+               list_move_tail(&tmr_p->tmr_list, &drain_tmr_list);
+       }
+       spin_unlock_irqrestore(&dev->se_tmr_lock, flags);
+@@ -228,7 +244,11 @@ static void core_tmr_drain_tmr_list(
+                       (preempt_and_abort_list) ? "Preempt" : "", tmr_p,
+                       tmr_p->function, tmr_p->response, cmd->t_state);
++              cancel_work_sync(&cmd->work);
++              transport_wait_for_tasks(cmd);
++
+               transport_cmd_finish_abort(cmd, 1);
++              target_put_sess_cmd(cmd->se_sess, cmd);
+       }
+ }
+--- a/drivers/target/target_core_transport.c
++++ b/drivers/target/target_core_transport.c
+@@ -2895,8 +2895,17 @@ static void target_tmr_work(struct work_
+       struct se_cmd *cmd = container_of(work, struct se_cmd, work);
+       struct se_device *dev = cmd->se_dev;
+       struct se_tmr_req *tmr = cmd->se_tmr_req;
++      unsigned long flags;
+       int ret;
++      spin_lock_irqsave(&cmd->t_state_lock, flags);
++      if (cmd->transport_state & CMD_T_ABORTED) {
++              tmr->response = TMR_FUNCTION_REJECTED;
++              spin_unlock_irqrestore(&cmd->t_state_lock, flags);
++              goto check_stop;
++      }
++      spin_unlock_irqrestore(&cmd->t_state_lock, flags);
++
+       switch (tmr->function) {
+       case TMR_ABORT_TASK:
+               core_tmr_abort_task(dev, tmr, cmd->se_sess);
+@@ -2924,9 +2933,17 @@ static void target_tmr_work(struct work_
+               break;
+       }
++      spin_lock_irqsave(&cmd->t_state_lock, flags);
++      if (cmd->transport_state & CMD_T_ABORTED) {
++              spin_unlock_irqrestore(&cmd->t_state_lock, flags);
++              goto check_stop;
++      }
+       cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
++      spin_unlock_irqrestore(&cmd->t_state_lock, flags);
++
+       cmd->se_tfo->queue_tm_rsp(cmd);
++check_stop:
+       transport_cmd_check_stop_to_fabric(cmd);
+ }
diff --git a/queue-3.14/target-fix-race-with-scf_send_delayed_tas-handling.patch b/queue-3.14/target-fix-race-with-scf_send_delayed_tas-handling.patch
new file mode 100644 (file)
index 0000000..2285dae
--- /dev/null
@@ -0,0 +1,144 @@
+From 310d3d314be7f0a84011ebdc4bdccbcae9755a87 Mon Sep 17 00:00:00 2001
+From: Nicholas Bellinger <nab@linux-iscsi.org>
+Date: Fri, 5 Feb 2016 14:51:36 -0800
+Subject: target: Fix race with SCF_SEND_DELAYED_TAS handling
+
+From: Nicholas Bellinger <nab@linux-iscsi.org>
+
+commit 310d3d314be7f0a84011ebdc4bdccbcae9755a87 upstream.
+
+This patch fixes a race between setting of SCF_SEND_DELAYED_TAS
+in transport_send_task_abort(), and check of the same bit in
+transport_check_aborted_status().
+
+It adds a __transport_check_aborted_status() version that is
+used by target_execute_cmd() when se_cmd->t_state_lock is
+held, and a transport_check_aborted_status() wrapper for
+all other existing callers.
+
+Also, it handles the case where the check happens before
+transport_send_task_abort() gets called.  For this, go
+ahead and set SCF_SEND_DELAYED_TAS early when necessary,
+and have transport_send_task_abort() send the abort.
+
+Cc: Quinn Tran <quinn.tran@qlogic.com>
+Cc: Himanshu Madhani <himanshu.madhani@qlogic.com>
+Cc: Sagi Grimberg <sagig@mellanox.com>
+Cc: Christoph Hellwig <hch@lst.de>
+Cc: Hannes Reinecke <hare@suse.de>
+Cc: Andy Grover <agrover@redhat.com>
+Cc: Mike Christie <mchristi@redhat.com>
+Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+
+---
+ drivers/target/target_core_transport.c |   54 +++++++++++++++++++++++++--------
+ 1 file changed, 42 insertions(+), 12 deletions(-)
+
+--- a/drivers/target/target_core_transport.c
++++ b/drivers/target/target_core_transport.c
+@@ -1754,19 +1754,21 @@ static bool target_handle_task_attr(stru
+       return true;
+ }
++static int __transport_check_aborted_status(struct se_cmd *, int);
++
+ void target_execute_cmd(struct se_cmd *cmd)
+ {
+       /*
+-       * If the received CDB has aleady been aborted stop processing it here.
+-       */
+-      if (transport_check_aborted_status(cmd, 1))
+-              return;
+-
+-      /*
+        * Determine if frontend context caller is requesting the stopping of
+        * this command for frontend exceptions.
++       *
++       * If the received CDB has aleady been aborted stop processing it here.
+        */
+       spin_lock_irq(&cmd->t_state_lock);
++      if (__transport_check_aborted_status(cmd, 1)) {
++              spin_unlock_irq(&cmd->t_state_lock);
++              return;
++      }
+       if (cmd->transport_state & CMD_T_STOP) {
+               pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08x\n",
+                       __func__, __LINE__,
+@@ -2895,8 +2897,13 @@ after_reason:
+ }
+ EXPORT_SYMBOL(transport_send_check_condition_and_sense);
+-int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
++static int __transport_check_aborted_status(struct se_cmd *cmd, int send_status)
++      __releases(&cmd->t_state_lock)
++      __acquires(&cmd->t_state_lock)
+ {
++      assert_spin_locked(&cmd->t_state_lock);
++      WARN_ON_ONCE(!irqs_disabled());
++
+       if (!(cmd->transport_state & CMD_T_ABORTED))
+               return 0;
+@@ -2904,19 +2911,37 @@ int transport_check_aborted_status(struc
+        * If cmd has been aborted but either no status is to be sent or it has
+        * already been sent, just return
+        */
+-      if (!send_status || !(cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS))
++      if (!send_status || !(cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS)) {
++              if (send_status)
++                      cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS;
+               return 1;
++      }
+-      pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB: 0x%02x ITT: 0x%08x\n",
+-               cmd->t_task_cdb[0], cmd->se_tfo->get_task_tag(cmd));
++      pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB:"
++              " 0x%02x ITT: 0x%08x\n", cmd->t_task_cdb[0],
++              cmd->se_tfo->get_task_tag(cmd));
+       cmd->se_cmd_flags &= ~SCF_SEND_DELAYED_TAS;
+       cmd->scsi_status = SAM_STAT_TASK_ABORTED;
+       trace_target_cmd_complete(cmd);
++
++      spin_unlock_irq(&cmd->t_state_lock);
+       cmd->se_tfo->queue_status(cmd);
++      spin_lock_irq(&cmd->t_state_lock);
+       return 1;
+ }
++
++int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
++{
++      int ret;
++
++      spin_lock_irq(&cmd->t_state_lock);
++      ret = __transport_check_aborted_status(cmd, send_status);
++      spin_unlock_irq(&cmd->t_state_lock);
++
++      return ret;
++}
+ EXPORT_SYMBOL(transport_check_aborted_status);
+ void transport_send_task_abort(struct se_cmd *cmd)
+@@ -2938,12 +2963,17 @@ void transport_send_task_abort(struct se
+        */
+       if (cmd->data_direction == DMA_TO_DEVICE) {
+               if (cmd->se_tfo->write_pending_status(cmd) != 0) {
+-                      cmd->transport_state |= CMD_T_ABORTED;
++                      spin_lock_irqsave(&cmd->t_state_lock, flags);
++                      if (cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS) {
++                              spin_unlock_irqrestore(&cmd->t_state_lock, flags);
++                              goto send_abort;
++                      }
+                       cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS;
+-                      smp_mb__after_atomic_inc();
++                      spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+                       return;
+               }
+       }
++send_abort:
+       cmd->scsi_status = SAM_STAT_TASK_ABORTED;
+       transport_lun_remove_cmd(cmd);
diff --git a/queue-3.14/target-fix-remote-port-tmr-abort-se_cmd-fabric-stop.patch b/queue-3.14/target-fix-remote-port-tmr-abort-se_cmd-fabric-stop.patch
new file mode 100644 (file)
index 0000000..d8b4901
--- /dev/null
@@ -0,0 +1,442 @@
+From 0f4a943168f31d29a1701908931acaba518b131a Mon Sep 17 00:00:00 2001
+From: Nicholas Bellinger <nab@linux-iscsi.org>
+Date: Tue, 19 Jan 2016 15:23:02 -0800
+Subject: target: Fix remote-port TMR ABORT + se_cmd fabric stop
+
+From: Nicholas Bellinger <nab@linux-iscsi.org>
+
+commit 0f4a943168f31d29a1701908931acaba518b131a upstream.
+
+To address the bug where fabric driver level shutdown
+of se_cmd occurs at the same time when TMR CMD_T_ABORTED
+is happening resulting in a -1 ->cmd_kref, this patch
+adds a CMD_T_FABRIC_STOP bit that is used to determine
+when TMR + driver I_T nexus shutdown is happening
+concurrently.
+
+It changes target_sess_cmd_list_set_waiting() to obtain
+se_cmd->cmd_kref + set CMD_T_FABRIC_STOP, and drop local
+reference in target_wait_for_sess_cmds() and invoke extra
+target_put_sess_cmd() during Task Aborted Status (TAS)
+when necessary.
+
+Also, it adds a new target_wait_free_cmd() wrapper around
+transport_wait_for_tasks() for the special case within
+transport_generic_free_cmd() to set CMD_T_FABRIC_STOP,
+and is now aware of CMD_T_ABORTED + CMD_T_TAS status
+bits to know when an extra transport_put_cmd() during
+TAS is required.
+
+Note transport_generic_free_cmd() is expected to block on
+cmd->cmd_wait_comp in order to follow what iscsi-target
+expects during iscsi_conn context se_cmd shutdown.
+
+Cc: Quinn Tran <quinn.tran@qlogic.com>
+Cc: Himanshu Madhani <himanshu.madhani@qlogic.com>
+Cc: Sagi Grimberg <sagig@mellanox.com>
+Cc: Christoph Hellwig <hch@lst.de>
+Cc: Hannes Reinecke <hare@suse.de>
+Cc: Andy Grover <agrover@redhat.com>
+Cc: Mike Christie <mchristi@redhat.com>
+Signed-off-by: Nicholas Bellinger <nab@daterainc.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+
+---
+ drivers/target/target_core_tmr.c       |   57 +++++++++----
+ drivers/target/target_core_transport.c |  137 +++++++++++++++++++++++++--------
+ include/target/target_core_base.h      |    2 
+ 3 files changed, 148 insertions(+), 48 deletions(-)
+
+--- a/drivers/target/target_core_tmr.c
++++ b/drivers/target/target_core_tmr.c
+@@ -82,16 +82,18 @@ void core_tmr_release_req(
+       kfree(tmr);
+ }
+-static void core_tmr_handle_tas_abort(
+-      struct se_session *tmr_sess,
+-      struct se_cmd *cmd,
+-      int tas)
++static void core_tmr_handle_tas_abort(struct se_cmd *cmd, int tas)
+ {
+-      bool remove = true;
++      unsigned long flags;
++      bool remove = true, send_tas;
+       /*
+        * TASK ABORTED status (TAS) bit support
+-      */
+-      if (tmr_sess && tmr_sess != cmd->se_sess && tas) {
++       */
++      spin_lock_irqsave(&cmd->t_state_lock, flags);
++      send_tas = (cmd->transport_state & CMD_T_TAS);
++      spin_unlock_irqrestore(&cmd->t_state_lock, flags);
++
++      if (send_tas) {
+               remove = false;
+               transport_send_task_abort(cmd);
+       }
+@@ -114,7 +116,8 @@ static int target_check_cdb_and_preempt(
+       return 1;
+ }
+-static bool __target_check_io_state(struct se_cmd *se_cmd)
++static bool __target_check_io_state(struct se_cmd *se_cmd,
++                                  struct se_session *tmr_sess, int tas)
+ {
+       struct se_session *sess = se_cmd->se_sess;
+@@ -122,21 +125,33 @@ static bool __target_check_io_state(stru
+       WARN_ON_ONCE(!irqs_disabled());
+       /*
+        * If command already reached CMD_T_COMPLETE state within
+-       * target_complete_cmd(), this se_cmd has been passed to
+-       * fabric driver and will not be aborted.
++       * target_complete_cmd() or CMD_T_FABRIC_STOP due to shutdown,
++       * this se_cmd has been passed to fabric driver and will
++       * not be aborted.
+        *
+        * Otherwise, obtain a local se_cmd->cmd_kref now for TMR
+        * ABORT_TASK + LUN_RESET for CMD_T_ABORTED processing as
+        * long as se_cmd->cmd_kref is still active unless zero.
+        */
+       spin_lock(&se_cmd->t_state_lock);
+-      if (se_cmd->transport_state & CMD_T_COMPLETE) {
+-              pr_debug("Attempted to abort io tag: %u already complete,"
++      if (se_cmd->transport_state & (CMD_T_COMPLETE | CMD_T_FABRIC_STOP)) {
++              pr_debug("Attempted to abort io tag: %u already complete or"
++                      " fabric stop, skipping\n",
++                      se_cmd->se_tfo->get_task_tag(se_cmd));
++              spin_unlock(&se_cmd->t_state_lock);
++              return false;
++      }
++      if (sess->sess_tearing_down || se_cmd->cmd_wait_set) {
++              pr_debug("Attempted to abort io tag: %u already shutdown,"
+                       " skipping\n", se_cmd->se_tfo->get_task_tag(se_cmd));
+               spin_unlock(&se_cmd->t_state_lock);
+               return false;
+       }
+       se_cmd->transport_state |= CMD_T_ABORTED;
++
++      if ((tmr_sess != se_cmd->se_sess) && tas)
++              se_cmd->transport_state |= CMD_T_TAS;
++
+       spin_unlock(&se_cmd->t_state_lock);
+       return kref_get_unless_zero(&se_cmd->cmd_kref);
+@@ -164,7 +179,7 @@ void core_tmr_abort_task(
+               printk("ABORT_TASK: Found referenced %s task_tag: %u\n",
+                       se_cmd->se_tfo->get_fabric_name(), ref_tag);
+-              if (!__target_check_io_state(se_cmd)) {
++              if (!__target_check_io_state(se_cmd, se_sess, 0)) {
+                       spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+                       target_put_sess_cmd(se_sess, se_cmd);
+                       goto out;
+@@ -234,7 +249,8 @@ static void core_tmr_drain_tmr_list(
+               spin_lock(&sess->sess_cmd_lock);
+               spin_lock(&cmd->t_state_lock);
+-              if (!(cmd->transport_state & CMD_T_ACTIVE)) {
++              if (!(cmd->transport_state & CMD_T_ACTIVE) ||
++                   (cmd->transport_state & CMD_T_FABRIC_STOP)) {
+                       spin_unlock(&cmd->t_state_lock);
+                       spin_unlock(&sess->sess_cmd_lock);
+                       continue;
+@@ -244,15 +260,22 @@ static void core_tmr_drain_tmr_list(
+                       spin_unlock(&sess->sess_cmd_lock);
+                       continue;
+               }
++              if (sess->sess_tearing_down || cmd->cmd_wait_set) {
++                      spin_unlock(&cmd->t_state_lock);
++                      spin_unlock(&sess->sess_cmd_lock);
++                      continue;
++              }
+               cmd->transport_state |= CMD_T_ABORTED;
+               spin_unlock(&cmd->t_state_lock);
+               rc = kref_get_unless_zero(&cmd->cmd_kref);
+-              spin_unlock(&sess->sess_cmd_lock);
+               if (!rc) {
+                       printk("LUN_RESET TMR: non-zero kref_get_unless_zero\n");
++                      spin_unlock(&sess->sess_cmd_lock);
+                       continue;
+               }
++              spin_unlock(&sess->sess_cmd_lock);
++
+               list_move_tail(&tmr_p->tmr_list, &drain_tmr_list);
+       }
+       spin_unlock_irqrestore(&dev->se_tmr_lock, flags);
+@@ -329,7 +352,7 @@ static void core_tmr_drain_state_list(
+                       continue;
+               spin_lock(&sess->sess_cmd_lock);
+-              rc = __target_check_io_state(cmd);
++              rc = __target_check_io_state(cmd, tmr_sess, tas);
+               spin_unlock(&sess->sess_cmd_lock);
+               if (!rc)
+                       continue;
+@@ -368,7 +391,7 @@ static void core_tmr_drain_state_list(
+               cancel_work_sync(&cmd->work);
+               transport_wait_for_tasks(cmd);
+-              core_tmr_handle_tas_abort(tmr_sess, cmd, tas);
++              core_tmr_handle_tas_abort(cmd, tas);
+               target_put_sess_cmd(cmd->se_sess, cmd);
+       }
+ }
+--- a/drivers/target/target_core_transport.c
++++ b/drivers/target/target_core_transport.c
+@@ -2286,18 +2286,33 @@ static void transport_write_pending_qf(s
+       }
+ }
++static bool
++__transport_wait_for_tasks(struct se_cmd *, bool, bool *, bool *,
++                         unsigned long *flags);
++
++static void target_wait_free_cmd(struct se_cmd *cmd, bool *aborted, bool *tas)
++{
++      unsigned long flags;
++
++      spin_lock_irqsave(&cmd->t_state_lock, flags);
++      __transport_wait_for_tasks(cmd, true, aborted, tas, &flags);
++      spin_unlock_irqrestore(&cmd->t_state_lock, flags);
++}
++
+ int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
+ {
+       int ret = 0;
++      bool aborted = false, tas = false;
+       if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) {
+               if (wait_for_tasks && (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
+-                      transport_wait_for_tasks(cmd);
++                      target_wait_free_cmd(cmd, &aborted, &tas);
+-              ret = transport_put_cmd(cmd);
++              if (!aborted || tas)
++                      ret = transport_put_cmd(cmd);
+       } else {
+               if (wait_for_tasks)
+-                      transport_wait_for_tasks(cmd);
++                      target_wait_free_cmd(cmd, &aborted, &tas);
+               /*
+                * Handle WRITE failure case where transport_generic_new_cmd()
+                * has already added se_cmd to state_list, but fabric has
+@@ -2309,7 +2324,21 @@ int transport_generic_free_cmd(struct se
+               if (cmd->se_lun)
+                       transport_lun_remove_cmd(cmd);
+-              ret = transport_put_cmd(cmd);
++              if (!aborted || tas)
++                      ret = transport_put_cmd(cmd);
++      }
++      /*
++       * If the task has been internally aborted due to TMR ABORT_TASK
++       * or LUN_RESET, target_core_tmr.c is responsible for performing
++       * the remaining calls to target_put_sess_cmd(), and not the
++       * callers of this function.
++       */
++      if (aborted) {
++              pr_debug("Detected CMD_T_ABORTED for ITT: %u\n",
++                      cmd->se_tfo->get_task_tag(cmd));
++              wait_for_completion(&cmd->cmd_wait_comp);
++              cmd->se_tfo->release_cmd(cmd);
++              ret = 1;
+       }
+       return ret;
+ }
+@@ -2366,6 +2395,7 @@ static void target_release_cmd_kref(stru
+ {
+       struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
+       struct se_session *se_sess = se_cmd->se_sess;
++      bool fabric_stop;
+       if (list_empty(&se_cmd->se_cmd_list)) {
+               spin_unlock(&se_sess->sess_cmd_lock);
+@@ -2373,13 +2403,19 @@ static void target_release_cmd_kref(stru
+               se_cmd->se_tfo->release_cmd(se_cmd);
+               return;
+       }
+-      if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) {
++
++      spin_lock(&se_cmd->t_state_lock);
++      fabric_stop = (se_cmd->transport_state & CMD_T_FABRIC_STOP);
++      spin_unlock(&se_cmd->t_state_lock);
++
++      if (se_cmd->cmd_wait_set || fabric_stop) {
++              list_del_init(&se_cmd->se_cmd_list);
+               spin_unlock(&se_sess->sess_cmd_lock);
+               target_free_cmd_mem(se_cmd);
+               complete(&se_cmd->cmd_wait_comp);
+               return;
+       }
+-      list_del(&se_cmd->se_cmd_list);
++      list_del_init(&se_cmd->se_cmd_list);
+       spin_unlock(&se_sess->sess_cmd_lock);
+       target_free_cmd_mem(se_cmd);
+@@ -2411,6 +2447,7 @@ void target_sess_cmd_list_set_waiting(st
+ {
+       struct se_cmd *se_cmd;
+       unsigned long flags;
++      int rc;
+       spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
+       if (se_sess->sess_tearing_down) {
+@@ -2420,8 +2457,15 @@ void target_sess_cmd_list_set_waiting(st
+       se_sess->sess_tearing_down = 1;
+       list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list);
+-      list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list)
+-              se_cmd->cmd_wait_set = 1;
++      list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list) {
++              rc = kref_get_unless_zero(&se_cmd->cmd_kref);
++              if (rc) {
++                      se_cmd->cmd_wait_set = 1;
++                      spin_lock(&se_cmd->t_state_lock);
++                      se_cmd->transport_state |= CMD_T_FABRIC_STOP;
++                      spin_unlock(&se_cmd->t_state_lock);
++              }
++      }
+       spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+ }
+@@ -2434,15 +2478,25 @@ void target_wait_for_sess_cmds(struct se
+ {
+       struct se_cmd *se_cmd, *tmp_cmd;
+       unsigned long flags;
++      bool tas;
+       list_for_each_entry_safe(se_cmd, tmp_cmd,
+                               &se_sess->sess_wait_list, se_cmd_list) {
+-              list_del(&se_cmd->se_cmd_list);
++              list_del_init(&se_cmd->se_cmd_list);
+               pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:"
+                       " %d\n", se_cmd, se_cmd->t_state,
+                       se_cmd->se_tfo->get_cmd_state(se_cmd));
++              spin_lock_irqsave(&se_cmd->t_state_lock, flags);
++              tas = (se_cmd->transport_state & CMD_T_TAS);
++              spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
++
++              if (!target_put_sess_cmd(se_sess, se_cmd)) {
++                      if (tas)
++                              target_put_sess_cmd(se_sess, se_cmd);
++              }
++
+               wait_for_completion(&se_cmd->cmd_wait_comp);
+               pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d"
+                       " fabric state: %d\n", se_cmd, se_cmd->t_state,
+@@ -2485,34 +2539,38 @@ int transport_clear_lun_ref(struct se_lu
+       return 0;
+ }
+-/**
+- * transport_wait_for_tasks - wait for completion to occur
+- * @cmd:      command to wait
+- *
+- * Called from frontend fabric context to wait for storage engine
+- * to pause and/or release frontend generated struct se_cmd.
+- */
+-bool transport_wait_for_tasks(struct se_cmd *cmd)
++static bool
++__transport_wait_for_tasks(struct se_cmd *cmd, bool fabric_stop,
++                         bool *aborted, bool *tas, unsigned long *flags)
++      __releases(&cmd->t_state_lock)
++      __acquires(&cmd->t_state_lock)
+ {
+-      unsigned long flags;
+-      spin_lock_irqsave(&cmd->t_state_lock, flags);
++      assert_spin_locked(&cmd->t_state_lock);
++      WARN_ON_ONCE(!irqs_disabled());
++
++      if (fabric_stop)
++              cmd->transport_state |= CMD_T_FABRIC_STOP;
++
++      if (cmd->transport_state & CMD_T_ABORTED)
++              *aborted = true;
++
++      if (cmd->transport_state & CMD_T_TAS)
++              *tas = true;
++
+       if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) &&
+-          !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
+-              spin_unlock_irqrestore(&cmd->t_state_lock, flags);
++          !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
+               return false;
+-      }
+       if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) &&
+-          !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
+-              spin_unlock_irqrestore(&cmd->t_state_lock, flags);
++          !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
+               return false;
+-      }
+-      if (!(cmd->transport_state & CMD_T_ACTIVE)) {
+-              spin_unlock_irqrestore(&cmd->t_state_lock, flags);
++      if (!(cmd->transport_state & CMD_T_ACTIVE))
++              return false;
++
++      if (fabric_stop && *aborted)
+               return false;
+-      }
+       cmd->transport_state |= CMD_T_STOP;
+@@ -2521,20 +2579,37 @@ bool transport_wait_for_tasks(struct se_
+               cmd, cmd->se_tfo->get_task_tag(cmd),
+               cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
+-      spin_unlock_irqrestore(&cmd->t_state_lock, flags);
++      spin_unlock_irqrestore(&cmd->t_state_lock, *flags);
+       wait_for_completion(&cmd->t_transport_stop_comp);
+-      spin_lock_irqsave(&cmd->t_state_lock, flags);
++      spin_lock_irqsave(&cmd->t_state_lock, *flags);
+       cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP);
+       pr_debug("wait_for_tasks: Stopped wait_for_completion("
+               "&cmd->t_transport_stop_comp) for ITT: 0x%08x\n",
+               cmd->se_tfo->get_task_tag(cmd));
++      return true;
++}
++
++/**
++ * transport_wait_for_tasks - wait for completion to occur
++ * @cmd:      command to wait
++ *
++ * Called from frontend fabric context to wait for storage engine
++ * to pause and/or release frontend generated struct se_cmd.
++ */
++bool transport_wait_for_tasks(struct se_cmd *cmd)
++{
++      unsigned long flags;
++      bool ret, aborted = false, tas = false;
++
++      spin_lock_irqsave(&cmd->t_state_lock, flags);
++      ret = __transport_wait_for_tasks(cmd, false, &aborted, &tas, &flags);
+       spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+-      return true;
++      return ret;
+ }
+ EXPORT_SYMBOL(transport_wait_for_tasks);
+--- a/include/target/target_core_base.h
++++ b/include/target/target_core_base.h
+@@ -528,6 +528,8 @@ struct se_cmd {
+ #define CMD_T_DEV_ACTIVE      (1 << 7)
+ #define CMD_T_REQUEST_STOP    (1 << 8)
+ #define CMD_T_BUSY            (1 << 9)
++#define CMD_T_TAS             (1 << 10)
++#define CMD_T_FABRIC_STOP     (1 << 11)
+       spinlock_t              t_state_lock;
+       struct completion       t_transport_stop_comp;
diff --git a/queue-3.14/target-fix-tas-handling-for-multi-session-se_node_acls.patch b/queue-3.14/target-fix-tas-handling-for-multi-session-se_node_acls.patch
new file mode 100644 (file)
index 0000000..1e8d3d8
--- /dev/null
@@ -0,0 +1,109 @@
+From ebde1ca5a908b10312db4ecd7553e3ba039319ab Mon Sep 17 00:00:00 2001
+From: Nicholas Bellinger <nab@linux-iscsi.org>
+Date: Sat, 16 Jan 2016 12:49:49 -0800
+Subject: target: Fix TAS handling for multi-session se_node_acls
+
+From: Nicholas Bellinger <nab@linux-iscsi.org>
+
+commit ebde1ca5a908b10312db4ecd7553e3ba039319ab upstream.
+
+This patch fixes a bug in TMR task aborted status (TAS)
+handling when multiple sessions are connected to the
+same target WWPN endpoint and se_node_acl descriptor,
+resulting in TASK_ABORTED status to not be generated
+for aborted se_cmds on the remote port.
+
+This is due to core_tmr_handle_tas_abort() incorrectly
+comparing se_node_acl instead of se_session, for which
+the multi-session case is expected to be sharing the
+same se_node_acl.
+
+Instead, go ahead and update core_tmr_handle_tas_abort()
+to compare tmr_sess + cmd->se_sess in order to determine
+if the LUN_RESET was received on a different I_T nexus,
+and TASK_ABORTED status response needs to be generated.
+
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Cc: Quinn Tran <quinn.tran@qlogic.com>
+Cc: Himanshu Madhani <himanshu.madhani@qlogic.com>
+Cc: Sagi Grimberg <sagig@mellanox.com>
+Cc: Hannes Reinecke <hare@suse.de>
+Cc: Andy Grover <agrover@redhat.com>
+Cc: Mike Christie <mchristi@redhat.com>
+Cc: stable@vger.kernel.org # 3.10+
+Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+
+---
+ drivers/target/target_core_tmr.c |   17 +++++++++--------
+ 1 file changed, 9 insertions(+), 8 deletions(-)
+
+--- a/drivers/target/target_core_tmr.c
++++ b/drivers/target/target_core_tmr.c
+@@ -83,7 +83,7 @@ void core_tmr_release_req(
+ }
+ static void core_tmr_handle_tas_abort(
+-      struct se_node_acl *tmr_nacl,
++      struct se_session *tmr_sess,
+       struct se_cmd *cmd,
+       int tas)
+ {
+@@ -91,8 +91,7 @@ static void core_tmr_handle_tas_abort(
+       /*
+        * TASK ABORTED status (TAS) bit support
+       */
+-      if ((tmr_nacl &&
+-           (tmr_nacl != cmd->se_sess->se_node_acl)) && tas) {
++      if (tmr_sess && tmr_sess != cmd->se_sess && tas) {
+               remove = false;
+               transport_send_task_abort(cmd);
+       }
+@@ -278,7 +277,7 @@ static void core_tmr_drain_tmr_list(
+ static void core_tmr_drain_state_list(
+       struct se_device *dev,
+       struct se_cmd *prout_cmd,
+-      struct se_node_acl *tmr_nacl,
++      struct se_session *tmr_sess,
+       int tas,
+       struct list_head *preempt_and_abort_list)
+ {
+@@ -369,7 +368,7 @@ static void core_tmr_drain_state_list(
+               cancel_work_sync(&cmd->work);
+               transport_wait_for_tasks(cmd);
+-              core_tmr_handle_tas_abort(tmr_nacl, cmd, tas);
++              core_tmr_handle_tas_abort(tmr_sess, cmd, tas);
+               target_put_sess_cmd(cmd->se_sess, cmd);
+       }
+ }
+@@ -382,6 +381,7 @@ int core_tmr_lun_reset(
+ {
+       struct se_node_acl *tmr_nacl = NULL;
+       struct se_portal_group *tmr_tpg = NULL;
++      struct se_session *tmr_sess = NULL;
+       int tas;
+         /*
+        * TASK_ABORTED status bit, this is configurable via ConfigFS
+@@ -400,8 +400,9 @@ int core_tmr_lun_reset(
+        * or struct se_device passthrough..
+        */
+       if (tmr && tmr->task_cmd && tmr->task_cmd->se_sess) {
+-              tmr_nacl = tmr->task_cmd->se_sess->se_node_acl;
+-              tmr_tpg = tmr->task_cmd->se_sess->se_tpg;
++              tmr_sess = tmr->task_cmd->se_sess;
++              tmr_nacl = tmr_sess->se_node_acl;
++              tmr_tpg = tmr_sess->se_tpg;
+               if (tmr_nacl && tmr_tpg) {
+                       pr_debug("LUN_RESET: TMR caller fabric: %s"
+                               " initiator port %s\n",
+@@ -414,7 +415,7 @@ int core_tmr_lun_reset(
+               dev->transport->name, tas);
+       core_tmr_drain_tmr_list(dev, tmr, preempt_and_abort_list);
+-      core_tmr_drain_state_list(dev, prout_cmd, tmr_nacl, tas,
++      core_tmr_drain_state_list(dev, prout_cmd, tmr_sess, tas,
+                               preempt_and_abort_list);
+       /*
diff --git a/queue-3.14/target-fix-task-aborted-status-tas-handling.patch b/queue-3.14/target-fix-task-aborted-status-tas-handling.patch
new file mode 100644 (file)
index 0000000..33c4b1f
--- /dev/null
@@ -0,0 +1,141 @@
+From 68259b5aac13a57cba797b9605ed9812158f0e72 Mon Sep 17 00:00:00 2001
+From: Alex Leung <amleung21@yahoo.com>
+Date: Fri, 21 Mar 2014 22:20:41 -0700
+Subject: target: Fix Task Aborted Status (TAS) handling
+
+From: Alex Leung <amleung21@yahoo.com>
+
+commit 68259b5aac13a57cba797b9605ed9812158f0e72 upstream.
+
+This patch addresses three of long standing issues wrt to Task
+Aborted Status (TAS) handling.
+
+The first is the incorrect assumption in core_tmr_handle_tas_abort()
+that TASK_ABORTED status is sent for the task referenced by TMR
+ABORT_TASK, and sending TASK_ABORTED status for TMR LUN_RESET on
+the same nexus the LUN_RESET was received.
+
+The second is to ensure the lun reference count is dropped within
+transport_cmd_finish_abort() by calling transport_lun_remove_cmd()
+before invoking transport_cmd_check_stop_to_fabric().
+
+The last is to fix the delayed TAS handling to allow outstanding
+WRITEs to complete before sending the TASK_ABORTED status. This
+includes changing transport_check_aborted_status() to avoid
+processing when SCF_SEND_DELAYED_TAS has not be set, and updating
+transport_send_task_abort() to drop the SCF_SENT_DELAYED_TAS
+check.
+
+Signed-off-by: Alex Leung <amleung21@yahoo.com>
+Cc: Alex Leung <amleung21@yahoo.com>
+Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/target/target_core_tmr.c       |   18 ++++++------------
+ drivers/target/target_core_transport.c |   14 +++++++++++---
+ include/target/target_core_base.h      |    2 +-
+ 3 files changed, 18 insertions(+), 16 deletions(-)
+
+--- a/drivers/target/target_core_tmr.c
++++ b/drivers/target/target_core_tmr.c
+@@ -87,14 +87,17 @@ static void core_tmr_handle_tas_abort(
+       struct se_cmd *cmd,
+       int tas)
+ {
++      bool remove = true;
+       /*
+        * TASK ABORTED status (TAS) bit support
+       */
+       if ((tmr_nacl &&
+-           (tmr_nacl == cmd->se_sess->se_node_acl)) || tas)
++           (tmr_nacl != cmd->se_sess->se_node_acl)) && tas) {
++              remove = false;
+               transport_send_task_abort(cmd);
++      }
+-      transport_cmd_finish_abort(cmd, 0);
++      transport_cmd_finish_abort(cmd, remove);
+ }
+ static int target_check_cdb_and_preempt(struct list_head *list,
+@@ -150,18 +153,9 @@ void core_tmr_abort_task(
+               cancel_work_sync(&se_cmd->work);
+               transport_wait_for_tasks(se_cmd);
+-              /*
+-               * Now send SAM_STAT_TASK_ABORTED status for the referenced
+-               * se_cmd descriptor..
+-               */
+-              transport_send_task_abort(se_cmd);
+-              /*
+-               * Also deal with possible extra acknowledge reference..
+-               */
+-              if (se_cmd->se_cmd_flags & SCF_ACK_KREF)
+-                      target_put_sess_cmd(se_sess, se_cmd);
+               target_put_sess_cmd(se_sess, se_cmd);
++              transport_cmd_finish_abort(se_cmd, true);
+               printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for"
+                               " ref_tag: %d\n", ref_tag);
+--- a/drivers/target/target_core_transport.c
++++ b/drivers/target/target_core_transport.c
+@@ -603,6 +603,9 @@ static void transport_lun_remove_cmd(str
+ void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
+ {
++      if (cmd->se_cmd_flags & SCF_SE_LUN_CMD)
++              transport_lun_remove_cmd(cmd);
++
+       if (transport_cmd_check_stop_to_fabric(cmd))
+               return;
+       if (remove)
+@@ -2825,13 +2828,17 @@ int transport_check_aborted_status(struc
+       if (!(cmd->transport_state & CMD_T_ABORTED))
+               return 0;
+-      if (!send_status || (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
++      /*
++       * If cmd has been aborted but either no status is to be sent or it has
++       * already been sent, just return
++       */
++      if (!send_status || !(cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS))
+               return 1;
+       pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB: 0x%02x ITT: 0x%08x\n",
+                cmd->t_task_cdb[0], cmd->se_tfo->get_task_tag(cmd));
+-      cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS;
++      cmd->se_cmd_flags &= ~SCF_SEND_DELAYED_TAS;
+       cmd->scsi_status = SAM_STAT_TASK_ABORTED;
+       trace_target_cmd_complete(cmd);
+       cmd->se_tfo->queue_status(cmd);
+@@ -2845,7 +2852,7 @@ void transport_send_task_abort(struct se
+       unsigned long flags;
+       spin_lock_irqsave(&cmd->t_state_lock, flags);
+-      if (cmd->se_cmd_flags & (SCF_SENT_CHECK_CONDITION | SCF_SENT_DELAYED_TAS)) {
++      if (cmd->se_cmd_flags & (SCF_SENT_CHECK_CONDITION)) {
+               spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+               return;
+       }
+@@ -2860,6 +2867,7 @@ void transport_send_task_abort(struct se
+       if (cmd->data_direction == DMA_TO_DEVICE) {
+               if (cmd->se_tfo->write_pending_status(cmd) != 0) {
+                       cmd->transport_state |= CMD_T_ABORTED;
++                      cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS;
+                       smp_mb__after_atomic_inc();
+                       return;
+               }
+--- a/include/target/target_core_base.h
++++ b/include/target/target_core_base.h
+@@ -162,7 +162,7 @@ enum se_cmd_flags_table {
+       SCF_SENT_CHECK_CONDITION        = 0x00000800,
+       SCF_OVERFLOW_BIT                = 0x00001000,
+       SCF_UNDERFLOW_BIT               = 0x00002000,
+-      SCF_SENT_DELAYED_TAS            = 0x00004000,
++      SCF_SEND_DELAYED_TAS            = 0x00004000,
+       SCF_ALUA_NON_OPTIMIZED          = 0x00008000,
+       SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC = 0x00020000,
+       SCF_ACK_KREF                    = 0x00040000,
diff --git a/queue-3.14/target-fix-write_same-discard-conversion-to-linux-512b-sectors.patch b/queue-3.14/target-fix-write_same-discard-conversion-to-linux-512b-sectors.patch
new file mode 100644 (file)
index 0000000..140e8d9
--- /dev/null
@@ -0,0 +1,268 @@
+From 8a9ebe717a133ba7bc90b06047f43cc6b8bcb8b3 Mon Sep 17 00:00:00 2001
+From: Mike Christie <mchristi@redhat.com>
+Date: Mon, 18 Jan 2016 14:09:27 -0600
+Subject: target: Fix WRITE_SAME/DISCARD conversion to linux 512b sectors
+
+From: Mike Christie <mchristi@redhat.com>
+
+commit 8a9ebe717a133ba7bc90b06047f43cc6b8bcb8b3 upstream.
+
+In a couple places we are not converting to/from the Linux
+block layer 512 bytes sectors.
+
+1.
+
+The request queue values and what we do are a mismatch of
+things:
+
+max_discard_sectors - This is in linux block layer 512 byte
+sectors. We are just copying this to max_unmap_lba_count.
+
+discard_granularity - This is in bytes. We are converting it
+to Linux block layer 512 byte sectors.
+
+discard_alignment - This is in bytes. We are just copying
+this over.
+
+The problem is that the core LIO code exports these values in
+spc_emulate_evpd_b0 and we use them to test request arguments
+in sbc_execute_unmap, but we never convert to the block size
+we export to the initiator. If we are not using 512 byte sectors
+then we are exporting the wrong values or are checks are off.
+And, for the discard_alignment/bytes case we are just plain messed
+up.
+
+2.
+
+blkdev_issue_discard's start and number of sector arguments
+are supposed to be in linux block layer 512 byte sectors. We are
+currently passing in the values we get from the initiator which
+might be based on some other sector size.
+
+There is a similar problem in iblock_execute_write_same where
+the bio functions want values in 512 byte sectors but we are
+passing in what we got from the initiator.
+
+Signed-off-by: Mike Christie <mchristi@redhat.com>
+Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+
+---
+ drivers/target/target_core_device.c  |   43 ++++++++++++++++++++++++++
+ drivers/target/target_core_file.c    |   29 +++++-------------
+ drivers/target/target_core_iblock.c  |   56 ++++++++---------------------------
+ include/target/target_core_backend.h |    4 ++
+ 4 files changed, 70 insertions(+), 62 deletions(-)
+
+--- a/drivers/target/target_core_device.c
++++ b/drivers/target/target_core_device.c
+@@ -1577,6 +1577,49 @@ struct se_device *target_alloc_device(st
+       return dev;
+ }
++/*
++ * Check if the underlying struct block_device request_queue supports
++ * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM
++ * in ATA and we need to set TPE=1
++ */
++bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
++                                     struct request_queue *q, int block_size)
++{
++      if (!blk_queue_discard(q))
++              return false;
++
++      attrib->max_unmap_lba_count = (q->limits.max_discard_sectors << 9) /
++                                                              block_size;
++      /*
++       * Currently hardcoded to 1 in Linux/SCSI code..
++       */
++      attrib->max_unmap_block_desc_count = 1;
++      attrib->unmap_granularity = q->limits.discard_granularity / block_size;
++      attrib->unmap_granularity_alignment = q->limits.discard_alignment /
++                                                              block_size;
++      return true;
++}
++EXPORT_SYMBOL(target_configure_unmap_from_queue);
++
++/*
++ * Convert from blocksize advertised to the initiator to the 512 byte
++ * units unconditionally used by the Linux block layer.
++ */
++sector_t target_to_linux_sector(struct se_device *dev, sector_t lb)
++{
++      switch (dev->dev_attrib.block_size) {
++      case 4096:
++              return lb << 3;
++      case 2048:
++              return lb << 2;
++      case 1024:
++              return lb << 1;
++      default:
++              return lb;
++      }
++}
++EXPORT_SYMBOL(target_to_linux_sector);
++
+ int target_configure_device(struct se_device *dev)
+ {
+       struct se_hba *hba = dev->se_hba;
+--- a/drivers/target/target_core_file.c
++++ b/drivers/target/target_core_file.c
+@@ -164,25 +164,11 @@ static int fd_configure_device(struct se
+                       " block_device blocks: %llu logical_block_size: %d\n",
+                       dev_size, div_u64(dev_size, fd_dev->fd_block_size),
+                       fd_dev->fd_block_size);
+-              /*
+-               * Check if the underlying struct block_device request_queue supports
+-               * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM
+-               * in ATA and we need to set TPE=1
+-               */
+-              if (blk_queue_discard(q)) {
+-                      dev->dev_attrib.max_unmap_lba_count =
+-                              q->limits.max_discard_sectors;
+-                      /*
+-                       * Currently hardcoded to 1 in Linux/SCSI code..
+-                       */
+-                      dev->dev_attrib.max_unmap_block_desc_count = 1;
+-                      dev->dev_attrib.unmap_granularity =
+-                              q->limits.discard_granularity >> 9;
+-                      dev->dev_attrib.unmap_granularity_alignment =
+-                              q->limits.discard_alignment;
++
++              if (target_configure_unmap_from_queue(&dev->dev_attrib, q,
++                                                    fd_dev->fd_block_size))
+                       pr_debug("IFILE: BLOCK Discard support available,"
+-                                      " disabled by default\n");
+-              }
++                               " disabled by default\n");
+               /*
+                * Enable write same emulation for IBLOCK and use 0xFFFF as
+                * the smaller WRITE_SAME(10) only has a two-byte block count.
+@@ -545,9 +531,12 @@ fd_do_unmap(struct se_cmd *cmd, void *pr
+       if (S_ISBLK(inode->i_mode)) {
+               /* The backend is block device, use discard */
+               struct block_device *bdev = inode->i_bdev;
++              struct se_device *dev = cmd->se_dev;
+-              ret = blkdev_issue_discard(bdev, lba,
+-                              nolb, GFP_KERNEL, 0);
++              ret = blkdev_issue_discard(bdev,
++                                         target_to_linux_sector(dev, lba),
++                                         target_to_linux_sector(dev,  nolb),
++                                         GFP_KERNEL, 0);
+               if (ret < 0) {
+                       pr_warn("FILEIO: blkdev_issue_discard() failed: %d\n",
+                               ret);
+--- a/drivers/target/target_core_iblock.c
++++ b/drivers/target/target_core_iblock.c
+@@ -126,27 +126,11 @@ static int iblock_configure_device(struc
+       dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);
+       dev->dev_attrib.hw_queue_depth = q->nr_requests;
+-      /*
+-       * Check if the underlying struct block_device request_queue supports
+-       * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM
+-       * in ATA and we need to set TPE=1
+-       */
+-      if (blk_queue_discard(q)) {
+-              dev->dev_attrib.max_unmap_lba_count =
+-                              q->limits.max_discard_sectors;
+-
+-              /*
+-               * Currently hardcoded to 1 in Linux/SCSI code..
+-               */
+-              dev->dev_attrib.max_unmap_block_desc_count = 1;
+-              dev->dev_attrib.unmap_granularity =
+-                              q->limits.discard_granularity >> 9;
+-              dev->dev_attrib.unmap_granularity_alignment =
+-                              q->limits.discard_alignment;
+-
++      if (target_configure_unmap_from_queue(&dev->dev_attrib, q,
++                                            dev->dev_attrib.hw_block_size))
+               pr_debug("IBLOCK: BLOCK Discard support available,"
+-                              " disabled by default\n");
+-      }
++                       " disabled by default\n");
++
+       /*
+        * Enable write same emulation for IBLOCK and use 0xFFFF as
+        * the smaller WRITE_SAME(10) only has a two-byte block count.
+@@ -418,9 +402,13 @@ iblock_do_unmap(struct se_cmd *cmd, void
+               sector_t lba, sector_t nolb)
+ {
+       struct block_device *bdev = priv;
++      struct se_device *dev = cmd->se_dev;
+       int ret;
+-      ret = blkdev_issue_discard(bdev, lba, nolb, GFP_KERNEL, 0);
++      ret = blkdev_issue_discard(bdev,
++                                 target_to_linux_sector(dev, lba),
++                                 target_to_linux_sector(dev,  nolb),
++                                 GFP_KERNEL, 0);
+       if (ret < 0) {
+               pr_err("blkdev_issue_discard() failed: %d\n", ret);
+               return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+@@ -460,8 +448,10 @@ iblock_execute_write_same(struct se_cmd
+       struct scatterlist *sg;
+       struct bio *bio;
+       struct bio_list list;
+-      sector_t block_lba = cmd->t_task_lba;
+-      sector_t sectors = sbc_get_write_same_sectors(cmd);
++      struct se_device *dev = cmd->se_dev;
++      sector_t block_lba = target_to_linux_sector(dev, cmd->t_task_lba);
++      sector_t sectors = target_to_linux_sector(dev,
++                                      sbc_get_write_same_sectors(cmd));
+       sg = &cmd->t_data_sg[0];
+@@ -670,12 +660,12 @@ iblock_execute_rw(struct se_cmd *cmd, st
+                 enum dma_data_direction data_direction)
+ {
+       struct se_device *dev = cmd->se_dev;
++      sector_t block_lba = target_to_linux_sector(dev, cmd->t_task_lba);
+       struct iblock_req *ibr;
+       struct bio *bio, *bio_start;
+       struct bio_list list;
+       struct scatterlist *sg;
+       u32 sg_num = sgl_nents;
+-      sector_t block_lba;
+       unsigned bio_cnt;
+       int rw = 0;
+       int i;
+@@ -701,24 +691,6 @@ iblock_execute_rw(struct se_cmd *cmd, st
+               rw = READ;
+       }
+-      /*
+-       * Convert the blocksize advertised to the initiator to the 512 byte
+-       * units unconditionally used by the Linux block layer.
+-       */
+-      if (dev->dev_attrib.block_size == 4096)
+-              block_lba = (cmd->t_task_lba << 3);
+-      else if (dev->dev_attrib.block_size == 2048)
+-              block_lba = (cmd->t_task_lba << 2);
+-      else if (dev->dev_attrib.block_size == 1024)
+-              block_lba = (cmd->t_task_lba << 1);
+-      else if (dev->dev_attrib.block_size == 512)
+-              block_lba = cmd->t_task_lba;
+-      else {
+-              pr_err("Unsupported SCSI -> BLOCK LBA conversion:"
+-                              " %u\n", dev->dev_attrib.block_size);
+-              return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+-      }
+-
+       ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
+       if (!ibr)
+               goto fail;
+--- a/include/target/target_core_backend.h
++++ b/include/target/target_core_backend.h
+@@ -94,4 +94,8 @@ sense_reason_t       transport_generic_map_mem
+ void  array_free(void *array, int n);
++sector_t target_to_linux_sector(struct se_device *dev, sector_t lb);
++bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
++                                     struct request_queue *q, int block_size);
++
+ #endif /* TARGET_CORE_BACKEND_H */