]> git.ipfire.org Git - people/pmueller/ipfire-2.x.git/blobdiff - src/patches/suse-2.6.27.31/patches.drivers/libfc_locking.diff
Added missing Xen Kernel Patches which were not committed because
[people/pmueller/ipfire-2.x.git] / src / patches / suse-2.6.27.31 / patches.drivers / libfc_locking.diff
diff --git a/src/patches/suse-2.6.27.31/patches.drivers/libfc_locking.diff b/src/patches/suse-2.6.27.31/patches.drivers/libfc_locking.diff
new file mode 100644 (file)
index 0000000..2ff9a4e
--- /dev/null
@@ -0,0 +1,377 @@
+From: Vasu Dev <vasu.dev@intel.com>
+Subject: libfc, fcoe: fixed locking issues with lport->lp_mutex around lport->link_status
+Patch-mainline: 6d235742e63f6b8912d8b200b75f9aa6d48f3e07
+References: bnc #468053
+
+The fcoe_xmit could call fc_pause in case the pending skb queue len is larger
+than FCOE_MAX_QUEUE_DEPTH, the fc_pause was trying to grab lport->lp_mutex to
+change lport->link_status and that had these issues :-
+    
+1. The fcoe_xmit was getting called with bh disabled, thus causing
+"BUG: scheduling while atomic" when grabbing lport->lp_mutex with bh disabled.
+
+2. fc_linkup and fc_linkdown function calls lport_enter function with
+lport->lp_mutex held and these enter function in turn calls fcoe_xmit to send
+lport related FC frame, e.g. fc_linkup => fc_lport_enter_flogi to send flogi
+req. In this case grabbing the same lport->lp_mutex again in fc_pause from
+fcoe_xmit would cause deadlock.
+
+The lport->lp_mutex was used for setting FC_PAUSE in fcoe_xmit path but
+FC_PAUSE bit was not used anywhere beside just setting and clear this
+bit in lport->link_status, instead used a separate field qfull in fc_lport
+to eliminate need for lport->lp_mutex to track pending queue full condition
+and in turn avoid above described two locking issues.
+
+Also added check for lp->qfull in fc_fcp_lport_queue_ready to trigger
+SCSI_MLQUEUE_HOST_BUSY when lp->qfull is set to prevent more scsi-ml cmds
+while lp->qfull is set.
+
+This patch eliminated FC_LINK_UP and FC_PAUSE and instead used dedicated
+fields in fc_lport for this, this simplified all related conditional
+code.
+    
+Also removed fc_pause and fc_unpause functions and instead used newly added
+lport->qfull directly in fcoe.
+
+Also fixed a circular locking in fc_exch_recv_abts.
+
+These issues were blocking large file copy to a 2TB lun.
+
+Signed-off-by: Vasu Dev <vasu.dev@intel.com>
+Acked-by: Bernhard Walle <bwalle@suse.de>
+---
+ drivers/scsi/fcoe/fcoe_sw.c   |    6 +++---
+ drivers/scsi/fcoe/libfcoe.c   |   41 +++++++++++++++++------------------------
+ drivers/scsi/libfc/fc_exch.c  |    2 +-
+ drivers/scsi/libfc/fc_fcp.c   |    6 +++---
+ drivers/scsi/libfc/fc_lport.c |   38 +++++++-------------------------------
+ drivers/scsi/libfc/fc_rport.c |    2 +-
+ include/scsi/libfc.h          |   12 ++----------
+ 7 files changed, 34 insertions(+), 73 deletions(-)
+
+--- a/drivers/scsi/fcoe/fcoe_sw.c
++++ b/drivers/scsi/fcoe/fcoe_sw.c
+@@ -116,7 +116,8 @@ static int fcoe_sw_lport_config(struct f
+ {
+       int i = 0;
+-      lp->link_status = 0;
++      lp->link_up = 0;
++      lp->qfull = 0;
+       lp->max_retry_count = 3;
+       lp->e_d_tov = 2 * 1000; /* FC-FS default */
+       lp->r_a_tov = 2 * 2 * 1000;
+@@ -181,9 +182,8 @@ static int fcoe_sw_netdev_config(struct 
+       if (fc_set_mfs(lp, mfs))
+               return -EINVAL;
+-      lp->link_status = ~FC_PAUSE & ~FC_LINK_UP;
+       if (!fcoe_link_ok(lp))
+-              lp->link_status |= FC_LINK_UP;
++              lp->link_up = 1;
+       /* offload features support */
+       if (fc->real_dev->features & NETIF_F_SG)
+--- a/drivers/scsi/fcoe/libfcoe.c
++++ b/drivers/scsi/fcoe/libfcoe.c
+@@ -505,7 +505,7 @@ int fcoe_xmit(struct fc_lport *lp, struc
+       if (rc) {
+               fcoe_insert_wait_queue(lp, skb);
+               if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
+-                      fc_pause(lp);
++                      lp->qfull = 1;
+       }
+       return 0;
+@@ -719,7 +719,7 @@ static void fcoe_recv_flogi(struct fcoe_
+  * fcoe_watchdog - fcoe timer callback
+  * @vp:
+  *
+- * This checks the pending queue length for fcoe and put fcoe to be paused state
++ * This checks the pending queue length for fcoe and set lport qfull
+  * if the FCOE_MAX_QUEUE_DEPTH is reached. This is done for all fc_lport on the
+  * fcoe_hostlist.
+  *
+@@ -729,17 +729,17 @@ void fcoe_watchdog(ulong vp)
+ {
+       struct fc_lport *lp;
+       struct fcoe_softc *fc;
+-      int paused = 0;
++      int qfilled = 0;
+       read_lock(&fcoe_hostlist_lock);
+       list_for_each_entry(fc, &fcoe_hostlist, list) {
+               lp = fc->lp;
+               if (lp) {
+                       if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
+-                              paused = 1;
++                              qfilled = 1;
+                       if (fcoe_check_wait_queue(lp) <  FCOE_MAX_QUEUE_DEPTH) {
+-                              if (paused)
+-                                      fc_unpause(lp);
++                              if (qfilled)
++                                      lp->qfull = 0;
+                       }
+               }
+       }
+@@ -768,8 +768,7 @@ void fcoe_watchdog(ulong vp)
+  **/
+ static int fcoe_check_wait_queue(struct fc_lport *lp)
+ {
+-      int rc, unpause = 0;
+-      int paused = 0;
++      int rc;
+       struct sk_buff *skb;
+       struct fcoe_softc *fc;
+@@ -777,10 +776,10 @@ static int fcoe_check_wait_queue(struct 
+       spin_lock_bh(&fc->fcoe_pending_queue.lock);
+       /*
+-       * is this interface paused?
++       * if interface pending queue full then set qfull in lport.
+        */
+       if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
+-              paused = 1;
++              lp->qfull = 1;
+       if (fc->fcoe_pending_queue.qlen) {
+               while ((skb = __skb_dequeue(&fc->fcoe_pending_queue)) != NULL) {
+                       spin_unlock_bh(&fc->fcoe_pending_queue.lock);
+@@ -792,11 +791,9 @@ static int fcoe_check_wait_queue(struct 
+                       spin_lock_bh(&fc->fcoe_pending_queue.lock);
+               }
+               if (fc->fcoe_pending_queue.qlen < FCOE_MAX_QUEUE_DEPTH)
+-                      unpause = 1;
++                      lp->qfull = 0;
+       }
+       spin_unlock_bh(&fc->fcoe_pending_queue.lock);
+-      if ((unpause) && (paused))
+-              fc_unpause(lp);
+       return fc->fcoe_pending_queue.qlen;
+ }
+@@ -874,7 +871,7 @@ static int fcoe_device_notification(stru
+       struct net_device *real_dev = ptr;
+       struct fcoe_softc *fc;
+       struct fcoe_dev_stats *stats;
+-      u16 new_status;
++      u32 new_link_up;
+       u32 mfs;
+       int rc = NOTIFY_OK;
+@@ -891,17 +888,15 @@ static int fcoe_device_notification(stru
+               goto out;
+       }
+-      new_status = lp->link_status;
++      new_link_up = lp->link_up;
+       switch (event) {
+       case NETDEV_DOWN:
+       case NETDEV_GOING_DOWN:
+-              new_status &= ~FC_LINK_UP;
++              new_link_up = 0;
+               break;
+       case NETDEV_UP:
+       case NETDEV_CHANGE:
+-              new_status &= ~FC_LINK_UP;
+-              if (!fcoe_link_ok(lp))
+-                      new_status |= FC_LINK_UP;
++              new_link_up = !fcoe_link_ok(lp);
+               break;
+       case NETDEV_CHANGEMTU:
+               mfs = fc->real_dev->mtu -
+@@ -909,17 +904,15 @@ static int fcoe_device_notification(stru
+                        sizeof(struct fcoe_crc_eof));
+               if (mfs >= FC_MIN_MAX_FRAME)
+                       fc_set_mfs(lp, mfs);
+-              new_status &= ~FC_LINK_UP;
+-              if (!fcoe_link_ok(lp))
+-                      new_status |= FC_LINK_UP;
++              new_link_up = !fcoe_link_ok(lp);
+               break;
+       case NETDEV_REGISTER:
+               break;
+       default:
+               FC_DBG("unknown event %ld call", event);
+       }
+-      if (lp->link_status != new_status) {
+-              if ((new_status & FC_LINK_UP) == FC_LINK_UP)
++      if (lp->link_up != new_link_up) {
++              if (new_link_up)
+                       fc_linkup(lp);
+               else {
+                       stats = lp->dev_stats[smp_processor_id()];
+--- a/drivers/scsi/libfc/fc_exch.c
++++ b/drivers/scsi/libfc/fc_exch.c
+@@ -1096,7 +1096,7 @@ static void fc_exch_recv_abts(struct fc_
+               ap->ba_high_seq_cnt = fh->fh_seq_cnt;
+               ap->ba_low_seq_cnt = htons(sp->cnt);
+       }
+-      sp = fc_seq_start_next(sp);
++      sp = fc_seq_start_next_locked(sp);
+       spin_unlock_bh(&ep->ex_lock);
+       fc_seq_send_last(sp, fp, FC_RCTL_BA_ACC, FC_TYPE_BLS);
+       fc_frame_free(rx_fp);
+--- a/drivers/scsi/libfc/fc_fcp.c
++++ b/drivers/scsi/libfc/fc_fcp.c
+@@ -20,13 +20,13 @@
+  */
+ #include <linux/module.h>
++#include <linux/delay.h>
+ #include <linux/kernel.h>
+ #include <linux/types.h>
+ #include <linux/spinlock.h>
+ #include <linux/scatterlist.h>
+ #include <linux/err.h>
+ #include <linux/crc32.h>
+-#include <linux/delay.h>
+ #include <scsi/scsi_tcq.h>
+ #include <scsi/scsi.h>
+@@ -1622,7 +1622,7 @@ out:
+ static inline int fc_fcp_lport_queue_ready(struct fc_lport *lp)
+ {
+       /* lock ? */
+-      return (lp->state == LPORT_ST_READY) && (lp->link_status & FC_LINK_UP);
++      return (lp->state == LPORT_ST_READY) && lp->link_up && !lp->qfull;
+ }
+ /**
+@@ -1891,7 +1891,7 @@ int fc_eh_abort(struct scsi_cmnd *sc_cmd
+       lp = shost_priv(sc_cmd->device->host);
+       if (lp->state != LPORT_ST_READY)
+               return rc;
+-      else if (!(lp->link_status & FC_LINK_UP))
++      else if (!lp->link_up)
+               return rc;
+       spin_lock_irqsave(lp->host->host_lock, flags);
+--- a/drivers/scsi/libfc/fc_lport.c
++++ b/drivers/scsi/libfc/fc_lport.c
+@@ -250,7 +250,7 @@ void fc_get_host_port_state(struct Scsi_
+ {
+       struct fc_lport *lp = shost_priv(shost);
+-      if ((lp->link_status & FC_LINK_UP) == FC_LINK_UP)
++      if (lp->link_up)
+               fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
+       else
+               fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
+@@ -484,7 +484,7 @@ static void fc_lport_recv_rnid_req(struc
+  * @sp: current sequence in the ADISC exchange
+  * @fp: ADISC request frame
+  *
+- * Locking Note: The lport lock is exected to be held before calling
++ * Locking Note: The lport lock is expected to be held before calling
+  * this function.
+  */
+ static void fc_lport_recv_adisc_req(struct fc_seq *sp, struct fc_frame *in_fp,
+@@ -577,8 +577,8 @@ void fc_linkup(struct fc_lport *lport)
+                      fc_host_port_id(lport->host));
+       mutex_lock(&lport->lp_mutex);
+-      if ((lport->link_status & FC_LINK_UP) != FC_LINK_UP) {
+-              lport->link_status |= FC_LINK_UP;
++      if (!lport->link_up) {
++              lport->link_up = 1;
+               if (lport->state == LPORT_ST_RESET)
+                       fc_lport_enter_flogi(lport);
+@@ -597,8 +597,8 @@ void fc_linkdown(struct fc_lport *lport)
+       FC_DEBUG_LPORT("Link is down for port (%6x)\n",
+                      fc_host_port_id(lport->host));
+-      if ((lport->link_status & FC_LINK_UP) == FC_LINK_UP) {
+-              lport->link_status &= ~(FC_LINK_UP);
++      if (lport->link_up) {
++              lport->link_up = 0;
+               fc_lport_enter_reset(lport);
+               lport->tt.fcp_cleanup(lport);
+       }
+@@ -607,30 +607,6 @@ void fc_linkdown(struct fc_lport *lport)
+ EXPORT_SYMBOL(fc_linkdown);
+ /**
+- * fc_pause - Pause the flow of frames
+- * @lport: The lport to be paused
+- */
+-void fc_pause(struct fc_lport *lport)
+-{
+-      mutex_lock(&lport->lp_mutex);
+-      lport->link_status |= FC_PAUSE;
+-      mutex_unlock(&lport->lp_mutex);
+-}
+-EXPORT_SYMBOL(fc_pause);
+-
+-/**
+- * fc_unpause - Unpause the flow of frames
+- * @lport: The lport to be unpaused
+- */
+-void fc_unpause(struct fc_lport *lport)
+-{
+-      mutex_lock(&lport->lp_mutex);
+-      lport->link_status &= ~(FC_PAUSE);
+-      mutex_unlock(&lport->lp_mutex);
+-}
+-EXPORT_SYMBOL(fc_unpause);
+-
+-/**
+  * fc_fabric_logoff - Logout of the fabric
+  * @lport:          fc_lport pointer to logoff the fabric
+  *
+@@ -977,7 +953,7 @@ static void fc_lport_enter_reset(struct 
+       fc_host_fabric_name(lport->host) = 0;
+       fc_host_port_id(lport->host) = 0;
+-      if ((lport->link_status & FC_LINK_UP) == FC_LINK_UP)
++      if (lport->link_up)
+               fc_lport_enter_flogi(lport);
+ }
+--- a/drivers/scsi/libfc/fc_rport.c
++++ b/drivers/scsi/libfc/fc_rport.c
+@@ -425,7 +425,7 @@ static void fc_rport_error(struct fc_rpo
+                      PTR_ERR(fp), fc_rport_state(rport), rdata->retries);
+       if (!fp || PTR_ERR(fp) == -FC_EX_TIMEOUT) {
+-              /* 
++              /*
+                * Memory allocation failure, or the exchange timed out.
+                *  Retry after delay
+                */
+--- a/include/scsi/libfc.h
++++ b/include/scsi/libfc.h
+@@ -68,9 +68,6 @@
+ /*
+  * FC HBA status
+  */
+-#define FC_PAUSE                  (1 << 1)
+-#define FC_LINK_UP                (1 << 0)
+-
+ enum fc_lport_state {
+       LPORT_ST_NONE = 0,
+       LPORT_ST_FLOGI,
+@@ -603,7 +600,8 @@ struct fc_lport {
+       /* Operational Information */
+       struct libfc_function_template tt;
+-      u16                     link_status;
++      u8                      link_up;
++      u8                      qfull;
+       enum fc_lport_state     state;
+       unsigned long           boot_time;
+@@ -704,12 +702,6 @@ void fc_linkup(struct fc_lport *);
+ void fc_linkdown(struct fc_lport *);
+ /*
+- * Pause and unpause traffic.
+- */
+-void fc_pause(struct fc_lport *);
+-void fc_unpause(struct fc_lport *);
+-
+-/*
+  * Configure the local port.
+  */
+ int fc_lport_config(struct fc_lport *);