git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
3.14-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 27 Jan 2015 18:39:36 +0000 (10:39 -0800)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 27 Jan 2015 18:39:36 +0000 (10:39 -0800)
added patches:
dm-cache-fix-problematic-dual-use-of-a-single-migration-count-variable.patch
dm-cache-share-cache-metadata-object-across-inactive-and-active-dm-tables.patch
ipr-wait-for-aborted-command-responses.patch
pci-add-flag-for-devices-where-we-can-t-use-bus-reset.patch
pci-mark-atheros-ar93xx-to-avoid-bus-reset.patch

queue-3.14/dm-cache-fix-problematic-dual-use-of-a-single-migration-count-variable.patch [new file with mode: 0644]
queue-3.14/dm-cache-share-cache-metadata-object-across-inactive-and-active-dm-tables.patch [new file with mode: 0644]
queue-3.14/ipr-wait-for-aborted-command-responses.patch [new file with mode: 0644]
queue-3.14/pci-add-flag-for-devices-where-we-can-t-use-bus-reset.patch [new file with mode: 0644]
queue-3.14/pci-mark-atheros-ar93xx-to-avoid-bus-reset.patch [new file with mode: 0644]
queue-3.14/series

diff --git a/queue-3.14/dm-cache-fix-problematic-dual-use-of-a-single-migration-count-variable.patch b/queue-3.14/dm-cache-fix-problematic-dual-use-of-a-single-migration-count-variable.patch
new file mode 100644 (file)
index 0000000..a74a528
--- /dev/null
@@ -0,0 +1,284 @@
+From a59db67656021fa212e9b95a583f13c34eb67cd9 Mon Sep 17 00:00:00 2001
+From: Joe Thornber <ejt@redhat.com>
+Date: Fri, 23 Jan 2015 10:16:16 +0000
+Subject: dm cache: fix problematic dual use of a single migration count variable
+
+From: Joe Thornber <ejt@redhat.com>
+
+commit a59db67656021fa212e9b95a583f13c34eb67cd9 upstream.
+
+Introduce a new variable to count the number of allocated migration
+structures.  The existing variable cache->nr_migrations became
+overloaded.  It was used to:
+
+ i) track the number of migrations in flight for the purposes of
+    quiescing during suspend.
+
+ ii) estimate the amount of background IO occurring.
+
+Recent discard changes meant that REQ_DISCARD bios are processed with
+a migration.  Discards are not background IO so nr_migrations was not
+incremented.  However, this could cause quiescing to complete early.
+
+(i) is now handled with a new variable cache->nr_allocated_migrations.
+cache->nr_migrations has been renamed cache->nr_io_migrations.
+cleanup_migration() is now called free_io_migration(), since it
+decrements that variable.
+
+Also, remove the unused cache->next_migration variable that got replaced
+with prealloc_structs a while ago.
+
+Signed-off-by: Joe Thornber <ejt@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-cache-target.c |   89 ++++++++++++++++++++++++-------------------
+ 1 file changed, 50 insertions(+), 39 deletions(-)
+
+--- a/drivers/md/dm-cache-target.c
++++ b/drivers/md/dm-cache-target.c
+@@ -222,7 +222,13 @@ struct cache {
+       struct list_head need_commit_migrations;
+       sector_t migration_threshold;
+       wait_queue_head_t migration_wait;
+-      atomic_t nr_migrations;
++      atomic_t nr_allocated_migrations;
++
++      /*
++       * The number of in flight migrations that are performing
++       * background io. eg, promotion, writeback.
++       */
++      atomic_t nr_io_migrations;
+       wait_queue_head_t quiescing_wait;
+       atomic_t quiescing;
+@@ -259,7 +265,6 @@ struct cache {
+       struct dm_deferred_set *all_io_ds;
+       mempool_t *migration_pool;
+-      struct dm_cache_migration *next_migration;
+       struct dm_cache_policy *policy;
+       unsigned policy_nr_args;
+@@ -350,10 +355,31 @@ static void free_prison_cell(struct cach
+       dm_bio_prison_free_cell(cache->prison, cell);
+ }
++static struct dm_cache_migration *alloc_migration(struct cache *cache)
++{
++      struct dm_cache_migration *mg;
++
++      mg = mempool_alloc(cache->migration_pool, GFP_NOWAIT);
++      if (mg) {
++              mg->cache = cache;
++              atomic_inc(&mg->cache->nr_allocated_migrations);
++      }
++
++      return mg;
++}
++
++static void free_migration(struct dm_cache_migration *mg)
++{
++      if (atomic_dec_and_test(&mg->cache->nr_allocated_migrations))
++              wake_up(&mg->cache->migration_wait);
++
++      mempool_free(mg, mg->cache->migration_pool);
++}
++
+ static int prealloc_data_structs(struct cache *cache, struct prealloc *p)
+ {
+       if (!p->mg) {
+-              p->mg = mempool_alloc(cache->migration_pool, GFP_NOWAIT);
++              p->mg = alloc_migration(cache);
+               if (!p->mg)
+                       return -ENOMEM;
+       }
+@@ -382,7 +408,7 @@ static void prealloc_free_structs(struct
+               free_prison_cell(cache, p->cell1);
+       if (p->mg)
+-              mempool_free(p->mg, cache->migration_pool);
++              free_migration(p->mg);
+ }
+ static struct dm_cache_migration *prealloc_get_migration(struct prealloc *p)
+@@ -812,24 +838,14 @@ static void remap_to_origin_then_cache(s
+  * Migration covers moving data from the origin device to the cache, or
+  * vice versa.
+  *--------------------------------------------------------------*/
+-static void free_migration(struct dm_cache_migration *mg)
+-{
+-      mempool_free(mg, mg->cache->migration_pool);
+-}
+-
+-static void inc_nr_migrations(struct cache *cache)
++static void inc_io_migrations(struct cache *cache)
+ {
+-      atomic_inc(&cache->nr_migrations);
++      atomic_inc(&cache->nr_io_migrations);
+ }
+-static void dec_nr_migrations(struct cache *cache)
++static void dec_io_migrations(struct cache *cache)
+ {
+-      atomic_dec(&cache->nr_migrations);
+-
+-      /*
+-       * Wake the worker in case we're suspending the target.
+-       */
+-      wake_up(&cache->migration_wait);
++      atomic_dec(&cache->nr_io_migrations);
+ }
+ static void __cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell,
+@@ -852,11 +868,10 @@ static void cell_defer(struct cache *cac
+       wake_worker(cache);
+ }
+-static void cleanup_migration(struct dm_cache_migration *mg)
++static void free_io_migration(struct dm_cache_migration *mg)
+ {
+-      struct cache *cache = mg->cache;
++      dec_io_migrations(mg->cache);
+       free_migration(mg);
+-      dec_nr_migrations(cache);
+ }
+ static void migration_failure(struct dm_cache_migration *mg)
+@@ -881,7 +896,7 @@ static void migration_failure(struct dm_
+               cell_defer(cache, mg->new_ocell, true);
+       }
+-      cleanup_migration(mg);
++      free_io_migration(mg);
+ }
+ static void migration_success_pre_commit(struct dm_cache_migration *mg)
+@@ -892,7 +907,7 @@ static void migration_success_pre_commit
+       if (mg->writeback) {
+               clear_dirty(cache, mg->old_oblock, mg->cblock);
+               cell_defer(cache, mg->old_ocell, false);
+-              cleanup_migration(mg);
++              free_io_migration(mg);
+               return;
+       } else if (mg->demote) {
+@@ -902,14 +917,14 @@ static void migration_success_pre_commit
+                                            mg->old_oblock);
+                       if (mg->promote)
+                               cell_defer(cache, mg->new_ocell, true);
+-                      cleanup_migration(mg);
++                      free_io_migration(mg);
+                       return;
+               }
+       } else {
+               if (dm_cache_insert_mapping(cache->cmd, mg->cblock, mg->new_oblock)) {
+                       DMWARN_LIMIT("promotion failed; couldn't update on disk metadata");
+                       policy_remove_mapping(cache->policy, mg->new_oblock);
+-                      cleanup_migration(mg);
++                      free_io_migration(mg);
+                       return;
+               }
+       }
+@@ -942,7 +957,7 @@ static void migration_success_post_commi
+               } else {
+                       if (mg->invalidate)
+                               policy_remove_mapping(cache->policy, mg->old_oblock);
+-                      cleanup_migration(mg);
++                      free_io_migration(mg);
+               }
+       } else {
+@@ -957,7 +972,7 @@ static void migration_success_post_commi
+                       bio_endio(mg->new_ocell->holder, 0);
+                       cell_defer(cache, mg->new_ocell, false);
+               }
+-              cleanup_migration(mg);
++              free_io_migration(mg);
+       }
+ }
+@@ -1169,7 +1184,7 @@ static void promote(struct cache *cache,
+       mg->new_ocell = cell;
+       mg->start_jiffies = jiffies;
+-      inc_nr_migrations(cache);
++      inc_io_migrations(cache);
+       quiesce_migration(mg);
+ }
+@@ -1192,7 +1207,7 @@ static void writeback(struct cache *cach
+       mg->new_ocell = NULL;
+       mg->start_jiffies = jiffies;
+-      inc_nr_migrations(cache);
++      inc_io_migrations(cache);
+       quiesce_migration(mg);
+ }
+@@ -1218,7 +1233,7 @@ static void demote_then_promote(struct c
+       mg->new_ocell = new_ocell;
+       mg->start_jiffies = jiffies;
+-      inc_nr_migrations(cache);
++      inc_io_migrations(cache);
+       quiesce_migration(mg);
+ }
+@@ -1245,7 +1260,7 @@ static void invalidate(struct cache *cac
+       mg->new_ocell = NULL;
+       mg->start_jiffies = jiffies;
+-      inc_nr_migrations(cache);
++      inc_io_migrations(cache);
+       quiesce_migration(mg);
+ }
+@@ -1306,7 +1321,7 @@ static void process_discard_bio(struct c
+ static bool spare_migration_bandwidth(struct cache *cache)
+ {
+-      sector_t current_volume = (atomic_read(&cache->nr_migrations) + 1) *
++      sector_t current_volume = (atomic_read(&cache->nr_io_migrations) + 1) *
+               cache->sectors_per_block;
+       return current_volume < cache->migration_threshold;
+ }
+@@ -1661,7 +1676,7 @@ static void stop_quiescing(struct cache
+ static void wait_for_migrations(struct cache *cache)
+ {
+-      wait_event(cache->migration_wait, !atomic_read(&cache->nr_migrations));
++      wait_event(cache->migration_wait, !atomic_read(&cache->nr_allocated_migrations));
+ }
+ static void stop_worker(struct cache *cache)
+@@ -1772,9 +1787,6 @@ static void destroy(struct cache *cache)
+ {
+       unsigned i;
+-      if (cache->next_migration)
+-              mempool_free(cache->next_migration, cache->migration_pool);
+-
+       if (cache->migration_pool)
+               mempool_destroy(cache->migration_pool);
+@@ -2282,7 +2294,8 @@ static int cache_create(struct cache_arg
+       INIT_LIST_HEAD(&cache->quiesced_migrations);
+       INIT_LIST_HEAD(&cache->completed_migrations);
+       INIT_LIST_HEAD(&cache->need_commit_migrations);
+-      atomic_set(&cache->nr_migrations, 0);
++      atomic_set(&cache->nr_allocated_migrations, 0);
++      atomic_set(&cache->nr_io_migrations, 0);
+       init_waitqueue_head(&cache->migration_wait);
+       init_waitqueue_head(&cache->quiescing_wait);
+@@ -2342,8 +2355,6 @@ static int cache_create(struct cache_arg
+               goto bad;
+       }
+-      cache->next_migration = NULL;
+-
+       cache->need_tick_bio = true;
+       cache->sized = false;
+       cache->invalidate = false;
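The change above splits one overloaded counter into two.  As a rough
illustration only (a userspace sketch with hypothetical names, not the
kernel code in the patch), the pattern looks like this: every allocated
migration bumps a counter that suspend waits on, while only
background-IO migrations bump the counter used for throttling.

/*
 * Illustrative userspace sketch only -- hypothetical names, not the
 * dm-cache code.  Two counters: nr_allocated_migrations gates
 * quiescing; nr_io_migrations counts only background IO for throttling.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

struct cache_sim {
	atomic_int nr_allocated_migrations;	/* every live migration struct */
	atomic_int nr_io_migrations;		/* promotion/writeback only */
	pthread_mutex_t lock;
	pthread_cond_t migration_wait;		/* signalled when allocations reach 0 */
	long sectors_per_block;
	long migration_threshold;
};

struct migration {
	struct cache_sim *cache;
	bool background_io;	/* false for discards, true for promotion/writeback */
};

static struct migration *alloc_migration(struct cache_sim *c, bool background_io)
{
	struct migration *mg = malloc(sizeof(*mg));

	if (!mg)
		return NULL;
	mg->cache = c;
	mg->background_io = background_io;
	atomic_fetch_add(&c->nr_allocated_migrations, 1);
	if (background_io)
		atomic_fetch_add(&c->nr_io_migrations, 1);
	return mg;
}

static void free_migration(struct migration *mg)
{
	struct cache_sim *c = mg->cache;

	if (mg->background_io)
		atomic_fetch_sub(&c->nr_io_migrations, 1);
	free(mg);

	/* Only the allocation counter gates quiescing. */
	if (atomic_fetch_sub(&c->nr_allocated_migrations, 1) == 1) {
		pthread_mutex_lock(&c->lock);
		pthread_cond_broadcast(&c->migration_wait);
		pthread_mutex_unlock(&c->lock);
	}
}

/* Throttling only looks at background IO, so discards do not count. */
static bool spare_migration_bandwidth(struct cache_sim *c)
{
	long volume = (atomic_load(&c->nr_io_migrations) + 1) * c->sectors_per_block;

	return volume < c->migration_threshold;
}

/* Suspend waits for *all* allocated migrations, discards included. */
static void wait_for_migrations(struct cache_sim *c)
{
	pthread_mutex_lock(&c->lock);
	while (atomic_load(&c->nr_allocated_migrations))
		pthread_cond_wait(&c->migration_wait, &c->lock);
	pthread_mutex_unlock(&c->lock);
}

In this sketch a discard-driven migration would take the
background_io = false path: it no longer perturbs the throttling
estimate, but it still holds off quiescing until it is freed.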
diff --git a/queue-3.14/dm-cache-share-cache-metadata-object-across-inactive-and-active-dm-tables.patch b/queue-3.14/dm-cache-share-cache-metadata-object-across-inactive-and-active-dm-tables.patch
new file mode 100644 (file)
index 0000000..e304e72
--- /dev/null
@@ -0,0 +1,160 @@
+From 9b1cc9f251affdd27f29fe46d0989ba76c33faf6 Mon Sep 17 00:00:00 2001
+From: Joe Thornber <ejt@redhat.com>
+Date: Fri, 23 Jan 2015 10:00:07 +0000
+Subject: dm cache: share cache-metadata object across inactive and active DM tables
+
+From: Joe Thornber <ejt@redhat.com>
+
+commit 9b1cc9f251affdd27f29fe46d0989ba76c33faf6 upstream.
+
+If a DM table is reloaded with an inactive table when the device is not
+suspended (normal procedure for LVM2), then there will be two dm-bufio
+objects that can diverge.  This can lead to a situation where the
+inactive table uses bufio to read metadata at the same time the active
+table writes metadata -- resulting in the inactive table having stale
+metadata buffers once it is promoted to the active table slot.
+
+Fix this by using reference counting and a global list of cache metadata
+objects to ensure there is only one metadata object per metadata device.
+
+Signed-off-by: Joe Thornber <ejt@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-cache-metadata.c |  101 ++++++++++++++++++++++++++++++++++++++---
+ 1 file changed, 95 insertions(+), 6 deletions(-)
+
+--- a/drivers/md/dm-cache-metadata.c
++++ b/drivers/md/dm-cache-metadata.c
+@@ -94,6 +94,9 @@ struct cache_disk_superblock {
+ } __packed;
+ struct dm_cache_metadata {
++      atomic_t ref_count;
++      struct list_head list;
++
+       struct block_device *bdev;
+       struct dm_block_manager *bm;
+       struct dm_space_map *metadata_sm;
+@@ -669,10 +672,10 @@ static void unpack_value(__le64 value_le
+ /*----------------------------------------------------------------*/
+-struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
+-                                               sector_t data_block_size,
+-                                               bool may_format_device,
+-                                               size_t policy_hint_size)
++static struct dm_cache_metadata *metadata_open(struct block_device *bdev,
++                                             sector_t data_block_size,
++                                             bool may_format_device,
++                                             size_t policy_hint_size)
+ {
+       int r;
+       struct dm_cache_metadata *cmd;
+@@ -683,6 +686,7 @@ struct dm_cache_metadata *dm_cache_metad
+               return NULL;
+       }
++      atomic_set(&cmd->ref_count, 1);
+       init_rwsem(&cmd->root_lock);
+       cmd->bdev = bdev;
+       cmd->data_block_size = data_block_size;
+@@ -705,10 +709,95 @@ struct dm_cache_metadata *dm_cache_metad
+       return cmd;
+ }
++/*
++ * We keep a little list of ref counted metadata objects to prevent two
++ * different target instances creating separate bufio instances.  This is
++ * an issue if a table is reloaded before the suspend.
++ */
++static DEFINE_MUTEX(table_lock);
++static LIST_HEAD(table);
++
++static struct dm_cache_metadata *lookup(struct block_device *bdev)
++{
++      struct dm_cache_metadata *cmd;
++
++      list_for_each_entry(cmd, &table, list)
++              if (cmd->bdev == bdev) {
++                      atomic_inc(&cmd->ref_count);
++                      return cmd;
++              }
++
++      return NULL;
++}
++
++static struct dm_cache_metadata *lookup_or_open(struct block_device *bdev,
++                                              sector_t data_block_size,
++                                              bool may_format_device,
++                                              size_t policy_hint_size)
++{
++      struct dm_cache_metadata *cmd, *cmd2;
++
++      mutex_lock(&table_lock);
++      cmd = lookup(bdev);
++      mutex_unlock(&table_lock);
++
++      if (cmd)
++              return cmd;
++
++      cmd = metadata_open(bdev, data_block_size, may_format_device, policy_hint_size);
++      if (cmd) {
++              mutex_lock(&table_lock);
++              cmd2 = lookup(bdev);
++              if (cmd2) {
++                      mutex_unlock(&table_lock);
++                      __destroy_persistent_data_objects(cmd);
++                      kfree(cmd);
++                      return cmd2;
++              }
++              list_add(&cmd->list, &table);
++              mutex_unlock(&table_lock);
++      }
++
++      return cmd;
++}
++
++static bool same_params(struct dm_cache_metadata *cmd, sector_t data_block_size)
++{
++      if (cmd->data_block_size != data_block_size) {
++              DMERR("data_block_size (%llu) different from that in metadata (%llu)\n",
++                    (unsigned long long) data_block_size,
++                    (unsigned long long) cmd->data_block_size);
++              return false;
++      }
++
++      return true;
++}
++
++struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
++                                               sector_t data_block_size,
++                                               bool may_format_device,
++                                               size_t policy_hint_size)
++{
++      struct dm_cache_metadata *cmd = lookup_or_open(bdev, data_block_size,
++                                                     may_format_device, policy_hint_size);
++      if (cmd && !same_params(cmd, data_block_size)) {
++              dm_cache_metadata_close(cmd);
++              return NULL;
++      }
++
++      return cmd;
++}
++
+ void dm_cache_metadata_close(struct dm_cache_metadata *cmd)
+ {
+-      __destroy_persistent_data_objects(cmd);
+-      kfree(cmd);
++      if (atomic_dec_and_test(&cmd->ref_count)) {
++              mutex_lock(&table_lock);
++              list_del(&cmd->list);
++              mutex_unlock(&table_lock);
++
++              __destroy_persistent_data_objects(cmd);
++              kfree(cmd);
++      }
+ }
+ /*
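For illustration only (a userspace sketch with hypothetical names, not
the dm-cache API), the shape of the fix is a classic ref-counted
lookup-or-open: check a global table under a lock, open the object
outside the lock if it is missing, then re-check before inserting so
that a racing opener's object wins and the duplicate is discarded.

/*
 * Illustrative userspace sketch only -- hypothetical names.
 * One shared, ref-counted metadata object per device.
 */
#include <pthread.h>
#include <stdlib.h>
#include <sys/types.h>

struct metadata {
	int refcount;			/* protected by table_lock */
	dev_t dev;			/* key: one object per device */
	struct metadata *next;		/* global singly linked table */
	/* ... block manager, bufio, etc. elided ... */
};

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct metadata *table;

/* Caller holds table_lock. */
static struct metadata *lookup(dev_t dev)
{
	for (struct metadata *md = table; md; md = md->next)
		if (md->dev == dev) {
			md->refcount++;
			return md;
		}
	return NULL;
}

static struct metadata *metadata_open(dev_t dev)
{
	struct metadata *md = calloc(1, sizeof(*md));

	if (md) {
		md->refcount = 1;
		md->dev = dev;
		/* ... expensive per-device setup would happen here ... */
	}
	return md;
}

struct metadata *metadata_get(dev_t dev)
{
	struct metadata *md, *md2;

	pthread_mutex_lock(&table_lock);
	md = lookup(dev);
	pthread_mutex_unlock(&table_lock);
	if (md)
		return md;

	md = metadata_open(dev);	/* slow path, done unlocked */
	if (!md)
		return NULL;

	pthread_mutex_lock(&table_lock);
	md2 = lookup(dev);		/* someone may have raced us */
	if (md2) {
		pthread_mutex_unlock(&table_lock);
		free(md);		/* discard our duplicate */
		return md2;
	}
	md->next = table;
	table = md;
	pthread_mutex_unlock(&table_lock);
	return md;
}

void metadata_put(struct metadata *md)
{
	pthread_mutex_lock(&table_lock);
	if (--md->refcount == 0) {
		/* Unlink from the global table, then tear the object down. */
		for (struct metadata **p = &table; *p; p = &(*p)->next)
			if (*p == md) {
				*p = md->next;
				break;
			}
		pthread_mutex_unlock(&table_lock);
		free(md);
		return;
	}
	pthread_mutex_unlock(&table_lock);
}

The close path drops the reference under the same lock and only tears
the object down when the count hits zero, so an inactive and an active
table opened against the same metadata device always share one object.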
diff --git a/queue-3.14/ipr-wait-for-aborted-command-responses.patch b/queue-3.14/ipr-wait-for-aborted-command-responses.patch
new file mode 100644 (file)
index 0000000..ea6d150
--- /dev/null
@@ -0,0 +1,183 @@
+From 6cdb08172bc89f0a39e1643c5e7eab362692fd1b Mon Sep 17 00:00:00 2001
+From: Brian King <brking@linux.vnet.ibm.com>
+Date: Thu, 30 Oct 2014 17:27:10 -0500
+Subject: ipr: wait for aborted command responses
+
+From: Brian King <brking@linux.vnet.ibm.com>
+
+commit 6cdb08172bc89f0a39e1643c5e7eab362692fd1b upstream.
+
+Fixes a race condition in abort handling that was injected
+when multiple interrupt support was added. When only a single
+interrupt is present, the adapter guarantees it will send
+responses for aborted commands prior to the response for the
+abort command itself. With multiple interrupts, these responses
+generally come back on different interrupts, so we need to
+ensure the abort thread waits until the aborted command is
+complete, to avoid performing a double completion. This race
+condition was being hit frequently in environments that were
+triggering command timeouts, resulting in a double completion
+and a kernel oops.
+
+Signed-off-by: Brian King <brking@linux.vnet.ibm.com>
+Reviewed-by: Wendy Xiong <wenxiong@linux.vnet.ibm.com>
+Tested-by: Wendy Xiong <wenxiong@linux.vnet.ibm.com>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/scsi/ipr.c |   92 +++++++++++++++++++++++++++++++++++++++++++++++++++++
+ drivers/scsi/ipr.h |    1 
+ 2 files changed, 93 insertions(+)
+
+--- a/drivers/scsi/ipr.c
++++ b/drivers/scsi/ipr.c
+@@ -683,6 +683,7 @@ static void ipr_init_ipr_cmnd(struct ipr
+       ipr_reinit_ipr_cmnd(ipr_cmd);
+       ipr_cmd->u.scratch = 0;
+       ipr_cmd->sibling = NULL;
++      ipr_cmd->eh_comp = NULL;
+       ipr_cmd->fast_done = fast_done;
+       init_timer(&ipr_cmd->timer);
+ }
+@@ -848,6 +849,8 @@ static void ipr_scsi_eh_done(struct ipr_
+       scsi_dma_unmap(ipr_cmd->scsi_cmd);
+       scsi_cmd->scsi_done(scsi_cmd);
++      if (ipr_cmd->eh_comp)
++              complete(ipr_cmd->eh_comp);
+       list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
+ }
+@@ -4805,6 +4808,84 @@ static int ipr_slave_alloc(struct scsi_d
+       return rc;
+ }
++/**
++ * ipr_match_lun - Match function for specified LUN
++ * @ipr_cmd:  ipr command struct
++ * @device:           device to match (sdev)
++ *
++ * Returns:
++ *    1 if command matches sdev / 0 if command does not match sdev
++ **/
++static int ipr_match_lun(struct ipr_cmnd *ipr_cmd, void *device)
++{
++      if (ipr_cmd->scsi_cmd && ipr_cmd->scsi_cmd->device == device)
++              return 1;
++      return 0;
++}
++
++/**
++ * ipr_wait_for_ops - Wait for matching commands to complete
++ * @ipr_cmd:  ipr command struct
++ * @device:           device to match (sdev)
++ * @match:            match function to use
++ *
++ * Returns:
++ *    SUCCESS / FAILED
++ **/
++static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device,
++                          int (*match)(struct ipr_cmnd *, void *))
++{
++      struct ipr_cmnd *ipr_cmd;
++      int wait;
++      unsigned long flags;
++      struct ipr_hrr_queue *hrrq;
++      signed long timeout = IPR_ABORT_TASK_TIMEOUT;
++      DECLARE_COMPLETION_ONSTACK(comp);
++
++      ENTER;
++      do {
++              wait = 0;
++
++              for_each_hrrq(hrrq, ioa_cfg) {
++                      spin_lock_irqsave(hrrq->lock, flags);
++                      list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
++                              if (match(ipr_cmd, device)) {
++                                      ipr_cmd->eh_comp = &comp;
++                                      wait++;
++                              }
++                      }
++                      spin_unlock_irqrestore(hrrq->lock, flags);
++              }
++
++              if (wait) {
++                      timeout = wait_for_completion_timeout(&comp, timeout);
++
++                      if (!timeout) {
++                              wait = 0;
++
++                              for_each_hrrq(hrrq, ioa_cfg) {
++                                      spin_lock_irqsave(hrrq->lock, flags);
++                                      list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
++                                              if (match(ipr_cmd, device)) {
++                                                      ipr_cmd->eh_comp = NULL;
++                                                      wait++;
++                                              }
++                                      }
++                                      spin_unlock_irqrestore(hrrq->lock, flags);
++                              }
++
++                              if (wait)
++                                      dev_err(&ioa_cfg->pdev->dev, "Timed out waiting for aborted commands\n");
++                              LEAVE;
++                              return wait ? FAILED : SUCCESS;
++                      }
++              }
++      } while (wait);
++
++      LEAVE;
++      return SUCCESS;
++}
++
+ static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
+ {
+       struct ipr_ioa_cfg *ioa_cfg;
+@@ -5023,11 +5104,17 @@ static int __ipr_eh_dev_reset(struct scs
+ static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
+ {
+       int rc;
++      struct ipr_ioa_cfg *ioa_cfg;
++
++      ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
+       spin_lock_irq(cmd->device->host->host_lock);
+       rc = __ipr_eh_dev_reset(cmd);
+       spin_unlock_irq(cmd->device->host->host_lock);
++      if (rc == SUCCESS)
++              rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun);
++
+       return rc;
+ }
+@@ -5205,13 +5292,18 @@ static int ipr_eh_abort(struct scsi_cmnd
+ {
+       unsigned long flags;
+       int rc;
++      struct ipr_ioa_cfg *ioa_cfg;
+       ENTER;
++      ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
++
+       spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
+       rc = ipr_cancel_op(scsi_cmd);
+       spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
++      if (rc == SUCCESS)
++              rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun);
+       LEAVE;
+       return rc;
+ }
+--- a/drivers/scsi/ipr.h
++++ b/drivers/scsi/ipr.h
+@@ -1585,6 +1585,7 @@ struct ipr_cmnd {
+               struct scsi_device *sdev;
+       } u;
++      struct completion *eh_comp;
+       struct ipr_hrr_queue *hrrq;
+       struct ipr_ioa_cfg *ioa_cfg;
+ };
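A rough userspace sketch of the wait-for-aborted-ops pattern this patch
adds (names and types are hypothetical, not the ipr driver's): the abort
path tags every still-pending command for the device with a completion,
waits for it with a timeout, and untags on timeout so that a late
response cannot touch the stack-allocated completion.

/*
 * Illustrative userspace sketch only -- hypothetical names, not the ipr
 * driver API.
 */
#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <time.h>

struct completion {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	int done;
};

struct cmd {
	void *device;
	struct completion *eh_comp;	/* set by the abort path */
	struct cmd *next;		/* pending list */
};

struct host {
	pthread_mutex_t lock;		/* protects the pending list */
	struct cmd *pending;
};

/* Normal response path: unlink @c from the pending list, then complete. */
static void cmd_done(struct host *h, struct cmd *c)
{
	pthread_mutex_lock(&h->lock);
	for (struct cmd **p = &h->pending; *p; p = &(*p)->next)
		if (*p == c) {
			*p = c->next;
			break;
		}
	if (c->eh_comp) {
		pthread_mutex_lock(&c->eh_comp->lock);
		c->eh_comp->done = 1;
		pthread_cond_signal(&c->eh_comp->cond);
		pthread_mutex_unlock(&c->eh_comp->lock);
	}
	pthread_mutex_unlock(&h->lock);
}

/* Abort path: wait until no pending command for @device remains. */
static bool wait_for_ops(struct host *h, void *device, long timeout_sec)
{
	struct completion comp = {
		PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0
	};
	int waiting;

	do {
		waiting = 0;
		pthread_mutex_lock(&comp.lock);
		comp.done = 0;
		pthread_mutex_unlock(&comp.lock);

		/* Tag every still-pending command for this device. */
		pthread_mutex_lock(&h->lock);
		for (struct cmd *c = h->pending; c; c = c->next)
			if (c->device == device) {
				c->eh_comp = &comp;
				waiting++;
			}
		pthread_mutex_unlock(&h->lock);

		if (!waiting)
			break;

		struct timespec deadline;
		clock_gettime(CLOCK_REALTIME, &deadline);
		deadline.tv_sec += timeout_sec;

		pthread_mutex_lock(&comp.lock);
		while (!comp.done &&
		       pthread_cond_timedwait(&comp.cond, &comp.lock,
					      &deadline) != ETIMEDOUT)
			;
		pthread_mutex_unlock(&comp.lock);

		if (!comp.done) {
			/* Timed out: untag so late responses can't touch &comp. */
			pthread_mutex_lock(&h->lock);
			for (struct cmd *c = h->pending; c; c = c->next)
				if (c->eh_comp == &comp)
					c->eh_comp = NULL;
			pthread_mutex_unlock(&h->lock);
			return false;	/* report failure to the caller */
		}
	} while (waiting);

	return true;
}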
diff --git a/queue-3.14/pci-add-flag-for-devices-where-we-can-t-use-bus-reset.patch b/queue-3.14/pci-add-flag-for-devices-where-we-can-t-use-bus-reset.patch
new file mode 100644 (file)
index 0000000..bd752cf
--- /dev/null
@@ -0,0 +1,120 @@
+From f331a859e0ee5a898c1f47596eddad4c4f02d657 Mon Sep 17 00:00:00 2001
+From: Alex Williamson <alex.williamson@redhat.com>
+Date: Thu, 15 Jan 2015 18:16:04 -0600
+Subject: PCI: Add flag for devices where we can't use bus reset
+
+From: Alex Williamson <alex.williamson@redhat.com>
+
+commit f331a859e0ee5a898c1f47596eddad4c4f02d657 upstream.
+
+Enable a mechanism for devices to quirk that they do not behave
+correctly after a PCI bus reset.  We require a modest level of
+spec-compliant behavior in order to do a reset; for instance, the
+device should come out of reset without throwing errors, and PCI
+config space should be accessible after reset.  This is too much to ask of some devices.
+
+Link: http://lkml.kernel.org/r/20140923210318.498dacbd@dualc.maya.org
+Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
+Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/pci/pci.c   |   40 ++++++++++++++++++++++++++++++++++++----
+ include/linux/pci.h |    2 ++
+ 2 files changed, 38 insertions(+), 4 deletions(-)
+
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -3187,7 +3187,8 @@ static int pci_parent_bus_reset(struct p
+ {
+       struct pci_dev *pdev;
+-      if (pci_is_root_bus(dev->bus) || dev->subordinate || !dev->bus->self)
++      if (pci_is_root_bus(dev->bus) || dev->subordinate ||
++          !dev->bus->self || dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
+               return -ENOTTY;
+       list_for_each_entry(pdev, &dev->bus->devices, bus_list)
+@@ -3221,7 +3222,8 @@ static int pci_dev_reset_slot_function(s
+ {
+       struct pci_dev *pdev;
+-      if (dev->subordinate || !dev->slot)
++      if (dev->subordinate || !dev->slot ||
++          dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
+               return -ENOTTY;
+       list_for_each_entry(pdev, &dev->bus->devices, bus_list)
+@@ -3452,6 +3454,20 @@ int pci_try_reset_function(struct pci_de
+ }
+ EXPORT_SYMBOL_GPL(pci_try_reset_function);
++/* Do any devices on or below this bus prevent a bus reset? */
++static bool pci_bus_resetable(struct pci_bus *bus)
++{
++      struct pci_dev *dev;
++
++      list_for_each_entry(dev, &bus->devices, bus_list) {
++              if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
++                  (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
++                      return false;
++      }
++
++      return true;
++}
++
+ /* Lock devices from the top of the tree down */
+ static void pci_bus_lock(struct pci_bus *bus)
+ {
+@@ -3502,6 +3518,22 @@ unlock:
+       return 0;
+ }
++/* Do any devices on or below this slot prevent a bus reset? */
++static bool pci_slot_resetable(struct pci_slot *slot)
++{
++      struct pci_dev *dev;
++
++      list_for_each_entry(dev, &slot->bus->devices, bus_list) {
++              if (!dev->slot || dev->slot != slot)
++                      continue;
++              if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
++                  (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
++                      return false;
++      }
++
++      return true;
++}
++
+ /* Lock devices from the top of the tree down */
+ static void pci_slot_lock(struct pci_slot *slot)
+ {
+@@ -3623,7 +3655,7 @@ static int pci_slot_reset(struct pci_slo
+ {
+       int rc;
+-      if (!slot)
++      if (!slot || !pci_slot_resetable(slot))
+               return -ENOTTY;
+       if (!probe)
+@@ -3715,7 +3747,7 @@ EXPORT_SYMBOL_GPL(pci_try_reset_slot);
+ static int pci_bus_reset(struct pci_bus *bus, int probe)
+ {
+-      if (!bus->self)
++      if (!bus->self || !pci_bus_resetable(bus))
+               return -ENOTTY;
+       if (probe)
+--- a/include/linux/pci.h
++++ b/include/linux/pci.h
+@@ -170,6 +170,8 @@ enum pci_dev_flags {
+       PCI_DEV_FLAGS_NO_D3 = (__force pci_dev_flags_t) 2,
+       /* Provide indication device is assigned by a Virtual Machine Manager */
+       PCI_DEV_FLAGS_ASSIGNED = (__force pci_dev_flags_t) 4,
++      /* Do not use bus resets for device */
++      PCI_DEV_FLAGS_NO_BUS_RESET = (__force pci_dev_flags_t) (1 << 6),
+ };
+ enum pci_irq_reroute_variant {
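As a simplified sketch (generic names, not the PCI core's API), the
gating pattern this patch adds is: a per-device flag that any device on
or below a bus can set to veto a bus-level reset, checked recursively
before the reset is attempted.

/*
 * Illustrative userspace sketch only -- hypothetical names, not the
 * kernel PCI code.
 */
#include <stdbool.h>
#include <stddef.h>

#define DEV_FLAG_NO_BUS_RESET (1u << 6)

struct bus;

struct dev {
	unsigned int flags;
	struct bus *subordinate;	/* bus below a bridge, or NULL */
	struct dev *next;		/* sibling on the same bus */
};

struct bus {
	struct dev *devices;		/* children */
};

/* A bus may be reset only if no device on or below it objects. */
static bool bus_resetable(const struct bus *bus)
{
	for (const struct dev *d = bus->devices; d; d = d->next) {
		if (d->flags & DEV_FLAG_NO_BUS_RESET)
			return false;
		if (d->subordinate && !bus_resetable(d->subordinate))
			return false;
	}
	return true;
}

static int bus_reset(struct bus *bus)
{
	if (!bus_resetable(bus))
		return -1;		/* the kernel returns -ENOTTY here */
	/* ... perform the secondary bus reset ... */
	return 0;
}

The real patch applies the same check to slot resets and to parent-bus
resets, and the follow-up quirk patch below sets the flag for the
Atheros AR93xx parts.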
diff --git a/queue-3.14/pci-mark-atheros-ar93xx-to-avoid-bus-reset.patch b/queue-3.14/pci-mark-atheros-ar93xx-to-avoid-bus-reset.patch
new file mode 100644 (file)
index 0000000..01db54c
--- /dev/null
@@ -0,0 +1,55 @@
+From c3e59ee4e76686b0c84ca8faa1011d10cd4ca1b8 Mon Sep 17 00:00:00 2001
+From: Alex Williamson <alex.williamson@redhat.com>
+Date: Thu, 15 Jan 2015 18:17:12 -0600
+Subject: PCI: Mark Atheros AR93xx to avoid bus reset
+
+From: Alex Williamson <alex.williamson@redhat.com>
+
+commit c3e59ee4e76686b0c84ca8faa1011d10cd4ca1b8 upstream.
+
+Reports against the TL-WDN4800 card indicate that a PCI bus reset of this
+Atheros device causes system lock-ups and resets.  I've also been able to
+confirm this behavior on multiple systems.  The device never returns from
+reset and attempts to access config space of the device after reset result
+in hangs.  Blacklist bus reset for the device to avoid this issue.
+
+[bhelgaas: This regression appeared in v3.14.  Andreas bisected it to
+425c1b223dac ("PCI: Add Virtual Channel to save/restore support"), but we
+don't understand the mechanism by which that commit affects the reset
+path.]
+
+[bhelgaas: changelog, references]
+Link: http://lkml.kernel.org/r/20140923210318.498dacbd@dualc.maya.org
+Reported-by: Andreas Hartmann <andihartmann@freenet.de>
+Tested-by: Andreas Hartmann <andihartmann@freenet.de>
+Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
+Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/pci/quirks.c |   14 ++++++++++++++
+ 1 file changed, 14 insertions(+)
+
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -3008,6 +3008,20 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_C
+ DECLARE_PCI_FIXUP_HEADER(0x1814, 0x0601, /* Ralink RT2800 802.11n PCI */
+                        quirk_broken_intx_masking);
++static void quirk_no_bus_reset(struct pci_dev *dev)
++{
++      dev->dev_flags |= PCI_DEV_FLAGS_NO_BUS_RESET;
++}
++
++/*
++ * Atheros AR93xx chips do not behave after a bus reset.  The device will
++ * throw a Link Down error on AER-capable systems and regardless of AER,
++ * config space of the device is never accessible again and typically
++ * causes the system to hang or reset when access is attempted.
++ * http://www.spinics.net/lists/linux-pci/msg34797.html
++ */
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0030, quirk_no_bus_reset);
++
+ static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f,
+                         struct pci_fixup *end)
+ {
diff --git a/queue-3.14/series b/queue-3.14/series
index cb93611c61a4107116eb9cbb657d1c8bae91e2f7..36117762ade05c964e55b6df8c9dfa0393b46dcd 100644 (file)
@@ -8,3 +8,8 @@ drm-i915-fix-mutex-owner-inspection-race-under-debug_mutexes.patch
 drm-radeon-add-a-dpm-quirk-list.patch
 drm-radeon-add-si-dpm-quirk-list.patch
 drm-radeon-use-rv515_ring_start-on-r5xx.patch
+pci-add-flag-for-devices-where-we-can-t-use-bus-reset.patch
+pci-mark-atheros-ar93xx-to-avoid-bus-reset.patch
+ipr-wait-for-aborted-command-responses.patch
+dm-cache-share-cache-metadata-object-across-inactive-and-active-dm-tables.patch
+dm-cache-fix-problematic-dual-use-of-a-single-migration-count-variable.patch