--- /dev/null
+From a59db67656021fa212e9b95a583f13c34eb67cd9 Mon Sep 17 00:00:00 2001
+From: Joe Thornber <ejt@redhat.com>
+Date: Fri, 23 Jan 2015 10:16:16 +0000
+Subject: dm cache: fix problematic dual use of a single migration count variable
+
+From: Joe Thornber <ejt@redhat.com>
+
+commit a59db67656021fa212e9b95a583f13c34eb67cd9 upstream.
+
+Introduce a new variable to count the number of allocated migration
+structures. The existing variable cache->nr_migrations became
+overloaded. It was used to:
+
+ i) track the number of migrations in flight for the purposes of
+ quiescing during suspend.
+
+ ii) estimate the amount of background IO occurring.
+
+Recent discard changes meant that REQ_DISCARD bios are processed with
+a migration. Discards are not background IO, so nr_migrations was not
+incremented for them; as a result, quiescing could complete early while
+a discard migration was still in flight.
+
+(i) is now handled with a new variable cache->nr_allocated_migrations.
+cache->nr_migrations has been renamed to cache->nr_io_migrations.
+cleanup_migration() is now called free_io_migration(), since it
+decrements that variable.
+
+Also, remove the unused cache->next_migration variable, which was
+replaced by prealloc_structs a while ago.
+
+Signed-off-by: Joe Thornber <ejt@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-cache-target.c | 89 ++++++++++++++++++++++++-------------------
+ 1 file changed, 50 insertions(+), 39 deletions(-)
+
+--- a/drivers/md/dm-cache-target.c
++++ b/drivers/md/dm-cache-target.c
+@@ -222,7 +222,13 @@ struct cache {
+ struct list_head need_commit_migrations;
+ sector_t migration_threshold;
+ wait_queue_head_t migration_wait;
+- atomic_t nr_migrations;
++ atomic_t nr_allocated_migrations;
++
++ /*
++ * The number of in flight migrations that are performing
++ * background io. eg, promotion, writeback.
++ */
++ atomic_t nr_io_migrations;
+
+ wait_queue_head_t quiescing_wait;
+ atomic_t quiescing;
+@@ -258,7 +264,6 @@ struct cache {
+ struct dm_deferred_set *all_io_ds;
+
+ mempool_t *migration_pool;
+- struct dm_cache_migration *next_migration;
+
+ struct dm_cache_policy *policy;
+ unsigned policy_nr_args;
+@@ -349,10 +354,31 @@ static void free_prison_cell(struct cach
+ dm_bio_prison_free_cell(cache->prison, cell);
+ }
+
++static struct dm_cache_migration *alloc_migration(struct cache *cache)
++{
++ struct dm_cache_migration *mg;
++
++ mg = mempool_alloc(cache->migration_pool, GFP_NOWAIT);
++ if (mg) {
++ mg->cache = cache;
++ atomic_inc(&mg->cache->nr_allocated_migrations);
++ }
++
++ return mg;
++}
++
++static void free_migration(struct dm_cache_migration *mg)
++{
++ if (atomic_dec_and_test(&mg->cache->nr_allocated_migrations))
++ wake_up(&mg->cache->migration_wait);
++
++ mempool_free(mg, mg->cache->migration_pool);
++}
++
+ static int prealloc_data_structs(struct cache *cache, struct prealloc *p)
+ {
+ if (!p->mg) {
+- p->mg = mempool_alloc(cache->migration_pool, GFP_NOWAIT);
++ p->mg = alloc_migration(cache);
+ if (!p->mg)
+ return -ENOMEM;
+ }
+@@ -381,7 +407,7 @@ static void prealloc_free_structs(struct
+ free_prison_cell(cache, p->cell1);
+
+ if (p->mg)
+- mempool_free(p->mg, cache->migration_pool);
++ free_migration(p->mg);
+ }
+
+ static struct dm_cache_migration *prealloc_get_migration(struct prealloc *p)
+@@ -817,24 +843,14 @@ static void remap_to_origin_then_cache(s
+ * Migration covers moving data from the origin device to the cache, or
+ * vice versa.
+ *--------------------------------------------------------------*/
+-static void free_migration(struct dm_cache_migration *mg)
+-{
+- mempool_free(mg, mg->cache->migration_pool);
+-}
+-
+-static void inc_nr_migrations(struct cache *cache)
++static void inc_io_migrations(struct cache *cache)
+ {
+- atomic_inc(&cache->nr_migrations);
++ atomic_inc(&cache->nr_io_migrations);
+ }
+
+-static void dec_nr_migrations(struct cache *cache)
++static void dec_io_migrations(struct cache *cache)
+ {
+- atomic_dec(&cache->nr_migrations);
+-
+- /*
+- * Wake the worker in case we're suspending the target.
+- */
+- wake_up(&cache->migration_wait);
++ atomic_dec(&cache->nr_io_migrations);
+ }
+
+ static void __cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell,
+@@ -857,11 +873,10 @@ static void cell_defer(struct cache *cac
+ wake_worker(cache);
+ }
+
+-static void cleanup_migration(struct dm_cache_migration *mg)
++static void free_io_migration(struct dm_cache_migration *mg)
+ {
+- struct cache *cache = mg->cache;
++ dec_io_migrations(mg->cache);
+ free_migration(mg);
+- dec_nr_migrations(cache);
+ }
+
+ static void migration_failure(struct dm_cache_migration *mg)
+@@ -886,7 +901,7 @@ static void migration_failure(struct dm_
+ cell_defer(cache, mg->new_ocell, true);
+ }
+
+- cleanup_migration(mg);
++ free_io_migration(mg);
+ }
+
+ static void migration_success_pre_commit(struct dm_cache_migration *mg)
+@@ -897,7 +912,7 @@ static void migration_success_pre_commit
+ if (mg->writeback) {
+ clear_dirty(cache, mg->old_oblock, mg->cblock);
+ cell_defer(cache, mg->old_ocell, false);
+- cleanup_migration(mg);
++ free_io_migration(mg);
+ return;
+
+ } else if (mg->demote) {
+@@ -907,14 +922,14 @@ static void migration_success_pre_commit
+ mg->old_oblock);
+ if (mg->promote)
+ cell_defer(cache, mg->new_ocell, true);
+- cleanup_migration(mg);
++ free_io_migration(mg);
+ return;
+ }
+ } else {
+ if (dm_cache_insert_mapping(cache->cmd, mg->cblock, mg->new_oblock)) {
+ DMWARN_LIMIT("promotion failed; couldn't update on disk metadata");
+ policy_remove_mapping(cache->policy, mg->new_oblock);
+- cleanup_migration(mg);
++ free_io_migration(mg);
+ return;
+ }
+ }
+@@ -947,7 +962,7 @@ static void migration_success_post_commi
+ } else {
+ if (mg->invalidate)
+ policy_remove_mapping(cache->policy, mg->old_oblock);
+- cleanup_migration(mg);
++ free_io_migration(mg);
+ }
+
+ } else {
+@@ -962,7 +977,7 @@ static void migration_success_post_commi
+ bio_endio(mg->new_ocell->holder, 0);
+ cell_defer(cache, mg->new_ocell, false);
+ }
+- cleanup_migration(mg);
++ free_io_migration(mg);
+ }
+ }
+
+@@ -1178,7 +1193,7 @@ static void promote(struct cache *cache,
+ mg->new_ocell = cell;
+ mg->start_jiffies = jiffies;
+
+- inc_nr_migrations(cache);
++ inc_io_migrations(cache);
+ quiesce_migration(mg);
+ }
+
+@@ -1201,7 +1216,7 @@ static void writeback(struct cache *cach
+ mg->new_ocell = NULL;
+ mg->start_jiffies = jiffies;
+
+- inc_nr_migrations(cache);
++ inc_io_migrations(cache);
+ quiesce_migration(mg);
+ }
+
+@@ -1227,7 +1242,7 @@ static void demote_then_promote(struct c
+ mg->new_ocell = new_ocell;
+ mg->start_jiffies = jiffies;
+
+- inc_nr_migrations(cache);
++ inc_io_migrations(cache);
+ quiesce_migration(mg);
+ }
+
+@@ -1254,7 +1269,7 @@ static void invalidate(struct cache *cac
+ mg->new_ocell = NULL;
+ mg->start_jiffies = jiffies;
+
+- inc_nr_migrations(cache);
++ inc_io_migrations(cache);
+ quiesce_migration(mg);
+ }
+
+@@ -1320,7 +1335,7 @@ static void process_discard_bio(struct c
+
+ static bool spare_migration_bandwidth(struct cache *cache)
+ {
+- sector_t current_volume = (atomic_read(&cache->nr_migrations) + 1) *
++ sector_t current_volume = (atomic_read(&cache->nr_io_migrations) + 1) *
+ cache->sectors_per_block;
+ return current_volume < cache->migration_threshold;
+ }
+@@ -1670,7 +1685,7 @@ static void stop_quiescing(struct cache
+
+ static void wait_for_migrations(struct cache *cache)
+ {
+- wait_event(cache->migration_wait, !atomic_read(&cache->nr_migrations));
++ wait_event(cache->migration_wait, !atomic_read(&cache->nr_allocated_migrations));
+ }
+
+ static void stop_worker(struct cache *cache)
+@@ -1782,9 +1797,6 @@ static void destroy(struct cache *cache)
+ {
+ unsigned i;
+
+- if (cache->next_migration)
+- mempool_free(cache->next_migration, cache->migration_pool);
+-
+ if (cache->migration_pool)
+ mempool_destroy(cache->migration_pool);
+
+@@ -2292,7 +2304,8 @@ static int cache_create(struct cache_arg
+ INIT_LIST_HEAD(&cache->quiesced_migrations);
+ INIT_LIST_HEAD(&cache->completed_migrations);
+ INIT_LIST_HEAD(&cache->need_commit_migrations);
+- atomic_set(&cache->nr_migrations, 0);
++ atomic_set(&cache->nr_allocated_migrations, 0);
++ atomic_set(&cache->nr_io_migrations, 0);
+ init_waitqueue_head(&cache->migration_wait);
+
+ init_waitqueue_head(&cache->quiescing_wait);
+@@ -2351,8 +2364,6 @@ static int cache_create(struct cache_arg
+ goto bad;
+ }
+
+- cache->next_migration = NULL;
+-
+ cache->need_tick_bio = true;
+ cache->sized = false;
+ cache->invalidate = false;
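The two-counter scheme the commit message above describes is a reusable pattern: one counter covers every allocated migration and gates quiescing, the other covers only background IO and feeds the throttling decision. Below is a minimal userspace C sketch of that pattern, assuming invented names and a pthread condition variable as a stand-in for the kernel wait queue; it is an illustration, not dm-cache code.

/* Minimal userspace sketch of the two-counter pattern; names, the pthread
 * wait and the threshold values are illustrative assumptions. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct cache_sketch {
	atomic_int nr_allocated_migrations; /* every live migration struct */
	atomic_int nr_io_migrations;        /* background IO only (promotion, writeback) */
	pthread_mutex_t lock;
	pthread_cond_t quiesced;            /* stands in for migration_wait */
	unsigned sectors_per_block;
	unsigned migration_threshold;
};

struct migration_sketch {
	struct cache_sketch *cache;
	bool background_io;                 /* false for a discard-style migration */
};

static struct migration_sketch *alloc_migration(struct cache_sketch *c, bool bg)
{
	struct migration_sketch *mg = malloc(sizeof(*mg));

	if (!mg)
		return NULL;
	mg->cache = c;
	mg->background_io = bg;
	atomic_fetch_add(&c->nr_allocated_migrations, 1);
	if (bg)
		atomic_fetch_add(&c->nr_io_migrations, 1);
	return mg;
}

static void free_migration(struct migration_sketch *mg)
{
	struct cache_sketch *c = mg->cache;

	if (mg->background_io)
		atomic_fetch_sub(&c->nr_io_migrations, 1);
	/* Only the allocation count gates quiescing, so even a discard
	 * migration holds off suspend until it has been freed. */
	if (atomic_fetch_sub(&c->nr_allocated_migrations, 1) == 1) {
		pthread_mutex_lock(&c->lock);
		pthread_cond_broadcast(&c->quiesced);
		pthread_mutex_unlock(&c->lock);
	}
	free(mg);
}

/* Throttling looks only at background IO, as spare_migration_bandwidth() does. */
static bool spare_bandwidth(struct cache_sketch *c)
{
	unsigned volume = (atomic_load(&c->nr_io_migrations) + 1) * c->sectors_per_block;

	return volume < c->migration_threshold;
}

/* Suspend waits for all allocated migrations, as wait_for_migrations() does. */
static void wait_for_all(struct cache_sketch *c)
{
	pthread_mutex_lock(&c->lock);
	while (atomic_load(&c->nr_allocated_migrations) != 0)
		pthread_cond_wait(&c->quiesced, &c->lock);
	pthread_mutex_unlock(&c->lock);
}

int main(void)
{
	static struct cache_sketch c = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.quiesced = PTHREAD_COND_INITIALIZER,
		.sectors_per_block = 8,
		.migration_threshold = 2048,
	};
	struct migration_sketch *promotion = alloc_migration(&c, true);
	struct migration_sketch *discard = alloc_migration(&c, false);

	printf("spare bandwidth: %d\n", spare_bandwidth(&c));
	free_migration(promotion);
	free_migration(discard);
	wait_for_all(&c); /* returns immediately: nothing left allocated */
	return 0;
}

Splitting the counters lets the suspend path key off allocations (so a discard in flight still blocks quiescing) while the bandwidth check keys off background IO only, which is exactly the overload the patch removes from the single nr_migrations variable.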
--- /dev/null
+From 9b1cc9f251affdd27f29fe46d0989ba76c33faf6 Mon Sep 17 00:00:00 2001
+From: Joe Thornber <ejt@redhat.com>
+Date: Fri, 23 Jan 2015 10:00:07 +0000
+Subject: dm cache: share cache-metadata object across inactive and active DM tables
+
+From: Joe Thornber <ejt@redhat.com>
+
+commit 9b1cc9f251affdd27f29fe46d0989ba76c33faf6 upstream.
+
+If a DM table is reloaded with an inactive table when the device is not
+suspended (normal procedure for LVM2), then there will be two dm-bufio
+objects that can diverge. This can lead to a situation where the
+inactive table uses bufio to read metadata at the same time the active
+table writes metadata -- resulting in the inactive table having stale
+metadata buffers once it is promoted to the active table slot.
+
+Fix this by using reference counting and a global list of cache metadata
+objects to ensure there is only one metadata object per metadata device.
+
+Signed-off-by: Joe Thornber <ejt@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-cache-metadata.c | 101 ++++++++++++++++++++++++++++++++++++++---
+ 1 file changed, 95 insertions(+), 6 deletions(-)
+
+--- a/drivers/md/dm-cache-metadata.c
++++ b/drivers/md/dm-cache-metadata.c
+@@ -94,6 +94,9 @@ struct cache_disk_superblock {
+ } __packed;
+
+ struct dm_cache_metadata {
++ atomic_t ref_count;
++ struct list_head list;
++
+ struct block_device *bdev;
+ struct dm_block_manager *bm;
+ struct dm_space_map *metadata_sm;
+@@ -669,10 +672,10 @@ static void unpack_value(__le64 value_le
+
+ /*----------------------------------------------------------------*/
+
+-struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
+- sector_t data_block_size,
+- bool may_format_device,
+- size_t policy_hint_size)
++static struct dm_cache_metadata *metadata_open(struct block_device *bdev,
++ sector_t data_block_size,
++ bool may_format_device,
++ size_t policy_hint_size)
+ {
+ int r;
+ struct dm_cache_metadata *cmd;
+@@ -683,6 +686,7 @@ struct dm_cache_metadata *dm_cache_metad
+ return NULL;
+ }
+
++ atomic_set(&cmd->ref_count, 1);
+ init_rwsem(&cmd->root_lock);
+ cmd->bdev = bdev;
+ cmd->data_block_size = data_block_size;
+@@ -705,10 +709,95 @@ struct dm_cache_metadata *dm_cache_metad
+ return cmd;
+ }
+
++/*
++ * We keep a little list of ref counted metadata objects to prevent two
++ * different target instances creating separate bufio instances. This is
++ * an issue if a table is reloaded before the suspend.
++ */
++static DEFINE_MUTEX(table_lock);
++static LIST_HEAD(table);
++
++static struct dm_cache_metadata *lookup(struct block_device *bdev)
++{
++ struct dm_cache_metadata *cmd;
++
++ list_for_each_entry(cmd, &table, list)
++ if (cmd->bdev == bdev) {
++ atomic_inc(&cmd->ref_count);
++ return cmd;
++ }
++
++ return NULL;
++}
++
++static struct dm_cache_metadata *lookup_or_open(struct block_device *bdev,
++ sector_t data_block_size,
++ bool may_format_device,
++ size_t policy_hint_size)
++{
++ struct dm_cache_metadata *cmd, *cmd2;
++
++ mutex_lock(&table_lock);
++ cmd = lookup(bdev);
++ mutex_unlock(&table_lock);
++
++ if (cmd)
++ return cmd;
++
++ cmd = metadata_open(bdev, data_block_size, may_format_device, policy_hint_size);
++ if (cmd) {
++ mutex_lock(&table_lock);
++ cmd2 = lookup(bdev);
++ if (cmd2) {
++ mutex_unlock(&table_lock);
++ __destroy_persistent_data_objects(cmd);
++ kfree(cmd);
++ return cmd2;
++ }
++ list_add(&cmd->list, &table);
++ mutex_unlock(&table_lock);
++ }
++
++ return cmd;
++}
++
++static bool same_params(struct dm_cache_metadata *cmd, sector_t data_block_size)
++{
++ if (cmd->data_block_size != data_block_size) {
++ DMERR("data_block_size (%llu) different from that in metadata (%llu)\n",
++ (unsigned long long) data_block_size,
++ (unsigned long long) cmd->data_block_size);
++ return false;
++ }
++
++ return true;
++}
++
++struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
++ sector_t data_block_size,
++ bool may_format_device,
++ size_t policy_hint_size)
++{
++ struct dm_cache_metadata *cmd = lookup_or_open(bdev, data_block_size,
++ may_format_device, policy_hint_size);
++ if (cmd && !same_params(cmd, data_block_size)) {
++ dm_cache_metadata_close(cmd);
++ return NULL;
++ }
++
++ return cmd;
++}
++
+ void dm_cache_metadata_close(struct dm_cache_metadata *cmd)
+ {
+- __destroy_persistent_data_objects(cmd);
+- kfree(cmd);
++ if (atomic_dec_and_test(&cmd->ref_count)) {
++ mutex_lock(&table_lock);
++ list_del(&cmd->list);
++ mutex_unlock(&table_lock);
++
++ __destroy_persistent_data_objects(cmd);
++ kfree(cmd);
++ }
+ }
+
+ /*
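The reference-counted, per-device sharing that this patch adds to the cache-metadata open path follows a common double-checked lookup-or-open pattern. The sketch below models it in plain userspace C, keyed by an integer device id instead of a struct block_device and using a simple singly linked list; the names and types are illustrative, not the dm-cache-metadata API.

/* Userspace sketch of the ref-counted "one metadata object per device"
 * pattern; the integer dev_id, the singly linked list and all names are
 * illustrative simplifications, not the dm-cache-metadata API. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct metadata {
	int dev_id;              /* stands in for cmd->bdev */
	int ref_count;           /* kept simple: protected by table_lock */
	struct metadata *next;
};

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct metadata *table;   /* global list of open metadata objects */

/* Call with table_lock held; takes a reference on a hit. */
static struct metadata *lookup(int dev_id)
{
	for (struct metadata *md = table; md; md = md->next)
		if (md->dev_id == dev_id) {
			md->ref_count++;
			return md;
		}
	return NULL;
}

static struct metadata *metadata_open(int dev_id)
{
	struct metadata *md = calloc(1, sizeof(*md));

	if (md) {
		md->dev_id = dev_id;
		md->ref_count = 1;
	}
	return md;
}

/* The expensive open runs outside the lock, then the list is re-checked in
 * case another caller opened the same device concurrently. */
static struct metadata *lookup_or_open(int dev_id)
{
	struct metadata *md, *md2;

	pthread_mutex_lock(&table_lock);
	md = lookup(dev_id);
	pthread_mutex_unlock(&table_lock);
	if (md)
		return md;

	md = metadata_open(dev_id);
	if (!md)
		return NULL;

	pthread_mutex_lock(&table_lock);
	md2 = lookup(dev_id);            /* someone may have beaten us to it */
	if (md2) {
		pthread_mutex_unlock(&table_lock);
		free(md);                /* drop our duplicate */
		return md2;
	}
	md->next = table;
	table = md;
	pthread_mutex_unlock(&table_lock);
	return md;
}

static void metadata_close(struct metadata *md)
{
	pthread_mutex_lock(&table_lock);
	if (--md->ref_count == 0) {
		for (struct metadata **p = &table; *p; p = &(*p)->next)
			if (*p == md) {
				*p = md->next;   /* unlink before freeing */
				break;
			}
		pthread_mutex_unlock(&table_lock);
		free(md);
		return;
	}
	pthread_mutex_unlock(&table_lock);
}

int main(void)
{
	struct metadata *active = lookup_or_open(42);
	struct metadata *inactive = lookup_or_open(42);  /* table reload */

	printf("shared: %s\n", active == inactive ? "yes" : "no"); /* "yes" */
	metadata_close(inactive);
	metadata_close(active);
	return 0;
}

Because a second open of the same device returns the already-open object with its reference count bumped, the active and inactive tables end up sharing a single metadata object, which is the point of the fix.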
--- /dev/null
+From 4b149e417463bbb6d1d9b805f729627ca2b54495 Mon Sep 17 00:00:00 2001
+From: Felipe Balbi <balbi@ti.com>
+Date: Tue, 6 Jan 2015 14:38:08 -0600
+Subject: irqchip: omap-intc: Fix legacy DMA regression
+
+From: Felipe Balbi <balbi@ti.com>
+
+commit 4b149e417463bbb6d1d9b805f729627ca2b54495 upstream.
+
+commit 55601c9f2467 (arm: omap: intc: switch over
+to linear irq domain) introduced a regression with
+the SDMA legacy driver, because that driver strictly
+depends on INTC's IRQs starting at NR_IRQs. Apparently
+irq_domain_add_linear() won't guarantee that, since we
+see a difference of 7 IRQs when booting with and without
+the commit cited above.
+
+Until arch/arm/plat-omap/dma.c is properly fixed, we
+must maintain OMAP2/3 using irq_domain_add_legacy().
+
+A FIXME note was added so people know to delete that
+code once that legacy DMA driver is fixed up.
+
+Fixes: 55601c9f2467 (arm: omap: intc: switch over to linear irq domain)
+Tested-by: Aaro Koskinen <aaro.koskinen@iki.fi>
+Tested-by: Tony Lindgren <tony@atomide.com>
+Signed-off-by: Felipe Balbi <balbi@ti.com>
+Link: https://lkml.kernel.org/r/1420576688-10604-1-git-send-email-balbi@ti.com
+Signed-off-by: Jason Cooper <jason@lakedaemon.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+diff --git a/drivers/irqchip/irq-omap-intc.c b/drivers/irqchip/irq-omap-intc.c
+index 28718d3e8281..c03f140acbae 100644
+--- a/drivers/irqchip/irq-omap-intc.c
++++ b/drivers/irqchip/irq-omap-intc.c
+@@ -263,7 +263,7 @@ static int __init omap_init_irq_of(struct device_node *node)
+ return ret;
+ }
+
+-static int __init omap_init_irq_legacy(u32 base)
++static int __init omap_init_irq_legacy(u32 base, struct device_node *node)
+ {
+ int j, irq_base;
+
+@@ -277,7 +277,7 @@ static int __init omap_init_irq_legacy(u32 base)
+ irq_base = 0;
+ }
+
+- domain = irq_domain_add_legacy(NULL, omap_nr_irqs, irq_base, 0,
++ domain = irq_domain_add_legacy(node, omap_nr_irqs, irq_base, 0,
+ &irq_domain_simple_ops, NULL);
+
+ omap_irq_soft_reset();
+@@ -301,10 +301,26 @@ static int __init omap_init_irq(u32 base, struct device_node *node)
+ {
+ int ret;
+
+- if (node)
++ /*
++ * FIXME legacy OMAP DMA driver sitting under arch/arm/plat-omap/dma.c
++ * depends is still not ready for linear IRQ domains; because of that
++ * we need to temporarily "blacklist" OMAP2 and OMAP3 devices from using
++ * linear IRQ Domain until that driver is finally fixed.
++ */
++ if (of_device_is_compatible(node, "ti,omap2-intc") ||
++ of_device_is_compatible(node, "ti,omap3-intc")) {
++ struct resource res;
++
++ if (of_address_to_resource(node, 0, &res))
++ return -ENOMEM;
++
++ base = res.start;
++ ret = omap_init_irq_legacy(base, node);
++ } else if (node) {
+ ret = omap_init_irq_of(node);
+- else
+- ret = omap_init_irq_legacy(base);
++ } else {
++ ret = omap_init_irq_legacy(base, NULL);
++ }
+
+ if (ret == 0)
+ omap_irq_enable_protection();
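The regression described above comes down to how hardware interrupt numbers map to Linux IRQ numbers: a legacy domain fixes the base up front, while a linear domain hands out numbers on demand, so a driver that hard-codes the resulting IRQ numbers can break. The toy C model below illustrates that difference; it is not the kernel's irqdomain code, and every number in it is invented.

/* Toy model of fixed-base ("legacy") versus on-demand ("linear") hwirq to
 * virq mapping. All constants are invented for illustration. */
#include <stdio.h>

#define NR_HWIRQS 96
#define LEGACY_IRQ_BASE 16            /* fixed base, known in advance */
static int next_free_virq = 23;       /* pretend earlier boot code already
                                         consumed a few IRQ numbers */

/* Legacy domain: virq = base + hwirq, decided at domain creation time. */
static int legacy_map(int hwirq)
{
	return LEGACY_IRQ_BASE + hwirq;
}

/* Linear domain: virqs are handed out on demand, so the effective base
 * depends on whatever was allocated earlier during boot. */
static int linear_map(int hwirq)
{
	static int virq_of[NR_HWIRQS];

	if (!virq_of[hwirq])
		virq_of[hwirq] = next_free_virq++;
	return virq_of[hwirq];
}

int main(void)
{
	int hwirq = 12;                          /* some SDMA interrupt line */
	int assumed = LEGACY_IRQ_BASE + hwirq;   /* what a legacy driver hard-codes */

	printf("legacy: %d, assumed: %d -> ok\n", legacy_map(hwirq), assumed);
	printf("linear: %d, assumed: %d -> broken if they differ\n",
	       linear_map(hwirq), assumed);
	return 0;
}

The model prints matching numbers for the legacy mapping and mismatching ones for the linear mapping, which is the shape of the failure the commit works around by keeping OMAP2/3 on irq_domain_add_legacy() until the DMA driver stops relying on fixed IRQ numbers.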