git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.9-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 6 Apr 2017 07:46:02 +0000 (09:46 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 6 Apr 2017 07:46:02 +0000 (09:46 +0200)
added patches:
arm-dts-bcm5301x-correct-gic_ppi-interrupt-flags.patch
blk-ensure-users-for-current-bio_list-can-see-the-full-list.patch
blk-improve-order-of-bio-handling-in-generic_make_request.patch
mips-lantiq-fix-cascaded-irq-setup.patch
mm-workingset-fix-premature-shadow-node-shrinking-with-cgroups.patch
qla2xxx-allow-vref-count-to-timeout-on-vport-delete.patch

queue-4.9/arm-dts-bcm5301x-correct-gic_ppi-interrupt-flags.patch [new file with mode: 0644]
queue-4.9/blk-ensure-users-for-current-bio_list-can-see-the-full-list.patch [new file with mode: 0644]
queue-4.9/blk-improve-order-of-bio-handling-in-generic_make_request.patch [new file with mode: 0644]
queue-4.9/mips-lantiq-fix-cascaded-irq-setup.patch [new file with mode: 0644]
queue-4.9/mm-workingset-fix-premature-shadow-node-shrinking-with-cgroups.patch [new file with mode: 0644]
queue-4.9/qla2xxx-allow-vref-count-to-timeout-on-vport-delete.patch [new file with mode: 0644]
queue-4.9/series

diff --git a/queue-4.9/arm-dts-bcm5301x-correct-gic_ppi-interrupt-flags.patch b/queue-4.9/arm-dts-bcm5301x-correct-gic_ppi-interrupt-flags.patch
new file mode 100644 (file)
index 0000000..8d53886
--- /dev/null
@@ -0,0 +1,48 @@
+From 0c2bf9f95983fe30aa2f6463cb761cd42c2d521a Mon Sep 17 00:00:00 2001
+From: Jon Mason <jon.mason@broadcom.com>
+Date: Thu, 2 Mar 2017 19:21:32 -0500
+Subject: ARM: dts: BCM5301X: Correct GIC_PPI interrupt flags
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Jon Mason <jon.mason@broadcom.com>
+
+commit 0c2bf9f95983fe30aa2f6463cb761cd42c2d521a upstream.
+
+GIC_PPI flags were misconfigured for the timers, resulting in errors
+like:
+[    0.000000] GIC: PPI11 is secure or misconfigured
+
+Changing them to be edge-triggered corrects the issue.
+
+Suggested-by: Rafał Miłecki <rafal@milecki.pl>
+Signed-off-by: Jon Mason <jon.mason@broadcom.com>
+Fixes: d27509f1 ("ARM: BCM5301X: add dts files for BCM4708 SoC")
+Signed-off-by: Florian Fainelli <f.fainelli@gmail.com>
+Signed-off-by: Amit Pundir <amit.pundir@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/boot/dts/bcm5301x.dtsi |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/arm/boot/dts/bcm5301x.dtsi
++++ b/arch/arm/boot/dts/bcm5301x.dtsi
+@@ -66,14 +66,14 @@
+               timer@20200 {
+                       compatible = "arm,cortex-a9-global-timer";
+                       reg = <0x20200 0x100>;
+-                      interrupts = <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>;
++                      interrupts = <GIC_PPI 11 IRQ_TYPE_EDGE_RISING>;
+                       clocks = <&periph_clk>;
+               };
+               local-timer@20600 {
+                       compatible = "arm,cortex-a9-twd-timer";
+                       reg = <0x20600 0x100>;
+-                      interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_HIGH>;
++                      interrupts = <GIC_PPI 13 IRQ_TYPE_EDGE_RISING>;
+                       clocks = <&periph_clk>;
+               };
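
Background note on the warning quoted above: the GIC distributor keeps the trigger type of each interrupt in GICD_ICFGR (two configuration bits per interrupt), and for PPIs those bits are frequently fixed by the SoC integrator, so a level-high type requested from the device tree simply does not stick and the irqchip code warns. The following is a simplified, hedged sketch of that read-back check, not the in-tree GIC driver; the helper name, pointer handling, and printf are illustrative, while the 0xc00 register offset, the 2-bit-per-INTID layout, and the PPI INTID range follow the GIC architecture specification.

/*
 * Simplified illustration: try to program a PPI's trigger type in
 * GICD_ICFGR and detect that the hardware ignored the write, which is
 * what surfaces as "GIC: PPI%d is secure or misconfigured" when the DT
 * flags (e.g. IRQ_TYPE_LEVEL_HIGH) disagree with the SoC wiring.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

#define GICD_ICFGR(n)	(0xc00 + 4 * (n))	/* 16 INTIDs per register, 2 bits each */

static bool set_ppi_trigger(volatile uint8_t *gicd, unsigned int ppi, bool edge)
{
	unsigned int intid = 16 + ppi;		/* PPIs occupy INTIDs 16..31 */
	volatile uint32_t *icfgr =
		(volatile uint32_t *)(gicd + GICD_ICFGR(intid / 16));
	uint32_t mask = 0x2u << ((intid % 16) * 2);	/* "edge" bit of the pair */
	uint32_t val = *icfgr;

	val = edge ? (val | mask) : (val & ~mask);
	*icfgr = val;

	if (*icfgr != val) {
		/* Hardware kept its own (often read-only) configuration. */
		printf("GIC: PPI%u is secure or misconfigured\n", ppi);
		return false;
	}
	return true;
}
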
diff --git a/queue-4.9/blk-ensure-users-for-current-bio_list-can-see-the-full-list.patch b/queue-4.9/blk-ensure-users-for-current-bio_list-can-see-the-full-list.patch
new file mode 100644 (file)
index 0000000..525d6bd
--- /dev/null
@@ -0,0 +1,197 @@
+From f5fe1b51905df7cfe4fdfd85c5fb7bc5b71a094f Mon Sep 17 00:00:00 2001
+From: NeilBrown <neilb@suse.com>
+Date: Fri, 10 Mar 2017 17:00:47 +1100
+Subject: blk: Ensure users for current->bio_list can see the full list.
+
+From: NeilBrown <neilb@suse.com>
+
+commit f5fe1b51905df7cfe4fdfd85c5fb7bc5b71a094f upstream.
+
+Commit 79bd99596b73 ("blk: improve order of bio handling in generic_make_request()")
+changed current->bio_list so that it did not contain *all* of the
+queued bios, but only those submitted by the currently running
+make_request_fn.
+
+There are two places which walk the list and requeue selected bios,
+and others that check if the list is empty.  These are no longer
+correct.
+
+So redefine current->bio_list to point to an array of two lists, which
+contain all queued bios, and adjust various code to test or walk both
+lists.
+
+Signed-off-by: NeilBrown <neilb@suse.com>
+Fixes: 79bd99596b73 ("blk: improve order of bio handling in generic_make_request()")
+Signed-off-by: Jens Axboe <axboe@fb.com>
+Cc: Jack Wang <jinpu.wang@profitbricks.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ block/bio.c         |   12 +++++++++---
+ block/blk-core.c    |   30 ++++++++++++++++++------------
+ drivers/md/dm.c     |   29 ++++++++++++++++-------------
+ drivers/md/raid10.c |    3 ++-
+ 4 files changed, 45 insertions(+), 29 deletions(-)
+
+--- a/block/bio.c
++++ b/block/bio.c
+@@ -372,10 +372,14 @@ static void punt_bios_to_rescuer(struct
+       bio_list_init(&punt);
+       bio_list_init(&nopunt);
+-      while ((bio = bio_list_pop(current->bio_list)))
++      while ((bio = bio_list_pop(&current->bio_list[0])))
+               bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
++      current->bio_list[0] = nopunt;
+-      *current->bio_list = nopunt;
++      bio_list_init(&nopunt);
++      while ((bio = bio_list_pop(&current->bio_list[1])))
++              bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
++      current->bio_list[1] = nopunt;
+       spin_lock(&bs->rescue_lock);
+       bio_list_merge(&bs->rescue_list, &punt);
+@@ -462,7 +466,9 @@ struct bio *bio_alloc_bioset(gfp_t gfp_m
+                * we retry with the original gfp_flags.
+                */
+-              if (current->bio_list && !bio_list_empty(current->bio_list))
++              if (current->bio_list &&
++                  (!bio_list_empty(&current->bio_list[0]) ||
++                   !bio_list_empty(&current->bio_list[1])))
+                       gfp_mask &= ~__GFP_DIRECT_RECLAIM;
+               p = mempool_alloc(bs->bio_pool, gfp_mask);
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -1994,7 +1994,14 @@ end_io:
+  */
+ blk_qc_t generic_make_request(struct bio *bio)
+ {
+-      struct bio_list bio_list_on_stack;
++      /*
++       * bio_list_on_stack[0] contains bios submitted by the current
++       * make_request_fn.
++       * bio_list_on_stack[1] contains bios that were submitted before
++       * the current make_request_fn, but that haven't been processed
++       * yet.
++       */
++      struct bio_list bio_list_on_stack[2];
+       blk_qc_t ret = BLK_QC_T_NONE;
+       if (!generic_make_request_checks(bio))
+@@ -2011,7 +2018,7 @@ blk_qc_t generic_make_request(struct bio
+        * should be added at the tail
+        */
+       if (current->bio_list) {
+-              bio_list_add(current->bio_list, bio);
++              bio_list_add(&current->bio_list[0], bio);
+               goto out;
+       }
+@@ -2030,18 +2037,17 @@ blk_qc_t generic_make_request(struct bio
+        * bio_list, and call into ->make_request() again.
+        */
+       BUG_ON(bio->bi_next);
+-      bio_list_init(&bio_list_on_stack);
+-      current->bio_list = &bio_list_on_stack;
++      bio_list_init(&bio_list_on_stack[0]);
++      current->bio_list = bio_list_on_stack;
+       do {
+               struct request_queue *q = bdev_get_queue(bio->bi_bdev);
+               if (likely(blk_queue_enter(q, false) == 0)) {
+-                      struct bio_list hold;
+                       struct bio_list lower, same;
+                       /* Create a fresh bio_list for all subordinate requests */
+-                      hold = bio_list_on_stack;
+-                      bio_list_init(&bio_list_on_stack);
++                      bio_list_on_stack[1] = bio_list_on_stack[0];
++                      bio_list_init(&bio_list_on_stack[0]);
+                       ret = q->make_request_fn(q, bio);
+                       blk_queue_exit(q);
+@@ -2051,19 +2057,19 @@ blk_qc_t generic_make_request(struct bio
+                        */
+                       bio_list_init(&lower);
+                       bio_list_init(&same);
+-                      while ((bio = bio_list_pop(&bio_list_on_stack)) != NULL)
++                      while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL)
+                               if (q == bdev_get_queue(bio->bi_bdev))
+                                       bio_list_add(&same, bio);
+                               else
+                                       bio_list_add(&lower, bio);
+                       /* now assemble so we handle the lowest level first */
+-                      bio_list_merge(&bio_list_on_stack, &lower);
+-                      bio_list_merge(&bio_list_on_stack, &same);
+-                      bio_list_merge(&bio_list_on_stack, &hold);
++                      bio_list_merge(&bio_list_on_stack[0], &lower);
++                      bio_list_merge(&bio_list_on_stack[0], &same);
++                      bio_list_merge(&bio_list_on_stack[0], &bio_list_on_stack[1]);
+               } else {
+                       bio_io_error(bio);
+               }
+-              bio = bio_list_pop(current->bio_list);
++              bio = bio_list_pop(&bio_list_on_stack[0]);
+       } while (bio);
+       current->bio_list = NULL; /* deactivate */
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -986,26 +986,29 @@ static void flush_current_bio_list(struc
+       struct dm_offload *o = container_of(cb, struct dm_offload, cb);
+       struct bio_list list;
+       struct bio *bio;
++      int i;
+       INIT_LIST_HEAD(&o->cb.list);
+       if (unlikely(!current->bio_list))
+               return;
+-      list = *current->bio_list;
+-      bio_list_init(current->bio_list);
+-
+-      while ((bio = bio_list_pop(&list))) {
+-              struct bio_set *bs = bio->bi_pool;
+-              if (unlikely(!bs) || bs == fs_bio_set) {
+-                      bio_list_add(current->bio_list, bio);
+-                      continue;
++      for (i = 0; i < 2; i++) {
++              list = current->bio_list[i];
++              bio_list_init(&current->bio_list[i]);
++
++              while ((bio = bio_list_pop(&list))) {
++                      struct bio_set *bs = bio->bi_pool;
++                      if (unlikely(!bs) || bs == fs_bio_set) {
++                              bio_list_add(&current->bio_list[i], bio);
++                              continue;
++                      }
++
++                      spin_lock(&bs->rescue_lock);
++                      bio_list_add(&bs->rescue_list, bio);
++                      queue_work(bs->rescue_workqueue, &bs->rescue_work);
++                      spin_unlock(&bs->rescue_lock);
+               }
+-
+-              spin_lock(&bs->rescue_lock);
+-              bio_list_add(&bs->rescue_list, bio);
+-              queue_work(bs->rescue_workqueue, &bs->rescue_work);
+-              spin_unlock(&bs->rescue_lock);
+       }
+ }
+--- a/drivers/md/raid10.c
++++ b/drivers/md/raid10.c
+@@ -941,7 +941,8 @@ static void wait_barrier(struct r10conf
+                                   !conf->barrier ||
+                                   (atomic_read(&conf->nr_pending) &&
+                                    current->bio_list &&
+-                                   !bio_list_empty(current->bio_list)),
++                                   (!bio_list_empty(&current->bio_list[0]) ||
++                                    !bio_list_empty(&current->bio_list[1]))),
+                                   conf->resync_lock);
+               conf->nr_waiting--;
+               if (!conf->nr_waiting)
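
After this change current->bio_list points at a two-element array, so every "is anything queued?" test has to look at both halves, exactly as the bio_alloc_bioset() and raid10 hunks above do. A minimal sketch of that pattern follows; it uses the real bio_list primitives, but current_has_queued_bios() is a hypothetical helper name, not something the patch adds.

#include <linux/bio.h>
#include <linux/sched.h>

/*
 * Element [0] holds bios queued by the make_request_fn that is currently
 * running; element [1] holds bios that were already queued before it was
 * entered.  Both must be consulted.
 */
static bool current_has_queued_bios(void)
{
	return current->bio_list &&
	       (!bio_list_empty(&current->bio_list[0]) ||
		!bio_list_empty(&current->bio_list[1]));
}
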
diff --git a/queue-4.9/blk-improve-order-of-bio-handling-in-generic_make_request.patch b/queue-4.9/blk-improve-order-of-bio-handling-in-generic_make_request.patch
new file mode 100644 (file)
index 0000000..2372955
--- /dev/null
@@ -0,0 +1,117 @@
+From 79bd99596b7305ab08109a8bf44a6a4511dbf1cd Mon Sep 17 00:00:00 2001
+From: NeilBrown <neilb@suse.com>
+Date: Wed, 8 Mar 2017 07:38:05 +1100
+Subject: blk: improve order of bio handling in generic_make_request()
+
+From: NeilBrown <neilb@suse.com>
+
+commit 79bd99596b7305ab08109a8bf44a6a4511dbf1cd upstream.
+
+To avoid recursion on the kernel stack when stacked block devices
+are in use, generic_make_request() will, when called recursively,
+queue new requests for later handling.  They will be handled when the
+make_request_fn for the current bio completes.
+
+If any bios are submitted by a make_request_fn, these will ultimately
+be handled sequentially.  If the handling of one of those generates
+further requests, they will be added to the end of the queue.
+
+This strict first-in-first-out behaviour can lead to deadlocks in
+various ways, normally because a request might need to wait for a
+previous request to the same device to complete.  This can happen when
+they share a mempool, and can happen due to interdependencies
+particular to the device.  Both md and dm have examples where this happens.
+
+These deadlocks can be eradicated by more selective ordering of bios.
+Specifically by handling them in depth-first order.  That is: when the
+handling of one bio generates one or more further bios, they are
+handled immediately after the parent, before any siblings of the
+parent.  That way, when generic_make_request() calls make_request_fn
+for some particular device, we can be certain that all previously
+submitted requests for that device have been completely handled and are
+not waiting for anything in the queue of requests maintained in
+generic_make_request().
+
+An easy way to achieve this would be to use a last-in-first-out stack
+instead of a queue.  However this will change the order of consecutive
+bios submitted by a make_request_fn, which could have unexpected consequences.
+Instead we take a slightly more complex approach.
+A fresh queue is created for each call to a make_request_fn.  After it completes,
+any bios for a different device are placed on the front of the main queue, followed
+by any bios for the same device, followed by all bios that were already on
+the queue before the make_request_fn was called.
+This provides the depth-first approach without reordering bios on the same level.
+
+This, by itself, is not enough to remove all deadlocks.  It just makes
+it possible for drivers to take the extra step required themselves.
+
+To avoid deadlocks, drivers must never risk waiting for a request
+after submitting one to generic_make_request.  This includes never
+allocating from a mempool twice in a single call to a make_request_fn.
+
+A common pattern in drivers is to call bio_split() in a loop, handling
+the first part and then looping around to possibly split the next part.
+Instead, a driver that finds it needs to split a bio should queue
+(with generic_make_request) the second part, handle the first part,
+and then return.  The new code in generic_make_request will ensure the
+requests to underlying bios are processed first, then the second bio
+that was split off.  If it splits again, the same process happens.  In
+each case one bio will be completely handled before the next one is attempted.
+
+With this in place, it should be possible to disable the
+punt_bios_to_rescuer() recovery thread for many block devices, and
+eventually it may be possible to remove it completely.
+
+Ref: http://www.spinics.net/lists/raid/msg54680.html
+Tested-by: Jinpu Wang <jinpu.wang@profitbricks.com>
+Inspired-by: Lars Ellenberg <lars.ellenberg@linbit.com>
+Signed-off-by: NeilBrown <neilb@suse.com>
+Signed-off-by: Jens Axboe <axboe@fb.com>
+Cc: Jack Wang <jinpu.wang@profitbricks.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ block/blk-core.c |   25 +++++++++++++++++++++----
+ 1 file changed, 21 insertions(+), 4 deletions(-)
+
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -2036,17 +2036,34 @@ blk_qc_t generic_make_request(struct bio
+               struct request_queue *q = bdev_get_queue(bio->bi_bdev);
+               if (likely(blk_queue_enter(q, false) == 0)) {
++                      struct bio_list hold;
++                      struct bio_list lower, same;
++
++                      /* Create a fresh bio_list for all subordinate requests */
++                      hold = bio_list_on_stack;
++                      bio_list_init(&bio_list_on_stack);
+                       ret = q->make_request_fn(q, bio);
+                       blk_queue_exit(q);
+-                      bio = bio_list_pop(current->bio_list);
++                      /* sort new bios into those for a lower level
++                       * and those for the same level
++                       */
++                      bio_list_init(&lower);
++                      bio_list_init(&same);
++                      while ((bio = bio_list_pop(&bio_list_on_stack)) != NULL)
++                              if (q == bdev_get_queue(bio->bi_bdev))
++                                      bio_list_add(&same, bio);
++                              else
++                                      bio_list_add(&lower, bio);
++                      /* now assemble so we handle the lowest level first */
++                      bio_list_merge(&bio_list_on_stack, &lower);
++                      bio_list_merge(&bio_list_on_stack, &same);
++                      bio_list_merge(&bio_list_on_stack, &hold);
+               } else {
+-                      struct bio *bio_next = bio_list_pop(current->bio_list);
+-
+                       bio_io_error(bio);
+-                      bio = bio_next;
+               }
++              bio = bio_list_pop(current->bio_list);
+       } while (bio);
+       current->bio_list = NULL; /* deactivate */
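
The reordering step this patch adds can be read as one small routine: everything the just-finished make_request_fn queued is split by target queue, and the on-stack list is rebuilt lower-level-first, so children are dispatched before siblings and before older pending bios. Below is a condensed, hedged sketch of that step; the real code is open-coded inside generic_make_request() as the hunk shows, and requeue_depth_first() is an illustrative name, not a function the patch introduces.

#include <linux/bio.h>
#include <linux/blkdev.h>

static void requeue_depth_first(struct request_queue *q,
				struct bio_list *on_stack,
				struct bio_list *hold)
{
	struct bio_list lower, same;
	struct bio *bio;

	bio_list_init(&lower);
	bio_list_init(&same);

	/* Sort the bios the make_request_fn just queued by target queue. */
	while ((bio = bio_list_pop(on_stack)) != NULL)
		bio_list_add(q == bdev_get_queue(bio->bi_bdev) ? &same : &lower,
			     bio);

	/* Reassemble: lower-level devices first, then the same level, then
	 * whatever was already pending before this make_request_fn ran. */
	bio_list_merge(on_stack, &lower);
	bio_list_merge(on_stack, &same);
	bio_list_merge(on_stack, hold);
}
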
diff --git a/queue-4.9/mips-lantiq-fix-cascaded-irq-setup.patch b/queue-4.9/mips-lantiq-fix-cascaded-irq-setup.patch
new file mode 100644 (file)
index 0000000..424efb3
--- /dev/null
@@ -0,0 +1,104 @@
+From 6c356eda225e3ee134ed4176b9ae3a76f793f4dd Mon Sep 17 00:00:00 2001
+From: Felix Fietkau <nbd@nbd.name>
+Date: Thu, 19 Jan 2017 12:28:22 +0100
+Subject: MIPS: Lantiq: Fix cascaded IRQ setup
+
+From: Felix Fietkau <nbd@nbd.name>
+
+commit 6c356eda225e3ee134ed4176b9ae3a76f793f4dd upstream.
+
+With the IRQ stack changes integrated, the XRX200 devices started
+emitting a constant stream of kernel messages like this:
+
+[  565.415310] Spurious IRQ: CAUSE=0x1100c300
+
+This is caused by IP0 getting handled by plat_irq_dispatch() rather than
+its vectored interrupt handler, which is fixed by commit de856416e714
+("MIPS: IRQ Stack: Fix erroneous jal to plat_irq_dispatch").
+
+Fix plat_irq_dispatch() to handle non-vectored IPI interrupts correctly
+by setting up IP2-6 as proper chained IRQ handlers and calling do_IRQ
+for all MIPS CPU interrupts.
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+Acked-by: John Crispin <john@phrozen.org>
+Cc: linux-mips@linux-mips.org
+Patchwork: https://patchwork.linux-mips.org/patch/15077/
+[james.hogan@imgtec.com: tweaked commit message]
+Signed-off-by: James Hogan <james.hogan@imgtec.com>
+Signed-off-by: Amit Pundir <amit.pundir@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/mips/lantiq/irq.c |   36 ++++++++++++++++--------------------
+ 1 file changed, 16 insertions(+), 20 deletions(-)
+
+--- a/arch/mips/lantiq/irq.c
++++ b/arch/mips/lantiq/irq.c
+@@ -269,6 +269,11 @@ static void ltq_hw5_irqdispatch(void)
+ DEFINE_HWx_IRQDISPATCH(5)
+ #endif
++static void ltq_hw_irq_handler(struct irq_desc *desc)
++{
++      ltq_hw_irqdispatch(irq_desc_get_irq(desc) - 2);
++}
++
+ #ifdef CONFIG_MIPS_MT_SMP
+ void __init arch_init_ipiirq(int irq, struct irqaction *action)
+ {
+@@ -313,23 +318,19 @@ static struct irqaction irq_call = {
+ asmlinkage void plat_irq_dispatch(void)
+ {
+       unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM;
+-      unsigned int i;
++      int irq;
+-      if ((MIPS_CPU_TIMER_IRQ == 7) && (pending & CAUSEF_IP7)) {
+-              do_IRQ(MIPS_CPU_TIMER_IRQ);
+-              goto out;
+-      } else {
+-              for (i = 0; i < MAX_IM; i++) {
+-                      if (pending & (CAUSEF_IP2 << i)) {
+-                              ltq_hw_irqdispatch(i);
+-                              goto out;
+-                      }
+-              }
++      if (!pending) {
++              spurious_interrupt();
++              return;
+       }
+-      pr_alert("Spurious IRQ: CAUSE=0x%08x\n", read_c0_status());
+-out:
+-      return;
++      pending >>= CAUSEB_IP;
++      while (pending) {
++              irq = fls(pending) - 1;
++              do_IRQ(MIPS_CPU_IRQ_BASE + irq);
++              pending &= ~BIT(irq);
++      }
+ }
+ static int icu_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
+@@ -354,11 +355,6 @@ static const struct irq_domain_ops irq_d
+       .map = icu_map,
+ };
+-static struct irqaction cascade = {
+-      .handler = no_action,
+-      .name = "cascade",
+-};
+-
+ int __init icu_of_init(struct device_node *node, struct device_node *parent)
+ {
+       struct device_node *eiu_node;
+@@ -390,7 +386,7 @@ int __init icu_of_init(struct device_nod
+       mips_cpu_irq_init();
+       for (i = 0; i < MAX_IM; i++)
+-              setup_irq(i + 2, &cascade);
++              irq_set_chained_handler(i + 2, ltq_hw_irq_handler);
+       if (cpu_has_vint) {
+               pr_info("Setting up vectored interrupts\n");
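
The new dispatch path has two halves: the pending CP0 cause bits are walked highest-first with fls(), so the timer on IP7 is still served before anything else and the IPI lines are no longer reported as spurious, and the ICU modules IM0..IM4 (wired to CPU IRQs 2..6) are reached through a chained handler instead of the dummy "cascade" irqaction. The recap below restates those hunks with added comments; it assumes the surrounding arch/mips/lantiq/irq.c context (headers, MAX_IM, ltq_hw_irqdispatch) and is not new code beyond the annotations.

/* IM0..IM4 cascade into MIPS CPU IRQs 2..6; recover the module index
 * by subtracting the base CPU IRQ number. */
static void ltq_hw_irq_handler(struct irq_desc *desc)
{
	ltq_hw_irqdispatch(irq_desc_get_irq(desc) - 2);
}

asmlinkage void plat_irq_dispatch(void)
{
	unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM;
	int irq;

	if (!pending) {
		spurious_interrupt();
		return;
	}

	pending >>= CAUSEB_IP;			/* IP0 now sits at bit 0 */
	while (pending) {
		irq = fls(pending) - 1;		/* highest pending line first */
		do_IRQ(MIPS_CPU_IRQ_BASE + irq);/* timer, IPIs and IM lines alike */
		pending &= ~BIT(irq);
	}
}
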
diff --git a/queue-4.9/mm-workingset-fix-premature-shadow-node-shrinking-with-cgroups.patch b/queue-4.9/mm-workingset-fix-premature-shadow-node-shrinking-with-cgroups.patch
new file mode 100644 (file)
index 0000000..66fee5f
--- /dev/null
@@ -0,0 +1,46 @@
+From 0cefabdaf757a6455d75f00cb76874e62703ed18 Mon Sep 17 00:00:00 2001
+From: Johannes Weiner <hannes@cmpxchg.org>
+Date: Fri, 31 Mar 2017 15:11:52 -0700
+Subject: mm: workingset: fix premature shadow node shrinking with cgroups
+
+From: Johannes Weiner <hannes@cmpxchg.org>
+
+commit 0cefabdaf757a6455d75f00cb76874e62703ed18 upstream.
+
+Commit 0a6b76dd23fa ("mm: workingset: make shadow node shrinker memcg
+aware") enabled cgroup-awareness in the shadow node shrinker, but forgot
+to also enable cgroup-awareness in the list_lru the shadow nodes sit on.
+
+Consequently, all shadow nodes are sitting on a global (per-NUMA node)
+list, while the shrinker applies the limits according to the amount of
+cache in the cgroup it is shrinking.  The result is excessive pressure on
+the shadow nodes from cgroups that have very little cache.
+
+Enable memcg-mode on the shadow node LRUs, such that per-cgroup limits
+are applied to per-cgroup lists.
+
+Fixes: 0a6b76dd23fa ("mm: workingset: make shadow node shrinker memcg aware")
+Link: http://lkml.kernel.org/r/20170322005320.8165-1-hannes@cmpxchg.org
+Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
+Acked-by: Vladimir Davydov <vdavydov@tarantool.org>
+Cc: Michal Hocko <mhocko@suse.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+
+---
+ mm/workingset.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/mm/workingset.c
++++ b/mm/workingset.c
+@@ -492,7 +492,7 @@ static int __init workingset_init(void)
+       pr_info("workingset: timestamp_bits=%d max_order=%d bucket_order=%u\n",
+              timestamp_bits, max_order, bucket_order);
+-      ret = list_lru_init_key(&workingset_shadow_nodes, &shadow_nodes_key);
++      ret = __list_lru_init(&workingset_shadow_nodes, true, &shadow_nodes_key);
+       if (ret)
+               goto err;
+       ret = register_shrinker(&workingset_shadow_shrinker);
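
The one-liner reads naturally against the v4.9-era list_lru init helpers: list_lru_init_key() hard-codes memcg_aware=false, and there is no convenience wrapper that takes both a lock class key and memcg awareness, hence the direct __list_lru_init() call with true and the key. The sketch below paraphrases include/linux/list_lru.h from that era; treat the macro forms as a best-effort recollection rather than authoritative definitions (the three-argument __list_lru_init() signature itself is confirmed by the call in the hunk above).

#include <linux/list_lru.h>

int __list_lru_init(struct list_lru *lru, bool memcg_aware,
		    struct lock_class_key *key);

#define list_lru_init(lru)		__list_lru_init((lru), false, NULL)
#define list_lru_init_key(lru, key)	__list_lru_init((lru), false, (key))
#define list_lru_init_memcg(lru)	__list_lru_init((lru), true, NULL)
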
diff --git a/queue-4.9/qla2xxx-allow-vref-count-to-timeout-on-vport-delete.patch b/queue-4.9/qla2xxx-allow-vref-count-to-timeout-on-vport-delete.patch
new file mode 100644 (file)
index 0000000..fe8dca8
--- /dev/null
@@ -0,0 +1,110 @@
+From c4a9b538ab2a109c5f9798bea1f8f4bf93aadfb9 Mon Sep 17 00:00:00 2001
+From: Joe Carnuccio <joe.carnuccio@cavium.com>
+Date: Wed, 15 Mar 2017 09:48:43 -0700
+Subject: qla2xxx: Allow vref count to timeout on vport delete.
+
+From: Joe Carnuccio <joe.carnuccio@cavium.com>
+
+commit c4a9b538ab2a109c5f9798bea1f8f4bf93aadfb9 upstream.
+
+Signed-off-by: Joe Carnuccio <joe.carnuccio@cavium.com>
+Signed-off-by: Himanshu Madhani <himanshu.madhani@cavium.com>
+Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/scsi/qla2xxx/qla_attr.c |    2 --
+ drivers/scsi/qla2xxx/qla_def.h  |    3 +++
+ drivers/scsi/qla2xxx/qla_init.c |    1 +
+ drivers/scsi/qla2xxx/qla_mid.c  |   14 ++++++++------
+ drivers/scsi/qla2xxx/qla_os.c   |    1 +
+ 5 files changed, 13 insertions(+), 8 deletions(-)
+
+--- a/drivers/scsi/qla2xxx/qla_attr.c
++++ b/drivers/scsi/qla2xxx/qla_attr.c
+@@ -2153,8 +2153,6 @@ qla24xx_vport_delete(struct fc_vport *fc
+                   "Timer for the VP[%d] has stopped\n", vha->vp_idx);
+       }
+-      BUG_ON(atomic_read(&vha->vref_count));
+-
+       qla2x00_free_fcports(vha);
+       mutex_lock(&ha->vport_lock);
+--- a/drivers/scsi/qla2xxx/qla_def.h
++++ b/drivers/scsi/qla2xxx/qla_def.h
+@@ -3742,6 +3742,7 @@ typedef struct scsi_qla_host {
+       struct qla8044_reset_template reset_tmplt;
+       struct qla_tgt_counters tgt_counters;
+       uint16_t        bbcr;
++      wait_queue_head_t vref_waitq;
+ } scsi_qla_host_t;
+ struct qla27xx_image_status {
+@@ -3780,6 +3781,7 @@ struct qla_tgt_vp_map {
+       mb();                                                \
+       if (__vha->flags.delete_progress) {                  \
+               atomic_dec(&__vha->vref_count);              \
++              wake_up(&__vha->vref_waitq);            \
+               __bail = 1;                                  \
+       } else {                                             \
+               __bail = 0;                                  \
+@@ -3788,6 +3790,7 @@ struct qla_tgt_vp_map {
+ #define QLA_VHA_MARK_NOT_BUSY(__vha) do {                  \
+       atomic_dec(&__vha->vref_count);                      \
++      wake_up(&__vha->vref_waitq);                    \
+ } while (0)
+ /*
+--- a/drivers/scsi/qla2xxx/qla_init.c
++++ b/drivers/scsi/qla2xxx/qla_init.c
+@@ -4356,6 +4356,7 @@ qla2x00_update_fcports(scsi_qla_host_t *
+                       }
+               }
+               atomic_dec(&vha->vref_count);
++              wake_up(&vha->vref_waitq);
+       }
+       spin_unlock_irqrestore(&ha->vport_slock, flags);
+ }
+--- a/drivers/scsi/qla2xxx/qla_mid.c
++++ b/drivers/scsi/qla2xxx/qla_mid.c
+@@ -74,13 +74,14 @@ qla24xx_deallocate_vp_id(scsi_qla_host_t
+        * ensures no active vp_list traversal while the vport is removed
+        * from the queue)
+        */
+-      spin_lock_irqsave(&ha->vport_slock, flags);
+-      while (atomic_read(&vha->vref_count)) {
+-              spin_unlock_irqrestore(&ha->vport_slock, flags);
+-
+-              msleep(500);
++      wait_event_timeout(vha->vref_waitq, atomic_read(&vha->vref_count),
++          10*HZ);
+-              spin_lock_irqsave(&ha->vport_slock, flags);
++      spin_lock_irqsave(&ha->vport_slock, flags);
++      if (atomic_read(&vha->vref_count)) {
++              ql_dbg(ql_dbg_vport, vha, 0xfffa,
++                  "vha->vref_count=%u timeout\n", vha->vref_count.counter);
++              vha->vref_count = (atomic_t)ATOMIC_INIT(0);
+       }
+       list_del(&vha->list);
+       qlt_update_vp_map(vha, RESET_VP_IDX);
+@@ -269,6 +270,7 @@ qla2x00_alert_all_vps(struct rsp_que *rs
+                       spin_lock_irqsave(&ha->vport_slock, flags);
+                       atomic_dec(&vha->vref_count);
++                      wake_up(&vha->vref_waitq);
+               }
+               i++;
+       }
+--- a/drivers/scsi/qla2xxx/qla_os.c
++++ b/drivers/scsi/qla2xxx/qla_os.c
+@@ -4045,6 +4045,7 @@ struct scsi_qla_host *qla2x00_create_hos
+       spin_lock_init(&vha->work_lock);
+       spin_lock_init(&vha->cmd_list_lock);
++      init_waitqueue_head(&vha->vref_waitq);
+       sprintf(vha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, vha->host_no);
+       ql_dbg(ql_dbg_init, vha, 0x0041,
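
The patch replaces an unbounded msleep(500) polling loop (and the BUG_ON on a leftover reference) with a wake_up on every vref drop plus a bounded wait on the delete side; if references are still held after 10 seconds, the driver logs the count and force-clears it instead of crashing. A minimal, hedged sketch of that wait/wake pattern follows; the structure and names (struct vref, vref_put, vref_drain) are illustrative, not the driver's exact code, and the wait condition expresses the intent of draining the count.

#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/jiffies.h>

struct vref {
	atomic_t		count;	/* atomic_set(&v->count, 0) and          */
	wait_queue_head_t	waitq;	/* init_waitqueue_head(&v->waitq) at setup */
};

static void vref_put(struct vref *v)
{
	atomic_dec(&v->count);
	wake_up(&v->waitq);		/* let a waiting deleter re-check */
}

static void vref_drain(struct vref *v)
{
	/* Bounded wait for readers to drop their references; on timeout the
	 * driver logs the leftover count and resets it rather than BUG(). */
	wait_event_timeout(v->waitq, atomic_read(&v->count) == 0, 10 * HZ);
}
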
diff --git a/queue-4.9/series b/queue-4.9/series
index d14f4c450908c9b825b9dc65dbae2c5850ad4f24..6b2b40e14839c19aa9386b210c9fc0553a4770fb 100644 (file)
@@ -61,3 +61,9 @@ lib-syscall-clear-return-values-when-no-stack.patch
 mm-rmap-fix-huge-file-mmap-accounting-in-the-memcg-stats.patch
 mm-hugetlb-use-pte_present-instead-of-pmd_present-in-follow_huge_pmd.patch
 arm-bcm5301x-add-back-handler-ignoring-external-imprecise-aborts.patch
+qla2xxx-allow-vref-count-to-timeout-on-vport-delete.patch
+arm-dts-bcm5301x-correct-gic_ppi-interrupt-flags.patch
+mips-lantiq-fix-cascaded-irq-setup.patch
+mm-workingset-fix-premature-shadow-node-shrinking-with-cgroups.patch
+blk-improve-order-of-bio-handling-in-generic_make_request.patch
+blk-ensure-users-for-current-bio_list-can-see-the-full-list.patch