git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
3.14-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 9 May 2014 06:14:12 +0000 (08:14 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 9 May 2014 06:14:12 +0000 (08:14 +0200)
added patches:
acpi-processor-fix-failure-of-loading-acpi-cpufreq-driver.patch
aio-v4-ensure-access-to-ctx-ring_pages-is-correctly-serialised-for-migration.patch
cpufreq-at32ap-don-t-declare-local-variable-as-static.patch
cpufreq-loongson2_cpufreq-don-t-declare-local-variable-as-static.patch
cpufreq-unicore32-fix-typo-issue-for-clk.patch
dma-edma-fix-incorrect-sg-list-handling.patch
dm-cache-fix-a-lock-inversion.patch
dm-cache-prevent-corruption-caused-by-discard_block_size-cache_block_size.patch
dm-take-care-to-copy-the-space-map-roots-before-locking-the-superblock.patch
dm-thin-fix-dangling-bio-in-process_deferred_bios-error-path.patch
dm-transaction-manager-fix-corruption-due-to-non-atomic-transaction-commit.patch
gpio-mxs-allow-for-recursive-enable_irq_wake-call.patch
input-synaptics-add-min-max-quirk-for-thinkpad-edge-e431.patch
input-synaptics-add-min-max-quirk-for-thinkpad-t431s-l440-l540-s1-yoga-and-x1.patch
lib-percpu_counter.c-fix-bad-percpu-counter-state-during-suspend.patch
lockd-ensure-we-tear-down-any-live-sockets-when-socket-creation-fails-during-lockd_up.patch
mmc-sdhci-bcm-kona-fix-build-errors-when-built-in.patch
mtd-atmel_nand-disable-subpage-nand-write-when-using-atmel-pmecc.patch
mtd-diskonchip-mem-resource-name-is-not-optional.patch
mtd-nuc900_nand-null-dereference-in-nuc900_nand_enable.patch
mtd-sm_ftl-heap-corruption-in-sm_create_sysfs_attributes.patch
pinctrl-as3722-fix-handling-of-gpio-invert-bit.patch
skip-intel_crt_init-for-dell-xps-8700.patch
tgafb-fix-data-copying.patch
tgafb-fix-mode-setting-with-fbset.patch
thinkpad_acpi-fix-inconsistent-mute-led-after-resume.patch

27 files changed:
queue-3.14/acpi-processor-fix-failure-of-loading-acpi-cpufreq-driver.patch [new file with mode: 0644]
queue-3.14/aio-v4-ensure-access-to-ctx-ring_pages-is-correctly-serialised-for-migration.patch [new file with mode: 0644]
queue-3.14/cpufreq-at32ap-don-t-declare-local-variable-as-static.patch [new file with mode: 0644]
queue-3.14/cpufreq-loongson2_cpufreq-don-t-declare-local-variable-as-static.patch [new file with mode: 0644]
queue-3.14/cpufreq-unicore32-fix-typo-issue-for-clk.patch [new file with mode: 0644]
queue-3.14/dm-cache-fix-a-lock-inversion.patch [new file with mode: 0644]
queue-3.14/dm-cache-prevent-corruption-caused-by-discard_block_size-cache_block_size.patch [new file with mode: 0644]
queue-3.14/dm-take-care-to-copy-the-space-map-roots-before-locking-the-superblock.patch [new file with mode: 0644]
queue-3.14/dm-thin-fix-dangling-bio-in-process_deferred_bios-error-path.patch [new file with mode: 0644]
queue-3.14/dm-transaction-manager-fix-corruption-due-to-non-atomic-transaction-commit.patch [new file with mode: 0644]
queue-3.14/dma-edma-fix-incorrect-sg-list-handling.patch [new file with mode: 0644]
queue-3.14/gpio-mxs-allow-for-recursive-enable_irq_wake-call.patch [new file with mode: 0644]
queue-3.14/input-synaptics-add-min-max-quirk-for-thinkpad-edge-e431.patch [new file with mode: 0644]
queue-3.14/input-synaptics-add-min-max-quirk-for-thinkpad-t431s-l440-l540-s1-yoga-and-x1.patch [new file with mode: 0644]
queue-3.14/lib-percpu_counter.c-fix-bad-percpu-counter-state-during-suspend.patch [new file with mode: 0644]
queue-3.14/lockd-ensure-we-tear-down-any-live-sockets-when-socket-creation-fails-during-lockd_up.patch [new file with mode: 0644]
queue-3.14/mmc-sdhci-bcm-kona-fix-build-errors-when-built-in.patch [new file with mode: 0644]
queue-3.14/mtd-atmel_nand-disable-subpage-nand-write-when-using-atmel-pmecc.patch [new file with mode: 0644]
queue-3.14/mtd-diskonchip-mem-resource-name-is-not-optional.patch [new file with mode: 0644]
queue-3.14/mtd-nuc900_nand-null-dereference-in-nuc900_nand_enable.patch [new file with mode: 0644]
queue-3.14/mtd-sm_ftl-heap-corruption-in-sm_create_sysfs_attributes.patch [new file with mode: 0644]
queue-3.14/pinctrl-as3722-fix-handling-of-gpio-invert-bit.patch [new file with mode: 0644]
queue-3.14/series
queue-3.14/skip-intel_crt_init-for-dell-xps-8700.patch [new file with mode: 0644]
queue-3.14/tgafb-fix-data-copying.patch [new file with mode: 0644]
queue-3.14/tgafb-fix-mode-setting-with-fbset.patch [new file with mode: 0644]
queue-3.14/thinkpad_acpi-fix-inconsistent-mute-led-after-resume.patch [new file with mode: 0644]

diff --git a/queue-3.14/acpi-processor-fix-failure-of-loading-acpi-cpufreq-driver.patch b/queue-3.14/acpi-processor-fix-failure-of-loading-acpi-cpufreq-driver.patch
new file mode 100644 (file)
index 0000000..4e77309
--- /dev/null
@@ -0,0 +1,56 @@
+From 4985c32ee4241d1aba1beeac72294faa46aaff10 Mon Sep 17 00:00:00 2001
+From: Lan Tianyu <tianyu.lan@intel.com>
+Date: Wed, 30 Apr 2014 15:46:33 +0800
+Subject: ACPI / processor: Fix failure of loading acpi-cpufreq driver
+
+From: Lan Tianyu <tianyu.lan@intel.com>
+
+commit 4985c32ee4241d1aba1beeac72294faa46aaff10 upstream.
+
+According to commit d640113fe (ACPI: processor: fix acpi_get_cpuid for UP
+processor), the BIOS may not provide _MAT or MADT tables, in which case
+acpi_get_apicid() always returns -1. In those cases, the original code
+passed an apic_id of -1 to acpi_map_cpuid(), which then checked acpi_id;
+if acpi_id was zero, it ignored apic_id and returned zero for CPU0.
+
+Commit b981513 (ACPI / scan: bail out early if failed to parse APIC
+ID for CPU) changed this behavior: it returns -ENODEV when apic_id is
+less than zero after calling acpi_get_apicid(). This causes the
+acpi-cpufreq driver to fail to load on some machines. This patch fixes it.
+
+Fixes: b981513f806d (ACPI / scan: bail out early if failed to parse APIC ID for CPU)
+References: https://bugzilla.kernel.org/show_bug.cgi?id=73781
+Reported-and-tested-by: KATO Hiroshi <katoh@mikage.ne.jp>
+Reported-and-tested-by: Stuart Foster <smf.linux@ntlworld.com>
+Signed-off-by: Lan Tianyu <tianyu.lan@intel.com>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/acpi/acpi_processor.c |    7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+--- a/drivers/acpi/acpi_processor.c
++++ b/drivers/acpi/acpi_processor.c
+@@ -170,6 +170,9 @@ static int acpi_processor_hotadd_init(st
+       acpi_status status;
+       int ret;
++      if (pr->apic_id == -1)
++              return -ENODEV;
++
+       status = acpi_evaluate_integer(pr->handle, "_STA", NULL, &sta);
+       if (ACPI_FAILURE(status) || !(sta & ACPI_STA_DEVICE_PRESENT))
+               return -ENODEV;
+@@ -260,10 +263,8 @@ static int acpi_processor_get_info(struc
+       }
+       apic_id = acpi_get_apicid(pr->handle, device_declaration, pr->acpi_id);
+-      if (apic_id < 0) {
++      if (apic_id < 0)
+               acpi_handle_debug(pr->handle, "failed to get CPU APIC ID.\n");
+-              return -ENODEV;
+-      }
+       pr->apic_id = apic_id;
+       cpu_index = acpi_map_cpuid(pr->apic_id, pr->acpi_id);
diff --git a/queue-3.14/aio-v4-ensure-access-to-ctx-ring_pages-is-correctly-serialised-for-migration.patch b/queue-3.14/aio-v4-ensure-access-to-ctx-ring_pages-is-correctly-serialised-for-migration.patch
new file mode 100644 (file)
index 0000000..b3d03f2
--- /dev/null
@@ -0,0 +1,302 @@
+From fa8a53c39f3fdde98c9eace6a9b412143f0f6ed6 Mon Sep 17 00:00:00 2001
+From: Benjamin LaHaise <bcrl@kvack.org>
+Date: Fri, 28 Mar 2014 10:14:45 -0400
+Subject: aio: v4 ensure access to ctx->ring_pages is correctly serialised for migration
+
+From: Benjamin LaHaise <bcrl@kvack.org>
+
+commit fa8a53c39f3fdde98c9eace6a9b412143f0f6ed6 upstream.
+
+As reported by Tang Chen, Gu Zheng and Yasuaki Isimatsu, the following issues
+exist in the aio ring page migration support.
+
+As a result, for example, we have the following problem:
+
+            thread 1                      |              thread 2
+                                          |
+aio_migratepage()                         |
+ |-> take ctx->completion_lock            |
+ |-> migrate_page_copy(new, old)          |
+ |   *NOW*, ctx->ring_pages[idx] == old   |
+                                          |
+                                          |    *NOW*, ctx->ring_pages[idx] == old
+                                          |    aio_read_events_ring()
+                                          |     |-> ring = kmap_atomic(ctx->ring_pages[0])
+                                          |     |-> ring->head = head;          *HERE, write to the old ring page*
+                                          |     |-> kunmap_atomic(ring);
+                                          |
+ |-> ctx->ring_pages[idx] = new           |
+ |   *BUT NOW*, the content of            |
+ |    ring_pages[idx] is old.             |
+ |-> release ctx->completion_lock         |
+
+As above, the new ring page will not be updated.
+
+Fix this issue, as well as prevent races in aio_ring_setup() by holding
+the ring_lock mutex during kioctx setup and page migration.  This avoids
+the overhead of taking another spinlock in aio_read_events_ring() as Tang's
+and Gu's original fix did, pushing the overhead into the migration code.
+
+Note that to handle the nesting of ring_lock inside of mmap_sem, the
+migratepage operation uses mutex_trylock().  Page migration is not a 100%
+critical operation in this case, so the occasional failure can be
+tolerated.  This issue was reported by Sasha Levin.
+
+Based on feedback from Linus, avoid the extra taking of ctx->completion_lock.
+Instead, make page migration fully serialised by mapping->private_lock, and
+have aio_free_ring() simply disconnect the kioctx from the mapping by calling
+put_aio_ring_file() before touching ctx->ring_pages[].  This simplifies the
+error handling logic in aio_migratepage(), and should improve robustness.
+
+v4: always do mutex_unlock() in cases when kioctx setup fails.
+
+Reported-by: Yasuaki Ishimatsu <isimatu.yasuaki@jp.fujitsu.com>
+Reported-by: Sasha Levin <sasha.levin@oracle.com>
+Signed-off-by: Benjamin LaHaise <bcrl@kvack.org>
+Cc: Tang Chen <tangchen@cn.fujitsu.com>
+Cc: Gu Zheng <guz.fnst@cn.fujitsu.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/aio.c |  120 +++++++++++++++++++++++++++++++++++----------------------------
+ 1 file changed, 67 insertions(+), 53 deletions(-)
+
+--- a/fs/aio.c
++++ b/fs/aio.c
+@@ -52,7 +52,8 @@
+ struct aio_ring {
+       unsigned        id;     /* kernel internal index number */
+       unsigned        nr;     /* number of io_events */
+-      unsigned        head;
++      unsigned        head;   /* Written to by userland or under ring_lock
++                               * mutex by aio_read_events_ring(). */
+       unsigned        tail;
+       unsigned        magic;
+@@ -243,6 +244,11 @@ static void aio_free_ring(struct kioctx
+ {
+       int i;
++      /* Disconnect the kiotx from the ring file.  This prevents future
++       * accesses to the kioctx from page migration.
++       */
++      put_aio_ring_file(ctx);
++
+       for (i = 0; i < ctx->nr_pages; i++) {
+               struct page *page;
+               pr_debug("pid(%d) [%d] page->count=%d\n", current->pid, i,
+@@ -254,8 +260,6 @@ static void aio_free_ring(struct kioctx
+               put_page(page);
+       }
+-      put_aio_ring_file(ctx);
+-
+       if (ctx->ring_pages && ctx->ring_pages != ctx->internal_pages) {
+               kfree(ctx->ring_pages);
+               ctx->ring_pages = NULL;
+@@ -283,29 +287,38 @@ static int aio_migratepage(struct addres
+ {
+       struct kioctx *ctx;
+       unsigned long flags;
++      pgoff_t idx;
+       int rc;
+       rc = 0;
+-      /* Make sure the old page hasn't already been changed */
++      /* mapping->private_lock here protects against the kioctx teardown.  */
+       spin_lock(&mapping->private_lock);
+       ctx = mapping->private_data;
+-      if (ctx) {
+-              pgoff_t idx;
+-              spin_lock_irqsave(&ctx->completion_lock, flags);
+-              idx = old->index;
+-              if (idx < (pgoff_t)ctx->nr_pages) {
+-                      if (ctx->ring_pages[idx] != old)
+-                              rc = -EAGAIN;
+-              } else
+-                      rc = -EINVAL;
+-              spin_unlock_irqrestore(&ctx->completion_lock, flags);
++      if (!ctx) {
++              rc = -EINVAL;
++              goto out;
++      }
++
++      /* The ring_lock mutex.  The prevents aio_read_events() from writing
++       * to the ring's head, and prevents page migration from mucking in
++       * a partially initialized kiotx.
++       */
++      if (!mutex_trylock(&ctx->ring_lock)) {
++              rc = -EAGAIN;
++              goto out;
++      }
++
++      idx = old->index;
++      if (idx < (pgoff_t)ctx->nr_pages) {
++              /* Make sure the old page hasn't already been changed */
++              if (ctx->ring_pages[idx] != old)
++                      rc = -EAGAIN;
+       } else
+               rc = -EINVAL;
+-      spin_unlock(&mapping->private_lock);
+       if (rc != 0)
+-              return rc;
++              goto out_unlock;
+       /* Writeback must be complete */
+       BUG_ON(PageWriteback(old));
+@@ -314,38 +327,26 @@ static int aio_migratepage(struct addres
+       rc = migrate_page_move_mapping(mapping, new, old, NULL, mode, 1);
+       if (rc != MIGRATEPAGE_SUCCESS) {
+               put_page(new);
+-              return rc;
++              goto out_unlock;
+       }
+-      /* We can potentially race against kioctx teardown here.  Use the
+-       * address_space's private data lock to protect the mapping's
+-       * private_data.
++      /* Take completion_lock to prevent other writes to the ring buffer
++       * while the old page is copied to the new.  This prevents new
++       * events from being lost.
+        */
+-      spin_lock(&mapping->private_lock);
+-      ctx = mapping->private_data;
+-      if (ctx) {
+-              pgoff_t idx;
+-              spin_lock_irqsave(&ctx->completion_lock, flags);
+-              migrate_page_copy(new, old);
+-              idx = old->index;
+-              if (idx < (pgoff_t)ctx->nr_pages) {
+-                      /* And only do the move if things haven't changed */
+-                      if (ctx->ring_pages[idx] == old)
+-                              ctx->ring_pages[idx] = new;
+-                      else
+-                              rc = -EAGAIN;
+-              } else
+-                      rc = -EINVAL;
+-              spin_unlock_irqrestore(&ctx->completion_lock, flags);
+-      } else
+-              rc = -EBUSY;
+-      spin_unlock(&mapping->private_lock);
++      spin_lock_irqsave(&ctx->completion_lock, flags);
++      migrate_page_copy(new, old);
++      BUG_ON(ctx->ring_pages[idx] != old);
++      ctx->ring_pages[idx] = new;
++      spin_unlock_irqrestore(&ctx->completion_lock, flags);
+-      if (rc == MIGRATEPAGE_SUCCESS)
+-              put_page(old);
+-      else
+-              put_page(new);
++      /* The old page is no longer accessible. */
++      put_page(old);
++out_unlock:
++      mutex_unlock(&ctx->ring_lock);
++out:
++      spin_unlock(&mapping->private_lock);
+       return rc;
+ }
+ #endif
+@@ -380,7 +381,7 @@ static int aio_setup_ring(struct kioctx
+       file = aio_private_file(ctx, nr_pages);
+       if (IS_ERR(file)) {
+               ctx->aio_ring_file = NULL;
+-              return -EAGAIN;
++              return -ENOMEM;
+       }
+       ctx->aio_ring_file = file;
+@@ -415,7 +416,7 @@ static int aio_setup_ring(struct kioctx
+       if (unlikely(i != nr_pages)) {
+               aio_free_ring(ctx);
+-              return -EAGAIN;
++              return -ENOMEM;
+       }
+       ctx->mmap_size = nr_pages * PAGE_SIZE;
+@@ -429,7 +430,7 @@ static int aio_setup_ring(struct kioctx
+       if (IS_ERR((void *)ctx->mmap_base)) {
+               ctx->mmap_size = 0;
+               aio_free_ring(ctx);
+-              return -EAGAIN;
++              return -ENOMEM;
+       }
+       pr_debug("mmap address: 0x%08lx\n", ctx->mmap_base);
+@@ -556,6 +557,10 @@ static int ioctx_add_table(struct kioctx
+                                       rcu_read_unlock();
+                                       spin_unlock(&mm->ioctx_lock);
++                                      /* While kioctx setup is in progress,
++                                       * we are protected from page migration
++                                       * changes ring_pages by ->ring_lock.
++                                       */
+                                       ring = kmap_atomic(ctx->ring_pages[0]);
+                                       ring->id = ctx->id;
+                                       kunmap_atomic(ring);
+@@ -640,24 +645,28 @@ static struct kioctx *ioctx_alloc(unsign
+       ctx->max_reqs = nr_events;
+-      if (percpu_ref_init(&ctx->users, free_ioctx_users))
+-              goto err;
+-
+-      if (percpu_ref_init(&ctx->reqs, free_ioctx_reqs))
+-              goto err;
+-
+       spin_lock_init(&ctx->ctx_lock);
+       spin_lock_init(&ctx->completion_lock);
+       mutex_init(&ctx->ring_lock);
++      /* Protect against page migration throughout kiotx setup by keeping
++       * the ring_lock mutex held until setup is complete. */
++      mutex_lock(&ctx->ring_lock);
+       init_waitqueue_head(&ctx->wait);
+       INIT_LIST_HEAD(&ctx->active_reqs);
++      if (percpu_ref_init(&ctx->users, free_ioctx_users))
++              goto err;
++
++      if (percpu_ref_init(&ctx->reqs, free_ioctx_reqs))
++              goto err;
++
+       ctx->cpu = alloc_percpu(struct kioctx_cpu);
+       if (!ctx->cpu)
+               goto err;
+-      if (aio_setup_ring(ctx) < 0)
++      err = aio_setup_ring(ctx);
++      if (err < 0)
+               goto err;
+       atomic_set(&ctx->reqs_available, ctx->nr_events - 1);
+@@ -683,6 +692,9 @@ static struct kioctx *ioctx_alloc(unsign
+       if (err)
+               goto err_cleanup;
++      /* Release the ring_lock mutex now that all setup is complete. */
++      mutex_unlock(&ctx->ring_lock);
++
+       pr_debug("allocated ioctx %p[%ld]: mm=%p mask=0x%x\n",
+                ctx, ctx->user_id, mm, ctx->nr_events);
+       return ctx;
+@@ -692,6 +704,7 @@ err_cleanup:
+ err_ctx:
+       aio_free_ring(ctx);
+ err:
++      mutex_unlock(&ctx->ring_lock);
+       free_percpu(ctx->cpu);
+       free_percpu(ctx->reqs.pcpu_count);
+       free_percpu(ctx->users.pcpu_count);
+@@ -1024,6 +1037,7 @@ static long aio_read_events_ring(struct
+       mutex_lock(&ctx->ring_lock);
++      /* Access to ->ring_pages here is protected by ctx->ring_lock. */
+       ring = kmap_atomic(ctx->ring_pages[0]);
+       head = ring->head;
+       tail = ring->tail;
diff --git a/queue-3.14/cpufreq-at32ap-don-t-declare-local-variable-as-static.patch b/queue-3.14/cpufreq-at32ap-don-t-declare-local-variable-as-static.patch
new file mode 100644 (file)
index 0000000..0039ac4
--- /dev/null
@@ -0,0 +1,41 @@
+From 0ca97886fece9e1acd71ade4ca3a250945c8fc8b Mon Sep 17 00:00:00 2001
+From: Viresh Kumar <viresh.kumar@linaro.org>
+Date: Thu, 3 Apr 2014 20:20:36 +0530
+Subject: cpufreq: at32ap: don't declare local variable as static
+
+From: Viresh Kumar <viresh.kumar@linaro.org>
+
+commit 0ca97886fece9e1acd71ade4ca3a250945c8fc8b upstream.
+
+Earlier commit:
+       commit 652ed95d5fa6074b3c4ea245deb0691f1acb6656
+       Author: Viresh Kumar <viresh.kumar@linaro.org>
+       Date:   Thu Jan 9 20:38:43 2014 +0530
+
+           cpufreq: introduce cpufreq_generic_get() routine
+
+made some changes to the driver and by mistake declared cpuclk as a
+'static' local variable, which wasn't actually required. Fix it.
+
+Fixes: 652ed95d5fa6 (cpufreq: introduce cpufreq_generic_get() routine)
+Reported-by: Alexandre Oliva <lxoliva@fsfla.org>
+Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
+Acked-by: Hans-Christian Egtvedt <egtvedt@samfundet.no>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/cpufreq/at32ap-cpufreq.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/cpufreq/at32ap-cpufreq.c
++++ b/drivers/cpufreq/at32ap-cpufreq.c
+@@ -52,7 +52,7 @@ static int at32_set_target(struct cpufre
+ static int at32_cpufreq_driver_init(struct cpufreq_policy *policy)
+ {
+       unsigned int frequency, rate, min_freq;
+-      static struct clk *cpuclk;
++      struct clk *cpuclk;
+       int retval, steps, i;
+       if (policy->cpu != 0)
diff --git a/queue-3.14/cpufreq-loongson2_cpufreq-don-t-declare-local-variable-as-static.patch b/queue-3.14/cpufreq-loongson2_cpufreq-don-t-declare-local-variable-as-static.patch
new file mode 100644 (file)
index 0000000..35ed720
--- /dev/null
@@ -0,0 +1,40 @@
+From 05ed672292dc9d37db4fafdd49baa782158cd795 Mon Sep 17 00:00:00 2001
+From: Viresh Kumar <viresh.kumar@linaro.org>
+Date: Wed, 2 Apr 2014 10:14:24 +0530
+Subject: cpufreq: loongson2_cpufreq: don't declare local variable as static
+
+From: Viresh Kumar <viresh.kumar@linaro.org>
+
+commit 05ed672292dc9d37db4fafdd49baa782158cd795 upstream.
+
+Earlier commit:
+       commit 652ed95d5fa6074b3c4ea245deb0691f1acb6656
+       Author: Viresh Kumar <viresh.kumar@linaro.org>
+       Date:   Thu Jan 9 20:38:43 2014 +0530
+
+           cpufreq: introduce cpufreq_generic_get() routine
+
+made some changes to the driver and by mistake declared cpuclk as a
+'static' local variable, which wasn't actually required. Fix it.
+
+Fixes: 652ed95d5fa6 (cpufreq: introduce cpufreq_generic_get() routine)
+Reported-by: Alexandre Oliva <lxoliva@fsfla.org>
+Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/cpufreq/loongson2_cpufreq.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/cpufreq/loongson2_cpufreq.c
++++ b/drivers/cpufreq/loongson2_cpufreq.c
+@@ -69,7 +69,7 @@ static int loongson2_cpufreq_target(stru
+ static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy)
+ {
+-      static struct clk *cpuclk;
++      struct clk *cpuclk;
+       int i;
+       unsigned long rate;
+       int ret;
diff --git a/queue-3.14/cpufreq-unicore32-fix-typo-issue-for-clk.patch b/queue-3.14/cpufreq-unicore32-fix-typo-issue-for-clk.patch
new file mode 100644 (file)
index 0000000..d9df9fa
--- /dev/null
@@ -0,0 +1,50 @@
+From b4ddad95020e65cfbbf9aee63d3bcdf682794ade Mon Sep 17 00:00:00 2001
+From: Chen Gang <gang.chen.5i5j@gmail.com>
+Date: Mon, 7 Apr 2014 20:04:21 +0800
+Subject: cpufreq: unicore32: fix typo issue for 'clk'
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Chen Gang <gang.chen.5i5j@gmail.com>
+
+commit b4ddad95020e65cfbbf9aee63d3bcdf682794ade upstream.
+
+We need to use 'clk' instead of 'mclk', which was the original (now
+removed) local variable.
+
+The related original commit:
+
+  "652ed95 cpufreq: introduce cpufreq_generic_get() routine"
+
+The related error with allmodconfig for unicore32:
+
+    CC      drivers/cpufreq/unicore2-cpufreq.o
+  drivers/cpufreq/unicore2-cpufreq.c: In function â€˜ucv2_target’:
+  drivers/cpufreq/unicore2-cpufreq.c:48: error: â€˜struct cpufreq_policy’ has no member named â€˜mclk’
+  make[2]: *** [drivers/cpufreq/unicore2-cpufreq.o] Error 1
+  make[1]: *** [drivers/cpufreq] Error 2
+  make: *** [drivers] Error 2
+
+Fixes: 652ed95d5fa6 (cpufreq: introduce cpufreq_generic_get() routine)
+Signed-off-by: Chen Gang <gang.chen.5i5j@gmail.com>
+Acked-by: Viresh Kumar <viresh.kumar@linaro.org>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+
+---
+ drivers/cpufreq/unicore2-cpufreq.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/cpufreq/unicore2-cpufreq.c
++++ b/drivers/cpufreq/unicore2-cpufreq.c
+@@ -45,7 +45,7 @@ static int ucv2_target(struct cpufreq_po
+       freqs.new = target_freq;
+       cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+-      ret = clk_set_rate(policy->mclk, target_freq * 1000);
++      ret = clk_set_rate(policy->clk, target_freq * 1000);
+       cpufreq_notify_post_transition(policy, &freqs, ret);
+       return ret;
diff --git a/queue-3.14/dm-cache-fix-a-lock-inversion.patch b/queue-3.14/dm-cache-fix-a-lock-inversion.patch
new file mode 100644 (file)
index 0000000..d047fab
--- /dev/null
@@ -0,0 +1,174 @@
+From 0596661f0a16d9d69bf1033320e70b6ff52b5e81 Mon Sep 17 00:00:00 2001
+From: Joe Thornber <ejt@redhat.com>
+Date: Thu, 3 Apr 2014 16:16:44 +0100
+Subject: dm cache: fix a lock-inversion
+
+From: Joe Thornber <ejt@redhat.com>
+
+commit 0596661f0a16d9d69bf1033320e70b6ff52b5e81 upstream.
+
+When suspending a cache the policy is walked and the individual policy
+hints written to the metadata via sync_metadata().  This led to this
+lock order:
+
+      policy->lock
+        cache_metadata->root_lock
+
+When loading the cache target the policy is populated while the metadata
+lock is held:
+
+      cache_metadata->root_lock
+         policy->lock
+
+Fix this potential lock-inversion (ABBA) deadlock in sync_metadata() by
+ensuring the cache_metadata root_lock is held whilst all the hints are
+written, rather than being repeatedly locked while policy->lock is held
+(as was the case with each callout that policy_walk_mappings() made to
+the old save_hint() method).
+
+Found by turning on the CONFIG_PROVE_LOCKING ("Lock debugging: prove
+locking correctness") build option.  However, it is not clear how the
+LOCKDEP reported paths can lead to a deadlock since the two paths,
+suspending a target and loading a target, never occur at the same time.
+But that doesn't mean the same lock-inversion couldn't have occurred
+elsewhere.
+
+Reported-by: Marian Csontos <mcsontos@redhat.com>
+Signed-off-by: Joe Thornber <ejt@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-cache-metadata.c |   35 +++++++++++++++++------------------
+ drivers/md/dm-cache-metadata.h |    9 +--------
+ drivers/md/dm-cache-target.c   |   28 ++--------------------------
+ 3 files changed, 20 insertions(+), 52 deletions(-)
+
+--- a/drivers/md/dm-cache-metadata.c
++++ b/drivers/md/dm-cache-metadata.c
+@@ -1245,22 +1245,12 @@ static int begin_hints(struct dm_cache_m
+       return 0;
+ }
+-int dm_cache_begin_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *policy)
++static int save_hint(void *context, dm_cblock_t cblock, dm_oblock_t oblock, uint32_t hint)
+ {
++      struct dm_cache_metadata *cmd = context;
++      __le32 value = cpu_to_le32(hint);
+       int r;
+-      down_write(&cmd->root_lock);
+-      r = begin_hints(cmd, policy);
+-      up_write(&cmd->root_lock);
+-
+-      return r;
+-}
+-
+-static int save_hint(struct dm_cache_metadata *cmd, dm_cblock_t cblock,
+-                   uint32_t hint)
+-{
+-      int r;
+-      __le32 value = cpu_to_le32(hint);
+       __dm_bless_for_disk(&value);
+       r = dm_array_set_value(&cmd->hint_info, cmd->hint_root,
+@@ -1270,16 +1260,25 @@ static int save_hint(struct dm_cache_met
+       return r;
+ }
+-int dm_cache_save_hint(struct dm_cache_metadata *cmd, dm_cblock_t cblock,
+-                     uint32_t hint)
++static int write_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *policy)
+ {
+       int r;
+-      if (!hints_array_initialized(cmd))
+-              return 0;
++      r = begin_hints(cmd, policy);
++      if (r) {
++              DMERR("begin_hints failed");
++              return r;
++      }
++
++      return policy_walk_mappings(policy, save_hint, cmd);
++}
++
++int dm_cache_write_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *policy)
++{
++      int r;
+       down_write(&cmd->root_lock);
+-      r = save_hint(cmd, cblock, hint);
++      r = write_hints(cmd, policy);
+       up_write(&cmd->root_lock);
+       return r;
+--- a/drivers/md/dm-cache-metadata.h
++++ b/drivers/md/dm-cache-metadata.h
+@@ -128,14 +128,7 @@ void dm_cache_dump(struct dm_cache_metad
+  * rather than querying the policy for each cblock, we let it walk its data
+  * structures and fill in the hints in whatever order it wishes.
+  */
+-
+-int dm_cache_begin_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *p);
+-
+-/*
+- * requests hints for every cblock and stores in the metadata device.
+- */
+-int dm_cache_save_hint(struct dm_cache_metadata *cmd,
+-                     dm_cblock_t cblock, uint32_t hint);
++int dm_cache_write_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *p);
+ /*
+  * Query method.  Are all the blocks in the cache clean?
+--- a/drivers/md/dm-cache-target.c
++++ b/drivers/md/dm-cache-target.c
+@@ -2600,30 +2600,6 @@ static int write_discard_bitset(struct c
+       return 0;
+ }
+-static int save_hint(void *context, dm_cblock_t cblock, dm_oblock_t oblock,
+-                   uint32_t hint)
+-{
+-      struct cache *cache = context;
+-      return dm_cache_save_hint(cache->cmd, cblock, hint);
+-}
+-
+-static int write_hints(struct cache *cache)
+-{
+-      int r;
+-
+-      r = dm_cache_begin_hints(cache->cmd, cache->policy);
+-      if (r) {
+-              DMERR("dm_cache_begin_hints failed");
+-              return r;
+-      }
+-
+-      r = policy_walk_mappings(cache->policy, save_hint, cache);
+-      if (r)
+-              DMERR("policy_walk_mappings failed");
+-
+-      return r;
+-}
+-
+ /*
+  * returns true on success
+  */
+@@ -2641,7 +2617,7 @@ static bool sync_metadata(struct cache *
+       save_stats(cache);
+-      r3 = write_hints(cache);
++      r3 = dm_cache_write_hints(cache->cmd, cache->policy);
+       if (r3)
+               DMERR("could not write hints");
+@@ -3114,7 +3090,7 @@ static void cache_io_hints(struct dm_tar
+ static struct target_type cache_target = {
+       .name = "cache",
+-      .version = {1, 3, 0},
++      .version = {1, 4, 0},
+       .module = THIS_MODULE,
+       .ctr = cache_ctr,
+       .dtr = cache_dtr,
diff --git a/queue-3.14/dm-cache-prevent-corruption-caused-by-discard_block_size-cache_block_size.patch b/queue-3.14/dm-cache-prevent-corruption-caused-by-discard_block_size-cache_block_size.patch
new file mode 100644 (file)
index 0000000..fa90693
--- /dev/null
@@ -0,0 +1,99 @@
+From d132cc6d9e92424bb9d4fd35f5bd0e55d583f4be Mon Sep 17 00:00:00 2001
+From: Mike Snitzer <snitzer@redhat.com>
+Date: Thu, 20 Mar 2014 10:11:15 -0400
+Subject: dm cache: prevent corruption caused by discard_block_size > cache_block_size
+
+From: Mike Snitzer <snitzer@redhat.com>
+
+commit d132cc6d9e92424bb9d4fd35f5bd0e55d583f4be upstream.
+
+If the discard block size is larger than the cache block size we will
+not properly quiesce IO to a region that is about to be discarded.  This
+results in a race between a cache migration where no copy is needed, and
+a write to an adjacent cache block that's within the same large discard
+block.
+
+Work around this by limiting the discard_block_size to cache_block_size.
+Also limit the max_discard_sectors to cache_block_size.
+
+A more comprehensive fix that introduces range locking support in the
+bio_prison and proper quiescing of a discard range that spans multiple
+cache blocks is already in development.
+
+Reported-by: Morgan Mears <Morgan.Mears@netapp.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Acked-by: Joe Thornber <ejt@redhat.com>
+Acked-by: Heinz Mauelshagen <heinzm@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-cache-target.c |   37 +++----------------------------------
+ 1 file changed, 3 insertions(+), 34 deletions(-)
+
+--- a/drivers/md/dm-cache-target.c
++++ b/drivers/md/dm-cache-target.c
+@@ -239,7 +239,7 @@ struct cache {
+        */
+       dm_dblock_t discard_nr_blocks;
+       unsigned long *discard_bitset;
+-      uint32_t discard_block_size; /* a power of 2 times sectors per block */
++      uint32_t discard_block_size;
+       /*
+        * Rather than reconstructing the table line for the status we just
+@@ -2171,35 +2171,6 @@ static int create_cache_policy(struct ca
+       return 0;
+ }
+-/*
+- * We want the discard block size to be a power of two, at least the size
+- * of the cache block size, and have no more than 2^14 discard blocks
+- * across the origin.
+- */
+-#define MAX_DISCARD_BLOCKS (1 << 14)
+-
+-static bool too_many_discard_blocks(sector_t discard_block_size,
+-                                  sector_t origin_size)
+-{
+-      (void) sector_div(origin_size, discard_block_size);
+-
+-      return origin_size > MAX_DISCARD_BLOCKS;
+-}
+-
+-static sector_t calculate_discard_block_size(sector_t cache_block_size,
+-                                           sector_t origin_size)
+-{
+-      sector_t discard_block_size;
+-
+-      discard_block_size = roundup_pow_of_two(cache_block_size);
+-
+-      if (origin_size)
+-              while (too_many_discard_blocks(discard_block_size, origin_size))
+-                      discard_block_size *= 2;
+-
+-      return discard_block_size;
+-}
+-
+ #define DEFAULT_MIGRATION_THRESHOLD 2048
+ static int cache_create(struct cache_args *ca, struct cache **result)
+@@ -2321,9 +2292,7 @@ static int cache_create(struct cache_arg
+       }
+       clear_bitset(cache->dirty_bitset, from_cblock(cache->cache_size));
+-      cache->discard_block_size =
+-              calculate_discard_block_size(cache->sectors_per_block,
+-                                           cache->origin_sectors);
++      cache->discard_block_size = cache->sectors_per_block;
+       cache->discard_nr_blocks = oblock_to_dblock(cache, cache->origin_blocks);
+       cache->discard_bitset = alloc_bitset(from_dblock(cache->discard_nr_blocks));
+       if (!cache->discard_bitset) {
+@@ -3120,7 +3089,7 @@ static void set_discard_limits(struct ca
+       /*
+        * FIXME: these limits may be incompatible with the cache device
+        */
+-      limits->max_discard_sectors = cache->discard_block_size * 1024;
++      limits->max_discard_sectors = cache->discard_block_size;
+       limits->discard_granularity = cache->discard_block_size << SECTOR_SHIFT;
+ }
diff --git a/queue-3.14/dm-take-care-to-copy-the-space-map-roots-before-locking-the-superblock.patch b/queue-3.14/dm-take-care-to-copy-the-space-map-roots-before-locking-the-superblock.patch
new file mode 100644 (file)
index 0000000..1af28b7
--- /dev/null
@@ -0,0 +1,288 @@
+From 5a32083d03fb543f63489b2946c4948398579ba0 Mon Sep 17 00:00:00 2001
+From: Joe Thornber <ejt@redhat.com>
+Date: Thu, 27 Mar 2014 14:13:23 +0000
+Subject: dm: take care to copy the space map roots before locking the superblock
+
+From: Joe Thornber <ejt@redhat.com>
+
+commit 5a32083d03fb543f63489b2946c4948398579ba0 upstream.
+
+In theory copying the space map root can fail, but in practice it never
+does because we're careful to check what size buffer is needed.
+
+But make certain we're able to copy the space map roots before
+locking the superblock.
+
+Signed-off-by: Joe Thornber <ejt@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-cache-metadata.c |   60 +++++++++++++++++++-----------
+ drivers/md/dm-thin-metadata.c  |   80 ++++++++++++++++++++++++-----------------
+ 2 files changed, 85 insertions(+), 55 deletions(-)
+
+--- a/drivers/md/dm-cache-metadata.c
++++ b/drivers/md/dm-cache-metadata.c
+@@ -120,6 +120,12 @@ struct dm_cache_metadata {
+       unsigned policy_version[CACHE_POLICY_VERSION_SIZE];
+       size_t policy_hint_size;
+       struct dm_cache_statistics stats;
++
++      /*
++       * Reading the space map root can fail, so we read it into this
++       * buffer before the superblock is locked and updated.
++       */
++      __u8 metadata_space_map_root[SPACE_MAP_ROOT_SIZE];
+ };
+ /*-------------------------------------------------------------------
+@@ -260,11 +266,31 @@ static void __setup_mapping_info(struct
+       }
+ }
++static int __save_sm_root(struct dm_cache_metadata *cmd)
++{
++      int r;
++      size_t metadata_len;
++
++      r = dm_sm_root_size(cmd->metadata_sm, &metadata_len);
++      if (r < 0)
++              return r;
++
++      return dm_sm_copy_root(cmd->metadata_sm, &cmd->metadata_space_map_root,
++                             metadata_len);
++}
++
++static void __copy_sm_root(struct dm_cache_metadata *cmd,
++                         struct cache_disk_superblock *disk_super)
++{
++      memcpy(&disk_super->metadata_space_map_root,
++             &cmd->metadata_space_map_root,
++             sizeof(cmd->metadata_space_map_root));
++}
++
+ static int __write_initial_superblock(struct dm_cache_metadata *cmd)
+ {
+       int r;
+       struct dm_block *sblock;
+-      size_t metadata_len;
+       struct cache_disk_superblock *disk_super;
+       sector_t bdev_size = i_size_read(cmd->bdev->bd_inode) >> SECTOR_SHIFT;
+@@ -272,12 +298,16 @@ static int __write_initial_superblock(st
+       if (bdev_size > DM_CACHE_METADATA_MAX_SECTORS)
+               bdev_size = DM_CACHE_METADATA_MAX_SECTORS;
+-      r = dm_sm_root_size(cmd->metadata_sm, &metadata_len);
++      r = dm_tm_pre_commit(cmd->tm);
+       if (r < 0)
+               return r;
+-      r = dm_tm_pre_commit(cmd->tm);
+-      if (r < 0)
++      /*
++       * dm_sm_copy_root() can fail.  So we need to do it before we start
++       * updating the superblock.
++       */
++      r = __save_sm_root(cmd);
++      if (r)
+               return r;
+       r = superblock_lock_zero(cmd, &sblock);
+@@ -293,10 +323,7 @@ static int __write_initial_superblock(st
+       memset(disk_super->policy_version, 0, sizeof(disk_super->policy_version));
+       disk_super->policy_hint_size = 0;
+-      r = dm_sm_copy_root(cmd->metadata_sm, &disk_super->metadata_space_map_root,
+-                          metadata_len);
+-      if (r < 0)
+-              goto bad_locked;
++      __copy_sm_root(cmd, disk_super);
+       disk_super->mapping_root = cpu_to_le64(cmd->root);
+       disk_super->hint_root = cpu_to_le64(cmd->hint_root);
+@@ -313,10 +340,6 @@ static int __write_initial_superblock(st
+       disk_super->write_misses = cpu_to_le32(0);
+       return dm_tm_commit(cmd->tm, sblock);
+-
+-bad_locked:
+-      dm_bm_unlock(sblock);
+-      return r;
+ }
+ static int __format_metadata(struct dm_cache_metadata *cmd)
+@@ -560,7 +583,6 @@ static int __commit_transaction(struct d
+                               flags_mutator mutator)
+ {
+       int r;
+-      size_t metadata_len;
+       struct cache_disk_superblock *disk_super;
+       struct dm_block *sblock;
+@@ -578,8 +600,8 @@ static int __commit_transaction(struct d
+       if (r < 0)
+               return r;
+-      r = dm_sm_root_size(cmd->metadata_sm, &metadata_len);
+-      if (r < 0)
++      r = __save_sm_root(cmd);
++      if (r)
+               return r;
+       r = superblock_lock(cmd, &sblock);
+@@ -606,13 +628,7 @@ static int __commit_transaction(struct d
+       disk_super->read_misses = cpu_to_le32(cmd->stats.read_misses);
+       disk_super->write_hits = cpu_to_le32(cmd->stats.write_hits);
+       disk_super->write_misses = cpu_to_le32(cmd->stats.write_misses);
+-
+-      r = dm_sm_copy_root(cmd->metadata_sm, &disk_super->metadata_space_map_root,
+-                          metadata_len);
+-      if (r < 0) {
+-              dm_bm_unlock(sblock);
+-              return r;
+-      }
++      __copy_sm_root(cmd, disk_super);
+       return dm_tm_commit(cmd->tm, sblock);
+ }
+--- a/drivers/md/dm-thin-metadata.c
++++ b/drivers/md/dm-thin-metadata.c
+@@ -192,6 +192,13 @@ struct dm_pool_metadata {
+        * operation possible in this state is the closing of the device.
+        */
+       bool fail_io:1;
++
++      /*
++       * Reading the space map roots can fail, so we read it into these
++       * buffers before the superblock is locked and updated.
++       */
++      __u8 data_space_map_root[SPACE_MAP_ROOT_SIZE];
++      __u8 metadata_space_map_root[SPACE_MAP_ROOT_SIZE];
+ };
+ struct dm_thin_device {
+@@ -431,26 +438,53 @@ static void __setup_btree_details(struct
+       pmd->details_info.value_type.equal = NULL;
+ }
++static int save_sm_roots(struct dm_pool_metadata *pmd)
++{
++      int r;
++      size_t len;
++
++      r = dm_sm_root_size(pmd->metadata_sm, &len);
++      if (r < 0)
++              return r;
++
++      r = dm_sm_copy_root(pmd->metadata_sm, &pmd->metadata_space_map_root, len);
++      if (r < 0)
++              return r;
++
++      r = dm_sm_root_size(pmd->data_sm, &len);
++      if (r < 0)
++              return r;
++
++      return dm_sm_copy_root(pmd->data_sm, &pmd->data_space_map_root, len);
++}
++
++static void copy_sm_roots(struct dm_pool_metadata *pmd,
++                        struct thin_disk_superblock *disk)
++{
++      memcpy(&disk->metadata_space_map_root,
++             &pmd->metadata_space_map_root,
++             sizeof(pmd->metadata_space_map_root));
++
++      memcpy(&disk->data_space_map_root,
++             &pmd->data_space_map_root,
++             sizeof(pmd->data_space_map_root));
++}
++
+ static int __write_initial_superblock(struct dm_pool_metadata *pmd)
+ {
+       int r;
+       struct dm_block *sblock;
+-      size_t metadata_len, data_len;
+       struct thin_disk_superblock *disk_super;
+       sector_t bdev_size = i_size_read(pmd->bdev->bd_inode) >> SECTOR_SHIFT;
+       if (bdev_size > THIN_METADATA_MAX_SECTORS)
+               bdev_size = THIN_METADATA_MAX_SECTORS;
+-      r = dm_sm_root_size(pmd->metadata_sm, &metadata_len);
++      r = dm_sm_commit(pmd->data_sm);
+       if (r < 0)
+               return r;
+-      r = dm_sm_root_size(pmd->data_sm, &data_len);
+-      if (r < 0)
+-              return r;
+-
+-      r = dm_sm_commit(pmd->data_sm);
++      r = save_sm_roots(pmd);
+       if (r < 0)
+               return r;
+@@ -471,15 +505,7 @@ static int __write_initial_superblock(st
+       disk_super->trans_id = 0;
+       disk_super->held_root = 0;
+-      r = dm_sm_copy_root(pmd->metadata_sm, &disk_super->metadata_space_map_root,
+-                          metadata_len);
+-      if (r < 0)
+-              goto bad_locked;
+-
+-      r = dm_sm_copy_root(pmd->data_sm, &disk_super->data_space_map_root,
+-                          data_len);
+-      if (r < 0)
+-              goto bad_locked;
++      copy_sm_roots(pmd, disk_super);
+       disk_super->data_mapping_root = cpu_to_le64(pmd->root);
+       disk_super->device_details_root = cpu_to_le64(pmd->details_root);
+@@ -488,10 +514,6 @@ static int __write_initial_superblock(st
+       disk_super->data_block_size = cpu_to_le32(pmd->data_block_size);
+       return dm_tm_commit(pmd->tm, sblock);
+-
+-bad_locked:
+-      dm_bm_unlock(sblock);
+-      return r;
+ }
+ static int __format_metadata(struct dm_pool_metadata *pmd)
+@@ -769,6 +791,10 @@ static int __commit_transaction(struct d
+       if (r < 0)
+               return r;
++      r = save_sm_roots(pmd);
++      if (r < 0)
++              return r;
++
+       r = superblock_lock(pmd, &sblock);
+       if (r)
+               return r;
+@@ -780,21 +806,9 @@ static int __commit_transaction(struct d
+       disk_super->trans_id = cpu_to_le64(pmd->trans_id);
+       disk_super->flags = cpu_to_le32(pmd->flags);
+-      r = dm_sm_copy_root(pmd->metadata_sm, &disk_super->metadata_space_map_root,
+-                          metadata_len);
+-      if (r < 0)
+-              goto out_locked;
+-
+-      r = dm_sm_copy_root(pmd->data_sm, &disk_super->data_space_map_root,
+-                          data_len);
+-      if (r < 0)
+-              goto out_locked;
++      copy_sm_roots(pmd, disk_super);
+       return dm_tm_commit(pmd->tm, sblock);
+-
+-out_locked:
+-      dm_bm_unlock(sblock);
+-      return r;
+ }
+ struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev,
diff --git a/queue-3.14/dm-thin-fix-dangling-bio-in-process_deferred_bios-error-path.patch b/queue-3.14/dm-thin-fix-dangling-bio-in-process_deferred_bios-error-path.patch
new file mode 100644 (file)
index 0000000..261f198
--- /dev/null
@@ -0,0 +1,34 @@
+From fe76cd88e654124d1431bb662a0fc6e99ca811a5 Mon Sep 17 00:00:00 2001
+From: Mike Snitzer <snitzer@redhat.com>
+Date: Fri, 28 Mar 2014 02:15:02 -0400
+Subject: dm thin: fix dangling bio in process_deferred_bios error path
+
+From: Mike Snitzer <snitzer@redhat.com>
+
+commit fe76cd88e654124d1431bb662a0fc6e99ca811a5 upstream.
+
+If unable to ensure_next_mapping() we must add the current bio, which
+was removed from the @bios list via bio_list_pop, back to the
+deferred_bios list before all the remaining @bios.
+
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Acked-by: Joe Thornber <ejt@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-thin.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/md/dm-thin.c
++++ b/drivers/md/dm-thin.c
+@@ -1392,9 +1392,9 @@ static void process_deferred_bios(struct
+                */
+               if (ensure_next_mapping(pool)) {
+                       spin_lock_irqsave(&pool->lock, flags);
++                      bio_list_add(&pool->deferred_bios, bio);
+                       bio_list_merge(&pool->deferred_bios, &bios);
+                       spin_unlock_irqrestore(&pool->lock, flags);
+-
+                       break;
+               }
diff --git a/queue-3.14/dm-transaction-manager-fix-corruption-due-to-non-atomic-transaction-commit.patch b/queue-3.14/dm-transaction-manager-fix-corruption-due-to-non-atomic-transaction-commit.patch
new file mode 100644 (file)
index 0000000..c0f42b4
--- /dev/null
@@ -0,0 +1,156 @@
+From a9d45396f5956d0b615c7ae3b936afd888351a47 Mon Sep 17 00:00:00 2001
+From: Joe Thornber <ejt@redhat.com>
+Date: Thu, 27 Mar 2014 14:13:20 +0000
+Subject: dm transaction manager: fix corruption due to non-atomic transaction commit
+
+From: Joe Thornber <ejt@redhat.com>
+
+commit a9d45396f5956d0b615c7ae3b936afd888351a47 upstream.
+
+The persistent-data library used by dm-thin, dm-cache, etc is
+transactional.  If anything goes wrong, such as an io error when writing
+new metadata or a power failure, then we roll back to the last
+transaction.
+
+Atomicity when committing a transaction is achieved by:
+
+a) Never overwriting data from the previous transaction.
+b) Writing the superblock last, after all other metadata has hit the
+   disk.
+
+This commit and the following commit ("dm: take care to copy the space
+map roots before locking the superblock") fix a bug associated with (b).
+When committing it was possible for the superblock to still be written
+in spite of an io error occurring during the preceding metadata flush.
+With these commits we're careful not to take the write lock out on the
+superblock until after the metadata flush has completed.
+
+Change the transaction manager's semantics for dm_tm_commit() to assume
+all data has been flushed _before_ the single superblock that is passed
+in.
+
+As a prerequisite, split the block manager's block unlocking and
+flushing by simplifying dm_bm_flush_and_unlock() to dm_bm_flush().  Now
+the unlocking must be done separately.
+
+This issue was discovered by forcing io errors at the crucial time
+using dm-flakey.
+
+Signed-off-by: Joe Thornber <ejt@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-cache-metadata.c                      |    3 ++-
+ drivers/md/persistent-data/dm-block-manager.c       |   15 ++-------------
+ drivers/md/persistent-data/dm-block-manager.h       |    3 +--
+ drivers/md/persistent-data/dm-transaction-manager.c |    5 +++--
+ drivers/md/persistent-data/dm-transaction-manager.h |   17 ++++++++---------
+ 5 files changed, 16 insertions(+), 27 deletions(-)
+
+--- a/drivers/md/dm-cache-metadata.c
++++ b/drivers/md/dm-cache-metadata.c
+@@ -530,8 +530,9 @@ static int __begin_transaction_flags(str
+       disk_super = dm_block_data(sblock);
+       update_flags(disk_super, mutator);
+       read_superblock_fields(cmd, disk_super);
++      dm_bm_unlock(sblock);
+-      return dm_bm_flush_and_unlock(cmd->bm, sblock);
++      return dm_bm_flush(cmd->bm);
+ }
+ static int __begin_transaction(struct dm_cache_metadata *cmd)
+--- a/drivers/md/persistent-data/dm-block-manager.c
++++ b/drivers/md/persistent-data/dm-block-manager.c
+@@ -595,25 +595,14 @@ int dm_bm_unlock(struct dm_block *b)
+ }
+ EXPORT_SYMBOL_GPL(dm_bm_unlock);
+-int dm_bm_flush_and_unlock(struct dm_block_manager *bm,
+-                         struct dm_block *superblock)
++int dm_bm_flush(struct dm_block_manager *bm)
+ {
+-      int r;
+-
+       if (bm->read_only)
+               return -EPERM;
+-      r = dm_bufio_write_dirty_buffers(bm->bufio);
+-      if (unlikely(r)) {
+-              dm_bm_unlock(superblock);
+-              return r;
+-      }
+-
+-      dm_bm_unlock(superblock);
+-
+       return dm_bufio_write_dirty_buffers(bm->bufio);
+ }
+-EXPORT_SYMBOL_GPL(dm_bm_flush_and_unlock);
++EXPORT_SYMBOL_GPL(dm_bm_flush);
+ void dm_bm_prefetch(struct dm_block_manager *bm, dm_block_t b)
+ {
+--- a/drivers/md/persistent-data/dm-block-manager.h
++++ b/drivers/md/persistent-data/dm-block-manager.h
+@@ -105,8 +105,7 @@ int dm_bm_unlock(struct dm_block *b);
+  *
+  * This method always blocks.
+  */
+-int dm_bm_flush_and_unlock(struct dm_block_manager *bm,
+-                         struct dm_block *superblock);
++int dm_bm_flush(struct dm_block_manager *bm);
+ /*
+  * Request data is prefetched into the cache.
+--- a/drivers/md/persistent-data/dm-transaction-manager.c
++++ b/drivers/md/persistent-data/dm-transaction-manager.c
+@@ -154,7 +154,7 @@ int dm_tm_pre_commit(struct dm_transacti
+       if (r < 0)
+               return r;
+-      return 0;
++      return dm_bm_flush(tm->bm);
+ }
+ EXPORT_SYMBOL_GPL(dm_tm_pre_commit);
+@@ -164,8 +164,9 @@ int dm_tm_commit(struct dm_transaction_m
+               return -EWOULDBLOCK;
+       wipe_shadow_table(tm);
++      dm_bm_unlock(root);
+-      return dm_bm_flush_and_unlock(tm->bm, root);
++      return dm_bm_flush(tm->bm);
+ }
+ EXPORT_SYMBOL_GPL(dm_tm_commit);
+--- a/drivers/md/persistent-data/dm-transaction-manager.h
++++ b/drivers/md/persistent-data/dm-transaction-manager.h
+@@ -38,18 +38,17 @@ struct dm_transaction_manager *dm_tm_cre
+ /*
+  * We use a 2-phase commit here.
+  *
+- * i) In the first phase the block manager is told to start flushing, and
+- * the changes to the space map are written to disk.  You should interrogate
+- * your particular space map to get detail of its root node etc. to be
+- * included in your superblock.
++ * i) Make all changes for the transaction *except* for the superblock.
++ * Then call dm_tm_pre_commit() to flush them to disk.
+  *
+- * ii) @root will be committed last.  You shouldn't use more than the
+- * first 512 bytes of @root if you wish the transaction to survive a power
+- * failure.  You *must* have a write lock held on @root for both stage (i)
+- * and (ii).  The commit will drop the write lock.
++ * ii) Lock your superblock.  Update.  Then call dm_tm_commit() which will
++ * unlock the superblock and flush it.  No other blocks should be updated
++ * during this period.  Care should be taken to never unlock a partially
++ * updated superblock; perform any operations that could fail *before* you
++ * take the superblock lock.
+  */
+ int dm_tm_pre_commit(struct dm_transaction_manager *tm);
+-int dm_tm_commit(struct dm_transaction_manager *tm, struct dm_block *root);
++int dm_tm_commit(struct dm_transaction_manager *tm, struct dm_block *superblock);
+ /*
+  * These methods are the only way to get hold of a writeable block.
diff --git a/queue-3.14/dma-edma-fix-incorrect-sg-list-handling.patch b/queue-3.14/dma-edma-fix-incorrect-sg-list-handling.patch
new file mode 100644 (file)
index 0000000..0879efb
--- /dev/null
@@ -0,0 +1,61 @@
+From 5fc68a6cad658e45dca3e0a6607df3a8e5df4ef9 Mon Sep 17 00:00:00 2001
+From: Sekhar Nori <nsekhar@ti.com>
+Date: Wed, 19 Mar 2014 11:25:50 +0530
+Subject: dma: edma: fix incorrect SG list handling
+
+From: Sekhar Nori <nsekhar@ti.com>
+
+commit 5fc68a6cad658e45dca3e0a6607df3a8e5df4ef9 upstream.
+
+The code to handle any length SG lists calls edma_resume()
+even before edma_start() is called. This is incorrect
+because edma_resume() enables edma events on the channel
+after which CPU (in edma_start) cannot clear posted
+events by writing to ECR (per the EDMA user's guide).
+
+Because of this, EDMA transfers fail to start if, for some
+reason, there is a pending EDMA event registered even before
+EDMA transfers are started. This can happen if an EDMA event
+is a byproduct of device initialization.
+
+Fix this by calling edma_resume() only if it is not the
+first batch of MAX_NR_SG elements.
+
+Without this patch, MMC/SD fails to function on DA850 EVM
+with DMA. The behaviour is triggered by a specific IP, which
+can explain why the issue was not reported before (for example,
+with MMC/SD on AM335x).
+
+Tested on DA850 EVM and AM335x EVM-SK using MMC/SD card.
+
+Cc: Joel Fernandes <joelf@ti.com>
+Acked-by: Joel Fernandes <joelf@ti.com>
+Tested-by: Jon Ringle <jringle@gridpoint.com>
+Tested-by: Alexander Holler <holler@ahsoftware.de>
+Reported-by: Jon Ringle <jringle@gridpoint.com>
+Signed-off-by: Sekhar Nori <nsekhar@ti.com>
+Signed-off-by: Vinod Koul <vinod.koul@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/dma/edma.c |    6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/drivers/dma/edma.c
++++ b/drivers/dma/edma.c
+@@ -182,11 +182,13 @@ static void edma_execute(struct edma_cha
+                                 echan->ecc->dummy_slot);
+       }
+-      edma_resume(echan->ch_num);
+-
+       if (edesc->processed <= MAX_NR_SG) {
+               dev_dbg(dev, "first transfer starting %d\n", echan->ch_num);
+               edma_start(echan->ch_num);
++      } else {
++              dev_dbg(dev, "chan: %d: completed %d elements, resuming\n",
++                      echan->ch_num, edesc->processed);
++              edma_resume(echan->ch_num);
+       }
+       /*
diff --git a/queue-3.14/gpio-mxs-allow-for-recursive-enable_irq_wake-call.patch b/queue-3.14/gpio-mxs-allow-for-recursive-enable_irq_wake-call.patch
new file mode 100644 (file)
index 0000000..39319f4
--- /dev/null
@@ -0,0 +1,93 @@
+From a585f87c863e4e1d496459d382b802bf5ebe3717 Mon Sep 17 00:00:00 2001
+From: Marek Vasut <marex@denx.de>
+Date: Mon, 24 Mar 2014 03:38:10 +0100
+Subject: gpio: mxs: Allow for recursive enable_irq_wake() call
+
+From: Marek Vasut <marex@denx.de>
+
+commit a585f87c863e4e1d496459d382b802bf5ebe3717 upstream.
+
+The scenario here is that someone calls enable_irq_wake() from somewhere
+in the code. This will result in lockdep producing a backtrace, as can
+be seen below. In my case, this problem is triggered when using the wl1271
+(TI WlCore) driver found in drivers/net/wireless/ti/.
+
+The cause of the problem is rather obvious from the backtrace, but let's outline
+the dependency. enable_irq_wake() grabs the IRQ buslock in irq_set_irq_wake(),
+which in turn calls mxs_gpio_set_wake_irq(). But mxs_gpio_set_wake_irq()
+calls enable_irq_wake() again on the one-level-higher IRQ, thus it tries to
+grab the IRQ buslock again in irq_set_irq_wake(). Because the spinlock in
+irq_set_irq_wake()->irq_get_desc_buslock()->__irq_get_desc_lock() is not
+marked as recursive, lockdep will spew the stuff below.
+
+We know we can safely re-enter the lock, so use IRQ_GC_INIT_NESTED_LOCK to
+fix the spew.
+
+ =============================================
+ [ INFO: possible recursive locking detected ]
+ 3.10.33-00012-gf06b763-dirty #61 Not tainted
+ ---------------------------------------------
+ kworker/0:1/18 is trying to acquire lock:
+  (&irq_desc_lock_class){-.-...}, at: [<c00685f0>] __irq_get_desc_lock+0x48/0x88
+
+ but task is already holding lock:
+  (&irq_desc_lock_class){-.-...}, at: [<c00685f0>] __irq_get_desc_lock+0x48/0x88
+
+ other info that might help us debug this:
+  Possible unsafe locking scenario:
+
+        CPU0
+        ----
+   lock(&irq_desc_lock_class);
+   lock(&irq_desc_lock_class);
+
+  *** DEADLOCK ***
+
+  May be due to missing lock nesting notation
+
+ 3 locks held by kworker/0:1/18:
+  #0:  (events){.+.+.+}, at: [<c0036308>] process_one_work+0x134/0x4a4
+  #1:  ((&fw_work->work)){+.+.+.}, at: [<c0036308>] process_one_work+0x134/0x4a4
+  #2:  (&irq_desc_lock_class){-.-...}, at: [<c00685f0>] __irq_get_desc_lock+0x48/0x88
+
+ stack backtrace:
+ CPU: 0 PID: 18 Comm: kworker/0:1 Not tainted 3.10.33-00012-gf06b763-dirty #61
+ Workqueue: events request_firmware_work_func
+ [<c0013eb4>] (unwind_backtrace+0x0/0xf0) from [<c0011c74>] (show_stack+0x10/0x14)
+ [<c0011c74>] (show_stack+0x10/0x14) from [<c005bb08>] (__lock_acquire+0x140c/0x1a64)
+ [<c005bb08>] (__lock_acquire+0x140c/0x1a64) from [<c005c6a8>] (lock_acquire+0x9c/0x104)
+ [<c005c6a8>] (lock_acquire+0x9c/0x104) from [<c051d5a4>] (_raw_spin_lock_irqsave+0x44/0x58)
+ [<c051d5a4>] (_raw_spin_lock_irqsave+0x44/0x58) from [<c00685f0>] (__irq_get_desc_lock+0x48/0x88)
+ [<c00685f0>] (__irq_get_desc_lock+0x48/0x88) from [<c0068e78>] (irq_set_irq_wake+0x20/0xf4)
+ [<c0068e78>] (irq_set_irq_wake+0x20/0xf4) from [<c027260c>] (mxs_gpio_set_wake_irq+0x1c/0x24)
+ [<c027260c>] (mxs_gpio_set_wake_irq+0x1c/0x24) from [<c0068cf4>] (set_irq_wake_real+0x30/0x44)
+ [<c0068cf4>] (set_irq_wake_real+0x30/0x44) from [<c0068ee4>] (irq_set_irq_wake+0x8c/0xf4)
+ [<c0068ee4>] (irq_set_irq_wake+0x8c/0xf4) from [<c0310748>] (wlcore_nvs_cb+0x10c/0x97c)
+ [<c0310748>] (wlcore_nvs_cb+0x10c/0x97c) from [<c02be5e8>] (request_firmware_work_func+0x38/0x58)
+ [<c02be5e8>] (request_firmware_work_func+0x38/0x58) from [<c0036394>] (process_one_work+0x1c0/0x4a4)
+ [<c0036394>] (process_one_work+0x1c0/0x4a4) from [<c0036a4c>] (worker_thread+0x138/0x394)
+ [<c0036a4c>] (worker_thread+0x138/0x394) from [<c003cb74>] (kthread+0xa4/0xb0)
+ [<c003cb74>] (kthread+0xa4/0xb0) from [<c000ee00>] (ret_from_fork+0x14/0x34)
+ wlcore: loaded
+
+Signed-off-by: Marek Vasut <marex@denx.de>
+Acked-by: Shawn Guo <shawn.guo@linaro.org>
+Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpio/gpio-mxs.c |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/gpio/gpio-mxs.c
++++ b/drivers/gpio/gpio-mxs.c
+@@ -214,7 +214,8 @@ static void __init mxs_gpio_init_gc(stru
+       ct->regs.ack = PINCTRL_IRQSTAT(port) + MXS_CLR;
+       ct->regs.mask = PINCTRL_IRQEN(port);
+-      irq_setup_generic_chip(gc, IRQ_MSK(32), 0, IRQ_NOREQUEST, 0);
++      irq_setup_generic_chip(gc, IRQ_MSK(32), IRQ_GC_INIT_NESTED_LOCK,
++                             IRQ_NOREQUEST, 0);
+ }
+ static int mxs_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
diff --git a/queue-3.14/input-synaptics-add-min-max-quirk-for-thinkpad-edge-e431.patch b/queue-3.14/input-synaptics-add-min-max-quirk-for-thinkpad-edge-e431.patch
new file mode 100644 (file)
index 0000000..023dd81
--- /dev/null
@@ -0,0 +1,34 @@
+From 27a38856a948c3e8de30dc71647ff9e1778c99fc Mon Sep 17 00:00:00 2001
+From: Hans de Goede <hdegoede@redhat.com>
+Date: Wed, 23 Apr 2014 13:02:35 -0700
+Subject: Input: synaptics - add min/max quirk for ThinkPad Edge E431
+
+From: Hans de Goede <hdegoede@redhat.com>
+
+commit 27a38856a948c3e8de30dc71647ff9e1778c99fc upstream.
+
+Signed-off-by: Hans de Goede <hdegoede@redhat.com>
+Signed-off-by: Dmitry Torokhov <dmitry.torokhov@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/input/mouse/synaptics.c |    8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/drivers/input/mouse/synaptics.c
++++ b/drivers/input/mouse/synaptics.c
+@@ -1515,6 +1515,14 @@ static const struct dmi_system_id min_ma
+               .driver_data = (int []){1232, 5710, 1156, 4696},
+       },
+       {
++              /* Lenovo ThinkPad Edge E431 */
++              .matches = {
++                      DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++                      DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad Edge E431"),
++              },
++              .driver_data = (int []){1024, 5022, 2508, 4832},
++      },
++      {
+               /* Lenovo ThinkPad T431s */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
diff --git a/queue-3.14/input-synaptics-add-min-max-quirk-for-thinkpad-t431s-l440-l540-s1-yoga-and-x1.patch b/queue-3.14/input-synaptics-add-min-max-quirk-for-thinkpad-t431s-l440-l540-s1-yoga-and-x1.patch
new file mode 100644 (file)
index 0000000..4abe2eb
--- /dev/null
@@ -0,0 +1,99 @@
+From 46a2986ebbe18757c2d8c352f8fb6e0f4f0754e3 Mon Sep 17 00:00:00 2001
+From: Hans de Goede <hdegoede@redhat.com>
+Date: Sat, 19 Apr 2014 22:31:18 -0700
+Subject: Input: synaptics - add min/max quirk for ThinkPad T431s, L440, L540, S1 Yoga and X1
+
+From: Hans de Goede <hdegoede@redhat.com>
+
+commit 46a2986ebbe18757c2d8c352f8fb6e0f4f0754e3 upstream.
+
+We expect that all the Haswell series will need such quirks, sigh.
+
+The T431s seems to be T430 hardware in a T440s case, using the T440s touchpad,
+with the same min/max issue.
+
+The X1 Carbon 3rd generation's product name says "2nd" even though it is a 3rd generation.
+
+The X1 and T431s share a PnPID with the T540p, but the reported ranges are
+closer to those of the T440s.
+
+HdG: Squashed 5 quirk patches into one. The T431s, L440 and L540 entries are
+written by me; the S1 Yoga and X1 entries are written by Benjamin Tissoires.
+
+HdG: Standardized the S1 Yoga and X1 values; the Yoga uses the same touchpad
+as the X240, and the X1 uses the same touchpad as the T440.
+
+Signed-off-by: Benjamin Tissoires <benjamin.tissoires@redhat.com>
+Signed-off-by: Hans de Goede <hdegoede@redhat.com>
+Signed-off-by: Dmitry Torokhov <dmitry.torokhov@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/input/mouse/synaptics.c |   42 ++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 42 insertions(+)
+
+--- a/drivers/input/mouse/synaptics.c
++++ b/drivers/input/mouse/synaptics.c
+@@ -1515,6 +1515,14 @@ static const struct dmi_system_id min_ma
+               .driver_data = (int []){1232, 5710, 1156, 4696},
+       },
+       {
++              /* Lenovo ThinkPad T431s */
++              .matches = {
++                      DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++                      DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T431"),
++              },
++              .driver_data = (int []){1024, 5112, 2024, 4832},
++      },
++      {
+               /* Lenovo ThinkPad T440s */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+@@ -1523,6 +1531,14 @@ static const struct dmi_system_id min_ma
+               .driver_data = (int []){1024, 5112, 2024, 4832},
+       },
+       {
++              /* Lenovo ThinkPad L440 */
++              .matches = {
++                      DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++                      DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L440"),
++              },
++              .driver_data = (int []){1024, 5112, 2024, 4832},
++      },
++      {
+               /* Lenovo ThinkPad T540p */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+@@ -1530,6 +1546,32 @@ static const struct dmi_system_id min_ma
+               },
+               .driver_data = (int []){1024, 5056, 2058, 4832},
+       },
++      {
++              /* Lenovo ThinkPad L540 */
++              .matches = {
++                      DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++                      DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L540"),
++              },
++              .driver_data = (int []){1024, 5112, 2024, 4832},
++      },
++      {
++              /* Lenovo Yoga S1 */
++              .matches = {
++                      DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++                      DMI_EXACT_MATCH(DMI_PRODUCT_VERSION,
++                                      "ThinkPad S1 Yoga"),
++              },
++              .driver_data = (int []){1232, 5710, 1156, 4696},
++      },
++      {
++              /* Lenovo ThinkPad X1 Carbon Haswell (3rd generation) */
++              .matches = {
++                      DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++                      DMI_MATCH(DMI_PRODUCT_VERSION,
++                                      "ThinkPad X1 Carbon 2nd"),
++              },
++              .driver_data = (int []){1024, 5112, 2024, 4832},
++      },
+ #endif
+       { }
+ };
diff --git a/queue-3.14/lib-percpu_counter.c-fix-bad-percpu-counter-state-during-suspend.patch b/queue-3.14/lib-percpu_counter.c-fix-bad-percpu-counter-state-during-suspend.patch
new file mode 100644 (file)
index 0000000..f7c2d4d
--- /dev/null
@@ -0,0 +1,59 @@
+From e39435ce68bb4685288f78b1a7e24311f7ef939f Mon Sep 17 00:00:00 2001
+From: Jens Axboe <axboe@fb.com>
+Date: Tue, 8 Apr 2014 16:04:12 -0700
+Subject: lib/percpu_counter.c: fix bad percpu counter state during suspend
+
+From: Jens Axboe <axboe@fb.com>
+
+commit e39435ce68bb4685288f78b1a7e24311f7ef939f upstream.
+
+I got a bug report yesterday from Laszlo Ersek in which he states that
+his kvm instance fails to suspend.  Laszlo bisected it down to this
+commit 1cf7e9c68fe8 ("virtio_blk: blk-mq support") where virtio-blk is
+converted to use the blk-mq infrastructure.
+
+After digging a bit, it became clear that the issue was with the queue
+drain.  blk-mq tracks queue usage in a percpu counter, which is
+incremented on request alloc and decremented when the request is freed.
+The initial hunt was for an inconsistency in blk-mq, but everything
+seemed fine.  In fact, the counter only returned crazy values when
+suspend was in progress.
+
+When a CPU is unplugged, the percpu counter code merges that CPU's state with
+the general state.  blk-mq takes care to register a hotcpu notifier with
+the appropriate priority, so we know it runs after the percpu counter
+notifier.  However, the percpu counter notifier only merges the state
+when the CPU is fully gone.  This leaves a state transition where the
+CPU going away is no longer in the online mask, yet it still holds
+private values.  This means that in this state, percpu_counter_sum()
+returns invalid results, and the suspend then hangs waiting for
+abs(dead-cpu-value) requests to complete which of course will never
+happen.
+
+Fix this by clearing the state earlier, so we never have a case where
+the CPU isn't in the online mask but still holds private state.  This bug
+has been there since forever; I guess we don't have a lot of users where
+percpu counters need to be reliable during the suspend cycle.
+
+Signed-off-by: Jens Axboe <axboe@fb.com>
+Reported-by: Laszlo Ersek <lersek@redhat.com>
+Tested-by: Laszlo Ersek <lersek@redhat.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ lib/percpu_counter.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/lib/percpu_counter.c
++++ b/lib/percpu_counter.c
+@@ -169,7 +169,7 @@ static int percpu_counter_hotcpu_callbac
+       struct percpu_counter *fbc;
+       compute_batch_value();
+-      if (action != CPU_DEAD)
++      if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
+               return NOTIFY_OK;
+       cpu = (unsigned long)hcpu;
diff --git a/queue-3.14/lockd-ensure-we-tear-down-any-live-sockets-when-socket-creation-fails-during-lockd_up.patch b/queue-3.14/lockd-ensure-we-tear-down-any-live-sockets-when-socket-creation-fails-during-lockd_up.patch
new file mode 100644 (file)
index 0000000..ef9cb4e
--- /dev/null
@@ -0,0 +1,79 @@
+From 679b033df48422191c4cac52b610d9980e019f9b Mon Sep 17 00:00:00 2001
+From: Jeff Layton <jlayton@redhat.com>
+Date: Tue, 25 Mar 2014 11:55:26 -0700
+Subject: lockd: ensure we tear down any live sockets when socket creation fails during lockd_up
+
+From: Jeff Layton <jlayton@redhat.com>
+
+commit 679b033df48422191c4cac52b610d9980e019f9b upstream.
+
+We had a Fedora ABRT report with a stack trace like this:
+
+kernel BUG at net/sunrpc/svc.c:550!
+invalid opcode: 0000 [#1] SMP
+[...]
+CPU: 2 PID: 913 Comm: rpc.nfsd Not tainted 3.13.6-200.fc20.x86_64 #1
+Hardware name: Hewlett-Packard HP ProBook 4740s/1846, BIOS 68IRR Ver. F.40 01/29/2013
+task: ffff880146b00000 ti: ffff88003f9b8000 task.ti: ffff88003f9b8000
+RIP: 0010:[<ffffffffa0305fa8>]  [<ffffffffa0305fa8>] svc_destroy+0x128/0x130 [sunrpc]
+RSP: 0018:ffff88003f9b9de0  EFLAGS: 00010206
+RAX: ffff88003f829628 RBX: ffff88003f829600 RCX: 00000000000041ee
+RDX: 0000000000000000 RSI: 0000000000000286 RDI: 0000000000000286
+RBP: ffff88003f9b9de8 R08: 0000000000017360 R09: ffff88014fa97360
+R10: ffffffff8114ce57 R11: ffffea00051c9c00 R12: ffff88003f829600
+R13: 00000000ffffff9e R14: ffffffff81cc7cc0 R15: 0000000000000000
+FS:  00007f4fde284840(0000) GS:ffff88014fa80000(0000) knlGS:0000000000000000
+CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 00007f4fdf5192f8 CR3: 00000000a569a000 CR4: 00000000001407e0
+Stack:
+ ffff88003f792300 ffff88003f9b9e18 ffffffffa02de02a 0000000000000000
+ ffffffff81cc7cc0 ffff88003f9cb000 0000000000000008 ffff88003f9b9e60
+ ffffffffa033bb35 ffffffff8131c86c ffff88003f9cb000 ffff8800a5715008
+Call Trace:
+ [<ffffffffa02de02a>] lockd_up+0xaa/0x330 [lockd]
+ [<ffffffffa033bb35>] nfsd_svc+0x1b5/0x2f0 [nfsd]
+ [<ffffffff8131c86c>] ? simple_strtoull+0x2c/0x50
+ [<ffffffffa033c630>] ? write_pool_threads+0x280/0x280 [nfsd]
+ [<ffffffffa033c6bb>] write_threads+0x8b/0xf0 [nfsd]
+ [<ffffffff8114efa4>] ? __get_free_pages+0x14/0x50
+ [<ffffffff8114eff6>] ? get_zeroed_page+0x16/0x20
+ [<ffffffff811dec51>] ? simple_transaction_get+0xb1/0xd0
+ [<ffffffffa033c098>] nfsctl_transaction_write+0x48/0x80 [nfsd]
+ [<ffffffff811b8b34>] vfs_write+0xb4/0x1f0
+ [<ffffffff811c3f99>] ? putname+0x29/0x40
+ [<ffffffff811b9569>] SyS_write+0x49/0xa0
+ [<ffffffff810fc2a6>] ? __audit_syscall_exit+0x1f6/0x2a0
+ [<ffffffff816962e9>] system_call_fastpath+0x16/0x1b
+Code: 31 c0 e8 82 db 37 e1 e9 2a ff ff ff 48 8b 07 8b 57 14 48 c7 c7 d5 c6 31 a0 48 8b 70 20 31 c0 e8 65 db 37 e1 e9 f4 fe ff ff 0f 0b <0f> 0b 66 0f 1f 44 00 00 0f 1f 44 00 00 55 48 89 e5 41 56 41 55
+RIP  [<ffffffffa0305fa8>] svc_destroy+0x128/0x130 [sunrpc]
+ RSP <ffff88003f9b9de0>
+
+Evidently, we created some lockd sockets and then failed to create
+others. make_socks then returned an error and we tried to tear down the
+svc, but svc->sv_permsocks was not empty so we ended up tripping over
+the BUG() in svc_destroy().
+
+Fix this by ensuring that we tear down any live sockets we created when
+socket creation is going to return an error.
+
+Fixes: 786185b5f8abefa (SUNRPC: move per-net operations from...)
+Reported-by: Raphos <raphoszap@laposte.net>
+Signed-off-by: Jeff Layton <jlayton@redhat.com>
+Reviewed-by: Stanislav Kinsbursky <skinsbursky@parallels.com>
+Signed-off-by: J. Bruce Fields <bfields@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/lockd/svc.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/fs/lockd/svc.c
++++ b/fs/lockd/svc.c
+@@ -235,6 +235,7 @@ out_err:
+       if (warned++ == 0)
+               printk(KERN_WARNING
+                       "lockd_up: makesock failed, error=%d\n", err);
++      svc_shutdown_net(serv, net);
+       return err;
+ }
diff --git a/queue-3.14/mmc-sdhci-bcm-kona-fix-build-errors-when-built-in.patch b/queue-3.14/mmc-sdhci-bcm-kona-fix-build-errors-when-built-in.patch
new file mode 100644 (file)
index 0000000..6f990bc
--- /dev/null
@@ -0,0 +1,33 @@
+From 4025ce24f326830135341814307c072f6c2a7738 Mon Sep 17 00:00:00 2001
+From: Russell King <rmk+kernel@arm.linux.org.uk>
+Date: Tue, 4 Mar 2014 16:25:51 -0500
+Subject: mmc: sdhci-bcm-kona: fix build errors when built-in
+
+From: Russell King <rmk+kernel@arm.linux.org.uk>
+
+commit 4025ce24f326830135341814307c072f6c2a7738 upstream.
+
+`sdhci_bcm_kona_remove' referenced in section `.data' of drivers/built-in.o: defined in discarded section `.exit.text' of drivers/built-in.o
+
+Fixes: 058feb53666f ("mmc: sdhci-bcm-kona: make linker-section warning go away")
+Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
+Tested-by: Markus Mayer <markus.mayer@linaro.org>
+Acked-by: Matt Porter <mporter@linaro.org>
+Signed-off-by: Chris Ball <chris@printf.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mmc/host/sdhci-bcm-kona.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/mmc/host/sdhci-bcm-kona.c
++++ b/drivers/mmc/host/sdhci-bcm-kona.c
+@@ -314,7 +314,7 @@ err_pltfm_free:
+       return ret;
+ }
+-static int __exit sdhci_bcm_kona_remove(struct platform_device *pdev)
++static int sdhci_bcm_kona_remove(struct platform_device *pdev)
+ {
+       return sdhci_pltfm_unregister(pdev);
+ }
diff --git a/queue-3.14/mtd-atmel_nand-disable-subpage-nand-write-when-using-atmel-pmecc.patch b/queue-3.14/mtd-atmel_nand-disable-subpage-nand-write-when-using-atmel-pmecc.patch
new file mode 100644 (file)
index 0000000..fd0f648
--- /dev/null
@@ -0,0 +1,36 @@
+From 90445ff6241e2a13445310803e2efa606c61f276 Mon Sep 17 00:00:00 2001
+From: Herve Codina <Herve.CODINA@celad.com>
+Date: Mon, 3 Mar 2014 12:15:29 +0100
+Subject: mtd: atmel_nand: Disable subpage NAND write when using Atmel PMECC
+
+From: Herve Codina <Herve.CODINA@celad.com>
+
+commit 90445ff6241e2a13445310803e2efa606c61f276 upstream.
+
+A crash was detected on the sam5d35 with its PMECC NAND ECC controller.
+
+The problem was a call to chip->ecc.hwctl from nand_write_subpage_hwecc
+(nand_base.c) when we write a subpage.
+The chip->ecc.hwctl function is not set when we are using the PMECC controller.
+As a workaround, set NAND_NO_SUBPAGE_WRITE for the PMECC controller in
+order to disable subpage access in nand_write_page.
+
+Signed-off-by: Herve Codina <Herve.CODINA@celad.com>
+Acked-by: Josh Wu <josh.wu@atmel.com>
+Signed-off-by: Brian Norris <computersforpeace@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mtd/nand/atmel_nand.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/mtd/nand/atmel_nand.c
++++ b/drivers/mtd/nand/atmel_nand.c
+@@ -1220,6 +1220,7 @@ static int atmel_pmecc_nand_init_params(
+               goto err;
+       }
++      nand_chip->options |= NAND_NO_SUBPAGE_WRITE;
+       nand_chip->ecc.read_page = atmel_nand_pmecc_read_page;
+       nand_chip->ecc.write_page = atmel_nand_pmecc_write_page;
diff --git a/queue-3.14/mtd-diskonchip-mem-resource-name-is-not-optional.patch b/queue-3.14/mtd-diskonchip-mem-resource-name-is-not-optional.patch
new file mode 100644 (file)
index 0000000..2e5ec68
--- /dev/null
@@ -0,0 +1,33 @@
+From 86e4bbc766b9456f583f2fc3c4f6c623b422af88 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sasha.levin@oracle.com>
+Date: Wed, 19 Mar 2014 18:24:37 -0400
+Subject: mtd: diskonchip: mem resource name is not optional
+
+From: Sasha Levin <sasha.levin@oracle.com>
+
+commit 86e4bbc766b9456f583f2fc3c4f6c623b422af88 upstream.
+
+Passing a name to request_mem_region() isn't optional and can't just
+be NULL. Passing NULL causes a NULL ptr deref later in the boot
+process.
+
+Signed-off-by: Sasha Levin <sasha.levin@oracle.com>
+Tested-by: Alexander Shiyan <shc_work@mail.ru>
+Signed-off-by: Brian Norris <computersforpeace@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mtd/nand/diskonchip.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/mtd/nand/diskonchip.c
++++ b/drivers/mtd/nand/diskonchip.c
+@@ -1438,7 +1438,7 @@ static int __init doc_probe(unsigned lon
+       int reg, len, numchips;
+       int ret = 0;
+-      if (!request_mem_region(physadr, DOC_IOREMAP_LEN, NULL))
++      if (!request_mem_region(physadr, DOC_IOREMAP_LEN, "DiskOnChip"))
+               return -EBUSY;
+       virtadr = ioremap(physadr, DOC_IOREMAP_LEN);
+       if (!virtadr) {
diff --git a/queue-3.14/mtd-nuc900_nand-null-dereference-in-nuc900_nand_enable.patch b/queue-3.14/mtd-nuc900_nand-null-dereference-in-nuc900_nand_enable.patch
new file mode 100644 (file)
index 0000000..ff1ab9f
--- /dev/null
@@ -0,0 +1,32 @@
+From c69dbbf3335a21aae74376d7e5db50a486d52439 Mon Sep 17 00:00:00 2001
+From: Dan Carpenter <dan.carpenter@oracle.com>
+Date: Mon, 17 Feb 2014 23:03:08 +0300
+Subject: mtd: nuc900_nand: NULL dereference in nuc900_nand_enable()
+
+From: Dan Carpenter <dan.carpenter@oracle.com>
+
+commit c69dbbf3335a21aae74376d7e5db50a486d52439 upstream.
+
+Instead of writing to "nand->reg + REG_FMICSR", we write to "REG_FMICSR",
+which is NULL and not a valid register.
+
+Fixes: 8bff82cbc308 ('mtd: add nand support for w90p910 (v2)')
+Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
+Signed-off-by: Brian Norris <computersforpeace@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mtd/nand/nuc900_nand.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/mtd/nand/nuc900_nand.c
++++ b/drivers/mtd/nand/nuc900_nand.c
+@@ -225,7 +225,7 @@ static void nuc900_nand_enable(struct nu
+       val = __raw_readl(nand->reg + REG_FMICSR);
+       if (!(val & NAND_EN))
+-              __raw_writel(val | NAND_EN, REG_FMICSR);
++              __raw_writel(val | NAND_EN, nand->reg + REG_FMICSR);
+       val = __raw_readl(nand->reg + REG_SMCSR);
diff --git a/queue-3.14/mtd-sm_ftl-heap-corruption-in-sm_create_sysfs_attributes.patch b/queue-3.14/mtd-sm_ftl-heap-corruption-in-sm_create_sysfs_attributes.patch
new file mode 100644 (file)
index 0000000..1b0391a
--- /dev/null
@@ -0,0 +1,53 @@
+From b4c233057771581698a13694ab6f33b48ce837dc Mon Sep 17 00:00:00 2001
+From: Dan Carpenter <dan.carpenter@oracle.com>
+Date: Thu, 5 Dec 2013 17:53:50 +0300
+Subject: mtd: sm_ftl: heap corruption in sm_create_sysfs_attributes()
+
+From: Dan Carpenter <dan.carpenter@oracle.com>
+
+commit b4c233057771581698a13694ab6f33b48ce837dc upstream.
+
+We always put a NUL terminator one byte past the end of the "vendor"
+buffer.  Walter Harms also pointed out that this should just use
+kstrndup().
+
+Fixes: 7d17c02a01a1 ('mtd: Add new SmartMedia/xD FTL')
+
+Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
+Signed-off-by: Brian Norris <computersforpeace@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mtd/sm_ftl.c |   11 ++++-------
+ 1 file changed, 4 insertions(+), 7 deletions(-)
+
+--- a/drivers/mtd/sm_ftl.c
++++ b/drivers/mtd/sm_ftl.c
+@@ -59,15 +59,12 @@ static struct attribute_group *sm_create
+       struct attribute_group *attr_group;
+       struct attribute **attributes;
+       struct sm_sysfs_attribute *vendor_attribute;
++      char *vendor;
+-      int vendor_len = strnlen(ftl->cis_buffer + SM_CIS_VENDOR_OFFSET,
+-                                      SM_SMALL_PAGE - SM_CIS_VENDOR_OFFSET);
+-
+-      char *vendor = kmalloc(vendor_len, GFP_KERNEL);
++      vendor = kstrndup(ftl->cis_buffer + SM_CIS_VENDOR_OFFSET,
++                        SM_SMALL_PAGE - SM_CIS_VENDOR_OFFSET, GFP_KERNEL);
+       if (!vendor)
+               goto error1;
+-      memcpy(vendor, ftl->cis_buffer + SM_CIS_VENDOR_OFFSET, vendor_len);
+-      vendor[vendor_len] = 0;
+       /* Initialize sysfs attributes */
+       vendor_attribute =
+@@ -78,7 +75,7 @@ static struct attribute_group *sm_create
+       sysfs_attr_init(&vendor_attribute->dev_attr.attr);
+       vendor_attribute->data = vendor;
+-      vendor_attribute->len = vendor_len;
++      vendor_attribute->len = strlen(vendor);
+       vendor_attribute->dev_attr.attr.name = "vendor";
+       vendor_attribute->dev_attr.attr.mode = S_IRUGO;
+       vendor_attribute->dev_attr.show = sm_attr_show;
diff --git a/queue-3.14/pinctrl-as3722-fix-handling-of-gpio-invert-bit.patch b/queue-3.14/pinctrl-as3722-fix-handling-of-gpio-invert-bit.patch
new file mode 100644 (file)
index 0000000..628579e
--- /dev/null
@@ -0,0 +1,74 @@
+From a73d2e30b46787d478275db36c19222020e29dc5 Mon Sep 17 00:00:00 2001
+From: Andrew Bresticker <abrestic@chromium.org>
+Date: Wed, 16 Apr 2014 13:40:17 -0700
+Subject: pinctrl: as3722: fix handling of GPIO invert bit
+
+From: Andrew Bresticker <abrestic@chromium.org>
+
+commit a73d2e30b46787d478275db36c19222020e29dc5 upstream.
+
+The AS3722_GPIO_INV bit will always be blindly overwritten by
+as3722_pinctrl_gpio_set_direction() and will be ignored when
+setting the value of the GPIO in as3722_gpio_set() since the
+enable_gpio_invert flag is never set.  This will cause an
+initially inverted GPIO to toggle when requested as an output,
+which could be problematic if, for example, the GPIO controls
+a critical regulator.
+
+Instead of setting up the enable_gpio_invert flag, just leave
+the invert bit alone and check it before setting the GPIO value.
+
+Signed-off-by: Andrew Bresticker <abrestic@chromium.org>
+Reviewed-by: Stephen Warren <swarren@nvidia.com>
+Tested-by: Stephen Warren <swarren@nvidia.com>
+Acked-by: Laxman Dewangan <ldewangan@nvidia.com>
+Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/pinctrl/pinctrl-as3722.c |   17 +++++++++++------
+ 1 file changed, 11 insertions(+), 6 deletions(-)
+
+--- a/drivers/pinctrl/pinctrl-as3722.c
++++ b/drivers/pinctrl/pinctrl-as3722.c
+@@ -64,7 +64,6 @@ struct as3722_pin_function {
+ };
+ struct as3722_gpio_pin_control {
+-      bool enable_gpio_invert;
+       unsigned mode_prop;
+       int io_function;
+ };
+@@ -320,10 +319,8 @@ static int as3722_pinctrl_gpio_set_direc
+               return mode;
+       }
+-      if (as_pci->gpio_control[offset].enable_gpio_invert)
+-              mode |= AS3722_GPIO_INV;
+-
+-      return as3722_write(as3722, AS3722_GPIOn_CONTROL_REG(offset), mode);
++      return as3722_update_bits(as3722, AS3722_GPIOn_CONTROL_REG(offset),
++                              AS3722_GPIO_MODE_MASK, mode);
+ }
+ static const struct pinmux_ops as3722_pinmux_ops = {
+@@ -496,10 +493,18 @@ static void as3722_gpio_set(struct gpio_
+ {
+       struct as3722_pctrl_info *as_pci = to_as_pci(chip);
+       struct as3722 *as3722 = as_pci->as3722;
+-      int en_invert = as_pci->gpio_control[offset].enable_gpio_invert;
++      int en_invert;
+       u32 val;
+       int ret;
++      ret = as3722_read(as3722, AS3722_GPIOn_CONTROL_REG(offset), &val);
++      if (ret < 0) {
++              dev_err(as_pci->dev,
++                      "GPIO_CONTROL%d_REG read failed: %d\n", offset, ret);
++              return;
++      }
++      en_invert = !!(val & AS3722_GPIO_INV);
++
+       if (value)
+               val = (en_invert) ? 0 : AS3722_GPIOn_SIGNAL(offset);
+       else
index 8fcb3dd0b7456fe9704fbdf1fe5fdfdd4a2cd1db..375efc07f7102902616f230a621320eb9a8cba08 100644 (file)
@@ -52,3 +52,29 @@ rtlwifi-rtl8192cu-fix-too-long-disable-of-irqs.patch
 rtlwifi-rtl8192se-fix-too-long-disable-of-irqs.patch
 rtlwifi-rtl8192se-fix-regression-due-to-commit-1bf4bbb.patch
 rtlwifi-rtl8188ee-initialize-packet_beacon.patch
+gpio-mxs-allow-for-recursive-enable_irq_wake-call.patch
+pinctrl-as3722-fix-handling-of-gpio-invert-bit.patch
+tgafb-fix-mode-setting-with-fbset.patch
+tgafb-fix-data-copying.patch
+mtd-atmel_nand-disable-subpage-nand-write-when-using-atmel-pmecc.patch
+mtd-diskonchip-mem-resource-name-is-not-optional.patch
+mtd-nuc900_nand-null-dereference-in-nuc900_nand_enable.patch
+mtd-sm_ftl-heap-corruption-in-sm_create_sysfs_attributes.patch
+skip-intel_crt_init-for-dell-xps-8700.patch
+dm-cache-prevent-corruption-caused-by-discard_block_size-cache_block_size.patch
+dm-transaction-manager-fix-corruption-due-to-non-atomic-transaction-commit.patch
+dm-take-care-to-copy-the-space-map-roots-before-locking-the-superblock.patch
+dm-thin-fix-dangling-bio-in-process_deferred_bios-error-path.patch
+dm-cache-fix-a-lock-inversion.patch
+dma-edma-fix-incorrect-sg-list-handling.patch
+aio-v4-ensure-access-to-ctx-ring_pages-is-correctly-serialised-for-migration.patch
+lockd-ensure-we-tear-down-any-live-sockets-when-socket-creation-fails-during-lockd_up.patch
+lib-percpu_counter.c-fix-bad-percpu-counter-state-during-suspend.patch
+mmc-sdhci-bcm-kona-fix-build-errors-when-built-in.patch
+thinkpad_acpi-fix-inconsistent-mute-led-after-resume.patch
+input-synaptics-add-min-max-quirk-for-thinkpad-t431s-l440-l540-s1-yoga-and-x1.patch
+input-synaptics-add-min-max-quirk-for-thinkpad-edge-e431.patch
+cpufreq-loongson2_cpufreq-don-t-declare-local-variable-as-static.patch
+cpufreq-at32ap-don-t-declare-local-variable-as-static.patch
+acpi-processor-fix-failure-of-loading-acpi-cpufreq-driver.patch
+cpufreq-unicore32-fix-typo-issue-for-clk.patch
diff --git a/queue-3.14/skip-intel_crt_init-for-dell-xps-8700.patch b/queue-3.14/skip-intel_crt_init-for-dell-xps-8700.patch
new file mode 100644 (file)
index 0000000..7fc9cd0
--- /dev/null
@@ -0,0 +1,38 @@
+From 10b6ee4a87811a110cb01eaca01eb04da6801baf Mon Sep 17 00:00:00 2001
+From: Giacomo Comes <comes@naic.edu>
+Date: Thu, 3 Apr 2014 14:13:55 -0400
+Subject: Skip intel_crt_init for Dell XPS 8700
+
+From: Giacomo Comes <comes@naic.edu>
+
+commit 10b6ee4a87811a110cb01eaca01eb04da6801baf upstream.
+
+The Dell XPS 8700 has an onboard DisplayPort and an HDMI port, but no VGA port.
+The call to intel_crt_init() freezes the machine, so skip that call.
+
+Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=73559
+Signed-off-by: Giacomo Comes <comes at naic.edu>
+Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/i915/intel_crt.c |    8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/drivers/gpu/drm/i915/intel_crt.c
++++ b/drivers/gpu/drm/i915/intel_crt.c
+@@ -765,6 +765,14 @@ static const struct dmi_system_id intel_
+                       DMI_MATCH(DMI_PRODUCT_NAME, "ZGB"),
+               },
+       },
++      {
++              .callback = intel_no_crt_dmi_callback,
++              .ident = "DELL XPS 8700",
++              .matches = {
++                      DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++                      DMI_MATCH(DMI_PRODUCT_NAME, "XPS 8700"),
++              },
++      },
+       { }
+ };
diff --git a/queue-3.14/tgafb-fix-data-copying.patch b/queue-3.14/tgafb-fix-data-copying.patch
new file mode 100644 (file)
index 0000000..163b49d
--- /dev/null
@@ -0,0 +1,342 @@
+From 6b0df6827bb6fcacb158dff29ad0a62d6418b534 Mon Sep 17 00:00:00 2001
+From: Mikulas Patocka <mpatocka@redhat.com>
+Date: Thu, 23 Jan 2014 14:43:10 -0500
+Subject: tgafb: fix data copying
+
+From: Mikulas Patocka <mpatocka@redhat.com>
+
+commit 6b0df6827bb6fcacb158dff29ad0a62d6418b534 upstream.
+
+The data-copying functions copyarea_foreward_8bpp and
+copyarea_backward_8bpp are buggy; they produce screen corruption.
+
+This patch fixes the functions and moves the logic into one function,
+"copyarea_8bpp". For simplicity, the function only handles copying that
+is aligned on 8 pixels. If we copy an unaligned area, the generic function
+cfb_copyarea is used.
+
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+Signed-off-by: Tomi Valkeinen <tomi.valkeinen@ti.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/video/tgafb.c |  264 +++++++++-----------------------------------------
+ 1 file changed, 51 insertions(+), 213 deletions(-)
+
+--- a/drivers/video/tgafb.c
++++ b/drivers/video/tgafb.c
+@@ -1139,222 +1139,57 @@ copyarea_line_32bpp(struct fb_info *info
+       __raw_writel(TGA_MODE_SBM_24BPP|TGA_MODE_SIMPLE, tga_regs+TGA_MODE_REG);
+ }
+-/* The general case of forward copy in 8bpp mode.  */
++/* The (almost) general case of backward copy in 8bpp mode.  */
+ static inline void
+-copyarea_foreward_8bpp(struct fb_info *info, u32 dx, u32 dy, u32 sx, u32 sy,
+-                     u32 height, u32 width, u32 line_length)
++copyarea_8bpp(struct fb_info *info, u32 dx, u32 dy, u32 sx, u32 sy,
++            u32 height, u32 width, u32 line_length,
++            const struct fb_copyarea *area)
+ {
+       struct tga_par *par = (struct tga_par *) info->par;
+-      unsigned long i, copied, left;
+-      unsigned long dpos, spos, dalign, salign, yincr;
+-      u32 smask_first, dmask_first, dmask_last;
+-      int pixel_shift, need_prime, need_second;
+-      unsigned long n64, n32, xincr_first;
++      unsigned i, yincr;
++      int depos, sepos, backward, last_step, step;
++      u32 mask_last;
++      unsigned n32;
+       void __iomem *tga_regs;
+       void __iomem *tga_fb;
+-      yincr = line_length;
+-      if (dy > sy) {
+-              dy += height - 1;
+-              sy += height - 1;
+-              yincr = -yincr;
+-      }
+-
+-      /* Compute the offsets and alignments in the frame buffer.
+-         More than anything else, these control how we do copies.  */
+-      dpos = dy * line_length + dx;
+-      spos = sy * line_length + sx;
+-      dalign = dpos & 7;
+-      salign = spos & 7;
+-      dpos &= -8;
+-      spos &= -8;
+-
+-      /* Compute the value for the PIXELSHIFT register.  This controls
+-         both non-co-aligned source and destination and copy direction.  */
+-      if (dalign >= salign)
+-              pixel_shift = dalign - salign;
+-      else
+-              pixel_shift = 8 - (salign - dalign);
+-
+-      /* Figure out if we need an additional priming step for the
+-         residue register.  */
+-      need_prime = (salign > dalign);
+-      if (need_prime)
+-              dpos -= 8;
+-
+-      /* Begin by copying the leading unaligned destination.  Copy enough
+-         to make the next destination address 32-byte aligned.  */
+-      copied = 32 - (dalign + (dpos & 31));
+-      if (copied == 32)
+-              copied = 0;
+-      xincr_first = (copied + 7) & -8;
+-      smask_first = dmask_first = (1ul << copied) - 1;
+-      smask_first <<= salign;
+-      dmask_first <<= dalign + need_prime*8;
+-      if (need_prime && copied > 24)
+-              copied -= 8;
+-      left = width - copied;
+-
+-      /* Care for small copies.  */
+-      if (copied > width) {
+-              u32 t;
+-              t = (1ul << width) - 1;
+-              t <<= dalign + need_prime*8;
+-              dmask_first &= t;
+-              left = 0;
+-      }
+-
+-      /* Attempt to use 64-byte copies.  This is only possible if the
+-         source and destination are co-aligned at 64 bytes.  */
+-      n64 = need_second = 0;
+-      if ((dpos & 63) == (spos & 63)
+-          && (height == 1 || line_length % 64 == 0)) {
+-              /* We may need a 32-byte copy to ensure 64 byte alignment.  */
+-              need_second = (dpos + xincr_first) & 63;
+-              if ((need_second & 32) != need_second)
+-                      printk(KERN_ERR "tgafb: need_second wrong\n");
+-              if (left >= need_second + 64) {
+-                      left -= need_second;
+-                      n64 = left / 64;
+-                      left %= 64;
+-              } else
+-                      need_second = 0;
+-      }
+-
+-      /* Copy trailing full 32-byte sections.  This will be the main
+-         loop if the 64 byte loop can't be used.  */
+-      n32 = left / 32;
+-      left %= 32;
+-
+-      /* Copy the trailing unaligned destination.  */
+-      dmask_last = (1ul << left) - 1;
+-
+-      tga_regs = par->tga_regs_base;
+-      tga_fb = par->tga_fb_base;
+-
+-      /* Set up the MODE and PIXELSHIFT registers.  */
+-      __raw_writel(TGA_MODE_SBM_8BPP|TGA_MODE_COPY, tga_regs+TGA_MODE_REG);
+-      __raw_writel(pixel_shift, tga_regs+TGA_PIXELSHIFT_REG);
+-      wmb();
+-
+-      for (i = 0; i < height; ++i) {
+-              unsigned long j;
+-              void __iomem *sfb;
+-              void __iomem *dfb;
+-
+-              sfb = tga_fb + spos;
+-              dfb = tga_fb + dpos;
+-              if (dmask_first) {
+-                      __raw_writel(smask_first, sfb);
+-                      wmb();
+-                      __raw_writel(dmask_first, dfb);
+-                      wmb();
+-                      sfb += xincr_first;
+-                      dfb += xincr_first;
+-              }
+-
+-              if (need_second) {
+-                      __raw_writel(0xffffffff, sfb);
+-                      wmb();
+-                      __raw_writel(0xffffffff, dfb);
+-                      wmb();
+-                      sfb += 32;
+-                      dfb += 32;
+-              }
+-
+-              if (n64 && (((unsigned long)sfb | (unsigned long)dfb) & 63))
+-                      printk(KERN_ERR
+-                             "tgafb: misaligned copy64 (s:%p, d:%p)\n",
+-                             sfb, dfb);
+-
+-              for (j = 0; j < n64; ++j) {
+-                      __raw_writel(sfb - tga_fb, tga_regs+TGA_COPY64_SRC);
+-                      wmb();
+-                      __raw_writel(dfb - tga_fb, tga_regs+TGA_COPY64_DST);
+-                      wmb();
+-                      sfb += 64;
+-                      dfb += 64;
+-              }
+-
+-              for (j = 0; j < n32; ++j) {
+-                      __raw_writel(0xffffffff, sfb);
+-                      wmb();
+-                      __raw_writel(0xffffffff, dfb);
+-                      wmb();
+-                      sfb += 32;
+-                      dfb += 32;
+-              }
+-
+-              if (dmask_last) {
+-                      __raw_writel(0xffffffff, sfb);
+-                      wmb();
+-                      __raw_writel(dmask_last, dfb);
+-                      wmb();
+-              }
+-
+-              spos += yincr;
+-              dpos += yincr;
++      /* Do acceleration only if we are aligned on 8 pixels */
++      if ((dx | sx | width) & 7) {
++              cfb_copyarea(info, area);
++              return;
+       }
+-      /* Reset the MODE register to normal.  */
+-      __raw_writel(TGA_MODE_SBM_8BPP|TGA_MODE_SIMPLE, tga_regs+TGA_MODE_REG);
+-}
+-
+-/* The (almost) general case of backward copy in 8bpp mode.  */
+-static inline void
+-copyarea_backward_8bpp(struct fb_info *info, u32 dx, u32 dy, u32 sx, u32 sy,
+-                     u32 height, u32 width, u32 line_length,
+-                     const struct fb_copyarea *area)
+-{
+-      struct tga_par *par = (struct tga_par *) info->par;
+-      unsigned long i, left, yincr;
+-      unsigned long depos, sepos, dealign, sealign;
+-      u32 mask_first, mask_last;
+-      unsigned long n32;
+-      void __iomem *tga_regs;
+-      void __iomem *tga_fb;
+-
+       yincr = line_length;
+       if (dy > sy) {
+               dy += height - 1;
+               sy += height - 1;
+               yincr = -yincr;
+       }
++      backward = dy == sy && dx > sx && dx < sx + width;
+       /* Compute the offsets and alignments in the frame buffer.
+          More than anything else, these control how we do copies.  */
+-      depos = dy * line_length + dx + width;
+-      sepos = sy * line_length + sx + width;
+-      dealign = depos & 7;
+-      sealign = sepos & 7;
+-
+-      /* ??? The documentation appears to be incorrect (or very
+-         misleading) wrt how pixel shifting works in backward copy
+-         mode, i.e. when PIXELSHIFT is negative.  I give up for now.
+-         Do handle the common case of co-aligned backward copies,
+-         but frob everything else back on generic code.  */
+-      if (dealign != sealign) {
+-              cfb_copyarea(info, area);
+-              return;
+-      }
+-
+-      /* We begin the copy with the trailing pixels of the
+-         unaligned destination.  */
+-      mask_first = (1ul << dealign) - 1;
+-      left = width - dealign;
+-
+-      /* Care for small copies.  */
+-      if (dealign > width) {
+-              mask_first ^= (1ul << (dealign - width)) - 1;
+-              left = 0;
+-      }
++      depos = dy * line_length + dx;
++      sepos = sy * line_length + sx;
++      if (backward)
++              depos += width, sepos += width;
+       /* Next copy full words at a time.  */
+-      n32 = left / 32;
+-      left %= 32;
++      n32 = width / 32;
++      last_step = width % 32;
+       /* Finally copy the unaligned head of the span.  */
+-      mask_last = -1 << (32 - left);
++      mask_last = (1ul << last_step) - 1;
++
++      if (!backward) {
++              step = 32;
++              last_step = 32;
++      } else {
++              step = -32;
++              last_step = -last_step;
++              sepos -= 32;
++              depos -= 32;
++      }
+       tga_regs = par->tga_regs_base;
+       tga_fb = par->tga_fb_base;
+@@ -1371,25 +1206,33 @@ copyarea_backward_8bpp(struct fb_info *i
+               sfb = tga_fb + sepos;
+               dfb = tga_fb + depos;
+-              if (mask_first) {
+-                      __raw_writel(mask_first, sfb);
+-                      wmb();
+-                      __raw_writel(mask_first, dfb);
+-                      wmb();
+-              }
+-              for (j = 0; j < n32; ++j) {
+-                      sfb -= 32;
+-                      dfb -= 32;
++              for (j = 0; j < n32; j++) {
++                      if (j < 2 && j + 1 < n32 && !backward &&
++                          !(((unsigned long)sfb | (unsigned long)dfb) & 63)) {
++                              do {
++                                      __raw_writel(sfb - tga_fb, tga_regs+TGA_COPY64_SRC);
++                                      wmb();
++                                      __raw_writel(dfb - tga_fb, tga_regs+TGA_COPY64_DST);
++                                      wmb();
++                                      sfb += 64;
++                                      dfb += 64;
++                                      j += 2;
++                              } while (j + 1 < n32);
++                              j--;
++                              continue;
++                      }
+                       __raw_writel(0xffffffff, sfb);
+                       wmb();
+                       __raw_writel(0xffffffff, dfb);
+                       wmb();
++                      sfb += step;
++                      dfb += step;
+               }
+               if (mask_last) {
+-                      sfb -= 32;
+-                      dfb -= 32;
++                      sfb += last_step - step;
++                      dfb += last_step - step;
+                       __raw_writel(mask_last, sfb);
+                       wmb();
+                       __raw_writel(mask_last, dfb);
+@@ -1450,14 +1293,9 @@ tgafb_copyarea(struct fb_info *info, con
+       else if (bpp == 32)
+               cfb_copyarea(info, area);
+-      /* Detect overlapping source and destination that requires
+-         a backward copy.  */
+-      else if (dy == sy && dx > sx && dx < sx + width)
+-              copyarea_backward_8bpp(info, dx, dy, sx, sy, height,
+-                                     width, line_length, area);
+       else
+-              copyarea_foreward_8bpp(info, dx, dy, sx, sy, height,
+-                                     width, line_length);
++              copyarea_8bpp(info, dx, dy, sx, sy, height,
++                            width, line_length, area);
+ }
diff --git a/queue-3.14/tgafb-fix-mode-setting-with-fbset.patch b/queue-3.14/tgafb-fix-mode-setting-with-fbset.patch
new file mode 100644 (file)
index 0000000..46a3a28
--- /dev/null
@@ -0,0 +1,129 @@
+From 624966589041deb32a2626ee2e176e8274581101 Mon Sep 17 00:00:00 2001
+From: Mikulas Patocka <mpatocka@redhat.com>
+Date: Thu, 23 Jan 2014 14:42:43 -0500
+Subject: tgafb: fix mode setting with fbset
+
+From: Mikulas Patocka <mpatocka@redhat.com>
+
+commit 624966589041deb32a2626ee2e176e8274581101 upstream.
+
+Mode setting in the TGA driver is broken for these reasons:
+
+- info->fix.line_length is set just once, in the tgafb_init_fix function. If
+  we change the video mode, info->fix.line_length is not recalculated, so
+  the video mode is changed but the screen is corrupted because of the wrong
+  info->fix.line_length.
+
+- info->fix.smem_len is set in tgafb_init_fix to the size of the default
+  video mode (640x480). If we set a higher resolution,
+  info->fix.smem_len is smaller than the current screen size, preventing
+  the userspace program from mapping the framebuffer.
+
+This patch fixes it:
+
+- info->fix.line_length initialization is moved to tgafb_set_par so that
+  it is recalculated with each mode change.
+
+- info->fix.smem_len is set to a fixed value representing the real
+  amount of video ram (the values are taken from the xfree86 driver).
+
+- add a check to tgafb_check_var to prevent us from setting a video mode
+  that doesn't fit into videoram.
+
+- in tgafb_register, tgafb_init_fix is moved upwards, to be called
+  before fb_find_mode (because fb_find_mode already needs the videoram
+  size set in tgafb_init_fix).
+
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+Signed-off-by: Tomi Valkeinen <tomi.valkeinen@ti.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/video/tgafb.c |   18 ++++++++++++++----
+ 1 file changed, 14 insertions(+), 4 deletions(-)
+
+--- a/drivers/video/tgafb.c
++++ b/drivers/video/tgafb.c
+@@ -182,6 +182,8 @@ tgafb_check_var(struct fb_var_screeninfo
+       if (var->xres_virtual != var->xres || var->yres_virtual != var->yres)
+               return -EINVAL;
++      if (var->xres * var->yres * (var->bits_per_pixel >> 3) > info->fix.smem_len)
++              return -EINVAL;
+       if (var->nonstd)
+               return -EINVAL;
+       if (1000000000 / var->pixclock > TGA_PLL_MAX_FREQ)
+@@ -262,6 +264,7 @@ tgafb_set_par(struct fb_info *info)
+       par->yres = info->var.yres;
+       par->pll_freq = pll_freq = 1000000000 / info->var.pixclock;
+       par->bits_per_pixel = info->var.bits_per_pixel;
++      info->fix.line_length = par->xres * (par->bits_per_pixel >> 3);
+       tga_type = par->tga_type;
+@@ -1470,6 +1473,7 @@ tgafb_init_fix(struct fb_info *info)
+       int tga_bus_tc = TGA_BUS_TC(par->dev);
+       u8 tga_type = par->tga_type;
+       const char *tga_type_name = NULL;
++      unsigned memory_size;
+       switch (tga_type) {
+       case TGA_TYPE_8PLANE:
+@@ -1477,22 +1481,27 @@ tgafb_init_fix(struct fb_info *info)
+                       tga_type_name = "Digital ZLXp-E1";
+               if (tga_bus_tc)
+                       tga_type_name = "Digital ZLX-E1";
++              memory_size = 2097152;
+               break;
+       case TGA_TYPE_24PLANE:
+               if (tga_bus_pci)
+                       tga_type_name = "Digital ZLXp-E2";
+               if (tga_bus_tc)
+                       tga_type_name = "Digital ZLX-E2";
++              memory_size = 8388608;
+               break;
+       case TGA_TYPE_24PLUSZ:
+               if (tga_bus_pci)
+                       tga_type_name = "Digital ZLXp-E3";
+               if (tga_bus_tc)
+                       tga_type_name = "Digital ZLX-E3";
++              memory_size = 16777216;
+               break;
+       }
+-      if (!tga_type_name)
++      if (!tga_type_name) {
+               tga_type_name = "Unknown";
++              memory_size = 16777216;
++      }
+       strlcpy(info->fix.id, tga_type_name, sizeof(info->fix.id));
+@@ -1502,9 +1511,8 @@ tgafb_init_fix(struct fb_info *info)
+                           ? FB_VISUAL_PSEUDOCOLOR
+                           : FB_VISUAL_DIRECTCOLOR);
+-      info->fix.line_length = par->xres * (par->bits_per_pixel >> 3);
+       info->fix.smem_start = (size_t) par->tga_fb_base;
+-      info->fix.smem_len = info->fix.line_length * par->yres;
++      info->fix.smem_len = memory_size;
+       info->fix.mmio_start = (size_t) par->tga_regs_base;
+       info->fix.mmio_len = 512;
+@@ -1628,6 +1636,9 @@ static int tgafb_register(struct device
+               modedb_tga = &modedb_tc;
+               modedbsize_tga = 1;
+       }
++
++      tgafb_init_fix(info);
++
+       ret = fb_find_mode(&info->var, info,
+                          mode_option ? mode_option : mode_option_tga,
+                          modedb_tga, modedbsize_tga, NULL,
+@@ -1645,7 +1656,6 @@ static int tgafb_register(struct device
+       }
+       tgafb_set_par(info);
+-      tgafb_init_fix(info);
+       if (register_framebuffer(info) < 0) {
+               printk(KERN_ERR "tgafb: Could not register framebuffer\n");
diff --git a/queue-3.14/thinkpad_acpi-fix-inconsistent-mute-led-after-resume.patch b/queue-3.14/thinkpad_acpi-fix-inconsistent-mute-led-after-resume.patch
new file mode 100644 (file)
index 0000000..68aa4c8
--- /dev/null
@@ -0,0 +1,44 @@
+From 119f449866ad18785b0445adaf0d2859c6dbdaa3 Mon Sep 17 00:00:00 2001
+From: Takashi Iwai <tiwai@suse.de>
+Date: Wed, 12 Feb 2014 16:32:45 +0100
+Subject: thinkpad_acpi: Fix inconsistent mute LED after resume
+
+From: Takashi Iwai <tiwai@suse.de>
+
+commit 119f449866ad18785b0445adaf0d2859c6dbdaa3 upstream.
+
+The mute LED states have to be restored after resume.
+
+Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=70351
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Matthew Garrett <matthew.garrett@nebula.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/platform/x86/thinkpad_acpi.c |   12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+--- a/drivers/platform/x86/thinkpad_acpi.c
++++ b/drivers/platform/x86/thinkpad_acpi.c
+@@ -8447,9 +8447,21 @@ static void mute_led_exit(void)
+               tpacpi_led_set(i, false);
+ }
++static void mute_led_resume(void)
++{
++      int i;
++
++      for (i = 0; i < TPACPI_LED_MAX; i++) {
++              struct tp_led_table *t = &led_tables[i];
++              if (t->state >= 0)
++                      mute_led_on_off(t, t->state);
++      }
++}
++
+ static struct ibm_struct mute_led_driver_data = {
+       .name = "mute_led",
+       .exit = mute_led_exit,
++      .resume = mute_led_resume,
+ };
+ /****************************************************************************