git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
3.13-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 4 Mar 2014 01:46:05 +0000 (17:46 -0800)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 4 Mar 2014 01:46:05 +0000 (17:46 -0800)
added patches:
ahci-disable-ncq-on-samsung-pci-e-ssds-on-macbooks.patch
ata-enable-quirk-from-jmicron-jmb350-for-jmb394.patch
cpufreq-powernow-k8-initialize-per-cpu-data-structures-properly.patch
perf-trace-fix-ioctl-request-beautifier-build-problems-on-i386-x86_64-arches.patch
perf-x86-fix-event-scheduling.patch
powerpc-crashdump-fix-page-frame-number-check-in-copy_oldmem_page.patch
powerpc-increase-stack-redzone-for-64-bit-userspace-to-512-bytes.patch
powerpc-le-ensure-that-the-stop-self-rtas-token-is-handled-correctly.patch
powerpc-powernv-fix-indirect-xscom-unmangling.patch
powerpc-powernv-fix-opal_xscom_-read-write-prototype.patch
revert-writeback-do-not-sync-data-dirtied-after-sync-start.patch
sata_sil-apply-mod15write-quirk-to-toshiba-mk2561gsyn.patch
sunrpc-ensure-that-gss_auth-isn-t-freed-before-its-upcall-messages.patch
sunrpc-fix-races-in-xs_nospace.patch
x86-dma-mapping-fix-gfp_atomic-macro-usage.patch

16 files changed:
queue-3.13/ahci-disable-ncq-on-samsung-pci-e-ssds-on-macbooks.patch [new file with mode: 0644]
queue-3.13/ata-enable-quirk-from-jmicron-jmb350-for-jmb394.patch [new file with mode: 0644]
queue-3.13/cpufreq-powernow-k8-initialize-per-cpu-data-structures-properly.patch [new file with mode: 0644]
queue-3.13/perf-trace-fix-ioctl-request-beautifier-build-problems-on-i386-x86_64-arches.patch [new file with mode: 0644]
queue-3.13/perf-x86-fix-event-scheduling.patch [new file with mode: 0644]
queue-3.13/powerpc-crashdump-fix-page-frame-number-check-in-copy_oldmem_page.patch [new file with mode: 0644]
queue-3.13/powerpc-increase-stack-redzone-for-64-bit-userspace-to-512-bytes.patch [new file with mode: 0644]
queue-3.13/powerpc-le-ensure-that-the-stop-self-rtas-token-is-handled-correctly.patch [new file with mode: 0644]
queue-3.13/powerpc-powernv-fix-indirect-xscom-unmangling.patch [new file with mode: 0644]
queue-3.13/powerpc-powernv-fix-opal_xscom_-read-write-prototype.patch [new file with mode: 0644]
queue-3.13/revert-writeback-do-not-sync-data-dirtied-after-sync-start.patch [new file with mode: 0644]
queue-3.13/sata_sil-apply-mod15write-quirk-to-toshiba-mk2561gsyn.patch [new file with mode: 0644]
queue-3.13/series
queue-3.13/sunrpc-ensure-that-gss_auth-isn-t-freed-before-its-upcall-messages.patch [new file with mode: 0644]
queue-3.13/sunrpc-fix-races-in-xs_nospace.patch [new file with mode: 0644]
queue-3.13/x86-dma-mapping-fix-gfp_atomic-macro-usage.patch [new file with mode: 0644]

diff --git a/queue-3.13/ahci-disable-ncq-on-samsung-pci-e-ssds-on-macbooks.patch b/queue-3.13/ahci-disable-ncq-on-samsung-pci-e-ssds-on-macbooks.patch
new file mode 100644 (file)
index 0000000..42ba8d9
--- /dev/null
@@ -0,0 +1,59 @@
+From 67809f85d31eac600f6b28defa5386c9d2a13b1d Mon Sep 17 00:00:00 2001
+From: Levente Kurusa <levex@linux.com>
+Date: Tue, 18 Feb 2014 10:22:17 -0500
+Subject: ahci: disable NCQ on Samsung pci-e SSDs on macbooks
+
+From: Levente Kurusa <levex@linux.com>
+
+commit 67809f85d31eac600f6b28defa5386c9d2a13b1d upstream.
+
+Samsung's pci-e SSDs with device ID 0x1600 which are found on some
+macbooks time out on NCQ commands.  Blacklist NCQ on the device so
+that the affected machines can at least boot.
+
+Original-patch-by: Levente Kurusa <levex@linux.com>
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=60731
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/ata/ahci.c |   14 ++++++++++++++
+ 1 file changed, 14 insertions(+)
+
+--- a/drivers/ata/ahci.c
++++ b/drivers/ata/ahci.c
+@@ -61,6 +61,7 @@ enum board_ids {
+       /* board IDs by feature in alphabetical order */
+       board_ahci,
+       board_ahci_ign_iferr,
++      board_ahci_noncq,
+       board_ahci_nosntf,
+       board_ahci_yes_fbs,
+@@ -119,6 +120,13 @@ static const struct ata_port_info ahci_p
+               .udma_mask      = ATA_UDMA6,
+               .port_ops       = &ahci_ops,
+       },
++      [board_ahci_noncq] = {
++              AHCI_HFLAGS     (AHCI_HFLAG_NO_NCQ),
++              .flags          = AHCI_FLAG_COMMON,
++              .pio_mask       = ATA_PIO4,
++              .udma_mask      = ATA_UDMA6,
++              .port_ops       = &ahci_ops,
++      },
+       [board_ahci_nosntf] = {
+               AHCI_HFLAGS     (AHCI_HFLAG_NO_SNTF),
+               .flags          = AHCI_FLAG_COMMON,
+@@ -450,6 +458,12 @@ static const struct pci_device_id ahci_p
+       { PCI_VDEVICE(ASMEDIA, 0x0611), board_ahci },   /* ASM1061 */
+       { PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci },   /* ASM1062 */
++      /*
++       * Samsung SSDs found on some macbooks.  NCQ times out.
++       * https://bugzilla.kernel.org/show_bug.cgi?id=60731
++       */
++      { PCI_VDEVICE(SAMSUNG, 0x1600), board_ahci_noncq },
++
+       /* Enmotus */
+       { PCI_DEVICE(0x1c44, 0x8000), board_ahci },
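
The patch above works in two table-driven steps: a new board id whose port_info carries AHCI_HFLAG_NO_NCQ, and a PCI id entry that maps the Samsung device to that board id. The following is a minimal user-space sketch of the same lookup pattern; the type names, flag bits and the fallback behaviour are illustrative, not the kernel's.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel's board ids and host flags. */
enum board_id { BOARD_AHCI, BOARD_AHCI_NONCQ };
#define HFLAG_NO_NCQ (1u << 0)

struct port_info { unsigned int hflags; };

static const struct port_info boards[] = {
    [BOARD_AHCI]       = { .hflags = 0 },
    [BOARD_AHCI_NONCQ] = { .hflags = HFLAG_NO_NCQ },
};

struct pci_quirk { uint16_t vendor, device; enum board_id board; };

static const struct pci_quirk quirks[] = {
    { 0x144d, 0x1600, BOARD_AHCI_NONCQ },   /* Samsung SSD on some Macbooks */
    { 0, 0, BOARD_AHCI }                    /* terminator */
};

static enum board_id lookup_board(uint16_t vendor, uint16_t device)
{
    for (const struct pci_quirk *q = quirks; q->vendor; q++)
        if (q->vendor == vendor && q->device == device)
            return q->board;
    return BOARD_AHCI;                      /* default: full feature set */
}

int main(void)
{
    enum board_id b = lookup_board(0x144d, 0x1600);
    printf("NCQ %s\n", boards[b].hflags & HFLAG_NO_NCQ ?
           "disabled by quirk" : "enabled");
    return 0;
}
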
diff --git a/queue-3.13/ata-enable-quirk-from-jmicron-jmb350-for-jmb394.patch b/queue-3.13/ata-enable-quirk-from-jmicron-jmb350-for-jmb394.patch
new file mode 100644 (file)
index 0000000..9682634
--- /dev/null
@@ -0,0 +1,44 @@
+From efb9e0f4f43780f0ae0c6428d66bd03e805c7539 Mon Sep 17 00:00:00 2001
+From: "Denis V. Lunev" <den@openvz.org>
+Date: Thu, 30 Jan 2014 15:20:30 +0400
+Subject: ata: enable quirk from jmicron JMB350 for JMB394
+
+From: "Denis V. Lunev" <den@openvz.org>
+
+commit efb9e0f4f43780f0ae0c6428d66bd03e805c7539 upstream.
+
+Without the patch the kernel generates the following error.
+
+ ata11.15: SATA link up 1.5 Gbps (SStatus 113 SControl 310)
+ ata11.15: Port Multiplier vendor mismatch '0x197b' != '0x123'
+ ata11.15: PMP revalidation failed (errno=-19)
+ ata11.15: failed to recover PMP after 5 tries, giving up
+
+This patch helps to bypass this error and the device becomes
+functional.
+
+Signed-off-by: Denis V. Lunev <den@openvz.org>
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Cc: <linux-ide@vger.kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/ata/libata-pmp.c |    7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+--- a/drivers/ata/libata-pmp.c
++++ b/drivers/ata/libata-pmp.c
+@@ -447,8 +447,11 @@ static void sata_pmp_quirks(struct ata_p
+                * otherwise.  Don't try hard to recover it.
+                */
+               ap->pmp_link[ap->nr_pmp_links - 1].flags |= ATA_LFLAG_NO_RETRY;
+-      } else if (vendor == 0x197b && devid == 0x2352) {
+-              /* chip found in Thermaltake BlackX Duet, jmicron JMB350? */
++      } else if (vendor == 0x197b && (devid == 0x2352 || devid == 0x0325)) {
++              /*
++               * 0x2352: found in Thermaltake BlackX Duet, jmicron JMB350?
++               * 0x0325: jmicron JMB394.
++               */
+               ata_for_each_link(link, ap, EDGE) {
+                       /* SRST breaks detection and disks get misclassified
+                        * LPM disabled to avoid potential problems
diff --git a/queue-3.13/cpufreq-powernow-k8-initialize-per-cpu-data-structures-properly.patch b/queue-3.13/cpufreq-powernow-k8-initialize-per-cpu-data-structures-properly.patch
new file mode 100644 (file)
index 0000000..2785534
--- /dev/null
@@ -0,0 +1,78 @@
+From c3274763bfc3bf1ececa269ed6e6c4d7ec1c3e5e Mon Sep 17 00:00:00 2001
+From: "Srivatsa S. Bhat" <srivatsa.bhat@linux.vnet.ibm.com>
+Date: Mon, 17 Feb 2014 16:18:21 +0530
+Subject: cpufreq: powernow-k8: Initialize per-cpu data-structures properly
+
+From: "Srivatsa S. Bhat" <srivatsa.bhat@linux.vnet.ibm.com>
+
+commit c3274763bfc3bf1ececa269ed6e6c4d7ec1c3e5e upstream.
+
+The powernow-k8 driver maintains a per-cpu data-structure called
+powernow_data that is used to perform the frequency transitions.
+It initializes this data structure only for the policy->cpu. So,
+accesses to this data structure by other CPUs results in various
+problems because they would have been uninitialized.
+
+Specifically, if a cpu (!= policy->cpu) invokes the drivers' ->get()
+function, it returns 0 as the KHz value, since its per-cpu memory
+doesn't point to anything valid. This causes problems during
+suspend/resume since cpufreq_update_policy() tries to enforce this
+(0 KHz) as the current frequency of the CPU, and this madness gets
+propagated to adjust_jiffies() as well. Eventually, lots of things
+start breaking down, including the r8169 ethernet card, in one
+particularly interesting case reported by Pierre Ossman.
+
+Fix this by initializing the per-cpu data-structures of all the CPUs
+in the policy appropriately.
+
+References: https://bugzilla.kernel.org/show_bug.cgi?id=70311
+Reported-by: Pierre Ossman <pierre@ossman.eu>
+Signed-off-by: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
+Acked-by: Viresh Kumar <viresh.kumar@linaro.org>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/cpufreq/powernow-k8.c |   10 +++++++---
+ 1 file changed, 7 insertions(+), 3 deletions(-)
+
+--- a/drivers/cpufreq/powernow-k8.c
++++ b/drivers/cpufreq/powernow-k8.c
+@@ -1081,7 +1081,7 @@ static int powernowk8_cpu_init(struct cp
+ {
+       struct powernow_k8_data *data;
+       struct init_on_cpu init_on_cpu;
+-      int rc;
++      int rc, cpu;
+       smp_call_function_single(pol->cpu, check_supported_cpu, &rc, 1);
+       if (rc)
+@@ -1145,7 +1145,9 @@ static int powernowk8_cpu_init(struct cp
+       pr_debug("cpu_init done, current fid 0x%x, vid 0x%x\n",
+                data->currfid, data->currvid);
+-      per_cpu(powernow_data, pol->cpu) = data;
++      /* Point all the CPUs in this policy to the same data */
++      for_each_cpu(cpu, pol->cpus)
++              per_cpu(powernow_data, cpu) = data;
+       return 0;
+@@ -1160,6 +1162,7 @@ err_out:
+ static int powernowk8_cpu_exit(struct cpufreq_policy *pol)
+ {
+       struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
++      int cpu;
+       if (!data)
+               return -EINVAL;
+@@ -1170,7 +1173,8 @@ static int powernowk8_cpu_exit(struct cp
+       kfree(data->powernow_table);
+       kfree(data);
+-      per_cpu(powernow_data, pol->cpu) = NULL;
++      for_each_cpu(cpu, pol->cpus)
++              per_cpu(powernow_data, cpu) = NULL;
+       return 0;
+ }
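
The fix above makes every CPU in the policy share the same powernow_data pointer instead of only policy->cpu, so ->get() on a sibling CPU no longer reads an uninitialised per-cpu slot and reports 0 kHz. A user-space sketch of the before/after behaviour, using a plain array as a stand-in for the per-cpu variable (names and values are illustrative):

#include <stdio.h>
#include <stddef.h>

#define NR_CPUS 4

struct powernow_data { unsigned int cur_khz; };

/* Stand-in for per_cpu(powernow_data, cpu). */
static struct powernow_data *percpu_data[NR_CPUS];

/* ->get(): returns 0 when the per-cpu slot was never initialised. */
static unsigned int driver_get(int cpu)
{
    return percpu_data[cpu] ? percpu_data[cpu]->cur_khz : 0;
}

int main(void)
{
    static struct powernow_data data = { .cur_khz = 2200000 };
    int policy_cpus[] = { 0, 1 };           /* CPUs sharing one policy */

    /* Buggy init: only the policy owner gets a valid pointer. */
    percpu_data[0] = &data;
    printf("cpu1 before fix: %u kHz\n", driver_get(1));    /* 0 kHz */

    /* Fixed init: point every CPU in the policy at the same data. */
    for (size_t i = 0; i < sizeof(policy_cpus) / sizeof(policy_cpus[0]); i++)
        percpu_data[policy_cpus[i]] = &data;
    printf("cpu1 after fix:  %u kHz\n", driver_get(1));
    return 0;
}
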
diff --git a/queue-3.13/perf-trace-fix-ioctl-request-beautifier-build-problems-on-i386-x86_64-arches.patch b/queue-3.13/perf-trace-fix-ioctl-request-beautifier-build-problems-on-i386-x86_64-arches.patch
new file mode 100644 (file)
index 0000000..c34fc78
--- /dev/null
@@ -0,0 +1,95 @@
+From 844ae5b46c08dbc7ba695b543c023f9cf3bbf9ff Mon Sep 17 00:00:00 2001
+From: Arnaldo Carvalho de Melo <acme@redhat.com>
+Date: Mon, 10 Feb 2014 14:09:48 -0300
+Subject: perf trace: Fix ioctl 'request' beautifier build problems on !(i386 || x86_64) arches
+
+From: Arnaldo Carvalho de Melo <acme@redhat.com>
+
+commit 844ae5b46c08dbc7ba695b543c023f9cf3bbf9ff upstream.
+
+Supporting decoding the ioctl 'request' parameter needs more work to
+properly support more architectures, the current approach doesn't work
+on at least powerpc and sparc, as reported by Ben Hutchings in
+http://lkml.kernel.org/r/1391593985.3003.48.camel@deadeye.wl.decadent.org.uk .
+
+Work around that by making it to be ifdefed for the architectures known
+to work with the current, limited approach, i386 and x86_64 till better
+code is written.
+
+Reported-by: Ben Hutchings <ben@decadent.org.uk>
+Acked-by: Ben Hutchings <ben@decadent.org.uk>
+Cc: Adrian Hunter <adrian.hunter@intel.com>
+Cc: David Ahern <dsahern@gmail.com>
+Cc: Frederic Weisbecker <fweisbec@gmail.com>
+Cc: Jiri Olsa <jolsa@redhat.com>
+Cc: Mike Galbraith <efault@gmx.de>
+Cc: Namhyung Kim <namhyung@kernel.org>
+Cc: Paul Mackerras <paulus@samba.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Stephane Eranian <eranian@google.com>
+Link: http://lkml.kernel.org/n/tip-ss04k11insqlu329xh5g02q0@git.kernel.org
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ tools/perf/builtin-trace.c |   18 ++++++++++++++++++
+ 1 file changed, 18 insertions(+)
+
+--- a/tools/perf/builtin-trace.c
++++ b/tools/perf/builtin-trace.c
+@@ -275,6 +275,11 @@ static size_t syscall_arg__scnprintf_str
+ #define SCA_STRARRAY syscall_arg__scnprintf_strarray
++#if defined(__i386__) || defined(__x86_64__)
++/*
++ * FIXME: Make this available to all arches as soon as the ioctl beautifier
++ *      gets rewritten to support all arches.
++ */
+ static size_t syscall_arg__scnprintf_strhexarray(char *bf, size_t size,
+                                                struct syscall_arg *arg)
+ {
+@@ -282,6 +287,7 @@ static size_t syscall_arg__scnprintf_str
+ }
+ #define SCA_STRHEXARRAY syscall_arg__scnprintf_strhexarray
++#endif /* defined(__i386__) || defined(__x86_64__) */
+ static size_t syscall_arg__scnprintf_fd(char *bf, size_t size,
+                                       struct syscall_arg *arg);
+@@ -835,6 +841,10 @@ static size_t syscall_arg__scnprintf_sig
+ #define SCA_SIGNUM syscall_arg__scnprintf_signum
++#if defined(__i386__) || defined(__x86_64__)
++/*
++ * FIXME: Make this available to all arches.
++ */
+ #define TCGETS                0x5401
+ static const char *tioctls[] = {
+@@ -856,6 +866,7 @@ static const char *tioctls[] = {
+ };
+ static DEFINE_STRARRAY_OFFSET(tioctls, 0x5401);
++#endif /* defined(__i386__) || defined(__x86_64__) */
+ #define STRARRAY(arg, name, array) \
+         .arg_scnprintf = { [arg] = SCA_STRARRAY, }, \
+@@ -937,9 +948,16 @@ static struct syscall_fmt {
+       { .name     = "getrlimit",  .errmsg = true, STRARRAY(0, resource, rlimit_resources), },
+       { .name     = "ioctl",      .errmsg = true,
+         .arg_scnprintf = { [0] = SCA_FD, /* fd */ 
++#if defined(__i386__) || defined(__x86_64__)
++/*
++ * FIXME: Make this available to all arches.
++ */
+                            [1] = SCA_STRHEXARRAY, /* cmd */
+                            [2] = SCA_HEX, /* arg */ },
+         .arg_parm      = { [1] = &strarray__tioctls, /* cmd */ }, },
++#else
++                           [2] = SCA_HEX, /* arg */ }, },
++#endif
+       { .name     = "kill",       .errmsg = true,
+         .arg_scnprintf = { [1] = SCA_SIGNUM, /* sig */ }, },
+       { .name     = "linkat",     .errmsg = true,
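
The workaround above is plain compile-time gating: the ioctl 'request' beautifier is only built on the two architectures whose hard-coded constants are known to be right, and other architectures keep printing the raw value. A minimal sketch of the pattern; the fallback behaviour shown here is an assumption for illustration, only the TCGETS value is taken from the patch.

#include <stdio.h>

#if defined(__i386__) || defined(__x86_64__)
/* Arch-specific decoder: only valid where the constants match. */
static const char *decode_ioctl_cmd(unsigned long cmd)
{
    return cmd == 0x5401 ? "TCGETS" : "unknown";
}
#else
/* Other arches fall back to printing the raw number. */
static const char *decode_ioctl_cmd(unsigned long cmd)
{
    (void)cmd;
    return NULL;    /* caller prints the value in hex instead */
}
#endif

int main(void)
{
    unsigned long cmd = 0x5401;
    const char *name = decode_ioctl_cmd(cmd);

    if (name)
        printf("ioctl cmd: %s\n", name);
    else
        printf("ioctl cmd: 0x%lx\n", cmd);
    return 0;
}
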
diff --git a/queue-3.13/perf-x86-fix-event-scheduling.patch b/queue-3.13/perf-x86-fix-event-scheduling.patch
new file mode 100644 (file)
index 0000000..ba5b6de
--- /dev/null
@@ -0,0 +1,116 @@
+From 26e61e8939b1fe8729572dabe9a9e97d930dd4f6 Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Fri, 21 Feb 2014 16:03:12 +0100
+Subject: perf/x86: Fix event scheduling
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+commit 26e61e8939b1fe8729572dabe9a9e97d930dd4f6 upstream.
+
+Vince "Super Tester" Weaver reported a new round of syscall fuzzing (Trinity) failures,
+with perf WARN_ON()s triggering. He also provided traces of the failures.
+
+This is I think the relevant bit:
+
+       >    pec_1076_warn-2804  [000] d...   147.926153: x86_pmu_disable: x86_pmu_disable
+       >    pec_1076_warn-2804  [000] d...   147.926153: x86_pmu_state: Events: {
+       >    pec_1076_warn-2804  [000] d...   147.926156: x86_pmu_state:   0: state: .R config: ffffffffffffffff (          (null))
+       >    pec_1076_warn-2804  [000] d...   147.926158: x86_pmu_state:   33: state: AR config: 0 (ffff88011ac99800)
+       >    pec_1076_warn-2804  [000] d...   147.926159: x86_pmu_state: }
+       >    pec_1076_warn-2804  [000] d...   147.926160: x86_pmu_state: n_events: 1, n_added: 0, n_txn: 1
+       >    pec_1076_warn-2804  [000] d...   147.926161: x86_pmu_state: Assignment: {
+       >    pec_1076_warn-2804  [000] d...   147.926162: x86_pmu_state:   0->33 tag: 1 config: 0 (ffff88011ac99800)
+       >    pec_1076_warn-2804  [000] d...   147.926163: x86_pmu_state: }
+       >    pec_1076_warn-2804  [000] d...   147.926166: collect_events: Adding event: 1 (ffff880119ec8800)
+
+So we add the insn:p event (fd[23]).
+
+At this point we should have:
+
+  n_events = 2, n_added = 1, n_txn = 1
+
+       >    pec_1076_warn-2804  [000] d...   147.926170: collect_events: Adding event: 0 (ffff8800c9e01800)
+       >    pec_1076_warn-2804  [000] d...   147.926172: collect_events: Adding event: 4 (ffff8800cbab2c00)
+
+We try and add the {BP,cycles,br_insn} group (fd[3], fd[4], fd[15]).
+These events are 0:cycles and 4:br_insn, the BP event isn't x86_pmu so
+that's not visible.
+
+       group_sched_in()
+         pmu->start_txn() /* nop - BP pmu */
+         event_sched_in()
+            event->pmu->add()
+
+So here we should end up with:
+
+  0: n_events = 3, n_added = 2, n_txn = 2
+  4: n_events = 4, n_added = 3, n_txn = 3
+
+But seeing the below state on x86_pmu_enable(), they must have failed,
+because the 0 and 4 events aren't there anymore.
+
+Looking at group_sched_in(), since the BP is the leader, its
+event_sched_in() must have succeeded, for otherwise we would not have
+seen the sibling adds.
+
+But since neither 0 or 4 are in the below state; their event_sched_in()
+must have failed; but I don't see why, the complete state: 0,0,1:p,4
+fits perfectly fine on a core2.
+
+However, since we try and schedule 4 it means the 0 event must have
+succeeded!  Therefore the 4 event must have failed, its failure will
+have put group_sched_in() into the fail path, which will call:
+
+       event_sched_out()
+         event->pmu->del()
+
+on 0 and the BP event.
+
+Now x86_pmu_del() will reduce n_events; but it will not reduce n_added;
+giving what we see below:
+
+ n_event = 2, n_added = 2, n_txn = 2
+
+       >    pec_1076_warn-2804  [000] d...   147.926177: x86_pmu_enable: x86_pmu_enable
+       >    pec_1076_warn-2804  [000] d...   147.926177: x86_pmu_state: Events: {
+       >    pec_1076_warn-2804  [000] d...   147.926179: x86_pmu_state:   0: state: .R config: ffffffffffffffff (          (null))
+       >    pec_1076_warn-2804  [000] d...   147.926181: x86_pmu_state:   33: state: AR config: 0 (ffff88011ac99800)
+       >    pec_1076_warn-2804  [000] d...   147.926182: x86_pmu_state: }
+       >    pec_1076_warn-2804  [000] d...   147.926184: x86_pmu_state: n_events: 2, n_added: 2, n_txn: 2
+       >    pec_1076_warn-2804  [000] d...   147.926184: x86_pmu_state: Assignment: {
+       >    pec_1076_warn-2804  [000] d...   147.926186: x86_pmu_state:   0->33 tag: 1 config: 0 (ffff88011ac99800)
+       >    pec_1076_warn-2804  [000] d...   147.926188: x86_pmu_state:   1->0 tag: 1 config: 1 (ffff880119ec8800)
+       >    pec_1076_warn-2804  [000] d...   147.926188: x86_pmu_state: }
+       >    pec_1076_warn-2804  [000] d...   147.926190: x86_pmu_enable: S0: hwc->idx: 33, hwc->last_cpu: 0, hwc->last_tag: 1 hwc->state: 0
+
+So the problem is that x86_pmu_del(), when called from a
+group_sched_in() that fails (for whatever reason), and without x86_pmu
+TXN support (because the leader is !x86_pmu), will corrupt the n_added
+state.
+
+Reported-and-Tested-by: Vince Weaver <vincent.weaver@maine.edu>
+Signed-off-by: Peter Zijlstra <peterz@infradead.org>
+Cc: Paul Mackerras <paulus@samba.org>
+Cc: Steven Rostedt <rostedt@goodmis.org>
+Cc: Stephane Eranian <eranian@google.com>
+Cc: Dave Jones <davej@redhat.com>
+Link: http://lkml.kernel.org/r/20140221150312.GF3104@twins.programming.kicks-ass.net
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/cpu/perf_event.c |    3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/arch/x86/kernel/cpu/perf_event.c
++++ b/arch/x86/kernel/cpu/perf_event.c
+@@ -1192,6 +1192,9 @@ static void x86_pmu_del(struct perf_even
+       for (i = 0; i < cpuc->n_events; i++) {
+               if (event == cpuc->event_list[i]) {
++                      if (i >= cpuc->n_events - cpuc->n_added)
++                              --cpuc->n_added;
++
+                       if (x86_pmu.put_event_constraints)
+                               x86_pmu.put_event_constraints(cpuc, event);
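
The one-line fix above decrements n_added as well whenever the event being removed sits in the tail of the list occupied by the not-yet-committed batch (i >= n_events - n_added); otherwise a failed group_sched_in() leaves n_added counting events that are no longer in the list. A user-space sketch of that bookkeeping, reduced to the two counters and an event array (structure and field names are simplified stand-ins):

#include <stdio.h>

#define MAX_EVENTS 8

struct cpu_hw_events {
    int n_events;                   /* events currently collected */
    int n_added;                    /* of those, added since last enable */
    int event_list[MAX_EVENTS];     /* event ids, newest at the tail */
};

static void collect_event(struct cpu_hw_events *c, int id)
{
    c->event_list[c->n_events++] = id;
    c->n_added++;
}

static void del_event(struct cpu_hw_events *c, int id)
{
    for (int i = 0; i < c->n_events; i++) {
        if (c->event_list[i] != id)
            continue;
        /* The fix: an uncommitted event must also leave n_added. */
        if (i >= c->n_events - c->n_added)
            --c->n_added;
        for (int j = i; j < c->n_events - 1; j++)
            c->event_list[j] = c->event_list[j + 1];
        c->n_events--;
        return;
    }
}

int main(void)
{
    struct cpu_hw_events c = { 0 };

    collect_event(&c, 33);          /* committed in an earlier transaction */
    c.n_added = 0;                  /* pretend x86_pmu_enable() ran */
    collect_event(&c, 0);           /* part of a group that will fail */
    del_event(&c, 0);               /* rollback path */
    printf("n_events=%d n_added=%d\n", c.n_events, c.n_added);  /* 1, 0 */
    return 0;
}
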
diff --git a/queue-3.13/powerpc-crashdump-fix-page-frame-number-check-in-copy_oldmem_page.patch b/queue-3.13/powerpc-crashdump-fix-page-frame-number-check-in-copy_oldmem_page.patch
new file mode 100644 (file)
index 0000000..f53d36b
--- /dev/null
@@ -0,0 +1,56 @@
+From f5295bd8ea8a65dc5eac608b151386314cb978f1 Mon Sep 17 00:00:00 2001
+From: Laurent Dufour <ldufour@linux.vnet.ibm.com>
+Date: Mon, 24 Feb 2014 17:30:55 +0100
+Subject: powerpc/crashdump : Fix page frame number check in copy_oldmem_page
+
+From: Laurent Dufour <ldufour@linux.vnet.ibm.com>
+
+commit f5295bd8ea8a65dc5eac608b151386314cb978f1 upstream.
+
+In copy_oldmem_page, the current check using max_pfn and min_low_pfn to
+decide if the page is backed or not, is not valid when the memory layout is
+not continuous.
+
+This happens when running as a QEMU/KVM guest, where RTAS is mapped higher
+in the memory. In that case max_pfn points to the end of RTAS, and a hole
+between the end of the kdump kernel and RTAS is not backed by PTEs. As a
+consequence, the kdump kernel is crashing in copy_oldmem_page when accessing
+in a direct way the pages in that hole.
+
+This fix relies on the memblock's service memblock_is_region_memory to
+check if the read page is part or not of the directly accessible memory.
+
+Signed-off-by: Laurent Dufour <ldufour@linux.vnet.ibm.com>
+Tested-by: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
+Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kernel/crash_dump.c |    8 +++++---
+ 1 file changed, 5 insertions(+), 3 deletions(-)
+
+--- a/arch/powerpc/kernel/crash_dump.c
++++ b/arch/powerpc/kernel/crash_dump.c
+@@ -98,17 +98,19 @@ ssize_t copy_oldmem_page(unsigned long p
+                       size_t csize, unsigned long offset, int userbuf)
+ {
+       void  *vaddr;
++      phys_addr_t paddr;
+       if (!csize)
+               return 0;
+       csize = min_t(size_t, csize, PAGE_SIZE);
++      paddr = pfn << PAGE_SHIFT;
+-      if ((min_low_pfn < pfn) && (pfn < max_pfn)) {
+-              vaddr = __va(pfn << PAGE_SHIFT);
++      if (memblock_is_region_memory(paddr, csize)) {
++              vaddr = __va(paddr);
+               csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf);
+       } else {
+-              vaddr = __ioremap(pfn << PAGE_SHIFT, PAGE_SIZE, 0);
++              vaddr = __ioremap(paddr, PAGE_SIZE, 0);
+               csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf);
+               iounmap(vaddr);
+       }
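
The change above replaces the open interval test on the pfn with a check that the whole physical range [paddr, paddr + csize) is directly mapped memory. The sketch below shows, in user space, why the interval test is not enough when the layout has a hole; the region list, hole layout and PAGE_SHIFT value are made up for illustration.

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>
#include <stddef.h>

#define PAGE_SHIFT 12

struct region { uint64_t base, size; };

/* Illustrative memory map with a hole between the two regions. */
static const struct region mem[] = {
    { 0x00000000, 0x10000000 },     /* kdump kernel */
    { 0x20000000, 0x01000000 },     /* RTAS, mapped higher */
};

/* Stand-in for memblock_is_region_memory(). */
static bool region_is_memory(uint64_t base, uint64_t size)
{
    for (size_t i = 0; i < sizeof(mem) / sizeof(mem[0]); i++)
        if (base >= mem[i].base &&
            base + size <= mem[i].base + mem[i].size)
            return true;
    return false;
}

int main(void)
{
    uint64_t min_low_pfn = mem[0].base >> PAGE_SHIFT;
    uint64_t max_pfn = (mem[1].base + mem[1].size) >> PAGE_SHIFT;
    uint64_t pfn = 0x18000;                 /* page inside the hole */
    uint64_t paddr = pfn << PAGE_SHIFT;

    printf("old check says backed: %d\n",
           min_low_pfn < pfn && pfn < max_pfn);     /* 1: wrong */
    printf("new check says backed: %d\n",
           region_is_memory(paddr, 4096));          /* 0: correct */
    return 0;
}
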
diff --git a/queue-3.13/powerpc-increase-stack-redzone-for-64-bit-userspace-to-512-bytes.patch b/queue-3.13/powerpc-increase-stack-redzone-for-64-bit-userspace-to-512-bytes.patch
new file mode 100644 (file)
index 0000000..6bc03cd
--- /dev/null
@@ -0,0 +1,105 @@
+From 573ebfa6601fa58b439e7f15828762839ccd306a Mon Sep 17 00:00:00 2001
+From: Paul Mackerras <paulus@samba.org>
+Date: Wed, 26 Feb 2014 17:07:38 +1100
+Subject: powerpc: Increase stack redzone for 64-bit userspace to 512 bytes
+
+From: Paul Mackerras <paulus@samba.org>
+
+commit 573ebfa6601fa58b439e7f15828762839ccd306a upstream.
+
+The new ELFv2 little-endian ABI increases the stack redzone -- the
+area below the stack pointer that can be used for storing data --
+from 288 bytes to 512 bytes.  This means that we need to allow more
+space on the user stack when delivering a signal to a 64-bit process.
+
+To make the code a bit clearer, we define new USER_REDZONE_SIZE and
+KERNEL_REDZONE_SIZE symbols in ptrace.h.  For now, we leave the
+kernel redzone size at 288 bytes, since increasing it to 512 bytes
+would increase the size of interrupt stack frames correspondingly.
+
+Gcc currently only makes use of 288 bytes of redzone even when
+compiling for the new little-endian ABI, and the kernel cannot
+currently be compiled with the new ABI anyway.
+
+In the future, hopefully gcc will provide an option to control the
+amount of redzone used, and then we could reduce it even more.
+
+This also changes the code in arch_compat_alloc_user_space() to
+preserve the expanded redzone.  It is not clear why this function would
+ever be used on a 64-bit process, though.
+
+Signed-off-by: Paul Mackerras <paulus@samba.org>
+Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/include/asm/compat.h |    5 +++--
+ arch/powerpc/include/asm/ptrace.h |   16 +++++++++++++++-
+ arch/powerpc/kernel/signal_64.c   |    4 ++--
+ 3 files changed, 20 insertions(+), 5 deletions(-)
+
+--- a/arch/powerpc/include/asm/compat.h
++++ b/arch/powerpc/include/asm/compat.h
+@@ -200,10 +200,11 @@ static inline void __user *arch_compat_a
+       /*
+        * We can't access below the stack pointer in the 32bit ABI and
+-       * can access 288 bytes in the 64bit ABI
++       * can access 288 bytes in the 64bit big-endian ABI,
++       * or 512 bytes with the new ELFv2 little-endian ABI.
+        */
+       if (!is_32bit_task())
+-              usp -= 288;
++              usp -= USER_REDZONE_SIZE;
+       return (void __user *) (usp - len);
+ }
+--- a/arch/powerpc/include/asm/ptrace.h
++++ b/arch/powerpc/include/asm/ptrace.h
+@@ -28,11 +28,23 @@
+ #ifdef __powerpc64__
++/*
++ * Size of redzone that userspace is allowed to use below the stack
++ * pointer.  This is 288 in the 64-bit big-endian ELF ABI, and 512 in
++ * the new ELFv2 little-endian ABI, so we allow the larger amount.
++ *
++ * For kernel code we allow a 288-byte redzone, in order to conserve
++ * kernel stack space; gcc currently only uses 288 bytes, and will
++ * hopefully allow explicit control of the redzone size in future.
++ */
++#define USER_REDZONE_SIZE     512
++#define KERNEL_REDZONE_SIZE   288
++
+ #define STACK_FRAME_OVERHEAD  112     /* size of minimum stack frame */
+ #define STACK_FRAME_LR_SAVE   2       /* Location of LR in stack frame */
+ #define STACK_FRAME_REGS_MARKER       ASM_CONST(0x7265677368657265)
+ #define STACK_INT_FRAME_SIZE  (sizeof(struct pt_regs) + \
+-                                      STACK_FRAME_OVERHEAD + 288)
++                               STACK_FRAME_OVERHEAD + KERNEL_REDZONE_SIZE)
+ #define STACK_FRAME_MARKER    12
+ /* Size of dummy stack frame allocated when calling signal handler. */
+@@ -41,6 +53,8 @@
+ #else /* __powerpc64__ */
++#define USER_REDZONE_SIZE     0
++#define KERNEL_REDZONE_SIZE   0
+ #define STACK_FRAME_OVERHEAD  16      /* size of minimum stack frame */
+ #define STACK_FRAME_LR_SAVE   1       /* Location of LR in stack frame */
+ #define STACK_FRAME_REGS_MARKER       ASM_CONST(0x72656773)
+--- a/arch/powerpc/kernel/signal_64.c
++++ b/arch/powerpc/kernel/signal_64.c
+@@ -65,8 +65,8 @@ struct rt_sigframe {
+       struct siginfo __user *pinfo;
+       void __user *puc;
+       struct siginfo info;
+-      /* 64 bit ABI allows for 288 bytes below sp before decrementing it. */
+-      char abigap[288];
++      /* New 64 bit little-endian ABI allows redzone of 512 bytes below sp */
++      char abigap[USER_REDZONE_SIZE];
+ } __attribute__ ((aligned (16)));
+ static const char fmt32[] = KERN_INFO \
diff --git a/queue-3.13/powerpc-le-ensure-that-the-stop-self-rtas-token-is-handled-correctly.patch b/queue-3.13/powerpc-le-ensure-that-the-stop-self-rtas-token-is-handled-correctly.patch
new file mode 100644 (file)
index 0000000..68f3e2c
--- /dev/null
@@ -0,0 +1,81 @@
+From 41dd03a94c7d408d2ef32530545097f7d1befe5c Mon Sep 17 00:00:00 2001
+From: Tony Breeds <tony@bakeyournoodle.com>
+Date: Thu, 20 Feb 2014 21:13:52 +1100
+Subject: powerpc/le: Ensure that the 'stop-self' RTAS token is handled correctly
+
+From: Tony Breeds <tony@bakeyournoodle.com>
+
+commit 41dd03a94c7d408d2ef32530545097f7d1befe5c upstream.
+
+Currently we're storing a host endian RTAS token in
+rtas_stop_self_args.token.  We then pass that directly to rtas.  This is
+fine on big endian however on little endian the token is not what we
+expect.
+
+This will typically result in hitting:
+       panic("Alas, I survived.\n");
+
+To fix this we always use the stop-self token in host order and always
+convert it to be32 before passing this to rtas.
+
+Signed-off-by: Tony Breeds <tony@bakeyournoodle.com>
+Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/platforms/pseries/hotplug-cpu.c |   22 +++++++++++-----------
+ 1 file changed, 11 insertions(+), 11 deletions(-)
+
+--- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
++++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
+@@ -35,12 +35,7 @@
+ #include "offline_states.h"
+ /* This version can't take the spinlock, because it never returns */
+-static struct rtas_args rtas_stop_self_args = {
+-      .token = RTAS_UNKNOWN_SERVICE,
+-      .nargs = 0,
+-      .nret = 1,
+-      .rets = &rtas_stop_self_args.args[0],
+-};
++static int rtas_stop_self_token = RTAS_UNKNOWN_SERVICE;
+ static DEFINE_PER_CPU(enum cpu_state_vals, preferred_offline_state) =
+                                                       CPU_STATE_OFFLINE;
+@@ -93,15 +88,20 @@ void set_default_offline_state(int cpu)
+ static void rtas_stop_self(void)
+ {
+-      struct rtas_args *args = &rtas_stop_self_args;
++      struct rtas_args args = {
++              .token = cpu_to_be32(rtas_stop_self_token),
++              .nargs = 0,
++              .nret = 1,
++              .rets = &args.args[0],
++      };
+       local_irq_disable();
+-      BUG_ON(args->token == RTAS_UNKNOWN_SERVICE);
++      BUG_ON(rtas_stop_self_token == RTAS_UNKNOWN_SERVICE);
+       printk("cpu %u (hwid %u) Ready to die...\n",
+              smp_processor_id(), hard_smp_processor_id());
+-      enter_rtas(__pa(args));
++      enter_rtas(__pa(&args));
+       panic("Alas, I survived.\n");
+ }
+@@ -392,10 +392,10 @@ static int __init pseries_cpu_hotplug_in
+               }
+       }
+-      rtas_stop_self_args.token = rtas_token("stop-self");
++      rtas_stop_self_token = rtas_token("stop-self");
+       qcss_tok = rtas_token("query-cpu-stopped-state");
+-      if (rtas_stop_self_args.token == RTAS_UNKNOWN_SERVICE ||
++      if (rtas_stop_self_token == RTAS_UNKNOWN_SERVICE ||
+                       qcss_tok == RTAS_UNKNOWN_SERVICE) {
+               printk(KERN_INFO "CPU Hotplug not supported by firmware "
+                               "- disabling.\n");
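
The key detail in the patch above is that the RTAS argument block carries big-endian values, so on a little-endian kernel the token must go through cpu_to_be32() at call time while the comparison against RTAS_UNKNOWN_SERVICE stays in host order. A small user-space illustration of the difference using glibc's htobe32(); the token value is made up.

#include <endian.h>
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t token = 0x1234;            /* host-order "stop-self" token (made up) */
    uint32_t wire  = htobe32(token);    /* what goes into rtas_args.token */

    /* On a little-endian machine these differ; passing the raw host
     * value hands RTAS the wrong token and the CPU fails to stop. */
    printf("host order: 0x%08x\n", token);
    printf("big endian: 0x%08x\n", wire);
    return 0;
}
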
diff --git a/queue-3.13/powerpc-powernv-fix-indirect-xscom-unmangling.patch b/queue-3.13/powerpc-powernv-fix-indirect-xscom-unmangling.patch
new file mode 100644 (file)
index 0000000..4df2794
--- /dev/null
@@ -0,0 +1,75 @@
+From e0cf957614976896111e676e5134ac98ee227d3d Mon Sep 17 00:00:00 2001
+From: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Date: Fri, 28 Feb 2014 16:20:38 +1100
+Subject: powerpc/powernv: Fix indirect XSCOM unmangling
+
+From: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+
+commit e0cf957614976896111e676e5134ac98ee227d3d upstream.
+
+We need to unmangle the full address, not just the register
+number, and we also need to support the real indirect bit
+being set for in-kernel uses.
+
+Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/platforms/powernv/opal-xscom.c |   21 ++++++++++++---------
+ 1 file changed, 12 insertions(+), 9 deletions(-)
+
+--- a/arch/powerpc/platforms/powernv/opal-xscom.c
++++ b/arch/powerpc/platforms/powernv/opal-xscom.c
+@@ -71,11 +71,11 @@ static int opal_xscom_err_xlate(int64_t
+       }
+ }
+-static u64 opal_scom_unmangle(u64 reg)
++static u64 opal_scom_unmangle(u64 addr)
+ {
+       /*
+        * XSCOM indirect addresses have the top bit set. Additionally
+-       * the reset of the top 3 nibbles is always 0.
++       * the rest of the top 3 nibbles is always 0.
+        *
+        * Because the debugfs interface uses signed offsets and shifts
+        * the address left by 3, we basically cannot use the top 4 bits
+@@ -86,10 +86,13 @@ static u64 opal_scom_unmangle(u64 reg)
+        * conversion here. To leave room for further xscom address
+        * expansion, we only clear out the top byte
+        *
++       * For in-kernel use, we also support the real indirect bit, so
++       * we test for any of the top 5 bits
++       *
+        */
+-      if (reg & (1ull << 59))
+-              reg = (reg & ~(0xffull << 56)) | (1ull << 63);
+-      return reg;
++      if (addr & (0x1full << 59))
++              addr = (addr & ~(0xffull << 56)) | (1ull << 63);
++      return addr;
+ }
+ static int opal_scom_read(scom_map_t map, u64 reg, u64 *value)
+@@ -98,8 +101,8 @@ static int opal_scom_read(scom_map_t map
+       int64_t rc;
+       __be64 v;
+-      reg = opal_scom_unmangle(reg);
+-      rc = opal_xscom_read(m->chip, m->addr + reg, (__be64 *)__pa(&v));
++      reg = opal_scom_unmangle(m->addr + reg);
++      rc = opal_xscom_read(m->chip, reg, (__be64 *)__pa(&v));
+       *value = be64_to_cpu(v);
+       return opal_xscom_err_xlate(rc);
+ }
+@@ -109,8 +112,8 @@ static int opal_scom_write(scom_map_t ma
+       struct opal_scom_map *m = map;
+       int64_t rc;
+-      reg = opal_scom_unmangle(reg);
+-      rc = opal_xscom_write(m->chip, m->addr + reg, value);
++      reg = opal_scom_unmangle(m->addr + reg);
++      rc = opal_xscom_write(m->chip, reg, value);
+       return opal_xscom_err_xlate(rc);
+ }
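
opal_scom_unmangle() is a pure function on a 64-bit address, so the new behaviour is easy to exercise on its own. The sketch below copies the patched logic into user space and feeds it both encodings: a debugfs-mangled indirect address (bit 59 set) and a real indirect address (bit 63 set). It also shows why the full address, not just the register offset, must be unmangled: the indirect bit lives in the top byte of the combined value. The test addresses are arbitrary.

#include <stdio.h>
#include <stdint.h>

/* Same logic as the patched opal_scom_unmangle(). */
static uint64_t scom_unmangle(uint64_t addr)
{
    /* Any of the top 5 bits set means "indirect": clear the top byte
     * and set the real indirect bit (bit 63). */
    if (addr & (0x1fULL << 59))
        addr = (addr & ~(0xffULL << 56)) | (1ULL << 63);
    return addr;
}

int main(void)
{
    uint64_t mangled  = (1ULL << 59) | 0x12345;     /* debugfs-mangled form */
    uint64_t indirect = (1ULL << 63) | 0x12345;     /* real indirect bit set */

    printf("mangled  -> %016llx\n", (unsigned long long)scom_unmangle(mangled));
    printf("indirect -> %016llx\n", (unsigned long long)scom_unmangle(indirect));
    /* Both print 8000000000012345: one canonical indirect address. */
    return 0;
}
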
diff --git a/queue-3.13/powerpc-powernv-fix-opal_xscom_-read-write-prototype.patch b/queue-3.13/powerpc-powernv-fix-opal_xscom_-read-write-prototype.patch
new file mode 100644 (file)
index 0000000..e6bf22c
--- /dev/null
@@ -0,0 +1,33 @@
+From 2f3f38e4d3d03dd4125cc9a1f49ab3cc91d8d670 Mon Sep 17 00:00:00 2001
+From: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Date: Fri, 28 Feb 2014 16:20:29 +1100
+Subject: powerpc/powernv: Fix opal_xscom_{read,write} prototype
+
+From: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+
+commit 2f3f38e4d3d03dd4125cc9a1f49ab3cc91d8d670 upstream.
+
+The OPAL firmware functions opal_xscom_read and opal_xscom_write
+take a 64-bit argument for the XSCOM (PCB) address in order to
+support the indirect mode on P8.
+
+Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/include/asm/opal.h |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/powerpc/include/asm/opal.h
++++ b/arch/powerpc/include/asm/opal.h
+@@ -720,8 +720,8 @@ int64_t opal_pci_next_error(uint64_t phb
+ int64_t opal_pci_poll(uint64_t phb_id);
+ int64_t opal_return_cpu(void);
+-int64_t opal_xscom_read(uint32_t gcid, uint32_t pcb_addr, __be64 *val);
+-int64_t opal_xscom_write(uint32_t gcid, uint32_t pcb_addr, uint64_t val);
++int64_t opal_xscom_read(uint32_t gcid, uint64_t pcb_addr, __be64 *val);
++int64_t opal_xscom_write(uint32_t gcid, uint64_t pcb_addr, uint64_t val);
+ int64_t opal_lpc_write(uint32_t chip_id, enum OpalLPCAddressType addr_type,
+                      uint32_t addr, uint32_t data, uint32_t sz);
diff --git a/queue-3.13/revert-writeback-do-not-sync-data-dirtied-after-sync-start.patch b/queue-3.13/revert-writeback-do-not-sync-data-dirtied-after-sync-start.patch
new file mode 100644 (file)
index 0000000..5c6b4e1
--- /dev/null
@@ -0,0 +1,224 @@
+From 0dc83bd30b0bf5410c0933cfbbf8853248eff0a9 Mon Sep 17 00:00:00 2001
+From: Jan Kara <jack@suse.cz>
+Date: Fri, 21 Feb 2014 11:19:04 +0100
+Subject: Revert "writeback: do not sync data dirtied after sync start"
+
+From: Jan Kara <jack@suse.cz>
+
+commit 0dc83bd30b0bf5410c0933cfbbf8853248eff0a9 upstream.
+
+This reverts commit c4a391b53a72d2df4ee97f96f78c1d5971b47489. Dave
+Chinner <david@fromorbit.com> has reported the commit may cause some
+inodes to be left out from sync(2). This is because we can call
+redirty_tail() for some inode (which sets i_dirtied_when to current time)
+after sync(2) has started or similarly requeue_inode() can set
+i_dirtied_when to current time if writeback had to skip some pages. The
+real problem is in the functions clobbering i_dirtied_when but fixing
+that isn't trivial so revert is a safer choice for now.
+
+Signed-off-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/fs-writeback.c                |   33 +++++++++++----------------------
+ fs/sync.c                        |   15 ++++++---------
+ fs/xfs/xfs_super.c               |    2 +-
+ include/linux/writeback.h        |    2 +-
+ include/trace/events/writeback.h |    6 +++---
+ 5 files changed, 22 insertions(+), 36 deletions(-)
+
+--- a/fs/fs-writeback.c
++++ b/fs/fs-writeback.c
+@@ -40,18 +40,13 @@
+ struct wb_writeback_work {
+       long nr_pages;
+       struct super_block *sb;
+-      /*
+-       * Write only inodes dirtied before this time. Don't forget to set
+-       * older_than_this_is_set when you set this.
+-       */
+-      unsigned long older_than_this;
++      unsigned long *older_than_this;
+       enum writeback_sync_modes sync_mode;
+       unsigned int tagged_writepages:1;
+       unsigned int for_kupdate:1;
+       unsigned int range_cyclic:1;
+       unsigned int for_background:1;
+       unsigned int for_sync:1;        /* sync(2) WB_SYNC_ALL writeback */
+-      unsigned int older_than_this_is_set:1;
+       enum wb_reason reason;          /* why was writeback initiated? */
+       struct list_head list;          /* pending work list */
+@@ -252,10 +247,10 @@ static int move_expired_inodes(struct li
+       int do_sb_sort = 0;
+       int moved = 0;
+-      WARN_ON_ONCE(!work->older_than_this_is_set);
+       while (!list_empty(delaying_queue)) {
+               inode = wb_inode(delaying_queue->prev);
+-              if (inode_dirtied_after(inode, work->older_than_this))
++              if (work->older_than_this &&
++                  inode_dirtied_after(inode, *work->older_than_this))
+                       break;
+               list_move(&inode->i_wb_list, &tmp);
+               moved++;
+@@ -742,8 +737,6 @@ static long writeback_inodes_wb(struct b
+               .sync_mode      = WB_SYNC_NONE,
+               .range_cyclic   = 1,
+               .reason         = reason,
+-              .older_than_this = jiffies,
+-              .older_than_this_is_set = 1,
+       };
+       spin_lock(&wb->list_lock);
+@@ -802,13 +795,12 @@ static long wb_writeback(struct bdi_writ
+ {
+       unsigned long wb_start = jiffies;
+       long nr_pages = work->nr_pages;
++      unsigned long oldest_jif;
+       struct inode *inode;
+       long progress;
+-      if (!work->older_than_this_is_set) {
+-              work->older_than_this = jiffies;
+-              work->older_than_this_is_set = 1;
+-      }
++      oldest_jif = jiffies;
++      work->older_than_this = &oldest_jif;
+       spin_lock(&wb->list_lock);
+       for (;;) {
+@@ -842,10 +834,10 @@ static long wb_writeback(struct bdi_writ
+                * safe.
+                */
+               if (work->for_kupdate) {
+-                      work->older_than_this = jiffies -
++                      oldest_jif = jiffies -
+                               msecs_to_jiffies(dirty_expire_interval * 10);
+               } else if (work->for_background)
+-                      work->older_than_this = jiffies;
++                      oldest_jif = jiffies;
+               trace_writeback_start(wb->bdi, work);
+               if (list_empty(&wb->b_io))
+@@ -1357,21 +1349,18 @@ EXPORT_SYMBOL(try_to_writeback_inodes_sb
+ /**
+  * sync_inodes_sb     -       sync sb inode pages
+- * @sb:                       the superblock
+- * @older_than_this:  timestamp
++ * @sb: the superblock
+  *
+  * This function writes and waits on any dirty inode belonging to this
+- * superblock that has been dirtied before given timestamp.
++ * super_block.
+  */
+-void sync_inodes_sb(struct super_block *sb, unsigned long older_than_this)
++void sync_inodes_sb(struct super_block *sb)
+ {
+       DECLARE_COMPLETION_ONSTACK(done);
+       struct wb_writeback_work work = {
+               .sb             = sb,
+               .sync_mode      = WB_SYNC_ALL,
+               .nr_pages       = LONG_MAX,
+-              .older_than_this = older_than_this,
+-              .older_than_this_is_set = 1,
+               .range_cyclic   = 0,
+               .done           = &done,
+               .reason         = WB_REASON_SYNC,
+--- a/fs/sync.c
++++ b/fs/sync.c
+@@ -27,11 +27,10 @@
+  * wait == 1 case since in that case write_inode() functions do
+  * sync_dirty_buffer() and thus effectively write one block at a time.
+  */
+-static int __sync_filesystem(struct super_block *sb, int wait,
+-                           unsigned long start)
++static int __sync_filesystem(struct super_block *sb, int wait)
+ {
+       if (wait)
+-              sync_inodes_sb(sb, start);
++              sync_inodes_sb(sb);
+       else
+               writeback_inodes_sb(sb, WB_REASON_SYNC);
+@@ -48,7 +47,6 @@ static int __sync_filesystem(struct supe
+ int sync_filesystem(struct super_block *sb)
+ {
+       int ret;
+-      unsigned long start = jiffies;
+       /*
+        * We need to be protected against the filesystem going from
+@@ -62,17 +60,17 @@ int sync_filesystem(struct super_block *
+       if (sb->s_flags & MS_RDONLY)
+               return 0;
+-      ret = __sync_filesystem(sb, 0, start);
++      ret = __sync_filesystem(sb, 0);
+       if (ret < 0)
+               return ret;
+-      return __sync_filesystem(sb, 1, start);
++      return __sync_filesystem(sb, 1);
+ }
+ EXPORT_SYMBOL_GPL(sync_filesystem);
+ static void sync_inodes_one_sb(struct super_block *sb, void *arg)
+ {
+       if (!(sb->s_flags & MS_RDONLY))
+-              sync_inodes_sb(sb, *((unsigned long *)arg));
++              sync_inodes_sb(sb);
+ }
+ static void sync_fs_one_sb(struct super_block *sb, void *arg)
+@@ -104,10 +102,9 @@ static void fdatawait_one_bdev(struct bl
+ SYSCALL_DEFINE0(sync)
+ {
+       int nowait = 0, wait = 1;
+-      unsigned long start = jiffies;
+       wakeup_flusher_threads(0, WB_REASON_SYNC);
+-      iterate_supers(sync_inodes_one_sb, &start);
++      iterate_supers(sync_inodes_one_sb, NULL);
+       iterate_supers(sync_fs_one_sb, &nowait);
+       iterate_supers(sync_fs_one_sb, &wait);
+       iterate_bdevs(fdatawrite_one_bdev, NULL);
+--- a/fs/xfs/xfs_super.c
++++ b/fs/xfs/xfs_super.c
+@@ -913,7 +913,7 @@ xfs_flush_inodes(
+       struct super_block      *sb = mp->m_super;
+       if (down_read_trylock(&sb->s_umount)) {
+-              sync_inodes_sb(sb, jiffies);
++              sync_inodes_sb(sb);
+               up_read(&sb->s_umount);
+       }
+ }
+--- a/include/linux/writeback.h
++++ b/include/linux/writeback.h
+@@ -97,7 +97,7 @@ void writeback_inodes_sb_nr(struct super
+ int try_to_writeback_inodes_sb(struct super_block *, enum wb_reason reason);
+ int try_to_writeback_inodes_sb_nr(struct super_block *, unsigned long nr,
+                                 enum wb_reason reason);
+-void sync_inodes_sb(struct super_block *sb, unsigned long older_than_this);
++void sync_inodes_sb(struct super_block *);
+ void wakeup_flusher_threads(long nr_pages, enum wb_reason reason);
+ void inode_wait_for_writeback(struct inode *inode);
+--- a/include/trace/events/writeback.h
++++ b/include/trace/events/writeback.h
+@@ -287,11 +287,11 @@ TRACE_EVENT(writeback_queue_io,
+               __field(int,            reason)
+       ),
+       TP_fast_assign(
+-              unsigned long older_than_this = work->older_than_this;
++              unsigned long *older_than_this = work->older_than_this;
+               strncpy(__entry->name, dev_name(wb->bdi->dev), 32);
+-              __entry->older  = older_than_this;
++              __entry->older  = older_than_this ?  *older_than_this : 0;
+               __entry->age    = older_than_this ?
+-                                (jiffies - older_than_this) * 1000 / HZ : -1;
++                                (jiffies - *older_than_this) * 1000 / HZ : -1;
+               __entry->moved  = moved;
+               __entry->reason = work->reason;
+       ),
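
After the revert, the cutoff is again an optional pointer: wb_writeback() points work->older_than_this at an on-stack jiffies value for kupdate/background writeback, and a NULL pointer means "no cutoff, write everything", which is what sync(2) relies on. A simplified user-space sketch of that idiom with plain timestamps (names shortened, semantics reduced to the one test in move_expired_inodes()):

#include <stdio.h>
#include <stdbool.h>

struct work {
    unsigned long *older_than_this;     /* NULL: no cutoff */
};

/* Mirrors the post-revert test: write unless a cutoff exists and the
 * entry was dirtied after it. */
static bool should_write(const struct work *w, unsigned long dirtied_when)
{
    return !w->older_than_this || dirtied_when <= *w->older_than_this;
}

int main(void)
{
    unsigned long oldest_jif = 100;     /* stack variable, as in wb_writeback() */
    struct work kupdate = { .older_than_this = &oldest_jif };
    struct work sync_all = { .older_than_this = NULL };

    printf("kupdate, inode dirtied at 150: %s\n",
           should_write(&kupdate, 150) ? "write" : "skip");     /* skip */
    printf("sync,    inode dirtied at 150: %s\n",
           should_write(&sync_all, 150) ? "write" : "skip");    /* write */
    return 0;
}
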
diff --git a/queue-3.13/sata_sil-apply-mod15write-quirk-to-toshiba-mk2561gsyn.patch b/queue-3.13/sata_sil-apply-mod15write-quirk-to-toshiba-mk2561gsyn.patch
new file mode 100644 (file)
index 0000000..ec9148c
--- /dev/null
@@ -0,0 +1,36 @@
+From 9f9c47f00ce99329b1a82e2ac4f70f0fe3db549c Mon Sep 17 00:00:00 2001
+From: Tejun Heo <tj@kernel.org>
+Date: Mon, 3 Feb 2014 10:42:07 -0500
+Subject: sata_sil: apply MOD15WRITE quirk to TOSHIBA MK2561GSYN
+
+From: Tejun Heo <tj@kernel.org>
+
+commit 9f9c47f00ce99329b1a82e2ac4f70f0fe3db549c upstream.
+
+It's a bit odd to see a newer device showing mod15write; however, the
+reported behavior is highly consistent and other factors which could
+contribute seem to have been verified well enough.  Also, both
+sata_sil itself and the drive are fairly outdated at this point making
+the risk of this change fairly low.  It is possible, probably likely,
+that other drive models in the same family have the same problem;
+however, for now, let's just add the specific model which was tested.
+
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Reported-by: matson <lists-matsonpa@luxsci.me>
+References: http://lkml.kernel.org/g/201401211912.s0LJCk7F015058@rs103.luxsci.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/ata/sata_sil.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/ata/sata_sil.c
++++ b/drivers/ata/sata_sil.c
+@@ -157,6 +157,7 @@ static const struct sil_drivelist {
+       { "ST380011ASL",        SIL_QUIRK_MOD15WRITE },
+       { "ST3120022ASL",       SIL_QUIRK_MOD15WRITE },
+       { "ST3160021ASL",       SIL_QUIRK_MOD15WRITE },
++      { "TOSHIBA MK2561GSYN", SIL_QUIRK_MOD15WRITE },
+       { "Maxtor 4D060H3",     SIL_QUIRK_UDMA5MAX },
+       { }
+ };
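
sata_sil applies the quirk through a model-string keyed table, so the fix is just one more entry. A tiny user-space sketch of the lookup; the flag bit values are illustrative.

#include <stdio.h>
#include <string.h>

#define QUIRK_MOD15WRITE (1u << 0)
#define QUIRK_UDMA5MAX   (1u << 1)

static const struct { const char *model; unsigned int quirk; } drivelist[] = {
    { "ST3160021ASL",       QUIRK_MOD15WRITE },
    { "TOSHIBA MK2561GSYN", QUIRK_MOD15WRITE },
    { "Maxtor 4D060H3",     QUIRK_UDMA5MAX },
    { NULL, 0 }
};

static unsigned int quirks_for(const char *model)
{
    for (int i = 0; drivelist[i].model; i++)
        if (!strcmp(drivelist[i].model, model))
            return drivelist[i].quirk;
    return 0;
}

int main(void)
{
    printf("quirks: 0x%x\n", quirks_for("TOSHIBA MK2561GSYN"));
    return 0;
}
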
diff --git a/queue-3.13/series b/queue-3.13/series
index 223f93333b571f16ac4ee3c16fce0241853d2d7e..9f1f29159359ebb0ea0b913c5b493551dbd53394 100644 (file)
@@ -94,3 +94,18 @@ asoc-sta32x-fix-cache-sync.patch
 asoc-sta32x-fix-wrong-enum-for-limiter2-release-rate.patch
 asoc-sta32x-fix-array-access-overflow.patch
 asoc-wm8958-dsp-fix-firmware-block-loading.patch
+sunrpc-fix-races-in-xs_nospace.patch
+sunrpc-ensure-that-gss_auth-isn-t-freed-before-its-upcall-messages.patch
+powerpc-increase-stack-redzone-for-64-bit-userspace-to-512-bytes.patch
+powerpc-le-ensure-that-the-stop-self-rtas-token-is-handled-correctly.patch
+powerpc-crashdump-fix-page-frame-number-check-in-copy_oldmem_page.patch
+powerpc-powernv-fix-opal_xscom_-read-write-prototype.patch
+powerpc-powernv-fix-indirect-xscom-unmangling.patch
+ahci-disable-ncq-on-samsung-pci-e-ssds-on-macbooks.patch
+x86-dma-mapping-fix-gfp_atomic-macro-usage.patch
+perf-trace-fix-ioctl-request-beautifier-build-problems-on-i386-x86_64-arches.patch
+perf-x86-fix-event-scheduling.patch
+ata-enable-quirk-from-jmicron-jmb350-for-jmb394.patch
+sata_sil-apply-mod15write-quirk-to-toshiba-mk2561gsyn.patch
+cpufreq-powernow-k8-initialize-per-cpu-data-structures-properly.patch
+revert-writeback-do-not-sync-data-dirtied-after-sync-start.patch
diff --git a/queue-3.13/sunrpc-ensure-that-gss_auth-isn-t-freed-before-its-upcall-messages.patch b/queue-3.13/sunrpc-ensure-that-gss_auth-isn-t-freed-before-its-upcall-messages.patch
new file mode 100644 (file)
index 0000000..d685d83
--- /dev/null
@@ -0,0 +1,81 @@
+From 9eb2ddb48ce3a7bd745c14a933112994647fa3cd Mon Sep 17 00:00:00 2001
+From: Trond Myklebust <trond.myklebust@primarydata.com>
+Date: Sun, 16 Feb 2014 12:14:13 -0500
+Subject: SUNRPC: Ensure that gss_auth isn't freed before its upcall messages
+
+From: Trond Myklebust <trond.myklebust@primarydata.com>
+
+commit 9eb2ddb48ce3a7bd745c14a933112994647fa3cd upstream.
+
+Fix a race in which the RPC client is shutting down while the
+gss daemon is processing a downcall. If the RPC client manages to
+shut down before the gss daemon is done, then the struct gss_auth
+used in gss_release_msg() may have already been freed.
+
+Link: http://lkml.kernel.org/r/1392494917.71728.YahooMailNeo@web140002.mail.bf1.yahoo.com
+Reported-by: John <da_audiophile@yahoo.com>
+Reported-by: Borislav Petkov <bp@alien8.de>
+Signed-off-by: Trond Myklebust <trond.myklebust@primarydata.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/sunrpc/auth_gss/auth_gss.c |   13 +++++++++++--
+ 1 file changed, 11 insertions(+), 2 deletions(-)
+
+--- a/net/sunrpc/auth_gss/auth_gss.c
++++ b/net/sunrpc/auth_gss/auth_gss.c
+@@ -108,6 +108,7 @@ struct gss_auth {
+ static DEFINE_SPINLOCK(pipe_version_lock);
+ static struct rpc_wait_queue pipe_version_rpc_waitqueue;
+ static DECLARE_WAIT_QUEUE_HEAD(pipe_version_waitqueue);
++static void gss_put_auth(struct gss_auth *gss_auth);
+ static void gss_free_ctx(struct gss_cl_ctx *);
+ static const struct rpc_pipe_ops gss_upcall_ops_v0;
+@@ -320,6 +321,7 @@ gss_release_msg(struct gss_upcall_msg *g
+       if (gss_msg->ctx != NULL)
+               gss_put_ctx(gss_msg->ctx);
+       rpc_destroy_wait_queue(&gss_msg->rpc_waitqueue);
++      gss_put_auth(gss_msg->auth);
+       kfree(gss_msg);
+ }
+@@ -500,6 +502,7 @@ gss_alloc_msg(struct gss_auth *gss_auth,
+               if (err)
+                       goto err_free_msg;
+       };
++      kref_get(&gss_auth->kref);
+       return gss_msg;
+ err_free_msg:
+       kfree(gss_msg);
+@@ -1071,6 +1074,12 @@ gss_free_callback(struct kref *kref)
+ }
+ static void
++gss_put_auth(struct gss_auth *gss_auth)
++{
++      kref_put(&gss_auth->kref, gss_free_callback);
++}
++
++static void
+ gss_destroy(struct rpc_auth *auth)
+ {
+       struct gss_auth *gss_auth = container_of(auth,
+@@ -1091,7 +1100,7 @@ gss_destroy(struct rpc_auth *auth)
+       gss_auth->gss_pipe[1] = NULL;
+       rpcauth_destroy_credcache(auth);
+-      kref_put(&gss_auth->kref, gss_free_callback);
++      gss_put_auth(gss_auth);
+ }
+ /*
+@@ -1262,7 +1271,7 @@ gss_destroy_nullcred(struct rpc_cred *cr
+       call_rcu(&cred->cr_rcu, gss_free_cred_callback);
+       if (ctx)
+               gss_put_ctx(ctx);
+-      kref_put(&gss_auth->kref, gss_free_callback);
++      gss_put_auth(gss_auth);
+ }
+ static void
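
The fix above is a standard reference-counting pattern: each upcall message pins the gss_auth it points to (kref_get() at allocation, gss_put_auth() at release), so the auth cannot be freed while a downcall is still in flight even if the RPC client shuts down first. A single-threaded user-space sketch of the same idea with a plain counter standing in for the kref (names invented for illustration):

#include <stdio.h>
#include <stdlib.h>

struct auth_like {
    int refcount;
};

struct upcall_msg {
    struct auth_like *auth;
};

static void auth_put(struct auth_like *a)
{
    if (--a->refcount == 0) {
        printf("auth freed\n");
        free(a);
    }
}

static struct upcall_msg *alloc_msg(struct auth_like *a)
{
    struct upcall_msg *m = malloc(sizeof(*m));
    a->refcount++;          /* the message now pins the auth */
    m->auth = a;
    return m;
}

static void release_msg(struct upcall_msg *m)
{
    auth_put(m->auth);      /* drop the message's reference */
    free(m);
}

int main(void)
{
    struct auth_like *a = malloc(sizeof(*a));
    a->refcount = 1;        /* creator's reference */

    struct upcall_msg *m = alloc_msg(a);
    auth_put(a);            /* RPC client shuts down first... */
    release_msg(m);         /* ...auth survives until the message is done */
    return 0;
}
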
diff --git a/queue-3.13/sunrpc-fix-races-in-xs_nospace.patch b/queue-3.13/sunrpc-fix-races-in-xs_nospace.patch
new file mode 100644 (file)
index 0000000..d51a9cb
--- /dev/null
@@ -0,0 +1,55 @@
+From 06ea0bfe6e6043cb56a78935a19f6f8ebc636226 Mon Sep 17 00:00:00 2001
+From: Trond Myklebust <trond.myklebust@primarydata.com>
+Date: Tue, 11 Feb 2014 09:15:54 -0500
+Subject: SUNRPC: Fix races in xs_nospace()
+
+From: Trond Myklebust <trond.myklebust@primarydata.com>
+
+commit 06ea0bfe6e6043cb56a78935a19f6f8ebc636226 upstream.
+
+When a send failure occurs due to the socket being out of buffer space,
+we call xs_nospace() in order to have the RPC task wait until the
+socket has drained enough to make it worth while trying again.
+The current patch fixes a race in which the socket is drained before
+we get round to setting up the machinery in xs_nospace(), and which
+is reported to cause hangs.
+
+Link: http://lkml.kernel.org/r/20140210170315.33dfc621@notabene.brown
+Fixes: a9a6b52ee1ba (SUNRPC: Don't start the retransmission timer...)
+Reported-by: Neil Brown <neilb@suse.com>
+Signed-off-by: Trond Myklebust <trond.myklebust@primarydata.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/sunrpc/xprtsock.c |    6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/net/sunrpc/xprtsock.c
++++ b/net/sunrpc/xprtsock.c
+@@ -504,6 +504,7 @@ static int xs_nospace(struct rpc_task *t
+       struct rpc_rqst *req = task->tk_rqstp;
+       struct rpc_xprt *xprt = req->rq_xprt;
+       struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
++      struct sock *sk = transport->inet;
+       int ret = -EAGAIN;
+       dprintk("RPC: %5u xmit incomplete (%u left of %u)\n",
+@@ -521,7 +522,7 @@ static int xs_nospace(struct rpc_task *t
+                        * window size
+                        */
+                       set_bit(SOCK_NOSPACE, &transport->sock->flags);
+-                      transport->inet->sk_write_pending++;
++                      sk->sk_write_pending++;
+                       /* ...and wait for more buffer space */
+                       xprt_wait_for_buffer_space(task, xs_nospace_callback);
+               }
+@@ -531,6 +532,9 @@ static int xs_nospace(struct rpc_task *t
+       }
+       spin_unlock_bh(&xprt->transport_lock);
++
++      /* Race breaker in case memory is freed before above code is called */
++      sk->sk_write_space(sk);
+       return ret;
+ }
diff --git a/queue-3.13/x86-dma-mapping-fix-gfp_atomic-macro-usage.patch b/queue-3.13/x86-dma-mapping-fix-gfp_atomic-macro-usage.patch
new file mode 100644 (file)
index 0000000..4207161
--- /dev/null
@@ -0,0 +1,35 @@
+From c091c71ad2218fc50a07b3d1dab85783f3b77efd Mon Sep 17 00:00:00 2001
+From: Marek Szyprowski <m.szyprowski@samsung.com>
+Date: Fri, 24 Jan 2014 14:49:58 +0100
+Subject: x86: dma-mapping: fix GFP_ATOMIC macro usage
+
+From: Marek Szyprowski <m.szyprowski@samsung.com>
+
+commit c091c71ad2218fc50a07b3d1dab85783f3b77efd upstream.
+
+GFP_ATOMIC is not a single gfp flag, but a macro which expands to the other
+flags, where meaningful is the LACK of __GFP_WAIT flag. To check if caller
+wants to perform an atomic allocation, the code must test for a lack of the
+__GFP_WAIT flag. This patch fixes the issue introduced in v3.5-rc1.
+
+Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/pci-dma.c |    4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/kernel/pci-dma.c
++++ b/arch/x86/kernel/pci-dma.c
+@@ -100,8 +100,10 @@ void *dma_generic_alloc_coherent(struct
+       flag |= __GFP_ZERO;
+ again:
+       page = NULL;
+-      if (!(flag & GFP_ATOMIC))
++      /* CMA can be used only in the context which permits sleeping */
++      if (flag & __GFP_WAIT)
+               page = dma_alloc_from_contiguous(dev, count, get_order(size));
++      /* fallback */
+       if (!page)
+               page = alloc_pages_node(dev_to_node(dev), flag, get_order(size));
+       if (!page)
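
The point of the GFP_ATOMIC fix above is that GFP_ATOMIC is a macro, not a single flag bit you can test for; whether the caller may sleep is carried by __GFP_WAIT (present means sleeping is allowed). The sketch below uses illustrative bit values, not the kernel's real definitions, to show how the old and new tests disagree for some flag combinations.

#include <stdio.h>

/* Illustrative stand-ins, not the kernel's real values. */
#define __GFP_WAIT   0x01u
#define __GFP_IO     0x02u
#define __GFP_FS     0x04u
#define __GFP_HIGH   0x08u

#define GFP_KERNEL  (__GFP_WAIT | __GFP_IO | __GFP_FS)
#define GFP_ATOMIC  (__GFP_HIGH)        /* a macro, not a dedicated bit */
#define GFP_NOWAIT  0u

static void check(const char *name, unsigned int flag)
{
    printf("%-22s old test (may sleep?): %d   new test: %d\n", name,
           !(flag & GFP_ATOMIC),        /* pre-fix: really tests __GFP_HIGH */
           !!(flag & __GFP_WAIT));      /* fix: tests the actual sleep bit */
}

int main(void)
{
    check("GFP_KERNEL", GFP_KERNEL);                /* both say sleepable */
    check("GFP_ATOMIC", GFP_ATOMIC);                /* both say atomic */
    check("GFP_NOWAIT", GFP_NOWAIT);                /* old wrongly says sleepable */
    check("GFP_KERNEL|__GFP_HIGH", GFP_KERNEL | __GFP_HIGH);  /* old wrongly says atomic */
    return 0;
}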